Checkout APM and dependencies from upstream WebRTC

Generate source code by command:
./script/sync-apm.sh  ~/webrtc-checkout/src .

where webrtc-checkout HEAD is at 8a1b20a21

BUG=chromium:710465
TEST=None

Change-Id: Iceab82cf72dc4c0550c8864580ed9f75d4f9a596
Reviewed-on: https://chromium-review.googlesource.com/970197
Commit-Ready: Hsinyu Chao <hychao@chromium.org>
Tested-by: Hsinyu Chao <hychao@chromium.org>
Reviewed-by: Cheng-Yi Chiang <cychiang@chromium.org>
diff --git a/api/array_view.h b/api/array_view.h
new file mode 100644
index 0000000..d951d0f
--- /dev/null
+++ b/api/array_view.h
@@ -0,0 +1,263 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_ARRAY_VIEW_H_
+#define API_ARRAY_VIEW_H_
+
+#include <algorithm>
+#include <type_traits>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/type_traits.h"
+
+namespace rtc {
+
+// tl;dr: rtc::ArrayView is the same thing as gsl::span from the Guideline
+//        Support Library.
+//
+// Many functions read from or write to arrays. The obvious way to do this is
+// to use two arguments, a pointer to the first element and an element count:
+//
+//   bool Contains17(const int* arr, size_t size) {
+//     for (size_t i = 0; i < size; ++i) {
+//       if (arr[i] == 17)
+//         return true;
+//     }
+//     return false;
+//   }
+//
+// This is flexible, since it doesn't matter how the array is stored (C array,
+// std::vector, rtc::Buffer, ...), but it's error-prone because the caller has
+// to correctly specify the array length:
+//
+//   Contains17(arr, arraysize(arr));     // C array
+//   Contains17(arr.data(), arr.size());  // std::vector
+//   Contains17(arr, size);               // pointer + size
+//   ...
+//
+// It's also kind of messy to have two separate arguments for what is
+// conceptually a single thing.
+//
+// Enter rtc::ArrayView<T>. It contains a T pointer (to an array it doesn't
+// own) and a count, and supports the basic things you'd expect, such as
+// indexing and iteration. It allows us to write our function like this:
+//
+//   bool Contains17(rtc::ArrayView<const int> arr) {
+//     for (auto e : arr) {
+//       if (e == 17)
+//         return true;
+//     }
+//     return false;
+//   }
+//
+// And even better, because a bunch of things will implicitly convert to
+// ArrayView, we can call it like this:
+//
+//   Contains17(arr);                             // C array
+//   Contains17(arr);                             // std::vector
+//   Contains17(rtc::ArrayView<int>(arr, size));  // pointer + size
+//   Contains17(nullptr);                         // nullptr -> empty ArrayView
+//   ...
+//
+// ArrayView<T> stores both a pointer and a size, but you may also use
+// ArrayView<T, N>, which has a size that's fixed at compile time (which means
+// it only has to store the pointer).
+//
+// One important point is that ArrayView<T> and ArrayView<const T> are
+// different types, which allow and don't allow mutation of the array elements,
+// respectively. The implicit conversions work just like you'd hope, so that
+// e.g. vector<int> will convert to either ArrayView<int> or ArrayView<const
+// int>, but const vector<int> will convert only to ArrayView<const int>.
+// (ArrayView itself can be the source type in such conversions, so
+// ArrayView<int> will convert to ArrayView<const int>.)
+//
+// Note: ArrayView is tiny (just a pointer and a count if variable-sized, just
+// a pointer if fix-sized) and trivially copyable, so it's probably cheaper to
+// pass it by value than by const reference.
+
+namespace impl {
+
+// Magic constant for indicating that the size of an ArrayView is variable
+// instead of fixed.
+enum : std::ptrdiff_t { kArrayViewVarSize = -4711 };
+
+// Base class for ArrayViews of fixed nonzero size.
+template <typename T, std::ptrdiff_t Size>
+class ArrayViewBase {
+  static_assert(Size > 0, "ArrayView size must be variable or non-negative");
+
+ public:
+  ArrayViewBase(T* data, size_t size) : data_(data) {}
+
+  static constexpr size_t size() { return Size; }
+  static constexpr bool empty() { return false; }
+  T* data() const { return data_; }
+
+ protected:
+  static constexpr bool fixed_size() { return true; }
+
+ private:
+  T* data_;
+};
+
+// Specialized base class for ArrayViews of fixed zero size.
+template <typename T>
+class ArrayViewBase<T, 0> {
+ public:
+  explicit ArrayViewBase(T* data, size_t size) {}
+
+  static constexpr size_t size() { return 0; }
+  static constexpr bool empty() { return true; }
+  T* data() const { return nullptr; }
+
+ protected:
+  static constexpr bool fixed_size() { return true; }
+};
+
+// Specialized base class for ArrayViews of variable size.
+template <typename T>
+class ArrayViewBase<T, impl::kArrayViewVarSize> {
+ public:
+  ArrayViewBase(T* data, size_t size)
+      : data_(size == 0 ? nullptr : data), size_(size) {}
+
+  size_t size() const { return size_; }
+  bool empty() const { return size_ == 0; }
+  T* data() const { return data_; }
+
+ protected:
+  static constexpr bool fixed_size() { return false; }
+
+ private:
+  T* data_;
+  size_t size_;
+};
+
+}  // namespace impl
+
+template <typename T, std::ptrdiff_t Size = impl::kArrayViewVarSize>
+class ArrayView final : public impl::ArrayViewBase<T, Size> {
+ public:
+  using value_type = T;
+  using const_iterator = const T*;
+
+  // Construct an ArrayView from a pointer and a length.
+  template <typename U>
+  ArrayView(U* data, size_t size)
+      : impl::ArrayViewBase<T, Size>::ArrayViewBase(data, size) {
+    RTC_DCHECK_EQ(size == 0 ? nullptr : data, this->data());
+    RTC_DCHECK_EQ(size, this->size());
+    RTC_DCHECK_EQ(!this->data(),
+                  this->size() == 0);  // data is null iff size == 0.
+  }
+
+  // Construct an empty ArrayView. Note that fixed-size ArrayViews of size > 0
+  // cannot be empty.
+  ArrayView() : ArrayView(nullptr, 0) {}
+  ArrayView(std::nullptr_t)  // NOLINT
+      : ArrayView() {}
+  ArrayView(std::nullptr_t, size_t size)
+      : ArrayView(static_cast<T*>(nullptr), size) {
+    static_assert(Size == 0 || Size == impl::kArrayViewVarSize, "");
+    RTC_DCHECK_EQ(0, size);
+  }
+
+  // Construct an ArrayView from an array.
+  template <typename U, size_t N>
+  ArrayView(U (&array)[N])  // NOLINT
+      : ArrayView(array, N) {
+    static_assert(Size == N || Size == impl::kArrayViewVarSize,
+                  "Array size must match ArrayView size");
+  }
+
+  // (Only if size is fixed.) Construct an ArrayView from any type U that has a
+  // static constexpr size() method whose return value is equal to Size, and a
+  // data() method whose return value converts implicitly to T*. In particular,
+  // this means we allow conversion from ArrayView<T, N> to ArrayView<const T,
+  // N>, but not the other way around. We also don't allow conversion from
+  // ArrayView<T> to ArrayView<T, N>, or from ArrayView<T, M> to ArrayView<T,
+  // N> when M != N.
+  template <
+      typename U,
+      typename std::enable_if<Size != impl::kArrayViewVarSize &&
+                              HasDataAndSize<U, T>::value>::type* = nullptr>
+  ArrayView(U& u)  // NOLINT
+      : ArrayView(u.data(), u.size()) {
+    static_assert(U::size() == Size, "Sizes must match exactly");
+  }
+
+  // (Only if size is variable.) Construct an ArrayView from any type U that
+  // has a size() method whose return value converts implicitly to size_t, and
+  // a data() method whose return value converts implicitly to T*. In
+  // particular, this means we allow conversion from ArrayView<T> to
+  // ArrayView<const T>, but not the other way around. Other allowed
+  // conversions include
+  // ArrayView<T, N> to ArrayView<T> or ArrayView<const T>,
+  // std::vector<T> to ArrayView<T> or ArrayView<const T>,
+  // const std::vector<T> to ArrayView<const T>,
+  // rtc::Buffer to ArrayView<uint8_t> or ArrayView<const uint8_t>, and
+  // const rtc::Buffer to ArrayView<const uint8_t>.
+  template <
+      typename U,
+      typename std::enable_if<Size == impl::kArrayViewVarSize &&
+                              HasDataAndSize<U, T>::value>::type* = nullptr>
+  ArrayView(U& u)  // NOLINT
+      : ArrayView(u.data(), u.size()) {}
+
+  // Indexing and iteration. These allow mutation even if the ArrayView is
+  // const, because the ArrayView doesn't own the array. (To prevent mutation,
+  // use a const element type.)
+  T& operator[](size_t idx) const {
+    RTC_DCHECK_LT(idx, this->size());
+    RTC_DCHECK(this->data());
+    return this->data()[idx];
+  }
+  T* begin() const { return this->data(); }
+  T* end() const { return this->data() + this->size(); }
+  const T* cbegin() const { return this->data(); }
+  const T* cend() const { return this->data() + this->size(); }
+
+  ArrayView<T> subview(size_t offset, size_t size) const {
+    return offset < this->size()
+               ? ArrayView<T>(this->data() + offset,
+                              std::min(size, this->size() - offset))
+               : ArrayView<T>();
+  }
+  ArrayView<T> subview(size_t offset) const {
+    return subview(offset, this->size());
+  }
+};
+
+// Comparing two ArrayViews compares their (pointer,size) pairs; it does *not*
+// dereference the pointers.
+template <typename T, std::ptrdiff_t Size1, std::ptrdiff_t Size2>
+bool operator==(const ArrayView<T, Size1>& a, const ArrayView<T, Size2>& b) {
+  return a.data() == b.data() && a.size() == b.size();
+}
+template <typename T, std::ptrdiff_t Size1, std::ptrdiff_t Size2>
+bool operator!=(const ArrayView<T, Size1>& a, const ArrayView<T, Size2>& b) {
+  return !(a == b);
+}
+
+// Variable-size ArrayViews are the size of two pointers; fixed-size ArrayViews
+// are the size of one pointer. (And as a special case, fixed-size ArrayViews
+// of size 0 require no storage.)
+static_assert(sizeof(ArrayView<int>) == 2 * sizeof(int*), "");
+static_assert(sizeof(ArrayView<int, 17>) == sizeof(int*), "");
+static_assert(std::is_empty<ArrayView<int, 0>>::value, "");
+
+template <typename T>
+inline ArrayView<T> MakeArrayView(T* data, size_t size) {
+  return ArrayView<T>(data, size);
+}
+
+}  // namespace rtc
+
+#endif  // API_ARRAY_VIEW_H_
diff --git a/api/array_view_unittest.cc b/api/array_view_unittest.cc
new file mode 100644
index 0000000..48dff2c
--- /dev/null
+++ b/api/array_view_unittest.cc
@@ -0,0 +1,412 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "api/array_view.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/gunit.h"
+#include "test/gmock.h"
+
+namespace rtc {
+
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::IsEmpty;
+
+template <typename T>
+void Call(ArrayView<T>) {}
+
+}  // namespace
+
+TEST(ArrayViewTest, TestConstructFromPtrAndArray) {
+  char arr[] = "Arrr!";
+  const char carr[] = "Carrr!";
+  Call<const char>(arr);
+  Call<const char>(carr);
+  Call<char>(arr);
+  // Call<char>(carr);  // Compile error, because can't drop const.
+  // Call<int>(arr);  // Compile error, because incompatible types.
+  ArrayView<int*> x;
+  EXPECT_EQ(0u, x.size());
+  EXPECT_EQ(nullptr, x.data());
+  ArrayView<char> y = arr;
+  EXPECT_EQ(6u, y.size());
+  EXPECT_EQ(arr, y.data());
+  ArrayView<char, 6> yf = arr;
+  static_assert(yf.size() == 6, "");
+  EXPECT_EQ(arr, yf.data());
+  ArrayView<const char> z(arr + 1, 3);
+  EXPECT_EQ(3u, z.size());
+  EXPECT_EQ(arr + 1, z.data());
+  ArrayView<const char, 3> zf(arr + 1, 3);
+  static_assert(zf.size() == 3, "");
+  EXPECT_EQ(arr + 1, zf.data());
+  ArrayView<const char> w(arr, 2);
+  EXPECT_EQ(2u, w.size());
+  EXPECT_EQ(arr, w.data());
+  ArrayView<const char, 2> wf(arr, 2);
+  static_assert(wf.size() == 2, "");
+  EXPECT_EQ(arr, wf.data());
+  ArrayView<char> q(arr, 0);
+  EXPECT_EQ(0u, q.size());
+  EXPECT_EQ(nullptr, q.data());
+  ArrayView<char, 0> qf(arr, 0);
+  static_assert(qf.size() == 0, "");
+  EXPECT_EQ(nullptr, qf.data());
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+  // DCHECK error (nullptr with nonzero size).
+  EXPECT_DEATH(ArrayView<int>(static_cast<int*>(nullptr), 5), "");
+#endif
+  // These are compile errors, because incompatible types.
+  // ArrayView<int> m = arr;
+  // ArrayView<float> n(arr + 2, 2);
+}
+
+TEST(ArrayViewTest, TestCopyConstructorVariable) {
+  char arr[] = "Arrr!";
+  ArrayView<char> x = arr;
+  EXPECT_EQ(6u, x.size());
+  EXPECT_EQ(arr, x.data());
+  ArrayView<char> y = x;  // Copy non-const -> non-const.
+  EXPECT_EQ(6u, y.size());
+  EXPECT_EQ(arr, y.data());
+  ArrayView<const char> z = x;  // Copy non-const -> const.
+  EXPECT_EQ(6u, z.size());
+  EXPECT_EQ(arr, z.data());
+  ArrayView<const char> w = z;  // Copy const -> const.
+  EXPECT_EQ(6u, w.size());
+  EXPECT_EQ(arr, w.data());
+  // ArrayView<char> v = z;  // Compile error, because can't drop const.
+}
+
+TEST(ArrayViewTest, TestCopyConstructorFixed) {
+  char arr[] = "Arrr!";
+  ArrayView<char, 6> x = arr;
+  static_assert(x.size() == 6, "");
+  EXPECT_EQ(arr, x.data());
+
+  // Copy fixed -> fixed.
+  ArrayView<char, 6> y = x;  // Copy non-const -> non-const.
+  static_assert(y.size() == 6, "");
+  EXPECT_EQ(arr, y.data());
+  ArrayView<const char, 6> z = x;  // Copy non-const -> const.
+  static_assert(z.size() == 6, "");
+  EXPECT_EQ(arr, z.data());
+  ArrayView<const char, 6> w = z;  // Copy const -> const.
+  static_assert(w.size() == 6, "");
+  EXPECT_EQ(arr, w.data());
+  // ArrayView<char, 6> v = z;  // Compile error, because can't drop const.
+
+  // Copy fixed -> variable.
+  ArrayView<char> yv = x;  // Copy non-const -> non-const.
+  EXPECT_EQ(6u, yv.size());
+  EXPECT_EQ(arr, yv.data());
+  ArrayView<const char> zv = x;  // Copy non-const -> const.
+  EXPECT_EQ(6u, zv.size());
+  EXPECT_EQ(arr, zv.data());
+  ArrayView<const char> wv = z;  // Copy const -> const.
+  EXPECT_EQ(6u, wv.size());
+  EXPECT_EQ(arr, wv.data());
+  // ArrayView<char> vv = z;  // Compile error, because can't drop const.
+}
+
+TEST(ArrayViewTest, TestCopyAssignmentVariable) {
+  char arr[] = "Arrr!";
+  ArrayView<char> x(arr);
+  EXPECT_EQ(6u, x.size());
+  EXPECT_EQ(arr, x.data());
+  ArrayView<char> y;
+  y = x;  // Copy non-const -> non-const.
+  EXPECT_EQ(6u, y.size());
+  EXPECT_EQ(arr, y.data());
+  ArrayView<const char> z;
+  z = x;  // Copy non-const -> const.
+  EXPECT_EQ(6u, z.size());
+  EXPECT_EQ(arr, z.data());
+  ArrayView<const char> w;
+  w = z;  // Copy const -> const.
+  EXPECT_EQ(6u, w.size());
+  EXPECT_EQ(arr, w.data());
+  // ArrayView<char> v;
+  // v = z;  // Compile error, because can't drop const.
+}
+
+TEST(ArrayViewTest, TestCopyAssignmentFixed) {
+  char arr[] = "Arrr!";
+  char init[] = "Init!";
+  ArrayView<char, 6> x(arr);
+  EXPECT_EQ(arr, x.data());
+
+  // Copy fixed -> fixed.
+  ArrayView<char, 6> y(init);
+  y = x;  // Copy non-const -> non-const.
+  EXPECT_EQ(arr, y.data());
+  ArrayView<const char, 6> z(init);
+  z = x;  // Copy non-const -> const.
+  EXPECT_EQ(arr, z.data());
+  ArrayView<const char, 6> w(init);
+  w = z;  // Copy const -> const.
+  EXPECT_EQ(arr, w.data());
+  // ArrayView<char, 6> v(init);
+  // v = z;  // Compile error, because can't drop const.
+
+  // Copy fixed -> variable.
+  ArrayView<char> yv;
+  yv = x;  // Copy non-const -> non-const.
+  EXPECT_EQ(6u, yv.size());
+  EXPECT_EQ(arr, yv.data());
+  ArrayView<const char> zv;
+  zv = x;  // Copy non-const -> const.
+  EXPECT_EQ(6u, zv.size());
+  EXPECT_EQ(arr, zv.data());
+  ArrayView<const char> wv;
+  wv = z;  // Copy const -> const.
+  EXPECT_EQ(6u, wv.size());
+  EXPECT_EQ(arr, wv.data());
+  // ArrayView<char> v;
+  // v = z;  // Compile error, because can't drop const.
+}
+
+TEST(ArrayViewTest, TestStdVector) {
+  std::vector<int> v;
+  v.push_back(3);
+  v.push_back(11);
+  Call<const int>(v);
+  Call<int>(v);
+  // Call<unsigned int>(v);  // Compile error, because incompatible types.
+  ArrayView<int> x = v;
+  EXPECT_EQ(2u, x.size());
+  EXPECT_EQ(v.data(), x.data());
+  ArrayView<const int> y;
+  y = v;
+  EXPECT_EQ(2u, y.size());
+  EXPECT_EQ(v.data(), y.data());
+  // ArrayView<double> d = v;  // Compile error, because incompatible types.
+  const std::vector<int> cv;
+  Call<const int>(cv);
+  // Call<int>(cv);  // Compile error, because can't drop const.
+  ArrayView<const int> z = cv;
+  EXPECT_EQ(0u, z.size());
+  EXPECT_EQ(nullptr, z.data());
+  // ArrayView<int> w = cv;  // Compile error, because can't drop const.
+}
+
+TEST(ArrayViewTest, TestRtcBuffer) {
+  rtc::Buffer b = "so buffer";
+  Call<const uint8_t>(b);
+  Call<uint8_t>(b);
+  // Call<int8_t>(b);  // Compile error, because incompatible types.
+  ArrayView<uint8_t> x = b;
+  EXPECT_EQ(10u, x.size());
+  EXPECT_EQ(b.data(), x.data());
+  ArrayView<const uint8_t> y;
+  y = b;
+  EXPECT_EQ(10u, y.size());
+  EXPECT_EQ(b.data(), y.data());
+  // ArrayView<char> d = b;  // Compile error, because incompatible types.
+  const rtc::Buffer cb = "very const";
+  Call<const uint8_t>(cb);
+  // Call<uint8_t>(cb);  // Compile error, because can't drop const.
+  ArrayView<const uint8_t> z = cb;
+  EXPECT_EQ(11u, z.size());
+  EXPECT_EQ(cb.data(), z.data());
+  // ArrayView<uint8_t> w = cb;  // Compile error, because can't drop const.
+}
+
+TEST(ArrayViewTest, TestSwapVariable) {
+  const char arr[] = "Arrr!";
+  const char aye[] = "Aye, Cap'n!";
+  ArrayView<const char> x(arr);
+  EXPECT_EQ(6u, x.size());
+  EXPECT_EQ(arr, x.data());
+  ArrayView<const char> y(aye);
+  EXPECT_EQ(12u, y.size());
+  EXPECT_EQ(aye, y.data());
+  using std::swap;
+  swap(x, y);
+  EXPECT_EQ(12u, x.size());
+  EXPECT_EQ(aye, x.data());
+  EXPECT_EQ(6u, y.size());
+  EXPECT_EQ(arr, y.data());
+  // ArrayView<char> z;
+  // swap(x, z);  // Compile error, because can't drop const.
+}
+
+TEST(FixArrayViewTest, TestSwapFixed) {
+  const char arr[] = "Arr!";
+  char aye[] = "Aye!";
+  ArrayView<const char, 5> x(arr);
+  EXPECT_EQ(arr, x.data());
+  ArrayView<const char, 5> y(aye);
+  EXPECT_EQ(aye, y.data());
+  using std::swap;
+  swap(x, y);
+  EXPECT_EQ(aye, x.data());
+  EXPECT_EQ(arr, y.data());
+  // ArrayView<char, 5> z(aye);
+  // swap(x, z);  // Compile error, because can't drop const.
+  // ArrayView<const char, 4> w(aye, 4);
+  // swap(x, w);  // Compile error, because different sizes.
+}
+
+TEST(ArrayViewTest, TestIndexing) {
+  char arr[] = "abcdefg";
+  ArrayView<char> x(arr);
+  const ArrayView<char> y(arr);
+  ArrayView<const char, 8> z(arr);
+  EXPECT_EQ(8u, x.size());
+  EXPECT_EQ(8u, y.size());
+  EXPECT_EQ(8u, z.size());
+  EXPECT_EQ('b', x[1]);
+  EXPECT_EQ('c', y[2]);
+  EXPECT_EQ('d', z[3]);
+  x[3] = 'X';
+  y[2] = 'Y';
+  // z[1] = 'Z';  // Compile error, because z's element type is const char.
+  EXPECT_EQ('b', x[1]);
+  EXPECT_EQ('Y', y[2]);
+  EXPECT_EQ('X', z[3]);
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+  EXPECT_DEATH(z[8], "");  // DCHECK error (index out of bounds).
+#endif
+}
+
+TEST(ArrayViewTest, TestIterationEmpty) {
+  // Variable-size.
+  ArrayView<std::vector<std::vector<std::vector<std::string>>>> av;
+  EXPECT_EQ(av.begin(), av.end());
+  EXPECT_EQ(av.cbegin(), av.cend());
+  for (auto& e : av) {
+    EXPECT_TRUE(false);
+    EXPECT_EQ(42u, e.size());  // Dummy use of e to prevent unused var warning.
+  }
+
+  // Fixed-size.
+  ArrayView<std::vector<std::vector<std::vector<std::string>>>, 0> af;
+  EXPECT_EQ(af.begin(), af.end());
+  EXPECT_EQ(af.cbegin(), af.cend());
+  for (auto& e : af) {
+    EXPECT_TRUE(false);
+    EXPECT_EQ(42u, e.size());  // Dummy use of e to prevent unused var warning.
+  }
+}
+
+TEST(ArrayViewTest, TestIterationVariable) {
+  char arr[] = "Arrr!";
+  ArrayView<char> av(arr);
+  EXPECT_EQ('A', *av.begin());
+  EXPECT_EQ('A', *av.cbegin());
+  EXPECT_EQ('\0', *(av.end() - 1));
+  EXPECT_EQ('\0', *(av.cend() - 1));
+  char i = 0;
+  for (auto& e : av) {
+    EXPECT_EQ(arr + i, &e);
+    e = 's' + i;
+    ++i;
+  }
+  i = 0;
+  for (auto& e : ArrayView<const char>(av)) {
+    EXPECT_EQ(arr + i, &e);
+    // e = 'q' + i;  // Compile error, because e is a const char&.
+    ++i;
+  }
+}
+
+TEST(ArrayViewTest, TestIterationFixed) {
+  char arr[] = "Arrr!";
+  ArrayView<char, 6> av(arr);
+  EXPECT_EQ('A', *av.begin());
+  EXPECT_EQ('A', *av.cbegin());
+  EXPECT_EQ('\0', *(av.end() - 1));
+  EXPECT_EQ('\0', *(av.cend() - 1));
+  char i = 0;
+  for (auto& e : av) {
+    EXPECT_EQ(arr + i, &e);
+    e = 's' + i;
+    ++i;
+  }
+  i = 0;
+  for (auto& e : ArrayView<const char, 6>(av)) {
+    EXPECT_EQ(arr + i, &e);
+    // e = 'q' + i;  // Compile error, because e is a const char&.
+    ++i;
+  }
+}
+
+TEST(ArrayViewTest, TestEmpty) {
+  EXPECT_TRUE(ArrayView<int>().empty());
+  const int a[] = {1, 2, 3};
+  EXPECT_FALSE(ArrayView<const int>(a).empty());
+
+  static_assert(ArrayView<int, 0>::empty(), "");
+  static_assert(!ArrayView<int, 3>::empty(), "");
+}
+
+TEST(ArrayViewTest, TestCompare) {
+  int a[] = {1, 2, 3};
+  int b[] = {1, 2, 3};
+
+  EXPECT_EQ(ArrayView<int>(a), ArrayView<int>(a));
+  EXPECT_EQ((ArrayView<int, 3>(a)), (ArrayView<int, 3>(a)));
+  EXPECT_EQ(ArrayView<int>(a), (ArrayView<int, 3>(a)));
+  EXPECT_EQ(ArrayView<int>(), ArrayView<int>());
+  EXPECT_EQ(ArrayView<int>(), ArrayView<int>(a, 0));
+  EXPECT_EQ(ArrayView<int>(a, 0), ArrayView<int>(b, 0));
+  EXPECT_EQ((ArrayView<int, 0>(a, 0)), ArrayView<int>());
+
+  EXPECT_NE(ArrayView<int>(a), ArrayView<int>(b));
+  EXPECT_NE((ArrayView<int, 3>(a)), (ArrayView<int, 3>(b)));
+  EXPECT_NE((ArrayView<int, 3>(a)), ArrayView<int>(b));
+  EXPECT_NE(ArrayView<int>(a), ArrayView<int>());
+  EXPECT_NE(ArrayView<int>(a), ArrayView<int>(a, 2));
+  EXPECT_NE((ArrayView<int, 3>(a)), (ArrayView<int, 2>(a, 2)));
+}
+
+TEST(ArrayViewTest, TestSubViewVariable) {
+  int a[] = {1, 2, 3};
+  ArrayView<int> av(a);
+
+  EXPECT_EQ(av.subview(0), av);
+
+  EXPECT_THAT(av.subview(1), ElementsAre(2, 3));
+  EXPECT_THAT(av.subview(2), ElementsAre(3));
+  EXPECT_THAT(av.subview(3), IsEmpty());
+  EXPECT_THAT(av.subview(4), IsEmpty());
+
+  EXPECT_THAT(av.subview(1, 0), IsEmpty());
+  EXPECT_THAT(av.subview(1, 1), ElementsAre(2));
+  EXPECT_THAT(av.subview(1, 2), ElementsAre(2, 3));
+  EXPECT_THAT(av.subview(1, 3), ElementsAre(2, 3));
+}
+
+TEST(ArrayViewTest, TestSubViewFixed) {
+  int a[] = {1, 2, 3};
+  ArrayView<int, 3> av(a);
+
+  EXPECT_EQ(av.subview(0), av);
+
+  EXPECT_THAT(av.subview(1), ElementsAre(2, 3));
+  EXPECT_THAT(av.subview(2), ElementsAre(3));
+  EXPECT_THAT(av.subview(3), IsEmpty());
+  EXPECT_THAT(av.subview(4), IsEmpty());
+
+  EXPECT_THAT(av.subview(1, 0), IsEmpty());
+  EXPECT_THAT(av.subview(1, 1), ElementsAre(2));
+  EXPECT_THAT(av.subview(1, 2), ElementsAre(2, 3));
+  EXPECT_THAT(av.subview(1, 3), ElementsAre(2, 3));
+}
+
+}  // namespace rtc
diff --git a/api/audio/audio_frame.cc b/api/audio/audio_frame.cc
new file mode 100644
index 0000000..108a523
--- /dev/null
+++ b/api/audio/audio_frame.cc
@@ -0,0 +1,185 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+
+#include "api/audio/audio_frame.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/timeutils.h"
+
+namespace webrtc {
+
+AudioFrame::AudioFrame() {
+  // Visual Studio doesn't like this in the class definition.
+  static_assert(sizeof(data_) == kMaxDataSizeBytes, "kMaxDataSizeBytes");
+}
+
+void AudioFrame::Reset() {
+  ResetWithoutMuting();
+  muted_ = true;
+}
+
+void AudioFrame::ResetWithoutMuting() {
+  // TODO(wu): Zero is a valid value for |timestamp_|. We should initialize
+  // to an invalid value, or add a new member to indicate invalidity.
+  timestamp_ = 0;
+  elapsed_time_ms_ = -1;
+  ntp_time_ms_ = -1;
+  samples_per_channel_ = 0;
+  sample_rate_hz_ = 0;
+  num_channels_ = 0;
+  speech_type_ = kUndefined;
+  vad_activity_ = kVadUnknown;
+  profile_timestamp_ms_ = 0;
+}
+
+void AudioFrame::UpdateFrame(uint32_t timestamp,
+                                    const int16_t* data,
+                                    size_t samples_per_channel,
+                                    int sample_rate_hz,
+                                    SpeechType speech_type,
+                                    VADActivity vad_activity,
+                                    size_t num_channels) {
+  timestamp_ = timestamp;
+  samples_per_channel_ = samples_per_channel;
+  sample_rate_hz_ = sample_rate_hz;
+  speech_type_ = speech_type;
+  vad_activity_ = vad_activity;
+  num_channels_ = num_channels;
+
+  const size_t length = samples_per_channel * num_channels;
+  RTC_CHECK_LE(length, kMaxDataSizeSamples);
+  if (data != nullptr) {
+    memcpy(data_, data, sizeof(int16_t) * length);
+    muted_ = false;
+  } else {
+    muted_ = true;
+  }
+}
+
+void AudioFrame::CopyFrom(const AudioFrame& src) {
+  if (this == &src) return;
+
+  timestamp_ = src.timestamp_;
+  elapsed_time_ms_ = src.elapsed_time_ms_;
+  ntp_time_ms_ = src.ntp_time_ms_;
+  muted_ = src.muted();
+  samples_per_channel_ = src.samples_per_channel_;
+  sample_rate_hz_ = src.sample_rate_hz_;
+  speech_type_ = src.speech_type_;
+  vad_activity_ = src.vad_activity_;
+  num_channels_ = src.num_channels_;
+
+  const size_t length = samples_per_channel_ * num_channels_;
+  RTC_CHECK_LE(length, kMaxDataSizeSamples);
+  if (!src.muted()) {
+    memcpy(data_, src.data(), sizeof(int16_t) * length);
+    muted_ = false;
+  }
+}
+
+void AudioFrame::UpdateProfileTimeStamp() {
+  profile_timestamp_ms_ = rtc::TimeMillis();
+}
+
+int64_t AudioFrame::ElapsedProfileTimeMs() const {
+  if (profile_timestamp_ms_ == 0) {
+    // Profiling has not been activated.
+    return -1;
+  }
+  return rtc::TimeSince(profile_timestamp_ms_);
+}
+
+const int16_t* AudioFrame::data() const {
+  return muted_ ? empty_data() : data_;
+}
+
+// TODO(henrik.lundin) Can we skip zeroing the buffer?
+// See https://bugs.chromium.org/p/webrtc/issues/detail?id=5647.
+int16_t* AudioFrame::mutable_data() {
+  if (muted_) {
+    memset(data_, 0, kMaxDataSizeBytes);
+    muted_ = false;
+  }
+  return data_;
+}
+
+void AudioFrame::Mute() {
+  muted_ = true;
+}
+
+bool AudioFrame::muted() const { return muted_; }
+
+AudioFrame& AudioFrame::operator>>=(const int rhs) {
+  RTC_CHECK_GT(num_channels_, 0);
+  RTC_CHECK_LT(num_channels_, 3);
+  if ((num_channels_ > 2) || (num_channels_ < 1)) return *this;
+  if (muted_) return *this;
+
+  for (size_t i = 0; i < samples_per_channel_ * num_channels_; i++) {
+    data_[i] = static_cast<int16_t>(data_[i] >> rhs);
+  }
+  return *this;
+}
+
+AudioFrame& AudioFrame::operator+=(const AudioFrame& rhs) {
+  // Sanity check
+  RTC_CHECK_GT(num_channels_, 0);
+  RTC_CHECK_LT(num_channels_, 3);
+  if ((num_channels_ > 2) || (num_channels_ < 1)) return *this;
+  if (num_channels_ != rhs.num_channels_) return *this;
+
+  bool noPrevData = muted_;
+  if (samples_per_channel_ != rhs.samples_per_channel_) {
+    if (samples_per_channel_ == 0) {
+      // special case we have no data to start with
+      samples_per_channel_ = rhs.samples_per_channel_;
+      noPrevData = true;
+    } else {
+      return *this;
+    }
+  }
+
+  if ((vad_activity_ == kVadActive) || rhs.vad_activity_ == kVadActive) {
+    vad_activity_ = kVadActive;
+  } else if (vad_activity_ == kVadUnknown || rhs.vad_activity_ == kVadUnknown) {
+    vad_activity_ = kVadUnknown;
+  }
+
+  if (speech_type_ != rhs.speech_type_) speech_type_ = kUndefined;
+
+  if (!rhs.muted()) {
+    muted_ = false;
+    if (noPrevData) {
+      memcpy(data_, rhs.data(),
+             sizeof(int16_t) * rhs.samples_per_channel_ * num_channels_);
+    } else {
+      // IMPROVEMENT this can be done very fast in assembly
+      for (size_t i = 0; i < samples_per_channel_ * num_channels_; i++) {
+        int32_t wrap_guard =
+            static_cast<int32_t>(data_[i]) + static_cast<int32_t>(rhs.data_[i]);
+        data_[i] = rtc::saturated_cast<int16_t>(wrap_guard);
+      }
+    }
+  }
+
+  return *this;
+}
+
+// static
+const int16_t* AudioFrame::empty_data() {
+  static const int16_t kEmptyData[kMaxDataSizeSamples] = {0};
+  static_assert(sizeof(kEmptyData) == kMaxDataSizeBytes, "kMaxDataSizeBytes");
+  return kEmptyData;
+}
+
+}  // namespace webrtc
diff --git a/api/audio/audio_frame.h b/api/audio/audio_frame.h
new file mode 100644
index 0000000..5cb2019
--- /dev/null
+++ b/api/audio/audio_frame.h
@@ -0,0 +1,152 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_AUDIO_AUDIO_FRAME_H_
+#define API_AUDIO_AUDIO_FRAME_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/deprecation.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+/* This class holds up to 60 ms of super-wideband (32 kHz) stereo audio. It
+ * allows for adding and subtracting frames while keeping track of the resulting
+ * states.
+ *
+ * Notes
+ * - This is a de-facto api, not designed for external use. The AudioFrame class
+ *   is in need of overhaul or even replacement, and anyone depending on it
+ *   should be prepared for that.
+ * - The total number of samples is samples_per_channel_ * num_channels_.
+ * - Stereo data is interleaved starting with the left channel.
+ */
+class AudioFrame {
+ public:
+  // Using constexpr here causes linker errors unless the variable also has an
+  // out-of-class definition, which is impractical in this header-only class.
+  // (This makes no sense because it compiles as an enum value, which we most
+  // certainly cannot take the address of, just fine.) C++17 introduces inline
+  // variables which should allow us to switch to constexpr and keep this a
+  // header-only class.
+  enum : size_t {
+    // Stereo, 32 kHz, 60 ms (2 * 32 * 60)
+    kMaxDataSizeSamples = 3840,
+    kMaxDataSizeBytes = kMaxDataSizeSamples * sizeof(int16_t),
+  };
+
+  enum VADActivity {
+    kVadActive = 0,
+    kVadPassive = 1,
+    kVadUnknown = 2
+  };
+  enum SpeechType {
+    kNormalSpeech = 0,
+    kPLC = 1,
+    kCNG = 2,
+    kPLCCNG = 3,
+    kUndefined = 4
+  };
+
+  AudioFrame();
+
+  // Resets all members to their default state.
+  void Reset();
+  // Same as Reset(), but leaves mute state unchanged. Muting a frame requires
+  // the buffer to be zeroed on the next call to mutable_data(). Callers
+  // intending to write to the buffer immediately after Reset() can instead use
+  // ResetWithoutMuting() to skip this wasteful zeroing.
+  void ResetWithoutMuting();
+
+  // TODO(solenberg): Remove once downstream users of AudioFrame have updated.
+  RTC_DEPRECATED
+      void UpdateFrame(int id, uint32_t timestamp, const int16_t* data,
+                       size_t samples_per_channel, int sample_rate_hz,
+                       SpeechType speech_type, VADActivity vad_activity,
+                       size_t num_channels = 1) {
+    RTC_UNUSED(id);
+    UpdateFrame(timestamp, data, samples_per_channel, sample_rate_hz,
+                speech_type, vad_activity, num_channels);
+  }
+
+  void UpdateFrame(uint32_t timestamp, const int16_t* data,
+                   size_t samples_per_channel, int sample_rate_hz,
+                   SpeechType speech_type, VADActivity vad_activity,
+                   size_t num_channels = 1);
+
+  void CopyFrom(const AudioFrame& src);
+
+  // Sets a wall-time clock timestamp in milliseconds to be used for profiling
+  // of time between two points in the audio chain.
+  // Example:
+  //   t0: UpdateProfileTimeStamp()
+  //   t1: ElapsedProfileTimeMs() => t1 - t0 [msec]
+  void UpdateProfileTimeStamp();
+  // Returns the time difference between now and when UpdateProfileTimeStamp()
+  // was last called. Returns -1 if UpdateProfileTimeStamp() has not yet been
+  // called.
+  int64_t ElapsedProfileTimeMs() const;
+
+  // data() returns a zeroed static buffer if the frame is muted.
+  // mutable_frame() always returns a non-static buffer; the first call to
+  // mutable_frame() zeros the non-static buffer and marks the frame unmuted.
+  const int16_t* data() const;
+  int16_t* mutable_data();
+
+  // Prefer to mute frames using AudioFrameOperations::Mute.
+  void Mute();
+  // Frame is muted by default.
+  bool muted() const;
+
+  // These methods are deprecated. Use the functions in
+  // webrtc/audio/utility instead. These methods will exists for a
+  // short period of time until webrtc clients have updated. See
+  // webrtc:6548 for details.
+  RTC_DEPRECATED AudioFrame& operator>>=(const int rhs);
+  RTC_DEPRECATED AudioFrame& operator+=(const AudioFrame& rhs);
+
+  // RTP timestamp of the first sample in the AudioFrame.
+  uint32_t timestamp_ = 0;
+  // Time since the first frame in milliseconds.
+  // -1 represents an uninitialized value.
+  int64_t elapsed_time_ms_ = -1;
+  // NTP time of the estimated capture time in local timebase in milliseconds.
+  // -1 represents an uninitialized value.
+  int64_t ntp_time_ms_ = -1;
+  size_t samples_per_channel_ = 0;
+  int sample_rate_hz_ = 0;
+  size_t num_channels_ = 0;
+  SpeechType speech_type_ = kUndefined;
+  VADActivity vad_activity_ = kVadUnknown;
+  // Monotonically increasing timestamp intended for profiling of audio frames.
+  // Typically used for measuring elapsed time between two different points in
+  // the audio path. No lock is used to save resources and we are thread safe
+  // by design. Also, rtc::Optional is not used since it will cause a "complex
+  // class/struct needs an explicit out-of-line destructor" build error.
+  int64_t profile_timestamp_ms_ = 0;
+
+ private:
+  // A permamently zeroed out buffer to represent muted frames. This is a
+  // header-only class, so the only way to avoid creating a separate empty
+  // buffer per translation unit is to wrap a static in an inline function.
+  static const int16_t* empty_data();
+
+  int16_t data_[kMaxDataSizeSamples];
+  bool muted_ = true;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioFrame);
+};
+
+}  // namespace webrtc
+
+#endif  // API_AUDIO_AUDIO_FRAME_H_
diff --git a/api/audio/audio_mixer.h b/api/audio/audio_mixer.h
new file mode 100644
index 0000000..14eefc1
--- /dev/null
+++ b/api/audio/audio_mixer.h
@@ -0,0 +1,80 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_AUDIO_AUDIO_MIXER_H_
+#define API_AUDIO_AUDIO_MIXER_H_
+
+#include <memory>
+
+#include "api/audio/audio_frame.h"
+#include "rtc_base/refcount.h"
+
+namespace webrtc {
+
+// WORK IN PROGRESS
+// This class is under development and is not yet intended for for use outside
+// of WebRtc/Libjingle.
+class AudioMixer : public rtc::RefCountInterface {
+ public:
+  // A callback class that all mixer participants must inherit from/implement.
+  class Source {
+   public:
+    enum class AudioFrameInfo {
+      kNormal,  // The samples in audio_frame are valid and should be used.
+      kMuted,   // The samples in audio_frame should not be used, but
+                // should be implicitly interpreted as zero. Other
+                // fields in audio_frame may be read and should
+                // contain meaningful values.
+      kError,   // The audio_frame will not be used.
+    };
+
+    // Overwrites |audio_frame|. The data_ field is overwritten with
+    // 10 ms of new audio (either 1 or 2 interleaved channels) at
+    // |sample_rate_hz|. All fields in |audio_frame| must be updated.
+    virtual AudioFrameInfo GetAudioFrameWithInfo(int sample_rate_hz,
+                                                 AudioFrame* audio_frame) = 0;
+
+    // A way for a mixer implementation to distinguish participants.
+    virtual int Ssrc() const = 0;
+
+    // A way for this source to say that GetAudioFrameWithInfo called
+    // with this sample rate or higher will not cause quality loss.
+    virtual int PreferredSampleRate() const = 0;
+
+    virtual ~Source() {}
+  };
+
+  // Returns true if adding was successful. A source is never added
+  // twice. Addition and removal can happen on different threads.
+  virtual bool AddSource(Source* audio_source) = 0;
+
+  // Removal is never attempted if a source has not been successfully
+  // added to the mixer.
+  virtual void RemoveSource(Source* audio_source) = 0;
+
+  // Performs mixing by asking registered audio sources for audio. The
+  // mixed result is placed in the provided AudioFrame. This method
+  // will only be called from a single thread. The channels argument
+  // specifies the number of channels of the mix result. The mixer
+  // should mix at a rate that doesn't cause quality loss of the
+  // sources' audio. The mixing rate is one of the rates listed in
+  // AudioProcessing::NativeRate. All fields in
+  // |audio_frame_for_mixing| must be updated.
+  virtual void Mix(size_t number_of_channels,
+                   AudioFrame* audio_frame_for_mixing) = 0;
+
+ protected:
+  // Since the mixer is reference counted, the destructor may be
+  // called from any thread.
+  ~AudioMixer() override {}
+};
+}  // namespace webrtc
+
+#endif  // API_AUDIO_AUDIO_MIXER_H_
diff --git a/api/audio/echo_canceller3_config.cc b/api/audio/echo_canceller3_config.cc
new file mode 100644
index 0000000..d74d7a8
--- /dev/null
+++ b/api/audio/echo_canceller3_config.cc
@@ -0,0 +1,16 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "api/audio/echo_canceller3_config.h"
+
+namespace webrtc {
+
+EchoCanceller3Config::EchoCanceller3Config() = default;
+
+}  // namespace webrtc
diff --git a/api/audio/echo_canceller3_config.h b/api/audio/echo_canceller3_config.h
new file mode 100644
index 0000000..fd5bf09
--- /dev/null
+++ b/api/audio/echo_canceller3_config.h
@@ -0,0 +1,126 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_AUDIO_ECHO_CANCELLER3_CONFIG_H_
+#define API_AUDIO_ECHO_CANCELLER3_CONFIG_H_
+
+#include <stddef.h>  // size_t
+
+namespace webrtc {
+
+// Configuration struct for EchoCanceller3
+struct EchoCanceller3Config {
+  EchoCanceller3Config();
+
+  struct Delay {
+    size_t default_delay = 5;
+    size_t down_sampling_factor = 4;
+    size_t num_filters = 5;
+    size_t api_call_jitter_blocks = 26;
+    size_t min_echo_path_delay_blocks = 0;
+    size_t delay_headroom_blocks = 2;
+    size_t hysteresis_limit_1_blocks = 1;
+    size_t hysteresis_limit_2_blocks = 1;
+    size_t skew_hysteresis_blocks = 1;
+  } delay;
+
+  struct Filter {
+    struct MainConfiguration {
+      size_t length_blocks;
+      float leakage_converged;
+      float leakage_diverged;
+      float error_floor;
+      float noise_gate;
+    };
+
+    struct ShadowConfiguration {
+      size_t length_blocks;
+      float rate;
+      float noise_gate;
+    };
+
+    MainConfiguration main = {13, 0.005f, 0.1f, 0.001f, 20075344.f};
+    ShadowConfiguration shadow = {13, 0.7f, 20075344.f};
+
+    MainConfiguration main_initial = {12, 0.05f, 5.f, 0.001f, 20075344.f};
+    ShadowConfiguration shadow_initial = {12, 0.9f, 20075344.f};
+
+    size_t config_change_duration_blocks = 250;
+  } filter;
+
+  struct Erle {
+    float min = 1.f;
+    float max_l = 8.f;
+    float max_h = 1.5f;
+  } erle;
+
+  struct EpStrength {
+    float lf = 10.f;
+    float mf = 10.f;
+    float hf = 10.f;
+    float default_len = 0.f;
+    bool echo_can_saturate = true;
+    bool bounded_erl = false;
+  } ep_strength;
+
+  struct Mask {
+    float m1 = 0.01f;
+    float m2 = 0.0001f;
+    float m3 = 0.01f;
+    float m4 = 0.1f;
+    float m5 = 0.1f;
+    float m6 = 0.0001f;
+    float m7 = 0.01f;
+    float m8 = 0.0001f;
+    float m9 = 0.1f;
+  } gain_mask;
+
+  struct EchoAudibility {
+    float low_render_limit = 4 * 64.f;
+    float normal_render_limit = 64.f;
+  } echo_audibility;
+
+  struct RenderLevels {
+    float active_render_limit = 100.f;
+    float poor_excitation_render_limit = 150.f;
+  } render_levels;
+
+  struct GainUpdates {
+    struct GainChanges {
+      float max_inc;
+      float max_dec;
+      float rate_inc;
+      float rate_dec;
+      float min_inc;
+      float min_dec;
+    };
+
+    GainChanges low_noise = {2.f, 2.f, 1.4f, 1.4f, 1.1f, 1.1f};
+    GainChanges initial = {2.f, 2.f, 1.5f, 1.5f, 1.2f, 1.2f};
+    GainChanges normal = {2.f, 2.f, 1.5f, 1.5f, 1.2f, 1.2f};
+    GainChanges saturation = {1.2f, 1.2f, 1.5f, 1.5f, 1.f, 1.f};
+    GainChanges nonlinear = {1.5f, 1.5f, 1.2f, 1.2f, 1.1f, 1.1f};
+
+    float floor_first_increase = 0.00001f;
+  } gain_updates;
+
+  struct EchoRemovalControl {
+    struct GainRampup {
+      float first_non_zero_gain = 0.001f;
+      int non_zero_gain_blocks = 187;
+      int full_gain_blocks = 312;
+    } gain_rampup;
+
+    bool has_clock_drift = false;
+  } echo_removal_control;
+};
+}  // namespace webrtc
+
+#endif  // API_AUDIO_ECHO_CANCELLER3_CONFIG_H_
diff --git a/api/audio/echo_canceller3_factory.cc b/api/audio/echo_canceller3_factory.cc
new file mode 100644
index 0000000..7e2c143
--- /dev/null
+++ b/api/audio/echo_canceller3_factory.cc
@@ -0,0 +1,27 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "api/audio/echo_canceller3_factory.h"
+
+#include <memory>
+
+#include "modules/audio_processing/aec3/echo_canceller3.h"
+#include "rtc_base/ptr_util.h"
+
+namespace webrtc {
+
+EchoCanceller3Factory::EchoCanceller3Factory() {}
+
+EchoCanceller3Factory::EchoCanceller3Factory(const EchoCanceller3Config& config)
+    : config_(config) {}
+
+std::unique_ptr<EchoControl> EchoCanceller3Factory::Create(int sample_rate_hz) {
+  return rtc::MakeUnique<EchoCanceller3>(config_, sample_rate_hz, true);
+}
+}  // namespace webrtc
diff --git a/api/audio/echo_canceller3_factory.h b/api/audio/echo_canceller3_factory.h
new file mode 100644
index 0000000..f6db116
--- /dev/null
+++ b/api/audio/echo_canceller3_factory.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_AUDIO_ECHO_CANCELLER3_FACTORY_H_
+#define API_AUDIO_ECHO_CANCELLER3_FACTORY_H_
+
+#include <memory>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "api/audio/echo_control.h"
+
+namespace webrtc {
+
+class EchoCanceller3Factory : public EchoControlFactory {
+ public:
+  // Factory producing EchoCanceller3 instances with the default configuration.
+  EchoCanceller3Factory();
+
+  // Factory producing EchoCanceller3 instances with the specified
+  // configuration.
+  explicit EchoCanceller3Factory(const EchoCanceller3Config& config);
+
+  // Creates an EchoCanceller3 running at the specified sampling rate.
+  std::unique_ptr<EchoControl> Create(int sample_rate_hz) override;
+
+ private:
+  const EchoCanceller3Config config_;
+};
+}  // namespace webrtc
+
+#endif  // API_AUDIO_ECHO_CANCELLER3_FACTORY_H_
diff --git a/api/audio/echo_control.h b/api/audio/echo_control.h
new file mode 100644
index 0000000..021bbf8
--- /dev/null
+++ b/api/audio/echo_control.h
@@ -0,0 +1,52 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_AUDIO_ECHO_CONTROL_H_
+#define API_AUDIO_ECHO_CONTROL_H_
+
+#include <memory>
+
+namespace webrtc {
+
+class AudioBuffer;
+
+// Interface for an acoustic echo cancellation (AEC) submodule.
+class EchoControl {
+ public:
+  // Analysis (not changing) of the render signal.
+  virtual void AnalyzeRender(AudioBuffer* render) = 0;
+
+  // Analysis (not changing) of the capture signal.
+  virtual void AnalyzeCapture(AudioBuffer* capture) = 0;
+
+  // Processes the capture signal in order to remove the echo.
+  virtual void ProcessCapture(AudioBuffer* capture, bool echo_path_change) = 0;
+
+  struct Metrics {
+    double echo_return_loss;
+    double echo_return_loss_enhancement;
+    int delay_ms;
+  };
+
+  // Collect current metrics from the echo controller.
+  virtual Metrics GetMetrics() const = 0;
+
+  virtual ~EchoControl() {}
+};
+
+// Interface for a factory that creates EchoControllers.
+class EchoControlFactory {
+ public:
+  virtual std::unique_ptr<EchoControl> Create(int sample_rate_hz) = 0;
+  virtual ~EchoControlFactory() = default;
+};
+}  // namespace webrtc
+
+#endif  // API_AUDIO_ECHO_CONTROL_H_
diff --git a/api/audio_options.h b/api/audio_options.h
new file mode 100644
index 0000000..8d2880b
--- /dev/null
+++ b/api/audio_options.h
@@ -0,0 +1,194 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_AUDIO_OPTIONS_H_
+#define API_AUDIO_OPTIONS_H_
+
+#include <string>
+
+#include "api/optional.h"
+#include "rtc_base/stringencode.h"
+
+namespace cricket {
+
+// Options that can be applied to a VoiceMediaChannel or a VoiceMediaEngine.
+// Used to be flags, but that makes it hard to selectively apply options.
+// We are moving all of the setting of options to structs like this,
+// but some things currently still use flags.
+struct AudioOptions {
+  void SetAll(const AudioOptions& change) {
+    SetFrom(&echo_cancellation, change.echo_cancellation);
+#if defined(WEBRTC_IOS)
+    SetFrom(&ios_force_software_aec_HACK, change.ios_force_software_aec_HACK);
+#endif
+    SetFrom(&auto_gain_control, change.auto_gain_control);
+    SetFrom(&noise_suppression, change.noise_suppression);
+    SetFrom(&highpass_filter, change.highpass_filter);
+    SetFrom(&stereo_swapping, change.stereo_swapping);
+    SetFrom(&audio_jitter_buffer_max_packets,
+            change.audio_jitter_buffer_max_packets);
+    SetFrom(&audio_jitter_buffer_fast_accelerate,
+            change.audio_jitter_buffer_fast_accelerate);
+    SetFrom(&typing_detection, change.typing_detection);
+    SetFrom(&aecm_generate_comfort_noise, change.aecm_generate_comfort_noise);
+    SetFrom(&experimental_agc, change.experimental_agc);
+    SetFrom(&extended_filter_aec, change.extended_filter_aec);
+    SetFrom(&delay_agnostic_aec, change.delay_agnostic_aec);
+    SetFrom(&experimental_ns, change.experimental_ns);
+    SetFrom(&intelligibility_enhancer, change.intelligibility_enhancer);
+    SetFrom(&residual_echo_detector, change.residual_echo_detector);
+    SetFrom(&tx_agc_target_dbov, change.tx_agc_target_dbov);
+    SetFrom(&tx_agc_digital_compression_gain,
+            change.tx_agc_digital_compression_gain);
+    SetFrom(&tx_agc_limiter, change.tx_agc_limiter);
+    SetFrom(&combined_audio_video_bwe, change.combined_audio_video_bwe);
+    SetFrom(&audio_network_adaptor, change.audio_network_adaptor);
+    SetFrom(&audio_network_adaptor_config, change.audio_network_adaptor_config);
+  }
+
+  bool operator==(const AudioOptions& o) const {
+    return echo_cancellation == o.echo_cancellation &&
+#if defined(WEBRTC_IOS)
+           ios_force_software_aec_HACK == o.ios_force_software_aec_HACK &&
+#endif
+           auto_gain_control == o.auto_gain_control &&
+           noise_suppression == o.noise_suppression &&
+           highpass_filter == o.highpass_filter &&
+           stereo_swapping == o.stereo_swapping &&
+           audio_jitter_buffer_max_packets ==
+               o.audio_jitter_buffer_max_packets &&
+           audio_jitter_buffer_fast_accelerate ==
+               o.audio_jitter_buffer_fast_accelerate &&
+           typing_detection == o.typing_detection &&
+           aecm_generate_comfort_noise == o.aecm_generate_comfort_noise &&
+           experimental_agc == o.experimental_agc &&
+           extended_filter_aec == o.extended_filter_aec &&
+           delay_agnostic_aec == o.delay_agnostic_aec &&
+           experimental_ns == o.experimental_ns &&
+           intelligibility_enhancer == o.intelligibility_enhancer &&
+           residual_echo_detector == o.residual_echo_detector &&
+           tx_agc_target_dbov == o.tx_agc_target_dbov &&
+           tx_agc_digital_compression_gain ==
+               o.tx_agc_digital_compression_gain &&
+           tx_agc_limiter == o.tx_agc_limiter &&
+           combined_audio_video_bwe == o.combined_audio_video_bwe &&
+           audio_network_adaptor == o.audio_network_adaptor &&
+           audio_network_adaptor_config == o.audio_network_adaptor_config;
+  }
+  bool operator!=(const AudioOptions& o) const { return !(*this == o); }
+
+  std::string ToString() const {
+    std::ostringstream ost;
+    ost << "AudioOptions {";
+    ost << ToStringIfSet("aec", echo_cancellation);
+#if defined(WEBRTC_IOS)
+    ost << ToStringIfSet("ios_force_software_aec_HACK",
+                         ios_force_software_aec_HACK);
+#endif
+    ost << ToStringIfSet("agc", auto_gain_control);
+    ost << ToStringIfSet("ns", noise_suppression);
+    ost << ToStringIfSet("hf", highpass_filter);
+    ost << ToStringIfSet("swap", stereo_swapping);
+    ost << ToStringIfSet("audio_jitter_buffer_max_packets",
+                         audio_jitter_buffer_max_packets);
+    ost << ToStringIfSet("audio_jitter_buffer_fast_accelerate",
+                         audio_jitter_buffer_fast_accelerate);
+    ost << ToStringIfSet("typing", typing_detection);
+    ost << ToStringIfSet("comfort_noise", aecm_generate_comfort_noise);
+    ost << ToStringIfSet("experimental_agc", experimental_agc);
+    ost << ToStringIfSet("extended_filter_aec", extended_filter_aec);
+    ost << ToStringIfSet("delay_agnostic_aec", delay_agnostic_aec);
+    ost << ToStringIfSet("experimental_ns", experimental_ns);
+    ost << ToStringIfSet("intelligibility_enhancer", intelligibility_enhancer);
+    ost << ToStringIfSet("residual_echo_detector", residual_echo_detector);
+    ost << ToStringIfSet("tx_agc_target_dbov", tx_agc_target_dbov);
+    ost << ToStringIfSet("tx_agc_digital_compression_gain",
+        tx_agc_digital_compression_gain);
+    ost << ToStringIfSet("tx_agc_limiter", tx_agc_limiter);
+    ost << ToStringIfSet("combined_audio_video_bwe", combined_audio_video_bwe);
+    ost << ToStringIfSet("audio_network_adaptor", audio_network_adaptor);
+    // The adaptor config is a serialized proto buffer and therefore not human
+    // readable. So we comment out the following line.
+    // ost << ToStringIfSet("audio_network_adaptor_config",
+    //     audio_network_adaptor_config);
+    ost << "}";
+    return ost.str();
+  }
+
+  // Audio processing that attempts to filter away the output signal from
+  // later inbound pickup.
+  rtc::Optional<bool> echo_cancellation;
+#if defined(WEBRTC_IOS)
+  // Forces software echo cancellation on iOS. This is a temporary workaround
+  // (until Apple fixes the bug) for a device with non-functioning AEC. May
+  // improve performance on that particular device, but will cause unpredictable
+  // behavior in all other cases. See http://bugs.webrtc.org/8682.
+  rtc::Optional<bool> ios_force_software_aec_HACK;
+#endif
+  // Audio processing to adjust the sensitivity of the local mic dynamically.
+  rtc::Optional<bool> auto_gain_control;
+  // Audio processing to filter out background noise.
+  rtc::Optional<bool> noise_suppression;
+  // Audio processing to remove background noise of lower frequencies.
+  rtc::Optional<bool> highpass_filter;
+  // Audio processing to swap the left and right channels.
+  rtc::Optional<bool> stereo_swapping;
+  // Audio receiver jitter buffer (NetEq) max capacity in number of packets.
+  rtc::Optional<int> audio_jitter_buffer_max_packets;
+  // Audio receiver jitter buffer (NetEq) fast accelerate mode.
+  rtc::Optional<bool> audio_jitter_buffer_fast_accelerate;
+  // Audio processing to detect typing.
+  rtc::Optional<bool> typing_detection;
+  rtc::Optional<bool> aecm_generate_comfort_noise;
+  rtc::Optional<bool> experimental_agc;
+  rtc::Optional<bool> extended_filter_aec;
+  rtc::Optional<bool> delay_agnostic_aec;
+  rtc::Optional<bool> experimental_ns;
+  rtc::Optional<bool> intelligibility_enhancer;
+  // Note that tx_agc_* only applies to non-experimental AGC.
+  rtc::Optional<bool> residual_echo_detector;
+  rtc::Optional<uint16_t> tx_agc_target_dbov;
+  rtc::Optional<uint16_t> tx_agc_digital_compression_gain;
+  rtc::Optional<bool> tx_agc_limiter;
+  // Enable combined audio+bandwidth BWE.
+  // TODO(pthatcher): This flag is set from the
+  // "googCombinedAudioVideoBwe", but not used anywhere. So delete it,
+  // and check if any other AudioOptions members are unused.
+  rtc::Optional<bool> combined_audio_video_bwe;
+  // Enable audio network adaptor.
+  rtc::Optional<bool> audio_network_adaptor;
+  // Config string for audio network adaptor.
+  rtc::Optional<std::string> audio_network_adaptor_config;
+
+ private:
+  template <class T>
+  static std::string ToStringIfSet(const char* key,
+                                   const rtc::Optional<T>& val) {
+    std::string str;
+    if (val) {
+      str = key;
+      str += ": ";
+      str += val ? rtc::ToString(*val) : "";
+      str += ", ";
+    }
+    return str;
+  }
+
+  template <typename T>
+  static void SetFrom(rtc::Optional<T>* s, const rtc::Optional<T>& o) {
+    if (o) {
+      *s = o;
+    }
+  }
+};
+
+}  // namespace cricket
+
+#endif  // API_AUDIO_OPTIONS_H_
diff --git a/api/candidate.cc b/api/candidate.cc
new file mode 100644
index 0000000..62cd1bd
--- /dev/null
+++ b/api/candidate.cc
@@ -0,0 +1,123 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/candidate.h"
+
+namespace cricket {
+
+Candidate::Candidate()
+    : id_(rtc::CreateRandomString(8)),
+      component_(0),
+      priority_(0),
+      network_type_(rtc::ADAPTER_TYPE_UNKNOWN),
+      generation_(0),
+      network_id_(0),
+      network_cost_(0) {}
+
+Candidate::Candidate(int component,
+                     const std::string& protocol,
+                     const rtc::SocketAddress& address,
+                     uint32_t priority,
+                     const std::string& username,
+                     const std::string& password,
+                     const std::string& type,
+                     uint32_t generation,
+                     const std::string& foundation,
+                     uint16_t network_id,
+                     uint16_t network_cost)
+    : id_(rtc::CreateRandomString(8)),
+      component_(component),
+      protocol_(protocol),
+      address_(address),
+      priority_(priority),
+      username_(username),
+      password_(password),
+      type_(type),
+      network_type_(rtc::ADAPTER_TYPE_UNKNOWN),
+      generation_(generation),
+      foundation_(foundation),
+      network_id_(network_id),
+      network_cost_(network_cost) {}
+
+Candidate::Candidate(const Candidate&) = default;
+
+Candidate::~Candidate() = default;
+
+bool Candidate::IsEquivalent(const Candidate& c) const {
+  // We ignore the network name, since that is just debug information, and
+  // the priority and the network cost, since they should be the same if the
+  // rest are.
+  return (component_ == c.component_) && (protocol_ == c.protocol_) &&
+         (address_ == c.address_) && (username_ == c.username_) &&
+         (password_ == c.password_) && (type_ == c.type_) &&
+         (generation_ == c.generation_) && (foundation_ == c.foundation_) &&
+         (related_address_ == c.related_address_) &&
+         (network_id_ == c.network_id_);
+}
+
+bool Candidate::MatchesForRemoval(const Candidate& c) const {
+  return component_ == c.component_ && protocol_ == c.protocol_ &&
+         address_ == c.address_;
+}
+
+std::string Candidate::ToStringInternal(bool sensitive) const {
+  std::ostringstream ost;
+  std::string address =
+      sensitive ? address_.ToSensitiveString() : address_.ToString();
+  ost << "Cand[" << transport_name_ << ":" << foundation_ << ":" << component_
+      << ":" << protocol_ << ":" << priority_ << ":" << address << ":" << type_
+      << ":" << related_address_ << ":" << username_ << ":" << password_ << ":"
+      << network_id_ << ":" << network_cost_ << ":" << generation_ << "]";
+  return ost.str();
+}
+
+uint32_t Candidate::GetPriority(uint32_t type_preference,
+                                int network_adapter_preference,
+                                int relay_preference) const {
+  // RFC 5245 - 4.1.2.1.
+  // priority = (2^24)*(type preference) +
+  //            (2^8)*(local preference) +
+  //            (2^0)*(256 - component ID)
+
+  // |local_preference| length is 2 bytes, 0-65535 inclusive.
+  // In our implemenation we will partion local_preference into
+  //              0                 1
+  //       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+  //      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  //      |  NIC Pref     |    Addr Pref  |
+  //      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  // NIC Type - Type of the network adapter e.g. 3G/Wifi/Wired.
+  // Addr Pref - Address preference value as per RFC 3484.
+  // local preference =  (NIC Type << 8 | Addr_Pref) - relay preference.
+
+  int addr_pref = IPAddressPrecedence(address_.ipaddr());
+  int local_preference =
+      ((network_adapter_preference << 8) | addr_pref) + relay_preference;
+
+  return (type_preference << 24) | (local_preference << 8) | (256 - component_);
+}
+
+bool Candidate::operator==(const Candidate& o) const {
+  return id_ == o.id_ && component_ == o.component_ &&
+         protocol_ == o.protocol_ && relay_protocol_ == o.relay_protocol_ &&
+         address_ == o.address_ && priority_ == o.priority_ &&
+         username_ == o.username_ && password_ == o.password_ &&
+         type_ == o.type_ && network_name_ == o.network_name_ &&
+         network_type_ == o.network_type_ && generation_ == o.generation_ &&
+         foundation_ == o.foundation_ &&
+         related_address_ == o.related_address_ && tcptype_ == o.tcptype_ &&
+         transport_name_ == o.transport_name_ && network_id_ == o.network_id_;
+}
+
+bool Candidate::operator!=(const Candidate& o) const {
+  return !(*this == o);
+}
+
+}  // namespace cricket
diff --git a/api/candidate.h b/api/candidate.h
new file mode 100644
index 0000000..a1f45c2
--- /dev/null
+++ b/api/candidate.h
@@ -0,0 +1,208 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_CANDIDATE_H_
+#define API_CANDIDATE_H_
+
+#include <limits.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <string>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/helpers.h"
+#include "rtc_base/network_constants.h"
+#include "rtc_base/socketaddress.h"
+
+namespace cricket {
+
+// Candidate for ICE based connection discovery.
+// TODO(phoglund): remove things in here that are not needed in the public API.
+
+class Candidate {
+ public:
+  Candidate();
+  // TODO(pthatcher): Match the ordering and param list as per RFC 5245
+  // candidate-attribute syntax. http://tools.ietf.org/html/rfc5245#section-15.1
+  Candidate(int component,
+            const std::string& protocol,
+            const rtc::SocketAddress& address,
+            uint32_t priority,
+            const std::string& username,
+            const std::string& password,
+            const std::string& type,
+            uint32_t generation,
+            const std::string& foundation,
+            uint16_t network_id = 0,
+            uint16_t network_cost = 0);
+  Candidate(const Candidate&);
+  ~Candidate();
+
+  const std::string & id() const { return id_; }
+  void set_id(const std::string & id) { id_ = id; }
+
+  int component() const { return component_; }
+  void set_component(int component) { component_ = component; }
+
+  const std::string & protocol() const { return protocol_; }
+  void set_protocol(const std::string & protocol) { protocol_ = protocol; }
+
+  // The protocol used to talk to relay.
+  const std::string& relay_protocol() const { return relay_protocol_; }
+  void set_relay_protocol(const std::string& protocol) {
+    relay_protocol_ = protocol;
+  }
+
+  const rtc::SocketAddress & address() const { return address_; }
+  void set_address(const rtc::SocketAddress & address) {
+    address_ = address;
+  }
+
+  uint32_t priority() const { return priority_; }
+  void set_priority(const uint32_t priority) { priority_ = priority; }
+
+  // TODO(pthatcher): Remove once Chromium's jingle/glue/utils.cc
+  // doesn't use it.
+  // Maps old preference (which was 0.0-1.0) to match priority (which
+  // is 0-2^32-1) to to match RFC 5245, section 4.1.2.1.  Also see
+  // https://docs.google.com/a/google.com/document/d/
+  // 1iNQDiwDKMh0NQOrCqbj3DKKRT0Dn5_5UJYhmZO-t7Uc/edit
+  float preference() const {
+    // The preference value is clamped to two decimal precision.
+    return static_cast<float>(((priority_ >> 24) * 100 / 127) / 100.0);
+  }
+
+  // TODO(pthatcher): Remove once Chromium's jingle/glue/utils.cc
+  // doesn't use it.
+  void set_preference(float preference) {
+    // Limiting priority to UINT_MAX when value exceeds uint32_t max.
+    // This can happen for e.g. when preference = 3.
+    uint64_t prio_val = static_cast<uint64_t>(preference * 127) << 24;
+    priority_ = static_cast<uint32_t>(
+        std::min(prio_val, static_cast<uint64_t>(UINT_MAX)));
+  }
+
+  // TODO(honghaiz): Change to usernameFragment or ufrag.
+  const std::string & username() const { return username_; }
+  void set_username(const std::string & username) { username_ = username; }
+
+  const std::string & password() const { return password_; }
+  void set_password(const std::string & password) { password_ = password; }
+
+  const std::string & type() const { return type_; }
+  void set_type(const std::string & type) { type_ = type; }
+
+  const std::string & network_name() const { return network_name_; }
+  void set_network_name(const std::string & network_name) {
+    network_name_ = network_name;
+  }
+
+  rtc::AdapterType network_type() const { return network_type_; }
+  void set_network_type(rtc::AdapterType network_type) {
+    network_type_ = network_type;
+  }
+
+  // Candidates in a new generation replace those in the old generation.
+  uint32_t generation() const { return generation_; }
+  void set_generation(uint32_t generation) { generation_ = generation; }
+
+  // |network_cost| measures the cost/penalty of using this candidate. A network
+  // cost of 0 indicates this candidate can be used freely. A value of
+  // rtc::kNetworkCostMax indicates it should be used only as the last resort.
+  void set_network_cost(uint16_t network_cost) {
+    RTC_DCHECK_LE(network_cost, rtc::kNetworkCostMax);
+    network_cost_ = network_cost;
+  }
+  uint16_t network_cost() const { return network_cost_; }
+
+  // An ID assigned to the network hosting the candidate.
+  uint16_t network_id() const { return network_id_; }
+  void set_network_id(uint16_t network_id) { network_id_ = network_id; }
+
+  const std::string& foundation() const {
+    return foundation_;
+  }
+  void set_foundation(const std::string& foundation) {
+    foundation_ = foundation;
+  }
+
+  const rtc::SocketAddress & related_address() const {
+    return related_address_;
+  }
+  void set_related_address(
+      const rtc::SocketAddress & related_address) {
+    related_address_ = related_address;
+  }
+  const std::string& tcptype() const { return tcptype_; }
+  void set_tcptype(const std::string& tcptype) {
+    tcptype_ = tcptype;
+  }
+
+  // The name of the transport channel of this candidate.
+  // TODO(phoglund): remove.
+  const std::string& transport_name() const { return transport_name_; }
+  void set_transport_name(const std::string& transport_name) {
+    transport_name_ = transport_name;
+  }
+
+  // The URL of the ICE server which this candidate is gathered from.
+  const std::string& url() const { return url_; }
+  void set_url(const std::string& url) { url_ = url; }
+
+  // Determines whether this candidate is equivalent to the given one.
+  bool IsEquivalent(const Candidate& c) const;
+
+  // Determines whether this candidate can be considered equivalent to the
+  // given one when looking for a matching candidate to remove.
+  bool MatchesForRemoval(const Candidate& c) const;
+
+  std::string ToString() const {
+    return ToStringInternal(false);
+  }
+
+  std::string ToSensitiveString() const {
+    return ToStringInternal(true);
+  }
+
+  uint32_t GetPriority(uint32_t type_preference,
+                       int network_adapter_preference,
+                       int relay_preference) const;
+
+  bool operator==(const Candidate& o) const;
+  bool operator!=(const Candidate& o) const;
+
+ private:
+  std::string ToStringInternal(bool sensitive) const;
+
+  std::string id_;
+  int component_;
+  std::string protocol_;
+  std::string relay_protocol_;
+  rtc::SocketAddress address_;
+  uint32_t priority_;
+  std::string username_;
+  std::string password_;
+  std::string type_;
+  std::string network_name_;
+  rtc::AdapterType network_type_;
+  uint32_t generation_;
+  std::string foundation_;
+  rtc::SocketAddress related_address_;
+  std::string tcptype_;
+  std::string transport_name_;
+  uint16_t network_id_;
+  uint16_t network_cost_;
+  std::string url_;
+};
+
+}  // namespace cricket
+
+#endif  // API_CANDIDATE_H_
diff --git a/api/cryptoparams.h b/api/cryptoparams.h
new file mode 100644
index 0000000..2350528
--- /dev/null
+++ b/api/cryptoparams.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_CRYPTOPARAMS_H_
+#define API_CRYPTOPARAMS_H_
+
+#include <string>
+
+namespace cricket {
+
+// Parameters for SRTP negotiation, as described in RFC 4568.
+struct CryptoParams {
+  CryptoParams() : tag(0) {}
+  CryptoParams(int t,
+               const std::string& cs,
+               const std::string& kp,
+               const std::string& sp)
+      : tag(t), cipher_suite(cs), key_params(kp), session_params(sp) {}
+
+  bool Matches(const CryptoParams& params) const {
+    return (tag == params.tag && cipher_suite == params.cipher_suite);
+  }
+
+  int tag;
+  std::string cipher_suite;
+  std::string key_params;
+  std::string session_params;
+};
+
+}  // namespace cricket
+
+#endif  // API_CRYPTOPARAMS_H_
diff --git a/api/datachannelinterface.h b/api/datachannelinterface.h
new file mode 100644
index 0000000..4ab7efb
--- /dev/null
+++ b/api/datachannelinterface.h
@@ -0,0 +1,183 @@
+/*
+ *  Copyright 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains interfaces for DataChannels
+// http://dev.w3.org/2011/webrtc/editor/webrtc.html#rtcdatachannel
+
+#ifndef API_DATACHANNELINTERFACE_H_
+#define API_DATACHANNELINTERFACE_H_
+
+#include <string>
+
+#include "rtc_base/basictypes.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/copyonwritebuffer.h"
+#include "rtc_base/refcount.h"
+
+namespace webrtc {
+
+// C++ version of: https://www.w3.org/TR/webrtc/#idl-def-rtcdatachannelinit
+// TODO(deadbeef): Use rtc::Optional for the "-1 if unset" things.
+struct DataChannelInit {
+  // Deprecated. Reliability is assumed, and channel will be unreliable if
+  // maxRetransmitTime or MaxRetransmits is set.
+  bool reliable = false;
+
+  // True if ordered delivery is required.
+  bool ordered = true;
+
+  // The max period of time in milliseconds in which retransmissions will be
+  // sent. After this time, no more retransmissions will be sent. -1 if unset.
+  //
+  // Cannot be set along with |maxRetransmits|.
+  int maxRetransmitTime = -1;
+
+  // The max number of retransmissions. -1 if unset.
+  //
+  // Cannot be set along with |maxRetransmitTime|.
+  int maxRetransmits = -1;
+
+  // This is set by the application and opaque to the WebRTC implementation.
+  std::string protocol;
+
+  // True if the channel has been externally negotiated and we do not send an
+  // in-band signalling in the form of an "open" message. If this is true, |id|
+  // below must be set; otherwise it should be unset and will be negotiated
+  // in-band.
+  bool negotiated = false;
+
+  // The stream id, or SID, for SCTP data channels. -1 if unset (see above).
+  int id = -1;
+};
+
+// At the JavaScript level, data can be passed in as a string or a blob, so
+// this structure's |binary| flag tells whether the data should be interpreted
+// as binary or text.
+struct DataBuffer {
+  DataBuffer(const rtc::CopyOnWriteBuffer& data, bool binary)
+      : data(data),
+        binary(binary) {
+  }
+  // For convenience for unit tests.
+  explicit DataBuffer(const std::string& text)
+      : data(text.data(), text.length()),
+        binary(false) {
+  }
+  size_t size() const { return data.size(); }
+
+  rtc::CopyOnWriteBuffer data;
+  // Indicates if the received data contains UTF-8 or binary data.
+  // Note that the upper layers are left to verify the UTF-8 encoding.
+  // TODO(jiayl): prefer to use an enum instead of a bool.
+  bool binary;
+};
+
+// Used to implement RTCDataChannel events.
+//
+// The code responding to these callbacks should unwind the stack before
+// using any other webrtc APIs; re-entrancy is not supported.
+class DataChannelObserver {
+ public:
+  // The data channel state have changed.
+  virtual void OnStateChange() = 0;
+  //  A data buffer was successfully received.
+  virtual void OnMessage(const DataBuffer& buffer) = 0;
+  // The data channel's buffered_amount has changed.
+  virtual void OnBufferedAmountChange(uint64_t previous_amount) {}
+
+ protected:
+  virtual ~DataChannelObserver() {}
+};
+
+class DataChannelInterface : public rtc::RefCountInterface {
+ public:
+  // C++ version of: https://www.w3.org/TR/webrtc/#idl-def-rtcdatachannelstate
+  // Unlikely to change, but keep in sync with DataChannel.java:State and
+  // RTCDataChannel.h:RTCDataChannelState.
+  enum DataState {
+    kConnecting,
+    kOpen,  // The DataChannel is ready to send data.
+    kClosing,
+    kClosed
+  };
+
+  static const char* DataStateString(DataState state) {
+    switch (state) {
+      case kConnecting:
+        return "connecting";
+      case kOpen:
+        return "open";
+      case kClosing:
+        return "closing";
+      case kClosed:
+        return "closed";
+    }
+    RTC_CHECK(false) << "Unknown DataChannel state: " << state;
+    return "";
+  }
+
+  // Used to receive events from the data channel. Only one observer can be
+  // registered at a time. UnregisterObserver should be called before the
+  // observer object is destroyed.
+  virtual void RegisterObserver(DataChannelObserver* observer) = 0;
+  virtual void UnregisterObserver() = 0;
+
+  // The label attribute represents a label that can be used to distinguish this
+  // DataChannel object from other DataChannel objects.
+  virtual std::string label() const = 0;
+
+  // The accessors below simply return the properties from the DataChannelInit
+  // the data channel was constructed with.
+  virtual bool reliable() const = 0;
+  // TODO(deadbeef): Remove these dummy implementations when all classes have
+  // implemented these APIs. They should all just return the values the
+  // DataChannel was created with.
+  virtual bool ordered() const { return false; }
+  virtual uint16_t maxRetransmitTime() const { return 0; }
+  virtual uint16_t maxRetransmits() const { return 0; }
+  virtual std::string protocol() const { return std::string(); }
+  virtual bool negotiated() const { return false; }
+
+  // Returns the ID from the DataChannelInit, if it was negotiated out-of-band.
+  // If negotiated in-band, this ID will be populated once the DTLS role is
+  // determined, and until then this will return -1.
+  virtual int id() const = 0;
+  virtual DataState state() const = 0;
+  virtual uint32_t messages_sent() const = 0;
+  virtual uint64_t bytes_sent() const = 0;
+  virtual uint32_t messages_received() const = 0;
+  virtual uint64_t bytes_received() const = 0;
+
+  // Returns the number of bytes of application data (UTF-8 text and binary
+  // data) that have been queued using Send but have not yet been processed at
+  // the SCTP level. See comment above Send below.
+  virtual uint64_t buffered_amount() const = 0;
+
+  // Begins the graceful data channel closing procedure. See:
+  // https://tools.ietf.org/html/draft-ietf-rtcweb-data-channel-13#section-6.7
+  virtual void Close() = 0;
+
+  // Sends |data| to the remote peer. If the data can't be sent at the SCTP
+  // level (due to congestion control), it's buffered at the data channel level,
+  // up to a maximum of 16MB. If Send is called while this buffer is full, the
+  // data channel will be closed abruptly.
+  //
+  // So, it's important to use buffered_amount() and OnBufferedAmountChange to
+  // ensure the data channel is used efficiently but without filling this
+  // buffer.
+  virtual bool Send(const DataBuffer& buffer) = 0;
+
+ protected:
+  virtual ~DataChannelInterface() {}
+};
+
+}  // namespace webrtc
+
+#endif  // API_DATACHANNELINTERFACE_H_
diff --git a/api/dtmfsenderinterface.h b/api/dtmfsenderinterface.h
new file mode 100644
index 0000000..8f0ab71
--- /dev/null
+++ b/api/dtmfsenderinterface.h
@@ -0,0 +1,97 @@
+/*
+ *  Copyright 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_DTMFSENDERINTERFACE_H_
+#define API_DTMFSENDERINTERFACE_H_
+
+#include <string>
+
+#include "api/mediastreaminterface.h"
+#include "rtc_base/refcount.h"
+
+namespace webrtc {
+
+// DtmfSender callback interface, used to implement RTCDtmfSender events.
+// Applications should implement this interface to get notifications from the
+// DtmfSender.
+class DtmfSenderObserverInterface {
+ public:
+  // Triggered when DTMF |tone| is sent.
+  // If |tone| is empty that means the DtmfSender has sent out all the given
+  // tones.
+  virtual void OnToneChange(const std::string& tone) = 0;
+
+ protected:
+  virtual ~DtmfSenderObserverInterface() {}
+};
+
+// The interface of native implementation of the RTCDTMFSender defined by the
+// WebRTC W3C Editor's Draft.
+// See: https://www.w3.org/TR/webrtc/#peer-to-peer-dtmf
+class DtmfSenderInterface : public rtc::RefCountInterface {
+ public:
+  // Used to receive events from the DTMF sender. Only one observer can be
+  // registered at a time. UnregisterObserver should be called before the
+  // observer object is destroyed.
+  virtual void RegisterObserver(DtmfSenderObserverInterface* observer) = 0;
+  virtual void UnregisterObserver() = 0;
+
+  // Returns true if this DtmfSender is capable of sending DTMF. Otherwise
+  // returns false. To be able to send DTMF, the associated RtpSender must be
+  // able to send packets, and a "telephone-event" codec must be negotiated.
+  virtual bool CanInsertDtmf() = 0;
+
+  // Queues a task that sends the DTMF |tones|. The |tones| parameter is treated
+  // as a series of characters. The characters 0 through 9, A through D, #, and
+  // * generate the associated DTMF tones. The characters a to d are equivalent
+  // to A to D. The character ',' indicates a delay of 2 seconds before
+  // processing the next character in the tones parameter.
+  //
+  // Unrecognized characters are ignored.
+  //
+  // The |duration| parameter indicates the duration in ms to use for each
+  // character passed in the |tones| parameter. The duration cannot be more
+  // than 6000 or less than 70.
+  //
+  // The |inter_tone_gap| parameter indicates the gap between tones in ms. The
+  // |inter_tone_gap| must be at least 50 ms but should be as short as
+  // possible.
+  //
+  // If InsertDtmf is called on the same object while an existing task for this
+  // object to generate DTMF is still running, the previous task is canceled.
+  // Returns true on success and false on failure.
+  virtual bool InsertDtmf(const std::string& tones, int duration,
+                          int inter_tone_gap) = 0;
+
+  // Returns the track given as argument to the constructor. Only exists for
+  // backwards compatibilty; now that DtmfSenders are tied to RtpSenders, it's
+  // no longer relevant.
+  virtual const AudioTrackInterface* track() const = 0;
+
+  // Returns the tones remaining to be played out.
+  virtual std::string tones() const = 0;
+
+  // Returns the current tone duration value in ms.
+  // This value will be the value last set via the InsertDtmf() method, or the
+  // default value of 100 ms if InsertDtmf() was never called.
+  virtual int duration() const = 0;
+
+  // Returns the current value of the between-tone gap in ms.
+  // This value will be the value last set via the InsertDtmf() method, or the
+  // default value of 50 ms if InsertDtmf() was never called.
+  virtual int inter_tone_gap() const = 0;
+
+ protected:
+  virtual ~DtmfSenderInterface() {}
+};
+
+}  // namespace webrtc
+
+#endif  // API_DTMFSENDERINTERFACE_H_
diff --git a/api/fakemetricsobserver.cc b/api/fakemetricsobserver.cc
new file mode 100644
index 0000000..beb30c2
--- /dev/null
+++ b/api/fakemetricsobserver.cc
@@ -0,0 +1,87 @@
+/*
+ *  Copyright 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/fakemetricsobserver.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+FakeMetricsObserver::FakeMetricsObserver() {
+  Reset();
+}
+
+void FakeMetricsObserver::Reset() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  counters_.clear();
+  memset(histogram_samples_, 0, sizeof(histogram_samples_));
+}
+
+void FakeMetricsObserver::IncrementEnumCounter(
+    PeerConnectionEnumCounterType type,
+    int counter,
+    int counter_max) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (counters_.size() <= static_cast<size_t>(type)) {
+    counters_.resize(type + 1);
+  }
+  auto& counters = counters_[type];
+  ++counters[counter];
+}
+
+void FakeMetricsObserver::AddHistogramSample(PeerConnectionMetricsName type,
+    int value) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK_EQ(histogram_samples_[type], 0);
+  histogram_samples_[type] = value;
+}
+
+int FakeMetricsObserver::GetEnumCounter(PeerConnectionEnumCounterType type,
+                                        int counter) const {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (counters_.size() <= static_cast<size_t>(type)) {
+    return 0;
+  }
+  const auto& it = counters_[type].find(counter);
+  if (it == counters_[type].end()) {
+    return 0;
+  }
+  return it->second;
+}
+
+int FakeMetricsObserver::GetHistogramSample(
+    PeerConnectionMetricsName type) const {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  return histogram_samples_[type];
+}
+
+bool FakeMetricsObserver::ExpectOnlySingleEnumCount(
+    PeerConnectionEnumCounterType type,
+    int counter) const {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (counters_.size() <= static_cast<size_t>(type)) {
+    // If a counter has not been allocated then there has been no call to
+    // |IncrementEnumCounter| so all the values are 0.
+    return false;
+  }
+  bool pass = true;
+  if (GetEnumCounter(type, counter) != 1) {
+    RTC_LOG(LS_ERROR) << "Expected single count for counter: " << counter;
+    pass = false;
+  }
+  for (const auto& entry : counters_[type]) {
+    if (entry.first != counter && entry.second > 0) {
+      RTC_LOG(LS_ERROR) << "Expected no count for counter: " << entry.first;
+      pass = false;
+    }
+  }
+  return pass;
+}
+
+}  // namespace webrtc
diff --git a/api/fakemetricsobserver.h b/api/fakemetricsobserver.h
new file mode 100644
index 0000000..3adc5a6
--- /dev/null
+++ b/api/fakemetricsobserver.h
@@ -0,0 +1,57 @@
+/*
+ *  Copyright 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_FAKEMETRICSOBSERVER_H_
+#define API_FAKEMETRICSOBSERVER_H_
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "api/peerconnectioninterface.h"
+#include "rtc_base/thread_checker.h"
+
+namespace webrtc {
+
+class FakeMetricsObserver : public MetricsObserverInterface {
+ public:
+  FakeMetricsObserver();
+  void Reset();
+
+  void IncrementEnumCounter(PeerConnectionEnumCounterType,
+                            int counter,
+                            int counter_max) override;
+  void AddHistogramSample(PeerConnectionMetricsName type,
+                          int value) override;
+
+  // Accessors to be used by the tests.
+  int GetEnumCounter(PeerConnectionEnumCounterType type, int counter) const;
+  int GetHistogramSample(PeerConnectionMetricsName type) const;
+
+  // Returns true if and only if there is a count of 1 for the given counter and
+  // a count of 0 for all other counters of the given enum type.
+  bool ExpectOnlySingleEnumCount(PeerConnectionEnumCounterType type,
+                                 int counter) const;
+
+ protected:
+  ~FakeMetricsObserver() {}
+
+ private:
+  rtc::ThreadChecker thread_checker_;
+  // The vector contains maps for each counter type. In the map, it's a mapping
+  // from individual counter to its count, such that it's memory efficient when
+  // comes to sparse enum types, like the SSL ciphers in the IANA registry.
+  std::vector<std::map<int, int>> counters_;
+  int histogram_samples_[kPeerConnectionMetricsName_Max];
+};
+
+}  // namespace webrtc
+
+#endif  // API_FAKEMETRICSOBSERVER_H_
diff --git a/api/fec_controller.h b/api/fec_controller.h
new file mode 100644
index 0000000..59e86cc
--- /dev/null
+++ b/api/fec_controller.h
@@ -0,0 +1,91 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_FEC_CONTROLLER_H_
+#define API_FEC_CONTROLLER_H_
+
+#include <memory>
+#include <vector>
+
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/include/module_fec_types.h"
+
+namespace webrtc {
+// TODO(yinwa): work in progress. API in class FecController should not be
+// used by other users until this comment is removed.
+
+// Callback class used for telling the user about how to configure the FEC,
+// and the rates sent the last second is returned to the VCM.
+class VCMProtectionCallback {
+ public:
+  virtual int ProtectionRequest(const FecProtectionParams* delta_params,
+                                const FecProtectionParams* key_params,
+                                uint32_t* sent_video_rate_bps,
+                                uint32_t* sent_nack_rate_bps,
+                                uint32_t* sent_fec_rate_bps) = 0;
+
+ protected:
+  virtual ~VCMProtectionCallback() {}
+};
+
+// FecController calculates how much of the allocated network
+// capacity that can be used by an encoder and how much that
+// is needed for redundant packets such as FEC and NACK. It uses an
+// implementation of |VCMProtectionCallback| to set new FEC parameters and get
+// the bitrate currently used for FEC and NACK.
+// Usage:
+// Setup by calling SetProtectionMethod and SetEncodingData.
+// For each encoded image, call UpdateWithEncodedData.
+// Each time the bandwidth estimate change, call UpdateFecRates. UpdateFecRates
+// will return the bitrate that can be used by an encoder.
+// A lock is used to protect internal states, so methods can be called on an
+// arbitrary thread.
+class FecController {
+ public:
+  virtual ~FecController() {}
+
+  virtual void SetProtectionCallback(
+      VCMProtectionCallback* protection_callback) = 0;
+  virtual void SetProtectionMethod(bool enable_fec, bool enable_nack) = 0;
+
+  // Informs loss protectoin logic of initial encoding state.
+  virtual void SetEncodingData(size_t width,
+                               size_t height,
+                               size_t num_temporal_layers,
+                               size_t max_payload_size) = 0;
+
+  // Returns target rate for the encoder given the channel parameters.
+  // Inputs:  estimated_bitrate_bps - the estimated network bitrate in bits/s.
+  //          actual_framerate - encoder frame rate.
+  //          fraction_lost - packet loss rate in % in the network.
+  //          loss_mask_vector - packet loss mask since last time this method
+  //          was called. round_trip_time_ms - round trip time in milliseconds.
+  virtual uint32_t UpdateFecRates(uint32_t estimated_bitrate_bps,
+                                  int actual_framerate,
+                                  uint8_t fraction_lost,
+                                  std::vector<bool> loss_mask_vector,
+                                  int64_t round_trip_time_ms) = 0;
+
+  // Informs of encoded output.
+  virtual void UpdateWithEncodedData(size_t encoded_image_length,
+                                     FrameType encoded_image_frametype) = 0;
+
+  // Returns whether this FEC Controller needs Loss Vector Mask as input.
+  virtual bool UseLossVectorMask() = 0;
+};
+
+class FecControllerFactoryInterface {
+ public:
+  virtual std::unique_ptr<FecController> CreateFecController() = 0;
+  virtual ~FecControllerFactoryInterface() = default;
+};
+
+}  // namespace webrtc
+#endif  // API_FEC_CONTROLLER_H_
diff --git a/api/jsep.cc b/api/jsep.cc
new file mode 100644
index 0000000..1f4afba
--- /dev/null
+++ b/api/jsep.cc
@@ -0,0 +1,24 @@
+/*
+ *  Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/jsep.h"
+
+namespace webrtc {
+
+std::string IceCandidateInterface::server_url() const {
+  return "";
+}
+
+size_t SessionDescriptionInterface::RemoveCandidates(
+    const std::vector<cricket::Candidate>& candidates) {
+  return 0;
+}
+
+}  // namespace webrtc
diff --git a/api/jsep.h b/api/jsep.h
new file mode 100644
index 0000000..8fd2dac
--- /dev/null
+++ b/api/jsep.h
@@ -0,0 +1,238 @@
+/*
+ *  Copyright 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains declarations of interfaces that wrap SDP-related
+// constructs; session descriptions and ICE candidates. The inner "cricket::"
+// objects shouldn't be accessed directly; the intention is that an application
+// using the PeerConnection API only creates these objects from strings, and
+// them passes them into the PeerConnection.
+//
+// Though in the future, we're planning to provide an SDP parsing API, with a
+// structure more friendly than cricket::SessionDescription.
+
+#ifndef API_JSEP_H_
+#define API_JSEP_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/optional.h"
+#include "api/rtcerror.h"
+#include "rtc_base/refcount.h"
+
+namespace cricket {
+class Candidate;
+class SessionDescription;
+}  // namespace cricket
+
+namespace webrtc {
+
+struct SdpParseError {
+ public:
+  // The sdp line that causes the error.
+  std::string line;
+  // Explains the error.
+  std::string description;
+};
+
+// Class representation of an ICE candidate.
+//
+// An instance of this interface is supposed to be owned by one class at
+// a time and is therefore not expected to be thread safe.
+//
+// An instance can be created by CreateIceCandidate.
+class IceCandidateInterface {
+ public:
+  virtual ~IceCandidateInterface() {}
+  // If present, this is the value of the "a=mid" attribute of the candidate's
+  // m= section in SDP, which identifies the m= section.
+  virtual std::string sdp_mid() const = 0;
+  // This indicates the index (starting at zero) of m= section this candidate
+  // is associated with. Needed when an endpoint doesn't support MIDs.
+  virtual int sdp_mline_index() const = 0;
+  // Only for use internally.
+  virtual const cricket::Candidate& candidate() const = 0;
+  // The URL of the ICE server which this candidate was gathered from.
+  // TODO(zhihuang): Remove the default implementation once the subclasses
+  // implement this method.
+  virtual std::string server_url() const;
+  // Creates a SDP-ized form of this candidate.
+  virtual bool ToString(std::string* out) const = 0;
+};
+
+// Creates a IceCandidateInterface based on SDP string.
+// Returns null if the sdp string can't be parsed.
+// |error| may be null.
+IceCandidateInterface* CreateIceCandidate(const std::string& sdp_mid,
+                                          int sdp_mline_index,
+                                          const std::string& sdp,
+                                          SdpParseError* error);
+
+// This class represents a collection of candidates for a specific m= section.
+// Used in SessionDescriptionInterface.
+class IceCandidateCollection {
+ public:
+  virtual ~IceCandidateCollection() {}
+  virtual size_t count() const = 0;
+  // Returns true if an equivalent |candidate| exist in the collection.
+  virtual bool HasCandidate(const IceCandidateInterface* candidate) const = 0;
+  virtual const IceCandidateInterface* at(size_t index) const = 0;
+};
+
+// Enum that describes the type of the SessionDescriptionInterface.
+// Corresponds to RTCSdpType in the WebRTC specification.
+// https://w3c.github.io/webrtc-pc/#dom-rtcsdptype
+enum class SdpType {
+  kOffer,     // Description must be treated as an SDP offer.
+  kPrAnswer,  // Description must be treated as an SDP answer, but not a final
+              // answer.
+  kAnswer  // Description must be treated as an SDP final answer, and the offer-
+           // answer exchange must be considered complete after receiving this.
+};
+
+// Returns the string form of the given SDP type. String forms are defined in
+// SessionDescriptionInterface.
+const char* SdpTypeToString(SdpType type);
+
+// Returns the SdpType from its string form. The string form can be one of the
+// constants defined in SessionDescriptionInterface. Passing in any other string
+// results in nullopt.
+rtc::Optional<SdpType> SdpTypeFromString(const std::string& type_str);
+
+// Class representation of an SDP session description.
+//
+// An instance of this interface is supposed to be owned by one class at a time
+// and is therefore not expected to be thread safe.
+//
+// An instance can be created by CreateSessionDescription.
+class SessionDescriptionInterface {
+ public:
+  // String representations of the supported SDP types.
+  static const char kOffer[];
+  static const char kPrAnswer[];
+  static const char kAnswer[];
+
+  virtual ~SessionDescriptionInterface() {}
+
+  // Only for use internally.
+  virtual cricket::SessionDescription* description() = 0;
+  virtual const cricket::SessionDescription* description() const = 0;
+
+  // Get the session id and session version, which are defined based on
+  // RFC 4566 for the SDP o= line.
+  virtual std::string session_id() const = 0;
+  virtual std::string session_version() const = 0;
+
+  // Returns the type of this session description as an SdpType. Descriptions of
+  // the various types are found in the SdpType documentation.
+  // TODO(steveanton): Remove default implementation once Chromium has been
+  // updated.
+  virtual SdpType GetType() const;
+
+  // kOffer/kPrAnswer/kAnswer
+  // TODO(steveanton): Remove this in favor of |GetType| that returns SdpType.
+  virtual std::string type() const = 0;
+
+  // Adds the specified candidate to the description.
+  //
+  // Ownership is not transferred.
+  //
+  // Returns false if the session description does not have a media section
+  // that corresponds to |candidate.sdp_mid()| or
+  // |candidate.sdp_mline_index()|.
+  virtual bool AddCandidate(const IceCandidateInterface* candidate) = 0;
+
+  // Removes the candidates from the description, if found.
+  //
+  // Returns the number of candidates removed.
+  virtual size_t RemoveCandidates(
+      const std::vector<cricket::Candidate>& candidates);
+
+  // Returns the number of m= sections in the session description.
+  virtual size_t number_of_mediasections() const = 0;
+
+  // Returns a collection of all candidates that belong to a certain m=
+  // section.
+  virtual const IceCandidateCollection* candidates(
+      size_t mediasection_index) const = 0;
+
+  // Serializes the description to SDP.
+  virtual bool ToString(std::string* out) const = 0;
+};
+
+// Creates a SessionDescriptionInterface based on the SDP string and the type.
+// Returns null if the sdp string can't be parsed or the type is unsupported.
+// |error| may be null.
+// TODO(steveanton): This function is deprecated. Please use the functions below
+// which take an SdpType enum instead. Remove this once it is no longer used.
+SessionDescriptionInterface* CreateSessionDescription(const std::string& type,
+                                                      const std::string& sdp,
+                                                      SdpParseError* error);
+
+// Creates a SessionDescriptionInterface based on the SDP string and the type.
+// Returns null if the SDP string cannot be parsed.
+// If using the signature with |error_out|, details of the parsing error may be
+// written to |error_out| if it is not null.
+std::unique_ptr<SessionDescriptionInterface> CreateSessionDescription(
+    SdpType type,
+    const std::string& sdp);
+std::unique_ptr<SessionDescriptionInterface> CreateSessionDescription(
+    SdpType type,
+    const std::string& sdp,
+    SdpParseError* error_out);
+
+// CreateOffer and CreateAnswer callback interface.
+class CreateSessionDescriptionObserver : public rtc::RefCountInterface {
+ public:
+  // This callback transfers the ownership of the |desc|.
+  // TODO(deadbeef): Make this take an std::unique_ptr<> to avoid confusion
+  // around ownership.
+  virtual void OnSuccess(SessionDescriptionInterface* desc) = 0;
+  // The OnFailure callback takes an RTCError, which consists of an
+  // error code and a string.
+  // RTCError is non-copyable, so it must be passed using std::move.
+  // Earlier versions of the API used a string argument. This version
+  // is deprecated; in order to let clients remove the old version, it has a
+  // default implementation. If both versions are unimplemented, the
+  // result will be a runtime error (stack overflow). This is intentional.
+  virtual void OnFailure(RTCError error) {
+    OnFailure(error.message());
+  }
+  virtual void OnFailure(const std::string& error) {
+    OnFailure(RTCError(RTCErrorType::INTERNAL_ERROR, std::string(error)));
+  }
+
+ protected:
+  ~CreateSessionDescriptionObserver() override = default;
+};
+
+// SetLocalDescription and SetRemoteDescription callback interface.
+class SetSessionDescriptionObserver : public rtc::RefCountInterface {
+ public:
+  virtual void OnSuccess() = 0;
+  // See description in CreateSessionDescriptionObserver for OnFailure.
+  virtual void OnFailure(RTCError error) {
+    std::string message(error.message());
+    OnFailure(message);
+  }
+  virtual void OnFailure(const std::string& error) {
+    OnFailure(RTCError(RTCErrorType::INTERNAL_ERROR, std::string(error)));
+  }
+
+ protected:
+  ~SetSessionDescriptionObserver() override = default;
+};
+
+}  // namespace webrtc
+
+#endif  // API_JSEP_H_
diff --git a/api/jsepicecandidate.h b/api/jsepicecandidate.h
new file mode 100644
index 0000000..dae6121
--- /dev/null
+++ b/api/jsepicecandidate.h
@@ -0,0 +1,93 @@
+/*
+ *  Copyright 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// TODO(deadbeef): Move this out of api/; it's an implementation detail and
+// shouldn't be used externally.
+
+#ifndef API_JSEPICECANDIDATE_H_
+#define API_JSEPICECANDIDATE_H_
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "api/candidate.h"
+#include "api/jsep.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Implementation of IceCandidateInterface.
+class JsepIceCandidate : public IceCandidateInterface {
+ public:
+  JsepIceCandidate(const std::string& sdp_mid, int sdp_mline_index);
+  JsepIceCandidate(const std::string& sdp_mid, int sdp_mline_index,
+                   const cricket::Candidate& candidate);
+  ~JsepIceCandidate();
+  // |err| may be null.
+  bool Initialize(const std::string& sdp, SdpParseError* err);
+  void SetCandidate(const cricket::Candidate& candidate) {
+    candidate_ = candidate;
+  }
+
+  virtual std::string sdp_mid() const { return sdp_mid_; }
+  virtual int sdp_mline_index() const { return sdp_mline_index_; }
+  virtual const cricket::Candidate& candidate() const {
+    return candidate_;
+  }
+
+  virtual std::string server_url() const { return candidate_.url(); }
+
+  virtual bool ToString(std::string* out) const;
+
+ private:
+  std::string sdp_mid_;
+  int sdp_mline_index_;
+  cricket::Candidate candidate_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(JsepIceCandidate);
+};
+
+// Implementation of IceCandidateCollection which stores JsepIceCandidates.
+class JsepCandidateCollection : public IceCandidateCollection {
+ public:
+  JsepCandidateCollection() {}
+  // Move constructor is defined so that a vector of JsepCandidateCollections
+  // can be resized.
+  JsepCandidateCollection(JsepCandidateCollection&& o)
+      : candidates_(std::move(o.candidates_)) {}
+  ~JsepCandidateCollection();
+  virtual size_t count() const {
+    return candidates_.size();
+  }
+  virtual bool HasCandidate(const IceCandidateInterface* candidate) const;
+  // Adds and takes ownership of the JsepIceCandidate.
+  // TODO(deadbeef): Make this use an std::unique_ptr<>, so ownership logic is
+  // more clear.
+  virtual void add(JsepIceCandidate* candidate) {
+    candidates_.push_back(candidate);
+  }
+  virtual const IceCandidateInterface* at(size_t index) const {
+    return candidates_[index];
+  }
+  // Removes the candidate that has a matching address and protocol.
+  //
+  // Returns the number of candidates that were removed.
+  size_t remove(const cricket::Candidate& candidate);
+
+ private:
+  std::vector<JsepIceCandidate*> candidates_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(JsepCandidateCollection);
+};
+
+}  // namespace webrtc
+
+#endif  // API_JSEPICECANDIDATE_H_
diff --git a/api/jsepsessiondescription.h b/api/jsepsessiondescription.h
new file mode 100644
index 0000000..70bb277
--- /dev/null
+++ b/api/jsepsessiondescription.h
@@ -0,0 +1,89 @@
+/*
+ *  Copyright 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// TODO(deadbeef): Move this out of api/; it's an implementation detail and
+// shouldn't be used externally.
+
+#ifndef API_JSEPSESSIONDESCRIPTION_H_
+#define API_JSEPSESSIONDESCRIPTION_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/candidate.h"
+#include "api/jsep.h"
+#include "api/jsepicecandidate.h"
+#include "rtc_base/constructormagic.h"
+
+namespace cricket {
+class SessionDescription;
+}
+
+namespace webrtc {
+
+// Implementation of SessionDescriptionInterface.
+class JsepSessionDescription : public SessionDescriptionInterface {
+ public:
+  explicit JsepSessionDescription(SdpType type);
+  // TODO(steveanton): Remove this once callers have switched to SdpType.
+  explicit JsepSessionDescription(const std::string& type);
+  virtual ~JsepSessionDescription();
+
+  // Takes ownership of |description|.
+  // TODO(deadbeef): Make this use an std::unique_ptr<>, so ownership logic is
+  // more clear.
+  bool Initialize(cricket::SessionDescription* description,
+      const std::string& session_id,
+      const std::string& session_version);
+
+  virtual cricket::SessionDescription* description() {
+    return description_.get();
+  }
+  virtual const cricket::SessionDescription* description() const {
+    return description_.get();
+  }
+  virtual std::string session_id() const {
+    return session_id_;
+  }
+  virtual std::string session_version() const {
+    return session_version_;
+  }
+  virtual SdpType GetType() const { return type_; }
+  virtual std::string type() const { return SdpTypeToString(type_); }
+  // Allows changing the type. Used for testing.
+  virtual bool AddCandidate(const IceCandidateInterface* candidate);
+  virtual size_t RemoveCandidates(
+      const std::vector<cricket::Candidate>& candidates);
+  virtual size_t number_of_mediasections() const;
+  virtual const IceCandidateCollection* candidates(
+      size_t mediasection_index) const;
+  virtual bool ToString(std::string* out) const;
+
+  static const int kDefaultVideoCodecId;
+  static const char kDefaultVideoCodecName[];
+
+ private:
+  std::unique_ptr<cricket::SessionDescription> description_;
+  std::string session_id_;
+  std::string session_version_;
+  SdpType type_;
+  std::vector<JsepCandidateCollection> candidate_collection_;
+
+  bool GetMediasectionIndex(const IceCandidateInterface* candidate,
+                            size_t* index);
+  int GetMediasectionIndex(const cricket::Candidate& candidate);
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(JsepSessionDescription);
+};
+
+}  // namespace webrtc
+
+#endif  // API_JSEPSESSIONDESCRIPTION_H_
diff --git a/api/mediaconstraintsinterface.cc b/api/mediaconstraintsinterface.cc
new file mode 100644
index 0000000..8358644
--- /dev/null
+++ b/api/mediaconstraintsinterface.cc
@@ -0,0 +1,266 @@
+/*
+ *  Copyright 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/mediaconstraintsinterface.h"
+
+#include "api/peerconnectioninterface.h"
+#include "rtc_base/stringencode.h"
+
+namespace {
+
+// Find the highest-priority instance of the T-valued constraint named by
+// |key| and return its value as |value|. |constraints| can be null.
+// If |mandatory_constraints| is non-null, it is incremented if the key appears
+// among the mandatory constraints.
+// Returns true if the key was found and has a valid value for type T.
+// If the key appears multiple times as an optional constraint, appearances
+// after the first are ignored.
+// Note: Because this uses FindFirst, repeated optional constraints whose
+// first instance has an unrecognized value are not handled precisely in
+// accordance with the specification.
+template <typename T>
+bool FindConstraint(const webrtc::MediaConstraintsInterface* constraints,
+                    const std::string& key,
+                    T* value,
+                    size_t* mandatory_constraints) {
+  std::string string_value;
+  if (!FindConstraint(constraints, key, &string_value, mandatory_constraints)) {
+    return false;
+  }
+  return rtc::FromString(string_value, value);
+}
+
+// Specialization for std::string, since a string doesn't need conversion.
+template <>
+bool FindConstraint(const webrtc::MediaConstraintsInterface* constraints,
+                    const std::string& key,
+                    std::string* value,
+                    size_t* mandatory_constraints) {
+  if (!constraints) {
+    return false;
+  }
+  if (constraints->GetMandatory().FindFirst(key, value)) {
+    if (mandatory_constraints) {
+      ++*mandatory_constraints;
+    }
+    return true;
+  }
+  if (constraints->GetOptional().FindFirst(key, value)) {
+    return true;
+  }
+  return false;
+}
+
+// Converts a constraint (mandatory takes precedence over optional) to an
+// rtc::Optional.
+template <typename T>
+void ConstraintToOptional(const webrtc::MediaConstraintsInterface* constraints,
+                          const std::string& key,
+                          rtc::Optional<T>* value_out) {
+  T value;
+  bool present = FindConstraint<T>(constraints, key, &value, nullptr);
+  if (present) {
+    *value_out = value;
+  }
+}
+}  // namespace
+
+namespace webrtc {
+
+const char MediaConstraintsInterface::kValueTrue[] = "true";
+const char MediaConstraintsInterface::kValueFalse[] = "false";
+
+// Constraints declared as static members in mediastreaminterface.h
+// Specified by draft-alvestrand-constraints-resolution-00b
+const char MediaConstraintsInterface::kMinAspectRatio[] = "minAspectRatio";
+const char MediaConstraintsInterface::kMaxAspectRatio[] = "maxAspectRatio";
+const char MediaConstraintsInterface::kMaxWidth[] = "maxWidth";
+const char MediaConstraintsInterface::kMinWidth[] = "minWidth";
+const char MediaConstraintsInterface::kMaxHeight[] = "maxHeight";
+const char MediaConstraintsInterface::kMinHeight[] = "minHeight";
+const char MediaConstraintsInterface::kMaxFrameRate[] = "maxFrameRate";
+const char MediaConstraintsInterface::kMinFrameRate[] = "minFrameRate";
+
+// Audio constraints.
+const char MediaConstraintsInterface::kEchoCancellation[] =
+    "echoCancellation";
+const char MediaConstraintsInterface::kGoogEchoCancellation[] =
+    "googEchoCancellation";
+const char MediaConstraintsInterface::kExtendedFilterEchoCancellation[] =
+    "googEchoCancellation2";
+const char MediaConstraintsInterface::kDAEchoCancellation[] =
+    "googDAEchoCancellation";
+const char MediaConstraintsInterface::kAutoGainControl[] =
+    "googAutoGainControl";
+const char MediaConstraintsInterface::kExperimentalAutoGainControl[] =
+    "googAutoGainControl2";
+const char MediaConstraintsInterface::kNoiseSuppression[] =
+    "googNoiseSuppression";
+const char MediaConstraintsInterface::kExperimentalNoiseSuppression[] =
+    "googNoiseSuppression2";
+const char MediaConstraintsInterface::kIntelligibilityEnhancer[] =
+    "intelligibilityEnhancer";
+const char MediaConstraintsInterface::kHighpassFilter[] =
+    "googHighpassFilter";
+const char MediaConstraintsInterface::kTypingNoiseDetection[] =
+    "googTypingNoiseDetection";
+const char MediaConstraintsInterface::kAudioMirroring[] = "googAudioMirroring";
+const char MediaConstraintsInterface::kAudioNetworkAdaptorConfig[] =
+    "googAudioNetworkAdaptorConfig";
+
+// Google-specific constraint keys for a local video source (getUserMedia).
+const char MediaConstraintsInterface::kNoiseReduction[] = "googNoiseReduction";
+
+// Constraint keys for CreateOffer / CreateAnswer defined in W3C specification.
+const char MediaConstraintsInterface::kOfferToReceiveAudio[] =
+    "OfferToReceiveAudio";
+const char MediaConstraintsInterface::kOfferToReceiveVideo[] =
+    "OfferToReceiveVideo";
+const char MediaConstraintsInterface::kVoiceActivityDetection[] =
+    "VoiceActivityDetection";
+const char MediaConstraintsInterface::kIceRestart[] =
+    "IceRestart";
+// Google specific constraint for BUNDLE enable/disable.
+const char MediaConstraintsInterface::kUseRtpMux[] =
+    "googUseRtpMUX";
+
+// Below constraints should be used during PeerConnection construction.
+const char MediaConstraintsInterface::kEnableDtlsSrtp[] =
+    "DtlsSrtpKeyAgreement";
+const char MediaConstraintsInterface::kEnableRtpDataChannels[] =
+    "RtpDataChannels";
+// Google-specific constraint keys.
+const char MediaConstraintsInterface::kEnableDscp[] = "googDscp";
+const char MediaConstraintsInterface::kEnableIPv6[] = "googIPv6";
+const char MediaConstraintsInterface::kEnableVideoSuspendBelowMinBitrate[] =
+    "googSuspendBelowMinBitrate";
+const char MediaConstraintsInterface::kCombinedAudioVideoBwe[] =
+    "googCombinedAudioVideoBwe";
+const char MediaConstraintsInterface::kScreencastMinBitrate[] =
+    "googScreencastMinBitrate";
+// TODO(ronghuawu): Remove once cpu overuse detection is stable.
+const char MediaConstraintsInterface::kCpuOveruseDetection[] =
+    "googCpuOveruseDetection";
+const char MediaConstraintsInterface::kPayloadPadding[] = "googPayloadPadding";
+
+
+// Set |value| to the value associated with the first appearance of |key|, or
+// return false if |key| is not found.
+bool MediaConstraintsInterface::Constraints::FindFirst(
+    const std::string& key, std::string* value) const {
+  for (Constraints::const_iterator iter = begin(); iter != end(); ++iter) {
+    if (iter->key == key) {
+      *value = iter->value;
+      return true;
+    }
+  }
+  return false;
+}
+
+bool FindConstraint(const MediaConstraintsInterface* constraints,
+                    const std::string& key, bool* value,
+                    size_t* mandatory_constraints) {
+  return ::FindConstraint<bool>(constraints, key, value, mandatory_constraints);
+}
+
+bool FindConstraint(const MediaConstraintsInterface* constraints,
+                    const std::string& key,
+                    int* value,
+                    size_t* mandatory_constraints) {
+  return ::FindConstraint<int>(constraints, key, value, mandatory_constraints);
+}
+
+void CopyConstraintsIntoRtcConfiguration(
+    const MediaConstraintsInterface* constraints,
+    PeerConnectionInterface::RTCConfiguration* configuration) {
+  // Copy info from constraints into configuration, if present.
+  if (!constraints) {
+    return;
+  }
+
+  bool enable_ipv6;
+  if (FindConstraint(constraints, MediaConstraintsInterface::kEnableIPv6,
+                     &enable_ipv6, nullptr)) {
+    configuration->disable_ipv6 = !enable_ipv6;
+  }
+  FindConstraint(constraints, MediaConstraintsInterface::kEnableDscp,
+                 &configuration->media_config.enable_dscp, nullptr);
+  FindConstraint(
+      constraints, MediaConstraintsInterface::kCpuOveruseDetection,
+      &configuration->media_config.video.enable_cpu_adaptation, nullptr);
+  FindConstraint(constraints, MediaConstraintsInterface::kEnableRtpDataChannels,
+                 &configuration->enable_rtp_data_channel, nullptr);
+  // Find Suspend Below Min Bitrate constraint.
+  FindConstraint(constraints,
+                 MediaConstraintsInterface::kEnableVideoSuspendBelowMinBitrate,
+                 &configuration->media_config.video.suspend_below_min_bitrate,
+                 nullptr);
+  ConstraintToOptional<int>(constraints,
+                            MediaConstraintsInterface::kScreencastMinBitrate,
+                            &configuration->screencast_min_bitrate);
+  ConstraintToOptional<bool>(constraints,
+                             MediaConstraintsInterface::kCombinedAudioVideoBwe,
+                             &configuration->combined_audio_video_bwe);
+  ConstraintToOptional<bool>(constraints,
+                             MediaConstraintsInterface::kEnableDtlsSrtp,
+                             &configuration->enable_dtls_srtp);
+}
+
+void CopyConstraintsIntoAudioOptions(
+    const MediaConstraintsInterface* constraints,
+    cricket::AudioOptions* options) {
+  if (!constraints) {
+    return;
+  }
+
+  ConstraintToOptional<bool>(constraints,
+                             MediaConstraintsInterface::kGoogEchoCancellation,
+                             &options->echo_cancellation);
+  ConstraintToOptional<bool>(
+      constraints, MediaConstraintsInterface::kExtendedFilterEchoCancellation,
+      &options->extended_filter_aec);
+  ConstraintToOptional<bool>(constraints,
+                             MediaConstraintsInterface::kDAEchoCancellation,
+                             &options->delay_agnostic_aec);
+  ConstraintToOptional<bool>(constraints,
+                             MediaConstraintsInterface::kAutoGainControl,
+                             &options->auto_gain_control);
+  ConstraintToOptional<bool>(
+      constraints, MediaConstraintsInterface::kExperimentalAutoGainControl,
+      &options->experimental_agc);
+  ConstraintToOptional<bool>(constraints,
+                             MediaConstraintsInterface::kNoiseSuppression,
+                             &options->noise_suppression);
+  ConstraintToOptional<bool>(
+      constraints, MediaConstraintsInterface::kExperimentalNoiseSuppression,
+      &options->experimental_ns);
+  ConstraintToOptional<bool>(
+      constraints, MediaConstraintsInterface::kIntelligibilityEnhancer,
+      &options->intelligibility_enhancer);
+  ConstraintToOptional<bool>(constraints,
+                             MediaConstraintsInterface::kHighpassFilter,
+                             &options->highpass_filter);
+  ConstraintToOptional<bool>(constraints,
+                             MediaConstraintsInterface::kTypingNoiseDetection,
+                             &options->typing_detection);
+  ConstraintToOptional<bool>(constraints,
+                             MediaConstraintsInterface::kAudioMirroring,
+                             &options->stereo_swapping);
+  ConstraintToOptional<std::string>(
+      constraints, MediaConstraintsInterface::kAudioNetworkAdaptorConfig,
+      &options->audio_network_adaptor_config);
+  // When |kAudioNetworkAdaptorConfig| is defined, it both means that audio
+  // network adaptor is desired, and provides the config string.
+  if (options->audio_network_adaptor_config) {
+    options->audio_network_adaptor = true;
+  }
+}
+
+}  // namespace webrtc
diff --git a/api/mediaconstraintsinterface.h b/api/mediaconstraintsinterface.h
new file mode 100644
index 0000000..90661b8
--- /dev/null
+++ b/api/mediaconstraintsinterface.h
@@ -0,0 +1,149 @@
+/*
+ *  Copyright 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains the interface for MediaConstraints, corresponding to
+// the definition at
+// http://www.w3.org/TR/mediacapture-streams/#mediastreamconstraints and also
+// used in WebRTC: http://dev.w3.org/2011/webrtc/editor/webrtc.html#constraints.
+
+// This interface is being deprecated in Chrome, and may be removed
+// from WebRTC too.
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=5617
+
+#ifndef API_MEDIACONSTRAINTSINTERFACE_H_
+#define API_MEDIACONSTRAINTSINTERFACE_H_
+
+#include <string>
+#include <vector>
+
+#include "api/optional.h"
+#include "api/peerconnectioninterface.h"
+
+namespace webrtc {
+
+// Interface used for passing arguments about media constraints
+// to the MediaStream and PeerConnection implementation.
+//
+// Constraints may be either "mandatory", which means that unless satisfied,
+// the method taking the constraints should fail, or "optional", which means
+// they may not be satisfied..
+class MediaConstraintsInterface {
+ public:
+  struct Constraint {
+    Constraint() {}
+    Constraint(const std::string& key, const std::string value)
+        : key(key), value(value) {
+    }
+    std::string key;
+    std::string value;
+  };
+
+  class Constraints : public std::vector<Constraint> {
+   public:
+    bool FindFirst(const std::string& key, std::string* value) const;
+  };
+
+  // Constraint keys used by a local video source.
+  // Specified by draft-alvestrand-constraints-resolution-00b
+  static const char kMinAspectRatio[];  // minAspectRatio
+  static const char kMaxAspectRatio[];  // maxAspectRatio
+  static const char kMaxWidth[];  // maxWidth
+  static const char kMinWidth[];  // minWidth
+  static const char kMaxHeight[];  // maxHeight
+  static const char kMinHeight[];  // minHeight
+  static const char kMaxFrameRate[];  // maxFrameRate
+  static const char kMinFrameRate[];  // minFrameRate
+
+  // Constraint keys used by a local audio source.
+  static const char kEchoCancellation[];  // echoCancellation
+
+  // These keys are google specific.
+  static const char kGoogEchoCancellation[];  // googEchoCancellation
+
+  static const char kExtendedFilterEchoCancellation[];  // googEchoCancellation2
+  static const char kDAEchoCancellation[];  // googDAEchoCancellation
+  static const char kAutoGainControl[];  // googAutoGainControl
+  static const char kExperimentalAutoGainControl[];  // googAutoGainControl2
+  static const char kNoiseSuppression[];  // googNoiseSuppression
+  static const char kExperimentalNoiseSuppression[];  // googNoiseSuppression2
+  static const char kIntelligibilityEnhancer[];  // intelligibilityEnhancer
+  static const char kHighpassFilter[];  // googHighpassFilter
+  static const char kTypingNoiseDetection[];  // googTypingNoiseDetection
+  static const char kAudioMirroring[];  // googAudioMirroring
+  static const char
+      kAudioNetworkAdaptorConfig[];  // goodAudioNetworkAdaptorConfig
+
+  // Google-specific constraint keys for a local video source
+  static const char kNoiseReduction[];  // googNoiseReduction
+
+  // Constraint keys for CreateOffer / CreateAnswer
+  // Specified by the W3C PeerConnection spec
+  static const char kOfferToReceiveVideo[];  // OfferToReceiveVideo
+  static const char kOfferToReceiveAudio[];  // OfferToReceiveAudio
+  static const char kVoiceActivityDetection[];  // VoiceActivityDetection
+  static const char kIceRestart[];  // IceRestart
+  // These keys are google specific.
+  static const char kUseRtpMux[];  // googUseRtpMUX
+
+  // Constraints values.
+  static const char kValueTrue[];  // true
+  static const char kValueFalse[];  // false
+
+  // PeerConnection constraint keys.
+  // Temporary pseudo-constraints used to enable DTLS-SRTP
+  static const char kEnableDtlsSrtp[];  // Enable DTLS-SRTP
+  // Temporary pseudo-constraints used to enable DataChannels
+  static const char kEnableRtpDataChannels[];  // Enable RTP DataChannels
+  // Google-specific constraint keys.
+  // Temporary pseudo-constraint for enabling DSCP through JS.
+  static const char kEnableDscp[];  // googDscp
+  // Constraint to enable IPv6 through JS.
+  static const char kEnableIPv6[];  // googIPv6
+  // Temporary constraint to enable suspend below min bitrate feature.
+  static const char kEnableVideoSuspendBelowMinBitrate[];
+      // googSuspendBelowMinBitrate
+  // Constraint to enable combined audio+video bandwidth estimation.
+  static const char kCombinedAudioVideoBwe[];  // googCombinedAudioVideoBwe
+  static const char kScreencastMinBitrate[];  // googScreencastMinBitrate
+  static const char kCpuOveruseDetection[];  // googCpuOveruseDetection
+  static const char kPayloadPadding[];  // googPayloadPadding
+
+  // The prefix of internal-only constraints whose JS set values should be
+  // stripped by Chrome before passed down to Libjingle.
+  static const char kInternalConstraintPrefix[];
+
+  virtual ~MediaConstraintsInterface() = default;
+
+  virtual const Constraints& GetMandatory() const = 0;
+  virtual const Constraints& GetOptional() const = 0;
+};
+
+bool FindConstraint(const MediaConstraintsInterface* constraints,
+                    const std::string& key, bool* value,
+                    size_t* mandatory_constraints);
+
+bool FindConstraint(const MediaConstraintsInterface* constraints,
+                    const std::string& key,
+                    int* value,
+                    size_t* mandatory_constraints);
+
+// Copy all relevant constraints into an RTCConfiguration object.
+void CopyConstraintsIntoRtcConfiguration(
+    const MediaConstraintsInterface* constraints,
+    PeerConnectionInterface::RTCConfiguration* configuration);
+
+// Copy all relevant constraints into an AudioOptions object.
+void CopyConstraintsIntoAudioOptions(
+    const MediaConstraintsInterface* constraints,
+    cricket::AudioOptions* options);
+
+}  // namespace webrtc
+
+#endif  // API_MEDIACONSTRAINTSINTERFACE_H_
diff --git a/api/mediastreaminterface.cc b/api/mediastreaminterface.cc
new file mode 100644
index 0000000..6f08a0c
--- /dev/null
+++ b/api/mediastreaminterface.cc
@@ -0,0 +1,61 @@
+/*
+ *  Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/mediastreaminterface.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+const char MediaStreamTrackInterface::kVideoKind[] = "video";
+const char MediaStreamTrackInterface::kAudioKind[] = "audio";
+
+void AudioProcessorInterface::GetStats(AudioProcessorStats* /*stats*/) {
+  RTC_NOTREACHED() << "Old-style GetStats() is called but it has no "
+                   << "implementation.";
+  RTC_LOG(LS_ERROR) << "Old-style GetStats() is called but it has no "
+                    << "implementation.";
+}
+
+// TODO(ivoc): Remove this when the function becomes pure virtual.
+AudioProcessorInterface::AudioProcessorStatistics
+AudioProcessorInterface::GetStats(bool /*has_remote_tracks*/) {
+  AudioProcessorStats stats;
+  GetStats(&stats);
+  AudioProcessorStatistics new_stats;
+  new_stats.apm_statistics.divergent_filter_fraction =
+      stats.aec_divergent_filter_fraction;
+  new_stats.apm_statistics.delay_median_ms = stats.echo_delay_median_ms;
+  new_stats.apm_statistics.delay_standard_deviation_ms =
+      stats.echo_delay_std_ms;
+  new_stats.apm_statistics.echo_return_loss = stats.echo_return_loss;
+  new_stats.apm_statistics.echo_return_loss_enhancement =
+      stats.echo_return_loss_enhancement;
+  new_stats.apm_statistics.residual_echo_likelihood =
+      stats.residual_echo_likelihood;
+  new_stats.apm_statistics.residual_echo_likelihood_recent_max =
+      stats.residual_echo_likelihood_recent_max;
+  return new_stats;
+}
+
+VideoTrackInterface::ContentHint VideoTrackInterface::content_hint() const {
+  return ContentHint::kNone;
+}
+
+bool AudioTrackInterface::GetSignalLevel(int* level) {
+  return false;
+}
+
+rtc::scoped_refptr<AudioProcessorInterface>
+AudioTrackInterface::GetAudioProcessor() {
+  return nullptr;
+}
+
+}  // namespace webrtc
diff --git a/api/mediastreaminterface.h b/api/mediastreaminterface.h
new file mode 100644
index 0000000..2e2cff0
--- /dev/null
+++ b/api/mediastreaminterface.h
@@ -0,0 +1,336 @@
+/*
+ *  Copyright 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains interfaces for MediaStream, MediaTrack and MediaSource.
+// These interfaces are used for implementing MediaStream and MediaTrack as
+// defined in http://dev.w3.org/2011/webrtc/editor/webrtc.html#stream-api. These
+// interfaces must be used only with PeerConnection. PeerConnectionManager
+// interface provides the factory methods to create MediaStream and MediaTracks.
+
+#ifndef API_MEDIASTREAMINTERFACE_H_
+#define API_MEDIASTREAMINTERFACE_H_
+
+#include <stddef.h>
+
+#include <string>
+#include <vector>
+
+#include "api/optional.h"
+#include "api/video/video_frame.h"
+// TODO(zhihuang): Remove unrelated headers once downstream applications stop
+// relying on them; they were previously transitively included by
+// mediachannel.h, which is no longer a dependency of this file.
+#include "api/videosinkinterface.h"
+#include "api/videosourceinterface.h"
+#include "modules/audio_processing/include/audio_processing_statistics.h"
+#include "rtc_base/ratetracker.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/timeutils.h"
+
+namespace webrtc {
+
+// Generic observer interface.
+class ObserverInterface {
+ public:
+  virtual void OnChanged() = 0;
+
+ protected:
+  virtual ~ObserverInterface() {}
+};
+
+class NotifierInterface {
+ public:
+  virtual void RegisterObserver(ObserverInterface* observer) = 0;
+  virtual void UnregisterObserver(ObserverInterface* observer) = 0;
+
+  virtual ~NotifierInterface() {}
+};
+
+// Base class for sources. A MediaStreamTrack has an underlying source that
+// provides media. A source can be shared by multiple tracks.
+class MediaSourceInterface : public rtc::RefCountInterface,
+                             public NotifierInterface {
+ public:
+  enum SourceState {
+    kInitializing,
+    kLive,
+    kEnded,
+    kMuted
+  };
+
+  virtual SourceState state() const = 0;
+
+  virtual bool remote() const = 0;
+
+ protected:
+  ~MediaSourceInterface() override = default;
+};
+
+// C++ version of MediaStreamTrack.
+// See: https://www.w3.org/TR/mediacapture-streams/#mediastreamtrack
+class MediaStreamTrackInterface : public rtc::RefCountInterface,
+                                  public NotifierInterface {
+ public:
+  enum TrackState {
+    kLive,
+    kEnded,
+  };
+
+  static const char kAudioKind[];
+  static const char kVideoKind[];
+
+  // The kind() method must return kAudioKind only if the object is a
+  // subclass of AudioTrackInterface, and kVideoKind only if the
+  // object is a subclass of VideoTrackInterface. It is typically used
+  // to protect a static_cast<> to the corresponding subclass.
+  virtual std::string kind() const = 0;
+
+  // Track identifier.
+  virtual std::string id() const = 0;
+
+  // A disabled track will produce silence (if audio) or black frames (if
+  // video). Can be disabled and re-enabled.
+  virtual bool enabled() const = 0;
+  virtual bool set_enabled(bool enable) = 0;
+
+  // Live or ended. A track will never be live again after becoming ended.
+  virtual TrackState state() const = 0;
+
+ protected:
+  ~MediaStreamTrackInterface() override = default;
+};
+
+// VideoTrackSourceInterface is a reference counted source used for
+// VideoTracks. The same source can be used by multiple VideoTracks.
+// VideoTrackSourceInterface is designed to be invoked on the signaling thread
+// except for rtc::VideoSourceInterface<VideoFrame> methods that will be invoked
+// on the worker thread via a VideoTrack. A custom implementation of a source
+// can inherit AdaptedVideoTrackSource instead of directly implementing this
+// interface.
+class VideoTrackSourceInterface
+    : public MediaSourceInterface,
+      public rtc::VideoSourceInterface<VideoFrame> {
+ public:
+  struct Stats {
+    // Original size of captured frame, before video adaptation.
+    int input_width;
+    int input_height;
+  };
+
+  // Indicates that parameters suitable for screencasts should be automatically
+  // applied to RtpSenders.
+  // TODO(perkj): Remove these once all known applications have moved to
+  // explicitly setting suitable parameters for screencasts and don't need this
+  // implicit behavior.
+  virtual bool is_screencast() const = 0;
+
+  // Indicates that the encoder should denoise video before encoding it.
+  // If it is not set, the default configuration is used which is different
+  // depending on video codec.
+  // TODO(perkj): Remove this once denoising is done by the source, and not by
+  // the encoder.
+  virtual rtc::Optional<bool> needs_denoising() const = 0;
+
+  // Returns false if no stats are available, e.g, for a remote source, or a
+  // source which has not seen its first frame yet.
+  //
+  // Implementation should avoid blocking.
+  virtual bool GetStats(Stats* stats) = 0;
+
+ protected:
+  ~VideoTrackSourceInterface() override = default;
+};
+
+// VideoTrackInterface is designed to be invoked on the signaling thread except
+// for rtc::VideoSourceInterface<VideoFrame> methods that must be invoked
+// on the worker thread.
+// PeerConnectionFactory::CreateVideoTrack can be used for creating a VideoTrack
+// that ensures thread safety and that all methods are called on the right
+// thread.
+class VideoTrackInterface
+    : public MediaStreamTrackInterface,
+      public rtc::VideoSourceInterface<VideoFrame> {
+ public:
+  // Video track content hint, used to override the source is_screencast
+  // property.
+  // See https://crbug.com/653531 and https://github.com/WICG/mst-content-hint.
+  enum class ContentHint { kNone, kFluid, kDetailed };
+
+  // Register a video sink for this track. Used to connect the track to the
+  // underlying video engine.
+  void AddOrUpdateSink(rtc::VideoSinkInterface<VideoFrame>* sink,
+                       const rtc::VideoSinkWants& wants) override {}
+  void RemoveSink(rtc::VideoSinkInterface<VideoFrame>* sink) override {}
+
+  virtual VideoTrackSourceInterface* GetSource() const = 0;
+
+  virtual ContentHint content_hint() const;
+  virtual void set_content_hint(ContentHint hint) {}
+
+ protected:
+  ~VideoTrackInterface() override = default;
+};
+
+// Interface for receiving audio data from a AudioTrack.
+class AudioTrackSinkInterface {
+ public:
+  virtual void OnData(const void* audio_data,
+                      int bits_per_sample,
+                      int sample_rate,
+                      size_t number_of_channels,
+                      size_t number_of_frames) = 0;
+
+ protected:
+  virtual ~AudioTrackSinkInterface() {}
+};
+
+// AudioSourceInterface is a reference counted source used for AudioTracks.
+// The same source can be used by multiple AudioTracks.
+class AudioSourceInterface : public MediaSourceInterface {
+ public:
+  class AudioObserver {
+   public:
+    virtual void OnSetVolume(double volume) = 0;
+
+   protected:
+    virtual ~AudioObserver() {}
+  };
+
+  // TODO(deadbeef): Makes all the interfaces pure virtual after they're
+  // implemented in chromium.
+
+  // Sets the volume of the source. |volume| is in  the range of [0, 10].
+  // TODO(tommi): This method should be on the track and ideally volume should
+  // be applied in the track in a way that does not affect clones of the track.
+  virtual void SetVolume(double volume) {}
+
+  // Registers/unregisters observers to the audio source.
+  virtual void RegisterAudioObserver(AudioObserver* observer) {}
+  virtual void UnregisterAudioObserver(AudioObserver* observer) {}
+
+  // TODO(tommi): Make pure virtual.
+  virtual void AddSink(AudioTrackSinkInterface* sink) {}
+  virtual void RemoveSink(AudioTrackSinkInterface* sink) {}
+};
+
+// Interface of the audio processor used by the audio track to collect
+// statistics.
+class AudioProcessorInterface : public rtc::RefCountInterface {
+ public:
+  // Deprecated, use AudioProcessorStatistics instead.
+  // TODO(ivoc): Remove this when all implementations have switched to the new
+  //             GetStats function. See b/67926135.
+  struct AudioProcessorStats {
+    AudioProcessorStats()
+        : typing_noise_detected(false),
+          echo_return_loss(0),
+          echo_return_loss_enhancement(0),
+          echo_delay_median_ms(0),
+          echo_delay_std_ms(0),
+          residual_echo_likelihood(0.0f),
+          residual_echo_likelihood_recent_max(0.0f),
+          aec_divergent_filter_fraction(0.0) {}
+    ~AudioProcessorStats() {}
+
+    bool typing_noise_detected;
+    int echo_return_loss;
+    int echo_return_loss_enhancement;
+    int echo_delay_median_ms;
+    int echo_delay_std_ms;
+    float residual_echo_likelihood;
+    float residual_echo_likelihood_recent_max;
+    float aec_divergent_filter_fraction;
+  };
+  // This struct maintains the optionality of the stats, and will replace the
+  // regular stats struct when all users have been updated.
+  struct AudioProcessorStatistics {
+    bool typing_noise_detected = false;
+    AudioProcessingStats apm_statistics;
+  };
+
+  // Get audio processor statistics.
+  virtual void GetStats(AudioProcessorStats* stats);
+
+  // Get audio processor statistics. The |has_remote_tracks| argument should be
+  // set if there are active remote tracks (this would usually be true during
+  // a call). If there are no remote tracks some of the stats will not be set by
+  // the AudioProcessor, because they only make sense if there is at least one
+  // remote track.
+  // TODO(ivoc): Make pure virtual when all implementions are updated.
+  virtual AudioProcessorStatistics GetStats(bool has_remote_tracks);
+
+ protected:
+  ~AudioProcessorInterface() override = default;
+};
+
+class AudioTrackInterface : public MediaStreamTrackInterface {
+ public:
+  // TODO(deadbeef): Figure out if the following interface should be const or
+  // not.
+  virtual AudioSourceInterface* GetSource() const =  0;
+
+  // Add/Remove a sink that will receive the audio data from the track.
+  virtual void AddSink(AudioTrackSinkInterface* sink) = 0;
+  virtual void RemoveSink(AudioTrackSinkInterface* sink) = 0;
+
+  // Get the signal level from the audio track.
+  // Return true on success, otherwise false.
+  // TODO(deadbeef): Change the interface to int GetSignalLevel() and pure
+  // virtual after it's implemented in chromium.
+  virtual bool GetSignalLevel(int* level);
+
+  // Get the audio processor used by the audio track. Return null if the track
+  // does not have any processor.
+  // TODO(deadbeef): Make the interface pure virtual.
+  virtual rtc::scoped_refptr<AudioProcessorInterface> GetAudioProcessor();
+
+ protected:
+  ~AudioTrackInterface() override = default;
+};
+
+typedef std::vector<rtc::scoped_refptr<AudioTrackInterface> >
+    AudioTrackVector;
+typedef std::vector<rtc::scoped_refptr<VideoTrackInterface> >
+    VideoTrackVector;
+
+// C++ version of https://www.w3.org/TR/mediacapture-streams/#mediastream.
+//
+// A major difference is that remote audio/video tracks (received by a
+// PeerConnection/RtpReceiver) are not synchronized simply by adding them to
+// the same stream; a session description with the correct "a=msid" attributes
+// must be pushed down.
+//
+// Thus, this interface acts as simply a container for tracks.
+class MediaStreamInterface : public rtc::RefCountInterface,
+                             public NotifierInterface {
+ public:
+  virtual std::string id() const = 0;
+
+  virtual AudioTrackVector GetAudioTracks() = 0;
+  virtual VideoTrackVector GetVideoTracks() = 0;
+  virtual rtc::scoped_refptr<AudioTrackInterface>
+      FindAudioTrack(const std::string& track_id) = 0;
+  virtual rtc::scoped_refptr<VideoTrackInterface>
+      FindVideoTrack(const std::string& track_id) = 0;
+
+  virtual bool AddTrack(AudioTrackInterface* track) = 0;
+  virtual bool AddTrack(VideoTrackInterface* track) = 0;
+  virtual bool RemoveTrack(AudioTrackInterface* track) = 0;
+  virtual bool RemoveTrack(VideoTrackInterface* track) = 0;
+
+ protected:
+  ~MediaStreamInterface() override = default;
+};
+
+}  // namespace webrtc
+
+#endif  // API_MEDIASTREAMINTERFACE_H_
diff --git a/api/mediastreamproxy.h b/api/mediastreamproxy.h
new file mode 100644
index 0000000..3f261db
--- /dev/null
+++ b/api/mediastreamproxy.h
@@ -0,0 +1,44 @@
+/*
+ *  Copyright 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_MEDIASTREAMPROXY_H_
+#define API_MEDIASTREAMPROXY_H_
+
+#include <string>
+
+#include "api/mediastreaminterface.h"
+#include "api/proxy.h"
+
+namespace webrtc {
+
+// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods
+// are called on is an implementation detail.
+BEGIN_SIGNALING_PROXY_MAP(MediaStream)
+  PROXY_SIGNALING_THREAD_DESTRUCTOR()
+  PROXY_CONSTMETHOD0(std::string, id)
+  PROXY_METHOD0(AudioTrackVector, GetAudioTracks)
+  PROXY_METHOD0(VideoTrackVector, GetVideoTracks)
+  PROXY_METHOD1(rtc::scoped_refptr<AudioTrackInterface>,
+                FindAudioTrack,
+                const std::string&)
+  PROXY_METHOD1(rtc::scoped_refptr<VideoTrackInterface>,
+                FindVideoTrack,
+                const std::string&)
+  PROXY_METHOD1(bool, AddTrack, AudioTrackInterface*)
+  PROXY_METHOD1(bool, AddTrack, VideoTrackInterface*)
+  PROXY_METHOD1(bool, RemoveTrack, AudioTrackInterface*)
+  PROXY_METHOD1(bool, RemoveTrack, VideoTrackInterface*)
+  PROXY_METHOD1(void, RegisterObserver, ObserverInterface*)
+  PROXY_METHOD1(void, UnregisterObserver, ObserverInterface*)
+END_PROXY_MAP()
+
+}  // namespace webrtc
+
+#endif  // API_MEDIASTREAMPROXY_H_
diff --git a/api/mediastreamtrackproxy.h b/api/mediastreamtrackproxy.h
new file mode 100644
index 0000000..57a7695
--- /dev/null
+++ b/api/mediastreamtrackproxy.h
@@ -0,0 +1,65 @@
+/*
+ *  Copyright 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file includes proxy classes for tracks. The purpose is
+// to make sure tracks are only accessed from the signaling thread.
+
+#ifndef API_MEDIASTREAMTRACKPROXY_H_
+#define API_MEDIASTREAMTRACKPROXY_H_
+
+#include <string>
+
+#include "api/mediastreaminterface.h"
+#include "api/proxy.h"
+
+namespace webrtc {
+
+// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods
+// are called on is an implementation detail.
+
+BEGIN_SIGNALING_PROXY_MAP(AudioTrack)
+  PROXY_SIGNALING_THREAD_DESTRUCTOR()
+  PROXY_CONSTMETHOD0(std::string, kind)
+  PROXY_CONSTMETHOD0(std::string, id)
+  PROXY_CONSTMETHOD0(TrackState, state)
+  PROXY_CONSTMETHOD0(bool, enabled)
+  PROXY_CONSTMETHOD0(AudioSourceInterface*, GetSource)
+  PROXY_METHOD1(void, AddSink, AudioTrackSinkInterface*)
+  PROXY_METHOD1(void, RemoveSink, AudioTrackSinkInterface*)
+  PROXY_METHOD1(bool, GetSignalLevel, int*)
+  PROXY_METHOD0(rtc::scoped_refptr<AudioProcessorInterface>, GetAudioProcessor)
+  PROXY_METHOD1(bool, set_enabled, bool)
+  PROXY_METHOD1(void, RegisterObserver, ObserverInterface*)
+  PROXY_METHOD1(void, UnregisterObserver, ObserverInterface*)
+END_PROXY_MAP()
+
+BEGIN_PROXY_MAP(VideoTrack)
+  PROXY_SIGNALING_THREAD_DESTRUCTOR()
+  PROXY_CONSTMETHOD0(std::string, kind)
+  PROXY_CONSTMETHOD0(std::string, id)
+  PROXY_CONSTMETHOD0(TrackState, state)
+  PROXY_CONSTMETHOD0(bool, enabled)
+  PROXY_METHOD1(bool, set_enabled, bool)
+  PROXY_CONSTMETHOD0(ContentHint, content_hint)
+  PROXY_METHOD1(void, set_content_hint, ContentHint)
+  PROXY_WORKER_METHOD2(void,
+                       AddOrUpdateSink,
+                       rtc::VideoSinkInterface<VideoFrame>*,
+                       const rtc::VideoSinkWants&)
+  PROXY_WORKER_METHOD1(void, RemoveSink, rtc::VideoSinkInterface<VideoFrame>*)
+  PROXY_CONSTMETHOD0(VideoTrackSourceInterface*, GetSource)
+
+  PROXY_METHOD1(void, RegisterObserver, ObserverInterface*)
+  PROXY_METHOD1(void, UnregisterObserver, ObserverInterface*)
+END_PROXY_MAP()
+
+}  // namespace webrtc
+
+#endif  // API_MEDIASTREAMTRACKPROXY_H_
diff --git a/api/mediatypes.cc b/api/mediatypes.cc
new file mode 100644
index 0000000..599542d
--- /dev/null
+++ b/api/mediatypes.cc
@@ -0,0 +1,49 @@
+/*
+ *  Copyright 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/mediatypes.h"
+
+#include "api/mediastreaminterface.h"
+#include "rtc_base/checks.h"
+
+namespace {
+static const char* kMediaTypeData = "data";
+}  // namespace
+
+namespace cricket {
+
+std::string MediaTypeToString(MediaType type) {
+  switch (type) {
+    case MEDIA_TYPE_AUDIO:
+      return webrtc::MediaStreamTrackInterface::kAudioKind;
+    case MEDIA_TYPE_VIDEO:
+      return webrtc::MediaStreamTrackInterface::kVideoKind;
+    case MEDIA_TYPE_DATA:
+      return kMediaTypeData;
+  }
+  FATAL();
+  // Not reachable; avoids compile warning.
+  return "";
+}
+
+MediaType MediaTypeFromString(const std::string& type_str) {
+  if (type_str == webrtc::MediaStreamTrackInterface::kAudioKind) {
+    return MEDIA_TYPE_AUDIO;
+  } else if (type_str == webrtc::MediaStreamTrackInterface::kVideoKind) {
+    return MEDIA_TYPE_VIDEO;
+  } else if (type_str == kMediaTypeData) {
+    return MEDIA_TYPE_DATA;
+  }
+  FATAL();
+  // Not reachable; avoids compile warning.
+  return static_cast<MediaType>(-1);
+}
+
+}  // namespace cricket
diff --git a/api/mediatypes.h b/api/mediatypes.h
new file mode 100644
index 0000000..93ce1a2
--- /dev/null
+++ b/api/mediatypes.h
@@ -0,0 +1,31 @@
+/*
+ *  Copyright 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_MEDIATYPES_H_
+#define API_MEDIATYPES_H_
+
+#include <string>
+
+namespace cricket {
+
+enum MediaType {
+  MEDIA_TYPE_AUDIO,
+  MEDIA_TYPE_VIDEO,
+  MEDIA_TYPE_DATA
+};
+
+std::string MediaTypeToString(MediaType type);
+// Aborts on invalid string. Only expected to be used on strings that are
+// guaranteed to be valid, such as MediaStreamTrackInterface::kind().
+MediaType MediaTypeFromString(const std::string& type_str);
+
+}  // namespace cricket
+
+#endif  // API_MEDIATYPES_H_
diff --git a/api/notifier.h b/api/notifier.h
new file mode 100644
index 0000000..ceeda4d
--- /dev/null
+++ b/api/notifier.h
@@ -0,0 +1,61 @@
+/*
+ *  Copyright 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_NOTIFIER_H_
+#define API_NOTIFIER_H_
+
+#include <list>
+
+#include "api/mediastreaminterface.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Implements a template version of a notifier.
+// TODO(deadbeef): This is an implementation detail; move out of api/.
+template <class T>
+class Notifier : public T {
+ public:
+  Notifier() {
+  }
+
+  virtual void RegisterObserver(ObserverInterface* observer) {
+    RTC_DCHECK(observer != nullptr);
+    observers_.push_back(observer);
+  }
+
+  virtual void UnregisterObserver(ObserverInterface* observer) {
+    for (std::list<ObserverInterface*>::iterator it = observers_.begin();
+         it != observers_.end(); it++) {
+      if (*it == observer) {
+        observers_.erase(it);
+        break;
+      }
+    }
+  }
+
+  void FireOnChanged() {
+    // Copy the list of observers to avoid a crash if the observer object
+    // unregisters as a result of the OnChanged() call. If the same list is used
+    // UnregisterObserver will affect the list make the iterator invalid.
+    std::list<ObserverInterface*> observers = observers_;
+    for (std::list<ObserverInterface*>::iterator it = observers.begin();
+         it != observers.end(); ++it) {
+      (*it)->OnChanged();
+    }
+  }
+
+ protected:
+  std::list<ObserverInterface*> observers_;
+};
+
+}  // namespace webrtc
+
+#endif  // API_NOTIFIER_H_
diff --git a/api/optional.cc b/api/optional.cc
new file mode 100644
index 0000000..9412617
--- /dev/null
+++ b/api/optional.cc
@@ -0,0 +1,34 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/optional.h"
+
+namespace rtc {
+namespace optional_internal {
+
+#if RTC_HAS_ASAN
+
+void* FunctionThatDoesNothingImpl(void* x) {
+  return x;
+}
+
+#endif
+
+struct NulloptArg {
+  constexpr NulloptArg() {}
+};
+
+static NulloptArg nullopt_arg;
+
+}  // namespace optional_internal
+
+const nullopt_t nullopt(rtc::optional_internal::nullopt_arg);
+
+}  // namespace rtc
diff --git a/api/optional.h b/api/optional.h
new file mode 100644
index 0000000..7a62335
--- /dev/null
+++ b/api/optional.h
@@ -0,0 +1,449 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_OPTIONAL_H_
+#define API_OPTIONAL_H_
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+
+#ifdef UNIT_TEST
+#include <iomanip>
+#include <ostream>
+#endif  // UNIT_TEST
+
+#include "api/array_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/sanitizer.h"
+
+namespace rtc {
+
+namespace optional_internal {
+
+#if RTC_HAS_ASAN
+
+// This is a non-inlined function. The optimizer can't see inside it.  It
+// prevents the compiler from generating optimized code that reads value_ even
+// if it is unset. Although safe, this causes memory sanitizers to complain.
+void* FunctionThatDoesNothingImpl(void*);
+
+template <typename T>
+inline T* FunctionThatDoesNothing(T* x) {
+  return reinterpret_cast<T*>(
+      FunctionThatDoesNothingImpl(reinterpret_cast<void*>(x)));
+}
+
+#else
+
+template <typename T>
+inline T* FunctionThatDoesNothing(T* x) {
+  return x;
+}
+
+#endif
+
+struct NulloptArg;
+
+}  // namespace optional_internal
+
+// nullopt_t must be a non-aggregate literal type with a constexpr constructor
+// that takes some implementation-defined literal type. It mustn't have a
+// default constructor nor an initializer-list constructor.
+// See:
+// http://en.cppreference.com/w/cpp/utility/optional/nullopt_t
+// That page uses int, though this seems to confuse older versions of GCC.
+struct nullopt_t {
+  constexpr explicit nullopt_t(rtc::optional_internal::NulloptArg&) {}
+};
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/nullopt
+extern const nullopt_t nullopt;
+
+// Simple std::optional-wannabe. It either contains a T or not.
+//
+// A moved-from Optional<T> may only be destroyed, and assigned to if T allows
+// being assigned to after having been moved from. Specifically, you may not
+// assume that it just doesn't contain a value anymore.
+//
+// Examples of good places to use Optional:
+//
+// - As a class or struct member, when the member doesn't always have a value:
+//     struct Prisoner {
+//       std::string name;
+//       Optional<int> cell_number;  // Empty if not currently incarcerated.
+//     };
+//
+// - As a return value for functions that may fail to return a value on all
+//   allowed inputs. For example, a function that searches an array might
+//   return an Optional<size_t> (the index where it found the element, or
+//   nothing if it didn't find it); and a function that parses numbers might
+//   return Optional<double> (the parsed number, or nothing if parsing failed).
+//
+// Examples of bad places to use Optional:
+//
+// - As a return value for functions that may fail because of disallowed
+//   inputs. For example, a string length function should not return
+//   Optional<size_t> so that it can return nothing in case the caller passed
+//   it a null pointer; the function should probably use RTC_[D]CHECK instead,
+//   and return plain size_t.
+//
+// - As a return value for functions that may fail to return a value on all
+//   allowed inputs, but need to tell the caller what went wrong. Returning
+//   Optional<double> when parsing a single number as in the example above
+//   might make sense, but any larger parse job is probably going to need to
+//   tell the caller what the problem was, not just that there was one.
+//
+// - As a non-mutable function argument. When you want to pass a value of a
+//   type T that can fail to be there, const T* is almost always both fastest
+//   and cleanest. (If you're *sure* that the the caller will always already
+//   have an Optional<T>, const Optional<T>& is slightly faster than const T*,
+//   but this is a micro-optimization. In general, stick to const T*.)
+//
+// TODO(kwiberg): Get rid of this class when the standard library has
+// std::optional (and we're allowed to use it).
+template <typename T>
+class Optional final {
+ public:
+  // Construct an empty Optional.
+  Optional() : has_value_(false), empty_('\0') { PoisonValue(); }
+
+  Optional(rtc::nullopt_t)  // NOLINT(runtime/explicit)
+      : Optional() {}
+
+  // Construct an Optional that contains a value.
+  Optional(const T& value)  // NOLINT(runtime/explicit)
+      : has_value_(true) {
+    new (&value_) T(value);
+  }
+  Optional(T&& value)  // NOLINT(runtime/explicit)
+      : has_value_(true) {
+    new (&value_) T(std::move(value));
+  }
+
+  // Copy constructor: copies the value from m if it has one.
+  Optional(const Optional& m) : has_value_(m.has_value_) {
+    if (has_value_)
+      new (&value_) T(m.value_);
+    else
+      PoisonValue();
+  }
+
+  // Move constructor: if m has a value, moves the value from m, leaving m
+  // still in a state where it has a value, but a moved-from one (the
+  // properties of which depends on T; the only general guarantee is that we
+  // can destroy m).
+  Optional(Optional&& m) : has_value_(m.has_value_) {
+    if (has_value_)
+      new (&value_) T(std::move(m.value_));
+    else
+      PoisonValue();
+  }
+
+  ~Optional() {
+    if (has_value_)
+      value_.~T();
+    else
+      UnpoisonValue();
+  }
+
+  Optional& operator=(rtc::nullopt_t) {
+    reset();
+    return *this;
+  }
+
+  // Copy assignment. Uses T's copy assignment if both sides have a value, T's
+  // copy constructor if only the right-hand side has a value.
+  Optional& operator=(const Optional& m) {
+    if (m.has_value_) {
+      if (has_value_) {
+        value_ = m.value_;  // T's copy assignment.
+      } else {
+        UnpoisonValue();
+        new (&value_) T(m.value_);  // T's copy constructor.
+        has_value_ = true;
+      }
+    } else {
+      reset();
+    }
+    return *this;
+  }
+
+  // Move assignment. Uses T's move assignment if both sides have a value, T's
+  // move constructor if only the right-hand side has a value. The state of m
+  // after it's been moved from is as for the move constructor.
+  Optional& operator=(Optional&& m) {
+    if (m.has_value_) {
+      if (has_value_) {
+        value_ = std::move(m.value_);  // T's move assignment.
+      } else {
+        UnpoisonValue();
+        new (&value_) T(std::move(m.value_));  // T's move constructor.
+        has_value_ = true;
+      }
+    } else {
+      reset();
+    }
+    return *this;
+  }
+
+  // Swap the values if both m1 and m2 have values; move the value if only one
+  // of them has one.
+  friend void swap(Optional& m1, Optional& m2) {
+    if (m1.has_value_) {
+      if (m2.has_value_) {
+        // Both have values: swap.
+        using std::swap;
+        swap(m1.value_, m2.value_);
+      } else {
+        // Only m1 has a value: move it to m2.
+        m2.UnpoisonValue();
+        new (&m2.value_) T(std::move(m1.value_));
+        m1.value_.~T();  // Destroy the moved-from value.
+        m1.has_value_ = false;
+        m2.has_value_ = true;
+        m1.PoisonValue();
+      }
+    } else if (m2.has_value_) {
+      // Only m2 has a value: move it to m1.
+      m1.UnpoisonValue();
+      new (&m1.value_) T(std::move(m2.value_));
+      m2.value_.~T();  // Destroy the moved-from value.
+      m1.has_value_ = true;
+      m2.has_value_ = false;
+      m2.PoisonValue();
+    }
+  }
+
+  // Destroy any contained value. Has no effect if we have no value.
+  void reset() {
+    if (!has_value_)
+      return;
+    value_.~T();
+    has_value_ = false;
+    PoisonValue();
+  }
+
+  template <class... Args>
+  void emplace(Args&&... args) {
+    if (has_value_)
+      value_.~T();
+    else
+      UnpoisonValue();
+    new (&value_) T(std::forward<Args>(args)...);
+    has_value_ = true;
+  }
+
+  // Conversion to bool to test if we have a value.
+  explicit operator bool() const { return has_value_; }
+  bool has_value() const { return has_value_; }
+
+  // Dereferencing. Only allowed if we have a value.
+  const T* operator->() const {
+    RTC_DCHECK(has_value_);
+    return &value_;
+  }
+  T* operator->() {
+    RTC_DCHECK(has_value_);
+    return &value_;
+  }
+  const T& operator*() const {
+    RTC_DCHECK(has_value_);
+    return value_;
+  }
+  T& operator*() {
+    RTC_DCHECK(has_value_);
+    return value_;
+  }
+  const T& value() const {
+    RTC_DCHECK(has_value_);
+    return value_;
+  }
+  T& value() {
+    RTC_DCHECK(has_value_);
+    return value_;
+  }
+
+  // Dereference with a default value in case we don't have a value.
+  const T& value_or(const T& default_val) const {
+    // The no-op call prevents the compiler from generating optimized code that
+    // reads value_ even if !has_value_, but only if FunctionThatDoesNothing is
+    // not completely inlined; see its declaration.).
+    return has_value_ ? *optional_internal::FunctionThatDoesNothing(&value_)
+                      : default_val;
+  }
+
+  // Dereference and move value.
+  T MoveValue() {
+    RTC_DCHECK(has_value_);
+    return std::move(value_);
+  }
+
+  // Equality tests. Two Optionals are equal if they contain equivalent values,
+  // or if they're both empty.
+  friend bool operator==(const Optional& m1, const Optional& m2) {
+    return m1.has_value_ && m2.has_value_ ? m1.value_ == m2.value_
+                                          : m1.has_value_ == m2.has_value_;
+  }
+  friend bool operator==(const Optional& opt, const T& value) {
+    return opt.has_value_ && opt.value_ == value;
+  }
+  friend bool operator==(const T& value, const Optional& opt) {
+    return opt.has_value_ && value == opt.value_;
+  }
+
+  friend bool operator==(const Optional& opt, rtc::nullopt_t) {
+    return !opt.has_value_;
+  }
+
+  friend bool operator==(rtc::nullopt_t, const Optional& opt) {
+    return !opt.has_value_;
+  }
+
+  friend bool operator!=(const Optional& m1, const Optional& m2) {
+    return m1.has_value_ && m2.has_value_ ? m1.value_ != m2.value_
+                                          : m1.has_value_ != m2.has_value_;
+  }
+  friend bool operator!=(const Optional& opt, const T& value) {
+    return !opt.has_value_ || opt.value_ != value;
+  }
+  friend bool operator!=(const T& value, const Optional& opt) {
+    return !opt.has_value_ || value != opt.value_;
+  }
+
+  friend bool operator!=(const Optional& opt, rtc::nullopt_t) {
+    return opt.has_value_;
+  }
+
+  friend bool operator!=(rtc::nullopt_t, const Optional& opt) {
+    return opt.has_value_;
+  }
+
+ private:
+  // Tell sanitizers that value_ shouldn't be touched.
+  void PoisonValue() {
+    rtc::AsanPoison(rtc::MakeArrayView(&value_, 1));
+    rtc::MsanMarkUninitialized(rtc::MakeArrayView(&value_, 1));
+  }
+
+  // Tell sanitizers that value_ is OK to touch again.
+  void UnpoisonValue() { rtc::AsanUnpoison(rtc::MakeArrayView(&value_, 1)); }
+
+  bool has_value_;  // True iff value_ contains a live value.
+  union {
+    // empty_ exists only to make it possible to initialize the union, even when
+    // it doesn't contain any data. If the union goes uninitialized, it may
+    // trigger compiler warnings.
+    char empty_;
+    // By placing value_ in a union, we get to manage its construction and
+    // destruction manually: the Optional constructors won't automatically
+    // construct it, and the Optional destructor won't automatically destroy
+    // it. Basically, this just allocates a properly sized and aligned block of
+    // memory in which we can manually put a T with placement new.
+    T value_;
+  };
+};
+
+#ifdef UNIT_TEST
+namespace optional_internal {
+
+// Checks if there's a valid PrintTo(const T&, std::ostream*) call for T.
+template <typename T>
+struct HasPrintTo {
+ private:
+  struct No {};
+
+  template <typename T2>
+  static auto Test(const T2& obj)
+      -> decltype(PrintTo(obj, std::declval<std::ostream*>()));
+
+  template <typename>
+  static No Test(...);
+
+ public:
+  static constexpr bool value =
+      !std::is_same<decltype(Test<T>(std::declval<const T&>())), No>::value;
+};
+
+// Checks if there's a valid operator<<(std::ostream&, const T&) call for T.
+template <typename T>
+struct HasOstreamOperator {
+ private:
+  struct No {};
+
+  template <typename T2>
+  static auto Test(const T2& obj)
+      -> decltype(std::declval<std::ostream&>() << obj);
+
+  template <typename>
+  static No Test(...);
+
+ public:
+  static constexpr bool value =
+      !std::is_same<decltype(Test<T>(std::declval<const T&>())), No>::value;
+};
+
+// Prefer using PrintTo to print the object.
+template <typename T>
+typename std::enable_if<HasPrintTo<T>::value, void>::type OptionalPrintToHelper(
+    const T& value,
+    std::ostream* os) {
+  PrintTo(value, os);
+}
+
+// Fall back to operator<<(std::ostream&, ...) if it exists.
+template <typename T>
+typename std::enable_if<HasOstreamOperator<T>::value && !HasPrintTo<T>::value,
+                        void>::type
+OptionalPrintToHelper(const T& value, std::ostream* os) {
+  *os << value;
+}
+
+inline void OptionalPrintObjectBytes(const unsigned char* bytes,
+                                     size_t size,
+                                     std::ostream* os) {
+  *os << "<optional with " << size << "-byte object [";
+  for (size_t i = 0; i != size; ++i) {
+    *os << (i == 0 ? "" : ((i & 1) ? "-" : " "));
+    *os << std::hex << std::setw(2) << std::setfill('0')
+        << static_cast<int>(bytes[i]);
+  }
+  *os << "]>";
+}
+
+// As a final back-up, just print the contents of the objcets byte-wise.
+template <typename T>
+typename std::enable_if<!HasOstreamOperator<T>::value && !HasPrintTo<T>::value,
+                        void>::type
+OptionalPrintToHelper(const T& value, std::ostream* os) {
+  OptionalPrintObjectBytes(reinterpret_cast<const unsigned char*>(&value),
+                           sizeof(value), os);
+}
+
+}  // namespace optional_internal
+
+// PrintTo is used by gtest to print out the results of tests. We want to ensure
+// the object contained in an Optional can be printed out if it's set, while
+// avoiding touching the object's storage if it is undefined.
+template <typename T>
+void PrintTo(const rtc::Optional<T>& opt, std::ostream* os) {
+  if (opt) {
+    optional_internal::OptionalPrintToHelper(*opt, os);
+  } else {
+    *os << "<empty optional>";
+  }
+}
+
+#endif  // UNIT_TEST
+
+}  // namespace rtc
+
+#endif  // API_OPTIONAL_H_
diff --git a/api/optional_unittest.cc b/api/optional_unittest.cc
new file mode 100644
index 0000000..2149033
--- /dev/null
+++ b/api/optional_unittest.cc
@@ -0,0 +1,902 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "api/optional.h"
+#include "rtc_base/gunit.h"
+
+namespace rtc {
+
+namespace {
+
+struct MyUnprintableType {
+  int value;
+};
+
+struct MyPrintableType {
+  int value;
+};
+
+struct MyOstreamPrintableType {
+  int value;
+};
+
+void PrintTo(const MyPrintableType& mpt, std::ostream* os) {
+  *os << "The value is " << mpt.value;
+}
+
+std::ostream& operator<<(std::ostream& os, const MyPrintableType& mpt) {
+  os << mpt.value;
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const MyOstreamPrintableType& mpt) {
+  os << mpt.value;
+  return os;
+}
+
+// Class whose instances logs various method calls (constructor, destructor,
+// etc.). Each instance has a unique ID (a simple global sequence number) and
+// an origin ID. When a copy is made, the new object gets a fresh ID but copies
+// the origin ID from the original. When a new Logger is created from scratch,
+// it gets a fresh ID, and the origin ID is the same as the ID (default
+// constructor) or given as an argument (explicit constructor).
+class Logger {
+ public:
+  Logger() : id_(g_next_id++), origin_(id_) { Log("default constructor"); }
+  explicit Logger(int origin) : id_(g_next_id++), origin_(origin) {
+    Log("explicit constructor");
+  }
+  Logger(int origin, const Logger& pass_by_ref, Logger pass_by_value)
+      : id_(g_next_id++), origin_(origin) {
+    Log("multi parameter constructor");
+  }
+  Logger(const Logger& other) : id_(g_next_id++), origin_(other.origin_) {
+    LogFrom("copy constructor", other);
+  }
+  Logger(Logger&& other) : id_(g_next_id++), origin_(other.origin_) {
+    LogFrom("move constructor", other);
+  }
+  ~Logger() { Log("destructor"); }
+  Logger& operator=(const Logger& other) {
+    origin_ = other.origin_;
+    LogFrom("operator= copy", other);
+    return *this;
+  }
+  Logger& operator=(Logger&& other) {
+    origin_ = other.origin_;
+    LogFrom("operator= move", other);
+    return *this;
+  }
+  friend void swap(Logger& a, Logger& b) {
+    using std::swap;
+    swap(a.origin_, b.origin_);
+    Log2("swap", a, b);
+  }
+  friend bool operator==(const Logger& a, const Logger& b) {
+    Log2("operator==", a, b);
+    return a.origin_ == b.origin_;
+  }
+  friend bool operator!=(const Logger& a, const Logger& b) {
+    Log2("operator!=", a, b);
+    return a.origin_ != b.origin_;
+  }
+  void Foo() { Log("Foo()"); }
+  void Foo() const { Log("Foo() const"); }
+  static std::unique_ptr<std::vector<std::string>> Setup() {
+    std::unique_ptr<std::vector<std::string>> s(new std::vector<std::string>);
+    g_log = s.get();
+    g_next_id = 0;
+    return s;
+  }
+
+ private:
+  int id_;
+  int origin_;
+  static std::vector<std::string>* g_log;
+  static int g_next_id;
+  void Log(const char* msg) const {
+    std::ostringstream oss;
+    oss << id_ << ':' << origin_ << ". " << msg;
+    g_log->push_back(oss.str());
+  }
+  void LogFrom(const char* msg, const Logger& other) const {
+    std::ostringstream oss;
+    oss << id_ << ':' << origin_ << ". " << msg << " (from " << other.id_ << ':'
+        << other.origin_ << ")";
+    g_log->push_back(oss.str());
+  }
+  static void Log2(const char* msg, const Logger& a, const Logger& b) {
+    std::ostringstream oss;
+    oss << msg << ' ' << a.id_ << ':' << a.origin_ << ", " << b.id_ << ':'
+        << b.origin_;
+    g_log->push_back(oss.str());
+  }
+};
+
+std::vector<std::string>* Logger::g_log = nullptr;
+int Logger::g_next_id = 0;
+
+// Append all the other args to the vector pointed to by the first arg.
+template <typename T>
+void VectorAppend(std::vector<T>* v) {}
+template <typename T, typename... Ts>
+void VectorAppend(std::vector<T>* v, const T& e, Ts... es) {
+  v->push_back(e);
+  VectorAppend(v, es...);
+}
+
+// Create a vector of strings. Because we're not allowed to use
+// std::initializer_list.
+template <typename... Ts>
+std::vector<std::string> V(Ts... es) {
+  std::vector<std::string> strings;
+  VectorAppend(&strings, static_cast<std::string>(es)...);
+  return strings;
+}
+
+}  // namespace
+
+TEST(OptionalTest, TestConstructDefault) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x;
+    EXPECT_FALSE(x);
+    EXPECT_FALSE(x.has_value());
+  }
+  EXPECT_EQ(V(), *log);
+}
+
+TEST(OptionalTest, TestConstructNullopt) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x(nullopt);
+    EXPECT_FALSE(x);
+    EXPECT_FALSE(x.has_value());
+  }
+  EXPECT_EQ(V(), *log);
+}
+
+TEST(OptionalTest, TestConstructCopyEmpty) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x;
+    EXPECT_FALSE(x);
+    EXPECT_FALSE(x.has_value());
+    auto y = x;
+    EXPECT_FALSE(y);
+    EXPECT_FALSE(y.has_value());
+  }
+  EXPECT_EQ(V(), *log);
+}
+
+TEST(OptionalTest, TestConstructCopyFull) {
+  auto log = Logger::Setup();
+  {
+    Logger a;
+    Optional<Logger> x(a);
+    EXPECT_TRUE(x);
+    EXPECT_TRUE(x.has_value());
+    log->push_back("---");
+    auto y = x;
+    EXPECT_TRUE(y);
+    EXPECT_TRUE(y.has_value());
+    log->push_back("---");
+  }
+  EXPECT_EQ(V("0:0. default constructor", "1:0. copy constructor (from 0:0)",
+              "---", "2:0. copy constructor (from 1:0)", "---",
+              "2:0. destructor", "1:0. destructor", "0:0. destructor"),
+            *log);
+}
+
+TEST(OptionalTest, TestConstructMoveEmpty) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x;
+    EXPECT_FALSE(x);
+    EXPECT_FALSE(x.has_value());
+    auto y = std::move(x);
+    EXPECT_FALSE(y);
+    EXPECT_FALSE(y.has_value());
+  }
+  EXPECT_EQ(V(), *log);
+}
+
+TEST(OptionalTest, TestConstructMoveFull) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x(Logger(17));
+    EXPECT_TRUE(x);
+    EXPECT_TRUE(x.has_value());
+    log->push_back("---");
+    auto y = std::move(x);
+    EXPECT_TRUE(x);
+    EXPECT_TRUE(x.has_value());
+    EXPECT_TRUE(y);
+    EXPECT_TRUE(y.has_value());
+    log->push_back("---");
+  }
+  EXPECT_EQ(
+      V("0:17. explicit constructor", "1:17. move constructor (from 0:17)",
+        "0:17. destructor", "---", "2:17. move constructor (from 1:17)", "---",
+        "2:17. destructor", "1:17. destructor"),
+      *log);
+}
+
+TEST(OptionalTest, TestCopyAssignToEmptyFromEmpty) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x, y;
+    x = y;
+  }
+  EXPECT_EQ(V(), *log);
+}
+
+TEST(OptionalTest, TestCopyAssignToFullFromEmpty) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x(Logger(17));
+    Optional<Logger> y;
+    log->push_back("---");
+    x = y;
+    log->push_back("---");
+  }
+  EXPECT_EQ(
+      V("0:17. explicit constructor", "1:17. move constructor (from 0:17)",
+        "0:17. destructor", "---", "1:17. destructor", "---"),
+      *log);
+}
+
+TEST(OptionalTest, TestCopyAssignToFullFromNullopt) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x(Logger(17));
+    log->push_back("---");
+    x = nullopt;
+    log->push_back("---");
+    EXPECT_FALSE(x);
+  }
+  EXPECT_EQ(
+      V("0:17. explicit constructor", "1:17. move constructor (from 0:17)",
+        "0:17. destructor", "---", "1:17. destructor", "---"),
+      *log);
+}
+
+TEST(OptionalTest, TestCopyAssignToFullFromEmptyBraces) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x(Logger(17));
+    log->push_back("---");
+    x = {};
+    log->push_back("---");
+    EXPECT_FALSE(x);
+  }
+  EXPECT_EQ(
+      V("0:17. explicit constructor", "1:17. move constructor (from 0:17)",
+        "0:17. destructor", "---", "1:17. destructor", "---"),
+      *log);
+}
+
+TEST(OptionalTest, TestCopyAssignToEmptyFromFull) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x;
+    Optional<Logger> y(Logger(17));
+    log->push_back("---");
+    x = y;
+    log->push_back("---");
+  }
+  EXPECT_EQ(
+      V("0:17. explicit constructor", "1:17. move constructor (from 0:17)",
+        "0:17. destructor", "---", "2:17. copy constructor (from 1:17)", "---",
+        "1:17. destructor", "2:17. destructor"),
+      *log);
+}
+
+TEST(OptionalTest, TestCopyAssignToFullFromFull) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x(Logger(17));
+    Optional<Logger> y(Logger(42));
+    log->push_back("---");
+    x = y;
+    log->push_back("---");
+  }
+  EXPECT_EQ(
+      V("0:17. explicit constructor", "1:17. move constructor (from 0:17)",
+        "0:17. destructor", "2:42. explicit constructor",
+        "3:42. move constructor (from 2:42)", "2:42. destructor", "---",
+        "1:42. operator= copy (from 3:42)", "---", "3:42. destructor",
+        "1:42. destructor"),
+      *log);
+}
+
+TEST(OptionalTest, TestCopyAssignToEmptyFromT) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x;
+    Logger y(17);
+    log->push_back("---");
+    x = Optional<Logger>(y);
+    log->push_back("---");
+  }
+  EXPECT_EQ(V("0:17. explicit constructor", "---",
+              "1:17. copy constructor (from 0:17)",
+              "2:17. move constructor (from 1:17)", "1:17. destructor", "---",
+              "0:17. destructor", "2:17. destructor"),
+            *log);
+}
+
+TEST(OptionalTest, TestCopyAssignToFullFromT) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x(Logger(17));
+    Logger y(42);
+    log->push_back("---");
+    x = Optional<Logger>(y);
+    log->push_back("---");
+  }
+  EXPECT_EQ(
+      V("0:17. explicit constructor", "1:17. move constructor (from 0:17)",
+        "0:17. destructor", "2:42. explicit constructor", "---",
+        "3:42. copy constructor (from 2:42)",
+        "1:42. operator= move (from 3:42)", "3:42. destructor", "---",
+        "2:42. destructor", "1:42. destructor"),
+      *log);
+}
+
+TEST(OptionalTest, TestMoveAssignToEmptyFromEmpty) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x, y;
+    x = std::move(y);
+  }
+  EXPECT_EQ(V(), *log);
+}
+
+TEST(OptionalTest, TestMoveAssignToFullFromEmpty) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x(Logger(17));
+    Optional<Logger> y;
+    log->push_back("---");
+    x = std::move(y);
+    log->push_back("---");
+  }
+  EXPECT_EQ(
+      V("0:17. explicit constructor", "1:17. move constructor (from 0:17)",
+        "0:17. destructor", "---", "1:17. destructor", "---"),
+      *log);
+}
+
+TEST(OptionalTest, TestMoveAssignToEmptyFromFull) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x;
+    Optional<Logger> y(Logger(17));
+    log->push_back("---");
+    x = std::move(y);
+    log->push_back("---");
+  }
+  EXPECT_EQ(
+      V("0:17. explicit constructor", "1:17. move constructor (from 0:17)",
+        "0:17. destructor", "---", "2:17. move constructor (from 1:17)", "---",
+        "1:17. destructor", "2:17. destructor"),
+      *log);
+}
+
+TEST(OptionalTest, TestMoveAssignToFullFromFull) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x(Logger(17));
+    Optional<Logger> y(Logger(42));
+    log->push_back("---");
+    x = std::move(y);
+    log->push_back("---");
+  }
+  EXPECT_EQ(
+      V("0:17. explicit constructor", "1:17. move constructor (from 0:17)",
+        "0:17. destructor", "2:42. explicit constructor",
+        "3:42. move constructor (from 2:42)", "2:42. destructor", "---",
+        "1:42. operator= move (from 3:42)", "---", "3:42. destructor",
+        "1:42. destructor"),
+      *log);
+}
+
+TEST(OptionalTest, TestMoveAssignToEmptyFromT) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x;
+    Logger y(17);
+    log->push_back("---");
+    x = Optional<Logger>(std::move(y));
+    log->push_back("---");
+  }
+  EXPECT_EQ(V("0:17. explicit constructor", "---",
+              "1:17. move constructor (from 0:17)",
+              "2:17. move constructor (from 1:17)", "1:17. destructor", "---",
+              "0:17. destructor", "2:17. destructor"),
+            *log);
+}
+
+TEST(OptionalTest, TestMoveAssignToFullFromT) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x(Logger(17));
+    Logger y(42);
+    log->push_back("---");
+    x = Optional<Logger>(std::move(y));
+    log->push_back("---");
+  }
+  EXPECT_EQ(
+      V("0:17. explicit constructor", "1:17. move constructor (from 0:17)",
+        "0:17. destructor", "2:42. explicit constructor", "---",
+        "3:42. move constructor (from 2:42)",
+        "1:42. operator= move (from 3:42)", "3:42. destructor", "---",
+        "2:42. destructor", "1:42. destructor"),
+      *log);
+}
+
+TEST(OptionalTest, TestResetEmpty) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x;
+    x.reset();
+  }
+  EXPECT_EQ(V(), *log);
+}
+
+TEST(OptionalTest, TestResetFull) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x(Logger(17));
+    log->push_back("---");
+    x.reset();
+    log->push_back("---");
+  }
+  EXPECT_EQ(
+      V("0:17. explicit constructor", "1:17. move constructor (from 0:17)",
+        "0:17. destructor", "---", "1:17. destructor", "---"),
+      *log);
+}
+
+TEST(OptionalTest, TestEmplaceEmptyWithExplicit) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x;
+    log->push_back("---");
+    x.emplace(42);
+    log->push_back("---");
+  }
+  // clang-format off
+  EXPECT_EQ(V("---",
+              "0:42. explicit constructor",
+              "---",
+              "0:42. destructor"),
+            *log);
+  // clang-format on
+}
+
+TEST(OptionalTest, TestEmplaceEmptyWithMultipleParameters) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x;
+    Logger ref(21);
+    Logger value(35);
+    log->push_back("---");
+    x.emplace(42, ref, std::move(value));
+    log->push_back("---");
+  }
+  // clang-format off
+  EXPECT_EQ(V("0:21. explicit constructor",
+              "1:35. explicit constructor",
+              "---",
+              "2:35. move constructor (from 1:35)",
+              "3:42. multi parameter constructor",
+              "2:35. destructor",
+              "---",
+              "1:35. destructor",
+              "0:21. destructor",
+              "3:42. destructor"),
+            *log);
+  // clang-format on
+}
+
+TEST(OptionalTest, TestEmplaceEmptyWithCopy) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x;
+    Logger y(42);
+    log->push_back("---");
+    x.emplace(y);
+    log->push_back("---");
+  }
+  // clang-format off
+  EXPECT_EQ(V("0:42. explicit constructor",
+              "---",
+              "1:42. copy constructor (from 0:42)",
+              "---",
+              "0:42. destructor",
+              "1:42. destructor"),
+            *log);
+  // clang-format on
+}
+
+TEST(OptionalTest, TestEmplaceEmptyWithMove) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x;
+    Logger y(42);
+    log->push_back("---");
+    x.emplace(std::move(y));
+    log->push_back("---");
+  }
+  // clang-format off
+  EXPECT_EQ(V("0:42. explicit constructor",
+              "---",
+              "1:42. move constructor (from 0:42)",
+              "---",
+              "0:42. destructor",
+              "1:42. destructor"),
+            *log);
+  // clang-format on
+}
+
+TEST(OptionalTest, TestEmplaceFullWithExplicit) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x(Logger(17));
+    log->push_back("---");
+    x.emplace(42);
+    log->push_back("---");
+  }
+  // clang-format off
+  EXPECT_EQ(
+      V("0:17. explicit constructor",
+        "1:17. move constructor (from 0:17)",
+        "0:17. destructor",
+        "---",
+        "1:17. destructor",
+        "2:42. explicit constructor",
+        "---",
+        "2:42. destructor"),
+      *log);
+  // clang-format on
+}
+
+TEST(OptionalTest, TestEmplaceFullWithMultipleParameters) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x(Logger(17));
+    Logger ref(21);
+    Logger value(35);
+    log->push_back("---");
+    x.emplace(42, ref, std::move(value));
+    log->push_back("---");
+  }
+  // clang-format off
+  EXPECT_EQ(V("0:17. explicit constructor",
+              "1:17. move constructor (from 0:17)",
+              "0:17. destructor",
+              "2:21. explicit constructor",
+              "3:35. explicit constructor",
+              "---",
+              "1:17. destructor",
+              "4:35. move constructor (from 3:35)",
+              "5:42. multi parameter constructor",
+              "4:35. destructor",
+              "---",
+              "3:35. destructor",
+              "2:21. destructor",
+              "5:42. destructor"),
+            *log);
+  // clang-format on
+}
+
+TEST(OptionalTest, TestEmplaceFullWithCopy) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x(Logger(17));
+    Logger y(42);
+    log->push_back("---");
+    x.emplace(y);
+    log->push_back("---");
+  }
+  // clang-format off
+  EXPECT_EQ(V("0:17. explicit constructor",
+              "1:17. move constructor (from 0:17)",
+              "0:17. destructor",
+              "2:42. explicit constructor",
+              "---",
+              "1:17. destructor",
+              "3:42. copy constructor (from 2:42)",
+              "---",
+              "2:42. destructor",
+              "3:42. destructor"),
+           *log);
+  // clang-format on
+}
+
+TEST(OptionalTest, TestEmplaceFullWithMove) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x(Logger(17));
+    Logger y(42);
+    log->push_back("---");
+    x.emplace(std::move(y));
+    log->push_back("---");
+  }
+  // clang-format off
+  EXPECT_EQ(V("0:17. explicit constructor",
+              "1:17. move constructor (from 0:17)",
+              "0:17. destructor",
+              "2:42. explicit constructor",
+              "---",
+              "1:17. destructor",
+              "3:42. move constructor (from 2:42)",
+              "---",
+              "2:42. destructor",
+              "3:42. destructor"),
+            *log);
+  // clang-format on
+}
+
+TEST(OptionalTest, TestDereference) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x(Logger(42));
+    const auto& y = x;
+    log->push_back("---");
+    x->Foo();
+    y->Foo();
+    std::move(x)->Foo();
+    std::move(y)->Foo();
+    log->push_back("---");
+    (*x).Foo();
+    (*y).Foo();
+    (*std::move(x)).Foo();
+    (*std::move(y)).Foo();
+    log->push_back("---");
+    x.value().Foo();
+    y.value().Foo();
+    std::move(x).value().Foo();
+    std::move(y).value().Foo();
+    log->push_back("---");
+  }
+  // clang-format off
+  EXPECT_EQ(V("0:42. explicit constructor",
+              "1:42. move constructor (from 0:42)",
+              "0:42. destructor",
+              "---",
+              "1:42. Foo()",
+              "1:42. Foo() const",
+              "1:42. Foo()",
+              "1:42. Foo() const",
+              "---",
+              "1:42. Foo()",
+              "1:42. Foo() const",
+              "1:42. Foo()",
+              "1:42. Foo() const",
+              "---",
+              "1:42. Foo()",
+              "1:42. Foo() const",
+              "1:42. Foo()",
+              "1:42. Foo() const",
+              "---",
+              "1:42. destructor"),
+            *log);
+  // clang-format on
+}
+
+TEST(OptionalTest, TestDereferenceWithDefault) {
+  auto log = Logger::Setup();
+  {
+    const Logger a(17), b(42);
+    Optional<Logger> x(a);
+    Optional<Logger> y;
+    log->push_back("-1-");
+    EXPECT_EQ(a, x.value_or(Logger(42)));
+    log->push_back("-2-");
+    EXPECT_EQ(b, y.value_or(Logger(42)));
+    log->push_back("-3-");
+    EXPECT_EQ(a, Optional<Logger>(Logger(17)).value_or(b));
+    log->push_back("-4-");
+    EXPECT_EQ(b, Optional<Logger>().value_or(b));
+    log->push_back("-5-");
+  }
+  EXPECT_EQ(
+      V("0:17. explicit constructor", "1:42. explicit constructor",
+        "2:17. copy constructor (from 0:17)", "-1-",
+        "3:42. explicit constructor", "operator== 0:17, 2:17",
+        "3:42. destructor", "-2-", "4:42. explicit constructor",
+        "operator== 1:42, 4:42", "4:42. destructor", "-3-",
+        "5:17. explicit constructor", "6:17. move constructor (from 5:17)",
+        "operator== 0:17, 6:17", "6:17. destructor", "5:17. destructor", "-4-",
+        "operator== 1:42, 1:42", "-5-", "2:17. destructor", "1:42. destructor",
+        "0:17. destructor"),
+      *log);
+}
+
+TEST(OptionalTest, TestEquality) {
+  auto log = Logger::Setup();
+  {
+    Logger a(17), b(42);
+    Optional<Logger> ma1(a), ma2(a), mb(b), me1, me2;
+    log->push_back("---");
+    EXPECT_EQ(ma1, ma1);
+    EXPECT_EQ(ma1, ma2);
+    EXPECT_NE(ma1, mb);
+    EXPECT_NE(ma1, me1);
+    EXPECT_EQ(me1, me1);
+    EXPECT_EQ(me1, me2);
+    log->push_back("---");
+  }
+  EXPECT_EQ(
+      V("0:17. explicit constructor", "1:42. explicit constructor",
+        "2:17. copy constructor (from 0:17)",
+        "3:17. copy constructor (from 0:17)",
+        "4:42. copy constructor (from 1:42)", "---", "operator== 2:17, 2:17",
+        "operator== 2:17, 3:17", "operator!= 2:17, 4:42", "---",
+        "4:42. destructor", "3:17. destructor", "2:17. destructor",
+        "1:42. destructor", "0:17. destructor"),
+      *log);
+}
+
+TEST(OptionalTest, TestEqualityWithNullopt) {
+  auto log = Logger::Setup();
+  {
+    Logger a(17);
+    Optional<Logger> ma(a), me;
+    // Using operator== and operator!= explicitly instead of EXPECT_EQ/EXPECT_NE
+    // macros because those operators are under test.
+    log->push_back("---");
+
+    EXPECT_FALSE(ma == nullopt);
+    EXPECT_FALSE(nullopt == ma);
+    EXPECT_TRUE(me == nullopt);
+    EXPECT_TRUE(nullopt == me);
+
+    EXPECT_TRUE(ma != nullopt);
+    EXPECT_TRUE(nullopt != ma);
+    EXPECT_FALSE(me != nullopt);
+    EXPECT_FALSE(nullopt != me);
+
+    log->push_back("---");
+  }
+  // clang-format off
+  EXPECT_EQ(V("0:17. explicit constructor",
+              "1:17. copy constructor (from 0:17)",
+              "---",
+              // No operators should be called when comparing to empty.
+              "---",
+              "1:17. destructor",
+              "0:17. destructor"),
+            *log);
+  // clang-format on
+}
+
+TEST(OptionalTest, TestEqualityWithObject) {
+  auto log = Logger::Setup();
+  {
+    Logger a(17), b(42);
+    Optional<Logger> ma(a), me;
+    // Using operator== and operator!= explicitly instead of EXPECT_EQ/EXPECT_NE
+    // macros because those operators are under test.
+    log->push_back("---");
+
+    EXPECT_TRUE(ma == a);
+    EXPECT_TRUE(a == ma);
+    EXPECT_FALSE(ma == b);
+    EXPECT_FALSE(b == ma);
+    EXPECT_FALSE(me == a);
+    EXPECT_FALSE(a == me);
+
+    EXPECT_FALSE(ma != a);
+    EXPECT_FALSE(a != ma);
+    EXPECT_TRUE(ma != b);
+    EXPECT_TRUE(b != ma);
+    EXPECT_TRUE(me != a);
+    EXPECT_TRUE(a != me);
+
+    log->push_back("---");
+  }
+  // clang-format off
+  EXPECT_EQ(V("0:17. explicit constructor",
+              "1:42. explicit constructor",
+              "2:17. copy constructor (from 0:17)",
+              "---",
+              "operator== 2:17, 0:17",
+              "operator== 0:17, 2:17",
+              "operator== 2:17, 1:42",
+              "operator== 1:42, 2:17",
+              // No operator should be called when comparing to empty.
+              "operator!= 2:17, 0:17",
+              "operator!= 0:17, 2:17",
+              "operator!= 2:17, 1:42",
+              "operator!= 1:42, 2:17",
+              // No operator should be called when comparing to empty.
+              "---",
+              "2:17. destructor",
+              "1:42. destructor",
+              "0:17. destructor"),
+            *log);
+  // clang-format on
+}
+
+TEST(OptionalTest, TestSwap) {
+  auto log = Logger::Setup();
+  {
+    Logger a(17), b(42);
+    Optional<Logger> x1(a), x2(b), y1(a), y2, z1, z2;
+    log->push_back("---");
+    swap(x1, x2);  // Swap full <-> full.
+    swap(y1, y2);  // Swap full <-> empty.
+    swap(z1, z2);  // Swap empty <-> empty.
+    log->push_back("---");
+  }
+  EXPECT_EQ(V("0:17. explicit constructor", "1:42. explicit constructor",
+              "2:17. copy constructor (from 0:17)",
+              "3:42. copy constructor (from 1:42)",
+              "4:17. copy constructor (from 0:17)", "---", "swap 2:42, 3:17",
+              "5:17. move constructor (from 4:17)", "4:17. destructor", "---",
+              "5:17. destructor", "3:17. destructor", "2:42. destructor",
+              "1:42. destructor", "0:17. destructor"),
+            *log);
+}
+
+TEST(OptionalTest, TestMoveValue) {
+  auto log = Logger::Setup();
+  {
+    Optional<Logger> x(Logger(42));
+    log->push_back("---");
+    Logger moved = x.MoveValue();
+    log->push_back("---");
+  }
+  EXPECT_EQ(
+      V("0:42. explicit constructor", "1:42. move constructor (from 0:42)",
+        "0:42. destructor", "---", "2:42. move constructor (from 1:42)", "---",
+        "2:42. destructor", "1:42. destructor"),
+      *log);
+}
+
+TEST(OptionalTest, TestPrintTo) {
+  constexpr char kEmptyOptionalMessage[] = "<empty optional>";
+  const Optional<MyUnprintableType> empty_unprintable;
+  const Optional<MyPrintableType> empty_printable;
+  const Optional<MyOstreamPrintableType> empty_ostream_printable;
+  EXPECT_EQ(kEmptyOptionalMessage, ::testing::PrintToString(empty_unprintable));
+  EXPECT_EQ(kEmptyOptionalMessage, ::testing::PrintToString(empty_printable));
+  EXPECT_EQ(kEmptyOptionalMessage,
+            ::testing::PrintToString(empty_ostream_printable));
+  EXPECT_NE("1", ::testing::PrintToString(Optional<MyUnprintableType>({1})));
+  EXPECT_NE("1", ::testing::PrintToString(Optional<MyPrintableType>({1})));
+  EXPECT_EQ("The value is 1",
+            ::testing::PrintToString(Optional<MyPrintableType>({1})));
+  EXPECT_EQ("1",
+            ::testing::PrintToString(Optional<MyOstreamPrintableType>({1})));
+}
+
+void UnusedFunctionWorkaround() {
+  // These are here to ensure we don't get warnings about ostream and PrintTo
+  // for MyPrintableType never getting called.
+  const MyPrintableType dont_warn{17};
+  const MyOstreamPrintableType dont_warn2{18};
+  std::stringstream sstr;
+  sstr << dont_warn;
+  PrintTo(dont_warn, &sstr);
+  sstr << dont_warn2;
+}
+
+}  // namespace rtc
diff --git a/api/peerconnectionfactoryproxy.h b/api/peerconnectionfactoryproxy.h
new file mode 100644
index 0000000..7601ed1
--- /dev/null
+++ b/api/peerconnectionfactoryproxy.h
@@ -0,0 +1,73 @@
+/*
+ *  Copyright 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_PEERCONNECTIONFACTORYPROXY_H_
+#define API_PEERCONNECTIONFACTORYPROXY_H_
+
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "api/peerconnectioninterface.h"
+#include "api/proxy.h"
+#include "rtc_base/bind.h"
+
+namespace webrtc {
+
+// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods
+// are called on is an implementation detail.
+BEGIN_SIGNALING_PROXY_MAP(PeerConnectionFactory)
+  PROXY_SIGNALING_THREAD_DESTRUCTOR()
+  // Use the overloads of CreateVideoSource that take raw VideoCapturer
+  // pointers from PeerConnectionFactoryInterface.
+  // TODO(deadbeef): Remove this using statement once those overloads are
+  // removed.
+  using PeerConnectionFactoryInterface::CreateVideoSource;
+  PROXY_METHOD1(void, SetOptions, const Options&)
+  PROXY_METHOD5(rtc::scoped_refptr<PeerConnectionInterface>,
+                CreatePeerConnection,
+                const PeerConnectionInterface::RTCConfiguration&,
+                const MediaConstraintsInterface*,
+                std::unique_ptr<cricket::PortAllocator>,
+                std::unique_ptr<rtc::RTCCertificateGeneratorInterface>,
+                PeerConnectionObserver*);
+  PROXY_METHOD4(rtc::scoped_refptr<PeerConnectionInterface>,
+                CreatePeerConnection,
+                const PeerConnectionInterface::RTCConfiguration&,
+                std::unique_ptr<cricket::PortAllocator>,
+                std::unique_ptr<rtc::RTCCertificateGeneratorInterface>,
+                PeerConnectionObserver*);
+  PROXY_METHOD1(rtc::scoped_refptr<MediaStreamInterface>,
+                CreateLocalMediaStream, const std::string&)
+  PROXY_METHOD1(rtc::scoped_refptr<AudioSourceInterface>,
+                CreateAudioSource, const MediaConstraintsInterface*)
+  PROXY_METHOD1(rtc::scoped_refptr<AudioSourceInterface>,
+                CreateAudioSource,
+                const cricket::AudioOptions&)
+  PROXY_METHOD2(rtc::scoped_refptr<VideoTrackSourceInterface>,
+                CreateVideoSource,
+                std::unique_ptr<cricket::VideoCapturer>,
+                const MediaConstraintsInterface*)
+  PROXY_METHOD1(rtc::scoped_refptr<VideoTrackSourceInterface>,
+                CreateVideoSource,
+                std::unique_ptr<cricket::VideoCapturer>)
+  PROXY_METHOD2(rtc::scoped_refptr<VideoTrackInterface>,
+                CreateVideoTrack,
+                const std::string&,
+                VideoTrackSourceInterface*)
+  PROXY_METHOD2(rtc::scoped_refptr<AudioTrackInterface>,
+                CreateAudioTrack, const std::string&,  AudioSourceInterface*)
+  PROXY_METHOD2(bool, StartAecDump, rtc::PlatformFile, int64_t)
+  PROXY_METHOD0(void, StopAecDump)
+END_PROXY_MAP()
+
+}  // namespace webrtc
+
+#endif  // API_PEERCONNECTIONFACTORYPROXY_H_
diff --git a/api/peerconnectioninterface.h b/api/peerconnectioninterface.h
new file mode 100644
index 0000000..4cb3d65
--- /dev/null
+++ b/api/peerconnectioninterface.h
@@ -0,0 +1,1477 @@
+/*
+ *  Copyright 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains the PeerConnection interface as defined in
+// https://w3c.github.io/webrtc-pc/#peer-to-peer-connections
+//
+// The PeerConnectionFactory class provides factory methods to create
+// PeerConnection, MediaStream and MediaStreamTrack objects.
+//
+// The following steps are needed to setup a typical call using WebRTC:
+//
+// 1. Create a PeerConnectionFactoryInterface. Check constructors for more
+// information about input parameters.
+//
+// 2. Create a PeerConnection object. Provide a configuration struct which
+// points to STUN and/or TURN servers used to generate ICE candidates, and
+// provide an object that implements the PeerConnectionObserver interface,
+// which is used to receive callbacks from the PeerConnection.
+//
+// 3. Create local MediaStreamTracks using the PeerConnectionFactory and add
+// them to PeerConnection by calling AddTrack (or legacy method, AddStream).
+//
+// 4. Create an offer, call SetLocalDescription with it, serialize it, and send
+// it to the remote peer
+//
+// 5. Once an ICE candidate has been gathered, the PeerConnection will call the
+// observer function OnIceCandidate. The candidates must also be serialized and
+// sent to the remote peer.
+//
+// 6. Once an answer is received from the remote peer, call
+// SetRemoteDescription with the remote answer.
+//
+// 7. Once a remote candidate is received from the remote peer, provide it to
+// the PeerConnection by calling AddIceCandidate.
+//
+// The receiver of a call (assuming the application is "call"-based) can decide
+// to accept or reject the call; this decision will be taken by the application,
+// not the PeerConnection.
+//
+// If the application decides to accept the call, it should:
+//
+// 1. Create PeerConnectionFactoryInterface if it doesn't exist.
+//
+// 2. Create a new PeerConnection.
+//
+// 3. Provide the remote offer to the new PeerConnection object by calling
+// SetRemoteDescription.
+//
+// 4. Generate an answer to the remote offer by calling CreateAnswer and send it
+// back to the remote peer.
+//
+// 5. Provide the local answer to the new PeerConnection by calling
+// SetLocalDescription with the answer.
+//
+// 6. Provide the remote ICE candidates by calling AddIceCandidate.
+//
+// 7. Once a candidate has been gathered, the PeerConnection will call the
+// observer function OnIceCandidate. Send these candidates to the remote peer.
+
+#ifndef API_PEERCONNECTIONINTERFACE_H_
+#define API_PEERCONNECTIONINTERFACE_H_
+
+// TODO(sakal): Remove this define after migration to virtual PeerConnection
+// observer is complete.
+#define VIRTUAL_PEERCONNECTION_OBSERVER_DESTRUCTOR
+
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "api/audio/audio_mixer.h"
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "api/audio_codecs/audio_encoder_factory.h"
+#include "api/audio_options.h"
+#include "api/call/callfactoryinterface.h"
+#include "api/datachannelinterface.h"
+#include "api/dtmfsenderinterface.h"
+#include "api/fec_controller.h"
+#include "api/jsep.h"
+#include "api/mediastreaminterface.h"
+#include "api/rtcerror.h"
+#include "api/rtceventlogoutput.h"
+#include "api/rtpreceiverinterface.h"
+#include "api/rtpsenderinterface.h"
+#include "api/rtptransceiverinterface.h"
+#include "api/setremotedescriptionobserverinterface.h"
+#include "api/stats/rtcstatscollectorcallback.h"
+#include "api/statstypes.h"
+#include "api/turncustomizer.h"
+#include "api/umametrics.h"
+#include "logging/rtc_event_log/rtc_event_log_factory_interface.h"
+#include "media/base/mediaconfig.h"
+// TODO(bugs.webrtc.org/6353): cricket::VideoCapturer is deprecated and should
+// be deleted from the PeerConnection api.
+#include "media/base/videocapturer.h"  // nogncheck
+// TODO(bugs.webrtc.org/7447): We plan to provide a way to let applications
+// inject a PacketSocketFactory and/or NetworkManager, and not expose
+// PortAllocator in the PeerConnection api.
+#include "p2p/base/portallocator.h"  // nogncheck
+// TODO(nisse): The interface for bitrate allocation strategy belongs in api/.
+#include "rtc_base/bitrateallocationstrategy.h"
+#include "rtc_base/network.h"
+#include "rtc_base/platform_file.h"
+#include "rtc_base/rtccertificate.h"
+#include "rtc_base/rtccertificategenerator.h"
+#include "rtc_base/socketaddress.h"
+#include "rtc_base/sslstreamadapter.h"
+
+namespace rtc {
+class SSLIdentity;
+class Thread;
+}
+
+namespace cricket {
+class MediaEngineInterface;
+class WebRtcVideoDecoderFactory;
+class WebRtcVideoEncoderFactory;
+}
+
+namespace webrtc {
+class AudioDeviceModule;
+class AudioMixer;
+class AudioProcessing;
+class MediaConstraintsInterface;
+class VideoDecoderFactory;
+class VideoEncoderFactory;
+
+// MediaStream container interface.
+class StreamCollectionInterface : public rtc::RefCountInterface {
+ public:
+  // TODO(ronghuawu): Update the function names to c++ style, e.g. find -> Find.
+  virtual size_t count() = 0;
+  virtual MediaStreamInterface* at(size_t index) = 0;
+  virtual MediaStreamInterface* find(const std::string& label) = 0;
+  virtual MediaStreamTrackInterface* FindAudioTrack(
+      const std::string& id) = 0;
+  virtual MediaStreamTrackInterface* FindVideoTrack(
+      const std::string& id) = 0;
+
+ protected:
+  // Dtor protected as objects shouldn't be deleted via this interface.
+  ~StreamCollectionInterface() {}
+};
+
+class StatsObserver : public rtc::RefCountInterface {
+ public:
+  virtual void OnComplete(const StatsReports& reports) = 0;
+
+ protected:
+  virtual ~StatsObserver() {}
+};
+
+// For now, kDefault is interpreted as kPlanB.
+// TODO(bugs.webrtc.org/8530): Switch default to kUnifiedPlan.
+enum class SdpSemantics { kDefault, kPlanB, kUnifiedPlan };
+
+class PeerConnectionInterface : public rtc::RefCountInterface {
+ public:
+  // See https://w3c.github.io/webrtc-pc/#state-definitions
+  enum SignalingState {
+    kStable,
+    kHaveLocalOffer,
+    kHaveLocalPrAnswer,
+    kHaveRemoteOffer,
+    kHaveRemotePrAnswer,
+    kClosed,
+  };
+
+  enum IceGatheringState {
+    kIceGatheringNew,
+    kIceGatheringGathering,
+    kIceGatheringComplete
+  };
+
+  enum IceConnectionState {
+    kIceConnectionNew,
+    kIceConnectionChecking,
+    kIceConnectionConnected,
+    kIceConnectionCompleted,
+    kIceConnectionFailed,
+    kIceConnectionDisconnected,
+    kIceConnectionClosed,
+    kIceConnectionMax,
+  };
+
+  // TLS certificate policy.
+  enum TlsCertPolicy {
+    // For TLS based protocols, ensure the connection is secure by not
+    // circumventing certificate validation.
+    kTlsCertPolicySecure,
+    // For TLS based protocols, disregard security completely by skipping
+    // certificate validation. This is insecure and should never be used unless
+    // security is irrelevant in that particular context.
+    kTlsCertPolicyInsecureNoCheck,
+  };
+
+  struct IceServer {
+    // TODO(jbauch): Remove uri when all code using it has switched to urls.
+    // List of URIs associated with this server. Valid formats are described
+    // in RFC7064 and RFC7065, and more may be added in the future. The "host"
+    // part of the URI may contain either an IP address or a hostname.
+    std::string uri;
+    std::vector<std::string> urls;
+    std::string username;
+    std::string password;
+    TlsCertPolicy tls_cert_policy = kTlsCertPolicySecure;
+    // If the URIs in |urls| only contain IP addresses, this field can be used
+    // to indicate the hostname, which may be necessary for TLS (using the SNI
+    // extension). If |urls| itself contains the hostname, this isn't
+    // necessary.
+    std::string hostname;
+    // List of protocols to be used in the TLS ALPN extension.
+    std::vector<std::string> tls_alpn_protocols;
+    // List of elliptic curves to be used in the TLS elliptic curves extension.
+    std::vector<std::string> tls_elliptic_curves;
+
+    bool operator==(const IceServer& o) const {
+      return uri == o.uri && urls == o.urls && username == o.username &&
+             password == o.password && tls_cert_policy == o.tls_cert_policy &&
+             hostname == o.hostname &&
+             tls_alpn_protocols == o.tls_alpn_protocols &&
+             tls_elliptic_curves == o.tls_elliptic_curves;
+    }
+    bool operator!=(const IceServer& o) const { return !(*this == o); }
+  };
+  typedef std::vector<IceServer> IceServers;
+
+  enum IceTransportsType {
+    // TODO(pthatcher): Rename these kTransporTypeXXX, but update
+    // Chromium at the same time.
+    kNone,
+    kRelay,
+    kNoHost,
+    kAll
+  };
+
+  // https://tools.ietf.org/html/draft-ietf-rtcweb-jsep-24#section-4.1.1
+  enum BundlePolicy {
+    kBundlePolicyBalanced,
+    kBundlePolicyMaxBundle,
+    kBundlePolicyMaxCompat
+  };
+
+  // https://tools.ietf.org/html/draft-ietf-rtcweb-jsep-24#section-4.1.1
+  enum RtcpMuxPolicy {
+    kRtcpMuxPolicyNegotiate,
+    kRtcpMuxPolicyRequire,
+  };
+
+  enum TcpCandidatePolicy {
+    kTcpCandidatePolicyEnabled,
+    kTcpCandidatePolicyDisabled
+  };
+
+  enum CandidateNetworkPolicy {
+    kCandidateNetworkPolicyAll,
+    kCandidateNetworkPolicyLowCost
+  };
+
+  enum ContinualGatheringPolicy {
+    GATHER_ONCE,
+    GATHER_CONTINUALLY
+  };
+
+  enum class RTCConfigurationType {
+    // A configuration that is safer to use, despite not having the best
+    // performance. Currently this is the default configuration.
+    kSafe,
+    // An aggressive configuration that has better performance, although it
+    // may be riskier and may need extra support in the application.
+    kAggressive
+  };
+
+  // TODO(hbos): Change into class with private data and public getters.
+  // TODO(nisse): In particular, accessing fields directly from an
+  // application is brittle, since the organization mirrors the
+  // organization of the implementation, which isn't stable. So we
+  // need getters and setters at least for fields which applications
+  // are interested in.
+  struct RTCConfiguration {
+    // This struct is subject to reorganization, both for naming
+    // consistency, and to group settings to match where they are used
+    // in the implementation. To do that, we need getter and setter
+    // methods for all settings which are of interest to applications,
+    // Chrome in particular.
+
+    RTCConfiguration() = default;
+    explicit RTCConfiguration(RTCConfigurationType type) {
+      if (type == RTCConfigurationType::kAggressive) {
+        // These parameters are also defined in Java and IOS configurations,
+        // so their values may be overwritten by the Java or IOS configuration.
+        bundle_policy = kBundlePolicyMaxBundle;
+        rtcp_mux_policy = kRtcpMuxPolicyRequire;
+        ice_connection_receiving_timeout =
+            kAggressiveIceConnectionReceivingTimeout;
+
+        // These parameters are not defined in Java or IOS configuration,
+        // so their values will not be overwritten.
+        enable_ice_renomination = true;
+        redetermine_role_on_ice_restart = false;
+      }
+    }
+
+    bool operator==(const RTCConfiguration& o) const;
+    bool operator!=(const RTCConfiguration& o) const;
+
+    bool dscp() const { return media_config.enable_dscp; }
+    void set_dscp(bool enable) { media_config.enable_dscp = enable; }
+
+    bool cpu_adaptation() const {
+      return media_config.video.enable_cpu_adaptation;
+    }
+    void set_cpu_adaptation(bool enable) {
+      media_config.video.enable_cpu_adaptation = enable;
+    }
+
+    bool suspend_below_min_bitrate() const {
+      return media_config.video.suspend_below_min_bitrate;
+    }
+    void set_suspend_below_min_bitrate(bool enable) {
+      media_config.video.suspend_below_min_bitrate = enable;
+    }
+
+    bool prerenderer_smoothing() const {
+      return media_config.video.enable_prerenderer_smoothing;
+    }
+    void set_prerenderer_smoothing(bool enable) {
+      media_config.video.enable_prerenderer_smoothing = enable;
+    }
+
+    bool experiment_cpu_load_estimator() const {
+      return media_config.video.experiment_cpu_load_estimator;
+    }
+    void set_experiment_cpu_load_estimator(bool enable) {
+      media_config.video.experiment_cpu_load_estimator = enable;
+    }
+    static const int kUndefined = -1;
+    // Default maximum number of packets in the audio jitter buffer.
+    static const int kAudioJitterBufferMaxPackets = 50;
+    // ICE connection receiving timeout for aggressive configuration.
+    static const int kAggressiveIceConnectionReceivingTimeout = 1000;
+
+    ////////////////////////////////////////////////////////////////////////
+    // The below few fields mirror the standard RTCConfiguration dictionary:
+    // https://w3c.github.io/webrtc-pc/#rtcconfiguration-dictionary
+    ////////////////////////////////////////////////////////////////////////
+
+    // TODO(pthatcher): Rename this ice_servers, but update Chromium
+    // at the same time.
+    IceServers servers;
+    // TODO(pthatcher): Rename this ice_transport_type, but update
+    // Chromium at the same time.
+    IceTransportsType type = kAll;
+    BundlePolicy bundle_policy = kBundlePolicyBalanced;
+    RtcpMuxPolicy rtcp_mux_policy = kRtcpMuxPolicyRequire;
+    std::vector<rtc::scoped_refptr<rtc::RTCCertificate>> certificates;
+    int ice_candidate_pool_size = 0;
+
+    //////////////////////////////////////////////////////////////////////////
+    // The below fields correspond to constraints from the deprecated
+    // constraints interface for constructing a PeerConnection.
+    //
+    // rtc::Optional fields can be "missing", in which case the implementation
+    // default will be used.
+    //////////////////////////////////////////////////////////////////////////
+
+    // If set to true, don't gather IPv6 ICE candidates.
+    // TODO(deadbeef): Remove this? IPv6 support has long stopped being
+    // experimental
+    bool disable_ipv6 = false;
+
+    // If set to true, don't gather IPv6 ICE candidates on Wi-Fi.
+    // Only intended to be used on specific devices. Certain phones disable IPv6
+    // when the screen is turned off and it would be better to just disable the
+    // IPv6 ICE candidates on Wi-Fi in those cases.
+    bool disable_ipv6_on_wifi = false;
+
+    // By default, the PeerConnection will use a limited number of IPv6 network
+    // interfaces, in order to avoid too many ICE candidate pairs being created
+    // and delaying ICE completion.
+    //
+    // Can be set to INT_MAX to effectively disable the limit.
+    int max_ipv6_networks = cricket::kDefaultMaxIPv6Networks;
+
+    // Exclude link-local network interfaces
+    // from considertaion for gathering ICE candidates.
+    bool disable_link_local_networks = false;
+
+    // If set to true, use RTP data channels instead of SCTP.
+    // TODO(deadbeef): Remove this. We no longer commit to supporting RTP data
+    // channels, though some applications are still working on moving off of
+    // them.
+    bool enable_rtp_data_channel = false;
+
+    // Minimum bitrate at which screencast video tracks will be encoded at.
+    // This means adding padding bits up to this bitrate, which can help
+    // when switching from a static scene to one with motion.
+    rtc::Optional<int> screencast_min_bitrate;
+
+    // Use new combined audio/video bandwidth estimation?
+    rtc::Optional<bool> combined_audio_video_bwe;
+
+    // Can be used to disable DTLS-SRTP. This should never be done, but can be
+    // useful for testing purposes, for example in setting up a loopback call
+    // with a single PeerConnection.
+    rtc::Optional<bool> enable_dtls_srtp;
+
+    /////////////////////////////////////////////////
+    // The below fields are not part of the standard.
+    /////////////////////////////////////////////////
+
+    // Can be used to disable TCP candidate generation.
+    TcpCandidatePolicy tcp_candidate_policy = kTcpCandidatePolicyEnabled;
+
+    // Can be used to avoid gathering candidates for a "higher cost" network,
+    // if a lower cost one exists. For example, if both Wi-Fi and cellular
+    // interfaces are available, this could be used to avoid using the cellular
+    // interface.
+    CandidateNetworkPolicy candidate_network_policy =
+        kCandidateNetworkPolicyAll;
+
+    // The maximum number of packets that can be stored in the NetEq audio
+    // jitter buffer. Can be reduced to lower tolerated audio latency.
+    int audio_jitter_buffer_max_packets = kAudioJitterBufferMaxPackets;
+
+    // Whether to use the NetEq "fast mode" which will accelerate audio quicker
+    // if it falls behind.
+    bool audio_jitter_buffer_fast_accelerate = false;
+
+    // Timeout in milliseconds before an ICE candidate pair is considered to be
+    // "not receiving", after which a lower priority candidate pair may be
+    // selected.
+    int ice_connection_receiving_timeout = kUndefined;
+
+    // Interval in milliseconds at which an ICE "backup" candidate pair will be
+    // pinged. This is a candidate pair which is not actively in use, but may
+    // be switched to if the active candidate pair becomes unusable.
+    //
+    // This is relevant mainly to Wi-Fi/cell handoff; the application may not
+    // want this backup cellular candidate pair pinged frequently, since it
+    // consumes data/battery.
+    int ice_backup_candidate_pair_ping_interval = kUndefined;
+
+    // Can be used to enable continual gathering, which means new candidates
+    // will be gathered as network interfaces change. Note that if continual
+    // gathering is used, the candidate removal API should also be used, to
+    // avoid an ever-growing list of candidates.
+    ContinualGatheringPolicy continual_gathering_policy = GATHER_ONCE;
+
+    // If set to true, candidate pairs will be pinged in order of most likely
+    // to work (which means using a TURN server, generally), rather than in
+    // standard priority order.
+    bool prioritize_most_likely_ice_candidate_pairs = false;
+
+    // Implementation defined settings. A public member only for the benefit of
+    // the implementation. Applications must not access it directly, and should
+    // instead use provided accessor methods, e.g., set_cpu_adaptation.
+    struct cricket::MediaConfig media_config;
+
+    // If set to true, only one preferred TURN allocation will be used per
+    // network interface. UDP is preferred over TCP and IPv6 over IPv4. This
+    // can be used to cut down on the number of candidate pairings.
+    bool prune_turn_ports = false;
+
+    // If set to true, this means the ICE transport should presume TURN-to-TURN
+    // candidate pairs will succeed, even before a binding response is received.
+    // This can be used to optimize the initial connection time, since the DTLS
+    // handshake can begin immediately.
+    bool presume_writable_when_fully_relayed = false;
+
+    // If true, "renomination" will be added to the ice options in the transport
+    // description.
+    // See: https://tools.ietf.org/html/draft-thatcher-ice-renomination-00
+    bool enable_ice_renomination = false;
+
+    // If true, the ICE role is re-determined when the PeerConnection sets a
+    // local transport description that indicates an ICE restart.
+    //
+    // This is standard RFC5245 ICE behavior, but causes unnecessary role
+    // thrashing, so an application may wish to avoid it. This role
+    // re-determining was removed in ICEbis (ICE v2).
+    bool redetermine_role_on_ice_restart = true;
+
+    // The following fields define intervals in milliseconds at which ICE
+    // connectivity checks are sent.
+    //
+    // We consider ICE is "strongly connected" for an agent when there is at
+    // least one candidate pair that currently succeeds in connectivity check
+    // from its direction i.e. sending a STUN ping and receives a STUN ping
+    // response, AND all candidate pairs have sent a minimum number of pings for
+    // connectivity (this number is implementation-specific). Otherwise, ICE is
+    // considered in "weak connectivity".
+    //
+    // Note that the above notion of strong and weak connectivity is not defined
+    // in RFC 5245, and they apply to our current ICE implementation only.
+    //
+    // 1) ice_check_interval_strong_connectivity defines the interval applied to
+    // ALL candidate pairs when ICE is strongly connected, and it overrides the
+    // default value of this interval in the ICE implementation;
+    // 2) ice_check_interval_weak_connectivity defines the counterpart for ALL
+    // pairs when ICE is weakly connected, and it overrides the default value of
+    // this interval in the ICE implementation;
+    // 3) ice_check_min_interval defines the minimal interval (equivalently the
+    // maximum rate) that overrides the above two intervals when either of them
+    // is less.
+    rtc::Optional<int> ice_check_interval_strong_connectivity;
+    rtc::Optional<int> ice_check_interval_weak_connectivity;
+    rtc::Optional<int> ice_check_min_interval;
+
+    // The min time period for which a candidate pair must wait for response to
+    // connectivity checks before it becomes unwritable. This parameter
+    // overrides the default value in the ICE implementation if set.
+    rtc::Optional<int> ice_unwritable_timeout;
+
+    // The min number of connectivity checks that a candidate pair must sent
+    // without receiving response before it becomes unwritable. This parameter
+    // overrides the default value in the ICE implementation if set.
+    rtc::Optional<int> ice_unwritable_min_checks;
+
+    // The interval in milliseconds at which STUN candidates will resend STUN
+    // binding requests to keep NAT bindings open.
+    rtc::Optional<int> stun_candidate_keepalive_interval;
+
+    // ICE Periodic Regathering
+    // If set, WebRTC will periodically create and propose candidates without
+    // starting a new ICE generation. The regathering happens continuously with
+    // interval specified in milliseconds by the uniform distribution [a, b].
+    rtc::Optional<rtc::IntervalRange> ice_regather_interval_range;
+
+    // Optional TurnCustomizer.
+    // With this class one can modify outgoing TURN messages.
+    // The object passed in must remain valid until PeerConnection::Close() is
+    // called.
+    webrtc::TurnCustomizer* turn_customizer = nullptr;
+
+    // Preferred network interface.
+    // A candidate pair on a preferred network has a higher precedence in ICE
+    // than one on an un-preferred network, regardless of priority or network
+    // cost.
+    rtc::Optional<rtc::AdapterType> network_preference;
+
+    // Configure the SDP semantics used by this PeerConnection. Note that the
+    // WebRTC 1.0 specification requires kUnifiedPlan semantics. The
+    // RtpTransceiver API is only available with kUnifiedPlan semantics.
+    //
+    // kPlanB will cause PeerConnection to create offers and answers with at
+    // most one audio and one video m= section with multiple RtpSenders and
+    // RtpReceivers specified as multiple a=ssrc lines within the section. This
+    // will also cause PeerConnection to ignore all but the first m= section of
+    // the same media type.
+    //
+    // kUnifiedPlan will cause PeerConnection to create offers and answers with
+    // multiple m= sections where each m= section maps to one RtpSender and one
+    // RtpReceiver (an RtpTransceiver), either both audio or both video. This
+    // will also cause PeerConnection to ignore all but the first a=ssrc lines
+    // that form a Plan B stream.
+    //
+    // For users who only send at most one audio and one video track, this
+    // choice does not matter and should be left as kDefault.
+    //
+    // For users who wish to send multiple audio/video streams and need to stay
+    // interoperable with legacy WebRTC implementations, specify kPlanB.
+    //
+    // For users who wish to send multiple audio/video streams and/or wish to
+    // use the new RtpTransceiver API, specify kUnifiedPlan.
+    SdpSemantics sdp_semantics = SdpSemantics::kDefault;
+
+    //
+    // Don't forget to update operator== if adding something.
+    //
+  };
+
+  // See: https://www.w3.org/TR/webrtc/#idl-def-rtcofferansweroptions
+  struct RTCOfferAnswerOptions {
+    static const int kUndefined = -1;
+    static const int kMaxOfferToReceiveMedia = 1;
+
+    // The default value for constraint offerToReceiveX:true.
+    static const int kOfferToReceiveMediaTrue = 1;
+
+    // These options are left as backwards compatibility for clients who need
+    // "Plan B" semantics. Clients who have switched to "Unified Plan" semantics
+    // should use the RtpTransceiver API (AddTransceiver) instead.
+    //
+    // offer_to_receive_X set to 1 will cause a media description to be
+    // generated in the offer, even if no tracks of that type have been added.
+    // Values greater than 1 are treated the same.
+    //
+    // If set to 0, the generated directional attribute will not include the
+    // "recv" direction (meaning it will be "sendonly" or "inactive".
+    int offer_to_receive_video = kUndefined;
+    int offer_to_receive_audio = kUndefined;
+
+    bool voice_activity_detection = true;
+    bool ice_restart = false;
+
+    // If true, will offer to BUNDLE audio/video/data together. Not to be
+    // confused with RTCP mux (multiplexing RTP and RTCP together).
+    bool use_rtp_mux = true;
+
+    RTCOfferAnswerOptions() = default;
+
+    RTCOfferAnswerOptions(int offer_to_receive_video,
+                          int offer_to_receive_audio,
+                          bool voice_activity_detection,
+                          bool ice_restart,
+                          bool use_rtp_mux)
+        : offer_to_receive_video(offer_to_receive_video),
+          offer_to_receive_audio(offer_to_receive_audio),
+          voice_activity_detection(voice_activity_detection),
+          ice_restart(ice_restart),
+          use_rtp_mux(use_rtp_mux) {}
+  };
+
+  // Used by GetStats to decide which stats to include in the stats reports.
+  // |kStatsOutputLevelStandard| includes the standard stats for Javascript API;
+  // |kStatsOutputLevelDebug| includes both the standard stats and additional
+  // stats for debugging purposes.
+  enum StatsOutputLevel {
+    kStatsOutputLevelStandard,
+    kStatsOutputLevelDebug,
+  };
+
+  // Accessor methods to active local streams.
+  // This method is not supported with kUnifiedPlan semantics. Please use
+  // GetSenders() instead.
+  virtual rtc::scoped_refptr<StreamCollectionInterface>
+      local_streams() = 0;
+
+  // Accessor methods to remote streams.
+  // This method is not supported with kUnifiedPlan semantics. Please use
+  // GetReceivers() instead.
+  virtual rtc::scoped_refptr<StreamCollectionInterface>
+      remote_streams() = 0;
+
+  // Add a new MediaStream to be sent on this PeerConnection.
+  // Note that a SessionDescription negotiation is needed before the
+  // remote peer can receive the stream.
+  //
+  // This has been removed from the standard in favor of a track-based API. So,
+  // this is equivalent to simply calling AddTrack for each track within the
+  // stream, with the one difference that if "stream->AddTrack(...)" is called
+  // later, the PeerConnection will automatically pick up the new track. Though
+  // this functionality will be deprecated in the future.
+  //
+  // This method is not supported with kUnifiedPlan semantics. Please use
+  // AddTrack instead.
+  virtual bool AddStream(MediaStreamInterface* stream) = 0;
+
+  // Remove a MediaStream from this PeerConnection.
+  // Note that a SessionDescription negotiation is needed before the
+  // remote peer is notified.
+  //
+  // This method is not supported with kUnifiedPlan semantics. Please use
+  // RemoveTrack instead.
+  virtual void RemoveStream(MediaStreamInterface* stream) = 0;
+
+  // Add a new MediaStreamTrack to be sent on this PeerConnection, and return
+  // the newly created RtpSender. The RtpSender will be associated with the
+  // streams specified in the |stream_ids| list.
+  //
+  // Errors:
+  // - INVALID_PARAMETER: |track| is null, has a kind other than audio or video,
+  //       or a sender already exists for the track.
+  // - INVALID_STATE: The PeerConnection is closed.
+  // TODO(steveanton): Remove default implementation once downstream
+  // implementations have been updated.
+  virtual RTCErrorOr<rtc::scoped_refptr<RtpSenderInterface>> AddTrack(
+      rtc::scoped_refptr<MediaStreamTrackInterface> track,
+      const std::vector<std::string>& stream_ids) {
+    return RTCError(RTCErrorType::UNSUPPORTED_OPERATION, "Not implemented");
+  }
+  // |streams| indicates which stream ids the track should be associated
+  // with.
+  // TODO(steveanton): Remove this overload once callers have moved to the
+  // signature with stream ids.
+  virtual rtc::scoped_refptr<RtpSenderInterface> AddTrack(
+      MediaStreamTrackInterface* track,
+      std::vector<MediaStreamInterface*> streams) {
+    // Default implementation provided so downstream implementations can remove
+    // this.
+    return nullptr;
+  }
+
+  // Remove an RtpSender from this PeerConnection.
+  // Returns true on success.
+  virtual bool RemoveTrack(RtpSenderInterface* sender) = 0;
+
+  // AddTransceiver creates a new RtpTransceiver and adds it to the set of
+  // transceivers. Adding a transceiver will cause future calls to CreateOffer
+  // to add a media description for the corresponding transceiver.
+  //
+  // The initial value of |mid| in the returned transceiver is null. Setting a
+  // new session description may change it to a non-null value.
+  //
+  // https://w3c.github.io/webrtc-pc/#dom-rtcpeerconnection-addtransceiver
+  //
+  // Optionally, an RtpTransceiverInit structure can be specified to configure
+  // the transceiver from construction. If not specified, the transceiver will
+  // default to having a direction of kSendRecv and not be part of any streams.
+  //
+  // These methods are only available when Unified Plan is enabled (see
+  // RTCConfiguration).
+  //
+  // Common errors:
+  // - INTERNAL_ERROR: The configuration does not have Unified Plan enabled.
+  // TODO(steveanton): Make these pure virtual once downstream projects have
+  // updated.
+
+  // Adds a transceiver with a sender set to transmit the given track. The kind
+  // of the transceiver (and sender/receiver) will be derived from the kind of
+  // the track.
+  // Errors:
+  // - INVALID_PARAMETER: |track| is null.
+  virtual RTCErrorOr<rtc::scoped_refptr<RtpTransceiverInterface>>
+  AddTransceiver(rtc::scoped_refptr<MediaStreamTrackInterface> track) {
+    return RTCError(RTCErrorType::INTERNAL_ERROR, "not implemented");
+  }
+  virtual RTCErrorOr<rtc::scoped_refptr<RtpTransceiverInterface>>
+  AddTransceiver(rtc::scoped_refptr<MediaStreamTrackInterface> track,
+                 const RtpTransceiverInit& init) {
+    return RTCError(RTCErrorType::INTERNAL_ERROR, "not implemented");
+  }
+
+  // Adds a transceiver with the given kind. Can either be MEDIA_TYPE_AUDIO or
+  // MEDIA_TYPE_VIDEO.
+  // Errors:
+  // - INVALID_PARAMETER: |media_type| is not MEDIA_TYPE_AUDIO or
+  //                      MEDIA_TYPE_VIDEO.
+  virtual RTCErrorOr<rtc::scoped_refptr<RtpTransceiverInterface>>
+  AddTransceiver(cricket::MediaType media_type) {
+    return RTCError(RTCErrorType::INTERNAL_ERROR, "not implemented");
+  }
+  virtual RTCErrorOr<rtc::scoped_refptr<RtpTransceiverInterface>>
+  AddTransceiver(cricket::MediaType media_type,
+                 const RtpTransceiverInit& init) {
+    return RTCError(RTCErrorType::INTERNAL_ERROR, "not implemented");
+  }
+
+  // Returns pointer to a DtmfSender on success. Otherwise returns null.
+  //
+  // This API is no longer part of the standard; instead DtmfSenders are
+  // obtained from RtpSenders. Which is what the implementation does; it finds
+  // an RtpSender for |track| and just returns its DtmfSender.
+  virtual rtc::scoped_refptr<DtmfSenderInterface> CreateDtmfSender(
+      AudioTrackInterface* track) = 0;
+
+  // TODO(deadbeef): Make these pure virtual once all subclasses implement them.
+
+  // Creates a sender without a track. Can be used for "early media"/"warmup"
+  // use cases, where the application may want to negotiate video attributes
+  // before a track is available to send.
+  //
+  // The standard way to do this would be through "addTransceiver", but we
+  // don't support that API yet.
+  //
+  // |kind| must be "audio" or "video".
+  //
+  // |stream_id| is used to populate the msid attribute; if empty, one will
+  // be generated automatically.
+  //
+  // This method is not supported with kUnifiedPlan semantics. Please use
+  // AddTransceiver instead.
+  virtual rtc::scoped_refptr<RtpSenderInterface> CreateSender(
+      const std::string& kind,
+      const std::string& stream_id) {
+    return rtc::scoped_refptr<RtpSenderInterface>();
+  }
+
+  // If Plan B semantics are specified, gets all RtpSenders, created either
+  // through AddStream, AddTrack, or CreateSender. All senders of a specific
+  // media type share the same media description.
+  //
+  // If Unified Plan semantics are specified, gets the RtpSender for each
+  // RtpTransceiver.
+  virtual std::vector<rtc::scoped_refptr<RtpSenderInterface>> GetSenders()
+      const {
+    return std::vector<rtc::scoped_refptr<RtpSenderInterface>>();
+  }
+
+  // If Plan B semantics are specified, gets all RtpReceivers created when a
+  // remote description is applied. All receivers of a specific media type share
+  // the same media description. It is also possible to have a media description
+  // with no associated RtpReceivers, if the directional attribute does not
+  // indicate that the remote peer is sending any media.
+  //
+  // If Unified Plan semantics are specified, gets the RtpReceiver for each
+  // RtpTransceiver.
+  virtual std::vector<rtc::scoped_refptr<RtpReceiverInterface>> GetReceivers()
+      const {
+    return std::vector<rtc::scoped_refptr<RtpReceiverInterface>>();
+  }
+
+  // Get all RtpTransceivers, created either through AddTransceiver, AddTrack or
+  // by a remote description applied with SetRemoteDescription.
+  //
+  // Note: This method is only available when Unified Plan is enabled (see
+  // RTCConfiguration).
+  virtual std::vector<rtc::scoped_refptr<RtpTransceiverInterface>>
+  GetTransceivers() const {
+    return {};
+  }
+
+  virtual bool GetStats(StatsObserver* observer,
+                        MediaStreamTrackInterface* track,
+                        StatsOutputLevel level) = 0;
+  // Gets stats using the new stats collection API, see webrtc/api/stats/. These
+  // will replace old stats collection API when the new API has matured enough.
+  // TODO(hbos): Default implementation that does nothing only exists as to not
+  // break third party projects. As soon as they have been updated this should
+  // be changed to "= 0;".
+  virtual void GetStats(RTCStatsCollectorCallback* callback) {}
+  // Clear cached stats in the RTCStatsCollector.
+  // Exposed for testing while waiting for automatic cache clear to work.
+  // https://bugs.webrtc.org/8693
+  virtual void ClearStatsCache() {}
+
+  // Create a data channel with the provided config, or default config if none
+  // is provided. Note that an offer/answer negotiation is still necessary
+  // before the data channel can be used.
+  //
+  // Also, calling CreateDataChannel is the only way to get a data "m=" section
+  // in SDP, so it should be done before CreateOffer is called, if the
+  // application plans to use data channels.
+  virtual rtc::scoped_refptr<DataChannelInterface> CreateDataChannel(
+      const std::string& label,
+      const DataChannelInit* config) = 0;
+
+  // Returns the more recently applied description; "pending" if it exists, and
+  // otherwise "current". See below.
+  virtual const SessionDescriptionInterface* local_description() const = 0;
+  virtual const SessionDescriptionInterface* remote_description() const = 0;
+
+  // A "current" description the one currently negotiated from a complete
+  // offer/answer exchange.
+  virtual const SessionDescriptionInterface* current_local_description() const {
+    return nullptr;
+  }
+  virtual const SessionDescriptionInterface* current_remote_description()
+      const {
+    return nullptr;
+  }
+
+  // A "pending" description is one that's part of an incomplete offer/answer
+  // exchange (thus, either an offer or a pranswer). Once the offer/answer
+  // exchange is finished, the "pending" description will become "current".
+  virtual const SessionDescriptionInterface* pending_local_description() const {
+    return nullptr;
+  }
+  virtual const SessionDescriptionInterface* pending_remote_description()
+      const {
+    return nullptr;
+  }
+
+  // Create a new offer.
+  // The CreateSessionDescriptionObserver callback will be called when done.
+  virtual void CreateOffer(CreateSessionDescriptionObserver* observer,
+                           const MediaConstraintsInterface* constraints) {}
+
+  // TODO(jiayl): remove the default impl and the old interface when chromium
+  // code is updated.
+  virtual void CreateOffer(CreateSessionDescriptionObserver* observer,
+                           const RTCOfferAnswerOptions& options) {}
+
+  // Create an answer to an offer.
+  // The CreateSessionDescriptionObserver callback will be called when done.
+  virtual void CreateAnswer(CreateSessionDescriptionObserver* observer,
+                            const RTCOfferAnswerOptions& options) {}
+  // Deprecated - use version above.
+  // TODO(hta): Remove and remove default implementations when all callers
+  // are updated.
+  virtual void CreateAnswer(CreateSessionDescriptionObserver* observer,
+                            const MediaConstraintsInterface* constraints) {}
+
+  // Sets the local session description.
+  // The PeerConnection takes the ownership of |desc| even if it fails.
+  // The |observer| callback will be called when done.
+  // TODO(deadbeef): Change |desc| to be a unique_ptr, to make it clear
+  // that this method always takes ownership of it.
+  virtual void SetLocalDescription(SetSessionDescriptionObserver* observer,
+                                   SessionDescriptionInterface* desc) = 0;
+  // Sets the remote session description.
+  // The PeerConnection takes the ownership of |desc| even if it fails.
+  // The |observer| callback will be called when done.
+  // TODO(hbos): Remove when Chrome implements the new signature.
+  virtual void SetRemoteDescription(SetSessionDescriptionObserver* observer,
+                                    SessionDescriptionInterface* desc) {}
+  // TODO(hbos): Make pure virtual when Chrome has updated its signature.
+  virtual void SetRemoteDescription(
+      std::unique_ptr<SessionDescriptionInterface> desc,
+      rtc::scoped_refptr<SetRemoteDescriptionObserverInterface> observer) {}
+  // Deprecated; Replaced by SetConfiguration.
+  // TODO(deadbeef): Remove once Chrome is moved over to SetConfiguration.
+  virtual bool UpdateIce(const IceServers& configuration,
+                         const MediaConstraintsInterface* constraints) {
+    return false;
+  }
+  virtual bool UpdateIce(const IceServers& configuration) { return false; }
+
+  // TODO(deadbeef): Make this pure virtual once all Chrome subclasses of
+  // PeerConnectionInterface implement it.
+  virtual PeerConnectionInterface::RTCConfiguration GetConfiguration() {
+    return PeerConnectionInterface::RTCConfiguration();
+  }
+
+  // Sets the PeerConnection's global configuration to |config|.
+  //
+  // The members of |config| that may be changed are |type|, |servers|,
+  // |ice_candidate_pool_size| and |prune_turn_ports| (though the candidate
+  // pool size can't be changed after the first call to SetLocalDescription).
+  // Note that this means the BUNDLE and RTCP-multiplexing policies cannot be
+  // changed with this method.
+  //
+  // Any changes to STUN/TURN servers or ICE candidate policy will affect the
+  // next gathering phase, and cause the next call to createOffer to generate
+  // new ICE credentials, as described in JSEP. This also occurs when
+  // |prune_turn_ports| changes, for the same reasoning.
+  //
+  // If an error occurs, returns false and populates |error| if non-null:
+  // - INVALID_MODIFICATION if |config| contains a modified parameter other
+  //   than one of the parameters listed above.
+  // - INVALID_RANGE if |ice_candidate_pool_size| is out of range.
+  // - SYNTAX_ERROR if parsing an ICE server URL failed.
+  // - INVALID_PARAMETER if a TURN server is missing |username| or |password|.
+  // - INTERNAL_ERROR if an unexpected error occurred.
+  //
+  // TODO(deadbeef): Make this pure virtual once all Chrome subclasses of
+  // PeerConnectionInterface implement it.
+  virtual bool SetConfiguration(
+      const PeerConnectionInterface::RTCConfiguration& config,
+      RTCError* error) {
+    return false;
+  }
+  // Version without error output param for backwards compatibility.
+  // TODO(deadbeef): Remove once chromium is updated.
+  virtual bool SetConfiguration(
+      const PeerConnectionInterface::RTCConfiguration& config) {
+    return false;
+  }
+
+  // Provides a remote candidate to the ICE Agent.
+  // A copy of the |candidate| will be created and added to the remote
+  // description. So the caller of this method still has the ownership of the
+  // |candidate|.
+  virtual bool AddIceCandidate(const IceCandidateInterface* candidate) = 0;
+
+  // Removes a group of remote candidates from the ICE agent. Needed mainly for
+  // continual gathering, to avoid an ever-growing list of candidates as
+  // networks come and go.
+  virtual bool RemoveIceCandidates(
+      const std::vector<cricket::Candidate>& candidates) {
+    return false;
+  }
+
+  // Register a metric observer (used by chromium). It's reference counted, and
+  // this method takes a reference. RegisterUMAObserver(nullptr) will release
+  // the reference.
+  // TODO(deadbeef): Take argument as scoped_refptr?
+  virtual void RegisterUMAObserver(UMAObserver* observer) = 0;
+
+  // 0 <= min <= current <= max should hold for set parameters.
+  struct BitrateParameters {
+    rtc::Optional<int> min_bitrate_bps;
+    rtc::Optional<int> current_bitrate_bps;
+    rtc::Optional<int> max_bitrate_bps;
+  };
+
+  // SetBitrate limits the bandwidth allocated for all RTP streams sent by
+  // this PeerConnection. Other limitations might affect these limits and
+  // are respected (for example "b=AS" in SDP).
+  //
+  // Setting |current_bitrate_bps| will reset the current bitrate estimate
+  // to the provided value.
+  virtual RTCError SetBitrate(const BitrateParameters& bitrate) = 0;
+
+  // Sets current strategy. If not set default WebRTC allocator will be used.
+  // May be changed during an active session. The strategy
+  // ownership is passed with std::unique_ptr
+  // TODO(alexnarest): Make this pure virtual when tests will be updated
+  virtual void SetBitrateAllocationStrategy(
+      std::unique_ptr<rtc::BitrateAllocationStrategy>
+          bitrate_allocation_strategy) {}
+
+  // Enable/disable playout of received audio streams. Enabled by default. Note
+  // that even if playout is enabled, streams will only be played out if the
+  // appropriate SDP is also applied. Setting |playout| to false will stop
+  // playout of the underlying audio device but starts a task which will poll
+  // for audio data every 10ms to ensure that audio processing happens and the
+  // audio statistics are updated.
+  // TODO(henrika): deprecate and remove this.
+  virtual void SetAudioPlayout(bool playout) {}
+
+  // Enable/disable recording of transmitted audio streams. Enabled by default.
+  // Note that even if recording is enabled, streams will only be recorded if
+  // the appropriate SDP is also applied.
+  // TODO(henrika): deprecate and remove this.
+  virtual void SetAudioRecording(bool recording) {}
+
+  // Returns the current SignalingState.
+  virtual SignalingState signaling_state() = 0;
+
+  // Returns the aggregate state of all ICE *and* DTLS transports.
+  // TODO(deadbeef): Implement "PeerConnectionState" according to the standard,
+  // to aggregate ICE+DTLS state, and change the scope of IceConnectionState to
+  // be just the ICE layer. See: crbug.com/webrtc/6145
+  virtual IceConnectionState ice_connection_state() = 0;
+
+  virtual IceGatheringState ice_gathering_state() = 0;
+
+  // Starts RtcEventLog using existing file. Takes ownership of |file| and
+  // passes it on to Call, which will take the ownership. If the
+  // operation fails the file will be closed. The logging will stop
+  // automatically after 10 minutes have passed, or when the StopRtcEventLog
+  // function is called.
+  // TODO(eladalon): Deprecate and remove this.
+  virtual bool StartRtcEventLog(rtc::PlatformFile file,
+                                int64_t max_size_bytes) {
+    return false;
+  }
+
+  // Start RtcEventLog using an existing output-sink. Takes ownership of
+  // |output| and passes it on to Call, which will take the ownership. If the
+  // operation fails the output will be closed and deallocated. The event log
+  // will send serialized events to the output object every |output_period_ms|.
+  virtual bool StartRtcEventLog(std::unique_ptr<RtcEventLogOutput> output,
+                                int64_t output_period_ms) {
+    return false;
+  }
+
+  // Stops logging the RtcEventLog.
+  // TODO(ivoc): Make this pure virtual when Chrome is updated.
+  virtual void StopRtcEventLog() {}
+
+  // Terminates all media, closes the transports, and in general releases any
+  // resources used by the PeerConnection. This is an irreversible operation.
+  //
+  // Note that after this method completes, the PeerConnection will no longer
+  // use the PeerConnectionObserver interface passed in on construction, and
+  // thus the observer object can be safely destroyed.
+  virtual void Close() = 0;
+
+ protected:
+  // Dtor protected as objects shouldn't be deleted via this interface.
+  ~PeerConnectionInterface() {}
+};
+
+// PeerConnection callback interface, used for RTCPeerConnection events.
+// Application should implement these methods.
+class PeerConnectionObserver {
+ public:
+  virtual ~PeerConnectionObserver() = default;
+
+  // Triggered when the SignalingState changed.
+  virtual void OnSignalingChange(
+      PeerConnectionInterface::SignalingState new_state) = 0;
+
+  // Triggered when media is received on a new stream from remote peer.
+  virtual void OnAddStream(rtc::scoped_refptr<MediaStreamInterface> stream) {}
+
+  // Triggered when a remote peer close a stream.
+  // Deprecated: This callback will no longer be fired with Unified Plan
+  // semantics.
+  virtual void OnRemoveStream(rtc::scoped_refptr<MediaStreamInterface> stream) {
+  }
+
+  // Triggered when a remote peer opens a data channel.
+  virtual void OnDataChannel(
+      rtc::scoped_refptr<DataChannelInterface> data_channel) = 0;
+
+  // Triggered when renegotiation is needed. For example, an ICE restart
+  // has begun.
+  virtual void OnRenegotiationNeeded() = 0;
+
+  // Called any time the IceConnectionState changes.
+  //
+  // Note that our ICE states lag behind the standard slightly. The most
+  // notable differences include the fact that "failed" occurs after 15
+  // seconds, not 30, and this actually represents a combination ICE + DTLS
+  // state, so it may be "failed" if DTLS fails while ICE succeeds.
+  virtual void OnIceConnectionChange(
+      PeerConnectionInterface::IceConnectionState new_state) = 0;
+
+  // Called any time the IceGatheringState changes.
+  virtual void OnIceGatheringChange(
+      PeerConnectionInterface::IceGatheringState new_state) = 0;
+
+  // A new ICE candidate has been gathered.
+  virtual void OnIceCandidate(const IceCandidateInterface* candidate) = 0;
+
+  // Ice candidates have been removed.
+  // TODO(honghaiz): Make this a pure virtual method when all its subclasses
+  // implement it.
+  virtual void OnIceCandidatesRemoved(
+      const std::vector<cricket::Candidate>& candidates) {}
+
+  // Called when the ICE connection receiving status changes.
+  virtual void OnIceConnectionReceivingChange(bool receiving) {}
+
+  // This is called when a receiver and its track are created.
+  // TODO(zhihuang): Make this pure virtual when all subclasses implement it.
+  // Note: This is called with both Plan B and Unified Plan semantics. Unified
+  // Plan users should prefer OnTrack, OnAddTrack is only called as backwards
+  // compatibility (and is called in the exact same situations as OnTrack).
+  virtual void OnAddTrack(
+      rtc::scoped_refptr<RtpReceiverInterface> receiver,
+      const std::vector<rtc::scoped_refptr<MediaStreamInterface>>& streams) {}
+
+  // This is called when signaling indicates a transceiver will be receiving
+  // media from the remote endpoint. This is fired during a call to
+  // SetRemoteDescription. The receiving track can be accessed by:
+  // |transceiver->receiver()->track()| and its associated streams by
+  // |transceiver->receiver()->streams()|.
+  // Note: This will only be called if Unified Plan semantics are specified.
+  // This behavior is specified in section 2.2.8.2.5 of the "Set the
+  // RTCSessionDescription" algorithm:
+  // https://w3c.github.io/webrtc-pc/#set-description
+  virtual void OnTrack(
+      rtc::scoped_refptr<RtpTransceiverInterface> transceiver) {}
+
+  // TODO(hbos,deadbeef): Add |OnAssociatedStreamsUpdated| with |receiver| and
+  // |streams| as arguments. This should be called when an existing receiver its
+  // associated streams updated. https://crbug.com/webrtc/8315
+  // This may be blocked on supporting multiple streams per sender or else
+  // this may count as the removal and addition of a track?
+  // https://crbug.com/webrtc/7932
+
+  // Called when a receiver is completely removed. This is current (Plan B SDP)
+  // behavior that occurs when processing the removal of a remote track, and is
+  // called when the receiver is removed and the track is muted. When Unified
+  // Plan SDP is supported, transceivers can change direction (and receivers
+  // stopped) but receivers are never removed, so this is never called.
+  // https://w3c.github.io/webrtc-pc/#process-remote-track-removal
+  // TODO(hbos,deadbeef): When Unified Plan SDP is supported and receivers are
+  // no longer removed, deprecate and remove this callback.
+  // TODO(hbos,deadbeef): Make pure virtual when all subclasses implement it.
+  virtual void OnRemoveTrack(
+      rtc::scoped_refptr<RtpReceiverInterface> receiver) {}
+};
+
+// PeerConnectionFactoryInterface is the factory interface used for creating
+// PeerConnection, MediaStream and MediaStreamTrack objects.
+//
+// The simplest method for obtaiing one, CreatePeerConnectionFactory will
+// create the required libjingle threads, socket and network manager factory
+// classes for networking if none are provided, though it requires that the
+// application runs a message loop on the thread that called the method (see
+// explanation below)
+//
+// If an application decides to provide its own threads and/or implementation
+// of networking classes, it should use the alternate
+// CreatePeerConnectionFactory method which accepts threads as input, and use
+// the CreatePeerConnection version that takes a PortAllocator as an argument.
+class PeerConnectionFactoryInterface : public rtc::RefCountInterface {
+ public:
+  class Options {
+   public:
+    Options() : crypto_options(rtc::CryptoOptions::NoGcm()) {}
+
+    // If set to true, created PeerConnections won't enforce any SRTP
+    // requirement, allowing unsecured media. Should only be used for
+    // testing/debugging.
+    bool disable_encryption = false;
+
+    // Deprecated. The only effect of setting this to true is that
+    // CreateDataChannel will fail, which is not that useful.
+    bool disable_sctp_data_channels = false;
+
+    // If set to true, any platform-supported network monitoring capability
+    // won't be used, and instead networks will only be updated via polling.
+    //
+    // This only has an effect if a PeerConnection is created with the default
+    // PortAllocator implementation.
+    bool disable_network_monitor = false;
+
+    // Sets the network types to ignore. For instance, calling this with
+    // ADAPTER_TYPE_ETHERNET | ADAPTER_TYPE_LOOPBACK will ignore Ethernet and
+    // loopback interfaces.
+    int network_ignore_mask = rtc::kDefaultNetworkIgnoreMask;
+
+    // Sets the maximum supported protocol version. The highest version
+    // supported by both ends will be used for the connection, i.e. if one
+    // party supports DTLS 1.0 and the other DTLS 1.2, DTLS 1.0 will be used.
+    rtc::SSLProtocolVersion ssl_max_version = rtc::SSL_PROTOCOL_DTLS_12;
+
+    // Sets crypto related options, e.g. enabled cipher suites.
+    rtc::CryptoOptions crypto_options;
+  };
+
+  // Set the options to be used for subsequently created PeerConnections.
+  virtual void SetOptions(const Options& options) = 0;
+
+  // |allocator| and |cert_generator| may be null, in which case default
+  // implementations will be used.
+  //
+  // |observer| must not be null.
+  //
+  // Note that this method does not take ownership of |observer|; it's the
+  // responsibility of the caller to delete it. It can be safely deleted after
+  // Close has been called on the returned PeerConnection, which ensures no
+  // more observer callbacks will be invoked.
+  virtual rtc::scoped_refptr<PeerConnectionInterface> CreatePeerConnection(
+      const PeerConnectionInterface::RTCConfiguration& configuration,
+      std::unique_ptr<cricket::PortAllocator> allocator,
+      std::unique_ptr<rtc::RTCCertificateGeneratorInterface> cert_generator,
+      PeerConnectionObserver* observer) = 0;
+
+  // Deprecated; should use RTCConfiguration for everything that previously
+  // used constraints.
+  virtual rtc::scoped_refptr<PeerConnectionInterface> CreatePeerConnection(
+      const PeerConnectionInterface::RTCConfiguration& configuration,
+      const MediaConstraintsInterface* constraints,
+      std::unique_ptr<cricket::PortAllocator> allocator,
+      std::unique_ptr<rtc::RTCCertificateGeneratorInterface> cert_generator,
+      PeerConnectionObserver* observer) = 0;
+
+  virtual rtc::scoped_refptr<MediaStreamInterface> CreateLocalMediaStream(
+      const std::string& stream_id) = 0;
+
+  // Creates an AudioSourceInterface.
+  // |options| decides audio processing settings.
+  virtual rtc::scoped_refptr<AudioSourceInterface> CreateAudioSource(
+      const cricket::AudioOptions& options) = 0;
+  // Deprecated - use version above.
+  // Can use CopyConstraintsIntoAudioOptions to bridge the gap.
+  virtual rtc::scoped_refptr<AudioSourceInterface> CreateAudioSource(
+      const MediaConstraintsInterface* constraints) = 0;
+
+  // Creates a VideoTrackSourceInterface from |capturer|.
+  // TODO(deadbeef): We should aim to remove cricket::VideoCapturer from the
+  // API. It's mainly used as a wrapper around webrtc's provided
+  // platform-specific capturers, but these should be refactored to use
+  // VideoTrackSourceInterface directly.
+  // TODO(deadbeef): Make pure virtual once downstream mock PC factory classes
+  // are updated.
+  virtual rtc::scoped_refptr<VideoTrackSourceInterface> CreateVideoSource(
+      std::unique_ptr<cricket::VideoCapturer> capturer) {
+    return nullptr;
+  }
+
+  // A video source creator that allows selection of resolution and frame rate.
+  // |constraints| decides video resolution and frame rate but can be null.
+  // In the null case, use the version above.
+  //
+  // |constraints| is only used for the invocation of this method, and can
+  // safely be destroyed afterwards.
+  virtual rtc::scoped_refptr<VideoTrackSourceInterface> CreateVideoSource(
+      std::unique_ptr<cricket::VideoCapturer> capturer,
+      const MediaConstraintsInterface* constraints) {
+    return nullptr;
+  }
+
+  // Deprecated; please use the versions that take unique_ptrs above.
+  // TODO(deadbeef): Remove these once safe to do so.
+  virtual rtc::scoped_refptr<VideoTrackSourceInterface> CreateVideoSource(
+      cricket::VideoCapturer* capturer) {
+    return CreateVideoSource(std::unique_ptr<cricket::VideoCapturer>(capturer));
+  }
+  virtual rtc::scoped_refptr<VideoTrackSourceInterface> CreateVideoSource(
+      cricket::VideoCapturer* capturer,
+      const MediaConstraintsInterface* constraints) {
+    return CreateVideoSource(std::unique_ptr<cricket::VideoCapturer>(capturer),
+                             constraints);
+  }
+
+  // Creates a new local VideoTrack. The same |source| can be used in several
+  // tracks.
+  virtual rtc::scoped_refptr<VideoTrackInterface> CreateVideoTrack(
+      const std::string& label,
+      VideoTrackSourceInterface* source) = 0;
+
+  // Creates an new AudioTrack. At the moment |source| can be null.
+  virtual rtc::scoped_refptr<AudioTrackInterface>
+      CreateAudioTrack(const std::string& label,
+                       AudioSourceInterface* source) = 0;
+
+  // Starts AEC dump using existing file. Takes ownership of |file| and passes
+  // it on to VoiceEngine (via other objects) immediately, which will take
+  // the ownerhip. If the operation fails, the file will be closed.
+  // A maximum file size in bytes can be specified. When the file size limit is
+  // reached, logging is stopped automatically. If max_size_bytes is set to a
+  // value <= 0, no limit will be used, and logging will continue until the
+  // StopAecDump function is called.
+  virtual bool StartAecDump(rtc::PlatformFile file, int64_t max_size_bytes) = 0;
+
+  // Stops logging the AEC dump.
+  virtual void StopAecDump() = 0;
+
+ protected:
+  // Dtor and ctor protected as objects shouldn't be created or deleted via
+  // this interface.
+  PeerConnectionFactoryInterface() {}
+  ~PeerConnectionFactoryInterface() {} // NOLINT
+};
+
+// Create a new instance of PeerConnectionFactoryInterface.
+//
+// This method relies on the thread it's called on as the "signaling thread"
+// for the PeerConnectionFactory it creates.
+//
+// As such, if the current thread is not already running an rtc::Thread message
+// loop, an application using this method must eventually either call
+// rtc::Thread::Current()->Run(), or call
+// rtc::Thread::Current()->ProcessMessages() within the application's own
+// message loop.
+rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory);
+
+// Create a new instance of PeerConnectionFactoryInterface.
+//
+// |network_thread|, |worker_thread| and |signaling_thread| are
+// the only mandatory parameters.
+//
+// If non-null, a reference is added to |default_adm|, and ownership of
+// |video_encoder_factory| and |video_decoder_factory| is transferred to the
+// returned factory.
+// TODO(deadbeef): Use rtc::scoped_refptr<> and std::unique_ptr<> to make this
+// ownership transfer and ref counting more obvious.
+rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory);
+
+// Create a new instance of PeerConnectionFactoryInterface with optional
+// external audio mixed and audio processing modules.
+//
+// If |audio_mixer| is null, an internal audio mixer will be created and used.
+// If |audio_processing| is null, an internal audio processing module will be
+// created and used.
+rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer,
+    rtc::scoped_refptr<AudioProcessing> audio_processing);
+
+// Create a new instance of PeerConnectionFactoryInterface with optional
+// external audio mixer, audio processing, and fec controller modules.
+//
+// If |audio_mixer| is null, an internal audio mixer will be created and used.
+// If |audio_processing| is null, an internal audio processing module will be
+// created and used.
+// If |fec_controller_factory| is null, an internal fec controller module will
+// be created and used.
+rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer,
+    rtc::scoped_refptr<AudioProcessing> audio_processing,
+    std::unique_ptr<FecControllerFactoryInterface> fec_controller_factory);
+
+// Create a new instance of PeerConnectionFactoryInterface with optional video
+// codec factories. These video factories represents all video codecs, i.e. no
+// extra internal video codecs will be added.
+rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    rtc::scoped_refptr<AudioDeviceModule> default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    std::unique_ptr<VideoEncoderFactory> video_encoder_factory,
+    std::unique_ptr<VideoDecoderFactory> video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer,
+    rtc::scoped_refptr<AudioProcessing> audio_processing);
+
+// Create a new instance of PeerConnectionFactoryInterface with external audio
+// mixer.
+//
+// If |audio_mixer| is null, an internal audio mixer will be created and used.
+rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactoryWithAudioMixer(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer);
+
+// Create a new instance of PeerConnectionFactoryInterface.
+// Same thread is used as worker and network thread.
+inline rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactory(
+    rtc::Thread* worker_and_network_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory) {
+  return CreatePeerConnectionFactory(
+      worker_and_network_thread, worker_and_network_thread, signaling_thread,
+      default_adm, audio_encoder_factory, audio_decoder_factory,
+      video_encoder_factory, video_decoder_factory);
+}
+
+// This is a lower-level version of the CreatePeerConnectionFactory functions
+// above. It's implemented in the "peerconnection" build target, whereas the
+// above methods are only implemented in the broader "libjingle_peerconnection"
+// build target, which pulls in the implementations of every module webrtc may
+// use.
+//
+// If an application knows it will only require certain modules, it can reduce
+// webrtc's impact on its binary size by depending only on the "peerconnection"
+// target and the modules the application requires, using
+// CreateModularPeerConnectionFactory instead of one of the
+// CreatePeerConnectionFactory methods above. For example, if an application
+// only uses WebRTC for audio, it can pass in null pointers for the
+// video-specific interfaces, and omit the corresponding modules from its
+// build.
+//
+// If |network_thread| or |worker_thread| are null, the PeerConnectionFactory
+// will create the necessary thread internally. If |signaling_thread| is null,
+// the PeerConnectionFactory will use the thread on which this method is called
+// as the signaling thread, wrapping it in an rtc::Thread object if needed.
+//
+// If non-null, a reference is added to |default_adm|, and ownership of
+// |video_encoder_factory| and |video_decoder_factory| is transferred to the
+// returned factory.
+//
+// If |audio_mixer| is null, an internal audio mixer will be created and used.
+//
+// TODO(deadbeef): Use rtc::scoped_refptr<> and std::unique_ptr<> to make this
+// ownership transfer and ref counting more obvious.
+//
+// TODO(deadbeef): Encapsulate these modules in a struct, so that when a new
+// module is inevitably exposed, we can just add a field to the struct instead
+// of adding a whole new CreateModularPeerConnectionFactory overload.
+rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreateModularPeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    std::unique_ptr<cricket::MediaEngineInterface> media_engine,
+    std::unique_ptr<CallFactoryInterface> call_factory,
+    std::unique_ptr<RtcEventLogFactoryInterface> event_log_factory);
+
+rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreateModularPeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    std::unique_ptr<cricket::MediaEngineInterface> media_engine,
+    std::unique_ptr<CallFactoryInterface> call_factory,
+    std::unique_ptr<RtcEventLogFactoryInterface> event_log_factory,
+    std::unique_ptr<FecControllerFactoryInterface> fec_controller_factory);
+
+}  // namespace webrtc
+
+#endif  // API_PEERCONNECTIONINTERFACE_H_
diff --git a/api/peerconnectionproxy.h b/api/peerconnectionproxy.h
new file mode 100644
index 0000000..7235f5b
--- /dev/null
+++ b/api/peerconnectionproxy.h
@@ -0,0 +1,148 @@
+/*
+ *  Copyright 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_PEERCONNECTIONPROXY_H_
+#define API_PEERCONNECTIONPROXY_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/peerconnectioninterface.h"
+#include "api/proxy.h"
+
+namespace webrtc {
+
+// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods
+// are called on is an implementation detail.
+BEGIN_SIGNALING_PROXY_MAP(PeerConnection)
+  PROXY_SIGNALING_THREAD_DESTRUCTOR()
+  PROXY_METHOD0(rtc::scoped_refptr<StreamCollectionInterface>, local_streams)
+  PROXY_METHOD0(rtc::scoped_refptr<StreamCollectionInterface>, remote_streams)
+  PROXY_METHOD1(bool, AddStream, MediaStreamInterface*)
+  PROXY_METHOD1(void, RemoveStream, MediaStreamInterface*)
+  PROXY_METHOD2(RTCErrorOr<rtc::scoped_refptr<RtpSenderInterface>>,
+                AddTrack,
+                rtc::scoped_refptr<MediaStreamTrackInterface>,
+                const std::vector<std::string>&);
+  PROXY_METHOD2(rtc::scoped_refptr<RtpSenderInterface>,
+                AddTrack,
+                MediaStreamTrackInterface*,
+                std::vector<MediaStreamInterface*>)
+  PROXY_METHOD1(bool, RemoveTrack, RtpSenderInterface*)
+  PROXY_METHOD1(RTCErrorOr<rtc::scoped_refptr<RtpTransceiverInterface>>,
+                AddTransceiver,
+                rtc::scoped_refptr<MediaStreamTrackInterface>)
+  PROXY_METHOD2(RTCErrorOr<rtc::scoped_refptr<RtpTransceiverInterface>>,
+                AddTransceiver,
+                rtc::scoped_refptr<MediaStreamTrackInterface>,
+                const RtpTransceiverInit&)
+  PROXY_METHOD1(RTCErrorOr<rtc::scoped_refptr<RtpTransceiverInterface>>,
+                AddTransceiver,
+                cricket::MediaType)
+  PROXY_METHOD2(RTCErrorOr<rtc::scoped_refptr<RtpTransceiverInterface>>,
+                AddTransceiver,
+                cricket::MediaType,
+                const RtpTransceiverInit&)
+  PROXY_METHOD1(rtc::scoped_refptr<DtmfSenderInterface>,
+                CreateDtmfSender,
+                AudioTrackInterface*)
+  PROXY_METHOD2(rtc::scoped_refptr<RtpSenderInterface>,
+                CreateSender,
+                const std::string&,
+                const std::string&)
+  PROXY_CONSTMETHOD0(std::vector<rtc::scoped_refptr<RtpSenderInterface>>,
+                     GetSenders)
+  PROXY_CONSTMETHOD0(std::vector<rtc::scoped_refptr<RtpReceiverInterface>>,
+                     GetReceivers)
+  PROXY_CONSTMETHOD0(std::vector<rtc::scoped_refptr<RtpTransceiverInterface>>,
+                     GetTransceivers)
+  PROXY_METHOD3(bool,
+                GetStats,
+                StatsObserver*,
+                MediaStreamTrackInterface*,
+                StatsOutputLevel)
+  PROXY_METHOD1(void, GetStats, RTCStatsCollectorCallback*)
+  PROXY_METHOD2(rtc::scoped_refptr<DataChannelInterface>,
+                CreateDataChannel,
+                const std::string&,
+                const DataChannelInit*)
+  PROXY_CONSTMETHOD0(const SessionDescriptionInterface*, local_description)
+  PROXY_CONSTMETHOD0(const SessionDescriptionInterface*, remote_description)
+  PROXY_CONSTMETHOD0(const SessionDescriptionInterface*,
+                     pending_local_description)
+  PROXY_CONSTMETHOD0(const SessionDescriptionInterface*,
+                     pending_remote_description)
+  PROXY_CONSTMETHOD0(const SessionDescriptionInterface*,
+                     current_local_description)
+  PROXY_CONSTMETHOD0(const SessionDescriptionInterface*,
+                     current_remote_description)
+  PROXY_METHOD2(void,
+                CreateOffer,
+                CreateSessionDescriptionObserver*,
+                const MediaConstraintsInterface*)
+  PROXY_METHOD2(void,
+                CreateAnswer,
+                CreateSessionDescriptionObserver*,
+                const MediaConstraintsInterface*)
+  PROXY_METHOD2(void,
+                CreateOffer,
+                CreateSessionDescriptionObserver*,
+                const RTCOfferAnswerOptions&)
+  PROXY_METHOD2(void,
+                CreateAnswer,
+                CreateSessionDescriptionObserver*,
+                const RTCOfferAnswerOptions&)
+  PROXY_METHOD2(void,
+                SetLocalDescription,
+                SetSessionDescriptionObserver*,
+                SessionDescriptionInterface*)
+  PROXY_METHOD2(void,
+                SetRemoteDescription,
+                SetSessionDescriptionObserver*,
+                SessionDescriptionInterface*)
+  PROXY_METHOD2(void,
+                SetRemoteDescription,
+                std::unique_ptr<SessionDescriptionInterface>,
+                rtc::scoped_refptr<SetRemoteDescriptionObserverInterface>);
+  PROXY_METHOD0(PeerConnectionInterface::RTCConfiguration, GetConfiguration);
+  PROXY_METHOD2(bool,
+                SetConfiguration,
+                const PeerConnectionInterface::RTCConfiguration&,
+                RTCError*);
+  PROXY_METHOD1(bool,
+                SetConfiguration,
+                const PeerConnectionInterface::RTCConfiguration&);
+  PROXY_METHOD1(bool, AddIceCandidate, const IceCandidateInterface*)
+  PROXY_METHOD1(bool,
+                RemoveIceCandidates,
+                const std::vector<cricket::Candidate>&);
+  PROXY_METHOD1(void, SetAudioPlayout, bool)
+  PROXY_METHOD1(void, SetAudioRecording, bool)
+  PROXY_METHOD1(void, RegisterUMAObserver, UMAObserver*)
+  PROXY_METHOD1(RTCError, SetBitrate, const BitrateParameters&);
+  PROXY_METHOD1(void,
+                SetBitrateAllocationStrategy,
+                std::unique_ptr<rtc::BitrateAllocationStrategy>);
+  PROXY_METHOD0(SignalingState, signaling_state)
+  PROXY_METHOD0(IceConnectionState, ice_connection_state)
+  PROXY_METHOD0(IceGatheringState, ice_gathering_state)
+  PROXY_METHOD2(bool, StartRtcEventLog, rtc::PlatformFile, int64_t)
+  PROXY_METHOD2(bool,
+                StartRtcEventLog,
+                std::unique_ptr<RtcEventLogOutput>,
+                int64_t);
+  PROXY_METHOD0(void, StopRtcEventLog)
+  PROXY_METHOD0(void, Close)
+END_PROXY_MAP()
+
+}  // namespace webrtc
+
+#endif  // API_PEERCONNECTIONPROXY_H_
diff --git a/api/proxy.cc b/api/proxy.cc
new file mode 100644
index 0000000..c86bddf
--- /dev/null
+++ b/api/proxy.cc
@@ -0,0 +1,38 @@
+/*
+ *  Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/proxy.h"
+
+namespace webrtc {
+namespace internal {
+
+SynchronousMethodCall::SynchronousMethodCall(rtc::MessageHandler* proxy)
+    : e_(), proxy_(proxy) {}
+
+SynchronousMethodCall::~SynchronousMethodCall() = default;
+
+void SynchronousMethodCall::Invoke(const rtc::Location& posted_from,
+                                   rtc::Thread* t) {
+  if (t->IsCurrent()) {
+    proxy_->OnMessage(nullptr);
+  } else {
+    e_.reset(new rtc::Event(false, false));
+    t->Post(posted_from, this, 0);
+    e_->Wait(rtc::Event::kForever);
+  }
+}
+
+void SynchronousMethodCall::OnMessage(rtc::Message*) {
+  proxy_->OnMessage(nullptr);
+  e_->Set();
+}
+
+}  // namespace internal
+}  // namespace webrtc
diff --git a/api/proxy.h b/api/proxy.h
new file mode 100644
index 0000000..dd7182e
--- /dev/null
+++ b/api/proxy.h
@@ -0,0 +1,572 @@
+/*
+ *  Copyright 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains Macros for creating proxies for webrtc MediaStream and
+// PeerConnection classes.
+// TODO(deadbeef): Move this to pc/; this is part of the implementation.
+
+//
+// Example usage:
+//
+// class TestInterface : public rtc::RefCountInterface {
+//  public:
+//   std::string FooA() = 0;
+//   std::string FooB(bool arg1) const = 0;
+//   std::string FooC(bool arg1) = 0;
+//  };
+//
+// Note that return types can not be a const reference.
+//
+// class Test : public TestInterface {
+// ... implementation of the interface.
+// };
+//
+// BEGIN_PROXY_MAP(Test)
+//   PROXY_SIGNALING_THREAD_DESTRUCTOR()
+//   PROXY_METHOD0(std::string, FooA)
+//   PROXY_CONSTMETHOD1(std::string, FooB, arg1)
+//   PROXY_WORKER_METHOD1(std::string, FooC, arg1)
+// END_PROXY_MAP()
+//
+// Where the destructor and first two methods are invoked on the signaling
+// thread, and the third is invoked on the worker thread.
+//
+// The proxy can be created using
+//
+//   TestProxy::Create(Thread* signaling_thread, Thread* worker_thread,
+//                     TestInterface*).
+//
+// The variant defined with BEGIN_SIGNALING_PROXY_MAP is unaware of
+// the worker thread, and invokes all methods on the signaling thread.
+//
+// The variant defined with BEGIN_OWNED_PROXY_MAP does not use
+// refcounting, and instead just takes ownership of the object being proxied.
+
+#ifndef API_PROXY_H_
+#define API_PROXY_H_
+
+#include <memory>
+#include <utility>
+
+#include "rtc_base/event.h"
+#include "rtc_base/refcountedobject.h"
+#include "rtc_base/thread.h"
+
+namespace webrtc {
+
+template <typename R>
+class ReturnType {
+ public:
+  template<typename C, typename M>
+  void Invoke(C* c, M m) { r_ = (c->*m)(); }
+  template <typename C, typename M, typename T1>
+  void Invoke(C* c, M m, T1 a1) {
+    r_ = (c->*m)(std::move(a1));
+  }
+  template <typename C, typename M, typename T1, typename T2>
+  void Invoke(C* c, M m, T1 a1, T2 a2) {
+    r_ = (c->*m)(std::move(a1), std::move(a2));
+  }
+  template <typename C, typename M, typename T1, typename T2, typename T3>
+  void Invoke(C* c, M m, T1 a1, T2 a2, T3 a3) {
+    r_ = (c->*m)(std::move(a1), std::move(a2), std::move(a3));
+  }
+  template<typename C, typename M, typename T1, typename T2, typename T3,
+      typename T4>
+  void Invoke(C* c, M m, T1 a1, T2 a2, T3 a3, T4 a4) {
+    r_ = (c->*m)(std::move(a1), std::move(a2), std::move(a3), std::move(a4));
+  }
+  template<typename C, typename M, typename T1, typename T2, typename T3,
+     typename T4, typename T5>
+  void Invoke(C* c, M m, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5) {
+    r_ = (c->*m)(std::move(a1), std::move(a2), std::move(a3), std::move(a4),
+                 std::move(a5));
+  }
+
+  R moved_result() { return std::move(r_); }
+
+ private:
+  R r_;
+};
+
+template <>
+class ReturnType<void> {
+ public:
+  template<typename C, typename M>
+  void Invoke(C* c, M m) { (c->*m)(); }
+  template <typename C, typename M, typename T1>
+  void Invoke(C* c, M m, T1 a1) {
+    (c->*m)(std::move(a1));
+  }
+  template <typename C, typename M, typename T1, typename T2>
+  void Invoke(C* c, M m, T1 a1, T2 a2) {
+    (c->*m)(std::move(a1), std::move(a2));
+  }
+  template <typename C, typename M, typename T1, typename T2, typename T3>
+  void Invoke(C* c, M m, T1 a1, T2 a2, T3 a3) {
+    (c->*m)(std::move(a1), std::move(a2), std::move(a3));
+  }
+
+  void moved_result() {}
+};
+
+namespace internal {
+
+class SynchronousMethodCall
+    : public rtc::MessageData,
+      public rtc::MessageHandler {
+ public:
+  explicit SynchronousMethodCall(rtc::MessageHandler* proxy);
+  ~SynchronousMethodCall() override;
+
+  void Invoke(const rtc::Location& posted_from, rtc::Thread* t);
+
+ private:
+  void OnMessage(rtc::Message*) override;
+
+  std::unique_ptr<rtc::Event> e_;
+  rtc::MessageHandler* proxy_;
+};
+
+}  // namespace internal
+
+template <typename C, typename R>
+class MethodCall0 : public rtc::Message,
+                    public rtc::MessageHandler {
+ public:
+  typedef R (C::*Method)();
+  MethodCall0(C* c, Method m) : c_(c), m_(m) {}
+
+  R Marshal(const rtc::Location& posted_from, rtc::Thread* t) {
+    internal::SynchronousMethodCall(this).Invoke(posted_from, t);
+    return r_.moved_result();
+  }
+
+ private:
+  void OnMessage(rtc::Message*) {  r_.Invoke(c_, m_); }
+
+  C* c_;
+  Method m_;
+  ReturnType<R> r_;
+};
+
+template <typename C, typename R>
+class ConstMethodCall0 : public rtc::Message,
+                         public rtc::MessageHandler {
+ public:
+  typedef R (C::*Method)() const;
+  ConstMethodCall0(C* c, Method m) : c_(c), m_(m) {}
+
+  R Marshal(const rtc::Location& posted_from, rtc::Thread* t) {
+    internal::SynchronousMethodCall(this).Invoke(posted_from, t);
+    return r_.moved_result();
+  }
+
+ private:
+  void OnMessage(rtc::Message*) { r_.Invoke(c_, m_); }
+
+  C* c_;
+  Method m_;
+  ReturnType<R> r_;
+};
+
+template <typename C, typename R,  typename T1>
+class MethodCall1 : public rtc::Message,
+                    public rtc::MessageHandler {
+ public:
+  typedef R (C::*Method)(T1 a1);
+  MethodCall1(C* c, Method m, T1 a1) : c_(c), m_(m), a1_(std::move(a1)) {}
+
+  R Marshal(const rtc::Location& posted_from, rtc::Thread* t) {
+    internal::SynchronousMethodCall(this).Invoke(posted_from, t);
+    return r_.moved_result();
+  }
+
+ private:
+  void OnMessage(rtc::Message*) { r_.Invoke(c_, m_, std::move(a1_)); }
+
+  C* c_;
+  Method m_;
+  ReturnType<R> r_;
+  T1 a1_;
+};
+
+template <typename C, typename R,  typename T1>
+class ConstMethodCall1 : public rtc::Message,
+                         public rtc::MessageHandler {
+ public:
+  typedef R (C::*Method)(T1 a1) const;
+  ConstMethodCall1(C* c, Method m, T1 a1) : c_(c), m_(m), a1_(std::move(a1)) {}
+
+  R Marshal(const rtc::Location& posted_from, rtc::Thread* t) {
+    internal::SynchronousMethodCall(this).Invoke(posted_from, t);
+    return r_.moved_result();
+  }
+
+ private:
+  void OnMessage(rtc::Message*) { r_.Invoke(c_, m_, std::move(a1_)); }
+
+  C* c_;
+  Method m_;
+  ReturnType<R> r_;
+  T1 a1_;
+};
+
+template <typename C, typename R, typename T1, typename T2>
+class MethodCall2 : public rtc::Message,
+                    public rtc::MessageHandler {
+ public:
+  typedef R (C::*Method)(T1 a1, T2 a2);
+  MethodCall2(C* c, Method m, T1 a1, T2 a2)
+      : c_(c), m_(m), a1_(std::move(a1)), a2_(std::move(a2)) {}
+
+  R Marshal(const rtc::Location& posted_from, rtc::Thread* t) {
+    internal::SynchronousMethodCall(this).Invoke(posted_from, t);
+    return r_.moved_result();
+  }
+
+ private:
+  void OnMessage(rtc::Message*) {
+    r_.Invoke(c_, m_, std::move(a1_), std::move(a2_));
+  }
+
+  C* c_;
+  Method m_;
+  ReturnType<R> r_;
+  T1 a1_;
+  T2 a2_;
+};
+
+template <typename C, typename R, typename T1, typename T2, typename T3>
+class MethodCall3 : public rtc::Message,
+                    public rtc::MessageHandler {
+ public:
+  typedef R (C::*Method)(T1 a1, T2 a2, T3 a3);
+  MethodCall3(C* c, Method m, T1 a1, T2 a2, T3 a3)
+      : c_(c),
+        m_(m),
+        a1_(std::move(a1)),
+        a2_(std::move(a2)),
+        a3_(std::move(a3)) {}
+
+  R Marshal(const rtc::Location& posted_from, rtc::Thread* t) {
+    internal::SynchronousMethodCall(this).Invoke(posted_from, t);
+    return r_.moved_result();
+  }
+
+ private:
+  void OnMessage(rtc::Message*) {
+    r_.Invoke(c_, m_, std::move(a1_), std::move(a2_), std::move(a3_));
+  }
+
+  C* c_;
+  Method m_;
+  ReturnType<R> r_;
+  T1 a1_;
+  T2 a2_;
+  T3 a3_;
+};
+
+template <typename C, typename R, typename T1, typename T2, typename T3,
+    typename T4>
+class MethodCall4 : public rtc::Message,
+                    public rtc::MessageHandler {
+ public:
+  typedef R (C::*Method)(T1 a1, T2 a2, T3 a3, T4 a4);
+  MethodCall4(C* c, Method m, T1 a1, T2 a2, T3 a3, T4 a4)
+      : c_(c),
+        m_(m),
+        a1_(std::move(a1)),
+        a2_(std::move(a2)),
+        a3_(std::move(a3)),
+        a4_(std::move(a4)) {}
+
+  R Marshal(const rtc::Location& posted_from, rtc::Thread* t) {
+    internal::SynchronousMethodCall(this).Invoke(posted_from, t);
+    return r_.moved_result();
+  }
+
+ private:
+  void OnMessage(rtc::Message*) {
+    r_.Invoke(c_, m_, std::move(a1_), std::move(a2_), std::move(a3_),
+              std::move(a4_));
+  }
+
+  C* c_;
+  Method m_;
+  ReturnType<R> r_;
+  T1 a1_;
+  T2 a2_;
+  T3 a3_;
+  T4 a4_;
+};
+
+template <typename C, typename R, typename T1, typename T2, typename T3,
+    typename T4, typename T5>
+class MethodCall5 : public rtc::Message,
+                    public rtc::MessageHandler {
+ public:
+  typedef R (C::*Method)(T1 a1, T2 a2, T3 a3, T4 a4, T5 a5);
+  MethodCall5(C* c, Method m, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5)
+      : c_(c),
+        m_(m),
+        a1_(std::move(a1)),
+        a2_(std::move(a2)),
+        a3_(std::move(a3)),
+        a4_(std::move(a4)),
+        a5_(std::move(a5)) {}
+
+  R Marshal(const rtc::Location& posted_from, rtc::Thread* t) {
+    internal::SynchronousMethodCall(this).Invoke(posted_from, t);
+    return r_.moved_result();
+  }
+
+ private:
+  void OnMessage(rtc::Message*) {
+    r_.Invoke(c_, m_, std::move(a1_), std::move(a2_), std::move(a3_),
+              std::move(a4_), std::move(a5_));
+  }
+
+  C* c_;
+  Method m_;
+  ReturnType<R> r_;
+  T1 a1_;
+  T2 a2_;
+  T3 a3_;
+  T4 a4_;
+  T5 a5_;
+};
+
+
+// Helper macros to reduce code duplication.
+#define PROXY_MAP_BOILERPLATE(c)                          \
+  template <class INTERNAL_CLASS>                         \
+  class c##ProxyWithInternal;                             \
+  typedef c##ProxyWithInternal<c##Interface> c##Proxy;    \
+  template <class INTERNAL_CLASS>                         \
+  class c##ProxyWithInternal : public c##Interface {      \
+   protected:                                             \
+    typedef c##Interface C;                               \
+                                                          \
+   public:                                                \
+    const INTERNAL_CLASS* internal() const { return c_; } \
+    INTERNAL_CLASS* internal() { return c_; }
+
+#define END_PROXY_MAP() \
+  };
+
+#define SIGNALING_PROXY_MAP_BOILERPLATE(c)                               \
+ protected:                                                              \
+  c##ProxyWithInternal(rtc::Thread* signaling_thread, INTERNAL_CLASS* c) \
+      : signaling_thread_(signaling_thread), c_(c) {}                    \
+                                                                         \
+ private:                                                                \
+  mutable rtc::Thread* signaling_thread_;
+
+#define WORKER_PROXY_MAP_BOILERPLATE(c)                               \
+ protected:                                                           \
+  c##ProxyWithInternal(rtc::Thread* signaling_thread,                 \
+                       rtc::Thread* worker_thread, INTERNAL_CLASS* c) \
+      : signaling_thread_(signaling_thread),                          \
+        worker_thread_(worker_thread),                                \
+        c_(c) {}                                                      \
+                                                                      \
+ private:                                                             \
+  mutable rtc::Thread* signaling_thread_;                             \
+  mutable rtc::Thread* worker_thread_;
+
+// Note that the destructor is protected so that the proxy can only be
+// destroyed via RefCountInterface.
+#define REFCOUNTED_PROXY_MAP_BOILERPLATE(c)            \
+ protected:                                            \
+  ~c##ProxyWithInternal() {                            \
+    MethodCall0<c##ProxyWithInternal, void> call(      \
+        this, &c##ProxyWithInternal::DestroyInternal); \
+    call.Marshal(RTC_FROM_HERE, destructor_thread());  \
+  }                                                    \
+                                                       \
+ private:                                              \
+  void DestroyInternal() { c_ = nullptr; }             \
+  rtc::scoped_refptr<INTERNAL_CLASS> c_;
+
+// Note: This doesn't use a unique_ptr, because it intends to handle a corner
+// case where an object's deletion triggers a callback that calls back into
+// this proxy object. If relying on a unique_ptr to delete the object, its
+// inner pointer would be set to null before this reentrant callback would have
+// a chance to run, resulting in a segfault.
+#define OWNED_PROXY_MAP_BOILERPLATE(c)                 \
+ public:                                               \
+  ~c##ProxyWithInternal() {                            \
+    MethodCall0<c##ProxyWithInternal, void> call(      \
+        this, &c##ProxyWithInternal::DestroyInternal); \
+    call.Marshal(RTC_FROM_HERE, destructor_thread());  \
+  }                                                    \
+                                                       \
+ private:                                              \
+  void DestroyInternal() { delete c_; }                \
+  INTERNAL_CLASS* c_;
+
+#define BEGIN_SIGNALING_PROXY_MAP(c)                                         \
+  PROXY_MAP_BOILERPLATE(c)                                                   \
+  SIGNALING_PROXY_MAP_BOILERPLATE(c)                                         \
+  REFCOUNTED_PROXY_MAP_BOILERPLATE(c)                                        \
+ public:                                                                     \
+  static rtc::scoped_refptr<c##ProxyWithInternal> Create(                    \
+      rtc::Thread* signaling_thread, INTERNAL_CLASS* c) {                    \
+    return new rtc::RefCountedObject<c##ProxyWithInternal>(signaling_thread, \
+                                                           c);               \
+  }
+
+#define BEGIN_PROXY_MAP(c)                                                    \
+  PROXY_MAP_BOILERPLATE(c)                                                    \
+  WORKER_PROXY_MAP_BOILERPLATE(c)                                             \
+  REFCOUNTED_PROXY_MAP_BOILERPLATE(c)                                         \
+ public:                                                                      \
+  static rtc::scoped_refptr<c##ProxyWithInternal> Create(                     \
+      rtc::Thread* signaling_thread, rtc::Thread* worker_thread,              \
+      INTERNAL_CLASS* c) {                                                    \
+    return new rtc::RefCountedObject<c##ProxyWithInternal>(signaling_thread,  \
+                                                           worker_thread, c); \
+  }
+
+#define BEGIN_OWNED_PROXY_MAP(c)                                   \
+  PROXY_MAP_BOILERPLATE(c)                                         \
+  WORKER_PROXY_MAP_BOILERPLATE(c)                                  \
+  OWNED_PROXY_MAP_BOILERPLATE(c)                                   \
+ public:                                                           \
+  static std::unique_ptr<c##Interface> Create(                     \
+      rtc::Thread* signaling_thread, rtc::Thread* worker_thread,   \
+      std::unique_ptr<INTERNAL_CLASS> c) {                         \
+    return std::unique_ptr<c##Interface>(new c##ProxyWithInternal( \
+        signaling_thread, worker_thread, c.release()));            \
+  }
+
+#define PROXY_SIGNALING_THREAD_DESTRUCTOR()                            \
+ private:                                                              \
+  rtc::Thread* destructor_thread() const { return signaling_thread_; } \
+                                                                       \
+ public:  // NOLINTNEXTLINE
+
+#define PROXY_WORKER_THREAD_DESTRUCTOR()                            \
+ private:                                                           \
+  rtc::Thread* destructor_thread() const { return worker_thread_; } \
+                                                                    \
+ public:  // NOLINTNEXTLINE
+
+#define PROXY_METHOD0(r, method)                           \
+  r method() override {                                    \
+    MethodCall0<C, r> call(c_, &C::method);                \
+    return call.Marshal(RTC_FROM_HERE, signaling_thread_); \
+  }
+
+#define PROXY_CONSTMETHOD0(r, method)                      \
+  r method() const override {                              \
+    ConstMethodCall0<C, r> call(c_, &C::method);           \
+    return call.Marshal(RTC_FROM_HERE, signaling_thread_); \
+  }
+
+#define PROXY_METHOD1(r, method, t1)                           \
+  r method(t1 a1) override {                                   \
+    MethodCall1<C, r, t1> call(c_, &C::method, std::move(a1)); \
+    return call.Marshal(RTC_FROM_HERE, signaling_thread_);     \
+  }
+
+#define PROXY_CONSTMETHOD1(r, method, t1)                           \
+  r method(t1 a1) const override {                                  \
+    ConstMethodCall1<C, r, t1> call(c_, &C::method, std::move(a1)); \
+    return call.Marshal(RTC_FROM_HERE, signaling_thread_);          \
+  }
+
+#define PROXY_METHOD2(r, method, t1, t2)                          \
+  r method(t1 a1, t2 a2) override {                               \
+    MethodCall2<C, r, t1, t2> call(c_, &C::method, std::move(a1), \
+                                   std::move(a2));                \
+    return call.Marshal(RTC_FROM_HERE, signaling_thread_);        \
+  }
+
+#define PROXY_METHOD3(r, method, t1, t2, t3)                          \
+  r method(t1 a1, t2 a2, t3 a3) override {                            \
+    MethodCall3<C, r, t1, t2, t3> call(c_, &C::method, std::move(a1), \
+                                       std::move(a2), std::move(a3)); \
+    return call.Marshal(RTC_FROM_HERE, signaling_thread_);            \
+  }
+
+#define PROXY_METHOD4(r, method, t1, t2, t3, t4)                          \
+  r method(t1 a1, t2 a2, t3 a3, t4 a4) override {                         \
+    MethodCall4<C, r, t1, t2, t3, t4> call(c_, &C::method, std::move(a1), \
+                                           std::move(a2), std::move(a3),  \
+                                           std::move(a4));                \
+    return call.Marshal(RTC_FROM_HERE, signaling_thread_);                \
+  }
+
+#define PROXY_METHOD5(r, method, t1, t2, t3, t4, t5)                          \
+  r method(t1 a1, t2 a2, t3 a3, t4 a4, t5 a5) override {                      \
+    MethodCall5<C, r, t1, t2, t3, t4, t5> call(c_, &C::method, std::move(a1), \
+                                               std::move(a2), std::move(a3),  \
+                                               std::move(a4), std::move(a5)); \
+    return call.Marshal(RTC_FROM_HERE, signaling_thread_);                    \
+  }
+
+// Define methods which should be invoked on the worker thread.
+#define PROXY_WORKER_METHOD0(r, method)                 \
+  r method() override {                                 \
+    MethodCall0<C, r> call(c_, &C::method);             \
+    return call.Marshal(RTC_FROM_HERE, worker_thread_); \
+  }
+
+#define PROXY_WORKER_CONSTMETHOD0(r, method)            \
+  r method() const override {                           \
+    ConstMethodCall0<C, r> call(c_, &C::method);        \
+    return call.Marshal(RTC_FROM_HERE, worker_thread_); \
+  }
+
+#define PROXY_WORKER_METHOD1(r, method, t1)                    \
+  r method(t1 a1) override {                                   \
+    MethodCall1<C, r, t1> call(c_, &C::method, std::move(a1)); \
+    return call.Marshal(RTC_FROM_HERE, worker_thread_);        \
+  }
+
+#define PROXY_WORKER_CONSTMETHOD1(r, method, t1)                    \
+  r method(t1 a1) const override {                                  \
+    ConstMethodCall1<C, r, t1> call(c_, &C::method, std::move(a1)); \
+    return call.Marshal(RTC_FROM_HERE, worker_thread_);             \
+  }
+
+#define PROXY_WORKER_METHOD2(r, method, t1, t2)                   \
+  r method(t1 a1, t2 a2) override {                               \
+    MethodCall2<C, r, t1, t2> call(c_, &C::method, std::move(a1), \
+                                   std::move(a2));                \
+    return call.Marshal(RTC_FROM_HERE, worker_thread_);           \
+  }
+
+#define PROXY_WORKER_CONSTMETHOD2(r, method, t1, t2)                   \
+  r method(t1 a1, t2 a2) const override {                              \
+    ConstMethodCall2<C, r, t1, t2> call(c_, &C::method, std::move(a1), \
+                                        std::move(a2));                \
+    return call.Marshal(RTC_FROM_HERE, worker_thread_);                \
+  }
+
+#define PROXY_WORKER_METHOD3(r, method, t1, t2, t3)                   \
+  r method(t1 a1, t2 a2, t3 a3) override {                            \
+    MethodCall3<C, r, t1, t2, t3> call(c_, &C::method, std::move(a1), \
+                                       std::move(a2), std::move(a3)); \
+    return call.Marshal(RTC_FROM_HERE, worker_thread_);               \
+  }
+
+#define PROXY_WORKER_CONSTMETHOD3(r, method, t1, t2)                       \
+  r method(t1 a1, t2 a2, t3 a3) const override {                           \
+    ConstMethodCall3<C, r, t1, t2, t3> call(c_, &C::method, std::move(a1), \
+                                            std::move(a2), std::move(a3)); \
+    return call.Marshal(RTC_FROM_HERE, worker_thread_);                    \
+  }
+
+}  // namespace webrtc
+
+#endif  //  API_PROXY_H_
diff --git a/api/refcountedbase.h b/api/refcountedbase.h
new file mode 100644
index 0000000..8c26efd
--- /dev/null
+++ b/api/refcountedbase.h
@@ -0,0 +1,43 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef API_REFCOUNTEDBASE_H_
+#define API_REFCOUNTEDBASE_H_
+
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/refcounter.h"
+
+namespace rtc {
+
+class RefCountedBase {
+ public:
+  RefCountedBase() = default;
+
+  void AddRef() const { ref_count_.IncRef(); }
+  RefCountReleaseStatus Release() const {
+    const auto status = ref_count_.DecRef();
+    if (status == RefCountReleaseStatus::kDroppedLastRef) {
+      delete this;
+    }
+    return status;
+  }
+
+ protected:
+  virtual ~RefCountedBase() = default;
+
+ private:
+  mutable webrtc::webrtc_impl::RefCounter ref_count_{0};
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(RefCountedBase);
+};
+
+}  // namespace rtc
+
+#endif  // API_REFCOUNTEDBASE_H_
diff --git a/api/rtcerror.cc b/api/rtcerror.cc
new file mode 100644
index 0000000..f9a31d0
--- /dev/null
+++ b/api/rtcerror.cc
@@ -0,0 +1,101 @@
+/*
+ *  Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/rtcerror.h"
+
+#include "rtc_base/arraysize.h"
+
+namespace {
+
+static const char* const kRTCErrorTypeNames[] = {
+    "NONE",
+    "UNSUPPORTED_OPERATION",
+    "UNSUPPORTED_PARAMETER",
+    "INVALID_PARAMETER",
+    "INVALID_RANGE",
+    "SYNTAX_ERROR",
+    "INVALID_STATE",
+    "INVALID_MODIFICATION",
+    "NETWORK_ERROR",
+    "RESOURCE_EXHAUSTED",
+    "INTERNAL_ERROR",
+};
+static_assert(static_cast<int>(webrtc::RTCErrorType::INTERNAL_ERROR) ==
+                  (arraysize(kRTCErrorTypeNames) - 1),
+              "kRTCErrorTypeNames must have as many strings as RTCErrorType "
+              "has values.");
+
+}  // namespace
+
+namespace webrtc {
+
+RTCError::RTCError(RTCError&& other)
+    : type_(other.type_), have_string_message_(other.have_string_message_) {
+  if (have_string_message_) {
+    new (&string_message_) std::string(std::move(other.string_message_));
+  } else {
+    static_message_ = other.static_message_;
+  }
+}
+
+RTCError& RTCError::operator=(RTCError&& other) {
+  type_ = other.type_;
+  if (other.have_string_message_) {
+    set_message(std::move(other.string_message_));
+  } else {
+    set_message(other.static_message_);
+  }
+  return *this;
+}
+
+RTCError::~RTCError() {
+  // If we hold a message string that was built, rather than a static string,
+  // we need to delete it.
+  if (have_string_message_) {
+    string_message_.~basic_string();
+  }
+}
+
+// static
+RTCError RTCError::OK() {
+  return RTCError();
+}
+
+const char* RTCError::message() const {
+  if (have_string_message_) {
+    return string_message_.c_str();
+  } else {
+    return static_message_;
+  }
+}
+
+void RTCError::set_message(const char* message) {
+  if (have_string_message_) {
+    string_message_.~basic_string();
+    have_string_message_ = false;
+  }
+  static_message_ = message;
+}
+
+void RTCError::set_message(std::string&& message) {
+  if (!have_string_message_) {
+    new (&string_message_) std::string(std::move(message));
+    have_string_message_ = true;
+  } else {
+    string_message_ = message;
+  }
+}
+
+std::ostream& operator<<(std::ostream& stream, RTCErrorType error) {
+  int index = static_cast<int>(error);
+  return stream << kRTCErrorTypeNames[index];
+}
+
+}  // namespace webrtc
diff --git a/api/rtcerror.h b/api/rtcerror.h
new file mode 100644
index 0000000..962f46d
--- /dev/null
+++ b/api/rtcerror.h
@@ -0,0 +1,300 @@
+/*
+ *  Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_RTCERROR_H_
+#define API_RTCERROR_H_
+
+#include <ostream>
+#include <string>
+#include <utility>  // For std::move.
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+// Enumeration to represent distinct classes of errors that an application
+// may wish to act upon differently. These roughly map to DOMExceptions or
+// RTCError "errorDetailEnum" values in the web API, as described in the
+// comments below.
+enum class RTCErrorType {
+  // No error.
+  NONE,
+
+  // An operation is valid, but currently unsupported.
+  // Maps to OperationError DOMException.
+  UNSUPPORTED_OPERATION,
+
+  // A supplied parameter is valid, but currently unsupported.
+  // Maps to OperationError DOMException.
+  UNSUPPORTED_PARAMETER,
+
+  // General error indicating that a supplied parameter is invalid.
+  // Maps to InvalidAccessError or TypeError DOMException depending on context.
+  INVALID_PARAMETER,
+
+  // Slightly more specific than INVALID_PARAMETER; a parameter's value was
+  // outside the allowed range.
+  // Maps to RangeError DOMException.
+  INVALID_RANGE,
+
+  // Slightly more specific than INVALID_PARAMETER; an error occurred while
+  // parsing string input.
+  // Maps to SyntaxError DOMException.
+  SYNTAX_ERROR,
+
+  // The object does not support this operation in its current state.
+  // Maps to InvalidStateError DOMException.
+  INVALID_STATE,
+
+  // An attempt was made to modify the object in an invalid way.
+  // Maps to InvalidModificationError DOMException.
+  INVALID_MODIFICATION,
+
+  // An error occurred within an underlying network protocol.
+  // Maps to NetworkError DOMException.
+  NETWORK_ERROR,
+
+  // Some resource has been exhausted; file handles, hardware resources, ports,
+  // etc.
+  // Maps to OperationError DOMException.
+  RESOURCE_EXHAUSTED,
+
+  // The operation failed due to an internal error.
+  // Maps to OperationError DOMException.
+  INTERNAL_ERROR,
+};
+
+// Roughly corresponds to RTCError in the web api. Holds an error type, a
+// message, and possibly additional information specific to that error.
+//
+// Doesn't contain anything beyond a type and message now, but will in the
+// future as more errors are implemented.
+class RTCError {
+ public:
+  // Constructors.
+
+  // Creates a "no error" error.
+  RTCError() {}
+  explicit RTCError(RTCErrorType type) : type_(type) {}
+  // For performance, prefer using the constructor that takes a const char* if
+  // the message is a static string.
+  RTCError(RTCErrorType type, const char* message)
+      : type_(type), static_message_(message), have_string_message_(false) {}
+  RTCError(RTCErrorType type, std::string&& message)
+      : type_(type), string_message_(message), have_string_message_(true) {}
+
+  // Delete the copy constructor and assignment operator; there aren't any use
+  // cases where you should need to copy an RTCError, as opposed to moving it.
+  // Can revisit this decision if use cases arise in the future.
+  RTCError(const RTCError& other) = delete;
+  RTCError& operator=(const RTCError& other) = delete;
+
+  // Move constructor and move-assignment operator.
+  RTCError(RTCError&& other);
+  RTCError& operator=(RTCError&& other);
+
+  ~RTCError();
+
+  // Identical to default constructed error.
+  //
+  // Preferred over the default constructor for code readability.
+  static RTCError OK();
+
+  // Error type.
+  RTCErrorType type() const { return type_; }
+  void set_type(RTCErrorType type) { type_ = type; }
+
+  // Human-readable message describing the error. Shouldn't be used for
+  // anything but logging/diagnostics, since messages are not guaranteed to be
+  // stable.
+  const char* message() const;
+  // For performance, prefer using the method that takes a const char* if the
+  // message is a static string.
+  void set_message(const char* message);
+  void set_message(std::string&& message);
+
+  // Convenience method for situations where you only care whether or not an
+  // error occurred.
+  bool ok() const { return type_ == RTCErrorType::NONE; }
+
+ private:
+  RTCErrorType type_ = RTCErrorType::NONE;
+  // For performance, we use static strings wherever possible. But in some
+  // cases the error string may need to be constructed, in which case an
+  // std::string is used.
+  union {
+    const char* static_message_ = "";
+    std::string string_message_;
+  };
+  // Whether or not |static_message_| or |string_message_| is being used in the
+  // above union.
+  bool have_string_message_ = false;
+};
+
+// Outputs the error as a friendly string. Update this method when adding a new
+// error type.
+//
+// Only intended to be used for logging/disagnostics.
+std::ostream& operator<<(std::ostream& stream, RTCErrorType error);
+
+// Helper macro that can be used by implementations to create an error with a
+// message and log it. |message| should be a string literal or movable
+// std::string.
+#define LOG_AND_RETURN_ERROR_EX(type, message, severity) \
+  {                                                      \
+    RTC_DCHECK(type != RTCErrorType::NONE);              \
+    RTC_LOG(severity) << message << " (" << type << ")"; \
+    return webrtc::RTCError(type, message);              \
+  }
+
+#define LOG_AND_RETURN_ERROR(type, message) \
+  LOG_AND_RETURN_ERROR_EX(type, message, LS_ERROR)
+
+// RTCErrorOr<T> is the union of an RTCError object and a T object. RTCErrorOr
+// models the concept of an object that is either a usable value, or an error
+// Status explaining why such a value is not present. To this end RTCErrorOr<T>
+// does not allow its RTCErrorType value to be RTCErrorType::NONE. This is
+// enforced by a debug check in most cases.
+//
+// The primary use-case for RTCErrorOr<T> is as the return value of a function
+// which may fail. For example, CreateRtpSender will fail if the parameters
+// could not be successfully applied at the media engine level, but if
+// successful will return a unique_ptr to an RtpSender.
+//
+// Example client usage for a RTCErrorOr<std::unique_ptr<T>>:
+//
+//  RTCErrorOr<std::unique_ptr<Foo>> result = FooFactory::MakeNewFoo(arg);
+//  if (result.ok()) {
+//    std::unique_ptr<Foo> foo = result.ConsumeValue();
+//    foo->DoSomethingCool();
+//  } else {
+//    RTC_LOG(LS_ERROR) << result.error();
+//  }
+//
+// Example factory implementation returning RTCErrorOr<std::unique_ptr<T>>:
+//
+//  RTCErrorOr<std::unique_ptr<Foo>> FooFactory::MakeNewFoo(int arg) {
+//    if (arg <= 0) {
+//      return RTCError(RTCErrorType::INVALID_RANGE, "Arg must be positive");
+//    } else {
+//      return std::unique_ptr<Foo>(new Foo(arg));
+//    }
+//  }
+//
+template <typename T>
+class RTCErrorOr {
+  // Used to convert between RTCErrorOr<Foo>/RtcErrorOr<Bar>, when an implicit
+  // conversion from Foo to Bar exists.
+  template <typename U>
+  friend class RTCErrorOr;
+
+ public:
+  typedef T element_type;
+
+  // Constructs a new RTCErrorOr with RTCErrorType::INTERNAL_ERROR error. This
+  // is marked 'explicit' to try to catch cases like 'return {};', where people
+  // think RTCErrorOr<std::vector<int>> will be initialized with an empty
+  // vector, instead of a RTCErrorType::INTERNAL_ERROR error.
+  RTCErrorOr() : error_(RTCErrorType::INTERNAL_ERROR) {}
+
+  // Constructs a new RTCErrorOr with the given non-ok error. After calling
+  // this constructor, calls to value() will DCHECK-fail.
+  //
+  // NOTE: Not explicit - we want to use RTCErrorOr<T> as a return
+  // value, so it is convenient and sensible to be able to do 'return
+  // RTCError(...)' when the return type is RTCErrorOr<T>.
+  //
+  // REQUIRES: !error.ok(). This requirement is DCHECKed.
+  RTCErrorOr(RTCError&& error) : error_(std::move(error)) {  // NOLINT
+    RTC_DCHECK(!error.ok());
+  }
+
+  // Constructs a new RTCErrorOr with the given value. After calling this
+  // constructor, calls to value() will succeed, and calls to error() will
+  // return a default-constructed RTCError.
+  //
+  // NOTE: Not explicit - we want to use RTCErrorOr<T> as a return type
+  // so it is convenient and sensible to be able to do 'return T()'
+  // when the return type is RTCErrorOr<T>.
+  RTCErrorOr(T&& value) : value_(std::move(value)) {}  // NOLINT
+
+  // Delete the copy constructor and assignment operator; there aren't any use
+  // cases where you should need to copy an RTCErrorOr, as opposed to moving
+  // it. Can revisit this decision if use cases arise in the future.
+  RTCErrorOr(const RTCErrorOr& other) = delete;
+  RTCErrorOr& operator=(const RTCErrorOr& other) = delete;
+
+  // Move constructor and move-assignment operator.
+  //
+  // Visual Studio doesn't support "= default" with move constructors or
+  // assignment operators (even though they compile, they segfault), so define
+  // them explicitly.
+  RTCErrorOr(RTCErrorOr&& other)
+      : error_(std::move(other.error_)), value_(std::move(other.value_)) {}
+  RTCErrorOr& operator=(RTCErrorOr&& other) {
+    error_ = std::move(other.error_);
+    value_ = std::move(other.value_);
+    return *this;
+  }
+
+  // Conversion constructor and assignment operator; T must be copy or move
+  // constructible from U.
+  template <typename U>
+  RTCErrorOr(RTCErrorOr<U> other)  // NOLINT
+      : error_(std::move(other.error_)), value_(std::move(other.value_)) {}
+  template <typename U>
+  RTCErrorOr& operator=(RTCErrorOr<U> other) {
+    error_ = std::move(other.error_);
+    value_ = std::move(other.value_);
+    return *this;
+  }
+
+  // Returns a reference to our error. If this contains a T, then returns
+  // default-constructed RTCError.
+  const RTCError& error() const { return error_; }
+
+  // Moves the error. Can be useful if, say "CreateFoo" returns an
+  // RTCErrorOr<Foo>, and internally calls "CreateBar" which returns an
+  // RTCErrorOr<Bar>, and wants to forward the error up the stack.
+  RTCError MoveError() { return std::move(error_); }
+
+  // Returns this->error().ok()
+  bool ok() const { return error_.ok(); }
+
+  // Returns a reference to our current value, or DCHECK-fails if !this->ok().
+  //
+  // Can be convenient for the implementation; for example, a method may want
+  // to access the value in some way before returning it to the next method on
+  // the stack.
+  const T& value() const {
+    RTC_DCHECK(ok());
+    return value_;
+  }
+  T& value() {
+    RTC_DCHECK(ok());
+    return value_;
+  }
+
+  // Moves our current value out of this object and returns it, or DCHECK-fails
+  // if !this->ok().
+  T MoveValue() {
+    RTC_DCHECK(ok());
+    return std::move(value_);
+  }
+
+ private:
+  RTCError error_;
+  T value_;
+};
+
+}  // namespace webrtc
+
+#endif  // API_RTCERROR_H_
diff --git a/api/rtcerror_unittest.cc b/api/rtcerror_unittest.cc
new file mode 100644
index 0000000..d8f7ca6
--- /dev/null
+++ b/api/rtcerror_unittest.cc
@@ -0,0 +1,249 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <utility>
+
+#include "api/rtcerror.h"
+#include "test/gtest.h"
+
+namespace {
+
+const int kDefaultMoveOnlyIntValue = 0xbadf00d;
+
+// Class that has no copy constructor, ensuring that RTCErrorOr can
+struct MoveOnlyInt {
+  MoveOnlyInt() {}
+  explicit MoveOnlyInt(int value) : value(value) {}
+  MoveOnlyInt(const MoveOnlyInt& other) = delete;
+  MoveOnlyInt& operator=(const MoveOnlyInt& other) = delete;
+  MoveOnlyInt(MoveOnlyInt&& other) : value(other.value) {}
+  MoveOnlyInt& operator=(MoveOnlyInt&& other) {
+    value = other.value;
+    return *this;
+  }
+
+  int value = kDefaultMoveOnlyIntValue;
+};
+
+// Same as above. Used to test conversion from RTCErrorOr<A> to RTCErrorOr<B>
+// when A can be converted to B.
+struct MoveOnlyInt2 {
+  MoveOnlyInt2() {}
+  explicit MoveOnlyInt2(int value) : value(value) {}
+  MoveOnlyInt2(const MoveOnlyInt2& other) = delete;
+  MoveOnlyInt2& operator=(const MoveOnlyInt2& other) = delete;
+  MoveOnlyInt2(MoveOnlyInt2&& other) : value(other.value) {}
+  MoveOnlyInt2& operator=(MoveOnlyInt2&& other) {
+    value = other.value;
+    return *this;
+  }
+
+  explicit MoveOnlyInt2(MoveOnlyInt&& other) : value(other.value) {}
+  MoveOnlyInt2& operator=(MoveOnlyInt&& other) {
+    value = other.value;
+    return *this;
+  }
+
+  int value = kDefaultMoveOnlyIntValue;
+};
+
+}  // namespace
+
+namespace webrtc {
+
+// Simple test for ostream operator for RTCErrorType.
+TEST(RTCErrorTypeTest, OstreamOperator) {
+  std::ostringstream oss;
+  oss << webrtc::RTCErrorType::NONE << ' '
+      << webrtc::RTCErrorType::INVALID_PARAMETER << ' '
+      << webrtc::RTCErrorType::INTERNAL_ERROR;
+  EXPECT_EQ("NONE INVALID_PARAMETER INTERNAL_ERROR", oss.str());
+}
+
+// Test that the default constructor creates a "no error" error.
+TEST(RTCErrorTest, DefaultConstructor) {
+  RTCError e;
+  EXPECT_EQ(RTCErrorType::NONE, e.type());
+  EXPECT_EQ(std::string(), e.message());
+  EXPECT_TRUE(e.ok());
+}
+
+TEST(RTCErrorTest, NormalConstructors) {
+  RTCError a(RTCErrorType::INVALID_PARAMETER);
+  EXPECT_EQ(RTCErrorType::INVALID_PARAMETER, a.type());
+  EXPECT_EQ(std::string(), a.message());
+
+  // Constructor that takes const char* message.
+  RTCError b(RTCErrorType::UNSUPPORTED_PARAMETER, "foobar");
+  EXPECT_EQ(RTCErrorType::UNSUPPORTED_PARAMETER, b.type());
+  EXPECT_EQ(std::string("foobar"), b.message());
+
+  // Constructor that takes std::string message.
+  RTCError c(RTCErrorType::INVALID_RANGE, std::string("new"));
+  EXPECT_EQ(RTCErrorType::INVALID_RANGE, c.type());
+  EXPECT_EQ(std::string("new"), c.message());
+}
+
+TEST(RTCErrorTest, MoveConstructor) {
+  // Static string.
+  RTCError a(RTCErrorType::INVALID_PARAMETER, "foo");
+  RTCError b(std::move(a));
+  EXPECT_EQ(RTCErrorType::INVALID_PARAMETER, b.type());
+  EXPECT_EQ(std::string("foo"), b.message());
+
+  // Non-static string.
+  RTCError c(RTCErrorType::UNSUPPORTED_PARAMETER, std::string("bar"));
+  RTCError d(std::move(c));
+  EXPECT_EQ(RTCErrorType::UNSUPPORTED_PARAMETER, d.type());
+  EXPECT_EQ(std::string("bar"), d.message());
+}
+
+TEST(RTCErrorTest, MoveAssignment) {
+  // Try all combinations of "is static string"/"is non-static string" moves.
+  RTCError e(RTCErrorType::INVALID_PARAMETER, "foo");
+
+  e = RTCError(RTCErrorType::UNSUPPORTED_PARAMETER, "bar");
+  EXPECT_EQ(RTCErrorType::UNSUPPORTED_PARAMETER, e.type());
+  EXPECT_EQ(std::string("bar"), e.message());
+
+  e = RTCError(RTCErrorType::SYNTAX_ERROR, std::string("baz"));
+  EXPECT_EQ(std::string("baz"), e.message());
+
+  e = RTCError(RTCErrorType::SYNTAX_ERROR, std::string("another"));
+  EXPECT_EQ(std::string("another"), e.message());
+
+  e = RTCError(RTCErrorType::SYNTAX_ERROR, "last");
+  EXPECT_EQ(std::string("last"), e.message());
+}
+
+// Test that the error returned by RTCError::OK() is a "no error" error.
+TEST(RTCErrorTest, OKConstant) {
+  RTCError ok = RTCError::OK();
+  EXPECT_EQ(RTCErrorType::NONE, ok.type());
+  EXPECT_EQ(std::string(), ok.message());
+  EXPECT_TRUE(ok.ok());
+}
+
+// Test that "error.ok()" behaves as expected.
+TEST(RTCErrorTest, OkMethod) {
+  RTCError success;
+  RTCError failure(RTCErrorType::INTERNAL_ERROR);
+  EXPECT_TRUE(success.ok());
+  EXPECT_FALSE(failure.ok());
+}
+
+// Test that a message can be set using either static const strings or
+// std::strings.
+TEST(RTCErrorTest, SetMessage) {
+  RTCError e;
+  // Try all combinations of "is static string"/"is non-static string" calls.
+  e.set_message("foo");
+  EXPECT_EQ(std::string("foo"), e.message());
+
+  e.set_message("bar");
+  EXPECT_EQ(std::string("bar"), e.message());
+
+  e.set_message(std::string("string"));
+  EXPECT_EQ(std::string("string"), e.message());
+
+  e.set_message(std::string("more"));
+  EXPECT_EQ(std::string("more"), e.message());
+
+  e.set_message("love to test");
+  EXPECT_EQ(std::string("love to test"), e.message());
+}
+
+// Test that the default constructor creates an "INTERNAL_ERROR".
+TEST(RTCErrorOrTest, DefaultConstructor) {
+  RTCErrorOr<MoveOnlyInt> e;
+  EXPECT_EQ(RTCErrorType::INTERNAL_ERROR, e.error().type());
+}
+
+// Test that an RTCErrorOr can be implicitly constructed from a value.
+TEST(RTCErrorOrTest, ImplicitValueConstructor) {
+  RTCErrorOr<MoveOnlyInt> e = [] { return MoveOnlyInt(100); }();
+  EXPECT_EQ(100, e.value().value);
+}
+
+// Test that an RTCErrorOr can be implicitly constructed from an RTCError.
+TEST(RTCErrorOrTest, ImplicitErrorConstructor) {
+  RTCErrorOr<MoveOnlyInt> e = [] {
+    return RTCError(RTCErrorType::SYNTAX_ERROR);
+  }();
+  EXPECT_EQ(RTCErrorType::SYNTAX_ERROR, e.error().type());
+}
+
+TEST(RTCErrorOrTest, MoveConstructor) {
+  RTCErrorOr<MoveOnlyInt> a(MoveOnlyInt(5));
+  RTCErrorOr<MoveOnlyInt> b(std::move(a));
+  EXPECT_EQ(5, b.value().value);
+}
+
+TEST(RTCErrorOrTest, MoveAssignment) {
+  RTCErrorOr<MoveOnlyInt> a(MoveOnlyInt(5));
+  RTCErrorOr<MoveOnlyInt> b(MoveOnlyInt(10));
+  a = std::move(b);
+  EXPECT_EQ(10, a.value().value);
+}
+
+TEST(RTCErrorOrTest, ConversionConstructor) {
+  RTCErrorOr<MoveOnlyInt> a(MoveOnlyInt(1));
+  RTCErrorOr<MoveOnlyInt2> b(std::move(a));
+}
+
+TEST(RTCErrorOrTest, ConversionAssignment) {
+  RTCErrorOr<MoveOnlyInt> a(MoveOnlyInt(5));
+  RTCErrorOr<MoveOnlyInt2> b(MoveOnlyInt2(10));
+  b = std::move(a);
+  EXPECT_EQ(5, b.value().value);
+}
+
+TEST(RTCErrorOrTest, OkMethod) {
+  RTCErrorOr<int> success(1337);
+  RTCErrorOr<int> error = RTCError(RTCErrorType::INTERNAL_ERROR);
+  EXPECT_TRUE(success.ok());
+  EXPECT_FALSE(error.ok());
+}
+
+TEST(RTCErrorOrTest, MoveError) {
+  RTCErrorOr<int> e({RTCErrorType::SYNTAX_ERROR, "message"});
+  RTCError err = e.MoveError();
+  EXPECT_EQ(RTCErrorType::SYNTAX_ERROR, err.type());
+  EXPECT_EQ(std::string("message"), err.message());
+}
+
+TEST(RTCErrorOrTest, MoveValue) {
+  RTCErrorOr<MoveOnlyInt> e(MoveOnlyInt(88));
+  MoveOnlyInt value = e.MoveValue();
+  EXPECT_EQ(88, value.value);
+}
+
+// Death tests.
+// Disabled on Android because death tests misbehave on Android, see
+// base/test/gtest_util.h.
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+TEST(RTCErrorOrDeathTest, ConstructWithOkError) {
+  EXPECT_DEATH(RTCErrorOr<int> err = RTCError::OK(), "");
+}
+
+TEST(RTCErrorOrDeathTest, DereferenceErrorValue) {
+  RTCErrorOr<int> error = RTCError(RTCErrorType::INTERNAL_ERROR);
+  EXPECT_DEATH(error.value(), "");
+}
+
+TEST(RTCErrorOrDeathTest, MoveErrorValue) {
+  RTCErrorOr<int> error = RTCError(RTCErrorType::INTERNAL_ERROR);
+  EXPECT_DEATH(error.MoveValue(), "");
+}
+
+#endif  // RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+}  // namespace webrtc
diff --git a/api/rtceventlogoutput.h b/api/rtceventlogoutput.h
new file mode 100644
index 0000000..67e408d
--- /dev/null
+++ b/api/rtceventlogoutput.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_RTCEVENTLOGOUTPUT_H_
+#define API_RTCEVENTLOGOUTPUT_H_
+
+#include <string>
+
+namespace webrtc {
+
+// NOTE: This class is still under development and may change without notice.
+class RtcEventLogOutput {
+ public:
+  virtual ~RtcEventLogOutput() = default;
+
+  // An output normally starts out active, though that might not always be
+  // the case (e.g. failed to open a file for writing).
+  // Once an output has become inactive (e.g. maximum file size reached), it can
+  // never become active again.
+  virtual bool IsActive() const = 0;
+
+  // Write encoded events to an output. Returns true if the output was
+  // successfully written in its entirety. Otherwise, no guarantee is given
+  // about how much data was written, if any. The output sink becomes inactive
+  // after the first time |false| is returned. Write() may not be called on
+  // an inactive output sink.
+  virtual bool Write(const std::string& output) = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // API_RTCEVENTLOGOUTPUT_H_
diff --git a/api/rtp_headers.cc b/api/rtp_headers.cc
new file mode 100644
index 0000000..a0b1a15
--- /dev/null
+++ b/api/rtp_headers.cc
@@ -0,0 +1,62 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/rtp_headers.h"
+
+#include <string.h>
+#include <algorithm>
+#include <limits>
+#include <type_traits>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/stringutils.h"
+
+namespace webrtc {
+
+RTPHeaderExtension::RTPHeaderExtension()
+    : hasTransmissionTimeOffset(false),
+      transmissionTimeOffset(0),
+      hasAbsoluteSendTime(false),
+      absoluteSendTime(0),
+      hasTransportSequenceNumber(false),
+      transportSequenceNumber(0),
+      hasAudioLevel(false),
+      voiceActivity(false),
+      audioLevel(0),
+      hasVideoRotation(false),
+      videoRotation(kVideoRotation_0),
+      hasVideoContentType(false),
+      videoContentType(VideoContentType::UNSPECIFIED),
+      has_video_timing(false) {}
+
+RTPHeaderExtension::RTPHeaderExtension(const RTPHeaderExtension& other) =
+    default;
+
+RTPHeaderExtension& RTPHeaderExtension::operator=(
+    const RTPHeaderExtension& other) = default;
+
+RTPHeader::RTPHeader()
+    : markerBit(false),
+      payloadType(0),
+      sequenceNumber(0),
+      timestamp(0),
+      ssrc(0),
+      numCSRCs(0),
+      arrOfCSRCs(),
+      paddingLength(0),
+      headerLength(0),
+      payload_type_frequency(0),
+      extension() {}
+
+RTPHeader::RTPHeader(const RTPHeader& other) = default;
+
+RTPHeader& RTPHeader::operator=(const RTPHeader& other) = default;
+
+}  // namespace webrtc
diff --git a/api/rtp_headers.h b/api/rtp_headers.h
new file mode 100644
index 0000000..c5496b6
--- /dev/null
+++ b/api/rtp_headers.h
@@ -0,0 +1,173 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_RTP_HEADERS_H_
+#define API_RTP_HEADERS_H_
+
+#include <stddef.h>
+#include <string.h>
+#include <ostream>
+#include <string>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/optional.h"
+#include "api/video/video_content_type.h"
+#include "api/video/video_rotation.h"
+#include "api/video/video_timing.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/deprecation.h"
+#include "common_types.h"  // NOLINT(build/include)
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Class to represent the value of RTP header extensions that are
+// variable-length strings (e.g., RtpStreamId and RtpMid).
+// Unlike std::string, it can be copied with memcpy and cleared with memset.
+//
+// Empty value represents unset header extension (use empty() to query).
+class StringRtpHeaderExtension {
+ public:
+  // String RTP header extensions are limited to 16 bytes because it is the
+  // maximum length that can be encoded with one-byte header extensions.
+  static constexpr size_t kMaxSize = 16;
+
+  static bool IsLegalName(rtc::ArrayView<const char> name);
+
+  StringRtpHeaderExtension() { value_[0] = 0; }
+  explicit StringRtpHeaderExtension(rtc::ArrayView<const char> value) {
+    Set(value.data(), value.size());
+  }
+  StringRtpHeaderExtension(const StringRtpHeaderExtension&) = default;
+  StringRtpHeaderExtension& operator=(const StringRtpHeaderExtension&) =
+      default;
+
+  bool empty() const { return value_[0] == 0; }
+  const char* data() const { return value_; }
+  size_t size() const { return strnlen(value_, kMaxSize); }
+
+  void Set(rtc::ArrayView<const uint8_t> value) {
+    Set(reinterpret_cast<const char*>(value.data()), value.size());
+  }
+  void Set(const char* data, size_t size);
+
+  friend bool operator==(const StringRtpHeaderExtension& lhs,
+                         const StringRtpHeaderExtension& rhs) {
+    return strncmp(lhs.value_, rhs.value_, kMaxSize) == 0;
+  }
+  friend bool operator!=(const StringRtpHeaderExtension& lhs,
+                         const StringRtpHeaderExtension& rhs) {
+    return !(lhs == rhs);
+  }
+
+ private:
+  char value_[kMaxSize];
+};
+
+// StreamId represents RtpStreamId which is a string.
+typedef StringRtpHeaderExtension StreamId;
+
+// Mid represents RtpMid which is a string.
+typedef StringRtpHeaderExtension Mid;
+
+struct RTPHeaderExtension {
+  RTPHeaderExtension();
+  RTPHeaderExtension(const RTPHeaderExtension& other);
+  RTPHeaderExtension& operator=(const RTPHeaderExtension& other);
+
+  bool hasTransmissionTimeOffset;
+  int32_t transmissionTimeOffset;
+  bool hasAbsoluteSendTime;
+  uint32_t absoluteSendTime;
+  bool hasTransportSequenceNumber;
+  uint16_t transportSequenceNumber;
+
+  // Audio Level includes both level in dBov and voiced/unvoiced bit. See:
+  // https://datatracker.ietf.org/doc/draft-lennox-avt-rtp-audio-level-exthdr/
+  bool hasAudioLevel;
+  bool voiceActivity;
+  uint8_t audioLevel;
+
+  // For Coordination of Video Orientation. See
+  // http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
+  // ts_126114v120700p.pdf
+  bool hasVideoRotation;
+  VideoRotation videoRotation;
+
+  // TODO(ilnik): Refactor this and one above to be rtc::Optional() and remove
+  // a corresponding bool flag.
+  bool hasVideoContentType;
+  VideoContentType videoContentType;
+
+  bool has_video_timing;
+  VideoSendTiming video_timing;
+
+  PlayoutDelay playout_delay = {-1, -1};
+
+  // For identification of a stream when ssrc is not signaled. See
+  // https://tools.ietf.org/html/draft-ietf-avtext-rid-09
+  // TODO(danilchap): Update url from draft to release version.
+  StreamId stream_id;
+  StreamId repaired_stream_id;
+
+  // For identifying the media section used to interpret this RTP packet. See
+  // https://tools.ietf.org/html/draft-ietf-mmusic-sdp-bundle-negotiation-38
+  Mid mid;
+};
+
+struct RTPHeader {
+  RTPHeader();
+  RTPHeader(const RTPHeader& other);
+  RTPHeader& operator=(const RTPHeader& other);
+
+  bool markerBit;
+  uint8_t payloadType;
+  uint16_t sequenceNumber;
+  uint32_t timestamp;
+  uint32_t ssrc;
+  uint8_t numCSRCs;
+  uint32_t arrOfCSRCs[kRtpCsrcSize];
+  size_t paddingLength;
+  size_t headerLength;
+  int payload_type_frequency;
+  RTPHeaderExtension extension;
+};
+
+// RTCP mode to use. Compound mode is described by RFC 4585 and reduced-size
+// RTCP mode is described by RFC 5506.
+enum class RtcpMode { kOff, kCompound, kReducedSize };
+
+enum NetworkState {
+  kNetworkUp,
+  kNetworkDown,
+};
+
+struct RtpKeepAliveConfig final {
+  // If no packet has been sent for |timeout_interval_ms|, send a keep-alive
+  // packet. The keep-alive packet is an empty (no payload) RTP packet with a
+  // payload type of 20 as long as the other end has not negotiated the use of
+  // this value. If this value has already been negotiated, then some other
+  // unused static payload type from table 5 of RFC 3551 shall be used and set
+  // in |payload_type|.
+  int64_t timeout_interval_ms = -1;
+  uint8_t payload_type = 20;
+
+  bool operator==(const RtpKeepAliveConfig& o) const {
+    return timeout_interval_ms == o.timeout_interval_ms &&
+           payload_type == o.payload_type;
+  }
+  bool operator!=(const RtpKeepAliveConfig& o) const { return !(*this == o); }
+};
+
+}  // namespace webrtc
+
+#endif  // API_RTP_HEADERS_H_
diff --git a/api/rtpparameters.cc b/api/rtpparameters.cc
new file mode 100644
index 0000000..79fd3a9
--- /dev/null
+++ b/api/rtpparameters.cc
@@ -0,0 +1,192 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "api/rtpparameters.h"
+
+#include <algorithm>
+#include <sstream>
+#include <string>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+const double kDefaultBitratePriority = 1.0;
+
+RtcpFeedback::RtcpFeedback() {}
+RtcpFeedback::RtcpFeedback(RtcpFeedbackType type) : type(type) {}
+RtcpFeedback::RtcpFeedback(RtcpFeedbackType type,
+                           RtcpFeedbackMessageType message_type)
+    : type(type), message_type(message_type) {}
+RtcpFeedback::~RtcpFeedback() {}
+
+RtpCodecCapability::RtpCodecCapability() {}
+RtpCodecCapability::~RtpCodecCapability() {}
+
+RtpHeaderExtensionCapability::RtpHeaderExtensionCapability() {}
+RtpHeaderExtensionCapability::RtpHeaderExtensionCapability(
+    const std::string& uri)
+    : uri(uri) {}
+RtpHeaderExtensionCapability::RtpHeaderExtensionCapability(
+    const std::string& uri,
+    int preferred_id)
+    : uri(uri), preferred_id(preferred_id) {}
+RtpHeaderExtensionCapability::~RtpHeaderExtensionCapability() {}
+
+RtpExtension::RtpExtension() {}
+RtpExtension::RtpExtension(const std::string& uri, int id) : uri(uri), id(id) {}
+RtpExtension::RtpExtension(const std::string& uri, int id, bool encrypt)
+    : uri(uri), id(id), encrypt(encrypt) {}
+RtpExtension::~RtpExtension() {}
+
+RtpFecParameters::RtpFecParameters() {}
+RtpFecParameters::RtpFecParameters(FecMechanism mechanism)
+    : mechanism(mechanism) {}
+RtpFecParameters::RtpFecParameters(FecMechanism mechanism, uint32_t ssrc)
+    : ssrc(ssrc), mechanism(mechanism) {}
+RtpFecParameters::~RtpFecParameters() {}
+
+RtpRtxParameters::RtpRtxParameters() {}
+RtpRtxParameters::RtpRtxParameters(uint32_t ssrc) : ssrc(ssrc) {}
+RtpRtxParameters::~RtpRtxParameters() {}
+
+RtpEncodingParameters::RtpEncodingParameters() {}
+RtpEncodingParameters::~RtpEncodingParameters() {}
+
+RtpCodecParameters::RtpCodecParameters() {}
+RtpCodecParameters::~RtpCodecParameters() {}
+
+RtpCapabilities::RtpCapabilities() {}
+RtpCapabilities::~RtpCapabilities() {}
+
+RtpParameters::RtpParameters() {}
+RtpParameters::~RtpParameters() {}
+
+std::string RtpExtension::ToString() const {
+  std::stringstream ss;
+  ss << "{uri: " << uri;
+  ss << ", id: " << id;
+  if (encrypt) {
+    ss << ", encrypt";
+  }
+  ss << '}';
+  return ss.str();
+}
+
+const char RtpExtension::kAudioLevelUri[] =
+    "urn:ietf:params:rtp-hdrext:ssrc-audio-level";
+const int RtpExtension::kAudioLevelDefaultId = 1;
+
+const char RtpExtension::kTimestampOffsetUri[] =
+    "urn:ietf:params:rtp-hdrext:toffset";
+const int RtpExtension::kTimestampOffsetDefaultId = 2;
+
+const char RtpExtension::kAbsSendTimeUri[] =
+    "http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time";
+const int RtpExtension::kAbsSendTimeDefaultId = 3;
+
+const char RtpExtension::kVideoRotationUri[] = "urn:3gpp:video-orientation";
+const int RtpExtension::kVideoRotationDefaultId = 4;
+
+const char RtpExtension::kTransportSequenceNumberUri[] =
+    "http://www.ietf.org/id/draft-holmer-rmcat-transport-wide-cc-extensions-01";
+const int RtpExtension::kTransportSequenceNumberDefaultId = 5;
+
+// This extension allows applications to adaptively limit the playout delay
+// on frames as per the current needs. For example, a gaming application
+// has very different needs on end-to-end delay compared to a video-conference
+// application.
+const char RtpExtension::kPlayoutDelayUri[] =
+    "http://www.webrtc.org/experiments/rtp-hdrext/playout-delay";
+const int RtpExtension::kPlayoutDelayDefaultId = 6;
+
+const char RtpExtension::kVideoContentTypeUri[] =
+    "http://www.webrtc.org/experiments/rtp-hdrext/video-content-type";
+const int RtpExtension::kVideoContentTypeDefaultId = 7;
+
+const char RtpExtension::kVideoTimingUri[] =
+    "http://www.webrtc.org/experiments/rtp-hdrext/video-timing";
+const int RtpExtension::kVideoTimingDefaultId = 8;
+
+const char RtpExtension::kEncryptHeaderExtensionsUri[] =
+    "urn:ietf:params:rtp-hdrext:encrypt";
+
+const int RtpExtension::kMinId = 1;
+const int RtpExtension::kMaxId = 14;
+
+bool RtpExtension::IsSupportedForAudio(const std::string& uri) {
+  return uri == webrtc::RtpExtension::kAudioLevelUri ||
+         uri == webrtc::RtpExtension::kTransportSequenceNumberUri;
+}
+
+bool RtpExtension::IsSupportedForVideo(const std::string& uri) {
+  return uri == webrtc::RtpExtension::kTimestampOffsetUri ||
+         uri == webrtc::RtpExtension::kAbsSendTimeUri ||
+         uri == webrtc::RtpExtension::kVideoRotationUri ||
+         uri == webrtc::RtpExtension::kTransportSequenceNumberUri ||
+         uri == webrtc::RtpExtension::kPlayoutDelayUri ||
+         uri == webrtc::RtpExtension::kVideoContentTypeUri ||
+         uri == webrtc::RtpExtension::kVideoTimingUri;
+}
+
+bool RtpExtension::IsEncryptionSupported(const std::string& uri) {
+  return uri == webrtc::RtpExtension::kAudioLevelUri ||
+         uri == webrtc::RtpExtension::kTimestampOffsetUri ||
+#if !defined(ENABLE_EXTERNAL_AUTH)
+         // TODO(jbauch): Figure out a way to always allow "kAbsSendTimeUri"
+         // here and filter out later if external auth is really used in
+         // srtpfilter. External auth is used by Chromium and replaces the
+         // extension header value of "kAbsSendTimeUri", so it must not be
+         // encrypted (which can't be done by Chromium).
+         uri == webrtc::RtpExtension::kAbsSendTimeUri ||
+#endif
+         uri == webrtc::RtpExtension::kVideoRotationUri ||
+         uri == webrtc::RtpExtension::kTransportSequenceNumberUri ||
+         uri == webrtc::RtpExtension::kPlayoutDelayUri ||
+         uri == webrtc::RtpExtension::kVideoContentTypeUri;
+}
+
+const RtpExtension* RtpExtension::FindHeaderExtensionByUri(
+    const std::vector<RtpExtension>& extensions,
+    const std::string& uri) {
+  for (const auto& extension : extensions) {
+    if (extension.uri == uri) {
+      return &extension;
+    }
+  }
+  return nullptr;
+}
+
+std::vector<RtpExtension> RtpExtension::FilterDuplicateNonEncrypted(
+    const std::vector<RtpExtension>& extensions) {
+  std::vector<RtpExtension> filtered;
+  for (auto extension = extensions.begin(); extension != extensions.end();
+       ++extension) {
+    if (extension->encrypt) {
+      filtered.push_back(*extension);
+      continue;
+    }
+
+    // Only add non-encrypted extension if no encrypted with the same URI
+    // is also present...
+    if (std::find_if(extension + 1, extensions.end(),
+                     [extension](const RtpExtension& check) {
+                       return extension->uri == check.uri;
+                     }) != extensions.end()) {
+      continue;
+    }
+
+    // ...and has not been added before.
+    if (!FindHeaderExtensionByUri(filtered, extension->uri)) {
+      filtered.push_back(*extension);
+    }
+  }
+  return filtered;
+}
+}  // namespace webrtc
diff --git a/api/rtpparameters.h b/api/rtpparameters.h
new file mode 100644
index 0000000..d9ac1b6
--- /dev/null
+++ b/api/rtpparameters.h
@@ -0,0 +1,574 @@
+/*
+ *  Copyright 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_RTPPARAMETERS_H_
+#define API_RTPPARAMETERS_H_
+
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "api/mediatypes.h"
+#include "api/optional.h"
+
+namespace webrtc {
+
+// These structures are intended to mirror those defined by:
+// http://draft.ortc.org/#rtcrtpdictionaries*
+// Contains everything specified as of 2017 Jan 24.
+//
+// They are used when retrieving or modifying the parameters of an
+// RtpSender/RtpReceiver, or retrieving capabilities.
+//
+// Note on conventions: Where ORTC may use "octet", "short" and "unsigned"
+// types, we typically use "int", in keeping with our style guidelines. The
+// parameter's actual valid range will be enforced when the parameters are set,
+// rather than when the parameters struct is built. An exception is made for
+// SSRCs, since they use the full unsigned 32-bit range, and aren't expected to
+// be used for any numeric comparisons/operations.
+//
+// Additionally, where ORTC uses strings, we may use enums for things that have
+// a fixed number of supported values. However, for things that can be extended
+// (such as codecs, by providing an external encoder factory), a string
+// identifier is used.
+
+enum class FecMechanism {
+  RED,
+  RED_AND_ULPFEC,
+  FLEXFEC,
+};
+
+// Used in RtcpFeedback struct.
+enum class RtcpFeedbackType {
+  CCM,
+  NACK,
+  REMB,  // "goog-remb"
+  TRANSPORT_CC,
+};
+
+// Used in RtcpFeedback struct when type is NACK or CCM.
+enum class RtcpFeedbackMessageType {
+  // Equivalent to {type: "nack", parameter: undefined} in ORTC.
+  GENERIC_NACK,
+  PLI,  // Usable with NACK.
+  FIR,  // Usable with CCM.
+};
+
+enum class DtxStatus {
+  DISABLED,
+  ENABLED,
+};
+
+enum class DegradationPreference {
+  MAINTAIN_FRAMERATE,
+  MAINTAIN_RESOLUTION,
+  BALANCED,
+};
+
+extern const double kDefaultBitratePriority;
+
+struct RtcpFeedback {
+  RtcpFeedbackType type = RtcpFeedbackType::CCM;
+
+  // Equivalent to ORTC "parameter" field with slight differences:
+  // 1. It's an enum instead of a string.
+  // 2. Generic NACK feedback is represented by a GENERIC_NACK message type,
+  //    rather than an unset "parameter" value.
+  rtc::Optional<RtcpFeedbackMessageType> message_type;
+
+  // Constructors for convenience.
+  RtcpFeedback();
+  explicit RtcpFeedback(RtcpFeedbackType type);
+  RtcpFeedback(RtcpFeedbackType type, RtcpFeedbackMessageType message_type);
+  ~RtcpFeedback();
+
+  bool operator==(const RtcpFeedback& o) const {
+    return type == o.type && message_type == o.message_type;
+  }
+  bool operator!=(const RtcpFeedback& o) const { return !(*this == o); }
+};
+
+// RtpCodecCapability is to RtpCodecParameters as RtpCapabilities is to
+// RtpParameters. This represents the static capabilities of an endpoint's
+// implementation of a codec.
+struct RtpCodecCapability {
+  RtpCodecCapability();
+  ~RtpCodecCapability();
+
+  // Build MIME "type/subtype" string from |name| and |kind|.
+  std::string mime_type() const { return MediaTypeToString(kind) + "/" + name; }
+
+  // Used to identify the codec. Equivalent to MIME subtype.
+  std::string name;
+
+  // The media type of this codec. Equivalent to MIME top-level type.
+  cricket::MediaType kind = cricket::MEDIA_TYPE_AUDIO;
+
+  // Clock rate in Hertz. If unset, the codec is applicable to any clock rate.
+  rtc::Optional<int> clock_rate;
+
+  // Default payload type for this codec. Mainly needed for codecs that use
+  // that have statically assigned payload types.
+  rtc::Optional<int> preferred_payload_type;
+
+  // Maximum packetization time supported by an RtpReceiver for this codec.
+  // TODO(deadbeef): Not implemented.
+  rtc::Optional<int> max_ptime;
+
+  // Preferred packetization time for an RtpReceiver or RtpSender of this
+  // codec.
+  // TODO(deadbeef): Not implemented.
+  rtc::Optional<int> ptime;
+
+  // The number of audio channels supported. Unused for video codecs.
+  rtc::Optional<int> num_channels;
+
+  // Feedback mechanisms supported for this codec.
+  std::vector<RtcpFeedback> rtcp_feedback;
+
+  // Codec-specific parameters that must be signaled to the remote party.
+  //
+  // Corresponds to "a=fmtp" parameters in SDP.
+  //
+  // Contrary to ORTC, these parameters are named using all lowercase strings.
+  // This helps make the mapping to SDP simpler, if an application is using
+  // SDP. Boolean values are represented by the string "1".
+  std::unordered_map<std::string, std::string> parameters;
+
+  // Codec-specific parameters that may optionally be signaled to the remote
+  // party.
+  // TODO(deadbeef): Not implemented.
+  std::unordered_map<std::string, std::string> options;
+
+  // Maximum number of temporal layer extensions supported by this codec.
+  // For example, a value of 1 indicates that 2 total layers are supported.
+  // TODO(deadbeef): Not implemented.
+  int max_temporal_layer_extensions = 0;
+
+  // Maximum number of spatial layer extensions supported by this codec.
+  // For example, a value of 1 indicates that 2 total layers are supported.
+  // TODO(deadbeef): Not implemented.
+  int max_spatial_layer_extensions = 0;
+
+  // Whether the implementation can send/receive SVC layers with distinct
+  // SSRCs. Always false for audio codecs. True for video codecs that support
+  // scalable video coding with MRST.
+  // TODO(deadbeef): Not implemented.
+  bool svc_multi_stream_support = false;
+
+  bool operator==(const RtpCodecCapability& o) const {
+    return name == o.name && kind == o.kind && clock_rate == o.clock_rate &&
+           preferred_payload_type == o.preferred_payload_type &&
+           max_ptime == o.max_ptime && ptime == o.ptime &&
+           num_channels == o.num_channels && rtcp_feedback == o.rtcp_feedback &&
+           parameters == o.parameters && options == o.options &&
+           max_temporal_layer_extensions == o.max_temporal_layer_extensions &&
+           max_spatial_layer_extensions == o.max_spatial_layer_extensions &&
+           svc_multi_stream_support == o.svc_multi_stream_support;
+  }
+  bool operator!=(const RtpCodecCapability& o) const { return !(*this == o); }
+};
+
+// Used in RtpCapabilities; represents the capabilities/preferences of an
+// implementation for a header extension.
+//
+// Just called "RtpHeaderExtension" in ORTC, but the "Capability" suffix was
+// added here for consistency and to avoid confusion with
+// RtpHeaderExtensionParameters.
+//
+// Note that ORTC includes a "kind" field, but we omit this because it's
+// redundant; if you call "RtpReceiver::GetCapabilities(MEDIA_TYPE_AUDIO)",
+// you know you're getting audio capabilities.
+struct RtpHeaderExtensionCapability {
+  // URI of this extension, as defined in RFC5285.
+  std::string uri;
+
+  // Preferred value of ID that goes in the packet.
+  rtc::Optional<int> preferred_id;
+
+  // If true, it's preferred that the value in the header is encrypted.
+  // TODO(deadbeef): Not implemented.
+  bool preferred_encrypt = false;
+
+  // Constructors for convenience.
+  RtpHeaderExtensionCapability();
+  explicit RtpHeaderExtensionCapability(const std::string& uri);
+  RtpHeaderExtensionCapability(const std::string& uri, int preferred_id);
+  ~RtpHeaderExtensionCapability();
+
+  bool operator==(const RtpHeaderExtensionCapability& o) const {
+    return uri == o.uri && preferred_id == o.preferred_id &&
+           preferred_encrypt == o.preferred_encrypt;
+  }
+  bool operator!=(const RtpHeaderExtensionCapability& o) const {
+    return !(*this == o);
+  }
+};
+
+// RTP header extension, see RFC 5285.
+struct RtpExtension {
+  RtpExtension();
+  RtpExtension(const std::string& uri, int id);
+  RtpExtension(const std::string& uri, int id, bool encrypt);
+  ~RtpExtension();
+  std::string ToString() const;
+  bool operator==(const RtpExtension& rhs) const {
+    return uri == rhs.uri && id == rhs.id && encrypt == rhs.encrypt;
+  }
+  static bool IsSupportedForAudio(const std::string& uri);
+  static bool IsSupportedForVideo(const std::string& uri);
+  // Return "true" if the given RTP header extension URI may be encrypted.
+  static bool IsEncryptionSupported(const std::string& uri);
+
+  // Returns the named header extension if found among all extensions,
+  // nullptr otherwise.
+  static const RtpExtension* FindHeaderExtensionByUri(
+      const std::vector<RtpExtension>& extensions,
+      const std::string& uri);
+
+  // Return a list of RTP header extensions with the non-encrypted extensions
+  // removed if both the encrypted and non-encrypted extension is present for
+  // the same URI.
+  static std::vector<RtpExtension> FilterDuplicateNonEncrypted(
+      const std::vector<RtpExtension>& extensions);
+
+  // Header extension for audio levels, as defined in:
+  // http://tools.ietf.org/html/draft-ietf-avtext-client-to-mixer-audio-level-03
+  static const char kAudioLevelUri[];
+  static const int kAudioLevelDefaultId;
+
+  // Header extension for RTP timestamp offset, see RFC 5450 for details:
+  // http://tools.ietf.org/html/rfc5450
+  static const char kTimestampOffsetUri[];
+  static const int kTimestampOffsetDefaultId;
+
+  // Header extension for absolute send time, see url for details:
+  // http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time
+  static const char kAbsSendTimeUri[];
+  static const int kAbsSendTimeDefaultId;
+
+  // Header extension for coordination of video orientation, see url for
+  // details:
+  // http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/ts_126114v120700p.pdf
+  static const char kVideoRotationUri[];
+  static const int kVideoRotationDefaultId;
+
+  // Header extension for video content type. E.g. default or screenshare.
+  static const char kVideoContentTypeUri[];
+  static const int kVideoContentTypeDefaultId;
+
+  // Header extension for video timing.
+  static const char kVideoTimingUri[];
+  static const int kVideoTimingDefaultId;
+
+  // Header extension for transport sequence number, see url for details:
+  // http://www.ietf.org/id/draft-holmer-rmcat-transport-wide-cc-extensions
+  static const char kTransportSequenceNumberUri[];
+  static const int kTransportSequenceNumberDefaultId;
+
+  static const char kPlayoutDelayUri[];
+  static const int kPlayoutDelayDefaultId;
+
+  // Encryption of Header Extensions, see RFC 6904 for details:
+  // https://tools.ietf.org/html/rfc6904
+  static const char kEncryptHeaderExtensionsUri[];
+
+  // Inclusive min and max IDs for one-byte header extensions, per RFC5285.
+  static const int kMinId;
+  static const int kMaxId;
+
+  std::string uri;
+  int id = 0;
+  bool encrypt = false;
+};
+
+// TODO(deadbeef): This is missing the "encrypt" flag, which is unimplemented.
+typedef RtpExtension RtpHeaderExtensionParameters;
+
+struct RtpFecParameters {
+  // If unset, a value is chosen by the implementation.
+  // Works just like RtpEncodingParameters::ssrc.
+  rtc::Optional<uint32_t> ssrc;
+
+  FecMechanism mechanism = FecMechanism::RED;
+
+  // Constructors for convenience.
+  RtpFecParameters();
+  explicit RtpFecParameters(FecMechanism mechanism);
+  RtpFecParameters(FecMechanism mechanism, uint32_t ssrc);
+  ~RtpFecParameters();
+
+  bool operator==(const RtpFecParameters& o) const {
+    return ssrc == o.ssrc && mechanism == o.mechanism;
+  }
+  bool operator!=(const RtpFecParameters& o) const { return !(*this == o); }
+};
+
+struct RtpRtxParameters {
+  // If unset, a value is chosen by the implementation.
+  // Works just like RtpEncodingParameters::ssrc.
+  rtc::Optional<uint32_t> ssrc;
+
+  // Constructors for convenience.
+  RtpRtxParameters();
+  explicit RtpRtxParameters(uint32_t ssrc);
+  ~RtpRtxParameters();
+
+  bool operator==(const RtpRtxParameters& o) const { return ssrc == o.ssrc; }
+  bool operator!=(const RtpRtxParameters& o) const { return !(*this == o); }
+};
+
+struct RtpEncodingParameters {
+  RtpEncodingParameters();
+  ~RtpEncodingParameters();
+
+  // If unset, a value is chosen by the implementation.
+  //
+  // Note that the chosen value is NOT returned by GetParameters, because it
+  // may change due to an SSRC conflict, in which case the conflict is handled
+  // internally without any event. Another way of looking at this is that an
+  // unset SSRC acts as a "wildcard" SSRC.
+  rtc::Optional<uint32_t> ssrc;
+
+  // Can be used to reference a codec in the |codecs| member of the
+  // RtpParameters that contains this RtpEncodingParameters. If unset, the
+  // implementation will choose the first possible codec (if a sender), or
+  // prepare to receive any codec (for a receiver).
+  // TODO(deadbeef): Not implemented. Implementation of RtpSender will always
+  // choose the first codec from the list.
+  rtc::Optional<int> codec_payload_type;
+
+  // Specifies the FEC mechanism, if set.
+  // TODO(deadbeef): Not implemented. Current implementation will use whatever
+  // FEC codecs are available, including red+ulpfec.
+  rtc::Optional<RtpFecParameters> fec;
+
+  // Specifies the RTX parameters, if set.
+  // TODO(deadbeef): Not implemented with PeerConnection senders/receivers.
+  rtc::Optional<RtpRtxParameters> rtx;
+
+  // Only used for audio. If set, determines whether or not discontinuous
+  // transmission will be used, if an available codec supports it. If not
+  // set, the implementation default setting will be used.
+  // TODO(deadbeef): Not implemented. Current implementation will use a CN
+  // codec as long as it's present.
+  rtc::Optional<DtxStatus> dtx;
+
+  // The relative bitrate priority of this encoding. Currently this is
+  // implemented for the entire rtp sender by using the value of the first
+  // encoding parameter.
+  // TODO(webrtc.bugs.org/8630): Implement this per encoding parameter.
+  // Currently there is logic for how bitrate is distributed per simulcast layer
+  // in the VideoBitrateAllocator. This must be updated to incorporate relative
+  // bitrate priority.
+  double bitrate_priority = kDefaultBitratePriority;
+
+  // Indicates the preferred duration of media represented by a packet in
+  // milliseconds for this encoding. If set, this will take precedence over the
+  // ptime set in the RtpCodecParameters. This could happen if SDP negotiation
+  // creates a ptime for a specific codec, which is later changed in the
+  // RtpEncodingParameters by the application.
+  // TODO(bugs.webrtc.org/8819): Not implemented.
+  rtc::Optional<int> ptime;
+
+  // If set, this represents the Transport Independent Application Specific
+  // maximum bandwidth defined in RFC3890. If unset, there is no maximum
+  // bitrate. Currently this is implemented for the entire rtp sender by using
+  // the value of the first encoding parameter.
+  //
+  // TODO(webrtc.bugs.org/8655): Implement this per encoding parameter.
+  // Current implementation for a sender:
+  // The max bitrate is decided by taking the minimum of the first encoding
+  // parameter's max_bitrate_bps and the max bitrate specified by the sdp with
+  // the b=AS attribute. In the case of simulcast video, default values are used
+  // for each simulcast layer, and if there is some bitrate left over from the
+  // sender's max bitrate then it will roll over into the highest quality layer.
+  //
+  // Just called "maxBitrate" in ORTC spec.
+  //
+  // TODO(deadbeef): With ORTC RtpSenders, this currently sets the total
+  // bandwidth for the entire bandwidth estimator (audio and video). This is
+  // just always how "b=AS" was handled, but it's not correct and should be
+  // fixed.
+  rtc::Optional<int> max_bitrate_bps;
+
+  // TODO(deadbeef): Not implemented.
+  rtc::Optional<int> max_framerate;
+
+  // For video, scale the resolution down by this factor.
+  // TODO(deadbeef): Not implemented.
+  double scale_resolution_down_by = 1.0;
+
+  // Scale the framerate down by this factor.
+  // TODO(deadbeef): Not implemented.
+  double scale_framerate_down_by = 1.0;
+
+  // For an RtpSender, set to true to cause this encoding to be encoded and
+  // sent, and false for it not to be encoded and sent. This allows control
+  // across multiple encodings of a sender for turning simulcast layers on and
+  // off.
+  // TODO(webrtc.bugs.org/8807): Updating this parameter will trigger an encoder
+  // reset, but this isn't necessarily required.
+  bool active = true;
+
+  // Value to use for RID RTP header extension.
+  // Called "encodingId" in ORTC.
+  // TODO(deadbeef): Not implemented.
+  std::string rid;
+
+  // RIDs of encodings on which this layer depends.
+  // Called "dependencyEncodingIds" in ORTC spec.
+  // TODO(deadbeef): Not implemented.
+  std::vector<std::string> dependency_rids;
+
+  bool operator==(const RtpEncodingParameters& o) const {
+    return ssrc == o.ssrc && codec_payload_type == o.codec_payload_type &&
+           fec == o.fec && rtx == o.rtx && dtx == o.dtx &&
+           bitrate_priority == o.bitrate_priority && ptime == o.ptime &&
+           max_bitrate_bps == o.max_bitrate_bps &&
+           max_framerate == o.max_framerate &&
+           scale_resolution_down_by == o.scale_resolution_down_by &&
+           scale_framerate_down_by == o.scale_framerate_down_by &&
+           active == o.active && rid == o.rid &&
+           dependency_rids == o.dependency_rids;
+  }
+  bool operator!=(const RtpEncodingParameters& o) const {
+    return !(*this == o);
+  }
+};
+
+struct RtpCodecParameters {
+  RtpCodecParameters();
+  ~RtpCodecParameters();
+
+  // Build MIME "type/subtype" string from |name| and |kind|.
+  std::string mime_type() const { return MediaTypeToString(kind) + "/" + name; }
+
+  // Used to identify the codec. Equivalent to MIME subtype.
+  std::string name;
+
+  // The media type of this codec. Equivalent to MIME top-level type.
+  cricket::MediaType kind = cricket::MEDIA_TYPE_AUDIO;
+
+  // Payload type used to identify this codec in RTP packets.
+  // This must always be present, and must be unique across all codecs using
+  // the same transport.
+  int payload_type = 0;
+
+  // If unset, the implementation default is used.
+  rtc::Optional<int> clock_rate;
+
+  // The number of audio channels used. Unset for video codecs. If unset for
+  // audio, the implementation default is used.
+  // TODO(deadbeef): The "implementation default" part isn't fully implemented.
+  // Only defaults to 1, even though some codecs (such as opus) should really
+  // default to 2.
+  rtc::Optional<int> num_channels;
+
+  // The maximum packetization time to be used by an RtpSender.
+  // If |ptime| is also set, this will be ignored.
+  // TODO(deadbeef): Not implemented.
+  rtc::Optional<int> max_ptime;
+
+  // The packetization time to be used by an RtpSender.
+  // If unset, will use any time up to max_ptime.
+  // TODO(deadbeef): Not implemented.
+  rtc::Optional<int> ptime;
+
+  // Feedback mechanisms to be used for this codec.
+  // TODO(deadbeef): Not implemented with PeerConnection senders/receivers.
+  std::vector<RtcpFeedback> rtcp_feedback;
+
+  // Codec-specific parameters that must be signaled to the remote party.
+  //
+  // Corresponds to "a=fmtp" parameters in SDP.
+  //
+  // Contrary to ORTC, these parameters are named using all lowercase strings.
+  // This helps make the mapping to SDP simpler, if an application is using
+  // SDP. Boolean values are represented by the string "1".
+  //
+  // TODO(deadbeef): Not implemented with PeerConnection senders/receivers.
+  std::unordered_map<std::string, std::string> parameters;
+
+  bool operator==(const RtpCodecParameters& o) const {
+    return name == o.name && kind == o.kind && payload_type == o.payload_type &&
+           clock_rate == o.clock_rate && num_channels == o.num_channels &&
+           max_ptime == o.max_ptime && ptime == o.ptime &&
+           rtcp_feedback == o.rtcp_feedback && parameters == o.parameters;
+  }
+  bool operator!=(const RtpCodecParameters& o) const { return !(*this == o); }
+};
+
+// RtpCapabilities is used to represent the static capabilities of an
+// endpoint. An application can use these capabilities to construct an
+// RtpParameters.
+struct RtpCapabilities {
+  RtpCapabilities();
+  ~RtpCapabilities();
+
+  // Supported codecs.
+  std::vector<RtpCodecCapability> codecs;
+
+  // Supported RTP header extensions.
+  std::vector<RtpHeaderExtensionCapability> header_extensions;
+
+  // Supported Forward Error Correction (FEC) mechanisms. Note that the RED,
+  // ulpfec and flexfec codecs used by these mechanisms will still appear in
+  // |codecs|.
+  std::vector<FecMechanism> fec;
+
+  bool operator==(const RtpCapabilities& o) const {
+    return codecs == o.codecs && header_extensions == o.header_extensions &&
+           fec == o.fec;
+  }
+  bool operator!=(const RtpCapabilities& o) const { return !(*this == o); }
+};
+
+// Note that unlike in ORTC, an RtcpParameters structure is not included in
+// RtpParameters, because our API includes an additional "RtpTransport"
+// abstraction on which RTCP parameters are set.
+struct RtpParameters {
+  RtpParameters();
+  ~RtpParameters();
+
+  // Used when calling getParameters/setParameters with a PeerConnection
+  // RtpSender, to ensure that outdated parameters are not unintentionally
+  // applied successfully.
+  // TODO(deadbeef): Not implemented.
+  std::string transaction_id;
+
+  // Value to use for MID RTP header extension.
+  // Called "muxId" in ORTC.
+  // TODO(deadbeef): Not implemented.
+  std::string mid;
+
+  std::vector<RtpCodecParameters> codecs;
+
+  // TODO(deadbeef): Not implemented with PeerConnection senders/receivers.
+  std::vector<RtpHeaderExtensionParameters> header_extensions;
+
+  std::vector<RtpEncodingParameters> encodings;
+
+  // TODO(deadbeef): Not implemented.
+  DegradationPreference degradation_preference =
+      DegradationPreference::BALANCED;
+
+  bool operator==(const RtpParameters& o) const {
+    return mid == o.mid && codecs == o.codecs &&
+           header_extensions == o.header_extensions &&
+           encodings == o.encodings &&
+           degradation_preference == o.degradation_preference;
+  }
+  bool operator!=(const RtpParameters& o) const { return !(*this == o); }
+};
+
+}  // namespace webrtc
+
+#endif  // API_RTPPARAMETERS_H_
diff --git a/api/rtpparameters_unittest.cc b/api/rtpparameters_unittest.cc
new file mode 100644
index 0000000..ac3b1c7
--- /dev/null
+++ b/api/rtpparameters_unittest.cc
@@ -0,0 +1,51 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <utility>
+
+#include "api/rtpparameters.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+using webrtc::RtpExtension;
+
+static const char kExtensionUri1[] = "extension-uri1";
+static const char kExtensionUri2[] = "extension-uri2";
+
+static const RtpExtension kExtension1(kExtensionUri1, 1);
+static const RtpExtension kExtension1Encrypted(kExtensionUri1, 10, true);
+static const RtpExtension kExtension2(kExtensionUri2, 2);
+
+TEST(RtpExtensionTest, FilterDuplicateNonEncrypted) {
+  std::vector<RtpExtension> extensions;
+  std::vector<RtpExtension> filtered;
+
+  extensions.push_back(kExtension1);
+  extensions.push_back(kExtension1Encrypted);
+  filtered = RtpExtension::FilterDuplicateNonEncrypted(extensions);
+  EXPECT_EQ(1u, filtered.size());
+  EXPECT_EQ(std::vector<RtpExtension>{kExtension1Encrypted}, filtered);
+
+  extensions.clear();
+  extensions.push_back(kExtension1Encrypted);
+  extensions.push_back(kExtension1);
+  filtered = RtpExtension::FilterDuplicateNonEncrypted(extensions);
+  EXPECT_EQ(1u, filtered.size());
+  EXPECT_EQ(std::vector<RtpExtension>{kExtension1Encrypted}, filtered);
+
+  extensions.clear();
+  extensions.push_back(kExtension1);
+  extensions.push_back(kExtension2);
+  filtered = RtpExtension::FilterDuplicateNonEncrypted(extensions);
+  EXPECT_EQ(2u, filtered.size());
+  EXPECT_EQ(extensions, filtered);
+}
+}  // namespace webrtc
diff --git a/api/rtpreceiverinterface.cc b/api/rtpreceiverinterface.cc
new file mode 100644
index 0000000..96815a9
--- /dev/null
+++ b/api/rtpreceiverinterface.cc
@@ -0,0 +1,44 @@
+/*
+ *  Copyright 2018 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/rtpreceiverinterface.h"
+
+namespace webrtc {
+
+RtpSource::RtpSource(int64_t timestamp_ms,
+                     uint32_t source_id,
+                     RtpSourceType source_type)
+    : timestamp_ms_(timestamp_ms),
+      source_id_(source_id),
+      source_type_(source_type) {}
+
+RtpSource::RtpSource(int64_t timestamp_ms,
+                     uint32_t source_id,
+                     RtpSourceType source_type,
+                     uint8_t audio_level)
+    : timestamp_ms_(timestamp_ms),
+      source_id_(source_id),
+      source_type_(source_type),
+      audio_level_(audio_level) {}
+
+RtpSource::RtpSource(const RtpSource&) = default;
+RtpSource& RtpSource::operator=(const RtpSource&) = default;
+RtpSource::~RtpSource() = default;
+
+std::vector<rtc::scoped_refptr<MediaStreamInterface>>
+RtpReceiverInterface::streams() const {
+  return {};
+}
+
+std::vector<RtpSource> RtpReceiverInterface::GetSources() const {
+  return {};
+}
+
+}  // namespace webrtc
diff --git a/api/rtpreceiverinterface.h b/api/rtpreceiverinterface.h
new file mode 100644
index 0000000..0e32eae
--- /dev/null
+++ b/api/rtpreceiverinterface.h
@@ -0,0 +1,145 @@
+/*
+ *  Copyright 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains interfaces for RtpReceivers
+// http://w3c.github.io/webrtc-pc/#rtcrtpreceiver-interface
+
+#ifndef API_RTPRECEIVERINTERFACE_H_
+#define API_RTPRECEIVERINTERFACE_H_
+
+#include <string>
+#include <vector>
+
+#include "api/mediastreaminterface.h"
+#include "api/mediatypes.h"
+#include "api/proxy.h"
+#include "api/rtpparameters.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/scoped_ref_ptr.h"
+
+namespace webrtc {
+
+enum class RtpSourceType {
+  SSRC,
+  CSRC,
+};
+
+class RtpSource {
+ public:
+  RtpSource() = delete;
+  RtpSource(int64_t timestamp_ms,
+            uint32_t source_id,
+            RtpSourceType source_type);
+  RtpSource(int64_t timestamp_ms,
+            uint32_t source_id,
+            RtpSourceType source_type,
+            uint8_t audio_level);
+  RtpSource(const RtpSource&);
+  RtpSource& operator=(const RtpSource&);
+  ~RtpSource();
+
+  int64_t timestamp_ms() const { return timestamp_ms_; }
+  void update_timestamp_ms(int64_t timestamp_ms) {
+    RTC_DCHECK_LE(timestamp_ms_, timestamp_ms);
+    timestamp_ms_ = timestamp_ms;
+  }
+
+  // The identifier of the source can be the CSRC or the SSRC.
+  uint32_t source_id() const { return source_id_; }
+
+  // The source can be either a contributing source or a synchronization source.
+  RtpSourceType source_type() const { return source_type_; }
+
+  rtc::Optional<uint8_t> audio_level() const { return audio_level_; }
+  void set_audio_level(const rtc::Optional<uint8_t>& level) {
+    audio_level_ = level;
+  }
+
+  bool operator==(const RtpSource& o) const {
+    return timestamp_ms_ == o.timestamp_ms() && source_id_ == o.source_id() &&
+           source_type_ == o.source_type() && audio_level_ == o.audio_level_;
+  }
+
+ private:
+  int64_t timestamp_ms_;
+  uint32_t source_id_;
+  RtpSourceType source_type_;
+  rtc::Optional<uint8_t> audio_level_;
+};
+
+class RtpReceiverObserverInterface {
+ public:
+  // Note: Currently if there are multiple RtpReceivers of the same media type,
+  // they will all call OnFirstPacketReceived at once.
+  //
+  // In the future, it's likely that an RtpReceiver will only call
+  // OnFirstPacketReceived when a packet is received specifically for its
+  // SSRC/mid.
+  virtual void OnFirstPacketReceived(cricket::MediaType media_type) = 0;
+
+ protected:
+  virtual ~RtpReceiverObserverInterface() {}
+};
+
+class RtpReceiverInterface : public rtc::RefCountInterface {
+ public:
+  virtual rtc::scoped_refptr<MediaStreamTrackInterface> track() const = 0;
+  // The list of streams that |track| is associated with. This is the same as
+  // the [[AssociatedRemoteMediaStreams]] internal slot in the spec.
+  // https://w3c.github.io/webrtc-pc/#dfn-x%5B%5Bassociatedremotemediastreams%5D%5D
+  // TODO(hbos): Make pure virtual as soon as Chromium's mock implements this.
+  virtual std::vector<rtc::scoped_refptr<MediaStreamInterface>> streams() const;
+
+  // Audio or video receiver?
+  virtual cricket::MediaType media_type() const = 0;
+
+  // Not to be confused with "mid", this is a field we can temporarily use
+  // to uniquely identify a receiver until we implement Unified Plan SDP.
+  virtual std::string id() const = 0;
+
+  // The WebRTC specification only defines RTCRtpParameters in terms of senders,
+  // but this API also applies them to receivers, similar to ORTC:
+  // http://ortc.org/wp-content/uploads/2016/03/ortc.html#rtcrtpparameters*.
+  virtual RtpParameters GetParameters() const = 0;
+  // Currently, doesn't support changing any parameters, but may in the future.
+  virtual bool SetParameters(const RtpParameters& parameters) = 0;
+
+  // Does not take ownership of observer.
+  // Must call SetObserver(nullptr) before the observer is destroyed.
+  virtual void SetObserver(RtpReceiverObserverInterface* observer) = 0;
+
+  // TODO(zhihuang): Remove the default implementation once the subclasses
+  // implement this. Currently, the only relevant subclass is the
+  // content::FakeRtpReceiver in Chromium.
+  virtual std::vector<RtpSource> GetSources() const;
+
+ protected:
+  ~RtpReceiverInterface() override = default;
+};
+
+// Define proxy for RtpReceiverInterface.
+// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods
+// are called on is an implementation detail.
+BEGIN_SIGNALING_PROXY_MAP(RtpReceiver)
+  PROXY_SIGNALING_THREAD_DESTRUCTOR()
+  PROXY_CONSTMETHOD0(rtc::scoped_refptr<MediaStreamTrackInterface>, track)
+  PROXY_CONSTMETHOD0(std::vector<rtc::scoped_refptr<MediaStreamInterface>>,
+                     streams)
+  PROXY_CONSTMETHOD0(cricket::MediaType, media_type)
+  PROXY_CONSTMETHOD0(std::string, id)
+  PROXY_CONSTMETHOD0(RtpParameters, GetParameters);
+  PROXY_METHOD1(bool, SetParameters, const RtpParameters&)
+  PROXY_METHOD1(void, SetObserver, RtpReceiverObserverInterface*);
+  PROXY_CONSTMETHOD0(std::vector<RtpSource>, GetSources);
+  END_PROXY_MAP()
+
+}  // namespace webrtc
+
+#endif  // API_RTPRECEIVERINTERFACE_H_
diff --git a/api/rtpsenderinterface.h b/api/rtpsenderinterface.h
new file mode 100644
index 0000000..2ca2edc
--- /dev/null
+++ b/api/rtpsenderinterface.h
@@ -0,0 +1,85 @@
+/*
+ *  Copyright 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains interfaces for RtpSenders
+// http://w3c.github.io/webrtc-pc/#rtcrtpsender-interface
+
+#ifndef API_RTPSENDERINTERFACE_H_
+#define API_RTPSENDERINTERFACE_H_
+
+#include <string>
+#include <vector>
+
+#include "api/dtmfsenderinterface.h"
+#include "api/mediastreaminterface.h"
+#include "api/mediatypes.h"
+#include "api/proxy.h"
+#include "api/rtcerror.h"
+#include "api/rtpparameters.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/scoped_ref_ptr.h"
+
+namespace webrtc {
+
+class RtpSenderInterface : public rtc::RefCountInterface {
+ public:
+  // Returns true if successful in setting the track.
+  // Fails if an audio track is set on a video RtpSender, or vice-versa.
+  virtual bool SetTrack(MediaStreamTrackInterface* track) = 0;
+  virtual rtc::scoped_refptr<MediaStreamTrackInterface> track() const = 0;
+
+  // Returns primary SSRC used by this sender for sending media.
+  // Returns 0 if not yet determined.
+  // TODO(deadbeef): Change to rtc::Optional.
+  // TODO(deadbeef): Remove? With GetParameters this should be redundant.
+  virtual uint32_t ssrc() const = 0;
+
+  // Audio or video sender?
+  virtual cricket::MediaType media_type() const = 0;
+
+  // Not to be confused with "mid", this is a field we can temporarily use
+  // to uniquely identify a receiver until we implement Unified Plan SDP.
+  virtual std::string id() const = 0;
+
+  // Returns a list of streams associated with this sender's track. Although we
+  // only support one track per stream, in theory the API allows for multiple.
+  virtual std::vector<std::string> stream_ids() const = 0;
+
+  virtual RtpParameters GetParameters() const = 0;
+  // Note that only a subset of the parameters can currently be changed. See
+  // rtpparameters.h
+  virtual RTCError SetParameters(const RtpParameters& parameters) = 0;
+
+  // Returns null for a video sender.
+  virtual rtc::scoped_refptr<DtmfSenderInterface> GetDtmfSender() const = 0;
+
+ protected:
+  virtual ~RtpSenderInterface() {}
+};
+
+// Define proxy for RtpSenderInterface.
+// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods
+// are called on is an implementation detail.
+BEGIN_SIGNALING_PROXY_MAP(RtpSender)
+  PROXY_SIGNALING_THREAD_DESTRUCTOR()
+  PROXY_METHOD1(bool, SetTrack, MediaStreamTrackInterface*)
+  PROXY_CONSTMETHOD0(rtc::scoped_refptr<MediaStreamTrackInterface>, track)
+  PROXY_CONSTMETHOD0(uint32_t, ssrc)
+  PROXY_CONSTMETHOD0(cricket::MediaType, media_type)
+  PROXY_CONSTMETHOD0(std::string, id)
+  PROXY_CONSTMETHOD0(std::vector<std::string>, stream_ids)
+  PROXY_CONSTMETHOD0(RtpParameters, GetParameters);
+  PROXY_METHOD1(RTCError, SetParameters, const RtpParameters&)
+  PROXY_CONSTMETHOD0(rtc::scoped_refptr<DtmfSenderInterface>, GetDtmfSender);
+  END_PROXY_MAP()
+
+}  // namespace webrtc
+
+#endif  // API_RTPSENDERINTERFACE_H_
diff --git a/api/rtptransceiverinterface.h b/api/rtptransceiverinterface.h
new file mode 100644
index 0000000..3ea75fd
--- /dev/null
+++ b/api/rtptransceiverinterface.h
@@ -0,0 +1,133 @@
+/*
+ *  Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_RTPTRANSCEIVERINTERFACE_H_
+#define API_RTPTRANSCEIVERINTERFACE_H_
+
+#include <string>
+#include <vector>
+
+#include "api/optional.h"
+#include "api/rtpreceiverinterface.h"
+#include "api/rtpsenderinterface.h"
+#include "rtc_base/refcount.h"
+
+namespace webrtc {
+
+// https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiverdirection
+enum class RtpTransceiverDirection {
+  kSendRecv,
+  kSendOnly,
+  kRecvOnly,
+  kInactive
+};
+
+// This is provided as a debugging aid. The format of the output is unspecified.
+std::ostream& operator<<(std::ostream& os, RtpTransceiverDirection direction);
+
+// Structure for initializing an RtpTransceiver in a call to
+// PeerConnectionInterface::AddTransceiver.
+// https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiverinit
+struct RtpTransceiverInit final {
+  // Direction of the RtpTransceiver. See RtpTransceiverInterface::direction().
+  RtpTransceiverDirection direction = RtpTransceiverDirection::kSendRecv;
+
+  // The added RtpTransceiver will be added to these streams.
+  // TODO(shampson): Change name to stream_id & update native wrapper's naming
+  // as well.
+  // TODO(bugs.webrtc.org/7600): Not implemented.
+  std::vector<std::string> stream_ids;
+
+  // TODO(bugs.webrtc.org/7600): Not implemented.
+  std::vector<RtpEncodingParameters> send_encodings;
+};
+
+// The RtpTransceiverInterface maps to the RTCRtpTransceiver defined by the
+// WebRTC specification. A transceiver represents a combination of an RtpSender
+// and an RtpReceiver than share a common mid. As defined in JSEP, an
+// RtpTransceiver is said to be associated with a media description if its mid
+// property is non-null; otherwise, it is said to be disassociated.
+// JSEP: https://tools.ietf.org/html/draft-ietf-rtcweb-jsep-24
+//
+// Note that RtpTransceivers are only supported when using PeerConnection with
+// Unified Plan SDP.
+//
+// This class is thread-safe.
+//
+// WebRTC specification for RTCRtpTransceiver, the JavaScript analog:
+// https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver
+class RtpTransceiverInterface : public rtc::RefCountInterface {
+ public:
+  // Media type of the transceiver. Any sender(s)/receiver(s) will have this
+  // type as well.
+  virtual cricket::MediaType media_type() const = 0;
+
+  // The mid attribute is the mid negotiated and present in the local and
+  // remote descriptions. Before negotiation is complete, the mid value may be
+  // null. After rollbacks, the value may change from a non-null value to null.
+  // https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver-mid
+  virtual rtc::Optional<std::string> mid() const = 0;
+
+  // The sender attribute exposes the RtpSender corresponding to the RTP media
+  // that may be sent with the transceiver's mid. The sender is always present,
+  // regardless of the direction of media.
+  // https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver-sender
+  virtual rtc::scoped_refptr<RtpSenderInterface> sender() const = 0;
+
+  // The receiver attribute exposes the RtpReceiver corresponding to the RTP
+  // media that may be received with the transceiver's mid. The receiver is
+  // always present, regardless of the direction of media.
+  // https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver-receiver
+  virtual rtc::scoped_refptr<RtpReceiverInterface> receiver() const = 0;
+
+  // The stopped attribute indicates that the sender of this transceiver will no
+  // longer send, and that the receiver will no longer receive. It is true if
+  // either stop has been called or if setting the local or remote description
+  // has caused the RtpTransceiver to be stopped.
+  // https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver-stopped
+  virtual bool stopped() const = 0;
+
+  // The direction attribute indicates the preferred direction of this
+  // transceiver, which will be used in calls to CreateOffer and CreateAnswer.
+  // https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver-direction
+  virtual RtpTransceiverDirection direction() const = 0;
+
+  // Sets the preferred direction of this transceiver. An update of
+  // directionality does not take effect immediately. Instead, future calls to
+  // CreateOffer and CreateAnswer mark the corresponding media descriptions as
+  // sendrecv, sendonly, recvonly, or inactive.
+  // https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver-direction
+  virtual void SetDirection(RtpTransceiverDirection new_direction) = 0;
+
+  // The current_direction attribute indicates the current direction negotiated
+  // for this transceiver. If this transceiver has never been represented in an
+  // offer/answer exchange, or if the transceiver is stopped, the value is null.
+  // https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver-currentdirection
+  virtual rtc::Optional<RtpTransceiverDirection> current_direction() const = 0;
+
+  // The Stop method irreversibly stops the RtpTransceiver. The sender of this
+  // transceiver will no longer send, the receiver will no longer receive.
+  // https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver-stop
+  virtual void Stop() = 0;
+
+  // The SetCodecPreferences method overrides the default codec preferences used
+  // by WebRTC for this transceiver.
+  // https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver-setcodecpreferences
+  // TODO(steveanton): Not implemented.
+  virtual void SetCodecPreferences(
+      rtc::ArrayView<RtpCodecCapability> codecs) = 0;
+
+ protected:
+  virtual ~RtpTransceiverInterface() = default;
+};
+
+}  // namespace webrtc
+
+#endif  // API_RTPTRANSCEIVERINTERFACE_H_
diff --git a/api/setremotedescriptionobserverinterface.h b/api/setremotedescriptionobserverinterface.h
new file mode 100644
index 0000000..bea8b82
--- /dev/null
+++ b/api/setremotedescriptionobserverinterface.h
@@ -0,0 +1,31 @@
+/*
+ *  Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_SETREMOTEDESCRIPTIONOBSERVERINTERFACE_H_
+#define API_SETREMOTEDESCRIPTIONOBSERVERINTERFACE_H_
+
+#include "api/rtcerror.h"
+#include "rtc_base/refcount.h"
+
+namespace webrtc {
+
+// An observer for PeerConnectionInterface::SetRemoteDescription(). The
+// callback is invoked such that the state of the peer connection can be
+// examined to accurately reflect the effects of the SetRemoteDescription
+// operation.
+class SetRemoteDescriptionObserverInterface : public rtc::RefCountInterface {
+ public:
+  // On success, |error.ok()| is true.
+  virtual void OnSetRemoteDescriptionComplete(RTCError error) = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // API_SETREMOTEDESCRIPTIONOBSERVERINTERFACE_H_
diff --git a/api/statstypes.cc b/api/statstypes.cc
new file mode 100644
index 0000000..49a00bb
--- /dev/null
+++ b/api/statstypes.cc
@@ -0,0 +1,839 @@
+/*
+ *  Copyright 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/statstypes.h"
+
+#include <string.h>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/refcountedobject.h"
+
+// TODO(tommi): Could we have a static map of value name -> expected type
+// and use this to RTC_DCHECK on correct usage (somewhat strongly typed values)?
+// Alternatively, we could define the names+type in a separate document and
+// generate strongly typed inline C++ code that forces the correct type to be
+// used for a given name at compile time.
+
+using rtc::RefCountedObject;
+
+namespace webrtc {
+namespace {
+
+// The id of StatsReport of type kStatsReportTypeBwe.
+const char kStatsReportVideoBweId[] = "bweforvideo";
+
+// NOTE: These names need to be consistent with an external
+// specification (W3C Stats Identifiers).
+const char* InternalTypeToString(StatsReport::StatsType type) {
+  switch (type) {
+    case StatsReport::kStatsReportTypeSession:
+      return "googLibjingleSession";
+    case StatsReport::kStatsReportTypeBwe:
+      return "VideoBwe";
+    case StatsReport::kStatsReportTypeRemoteSsrc:
+      return "remoteSsrc";
+    case StatsReport::kStatsReportTypeSsrc:
+      return "ssrc";
+    case StatsReport::kStatsReportTypeTrack:
+      return "googTrack";
+    case StatsReport::kStatsReportTypeIceLocalCandidate:
+      return "localcandidate";
+    case StatsReport::kStatsReportTypeIceRemoteCandidate:
+      return "remotecandidate";
+    case StatsReport::kStatsReportTypeTransport:
+      return "transport";
+    case StatsReport::kStatsReportTypeComponent:
+      return "googComponent";
+    case StatsReport::kStatsReportTypeCandidatePair:
+      return "googCandidatePair";
+    case StatsReport::kStatsReportTypeCertificate:
+      return "googCertificate";
+    case StatsReport::kStatsReportTypeDataChannel:
+      return "datachannel";
+  }
+  RTC_NOTREACHED();
+  return nullptr;
+}
+
+class BandwidthEstimationId : public StatsReport::IdBase {
+ public:
+  BandwidthEstimationId()
+      : StatsReport::IdBase(StatsReport::kStatsReportTypeBwe) {}
+  std::string ToString() const override { return kStatsReportVideoBweId; }
+};
+
+class TypedId : public StatsReport::IdBase {
+ public:
+  TypedId(StatsReport::StatsType type, const std::string& id)
+      : StatsReport::IdBase(type), id_(id) {}
+
+  bool Equals(const IdBase& other) const override {
+    return IdBase::Equals(other) &&
+           static_cast<const TypedId&>(other).id_ == id_;
+  }
+
+  std::string ToString() const override {
+    return std::string(InternalTypeToString(type_)) + kSeparator + id_;
+  }
+
+ protected:
+  const std::string id_;
+};
+
+class TypedIntId : public StatsReport::IdBase {
+ public:
+  TypedIntId(StatsReport::StatsType type, int id)
+      : StatsReport::IdBase(type), id_(id) {}
+
+  bool Equals(const IdBase& other) const override {
+    return IdBase::Equals(other) &&
+           static_cast<const TypedIntId&>(other).id_ == id_;
+  }
+
+  std::string ToString() const override {
+    return std::string(InternalTypeToString(type_)) +
+           kSeparator +
+           rtc::ToString<int>(id_);
+  }
+
+ protected:
+  const int id_;
+};
+
+class IdWithDirection : public TypedId {
+ public:
+  IdWithDirection(StatsReport::StatsType type, const std::string& id,
+                  StatsReport::Direction direction)
+      : TypedId(type, id), direction_(direction) {}
+
+  bool Equals(const IdBase& other) const override {
+    return TypedId::Equals(other) &&
+           static_cast<const IdWithDirection&>(other).direction_ == direction_;
+  }
+
+  std::string ToString() const override {
+    std::string ret(TypedId::ToString());
+    ret += kSeparator;
+    ret += direction_ == StatsReport::kSend ? "send" : "recv";
+    return ret;
+  }
+
+ private:
+  const StatsReport::Direction direction_;
+};
+
+class CandidateId : public TypedId {
+ public:
+  CandidateId(bool local, const std::string& id)
+      : TypedId(local ?
+                    StatsReport::kStatsReportTypeIceLocalCandidate :
+                    StatsReport::kStatsReportTypeIceRemoteCandidate,
+                id) {
+  }
+
+  std::string ToString() const override {
+    return "Cand-" + id_;
+  }
+};
+
+class ComponentId : public StatsReport::IdBase {
+ public:
+  ComponentId(const std::string& content_name, int component)
+      : ComponentId(StatsReport::kStatsReportTypeComponent, content_name,
+            component) {}
+
+  bool Equals(const IdBase& other) const override {
+    return IdBase::Equals(other) &&
+        static_cast<const ComponentId&>(other).component_ == component_ &&
+        static_cast<const ComponentId&>(other).content_name_ == content_name_;
+  }
+
+  std::string ToString() const override {
+    return ToString("Channel-");
+  }
+
+ protected:
+  ComponentId(StatsReport::StatsType type, const std::string& content_name,
+              int component)
+      : IdBase(type),
+        content_name_(content_name),
+        component_(component) {}
+
+  std::string ToString(const char* prefix) const {
+    std::string ret(prefix);
+    ret += content_name_;
+    ret += '-';
+    ret += rtc::ToString<>(component_);
+    return ret;
+  }
+
+ private:
+  const std::string content_name_;
+  const int component_;
+};
+
+class CandidatePairId : public ComponentId {
+ public:
+  CandidatePairId(const std::string& content_name, int component, int index)
+      : ComponentId(StatsReport::kStatsReportTypeCandidatePair, content_name,
+            component),
+        index_(index) {}
+
+  bool Equals(const IdBase& other) const override {
+    return ComponentId::Equals(other) &&
+        static_cast<const CandidatePairId&>(other).index_ == index_;
+  }
+
+  std::string ToString() const override {
+    std::string ret(ComponentId::ToString("Conn-"));
+    ret += '-';
+    ret += rtc::ToString<>(index_);
+    return ret;
+  }
+
+ private:
+  const int index_;
+};
+
+}  // namespace
+
+StatsReport::IdBase::IdBase(StatsType type) : type_(type) {}
+StatsReport::IdBase::~IdBase() {}
+
+StatsReport::StatsType StatsReport::IdBase::type() const { return type_; }
+
+bool StatsReport::IdBase::Equals(const IdBase& other) const {
+  return other.type_ == type_;
+}
+
+StatsReport::Value::Value(StatsValueName name, int64_t value, Type int_type)
+    : name(name), type_(int_type) {
+  RTC_DCHECK(type_ == kInt || type_ == kInt64);
+  type_ == kInt ? value_.int_ = static_cast<int>(value) : value_.int64_ = value;
+}
+
+StatsReport::Value::Value(StatsValueName name, float f)
+    : name(name), type_(kFloat) {
+  value_.float_ = f;
+}
+
+StatsReport::Value::Value(StatsValueName name, const std::string& value)
+    : name(name), type_(kString) {
+  value_.string_ = new std::string(value);
+}
+
+StatsReport::Value::Value(StatsValueName name, const char* value)
+    : name(name), type_(kStaticString) {
+  value_.static_string_ = value;
+}
+
+StatsReport::Value::Value(StatsValueName name, bool b)
+    : name(name), type_(kBool) {
+  value_.bool_ = b;
+}
+
+StatsReport::Value::Value(StatsValueName name, const Id& value)
+    : name(name), type_(kId) {
+  value_.id_ = new Id(value);
+}
+
+StatsReport::Value::~Value() {
+  switch (type_) {
+    case kInt:
+    case kInt64:
+    case kFloat:
+    case kBool:
+    case kStaticString:
+      break;
+    case kString:
+      delete value_.string_;
+      break;
+    case kId:
+      delete value_.id_;
+      break;
+  }
+}
+
+bool StatsReport::Value::Equals(const Value& other) const {
+  if (name != other.name)
+    return false;
+
+  // There's a 1:1 relation between a name and a type, so we don't have to
+  // check that.
+  RTC_DCHECK_EQ(type_, other.type_);
+
+  switch (type_) {
+    case kInt:
+      return value_.int_ == other.value_.int_;
+    case kInt64:
+      return value_.int64_ == other.value_.int64_;
+    case kFloat:
+      return value_.float_ == other.value_.float_;
+    case kStaticString: {
+#if RTC_DCHECK_IS_ON
+      if (value_.static_string_ != other.value_.static_string_) {
+        RTC_DCHECK(strcmp(value_.static_string_, other.value_.static_string_) !=
+                   0)
+            << "Duplicate global?";
+      }
+#endif
+      return value_.static_string_ == other.value_.static_string_;
+    }
+    case kString:
+      return *value_.string_ == *other.value_.string_;
+    case kBool:
+      return value_.bool_ == other.value_.bool_;
+    case kId:
+      return (*value_.id_)->Equals(*other.value_.id_);
+  }
+  RTC_NOTREACHED();
+  return false;
+}
+
+bool StatsReport::Value::operator==(const std::string& value) const {
+  return (type_ == kString && value_.string_->compare(value) == 0) ||
+         (type_ == kStaticString && value.compare(value_.static_string_) == 0);
+}
+
+bool StatsReport::Value::operator==(const char* value) const {
+  if (type_ == kString)
+    return value_.string_->compare(value) == 0;
+  if (type_ != kStaticString)
+    return false;
+#if RTC_DCHECK_IS_ON
+  if (value_.static_string_ != value)
+    RTC_DCHECK(strcmp(value_.static_string_, value) != 0)
+        << "Duplicate global?";
+#endif
+  return value == value_.static_string_;
+}
+
+bool StatsReport::Value::operator==(int64_t value) const {
+  return type_ == kInt ? value_.int_ == static_cast<int>(value) :
+      (type_ == kInt64 ? value_.int64_ == value : false);
+}
+
+bool StatsReport::Value::operator==(bool value) const {
+  return type_ == kBool && value_.bool_ == value;
+}
+
+bool StatsReport::Value::operator==(float value) const {
+  return type_ == kFloat && value_.float_ == value;
+}
+
+bool StatsReport::Value::operator==(const Id& value) const {
+  return type_ == kId && (*value_.id_)->Equals(value);
+}
+
+int StatsReport::Value::int_val() const {
+  RTC_DCHECK(type_ == kInt);
+  return value_.int_;
+}
+
+int64_t StatsReport::Value::int64_val() const {
+  RTC_DCHECK(type_ == kInt64);
+  return value_.int64_;
+}
+
+float StatsReport::Value::float_val() const {
+  RTC_DCHECK(type_ == kFloat);
+  return value_.float_;
+}
+
+const char* StatsReport::Value::static_string_val() const {
+  RTC_DCHECK(type_ == kStaticString);
+  return value_.static_string_;
+}
+
+const std::string& StatsReport::Value::string_val() const {
+  RTC_DCHECK(type_ == kString);
+  return *value_.string_;
+}
+
+bool StatsReport::Value::bool_val() const {
+  RTC_DCHECK(type_ == kBool);
+  return value_.bool_;
+}
+
+const char* StatsReport::Value::display_name() const {
+  switch (name) {
+    case kStatsValueNameAecDivergentFilterFraction:
+      return "aecDivergentFilterFraction";
+    case kStatsValueNameAudioOutputLevel:
+      return "audioOutputLevel";
+    case kStatsValueNameAudioInputLevel:
+      return "audioInputLevel";
+    case kStatsValueNameBytesSent:
+      return "bytesSent";
+    case kStatsValueNameConcealedSamples:
+      return "concealedSamples";
+    case kStatsValueNameConcealmentEvents:
+      return "concealmentEvents";
+    case kStatsValueNamePacketsSent:
+      return "packetsSent";
+    case kStatsValueNameBytesReceived:
+      return "bytesReceived";
+    case kStatsValueNameLabel:
+      return "label";
+    case kStatsValueNamePacketsReceived:
+      return "packetsReceived";
+    case kStatsValueNamePacketsLost:
+      return "packetsLost";
+    case kStatsValueNameProtocol:
+      return "protocol";
+    case kStatsValueNameTotalSamplesReceived:
+      return "totalSamplesReceived";
+    case kStatsValueNameTransportId:
+      return "transportId";
+    case kStatsValueNameSelectedCandidatePairId:
+      return "selectedCandidatePairId";
+    case kStatsValueNameSsrc:
+      return "ssrc";
+    case kStatsValueNameState:
+      return "state";
+    case kStatsValueNameDataChannelId:
+      return "datachannelid";
+    case kStatsValueNameFramesDecoded:
+      return "framesDecoded";
+    case kStatsValueNameFramesEncoded:
+      return "framesEncoded";
+    case kStatsValueNameJitterBufferDelay:
+      return "jitterBufferDelay";
+    case kStatsValueNameCodecImplementationName:
+      return "codecImplementationName";
+    case kStatsValueNameMediaType:
+      return "mediaType";
+    case kStatsValueNameQpSum:
+      return "qpSum";
+    // 'goog' prefixed constants.
+    case kStatsValueNameAccelerateRate:
+      return "googAccelerateRate";
+    case kStatsValueNameActiveConnection:
+      return "googActiveConnection";
+    case kStatsValueNameActualEncBitrate:
+      return "googActualEncBitrate";
+    case kStatsValueNameAvailableReceiveBandwidth:
+      return "googAvailableReceiveBandwidth";
+    case kStatsValueNameAvailableSendBandwidth:
+      return "googAvailableSendBandwidth";
+    case kStatsValueNameAvgEncodeMs:
+      return "googAvgEncodeMs";
+    case kStatsValueNameBucketDelay:
+      return "googBucketDelay";
+    case kStatsValueNameBandwidthLimitedResolution:
+      return "googBandwidthLimitedResolution";
+    // STUN ping related attributes.
+    //
+    // TODO(zhihuang) Rename these stats to follow the standards.
+    // Connectivity checks.
+    case kStatsValueNameSentPingRequestsTotal:
+      return "requestsSent";
+    case kStatsValueNameSentPingRequestsBeforeFirstResponse:
+      return "consentRequestsSent";
+    case kStatsValueNameSentPingResponses:
+      return "responsesSent";
+    case kStatsValueNameRecvPingRequests:
+      return "requestsReceived";
+    case kStatsValueNameRecvPingResponses:
+      return "responsesReceived";
+    // STUN Keepalive pings.
+    case kStatsValueNameSentStunKeepaliveRequests:
+      return "stunKeepaliveRequestsSent";
+    case kStatsValueNameRecvStunKeepaliveResponses:
+      return "stunKeepaliveResponsesReceived";
+    case kStatsValueNameStunKeepaliveRttTotal:
+      return "stunKeepaliveRttTotal";
+    case kStatsValueNameStunKeepaliveRttSquaredTotal:
+      return "stunKeepaliveRttSquaredTotal";
+
+    // Candidate related attributes. Values are taken from
+    // http://w3c.github.io/webrtc-stats/#rtcstatstype-enum*.
+    case kStatsValueNameCandidateIPAddress:
+      return "ipAddress";
+    case kStatsValueNameCandidateNetworkType:
+      return "networkType";
+    case kStatsValueNameCandidatePortNumber:
+      return "portNumber";
+    case kStatsValueNameCandidatePriority:
+      return "priority";
+    case kStatsValueNameCandidateTransportType:
+      return "transport";
+    case kStatsValueNameCandidateType:
+      return "candidateType";
+
+    case kStatsValueNameChannelId:
+      return "googChannelId";
+    case kStatsValueNameCodecName:
+      return "googCodecName";
+    case kStatsValueNameComponent:
+      return "googComponent";
+    case kStatsValueNameContentName:
+      return "googContentName";
+    case kStatsValueNameContentType:
+      return "googContentType";
+    case kStatsValueNameCpuLimitedResolution:
+      return "googCpuLimitedResolution";
+    case kStatsValueNameDecodingCTSG:
+      return "googDecodingCTSG";
+    case kStatsValueNameDecodingCTN:
+      return "googDecodingCTN";
+    case kStatsValueNameDecodingMutedOutput:
+      return "googDecodingMuted";
+    case kStatsValueNameDecodingNormal:
+      return "googDecodingNormal";
+    case kStatsValueNameDecodingPLC:
+      return "googDecodingPLC";
+    case kStatsValueNameDecodingCNG:
+      return "googDecodingCNG";
+    case kStatsValueNameDecodingPLCCNG:
+      return "googDecodingPLCCNG";
+    case kStatsValueNameDer:
+      return "googDerBase64";
+    case kStatsValueNameDtlsCipher:
+      return "dtlsCipher";
+    case kStatsValueNameEchoDelayMedian:
+      return "googEchoCancellationEchoDelayMedian";
+    case kStatsValueNameEchoDelayStdDev:
+      return "googEchoCancellationEchoDelayStdDev";
+    case kStatsValueNameEchoReturnLoss:
+      return "googEchoCancellationReturnLoss";
+    case kStatsValueNameEchoReturnLossEnhancement:
+      return "googEchoCancellationReturnLossEnhancement";
+    case kStatsValueNameEncodeUsagePercent:
+      return "googEncodeUsagePercent";
+    case kStatsValueNameExpandRate:
+      return "googExpandRate";
+    case kStatsValueNameFingerprint:
+      return "googFingerprint";
+    case kStatsValueNameFingerprintAlgorithm:
+      return "googFingerprintAlgorithm";
+    case kStatsValueNameFirsReceived:
+      return "googFirsReceived";
+    case kStatsValueNameFirsSent:
+      return "googFirsSent";
+    case kStatsValueNameFrameHeightInput:
+      return "googFrameHeightInput";
+    case kStatsValueNameFrameHeightReceived:
+      return "googFrameHeightReceived";
+    case kStatsValueNameFrameHeightSent:
+      return "googFrameHeightSent";
+    case kStatsValueNameFrameRateReceived:
+      return "googFrameRateReceived";
+    case kStatsValueNameFrameRateDecoded:
+      return "googFrameRateDecoded";
+    case kStatsValueNameFrameRateOutput:
+      return "googFrameRateOutput";
+    case kStatsValueNameDecodeMs:
+      return "googDecodeMs";
+    case kStatsValueNameMaxDecodeMs:
+      return "googMaxDecodeMs";
+    case kStatsValueNameCurrentDelayMs:
+      return "googCurrentDelayMs";
+    case kStatsValueNameTargetDelayMs:
+      return "googTargetDelayMs";
+    case kStatsValueNameJitterBufferMs:
+      return "googJitterBufferMs";
+    case kStatsValueNameMinPlayoutDelayMs:
+      return "googMinPlayoutDelayMs";
+    case kStatsValueNameRenderDelayMs:
+      return "googRenderDelayMs";
+    case kStatsValueNameCaptureStartNtpTimeMs:
+      return "googCaptureStartNtpTimeMs";
+    case kStatsValueNameFrameRateInput:
+      return "googFrameRateInput";
+    case kStatsValueNameFrameRateSent:
+      return "googFrameRateSent";
+    case kStatsValueNameFrameWidthInput:
+      return "googFrameWidthInput";
+    case kStatsValueNameFrameWidthReceived:
+      return "googFrameWidthReceived";
+    case kStatsValueNameFrameWidthSent:
+      return "googFrameWidthSent";
+    case kStatsValueNameHasEnteredLowResolution:
+      return "googHasEnteredLowResolution";
+    case kStatsValueNameHugeFramesSent:
+      return "hugeFramesSent";
+    case kStatsValueNameInitiator:
+      return "googInitiator";
+    case kStatsValueNameInterframeDelayMaxMs:
+      return "googInterframeDelayMax";
+    case kStatsValueNameIssuerId:
+      return "googIssuerId";
+    case kStatsValueNameJitterReceived:
+      return "googJitterReceived";
+    case kStatsValueNameLocalAddress:
+      return "googLocalAddress";
+    case kStatsValueNameLocalCandidateId:
+      return "localCandidateId";
+    case kStatsValueNameLocalCandidateType:
+      return "googLocalCandidateType";
+    case kStatsValueNameLocalCertificateId:
+      return "localCertificateId";
+    case kStatsValueNameAdaptationChanges:
+      return "googAdaptationChanges";
+    case kStatsValueNameNacksReceived:
+      return "googNacksReceived";
+    case kStatsValueNameNacksSent:
+      return "googNacksSent";
+    case kStatsValueNamePreemptiveExpandRate:
+      return "googPreemptiveExpandRate";
+    case kStatsValueNamePlisReceived:
+      return "googPlisReceived";
+    case kStatsValueNamePlisSent:
+      return "googPlisSent";
+    case kStatsValueNamePreferredJitterBufferMs:
+      return "googPreferredJitterBufferMs";
+    case kStatsValueNameReceiving:
+      return "googReadable";
+    case kStatsValueNameRemoteAddress:
+      return "googRemoteAddress";
+    case kStatsValueNameRemoteCandidateId:
+      return "remoteCandidateId";
+    case kStatsValueNameRemoteCandidateType:
+      return "googRemoteCandidateType";
+    case kStatsValueNameRemoteCertificateId:
+      return "remoteCertificateId";
+    case kStatsValueNameResidualEchoLikelihood:
+      return "googResidualEchoLikelihood";
+    case kStatsValueNameResidualEchoLikelihoodRecentMax:
+      return "googResidualEchoLikelihoodRecentMax";
+    case kStatsValueNameAnaBitrateActionCounter:
+      return "googAnaBitrateActionCounter";
+    case kStatsValueNameAnaChannelActionCounter:
+      return "googAnaChannelActionCounter";
+    case kStatsValueNameAnaDtxActionCounter:
+      return "googAnaDtxActionCounter";
+    case kStatsValueNameAnaFecActionCounter:
+      return "googAnaFecActionCounter";
+    case kStatsValueNameAnaFrameLengthIncreaseCounter:
+      return "googAnaFrameLengthIncreaseCounter";
+    case kStatsValueNameAnaFrameLengthDecreaseCounter:
+      return "googAnaFrameLengthDecreaseCounter";
+    case kStatsValueNameAnaUplinkPacketLossFraction:
+      return "googAnaUplinkPacketLossFraction";
+    case kStatsValueNameRetransmitBitrate:
+      return "googRetransmitBitrate";
+    case kStatsValueNameRtt:
+      return "googRtt";
+    case kStatsValueNameSecondaryDecodedRate:
+      return "googSecondaryDecodedRate";
+    case kStatsValueNameSecondaryDiscardedRate:
+      return "googSecondaryDiscardedRate";
+    case kStatsValueNameSendPacketsDiscarded:
+      return "packetsDiscardedOnSend";
+    case kStatsValueNameSpeechExpandRate:
+      return "googSpeechExpandRate";
+    case kStatsValueNameSrtpCipher:
+      return "srtpCipher";
+    case kStatsValueNameTargetEncBitrate:
+      return "googTargetEncBitrate";
+    case kStatsValueNameTotalAudioEnergy:
+      return "totalAudioEnergy";
+    case kStatsValueNameTotalSamplesDuration:
+      return "totalSamplesDuration";
+    case kStatsValueNameTransmitBitrate:
+      return "googTransmitBitrate";
+    case kStatsValueNameTransportType:
+      return "googTransportType";
+    case kStatsValueNameTrackId:
+      return "googTrackId";
+    case kStatsValueNameTimingFrameInfo:
+      return "googTimingFrameInfo";
+    case kStatsValueNameTypingNoiseState:
+      return "googTypingNoiseState";
+    case kStatsValueNameWritable:
+      return "googWritable";
+  }
+
+  return nullptr;
+}
+
+std::string StatsReport::Value::ToString() const {
+  switch (type_) {
+    case kInt:
+      return rtc::ToString(value_.int_);
+    case kInt64:
+      return rtc::ToString(value_.int64_);
+    case kFloat:
+      return rtc::ToString(value_.float_);
+    case kStaticString:
+      return std::string(value_.static_string_);
+    case kString:
+      return *value_.string_;
+    case kBool:
+      return value_.bool_ ? "true" : "false";
+    case kId:
+      return (*value_.id_)->ToString();
+  }
+  RTC_NOTREACHED();
+  return std::string();
+}
+
+StatsReport::StatsReport(const Id& id) : id_(id), timestamp_(0.0) {
+  RTC_DCHECK(id_.get());
+}
+
+StatsReport::~StatsReport() = default;
+
+// static
+StatsReport::Id StatsReport::NewBandwidthEstimationId() {
+  return Id(new RefCountedObject<BandwidthEstimationId>());
+}
+
+// static
+StatsReport::Id StatsReport::NewTypedId(StatsType type, const std::string& id) {
+  return Id(new RefCountedObject<TypedId>(type, id));
+}
+
+// static
+StatsReport::Id StatsReport::NewTypedIntId(StatsType type, int id) {
+  return Id(new RefCountedObject<TypedIntId>(type, id));
+}
+
+// static
+StatsReport::Id StatsReport::NewIdWithDirection(
+    StatsType type, const std::string& id, StatsReport::Direction direction) {
+  return Id(new RefCountedObject<IdWithDirection>(type, id, direction));
+}
+
+// static
+StatsReport::Id StatsReport::NewCandidateId(bool local, const std::string& id) {
+  return Id(new RefCountedObject<CandidateId>(local, id));
+}
+
+// static
+StatsReport::Id StatsReport::NewComponentId(
+    const std::string& content_name, int component) {
+  return Id(new RefCountedObject<ComponentId>(content_name, component));
+}
+
+// static
+StatsReport::Id StatsReport::NewCandidatePairId(
+    const std::string& content_name, int component, int index) {
+  return Id(new RefCountedObject<CandidatePairId>(
+      content_name, component, index));
+}
+
+const char* StatsReport::TypeToString() const {
+  return InternalTypeToString(id_->type());
+}
+
+void StatsReport::AddString(StatsReport::StatsValueName name,
+                            const std::string& value) {
+  const Value* found = FindValue(name);
+  if (!found || !(*found == value))
+    values_[name] = ValuePtr(new Value(name, value));
+}
+
+void StatsReport::AddString(StatsReport::StatsValueName name,
+                            const char* value) {
+  const Value* found = FindValue(name);
+  if (!found || !(*found == value))
+    values_[name] = ValuePtr(new Value(name, value));
+}
+
+void StatsReport::AddInt64(StatsReport::StatsValueName name, int64_t value) {
+  const Value* found = FindValue(name);
+  if (!found || !(*found == value))
+    values_[name] = ValuePtr(new Value(name, value, Value::kInt64));
+}
+
+void StatsReport::AddInt(StatsReport::StatsValueName name, int value) {
+  const Value* found = FindValue(name);
+  if (!found || !(*found == static_cast<int64_t>(value)))
+    values_[name] = ValuePtr(new Value(name, value, Value::kInt));
+}
+
+void StatsReport::AddFloat(StatsReport::StatsValueName name, float value) {
+  const Value* found = FindValue(name);
+  if (!found || !(*found == value))
+    values_[name] = ValuePtr(new Value(name, value));
+}
+
+void StatsReport::AddBoolean(StatsReport::StatsValueName name, bool value) {
+  const Value* found = FindValue(name);
+  if (!found || !(*found == value))
+    values_[name] = ValuePtr(new Value(name, value));
+}
+
+void StatsReport::AddId(StatsReport::StatsValueName name,
+                        const Id& value) {
+  const Value* found = FindValue(name);
+  if (!found || !(*found == value))
+    values_[name] = ValuePtr(new Value(name, value));
+}
+
+const StatsReport::Value* StatsReport::FindValue(StatsValueName name) const {
+  Values::const_iterator it = values_.find(name);
+  return it == values_.end() ? nullptr : it->second.get();
+}
+
+StatsCollection::StatsCollection() {
+}
+
+StatsCollection::~StatsCollection() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  for (auto* r : list_)
+    delete r;
+}
+
+StatsCollection::const_iterator StatsCollection::begin() const {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  return list_.begin();
+}
+
+StatsCollection::const_iterator StatsCollection::end() const {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  return list_.end();
+}
+
+size_t StatsCollection::size() const {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  return list_.size();
+}
+
+StatsReport* StatsCollection::InsertNew(const StatsReport::Id& id) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(Find(id) == nullptr);
+  StatsReport* report = new StatsReport(id);
+  list_.push_back(report);
+  return report;
+}
+
+StatsReport* StatsCollection::FindOrAddNew(const StatsReport::Id& id) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  StatsReport* ret = Find(id);
+  return ret ? ret : InsertNew(id);
+}
+
+StatsReport* StatsCollection::ReplaceOrAddNew(const StatsReport::Id& id) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(id.get());
+  Container::iterator it = std::find_if(list_.begin(), list_.end(),
+      [&id](const StatsReport* r)->bool { return r->id()->Equals(id); });
+  if (it != end()) {
+    StatsReport* report = new StatsReport((*it)->id());
+    delete *it;
+    *it = report;
+    return report;
+  }
+  return InsertNew(id);
+}
+
+// Looks for a report with the given |id|.  If one is not found, null
+// will be returned.
+StatsReport* StatsCollection::Find(const StatsReport::Id& id) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  Container::iterator it = std::find_if(list_.begin(), list_.end(),
+      [&id](const StatsReport* r)->bool { return r->id()->Equals(id); });
+  return it == list_.end() ? nullptr : *it;
+}
+
+}  // namespace webrtc
diff --git a/api/statstypes.h b/api/statstypes.h
new file mode 100644
index 0000000..2e3de06
--- /dev/null
+++ b/api/statstypes.h
@@ -0,0 +1,451 @@
+/*
+ *  Copyright 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains structures used for retrieving statistics from an ongoing
+// libjingle session.
+
+#ifndef API_STATSTYPES_H_
+#define API_STATSTYPES_H_
+
+#include <algorithm>
+#include <list>
+#include <map>
+#include <string>
+#include <vector>
+
+#include "rtc_base/basictypes.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "rtc_base/stringencode.h"
+#include "rtc_base/thread_checker.h"
+
+namespace webrtc {
+
+class StatsReport {
+ public:
+  // Indicates whether a track is for sending or receiving.
+  // Used in reports for audio/video tracks.
+  enum Direction {
+    kSend = 0,
+    kReceive,
+  };
+
+  enum StatsType {
+    // StatsReport types.
+    // A StatsReport of |type| = "googSession" contains overall information
+    // about the thing libjingle calls a session (which may contain one
+    // or more RTP sessions.
+    kStatsReportTypeSession,
+
+    // A StatsReport of |type| = "googTransport" contains information
+    // about a libjingle "transport".
+    kStatsReportTypeTransport,
+
+    // A StatsReport of |type| = "googComponent" contains information
+    // about a libjingle "channel" (typically, RTP or RTCP for a transport).
+    // This is intended to be the same thing as an ICE "Component".
+    kStatsReportTypeComponent,
+
+    // A StatsReport of |type| = "googCandidatePair" contains information
+    // about a libjingle "connection" - a single source/destination port pair.
+    // This is intended to be the same thing as an ICE "candidate pair".
+    kStatsReportTypeCandidatePair,
+
+    // A StatsReport of |type| = "VideoBWE" is statistics for video Bandwidth
+    // Estimation, which is global per-session.  The |id| field is "bweforvideo"
+    // (will probably change in the future).
+    kStatsReportTypeBwe,
+
+    // A StatsReport of |type| = "ssrc" is statistics for a specific rtp stream.
+    // The |id| field is the SSRC in decimal form of the rtp stream.
+    kStatsReportTypeSsrc,
+
+    // A StatsReport of |type| = "remoteSsrc" is statistics for a specific
+    // rtp stream, generated by the remote end of the connection.
+    kStatsReportTypeRemoteSsrc,
+
+    // A StatsReport of |type| = "googTrack" is statistics for a specific media
+    // track. The |id| field is the track id.
+    kStatsReportTypeTrack,
+
+    // A StatsReport of |type| = "localcandidate" or "remotecandidate" is
+    // attributes on a specific ICE Candidate. It links to its connection pair
+    // by candidate id. The string value is taken from
+    // http://w3c.github.io/webrtc-stats/#rtcstatstype-enum*.
+    kStatsReportTypeIceLocalCandidate,
+    kStatsReportTypeIceRemoteCandidate,
+
+    // A StatsReport of |type| = "googCertificate" contains an SSL certificate
+    // transmitted by one of the endpoints of this connection.  The |id| is
+    // controlled by the fingerprint, and is used to identify the certificate in
+    // the Channel stats (as "googLocalCertificateId" or
+    // "googRemoteCertificateId") and in any child certificates (as
+    // "googIssuerId").
+    kStatsReportTypeCertificate,
+
+    // A StatsReport of |type| = "datachannel" with statistics for a
+    // particular DataChannel.
+    kStatsReportTypeDataChannel,
+  };
+
+  enum StatsValueName {
+    kStatsValueNameActiveConnection,
+    kStatsValueNameAecDivergentFilterFraction,
+    kStatsValueNameAudioInputLevel,
+    kStatsValueNameAudioOutputLevel,
+    kStatsValueNameBytesReceived,
+    kStatsValueNameBytesSent,
+    kStatsValueNameCodecImplementationName,
+    kStatsValueNameConcealedSamples,
+    kStatsValueNameConcealmentEvents,
+    kStatsValueNameDataChannelId,
+    kStatsValueNameFramesDecoded,
+    kStatsValueNameFramesEncoded,
+    kStatsValueNameJitterBufferDelay,
+    kStatsValueNameMediaType,
+    kStatsValueNamePacketsLost,
+    kStatsValueNamePacketsReceived,
+    kStatsValueNamePacketsSent,
+    kStatsValueNameProtocol,
+    kStatsValueNameQpSum,
+    kStatsValueNameReceiving,
+    kStatsValueNameSelectedCandidatePairId,
+    kStatsValueNameSsrc,
+    kStatsValueNameState,
+    kStatsValueNameTotalAudioEnergy,
+    kStatsValueNameTotalSamplesDuration,
+    kStatsValueNameTotalSamplesReceived,
+    kStatsValueNameTransportId,
+    kStatsValueNameSentPingRequestsTotal,
+    kStatsValueNameSentPingRequestsBeforeFirstResponse,
+    kStatsValueNameSentPingResponses,
+    kStatsValueNameRecvPingRequests,
+    kStatsValueNameRecvPingResponses,
+    kStatsValueNameSentStunKeepaliveRequests,
+    kStatsValueNameRecvStunKeepaliveResponses,
+    kStatsValueNameStunKeepaliveRttTotal,
+    kStatsValueNameStunKeepaliveRttSquaredTotal,
+
+    // Internal StatsValue names.
+    kStatsValueNameAccelerateRate,
+    kStatsValueNameActualEncBitrate,
+    kStatsValueNameAdaptationChanges,
+    kStatsValueNameAvailableReceiveBandwidth,
+    kStatsValueNameAvailableSendBandwidth,
+    kStatsValueNameAvgEncodeMs,
+    kStatsValueNameBandwidthLimitedResolution,
+    kStatsValueNameBucketDelay,
+    kStatsValueNameCaptureStartNtpTimeMs,
+    kStatsValueNameCandidateIPAddress,
+    kStatsValueNameCandidateNetworkType,
+    kStatsValueNameCandidatePortNumber,
+    kStatsValueNameCandidatePriority,
+    kStatsValueNameCandidateTransportType,
+    kStatsValueNameCandidateType,
+    kStatsValueNameChannelId,
+    kStatsValueNameCodecName,
+    kStatsValueNameComponent,
+    kStatsValueNameContentName,
+    kStatsValueNameContentType,
+    kStatsValueNameCpuLimitedResolution,
+    kStatsValueNameCurrentDelayMs,
+    kStatsValueNameDecodeMs,
+    kStatsValueNameDecodingCNG,
+    kStatsValueNameDecodingCTN,
+    kStatsValueNameDecodingCTSG,
+    kStatsValueNameDecodingMutedOutput,
+    kStatsValueNameDecodingNormal,
+    kStatsValueNameDecodingPLC,
+    kStatsValueNameDecodingPLCCNG,
+    kStatsValueNameDer,
+    kStatsValueNameDtlsCipher,
+    kStatsValueNameEchoDelayMedian,
+    kStatsValueNameEchoDelayStdDev,
+    kStatsValueNameEchoReturnLoss,
+    kStatsValueNameEchoReturnLossEnhancement,
+    kStatsValueNameEncodeUsagePercent,
+    kStatsValueNameExpandRate,
+    kStatsValueNameFingerprint,
+    kStatsValueNameFingerprintAlgorithm,
+    kStatsValueNameFirsReceived,
+    kStatsValueNameFirsSent,
+    kStatsValueNameFrameHeightInput,
+    kStatsValueNameFrameHeightReceived,
+    kStatsValueNameFrameHeightSent,
+    kStatsValueNameFrameRateDecoded,
+    kStatsValueNameFrameRateInput,
+    kStatsValueNameFrameRateOutput,
+    kStatsValueNameFrameRateReceived,
+    kStatsValueNameFrameRateSent,
+    kStatsValueNameFrameWidthInput,
+    kStatsValueNameFrameWidthReceived,
+    kStatsValueNameFrameWidthSent,
+    kStatsValueNameHasEnteredLowResolution,
+    kStatsValueNameHugeFramesSent,
+    kStatsValueNameInitiator,
+    kStatsValueNameInterframeDelayMaxMs,  // Max over last 10 seconds.
+    kStatsValueNameIssuerId,
+    kStatsValueNameJitterBufferMs,
+    kStatsValueNameJitterReceived,
+    kStatsValueNameLabel,
+    kStatsValueNameLocalAddress,
+    kStatsValueNameLocalCandidateId,
+    kStatsValueNameLocalCandidateType,
+    kStatsValueNameLocalCertificateId,
+    kStatsValueNameMaxDecodeMs,
+    kStatsValueNameMinPlayoutDelayMs,
+    kStatsValueNameNacksReceived,
+    kStatsValueNameNacksSent,
+    kStatsValueNamePlisReceived,
+    kStatsValueNamePlisSent,
+    kStatsValueNamePreemptiveExpandRate,
+    kStatsValueNamePreferredJitterBufferMs,
+    kStatsValueNameRemoteAddress,
+    kStatsValueNameRemoteCandidateId,
+    kStatsValueNameRemoteCandidateType,
+    kStatsValueNameRemoteCertificateId,
+    kStatsValueNameRenderDelayMs,
+    kStatsValueNameResidualEchoLikelihood,
+    kStatsValueNameResidualEchoLikelihoodRecentMax,
+    kStatsValueNameAnaBitrateActionCounter,
+    kStatsValueNameAnaChannelActionCounter,
+    kStatsValueNameAnaDtxActionCounter,
+    kStatsValueNameAnaFecActionCounter,
+    kStatsValueNameAnaFrameLengthIncreaseCounter,
+    kStatsValueNameAnaFrameLengthDecreaseCounter,
+    kStatsValueNameAnaUplinkPacketLossFraction,
+    kStatsValueNameRetransmitBitrate,
+    kStatsValueNameRtt,
+    kStatsValueNameSecondaryDecodedRate,
+    kStatsValueNameSecondaryDiscardedRate,
+    kStatsValueNameSendPacketsDiscarded,
+    kStatsValueNameSpeechExpandRate,
+    kStatsValueNameSrtpCipher,
+    kStatsValueNameTargetDelayMs,
+    kStatsValueNameTargetEncBitrate,
+    kStatsValueNameTimingFrameInfo,  // Result of |TimingFrameInfo::ToString|
+    kStatsValueNameTrackId,
+    kStatsValueNameTransmitBitrate,
+    kStatsValueNameTransportType,
+    kStatsValueNameTypingNoiseState,
+    kStatsValueNameWritable,
+  };
+
+  class IdBase : public rtc::RefCountInterface {
+   public:
+    ~IdBase() override;
+    StatsType type() const;
+
+    // Users of IdBase will be using the Id typedef, which is compatible with
+    // this Equals() function.  It simply calls the protected (and overridden)
+    // Equals() method.
+    bool Equals(const rtc::scoped_refptr<IdBase>& other) const {
+      return Equals(*other.get());
+    }
+
+    virtual std::string ToString() const = 0;
+
+   protected:
+    // Protected since users of the IdBase type will be using the Id typedef.
+    virtual bool Equals(const IdBase& other) const;
+
+    explicit IdBase(StatsType type);  // Only meant for derived classes.
+    const StatsType type_;
+
+    static const char kSeparator = '_';
+  };
+
+  typedef rtc::scoped_refptr<IdBase> Id;
+
+  struct Value {
+    enum Type {
+      kInt,           // int.
+      kInt64,         // int64_t.
+      kFloat,         // float.
+      kString,        // std::string
+      kStaticString,  // const char*.
+      kBool,          // bool.
+      kId,            // Id.
+    };
+
+    Value(StatsValueName name, int64_t value, Type int_type);
+    Value(StatsValueName name, float f);
+    Value(StatsValueName name, const std::string& value);
+    Value(StatsValueName name, const char* value);
+    Value(StatsValueName name, bool b);
+    Value(StatsValueName name, const Id& value);
+
+    ~Value();
+
+    // Support ref counting. Note that for performance reasons, we
+    // don't use thread safe operations. Therefore, all operations
+    // affecting the ref count (in practice, creation and copying of
+    // the Values mapping) must occur on webrtc's signalling thread.
+    int AddRef() const {
+      RTC_DCHECK_RUN_ON(&thread_checker_);
+      return ++ref_count_;
+    }
+    int Release() const {
+      RTC_DCHECK_RUN_ON(&thread_checker_);
+      int count = --ref_count_;
+      if (!count)
+        delete this;
+      return count;
+    }
+
+    // TODO(tommi): This compares name as well as value...
+    // I think we should only need to compare the value part and
+    // move the name part into a hash map.
+    bool Equals(const Value& other) const;
+
+    // Comparison operators. Return true iff the current instance is of the
+    // correct type and holds the same value.  No conversion is performed so
+    // a string value of "123" is not equal to an int value of 123 and an int
+    // value of 123 is not equal to a float value of 123.0f.
+    // One exception to this is that types kInt and kInt64 can be compared and
+    // kString and kStaticString too.
+    bool operator==(const std::string& value) const;
+    bool operator==(const char* value) const;
+    bool operator==(int64_t value) const;
+    bool operator==(bool value) const;
+    bool operator==(float value) const;
+    bool operator==(const Id& value) const;
+
+    // Getters that allow getting the native value directly.
+    // The caller must know the type beforehand or else hit a check.
+    int int_val() const;
+    int64_t int64_val() const;
+    float float_val() const;
+    const char* static_string_val() const;
+    const std::string& string_val() const;
+    bool bool_val() const;
+    const Id& id_val() const;
+
+    // Returns the string representation of |name|.
+    const char* display_name() const;
+
+    // Converts the native value to a string representation of the value.
+    std::string ToString() const;
+
+    Type type() const { return type_; }
+
+    // TODO(tommi): Move |name| and |display_name| out of the Value struct.
+    const StatsValueName name;
+
+   private:
+    rtc::ThreadChecker thread_checker_;
+    mutable int ref_count_ RTC_GUARDED_BY(thread_checker_) = 0;
+
+    const Type type_;
+    // TODO(tommi): Use C++ 11 union and make value_ const.
+    union InternalType {
+      int int_;
+      int64_t int64_;
+      float float_;
+      bool bool_;
+      std::string* string_;
+      const char* static_string_;
+      Id* id_;
+    } value_;
+
+    RTC_DISALLOW_COPY_AND_ASSIGN(Value);
+  };
+
+  typedef rtc::scoped_refptr<Value> ValuePtr;
+  typedef std::map<StatsValueName, ValuePtr> Values;
+
+  // Ownership of |id| is passed to |this|.
+  explicit StatsReport(const Id& id);
+  ~StatsReport();
+
+  // Factory functions for various types of stats IDs.
+  static Id NewBandwidthEstimationId();
+  static Id NewTypedId(StatsType type, const std::string& id);
+  static Id NewTypedIntId(StatsType type, int id);
+  static Id NewIdWithDirection(
+      StatsType type, const std::string& id, Direction direction);
+  static Id NewCandidateId(bool local, const std::string& id);
+  static Id NewComponentId(
+      const std::string& content_name, int component);
+  static Id NewCandidatePairId(
+      const std::string& content_name, int component, int index);
+
+  const Id& id() const { return id_; }
+  StatsType type() const { return id_->type(); }
+  double timestamp() const { return timestamp_; }
+  void set_timestamp(double t) { timestamp_ = t; }
+  bool empty() const { return values_.empty(); }
+  const Values& values() const { return values_; }
+
+  const char* TypeToString() const;
+
+  void AddString(StatsValueName name, const std::string& value);
+  void AddString(StatsValueName name, const char* value);
+  void AddInt64(StatsValueName name, int64_t value);
+  void AddInt(StatsValueName name, int value);
+  void AddFloat(StatsValueName name, float value);
+  void AddBoolean(StatsValueName name, bool value);
+  void AddId(StatsValueName name, const Id& value);
+
+  const Value* FindValue(StatsValueName name) const;
+
+ private:
+  // The unique identifier for this object.
+  // This is used as a key for this report in ordered containers,
+  // so it must never be changed.
+  const Id id_;
+  double timestamp_;  // Time since 1970-01-01T00:00:00Z in milliseconds.
+  Values values_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(StatsReport);
+};
+
+// Typedef for an array of const StatsReport pointers.
+// Ownership of the pointers held by this implementation is assumed to lie
+// elsewhere and lifetime guarantees are made by the implementation that uses
+// this type.  In the StatsCollector, object ownership lies with the
+// StatsCollection class.
+typedef std::vector<const StatsReport*> StatsReports;
+
+// A map from the report id to the report.
+// This class wraps an STL container and provides a limited set of
+// functionality in order to keep things simple.
+class StatsCollection {
+ public:
+  StatsCollection();
+  ~StatsCollection();
+
+  typedef std::list<StatsReport*> Container;
+  typedef Container::iterator iterator;
+  typedef Container::const_iterator const_iterator;
+
+  const_iterator begin() const;
+  const_iterator end() const;
+  size_t size() const;
+
+  // Creates a new report object with |id| that does not already
+  // exist in the list of reports.
+  StatsReport* InsertNew(const StatsReport::Id& id);
+  StatsReport* FindOrAddNew(const StatsReport::Id& id);
+  StatsReport* ReplaceOrAddNew(const StatsReport::Id& id);
+
+  // Looks for a report with the given |id|.  If one is not found, null
+  // will be returned.
+  StatsReport* Find(const StatsReport::Id& id);
+
+ private:
+  Container list_;
+  rtc::ThreadChecker thread_checker_;
+};
+
+}  // namespace webrtc
+
+#endif  // API_STATSTYPES_H_
diff --git a/api/turncustomizer.h b/api/turncustomizer.h
new file mode 100644
index 0000000..517abcc
--- /dev/null
+++ b/api/turncustomizer.h
@@ -0,0 +1,46 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_TURNCUSTOMIZER_H_
+#define API_TURNCUSTOMIZER_H_
+
+#include <stdlib.h>
+
+namespace cricket {
+class PortInterface;
+class StunMessage;
+}  // namespace cricket
+
+
+namespace webrtc {
+
+class TurnCustomizer {
+ public:
+  // This is called before a TURN message is sent.
+  // This could be used to add implementation specific attributes to a request.
+  virtual void MaybeModifyOutgoingStunMessage(
+      cricket::PortInterface* port,
+      cricket::StunMessage* message) = 0;
+
+  // TURN can send data using channel data messages or Send indication.
+  // This method should return false if |data| should be sent using
+  // a Send indication instead of a ChannelData message, even if a
+  // channel is bound.
+  virtual bool AllowChannelData(cricket::PortInterface* port,
+                                const void* data,
+                                size_t size,
+                                bool payload) = 0;
+
+  virtual ~TurnCustomizer() {}
+};
+
+}  // namespace webrtc
+
+#endif  // API_TURNCUSTOMIZER_H_
diff --git a/api/umametrics.cc b/api/umametrics.cc
new file mode 100644
index 0000000..d5f2bb6
--- /dev/null
+++ b/api/umametrics.cc
@@ -0,0 +1,21 @@
+/*
+ *  Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/umametrics.h"
+
+namespace webrtc {
+
+void MetricsObserverInterface::IncrementSparseEnumCounter(
+    PeerConnectionEnumCounterType type,
+    int counter) {
+  IncrementEnumCounter(type, counter, 0 /* Ignored */);
+}
+
+}  // namespace webrtc
diff --git a/api/umametrics.h b/api/umametrics.h
new file mode 100644
index 0000000..4de1ce4
--- /dev/null
+++ b/api/umametrics.h
@@ -0,0 +1,191 @@
+/*
+ *  Copyright 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains enums related to IPv4/IPv6 metrics.
+
+#ifndef API_UMAMETRICS_H_
+#define API_UMAMETRICS_H_
+
+#include "rtc_base/refcount.h"
+
+namespace webrtc {
+
+// Used to specify which enum counter type we're incrementing in
+// MetricsObserverInterface::IncrementEnumCounter.
+enum PeerConnectionEnumCounterType {
+  kEnumCounterAddressFamily,
+  // For the next 2 counters, we track them separately based on the "first hop"
+  // protocol used by the local candidate. "First hop" means the local candidate
+  // type in the case of non-TURN candidates, and the protocol used to connect
+  // to the TURN server in the case of TURN candidates.
+  kEnumCounterIceCandidatePairTypeUdp,
+  kEnumCounterIceCandidatePairTypeTcp,
+
+  kEnumCounterAudioSrtpCipher,
+  kEnumCounterAudioSslCipher,
+  kEnumCounterVideoSrtpCipher,
+  kEnumCounterVideoSslCipher,
+  kEnumCounterDataSrtpCipher,
+  kEnumCounterDataSslCipher,
+  kEnumCounterDtlsHandshakeError,
+  kEnumCounterIceRegathering,
+  kEnumCounterIceRestart,
+  kEnumCounterKeyProtocol,
+  kEnumCounterSdpSemanticRequested,
+  kEnumCounterSdpSemanticNegotiated,
+  kEnumCounterKeyProtocolMediaType,
+  kEnumCounterSdpFormatReceived,
+  kPeerConnectionEnumCounterMax
+};
+
+// Currently this contains information related to WebRTC network/transport
+// information.
+
+// The difference between PeerConnectionEnumCounter and
+// PeerConnectionMetricsName is that the "EnumCounter" is only counting the
+// occurrences of events, while "Name" has a value associated with it which is
+// used to form a histogram.
+
+// This enum is backed by Chromium's histograms.xml,
+// chromium/src/tools/metrics/histograms/histograms.xml
+// Existing values cannot be re-ordered and new enums must be added
+// before kBoundary.
+enum PeerConnectionAddressFamilyCounter {
+  kPeerConnection_IPv4,
+  kPeerConnection_IPv6,
+  kBestConnections_IPv4,
+  kBestConnections_IPv6,
+  kPeerConnectionAddressFamilyCounter_Max,
+};
+
+// TODO(guoweis): Keep previous name here until all references are renamed.
+#define kBoundary kPeerConnectionAddressFamilyCounter_Max
+
+// TODO(guoweis): Keep previous name here until all references are renamed.
+typedef PeerConnectionAddressFamilyCounter PeerConnectionUMAMetricsCounter;
+
+// This enum defines types for UMA samples, which will have a range.
+enum PeerConnectionMetricsName {
+  kNetworkInterfaces_IPv4,  // Number of IPv4 interfaces.
+  kNetworkInterfaces_IPv6,  // Number of IPv6 interfaces.
+  kTimeToConnect,           // In milliseconds.
+  kLocalCandidates_IPv4,    // Number of IPv4 local candidates.
+  kLocalCandidates_IPv6,    // Number of IPv6 local candidates.
+  kPeerConnectionMetricsName_Max
+};
+
+// TODO(guoweis): Keep previous name here until all references are renamed.
+typedef PeerConnectionMetricsName PeerConnectionUMAMetricsName;
+
+// The IceCandidatePairType has the format of
+// <local_candidate_type>_<remote_candidate_type>. It is recorded based on the
+// type of candidate pair used when the PeerConnection first goes to a completed
+// state. When BUNDLE is enabled, only the first transport gets recorded.
+enum IceCandidatePairType {
+  // HostHost is deprecated. It was replaced with the set of types at the bottom
+  // to report private or public host IP address.
+  kIceCandidatePairHostHost,
+  kIceCandidatePairHostSrflx,
+  kIceCandidatePairHostRelay,
+  kIceCandidatePairHostPrflx,
+  kIceCandidatePairSrflxHost,
+  kIceCandidatePairSrflxSrflx,
+  kIceCandidatePairSrflxRelay,
+  kIceCandidatePairSrflxPrflx,
+  kIceCandidatePairRelayHost,
+  kIceCandidatePairRelaySrflx,
+  kIceCandidatePairRelayRelay,
+  kIceCandidatePairRelayPrflx,
+  kIceCandidatePairPrflxHost,
+  kIceCandidatePairPrflxSrflx,
+  kIceCandidatePairPrflxRelay,
+
+  // The following 4 types tell whether local and remote hosts have private or
+  // public IP addresses.
+  kIceCandidatePairHostPrivateHostPrivate,
+  kIceCandidatePairHostPrivateHostPublic,
+  kIceCandidatePairHostPublicHostPrivate,
+  kIceCandidatePairHostPublicHostPublic,
+  kIceCandidatePairMax
+};
+
+enum KeyExchangeProtocolType {
+  kEnumCounterKeyProtocolDtls,
+  kEnumCounterKeyProtocolSdes,
+  kEnumCounterKeyProtocolMax
+};
+
+enum KeyExchangeProtocolMedia {
+  kEnumCounterKeyProtocolMediaTypeDtlsAudio,
+  kEnumCounterKeyProtocolMediaTypeDtlsVideo,
+  kEnumCounterKeyProtocolMediaTypeDtlsData,
+  kEnumCounterKeyProtocolMediaTypeSdesAudio,
+  kEnumCounterKeyProtocolMediaTypeSdesVideo,
+  kEnumCounterKeyProtocolMediaTypeSdesData,
+  kEnumCounterKeyProtocolMediaTypeMax
+};
+
+enum SdpSemanticRequested {
+  kSdpSemanticRequestDefault,
+  kSdpSemanticRequestPlanB,
+  kSdpSemanticRequestUnifiedPlan,
+  kSdpSemanticRequestMax
+};
+
+enum SdpSemanticNegotiated {
+  kSdpSemanticNegotiatedNone,
+  kSdpSemanticNegotiatedPlanB,
+  kSdpSemanticNegotiatedUnifiedPlan,
+  kSdpSemanticNegotiatedMixed,
+  kSdpSemanticNegotiatedMax
+};
+
+// Metric which records the format of the received SDP for tracking how much the
+// difference between Plan B and Unified Plan affect users.
+enum SdpFormatReceived {
+  // No audio or video tracks. This is worth special casing since it seems to be
+  // the most common scenario (data-channel only).
+  kSdpFormatReceivedNoTracks,
+  // No more than one audio and one video track. Should be compatible with both
+  // Plan B and Unified Plan endpoints.
+  kSdpFormatReceivedSimple,
+  // More than one audio track or more than one video track in the Plan B format
+  // (e.g., one audio media section with multiple streams).
+  kSdpFormatReceivedComplexPlanB,
+  // More than one audio track or more than one video track in the Unified Plan
+  // format (e.g., two audio media sections).
+  kSdpFormatReceivedComplexUnifiedPlan,
+  kSdpFormatReceivedMax
+};
+
+class MetricsObserverInterface : public rtc::RefCountInterface {
+ public:
+  // |type| is the type of the enum counter to be incremented. |counter|
+  // is the particular counter in that type. |counter_max| is the next sequence
+  // number after the highest counter.
+  virtual void IncrementEnumCounter(PeerConnectionEnumCounterType type,
+                                    int counter,
+                                    int counter_max) {}
+
+  // This is used to handle sparse counters like SSL cipher suites.
+  // TODO(guoweis): Remove the implementation once the dependency's interface
+  // definition is updated.
+  virtual void IncrementSparseEnumCounter(PeerConnectionEnumCounterType type,
+                                          int counter);
+
+  virtual void AddHistogramSample(PeerConnectionMetricsName type,
+                                  int value) = 0;
+};
+
+typedef MetricsObserverInterface UMAObserver;
+
+}  // namespace webrtc
+
+#endif  // API_UMAMETRICS_H_
diff --git a/api/video/encoded_frame.h b/api/video/encoded_frame.h
new file mode 100644
index 0000000..1374ea0
--- /dev/null
+++ b/api/video/encoded_frame.h
@@ -0,0 +1,64 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_VIDEO_ENCODED_FRAME_H_
+#define API_VIDEO_ENCODED_FRAME_H_
+
+#include "modules/video_coding/encoded_frame.h"
+
+namespace webrtc {
+namespace video_coding {
+
+// TODO(philipel): Remove webrtc::VCMEncodedFrame inheritance.
+class EncodedFrame : public webrtc::VCMEncodedFrame {
+ public:
+  static const uint8_t kMaxFrameReferences = 5;
+
+  EncodedFrame() = default;
+  virtual ~EncodedFrame() {}
+
+  virtual bool GetBitstream(uint8_t* destination) const = 0;
+
+  // The capture timestamp of this frame.
+  virtual uint32_t Timestamp() const = 0;
+
+  // When this frame was received.
+  virtual int64_t ReceivedTime() const = 0;
+
+  // When this frame should be rendered.
+  virtual int64_t RenderTime() const = 0;
+
+  // This information is currently needed by the timing calculation class.
+  // TODO(philipel): Remove this function when a new timing class has
+  //                 been implemented.
+  virtual bool delayed_by_retransmission() const { return 0; }
+
+  size_t size() const { return _length; }
+
+  bool is_keyframe() const { return num_references == 0; }
+
+  // The tuple (|picture_id|, |spatial_layer|) uniquely identifies a frame
+  // object. For codec types that don't necessarily have picture ids they
+  // have to be constructed from the header data relevant to that codec.
+  int64_t picture_id = 0;
+  uint8_t spatial_layer = 0;
+  uint32_t timestamp = 0;
+
+  // TODO(philipel): Add simple modify/access functions to prevent adding too
+  // many |references|.
+  size_t num_references = 0;
+  int64_t references[kMaxFrameReferences];
+  bool inter_layer_predicted = false;
+};
+
+}  // namespace video_coding
+}  // namespace webrtc
+
+#endif  // API_VIDEO_ENCODED_FRAME_H_
diff --git a/api/video/i420_buffer.cc b/api/video/i420_buffer.cc
new file mode 100644
index 0000000..66071e1
--- /dev/null
+++ b/api/video/i420_buffer.cc
@@ -0,0 +1,239 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "api/video/i420_buffer.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <utility>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/keep_ref_until_done.h"
+#include "third_party/libyuv/include/libyuv/convert.h"
+#include "third_party/libyuv/include/libyuv/planar_functions.h"
+#include "third_party/libyuv/include/libyuv/scale.h"
+
+// Aligning pointer to 64 bytes for improved performance, e.g. use SIMD.
+static const int kBufferAlignment = 64;
+
+namespace webrtc {
+
+namespace {
+
+int I420DataSize(int height, int stride_y, int stride_u, int stride_v) {
+  return stride_y * height + (stride_u + stride_v) * ((height + 1) / 2);
+}
+
+}  // namespace
+
+I420Buffer::I420Buffer(int width, int height)
+    : I420Buffer(width, height, width, (width + 1) / 2, (width + 1) / 2) {
+}
+
+I420Buffer::I420Buffer(int width,
+                       int height,
+                       int stride_y,
+                       int stride_u,
+                       int stride_v)
+    : width_(width),
+      height_(height),
+      stride_y_(stride_y),
+      stride_u_(stride_u),
+      stride_v_(stride_v),
+      data_(static_cast<uint8_t*>(AlignedMalloc(
+          I420DataSize(height, stride_y, stride_u, stride_v),
+          kBufferAlignment))) {
+  RTC_DCHECK_GT(width, 0);
+  RTC_DCHECK_GT(height, 0);
+  RTC_DCHECK_GE(stride_y, width);
+  RTC_DCHECK_GE(stride_u, (width + 1) / 2);
+  RTC_DCHECK_GE(stride_v, (width + 1) / 2);
+}
+
+I420Buffer::~I420Buffer() {
+}
+
+// static
+rtc::scoped_refptr<I420Buffer> I420Buffer::Create(int width, int height) {
+  return new rtc::RefCountedObject<I420Buffer>(width, height);
+}
+
+// static
+rtc::scoped_refptr<I420Buffer> I420Buffer::Create(int width,
+                                                  int height,
+                                                  int stride_y,
+                                                  int stride_u,
+                                                  int stride_v) {
+  return new rtc::RefCountedObject<I420Buffer>(
+      width, height, stride_y, stride_u, stride_v);
+}
+
+// static
+rtc::scoped_refptr<I420Buffer> I420Buffer::Copy(
+    const I420BufferInterface& source) {
+  return Copy(source.width(), source.height(),
+              source.DataY(), source.StrideY(),
+              source.DataU(), source.StrideU(),
+              source.DataV(), source.StrideV());
+}
+
+// static
+rtc::scoped_refptr<I420Buffer> I420Buffer::Copy(
+      int width, int height,
+      const uint8_t* data_y, int stride_y,
+      const uint8_t* data_u, int stride_u,
+      const uint8_t* data_v, int stride_v) {
+  // Note: May use different strides than the input data.
+  rtc::scoped_refptr<I420Buffer> buffer = Create(width, height);
+  RTC_CHECK_EQ(0, libyuv::I420Copy(data_y, stride_y,
+                                   data_u, stride_u,
+                                   data_v, stride_v,
+                                   buffer->MutableDataY(), buffer->StrideY(),
+                                   buffer->MutableDataU(), buffer->StrideU(),
+                                   buffer->MutableDataV(), buffer->StrideV(),
+                                   width, height));
+  return buffer;
+}
+
+// static
+rtc::scoped_refptr<I420Buffer> I420Buffer::Rotate(
+    const I420BufferInterface& src,
+    VideoRotation rotation) {
+  RTC_CHECK(src.DataY());
+  RTC_CHECK(src.DataU());
+  RTC_CHECK(src.DataV());
+
+  int rotated_width = src.width();
+  int rotated_height = src.height();
+  if (rotation == webrtc::kVideoRotation_90 ||
+      rotation == webrtc::kVideoRotation_270) {
+    std::swap(rotated_width, rotated_height);
+  }
+
+  rtc::scoped_refptr<webrtc::I420Buffer> buffer =
+      I420Buffer::Create(rotated_width, rotated_height);
+
+  RTC_CHECK_EQ(0, libyuv::I420Rotate(
+      src.DataY(), src.StrideY(),
+      src.DataU(), src.StrideU(),
+      src.DataV(), src.StrideV(),
+      buffer->MutableDataY(), buffer->StrideY(), buffer->MutableDataU(),
+      buffer->StrideU(), buffer->MutableDataV(), buffer->StrideV(),
+      src.width(), src.height(),
+      static_cast<libyuv::RotationMode>(rotation)));
+
+  return buffer;
+}
+
+void I420Buffer::InitializeData() {
+  memset(data_.get(), 0,
+         I420DataSize(height_, stride_y_, stride_u_, stride_v_));
+}
+
+int I420Buffer::width() const {
+  return width_;
+}
+
+int I420Buffer::height() const {
+  return height_;
+}
+
+const uint8_t* I420Buffer::DataY() const {
+  return data_.get();
+}
+const uint8_t* I420Buffer::DataU() const {
+  return data_.get() + stride_y_ * height_;
+}
+const uint8_t* I420Buffer::DataV() const {
+  return data_.get() + stride_y_ * height_ + stride_u_ * ((height_ + 1) / 2);
+}
+
+int I420Buffer::StrideY() const {
+  return stride_y_;
+}
+int I420Buffer::StrideU() const {
+  return stride_u_;
+}
+int I420Buffer::StrideV() const {
+  return stride_v_;
+}
+
+uint8_t* I420Buffer::MutableDataY() {
+  return const_cast<uint8_t*>(DataY());
+}
+uint8_t* I420Buffer::MutableDataU() {
+  return const_cast<uint8_t*>(DataU());
+}
+uint8_t* I420Buffer::MutableDataV() {
+  return const_cast<uint8_t*>(DataV());
+}
+
+// static
+void I420Buffer::SetBlack(I420Buffer* buffer) {
+  RTC_CHECK(libyuv::I420Rect(buffer->MutableDataY(), buffer->StrideY(),
+                             buffer->MutableDataU(), buffer->StrideU(),
+                             buffer->MutableDataV(), buffer->StrideV(),
+                             0, 0, buffer->width(), buffer->height(),
+                             0, 128, 128) == 0);
+}
+
+void I420Buffer::CropAndScaleFrom(const I420BufferInterface& src,
+                                  int offset_x,
+                                  int offset_y,
+                                  int crop_width,
+                                  int crop_height) {
+  RTC_CHECK_LE(crop_width, src.width());
+  RTC_CHECK_LE(crop_height, src.height());
+  RTC_CHECK_LE(crop_width + offset_x, src.width());
+  RTC_CHECK_LE(crop_height + offset_y, src.height());
+  RTC_CHECK_GE(offset_x, 0);
+  RTC_CHECK_GE(offset_y, 0);
+
+  // Make sure offset is even so that u/v plane becomes aligned.
+  const int uv_offset_x = offset_x / 2;
+  const int uv_offset_y = offset_y / 2;
+  offset_x = uv_offset_x * 2;
+  offset_y = uv_offset_y * 2;
+
+  const uint8_t* y_plane =
+      src.DataY() + src.StrideY() * offset_y + offset_x;
+  const uint8_t* u_plane =
+      src.DataU() + src.StrideU() * uv_offset_y + uv_offset_x;
+  const uint8_t* v_plane =
+      src.DataV() + src.StrideV() * uv_offset_y + uv_offset_x;
+  int res = libyuv::I420Scale(y_plane, src.StrideY(),
+                              u_plane, src.StrideU(),
+                              v_plane, src.StrideV(),
+                              crop_width, crop_height,
+                              MutableDataY(), StrideY(),
+                              MutableDataU(), StrideU(),
+                              MutableDataV(), StrideV(),
+                              width(), height(), libyuv::kFilterBox);
+
+  RTC_DCHECK_EQ(res, 0);
+}
+
+void I420Buffer::CropAndScaleFrom(const I420BufferInterface& src) {
+  const int crop_width =
+      std::min(src.width(), width() * src.height() / height());
+  const int crop_height =
+      std::min(src.height(), height() * src.width() / width());
+
+  CropAndScaleFrom(
+      src,
+      (src.width() - crop_width) / 2, (src.height() - crop_height) / 2,
+      crop_width, crop_height);
+}
+
+void I420Buffer::ScaleFrom(const I420BufferInterface& src) {
+  CropAndScaleFrom(src, 0, 0, src.width(), src.height());
+}
+
+}  // namespace webrtc
diff --git a/api/video/i420_buffer.h b/api/video/i420_buffer.h
new file mode 100644
index 0000000..bdac80b
--- /dev/null
+++ b/api/video/i420_buffer.h
@@ -0,0 +1,111 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_VIDEO_I420_BUFFER_H_
+#define API_VIDEO_I420_BUFFER_H_
+
+#include <memory>
+
+#include "api/video/video_rotation.h"
+#include "api/video/video_frame_buffer.h"
+#include "system_wrappers/include/aligned_malloc.h"
+
+namespace webrtc {
+
+// Plain I420 buffer in standard memory.
+class I420Buffer : public I420BufferInterface {
+ public:
+  static rtc::scoped_refptr<I420Buffer> Create(int width, int height);
+  static rtc::scoped_refptr<I420Buffer> Create(int width,
+                                               int height,
+                                               int stride_y,
+                                               int stride_u,
+                                               int stride_v);
+
+  // Create a new buffer and copy the pixel data.
+  static rtc::scoped_refptr<I420Buffer> Copy(const I420BufferInterface& buffer);
+  // Deprecated.
+  static rtc::scoped_refptr<I420Buffer> Copy(const VideoFrameBuffer& buffer) {
+    return Copy(*buffer.GetI420());
+  }
+
+  static rtc::scoped_refptr<I420Buffer> Copy(
+      int width, int height,
+      const uint8_t* data_y, int stride_y,
+      const uint8_t* data_u, int stride_u,
+      const uint8_t* data_v, int stride_v);
+
+  // Returns a rotated copy of |src|.
+  static rtc::scoped_refptr<I420Buffer> Rotate(const I420BufferInterface& src,
+                                               VideoRotation rotation);
+  // Deprecated.
+  static rtc::scoped_refptr<I420Buffer> Rotate(const VideoFrameBuffer& src,
+                                               VideoRotation rotation) {
+    return Rotate(*src.GetI420(), rotation);
+  }
+
+  // Sets the buffer to all black.
+  static void SetBlack(I420Buffer* buffer);
+
+  // Sets all three planes to all zeros. Used to work around for
+  // quirks in memory checkers
+  // (https://bugs.chromium.org/p/libyuv/issues/detail?id=377) and
+  // ffmpeg (http://crbug.com/390941).
+  // TODO(nisse): Deprecated. Should be deleted if/when those issues
+  // are resolved in a better way. Or in the mean time, use SetBlack.
+  void InitializeData();
+
+  int width() const override;
+  int height() const override;
+  const uint8_t* DataY() const override;
+  const uint8_t* DataU() const override;
+  const uint8_t* DataV() const override;
+
+  int StrideY() const override;
+  int StrideU() const override;
+  int StrideV() const override;
+
+  uint8_t* MutableDataY();
+  uint8_t* MutableDataU();
+  uint8_t* MutableDataV();
+
+  // Scale the cropped area of |src| to the size of |this| buffer, and
+  // write the result into |this|.
+  void CropAndScaleFrom(const I420BufferInterface& src,
+                        int offset_x,
+                        int offset_y,
+                        int crop_width,
+                        int crop_height);
+
+  // The common case of a center crop, when needed to adjust the
+  // aspect ratio without distorting the image.
+  void CropAndScaleFrom(const I420BufferInterface& src);
+
+  // Scale all of |src| to the size of |this| buffer, with no cropping.
+  void ScaleFrom(const I420BufferInterface& src);
+
+ protected:
+  I420Buffer(int width, int height);
+  I420Buffer(int width, int height, int stride_y, int stride_u, int stride_v);
+
+  ~I420Buffer() override;
+
+ private:
+  const int width_;
+  const int height_;
+  const int stride_y_;
+  const int stride_u_;
+  const int stride_v_;
+  const std::unique_ptr<uint8_t, AlignedFreeDeleter> data_;
+};
+
+}  // namespace webrtc
+
+#endif  // API_VIDEO_I420_BUFFER_H_
diff --git a/api/video/video_content_type.cc b/api/video/video_content_type.cc
new file mode 100644
index 0000000..149b4f9
--- /dev/null
+++ b/api/video/video_content_type.cc
@@ -0,0 +1,96 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/video/video_content_type.h"
+
+// VideoContentType stored as a single byte, which is sent over the network.
+// Structure:
+//
+//  0 1 2 3 4 5 6 7
+// +---------------+
+// |r r e e e s s c|
+//
+// where:
+// r - reserved bits.
+// e - 3-bit number of an experiment group counted from 1. 0 means there's no
+// experiment ongoing.
+// s - 2-bit simulcast stream id or spatial layer, counted from 1. 0 means that
+// no simulcast information is set.
+// c - content type. 0 means real-time video, 1 means screenshare.
+//
+
+namespace webrtc {
+namespace videocontenttypehelpers {
+
+namespace {
+static constexpr uint8_t kScreenshareBitsSize = 1;
+static constexpr uint8_t kScreenshareBitsMask =
+    (1u << kScreenshareBitsSize) - 1;
+
+static constexpr uint8_t kSimulcastShift = 1;
+static constexpr uint8_t kSimulcastBitsSize = 2;
+static constexpr uint8_t kSimulcastBitsMask = ((1u << kSimulcastBitsSize) - 1)
+                                              << kSimulcastShift;  // 0b00000110
+
+static constexpr uint8_t kExperimentShift = 3;
+static constexpr uint8_t kExperimentBitsSize = 3;
+static constexpr uint8_t kExperimentBitsMask =
+    ((1u << kExperimentBitsSize) - 1) << kExperimentShift;  // 0b00111000
+
+static constexpr uint8_t kTotalBitsSize =
+    kScreenshareBitsSize + kSimulcastBitsSize + kExperimentBitsSize;
+}  // namespace
+
+bool SetExperimentId(VideoContentType* content_type, uint8_t experiment_id) {
+  // Store in bits 2-4.
+  if (experiment_id >= (1 << kExperimentBitsSize))
+    return false;
+  *content_type = static_cast<VideoContentType>(
+      (static_cast<uint8_t>(*content_type) & ~kExperimentBitsMask) |
+      ((experiment_id << kExperimentShift) & kExperimentBitsMask));
+  return true;
+}
+
+bool SetSimulcastId(VideoContentType* content_type, uint8_t simulcast_id) {
+  // Store in bits 5-6.
+  if (simulcast_id >= (1 << kSimulcastBitsSize))
+    return false;
+  *content_type = static_cast<VideoContentType>(
+      (static_cast<uint8_t>(*content_type) & ~kSimulcastBitsMask) |
+      ((simulcast_id << kSimulcastShift) & kSimulcastBitsMask));
+  return true;
+}
+
+uint8_t GetExperimentId(
+    const VideoContentType& content_type) {
+  return (static_cast<uint8_t>(content_type) & kExperimentBitsMask) >>
+         kExperimentShift;
+}
+uint8_t GetSimulcastId(
+    const VideoContentType& content_type) {
+  return (static_cast<uint8_t>(content_type) & kSimulcastBitsMask) >>
+         kSimulcastShift;
+}
+
+bool IsScreenshare(
+    const VideoContentType& content_type) {
+  return (static_cast<uint8_t>(content_type) & kScreenshareBitsMask) > 0;
+}
+
+bool IsValidContentType(uint8_t value) {
+  // Any 6-bit value is allowed.
+  return value < (1 << kTotalBitsSize);
+}
+
+const char* ToString(const VideoContentType& content_type) {
+  return IsScreenshare(content_type) ? "screen" : "realtime";
+}
+}  // namespace videocontenttypehelpers
+}  // namespace webrtc
diff --git a/api/video/video_content_type.h b/api/video/video_content_type.h
new file mode 100644
index 0000000..8c64602
--- /dev/null
+++ b/api/video/video_content_type.h
@@ -0,0 +1,41 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_VIDEO_VIDEO_CONTENT_TYPE_H_
+#define API_VIDEO_VIDEO_CONTENT_TYPE_H_
+
+#include <stdint.h>
+
+#include <string>
+
+namespace webrtc {
+
+enum class VideoContentType : uint8_t {
+  UNSPECIFIED = 0,
+  SCREENSHARE = 1,
+};
+
+namespace videocontenttypehelpers {
+bool SetExperimentId(VideoContentType* content_type, uint8_t experiment_id);
+bool SetSimulcastId(VideoContentType* content_type, uint8_t simulcast_id);
+
+uint8_t GetExperimentId(const VideoContentType& content_type);
+uint8_t GetSimulcastId(const VideoContentType& content_type);
+
+bool IsScreenshare(const VideoContentType& content_type);
+
+bool IsValidContentType(uint8_t value);
+
+const char* ToString(const VideoContentType& content_type);
+}  // namespace videocontenttypehelpers
+
+}  // namespace webrtc
+
+#endif  // API_VIDEO_VIDEO_CONTENT_TYPE_H_
diff --git a/api/video/video_frame.cc b/api/video/video_frame.cc
new file mode 100644
index 0000000..93b3c9c
--- /dev/null
+++ b/api/video/video_frame.cc
@@ -0,0 +1,66 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/video/video_frame.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/timeutils.h"
+
+namespace webrtc {
+
+VideoFrame::VideoFrame(const rtc::scoped_refptr<VideoFrameBuffer>& buffer,
+                       webrtc::VideoRotation rotation,
+                       int64_t timestamp_us)
+    : video_frame_buffer_(buffer),
+      timestamp_rtp_(0),
+      ntp_time_ms_(0),
+      timestamp_us_(timestamp_us),
+      rotation_(rotation) {}
+
+VideoFrame::VideoFrame(const rtc::scoped_refptr<VideoFrameBuffer>& buffer,
+                       uint32_t timestamp,
+                       int64_t render_time_ms,
+                       VideoRotation rotation)
+    : video_frame_buffer_(buffer),
+      timestamp_rtp_(timestamp),
+      ntp_time_ms_(0),
+      timestamp_us_(render_time_ms * rtc::kNumMicrosecsPerMillisec),
+      rotation_(rotation) {
+  RTC_DCHECK(buffer);
+}
+
+VideoFrame::~VideoFrame() = default;
+
+VideoFrame::VideoFrame(const VideoFrame&) = default;
+VideoFrame::VideoFrame(VideoFrame&&) = default;
+VideoFrame& VideoFrame::operator=(const VideoFrame&) = default;
+VideoFrame& VideoFrame::operator=(VideoFrame&&) = default;
+
+int VideoFrame::width() const {
+  return video_frame_buffer_ ? video_frame_buffer_->width() : 0;
+}
+
+int VideoFrame::height() const {
+  return video_frame_buffer_ ? video_frame_buffer_->height() : 0;
+}
+
+uint32_t VideoFrame::size() const {
+  return width() * height();
+}
+
+rtc::scoped_refptr<VideoFrameBuffer> VideoFrame::video_frame_buffer() const {
+  return video_frame_buffer_;
+}
+
+int64_t VideoFrame::render_time_ms() const {
+  return timestamp_us() / rtc::kNumMicrosecsPerMillisec;
+}
+
+}  // namespace webrtc
diff --git a/api/video/video_frame.h b/api/video/video_frame.h
new file mode 100644
index 0000000..a72bef1
--- /dev/null
+++ b/api/video/video_frame.h
@@ -0,0 +1,115 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_VIDEO_VIDEO_FRAME_H_
+#define API_VIDEO_VIDEO_FRAME_H_
+
+#include <stdint.h>
+
+#include "api/video/video_rotation.h"
+#include "api/video/video_frame_buffer.h"
+
+namespace webrtc {
+
+class VideoFrame {
+ public:
+  // TODO(nisse): This constructor is consistent with the now deleted
+  // cricket::WebRtcVideoFrame. We should consider whether or not we
+  // want to stick to this style and deprecate the other constructor.
+  VideoFrame(const rtc::scoped_refptr<VideoFrameBuffer>& buffer,
+             webrtc::VideoRotation rotation,
+             int64_t timestamp_us);
+
+  // Preferred constructor.
+  VideoFrame(const rtc::scoped_refptr<VideoFrameBuffer>& buffer,
+             uint32_t timestamp,
+             int64_t render_time_ms,
+             VideoRotation rotation);
+
+  ~VideoFrame();
+
+  // Support move and copy.
+  VideoFrame(const VideoFrame&);
+  VideoFrame(VideoFrame&&);
+  VideoFrame& operator=(const VideoFrame&);
+  VideoFrame& operator=(VideoFrame&&);
+
+  // Get frame width.
+  int width() const;
+  // Get frame height.
+  int height() const;
+  // Get frame size in pixels.
+  uint32_t size() const;
+
+  // System monotonic clock, same timebase as rtc::TimeMicros().
+  int64_t timestamp_us() const { return timestamp_us_; }
+  void set_timestamp_us(int64_t timestamp_us) { timestamp_us_ = timestamp_us; }
+
+  // TODO(nisse): After the cricket::VideoFrame and webrtc::VideoFrame
+  // merge, timestamps other than timestamp_us will likely be
+  // deprecated.
+
+  // Set frame timestamp (90kHz).
+  void set_timestamp(uint32_t timestamp) { timestamp_rtp_ = timestamp; }
+
+  // Get frame timestamp (90kHz).
+  uint32_t timestamp() const { return timestamp_rtp_; }
+
+  // For now, transport_frame_id and rtp timestamp are the same.
+  // TODO(nisse): Must be handled differently for QUIC.
+  uint32_t transport_frame_id() const { return timestamp(); }
+
+  // Set capture ntp time in milliseconds.
+  // TODO(nisse): Deprecated. Migrate all users to timestamp_us().
+  void set_ntp_time_ms(int64_t ntp_time_ms) { ntp_time_ms_ = ntp_time_ms; }
+
+  // Get capture ntp time in milliseconds.
+  // TODO(nisse): Deprecated. Migrate all users to timestamp_us().
+  int64_t ntp_time_ms() const { return ntp_time_ms_; }
+
+  // Naming convention for Coordination of Video Orientation. Please see
+  // http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/ts_126114v120700p.pdf
+  //
+  // "pending rotation" or "pending" = a frame that has a VideoRotation > 0.
+  //
+  // "not pending" = a frame that has a VideoRotation == 0.
+  //
+  // "apply rotation" = modify a frame from being "pending" to being "not
+  //                    pending" rotation (a no-op for "unrotated").
+  //
+  VideoRotation rotation() const { return rotation_; }
+  void set_rotation(VideoRotation rotation) { rotation_ = rotation; }
+
+  // Get render time in milliseconds.
+  // TODO(nisse): Deprecated. Migrate all users to timestamp_us().
+  int64_t render_time_ms() const;
+
+  // Return the underlying buffer. Never nullptr for a properly
+  // initialized VideoFrame.
+  rtc::scoped_refptr<webrtc::VideoFrameBuffer> video_frame_buffer() const;
+
+  // TODO(nisse): Deprecated.
+  // Return true if the frame is stored in a texture.
+  bool is_texture() const {
+    return video_frame_buffer()->type() == VideoFrameBuffer::Type::kNative;
+  }
+
+ private:
+  // An opaque reference counted handle that stores the pixel data.
+  rtc::scoped_refptr<webrtc::VideoFrameBuffer> video_frame_buffer_;
+  uint32_t timestamp_rtp_;
+  int64_t ntp_time_ms_;
+  int64_t timestamp_us_;
+  VideoRotation rotation_;
+};
+
+}  // namespace webrtc
+
+#endif  // API_VIDEO_VIDEO_FRAME_H_
diff --git a/api/video/video_frame_buffer.cc b/api/video/video_frame_buffer.cc
new file mode 100644
index 0000000..867f249
--- /dev/null
+++ b/api/video/video_frame_buffer.cc
@@ -0,0 +1,80 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/video/video_frame_buffer.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+rtc::scoped_refptr<I420BufferInterface> VideoFrameBuffer::GetI420() {
+  RTC_CHECK(type() == Type::kI420);
+  return static_cast<I420BufferInterface*>(this);
+}
+
+rtc::scoped_refptr<const I420BufferInterface> VideoFrameBuffer::GetI420()
+    const {
+  RTC_CHECK(type() == Type::kI420);
+  return static_cast<const I420BufferInterface*>(this);
+}
+
+I420ABufferInterface* VideoFrameBuffer::GetI420A() {
+  RTC_CHECK(type() == Type::kI420A);
+  return static_cast<I420ABufferInterface*>(this);
+}
+
+const I420ABufferInterface* VideoFrameBuffer::GetI420A() const {
+  RTC_CHECK(type() == Type::kI420A);
+  return static_cast<const I420ABufferInterface*>(this);
+}
+
+I444BufferInterface* VideoFrameBuffer::GetI444() {
+  RTC_CHECK(type() == Type::kI444);
+  return static_cast<I444BufferInterface*>(this);
+}
+
+const I444BufferInterface* VideoFrameBuffer::GetI444() const {
+  RTC_CHECK(type() == Type::kI444);
+  return static_cast<const I444BufferInterface*>(this);
+}
+
+VideoFrameBuffer::Type I420BufferInterface::type() const {
+  return Type::kI420;
+}
+
+int I420BufferInterface::ChromaWidth() const {
+  return (width() + 1) / 2;
+}
+
+int I420BufferInterface::ChromaHeight() const {
+  return (height() + 1) / 2;
+}
+
+rtc::scoped_refptr<I420BufferInterface> I420BufferInterface::ToI420() {
+  return this;
+}
+
+VideoFrameBuffer::Type I420ABufferInterface::type() const {
+  return Type::kI420A;
+}
+
+VideoFrameBuffer::Type I444BufferInterface::type() const {
+  return Type::kI444;
+}
+
+int I444BufferInterface::ChromaWidth() const {
+  return width();
+}
+
+int I444BufferInterface::ChromaHeight() const {
+  return height();
+}
+
+}  // namespace webrtc
diff --git a/api/video/video_frame_buffer.h b/api/video/video_frame_buffer.h
new file mode 100644
index 0000000..2be7e0b
--- /dev/null
+++ b/api/video/video_frame_buffer.h
@@ -0,0 +1,138 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_VIDEO_VIDEO_FRAME_BUFFER_H_
+#define API_VIDEO_VIDEO_FRAME_BUFFER_H_
+
+#include <stdint.h>
+
+#include "rtc_base/refcount.h"
+#include "rtc_base/scoped_ref_ptr.h"
+
+namespace webrtc {
+
+class I420BufferInterface;
+class I420ABufferInterface;
+class I444BufferInterface;
+
+// Base class for frame buffers of different types of pixel format and storage.
+// The tag in type() indicates how the data is represented, and each type is
+// implemented as a subclass. To access the pixel data, call the appropriate
+// GetXXX() function, where XXX represents the type. There is also a function
+// ToI420() that returns a frame buffer in I420 format, converting from the
+// underlying representation if necessary. I420 is the most widely accepted
+// format and serves as a fallback for video sinks that can only handle I420,
+// e.g. the internal WebRTC software encoders. A special enum value 'kNative' is
+// provided for external clients to implement their own frame buffer
+// representations, e.g. as textures. The external client can produce such
+// native frame buffers from custom video sources, and then cast it back to the
+// correct subclass in custom video sinks. The purpose of this is to improve
+// performance by providing an optimized path without intermediate conversions.
+// Frame metadata such as rotation and timestamp are stored in
+// webrtc::VideoFrame, and not here.
+class VideoFrameBuffer : public rtc::RefCountInterface {
+ public:
+  // New frame buffer types will be added conservatively when there is an
+  // opportunity to optimize the path between some pair of video source and
+  // video sink.
+  enum class Type {
+    kNative,
+    kI420,
+    kI420A,
+    kI444,
+  };
+
+  // This function specifies in what pixel format the data is stored in.
+  virtual Type type() const = 0;
+
+  // The resolution of the frame in pixels. For formats where some planes are
+  // subsampled, this is the highest-resolution plane.
+  virtual int width() const = 0;
+  virtual int height() const = 0;
+
+  // Returns a memory-backed frame buffer in I420 format. If the pixel data is
+  // in another format, a conversion will take place. All implementations must
+  // provide a fallback to I420 for compatibility with e.g. the internal WebRTC
+  // software encoders.
+  virtual rtc::scoped_refptr<I420BufferInterface> ToI420() = 0;
+
+  // These functions should only be called if type() is of the correct type.
+  // Calling with a different type will result in a crash.
+  // TODO(magjed): Return raw pointers for GetI420 once deprecated interface is
+  // removed.
+  rtc::scoped_refptr<I420BufferInterface> GetI420();
+  rtc::scoped_refptr<const I420BufferInterface> GetI420() const;
+  I420ABufferInterface* GetI420A();
+  const I420ABufferInterface* GetI420A() const;
+  I444BufferInterface* GetI444();
+  const I444BufferInterface* GetI444() const;
+
+ protected:
+  ~VideoFrameBuffer() override {}
+};
+
+// This interface represents Type::kI420 and Type::kI444.
+class PlanarYuvBuffer : public VideoFrameBuffer {
+ public:
+  virtual int ChromaWidth() const = 0;
+  virtual int ChromaHeight() const = 0;
+
+  // Returns pointer to the pixel data for a given plane. The memory is owned by
+  // the VideoFrameBuffer object and must not be freed by the caller.
+  virtual const uint8_t* DataY() const = 0;
+  virtual const uint8_t* DataU() const = 0;
+  virtual const uint8_t* DataV() const = 0;
+
+  // Returns the number of bytes between successive rows for a given plane.
+  virtual int StrideY() const = 0;
+  virtual int StrideU() const = 0;
+  virtual int StrideV() const = 0;
+
+ protected:
+  ~PlanarYuvBuffer() override {}
+};
+
+class I420BufferInterface : public PlanarYuvBuffer {
+ public:
+  Type type() const override;
+
+  int ChromaWidth() const final;
+  int ChromaHeight() const final;
+
+  rtc::scoped_refptr<I420BufferInterface> ToI420() final;
+
+ protected:
+  ~I420BufferInterface() override {}
+};
+
+class I420ABufferInterface : public I420BufferInterface {
+ public:
+  Type type() const final;
+  virtual const uint8_t* DataA() const = 0;
+  virtual int StrideA() const = 0;
+
+ protected:
+  ~I420ABufferInterface() override {}
+};
+
+class I444BufferInterface : public PlanarYuvBuffer {
+ public:
+  Type type() const final;
+
+  int ChromaWidth() const final;
+  int ChromaHeight() const final;
+
+ protected:
+  ~I444BufferInterface() override {}
+};
+
+}  // namespace webrtc
+
+#endif  // API_VIDEO_VIDEO_FRAME_BUFFER_H_
diff --git a/api/video/video_rotation.h b/api/video/video_rotation.h
new file mode 100644
index 0000000..6a29588
--- /dev/null
+++ b/api/video/video_rotation.h
@@ -0,0 +1,26 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_VIDEO_VIDEO_ROTATION_H_
+#define API_VIDEO_VIDEO_ROTATION_H_
+
+namespace webrtc {
+
+// enum for clockwise rotation.
+enum VideoRotation {
+  kVideoRotation_0 = 0,
+  kVideoRotation_90 = 90,
+  kVideoRotation_180 = 180,
+  kVideoRotation_270 = 270
+};
+
+}  // namespace webrtc
+
+#endif  // API_VIDEO_VIDEO_ROTATION_H_
diff --git a/api/video/video_timing.cc b/api/video/video_timing.cc
new file mode 100644
index 0000000..3ccbe4e
--- /dev/null
+++ b/api/video/video_timing.cc
@@ -0,0 +1,78 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/video/video_timing.h"
+
+#include <sstream>
+
+namespace webrtc {
+
+TimingFrameInfo::TimingFrameInfo()
+    : rtp_timestamp(0),
+      capture_time_ms(-1),
+      encode_start_ms(-1),
+      encode_finish_ms(-1),
+      packetization_finish_ms(-1),
+      pacer_exit_ms(-1),
+      network_timestamp_ms(-1),
+      network2_timestamp_ms(-1),
+      receive_start_ms(-1),
+      receive_finish_ms(-1),
+      decode_start_ms(-1),
+      decode_finish_ms(-1),
+      render_time_ms(-1),
+      flags(TimingFrameFlags::kDefault) {}
+
+int64_t TimingFrameInfo::EndToEndDelay() const {
+  return capture_time_ms >= 0 ? decode_finish_ms - capture_time_ms : -1;
+}
+
+bool TimingFrameInfo::IsLongerThan(const TimingFrameInfo& other) const {
+  int64_t other_delay = other.EndToEndDelay();
+  return other_delay == -1 || EndToEndDelay() > other_delay;
+}
+
+bool TimingFrameInfo::operator<(const TimingFrameInfo& other) const {
+  return other.IsLongerThan(*this);
+}
+
+bool TimingFrameInfo::operator<=(const TimingFrameInfo& other) const {
+  return !IsLongerThan(other);
+}
+
+bool TimingFrameInfo::IsOutlier() const {
+  return !IsInvalid() && (flags & TimingFrameFlags::kTriggeredBySize);
+}
+
+bool TimingFrameInfo::IsTimerTriggered() const {
+  return !IsInvalid() && (flags & TimingFrameFlags::kTriggeredByTimer);
+}
+
+bool TimingFrameInfo::IsInvalid() const {
+  return flags == TimingFrameFlags::kInvalid;
+}
+
+std::string TimingFrameInfo::ToString() const {
+  std::stringstream out;
+  if (IsInvalid()) {
+    out << "";
+  } else {
+    out << rtp_timestamp << ',' << capture_time_ms << ',' << encode_start_ms
+        << ',' << encode_finish_ms << ',' << packetization_finish_ms << ','
+        << pacer_exit_ms << ',' << network_timestamp_ms << ','
+        << network2_timestamp_ms << ',' << receive_start_ms << ','
+        << receive_finish_ms << ',' << decode_start_ms << ','
+        << decode_finish_ms << ',' << render_time_ms << ','
+        << IsOutlier() << ',' << IsTimerTriggered();
+  }
+  return out.str();
+}
+
+}  // namespace webrtc
diff --git a/api/video/video_timing.h b/api/video/video_timing.h
new file mode 100644
index 0000000..ab8cd99
--- /dev/null
+++ b/api/video/video_timing.h
@@ -0,0 +1,124 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_VIDEO_VIDEO_TIMING_H_
+#define API_VIDEO_VIDEO_TIMING_H_
+
+#include <stdint.h>
+
+#include <limits>
+#include <string>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+enum TimingFrameFlags : uint8_t {
+  kNotTriggered = 0,           // Timing info valid, but not to be transmitted.
+                               // Used on send-side only.
+  // TODO(ilnik): Delete compatibility alias.
+  // Used to be sent over the wire, for the old protocol.
+  kDefault = 0,                // Old name, for API compatibility.
+  kTriggeredByTimer = 1 << 0,  // Frame marked for tracing by periodic timer.
+  kTriggeredBySize = 1 << 1,   // Frame marked for tracing due to size.
+  kInvalid = std::numeric_limits<uint8_t>::max()  // Invalid, ignore!
+};
+
+// Video timing timestamps in ms counted from capture_time_ms of a frame.
+// This structure represents data sent in video-timing RTP header extension.
+struct VideoSendTiming {
+  // Offsets of the fields in the RTP header extension, counting from the first
+  // byte after the one-byte header.
+  static constexpr uint8_t kFlagsOffset = 0;
+  static constexpr uint8_t kEncodeStartDeltaOffset = 1;
+  static constexpr uint8_t kEncodeFinishDeltaOffset = 3;
+  static constexpr uint8_t kPacketizationFinishDeltaOffset = 5;
+  static constexpr uint8_t kPacerExitDeltaOffset = 7;
+  static constexpr uint8_t kNetworkTimestampDeltaOffset = 9;
+  static constexpr uint8_t kNetwork2TimestampDeltaOffset = 11;
+
+  // Returns |time_ms - base_ms| capped at max 16-bit value.
+  // Used to fill this data structure as per
+  // https://webrtc.org/experiments/rtp-hdrext/video-timing/ extension stores
+  // 16-bit deltas of timestamps from packet capture time.
+  static uint16_t GetDeltaCappedMs(int64_t base_ms, int64_t time_ms) {
+    RTC_DCHECK_GE(time_ms, base_ms);
+    return rtc::saturated_cast<uint16_t>(time_ms - base_ms);
+  }
+
+  uint16_t encode_start_delta_ms;
+  uint16_t encode_finish_delta_ms;
+  uint16_t packetization_finish_delta_ms;
+  uint16_t pacer_exit_delta_ms;
+  uint16_t network_timestamp_delta_ms;
+  uint16_t network2_timestamp_delta_ms;
+  uint8_t flags;
+};
+
+// Used to report precise timings of a 'timing frames'. Contains all important
+// timestamps for a lifetime of that specific frame. Reported as a string via
+// GetStats(). Only frame which took the longest between two GetStats calls is
+// reported.
+struct TimingFrameInfo {
+  TimingFrameInfo();
+
+  // Returns end-to-end delay of a frame, if sender and receiver timestamps are
+  // synchronized, -1 otherwise.
+  int64_t EndToEndDelay() const;
+
+  // Returns true if current frame took longer to process than |other| frame.
+  // If other frame's clocks are not synchronized, current frame is always
+  // preferred.
+  bool IsLongerThan(const TimingFrameInfo& other) const;
+
+  // Returns true if flags are set to indicate this frame was marked for tracing
+  // due to the size being outside some limit.
+  bool IsOutlier() const;
+
+  // Returns true if flags are set to indicate this frame was marked fro tracing
+  // due to cyclic timer.
+  bool IsTimerTriggered() const;
+
+  // Returns true if the timing data is marked as invalid, in which case it
+  // should be ignored.
+  bool IsInvalid() const;
+
+  std::string ToString() const;
+
+  bool operator<(const TimingFrameInfo& other) const;
+
+  bool operator<=(const TimingFrameInfo& other) const;
+
+  uint32_t rtp_timestamp;  // Identifier of a frame.
+  // All timestamps below are in local monotonous clock of a receiver.
+  // If sender clock is not yet estimated, sender timestamps
+  // (capture_time_ms ... pacer_exit_ms) are negative values, still
+  // relatively correct.
+  int64_t capture_time_ms;          // Captrue time of a frame.
+  int64_t encode_start_ms;          // Encode start time.
+  int64_t encode_finish_ms;         // Encode completion time.
+  int64_t packetization_finish_ms;  // Time when frame was passed to pacer.
+  int64_t pacer_exit_ms;  // Time when last packet was pushed out of pacer.
+  // Two in-network RTP processor timestamps: meaning is application specific.
+  int64_t network_timestamp_ms;
+  int64_t network2_timestamp_ms;
+  int64_t receive_start_ms;   // First received packet time.
+  int64_t receive_finish_ms;  // Last received packet time.
+  int64_t decode_start_ms;    // Decode start time.
+  int64_t decode_finish_ms;   // Decode completion time.
+  int64_t render_time_ms;     // Proposed render time to insure smooth playback.
+
+  uint8_t flags;  // Flags indicating validity and/or why tracing was triggered.
+};
+
+}  // namespace webrtc
+
+#endif  // API_VIDEO_VIDEO_TIMING_H_
diff --git a/api/videosinkinterface.h b/api/videosinkinterface.h
new file mode 100644
index 0000000..2399320
--- /dev/null
+++ b/api/videosinkinterface.h
@@ -0,0 +1,32 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_VIDEOSINKINTERFACE_H_
+#define API_VIDEOSINKINTERFACE_H_
+
+#include <rtc_base/checks.h>
+
+namespace rtc {
+
+template <typename VideoFrameT>
+class VideoSinkInterface {
+ public:
+  virtual ~VideoSinkInterface() {}
+
+  virtual void OnFrame(const VideoFrameT& frame) = 0;
+
+  // Should be called by the source when it discards the frame due to rate
+  // limiting.
+  virtual void OnDiscardedFrame() {}
+};
+
+}  // namespace rtc
+
+#endif  // API_VIDEOSINKINTERFACE_H_
diff --git a/api/videosourceinterface.cc b/api/videosourceinterface.cc
new file mode 100644
index 0000000..5eda369
--- /dev/null
+++ b/api/videosourceinterface.cc
@@ -0,0 +1,18 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/videosourceinterface.h"
+
+namespace rtc {
+
+VideoSinkWants::VideoSinkWants() = default;
+VideoSinkWants::~VideoSinkWants() = default;
+
+}  // namespace rtc
diff --git a/api/videosourceinterface.h b/api/videosourceinterface.h
new file mode 100644
index 0000000..ffb017a
--- /dev/null
+++ b/api/videosourceinterface.h
@@ -0,0 +1,59 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_VIDEOSOURCEINTERFACE_H_
+#define API_VIDEOSOURCEINTERFACE_H_
+
+#include <limits>
+
+#include "api/optional.h"
+#include "api/videosinkinterface.h"
+
+namespace rtc {
+
+// VideoSinkWants is used for notifying the source of properties a video frame
+// should have when it is delivered to a certain sink.
+struct VideoSinkWants {
+  VideoSinkWants();
+  ~VideoSinkWants();
+  // Tells the source whether the sink wants frames with rotation applied.
+  // By default, any rotation must be applied by the sink.
+  bool rotation_applied = false;
+
+  // Tells the source that the sink only wants black frames.
+  bool black_frames = false;
+
+  // Tells the source the maximum number of pixels the sink wants.
+  int max_pixel_count = std::numeric_limits<int>::max();
+  // Tells the source the desired number of pixels the sinks wants. This will
+  // typically be used when stepping the resolution up again when conditions
+  // have improved after an earlier downgrade. The source should select the
+  // closest resolution to this pixel count, but if max_pixel_count is set, it
+  // still sets the absolute upper bound.
+  rtc::Optional<int> target_pixel_count;
+  // Tells the source the maximum framerate the sink wants.
+  int max_framerate_fps = std::numeric_limits<int>::max();
+};
+
+template <typename VideoFrameT>
+class VideoSourceInterface {
+ public:
+  virtual void AddOrUpdateSink(VideoSinkInterface<VideoFrameT>* sink,
+                               const VideoSinkWants& wants) = 0;
+  // RemoveSink must guarantee that at the time the method returns,
+  // there is no current and no future calls to VideoSinkInterface::OnFrame.
+  virtual void RemoveSink(VideoSinkInterface<VideoFrameT>* sink) = 0;
+
+ protected:
+  virtual ~VideoSourceInterface() {}
+};
+
+}  // namespace rtc
+#endif  // API_VIDEOSOURCEINTERFACE_H_
diff --git a/api/videosourceproxy.h b/api/videosourceproxy.h
new file mode 100644
index 0000000..f2d8be0
--- /dev/null
+++ b/api/videosourceproxy.h
@@ -0,0 +1,41 @@
+/*
+ *  Copyright 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_VIDEOSOURCEPROXY_H_
+#define API_VIDEOSOURCEPROXY_H_
+
+#include "api/proxy.h"
+#include "api/mediastreaminterface.h"
+
+namespace webrtc {
+
+// Makes sure the real VideoTrackSourceInterface implementation is destroyed on
+// the signaling thread and marshals all method calls to the signaling thread.
+// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods
+// are called on is an implementation detail.
+BEGIN_PROXY_MAP(VideoTrackSource)
+  PROXY_SIGNALING_THREAD_DESTRUCTOR()
+  PROXY_CONSTMETHOD0(SourceState, state)
+  PROXY_CONSTMETHOD0(bool, remote)
+  PROXY_CONSTMETHOD0(bool, is_screencast)
+  PROXY_CONSTMETHOD0(rtc::Optional<bool>, needs_denoising)
+  PROXY_METHOD1(bool, GetStats, Stats*)
+  PROXY_WORKER_METHOD2(void,
+                       AddOrUpdateSink,
+                       rtc::VideoSinkInterface<VideoFrame>*,
+                       const rtc::VideoSinkWants&)
+  PROXY_WORKER_METHOD1(void, RemoveSink, rtc::VideoSinkInterface<VideoFrame>*)
+  PROXY_METHOD1(void, RegisterObserver, ObserverInterface*)
+  PROXY_METHOD1(void, UnregisterObserver, ObserverInterface*)
+END_PROXY_MAP()
+
+}  // namespace webrtc
+
+#endif  // API_VIDEOSOURCEPROXY_H_
diff --git a/audio/BUILD.gn b/audio/BUILD.gn
new file mode 100644
index 0000000..b086971
--- /dev/null
+++ b/audio/BUILD.gn
@@ -0,0 +1,250 @@
+# Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../webrtc.gni")
+if (is_android) {
+  import("//build/config/android/config.gni")
+  import("//build/config/android/rules.gni")
+}
+
+rtc_static_library("audio") {
+  sources = [
+    "audio_level.cc",
+    "audio_level.h",
+    "audio_receive_stream.cc",
+    "audio_receive_stream.h",
+    "audio_send_stream.cc",
+    "audio_send_stream.h",
+    "audio_state.cc",
+    "audio_state.h",
+    "audio_transport_impl.cc",
+    "audio_transport_impl.h",
+    "channel.cc",
+    "channel.h",
+    "channel_proxy.cc",
+    "channel_proxy.h",
+    "conversion.h",
+    "null_audio_poller.cc",
+    "null_audio_poller.h",
+    "remix_resample.cc",
+    "remix_resample.h",
+    "time_interval.cc",
+    "time_interval.h",
+    "transport_feedback_packet_loss_tracker.cc",
+    "transport_feedback_packet_loss_tracker.h",
+  ]
+
+  if (!build_with_chromium && is_clang) {
+    # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+    suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+  }
+
+  deps = [
+    "..:webrtc_common",
+    "../api:array_view",
+    "../api:call_api",
+    "../api:libjingle_peerconnection_api",
+    "../api:optional",
+    "../api:transport_api",
+    "../api/audio:aec3_factory",
+    "../api/audio:audio_mixer_api",
+    "../api/audio_codecs:audio_codecs_api",
+    "../api/audio_codecs:builtin_audio_encoder_factory",
+    "../call:bitrate_allocator",
+    "../call:call_interfaces",
+    "../call:rtp_interfaces",
+    "../common_audio",
+    "../common_audio:common_audio_c",
+    "../logging:rtc_event_audio",
+    "../logging:rtc_event_log_api",
+    "../modules:module_api",
+    "../modules/audio_coding",
+    "../modules/audio_coding:audio_format_conversion",
+    "../modules/audio_coding:audio_network_adaptor_config",
+    "../modules/audio_coding:cng",
+    "../modules/audio_device",
+    "../modules/audio_processing",
+    "../modules/bitrate_controller:bitrate_controller",
+    "../modules/pacing:pacing",
+    "../modules/remote_bitrate_estimator:remote_bitrate_estimator",
+    "../modules/rtp_rtcp",
+    "../modules/rtp_rtcp:rtp_rtcp_format",
+    "../modules/utility",
+    "../rtc_base:checks",
+    "../rtc_base:rate_limiter",
+    "../rtc_base:rtc_base",
+    "../rtc_base:rtc_base_approved",
+    "../rtc_base:rtc_task_queue",
+    "../rtc_base:safe_minmax",
+    "../rtc_base:stringutils",
+    "../system_wrappers",
+    "../system_wrappers:field_trial_api",
+    "../system_wrappers:metrics_api",
+    "utility:audio_frame_operations",
+  ]
+}
+if (rtc_include_tests) {
+  rtc_source_set("audio_end_to_end_test") {
+    testonly = true
+
+    sources = [
+      "test/audio_end_to_end_test.cc",
+      "test/audio_end_to_end_test.h",
+    ]
+    deps = [
+      ":audio",
+      "../system_wrappers:system_wrappers",
+      "../test:test_common",
+      "../test:test_support",
+    ]
+
+    if (!build_with_chromium && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+  }
+
+  rtc_source_set("audio_tests") {
+    testonly = true
+
+    sources = [
+      "audio_receive_stream_unittest.cc",
+      "audio_send_stream_tests.cc",
+      "audio_send_stream_unittest.cc",
+      "audio_state_unittest.cc",
+      "mock_voe_channel_proxy.h",
+      "remix_resample_unittest.cc",
+      "time_interval_unittest.cc",
+      "transport_feedback_packet_loss_tracker_unittest.cc",
+    ]
+    deps = [
+      ":audio",
+      ":audio_end_to_end_test",
+      "../api:mock_audio_mixer",
+      "../call:mock_call_interfaces",
+      "../call:mock_rtp_interfaces",
+      "../call:rtp_interfaces",
+      "../call:rtp_receiver",
+      "../common_audio",
+      "../logging:mocks",
+      "../modules:module_api",
+      "../modules/audio_device:mock_audio_device",
+      "../modules/audio_mixer:audio_mixer_impl",
+      "../modules/audio_processing:audio_processing_statistics",
+      "../modules/audio_processing:mocks",
+      "../modules/bitrate_controller:mocks",
+      "../modules/pacing:pacing",
+      "../modules/rtp_rtcp:mock_rtp_rtcp",
+      "../modules/rtp_rtcp:rtp_rtcp_format",
+      "../rtc_base:checks",
+      "../rtc_base:rtc_base_approved",
+      "../rtc_base:rtc_base_tests_utils",
+      "../rtc_base:rtc_task_queue",
+      "../rtc_base:safe_compare",
+      "../system_wrappers:system_wrappers",
+      "../test:audio_codec_mocks",
+      "../test:rtp_test_utils",
+      "../test:test_common",
+      "../test:test_support",
+      "utility:utility_tests",
+      "//testing/gtest",
+    ]
+
+    if (!rtc_use_memcheck) {
+      # This test is timing dependent, which rules out running on memcheck bots.
+      sources += [ "test/audio_stats_test.cc" ]
+    }
+
+    if (!build_with_chromium && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+  }
+
+  if (rtc_enable_protobuf) {
+    rtc_test("low_bandwidth_audio_test") {
+      testonly = true
+
+      sources = [
+        "test/low_bandwidth_audio_test.cc",
+      ]
+
+      deps = [
+        ":audio_end_to_end_test",
+        "../common_audio",
+        "../rtc_base:rtc_base_approved",
+        "../system_wrappers",
+        "../test:fileutils",
+        "../test:test_common",
+        "../test:test_main",
+        "//testing/gtest",
+      ]
+      if (is_android) {
+        deps += [ "//testing/android/native_test:native_test_native_code" ]
+      }
+
+      data = [
+        "../resources/voice_engine/audio_dtx16.wav",
+        "../resources/voice_engine/audio_tiny16.wav",
+        "../resources/voice_engine/audio_tiny48.wav",
+        "test/low_bandwidth_audio_test.py",
+      ]
+      if (is_linux || is_android) {
+        data += [
+          "../tools_webrtc/audio_quality/linux/PolqaOem64",
+          "../tools_webrtc/audio_quality/linux/pesq",
+        ]
+      }
+      if (is_win) {
+        data += [
+          "../tools_webrtc/audio_quality/win/PolqaOem64.dll",
+          "../tools_webrtc/audio_quality/win/PolqaOem64.exe",
+          "../tools_webrtc/audio_quality/win/pesq.exe",
+          "../tools_webrtc/audio_quality/win/vcomp120.dll",
+        ]
+      }
+      if (is_mac) {
+        data += [ "../tools_webrtc/audio_quality/mac/pesq" ]
+      }
+
+      if (!build_with_chromium && is_clang) {
+        # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163)
+        suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+      }
+    }
+  }
+
+  rtc_source_set("audio_perf_tests") {
+    testonly = true
+
+    sources = [
+      "test/audio_bwe_integration_test.cc",
+      "test/audio_bwe_integration_test.h",
+    ]
+    deps = [
+      "../common_audio",
+      "../rtc_base:rtc_base_approved",
+      "../system_wrappers",
+      "../test:field_trial",
+      "../test:fileutils",
+      "../test:single_threaded_task_queue",
+      "../test:test_common",
+      "../test:test_main",
+      "//testing/gtest",
+    ]
+
+    data = [
+      "//resources/voice_engine/audio_dtx16.wav",
+    ]
+
+    if (!build_with_chromium && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+  }
+}
diff --git a/audio/DEPS b/audio/DEPS
new file mode 100644
index 0000000..8bb1f80
--- /dev/null
+++ b/audio/DEPS
@@ -0,0 +1,26 @@
+include_rules = [
+  "+call",
+  "+common_audio",
+  "+logging/rtc_event_log",
+  "+modules/audio_coding",
+  "+modules/audio_device",
+  "+modules/audio_mixer",
+  "+modules/audio_processing",
+  "+modules/audio_processing/include",
+  "+modules/bitrate_controller",
+  "+modules/congestion_controller",
+  "+modules/pacing",
+  "+modules/remote_bitrate_estimator",
+  "+modules/rtp_rtcp",
+  "+modules/utility",
+  "+system_wrappers",
+]
+
+specific_include_rules = {
+  "audio_send_stream.cc": [
+    "+modules/audio_coding/codecs/cng/audio_encoder_cng.h",
+  ],
+  "audio_transport_impl.h": [
+    "+modules/audio_processing/typing_detection.h",
+  ]
+}
diff --git a/audio/OWNERS b/audio/OWNERS
new file mode 100644
index 0000000..d53e4fa
--- /dev/null
+++ b/audio/OWNERS
@@ -0,0 +1,7 @@
+solenberg@webrtc.org
+ossu@webrtc.org
+
+# These are for the common case of adding or renaming files. If you're doing
+# structural changes, please get a review from a reviewer in this file.
+per-file *.gn=*
+per-file *.gni=*
diff --git a/audio/audio_level.cc b/audio/audio_level.cc
new file mode 100644
index 0000000..ca52522
--- /dev/null
+++ b/audio/audio_level.cc
@@ -0,0 +1,86 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/audio_level.h"
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/include/module_common_types.h"
+
+namespace webrtc {
+namespace voe {
+
+AudioLevel::AudioLevel()
+    : abs_max_(0), count_(0), current_level_full_range_(0) {
+  WebRtcSpl_Init();
+}
+
+AudioLevel::~AudioLevel() {}
+
+int16_t AudioLevel::LevelFullRange() const {
+  rtc::CritScope cs(&crit_sect_);
+  return current_level_full_range_;
+}
+
+void AudioLevel::Clear() {
+  rtc::CritScope cs(&crit_sect_);
+  abs_max_ = 0;
+  count_ = 0;
+  current_level_full_range_ = 0;
+}
+
+double AudioLevel::TotalEnergy() const {
+  rtc::CritScope cs(&crit_sect_);
+  return total_energy_;
+}
+
+double AudioLevel::TotalDuration() const {
+  rtc::CritScope cs(&crit_sect_);
+  return total_duration_;
+}
+
+void AudioLevel::ComputeLevel(const AudioFrame& audioFrame, double duration) {
+  // Check speech level (works for 2 channels as well)
+  int16_t abs_value = audioFrame.muted() ? 0 :
+      WebRtcSpl_MaxAbsValueW16(
+          audioFrame.data(),
+          audioFrame.samples_per_channel_ * audioFrame.num_channels_);
+
+  // Protect member access using a lock since this method is called on a
+  // dedicated audio thread in the RecordedDataIsAvailable() callback.
+  rtc::CritScope cs(&crit_sect_);
+
+  if (abs_value > abs_max_)
+    abs_max_ = abs_value;
+
+  // Update level approximately 10 times per second
+  if (count_++ == kUpdateFrequency) {
+    current_level_full_range_ = abs_max_;
+
+    count_ = 0;
+
+    // Decay the absolute maximum (divide by 4)
+    abs_max_ >>= 2;
+  }
+
+  // See the description for "totalAudioEnergy" in the WebRTC stats spec
+  // (https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy)
+  // for an explanation of these formulas. In short, we need a value that can
+  // be used to compute RMS audio levels over different time intervals, by
+  // taking the difference between the results from two getStats calls. To do
+  // this, the value needs to be of units "squared sample value * time".
+  double additional_energy =
+      static_cast<double>(current_level_full_range_) / INT16_MAX;
+  additional_energy *= additional_energy;
+  total_energy_ += additional_energy * duration;
+  total_duration_ += duration;
+}
+
+}  // namespace voe
+}  // namespace webrtc
diff --git a/audio/audio_level.h b/audio/audio_level.h
new file mode 100644
index 0000000..3bbe5fd
--- /dev/null
+++ b/audio/audio_level.h
@@ -0,0 +1,57 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_AUDIO_LEVEL_H_
+#define AUDIO_AUDIO_LEVEL_H_
+
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class AudioFrame;
+namespace voe {
+
+class AudioLevel {
+ public:
+  AudioLevel();
+  ~AudioLevel();
+
+  // Called on "API thread(s)" from APIs like VoEBase::CreateChannel(),
+  // VoEBase::StopSend()
+  int16_t LevelFullRange() const;
+  void Clear();
+  // See the description for "totalAudioEnergy" in the WebRTC stats spec
+  // (https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy)
+  double TotalEnergy() const;
+  double TotalDuration() const;
+
+  // Called on a native capture audio thread (platform dependent) from the
+  // AudioTransport::RecordedDataIsAvailable() callback.
+  // In Chrome, this method is called on the AudioInputDevice thread.
+  void ComputeLevel(const AudioFrame& audioFrame, double duration);
+
+ private:
+  enum { kUpdateFrequency = 10 };
+
+  rtc::CriticalSection crit_sect_;
+
+  int16_t abs_max_ RTC_GUARDED_BY(crit_sect_);
+  int16_t count_ RTC_GUARDED_BY(crit_sect_);
+  int16_t current_level_full_range_ RTC_GUARDED_BY(crit_sect_);
+
+  double total_energy_ RTC_GUARDED_BY(crit_sect_) = 0.0;
+  double total_duration_ RTC_GUARDED_BY(crit_sect_) = 0.0;
+};
+
+}  // namespace voe
+}  // namespace webrtc
+
+#endif  // AUDIO_AUDIO_LEVEL_H_
diff --git a/audio/audio_receive_stream.cc b/audio/audio_receive_stream.cc
new file mode 100644
index 0000000..8a1fbed
--- /dev/null
+++ b/audio/audio_receive_stream.cc
@@ -0,0 +1,381 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/audio_receive_stream.h"
+
+#include <string>
+#include <utility>
+
+#include "api/call/audio_sink.h"
+#include "audio/audio_send_stream.h"
+#include "audio/audio_state.h"
+#include "audio/channel_proxy.h"
+#include "audio/conversion.h"
+#include "call/rtp_stream_receiver_controller_interface.h"
+#include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
+#include "modules/rtp_rtcp/include/rtp_receiver.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/timeutils.h"
+
+namespace webrtc {
+
+std::string AudioReceiveStream::Config::Rtp::ToString() const {
+  char ss_buf[1024];
+  rtc::SimpleStringBuilder ss(ss_buf);
+  ss << "{remote_ssrc: " << remote_ssrc;
+  ss << ", local_ssrc: " << local_ssrc;
+  ss << ", transport_cc: " << (transport_cc ? "on" : "off");
+  ss << ", nack: " << nack.ToString();
+  ss << ", extensions: [";
+  for (size_t i = 0; i < extensions.size(); ++i) {
+    ss << extensions[i].ToString();
+    if (i != extensions.size() - 1) {
+      ss << ", ";
+    }
+  }
+  ss << ']';
+  ss << '}';
+  return ss.str();
+}
+
+std::string AudioReceiveStream::Config::ToString() const {
+  char ss_buf[1024];
+  rtc::SimpleStringBuilder ss(ss_buf);
+  ss << "{rtp: " << rtp.ToString();
+  ss << ", rtcp_send_transport: "
+     << (rtcp_send_transport ? "(Transport)" : "null");
+  if (!sync_group.empty()) {
+    ss << ", sync_group: " << sync_group;
+  }
+  ss << '}';
+  return ss.str();
+}
+
+namespace internal {
+namespace {
+std::unique_ptr<voe::ChannelProxy> CreateChannelAndProxy(
+    webrtc::AudioState* audio_state,
+    ProcessThread* module_process_thread,
+    const webrtc::AudioReceiveStream::Config& config) {
+  RTC_DCHECK(audio_state);
+  internal::AudioState* internal_audio_state =
+      static_cast<internal::AudioState*>(audio_state);
+  return std::unique_ptr<voe::ChannelProxy>(new voe::ChannelProxy(
+      std::unique_ptr<voe::Channel>(new voe::Channel(
+              module_process_thread,
+              internal_audio_state->audio_device_module(),
+              config.jitter_buffer_max_packets,
+              config.jitter_buffer_fast_accelerate,
+              config.decoder_factory))));
+}
+}  // namespace
+
+AudioReceiveStream::AudioReceiveStream(
+    RtpStreamReceiverControllerInterface* receiver_controller,
+    PacketRouter* packet_router,
+    ProcessThread* module_process_thread,
+    const webrtc::AudioReceiveStream::Config& config,
+    const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
+    webrtc::RtcEventLog* event_log)
+    : AudioReceiveStream(receiver_controller,
+                         packet_router,
+                         config,
+                         audio_state,
+                         event_log,
+                         CreateChannelAndProxy(audio_state.get(),
+                                               module_process_thread,
+                                               config)) {}
+
+AudioReceiveStream::AudioReceiveStream(
+    RtpStreamReceiverControllerInterface* receiver_controller,
+    PacketRouter* packet_router,
+    const webrtc::AudioReceiveStream::Config& config,
+    const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
+    webrtc::RtcEventLog* event_log,
+    std::unique_ptr<voe::ChannelProxy> channel_proxy)
+    : audio_state_(audio_state),
+      channel_proxy_(std::move(channel_proxy)) {
+  RTC_LOG(LS_INFO) << "AudioReceiveStream: " << config.rtp.remote_ssrc;
+  RTC_DCHECK(receiver_controller);
+  RTC_DCHECK(packet_router);
+  RTC_DCHECK(config.decoder_factory);
+  RTC_DCHECK(audio_state_);
+  RTC_DCHECK(channel_proxy_);
+
+  module_process_thread_checker_.DetachFromThread();
+
+  channel_proxy_->SetRtcEventLog(event_log);
+  channel_proxy_->RegisterTransport(config.rtcp_send_transport);
+
+  // Configure bandwidth estimation.
+  channel_proxy_->RegisterReceiverCongestionControlObjects(packet_router);
+
+  // Register with transport.
+  rtp_stream_receiver_ =
+      receiver_controller->CreateReceiver(config.rtp.remote_ssrc,
+                                          channel_proxy_.get());
+
+  ConfigureStream(this, config, true);
+}
+
+AudioReceiveStream::~AudioReceiveStream() {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  RTC_LOG(LS_INFO) << "~AudioReceiveStream: " << config_.rtp.remote_ssrc;
+  Stop();
+  channel_proxy_->DisassociateSendChannel();
+  channel_proxy_->RegisterTransport(nullptr);
+  channel_proxy_->ResetReceiverCongestionControlObjects();
+  channel_proxy_->SetRtcEventLog(nullptr);
+}
+
+void AudioReceiveStream::Reconfigure(
+    const webrtc::AudioReceiveStream::Config& config) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  ConfigureStream(this, config, false);
+}
+
+void AudioReceiveStream::Start() {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  if (playing_) {
+    return;
+  }
+  channel_proxy_->StartPlayout();
+  playing_ = true;
+  audio_state()->AddReceivingStream(this);
+}
+
+void AudioReceiveStream::Stop() {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  if (!playing_) {
+    return;
+  }
+  channel_proxy_->StopPlayout();
+  playing_ = false;
+  audio_state()->RemoveReceivingStream(this);
+}
+
+webrtc::AudioReceiveStream::Stats AudioReceiveStream::GetStats() const {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  webrtc::AudioReceiveStream::Stats stats;
+  stats.remote_ssrc = config_.rtp.remote_ssrc;
+
+  webrtc::CallStatistics call_stats = channel_proxy_->GetRTCPStatistics();
+  // TODO(solenberg): Don't return here if we can't get the codec - return the
+  //                  stats we *can* get.
+  webrtc::CodecInst codec_inst = {0};
+  if (!channel_proxy_->GetRecCodec(&codec_inst)) {
+    return stats;
+  }
+
+  stats.bytes_rcvd = call_stats.bytesReceived;
+  stats.packets_rcvd = call_stats.packetsReceived;
+  stats.packets_lost = call_stats.cumulativeLost;
+  stats.fraction_lost = Q8ToFloat(call_stats.fractionLost);
+  stats.capture_start_ntp_time_ms = call_stats.capture_start_ntp_time_ms_;
+  if (codec_inst.pltype != -1) {
+    stats.codec_name = codec_inst.plname;
+    stats.codec_payload_type = codec_inst.pltype;
+  }
+  stats.ext_seqnum = call_stats.extendedMax;
+  if (codec_inst.plfreq / 1000 > 0) {
+    stats.jitter_ms = call_stats.jitterSamples / (codec_inst.plfreq / 1000);
+  }
+  stats.delay_estimate_ms = channel_proxy_->GetDelayEstimate();
+  stats.audio_level = channel_proxy_->GetSpeechOutputLevelFullRange();
+  stats.total_output_energy = channel_proxy_->GetTotalOutputEnergy();
+  stats.total_output_duration = channel_proxy_->GetTotalOutputDuration();
+
+  // Get jitter buffer and total delay (alg + jitter + playout) stats.
+  auto ns = channel_proxy_->GetNetworkStatistics();
+  stats.jitter_buffer_ms = ns.currentBufferSize;
+  stats.jitter_buffer_preferred_ms = ns.preferredBufferSize;
+  stats.total_samples_received = ns.totalSamplesReceived;
+  stats.concealed_samples = ns.concealedSamples;
+  stats.concealment_events = ns.concealmentEvents;
+  stats.jitter_buffer_delay_seconds =
+      static_cast<double>(ns.jitterBufferDelayMs) /
+      static_cast<double>(rtc::kNumMillisecsPerSec);
+  stats.expand_rate = Q14ToFloat(ns.currentExpandRate);
+  stats.speech_expand_rate = Q14ToFloat(ns.currentSpeechExpandRate);
+  stats.secondary_decoded_rate = Q14ToFloat(ns.currentSecondaryDecodedRate);
+  stats.secondary_discarded_rate = Q14ToFloat(ns.currentSecondaryDiscardedRate);
+  stats.accelerate_rate = Q14ToFloat(ns.currentAccelerateRate);
+  stats.preemptive_expand_rate = Q14ToFloat(ns.currentPreemptiveRate);
+
+  auto ds = channel_proxy_->GetDecodingCallStatistics();
+  stats.decoding_calls_to_silence_generator = ds.calls_to_silence_generator;
+  stats.decoding_calls_to_neteq = ds.calls_to_neteq;
+  stats.decoding_normal = ds.decoded_normal;
+  stats.decoding_plc = ds.decoded_plc;
+  stats.decoding_cng = ds.decoded_cng;
+  stats.decoding_plc_cng = ds.decoded_plc_cng;
+  stats.decoding_muted_output = ds.decoded_muted_output;
+
+  return stats;
+}
+
+void AudioReceiveStream::SetSink(AudioSinkInterface* sink) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  channel_proxy_->SetSink(sink);
+}
+
+void AudioReceiveStream::SetGain(float gain) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  channel_proxy_->SetChannelOutputVolumeScaling(gain);
+}
+
+std::vector<RtpSource> AudioReceiveStream::GetSources() const {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  return channel_proxy_->GetSources();
+}
+
+AudioMixer::Source::AudioFrameInfo AudioReceiveStream::GetAudioFrameWithInfo(
+    int sample_rate_hz,
+    AudioFrame* audio_frame) {
+  return channel_proxy_->GetAudioFrameWithInfo(sample_rate_hz, audio_frame);
+}
+
+int AudioReceiveStream::Ssrc() const {
+  return config_.rtp.remote_ssrc;
+}
+
+int AudioReceiveStream::PreferredSampleRate() const {
+  return channel_proxy_->PreferredSampleRate();
+}
+
+int AudioReceiveStream::id() const {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  return config_.rtp.remote_ssrc;
+}
+
+rtc::Optional<Syncable::Info> AudioReceiveStream::GetInfo() const {
+  RTC_DCHECK_RUN_ON(&module_process_thread_checker_);
+  Syncable::Info info;
+
+  RtpRtcp* rtp_rtcp = nullptr;
+  RtpReceiver* rtp_receiver = nullptr;
+  channel_proxy_->GetRtpRtcp(&rtp_rtcp, &rtp_receiver);
+  RTC_DCHECK(rtp_rtcp);
+  RTC_DCHECK(rtp_receiver);
+
+  if (!rtp_receiver->GetLatestTimestamps(
+          &info.latest_received_capture_timestamp,
+          &info.latest_receive_time_ms)) {
+    return rtc::nullopt;
+  }
+  if (rtp_rtcp->RemoteNTP(&info.capture_time_ntp_secs,
+                          &info.capture_time_ntp_frac,
+                          nullptr,
+                          nullptr,
+                          &info.capture_time_source_clock) != 0) {
+    return rtc::nullopt;
+  }
+
+  info.current_delay_ms = channel_proxy_->GetDelayEstimate();
+  return info;
+}
+
+uint32_t AudioReceiveStream::GetPlayoutTimestamp() const {
+  // Called on video capture thread.
+  return channel_proxy_->GetPlayoutTimestamp();
+}
+
+void AudioReceiveStream::SetMinimumPlayoutDelay(int delay_ms) {
+  RTC_DCHECK_RUN_ON(&module_process_thread_checker_);
+  return channel_proxy_->SetMinimumPlayoutDelay(delay_ms);
+}
+
+void AudioReceiveStream::AssociateSendStream(AudioSendStream* send_stream) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  if (send_stream) {
+    channel_proxy_->AssociateSendChannel(send_stream->GetChannelProxy());
+  } else {
+    channel_proxy_->DisassociateSendChannel();
+  }
+  associated_send_stream_ = send_stream;
+}
+
+void AudioReceiveStream::SignalNetworkState(NetworkState state) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+}
+
+bool AudioReceiveStream::DeliverRtcp(const uint8_t* packet, size_t length) {
+  // TODO(solenberg): Tests call this function on a network thread, libjingle
+  // calls on the worker thread. We should move towards always using a network
+  // thread. Then this check can be enabled.
+  // RTC_DCHECK(!thread_checker_.CalledOnValidThread());
+  return channel_proxy_->ReceivedRTCPPacket(packet, length);
+}
+
+void AudioReceiveStream::OnRtpPacket(const RtpPacketReceived& packet) {
+  // TODO(solenberg): Tests call this function on a network thread, libjingle
+  // calls on the worker thread. We should move towards always using a network
+  // thread. Then this check can be enabled.
+  // RTC_DCHECK(!thread_checker_.CalledOnValidThread());
+  channel_proxy_->OnRtpPacket(packet);
+}
+
+const webrtc::AudioReceiveStream::Config& AudioReceiveStream::config() const {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  return config_;
+}
+
+const AudioSendStream*
+    AudioReceiveStream::GetAssociatedSendStreamForTesting() const {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  return associated_send_stream_;
+}
+
+internal::AudioState* AudioReceiveStream::audio_state() const {
+  auto* audio_state = static_cast<internal::AudioState*>(audio_state_.get());
+  RTC_DCHECK(audio_state);
+  return audio_state;
+}
+
+void AudioReceiveStream::ConfigureStream(AudioReceiveStream* stream,
+                                         const Config& new_config,
+                                         bool first_time) {
+  RTC_LOG(LS_INFO) << "AudioReceiveStream::ConfigureStream: "
+                   << new_config.ToString();
+  RTC_DCHECK(stream);
+  const auto& channel_proxy = stream->channel_proxy_;
+  const auto& old_config = stream->config_;
+
+  // Configuration parameters which cannot be changed.
+  RTC_DCHECK(first_time ||
+             old_config.rtp.remote_ssrc == new_config.rtp.remote_ssrc);
+  RTC_DCHECK(first_time ||
+             old_config.rtcp_send_transport == new_config.rtcp_send_transport);
+  // Decoder factory cannot be changed because it is configured at
+  // voe::Channel construction time.
+  RTC_DCHECK(first_time ||
+             old_config.decoder_factory == new_config.decoder_factory);
+
+  if (first_time || old_config.rtp.local_ssrc != new_config.rtp.local_ssrc) {
+    channel_proxy->SetLocalSSRC(new_config.rtp.local_ssrc);
+  }
+  // TODO(solenberg): Config NACK history window (which is a packet count),
+  // using the actual packet size for the configured codec.
+  if (first_time || old_config.rtp.nack.rtp_history_ms !=
+                        new_config.rtp.nack.rtp_history_ms) {
+    channel_proxy->SetNACKStatus(new_config.rtp.nack.rtp_history_ms != 0,
+                                 new_config.rtp.nack.rtp_history_ms / 20);
+  }
+  if (first_time || old_config.decoder_map != new_config.decoder_map) {
+    channel_proxy->SetReceiveCodecs(new_config.decoder_map);
+  }
+
+  stream->config_ = new_config;
+}
+}  // namespace internal
+}  // namespace webrtc
diff --git a/audio/audio_receive_stream.h b/audio/audio_receive_stream.h
new file mode 100644
index 0000000..a47b59c
--- /dev/null
+++ b/audio/audio_receive_stream.h
@@ -0,0 +1,115 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_AUDIO_RECEIVE_STREAM_H_
+#define AUDIO_AUDIO_RECEIVE_STREAM_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/audio/audio_mixer.h"
+#include "audio/audio_state.h"
+#include "call/audio_receive_stream.h"
+#include "call/rtp_packet_sink_interface.h"
+#include "call/syncable.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/thread_checker.h"
+
+namespace webrtc {
+class PacketRouter;
+class ProcessThread;
+class RtcEventLog;
+class RtpPacketReceived;
+class RtpStreamReceiverControllerInterface;
+class RtpStreamReceiverInterface;
+
+namespace voe {
+class ChannelProxy;
+}  // namespace voe
+
+namespace internal {
+class AudioSendStream;
+
+class AudioReceiveStream final : public webrtc::AudioReceiveStream,
+                                 public AudioMixer::Source,
+                                 public Syncable {
+ public:
+  AudioReceiveStream(RtpStreamReceiverControllerInterface* receiver_controller,
+                     PacketRouter* packet_router,
+                     ProcessThread* module_process_thread,
+                     const webrtc::AudioReceiveStream::Config& config,
+                     const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
+                     webrtc::RtcEventLog* event_log);
+  // For unit tests, which need to supply a mock channel proxy.
+  AudioReceiveStream(RtpStreamReceiverControllerInterface* receiver_controller,
+                     PacketRouter* packet_router,
+                     const webrtc::AudioReceiveStream::Config& config,
+                     const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
+                     webrtc::RtcEventLog* event_log,
+                     std::unique_ptr<voe::ChannelProxy> channel_proxy);
+  ~AudioReceiveStream() override;
+
+  // webrtc::AudioReceiveStream implementation.
+  void Reconfigure(const webrtc::AudioReceiveStream::Config& config) override;
+  void Start() override;
+  void Stop() override;
+  webrtc::AudioReceiveStream::Stats GetStats() const override;
+  void SetSink(AudioSinkInterface* sink) override;
+  void SetGain(float gain) override;
+  std::vector<webrtc::RtpSource> GetSources() const override;
+
+  // TODO(nisse): We don't formally implement RtpPacketSinkInterface, and this
+  // method shouldn't be needed. But it's currently used by the
+  // AudioReceiveStreamTest.ReceiveRtpPacket unittest. Figure out if that test
+  // shuld be refactored or deleted, and then delete this method.
+  void OnRtpPacket(const RtpPacketReceived& packet);
+
+  // AudioMixer::Source
+  AudioFrameInfo GetAudioFrameWithInfo(int sample_rate_hz,
+                                       AudioFrame* audio_frame) override;
+  int Ssrc() const override;
+  int PreferredSampleRate() const override;
+
+  // Syncable
+  int id() const override;
+  rtc::Optional<Syncable::Info> GetInfo() const override;
+  uint32_t GetPlayoutTimestamp() const override;
+  void SetMinimumPlayoutDelay(int delay_ms) override;
+
+  void AssociateSendStream(AudioSendStream* send_stream);
+  void SignalNetworkState(NetworkState state);
+  bool DeliverRtcp(const uint8_t* packet, size_t length);
+  const webrtc::AudioReceiveStream::Config& config() const;
+  const AudioSendStream* GetAssociatedSendStreamForTesting() const;
+
+ private:
+  static void ConfigureStream(AudioReceiveStream* stream,
+                              const Config& new_config,
+                              bool first_time);
+
+  AudioState* audio_state() const;
+
+  rtc::ThreadChecker worker_thread_checker_;
+  rtc::ThreadChecker module_process_thread_checker_;
+  webrtc::AudioReceiveStream::Config config_;
+  rtc::scoped_refptr<webrtc::AudioState> audio_state_;
+  std::unique_ptr<voe::ChannelProxy> channel_proxy_;
+  AudioSendStream* associated_send_stream_ = nullptr;
+
+  bool playing_ RTC_GUARDED_BY(worker_thread_checker_) = false;
+
+  std::unique_ptr<RtpStreamReceiverInterface> rtp_stream_receiver_;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AudioReceiveStream);
+};
+}  // namespace internal
+}  // namespace webrtc
+
+#endif  // AUDIO_AUDIO_RECEIVE_STREAM_H_
diff --git a/audio/audio_receive_stream_unittest.cc b/audio/audio_receive_stream_unittest.cc
new file mode 100644
index 0000000..fa663fe
--- /dev/null
+++ b/audio/audio_receive_stream_unittest.cc
@@ -0,0 +1,387 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "api/test/mock_audio_mixer.h"
+#include "audio/audio_receive_stream.h"
+#include "audio/conversion.h"
+#include "audio/mock_voe_channel_proxy.h"
+#include "call/rtp_stream_receiver_controller.h"
+#include "logging/rtc_event_log/mock/mock_rtc_event_log.h"
+#include "modules/audio_device/include/mock_audio_device.h"
+#include "modules/audio_processing/include/mock_audio_processing.h"
+#include "modules/bitrate_controller/include/mock/mock_bitrate_controller.h"
+#include "modules/pacing/packet_router.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "test/gtest.h"
+#include "test/mock_audio_decoder_factory.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+using testing::_;
+using testing::FloatEq;
+using testing::Return;
+using testing::ReturnRef;
+
+AudioDecodingCallStats MakeAudioDecodeStatsForTest() {
+  AudioDecodingCallStats audio_decode_stats;
+  audio_decode_stats.calls_to_silence_generator = 234;
+  audio_decode_stats.calls_to_neteq = 567;
+  audio_decode_stats.decoded_normal = 890;
+  audio_decode_stats.decoded_plc = 123;
+  audio_decode_stats.decoded_cng = 456;
+  audio_decode_stats.decoded_plc_cng = 789;
+  audio_decode_stats.decoded_muted_output = 987;
+  return audio_decode_stats;
+}
+
+const uint32_t kRemoteSsrc = 1234;
+const uint32_t kLocalSsrc = 5678;
+const size_t kOneByteExtensionHeaderLength = 4;
+const size_t kOneByteExtensionLength = 4;
+const int kAudioLevelId = 3;
+const int kTransportSequenceNumberId = 4;
+const int kJitterBufferDelay = -7;
+const int kPlayoutBufferDelay = 302;
+const unsigned int kSpeechOutputLevel = 99;
+const double kTotalOutputEnergy = 0.25;
+const double kTotalOutputDuration = 0.5;
+
+const CallStatistics kCallStats = {
+    345,  678,  901, 234, -12, 3456, 7890, 567, 890, 123};
+const CodecInst kCodecInst = {
+    123, "codec_name_recv", 96000, -187, 0, -103};
+const NetworkStatistics kNetworkStats = {
+    123, 456, false, 789012, 3456, 123, 456, 0,  {}, 789, 12,
+    345, 678, 901,   0,      -1,   -1,  -1,  -1, -1, 0};
+const AudioDecodingCallStats kAudioDecodeStats = MakeAudioDecodeStatsForTest();
+
+struct ConfigHelper {
+  ConfigHelper()
+      : ConfigHelper(new rtc::RefCountedObject<MockAudioMixer>()) {}
+
+  explicit ConfigHelper(rtc::scoped_refptr<MockAudioMixer> audio_mixer)
+      : audio_mixer_(audio_mixer) {
+    using testing::Invoke;
+
+    AudioState::Config config;
+    config.audio_mixer = audio_mixer_;
+    config.audio_processing = new rtc::RefCountedObject<MockAudioProcessing>();
+    config.audio_device_module =
+        new rtc::RefCountedObject<testing::NiceMock<MockAudioDeviceModule>>();
+    audio_state_ = AudioState::Create(config);
+
+    channel_proxy_ = new testing::StrictMock<MockVoEChannelProxy>();
+    EXPECT_CALL(*channel_proxy_, SetLocalSSRC(kLocalSsrc)).Times(1);
+    EXPECT_CALL(*channel_proxy_, SetNACKStatus(true, 15)).Times(1);
+    EXPECT_CALL(*channel_proxy_,
+        RegisterReceiverCongestionControlObjects(&packet_router_))
+            .Times(1);
+    EXPECT_CALL(*channel_proxy_, ResetReceiverCongestionControlObjects())
+        .Times(1);
+    EXPECT_CALL(*channel_proxy_, RegisterTransport(nullptr)).Times(2);
+    testing::Expectation expect_set =
+        EXPECT_CALL(*channel_proxy_, SetRtcEventLog(&event_log_))
+            .Times(1);
+    EXPECT_CALL(*channel_proxy_, SetRtcEventLog(testing::IsNull()))
+        .Times(1)
+        .After(expect_set);
+    EXPECT_CALL(*channel_proxy_, DisassociateSendChannel()).Times(1);
+    EXPECT_CALL(*channel_proxy_, SetReceiveCodecs(_))
+        .WillRepeatedly(
+            Invoke([](const std::map<int, SdpAudioFormat>& codecs) {
+              EXPECT_THAT(codecs, testing::IsEmpty());
+            }));
+
+    stream_config_.rtp.local_ssrc = kLocalSsrc;
+    stream_config_.rtp.remote_ssrc = kRemoteSsrc;
+    stream_config_.rtp.nack.rtp_history_ms = 300;
+    stream_config_.rtp.extensions.push_back(
+        RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId));
+    stream_config_.rtp.extensions.push_back(RtpExtension(
+        RtpExtension::kTransportSequenceNumberUri, kTransportSequenceNumberId));
+    stream_config_.decoder_factory =
+        new rtc::RefCountedObject<MockAudioDecoderFactory>;
+  }
+
+  std::unique_ptr<internal::AudioReceiveStream> CreateAudioReceiveStream() {
+    return std::unique_ptr<internal::AudioReceiveStream>(
+        new internal::AudioReceiveStream(
+            &rtp_stream_receiver_controller_,
+            &packet_router_,
+            stream_config_,
+            audio_state_,
+            &event_log_,
+            std::unique_ptr<voe::ChannelProxy>(channel_proxy_)));
+  }
+
+  AudioReceiveStream::Config& config() { return stream_config_; }
+  rtc::scoped_refptr<MockAudioMixer> audio_mixer() { return audio_mixer_; }
+  MockVoEChannelProxy* channel_proxy() { return channel_proxy_; }
+
+  void SetupMockForGetStats() {
+    using testing::DoAll;
+    using testing::SetArgPointee;
+
+    ASSERT_TRUE(channel_proxy_);
+    EXPECT_CALL(*channel_proxy_, GetRTCPStatistics())
+        .WillOnce(Return(kCallStats));
+    EXPECT_CALL(*channel_proxy_, GetDelayEstimate())
+        .WillOnce(Return(kJitterBufferDelay + kPlayoutBufferDelay));
+    EXPECT_CALL(*channel_proxy_, GetSpeechOutputLevelFullRange())
+        .WillOnce(Return(kSpeechOutputLevel));
+    EXPECT_CALL(*channel_proxy_, GetTotalOutputEnergy())
+        .WillOnce(Return(kTotalOutputEnergy));
+    EXPECT_CALL(*channel_proxy_, GetTotalOutputDuration())
+        .WillOnce(Return(kTotalOutputDuration));
+    EXPECT_CALL(*channel_proxy_, GetNetworkStatistics())
+        .WillOnce(Return(kNetworkStats));
+    EXPECT_CALL(*channel_proxy_, GetDecodingCallStatistics())
+        .WillOnce(Return(kAudioDecodeStats));
+    EXPECT_CALL(*channel_proxy_, GetRecCodec(_))
+        .WillOnce(DoAll(SetArgPointee<0>(kCodecInst), Return(true)));
+  }
+
+ private:
+  PacketRouter packet_router_;
+  MockRtcEventLog event_log_;
+  rtc::scoped_refptr<AudioState> audio_state_;
+  rtc::scoped_refptr<MockAudioMixer> audio_mixer_;
+  AudioReceiveStream::Config stream_config_;
+  testing::StrictMock<MockVoEChannelProxy>* channel_proxy_ = nullptr;
+  RtpStreamReceiverController rtp_stream_receiver_controller_;
+};
+
+void BuildOneByteExtension(std::vector<uint8_t>::iterator it,
+                           int id,
+                           uint32_t extension_value,
+                           size_t value_length) {
+  const uint16_t kRtpOneByteHeaderExtensionId = 0xBEDE;
+  ByteWriter<uint16_t>::WriteBigEndian(&(*it), kRtpOneByteHeaderExtensionId);
+  it += 2;
+
+  ByteWriter<uint16_t>::WriteBigEndian(&(*it), kOneByteExtensionLength / 4);
+  it += 2;
+  const size_t kExtensionDataLength = kOneByteExtensionLength - 1;
+  uint32_t shifted_value = extension_value
+                           << (8 * (kExtensionDataLength - value_length));
+  *it = (id << 4) + (static_cast<uint8_t>(value_length) - 1);
+  ++it;
+  ByteWriter<uint32_t, kExtensionDataLength>::WriteBigEndian(&(*it),
+                                                             shifted_value);
+}
+
+const std::vector<uint8_t> CreateRtpHeaderWithOneByteExtension(
+    int extension_id,
+    uint32_t extension_value,
+    size_t value_length) {
+  std::vector<uint8_t> header;
+  header.resize(webrtc::kRtpHeaderSize + kOneByteExtensionHeaderLength +
+                kOneByteExtensionLength);
+  header[0] = 0x80;   // Version 2.
+  header[0] |= 0x10;  // Set extension bit.
+  header[1] = 100;    // Payload type.
+  header[1] |= 0x80;  // Marker bit is set.
+  ByteWriter<uint16_t>::WriteBigEndian(&header[2], 0x1234);  // Sequence number.
+  ByteWriter<uint32_t>::WriteBigEndian(&header[4], 0x5678);  // Timestamp.
+  ByteWriter<uint32_t>::WriteBigEndian(&header[8], 0x4321);  // SSRC.
+
+  BuildOneByteExtension(header.begin() + webrtc::kRtpHeaderSize, extension_id,
+                        extension_value, value_length);
+  return header;
+}
+
+const std::vector<uint8_t> CreateRtcpSenderReport() {
+  std::vector<uint8_t> packet;
+  const size_t kRtcpSrLength = 28;  // In bytes.
+  packet.resize(kRtcpSrLength);
+  packet[0] = 0x80;  // Version 2.
+  packet[1] = 0xc8;  // PT = 200, SR.
+  // Length in number of 32-bit words - 1.
+  ByteWriter<uint16_t>::WriteBigEndian(&packet[2], 6);
+  ByteWriter<uint32_t>::WriteBigEndian(&packet[4], kLocalSsrc);
+  return packet;
+}
+}  // namespace
+
+TEST(AudioReceiveStreamTest, ConfigToString) {
+  AudioReceiveStream::Config config;
+  config.rtp.remote_ssrc = kRemoteSsrc;
+  config.rtp.local_ssrc = kLocalSsrc;
+  config.rtp.extensions.push_back(
+      RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId));
+  EXPECT_EQ(
+      "{rtp: {remote_ssrc: 1234, local_ssrc: 5678, transport_cc: off, nack: "
+      "{rtp_history_ms: 0}, extensions: [{uri: "
+      "urn:ietf:params:rtp-hdrext:ssrc-audio-level, id: 3}]}, "
+      "rtcp_send_transport: null}",
+      config.ToString());
+}
+
+TEST(AudioReceiveStreamTest, ConstructDestruct) {
+  ConfigHelper helper;
+  auto recv_stream = helper.CreateAudioReceiveStream();
+}
+
+TEST(AudioReceiveStreamTest, ReceiveRtpPacket) {
+  ConfigHelper helper;
+  helper.config().rtp.transport_cc = true;
+  auto recv_stream = helper.CreateAudioReceiveStream();
+  const int kTransportSequenceNumberValue = 1234;
+  std::vector<uint8_t> rtp_packet = CreateRtpHeaderWithOneByteExtension(
+      kTransportSequenceNumberId, kTransportSequenceNumberValue, 2);
+  PacketTime packet_time(5678000, 0);
+
+  RtpPacketReceived parsed_packet;
+  ASSERT_TRUE(parsed_packet.Parse(&rtp_packet[0], rtp_packet.size()));
+  parsed_packet.set_arrival_time_ms((packet_time.timestamp + 500) / 1000);
+
+  EXPECT_CALL(*helper.channel_proxy(),
+              OnRtpPacket(testing::Ref(parsed_packet)));
+
+  recv_stream->OnRtpPacket(parsed_packet);
+}
+
+TEST(AudioReceiveStreamTest, ReceiveRtcpPacket) {
+  ConfigHelper helper;
+  helper.config().rtp.transport_cc = true;
+  auto recv_stream = helper.CreateAudioReceiveStream();
+  std::vector<uint8_t> rtcp_packet = CreateRtcpSenderReport();
+  EXPECT_CALL(*helper.channel_proxy(),
+              ReceivedRTCPPacket(&rtcp_packet[0], rtcp_packet.size()))
+      .WillOnce(Return(true));
+  EXPECT_TRUE(recv_stream->DeliverRtcp(&rtcp_packet[0], rtcp_packet.size()));
+}
+
+TEST(AudioReceiveStreamTest, GetStats) {
+  ConfigHelper helper;
+  auto recv_stream = helper.CreateAudioReceiveStream();
+  helper.SetupMockForGetStats();
+  AudioReceiveStream::Stats stats = recv_stream->GetStats();
+  EXPECT_EQ(kRemoteSsrc, stats.remote_ssrc);
+  EXPECT_EQ(static_cast<int64_t>(kCallStats.bytesReceived), stats.bytes_rcvd);
+  EXPECT_EQ(static_cast<uint32_t>(kCallStats.packetsReceived),
+            stats.packets_rcvd);
+  EXPECT_EQ(kCallStats.cumulativeLost, stats.packets_lost);
+  EXPECT_EQ(Q8ToFloat(kCallStats.fractionLost), stats.fraction_lost);
+  EXPECT_EQ(std::string(kCodecInst.plname), stats.codec_name);
+  EXPECT_EQ(kCallStats.extendedMax, stats.ext_seqnum);
+  EXPECT_EQ(kCallStats.jitterSamples / (kCodecInst.plfreq / 1000),
+            stats.jitter_ms);
+  EXPECT_EQ(kNetworkStats.currentBufferSize, stats.jitter_buffer_ms);
+  EXPECT_EQ(kNetworkStats.preferredBufferSize,
+            stats.jitter_buffer_preferred_ms);
+  EXPECT_EQ(static_cast<uint32_t>(kJitterBufferDelay + kPlayoutBufferDelay),
+            stats.delay_estimate_ms);
+  EXPECT_EQ(static_cast<int32_t>(kSpeechOutputLevel), stats.audio_level);
+  EXPECT_EQ(kTotalOutputEnergy, stats.total_output_energy);
+  EXPECT_EQ(kNetworkStats.totalSamplesReceived, stats.total_samples_received);
+  EXPECT_EQ(kTotalOutputDuration, stats.total_output_duration);
+  EXPECT_EQ(kNetworkStats.concealedSamples, stats.concealed_samples);
+  EXPECT_EQ(kNetworkStats.concealmentEvents, stats.concealment_events);
+  EXPECT_EQ(static_cast<double>(kNetworkStats.jitterBufferDelayMs) /
+                static_cast<double>(rtc::kNumMillisecsPerSec),
+            stats.jitter_buffer_delay_seconds);
+  EXPECT_EQ(Q14ToFloat(kNetworkStats.currentExpandRate), stats.expand_rate);
+  EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSpeechExpandRate),
+            stats.speech_expand_rate);
+  EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSecondaryDecodedRate),
+            stats.secondary_decoded_rate);
+  EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSecondaryDiscardedRate),
+            stats.secondary_discarded_rate);
+  EXPECT_EQ(Q14ToFloat(kNetworkStats.currentAccelerateRate),
+            stats.accelerate_rate);
+  EXPECT_EQ(Q14ToFloat(kNetworkStats.currentPreemptiveRate),
+            stats.preemptive_expand_rate);
+  EXPECT_EQ(kAudioDecodeStats.calls_to_silence_generator,
+            stats.decoding_calls_to_silence_generator);
+  EXPECT_EQ(kAudioDecodeStats.calls_to_neteq, stats.decoding_calls_to_neteq);
+  EXPECT_EQ(kAudioDecodeStats.decoded_normal, stats.decoding_normal);
+  EXPECT_EQ(kAudioDecodeStats.decoded_plc, stats.decoding_plc);
+  EXPECT_EQ(kAudioDecodeStats.decoded_cng, stats.decoding_cng);
+  EXPECT_EQ(kAudioDecodeStats.decoded_plc_cng, stats.decoding_plc_cng);
+  EXPECT_EQ(kAudioDecodeStats.decoded_muted_output,
+            stats.decoding_muted_output);
+  EXPECT_EQ(kCallStats.capture_start_ntp_time_ms_,
+            stats.capture_start_ntp_time_ms);
+}
+
+TEST(AudioReceiveStreamTest, SetGain) {
+  ConfigHelper helper;
+  auto recv_stream = helper.CreateAudioReceiveStream();
+  EXPECT_CALL(*helper.channel_proxy(),
+      SetChannelOutputVolumeScaling(FloatEq(0.765f)));
+  recv_stream->SetGain(0.765f);
+}
+
+TEST(AudioReceiveStreamTest, StreamsShouldBeAddedToMixerOnceOnStart) {
+  ConfigHelper helper1;
+  ConfigHelper helper2(helper1.audio_mixer());
+  auto recv_stream1 = helper1.CreateAudioReceiveStream();
+  auto recv_stream2 = helper2.CreateAudioReceiveStream();
+
+  EXPECT_CALL(*helper1.channel_proxy(), StartPlayout()).Times(1);
+  EXPECT_CALL(*helper2.channel_proxy(), StartPlayout()).Times(1);
+  EXPECT_CALL(*helper1.channel_proxy(), StopPlayout()).Times(1);
+  EXPECT_CALL(*helper2.channel_proxy(), StopPlayout()).Times(1);
+  EXPECT_CALL(*helper1.audio_mixer(), AddSource(recv_stream1.get()))
+      .WillOnce(Return(true));
+  EXPECT_CALL(*helper1.audio_mixer(), AddSource(recv_stream2.get()))
+      .WillOnce(Return(true));
+  EXPECT_CALL(*helper1.audio_mixer(), RemoveSource(recv_stream1.get()))
+      .Times(1);
+  EXPECT_CALL(*helper1.audio_mixer(), RemoveSource(recv_stream2.get()))
+      .Times(1);
+
+  recv_stream1->Start();
+  recv_stream2->Start();
+
+  // One more should not result in any more mixer sources added.
+  recv_stream1->Start();
+
+  // Stop stream before it is being destructed.
+  recv_stream2->Stop();
+}
+
+TEST(AudioReceiveStreamTest, ReconfigureWithSameConfig) {
+  ConfigHelper helper;
+  auto recv_stream = helper.CreateAudioReceiveStream();
+  recv_stream->Reconfigure(helper.config());
+}
+
+TEST(AudioReceiveStreamTest, ReconfigureWithUpdatedConfig) {
+  ConfigHelper helper;
+  auto recv_stream = helper.CreateAudioReceiveStream();
+
+  auto new_config = helper.config();
+  new_config.rtp.local_ssrc = kLocalSsrc + 1;
+  new_config.rtp.nack.rtp_history_ms = 300 + 20;
+  new_config.rtp.extensions.clear();
+  new_config.rtp.extensions.push_back(
+        RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId + 1));
+  new_config.rtp.extensions.push_back(RtpExtension(
+        RtpExtension::kTransportSequenceNumberUri,
+        kTransportSequenceNumberId + 1));
+  new_config.decoder_map.emplace(1, SdpAudioFormat("foo", 8000, 1));
+
+  MockVoEChannelProxy& channel_proxy = *helper.channel_proxy();
+  EXPECT_CALL(channel_proxy, SetLocalSSRC(kLocalSsrc + 1)).Times(1);
+  EXPECT_CALL(channel_proxy, SetNACKStatus(true, 15 + 1)).Times(1);
+  EXPECT_CALL(channel_proxy, SetReceiveCodecs(new_config.decoder_map));
+
+  recv_stream->Reconfigure(new_config);
+}
+}  // namespace test
+}  // namespace webrtc
diff --git a/audio/audio_send_stream.cc b/audio/audio_send_stream.cc
new file mode 100644
index 0000000..04dffcd
--- /dev/null
+++ b/audio/audio_send_stream.cc
@@ -0,0 +1,726 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/audio_send_stream.h"
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "audio/audio_state.h"
+#include "audio/channel_proxy.h"
+#include "audio/conversion.h"
+#include "call/rtp_transport_controller_send_interface.h"
+#include "modules/audio_coding/codecs/cng/audio_encoder_cng.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/event.h"
+#include "rtc_base/function_view.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/timeutils.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+namespace internal {
+namespace {
+// TODO(eladalon): Subsequent CL will make these values experiment-dependent.
+constexpr size_t kPacketLossTrackerMaxWindowSizeMs = 15000;
+constexpr size_t kPacketLossRateMinNumAckedPackets = 50;
+constexpr size_t kRecoverablePacketLossRateMinNumAckedPairs = 40;
+
+void CallEncoder(const std::unique_ptr<voe::ChannelProxy>& channel_proxy,
+                 rtc::FunctionView<void(AudioEncoder*)> lambda) {
+  channel_proxy->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder_ptr) {
+    RTC_DCHECK(encoder_ptr);
+    lambda(encoder_ptr->get());
+  });
+}
+
+std::unique_ptr<voe::ChannelProxy> CreateChannelAndProxy(
+    webrtc::AudioState* audio_state,
+    rtc::TaskQueue* worker_queue,
+    ProcessThread* module_process_thread) {
+  RTC_DCHECK(audio_state);
+  internal::AudioState* internal_audio_state =
+      static_cast<internal::AudioState*>(audio_state);
+  return std::unique_ptr<voe::ChannelProxy>(new voe::ChannelProxy(
+      std::unique_ptr<voe::Channel>(new voe::Channel(
+          worker_queue,
+          module_process_thread,
+          internal_audio_state->audio_device_module()))));
+}
+}  // namespace
+
+// Helper class to track the actively sending lifetime of this stream.
+class AudioSendStream::TimedTransport : public Transport {
+ public:
+  TimedTransport(Transport* transport, TimeInterval* time_interval)
+      : transport_(transport), lifetime_(time_interval) {}
+  bool SendRtp(const uint8_t* packet,
+               size_t length,
+               const PacketOptions& options) {
+    if (lifetime_) {
+      lifetime_->Extend();
+    }
+    return transport_->SendRtp(packet, length, options);
+  }
+  bool SendRtcp(const uint8_t* packet, size_t length) {
+    return transport_->SendRtcp(packet, length);
+  }
+  ~TimedTransport() {}
+
+ private:
+  Transport* transport_;
+  TimeInterval* lifetime_;
+};
+
+AudioSendStream::AudioSendStream(
+    const webrtc::AudioSendStream::Config& config,
+    const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
+    rtc::TaskQueue* worker_queue,
+    ProcessThread* module_process_thread,
+    RtpTransportControllerSendInterface* transport,
+    BitrateAllocator* bitrate_allocator,
+    RtcEventLog* event_log,
+    RtcpRttStats* rtcp_rtt_stats,
+    const rtc::Optional<RtpState>& suspended_rtp_state,
+    TimeInterval* overall_call_lifetime)
+    : AudioSendStream(config,
+                      audio_state,
+                      worker_queue,
+                      transport,
+                      bitrate_allocator,
+                      event_log,
+                      rtcp_rtt_stats,
+                      suspended_rtp_state,
+                      overall_call_lifetime,
+                      CreateChannelAndProxy(audio_state.get(),
+                                            worker_queue,
+                                            module_process_thread)) {}
+
+AudioSendStream::AudioSendStream(
+    const webrtc::AudioSendStream::Config& config,
+    const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
+    rtc::TaskQueue* worker_queue,
+    RtpTransportControllerSendInterface* transport,
+    BitrateAllocator* bitrate_allocator,
+    RtcEventLog* event_log,
+    RtcpRttStats* rtcp_rtt_stats,
+    const rtc::Optional<RtpState>& suspended_rtp_state,
+    TimeInterval* overall_call_lifetime,
+    std::unique_ptr<voe::ChannelProxy> channel_proxy)
+    : worker_queue_(worker_queue),
+      config_(Config(nullptr)),
+      audio_state_(audio_state),
+      channel_proxy_(std::move(channel_proxy)),
+      event_log_(event_log),
+      bitrate_allocator_(bitrate_allocator),
+      transport_(transport),
+      packet_loss_tracker_(kPacketLossTrackerMaxWindowSizeMs,
+                           kPacketLossRateMinNumAckedPackets,
+                           kRecoverablePacketLossRateMinNumAckedPairs),
+      rtp_rtcp_module_(nullptr),
+      suspended_rtp_state_(suspended_rtp_state),
+      overall_call_lifetime_(overall_call_lifetime) {
+  RTC_LOG(LS_INFO) << "AudioSendStream: " << config.rtp.ssrc;
+  RTC_DCHECK(worker_queue_);
+  RTC_DCHECK(audio_state_);
+  RTC_DCHECK(channel_proxy_);
+  RTC_DCHECK(bitrate_allocator_);
+  RTC_DCHECK(transport);
+  RTC_DCHECK(overall_call_lifetime_);
+
+  channel_proxy_->SetRtcEventLog(event_log_);
+  channel_proxy_->SetRtcpRttStats(rtcp_rtt_stats);
+  channel_proxy_->SetRTCPStatus(true);
+  RtpReceiver* rtpReceiver = nullptr;  // Unused, but required for call.
+  channel_proxy_->GetRtpRtcp(&rtp_rtcp_module_, &rtpReceiver);
+  RTC_DCHECK(rtp_rtcp_module_);
+
+  ConfigureStream(this, config, true);
+
+  pacer_thread_checker_.DetachFromThread();
+  // Signal congestion controller this object is ready for OnPacket* callbacks.
+  transport_->RegisterPacketFeedbackObserver(this);
+}
+
+AudioSendStream::~AudioSendStream() {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  RTC_LOG(LS_INFO) << "~AudioSendStream: " << config_.rtp.ssrc;
+  RTC_DCHECK(!sending_);
+  transport_->DeRegisterPacketFeedbackObserver(this);
+  channel_proxy_->RegisterTransport(nullptr);
+  channel_proxy_->ResetSenderCongestionControlObjects();
+  channel_proxy_->SetRtcEventLog(nullptr);
+  channel_proxy_->SetRtcpRttStats(nullptr);
+  // Lifetime can only be updated after deregistering
+  // |timed_send_transport_adapter_| in the underlying channel object to avoid
+  // data races in |active_lifetime_|.
+  overall_call_lifetime_->Extend(active_lifetime_);
+}
+
+const webrtc::AudioSendStream::Config& AudioSendStream::GetConfig() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  return config_;
+}
+
+void AudioSendStream::Reconfigure(
+    const webrtc::AudioSendStream::Config& new_config) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  ConfigureStream(this, new_config, false);
+}
+
+AudioSendStream::ExtensionIds AudioSendStream::FindExtensionIds(
+    const std::vector<RtpExtension>& extensions) {
+  ExtensionIds ids;
+  for (const auto& extension : extensions) {
+    if (extension.uri == RtpExtension::kAudioLevelUri) {
+      ids.audio_level = extension.id;
+    } else if (extension.uri == RtpExtension::kTransportSequenceNumberUri) {
+      ids.transport_sequence_number = extension.id;
+    }
+  }
+  return ids;
+}
+
+void AudioSendStream::ConfigureStream(
+    webrtc::internal::AudioSendStream* stream,
+    const webrtc::AudioSendStream::Config& new_config,
+    bool first_time) {
+  RTC_LOG(LS_INFO) << "AudioSendStream::ConfigureStream: "
+                   << new_config.ToString();
+  const auto& channel_proxy = stream->channel_proxy_;
+  const auto& old_config = stream->config_;
+
+  if (first_time || old_config.rtp.ssrc != new_config.rtp.ssrc) {
+    channel_proxy->SetLocalSSRC(new_config.rtp.ssrc);
+    if (stream->suspended_rtp_state_) {
+      stream->rtp_rtcp_module_->SetRtpState(*stream->suspended_rtp_state_);
+    }
+  }
+  if (first_time || old_config.rtp.c_name != new_config.rtp.c_name) {
+    channel_proxy->SetRTCP_CNAME(new_config.rtp.c_name);
+  }
+  // TODO(solenberg): Config NACK history window (which is a packet count),
+  // using the actual packet size for the configured codec.
+  if (first_time || old_config.rtp.nack.rtp_history_ms !=
+                        new_config.rtp.nack.rtp_history_ms) {
+    channel_proxy->SetNACKStatus(new_config.rtp.nack.rtp_history_ms != 0,
+                                 new_config.rtp.nack.rtp_history_ms / 20);
+  }
+
+  if (first_time ||
+      new_config.send_transport != old_config.send_transport) {
+    if (old_config.send_transport) {
+      channel_proxy->RegisterTransport(nullptr);
+    }
+    if (new_config.send_transport) {
+      stream->timed_send_transport_adapter_.reset(new TimedTransport(
+          new_config.send_transport, &stream->active_lifetime_));
+    } else {
+      stream->timed_send_transport_adapter_.reset(nullptr);
+    }
+    channel_proxy->RegisterTransport(
+        stream->timed_send_transport_adapter_.get());
+  }
+
+  const ExtensionIds old_ids = FindExtensionIds(old_config.rtp.extensions);
+  const ExtensionIds new_ids = FindExtensionIds(new_config.rtp.extensions);
+  // Audio level indication
+  if (first_time || new_ids.audio_level != old_ids.audio_level) {
+    channel_proxy->SetSendAudioLevelIndicationStatus(new_ids.audio_level != 0,
+                                                     new_ids.audio_level);
+  }
+  bool transport_seq_num_id_changed =
+      new_ids.transport_sequence_number != old_ids.transport_sequence_number;
+  if (first_time || transport_seq_num_id_changed) {
+    if (!first_time) {
+      channel_proxy->ResetSenderCongestionControlObjects();
+    }
+
+    RtcpBandwidthObserver* bandwidth_observer = nullptr;
+    bool has_transport_sequence_number = new_ids.transport_sequence_number != 0;
+    if (has_transport_sequence_number) {
+      channel_proxy->EnableSendTransportSequenceNumber(
+          new_ids.transport_sequence_number);
+      // Probing in application limited region is only used in combination with
+      // send side congestion control, wich depends on feedback packets which
+      // requires transport sequence numbers to be enabled.
+      stream->transport_->EnablePeriodicAlrProbing(true);
+      bandwidth_observer = stream->transport_->GetBandwidthObserver();
+    }
+
+    channel_proxy->RegisterSenderCongestionControlObjects(stream->transport_,
+                                                          bandwidth_observer);
+  }
+
+  if (!ReconfigureSendCodec(stream, new_config)) {
+    RTC_LOG(LS_ERROR) << "Failed to set up send codec state.";
+  }
+
+  if (stream->sending_) {
+    ReconfigureBitrateObserver(stream, new_config);
+  }
+  stream->config_ = new_config;
+}
+
+void AudioSendStream::Start() {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  if (sending_) {
+    return;
+  }
+
+  if (config_.min_bitrate_bps != -1 && config_.max_bitrate_bps != -1 &&
+      (FindExtensionIds(config_.rtp.extensions).transport_sequence_number !=
+           0 ||
+       !webrtc::field_trial::IsEnabled("WebRTC-Audio-SendSideBwe"))) {
+    // Audio BWE is enabled.
+    transport_->packet_sender()->SetAccountForAudioPackets(true);
+    ConfigureBitrateObserver(config_.min_bitrate_bps, config_.max_bitrate_bps,
+                             config_.bitrate_priority);
+  }
+  channel_proxy_->StartSend();
+  sending_ = true;
+  audio_state()->AddSendingStream(this, encoder_sample_rate_hz_,
+                                  encoder_num_channels_);
+}
+
+void AudioSendStream::Stop() {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  if (!sending_) {
+    return;
+  }
+
+  RemoveBitrateObserver();
+  channel_proxy_->StopSend();
+  sending_ = false;
+  audio_state()->RemoveSendingStream(this);
+}
+
+void AudioSendStream::SendAudioData(std::unique_ptr<AudioFrame> audio_frame) {
+  RTC_CHECK_RUNS_SERIALIZED(&audio_capture_race_checker_);
+  channel_proxy_->ProcessAndEncodeAudio(std::move(audio_frame));
+}
+
+bool AudioSendStream::SendTelephoneEvent(int payload_type,
+                                         int payload_frequency, int event,
+                                         int duration_ms) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  return channel_proxy_->SetSendTelephoneEventPayloadType(payload_type,
+                                                          payload_frequency) &&
+         channel_proxy_->SendTelephoneEventOutband(event, duration_ms);
+}
+
+void AudioSendStream::SetMuted(bool muted) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_proxy_->SetInputMute(muted);
+}
+
+webrtc::AudioSendStream::Stats AudioSendStream::GetStats() const {
+  return GetStats(true);
+}
+
+webrtc::AudioSendStream::Stats AudioSendStream::GetStats(
+    bool has_remote_tracks) const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  webrtc::AudioSendStream::Stats stats;
+  stats.local_ssrc = config_.rtp.ssrc;
+
+  webrtc::CallStatistics call_stats = channel_proxy_->GetRTCPStatistics();
+  stats.bytes_sent = call_stats.bytesSent;
+  stats.packets_sent = call_stats.packetsSent;
+  // RTT isn't known until a RTCP report is received. Until then, VoiceEngine
+  // returns 0 to indicate an error value.
+  if (call_stats.rttMs > 0) {
+    stats.rtt_ms = call_stats.rttMs;
+  }
+  if (config_.send_codec_spec) {
+    const auto& spec = *config_.send_codec_spec;
+    stats.codec_name = spec.format.name;
+    stats.codec_payload_type = spec.payload_type;
+
+    // Get data from the last remote RTCP report.
+    for (const auto& block : channel_proxy_->GetRemoteRTCPReportBlocks()) {
+      // Lookup report for send ssrc only.
+      if (block.source_SSRC == stats.local_ssrc) {
+        stats.packets_lost = block.cumulative_num_packets_lost;
+        stats.fraction_lost = Q8ToFloat(block.fraction_lost);
+        stats.ext_seqnum = block.extended_highest_sequence_number;
+        // Convert timestamps to milliseconds.
+        if (spec.format.clockrate_hz / 1000 > 0) {
+          stats.jitter_ms =
+              block.interarrival_jitter / (spec.format.clockrate_hz / 1000);
+        }
+        break;
+      }
+    }
+  }
+
+  AudioState::Stats input_stats = audio_state()->GetAudioInputStats();
+  stats.audio_level = input_stats.audio_level;
+  stats.total_input_energy = input_stats.total_energy;
+  stats.total_input_duration = input_stats.total_duration;
+
+  stats.typing_noise_detected = audio_state()->typing_noise_detected();
+  stats.ana_statistics = channel_proxy_->GetANAStatistics();
+  RTC_DCHECK(audio_state_->audio_processing());
+  stats.apm_statistics =
+      audio_state_->audio_processing()->GetStatistics(has_remote_tracks);
+
+  return stats;
+}
+
+void AudioSendStream::SignalNetworkState(NetworkState state) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+}
+
+bool AudioSendStream::DeliverRtcp(const uint8_t* packet, size_t length) {
+  // TODO(solenberg): Tests call this function on a network thread, libjingle
+  // calls on the worker thread. We should move towards always using a network
+  // thread. Then this check can be enabled.
+  // RTC_DCHECK(!worker_thread_checker_.CalledOnValidThread());
+  return channel_proxy_->ReceivedRTCPPacket(packet, length);
+}
+
+uint32_t AudioSendStream::OnBitrateUpdated(uint32_t bitrate_bps,
+                                           uint8_t fraction_loss,
+                                           int64_t rtt,
+                                           int64_t bwe_period_ms) {
+  // A send stream may be allocated a bitrate of zero if the allocator decides
+  // to disable it. For now we ignore this decision and keep sending on min
+  // bitrate.
+  if (bitrate_bps == 0) {
+    bitrate_bps = config_.min_bitrate_bps;
+  }
+  RTC_DCHECK_GE(bitrate_bps,
+                static_cast<uint32_t>(config_.min_bitrate_bps));
+  // The bitrate allocator might allocate an higher than max configured bitrate
+  // if there is room, to allow for, as example, extra FEC. Ignore that for now.
+  const uint32_t max_bitrate_bps = config_.max_bitrate_bps;
+  if (bitrate_bps > max_bitrate_bps)
+    bitrate_bps = max_bitrate_bps;
+
+  channel_proxy_->SetBitrate(bitrate_bps, bwe_period_ms);
+
+  // The amount of audio protection is not exposed by the encoder, hence
+  // always returning 0.
+  return 0;
+}
+
+void AudioSendStream::OnPacketAdded(uint32_t ssrc, uint16_t seq_num) {
+  RTC_DCHECK(pacer_thread_checker_.CalledOnValidThread());
+  // Only packets that belong to this stream are of interest.
+  if (ssrc == config_.rtp.ssrc) {
+    rtc::CritScope lock(&packet_loss_tracker_cs_);
+    // TODO(eladalon): This function call could potentially reset the window,
+    // setting both PLR and RPLR to unknown. Consider (during upcoming
+    // refactoring) passing an indication of such an event.
+    packet_loss_tracker_.OnPacketAdded(seq_num, rtc::TimeMillis());
+  }
+}
+
+void AudioSendStream::OnPacketFeedbackVector(
+    const std::vector<PacketFeedback>& packet_feedback_vector) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  rtc::Optional<float> plr;
+  rtc::Optional<float> rplr;
+  {
+    rtc::CritScope lock(&packet_loss_tracker_cs_);
+    packet_loss_tracker_.OnPacketFeedbackVector(packet_feedback_vector);
+    plr = packet_loss_tracker_.GetPacketLossRate();
+    rplr = packet_loss_tracker_.GetRecoverablePacketLossRate();
+  }
+  // TODO(eladalon): If R/PLR go back to unknown, no indication is given that
+  // the previously sent value is no longer relevant. This will be taken care
+  // of with some refactoring which is now being done.
+  if (plr) {
+    channel_proxy_->OnTwccBasedUplinkPacketLossRate(*plr);
+  }
+  if (rplr) {
+    channel_proxy_->OnRecoverableUplinkPacketLossRate(*rplr);
+  }
+}
+
+void AudioSendStream::SetTransportOverhead(int transport_overhead_per_packet) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_proxy_->SetTransportOverhead(transport_overhead_per_packet);
+}
+
+RtpState AudioSendStream::GetRtpState() const {
+  return rtp_rtcp_module_->GetRtpState();
+}
+
+const voe::ChannelProxy& AudioSendStream::GetChannelProxy() const {
+  RTC_DCHECK(channel_proxy_.get());
+  return *channel_proxy_.get();
+}
+
+internal::AudioState* AudioSendStream::audio_state() {
+  internal::AudioState* audio_state =
+      static_cast<internal::AudioState*>(audio_state_.get());
+  RTC_DCHECK(audio_state);
+  return audio_state;
+}
+
+const internal::AudioState* AudioSendStream::audio_state() const {
+  internal::AudioState* audio_state =
+      static_cast<internal::AudioState*>(audio_state_.get());
+  RTC_DCHECK(audio_state);
+  return audio_state;
+}
+
+void AudioSendStream::StoreEncoderProperties(int sample_rate_hz,
+                                             size_t num_channels) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  encoder_sample_rate_hz_ = sample_rate_hz;
+  encoder_num_channels_ = num_channels;
+  if (sending_) {
+    // Update AudioState's information about the stream.
+    audio_state()->AddSendingStream(this, sample_rate_hz, num_channels);
+  }
+}
+
+// Apply current codec settings to a single voe::Channel used for sending.
+bool AudioSendStream::SetupSendCodec(AudioSendStream* stream,
+                                     const Config& new_config) {
+  RTC_DCHECK(new_config.send_codec_spec);
+  const auto& spec = *new_config.send_codec_spec;
+
+  RTC_DCHECK(new_config.encoder_factory);
+  std::unique_ptr<AudioEncoder> encoder =
+      new_config.encoder_factory->MakeAudioEncoder(spec.payload_type,
+                                                   spec.format, rtc::nullopt);
+
+  if (!encoder) {
+    RTC_DLOG(LS_ERROR) << "Unable to create encoder for " << spec.format;
+    return false;
+  }
+  // If a bitrate has been specified for the codec, use it over the
+  // codec's default.
+  if (spec.target_bitrate_bps) {
+    encoder->OnReceivedTargetAudioBitrate(*spec.target_bitrate_bps);
+  }
+
+  // Enable ANA if configured (currently only used by Opus).
+  if (new_config.audio_network_adaptor_config) {
+    if (encoder->EnableAudioNetworkAdaptor(
+            *new_config.audio_network_adaptor_config, stream->event_log_)) {
+      RTC_DLOG(LS_INFO) << "Audio network adaptor enabled on SSRC "
+                        << new_config.rtp.ssrc;
+    } else {
+      RTC_NOTREACHED();
+    }
+  }
+
+  // Wrap the encoder in a an AudioEncoderCNG, if VAD is enabled.
+  if (spec.cng_payload_type) {
+    AudioEncoderCng::Config cng_config;
+    cng_config.num_channels = encoder->NumChannels();
+    cng_config.payload_type = *spec.cng_payload_type;
+    cng_config.speech_encoder = std::move(encoder);
+    cng_config.vad_mode = Vad::kVadNormal;
+    encoder.reset(new AudioEncoderCng(std::move(cng_config)));
+
+    stream->RegisterCngPayloadType(
+        *spec.cng_payload_type,
+        new_config.send_codec_spec->format.clockrate_hz);
+  }
+
+  stream->StoreEncoderProperties(encoder->SampleRateHz(),
+                                 encoder->NumChannels());
+  stream->channel_proxy_->SetEncoder(new_config.send_codec_spec->payload_type,
+                                     std::move(encoder));
+  return true;
+}
+
+bool AudioSendStream::ReconfigureSendCodec(AudioSendStream* stream,
+                                           const Config& new_config) {
+  const auto& old_config = stream->config_;
+
+  if (!new_config.send_codec_spec) {
+    // We cannot de-configure a send codec. So we will do nothing.
+    // By design, the send codec should have not been configured.
+    RTC_DCHECK(!old_config.send_codec_spec);
+    return true;
+  }
+
+  if (new_config.send_codec_spec == old_config.send_codec_spec &&
+      new_config.audio_network_adaptor_config ==
+          old_config.audio_network_adaptor_config) {
+    return true;
+  }
+
+  // If we have no encoder, or the format or payload type's changed, create a
+  // new encoder.
+  if (!old_config.send_codec_spec ||
+      new_config.send_codec_spec->format !=
+          old_config.send_codec_spec->format ||
+      new_config.send_codec_spec->payload_type !=
+          old_config.send_codec_spec->payload_type) {
+    return SetupSendCodec(stream, new_config);
+  }
+
+  const rtc::Optional<int>& new_target_bitrate_bps =
+      new_config.send_codec_spec->target_bitrate_bps;
+  // If a bitrate has been specified for the codec, use it over the
+  // codec's default.
+  if (new_target_bitrate_bps &&
+      new_target_bitrate_bps !=
+          old_config.send_codec_spec->target_bitrate_bps) {
+    CallEncoder(stream->channel_proxy_, [&](AudioEncoder* encoder) {
+      encoder->OnReceivedTargetAudioBitrate(*new_target_bitrate_bps);
+    });
+  }
+
+  ReconfigureANA(stream, new_config);
+  ReconfigureCNG(stream, new_config);
+
+  return true;
+}
+
+void AudioSendStream::ReconfigureANA(AudioSendStream* stream,
+                                     const Config& new_config) {
+  if (new_config.audio_network_adaptor_config ==
+      stream->config_.audio_network_adaptor_config) {
+    return;
+  }
+  if (new_config.audio_network_adaptor_config) {
+    CallEncoder(stream->channel_proxy_, [&](AudioEncoder* encoder) {
+      if (encoder->EnableAudioNetworkAdaptor(
+              *new_config.audio_network_adaptor_config, stream->event_log_)) {
+        RTC_DLOG(LS_INFO) << "Audio network adaptor enabled on SSRC "
+                          << new_config.rtp.ssrc;
+      } else {
+        RTC_NOTREACHED();
+      }
+    });
+  } else {
+    CallEncoder(stream->channel_proxy_, [&](AudioEncoder* encoder) {
+      encoder->DisableAudioNetworkAdaptor();
+    });
+    RTC_DLOG(LS_INFO) << "Audio network adaptor disabled on SSRC "
+                      << new_config.rtp.ssrc;
+  }
+}
+
+void AudioSendStream::ReconfigureCNG(AudioSendStream* stream,
+                                     const Config& new_config) {
+  if (new_config.send_codec_spec->cng_payload_type ==
+      stream->config_.send_codec_spec->cng_payload_type) {
+    return;
+  }
+
+  // Register the CNG payload type if it's been added, don't do anything if CNG
+  // is removed. Payload types must not be redefined.
+  if (new_config.send_codec_spec->cng_payload_type) {
+    stream->RegisterCngPayloadType(
+        *new_config.send_codec_spec->cng_payload_type,
+        new_config.send_codec_spec->format.clockrate_hz);
+  }
+
+  // Wrap or unwrap the encoder in an AudioEncoderCNG.
+  stream->channel_proxy_->ModifyEncoder(
+      [&](std::unique_ptr<AudioEncoder>* encoder_ptr) {
+        std::unique_ptr<AudioEncoder> old_encoder(std::move(*encoder_ptr));
+        auto sub_encoders = old_encoder->ReclaimContainedEncoders();
+        if (!sub_encoders.empty()) {
+          // Replace enc with its sub encoder. We need to put the sub
+          // encoder in a temporary first, since otherwise the old value
+          // of enc would be destroyed before the new value got assigned,
+          // which would be bad since the new value is a part of the old
+          // value.
+          auto tmp = std::move(sub_encoders[0]);
+          old_encoder = std::move(tmp);
+        }
+        if (new_config.send_codec_spec->cng_payload_type) {
+          AudioEncoderCng::Config config;
+          config.speech_encoder = std::move(old_encoder);
+          config.num_channels = config.speech_encoder->NumChannels();
+          config.payload_type = *new_config.send_codec_spec->cng_payload_type;
+          config.vad_mode = Vad::kVadNormal;
+          encoder_ptr->reset(new AudioEncoderCng(std::move(config)));
+        } else {
+          *encoder_ptr = std::move(old_encoder);
+        }
+      });
+}
+
+void AudioSendStream::ReconfigureBitrateObserver(
+    AudioSendStream* stream,
+    const webrtc::AudioSendStream::Config& new_config) {
+  // Since the Config's default is for both of these to be -1, this test will
+  // allow us to configure the bitrate observer if the new config has bitrate
+  // limits set, but would only have us call RemoveBitrateObserver if we were
+  // previously configured with bitrate limits.
+  int new_transport_seq_num_id =
+      FindExtensionIds(new_config.rtp.extensions).transport_sequence_number;
+  if (stream->config_.min_bitrate_bps == new_config.min_bitrate_bps &&
+      stream->config_.max_bitrate_bps == new_config.max_bitrate_bps &&
+      stream->config_.bitrate_priority == new_config.bitrate_priority &&
+      (FindExtensionIds(stream->config_.rtp.extensions)
+               .transport_sequence_number == new_transport_seq_num_id ||
+       !webrtc::field_trial::IsEnabled("WebRTC-Audio-SendSideBwe"))) {
+    return;
+  }
+
+  if (new_config.min_bitrate_bps != -1 && new_config.max_bitrate_bps != -1 &&
+      (new_transport_seq_num_id != 0 ||
+       !webrtc::field_trial::IsEnabled("WebRTC-Audio-SendSideBwe"))) {
+    stream->ConfigureBitrateObserver(new_config.min_bitrate_bps,
+                                     new_config.max_bitrate_bps,
+                                     new_config.bitrate_priority);
+  } else {
+    stream->RemoveBitrateObserver();
+  }
+}
+
+void AudioSendStream::ConfigureBitrateObserver(int min_bitrate_bps,
+                                               int max_bitrate_bps,
+                                               double bitrate_priority) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  RTC_DCHECK_GE(max_bitrate_bps, min_bitrate_bps);
+  rtc::Event thread_sync_event(false /* manual_reset */, false);
+  worker_queue_->PostTask([&] {
+    // We may get a callback immediately as the observer is registered, so make
+    // sure the bitrate limits in config_ are up-to-date.
+    config_.min_bitrate_bps = min_bitrate_bps;
+    config_.max_bitrate_bps = max_bitrate_bps;
+    config_.bitrate_priority = bitrate_priority;
+    // This either updates the current observer or adds a new observer.
+    bitrate_allocator_->AddObserver(this, min_bitrate_bps, max_bitrate_bps, 0,
+                                    true, config_.track_id, bitrate_priority);
+    thread_sync_event.Set();
+  });
+  thread_sync_event.Wait(rtc::Event::kForever);
+}
+
+void AudioSendStream::RemoveBitrateObserver() {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  rtc::Event thread_sync_event(false /* manual_reset */, false);
+  worker_queue_->PostTask([this, &thread_sync_event] {
+    bitrate_allocator_->RemoveObserver(this);
+    thread_sync_event.Set();
+  });
+  thread_sync_event.Wait(rtc::Event::kForever);
+}
+
+void AudioSendStream::RegisterCngPayloadType(int payload_type,
+                                             int clockrate_hz) {
+  const CodecInst codec = {payload_type, "CN", clockrate_hz, 0, 1, 0};
+  if (rtp_rtcp_module_->RegisterSendPayload(codec) != 0) {
+    rtp_rtcp_module_->DeRegisterSendPayload(codec.pltype);
+    if (rtp_rtcp_module_->RegisterSendPayload(codec) != 0) {
+      RTC_DLOG(LS_ERROR) << "RegisterCngPayloadType() failed to register CN to "
+                            "RTP/RTCP module";
+    }
+  }
+}
+}  // namespace internal
+}  // namespace webrtc
diff --git a/audio/audio_send_stream.h b/audio/audio_send_stream.h
new file mode 100644
index 0000000..1cda778
--- /dev/null
+++ b/audio/audio_send_stream.h
@@ -0,0 +1,169 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_AUDIO_SEND_STREAM_H_
+#define AUDIO_AUDIO_SEND_STREAM_H_
+
+#include <memory>
+#include <vector>
+
+#include "audio/time_interval.h"
+#include "audio/transport_feedback_packet_loss_tracker.h"
+#include "call/audio_send_stream.h"
+#include "call/audio_state.h"
+#include "call/bitrate_allocator.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/thread_checker.h"
+
+namespace webrtc {
+class RtcEventLog;
+class RtcpBandwidthObserver;
+class RtcpRttStats;
+class RtpTransportControllerSendInterface;
+
+namespace voe {
+class ChannelProxy;
+}  // namespace voe
+
+namespace internal {
+class AudioState;
+
+class AudioSendStream final : public webrtc::AudioSendStream,
+                              public webrtc::BitrateAllocatorObserver,
+                              public webrtc::PacketFeedbackObserver {
+ public:
+  AudioSendStream(const webrtc::AudioSendStream::Config& config,
+                  const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
+                  rtc::TaskQueue* worker_queue,
+                  ProcessThread* module_process_thread,
+                  RtpTransportControllerSendInterface* transport,
+                  BitrateAllocator* bitrate_allocator,
+                  RtcEventLog* event_log,
+                  RtcpRttStats* rtcp_rtt_stats,
+                  const rtc::Optional<RtpState>& suspended_rtp_state,
+                  TimeInterval* overall_call_lifetime);
+  // For unit tests, which need to supply a mock channel proxy.
+  AudioSendStream(const webrtc::AudioSendStream::Config& config,
+                  const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
+                  rtc::TaskQueue* worker_queue,
+                  RtpTransportControllerSendInterface* transport,
+                  BitrateAllocator* bitrate_allocator,
+                  RtcEventLog* event_log,
+                  RtcpRttStats* rtcp_rtt_stats,
+                  const rtc::Optional<RtpState>& suspended_rtp_state,
+                  TimeInterval* overall_call_lifetime,
+                  std::unique_ptr<voe::ChannelProxy> channel_proxy);
+  ~AudioSendStream() override;
+
+  // webrtc::AudioSendStream implementation.
+  const webrtc::AudioSendStream::Config& GetConfig() const override;
+  void Reconfigure(const webrtc::AudioSendStream::Config& config) override;
+  void Start() override;
+  void Stop() override;
+  void SendAudioData(std::unique_ptr<AudioFrame> audio_frame) override;
+  bool SendTelephoneEvent(int payload_type, int payload_frequency, int event,
+                          int duration_ms) override;
+  void SetMuted(bool muted) override;
+  webrtc::AudioSendStream::Stats GetStats() const override;
+  webrtc::AudioSendStream::Stats GetStats(
+      bool has_remote_tracks) const override;
+
+  void SignalNetworkState(NetworkState state);
+  bool DeliverRtcp(const uint8_t* packet, size_t length);
+
+  // Implements BitrateAllocatorObserver.
+  uint32_t OnBitrateUpdated(uint32_t bitrate_bps,
+                            uint8_t fraction_loss,
+                            int64_t rtt,
+                            int64_t bwe_period_ms) override;
+
+  // From PacketFeedbackObserver.
+  void OnPacketAdded(uint32_t ssrc, uint16_t seq_num) override;
+  void OnPacketFeedbackVector(
+      const std::vector<PacketFeedback>& packet_feedback_vector) override;
+
+  void SetTransportOverhead(int transport_overhead_per_packet);
+
+  RtpState GetRtpState() const;
+  const voe::ChannelProxy& GetChannelProxy() const;
+
+ private:
+  class TimedTransport;
+
+  internal::AudioState* audio_state();
+  const internal::AudioState* audio_state() const;
+
+  void StoreEncoderProperties(int sample_rate_hz, size_t num_channels);
+
+  // These are all static to make it less likely that (the old) config_ is
+  // accessed unintentionally.
+  static void ConfigureStream(AudioSendStream* stream,
+                              const Config& new_config,
+                              bool first_time);
+  static bool SetupSendCodec(AudioSendStream* stream, const Config& new_config);
+  static bool ReconfigureSendCodec(AudioSendStream* stream,
+                                   const Config& new_config);
+  static void ReconfigureANA(AudioSendStream* stream, const Config& new_config);
+  static void ReconfigureCNG(AudioSendStream* stream, const Config& new_config);
+  static void ReconfigureBitrateObserver(AudioSendStream* stream,
+                                         const Config& new_config);
+
+  void ConfigureBitrateObserver(int min_bitrate_bps,
+                                int max_bitrate_bps,
+                                double bitrate_priority);
+  void RemoveBitrateObserver();
+
+  void RegisterCngPayloadType(int payload_type, int clockrate_hz);
+
+  rtc::ThreadChecker worker_thread_checker_;
+  rtc::ThreadChecker pacer_thread_checker_;
+  rtc::RaceChecker audio_capture_race_checker_;
+  rtc::TaskQueue* worker_queue_;
+  webrtc::AudioSendStream::Config config_;
+  rtc::scoped_refptr<webrtc::AudioState> audio_state_;
+  std::unique_ptr<voe::ChannelProxy> channel_proxy_;
+  RtcEventLog* const event_log_;
+
+  int encoder_sample_rate_hz_ = 0;
+  size_t encoder_num_channels_ = 0;
+  bool sending_ = false;
+
+  BitrateAllocator* const bitrate_allocator_;
+  RtpTransportControllerSendInterface* const transport_;
+
+  rtc::CriticalSection packet_loss_tracker_cs_;
+  TransportFeedbackPacketLossTracker packet_loss_tracker_
+      RTC_GUARDED_BY(&packet_loss_tracker_cs_);
+
+  RtpRtcp* rtp_rtcp_module_;
+  rtc::Optional<RtpState> const suspended_rtp_state_;
+
+  std::unique_ptr<TimedTransport> timed_send_transport_adapter_;
+  TimeInterval active_lifetime_;
+  TimeInterval* overall_call_lifetime_ = nullptr;
+
+  // RFC 5285: Each distinct extension MUST have a unique ID. The value 0 is
+  // reserved for padding and MUST NOT be used as a local identifier.
+  // So it should be safe to use 0 here to indicate "not configured".
+  struct ExtensionIds {
+    int audio_level = 0;
+    int transport_sequence_number = 0;
+  };
+  static ExtensionIds FindExtensionIds(
+      const std::vector<RtpExtension>& extensions);
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AudioSendStream);
+};
+}  // namespace internal
+}  // namespace webrtc
+
+#endif  // AUDIO_AUDIO_SEND_STREAM_H_
diff --git a/audio/audio_send_stream_tests.cc b/audio/audio_send_stream_tests.cc
new file mode 100644
index 0000000..3f96c33
--- /dev/null
+++ b/audio/audio_send_stream_tests.cc
@@ -0,0 +1,238 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "test/call_test.h"
+#include "test/gtest.h"
+#include "test/rtcp_packet_parser.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+class AudioSendTest : public SendTest {
+ public:
+  AudioSendTest() : SendTest(CallTest::kDefaultTimeoutMs) {}
+
+  size_t GetNumVideoStreams() const override {
+    return 0;
+  }
+  size_t GetNumAudioStreams() const override {
+    return 1;
+  }
+  size_t GetNumFlexfecStreams() const override {
+    return 0;
+  }
+};
+}  // namespace
+
+using AudioSendStreamCallTest = CallTest;
+
+TEST_F(AudioSendStreamCallTest, SupportsCName) {
+  static std::string kCName = "PjqatC14dGfbVwGPUOA9IH7RlsFDbWl4AhXEiDsBizo=";
+  class CNameObserver : public AudioSendTest {
+   public:
+    CNameObserver() = default;
+
+   private:
+    Action OnSendRtcp(const uint8_t* packet, size_t length) override {
+      RtcpPacketParser parser;
+      EXPECT_TRUE(parser.Parse(packet, length));
+      if (parser.sdes()->num_packets() > 0) {
+        EXPECT_EQ(1u, parser.sdes()->chunks().size());
+        EXPECT_EQ(kCName, parser.sdes()->chunks()[0].cname);
+
+        observation_complete_.Set();
+      }
+
+      return SEND_PACKET;
+    }
+
+    void ModifyAudioConfigs(
+        AudioSendStream::Config* send_config,
+        std::vector<AudioReceiveStream::Config>* receive_configs) override {
+      send_config->rtp.c_name = kCName;
+    }
+
+    void PerformTest() override {
+      EXPECT_TRUE(Wait()) << "Timed out while waiting for RTCP with CNAME.";
+    }
+  } test;
+
+  RunBaseTest(&test);
+}
+
+TEST_F(AudioSendStreamCallTest, NoExtensionsByDefault) {
+  class NoExtensionsObserver : public AudioSendTest {
+   public:
+    NoExtensionsObserver() = default;
+
+   private:
+    Action OnSendRtp(const uint8_t* packet, size_t length) override {
+      RTPHeader header;
+      EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+      EXPECT_FALSE(header.extension.hasTransmissionTimeOffset);
+      EXPECT_FALSE(header.extension.hasAbsoluteSendTime);
+      EXPECT_FALSE(header.extension.hasTransportSequenceNumber);
+      EXPECT_FALSE(header.extension.hasAudioLevel);
+      EXPECT_FALSE(header.extension.hasVideoRotation);
+      EXPECT_FALSE(header.extension.hasVideoContentType);
+      observation_complete_.Set();
+
+      return SEND_PACKET;
+    }
+
+    void ModifyAudioConfigs(
+        AudioSendStream::Config* send_config,
+        std::vector<AudioReceiveStream::Config>* receive_configs) override {
+      send_config->rtp.extensions.clear();
+    }
+
+    void PerformTest() override {
+      EXPECT_TRUE(Wait()) << "Timed out while waiting for a single RTP packet.";
+    }
+  } test;
+
+  RunBaseTest(&test);
+}
+
+TEST_F(AudioSendStreamCallTest, SupportsAudioLevel) {
+  class AudioLevelObserver : public AudioSendTest {
+   public:
+    AudioLevelObserver() : AudioSendTest() {
+      EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
+          kRtpExtensionAudioLevel, test::kAudioLevelExtensionId));
+    }
+
+    Action OnSendRtp(const uint8_t* packet, size_t length) override {
+      RTPHeader header;
+      EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+      EXPECT_TRUE(header.extension.hasAudioLevel);
+      if (header.extension.audioLevel != 0) {
+        // Wait for at least one packet with a non-zero level.
+        observation_complete_.Set();
+      } else {
+        RTC_LOG(LS_WARNING) << "Got a packet with zero audioLevel - waiting"
+                               " for another packet...";
+      }
+
+      return SEND_PACKET;
+    }
+
+    void ModifyAudioConfigs(
+        AudioSendStream::Config* send_config,
+        std::vector<AudioReceiveStream::Config>* receive_configs) override {
+      send_config->rtp.extensions.clear();
+      send_config->rtp.extensions.push_back(RtpExtension(
+          RtpExtension::kAudioLevelUri, test::kAudioLevelExtensionId));
+    }
+
+    void PerformTest() override {
+      EXPECT_TRUE(Wait()) << "Timed out while waiting for single RTP packet.";
+    }
+  } test;
+
+  RunBaseTest(&test);
+}
+
+TEST_F(AudioSendStreamCallTest, SupportsTransportWideSequenceNumbers) {
+  static const uint8_t kExtensionId = test::kTransportSequenceNumberExtensionId;
+  class TransportWideSequenceNumberObserver : public AudioSendTest {
+   public:
+    TransportWideSequenceNumberObserver() : AudioSendTest() {
+      EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
+          kRtpExtensionTransportSequenceNumber, kExtensionId));
+    }
+
+   private:
+    Action OnSendRtp(const uint8_t* packet, size_t length) override {
+      RTPHeader header;
+      EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+      EXPECT_TRUE(header.extension.hasTransportSequenceNumber);
+      EXPECT_FALSE(header.extension.hasTransmissionTimeOffset);
+      EXPECT_FALSE(header.extension.hasAbsoluteSendTime);
+
+      observation_complete_.Set();
+
+      return SEND_PACKET;
+    }
+
+    void ModifyAudioConfigs(
+        AudioSendStream::Config* send_config,
+        std::vector<AudioReceiveStream::Config>* receive_configs) override {
+      send_config->rtp.extensions.clear();
+      send_config->rtp.extensions.push_back(RtpExtension(
+          RtpExtension::kTransportSequenceNumberUri, kExtensionId));
+    }
+
+    void PerformTest() override {
+      EXPECT_TRUE(Wait()) << "Timed out while waiting for a single RTP packet.";
+    }
+  } test;
+
+  RunBaseTest(&test);
+}
+
+TEST_F(AudioSendStreamCallTest, SendDtmf) {
+  static const uint8_t kDtmfPayloadType = 120;
+  static const int kDtmfPayloadFrequency = 8000;
+  static const int kDtmfEventFirst = 12;
+  static const int kDtmfEventLast = 31;
+  static const int kDtmfDuration = 50;
+  class DtmfObserver : public AudioSendTest {
+   public:
+    DtmfObserver() = default;
+
+   private:
+    Action OnSendRtp(const uint8_t* packet, size_t length) override {
+      RTPHeader header;
+      EXPECT_TRUE(parser_->Parse(packet, length, &header));
+
+      if (header.payloadType == kDtmfPayloadType) {
+        EXPECT_EQ(12u, header.headerLength);
+        EXPECT_EQ(16u, length);
+        const int event = packet[12];
+        if (event != expected_dtmf_event_) {
+          ++expected_dtmf_event_;
+          EXPECT_EQ(event, expected_dtmf_event_);
+          if (expected_dtmf_event_ == kDtmfEventLast) {
+            observation_complete_.Set();
+          }
+        }
+      }
+
+      return SEND_PACKET;
+    }
+
+    void OnAudioStreamsCreated(
+        AudioSendStream* send_stream,
+        const std::vector<AudioReceiveStream*>& receive_streams) override {
+      // Need to start stream here, else DTMF events are dropped.
+      send_stream->Start();
+      for (int event = kDtmfEventFirst; event <= kDtmfEventLast; ++event) {
+        send_stream->SendTelephoneEvent(kDtmfPayloadType, kDtmfPayloadFrequency,
+                                        event, kDtmfDuration);
+      }
+    }
+
+    void PerformTest() override {
+      EXPECT_TRUE(Wait()) << "Timed out while waiting for DTMF stream.";
+    }
+
+    int expected_dtmf_event_ = kDtmfEventFirst;
+  } test;
+
+  RunBaseTest(&test);
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/audio/audio_send_stream_unittest.cc b/audio/audio_send_stream_unittest.cc
new file mode 100644
index 0000000..d8ff0fd
--- /dev/null
+++ b/audio/audio_send_stream_unittest.cc
@@ -0,0 +1,558 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "audio/audio_send_stream.h"
+#include "audio/audio_state.h"
+#include "audio/conversion.h"
+#include "audio/mock_voe_channel_proxy.h"
+#include "call/test/mock_rtp_transport_controller_send.h"
+#include "logging/rtc_event_log/mock/mock_rtc_event_log.h"
+#include "modules/audio_device/include/mock_audio_device.h"
+#include "modules/audio_mixer/audio_mixer_impl.h"
+#include "modules/audio_processing/include/audio_processing_statistics.h"
+#include "modules/audio_processing/include/mock_audio_processing.h"
+#include "modules/rtp_rtcp/mocks/mock_rtcp_bandwidth_observer.h"
+#include "modules/rtp_rtcp/mocks/mock_rtcp_rtt_stats.h"
+#include "modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
+#include "rtc_base/fakeclock.h"
+#include "rtc_base/ptr_util.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/timedelta.h"
+#include "test/gtest.h"
+#include "test/mock_audio_encoder.h"
+#include "test/mock_audio_encoder_factory.h"
+#include "test/mock_transport.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+using testing::_;
+using testing::Eq;
+using testing::Ne;
+using testing::Invoke;
+using testing::Return;
+using testing::StrEq;
+
+const uint32_t kSsrc = 1234;
+const char* kCName = "foo_name";
+const int kAudioLevelId = 2;
+const int kTransportSequenceNumberId = 4;
+const int32_t kEchoDelayMedian = 254;
+const int32_t kEchoDelayStdDev = -3;
+const double kDivergentFilterFraction = 0.2f;
+const double kEchoReturnLoss = -65;
+const double kEchoReturnLossEnhancement = 101;
+const double kResidualEchoLikelihood = -1.0f;
+const double kResidualEchoLikelihoodMax = 23.0f;
+const CallStatistics kCallStats = {
+    1345,  1678,  1901, 1234,  112, 13456, 17890, 1567, -1890, -1123};
+const ReportBlock kReportBlock = {456, 780, 123, 567, 890, 132, 143, 13354};
+const int kTelephoneEventPayloadType = 123;
+const int kTelephoneEventPayloadFrequency = 65432;
+const int kTelephoneEventCode = 45;
+const int kTelephoneEventDuration = 6789;
+const CodecInst kIsacCodec = {103, "isac", 16000, 320, 1, 32000};
+constexpr int kIsacPayloadType = 103;
+const SdpAudioFormat kIsacFormat = {"isac", 16000, 1};
+const SdpAudioFormat kOpusFormat = {"opus", 48000, 2};
+const SdpAudioFormat kG722Format = {"g722", 8000, 1};
+const AudioCodecSpec kCodecSpecs[] = {
+    {kIsacFormat, {16000, 1, 32000, 10000, 32000}},
+    {kOpusFormat, {48000, 1, 32000, 6000, 510000}},
+    {kG722Format, {16000, 1, 64000}}};
+
+class MockLimitObserver : public BitrateAllocator::LimitObserver {
+ public:
+  MOCK_METHOD3(OnAllocationLimitsChanged,
+               void(uint32_t min_send_bitrate_bps,
+                    uint32_t max_padding_bitrate_bps,
+                    uint32_t total_bitrate_bps));
+};
+
+std::unique_ptr<MockAudioEncoder> SetupAudioEncoderMock(
+    int payload_type,
+    const SdpAudioFormat& format) {
+  for (const auto& spec : kCodecSpecs) {
+    if (format == spec.format) {
+      std::unique_ptr<MockAudioEncoder> encoder(
+          new testing::NiceMock<MockAudioEncoder>());
+      ON_CALL(*encoder.get(), SampleRateHz())
+          .WillByDefault(Return(spec.info.sample_rate_hz));
+      ON_CALL(*encoder.get(), NumChannels())
+          .WillByDefault(Return(spec.info.num_channels));
+      ON_CALL(*encoder.get(), RtpTimestampRateHz())
+          .WillByDefault(Return(spec.format.clockrate_hz));
+      return encoder;
+    }
+  }
+  return nullptr;
+}
+
+rtc::scoped_refptr<MockAudioEncoderFactory> SetupEncoderFactoryMock() {
+  rtc::scoped_refptr<MockAudioEncoderFactory> factory =
+      new rtc::RefCountedObject<MockAudioEncoderFactory>();
+  ON_CALL(*factory.get(), GetSupportedEncoders())
+      .WillByDefault(Return(std::vector<AudioCodecSpec>(
+          std::begin(kCodecSpecs), std::end(kCodecSpecs))));
+  ON_CALL(*factory.get(), QueryAudioEncoder(_))
+      .WillByDefault(Invoke(
+          [](const SdpAudioFormat& format) -> rtc::Optional<AudioCodecInfo> {
+            for (const auto& spec : kCodecSpecs) {
+              if (format == spec.format) {
+                return spec.info;
+              }
+            }
+            return rtc::nullopt;
+          }));
+  ON_CALL(*factory.get(), MakeAudioEncoderMock(_, _, _, _))
+      .WillByDefault(Invoke([](int payload_type, const SdpAudioFormat& format,
+                               rtc::Optional<AudioCodecPairId> codec_pair_id,
+                               std::unique_ptr<AudioEncoder>* return_value) {
+        *return_value = SetupAudioEncoderMock(payload_type, format);
+      }));
+  return factory;
+}
+
+struct ConfigHelper {
+  ConfigHelper(bool audio_bwe_enabled, bool expect_set_encoder_call)
+      : stream_config_(nullptr),
+        audio_processing_(new rtc::RefCountedObject<MockAudioProcessing>()),
+        bitrate_allocator_(&limit_observer_),
+        worker_queue_("ConfigHelper_worker_queue"),
+        audio_encoder_(nullptr) {
+    using testing::Invoke;
+
+    AudioState::Config config;
+    config.audio_mixer = AudioMixerImpl::Create();
+    config.audio_processing = audio_processing_;
+    config.audio_device_module =
+        new rtc::RefCountedObject<MockAudioDeviceModule>();
+    audio_state_ = AudioState::Create(config);
+
+    SetupDefaultChannelProxy(audio_bwe_enabled);
+    SetupMockForSetupSendCodec(expect_set_encoder_call);
+
+    // Use ISAC as default codec so as to prevent unnecessary |channel_proxy_|
+    // calls from the default ctor behavior.
+    stream_config_.send_codec_spec =
+        AudioSendStream::Config::SendCodecSpec(kIsacPayloadType, kIsacFormat);
+    stream_config_.rtp.ssrc = kSsrc;
+    stream_config_.rtp.nack.rtp_history_ms = 200;
+    stream_config_.rtp.c_name = kCName;
+    stream_config_.rtp.extensions.push_back(
+        RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId));
+    if (audio_bwe_enabled) {
+      AddBweToConfig(&stream_config_);
+    }
+    stream_config_.encoder_factory = SetupEncoderFactoryMock();
+    stream_config_.min_bitrate_bps = 10000;
+    stream_config_.max_bitrate_bps = 65000;
+  }
+
+  std::unique_ptr<internal::AudioSendStream> CreateAudioSendStream() {
+    return std::unique_ptr<internal::AudioSendStream>(
+        new internal::AudioSendStream(
+            stream_config_, audio_state_, &worker_queue_, &rtp_transport_,
+            &bitrate_allocator_, &event_log_, &rtcp_rtt_stats_, rtc::nullopt,
+            &active_lifetime_,
+            std::unique_ptr<voe::ChannelProxy>(channel_proxy_)));
+  }
+
+  AudioSendStream::Config& config() { return stream_config_; }
+  MockAudioEncoderFactory& mock_encoder_factory() {
+    return *static_cast<MockAudioEncoderFactory*>(
+        stream_config_.encoder_factory.get());
+  }
+  MockVoEChannelProxy* channel_proxy() { return channel_proxy_; }
+  RtpTransportControllerSendInterface* transport() { return &rtp_transport_; }
+  TimeInterval* active_lifetime() { return &active_lifetime_; }
+
+  static void AddBweToConfig(AudioSendStream::Config* config) {
+    config->rtp.extensions.push_back(
+        RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+                     kTransportSequenceNumberId));
+    config->send_codec_spec->transport_cc_enabled = true;
+  }
+
+  void SetupDefaultChannelProxy(bool audio_bwe_enabled) {
+    EXPECT_TRUE(channel_proxy_ == nullptr);
+    channel_proxy_ = new testing::StrictMock<MockVoEChannelProxy>();
+    EXPECT_CALL(*channel_proxy_, GetRtpRtcp(_, _))
+        .WillRepeatedly(Invoke(
+            [this](RtpRtcp** rtp_rtcp_module, RtpReceiver** rtp_receiver) {
+              *rtp_rtcp_module = &this->rtp_rtcp_;
+              *rtp_receiver = nullptr;  // Not deemed necessary for tests yet.
+            }));
+    EXPECT_CALL(*channel_proxy_, SetRTCPStatus(true)).Times(1);
+    EXPECT_CALL(*channel_proxy_, SetLocalSSRC(kSsrc)).Times(1);
+    EXPECT_CALL(*channel_proxy_, SetRTCP_CNAME(StrEq(kCName))).Times(1);
+    EXPECT_CALL(*channel_proxy_, SetNACKStatus(true, 10)).Times(1);
+    EXPECT_CALL(*channel_proxy_,
+                SetSendAudioLevelIndicationStatus(true, kAudioLevelId))
+        .Times(1);
+    EXPECT_CALL(rtp_transport_, GetBandwidthObserver())
+        .WillRepeatedly(Return(&bandwidth_observer_));
+    if (audio_bwe_enabled) {
+      EXPECT_CALL(*channel_proxy_,
+                  EnableSendTransportSequenceNumber(kTransportSequenceNumberId))
+          .Times(1);
+      EXPECT_CALL(*channel_proxy_,
+                  RegisterSenderCongestionControlObjects(
+                      &rtp_transport_, Eq(&bandwidth_observer_)))
+          .Times(1);
+    } else {
+      EXPECT_CALL(*channel_proxy_, RegisterSenderCongestionControlObjects(
+                                       &rtp_transport_, Eq(nullptr)))
+          .Times(1);
+    }
+    EXPECT_CALL(*channel_proxy_, ResetSenderCongestionControlObjects())
+        .Times(1);
+    {
+      ::testing::InSequence unregister_on_destruction;
+      EXPECT_CALL(*channel_proxy_, RegisterTransport(_)).Times(1);
+      EXPECT_CALL(*channel_proxy_, RegisterTransport(nullptr)).Times(1);
+    }
+    EXPECT_CALL(*channel_proxy_, SetRtcEventLog(testing::NotNull())).Times(1);
+    EXPECT_CALL(*channel_proxy_, SetRtcEventLog(testing::IsNull()))
+        .Times(1);  // Destructor resets the event log
+    EXPECT_CALL(*channel_proxy_, SetRtcpRttStats(&rtcp_rtt_stats_)).Times(1);
+    EXPECT_CALL(*channel_proxy_, SetRtcpRttStats(testing::IsNull()))
+        .Times(1);  // Destructor resets the rtt stats.
+  }
+
+  void SetupMockForSetupSendCodec(bool expect_set_encoder_call) {
+    if (expect_set_encoder_call) {
+      EXPECT_CALL(*channel_proxy_, SetEncoderForMock(_, _))
+          .WillOnce(Invoke(
+              [this](int payload_type, std::unique_ptr<AudioEncoder>* encoder) {
+                this->audio_encoder_ = std::move(*encoder);
+                return true;
+              }));
+    }
+  }
+
+  void SetupMockForModifyEncoder() {
+    // Let ModifyEncoder to invoke mock audio encoder.
+    EXPECT_CALL(*channel_proxy_, ModifyEncoder(_))
+        .WillRepeatedly(Invoke(
+            [this](rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)>
+                       modifier) {
+              if (this->audio_encoder_)
+                modifier(&this->audio_encoder_);
+            }));
+  }
+
+  void SetupMockForSendTelephoneEvent() {
+    EXPECT_TRUE(channel_proxy_);
+    EXPECT_CALL(*channel_proxy_,
+        SetSendTelephoneEventPayloadType(kTelephoneEventPayloadType,
+                                         kTelephoneEventPayloadFrequency))
+            .WillOnce(Return(true));
+    EXPECT_CALL(*channel_proxy_,
+        SendTelephoneEventOutband(kTelephoneEventCode, kTelephoneEventDuration))
+            .WillOnce(Return(true));
+  }
+
+  void SetupMockForGetStats() {
+    using testing::DoAll;
+    using testing::SetArgPointee;
+    using testing::SetArgReferee;
+
+    std::vector<ReportBlock> report_blocks;
+    webrtc::ReportBlock block = kReportBlock;
+    report_blocks.push_back(block);  // Has wrong SSRC.
+    block.source_SSRC = kSsrc;
+    report_blocks.push_back(block);  // Correct block.
+    block.fraction_lost = 0;
+    report_blocks.push_back(block);  // Duplicate SSRC, bad fraction_lost.
+
+    EXPECT_TRUE(channel_proxy_);
+    EXPECT_CALL(*channel_proxy_, GetRTCPStatistics())
+        .WillRepeatedly(Return(kCallStats));
+    EXPECT_CALL(*channel_proxy_, GetRemoteRTCPReportBlocks())
+        .WillRepeatedly(Return(report_blocks));
+    EXPECT_CALL(*channel_proxy_, GetANAStatistics())
+        .WillRepeatedly(Return(ANAStats()));
+
+    audio_processing_stats_.echo_return_loss = kEchoReturnLoss;
+    audio_processing_stats_.echo_return_loss_enhancement =
+        kEchoReturnLossEnhancement;
+    audio_processing_stats_.delay_median_ms = kEchoDelayMedian;
+    audio_processing_stats_.delay_standard_deviation_ms = kEchoDelayStdDev;
+    audio_processing_stats_.divergent_filter_fraction =
+        kDivergentFilterFraction;
+    audio_processing_stats_.residual_echo_likelihood = kResidualEchoLikelihood;
+    audio_processing_stats_.residual_echo_likelihood_recent_max =
+        kResidualEchoLikelihoodMax;
+
+    EXPECT_CALL(*audio_processing_, GetStatistics(true))
+        .WillRepeatedly(Return(audio_processing_stats_));
+  }
+
+ private:
+  rtc::scoped_refptr<AudioState> audio_state_;
+  AudioSendStream::Config stream_config_;
+  testing::StrictMock<MockVoEChannelProxy>* channel_proxy_ = nullptr;
+  rtc::scoped_refptr<MockAudioProcessing> audio_processing_;
+  AudioProcessingStats audio_processing_stats_;
+  TimeInterval active_lifetime_;
+  testing::StrictMock<MockRtcpBandwidthObserver> bandwidth_observer_;
+  testing::NiceMock<MockRtcEventLog> event_log_;
+  testing::NiceMock<MockRtpTransportControllerSend> rtp_transport_;
+  testing::NiceMock<MockRtpRtcp> rtp_rtcp_;
+  MockRtcpRttStats rtcp_rtt_stats_;
+  testing::NiceMock<MockLimitObserver> limit_observer_;
+  BitrateAllocator bitrate_allocator_;
+  // |worker_queue| is defined last to ensure all pending tasks are cancelled
+  // and deleted before any other members.
+  rtc::TaskQueue worker_queue_;
+  std::unique_ptr<AudioEncoder> audio_encoder_;
+};
+}  // namespace
+
+TEST(AudioSendStreamTest, ConfigToString) {
+  AudioSendStream::Config config(nullptr);
+  config.rtp.ssrc = kSsrc;
+  config.rtp.c_name = kCName;
+  config.min_bitrate_bps = 12000;
+  config.max_bitrate_bps = 34000;
+  config.send_codec_spec =
+      AudioSendStream::Config::SendCodecSpec(kIsacPayloadType, kIsacFormat);
+  config.send_codec_spec->nack_enabled = true;
+  config.send_codec_spec->transport_cc_enabled = false;
+  config.send_codec_spec->cng_payload_type = 42;
+  config.encoder_factory = MockAudioEncoderFactory::CreateUnusedFactory();
+  config.rtp.extensions.push_back(
+      RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId));
+  EXPECT_EQ(
+      "{rtp: {ssrc: 1234, extensions: [{uri: "
+      "urn:ietf:params:rtp-hdrext:ssrc-audio-level, id: 2}], nack: "
+      "{rtp_history_ms: 0}, c_name: foo_name}, send_transport: null, "
+      "min_bitrate_bps: 12000, max_bitrate_bps: 34000, "
+      "send_codec_spec: {nack_enabled: true, transport_cc_enabled: false, "
+      "cng_payload_type: 42, payload_type: 103, "
+      "format: {name: isac, clockrate_hz: 16000, num_channels: 1, "
+      "parameters: {}}}}",
+      config.ToString());
+}
+
+TEST(AudioSendStreamTest, ConstructDestruct) {
+  ConfigHelper helper(false, true);
+  auto send_stream = helper.CreateAudioSendStream();
+}
+
+TEST(AudioSendStreamTest, SendTelephoneEvent) {
+  ConfigHelper helper(false, true);
+  auto send_stream = helper.CreateAudioSendStream();
+  helper.SetupMockForSendTelephoneEvent();
+  EXPECT_TRUE(send_stream->SendTelephoneEvent(kTelephoneEventPayloadType,
+      kTelephoneEventPayloadFrequency, kTelephoneEventCode,
+      kTelephoneEventDuration));
+}
+
+TEST(AudioSendStreamTest, SetMuted) {
+  ConfigHelper helper(false, true);
+  auto send_stream = helper.CreateAudioSendStream();
+  EXPECT_CALL(*helper.channel_proxy(), SetInputMute(true));
+  send_stream->SetMuted(true);
+}
+
+TEST(AudioSendStreamTest, AudioBweCorrectObjectsOnChannelProxy) {
+  ConfigHelper helper(true, true);
+  auto send_stream = helper.CreateAudioSendStream();
+}
+
+TEST(AudioSendStreamTest, NoAudioBweCorrectObjectsOnChannelProxy) {
+  ConfigHelper helper(false, true);
+  auto send_stream = helper.CreateAudioSendStream();
+}
+
+TEST(AudioSendStreamTest, GetStats) {
+  ConfigHelper helper(false, true);
+  auto send_stream = helper.CreateAudioSendStream();
+  helper.SetupMockForGetStats();
+  AudioSendStream::Stats stats = send_stream->GetStats(true);
+  EXPECT_EQ(kSsrc, stats.local_ssrc);
+  EXPECT_EQ(static_cast<int64_t>(kCallStats.bytesSent), stats.bytes_sent);
+  EXPECT_EQ(kCallStats.packetsSent, stats.packets_sent);
+  EXPECT_EQ(static_cast<int32_t>(kReportBlock.cumulative_num_packets_lost),
+            stats.packets_lost);
+  EXPECT_EQ(Q8ToFloat(kReportBlock.fraction_lost), stats.fraction_lost);
+  EXPECT_EQ(std::string(kIsacCodec.plname), stats.codec_name);
+  EXPECT_EQ(static_cast<int32_t>(kReportBlock.extended_highest_sequence_number),
+            stats.ext_seqnum);
+  EXPECT_EQ(static_cast<int32_t>(kReportBlock.interarrival_jitter /
+                                 (kIsacCodec.plfreq / 1000)),
+            stats.jitter_ms);
+  EXPECT_EQ(kCallStats.rttMs, stats.rtt_ms);
+  EXPECT_EQ(0, stats.audio_level);
+  EXPECT_EQ(0, stats.total_input_energy);
+  EXPECT_EQ(0, stats.total_input_duration);
+  EXPECT_EQ(kEchoDelayMedian, stats.apm_statistics.delay_median_ms);
+  EXPECT_EQ(kEchoDelayStdDev, stats.apm_statistics.delay_standard_deviation_ms);
+  EXPECT_EQ(kEchoReturnLoss, stats.apm_statistics.echo_return_loss);
+  EXPECT_EQ(kEchoReturnLossEnhancement,
+            stats.apm_statistics.echo_return_loss_enhancement);
+  EXPECT_EQ(kDivergentFilterFraction,
+            stats.apm_statistics.divergent_filter_fraction);
+  EXPECT_EQ(kResidualEchoLikelihood,
+            stats.apm_statistics.residual_echo_likelihood);
+  EXPECT_EQ(kResidualEchoLikelihoodMax,
+            stats.apm_statistics.residual_echo_likelihood_recent_max);
+  EXPECT_FALSE(stats.typing_noise_detected);
+}
+
+TEST(AudioSendStreamTest, SendCodecAppliesAudioNetworkAdaptor) {
+  ConfigHelper helper(false, true);
+  helper.config().send_codec_spec =
+      AudioSendStream::Config::SendCodecSpec(0, kOpusFormat);
+  const std::string kAnaConfigString = "abcde";
+  const std::string kAnaReconfigString = "12345";
+
+  helper.config().audio_network_adaptor_config = kAnaConfigString;
+
+  EXPECT_CALL(helper.mock_encoder_factory(), MakeAudioEncoderMock(_, _, _, _))
+      .WillOnce(Invoke([&kAnaConfigString, &kAnaReconfigString](
+                           int payload_type, const SdpAudioFormat& format,
+                           rtc::Optional<AudioCodecPairId> codec_pair_id,
+                           std::unique_ptr<AudioEncoder>* return_value) {
+        auto mock_encoder = SetupAudioEncoderMock(payload_type, format);
+        EXPECT_CALL(*mock_encoder,
+                    EnableAudioNetworkAdaptor(StrEq(kAnaConfigString), _))
+            .WillOnce(Return(true));
+        EXPECT_CALL(*mock_encoder,
+                    EnableAudioNetworkAdaptor(StrEq(kAnaReconfigString), _))
+            .WillOnce(Return(true));
+        *return_value = std::move(mock_encoder);
+      }));
+
+  auto send_stream = helper.CreateAudioSendStream();
+
+  auto stream_config = helper.config();
+  stream_config.audio_network_adaptor_config = kAnaReconfigString;
+
+  helper.SetupMockForModifyEncoder();
+  send_stream->Reconfigure(stream_config);
+}
+
+// VAD is applied when codec is mono and the CNG frequency matches the codec
+// clock rate.
+TEST(AudioSendStreamTest, SendCodecCanApplyVad) {
+  ConfigHelper helper(false, false);
+  helper.config().send_codec_spec =
+      AudioSendStream::Config::SendCodecSpec(9, kG722Format);
+  helper.config().send_codec_spec->cng_payload_type = 105;
+  using ::testing::Invoke;
+  std::unique_ptr<AudioEncoder> stolen_encoder;
+  EXPECT_CALL(*helper.channel_proxy(), SetEncoderForMock(_, _))
+      .WillOnce(
+          Invoke([&stolen_encoder](int payload_type,
+                                   std::unique_ptr<AudioEncoder>* encoder) {
+            stolen_encoder = std::move(*encoder);
+            return true;
+          }));
+
+  auto send_stream = helper.CreateAudioSendStream();
+
+  // We cannot truly determine if the encoder created is an AudioEncoderCng.  It
+  // is the only reasonable implementation that will return something from
+  // ReclaimContainedEncoders, though.
+  ASSERT_TRUE(stolen_encoder);
+  EXPECT_FALSE(stolen_encoder->ReclaimContainedEncoders().empty());
+}
+
+TEST(AudioSendStreamTest, DoesNotPassHigherBitrateThanMaxBitrate) {
+  ConfigHelper helper(false, true);
+  auto send_stream = helper.CreateAudioSendStream();
+  EXPECT_CALL(*helper.channel_proxy(),
+              SetBitrate(helper.config().max_bitrate_bps, _));
+  send_stream->OnBitrateUpdated(helper.config().max_bitrate_bps + 5000, 0.0, 50,
+                                6000);
+}
+
+TEST(AudioSendStreamTest, ProbingIntervalOnBitrateUpdated) {
+  ConfigHelper helper(false, true);
+  auto send_stream = helper.CreateAudioSendStream();
+  EXPECT_CALL(*helper.channel_proxy(), SetBitrate(_, 5000));
+  send_stream->OnBitrateUpdated(50000, 0.0, 50, 5000);
+}
+
+// Test that AudioSendStream doesn't recreate the encoder unnecessarily.
+TEST(AudioSendStreamTest, DontRecreateEncoder) {
+  ConfigHelper helper(false, false);
+  // WillOnce is (currently) the default used by ConfigHelper if asked to set an
+  // expectation for SetEncoder. Since this behavior is essential for this test
+  // to be correct, it's instead set-up manually here. Otherwise a simple change
+  // to ConfigHelper (say to WillRepeatedly) would silently make this test
+  // useless.
+  EXPECT_CALL(*helper.channel_proxy(), SetEncoderForMock(_, _))
+      .WillOnce(Return(true));
+
+  helper.config().send_codec_spec =
+      AudioSendStream::Config::SendCodecSpec(9, kG722Format);
+  helper.config().send_codec_spec->cng_payload_type = 105;
+  auto send_stream = helper.CreateAudioSendStream();
+  send_stream->Reconfigure(helper.config());
+}
+
+TEST(AudioSendStreamTest, ReconfigureTransportCcResetsFirst) {
+  ConfigHelper helper(false, true);
+  auto send_stream = helper.CreateAudioSendStream();
+  auto new_config = helper.config();
+  ConfigHelper::AddBweToConfig(&new_config);
+  EXPECT_CALL(*helper.channel_proxy(),
+              EnableSendTransportSequenceNumber(kTransportSequenceNumberId))
+      .Times(1);
+  {
+    ::testing::InSequence seq;
+    EXPECT_CALL(*helper.channel_proxy(), ResetSenderCongestionControlObjects())
+        .Times(1);
+    EXPECT_CALL(*helper.channel_proxy(), RegisterSenderCongestionControlObjects(
+        helper.transport(), Ne(nullptr)))
+        .Times(1);
+  }
+  send_stream->Reconfigure(new_config);
+}
+
+// Checks that AudioSendStream logs the times at which RTP packets are sent
+// through its interface.
+TEST(AudioSendStreamTest, UpdateLifetime) {
+  ConfigHelper helper(false, true);
+
+  MockTransport mock_transport;
+  helper.config().send_transport = &mock_transport;
+
+  Transport* registered_transport;
+  ON_CALL(*helper.channel_proxy(), RegisterTransport(_))
+      .WillByDefault(Invoke([&registered_transport](Transport* transport) {
+        registered_transport = transport;
+      }));
+
+  rtc::ScopedFakeClock fake_clock;
+  constexpr int64_t kTimeBetweenSendRtpCallsMs = 100;
+  {
+    auto send_stream = helper.CreateAudioSendStream();
+    EXPECT_CALL(mock_transport, SendRtp(_, _, _)).Times(2);
+    const PacketOptions options;
+    registered_transport->SendRtp(nullptr, 0, options);
+    fake_clock.AdvanceTime(
+        rtc::TimeDelta::FromMilliseconds(kTimeBetweenSendRtpCallsMs));
+    registered_transport->SendRtp(nullptr, 0, options);
+  }
+  EXPECT_TRUE(!helper.active_lifetime()->Empty());
+  EXPECT_EQ(helper.active_lifetime()->Length(), kTimeBetweenSendRtpCallsMs);
+}
+}  // namespace test
+}  // namespace webrtc
diff --git a/audio/audio_state.cc b/audio/audio_state.cc
new file mode 100644
index 0000000..d738884
--- /dev/null
+++ b/audio/audio_state.cc
@@ -0,0 +1,195 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/audio_state.h"
+
+#include <algorithm>
+#include <utility>
+#include <vector>
+
+#include "audio/audio_receive_stream.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "rtc_base/atomicops.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/ptr_util.h"
+#include "rtc_base/thread.h"
+
+namespace webrtc {
+namespace internal {
+
+AudioState::AudioState(const AudioState::Config& config)
+    : config_(config),
+      audio_transport_(config_.audio_mixer,
+                       config_.audio_processing.get()) {
+  process_thread_checker_.DetachFromThread();
+  RTC_DCHECK(config_.audio_mixer);
+  RTC_DCHECK(config_.audio_device_module);
+}
+
+AudioState::~AudioState() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(receiving_streams_.empty());
+  RTC_DCHECK(sending_streams_.empty());
+}
+
+bool AudioState::typing_noise_detected() const {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  return audio_transport_.typing_noise_detected();
+}
+
+void AudioState::AddReceivingStream(webrtc::AudioReceiveStream* stream) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK_EQ(0, receiving_streams_.count(stream));
+  receiving_streams_.insert(stream);
+  if (!config_.audio_mixer->AddSource(
+      static_cast<internal::AudioReceiveStream*>(stream))) {
+    RTC_DLOG(LS_ERROR) << "Failed to add source to mixer.";
+  }
+
+  // Make sure playback is initialized; start playing if enabled.
+  auto* adm = config_.audio_device_module.get();
+  if (!adm->Playing()) {
+    if (adm->InitPlayout() == 0) {
+      if (playout_enabled_) {
+        adm->StartPlayout();
+      }
+    } else {
+      RTC_DLOG_F(LS_ERROR) << "Failed to initialize playout.";
+    }
+  }
+}
+
+void AudioState::RemoveReceivingStream(webrtc::AudioReceiveStream* stream) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  auto count = receiving_streams_.erase(stream);
+  RTC_DCHECK_EQ(1, count);
+  config_.audio_mixer->RemoveSource(
+      static_cast<internal::AudioReceiveStream*>(stream));
+  if (receiving_streams_.empty()) {
+    config_.audio_device_module->StopPlayout();
+  }
+}
+
+void AudioState::AddSendingStream(webrtc::AudioSendStream* stream,
+                                  int sample_rate_hz, size_t num_channels) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  auto& properties = sending_streams_[stream];
+  properties.sample_rate_hz = sample_rate_hz;
+  properties.num_channels = num_channels;
+  UpdateAudioTransportWithSendingStreams();
+
+  // Make sure recording is initialized; start recording if enabled.
+  auto* adm = config_.audio_device_module.get();
+  if (!adm->Recording()) {
+    if (adm->InitRecording() == 0) {
+      if (recording_enabled_) {
+        adm->StartRecording();
+      }
+    } else {
+      RTC_DLOG_F(LS_ERROR) << "Failed to initialize recording.";
+    }
+  }
+}
+
+void AudioState::RemoveSendingStream(webrtc::AudioSendStream* stream) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  auto count = sending_streams_.erase(stream);
+  RTC_DCHECK_EQ(1, count);
+  UpdateAudioTransportWithSendingStreams();
+  if (sending_streams_.empty()) {
+    config_.audio_device_module->StopRecording();
+  }
+}
+
+void AudioState::SetPlayout(bool enabled) {
+  RTC_LOG(INFO) << "SetPlayout(" << enabled << ")";
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (playout_enabled_ != enabled) {
+    playout_enabled_ = enabled;
+    if (enabled) {
+      null_audio_poller_.reset();
+      if (!receiving_streams_.empty()) {
+        config_.audio_device_module->StartPlayout();
+      }
+    } else {
+      config_.audio_device_module->StopPlayout();
+      null_audio_poller_ =
+          rtc::MakeUnique<NullAudioPoller>(&audio_transport_);
+    }
+  }
+}
+
+void AudioState::SetRecording(bool enabled) {
+  RTC_LOG(INFO) << "SetRecording(" << enabled << ")";
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (recording_enabled_ != enabled) {
+    recording_enabled_ = enabled;
+    if (enabled) {
+      if (!sending_streams_.empty()) {
+        config_.audio_device_module->StartRecording();
+      }
+    } else {
+      config_.audio_device_module->StopRecording();
+    }
+  }
+}
+
+AudioState::Stats AudioState::GetAudioInputStats() const {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  const voe::AudioLevel& audio_level = audio_transport_.audio_level();
+  Stats result;
+  result.audio_level = audio_level.LevelFullRange();
+  RTC_DCHECK_LE(0, result.audio_level);
+  RTC_DCHECK_GE(32767, result.audio_level);
+  result.total_energy = audio_level.TotalEnergy();
+  result.total_duration = audio_level.TotalDuration();
+  return result;
+}
+
+void AudioState::SetStereoChannelSwapping(bool enable) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  audio_transport_.SetStereoChannelSwapping(enable);
+}
+
+// Reference count; implementation copied from rtc::RefCountedObject.
+void AudioState::AddRef() const {
+  rtc::AtomicOps::Increment(&ref_count_);
+}
+
+// Reference count; implementation copied from rtc::RefCountedObject.
+rtc::RefCountReleaseStatus AudioState::Release() const {
+  if (rtc::AtomicOps::Decrement(&ref_count_) == 0) {
+    delete this;
+    return rtc::RefCountReleaseStatus::kDroppedLastRef;
+  }
+  return rtc::RefCountReleaseStatus::kOtherRefsRemained;
+}
+
+void AudioState::UpdateAudioTransportWithSendingStreams() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  std::vector<webrtc::AudioSendStream*> sending_streams;
+  int max_sample_rate_hz = 8000;
+  size_t max_num_channels = 1;
+  for (const auto& kv : sending_streams_) {
+    sending_streams.push_back(kv.first);
+    max_sample_rate_hz = std::max(max_sample_rate_hz, kv.second.sample_rate_hz);
+    max_num_channels = std::max(max_num_channels, kv.second.num_channels);
+  }
+  audio_transport_.UpdateSendingStreams(std::move(sending_streams),
+                                        max_sample_rate_hz, max_num_channels);
+}
+}  // namespace internal
+
+rtc::scoped_refptr<AudioState> AudioState::Create(
+    const AudioState::Config& config) {
+  return rtc::scoped_refptr<AudioState>(new internal::AudioState(config));
+}
+}  // namespace webrtc
diff --git a/audio/audio_state.h b/audio/audio_state.h
new file mode 100644
index 0000000..d4e4e3f
--- /dev/null
+++ b/audio/audio_state.h
@@ -0,0 +1,104 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_AUDIO_STATE_H_
+#define AUDIO_AUDIO_STATE_H_
+
+#include <map>
+#include <memory>
+#include <unordered_set>
+
+#include "audio/audio_transport_impl.h"
+#include "audio/null_audio_poller.h"
+#include "call/audio_state.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/thread_checker.h"
+
+namespace webrtc {
+
+class AudioSendStream;
+class AudioReceiveStream;
+
+namespace internal {
+
+class AudioState final : public webrtc::AudioState {
+ public:
+  explicit AudioState(const AudioState::Config& config);
+  ~AudioState() override;
+
+  AudioProcessing* audio_processing() override {
+    RTC_DCHECK(config_.audio_processing);
+    return config_.audio_processing.get();
+  }
+  AudioTransport* audio_transport() override {
+    return &audio_transport_;
+  }
+
+  void SetPlayout(bool enabled) override;
+  void SetRecording(bool enabled) override;
+
+  Stats GetAudioInputStats() const override;
+  void SetStereoChannelSwapping(bool enable) override;
+
+  AudioDeviceModule* audio_device_module() {
+    RTC_DCHECK(config_.audio_device_module);
+    return config_.audio_device_module.get();
+  }
+
+  bool typing_noise_detected() const;
+
+  void AddReceivingStream(webrtc::AudioReceiveStream* stream);
+  void RemoveReceivingStream(webrtc::AudioReceiveStream* stream);
+
+  void AddSendingStream(webrtc::AudioSendStream* stream,
+                        int sample_rate_hz, size_t num_channels);
+  void RemoveSendingStream(webrtc::AudioSendStream* stream);
+
+ private:
+  // rtc::RefCountInterface implementation.
+  void AddRef() const override;
+  rtc::RefCountReleaseStatus Release() const override;
+
+  void UpdateAudioTransportWithSendingStreams();
+
+  rtc::ThreadChecker thread_checker_;
+  rtc::ThreadChecker process_thread_checker_;
+  const webrtc::AudioState::Config config_;
+  bool recording_enabled_ = true;
+  bool playout_enabled_ = true;
+
+  // Reference count; implementation copied from rtc::RefCountedObject.
+  // TODO(nisse): Use RefCountedObject or RefCountedBase instead.
+  mutable volatile int ref_count_ = 0;
+
+  // Transports mixed audio from the mixer to the audio device and
+  // recorded audio to the sending streams.
+  AudioTransportImpl audio_transport_;
+
+  // Null audio poller is used to continue polling the audio streams if audio
+  // playout is disabled so that audio processing still happens and the audio
+  // stats are still updated.
+  std::unique_ptr<NullAudioPoller> null_audio_poller_;
+
+  std::unordered_set<webrtc::AudioReceiveStream*> receiving_streams_;
+  struct StreamProperties {
+    int sample_rate_hz = 0;
+    size_t num_channels = 0;
+  };
+  std::map<webrtc::AudioSendStream*, StreamProperties> sending_streams_;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AudioState);
+};
+}  // namespace internal
+}  // namespace webrtc
+
+#endif  // AUDIO_AUDIO_STATE_H_
diff --git a/audio/audio_state_unittest.cc b/audio/audio_state_unittest.cc
new file mode 100644
index 0000000..e825ec6
--- /dev/null
+++ b/audio/audio_state_unittest.cc
@@ -0,0 +1,283 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <vector>
+
+#include "audio/audio_state.h"
+#include "call/test/mock_audio_send_stream.h"
+#include "modules/audio_device/include/mock_audio_device.h"
+#include "modules/audio_mixer/audio_mixer_impl.h"
+#include "modules/audio_processing/include/mock_audio_processing.h"
+#include "rtc_base/refcountedobject.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+
+constexpr int kSampleRate = 16000;
+constexpr int kNumberOfChannels = 1;
+
+struct ConfigHelper {
+  ConfigHelper() : audio_mixer(AudioMixerImpl::Create()) {
+    audio_state_config.audio_mixer = audio_mixer;
+    audio_state_config.audio_processing =
+        new rtc::RefCountedObject<testing::NiceMock<MockAudioProcessing>>();
+    audio_state_config.audio_device_module =
+        new rtc::RefCountedObject<MockAudioDeviceModule>();
+  }
+  AudioState::Config& config() { return audio_state_config; }
+  rtc::scoped_refptr<AudioMixer> mixer() { return audio_mixer; }
+
+ private:
+  AudioState::Config audio_state_config;
+  rtc::scoped_refptr<AudioMixer> audio_mixer;
+};
+
+class FakeAudioSource : public AudioMixer::Source {
+ public:
+  // TODO(aleloi): Valid overrides commented out, because the gmock
+  // methods don't use any override declarations, and we want to avoid
+  // warnings from -Winconsistent-missing-override. See
+  // http://crbug.com/428099.
+  int Ssrc() const /*override*/ { return 0; }
+
+  int PreferredSampleRate() const /*override*/ { return kSampleRate; }
+
+  MOCK_METHOD2(GetAudioFrameWithInfo,
+               AudioFrameInfo(int sample_rate_hz, AudioFrame* audio_frame));
+};
+
+std::vector<int16_t> Create10msSilentTestData(int sample_rate_hz,
+                                              size_t num_channels) {
+  const int samples_per_channel = sample_rate_hz / 100;
+  std::vector<int16_t> audio_data(samples_per_channel * num_channels, 0);
+  return audio_data;
+}
+
+std::vector<int16_t> Create10msTestData(int sample_rate_hz,
+                                        size_t num_channels) {
+  const int samples_per_channel = sample_rate_hz / 100;
+  std::vector<int16_t> audio_data(samples_per_channel * num_channels, 0);
+  // Fill the first channel with a 1kHz sine wave.
+  const float inc = (2 * 3.14159265f * 1000) / sample_rate_hz;
+  float w = 0.f;
+  for (int i = 0; i < samples_per_channel; ++i) {
+    audio_data[i * num_channels] =
+        static_cast<int16_t>(32767.f * std::sin(w));
+    w += inc;
+  }
+  return audio_data;
+}
+
+std::vector<uint32_t> ComputeChannelLevels(AudioFrame* audio_frame) {
+  const size_t num_channels = audio_frame->num_channels_;
+  const size_t samples_per_channel = audio_frame->samples_per_channel_;
+  std::vector<uint32_t> levels(num_channels, 0);
+  for (size_t i = 0; i < samples_per_channel; ++i) {
+    for (size_t j = 0; j < num_channels; ++j) {
+      levels[j] += std::abs(audio_frame->data()[i * num_channels + j]);
+    }
+  }
+  return levels;
+}
+}  // namespace
+
+TEST(AudioStateTest, Create) {
+  ConfigHelper helper;
+  auto audio_state = AudioState::Create(helper.config());
+  EXPECT_TRUE(audio_state.get());
+}
+
+TEST(AudioStateTest, ConstructDestruct) {
+  ConfigHelper helper;
+  std::unique_ptr<internal::AudioState> audio_state(
+      new internal::AudioState(helper.config()));
+}
+
+TEST(AudioStateTest, RecordedAudioArrivesAtSingleStream) {
+  ConfigHelper helper;
+  std::unique_ptr<internal::AudioState> audio_state(
+      new internal::AudioState(helper.config()));
+
+  MockAudioSendStream stream;
+  audio_state->AddSendingStream(&stream, 8000, 2);
+
+  EXPECT_CALL(stream, SendAudioDataForMock(testing::AllOf(
+      testing::Field(&AudioFrame::sample_rate_hz_, testing::Eq(8000)),
+      testing::Field(&AudioFrame::num_channels_, testing::Eq(2u)))))
+          .WillOnce(
+              // Verify that channels are not swapped by default.
+              testing::Invoke([](AudioFrame* audio_frame) {
+                auto levels = ComputeChannelLevels(audio_frame);
+                EXPECT_LT(0u, levels[0]);
+                EXPECT_EQ(0u, levels[1]);
+              }));
+  MockAudioProcessing* ap =
+      static_cast<MockAudioProcessing*>(audio_state->audio_processing());
+  EXPECT_CALL(*ap, set_stream_delay_ms(0));
+  EXPECT_CALL(*ap, set_stream_key_pressed(false));
+  EXPECT_CALL(*ap, ProcessStream(testing::_));
+
+  constexpr int kSampleRate = 16000;
+  constexpr size_t kNumChannels = 2;
+  auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
+  uint32_t new_mic_level = 667;
+  audio_state->audio_transport()->RecordedDataIsAvailable(
+      &audio_data[0], kSampleRate / 100, kNumChannels * 2,
+      kNumChannels, kSampleRate, 0, 0, 0, false, new_mic_level);
+  EXPECT_EQ(667u, new_mic_level);
+
+  audio_state->RemoveSendingStream(&stream);
+}
+
+TEST(AudioStateTest, RecordedAudioArrivesAtMultipleStreams) {
+  ConfigHelper helper;
+  std::unique_ptr<internal::AudioState> audio_state(
+      new internal::AudioState(helper.config()));
+
+  MockAudioSendStream stream_1;
+  MockAudioSendStream stream_2;
+  audio_state->AddSendingStream(&stream_1, 8001, 2);
+  audio_state->AddSendingStream(&stream_2, 32000, 1);
+
+  EXPECT_CALL(stream_1, SendAudioDataForMock(testing::AllOf(
+      testing::Field(&AudioFrame::sample_rate_hz_, testing::Eq(16000)),
+      testing::Field(&AudioFrame::num_channels_, testing::Eq(1u)))))
+          .WillOnce(
+              // Verify that there is output signal.
+              testing::Invoke([](AudioFrame* audio_frame) {
+                auto levels = ComputeChannelLevels(audio_frame);
+                EXPECT_LT(0u, levels[0]);
+              }));
+  EXPECT_CALL(stream_2, SendAudioDataForMock(testing::AllOf(
+      testing::Field(&AudioFrame::sample_rate_hz_, testing::Eq(16000)),
+      testing::Field(&AudioFrame::num_channels_, testing::Eq(1u)))))
+          .WillOnce(
+              // Verify that there is output signal.
+              testing::Invoke([](AudioFrame* audio_frame) {
+                auto levels = ComputeChannelLevels(audio_frame);
+                EXPECT_LT(0u, levels[0]);
+              }));
+  MockAudioProcessing* ap =
+      static_cast<MockAudioProcessing*>(audio_state->audio_processing());
+  EXPECT_CALL(*ap, set_stream_delay_ms(5));
+  EXPECT_CALL(*ap, set_stream_key_pressed(true));
+  EXPECT_CALL(*ap, ProcessStream(testing::_));
+
+  constexpr int kSampleRate = 16000;
+  constexpr size_t kNumChannels = 1;
+  auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
+  uint32_t new_mic_level = 667;
+  audio_state->audio_transport()->RecordedDataIsAvailable(
+      &audio_data[0], kSampleRate / 100, kNumChannels * 2,
+      kNumChannels, kSampleRate, 5, 0, 0, true, new_mic_level);
+  EXPECT_EQ(667u, new_mic_level);
+
+  audio_state->RemoveSendingStream(&stream_1);
+  audio_state->RemoveSendingStream(&stream_2);
+}
+
+TEST(AudioStateTest, EnableChannelSwap) {
+  constexpr int kSampleRate = 16000;
+  constexpr size_t kNumChannels = 2;
+
+  ConfigHelper helper;
+  std::unique_ptr<internal::AudioState> audio_state(
+      new internal::AudioState(helper.config()));
+  audio_state->SetStereoChannelSwapping(true);
+
+  MockAudioSendStream stream;
+  audio_state->AddSendingStream(&stream, kSampleRate, kNumChannels);
+
+  EXPECT_CALL(stream, SendAudioDataForMock(testing::_))
+      .WillOnce(
+          // Verify that channels are swapped.
+          testing::Invoke([](AudioFrame* audio_frame) {
+            auto levels = ComputeChannelLevels(audio_frame);
+            EXPECT_EQ(0u, levels[0]);
+            EXPECT_LT(0u, levels[1]);
+          }));
+
+  auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
+  uint32_t new_mic_level = 667;
+  audio_state->audio_transport()->RecordedDataIsAvailable(
+      &audio_data[0], kSampleRate / 100, kNumChannels * 2,
+      kNumChannels, kSampleRate, 0, 0, 0, false, new_mic_level);
+  EXPECT_EQ(667u, new_mic_level);
+
+  audio_state->RemoveSendingStream(&stream);
+}
+
+TEST(AudioStateTest, InputLevelStats) {
+  constexpr int kSampleRate = 16000;
+  constexpr size_t kNumChannels = 1;
+
+  ConfigHelper helper;
+  std::unique_ptr<internal::AudioState> audio_state(
+      new internal::AudioState(helper.config()));
+
+  // Push a silent buffer -> Level stats should be zeros except for duration.
+  {
+    auto audio_data = Create10msSilentTestData(kSampleRate, kNumChannels);
+    uint32_t new_mic_level = 667;
+    audio_state->audio_transport()->RecordedDataIsAvailable(
+        &audio_data[0], kSampleRate / 100, kNumChannels * 2,
+        kNumChannels, kSampleRate, 0, 0, 0, false, new_mic_level);
+    auto stats = audio_state->GetAudioInputStats();
+    EXPECT_EQ(0, stats.audio_level);
+    EXPECT_THAT(stats.total_energy, testing::DoubleEq(0.0));
+    EXPECT_THAT(stats.total_duration, testing::DoubleEq(0.01));
+  }
+
+  // Push 10 non-silent buffers -> Level stats should be non-zero.
+  {
+    auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
+    uint32_t new_mic_level = 667;
+    for (int i = 0; i < 10; ++i) {
+      audio_state->audio_transport()->RecordedDataIsAvailable(
+          &audio_data[0], kSampleRate / 100, kNumChannels * 2,
+          kNumChannels, kSampleRate, 0, 0, 0, false, new_mic_level);
+    }
+    auto stats = audio_state->GetAudioInputStats();
+    EXPECT_EQ(32767, stats.audio_level);
+    EXPECT_THAT(stats.total_energy, testing::DoubleEq(0.01));
+    EXPECT_THAT(stats.total_duration, testing::DoubleEq(0.11));
+  }
+}
+
+TEST(AudioStateTest,
+     QueryingTransportForAudioShouldResultInGetAudioCallOnMixerSource) {
+  ConfigHelper helper;
+  auto audio_state = AudioState::Create(helper.config());
+
+  FakeAudioSource fake_source;
+  helper.mixer()->AddSource(&fake_source);
+
+  EXPECT_CALL(fake_source, GetAudioFrameWithInfo(testing::_, testing::_))
+      .WillOnce(
+          testing::Invoke([](int sample_rate_hz, AudioFrame* audio_frame) {
+            audio_frame->sample_rate_hz_ = sample_rate_hz;
+            audio_frame->samples_per_channel_ = sample_rate_hz / 100;
+            audio_frame->num_channels_ = kNumberOfChannels;
+            return AudioMixer::Source::AudioFrameInfo::kNormal;
+          }));
+
+  int16_t audio_buffer[kSampleRate / 100 * kNumberOfChannels];
+  size_t n_samples_out;
+  int64_t elapsed_time_ms;
+  int64_t ntp_time_ms;
+  audio_state->audio_transport()->NeedMorePlayData(
+      kSampleRate / 100, kNumberOfChannels * 2, kNumberOfChannels, kSampleRate,
+      audio_buffer, n_samples_out, &elapsed_time_ms, &ntp_time_ms);
+}
+}  // namespace test
+}  // namespace webrtc
diff --git a/audio/audio_transport_impl.cc b/audio/audio_transport_impl.cc
new file mode 100644
index 0000000..f9b0311
--- /dev/null
+++ b/audio/audio_transport_impl.cc
@@ -0,0 +1,257 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/audio_transport_impl.h"
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+
+#include "audio/remix_resample.h"
+#include "audio/utility/audio_frame_operations.h"
+#include "call/audio_send_stream.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+
+// We want to process at the lowest sample rate and channel count possible
+// without losing information. Choose the lowest native rate at least equal to
+// the minimum of input and codec rates, choose lowest channel count, and
+// configure the audio frame.
+void InitializeCaptureFrame(int input_sample_rate,
+                            int send_sample_rate_hz,
+                            size_t input_num_channels,
+                            size_t send_num_channels,
+                            AudioFrame* audio_frame) {
+  RTC_DCHECK(audio_frame);
+  int min_processing_rate_hz = std::min(input_sample_rate, send_sample_rate_hz);
+  for (int native_rate_hz : AudioProcessing::kNativeSampleRatesHz) {
+    audio_frame->sample_rate_hz_ = native_rate_hz;
+    if (audio_frame->sample_rate_hz_ >= min_processing_rate_hz) {
+      break;
+    }
+  }
+  audio_frame->num_channels_ = std::min(input_num_channels, send_num_channels);
+}
+
+void ProcessCaptureFrame(uint32_t delay_ms,
+                         bool key_pressed,
+                         bool swap_stereo_channels,
+                         AudioProcessing* audio_processing,
+                         AudioFrame* audio_frame) {
+  RTC_DCHECK(audio_processing);
+  RTC_DCHECK(audio_frame);
+  RTC_DCHECK(
+      !audio_processing->echo_cancellation()->is_drift_compensation_enabled());
+  audio_processing->set_stream_delay_ms(delay_ms);
+  audio_processing->set_stream_key_pressed(key_pressed);
+  int error = audio_processing->ProcessStream(audio_frame);
+  RTC_DCHECK_EQ(0, error) << "ProcessStream() error: " << error;
+  if (swap_stereo_channels) {
+    AudioFrameOperations::SwapStereoChannels(audio_frame);
+  }
+}
+
+// Resample audio in |frame| to given sample rate preserving the
+// channel count and place the result in |destination|.
+int Resample(const AudioFrame& frame,
+             const int destination_sample_rate,
+             PushResampler<int16_t>* resampler,
+             int16_t* destination) {
+  const int number_of_channels = static_cast<int>(frame.num_channels_);
+  const int target_number_of_samples_per_channel =
+      destination_sample_rate / 100;
+  resampler->InitializeIfNeeded(frame.sample_rate_hz_, destination_sample_rate,
+                                number_of_channels);
+
+  // TODO(yujo): make resampler take an AudioFrame, and add special case
+  // handling of muted frames.
+  return resampler->Resample(
+      frame.data(), frame.samples_per_channel_ * number_of_channels,
+      destination, number_of_channels * target_number_of_samples_per_channel);
+}
+}  // namespace
+
+AudioTransportImpl::AudioTransportImpl(AudioMixer* mixer,
+                                       AudioProcessing* audio_processing)
+    : audio_processing_(audio_processing),
+      mixer_(mixer) {
+  RTC_DCHECK(mixer);
+  RTC_DCHECK(audio_processing);
+}
+
+AudioTransportImpl::~AudioTransportImpl() {}
+
+// Not used in Chromium. Process captured audio and distribute to all sending
+// streams, and try to do this at the lowest possible sample rate.
+int32_t AudioTransportImpl::RecordedDataIsAvailable(
+    const void* audio_data,
+    const size_t number_of_frames,
+    const size_t bytes_per_sample,
+    const size_t number_of_channels,
+    const uint32_t sample_rate,
+    const uint32_t audio_delay_milliseconds,
+    const int32_t /*clock_drift*/,
+    const uint32_t /*volume*/,
+    const bool key_pressed,
+    uint32_t& /*new_mic_volume*/) {  // NOLINT: to avoid changing APIs
+  RTC_DCHECK(audio_data);
+  RTC_DCHECK_GE(number_of_channels, 1);
+  RTC_DCHECK_LE(number_of_channels, 2);
+  RTC_DCHECK_EQ(2 * number_of_channels, bytes_per_sample);
+  RTC_DCHECK_GE(sample_rate, AudioProcessing::NativeRate::kSampleRate8kHz);
+  // 100 = 1 second / data duration (10 ms).
+  RTC_DCHECK_EQ(number_of_frames * 100, sample_rate);
+  RTC_DCHECK_LE(bytes_per_sample * number_of_frames * number_of_channels,
+                AudioFrame::kMaxDataSizeBytes);
+
+  int send_sample_rate_hz = 0;
+  size_t send_num_channels = 0;
+  bool swap_stereo_channels = false;
+  {
+    rtc::CritScope lock(&capture_lock_);
+    send_sample_rate_hz = send_sample_rate_hz_;
+    send_num_channels = send_num_channels_;
+    swap_stereo_channels = swap_stereo_channels_;
+  }
+
+  std::unique_ptr<AudioFrame> audio_frame(new AudioFrame());
+  InitializeCaptureFrame(sample_rate, send_sample_rate_hz,
+                         number_of_channels, send_num_channels,
+                         audio_frame.get());
+  voe::RemixAndResample(static_cast<const int16_t*>(audio_data),
+                        number_of_frames, number_of_channels, sample_rate,
+                        &capture_resampler_, audio_frame.get());
+  ProcessCaptureFrame(audio_delay_milliseconds, key_pressed,
+                      swap_stereo_channels, audio_processing_,
+                      audio_frame.get());
+
+  // Typing detection (utilizes the APM/VAD decision). We let the VAD determine
+  // if we're using this feature or not.
+  // TODO(solenberg): is_enabled() takes a lock. Work around that.
+  bool typing_detected = false;
+  if (audio_processing_->voice_detection()->is_enabled()) {
+    if (audio_frame->vad_activity_ != AudioFrame::kVadUnknown) {
+      bool vad_active = audio_frame->vad_activity_ == AudioFrame::kVadActive;
+      typing_detected = typing_detection_.Process(key_pressed, vad_active);
+    }
+  }
+
+  // Measure audio level of speech after all processing.
+  double sample_duration = static_cast<double>(number_of_frames) / sample_rate;
+  audio_level_.ComputeLevel(*audio_frame.get(), sample_duration);
+
+  // Copy frame and push to each sending stream. The copy is required since an
+  // encoding task will be posted internally to each stream.
+  {
+    rtc::CritScope lock(&capture_lock_);
+    typing_noise_detected_ = typing_detected;
+
+    RTC_DCHECK_GT(audio_frame->samples_per_channel_, 0);
+    if (!sending_streams_.empty()) {
+      auto it = sending_streams_.begin();
+      while (++it != sending_streams_.end()) {
+        std::unique_ptr<AudioFrame> audio_frame_copy(new AudioFrame());
+        audio_frame_copy->CopyFrom(*audio_frame.get());
+        (*it)->SendAudioData(std::move(audio_frame_copy));
+      }
+      // Send the original frame to the first stream w/o copying.
+      (*sending_streams_.begin())->SendAudioData(std::move(audio_frame));
+    }
+  }
+
+  return 0;
+}
+
+// Mix all received streams, feed the result to the AudioProcessing module, then
+// resample the result to the requested output rate.
+int32_t AudioTransportImpl::NeedMorePlayData(const size_t nSamples,
+                                              const size_t nBytesPerSample,
+                                              const size_t nChannels,
+                                              const uint32_t samplesPerSec,
+                                              void* audioSamples,
+                                              size_t& nSamplesOut,
+                                              int64_t* elapsed_time_ms,
+                                              int64_t* ntp_time_ms) {
+  RTC_DCHECK_EQ(sizeof(int16_t) * nChannels, nBytesPerSample);
+  RTC_DCHECK_GE(nChannels, 1);
+  RTC_DCHECK_LE(nChannels, 2);
+  RTC_DCHECK_GE(
+      samplesPerSec,
+      static_cast<uint32_t>(AudioProcessing::NativeRate::kSampleRate8kHz));
+
+  // 100 = 1 second / data duration (10 ms).
+  RTC_DCHECK_EQ(nSamples * 100, samplesPerSec);
+  RTC_DCHECK_LE(nBytesPerSample * nSamples * nChannels,
+                AudioFrame::kMaxDataSizeBytes);
+
+  mixer_->Mix(nChannels, &mixed_frame_);
+  *elapsed_time_ms = mixed_frame_.elapsed_time_ms_;
+  *ntp_time_ms = mixed_frame_.ntp_time_ms_;
+
+  const auto error = audio_processing_->ProcessReverseStream(&mixed_frame_);
+  RTC_DCHECK_EQ(error, AudioProcessing::kNoError);
+
+  nSamplesOut = Resample(mixed_frame_, samplesPerSec, &render_resampler_,
+                         static_cast<int16_t*>(audioSamples));
+  RTC_DCHECK_EQ(nSamplesOut, nChannels * nSamples);
+  return 0;
+}
+
+// Used by Chromium - same as NeedMorePlayData() but because Chrome has its
+// own APM instance, does not call audio_processing_->ProcessReverseStream().
+void AudioTransportImpl::PullRenderData(int bits_per_sample,
+                                         int sample_rate,
+                                         size_t number_of_channels,
+                                         size_t number_of_frames,
+                                         void* audio_data,
+                                         int64_t* elapsed_time_ms,
+                                         int64_t* ntp_time_ms) {
+  RTC_DCHECK_EQ(bits_per_sample, 16);
+  RTC_DCHECK_GE(number_of_channels, 1);
+  RTC_DCHECK_LE(number_of_channels, 2);
+  RTC_DCHECK_GE(sample_rate, AudioProcessing::NativeRate::kSampleRate8kHz);
+
+  // 100 = 1 second / data duration (10 ms).
+  RTC_DCHECK_EQ(number_of_frames * 100, sample_rate);
+
+  // 8 = bits per byte.
+  RTC_DCHECK_LE(bits_per_sample / 8 * number_of_frames * number_of_channels,
+                AudioFrame::kMaxDataSizeBytes);
+  mixer_->Mix(number_of_channels, &mixed_frame_);
+  *elapsed_time_ms = mixed_frame_.elapsed_time_ms_;
+  *ntp_time_ms = mixed_frame_.ntp_time_ms_;
+
+  auto output_samples = Resample(mixed_frame_, sample_rate, &render_resampler_,
+                                 static_cast<int16_t*>(audio_data));
+  RTC_DCHECK_EQ(output_samples, number_of_channels * number_of_frames);
+}
+
+void AudioTransportImpl::UpdateSendingStreams(
+    std::vector<AudioSendStream*> streams, int send_sample_rate_hz,
+    size_t send_num_channels) {
+  rtc::CritScope lock(&capture_lock_);
+  sending_streams_ = std::move(streams);
+  send_sample_rate_hz_ = send_sample_rate_hz;
+  send_num_channels_ = send_num_channels;
+}
+
+void AudioTransportImpl::SetStereoChannelSwapping(bool enable) {
+  rtc::CritScope lock(&capture_lock_);
+  swap_stereo_channels_ = enable;
+}
+
+bool AudioTransportImpl::typing_noise_detected() const {
+  rtc::CritScope lock(&capture_lock_);
+  return typing_noise_detected_;
+}
+}  // namespace webrtc
diff --git a/audio/audio_transport_impl.h b/audio/audio_transport_impl.h
new file mode 100644
index 0000000..4e6e047
--- /dev/null
+++ b/audio/audio_transport_impl.h
@@ -0,0 +1,98 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_AUDIO_TRANSPORT_IMPL_H_
+#define AUDIO_AUDIO_TRANSPORT_IMPL_H_
+
+#include <vector>
+
+#include "api/audio/audio_mixer.h"
+#include "audio/audio_level.h"
+#include "common_audio/resampler/include/push_resampler.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/typing_detection.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class AudioSendStream;
+
+class AudioTransportImpl : public AudioTransport {
+ public:
+  AudioTransportImpl(AudioMixer* mixer,
+                     AudioProcessing* audio_processing);
+  ~AudioTransportImpl() override;
+
+  int32_t RecordedDataIsAvailable(const void* audioSamples,
+                                  const size_t nSamples,
+                                  const size_t nBytesPerSample,
+                                  const size_t nChannels,
+                                  const uint32_t samplesPerSec,
+                                  const uint32_t totalDelayMS,
+                                  const int32_t clockDrift,
+                                  const uint32_t currentMicLevel,
+                                  const bool keyPressed,
+                                  uint32_t& newMicLevel) override;
+
+  int32_t NeedMorePlayData(const size_t nSamples,
+                           const size_t nBytesPerSample,
+                           const size_t nChannels,
+                           const uint32_t samplesPerSec,
+                           void* audioSamples,
+                           size_t& nSamplesOut,
+                           int64_t* elapsed_time_ms,
+                           int64_t* ntp_time_ms) override;
+
+  void PullRenderData(int bits_per_sample,
+                      int sample_rate,
+                      size_t number_of_channels,
+                      size_t number_of_frames,
+                      void* audio_data,
+                      int64_t* elapsed_time_ms,
+                      int64_t* ntp_time_ms) override;
+
+  void UpdateSendingStreams(std::vector<AudioSendStream*> streams,
+                            int send_sample_rate_hz, size_t send_num_channels);
+  void SetStereoChannelSwapping(bool enable);
+  bool typing_noise_detected() const;
+  const voe::AudioLevel& audio_level() const {
+    return audio_level_;
+  }
+
+ private:
+  // Shared.
+  AudioProcessing* audio_processing_ = nullptr;
+
+  // Capture side.
+  rtc::CriticalSection capture_lock_;
+  std::vector<AudioSendStream*> sending_streams_ RTC_GUARDED_BY(capture_lock_);
+  int send_sample_rate_hz_ RTC_GUARDED_BY(capture_lock_) = 8000;
+  size_t send_num_channels_ RTC_GUARDED_BY(capture_lock_) = 1;
+  bool typing_noise_detected_ RTC_GUARDED_BY(capture_lock_) = false;
+  bool swap_stereo_channels_ RTC_GUARDED_BY(capture_lock_) = false;
+  PushResampler<int16_t> capture_resampler_;
+  voe::AudioLevel audio_level_;
+  TypingDetection typing_detection_;
+
+  // Render side.
+  rtc::scoped_refptr<AudioMixer> mixer_;
+  AudioFrame mixed_frame_;
+  // Converts mixed audio to the audio device output rate.
+  PushResampler<int16_t> render_resampler_;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AudioTransportImpl);
+};
+}  // namespace webrtc
+
+#endif  // AUDIO_AUDIO_TRANSPORT_IMPL_H_
diff --git a/audio/channel.cc b/audio/channel.cc
new file mode 100644
index 0000000..d50c161
--- /dev/null
+++ b/audio/channel.cc
@@ -0,0 +1,1507 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/channel.h"
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "api/array_view.h"
+#include "audio/utility/audio_frame_operations.h"
+#include "call/rtp_transport_controller_send_interface.h"
+#include "logging/rtc_event_log/rtc_event_log.h"
+#include "logging/rtc_event_log/events/rtc_event_audio_playout.h"
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
+#include "modules/audio_coding/codecs/audio_format_conversion.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/include/module_common_types.h"
+#include "modules/pacing/packet_router.h"
+#include "modules/rtp_rtcp/include/receive_statistics.h"
+#include "modules/rtp_rtcp/include/rtp_payload_registry.h"
+#include "modules/rtp_rtcp/include/rtp_receiver.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "modules/rtp_rtcp/source/rtp_receiver_strategy.h"
+#include "modules/utility/include/process_thread.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/format_macros.h"
+#include "rtc_base/location.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/ptr_util.h"
+#include "rtc_base/rate_limiter.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/thread_checker.h"
+#include "rtc_base/timeutils.h"
+#include "system_wrappers/include/field_trial.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace voe {
+
+namespace {
+
+constexpr double kAudioSampleDurationSeconds = 0.01;
+constexpr int64_t kMaxRetransmissionWindowMs = 1000;
+constexpr int64_t kMinRetransmissionWindowMs = 30;
+
+// Video Sync.
+constexpr int kVoiceEngineMinMinPlayoutDelayMs = 0;
+constexpr int kVoiceEngineMaxMinPlayoutDelayMs = 10000;
+
+}  // namespace
+
+const int kTelephoneEventAttenuationdB = 10;
+
+class RtcEventLogProxy final : public webrtc::RtcEventLog {
+ public:
+  RtcEventLogProxy() : event_log_(nullptr) {}
+
+  bool StartLogging(std::unique_ptr<RtcEventLogOutput> output,
+                    int64_t output_period_ms) override {
+    RTC_NOTREACHED();
+    return false;
+  }
+
+  void StopLogging() override { RTC_NOTREACHED(); }
+
+  void Log(std::unique_ptr<RtcEvent> event) override {
+    rtc::CritScope lock(&crit_);
+    if (event_log_) {
+      event_log_->Log(std::move(event));
+    }
+  }
+
+  void SetEventLog(RtcEventLog* event_log) {
+    rtc::CritScope lock(&crit_);
+    event_log_ = event_log;
+  }
+
+ private:
+  rtc::CriticalSection crit_;
+  RtcEventLog* event_log_ RTC_GUARDED_BY(crit_);
+  RTC_DISALLOW_COPY_AND_ASSIGN(RtcEventLogProxy);
+};
+
+class RtcpRttStatsProxy final : public RtcpRttStats {
+ public:
+  RtcpRttStatsProxy() : rtcp_rtt_stats_(nullptr) {}
+
+  void OnRttUpdate(int64_t rtt) override {
+    rtc::CritScope lock(&crit_);
+    if (rtcp_rtt_stats_)
+      rtcp_rtt_stats_->OnRttUpdate(rtt);
+  }
+
+  int64_t LastProcessedRtt() const override {
+    rtc::CritScope lock(&crit_);
+    if (!rtcp_rtt_stats_)
+      return 0;
+    return rtcp_rtt_stats_->LastProcessedRtt();
+  }
+
+  void SetRtcpRttStats(RtcpRttStats* rtcp_rtt_stats) {
+    rtc::CritScope lock(&crit_);
+    rtcp_rtt_stats_ = rtcp_rtt_stats;
+  }
+
+ private:
+  rtc::CriticalSection crit_;
+  RtcpRttStats* rtcp_rtt_stats_ RTC_GUARDED_BY(crit_);
+  RTC_DISALLOW_COPY_AND_ASSIGN(RtcpRttStatsProxy);
+};
+
+class TransportFeedbackProxy : public TransportFeedbackObserver {
+ public:
+  TransportFeedbackProxy() : feedback_observer_(nullptr) {
+    pacer_thread_.DetachFromThread();
+    network_thread_.DetachFromThread();
+  }
+
+  void SetTransportFeedbackObserver(
+      TransportFeedbackObserver* feedback_observer) {
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
+    rtc::CritScope lock(&crit_);
+    feedback_observer_ = feedback_observer;
+  }
+
+  // Implements TransportFeedbackObserver.
+  void AddPacket(uint32_t ssrc,
+                 uint16_t sequence_number,
+                 size_t length,
+                 const PacedPacketInfo& pacing_info) override {
+    RTC_DCHECK(pacer_thread_.CalledOnValidThread());
+    rtc::CritScope lock(&crit_);
+    if (feedback_observer_)
+      feedback_observer_->AddPacket(ssrc, sequence_number, length, pacing_info);
+  }
+
+  void OnTransportFeedback(const rtcp::TransportFeedback& feedback) override {
+    RTC_DCHECK(network_thread_.CalledOnValidThread());
+    rtc::CritScope lock(&crit_);
+    if (feedback_observer_)
+      feedback_observer_->OnTransportFeedback(feedback);
+  }
+
+ private:
+  rtc::CriticalSection crit_;
+  rtc::ThreadChecker thread_checker_;
+  rtc::ThreadChecker pacer_thread_;
+  rtc::ThreadChecker network_thread_;
+  TransportFeedbackObserver* feedback_observer_ RTC_GUARDED_BY(&crit_);
+};
+
+class TransportSequenceNumberProxy : public TransportSequenceNumberAllocator {
+ public:
+  TransportSequenceNumberProxy() : seq_num_allocator_(nullptr) {
+    pacer_thread_.DetachFromThread();
+  }
+
+  void SetSequenceNumberAllocator(
+      TransportSequenceNumberAllocator* seq_num_allocator) {
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
+    rtc::CritScope lock(&crit_);
+    seq_num_allocator_ = seq_num_allocator;
+  }
+
+  // Implements TransportSequenceNumberAllocator.
+  uint16_t AllocateSequenceNumber() override {
+    RTC_DCHECK(pacer_thread_.CalledOnValidThread());
+    rtc::CritScope lock(&crit_);
+    if (!seq_num_allocator_)
+      return 0;
+    return seq_num_allocator_->AllocateSequenceNumber();
+  }
+
+ private:
+  rtc::CriticalSection crit_;
+  rtc::ThreadChecker thread_checker_;
+  rtc::ThreadChecker pacer_thread_;
+  TransportSequenceNumberAllocator* seq_num_allocator_ RTC_GUARDED_BY(&crit_);
+};
+
+class RtpPacketSenderProxy : public RtpPacketSender {
+ public:
+  RtpPacketSenderProxy() : rtp_packet_sender_(nullptr) {}
+
+  void SetPacketSender(RtpPacketSender* rtp_packet_sender) {
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
+    rtc::CritScope lock(&crit_);
+    rtp_packet_sender_ = rtp_packet_sender;
+  }
+
+  // Implements RtpPacketSender.
+  void InsertPacket(Priority priority,
+                    uint32_t ssrc,
+                    uint16_t sequence_number,
+                    int64_t capture_time_ms,
+                    size_t bytes,
+                    bool retransmission) override {
+    rtc::CritScope lock(&crit_);
+    if (rtp_packet_sender_) {
+      rtp_packet_sender_->InsertPacket(priority, ssrc, sequence_number,
+                                       capture_time_ms, bytes, retransmission);
+    }
+  }
+
+  void SetAccountForAudioPackets(bool account_for_audio) override {
+    RTC_NOTREACHED();
+  }
+
+ private:
+  rtc::ThreadChecker thread_checker_;
+  rtc::CriticalSection crit_;
+  RtpPacketSender* rtp_packet_sender_ RTC_GUARDED_BY(&crit_);
+};
+
+class VoERtcpObserver : public RtcpBandwidthObserver {
+ public:
+  explicit VoERtcpObserver(Channel* owner)
+      : owner_(owner), bandwidth_observer_(nullptr) {}
+  virtual ~VoERtcpObserver() {}
+
+  void SetBandwidthObserver(RtcpBandwidthObserver* bandwidth_observer) {
+    rtc::CritScope lock(&crit_);
+    bandwidth_observer_ = bandwidth_observer;
+  }
+
+  void OnReceivedEstimatedBitrate(uint32_t bitrate) override {
+    rtc::CritScope lock(&crit_);
+    if (bandwidth_observer_) {
+      bandwidth_observer_->OnReceivedEstimatedBitrate(bitrate);
+    }
+  }
+
+  void OnReceivedRtcpReceiverReport(const ReportBlockList& report_blocks,
+                                    int64_t rtt,
+                                    int64_t now_ms) override {
+    {
+      rtc::CritScope lock(&crit_);
+      if (bandwidth_observer_) {
+        bandwidth_observer_->OnReceivedRtcpReceiverReport(report_blocks, rtt,
+                                                          now_ms);
+      }
+    }
+    // TODO(mflodman): Do we need to aggregate reports here or can we jut send
+    // what we get? I.e. do we ever get multiple reports bundled into one RTCP
+    // report for VoiceEngine?
+    if (report_blocks.empty())
+      return;
+
+    int fraction_lost_aggregate = 0;
+    int total_number_of_packets = 0;
+
+    // If receiving multiple report blocks, calculate the weighted average based
+    // on the number of packets a report refers to.
+    for (ReportBlockList::const_iterator block_it = report_blocks.begin();
+         block_it != report_blocks.end(); ++block_it) {
+      // Find the previous extended high sequence number for this remote SSRC,
+      // to calculate the number of RTP packets this report refers to. Ignore if
+      // we haven't seen this SSRC before.
+      std::map<uint32_t, uint32_t>::iterator seq_num_it =
+          extended_max_sequence_number_.find(block_it->source_ssrc);
+      int number_of_packets = 0;
+      if (seq_num_it != extended_max_sequence_number_.end()) {
+        number_of_packets =
+            block_it->extended_highest_sequence_number - seq_num_it->second;
+      }
+      fraction_lost_aggregate += number_of_packets * block_it->fraction_lost;
+      total_number_of_packets += number_of_packets;
+
+      extended_max_sequence_number_[block_it->source_ssrc] =
+          block_it->extended_highest_sequence_number;
+    }
+    int weighted_fraction_lost = 0;
+    if (total_number_of_packets > 0) {
+      weighted_fraction_lost =
+          (fraction_lost_aggregate + total_number_of_packets / 2) /
+          total_number_of_packets;
+    }
+    owner_->OnUplinkPacketLossRate(weighted_fraction_lost / 255.0f);
+  }
+
+ private:
+  Channel* owner_;
+  // Maps remote side ssrc to extended highest sequence number received.
+  std::map<uint32_t, uint32_t> extended_max_sequence_number_;
+  rtc::CriticalSection crit_;
+  RtcpBandwidthObserver* bandwidth_observer_ RTC_GUARDED_BY(crit_);
+};
+
+class Channel::ProcessAndEncodeAudioTask : public rtc::QueuedTask {
+ public:
+  ProcessAndEncodeAudioTask(std::unique_ptr<AudioFrame> audio_frame,
+                            Channel* channel)
+      : audio_frame_(std::move(audio_frame)), channel_(channel) {
+    RTC_DCHECK(channel_);
+  }
+
+ private:
+  bool Run() override {
+    RTC_DCHECK_RUN_ON(channel_->encoder_queue_);
+    channel_->ProcessAndEncodeAudioOnTaskQueue(audio_frame_.get());
+    return true;
+  }
+
+  std::unique_ptr<AudioFrame> audio_frame_;
+  Channel* const channel_;
+};
+
+int32_t Channel::SendData(FrameType frameType,
+                          uint8_t payloadType,
+                          uint32_t timeStamp,
+                          const uint8_t* payloadData,
+                          size_t payloadSize,
+                          const RTPFragmentationHeader* fragmentation) {
+  RTC_DCHECK_RUN_ON(encoder_queue_);
+  if (_includeAudioLevelIndication) {
+    // Store current audio level in the RTP/RTCP module.
+    // The level will be used in combination with voice-activity state
+    // (frameType) to add an RTP header extension
+    _rtpRtcpModule->SetAudioLevel(rms_level_.Average());
+  }
+
+  // Push data from ACM to RTP/RTCP-module to deliver audio frame for
+  // packetization.
+  // This call will trigger Transport::SendPacket() from the RTP/RTCP module.
+  if (!_rtpRtcpModule->SendOutgoingData(
+          (FrameType&)frameType, payloadType, timeStamp,
+          // Leaving the time when this frame was
+          // received from the capture device as
+          // undefined for voice for now.
+          -1, payloadData, payloadSize, fragmentation, nullptr, nullptr)) {
+    RTC_DLOG(LS_ERROR)
+        << "Channel::SendData() failed to send data to RTP/RTCP module";
+    return -1;
+  }
+
+  return 0;
+}
+
+bool Channel::SendRtp(const uint8_t* data,
+                      size_t len,
+                      const PacketOptions& options) {
+  rtc::CritScope cs(&_callbackCritSect);
+
+  if (_transportPtr == NULL) {
+    RTC_DLOG(LS_ERROR)
+        << "Channel::SendPacket() failed to send RTP packet due to"
+        << " invalid transport object";
+    return false;
+  }
+
+  if (!_transportPtr->SendRtp(data, len, options)) {
+    RTC_DLOG(LS_ERROR) << "Channel::SendPacket() RTP transmission failed";
+    return false;
+  }
+  return true;
+}
+
+bool Channel::SendRtcp(const uint8_t* data, size_t len) {
+  rtc::CritScope cs(&_callbackCritSect);
+  if (_transportPtr == NULL) {
+    RTC_DLOG(LS_ERROR)
+        << "Channel::SendRtcp() failed to send RTCP packet due to"
+        << " invalid transport object";
+    return false;
+  }
+
+  int n = _transportPtr->SendRtcp(data, len);
+  if (n < 0) {
+    RTC_DLOG(LS_ERROR) << "Channel::SendRtcp() transmission failed";
+    return false;
+  }
+  return true;
+}
+
+void Channel::OnIncomingSSRCChanged(uint32_t ssrc) {
+  // Update ssrc so that NTP for AV sync can be updated.
+  _rtpRtcpModule->SetRemoteSSRC(ssrc);
+}
+
+void Channel::OnIncomingCSRCChanged(uint32_t CSRC, bool added) {
+  // TODO(saza): remove.
+}
+
+int32_t Channel::OnInitializeDecoder(int payload_type,
+                                     const SdpAudioFormat& audio_format,
+                                     uint32_t rate) {
+  if (!audio_coding_->RegisterReceiveCodec(payload_type, audio_format)) {
+    RTC_DLOG(LS_WARNING) << "Channel::OnInitializeDecoder() invalid codec (pt="
+                         << payload_type << ", " << audio_format
+                         << ") received -1";
+    return -1;
+  }
+
+  return 0;
+}
+
+int32_t Channel::OnReceivedPayloadData(const uint8_t* payloadData,
+                                       size_t payloadSize,
+                                       const WebRtcRTPHeader* rtpHeader) {
+  if (!channel_state_.Get().playing) {
+    // Avoid inserting into NetEQ when we are not playing. Count the
+    // packet as discarded.
+    return 0;
+  }
+
+  // Push the incoming payload (parsed and ready for decoding) into the ACM
+  if (audio_coding_->IncomingPacket(payloadData, payloadSize, *rtpHeader) !=
+      0) {
+    RTC_DLOG(LS_ERROR)
+        << "Channel::OnReceivedPayloadData() unable to push data to the ACM";
+    return -1;
+  }
+
+  int64_t round_trip_time = 0;
+  _rtpRtcpModule->RTT(rtp_receiver_->SSRC(), &round_trip_time, NULL, NULL,
+                      NULL);
+
+  std::vector<uint16_t> nack_list = audio_coding_->GetNackList(round_trip_time);
+  if (!nack_list.empty()) {
+    // Can't use nack_list.data() since it's not supported by all
+    // compilers.
+    ResendPackets(&(nack_list[0]), static_cast<int>(nack_list.size()));
+  }
+  return 0;
+}
+
+AudioMixer::Source::AudioFrameInfo Channel::GetAudioFrameWithInfo(
+    int sample_rate_hz,
+    AudioFrame* audio_frame) {
+  audio_frame->sample_rate_hz_ = sample_rate_hz;
+
+  unsigned int ssrc;
+  RTC_CHECK_EQ(GetRemoteSSRC(ssrc), 0);
+  event_log_proxy_->Log(rtc::MakeUnique<RtcEventAudioPlayout>(ssrc));
+  // Get 10ms raw PCM data from the ACM (mixer limits output frequency)
+  bool muted;
+  if (audio_coding_->PlayoutData10Ms(audio_frame->sample_rate_hz_, audio_frame,
+                                     &muted) == -1) {
+    RTC_DLOG(LS_ERROR) << "Channel::GetAudioFrame() PlayoutData10Ms() failed!";
+    // In all likelihood, the audio in this frame is garbage. We return an
+    // error so that the audio mixer module doesn't add it to the mix. As
+    // a result, it won't be played out and the actions skipped here are
+    // irrelevant.
+    return AudioMixer::Source::AudioFrameInfo::kError;
+  }
+
+  if (muted) {
+    // TODO(henrik.lundin): We should be able to do better than this. But we
+    // will have to go through all the cases below where the audio samples may
+    // be used, and handle the muted case in some way.
+    AudioFrameOperations::Mute(audio_frame);
+  }
+
+  {
+    // Pass the audio buffers to an optional sink callback, before applying
+    // scaling/panning, as that applies to the mix operation.
+    // External recipients of the audio (e.g. via AudioTrack), will do their
+    // own mixing/dynamic processing.
+    rtc::CritScope cs(&_callbackCritSect);
+    if (audio_sink_) {
+      AudioSinkInterface::Data data(
+          audio_frame->data(), audio_frame->samples_per_channel_,
+          audio_frame->sample_rate_hz_, audio_frame->num_channels_,
+          audio_frame->timestamp_);
+      audio_sink_->OnData(data);
+    }
+  }
+
+  float output_gain = 1.0f;
+  {
+    rtc::CritScope cs(&volume_settings_critsect_);
+    output_gain = _outputGain;
+  }
+
+  // Output volume scaling
+  if (output_gain < 0.99f || output_gain > 1.01f) {
+    // TODO(solenberg): Combine with mute state - this can cause clicks!
+    AudioFrameOperations::ScaleWithSat(output_gain, audio_frame);
+  }
+
+  // Measure audio level (0-9)
+  // TODO(henrik.lundin) Use the |muted| information here too.
+  // TODO(deadbeef): Use RmsLevel for |_outputAudioLevel| (see
+  // https://crbug.com/webrtc/7517).
+  _outputAudioLevel.ComputeLevel(*audio_frame, kAudioSampleDurationSeconds);
+
+  if (capture_start_rtp_time_stamp_ < 0 && audio_frame->timestamp_ != 0) {
+    // The first frame with a valid rtp timestamp.
+    capture_start_rtp_time_stamp_ = audio_frame->timestamp_;
+  }
+
+  if (capture_start_rtp_time_stamp_ >= 0) {
+    // audio_frame.timestamp_ should be valid from now on.
+
+    // Compute elapsed time.
+    int64_t unwrap_timestamp =
+        rtp_ts_wraparound_handler_->Unwrap(audio_frame->timestamp_);
+    audio_frame->elapsed_time_ms_ =
+        (unwrap_timestamp - capture_start_rtp_time_stamp_) /
+        (GetRtpTimestampRateHz() / 1000);
+
+    {
+      rtc::CritScope lock(&ts_stats_lock_);
+      // Compute ntp time.
+      audio_frame->ntp_time_ms_ =
+          ntp_estimator_.Estimate(audio_frame->timestamp_);
+      // |ntp_time_ms_| won't be valid until at least 2 RTCP SRs are received.
+      if (audio_frame->ntp_time_ms_ > 0) {
+        // Compute |capture_start_ntp_time_ms_| so that
+        // |capture_start_ntp_time_ms_| + |elapsed_time_ms_| == |ntp_time_ms_|
+        capture_start_ntp_time_ms_ =
+            audio_frame->ntp_time_ms_ - audio_frame->elapsed_time_ms_;
+      }
+    }
+  }
+
+  {
+    RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.TargetJitterBufferDelayMs",
+                              audio_coding_->TargetDelayMs());
+    const int jitter_buffer_delay = audio_coding_->FilteredCurrentDelayMs();
+    rtc::CritScope lock(&video_sync_lock_);
+    RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverDelayEstimateMs",
+                              jitter_buffer_delay + playout_delay_ms_);
+    RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverJitterBufferDelayMs",
+                              jitter_buffer_delay);
+    RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverDeviceDelayMs",
+                              playout_delay_ms_);
+  }
+
+  return muted ? AudioMixer::Source::AudioFrameInfo::kMuted
+               : AudioMixer::Source::AudioFrameInfo::kNormal;
+}
+
+int Channel::PreferredSampleRate() const {
+  // Return the bigger of playout and receive frequency in the ACM.
+  return std::max(audio_coding_->ReceiveFrequency(),
+                  audio_coding_->PlayoutFrequency());
+}
+
+Channel::Channel(rtc::TaskQueue* encoder_queue,
+                 ProcessThread* module_process_thread,
+                 AudioDeviceModule* audio_device_module)
+    : Channel(module_process_thread,
+              audio_device_module,
+              0,
+              false,
+              rtc::scoped_refptr<AudioDecoderFactory>()) {
+  RTC_DCHECK(encoder_queue);
+  encoder_queue_ = encoder_queue;
+}
+
+Channel::Channel(ProcessThread* module_process_thread,
+                 AudioDeviceModule* audio_device_module,
+                 size_t jitter_buffer_max_packets,
+                 bool jitter_buffer_fast_playout,
+                 rtc::scoped_refptr<AudioDecoderFactory> decoder_factory)
+    : event_log_proxy_(new RtcEventLogProxy()),
+      rtcp_rtt_stats_proxy_(new RtcpRttStatsProxy()),
+      rtp_payload_registry_(new RTPPayloadRegistry()),
+      rtp_receive_statistics_(
+          ReceiveStatistics::Create(Clock::GetRealTimeClock())),
+      rtp_receiver_(
+          RtpReceiver::CreateAudioReceiver(Clock::GetRealTimeClock(),
+                                           this,
+                                           this,
+                                           rtp_payload_registry_.get())),
+      telephone_event_handler_(rtp_receiver_->GetTelephoneEventHandler()),
+      _outputAudioLevel(),
+      _timeStamp(0),  // This is just an offset, RTP module will add it's own
+                      // random offset
+      ntp_estimator_(Clock::GetRealTimeClock()),
+      playout_timestamp_rtp_(0),
+      playout_delay_ms_(0),
+      send_sequence_number_(0),
+      rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()),
+      capture_start_rtp_time_stamp_(-1),
+      capture_start_ntp_time_ms_(-1),
+      _moduleProcessThreadPtr(module_process_thread),
+      _audioDeviceModulePtr(audio_device_module),
+      _transportPtr(NULL),
+      input_mute_(false),
+      previous_frame_muted_(false),
+      _outputGain(1.0f),
+      _includeAudioLevelIndication(false),
+      transport_overhead_per_packet_(0),
+      rtp_overhead_per_packet_(0),
+      rtcp_observer_(new VoERtcpObserver(this)),
+      associated_send_channel_(nullptr),
+      feedback_observer_proxy_(new TransportFeedbackProxy()),
+      seq_num_allocator_proxy_(new TransportSequenceNumberProxy()),
+      rtp_packet_sender_proxy_(new RtpPacketSenderProxy()),
+      retransmission_rate_limiter_(new RateLimiter(Clock::GetRealTimeClock(),
+                                                   kMaxRetransmissionWindowMs)),
+      use_twcc_plr_for_ana_(
+          webrtc::field_trial::FindFullName("UseTwccPlrForAna") == "Enabled") {
+  RTC_DCHECK(module_process_thread);
+  RTC_DCHECK(audio_device_module);
+  AudioCodingModule::Config acm_config;
+  acm_config.decoder_factory = decoder_factory;
+  acm_config.neteq_config.max_packets_in_buffer = jitter_buffer_max_packets;
+  acm_config.neteq_config.enable_fast_accelerate = jitter_buffer_fast_playout;
+  acm_config.neteq_config.enable_muted_state = true;
+  audio_coding_.reset(AudioCodingModule::Create(acm_config));
+
+  _outputAudioLevel.Clear();
+
+  RtpRtcp::Configuration configuration;
+  configuration.audio = true;
+  configuration.outgoing_transport = this;
+  configuration.overhead_observer = this;
+  configuration.receive_statistics = rtp_receive_statistics_.get();
+  configuration.bandwidth_callback = rtcp_observer_.get();
+  if (pacing_enabled_) {
+    configuration.paced_sender = rtp_packet_sender_proxy_.get();
+    configuration.transport_sequence_number_allocator =
+        seq_num_allocator_proxy_.get();
+    configuration.transport_feedback_callback = feedback_observer_proxy_.get();
+  }
+  configuration.event_log = &(*event_log_proxy_);
+  configuration.rtt_stats = &(*rtcp_rtt_stats_proxy_);
+  configuration.retransmission_rate_limiter =
+      retransmission_rate_limiter_.get();
+
+  _rtpRtcpModule.reset(RtpRtcp::CreateRtpRtcp(configuration));
+  _rtpRtcpModule->SetSendingMediaStatus(false);
+
+  Init();
+}
+
+Channel::~Channel() {
+  Terminate();
+  RTC_DCHECK(!channel_state_.Get().sending);
+  RTC_DCHECK(!channel_state_.Get().playing);
+}
+
+void Channel::Init() {
+  channel_state_.Reset();
+
+  // --- Add modules to process thread (for periodic schedulation)
+  _moduleProcessThreadPtr->RegisterModule(_rtpRtcpModule.get(), RTC_FROM_HERE);
+
+  // --- ACM initialization
+  int error = audio_coding_->InitializeReceiver();
+  RTC_DCHECK_EQ(0, error);
+
+  // --- RTP/RTCP module initialization
+
+  // Ensure that RTCP is enabled by default for the created channel.
+  // Note that, the module will keep generating RTCP until it is explicitly
+  // disabled by the user.
+  // After StopListen (when no sockets exists), RTCP packets will no longer
+  // be transmitted since the Transport object will then be invalid.
+  telephone_event_handler_->SetTelephoneEventForwardToDecoder(true);
+  // RTCP is enabled by default.
+  _rtpRtcpModule->SetRTCPStatus(RtcpMode::kCompound);
+
+  // --- Register all permanent callbacks
+  error = audio_coding_->RegisterTransportCallback(this);
+  RTC_DCHECK_EQ(0, error);
+}
+
+void Channel::Terminate() {
+  RTC_DCHECK(construction_thread_.CalledOnValidThread());
+  // Must be called on the same thread as Init().
+  rtp_receive_statistics_->RegisterRtcpStatisticsCallback(NULL);
+
+  StopSend();
+  StopPlayout();
+
+  // The order to safely shutdown modules in a channel is:
+  // 1. De-register callbacks in modules
+  // 2. De-register modules in process thread
+  // 3. Destroy modules
+  int error = audio_coding_->RegisterTransportCallback(NULL);
+  RTC_DCHECK_EQ(0, error);
+
+  // De-register modules in process thread
+  if (_moduleProcessThreadPtr)
+    _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get());
+
+  // End of modules shutdown
+}
+
+void Channel::SetSink(AudioSinkInterface* sink) {
+  rtc::CritScope cs(&_callbackCritSect);
+  audio_sink_ = sink;
+}
+
+int32_t Channel::StartPlayout() {
+  if (channel_state_.Get().playing) {
+    return 0;
+  }
+
+  channel_state_.SetPlaying(true);
+
+  return 0;
+}
+
+int32_t Channel::StopPlayout() {
+  if (!channel_state_.Get().playing) {
+    return 0;
+  }
+
+  channel_state_.SetPlaying(false);
+  _outputAudioLevel.Clear();
+
+  return 0;
+}
+
+int32_t Channel::StartSend() {
+  if (channel_state_.Get().sending) {
+    return 0;
+  }
+  channel_state_.SetSending(true);
+
+  // Resume the previous sequence number which was reset by StopSend(). This
+  // needs to be done before |sending| is set to true on the RTP/RTCP module.
+  if (send_sequence_number_) {
+    _rtpRtcpModule->SetSequenceNumber(send_sequence_number_);
+  }
+  _rtpRtcpModule->SetSendingMediaStatus(true);
+  if (_rtpRtcpModule->SetSendingStatus(true) != 0) {
+    RTC_DLOG(LS_ERROR) << "StartSend() RTP/RTCP failed to start sending";
+    _rtpRtcpModule->SetSendingMediaStatus(false);
+    rtc::CritScope cs(&_callbackCritSect);
+    channel_state_.SetSending(false);
+    return -1;
+  }
+  {
+    // It is now OK to start posting tasks to the encoder task queue.
+    rtc::CritScope cs(&encoder_queue_lock_);
+    encoder_queue_is_active_ = true;
+  }
+  return 0;
+}
+
+void Channel::StopSend() {
+  if (!channel_state_.Get().sending) {
+    return;
+  }
+  channel_state_.SetSending(false);
+
+  // Post a task to the encoder thread which sets an event when the task is
+  // executed. We know that no more encoding tasks will be added to the task
+  // queue for this channel since sending is now deactivated. It means that,
+  // if we wait for the event to bet set, we know that no more pending tasks
+  // exists and it is therfore guaranteed that the task queue will never try
+  // to acccess and invalid channel object.
+  RTC_DCHECK(encoder_queue_);
+
+  rtc::Event flush(false, false);
+  {
+    // Clear |encoder_queue_is_active_| under lock to prevent any other tasks
+    // than this final "flush task" to be posted on the queue.
+    rtc::CritScope cs(&encoder_queue_lock_);
+    encoder_queue_is_active_ = false;
+    encoder_queue_->PostTask([&flush]() { flush.Set(); });
+  }
+  flush.Wait(rtc::Event::kForever);
+
+  // Store the sequence number to be able to pick up the same sequence for
+  // the next StartSend(). This is needed for restarting device, otherwise
+  // it might cause libSRTP to complain about packets being replayed.
+  // TODO(xians): Remove this workaround after RtpRtcpModule's refactoring
+  // CL is landed. See issue
+  // https://code.google.com/p/webrtc/issues/detail?id=2111 .
+  send_sequence_number_ = _rtpRtcpModule->SequenceNumber();
+
+  // Reset sending SSRC and sequence number and triggers direct transmission
+  // of RTCP BYE
+  if (_rtpRtcpModule->SetSendingStatus(false) == -1) {
+    RTC_DLOG(LS_ERROR) << "StartSend() RTP/RTCP failed to stop sending";
+  }
+  _rtpRtcpModule->SetSendingMediaStatus(false);
+}
+
+bool Channel::SetEncoder(int payload_type,
+                         std::unique_ptr<AudioEncoder> encoder) {
+  RTC_DCHECK_GE(payload_type, 0);
+  RTC_DCHECK_LE(payload_type, 127);
+  // TODO(ossu): Make CodecInsts up, for now: one for the RTP/RTCP module and
+  // one for for us to keep track of sample rate and number of channels, etc.
+
+  // The RTP/RTCP module needs to know the RTP timestamp rate (i.e. clockrate)
+  // as well as some other things, so we collect this info and send it along.
+  CodecInst rtp_codec;
+  rtp_codec.pltype = payload_type;
+  strncpy(rtp_codec.plname, "audio", sizeof(rtp_codec.plname));
+  rtp_codec.plname[sizeof(rtp_codec.plname) - 1] = 0;
+  // Seems unclear if it should be clock rate or sample rate. CodecInst
+  // supposedly carries the sample rate, but only clock rate seems sensible to
+  // send to the RTP/RTCP module.
+  rtp_codec.plfreq = encoder->RtpTimestampRateHz();
+  rtp_codec.pacsize = rtc::CheckedDivExact(
+      static_cast<int>(encoder->Max10MsFramesInAPacket() * rtp_codec.plfreq),
+      100);
+  rtp_codec.channels = encoder->NumChannels();
+  rtp_codec.rate = 0;
+
+  if (_rtpRtcpModule->RegisterSendPayload(rtp_codec) != 0) {
+    _rtpRtcpModule->DeRegisterSendPayload(payload_type);
+    if (_rtpRtcpModule->RegisterSendPayload(rtp_codec) != 0) {
+      RTC_DLOG(LS_ERROR)
+          << "SetEncoder() failed to register codec to RTP/RTCP module";
+      return false;
+    }
+  }
+
+  audio_coding_->SetEncoder(std::move(encoder));
+  return true;
+}
+
+void Channel::ModifyEncoder(
+    rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier) {
+  audio_coding_->ModifyEncoder(modifier);
+}
+
+int32_t Channel::GetRecCodec(CodecInst& codec) {
+  return (audio_coding_->ReceiveCodec(&codec));
+}
+
+void Channel::SetBitRate(int bitrate_bps, int64_t probing_interval_ms) {
+  audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+    if (*encoder) {
+      (*encoder)->OnReceivedUplinkBandwidth(bitrate_bps, probing_interval_ms);
+    }
+  });
+  retransmission_rate_limiter_->SetMaxRate(bitrate_bps);
+}
+
+void Channel::OnTwccBasedUplinkPacketLossRate(float packet_loss_rate) {
+  if (!use_twcc_plr_for_ana_)
+    return;
+  audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+    if (*encoder) {
+      (*encoder)->OnReceivedUplinkPacketLossFraction(packet_loss_rate);
+    }
+  });
+}
+
+void Channel::OnRecoverableUplinkPacketLossRate(
+    float recoverable_packet_loss_rate) {
+  audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+    if (*encoder) {
+      (*encoder)->OnReceivedUplinkRecoverablePacketLossFraction(
+          recoverable_packet_loss_rate);
+    }
+  });
+}
+
+void Channel::OnUplinkPacketLossRate(float packet_loss_rate) {
+  if (use_twcc_plr_for_ana_)
+    return;
+  audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+    if (*encoder) {
+      (*encoder)->OnReceivedUplinkPacketLossFraction(packet_loss_rate);
+    }
+  });
+}
+
+void Channel::SetReceiveCodecs(const std::map<int, SdpAudioFormat>& codecs) {
+  rtp_payload_registry_->SetAudioReceivePayloads(codecs);
+  audio_coding_->SetReceiveCodecs(codecs);
+}
+
+bool Channel::EnableAudioNetworkAdaptor(const std::string& config_string) {
+  bool success = false;
+  audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+    if (*encoder) {
+      success = (*encoder)->EnableAudioNetworkAdaptor(config_string,
+                                                      event_log_proxy_.get());
+    }
+  });
+  return success;
+}
+
+void Channel::DisableAudioNetworkAdaptor() {
+  audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+    if (*encoder)
+      (*encoder)->DisableAudioNetworkAdaptor();
+  });
+}
+
+void Channel::SetReceiverFrameLengthRange(int min_frame_length_ms,
+                                          int max_frame_length_ms) {
+  audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+    if (*encoder) {
+      (*encoder)->SetReceiverFrameLengthRange(min_frame_length_ms,
+                                              max_frame_length_ms);
+    }
+  });
+}
+
+void Channel::RegisterTransport(Transport* transport) {
+  rtc::CritScope cs(&_callbackCritSect);
+  _transportPtr = transport;
+}
+
+void Channel::OnRtpPacket(const RtpPacketReceived& packet) {
+  RTPHeader header;
+  packet.GetHeader(&header);
+
+  // Store playout timestamp for the received RTP packet
+  UpdatePlayoutTimestamp(false);
+
+  header.payload_type_frequency =
+      rtp_payload_registry_->GetPayloadTypeFrequency(header.payloadType);
+  if (header.payload_type_frequency >= 0) {
+    bool in_order = IsPacketInOrder(header);
+    rtp_receive_statistics_->IncomingPacket(
+        header, packet.size(), IsPacketRetransmitted(header, in_order));
+
+    ReceivePacket(packet.data(), packet.size(), header);
+  }
+}
+
+bool Channel::ReceivePacket(const uint8_t* packet,
+                            size_t packet_length,
+                            const RTPHeader& header) {
+  const uint8_t* payload = packet + header.headerLength;
+  assert(packet_length >= header.headerLength);
+  size_t payload_length = packet_length - header.headerLength;
+  const auto pl =
+      rtp_payload_registry_->PayloadTypeToPayload(header.payloadType);
+  if (!pl) {
+    return false;
+  }
+  return rtp_receiver_->IncomingRtpPacket(header, payload, payload_length,
+                                          pl->typeSpecific);
+}
+
+bool Channel::IsPacketInOrder(const RTPHeader& header) const {
+  StreamStatistician* statistician =
+      rtp_receive_statistics_->GetStatistician(header.ssrc);
+  if (!statistician)
+    return false;
+  return statistician->IsPacketInOrder(header.sequenceNumber);
+}
+
+bool Channel::IsPacketRetransmitted(const RTPHeader& header,
+                                    bool in_order) const {
+  StreamStatistician* statistician =
+      rtp_receive_statistics_->GetStatistician(header.ssrc);
+  if (!statistician)
+    return false;
+  // Check if this is a retransmission.
+  int64_t min_rtt = 0;
+  _rtpRtcpModule->RTT(rtp_receiver_->SSRC(), NULL, NULL, &min_rtt, NULL);
+  return !in_order && statistician->IsRetransmitOfOldPacket(header, min_rtt);
+}
+
+int32_t Channel::ReceivedRTCPPacket(const uint8_t* data, size_t length) {
+  // Store playout timestamp for the received RTCP packet
+  UpdatePlayoutTimestamp(true);
+
+  // Deliver RTCP packet to RTP/RTCP module for parsing
+  _rtpRtcpModule->IncomingRtcpPacket(data, length);
+
+  int64_t rtt = GetRTT(true);
+  if (rtt == 0) {
+    // Waiting for valid RTT.
+    return 0;
+  }
+
+  int64_t nack_window_ms = rtt;
+  if (nack_window_ms < kMinRetransmissionWindowMs) {
+    nack_window_ms = kMinRetransmissionWindowMs;
+  } else if (nack_window_ms > kMaxRetransmissionWindowMs) {
+    nack_window_ms = kMaxRetransmissionWindowMs;
+  }
+  retransmission_rate_limiter_->SetWindowSize(nack_window_ms);
+
+  // Invoke audio encoders OnReceivedRtt().
+  audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+    if (*encoder)
+      (*encoder)->OnReceivedRtt(rtt);
+  });
+
+  uint32_t ntp_secs = 0;
+  uint32_t ntp_frac = 0;
+  uint32_t rtp_timestamp = 0;
+  if (0 !=
+      _rtpRtcpModule->RemoteNTP(&ntp_secs, &ntp_frac, NULL, NULL,
+                                &rtp_timestamp)) {
+    // Waiting for RTCP.
+    return 0;
+  }
+
+  {
+    rtc::CritScope lock(&ts_stats_lock_);
+    ntp_estimator_.UpdateRtcpTimestamp(rtt, ntp_secs, ntp_frac, rtp_timestamp);
+  }
+  return 0;
+}
+
+int Channel::GetSpeechOutputLevelFullRange() const {
+  return _outputAudioLevel.LevelFullRange();
+}
+
+double Channel::GetTotalOutputEnergy() const {
+  return _outputAudioLevel.TotalEnergy();
+}
+
+double Channel::GetTotalOutputDuration() const {
+  return _outputAudioLevel.TotalDuration();
+}
+
+void Channel::SetInputMute(bool enable) {
+  rtc::CritScope cs(&volume_settings_critsect_);
+  input_mute_ = enable;
+}
+
+bool Channel::InputMute() const {
+  rtc::CritScope cs(&volume_settings_critsect_);
+  return input_mute_;
+}
+
+void Channel::SetChannelOutputVolumeScaling(float scaling) {
+  rtc::CritScope cs(&volume_settings_critsect_);
+  _outputGain = scaling;
+}
+
+int Channel::SendTelephoneEventOutband(int event, int duration_ms) {
+  RTC_DCHECK_LE(0, event);
+  RTC_DCHECK_GE(255, event);
+  RTC_DCHECK_LE(0, duration_ms);
+  RTC_DCHECK_GE(65535, duration_ms);
+  if (!Sending()) {
+    return -1;
+  }
+  if (_rtpRtcpModule->SendTelephoneEventOutband(
+      event, duration_ms, kTelephoneEventAttenuationdB) != 0) {
+    RTC_DLOG(LS_ERROR) << "SendTelephoneEventOutband() failed to send event";
+    return -1;
+  }
+  return 0;
+}
+
+int Channel::SetSendTelephoneEventPayloadType(int payload_type,
+                                              int payload_frequency) {
+  RTC_DCHECK_LE(0, payload_type);
+  RTC_DCHECK_GE(127, payload_type);
+  CodecInst codec = {0};
+  codec.pltype = payload_type;
+  codec.plfreq = payload_frequency;
+  memcpy(codec.plname, "telephone-event", 16);
+  if (_rtpRtcpModule->RegisterSendPayload(codec) != 0) {
+    _rtpRtcpModule->DeRegisterSendPayload(codec.pltype);
+    if (_rtpRtcpModule->RegisterSendPayload(codec) != 0) {
+      RTC_DLOG(LS_ERROR)
+          << "SetSendTelephoneEventPayloadType() failed to register "
+             "send payload type";
+      return -1;
+    }
+  }
+  return 0;
+}
+
+int Channel::SetLocalSSRC(unsigned int ssrc) {
+  if (channel_state_.Get().sending) {
+    RTC_DLOG(LS_ERROR) << "SetLocalSSRC() already sending";
+    return -1;
+  }
+  _rtpRtcpModule->SetSSRC(ssrc);
+  return 0;
+}
+
+int Channel::GetRemoteSSRC(unsigned int& ssrc) {
+  ssrc = rtp_receiver_->SSRC();
+  return 0;
+}
+
+int Channel::SetSendAudioLevelIndicationStatus(bool enable, unsigned char id) {
+  _includeAudioLevelIndication = enable;
+  return SetSendRtpHeaderExtension(enable, kRtpExtensionAudioLevel, id);
+}
+
+void Channel::EnableSendTransportSequenceNumber(int id) {
+  int ret =
+      SetSendRtpHeaderExtension(true, kRtpExtensionTransportSequenceNumber, id);
+  RTC_DCHECK_EQ(0, ret);
+}
+
+void Channel::RegisterSenderCongestionControlObjects(
+    RtpTransportControllerSendInterface* transport,
+    RtcpBandwidthObserver* bandwidth_observer) {
+  RtpPacketSender* rtp_packet_sender = transport->packet_sender();
+  TransportFeedbackObserver* transport_feedback_observer =
+      transport->transport_feedback_observer();
+  PacketRouter* packet_router = transport->packet_router();
+
+  RTC_DCHECK(rtp_packet_sender);
+  RTC_DCHECK(transport_feedback_observer);
+  RTC_DCHECK(packet_router);
+  RTC_DCHECK(!packet_router_);
+  rtcp_observer_->SetBandwidthObserver(bandwidth_observer);
+  feedback_observer_proxy_->SetTransportFeedbackObserver(
+      transport_feedback_observer);
+  seq_num_allocator_proxy_->SetSequenceNumberAllocator(packet_router);
+  rtp_packet_sender_proxy_->SetPacketSender(rtp_packet_sender);
+  _rtpRtcpModule->SetStorePacketsStatus(true, 600);
+  constexpr bool remb_candidate = false;
+  packet_router->AddSendRtpModule(_rtpRtcpModule.get(), remb_candidate);
+  packet_router_ = packet_router;
+}
+
+void Channel::RegisterReceiverCongestionControlObjects(
+    PacketRouter* packet_router) {
+  RTC_DCHECK(packet_router);
+  RTC_DCHECK(!packet_router_);
+  constexpr bool remb_candidate = false;
+  packet_router->AddReceiveRtpModule(_rtpRtcpModule.get(), remb_candidate);
+  packet_router_ = packet_router;
+}
+
+void Channel::ResetSenderCongestionControlObjects() {
+  RTC_DCHECK(packet_router_);
+  _rtpRtcpModule->SetStorePacketsStatus(false, 600);
+  rtcp_observer_->SetBandwidthObserver(nullptr);
+  feedback_observer_proxy_->SetTransportFeedbackObserver(nullptr);
+  seq_num_allocator_proxy_->SetSequenceNumberAllocator(nullptr);
+  packet_router_->RemoveSendRtpModule(_rtpRtcpModule.get());
+  packet_router_ = nullptr;
+  rtp_packet_sender_proxy_->SetPacketSender(nullptr);
+}
+
+void Channel::ResetReceiverCongestionControlObjects() {
+  RTC_DCHECK(packet_router_);
+  packet_router_->RemoveReceiveRtpModule(_rtpRtcpModule.get());
+  packet_router_ = nullptr;
+}
+
+void Channel::SetRTCPStatus(bool enable) {
+  _rtpRtcpModule->SetRTCPStatus(enable ? RtcpMode::kCompound : RtcpMode::kOff);
+}
+
+int Channel::SetRTCP_CNAME(const char cName[256]) {
+  if (_rtpRtcpModule->SetCNAME(cName) != 0) {
+    RTC_DLOG(LS_ERROR) << "SetRTCP_CNAME() failed to set RTCP CNAME";
+    return -1;
+  }
+  return 0;
+}
+
+int Channel::GetRemoteRTCPReportBlocks(
+    std::vector<ReportBlock>* report_blocks) {
+  if (report_blocks == NULL) {
+    RTC_DLOG(LS_ERROR) << "GetRemoteRTCPReportBlock()s invalid report_blocks.";
+    return -1;
+  }
+
+  // Get the report blocks from the latest received RTCP Sender or Receiver
+  // Report. Each element in the vector contains the sender's SSRC and a
+  // report block according to RFC 3550.
+  std::vector<RTCPReportBlock> rtcp_report_blocks;
+  if (_rtpRtcpModule->RemoteRTCPStat(&rtcp_report_blocks) != 0) {
+    return -1;
+  }
+
+  if (rtcp_report_blocks.empty())
+    return 0;
+
+  std::vector<RTCPReportBlock>::const_iterator it = rtcp_report_blocks.begin();
+  for (; it != rtcp_report_blocks.end(); ++it) {
+    ReportBlock report_block;
+    report_block.sender_SSRC = it->sender_ssrc;
+    report_block.source_SSRC = it->source_ssrc;
+    report_block.fraction_lost = it->fraction_lost;
+    report_block.cumulative_num_packets_lost = it->packets_lost;
+    report_block.extended_highest_sequence_number =
+        it->extended_highest_sequence_number;
+    report_block.interarrival_jitter = it->jitter;
+    report_block.last_SR_timestamp = it->last_sender_report_timestamp;
+    report_block.delay_since_last_SR = it->delay_since_last_sender_report;
+    report_blocks->push_back(report_block);
+  }
+  return 0;
+}
+
+int Channel::GetRTPStatistics(CallStatistics& stats) {
+  // --- RtcpStatistics
+
+  // The jitter statistics is updated for each received RTP packet and is
+  // based on received packets.
+  RtcpStatistics statistics;
+  StreamStatistician* statistician =
+      rtp_receive_statistics_->GetStatistician(rtp_receiver_->SSRC());
+  if (statistician) {
+    statistician->GetStatistics(&statistics,
+                                _rtpRtcpModule->RTCP() == RtcpMode::kOff);
+  }
+
+  stats.fractionLost = statistics.fraction_lost;
+  stats.cumulativeLost = statistics.packets_lost;
+  stats.extendedMax = statistics.extended_highest_sequence_number;
+  stats.jitterSamples = statistics.jitter;
+
+  // --- RTT
+  stats.rttMs = GetRTT(true);
+
+  // --- Data counters
+
+  size_t bytesSent(0);
+  uint32_t packetsSent(0);
+  size_t bytesReceived(0);
+  uint32_t packetsReceived(0);
+
+  if (statistician) {
+    statistician->GetDataCounters(&bytesReceived, &packetsReceived);
+  }
+
+  if (_rtpRtcpModule->DataCountersRTP(&bytesSent, &packetsSent) != 0) {
+    RTC_DLOG(LS_WARNING)
+        << "GetRTPStatistics() failed to retrieve RTP datacounters"
+        << " => output will not be complete";
+  }
+
+  stats.bytesSent = bytesSent;
+  stats.packetsSent = packetsSent;
+  stats.bytesReceived = bytesReceived;
+  stats.packetsReceived = packetsReceived;
+
+  // --- Timestamps
+  {
+    rtc::CritScope lock(&ts_stats_lock_);
+    stats.capture_start_ntp_time_ms_ = capture_start_ntp_time_ms_;
+  }
+  return 0;
+}
+
+void Channel::SetNACKStatus(bool enable, int maxNumberOfPackets) {
+  // None of these functions can fail.
+  // If pacing is enabled we always store packets.
+  if (!pacing_enabled_)
+    _rtpRtcpModule->SetStorePacketsStatus(enable, maxNumberOfPackets);
+  rtp_receive_statistics_->SetMaxReorderingThreshold(maxNumberOfPackets);
+  if (enable)
+    audio_coding_->EnableNack(maxNumberOfPackets);
+  else
+    audio_coding_->DisableNack();
+}
+
+// Called when we are missing one or more packets.
+int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) {
+  return _rtpRtcpModule->SendNACK(sequence_numbers, length);
+}
+
+void Channel::ProcessAndEncodeAudio(std::unique_ptr<AudioFrame> audio_frame) {
+  // Avoid posting any new tasks if sending was already stopped in StopSend().
+  rtc::CritScope cs(&encoder_queue_lock_);
+  if (!encoder_queue_is_active_) {
+    return;
+  }
+  // Profile time between when the audio frame is added to the task queue and
+  // when the task is actually executed.
+  audio_frame->UpdateProfileTimeStamp();
+  encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(
+      new ProcessAndEncodeAudioTask(std::move(audio_frame), this)));
+}
+
+void Channel::ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input) {
+  RTC_DCHECK_RUN_ON(encoder_queue_);
+  RTC_DCHECK_GT(audio_input->samples_per_channel_, 0);
+  RTC_DCHECK_LE(audio_input->num_channels_, 2);
+
+  // Measure time between when the audio frame is added to the task queue and
+  // when the task is actually executed. Goal is to keep track of unwanted
+  // extra latency added by the task queue.
+  RTC_HISTOGRAM_COUNTS_10000("WebRTC.Audio.EncodingTaskQueueLatencyMs",
+                             audio_input->ElapsedProfileTimeMs());
+
+  bool is_muted = InputMute();
+  AudioFrameOperations::Mute(audio_input, previous_frame_muted_, is_muted);
+
+  if (_includeAudioLevelIndication) {
+    size_t length =
+        audio_input->samples_per_channel_ * audio_input->num_channels_;
+    RTC_CHECK_LE(length, AudioFrame::kMaxDataSizeBytes);
+    if (is_muted && previous_frame_muted_) {
+      rms_level_.AnalyzeMuted(length);
+    } else {
+      rms_level_.Analyze(
+          rtc::ArrayView<const int16_t>(audio_input->data(), length));
+    }
+  }
+  previous_frame_muted_ = is_muted;
+
+  // Add 10ms of raw (PCM) audio data to the encoder @ 32kHz.
+
+  // The ACM resamples internally.
+  audio_input->timestamp_ = _timeStamp;
+  // This call will trigger AudioPacketizationCallback::SendData if encoding
+  // is done and payload is ready for packetization and transmission.
+  // Otherwise, it will return without invoking the callback.
+  if (audio_coding_->Add10MsData(*audio_input) < 0) {
+    RTC_DLOG(LS_ERROR) << "ACM::Add10MsData() failed.";
+    return;
+  }
+
+  _timeStamp += static_cast<uint32_t>(audio_input->samples_per_channel_);
+}
+
+void Channel::SetAssociatedSendChannel(Channel* channel) {
+  RTC_DCHECK_NE(this, channel);
+  rtc::CritScope lock(&assoc_send_channel_lock_);
+  associated_send_channel_ = channel;
+}
+
+void Channel::SetRtcEventLog(RtcEventLog* event_log) {
+  event_log_proxy_->SetEventLog(event_log);
+}
+
+void Channel::SetRtcpRttStats(RtcpRttStats* rtcp_rtt_stats) {
+  rtcp_rtt_stats_proxy_->SetRtcpRttStats(rtcp_rtt_stats);
+}
+
+void Channel::UpdateOverheadForEncoder() {
+  size_t overhead_per_packet =
+      transport_overhead_per_packet_ + rtp_overhead_per_packet_;
+  audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+    if (*encoder) {
+      (*encoder)->OnReceivedOverhead(overhead_per_packet);
+    }
+  });
+}
+
+void Channel::SetTransportOverhead(size_t transport_overhead_per_packet) {
+  rtc::CritScope cs(&overhead_per_packet_lock_);
+  transport_overhead_per_packet_ = transport_overhead_per_packet;
+  UpdateOverheadForEncoder();
+}
+
+// TODO(solenberg): Make AudioSendStream an OverheadObserver instead.
+void Channel::OnOverheadChanged(size_t overhead_bytes_per_packet) {
+  rtc::CritScope cs(&overhead_per_packet_lock_);
+  rtp_overhead_per_packet_ = overhead_bytes_per_packet;
+  UpdateOverheadForEncoder();
+}
+
+int Channel::GetNetworkStatistics(NetworkStatistics& stats) {
+  return audio_coding_->GetNetworkStatistics(&stats);
+}
+
+void Channel::GetDecodingCallStatistics(AudioDecodingCallStats* stats) const {
+  audio_coding_->GetDecodingCallStatistics(stats);
+}
+
+ANAStats Channel::GetANAStatistics() const {
+  return audio_coding_->GetANAStats();
+}
+
+uint32_t Channel::GetDelayEstimate() const {
+  rtc::CritScope lock(&video_sync_lock_);
+  return audio_coding_->FilteredCurrentDelayMs() + playout_delay_ms_;
+}
+
+int Channel::SetMinimumPlayoutDelay(int delayMs) {
+  if ((delayMs < kVoiceEngineMinMinPlayoutDelayMs) ||
+      (delayMs > kVoiceEngineMaxMinPlayoutDelayMs)) {
+    RTC_DLOG(LS_ERROR) << "SetMinimumPlayoutDelay() invalid min delay";
+    return -1;
+  }
+  if (audio_coding_->SetMinimumPlayoutDelay(delayMs) != 0) {
+    RTC_DLOG(LS_ERROR)
+        << "SetMinimumPlayoutDelay() failed to set min playout delay";
+    return -1;
+  }
+  return 0;
+}
+
+int Channel::GetPlayoutTimestamp(unsigned int& timestamp) {
+  uint32_t playout_timestamp_rtp = 0;
+  {
+    rtc::CritScope lock(&video_sync_lock_);
+    playout_timestamp_rtp = playout_timestamp_rtp_;
+  }
+  if (playout_timestamp_rtp == 0) {
+    RTC_DLOG(LS_ERROR) << "GetPlayoutTimestamp() failed to retrieve timestamp";
+    return -1;
+  }
+  timestamp = playout_timestamp_rtp;
+  return 0;
+}
+
+int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule,
+                        RtpReceiver** rtp_receiver) const {
+  *rtpRtcpModule = _rtpRtcpModule.get();
+  *rtp_receiver = rtp_receiver_.get();
+  return 0;
+}
+
+void Channel::UpdatePlayoutTimestamp(bool rtcp) {
+  jitter_buffer_playout_timestamp_ = audio_coding_->PlayoutTimestamp();
+
+  if (!jitter_buffer_playout_timestamp_) {
+    // This can happen if this channel has not received any RTP packets. In
+    // this case, NetEq is not capable of computing a playout timestamp.
+    return;
+  }
+
+  uint16_t delay_ms = 0;
+  if (_audioDeviceModulePtr->PlayoutDelay(&delay_ms) == -1) {
+    RTC_DLOG(LS_WARNING) << "Channel::UpdatePlayoutTimestamp() failed to read"
+                         << " playout delay from the ADM";
+    return;
+  }
+
+  RTC_DCHECK(jitter_buffer_playout_timestamp_);
+  uint32_t playout_timestamp = *jitter_buffer_playout_timestamp_;
+
+  // Remove the playout delay.
+  playout_timestamp -= (delay_ms * (GetRtpTimestampRateHz() / 1000));
+
+  {
+    rtc::CritScope lock(&video_sync_lock_);
+    if (!rtcp) {
+      playout_timestamp_rtp_ = playout_timestamp;
+    }
+    playout_delay_ms_ = delay_ms;
+  }
+}
+
+int Channel::SetSendRtpHeaderExtension(bool enable,
+                                       RTPExtensionType type,
+                                       unsigned char id) {
+  int error = 0;
+  _rtpRtcpModule->DeregisterSendRtpHeaderExtension(type);
+  if (enable) {
+    error = _rtpRtcpModule->RegisterSendRtpHeaderExtension(type, id);
+  }
+  return error;
+}
+
+int Channel::GetRtpTimestampRateHz() const {
+  const auto format = audio_coding_->ReceiveFormat();
+  // Default to the playout frequency if we've not gotten any packets yet.
+  // TODO(ossu): Zero clockrate can only happen if we've added an external
+  // decoder for a format we don't support internally. Remove once that way of
+  // adding decoders is gone!
+  return (format && format->clockrate_hz != 0)
+             ? format->clockrate_hz
+             : audio_coding_->PlayoutFrequency();
+}
+
+int64_t Channel::GetRTT(bool allow_associate_channel) const {
+  RtcpMode method = _rtpRtcpModule->RTCP();
+  if (method == RtcpMode::kOff) {
+    return 0;
+  }
+  std::vector<RTCPReportBlock> report_blocks;
+  _rtpRtcpModule->RemoteRTCPStat(&report_blocks);
+
+  int64_t rtt = 0;
+  if (report_blocks.empty()) {
+    if (allow_associate_channel) {
+      rtc::CritScope lock(&assoc_send_channel_lock_);
+      // Tries to get RTT from an associated channel. This is important for
+      // receive-only channels.
+      if (associated_send_channel_) {
+        // To prevent infinite recursion and deadlock, calling GetRTT of
+        // associate channel should always use "false" for argument:
+        // |allow_associate_channel|.
+        rtt = associated_send_channel_->GetRTT(false);
+      }
+    }
+    return rtt;
+  }
+
+  uint32_t remoteSSRC = rtp_receiver_->SSRC();
+  std::vector<RTCPReportBlock>::const_iterator it = report_blocks.begin();
+  for (; it != report_blocks.end(); ++it) {
+    if (it->sender_ssrc == remoteSSRC)
+      break;
+  }
+  if (it == report_blocks.end()) {
+    // We have not received packets with SSRC matching the report blocks.
+    // To calculate RTT we try with the SSRC of the first report block.
+    // This is very important for send-only channels where we don't know
+    // the SSRC of the other end.
+    remoteSSRC = report_blocks[0].sender_ssrc;
+  }
+
+  int64_t avg_rtt = 0;
+  int64_t max_rtt = 0;
+  int64_t min_rtt = 0;
+  if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) !=
+      0) {
+    return 0;
+  }
+  return rtt;
+}
+
+}  // namespace voe
+}  // namespace webrtc
diff --git a/audio/channel.h b/audio/channel.h
new file mode 100644
index 0000000..e59cae1
--- /dev/null
+++ b/audio/channel.h
@@ -0,0 +1,418 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_CHANNEL_H_
+#define AUDIO_CHANNEL_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/audio/audio_mixer.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/call/audio_sink.h"
+#include "api/call/transport.h"
+#include "api/optional.h"
+#include "audio/audio_level.h"
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_processing/rms_level.h"
+#include "modules/rtp_rtcp/include/remote_ntp_time_estimator.h"
+#include "modules/rtp_rtcp/include/rtp_header_parser.h"
+#include "modules/rtp_rtcp/include/rtp_receiver.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/event.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/thread_checker.h"
+
+// TODO(solenberg, nisse): This file contains a few NOLINT marks, to silence
+// warnings about use of unsigned short, and non-const reference arguments.
+// These need cleanup, in a separate cl.
+
+namespace rtc {
+class TimestampWrapAroundHandler;
+}
+
+namespace webrtc {
+
+class AudioDeviceModule;
+class PacketRouter;
+class ProcessThread;
+class RateLimiter;
+class ReceiveStatistics;
+class RemoteNtpTimeEstimator;
+class RtcEventLog;
+class RTPPayloadRegistry;
+class RTPReceiverAudio;
+class RtpPacketReceived;
+class RtpRtcp;
+class RtpTransportControllerSendInterface;
+class TelephoneEventHandler;
+
+struct SenderInfo;
+
+struct CallStatistics {
+  unsigned short fractionLost;  // NOLINT
+  unsigned int cumulativeLost;
+  unsigned int extendedMax;
+  unsigned int jitterSamples;
+  int64_t rttMs;
+  size_t bytesSent;
+  int packetsSent;
+  size_t bytesReceived;
+  int packetsReceived;
+  // The capture ntp time (in local timebase) of the first played out audio
+  // frame.
+  int64_t capture_start_ntp_time_ms_;
+};
+
+// See section 6.4.2 in http://www.ietf.org/rfc/rfc3550.txt for details.
+struct ReportBlock {
+  uint32_t sender_SSRC;  // SSRC of sender
+  uint32_t source_SSRC;
+  uint8_t fraction_lost;
+  uint32_t cumulative_num_packets_lost;
+  uint32_t extended_highest_sequence_number;
+  uint32_t interarrival_jitter;
+  uint32_t last_SR_timestamp;
+  uint32_t delay_since_last_SR;
+};
+
+namespace voe {
+
+class RtcEventLogProxy;
+class RtcpRttStatsProxy;
+class RtpPacketSenderProxy;
+class TransportFeedbackProxy;
+class TransportSequenceNumberProxy;
+class VoERtcpObserver;
+
+// Helper class to simplify locking scheme for members that are accessed from
+// multiple threads.
+// Example: a member can be set on thread T1 and read by an internal audio
+// thread T2. Accessing the member via this class ensures that we are
+// safe and also avoid TSan v2 warnings.
+class ChannelState {
+ public:
+  struct State {
+    bool playing = false;
+    bool sending = false;
+  };
+
+  ChannelState() {}
+  virtual ~ChannelState() {}
+
+  void Reset() {
+    rtc::CritScope lock(&lock_);
+    state_ = State();
+  }
+
+  State Get() const {
+    rtc::CritScope lock(&lock_);
+    return state_;
+  }
+
+  void SetPlaying(bool enable) {
+    rtc::CritScope lock(&lock_);
+    state_.playing = enable;
+  }
+
+  void SetSending(bool enable) {
+    rtc::CritScope lock(&lock_);
+    state_.sending = enable;
+  }
+
+ private:
+  rtc::CriticalSection lock_;
+  State state_;
+};
+
+class Channel
+    : public RtpData,
+      public RtpFeedback,
+      public Transport,
+      public AudioPacketizationCallback,  // receive encoded packets from the
+                                          // ACM
+      public OverheadObserver {
+ public:
+  friend class VoERtcpObserver;
+
+  enum { KNumSocketThreads = 1 };
+  enum { KNumberOfSocketBuffers = 8 };
+  // Used for send streams.
+  Channel(rtc::TaskQueue* encoder_queue,
+          ProcessThread* module_process_thread,
+          AudioDeviceModule* audio_device_module);
+  // Used for receive streams.
+  Channel(ProcessThread* module_process_thread,
+          AudioDeviceModule* audio_device_module,
+          size_t jitter_buffer_max_packets,
+          bool jitter_buffer_fast_playout,
+          rtc::scoped_refptr<AudioDecoderFactory> decoder_factory);
+  virtual ~Channel();
+
+  void SetSink(AudioSinkInterface* sink);
+
+  void SetReceiveCodecs(const std::map<int, SdpAudioFormat>& codecs);
+
+  // Send using this encoder, with this payload type.
+  bool SetEncoder(int payload_type, std::unique_ptr<AudioEncoder> encoder);
+  void ModifyEncoder(
+      rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier);
+
+  // API methods
+
+  // VoEBase
+  int32_t StartPlayout();
+  int32_t StopPlayout();
+  int32_t StartSend();
+  void StopSend();
+
+  // Codecs
+  int32_t GetRecCodec(CodecInst& codec);  // NOLINT
+  void SetBitRate(int bitrate_bps, int64_t probing_interval_ms);
+  bool EnableAudioNetworkAdaptor(const std::string& config_string);
+  void DisableAudioNetworkAdaptor();
+  void SetReceiverFrameLengthRange(int min_frame_length_ms,
+                                   int max_frame_length_ms);
+
+  // Network
+  void RegisterTransport(Transport* transport);
+  // TODO(nisse, solenberg): Delete when VoENetwork is deleted.
+  int32_t ReceivedRTCPPacket(const uint8_t* data, size_t length);
+  void OnRtpPacket(const RtpPacketReceived& packet);
+
+  // Muting, Volume and Level.
+  void SetInputMute(bool enable);
+  void SetChannelOutputVolumeScaling(float scaling);
+  int GetSpeechOutputLevelFullRange() const;
+  // See description of "totalAudioEnergy" in the WebRTC stats spec:
+  // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy
+  double GetTotalOutputEnergy() const;
+  double GetTotalOutputDuration() const;
+
+  // Stats.
+  int GetNetworkStatistics(NetworkStatistics& stats);  // NOLINT
+  void GetDecodingCallStatistics(AudioDecodingCallStats* stats) const;
+  ANAStats GetANAStatistics() const;
+
+  // Audio+Video Sync.
+  uint32_t GetDelayEstimate() const;
+  int SetMinimumPlayoutDelay(int delayMs);
+  int GetPlayoutTimestamp(unsigned int& timestamp);  // NOLINT
+  int GetRtpRtcp(RtpRtcp** rtpRtcpModule, RtpReceiver** rtp_receiver) const;
+
+  // DTMF.
+  int SendTelephoneEventOutband(int event, int duration_ms);
+  int SetSendTelephoneEventPayloadType(int payload_type, int payload_frequency);
+
+  // RTP+RTCP
+  int SetLocalSSRC(unsigned int ssrc);
+  int SetSendAudioLevelIndicationStatus(bool enable, unsigned char id);
+  void EnableSendTransportSequenceNumber(int id);
+
+  void RegisterSenderCongestionControlObjects(
+      RtpTransportControllerSendInterface* transport,
+      RtcpBandwidthObserver* bandwidth_observer);
+  void RegisterReceiverCongestionControlObjects(PacketRouter* packet_router);
+  void ResetSenderCongestionControlObjects();
+  void ResetReceiverCongestionControlObjects();
+  void SetRTCPStatus(bool enable);
+  int SetRTCP_CNAME(const char cName[256]);
+  int GetRemoteRTCPReportBlocks(std::vector<ReportBlock>* report_blocks);
+  int GetRTPStatistics(CallStatistics& stats);  // NOLINT
+  void SetNACKStatus(bool enable, int maxNumberOfPackets);
+
+  // From AudioPacketizationCallback in the ACM
+  int32_t SendData(FrameType frameType,
+                   uint8_t payloadType,
+                   uint32_t timeStamp,
+                   const uint8_t* payloadData,
+                   size_t payloadSize,
+                   const RTPFragmentationHeader* fragmentation) override;
+
+  // From RtpData in the RTP/RTCP module
+  int32_t OnReceivedPayloadData(const uint8_t* payloadData,
+                                size_t payloadSize,
+                                const WebRtcRTPHeader* rtpHeader) override;
+
+  // From RtpFeedback in the RTP/RTCP module
+  int32_t OnInitializeDecoder(int payload_type,
+                              const SdpAudioFormat& audio_format,
+                              uint32_t rate) override;
+  void OnIncomingSSRCChanged(uint32_t ssrc) override;
+  void OnIncomingCSRCChanged(uint32_t CSRC, bool added) override;
+
+  // From Transport (called by the RTP/RTCP module)
+  bool SendRtp(const uint8_t* data,
+               size_t len,
+               const PacketOptions& packet_options) override;
+  bool SendRtcp(const uint8_t* data, size_t len) override;
+
+  // From AudioMixer::Source.
+  AudioMixer::Source::AudioFrameInfo GetAudioFrameWithInfo(
+      int sample_rate_hz,
+      AudioFrame* audio_frame);
+
+  int PreferredSampleRate() const;
+
+  bool Playing() const { return channel_state_.Get().playing; }
+  bool Sending() const { return channel_state_.Get().sending; }
+  RtpRtcp* RtpRtcpModulePtr() const { return _rtpRtcpModule.get(); }
+
+  // ProcessAndEncodeAudio() posts a task on the shared encoder task queue,
+  // which in turn calls (on the queue) ProcessAndEncodeAudioOnTaskQueue() where
+  // the actual processing of the audio takes place. The processing mainly
+  // consists of encoding and preparing the result for sending by adding it to a
+  // send queue.
+  // The main reason for using a task queue here is to release the native,
+  // OS-specific, audio capture thread as soon as possible to ensure that it
+  // can go back to sleep and be prepared to deliver an new captured audio
+  // packet.
+  void ProcessAndEncodeAudio(std::unique_ptr<AudioFrame> audio_frame);
+
+  // Associate to a send channel.
+  // Used for obtaining RTT for a receive-only channel.
+  void SetAssociatedSendChannel(Channel* channel);
+
+  // Set a RtcEventLog logging object.
+  void SetRtcEventLog(RtcEventLog* event_log);
+
+  void SetRtcpRttStats(RtcpRttStats* rtcp_rtt_stats);
+  void SetTransportOverhead(size_t transport_overhead_per_packet);
+
+  // From OverheadObserver in the RTP/RTCP module
+  void OnOverheadChanged(size_t overhead_bytes_per_packet) override;
+
+  // The existence of this function alongside OnUplinkPacketLossRate is
+  // a compromise. We want the encoder to be agnostic of the PLR source, but
+  // we also don't want it to receive conflicting information from TWCC and
+  // from RTCP-XR.
+  void OnTwccBasedUplinkPacketLossRate(float packet_loss_rate);
+
+  void OnRecoverableUplinkPacketLossRate(float recoverable_packet_loss_rate);
+
+  std::vector<RtpSource> GetSources() const {
+    return rtp_receiver_->GetSources();
+  }
+
+ private:
+  class ProcessAndEncodeAudioTask;
+
+  void Init();
+  void Terminate();
+
+  int GetRemoteSSRC(unsigned int& ssrc);  // NOLINT
+  void OnUplinkPacketLossRate(float packet_loss_rate);
+  bool InputMute() const;
+
+  bool ReceivePacket(const uint8_t* packet,
+                     size_t packet_length,
+                     const RTPHeader& header);
+  bool IsPacketInOrder(const RTPHeader& header) const;
+  bool IsPacketRetransmitted(const RTPHeader& header, bool in_order) const;
+  int ResendPackets(const uint16_t* sequence_numbers, int length);
+  void UpdatePlayoutTimestamp(bool rtcp);
+
+  int SetSendRtpHeaderExtension(bool enable,
+                                RTPExtensionType type,
+                                unsigned char id);
+
+  void UpdateOverheadForEncoder()
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(overhead_per_packet_lock_);
+
+  int GetRtpTimestampRateHz() const;
+  int64_t GetRTT(bool allow_associate_channel) const;
+
+  // Called on the encoder task queue when a new input audio frame is ready
+  // for encoding.
+  void ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input);
+
+  rtc::CriticalSection _callbackCritSect;
+  rtc::CriticalSection volume_settings_critsect_;
+
+  ChannelState channel_state_;
+
+  std::unique_ptr<voe::RtcEventLogProxy> event_log_proxy_;
+  std::unique_ptr<voe::RtcpRttStatsProxy> rtcp_rtt_stats_proxy_;
+
+  std::unique_ptr<RTPPayloadRegistry> rtp_payload_registry_;
+  std::unique_ptr<ReceiveStatistics> rtp_receive_statistics_;
+  std::unique_ptr<RtpReceiver> rtp_receiver_;
+  TelephoneEventHandler* telephone_event_handler_;
+  std::unique_ptr<RtpRtcp> _rtpRtcpModule;
+  std::unique_ptr<AudioCodingModule> audio_coding_;
+  AudioSinkInterface* audio_sink_ = nullptr;
+  AudioLevel _outputAudioLevel;
+  uint32_t _timeStamp RTC_GUARDED_BY(encoder_queue_);
+
+  RemoteNtpTimeEstimator ntp_estimator_ RTC_GUARDED_BY(ts_stats_lock_);
+
+  // Timestamp of the audio pulled from NetEq.
+  rtc::Optional<uint32_t> jitter_buffer_playout_timestamp_;
+
+  rtc::CriticalSection video_sync_lock_;
+  uint32_t playout_timestamp_rtp_ RTC_GUARDED_BY(video_sync_lock_);
+  uint32_t playout_delay_ms_ RTC_GUARDED_BY(video_sync_lock_);
+  uint16_t send_sequence_number_;
+
+  rtc::CriticalSection ts_stats_lock_;
+
+  std::unique_ptr<rtc::TimestampWrapAroundHandler> rtp_ts_wraparound_handler_;
+  // The rtp timestamp of the first played out audio frame.
+  int64_t capture_start_rtp_time_stamp_;
+  // The capture ntp time (in local timebase) of the first played out audio
+  // frame.
+  int64_t capture_start_ntp_time_ms_ RTC_GUARDED_BY(ts_stats_lock_);
+
+  // uses
+  ProcessThread* _moduleProcessThreadPtr;
+  AudioDeviceModule* _audioDeviceModulePtr;
+  Transport* _transportPtr;  // WebRtc socket or external transport
+  RmsLevel rms_level_ RTC_GUARDED_BY(encoder_queue_);
+  bool input_mute_ RTC_GUARDED_BY(volume_settings_critsect_);
+  bool previous_frame_muted_ RTC_GUARDED_BY(encoder_queue_);
+  float _outputGain RTC_GUARDED_BY(volume_settings_critsect_);
+  // VoeRTP_RTCP
+  // TODO(henrika): can today be accessed on the main thread and on the
+  // task queue; hence potential race.
+  bool _includeAudioLevelIndication;
+  size_t transport_overhead_per_packet_
+      RTC_GUARDED_BY(overhead_per_packet_lock_);
+  size_t rtp_overhead_per_packet_ RTC_GUARDED_BY(overhead_per_packet_lock_);
+  rtc::CriticalSection overhead_per_packet_lock_;
+  // RtcpBandwidthObserver
+  std::unique_ptr<VoERtcpObserver> rtcp_observer_;
+  // An associated send channel.
+  rtc::CriticalSection assoc_send_channel_lock_;
+  Channel* associated_send_channel_ RTC_GUARDED_BY(assoc_send_channel_lock_);
+
+  bool pacing_enabled_ = true;
+  PacketRouter* packet_router_ = nullptr;
+  std::unique_ptr<TransportFeedbackProxy> feedback_observer_proxy_;
+  std::unique_ptr<TransportSequenceNumberProxy> seq_num_allocator_proxy_;
+  std::unique_ptr<RtpPacketSenderProxy> rtp_packet_sender_proxy_;
+  std::unique_ptr<RateLimiter> retransmission_rate_limiter_;
+
+  rtc::ThreadChecker construction_thread_;
+
+  const bool use_twcc_plr_for_ana_;
+
+  rtc::CriticalSection encoder_queue_lock_;
+  bool encoder_queue_is_active_ RTC_GUARDED_BY(encoder_queue_lock_) = false;
+  rtc::TaskQueue* encoder_queue_ = nullptr;
+};
+
+}  // namespace voe
+}  // namespace webrtc
+
+#endif  // AUDIO_CHANNEL_H_
diff --git a/audio/channel_proxy.cc b/audio/channel_proxy.cc
new file mode 100644
index 0000000..1a546f6
--- /dev/null
+++ b/audio/channel_proxy.cc
@@ -0,0 +1,335 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/channel_proxy.h"
+
+#include <utility>
+
+#include "api/call/audio_sink.h"
+#include "call/rtp_transport_controller_send_interface.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+namespace voe {
+ChannelProxy::ChannelProxy() {}
+
+ChannelProxy::ChannelProxy(std::unique_ptr<Channel> channel) :
+    channel_(std::move(channel)) {
+  RTC_DCHECK(channel_);
+  module_process_thread_checker_.DetachFromThread();
+}
+
+ChannelProxy::~ChannelProxy() {}
+
+bool ChannelProxy::SetEncoder(int payload_type,
+                              std::unique_ptr<AudioEncoder> encoder) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  return channel_->SetEncoder(payload_type, std::move(encoder));
+}
+
+void ChannelProxy::ModifyEncoder(
+    rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->ModifyEncoder(modifier);
+}
+
+void ChannelProxy::SetRTCPStatus(bool enable) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->SetRTCPStatus(enable);
+}
+
+void ChannelProxy::SetLocalSSRC(uint32_t ssrc) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  int error = channel_->SetLocalSSRC(ssrc);
+  RTC_DCHECK_EQ(0, error);
+}
+
+void ChannelProxy::SetRTCP_CNAME(const std::string& c_name) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  // Note: VoERTP_RTCP::SetRTCP_CNAME() accepts a char[256] array.
+  std::string c_name_limited = c_name.substr(0, 255);
+  int error = channel_->SetRTCP_CNAME(c_name_limited.c_str());
+  RTC_DCHECK_EQ(0, error);
+}
+
+void ChannelProxy::SetNACKStatus(bool enable, int max_packets) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->SetNACKStatus(enable, max_packets);
+}
+
+void ChannelProxy::SetSendAudioLevelIndicationStatus(bool enable, int id) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  int error = channel_->SetSendAudioLevelIndicationStatus(enable, id);
+  RTC_DCHECK_EQ(0, error);
+}
+
+void ChannelProxy::EnableSendTransportSequenceNumber(int id) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->EnableSendTransportSequenceNumber(id);
+}
+
+void ChannelProxy::RegisterSenderCongestionControlObjects(
+    RtpTransportControllerSendInterface* transport,
+    RtcpBandwidthObserver* bandwidth_observer) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->RegisterSenderCongestionControlObjects(transport,
+                                                    bandwidth_observer);
+}
+
+void ChannelProxy::RegisterReceiverCongestionControlObjects(
+    PacketRouter* packet_router) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->RegisterReceiverCongestionControlObjects(packet_router);
+}
+
+void ChannelProxy::ResetSenderCongestionControlObjects() {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->ResetSenderCongestionControlObjects();
+}
+
+void ChannelProxy::ResetReceiverCongestionControlObjects() {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->ResetReceiverCongestionControlObjects();
+}
+
+CallStatistics ChannelProxy::GetRTCPStatistics() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  CallStatistics stats = {0};
+  int error = channel_->GetRTPStatistics(stats);
+  RTC_DCHECK_EQ(0, error);
+  return stats;
+}
+
+std::vector<ReportBlock> ChannelProxy::GetRemoteRTCPReportBlocks() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  std::vector<webrtc::ReportBlock> blocks;
+  int error = channel_->GetRemoteRTCPReportBlocks(&blocks);
+  RTC_DCHECK_EQ(0, error);
+  return blocks;
+}
+
+NetworkStatistics ChannelProxy::GetNetworkStatistics() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  NetworkStatistics stats = {0};
+  int error = channel_->GetNetworkStatistics(stats);
+  RTC_DCHECK_EQ(0, error);
+  return stats;
+}
+
+AudioDecodingCallStats ChannelProxy::GetDecodingCallStatistics() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  AudioDecodingCallStats stats;
+  channel_->GetDecodingCallStatistics(&stats);
+  return stats;
+}
+
+ANAStats ChannelProxy::GetANAStatistics() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  return channel_->GetANAStatistics();
+}
+
+int ChannelProxy::GetSpeechOutputLevelFullRange() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  return channel_->GetSpeechOutputLevelFullRange();
+}
+
+double ChannelProxy::GetTotalOutputEnergy() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  return channel_->GetTotalOutputEnergy();
+}
+
+double ChannelProxy::GetTotalOutputDuration() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  return channel_->GetTotalOutputDuration();
+}
+
+uint32_t ChannelProxy::GetDelayEstimate() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread() ||
+             module_process_thread_checker_.CalledOnValidThread());
+  return channel_->GetDelayEstimate();
+}
+
+bool ChannelProxy::SetSendTelephoneEventPayloadType(int payload_type,
+                                                    int payload_frequency) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  return channel_->SetSendTelephoneEventPayloadType(payload_type,
+                                                     payload_frequency) == 0;
+}
+
+bool ChannelProxy::SendTelephoneEventOutband(int event, int duration_ms) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  return channel_->SendTelephoneEventOutband(event, duration_ms) == 0;
+}
+
+void ChannelProxy::SetBitrate(int bitrate_bps, int64_t probing_interval_ms) {
+  // This method can be called on the worker thread, module process thread
+  // or on a TaskQueue via VideoSendStreamImpl::OnEncoderConfigurationChanged.
+  // TODO(solenberg): Figure out a good way to check this or enforce calling
+  // rules.
+  // RTC_DCHECK(worker_thread_checker_.CalledOnValidThread() ||
+  //            module_process_thread_checker_.CalledOnValidThread());
+  channel_->SetBitRate(bitrate_bps, probing_interval_ms);
+}
+
+void ChannelProxy::SetReceiveCodecs(
+    const std::map<int, SdpAudioFormat>& codecs) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->SetReceiveCodecs(codecs);
+}
+
+void ChannelProxy::SetSink(AudioSinkInterface* sink) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->SetSink(sink);
+}
+
+void ChannelProxy::SetInputMute(bool muted) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->SetInputMute(muted);
+}
+
+void ChannelProxy::RegisterTransport(Transport* transport) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->RegisterTransport(transport);
+}
+
+void ChannelProxy::OnRtpPacket(const RtpPacketReceived& packet) {
+  // May be called on either worker thread or network thread.
+  channel_->OnRtpPacket(packet);
+}
+
+bool ChannelProxy::ReceivedRTCPPacket(const uint8_t* packet, size_t length) {
+  // May be called on either worker thread or network thread.
+  return channel_->ReceivedRTCPPacket(packet, length) == 0;
+}
+
+void ChannelProxy::SetChannelOutputVolumeScaling(float scaling) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->SetChannelOutputVolumeScaling(scaling);
+}
+
+void ChannelProxy::SetRtcEventLog(RtcEventLog* event_log) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->SetRtcEventLog(event_log);
+}
+
+AudioMixer::Source::AudioFrameInfo ChannelProxy::GetAudioFrameWithInfo(
+    int sample_rate_hz,
+    AudioFrame* audio_frame) {
+  RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_);
+  return channel_->GetAudioFrameWithInfo(sample_rate_hz, audio_frame);
+}
+
+int ChannelProxy::PreferredSampleRate() const {
+  RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_);
+  return channel_->PreferredSampleRate();
+}
+
+void ChannelProxy::ProcessAndEncodeAudio(
+    std::unique_ptr<AudioFrame> audio_frame) {
+  RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_);
+  return channel_->ProcessAndEncodeAudio(std::move(audio_frame));
+}
+
+void ChannelProxy::SetTransportOverhead(int transport_overhead_per_packet) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->SetTransportOverhead(transport_overhead_per_packet);
+}
+
+void ChannelProxy::AssociateSendChannel(
+    const ChannelProxy& send_channel_proxy) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->SetAssociatedSendChannel(send_channel_proxy.channel_.get());
+}
+
+void ChannelProxy::DisassociateSendChannel() {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->SetAssociatedSendChannel(nullptr);
+}
+
+void ChannelProxy::GetRtpRtcp(RtpRtcp** rtp_rtcp,
+                              RtpReceiver** rtp_receiver) const {
+  RTC_DCHECK(module_process_thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(rtp_rtcp);
+  RTC_DCHECK(rtp_receiver);
+  int error = channel_->GetRtpRtcp(rtp_rtcp, rtp_receiver);
+  RTC_DCHECK_EQ(0, error);
+}
+
+uint32_t ChannelProxy::GetPlayoutTimestamp() const {
+  RTC_DCHECK_RUNS_SERIALIZED(&video_capture_thread_race_checker_);
+  unsigned int timestamp = 0;
+  int error = channel_->GetPlayoutTimestamp(timestamp);
+  RTC_DCHECK(!error || timestamp == 0);
+  return timestamp;
+}
+
+void ChannelProxy::SetMinimumPlayoutDelay(int delay_ms) {
+  RTC_DCHECK(module_process_thread_checker_.CalledOnValidThread());
+  // Limit to range accepted by both VoE and ACM, so we're at least getting as
+  // close as possible, instead of failing.
+  delay_ms = rtc::SafeClamp(delay_ms, 0, 10000);
+  int error = channel_->SetMinimumPlayoutDelay(delay_ms);
+  if (0 != error) {
+    RTC_LOG(LS_WARNING) << "Error setting minimum playout delay.";
+  }
+}
+
+void ChannelProxy::SetRtcpRttStats(RtcpRttStats* rtcp_rtt_stats) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->SetRtcpRttStats(rtcp_rtt_stats);
+}
+
+bool ChannelProxy::GetRecCodec(CodecInst* codec_inst) const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  return channel_->GetRecCodec(*codec_inst) == 0;
+}
+
+void ChannelProxy::OnTwccBasedUplinkPacketLossRate(float packet_loss_rate) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->OnTwccBasedUplinkPacketLossRate(packet_loss_rate);
+}
+
+void ChannelProxy::OnRecoverableUplinkPacketLossRate(
+    float recoverable_packet_loss_rate) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->OnRecoverableUplinkPacketLossRate(recoverable_packet_loss_rate);
+}
+
+std::vector<RtpSource> ChannelProxy::GetSources() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  return channel_->GetSources();
+}
+
+void ChannelProxy::StartSend() {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  int error = channel_->StartSend();
+  RTC_DCHECK_EQ(0, error);
+}
+
+void ChannelProxy::StopSend() {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  channel_->StopSend();
+}
+
+void ChannelProxy::StartPlayout() {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  int error = channel_->StartPlayout();
+  RTC_DCHECK_EQ(0, error);
+}
+
+void ChannelProxy::StopPlayout() {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  int error = channel_->StopPlayout();
+  RTC_DCHECK_EQ(0, error);
+}
+}  // namespace voe
+}  // namespace webrtc
diff --git a/audio/channel_proxy.h b/audio/channel_proxy.h
new file mode 100644
index 0000000..e11bd2d
--- /dev/null
+++ b/audio/channel_proxy.h
@@ -0,0 +1,144 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_CHANNEL_PROXY_H_
+#define AUDIO_CHANNEL_PROXY_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/audio/audio_mixer.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/rtpreceiverinterface.h"
+#include "audio/channel.h"
+#include "call/rtp_packet_sink_interface.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/thread_checker.h"
+
+namespace webrtc {
+
+class AudioSinkInterface;
+class PacketRouter;
+class RtcEventLog;
+class RtcpBandwidthObserver;
+class RtcpRttStats;
+class RtpPacketSender;
+class RtpPacketReceived;
+class RtpReceiver;
+class RtpRtcp;
+class RtpTransportControllerSendInterface;
+class Transport;
+class TransportFeedbackObserver;
+
+namespace voe {
+
+// This class provides the "view" of a voe::Channel that we need to implement
+// webrtc::AudioSendStream and webrtc::AudioReceiveStream. It serves two
+// purposes:
+//  1. Allow mocking just the interfaces used, instead of the entire
+//     voe::Channel class.
+//  2. Provide a refined interface for the stream classes, including assumptions
+//     on return values and input adaptation.
+class ChannelProxy : public RtpPacketSinkInterface {
+ public:
+  ChannelProxy();
+  explicit ChannelProxy(std::unique_ptr<Channel> channel);
+  virtual ~ChannelProxy();
+
+  virtual bool SetEncoder(int payload_type,
+                          std::unique_ptr<AudioEncoder> encoder);
+  virtual void ModifyEncoder(
+      rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier);
+
+  virtual void SetRTCPStatus(bool enable);
+  virtual void SetLocalSSRC(uint32_t ssrc);
+  virtual void SetRTCP_CNAME(const std::string& c_name);
+  virtual void SetNACKStatus(bool enable, int max_packets);
+  virtual void SetSendAudioLevelIndicationStatus(bool enable, int id);
+  virtual void EnableSendTransportSequenceNumber(int id);
+  virtual void RegisterSenderCongestionControlObjects(
+      RtpTransportControllerSendInterface* transport,
+      RtcpBandwidthObserver* bandwidth_observer);
+  virtual void RegisterReceiverCongestionControlObjects(
+      PacketRouter* packet_router);
+  virtual void ResetSenderCongestionControlObjects();
+  virtual void ResetReceiverCongestionControlObjects();
+  virtual CallStatistics GetRTCPStatistics() const;
+  virtual std::vector<ReportBlock> GetRemoteRTCPReportBlocks() const;
+  virtual NetworkStatistics GetNetworkStatistics() const;
+  virtual AudioDecodingCallStats GetDecodingCallStatistics() const;
+  virtual ANAStats GetANAStatistics() const;
+  virtual int GetSpeechOutputLevelFullRange() const;
+  // See description of "totalAudioEnergy" in the WebRTC stats spec:
+  // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy
+  virtual double GetTotalOutputEnergy() const;
+  virtual double GetTotalOutputDuration() const;
+  virtual uint32_t GetDelayEstimate() const;
+  virtual bool SetSendTelephoneEventPayloadType(int payload_type,
+                                                int payload_frequency);
+  virtual bool SendTelephoneEventOutband(int event, int duration_ms);
+  virtual void SetBitrate(int bitrate_bps, int64_t probing_interval_ms);
+  virtual void SetReceiveCodecs(const std::map<int, SdpAudioFormat>& codecs);
+  virtual void SetSink(AudioSinkInterface* sink);
+  virtual void SetInputMute(bool muted);
+  virtual void RegisterTransport(Transport* transport);
+
+  // Implements RtpPacketSinkInterface
+  void OnRtpPacket(const RtpPacketReceived& packet) override;
+  virtual bool ReceivedRTCPPacket(const uint8_t* packet, size_t length);
+  virtual void SetChannelOutputVolumeScaling(float scaling);
+  virtual void SetRtcEventLog(RtcEventLog* event_log);
+  virtual AudioMixer::Source::AudioFrameInfo GetAudioFrameWithInfo(
+      int sample_rate_hz,
+      AudioFrame* audio_frame);
+  virtual int PreferredSampleRate() const;
+  virtual void ProcessAndEncodeAudio(std::unique_ptr<AudioFrame> audio_frame);
+  virtual void SetTransportOverhead(int transport_overhead_per_packet);
+  virtual void AssociateSendChannel(const ChannelProxy& send_channel_proxy);
+  virtual void DisassociateSendChannel();
+  virtual void GetRtpRtcp(RtpRtcp** rtp_rtcp,
+                          RtpReceiver** rtp_receiver) const;
+  virtual uint32_t GetPlayoutTimestamp() const;
+  virtual void SetMinimumPlayoutDelay(int delay_ms);
+  virtual void SetRtcpRttStats(RtcpRttStats* rtcp_rtt_stats);
+  virtual bool GetRecCodec(CodecInst* codec_inst) const;
+  virtual void OnTwccBasedUplinkPacketLossRate(float packet_loss_rate);
+  virtual void OnRecoverableUplinkPacketLossRate(
+      float recoverable_packet_loss_rate);
+  virtual std::vector<webrtc::RtpSource> GetSources() const;
+  virtual void StartSend();
+  virtual void StopSend();
+  virtual void StartPlayout();
+  virtual void StopPlayout();
+
+ private:
+  // Thread checkers document and lock usage of some methods on voe::Channel to
+  // specific threads we know about. The goal is to eventually split up
+  // voe::Channel into parts with single-threaded semantics, and thereby reduce
+  // the need for locks.
+  rtc::ThreadChecker worker_thread_checker_;
+  rtc::ThreadChecker module_process_thread_checker_;
+  // Methods accessed from audio and video threads are checked for sequential-
+  // only access. We don't necessarily own and control these threads, so thread
+  // checkers cannot be used. E.g. Chromium may transfer "ownership" from one
+  // audio thread to another, but access is still sequential.
+  rtc::RaceChecker audio_thread_race_checker_;
+  rtc::RaceChecker video_capture_thread_race_checker_;
+  std::unique_ptr<Channel> channel_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(ChannelProxy);
+};
+}  // namespace voe
+}  // namespace webrtc
+
+#endif  // AUDIO_CHANNEL_PROXY_H_
diff --git a/audio/conversion.h b/audio/conversion.h
new file mode 100644
index 0000000..920aa3a
--- /dev/null
+++ b/audio/conversion.h
@@ -0,0 +1,27 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_CONVERSION_H_
+#define AUDIO_CONVERSION_H_
+
+namespace webrtc {
+
+// Convert fixed point number with 8 bit fractional part, to floating point.
+inline float Q8ToFloat(uint32_t v) {
+  return static_cast<float>(v) / (1 << 8);
+}
+
+// Convert fixed point number with 14 bit fractional part, to floating point.
+inline float Q14ToFloat(uint32_t v) {
+  return static_cast<float>(v) / (1 << 14);
+}
+}  // namespace webrtc
+
+#endif  // AUDIO_CONVERSION_H_
diff --git a/audio/mock_voe_channel_proxy.h b/audio/mock_voe_channel_proxy.h
new file mode 100644
index 0000000..2f6754e
--- /dev/null
+++ b/audio/mock_voe_channel_proxy.h
@@ -0,0 +1,106 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_MOCK_VOE_CHANNEL_PROXY_H_
+#define AUDIO_MOCK_VOE_CHANNEL_PROXY_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "audio/channel_proxy.h"
+#include "modules/rtp_rtcp/source/rtp_packet_received.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace test {
+
+class MockVoEChannelProxy : public voe::ChannelProxy {
+ public:
+  // GMock doesn't like move-only types, like std::unique_ptr.
+  virtual bool SetEncoder(int payload_type,
+                          std::unique_ptr<AudioEncoder> encoder) {
+    return SetEncoderForMock(payload_type, &encoder);
+  }
+  MOCK_METHOD2(SetEncoderForMock,
+               bool(int payload_type,
+                    std::unique_ptr<AudioEncoder>* encoder));
+  MOCK_METHOD1(
+      ModifyEncoder,
+      void(rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier));
+  MOCK_METHOD1(SetRTCPStatus, void(bool enable));
+  MOCK_METHOD1(SetLocalSSRC, void(uint32_t ssrc));
+  MOCK_METHOD1(SetRTCP_CNAME, void(const std::string& c_name));
+  MOCK_METHOD2(SetNACKStatus, void(bool enable, int max_packets));
+  MOCK_METHOD2(SetSendAudioLevelIndicationStatus, void(bool enable, int id));
+  MOCK_METHOD1(EnableSendTransportSequenceNumber, void(int id));
+  MOCK_METHOD2(RegisterSenderCongestionControlObjects,
+               void(RtpTransportControllerSendInterface* transport,
+                    RtcpBandwidthObserver* bandwidth_observer));
+  MOCK_METHOD1(RegisterReceiverCongestionControlObjects,
+               void(PacketRouter* packet_router));
+  MOCK_METHOD0(ResetSenderCongestionControlObjects, void());
+  MOCK_METHOD0(ResetReceiverCongestionControlObjects, void());
+  MOCK_CONST_METHOD0(GetRTCPStatistics, CallStatistics());
+  MOCK_CONST_METHOD0(GetRemoteRTCPReportBlocks, std::vector<ReportBlock>());
+  MOCK_CONST_METHOD0(GetNetworkStatistics, NetworkStatistics());
+  MOCK_CONST_METHOD0(GetDecodingCallStatistics, AudioDecodingCallStats());
+  MOCK_CONST_METHOD0(GetANAStatistics, ANAStats());
+  MOCK_CONST_METHOD0(GetSpeechOutputLevelFullRange, int());
+  MOCK_CONST_METHOD0(GetTotalOutputEnergy, double());
+  MOCK_CONST_METHOD0(GetTotalOutputDuration, double());
+  MOCK_CONST_METHOD0(GetDelayEstimate, uint32_t());
+  MOCK_METHOD2(SetSendTelephoneEventPayloadType, bool(int payload_type,
+                                                      int payload_frequency));
+  MOCK_METHOD2(SendTelephoneEventOutband, bool(int event, int duration_ms));
+  MOCK_METHOD2(SetBitrate, void(int bitrate_bps, int64_t probing_interval_ms));
+  MOCK_METHOD1(SetSink, void(AudioSinkInterface* sink));
+  MOCK_METHOD1(SetInputMute, void(bool muted));
+  MOCK_METHOD1(RegisterTransport, void(Transport* transport));
+  MOCK_METHOD1(OnRtpPacket, void(const RtpPacketReceived& packet));
+  MOCK_METHOD2(ReceivedRTCPPacket, bool(const uint8_t* packet, size_t length));
+  MOCK_METHOD1(SetChannelOutputVolumeScaling, void(float scaling));
+  MOCK_METHOD1(SetRtcEventLog, void(RtcEventLog* event_log));
+  MOCK_METHOD1(SetRtcpRttStats, void(RtcpRttStats* rtcp_rtt_stats));
+  MOCK_METHOD2(GetAudioFrameWithInfo,
+      AudioMixer::Source::AudioFrameInfo(int sample_rate_hz,
+                                         AudioFrame* audio_frame));
+  MOCK_CONST_METHOD0(PreferredSampleRate, int());
+  // GMock doesn't like move-only types, like std::unique_ptr.
+  virtual void ProcessAndEncodeAudio(std::unique_ptr<AudioFrame> audio_frame) {
+    ProcessAndEncodeAudioForMock(&audio_frame);
+  }
+  MOCK_METHOD1(ProcessAndEncodeAudioForMock,
+               void(std::unique_ptr<AudioFrame>* audio_frame));
+  MOCK_METHOD1(SetTransportOverhead, void(int transport_overhead_per_packet));
+  MOCK_METHOD1(AssociateSendChannel,
+               void(const ChannelProxy& send_channel_proxy));
+  MOCK_METHOD0(DisassociateSendChannel, void());
+  MOCK_CONST_METHOD2(GetRtpRtcp, void(RtpRtcp** rtp_rtcp,
+                                      RtpReceiver** rtp_receiver));
+  MOCK_CONST_METHOD0(GetPlayoutTimestamp, uint32_t());
+  MOCK_METHOD1(SetMinimumPlayoutDelay, void(int delay_ms));
+  MOCK_CONST_METHOD1(GetRecCodec, bool(CodecInst* codec_inst));
+  MOCK_METHOD1(SetReceiveCodecs,
+               void(const std::map<int, SdpAudioFormat>& codecs));
+  MOCK_METHOD1(OnTwccBasedUplinkPacketLossRate, void(float packet_loss_rate));
+  MOCK_METHOD1(OnRecoverableUplinkPacketLossRate,
+               void(float recoverable_packet_loss_rate));
+  MOCK_CONST_METHOD0(GetSources, std::vector<RtpSource>());
+  MOCK_METHOD0(StartSend, void());
+  MOCK_METHOD0(StopSend, void());
+  MOCK_METHOD0(StartPlayout, void());
+  MOCK_METHOD0(StopPlayout, void());
+};
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // AUDIO_MOCK_VOE_CHANNEL_PROXY_H_
diff --git a/audio/null_audio_poller.cc b/audio/null_audio_poller.cc
new file mode 100644
index 0000000..c22b3d8
--- /dev/null
+++ b/audio/null_audio_poller.cc
@@ -0,0 +1,66 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/null_audio_poller.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/thread.h"
+
+namespace webrtc {
+namespace internal {
+
+namespace {
+
+constexpr int64_t kPollDelayMs = 10;  // WebRTC uses 10ms by default
+
+constexpr size_t kNumChannels = 1;
+constexpr uint32_t kSamplesPerSecond = 48000;            // 48kHz
+constexpr size_t kNumSamples = kSamplesPerSecond / 100;  // 10ms of samples
+
+}  // namespace
+
+NullAudioPoller::NullAudioPoller(AudioTransport* audio_transport)
+    : audio_transport_(audio_transport),
+      reschedule_at_(rtc::TimeMillis() + kPollDelayMs) {
+  RTC_DCHECK(audio_transport);
+  OnMessage(nullptr);  // Start the poll loop.
+}
+
+NullAudioPoller::~NullAudioPoller() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  rtc::Thread::Current()->Clear(this);
+}
+
+void NullAudioPoller::OnMessage(rtc::Message* msg) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+
+  // Buffer to hold the audio samples.
+  int16_t buffer[kNumSamples * kNumChannels];
+  // Output variables from |NeedMorePlayData|.
+  size_t n_samples;
+  int64_t elapsed_time_ms;
+  int64_t ntp_time_ms;
+  audio_transport_->NeedMorePlayData(kNumSamples, sizeof(int16_t), kNumChannels,
+                                     kSamplesPerSecond, buffer, n_samples,
+                                     &elapsed_time_ms, &ntp_time_ms);
+
+  // Reschedule the next poll iteration. If, for some reason, the given
+  // reschedule time has already passed, reschedule as soon as possible.
+  int64_t now = rtc::TimeMillis();
+  if (reschedule_at_ < now) {
+    reschedule_at_ = now;
+  }
+  rtc::Thread::Current()->PostAt(RTC_FROM_HERE, reschedule_at_, this, 0);
+
+  // Loop after next will be kPollDelayMs later.
+  reschedule_at_ += kPollDelayMs;
+}
+
+}  // namespace internal
+}  // namespace webrtc
diff --git a/audio/null_audio_poller.h b/audio/null_audio_poller.h
new file mode 100644
index 0000000..b6ddf17
--- /dev/null
+++ b/audio/null_audio_poller.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_NULL_AUDIO_POLLER_H_
+#define AUDIO_NULL_AUDIO_POLLER_H_
+
+#include "modules/audio_device/include/audio_device_defines.h"
+#include "rtc_base/messagehandler.h"
+#include "rtc_base/thread_checker.h"
+
+namespace webrtc {
+namespace internal {
+
+class NullAudioPoller final : public rtc::MessageHandler {
+ public:
+  explicit NullAudioPoller(AudioTransport* audio_transport);
+  ~NullAudioPoller();
+
+ protected:
+  void OnMessage(rtc::Message* msg) override;
+
+ private:
+  rtc::ThreadChecker thread_checker_;
+  AudioTransport* const audio_transport_;
+  int64_t reschedule_at_;
+};
+
+}  // namespace internal
+}  // namespace webrtc
+
+#endif  // AUDIO_NULL_AUDIO_POLLER_H_
diff --git a/audio/remix_resample.cc b/audio/remix_resample.cc
new file mode 100644
index 0000000..52a491f
--- /dev/null
+++ b/audio/remix_resample.cc
@@ -0,0 +1,91 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/remix_resample.h"
+
+#include "audio/utility/audio_frame_operations.h"
+#include "common_audio/resampler/include/push_resampler.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/include/module_common_types.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace voe {
+
+void RemixAndResample(const AudioFrame& src_frame,
+                      PushResampler<int16_t>* resampler,
+                      AudioFrame* dst_frame) {
+  RemixAndResample(src_frame.data(), src_frame.samples_per_channel_,
+                   src_frame.num_channels_, src_frame.sample_rate_hz_,
+                   resampler, dst_frame);
+  dst_frame->timestamp_ = src_frame.timestamp_;
+  dst_frame->elapsed_time_ms_ = src_frame.elapsed_time_ms_;
+  dst_frame->ntp_time_ms_ = src_frame.ntp_time_ms_;
+}
+
+void RemixAndResample(const int16_t* src_data,
+                      size_t samples_per_channel,
+                      size_t num_channels,
+                      int sample_rate_hz,
+                      PushResampler<int16_t>* resampler,
+                      AudioFrame* dst_frame) {
+  const int16_t* audio_ptr = src_data;
+  size_t audio_ptr_num_channels = num_channels;
+  int16_t downmixed_audio[AudioFrame::kMaxDataSizeSamples];
+
+  // Downmix before resampling.
+  if (num_channels > dst_frame->num_channels_) {
+    RTC_DCHECK(num_channels == 2 || num_channels == 4)
+        << "num_channels: " << num_channels;
+    RTC_DCHECK(dst_frame->num_channels_ == 1 || dst_frame->num_channels_ == 2)
+        << "dst_frame->num_channels_: " << dst_frame->num_channels_;
+
+    AudioFrameOperations::DownmixChannels(
+        src_data, num_channels, samples_per_channel, dst_frame->num_channels_,
+        downmixed_audio);
+    audio_ptr = downmixed_audio;
+    audio_ptr_num_channels = dst_frame->num_channels_;
+  }
+
+  if (resampler->InitializeIfNeeded(sample_rate_hz, dst_frame->sample_rate_hz_,
+                                    audio_ptr_num_channels) == -1) {
+    FATAL() << "InitializeIfNeeded failed: sample_rate_hz = " << sample_rate_hz
+            << ", dst_frame->sample_rate_hz_ = " << dst_frame->sample_rate_hz_
+            << ", audio_ptr_num_channels = " << audio_ptr_num_channels;
+  }
+
+  // TODO(yujo): for muted input frames, don't resample. Either 1) allow
+  // resampler to return output length without doing the resample, so we know
+  // how much to zero here; or 2) make resampler accept a hint that the input is
+  // zeroed.
+  const size_t src_length = samples_per_channel * audio_ptr_num_channels;
+  int out_length = resampler->Resample(audio_ptr, src_length,
+                                       dst_frame->mutable_data(),
+                                       AudioFrame::kMaxDataSizeSamples);
+  if (out_length == -1) {
+    FATAL() << "Resample failed: audio_ptr = " << audio_ptr
+            << ", src_length = " << src_length
+            << ", dst_frame->mutable_data() = " << dst_frame->mutable_data();
+  }
+  dst_frame->samples_per_channel_ = out_length / audio_ptr_num_channels;
+
+  // Upmix after resampling.
+  if (num_channels == 1 && dst_frame->num_channels_ == 2) {
+    // The audio in dst_frame really is mono at this point; MonoToStereo will
+    // set this back to stereo.
+    dst_frame->num_channels_ = 1;
+    AudioFrameOperations::MonoToStereo(dst_frame);
+  }
+}
+
+}  // namespace voe
+}  // namespace webrtc
diff --git a/audio/remix_resample.h b/audio/remix_resample.h
new file mode 100644
index 0000000..ddd8086
--- /dev/null
+++ b/audio/remix_resample.h
@@ -0,0 +1,46 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_REMIX_RESAMPLE_H_
+#define AUDIO_REMIX_RESAMPLE_H_
+
+#include "common_audio/resampler/include/push_resampler.h"
+
+namespace webrtc {
+
+class AudioFrame;
+
+namespace voe {
+
+// Upmix or downmix and resample the audio to |dst_frame|. Expects |dst_frame|
+// to have its sample rate and channels members set to the desired values.
+// Updates the |samples_per_channel_| member accordingly.
+//
+// This version has an AudioFrame |src_frame| as input and sets the output
+// |timestamp_|, |elapsed_time_ms_| and |ntp_time_ms_| members equals to the
+// input ones.
+void RemixAndResample(const AudioFrame& src_frame,
+                      PushResampler<int16_t>* resampler,
+                      AudioFrame* dst_frame);
+
+// This version has a pointer to the samples |src_data| as input and receives
+// |samples_per_channel|, |num_channels| and |sample_rate_hz| of the data as
+// parameters.
+void RemixAndResample(const int16_t* src_data,
+                      size_t samples_per_channel,
+                      size_t num_channels,
+                      int sample_rate_hz,
+                      PushResampler<int16_t>* resampler,
+                      AudioFrame* dst_frame);
+
+}  // namespace voe
+}  // namespace webrtc
+
+#endif  // AUDIO_REMIX_RESAMPLE_H_
diff --git a/audio/remix_resample_unittest.cc b/audio/remix_resample_unittest.cc
new file mode 100644
index 0000000..753584b
--- /dev/null
+++ b/audio/remix_resample_unittest.cc
@@ -0,0 +1,275 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+
+#include "audio/remix_resample.h"
+#include "common_audio/resampler/include/push_resampler.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/format_macros.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace voe {
+namespace {
+
+class UtilityTest : public ::testing::Test {
+ protected:
+  UtilityTest() {
+    src_frame_.sample_rate_hz_ = 16000;
+    src_frame_.samples_per_channel_ = src_frame_.sample_rate_hz_ / 100;
+    src_frame_.num_channels_ = 1;
+    dst_frame_.CopyFrom(src_frame_);
+    golden_frame_.CopyFrom(src_frame_);
+  }
+
+  void RunResampleTest(int src_channels,
+                       int src_sample_rate_hz,
+                       int dst_channels,
+                       int dst_sample_rate_hz);
+
+  PushResampler<int16_t> resampler_;
+  AudioFrame src_frame_;
+  AudioFrame dst_frame_;
+  AudioFrame golden_frame_;
+};
+
+// Sets the signal value to increase by |data| with every sample. Floats are
+// used so non-integer values result in rounding error, but not an accumulating
+// error.
+void SetMonoFrame(float data, int sample_rate_hz, AudioFrame* frame) {
+  frame->Mute();
+  frame->num_channels_ = 1;
+  frame->sample_rate_hz_ = sample_rate_hz;
+  frame->samples_per_channel_ = rtc::CheckedDivExact(sample_rate_hz, 100);
+  int16_t* frame_data = frame->mutable_data();
+  for (size_t i = 0; i < frame->samples_per_channel_; i++) {
+    frame_data[i] = static_cast<int16_t>(data * i);
+  }
+}
+
+// Keep the existing sample rate.
+void SetMonoFrame(float data, AudioFrame* frame) {
+  SetMonoFrame(data, frame->sample_rate_hz_, frame);
+}
+
+// Sets the signal value to increase by |left| and |right| with every sample in
+// each channel respectively.
+void SetStereoFrame(float left,
+                    float right,
+                    int sample_rate_hz,
+                    AudioFrame* frame) {
+  frame->Mute();
+  frame->num_channels_ = 2;
+  frame->sample_rate_hz_ = sample_rate_hz;
+  frame->samples_per_channel_ = rtc::CheckedDivExact(sample_rate_hz, 100);
+  int16_t* frame_data = frame->mutable_data();
+  for (size_t i = 0; i < frame->samples_per_channel_; i++) {
+    frame_data[i * 2] = static_cast<int16_t>(left * i);
+    frame_data[i * 2 + 1] = static_cast<int16_t>(right * i);
+  }
+}
+
+// Keep the existing sample rate.
+void SetStereoFrame(float left, float right, AudioFrame* frame) {
+  SetStereoFrame(left, right, frame->sample_rate_hz_, frame);
+}
+
+// Sets the signal value to increase by |ch1|, |ch2|, |ch3|, |ch4| with every
+// sample in each channel respectively.
+void SetQuadFrame(float ch1,
+                  float ch2,
+                  float ch3,
+                  float ch4,
+                  int sample_rate_hz,
+                  AudioFrame* frame) {
+  frame->Mute();
+  frame->num_channels_ = 4;
+  frame->sample_rate_hz_ = sample_rate_hz;
+  frame->samples_per_channel_ = rtc::CheckedDivExact(sample_rate_hz, 100);
+  int16_t* frame_data = frame->mutable_data();
+  for (size_t i = 0; i < frame->samples_per_channel_; i++) {
+    frame_data[i * 4] = static_cast<int16_t>(ch1 * i);
+    frame_data[i * 4 + 1] = static_cast<int16_t>(ch2 * i);
+    frame_data[i * 4 + 2] = static_cast<int16_t>(ch3 * i);
+    frame_data[i * 4 + 3] = static_cast<int16_t>(ch4 * i);
+  }
+}
+
+void VerifyParams(const AudioFrame& ref_frame, const AudioFrame& test_frame) {
+  EXPECT_EQ(ref_frame.num_channels_, test_frame.num_channels_);
+  EXPECT_EQ(ref_frame.samples_per_channel_, test_frame.samples_per_channel_);
+  EXPECT_EQ(ref_frame.sample_rate_hz_, test_frame.sample_rate_hz_);
+}
+
+// Computes the best SNR based on the error between |ref_frame| and
+// |test_frame|. It allows for up to a |max_delay| in samples between the
+// signals to compensate for the resampling delay.
+float ComputeSNR(const AudioFrame& ref_frame, const AudioFrame& test_frame,
+                 size_t max_delay) {
+  VerifyParams(ref_frame, test_frame);
+  float best_snr = 0;
+  size_t best_delay = 0;
+  for (size_t delay = 0; delay <= max_delay; delay++) {
+    float mse = 0;
+    float variance = 0;
+    const int16_t* ref_frame_data = ref_frame.data();
+    const int16_t* test_frame_data = test_frame.data();
+    for (size_t i = 0; i < ref_frame.samples_per_channel_ *
+        ref_frame.num_channels_ - delay; i++) {
+      int error = ref_frame_data[i] - test_frame_data[i + delay];
+      mse += error * error;
+      variance += ref_frame_data[i] * ref_frame_data[i];
+    }
+    float snr = 100;  // We assign 100 dB to the zero-error case.
+    if (mse > 0)
+      snr = 10 * log10(variance / mse);
+    if (snr > best_snr) {
+      best_snr = snr;
+      best_delay = delay;
+    }
+  }
+  printf("SNR=%.1f dB at delay=%" PRIuS "\n", best_snr, best_delay);
+  return best_snr;
+}
+
+void VerifyFramesAreEqual(const AudioFrame& ref_frame,
+                          const AudioFrame& test_frame) {
+  VerifyParams(ref_frame, test_frame);
+  const int16_t* ref_frame_data = ref_frame.data();
+  const int16_t* test_frame_data  = test_frame.data();
+  for (size_t i = 0;
+       i < ref_frame.samples_per_channel_ * ref_frame.num_channels_; i++) {
+    EXPECT_EQ(ref_frame_data[i], test_frame_data[i]);
+  }
+}
+
+void UtilityTest::RunResampleTest(int src_channels,
+                                  int src_sample_rate_hz,
+                                  int dst_channels,
+                                  int dst_sample_rate_hz) {
+  PushResampler<int16_t> resampler;  // Create a new one with every test.
+  const int16_t kSrcCh1 = 30;  // Shouldn't overflow for any used sample rate.
+  const int16_t kSrcCh2 = 15;
+  const int16_t kSrcCh3 = 22;
+  const int16_t kSrcCh4 = 8;
+  const float resampling_factor = (1.0 * src_sample_rate_hz) /
+      dst_sample_rate_hz;
+  const float dst_ch1 = resampling_factor * kSrcCh1;
+  const float dst_ch2 = resampling_factor * kSrcCh2;
+  const float dst_ch3 = resampling_factor * kSrcCh3;
+  const float dst_ch4 = resampling_factor * kSrcCh4;
+  const float dst_stereo_to_mono = (dst_ch1 + dst_ch2) / 2;
+  const float dst_quad_to_mono = (dst_ch1 + dst_ch2 + dst_ch3 + dst_ch4) / 4;
+  const float dst_quad_to_stereo_ch1 = (dst_ch1 + dst_ch2) / 2;
+  const float dst_quad_to_stereo_ch2 = (dst_ch3 + dst_ch4) / 2;
+  if (src_channels == 1)
+    SetMonoFrame(kSrcCh1, src_sample_rate_hz, &src_frame_);
+  else if (src_channels == 2)
+    SetStereoFrame(kSrcCh1, kSrcCh2, src_sample_rate_hz, &src_frame_);
+  else
+    SetQuadFrame(kSrcCh1, kSrcCh2, kSrcCh3, kSrcCh4, src_sample_rate_hz,
+                 &src_frame_);
+
+  if (dst_channels == 1) {
+    SetMonoFrame(0, dst_sample_rate_hz, &dst_frame_);
+    if (src_channels == 1)
+      SetMonoFrame(dst_ch1, dst_sample_rate_hz, &golden_frame_);
+    else if (src_channels == 2)
+      SetMonoFrame(dst_stereo_to_mono, dst_sample_rate_hz, &golden_frame_);
+    else
+      SetMonoFrame(dst_quad_to_mono, dst_sample_rate_hz, &golden_frame_);
+  } else {
+    SetStereoFrame(0, 0, dst_sample_rate_hz, &dst_frame_);
+    if (src_channels == 1)
+      SetStereoFrame(dst_ch1, dst_ch1, dst_sample_rate_hz, &golden_frame_);
+    else if (src_channels == 2)
+      SetStereoFrame(dst_ch1, dst_ch2, dst_sample_rate_hz, &golden_frame_);
+    else
+      SetStereoFrame(dst_quad_to_stereo_ch1, dst_quad_to_stereo_ch2,
+                     dst_sample_rate_hz, &golden_frame_);
+  }
+
+  // The sinc resampler has a known delay, which we compute here. Multiplying by
+  // two gives us a crude maximum for any resampling, as the old resampler
+  // typically (but not always) has lower delay.
+  static const size_t kInputKernelDelaySamples = 16;
+  const size_t max_delay = static_cast<size_t>(
+      static_cast<double>(dst_sample_rate_hz) / src_sample_rate_hz *
+      kInputKernelDelaySamples * dst_channels * 2);
+  printf("(%d, %d Hz) -> (%d, %d Hz) ",  // SNR reported on the same line later.
+      src_channels, src_sample_rate_hz, dst_channels, dst_sample_rate_hz);
+  RemixAndResample(src_frame_, &resampler, &dst_frame_);
+
+  if (src_sample_rate_hz == 96000 && dst_sample_rate_hz == 8000) {
+    // The sinc resampler gives poor SNR at this extreme conversion, but we
+    // expect to see this rarely in practice.
+    EXPECT_GT(ComputeSNR(golden_frame_, dst_frame_, max_delay), 14.0f);
+  } else {
+    EXPECT_GT(ComputeSNR(golden_frame_, dst_frame_, max_delay), 46.0f);
+  }
+}
+
+TEST_F(UtilityTest, RemixAndResampleCopyFrameSucceeds) {
+  // Stereo -> stereo.
+  SetStereoFrame(10, 10, &src_frame_);
+  SetStereoFrame(0, 0, &dst_frame_);
+  RemixAndResample(src_frame_, &resampler_, &dst_frame_);
+  VerifyFramesAreEqual(src_frame_, dst_frame_);
+
+  // Mono -> mono.
+  SetMonoFrame(20, &src_frame_);
+  SetMonoFrame(0, &dst_frame_);
+  RemixAndResample(src_frame_, &resampler_, &dst_frame_);
+  VerifyFramesAreEqual(src_frame_, dst_frame_);
+}
+
+TEST_F(UtilityTest, RemixAndResampleMixingOnlySucceeds) {
+  // Stereo -> mono.
+  SetStereoFrame(0, 0, &dst_frame_);
+  SetMonoFrame(10, &src_frame_);
+  SetStereoFrame(10, 10, &golden_frame_);
+  RemixAndResample(src_frame_, &resampler_, &dst_frame_);
+  VerifyFramesAreEqual(dst_frame_, golden_frame_);
+
+  // Mono -> stereo.
+  SetMonoFrame(0, &dst_frame_);
+  SetStereoFrame(10, 20, &src_frame_);
+  SetMonoFrame(15, &golden_frame_);
+  RemixAndResample(src_frame_, &resampler_, &dst_frame_);
+  VerifyFramesAreEqual(golden_frame_, dst_frame_);
+}
+
+TEST_F(UtilityTest, RemixAndResampleSucceeds) {
+  const int kSampleRates[] = {8000, 16000, 32000, 44100, 48000, 96000};
+  const int kSampleRatesSize = arraysize(kSampleRates);
+  const int kSrcChannels[] = {1, 2, 4};
+  const int kSrcChannelsSize = arraysize(kSrcChannels);
+  const int kDstChannels[] = {1, 2};
+  const int kDstChannelsSize = arraysize(kDstChannels);
+
+  for (int src_rate = 0; src_rate < kSampleRatesSize; src_rate++) {
+    for (int dst_rate = 0; dst_rate < kSampleRatesSize; dst_rate++) {
+      for (int src_channel = 0; src_channel < kSrcChannelsSize;
+           src_channel++) {
+        for (int dst_channel = 0; dst_channel < kDstChannelsSize;
+             dst_channel++) {
+          RunResampleTest(kSrcChannels[src_channel], kSampleRates[src_rate],
+                          kDstChannels[dst_channel], kSampleRates[dst_rate]);
+        }
+      }
+    }
+  }
+}
+
+}  // namespace
+}  // namespace voe
+}  // namespace webrtc
diff --git a/audio/time_interval.cc b/audio/time_interval.cc
new file mode 100644
index 0000000..cc10340
--- /dev/null
+++ b/audio/time_interval.cc
@@ -0,0 +1,56 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/time_interval.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/timeutils.h"
+
+namespace webrtc {
+
+TimeInterval::TimeInterval() = default;
+TimeInterval::~TimeInterval() = default;
+
+void TimeInterval::Extend() {
+  Extend(rtc::TimeMillis());
+}
+
+void TimeInterval::Extend(int64_t time) {
+  if (!interval_) {
+    interval_.emplace(time, time);
+  } else {
+    if (time < interval_->first) {
+      interval_->first = time;
+    }
+    if (time > interval_->last) {
+      interval_->last = time;
+    }
+  }
+}
+
+void TimeInterval::Extend(const TimeInterval& other_interval) {
+  if (!other_interval.Empty()) {
+    Extend(other_interval.interval_->first);
+    Extend(other_interval.interval_->last);
+  }
+}
+
+bool TimeInterval::Empty() const {
+  return !interval_;
+}
+
+int64_t TimeInterval::Length() const {
+  RTC_DCHECK(interval_);
+  return interval_->last - interval_->first;
+}
+
+TimeInterval::Interval::Interval(int64_t first, int64_t last)
+    : first(first), last(last) {}
+
+}  // namespace webrtc
diff --git a/audio/time_interval.h b/audio/time_interval.h
new file mode 100644
index 0000000..88b2f7d
--- /dev/null
+++ b/audio/time_interval.h
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_TIME_INTERVAL_H_
+#define AUDIO_TIME_INTERVAL_H_
+
+#include <stdint.h>
+
+#include "api/optional.h"
+
+namespace webrtc {
+
+// This class logs the first and last time its Extend() function is called.
+//
+// This class is not thread-safe; Extend() calls should only be made by a
+// single thread at a time, such as within a lock or destructor.
+//
+// Example usage:
+//   // let x < y < z < u < v
+//   rtc::TimeInterval interval;
+//   ...  //   interval.Extend(); // at time x
+//   ...
+//   interval.Extend(); // at time y
+//   ...
+//   interval.Extend(); // at time u
+//   ...
+//   interval.Extend(z); // at time v
+//   ...
+//   if (!interval.Empty()) {
+//     int64_t active_time = interval.Length(); // returns (u - x)
+//   }
+class TimeInterval {
+ public:
+  TimeInterval();
+  ~TimeInterval();
+  // Extend the interval with the current time.
+  void Extend();
+  // Extend the interval with a given time.
+  void Extend(int64_t time);
+  // Take the convex hull with another interval.
+  void Extend(const TimeInterval& other_interval);
+  // True iff Extend has never been called.
+  bool Empty() const;
+  // Returns the time between the first and the last tick, in milliseconds.
+  int64_t Length() const;
+
+ private:
+  struct Interval {
+    Interval(int64_t first, int64_t last);
+
+    int64_t first, last;
+  };
+  rtc::Optional<Interval> interval_;
+};
+
+}  // namespace webrtc
+
+#endif  // AUDIO_TIME_INTERVAL_H_
diff --git a/audio/time_interval_unittest.cc b/audio/time_interval_unittest.cc
new file mode 100644
index 0000000..7f8b44e
--- /dev/null
+++ b/audio/time_interval_unittest.cc
@@ -0,0 +1,48 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/time_interval.h"
+#include "rtc_base/fakeclock.h"
+#include "rtc_base/timedelta.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(TimeIntervalTest, TimeInMs) {
+  rtc::ScopedFakeClock fake_clock;
+  TimeInterval interval;
+  interval.Extend();
+  fake_clock.AdvanceTime(rtc::TimeDelta::FromMilliseconds(100));
+  interval.Extend();
+  EXPECT_EQ(interval.Length(), 100);
+}
+
+TEST(TimeIntervalTest, Empty) {
+  TimeInterval interval;
+  EXPECT_TRUE(interval.Empty());
+  interval.Extend();
+  EXPECT_FALSE(interval.Empty());
+  interval.Extend(200);
+  EXPECT_FALSE(interval.Empty());
+}
+
+TEST(TimeIntervalTest, MonotoneIncreasing) {
+  const size_t point_count = 7;
+  const int64_t interval_points[] = {3, 2, 5, 0, 4, 1, 6};
+  const int64_t interval_differences[] = {0, 1, 3, 5, 5, 5, 6};
+  TimeInterval interval;
+  EXPECT_TRUE(interval.Empty());
+  for (size_t i = 0; i < point_count; ++i) {
+    interval.Extend(interval_points[i]);
+    EXPECT_EQ(interval_differences[i], interval.Length());
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio/transport_feedback_packet_loss_tracker.cc b/audio/transport_feedback_packet_loss_tracker.cc
new file mode 100644
index 0000000..101b6b4
--- /dev/null
+++ b/audio/transport_feedback_packet_loss_tracker.cc
@@ -0,0 +1,366 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/transport_feedback_packet_loss_tracker.h"
+
+#include <limits>
+#include <utility>
+
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/mod_ops.h"
+
+namespace {
+constexpr uint16_t kSeqNumHalf = 0x8000u;
+void UpdateCounter(size_t* counter, bool increment) {
+  if (increment) {
+    RTC_DCHECK_LT(*counter, std::numeric_limits<std::size_t>::max());
+    ++(*counter);
+  } else {
+    RTC_DCHECK_GT(*counter, 0);
+    --(*counter);
+  }
+}
+}  // namespace
+
+namespace webrtc {
+
+TransportFeedbackPacketLossTracker::TransportFeedbackPacketLossTracker(
+    int64_t max_window_size_ms,
+    size_t plr_min_num_acked_packets,
+    size_t rplr_min_num_acked_pairs)
+    : max_window_size_ms_(max_window_size_ms),
+      ref_packet_status_(packet_status_window_.begin()),
+      plr_state_(plr_min_num_acked_packets),
+      rplr_state_(rplr_min_num_acked_pairs) {
+  RTC_DCHECK_GT(max_window_size_ms, 0);
+  RTC_DCHECK_GT(plr_min_num_acked_packets, 0);
+  RTC_DCHECK_GT(rplr_min_num_acked_pairs, 0);
+  Reset();
+}
+
+void TransportFeedbackPacketLossTracker::Reset() {
+  acked_packets_ = 0;
+  plr_state_.Reset();
+  rplr_state_.Reset();
+  packet_status_window_.clear();
+  ref_packet_status_ = packet_status_window_.begin();
+}
+
+uint16_t TransportFeedbackPacketLossTracker::ReferenceSequenceNumber() const {
+  RTC_DCHECK(!packet_status_window_.empty());
+  return ref_packet_status_->first;
+}
+
+uint16_t TransportFeedbackPacketLossTracker::NewestSequenceNumber() const {
+  RTC_DCHECK(!packet_status_window_.empty());
+  return PreviousPacketStatus(packet_status_window_.end())->first;
+}
+
+void TransportFeedbackPacketLossTracker::OnPacketAdded(uint16_t seq_num,
+                                                       int64_t send_time_ms) {
+  // Sanity - time can't flow backwards.
+  RTC_DCHECK(
+      packet_status_window_.empty() ||
+      PreviousPacketStatus(packet_status_window_.end())->second.send_time_ms <=
+          send_time_ms);
+
+  if (packet_status_window_.find(seq_num) != packet_status_window_.end() ||
+      (!packet_status_window_.empty() &&
+       ForwardDiff(seq_num, NewestSequenceNumber()) <= kSeqNumHalf)) {
+    // The only way for these two to happen is when the stream lies dormant for
+    // long enough for the sequence numbers to wrap. Everything in the window in
+    // such a case would be too old to use.
+    Reset();
+  }
+
+  // Maintain a window where the newest sequence number is at most 0x7fff away
+  // from the oldest, so that would could still distinguish old/new.
+  while (!packet_status_window_.empty() &&
+         ForwardDiff(ref_packet_status_->first, seq_num) >= kSeqNumHalf) {
+    RemoveOldestPacketStatus();
+  }
+
+  SentPacket sent_packet(send_time_ms, PacketStatus::Unacked);
+  packet_status_window_.insert(packet_status_window_.end(),
+                               std::make_pair(seq_num, sent_packet));
+
+  if (packet_status_window_.size() == 1) {
+    ref_packet_status_ = packet_status_window_.cbegin();
+  }
+}
+
+void TransportFeedbackPacketLossTracker::OnPacketFeedbackVector(
+    const std::vector<PacketFeedback>& packet_feedback_vector) {
+  for (const PacketFeedback& packet : packet_feedback_vector) {
+    const auto& it = packet_status_window_.find(packet.sequence_number);
+
+    // Packets which aren't at least marked as unacked either do not belong to
+    // this media stream, or have been shifted out of window.
+    if (it == packet_status_window_.end())
+      continue;
+
+    const bool lost = packet.arrival_time_ms == PacketFeedback::kNotReceived;
+    const PacketStatus packet_status =
+        lost ? PacketStatus::Lost : PacketStatus::Received;
+
+    UpdatePacketStatus(it, packet_status);
+  }
+}
+
+rtc::Optional<float>
+TransportFeedbackPacketLossTracker::GetPacketLossRate() const {
+  return plr_state_.GetMetric();
+}
+
+rtc::Optional<float>
+TransportFeedbackPacketLossTracker::GetRecoverablePacketLossRate() const {
+  return rplr_state_.GetMetric();
+}
+
+void TransportFeedbackPacketLossTracker::UpdatePacketStatus(
+    SentPacketStatusMap::iterator it,
+    PacketStatus new_status) {
+  if (it->second.status != PacketStatus::Unacked) {
+    // Normally, packets are sent (inserted into window as "unacked"), then we
+    // receive one feedback for them.
+    // But it is possible that a packet would receive two feedbacks. Then:
+    if (it->second.status == PacketStatus::Lost &&
+        new_status == PacketStatus::Received) {
+      // If older status said that the packet was lost but newer one says it
+      // is received, we take the newer one.
+      UpdateMetrics(it, false);
+      it->second.status =
+          PacketStatus::Unacked;  // For clarity; overwritten shortly.
+    } else {
+      // If the value is unchanged or if older status said that the packet was
+      // received but the newer one says it is lost, we ignore it.
+      // The standard allows for previously-reported packets to carry
+      // no report when the reports overlap, which also looks like the
+      // packet is being reported as lost.
+      return;
+    }
+  }
+
+  // Change from UNACKED to RECEIVED/LOST.
+  it->second.status = new_status;
+  UpdateMetrics(it, true);
+
+  // Remove packets from the beginning of the window until we only hold packets,
+  // be they acked or unacked, which are not more than |max_window_size_ms|
+  // older from the newest packet. (If the packet we're now inserting into the
+  // window isn't the newest, it would not trigger any removals; the newest
+  // already removed all relevant.)
+  while (ref_packet_status_ != packet_status_window_.end() &&
+         (it->second.send_time_ms - ref_packet_status_->second.send_time_ms) >
+             max_window_size_ms_) {
+    RemoveOldestPacketStatus();
+  }
+}
+
+void TransportFeedbackPacketLossTracker::RemoveOldestPacketStatus() {
+  UpdateMetrics(ref_packet_status_, false);
+  const auto it = ref_packet_status_;
+  ref_packet_status_ = NextPacketStatus(it);
+  packet_status_window_.erase(it);
+}
+
+void TransportFeedbackPacketLossTracker::UpdateMetrics(
+    ConstPacketStatusIterator it,
+    bool apply /* false = undo */) {
+  RTC_DCHECK(it != packet_status_window_.end());
+  // Metrics are dependent on feedbacks from the other side. We don't want
+  // to update the metrics each time a packet is sent, except for the case
+  // when it shifts old sent-but-unacked-packets out of window.
+  RTC_DCHECK(!apply || it->second.status != PacketStatus::Unacked);
+
+  if (it->second.status != PacketStatus::Unacked) {
+    UpdateCounter(&acked_packets_, apply);
+  }
+
+  UpdatePlr(it, apply);
+  UpdateRplr(it, apply);
+}
+
+void TransportFeedbackPacketLossTracker::UpdatePlr(
+    ConstPacketStatusIterator it,
+    bool apply /* false = undo */) {
+  switch (it->second.status) {
+    case PacketStatus::Unacked:
+      return;
+    case PacketStatus::Received:
+      UpdateCounter(&plr_state_.num_received_packets_, apply);
+      break;
+    case PacketStatus::Lost:
+      UpdateCounter(&plr_state_.num_lost_packets_, apply);
+      break;
+    default:
+      RTC_NOTREACHED();
+  }
+}
+
+void TransportFeedbackPacketLossTracker::UpdateRplr(
+    ConstPacketStatusIterator it,
+    bool apply /* false = undo */) {
+  if (it->second.status == PacketStatus::Unacked) {
+    // Unacked packets cannot compose a pair.
+    return;
+  }
+
+  // Previous packet and current packet might compose a pair.
+  if (it != ref_packet_status_) {
+    const auto& prev = PreviousPacketStatus(it);
+    if (prev->second.status != PacketStatus::Unacked) {
+      UpdateCounter(&rplr_state_.num_acked_pairs_, apply);
+      if (prev->second.status == PacketStatus::Lost &&
+          it->second.status == PacketStatus::Received) {
+        UpdateCounter(
+            &rplr_state_.num_recoverable_losses_, apply);
+      }
+    }
+  }
+
+  // Current packet and next packet might compose a pair.
+  const auto& next = NextPacketStatus(it);
+  if (next != packet_status_window_.end() &&
+      next->second.status != PacketStatus::Unacked) {
+    UpdateCounter(&rplr_state_.num_acked_pairs_, apply);
+    if (it->second.status == PacketStatus::Lost &&
+        next->second.status == PacketStatus::Received) {
+      UpdateCounter(&rplr_state_.num_recoverable_losses_, apply);
+    }
+  }
+}
+
+TransportFeedbackPacketLossTracker::ConstPacketStatusIterator
+TransportFeedbackPacketLossTracker::PreviousPacketStatus(
+    ConstPacketStatusIterator it) const {
+  RTC_DCHECK(it != ref_packet_status_);
+  if (it == packet_status_window_.end()) {
+    // This is to make PreviousPacketStatus(packet_status_window_.end()) point
+    // to the last element.
+    it = ref_packet_status_;
+  }
+
+  if (it == packet_status_window_.begin()) {
+    // Due to the circular nature of sequence numbers, we let the iterator
+    // go to the end.
+    it = packet_status_window_.end();
+  }
+  return --it;
+}
+
+TransportFeedbackPacketLossTracker::ConstPacketStatusIterator
+TransportFeedbackPacketLossTracker::NextPacketStatus(
+    ConstPacketStatusIterator it) const {
+  RTC_DCHECK(it != packet_status_window_.end());
+  ++it;
+  if (it == packet_status_window_.end()) {
+    // Due to the circular nature of sequence numbers, we let the iterator
+    // goes back to the beginning.
+    it = packet_status_window_.begin();
+  }
+  if (it == ref_packet_status_) {
+    // This is to make the NextPacketStatus of the last element to return the
+    // beyond-the-end iterator.
+    it = packet_status_window_.end();
+  }
+  return it;
+}
+
+// TODO(minyue): This method checks the states of this class do not misbehave.
+// The method is used both in unit tests and a fuzzer test. The fuzzer test
+// is present to help finding potential errors. Once the fuzzer test shows no
+// error after long period, we can remove the fuzzer test, and move this method
+// to unit test.
+void TransportFeedbackPacketLossTracker::Validate() const {  // Testing only!
+  RTC_CHECK_EQ(plr_state_.num_received_packets_ + plr_state_.num_lost_packets_,
+               acked_packets_);
+  RTC_CHECK_LE(acked_packets_, packet_status_window_.size());
+  RTC_CHECK_LE(rplr_state_.num_recoverable_losses_,
+               rplr_state_.num_acked_pairs_);
+  RTC_CHECK_LE(rplr_state_.num_acked_pairs_, acked_packets_ - 1);
+
+  size_t unacked_packets = 0;
+  size_t received_packets = 0;
+  size_t lost_packets = 0;
+  size_t acked_pairs = 0;
+  size_t recoverable_losses = 0;
+
+  if (!packet_status_window_.empty()) {
+    ConstPacketStatusIterator it = ref_packet_status_;
+    do {
+      switch (it->second.status) {
+        case PacketStatus::Unacked:
+          ++unacked_packets;
+          break;
+        case PacketStatus::Received:
+          ++received_packets;
+          break;
+        case PacketStatus::Lost:
+          ++lost_packets;
+          break;
+        default:
+          RTC_NOTREACHED();
+      }
+
+      auto next = std::next(it);
+      if (next == packet_status_window_.end())
+        next = packet_status_window_.begin();
+
+      if (next != ref_packet_status_) {  // If we have a next packet...
+        RTC_CHECK_GE(next->second.send_time_ms, it->second.send_time_ms);
+
+        if (it->second.status != PacketStatus::Unacked &&
+            next->second.status != PacketStatus::Unacked) {
+          ++acked_pairs;
+          if (it->second.status == PacketStatus::Lost &&
+              next->second.status == PacketStatus::Received) {
+            ++recoverable_losses;
+          }
+        }
+      }
+
+      RTC_CHECK_LT(ForwardDiff(ReferenceSequenceNumber(), it->first),
+                   kSeqNumHalf);
+
+      it = next;
+    } while (it != ref_packet_status_);
+  }
+
+  RTC_CHECK_EQ(plr_state_.num_received_packets_, received_packets);
+  RTC_CHECK_EQ(plr_state_.num_lost_packets_, lost_packets);
+  RTC_CHECK_EQ(packet_status_window_.size(),
+               unacked_packets + received_packets + lost_packets);
+  RTC_CHECK_EQ(rplr_state_.num_acked_pairs_, acked_pairs);
+  RTC_CHECK_EQ(rplr_state_.num_recoverable_losses_, recoverable_losses);
+}
+
+rtc::Optional<float>
+TransportFeedbackPacketLossTracker::PlrState::GetMetric() const {
+  const size_t total = num_lost_packets_ + num_received_packets_;
+  if (total < min_num_acked_packets_) {
+    return rtc::nullopt;
+  } else {
+    return static_cast<float>(num_lost_packets_) / total;
+  }
+}
+
+rtc::Optional<float>
+TransportFeedbackPacketLossTracker::RplrState::GetMetric() const {
+  if (num_acked_pairs_ < min_num_acked_pairs_) {
+    return rtc::nullopt;
+  } else {
+    return static_cast<float>(num_recoverable_losses_) / num_acked_pairs_;
+  }
+}
+
+}  // namespace webrtc
diff --git a/audio/transport_feedback_packet_loss_tracker.h b/audio/transport_feedback_packet_loss_tracker.h
new file mode 100644
index 0000000..7e73210
--- /dev/null
+++ b/audio/transport_feedback_packet_loss_tracker.h
@@ -0,0 +1,142 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_TRANSPORT_FEEDBACK_PACKET_LOSS_TRACKER_H_
+#define AUDIO_TRANSPORT_FEEDBACK_PACKET_LOSS_TRACKER_H_
+
+#include <map>
+#include <vector>
+
+#include "api/optional.h"
+#include "modules/include/module_common_types.h"
+
+namespace webrtc {
+
+namespace rtcp {
+class TransportFeedback;
+}
+
+struct PacketFeedback;
+
+class TransportFeedbackPacketLossTracker final {
+ public:
+  // * We count up to |max_window_size_ms| from the sent
+  //   time of the latest acked packet for the calculation of the metrics.
+  // * PLR (packet-loss-rate) is reliably computable once the statuses of
+  //   |plr_min_num_acked_packets| packets are known.
+  // * RPLR (recoverable-packet-loss-rate) is reliably computable once the
+  //   statuses of |rplr_min_num_acked_pairs| pairs are known.
+  TransportFeedbackPacketLossTracker(int64_t max_window_size_ms,
+                                     size_t plr_min_num_acked_packets,
+                                     size_t rplr_min_num_acked_pairs);
+
+  void OnPacketAdded(uint16_t seq_num, int64_t send_time_ms);
+
+  void OnPacketFeedbackVector(
+      const std::vector<PacketFeedback>& packet_feedbacks_vector);
+
+  // Returns the packet loss rate, if the window has enough packet statuses to
+  // reliably compute it. Otherwise, returns empty.
+  rtc::Optional<float> GetPacketLossRate() const;
+
+  // Returns the first-order-FEC recoverable packet loss rate, if the window has
+  // enough status pairs to reliably compute it. Otherwise, returns empty.
+  rtc::Optional<float> GetRecoverablePacketLossRate() const;
+
+  // Verifies that the internal states are correct. Only used for tests.
+  void Validate() const;
+
+ private:
+  // When a packet is sent, we memorize its association with the stream by
+  // marking it as (sent-but-so-far-) unacked. If we ever receive a feedback
+  // that reports it as received/lost, we update the state and
+  // metrics accordingly.
+
+  enum class PacketStatus { Unacked = 0, Received = 1, Lost = 2 };
+  struct SentPacket {
+    SentPacket(int64_t send_time_ms, PacketStatus status)
+        : send_time_ms(send_time_ms), status(status) {}
+    int64_t send_time_ms;
+    PacketStatus status;
+  };
+  typedef std::map<uint16_t, SentPacket> SentPacketStatusMap;
+  typedef SentPacketStatusMap::const_iterator ConstPacketStatusIterator;
+
+  void Reset();
+
+  // ReferenceSequenceNumber() provides a sequence number that defines the
+  // order of packet reception info stored in |packet_status_window_|. In
+  // particular, given any sequence number |x|,
+  // (2^16 + x - ref_seq_num_) % 2^16 defines its actual position in
+  // |packet_status_window_|.
+  uint16_t ReferenceSequenceNumber() const;
+  uint16_t NewestSequenceNumber() const;
+  void UpdatePacketStatus(SentPacketStatusMap::iterator it,
+                          PacketStatus new_status);
+  void RemoveOldestPacketStatus();
+
+  void UpdateMetrics(ConstPacketStatusIterator it,
+                     bool apply /* false = undo */);
+  void UpdatePlr(ConstPacketStatusIterator it, bool apply /* false = undo */);
+  void UpdateRplr(ConstPacketStatusIterator it, bool apply /* false = undo */);
+
+  ConstPacketStatusIterator PreviousPacketStatus(
+      ConstPacketStatusIterator it) const;
+  ConstPacketStatusIterator NextPacketStatus(
+      ConstPacketStatusIterator it) const;
+
+  const int64_t max_window_size_ms_;
+  size_t acked_packets_;
+
+  SentPacketStatusMap packet_status_window_;
+  // |ref_packet_status_| points to the oldest item in |packet_status_window_|.
+  ConstPacketStatusIterator ref_packet_status_;
+
+  // Packet-loss-rate calculation (lost / all-known-packets).
+  struct PlrState {
+    explicit PlrState(size_t min_num_acked_packets)
+        : min_num_acked_packets_(min_num_acked_packets) {
+      Reset();
+    }
+    void Reset() {
+      num_received_packets_ = 0;
+      num_lost_packets_ = 0;
+    }
+    rtc::Optional<float> GetMetric() const;
+    const size_t min_num_acked_packets_;
+    size_t num_received_packets_;
+    size_t num_lost_packets_;
+  } plr_state_;
+
+  // Recoverable packet loss calculation (first-order-FEC recoverable).
+  struct RplrState {
+    explicit RplrState(size_t min_num_acked_pairs)
+        : min_num_acked_pairs_(min_num_acked_pairs) {
+      Reset();
+    }
+    void Reset() {
+      num_acked_pairs_ = 0;
+      num_recoverable_losses_ = 0;
+    }
+    rtc::Optional<float> GetMetric() const;
+    // Recoverable packets are those which were lost, but immediately followed
+    // by a properly received packet. If that second packet carried FEC,
+    // the data from the former (lost) packet could be recovered.
+    // The RPLR is calculated as the fraction of such pairs (lost-received) out
+    // of all pairs of consecutive acked packets.
+    const size_t min_num_acked_pairs_;
+    size_t num_acked_pairs_;
+    size_t num_recoverable_losses_;
+  } rplr_state_;
+};
+
+}  // namespace webrtc
+
+#endif  // AUDIO_TRANSPORT_FEEDBACK_PACKET_LOSS_TRACKER_H_
diff --git a/audio/transport_feedback_packet_loss_tracker_unittest.cc b/audio/transport_feedback_packet_loss_tracker_unittest.cc
new file mode 100644
index 0000000..8f8fe05
--- /dev/null
+++ b/audio/transport_feedback_packet_loss_tracker_unittest.cc
@@ -0,0 +1,574 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits>
+#include <memory>
+#include <numeric>
+#include <vector>
+
+#include "audio/transport_feedback_packet_loss_tracker.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "rtc_base/checks.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int64_t kDefaultSendIntervalMs = 10;
+constexpr int64_t kDefaultMaxWindowSizeMs = 500 * kDefaultSendIntervalMs;
+
+class TransportFeedbackPacketLossTrackerTest
+    : public ::testing::TestWithParam<uint16_t> {
+ public:
+  TransportFeedbackPacketLossTrackerTest() = default;
+  virtual ~TransportFeedbackPacketLossTrackerTest() = default;
+
+ protected:
+  void SendPackets(TransportFeedbackPacketLossTracker* tracker,
+                   const std::vector<uint16_t>& sequence_numbers,
+                   int64_t send_time_interval_ms,
+                   bool validate_all = true) {
+    RTC_CHECK_GE(send_time_interval_ms, 0);
+    for (uint16_t sequence_number : sequence_numbers) {
+      tracker->OnPacketAdded(sequence_number, time_ms_);
+      if (validate_all) {
+        tracker->Validate();
+      }
+      time_ms_ += send_time_interval_ms;
+    }
+
+    // We've either validated after each packet, or, for making sure the UT
+    // doesn't run too long, we might validate only at the end of the range.
+    if (!validate_all) {
+      tracker->Validate();
+    }
+  }
+
+  void SendPackets(TransportFeedbackPacketLossTracker* tracker,
+                   uint16_t first_seq_num,
+                   size_t num_of_packets,
+                   int64_t send_time_interval_ms,
+                   bool validate_all = true) {
+    RTC_CHECK_GE(send_time_interval_ms, 0);
+    std::vector<uint16_t> sequence_numbers(num_of_packets);
+    std::iota(sequence_numbers.begin(), sequence_numbers.end(), first_seq_num);
+    SendPackets(tracker, sequence_numbers, send_time_interval_ms, validate_all);
+  }
+
+  void AdvanceClock(int64_t time_delta_ms) {
+    RTC_CHECK_GT(time_delta_ms, 0);
+    time_ms_ += time_delta_ms;
+  }
+
+  void AddTransportFeedbackAndValidate(
+      TransportFeedbackPacketLossTracker* tracker,
+      uint16_t base_sequence_num,
+      const std::vector<bool>& reception_status_vec) {
+    // Any positive integer signals reception. kNotReceived signals loss.
+    // Other values are just illegal.
+    constexpr int64_t kArrivalTimeMs = 1234;
+
+    std::vector<PacketFeedback> packet_feedback_vector;
+    uint16_t seq_num = base_sequence_num;
+    for (bool received : reception_status_vec) {
+      packet_feedback_vector.emplace_back(PacketFeedback(
+          received ? kArrivalTimeMs : PacketFeedback::kNotReceived, seq_num));
+      ++seq_num;
+    }
+
+    tracker->OnPacketFeedbackVector(packet_feedback_vector);
+    tracker->Validate();
+  }
+
+  // Checks that validty is as expected. If valid, checks also that
+  // value is as expected.
+  void ValidatePacketLossStatistics(
+      const TransportFeedbackPacketLossTracker& tracker,
+      rtc::Optional<float> expected_plr,
+      rtc::Optional<float> expected_rplr) {
+    // TODO(eladalon): Comparing the rtc::Optional<float> directly would have
+    // given concise code, but less readable error messages. If we modify
+    // the way rtc::Optional is printed, we can get rid of this.
+    rtc::Optional<float> plr = tracker.GetPacketLossRate();
+    EXPECT_EQ(static_cast<bool>(expected_plr), static_cast<bool>(plr));
+    if (expected_plr && plr) {
+      EXPECT_EQ(*expected_plr, *plr);
+    }
+
+    rtc::Optional<float> rplr = tracker.GetRecoverablePacketLossRate();
+    EXPECT_EQ(static_cast<bool>(expected_rplr), static_cast<bool>(rplr));
+    if (expected_rplr && rplr) {
+      EXPECT_EQ(*expected_rplr, *rplr);
+    }
+  }
+
+  uint16_t base_{GetParam()};
+
+ private:
+  int64_t time_ms_{0};
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(TransportFeedbackPacketLossTrackerTest);
+};
+
+}  // namespace
+
+// Sanity check on an empty window.
+TEST_P(TransportFeedbackPacketLossTrackerTest, EmptyWindow) {
+  TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 5, 5);
+
+  // PLR and RPLR reported as unknown before reception of first feedback.
+  ValidatePacketLossStatistics(tracker, rtc::nullopt, rtc::nullopt);
+}
+
+// A feedback received for an empty window has no effect.
+TEST_P(TransportFeedbackPacketLossTrackerTest, EmptyWindowFeedback) {
+  TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 3, 2);
+
+  // Feedback doesn't correspond to any packets - ignored.
+  AddTransportFeedbackAndValidate(&tracker, base_, {true, false, true});
+  ValidatePacketLossStatistics(tracker, rtc::nullopt, rtc::nullopt);
+
+  // After the packets are transmitted, acking them would have an effect.
+  SendPackets(&tracker, base_, 3, kDefaultSendIntervalMs);
+  AddTransportFeedbackAndValidate(&tracker, base_, {true, false, true});
+  ValidatePacketLossStatistics(tracker, 1.0f / 3.0f, 0.5f);
+}
+
+// Sanity check on partially filled window.
+TEST_P(TransportFeedbackPacketLossTrackerTest, PartiallyFilledWindow) {
+  TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 5, 4);
+
+  // PLR unknown before minimum window size reached.
+  // RPLR unknown before minimum pairs reached.
+  // Expected window contents: [] -> [1001].
+  SendPackets(&tracker, base_, 3, kDefaultSendIntervalMs);
+  AddTransportFeedbackAndValidate(&tracker, base_, {true, false, false, true});
+  ValidatePacketLossStatistics(tracker, rtc::nullopt, rtc::nullopt);
+}
+
+// Sanity check on minimum filled window - PLR known, RPLR unknown.
+TEST_P(TransportFeedbackPacketLossTrackerTest, PlrMinimumFilledWindow) {
+  TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 5, 5);
+
+  // PLR correctly calculated after minimum window size reached.
+  // RPLR not necessarily known at that time (not if min-pairs not reached).
+  // Expected window contents: [] -> [10011].
+  SendPackets(&tracker, base_, 5, kDefaultSendIntervalMs);
+  AddTransportFeedbackAndValidate(&tracker, base_,
+                                  {true, false, false, true, true});
+  ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, rtc::nullopt);
+}
+
+// Sanity check on minimum filled window - PLR unknown, RPLR known.
+TEST_P(TransportFeedbackPacketLossTrackerTest, RplrMinimumFilledWindow) {
+  TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 6, 4);
+
+  // RPLR correctly calculated after minimum pairs reached.
+  // PLR not necessarily known at that time (not if min window not reached).
+  // Expected window contents: [] -> [10011].
+  SendPackets(&tracker, base_, 5, kDefaultSendIntervalMs);
+  AddTransportFeedbackAndValidate(&tracker, base_,
+                                  {true, false, false, true, true});
+  ValidatePacketLossStatistics(tracker, rtc::nullopt, 1.0f / 4.0f);
+}
+
+// If packets are sent close enough together that the clock reading for both
+// is the same, that's handled properly.
+TEST_P(TransportFeedbackPacketLossTrackerTest, SameSentTime) {
+  TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 3, 2);
+
+  // Expected window contents: [] -> [101].
+  SendPackets(&tracker, base_, 3, 0);  // Note: time interval = 0ms.
+  AddTransportFeedbackAndValidate(&tracker, base_, {true, false, true});
+
+  ValidatePacketLossStatistics(tracker, 1.0f / 3.0f, 0.5f);
+}
+
+// Additional reports update PLR and RPLR.
+TEST_P(TransportFeedbackPacketLossTrackerTest, ExtendWindow) {
+  TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 5, 5);
+
+  SendPackets(&tracker, base_, 25, kDefaultSendIntervalMs);
+
+  // Expected window contents: [] -> [10011].
+  AddTransportFeedbackAndValidate(&tracker, base_,
+                                  {true, false, false, true, true});
+  ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, rtc::nullopt);
+
+  // Expected window contents: [10011] -> [1001110101].
+  AddTransportFeedbackAndValidate(&tracker, base_ + 5,
+                                  {true, false, true, false, true});
+  ValidatePacketLossStatistics(tracker, 4.0f / 10.0f, 3.0f / 9.0f);
+
+  // Expected window contents: [1001110101] -> [1001110101-GAP-10001].
+  AddTransportFeedbackAndValidate(&tracker, base_ + 20,
+                                  {true, false, false, false, true});
+  ValidatePacketLossStatistics(tracker, 7.0f / 15.0f, 4.0f / 13.0f);
+}
+
+// Correct calculation with different packet lengths.
+TEST_P(TransportFeedbackPacketLossTrackerTest, DifferentSentIntervals) {
+  TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 5, 4);
+
+  int64_t frames[] = {20, 60, 120, 20, 60};
+  for (size_t i = 0; i < sizeof(frames) / sizeof(frames[0]); i++) {
+    SendPackets(&tracker, {static_cast<uint16_t>(base_ + i)}, frames[i]);
+  }
+
+  // Expected window contents: [] -> [10011].
+  AddTransportFeedbackAndValidate(&tracker, base_,
+                                  {true, false, false, true, true});
+  ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 1.0f / 4.0f);
+}
+
+// The window retains information up to sent times that exceed the the max
+// window size. The oldest packets get shifted out of window to make room
+// for the newer ones.
+TEST_P(TransportFeedbackPacketLossTrackerTest, MaxWindowSize) {
+  TransportFeedbackPacketLossTracker tracker(4 * kDefaultSendIntervalMs, 5, 1);
+
+  SendPackets(&tracker, base_, 6, kDefaultSendIntervalMs, true);
+
+  // Up to the maximum time-span retained (first + 4 * kDefaultSendIntervalMs).
+  // Expected window contents: [] -> [01001].
+  AddTransportFeedbackAndValidate(&tracker, base_,
+                                  {false, true, false, false, true});
+  ValidatePacketLossStatistics(tracker, 3.0f / 5.0f, 2.0f / 4.0f);
+
+  // After the maximum time-span, older entries are discarded to accommodate
+  // newer ones.
+  // Expected window contents: [01001] -> [10011].
+  AddTransportFeedbackAndValidate(&tracker, base_ + 5, {true});
+  ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 1.0f / 4.0f);
+}
+
+// All packets received.
+TEST_P(TransportFeedbackPacketLossTrackerTest, AllReceived) {
+  TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 5, 4);
+
+  // Expected window contents: [] -> [11111].
+  SendPackets(&tracker, base_, 5, kDefaultSendIntervalMs);
+  AddTransportFeedbackAndValidate(&tracker, base_,
+                                  {true, true, true, true, true});
+  ValidatePacketLossStatistics(tracker, 0.0f, 0.0f);
+}
+
+// All packets lost.
+TEST_P(TransportFeedbackPacketLossTrackerTest, AllLost) {
+  TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 5, 4);
+
+  // Note: The last packet in the feedback does not belong to the stream.
+  // It's only there because we're not allowed to end a feedback with a loss.
+  // Expected window contents: [] -> [00000].
+  SendPackets(&tracker, base_, 5, kDefaultSendIntervalMs);
+  AddTransportFeedbackAndValidate(&tracker, base_,
+                                  {false, false, false, false, false, true});
+  ValidatePacketLossStatistics(tracker, 1.0f, 0.0f);
+}
+
+// Repeated reports are ignored.
+TEST_P(TransportFeedbackPacketLossTrackerTest, ReportRepetition) {
+  TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 5, 4);
+
+  SendPackets(&tracker, base_, 5, kDefaultSendIntervalMs);
+
+  // Expected window contents: [] -> [10011].
+  AddTransportFeedbackAndValidate(&tracker, base_,
+                                  {true, false, false, true, true});
+  ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 1.0f / 4.0f);
+
+  // Repeat entire previous feedback
+  // Expected window contents: [10011] -> [10011].
+  AddTransportFeedbackAndValidate(&tracker, base_,
+                                  {true, false, false, true, true});
+  ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 1.0f / 4.0f);
+}
+
+// Report overlap.
+TEST_P(TransportFeedbackPacketLossTrackerTest, ReportOverlap) {
+  TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 5, 1);
+
+  SendPackets(&tracker, base_, 15, kDefaultSendIntervalMs);
+
+  // Expected window contents: [] -> [10011].
+  AddTransportFeedbackAndValidate(&tracker, base_,
+                                  {true, false, false, true, true});
+  ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 1.0f / 4.0f);
+
+  // Expected window contents: [10011] -> [1001101].
+  AddTransportFeedbackAndValidate(&tracker, base_ + 3,
+                                  {true, true, false, true});
+  ValidatePacketLossStatistics(tracker, 3.0f / 7.0f, 2.0f / 6.0f);
+}
+
+// Report conflict.
+TEST_P(TransportFeedbackPacketLossTrackerTest, ReportConflict) {
+  TransportFeedbackPacketLossTracker tracker(kDefaultMaxWindowSizeMs, 5, 4);
+
+  SendPackets(&tracker, base_, 15, 10);
+
+  // Expected window contents: [] -> [01001].
+  AddTransportFeedbackAndValidate(&tracker, base_,
+                                  {false, true, false, false, true});
+  ValidatePacketLossStatistics(tracker, 3.0f / 5.0f, 2.0f / 4.0f);
+
+  // Expected window contents: [01001] -> [11101].
+  // While false->true will be applied, true -> false will be ignored.
+  AddTransportFeedbackAndValidate(&tracker, base_, {true, false, true});
+  ValidatePacketLossStatistics(tracker, 1.0f / 5.0f, 1.0f / 4.0f);
+}
+
+// Skipped packets treated as unknown (not lost).
+TEST_P(TransportFeedbackPacketLossTrackerTest, SkippedPackets) {
+  TransportFeedbackPacketLossTracker tracker(200 * kDefaultSendIntervalMs, 5,
+                                             1);
+
+  SendPackets(&tracker, base_, 200, kDefaultSendIntervalMs);
+
+  // Expected window contents: [] -> [10011].
+  AddTransportFeedbackAndValidate(&tracker, base_,
+                                  {true, false, false, true, true});
+  ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 1.0f / 4.0f);
+
+  // Expected window contents: [10011] -> [10011-GAP-101].
+  AddTransportFeedbackAndValidate(&tracker, base_ + 100, {true, false, true});
+  ValidatePacketLossStatistics(tracker, 3.0f / 8.0f, 2.0f / 6.0f);
+}
+
+// Moving a window, if it excludes some old acked messages, can leave
+// in-window unacked messages intact, and ready to be used later.
+TEST_P(TransportFeedbackPacketLossTrackerTest, MovedWindowRetainsRelevantInfo) {
+  constexpr int64_t max_window_size_ms = 100;
+  TransportFeedbackPacketLossTracker tracker(max_window_size_ms, 5, 1);
+
+  // Note: All messages in this test are sent 1ms apart from each other.
+  // Therefore, the delta in sequence numbers equals the timestamps delta.
+  SendPackets(&tracker, base_, 4 * max_window_size_ms, 1);
+
+  // Expected window contents: [] -> [10101].
+  AddTransportFeedbackAndValidate(&tracker, base_,
+                                  {true, false, true, false, true});
+  ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 2.0f / 4.0f);
+
+  // Expected window contents: [10101] -> [100011].
+  const int64_t moved_oldest_acked = base_ + 2 * max_window_size_ms;
+  const std::vector<bool> feedback = {true, false, false, false, true, true};
+  AddTransportFeedbackAndValidate(&tracker, moved_oldest_acked, feedback);
+  ValidatePacketLossStatistics(tracker, 3.0f / 6.0f, 1.0f / 5.0f);
+
+  // Having acked |feedback.size()| starting with |moved_oldest_acked|, the
+  // newest of the acked ones is now:
+  const int64_t moved_newest_acked = moved_oldest_acked + feedback.size() - 1;
+
+  // Messages that *are* more than the span-limit away from the newest
+  // acked message *are* too old. Acking them would have no effect.
+  AddTransportFeedbackAndValidate(
+      &tracker, moved_newest_acked - max_window_size_ms - 1, {true});
+  ValidatePacketLossStatistics(tracker, 3.0f / 6.0f, 1.0f / 5.0f);
+
+  // Messages that are *not* more than the span-limit away from the newest
+  // acked message are *not* too old. Acking them would have an effect.
+  AddTransportFeedbackAndValidate(
+      &tracker, moved_newest_acked - max_window_size_ms, {true});
+  ValidatePacketLossStatistics(tracker, 3.0f / 7.0f, 1.0f / 5.0f);
+}
+
+// Inserting feedback into the middle of a window works correctly - can
+// complete two pairs.
+TEST_P(TransportFeedbackPacketLossTrackerTest, InsertionCompletesTwoPairs) {
+  TransportFeedbackPacketLossTracker tracker(150 * kDefaultSendIntervalMs, 5,
+                                             1);
+
+  SendPackets(&tracker, base_, 15, kDefaultSendIntervalMs);
+
+  // Expected window contents: [] -> [10111].
+  AddTransportFeedbackAndValidate(&tracker, base_,
+                                  {true, false, true, true, true});
+  ValidatePacketLossStatistics(tracker, 1.0f / 5.0f, 1.0f / 4.0f);
+
+  // Expected window contents: [10111] -> [10111-GAP-10101].
+  AddTransportFeedbackAndValidate(&tracker, base_ + 7,
+                                  {true, false, true, false, true});
+  ValidatePacketLossStatistics(tracker, 3.0f / 10.0f, 3.0f / 8.0f);
+
+  // Insert in between, closing the gap completely.
+  // Expected window contents: [10111-GAP-10101] -> [101110110101].
+  AddTransportFeedbackAndValidate(&tracker, base_ + 5, {false, true});
+  ValidatePacketLossStatistics(tracker, 4.0f / 12.0f, 4.0f / 11.0f);
+}
+
+// Sequence number gaps are not gaps in reception. However, gaps in reception
+// are still possible, if a packet which WAS sent on the stream is not acked.
+TEST_P(TransportFeedbackPacketLossTrackerTest, SanityGapsInSequenceNumbers) {
+  TransportFeedbackPacketLossTracker tracker(50 * kDefaultSendIntervalMs, 5, 1);
+
+  SendPackets(&tracker,
+              {static_cast<uint16_t>(base_),
+               static_cast<uint16_t>(base_ + 2),
+               static_cast<uint16_t>(base_ + 4),
+               static_cast<uint16_t>(base_ + 6),
+               static_cast<uint16_t>(base_ + 8)},
+              kDefaultSendIntervalMs);
+
+  // Gaps in sequence numbers not considered as gaps in window, because  only
+  // those sequence numbers which were associated with the stream count.
+  // Expected window contents: [] -> [11011].
+  AddTransportFeedbackAndValidate(
+      // Note: Left packets belong to this stream, right ones ignored.
+      &tracker, base_, {true, false,
+                        true, false,
+                        false, false,
+                        true, false,
+                        true, true});
+  ValidatePacketLossStatistics(tracker, 1.0f / 5.0f, 1.0f / 4.0f);
+
+  // Create gap by sending [base + 10] but not acking it.
+  // Note: Acks for [base + 11] and [base + 13] ignored (other stream).
+  // Expected window contents: [11011] -> [11011-GAP-01].
+  SendPackets(&tracker,
+              {static_cast<uint16_t>(base_ + 10),
+               static_cast<uint16_t>(base_ + 12),
+               static_cast<uint16_t>(base_ + 14)},
+              kDefaultSendIntervalMs);
+  AddTransportFeedbackAndValidate(&tracker, base_ + 11,
+                                  {false, false, false, true, true});
+  ValidatePacketLossStatistics(tracker, 2.0f / 7.0f, 2.0f / 5.0f);
+}
+
+// The window cannot span more than 0x8000 in sequence numbers, regardless
+// of time stamps and ack/unacked status.
+TEST_P(TransportFeedbackPacketLossTrackerTest, MaxUnackedPackets) {
+  TransportFeedbackPacketLossTracker tracker(0x10000, 4, 1);
+
+  SendPackets(&tracker, base_, 0x2000, 1, false);
+
+  // Expected window contents: [] -> [10011].
+  AddTransportFeedbackAndValidate(&tracker, base_,
+                                  {true, false, false, true, true});
+  ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 1.0f / 4.0f);
+
+  // Sending more unacked packets, up to 0x7fff from the base, does not
+  // move the window or discard any information.
+  SendPackets(&tracker, static_cast<uint16_t>(base_ + 0x8000 - 0x2000), 0x2000,
+              1, false);
+  ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 1.0f / 4.0f);
+
+  // Sending more unacked packets, up to 0x7fff from the base, does not
+  // move the window or discard any information.
+  // Expected window contents: [10011] -> [0011].
+  SendPackets(&tracker, static_cast<uint16_t>(base_ + 0x8000), 1, 1);
+  ValidatePacketLossStatistics(tracker, 2.0f / 4.0f, 1.0f / 3.0f);
+}
+
+// The window holds acked packets up until the difference in timestamps between
+// the oldest and newest reaches the configured maximum. Once this maximum
+// is exceeded, old packets are shifted out of window until the maximum is
+// once again observed.
+TEST_P(TransportFeedbackPacketLossTrackerTest, TimeDifferenceMaximumObserved) {
+  constexpr int64_t max_window_size_ms = 500;
+  TransportFeedbackPacketLossTracker tracker(max_window_size_ms, 3, 1);
+
+  // Note: All messages in this test are sent 1ms apart from each other.
+  // Therefore, the delta in sequence numbers equals the timestamps delta.
+
+  // Baseline - window has acked messages.
+  // Expected window contents: [] -> [01101].
+  const std::vector<bool> feedback = {false, true, true, false, true};
+  SendPackets(&tracker, base_, feedback.size(), 1);
+  AddTransportFeedbackAndValidate(&tracker, base_, feedback);
+  ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 2.0f / 4.0f);
+
+  // Test - window base not moved.
+  // Expected window contents: [01101] -> [011011].
+  AdvanceClock(max_window_size_ms - feedback.size());
+  SendPackets(&tracker, static_cast<uint16_t>(base_ + feedback.size()), 1, 1);
+  AddTransportFeedbackAndValidate(
+      &tracker, static_cast<uint16_t>(base_ + feedback.size()), {true});
+  ValidatePacketLossStatistics(tracker, 2.0f / 6.0f, 2.0f / 5.0f);
+
+  // Another packet, sent 1ms later, would already be too late. The window will
+  // be moved, but only after the ACK is received.
+  const uint16_t new_packet_seq_num =
+      static_cast<uint16_t>(base_ + feedback.size() + 1);
+  SendPackets(&tracker, {new_packet_seq_num}, 1);
+  ValidatePacketLossStatistics(tracker, 2.0f / 6.0f, 2.0f / 5.0f);
+  // Expected window contents: [011011] -> [110111].
+  AddTransportFeedbackAndValidate(&tracker, new_packet_seq_num, {true});
+  ValidatePacketLossStatistics(tracker, 1.0f / 6.0f, 1.0f / 5.0f);
+}
+
+TEST_P(TransportFeedbackPacketLossTrackerTest, RepeatedSeqNumResetsWindow) {
+  TransportFeedbackPacketLossTracker tracker(50 * kDefaultSendIntervalMs, 2, 1);
+
+  // Baseline - window has acked messages.
+  // Expected window contents: [] -> [01101].
+  SendPackets(&tracker, base_, 5, kDefaultSendIntervalMs);
+  AddTransportFeedbackAndValidate(&tracker, base_,
+                                  {false, true, true, false, true});
+  ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 2.0f / 4.0f);
+
+  // A reset occurs.
+  SendPackets(&tracker, {static_cast<uint16_t>(base_ + 2)},
+              kDefaultSendIntervalMs);
+  ValidatePacketLossStatistics(tracker, rtc::nullopt, rtc::nullopt);
+}
+
+// The window is reset by the sending of a packet which is 0x8000 or more
+// away from the newest packet acked/unacked packet.
+TEST_P(TransportFeedbackPacketLossTrackerTest,
+       SendAfterLongSuspensionResetsWindow) {
+  TransportFeedbackPacketLossTracker tracker(50 * kDefaultSendIntervalMs, 2, 1);
+
+  // Baseline - window has acked messages.
+  // Expected window contents: [] -> [01101].
+  SendPackets(&tracker, base_, 5, kDefaultSendIntervalMs);
+  AddTransportFeedbackAndValidate(&tracker, base_,
+                                  {false, true, true, false, true});
+  ValidatePacketLossStatistics(tracker, 2.0f / 5.0f, 2.0f / 4.0f);
+
+  // A reset occurs.
+  SendPackets(&tracker, {static_cast<uint16_t>(base_ + 5 + 0x8000)},
+              kDefaultSendIntervalMs);
+  ValidatePacketLossStatistics(tracker, rtc::nullopt, rtc::nullopt);
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST(TransportFeedbackPacketLossTrackerTest, InvalidConfigMaxWindowSize) {
+  EXPECT_DEATH(TransportFeedbackPacketLossTracker tracker(0, 20, 10), "");
+}
+
+TEST(TransportFeedbackPacketLossTrackerTest, InvalidConfigPlrMinAcked) {
+  EXPECT_DEATH(TransportFeedbackPacketLossTracker tracker(5000, 0, 10), "");
+}
+
+TEST(TransportFeedbackPacketLossTrackerTest, InvalidConfigRplrMinPairs) {
+  EXPECT_DEATH(TransportFeedbackPacketLossTracker tracker(5000, 20, 0), "");
+}
+
+TEST(TransportFeedbackPacketLossTrackerTest, TimeCantFlowBackwards) {
+  TransportFeedbackPacketLossTracker tracker(5000, 2, 1);
+  tracker.OnPacketAdded(100, 0);
+  tracker.OnPacketAdded(101, 2);
+  EXPECT_DEATH(tracker.OnPacketAdded(102, 1), "");
+}
+#endif
+
+// All tests are run multiple times with various baseline sequence number,
+// to weed out potential bugs with wrap-around handling.
+constexpr uint16_t kBases[] = {0x0000, 0x3456, 0xc032, 0xfffe};
+
+INSTANTIATE_TEST_CASE_P(_,
+                        TransportFeedbackPacketLossTrackerTest,
+                        testing::ValuesIn(kBases));
+
+}  // namespace webrtc
diff --git a/audio/utility/BUILD.gn b/audio/utility/BUILD.gn
new file mode 100644
index 0000000..aa8445c
--- /dev/null
+++ b/audio/utility/BUILD.gn
@@ -0,0 +1,52 @@
+# Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+import("../../webrtc.gni")
+
+group("utility") {
+  deps = [
+    ":audio_frame_operations",
+  ]
+}
+
+rtc_static_library("audio_frame_operations") {
+  visibility = [ "*" ]
+  sources = [
+    "audio_frame_operations.cc",
+    "audio_frame_operations.h",
+  ]
+
+  deps = [
+    "../..:webrtc_common",
+    "../../:typedefs",
+    "../../modules:module_api",
+    "../../modules/audio_coding:audio_format_conversion",
+    "../../rtc_base:checks",
+    "../../rtc_base:rtc_base_approved",
+  ]
+}
+
+if (rtc_include_tests) {
+  rtc_source_set("utility_tests") {
+    testonly = true
+    sources = [
+      "audio_frame_operations_unittest.cc",
+    ]
+    deps = [
+      ":audio_frame_operations",
+      "../../modules:module_api",
+      "../../rtc_base:checks",
+      "../../rtc_base:rtc_base_approved",
+      "../../test:test_support",
+      "//testing/gtest",
+    ]
+    if (!build_with_chromium && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+  }
+}
diff --git a/audio/utility/audio_frame_operations.cc b/audio/utility/audio_frame_operations.cc
new file mode 100644
index 0000000..a7c7782
--- /dev/null
+++ b/audio/utility/audio_frame_operations.cc
@@ -0,0 +1,330 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/utility/audio_frame_operations.h"
+
+#include <algorithm>
+
+#include "modules/include/module_common_types.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+namespace {
+
+// 2.7ms @ 48kHz, 4ms @ 32kHz, 8ms @ 16kHz.
+const size_t kMuteFadeFrames = 128;
+const float kMuteFadeInc = 1.0f / kMuteFadeFrames;
+
+}  // namespace
+
+void AudioFrameOperations::Add(const AudioFrame& frame_to_add,
+                               AudioFrame* result_frame) {
+  // Sanity check.
+  RTC_DCHECK(result_frame);
+  RTC_DCHECK_GT(result_frame->num_channels_, 0);
+  RTC_DCHECK_EQ(result_frame->num_channels_, frame_to_add.num_channels_);
+
+  bool no_previous_data = result_frame->muted();
+  if (result_frame->samples_per_channel_ != frame_to_add.samples_per_channel_) {
+    // Special case we have no data to start with.
+    RTC_DCHECK_EQ(result_frame->samples_per_channel_, 0);
+    result_frame->samples_per_channel_ = frame_to_add.samples_per_channel_;
+    no_previous_data = true;
+  }
+
+  if (result_frame->vad_activity_ == AudioFrame::kVadActive ||
+      frame_to_add.vad_activity_ == AudioFrame::kVadActive) {
+    result_frame->vad_activity_ = AudioFrame::kVadActive;
+  } else if (result_frame->vad_activity_ == AudioFrame::kVadUnknown ||
+             frame_to_add.vad_activity_ == AudioFrame::kVadUnknown) {
+    result_frame->vad_activity_ = AudioFrame::kVadUnknown;
+  }
+
+  if (result_frame->speech_type_ != frame_to_add.speech_type_)
+    result_frame->speech_type_ = AudioFrame::kUndefined;
+
+  if (!frame_to_add.muted()) {
+    const int16_t* in_data = frame_to_add.data();
+    int16_t* out_data = result_frame->mutable_data();
+    size_t length =
+        frame_to_add.samples_per_channel_ * frame_to_add.num_channels_;
+    if (no_previous_data) {
+      std::copy(in_data, in_data + length, out_data);
+    } else {
+      for (size_t i = 0; i < length; i++) {
+        const int32_t wrap_guard = static_cast<int32_t>(out_data[i]) +
+                                   static_cast<int32_t>(in_data[i]);
+        out_data[i] = rtc::saturated_cast<int16_t>(wrap_guard);
+      }
+    }
+  }
+}
+
+void AudioFrameOperations::MonoToStereo(const int16_t* src_audio,
+                                        size_t samples_per_channel,
+                                        int16_t* dst_audio) {
+  for (size_t i = 0; i < samples_per_channel; i++) {
+    dst_audio[2 * i] = src_audio[i];
+    dst_audio[2 * i + 1] = src_audio[i];
+  }
+}
+
+int AudioFrameOperations::MonoToStereo(AudioFrame* frame) {
+  if (frame->num_channels_ != 1) {
+    return -1;
+  }
+  if ((frame->samples_per_channel_ * 2) >= AudioFrame::kMaxDataSizeSamples) {
+    // Not enough memory to expand from mono to stereo.
+    return -1;
+  }
+
+  if (!frame->muted()) {
+    // TODO(yujo): this operation can be done in place.
+    int16_t data_copy[AudioFrame::kMaxDataSizeSamples];
+    memcpy(data_copy, frame->data(),
+           sizeof(int16_t) * frame->samples_per_channel_);
+    MonoToStereo(data_copy, frame->samples_per_channel_, frame->mutable_data());
+  }
+  frame->num_channels_ = 2;
+
+  return 0;
+}
+
+void AudioFrameOperations::StereoToMono(const int16_t* src_audio,
+                                        size_t samples_per_channel,
+                                        int16_t* dst_audio) {
+  for (size_t i = 0; i < samples_per_channel; i++) {
+    dst_audio[i] =
+        (static_cast<int32_t>(src_audio[2 * i]) + src_audio[2 * i + 1]) >> 1;
+  }
+}
+
+int AudioFrameOperations::StereoToMono(AudioFrame* frame) {
+  if (frame->num_channels_ != 2) {
+    return -1;
+  }
+
+  RTC_DCHECK_LE(frame->samples_per_channel_ * 2,
+                AudioFrame::kMaxDataSizeSamples);
+
+  if (!frame->muted()) {
+    StereoToMono(frame->data(), frame->samples_per_channel_,
+                 frame->mutable_data());
+  }
+  frame->num_channels_ = 1;
+
+  return 0;
+}
+
+void AudioFrameOperations::QuadToStereo(const int16_t* src_audio,
+                                        size_t samples_per_channel,
+                                        int16_t* dst_audio) {
+  for (size_t i = 0; i < samples_per_channel; i++) {
+    dst_audio[i * 2] =
+        (static_cast<int32_t>(src_audio[4 * i]) + src_audio[4 * i + 1]) >> 1;
+    dst_audio[i * 2 + 1] =
+        (static_cast<int32_t>(src_audio[4 * i + 2]) + src_audio[4 * i + 3]) >>
+        1;
+  }
+}
+
+int AudioFrameOperations::QuadToStereo(AudioFrame* frame) {
+  if (frame->num_channels_ != 4) {
+    return -1;
+  }
+
+  RTC_DCHECK_LE(frame->samples_per_channel_ * 4,
+                AudioFrame::kMaxDataSizeSamples);
+
+  if (!frame->muted()) {
+    QuadToStereo(frame->data(), frame->samples_per_channel_,
+                 frame->mutable_data());
+  }
+  frame->num_channels_ = 2;
+
+  return 0;
+}
+
+void AudioFrameOperations::QuadToMono(const int16_t* src_audio,
+                                      size_t samples_per_channel,
+                                      int16_t* dst_audio) {
+  for (size_t i = 0; i < samples_per_channel; i++) {
+    dst_audio[i] =
+        (static_cast<int32_t>(src_audio[4 * i]) + src_audio[4 * i + 1] +
+         src_audio[4 * i + 2] + src_audio[4 * i + 3]) >> 2;
+  }
+}
+
+int AudioFrameOperations::QuadToMono(AudioFrame* frame) {
+  if (frame->num_channels_ != 4) {
+    return -1;
+  }
+
+  RTC_DCHECK_LE(frame->samples_per_channel_ * 4,
+                AudioFrame::kMaxDataSizeSamples);
+
+  if (!frame->muted()) {
+    QuadToMono(frame->data(), frame->samples_per_channel_,
+               frame->mutable_data());
+  }
+  frame->num_channels_ = 1;
+
+  return 0;
+}
+
+void AudioFrameOperations::DownmixChannels(const int16_t* src_audio,
+                                           size_t src_channels,
+                                           size_t samples_per_channel,
+                                           size_t dst_channels,
+                                           int16_t* dst_audio) {
+  if (src_channels == 2 && dst_channels == 1) {
+    StereoToMono(src_audio, samples_per_channel, dst_audio);
+    return;
+  } else if (src_channels == 4 && dst_channels == 2) {
+    QuadToStereo(src_audio, samples_per_channel, dst_audio);
+    return;
+  } else if (src_channels == 4 && dst_channels == 1) {
+    QuadToMono(src_audio, samples_per_channel, dst_audio);
+    return;
+  }
+
+  RTC_NOTREACHED() << "src_channels: " << src_channels
+                   << ", dst_channels: " << dst_channels;
+}
+
+int AudioFrameOperations::DownmixChannels(size_t dst_channels,
+                                          AudioFrame* frame) {
+  if (frame->num_channels_ == 2 && dst_channels == 1) {
+    return StereoToMono(frame);
+  } else if (frame->num_channels_ == 4 && dst_channels == 2) {
+    return QuadToStereo(frame);
+  } else if (frame->num_channels_ == 4 && dst_channels == 1) {
+    return QuadToMono(frame);
+  }
+
+  return -1;
+}
+
+void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) {
+  RTC_DCHECK(frame);
+  if (frame->num_channels_ != 2 || frame->muted()) {
+    return;
+  }
+
+  int16_t* frame_data = frame->mutable_data();
+  for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
+    int16_t temp_data = frame_data[i];
+    frame_data[i] = frame_data[i + 1];
+    frame_data[i + 1] = temp_data;
+  }
+}
+
+void AudioFrameOperations::Mute(AudioFrame* frame,
+                                bool previous_frame_muted,
+                                bool current_frame_muted) {
+  RTC_DCHECK(frame);
+  if (!previous_frame_muted && !current_frame_muted) {
+    // Not muted, don't touch.
+  } else if (previous_frame_muted && current_frame_muted) {
+    // Frame fully muted.
+    size_t total_samples = frame->samples_per_channel_ * frame->num_channels_;
+    RTC_DCHECK_GE(AudioFrame::kMaxDataSizeSamples, total_samples);
+    frame->Mute();
+  } else {
+    // Fade is a no-op on a muted frame.
+    if (frame->muted()) {
+      return;
+    }
+
+    // Limit number of samples to fade, if frame isn't long enough.
+    size_t count = kMuteFadeFrames;
+    float inc = kMuteFadeInc;
+    if (frame->samples_per_channel_ < kMuteFadeFrames) {
+      count = frame->samples_per_channel_;
+      if (count > 0) {
+        inc = 1.0f / count;
+      }
+    }
+
+    size_t start = 0;
+    size_t end = count;
+    float start_g = 0.0f;
+    if (current_frame_muted) {
+      // Fade out the last |count| samples of frame.
+      RTC_DCHECK(!previous_frame_muted);
+      start = frame->samples_per_channel_ - count;
+      end = frame->samples_per_channel_;
+      start_g = 1.0f;
+      inc = -inc;
+    } else {
+      // Fade in the first |count| samples of frame.
+      RTC_DCHECK(previous_frame_muted);
+    }
+
+    // Perform fade.
+    int16_t* frame_data = frame->mutable_data();
+    size_t channels = frame->num_channels_;
+    for (size_t j = 0; j < channels; ++j) {
+      float g = start_g;
+      for (size_t i = start * channels; i < end * channels; i += channels) {
+        g += inc;
+        frame_data[i + j] *= g;
+      }
+    }
+  }
+}
+
+void AudioFrameOperations::Mute(AudioFrame* frame) {
+  Mute(frame, true, true);
+}
+
+void AudioFrameOperations::ApplyHalfGain(AudioFrame* frame) {
+  RTC_DCHECK(frame);
+  RTC_DCHECK_GT(frame->num_channels_, 0);
+  if (frame->num_channels_ < 1 || frame->muted()) {
+    return;
+  }
+
+  int16_t* frame_data = frame->mutable_data();
+  for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
+       i++) {
+    frame_data[i] = frame_data[i] >> 1;
+  }
+}
+
+int AudioFrameOperations::Scale(float left, float right, AudioFrame* frame) {
+  if (frame->num_channels_ != 2) {
+    return -1;
+  } else if (frame->muted()) {
+    return 0;
+  }
+
+  int16_t* frame_data = frame->mutable_data();
+  for (size_t i = 0; i < frame->samples_per_channel_; i++) {
+    frame_data[2 * i] = static_cast<int16_t>(left * frame_data[2 * i]);
+    frame_data[2 * i + 1] = static_cast<int16_t>(right * frame_data[2 * i + 1]);
+  }
+  return 0;
+}
+
+int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame* frame) {
+  if (frame->muted()) {
+    return 0;
+  }
+
+  int16_t* frame_data = frame->mutable_data();
+  for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
+       i++) {
+    frame_data[i] = rtc::saturated_cast<int16_t>(scale * frame_data[i]);
+  }
+  return 0;
+}
+}  // namespace webrtc
diff --git a/audio/utility/audio_frame_operations.h b/audio/utility/audio_frame_operations.h
new file mode 100644
index 0000000..cd55f19
--- /dev/null
+++ b/audio/utility/audio_frame_operations.h
@@ -0,0 +1,123 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_UTILITY_AUDIO_FRAME_OPERATIONS_H_
+#define AUDIO_UTILITY_AUDIO_FRAME_OPERATIONS_H_
+
+#include <stddef.h>
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class AudioFrame;
+
+// TODO(andrew): consolidate this with utility.h and audio_frame_manipulator.h.
+// Change reference parameters to pointers. Consider using a namespace rather
+// than a class.
+class AudioFrameOperations {
+ public:
+  // Add samples in |frame_to_add| with samples in |result_frame|
+  // putting the results in |results_frame|.  The fields
+  // |vad_activity_| and |speech_type_| of the result frame are
+  // updated. If |result_frame| is empty (|samples_per_channel_|==0),
+  // the samples in |frame_to_add| are added to it.  The number of
+  // channels and number of samples per channel must match except when
+  // |result_frame| is empty.
+  static void Add(const AudioFrame& frame_to_add, AudioFrame* result_frame);
+
+  // Upmixes mono |src_audio| to stereo |dst_audio|. This is an out-of-place
+  // operation, meaning src_audio and dst_audio must point to different
+  // buffers. It is the caller's responsibility to ensure that |dst_audio| is
+  // sufficiently large.
+  static void MonoToStereo(const int16_t* src_audio,
+                           size_t samples_per_channel,
+                           int16_t* dst_audio);
+
+  // |frame.num_channels_| will be updated. This version checks for sufficient
+  // buffer size and that |num_channels_| is mono.
+  static int MonoToStereo(AudioFrame* frame);
+
+  // Downmixes stereo |src_audio| to mono |dst_audio|. This is an in-place
+  // operation, meaning |src_audio| and |dst_audio| may point to the same
+  // buffer.
+  static void StereoToMono(const int16_t* src_audio,
+                           size_t samples_per_channel,
+                           int16_t* dst_audio);
+
+  // |frame.num_channels_| will be updated. This version checks that
+  // |num_channels_| is stereo.
+  static int StereoToMono(AudioFrame* frame);
+
+  // Downmixes 4 channels |src_audio| to stereo |dst_audio|. This is an in-place
+  // operation, meaning |src_audio| and |dst_audio| may point to the same
+  // buffer.
+  static void QuadToStereo(const int16_t* src_audio,
+                           size_t samples_per_channel,
+                           int16_t* dst_audio);
+
+  // |frame.num_channels_| will be updated. This version checks that
+  // |num_channels_| is 4 channels.
+  static int QuadToStereo(AudioFrame* frame);
+
+  // Downmixes 4 channels |src_audio| to mono |dst_audio|. This is an in-place
+  // operation, meaning |src_audio| and |dst_audio| may point to the same
+  // buffer.
+  static void QuadToMono(const int16_t* src_audio,
+                         size_t samples_per_channel,
+                         int16_t* dst_audio);
+
+  // |frame.num_channels_| will be updated. This version checks that
+  // |num_channels_| is 4 channels.
+  static int QuadToMono(AudioFrame* frame);
+
+  // Downmixes |src_channels| |src_audio| to |dst_channels| |dst_audio|.
+  // This is an in-place operation, meaning |src_audio| and |dst_audio|
+  // may point to the same buffer. Supported channel combinations are
+  // Stereo to Mono, Quad to Mono, and Quad to Stereo.
+  static void DownmixChannels(const int16_t* src_audio,
+                              size_t src_channels,
+                              size_t samples_per_channel,
+                              size_t dst_channels,
+                              int16_t* dst_audio);
+
+  // |frame.num_channels_| will be updated. This version checks that
+  // |num_channels_| and |dst_channels| are valid and performs relevant
+  // downmix.  Supported channel combinations are Stereo to Mono, Quad to Mono,
+  // and Quad to Stereo.
+  static int DownmixChannels(size_t dst_channels, AudioFrame* frame);
+
+  // Swap the left and right channels of |frame|. Fails silently if |frame| is
+  // not stereo.
+  static void SwapStereoChannels(AudioFrame* frame);
+
+  // Conditionally zero out contents of |frame| for implementing audio mute:
+  //  |previous_frame_muted| &&  |current_frame_muted| - Zero out whole frame.
+  //  |previous_frame_muted| && !|current_frame_muted| - Fade-in at frame start.
+  // !|previous_frame_muted| &&  |current_frame_muted| - Fade-out at frame end.
+  // !|previous_frame_muted| && !|current_frame_muted| - Leave frame untouched.
+  static void Mute(AudioFrame* frame,
+                   bool previous_frame_muted,
+                   bool current_frame_muted);
+
+  // Zero out contents of frame.
+  static void Mute(AudioFrame* frame);
+
+  // Halve samples in |frame|.
+  static void ApplyHalfGain(AudioFrame* frame);
+
+  static int Scale(float left, float right, AudioFrame* frame);
+
+  static int ScaleWithSat(float scale, AudioFrame* frame);
+};
+
+}  // namespace webrtc
+
+#endif  // AUDIO_UTILITY_AUDIO_FRAME_OPERATIONS_H_
diff --git a/audio/utility/audio_frame_operations_unittest.cc b/audio/utility/audio_frame_operations_unittest.cc
new file mode 100644
index 0000000..6d23731
--- /dev/null
+++ b/audio/utility/audio_frame_operations_unittest.cc
@@ -0,0 +1,629 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio/utility/audio_frame_operations.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+class AudioFrameOperationsTest : public ::testing::Test {
+ protected:
+  AudioFrameOperationsTest() {
+    // Set typical values.
+    frame_.samples_per_channel_ = 320;
+    frame_.num_channels_ = 2;
+  }
+
+  AudioFrame frame_;
+};
+
+void SetFrameData(int16_t ch1,
+                  int16_t ch2,
+                  int16_t ch3,
+                  int16_t ch4,
+                  AudioFrame* frame) {
+  int16_t* frame_data = frame->mutable_data();
+  for (size_t i = 0; i < frame->samples_per_channel_ * 4; i += 4) {
+    frame_data[i] = ch1;
+    frame_data[i + 1] = ch2;
+    frame_data[i + 2] = ch3;
+    frame_data[i + 3] = ch4;
+  }
+}
+
+void SetFrameData(int16_t left, int16_t right, AudioFrame* frame) {
+  int16_t* frame_data = frame->mutable_data();
+  for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
+    frame_data[i] = left;
+    frame_data[i + 1] = right;
+  }
+}
+
+void SetFrameData(int16_t data, AudioFrame* frame) {
+  int16_t* frame_data = frame->mutable_data();
+  for (size_t i = 0;
+       i < frame->samples_per_channel_ * frame->num_channels_; i++) {
+    frame_data[i] = data;
+  }
+}
+
+void VerifyFramesAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) {
+  EXPECT_EQ(frame1.num_channels_, frame2.num_channels_);
+  EXPECT_EQ(frame1.samples_per_channel_,
+            frame2.samples_per_channel_);
+  const int16_t* frame1_data = frame1.data();
+  const int16_t* frame2_data = frame2.data();
+  for (size_t i = 0; i < frame1.samples_per_channel_ * frame1.num_channels_;
+      i++) {
+    EXPECT_EQ(frame1_data[i], frame2_data[i]);
+  }
+  EXPECT_EQ(frame1.muted(), frame2.muted());
+}
+
+void InitFrame(AudioFrame* frame, size_t channels, size_t samples_per_channel,
+               int16_t left_data, int16_t right_data) {
+  RTC_DCHECK(frame);
+  RTC_DCHECK_GE(2, channels);
+  RTC_DCHECK_GE(AudioFrame::kMaxDataSizeSamples,
+                samples_per_channel * channels);
+  frame->samples_per_channel_ = samples_per_channel;
+  frame->num_channels_ = channels;
+  if (channels == 2) {
+    SetFrameData(left_data, right_data, frame);
+  } else if (channels == 1) {
+    SetFrameData(left_data, frame);
+  }
+}
+
+int16_t GetChannelData(const AudioFrame& frame, size_t channel, size_t index) {
+  RTC_DCHECK_LT(channel, frame.num_channels_);
+  RTC_DCHECK_LT(index, frame.samples_per_channel_);
+  return frame.data()[index * frame.num_channels_ + channel];
+}
+
+void VerifyFrameDataBounds(const AudioFrame& frame, size_t channel, int16_t max,
+                           int16_t min) {
+  for (size_t i = 0; i < frame.samples_per_channel_; ++i) {
+    int16_t s = GetChannelData(frame, channel, i);
+    EXPECT_LE(min, s);
+    EXPECT_GE(max, s);
+  }
+}
+
+TEST_F(AudioFrameOperationsTest, MonoToStereoFailsWithBadParameters) {
+  EXPECT_EQ(-1, AudioFrameOperations::MonoToStereo(&frame_));
+
+  frame_.samples_per_channel_ = AudioFrame::kMaxDataSizeSamples;
+  frame_.num_channels_ = 1;
+  EXPECT_EQ(-1, AudioFrameOperations::MonoToStereo(&frame_));
+}
+
+TEST_F(AudioFrameOperationsTest, MonoToStereoSucceeds) {
+  frame_.num_channels_ = 1;
+  SetFrameData(1, &frame_);
+
+  EXPECT_EQ(0, AudioFrameOperations::MonoToStereo(&frame_));
+
+  AudioFrame stereo_frame;
+  stereo_frame.samples_per_channel_ = 320;
+  stereo_frame.num_channels_ = 2;
+  SetFrameData(1, 1, &stereo_frame);
+  VerifyFramesAreEqual(stereo_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, MonoToStereoMuted) {
+  frame_.num_channels_ = 1;
+  ASSERT_TRUE(frame_.muted());
+  EXPECT_EQ(0, AudioFrameOperations::MonoToStereo(&frame_));
+  EXPECT_TRUE(frame_.muted());
+}
+
+TEST_F(AudioFrameOperationsTest, MonoToStereoBufferSucceeds) {
+  AudioFrame target_frame;
+  frame_.num_channels_ = 1;
+  SetFrameData(4, &frame_);
+
+  target_frame.num_channels_ = 2;
+  target_frame.samples_per_channel_ = frame_.samples_per_channel_;
+
+  AudioFrameOperations::MonoToStereo(frame_.data(), frame_.samples_per_channel_,
+                                     target_frame.mutable_data());
+
+  AudioFrame stereo_frame;
+  stereo_frame.samples_per_channel_ = 320;
+  stereo_frame.num_channels_ = 2;
+  SetFrameData(4, 4, &stereo_frame);
+  VerifyFramesAreEqual(stereo_frame, target_frame);
+}
+
+TEST_F(AudioFrameOperationsTest, StereoToMonoFailsWithBadParameters) {
+  frame_.num_channels_ = 1;
+  EXPECT_EQ(-1, AudioFrameOperations::StereoToMono(&frame_));
+}
+
+TEST_F(AudioFrameOperationsTest, StereoToMonoSucceeds) {
+  SetFrameData(4, 2, &frame_);
+  EXPECT_EQ(0, AudioFrameOperations::StereoToMono(&frame_));
+
+  AudioFrame mono_frame;
+  mono_frame.samples_per_channel_ = 320;
+  mono_frame.num_channels_ = 1;
+  SetFrameData(3, &mono_frame);
+  VerifyFramesAreEqual(mono_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, StereoToMonoMuted) {
+  ASSERT_TRUE(frame_.muted());
+  EXPECT_EQ(0, AudioFrameOperations::StereoToMono(&frame_));
+  EXPECT_TRUE(frame_.muted());
+}
+
+TEST_F(AudioFrameOperationsTest, StereoToMonoBufferSucceeds) {
+  AudioFrame target_frame;
+  SetFrameData(4, 2, &frame_);
+
+  target_frame.num_channels_ = 1;
+  target_frame.samples_per_channel_ = frame_.samples_per_channel_;
+
+  AudioFrameOperations::StereoToMono(frame_.data(), frame_.samples_per_channel_,
+                                     target_frame.mutable_data());
+
+  AudioFrame mono_frame;
+  mono_frame.samples_per_channel_ = 320;
+  mono_frame.num_channels_ = 1;
+  SetFrameData(3, &mono_frame);
+  VerifyFramesAreEqual(mono_frame, target_frame);
+}
+
+TEST_F(AudioFrameOperationsTest, StereoToMonoDoesNotWrapAround) {
+  SetFrameData(-32768, -32768, &frame_);
+  EXPECT_EQ(0, AudioFrameOperations::StereoToMono(&frame_));
+
+  AudioFrame mono_frame;
+  mono_frame.samples_per_channel_ = 320;
+  mono_frame.num_channels_ = 1;
+  SetFrameData(-32768, &mono_frame);
+  VerifyFramesAreEqual(mono_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, QuadToMonoFailsWithBadParameters) {
+  frame_.num_channels_ = 1;
+  EXPECT_EQ(-1, AudioFrameOperations::QuadToMono(&frame_));
+  frame_.num_channels_ = 2;
+  EXPECT_EQ(-1, AudioFrameOperations::QuadToMono(&frame_));
+}
+
+TEST_F(AudioFrameOperationsTest, QuadToMonoSucceeds) {
+  frame_.num_channels_ = 4;
+  SetFrameData(4, 2, 6, 8, &frame_);
+
+  EXPECT_EQ(0, AudioFrameOperations::QuadToMono(&frame_));
+
+  AudioFrame mono_frame;
+  mono_frame.samples_per_channel_ = 320;
+  mono_frame.num_channels_ = 1;
+  SetFrameData(5, &mono_frame);
+  VerifyFramesAreEqual(mono_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, QuadToMonoMuted) {
+  frame_.num_channels_ = 4;
+  ASSERT_TRUE(frame_.muted());
+  EXPECT_EQ(0, AudioFrameOperations::QuadToMono(&frame_));
+  EXPECT_TRUE(frame_.muted());
+}
+
+TEST_F(AudioFrameOperationsTest, QuadToMonoBufferSucceeds) {
+  AudioFrame target_frame;
+  frame_.num_channels_ = 4;
+  SetFrameData(4, 2, 6, 8, &frame_);
+
+  target_frame.num_channels_ = 1;
+  target_frame.samples_per_channel_ = frame_.samples_per_channel_;
+
+  AudioFrameOperations::QuadToMono(frame_.data(), frame_.samples_per_channel_,
+                                   target_frame.mutable_data());
+  AudioFrame mono_frame;
+  mono_frame.samples_per_channel_ = 320;
+  mono_frame.num_channels_ = 1;
+  SetFrameData(5, &mono_frame);
+  VerifyFramesAreEqual(mono_frame, target_frame);
+}
+
+TEST_F(AudioFrameOperationsTest, QuadToMonoDoesNotWrapAround) {
+  frame_.num_channels_ = 4;
+  SetFrameData(-32768, -32768, -32768, -32768, &frame_);
+  EXPECT_EQ(0, AudioFrameOperations::QuadToMono(&frame_));
+
+  AudioFrame mono_frame;
+  mono_frame.samples_per_channel_ = 320;
+  mono_frame.num_channels_ = 1;
+  SetFrameData(-32768, &mono_frame);
+  VerifyFramesAreEqual(mono_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, QuadToStereoFailsWithBadParameters) {
+  frame_.num_channels_ = 1;
+  EXPECT_EQ(-1, AudioFrameOperations::QuadToStereo(&frame_));
+  frame_.num_channels_ = 2;
+  EXPECT_EQ(-1, AudioFrameOperations::QuadToStereo(&frame_));
+}
+
+TEST_F(AudioFrameOperationsTest, QuadToStereoSucceeds) {
+  frame_.num_channels_ = 4;
+  SetFrameData(4, 2, 6, 8, &frame_);
+  EXPECT_EQ(0, AudioFrameOperations::QuadToStereo(&frame_));
+
+  AudioFrame stereo_frame;
+  stereo_frame.samples_per_channel_ = 320;
+  stereo_frame.num_channels_ = 2;
+  SetFrameData(3, 7, &stereo_frame);
+  VerifyFramesAreEqual(stereo_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, QuadToStereoMuted) {
+  frame_.num_channels_ = 4;
+  ASSERT_TRUE(frame_.muted());
+  EXPECT_EQ(0, AudioFrameOperations::QuadToStereo(&frame_));
+  EXPECT_TRUE(frame_.muted());
+}
+
+TEST_F(AudioFrameOperationsTest, QuadToStereoBufferSucceeds) {
+  AudioFrame target_frame;
+  frame_.num_channels_ = 4;
+  SetFrameData(4, 2, 6, 8, &frame_);
+
+  target_frame.num_channels_ = 2;
+  target_frame.samples_per_channel_ = frame_.samples_per_channel_;
+
+  AudioFrameOperations::QuadToStereo(frame_.data(), frame_.samples_per_channel_,
+                                     target_frame.mutable_data());
+  AudioFrame stereo_frame;
+  stereo_frame.samples_per_channel_ = 320;
+  stereo_frame.num_channels_ = 2;
+  SetFrameData(3, 7, &stereo_frame);
+  VerifyFramesAreEqual(stereo_frame, target_frame);
+}
+
+TEST_F(AudioFrameOperationsTest, QuadToStereoDoesNotWrapAround) {
+  frame_.num_channels_ = 4;
+  SetFrameData(-32768, -32768, -32768, -32768, &frame_);
+  EXPECT_EQ(0, AudioFrameOperations::QuadToStereo(&frame_));
+
+  AudioFrame stereo_frame;
+  stereo_frame.samples_per_channel_ = 320;
+  stereo_frame.num_channels_ = 2;
+  SetFrameData(-32768, -32768, &stereo_frame);
+  VerifyFramesAreEqual(stereo_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, SwapStereoChannelsSucceedsOnStereo) {
+  SetFrameData(0, 1, &frame_);
+
+  AudioFrame swapped_frame;
+  swapped_frame.samples_per_channel_ = 320;
+  swapped_frame.num_channels_ = 2;
+  SetFrameData(1, 0, &swapped_frame);
+
+  AudioFrameOperations::SwapStereoChannels(&frame_);
+  VerifyFramesAreEqual(swapped_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, SwapStereoChannelsMuted) {
+  ASSERT_TRUE(frame_.muted());
+  AudioFrameOperations::SwapStereoChannels(&frame_);
+  EXPECT_TRUE(frame_.muted());
+}
+
+TEST_F(AudioFrameOperationsTest, SwapStereoChannelsFailsOnMono) {
+  frame_.num_channels_ = 1;
+  // Set data to "stereo", despite it being a mono frame.
+  SetFrameData(0, 1, &frame_);
+
+  AudioFrame orig_frame;
+  orig_frame.CopyFrom(frame_);
+  AudioFrameOperations::SwapStereoChannels(&frame_);
+  // Verify that no swap occurred.
+  VerifyFramesAreEqual(orig_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, MuteDisabled) {
+  SetFrameData(1000, -1000, &frame_);
+  AudioFrameOperations::Mute(&frame_, false, false);
+
+  AudioFrame muted_frame;
+  muted_frame.samples_per_channel_ = 320;
+  muted_frame.num_channels_ = 2;
+  SetFrameData(1000, -1000, &muted_frame);
+  VerifyFramesAreEqual(muted_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, MuteEnabled) {
+  SetFrameData(1000, -1000, &frame_);
+  AudioFrameOperations::Mute(&frame_, true, true);
+
+  AudioFrame muted_frame;
+  muted_frame.samples_per_channel_ = frame_.samples_per_channel_;
+  muted_frame.num_channels_ = frame_.num_channels_;
+  ASSERT_TRUE(muted_frame.muted());
+  VerifyFramesAreEqual(muted_frame, frame_);
+}
+
+// Verify that *beginning* to mute works for short and long (>128) frames, mono
+// and stereo. Beginning mute should yield a ramp down to zero.
+TEST_F(AudioFrameOperationsTest, MuteBeginMonoLong) {
+  InitFrame(&frame_, 1, 228, 1000, -1000);
+  AudioFrameOperations::Mute(&frame_, false, true);
+  VerifyFrameDataBounds(frame_, 0, 1000, 0);
+  EXPECT_EQ(1000, GetChannelData(frame_, 0, 99));
+  EXPECT_EQ(992, GetChannelData(frame_, 0, 100));
+  EXPECT_EQ(7, GetChannelData(frame_, 0, 226));
+  EXPECT_EQ(0, GetChannelData(frame_, 0, 227));
+}
+
+TEST_F(AudioFrameOperationsTest, MuteBeginMonoShort) {
+  InitFrame(&frame_, 1, 93, 1000, -1000);
+  AudioFrameOperations::Mute(&frame_, false, true);
+  VerifyFrameDataBounds(frame_, 0, 1000, 0);
+  EXPECT_EQ(989, GetChannelData(frame_, 0, 0));
+  EXPECT_EQ(978, GetChannelData(frame_, 0, 1));
+  EXPECT_EQ(10, GetChannelData(frame_, 0, 91));
+  EXPECT_EQ(0, GetChannelData(frame_, 0, 92));
+}
+
+TEST_F(AudioFrameOperationsTest, MuteBeginStereoLong) {
+  InitFrame(&frame_, 2, 228, 1000, -1000);
+  AudioFrameOperations::Mute(&frame_, false, true);
+  VerifyFrameDataBounds(frame_, 0, 1000, 0);
+  VerifyFrameDataBounds(frame_, 1, 0, -1000);
+  EXPECT_EQ(1000, GetChannelData(frame_, 0, 99));
+  EXPECT_EQ(-1000, GetChannelData(frame_, 1, 99));
+  EXPECT_EQ(992, GetChannelData(frame_, 0, 100));
+  EXPECT_EQ(-992, GetChannelData(frame_, 1, 100));
+  EXPECT_EQ(7, GetChannelData(frame_, 0, 226));
+  EXPECT_EQ(-7, GetChannelData(frame_, 1, 226));
+  EXPECT_EQ(0, GetChannelData(frame_, 0, 227));
+  EXPECT_EQ(0, GetChannelData(frame_, 1, 227));
+}
+
+TEST_F(AudioFrameOperationsTest, MuteBeginStereoShort) {
+  InitFrame(&frame_, 2, 93, 1000, -1000);
+  AudioFrameOperations::Mute(&frame_, false, true);
+  VerifyFrameDataBounds(frame_, 0, 1000, 0);
+  VerifyFrameDataBounds(frame_, 1, 0, -1000);
+  EXPECT_EQ(989, GetChannelData(frame_, 0, 0));
+  EXPECT_EQ(-989, GetChannelData(frame_, 1, 0));
+  EXPECT_EQ(978, GetChannelData(frame_, 0, 1));
+  EXPECT_EQ(-978, GetChannelData(frame_, 1, 1));
+  EXPECT_EQ(10, GetChannelData(frame_, 0, 91));
+  EXPECT_EQ(-10, GetChannelData(frame_, 1, 91));
+  EXPECT_EQ(0, GetChannelData(frame_, 0, 92));
+  EXPECT_EQ(0, GetChannelData(frame_, 1, 92));
+}
+
+// Verify that *ending* to mute works for short and long (>128) frames, mono
+// and stereo. Ending mute should yield a ramp up from zero.
+TEST_F(AudioFrameOperationsTest, MuteEndMonoLong) {
+  InitFrame(&frame_, 1, 228, 1000, -1000);
+  AudioFrameOperations::Mute(&frame_, true, false);
+  VerifyFrameDataBounds(frame_, 0, 1000, 0);
+  EXPECT_EQ(7, GetChannelData(frame_, 0, 0));
+  EXPECT_EQ(15, GetChannelData(frame_, 0, 1));
+  EXPECT_EQ(1000, GetChannelData(frame_, 0, 127));
+  EXPECT_EQ(1000, GetChannelData(frame_, 0, 128));
+}
+
+TEST_F(AudioFrameOperationsTest, MuteEndMonoShort) {
+  InitFrame(&frame_, 1, 93, 1000, -1000);
+  AudioFrameOperations::Mute(&frame_, true, false);
+  VerifyFrameDataBounds(frame_, 0, 1000, 0);
+  EXPECT_EQ(10, GetChannelData(frame_, 0, 0));
+  EXPECT_EQ(21, GetChannelData(frame_, 0, 1));
+  EXPECT_EQ(989, GetChannelData(frame_, 0, 91));
+  EXPECT_EQ(999, GetChannelData(frame_, 0, 92));
+}
+
+TEST_F(AudioFrameOperationsTest, MuteEndStereoLong) {
+  InitFrame(&frame_, 2, 228, 1000, -1000);
+  AudioFrameOperations::Mute(&frame_, true, false);
+  VerifyFrameDataBounds(frame_, 0, 1000, 0);
+  VerifyFrameDataBounds(frame_, 1, 0, -1000);
+  EXPECT_EQ(7, GetChannelData(frame_, 0, 0));
+  EXPECT_EQ(-7, GetChannelData(frame_, 1, 0));
+  EXPECT_EQ(15, GetChannelData(frame_, 0, 1));
+  EXPECT_EQ(-15, GetChannelData(frame_, 1, 1));
+  EXPECT_EQ(1000, GetChannelData(frame_, 0, 127));
+  EXPECT_EQ(-1000, GetChannelData(frame_, 1, 127));
+  EXPECT_EQ(1000, GetChannelData(frame_, 0, 128));
+  EXPECT_EQ(-1000, GetChannelData(frame_, 1, 128));
+}
+
+TEST_F(AudioFrameOperationsTest, MuteEndStereoShort) {
+  InitFrame(&frame_, 2, 93, 1000, -1000);
+  AudioFrameOperations::Mute(&frame_, true, false);
+  VerifyFrameDataBounds(frame_, 0, 1000, 0);
+  VerifyFrameDataBounds(frame_, 1, 0, -1000);
+  EXPECT_EQ(10, GetChannelData(frame_, 0, 0));
+  EXPECT_EQ(-10, GetChannelData(frame_, 1, 0));
+  EXPECT_EQ(21, GetChannelData(frame_, 0, 1));
+  EXPECT_EQ(-21, GetChannelData(frame_, 1, 1));
+  EXPECT_EQ(989, GetChannelData(frame_, 0, 91));
+  EXPECT_EQ(-989, GetChannelData(frame_, 1, 91));
+  EXPECT_EQ(999, GetChannelData(frame_, 0, 92));
+  EXPECT_EQ(-999, GetChannelData(frame_, 1, 92));
+}
+
+TEST_F(AudioFrameOperationsTest, MuteBeginAlreadyMuted) {
+  ASSERT_TRUE(frame_.muted());
+  AudioFrameOperations::Mute(&frame_, false, true);
+  EXPECT_TRUE(frame_.muted());
+}
+
+TEST_F(AudioFrameOperationsTest, MuteEndAlreadyMuted) {
+  ASSERT_TRUE(frame_.muted());
+  AudioFrameOperations::Mute(&frame_, true, false);
+  EXPECT_TRUE(frame_.muted());
+}
+
+TEST_F(AudioFrameOperationsTest, ApplyHalfGainSucceeds) {
+  SetFrameData(2, &frame_);
+
+  AudioFrame half_gain_frame;
+  half_gain_frame.num_channels_ = frame_.num_channels_;
+  half_gain_frame.samples_per_channel_ = frame_.samples_per_channel_;
+  SetFrameData(1, &half_gain_frame);
+
+  AudioFrameOperations::ApplyHalfGain(&frame_);
+  VerifyFramesAreEqual(half_gain_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, ApplyHalfGainMuted) {
+  ASSERT_TRUE(frame_.muted());
+  AudioFrameOperations::ApplyHalfGain(&frame_);
+  EXPECT_TRUE(frame_.muted());
+}
+
+// TODO(andrew): should not allow negative scales.
+TEST_F(AudioFrameOperationsTest, DISABLED_ScaleFailsWithBadParameters) {
+  frame_.num_channels_ = 1;
+  EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, 1.0, &frame_));
+
+  frame_.num_channels_ = 3;
+  EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, 1.0, &frame_));
+
+  frame_.num_channels_ = 2;
+  EXPECT_EQ(-1, AudioFrameOperations::Scale(-1.0, 1.0, &frame_));
+  EXPECT_EQ(-1, AudioFrameOperations::Scale(1.0, -1.0, &frame_));
+}
+
+// TODO(andrew): fix the wraparound bug. We should always saturate.
+TEST_F(AudioFrameOperationsTest, DISABLED_ScaleDoesNotWrapAround) {
+  SetFrameData(4000, -4000, &frame_);
+  EXPECT_EQ(0, AudioFrameOperations::Scale(10.0, 10.0, &frame_));
+
+  AudioFrame clipped_frame;
+  clipped_frame.samples_per_channel_ = 320;
+  clipped_frame.num_channels_ = 2;
+  SetFrameData(32767, -32768, &clipped_frame);
+  VerifyFramesAreEqual(clipped_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, ScaleSucceeds) {
+  SetFrameData(1, -1, &frame_);
+  EXPECT_EQ(0, AudioFrameOperations::Scale(2.0, 3.0, &frame_));
+
+  AudioFrame scaled_frame;
+  scaled_frame.samples_per_channel_ = 320;
+  scaled_frame.num_channels_ = 2;
+  SetFrameData(2, -3, &scaled_frame);
+  VerifyFramesAreEqual(scaled_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, ScaleMuted) {
+  ASSERT_TRUE(frame_.muted());
+  EXPECT_EQ(0, AudioFrameOperations::Scale(2.0, 3.0, &frame_));
+  EXPECT_TRUE(frame_.muted());
+}
+
+// TODO(andrew): should fail with a negative scale.
+TEST_F(AudioFrameOperationsTest, DISABLED_ScaleWithSatFailsWithBadParameters) {
+  EXPECT_EQ(-1, AudioFrameOperations::ScaleWithSat(-1.0, &frame_));
+}
+
+TEST_F(AudioFrameOperationsTest, ScaleWithSatDoesNotWrapAround) {
+  frame_.num_channels_ = 1;
+  SetFrameData(4000, &frame_);
+  EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(10.0, &frame_));
+
+  AudioFrame clipped_frame;
+  clipped_frame.samples_per_channel_ = 320;
+  clipped_frame.num_channels_ = 1;
+  SetFrameData(32767, &clipped_frame);
+  VerifyFramesAreEqual(clipped_frame, frame_);
+
+  SetFrameData(-4000, &frame_);
+  EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(10.0, &frame_));
+  SetFrameData(-32768, &clipped_frame);
+  VerifyFramesAreEqual(clipped_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, ScaleWithSatSucceeds) {
+  frame_.num_channels_ = 1;
+  SetFrameData(1, &frame_);
+  EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(2.0, &frame_));
+
+  AudioFrame scaled_frame;
+  scaled_frame.samples_per_channel_ = 320;
+  scaled_frame.num_channels_ = 1;
+  SetFrameData(2, &scaled_frame);
+  VerifyFramesAreEqual(scaled_frame, frame_);
+}
+
+TEST_F(AudioFrameOperationsTest, ScaleWithSatMuted) {
+  ASSERT_TRUE(frame_.muted());
+  EXPECT_EQ(0, AudioFrameOperations::ScaleWithSat(2.0, &frame_));
+  EXPECT_TRUE(frame_.muted());
+}
+
+TEST_F(AudioFrameOperationsTest, AddingXToEmptyGivesX) {
+  // When samples_per_channel_ is 0, the frame counts as empty and zero.
+  AudioFrame frame_to_add_to;
+  frame_to_add_to.mutable_data();  // Unmute the frame.
+  ASSERT_FALSE(frame_to_add_to.muted());
+  frame_to_add_to.samples_per_channel_ = 0;
+  frame_to_add_to.num_channels_ = frame_.num_channels_;
+
+  SetFrameData(1000, &frame_);
+  AudioFrameOperations::Add(frame_, &frame_to_add_to);
+  VerifyFramesAreEqual(frame_, frame_to_add_to);
+}
+
+TEST_F(AudioFrameOperationsTest, AddingXToMutedGivesX) {
+  AudioFrame frame_to_add_to;
+  ASSERT_TRUE(frame_to_add_to.muted());
+  frame_to_add_to.samples_per_channel_ = frame_.samples_per_channel_;
+  frame_to_add_to.num_channels_ = frame_.num_channels_;
+
+  SetFrameData(1000, &frame_);
+  AudioFrameOperations::Add(frame_, &frame_to_add_to);
+  VerifyFramesAreEqual(frame_, frame_to_add_to);
+}
+
+TEST_F(AudioFrameOperationsTest, AddingMutedToXGivesX) {
+  AudioFrame frame_to_add_to;
+  frame_to_add_to.samples_per_channel_ = frame_.samples_per_channel_;
+  frame_to_add_to.num_channels_ = frame_.num_channels_;
+  SetFrameData(1000, &frame_to_add_to);
+
+  AudioFrame frame_copy;
+  frame_copy.CopyFrom(frame_to_add_to);
+
+  ASSERT_TRUE(frame_.muted());
+  AudioFrameOperations::Add(frame_, &frame_to_add_to);
+  VerifyFramesAreEqual(frame_copy, frame_to_add_to);
+}
+
+TEST_F(AudioFrameOperationsTest, AddingTwoFramesProducesTheirSum) {
+  AudioFrame frame_to_add_to;
+  frame_to_add_to.samples_per_channel_ = frame_.samples_per_channel_;
+  frame_to_add_to.num_channels_ = frame_.num_channels_;
+  SetFrameData(1000, &frame_to_add_to);
+  SetFrameData(2000, &frame_);
+
+  AudioFrameOperations::Add(frame_, &frame_to_add_to);
+  SetFrameData(frame_.data()[0] + 1000, &frame_);
+  VerifyFramesAreEqual(frame_, frame_to_add_to);
+}
+
+}  // namespace
+}  // namespace webrtc
diff --git a/common_audio/BUILD.gn b/common_audio/BUILD.gn
new file mode 100644
index 0000000..13b1e55
--- /dev/null
+++ b/common_audio/BUILD.gn
@@ -0,0 +1,470 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("//build/config/arm.gni")
+import("../webrtc.gni")
+
+visibility = [ ":*" ]
+
+config("common_audio_config") {
+  include_dirs = [
+    "resampler/include",
+    "signal_processing/include",
+    "vad/include",
+  ]
+}
+
+rtc_static_library("common_audio") {
+  visibility += [ "*" ]
+  sources = [
+    "audio_converter.cc",
+    "audio_converter.h",
+    "audio_ring_buffer.cc",
+    "audio_ring_buffer.h",
+    "audio_util.cc",
+    "blocker.cc",
+    "blocker.h",
+    "channel_buffer.cc",
+    "channel_buffer.h",
+    "include/audio_util.h",
+    "lapped_transform.cc",
+    "lapped_transform.h",
+    "real_fourier.cc",
+    "real_fourier.h",
+    "real_fourier_ooura.cc",
+    "real_fourier_ooura.h",
+    "resampler/include/push_resampler.h",
+    "resampler/include/resampler.h",
+    "resampler/push_resampler.cc",
+    "resampler/push_sinc_resampler.cc",
+    "resampler/push_sinc_resampler.h",
+    "resampler/resampler.cc",
+    "resampler/sinc_resampler.cc",
+    "smoothing_filter.cc",
+    "smoothing_filter.h",
+    "sparse_fir_filter.cc",
+    "sparse_fir_filter.h",
+    "vad/include/vad.h",
+    "vad/vad.cc",
+    "wav_file.cc",
+    "wav_file.h",
+    "wav_header.cc",
+    "wav_header.h",
+    "window_generator.cc",
+    "window_generator.h",
+  ]
+
+  deps = [
+    ":common_audio_c",
+    ":sinc_resampler",
+    "..:webrtc_common",
+    "../:typedefs",
+    "../api:optional",
+    "../rtc_base:checks",
+    "../rtc_base:gtest_prod",
+    "../rtc_base:rtc_base_approved",
+    "../system_wrappers",
+    "../system_wrappers:cpu_features_api",
+  ]
+
+  defines = []
+  if (rtc_use_openmax_dl) {
+    sources += [
+      "real_fourier_openmax.cc",
+      "real_fourier_openmax.h",
+    ]
+    defines += [ "RTC_USE_OPENMAX_DL" ]
+    if (rtc_build_openmax_dl) {
+      deps += [ "//third_party/openmax_dl/dl" ]
+    }
+  }
+
+  if (rtc_build_with_neon) {
+    deps += [ ":common_audio_neon" ]
+  }
+
+  if (is_win) {
+    cflags = [ "/wd4334" ]  # Ignore warning on shift operator promotion.
+  }
+
+  public_configs = [ ":common_audio_config" ]
+
+  if (!build_with_chromium && is_clang) {
+    # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+    suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+  }
+
+  if (current_cpu == "x86" || current_cpu == "x64") {
+    deps += [ ":common_audio_sse2" ]
+  }
+}
+
+rtc_source_set("mock_common_audio") {
+  visibility += webrtc_default_visibility
+  testonly = true
+  sources = [
+    "mocks/mock_smoothing_filter.h",
+    "vad/mock/mock_vad.h",
+  ]
+  deps = [
+    ":common_audio",
+    "../test:test_support",
+  ]
+}
+
+rtc_source_set("common_audio_c_arm_asm") {
+  sources = []
+  deps = []
+  if (current_cpu == "arm") {
+    sources += [
+      "signal_processing/complex_bit_reverse_arm.S",
+      "signal_processing/spl_sqrt_floor_arm.S",
+    ]
+
+    if (arm_version >= 7) {
+      sources += [ "signal_processing/filter_ar_fast_q12_armv7.S" ]
+    } else {
+      sources += [ "signal_processing/filter_ar_fast_q12.c" ]
+    }
+    deps += [ "../system_wrappers:asm_defines" ]
+  }
+}
+
+rtc_source_set("common_audio_c") {
+  visibility += webrtc_default_visibility
+  sources = [
+    "fft4g.c",
+    "fft4g.h",
+    "ring_buffer.c",
+    "ring_buffer.h",
+    "signal_processing/auto_corr_to_refl_coef.c",
+    "signal_processing/auto_correlation.c",
+    "signal_processing/complex_fft_tables.h",
+    "signal_processing/copy_set_operations.c",
+    "signal_processing/cross_correlation.c",
+    "signal_processing/division_operations.c",
+    "signal_processing/downsample_fast.c",
+    "signal_processing/energy.c",
+    "signal_processing/filter_ar.c",
+    "signal_processing/filter_ma_fast_q12.c",
+    "signal_processing/get_hanning_window.c",
+    "signal_processing/get_scaling_square.c",
+    "signal_processing/ilbc_specific_functions.c",
+    "signal_processing/include/real_fft.h",
+    "signal_processing/include/signal_processing_library.h",
+    "signal_processing/include/spl_inl.h",
+    "signal_processing/include/spl_inl_armv7.h",
+    "signal_processing/levinson_durbin.c",
+    "signal_processing/lpc_to_refl_coef.c",
+    "signal_processing/min_max_operations.c",
+    "signal_processing/randomization_functions.c",
+    "signal_processing/real_fft.c",
+    "signal_processing/refl_coef_to_lpc.c",
+    "signal_processing/resample.c",
+    "signal_processing/resample_48khz.c",
+    "signal_processing/resample_by_2.c",
+    "signal_processing/resample_by_2_internal.c",
+    "signal_processing/resample_by_2_internal.h",
+    "signal_processing/resample_fractional.c",
+    "signal_processing/spl_init.c",
+    "signal_processing/spl_inl.c",
+    "signal_processing/spl_sqrt.c",
+    "signal_processing/splitting_filter.c",
+    "signal_processing/sqrt_of_one_minus_x_squared.c",
+    "signal_processing/vector_scaling_operations.c",
+    "vad/include/webrtc_vad.h",
+    "vad/vad_core.c",
+    "vad/vad_core.h",
+    "vad/vad_filterbank.c",
+    "vad/vad_filterbank.h",
+    "vad/vad_gmm.c",
+    "vad/vad_gmm.h",
+    "vad/vad_sp.c",
+    "vad/vad_sp.h",
+    "vad/webrtc_vad.c",
+  ]
+
+  if (current_cpu == "mipsel") {
+    sources += [
+      "signal_processing/complex_bit_reverse_mips.c",
+      "signal_processing/complex_fft_mips.c",
+      "signal_processing/cross_correlation_mips.c",
+      "signal_processing/downsample_fast_mips.c",
+      "signal_processing/filter_ar_fast_q12_mips.c",
+      "signal_processing/include/spl_inl_mips.h",
+      "signal_processing/min_max_operations_mips.c",
+      "signal_processing/resample_by_2_mips.c",
+      "signal_processing/spl_sqrt_floor_mips.c",
+    ]
+    if (mips_dsp_rev > 0) {
+      sources += [ "signal_processing/vector_scaling_operations_mips.c" ]
+    }
+  } else {
+    sources += [ "signal_processing/complex_fft.c" ]
+  }
+
+  if (current_cpu != "arm" && current_cpu != "mipsel") {
+    sources += [
+      "signal_processing/complex_bit_reverse.c",
+      "signal_processing/filter_ar_fast_q12.c",
+      "signal_processing/spl_sqrt_floor.c",
+    ]
+  }
+
+  if (is_win) {
+    cflags = [ "/wd4334" ]  # Ignore warning on shift operator promotion.
+  }
+
+  public_configs = [ ":common_audio_config" ]
+  deps = [
+    ":common_audio_c_arm_asm",
+    ":common_audio_cc",
+    "..:webrtc_common",
+    "../:typedefs",
+    "../rtc_base:checks",
+    "../rtc_base:compile_assert_c",
+    "../rtc_base:rtc_base_approved",
+    "../rtc_base:sanitizer",
+    "../system_wrappers",
+    "../system_wrappers:cpu_features_api",
+  ]
+}
+
+rtc_source_set("common_audio_cc") {
+  sources = [
+    "signal_processing/dot_product_with_scale.cc",
+    "signal_processing/dot_product_with_scale.h",
+  ]
+
+  public_configs = [ ":common_audio_config" ]
+  deps = [
+    "..:webrtc_common",
+    "../:typedefs",
+    "../rtc_base:rtc_base_approved",
+    "../system_wrappers",
+  ]
+}
+
+rtc_source_set("sinc_resampler") {
+  sources = [
+    "resampler/sinc_resampler.h",
+  ]
+  deps = [
+    "..:webrtc_common",
+    "../:typedefs",
+    "../rtc_base:gtest_prod",
+    "../rtc_base:rtc_base_approved",
+    "../system_wrappers",
+  ]
+}
+
+rtc_source_set("fir_filter") {
+  visibility += webrtc_default_visibility
+  sources = [
+    "fir_filter.h",
+  ]
+}
+
+rtc_source_set("fir_filter_factory") {
+  visibility += webrtc_default_visibility
+  sources = [
+    "fir_filter_c.cc",
+    "fir_filter_c.h",
+    "fir_filter_factory.cc",
+    "fir_filter_factory.h",
+  ]
+  deps = [
+    ":fir_filter",
+    "../rtc_base:checks",
+    "../rtc_base:rtc_base_approved",
+    "../system_wrappers:cpu_features_api",
+  ]
+  if (current_cpu == "x86" || current_cpu == "x64") {
+    deps += [ ":common_audio_sse2" ]
+  }
+  if (rtc_build_with_neon) {
+    deps += [ ":common_audio_neon" ]
+  }
+}
+
+if (current_cpu == "x86" || current_cpu == "x64") {
+  rtc_static_library("common_audio_sse2") {
+    sources = [
+      "fir_filter_sse.cc",
+      "fir_filter_sse.h",
+      "resampler/sinc_resampler_sse.cc",
+    ]
+
+    if (is_posix) {
+      cflags = [ "-msse2" ]
+    }
+
+    if (!build_with_chromium && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+    deps = [
+      ":fir_filter",
+      ":sinc_resampler",
+      "../rtc_base:checks",
+      "../rtc_base:rtc_base_approved",
+      "../system_wrappers",
+    ]
+  }
+}
+
+if (rtc_build_with_neon) {
+  rtc_static_library("common_audio_neon") {
+    sources = [
+      "fir_filter_neon.cc",
+      "fir_filter_neon.h",
+      "resampler/sinc_resampler_neon.cc",
+    ]
+
+    if (current_cpu != "arm64") {
+      # Enable compilation for the NEON instruction set. This is needed
+      # since //build/config/arm.gni only enables NEON for iOS, not Android.
+      # This provides the same functionality as webrtc/build/arm_neon.gypi.
+      suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ]
+      cflags = [ "-mfpu=neon" ]
+    }
+
+    # Disable LTO on NEON targets due to compiler bug.
+    # TODO(fdegans): Enable this. See crbug.com/408997.
+    if (rtc_use_lto) {
+      cflags -= [
+        "-flto",
+        "-ffat-lto-objects",
+      ]
+    }
+
+    if (!build_with_chromium && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+
+    deps = [
+      ":common_audio_neon_c",
+      ":fir_filter",
+      ":sinc_resampler",
+      "../rtc_base:checks",
+      "../rtc_base:rtc_base_approved",
+      "../system_wrappers",
+    ]
+  }
+
+  rtc_source_set("common_audio_neon_c") {
+    visibility += webrtc_default_visibility
+    sources = [
+      "signal_processing/cross_correlation_neon.c",
+      "signal_processing/downsample_fast_neon.c",
+      "signal_processing/min_max_operations_neon.c",
+    ]
+
+    if (current_cpu != "arm64") {
+      # Enable compilation for the NEON instruction set. This is needed
+      # since //build/config/arm.gni only enables NEON for iOS, not Android.
+      # This provides the same functionality as webrtc/build/arm_neon.gypi.
+      suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ]
+      cflags = [ "-mfpu=neon" ]
+    }
+
+    # Disable LTO on NEON targets due to compiler bug.
+    # TODO(fdegans): Enable this. See crbug.com/408997.
+    if (rtc_use_lto) {
+      cflags -= [
+        "-flto",
+        "-ffat-lto-objects",
+      ]
+    }
+
+    if (!build_with_chromium && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+    deps = [
+      ":common_audio_c",
+      "../rtc_base:checks",
+      "../rtc_base:rtc_base_approved",
+    ]
+  }
+}
+
+if (rtc_include_tests) {
+  rtc_test("common_audio_unittests") {
+    visibility += webrtc_default_visibility
+    testonly = true
+
+    sources = [
+      "audio_converter_unittest.cc",
+      "audio_ring_buffer_unittest.cc",
+      "audio_util_unittest.cc",
+      "blocker_unittest.cc",
+      "channel_buffer_unittest.cc",
+      "fir_filter_unittest.cc",
+      "lapped_transform_unittest.cc",
+      "real_fourier_unittest.cc",
+      "resampler/push_resampler_unittest.cc",
+      "resampler/push_sinc_resampler_unittest.cc",
+      "resampler/resampler_unittest.cc",
+      "resampler/sinusoidal_linear_chirp_source.cc",
+      "resampler/sinusoidal_linear_chirp_source.h",
+      "ring_buffer_unittest.cc",
+      "signal_processing/real_fft_unittest.cc",
+      "signal_processing/signal_processing_unittest.cc",
+      "smoothing_filter_unittest.cc",
+      "sparse_fir_filter_unittest.cc",
+      "vad/vad_core_unittest.cc",
+      "vad/vad_filterbank_unittest.cc",
+      "vad/vad_gmm_unittest.cc",
+      "vad/vad_sp_unittest.cc",
+      "vad/vad_unittest.cc",
+      "vad/vad_unittest.h",
+      "wav_file_unittest.cc",
+      "wav_header_unittest.cc",
+      "window_generator_unittest.cc",
+    ]
+
+    # Does not compile on iOS for arm: webrtc:5544.
+    if (!is_ios || target_cpu != "arm") {
+      sources += [ "resampler/sinc_resampler_unittest.cc" ]
+    }
+
+    if (rtc_use_openmax_dl) {
+      defines = [ "RTC_USE_OPENMAX_DL" ]
+    }
+
+    if (!build_with_chromium && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+
+    deps = [
+      ":common_audio",
+      ":common_audio_c",
+      ":fir_filter",
+      ":fir_filter_factory",
+      ":sinc_resampler",
+      "..:webrtc_common",
+      "../:typedefs",
+      "../rtc_base:checks",
+      "../rtc_base:rtc_base_approved",
+      "../rtc_base:rtc_base_tests_utils",
+      "../system_wrappers:cpu_features_api",
+      "../test:fileutils",
+      "../test:test_main",
+      "//testing/gtest",
+    ]
+
+    if (is_android) {
+      deps += [ "//testing/android/native_test:native_test_support" ]
+
+      shard_timeout = 900
+    }
+  }
+}
diff --git a/common_audio/DEPS b/common_audio/DEPS
new file mode 100644
index 0000000..47ce4c3
--- /dev/null
+++ b/common_audio/DEPS
@@ -0,0 +1,4 @@
+include_rules = [
+  "+dl/sp/api",  # For openmax_dl.
+  "+system_wrappers",
+]
diff --git a/common_audio/OWNERS b/common_audio/OWNERS
new file mode 100644
index 0000000..7f721de
--- /dev/null
+++ b/common_audio/OWNERS
@@ -0,0 +1,9 @@
+henrik.lundin@webrtc.org
+jan.skoglund@webrtc.org
+kwiberg@webrtc.org
+tina.legrand@webrtc.org
+
+# These are for the common case of adding or renaming files. If you're doing
+# structural changes, please get a review from a reviewer in this file.
+per-file *.gn=*
+per-file *.gni=*
diff --git a/common_audio/audio_converter.cc b/common_audio/audio_converter.cc
new file mode 100644
index 0000000..47d2be2
--- /dev/null
+++ b/common_audio/audio_converter.cc
@@ -0,0 +1,207 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/audio_converter.h"
+
+#include <cstring>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "common_audio/channel_buffer.h"
+#include "common_audio/resampler/push_sinc_resampler.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+using rtc::checked_cast;
+
+namespace webrtc {
+
+class CopyConverter : public AudioConverter {
+ public:
+  CopyConverter(size_t src_channels, size_t src_frames, size_t dst_channels,
+                size_t dst_frames)
+      : AudioConverter(src_channels, src_frames, dst_channels, dst_frames) {}
+  ~CopyConverter() override {};
+
+  void Convert(const float* const* src, size_t src_size, float* const* dst,
+               size_t dst_capacity) override {
+    CheckSizes(src_size, dst_capacity);
+    if (src != dst) {
+      for (size_t i = 0; i < src_channels(); ++i)
+        std::memcpy(dst[i], src[i], dst_frames() * sizeof(*dst[i]));
+    }
+  }
+};
+
+class UpmixConverter : public AudioConverter {
+ public:
+  UpmixConverter(size_t src_channels, size_t src_frames, size_t dst_channels,
+                 size_t dst_frames)
+      : AudioConverter(src_channels, src_frames, dst_channels, dst_frames) {}
+  ~UpmixConverter() override {};
+
+  void Convert(const float* const* src, size_t src_size, float* const* dst,
+               size_t dst_capacity) override {
+    CheckSizes(src_size, dst_capacity);
+    for (size_t i = 0; i < dst_frames(); ++i) {
+      const float value = src[0][i];
+      for (size_t j = 0; j < dst_channels(); ++j)
+        dst[j][i] = value;
+    }
+  }
+};
+
+class DownmixConverter : public AudioConverter {
+ public:
+  DownmixConverter(size_t src_channels, size_t src_frames, size_t dst_channels,
+                   size_t dst_frames)
+      : AudioConverter(src_channels, src_frames, dst_channels, dst_frames) {
+  }
+  ~DownmixConverter() override {};
+
+  void Convert(const float* const* src, size_t src_size, float* const* dst,
+               size_t dst_capacity) override {
+    CheckSizes(src_size, dst_capacity);
+    float* dst_mono = dst[0];
+    for (size_t i = 0; i < src_frames(); ++i) {
+      float sum = 0;
+      for (size_t j = 0; j < src_channels(); ++j)
+        sum += src[j][i];
+      dst_mono[i] = sum / src_channels();
+    }
+  }
+};
+
+class ResampleConverter : public AudioConverter {
+ public:
+  ResampleConverter(size_t src_channels, size_t src_frames, size_t dst_channels,
+                    size_t dst_frames)
+      : AudioConverter(src_channels, src_frames, dst_channels, dst_frames) {
+    resamplers_.reserve(src_channels);
+    for (size_t i = 0; i < src_channels; ++i)
+      resamplers_.push_back(std::unique_ptr<PushSincResampler>(
+          new PushSincResampler(src_frames, dst_frames)));
+  }
+  ~ResampleConverter() override {};
+
+  void Convert(const float* const* src, size_t src_size, float* const* dst,
+               size_t dst_capacity) override {
+    CheckSizes(src_size, dst_capacity);
+    for (size_t i = 0; i < resamplers_.size(); ++i)
+      resamplers_[i]->Resample(src[i], src_frames(), dst[i], dst_frames());
+  }
+
+ private:
+  std::vector<std::unique_ptr<PushSincResampler>> resamplers_;
+};
+
+// Apply a vector of converters in serial, in the order given. At least two
+// converters must be provided.
+class CompositionConverter : public AudioConverter {
+ public:
+  explicit CompositionConverter(
+    std::vector<std::unique_ptr<AudioConverter>> converters)
+      : converters_(std::move(converters)) {
+    RTC_CHECK_GE(converters_.size(), 2);
+    // We need an intermediate buffer after every converter.
+    for (auto it = converters_.begin(); it != converters_.end() - 1; ++it)
+      buffers_.push_back(
+          std::unique_ptr<ChannelBuffer<float>>(new ChannelBuffer<float>(
+              (*it)->dst_frames(), (*it)->dst_channels())));
+  }
+  ~CompositionConverter() override {};
+
+  void Convert(const float* const* src, size_t src_size, float* const* dst,
+               size_t dst_capacity) override {
+    converters_.front()->Convert(src, src_size, buffers_.front()->channels(),
+                                 buffers_.front()->size());
+    for (size_t i = 2; i < converters_.size(); ++i) {
+      auto& src_buffer = buffers_[i - 2];
+      auto& dst_buffer = buffers_[i - 1];
+      converters_[i]->Convert(src_buffer->channels(),
+                              src_buffer->size(),
+                              dst_buffer->channels(),
+                              dst_buffer->size());
+    }
+    converters_.back()->Convert(buffers_.back()->channels(),
+                                buffers_.back()->size(), dst, dst_capacity);
+  }
+
+ private:
+  std::vector<std::unique_ptr<AudioConverter>> converters_;
+  std::vector<std::unique_ptr<ChannelBuffer<float>>> buffers_;
+};
+
+std::unique_ptr<AudioConverter> AudioConverter::Create(size_t src_channels,
+                                                       size_t src_frames,
+                                                       size_t dst_channels,
+                                                       size_t dst_frames) {
+  std::unique_ptr<AudioConverter> sp;
+  if (src_channels > dst_channels) {
+    if (src_frames != dst_frames) {
+      std::vector<std::unique_ptr<AudioConverter>> converters;
+      converters.push_back(std::unique_ptr<AudioConverter>(new DownmixConverter(
+          src_channels, src_frames, dst_channels, src_frames)));
+      converters.push_back(
+          std::unique_ptr<AudioConverter>(new ResampleConverter(
+              dst_channels, src_frames, dst_channels, dst_frames)));
+      sp.reset(new CompositionConverter(std::move(converters)));
+    } else {
+      sp.reset(new DownmixConverter(src_channels, src_frames, dst_channels,
+                                    dst_frames));
+    }
+  } else if (src_channels < dst_channels) {
+    if (src_frames != dst_frames) {
+      std::vector<std::unique_ptr<AudioConverter>> converters;
+      converters.push_back(
+          std::unique_ptr<AudioConverter>(new ResampleConverter(
+              src_channels, src_frames, src_channels, dst_frames)));
+      converters.push_back(std::unique_ptr<AudioConverter>(new UpmixConverter(
+          src_channels, dst_frames, dst_channels, dst_frames)));
+      sp.reset(new CompositionConverter(std::move(converters)));
+    } else {
+      sp.reset(new UpmixConverter(src_channels, src_frames, dst_channels,
+                                  dst_frames));
+    }
+  } else if (src_frames != dst_frames) {
+    sp.reset(new ResampleConverter(src_channels, src_frames, dst_channels,
+                                   dst_frames));
+  } else {
+    sp.reset(new CopyConverter(src_channels, src_frames, dst_channels,
+                               dst_frames));
+  }
+
+  return sp;
+}
+
+// For CompositionConverter.
+AudioConverter::AudioConverter()
+    : src_channels_(0),
+      src_frames_(0),
+      dst_channels_(0),
+      dst_frames_(0) {}
+
+AudioConverter::AudioConverter(size_t src_channels, size_t src_frames,
+                               size_t dst_channels, size_t dst_frames)
+    : src_channels_(src_channels),
+      src_frames_(src_frames),
+      dst_channels_(dst_channels),
+      dst_frames_(dst_frames) {
+  RTC_CHECK(dst_channels == src_channels || dst_channels == 1 ||
+            src_channels == 1);
+}
+
+void AudioConverter::CheckSizes(size_t src_size, size_t dst_capacity) const {
+  RTC_CHECK_EQ(src_size, src_channels() * src_frames());
+  RTC_CHECK_GE(dst_capacity, dst_channels() * dst_frames());
+}
+
+}  // namespace webrtc
diff --git a/common_audio/audio_converter.h b/common_audio/audio_converter.h
new file mode 100644
index 0000000..3f7b9a8
--- /dev/null
+++ b/common_audio/audio_converter.h
@@ -0,0 +1,67 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_AUDIO_CONVERTER_H_
+#define COMMON_AUDIO_AUDIO_CONVERTER_H_
+
+#include <memory>
+
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Format conversion (remixing and resampling) for audio. Only simple remixing
+// conversions are supported: downmix to mono (i.e. |dst_channels| == 1) or
+// upmix from mono (i.e. |src_channels == 1|).
+//
+// The source and destination chunks have the same duration in time; specifying
+// the number of frames is equivalent to specifying the sample rates.
+class AudioConverter {
+ public:
+  // Returns a new AudioConverter, which will use the supplied format for its
+  // lifetime. Caller is responsible for the memory.
+  static std::unique_ptr<AudioConverter> Create(size_t src_channels,
+                                                size_t src_frames,
+                                                size_t dst_channels,
+                                                size_t dst_frames);
+  virtual ~AudioConverter() {}
+
+  // Convert |src|, containing |src_size| samples, to |dst|, having a sample
+  // capacity of |dst_capacity|. Both point to a series of buffers containing
+  // the samples for each channel. The sizes must correspond to the format
+  // passed to Create().
+  virtual void Convert(const float* const* src, size_t src_size,
+                       float* const* dst, size_t dst_capacity) = 0;
+
+  size_t src_channels() const { return src_channels_; }
+  size_t src_frames() const { return src_frames_; }
+  size_t dst_channels() const { return dst_channels_; }
+  size_t dst_frames() const { return dst_frames_; }
+
+ protected:
+  AudioConverter();
+  AudioConverter(size_t src_channels, size_t src_frames, size_t dst_channels,
+                 size_t dst_frames);
+
+  // Helper to RTC_CHECK that inputs are correctly sized.
+  void CheckSizes(size_t src_size, size_t dst_capacity) const;
+
+ private:
+  const size_t src_channels_;
+  const size_t src_frames_;
+  const size_t dst_channels_;
+  const size_t dst_frames_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioConverter);
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_AUDIO_CONVERTER_H_
diff --git a/common_audio/audio_converter_unittest.cc b/common_audio/audio_converter_unittest.cc
new file mode 100644
index 0000000..e9937fd
--- /dev/null
+++ b/common_audio/audio_converter_unittest.cc
@@ -0,0 +1,161 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cmath>
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+#include "common_audio/audio_converter.h"
+#include "common_audio/channel_buffer.h"
+#include "common_audio/resampler/push_sinc_resampler.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/format_macros.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+typedef std::unique_ptr<ChannelBuffer<float>> ScopedBuffer;
+
+// Sets the signal value to increase by |data| with every sample.
+ScopedBuffer CreateBuffer(const std::vector<float>& data, size_t frames) {
+  const size_t num_channels = data.size();
+  ScopedBuffer sb(new ChannelBuffer<float>(frames, num_channels));
+  for (size_t i = 0; i < num_channels; ++i)
+    for (size_t j = 0; j < frames; ++j)
+      sb->channels()[i][j] = data[i] * j;
+  return sb;
+}
+
+void VerifyParams(const ChannelBuffer<float>& ref,
+                  const ChannelBuffer<float>& test) {
+  EXPECT_EQ(ref.num_channels(), test.num_channels());
+  EXPECT_EQ(ref.num_frames(), test.num_frames());
+}
+
+// Computes the best SNR based on the error between |ref_frame| and
+// |test_frame|. It searches around |expected_delay| in samples between the
+// signals to compensate for the resampling delay.
+float ComputeSNR(const ChannelBuffer<float>& ref,
+                 const ChannelBuffer<float>& test,
+                 size_t expected_delay) {
+  VerifyParams(ref, test);
+  float best_snr = 0;
+  size_t best_delay = 0;
+
+  // Search within one sample of the expected delay.
+  for (size_t delay = std::max(expected_delay, static_cast<size_t>(1)) - 1;
+       delay <= std::min(expected_delay + 1, ref.num_frames());
+       ++delay) {
+    float mse = 0;
+    float variance = 0;
+    float mean = 0;
+    for (size_t i = 0; i < ref.num_channels(); ++i) {
+      for (size_t j = 0; j < ref.num_frames() - delay; ++j) {
+        float error = ref.channels()[i][j] - test.channels()[i][j + delay];
+        mse += error * error;
+        variance += ref.channels()[i][j] * ref.channels()[i][j];
+        mean += ref.channels()[i][j];
+      }
+    }
+
+    const size_t length = ref.num_channels() * (ref.num_frames() - delay);
+    mse /= length;
+    variance /= length;
+    mean /= length;
+    variance -= mean * mean;
+    float snr = 100;  // We assign 100 dB to the zero-error case.
+    if (mse > 0)
+      snr = 10 * std::log10(variance / mse);
+    if (snr > best_snr) {
+      best_snr = snr;
+      best_delay = delay;
+    }
+  }
+  printf("SNR=%.1f dB at delay=%" PRIuS "\n", best_snr, best_delay);
+  return best_snr;
+}
+
+// Sets the source to a linearly increasing signal for which we can easily
+// generate a reference. Runs the AudioConverter and ensures the output has
+// sufficiently high SNR relative to the reference.
+void RunAudioConverterTest(size_t src_channels,
+                           int src_sample_rate_hz,
+                           size_t dst_channels,
+                           int dst_sample_rate_hz) {
+  const float kSrcLeft = 0.0002f;
+  const float kSrcRight = 0.0001f;
+  const float resampling_factor = (1.f * src_sample_rate_hz) /
+      dst_sample_rate_hz;
+  const float dst_left = resampling_factor * kSrcLeft;
+  const float dst_right = resampling_factor * kSrcRight;
+  const float dst_mono = (dst_left + dst_right) / 2;
+  const size_t src_frames = static_cast<size_t>(src_sample_rate_hz / 100);
+  const size_t dst_frames = static_cast<size_t>(dst_sample_rate_hz / 100);
+
+  std::vector<float> src_data(1, kSrcLeft);
+  if (src_channels == 2)
+    src_data.push_back(kSrcRight);
+  ScopedBuffer src_buffer = CreateBuffer(src_data, src_frames);
+
+  std::vector<float> dst_data(1, 0);
+  std::vector<float> ref_data;
+  if (dst_channels == 1) {
+    if (src_channels == 1)
+      ref_data.push_back(dst_left);
+    else
+      ref_data.push_back(dst_mono);
+  } else {
+    dst_data.push_back(0);
+    ref_data.push_back(dst_left);
+    if (src_channels == 1)
+      ref_data.push_back(dst_left);
+    else
+      ref_data.push_back(dst_right);
+  }
+  ScopedBuffer dst_buffer = CreateBuffer(dst_data, dst_frames);
+  ScopedBuffer ref_buffer = CreateBuffer(ref_data, dst_frames);
+
+  // The sinc resampler has a known delay, which we compute here.
+  const size_t delay_frames = src_sample_rate_hz == dst_sample_rate_hz ? 0 :
+      static_cast<size_t>(
+          PushSincResampler::AlgorithmicDelaySeconds(src_sample_rate_hz) *
+          dst_sample_rate_hz);
+  // SNR reported on the same line later.
+  printf("(%" PRIuS ", %d Hz) -> (%" PRIuS ", %d Hz) ",
+         src_channels, src_sample_rate_hz, dst_channels, dst_sample_rate_hz);
+
+  std::unique_ptr<AudioConverter> converter = AudioConverter::Create(
+      src_channels, src_frames, dst_channels, dst_frames);
+  converter->Convert(src_buffer->channels(), src_buffer->size(),
+                     dst_buffer->channels(), dst_buffer->size());
+
+  EXPECT_LT(43.f,
+            ComputeSNR(*ref_buffer.get(), *dst_buffer.get(), delay_frames));
+}
+
+TEST(AudioConverterTest, ConversionsPassSNRThreshold) {
+  const int kSampleRates[] = {8000, 16000, 32000, 44100, 48000};
+  const size_t kChannels[] = {1, 2};
+  for (size_t src_rate = 0; src_rate < arraysize(kSampleRates); ++src_rate) {
+    for (size_t dst_rate = 0; dst_rate < arraysize(kSampleRates); ++dst_rate) {
+      for (size_t src_channel = 0; src_channel < arraysize(kChannels);
+           ++src_channel) {
+        for (size_t dst_channel = 0; dst_channel < arraysize(kChannels);
+             ++dst_channel) {
+          RunAudioConverterTest(kChannels[src_channel], kSampleRates[src_rate],
+                                kChannels[dst_channel], kSampleRates[dst_rate]);
+        }
+      }
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/common_audio/audio_ring_buffer.cc b/common_audio/audio_ring_buffer.cc
new file mode 100644
index 0000000..e7b5d81
--- /dev/null
+++ b/common_audio/audio_ring_buffer.cc
@@ -0,0 +1,75 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/audio_ring_buffer.h"
+
+#include "common_audio/ring_buffer.h"
+#include "rtc_base/checks.h"
+
+// This is a simple multi-channel wrapper over the ring_buffer.h C interface.
+
+namespace webrtc {
+
+AudioRingBuffer::AudioRingBuffer(size_t channels, size_t max_frames) {
+  buffers_.reserve(channels);
+  for (size_t i = 0; i < channels; ++i)
+    buffers_.push_back(WebRtc_CreateBuffer(max_frames, sizeof(float)));
+}
+
+AudioRingBuffer::~AudioRingBuffer() {
+  for (auto buf : buffers_)
+    WebRtc_FreeBuffer(buf);
+}
+
+void AudioRingBuffer::Write(const float* const* data, size_t channels,
+                            size_t frames) {
+  RTC_DCHECK_EQ(buffers_.size(), channels);
+  for (size_t i = 0; i < channels; ++i) {
+    const size_t written = WebRtc_WriteBuffer(buffers_[i], data[i], frames);
+    RTC_CHECK_EQ(written, frames);
+  }
+}
+
+void AudioRingBuffer::Read(float* const* data, size_t channels, size_t frames) {
+  RTC_DCHECK_EQ(buffers_.size(), channels);
+  for (size_t i = 0; i < channels; ++i) {
+    const size_t read =
+        WebRtc_ReadBuffer(buffers_[i], nullptr, data[i], frames);
+    RTC_CHECK_EQ(read, frames);
+  }
+}
+
+size_t AudioRingBuffer::ReadFramesAvailable() const {
+  // All buffers have the same amount available.
+  return WebRtc_available_read(buffers_[0]);
+}
+
+size_t AudioRingBuffer::WriteFramesAvailable() const {
+  // All buffers have the same amount available.
+  return WebRtc_available_write(buffers_[0]);
+}
+
+void AudioRingBuffer::MoveReadPositionForward(size_t frames) {
+  for (auto buf : buffers_) {
+    const size_t moved =
+        static_cast<size_t>(WebRtc_MoveReadPtr(buf, static_cast<int>(frames)));
+    RTC_CHECK_EQ(moved, frames);
+  }
+}
+
+void AudioRingBuffer::MoveReadPositionBackward(size_t frames) {
+  for (auto buf : buffers_) {
+    const size_t moved = static_cast<size_t>(
+        -WebRtc_MoveReadPtr(buf, -static_cast<int>(frames)));
+    RTC_CHECK_EQ(moved, frames);
+  }
+}
+
+}  // namespace webrtc
diff --git a/common_audio/audio_ring_buffer.h b/common_audio/audio_ring_buffer.h
new file mode 100644
index 0000000..67d24f0
--- /dev/null
+++ b/common_audio/audio_ring_buffer.h
@@ -0,0 +1,57 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef COMMON_AUDIO_AUDIO_RING_BUFFER_H_
+#define COMMON_AUDIO_AUDIO_RING_BUFFER_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <vector>
+
+struct RingBuffer;
+
+namespace webrtc {
+
+// A ring buffer tailored for float deinterleaved audio. Any operation that
+// cannot be performed as requested will cause a crash (e.g. insufficient data
+// in the buffer to fulfill a read request.)
+class AudioRingBuffer final {
+ public:
+  // Specify the number of channels and maximum number of frames the buffer will
+  // contain.
+  AudioRingBuffer(size_t channels, size_t max_frames);
+  ~AudioRingBuffer();
+
+  // Copies |data| to the buffer and advances the write pointer. |channels| must
+  // be the same as at creation time.
+  void Write(const float* const* data, size_t channels, size_t frames);
+
+  // Copies from the buffer to |data| and advances the read pointer. |channels|
+  // must be the same as at creation time.
+  void Read(float* const* data, size_t channels, size_t frames);
+
+  size_t ReadFramesAvailable() const;
+  size_t WriteFramesAvailable() const;
+
+  // Moves the read position. The forward version advances the read pointer
+  // towards the write pointer and the backward verison withdraws the read
+  // pointer away from the write pointer (i.e. flushing and stuffing the buffer
+  // respectively.)
+  void MoveReadPositionForward(size_t frames);
+  void MoveReadPositionBackward(size_t frames);
+
+ private:
+  // TODO(kwiberg): Use std::vector<std::unique_ptr<RingBuffer>> instead.
+  std::vector<RingBuffer*> buffers_;
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_AUDIO_RING_BUFFER_H_
diff --git a/common_audio/audio_ring_buffer_unittest.cc b/common_audio/audio_ring_buffer_unittest.cc
new file mode 100644
index 0000000..2fcf800
--- /dev/null
+++ b/common_audio/audio_ring_buffer_unittest.cc
@@ -0,0 +1,112 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "common_audio/audio_ring_buffer.h"
+
+#include "common_audio/channel_buffer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class AudioRingBufferTest :
+    public ::testing::TestWithParam< ::testing::tuple<int, int, int, int> > {
+};
+
+void ReadAndWriteTest(const ChannelBuffer<float>& input,
+                      size_t num_write_chunk_frames,
+                      size_t num_read_chunk_frames,
+                      size_t buffer_frames,
+                      ChannelBuffer<float>* output) {
+  const size_t num_channels = input.num_channels();
+  const size_t total_frames = input.num_frames();
+  AudioRingBuffer buf(num_channels, buffer_frames);
+  std::unique_ptr<float* []> slice(new float*[num_channels]);
+
+  size_t input_pos = 0;
+  size_t output_pos = 0;
+  while (input_pos + buf.WriteFramesAvailable() < total_frames) {
+    // Write until the buffer is as full as possible.
+    while (buf.WriteFramesAvailable() >= num_write_chunk_frames) {
+      buf.Write(input.Slice(slice.get(), input_pos), num_channels,
+                num_write_chunk_frames);
+      input_pos += num_write_chunk_frames;
+    }
+    // Read until the buffer is as empty as possible.
+    while (buf.ReadFramesAvailable() >= num_read_chunk_frames) {
+      EXPECT_LT(output_pos, total_frames);
+      buf.Read(output->Slice(slice.get(), output_pos), num_channels,
+               num_read_chunk_frames);
+      output_pos += num_read_chunk_frames;
+    }
+  }
+
+  // Write and read the last bit.
+  if (input_pos < total_frames) {
+    buf.Write(input.Slice(slice.get(), input_pos), num_channels,
+              total_frames - input_pos);
+  }
+  if (buf.ReadFramesAvailable()) {
+    buf.Read(output->Slice(slice.get(), output_pos), num_channels,
+             buf.ReadFramesAvailable());
+  }
+  EXPECT_EQ(0u, buf.ReadFramesAvailable());
+}
+
+TEST_P(AudioRingBufferTest, ReadDataMatchesWrittenData) {
+  const size_t kFrames = 5000;
+  const size_t num_channels = ::testing::get<3>(GetParam());
+
+  // Initialize the input data to an increasing sequence.
+  ChannelBuffer<float> input(kFrames, static_cast<int>(num_channels));
+  for (size_t i = 0; i < num_channels; ++i)
+    for (size_t j = 0; j < kFrames; ++j)
+      input.channels()[i][j] = (i + 1) * (j + 1);
+
+  ChannelBuffer<float> output(kFrames, static_cast<int>(num_channels));
+  ReadAndWriteTest(input,
+                   ::testing::get<0>(GetParam()),
+                   ::testing::get<1>(GetParam()),
+                   ::testing::get<2>(GetParam()),
+                   &output);
+
+  // Verify the read data matches the input.
+  for (size_t i = 0; i < num_channels; ++i)
+    for (size_t j = 0; j < kFrames; ++j)
+      EXPECT_EQ(input.channels()[i][j], output.channels()[i][j]);
+}
+
+INSTANTIATE_TEST_CASE_P(
+    AudioRingBufferTest, AudioRingBufferTest,
+    ::testing::Combine(::testing::Values(10, 20, 42),  // num_write_chunk_frames
+                       ::testing::Values(1, 10, 17),   // num_read_chunk_frames
+                       ::testing::Values(100, 256),    // buffer_frames
+                       ::testing::Values(1, 4)));      // num_channels
+
+TEST_F(AudioRingBufferTest, MoveReadPosition) {
+  const size_t kNumChannels = 1;
+  const float kInputArray[] = {1, 2, 3, 4};
+  const size_t kNumFrames = sizeof(kInputArray) / sizeof(*kInputArray);
+  ChannelBuffer<float> input(kNumFrames, kNumChannels);
+  input.SetDataForTesting(kInputArray, kNumFrames);
+  AudioRingBuffer buf(kNumChannels, kNumFrames);
+  buf.Write(input.channels(), kNumChannels, kNumFrames);
+
+  buf.MoveReadPositionForward(3);
+  ChannelBuffer<float> output(1, kNumChannels);
+  buf.Read(output.channels(), kNumChannels, 1);
+  EXPECT_EQ(4, output.channels()[0][0]);
+  buf.MoveReadPositionBackward(3);
+  buf.Read(output.channels(), kNumChannels, 1);
+  EXPECT_EQ(2, output.channels()[0][0]);
+}
+
+}  // namespace webrtc
diff --git a/common_audio/audio_util.cc b/common_audio/audio_util.cc
new file mode 100644
index 0000000..b442a14
--- /dev/null
+++ b/common_audio/audio_util.cc
@@ -0,0 +1,51 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/include/audio_util.h"
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+void FloatToS16(const float* src, size_t size, int16_t* dest) {
+  for (size_t i = 0; i < size; ++i)
+    dest[i] = FloatToS16(src[i]);
+}
+
+void S16ToFloat(const int16_t* src, size_t size, float* dest) {
+  for (size_t i = 0; i < size; ++i)
+    dest[i] = S16ToFloat(src[i]);
+}
+
+void FloatS16ToS16(const float* src, size_t size, int16_t* dest) {
+  for (size_t i = 0; i < size; ++i)
+    dest[i] = FloatS16ToS16(src[i]);
+}
+
+void FloatToFloatS16(const float* src, size_t size, float* dest) {
+  for (size_t i = 0; i < size; ++i)
+    dest[i] = FloatToFloatS16(src[i]);
+}
+
+void FloatS16ToFloat(const float* src, size_t size, float* dest) {
+  for (size_t i = 0; i < size; ++i)
+    dest[i] = FloatS16ToFloat(src[i]);
+}
+
+template <>
+void DownmixInterleavedToMono<int16_t>(const int16_t* interleaved,
+                                       size_t num_frames,
+                                       int num_channels,
+                                       int16_t* deinterleaved) {
+  DownmixInterleavedToMonoImpl<int16_t, int32_t>(interleaved, num_frames,
+                                                 num_channels, deinterleaved);
+}
+
+}  // namespace webrtc
diff --git a/common_audio/audio_util_unittest.cc b/common_audio/audio_util_unittest.cc
new file mode 100644
index 0000000..230669e
--- /dev/null
+++ b/common_audio/audio_util_unittest.cc
@@ -0,0 +1,270 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/include/audio_util.h"
+
+#include "rtc_base/arraysize.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace {
+
+using ::testing::ElementsAreArray;
+
+void ExpectArraysEq(const int16_t* ref, const int16_t* test, size_t length) {
+  for (size_t i = 0; i < length; ++i) {
+    EXPECT_EQ(ref[i], test[i]);
+  }
+}
+
+void ExpectArraysEq(const float* ref, const float* test, size_t length) {
+  for (size_t i = 0; i < length; ++i) {
+    EXPECT_NEAR(ref[i], test[i], 0.01f);
+  }
+}
+
+TEST(AudioUtilTest, FloatToS16) {
+  static constexpr float kInput[] = {0.f,
+                                     0.4f / 32767.f,
+                                     0.6f / 32767.f,
+                                     -0.4f / 32768.f,
+                                     -0.6f / 32768.f,
+                                     1.f,
+                                     -1.f,
+                                     1.1f,
+                                     -1.1f};
+  static constexpr int16_t kReference[] = {0,     0,      1,     0,     -1,
+                                           32767, -32768, 32767, -32768};
+  static constexpr size_t kSize = arraysize(kInput);
+  static_assert(arraysize(kReference) == kSize, "");
+  int16_t output[kSize];
+  FloatToS16(kInput, kSize, output);
+  ExpectArraysEq(kReference, output, kSize);
+}
+
+TEST(AudioUtilTest, S16ToFloat) {
+  static constexpr int16_t kInput[] = {0, 1, -1, 16384, -16384, 32767, -32768};
+  static constexpr float kReference[] = {
+      0.f, 1.f / 32767.f, -1.f / 32768.f, 16384.f / 32767.f, -0.5f, 1.f, -1.f};
+  static constexpr size_t kSize = arraysize(kInput);
+  static_assert(arraysize(kReference) == kSize, "");
+  float output[kSize];
+  S16ToFloat(kInput, kSize, output);
+  ExpectArraysEq(kReference, output, kSize);
+}
+
+TEST(AudioUtilTest, FloatS16ToS16) {
+  static constexpr float kInput[] = {0.f,   0.4f,    0.5f,    -0.4f,
+                                     -0.5f, 32768.f, -32769.f};
+  static constexpr int16_t kReference[] = {0, 0, 1, 0, -1, 32767, -32768};
+  static constexpr size_t kSize = arraysize(kInput);
+  static_assert(arraysize(kReference) == kSize, "");
+  int16_t output[kSize];
+  FloatS16ToS16(kInput, kSize, output);
+  ExpectArraysEq(kReference, output, kSize);
+}
+
+TEST(AudioUtilTest, FloatToFloatS16) {
+  static constexpr float kInput[] = {0.f,
+                                     0.4f / 32767.f,
+                                     0.6f / 32767.f,
+                                     -0.4f / 32768.f,
+                                     -0.6f / 32768.f,
+                                     1.f,
+                                     -1.f,
+                                     1.1f,
+                                     -1.1f};
+  static constexpr float kReference[] = {
+      0.f, 0.4f, 0.6f, -0.4f, -0.6f, 32767.f, -32768.f, 36043.7f, -36044.8f};
+  static constexpr size_t kSize = arraysize(kInput);
+  static_assert(arraysize(kReference) == kSize, "");
+  float output[kSize];
+  FloatToFloatS16(kInput, kSize, output);
+  ExpectArraysEq(kReference, output, kSize);
+}
+
+TEST(AudioUtilTest, FloatS16ToFloat) {
+  static constexpr float kInput[] = {
+      0.f, 0.4f, 0.6f, -0.4f, -0.6f, 32767.f, -32768.f, 36043.7f, -36044.8f};
+  static constexpr float kReference[] = {0.f,
+                                         0.4f / 32767.f,
+                                         0.6f / 32767.f,
+                                         -0.4f / 32768.f,
+                                         -0.6f / 32768.f,
+                                         1.f,
+                                         -1.f,
+                                         1.1f,
+                                         -1.1f};
+  static constexpr size_t kSize = arraysize(kInput);
+  static_assert(arraysize(kReference) == kSize, "");
+  float output[kSize];
+  FloatS16ToFloat(kInput, kSize, output);
+  ExpectArraysEq(kReference, output, kSize);
+}
+
+TEST(AudioUtilTest, DbfsToFloatS16) {
+  static constexpr float kInput[] = {-90.f, -70.f, -30.f, -20.f, -10.f,
+                                     -5.f,  -1.f,  0.f,   1.f};
+  static constexpr float kReference[] = {
+      1.036215186f, 10.36215115f, 1036.215088f, 3276.800049f, 10362.15137f,
+      18426.80078f, 29204.51172f, 32768.f,      36766.30078f};
+  static constexpr size_t kSize = arraysize(kInput);
+  static_assert(arraysize(kReference) == kSize, "");
+  float output[kSize];
+  for (size_t i = 0; i < kSize; ++i) {
+    output[i] = DbfsToFloatS16(kInput[i]);
+  }
+  ExpectArraysEq(kReference, output, kSize);
+}
+
+TEST(AudioUtilTest, FloatS16ToDbfs) {
+  static constexpr float kInput[] = {1.036215143f, 10.36215143f,  1036.215143f,
+                                     3276.8f,      10362.151436f, 18426.800543f,
+                                     29204.51074f, 32768.0f,      36766.30071f};
+
+  static constexpr float kReference[] = {
+      -90.f, -70.f, -30.f, -20.f, -10.f, -5.f, -1.f, 0.f, 0.9999923706f};
+  static constexpr size_t kSize = arraysize(kInput);
+  static_assert(arraysize(kReference) == kSize, "");
+
+  float output[kSize];
+  for (size_t i = 0; i < kSize; ++i) {
+    output[i] = FloatS16ToDbfs(kInput[i]);
+  }
+  ExpectArraysEq(kReference, output, kSize);
+}
+
+TEST(AudioUtilTest, InterleavingStereo) {
+  const int16_t kInterleaved[] = {2, 3, 4, 9, 8, 27, 16, 81};
+  const size_t kSamplesPerChannel = 4;
+  const int kNumChannels = 2;
+  const size_t kLength = kSamplesPerChannel * kNumChannels;
+  int16_t left[kSamplesPerChannel], right[kSamplesPerChannel];
+  int16_t* deinterleaved[] = {left, right};
+  Deinterleave(kInterleaved, kSamplesPerChannel, kNumChannels, deinterleaved);
+  const int16_t kRefLeft[] = {2, 4, 8, 16};
+  const int16_t kRefRight[] = {3, 9, 27, 81};
+  ExpectArraysEq(kRefLeft, left, kSamplesPerChannel);
+  ExpectArraysEq(kRefRight, right, kSamplesPerChannel);
+
+  int16_t interleaved[kLength];
+  Interleave(deinterleaved, kSamplesPerChannel, kNumChannels, interleaved);
+  ExpectArraysEq(kInterleaved, interleaved, kLength);
+}
+
+TEST(AudioUtilTest, InterleavingMonoIsIdentical) {
+  const int16_t kInterleaved[] = {1, 2, 3, 4, 5};
+  const size_t kSamplesPerChannel = 5;
+  const int kNumChannels = 1;
+  int16_t mono[kSamplesPerChannel];
+  int16_t* deinterleaved[] = {mono};
+  Deinterleave(kInterleaved, kSamplesPerChannel, kNumChannels, deinterleaved);
+  ExpectArraysEq(kInterleaved, mono, kSamplesPerChannel);
+
+  int16_t interleaved[kSamplesPerChannel];
+  Interleave(deinterleaved, kSamplesPerChannel, kNumChannels, interleaved);
+  ExpectArraysEq(mono, interleaved, kSamplesPerChannel);
+}
+
+TEST(AudioUtilTest, DownmixInterleavedToMono) {
+  {
+    const size_t kNumFrames = 4;
+    const int kNumChannels = 1;
+    const int16_t interleaved[kNumChannels * kNumFrames] = {1, 2, -1, -3};
+    int16_t deinterleaved[kNumFrames];
+
+    DownmixInterleavedToMono(interleaved, kNumFrames, kNumChannels,
+                             deinterleaved);
+
+    EXPECT_THAT(deinterleaved, ElementsAreArray(interleaved));
+  }
+  {
+    const size_t kNumFrames = 2;
+    const int kNumChannels = 2;
+    const int16_t interleaved[kNumChannels * kNumFrames] = {10, 20, -10, -30};
+    int16_t deinterleaved[kNumFrames];
+
+    DownmixInterleavedToMono(interleaved, kNumFrames, kNumChannels,
+                             deinterleaved);
+    const int16_t expected[kNumFrames] = {15, -20};
+
+    EXPECT_THAT(deinterleaved, ElementsAreArray(expected));
+  }
+  {
+    const size_t kNumFrames = 3;
+    const int kNumChannels = 3;
+    const int16_t interleaved[kNumChannels * kNumFrames] = {
+        30000, 30000, 24001, -5, -10, -20, -30000, -30999, -30000};
+    int16_t deinterleaved[kNumFrames];
+
+    DownmixInterleavedToMono(interleaved, kNumFrames, kNumChannels,
+                             deinterleaved);
+    const int16_t expected[kNumFrames] = {28000, -11, -30333};
+
+    EXPECT_THAT(deinterleaved, ElementsAreArray(expected));
+  }
+}
+
+TEST(AudioUtilTest, DownmixToMonoTest) {
+  {
+    const size_t kNumFrames = 4;
+    const int kNumChannels = 1;
+    const float input_data[kNumChannels][kNumFrames] = {{1.f, 2.f, -1.f, -3.f}};
+    const float* input[kNumChannels];
+    for (int i = 0; i < kNumChannels; ++i) {
+      input[i] = input_data[i];
+    }
+
+    float downmixed[kNumFrames];
+
+    DownmixToMono<float, float>(input, kNumFrames, kNumChannels, downmixed);
+
+    EXPECT_THAT(downmixed, ElementsAreArray(input_data[0]));
+  }
+  {
+    const size_t kNumFrames = 3;
+    const int kNumChannels = 2;
+    const float input_data[kNumChannels][kNumFrames] = {{1.f, 2.f, -1.f},
+                                                        {3.f, 0.f, 1.f}};
+    const float* input[kNumChannels];
+    for (int i = 0; i < kNumChannels; ++i) {
+      input[i] = input_data[i];
+    }
+
+    float downmixed[kNumFrames];
+    const float expected[kNumFrames] = {2.f, 1.f, 0.f};
+
+    DownmixToMono<float, float>(input, kNumFrames, kNumChannels, downmixed);
+
+    EXPECT_THAT(downmixed, ElementsAreArray(expected));
+  }
+  {
+    const size_t kNumFrames = 3;
+    const int kNumChannels = 3;
+    const int16_t input_data[kNumChannels][kNumFrames] = {
+        {30000, -5, -30000}, {30000, -10, -30999}, {24001, -20, -30000}};
+    const int16_t* input[kNumChannels];
+    for (int i = 0; i < kNumChannels; ++i) {
+      input[i] = input_data[i];
+    }
+
+    int16_t downmixed[kNumFrames];
+    const int16_t expected[kNumFrames] = {28000, -11, -30333};
+
+    DownmixToMono<int16_t, int32_t>(input, kNumFrames, kNumChannels, downmixed);
+
+    EXPECT_THAT(downmixed, ElementsAreArray(expected));
+  }
+}
+
+}  // namespace
+}  // namespace webrtc
diff --git a/common_audio/blocker.cc b/common_audio/blocker.cc
new file mode 100644
index 0000000..7d09d21
--- /dev/null
+++ b/common_audio/blocker.cc
@@ -0,0 +1,238 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/blocker.h"
+
+#include <string.h>
+
+#include "rtc_base/checks.h"
+
+namespace {
+
+// Adds |a| and |b| frame by frame into |result| (basically matrix addition).
+void AddFrames(const float* const* a,
+               size_t a_start_index,
+               const float* const* b,
+               int b_start_index,
+               size_t num_frames,
+               size_t num_channels,
+               float* const* result,
+               size_t result_start_index) {
+  for (size_t i = 0; i < num_channels; ++i) {
+    for (size_t j = 0; j < num_frames; ++j) {
+      result[i][j + result_start_index] =
+          a[i][j + a_start_index] + b[i][j + b_start_index];
+    }
+  }
+}
+
+// Copies |src| into |dst| channel by channel.
+void CopyFrames(const float* const* src,
+                size_t src_start_index,
+                size_t num_frames,
+                size_t num_channels,
+                float* const* dst,
+                size_t dst_start_index) {
+  for (size_t i = 0; i < num_channels; ++i) {
+    memcpy(&dst[i][dst_start_index],
+           &src[i][src_start_index],
+           num_frames * sizeof(dst[i][dst_start_index]));
+  }
+}
+
+// Moves |src| into |dst| channel by channel.
+void MoveFrames(const float* const* src,
+                size_t src_start_index,
+                size_t num_frames,
+                size_t num_channels,
+                float* const* dst,
+                size_t dst_start_index) {
+  for (size_t i = 0; i < num_channels; ++i) {
+    memmove(&dst[i][dst_start_index],
+            &src[i][src_start_index],
+            num_frames * sizeof(dst[i][dst_start_index]));
+  }
+}
+
+void ZeroOut(float* const* buffer,
+             size_t starting_idx,
+             size_t num_frames,
+             size_t num_channels) {
+  for (size_t i = 0; i < num_channels; ++i) {
+    memset(&buffer[i][starting_idx], 0,
+           num_frames * sizeof(buffer[i][starting_idx]));
+  }
+}
+
+// Pointwise multiplies each channel of |frames| with |window|. Results are
+// stored in |frames|.
+void ApplyWindow(const float* window,
+                 size_t num_frames,
+                 size_t num_channels,
+                 float* const* frames) {
+  for (size_t i = 0; i < num_channels; ++i) {
+    for (size_t j = 0; j < num_frames; ++j) {
+      frames[i][j] = frames[i][j] * window[j];
+    }
+  }
+}
+
+size_t gcd(size_t a, size_t b) {
+  size_t tmp;
+  while (b) {
+     tmp = a;
+     a = b;
+     b = tmp % b;
+  }
+  return a;
+}
+
+}  // namespace
+
+namespace webrtc {
+
+Blocker::Blocker(size_t chunk_size,
+                 size_t block_size,
+                 size_t num_input_channels,
+                 size_t num_output_channels,
+                 const float* window,
+                 size_t shift_amount,
+                 BlockerCallback* callback)
+    : chunk_size_(chunk_size),
+      block_size_(block_size),
+      num_input_channels_(num_input_channels),
+      num_output_channels_(num_output_channels),
+      initial_delay_(block_size_ - gcd(chunk_size, shift_amount)),
+      frame_offset_(0),
+      input_buffer_(num_input_channels_, chunk_size_ + initial_delay_),
+      output_buffer_(chunk_size_ + initial_delay_, num_output_channels_),
+      input_block_(block_size_, num_input_channels_),
+      output_block_(block_size_, num_output_channels_),
+      window_(new float[block_size_]),
+      shift_amount_(shift_amount),
+      callback_(callback) {
+  RTC_CHECK_LE(num_output_channels_, num_input_channels_);
+  RTC_CHECK_LE(shift_amount_, block_size_);
+
+  memcpy(window_.get(), window, block_size_ * sizeof(*window_.get()));
+  input_buffer_.MoveReadPositionBackward(initial_delay_);
+}
+
+Blocker::~Blocker() = default;
+
+// When block_size < chunk_size the input and output buffers look like this:
+//
+//                      delay*             chunk_size    chunk_size + delay*
+//  buffer: <-------------|---------------------|---------------|>
+//                _a_              _b_                 _c_
+//
+// On each call to ProcessChunk():
+// 1. New input gets read into sections _b_ and _c_ of the input buffer.
+// 2. We block starting from frame_offset.
+// 3. We block until we reach a block |bl| that doesn't contain any frames
+//    from sections _a_ or _b_ of the input buffer.
+// 4. We window the current block, fire the callback for processing, window
+//    again, and overlap/add to the output buffer.
+// 5. We copy sections _a_ and _b_ of the output buffer into output.
+// 6. For both the input and the output buffers, we copy section _c_ into
+//    section _a_.
+// 7. We set the new frame_offset to be the difference between the first frame
+//    of |bl| and the border between sections _b_ and _c_.
+//
+// When block_size > chunk_size the input and output buffers look like this:
+//
+//                   chunk_size               delay*       chunk_size + delay*
+//  buffer: <-------------|---------------------|---------------|>
+//                _a_              _b_                 _c_
+//
+// On each call to ProcessChunk():
+// The procedure is the same as above, except for:
+// 1. New input gets read into section _c_ of the input buffer.
+// 3. We block until we reach a block |bl| that doesn't contain any frames
+//    from section _a_ of the input buffer.
+// 5. We copy section _a_ of the output buffer into output.
+// 6. For both the input and the output buffers, we copy sections _b_ and _c_
+//    into section _a_ and _b_.
+// 7. We set the new frame_offset to be the difference between the first frame
+//    of |bl| and the border between sections _a_ and _b_.
+//
+// * delay here refers to inintial_delay_
+//
+// TODO(claguna): Look at using ring buffers to eliminate some copies.
+void Blocker::ProcessChunk(const float* const* input,
+                           size_t chunk_size,
+                           size_t num_input_channels,
+                           size_t num_output_channels,
+                           float* const* output) {
+  RTC_CHECK_EQ(chunk_size, chunk_size_);
+  RTC_CHECK_EQ(num_input_channels, num_input_channels_);
+  RTC_CHECK_EQ(num_output_channels, num_output_channels_);
+
+  input_buffer_.Write(input, num_input_channels, chunk_size_);
+  size_t first_frame_in_block = frame_offset_;
+
+  // Loop through blocks.
+  while (first_frame_in_block < chunk_size_) {
+    input_buffer_.Read(input_block_.channels(), num_input_channels,
+                       block_size_);
+    input_buffer_.MoveReadPositionBackward(block_size_ - shift_amount_);
+
+    ApplyWindow(window_.get(),
+                block_size_,
+                num_input_channels_,
+                input_block_.channels());
+    callback_->ProcessBlock(input_block_.channels(),
+                            block_size_,
+                            num_input_channels_,
+                            num_output_channels_,
+                            output_block_.channels());
+    ApplyWindow(window_.get(),
+                block_size_,
+                num_output_channels_,
+                output_block_.channels());
+
+    AddFrames(output_buffer_.channels(),
+              first_frame_in_block,
+              output_block_.channels(),
+              0,
+              block_size_,
+              num_output_channels_,
+              output_buffer_.channels(),
+              first_frame_in_block);
+
+    first_frame_in_block += shift_amount_;
+  }
+
+  // Copy output buffer to output
+  CopyFrames(output_buffer_.channels(),
+             0,
+             chunk_size_,
+             num_output_channels_,
+             output,
+             0);
+
+  // Copy output buffer [chunk_size_, chunk_size_ + initial_delay]
+  // to output buffer [0, initial_delay], zero the rest.
+  MoveFrames(output_buffer_.channels(),
+             chunk_size,
+             initial_delay_,
+             num_output_channels_,
+             output_buffer_.channels(),
+             0);
+  ZeroOut(output_buffer_.channels(),
+          initial_delay_,
+          chunk_size_,
+          num_output_channels_);
+
+  // Calculate new starting frames.
+  frame_offset_ = first_frame_in_block - chunk_size_;
+}
+
+}  // namespace webrtc
diff --git a/common_audio/blocker.h b/common_audio/blocker.h
new file mode 100644
index 0000000..9bce896
--- /dev/null
+++ b/common_audio/blocker.h
@@ -0,0 +1,127 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_BLOCKER_H_
+#define COMMON_AUDIO_BLOCKER_H_
+
+#include <memory>
+
+#include "common_audio/audio_ring_buffer.h"
+#include "common_audio/channel_buffer.h"
+
+namespace webrtc {
+
+// The callback function to process audio in the time domain. Input has already
+// been windowed, and output will be windowed. The number of input channels
+// must be >= the number of output channels.
+class BlockerCallback {
+ public:
+  virtual ~BlockerCallback() {}
+
+  virtual void ProcessBlock(const float* const* input,
+                            size_t num_frames,
+                            size_t num_input_channels,
+                            size_t num_output_channels,
+                            float* const* output) = 0;
+};
+
+// The main purpose of Blocker is to abstract away the fact that often we
+// receive a different number of audio frames than our transform takes. For
+// example, most FFTs work best when the fft-size is a power of 2, but suppose
+// we receive 20ms of audio at a sample rate of 48000. That comes to 960 frames
+// of audio, which is not a power of 2. Blocker allows us to specify the
+// transform and all other necessary processing via the Process() callback
+// function without any constraints on the transform-size
+// (read: |block_size_|) or received-audio-size (read: |chunk_size_|).
+// We handle this for the multichannel audio case, allowing for different
+// numbers of input and output channels (for example, beamforming takes 2 or
+// more input channels and returns 1 output channel). Audio signals are
+// represented as deinterleaved floats in the range [-1, 1].
+//
+// Blocker is responsible for:
+// - blocking audio while handling potential discontinuities on the edges
+//   of chunks
+// - windowing blocks before sending them to Process()
+// - windowing processed blocks, and overlap-adding them together before
+//   sending back a processed chunk
+//
+// To use blocker:
+// 1. Impelment a BlockerCallback object |bc|.
+// 2. Instantiate a Blocker object |b|, passing in |bc|.
+// 3. As you receive audio, call b.ProcessChunk() to get processed audio.
+//
+// A small amount of delay is added to the first received chunk to deal with
+// the difference in chunk/block sizes. This delay is <= chunk_size.
+//
+// Ownership of window is retained by the caller.  That is, Blocker makes a
+// copy of window and does not attempt to delete it.
+class Blocker {
+ public:
+  Blocker(size_t chunk_size,
+          size_t block_size,
+          size_t num_input_channels,
+          size_t num_output_channels,
+          const float* window,
+          size_t shift_amount,
+          BlockerCallback* callback);
+  ~Blocker();
+
+  void ProcessChunk(const float* const* input,
+                    size_t chunk_size,
+                    size_t num_input_channels,
+                    size_t num_output_channels,
+                    float* const* output);
+
+  size_t initial_delay() const { return initial_delay_; }
+
+ private:
+  const size_t chunk_size_;
+  const size_t block_size_;
+  const size_t num_input_channels_;
+  const size_t num_output_channels_;
+
+  // The number of frames of delay to add at the beginning of the first chunk.
+  const size_t initial_delay_;
+
+  // The frame index into the input buffer where the first block should be read
+  // from. This is necessary because shift_amount_ is not necessarily a
+  // multiple of chunk_size_, so blocks won't line up at the start of the
+  // buffer.
+  size_t frame_offset_;
+
+  // Since blocks nearly always overlap, there are certain blocks that require
+  // frames from the end of one chunk and the beginning of the next chunk. The
+  // input and output buffers are responsible for saving those frames between
+  // calls to ProcessChunk().
+  //
+  // Both contain |initial delay| + |chunk_size| frames. The input is a fairly
+  // standard FIFO, but due to the overlap-add it's harder to use an
+  // AudioRingBuffer for the output.
+  AudioRingBuffer input_buffer_;
+  ChannelBuffer<float> output_buffer_;
+
+  // Space for the input block (can't wrap because of windowing).
+  ChannelBuffer<float> input_block_;
+
+  // Space for the output block (can't wrap because of overlap/add).
+  ChannelBuffer<float> output_block_;
+
+  std::unique_ptr<float[]> window_;
+
+  // The amount of frames between the start of contiguous blocks. For example,
+  // |shift_amount_| = |block_size_| / 2 for a Hann window.
+  size_t shift_amount_;
+
+  BlockerCallback* callback_;
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_BLOCKER_H_
diff --git a/common_audio/blocker_unittest.cc b/common_audio/blocker_unittest.cc
new file mode 100644
index 0000000..296efab
--- /dev/null
+++ b/common_audio/blocker_unittest.cc
@@ -0,0 +1,345 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "common_audio/blocker.h"
+
+#include "rtc_base/arraysize.h"
+#include "test/gtest.h"
+
+namespace {
+
+// Callback Function to add 3 to every sample in the signal.
+class PlusThreeBlockerCallback : public webrtc::BlockerCallback {
+ public:
+  void ProcessBlock(const float* const* input,
+                    size_t num_frames,
+                    size_t num_input_channels,
+                    size_t num_output_channels,
+                    float* const* output) override {
+    for (size_t i = 0; i < num_output_channels; ++i) {
+      for (size_t j = 0; j < num_frames; ++j) {
+        output[i][j] = input[i][j] + 3;
+      }
+    }
+  }
+};
+
+// No-op Callback Function.
+class CopyBlockerCallback : public webrtc::BlockerCallback {
+ public:
+  void ProcessBlock(const float* const* input,
+                    size_t num_frames,
+                    size_t num_input_channels,
+                    size_t num_output_channels,
+                    float* const* output) override {
+    for (size_t i = 0; i < num_output_channels; ++i) {
+      for (size_t j = 0; j < num_frames; ++j) {
+        output[i][j] = input[i][j];
+      }
+    }
+  }
+};
+
+}  // namespace
+
+namespace webrtc {
+
+// Tests blocking with a window that multiplies the signal by 2, a callback
+// that adds 3 to each sample in the signal, and different combinations of chunk
+// size, block size, and shift amount.
+class BlockerTest : public ::testing::Test {
+ protected:
+  void RunTest(Blocker* blocker,
+               size_t chunk_size,
+               size_t num_frames,
+               const float* const* input,
+               float* const* input_chunk,
+               float* const* output,
+               float* const* output_chunk,
+               size_t num_input_channels,
+               size_t num_output_channels) {
+    size_t start = 0;
+    size_t end = chunk_size - 1;
+    while (end < num_frames) {
+      CopyTo(input_chunk, 0, start, num_input_channels, chunk_size, input);
+      blocker->ProcessChunk(input_chunk,
+                            chunk_size,
+                            num_input_channels,
+                            num_output_channels,
+                            output_chunk);
+      CopyTo(output, start, 0, num_output_channels, chunk_size, output_chunk);
+
+      start += chunk_size;
+      end += chunk_size;
+    }
+  }
+
+  void ValidateSignalEquality(const float* const* expected,
+                              const float* const* actual,
+                              size_t num_channels,
+                              size_t num_frames) {
+    for (size_t i = 0; i < num_channels; ++i) {
+      for (size_t j = 0; j < num_frames; ++j) {
+        EXPECT_FLOAT_EQ(expected[i][j], actual[i][j]);
+      }
+    }
+  }
+
+  void ValidateInitialDelay(const float* const* output,
+                            size_t num_channels,
+                            size_t num_frames,
+                            size_t initial_delay) {
+    for (size_t i = 0; i < num_channels; ++i) {
+      for (size_t j = 0; j < num_frames; ++j) {
+        if (j < initial_delay) {
+          EXPECT_FLOAT_EQ(output[i][j], 0.f);
+        } else {
+          EXPECT_GT(output[i][j], 0.f);
+        }
+      }
+    }
+  }
+
+  static void CopyTo(float* const* dst,
+                     size_t start_index_dst,
+                     size_t start_index_src,
+                     size_t num_channels,
+                     size_t num_frames,
+                     const float* const* src) {
+    for (size_t i = 0; i < num_channels; ++i) {
+      memcpy(&dst[i][start_index_dst],
+             &src[i][start_index_src],
+             num_frames * sizeof(float));
+    }
+  }
+};
+
+TEST_F(BlockerTest, TestBlockerMutuallyPrimeChunkandBlockSize) {
+  const size_t kNumInputChannels = 3;
+  const size_t kNumOutputChannels = 2;
+  const size_t kNumFrames = 10;
+  const size_t kBlockSize = 4;
+  const size_t kChunkSize = 5;
+  const size_t kShiftAmount = 2;
+
+  const float kInput[kNumInputChannels][kNumFrames] = {
+      {1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+      {2, 2, 2, 2, 2, 2, 2, 2, 2, 2},
+      {3, 3, 3, 3, 3, 3, 3, 3, 3, 3}};
+  ChannelBuffer<float> input_cb(kNumFrames, kNumInputChannels);
+  input_cb.SetDataForTesting(kInput[0], sizeof(kInput) / sizeof(**kInput));
+
+  const float kExpectedOutput[kNumInputChannels][kNumFrames] = {
+      {6, 6, 12, 20, 20, 20, 20, 20, 20, 20},
+      {6, 6, 12, 28, 28, 28, 28, 28, 28, 28}};
+  ChannelBuffer<float> expected_output_cb(kNumFrames, kNumInputChannels);
+  expected_output_cb.SetDataForTesting(
+      kExpectedOutput[0], sizeof(kExpectedOutput) / sizeof(**kExpectedOutput));
+
+  const float kWindow[kBlockSize] = {2.f, 2.f, 2.f, 2.f};
+
+  ChannelBuffer<float> actual_output_cb(kNumFrames, kNumOutputChannels);
+  ChannelBuffer<float> input_chunk_cb(kChunkSize, kNumInputChannels);
+  ChannelBuffer<float> output_chunk_cb(kChunkSize, kNumOutputChannels);
+
+  PlusThreeBlockerCallback callback;
+  Blocker blocker(kChunkSize,
+                  kBlockSize,
+                  kNumInputChannels,
+                  kNumOutputChannels,
+                  kWindow,
+                  kShiftAmount,
+                  &callback);
+
+  RunTest(&blocker,
+          kChunkSize,
+          kNumFrames,
+          input_cb.channels(),
+          input_chunk_cb.channels(),
+          actual_output_cb.channels(),
+          output_chunk_cb.channels(),
+          kNumInputChannels,
+          kNumOutputChannels);
+
+  ValidateSignalEquality(expected_output_cb.channels(),
+                         actual_output_cb.channels(),
+                         kNumOutputChannels,
+                         kNumFrames);
+}
+
+TEST_F(BlockerTest, TestBlockerMutuallyPrimeShiftAndBlockSize) {
+  const size_t kNumInputChannels = 3;
+  const size_t kNumOutputChannels = 2;
+  const size_t kNumFrames = 12;
+  const size_t kBlockSize = 4;
+  const size_t kChunkSize = 6;
+  const size_t kShiftAmount = 3;
+
+  const float kInput[kNumInputChannels][kNumFrames] = {
+      {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+      {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2},
+      {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}};
+  ChannelBuffer<float> input_cb(kNumFrames, kNumInputChannels);
+  input_cb.SetDataForTesting(kInput[0], sizeof(kInput) / sizeof(**kInput));
+
+  const float kExpectedOutput[kNumOutputChannels][kNumFrames] = {
+      {6, 10, 10, 20, 10, 10, 20, 10, 10, 20, 10, 10},
+      {6, 14, 14, 28, 14, 14, 28, 14, 14, 28, 14, 14}};
+  ChannelBuffer<float> expected_output_cb(kNumFrames, kNumOutputChannels);
+  expected_output_cb.SetDataForTesting(
+      kExpectedOutput[0], sizeof(kExpectedOutput) / sizeof(**kExpectedOutput));
+
+  const float kWindow[kBlockSize] = {2.f, 2.f, 2.f, 2.f};
+
+  ChannelBuffer<float> actual_output_cb(kNumFrames, kNumOutputChannels);
+  ChannelBuffer<float> input_chunk_cb(kChunkSize, kNumInputChannels);
+  ChannelBuffer<float> output_chunk_cb(kChunkSize, kNumOutputChannels);
+
+  PlusThreeBlockerCallback callback;
+  Blocker blocker(kChunkSize,
+                  kBlockSize,
+                  kNumInputChannels,
+                  kNumOutputChannels,
+                  kWindow,
+                  kShiftAmount,
+                  &callback);
+
+  RunTest(&blocker,
+          kChunkSize,
+          kNumFrames,
+          input_cb.channels(),
+          input_chunk_cb.channels(),
+          actual_output_cb.channels(),
+          output_chunk_cb.channels(),
+          kNumInputChannels,
+          kNumOutputChannels);
+
+  ValidateSignalEquality(expected_output_cb.channels(),
+                         actual_output_cb.channels(),
+                         kNumOutputChannels,
+                         kNumFrames);
+}
+
+TEST_F(BlockerTest, TestBlockerNoOverlap) {
+  const size_t kNumInputChannels = 3;
+  const size_t kNumOutputChannels = 2;
+  const size_t kNumFrames = 12;
+  const size_t kBlockSize = 4;
+  const size_t kChunkSize = 4;
+  const size_t kShiftAmount = 4;
+
+  const float kInput[kNumInputChannels][kNumFrames] = {
+      {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+      {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2},
+      {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}};
+  ChannelBuffer<float> input_cb(kNumFrames, kNumInputChannels);
+  input_cb.SetDataForTesting(kInput[0], sizeof(kInput) / sizeof(**kInput));
+
+  const float kExpectedOutput[kNumOutputChannels][kNumFrames] = {
+      {10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+      {14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14}};
+  ChannelBuffer<float> expected_output_cb(kNumFrames, kNumOutputChannels);
+  expected_output_cb.SetDataForTesting(
+      kExpectedOutput[0], sizeof(kExpectedOutput) / sizeof(**kExpectedOutput));
+
+  const float kWindow[kBlockSize] = {2.f, 2.f, 2.f, 2.f};
+
+  ChannelBuffer<float> actual_output_cb(kNumFrames, kNumOutputChannels);
+  ChannelBuffer<float> input_chunk_cb(kChunkSize, kNumInputChannels);
+  ChannelBuffer<float> output_chunk_cb(kChunkSize, kNumOutputChannels);
+
+  PlusThreeBlockerCallback callback;
+  Blocker blocker(kChunkSize,
+                  kBlockSize,
+                  kNumInputChannels,
+                  kNumOutputChannels,
+                  kWindow,
+                  kShiftAmount,
+                  &callback);
+
+  RunTest(&blocker,
+          kChunkSize,
+          kNumFrames,
+          input_cb.channels(),
+          input_chunk_cb.channels(),
+          actual_output_cb.channels(),
+          output_chunk_cb.channels(),
+          kNumInputChannels,
+          kNumOutputChannels);
+
+  ValidateSignalEquality(expected_output_cb.channels(),
+                         actual_output_cb.channels(),
+                         kNumOutputChannels,
+                         kNumFrames);
+}
+
+TEST_F(BlockerTest, InitialDelaysAreMinimum) {
+  const size_t kNumInputChannels = 3;
+  const size_t kNumOutputChannels = 2;
+  const size_t kNumFrames = 1280;
+  const size_t kChunkSize[] =
+      {80, 80, 80, 80, 80, 80, 160, 160, 160, 160, 160, 160};
+  const size_t kBlockSize[] =
+      {64, 64, 64, 128, 128, 128, 128, 128, 128, 256, 256, 256};
+  const size_t kShiftAmount[] =
+      {16, 32, 64, 32, 64, 128, 32, 64, 128, 64, 128, 256};
+  const size_t kInitialDelay[] =
+      {48, 48, 48, 112, 112, 112, 96, 96, 96, 224, 224, 224};
+
+  float input[kNumInputChannels][kNumFrames];
+  for (size_t i = 0; i < kNumInputChannels; ++i) {
+    for (size_t j = 0; j < kNumFrames; ++j) {
+      input[i][j] = i + 1;
+    }
+  }
+  ChannelBuffer<float> input_cb(kNumFrames, kNumInputChannels);
+  input_cb.SetDataForTesting(input[0], sizeof(input) / sizeof(**input));
+
+  ChannelBuffer<float> output_cb(kNumFrames, kNumOutputChannels);
+
+  CopyBlockerCallback callback;
+
+  for (size_t i = 0; i < arraysize(kChunkSize); ++i) {
+    std::unique_ptr<float[]> window(new float[kBlockSize[i]]);
+    for (size_t j = 0; j < kBlockSize[i]; ++j) {
+      window[j] = 1.f;
+    }
+
+    ChannelBuffer<float> input_chunk_cb(kChunkSize[i], kNumInputChannels);
+    ChannelBuffer<float> output_chunk_cb(kChunkSize[i], kNumOutputChannels);
+
+    Blocker blocker(kChunkSize[i],
+                    kBlockSize[i],
+                    kNumInputChannels,
+                    kNumOutputChannels,
+                    window.get(),
+                    kShiftAmount[i],
+                    &callback);
+
+    RunTest(&blocker,
+            kChunkSize[i],
+            kNumFrames,
+            input_cb.channels(),
+            input_chunk_cb.channels(),
+            output_cb.channels(),
+            output_chunk_cb.channels(),
+            kNumInputChannels,
+            kNumOutputChannels);
+
+    ValidateInitialDelay(output_cb.channels(),
+                         kNumOutputChannels,
+                         kNumFrames,
+                         kInitialDelay[i]);
+  }
+}
+
+}  // namespace webrtc
diff --git a/common_audio/channel_buffer.cc b/common_audio/channel_buffer.cc
new file mode 100644
index 0000000..df45f6d
--- /dev/null
+++ b/common_audio/channel_buffer.cc
@@ -0,0 +1,79 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/channel_buffer.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+IFChannelBuffer::IFChannelBuffer(size_t num_frames,
+                                 size_t num_channels,
+                                 size_t num_bands)
+    : ivalid_(true),
+      ibuf_(num_frames, num_channels, num_bands),
+      fvalid_(true),
+      fbuf_(num_frames, num_channels, num_bands) {}
+
+IFChannelBuffer::~IFChannelBuffer() = default;
+
+ChannelBuffer<int16_t>* IFChannelBuffer::ibuf() {
+  RefreshI();
+  fvalid_ = false;
+  return &ibuf_;
+}
+
+ChannelBuffer<float>* IFChannelBuffer::fbuf() {
+  RefreshF();
+  ivalid_ = false;
+  return &fbuf_;
+}
+
+const ChannelBuffer<int16_t>* IFChannelBuffer::ibuf_const() const {
+  RefreshI();
+  return &ibuf_;
+}
+
+const ChannelBuffer<float>* IFChannelBuffer::fbuf_const() const {
+  RefreshF();
+  return &fbuf_;
+}
+
+void IFChannelBuffer::RefreshF() const {
+  if (!fvalid_) {
+    RTC_DCHECK(ivalid_);
+    fbuf_.set_num_channels(ibuf_.num_channels());
+    const int16_t* const* int_channels = ibuf_.channels();
+    float* const* float_channels = fbuf_.channels();
+    for (size_t i = 0; i < ibuf_.num_channels(); ++i) {
+      for (size_t j = 0; j < ibuf_.num_frames(); ++j) {
+        float_channels[i][j] = int_channels[i][j];
+      }
+    }
+    fvalid_ = true;
+  }
+}
+
+void IFChannelBuffer::RefreshI() const {
+  if (!ivalid_) {
+    RTC_DCHECK(fvalid_);
+    int16_t* const* int_channels = ibuf_.channels();
+    ibuf_.set_num_channels(fbuf_.num_channels());
+    const float* const* float_channels = fbuf_.channels();
+    for (size_t i = 0; i < fbuf_.num_channels(); ++i) {
+      FloatS16ToS16(float_channels[i],
+                    ibuf_.num_frames(),
+                    int_channels[i]);
+    }
+    ivalid_ = true;
+  }
+}
+
+}  // namespace webrtc
diff --git a/common_audio/channel_buffer.h b/common_audio/channel_buffer.h
new file mode 100644
index 0000000..024868c
--- /dev/null
+++ b/common_audio/channel_buffer.h
@@ -0,0 +1,186 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_CHANNEL_BUFFER_H_
+#define COMMON_AUDIO_CHANNEL_BUFFER_H_
+
+#include <string.h>
+
+#include <memory>
+
+#include "common_audio/include/audio_util.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/gtest_prod_util.h"
+
+namespace webrtc {
+
+// Helper to encapsulate a contiguous data buffer, full or split into frequency
+// bands, with access to a pointer arrays of the deinterleaved channels and
+// bands. The buffer is zero initialized at creation.
+//
+// The buffer structure is showed below for a 2 channel and 2 bands case:
+//
+// |data_|:
+// { [ --- b1ch1 --- ] [ --- b2ch1 --- ] [ --- b1ch2 --- ] [ --- b2ch2 --- ] }
+//
+// The pointer arrays for the same example are as follows:
+//
+// |channels_|:
+// { [ b1ch1* ] [ b1ch2* ] [ b2ch1* ] [ b2ch2* ] }
+//
+// |bands_|:
+// { [ b1ch1* ] [ b2ch1* ] [ b1ch2* ] [ b2ch2* ] }
+template <typename T>
+class ChannelBuffer {
+ public:
+  ChannelBuffer(size_t num_frames,
+                size_t num_channels,
+                size_t num_bands = 1)
+      : data_(new T[num_frames * num_channels]()),
+        channels_(new T*[num_channels * num_bands]),
+        bands_(new T*[num_channels * num_bands]),
+        num_frames_(num_frames),
+        num_frames_per_band_(num_frames / num_bands),
+        num_allocated_channels_(num_channels),
+        num_channels_(num_channels),
+        num_bands_(num_bands) {
+    for (size_t i = 0; i < num_allocated_channels_; ++i) {
+      for (size_t j = 0; j < num_bands_; ++j) {
+        channels_[j * num_allocated_channels_ + i] =
+            &data_[i * num_frames_ + j * num_frames_per_band_];
+        bands_[i * num_bands_ + j] = channels_[j * num_allocated_channels_ + i];
+      }
+    }
+  }
+
+  // Returns a pointer array to the full-band channels (or lower band channels).
+  // Usage:
+  // channels()[channel][sample].
+  // Where:
+  // 0 <= channel < |num_allocated_channels_|
+  // 0 <= sample < |num_frames_|
+  T* const* channels() { return channels(0); }
+  const T* const* channels() const { return channels(0); }
+
+  // Returns a pointer array to the channels for a specific band.
+  // Usage:
+  // channels(band)[channel][sample].
+  // Where:
+  // 0 <= band < |num_bands_|
+  // 0 <= channel < |num_allocated_channels_|
+  // 0 <= sample < |num_frames_per_band_|
+  const T* const* channels(size_t band) const {
+    RTC_DCHECK_LT(band, num_bands_);
+    return &channels_[band * num_allocated_channels_];
+  }
+  T* const* channels(size_t band) {
+    const ChannelBuffer<T>* t = this;
+    return const_cast<T* const*>(t->channels(band));
+  }
+
+  // Returns a pointer array to the bands for a specific channel.
+  // Usage:
+  // bands(channel)[band][sample].
+  // Where:
+  // 0 <= channel < |num_channels_|
+  // 0 <= band < |num_bands_|
+  // 0 <= sample < |num_frames_per_band_|
+  const T* const* bands(size_t channel) const {
+    RTC_DCHECK_LT(channel, num_channels_);
+    RTC_DCHECK_GE(channel, 0);
+    return &bands_[channel * num_bands_];
+  }
+  T* const* bands(size_t channel) {
+    const ChannelBuffer<T>* t = this;
+    return const_cast<T* const*>(t->bands(channel));
+  }
+
+  // Sets the |slice| pointers to the |start_frame| position for each channel.
+  // Returns |slice| for convenience.
+  const T* const* Slice(T** slice, size_t start_frame) const {
+    RTC_DCHECK_LT(start_frame, num_frames_);
+    for (size_t i = 0; i < num_channels_; ++i)
+      slice[i] = &channels_[i][start_frame];
+    return slice;
+  }
+  T** Slice(T** slice, size_t start_frame) {
+    const ChannelBuffer<T>* t = this;
+    return const_cast<T**>(t->Slice(slice, start_frame));
+  }
+
+  size_t num_frames() const { return num_frames_; }
+  size_t num_frames_per_band() const { return num_frames_per_band_; }
+  size_t num_channels() const { return num_channels_; }
+  size_t num_bands() const { return num_bands_; }
+  size_t size() const {return num_frames_ * num_allocated_channels_; }
+
+  void set_num_channels(size_t num_channels) {
+    RTC_DCHECK_LE(num_channels, num_allocated_channels_);
+    num_channels_ = num_channels;
+  }
+
+  void SetDataForTesting(const T* data, size_t size) {
+    RTC_CHECK_EQ(size, this->size());
+    memcpy(data_.get(), data, size * sizeof(*data));
+  }
+
+ private:
+  std::unique_ptr<T[]> data_;
+  std::unique_ptr<T* []> channels_;
+  std::unique_ptr<T* []> bands_;
+  const size_t num_frames_;
+  const size_t num_frames_per_band_;
+  // Number of channels the internal buffer holds.
+  const size_t num_allocated_channels_;
+  // Number of channels the user sees.
+  size_t num_channels_;
+  const size_t num_bands_;
+};
+
+// One int16_t and one float ChannelBuffer that are kept in sync. The sync is
+// broken when someone requests write access to either ChannelBuffer, and
+// reestablished when someone requests the outdated ChannelBuffer. It is
+// therefore safe to use the return value of ibuf_const() and fbuf_const()
+// until the next call to ibuf() or fbuf(), and the return value of ibuf() and
+// fbuf() until the next call to any of the other functions.
+class IFChannelBuffer {
+ public:
+  IFChannelBuffer(size_t num_frames, size_t num_channels, size_t num_bands = 1);
+  ~IFChannelBuffer();
+
+  ChannelBuffer<int16_t>* ibuf();
+  ChannelBuffer<float>* fbuf();
+  const ChannelBuffer<int16_t>* ibuf_const() const;
+  const ChannelBuffer<float>* fbuf_const() const;
+
+  size_t num_frames() const { return ibuf_.num_frames(); }
+  size_t num_frames_per_band() const { return ibuf_.num_frames_per_band(); }
+  size_t num_channels() const {
+    return ivalid_ ? ibuf_.num_channels() : fbuf_.num_channels();
+  }
+  void set_num_channels(size_t num_channels) {
+    ibuf_.set_num_channels(num_channels);
+    fbuf_.set_num_channels(num_channels);
+  }
+  size_t num_bands() const { return ibuf_.num_bands(); }
+
+ private:
+  void RefreshF() const;
+  void RefreshI() const;
+
+  mutable bool ivalid_;
+  mutable ChannelBuffer<int16_t> ibuf_;
+  mutable bool fvalid_;
+  mutable ChannelBuffer<float> fbuf_;
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_CHANNEL_BUFFER_H_
diff --git a/common_audio/channel_buffer_unittest.cc b/common_audio/channel_buffer_unittest.cc
new file mode 100644
index 0000000..196f87f
--- /dev/null
+++ b/common_audio/channel_buffer_unittest.cc
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/channel_buffer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+const size_t kNumFrames = 480u;
+const size_t kStereo = 2u;
+const size_t kMono = 1u;
+
+void ExpectNumChannels(const IFChannelBuffer& ifchb, size_t num_channels) {
+  EXPECT_EQ(ifchb.ibuf_const()->num_channels(), num_channels);
+  EXPECT_EQ(ifchb.fbuf_const()->num_channels(), num_channels);
+  EXPECT_EQ(ifchb.num_channels(), num_channels);
+}
+
+}  // namespace
+
+TEST(ChannelBufferTest, SetNumChannelsSetsNumChannels) {
+  ChannelBuffer<float> chb(kNumFrames, kStereo);
+  EXPECT_EQ(chb.num_channels(), kStereo);
+  chb.set_num_channels(kMono);
+  EXPECT_EQ(chb.num_channels(), kMono);
+}
+
+TEST(IFChannelBufferTest, SetNumChannelsSetsChannelBuffersNumChannels) {
+  IFChannelBuffer ifchb(kNumFrames, kStereo);
+  ExpectNumChannels(ifchb, kStereo);
+  ifchb.set_num_channels(kMono);
+  ExpectNumChannels(ifchb, kMono);
+}
+
+TEST(IFChannelBufferTest, SettingNumChannelsOfOneChannelBufferSetsTheOther) {
+  IFChannelBuffer ifchb(kNumFrames, kStereo);
+  ExpectNumChannels(ifchb, kStereo);
+  ifchb.ibuf()->set_num_channels(kMono);
+  ExpectNumChannels(ifchb, kMono);
+  ifchb.fbuf()->set_num_channels(kStereo);
+  ExpectNumChannels(ifchb, kStereo);
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST(ChannelBufferTest, SetNumChannelsDeathTest) {
+  ChannelBuffer<float> chb(kNumFrames, kMono);
+  EXPECT_DEATH(chb.set_num_channels(kStereo), "num_channels");
+}
+
+TEST(IFChannelBufferTest, SetNumChannelsDeathTest) {
+  IFChannelBuffer ifchb(kNumFrames, kMono);
+  EXPECT_DEATH(ifchb.ibuf()->set_num_channels(kStereo), "num_channels");
+}
+#endif
+
+}  // namespace webrtc
diff --git a/common_audio/fft4g.c b/common_audio/fft4g.c
new file mode 100644
index 0000000..9cf7b9f
--- /dev/null
+++ b/common_audio/fft4g.c
@@ -0,0 +1,1332 @@
+/*
+ * http://www.kurims.kyoto-u.ac.jp/~ooura/fft.html
+ * Copyright Takuya OOURA, 1996-2001
+ *
+ * You may use, copy, modify and distribute this code for any purpose (include
+ * commercial use) and without fee. Please refer to this package when you modify
+ * this code.
+ *
+ * Changes:
+ * Trivial type modifications by the WebRTC authors.
+ */
+
+/*
+Fast Fourier/Cosine/Sine Transform
+    dimension   :one
+    data length :power of 2
+    decimation  :frequency
+    radix       :4, 2
+    data        :inplace
+    table       :use
+functions
+    cdft: Complex Discrete Fourier Transform
+    rdft: Real Discrete Fourier Transform
+    ddct: Discrete Cosine Transform
+    ddst: Discrete Sine Transform
+    dfct: Cosine Transform of RDFT (Real Symmetric DFT)
+    dfst: Sine Transform of RDFT (Real Anti-symmetric DFT)
+function prototypes
+    void cdft(int, int, float *, int *, float *);
+    void rdft(size_t, int, float *, size_t *, float *);
+    void ddct(int, int, float *, int *, float *);
+    void ddst(int, int, float *, int *, float *);
+    void dfct(int, float *, float *, int *, float *);
+    void dfst(int, float *, float *, int *, float *);
+
+
+-------- Complex DFT (Discrete Fourier Transform) --------
+    [definition]
+        <case1>
+            X[k] = sum_j=0^n-1 x[j]*exp(2*pi*i*j*k/n), 0<=k<n
+        <case2>
+            X[k] = sum_j=0^n-1 x[j]*exp(-2*pi*i*j*k/n), 0<=k<n
+        (notes: sum_j=0^n-1 is a summation from j=0 to n-1)
+    [usage]
+        <case1>
+            ip[0] = 0; // first time only
+            cdft(2*n, 1, a, ip, w);
+        <case2>
+            ip[0] = 0; // first time only
+            cdft(2*n, -1, a, ip, w);
+    [parameters]
+        2*n            :data length (int)
+                        n >= 1, n = power of 2
+        a[0...2*n-1]   :input/output data (float *)
+                        input data
+                            a[2*j] = Re(x[j]),
+                            a[2*j+1] = Im(x[j]), 0<=j<n
+                        output data
+                            a[2*k] = Re(X[k]),
+                            a[2*k+1] = Im(X[k]), 0<=k<n
+        ip[0...*]      :work area for bit reversal (int *)
+                        length of ip >= 2+sqrt(n)
+                        strictly,
+                        length of ip >=
+                            2+(1<<(int)(log(n+0.5)/log(2))/2).
+                        ip[0],ip[1] are pointers of the cos/sin table.
+        w[0...n/2-1]   :cos/sin table (float *)
+                        w[],ip[] are initialized if ip[0] == 0.
+    [remark]
+        Inverse of
+            cdft(2*n, -1, a, ip, w);
+        is
+            cdft(2*n, 1, a, ip, w);
+            for (j = 0; j <= 2 * n - 1; j++) {
+                a[j] *= 1.0 / n;
+            }
+        .
+
+
+-------- Real DFT / Inverse of Real DFT --------
+    [definition]
+        <case1> RDFT
+            R[k] = sum_j=0^n-1 a[j]*cos(2*pi*j*k/n), 0<=k<=n/2
+            I[k] = sum_j=0^n-1 a[j]*sin(2*pi*j*k/n), 0<k<n/2
+        <case2> IRDFT (excluding scale)
+            a[k] = (R[0] + R[n/2]*cos(pi*k))/2 +
+                   sum_j=1^n/2-1 R[j]*cos(2*pi*j*k/n) +
+                   sum_j=1^n/2-1 I[j]*sin(2*pi*j*k/n), 0<=k<n
+    [usage]
+        <case1>
+            ip[0] = 0; // first time only
+            rdft(n, 1, a, ip, w);
+        <case2>
+            ip[0] = 0; // first time only
+            rdft(n, -1, a, ip, w);
+    [parameters]
+        n              :data length (size_t)
+                        n >= 2, n = power of 2
+        a[0...n-1]     :input/output data (float *)
+                        <case1>
+                            output data
+                                a[2*k] = R[k], 0<=k<n/2
+                                a[2*k+1] = I[k], 0<k<n/2
+                                a[1] = R[n/2]
+                        <case2>
+                            input data
+                                a[2*j] = R[j], 0<=j<n/2
+                                a[2*j+1] = I[j], 0<j<n/2
+                                a[1] = R[n/2]
+        ip[0...*]      :work area for bit reversal (size_t *)
+                        length of ip >= 2+sqrt(n/2)
+                        strictly,
+                        length of ip >=
+                            2+(1<<(int)(log(n/2+0.5)/log(2))/2).
+                        ip[0],ip[1] are pointers of the cos/sin table.
+        w[0...n/2-1]   :cos/sin table (float *)
+                        w[],ip[] are initialized if ip[0] == 0.
+    [remark]
+        Inverse of
+            rdft(n, 1, a, ip, w);
+        is
+            rdft(n, -1, a, ip, w);
+            for (j = 0; j <= n - 1; j++) {
+                a[j] *= 2.0 / n;
+            }
+        .
+
+
+-------- DCT (Discrete Cosine Transform) / Inverse of DCT --------
+    [definition]
+        <case1> IDCT (excluding scale)
+            C[k] = sum_j=0^n-1 a[j]*cos(pi*j*(k+1/2)/n), 0<=k<n
+        <case2> DCT
+            C[k] = sum_j=0^n-1 a[j]*cos(pi*(j+1/2)*k/n), 0<=k<n
+    [usage]
+        <case1>
+            ip[0] = 0; // first time only
+            ddct(n, 1, a, ip, w);
+        <case2>
+            ip[0] = 0; // first time only
+            ddct(n, -1, a, ip, w);
+    [parameters]
+        n              :data length (int)
+                        n >= 2, n = power of 2
+        a[0...n-1]     :input/output data (float *)
+                        output data
+                            a[k] = C[k], 0<=k<n
+        ip[0...*]      :work area for bit reversal (int *)
+                        length of ip >= 2+sqrt(n/2)
+                        strictly,
+                        length of ip >=
+                            2+(1<<(int)(log(n/2+0.5)/log(2))/2).
+                        ip[0],ip[1] are pointers of the cos/sin table.
+        w[0...n*5/4-1] :cos/sin table (float *)
+                        w[],ip[] are initialized if ip[0] == 0.
+    [remark]
+        Inverse of
+            ddct(n, -1, a, ip, w);
+        is
+            a[0] *= 0.5;
+            ddct(n, 1, a, ip, w);
+            for (j = 0; j <= n - 1; j++) {
+                a[j] *= 2.0 / n;
+            }
+        .
+
+
+-------- DST (Discrete Sine Transform) / Inverse of DST --------
+    [definition]
+        <case1> IDST (excluding scale)
+            S[k] = sum_j=1^n A[j]*sin(pi*j*(k+1/2)/n), 0<=k<n
+        <case2> DST
+            S[k] = sum_j=0^n-1 a[j]*sin(pi*(j+1/2)*k/n), 0<k<=n
+    [usage]
+        <case1>
+            ip[0] = 0; // first time only
+            ddst(n, 1, a, ip, w);
+        <case2>
+            ip[0] = 0; // first time only
+            ddst(n, -1, a, ip, w);
+    [parameters]
+        n              :data length (int)
+                        n >= 2, n = power of 2
+        a[0...n-1]     :input/output data (float *)
+                        <case1>
+                            input data
+                                a[j] = A[j], 0<j<n
+                                a[0] = A[n]
+                            output data
+                                a[k] = S[k], 0<=k<n
+                        <case2>
+                            output data
+                                a[k] = S[k], 0<k<n
+                                a[0] = S[n]
+        ip[0...*]      :work area for bit reversal (int *)
+                        length of ip >= 2+sqrt(n/2)
+                        strictly,
+                        length of ip >=
+                            2+(1<<(int)(log(n/2+0.5)/log(2))/2).
+                        ip[0],ip[1] are pointers of the cos/sin table.
+        w[0...n*5/4-1] :cos/sin table (float *)
+                        w[],ip[] are initialized if ip[0] == 0.
+    [remark]
+        Inverse of
+            ddst(n, -1, a, ip, w);
+        is
+            a[0] *= 0.5;
+            ddst(n, 1, a, ip, w);
+            for (j = 0; j <= n - 1; j++) {
+                a[j] *= 2.0 / n;
+            }
+        .
+
+
+-------- Cosine Transform of RDFT (Real Symmetric DFT) --------
+    [definition]
+        C[k] = sum_j=0^n a[j]*cos(pi*j*k/n), 0<=k<=n
+    [usage]
+        ip[0] = 0; // first time only
+        dfct(n, a, t, ip, w);
+    [parameters]
+        n              :data length - 1 (int)
+                        n >= 2, n = power of 2
+        a[0...n]       :input/output data (float *)
+                        output data
+                            a[k] = C[k], 0<=k<=n
+        t[0...n/2]     :work area (float *)
+        ip[0...*]      :work area for bit reversal (int *)
+                        length of ip >= 2+sqrt(n/4)
+                        strictly,
+                        length of ip >=
+                            2+(1<<(int)(log(n/4+0.5)/log(2))/2).
+                        ip[0],ip[1] are pointers of the cos/sin table.
+        w[0...n*5/8-1] :cos/sin table (float *)
+                        w[],ip[] are initialized if ip[0] == 0.
+    [remark]
+        Inverse of
+            a[0] *= 0.5;
+            a[n] *= 0.5;
+            dfct(n, a, t, ip, w);
+        is
+            a[0] *= 0.5;
+            a[n] *= 0.5;
+            dfct(n, a, t, ip, w);
+            for (j = 0; j <= n; j++) {
+                a[j] *= 2.0 / n;
+            }
+        .
+
+
+-------- Sine Transform of RDFT (Real Anti-symmetric DFT) --------
+    [definition]
+        S[k] = sum_j=1^n-1 a[j]*sin(pi*j*k/n), 0<k<n
+    [usage]
+        ip[0] = 0; // first time only
+        dfst(n, a, t, ip, w);
+    [parameters]
+        n              :data length + 1 (int)
+                        n >= 2, n = power of 2
+        a[0...n-1]     :input/output data (float *)
+                        output data
+                            a[k] = S[k], 0<k<n
+                        (a[0] is used for work area)
+        t[0...n/2-1]   :work area (float *)
+        ip[0...*]      :work area for bit reversal (int *)
+                        length of ip >= 2+sqrt(n/4)
+                        strictly,
+                        length of ip >=
+                            2+(1<<(int)(log(n/4+0.5)/log(2))/2).
+                        ip[0],ip[1] are pointers of the cos/sin table.
+        w[0...n*5/8-1] :cos/sin table (float *)
+                        w[],ip[] are initialized if ip[0] == 0.
+    [remark]
+        Inverse of
+            dfst(n, a, t, ip, w);
+        is
+            dfst(n, a, t, ip, w);
+            for (j = 1; j <= n - 1; j++) {
+                a[j] *= 2.0 / n;
+            }
+        .
+
+
+Appendix :
+    The cos/sin table is recalculated when the larger table required.
+    w[] and ip[] are compatible with all routines.
+*/
+
+#include <stddef.h>
+
+static void makewt(size_t nw, size_t *ip, float *w);
+static void makect(size_t nc, size_t *ip, float *c);
+static void bitrv2(size_t n, size_t *ip, float *a);
+#if 0  // Not used.
+static void bitrv2conj(int n, int *ip, float *a);
+#endif
+static void cftfsub(size_t n, float *a, float *w);
+static void cftbsub(size_t n, float *a, float *w);
+static void cft1st(size_t n, float *a, float *w);
+static void cftmdl(size_t n, size_t l, float *a, float *w);
+static void rftfsub(size_t n, float *a, size_t nc, float *c);
+static void rftbsub(size_t n, float *a, size_t nc, float *c);
+#if 0  // Not used.
+static void dctsub(int n, float *a, int nc, float *c)
+static void dstsub(int n, float *a, int nc, float *c)
+#endif
+
+
+#if 0  // Not used.
+void WebRtc_cdft(int n, int isgn, float *a, int *ip, float *w)
+{
+    if (n > (ip[0] << 2)) {
+        makewt(n >> 2, ip, w);
+    }
+    if (n > 4) {
+        if (isgn >= 0) {
+            bitrv2(n, ip + 2, a);
+            cftfsub(n, a, w);
+        } else {
+            bitrv2conj(n, ip + 2, a);
+            cftbsub(n, a, w);
+        }
+    } else if (n == 4) {
+        cftfsub(n, a, w);
+    }
+}
+#endif
+
+
+void WebRtc_rdft(size_t n, int isgn, float *a, size_t *ip, float *w)
+{
+    size_t nw, nc;
+    float xi;
+
+    nw = ip[0];
+    if (n > (nw << 2)) {
+        nw = n >> 2;
+        makewt(nw, ip, w);
+    }
+    nc = ip[1];
+    if (n > (nc << 2)) {
+        nc = n >> 2;
+        makect(nc, ip, w + nw);
+    }
+    if (isgn >= 0) {
+        if (n > 4) {
+            bitrv2(n, ip + 2, a);
+            cftfsub(n, a, w);
+            rftfsub(n, a, nc, w + nw);
+        } else if (n == 4) {
+            cftfsub(n, a, w);
+        }
+        xi = a[0] - a[1];
+        a[0] += a[1];
+        a[1] = xi;
+    } else {
+        a[1] = 0.5f * (a[0] - a[1]);
+        a[0] -= a[1];
+        if (n > 4) {
+            rftbsub(n, a, nc, w + nw);
+            bitrv2(n, ip + 2, a);
+            cftbsub(n, a, w);
+        } else if (n == 4) {
+            cftfsub(n, a, w);
+        }
+    }
+}
+
+#if 0  // Not used.
+static void ddct(int n, int isgn, float *a, int *ip, float *w)
+{
+    int j, nw, nc;
+    float xr;
+
+    nw = ip[0];
+    if (n > (nw << 2)) {
+        nw = n >> 2;
+        makewt(nw, ip, w);
+    }
+    nc = ip[1];
+    if (n > nc) {
+        nc = n;
+        makect(nc, ip, w + nw);
+    }
+    if (isgn < 0) {
+        xr = a[n - 1];
+        for (j = n - 2; j >= 2; j -= 2) {
+            a[j + 1] = a[j] - a[j - 1];
+            a[j] += a[j - 1];
+        }
+        a[1] = a[0] - xr;
+        a[0] += xr;
+        if (n > 4) {
+            rftbsub(n, a, nc, w + nw);
+            bitrv2(n, ip + 2, a);
+            cftbsub(n, a, w);
+        } else if (n == 4) {
+            cftfsub(n, a, w);
+        }
+    }
+    dctsub(n, a, nc, w + nw);
+    if (isgn >= 0) {
+        if (n > 4) {
+            bitrv2(n, ip + 2, a);
+            cftfsub(n, a, w);
+            rftfsub(n, a, nc, w + nw);
+        } else if (n == 4) {
+            cftfsub(n, a, w);
+        }
+        xr = a[0] - a[1];
+        a[0] += a[1];
+        for (j = 2; j < n; j += 2) {
+            a[j - 1] = a[j] - a[j + 1];
+            a[j] += a[j + 1];
+        }
+        a[n - 1] = xr;
+    }
+}
+
+
+static void ddst(int n, int isgn, float *a, int *ip, float *w)
+{
+    int j, nw, nc;
+    float xr;
+
+    nw = ip[0];
+    if (n > (nw << 2)) {
+        nw = n >> 2;
+        makewt(nw, ip, w);
+    }
+    nc = ip[1];
+    if (n > nc) {
+        nc = n;
+        makect(nc, ip, w + nw);
+    }
+    if (isgn < 0) {
+        xr = a[n - 1];
+        for (j = n - 2; j >= 2; j -= 2) {
+            a[j + 1] = -a[j] - a[j - 1];
+            a[j] -= a[j - 1];
+        }
+        a[1] = a[0] + xr;
+        a[0] -= xr;
+        if (n > 4) {
+            rftbsub(n, a, nc, w + nw);
+            bitrv2(n, ip + 2, a);
+            cftbsub(n, a, w);
+        } else if (n == 4) {
+            cftfsub(n, a, w);
+        }
+    }
+    dstsub(n, a, nc, w + nw);
+    if (isgn >= 0) {
+        if (n > 4) {
+            bitrv2(n, ip + 2, a);
+            cftfsub(n, a, w);
+            rftfsub(n, a, nc, w + nw);
+        } else if (n == 4) {
+            cftfsub(n, a, w);
+        }
+        xr = a[0] - a[1];
+        a[0] += a[1];
+        for (j = 2; j < n; j += 2) {
+            a[j - 1] = -a[j] - a[j + 1];
+            a[j] -= a[j + 1];
+        }
+        a[n - 1] = -xr;
+    }
+}
+
+
+static void dfct(int n, float *a, float *t, int *ip, float *w)
+{
+    int j, k, l, m, mh, nw, nc;
+    float xr, xi, yr, yi;
+
+    nw = ip[0];
+    if (n > (nw << 3)) {
+        nw = n >> 3;
+        makewt(nw, ip, w);
+    }
+    nc = ip[1];
+    if (n > (nc << 1)) {
+        nc = n >> 1;
+        makect(nc, ip, w + nw);
+    }
+    m = n >> 1;
+    yi = a[m];
+    xi = a[0] + a[n];
+    a[0] -= a[n];
+    t[0] = xi - yi;
+    t[m] = xi + yi;
+    if (n > 2) {
+        mh = m >> 1;
+        for (j = 1; j < mh; j++) {
+            k = m - j;
+            xr = a[j] - a[n - j];
+            xi = a[j] + a[n - j];
+            yr = a[k] - a[n - k];
+            yi = a[k] + a[n - k];
+            a[j] = xr;
+            a[k] = yr;
+            t[j] = xi - yi;
+            t[k] = xi + yi;
+        }
+        t[mh] = a[mh] + a[n - mh];
+        a[mh] -= a[n - mh];
+        dctsub(m, a, nc, w + nw);
+        if (m > 4) {
+            bitrv2(m, ip + 2, a);
+            cftfsub(m, a, w);
+            rftfsub(m, a, nc, w + nw);
+        } else if (m == 4) {
+            cftfsub(m, a, w);
+        }
+        a[n - 1] = a[0] - a[1];
+        a[1] = a[0] + a[1];
+        for (j = m - 2; j >= 2; j -= 2) {
+            a[2 * j + 1] = a[j] + a[j + 1];
+            a[2 * j - 1] = a[j] - a[j + 1];
+        }
+        l = 2;
+        m = mh;
+        while (m >= 2) {
+            dctsub(m, t, nc, w + nw);
+            if (m > 4) {
+                bitrv2(m, ip + 2, t);
+                cftfsub(m, t, w);
+                rftfsub(m, t, nc, w + nw);
+            } else if (m == 4) {
+                cftfsub(m, t, w);
+            }
+            a[n - l] = t[0] - t[1];
+            a[l] = t[0] + t[1];
+            k = 0;
+            for (j = 2; j < m; j += 2) {
+                k += l << 2;
+                a[k - l] = t[j] - t[j + 1];
+                a[k + l] = t[j] + t[j + 1];
+            }
+            l <<= 1;
+            mh = m >> 1;
+            for (j = 0; j < mh; j++) {
+                k = m - j;
+                t[j] = t[m + k] - t[m + j];
+                t[k] = t[m + k] + t[m + j];
+            }
+            t[mh] = t[m + mh];
+            m = mh;
+        }
+        a[l] = t[0];
+        a[n] = t[2] - t[1];
+        a[0] = t[2] + t[1];
+    } else {
+        a[1] = a[0];
+        a[2] = t[0];
+        a[0] = t[1];
+    }
+}
+
+static void dfst(int n, float *a, float *t, int *ip, float *w)
+{
+    int j, k, l, m, mh, nw, nc;
+    float xr, xi, yr, yi;
+
+    nw = ip[0];
+    if (n > (nw << 3)) {
+        nw = n >> 3;
+        makewt(nw, ip, w);
+    }
+    nc = ip[1];
+    if (n > (nc << 1)) {
+        nc = n >> 1;
+        makect(nc, ip, w + nw);
+    }
+    if (n > 2) {
+        m = n >> 1;
+        mh = m >> 1;
+        for (j = 1; j < mh; j++) {
+            k = m - j;
+            xr = a[j] + a[n - j];
+            xi = a[j] - a[n - j];
+            yr = a[k] + a[n - k];
+            yi = a[k] - a[n - k];
+            a[j] = xr;
+            a[k] = yr;
+            t[j] = xi + yi;
+            t[k] = xi - yi;
+        }
+        t[0] = a[mh] - a[n - mh];
+        a[mh] += a[n - mh];
+        a[0] = a[m];
+        dstsub(m, a, nc, w + nw);
+        if (m > 4) {
+            bitrv2(m, ip + 2, a);
+            cftfsub(m, a, w);
+            rftfsub(m, a, nc, w + nw);
+        } else if (m == 4) {
+            cftfsub(m, a, w);
+        }
+        a[n - 1] = a[1] - a[0];
+        a[1] = a[0] + a[1];
+        for (j = m - 2; j >= 2; j -= 2) {
+            a[2 * j + 1] = a[j] - a[j + 1];
+            a[2 * j - 1] = -a[j] - a[j + 1];
+        }
+        l = 2;
+        m = mh;
+        while (m >= 2) {
+            dstsub(m, t, nc, w + nw);
+            if (m > 4) {
+                bitrv2(m, ip + 2, t);
+                cftfsub(m, t, w);
+                rftfsub(m, t, nc, w + nw);
+            } else if (m == 4) {
+                cftfsub(m, t, w);
+            }
+            a[n - l] = t[1] - t[0];
+            a[l] = t[0] + t[1];
+            k = 0;
+            for (j = 2; j < m; j += 2) {
+                k += l << 2;
+                a[k - l] = -t[j] - t[j + 1];
+                a[k + l] = t[j] - t[j + 1];
+            }
+            l <<= 1;
+            mh = m >> 1;
+            for (j = 1; j < mh; j++) {
+                k = m - j;
+                t[j] = t[m + k] + t[m + j];
+                t[k] = t[m + k] - t[m + j];
+            }
+            t[0] = t[m + mh];
+            m = mh;
+        }
+        a[l] = t[0];
+    }
+    a[0] = 0;
+}
+#endif  // Not used.
+
+
+/* -------- initializing routines -------- */
+
+
+#include <math.h>
+
+static void makewt(size_t nw, size_t *ip, float *w)
+{
+    size_t j, nwh;
+    float delta, x, y;
+
+    ip[0] = nw;
+    ip[1] = 1;
+    if (nw > 2) {
+        nwh = nw >> 1;
+        delta = atanf(1.0f) / nwh;
+        w[0] = 1;
+        w[1] = 0;
+        w[nwh] = (float)cos(delta * nwh);
+        w[nwh + 1] = w[nwh];
+        if (nwh > 2) {
+            for (j = 2; j < nwh; j += 2) {
+                x = (float)cos(delta * j);
+                y = (float)sin(delta * j);
+                w[j] = x;
+                w[j + 1] = y;
+                w[nw - j] = y;
+                w[nw - j + 1] = x;
+            }
+            bitrv2(nw, ip + 2, w);
+        }
+    }
+}
+
+
+static void makect(size_t nc, size_t *ip, float *c)
+{
+    size_t j, nch;
+    float delta;
+
+    ip[1] = nc;
+    if (nc > 1) {
+        nch = nc >> 1;
+        delta = atanf(1.0f) / nch;
+        c[0] = (float)cos(delta * nch);
+        c[nch] = 0.5f * c[0];
+        for (j = 1; j < nch; j++) {
+            c[j] = 0.5f * (float)cos(delta * j);
+            c[nc - j] = 0.5f * (float)sin(delta * j);
+        }
+    }
+}
+
+
+/* -------- child routines -------- */
+
+
+static void bitrv2(size_t n, size_t *ip, float *a)
+{
+    size_t j, j1, k, k1, l, m, m2;
+    float xr, xi, yr, yi;
+
+    ip[0] = 0;
+    l = n;
+    m = 1;
+    while ((m << 3) < l) {
+        l >>= 1;
+        for (j = 0; j < m; j++) {
+            ip[m + j] = ip[j] + l;
+        }
+        m <<= 1;
+    }
+    m2 = 2 * m;
+    if ((m << 3) == l) {
+        for (k = 0; k < m; k++) {
+            for (j = 0; j < k; j++) {
+                j1 = 2 * j + ip[k];
+                k1 = 2 * k + ip[j];
+                xr = a[j1];
+                xi = a[j1 + 1];
+                yr = a[k1];
+                yi = a[k1 + 1];
+                a[j1] = yr;
+                a[j1 + 1] = yi;
+                a[k1] = xr;
+                a[k1 + 1] = xi;
+                j1 += m2;
+                k1 += 2 * m2;
+                xr = a[j1];
+                xi = a[j1 + 1];
+                yr = a[k1];
+                yi = a[k1 + 1];
+                a[j1] = yr;
+                a[j1 + 1] = yi;
+                a[k1] = xr;
+                a[k1 + 1] = xi;
+                j1 += m2;
+                k1 -= m2;
+                xr = a[j1];
+                xi = a[j1 + 1];
+                yr = a[k1];
+                yi = a[k1 + 1];
+                a[j1] = yr;
+                a[j1 + 1] = yi;
+                a[k1] = xr;
+                a[k1 + 1] = xi;
+                j1 += m2;
+                k1 += 2 * m2;
+                xr = a[j1];
+                xi = a[j1 + 1];
+                yr = a[k1];
+                yi = a[k1 + 1];
+                a[j1] = yr;
+                a[j1 + 1] = yi;
+                a[k1] = xr;
+                a[k1 + 1] = xi;
+            }
+            j1 = 2 * k + m2 + ip[k];
+            k1 = j1 + m2;
+            xr = a[j1];
+            xi = a[j1 + 1];
+            yr = a[k1];
+            yi = a[k1 + 1];
+            a[j1] = yr;
+            a[j1 + 1] = yi;
+            a[k1] = xr;
+            a[k1 + 1] = xi;
+        }
+    } else {
+        for (k = 1; k < m; k++) {
+            for (j = 0; j < k; j++) {
+                j1 = 2 * j + ip[k];
+                k1 = 2 * k + ip[j];
+                xr = a[j1];
+                xi = a[j1 + 1];
+                yr = a[k1];
+                yi = a[k1 + 1];
+                a[j1] = yr;
+                a[j1 + 1] = yi;
+                a[k1] = xr;
+                a[k1 + 1] = xi;
+                j1 += m2;
+                k1 += m2;
+                xr = a[j1];
+                xi = a[j1 + 1];
+                yr = a[k1];
+                yi = a[k1 + 1];
+                a[j1] = yr;
+                a[j1 + 1] = yi;
+                a[k1] = xr;
+                a[k1 + 1] = xi;
+            }
+        }
+    }
+}
+
+#if 0  // Not used.
+static void bitrv2conj(int n, int *ip, float *a)
+{
+    int j, j1, k, k1, l, m, m2;
+    float xr, xi, yr, yi;
+
+    ip[0] = 0;
+    l = n;
+    m = 1;
+    while ((m << 3) < l) {
+        l >>= 1;
+        for (j = 0; j < m; j++) {
+            ip[m + j] = ip[j] + l;
+        }
+        m <<= 1;
+    }
+    m2 = 2 * m;
+    if ((m << 3) == l) {
+        for (k = 0; k < m; k++) {
+            for (j = 0; j < k; j++) {
+                j1 = 2 * j + ip[k];
+                k1 = 2 * k + ip[j];
+                xr = a[j1];
+                xi = -a[j1 + 1];
+                yr = a[k1];
+                yi = -a[k1 + 1];
+                a[j1] = yr;
+                a[j1 + 1] = yi;
+                a[k1] = xr;
+                a[k1 + 1] = xi;
+                j1 += m2;
+                k1 += 2 * m2;
+                xr = a[j1];
+                xi = -a[j1 + 1];
+                yr = a[k1];
+                yi = -a[k1 + 1];
+                a[j1] = yr;
+                a[j1 + 1] = yi;
+                a[k1] = xr;
+                a[k1 + 1] = xi;
+                j1 += m2;
+                k1 -= m2;
+                xr = a[j1];
+                xi = -a[j1 + 1];
+                yr = a[k1];
+                yi = -a[k1 + 1];
+                a[j1] = yr;
+                a[j1 + 1] = yi;
+                a[k1] = xr;
+                a[k1 + 1] = xi;
+                j1 += m2;
+                k1 += 2 * m2;
+                xr = a[j1];
+                xi = -a[j1 + 1];
+                yr = a[k1];
+                yi = -a[k1 + 1];
+                a[j1] = yr;
+                a[j1 + 1] = yi;
+                a[k1] = xr;
+                a[k1 + 1] = xi;
+            }
+            k1 = 2 * k + ip[k];
+            a[k1 + 1] = -a[k1 + 1];
+            j1 = k1 + m2;
+            k1 = j1 + m2;
+            xr = a[j1];
+            xi = -a[j1 + 1];
+            yr = a[k1];
+            yi = -a[k1 + 1];
+            a[j1] = yr;
+            a[j1 + 1] = yi;
+            a[k1] = xr;
+            a[k1 + 1] = xi;
+            k1 += m2;
+            a[k1 + 1] = -a[k1 + 1];
+        }
+    } else {
+        a[1] = -a[1];
+        a[m2 + 1] = -a[m2 + 1];
+        for (k = 1; k < m; k++) {
+            for (j = 0; j < k; j++) {
+                j1 = 2 * j + ip[k];
+                k1 = 2 * k + ip[j];
+                xr = a[j1];
+                xi = -a[j1 + 1];
+                yr = a[k1];
+                yi = -a[k1 + 1];
+                a[j1] = yr;
+                a[j1 + 1] = yi;
+                a[k1] = xr;
+                a[k1 + 1] = xi;
+                j1 += m2;
+                k1 += m2;
+                xr = a[j1];
+                xi = -a[j1 + 1];
+                yr = a[k1];
+                yi = -a[k1 + 1];
+                a[j1] = yr;
+                a[j1 + 1] = yi;
+                a[k1] = xr;
+                a[k1 + 1] = xi;
+            }
+            k1 = 2 * k + ip[k];
+            a[k1 + 1] = -a[k1 + 1];
+            a[k1 + m2 + 1] = -a[k1 + m2 + 1];
+        }
+    }
+}
+#endif
+
+static void cftfsub(size_t n, float *a, float *w)
+{
+    size_t j, j1, j2, j3, l;
+    float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
+
+    l = 2;
+    if (n > 8) {
+        cft1st(n, a, w);
+        l = 8;
+        while ((l << 2) < n) {
+            cftmdl(n, l, a, w);
+            l <<= 2;
+        }
+    }
+    if ((l << 2) == n) {
+        for (j = 0; j < l; j += 2) {
+            j1 = j + l;
+            j2 = j1 + l;
+            j3 = j2 + l;
+            x0r = a[j] + a[j1];
+            x0i = a[j + 1] + a[j1 + 1];
+            x1r = a[j] - a[j1];
+            x1i = a[j + 1] - a[j1 + 1];
+            x2r = a[j2] + a[j3];
+            x2i = a[j2 + 1] + a[j3 + 1];
+            x3r = a[j2] - a[j3];
+            x3i = a[j2 + 1] - a[j3 + 1];
+            a[j] = x0r + x2r;
+            a[j + 1] = x0i + x2i;
+            a[j2] = x0r - x2r;
+            a[j2 + 1] = x0i - x2i;
+            a[j1] = x1r - x3i;
+            a[j1 + 1] = x1i + x3r;
+            a[j3] = x1r + x3i;
+            a[j3 + 1] = x1i - x3r;
+        }
+    } else {
+        for (j = 0; j < l; j += 2) {
+            j1 = j + l;
+            x0r = a[j] - a[j1];
+            x0i = a[j + 1] - a[j1 + 1];
+            a[j] += a[j1];
+            a[j + 1] += a[j1 + 1];
+            a[j1] = x0r;
+            a[j1 + 1] = x0i;
+        }
+    }
+}
+
+
+static void cftbsub(size_t n, float *a, float *w)
+{
+    size_t j, j1, j2, j3, l;
+    float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
+
+    l = 2;
+    if (n > 8) {
+        cft1st(n, a, w);
+        l = 8;
+        while ((l << 2) < n) {
+            cftmdl(n, l, a, w);
+            l <<= 2;
+        }
+    }
+    if ((l << 2) == n) {
+        for (j = 0; j < l; j += 2) {
+            j1 = j + l;
+            j2 = j1 + l;
+            j3 = j2 + l;
+            x0r = a[j] + a[j1];
+            x0i = -a[j + 1] - a[j1 + 1];
+            x1r = a[j] - a[j1];
+            x1i = -a[j + 1] + a[j1 + 1];
+            x2r = a[j2] + a[j3];
+            x2i = a[j2 + 1] + a[j3 + 1];
+            x3r = a[j2] - a[j3];
+            x3i = a[j2 + 1] - a[j3 + 1];
+            a[j] = x0r + x2r;
+            a[j + 1] = x0i - x2i;
+            a[j2] = x0r - x2r;
+            a[j2 + 1] = x0i + x2i;
+            a[j1] = x1r - x3i;
+            a[j1 + 1] = x1i - x3r;
+            a[j3] = x1r + x3i;
+            a[j3 + 1] = x1i + x3r;
+        }
+    } else {
+        for (j = 0; j < l; j += 2) {
+            j1 = j + l;
+            x0r = a[j] - a[j1];
+            x0i = -a[j + 1] + a[j1 + 1];
+            a[j] += a[j1];
+            a[j + 1] = -a[j + 1] - a[j1 + 1];
+            a[j1] = x0r;
+            a[j1 + 1] = x0i;
+        }
+    }
+}
+
+
+static void cft1st(size_t n, float *a, float *w)
+{
+    size_t j, k1, k2;
+    float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i;
+    float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
+
+    x0r = a[0] + a[2];
+    x0i = a[1] + a[3];
+    x1r = a[0] - a[2];
+    x1i = a[1] - a[3];
+    x2r = a[4] + a[6];
+    x2i = a[5] + a[7];
+    x3r = a[4] - a[6];
+    x3i = a[5] - a[7];
+    a[0] = x0r + x2r;
+    a[1] = x0i + x2i;
+    a[4] = x0r - x2r;
+    a[5] = x0i - x2i;
+    a[2] = x1r - x3i;
+    a[3] = x1i + x3r;
+    a[6] = x1r + x3i;
+    a[7] = x1i - x3r;
+    wk1r = w[2];
+    x0r = a[8] + a[10];
+    x0i = a[9] + a[11];
+    x1r = a[8] - a[10];
+    x1i = a[9] - a[11];
+    x2r = a[12] + a[14];
+    x2i = a[13] + a[15];
+    x3r = a[12] - a[14];
+    x3i = a[13] - a[15];
+    a[8] = x0r + x2r;
+    a[9] = x0i + x2i;
+    a[12] = x2i - x0i;
+    a[13] = x0r - x2r;
+    x0r = x1r - x3i;
+    x0i = x1i + x3r;
+    a[10] = wk1r * (x0r - x0i);
+    a[11] = wk1r * (x0r + x0i);
+    x0r = x3i + x1r;
+    x0i = x3r - x1i;
+    a[14] = wk1r * (x0i - x0r);
+    a[15] = wk1r * (x0i + x0r);
+    k1 = 0;
+    for (j = 16; j < n; j += 16) {
+        k1 += 2;
+        k2 = 2 * k1;
+        wk2r = w[k1];
+        wk2i = w[k1 + 1];
+        wk1r = w[k2];
+        wk1i = w[k2 + 1];
+        wk3r = wk1r - 2 * wk2i * wk1i;
+        wk3i = 2 * wk2i * wk1r - wk1i;
+        x0r = a[j] + a[j + 2];
+        x0i = a[j + 1] + a[j + 3];
+        x1r = a[j] - a[j + 2];
+        x1i = a[j + 1] - a[j + 3];
+        x2r = a[j + 4] + a[j + 6];
+        x2i = a[j + 5] + a[j + 7];
+        x3r = a[j + 4] - a[j + 6];
+        x3i = a[j + 5] - a[j + 7];
+        a[j] = x0r + x2r;
+        a[j + 1] = x0i + x2i;
+        x0r -= x2r;
+        x0i -= x2i;
+        a[j + 4] = wk2r * x0r - wk2i * x0i;
+        a[j + 5] = wk2r * x0i + wk2i * x0r;
+        x0r = x1r - x3i;
+        x0i = x1i + x3r;
+        a[j + 2] = wk1r * x0r - wk1i * x0i;
+        a[j + 3] = wk1r * x0i + wk1i * x0r;
+        x0r = x1r + x3i;
+        x0i = x1i - x3r;
+        a[j + 6] = wk3r * x0r - wk3i * x0i;
+        a[j + 7] = wk3r * x0i + wk3i * x0r;
+        wk1r = w[k2 + 2];
+        wk1i = w[k2 + 3];
+        wk3r = wk1r - 2 * wk2r * wk1i;
+        wk3i = 2 * wk2r * wk1r - wk1i;
+        x0r = a[j + 8] + a[j + 10];
+        x0i = a[j + 9] + a[j + 11];
+        x1r = a[j + 8] - a[j + 10];
+        x1i = a[j + 9] - a[j + 11];
+        x2r = a[j + 12] + a[j + 14];
+        x2i = a[j + 13] + a[j + 15];
+        x3r = a[j + 12] - a[j + 14];
+        x3i = a[j + 13] - a[j + 15];
+        a[j + 8] = x0r + x2r;
+        a[j + 9] = x0i + x2i;
+        x0r -= x2r;
+        x0i -= x2i;
+        a[j + 12] = -wk2i * x0r - wk2r * x0i;
+        a[j + 13] = -wk2i * x0i + wk2r * x0r;
+        x0r = x1r - x3i;
+        x0i = x1i + x3r;
+        a[j + 10] = wk1r * x0r - wk1i * x0i;
+        a[j + 11] = wk1r * x0i + wk1i * x0r;
+        x0r = x1r + x3i;
+        x0i = x1i - x3r;
+        a[j + 14] = wk3r * x0r - wk3i * x0i;
+        a[j + 15] = wk3r * x0i + wk3i * x0r;
+    }
+}
+
+
+static void cftmdl(size_t n, size_t l, float *a, float *w)
+{
+    size_t j, j1, j2, j3, k, k1, k2, m, m2;
+    float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i;
+    float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
+
+    m = l << 2;
+    for (j = 0; j < l; j += 2) {
+        j1 = j + l;
+        j2 = j1 + l;
+        j3 = j2 + l;
+        x0r = a[j] + a[j1];
+        x0i = a[j + 1] + a[j1 + 1];
+        x1r = a[j] - a[j1];
+        x1i = a[j + 1] - a[j1 + 1];
+        x2r = a[j2] + a[j3];
+        x2i = a[j2 + 1] + a[j3 + 1];
+        x3r = a[j2] - a[j3];
+        x3i = a[j2 + 1] - a[j3 + 1];
+        a[j] = x0r + x2r;
+        a[j + 1] = x0i + x2i;
+        a[j2] = x0r - x2r;
+        a[j2 + 1] = x0i - x2i;
+        a[j1] = x1r - x3i;
+        a[j1 + 1] = x1i + x3r;
+        a[j3] = x1r + x3i;
+        a[j3 + 1] = x1i - x3r;
+    }
+    wk1r = w[2];
+    for (j = m; j < l + m; j += 2) {
+        j1 = j + l;
+        j2 = j1 + l;
+        j3 = j2 + l;
+        x0r = a[j] + a[j1];
+        x0i = a[j + 1] + a[j1 + 1];
+        x1r = a[j] - a[j1];
+        x1i = a[j + 1] - a[j1 + 1];
+        x2r = a[j2] + a[j3];
+        x2i = a[j2 + 1] + a[j3 + 1];
+        x3r = a[j2] - a[j3];
+        x3i = a[j2 + 1] - a[j3 + 1];
+        a[j] = x0r + x2r;
+        a[j + 1] = x0i + x2i;
+        a[j2] = x2i - x0i;
+        a[j2 + 1] = x0r - x2r;
+        x0r = x1r - x3i;
+        x0i = x1i + x3r;
+        a[j1] = wk1r * (x0r - x0i);
+        a[j1 + 1] = wk1r * (x0r + x0i);
+        x0r = x3i + x1r;
+        x0i = x3r - x1i;
+        a[j3] = wk1r * (x0i - x0r);
+        a[j3 + 1] = wk1r * (x0i + x0r);
+    }
+    k1 = 0;
+    m2 = 2 * m;
+    for (k = m2; k < n; k += m2) {
+        k1 += 2;
+        k2 = 2 * k1;
+        wk2r = w[k1];
+        wk2i = w[k1 + 1];
+        wk1r = w[k2];
+        wk1i = w[k2 + 1];
+        wk3r = wk1r - 2 * wk2i * wk1i;
+        wk3i = 2 * wk2i * wk1r - wk1i;
+        for (j = k; j < l + k; j += 2) {
+            j1 = j + l;
+            j2 = j1 + l;
+            j3 = j2 + l;
+            x0r = a[j] + a[j1];
+            x0i = a[j + 1] + a[j1 + 1];
+            x1r = a[j] - a[j1];
+            x1i = a[j + 1] - a[j1 + 1];
+            x2r = a[j2] + a[j3];
+            x2i = a[j2 + 1] + a[j3 + 1];
+            x3r = a[j2] - a[j3];
+            x3i = a[j2 + 1] - a[j3 + 1];
+            a[j] = x0r + x2r;
+            a[j + 1] = x0i + x2i;
+            x0r -= x2r;
+            x0i -= x2i;
+            a[j2] = wk2r * x0r - wk2i * x0i;
+            a[j2 + 1] = wk2r * x0i + wk2i * x0r;
+            x0r = x1r - x3i;
+            x0i = x1i + x3r;
+            a[j1] = wk1r * x0r - wk1i * x0i;
+            a[j1 + 1] = wk1r * x0i + wk1i * x0r;
+            x0r = x1r + x3i;
+            x0i = x1i - x3r;
+            a[j3] = wk3r * x0r - wk3i * x0i;
+            a[j3 + 1] = wk3r * x0i + wk3i * x0r;
+        }
+        wk1r = w[k2 + 2];
+        wk1i = w[k2 + 3];
+        wk3r = wk1r - 2 * wk2r * wk1i;
+        wk3i = 2 * wk2r * wk1r - wk1i;
+        for (j = k + m; j < l + (k + m); j += 2) {
+            j1 = j + l;
+            j2 = j1 + l;
+            j3 = j2 + l;
+            x0r = a[j] + a[j1];
+            x0i = a[j + 1] + a[j1 + 1];
+            x1r = a[j] - a[j1];
+            x1i = a[j + 1] - a[j1 + 1];
+            x2r = a[j2] + a[j3];
+            x2i = a[j2 + 1] + a[j3 + 1];
+            x3r = a[j2] - a[j3];
+            x3i = a[j2 + 1] - a[j3 + 1];
+            a[j] = x0r + x2r;
+            a[j + 1] = x0i + x2i;
+            x0r -= x2r;
+            x0i -= x2i;
+            a[j2] = -wk2i * x0r - wk2r * x0i;
+            a[j2 + 1] = -wk2i * x0i + wk2r * x0r;
+            x0r = x1r - x3i;
+            x0i = x1i + x3r;
+            a[j1] = wk1r * x0r - wk1i * x0i;
+            a[j1 + 1] = wk1r * x0i + wk1i * x0r;
+            x0r = x1r + x3i;
+            x0i = x1i - x3r;
+            a[j3] = wk3r * x0r - wk3i * x0i;
+            a[j3 + 1] = wk3r * x0i + wk3i * x0r;
+        }
+    }
+}
+
+
+static void rftfsub(size_t n, float *a, size_t nc, float *c)
+{
+    size_t j, k, kk, ks, m;
+    float wkr, wki, xr, xi, yr, yi;
+
+    m = n >> 1;
+    ks = 2 * nc / m;
+    kk = 0;
+    for (j = 2; j < m; j += 2) {
+        k = n - j;
+        kk += ks;
+        wkr = 0.5f - c[nc - kk];
+        wki = c[kk];
+        xr = a[j] - a[k];
+        xi = a[j + 1] + a[k + 1];
+        yr = wkr * xr - wki * xi;
+        yi = wkr * xi + wki * xr;
+        a[j] -= yr;
+        a[j + 1] -= yi;
+        a[k] += yr;
+        a[k + 1] -= yi;
+    }
+}
+
+
+static void rftbsub(size_t n, float *a, size_t nc, float *c)
+{
+    size_t j, k, kk, ks, m;
+    float wkr, wki, xr, xi, yr, yi;
+
+    a[1] = -a[1];
+    m = n >> 1;
+    ks = 2 * nc / m;
+    kk = 0;
+    for (j = 2; j < m; j += 2) {
+        k = n - j;
+        kk += ks;
+        wkr = 0.5f - c[nc - kk];
+        wki = c[kk];
+        xr = a[j] - a[k];
+        xi = a[j + 1] + a[k + 1];
+        yr = wkr * xr + wki * xi;
+        yi = wkr * xi - wki * xr;
+        a[j] -= yr;
+        a[j + 1] = yi - a[j + 1];
+        a[k] += yr;
+        a[k + 1] = yi - a[k + 1];
+    }
+    a[m + 1] = -a[m + 1];
+}
+
+#if 0  // Not used.
+static void dctsub(int n, float *a, int nc, float *c)
+{
+    int j, k, kk, ks, m;
+    float wkr, wki, xr;
+
+    m = n >> 1;
+    ks = nc / n;
+    kk = 0;
+    for (j = 1; j < m; j++) {
+        k = n - j;
+        kk += ks;
+        wkr = c[kk] - c[nc - kk];
+        wki = c[kk] + c[nc - kk];
+        xr = wki * a[j] - wkr * a[k];
+        a[j] = wkr * a[j] + wki * a[k];
+        a[k] = xr;
+    }
+    a[m] *= c[0];
+}
+
+
+static void dstsub(int n, float *a, int nc, float *c)
+{
+    int j, k, kk, ks, m;
+    float wkr, wki, xr;
+
+    m = n >> 1;
+    ks = nc / n;
+    kk = 0;
+    for (j = 1; j < m; j++) {
+        k = n - j;
+        kk += ks;
+        wkr = c[kk] - c[nc - kk];
+        wki = c[kk] + c[nc - kk];
+        xr = wki * a[k] - wkr * a[j];
+        a[k] = wkr * a[k] + wki * a[j];
+        a[j] = xr;
+    }
+    a[m] *= c[0];
+}
+#endif  // Not used.
diff --git a/common_audio/fft4g.h b/common_audio/fft4g.h
new file mode 100644
index 0000000..1f0e29d
--- /dev/null
+++ b/common_audio/fft4g.h
@@ -0,0 +1,25 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_FFT4G_H_
+#define COMMON_AUDIO_FFT4G_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+// Refer to fft4g.c for documentation.
+void WebRtc_rdft(size_t n, int isgn, float *a, size_t *ip, float *w);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif  // COMMON_AUDIO_FFT4G_H_
diff --git a/common_audio/fir_filter.h b/common_audio/fir_filter.h
new file mode 100644
index 0000000..a76e936
--- /dev/null
+++ b/common_audio/fir_filter.h
@@ -0,0 +1,30 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_FIR_FILTER_H_
+#define COMMON_AUDIO_FIR_FILTER_H_
+
+#include <string.h>
+
+namespace webrtc {
+
+// Finite Impulse Response filter using floating-point arithmetic.
+class FIRFilter {
+ public:
+  virtual ~FIRFilter() {}
+
+  // Filters the |in| data supplied.
+  // |out| must be previously allocated and it must be at least of |length|.
+  virtual void Filter(const float* in, size_t length, float* out) = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_FIR_FILTER_H_
diff --git a/common_audio/fir_filter_c.cc b/common_audio/fir_filter_c.cc
new file mode 100644
index 0000000..6fe2470
--- /dev/null
+++ b/common_audio/fir_filter_c.cc
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/fir_filter_c.h"
+
+#include <string.h>
+
+#include <memory>
+
+#include "common_audio/fir_filter_neon.h"
+#include "common_audio/fir_filter_sse.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+FIRFilterC::~FIRFilterC() {
+}
+
+FIRFilterC::FIRFilterC(const float* coefficients, size_t coefficients_length)
+    : coefficients_length_(coefficients_length),
+      state_length_(coefficients_length - 1),
+      coefficients_(new float[coefficients_length_]),
+      state_(new float[state_length_]) {
+  for (size_t i = 0; i < coefficients_length_; ++i) {
+    coefficients_[i] = coefficients[coefficients_length_ - i - 1];
+  }
+  memset(state_.get(), 0, state_length_ * sizeof(state_[0]));
+}
+
+void FIRFilterC::Filter(const float* in, size_t length, float* out) {
+  RTC_DCHECK_GT(length, 0);
+
+  // Convolves the input signal |in| with the filter kernel |coefficients_|
+  // taking into account the previous state.
+  for (size_t i = 0; i < length; ++i) {
+    out[i] = 0.f;
+    size_t j;
+    for (j = 0; state_length_ > i && j < state_length_ - i; ++j) {
+      out[i] += state_[i + j] * coefficients_[j];
+    }
+    for (; j < coefficients_length_; ++j) {
+      out[i] += in[j + i - state_length_] * coefficients_[j];
+    }
+  }
+
+  // Update current state.
+  if (length >= state_length_) {
+    memcpy(
+        state_.get(), &in[length - state_length_], state_length_ * sizeof(*in));
+  } else {
+    memmove(state_.get(),
+            &state_[length],
+            (state_length_ - length) * sizeof(state_[0]));
+    memcpy(&state_[state_length_ - length], in, length * sizeof(*in));
+  }
+}
+
+}  // namespace webrtc
diff --git a/common_audio/fir_filter_c.h b/common_audio/fir_filter_c.h
new file mode 100644
index 0000000..ffce838
--- /dev/null
+++ b/common_audio/fir_filter_c.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_FIR_FILTER_C_H_
+#define COMMON_AUDIO_FIR_FILTER_C_H_
+
+#include <string.h>
+#include <memory>
+
+#include "common_audio/fir_filter.h"
+
+namespace webrtc {
+
+class FIRFilterC : public FIRFilter {
+ public:
+  FIRFilterC(const float* coefficients,
+             size_t coefficients_length);
+  ~FIRFilterC() override;
+
+  void Filter(const float* in, size_t length, float* out) override;
+
+ private:
+  size_t coefficients_length_;
+  size_t state_length_;
+  std::unique_ptr<float[]> coefficients_;
+  std::unique_ptr<float[]> state_;
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_FIR_FILTER_C_H_
diff --git a/common_audio/fir_filter_factory.cc b/common_audio/fir_filter_factory.cc
new file mode 100644
index 0000000..c15c2e0
--- /dev/null
+++ b/common_audio/fir_filter_factory.cc
@@ -0,0 +1,58 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/fir_filter_factory.h"
+
+#include "common_audio/fir_filter_c.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+
+#if defined(WEBRTC_HAS_NEON)
+#include "common_audio/fir_filter_neon.h"
+#elif defined(WEBRTC_ARCH_X86_FAMILY)
+#include "common_audio/fir_filter_sse.h"
+#endif
+
+namespace webrtc {
+
+FIRFilter* CreateFirFilter(const float* coefficients,
+                           size_t coefficients_length,
+                           size_t max_input_length) {
+  if (!coefficients || coefficients_length <= 0 || max_input_length <= 0) {
+    RTC_NOTREACHED();
+    return nullptr;
+  }
+
+  FIRFilter* filter = nullptr;
+// If we know the minimum architecture at compile time, avoid CPU detection.
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#if defined(__SSE2__)
+  filter =
+      new FIRFilterSSE2(coefficients, coefficients_length, max_input_length);
+#else
+  // x86 CPU detection required.
+  if (WebRtc_GetCPUInfo(kSSE2)) {
+    filter =
+        new FIRFilterSSE2(coefficients, coefficients_length, max_input_length);
+  } else {
+    filter = new FIRFilterC(coefficients, coefficients_length);
+  }
+#endif
+#elif defined(WEBRTC_HAS_NEON)
+  filter =
+      new FIRFilterNEON(coefficients, coefficients_length, max_input_length);
+#else
+  filter = new FIRFilterC(coefficients, coefficients_length);
+#endif
+
+  return filter;
+}
+
+}  // namespace webrtc
diff --git a/common_audio/fir_filter_factory.h b/common_audio/fir_filter_factory.h
new file mode 100644
index 0000000..2e7ca9b
--- /dev/null
+++ b/common_audio/fir_filter_factory.h
@@ -0,0 +1,32 @@
+  /*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_FIR_FILTER_FACTORY_H_
+#define COMMON_AUDIO_FIR_FILTER_FACTORY_H_
+
+#include <string.h>
+
+namespace webrtc {
+
+class FIRFilter;
+
+// Creates a filter with the given coefficients. All initial state values will
+// be zeros.
+// The length of the chunks fed to the filter should never be greater than
+// |max_input_length|. This is needed because, when vectorizing it is
+// necessary to concatenate the input after the state, and resizing this array
+// dynamically is expensive.
+FIRFilter* CreateFirFilter(const float* coefficients,
+                           size_t coefficients_length,
+                           size_t max_input_length);
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_FIR_FILTER_FACTORY_H_
diff --git a/common_audio/fir_filter_neon.cc b/common_audio/fir_filter_neon.cc
new file mode 100644
index 0000000..d9f91b7
--- /dev/null
+++ b/common_audio/fir_filter_neon.cc
@@ -0,0 +1,75 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/fir_filter_neon.h"
+
+#include <arm_neon.h>
+#include <string.h>
+
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/aligned_malloc.h"
+
+namespace webrtc {
+
+FIRFilterNEON::~FIRFilterNEON() {
+}
+
+FIRFilterNEON::FIRFilterNEON(const float* coefficients,
+                             size_t coefficients_length,
+                             size_t max_input_length)
+    :  // Closest higher multiple of four.
+      coefficients_length_((coefficients_length + 3) & ~0x03),
+      state_length_(coefficients_length_ - 1),
+      coefficients_(static_cast<float*>(
+          AlignedMalloc(sizeof(float) * coefficients_length_, 16))),
+      state_(static_cast<float*>(
+          AlignedMalloc(sizeof(float) * (max_input_length + state_length_),
+                        16))) {
+  // Add zeros at the end of the coefficients.
+  size_t padding = coefficients_length_ - coefficients_length;
+  memset(coefficients_.get(), 0.f, padding * sizeof(coefficients_[0]));
+  // The coefficients are reversed to compensate for the order in which the
+  // input samples are acquired (most recent last).
+  for (size_t i = 0; i < coefficients_length; ++i) {
+    coefficients_[i + padding] = coefficients[coefficients_length - i - 1];
+  }
+  memset(state_.get(),
+         0.f,
+         (max_input_length + state_length_) * sizeof(state_[0]));
+}
+
+void FIRFilterNEON::Filter(const float* in, size_t length, float* out) {
+  RTC_DCHECK_GT(length, 0);
+
+  memcpy(&state_[state_length_], in, length * sizeof(*in));
+
+  // Convolves the input signal |in| with the filter kernel |coefficients_|
+  // taking into account the previous state.
+  for (size_t i = 0; i < length; ++i) {
+    float* in_ptr = &state_[i];
+    float* coef_ptr = coefficients_.get();
+
+    float32x4_t m_sum = vmovq_n_f32(0);
+    float32x4_t m_in;
+
+    for (size_t j = 0; j < coefficients_length_; j += 4) {
+       m_in = vld1q_f32(in_ptr + j);
+       m_sum = vmlaq_f32(m_sum, m_in, vld1q_f32(coef_ptr + j));
+    }
+
+    float32x2_t m_half = vadd_f32(vget_high_f32(m_sum), vget_low_f32(m_sum));
+    out[i] = vget_lane_f32(vpadd_f32(m_half, m_half), 0);
+  }
+
+  // Update current state.
+  memmove(state_.get(), &state_[length], state_length_ * sizeof(state_[0]));
+}
+
+}  // namespace webrtc
diff --git a/common_audio/fir_filter_neon.h b/common_audio/fir_filter_neon.h
new file mode 100644
index 0000000..5696df8
--- /dev/null
+++ b/common_audio/fir_filter_neon.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_FIR_FILTER_NEON_H_
+#define COMMON_AUDIO_FIR_FILTER_NEON_H_
+
+#include <memory>
+
+#include "common_audio/fir_filter.h"
+#include "system_wrappers/include/aligned_malloc.h"
+
+namespace webrtc {
+
+class FIRFilterNEON : public FIRFilter {
+ public:
+  FIRFilterNEON(const float* coefficients,
+                size_t coefficients_length,
+                size_t max_input_length);
+  ~FIRFilterNEON() override;
+
+  void Filter(const float* in, size_t length, float* out) override;
+
+ private:
+  size_t coefficients_length_;
+  size_t state_length_;
+  std::unique_ptr<float[], AlignedFreeDeleter> coefficients_;
+  std::unique_ptr<float[], AlignedFreeDeleter> state_;
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_FIR_FILTER_NEON_H_
diff --git a/common_audio/fir_filter_sse.cc b/common_audio/fir_filter_sse.cc
new file mode 100644
index 0000000..3302d56
--- /dev/null
+++ b/common_audio/fir_filter_sse.cc
@@ -0,0 +1,84 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/fir_filter_sse.h"
+
+#include <stdint.h>
+#include <string.h>
+#include <xmmintrin.h>
+
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/aligned_malloc.h"
+
+namespace webrtc {
+
+FIRFilterSSE2::~FIRFilterSSE2() {
+}
+
+FIRFilterSSE2::FIRFilterSSE2(const float* coefficients,
+                             size_t coefficients_length,
+                             size_t max_input_length)
+    :  // Closest higher multiple of four.
+      coefficients_length_((coefficients_length + 3) & ~0x03),
+      state_length_(coefficients_length_ - 1),
+      coefficients_(static_cast<float*>(
+          AlignedMalloc(sizeof(float) * coefficients_length_, 16))),
+      state_(static_cast<float*>(
+          AlignedMalloc(sizeof(float) * (max_input_length + state_length_),
+                        16))) {
+  // Add zeros at the end of the coefficients.
+  size_t padding = coefficients_length_ - coefficients_length;
+  memset(coefficients_.get(), 0, padding * sizeof(coefficients_[0]));
+  // The coefficients are reversed to compensate for the order in which the
+  // input samples are acquired (most recent last).
+  for (size_t i = 0; i < coefficients_length; ++i) {
+    coefficients_[i + padding] = coefficients[coefficients_length - i - 1];
+  }
+  memset(state_.get(),
+         0,
+         (max_input_length + state_length_) * sizeof(state_[0]));
+}
+
+void FIRFilterSSE2::Filter(const float* in, size_t length, float* out) {
+  RTC_DCHECK_GT(length, 0);
+
+  memcpy(&state_[state_length_], in, length * sizeof(*in));
+
+  // Convolves the input signal |in| with the filter kernel |coefficients_|
+  // taking into account the previous state.
+  for (size_t i = 0; i < length; ++i) {
+    float* in_ptr = &state_[i];
+    float* coef_ptr = coefficients_.get();
+
+    __m128 m_sum = _mm_setzero_ps();
+    __m128 m_in;
+
+    // Depending on if the pointer is aligned with 16 bytes or not it is loaded
+    // differently.
+    if (reinterpret_cast<uintptr_t>(in_ptr) & 0x0F) {
+      for (size_t j = 0; j < coefficients_length_; j += 4) {
+        m_in = _mm_loadu_ps(in_ptr + j);
+        m_sum = _mm_add_ps(m_sum, _mm_mul_ps(m_in, _mm_load_ps(coef_ptr + j)));
+      }
+    } else {
+      for (size_t j = 0; j < coefficients_length_; j += 4) {
+        m_in = _mm_load_ps(in_ptr + j);
+        m_sum = _mm_add_ps(m_sum, _mm_mul_ps(m_in, _mm_load_ps(coef_ptr + j)));
+      }
+    }
+    m_sum = _mm_add_ps(_mm_movehl_ps(m_sum, m_sum), m_sum);
+    _mm_store_ss(out + i, _mm_add_ss(m_sum, _mm_shuffle_ps(m_sum, m_sum, 1)));
+  }
+
+  // Update current state.
+  memmove(state_.get(), &state_[length], state_length_ * sizeof(state_[0]));
+}
+
+}  // namespace webrtc
diff --git a/common_audio/fir_filter_sse.h b/common_audio/fir_filter_sse.h
new file mode 100644
index 0000000..6506024
--- /dev/null
+++ b/common_audio/fir_filter_sse.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_FIR_FILTER_SSE_H_
+#define COMMON_AUDIO_FIR_FILTER_SSE_H_
+
+#include <memory>
+
+#include "common_audio/fir_filter.h"
+#include "system_wrappers/include/aligned_malloc.h"
+
+namespace webrtc {
+
+class FIRFilterSSE2 : public FIRFilter {
+ public:
+  FIRFilterSSE2(const float* coefficients,
+                size_t coefficients_length,
+                size_t max_input_length);
+  ~FIRFilterSSE2() override;
+
+  void Filter(const float* in, size_t length, float* out) override;
+
+ private:
+  size_t coefficients_length_;
+  size_t state_length_;
+  std::unique_ptr<float[], AlignedFreeDeleter> coefficients_;
+  std::unique_ptr<float[], AlignedFreeDeleter> state_;
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_FIR_FILTER_SSE_H_
diff --git a/common_audio/fir_filter_unittest.cc b/common_audio/fir_filter_unittest.cc
new file mode 100644
index 0000000..4696621
--- /dev/null
+++ b/common_audio/fir_filter_unittest.cc
@@ -0,0 +1,212 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/fir_filter.h"
+#include "common_audio/fir_filter_factory.h"
+
+#include <string.h>
+
+#include <memory>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+static const float kCoefficients[] = {0.2f, 0.3f, 0.5f, 0.7f, 0.11f};
+static const size_t kCoefficientsLength = sizeof(kCoefficients) /
+                                       sizeof(kCoefficients[0]);
+
+static const float kInput[] = {1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f,
+                                      8.f, 9.f, 10.f};
+static const size_t kInputLength = sizeof(kInput) /
+                                      sizeof(kInput[0]);
+
+void VerifyOutput(const float* expected_output,
+                  const float* output,
+                  size_t length) {
+  EXPECT_EQ(0, memcmp(expected_output,
+                      output,
+                      length * sizeof(expected_output[0])));
+}
+
+}  // namespace
+
+TEST(FIRFilterTest, FilterAsIdentity) {
+  const float kCoefficients[] = {1.f, 0.f, 0.f, 0.f, 0.f};
+  float output[kInputLength];
+  std::unique_ptr<FIRFilter> filter(
+      CreateFirFilter(kCoefficients, kCoefficientsLength, kInputLength));
+  filter->Filter(kInput, kInputLength, output);
+
+  VerifyOutput(kInput, output, kInputLength);
+}
+
+TEST(FIRFilterTest, FilterUsedAsScalarMultiplication) {
+  const float kCoefficients[] = {5.f, 0.f, 0.f, 0.f, 0.f};
+  float output[kInputLength];
+  std::unique_ptr<FIRFilter> filter(
+      CreateFirFilter(kCoefficients, kCoefficientsLength, kInputLength));
+  filter->Filter(kInput, kInputLength, output);
+
+  EXPECT_FLOAT_EQ(5.f, output[0]);
+  EXPECT_FLOAT_EQ(20.f, output[3]);
+  EXPECT_FLOAT_EQ(25.f, output[4]);
+  EXPECT_FLOAT_EQ(50.f, output[kInputLength - 1]);
+}
+
+TEST(FIRFilterTest, FilterUsedAsInputShifting) {
+  const float kCoefficients[] = {0.f, 0.f, 0.f, 0.f, 1.f};
+  float output[kInputLength];
+  std::unique_ptr<FIRFilter> filter(
+      CreateFirFilter(kCoefficients, kCoefficientsLength, kInputLength));
+  filter->Filter(kInput, kInputLength, output);
+
+  EXPECT_FLOAT_EQ(0.f, output[0]);
+  EXPECT_FLOAT_EQ(0.f, output[3]);
+  EXPECT_FLOAT_EQ(1.f, output[4]);
+  EXPECT_FLOAT_EQ(2.f, output[5]);
+  EXPECT_FLOAT_EQ(6.f, output[kInputLength - 1]);
+}
+
+TEST(FIRFilterTest, FilterUsedAsArbitraryWeighting) {
+  float output[kInputLength];
+  std::unique_ptr<FIRFilter> filter(
+      CreateFirFilter(kCoefficients, kCoefficientsLength, kInputLength));
+  filter->Filter(kInput, kInputLength, output);
+
+  EXPECT_FLOAT_EQ(0.2f, output[0]);
+  EXPECT_FLOAT_EQ(3.4f, output[3]);
+  EXPECT_FLOAT_EQ(5.21f, output[4]);
+  EXPECT_FLOAT_EQ(7.02f, output[5]);
+  EXPECT_FLOAT_EQ(14.26f, output[kInputLength - 1]);
+}
+
+TEST(FIRFilterTest, FilterInLengthLesserOrEqualToCoefficientsLength) {
+  float output[kInputLength];
+  std::unique_ptr<FIRFilter> filter(
+      CreateFirFilter(kCoefficients, kCoefficientsLength, 2));
+  filter->Filter(kInput, 2, output);
+
+  EXPECT_FLOAT_EQ(0.2f, output[0]);
+  EXPECT_FLOAT_EQ(0.7f, output[1]);
+  filter.reset(CreateFirFilter(
+      kCoefficients, kCoefficientsLength, kCoefficientsLength));
+  filter->Filter(kInput, kCoefficientsLength, output);
+
+  EXPECT_FLOAT_EQ(0.2f, output[0]);
+  EXPECT_FLOAT_EQ(3.4f, output[3]);
+  EXPECT_FLOAT_EQ(5.21f, output[4]);
+}
+
+TEST(FIRFilterTest, MultipleFilterCalls) {
+  float output[kInputLength];
+  std::unique_ptr<FIRFilter> filter(
+      CreateFirFilter(kCoefficients, kCoefficientsLength, 3));
+  filter->Filter(kInput, 2, output);
+  EXPECT_FLOAT_EQ(0.2f, output[0]);
+  EXPECT_FLOAT_EQ(0.7f, output[1]);
+
+  filter->Filter(kInput, 2, output);
+  EXPECT_FLOAT_EQ(1.3f, output[0]);
+  EXPECT_FLOAT_EQ(2.4f, output[1]);
+
+  filter->Filter(kInput, 2, output);
+  EXPECT_FLOAT_EQ(2.81f, output[0]);
+  EXPECT_FLOAT_EQ(2.62f, output[1]);
+
+  filter->Filter(kInput, 2, output);
+  EXPECT_FLOAT_EQ(2.81f, output[0]);
+  EXPECT_FLOAT_EQ(2.62f, output[1]);
+
+  filter->Filter(&kInput[3], 3, output);
+  EXPECT_FLOAT_EQ(3.41f, output[0]);
+  EXPECT_FLOAT_EQ(4.12f, output[1]);
+  EXPECT_FLOAT_EQ(6.21f, output[2]);
+
+  filter->Filter(&kInput[3], 3, output);
+  EXPECT_FLOAT_EQ(8.12f, output[0]);
+  EXPECT_FLOAT_EQ(9.14f, output[1]);
+  EXPECT_FLOAT_EQ(9.45f, output[2]);
+}
+
+TEST(FIRFilterTest, VerifySampleBasedVsBlockBasedFiltering) {
+  float output_block_based[kInputLength];
+  std::unique_ptr<FIRFilter> filter(
+      CreateFirFilter(kCoefficients, kCoefficientsLength, kInputLength));
+  filter->Filter(kInput, kInputLength, output_block_based);
+
+  float output_sample_based[kInputLength];
+  filter.reset(CreateFirFilter(kCoefficients, kCoefficientsLength, 1));
+  for (size_t i = 0; i < kInputLength; ++i) {
+    filter->Filter(&kInput[i], 1, &output_sample_based[i]);
+  }
+
+  EXPECT_EQ(0, memcmp(output_sample_based,
+                      output_block_based,
+                      kInputLength));
+}
+
+TEST(FIRFilterTest, SimplestHighPassFilter) {
+  const float kCoefficients[] = {1.f, -1.f};
+  const size_t kCoefficientsLength = sizeof(kCoefficients) /
+                                  sizeof(kCoefficients[0]);
+
+  float kConstantInput[] = {1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f};
+  const size_t kConstantInputLength = sizeof(kConstantInput) /
+      sizeof(kConstantInput[0]);
+
+  float output[kConstantInputLength];
+  std::unique_ptr<FIRFilter> filter(CreateFirFilter(
+      kCoefficients, kCoefficientsLength, kConstantInputLength));
+  filter->Filter(kConstantInput, kConstantInputLength, output);
+  EXPECT_FLOAT_EQ(1.f, output[0]);
+  for (size_t i = kCoefficientsLength - 1; i < kConstantInputLength; ++i) {
+    EXPECT_FLOAT_EQ(0.f, output[i]);
+  }
+}
+
+TEST(FIRFilterTest, SimplestLowPassFilter) {
+  const float kCoefficients[] = {1.f, 1.f};
+  const size_t kCoefficientsLength = sizeof(kCoefficients) /
+                                  sizeof(kCoefficients[0]);
+
+  float kHighFrequencyInput[] = {-1.f, 1.f, -1.f, 1.f, -1.f, 1.f, -1.f, 1.f};
+  const size_t kHighFrequencyInputLength = sizeof(kHighFrequencyInput) /
+                                        sizeof(kHighFrequencyInput[0]);
+
+  float output[kHighFrequencyInputLength];
+  std::unique_ptr<FIRFilter> filter(CreateFirFilter(
+      kCoefficients, kCoefficientsLength, kHighFrequencyInputLength));
+  filter->Filter(kHighFrequencyInput, kHighFrequencyInputLength, output);
+  EXPECT_FLOAT_EQ(-1.f, output[0]);
+  for (size_t i = kCoefficientsLength - 1; i < kHighFrequencyInputLength; ++i) {
+    EXPECT_FLOAT_EQ(0.f, output[i]);
+  }
+}
+
+TEST(FIRFilterTest, SameOutputWhenSwapedCoefficientsAndInput) {
+  float output[kCoefficientsLength];
+  float output_swaped[kCoefficientsLength];
+  std::unique_ptr<FIRFilter> filter(CreateFirFilter(
+      kCoefficients, kCoefficientsLength, kCoefficientsLength));
+  // Use kCoefficientsLength for in_length to get same-length outputs.
+  filter->Filter(kInput, kCoefficientsLength, output);
+
+  filter.reset(CreateFirFilter(
+      kInput, kCoefficientsLength, kCoefficientsLength));
+  filter->Filter(kCoefficients, kCoefficientsLength, output_swaped);
+
+  for (size_t i = 0 ; i < kCoefficientsLength; ++i) {
+    EXPECT_FLOAT_EQ(output[i], output_swaped[i]);
+  }
+}
+
+}  // namespace webrtc
diff --git a/common_audio/include/audio_util.h b/common_audio/include/audio_util.h
new file mode 100644
index 0000000..e4ea9a1
--- /dev/null
+++ b/common_audio/include/audio_util.h
@@ -0,0 +1,214 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_
+#define COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_
+
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+#include <limits>
+
+#include "rtc_base/checks.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+typedef std::numeric_limits<int16_t> limits_int16;
+
+// The conversion functions use the following naming convention:
+// S16:      int16_t [-32768, 32767]
+// Float:    float   [-1.0, 1.0]
+// FloatS16: float   [-32768.0, 32767.0]
+// Dbfs: float [-20.0*log(10, 32768), 0] = [-90.3, 0]
+// The ratio conversion functions use this naming convention:
+// Ratio: float (0, +inf)
+// Db: float (-inf, +inf)
+static inline int16_t FloatToS16(float v) {
+  if (v > 0)
+    return v >= 1 ? limits_int16::max()
+                  : static_cast<int16_t>(v * limits_int16::max() + 0.5f);
+  return v <= -1 ? limits_int16::min()
+                 : static_cast<int16_t>(-v * limits_int16::min() - 0.5f);
+}
+
+static inline float S16ToFloat(int16_t v) {
+  static const float kMaxInt16Inverse = 1.f / limits_int16::max();
+  static const float kMinInt16Inverse = 1.f / limits_int16::min();
+  return v * (v > 0 ? kMaxInt16Inverse : -kMinInt16Inverse);
+}
+
+static inline int16_t FloatS16ToS16(float v) {
+  static const float kMaxRound = limits_int16::max() - 0.5f;
+  static const float kMinRound = limits_int16::min() + 0.5f;
+  if (v > 0)
+    return v >= kMaxRound ? limits_int16::max()
+                          : static_cast<int16_t>(v + 0.5f);
+  return v <= kMinRound ? limits_int16::min() : static_cast<int16_t>(v - 0.5f);
+}
+
+static inline float FloatToFloatS16(float v) {
+  return v * (v > 0 ? limits_int16::max() : -limits_int16::min());
+}
+
+static inline float FloatS16ToFloat(float v) {
+  static const float kMaxInt16Inverse = 1.f / limits_int16::max();
+  static const float kMinInt16Inverse = 1.f / limits_int16::min();
+  return v * (v > 0 ? kMaxInt16Inverse : -kMinInt16Inverse);
+}
+
+void FloatToS16(const float* src, size_t size, int16_t* dest);
+void S16ToFloat(const int16_t* src, size_t size, float* dest);
+void FloatS16ToS16(const float* src, size_t size, int16_t* dest);
+void FloatToFloatS16(const float* src, size_t size, float* dest);
+void FloatS16ToFloat(const float* src, size_t size, float* dest);
+
+inline float DbToRatio(float v) {
+  return std::pow(10.0f, v / 20.0f);
+}
+
+inline float DbfsToFloatS16(float v) {
+  static constexpr float kMaximumAbsFloatS16 = -limits_int16::min();
+  return DbToRatio(v) * kMaximumAbsFloatS16;
+}
+
+inline float FloatS16ToDbfs(float v) {
+  RTC_DCHECK_GE(v, 0);
+
+  // kMinDbfs is equal to -20.0 * log10(-limits_int16::min())
+  static constexpr float kMinDbfs = -90.30899869919436f;
+  if (v <= 1.0f) {
+    return kMinDbfs;
+  }
+  // Equal to 20 * log10(v / (-limits_int16::min()))
+  return 20.0f * std::log10(v) + kMinDbfs;
+}
+
+// Copy audio from |src| channels to |dest| channels unless |src| and |dest|
+// point to the same address. |src| and |dest| must have the same number of
+// channels, and there must be sufficient space allocated in |dest|.
+template <typename T>
+void CopyAudioIfNeeded(const T* const* src,
+                       int num_frames,
+                       int num_channels,
+                       T* const* dest) {
+  for (int i = 0; i < num_channels; ++i) {
+    if (src[i] != dest[i]) {
+      std::copy(src[i], src[i] + num_frames, dest[i]);
+    }
+  }
+}
+
+// Deinterleave audio from |interleaved| to the channel buffers pointed to
+// by |deinterleaved|. There must be sufficient space allocated in the
+// |deinterleaved| buffers (|num_channel| buffers with |samples_per_channel|
+// per buffer).
+template <typename T>
+void Deinterleave(const T* interleaved,
+                  size_t samples_per_channel,
+                  size_t num_channels,
+                  T* const* deinterleaved) {
+  for (size_t i = 0; i < num_channels; ++i) {
+    T* channel = deinterleaved[i];
+    size_t interleaved_idx = i;
+    for (size_t j = 0; j < samples_per_channel; ++j) {
+      channel[j] = interleaved[interleaved_idx];
+      interleaved_idx += num_channels;
+    }
+  }
+}
+
+// Interleave audio from the channel buffers pointed to by |deinterleaved| to
+// |interleaved|. There must be sufficient space allocated in |interleaved|
+// (|samples_per_channel| * |num_channels|).
+template <typename T>
+void Interleave(const T* const* deinterleaved,
+                size_t samples_per_channel,
+                size_t num_channels,
+                T* interleaved) {
+  for (size_t i = 0; i < num_channels; ++i) {
+    const T* channel = deinterleaved[i];
+    size_t interleaved_idx = i;
+    for (size_t j = 0; j < samples_per_channel; ++j) {
+      interleaved[interleaved_idx] = channel[j];
+      interleaved_idx += num_channels;
+    }
+  }
+}
+
+// Copies audio from a single channel buffer pointed to by |mono| to each
+// channel of |interleaved|. There must be sufficient space allocated in
+// |interleaved| (|samples_per_channel| * |num_channels|).
+template <typename T>
+void UpmixMonoToInterleaved(const T* mono,
+                            int num_frames,
+                            int num_channels,
+                            T* interleaved) {
+  int interleaved_idx = 0;
+  for (int i = 0; i < num_frames; ++i) {
+    for (int j = 0; j < num_channels; ++j) {
+      interleaved[interleaved_idx++] = mono[i];
+    }
+  }
+}
+
+template <typename T, typename Intermediate>
+void DownmixToMono(const T* const* input_channels,
+                   size_t num_frames,
+                   int num_channels,
+                   T* out) {
+  for (size_t i = 0; i < num_frames; ++i) {
+    Intermediate value = input_channels[0][i];
+    for (int j = 1; j < num_channels; ++j) {
+      value += input_channels[j][i];
+    }
+    out[i] = value / num_channels;
+  }
+}
+
+// Downmixes an interleaved multichannel signal to a single channel by averaging
+// all channels.
+template <typename T, typename Intermediate>
+void DownmixInterleavedToMonoImpl(const T* interleaved,
+                                  size_t num_frames,
+                                  int num_channels,
+                                  T* deinterleaved) {
+  RTC_DCHECK_GT(num_channels, 0);
+  RTC_DCHECK_GT(num_frames, 0);
+
+  const T* const end = interleaved + num_frames * num_channels;
+
+  while (interleaved < end) {
+    const T* const frame_end = interleaved + num_channels;
+
+    Intermediate value = *interleaved++;
+    while (interleaved < frame_end) {
+      value += *interleaved++;
+    }
+
+    *deinterleaved++ = value / num_channels;
+  }
+}
+
+template <typename T>
+void DownmixInterleavedToMono(const T* interleaved,
+                              size_t num_frames,
+                              int num_channels,
+                              T* deinterleaved);
+
+template <>
+void DownmixInterleavedToMono<int16_t>(const int16_t* interleaved,
+                                       size_t num_frames,
+                                       int num_channels,
+                                       int16_t* deinterleaved);
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_
diff --git a/common_audio/lapped_transform.cc b/common_audio/lapped_transform.cc
new file mode 100644
index 0000000..517709f
--- /dev/null
+++ b/common_audio/lapped_transform.cc
@@ -0,0 +1,103 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/lapped_transform.h"
+
+#include <algorithm>
+#include <cstdlib>
+#include <cstring>
+
+#include "common_audio/real_fourier.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+void LappedTransform::BlockThunk::ProcessBlock(const float* const* input,
+                                               size_t num_frames,
+                                               size_t num_input_channels,
+                                               size_t num_output_channels,
+                                               float* const* output) {
+  RTC_CHECK_EQ(num_input_channels, parent_->num_in_channels_);
+  RTC_CHECK_EQ(num_output_channels, parent_->num_out_channels_);
+  RTC_CHECK_EQ(parent_->block_length_, num_frames);
+
+  for (size_t i = 0; i < num_input_channels; ++i) {
+    memcpy(parent_->real_buf_.Row(i), input[i],
+           num_frames * sizeof(*input[0]));
+    parent_->fft_->Forward(parent_->real_buf_.Row(i),
+                           parent_->cplx_pre_.Row(i));
+  }
+
+  size_t block_length = RealFourier::ComplexLength(
+      RealFourier::FftOrder(num_frames));
+  RTC_CHECK_EQ(parent_->cplx_length_, block_length);
+  parent_->block_processor_->ProcessAudioBlock(parent_->cplx_pre_.Array(),
+                                               num_input_channels,
+                                               parent_->cplx_length_,
+                                               num_output_channels,
+                                               parent_->cplx_post_.Array());
+
+  for (size_t i = 0; i < num_output_channels; ++i) {
+    parent_->fft_->Inverse(parent_->cplx_post_.Row(i),
+                           parent_->real_buf_.Row(i));
+    memcpy(output[i], parent_->real_buf_.Row(i),
+           num_frames * sizeof(*input[0]));
+  }
+}
+
+LappedTransform::LappedTransform(size_t num_in_channels,
+                                 size_t num_out_channels,
+                                 size_t chunk_length,
+                                 const float* window,
+                                 size_t block_length,
+                                 size_t shift_amount,
+                                 Callback* callback)
+    : blocker_callback_(this),
+      num_in_channels_(num_in_channels),
+      num_out_channels_(num_out_channels),
+      block_length_(block_length),
+      chunk_length_(chunk_length),
+      block_processor_(callback),
+      blocker_(chunk_length_,
+               block_length_,
+               num_in_channels_,
+               num_out_channels_,
+               window,
+               shift_amount,
+               &blocker_callback_),
+      fft_(RealFourier::Create(RealFourier::FftOrder(block_length_))),
+      cplx_length_(RealFourier::ComplexLength(fft_->order())),
+      real_buf_(num_in_channels,
+                block_length_,
+                RealFourier::kFftBufferAlignment),
+      cplx_pre_(num_in_channels,
+                cplx_length_,
+                RealFourier::kFftBufferAlignment),
+      cplx_post_(num_out_channels,
+                 cplx_length_,
+                 RealFourier::kFftBufferAlignment) {
+  RTC_CHECK(num_in_channels_ > 0);
+  RTC_CHECK_GT(block_length_, 0);
+  RTC_CHECK_GT(chunk_length_, 0);
+  RTC_CHECK(block_processor_);
+
+  // block_length_ power of 2?
+  RTC_CHECK_EQ(0, block_length_ & (block_length_ - 1));
+}
+
+LappedTransform::~LappedTransform() = default;
+
+void LappedTransform::ProcessChunk(const float* const* in_chunk,
+                                   float* const* out_chunk) {
+  blocker_.ProcessChunk(in_chunk, chunk_length_, num_in_channels_,
+                        num_out_channels_, out_chunk);
+}
+
+}  // namespace webrtc
diff --git a/common_audio/lapped_transform.h b/common_audio/lapped_transform.h
new file mode 100644
index 0000000..fe3a8cd
--- /dev/null
+++ b/common_audio/lapped_transform.h
@@ -0,0 +1,131 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_LAPPED_TRANSFORM_H_
+#define COMMON_AUDIO_LAPPED_TRANSFORM_H_
+
+#include <complex>
+#include <memory>
+
+#include "common_audio/blocker.h"
+#include "common_audio/real_fourier.h"
+#include "system_wrappers/include/aligned_array.h"
+
+namespace webrtc {
+
+// Helper class for audio processing modules which operate on frequency domain
+// input derived from the windowed time domain audio stream.
+//
+// The input audio chunk is sliced into possibly overlapping blocks, multiplied
+// by a window and transformed with an FFT implementation. The transformed data
+// is supplied to the given callback for processing. The processed output is
+// then inverse transformed into the time domain and spliced back into a chunk
+// which constitutes the final output of this processing module.
+class LappedTransform {
+ public:
+  class Callback {
+   public:
+    virtual ~Callback() {}
+
+    virtual void ProcessAudioBlock(const std::complex<float>* const* in_block,
+                                   size_t num_in_channels, size_t frames,
+                                   size_t num_out_channels,
+                                   std::complex<float>* const* out_block) = 0;
+  };
+
+  // Construct a transform instance. |chunk_length| is the number of samples in
+  // each channel. |window| defines the window, owned by the caller (a copy is
+  // made internally); |window| should have length equal to |block_length|.
+  // |block_length| defines the length of a block, in samples.
+  // |shift_amount| is in samples. |callback| is the caller-owned audio
+  // processing function called for each block of the input chunk.
+  LappedTransform(size_t num_in_channels,
+                  size_t num_out_channels,
+                  size_t chunk_length,
+                  const float* window,
+                  size_t block_length,
+                  size_t shift_amount,
+                  Callback* callback);
+  ~LappedTransform();
+
+  // Main audio processing helper method. Internally slices |in_chunk| into
+  // blocks, transforms them to frequency domain, calls the callback for each
+  // block and returns a de-blocked time domain chunk of audio through
+  // |out_chunk|. Both buffers are caller-owned.
+  void ProcessChunk(const float* const* in_chunk, float* const* out_chunk);
+
+  // Get the chunk length.
+  //
+  // The chunk length is the number of samples per channel that must be passed
+  // to ProcessChunk via the parameter in_chunk.
+  //
+  // Returns the same chunk_length passed to the LappedTransform constructor.
+  size_t chunk_length() const { return chunk_length_; }
+
+  // Get the number of input channels.
+  //
+  // This is the number of arrays that must be passed to ProcessChunk via
+  // in_chunk.
+  //
+  // Returns the same num_in_channels passed to the LappedTransform constructor.
+  size_t num_in_channels() const { return num_in_channels_; }
+
+  // Get the number of output channels.
+  //
+  // This is the number of arrays that must be passed to ProcessChunk via
+  // out_chunk.
+  //
+  // Returns the same num_out_channels passed to the LappedTransform
+  // constructor.
+  size_t num_out_channels() const { return num_out_channels_; }
+
+  // Returns the initial delay.
+  //
+  // This is the delay introduced by the |blocker_| to be able to get and return
+  // chunks of |chunk_length|, but process blocks of |block_length|.
+  size_t initial_delay() const { return blocker_.initial_delay(); }
+
+ private:
+  // Internal middleware callback, given to the blocker. Transforms each block
+  // and hands it over to the processing method given at construction time.
+  class BlockThunk : public BlockerCallback {
+   public:
+    explicit BlockThunk(LappedTransform* parent) : parent_(parent) {}
+
+    void ProcessBlock(const float* const* input,
+                      size_t num_frames,
+                      size_t num_input_channels,
+                      size_t num_output_channels,
+                      float* const* output) override;
+
+   private:
+    LappedTransform* const parent_;
+  } blocker_callback_;
+
+  const size_t num_in_channels_;
+  const size_t num_out_channels_;
+
+  const size_t block_length_;
+  const size_t chunk_length_;
+
+  Callback* const block_processor_;
+  Blocker blocker_;
+
+  std::unique_ptr<RealFourier> fft_;
+  const size_t cplx_length_;
+  AlignedArray<float> real_buf_;
+  AlignedArray<std::complex<float> > cplx_pre_;
+  AlignedArray<std::complex<float> > cplx_post_;
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_LAPPED_TRANSFORM_H_
+
diff --git a/common_audio/lapped_transform_unittest.cc b/common_audio/lapped_transform_unittest.cc
new file mode 100644
index 0000000..d6a312d
--- /dev/null
+++ b/common_audio/lapped_transform_unittest.cc
@@ -0,0 +1,208 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/lapped_transform.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+
+#include "test/gtest.h"
+
+using std::complex;
+
+namespace {
+
+class NoopCallback : public webrtc::LappedTransform::Callback {
+ public:
+  NoopCallback() : block_num_(0) {}
+
+  virtual void ProcessAudioBlock(const complex<float>* const* in_block,
+                                 size_t in_channels,
+                                 size_t frames,
+                                 size_t out_channels,
+                                 complex<float>* const* out_block) {
+    RTC_CHECK_EQ(in_channels, out_channels);
+    for (size_t i = 0; i < out_channels; ++i) {
+      memcpy(out_block[i], in_block[i], sizeof(**in_block) * frames);
+    }
+    ++block_num_;
+  }
+
+  size_t block_num() {
+    return block_num_;
+  }
+
+ private:
+  size_t block_num_;
+};
+
+class FftCheckerCallback : public webrtc::LappedTransform::Callback {
+ public:
+  FftCheckerCallback() : block_num_(0) {}
+
+  virtual void ProcessAudioBlock(const complex<float>* const* in_block,
+                                 size_t in_channels,
+                                 size_t frames,
+                                 size_t out_channels,
+                                 complex<float>* const* out_block) {
+    RTC_CHECK_EQ(in_channels, out_channels);
+
+    size_t full_length = (frames - 1) * 2;
+    ++block_num_;
+
+    if (block_num_ > 0) {
+      ASSERT_NEAR(in_block[0][0].real(), static_cast<float>(full_length),
+                  1e-5f);
+      ASSERT_NEAR(in_block[0][0].imag(), 0.0f, 1e-5f);
+      for (size_t i = 1; i < frames; ++i) {
+        ASSERT_NEAR(in_block[0][i].real(), 0.0f, 1e-5f);
+        ASSERT_NEAR(in_block[0][i].imag(), 0.0f, 1e-5f);
+      }
+    }
+  }
+
+  size_t block_num() {
+    return block_num_;
+  }
+
+ private:
+  size_t block_num_;
+};
+
+void SetFloatArray(float value, int rows, int cols, float* const* array) {
+  for (int i = 0; i < rows; ++i) {
+    for (int j = 0; j < cols; ++j) {
+      array[i][j] = value;
+    }
+  }
+}
+
+}  // namespace
+
+namespace webrtc {
+
+TEST(LappedTransformTest, Windowless) {
+  const size_t kChannels = 3;
+  const size_t kChunkLength = 512;
+  const size_t kBlockLength = 64;
+  const size_t kShiftAmount = 64;
+  NoopCallback noop;
+
+  // Rectangular window.
+  float window[kBlockLength];
+  std::fill(window, &window[kBlockLength], 1.0f);
+
+  LappedTransform trans(kChannels, kChannels, kChunkLength, window,
+                        kBlockLength, kShiftAmount, &noop);
+  float in_buffer[kChannels][kChunkLength];
+  float* in_chunk[kChannels];
+  float out_buffer[kChannels][kChunkLength];
+  float* out_chunk[kChannels];
+
+  in_chunk[0] = in_buffer[0];
+  in_chunk[1] = in_buffer[1];
+  in_chunk[2] = in_buffer[2];
+  out_chunk[0] = out_buffer[0];
+  out_chunk[1] = out_buffer[1];
+  out_chunk[2] = out_buffer[2];
+  SetFloatArray(2.0f, kChannels, kChunkLength, in_chunk);
+  SetFloatArray(-1.0f, kChannels, kChunkLength, out_chunk);
+
+  trans.ProcessChunk(in_chunk, out_chunk);
+
+  for (size_t i = 0; i < kChannels; ++i) {
+    for (size_t j = 0; j < kChunkLength; ++j) {
+      ASSERT_NEAR(out_chunk[i][j], 2.0f, 1e-5f);
+    }
+  }
+
+  ASSERT_EQ(kChunkLength / kBlockLength, noop.block_num());
+}
+
+TEST(LappedTransformTest, IdentityProcessor) {
+  const size_t kChunkLength = 512;
+  const size_t kBlockLength = 64;
+  const size_t kShiftAmount = 32;
+  NoopCallback noop;
+
+  // Identity window for |overlap = block_size / 2|.
+  float window[kBlockLength];
+  std::fill(window, &window[kBlockLength], std::sqrt(0.5f));
+
+  LappedTransform trans(1, 1, kChunkLength, window, kBlockLength, kShiftAmount,
+                        &noop);
+  float in_buffer[kChunkLength];
+  float* in_chunk = in_buffer;
+  float out_buffer[kChunkLength];
+  float* out_chunk = out_buffer;
+
+  SetFloatArray(2.0f, 1, kChunkLength, &in_chunk);
+  SetFloatArray(-1.0f, 1, kChunkLength, &out_chunk);
+
+  trans.ProcessChunk(&in_chunk, &out_chunk);
+
+  for (size_t i = 0; i < kChunkLength; ++i) {
+    ASSERT_NEAR(out_chunk[i],
+                (i < kBlockLength - kShiftAmount) ? 0.0f : 2.0f,
+                1e-5f);
+  }
+
+  ASSERT_EQ(kChunkLength / kShiftAmount, noop.block_num());
+}
+
+TEST(LappedTransformTest, Callbacks) {
+  const size_t kChunkLength = 512;
+  const size_t kBlockLength = 64;
+  FftCheckerCallback call;
+
+  // Rectangular window.
+  float window[kBlockLength];
+  std::fill(window, &window[kBlockLength], 1.0f);
+
+  LappedTransform trans(1, 1, kChunkLength, window, kBlockLength,
+                        kBlockLength, &call);
+  float in_buffer[kChunkLength];
+  float* in_chunk = in_buffer;
+  float out_buffer[kChunkLength];
+  float* out_chunk = out_buffer;
+
+  SetFloatArray(1.0f, 1, kChunkLength, &in_chunk);
+  SetFloatArray(-1.0f, 1, kChunkLength, &out_chunk);
+
+  trans.ProcessChunk(&in_chunk, &out_chunk);
+
+  ASSERT_EQ(kChunkLength / kBlockLength, call.block_num());
+}
+
+TEST(LappedTransformTest, chunk_length) {
+  const size_t kBlockLength = 64;
+  FftCheckerCallback call;
+  const float window[kBlockLength] = {};
+
+  // Make sure that chunk_length returns the same value passed to the
+  // LappedTransform constructor.
+  {
+    const size_t kExpectedChunkLength = 512;
+    const LappedTransform trans(1, 1, kExpectedChunkLength, window,
+                                kBlockLength, kBlockLength, &call);
+
+    EXPECT_EQ(kExpectedChunkLength, trans.chunk_length());
+  }
+  {
+    const size_t kExpectedChunkLength = 160;
+    const LappedTransform trans(1, 1, kExpectedChunkLength, window,
+                                kBlockLength, kBlockLength, &call);
+
+    EXPECT_EQ(kExpectedChunkLength, trans.chunk_length());
+  }
+}
+
+}  // namespace webrtc
diff --git a/common_audio/mocks/mock_smoothing_filter.h b/common_audio/mocks/mock_smoothing_filter.h
new file mode 100644
index 0000000..dec6ea5
--- /dev/null
+++ b/common_audio/mocks/mock_smoothing_filter.h
@@ -0,0 +1,28 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_MOCKS_MOCK_SMOOTHING_FILTER_H_
+#define COMMON_AUDIO_MOCKS_MOCK_SMOOTHING_FILTER_H_
+
+#include "common_audio/smoothing_filter.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockSmoothingFilter : public SmoothingFilter {
+ public:
+  MOCK_METHOD1(AddSample, void(float));
+  MOCK_METHOD0(GetAverage, rtc::Optional<float>());
+  MOCK_METHOD1(SetTimeConstantMs, bool(int));
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_MOCKS_MOCK_SMOOTHING_FILTER_H_
diff --git a/common_audio/real_fourier.cc b/common_audio/real_fourier.cc
new file mode 100644
index 0000000..cb0a005
--- /dev/null
+++ b/common_audio/real_fourier.cc
@@ -0,0 +1,59 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/real_fourier.h"
+
+#include "common_audio/real_fourier_ooura.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "rtc_base/checks.h"
+
+#ifdef RTC_USE_OPENMAX_DL
+#include "common_audio/real_fourier_openmax.h"
+#endif
+
+namespace webrtc {
+
+using std::complex;
+
+const size_t RealFourier::kFftBufferAlignment = 32;
+
+std::unique_ptr<RealFourier> RealFourier::Create(int fft_order) {
+#if defined(RTC_USE_OPENMAX_DL)
+  return std::unique_ptr<RealFourier>(new RealFourierOpenmax(fft_order));
+#else
+  return std::unique_ptr<RealFourier>(new RealFourierOoura(fft_order));
+#endif
+}
+
+int RealFourier::FftOrder(size_t length) {
+  RTC_CHECK_GT(length, 0U);
+  return WebRtcSpl_GetSizeInBits(static_cast<uint32_t>(length - 1));
+}
+
+size_t RealFourier::FftLength(int order) {
+  RTC_CHECK_GE(order, 0);
+  return static_cast<size_t>(1 << order);
+}
+
+size_t RealFourier::ComplexLength(int order) {
+  return FftLength(order) / 2 + 1;
+}
+
+RealFourier::fft_real_scoper RealFourier::AllocRealBuffer(int count) {
+  return fft_real_scoper(static_cast<float*>(
+      AlignedMalloc(sizeof(float) * count, kFftBufferAlignment)));
+}
+
+RealFourier::fft_cplx_scoper RealFourier::AllocCplxBuffer(int count) {
+  return fft_cplx_scoper(static_cast<complex<float>*>(
+      AlignedMalloc(sizeof(complex<float>) * count, kFftBufferAlignment)));
+}
+
+}  // namespace webrtc
diff --git a/common_audio/real_fourier.h b/common_audio/real_fourier.h
new file mode 100644
index 0000000..4c69c3c
--- /dev/null
+++ b/common_audio/real_fourier.h
@@ -0,0 +1,75 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_REAL_FOURIER_H_
+#define COMMON_AUDIO_REAL_FOURIER_H_
+
+#include <complex>
+#include <memory>
+
+#include "system_wrappers/include/aligned_malloc.h"
+
+// Uniform interface class for the real DFT and its inverse, for power-of-2
+// input lengths. Also contains helper functions for buffer allocation, taking
+// care of any memory alignment requirements the underlying library might have.
+
+namespace webrtc {
+
+class RealFourier {
+ public:
+  // Shorthand typenames for the scopers used by the buffer allocation helpers.
+  typedef std::unique_ptr<float[], AlignedFreeDeleter> fft_real_scoper;
+  typedef std::unique_ptr<std::complex<float>[], AlignedFreeDeleter>
+      fft_cplx_scoper;
+
+  // The alignment required for all input and output buffers, in bytes.
+  static const size_t kFftBufferAlignment;
+
+  // Construct a wrapper instance for the given input order, which must be
+  // between 1 and kMaxFftOrder, inclusively.
+  static std::unique_ptr<RealFourier> Create(int fft_order);
+  virtual ~RealFourier() {}
+
+  // Helper to compute the smallest FFT order (a power of 2) which will contain
+  // the given input length.
+  static int FftOrder(size_t length);
+
+  // Helper to compute the input length from the FFT order.
+  static size_t FftLength(int order);
+
+  // Helper to compute the exact length, in complex floats, of the transform
+  // output (i.e. |2^order / 2 + 1|).
+  static size_t ComplexLength(int order);
+
+  // Buffer allocation helpers. The buffers are large enough to hold |count|
+  // floats/complexes and suitably aligned for use by the implementation.
+  // The returned scopers are set up with proper deleters; the caller owns
+  // the allocated memory.
+  static fft_real_scoper AllocRealBuffer(int count);
+  static fft_cplx_scoper AllocCplxBuffer(int count);
+
+  // Main forward transform interface. The output array need only be big
+  // enough for |2^order / 2 + 1| elements - the conjugate pairs are not
+  // returned. Input and output must be properly aligned (e.g. through
+  // AllocRealBuffer and AllocCplxBuffer) and input length must be
+  // |2^order| (same as given at construction time).
+  virtual void Forward(const float* src, std::complex<float>* dest) const = 0;
+
+  // Inverse transform. Same input format as output above, conjugate pairs
+  // not needed.
+  virtual void Inverse(const std::complex<float>* src, float* dest) const = 0;
+
+  virtual int order() const = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_REAL_FOURIER_H_
+
diff --git a/common_audio/real_fourier_ooura.cc b/common_audio/real_fourier_ooura.cc
new file mode 100644
index 0000000..5d75717
--- /dev/null
+++ b/common_audio/real_fourier_ooura.cc
@@ -0,0 +1,85 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/real_fourier_ooura.h"
+
+#include <cmath>
+#include <algorithm>
+
+#include "common_audio/fft4g.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+using std::complex;
+
+namespace {
+
+void Conjugate(complex<float>* array, size_t complex_length) {
+  std::for_each(array, array + complex_length,
+                [=](complex<float>& v) { v = std::conj(v); });
+}
+
+size_t ComputeWorkIpSize(size_t fft_length) {
+  return static_cast<size_t>(2 + std::ceil(std::sqrt(
+      static_cast<float>(fft_length))));
+}
+
+}  // namespace
+
+RealFourierOoura::RealFourierOoura(int fft_order)
+    : order_(fft_order),
+      length_(FftLength(order_)),
+      complex_length_(ComplexLength(order_)),
+      // Zero-initializing work_ip_ will cause rdft to initialize these work
+      // arrays on the first call.
+      work_ip_(new size_t[ComputeWorkIpSize(length_)]()),
+      work_w_(new float[complex_length_]()) {
+  RTC_CHECK_GE(fft_order, 1);
+}
+
+void RealFourierOoura::Forward(const float* src, complex<float>* dest) const {
+  {
+    // This cast is well-defined since C++11. See "Non-static data members" at:
+    // http://en.cppreference.com/w/cpp/numeric/complex
+    auto dest_float = reinterpret_cast<float*>(dest);
+    std::copy(src, src + length_, dest_float);
+    WebRtc_rdft(length_, 1, dest_float, work_ip_.get(), work_w_.get());
+  }
+
+  // Ooura places real[n/2] in imag[0].
+  dest[complex_length_ - 1] = complex<float>(dest[0].imag(), 0.0f);
+  dest[0] = complex<float>(dest[0].real(), 0.0f);
+  // Ooura returns the conjugate of the usual Fourier definition.
+  Conjugate(dest, complex_length_);
+}
+
+void RealFourierOoura::Inverse(const complex<float>* src, float* dest) const {
+  {
+    auto dest_complex = reinterpret_cast<complex<float>*>(dest);
+    // The real output array is shorter than the input complex array by one
+    // complex element.
+    const size_t dest_complex_length = complex_length_ - 1;
+    std::copy(src, src + dest_complex_length, dest_complex);
+    // Restore Ooura's conjugate definition.
+    Conjugate(dest_complex, dest_complex_length);
+    // Restore real[n/2] to imag[0].
+    dest_complex[0] = complex<float>(dest_complex[0].real(),
+                                     src[complex_length_ - 1].real());
+  }
+
+  WebRtc_rdft(length_, -1, dest, work_ip_.get(), work_w_.get());
+
+  // Ooura returns a scaled version.
+  const float scale = 2.0f / length_;
+  std::for_each(dest, dest + length_, [scale](float& v) { v *= scale; });
+}
+
+}  // namespace webrtc
diff --git a/common_audio/real_fourier_ooura.h b/common_audio/real_fourier_ooura.h
new file mode 100644
index 0000000..f885a34
--- /dev/null
+++ b/common_audio/real_fourier_ooura.h
@@ -0,0 +1,45 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_REAL_FOURIER_OOURA_H_
+#define COMMON_AUDIO_REAL_FOURIER_OOURA_H_
+
+#include <complex>
+#include <memory>
+
+#include "common_audio/real_fourier.h"
+
+namespace webrtc {
+
+class RealFourierOoura : public RealFourier {
+ public:
+  explicit RealFourierOoura(int fft_order);
+
+  void Forward(const float* src, std::complex<float>* dest) const override;
+  void Inverse(const std::complex<float>* src, float* dest) const override;
+
+  int order() const override {
+    return order_;
+  }
+
+ private:
+  const int order_;
+  const size_t length_;
+  const size_t complex_length_;
+  // These are work arrays for Ooura. The names are based on the comments in
+  // fft4g.c.
+  const std::unique_ptr<size_t[]> work_ip_;
+  const std::unique_ptr<float[]> work_w_;
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_REAL_FOURIER_OOURA_H_
+
diff --git a/common_audio/real_fourier_openmax.cc b/common_audio/real_fourier_openmax.cc
new file mode 100644
index 0000000..6c5c9ce
--- /dev/null
+++ b/common_audio/real_fourier_openmax.cc
@@ -0,0 +1,69 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/real_fourier_openmax.h"
+
+#include <cstdlib>
+
+#include "dl/sp/api/omxSP.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+using std::complex;
+
+namespace {
+
+// Creates and initializes the Openmax state. Transfers ownership to caller.
+OMXFFTSpec_R_F32* CreateOpenmaxState(int order) {
+  RTC_CHECK_GE(order, 1);
+  // The omx implementation uses this macro to check order validity.
+  RTC_CHECK_LE(order, TWIDDLE_TABLE_ORDER);
+
+  OMX_INT buffer_size;
+  OMXResult r = omxSP_FFTGetBufSize_R_F32(order, &buffer_size);
+  RTC_CHECK_EQ(r, OMX_Sts_NoErr);
+
+  OMXFFTSpec_R_F32* omx_spec = malloc(buffer_size);
+  RTC_DCHECK(omx_spec);
+
+  r = omxSP_FFTInit_R_F32(omx_spec, order);
+  RTC_CHECK_EQ(r, OMX_Sts_NoErr);
+  return omx_spec;
+}
+
+}  // namespace
+
+RealFourierOpenmax::RealFourierOpenmax(int fft_order)
+    : order_(fft_order),
+      omx_spec_(CreateOpenmaxState(order_)) {
+}
+
+RealFourierOpenmax::~RealFourierOpenmax() {
+  free(omx_spec_);
+}
+
+void RealFourierOpenmax::Forward(const float* src, complex<float>* dest) const {
+  // This cast is well-defined since C++11. See "Non-static data members" at:
+  // http://en.cppreference.com/w/cpp/numeric/complex
+  OMXResult r =
+      omxSP_FFTFwd_RToCCS_F32(src, reinterpret_cast<OMX_F32*>(dest), omx_spec_);
+  RTC_CHECK_EQ(r, OMX_Sts_NoErr);
+}
+
+void RealFourierOpenmax::Inverse(const complex<float>* src, float* dest) const {
+  OMXResult r =
+      omxSP_FFTInv_CCSToR_F32(reinterpret_cast<const OMX_F32*>(src), dest,
+                              omx_spec_);
+  RTC_CHECK_EQ(r, OMX_Sts_NoErr);
+}
+
+}  // namespace webrtc
+
diff --git a/common_audio/real_fourier_openmax.h b/common_audio/real_fourier_openmax.h
new file mode 100644
index 0000000..af91dde
--- /dev/null
+++ b/common_audio/real_fourier_openmax.h
@@ -0,0 +1,47 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_REAL_FOURIER_OPENMAX_H_
+#define COMMON_AUDIO_REAL_FOURIER_OPENMAX_H_
+
+#ifndef RTC_USE_OPENMAX_DL
+#error "Only include this header if RTC_USE_OPENMAX_DL is defined."
+#endif
+
+#include <complex>
+
+#include "common_audio/real_fourier.h"
+
+namespace webrtc {
+
+class RealFourierOpenmax : public RealFourier {
+ public:
+  explicit RealFourierOpenmax(int fft_order);
+  ~RealFourierOpenmax() override;
+
+  void Forward(const float* src, std::complex<float>* dest) const override;
+  void Inverse(const std::complex<float>* src, float* dest) const override;
+
+  int order() const override {
+    return order_;
+  }
+
+ private:
+  // Basically a forward declare of OMXFFTSpec_R_F32. To get rid of the
+  // dependency on openmax.
+  typedef void OMXFFTSpec_R_F32_;
+  const int order_;
+
+  OMXFFTSpec_R_F32_* const omx_spec_;
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_REAL_FOURIER_OPENMAX_H_
diff --git a/common_audio/real_fourier_unittest.cc b/common_audio/real_fourier_unittest.cc
new file mode 100644
index 0000000..5ac39b2
--- /dev/null
+++ b/common_audio/real_fourier_unittest.cc
@@ -0,0 +1,111 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/real_fourier.h"
+
+#include <stdlib.h>
+
+#include "common_audio/real_fourier_ooura.h"
+#include "test/gtest.h"
+
+#ifdef RTC_USE_OPENMAX_DL
+#include "common_audio/real_fourier_openmax.h"
+#endif
+
+namespace webrtc {
+
+using std::complex;
+
+TEST(RealFourierStaticsTest, AllocatorAlignment) {
+  {
+    RealFourier::fft_real_scoper real;
+    real = RealFourier::AllocRealBuffer(3);
+    ASSERT_TRUE(real.get() != nullptr);
+    uintptr_t ptr_value = reinterpret_cast<uintptr_t>(real.get());
+    EXPECT_EQ(0u, ptr_value % RealFourier::kFftBufferAlignment);
+  }
+  {
+    RealFourier::fft_cplx_scoper cplx;
+    cplx = RealFourier::AllocCplxBuffer(3);
+    ASSERT_TRUE(cplx.get() != nullptr);
+    uintptr_t ptr_value = reinterpret_cast<uintptr_t>(cplx.get());
+    EXPECT_EQ(0u, ptr_value % RealFourier::kFftBufferAlignment);
+  }
+}
+
+TEST(RealFourierStaticsTest, OrderComputation) {
+  EXPECT_EQ(4, RealFourier::FftOrder(13));
+  EXPECT_EQ(5, RealFourier::FftOrder(32));
+  EXPECT_EQ(1, RealFourier::FftOrder(2));
+  EXPECT_EQ(0, RealFourier::FftOrder(1));
+}
+
+TEST(RealFourierStaticsTest, ComplexLengthComputation) {
+  EXPECT_EQ(2U, RealFourier::ComplexLength(1));
+  EXPECT_EQ(3U, RealFourier::ComplexLength(2));
+  EXPECT_EQ(5U, RealFourier::ComplexLength(3));
+  EXPECT_EQ(9U, RealFourier::ComplexLength(4));
+  EXPECT_EQ(17U, RealFourier::ComplexLength(5));
+  EXPECT_EQ(65U, RealFourier::ComplexLength(7));
+}
+
+template <typename T>
+class RealFourierTest : public ::testing::Test {
+ protected:
+  RealFourierTest()
+      : rf_(2),
+        real_buffer_(RealFourier::AllocRealBuffer(4)),
+        cplx_buffer_(RealFourier::AllocCplxBuffer(3)) {}
+
+  ~RealFourierTest() {
+  }
+
+  T rf_;
+  const RealFourier::fft_real_scoper real_buffer_;
+  const RealFourier::fft_cplx_scoper cplx_buffer_;
+};
+
+using FftTypes = ::testing::Types<
+#if defined(RTC_USE_OPENMAX_DL)
+    RealFourierOpenmax,
+#endif
+    RealFourierOoura>;
+TYPED_TEST_CASE(RealFourierTest, FftTypes);
+
+TYPED_TEST(RealFourierTest, SimpleForwardTransform) {
+  this->real_buffer_[0] = 1.0f;
+  this->real_buffer_[1] = 2.0f;
+  this->real_buffer_[2] = 3.0f;
+  this->real_buffer_[3] = 4.0f;
+
+  this->rf_.Forward(this->real_buffer_.get(), this->cplx_buffer_.get());
+
+  EXPECT_NEAR(this->cplx_buffer_[0].real(), 10.0f, 1e-8f);
+  EXPECT_NEAR(this->cplx_buffer_[0].imag(), 0.0f, 1e-8f);
+  EXPECT_NEAR(this->cplx_buffer_[1].real(), -2.0f, 1e-8f);
+  EXPECT_NEAR(this->cplx_buffer_[1].imag(), 2.0f, 1e-8f);
+  EXPECT_NEAR(this->cplx_buffer_[2].real(), -2.0f, 1e-8f);
+  EXPECT_NEAR(this->cplx_buffer_[2].imag(), 0.0f, 1e-8f);
+}
+
+TYPED_TEST(RealFourierTest, SimpleBackwardTransform) {
+  this->cplx_buffer_[0] = complex<float>(10.0f, 0.0f);
+  this->cplx_buffer_[1] = complex<float>(-2.0f, 2.0f);
+  this->cplx_buffer_[2] = complex<float>(-2.0f, 0.0f);
+
+  this->rf_.Inverse(this->cplx_buffer_.get(), this->real_buffer_.get());
+
+  EXPECT_NEAR(this->real_buffer_[0], 1.0f, 1e-8f);
+  EXPECT_NEAR(this->real_buffer_[1], 2.0f, 1e-8f);
+  EXPECT_NEAR(this->real_buffer_[2], 3.0f, 1e-8f);
+  EXPECT_NEAR(this->real_buffer_[3], 4.0f, 1e-8f);
+}
+
+}  // namespace webrtc
diff --git a/common_audio/resampler/include/push_resampler.h b/common_audio/resampler/include/push_resampler.h
new file mode 100644
index 0000000..046415b
--- /dev/null
+++ b/common_audio/resampler/include/push_resampler.h
@@ -0,0 +1,53 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_RESAMPLER_INCLUDE_PUSH_RESAMPLER_H_
+#define COMMON_AUDIO_RESAMPLER_INCLUDE_PUSH_RESAMPLER_H_
+
+#include <memory>
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class PushSincResampler;
+
+// Wraps PushSincResampler to provide stereo support.
+// TODO(ajm): add support for an arbitrary number of channels.
+template <typename T>
+class PushResampler {
+ public:
+  PushResampler();
+  virtual ~PushResampler();
+
+  // Must be called whenever the parameters change. Free to be called at any
+  // time as it is a no-op if parameters have not changed since the last call.
+  int InitializeIfNeeded(int src_sample_rate_hz, int dst_sample_rate_hz,
+                         size_t num_channels);
+
+  // Returns the total number of samples provided in destination (e.g. 32 kHz,
+  // 2 channel audio gives 640 samples).
+  int Resample(const T* src, size_t src_length, T* dst, size_t dst_capacity);
+
+ private:
+  std::unique_ptr<PushSincResampler> sinc_resampler_;
+  std::unique_ptr<PushSincResampler> sinc_resampler_right_;
+  int src_sample_rate_hz_;
+  int dst_sample_rate_hz_;
+  size_t num_channels_;
+  std::unique_ptr<T[]> src_left_;
+  std::unique_ptr<T[]> src_right_;
+  std::unique_ptr<T[]> dst_left_;
+  std::unique_ptr<T[]> dst_right_;
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_RESAMPLER_INCLUDE_PUSH_RESAMPLER_H_
diff --git a/common_audio/resampler/include/resampler.h b/common_audio/resampler/include/resampler.h
new file mode 100644
index 0000000..fec2c1a
--- /dev/null
+++ b/common_audio/resampler/include/resampler.h
@@ -0,0 +1,98 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * A wrapper for resampling a numerous amount of sampling combinations.
+ */
+
+#ifndef COMMON_AUDIO_RESAMPLER_INCLUDE_RESAMPLER_H_
+#define COMMON_AUDIO_RESAMPLER_INCLUDE_RESAMPLER_H_
+
+#include <stddef.h>
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// All methods return 0 on success and -1 on failure.
+class Resampler {
+ public:
+  Resampler();
+  Resampler(int inFreq, int outFreq, size_t num_channels);
+  ~Resampler();
+
+  // Reset all states
+  int Reset(int inFreq, int outFreq, size_t num_channels);
+
+  // Reset all states if any parameter has changed
+  int ResetIfNeeded(int inFreq, int outFreq, size_t num_channels);
+
+  // Resample samplesIn to samplesOut.
+  int Push(const int16_t* samplesIn, size_t lengthIn, int16_t* samplesOut,
+           size_t maxLen, size_t& outLen);  // NOLINT: to avoid changing APIs
+
+ private:
+  enum ResamplerMode {
+    kResamplerMode1To1,
+    kResamplerMode1To2,
+    kResamplerMode1To3,
+    kResamplerMode1To4,
+    kResamplerMode1To6,
+    kResamplerMode1To12,
+    kResamplerMode2To3,
+    kResamplerMode2To11,
+    kResamplerMode4To11,
+    kResamplerMode8To11,
+    kResamplerMode11To16,
+    kResamplerMode11To32,
+    kResamplerMode2To1,
+    kResamplerMode3To1,
+    kResamplerMode4To1,
+    kResamplerMode6To1,
+    kResamplerMode12To1,
+    kResamplerMode3To2,
+    kResamplerMode11To2,
+    kResamplerMode11To4,
+    kResamplerMode11To8
+  };
+
+  // Computes the resampler mode for a given sampling frequency pair.
+  // Returns -1 for unsupported frequency pairs.
+  static int ComputeResamplerMode(int in_freq_hz,
+                                  int out_freq_hz,
+                                  ResamplerMode* mode);
+
+  // Generic pointers since we don't know what states we'll need
+  void* state1_;
+  void* state2_;
+  void* state3_;
+
+  // Storage if needed
+  int16_t* in_buffer_;
+  int16_t* out_buffer_;
+  size_t in_buffer_size_;
+  size_t out_buffer_size_;
+  size_t in_buffer_size_max_;
+  size_t out_buffer_size_max_;
+
+  int my_in_frequency_khz_;
+  int my_out_frequency_khz_;
+  ResamplerMode my_mode_;
+  size_t num_channels_;
+
+  // Extra instance for stereo
+  Resampler* slave_left_;
+  Resampler* slave_right_;
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_RESAMPLER_INCLUDE_RESAMPLER_H_
diff --git a/common_audio/resampler/push_resampler.cc b/common_audio/resampler/push_resampler.cc
new file mode 100644
index 0000000..3930624
--- /dev/null
+++ b/common_audio/resampler/push_resampler.cc
@@ -0,0 +1,147 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/resampler/include/push_resampler.h"
+
+#include <string.h>
+
+#include "common_audio/include/audio_util.h"
+#include "common_audio/resampler/include/resampler.h"
+#include "common_audio/resampler/push_sinc_resampler.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+// These checks were factored out into a non-templatized function
+// due to problems with clang on Windows in debug builds.
+// For some reason having the DCHECKs inline in the template code
+// caused the compiler to generate code that threw off the linker.
+// TODO(tommi): Re-enable when we've figured out what the problem is.
+// http://crbug.com/615050
+void CheckValidInitParams(int src_sample_rate_hz, int dst_sample_rate_hz,
+                          size_t num_channels) {
+// The below checks are temporarily disabled on WEBRTC_WIN due to problems
+// with clang debug builds.
+#if !defined(WEBRTC_WIN) && defined(__clang__)
+  RTC_DCHECK_GT(src_sample_rate_hz, 0);
+  RTC_DCHECK_GT(dst_sample_rate_hz, 0);
+  RTC_DCHECK_GT(num_channels, 0);
+  RTC_DCHECK_LE(num_channels, 2);
+#endif
+}
+
+void CheckExpectedBufferSizes(size_t src_length,
+                              size_t dst_capacity,
+                              size_t num_channels,
+                              int src_sample_rate,
+                              int dst_sample_rate) {
+// The below checks are temporarily disabled on WEBRTC_WIN due to problems
+// with clang debug builds.
+// TODO(tommi): Re-enable when we've figured out what the problem is.
+// http://crbug.com/615050
+#if !defined(WEBRTC_WIN) && defined(__clang__)
+  const size_t src_size_10ms = src_sample_rate * num_channels / 100;
+  const size_t dst_size_10ms = dst_sample_rate * num_channels / 100;
+  RTC_DCHECK_EQ(src_length, src_size_10ms);
+  RTC_DCHECK_GE(dst_capacity, dst_size_10ms);
+#endif
+}
+}  // namespace
+
+template <typename T>
+PushResampler<T>::PushResampler()
+    : src_sample_rate_hz_(0),
+      dst_sample_rate_hz_(0),
+      num_channels_(0) {
+}
+
+template <typename T>
+PushResampler<T>::~PushResampler() {
+}
+
+template <typename T>
+int PushResampler<T>::InitializeIfNeeded(int src_sample_rate_hz,
+                                         int dst_sample_rate_hz,
+                                         size_t num_channels) {
+  CheckValidInitParams(src_sample_rate_hz, dst_sample_rate_hz, num_channels);
+
+  if (src_sample_rate_hz == src_sample_rate_hz_ &&
+      dst_sample_rate_hz == dst_sample_rate_hz_ &&
+      num_channels == num_channels_) {
+    // No-op if settings haven't changed.
+    return 0;
+  }
+
+  if (src_sample_rate_hz <= 0 || dst_sample_rate_hz <= 0 || num_channels <= 0 ||
+      num_channels > 2) {
+    return -1;
+  }
+
+  src_sample_rate_hz_ = src_sample_rate_hz;
+  dst_sample_rate_hz_ = dst_sample_rate_hz;
+  num_channels_ = num_channels;
+
+  const size_t src_size_10ms_mono =
+      static_cast<size_t>(src_sample_rate_hz / 100);
+  const size_t dst_size_10ms_mono =
+      static_cast<size_t>(dst_sample_rate_hz / 100);
+  sinc_resampler_.reset(new PushSincResampler(src_size_10ms_mono,
+                                              dst_size_10ms_mono));
+  if (num_channels_ == 2) {
+    src_left_.reset(new T[src_size_10ms_mono]);
+    src_right_.reset(new T[src_size_10ms_mono]);
+    dst_left_.reset(new T[dst_size_10ms_mono]);
+    dst_right_.reset(new T[dst_size_10ms_mono]);
+    sinc_resampler_right_.reset(new PushSincResampler(src_size_10ms_mono,
+                                                      dst_size_10ms_mono));
+  }
+
+  return 0;
+}
+
+template <typename T>
+int PushResampler<T>::Resample(const T* src, size_t src_length, T* dst,
+                               size_t dst_capacity) {
+  CheckExpectedBufferSizes(src_length, dst_capacity, num_channels_,
+                           src_sample_rate_hz_, dst_sample_rate_hz_);
+
+  if (src_sample_rate_hz_ == dst_sample_rate_hz_) {
+    // The old resampler provides this memcpy facility in the case of matching
+    // sample rates, so reproduce it here for the sinc resampler.
+    memcpy(dst, src, src_length * sizeof(T));
+    return static_cast<int>(src_length);
+  }
+  if (num_channels_ == 2) {
+    const size_t src_length_mono = src_length / num_channels_;
+    const size_t dst_capacity_mono = dst_capacity / num_channels_;
+    T* deinterleaved[] = {src_left_.get(), src_right_.get()};
+    Deinterleave(src, src_length_mono, num_channels_, deinterleaved);
+
+    size_t dst_length_mono =
+        sinc_resampler_->Resample(src_left_.get(), src_length_mono,
+                                  dst_left_.get(), dst_capacity_mono);
+    sinc_resampler_right_->Resample(src_right_.get(), src_length_mono,
+                                    dst_right_.get(), dst_capacity_mono);
+
+    deinterleaved[0] = dst_left_.get();
+    deinterleaved[1] = dst_right_.get();
+    Interleave(deinterleaved, dst_length_mono, num_channels_, dst);
+    return static_cast<int>(dst_length_mono * num_channels_);
+  } else {
+    return static_cast<int>(
+        sinc_resampler_->Resample(src, src_length, dst, dst_capacity));
+  }
+}
+
+// Explictly generate required instantiations.
+template class PushResampler<int16_t>;
+template class PushResampler<float>;
+
+}  // namespace webrtc
diff --git a/common_audio/resampler/push_resampler_unittest.cc b/common_audio/resampler/push_resampler_unittest.cc
new file mode 100644
index 0000000..6a0c60a
--- /dev/null
+++ b/common_audio/resampler/push_resampler_unittest.cc
@@ -0,0 +1,55 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/resampler/include/push_resampler.h"
+#include "rtc_base/checks.h"  // RTC_DCHECK_IS_ON
+#include "test/gtest.h"
+
+// Quality testing of PushResampler is handled through output_mixer_unittest.cc.
+
+namespace webrtc {
+
+// The below tests are temporarily disabled on WEBRTC_WIN due to problems
+// with clang debug builds.
+// TODO(tommi): Re-enable when we've figured out what the problem is.
+// http://crbug.com/615050
+#if !defined(WEBRTC_WIN) && defined(__clang__) && !defined(NDEBUG)
+TEST(PushResamplerTest, VerifiesInputParameters) {
+  PushResampler<int16_t> resampler;
+  EXPECT_EQ(0, resampler.InitializeIfNeeded(16000, 16000, 1));
+  EXPECT_EQ(0, resampler.InitializeIfNeeded(16000, 16000, 2));
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST(PushResamplerTest, VerifiesBadInputParameters1) {
+  PushResampler<int16_t> resampler;
+  EXPECT_DEATH(resampler.InitializeIfNeeded(-1, 16000, 1),
+               "src_sample_rate_hz");
+}
+
+TEST(PushResamplerTest, VerifiesBadInputParameters2) {
+  PushResampler<int16_t> resampler;
+  EXPECT_DEATH(resampler.InitializeIfNeeded(16000, -1, 1),
+               "dst_sample_rate_hz");
+}
+
+TEST(PushResamplerTest, VerifiesBadInputParameters3) {
+  PushResampler<int16_t> resampler;
+  EXPECT_DEATH(resampler.InitializeIfNeeded(16000, 16000, 0), "num_channels");
+}
+
+TEST(PushResamplerTest, VerifiesBadInputParameters4) {
+  PushResampler<int16_t> resampler;
+  EXPECT_DEATH(resampler.InitializeIfNeeded(16000, 16000, 3), "num_channels");
+}
+#endif
+#endif
+
+}  // namespace webrtc
diff --git a/common_audio/resampler/push_sinc_resampler.cc b/common_audio/resampler/push_sinc_resampler.cc
new file mode 100644
index 0000000..14ab330
--- /dev/null
+++ b/common_audio/resampler/push_sinc_resampler.cc
@@ -0,0 +1,103 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/resampler/push_sinc_resampler.h"
+
+#include <cstring>
+
+#include "common_audio/include/audio_util.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+PushSincResampler::PushSincResampler(size_t source_frames,
+                                     size_t destination_frames)
+    : resampler_(new SincResampler(source_frames * 1.0 / destination_frames,
+                                   source_frames,
+                                   this)),
+      source_ptr_(nullptr),
+      source_ptr_int_(nullptr),
+      destination_frames_(destination_frames),
+      first_pass_(true),
+      source_available_(0) {}
+
+PushSincResampler::~PushSincResampler() {
+}
+
+size_t PushSincResampler::Resample(const int16_t* source,
+                                   size_t source_length,
+                                   int16_t* destination,
+                                   size_t destination_capacity) {
+  if (!float_buffer_.get())
+    float_buffer_.reset(new float[destination_frames_]);
+
+  source_ptr_int_ = source;
+  // Pass nullptr as the float source to have Run() read from the int16 source.
+  Resample(nullptr, source_length, float_buffer_.get(), destination_frames_);
+  FloatS16ToS16(float_buffer_.get(), destination_frames_, destination);
+  source_ptr_int_ = nullptr;
+  return destination_frames_;
+}
+
+size_t PushSincResampler::Resample(const float* source,
+                                   size_t source_length,
+                                   float* destination,
+                                   size_t destination_capacity) {
+  RTC_CHECK_EQ(source_length, resampler_->request_frames());
+  RTC_CHECK_GE(destination_capacity, destination_frames_);
+  // Cache the source pointer. Calling Resample() will immediately trigger
+  // the Run() callback whereupon we provide the cached value.
+  source_ptr_ = source;
+  source_available_ = source_length;
+
+  // On the first pass, we call Resample() twice. During the first call, we
+  // provide dummy input and discard the output. This is done to prime the
+  // SincResampler buffer with the correct delay (half the kernel size), thereby
+  // ensuring that all later Resample() calls will only result in one input
+  // request through Run().
+  //
+  // If this wasn't done, SincResampler would call Run() twice on the first
+  // pass, and we'd have to introduce an entire |source_frames| of delay, rather
+  // than the minimum half kernel.
+  //
+  // It works out that ChunkSize() is exactly the amount of output we need to
+  // request in order to prime the buffer with a single Run() request for
+  // |source_frames|.
+  if (first_pass_)
+    resampler_->Resample(resampler_->ChunkSize(), destination);
+
+  resampler_->Resample(destination_frames_, destination);
+  source_ptr_ = nullptr;
+  return destination_frames_;
+}
+
+void PushSincResampler::Run(size_t frames, float* destination) {
+  // Ensure we are only asked for the available samples. This would fail if
+  // Run() was triggered more than once per Resample() call.
+  RTC_CHECK_EQ(source_available_, frames);
+
+  if (first_pass_) {
+    // Provide dummy input on the first pass, the output of which will be
+    // discarded, as described in Resample().
+    std::memset(destination, 0, frames * sizeof(*destination));
+    first_pass_ = false;
+    return;
+  }
+
+  if (source_ptr_) {
+    std::memcpy(destination, source_ptr_, frames * sizeof(*destination));
+  } else {
+    for (size_t i = 0; i < frames; ++i)
+      destination[i] = static_cast<float>(source_ptr_int_[i]);
+  }
+  source_available_ -= frames;
+}
+
+}  // namespace webrtc
diff --git a/common_audio/resampler/push_sinc_resampler.h b/common_audio/resampler/push_sinc_resampler.h
new file mode 100644
index 0000000..bdc03a2
--- /dev/null
+++ b/common_audio/resampler/push_sinc_resampler.h
@@ -0,0 +1,77 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_RESAMPLER_PUSH_SINC_RESAMPLER_H_
+#define COMMON_AUDIO_RESAMPLER_PUSH_SINC_RESAMPLER_H_
+
+#include <memory>
+
+#include "common_audio/resampler/sinc_resampler.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// A thin wrapper over SincResampler to provide a push-based interface as
+// required by WebRTC. SincResampler uses a pull-based interface, and will
+// use SincResamplerCallback::Run() to request data upon a call to Resample().
+// These Run() calls will happen on the same thread Resample() is called on.
+class PushSincResampler : public SincResamplerCallback {
+ public:
+  // Provide the size of the source and destination blocks in samples. These
+  // must correspond to the same time duration (typically 10 ms) as the sample
+  // ratio is inferred from them.
+  PushSincResampler(size_t source_frames, size_t destination_frames);
+  ~PushSincResampler() override;
+
+  // Perform the resampling. |source_frames| must always equal the
+  // |source_frames| provided at construction. |destination_capacity| must be
+  // at least as large as |destination_frames|. Returns the number of samples
+  // provided in destination (for convenience, since this will always be equal
+  // to |destination_frames|).
+  size_t Resample(const int16_t* source, size_t source_frames,
+                  int16_t* destination, size_t destination_capacity);
+  size_t Resample(const float* source,
+                  size_t source_frames,
+                  float* destination,
+                  size_t destination_capacity);
+
+  // Delay due to the filter kernel. Essentially, the time after which an input
+  // sample will appear in the resampled output.
+  static float AlgorithmicDelaySeconds(int source_rate_hz) {
+    return 1.f / source_rate_hz * SincResampler::kKernelSize / 2;
+  }
+
+ protected:
+  // Implements SincResamplerCallback.
+  void Run(size_t frames, float* destination) override;
+
+ private:
+  friend class PushSincResamplerTest;
+  SincResampler* get_resampler_for_testing() { return resampler_.get(); }
+
+  std::unique_ptr<SincResampler> resampler_;
+  std::unique_ptr<float[]> float_buffer_;
+  const float* source_ptr_;
+  const int16_t* source_ptr_int_;
+  const size_t destination_frames_;
+
+  // True on the first call to Resample(), to prime the SincResampler buffer.
+  bool first_pass_;
+
+  // Used to assert we are only requested for as much data as is available.
+  size_t source_available_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(PushSincResampler);
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_RESAMPLER_PUSH_SINC_RESAMPLER_H_
diff --git a/common_audio/resampler/push_sinc_resampler_unittest.cc b/common_audio/resampler/push_sinc_resampler_unittest.cc
new file mode 100644
index 0000000..3be7c0a
--- /dev/null
+++ b/common_audio/resampler/push_sinc_resampler_unittest.cc
@@ -0,0 +1,338 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+#include <memory>
+
+#include "common_audio/include/audio_util.h"
+#include "common_audio/resampler/push_sinc_resampler.h"
+#include "common_audio/resampler/sinusoidal_linear_chirp_source.h"
+#include "rtc_base/timeutils.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace {
+
+// Almost all conversions have an RMS error of around -14 dbFS.
+const double kResamplingRMSError = -14.42;
+
+// Used to convert errors to dbFS.
+template <typename T>
+T DBFS(T x) {
+  return 20 * std::log10(x);
+}
+
+}  // namespace
+
+class PushSincResamplerTest : public ::testing::TestWithParam<
+    ::testing::tuple<int, int, double, double>> {
+ public:
+  PushSincResamplerTest()
+      : input_rate_(::testing::get<0>(GetParam())),
+        output_rate_(::testing::get<1>(GetParam())),
+        rms_error_(::testing::get<2>(GetParam())),
+        low_freq_error_(::testing::get<3>(GetParam())) {
+  }
+
+  ~PushSincResamplerTest() override {}
+
+ protected:
+  void ResampleBenchmarkTest(bool int_format);
+  void ResampleTest(bool int_format);
+
+  int input_rate_;
+  int output_rate_;
+  double rms_error_;
+  double low_freq_error_;
+};
+
+class ZeroSource : public SincResamplerCallback {
+ public:
+  void Run(size_t frames, float* destination) {
+    std::memset(destination, 0, sizeof(float) * frames);
+  }
+};
+
+void PushSincResamplerTest::ResampleBenchmarkTest(bool int_format) {
+  const size_t input_samples = static_cast<size_t>(input_rate_ / 100);
+  const size_t output_samples = static_cast<size_t>(output_rate_ / 100);
+  const int kResampleIterations = 500000;
+
+  // Source for data to be resampled.
+  ZeroSource resampler_source;
+
+  std::unique_ptr<float[]> resampled_destination(new float[output_samples]);
+  std::unique_ptr<float[]> source(new float[input_samples]);
+  std::unique_ptr<int16_t[]> source_int(new int16_t[input_samples]);
+  std::unique_ptr<int16_t[]> destination_int(new int16_t[output_samples]);
+
+  resampler_source.Run(input_samples, source.get());
+  for (size_t i = 0; i < input_samples; ++i) {
+    source_int[i] = static_cast<int16_t>(floor(32767 * source[i] + 0.5));
+  }
+
+  printf("Benchmarking %d iterations of %d Hz -> %d Hz:\n",
+         kResampleIterations, input_rate_, output_rate_);
+  const double io_ratio = input_rate_ / static_cast<double>(output_rate_);
+  SincResampler sinc_resampler(io_ratio, SincResampler::kDefaultRequestSize,
+                               &resampler_source);
+  int64_t start = rtc::TimeNanos();
+  for (int i = 0; i < kResampleIterations; ++i) {
+    sinc_resampler.Resample(output_samples, resampled_destination.get());
+  }
+  double total_time_sinc_us =
+      (rtc::TimeNanos() - start) / rtc::kNumNanosecsPerMicrosec;
+  printf("SincResampler took %.2f us per frame.\n",
+         total_time_sinc_us / kResampleIterations);
+
+  PushSincResampler resampler(input_samples, output_samples);
+  start = rtc::TimeNanos();
+  if (int_format) {
+    for (int i = 0; i < kResampleIterations; ++i) {
+      EXPECT_EQ(output_samples,
+                resampler.Resample(source_int.get(),
+                                   input_samples,
+                                   destination_int.get(),
+                                   output_samples));
+    }
+  } else {
+    for (int i = 0; i < kResampleIterations; ++i) {
+      EXPECT_EQ(output_samples,
+                resampler.Resample(source.get(),
+                                   input_samples,
+                                   resampled_destination.get(),
+                                   output_samples));
+    }
+  }
+  double total_time_us =
+      (rtc::TimeNanos() - start) / rtc::kNumNanosecsPerMicrosec;
+  printf("PushSincResampler took %.2f us per frame; which is a %.1f%% overhead "
+         "on SincResampler.\n\n", total_time_us / kResampleIterations,
+         (total_time_us - total_time_sinc_us) / total_time_sinc_us * 100);
+}
+
+// Disabled because it takes too long to run routinely. Use for performance
+// benchmarking when needed.
+TEST_P(PushSincResamplerTest, DISABLED_BenchmarkInt) {
+  ResampleBenchmarkTest(true);
+}
+
+TEST_P(PushSincResamplerTest, DISABLED_BenchmarkFloat) {
+  ResampleBenchmarkTest(false);
+}
+
+// Tests resampling using a given input and output sample rate.
+void PushSincResamplerTest::ResampleTest(bool int_format) {
+  // Make comparisons using one second of data.
+  static const double kTestDurationSecs = 1;
+  // 10 ms blocks.
+  const size_t kNumBlocks = static_cast<size_t>(kTestDurationSecs * 100);
+  const size_t input_block_size = static_cast<size_t>(input_rate_ / 100);
+  const size_t output_block_size = static_cast<size_t>(output_rate_ / 100);
+  const size_t input_samples =
+      static_cast<size_t>(kTestDurationSecs * input_rate_);
+  const size_t output_samples =
+      static_cast<size_t>(kTestDurationSecs * output_rate_);
+
+  // Nyquist frequency for the input sampling rate.
+  const double input_nyquist_freq = 0.5 * input_rate_;
+
+  // Source for data to be resampled.
+  SinusoidalLinearChirpSource resampler_source(
+      input_rate_, input_samples, input_nyquist_freq, 0);
+
+  PushSincResampler resampler(input_block_size, output_block_size);
+
+  // TODO(dalecurtis): If we switch to AVX/SSE optimization, we'll need to
+  // allocate these on 32-byte boundaries and ensure they're sized % 32 bytes.
+  std::unique_ptr<float[]> resampled_destination(new float[output_samples]);
+  std::unique_ptr<float[]> pure_destination(new float[output_samples]);
+  std::unique_ptr<float[]> source(new float[input_samples]);
+  std::unique_ptr<int16_t[]> source_int(new int16_t[input_block_size]);
+  std::unique_ptr<int16_t[]> destination_int(new int16_t[output_block_size]);
+
+  // The sinc resampler has an implicit delay of approximately half the kernel
+  // size at the input sample rate. By moving to a push model, this delay
+  // becomes explicit and is managed by zero-stuffing in PushSincResampler. We
+  // deal with it in the test by delaying the "pure" source to match. It must be
+  // checked before the first call to Resample(), because ChunkSize() will
+  // change afterwards.
+  const size_t output_delay_samples = output_block_size -
+      resampler.get_resampler_for_testing()->ChunkSize();
+
+  // Generate resampled signal.
+  // With the PushSincResampler, we produce the signal block-by-10ms-block
+  // rather than in a single pass, to exercise how it will be used in WebRTC.
+  resampler_source.Run(input_samples, source.get());
+  if (int_format) {
+    for (size_t i = 0; i < kNumBlocks; ++i) {
+      FloatToS16(&source[i * input_block_size], input_block_size,
+               source_int.get());
+      EXPECT_EQ(output_block_size,
+                resampler.Resample(source_int.get(),
+                                   input_block_size,
+                                   destination_int.get(),
+                                   output_block_size));
+      S16ToFloat(destination_int.get(), output_block_size,
+               &resampled_destination[i * output_block_size]);
+    }
+  } else {
+    for (size_t i = 0; i < kNumBlocks; ++i) {
+      EXPECT_EQ(
+          output_block_size,
+          resampler.Resample(&source[i * input_block_size],
+                             input_block_size,
+                             &resampled_destination[i * output_block_size],
+                             output_block_size));
+    }
+  }
+
+  // Generate pure signal.
+  SinusoidalLinearChirpSource pure_source(
+      output_rate_, output_samples, input_nyquist_freq, output_delay_samples);
+  pure_source.Run(output_samples, pure_destination.get());
+
+  // Range of the Nyquist frequency (0.5 * min(input rate, output_rate)) which
+  // we refer to as low and high.
+  static const double kLowFrequencyNyquistRange = 0.7;
+  static const double kHighFrequencyNyquistRange = 0.9;
+
+  // Calculate Root-Mean-Square-Error and maximum error for the resampling.
+  double sum_of_squares = 0;
+  double low_freq_max_error = 0;
+  double high_freq_max_error = 0;
+  int minimum_rate = std::min(input_rate_, output_rate_);
+  double low_frequency_range = kLowFrequencyNyquistRange * 0.5 * minimum_rate;
+  double high_frequency_range = kHighFrequencyNyquistRange * 0.5 * minimum_rate;
+
+  for (size_t i = 0; i < output_samples; ++i) {
+    double error = fabs(resampled_destination[i] - pure_destination[i]);
+
+    if (pure_source.Frequency(i) < low_frequency_range) {
+      if (error > low_freq_max_error)
+        low_freq_max_error = error;
+    } else if (pure_source.Frequency(i) < high_frequency_range) {
+      if (error > high_freq_max_error)
+        high_freq_max_error = error;
+    }
+    // TODO(dalecurtis): Sanity check frequencies > kHighFrequencyNyquistRange.
+
+    sum_of_squares += error * error;
+  }
+
+  double rms_error = sqrt(sum_of_squares / output_samples);
+
+  rms_error = DBFS(rms_error);
+  // In order to keep the thresholds in this test identical to SincResamplerTest
+  // we must account for the quantization error introduced by truncating from
+  // float to int. This happens twice (once at input and once at output) and we
+  // allow for the maximum possible error (1 / 32767) for each step.
+  //
+  // The quantization error is insignificant in the RMS calculation so does not
+  // need to be accounted for there.
+  low_freq_max_error = DBFS(low_freq_max_error - 2.0 / 32767);
+  high_freq_max_error = DBFS(high_freq_max_error - 2.0 / 32767);
+
+  EXPECT_LE(rms_error, rms_error_);
+  EXPECT_LE(low_freq_max_error, low_freq_error_);
+
+  // All conversions currently have a high frequency error around -6 dbFS.
+  static const double kHighFrequencyMaxError = -6.02;
+  EXPECT_LE(high_freq_max_error, kHighFrequencyMaxError);
+}
+
+TEST_P(PushSincResamplerTest, ResampleInt) { ResampleTest(true); }
+
+TEST_P(PushSincResamplerTest, ResampleFloat) { ResampleTest(false); }
+
+// Thresholds chosen arbitrarily based on what each resampling reported during
+// testing.  All thresholds are in dbFS, http://en.wikipedia.org/wiki/DBFS.
+INSTANTIATE_TEST_CASE_P(
+    PushSincResamplerTest,
+    PushSincResamplerTest,
+    ::testing::Values(
+        // First run through the rates tested in SincResamplerTest. The
+        // thresholds are identical.
+        //
+        // We don't test rates which fail to provide an integer number of
+        // samples in a 10 ms block (22050 and 11025 Hz). WebRTC doesn't support
+        // these rates in any case (for the same reason).
+
+        // To 44.1kHz
+        ::testing::make_tuple(8000, 44100, kResamplingRMSError, -62.73),
+        ::testing::make_tuple(16000, 44100, kResamplingRMSError, -62.54),
+        ::testing::make_tuple(32000, 44100, kResamplingRMSError, -63.32),
+        ::testing::make_tuple(44100, 44100, kResamplingRMSError, -73.53),
+        ::testing::make_tuple(48000, 44100, -15.01, -64.04),
+        ::testing::make_tuple(96000, 44100, -18.49, -25.51),
+        ::testing::make_tuple(192000, 44100, -20.50, -13.31),
+
+        // To 48kHz
+        ::testing::make_tuple(8000, 48000, kResamplingRMSError, -63.43),
+        ::testing::make_tuple(16000, 48000, kResamplingRMSError, -63.96),
+        ::testing::make_tuple(32000, 48000, kResamplingRMSError, -64.04),
+        ::testing::make_tuple(44100, 48000, kResamplingRMSError, -62.63),
+        ::testing::make_tuple(48000, 48000, kResamplingRMSError, -73.52),
+        ::testing::make_tuple(96000, 48000, -18.40, -28.44),
+        ::testing::make_tuple(192000, 48000, -20.43, -14.11),
+
+        // To 96kHz
+        ::testing::make_tuple(8000, 96000, kResamplingRMSError, -63.19),
+        ::testing::make_tuple(16000, 96000, kResamplingRMSError, -63.39),
+        ::testing::make_tuple(32000, 96000, kResamplingRMSError, -63.95),
+        ::testing::make_tuple(44100, 96000, kResamplingRMSError, -62.63),
+        ::testing::make_tuple(48000, 96000, kResamplingRMSError, -73.52),
+        ::testing::make_tuple(96000, 96000, kResamplingRMSError, -73.52),
+        ::testing::make_tuple(192000, 96000, kResamplingRMSError, -28.41),
+
+        // To 192kHz
+        ::testing::make_tuple(8000, 192000, kResamplingRMSError, -63.10),
+        ::testing::make_tuple(16000, 192000, kResamplingRMSError, -63.14),
+        ::testing::make_tuple(32000, 192000, kResamplingRMSError, -63.38),
+        ::testing::make_tuple(44100, 192000, kResamplingRMSError, -62.63),
+        ::testing::make_tuple(48000, 192000, kResamplingRMSError, -73.44),
+        ::testing::make_tuple(96000, 192000, kResamplingRMSError, -73.52),
+        ::testing::make_tuple(192000, 192000, kResamplingRMSError, -73.52),
+
+        // Next run through some additional cases interesting for WebRTC.
+        // We skip some extreme downsampled cases (192 -> {8, 16}, 96 -> 8)
+        // because they violate |kHighFrequencyMaxError|, which is not
+        // unexpected. It's very unlikely that we'll see these conversions in
+        // practice anyway.
+
+        // To 8 kHz
+        ::testing::make_tuple(8000, 8000, kResamplingRMSError, -75.50),
+        ::testing::make_tuple(16000, 8000, -18.56, -28.79),
+        ::testing::make_tuple(32000, 8000, -20.36, -14.13),
+        ::testing::make_tuple(44100, 8000, -21.00, -11.39),
+        ::testing::make_tuple(48000, 8000, -20.96, -11.04),
+
+        // To 16 kHz
+        ::testing::make_tuple(8000, 16000, kResamplingRMSError, -70.30),
+        ::testing::make_tuple(16000, 16000, kResamplingRMSError, -75.51),
+        ::testing::make_tuple(32000, 16000, -18.48, -28.59),
+        ::testing::make_tuple(44100, 16000, -19.30, -19.67),
+        ::testing::make_tuple(48000, 16000, -19.81, -18.11),
+        ::testing::make_tuple(96000, 16000, -20.95, -10.96),
+
+        // To 32 kHz
+        ::testing::make_tuple(8000, 32000, kResamplingRMSError, -70.30),
+        ::testing::make_tuple(16000, 32000, kResamplingRMSError, -75.51),
+        ::testing::make_tuple(32000, 32000, kResamplingRMSError, -75.51),
+        ::testing::make_tuple(44100, 32000, -16.44, -51.10),
+        ::testing::make_tuple(48000, 32000, -16.90, -44.03),
+        ::testing::make_tuple(96000, 32000, -19.61, -18.04),
+        ::testing::make_tuple(192000, 32000, -21.02, -10.94)));
+
+}  // namespace webrtc
diff --git a/common_audio/resampler/resampler.cc b/common_audio/resampler/resampler.cc
new file mode 100644
index 0000000..ea85d82
--- /dev/null
+++ b/common_audio/resampler/resampler.cc
@@ -0,0 +1,921 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * A wrapper for resampling a numerous amount of sampling combinations.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "common_audio/resampler/include/resampler.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+Resampler::Resampler()
+    : state1_(nullptr),
+      state2_(nullptr),
+      state3_(nullptr),
+      in_buffer_(nullptr),
+      out_buffer_(nullptr),
+      in_buffer_size_(0),
+      out_buffer_size_(0),
+      in_buffer_size_max_(0),
+      out_buffer_size_max_(0),
+      my_in_frequency_khz_(0),
+      my_out_frequency_khz_(0),
+      my_mode_(kResamplerMode1To1),
+      num_channels_(0),
+      slave_left_(nullptr),
+      slave_right_(nullptr) {
+}
+
+Resampler::Resampler(int inFreq, int outFreq, size_t num_channels)
+    : Resampler() {
+  Reset(inFreq, outFreq, num_channels);
+}
+
+Resampler::~Resampler() {
+  if (state1_) {
+    free(state1_);
+  }
+  if (state2_) {
+    free(state2_);
+  }
+  if (state3_) {
+    free(state3_);
+  }
+  if (in_buffer_) {
+    free(in_buffer_);
+  }
+  if (out_buffer_) {
+    free(out_buffer_);
+  }
+  if (slave_left_) {
+    delete slave_left_;
+  }
+  if (slave_right_) {
+    delete slave_right_;
+  }
+}
+
+int Resampler::ResetIfNeeded(int inFreq, int outFreq, size_t num_channels) {
+  int tmpInFreq_kHz = inFreq / 1000;
+  int tmpOutFreq_kHz = outFreq / 1000;
+
+  if ((tmpInFreq_kHz != my_in_frequency_khz_)
+      || (tmpOutFreq_kHz != my_out_frequency_khz_)
+      || (num_channels != num_channels_)) {
+    return Reset(inFreq, outFreq, num_channels);
+  } else {
+    return 0;
+  }
+}
+
+int Resampler::Reset(int inFreq, int outFreq, size_t num_channels) {
+  if (num_channels != 1 && num_channels != 2) {
+    RTC_LOG(LS_WARNING)
+        << "Reset() called with unsupported channel count, num_channels = "
+        << num_channels;
+    return -1;
+  }
+  ResamplerMode mode;
+  if (ComputeResamplerMode(inFreq, outFreq, &mode) != 0) {
+    RTC_LOG(LS_WARNING)
+        << "Reset() called with unsupported sample rates, inFreq = " << inFreq
+        << ", outFreq = " << outFreq;
+    return -1;
+  }
+  // Reinitialize internal state for the frequencies and sample rates.
+  num_channels_ = num_channels;
+  my_mode_ = mode;
+
+  if (state1_) {
+    free(state1_);
+    state1_ = nullptr;
+  }
+  if (state2_) {
+    free(state2_);
+    state2_ = nullptr;
+  }
+  if (state3_) {
+    free(state3_);
+    state3_ = nullptr;
+  }
+  if (in_buffer_) {
+    free(in_buffer_);
+    in_buffer_ = nullptr;
+  }
+  if (out_buffer_) {
+    free(out_buffer_);
+    out_buffer_ = nullptr;
+  }
+  if (slave_left_) {
+    delete slave_left_;
+    slave_left_ = nullptr;
+  }
+  if (slave_right_) {
+    delete slave_right_;
+    slave_right_ = nullptr;
+  }
+
+  in_buffer_size_ = 0;
+  out_buffer_size_ = 0;
+  in_buffer_size_max_ = 0;
+  out_buffer_size_max_ = 0;
+
+  // We need to track what domain we're in.
+  my_in_frequency_khz_ = inFreq / 1000;
+  my_out_frequency_khz_ = outFreq / 1000;
+
+  if (num_channels_ == 2) {
+    // Create two mono resamplers.
+    slave_left_ = new Resampler(inFreq, outFreq, 1);
+    slave_right_ = new Resampler(inFreq, outFreq, 1);
+  }
+
+  // Now create the states we need.
+  switch (my_mode_) {
+    case kResamplerMode1To1:
+      // No state needed;
+      break;
+    case kResamplerMode1To2:
+      state1_ = malloc(8 * sizeof(int32_t));
+      memset(state1_, 0, 8 * sizeof(int32_t));
+      break;
+    case kResamplerMode1To3:
+      state1_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
+      WebRtcSpl_ResetResample16khzTo48khz(
+          static_cast<WebRtcSpl_State16khzTo48khz*>(state1_));
+      break;
+    case kResamplerMode1To4:
+      // 1:2
+      state1_ = malloc(8 * sizeof(int32_t));
+      memset(state1_, 0, 8 * sizeof(int32_t));
+      // 2:4
+      state2_ = malloc(8 * sizeof(int32_t));
+      memset(state2_, 0, 8 * sizeof(int32_t));
+      break;
+    case kResamplerMode1To6:
+      // 1:2
+      state1_ = malloc(8 * sizeof(int32_t));
+      memset(state1_, 0, 8 * sizeof(int32_t));
+      // 2:6
+      state2_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
+      WebRtcSpl_ResetResample16khzTo48khz(
+          static_cast<WebRtcSpl_State16khzTo48khz*>(state2_));
+      break;
+    case kResamplerMode1To12:
+      // 1:2
+      state1_ = malloc(8 * sizeof(int32_t));
+      memset(state1_, 0, 8 * sizeof(int32_t));
+      // 2:4
+      state2_ = malloc(8 * sizeof(int32_t));
+      memset(state2_, 0, 8 * sizeof(int32_t));
+      // 4:12
+      state3_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
+      WebRtcSpl_ResetResample16khzTo48khz(
+          static_cast<WebRtcSpl_State16khzTo48khz*>(state3_));
+      break;
+    case kResamplerMode2To3:
+      // 2:6
+      state1_ = malloc(sizeof(WebRtcSpl_State16khzTo48khz));
+      WebRtcSpl_ResetResample16khzTo48khz(
+        static_cast<WebRtcSpl_State16khzTo48khz*>(state1_));
+      // 6:3
+      state2_ = malloc(8 * sizeof(int32_t));
+      memset(state2_, 0, 8 * sizeof(int32_t));
+      break;
+    case kResamplerMode2To11:
+      state1_ = malloc(8 * sizeof(int32_t));
+      memset(state1_, 0, 8 * sizeof(int32_t));
+
+      state2_ = malloc(sizeof(WebRtcSpl_State8khzTo22khz));
+      WebRtcSpl_ResetResample8khzTo22khz(
+          static_cast<WebRtcSpl_State8khzTo22khz*>(state2_));
+      break;
+    case kResamplerMode4To11:
+      state1_ = malloc(sizeof(WebRtcSpl_State8khzTo22khz));
+      WebRtcSpl_ResetResample8khzTo22khz(
+          static_cast<WebRtcSpl_State8khzTo22khz*>(state1_));
+      break;
+    case kResamplerMode8To11:
+      state1_ = malloc(sizeof(WebRtcSpl_State16khzTo22khz));
+      WebRtcSpl_ResetResample16khzTo22khz(
+          static_cast<WebRtcSpl_State16khzTo22khz*>(state1_));
+      break;
+    case kResamplerMode11To16:
+      state1_ = malloc(8 * sizeof(int32_t));
+      memset(state1_, 0, 8 * sizeof(int32_t));
+
+      state2_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
+      WebRtcSpl_ResetResample22khzTo16khz(
+          static_cast<WebRtcSpl_State22khzTo16khz*>(state2_));
+      break;
+    case kResamplerMode11To32:
+      // 11 -> 22
+      state1_ = malloc(8 * sizeof(int32_t));
+      memset(state1_, 0, 8 * sizeof(int32_t));
+
+      // 22 -> 16
+      state2_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
+      WebRtcSpl_ResetResample22khzTo16khz(
+          static_cast<WebRtcSpl_State22khzTo16khz*>(state2_));
+
+      // 16 -> 32
+      state3_ = malloc(8 * sizeof(int32_t));
+      memset(state3_, 0, 8 * sizeof(int32_t));
+
+      break;
+    case kResamplerMode2To1:
+      state1_ = malloc(8 * sizeof(int32_t));
+      memset(state1_, 0, 8 * sizeof(int32_t));
+      break;
+    case kResamplerMode3To1:
+      state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
+      WebRtcSpl_ResetResample48khzTo16khz(
+          static_cast<WebRtcSpl_State48khzTo16khz*>(state1_));
+      break;
+    case kResamplerMode4To1:
+      // 4:2
+      state1_ = malloc(8 * sizeof(int32_t));
+      memset(state1_, 0, 8 * sizeof(int32_t));
+      // 2:1
+      state2_ = malloc(8 * sizeof(int32_t));
+      memset(state2_, 0, 8 * sizeof(int32_t));
+      break;
+    case kResamplerMode6To1:
+      // 6:2
+      state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
+      WebRtcSpl_ResetResample48khzTo16khz(
+          static_cast<WebRtcSpl_State48khzTo16khz*>(state1_));
+      // 2:1
+      state2_ = malloc(8 * sizeof(int32_t));
+      memset(state2_, 0, 8 * sizeof(int32_t));
+      break;
+    case kResamplerMode12To1:
+      // 12:4
+      state1_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
+      WebRtcSpl_ResetResample48khzTo16khz(
+          static_cast<WebRtcSpl_State48khzTo16khz*>(state1_));
+      // 4:2
+      state2_ = malloc(8 * sizeof(int32_t));
+      memset(state2_, 0, 8 * sizeof(int32_t));
+      // 2:1
+      state3_ = malloc(8 * sizeof(int32_t));
+      memset(state3_, 0, 8 * sizeof(int32_t));
+      break;
+    case kResamplerMode3To2:
+      // 3:6
+      state1_ = malloc(8 * sizeof(int32_t));
+      memset(state1_, 0, 8 * sizeof(int32_t));
+      // 6:2
+      state2_ = malloc(sizeof(WebRtcSpl_State48khzTo16khz));
+      WebRtcSpl_ResetResample48khzTo16khz(
+          static_cast<WebRtcSpl_State48khzTo16khz*>(state2_));
+      break;
+    case kResamplerMode11To2:
+      state1_ = malloc(sizeof(WebRtcSpl_State22khzTo8khz));
+      WebRtcSpl_ResetResample22khzTo8khz(
+          static_cast<WebRtcSpl_State22khzTo8khz*>(state1_));
+
+      state2_ = malloc(8 * sizeof(int32_t));
+      memset(state2_, 0, 8 * sizeof(int32_t));
+
+      break;
+    case kResamplerMode11To4:
+      state1_ = malloc(sizeof(WebRtcSpl_State22khzTo8khz));
+      WebRtcSpl_ResetResample22khzTo8khz(
+          static_cast<WebRtcSpl_State22khzTo8khz*>(state1_));
+      break;
+    case kResamplerMode11To8:
+      state1_ = malloc(sizeof(WebRtcSpl_State22khzTo16khz));
+      WebRtcSpl_ResetResample22khzTo16khz(
+          static_cast<WebRtcSpl_State22khzTo16khz*>(state1_));
+      break;
+  }
+
+  return 0;
+}
+
+int Resampler::ComputeResamplerMode(int in_freq_hz,
+                                    int out_freq_hz,
+                                    ResamplerMode* mode) {
+  // Start with a math exercise, Euclid's algorithm to find the gcd:
+  int a = in_freq_hz;
+  int b = out_freq_hz;
+  int c = a % b;
+  while (c != 0) {
+    a = b;
+    b = c;
+    c = a % b;
+  }
+  // b is now the gcd;
+
+  // Scale with GCD
+  const int reduced_in_freq = in_freq_hz / b;
+  const int reduced_out_freq = out_freq_hz / b;
+
+  if (reduced_in_freq == reduced_out_freq) {
+    *mode = kResamplerMode1To1;
+  } else if (reduced_in_freq == 1) {
+    switch (reduced_out_freq) {
+      case 2:
+        *mode = kResamplerMode1To2;
+        break;
+      case 3:
+        *mode = kResamplerMode1To3;
+        break;
+      case 4:
+        *mode = kResamplerMode1To4;
+        break;
+      case 6:
+        *mode = kResamplerMode1To6;
+        break;
+      case 12:
+        *mode = kResamplerMode1To12;
+        break;
+      default:
+        return -1;
+    }
+  } else if (reduced_out_freq == 1) {
+    switch (reduced_in_freq) {
+      case 2:
+        *mode = kResamplerMode2To1;
+        break;
+      case 3:
+        *mode = kResamplerMode3To1;
+        break;
+      case 4:
+        *mode = kResamplerMode4To1;
+        break;
+      case 6:
+        *mode = kResamplerMode6To1;
+        break;
+      case 12:
+        *mode = kResamplerMode12To1;
+        break;
+      default:
+        return -1;
+    }
+  } else if ((reduced_in_freq == 2) && (reduced_out_freq == 3)) {
+    *mode = kResamplerMode2To3;
+  } else if ((reduced_in_freq == 2) && (reduced_out_freq == 11)) {
+    *mode = kResamplerMode2To11;
+  } else if ((reduced_in_freq == 4) && (reduced_out_freq == 11)) {
+    *mode = kResamplerMode4To11;
+  } else if ((reduced_in_freq == 8) && (reduced_out_freq == 11)) {
+    *mode = kResamplerMode8To11;
+  } else if ((reduced_in_freq == 3) && (reduced_out_freq == 2)) {
+    *mode = kResamplerMode3To2;
+  } else if ((reduced_in_freq == 11) && (reduced_out_freq == 2)) {
+    *mode = kResamplerMode11To2;
+  } else if ((reduced_in_freq == 11) && (reduced_out_freq == 4)) {
+    *mode = kResamplerMode11To4;
+  } else if ((reduced_in_freq == 11) && (reduced_out_freq == 16)) {
+    *mode = kResamplerMode11To16;
+  } else if ((reduced_in_freq == 11) && (reduced_out_freq == 32)) {
+    *mode = kResamplerMode11To32;
+  } else if ((reduced_in_freq == 11) && (reduced_out_freq == 8)) {
+    *mode = kResamplerMode11To8;
+  } else {
+    return -1;
+  }
+  return 0;
+}
+
+// Synchronous resampling, all output samples are written to samplesOut
+int Resampler::Push(const int16_t * samplesIn, size_t lengthIn,
+                    int16_t* samplesOut, size_t maxLen, size_t& outLen) {
+  if (num_channels_ == 2) {
+    // Split up the signal and call the slave object for each channel
+    int16_t* left =
+        static_cast<int16_t*>(malloc(lengthIn * sizeof(int16_t) / 2));
+    int16_t* right =
+        static_cast<int16_t*>(malloc(lengthIn * sizeof(int16_t) / 2));
+    int16_t* out_left =
+        static_cast<int16_t*>(malloc(maxLen / 2 * sizeof(int16_t)));
+    int16_t* out_right =
+        static_cast<int16_t*>(malloc(maxLen / 2 * sizeof(int16_t)));
+    int res = 0;
+    for (size_t i = 0; i < lengthIn; i += 2) {
+      left[i >> 1] = samplesIn[i];
+      right[i >> 1] = samplesIn[i + 1];
+    }
+
+    // It's OK to overwrite the local parameter, since it's just a copy
+    lengthIn = lengthIn / 2;
+
+    size_t actualOutLen_left = 0;
+    size_t actualOutLen_right = 0;
+    // Do resampling for right channel
+    res |= slave_left_->Push(left, lengthIn, out_left, maxLen / 2,
+                             actualOutLen_left);
+    res |= slave_right_->Push(right, lengthIn, out_right, maxLen / 2,
+                              actualOutLen_right);
+    if (res || (actualOutLen_left != actualOutLen_right)) {
+      free(left);
+      free(right);
+      free(out_left);
+      free(out_right);
+      return -1;
+    }
+
+    // Reassemble the signal
+    for (size_t i = 0; i < actualOutLen_left; i++) {
+      samplesOut[i * 2] = out_left[i];
+      samplesOut[i * 2 + 1] = out_right[i];
+    }
+    outLen = 2 * actualOutLen_left;
+
+    free(left);
+    free(right);
+    free(out_left);
+    free(out_right);
+
+    return 0;
+  }
+
+  // Containers for temp samples
+  int16_t* tmp;
+  int16_t* tmp_2;
+  // tmp data for resampling routines
+  int32_t* tmp_mem;
+
+  switch (my_mode_) {
+    case kResamplerMode1To1:
+      memcpy(samplesOut, samplesIn, lengthIn * sizeof(int16_t));
+      outLen = lengthIn;
+      break;
+    case kResamplerMode1To2:
+      if (maxLen < (lengthIn * 2)) {
+        return -1;
+      }
+      WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut,
+                            static_cast<int32_t*>(state1_));
+      outLen = lengthIn * 2;
+      return 0;
+    case kResamplerMode1To3:
+
+      // We can only handle blocks of 160 samples
+      // Can be fixed, but I don't think it's needed
+      if ((lengthIn % 160) != 0) {
+        return -1;
+      }
+      if (maxLen < (lengthIn * 3)) {
+        return -1;
+      }
+      tmp_mem = static_cast<int32_t*>(malloc(336 * sizeof(int32_t)));
+
+      for (size_t i = 0; i < lengthIn; i += 160) {
+        WebRtcSpl_Resample16khzTo48khz(
+            samplesIn + i, samplesOut + i * 3,
+            static_cast<WebRtcSpl_State16khzTo48khz*>(state1_), tmp_mem);
+      }
+      outLen = lengthIn * 3;
+      free(tmp_mem);
+      return 0;
+    case kResamplerMode1To4:
+      if (maxLen < (lengthIn * 4)) {
+        return -1;
+      }
+
+      tmp = static_cast<int16_t*>(malloc(sizeof(int16_t) * 2 * lengthIn));
+      // 1:2
+      WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp,
+                            static_cast<int32_t*>(state1_));
+      // 2:4
+      WebRtcSpl_UpsampleBy2(tmp, lengthIn * 2, samplesOut,
+                            static_cast<int32_t*>(state2_));
+      outLen = lengthIn * 4;
+      free(tmp);
+      return 0;
+    case kResamplerMode1To6:
+      // We can only handle blocks of 80 samples
+      // Can be fixed, but I don't think it's needed
+      if ((lengthIn % 80) != 0) {
+        return -1;
+      }
+      if (maxLen < (lengthIn * 6)) {
+        return -1;
+      }
+
+      // 1:2
+
+      tmp_mem = static_cast<int32_t*>(malloc(336 * sizeof(int32_t)));
+      tmp = static_cast<int16_t*>(malloc(sizeof(int16_t) * 2 * lengthIn));
+
+      WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp,
+                            static_cast<int32_t*>(state1_));
+      outLen = lengthIn * 2;
+
+      for (size_t i = 0; i < outLen; i += 160) {
+        WebRtcSpl_Resample16khzTo48khz(
+            tmp + i, samplesOut + i * 3,
+            static_cast<WebRtcSpl_State16khzTo48khz*>(state2_), tmp_mem);
+      }
+      outLen = outLen * 3;
+      free(tmp_mem);
+      free(tmp);
+
+      return 0;
+    case kResamplerMode1To12:
+      // We can only handle blocks of 40 samples
+      // Can be fixed, but I don't think it's needed
+      if ((lengthIn % 40) != 0) {
+        return -1;
+      }
+      if (maxLen < (lengthIn * 12)) {
+        return -1;
+      }
+
+      tmp_mem = static_cast<int32_t*>(malloc(336 * sizeof(int32_t)));
+      tmp = static_cast<int16_t*>(malloc(sizeof(int16_t) * 4 * lengthIn));
+      // 1:2
+      WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut,
+                            static_cast<int32_t*>(state1_));
+      outLen = lengthIn * 2;
+      // 2:4
+      WebRtcSpl_UpsampleBy2(samplesOut, outLen, tmp,
+                            static_cast<int32_t*>(state2_));
+      outLen = outLen * 2;
+      // 4:12
+      for (size_t i = 0; i < outLen; i += 160) {
+        // WebRtcSpl_Resample16khzTo48khz() takes a block of 160 samples
+        // as input and outputs a resampled block of 480 samples. The
+        // data is now actually in 32 kHz sampling rate, despite the
+        // function name, and with a resampling factor of three becomes
+        // 96 kHz.
+        WebRtcSpl_Resample16khzTo48khz(
+            tmp + i, samplesOut + i * 3,
+            static_cast<WebRtcSpl_State16khzTo48khz*>(state3_), tmp_mem);
+      }
+      outLen = outLen * 3;
+      free(tmp_mem);
+      free(tmp);
+
+      return 0;
+    case kResamplerMode2To3:
+      if (maxLen < (lengthIn * 3 / 2)) {
+        return -1;
+      }
+      // 2:6
+      // We can only handle blocks of 160 samples
+      // Can be fixed, but I don't think it's needed
+      if ((lengthIn % 160) != 0) {
+        return -1;
+      }
+      tmp = static_cast<int16_t*> (malloc(sizeof(int16_t) * lengthIn * 3));
+      tmp_mem = static_cast<int32_t*>(malloc(336 * sizeof(int32_t)));
+      for (size_t i = 0; i < lengthIn; i += 160) {
+        WebRtcSpl_Resample16khzTo48khz(
+            samplesIn + i, tmp + i * 3,
+            static_cast<WebRtcSpl_State16khzTo48khz*>(state1_), tmp_mem);
+      }
+      lengthIn = lengthIn * 3;
+      // 6:3
+      WebRtcSpl_DownsampleBy2(tmp, lengthIn, samplesOut,
+                              static_cast<int32_t*>(state2_));
+      outLen = lengthIn / 2;
+      free(tmp);
+      free(tmp_mem);
+      return 0;
+    case kResamplerMode2To11:
+
+      // We can only handle blocks of 80 samples
+      // Can be fixed, but I don't think it's needed
+      if ((lengthIn % 80) != 0) {
+        return -1;
+      }
+      if (maxLen < ((lengthIn * 11) / 2)) {
+        return -1;
+      }
+      tmp = static_cast<int16_t*>(malloc(sizeof(int16_t) * 2 * lengthIn));
+      // 1:2
+      WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp,
+                            static_cast<int32_t*>(state1_));
+      lengthIn *= 2;
+
+      tmp_mem = static_cast<int32_t*>(malloc(98 * sizeof(int32_t)));
+
+      for (size_t i = 0; i < lengthIn; i += 80) {
+        WebRtcSpl_Resample8khzTo22khz(
+            tmp + i, samplesOut + (i * 11) / 4,
+            static_cast<WebRtcSpl_State8khzTo22khz*>(state2_), tmp_mem);
+      }
+      outLen = (lengthIn * 11) / 4;
+      free(tmp_mem);
+      free(tmp);
+      return 0;
+    case kResamplerMode4To11:
+
+      // We can only handle blocks of 80 samples
+      // Can be fixed, but I don't think it's needed
+      if ((lengthIn % 80) != 0) {
+        return -1;
+      }
+      if (maxLen < ((lengthIn * 11) / 4)) {
+        return -1;
+      }
+      tmp_mem = static_cast<int32_t*>(malloc(98 * sizeof(int32_t)));
+
+      for (size_t i = 0; i < lengthIn; i += 80) {
+        WebRtcSpl_Resample8khzTo22khz(
+            samplesIn + i, samplesOut + (i * 11) / 4,
+            static_cast<WebRtcSpl_State8khzTo22khz*>(state1_), tmp_mem);
+      }
+      outLen = (lengthIn * 11) / 4;
+      free(tmp_mem);
+      return 0;
+    case kResamplerMode8To11:
+      // We can only handle blocks of 160 samples
+      // Can be fixed, but I don't think it's needed
+      if ((lengthIn % 160) != 0) {
+        return -1;
+      }
+      if (maxLen < ((lengthIn * 11) / 8)) {
+        return -1;
+      }
+      tmp_mem = static_cast<int32_t*>(malloc(88 * sizeof(int32_t)));
+
+      for (size_t i = 0; i < lengthIn; i += 160) {
+        WebRtcSpl_Resample16khzTo22khz(
+            samplesIn + i, samplesOut + (i * 11) / 8,
+            static_cast<WebRtcSpl_State16khzTo22khz*>(state1_), tmp_mem);
+      }
+      outLen = (lengthIn * 11) / 8;
+      free(tmp_mem);
+      return 0;
+
+    case kResamplerMode11To16:
+      // We can only handle blocks of 110 samples
+      if ((lengthIn % 110) != 0) {
+        return -1;
+      }
+      if (maxLen < ((lengthIn * 16) / 11)) {
+        return -1;
+      }
+
+      tmp_mem = static_cast<int32_t*>(malloc(104 * sizeof(int32_t)));
+      tmp = static_cast<int16_t*>(malloc((sizeof(int16_t) * lengthIn * 2)));
+
+      WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp,
+                            static_cast<int32_t*>(state1_));
+
+      for (size_t i = 0; i < (lengthIn * 2); i += 220) {
+        WebRtcSpl_Resample22khzTo16khz(
+            tmp + i, samplesOut + (i / 220) * 160,
+            static_cast<WebRtcSpl_State22khzTo16khz*>(state2_), tmp_mem);
+      }
+
+      outLen = (lengthIn * 16) / 11;
+
+      free(tmp_mem);
+      free(tmp);
+      return 0;
+
+    case kResamplerMode11To32:
+
+      // We can only handle blocks of 110 samples
+      if ((lengthIn % 110) != 0) {
+        return -1;
+      }
+      if (maxLen < ((lengthIn * 32) / 11)) {
+        return -1;
+      }
+
+      tmp_mem = static_cast<int32_t*>(malloc(104 * sizeof(int32_t)));
+      tmp = static_cast<int16_t*>(malloc((sizeof(int16_t) * lengthIn * 2)));
+
+      // 11 -> 22 kHz in samplesOut
+      WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, samplesOut,
+                            static_cast<int32_t*>(state1_));
+
+      // 22 -> 16 in tmp
+      for (size_t i = 0; i < (lengthIn * 2); i += 220) {
+        WebRtcSpl_Resample22khzTo16khz(
+            samplesOut + i, tmp + (i / 220) * 160,
+            static_cast<WebRtcSpl_State22khzTo16khz*>(state2_), tmp_mem);
+      }
+
+      // 16 -> 32 in samplesOut
+      WebRtcSpl_UpsampleBy2(tmp, (lengthIn * 16) / 11, samplesOut,
+                            static_cast<int32_t*>(state3_));
+
+      outLen = (lengthIn * 32) / 11;
+
+      free(tmp_mem);
+      free(tmp);
+      return 0;
+
+    case kResamplerMode2To1:
+      if (maxLen < (lengthIn / 2)) {
+        return -1;
+      }
+      WebRtcSpl_DownsampleBy2(samplesIn, lengthIn, samplesOut,
+                              static_cast<int32_t*>(state1_));
+      outLen = lengthIn / 2;
+      return 0;
+    case kResamplerMode3To1:
+      // We can only handle blocks of 480 samples
+      // Can be fixed, but I don't think it's needed
+      if ((lengthIn % 480) != 0) {
+        return -1;
+      }
+      if (maxLen < (lengthIn / 3)) {
+        return -1;
+      }
+      tmp_mem = static_cast<int32_t*>(malloc(496 * sizeof(int32_t)));
+
+      for (size_t i = 0; i < lengthIn; i += 480) {
+        WebRtcSpl_Resample48khzTo16khz(
+            samplesIn + i, samplesOut + i / 3,
+            static_cast<WebRtcSpl_State48khzTo16khz*>(state1_), tmp_mem);
+      }
+      outLen = lengthIn / 3;
+      free(tmp_mem);
+      return 0;
+    case kResamplerMode4To1:
+      if (maxLen < (lengthIn / 4)) {
+        return -1;
+      }
+      tmp = static_cast<int16_t*>(malloc(sizeof(int16_t) * lengthIn / 2));
+      // 4:2
+      WebRtcSpl_DownsampleBy2(samplesIn, lengthIn, tmp,
+                              static_cast<int32_t*>(state1_));
+      // 2:1
+      WebRtcSpl_DownsampleBy2(tmp, lengthIn / 2, samplesOut,
+                              static_cast<int32_t*>(state2_));
+      outLen = lengthIn / 4;
+      free(tmp);
+      return 0;
+
+    case kResamplerMode6To1:
+      // We can only handle blocks of 480 samples
+      // Can be fixed, but I don't think it's needed
+      if ((lengthIn % 480) != 0) {
+        return -1;
+      }
+      if (maxLen < (lengthIn / 6)) {
+        return -1;
+      }
+
+      tmp_mem = static_cast<int32_t*>(malloc(496 * sizeof(int32_t)));
+      tmp = static_cast<int16_t*>(malloc((sizeof(int16_t) * lengthIn) / 3));
+
+      for (size_t i = 0; i < lengthIn; i += 480) {
+        WebRtcSpl_Resample48khzTo16khz(
+            samplesIn + i, tmp + i / 3,
+            static_cast<WebRtcSpl_State48khzTo16khz*>(state1_), tmp_mem);
+      }
+      outLen = lengthIn / 3;
+      free(tmp_mem);
+      WebRtcSpl_DownsampleBy2(tmp, outLen, samplesOut,
+                              static_cast<int32_t*>(state2_));
+      free(tmp);
+      outLen = outLen / 2;
+      return 0;
+    case kResamplerMode12To1:
+      // We can only handle blocks of 480 samples
+      // Can be fixed, but I don't think it's needed
+      if ((lengthIn % 480) != 0) {
+        return -1;
+      }
+      if (maxLen < (lengthIn / 12)) {
+        return -1;
+      }
+
+      tmp_mem = static_cast<int32_t*>(malloc(496 * sizeof(int32_t)));
+      tmp = static_cast<int16_t*>(malloc((sizeof(int16_t) * lengthIn) / 3));
+      tmp_2 = static_cast<int16_t*>(malloc((sizeof(int16_t) * lengthIn) / 6));
+      // 12:4
+      for (size_t i = 0; i < lengthIn; i += 480) {
+        // WebRtcSpl_Resample48khzTo16khz() takes a block of 480 samples
+        // as input and outputs a resampled block of 160 samples. The
+        // data is now actually in 96 kHz sampling rate, despite the
+        // function name, and with a resampling factor of 1/3 becomes
+        // 32 kHz.
+        WebRtcSpl_Resample48khzTo16khz(
+            samplesIn + i, tmp + i / 3,
+            static_cast<WebRtcSpl_State48khzTo16khz*>(state1_), tmp_mem);
+      }
+      outLen = lengthIn / 3;
+      free(tmp_mem);
+      // 4:2
+      WebRtcSpl_DownsampleBy2(tmp, outLen, tmp_2,
+                              static_cast<int32_t*>(state2_));
+      outLen = outLen / 2;
+      free(tmp);
+      // 2:1
+      WebRtcSpl_DownsampleBy2(tmp_2, outLen, samplesOut,
+                              static_cast<int32_t*>(state3_));
+      free(tmp_2);
+      outLen = outLen / 2;
+      return 0;
+    case kResamplerMode3To2:
+      if (maxLen < (lengthIn * 2 / 3)) {
+        return -1;
+      }
+      // 3:6
+      tmp = static_cast<int16_t*> (malloc(sizeof(int16_t) * lengthIn * 2));
+      WebRtcSpl_UpsampleBy2(samplesIn, lengthIn, tmp,
+                            static_cast<int32_t*>(state1_));
+      lengthIn *= 2;
+      // 6:2
+      // We can only handle blocks of 480 samples
+      // Can be fixed, but I don't think it's needed
+      if ((lengthIn % 480) != 0) {
+        free(tmp);
+        return -1;
+      }
+      tmp_mem = static_cast<int32_t*>(malloc(496 * sizeof(int32_t)));
+      for (size_t i = 0; i < lengthIn; i += 480) {
+        WebRtcSpl_Resample48khzTo16khz(
+            tmp + i, samplesOut + i / 3,
+            static_cast<WebRtcSpl_State48khzTo16khz*>(state2_), tmp_mem);
+      }
+      outLen = lengthIn / 3;
+      free(tmp);
+      free(tmp_mem);
+      return 0;
+    case kResamplerMode11To2:
+      // We can only handle blocks of 220 samples
+      // Can be fixed, but I don't think it's needed
+      if ((lengthIn % 220) != 0) {
+        return -1;
+      }
+      if (maxLen < ((lengthIn * 2) / 11)) {
+        return -1;
+      }
+      tmp_mem = static_cast<int32_t*>(malloc(126 * sizeof(int32_t)));
+      tmp = static_cast<int16_t*>(
+          malloc((lengthIn * 4) / 11 * sizeof(int16_t)));
+
+      for (size_t i = 0; i < lengthIn; i += 220) {
+        WebRtcSpl_Resample22khzTo8khz(
+            samplesIn + i, tmp + (i * 4) / 11,
+            static_cast<WebRtcSpl_State22khzTo8khz*>(state1_), tmp_mem);
+      }
+      lengthIn = (lengthIn * 4) / 11;
+
+      WebRtcSpl_DownsampleBy2(tmp, lengthIn, samplesOut,
+                              static_cast<int32_t*>(state2_));
+      outLen = lengthIn / 2;
+
+      free(tmp_mem);
+      free(tmp);
+      return 0;
+    case kResamplerMode11To4:
+      // We can only handle blocks of 220 samples
+      // Can be fixed, but I don't think it's needed
+      if ((lengthIn % 220) != 0) {
+        return -1;
+      }
+      if (maxLen < ((lengthIn * 4) / 11)) {
+        return -1;
+      }
+      tmp_mem = static_cast<int32_t*>(malloc(126 * sizeof(int32_t)));
+
+      for (size_t i = 0; i < lengthIn; i += 220) {
+        WebRtcSpl_Resample22khzTo8khz(
+            samplesIn + i, samplesOut + (i * 4) / 11,
+            static_cast<WebRtcSpl_State22khzTo8khz*>(state1_), tmp_mem);
+      }
+      outLen = (lengthIn * 4) / 11;
+      free(tmp_mem);
+      return 0;
+    case kResamplerMode11To8:
+      // We can only handle blocks of 160 samples
+      // Can be fixed, but I don't think it's needed
+      if ((lengthIn % 220) != 0) {
+        return -1;
+      }
+      if (maxLen < ((lengthIn * 8) / 11)) {
+        return -1;
+      }
+      tmp_mem = static_cast<int32_t*>(malloc(104 * sizeof(int32_t)));
+
+      for (size_t i = 0; i < lengthIn; i += 220) {
+        WebRtcSpl_Resample22khzTo16khz(
+            samplesIn + i, samplesOut + (i * 8) / 11,
+            static_cast<WebRtcSpl_State22khzTo16khz*>(state1_), tmp_mem);
+      }
+      outLen = (lengthIn * 8) / 11;
+      free(tmp_mem);
+      return 0;
+      break;
+  }
+  return 0;
+}
+
+}  // namespace webrtc
diff --git a/common_audio/resampler/resampler_unittest.cc b/common_audio/resampler/resampler_unittest.cc
new file mode 100644
index 0000000..0300719
--- /dev/null
+++ b/common_audio/resampler/resampler_unittest.cc
@@ -0,0 +1,175 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <array>
+
+#include "common_audio/resampler/include/resampler.h"
+#include "test/gtest.h"
+
+// TODO(andrew): this is a work-in-progress. Many more tests are needed.
+
+namespace webrtc {
+namespace {
+
+const int kNumChannels[] = {1, 2};
+const size_t kNumChannelsSize = sizeof(kNumChannels) / sizeof(*kNumChannels);
+
+// Rates we must support.
+const int kMaxRate = 96000;
+const int kRates[] = {
+  8000,
+  16000,
+  32000,
+  44000,
+  48000,
+  kMaxRate
+};
+const size_t kRatesSize = sizeof(kRates) / sizeof(*kRates);
+const int kMaxChannels = 2;
+const size_t kDataSize = static_cast<size_t> (kMaxChannels * kMaxRate / 100);
+
+// TODO(andrew): should we be supporting these combinations?
+bool ValidRates(int in_rate, int out_rate) {
+  // Not the most compact notation, for clarity.
+  if ((in_rate == 44000 && (out_rate == 48000 || out_rate == 96000)) ||
+      (out_rate == 44000 && (in_rate == 48000 || in_rate == 96000))) {
+    return false;
+  }
+
+  return true;
+}
+
+class ResamplerTest : public testing::Test {
+ protected:
+  ResamplerTest();
+  virtual void SetUp();
+  virtual void TearDown();
+
+  void ResetIfNeededAndPush(int in_rate, int out_rate, int num_channels);
+
+  Resampler rs_;
+  int16_t data_in_[kDataSize];
+  int16_t data_out_[kDataSize];
+};
+
+ResamplerTest::ResamplerTest() {}
+
+void ResamplerTest::SetUp() {
+  // Initialize input data with anything. The tests are content independent.
+  memset(data_in_, 1, sizeof(data_in_));
+}
+
+void ResamplerTest::TearDown() {}
+
+void ResamplerTest::ResetIfNeededAndPush(int in_rate,
+                                         int out_rate,
+                                         int num_channels) {
+  std::ostringstream ss;
+  ss << "Input rate: " << in_rate << ", output rate: " << out_rate
+     << ", channel count: " << num_channels;
+  SCOPED_TRACE(ss.str());
+
+  if (ValidRates(in_rate, out_rate)) {
+    size_t in_length = static_cast<size_t>(in_rate / 100);
+    size_t out_length = 0;
+    EXPECT_EQ(0, rs_.ResetIfNeeded(in_rate, out_rate, num_channels));
+    EXPECT_EQ(0,
+              rs_.Push(data_in_, in_length, data_out_, kDataSize, out_length));
+    EXPECT_EQ(static_cast<size_t>(out_rate / 100), out_length);
+  } else {
+    EXPECT_EQ(-1, rs_.ResetIfNeeded(in_rate, out_rate, num_channels));
+  }
+}
+
+TEST_F(ResamplerTest, Reset) {
+  // The only failure mode for the constructor is if Reset() fails. For the
+  // time being then (until an Init function is added), we rely on Reset()
+  // to test the constructor.
+
+  // Check that all required combinations are supported.
+  for (size_t i = 0; i < kRatesSize; ++i) {
+    for (size_t j = 0; j < kRatesSize; ++j) {
+      for (size_t k = 0; k < kNumChannelsSize; ++k) {
+        std::ostringstream ss;
+        ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j]
+            << ", channels: " << kNumChannels[k];
+        SCOPED_TRACE(ss.str());
+        if (ValidRates(kRates[i], kRates[j]))
+          EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kNumChannels[k]));
+        else
+          EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j], kNumChannels[k]));
+      }
+    }
+  }
+}
+
+// TODO(tlegrand): Replace code inside the two tests below with a function
+// with number of channels and ResamplerType as input.
+TEST_F(ResamplerTest, Mono) {
+  const int kChannels = 1;
+  for (size_t i = 0; i < kRatesSize; ++i) {
+    for (size_t j = 0; j < kRatesSize; ++j) {
+      std::ostringstream ss;
+      ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j];
+      SCOPED_TRACE(ss.str());
+
+      if (ValidRates(kRates[i], kRates[j])) {
+        size_t in_length = static_cast<size_t>(kRates[i] / 100);
+        size_t out_length = 0;
+        EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j], kChannels));
+        EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
+                              out_length));
+        EXPECT_EQ(static_cast<size_t>(kRates[j] / 100), out_length);
+      } else {
+        EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j], kChannels));
+      }
+    }
+  }
+}
+
+TEST_F(ResamplerTest, Stereo) {
+  const int kChannels = 2;
+  for (size_t i = 0; i < kRatesSize; ++i) {
+    for (size_t j = 0; j < kRatesSize; ++j) {
+      std::ostringstream ss;
+      ss << "Input rate: " << kRates[i] << ", output rate: " << kRates[j];
+      SCOPED_TRACE(ss.str());
+
+      if (ValidRates(kRates[i], kRates[j])) {
+        size_t in_length = static_cast<size_t>(kChannels * kRates[i] / 100);
+        size_t out_length = 0;
+        EXPECT_EQ(0, rs_.Reset(kRates[i], kRates[j],
+                               kChannels));
+        EXPECT_EQ(0, rs_.Push(data_in_, in_length, data_out_, kDataSize,
+                              out_length));
+        EXPECT_EQ(static_cast<size_t>(kChannels * kRates[j] / 100), out_length);
+      } else {
+        EXPECT_EQ(-1, rs_.Reset(kRates[i], kRates[j],
+                                kChannels));
+      }
+    }
+  }
+}
+
+// Try multiple resets between a few supported and unsupported rates.
+TEST_F(ResamplerTest, MultipleResets) {
+  constexpr size_t kNumChanges = 5;
+  constexpr std::array<int, kNumChanges> kInRates = {
+      {8000, 44000, 44000, 32000, 32000}};
+  constexpr std::array<int, kNumChanges> kOutRates = {
+      {16000, 48000, 48000, 16000, 16000}};
+  constexpr std::array<int, kNumChanges> kNumChannels = {{2, 2, 2, 2, 1}};
+  for (size_t i = 0; i < kNumChanges; ++i) {
+    ResetIfNeededAndPush(kInRates[i], kOutRates[i], kNumChannels[i]);
+  }
+}
+
+}  // namespace
+}  // namespace webrtc
diff --git a/common_audio/resampler/sinc_resampler.cc b/common_audio/resampler/sinc_resampler.cc
new file mode 100644
index 0000000..c857755
--- /dev/null
+++ b/common_audio/resampler/sinc_resampler.cc
@@ -0,0 +1,374 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Modified from the Chromium original:
+// src/media/base/sinc_resampler.cc
+
+// Initial input buffer layout, dividing into regions r0_ to r4_ (note: r0_, r3_
+// and r4_ will move after the first load):
+//
+// |----------------|-----------------------------------------|----------------|
+//
+//                                        request_frames_
+//                   <--------------------------------------------------------->
+//                                    r0_ (during first load)
+//
+//  kKernelSize / 2   kKernelSize / 2         kKernelSize / 2   kKernelSize / 2
+// <---------------> <--------------->       <---------------> <--------------->
+//        r1_               r2_                     r3_               r4_
+//
+//                             block_size_ == r4_ - r2_
+//                   <--------------------------------------->
+//
+//                                                  request_frames_
+//                                    <------------------ ... ----------------->
+//                                               r0_ (during second load)
+//
+// On the second request r0_ slides to the right by kKernelSize / 2 and r3_, r4_
+// and block_size_ are reinitialized via step (3) in the algorithm below.
+//
+// These new regions remain constant until a Flush() occurs.  While complicated,
+// this allows us to reduce jitter by always requesting the same amount from the
+// provided callback.
+//
+// The algorithm:
+//
+// 1) Allocate input_buffer of size: request_frames_ + kKernelSize; this ensures
+//    there's enough room to read request_frames_ from the callback into region
+//    r0_ (which will move between the first and subsequent passes).
+//
+// 2) Let r1_, r2_ each represent half the kernel centered around r0_:
+//
+//        r0_ = input_buffer_ + kKernelSize / 2
+//        r1_ = input_buffer_
+//        r2_ = r0_
+//
+//    r0_ is always request_frames_ in size.  r1_, r2_ are kKernelSize / 2 in
+//    size.  r1_ must be zero initialized to avoid convolution with garbage (see
+//    step (5) for why).
+//
+// 3) Let r3_, r4_ each represent half the kernel right aligned with the end of
+//    r0_ and choose block_size_ as the distance in frames between r4_ and r2_:
+//
+//        r3_ = r0_ + request_frames_ - kKernelSize
+//        r4_ = r0_ + request_frames_ - kKernelSize / 2
+//        block_size_ = r4_ - r2_ = request_frames_ - kKernelSize / 2
+//
+// 4) Consume request_frames_ frames into r0_.
+//
+// 5) Position kernel centered at start of r2_ and generate output frames until
+//    the kernel is centered at the start of r4_ or we've finished generating
+//    all the output frames.
+//
+// 6) Wrap left over data from the r3_ to r1_ and r4_ to r2_.
+//
+// 7) If we're on the second load, in order to avoid overwriting the frames we
+//    just wrapped from r4_ we need to slide r0_ to the right by the size of
+//    r4_, which is kKernelSize / 2:
+//
+//        r0_ = r0_ + kKernelSize / 2 = input_buffer_ + kKernelSize
+//
+//    r3_, r4_, and block_size_ then need to be reinitialized, so goto (3).
+//
+// 8) Else, if we're not on the second load, goto (4).
+//
+// Note: we're glossing over how the sub-sample handling works with
+// |virtual_source_idx_|, etc.
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+
+#include "common_audio/resampler/sinc_resampler.h"
+
+#include <math.h>
+#include <string.h>
+
+#include <limits>
+
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+namespace {
+
+double SincScaleFactor(double io_ratio) {
+  // |sinc_scale_factor| is basically the normalized cutoff frequency of the
+  // low-pass filter.
+  double sinc_scale_factor = io_ratio > 1.0 ? 1.0 / io_ratio : 1.0;
+
+  // The sinc function is an idealized brick-wall filter, but since we're
+  // windowing it the transition from pass to stop does not happen right away.
+  // So we should adjust the low pass filter cutoff slightly downward to avoid
+  // some aliasing at the very high-end.
+  // TODO(crogers): this value is empirical and to be more exact should vary
+  // depending on kKernelSize.
+  sinc_scale_factor *= 0.9;
+
+  return sinc_scale_factor;
+}
+
+}  // namespace
+
+const size_t SincResampler::kKernelSize;
+
+// If we know the minimum architecture at compile time, avoid CPU detection.
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#if defined(__SSE2__)
+#define CONVOLVE_FUNC Convolve_SSE
+void SincResampler::InitializeCPUSpecificFeatures() {}
+#else
+// x86 CPU detection required.  Function will be set by
+// InitializeCPUSpecificFeatures().
+// TODO(dalecurtis): Once Chrome moves to an SSE baseline this can be removed.
+#define CONVOLVE_FUNC convolve_proc_
+
+void SincResampler::InitializeCPUSpecificFeatures() {
+  convolve_proc_ = WebRtc_GetCPUInfo(kSSE2) ? Convolve_SSE : Convolve_C;
+}
+#endif
+#elif defined(WEBRTC_HAS_NEON)
+#define CONVOLVE_FUNC Convolve_NEON
+void SincResampler::InitializeCPUSpecificFeatures() {}
+#else
+// Unknown architecture.
+#define CONVOLVE_FUNC Convolve_C
+void SincResampler::InitializeCPUSpecificFeatures() {}
+#endif
+
+SincResampler::SincResampler(double io_sample_rate_ratio,
+                             size_t request_frames,
+                             SincResamplerCallback* read_cb)
+    : io_sample_rate_ratio_(io_sample_rate_ratio),
+      read_cb_(read_cb),
+      request_frames_(request_frames),
+      input_buffer_size_(request_frames_ + kKernelSize),
+      // Create input buffers with a 16-byte alignment for SSE optimizations.
+      kernel_storage_(static_cast<float*>(
+          AlignedMalloc(sizeof(float) * kKernelStorageSize, 16))),
+      kernel_pre_sinc_storage_(static_cast<float*>(
+          AlignedMalloc(sizeof(float) * kKernelStorageSize, 16))),
+      kernel_window_storage_(static_cast<float*>(
+          AlignedMalloc(sizeof(float) * kKernelStorageSize, 16))),
+      input_buffer_(static_cast<float*>(
+          AlignedMalloc(sizeof(float) * input_buffer_size_, 16))),
+#if defined(WEBRTC_CPU_DETECTION)
+      convolve_proc_(nullptr),
+#endif
+      r1_(input_buffer_.get()),
+      r2_(input_buffer_.get() + kKernelSize / 2) {
+#if defined(WEBRTC_CPU_DETECTION)
+  InitializeCPUSpecificFeatures();
+  RTC_DCHECK(convolve_proc_);
+#endif
+  RTC_DCHECK_GT(request_frames_, 0);
+  Flush();
+  RTC_DCHECK_GT(block_size_, kKernelSize);
+
+  memset(kernel_storage_.get(), 0,
+         sizeof(*kernel_storage_.get()) * kKernelStorageSize);
+  memset(kernel_pre_sinc_storage_.get(), 0,
+         sizeof(*kernel_pre_sinc_storage_.get()) * kKernelStorageSize);
+  memset(kernel_window_storage_.get(), 0,
+         sizeof(*kernel_window_storage_.get()) * kKernelStorageSize);
+
+  InitializeKernel();
+}
+
+SincResampler::~SincResampler() {}
+
+void SincResampler::UpdateRegions(bool second_load) {
+  // Setup various region pointers in the buffer (see diagram above).  If we're
+  // on the second load we need to slide r0_ to the right by kKernelSize / 2.
+  r0_ = input_buffer_.get() + (second_load ? kKernelSize : kKernelSize / 2);
+  r3_ = r0_ + request_frames_ - kKernelSize;
+  r4_ = r0_ + request_frames_ - kKernelSize / 2;
+  block_size_ = r4_ - r2_;
+
+  // r1_ at the beginning of the buffer.
+  RTC_DCHECK_EQ(r1_, input_buffer_.get());
+  // r1_ left of r2_, r4_ left of r3_ and size correct.
+  RTC_DCHECK_EQ(r2_ - r1_, r4_ - r3_);
+  // r2_ left of r3.
+  RTC_DCHECK_LT(r2_, r3_);
+}
+
+void SincResampler::InitializeKernel() {
+  // Blackman window parameters.
+  static const double kAlpha = 0.16;
+  static const double kA0 = 0.5 * (1.0 - kAlpha);
+  static const double kA1 = 0.5;
+  static const double kA2 = 0.5 * kAlpha;
+
+  // Generates a set of windowed sinc() kernels.
+  // We generate a range of sub-sample offsets from 0.0 to 1.0.
+  const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_);
+  for (size_t offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
+    const float subsample_offset =
+        static_cast<float>(offset_idx) / kKernelOffsetCount;
+
+    for (size_t i = 0; i < kKernelSize; ++i) {
+      const size_t idx = i + offset_idx * kKernelSize;
+      const float pre_sinc = static_cast<float>(M_PI *
+          (static_cast<int>(i) - static_cast<int>(kKernelSize / 2) -
+           subsample_offset));
+      kernel_pre_sinc_storage_[idx] = pre_sinc;
+
+      // Compute Blackman window, matching the offset of the sinc().
+      const float x = (i - subsample_offset) / kKernelSize;
+      const float window = static_cast<float>(kA0 - kA1 * cos(2.0 * M_PI * x) +
+          kA2 * cos(4.0 * M_PI * x));
+      kernel_window_storage_[idx] = window;
+
+      // Compute the sinc with offset, then window the sinc() function and store
+      // at the correct offset.
+      kernel_storage_[idx] = static_cast<float>(window *
+          ((pre_sinc == 0) ?
+              sinc_scale_factor :
+              (sin(sinc_scale_factor * pre_sinc) / pre_sinc)));
+    }
+  }
+}
+
+void SincResampler::SetRatio(double io_sample_rate_ratio) {
+  if (fabs(io_sample_rate_ratio_ - io_sample_rate_ratio) <
+      std::numeric_limits<double>::epsilon()) {
+    return;
+  }
+
+  io_sample_rate_ratio_ = io_sample_rate_ratio;
+
+  // Optimize reinitialization by reusing values which are independent of
+  // |sinc_scale_factor|.  Provides a 3x speedup.
+  const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_);
+  for (size_t offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
+    for (size_t i = 0; i < kKernelSize; ++i) {
+      const size_t idx = i + offset_idx * kKernelSize;
+      const float window = kernel_window_storage_[idx];
+      const float pre_sinc = kernel_pre_sinc_storage_[idx];
+
+      kernel_storage_[idx] = static_cast<float>(window *
+          ((pre_sinc == 0) ?
+              sinc_scale_factor :
+              (sin(sinc_scale_factor * pre_sinc) / pre_sinc)));
+    }
+  }
+}
+
+void SincResampler::Resample(size_t frames, float* destination) {
+  size_t remaining_frames = frames;
+
+  // Step (1) -- Prime the input buffer at the start of the input stream.
+  if (!buffer_primed_ && remaining_frames) {
+    read_cb_->Run(request_frames_, r0_);
+    buffer_primed_ = true;
+  }
+
+  // Step (2) -- Resample!  const what we can outside of the loop for speed.  It
+  // actually has an impact on ARM performance.  See inner loop comment below.
+  const double current_io_ratio = io_sample_rate_ratio_;
+  const float* const kernel_ptr = kernel_storage_.get();
+  while (remaining_frames) {
+    // |i| may be negative if the last Resample() call ended on an iteration
+    // that put |virtual_source_idx_| over the limit.
+    //
+    // Note: The loop construct here can severely impact performance on ARM
+    // or when built with clang.  See https://codereview.chromium.org/18566009/
+    for (int i = static_cast<int>(
+             ceil((block_size_ - virtual_source_idx_) / current_io_ratio));
+         i > 0; --i) {
+      RTC_DCHECK_LT(virtual_source_idx_, block_size_);
+
+      // |virtual_source_idx_| lies in between two kernel offsets so figure out
+      // what they are.
+      const int source_idx = static_cast<int>(virtual_source_idx_);
+      const double subsample_remainder = virtual_source_idx_ - source_idx;
+
+      const double virtual_offset_idx =
+          subsample_remainder * kKernelOffsetCount;
+      const int offset_idx = static_cast<int>(virtual_offset_idx);
+
+      // We'll compute "convolutions" for the two kernels which straddle
+      // |virtual_source_idx_|.
+      const float* const k1 = kernel_ptr + offset_idx * kKernelSize;
+      const float* const k2 = k1 + kKernelSize;
+
+      // Ensure |k1|, |k2| are 16-byte aligned for SIMD usage.  Should always be
+      // true so long as kKernelSize is a multiple of 16.
+      RTC_DCHECK_EQ(0, reinterpret_cast<uintptr_t>(k1) % 16);
+      RTC_DCHECK_EQ(0, reinterpret_cast<uintptr_t>(k2) % 16);
+
+      // Initialize input pointer based on quantized |virtual_source_idx_|.
+      const float* const input_ptr = r1_ + source_idx;
+
+      // Figure out how much to weight each kernel's "convolution".
+      const double kernel_interpolation_factor =
+          virtual_offset_idx - offset_idx;
+      *destination++ = CONVOLVE_FUNC(
+          input_ptr, k1, k2, kernel_interpolation_factor);
+
+      // Advance the virtual index.
+      virtual_source_idx_ += current_io_ratio;
+
+      if (!--remaining_frames)
+        return;
+    }
+
+    // Wrap back around to the start.
+    virtual_source_idx_ -= block_size_;
+
+    // Step (3) -- Copy r3_, r4_ to r1_, r2_.
+    // This wraps the last input frames back to the start of the buffer.
+    memcpy(r1_, r3_, sizeof(*input_buffer_.get()) * kKernelSize);
+
+    // Step (4) -- Reinitialize regions if necessary.
+    if (r0_ == r2_)
+      UpdateRegions(true);
+
+    // Step (5) -- Refresh the buffer with more input.
+    read_cb_->Run(request_frames_, r0_);
+  }
+}
+
+#undef CONVOLVE_FUNC
+
+size_t SincResampler::ChunkSize() const {
+  return static_cast<size_t>(block_size_ / io_sample_rate_ratio_);
+}
+
+void SincResampler::Flush() {
+  virtual_source_idx_ = 0;
+  buffer_primed_ = false;
+  memset(input_buffer_.get(), 0,
+         sizeof(*input_buffer_.get()) * input_buffer_size_);
+  UpdateRegions(false);
+}
+
+float SincResampler::Convolve_C(const float* input_ptr, const float* k1,
+                                const float* k2,
+                                double kernel_interpolation_factor) {
+  float sum1 = 0;
+  float sum2 = 0;
+
+  // Generate a single output sample.  Unrolling this loop hurt performance in
+  // local testing.
+  size_t n = kKernelSize;
+  while (n--) {
+    sum1 += *input_ptr * *k1++;
+    sum2 += *input_ptr++ * *k2++;
+  }
+
+  // Linearly interpolate the two "convolutions".
+  return static_cast<float>((1.0 - kernel_interpolation_factor) * sum1 +
+      kernel_interpolation_factor * sum2);
+}
+
+}  // namespace webrtc
diff --git a/common_audio/resampler/sinc_resampler.h b/common_audio/resampler/sinc_resampler.h
new file mode 100644
index 0000000..d011224
--- /dev/null
+++ b/common_audio/resampler/sinc_resampler.h
@@ -0,0 +1,171 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Modified from the Chromium original here:
+// src/media/base/sinc_resampler.h
+
+#ifndef COMMON_AUDIO_RESAMPLER_SINC_RESAMPLER_H_
+#define COMMON_AUDIO_RESAMPLER_SINC_RESAMPLER_H_
+
+#include <memory>
+
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/gtest_prod_util.h"
+#include "system_wrappers/include/aligned_malloc.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Callback class for providing more data into the resampler.  Expects |frames|
+// of data to be rendered into |destination|; zero padded if not enough frames
+// are available to satisfy the request.
+class SincResamplerCallback {
+ public:
+  virtual ~SincResamplerCallback() {}
+  virtual void Run(size_t frames, float* destination) = 0;
+};
+
+// SincResampler is a high-quality single-channel sample-rate converter.
+class SincResampler {
+ public:
+  // The kernel size can be adjusted for quality (higher is better) at the
+  // expense of performance.  Must be a multiple of 32.
+  // TODO(dalecurtis): Test performance to see if we can jack this up to 64+.
+  static const size_t kKernelSize = 32;
+
+  // Default request size.  Affects how often and for how much SincResampler
+  // calls back for input.  Must be greater than kKernelSize.
+  static const size_t kDefaultRequestSize = 512;
+
+  // The kernel offset count is used for interpolation and is the number of
+  // sub-sample kernel shifts.  Can be adjusted for quality (higher is better)
+  // at the expense of allocating more memory.
+  static const size_t kKernelOffsetCount = 32;
+  static const size_t kKernelStorageSize =
+      kKernelSize * (kKernelOffsetCount + 1);
+
+  // Constructs a SincResampler with the specified |read_cb|, which is used to
+  // acquire audio data for resampling.  |io_sample_rate_ratio| is the ratio
+  // of input / output sample rates.  |request_frames| controls the size in
+  // frames of the buffer requested by each |read_cb| call.  The value must be
+  // greater than kKernelSize.  Specify kDefaultRequestSize if there are no
+  // request size constraints.
+  SincResampler(double io_sample_rate_ratio,
+                size_t request_frames,
+                SincResamplerCallback* read_cb);
+  virtual ~SincResampler();
+
+  // Resample |frames| of data from |read_cb_| into |destination|.
+  void Resample(size_t frames, float* destination);
+
+  // The maximum size in frames that guarantees Resample() will only make a
+  // single call to |read_cb_| for more data.
+  size_t ChunkSize() const;
+
+  size_t request_frames() const { return request_frames_; }
+
+  // Flush all buffered data and reset internal indices.  Not thread safe, do
+  // not call while Resample() is in progress.
+  void Flush();
+
+  // Update |io_sample_rate_ratio_|.  SetRatio() will cause a reconstruction of
+  // the kernels used for resampling.  Not thread safe, do not call while
+  // Resample() is in progress.
+  //
+  // TODO(ajm): Use this in PushSincResampler rather than reconstructing
+  // SincResampler.  We would also need a way to update |request_frames_|.
+  void SetRatio(double io_sample_rate_ratio);
+
+  float* get_kernel_for_testing() { return kernel_storage_.get(); }
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(SincResamplerTest, Convolve);
+  FRIEND_TEST_ALL_PREFIXES(SincResamplerTest, ConvolveBenchmark);
+
+  void InitializeKernel();
+  void UpdateRegions(bool second_load);
+
+  // Selects runtime specific CPU features like SSE.  Must be called before
+  // using SincResampler.
+  // TODO(ajm): Currently managed by the class internally. See the note with
+  // |convolve_proc_| below.
+  void InitializeCPUSpecificFeatures();
+
+  // Compute convolution of |k1| and |k2| over |input_ptr|, resultant sums are
+  // linearly interpolated using |kernel_interpolation_factor|.  On x86 and ARM
+  // the underlying implementation is chosen at run time.
+  static float Convolve_C(const float* input_ptr, const float* k1,
+                          const float* k2, double kernel_interpolation_factor);
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+  static float Convolve_SSE(const float* input_ptr, const float* k1,
+                            const float* k2,
+                            double kernel_interpolation_factor);
+#elif defined(WEBRTC_HAS_NEON)
+  static float Convolve_NEON(const float* input_ptr, const float* k1,
+                             const float* k2,
+                             double kernel_interpolation_factor);
+#endif
+
+  // The ratio of input / output sample rates.
+  double io_sample_rate_ratio_;
+
+  // An index on the source input buffer with sub-sample precision.  It must be
+  // double precision to avoid drift.
+  double virtual_source_idx_;
+
+  // The buffer is primed once at the very beginning of processing.
+  bool buffer_primed_;
+
+  // Source of data for resampling.
+  SincResamplerCallback* read_cb_;
+
+  // The size (in samples) to request from each |read_cb_| execution.
+  const size_t request_frames_;
+
+  // The number of source frames processed per pass.
+  size_t block_size_;
+
+  // The size (in samples) of the internal buffer used by the resampler.
+  const size_t input_buffer_size_;
+
+  // Contains kKernelOffsetCount kernels back-to-back, each of size kKernelSize.
+  // The kernel offsets are sub-sample shifts of a windowed sinc shifted from
+  // 0.0 to 1.0 sample.
+  std::unique_ptr<float[], AlignedFreeDeleter> kernel_storage_;
+  std::unique_ptr<float[], AlignedFreeDeleter> kernel_pre_sinc_storage_;
+  std::unique_ptr<float[], AlignedFreeDeleter> kernel_window_storage_;
+
+  // Data from the source is copied into this buffer for each processing pass.
+  std::unique_ptr<float[], AlignedFreeDeleter> input_buffer_;
+
+  // Stores the runtime selection of which Convolve function to use.
+  // TODO(ajm): Move to using a global static which must only be initialized
+  // once by the user. We're not doing this initially, because we don't have
+  // e.g. a LazyInstance helper in webrtc.
+#if defined(WEBRTC_CPU_DETECTION)
+  typedef float (*ConvolveProc)(const float*, const float*, const float*,
+                                double);
+  ConvolveProc convolve_proc_;
+#endif
+
+  // Pointers to the various regions inside |input_buffer_|.  See the diagram at
+  // the top of the .cc file for more information.
+  float* r0_;
+  float* const r1_;
+  float* const r2_;
+  float* r3_;
+  float* r4_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(SincResampler);
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_RESAMPLER_SINC_RESAMPLER_H_
diff --git a/common_audio/resampler/sinc_resampler_neon.cc b/common_audio/resampler/sinc_resampler_neon.cc
new file mode 100644
index 0000000..9d77f0d
--- /dev/null
+++ b/common_audio/resampler/sinc_resampler_neon.cc
@@ -0,0 +1,47 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Modified from the Chromium original:
+// src/media/base/sinc_resampler.cc
+
+#include "common_audio/resampler/sinc_resampler.h"
+
+#include <arm_neon.h>
+
+namespace webrtc {
+
+float SincResampler::Convolve_NEON(const float* input_ptr, const float* k1,
+                                   const float* k2,
+                                   double kernel_interpolation_factor) {
+  float32x4_t m_input;
+  float32x4_t m_sums1 = vmovq_n_f32(0);
+  float32x4_t m_sums2 = vmovq_n_f32(0);
+
+  const float* upper = input_ptr + kKernelSize;
+  for (; input_ptr < upper; ) {
+    m_input = vld1q_f32(input_ptr);
+    input_ptr += 4;
+    m_sums1 = vmlaq_f32(m_sums1, m_input, vld1q_f32(k1));
+    k1 += 4;
+    m_sums2 = vmlaq_f32(m_sums2, m_input, vld1q_f32(k2));
+    k2 += 4;
+  }
+
+  // Linearly interpolate the two "convolutions".
+  m_sums1 = vmlaq_f32(
+      vmulq_f32(m_sums1, vmovq_n_f32(1.0 - kernel_interpolation_factor)),
+      m_sums2, vmovq_n_f32(kernel_interpolation_factor));
+
+  // Sum components together.
+  float32x2_t m_half = vadd_f32(vget_high_f32(m_sums1), vget_low_f32(m_sums1));
+  return vget_lane_f32(vpadd_f32(m_half, m_half), 0);
+}
+
+}  // namespace webrtc
diff --git a/common_audio/resampler/sinc_resampler_sse.cc b/common_audio/resampler/sinc_resampler_sse.cc
new file mode 100644
index 0000000..7111108
--- /dev/null
+++ b/common_audio/resampler/sinc_resampler_sse.cc
@@ -0,0 +1,59 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Modified from the Chromium original:
+// src/media/base/simd/sinc_resampler_sse.cc
+
+#include "common_audio/resampler/sinc_resampler.h"
+
+#include <xmmintrin.h>
+
+namespace webrtc {
+
+float SincResampler::Convolve_SSE(const float* input_ptr, const float* k1,
+                                  const float* k2,
+                                  double kernel_interpolation_factor) {
+  __m128 m_input;
+  __m128 m_sums1 = _mm_setzero_ps();
+  __m128 m_sums2 = _mm_setzero_ps();
+
+  // Based on |input_ptr| alignment, we need to use loadu or load.  Unrolling
+  // these loops hurt performance in local testing.
+  if (reinterpret_cast<uintptr_t>(input_ptr) & 0x0F) {
+    for (size_t i = 0; i < kKernelSize; i += 4) {
+      m_input = _mm_loadu_ps(input_ptr + i);
+      m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
+      m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
+    }
+  } else {
+    for (size_t i = 0; i < kKernelSize; i += 4) {
+      m_input = _mm_load_ps(input_ptr + i);
+      m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
+      m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
+    }
+  }
+
+  // Linearly interpolate the two "convolutions".
+  m_sums1 = _mm_mul_ps(m_sums1, _mm_set_ps1(
+      static_cast<float>(1.0 - kernel_interpolation_factor)));
+  m_sums2 = _mm_mul_ps(m_sums2, _mm_set_ps1(
+      static_cast<float>(kernel_interpolation_factor)));
+  m_sums1 = _mm_add_ps(m_sums1, m_sums2);
+
+  // Sum components together.
+  float result;
+  m_sums2 = _mm_add_ps(_mm_movehl_ps(m_sums1, m_sums1), m_sums1);
+  _mm_store_ss(&result, _mm_add_ss(m_sums2, _mm_shuffle_ps(
+      m_sums2, m_sums2, 1)));
+
+  return result;
+}
+
+}  // namespace webrtc
diff --git a/common_audio/resampler/sinc_resampler_unittest.cc b/common_audio/resampler/sinc_resampler_unittest.cc
new file mode 100644
index 0000000..87e991d
--- /dev/null
+++ b/common_audio/resampler/sinc_resampler_unittest.cc
@@ -0,0 +1,394 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Modified from the Chromium original:
+// src/media/base/sinc_resampler_unittest.cc
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+
+#include <math.h>
+
+#include <algorithm>
+#include <memory>
+#include <tuple>
+
+#include "common_audio/resampler/sinc_resampler.h"
+#include "common_audio/resampler/sinusoidal_linear_chirp_source.h"
+#include "rtc_base/stringize_macros.h"
+#include "rtc_base/timeutils.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using testing::_;
+
+namespace webrtc {
+
+static const double kSampleRateRatio = 192000.0 / 44100.0;
+static const double kKernelInterpolationFactor = 0.5;
+
+// Helper class to ensure ChunkedResample() functions properly.
+class MockSource : public SincResamplerCallback {
+ public:
+  MOCK_METHOD2(Run, void(size_t frames, float* destination));
+};
+
+ACTION(ClearBuffer) {
+  memset(arg1, 0, arg0 * sizeof(float));
+}
+
+ACTION(FillBuffer) {
+  // Value chosen arbitrarily such that SincResampler resamples it to something
+  // easily representable on all platforms; e.g., using kSampleRateRatio this
+  // becomes 1.81219.
+  memset(arg1, 64, arg0 * sizeof(float));
+}
+
+// Test requesting multiples of ChunkSize() frames results in the proper number
+// of callbacks.
+TEST(SincResamplerTest, ChunkedResample) {
+  MockSource mock_source;
+
+  // Choose a high ratio of input to output samples which will result in quick
+  // exhaustion of SincResampler's internal buffers.
+  SincResampler resampler(kSampleRateRatio, SincResampler::kDefaultRequestSize,
+                          &mock_source);
+
+  static const int kChunks = 2;
+  size_t max_chunk_size = resampler.ChunkSize() * kChunks;
+  std::unique_ptr<float[]> resampled_destination(new float[max_chunk_size]);
+
+  // Verify requesting ChunkSize() frames causes a single callback.
+  EXPECT_CALL(mock_source, Run(_, _))
+      .Times(1).WillOnce(ClearBuffer());
+  resampler.Resample(resampler.ChunkSize(), resampled_destination.get());
+
+  // Verify requesting kChunks * ChunkSize() frames causes kChunks callbacks.
+  testing::Mock::VerifyAndClear(&mock_source);
+  EXPECT_CALL(mock_source, Run(_, _))
+      .Times(kChunks).WillRepeatedly(ClearBuffer());
+  resampler.Resample(max_chunk_size, resampled_destination.get());
+}
+
+// Test flush resets the internal state properly.
+TEST(SincResamplerTest, Flush) {
+  MockSource mock_source;
+  SincResampler resampler(kSampleRateRatio, SincResampler::kDefaultRequestSize,
+                          &mock_source);
+  std::unique_ptr<float[]> resampled_destination(
+      new float[resampler.ChunkSize()]);
+
+  // Fill the resampler with junk data.
+  EXPECT_CALL(mock_source, Run(_, _))
+      .Times(1).WillOnce(FillBuffer());
+  resampler.Resample(resampler.ChunkSize() / 2, resampled_destination.get());
+  ASSERT_NE(resampled_destination[0], 0);
+
+  // Flush and request more data, which should all be zeros now.
+  resampler.Flush();
+  testing::Mock::VerifyAndClear(&mock_source);
+  EXPECT_CALL(mock_source, Run(_, _))
+      .Times(1).WillOnce(ClearBuffer());
+  resampler.Resample(resampler.ChunkSize() / 2, resampled_destination.get());
+  for (size_t i = 0; i < resampler.ChunkSize() / 2; ++i)
+    ASSERT_FLOAT_EQ(resampled_destination[i], 0);
+}
+
+// Test flush resets the internal state properly.
+TEST(SincResamplerTest, DISABLED_SetRatioBench) {
+  MockSource mock_source;
+  SincResampler resampler(kSampleRateRatio, SincResampler::kDefaultRequestSize,
+                          &mock_source);
+
+  int64_t start = rtc::TimeNanos();
+  for (int i = 1; i < 10000; ++i)
+    resampler.SetRatio(1.0 / i);
+  double total_time_c_us =
+      (rtc::TimeNanos() - start) / rtc::kNumNanosecsPerMicrosec;
+  printf("SetRatio() took %.2fms.\n", total_time_c_us / 1000);
+}
+
+
+// Define platform independent function name for Convolve* tests.
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#define CONVOLVE_FUNC Convolve_SSE
+#elif defined(WEBRTC_ARCH_ARM_V7)
+#define CONVOLVE_FUNC Convolve_NEON
+#endif
+
+// Ensure various optimized Convolve() methods return the same value.  Only run
+// this test if other optimized methods exist, otherwise the default Convolve()
+// will be tested by the parameterized SincResampler tests below.
+#if defined(CONVOLVE_FUNC)
+TEST(SincResamplerTest, Convolve) {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+  ASSERT_TRUE(WebRtc_GetCPUInfo(kSSE2));
+#elif defined(WEBRTC_ARCH_ARM_V7)
+  ASSERT_TRUE(WebRtc_GetCPUFeaturesARM() & kCPUFeatureNEON);
+#endif
+
+  // Initialize a dummy resampler.
+  MockSource mock_source;
+  SincResampler resampler(kSampleRateRatio, SincResampler::kDefaultRequestSize,
+                          &mock_source);
+
+  // The optimized Convolve methods are slightly more precise than Convolve_C(),
+  // so comparison must be done using an epsilon.
+  static const double kEpsilon = 0.00000005;
+
+  // Use a kernel from SincResampler as input and kernel data, this has the
+  // benefit of already being properly sized and aligned for Convolve_SSE().
+  double result = resampler.Convolve_C(
+      resampler.kernel_storage_.get(), resampler.kernel_storage_.get(),
+      resampler.kernel_storage_.get(), kKernelInterpolationFactor);
+  double result2 = resampler.CONVOLVE_FUNC(
+      resampler.kernel_storage_.get(), resampler.kernel_storage_.get(),
+      resampler.kernel_storage_.get(), kKernelInterpolationFactor);
+  EXPECT_NEAR(result2, result, kEpsilon);
+
+  // Test Convolve() w/ unaligned input pointer.
+  result = resampler.Convolve_C(
+      resampler.kernel_storage_.get() + 1, resampler.kernel_storage_.get(),
+      resampler.kernel_storage_.get(), kKernelInterpolationFactor);
+  result2 = resampler.CONVOLVE_FUNC(
+      resampler.kernel_storage_.get() + 1, resampler.kernel_storage_.get(),
+      resampler.kernel_storage_.get(), kKernelInterpolationFactor);
+  EXPECT_NEAR(result2, result, kEpsilon);
+}
+#endif
+
+// Benchmark for the various Convolve() methods.  Make sure to build with
+// branding=Chrome so that RTC_DCHECKs are compiled out when benchmarking.
+// Original benchmarks were run with --convolve-iterations=50000000.
+TEST(SincResamplerTest, ConvolveBenchmark) {
+  // Initialize a dummy resampler.
+  MockSource mock_source;
+  SincResampler resampler(kSampleRateRatio, SincResampler::kDefaultRequestSize,
+                          &mock_source);
+
+  // Retrieve benchmark iterations from command line.
+  // TODO(ajm): Reintroduce this as a command line option.
+  const int kConvolveIterations = 1000000;
+
+  printf("Benchmarking %d iterations:\n", kConvolveIterations);
+
+  // Benchmark Convolve_C().
+  int64_t start = rtc::TimeNanos();
+  for (int i = 0; i < kConvolveIterations; ++i) {
+    resampler.Convolve_C(
+        resampler.kernel_storage_.get(), resampler.kernel_storage_.get(),
+        resampler.kernel_storage_.get(), kKernelInterpolationFactor);
+  }
+  double total_time_c_us =
+      (rtc::TimeNanos() - start) / rtc::kNumNanosecsPerMicrosec;
+  printf("Convolve_C took %.2fms.\n", total_time_c_us / 1000);
+
+#if defined(CONVOLVE_FUNC)
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+  ASSERT_TRUE(WebRtc_GetCPUInfo(kSSE2));
+#elif defined(WEBRTC_ARCH_ARM_V7)
+  ASSERT_TRUE(WebRtc_GetCPUFeaturesARM() & kCPUFeatureNEON);
+#endif
+
+  // Benchmark with unaligned input pointer.
+  start = rtc::TimeNanos();
+  for (int j = 0; j < kConvolveIterations; ++j) {
+    resampler.CONVOLVE_FUNC(
+        resampler.kernel_storage_.get() + 1, resampler.kernel_storage_.get(),
+        resampler.kernel_storage_.get(), kKernelInterpolationFactor);
+  }
+  double total_time_optimized_unaligned_us =
+      (rtc::TimeNanos() - start) / rtc::kNumNanosecsPerMicrosec;
+  printf(STRINGIZE(CONVOLVE_FUNC) "(unaligned) took %.2fms; which is %.2fx "
+         "faster than Convolve_C.\n", total_time_optimized_unaligned_us / 1000,
+         total_time_c_us / total_time_optimized_unaligned_us);
+
+  // Benchmark with aligned input pointer.
+  start = rtc::TimeNanos();
+  for (int j = 0; j < kConvolveIterations; ++j) {
+    resampler.CONVOLVE_FUNC(
+        resampler.kernel_storage_.get(), resampler.kernel_storage_.get(),
+        resampler.kernel_storage_.get(), kKernelInterpolationFactor);
+  }
+  double total_time_optimized_aligned_us =
+      (rtc::TimeNanos() - start) / rtc::kNumNanosecsPerMicrosec;
+  printf(STRINGIZE(CONVOLVE_FUNC) " (aligned) took %.2fms; which is %.2fx "
+         "faster than Convolve_C and %.2fx faster than "
+         STRINGIZE(CONVOLVE_FUNC) " (unaligned).\n",
+         total_time_optimized_aligned_us / 1000,
+         total_time_c_us / total_time_optimized_aligned_us,
+         total_time_optimized_unaligned_us / total_time_optimized_aligned_us);
+#endif
+}
+
+#undef CONVOLVE_FUNC
+
+typedef std::tuple<int, int, double, double> SincResamplerTestData;
+class SincResamplerTest
+    : public testing::TestWithParam<SincResamplerTestData> {
+ public:
+  SincResamplerTest()
+      : input_rate_(std::get<0>(GetParam())),
+        output_rate_(std::get<1>(GetParam())),
+        rms_error_(std::get<2>(GetParam())),
+        low_freq_error_(std::get<3>(GetParam())) {}
+
+  virtual ~SincResamplerTest() {}
+
+ protected:
+  int input_rate_;
+  int output_rate_;
+  double rms_error_;
+  double low_freq_error_;
+};
+
+// Tests resampling using a given input and output sample rate.
+TEST_P(SincResamplerTest, Resample) {
+  // Make comparisons using one second of data.
+  static const double kTestDurationSecs = 1;
+  const size_t input_samples =
+      static_cast<size_t>(kTestDurationSecs * input_rate_);
+  const size_t output_samples =
+      static_cast<size_t>(kTestDurationSecs * output_rate_);
+
+  // Nyquist frequency for the input sampling rate.
+  const double input_nyquist_freq = 0.5 * input_rate_;
+
+  // Source for data to be resampled.
+  SinusoidalLinearChirpSource resampler_source(
+      input_rate_, input_samples, input_nyquist_freq, 0);
+
+  const double io_ratio = input_rate_ / static_cast<double>(output_rate_);
+  SincResampler resampler(io_ratio, SincResampler::kDefaultRequestSize,
+                          &resampler_source);
+
+  // Force an update to the sample rate ratio to ensure dyanmic sample rate
+  // changes are working correctly.
+  std::unique_ptr<float[]> kernel(new float[SincResampler::kKernelStorageSize]);
+  memcpy(kernel.get(), resampler.get_kernel_for_testing(),
+         SincResampler::kKernelStorageSize);
+  resampler.SetRatio(M_PI);
+  ASSERT_NE(0, memcmp(kernel.get(), resampler.get_kernel_for_testing(),
+                      SincResampler::kKernelStorageSize));
+  resampler.SetRatio(io_ratio);
+  ASSERT_EQ(0, memcmp(kernel.get(), resampler.get_kernel_for_testing(),
+                      SincResampler::kKernelStorageSize));
+
+  // TODO(dalecurtis): If we switch to AVX/SSE optimization, we'll need to
+  // allocate these on 32-byte boundaries and ensure they're sized % 32 bytes.
+  std::unique_ptr<float[]> resampled_destination(new float[output_samples]);
+  std::unique_ptr<float[]> pure_destination(new float[output_samples]);
+
+  // Generate resampled signal.
+  resampler.Resample(output_samples, resampled_destination.get());
+
+  // Generate pure signal.
+  SinusoidalLinearChirpSource pure_source(
+      output_rate_, output_samples, input_nyquist_freq, 0);
+  pure_source.Run(output_samples, pure_destination.get());
+
+  // Range of the Nyquist frequency (0.5 * min(input rate, output_rate)) which
+  // we refer to as low and high.
+  static const double kLowFrequencyNyquistRange = 0.7;
+  static const double kHighFrequencyNyquistRange = 0.9;
+
+  // Calculate Root-Mean-Square-Error and maximum error for the resampling.
+  double sum_of_squares = 0;
+  double low_freq_max_error = 0;
+  double high_freq_max_error = 0;
+  int minimum_rate = std::min(input_rate_, output_rate_);
+  double low_frequency_range = kLowFrequencyNyquistRange * 0.5 * minimum_rate;
+  double high_frequency_range = kHighFrequencyNyquistRange * 0.5 * minimum_rate;
+  for (size_t i = 0; i < output_samples; ++i) {
+    double error = fabs(resampled_destination[i] - pure_destination[i]);
+
+    if (pure_source.Frequency(i) < low_frequency_range) {
+      if (error > low_freq_max_error)
+        low_freq_max_error = error;
+    } else if (pure_source.Frequency(i) < high_frequency_range) {
+      if (error > high_freq_max_error)
+        high_freq_max_error = error;
+    }
+    // TODO(dalecurtis): Sanity check frequencies > kHighFrequencyNyquistRange.
+
+    sum_of_squares += error * error;
+  }
+
+  double rms_error = sqrt(sum_of_squares / output_samples);
+
+  // Convert each error to dbFS.
+  #define DBFS(x) 20 * log10(x)
+  rms_error = DBFS(rms_error);
+  low_freq_max_error = DBFS(low_freq_max_error);
+  high_freq_max_error = DBFS(high_freq_max_error);
+
+  EXPECT_LE(rms_error, rms_error_);
+  EXPECT_LE(low_freq_max_error, low_freq_error_);
+
+  // All conversions currently have a high frequency error around -6 dbFS.
+  static const double kHighFrequencyMaxError = -6.02;
+  EXPECT_LE(high_freq_max_error, kHighFrequencyMaxError);
+}
+
+// Almost all conversions have an RMS error of around -14 dbFS.
+static const double kResamplingRMSError = -14.58;
+
+// Thresholds chosen arbitrarily based on what each resampling reported during
+// testing.  All thresholds are in dbFS, http://en.wikipedia.org/wiki/DBFS.
+INSTANTIATE_TEST_CASE_P(
+    SincResamplerTest,
+    SincResamplerTest,
+    testing::Values(
+        // To 44.1kHz
+        std::make_tuple(8000, 44100, kResamplingRMSError, -62.73),
+        std::make_tuple(11025, 44100, kResamplingRMSError, -72.19),
+        std::make_tuple(16000, 44100, kResamplingRMSError, -62.54),
+        std::make_tuple(22050, 44100, kResamplingRMSError, -73.53),
+        std::make_tuple(32000, 44100, kResamplingRMSError, -63.32),
+        std::make_tuple(44100, 44100, kResamplingRMSError, -73.53),
+        std::make_tuple(48000, 44100, -15.01, -64.04),
+        std::make_tuple(96000, 44100, -18.49, -25.51),
+        std::make_tuple(192000, 44100, -20.50, -13.31),
+
+        // To 48kHz
+        std::make_tuple(8000, 48000, kResamplingRMSError, -63.43),
+        std::make_tuple(11025, 48000, kResamplingRMSError, -62.61),
+        std::make_tuple(16000, 48000, kResamplingRMSError, -63.96),
+        std::make_tuple(22050, 48000, kResamplingRMSError, -62.42),
+        std::make_tuple(32000, 48000, kResamplingRMSError, -64.04),
+        std::make_tuple(44100, 48000, kResamplingRMSError, -62.63),
+        std::make_tuple(48000, 48000, kResamplingRMSError, -73.52),
+        std::make_tuple(96000, 48000, -18.40, -28.44),
+        std::make_tuple(192000, 48000, -20.43, -14.11),
+
+        // To 96kHz
+        std::make_tuple(8000, 96000, kResamplingRMSError, -63.19),
+        std::make_tuple(11025, 96000, kResamplingRMSError, -62.61),
+        std::make_tuple(16000, 96000, kResamplingRMSError, -63.39),
+        std::make_tuple(22050, 96000, kResamplingRMSError, -62.42),
+        std::make_tuple(32000, 96000, kResamplingRMSError, -63.95),
+        std::make_tuple(44100, 96000, kResamplingRMSError, -62.63),
+        std::make_tuple(48000, 96000, kResamplingRMSError, -73.52),
+        std::make_tuple(96000, 96000, kResamplingRMSError, -73.52),
+        std::make_tuple(192000, 96000, kResamplingRMSError, -28.41),
+
+        // To 192kHz
+        std::make_tuple(8000, 192000, kResamplingRMSError, -63.10),
+        std::make_tuple(11025, 192000, kResamplingRMSError, -62.61),
+        std::make_tuple(16000, 192000, kResamplingRMSError, -63.14),
+        std::make_tuple(22050, 192000, kResamplingRMSError, -62.42),
+        std::make_tuple(32000, 192000, kResamplingRMSError, -63.38),
+        std::make_tuple(44100, 192000, kResamplingRMSError, -62.63),
+        std::make_tuple(48000, 192000, kResamplingRMSError, -73.44),
+        std::make_tuple(96000, 192000, kResamplingRMSError, -73.52),
+        std::make_tuple(192000, 192000, kResamplingRMSError, -73.52)));
+
+}  // namespace webrtc
diff --git a/common_audio/resampler/sinusoidal_linear_chirp_source.cc b/common_audio/resampler/sinusoidal_linear_chirp_source.cc
new file mode 100644
index 0000000..134044e
--- /dev/null
+++ b/common_audio/resampler/sinusoidal_linear_chirp_source.cc
@@ -0,0 +1,58 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+
+#include "common_audio/resampler/sinusoidal_linear_chirp_source.h"
+
+#include <math.h>
+
+namespace webrtc {
+
+SinusoidalLinearChirpSource::SinusoidalLinearChirpSource(int sample_rate,
+                                                         size_t samples,
+                                                         double max_frequency,
+                                                         double delay_samples)
+    : sample_rate_(sample_rate),
+      total_samples_(samples),
+      max_frequency_(max_frequency),
+      current_index_(0),
+      delay_samples_(delay_samples) {
+  // Chirp rate.
+  double duration = static_cast<double>(total_samples_) / sample_rate_;
+  k_ = (max_frequency_ - kMinFrequency) / duration;
+}
+
+void SinusoidalLinearChirpSource::Run(size_t frames, float* destination) {
+  for (size_t i = 0; i < frames; ++i, ++current_index_) {
+    // Filter out frequencies higher than Nyquist.
+    if (Frequency(current_index_) > 0.5 * sample_rate_) {
+      destination[i] = 0;
+    } else {
+      // Calculate time in seconds.
+      if (current_index_ < delay_samples_) {
+        destination[i] = 0;
+      } else {
+        // Sinusoidal linear chirp.
+        double t = (current_index_ - delay_samples_) / sample_rate_;
+        destination[i] =
+            sin(2 * M_PI * (kMinFrequency * t + (k_ / 2) * t * t));
+      }
+    }
+  }
+}
+
+double SinusoidalLinearChirpSource::Frequency(size_t position) {
+  return kMinFrequency + (position - delay_samples_) *
+      (max_frequency_ - kMinFrequency) / total_samples_;
+}
+
+}  // namespace webrtc
diff --git a/common_audio/resampler/sinusoidal_linear_chirp_source.h b/common_audio/resampler/sinusoidal_linear_chirp_source.h
new file mode 100644
index 0000000..7fcbaa0
--- /dev/null
+++ b/common_audio/resampler/sinusoidal_linear_chirp_source.h
@@ -0,0 +1,55 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Modified from the Chromium original here:
+// src/media/base/sinc_resampler_unittest.cc
+
+#ifndef COMMON_AUDIO_RESAMPLER_SINUSOIDAL_LINEAR_CHIRP_SOURCE_H_
+#define COMMON_AUDIO_RESAMPLER_SINUSOIDAL_LINEAR_CHIRP_SOURCE_H_
+
+#include "common_audio/resampler/sinc_resampler.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Fake audio source for testing the resampler.  Generates a sinusoidal linear
+// chirp (http://en.wikipedia.org/wiki/Chirp) which can be tuned to stress the
+// resampler for the specific sample rate conversion being used.
+class SinusoidalLinearChirpSource : public SincResamplerCallback {
+ public:
+  // |delay_samples| can be used to insert a fractional sample delay into the
+  // source.  It will produce zeros until non-negative time is reached.
+  SinusoidalLinearChirpSource(int sample_rate, size_t samples,
+                              double max_frequency, double delay_samples);
+
+  virtual ~SinusoidalLinearChirpSource() {}
+
+  void Run(size_t frames, float* destination) override;
+
+  double Frequency(size_t position);
+
+ private:
+  enum {
+    kMinFrequency = 5
+  };
+
+  int sample_rate_;
+  size_t total_samples_;
+  double max_frequency_;
+  double k_;
+  size_t current_index_;
+  double delay_samples_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(SinusoidalLinearChirpSource);
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_RESAMPLER_SINUSOIDAL_LINEAR_CHIRP_SOURCE_H_
diff --git a/common_audio/ring_buffer.c b/common_audio/ring_buffer.c
new file mode 100644
index 0000000..a20ada5
--- /dev/null
+++ b/common_audio/ring_buffer.c
@@ -0,0 +1,232 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// A ring buffer to hold arbitrary data. Provides no thread safety. Unless
+// otherwise specified, functions return 0 on success and -1 on error.
+
+#include "common_audio/ring_buffer.h"
+
+#include <stddef.h>  // size_t
+#include <stdlib.h>
+#include <string.h>
+
+// Get address of region(s) from which we can read data.
+// If the region is contiguous, |data_ptr_bytes_2| will be zero.
+// If non-contiguous, |data_ptr_bytes_2| will be the size in bytes of the second
+// region. Returns room available to be read or |element_count|, whichever is
+// smaller.
+static size_t GetBufferReadRegions(RingBuffer* buf,
+                                   size_t element_count,
+                                   void** data_ptr_1,
+                                   size_t* data_ptr_bytes_1,
+                                   void** data_ptr_2,
+                                   size_t* data_ptr_bytes_2) {
+
+  const size_t readable_elements = WebRtc_available_read(buf);
+  const size_t read_elements = (readable_elements < element_count ?
+      readable_elements : element_count);
+  const size_t margin = buf->element_count - buf->read_pos;
+
+  // Check to see if read is not contiguous.
+  if (read_elements > margin) {
+    // Write data in two blocks that wrap the buffer.
+    *data_ptr_1 = buf->data + buf->read_pos * buf->element_size;
+    *data_ptr_bytes_1 = margin * buf->element_size;
+    *data_ptr_2 = buf->data;
+    *data_ptr_bytes_2 = (read_elements - margin) * buf->element_size;
+  } else {
+    *data_ptr_1 = buf->data + buf->read_pos * buf->element_size;
+    *data_ptr_bytes_1 = read_elements * buf->element_size;
+    *data_ptr_2 = NULL;
+    *data_ptr_bytes_2 = 0;
+  }
+
+  return read_elements;
+}
+
+RingBuffer* WebRtc_CreateBuffer(size_t element_count, size_t element_size) {
+  RingBuffer* self = NULL;
+  if (element_count == 0 || element_size == 0) {
+    return NULL;
+  }
+
+  self = malloc(sizeof(RingBuffer));
+  if (!self) {
+    return NULL;
+  }
+
+  self->data = malloc(element_count * element_size);
+  if (!self->data) {
+    free(self);
+    self = NULL;
+    return NULL;
+  }
+
+  self->element_count = element_count;
+  self->element_size = element_size;
+  WebRtc_InitBuffer(self);
+
+  return self;
+}
+
+void WebRtc_InitBuffer(RingBuffer* self) {
+  self->read_pos = 0;
+  self->write_pos = 0;
+  self->rw_wrap = SAME_WRAP;
+
+  // Initialize buffer to zeros
+  memset(self->data, 0, self->element_count * self->element_size);
+}
+
+void WebRtc_FreeBuffer(void* handle) {
+  RingBuffer* self = (RingBuffer*)handle;
+  if (!self) {
+    return;
+  }
+
+  free(self->data);
+  free(self);
+}
+
+size_t WebRtc_ReadBuffer(RingBuffer* self,
+                         void** data_ptr,
+                         void* data,
+                         size_t element_count) {
+
+  if (self == NULL) {
+    return 0;
+  }
+  if (data == NULL) {
+    return 0;
+  }
+
+  {
+    void* buf_ptr_1 = NULL;
+    void* buf_ptr_2 = NULL;
+    size_t buf_ptr_bytes_1 = 0;
+    size_t buf_ptr_bytes_2 = 0;
+    const size_t read_count = GetBufferReadRegions(self,
+                                                   element_count,
+                                                   &buf_ptr_1,
+                                                   &buf_ptr_bytes_1,
+                                                   &buf_ptr_2,
+                                                   &buf_ptr_bytes_2);
+    if (buf_ptr_bytes_2 > 0) {
+      // We have a wrap around when reading the buffer. Copy the buffer data to
+      // |data| and point to it.
+      memcpy(data, buf_ptr_1, buf_ptr_bytes_1);
+      memcpy(((char*) data) + buf_ptr_bytes_1, buf_ptr_2, buf_ptr_bytes_2);
+      buf_ptr_1 = data;
+    } else if (!data_ptr) {
+      // No wrap, but a memcpy was requested.
+      memcpy(data, buf_ptr_1, buf_ptr_bytes_1);
+    }
+    if (data_ptr) {
+      // |buf_ptr_1| == |data| in the case of a wrap.
+      *data_ptr = read_count == 0 ? NULL : buf_ptr_1;
+    }
+
+    // Update read position
+    WebRtc_MoveReadPtr(self, (int) read_count);
+
+    return read_count;
+  }
+}
+
+size_t WebRtc_WriteBuffer(RingBuffer* self,
+                          const void* data,
+                          size_t element_count) {
+  if (!self) {
+    return 0;
+  }
+  if (!data) {
+    return 0;
+  }
+
+  {
+    const size_t free_elements = WebRtc_available_write(self);
+    const size_t write_elements = (free_elements < element_count ? free_elements
+        : element_count);
+    size_t n = write_elements;
+    const size_t margin = self->element_count - self->write_pos;
+
+    if (write_elements > margin) {
+      // Buffer wrap around when writing.
+      memcpy(self->data + self->write_pos * self->element_size,
+             data, margin * self->element_size);
+      self->write_pos = 0;
+      n -= margin;
+      self->rw_wrap = DIFF_WRAP;
+    }
+    memcpy(self->data + self->write_pos * self->element_size,
+           ((const char*) data) + ((write_elements - n) * self->element_size),
+           n * self->element_size);
+    self->write_pos += n;
+
+    return write_elements;
+  }
+}
+
+int WebRtc_MoveReadPtr(RingBuffer* self, int element_count) {
+  if (!self) {
+    return 0;
+  }
+
+  {
+    // We need to be able to take care of negative changes, hence use "int"
+    // instead of "size_t".
+    const int free_elements = (int) WebRtc_available_write(self);
+    const int readable_elements = (int) WebRtc_available_read(self);
+    int read_pos = (int) self->read_pos;
+
+    if (element_count > readable_elements) {
+      element_count = readable_elements;
+    }
+    if (element_count < -free_elements) {
+      element_count = -free_elements;
+    }
+
+    read_pos += element_count;
+    if (read_pos > (int) self->element_count) {
+      // Buffer wrap around. Restart read position and wrap indicator.
+      read_pos -= (int) self->element_count;
+      self->rw_wrap = SAME_WRAP;
+    }
+    if (read_pos < 0) {
+      // Buffer wrap around. Restart read position and wrap indicator.
+      read_pos += (int) self->element_count;
+      self->rw_wrap = DIFF_WRAP;
+    }
+
+    self->read_pos = (size_t) read_pos;
+
+    return element_count;
+  }
+}
+
+size_t WebRtc_available_read(const RingBuffer* self) {
+  if (!self) {
+    return 0;
+  }
+
+  if (self->rw_wrap == SAME_WRAP) {
+    return self->write_pos - self->read_pos;
+  } else {
+    return self->element_count - self->read_pos + self->write_pos;
+  }
+}
+
+size_t WebRtc_available_write(const RingBuffer* self) {
+  if (!self) {
+    return 0;
+  }
+
+  return self->element_count - WebRtc_available_read(self);
+}
diff --git a/common_audio/ring_buffer.h b/common_audio/ring_buffer.h
new file mode 100644
index 0000000..aa2ac27
--- /dev/null
+++ b/common_audio/ring_buffer.h
@@ -0,0 +1,76 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// A ring buffer to hold arbitrary data. Provides no thread safety. Unless
+// otherwise specified, functions return 0 on success and -1 on error.
+
+#ifndef COMMON_AUDIO_RING_BUFFER_H_
+#define COMMON_AUDIO_RING_BUFFER_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stddef.h>  // size_t
+
+enum Wrap { SAME_WRAP, DIFF_WRAP };
+
+typedef struct RingBuffer {
+  size_t read_pos;
+  size_t write_pos;
+  size_t element_count;
+  size_t element_size;
+  enum Wrap rw_wrap;
+  char* data;
+} RingBuffer;
+
+// Creates and initializes the buffer. Returns null on failure.
+RingBuffer* WebRtc_CreateBuffer(size_t element_count, size_t element_size);
+void WebRtc_InitBuffer(RingBuffer* handle);
+void WebRtc_FreeBuffer(void* handle);
+
+// Reads data from the buffer. Returns the number of elements that were read.
+// The |data_ptr| will point to the address where the read data is located.
+// If no data can be read, |data_ptr| is set to |NULL|. If all data can be read
+// without buffer wrap around then |data_ptr| will point to the location in the
+// buffer. Otherwise, the data will be copied to |data| (memory allocation done
+// by the user) and |data_ptr| points to the address of |data|. |data_ptr| is
+// only guaranteed to be valid until the next call to WebRtc_WriteBuffer().
+//
+// To force a copying to |data|, pass a null |data_ptr|.
+//
+// Returns number of elements read.
+size_t WebRtc_ReadBuffer(RingBuffer* handle,
+                         void** data_ptr,
+                         void* data,
+                         size_t element_count);
+
+// Writes |data| to buffer and returns the number of elements written.
+size_t WebRtc_WriteBuffer(RingBuffer* handle, const void* data,
+                          size_t element_count);
+
+// Moves the buffer read position and returns the number of elements moved.
+// Positive |element_count| moves the read position towards the write position,
+// that is, flushing the buffer. Negative |element_count| moves the read
+// position away from the the write position, that is, stuffing the buffer.
+// Returns number of elements moved.
+int WebRtc_MoveReadPtr(RingBuffer* handle, int element_count);
+
+// Returns number of available elements to read.
+size_t WebRtc_available_read(const RingBuffer* handle);
+
+// Returns number of available elements for write.
+size_t WebRtc_available_write(const RingBuffer* handle);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // COMMON_AUDIO_RING_BUFFER_H_
diff --git a/common_audio/ring_buffer_unittest.cc b/common_audio/ring_buffer_unittest.cc
new file mode 100644
index 0000000..4bb1497
--- /dev/null
+++ b/common_audio/ring_buffer_unittest.cc
@@ -0,0 +1,151 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/ring_buffer.h"
+
+#include <stdlib.h>
+#include <time.h>
+
+#include <algorithm>
+#include <memory>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+struct FreeBufferDeleter {
+  inline void operator()(void* ptr) const {
+    WebRtc_FreeBuffer(ptr);
+  }
+};
+typedef std::unique_ptr<RingBuffer, FreeBufferDeleter> scoped_ring_buffer;
+
+static void AssertElementEq(int expected, int actual) {
+  ASSERT_EQ(expected, actual);
+}
+
+static int SetIncrementingData(int* data, int num_elements,
+                               int starting_value) {
+  for (int i = 0; i < num_elements; i++) {
+    data[i] = starting_value++;
+  }
+  return starting_value;
+}
+
+static int CheckIncrementingData(int* data, int num_elements,
+                                 int starting_value) {
+  for (int i = 0; i < num_elements; i++) {
+    AssertElementEq(starting_value++, data[i]);
+  }
+  return starting_value;
+}
+
+// We use ASSERTs in this test to avoid obscuring the seed in the case of a
+// failure.
+static void RandomStressTest(int** data_ptr) {
+  const int kNumTests = 10;
+  const int kNumOps = 1000;
+  const int kMaxBufferSize = 1000;
+
+  unsigned int seed = time(nullptr);
+  printf("seed=%u\n", seed);
+  srand(seed);
+  for (int i = 0; i < kNumTests; i++) {
+    // rand_r is not supported on many platforms, so rand is used.
+    const int buffer_size = std::max(rand() % kMaxBufferSize, 1);  // NOLINT
+    std::unique_ptr<int[]> write_data(new int[buffer_size]);
+    std::unique_ptr<int[]> read_data(new int[buffer_size]);
+    scoped_ring_buffer buffer(WebRtc_CreateBuffer(buffer_size, sizeof(int)));
+    ASSERT_TRUE(buffer.get() != nullptr);
+    WebRtc_InitBuffer(buffer.get());
+    int buffer_consumed = 0;
+    int write_element = 0;
+    int read_element = 0;
+    for (int j = 0; j < kNumOps; j++) {
+      const bool write = rand() % 2 == 0 ? true : false;  // NOLINT
+      const int num_elements = rand() % buffer_size;  // NOLINT
+      if (write) {
+        const int buffer_available = buffer_size - buffer_consumed;
+        ASSERT_EQ(static_cast<size_t>(buffer_available),
+                  WebRtc_available_write(buffer.get()));
+        const int expected_elements = std::min(num_elements, buffer_available);
+        write_element = SetIncrementingData(write_data.get(), expected_elements,
+                                     write_element);
+        ASSERT_EQ(static_cast<size_t>(expected_elements),
+                  WebRtc_WriteBuffer(buffer.get(), write_data.get(),
+                                     num_elements));
+        buffer_consumed = std::min(buffer_consumed + expected_elements,
+                                   buffer_size);
+      } else {
+        const int expected_elements = std::min(num_elements,
+                                               buffer_consumed);
+        ASSERT_EQ(static_cast<size_t>(buffer_consumed),
+                  WebRtc_available_read(buffer.get()));
+        ASSERT_EQ(static_cast<size_t>(expected_elements),
+                  WebRtc_ReadBuffer(buffer.get(),
+                                    reinterpret_cast<void**>(data_ptr),
+                                    read_data.get(),
+                                    num_elements));
+        int* check_ptr = read_data.get();
+        if (data_ptr) {
+          check_ptr = *data_ptr;
+        }
+        read_element = CheckIncrementingData(check_ptr, expected_elements,
+                                             read_element);
+        buffer_consumed = std::max(buffer_consumed - expected_elements, 0);
+      }
+    }
+  }
+}
+
+TEST(RingBufferTest, RandomStressTest) {
+  int* data_ptr = nullptr;
+  RandomStressTest(&data_ptr);
+}
+
+TEST(RingBufferTest, RandomStressTestWithNullPtr) {
+  RandomStressTest(nullptr);
+}
+
+TEST(RingBufferTest, PassingNulltoReadBufferForcesMemcpy) {
+  const size_t kDataSize = 2;
+  int write_data[kDataSize];
+  int read_data[kDataSize];
+  int* data_ptr;
+
+  scoped_ring_buffer buffer(WebRtc_CreateBuffer(kDataSize, sizeof(int)));
+  ASSERT_TRUE(buffer.get() != nullptr);
+  WebRtc_InitBuffer(buffer.get());
+
+  SetIncrementingData(write_data, kDataSize, 0);
+  EXPECT_EQ(kDataSize, WebRtc_WriteBuffer(buffer.get(), write_data, kDataSize));
+  SetIncrementingData(read_data, kDataSize, kDataSize);
+  EXPECT_EQ(kDataSize, WebRtc_ReadBuffer(buffer.get(),
+      reinterpret_cast<void**>(&data_ptr), read_data, kDataSize));
+  // Copying was not necessary, so |read_data| has not been updated.
+  CheckIncrementingData(data_ptr, kDataSize, 0);
+  CheckIncrementingData(read_data, kDataSize, kDataSize);
+
+  EXPECT_EQ(kDataSize, WebRtc_WriteBuffer(buffer.get(), write_data, kDataSize));
+  EXPECT_EQ(kDataSize,
+            WebRtc_ReadBuffer(buffer.get(), nullptr, read_data, kDataSize));
+  // Passing null forces a memcpy, so |read_data| is now updated.
+  CheckIncrementingData(read_data, kDataSize, 0);
+}
+
+TEST(RingBufferTest, CreateHandlesErrors) {
+  EXPECT_TRUE(WebRtc_CreateBuffer(0, 1) == nullptr);
+  EXPECT_TRUE(WebRtc_CreateBuffer(1, 0) == nullptr);
+  RingBuffer* buffer = WebRtc_CreateBuffer(1, 1);
+  EXPECT_TRUE(buffer != nullptr);
+  WebRtc_FreeBuffer(buffer);
+}
+
+}  // namespace webrtc
diff --git a/common_audio/signal_processing/auto_corr_to_refl_coef.c b/common_audio/signal_processing/auto_corr_to_refl_coef.c
new file mode 100644
index 0000000..a3ec24f
--- /dev/null
+++ b/common_audio/signal_processing/auto_corr_to_refl_coef.c
@@ -0,0 +1,103 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains the function WebRtcSpl_AutoCorrToReflCoef().
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+void WebRtcSpl_AutoCorrToReflCoef(const int32_t *R, int use_order, int16_t *K)
+{
+    int i, n;
+    int16_t tmp;
+    const int32_t *rptr;
+    int32_t L_num, L_den;
+    int16_t *acfptr, *pptr, *wptr, *p1ptr, *w1ptr, ACF[WEBRTC_SPL_MAX_LPC_ORDER],
+            P[WEBRTC_SPL_MAX_LPC_ORDER], W[WEBRTC_SPL_MAX_LPC_ORDER];
+
+    // Initialize loop and pointers.
+    acfptr = ACF;
+    rptr = R;
+    pptr = P;
+    p1ptr = &P[1];
+    w1ptr = &W[1];
+    wptr = w1ptr;
+
+    // First loop; n=0. Determine shifting.
+    tmp = WebRtcSpl_NormW32(*R);
+    *acfptr = (int16_t)((*rptr++ << tmp) >> 16);
+    *pptr++ = *acfptr++;
+
+    // Initialize ACF, P and W.
+    for (i = 1; i <= use_order; i++)
+    {
+        *acfptr = (int16_t)((*rptr++ << tmp) >> 16);
+        *wptr++ = *acfptr;
+        *pptr++ = *acfptr++;
+    }
+
+    // Compute reflection coefficients.
+    for (n = 1; n <= use_order; n++, K++)
+    {
+        tmp = WEBRTC_SPL_ABS_W16(*p1ptr);
+        if (*P < tmp)
+        {
+            for (i = n; i <= use_order; i++)
+                *K++ = 0;
+
+            return;
+        }
+
+        // Division: WebRtcSpl_div(tmp, *P)
+        *K = 0;
+        if (tmp != 0)
+        {
+            L_num = tmp;
+            L_den = *P;
+            i = 15;
+            while (i--)
+            {
+                (*K) <<= 1;
+                L_num <<= 1;
+                if (L_num >= L_den)
+                {
+                    L_num -= L_den;
+                    (*K)++;
+                }
+            }
+            if (*p1ptr > 0)
+                *K = -*K;
+        }
+
+        // Last iteration; don't do Schur recursion.
+        if (n == use_order)
+            return;
+
+        // Schur recursion.
+        pptr = P;
+        wptr = w1ptr;
+        tmp = (int16_t)(((int32_t)*p1ptr * (int32_t)*K + 16384) >> 15);
+        *pptr = WebRtcSpl_AddSatW16(*pptr, tmp);
+        pptr++;
+        for (i = 1; i <= use_order - n; i++)
+        {
+            tmp = (int16_t)(((int32_t)*wptr * (int32_t)*K + 16384) >> 15);
+            *pptr = WebRtcSpl_AddSatW16(*(pptr + 1), tmp);
+            pptr++;
+            tmp = (int16_t)(((int32_t)*pptr * (int32_t)*K + 16384) >> 15);
+            *wptr = WebRtcSpl_AddSatW16(*wptr, tmp);
+            wptr++;
+        }
+    }
+}
diff --git a/common_audio/signal_processing/auto_correlation.c b/common_audio/signal_processing/auto_correlation.c
new file mode 100644
index 0000000..1455820
--- /dev/null
+++ b/common_audio/signal_processing/auto_correlation.c
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+#include "rtc_base/checks.h"
+
+size_t WebRtcSpl_AutoCorrelation(const int16_t* in_vector,
+                                 size_t in_vector_length,
+                                 size_t order,
+                                 int32_t* result,
+                                 int* scale) {
+  int32_t sum = 0;
+  size_t i = 0, j = 0;
+  int16_t smax = 0;
+  int scaling = 0;
+
+  RTC_DCHECK_LE(order, in_vector_length);
+
+  // Find the maximum absolute value of the samples.
+  smax = WebRtcSpl_MaxAbsValueW16(in_vector, in_vector_length);
+
+  // In order to avoid overflow when computing the sum we should scale the
+  // samples so that (in_vector_length * smax * smax) will not overflow.
+  if (smax == 0) {
+    scaling = 0;
+  } else {
+    // Number of bits in the sum loop.
+    int nbits = WebRtcSpl_GetSizeInBits((uint32_t)in_vector_length);
+    // Number of bits to normalize smax.
+    int t = WebRtcSpl_NormW32(WEBRTC_SPL_MUL(smax, smax));
+
+    if (t > nbits) {
+      scaling = 0;
+    } else {
+      scaling = nbits - t;
+    }
+  }
+
+  // Perform the actual correlation calculation.
+  for (i = 0; i < order + 1; i++) {
+    sum = 0;
+    /* Unroll the loop to improve performance. */
+    for (j = 0; i + j + 3 < in_vector_length; j += 4) {
+      sum += (in_vector[j + 0] * in_vector[i + j + 0]) >> scaling;
+      sum += (in_vector[j + 1] * in_vector[i + j + 1]) >> scaling;
+      sum += (in_vector[j + 2] * in_vector[i + j + 2]) >> scaling;
+      sum += (in_vector[j + 3] * in_vector[i + j + 3]) >> scaling;
+    }
+    for (; j < in_vector_length - i; j++) {
+      sum += (in_vector[j] * in_vector[i + j]) >> scaling;
+    }
+    *result++ = sum;
+  }
+
+  *scale = scaling;
+  return order + 1;
+}
diff --git a/common_audio/signal_processing/complex_bit_reverse.c b/common_audio/signal_processing/complex_bit_reverse.c
new file mode 100644
index 0000000..1c82cff
--- /dev/null
+++ b/common_audio/signal_processing/complex_bit_reverse.c
@@ -0,0 +1,108 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+/* Tables for data buffer indexes that are bit reversed and thus need to be
+ * swapped. Note that, index_7[{0, 2, 4, ...}] are for the left side of the swap
+ * operations, while index_7[{1, 3, 5, ...}] are for the right side of the
+ * operation. Same for index_8.
+ */
+
+/* Indexes for the case of stages == 7. */
+static const int16_t index_7[112] = {
+  1, 64, 2, 32, 3, 96, 4, 16, 5, 80, 6, 48, 7, 112, 9, 72, 10, 40, 11, 104,
+  12, 24, 13, 88, 14, 56, 15, 120, 17, 68, 18, 36, 19, 100, 21, 84, 22, 52,
+  23, 116, 25, 76, 26, 44, 27, 108, 29, 92, 30, 60, 31, 124, 33, 66, 35, 98,
+  37, 82, 38, 50, 39, 114, 41, 74, 43, 106, 45, 90, 46, 58, 47, 122, 49, 70,
+  51, 102, 53, 86, 55, 118, 57, 78, 59, 110, 61, 94, 63, 126, 67, 97, 69,
+  81, 71, 113, 75, 105, 77, 89, 79, 121, 83, 101, 87, 117, 91, 109, 95, 125,
+  103, 115, 111, 123
+};
+
+/* Indexes for the case of stages == 8. */
+static const int16_t index_8[240] = {
+  1, 128, 2, 64, 3, 192, 4, 32, 5, 160, 6, 96, 7, 224, 8, 16, 9, 144, 10, 80,
+  11, 208, 12, 48, 13, 176, 14, 112, 15, 240, 17, 136, 18, 72, 19, 200, 20,
+  40, 21, 168, 22, 104, 23, 232, 25, 152, 26, 88, 27, 216, 28, 56, 29, 184,
+  30, 120, 31, 248, 33, 132, 34, 68, 35, 196, 37, 164, 38, 100, 39, 228, 41,
+  148, 42, 84, 43, 212, 44, 52, 45, 180, 46, 116, 47, 244, 49, 140, 50, 76,
+  51, 204, 53, 172, 54, 108, 55, 236, 57, 156, 58, 92, 59, 220, 61, 188, 62,
+  124, 63, 252, 65, 130, 67, 194, 69, 162, 70, 98, 71, 226, 73, 146, 74, 82,
+  75, 210, 77, 178, 78, 114, 79, 242, 81, 138, 83, 202, 85, 170, 86, 106, 87,
+  234, 89, 154, 91, 218, 93, 186, 94, 122, 95, 250, 97, 134, 99, 198, 101,
+  166, 103, 230, 105, 150, 107, 214, 109, 182, 110, 118, 111, 246, 113, 142,
+  115, 206, 117, 174, 119, 238, 121, 158, 123, 222, 125, 190, 127, 254, 131,
+  193, 133, 161, 135, 225, 137, 145, 139, 209, 141, 177, 143, 241, 147, 201,
+  149, 169, 151, 233, 155, 217, 157, 185, 159, 249, 163, 197, 167, 229, 171,
+  213, 173, 181, 175, 245, 179, 205, 183, 237, 187, 221, 191, 253, 199, 227,
+  203, 211, 207, 243, 215, 235, 223, 251, 239, 247
+};
+
+void WebRtcSpl_ComplexBitReverse(int16_t* __restrict complex_data, int stages) {
+  /* For any specific value of stages, we know exactly the indexes that are
+   * bit reversed. Currently (Feb. 2012) in WebRTC the only possible values of
+   * stages are 7 and 8, so we use tables to save unnecessary iterations and
+   * calculations for these two cases.
+   */
+  if (stages == 7 || stages == 8) {
+    int m = 0;
+    int length = 112;
+    const int16_t* index = index_7;
+
+    if (stages == 8) {
+      length = 240;
+      index = index_8;
+    }
+
+    /* Decimation in time. Swap the elements with bit-reversed indexes. */
+    for (m = 0; m < length; m += 2) {
+      /* We declare a int32_t* type pointer, to load both the 16-bit real
+       * and imaginary elements from complex_data in one instruction, reducing
+       * complexity.
+       */
+      int32_t* complex_data_ptr = (int32_t*)complex_data;
+      int32_t temp = 0;
+
+      temp = complex_data_ptr[index[m]];  /* Real and imaginary */
+      complex_data_ptr[index[m]] = complex_data_ptr[index[m + 1]];
+      complex_data_ptr[index[m + 1]] = temp;
+    }
+  }
+  else {
+    int m = 0, mr = 0, l = 0;
+    int n = 1 << stages;
+    int nn = n - 1;
+
+    /* Decimation in time - re-order data */
+    for (m = 1; m <= nn; ++m) {
+      int32_t* complex_data_ptr = (int32_t*)complex_data;
+      int32_t temp = 0;
+
+      /* Find out indexes that are bit-reversed. */
+      l = n;
+      do {
+        l >>= 1;
+      } while (l > nn - mr);
+      mr = (mr & (l - 1)) + l;
+
+      if (mr <= m) {
+        continue;
+      }
+
+      /* Swap the elements with bit-reversed indexes.
+       * This is similar to the loop in the stages == 7 or 8 cases.
+       */
+      temp = complex_data_ptr[m];  /* Real and imaginary */
+      complex_data_ptr[m] = complex_data_ptr[mr];
+      complex_data_ptr[mr] = temp;
+    }
+  }
+}
diff --git a/common_audio/signal_processing/complex_bit_reverse_arm.S b/common_audio/signal_processing/complex_bit_reverse_arm.S
new file mode 100644
index 0000000..c70349a
--- /dev/null
+++ b/common_audio/signal_processing/complex_bit_reverse_arm.S
@@ -0,0 +1,119 @@
+@
+@ Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+@
+@ Use of this source code is governed by a BSD-style license
+@ that can be found in the LICENSE file in the root of the source
+@ tree. An additional intellectual property rights grant can be found
+@ in the file PATENTS.  All contributing project authors may
+@ be found in the AUTHORS file in the root of the source tree.
+@
+
+@ This file contains the function WebRtcSpl_ComplexBitReverse(), optimized
+@ for ARMv5 platforms.
+@ Reference C code is in file complex_bit_reverse.c. Bit-exact.
+
+#include "system_wrappers/include/asm_defines.h"
+
+GLOBAL_FUNCTION WebRtcSpl_ComplexBitReverse
+.align  2
+DEFINE_FUNCTION WebRtcSpl_ComplexBitReverse
+  push {r4-r7}
+
+  cmp r1, #7
+  adr r3, index_7                 @ Table pointer.
+  mov r4, #112                    @ Number of interations.
+  beq PRE_LOOP_STAGES_7_OR_8
+
+  cmp r1, #8
+  adr r3, index_8                 @ Table pointer.
+  mov r4, #240                    @ Number of interations.
+  beq PRE_LOOP_STAGES_7_OR_8
+
+  mov r3, #1                      @ Initialize m.
+  mov r1, r3, asl r1              @ n = 1 << stages;
+  subs r6, r1, #1                 @ nn = n - 1;
+  ble END
+
+  mov r5, r0                      @ &complex_data
+  mov r4, #0                      @ ml
+
+LOOP_GENERIC:
+  rsb r12, r4, r6                 @ l > nn - mr
+  mov r2, r1                      @ n
+
+LOOP_SHIFT:
+  asr r2, #1                      @ l >>= 1;
+  cmp r2, r12
+  bgt LOOP_SHIFT
+
+  sub r12, r2, #1
+  and r4, r12, r4
+  add r4, r2                      @ mr = (mr & (l - 1)) + l;
+  cmp r4, r3                      @ mr <= m ?
+  ble UPDATE_REGISTERS
+
+  mov r12, r4, asl #2
+  ldr r7, [r5, #4]                @ complex_data[2 * m, 2 * m + 1].
+                                  @   Offset 4 due to m incrementing from 1.
+  ldr r2, [r0, r12]               @ complex_data[2 * mr, 2 * mr + 1].
+  str r7, [r0, r12]
+  str r2, [r5, #4]
+
+UPDATE_REGISTERS:
+  add r3, r3, #1
+  add r5, #4
+  cmp r3, r1
+  bne LOOP_GENERIC
+
+  b END
+
+PRE_LOOP_STAGES_7_OR_8:
+  add r4, r3, r4, asl #1
+
+LOOP_STAGES_7_OR_8:
+  ldrsh r2, [r3], #2              @ index[m]
+  ldrsh r5, [r3], #2              @ index[m + 1]
+  ldr r1, [r0, r2]                @ complex_data[index[m], index[m] + 1]
+  ldr r12, [r0, r5]               @ complex_data[index[m + 1], index[m + 1] + 1]
+  cmp r3, r4
+  str r1, [r0, r5]
+  str r12, [r0, r2]
+  bne LOOP_STAGES_7_OR_8
+
+END:
+  pop {r4-r7}
+  bx lr
+
+@ The index tables. Note the values are doubles of the actual indexes for 16-bit
+@ elements, different from the generic C code. It actually provides byte offsets
+@ for the indexes.
+
+.align  2
+index_7:  @ Indexes for stages == 7.
+  .short 4, 256, 8, 128, 12, 384, 16, 64, 20, 320, 24, 192, 28, 448, 36, 288
+  .short 40, 160, 44, 416, 48, 96, 52, 352, 56, 224, 60, 480, 68, 272, 72, 144
+  .short 76, 400, 84, 336, 88, 208, 92, 464, 100, 304, 104, 176, 108, 432, 116
+  .short 368, 120, 240, 124, 496, 132, 264, 140, 392, 148, 328, 152, 200, 156
+  .short 456, 164, 296, 172, 424, 180, 360, 184, 232, 188, 488, 196, 280, 204
+  .short 408, 212, 344, 220, 472, 228, 312, 236, 440, 244, 376, 252, 504, 268
+  .short 388, 276, 324, 284, 452, 300, 420, 308, 356, 316, 484, 332, 404, 348
+  .short 468, 364, 436, 380, 500, 412, 460, 444, 492
+
+index_8:  @ Indexes for stages == 8.
+  .short 4, 512, 8, 256, 12, 768, 16, 128, 20, 640, 24, 384, 28, 896, 32, 64
+  .short 36, 576, 40, 320, 44, 832, 48, 192, 52, 704, 56, 448, 60, 960, 68, 544
+  .short 72, 288, 76, 800, 80, 160, 84, 672, 88, 416, 92, 928, 100, 608, 104
+  .short 352, 108, 864, 112, 224, 116, 736, 120, 480, 124, 992, 132, 528, 136
+  .short 272, 140, 784, 148, 656, 152, 400, 156, 912, 164, 592, 168, 336, 172
+  .short 848, 176, 208, 180, 720, 184, 464, 188, 976, 196, 560, 200, 304, 204
+  .short 816, 212, 688, 216, 432, 220, 944, 228, 624, 232, 368, 236, 880, 244
+  .short 752, 248, 496, 252, 1008, 260, 520, 268, 776, 276, 648, 280, 392, 284
+  .short 904, 292, 584, 296, 328, 300, 840, 308, 712, 312, 456, 316, 968, 324
+  .short 552, 332, 808, 340, 680, 344, 424, 348, 936, 356, 616, 364, 872, 372
+  .short 744, 376, 488, 380, 1000, 388, 536, 396, 792, 404, 664, 412, 920, 420
+  .short 600, 428, 856, 436, 728, 440, 472, 444, 984, 452, 568, 460, 824, 468
+  .short 696, 476, 952, 484, 632, 492, 888, 500, 760, 508, 1016, 524, 772, 532
+  .short 644, 540, 900, 548, 580, 556, 836, 564, 708, 572, 964, 588, 804, 596
+  .short 676, 604, 932, 620, 868, 628, 740, 636, 996, 652, 788, 668, 916, 684
+  .short 852, 692, 724, 700, 980, 716, 820, 732, 948, 748, 884, 764, 1012, 796
+  .short 908, 812, 844, 828, 972, 860, 940, 892, 1004, 956, 988
diff --git a/common_audio/signal_processing/complex_bit_reverse_mips.c b/common_audio/signal_processing/complex_bit_reverse_mips.c
new file mode 100644
index 0000000..9007b19
--- /dev/null
+++ b/common_audio/signal_processing/complex_bit_reverse_mips.c
@@ -0,0 +1,176 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+static int16_t coefTable_7[] = {
+    4, 256,   8, 128,  12, 384,  16,  64,
+   20, 320,  24, 192,  28, 448,  36, 288,
+   40, 160,  44, 416,  48,  96,  52, 352,
+   56, 224,  60, 480,  68, 272,  72, 144,
+   76, 400,  84, 336,  88, 208,  92, 464,
+  100, 304, 104, 176, 108, 432, 116, 368,
+  120, 240, 124, 496, 132, 264, 140, 392,
+  148, 328, 152, 200, 156, 456, 164, 296,
+  172, 424, 180, 360, 184, 232, 188, 488,
+  196, 280, 204, 408, 212, 344, 220, 472,
+  228, 312, 236, 440, 244, 376, 252, 504,
+  268, 388, 276, 324, 284, 452, 300, 420,
+  308, 356, 316, 484, 332, 404, 348, 468,
+  364, 436, 380, 500, 412, 460, 444, 492
+};
+
+static int16_t coefTable_8[] = {
+    4,  512,    8,  256,   12,  768,   16,  128,
+   20,  640,   24,  384,   28,  896,   32,   64,
+   36,  576,   40,  320,   44,  832,   48,  192,
+   52,  704,   56,  448,   60,  960,   68,  544,
+   72,  288,   76,  800,   80,  160,   84,  672,
+   88,  416,   92,  928,  100,  608,  104,  352,
+  108,  864,  112,  224,  116,  736,  120,  480,
+  124,  992,  132,  528,  136,  272,  140,  784,
+  148,  656,  152,  400,  156,  912,  164,  592,
+  168,  336,  172,  848,  176,  208,  180,  720,
+  184,  464,  188,  976,  196,  560,  200,  304,
+  204,  816,  212,  688,  216,  432,  220,  944,
+  228,  624,  232,  368,  236,  880,  244,  752,
+  248,  496,  252, 1008,  260,  520,  268,  776,
+  276,  648,  280,  392,  284,  904,  292,  584,
+  296,  328,  300,  840,  308,  712,  312,  456,
+  316,  968,  324,  552,  332,  808,  340,  680,
+  344,  424,  348,  936,  356,  616,  364,  872,
+  372,  744,  376,  488,  380, 1000,  388,  536,
+  396,  792,  404,  664,  412,  920,  420,  600,
+  428,  856,  436,  728,  440,  472,  444,  984,
+  452,  568,  460,  824,  468,  696,  476,  952,
+  484,  632,  492,  888,  500,  760,  508, 1016,
+  524,  772,  532,  644,  540,  900,  548,  580,
+  556,  836,  564,  708,  572,  964,  588,  804,
+  596,  676,  604,  932,  620,  868,  628,  740,
+  636,  996,  652,  788,  668,  916,  684,  852,
+  692,  724,  700,  980,  716,  820,  732,  948,
+  748,  884,  764, 1012,  796,  908,  812,  844,
+  828,  972,  860,  940,  892, 1004,  956,  988
+};
+
+void WebRtcSpl_ComplexBitReverse(int16_t frfi[], int stages) {
+  int l;
+  int16_t tr, ti;
+  int32_t tmp1, tmp2, tmp3, tmp4;
+  int32_t* ptr_i;
+  int32_t* ptr_j;
+
+  if (stages == 8) {
+    int16_t* pcoeftable_8 = coefTable_8;
+
+    __asm __volatile (
+      ".set         push                                             \n\t"
+      ".set         noreorder                                        \n\t"
+      "addiu        %[l],            $zero,               120        \n\t"
+     "1:                                                             \n\t"
+      "addiu        %[l],            %[l],                -4         \n\t"
+      "lh           %[tr],           0(%[pcoeftable_8])              \n\t"
+      "lh           %[ti],           2(%[pcoeftable_8])              \n\t"
+      "lh           %[tmp3],         4(%[pcoeftable_8])              \n\t"
+      "lh           %[tmp4],         6(%[pcoeftable_8])              \n\t"
+      "addu         %[ptr_i],        %[frfi],             %[tr]      \n\t"
+      "addu         %[ptr_j],        %[frfi],             %[ti]      \n\t"
+      "addu         %[tr],           %[frfi],             %[tmp3]    \n\t"
+      "addu         %[ti],           %[frfi],             %[tmp4]    \n\t"
+      "ulw          %[tmp1],         0(%[ptr_i])                     \n\t"
+      "ulw          %[tmp2],         0(%[ptr_j])                     \n\t"
+      "ulw          %[tmp3],         0(%[tr])                        \n\t"
+      "ulw          %[tmp4],         0(%[ti])                        \n\t"
+      "usw          %[tmp1],         0(%[ptr_j])                     \n\t"
+      "usw          %[tmp2],         0(%[ptr_i])                     \n\t"
+      "usw          %[tmp4],         0(%[tr])                        \n\t"
+      "usw          %[tmp3],         0(%[ti])                        \n\t"
+      "lh           %[tmp1],         8(%[pcoeftable_8])              \n\t"
+      "lh           %[tmp2],         10(%[pcoeftable_8])             \n\t"
+      "lh           %[tr],           12(%[pcoeftable_8])             \n\t"
+      "lh           %[ti],           14(%[pcoeftable_8])             \n\t"
+      "addu         %[ptr_i],        %[frfi],             %[tmp1]    \n\t"
+      "addu         %[ptr_j],        %[frfi],             %[tmp2]    \n\t"
+      "addu         %[tr],           %[frfi],             %[tr]      \n\t"
+      "addu         %[ti],           %[frfi],             %[ti]      \n\t"
+      "ulw          %[tmp1],         0(%[ptr_i])                     \n\t"
+      "ulw          %[tmp2],         0(%[ptr_j])                     \n\t"
+      "ulw          %[tmp3],         0(%[tr])                        \n\t"
+      "ulw          %[tmp4],         0(%[ti])                        \n\t"
+      "usw          %[tmp1],         0(%[ptr_j])                     \n\t"
+      "usw          %[tmp2],         0(%[ptr_i])                     \n\t"
+      "usw          %[tmp4],         0(%[tr])                        \n\t"
+      "usw          %[tmp3],         0(%[ti])                        \n\t"
+      "bgtz         %[l],            1b                              \n\t"
+      " addiu       %[pcoeftable_8], %[pcoeftable_8],     16         \n\t"
+      ".set         pop                                              \n\t"
+
+      : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [ptr_i] "=&r" (ptr_i),
+        [ptr_j] "=&r" (ptr_j), [tr] "=&r" (tr), [l] "=&r" (l),
+        [tmp3] "=&r" (tmp3), [pcoeftable_8] "+r" (pcoeftable_8),
+        [ti] "=&r" (ti), [tmp4] "=&r" (tmp4)
+      : [frfi] "r" (frfi)
+      : "memory"
+    );
+  } else if (stages == 7) {
+    int16_t* pcoeftable_7 = coefTable_7;
+
+    __asm __volatile (
+      ".set push                                                     \n\t"
+      ".set noreorder                                                \n\t"
+      "addiu        %[l],            $zero,               56         \n\t"
+     "1:                                                             \n\t"
+      "addiu        %[l],            %[l],                -4         \n\t"
+      "lh           %[tr],           0(%[pcoeftable_7])              \n\t"
+      "lh           %[ti],           2(%[pcoeftable_7])              \n\t"
+      "lh           %[tmp3],         4(%[pcoeftable_7])              \n\t"
+      "lh           %[tmp4],         6(%[pcoeftable_7])              \n\t"
+      "addu         %[ptr_i],        %[frfi],             %[tr]      \n\t"
+      "addu         %[ptr_j],        %[frfi],             %[ti]      \n\t"
+      "addu         %[tr],           %[frfi],             %[tmp3]    \n\t"
+      "addu         %[ti],           %[frfi],             %[tmp4]    \n\t"
+      "ulw          %[tmp1],         0(%[ptr_i])                     \n\t"
+      "ulw          %[tmp2],         0(%[ptr_j])                     \n\t"
+      "ulw          %[tmp3],         0(%[tr])                        \n\t"
+      "ulw          %[tmp4],         0(%[ti])                        \n\t"
+      "usw          %[tmp1],         0(%[ptr_j])                     \n\t"
+      "usw          %[tmp2],         0(%[ptr_i])                     \n\t"
+      "usw          %[tmp4],         0(%[tr])                        \n\t"
+      "usw          %[tmp3],         0(%[ti])                        \n\t"
+      "lh           %[tmp1],         8(%[pcoeftable_7])              \n\t"
+      "lh           %[tmp2],         10(%[pcoeftable_7])             \n\t"
+      "lh           %[tr],           12(%[pcoeftable_7])             \n\t"
+      "lh           %[ti],           14(%[pcoeftable_7])             \n\t"
+      "addu         %[ptr_i],        %[frfi],             %[tmp1]    \n\t"
+      "addu         %[ptr_j],        %[frfi],             %[tmp2]    \n\t"
+      "addu         %[tr],           %[frfi],             %[tr]      \n\t"
+      "addu         %[ti],           %[frfi],             %[ti]      \n\t"
+      "ulw          %[tmp1],         0(%[ptr_i])                     \n\t"
+      "ulw          %[tmp2],         0(%[ptr_j])                     \n\t"
+      "ulw          %[tmp3],         0(%[tr])                        \n\t"
+      "ulw          %[tmp4],         0(%[ti])                        \n\t"
+      "usw          %[tmp1],         0(%[ptr_j])                     \n\t"
+      "usw          %[tmp2],         0(%[ptr_i])                     \n\t"
+      "usw          %[tmp4],         0(%[tr])                        \n\t"
+      "usw          %[tmp3],         0(%[ti])                        \n\t"
+      "bgtz         %[l],            1b                              \n\t"
+      " addiu       %[pcoeftable_7], %[pcoeftable_7],     16         \n\t"
+      ".set pop                                                      \n\t"
+
+      : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [ptr_i] "=&r" (ptr_i),
+        [ptr_j] "=&r" (ptr_j), [ti] "=&r" (ti), [tr] "=&r" (tr),
+        [l] "=&r" (l), [pcoeftable_7] "+r" (pcoeftable_7),
+        [tmp3] "=&r" (tmp3), [tmp4] "=&r" (tmp4)
+      : [frfi] "r" (frfi)
+      : "memory"
+    );
+  }
+}
diff --git a/common_audio/signal_processing/complex_fft.c b/common_audio/signal_processing/complex_fft.c
new file mode 100644
index 0000000..36689b3
--- /dev/null
+++ b/common_audio/signal_processing/complex_fft.c
@@ -0,0 +1,298 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains the function WebRtcSpl_ComplexFFT().
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#include "common_audio/signal_processing/complex_fft_tables.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+#define CFFTSFT 14
+#define CFFTRND 1
+#define CFFTRND2 16384
+
+#define CIFFTSFT 14
+#define CIFFTRND 1
+
+
+int WebRtcSpl_ComplexFFT(int16_t frfi[], int stages, int mode)
+{
+    int i, j, l, k, istep, n, m;
+    int16_t wr, wi;
+    int32_t tr32, ti32, qr32, qi32;
+
+    /* The 1024-value is a constant given from the size of kSinTable1024[],
+     * and should not be changed depending on the input parameter 'stages'
+     */
+    n = 1 << stages;
+    if (n > 1024)
+        return -1;
+
+    l = 1;
+    k = 10 - 1; /* Constant for given kSinTable1024[]. Do not change
+         depending on the input parameter 'stages' */
+
+    if (mode == 0)
+    {
+        // mode==0: Low-complexity and Low-accuracy mode
+        while (l < n)
+        {
+            istep = l << 1;
+
+            for (m = 0; m < l; ++m)
+            {
+                j = m << k;
+
+                /* The 256-value is a constant given as 1/4 of the size of
+                 * kSinTable1024[], and should not be changed depending on the input
+                 * parameter 'stages'. It will result in 0 <= j < N_SINE_WAVE/2
+                 */
+                wr = kSinTable1024[j + 256];
+                wi = -kSinTable1024[j];
+
+                for (i = m; i < n; i += istep)
+                {
+                    j = i + l;
+
+                    tr32 = (wr * frfi[2 * j] - wi * frfi[2 * j + 1]) >> 15;
+
+                    ti32 = (wr * frfi[2 * j + 1] + wi * frfi[2 * j]) >> 15;
+
+                    qr32 = (int32_t)frfi[2 * i];
+                    qi32 = (int32_t)frfi[2 * i + 1];
+                    frfi[2 * j] = (int16_t)((qr32 - tr32) >> 1);
+                    frfi[2 * j + 1] = (int16_t)((qi32 - ti32) >> 1);
+                    frfi[2 * i] = (int16_t)((qr32 + tr32) >> 1);
+                    frfi[2 * i + 1] = (int16_t)((qi32 + ti32) >> 1);
+                }
+            }
+
+            --k;
+            l = istep;
+
+        }
+
+    } else
+    {
+        // mode==1: High-complexity and High-accuracy mode
+        while (l < n)
+        {
+            istep = l << 1;
+
+            for (m = 0; m < l; ++m)
+            {
+                j = m << k;
+
+                /* The 256-value is a constant given as 1/4 of the size of
+                 * kSinTable1024[], and should not be changed depending on the input
+                 * parameter 'stages'. It will result in 0 <= j < N_SINE_WAVE/2
+                 */
+                wr = kSinTable1024[j + 256];
+                wi = -kSinTable1024[j];
+
+#ifdef WEBRTC_ARCH_ARM_V7
+                int32_t wri = 0;
+                __asm __volatile("pkhbt %0, %1, %2, lsl #16" : "=r"(wri) :
+                    "r"((int32_t)wr), "r"((int32_t)wi));
+#endif
+
+                for (i = m; i < n; i += istep)
+                {
+                    j = i + l;
+
+#ifdef WEBRTC_ARCH_ARM_V7
+                    register int32_t frfi_r;
+                    __asm __volatile(
+                        "pkhbt %[frfi_r], %[frfi_even], %[frfi_odd],"
+                        " lsl #16\n\t"
+                        "smlsd %[tr32], %[wri], %[frfi_r], %[cfftrnd]\n\t"
+                        "smladx %[ti32], %[wri], %[frfi_r], %[cfftrnd]\n\t"
+                        :[frfi_r]"=&r"(frfi_r),
+                         [tr32]"=&r"(tr32),
+                         [ti32]"=r"(ti32)
+                        :[frfi_even]"r"((int32_t)frfi[2*j]),
+                         [frfi_odd]"r"((int32_t)frfi[2*j +1]),
+                         [wri]"r"(wri),
+                         [cfftrnd]"r"(CFFTRND));
+#else
+                    tr32 = wr * frfi[2 * j] - wi * frfi[2 * j + 1] + CFFTRND;
+
+                    ti32 = wr * frfi[2 * j + 1] + wi * frfi[2 * j] + CFFTRND;
+#endif
+
+                    tr32 >>= 15 - CFFTSFT;
+                    ti32 >>= 15 - CFFTSFT;
+
+                    qr32 = ((int32_t)frfi[2 * i]) * (1 << CFFTSFT);
+                    qi32 = ((int32_t)frfi[2 * i + 1]) * (1 << CFFTSFT);
+
+                    frfi[2 * j] = (int16_t)(
+                        (qr32 - tr32 + CFFTRND2) >> (1 + CFFTSFT));
+                    frfi[2 * j + 1] = (int16_t)(
+                        (qi32 - ti32 + CFFTRND2) >> (1 + CFFTSFT));
+                    frfi[2 * i] = (int16_t)(
+                        (qr32 + tr32 + CFFTRND2) >> (1 + CFFTSFT));
+                    frfi[2 * i + 1] = (int16_t)(
+                        (qi32 + ti32 + CFFTRND2) >> (1 + CFFTSFT));
+                }
+            }
+
+            --k;
+            l = istep;
+        }
+    }
+    return 0;
+}
+
+int WebRtcSpl_ComplexIFFT(int16_t frfi[], int stages, int mode)
+{
+    size_t i, j, l, istep, n, m;
+    int k, scale, shift;
+    int16_t wr, wi;
+    int32_t tr32, ti32, qr32, qi32;
+    int32_t tmp32, round2;
+
+    /* The 1024-value is a constant given from the size of kSinTable1024[],
+     * and should not be changed depending on the input parameter 'stages'
+     */
+    n = 1 << stages;
+    if (n > 1024)
+        return -1;
+
+    scale = 0;
+
+    l = 1;
+    k = 10 - 1; /* Constant for given kSinTable1024[]. Do not change
+         depending on the input parameter 'stages' */
+
+    while (l < n)
+    {
+        // variable scaling, depending upon data
+        shift = 0;
+        round2 = 8192;
+
+        tmp32 = WebRtcSpl_MaxAbsValueW16(frfi, 2 * n);
+        if (tmp32 > 13573)
+        {
+            shift++;
+            scale++;
+            round2 <<= 1;
+        }
+        if (tmp32 > 27146)
+        {
+            shift++;
+            scale++;
+            round2 <<= 1;
+        }
+
+        istep = l << 1;
+
+        if (mode == 0)
+        {
+            // mode==0: Low-complexity and Low-accuracy mode
+            for (m = 0; m < l; ++m)
+            {
+                j = m << k;
+
+                /* The 256-value is a constant given as 1/4 of the size of
+                 * kSinTable1024[], and should not be changed depending on the input
+                 * parameter 'stages'. It will result in 0 <= j < N_SINE_WAVE/2
+                 */
+                wr = kSinTable1024[j + 256];
+                wi = kSinTable1024[j];
+
+                for (i = m; i < n; i += istep)
+                {
+                    j = i + l;
+
+                    tr32 = (wr * frfi[2 * j] - wi * frfi[2 * j + 1]) >> 15;
+
+                    ti32 = (wr * frfi[2 * j + 1] + wi * frfi[2 * j]) >> 15;
+
+                    qr32 = (int32_t)frfi[2 * i];
+                    qi32 = (int32_t)frfi[2 * i + 1];
+                    frfi[2 * j] = (int16_t)((qr32 - tr32) >> shift);
+                    frfi[2 * j + 1] = (int16_t)((qi32 - ti32) >> shift);
+                    frfi[2 * i] = (int16_t)((qr32 + tr32) >> shift);
+                    frfi[2 * i + 1] = (int16_t)((qi32 + ti32) >> shift);
+                }
+            }
+        } else
+        {
+            // mode==1: High-complexity and High-accuracy mode
+
+            for (m = 0; m < l; ++m)
+            {
+                j = m << k;
+
+                /* The 256-value is a constant given as 1/4 of the size of
+                 * kSinTable1024[], and should not be changed depending on the input
+                 * parameter 'stages'. It will result in 0 <= j < N_SINE_WAVE/2
+                 */
+                wr = kSinTable1024[j + 256];
+                wi = kSinTable1024[j];
+
+#ifdef WEBRTC_ARCH_ARM_V7
+                int32_t wri = 0;
+                __asm __volatile("pkhbt %0, %1, %2, lsl #16" : "=r"(wri) :
+                    "r"((int32_t)wr), "r"((int32_t)wi));
+#endif
+
+                for (i = m; i < n; i += istep)
+                {
+                    j = i + l;
+
+#ifdef WEBRTC_ARCH_ARM_V7
+                    register int32_t frfi_r;
+                    __asm __volatile(
+                      "pkhbt %[frfi_r], %[frfi_even], %[frfi_odd], lsl #16\n\t"
+                      "smlsd %[tr32], %[wri], %[frfi_r], %[cifftrnd]\n\t"
+                      "smladx %[ti32], %[wri], %[frfi_r], %[cifftrnd]\n\t"
+                      :[frfi_r]"=&r"(frfi_r),
+                       [tr32]"=&r"(tr32),
+                       [ti32]"=r"(ti32)
+                      :[frfi_even]"r"((int32_t)frfi[2*j]),
+                       [frfi_odd]"r"((int32_t)frfi[2*j +1]),
+                       [wri]"r"(wri),
+                       [cifftrnd]"r"(CIFFTRND)
+                    );
+#else
+
+                    tr32 = wr * frfi[2 * j] - wi * frfi[2 * j + 1] + CIFFTRND;
+
+                    ti32 = wr * frfi[2 * j + 1] + wi * frfi[2 * j] + CIFFTRND;
+#endif
+                    tr32 >>= 15 - CIFFTSFT;
+                    ti32 >>= 15 - CIFFTSFT;
+
+                    qr32 = ((int32_t)frfi[2 * i]) * (1 << CIFFTSFT);
+                    qi32 = ((int32_t)frfi[2 * i + 1]) * (1 << CIFFTSFT);
+
+                    frfi[2 * j] = (int16_t)(
+                        (qr32 - tr32 + round2) >> (shift + CIFFTSFT));
+                    frfi[2 * j + 1] = (int16_t)(
+                        (qi32 - ti32 + round2) >> (shift + CIFFTSFT));
+                    frfi[2 * i] = (int16_t)(
+                        (qr32 + tr32 + round2) >> (shift + CIFFTSFT));
+                    frfi[2 * i + 1] = (int16_t)(
+                        (qi32 + ti32 + round2) >> (shift + CIFFTSFT));
+                }
+            }
+
+        }
+        --k;
+        l = istep;
+    }
+    return scale;
+}
diff --git a/common_audio/signal_processing/complex_fft_mips.c b/common_audio/signal_processing/complex_fft_mips.c
new file mode 100644
index 0000000..27071f8
--- /dev/null
+++ b/common_audio/signal_processing/complex_fft_mips.c
@@ -0,0 +1,328 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "common_audio/signal_processing/complex_fft_tables.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+#define CFFTSFT 14
+#define CFFTRND 1
+#define CFFTRND2 16384
+
+#define CIFFTSFT 14
+#define CIFFTRND 1
+
+int WebRtcSpl_ComplexFFT(int16_t frfi[], int stages, int mode) {
+  int i = 0;
+  int l = 0;
+  int k = 0;
+  int istep = 0;
+  int n = 0;
+  int m = 0;
+  int32_t wr = 0, wi = 0;
+  int32_t tmp1 = 0;
+  int32_t tmp2 = 0;
+  int32_t tmp3 = 0;
+  int32_t tmp4 = 0;
+  int32_t tmp5 = 0;
+  int32_t tmp6 = 0;
+  int32_t tmp = 0;
+  int16_t* ptr_j = NULL;
+  int16_t* ptr_i = NULL;
+
+  n = 1 << stages;
+  if (n > 1024) {
+    return -1;
+  }
+
+  __asm __volatile (
+    ".set push                                                         \n\t"
+    ".set noreorder                                                    \n\t"
+
+    "addiu      %[k],           $zero,            10                   \n\t"
+    "addiu      %[l],           $zero,            1                    \n\t"
+   "3:                                                                 \n\t"
+    "sll        %[istep],       %[l],             1                    \n\t"
+    "move       %[m],           $zero                                  \n\t"
+    "sll        %[tmp],         %[l],             2                    \n\t"
+    "move       %[i],           $zero                                  \n\t"
+   "2:                                                                 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "sllv       %[tmp3],        %[m],             %[k]                 \n\t"
+    "addiu      %[tmp2],        %[tmp3],          512                  \n\t"
+    "addiu      %[m],           %[m],             1                    \n\t"
+    "lhx        %[wi],          %[tmp3](%[kSinTable1024])              \n\t"
+    "lhx        %[wr],          %[tmp2](%[kSinTable1024])              \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "sllv       %[tmp3],        %[m],             %[k]                 \n\t"
+    "addu       %[ptr_j],       %[tmp3],          %[kSinTable1024]     \n\t"
+    "addiu      %[ptr_i],       %[ptr_j],         512                  \n\t"
+    "addiu      %[m],           %[m],             1                    \n\t"
+    "lh         %[wi],          0(%[ptr_j])                            \n\t"
+    "lh         %[wr],          0(%[ptr_i])                            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+   "1:                                                                 \n\t"
+    "sll        %[tmp1],        %[i],             2                    \n\t"
+    "addu       %[ptr_i],       %[frfi],          %[tmp1]              \n\t"
+    "addu       %[ptr_j],       %[ptr_i],         %[tmp]               \n\t"
+    "lh         %[tmp6],        0(%[ptr_i])                            \n\t"
+    "lh         %[tmp5],        2(%[ptr_i])                            \n\t"
+    "lh         %[tmp3],        0(%[ptr_j])                            \n\t"
+    "lh         %[tmp4],        2(%[ptr_j])                            \n\t"
+    "addu       %[i],           %[i],             %[istep]             \n\t"
+#if defined(MIPS_DSP_R2_LE)
+    "mult       %[wr],          %[tmp3]                                \n\t"
+    "madd       %[wi],          %[tmp4]                                \n\t"
+    "mult       $ac1,           %[wr],            %[tmp4]              \n\t"
+    "msub       $ac1,           %[wi],            %[tmp3]              \n\t"
+    "mflo       %[tmp1]                                                \n\t"
+    "mflo       %[tmp2],        $ac1                                   \n\t"
+    "sll        %[tmp6],        %[tmp6],          14                   \n\t"
+    "sll        %[tmp5],        %[tmp5],          14                   \n\t"
+    "shra_r.w   %[tmp1],        %[tmp1],          1                    \n\t"
+    "shra_r.w   %[tmp2],        %[tmp2],          1                    \n\t"
+    "subu       %[tmp4],        %[tmp6],          %[tmp1]              \n\t"
+    "addu       %[tmp1],        %[tmp6],          %[tmp1]              \n\t"
+    "addu       %[tmp6],        %[tmp5],          %[tmp2]              \n\t"
+    "subu       %[tmp5],        %[tmp5],          %[tmp2]              \n\t"
+    "shra_r.w   %[tmp1],        %[tmp1],          15                   \n\t"
+    "shra_r.w   %[tmp6],        %[tmp6],          15                   \n\t"
+    "shra_r.w   %[tmp4],        %[tmp4],          15                   \n\t"
+    "shra_r.w   %[tmp5],        %[tmp5],          15                   \n\t"
+#else  // #if defined(MIPS_DSP_R2_LE)
+    "mul        %[tmp2],        %[wr],            %[tmp4]              \n\t"
+    "mul        %[tmp1],        %[wr],            %[tmp3]              \n\t"
+    "mul        %[tmp4],        %[wi],            %[tmp4]              \n\t"
+    "mul        %[tmp3],        %[wi],            %[tmp3]              \n\t"
+    "sll        %[tmp6],        %[tmp6],          14                   \n\t"
+    "sll        %[tmp5],        %[tmp5],          14                   \n\t"
+    "addiu      %[tmp6],        %[tmp6],          16384                \n\t"
+    "addiu      %[tmp5],        %[tmp5],          16384                \n\t"
+    "addu       %[tmp1],        %[tmp1],          %[tmp4]              \n\t"
+    "subu       %[tmp2],        %[tmp2],          %[tmp3]              \n\t"
+    "addiu      %[tmp1],        %[tmp1],          1                    \n\t"
+    "addiu      %[tmp2],        %[tmp2],          1                    \n\t"
+    "sra        %[tmp1],        %[tmp1],          1                    \n\t"
+    "sra        %[tmp2],        %[tmp2],          1                    \n\t"
+    "subu       %[tmp4],        %[tmp6],          %[tmp1]              \n\t"
+    "addu       %[tmp1],        %[tmp6],          %[tmp1]              \n\t"
+    "addu       %[tmp6],        %[tmp5],          %[tmp2]              \n\t"
+    "subu       %[tmp5],        %[tmp5],          %[tmp2]              \n\t"
+    "sra        %[tmp4],        %[tmp4],          15                   \n\t"
+    "sra        %[tmp1],        %[tmp1],          15                   \n\t"
+    "sra        %[tmp6],        %[tmp6],          15                   \n\t"
+    "sra        %[tmp5],        %[tmp5],          15                   \n\t"
+#endif  // #if defined(MIPS_DSP_R2_LE)
+    "sh         %[tmp1],        0(%[ptr_i])                            \n\t"
+    "sh         %[tmp6],        2(%[ptr_i])                            \n\t"
+    "sh         %[tmp4],        0(%[ptr_j])                            \n\t"
+    "blt        %[i],           %[n],             1b                   \n\t"
+    " sh        %[tmp5],        2(%[ptr_j])                            \n\t"
+    "blt        %[m],           %[l],             2b                   \n\t"
+    " addu      %[i],           $zero,            %[m]                 \n\t"
+    "move       %[l],           %[istep]                               \n\t"
+    "blt        %[l],           %[n],             3b                   \n\t"
+    " addiu     %[k],           %[k],             -1                   \n\t"
+
+    ".set pop                                                          \n\t"
+
+    : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3),
+      [tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6),
+      [ptr_i] "=&r" (ptr_i), [i] "=&r" (i), [wi] "=&r" (wi), [wr] "=&r" (wr),
+      [m] "=&r" (m), [istep] "=&r" (istep), [l] "=&r" (l), [k] "=&r" (k),
+      [ptr_j] "=&r" (ptr_j), [tmp] "=&r" (tmp)
+    : [n] "r" (n), [frfi] "r" (frfi), [kSinTable1024] "r" (kSinTable1024)
+    : "hi", "lo", "memory"
+#if defined(MIPS_DSP_R2_LE)
+    , "$ac1hi", "$ac1lo"
+#endif  // #if defined(MIPS_DSP_R2_LE)
+  );
+
+  return 0;
+}
+
+int WebRtcSpl_ComplexIFFT(int16_t frfi[], int stages, int mode) {
+  int i = 0, l = 0, k = 0;
+  int istep = 0, n = 0, m = 0;
+  int scale = 0, shift = 0;
+  int32_t wr = 0, wi = 0;
+  int32_t tmp1 = 0, tmp2 = 0, tmp3 = 0, tmp4 = 0;
+  int32_t tmp5 = 0, tmp6 = 0, tmp = 0, tempMax = 0, round2 = 0;
+  int16_t* ptr_j = NULL;
+  int16_t* ptr_i = NULL;
+
+  n = 1 << stages;
+  if (n > 1024) {
+    return -1;
+  }
+
+  __asm __volatile (
+    ".set push                                                         \n\t"
+    ".set noreorder                                                    \n\t"
+
+    "addiu      %[k],           $zero,            10                   \n\t"
+    "addiu      %[l],           $zero,            1                    \n\t"
+    "move       %[scale],       $zero                                  \n\t"
+   "3:                                                                 \n\t"
+    "addiu      %[shift],       $zero,            14                   \n\t"
+    "addiu      %[round2],      $zero,            8192                 \n\t"
+    "move       %[ptr_i],       %[frfi]                                \n\t"
+    "move       %[tempMax],     $zero                                  \n\t"
+    "addu       %[i],           %[n],             %[n]                 \n\t"
+   "5:                                                                 \n\t"
+    "lh         %[tmp1],        0(%[ptr_i])                            \n\t"
+    "lh         %[tmp2],        2(%[ptr_i])                            \n\t"
+    "lh         %[tmp3],        4(%[ptr_i])                            \n\t"
+    "lh         %[tmp4],        6(%[ptr_i])                            \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "absq_s.w   %[tmp1],        %[tmp1]                                \n\t"
+    "absq_s.w   %[tmp2],        %[tmp2]                                \n\t"
+    "absq_s.w   %[tmp3],        %[tmp3]                                \n\t"
+    "absq_s.w   %[tmp4],        %[tmp4]                                \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "slt        %[tmp5],        %[tmp1],          $zero                \n\t"
+    "subu       %[tmp6],        $zero,            %[tmp1]              \n\t"
+    "movn       %[tmp1],        %[tmp6],          %[tmp5]              \n\t"
+    "slt        %[tmp5],        %[tmp2],          $zero                \n\t"
+    "subu       %[tmp6],        $zero,            %[tmp2]              \n\t"
+    "movn       %[tmp2],        %[tmp6],          %[tmp5]              \n\t"
+    "slt        %[tmp5],        %[tmp3],          $zero                \n\t"
+    "subu       %[tmp6],        $zero,            %[tmp3]              \n\t"
+    "movn       %[tmp3],        %[tmp6],          %[tmp5]              \n\t"
+    "slt        %[tmp5],        %[tmp4],          $zero                \n\t"
+    "subu       %[tmp6],        $zero,            %[tmp4]              \n\t"
+    "movn       %[tmp4],        %[tmp6],          %[tmp5]              \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "slt        %[tmp5],        %[tempMax],       %[tmp1]              \n\t"
+    "movn       %[tempMax],     %[tmp1],          %[tmp5]              \n\t"
+    "addiu      %[i],           %[i],             -4                   \n\t"
+    "slt        %[tmp5],        %[tempMax],       %[tmp2]              \n\t"
+    "movn       %[tempMax],     %[tmp2],          %[tmp5]              \n\t"
+    "slt        %[tmp5],        %[tempMax],       %[tmp3]              \n\t"
+    "movn       %[tempMax],     %[tmp3],          %[tmp5]              \n\t"
+    "slt        %[tmp5],        %[tempMax],       %[tmp4]              \n\t"
+    "movn       %[tempMax],     %[tmp4],          %[tmp5]              \n\t"
+    "bgtz       %[i],                             5b                   \n\t"
+    " addiu     %[ptr_i],       %[ptr_i],         8                    \n\t"
+    "addiu      %[tmp1],        $zero,            13573                \n\t"
+    "addiu      %[tmp2],        $zero,            27146                \n\t"
+#if !defined(MIPS32_R2_LE)
+    "sll        %[tempMax],     %[tempMax],       16                   \n\t"
+    "sra        %[tempMax],     %[tempMax],       16                   \n\t"
+#else  // #if !defined(MIPS32_R2_LE)
+    "seh        %[tempMax]                                             \n\t"
+#endif  // #if !defined(MIPS32_R2_LE)
+    "slt        %[tmp1],        %[tmp1],          %[tempMax]           \n\t"
+    "slt        %[tmp2],        %[tmp2],          %[tempMax]           \n\t"
+    "addu       %[tmp1],        %[tmp1],          %[tmp2]              \n\t"
+    "addu       %[shift],       %[shift],         %[tmp1]              \n\t"
+    "addu       %[scale],       %[scale],         %[tmp1]              \n\t"
+    "sllv       %[round2],      %[round2],        %[tmp1]              \n\t"
+    "sll        %[istep],       %[l],             1                    \n\t"
+    "move       %[m],           $zero                                  \n\t"
+    "sll        %[tmp],         %[l],             2                    \n\t"
+   "2:                                                                 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "sllv       %[tmp3],        %[m],             %[k]                 \n\t"
+    "addiu      %[tmp2],        %[tmp3],          512                  \n\t"
+    "addiu      %[m],           %[m],             1                    \n\t"
+    "lhx        %[wi],          %[tmp3](%[kSinTable1024])              \n\t"
+    "lhx        %[wr],          %[tmp2](%[kSinTable1024])              \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "sllv       %[tmp3],        %[m],             %[k]                 \n\t"
+    "addu       %[ptr_j],       %[tmp3],          %[kSinTable1024]     \n\t"
+    "addiu      %[ptr_i],       %[ptr_j],         512                  \n\t"
+    "addiu      %[m],           %[m],             1                    \n\t"
+    "lh         %[wi],          0(%[ptr_j])                            \n\t"
+    "lh         %[wr],          0(%[ptr_i])                            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+   "1:                                                                 \n\t"
+    "sll        %[tmp1],        %[i],             2                    \n\t"
+    "addu       %[ptr_i],       %[frfi],          %[tmp1]              \n\t"
+    "addu       %[ptr_j],       %[ptr_i],         %[tmp]               \n\t"
+    "lh         %[tmp3],        0(%[ptr_j])                            \n\t"
+    "lh         %[tmp4],        2(%[ptr_j])                            \n\t"
+    "lh         %[tmp6],        0(%[ptr_i])                            \n\t"
+    "lh         %[tmp5],        2(%[ptr_i])                            \n\t"
+    "addu       %[i],           %[i],             %[istep]             \n\t"
+#if defined(MIPS_DSP_R2_LE)
+    "mult       %[wr],          %[tmp3]                                \n\t"
+    "msub       %[wi],          %[tmp4]                                \n\t"
+    "mult       $ac1,           %[wr],            %[tmp4]              \n\t"
+    "madd       $ac1,           %[wi],            %[tmp3]              \n\t"
+    "mflo       %[tmp1]                                                \n\t"
+    "mflo       %[tmp2],        $ac1                                   \n\t"
+    "sll        %[tmp6],        %[tmp6],          14                   \n\t"
+    "sll        %[tmp5],        %[tmp5],          14                   \n\t"
+    "shra_r.w   %[tmp1],        %[tmp1],          1                    \n\t"
+    "shra_r.w   %[tmp2],        %[tmp2],          1                    \n\t"
+    "addu       %[tmp6],        %[tmp6],          %[round2]            \n\t"
+    "addu       %[tmp5],        %[tmp5],          %[round2]            \n\t"
+    "subu       %[tmp4],        %[tmp6],          %[tmp1]              \n\t"
+    "addu       %[tmp1],        %[tmp6],          %[tmp1]              \n\t"
+    "addu       %[tmp6],        %[tmp5],          %[tmp2]              \n\t"
+    "subu       %[tmp5],        %[tmp5],          %[tmp2]              \n\t"
+    "srav       %[tmp4],        %[tmp4],          %[shift]             \n\t"
+    "srav       %[tmp1],        %[tmp1],          %[shift]             \n\t"
+    "srav       %[tmp6],        %[tmp6],          %[shift]             \n\t"
+    "srav       %[tmp5],        %[tmp5],          %[shift]             \n\t"
+#else  // #if defined(MIPS_DSP_R2_LE)
+    "mul        %[tmp1],        %[wr],            %[tmp3]              \n\t"
+    "mul        %[tmp2],        %[wr],            %[tmp4]              \n\t"
+    "mul        %[tmp4],        %[wi],            %[tmp4]              \n\t"
+    "mul        %[tmp3],        %[wi],            %[tmp3]              \n\t"
+    "sll        %[tmp6],        %[tmp6],          14                   \n\t"
+    "sll        %[tmp5],        %[tmp5],          14                   \n\t"
+    "sub        %[tmp1],        %[tmp1],          %[tmp4]              \n\t"
+    "addu       %[tmp2],        %[tmp2],          %[tmp3]              \n\t"
+    "addiu      %[tmp1],        %[tmp1],          1                    \n\t"
+    "addiu      %[tmp2],        %[tmp2],          1                    \n\t"
+    "sra        %[tmp2],        %[tmp2],          1                    \n\t"
+    "sra        %[tmp1],        %[tmp1],          1                    \n\t"
+    "addu       %[tmp6],        %[tmp6],          %[round2]            \n\t"
+    "addu       %[tmp5],        %[tmp5],          %[round2]            \n\t"
+    "subu       %[tmp4],        %[tmp6],          %[tmp1]              \n\t"
+    "addu       %[tmp1],        %[tmp6],          %[tmp1]              \n\t"
+    "addu       %[tmp6],        %[tmp5],          %[tmp2]              \n\t"
+    "subu       %[tmp5],        %[tmp5],          %[tmp2]              \n\t"
+    "sra        %[tmp4],        %[tmp4],          %[shift]             \n\t"
+    "sra        %[tmp1],        %[tmp1],          %[shift]             \n\t"
+    "sra        %[tmp6],        %[tmp6],          %[shift]             \n\t"
+    "sra        %[tmp5],        %[tmp5],          %[shift]             \n\t"
+#endif  // #if defined(MIPS_DSP_R2_LE)
+    "sh         %[tmp1],         0(%[ptr_i])                           \n\t"
+    "sh         %[tmp6],         2(%[ptr_i])                           \n\t"
+    "sh         %[tmp4],         0(%[ptr_j])                           \n\t"
+    "blt        %[i],            %[n],            1b                   \n\t"
+    " sh        %[tmp5],         2(%[ptr_j])                           \n\t"
+    "blt        %[m],            %[l],            2b                   \n\t"
+    " addu      %[i],            $zero,           %[m]                 \n\t"
+    "move       %[l],            %[istep]                              \n\t"
+    "blt        %[l],            %[n],            3b                   \n\t"
+    " addiu     %[k],            %[k],            -1                   \n\t"
+
+    ".set pop                                                          \n\t"
+
+    : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3),
+      [tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6),
+      [ptr_i] "=&r" (ptr_i), [i] "=&r" (i), [m] "=&r" (m), [tmp] "=&r" (tmp),
+      [istep] "=&r" (istep), [wi] "=&r" (wi), [wr] "=&r" (wr), [l] "=&r" (l),
+      [k] "=&r" (k), [round2] "=&r" (round2), [ptr_j] "=&r" (ptr_j),
+      [shift] "=&r" (shift), [scale] "=&r" (scale), [tempMax] "=&r" (tempMax)
+    : [n] "r" (n), [frfi] "r" (frfi), [kSinTable1024] "r" (kSinTable1024)
+    : "hi", "lo", "memory"
+#if defined(MIPS_DSP_R2_LE)
+    , "$ac1hi", "$ac1lo"
+#endif  // #if defined(MIPS_DSP_R2_LE)
+  );
+
+  return scale;
+
+}
diff --git a/common_audio/signal_processing/complex_fft_tables.h b/common_audio/signal_processing/complex_fft_tables.h
new file mode 100644
index 0000000..6c3fcfd
--- /dev/null
+++ b/common_audio/signal_processing/complex_fft_tables.h
@@ -0,0 +1,148 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef COMMON_AUDIO_SIGNAL_PROCESSING_COMPLEX_FFT_TABLES_H_
+#define COMMON_AUDIO_SIGNAL_PROCESSING_COMPLEX_FFT_TABLES_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+static const int16_t kSinTable1024[] = {
+       0,    201,    402,    603,    804,   1005,   1206,   1406,
+    1607,   1808,   2009,   2209,   2410,   2610,   2811,   3011,
+    3211,   3411,   3611,   3811,   4011,   4210,   4409,   4608,
+    4807,   5006,   5205,   5403,   5601,   5799,   5997,   6195,
+    6392,   6589,   6786,   6982,   7179,   7375,   7571,   7766,
+    7961,   8156,   8351,   8545,   8739,   8932,   9126,   9319,
+    9511,   9703,   9895,  10087,  10278,  10469,  10659,  10849,
+   11038,  11227,  11416,  11604,  11792,  11980,  12166,  12353,
+   12539,  12724,  12909,  13094,  13278,  13462,  13645,  13827,
+   14009,  14191,  14372,  14552,  14732,  14911,  15090,  15268,
+   15446,  15623,  15799,  15975,  16150,  16325,  16499,  16672,
+   16845,  17017,  17189,  17360,  17530,  17699,  17868,  18036,
+   18204,  18371,  18537,  18702,  18867,  19031,  19194,  19357,
+   19519,  19680,  19840,  20000,  20159,  20317,  20474,  20631,
+   20787,  20942,  21096,  21249,  21402,  21554,  21705,  21855,
+   22004,  22153,  22301,  22448,  22594,  22739,  22883,  23027,
+   23169,  23311,  23452,  23592,  23731,  23869,  24006,  24143,
+   24278,  24413,  24546,  24679,  24811,  24942,  25072,  25201,
+   25329,  25456,  25582,  25707,  25831,  25954,  26077,  26198,
+   26318,  26437,  26556,  26673,  26789,  26905,  27019,  27132,
+   27244,  27355,  27466,  27575,  27683,  27790,  27896,  28001,
+   28105,  28208,  28309,  28410,  28510,  28608,  28706,  28802,
+   28897,  28992,  29085,  29177,  29268,  29358,  29446,  29534,
+   29621,  29706,  29790,  29873,  29955,  30036,  30116,  30195,
+   30272,  30349,  30424,  30498,  30571,  30643,  30713,  30783,
+   30851,  30918,  30984,  31049,  31113,  31175,  31236,  31297,
+   31356,  31413,  31470,  31525,  31580,  31633,  31684,  31735,
+   31785,  31833,  31880,  31926,  31970,  32014,  32056,  32097,
+   32137,  32176,  32213,  32249,  32284,  32318,  32350,  32382,
+   32412,  32441,  32468,  32495,  32520,  32544,  32567,  32588,
+   32609,  32628,  32646,  32662,  32678,  32692,  32705,  32717,
+   32727,  32736,  32744,  32751,  32757,  32761,  32764,  32766,
+   32767,  32766,  32764,  32761,  32757,  32751,  32744,  32736,
+   32727,  32717,  32705,  32692,  32678,  32662,  32646,  32628,
+   32609,  32588,  32567,  32544,  32520,  32495,  32468,  32441,
+   32412,  32382,  32350,  32318,  32284,  32249,  32213,  32176,
+   32137,  32097,  32056,  32014,  31970,  31926,  31880,  31833,
+   31785,  31735,  31684,  31633,  31580,  31525,  31470,  31413,
+   31356,  31297,  31236,  31175,  31113,  31049,  30984,  30918,
+   30851,  30783,  30713,  30643,  30571,  30498,  30424,  30349,
+   30272,  30195,  30116,  30036,  29955,  29873,  29790,  29706,
+   29621,  29534,  29446,  29358,  29268,  29177,  29085,  28992,
+   28897,  28802,  28706,  28608,  28510,  28410,  28309,  28208,
+   28105,  28001,  27896,  27790,  27683,  27575,  27466,  27355,
+   27244,  27132,  27019,  26905,  26789,  26673,  26556,  26437,
+   26318,  26198,  26077,  25954,  25831,  25707,  25582,  25456,
+   25329,  25201,  25072,  24942,  24811,  24679,  24546,  24413,
+   24278,  24143,  24006,  23869,  23731,  23592,  23452,  23311,
+   23169,  23027,  22883,  22739,  22594,  22448,  22301,  22153,
+   22004,  21855,  21705,  21554,  21402,  21249,  21096,  20942,
+   20787,  20631,  20474,  20317,  20159,  20000,  19840,  19680,
+   19519,  19357,  19194,  19031,  18867,  18702,  18537,  18371,
+   18204,  18036,  17868,  17699,  17530,  17360,  17189,  17017,
+   16845,  16672,  16499,  16325,  16150,  15975,  15799,  15623,
+   15446,  15268,  15090,  14911,  14732,  14552,  14372,  14191,
+   14009,  13827,  13645,  13462,  13278,  13094,  12909,  12724,
+   12539,  12353,  12166,  11980,  11792,  11604,  11416,  11227,
+   11038,  10849,  10659,  10469,  10278,  10087,   9895,   9703,
+    9511,   9319,   9126,   8932,   8739,   8545,   8351,   8156,
+    7961,   7766,   7571,   7375,   7179,   6982,   6786,   6589,
+    6392,   6195,   5997,   5799,   5601,   5403,   5205,   5006,
+    4807,   4608,   4409,   4210,   4011,   3811,   3611,   3411,
+    3211,   3011,   2811,   2610,   2410,   2209,   2009,   1808,
+    1607,   1406,   1206,   1005,    804,    603,    402,    201,
+       0,   -201,   -402,   -603,   -804,  -1005,  -1206,  -1406,
+   -1607,  -1808,  -2009,  -2209,  -2410,  -2610,  -2811,  -3011,
+   -3211,  -3411,  -3611,  -3811,  -4011,  -4210,  -4409,  -4608,
+   -4807,  -5006,  -5205,  -5403,  -5601,  -5799,  -5997,  -6195,
+   -6392,  -6589,  -6786,  -6982,  -7179,  -7375,  -7571,  -7766,
+   -7961,  -8156,  -8351,  -8545,  -8739,  -8932,  -9126,  -9319,
+   -9511,  -9703,  -9895, -10087, -10278, -10469, -10659, -10849,
+  -11038, -11227, -11416, -11604, -11792, -11980, -12166, -12353,
+  -12539, -12724, -12909, -13094, -13278, -13462, -13645, -13827,
+  -14009, -14191, -14372, -14552, -14732, -14911, -15090, -15268,
+  -15446, -15623, -15799, -15975, -16150, -16325, -16499, -16672,
+  -16845, -17017, -17189, -17360, -17530, -17699, -17868, -18036,
+  -18204, -18371, -18537, -18702, -18867, -19031, -19194, -19357,
+  -19519, -19680, -19840, -20000, -20159, -20317, -20474, -20631,
+  -20787, -20942, -21096, -21249, -21402, -21554, -21705, -21855,
+  -22004, -22153, -22301, -22448, -22594, -22739, -22883, -23027,
+  -23169, -23311, -23452, -23592, -23731, -23869, -24006, -24143,
+  -24278, -24413, -24546, -24679, -24811, -24942, -25072, -25201,
+  -25329, -25456, -25582, -25707, -25831, -25954, -26077, -26198,
+  -26318, -26437, -26556, -26673, -26789, -26905, -27019, -27132,
+  -27244, -27355, -27466, -27575, -27683, -27790, -27896, -28001,
+  -28105, -28208, -28309, -28410, -28510, -28608, -28706, -28802,
+  -28897, -28992, -29085, -29177, -29268, -29358, -29446, -29534,
+  -29621, -29706, -29790, -29873, -29955, -30036, -30116, -30195,
+  -30272, -30349, -30424, -30498, -30571, -30643, -30713, -30783,
+  -30851, -30918, -30984, -31049, -31113, -31175, -31236, -31297,
+  -31356, -31413, -31470, -31525, -31580, -31633, -31684, -31735,
+  -31785, -31833, -31880, -31926, -31970, -32014, -32056, -32097,
+  -32137, -32176, -32213, -32249, -32284, -32318, -32350, -32382,
+  -32412, -32441, -32468, -32495, -32520, -32544, -32567, -32588,
+  -32609, -32628, -32646, -32662, -32678, -32692, -32705, -32717,
+  -32727, -32736, -32744, -32751, -32757, -32761, -32764, -32766,
+  -32767, -32766, -32764, -32761, -32757, -32751, -32744, -32736,
+  -32727, -32717, -32705, -32692, -32678, -32662, -32646, -32628,
+  -32609, -32588, -32567, -32544, -32520, -32495, -32468, -32441,
+  -32412, -32382, -32350, -32318, -32284, -32249, -32213, -32176,
+  -32137, -32097, -32056, -32014, -31970, -31926, -31880, -31833,
+  -31785, -31735, -31684, -31633, -31580, -31525, -31470, -31413,
+  -31356, -31297, -31236, -31175, -31113, -31049, -30984, -30918,
+  -30851, -30783, -30713, -30643, -30571, -30498, -30424, -30349,
+  -30272, -30195, -30116, -30036, -29955, -29873, -29790, -29706,
+  -29621, -29534, -29446, -29358, -29268, -29177, -29085, -28992,
+  -28897, -28802, -28706, -28608, -28510, -28410, -28309, -28208,
+  -28105, -28001, -27896, -27790, -27683, -27575, -27466, -27355,
+  -27244, -27132, -27019, -26905, -26789, -26673, -26556, -26437,
+  -26318, -26198, -26077, -25954, -25831, -25707, -25582, -25456,
+  -25329, -25201, -25072, -24942, -24811, -24679, -24546, -24413,
+  -24278, -24143, -24006, -23869, -23731, -23592, -23452, -23311,
+  -23169, -23027, -22883, -22739, -22594, -22448, -22301, -22153,
+  -22004, -21855, -21705, -21554, -21402, -21249, -21096, -20942,
+  -20787, -20631, -20474, -20317, -20159, -20000, -19840, -19680,
+  -19519, -19357, -19194, -19031, -18867, -18702, -18537, -18371,
+  -18204, -18036, -17868, -17699, -17530, -17360, -17189, -17017,
+  -16845, -16672, -16499, -16325, -16150, -15975, -15799, -15623,
+  -15446, -15268, -15090, -14911, -14732, -14552, -14372, -14191,
+  -14009, -13827, -13645, -13462, -13278, -13094, -12909, -12724,
+  -12539, -12353, -12166, -11980, -11792, -11604, -11416, -11227,
+  -11038, -10849, -10659, -10469, -10278, -10087,  -9895,  -9703,
+   -9511,  -9319,  -9126,  -8932,  -8739,  -8545,  -8351,  -8156,
+   -7961,  -7766,  -7571,  -7375,  -7179,  -6982,  -6786,  -6589,
+   -6392,  -6195,  -5997,  -5799,  -5601,  -5403,  -5205,  -5006,
+   -4807,  -4608,  -4409,  -4210,  -4011,  -3811,  -3611,  -3411,
+   -3211,  -3011,  -2811,  -2610,  -2410,  -2209,  -2009,  -1808,
+   -1607,  -1406,  -1206,  -1005,   -804,   -603,   -402,   -201
+};
+
+#endif  // COMMON_AUDIO_SIGNAL_PROCESSING_COMPLEX_FFT_TABLES_H_
diff --git a/common_audio/signal_processing/copy_set_operations.c b/common_audio/signal_processing/copy_set_operations.c
new file mode 100644
index 0000000..ae709d4
--- /dev/null
+++ b/common_audio/signal_processing/copy_set_operations.c
@@ -0,0 +1,82 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains the implementation of functions
+ * WebRtcSpl_MemSetW16()
+ * WebRtcSpl_MemSetW32()
+ * WebRtcSpl_MemCpyReversedOrder()
+ * WebRtcSpl_CopyFromEndW16()
+ * WebRtcSpl_ZerosArrayW16()
+ * WebRtcSpl_ZerosArrayW32()
+ *
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#include <string.h>
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+
+void WebRtcSpl_MemSetW16(int16_t *ptr, int16_t set_value, size_t length)
+{
+    size_t j;
+    int16_t *arrptr = ptr;
+
+    for (j = length; j > 0; j--)
+    {
+        *arrptr++ = set_value;
+    }
+}
+
+void WebRtcSpl_MemSetW32(int32_t *ptr, int32_t set_value, size_t length)
+{
+    size_t j;
+    int32_t *arrptr = ptr;
+
+    for (j = length; j > 0; j--)
+    {
+        *arrptr++ = set_value;
+    }
+}
+
+void WebRtcSpl_MemCpyReversedOrder(int16_t* dest,
+                                   int16_t* source,
+                                   size_t length)
+{
+    size_t j;
+    int16_t* destPtr = dest;
+    int16_t* sourcePtr = source;
+
+    for (j = 0; j < length; j++)
+    {
+        *destPtr-- = *sourcePtr++;
+    }
+}
+
+void WebRtcSpl_CopyFromEndW16(const int16_t *vector_in,
+                              size_t length,
+                              size_t samples,
+                              int16_t *vector_out)
+{
+    // Copy the last <samples> of the input vector to vector_out
+    WEBRTC_SPL_MEMCPY_W16(vector_out, &vector_in[length - samples], samples);
+}
+
+void WebRtcSpl_ZerosArrayW16(int16_t *vector, size_t length)
+{
+    WebRtcSpl_MemSetW16(vector, 0, length);
+}
+
+void WebRtcSpl_ZerosArrayW32(int32_t *vector, size_t length)
+{
+    WebRtcSpl_MemSetW32(vector, 0, length);
+}
diff --git a/common_audio/signal_processing/cross_correlation.c b/common_audio/signal_processing/cross_correlation.c
new file mode 100644
index 0000000..c6267c9
--- /dev/null
+++ b/common_audio/signal_processing/cross_correlation.c
@@ -0,0 +1,30 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+/* C version of WebRtcSpl_CrossCorrelation() for generic platforms. */
+void WebRtcSpl_CrossCorrelationC(int32_t* cross_correlation,
+                                 const int16_t* seq1,
+                                 const int16_t* seq2,
+                                 size_t dim_seq,
+                                 size_t dim_cross_correlation,
+                                 int right_shifts,
+                                 int step_seq2) {
+  size_t i = 0, j = 0;
+
+  for (i = 0; i < dim_cross_correlation; i++) {
+    int32_t corr = 0;
+    for (j = 0; j < dim_seq; j++)
+      corr += (seq1[j] * seq2[j]) >> right_shifts;
+    seq2 += step_seq2;
+    *cross_correlation++ = corr;
+  }
+}
diff --git a/common_audio/signal_processing/cross_correlation_mips.c b/common_audio/signal_processing/cross_correlation_mips.c
new file mode 100644
index 0000000..c395101
--- /dev/null
+++ b/common_audio/signal_processing/cross_correlation_mips.c
@@ -0,0 +1,104 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+void WebRtcSpl_CrossCorrelation_mips(int32_t* cross_correlation,
+                                     const int16_t* seq1,
+                                     const int16_t* seq2,
+                                     size_t dim_seq,
+                                     size_t dim_cross_correlation,
+                                     int right_shifts,
+                                     int step_seq2) {
+
+  int32_t t0 = 0, t1 = 0, t2 = 0, t3 = 0, sum = 0;
+  int16_t *pseq2 = NULL;
+  int16_t *pseq1 = NULL;
+  int16_t *pseq1_0 = (int16_t*)&seq1[0];
+  int16_t *pseq2_0 = (int16_t*)&seq2[0];
+  int k = 0;
+
+  __asm __volatile (
+    ".set        push                                           \n\t"
+    ".set        noreorder                                      \n\t"
+    "sll         %[step_seq2], %[step_seq2],   1                \n\t"
+    "andi        %[t0],        %[dim_seq],     1                \n\t"
+    "bgtz        %[t0],        3f                               \n\t"
+    " nop                                                       \n\t"
+   "1:                                                          \n\t"
+    "move        %[pseq1],     %[pseq1_0]                       \n\t"
+    "move        %[pseq2],     %[pseq2_0]                       \n\t"
+    "sra         %[k],         %[dim_seq],     1                \n\t"
+    "addiu       %[dim_cc],    %[dim_cc],      -1               \n\t"
+    "xor         %[sum],       %[sum],         %[sum]           \n\t"
+   "2:                                                          \n\t"
+    "lh          %[t0],        0(%[pseq1])                      \n\t"
+    "lh          %[t1],        0(%[pseq2])                      \n\t"
+    "lh          %[t2],        2(%[pseq1])                      \n\t"
+    "lh          %[t3],        2(%[pseq2])                      \n\t"
+    "mul         %[t0],        %[t0],          %[t1]            \n\t"
+    "addiu       %[k],         %[k],           -1               \n\t"
+    "mul         %[t2],        %[t2],          %[t3]            \n\t"
+    "addiu       %[pseq1],     %[pseq1],       4                \n\t"
+    "addiu       %[pseq2],     %[pseq2],       4                \n\t"
+    "srav        %[t0],        %[t0],          %[right_shifts]  \n\t"
+    "addu        %[sum],       %[sum],         %[t0]            \n\t"
+    "srav        %[t2],        %[t2],          %[right_shifts]  \n\t"
+    "bgtz        %[k],         2b                               \n\t"
+    " addu       %[sum],       %[sum],         %[t2]            \n\t"
+    "addu        %[pseq2_0],   %[pseq2_0],     %[step_seq2]     \n\t"
+    "sw          %[sum],       0(%[cc])                         \n\t"
+    "bgtz        %[dim_cc],    1b                               \n\t"
+    " addiu      %[cc],        %[cc],          4                \n\t"
+    "b           6f                                             \n\t"
+    " nop                                                       \n\t"
+   "3:                                                          \n\t"
+    "move        %[pseq1],     %[pseq1_0]                       \n\t"
+    "move        %[pseq2],     %[pseq2_0]                       \n\t"
+    "sra         %[k],         %[dim_seq],     1                \n\t"
+    "addiu       %[dim_cc],    %[dim_cc],      -1               \n\t"
+    "beqz        %[k],         5f                               \n\t"
+    " xor        %[sum],       %[sum],         %[sum]           \n\t"
+   "4:                                                          \n\t"
+    "lh          %[t0],        0(%[pseq1])                      \n\t"
+    "lh          %[t1],        0(%[pseq2])                      \n\t"
+    "lh          %[t2],        2(%[pseq1])                      \n\t"
+    "lh          %[t3],        2(%[pseq2])                      \n\t"
+    "mul         %[t0],        %[t0],          %[t1]            \n\t"
+    "addiu       %[k],         %[k],           -1               \n\t"
+    "mul         %[t2],        %[t2],          %[t3]            \n\t"
+    "addiu       %[pseq1],     %[pseq1],       4                \n\t"
+    "addiu       %[pseq2],     %[pseq2],       4                \n\t"
+    "srav        %[t0],        %[t0],          %[right_shifts]  \n\t"
+    "addu        %[sum],       %[sum],         %[t0]            \n\t"
+    "srav        %[t2],        %[t2],          %[right_shifts]  \n\t"
+    "bgtz        %[k],         4b                               \n\t"
+    " addu       %[sum],       %[sum],         %[t2]            \n\t"
+   "5:                                                          \n\t"
+    "lh          %[t0],        0(%[pseq1])                      \n\t"
+    "lh          %[t1],        0(%[pseq2])                      \n\t"
+    "mul         %[t0],        %[t0],          %[t1]            \n\t"
+    "srav        %[t0],        %[t0],          %[right_shifts]  \n\t"
+    "addu        %[sum],       %[sum],         %[t0]            \n\t"
+    "addu        %[pseq2_0],   %[pseq2_0],     %[step_seq2]     \n\t"
+    "sw          %[sum],       0(%[cc])                         \n\t"
+    "bgtz        %[dim_cc],    3b                               \n\t"
+    " addiu      %[cc],        %[cc],          4                \n\t"
+   "6:                                                          \n\t"
+    ".set        pop                                            \n\t"
+    : [step_seq2] "+r" (step_seq2), [t0] "=&r" (t0), [t1] "=&r" (t1),
+      [t2] "=&r" (t2), [t3] "=&r" (t3), [pseq1] "=&r" (pseq1),
+      [pseq2] "=&r" (pseq2), [pseq1_0] "+r" (pseq1_0), [pseq2_0] "+r" (pseq2_0),
+      [k] "=&r" (k), [dim_cc] "+r" (dim_cross_correlation), [sum] "=&r" (sum),
+      [cc] "+r" (cross_correlation)
+    : [dim_seq] "r" (dim_seq), [right_shifts] "r" (right_shifts)
+    : "hi", "lo", "memory"
+  );
+}
diff --git a/common_audio/signal_processing/cross_correlation_neon.c b/common_audio/signal_processing/cross_correlation_neon.c
new file mode 100644
index 0000000..fdd03f1
--- /dev/null
+++ b/common_audio/signal_processing/cross_correlation_neon.c
@@ -0,0 +1,87 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+#include <arm_neon.h>
+
+static inline void DotProductWithScaleNeon(int32_t* cross_correlation,
+                                           const int16_t* vector1,
+                                           const int16_t* vector2,
+                                           size_t length,
+                                           int scaling) {
+  size_t i = 0;
+  size_t len1 = length >> 3;
+  size_t len2 = length & 7;
+  int64x2_t sum0 = vdupq_n_s64(0);
+  int64x2_t sum1 = vdupq_n_s64(0);
+
+  for (i = len1; i > 0; i -= 1) {
+    int16x8_t seq1_16x8 = vld1q_s16(vector1);
+    int16x8_t seq2_16x8 = vld1q_s16(vector2);
+#if defined(WEBRTC_ARCH_ARM64)
+    int32x4_t tmp0 = vmull_s16(vget_low_s16(seq1_16x8),
+                               vget_low_s16(seq2_16x8));
+    int32x4_t tmp1 = vmull_high_s16(seq1_16x8, seq2_16x8);
+#else
+    int32x4_t tmp0 = vmull_s16(vget_low_s16(seq1_16x8),
+                               vget_low_s16(seq2_16x8));
+    int32x4_t tmp1 = vmull_s16(vget_high_s16(seq1_16x8),
+                               vget_high_s16(seq2_16x8));
+#endif
+    sum0 = vpadalq_s32(sum0, tmp0);
+    sum1 = vpadalq_s32(sum1, tmp1);
+    vector1 += 8;
+    vector2 += 8;
+  }
+
+  // Calculate the rest of the samples.
+  int64_t sum_res = 0;
+  for (i = len2; i > 0; i -= 1) {
+    sum_res += WEBRTC_SPL_MUL_16_16(*vector1, *vector2);
+    vector1++;
+    vector2++;
+  }
+
+  sum0 = vaddq_s64(sum0, sum1);
+#if defined(WEBRTC_ARCH_ARM64)
+  int64_t sum2 = vaddvq_s64(sum0);
+  *cross_correlation = (int32_t)((sum2 + sum_res) >> scaling);
+#else
+  int64x1_t shift = vdup_n_s64(-scaling);
+  int64x1_t sum2 = vadd_s64(vget_low_s64(sum0), vget_high_s64(sum0));
+  sum2 = vadd_s64(sum2, vdup_n_s64(sum_res));
+  sum2 = vshl_s64(sum2, shift);
+  vst1_lane_s32(cross_correlation, vreinterpret_s32_s64(sum2), 0);
+#endif
+}
+
+/* NEON version of WebRtcSpl_CrossCorrelation() for ARM32/64 platforms. */
+void WebRtcSpl_CrossCorrelationNeon(int32_t* cross_correlation,
+                                    const int16_t* seq1,
+                                    const int16_t* seq2,
+                                    size_t dim_seq,
+                                    size_t dim_cross_correlation,
+                                    int right_shifts,
+                                    int step_seq2) {
+  size_t i = 0;
+
+  for (i = 0; i < dim_cross_correlation; i++) {
+    const int16_t* seq1_ptr = seq1;
+    const int16_t* seq2_ptr = seq2 + (step_seq2 * i);
+
+    DotProductWithScaleNeon(cross_correlation,
+                            seq1_ptr,
+                            seq2_ptr,
+                            dim_seq,
+                            right_shifts);
+    cross_correlation++;
+  }
+}
diff --git a/common_audio/signal_processing/division_operations.c b/common_audio/signal_processing/division_operations.c
new file mode 100644
index 0000000..c6195e7
--- /dev/null
+++ b/common_audio/signal_processing/division_operations.c
@@ -0,0 +1,141 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains implementations of the divisions
+ * WebRtcSpl_DivU32U16()
+ * WebRtcSpl_DivW32W16()
+ * WebRtcSpl_DivW32W16ResW16()
+ * WebRtcSpl_DivResultInQ31()
+ * WebRtcSpl_DivW32HiLow()
+ *
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "rtc_base/sanitizer.h"
+
+uint32_t WebRtcSpl_DivU32U16(uint32_t num, uint16_t den)
+{
+    // Guard against division with 0
+    if (den != 0)
+    {
+        return (uint32_t)(num / den);
+    } else
+    {
+        return (uint32_t)0xFFFFFFFF;
+    }
+}
+
+int32_t WebRtcSpl_DivW32W16(int32_t num, int16_t den)
+{
+    // Guard against division with 0
+    if (den != 0)
+    {
+        return (int32_t)(num / den);
+    } else
+    {
+        return (int32_t)0x7FFFFFFF;
+    }
+}
+
+int16_t WebRtcSpl_DivW32W16ResW16(int32_t num, int16_t den)
+{
+    // Guard against division with 0
+    if (den != 0)
+    {
+        return (int16_t)(num / den);
+    } else
+    {
+        return (int16_t)0x7FFF;
+    }
+}
+
+int32_t WebRtcSpl_DivResultInQ31(int32_t num, int32_t den)
+{
+    int32_t L_num = num;
+    int32_t L_den = den;
+    int32_t div = 0;
+    int k = 31;
+    int change_sign = 0;
+
+    if (num == 0)
+        return 0;
+
+    if (num < 0)
+    {
+        change_sign++;
+        L_num = -num;
+    }
+    if (den < 0)
+    {
+        change_sign++;
+        L_den = -den;
+    }
+    while (k--)
+    {
+        div <<= 1;
+        L_num <<= 1;
+        if (L_num >= L_den)
+        {
+            L_num -= L_den;
+            div++;
+        }
+    }
+    if (change_sign == 1)
+    {
+        div = -div;
+    }
+    return div;
+}
+
+int32_t RTC_NO_SANITIZE("signed-integer-overflow")  // bugs.webrtc.org/5486
+WebRtcSpl_DivW32HiLow(int32_t num, int16_t den_hi, int16_t den_low)
+{
+    int16_t approx, tmp_hi, tmp_low, num_hi, num_low;
+    int32_t tmpW32;
+
+    approx = (int16_t)WebRtcSpl_DivW32W16((int32_t)0x1FFFFFFF, den_hi);
+    // result in Q14 (Note: 3FFFFFFF = 0.5 in Q30)
+
+    // tmpW32 = 1/den = approx * (2.0 - den * approx) (in Q30)
+    tmpW32 = (den_hi * approx << 1) + ((den_low * approx >> 15) << 1);
+    // tmpW32 = den * approx
+
+    tmpW32 = (int32_t)0x7fffffffL - tmpW32; // result in Q30 (tmpW32 = 2.0-(den*approx))
+    // UBSan: 2147483647 - -2 cannot be represented in type 'int'
+
+    // Store tmpW32 in hi and low format
+    tmp_hi = (int16_t)(tmpW32 >> 16);
+    tmp_low = (int16_t)((tmpW32 - ((int32_t)tmp_hi << 16)) >> 1);
+
+    // tmpW32 = 1/den in Q29
+    tmpW32 = (tmp_hi * approx + (tmp_low * approx >> 15)) << 1;
+
+    // 1/den in hi and low format
+    tmp_hi = (int16_t)(tmpW32 >> 16);
+    tmp_low = (int16_t)((tmpW32 - ((int32_t)tmp_hi << 16)) >> 1);
+
+    // Store num in hi and low format
+    num_hi = (int16_t)(num >> 16);
+    num_low = (int16_t)((num - ((int32_t)num_hi << 16)) >> 1);
+
+    // num * (1/den) by 32 bit multiplication (result in Q28)
+
+    tmpW32 = num_hi * tmp_hi + (num_hi * tmp_low >> 15) +
+        (num_low * tmp_hi >> 15);
+
+    // Put result in Q31 (convert from Q28)
+    tmpW32 = WEBRTC_SPL_LSHIFT_W32(tmpW32, 3);
+
+    return tmpW32;
+}
diff --git a/common_audio/signal_processing/dot_product_with_scale.cc b/common_audio/signal_processing/dot_product_with_scale.cc
new file mode 100644
index 0000000..00799da
--- /dev/null
+++ b/common_audio/signal_processing/dot_product_with_scale.cc
@@ -0,0 +1,34 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/signal_processing/dot_product_with_scale.h"
+
+#include "rtc_base/numerics/safe_conversions.h"
+
+int32_t WebRtcSpl_DotProductWithScale(const int16_t* vector1,
+                                      const int16_t* vector2,
+                                      size_t length,
+                                      int scaling) {
+  int64_t sum = 0;
+  size_t i = 0;
+
+  /* Unroll the loop to improve performance. */
+  for (i = 0; i + 3 < length; i += 4) {
+    sum += (vector1[i + 0] * vector2[i + 0]) >> scaling;
+    sum += (vector1[i + 1] * vector2[i + 1]) >> scaling;
+    sum += (vector1[i + 2] * vector2[i + 2]) >> scaling;
+    sum += (vector1[i + 3] * vector2[i + 3]) >> scaling;
+  }
+  for (; i < length; i++) {
+    sum += (vector1[i] * vector2[i]) >> scaling;
+  }
+
+  return rtc::saturated_cast<int32_t>(sum);
+}
diff --git a/common_audio/signal_processing/dot_product_with_scale.h b/common_audio/signal_processing/dot_product_with_scale.h
new file mode 100644
index 0000000..ff3c525
--- /dev/null
+++ b/common_audio/signal_processing/dot_product_with_scale.h
@@ -0,0 +1,41 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_SIGNAL_PROCESSING_DOT_PRODUCT_WITH_SCALE_H_
+#define COMMON_AUDIO_SIGNAL_PROCESSING_DOT_PRODUCT_WITH_SCALE_H_
+
+#include <string.h>
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Calculates the dot product between two (int16_t) vectors.
+//
+// Input:
+//      - vector1       : Vector 1
+//      - vector2       : Vector 2
+//      - vector_length : Number of samples used in the dot product
+//      - scaling       : The number of right bit shifts to apply on each term
+//                        during calculation to avoid overflow, i.e., the
+//                        output will be in Q(-|scaling|)
+//
+// Return value         : The dot product in Q(-scaling)
+int32_t WebRtcSpl_DotProductWithScale(const int16_t* vector1,
+                                      const int16_t* vector2,
+                                      size_t length,
+                                      int scaling);
+
+#ifdef __cplusplus
+}
+#endif  // __cplusplus
+#endif  // COMMON_AUDIO_SIGNAL_PROCESSING_DOT_PRODUCT_WITH_SCALE_H_
diff --git a/common_audio/signal_processing/downsample_fast.c b/common_audio/signal_processing/downsample_fast.c
new file mode 100644
index 0000000..9a2ea05
--- /dev/null
+++ b/common_audio/signal_processing/downsample_fast.c
@@ -0,0 +1,60 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/sanitizer.h"
+
+// TODO(Bjornv): Change the function parameter order to WebRTC code style.
+// C version of WebRtcSpl_DownsampleFast() for generic platforms.
+int WebRtcSpl_DownsampleFastC(const int16_t* data_in,
+                              size_t data_in_length,
+                              int16_t* data_out,
+                              size_t data_out_length,
+                              const int16_t* __restrict coefficients,
+                              size_t coefficients_length,
+                              int factor,
+                              size_t delay) {
+  int16_t* const original_data_out = data_out;
+  size_t i = 0;
+  size_t j = 0;
+  int32_t out_s32 = 0;
+  size_t endpos = delay + factor * (data_out_length - 1) + 1;
+
+  // Return error if any of the running conditions doesn't meet.
+  if (data_out_length == 0 || coefficients_length == 0
+                           || data_in_length < endpos) {
+    return -1;
+  }
+
+  rtc_MsanCheckInitialized(coefficients, sizeof(coefficients[0]),
+                           coefficients_length);
+
+  for (i = delay; i < endpos; i += factor) {
+    out_s32 = 2048;  // Round value, 0.5 in Q12.
+
+    for (j = 0; j < coefficients_length; j++) {
+      rtc_MsanCheckInitialized(&data_in[i - j], sizeof(data_in[0]), 1);
+      out_s32 += coefficients[j] * data_in[i - j];  // Q12.
+    }
+
+    out_s32 >>= 12;  // Q0.
+
+    // Saturate and store the output.
+    *data_out++ = WebRtcSpl_SatW32ToW16(out_s32);
+  }
+
+  RTC_DCHECK_EQ(original_data_out + data_out_length, data_out);
+  rtc_MsanCheckInitialized(original_data_out, sizeof(original_data_out[0]),
+                           data_out_length);
+
+  return 0;
+}
diff --git a/common_audio/signal_processing/downsample_fast_mips.c b/common_audio/signal_processing/downsample_fast_mips.c
new file mode 100644
index 0000000..0f3f3a0
--- /dev/null
+++ b/common_audio/signal_processing/downsample_fast_mips.c
@@ -0,0 +1,169 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+// Version of WebRtcSpl_DownsampleFast() for MIPS platforms.
+int WebRtcSpl_DownsampleFast_mips(const int16_t* data_in,
+                                  size_t data_in_length,
+                                  int16_t* data_out,
+                                  size_t data_out_length,
+                                  const int16_t* __restrict coefficients,
+                                  size_t coefficients_length,
+                                  int factor,
+                                  size_t delay) {
+  int i;
+  int j;
+  int k;
+  int32_t out_s32 = 0;
+  size_t endpos = delay + factor * (data_out_length - 1) + 1;
+
+  int32_t  tmp1, tmp2, tmp3, tmp4, factor_2;
+  int16_t* p_coefficients;
+  int16_t* p_data_in;
+  int16_t* p_data_in_0 = (int16_t*)&data_in[delay];
+  int16_t* p_coefficients_0 = (int16_t*)&coefficients[0];
+#if !defined(MIPS_DSP_R1_LE)
+  int32_t max_16 = 0x7FFF;
+  int32_t min_16 = 0xFFFF8000;
+#endif  // #if !defined(MIPS_DSP_R1_LE)
+
+  // Return error if any of the running conditions doesn't meet.
+  if (data_out_length == 0 || coefficients_length == 0
+                           || data_in_length < endpos) {
+    return -1;
+  }
+#if defined(MIPS_DSP_R2_LE)
+  __asm __volatile (
+    ".set        push                                                \n\t"
+    ".set        noreorder                                           \n\t"
+    "subu        %[i],            %[endpos],       %[delay]          \n\t"
+    "sll         %[factor_2],     %[factor],       1                 \n\t"
+   "1:                                                               \n\t"
+    "move        %[p_data_in],    %[p_data_in_0]                     \n\t"
+    "mult        $zero,           $zero                              \n\t"
+    "move        %[p_coefs],      %[p_coefs_0]                       \n\t"
+    "sra         %[j],            %[coef_length],  2                 \n\t"
+    "beq         %[j],            $zero,           3f                \n\t"
+    " andi       %[k],            %[coef_length],  3                 \n\t"
+   "2:                                                               \n\t"
+    "lwl         %[tmp1],         1(%[p_data_in])                    \n\t"
+    "lwl         %[tmp2],         3(%[p_coefs])                      \n\t"
+    "lwl         %[tmp3],         -3(%[p_data_in])                   \n\t"
+    "lwl         %[tmp4],         7(%[p_coefs])                      \n\t"
+    "lwr         %[tmp1],         -2(%[p_data_in])                   \n\t"
+    "lwr         %[tmp2],         0(%[p_coefs])                      \n\t"
+    "lwr         %[tmp3],         -6(%[p_data_in])                   \n\t"
+    "lwr         %[tmp4],         4(%[p_coefs])                      \n\t"
+    "packrl.ph   %[tmp1],         %[tmp1],         %[tmp1]           \n\t"
+    "packrl.ph   %[tmp3],         %[tmp3],         %[tmp3]           \n\t"
+    "dpa.w.ph    $ac0,            %[tmp1],         %[tmp2]           \n\t"
+    "dpa.w.ph    $ac0,            %[tmp3],         %[tmp4]           \n\t"
+    "addiu       %[j],            %[j],            -1                \n\t"
+    "addiu       %[p_data_in],    %[p_data_in],    -8                \n\t"
+    "bgtz        %[j],            2b                                 \n\t"
+    " addiu      %[p_coefs],      %[p_coefs],      8                 \n\t"
+   "3:                                                               \n\t"
+    "beq         %[k],            $zero,           5f                \n\t"
+    " nop                                                            \n\t"
+   "4:                                                               \n\t"
+    "lhu         %[tmp1],         0(%[p_data_in])                    \n\t"
+    "lhu         %[tmp2],         0(%[p_coefs])                      \n\t"
+    "addiu       %[p_data_in],    %[p_data_in],    -2                \n\t"
+    "addiu       %[k],            %[k],            -1                \n\t"
+    "dpa.w.ph    $ac0,            %[tmp1],         %[tmp2]           \n\t"
+    "bgtz        %[k],            4b                                 \n\t"
+    " addiu      %[p_coefs],      %[p_coefs],      2                 \n\t"
+   "5:                                                               \n\t"
+    "extr_r.w    %[out_s32],      $ac0,            12                \n\t"
+    "addu        %[p_data_in_0],  %[p_data_in_0],  %[factor_2]       \n\t"
+    "subu        %[i],            %[i],            %[factor]         \n\t"
+    "shll_s.w    %[out_s32],      %[out_s32],      16                \n\t"
+    "sra         %[out_s32],      %[out_s32],      16                \n\t"
+    "sh          %[out_s32],      0(%[data_out])                     \n\t"
+    "bgtz        %[i],            1b                                 \n\t"
+    " addiu      %[data_out],     %[data_out],     2                 \n\t"
+    ".set        pop                                                 \n\t"
+    : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3),
+      [tmp4] "=&r" (tmp4), [p_data_in] "=&r" (p_data_in),
+      [p_data_in_0] "+r" (p_data_in_0), [p_coefs] "=&r" (p_coefficients),
+      [j] "=&r" (j), [out_s32] "=&r" (out_s32), [factor_2] "=&r" (factor_2),
+      [i] "=&r" (i), [k] "=&r" (k)
+    : [coef_length] "r" (coefficients_length), [data_out] "r" (data_out),
+      [p_coefs_0] "r" (p_coefficients_0), [endpos] "r" (endpos),
+      [delay] "r" (delay), [factor] "r" (factor)
+    : "memory", "hi", "lo"
+ );
+#else  // #if defined(MIPS_DSP_R2_LE)
+  __asm __volatile (
+    ".set        push                                                \n\t"
+    ".set        noreorder                                           \n\t"
+    "sll         %[factor_2],     %[factor],       1                 \n\t"
+    "subu        %[i],            %[endpos],       %[delay]          \n\t"
+   "1:                                                               \n\t"
+    "move        %[p_data_in],    %[p_data_in_0]                     \n\t"
+    "addiu       %[out_s32],      $zero,           2048              \n\t"
+    "move        %[p_coefs],      %[p_coefs_0]                       \n\t"
+    "sra         %[j],            %[coef_length],  1                 \n\t"
+    "beq         %[j],            $zero,           3f                \n\t"
+    " andi       %[k],            %[coef_length],  1                 \n\t"
+   "2:                                                               \n\t"
+    "lh          %[tmp1],         0(%[p_data_in])                    \n\t"
+    "lh          %[tmp2],         0(%[p_coefs])                      \n\t"
+    "lh          %[tmp3],         -2(%[p_data_in])                   \n\t"
+    "lh          %[tmp4],         2(%[p_coefs])                      \n\t"
+    "mul         %[tmp1],         %[tmp1],         %[tmp2]           \n\t"
+    "addiu       %[p_coefs],      %[p_coefs],      4                 \n\t"
+    "mul         %[tmp3],         %[tmp3],         %[tmp4]           \n\t"
+    "addiu       %[j],            %[j],            -1                \n\t"
+    "addiu       %[p_data_in],    %[p_data_in],    -4                \n\t"
+    "addu        %[tmp1],         %[tmp1],         %[tmp3]           \n\t"
+    "bgtz        %[j],            2b                                 \n\t"
+    " addu       %[out_s32],      %[out_s32],      %[tmp1]           \n\t"
+   "3:                                                               \n\t"
+    "beq         %[k],            $zero,           4f                \n\t"
+    " nop                                                            \n\t"
+    "lh          %[tmp1],         0(%[p_data_in])                    \n\t"
+    "lh          %[tmp2],         0(%[p_coefs])                      \n\t"
+    "mul         %[tmp1],         %[tmp1],         %[tmp2]           \n\t"
+    "addu        %[out_s32],      %[out_s32],      %[tmp1]           \n\t"
+   "4:                                                               \n\t"
+    "sra         %[out_s32],      %[out_s32],      12                \n\t"
+    "addu        %[p_data_in_0],  %[p_data_in_0],  %[factor_2]       \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shll_s.w    %[out_s32],      %[out_s32],      16                \n\t"
+    "sra         %[out_s32],      %[out_s32],      16                \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "slt         %[tmp1],         %[max_16],       %[out_s32]        \n\t"
+    "movn        %[out_s32],      %[max_16],       %[tmp1]           \n\t"
+    "slt         %[tmp1],         %[out_s32],      %[min_16]         \n\t"
+    "movn        %[out_s32],      %[min_16],       %[tmp1]           \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "subu        %[i],            %[i],            %[factor]         \n\t"
+    "sh          %[out_s32],      0(%[data_out])                     \n\t"
+    "bgtz        %[i],            1b                                 \n\t"
+    " addiu      %[data_out],     %[data_out],     2                 \n\t"
+    ".set        pop                                                 \n\t"
+    : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3),
+      [tmp4] "=&r" (tmp4), [p_data_in] "=&r" (p_data_in), [k] "=&r" (k),
+      [p_data_in_0] "+r" (p_data_in_0), [p_coefs] "=&r" (p_coefficients),
+      [j] "=&r" (j), [out_s32] "=&r" (out_s32), [factor_2] "=&r" (factor_2),
+      [i] "=&r" (i)
+    : [coef_length] "r" (coefficients_length), [data_out] "r" (data_out),
+      [p_coefs_0] "r" (p_coefficients_0), [endpos] "r" (endpos),
+#if !defined(MIPS_DSP_R1_LE)
+      [max_16] "r" (max_16), [min_16] "r" (min_16),
+#endif  // #if !defined(MIPS_DSP_R1_LE)
+      [delay] "r" (delay), [factor] "r" (factor)
+    : "memory", "hi", "lo"
+  );
+#endif  // #if defined(MIPS_DSP_R2_LE)
+  return 0;
+}
diff --git a/common_audio/signal_processing/downsample_fast_neon.c b/common_audio/signal_processing/downsample_fast_neon.c
new file mode 100644
index 0000000..36fc0c8
--- /dev/null
+++ b/common_audio/signal_processing/downsample_fast_neon.c
@@ -0,0 +1,217 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+#include <arm_neon.h>
+
+// NEON intrinsics version of WebRtcSpl_DownsampleFast()
+// for ARM 32-bit/64-bit platforms.
+int WebRtcSpl_DownsampleFastNeon(const int16_t* data_in,
+                                 size_t data_in_length,
+                                 int16_t* data_out,
+                                 size_t data_out_length,
+                                 const int16_t* __restrict coefficients,
+                                 size_t coefficients_length,
+                                 int factor,
+                                 size_t delay) {
+  size_t i = 0;
+  size_t j = 0;
+  int32_t out_s32 = 0;
+  size_t endpos = delay + factor * (data_out_length - 1) + 1;
+  size_t res = data_out_length & 0x7;
+  size_t endpos1 = endpos - factor * res;
+
+  // Return error if any of the running conditions doesn't meet.
+  if (data_out_length == 0 || coefficients_length == 0
+                           || data_in_length < endpos) {
+    return -1;
+  }
+
+  // First part, unroll the loop 8 times, with 3 subcases
+  // (factor == 2, 4, others).
+  switch (factor) {
+    case 2: {
+      for (i = delay; i < endpos1; i += 16) {
+        // Round value, 0.5 in Q12.
+        int32x4_t out32x4_0 = vdupq_n_s32(2048);
+        int32x4_t out32x4_1 = vdupq_n_s32(2048);
+
+#if defined(WEBRTC_ARCH_ARM64)
+        // Unroll the loop 2 times.
+        for (j = 0; j < coefficients_length - 1; j += 2) {
+          int32x2_t coeff32 = vld1_dup_s32((int32_t*)&coefficients[j]);
+          int16x4_t coeff16x4 = vreinterpret_s16_s32(coeff32);
+          int16x8x2_t in16x8x2 = vld2q_s16(&data_in[i - j - 1]);
+
+          // Mul and accumulate low 64-bit data.
+          int16x4_t in16x4_0 = vget_low_s16(in16x8x2.val[0]);
+          int16x4_t in16x4_1 = vget_low_s16(in16x8x2.val[1]);
+          out32x4_0 = vmlal_lane_s16(out32x4_0, in16x4_0, coeff16x4, 1);
+          out32x4_0 = vmlal_lane_s16(out32x4_0, in16x4_1, coeff16x4, 0);
+
+          // Mul and accumulate high 64-bit data.
+          // TODO: vget_high_s16 need extra cost on ARM64. This could be
+          // replaced by vmlal_high_lane_s16. But for the interface of
+          // vmlal_high_lane_s16, there is a bug in gcc 4.9.
+          // This issue need to be tracked in the future.
+          int16x4_t in16x4_2 = vget_high_s16(in16x8x2.val[0]);
+          int16x4_t in16x4_3 = vget_high_s16(in16x8x2.val[1]);
+          out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_2, coeff16x4, 1);
+          out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_3, coeff16x4, 0);
+        }
+
+        for (; j < coefficients_length; j++) {
+          int16x4_t coeff16x4 = vld1_dup_s16(&coefficients[j]);
+          int16x8x2_t in16x8x2 = vld2q_s16(&data_in[i - j]);
+
+          // Mul and accumulate low 64-bit data.
+          int16x4_t in16x4_0 = vget_low_s16(in16x8x2.val[0]);
+          out32x4_0 = vmlal_lane_s16(out32x4_0, in16x4_0, coeff16x4, 0);
+
+          // Mul and accumulate high 64-bit data.
+          // TODO: vget_high_s16 need extra cost on ARM64. This could be
+          // replaced by vmlal_high_lane_s16. But for the interface of
+          // vmlal_high_lane_s16, there is a bug in gcc 4.9.
+          // This issue need to be tracked in the future.
+          int16x4_t in16x4_1 = vget_high_s16(in16x8x2.val[0]);
+          out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_1, coeff16x4, 0);
+        }
+#else
+        // On ARMv7, the loop unrolling 2 times results in performance
+        // regression.
+        for (j = 0; j < coefficients_length; j++) {
+          int16x4_t coeff16x4 = vld1_dup_s16(&coefficients[j]);
+          int16x8x2_t in16x8x2 = vld2q_s16(&data_in[i - j]);
+
+          // Mul and accumulate.
+          int16x4_t in16x4_0 = vget_low_s16(in16x8x2.val[0]);
+          int16x4_t in16x4_1 = vget_high_s16(in16x8x2.val[0]);
+          out32x4_0 = vmlal_lane_s16(out32x4_0, in16x4_0, coeff16x4, 0);
+          out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_1, coeff16x4, 0);
+        }
+#endif
+
+        // Saturate and store the output.
+        int16x4_t out16x4_0 = vqshrn_n_s32(out32x4_0, 12);
+        int16x4_t out16x4_1 = vqshrn_n_s32(out32x4_1, 12);
+        vst1q_s16(data_out, vcombine_s16(out16x4_0, out16x4_1));
+        data_out += 8;
+      }
+      break;
+    }
+    case 4: {
+      for (i = delay; i < endpos1; i += 32) {
+        // Round value, 0.5 in Q12.
+        int32x4_t out32x4_0 = vdupq_n_s32(2048);
+        int32x4_t out32x4_1 = vdupq_n_s32(2048);
+
+        // Unroll the loop 4 times.
+        for (j = 0; j < coefficients_length - 3; j += 4) {
+          int16x4_t coeff16x4 = vld1_s16(&coefficients[j]);
+          int16x8x4_t in16x8x4 = vld4q_s16(&data_in[i - j - 3]);
+
+          // Mul and accumulate low 64-bit data.
+          int16x4_t in16x4_0 = vget_low_s16(in16x8x4.val[0]);
+          int16x4_t in16x4_2 = vget_low_s16(in16x8x4.val[1]);
+          int16x4_t in16x4_4 = vget_low_s16(in16x8x4.val[2]);
+          int16x4_t in16x4_6 = vget_low_s16(in16x8x4.val[3]);
+          out32x4_0 = vmlal_lane_s16(out32x4_0, in16x4_0, coeff16x4, 3);
+          out32x4_0 = vmlal_lane_s16(out32x4_0, in16x4_2, coeff16x4, 2);
+          out32x4_0 = vmlal_lane_s16(out32x4_0, in16x4_4, coeff16x4, 1);
+          out32x4_0 = vmlal_lane_s16(out32x4_0, in16x4_6, coeff16x4, 0);
+
+          // Mul and accumulate high 64-bit data.
+          // TODO: vget_high_s16 need extra cost on ARM64. This could be
+          // replaced by vmlal_high_lane_s16. But for the interface of
+          // vmlal_high_lane_s16, there is a bug in gcc 4.9.
+          // This issue need to be tracked in the future.
+          int16x4_t in16x4_1 = vget_high_s16(in16x8x4.val[0]);
+          int16x4_t in16x4_3 = vget_high_s16(in16x8x4.val[1]);
+          int16x4_t in16x4_5 = vget_high_s16(in16x8x4.val[2]);
+          int16x4_t in16x4_7 = vget_high_s16(in16x8x4.val[3]);
+          out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_1, coeff16x4, 3);
+          out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_3, coeff16x4, 2);
+          out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_5, coeff16x4, 1);
+          out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_7, coeff16x4, 0);
+        }
+
+        for (; j < coefficients_length; j++) {
+          int16x4_t coeff16x4 = vld1_dup_s16(&coefficients[j]);
+          int16x8x4_t in16x8x4 = vld4q_s16(&data_in[i - j]);
+
+          // Mul and accumulate low 64-bit data.
+          int16x4_t in16x4_0 = vget_low_s16(in16x8x4.val[0]);
+          out32x4_0 = vmlal_lane_s16(out32x4_0, in16x4_0, coeff16x4, 0);
+
+          // Mul and accumulate high 64-bit data.
+          // TODO: vget_high_s16 need extra cost on ARM64. This could be
+          // replaced by vmlal_high_lane_s16. But for the interface of
+          // vmlal_high_lane_s16, there is a bug in gcc 4.9.
+          // This issue need to be tracked in the future.
+          int16x4_t in16x4_1 = vget_high_s16(in16x8x4.val[0]);
+          out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_1, coeff16x4, 0);
+        }
+
+        // Saturate and store the output.
+        int16x4_t out16x4_0 = vqshrn_n_s32(out32x4_0, 12);
+        int16x4_t out16x4_1 = vqshrn_n_s32(out32x4_1, 12);
+        vst1q_s16(data_out, vcombine_s16(out16x4_0, out16x4_1));
+        data_out += 8;
+      }
+      break;
+    }
+    default: {
+      for (i = delay; i < endpos1; i += factor * 8) {
+        // Round value, 0.5 in Q12.
+        int32x4_t out32x4_0 = vdupq_n_s32(2048);
+        int32x4_t out32x4_1 = vdupq_n_s32(2048);
+
+        for (j = 0; j < coefficients_length; j++) {
+          int16x4_t coeff16x4 = vld1_dup_s16(&coefficients[j]);
+          int16x4_t in16x4_0 = vld1_dup_s16(&data_in[i - j]);
+          in16x4_0 = vld1_lane_s16(&data_in[i + factor - j], in16x4_0, 1);
+          in16x4_0 = vld1_lane_s16(&data_in[i + factor * 2 - j], in16x4_0, 2);
+          in16x4_0 = vld1_lane_s16(&data_in[i + factor * 3 - j], in16x4_0, 3);
+          int16x4_t in16x4_1 = vld1_dup_s16(&data_in[i + factor * 4 - j]);
+          in16x4_1 = vld1_lane_s16(&data_in[i + factor * 5 - j], in16x4_1, 1);
+          in16x4_1 = vld1_lane_s16(&data_in[i + factor * 6 - j], in16x4_1, 2);
+          in16x4_1 = vld1_lane_s16(&data_in[i + factor * 7 - j], in16x4_1, 3);
+
+          // Mul and accumulate.
+          out32x4_0 = vmlal_lane_s16(out32x4_0, in16x4_0, coeff16x4, 0);
+          out32x4_1 = vmlal_lane_s16(out32x4_1, in16x4_1, coeff16x4, 0);
+        }
+
+        // Saturate and store the output.
+        int16x4_t out16x4_0 = vqshrn_n_s32(out32x4_0, 12);
+        int16x4_t out16x4_1 = vqshrn_n_s32(out32x4_1, 12);
+        vst1q_s16(data_out, vcombine_s16(out16x4_0, out16x4_1));
+        data_out += 8;
+      }
+      break;
+    }
+  }
+
+  // Second part, do the rest iterations (if any).
+  for (; i < endpos; i += factor) {
+    out_s32 = 2048;  // Round value, 0.5 in Q12.
+
+    for (j = 0; j < coefficients_length; j++) {
+      out_s32 = WebRtc_MulAccumW16(coefficients[j], data_in[i - j], out_s32);
+    }
+
+    // Saturate and store the output.
+    out_s32 >>= 12;
+    *data_out++ = WebRtcSpl_SatW32ToW16(out_s32);
+  }
+
+  return 0;
+}
diff --git a/common_audio/signal_processing/energy.c b/common_audio/signal_processing/energy.c
new file mode 100644
index 0000000..5cce6b8
--- /dev/null
+++ b/common_audio/signal_processing/energy.c
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains the function WebRtcSpl_Energy().
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+int32_t WebRtcSpl_Energy(int16_t* vector,
+                         size_t vector_length,
+                         int* scale_factor)
+{
+    int32_t en = 0;
+    size_t i;
+    int scaling =
+        WebRtcSpl_GetScalingSquare(vector, vector_length, vector_length);
+    size_t looptimes = vector_length;
+    int16_t *vectorptr = vector;
+
+    for (i = 0; i < looptimes; i++)
+    {
+      en += (*vectorptr * *vectorptr) >> scaling;
+      vectorptr++;
+    }
+    *scale_factor = scaling;
+
+    return en;
+}
diff --git a/common_audio/signal_processing/filter_ar.c b/common_audio/signal_processing/filter_ar.c
new file mode 100644
index 0000000..49d5d61
--- /dev/null
+++ b/common_audio/signal_processing/filter_ar.c
@@ -0,0 +1,89 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains the function WebRtcSpl_FilterAR().
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+size_t WebRtcSpl_FilterAR(const int16_t* a,
+                          size_t a_length,
+                          const int16_t* x,
+                          size_t x_length,
+                          int16_t* state,
+                          size_t state_length,
+                          int16_t* state_low,
+                          size_t state_low_length,
+                          int16_t* filtered,
+                          int16_t* filtered_low,
+                          size_t filtered_low_length)
+{
+    int64_t o;
+    int32_t oLOW;
+    size_t i, j, stop;
+    const int16_t* x_ptr = &x[0];
+    int16_t* filteredFINAL_ptr = filtered;
+    int16_t* filteredFINAL_LOW_ptr = filtered_low;
+
+    for (i = 0; i < x_length; i++)
+    {
+        // Calculate filtered[i] and filtered_low[i]
+        const int16_t* a_ptr = &a[1];
+        int16_t* filtered_ptr = &filtered[i - 1];
+        int16_t* filtered_low_ptr = &filtered_low[i - 1];
+        int16_t* state_ptr = &state[state_length - 1];
+        int16_t* state_low_ptr = &state_low[state_length - 1];
+
+        o = (int32_t)(*x_ptr++) * (1 << 12);
+        oLOW = (int32_t)0;
+
+        stop = (i < a_length) ? i + 1 : a_length;
+        for (j = 1; j < stop; j++)
+        {
+          o -= *a_ptr * *filtered_ptr--;
+          oLOW -= *a_ptr++ * *filtered_low_ptr--;
+        }
+        for (j = i + 1; j < a_length; j++)
+        {
+          o -= *a_ptr * *state_ptr--;
+          oLOW -= *a_ptr++ * *state_low_ptr--;
+        }
+
+        o += (oLOW >> 12);
+        *filteredFINAL_ptr = (int16_t)((o + (int32_t)2048) >> 12);
+        *filteredFINAL_LOW_ptr++ =
+            (int16_t)(o - ((int32_t)(*filteredFINAL_ptr++) * (1 << 12)));
+    }
+
+    // Save the filter state
+    if (x_length >= state_length)
+    {
+        WebRtcSpl_CopyFromEndW16(filtered, x_length, a_length - 1, state);
+        WebRtcSpl_CopyFromEndW16(filtered_low, x_length, a_length - 1, state_low);
+    } else
+    {
+        for (i = 0; i < state_length - x_length; i++)
+        {
+            state[i] = state[i + x_length];
+            state_low[i] = state_low[i + x_length];
+        }
+        for (i = 0; i < x_length; i++)
+        {
+            state[state_length - x_length + i] = filtered[i];
+            state[state_length - x_length + i] = filtered_low[i];
+        }
+    }
+
+    return x_length;
+}
diff --git a/common_audio/signal_processing/filter_ar_fast_q12.c b/common_audio/signal_processing/filter_ar_fast_q12.c
new file mode 100644
index 0000000..df9e518
--- /dev/null
+++ b/common_audio/signal_processing/filter_ar_fast_q12.c
@@ -0,0 +1,42 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/checks.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+// TODO(bjornv): Change the return type to report errors.
+
+void WebRtcSpl_FilterARFastQ12(const int16_t* data_in,
+                               int16_t* data_out,
+                               const int16_t* __restrict coefficients,
+                               size_t coefficients_length,
+                               size_t data_length) {
+  size_t i = 0;
+  size_t j = 0;
+
+  RTC_DCHECK_GT(data_length, 0);
+  RTC_DCHECK_GT(coefficients_length, 1);
+
+  for (i = 0; i < data_length; i++) {
+    int64_t output = 0;
+    int64_t sum = 0;
+
+    for (j = coefficients_length - 1; j > 0; j--) {
+      sum += coefficients[j] * data_out[i - j];
+    }
+
+    output = coefficients[0] * data_in[i];
+    output -= sum;
+
+    // Saturate and store the output.
+    output = WEBRTC_SPL_SAT(134215679, output, -134217728);
+    data_out[i] = (int16_t)((output + 2048) >> 12);
+  }
+}
diff --git a/common_audio/signal_processing/filter_ar_fast_q12_armv7.S b/common_audio/signal_processing/filter_ar_fast_q12_armv7.S
new file mode 100644
index 0000000..c6397c2
--- /dev/null
+++ b/common_audio/signal_processing/filter_ar_fast_q12_armv7.S
@@ -0,0 +1,218 @@
+@
+@ Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+@
+@ Use of this source code is governed by a BSD-style license
+@ that can be found in the LICENSE file in the root of the source
+@ tree. An additional intellectual property rights grant can be found
+@ in the file PATENTS.  All contributing project authors may
+@ be found in the AUTHORS file in the root of the source tree.
+@
+
+@ This file contains the function WebRtcSpl_FilterARFastQ12(), optimized for
+@ ARMv7  platform. The description header can be found in
+@ signal_processing_library.h
+@
+@ Output is bit-exact with the generic C code as in filter_ar_fast_q12.c, and
+@ the reference C code at end of this file.
+
+@ Assumptions:
+@ (1) data_length > 0
+@ (2) coefficients_length > 1
+
+@ Register usage:
+@
+@ r0:  &data_in[i]
+@ r1:  &data_out[i], for result ouput
+@ r2:  &coefficients[0]
+@ r3:  coefficients_length
+@ r4:  Iteration counter for the outer loop.
+@ r5:  data_out[j] as multiplication inputs
+@ r6:  Calculated value for output data_out[]; interation counter for inner loop
+@ r7:  Partial sum of a filtering multiplication results
+@ r8:  Partial sum of a filtering multiplication results
+@ r9:  &data_out[], for filtering input; data_in[i]
+@ r10: coefficients[j]
+@ r11: Scratch
+@ r12: &coefficients[j]
+
+#include "system_wrappers/include/asm_defines.h"
+
+GLOBAL_FUNCTION WebRtcSpl_FilterARFastQ12
+.align  2
+DEFINE_FUNCTION WebRtcSpl_FilterARFastQ12
+  push {r4-r11}
+
+  ldrsh r12, [sp, #32]         @ data_length
+  subs r4, r12, #1
+  beq ODD_LENGTH               @ jump if data_length == 1
+
+LOOP_LENGTH:
+  add r12, r2, r3, lsl #1
+  sub r12, #4                  @ &coefficients[coefficients_length - 2]
+  sub r9, r1, r3, lsl #1
+  add r9, #2                   @ &data_out[i - coefficients_length + 1]
+  ldr r5, [r9], #4             @ data_out[i - coefficients_length + {1,2}]
+
+  mov r7, #0                   @ sum1
+  mov r8, #0                   @ sum2
+  subs r6, r3, #3              @ Iteration counter for inner loop.
+  beq ODD_A_LENGTH             @ branch if coefficients_length == 3
+  blt POST_LOOP_A_LENGTH       @ branch if coefficients_length == 2
+
+LOOP_A_LENGTH:
+  ldr r10, [r12], #-4          @ coefficients[j - 1], coefficients[j]
+  subs r6, #2
+  smlatt r8, r10, r5, r8       @ sum2 += coefficients[j] * data_out[i - j + 1];
+  smlatb r7, r10, r5, r7       @ sum1 += coefficients[j] * data_out[i - j];
+  smlabt r7, r10, r5, r7       @ coefficients[j - 1] * data_out[i - j + 1];
+  ldr r5, [r9], #4             @ data_out[i - j + 2],  data_out[i - j + 3]
+  smlabb r8, r10, r5, r8       @ coefficients[j - 1] * data_out[i - j + 2];
+  bgt LOOP_A_LENGTH
+  blt POST_LOOP_A_LENGTH
+
+ODD_A_LENGTH:
+  ldrsh r10, [r12, #2]         @ Filter coefficients coefficients[2]
+  sub r12, #2                  @ &coefficients[0]
+  smlabb r7, r10, r5, r7       @ sum1 += coefficients[2] * data_out[i - 2];
+  smlabt r8, r10, r5, r8       @ sum2 += coefficients[2] * data_out[i - 1];
+  ldr r5, [r9, #-2]            @ data_out[i - 1],  data_out[i]
+
+POST_LOOP_A_LENGTH:
+  ldr r10, [r12]               @ coefficients[0], coefficients[1]
+  smlatb r7, r10, r5, r7       @ sum1 += coefficients[1] * data_out[i - 1];
+
+  ldr r9, [r0], #4             @ data_in[i], data_in[i + 1]
+  smulbb r6, r10, r9           @ output1 = coefficients[0] * data_in[i];
+  sub r6, r7                   @ output1 -= sum1;
+
+  sbfx r11, r6, #12, #16
+  ssat r7, #16, r6, asr #12
+  cmp r7, r11
+  addeq r6, r6, #2048
+  ssat r6, #16, r6, asr #12
+  strh r6, [r1], #2            @ Store data_out[i]
+
+  smlatb r8, r10, r6, r8       @ sum2 += coefficients[1] * data_out[i];
+  smulbt r6, r10, r9           @ output2 = coefficients[0] * data_in[i + 1];
+  sub r6, r8                   @ output1 -= sum1;
+
+  sbfx r11, r6, #12, #16
+  ssat r7, #16, r6, asr #12
+  cmp r7, r11
+  addeq r6, r6, #2048
+  ssat r6, #16, r6, asr #12
+  strh r6, [r1], #2            @ Store data_out[i + 1]
+
+  subs r4, #2
+  bgt LOOP_LENGTH
+  blt END                      @ For even data_length, it's done. Jump to END.
+
+@ Process i = data_length -1, for the case of an odd length.
+ODD_LENGTH:
+  add r12, r2, r3, lsl #1
+  sub r12, #4                  @ &coefficients[coefficients_length - 2]
+  sub r9, r1, r3, lsl #1
+  add r9, #2                   @ &data_out[i - coefficients_length + 1]
+  mov r7, #0                   @ sum1
+  mov r8, #0                   @ sum1
+  subs r6, r3, #2              @ inner loop counter
+  beq EVEN_A_LENGTH            @ branch if coefficients_length == 2
+
+LOOP2_A_LENGTH:
+  ldr r10, [r12], #-4          @ coefficients[j - 1], coefficients[j]
+  ldr r5, [r9], #4             @ data_out[i - j],  data_out[i - j + 1]
+  subs r6, #2
+  smlatb r7, r10, r5, r7       @ sum1 += coefficients[j] * data_out[i - j];
+  smlabt r8, r10, r5, r8       @ coefficients[j - 1] * data_out[i - j + 1];
+  bgt LOOP2_A_LENGTH
+  addlt r12, #2
+  blt POST_LOOP2_A_LENGTH
+
+EVEN_A_LENGTH:
+  ldrsh r10, [r12, #2]         @ Filter coefficients coefficients[1]
+  ldrsh r5, [r9]               @ data_out[i - 1]
+  smlabb r7, r10, r5, r7       @ sum1 += coefficients[1] * data_out[i - 1];
+
+POST_LOOP2_A_LENGTH:
+  ldrsh r10, [r12]             @ Filter coefficients coefficients[0]
+  ldrsh r9, [r0]               @ data_in[i]
+  smulbb r6, r10, r9           @ output1 = coefficients[0] * data_in[i];
+  sub r6, r7                   @ output1 -= sum1;
+  sub r6, r8                   @ output1 -= sum1;
+  sbfx r8, r6, #12, #16
+  ssat r7, #16, r6, asr #12
+  cmp r7, r8
+  addeq r6, r6, #2048
+  ssat r6, #16, r6, asr #12
+  strh r6, [r1]                @ Store the data_out[i]
+
+END:
+  pop {r4-r11}
+  bx  lr
+
+@Reference C code:
+@
+@void WebRtcSpl_FilterARFastQ12(int16_t* data_in,
+@                               int16_t* data_out,
+@                               int16_t* __restrict coefficients,
+@                               size_t coefficients_length,
+@                               size_t data_length) {
+@  size_t i = 0;
+@  size_t j = 0;
+@
+@  assert(data_length > 0);
+@  assert(coefficients_length > 1);
+@
+@  for (i = 0; i < data_length - 1; i += 2) {
+@    int32_t output1 = 0;
+@    int32_t sum1 = 0;
+@    int32_t output2 = 0;
+@    int32_t sum2 = 0;
+@
+@    for (j = coefficients_length - 1; j > 2; j -= 2) {
+@      sum1 += coefficients[j]      * data_out[i - j];
+@      sum1 += coefficients[j - 1]  * data_out[i - j + 1];
+@      sum2 += coefficients[j]     * data_out[i - j + 1];
+@      sum2 += coefficients[j - 1] * data_out[i - j + 2];
+@    }
+@
+@    if (j == 2) {
+@      sum1 += coefficients[2] * data_out[i - 2];
+@      sum2 += coefficients[2] * data_out[i - 1];
+@    }
+@
+@    sum1 += coefficients[1] * data_out[i - 1];
+@    output1 = coefficients[0] * data_in[i];
+@    output1 -= sum1;
+@    // Saturate and store the output.
+@    output1 = WEBRTC_SPL_SAT(134215679, output1, -134217728);
+@    data_out[i] = (int16_t)((output1 + 2048) >> 12);
+@
+@    sum2 += coefficients[1] * data_out[i];
+@    output2 = coefficients[0] * data_in[i + 1];
+@    output2 -= sum2;
+@    // Saturate and store the output.
+@    output2 = WEBRTC_SPL_SAT(134215679, output2, -134217728);
+@    data_out[i + 1] = (int16_t)((output2 + 2048) >> 12);
+@  }
+@
+@  if (i == data_length - 1) {
+@    int32_t output1 = 0;
+@    int32_t sum1 = 0;
+@
+@    for (j = coefficients_length - 1; j > 1; j -= 2) {
+@      sum1 += coefficients[j]      * data_out[i - j];
+@      sum1 += coefficients[j - 1]  * data_out[i - j + 1];
+@    }
+@
+@    if (j == 1) {
+@      sum1 += coefficients[1] * data_out[i - 1];
+@    }
+@
+@    output1 = coefficients[0] * data_in[i];
+@    output1 -= sum1;
+@    // Saturate and store the output.
+@    output1 = WEBRTC_SPL_SAT(134215679, output1, -134217728);
+@    data_out[i] = (int16_t)((output1 + 2048) >> 12);
+@  }
+@}
diff --git a/common_audio/signal_processing/filter_ar_fast_q12_mips.c b/common_audio/signal_processing/filter_ar_fast_q12_mips.c
new file mode 100644
index 0000000..b9ad30f
--- /dev/null
+++ b/common_audio/signal_processing/filter_ar_fast_q12_mips.c
@@ -0,0 +1,140 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/checks.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+void WebRtcSpl_FilterARFastQ12(const int16_t* data_in,
+                               int16_t* data_out,
+                               const int16_t* __restrict coefficients,
+                               size_t coefficients_length,
+                               size_t data_length) {
+  int r0, r1, r2, r3;
+  int coef0, offset;
+  int i, j, k;
+  int coefptr, outptr, tmpout, inptr;
+#if !defined(MIPS_DSP_R1_LE)
+  int max16 = 0x7FFF;
+  int min16 = 0xFFFF8000;
+#endif  // #if !defined(MIPS_DSP_R1_LE)
+
+  RTC_DCHECK_GT(data_length, 0);
+  RTC_DCHECK_GT(coefficients_length, 1);
+
+  __asm __volatile (
+    ".set       push                                             \n\t"
+    ".set       noreorder                                        \n\t"
+    "addiu      %[i],       %[data_length],          0           \n\t"
+    "lh         %[coef0],   0(%[coefficients])                   \n\t"
+    "addiu      %[j],       %[coefficients_length],  -1          \n\t"
+    "andi       %[k],       %[j],                    1           \n\t"
+    "sll        %[offset],  %[j],                    1           \n\t"
+    "subu       %[outptr],  %[data_out],             %[offset]   \n\t"
+    "addiu      %[inptr],   %[data_in],              0           \n\t"
+    "bgtz       %[k],       3f                                   \n\t"
+    " addu      %[coefptr], %[coefficients],         %[offset]   \n\t"
+   "1:                                                           \n\t"
+    "lh         %[r0],      0(%[inptr])                          \n\t"
+    "addiu      %[i],       %[i],                    -1          \n\t"
+    "addiu      %[tmpout],  %[outptr],               0           \n\t"
+    "mult       %[r0],      %[coef0]                             \n\t"
+   "2:                                                           \n\t"
+    "lh         %[r0],      0(%[tmpout])                         \n\t"
+    "lh         %[r1],      0(%[coefptr])                        \n\t"
+    "lh         %[r2],      2(%[tmpout])                         \n\t"
+    "lh         %[r3],      -2(%[coefptr])                       \n\t"
+    "addiu      %[tmpout],  %[tmpout],               4           \n\t"
+    "msub       %[r0],      %[r1]                                \n\t"
+    "msub       %[r2],      %[r3]                                \n\t"
+    "addiu      %[j],       %[j],                    -2          \n\t"
+    "bgtz       %[j],       2b                                   \n\t"
+    " addiu     %[coefptr], %[coefptr],              -4          \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "extr_r.w   %[r0],      $ac0,                    12          \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "mflo       %[r0]                                            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "addu       %[coefptr], %[coefficients],         %[offset]   \n\t"
+    "addiu      %[inptr],   %[inptr],                2           \n\t"
+    "addiu      %[j],       %[coefficients_length],  -1          \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shll_s.w   %[r0],      %[r0],                   16          \n\t"
+    "sra        %[r0],      %[r0],                   16          \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu      %[r0],      %[r0],                   2048        \n\t"
+    "sra        %[r0],      %[r0],                   12          \n\t"
+    "slt        %[r1],      %[max16],                %[r0]       \n\t"
+    "movn       %[r0],      %[max16],                %[r1]       \n\t"
+    "slt        %[r1],      %[r0],                   %[min16]    \n\t"
+    "movn       %[r0],      %[min16],                %[r1]       \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sh         %[r0],      0(%[tmpout])                         \n\t"
+    "bgtz       %[i],       1b                                   \n\t"
+    " addiu     %[outptr],  %[outptr],               2           \n\t"
+    "b          5f                                               \n\t"
+    " nop                                                        \n\t"
+   "3:                                                           \n\t"
+    "lh         %[r0],      0(%[inptr])                          \n\t"
+    "addiu      %[i],       %[i],                    -1          \n\t"
+    "addiu      %[tmpout],  %[outptr],               0           \n\t"
+    "mult       %[r0],      %[coef0]                             \n\t"
+   "4:                                                           \n\t"
+    "lh         %[r0],      0(%[tmpout])                         \n\t"
+    "lh         %[r1],      0(%[coefptr])                        \n\t"
+    "lh         %[r2],      2(%[tmpout])                         \n\t"
+    "lh         %[r3],      -2(%[coefptr])                       \n\t"
+    "addiu      %[tmpout],  %[tmpout],               4           \n\t"
+    "msub       %[r0],      %[r1]                                \n\t"
+    "msub       %[r2],      %[r3]                                \n\t"
+    "addiu      %[j],       %[j],                    -2          \n\t"
+    "bgtz       %[j],       4b                                   \n\t"
+    " addiu     %[coefptr], %[coefptr],              -4          \n\t"
+    "lh         %[r0],      0(%[tmpout])                         \n\t"
+    "lh         %[r1],      0(%[coefptr])                        \n\t"
+    "msub       %[r0],      %[r1]                                \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "extr_r.w   %[r0],      $ac0,                    12          \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "mflo       %[r0]                                            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "addu       %[coefptr], %[coefficients],         %[offset]   \n\t"
+    "addiu      %[inptr],   %[inptr],                2           \n\t"
+    "addiu      %[j],       %[coefficients_length],  -1          \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shll_s.w   %[r0],      %[r0],                   16          \n\t"
+    "sra        %[r0],      %[r0],                   16          \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu      %[r0],      %[r0],                   2048        \n\t"
+    "sra        %[r0],      %[r0],                   12          \n\t"
+    "slt        %[r1],      %[max16],                %[r0]       \n\t"
+    "movn       %[r0],      %[max16],                %[r1]       \n\t"
+    "slt        %[r1],      %[r0],                   %[min16]    \n\t"
+    "movn       %[r0],      %[min16],                %[r1]       \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sh         %[r0],      2(%[tmpout])                         \n\t"
+    "bgtz       %[i],       3b                                   \n\t"
+    " addiu     %[outptr],  %[outptr],               2           \n\t"
+   "5:                                                           \n\t"
+    ".set       pop                                              \n\t"
+    : [i] "=&r" (i), [j] "=&r" (j), [k] "=&r" (k), [r0] "=&r" (r0),
+      [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+      [coef0] "=&r" (coef0), [offset] "=&r" (offset),
+      [outptr] "=&r" (outptr), [inptr] "=&r" (inptr),
+      [coefptr] "=&r" (coefptr), [tmpout] "=&r" (tmpout)
+    : [coefficients] "r" (coefficients), [data_length] "r" (data_length),
+      [coefficients_length] "r" (coefficients_length),
+#if !defined(MIPS_DSP_R1_LE)
+      [max16] "r" (max16), [min16] "r" (min16),
+#endif
+      [data_out] "r" (data_out), [data_in] "r" (data_in)
+    : "hi", "lo", "memory"
+  );
+}
+
diff --git a/common_audio/signal_processing/filter_ma_fast_q12.c b/common_audio/signal_processing/filter_ma_fast_q12.c
new file mode 100644
index 0000000..9596ef1
--- /dev/null
+++ b/common_audio/signal_processing/filter_ma_fast_q12.c
@@ -0,0 +1,52 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains the function WebRtcSpl_FilterMAFastQ12().
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+#include "rtc_base/sanitizer.h"
+
+void WebRtcSpl_FilterMAFastQ12(const int16_t* in_ptr,
+                               int16_t* out_ptr,
+                               const int16_t* B,
+                               size_t B_length,
+                               size_t length)
+{
+    size_t i, j;
+
+    rtc_MsanCheckInitialized(B, sizeof(B[0]), B_length);
+    rtc_MsanCheckInitialized(in_ptr - B_length + 1, sizeof(in_ptr[0]),
+                             B_length + length - 1);
+
+    for (i = 0; i < length; i++)
+    {
+        int32_t o = 0;
+
+        for (j = 0; j < B_length; j++)
+        {
+          o += B[j] * in_ptr[i - j];
+        }
+
+        // If output is higher than 32768, saturate it. Same with negative side
+        // 2^27 = 134217728, which corresponds to 32768 in Q12
+
+        // Saturate the output
+        o = WEBRTC_SPL_SAT((int32_t)134215679, o, (int32_t)-134217728);
+
+        *out_ptr++ = (int16_t)((o + (int32_t)2048) >> 12);
+    }
+    return;
+}
diff --git a/common_audio/signal_processing/get_hanning_window.c b/common_audio/signal_processing/get_hanning_window.c
new file mode 100644
index 0000000..8f29da8
--- /dev/null
+++ b/common_audio/signal_processing/get_hanning_window.c
@@ -0,0 +1,77 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains the function WebRtcSpl_GetHanningWindow().
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+// Hanning table with 256 entries
+static const int16_t kHanningTable[] = {
+    1,      2,      6,     10,     15,     22,     30,     39,
+   50,     62,     75,     89,    104,    121,    138,    157,
+  178,    199,    222,    246,    271,    297,    324,    353,
+  383,    413,    446,    479,    513,    549,    586,    624,
+  663,    703,    744,    787,    830,    875,    920,    967,
+ 1015,   1064,   1114,   1165,   1218,   1271,   1325,   1381,
+ 1437,   1494,   1553,   1612,   1673,   1734,   1796,   1859,
+ 1924,   1989,   2055,   2122,   2190,   2259,   2329,   2399,
+ 2471,   2543,   2617,   2691,   2765,   2841,   2918,   2995,
+ 3073,   3152,   3232,   3312,   3393,   3475,   3558,   3641,
+ 3725,   3809,   3895,   3980,   4067,   4154,   4242,   4330,
+ 4419,   4509,   4599,   4689,   4781,   4872,   4964,   5057,
+ 5150,   5244,   5338,   5432,   5527,   5622,   5718,   5814,
+ 5910,   6007,   6104,   6202,   6299,   6397,   6495,   6594,
+ 6693,   6791,   6891,   6990,   7090,   7189,   7289,   7389,
+ 7489,   7589,   7690,   7790,   7890,   7991,   8091,   8192,
+ 8293,   8393,   8494,   8594,   8694,   8795,   8895,   8995,
+ 9095,   9195,   9294,   9394,   9493,   9593,   9691,   9790,
+ 9889,   9987,  10085,  10182,  10280,  10377,  10474,  10570,
+10666,  10762,  10857,  10952,  11046,  11140,  11234,  11327,
+11420,  11512,  11603,  11695,  11785,  11875,  11965,  12054,
+12142,  12230,  12317,  12404,  12489,  12575,  12659,  12743,
+12826,  12909,  12991,  13072,  13152,  13232,  13311,  13389,
+13466,  13543,  13619,  13693,  13767,  13841,  13913,  13985,
+14055,  14125,  14194,  14262,  14329,  14395,  14460,  14525,
+14588,  14650,  14711,  14772,  14831,  14890,  14947,  15003,
+15059,  15113,  15166,  15219,  15270,  15320,  15369,  15417,
+15464,  15509,  15554,  15597,  15640,  15681,  15721,  15760,
+15798,  15835,  15871,  15905,  15938,  15971,  16001,  16031,
+16060,  16087,  16113,  16138,  16162,  16185,  16206,  16227,
+16246,  16263,  16280,  16295,  16309,  16322,  16334,  16345,
+16354,  16362,  16369,  16374,  16378,  16382,  16383,  16384
+};
+
+void WebRtcSpl_GetHanningWindow(int16_t *v, size_t size)
+{
+    size_t jj;
+    int16_t *vptr1;
+
+    int32_t index;
+    int32_t factor = ((int32_t)0x40000000);
+
+    factor = WebRtcSpl_DivW32W16(factor, (int16_t)size);
+    if (size < 513)
+        index = (int32_t)-0x200000;
+    else
+        index = (int32_t)-0x100000;
+    vptr1 = v;
+
+    for (jj = 0; jj < size; jj++)
+    {
+        index += factor;
+        (*vptr1++) = kHanningTable[index >> 22];
+    }
+
+}
diff --git a/common_audio/signal_processing/get_scaling_square.c b/common_audio/signal_processing/get_scaling_square.c
new file mode 100644
index 0000000..4eb1269
--- /dev/null
+++ b/common_audio/signal_processing/get_scaling_square.c
@@ -0,0 +1,46 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains the function WebRtcSpl_GetScalingSquare().
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+int16_t WebRtcSpl_GetScalingSquare(int16_t* in_vector,
+                                   size_t in_vector_length,
+                                   size_t times)
+{
+    int16_t nbits = WebRtcSpl_GetSizeInBits((uint32_t)times);
+    size_t i;
+    int16_t smax = -1;
+    int16_t sabs;
+    int16_t *sptr = in_vector;
+    int16_t t;
+    size_t looptimes = in_vector_length;
+
+    for (i = looptimes; i > 0; i--)
+    {
+        sabs = (*sptr > 0 ? *sptr++ : -*sptr++);
+        smax = (sabs > smax ? sabs : smax);
+    }
+    t = WebRtcSpl_NormW32(WEBRTC_SPL_MUL(smax, smax));
+
+    if (smax == 0)
+    {
+        return 0; // Since norm(0) returns 0
+    } else
+    {
+        return (t > nbits) ? 0 : nbits - t;
+    }
+}
diff --git a/common_audio/signal_processing/ilbc_specific_functions.c b/common_audio/signal_processing/ilbc_specific_functions.c
new file mode 100644
index 0000000..b75b705
--- /dev/null
+++ b/common_audio/signal_processing/ilbc_specific_functions.c
@@ -0,0 +1,90 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains implementations of the iLBC specific functions
+ * WebRtcSpl_ReverseOrderMultArrayElements()
+ * WebRtcSpl_ElementwiseVectorMult()
+ * WebRtcSpl_AddVectorsAndShift()
+ * WebRtcSpl_AddAffineVectorToVector()
+ * WebRtcSpl_AffineTransformVector()
+ *
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+void WebRtcSpl_ReverseOrderMultArrayElements(int16_t *out, const int16_t *in,
+                                             const int16_t *win,
+                                             size_t vector_length,
+                                             int16_t right_shifts)
+{
+    size_t i;
+    int16_t *outptr = out;
+    const int16_t *inptr = in;
+    const int16_t *winptr = win;
+    for (i = 0; i < vector_length; i++)
+    {
+      *outptr++ = (int16_t)((*inptr++ * *winptr--) >> right_shifts);
+    }
+}
+
+void WebRtcSpl_ElementwiseVectorMult(int16_t *out, const int16_t *in,
+                                     const int16_t *win, size_t vector_length,
+                                     int16_t right_shifts)
+{
+    size_t i;
+    int16_t *outptr = out;
+    const int16_t *inptr = in;
+    const int16_t *winptr = win;
+    for (i = 0; i < vector_length; i++)
+    {
+      *outptr++ = (int16_t)((*inptr++ * *winptr++) >> right_shifts);
+    }
+}
+
+void WebRtcSpl_AddVectorsAndShift(int16_t *out, const int16_t *in1,
+                                  const int16_t *in2, size_t vector_length,
+                                  int16_t right_shifts)
+{
+    size_t i;
+    int16_t *outptr = out;
+    const int16_t *in1ptr = in1;
+    const int16_t *in2ptr = in2;
+    for (i = vector_length; i > 0; i--)
+    {
+        (*outptr++) = (int16_t)(((*in1ptr++) + (*in2ptr++)) >> right_shifts);
+    }
+}
+
+void WebRtcSpl_AddAffineVectorToVector(int16_t *out, int16_t *in,
+                                       int16_t gain, int32_t add_constant,
+                                       int16_t right_shifts,
+                                       size_t vector_length)
+{
+    size_t i;
+
+    for (i = 0; i < vector_length; i++)
+    {
+      out[i] += (int16_t)((in[i] * gain + add_constant) >> right_shifts);
+    }
+}
+
+void WebRtcSpl_AffineTransformVector(int16_t *out, int16_t *in,
+                                     int16_t gain, int32_t add_constant,
+                                     int16_t right_shifts, size_t vector_length)
+{
+    size_t i;
+
+    for (i = 0; i < vector_length; i++)
+    {
+      out[i] = (int16_t)((in[i] * gain + add_constant) >> right_shifts);
+    }
+}
diff --git a/common_audio/signal_processing/include/real_fft.h b/common_audio/signal_processing/include/real_fft.h
new file mode 100644
index 0000000..7d21072
--- /dev/null
+++ b/common_audio/signal_processing/include/real_fft.h
@@ -0,0 +1,97 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_REAL_FFT_H_
+#define COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_REAL_FFT_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+// For ComplexFFT(), the maximum fft order is 10;
+// for OpenMax FFT in ARM, it is 12;
+// WebRTC APM uses orders of only 7 and 8.
+enum {kMaxFFTOrder = 10};
+
+struct RealFFT;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct RealFFT* WebRtcSpl_CreateRealFFT(int order);
+void WebRtcSpl_FreeRealFFT(struct RealFFT* self);
+
+// Compute an FFT for a real-valued signal of length of 2^order,
+// where 1 < order <= MAX_FFT_ORDER. Transform length is determined by the
+// specification structure, which must be initialized prior to calling the FFT
+// function with WebRtcSpl_CreateRealFFT().
+// The relationship between the input and output sequences can
+// be expressed in terms of the DFT, i.e.:
+//     x[n] = (2^(-scalefactor)/N)  . SUM[k=0,...,N-1] X[k].e^(jnk.2.pi/N)
+//     n=0,1,2,...N-1
+//     N=2^order.
+// The conjugate-symmetric output sequence is represented using a CCS vector,
+// which is of length N+2, and is organized as follows:
+//     Index:      0  1  2  3  4  5   . . .   N-2       N-1       N       N+1
+//     Component:  R0 0  R1 I1 R2 I2  . . .   R[N/2-1]  I[N/2-1]  R[N/2]  0
+// where R[n] and I[n], respectively, denote the real and imaginary components
+// for FFT bin 'n'. Bins  are numbered from 0 to N/2, where N is the FFT length.
+// Bin index 0 corresponds to the DC component, and bin index N/2 corresponds to
+// the foldover frequency.
+//
+// Input Arguments:
+//   self - pointer to preallocated and initialized FFT specification structure.
+//   real_data_in - the input signal. For an ARM Neon platform, it must be
+//                  aligned on a 32-byte boundary.
+//
+// Output Arguments:
+//   complex_data_out - the output complex signal with (2^order + 2) 16-bit
+//                      elements. For an ARM Neon platform, it must be different
+//                      from real_data_in, and aligned on a 32-byte boundary.
+//
+// Return Value:
+//   0  - FFT calculation is successful.
+//   -1 - Error with bad arguments (null pointers).
+int WebRtcSpl_RealForwardFFT(struct RealFFT* self,
+                             const int16_t* real_data_in,
+                             int16_t* complex_data_out);
+
+// Compute the inverse FFT for a conjugate-symmetric input sequence of length of
+// 2^order, where 1 < order <= MAX_FFT_ORDER. Transform length is determined by
+// the specification structure, which must be initialized prior to calling the
+// FFT function with WebRtcSpl_CreateRealFFT().
+// For a transform of length M, the input sequence is represented using a packed
+// CCS vector of length M+2, which is explained in the comments for
+// WebRtcSpl_RealForwardFFTC above.
+//
+// Input Arguments:
+//   self - pointer to preallocated and initialized FFT specification structure.
+//   complex_data_in - the input complex signal with (2^order + 2) 16-bit
+//                     elements. For an ARM Neon platform, it must be aligned on
+//                     a 32-byte boundary.
+//
+// Output Arguments:
+//   real_data_out - the output real signal. For an ARM Neon platform, it must
+//                   be different to complex_data_in, and aligned on a 32-byte
+//                   boundary.
+//
+// Return Value:
+//   0 or a positive number - a value that the elements in the |real_data_out|
+//                            should be shifted left with in order to get
+//                            correct physical values.
+//   -1 - Error with bad arguments (null pointers).
+int WebRtcSpl_RealInverseFFT(struct RealFFT* self,
+                             const int16_t* complex_data_in,
+                             int16_t* real_data_out);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_REAL_FFT_H_
diff --git a/common_audio/signal_processing/include/signal_processing_library.h b/common_audio/signal_processing/include/signal_processing_library.h
new file mode 100644
index 0000000..73cdc0d
--- /dev/null
+++ b/common_audio/signal_processing/include/signal_processing_library.h
@@ -0,0 +1,1627 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This header file includes all of the fix point signal processing library (SPL) function
+ * descriptions and declarations.
+ * For specific function calls, see bottom of file.
+ */
+
+#ifndef COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SIGNAL_PROCESSING_LIBRARY_H_
+#define COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SIGNAL_PROCESSING_LIBRARY_H_
+
+#include <string.h>
+#include "common_audio/signal_processing/dot_product_with_scale.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+// Macros specific for the fixed point implementation
+#define WEBRTC_SPL_WORD16_MAX       32767
+#define WEBRTC_SPL_WORD16_MIN       -32768
+#define WEBRTC_SPL_WORD32_MAX       (int32_t)0x7fffffff
+#define WEBRTC_SPL_WORD32_MIN       (int32_t)0x80000000
+#define WEBRTC_SPL_MAX_LPC_ORDER    14
+#define WEBRTC_SPL_MIN(A, B)        (A < B ? A : B)  // Get min value
+#define WEBRTC_SPL_MAX(A, B)        (A > B ? A : B)  // Get max value
+// TODO(kma/bjorn): For the next two macros, investigate how to correct the code
+// for inputs of a = WEBRTC_SPL_WORD16_MIN or WEBRTC_SPL_WORD32_MIN.
+#define WEBRTC_SPL_ABS_W16(a) \
+    (((int16_t)a >= 0) ? ((int16_t)a) : -((int16_t)a))
+#define WEBRTC_SPL_ABS_W32(a) \
+    (((int32_t)a >= 0) ? ((int32_t)a) : -((int32_t)a))
+
+#define WEBRTC_SPL_MUL(a, b) \
+    ((int32_t) ((int32_t)(a) * (int32_t)(b)))
+#define WEBRTC_SPL_UMUL(a, b) \
+    ((uint32_t) ((uint32_t)(a) * (uint32_t)(b)))
+#define WEBRTC_SPL_UMUL_32_16(a, b) \
+    ((uint32_t) ((uint32_t)(a) * (uint16_t)(b)))
+#define WEBRTC_SPL_MUL_16_U16(a, b) \
+    ((int32_t)(int16_t)(a) * (uint16_t)(b))
+
+#ifndef WEBRTC_ARCH_ARM_V7
+// For ARMv7 platforms, these are inline functions in spl_inl_armv7.h
+#ifndef MIPS32_LE
+// For MIPS platforms, these are inline functions in spl_inl_mips.h
+#define WEBRTC_SPL_MUL_16_16(a, b) \
+    ((int32_t) (((int16_t)(a)) * ((int16_t)(b))))
+#define WEBRTC_SPL_MUL_16_32_RSFT16(a, b) \
+    (WEBRTC_SPL_MUL_16_16(a, b >> 16) \
+     + ((WEBRTC_SPL_MUL_16_16(a, (b & 0xffff) >> 1) + 0x4000) >> 15))
+#endif
+#endif
+
+#define WEBRTC_SPL_MUL_16_32_RSFT11(a, b)          \
+  (WEBRTC_SPL_MUL_16_16(a, (b) >> 16) * (1 << 5) + \
+    (((WEBRTC_SPL_MUL_16_U16(a, (uint16_t)(b)) >> 1) + 0x0200) >> 10))
+#define WEBRTC_SPL_MUL_16_32_RSFT14(a, b)          \
+  (WEBRTC_SPL_MUL_16_16(a, (b) >> 16) * (1 << 2) + \
+    (((WEBRTC_SPL_MUL_16_U16(a, (uint16_t)(b)) >> 1) + 0x1000) >> 13))
+#define WEBRTC_SPL_MUL_16_32_RSFT15(a, b)            \
+  ((WEBRTC_SPL_MUL_16_16(a, (b) >> 16) * (1 << 1)) + \
+    (((WEBRTC_SPL_MUL_16_U16(a, (uint16_t)(b)) >> 1) + 0x2000) >> 14))
+
+#define WEBRTC_SPL_MUL_16_16_RSFT(a, b, c) \
+    (WEBRTC_SPL_MUL_16_16(a, b) >> (c))
+
+#define WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(a, b, c) \
+    ((WEBRTC_SPL_MUL_16_16(a, b) + ((int32_t) \
+                                  (((int32_t)1) << ((c) - 1)))) >> (c))
+
+// C + the 32 most significant bits of A * B
+#define WEBRTC_SPL_SCALEDIFF32(A, B, C) \
+    (C + (B >> 16) * A + (((uint32_t)(B & 0x0000FFFF) * A) >> 16))
+
+#define WEBRTC_SPL_SAT(a, b, c)         (b > a ? a : b < c ? c : b)
+
+// Shifting with negative numbers allowed
+// Positive means left shift
+#define WEBRTC_SPL_SHIFT_W32(x, c) ((c) >= 0 ? (x) * (1 << (c)) : (x) >> -(c))
+
+// Shifting with negative numbers not allowed
+// We cannot do casting here due to signed/unsigned problem
+#define WEBRTC_SPL_LSHIFT_W32(x, c)     ((x) << (c))
+
+#define WEBRTC_SPL_RSHIFT_U32(x, c)     ((uint32_t)(x) >> (c))
+
+#define WEBRTC_SPL_RAND(a) \
+    ((int16_t)((((int16_t)a * 18816) >> 7) & 0x00007fff))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define WEBRTC_SPL_MEMCPY_W16(v1, v2, length) \
+  memcpy(v1, v2, (length) * sizeof(int16_t))
+
+// inline functions:
+#include "common_audio/signal_processing/include/spl_inl.h"
+
+// Initialize SPL. Currently it contains only function pointer initialization.
+// If the underlying platform is known to be ARM-Neon (WEBRTC_HAS_NEON defined),
+// the pointers will be assigned to code optimized for Neon; otherwise, generic
+// C code will be assigned.
+// Note that this function MUST be called in any application that uses SPL
+// functions.
+void WebRtcSpl_Init();
+
+int16_t WebRtcSpl_GetScalingSquare(int16_t* in_vector,
+                                   size_t in_vector_length,
+                                   size_t times);
+
+// Copy and set operations. Implementation in copy_set_operations.c.
+// Descriptions at bottom of file.
+void WebRtcSpl_MemSetW16(int16_t* vector,
+                         int16_t set_value,
+                         size_t vector_length);
+void WebRtcSpl_MemSetW32(int32_t* vector,
+                         int32_t set_value,
+                         size_t vector_length);
+void WebRtcSpl_MemCpyReversedOrder(int16_t* out_vector,
+                                   int16_t* in_vector,
+                                   size_t vector_length);
+void WebRtcSpl_CopyFromEndW16(const int16_t* in_vector,
+                              size_t in_vector_length,
+                              size_t samples,
+                              int16_t* out_vector);
+void WebRtcSpl_ZerosArrayW16(int16_t* vector,
+                             size_t vector_length);
+void WebRtcSpl_ZerosArrayW32(int32_t* vector,
+                             size_t vector_length);
+// End: Copy and set operations.
+
+
+// Minimum and maximum operation functions and their pointers.
+// Implementation in min_max_operations.c.
+
+// Returns the largest absolute value in a signed 16-bit vector.
+//
+// Input:
+//      - vector : 16-bit input vector.
+//      - length : Number of samples in vector.
+//
+// Return value  : Maximum absolute value in vector.
+typedef int16_t (*MaxAbsValueW16)(const int16_t* vector, size_t length);
+extern MaxAbsValueW16 WebRtcSpl_MaxAbsValueW16;
+int16_t WebRtcSpl_MaxAbsValueW16C(const int16_t* vector, size_t length);
+#if defined(WEBRTC_HAS_NEON)
+int16_t WebRtcSpl_MaxAbsValueW16Neon(const int16_t* vector, size_t length);
+#endif
+#if defined(MIPS32_LE)
+int16_t WebRtcSpl_MaxAbsValueW16_mips(const int16_t* vector, size_t length);
+#endif
+
+// Returns the largest absolute value in a signed 32-bit vector.
+//
+// Input:
+//      - vector : 32-bit input vector.
+//      - length : Number of samples in vector.
+//
+// Return value  : Maximum absolute value in vector.
+typedef int32_t (*MaxAbsValueW32)(const int32_t* vector, size_t length);
+extern MaxAbsValueW32 WebRtcSpl_MaxAbsValueW32;
+int32_t WebRtcSpl_MaxAbsValueW32C(const int32_t* vector, size_t length);
+#if defined(WEBRTC_HAS_NEON)
+int32_t WebRtcSpl_MaxAbsValueW32Neon(const int32_t* vector, size_t length);
+#endif
+#if defined(MIPS_DSP_R1_LE)
+int32_t WebRtcSpl_MaxAbsValueW32_mips(const int32_t* vector, size_t length);
+#endif
+
+// Returns the maximum value of a 16-bit vector.
+//
+// Input:
+//      - vector : 16-bit input vector.
+//      - length : Number of samples in vector.
+//
+// Return value  : Maximum sample value in |vector|.
+typedef int16_t (*MaxValueW16)(const int16_t* vector, size_t length);
+extern MaxValueW16 WebRtcSpl_MaxValueW16;
+int16_t WebRtcSpl_MaxValueW16C(const int16_t* vector, size_t length);
+#if defined(WEBRTC_HAS_NEON)
+int16_t WebRtcSpl_MaxValueW16Neon(const int16_t* vector, size_t length);
+#endif
+#if defined(MIPS32_LE)
+int16_t WebRtcSpl_MaxValueW16_mips(const int16_t* vector, size_t length);
+#endif
+
+// Returns the maximum value of a 32-bit vector.
+//
+// Input:
+//      - vector : 32-bit input vector.
+//      - length : Number of samples in vector.
+//
+// Return value  : Maximum sample value in |vector|.
+typedef int32_t (*MaxValueW32)(const int32_t* vector, size_t length);
+extern MaxValueW32 WebRtcSpl_MaxValueW32;
+int32_t WebRtcSpl_MaxValueW32C(const int32_t* vector, size_t length);
+#if defined(WEBRTC_HAS_NEON)
+int32_t WebRtcSpl_MaxValueW32Neon(const int32_t* vector, size_t length);
+#endif
+#if defined(MIPS32_LE)
+int32_t WebRtcSpl_MaxValueW32_mips(const int32_t* vector, size_t length);
+#endif
+
+// Returns the minimum value of a 16-bit vector.
+//
+// Input:
+//      - vector : 16-bit input vector.
+//      - length : Number of samples in vector.
+//
+// Return value  : Minimum sample value in |vector|.
+typedef int16_t (*MinValueW16)(const int16_t* vector, size_t length);
+extern MinValueW16 WebRtcSpl_MinValueW16;
+int16_t WebRtcSpl_MinValueW16C(const int16_t* vector, size_t length);
+#if defined(WEBRTC_HAS_NEON)
+int16_t WebRtcSpl_MinValueW16Neon(const int16_t* vector, size_t length);
+#endif
+#if defined(MIPS32_LE)
+int16_t WebRtcSpl_MinValueW16_mips(const int16_t* vector, size_t length);
+#endif
+
+// Returns the minimum value of a 32-bit vector.
+//
+// Input:
+//      - vector : 32-bit input vector.
+//      - length : Number of samples in vector.
+//
+// Return value  : Minimum sample value in |vector|.
+typedef int32_t (*MinValueW32)(const int32_t* vector, size_t length);
+extern MinValueW32 WebRtcSpl_MinValueW32;
+int32_t WebRtcSpl_MinValueW32C(const int32_t* vector, size_t length);
+#if defined(WEBRTC_HAS_NEON)
+int32_t WebRtcSpl_MinValueW32Neon(const int32_t* vector, size_t length);
+#endif
+#if defined(MIPS32_LE)
+int32_t WebRtcSpl_MinValueW32_mips(const int32_t* vector, size_t length);
+#endif
+
+// Returns the vector index to the largest absolute value of a 16-bit vector.
+//
+// Input:
+//      - vector : 16-bit input vector.
+//      - length : Number of samples in vector.
+//
+// Return value  : Index to the maximum absolute value in vector.
+//                 If there are multiple equal maxima, return the index of the
+//                 first. -32768 will always have precedence over 32767 (despite
+//                 -32768 presenting an int16 absolute value of 32767).
+size_t WebRtcSpl_MaxAbsIndexW16(const int16_t* vector, size_t length);
+
+// Returns the vector index to the maximum sample value of a 16-bit vector.
+//
+// Input:
+//      - vector : 16-bit input vector.
+//      - length : Number of samples in vector.
+//
+// Return value  : Index to the maximum value in vector (if multiple
+//                 indexes have the maximum, return the first).
+size_t WebRtcSpl_MaxIndexW16(const int16_t* vector, size_t length);
+
+// Returns the vector index to the maximum sample value of a 32-bit vector.
+//
+// Input:
+//      - vector : 32-bit input vector.
+//      - length : Number of samples in vector.
+//
+// Return value  : Index to the maximum value in vector (if multiple
+//                 indexes have the maximum, return the first).
+size_t WebRtcSpl_MaxIndexW32(const int32_t* vector, size_t length);
+
+// Returns the vector index to the minimum sample value of a 16-bit vector.
+//
+// Input:
+//      - vector : 16-bit input vector.
+//      - length : Number of samples in vector.
+//
+// Return value  : Index to the mimimum value in vector  (if multiple
+//                 indexes have the minimum, return the first).
+size_t WebRtcSpl_MinIndexW16(const int16_t* vector, size_t length);
+
+// Returns the vector index to the minimum sample value of a 32-bit vector.
+//
+// Input:
+//      - vector : 32-bit input vector.
+//      - length : Number of samples in vector.
+//
+// Return value  : Index to the mimimum value in vector  (if multiple
+//                 indexes have the minimum, return the first).
+size_t WebRtcSpl_MinIndexW32(const int32_t* vector, size_t length);
+
+// End: Minimum and maximum operations.
+
+
+// Vector scaling operations. Implementation in vector_scaling_operations.c.
+// Description at bottom of file.
+void WebRtcSpl_VectorBitShiftW16(int16_t* out_vector,
+                                 size_t vector_length,
+                                 const int16_t* in_vector,
+                                 int16_t right_shifts);
+void WebRtcSpl_VectorBitShiftW32(int32_t* out_vector,
+                                 size_t vector_length,
+                                 const int32_t* in_vector,
+                                 int16_t right_shifts);
+void WebRtcSpl_VectorBitShiftW32ToW16(int16_t* out_vector,
+                                      size_t vector_length,
+                                      const int32_t* in_vector,
+                                      int right_shifts);
+void WebRtcSpl_ScaleVector(const int16_t* in_vector,
+                           int16_t* out_vector,
+                           int16_t gain,
+                           size_t vector_length,
+                           int16_t right_shifts);
+void WebRtcSpl_ScaleVectorWithSat(const int16_t* in_vector,
+                                  int16_t* out_vector,
+                                  int16_t gain,
+                                  size_t vector_length,
+                                  int16_t right_shifts);
+void WebRtcSpl_ScaleAndAddVectors(const int16_t* in_vector1,
+                                  int16_t gain1, int right_shifts1,
+                                  const int16_t* in_vector2,
+                                  int16_t gain2, int right_shifts2,
+                                  int16_t* out_vector,
+                                  size_t vector_length);
+
+// The functions (with related pointer) perform the vector operation:
+//   out_vector[k] = ((scale1 * in_vector1[k]) + (scale2 * in_vector2[k])
+//        + round_value) >> right_shifts,
+//   where  round_value = (1 << right_shifts) >> 1.
+//
+// Input:
+//      - in_vector1       : Input vector 1
+//      - in_vector1_scale : Gain to be used for vector 1
+//      - in_vector2       : Input vector 2
+//      - in_vector2_scale : Gain to be used for vector 2
+//      - right_shifts     : Number of right bit shifts to be applied
+//      - length           : Number of elements in the input vectors
+//
+// Output:
+//      - out_vector       : Output vector
+// Return value            : 0 if OK, -1 if (in_vector1 == null
+//                           || in_vector2 == null || out_vector == null
+//                           || length <= 0 || right_shift < 0).
+typedef int (*ScaleAndAddVectorsWithRound)(const int16_t* in_vector1,
+                                           int16_t in_vector1_scale,
+                                           const int16_t* in_vector2,
+                                           int16_t in_vector2_scale,
+                                           int right_shifts,
+                                           int16_t* out_vector,
+                                           size_t length);
+extern ScaleAndAddVectorsWithRound WebRtcSpl_ScaleAndAddVectorsWithRound;
+int WebRtcSpl_ScaleAndAddVectorsWithRoundC(const int16_t* in_vector1,
+                                           int16_t in_vector1_scale,
+                                           const int16_t* in_vector2,
+                                           int16_t in_vector2_scale,
+                                           int right_shifts,
+                                           int16_t* out_vector,
+                                           size_t length);
+#if defined(MIPS_DSP_R1_LE)
+int WebRtcSpl_ScaleAndAddVectorsWithRound_mips(const int16_t* in_vector1,
+                                               int16_t in_vector1_scale,
+                                               const int16_t* in_vector2,
+                                               int16_t in_vector2_scale,
+                                               int right_shifts,
+                                               int16_t* out_vector,
+                                               size_t length);
+#endif
+// End: Vector scaling operations.
+
+// iLBC specific functions. Implementations in ilbc_specific_functions.c.
+// Description at bottom of file.
+void WebRtcSpl_ReverseOrderMultArrayElements(int16_t* out_vector,
+                                             const int16_t* in_vector,
+                                             const int16_t* window,
+                                             size_t vector_length,
+                                             int16_t right_shifts);
+void WebRtcSpl_ElementwiseVectorMult(int16_t* out_vector,
+                                     const int16_t* in_vector,
+                                     const int16_t* window,
+                                     size_t vector_length,
+                                     int16_t right_shifts);
+void WebRtcSpl_AddVectorsAndShift(int16_t* out_vector,
+                                  const int16_t* in_vector1,
+                                  const int16_t* in_vector2,
+                                  size_t vector_length,
+                                  int16_t right_shifts);
+void WebRtcSpl_AddAffineVectorToVector(int16_t* out_vector,
+                                       int16_t* in_vector,
+                                       int16_t gain,
+                                       int32_t add_constant,
+                                       int16_t right_shifts,
+                                       size_t vector_length);
+void WebRtcSpl_AffineTransformVector(int16_t* out_vector,
+                                     int16_t* in_vector,
+                                     int16_t gain,
+                                     int32_t add_constant,
+                                     int16_t right_shifts,
+                                     size_t vector_length);
+// End: iLBC specific functions.
+
+// Signal processing operations.
+
+// A 32-bit fix-point implementation of auto-correlation computation
+//
+// Input:
+//      - in_vector        : Vector to calculate autocorrelation upon
+//      - in_vector_length : Length (in samples) of |vector|
+//      - order            : The order up to which the autocorrelation should be
+//                           calculated
+//
+// Output:
+//      - result           : auto-correlation values (values should be seen
+//                           relative to each other since the absolute values
+//                           might have been down shifted to avoid overflow)
+//
+//      - scale            : The number of left shifts required to obtain the
+//                           auto-correlation in Q0
+//
+// Return value            : Number of samples in |result|, i.e. (order+1)
+size_t WebRtcSpl_AutoCorrelation(const int16_t* in_vector,
+                                 size_t in_vector_length,
+                                 size_t order,
+                                 int32_t* result,
+                                 int* scale);
+
+// A 32-bit fix-point implementation of the Levinson-Durbin algorithm that
+// does NOT use the 64 bit class
+//
+// Input:
+//      - auto_corr : Vector with autocorrelation values of length >= |order|+1
+//      - order     : The LPC filter order (support up to order 20)
+//
+// Output:
+//      - lpc_coef  : lpc_coef[0..order] LPC coefficients in Q12
+//      - refl_coef : refl_coef[0...order-1]| Reflection coefficients in Q15
+//
+// Return value     : 1 for stable 0 for unstable
+int16_t WebRtcSpl_LevinsonDurbin(const int32_t* auto_corr,
+                                 int16_t* lpc_coef,
+                                 int16_t* refl_coef,
+                                 size_t order);
+
+// Converts reflection coefficients |refl_coef| to LPC coefficients |lpc_coef|.
+// This version is a 16 bit operation.
+//
+// NOTE: The 16 bit refl_coef -> lpc_coef conversion might result in a
+// "slightly unstable" filter (i.e., a pole just outside the unit circle) in
+// "rare" cases even if the reflection coefficients are stable.
+//
+// Input:
+//      - refl_coef : Reflection coefficients in Q15 that should be converted
+//                    to LPC coefficients
+//      - use_order : Number of coefficients in |refl_coef|
+//
+// Output:
+//      - lpc_coef  : LPC coefficients in Q12
+void WebRtcSpl_ReflCoefToLpc(const int16_t* refl_coef,
+                             int use_order,
+                             int16_t* lpc_coef);
+
+// Converts LPC coefficients |lpc_coef| to reflection coefficients |refl_coef|.
+// This version is a 16 bit operation.
+// The conversion is implemented by the step-down algorithm.
+//
+// Input:
+//      - lpc_coef  : LPC coefficients in Q12, that should be converted to
+//                    reflection coefficients
+//      - use_order : Number of coefficients in |lpc_coef|
+//
+// Output:
+//      - refl_coef : Reflection coefficients in Q15.
+void WebRtcSpl_LpcToReflCoef(int16_t* lpc_coef,
+                             int use_order,
+                             int16_t* refl_coef);
+
+// Calculates reflection coefficients (16 bit) from auto-correlation values
+//
+// Input:
+//      - auto_corr : Auto-correlation values
+//      - use_order : Number of coefficients wanted be calculated
+//
+// Output:
+//      - refl_coef : Reflection coefficients in Q15.
+void WebRtcSpl_AutoCorrToReflCoef(const int32_t* auto_corr,
+                                  int use_order,
+                                  int16_t* refl_coef);
+
+// The functions (with related pointer) calculate the cross-correlation between
+// two sequences |seq1| and |seq2|.
+// |seq1| is fixed and |seq2| slides as the pointer is increased with the
+// amount |step_seq2|. Note the arguments should obey the relationship:
+// |dim_seq| - 1 + |step_seq2| * (|dim_cross_correlation| - 1) <
+//      buffer size of |seq2|
+//
+// Input:
+//      - seq1           : First sequence (fixed throughout the correlation)
+//      - seq2           : Second sequence (slides |step_vector2| for each
+//                            new correlation)
+//      - dim_seq        : Number of samples to use in the cross-correlation
+//      - dim_cross_correlation : Number of cross-correlations to calculate (the
+//                            start position for |vector2| is updated for each
+//                            new one)
+//      - right_shifts   : Number of right bit shifts to use. This will
+//                            become the output Q-domain.
+//      - step_seq2      : How many (positive or negative) steps the
+//                            |vector2| pointer should be updated for each new
+//                            cross-correlation value.
+//
+// Output:
+//      - cross_correlation : The cross-correlation in Q(-right_shifts)
+typedef void (*CrossCorrelation)(int32_t* cross_correlation,
+                                 const int16_t* seq1,
+                                 const int16_t* seq2,
+                                 size_t dim_seq,
+                                 size_t dim_cross_correlation,
+                                 int right_shifts,
+                                 int step_seq2);
+extern CrossCorrelation WebRtcSpl_CrossCorrelation;
+void WebRtcSpl_CrossCorrelationC(int32_t* cross_correlation,
+                                 const int16_t* seq1,
+                                 const int16_t* seq2,
+                                 size_t dim_seq,
+                                 size_t dim_cross_correlation,
+                                 int right_shifts,
+                                 int step_seq2);
+#if defined(WEBRTC_HAS_NEON)
+void WebRtcSpl_CrossCorrelationNeon(int32_t* cross_correlation,
+                                    const int16_t* seq1,
+                                    const int16_t* seq2,
+                                    size_t dim_seq,
+                                    size_t dim_cross_correlation,
+                                    int right_shifts,
+                                    int step_seq2);
+#endif
+#if defined(MIPS32_LE)
+void WebRtcSpl_CrossCorrelation_mips(int32_t* cross_correlation,
+                                     const int16_t* seq1,
+                                     const int16_t* seq2,
+                                     size_t dim_seq,
+                                     size_t dim_cross_correlation,
+                                     int right_shifts,
+                                     int step_seq2);
+#endif
+
+// Creates (the first half of) a Hanning window. Size must be at least 1 and
+// at most 512.
+//
+// Input:
+//      - size      : Length of the requested Hanning window (1 to 512)
+//
+// Output:
+//      - window    : Hanning vector in Q14.
+void WebRtcSpl_GetHanningWindow(int16_t* window, size_t size);
+
+// Calculates y[k] = sqrt(1 - x[k]^2) for each element of the input vector
+// |in_vector|. Input and output values are in Q15.
+//
+// Inputs:
+//      - in_vector     : Values to calculate sqrt(1 - x^2) of
+//      - vector_length : Length of vector |in_vector|
+//
+// Output:
+//      - out_vector    : Output values in Q15
+void WebRtcSpl_SqrtOfOneMinusXSquared(int16_t* in_vector,
+                                      size_t vector_length,
+                                      int16_t* out_vector);
+// End: Signal processing operations.
+
+// Randomization functions. Implementations collected in
+// randomization_functions.c and descriptions at bottom of this file.
+int16_t WebRtcSpl_RandU(uint32_t* seed);
+int16_t WebRtcSpl_RandN(uint32_t* seed);
+int16_t WebRtcSpl_RandUArray(int16_t* vector,
+                             int16_t vector_length,
+                             uint32_t* seed);
+// End: Randomization functions.
+
+// Math functions
+int32_t WebRtcSpl_Sqrt(int32_t value);
+int32_t WebRtcSpl_SqrtFloor(int32_t value);
+
+// Divisions. Implementations collected in division_operations.c and
+// descriptions at bottom of this file.
+uint32_t WebRtcSpl_DivU32U16(uint32_t num, uint16_t den);
+int32_t WebRtcSpl_DivW32W16(int32_t num, int16_t den);
+int16_t WebRtcSpl_DivW32W16ResW16(int32_t num, int16_t den);
+int32_t WebRtcSpl_DivResultInQ31(int32_t num, int32_t den);
+int32_t WebRtcSpl_DivW32HiLow(int32_t num, int16_t den_hi, int16_t den_low);
+// End: Divisions.
+
+int32_t WebRtcSpl_Energy(int16_t* vector,
+                         size_t vector_length,
+                         int* scale_factor);
+
+// Filter operations.
+size_t WebRtcSpl_FilterAR(const int16_t* ar_coef,
+                          size_t ar_coef_length,
+                          const int16_t* in_vector,
+                          size_t in_vector_length,
+                          int16_t* filter_state,
+                          size_t filter_state_length,
+                          int16_t* filter_state_low,
+                          size_t filter_state_low_length,
+                          int16_t* out_vector,
+                          int16_t* out_vector_low,
+                          size_t out_vector_low_length);
+
+// WebRtcSpl_FilterMAFastQ12(...)
+//
+// Performs a MA filtering on a vector in Q12
+//
+// Input:
+//      - in_vector         : Input samples (state in positions
+//                            in_vector[-order] .. in_vector[-1])
+//      - ma_coef           : Filter coefficients (in Q12)
+//      - ma_coef_length    : Number of B coefficients (order+1)
+//      - vector_length     : Number of samples to be filtered
+//
+// Output:
+//      - out_vector        : Filtered samples
+//
+void WebRtcSpl_FilterMAFastQ12(const int16_t* in_vector,
+                               int16_t* out_vector,
+                               const int16_t* ma_coef,
+                               size_t ma_coef_length,
+                               size_t vector_length);
+
+// Performs a AR filtering on a vector in Q12
+// Input:
+//      - data_in            : Input samples
+//      - data_out           : State information in positions
+//                               data_out[-order] .. data_out[-1]
+//      - coefficients       : Filter coefficients (in Q12)
+//      - coefficients_length: Number of coefficients (order+1)
+//      - data_length        : Number of samples to be filtered
+// Output:
+//      - data_out           : Filtered samples
+void WebRtcSpl_FilterARFastQ12(const int16_t* data_in,
+                               int16_t* data_out,
+                               const int16_t* __restrict coefficients,
+                               size_t coefficients_length,
+                               size_t data_length);
+
+// The functions (with related pointer) perform a MA down sampling filter
+// on a vector.
+// Input:
+//      - data_in            : Input samples (state in positions
+//                               data_in[-order] .. data_in[-1])
+//      - data_in_length     : Number of samples in |data_in| to be filtered.
+//                               This must be at least
+//                               |delay| + |factor|*(|out_vector_length|-1) + 1)
+//      - data_out_length    : Number of down sampled samples desired
+//      - coefficients       : Filter coefficients (in Q12)
+//      - coefficients_length: Number of coefficients (order+1)
+//      - factor             : Decimation factor
+//      - delay              : Delay of filter (compensated for in out_vector)
+// Output:
+//      - data_out           : Filtered samples
+// Return value              : 0 if OK, -1 if |in_vector| is too short
+typedef int (*DownsampleFast)(const int16_t* data_in,
+                              size_t data_in_length,
+                              int16_t* data_out,
+                              size_t data_out_length,
+                              const int16_t* __restrict coefficients,
+                              size_t coefficients_length,
+                              int factor,
+                              size_t delay);
+extern DownsampleFast WebRtcSpl_DownsampleFast;
+int WebRtcSpl_DownsampleFastC(const int16_t* data_in,
+                              size_t data_in_length,
+                              int16_t* data_out,
+                              size_t data_out_length,
+                              const int16_t* __restrict coefficients,
+                              size_t coefficients_length,
+                              int factor,
+                              size_t delay);
+#if defined(WEBRTC_HAS_NEON)
+int WebRtcSpl_DownsampleFastNeon(const int16_t* data_in,
+                                 size_t data_in_length,
+                                 int16_t* data_out,
+                                 size_t data_out_length,
+                                 const int16_t* __restrict coefficients,
+                                 size_t coefficients_length,
+                                 int factor,
+                                 size_t delay);
+#endif
+#if defined(MIPS32_LE)
+int WebRtcSpl_DownsampleFast_mips(const int16_t* data_in,
+                                  size_t data_in_length,
+                                  int16_t* data_out,
+                                  size_t data_out_length,
+                                  const int16_t* __restrict coefficients,
+                                  size_t coefficients_length,
+                                  int factor,
+                                  size_t delay);
+#endif
+
+// End: Filter operations.
+
+// FFT operations
+
+int WebRtcSpl_ComplexFFT(int16_t vector[], int stages, int mode);
+int WebRtcSpl_ComplexIFFT(int16_t vector[], int stages, int mode);
+
+// Treat a 16-bit complex data buffer |complex_data| as an array of 32-bit
+// values, and swap elements whose indexes are bit-reverses of each other.
+//
+// Input:
+//      - complex_data  : Complex data buffer containing 2^|stages| real
+//                        elements interleaved with 2^|stages| imaginary
+//                        elements: [Re Im Re Im Re Im....]
+//      - stages        : Number of FFT stages. Must be at least 3 and at most
+//                        10, since the table WebRtcSpl_kSinTable1024[] is 1024
+//                        elements long.
+//
+// Output:
+//      - complex_data  : The complex data buffer.
+
+void WebRtcSpl_ComplexBitReverse(int16_t* __restrict complex_data, int stages);
+
+// End: FFT operations
+
+/************************************************************
+ *
+ * RESAMPLING FUNCTIONS AND THEIR STRUCTS ARE DEFINED BELOW
+ *
+ ************************************************************/
+
+/*******************************************************************
+ * resample.c
+ *
+ * Includes the following resampling combinations
+ * 22 kHz -> 16 kHz
+ * 16 kHz -> 22 kHz
+ * 22 kHz ->  8 kHz
+ *  8 kHz -> 22 kHz
+ *
+ ******************************************************************/
+
+// state structure for 22 -> 16 resampler
+typedef struct {
+  int32_t S_22_44[8];
+  int32_t S_44_32[8];
+  int32_t S_32_16[8];
+} WebRtcSpl_State22khzTo16khz;
+
+void WebRtcSpl_Resample22khzTo16khz(const int16_t* in,
+                                    int16_t* out,
+                                    WebRtcSpl_State22khzTo16khz* state,
+                                    int32_t* tmpmem);
+
+void WebRtcSpl_ResetResample22khzTo16khz(WebRtcSpl_State22khzTo16khz* state);
+
+// state structure for 16 -> 22 resampler
+typedef struct {
+  int32_t S_16_32[8];
+  int32_t S_32_22[8];
+} WebRtcSpl_State16khzTo22khz;
+
+void WebRtcSpl_Resample16khzTo22khz(const int16_t* in,
+                                    int16_t* out,
+                                    WebRtcSpl_State16khzTo22khz* state,
+                                    int32_t* tmpmem);
+
+void WebRtcSpl_ResetResample16khzTo22khz(WebRtcSpl_State16khzTo22khz* state);
+
+// state structure for 22 -> 8 resampler
+typedef struct {
+  int32_t S_22_22[16];
+  int32_t S_22_16[8];
+  int32_t S_16_8[8];
+} WebRtcSpl_State22khzTo8khz;
+
+void WebRtcSpl_Resample22khzTo8khz(const int16_t* in, int16_t* out,
+                                   WebRtcSpl_State22khzTo8khz* state,
+                                   int32_t* tmpmem);
+
+void WebRtcSpl_ResetResample22khzTo8khz(WebRtcSpl_State22khzTo8khz* state);
+
+// state structure for 8 -> 22 resampler
+typedef struct {
+  int32_t S_8_16[8];
+  int32_t S_16_11[8];
+  int32_t S_11_22[8];
+} WebRtcSpl_State8khzTo22khz;
+
+void WebRtcSpl_Resample8khzTo22khz(const int16_t* in, int16_t* out,
+                                   WebRtcSpl_State8khzTo22khz* state,
+                                   int32_t* tmpmem);
+
+void WebRtcSpl_ResetResample8khzTo22khz(WebRtcSpl_State8khzTo22khz* state);
+
+/*******************************************************************
+ * resample_fractional.c
+ * Functions for internal use in the other resample functions
+ *
+ * Includes the following resampling combinations
+ * 48 kHz -> 32 kHz
+ * 32 kHz -> 24 kHz
+ * 44 kHz -> 32 kHz
+ *
+ ******************************************************************/
+
+void WebRtcSpl_Resample48khzTo32khz(const int32_t* In, int32_t* Out, size_t K);
+
+void WebRtcSpl_Resample32khzTo24khz(const int32_t* In, int32_t* Out, size_t K);
+
+void WebRtcSpl_Resample44khzTo32khz(const int32_t* In, int32_t* Out, size_t K);
+
+/*******************************************************************
+ * resample_48khz.c
+ *
+ * Includes the following resampling combinations
+ * 48 kHz -> 16 kHz
+ * 16 kHz -> 48 kHz
+ * 48 kHz ->  8 kHz
+ *  8 kHz -> 48 kHz
+ *
+ ******************************************************************/
+
+typedef struct {
+  int32_t S_48_48[16];
+  int32_t S_48_32[8];
+  int32_t S_32_16[8];
+} WebRtcSpl_State48khzTo16khz;
+
+void WebRtcSpl_Resample48khzTo16khz(const int16_t* in, int16_t* out,
+                                    WebRtcSpl_State48khzTo16khz* state,
+                                    int32_t* tmpmem);
+
+void WebRtcSpl_ResetResample48khzTo16khz(WebRtcSpl_State48khzTo16khz* state);
+
+typedef struct {
+  int32_t S_16_32[8];
+  int32_t S_32_24[8];
+  int32_t S_24_48[8];
+} WebRtcSpl_State16khzTo48khz;
+
+void WebRtcSpl_Resample16khzTo48khz(const int16_t* in, int16_t* out,
+                                    WebRtcSpl_State16khzTo48khz* state,
+                                    int32_t* tmpmem);
+
+void WebRtcSpl_ResetResample16khzTo48khz(WebRtcSpl_State16khzTo48khz* state);
+
+typedef struct {
+  int32_t S_48_24[8];
+  int32_t S_24_24[16];
+  int32_t S_24_16[8];
+  int32_t S_16_8[8];
+} WebRtcSpl_State48khzTo8khz;
+
+void WebRtcSpl_Resample48khzTo8khz(const int16_t* in, int16_t* out,
+                                   WebRtcSpl_State48khzTo8khz* state,
+                                   int32_t* tmpmem);
+
+void WebRtcSpl_ResetResample48khzTo8khz(WebRtcSpl_State48khzTo8khz* state);
+
+typedef struct {
+  int32_t S_8_16[8];
+  int32_t S_16_12[8];
+  int32_t S_12_24[8];
+  int32_t S_24_48[8];
+} WebRtcSpl_State8khzTo48khz;
+
+void WebRtcSpl_Resample8khzTo48khz(const int16_t* in, int16_t* out,
+                                   WebRtcSpl_State8khzTo48khz* state,
+                                   int32_t* tmpmem);
+
+void WebRtcSpl_ResetResample8khzTo48khz(WebRtcSpl_State8khzTo48khz* state);
+
+/*******************************************************************
+ * resample_by_2.c
+ *
+ * Includes down and up sampling by a factor of two.
+ *
+ ******************************************************************/
+
+void WebRtcSpl_DownsampleBy2(const int16_t* in, size_t len,
+                             int16_t* out, int32_t* filtState);
+
+void WebRtcSpl_UpsampleBy2(const int16_t* in, size_t len,
+                           int16_t* out, int32_t* filtState);
+
+/************************************************************
+ * END OF RESAMPLING FUNCTIONS
+ ************************************************************/
+void WebRtcSpl_AnalysisQMF(const int16_t* in_data,
+                           size_t in_data_length,
+                           int16_t* low_band,
+                           int16_t* high_band,
+                           int32_t* filter_state1,
+                           int32_t* filter_state2);
+void WebRtcSpl_SynthesisQMF(const int16_t* low_band,
+                            const int16_t* high_band,
+                            size_t band_length,
+                            int16_t* out_data,
+                            int32_t* filter_state1,
+                            int32_t* filter_state2);
+
+#ifdef __cplusplus
+}
+#endif  // __cplusplus
+#endif  // COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SIGNAL_PROCESSING_LIBRARY_H_
+
+//
+// WebRtcSpl_AddSatW16(...)
+// WebRtcSpl_AddSatW32(...)
+//
+// Returns the result of a saturated 16-bit, respectively 32-bit, addition of
+// the numbers specified by the |var1| and |var2| parameters.
+//
+// Input:
+//      - var1      : Input variable 1
+//      - var2      : Input variable 2
+//
+// Return value     : Added and saturated value
+//
+
+//
+// WebRtcSpl_SubSatW16(...)
+// WebRtcSpl_SubSatW32(...)
+//
+// Returns the result of a saturated 16-bit, respectively 32-bit, subtraction
+// of the numbers specified by the |var1| and |var2| parameters.
+//
+// Input:
+//      - var1      : Input variable 1
+//      - var2      : Input variable 2
+//
+// Returned value   : Subtracted and saturated value
+//
+
+//
+// WebRtcSpl_GetSizeInBits(...)
+//
+// Returns the # of bits that are needed at the most to represent the number
+// specified by the |value| parameter.
+//
+// Input:
+//      - value     : Input value
+//
+// Return value     : Number of bits needed to represent |value|
+//
+
+//
+// WebRtcSpl_NormW32(...)
+//
+// Norm returns the # of left shifts required to 32-bit normalize the 32-bit
+// signed number specified by the |value| parameter.
+//
+// Input:
+//      - value     : Input value
+//
+// Return value     : Number of bit shifts needed to 32-bit normalize |value|
+//
+
+//
+// WebRtcSpl_NormW16(...)
+//
+// Norm returns the # of left shifts required to 16-bit normalize the 16-bit
+// signed number specified by the |value| parameter.
+//
+// Input:
+//      - value     : Input value
+//
+// Return value     : Number of bit shifts needed to 32-bit normalize |value|
+//
+
+//
+// WebRtcSpl_NormU32(...)
+//
+// Norm returns the # of left shifts required to 32-bit normalize the unsigned
+// 32-bit number specified by the |value| parameter.
+//
+// Input:
+//      - value     : Input value
+//
+// Return value     : Number of bit shifts needed to 32-bit normalize |value|
+//
+
+//
+// WebRtcSpl_GetScalingSquare(...)
+//
+// Returns the # of bits required to scale the samples specified in the
+// |in_vector| parameter so that, if the squares of the samples are added the
+// # of times specified by the |times| parameter, the 32-bit addition will not
+// overflow (result in int32_t).
+//
+// Input:
+//      - in_vector         : Input vector to check scaling on
+//      - in_vector_length  : Samples in |in_vector|
+//      - times             : Number of additions to be performed
+//
+// Return value             : Number of right bit shifts needed to avoid
+//                            overflow in the addition calculation
+//
+
+//
+// WebRtcSpl_MemSetW16(...)
+//
+// Sets all the values in the int16_t vector |vector| of length
+// |vector_length| to the specified value |set_value|
+//
+// Input:
+//      - vector        : Pointer to the int16_t vector
+//      - set_value     : Value specified
+//      - vector_length : Length of vector
+//
+
+//
+// WebRtcSpl_MemSetW32(...)
+//
+// Sets all the values in the int32_t vector |vector| of length
+// |vector_length| to the specified value |set_value|
+//
+// Input:
+//      - vector        : Pointer to the int16_t vector
+//      - set_value     : Value specified
+//      - vector_length : Length of vector
+//
+
+//
+// WebRtcSpl_MemCpyReversedOrder(...)
+//
+// Copies all the values from the source int16_t vector |in_vector| to a
+// destination int16_t vector |out_vector|. It is done in reversed order,
+// meaning that the first sample of |in_vector| is copied to the last sample of
+// the |out_vector|. The procedure continues until the last sample of
+// |in_vector| has been copied to the first sample of |out_vector|. This
+// creates a reversed vector. Used in e.g. prediction in iLBC.
+//
+// Input:
+//      - in_vector     : Pointer to the first sample in a int16_t vector
+//                        of length |length|
+//      - vector_length : Number of elements to copy
+//
+// Output:
+//      - out_vector    : Pointer to the last sample in a int16_t vector
+//                        of length |length|
+//
+
+//
+// WebRtcSpl_CopyFromEndW16(...)
+//
+// Copies the rightmost |samples| of |in_vector| (of length |in_vector_length|)
+// to the vector |out_vector|.
+//
+// Input:
+//      - in_vector         : Input vector
+//      - in_vector_length  : Number of samples in |in_vector|
+//      - samples           : Number of samples to extract (from right side)
+//                            from |in_vector|
+//
+// Output:
+//      - out_vector        : Vector with the requested samples
+//
+
+//
+// WebRtcSpl_ZerosArrayW16(...)
+// WebRtcSpl_ZerosArrayW32(...)
+//
+// Inserts the value "zero" in all positions of a w16 and a w32 vector
+// respectively.
+//
+// Input:
+//      - vector_length : Number of samples in vector
+//
+// Output:
+//      - vector        : Vector containing all zeros
+//
+
+//
+// WebRtcSpl_VectorBitShiftW16(...)
+// WebRtcSpl_VectorBitShiftW32(...)
+//
+// Bit shifts all the values in a vector up or downwards. Different calls for
+// int16_t and int32_t vectors respectively.
+//
+// Input:
+//      - vector_length : Length of vector
+//      - in_vector     : Pointer to the vector that should be bit shifted
+//      - right_shifts  : Number of right bit shifts (negative value gives left
+//                        shifts)
+//
+// Output:
+//      - out_vector    : Pointer to the result vector (can be the same as
+//                        |in_vector|)
+//
+
+//
+// WebRtcSpl_VectorBitShiftW32ToW16(...)
+//
+// Bit shifts all the values in a int32_t vector up or downwards and
+// stores the result as an int16_t vector. The function will saturate the
+// signal if needed, before storing in the output vector.
+//
+// Input:
+//      - vector_length : Length of vector
+//      - in_vector     : Pointer to the vector that should be bit shifted
+//      - right_shifts  : Number of right bit shifts (negative value gives left
+//                        shifts)
+//
+// Output:
+//      - out_vector    : Pointer to the result vector (can be the same as
+//                        |in_vector|)
+//
+
+//
+// WebRtcSpl_ScaleVector(...)
+//
+// Performs the vector operation:
+//  out_vector[k] = (gain*in_vector[k])>>right_shifts
+//
+// Input:
+//      - in_vector     : Input vector
+//      - gain          : Scaling gain
+//      - vector_length : Elements in the |in_vector|
+//      - right_shifts  : Number of right bit shifts applied
+//
+// Output:
+//      - out_vector    : Output vector (can be the same as |in_vector|)
+//
+
+//
+// WebRtcSpl_ScaleVectorWithSat(...)
+//
+// Performs the vector operation:
+//  out_vector[k] = SATURATE( (gain*in_vector[k])>>right_shifts )
+//
+// Input:
+//      - in_vector     : Input vector
+//      - gain          : Scaling gain
+//      - vector_length : Elements in the |in_vector|
+//      - right_shifts  : Number of right bit shifts applied
+//
+// Output:
+//      - out_vector    : Output vector (can be the same as |in_vector|)
+//
+
+//
+// WebRtcSpl_ScaleAndAddVectors(...)
+//
+// Performs the vector operation:
+//  out_vector[k] = (gain1*in_vector1[k])>>right_shifts1
+//                  + (gain2*in_vector2[k])>>right_shifts2
+//
+// Input:
+//      - in_vector1    : Input vector 1
+//      - gain1         : Gain to be used for vector 1
+//      - right_shifts1 : Right bit shift to be used for vector 1
+//      - in_vector2    : Input vector 2
+//      - gain2         : Gain to be used for vector 2
+//      - right_shifts2 : Right bit shift to be used for vector 2
+//      - vector_length : Elements in the input vectors
+//
+// Output:
+//      - out_vector    : Output vector
+//
+
+//
+// WebRtcSpl_ReverseOrderMultArrayElements(...)
+//
+// Performs the vector operation:
+//  out_vector[n] = (in_vector[n]*window[-n])>>right_shifts
+//
+// Input:
+//      - in_vector     : Input vector
+//      - window        : Window vector (should be reversed). The pointer
+//                        should be set to the last value in the vector
+//      - right_shifts  : Number of right bit shift to be applied after the
+//                        multiplication
+//      - vector_length : Number of elements in |in_vector|
+//
+// Output:
+//      - out_vector    : Output vector (can be same as |in_vector|)
+//
+
+//
+// WebRtcSpl_ElementwiseVectorMult(...)
+//
+// Performs the vector operation:
+//  out_vector[n] = (in_vector[n]*window[n])>>right_shifts
+//
+// Input:
+//      - in_vector     : Input vector
+//      - window        : Window vector.
+//      - right_shifts  : Number of right bit shift to be applied after the
+//                        multiplication
+//      - vector_length : Number of elements in |in_vector|
+//
+// Output:
+//      - out_vector    : Output vector (can be same as |in_vector|)
+//
+
+//
+// WebRtcSpl_AddVectorsAndShift(...)
+//
+// Performs the vector operation:
+//  out_vector[k] = (in_vector1[k] + in_vector2[k])>>right_shifts
+//
+// Input:
+//      - in_vector1    : Input vector 1
+//      - in_vector2    : Input vector 2
+//      - right_shifts  : Number of right bit shift to be applied after the
+//                        multiplication
+//      - vector_length : Number of elements in |in_vector1| and |in_vector2|
+//
+// Output:
+//      - out_vector    : Output vector (can be same as |in_vector1|)
+//
+
+//
+// WebRtcSpl_AddAffineVectorToVector(...)
+//
+// Adds an affine transformed vector to another vector |out_vector|, i.e,
+// performs
+//  out_vector[k] += (in_vector[k]*gain+add_constant)>>right_shifts
+//
+// Input:
+//      - in_vector     : Input vector
+//      - gain          : Gain value, used to multiply the in vector with
+//      - add_constant  : Constant value to add (usually 1<<(right_shifts-1),
+//                        but others can be used as well
+//      - right_shifts  : Number of right bit shifts (0-16)
+//      - vector_length : Number of samples in |in_vector| and |out_vector|
+//
+// Output:
+//      - out_vector    : Vector with the output
+//
+
+//
+// WebRtcSpl_AffineTransformVector(...)
+//
+// Affine transforms a vector, i.e, performs
+//  out_vector[k] = (in_vector[k]*gain+add_constant)>>right_shifts
+//
+// Input:
+//      - in_vector     : Input vector
+//      - gain          : Gain value, used to multiply the in vector with
+//      - add_constant  : Constant value to add (usually 1<<(right_shifts-1),
+//                        but others can be used as well
+//      - right_shifts  : Number of right bit shifts (0-16)
+//      - vector_length : Number of samples in |in_vector| and |out_vector|
+//
+// Output:
+//      - out_vector    : Vector with the output
+//
+
+//
+// WebRtcSpl_IncreaseSeed(...)
+//
+// Increases the seed (and returns the new value)
+//
+// Input:
+//      - seed      : Seed for random calculation
+//
+// Output:
+//      - seed      : Updated seed value
+//
+// Return value     : The new seed value
+//
+
+//
+// WebRtcSpl_RandU(...)
+//
+// Produces a uniformly distributed value in the int16_t range
+//
+// Input:
+//      - seed      : Seed for random calculation
+//
+// Output:
+//      - seed      : Updated seed value
+//
+// Return value     : Uniformly distributed value in the range
+//                    [Word16_MIN...Word16_MAX]
+//
+
+//
+// WebRtcSpl_RandN(...)
+//
+// Produces a normal distributed value in the int16_t range
+//
+// Input:
+//      - seed      : Seed for random calculation
+//
+// Output:
+//      - seed      : Updated seed value
+//
+// Return value     : N(0,1) value in the Q13 domain
+//
+
+//
+// WebRtcSpl_RandUArray(...)
+//
+// Produces a uniformly distributed vector with elements in the int16_t
+// range
+//
+// Input:
+//      - vector_length : Samples wanted in the vector
+//      - seed          : Seed for random calculation
+//
+// Output:
+//      - vector        : Vector with the uniform values
+//      - seed          : Updated seed value
+//
+// Return value         : Number of samples in vector, i.e., |vector_length|
+//
+
+//
+// WebRtcSpl_Sqrt(...)
+//
+// Returns the square root of the input value |value|. The precision of this
+// function is integer precision, i.e., sqrt(8) gives 2 as answer.
+// If |value| is a negative number then 0 is returned.
+//
+// Algorithm:
+//
+// A sixth order Taylor Series expansion is used here to compute the square
+// root of a number y^0.5 = (1+x)^0.5
+// where
+// x = y-1
+//   = 1+(x/2)-0.5*((x/2)^2+0.5*((x/2)^3-0.625*((x/2)^4+0.875*((x/2)^5)
+// 0.5 <= x < 1
+//
+// Input:
+//      - value     : Value to calculate sqrt of
+//
+// Return value     : Result of the sqrt calculation
+//
+
+//
+// WebRtcSpl_SqrtFloor(...)
+//
+// Returns the square root of the input value |value|. The precision of this
+// function is rounding down integer precision, i.e., sqrt(8) gives 2 as answer.
+// If |value| is a negative number then 0 is returned.
+//
+// Algorithm:
+//
+// An iterative 4 cylce/bit routine
+//
+// Input:
+//      - value     : Value to calculate sqrt of
+//
+// Return value     : Result of the sqrt calculation
+//
+
+//
+// WebRtcSpl_DivU32U16(...)
+//
+// Divides a uint32_t |num| by a uint16_t |den|.
+//
+// If |den|==0, (uint32_t)0xFFFFFFFF is returned.
+//
+// Input:
+//      - num       : Numerator
+//      - den       : Denominator
+//
+// Return value     : Result of the division (as a uint32_t), i.e., the
+//                    integer part of num/den.
+//
+
+//
+// WebRtcSpl_DivW32W16(...)
+//
+// Divides a int32_t |num| by a int16_t |den|.
+//
+// If |den|==0, (int32_t)0x7FFFFFFF is returned.
+//
+// Input:
+//      - num       : Numerator
+//      - den       : Denominator
+//
+// Return value     : Result of the division (as a int32_t), i.e., the
+//                    integer part of num/den.
+//
+
+//
+// WebRtcSpl_DivW32W16ResW16(...)
+//
+// Divides a int32_t |num| by a int16_t |den|, assuming that the
+// result is less than 32768, otherwise an unpredictable result will occur.
+//
+// If |den|==0, (int16_t)0x7FFF is returned.
+//
+// Input:
+//      - num       : Numerator
+//      - den       : Denominator
+//
+// Return value     : Result of the division (as a int16_t), i.e., the
+//                    integer part of num/den.
+//
+
+//
+// WebRtcSpl_DivResultInQ31(...)
+//
+// Divides a int32_t |num| by a int16_t |den|, assuming that the
+// absolute value of the denominator is larger than the numerator, otherwise
+// an unpredictable result will occur.
+//
+// Input:
+//      - num       : Numerator
+//      - den       : Denominator
+//
+// Return value     : Result of the division in Q31.
+//
+
+//
+// WebRtcSpl_DivW32HiLow(...)
+//
+// Divides a int32_t |num| by a denominator in hi, low format. The
+// absolute value of the denominator has to be larger (or equal to) the
+// numerator.
+//
+// Input:
+//      - num       : Numerator
+//      - den_hi    : High part of denominator
+//      - den_low   : Low part of denominator
+//
+// Return value     : Divided value in Q31
+//
+
+//
+// WebRtcSpl_Energy(...)
+//
+// Calculates the energy of a vector
+//
+// Input:
+//      - vector        : Vector which the energy should be calculated on
+//      - vector_length : Number of samples in vector
+//
+// Output:
+//      - scale_factor  : Number of left bit shifts needed to get the physical
+//                        energy value, i.e, to get the Q0 value
+//
+// Return value         : Energy value in Q(-|scale_factor|)
+//
+
+//
+// WebRtcSpl_FilterAR(...)
+//
+// Performs a 32-bit AR filtering on a vector in Q12
+//
+// Input:
+//  - ar_coef                   : AR-coefficient vector (values in Q12),
+//                                ar_coef[0] must be 4096.
+//  - ar_coef_length            : Number of coefficients in |ar_coef|.
+//  - in_vector                 : Vector to be filtered.
+//  - in_vector_length          : Number of samples in |in_vector|.
+//  - filter_state              : Current state (higher part) of the filter.
+//  - filter_state_length       : Length (in samples) of |filter_state|.
+//  - filter_state_low          : Current state (lower part) of the filter.
+//  - filter_state_low_length   : Length (in samples) of |filter_state_low|.
+//  - out_vector_low_length     : Maximum length (in samples) of
+//                                |out_vector_low|.
+//
+// Output:
+//  - filter_state              : Updated state (upper part) vector.
+//  - filter_state_low          : Updated state (lower part) vector.
+//  - out_vector                : Vector containing the upper part of the
+//                                filtered values.
+//  - out_vector_low            : Vector containing the lower part of the
+//                                filtered values.
+//
+// Return value                 : Number of samples in the |out_vector|.
+//
+
+//
+// WebRtcSpl_ComplexIFFT(...)
+//
+// Complex Inverse FFT
+//
+// Computes an inverse complex 2^|stages|-point FFT on the input vector, which
+// is in bit-reversed order. The original content of the vector is destroyed in
+// the process, since the input is overwritten by the output, normal-ordered,
+// FFT vector. With X as the input complex vector, y as the output complex
+// vector and with M = 2^|stages|, the following is computed:
+//
+//        M-1
+// y(k) = sum[X(i)*[cos(2*pi*i*k/M) + j*sin(2*pi*i*k/M)]]
+//        i=0
+//
+// The implementations are optimized for speed, not for code size. It uses the
+// decimation-in-time algorithm with radix-2 butterfly technique.
+//
+// Input:
+//      - vector    : In pointer to complex vector containing 2^|stages|
+//                    real elements interleaved with 2^|stages| imaginary
+//                    elements.
+//                    [ReImReImReIm....]
+//                    The elements are in Q(-scale) domain, see more on Return
+//                    Value below.
+//
+//      - stages    : Number of FFT stages. Must be at least 3 and at most 10,
+//                    since the table WebRtcSpl_kSinTable1024[] is 1024
+//                    elements long.
+//
+//      - mode      : This parameter gives the user to choose how the FFT
+//                    should work.
+//                    mode==0: Low-complexity and Low-accuracy mode
+//                    mode==1: High-complexity and High-accuracy mode
+//
+// Output:
+//      - vector    : Out pointer to the FFT vector (the same as input).
+//
+// Return Value     : The scale value that tells the number of left bit shifts
+//                    that the elements in the |vector| should be shifted with
+//                    in order to get Q0 values, i.e. the physically correct
+//                    values. The scale parameter is always 0 or positive,
+//                    except if N>1024 (|stages|>10), which returns a scale
+//                    value of -1, indicating error.
+//
+
+//
+// WebRtcSpl_ComplexFFT(...)
+//
+// Complex FFT
+//
+// Computes a complex 2^|stages|-point FFT on the input vector, which is in
+// bit-reversed order. The original content of the vector is destroyed in
+// the process, since the input is overwritten by the output, normal-ordered,
+// FFT vector. With x as the input complex vector, Y as the output complex
+// vector and with M = 2^|stages|, the following is computed:
+//
+//              M-1
+// Y(k) = 1/M * sum[x(i)*[cos(2*pi*i*k/M) + j*sin(2*pi*i*k/M)]]
+//              i=0
+//
+// The implementations are optimized for speed, not for code size. It uses the
+// decimation-in-time algorithm with radix-2 butterfly technique.
+//
+// This routine prevents overflow by scaling by 2 before each FFT stage. This is
+// a fixed scaling, for proper normalization - there will be log2(n) passes, so
+// this results in an overall factor of 1/n, distributed to maximize arithmetic
+// accuracy.
+//
+// Input:
+//      - vector    : In pointer to complex vector containing 2^|stages| real
+//                    elements interleaved with 2^|stages| imaginary elements.
+//                    [ReImReImReIm....]
+//                    The output is in the Q0 domain.
+//
+//      - stages    : Number of FFT stages. Must be at least 3 and at most 10,
+//                    since the table WebRtcSpl_kSinTable1024[] is 1024
+//                    elements long.
+//
+//      - mode      : This parameter gives the user to choose how the FFT
+//                    should work.
+//                    mode==0: Low-complexity and Low-accuracy mode
+//                    mode==1: High-complexity and High-accuracy mode
+//
+// Output:
+//      - vector    : The output FFT vector is in the Q0 domain.
+//
+// Return value     : The scale parameter is always 0, except if N>1024,
+//                    which returns a scale value of -1, indicating error.
+//
+
+//
+// WebRtcSpl_AnalysisQMF(...)
+//
+// Splits a 0-2*F Hz signal into two sub bands: 0-F Hz and F-2*F Hz. The
+// current version has F = 8000, therefore, a super-wideband audio signal is
+// split to lower-band 0-8 kHz and upper-band 8-16 kHz.
+//
+// Input:
+//      - in_data       : Wide band speech signal, 320 samples (10 ms)
+//
+// Input & Output:
+//      - filter_state1 : Filter state for first All-pass filter
+//      - filter_state2 : Filter state for second All-pass filter
+//
+// Output:
+//      - low_band      : Lower-band signal 0-8 kHz band, 160 samples (10 ms)
+//      - high_band     : Upper-band signal 8-16 kHz band (flipped in frequency
+//                        domain), 160 samples (10 ms)
+//
+
+//
+// WebRtcSpl_SynthesisQMF(...)
+//
+// Combines the two sub bands (0-F and F-2*F Hz) into a signal of 0-2*F
+// Hz, (current version has F = 8000 Hz). So the filter combines lower-band
+// (0-8 kHz) and upper-band (8-16 kHz) channels to obtain super-wideband 0-16
+// kHz audio.
+//
+// Input:
+//      - low_band      : The signal with the 0-8 kHz band, 160 samples (10 ms)
+//      - high_band     : The signal with the 8-16 kHz band, 160 samples (10 ms)
+//
+// Input & Output:
+//      - filter_state1 : Filter state for first All-pass filter
+//      - filter_state2 : Filter state for second All-pass filter
+//
+// Output:
+//      - out_data      : Super-wideband speech signal, 0-16 kHz
+//
+
+// int16_t WebRtcSpl_SatW32ToW16(...)
+//
+// This function saturates a 32-bit word into a 16-bit word.
+//
+// Input:
+//      - value32   : The value of a 32-bit word.
+//
+// Output:
+//      - out16     : the saturated 16-bit word.
+//
+
+// int32_t WebRtc_MulAccumW16(...)
+//
+// This function multiply a 16-bit word by a 16-bit word, and accumulate this
+// value to a 32-bit integer.
+//
+// Input:
+//      - a    : The value of the first 16-bit word.
+//      - b    : The value of the second 16-bit word.
+//      - c    : The value of an 32-bit integer.
+//
+// Return Value: The value of a * b + c.
+//
diff --git a/common_audio/signal_processing/include/spl_inl.h b/common_audio/signal_processing/include/spl_inl.h
new file mode 100644
index 0000000..ba3a113
--- /dev/null
+++ b/common_audio/signal_processing/include/spl_inl.h
@@ -0,0 +1,154 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+// This header file includes the inline functions in
+// the fix point signal processing library.
+
+#ifndef COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_H_
+#define COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_H_
+
+#include "rtc_base/compile_assert_c.h"
+
+extern const int8_t kWebRtcSpl_CountLeadingZeros32_Table[64];
+
+// Don't call this directly except in tests!
+static __inline int WebRtcSpl_CountLeadingZeros32_NotBuiltin(uint32_t n) {
+  // Normalize n by rounding up to the nearest number that is a sequence of 0
+  // bits followed by a sequence of 1 bits. This number has the same number of
+  // leading zeros as the original n. There are exactly 33 such values.
+  n |= n >> 1;
+  n |= n >> 2;
+  n |= n >> 4;
+  n |= n >> 8;
+  n |= n >> 16;
+
+  // Multiply the modified n with a constant selected (by exhaustive search)
+  // such that each of the 33 possible values of n give a product whose 6 most
+  // significant bits are unique. Then look up the answer in the table.
+  return kWebRtcSpl_CountLeadingZeros32_Table[(n * 0x8c0b2891) >> 26];
+}
+
+// Don't call this directly except in tests!
+static __inline int WebRtcSpl_CountLeadingZeros64_NotBuiltin(uint64_t n) {
+  const int leading_zeros = n >> 32 == 0 ? 32 : 0;
+  return leading_zeros + WebRtcSpl_CountLeadingZeros32_NotBuiltin(
+                             (uint32_t)(n >> (32 - leading_zeros)));
+}
+
+// Returns the number of leading zero bits in the argument.
+static __inline int WebRtcSpl_CountLeadingZeros32(uint32_t n) {
+#ifdef __GNUC__
+  RTC_COMPILE_ASSERT(sizeof(unsigned int) == sizeof(uint32_t));
+  return n == 0 ? 32 : __builtin_clz(n);
+#else
+  return WebRtcSpl_CountLeadingZeros32_NotBuiltin(n);
+#endif
+}
+
+// Returns the number of leading zero bits in the argument.
+static __inline int WebRtcSpl_CountLeadingZeros64(uint64_t n) {
+#ifdef __GNUC__
+  RTC_COMPILE_ASSERT(sizeof(unsigned long long) == sizeof(uint64_t));  // NOLINT
+  return n == 0 ? 64 : __builtin_clzll(n);
+#else
+  return WebRtcSpl_CountLeadingZeros64_NotBuiltin(n);
+#endif
+}
+
+#ifdef WEBRTC_ARCH_ARM_V7
+#include "common_audio/signal_processing/include/spl_inl_armv7.h"
+#else
+
+#if defined(MIPS32_LE)
+#include "common_audio/signal_processing/include/spl_inl_mips.h"
+#endif
+
+#if !defined(MIPS_DSP_R1_LE)
+static __inline int16_t WebRtcSpl_SatW32ToW16(int32_t value32) {
+  int16_t out16 = (int16_t) value32;
+
+  if (value32 > 32767)
+    out16 = 32767;
+  else if (value32 < -32768)
+    out16 = -32768;
+
+  return out16;
+}
+
+static __inline int32_t WebRtcSpl_AddSatW32(int32_t a, int32_t b) {
+  // Do the addition in unsigned numbers, since signed overflow is undefined
+  // behavior.
+  const int32_t sum = (int32_t)((uint32_t)a + (uint32_t)b);
+
+  // a + b can't overflow if a and b have different signs. If they have the
+  // same sign, a + b also has the same sign iff it didn't overflow.
+  if ((a < 0) == (b < 0) && (a < 0) != (sum < 0)) {
+    // The direction of the overflow is obvious from the sign of a + b.
+    return sum < 0 ? INT32_MAX : INT32_MIN;
+  }
+  return sum;
+}
+
+static __inline int32_t WebRtcSpl_SubSatW32(int32_t a, int32_t b) {
+  // Do the subtraction in unsigned numbers, since signed overflow is undefined
+  // behavior.
+  const int32_t diff = (int32_t)((uint32_t)a - (uint32_t)b);
+
+  // a - b can't overflow if a and b have the same sign. If they have different
+  // signs, a - b has the same sign as a iff it didn't overflow.
+  if ((a < 0) != (b < 0) && (a < 0) != (diff < 0)) {
+    // The direction of the overflow is obvious from the sign of a - b.
+    return diff < 0 ? INT32_MAX : INT32_MIN;
+  }
+  return diff;
+}
+
+static __inline int16_t WebRtcSpl_AddSatW16(int16_t a, int16_t b) {
+  return WebRtcSpl_SatW32ToW16((int32_t) a + (int32_t) b);
+}
+
+static __inline int16_t WebRtcSpl_SubSatW16(int16_t var1, int16_t var2) {
+  return WebRtcSpl_SatW32ToW16((int32_t) var1 - (int32_t) var2);
+}
+#endif  // #if !defined(MIPS_DSP_R1_LE)
+
+#if !defined(MIPS32_LE)
+static __inline int16_t WebRtcSpl_GetSizeInBits(uint32_t n) {
+  return 32 - WebRtcSpl_CountLeadingZeros32(n);
+}
+
+// Return the number of steps a can be left-shifted without overflow,
+// or 0 if a == 0.
+static __inline int16_t WebRtcSpl_NormW32(int32_t a) {
+  return a == 0 ? 0 : WebRtcSpl_CountLeadingZeros32(a < 0 ? ~a : a) - 1;
+}
+
+// Return the number of steps a can be left-shifted without overflow,
+// or 0 if a == 0.
+static __inline int16_t WebRtcSpl_NormU32(uint32_t a) {
+  return a == 0 ? 0 : WebRtcSpl_CountLeadingZeros32(a);
+}
+
+// Return the number of steps a can be left-shifted without overflow,
+// or 0 if a == 0.
+static __inline int16_t WebRtcSpl_NormW16(int16_t a) {
+  const int32_t a32 = a;
+  return a == 0 ? 0 : WebRtcSpl_CountLeadingZeros32(a < 0 ? ~a32 : a32) - 17;
+}
+
+static __inline int32_t WebRtc_MulAccumW16(int16_t a, int16_t b, int32_t c) {
+  return (a * b + c);
+}
+#endif  // #if !defined(MIPS32_LE)
+
+#endif  // WEBRTC_ARCH_ARM_V7
+
+#endif  // COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_H_
diff --git a/common_audio/signal_processing/include/spl_inl_armv7.h b/common_audio/signal_processing/include/spl_inl_armv7.h
new file mode 100644
index 0000000..97179f9
--- /dev/null
+++ b/common_audio/signal_processing/include/spl_inl_armv7.h
@@ -0,0 +1,134 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/* This header file includes the inline functions for ARM processors in
+ * the fix point signal processing library.
+ */
+
+#ifndef COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_ARMV7_H_
+#define COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_ARMV7_H_
+
+/* TODO(kma): Replace some assembly code with GCC intrinsics
+ * (e.g. __builtin_clz).
+ */
+
+/* This function produces result that is not bit exact with that by the generic
+ * C version in some cases, although the former is at least as accurate as the
+ * later.
+ */
+static __inline int32_t WEBRTC_SPL_MUL_16_32_RSFT16(int16_t a, int32_t b) {
+  int32_t tmp = 0;
+  __asm __volatile ("smulwb %0, %1, %2":"=r"(tmp):"r"(b), "r"(a));
+  return tmp;
+}
+
+static __inline int32_t WEBRTC_SPL_MUL_16_16(int16_t a, int16_t b) {
+  int32_t tmp = 0;
+  __asm __volatile ("smulbb %0, %1, %2":"=r"(tmp):"r"(a), "r"(b));
+  return tmp;
+}
+
+// TODO(kma): add unit test.
+static __inline int32_t WebRtc_MulAccumW16(int16_t a, int16_t b, int32_t c) {
+  int32_t tmp = 0;
+  __asm __volatile ("smlabb %0, %1, %2, %3":"=r"(tmp):"r"(a), "r"(b), "r"(c));
+  return tmp;
+}
+
+static __inline int16_t WebRtcSpl_AddSatW16(int16_t a, int16_t b) {
+  int32_t s_sum = 0;
+
+  __asm __volatile ("qadd16 %0, %1, %2":"=r"(s_sum):"r"(a), "r"(b));
+
+  return (int16_t) s_sum;
+}
+
+static __inline int32_t WebRtcSpl_AddSatW32(int32_t l_var1, int32_t l_var2) {
+  int32_t l_sum = 0;
+
+  __asm __volatile ("qadd %0, %1, %2":"=r"(l_sum):"r"(l_var1), "r"(l_var2));
+
+  return l_sum;
+}
+
+static __inline int32_t WebRtcSpl_SubSatW32(int32_t l_var1, int32_t l_var2) {
+  int32_t l_sub = 0;
+
+  __asm __volatile ("qsub %0, %1, %2":"=r"(l_sub):"r"(l_var1), "r"(l_var2));
+
+  return l_sub;
+}
+
+static __inline int16_t WebRtcSpl_SubSatW16(int16_t var1, int16_t var2) {
+  int32_t s_sub = 0;
+
+  __asm __volatile ("qsub16 %0, %1, %2":"=r"(s_sub):"r"(var1), "r"(var2));
+
+  return (int16_t)s_sub;
+}
+
+static __inline int16_t WebRtcSpl_GetSizeInBits(uint32_t n) {
+  int32_t tmp = 0;
+
+  __asm __volatile ("clz %0, %1":"=r"(tmp):"r"(n));
+
+  return (int16_t)(32 - tmp);
+}
+
+static __inline int16_t WebRtcSpl_NormW32(int32_t a) {
+  int32_t tmp = 0;
+
+  if (a == 0) {
+    return 0;
+  } else if (a < 0) {
+    a ^= 0xFFFFFFFF;
+  }
+
+  __asm __volatile ("clz %0, %1":"=r"(tmp):"r"(a));
+
+  return (int16_t)(tmp - 1);
+}
+
+static __inline int16_t WebRtcSpl_NormU32(uint32_t a) {
+  int tmp = 0;
+
+  if (a == 0) return 0;
+
+  __asm __volatile ("clz %0, %1":"=r"(tmp):"r"(a));
+
+  return (int16_t)tmp;
+}
+
+static __inline int16_t WebRtcSpl_NormW16(int16_t a) {
+  int32_t tmp = 0;
+  int32_t a_32 = a;
+
+  if (a_32 == 0) {
+    return 0;
+  } else if (a_32 < 0) {
+    a_32 ^= 0xFFFFFFFF;
+  }
+
+  __asm __volatile ("clz %0, %1":"=r"(tmp):"r"(a_32));
+
+  return (int16_t)(tmp - 17);
+}
+
+// TODO(kma): add unit test.
+static __inline int16_t WebRtcSpl_SatW32ToW16(int32_t value32) {
+  int32_t out = 0;
+
+  __asm __volatile ("ssat %0, #16, %1" : "=r"(out) : "r"(value32));
+
+  return (int16_t)out;
+}
+
+#endif  // COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_ARMV7_H_
diff --git a/common_audio/signal_processing/include/spl_inl_mips.h b/common_audio/signal_processing/include/spl_inl_mips.h
new file mode 100644
index 0000000..5819d0f
--- /dev/null
+++ b/common_audio/signal_processing/include/spl_inl_mips.h
@@ -0,0 +1,213 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+// This header file includes the inline functions in
+// the fix point signal processing library.
+
+#ifndef COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_MIPS_H_
+#define COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_MIPS_H_
+
+static __inline int32_t WEBRTC_SPL_MUL_16_16(int32_t a,
+                                             int32_t b) {
+  int32_t value32 = 0;
+  int32_t a1 = 0, b1 = 0;
+
+  __asm __volatile(
+#if defined(MIPS32_R2_LE)
+    "seh    %[a1],          %[a]                \n\t"
+    "seh    %[b1],          %[b]                \n\t"
+#else
+    "sll    %[a1],          %[a],         16    \n\t"
+    "sll    %[b1],          %[b],         16    \n\t"
+    "sra    %[a1],          %[a1],        16    \n\t"
+    "sra    %[b1],          %[b1],        16    \n\t"
+#endif
+    "mul    %[value32],     %[a1],  %[b1]       \n\t"
+    : [value32] "=r" (value32), [a1] "=&r" (a1), [b1] "=&r" (b1)
+    : [a] "r" (a), [b] "r" (b)
+    : "hi", "lo");
+  return value32;
+}
+
+static __inline int32_t WEBRTC_SPL_MUL_16_32_RSFT16(int16_t a,
+                                                    int32_t b) {
+  int32_t value32 = 0, b1 = 0, b2 = 0;
+  int32_t a1 = 0;
+
+  __asm __volatile(
+#if defined(MIPS32_R2_LE)
+    "seh    %[a1],          %[a]                        \n\t"
+#else
+    "sll    %[a1],          %[a],           16          \n\t"
+    "sra    %[a1],          %[a1],          16          \n\t"
+#endif
+    "andi   %[b2],          %[b],           0xFFFF      \n\t"
+    "sra    %[b1],          %[b],           16          \n\t"
+    "sra    %[b2],          %[b2],          1           \n\t"
+    "mul    %[value32],     %[a1],          %[b1]       \n\t"
+    "mul    %[b2],          %[a1],          %[b2]       \n\t"
+    "addiu  %[b2],          %[b2],          0x4000      \n\t"
+    "sra    %[b2],          %[b2],          15          \n\t"
+    "addu   %[value32],     %[value32],     %[b2]       \n\t"
+    : [value32] "=&r" (value32), [b1] "=&r" (b1), [b2] "=&r" (b2),
+      [a1] "=&r" (a1)
+    : [a] "r" (a), [b] "r" (b)
+    : "hi", "lo");
+  return value32;
+}
+
+#if defined(MIPS_DSP_R1_LE)
+static __inline int16_t WebRtcSpl_SatW32ToW16(int32_t value32) {
+  __asm __volatile(
+    "shll_s.w   %[value32], %[value32], 16      \n\t"
+    "sra        %[value32], %[value32], 16      \n\t"
+    : [value32] "+r" (value32)
+    :);
+  int16_t out16 = (int16_t)value32;
+  return out16;
+}
+
+static __inline int16_t WebRtcSpl_AddSatW16(int16_t a, int16_t b) {
+  int32_t value32 = 0;
+
+  __asm __volatile(
+    "addq_s.ph      %[value32],     %[a],   %[b]    \n\t"
+    : [value32] "=r" (value32)
+    : [a] "r" (a), [b] "r" (b) );
+  return (int16_t)value32;
+}
+
+static __inline int32_t WebRtcSpl_AddSatW32(int32_t l_var1, int32_t l_var2) {
+  int32_t l_sum;
+
+  __asm __volatile(
+    "addq_s.w   %[l_sum],       %[l_var1],      %[l_var2]    \n\t"
+    : [l_sum] "=r" (l_sum)
+    : [l_var1] "r" (l_var1), [l_var2] "r" (l_var2) );
+
+  return l_sum;
+}
+
+static __inline int16_t WebRtcSpl_SubSatW16(int16_t var1, int16_t var2) {
+  int32_t value32;
+
+  __asm __volatile(
+    "subq_s.ph  %[value32], %[var1],    %[var2]     \n\t"
+    : [value32] "=r" (value32)
+    : [var1] "r" (var1), [var2] "r" (var2) );
+
+  return (int16_t)value32;
+}
+
+static __inline int32_t WebRtcSpl_SubSatW32(int32_t l_var1, int32_t l_var2) {
+  int32_t l_diff;
+
+  __asm __volatile(
+    "subq_s.w   %[l_diff],      %[l_var1],      %[l_var2]    \n\t"
+    : [l_diff] "=r" (l_diff)
+    : [l_var1] "r" (l_var1), [l_var2] "r" (l_var2) );
+
+  return l_diff;
+}
+#endif
+
+static __inline int16_t WebRtcSpl_GetSizeInBits(uint32_t n) {
+  int bits = 0;
+  int i32 = 32;
+
+  __asm __volatile(
+    "clz    %[bits],    %[n]                    \n\t"
+    "subu   %[bits],    %[i32],     %[bits]     \n\t"
+    : [bits] "=&r" (bits)
+    : [n] "r" (n), [i32] "r" (i32) );
+
+  return (int16_t)bits;
+}
+
+static __inline int16_t WebRtcSpl_NormW32(int32_t a) {
+  int zeros = 0;
+
+  __asm __volatile(
+    ".set       push                                \n\t"
+    ".set       noreorder                           \n\t"
+    "bnez       %[a],       1f                      \n\t"
+    " sra       %[zeros],   %[a],       31          \n\t"
+    "b          2f                                  \n\t"
+    " move      %[zeros],   $zero                   \n\t"
+   "1:                                              \n\t"
+    "xor        %[zeros],   %[a],       %[zeros]    \n\t"
+    "clz        %[zeros],   %[zeros]                \n\t"
+    "addiu      %[zeros],   %[zeros],   -1          \n\t"
+   "2:                                              \n\t"
+    ".set       pop                                 \n\t"
+    : [zeros]"=&r"(zeros)
+    : [a] "r" (a) );
+
+  return (int16_t)zeros;
+}
+
+static __inline int16_t WebRtcSpl_NormU32(uint32_t a) {
+  int zeros = 0;
+
+  __asm __volatile(
+    "clz    %[zeros],   %[a]    \n\t"
+    : [zeros] "=r" (zeros)
+    : [a] "r" (a) );
+
+  return (int16_t)(zeros & 0x1f);
+}
+
+static __inline int16_t WebRtcSpl_NormW16(int16_t a) {
+  int zeros = 0;
+  int a0 = a << 16;
+
+  __asm __volatile(
+    ".set       push                                \n\t"
+    ".set       noreorder                           \n\t"
+    "bnez       %[a0],      1f                      \n\t"
+    " sra       %[zeros],   %[a0],      31          \n\t"
+    "b          2f                                  \n\t"
+    " move      %[zeros],   $zero                   \n\t"
+   "1:                                              \n\t"
+    "xor        %[zeros],   %[a0],      %[zeros]    \n\t"
+    "clz        %[zeros],   %[zeros]                \n\t"
+    "addiu      %[zeros],   %[zeros],   -1          \n\t"
+   "2:                                              \n\t"
+    ".set       pop                                 \n\t"
+    : [zeros]"=&r"(zeros)
+    : [a0] "r" (a0) );
+
+  return (int16_t)zeros;
+}
+
+static __inline int32_t WebRtc_MulAccumW16(int16_t a,
+                                           int16_t b,
+                                           int32_t c) {
+  int32_t res = 0, c1 = 0;
+  __asm __volatile(
+#if defined(MIPS32_R2_LE)
+    "seh    %[a],       %[a]            \n\t"
+    "seh    %[b],       %[b]            \n\t"
+#else
+    "sll    %[a],       %[a],   16      \n\t"
+    "sll    %[b],       %[b],   16      \n\t"
+    "sra    %[a],       %[a],   16      \n\t"
+    "sra    %[b],       %[b],   16      \n\t"
+#endif
+    "mul    %[res],     %[a],   %[b]    \n\t"
+    "addu   %[c1],      %[c],   %[res]  \n\t"
+    : [c1] "=r" (c1), [res] "=&r" (res)
+    : [a] "r" (a), [b] "r" (b), [c] "r" (c)
+    : "hi", "lo");
+  return (c1);
+}
+
+#endif  // COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_SPL_INL_MIPS_H_
diff --git a/common_audio/signal_processing/levinson_durbin.c b/common_audio/signal_processing/levinson_durbin.c
new file mode 100644
index 0000000..2c5cbae
--- /dev/null
+++ b/common_audio/signal_processing/levinson_durbin.c
@@ -0,0 +1,249 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains the function WebRtcSpl_LevinsonDurbin().
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "rtc_base/sanitizer.h"
+
+#define SPL_LEVINSON_MAXORDER 20
+
+int16_t RTC_NO_SANITIZE("signed-integer-overflow")  // bugs.webrtc.org/5486
+WebRtcSpl_LevinsonDurbin(const int32_t* R, int16_t* A, int16_t* K,
+                         size_t order)
+{
+    size_t i, j;
+    // Auto-correlation coefficients in high precision
+    int16_t R_hi[SPL_LEVINSON_MAXORDER + 1], R_low[SPL_LEVINSON_MAXORDER + 1];
+    // LPC coefficients in high precision
+    int16_t A_hi[SPL_LEVINSON_MAXORDER + 1], A_low[SPL_LEVINSON_MAXORDER + 1];
+    // LPC coefficients for next iteration
+    int16_t A_upd_hi[SPL_LEVINSON_MAXORDER + 1], A_upd_low[SPL_LEVINSON_MAXORDER + 1];
+    // Reflection coefficient in high precision
+    int16_t K_hi, K_low;
+    // Prediction gain Alpha in high precision and with scale factor
+    int16_t Alpha_hi, Alpha_low, Alpha_exp;
+    int16_t tmp_hi, tmp_low;
+    int32_t temp1W32, temp2W32, temp3W32;
+    int16_t norm;
+
+    // Normalize the autocorrelation R[0]...R[order+1]
+
+    norm = WebRtcSpl_NormW32(R[0]);
+
+    for (i = 0; i <= order; ++i)
+    {
+        temp1W32 = R[i] * (1 << norm);
+        // UBSan: 12 * 268435456 cannot be represented in type 'int'
+
+        // Put R in hi and low format
+        R_hi[i] = (int16_t)(temp1W32 >> 16);
+        R_low[i] = (int16_t)((temp1W32 - ((int32_t)R_hi[i] * 65536)) >> 1);
+    }
+
+    // K = A[1] = -R[1] / R[0]
+
+    temp2W32 = R[1] * (1 << norm); // R[1] in Q31
+    temp3W32 = WEBRTC_SPL_ABS_W32(temp2W32); // abs R[1]
+    temp1W32 = WebRtcSpl_DivW32HiLow(temp3W32, R_hi[0], R_low[0]); // abs(R[1])/R[0] in Q31
+    // Put back the sign on R[1]
+    if (temp2W32 > 0)
+    {
+        temp1W32 = -temp1W32;
+    }
+
+    // Put K in hi and low format
+    K_hi = (int16_t)(temp1W32 >> 16);
+    K_low = (int16_t)((temp1W32 - ((int32_t)K_hi * 65536)) >> 1);
+
+    // Store first reflection coefficient
+    K[0] = K_hi;
+
+    temp1W32 >>= 4;  // A[1] in Q27.
+
+    // Put A[1] in hi and low format
+    A_hi[1] = (int16_t)(temp1W32 >> 16);
+    A_low[1] = (int16_t)((temp1W32 - ((int32_t)A_hi[1] * 65536)) >> 1);
+
+    // Alpha = R[0] * (1-K^2)
+
+    temp1W32 = ((K_hi * K_low >> 14) + K_hi * K_hi) * 2;  // = k^2 in Q31
+
+    temp1W32 = WEBRTC_SPL_ABS_W32(temp1W32); // Guard against <0
+    temp1W32 = (int32_t)0x7fffffffL - temp1W32; // temp1W32 = (1 - K[0]*K[0]) in Q31
+
+    // Store temp1W32 = 1 - K[0]*K[0] on hi and low format
+    tmp_hi = (int16_t)(temp1W32 >> 16);
+    tmp_low = (int16_t)((temp1W32 - ((int32_t)tmp_hi << 16)) >> 1);
+
+    // Calculate Alpha in Q31
+    temp1W32 = (R_hi[0] * tmp_hi + (R_hi[0] * tmp_low >> 15) +
+        (R_low[0] * tmp_hi >> 15)) << 1;
+
+    // Normalize Alpha and put it in hi and low format
+
+    Alpha_exp = WebRtcSpl_NormW32(temp1W32);
+    temp1W32 = WEBRTC_SPL_LSHIFT_W32(temp1W32, Alpha_exp);
+    Alpha_hi = (int16_t)(temp1W32 >> 16);
+    Alpha_low = (int16_t)((temp1W32 - ((int32_t)Alpha_hi << 16)) >> 1);
+
+    // Perform the iterative calculations in the Levinson-Durbin algorithm
+
+    for (i = 2; i <= order; i++)
+    {
+        /*                    ----
+         temp1W32 =  R[i] + > R[j]*A[i-j]
+         /
+         ----
+         j=1..i-1
+         */
+
+        temp1W32 = 0;
+
+        for (j = 1; j < i; j++)
+        {
+          // temp1W32 is in Q31
+          temp1W32 += (R_hi[j] * A_hi[i - j] * 2) +
+              (((R_hi[j] * A_low[i - j] >> 15) +
+              (R_low[j] * A_hi[i - j] >> 15)) * 2);
+        }
+
+        temp1W32 = temp1W32 * 16;
+        temp1W32 += ((int32_t)R_hi[i] * 65536)
+                + WEBRTC_SPL_LSHIFT_W32((int32_t)R_low[i], 1);
+
+        // K = -temp1W32 / Alpha
+        temp2W32 = WEBRTC_SPL_ABS_W32(temp1W32); // abs(temp1W32)
+        temp3W32 = WebRtcSpl_DivW32HiLow(temp2W32, Alpha_hi, Alpha_low); // abs(temp1W32)/Alpha
+
+        // Put the sign of temp1W32 back again
+        if (temp1W32 > 0)
+        {
+            temp3W32 = -temp3W32;
+        }
+
+        // Use the Alpha shifts from earlier to de-normalize
+        norm = WebRtcSpl_NormW32(temp3W32);
+        if ((Alpha_exp <= norm) || (temp3W32 == 0))
+        {
+            temp3W32 = temp3W32 * (1 << Alpha_exp);
+        } else
+        {
+            if (temp3W32 > 0)
+            {
+                temp3W32 = (int32_t)0x7fffffffL;
+            } else
+            {
+                temp3W32 = (int32_t)0x80000000L;
+            }
+        }
+
+        // Put K on hi and low format
+        K_hi = (int16_t)(temp3W32 >> 16);
+        K_low = (int16_t)((temp3W32 - ((int32_t)K_hi * 65536)) >> 1);
+
+        // Store Reflection coefficient in Q15
+        K[i - 1] = K_hi;
+
+        // Test for unstable filter.
+        // If unstable return 0 and let the user decide what to do in that case
+
+        if ((int32_t)WEBRTC_SPL_ABS_W16(K_hi) > (int32_t)32750)
+        {
+            return 0; // Unstable filter
+        }
+
+        /*
+         Compute updated LPC coefficient: Anew[i]
+         Anew[j]= A[j] + K*A[i-j]   for j=1..i-1
+         Anew[i]= K
+         */
+
+        for (j = 1; j < i; j++)
+        {
+            // temp1W32 = A[j] in Q27
+            temp1W32 = (int32_t)A_hi[j] * 65536
+                    + WEBRTC_SPL_LSHIFT_W32((int32_t)A_low[j],1);
+
+            // temp1W32 += K*A[i-j] in Q27
+            temp1W32 += (K_hi * A_hi[i - j] + (K_hi * A_low[i - j] >> 15) +
+                (K_low * A_hi[i - j] >> 15)) * 2;
+
+            // Put Anew in hi and low format
+            A_upd_hi[j] = (int16_t)(temp1W32 >> 16);
+            A_upd_low[j] = (int16_t)(
+                (temp1W32 - ((int32_t)A_upd_hi[j] * 65536)) >> 1);
+        }
+
+        // temp3W32 = K in Q27 (Convert from Q31 to Q27)
+        temp3W32 >>= 4;
+
+        // Store Anew in hi and low format
+        A_upd_hi[i] = (int16_t)(temp3W32 >> 16);
+        A_upd_low[i] = (int16_t)(
+            (temp3W32 - ((int32_t)A_upd_hi[i] * 65536)) >> 1);
+
+        // Alpha = Alpha * (1-K^2)
+
+        temp1W32 = ((K_hi * K_low >> 14) + K_hi * K_hi) * 2;  // K*K in Q31
+
+        temp1W32 = WEBRTC_SPL_ABS_W32(temp1W32); // Guard against <0
+        temp1W32 = (int32_t)0x7fffffffL - temp1W32; // 1 - K*K  in Q31
+
+        // Convert 1- K^2 in hi and low format
+        tmp_hi = (int16_t)(temp1W32 >> 16);
+        tmp_low = (int16_t)((temp1W32 - ((int32_t)tmp_hi << 16)) >> 1);
+
+        // Calculate Alpha = Alpha * (1-K^2) in Q31
+        temp1W32 = (Alpha_hi * tmp_hi + (Alpha_hi * tmp_low >> 15) +
+            (Alpha_low * tmp_hi >> 15)) << 1;
+
+        // Normalize Alpha and store it on hi and low format
+
+        norm = WebRtcSpl_NormW32(temp1W32);
+        temp1W32 = WEBRTC_SPL_LSHIFT_W32(temp1W32, norm);
+
+        Alpha_hi = (int16_t)(temp1W32 >> 16);
+        Alpha_low = (int16_t)((temp1W32 - ((int32_t)Alpha_hi << 16)) >> 1);
+
+        // Update the total normalization of Alpha
+        Alpha_exp = Alpha_exp + norm;
+
+        // Update A[]
+
+        for (j = 1; j <= i; j++)
+        {
+            A_hi[j] = A_upd_hi[j];
+            A_low[j] = A_upd_low[j];
+        }
+    }
+
+    /*
+     Set A[0] to 1.0 and store the A[i] i=1...order in Q12
+     (Convert from Q27 and use rounding)
+     */
+
+    A[0] = 4096;
+
+    for (i = 1; i <= order; i++)
+    {
+        // temp1W32 in Q27
+        temp1W32 = (int32_t)A_hi[i] * 65536
+                + WEBRTC_SPL_LSHIFT_W32((int32_t)A_low[i], 1);
+        // Round and store upper word
+        A[i] = (int16_t)(((temp1W32 * 2) + 32768) >> 16);
+    }
+    return 1; // Stable filters
+}
diff --git a/common_audio/signal_processing/lpc_to_refl_coef.c b/common_audio/signal_processing/lpc_to_refl_coef.c
new file mode 100644
index 0000000..7a5e251
--- /dev/null
+++ b/common_audio/signal_processing/lpc_to_refl_coef.c
@@ -0,0 +1,56 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains the function WebRtcSpl_LpcToReflCoef().
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+#define SPL_LPC_TO_REFL_COEF_MAX_AR_MODEL_ORDER 50
+
+void WebRtcSpl_LpcToReflCoef(int16_t* a16, int use_order, int16_t* k16)
+{
+    int m, k;
+    int32_t tmp32[SPL_LPC_TO_REFL_COEF_MAX_AR_MODEL_ORDER];
+    int32_t tmp_inv_denom32;
+    int16_t tmp_inv_denom16;
+
+    k16[use_order - 1] = a16[use_order] << 3;  // Q12<<3 => Q15
+    for (m = use_order - 1; m > 0; m--)
+    {
+        // (1 - k^2) in Q30
+        tmp_inv_denom32 = 1073741823 - k16[m] * k16[m];
+        // (1 - k^2) in Q15
+        tmp_inv_denom16 = (int16_t)(tmp_inv_denom32 >> 15);
+
+        for (k = 1; k <= m; k++)
+        {
+            // tmp[k] = (a[k] - RC[m] * a[m-k+1]) / (1.0 - RC[m]*RC[m]);
+
+            // [Q12<<16 - (Q15*Q12)<<1] = [Q28 - Q28] = Q28
+            tmp32[k] = (a16[k] << 16) - (k16[m] * a16[m - k + 1] << 1);
+
+            tmp32[k] = WebRtcSpl_DivW32W16(tmp32[k], tmp_inv_denom16); //Q28/Q15 = Q13
+        }
+
+        for (k = 1; k < m; k++)
+        {
+            a16[k] = (int16_t)(tmp32[k] >> 1);  // Q13>>1 => Q12
+        }
+
+        tmp32[m] = WEBRTC_SPL_SAT(8191, tmp32[m], -8191);
+        k16[m - 1] = (int16_t)WEBRTC_SPL_LSHIFT_W32(tmp32[m], 2); //Q13<<2 => Q15
+    }
+    return;
+}
diff --git a/common_audio/signal_processing/min_max_operations.c b/common_audio/signal_processing/min_max_operations.c
new file mode 100644
index 0000000..d249a02
--- /dev/null
+++ b/common_audio/signal_processing/min_max_operations.c
@@ -0,0 +1,224 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file contains the implementation of functions
+ * WebRtcSpl_MaxAbsValueW16C()
+ * WebRtcSpl_MaxAbsValueW32C()
+ * WebRtcSpl_MaxValueW16C()
+ * WebRtcSpl_MaxValueW32C()
+ * WebRtcSpl_MinValueW16C()
+ * WebRtcSpl_MinValueW32C()
+ * WebRtcSpl_MaxAbsIndexW16()
+ * WebRtcSpl_MaxIndexW16()
+ * WebRtcSpl_MaxIndexW32()
+ * WebRtcSpl_MinIndexW16()
+ * WebRtcSpl_MinIndexW32()
+ *
+ */
+
+#include <stdlib.h>
+
+#include "rtc_base/checks.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+// TODO(bjorn/kma): Consolidate function pairs (e.g. combine
+//   WebRtcSpl_MaxAbsValueW16C and WebRtcSpl_MaxAbsIndexW16 into a single one.)
+// TODO(kma): Move the next six functions into min_max_operations_c.c.
+
+// Maximum absolute value of word16 vector. C version for generic platforms.
+int16_t WebRtcSpl_MaxAbsValueW16C(const int16_t* vector, size_t length) {
+  size_t i = 0;
+  int absolute = 0, maximum = 0;
+
+  RTC_DCHECK_GT(length, 0);
+
+  for (i = 0; i < length; i++) {
+    absolute = abs((int)vector[i]);
+
+    if (absolute > maximum) {
+      maximum = absolute;
+    }
+  }
+
+  // Guard the case for abs(-32768).
+  if (maximum > WEBRTC_SPL_WORD16_MAX) {
+    maximum = WEBRTC_SPL_WORD16_MAX;
+  }
+
+  return (int16_t)maximum;
+}
+
+// Maximum absolute value of word32 vector. C version for generic platforms.
+int32_t WebRtcSpl_MaxAbsValueW32C(const int32_t* vector, size_t length) {
+  // Use uint32_t for the local variables, to accommodate the return value
+  // of abs(0x80000000), which is 0x80000000.
+
+  uint32_t absolute = 0, maximum = 0;
+  size_t i = 0;
+
+  RTC_DCHECK_GT(length, 0);
+
+  for (i = 0; i < length; i++) {
+    absolute = abs((int)vector[i]);
+    if (absolute > maximum) {
+      maximum = absolute;
+    }
+  }
+
+  maximum = WEBRTC_SPL_MIN(maximum, WEBRTC_SPL_WORD32_MAX);
+
+  return (int32_t)maximum;
+}
+
+// Maximum value of word16 vector. C version for generic platforms.
+int16_t WebRtcSpl_MaxValueW16C(const int16_t* vector, size_t length) {
+  int16_t maximum = WEBRTC_SPL_WORD16_MIN;
+  size_t i = 0;
+
+  RTC_DCHECK_GT(length, 0);
+
+  for (i = 0; i < length; i++) {
+    if (vector[i] > maximum)
+      maximum = vector[i];
+  }
+  return maximum;
+}
+
+// Maximum value of word32 vector. C version for generic platforms.
+int32_t WebRtcSpl_MaxValueW32C(const int32_t* vector, size_t length) {
+  int32_t maximum = WEBRTC_SPL_WORD32_MIN;
+  size_t i = 0;
+
+  RTC_DCHECK_GT(length, 0);
+
+  for (i = 0; i < length; i++) {
+    if (vector[i] > maximum)
+      maximum = vector[i];
+  }
+  return maximum;
+}
+
+// Minimum value of word16 vector. C version for generic platforms.
+int16_t WebRtcSpl_MinValueW16C(const int16_t* vector, size_t length) {
+  int16_t minimum = WEBRTC_SPL_WORD16_MAX;
+  size_t i = 0;
+
+  RTC_DCHECK_GT(length, 0);
+
+  for (i = 0; i < length; i++) {
+    if (vector[i] < minimum)
+      minimum = vector[i];
+  }
+  return minimum;
+}
+
+// Minimum value of word32 vector. C version for generic platforms.
+int32_t WebRtcSpl_MinValueW32C(const int32_t* vector, size_t length) {
+  int32_t minimum = WEBRTC_SPL_WORD32_MAX;
+  size_t i = 0;
+
+  RTC_DCHECK_GT(length, 0);
+
+  for (i = 0; i < length; i++) {
+    if (vector[i] < minimum)
+      minimum = vector[i];
+  }
+  return minimum;
+}
+
+// Index of maximum absolute value in a word16 vector.
+size_t WebRtcSpl_MaxAbsIndexW16(const int16_t* vector, size_t length) {
+  // Use type int for local variables, to accomodate the value of abs(-32768).
+
+  size_t i = 0, index = 0;
+  int absolute = 0, maximum = 0;
+
+  RTC_DCHECK_GT(length, 0);
+
+  for (i = 0; i < length; i++) {
+    absolute = abs((int)vector[i]);
+
+    if (absolute > maximum) {
+      maximum = absolute;
+      index = i;
+    }
+  }
+
+  return index;
+}
+
+// Index of maximum value in a word16 vector.
+size_t WebRtcSpl_MaxIndexW16(const int16_t* vector, size_t length) {
+  size_t i = 0, index = 0;
+  int16_t maximum = WEBRTC_SPL_WORD16_MIN;
+
+  RTC_DCHECK_GT(length, 0);
+
+  for (i = 0; i < length; i++) {
+    if (vector[i] > maximum) {
+      maximum = vector[i];
+      index = i;
+    }
+  }
+
+  return index;
+}
+
+// Index of maximum value in a word32 vector.
+size_t WebRtcSpl_MaxIndexW32(const int32_t* vector, size_t length) {
+  size_t i = 0, index = 0;
+  int32_t maximum = WEBRTC_SPL_WORD32_MIN;
+
+  RTC_DCHECK_GT(length, 0);
+
+  for (i = 0; i < length; i++) {
+    if (vector[i] > maximum) {
+      maximum = vector[i];
+      index = i;
+    }
+  }
+
+  return index;
+}
+
+// Index of minimum value in a word16 vector.
+size_t WebRtcSpl_MinIndexW16(const int16_t* vector, size_t length) {
+  size_t i = 0, index = 0;
+  int16_t minimum = WEBRTC_SPL_WORD16_MAX;
+
+  RTC_DCHECK_GT(length, 0);
+
+  for (i = 0; i < length; i++) {
+    if (vector[i] < minimum) {
+      minimum = vector[i];
+      index = i;
+    }
+  }
+
+  return index;
+}
+
+// Index of minimum value in a word32 vector.
+size_t WebRtcSpl_MinIndexW32(const int32_t* vector, size_t length) {
+  size_t i = 0, index = 0;
+  int32_t minimum = WEBRTC_SPL_WORD32_MAX;
+
+  RTC_DCHECK_GT(length, 0);
+
+  for (i = 0; i < length; i++) {
+    if (vector[i] < minimum) {
+      minimum = vector[i];
+      index = i;
+    }
+  }
+
+  return index;
+}
diff --git a/common_audio/signal_processing/min_max_operations_mips.c b/common_audio/signal_processing/min_max_operations_mips.c
new file mode 100644
index 0000000..8a7fc65
--- /dev/null
+++ b/common_audio/signal_processing/min_max_operations_mips.c
@@ -0,0 +1,375 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file contains the implementation of function
+ * WebRtcSpl_MaxAbsValueW16()
+ *
+ * The description header can be found in signal_processing_library.h.
+ *
+ */
+
+#include "rtc_base/checks.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+// Maximum absolute value of word16 vector.
+int16_t WebRtcSpl_MaxAbsValueW16_mips(const int16_t* vector, size_t length) {
+  int32_t totMax = 0;
+  int32_t tmp32_0, tmp32_1, tmp32_2, tmp32_3;
+  size_t i, loop_size;
+
+  RTC_DCHECK_GT(length, 0);
+
+#if defined(MIPS_DSP_R1)
+  const int32_t* tmpvec32 = (int32_t*)vector;
+  loop_size = length >> 4;
+
+  for (i = 0; i < loop_size; i++) {
+    __asm__ volatile (
+      "lw         %[tmp32_0],     0(%[tmpvec32])              \n\t"
+      "lw         %[tmp32_1],     4(%[tmpvec32])              \n\t"
+      "lw         %[tmp32_2],     8(%[tmpvec32])              \n\t"
+      "lw         %[tmp32_3],     12(%[tmpvec32])             \n\t"
+
+      "absq_s.ph  %[tmp32_0],     %[tmp32_0]                  \n\t"
+      "absq_s.ph  %[tmp32_1],     %[tmp32_1]                  \n\t"
+      "cmp.lt.ph  %[totMax],      %[tmp32_0]                  \n\t"
+      "pick.ph    %[totMax],      %[tmp32_0],     %[totMax]   \n\t"
+
+      "lw         %[tmp32_0],     16(%[tmpvec32])             \n\t"
+      "absq_s.ph  %[tmp32_2],     %[tmp32_2]                  \n\t"
+      "cmp.lt.ph  %[totMax],      %[tmp32_1]                  \n\t"
+      "pick.ph    %[totMax],      %[tmp32_1],     %[totMax]   \n\t"
+
+      "lw         %[tmp32_1],     20(%[tmpvec32])             \n\t"
+      "absq_s.ph  %[tmp32_3],     %[tmp32_3]                  \n\t"
+      "cmp.lt.ph  %[totMax],      %[tmp32_2]                  \n\t"
+      "pick.ph    %[totMax],      %[tmp32_2],     %[totMax]   \n\t"
+
+      "lw         %[tmp32_2],     24(%[tmpvec32])             \n\t"
+      "cmp.lt.ph  %[totMax],      %[tmp32_3]                  \n\t"
+      "pick.ph    %[totMax],      %[tmp32_3],     %[totMax]   \n\t"
+
+      "lw         %[tmp32_3],     28(%[tmpvec32])             \n\t"
+      "absq_s.ph  %[tmp32_0],     %[tmp32_0]                  \n\t"
+      "absq_s.ph  %[tmp32_1],     %[tmp32_1]                  \n\t"
+      "cmp.lt.ph  %[totMax],      %[tmp32_0]                  \n\t"
+      "pick.ph    %[totMax],      %[tmp32_0],     %[totMax]   \n\t"
+
+      "absq_s.ph  %[tmp32_2],     %[tmp32_2]                  \n\t"
+      "cmp.lt.ph  %[totMax],      %[tmp32_1]                  \n\t"
+      "pick.ph    %[totMax],      %[tmp32_1],     %[totMax]   \n\t"
+      "absq_s.ph  %[tmp32_3],     %[tmp32_3]                  \n\t"
+      "cmp.lt.ph  %[totMax],      %[tmp32_2]                  \n\t"
+      "pick.ph    %[totMax],      %[tmp32_2],     %[totMax]   \n\t"
+
+      "cmp.lt.ph  %[totMax],      %[tmp32_3]                  \n\t"
+      "pick.ph    %[totMax],      %[tmp32_3],     %[totMax]   \n\t"
+
+      "addiu      %[tmpvec32],    %[tmpvec32],    32          \n\t"
+      : [tmp32_0] "=&r" (tmp32_0), [tmp32_1] "=&r" (tmp32_1),
+        [tmp32_2] "=&r" (tmp32_2), [tmp32_3] "=&r" (tmp32_3),
+        [totMax] "+r" (totMax), [tmpvec32] "+r" (tmpvec32)
+      :
+      : "memory"
+    );
+  }
+  __asm__ volatile (
+    "rotr       %[tmp32_0],     %[totMax],      16          \n\t"
+    "cmp.lt.ph  %[totMax],      %[tmp32_0]                  \n\t"
+    "pick.ph    %[totMax],      %[tmp32_0],     %[totMax]   \n\t"
+    "packrl.ph  %[totMax],      $0,             %[totMax]   \n\t"
+    : [tmp32_0] "=&r" (tmp32_0), [totMax] "+r" (totMax)
+    :
+  );
+  loop_size = length & 0xf;
+  for (i = 0; i < loop_size; i++) {
+    __asm__ volatile (
+      "lh         %[tmp32_0],     0(%[tmpvec32])              \n\t"
+      "addiu      %[tmpvec32],    %[tmpvec32],     2          \n\t"
+      "absq_s.w   %[tmp32_0],     %[tmp32_0]                  \n\t"
+      "slt        %[tmp32_1],     %[totMax],       %[tmp32_0] \n\t"
+      "movn       %[totMax],      %[tmp32_0],      %[tmp32_1] \n\t"
+      : [tmp32_0] "=&r" (tmp32_0), [tmp32_1] "=&r" (tmp32_1),
+        [tmpvec32] "+r" (tmpvec32), [totMax] "+r" (totMax)
+      :
+      : "memory"
+    );
+  }
+#else  // #if defined(MIPS_DSP_R1)
+  int32_t v16MaxMax = WEBRTC_SPL_WORD16_MAX;
+  int32_t r, r1, r2, r3;
+  const int16_t* tmpvector = vector;
+  loop_size = length >> 4;
+  for (i = 0; i < loop_size; i++) {
+    __asm__ volatile (
+      "lh     %[tmp32_0],     0(%[tmpvector])                 \n\t"
+      "lh     %[tmp32_1],     2(%[tmpvector])                 \n\t"
+      "lh     %[tmp32_2],     4(%[tmpvector])                 \n\t"
+      "lh     %[tmp32_3],     6(%[tmpvector])                 \n\t"
+
+      "abs    %[tmp32_0],     %[tmp32_0]                      \n\t"
+      "abs    %[tmp32_1],     %[tmp32_1]                      \n\t"
+      "abs    %[tmp32_2],     %[tmp32_2]                      \n\t"
+      "abs    %[tmp32_3],     %[tmp32_3]                      \n\t"
+
+      "slt    %[r],           %[totMax],      %[tmp32_0]      \n\t"
+      "movn   %[totMax],      %[tmp32_0],     %[r]            \n\t"
+      "slt    %[r1],          %[totMax],      %[tmp32_1]      \n\t"
+      "movn   %[totMax],      %[tmp32_1],     %[r1]           \n\t"
+      "slt    %[r2],          %[totMax],      %[tmp32_2]      \n\t"
+      "movn   %[totMax],      %[tmp32_2],     %[r2]           \n\t"
+      "slt    %[r3],          %[totMax],      %[tmp32_3]      \n\t"
+      "movn   %[totMax],      %[tmp32_3],     %[r3]           \n\t"
+
+      "lh     %[tmp32_0],     8(%[tmpvector])                 \n\t"
+      "lh     %[tmp32_1],     10(%[tmpvector])                \n\t"
+      "lh     %[tmp32_2],     12(%[tmpvector])                \n\t"
+      "lh     %[tmp32_3],     14(%[tmpvector])                \n\t"
+
+      "abs    %[tmp32_0],     %[tmp32_0]                      \n\t"
+      "abs    %[tmp32_1],     %[tmp32_1]                      \n\t"
+      "abs    %[tmp32_2],     %[tmp32_2]                      \n\t"
+      "abs    %[tmp32_3],     %[tmp32_3]                      \n\t"
+
+      "slt    %[r],           %[totMax],      %[tmp32_0]      \n\t"
+      "movn   %[totMax],      %[tmp32_0],     %[r]            \n\t"
+      "slt    %[r1],          %[totMax],      %[tmp32_1]      \n\t"
+      "movn   %[totMax],      %[tmp32_1],     %[r1]           \n\t"
+      "slt    %[r2],          %[totMax],      %[tmp32_2]      \n\t"
+      "movn   %[totMax],      %[tmp32_2],     %[r2]           \n\t"
+      "slt    %[r3],          %[totMax],      %[tmp32_3]      \n\t"
+      "movn   %[totMax],      %[tmp32_3],     %[r3]           \n\t"
+
+      "lh     %[tmp32_0],     16(%[tmpvector])                \n\t"
+      "lh     %[tmp32_1],     18(%[tmpvector])                \n\t"
+      "lh     %[tmp32_2],     20(%[tmpvector])                \n\t"
+      "lh     %[tmp32_3],     22(%[tmpvector])                \n\t"
+
+      "abs    %[tmp32_0],     %[tmp32_0]                      \n\t"
+      "abs    %[tmp32_1],     %[tmp32_1]                      \n\t"
+      "abs    %[tmp32_2],     %[tmp32_2]                      \n\t"
+      "abs    %[tmp32_3],     %[tmp32_3]                      \n\t"
+
+      "slt    %[r],           %[totMax],      %[tmp32_0]      \n\t"
+      "movn   %[totMax],      %[tmp32_0],     %[r]            \n\t"
+      "slt    %[r1],          %[totMax],      %[tmp32_1]      \n\t"
+      "movn   %[totMax],      %[tmp32_1],     %[r1]           \n\t"
+      "slt    %[r2],          %[totMax],      %[tmp32_2]      \n\t"
+      "movn   %[totMax],      %[tmp32_2],     %[r2]           \n\t"
+      "slt    %[r3],          %[totMax],      %[tmp32_3]      \n\t"
+      "movn   %[totMax],      %[tmp32_3],     %[r3]           \n\t"
+
+      "lh     %[tmp32_0],     24(%[tmpvector])                \n\t"
+      "lh     %[tmp32_1],     26(%[tmpvector])                \n\t"
+      "lh     %[tmp32_2],     28(%[tmpvector])                \n\t"
+      "lh     %[tmp32_3],     30(%[tmpvector])                \n\t"
+
+      "abs    %[tmp32_0],     %[tmp32_0]                      \n\t"
+      "abs    %[tmp32_1],     %[tmp32_1]                      \n\t"
+      "abs    %[tmp32_2],     %[tmp32_2]                      \n\t"
+      "abs    %[tmp32_3],     %[tmp32_3]                      \n\t"
+
+      "slt    %[r],           %[totMax],      %[tmp32_0]      \n\t"
+      "movn   %[totMax],      %[tmp32_0],     %[r]            \n\t"
+      "slt    %[r1],          %[totMax],      %[tmp32_1]      \n\t"
+      "movn   %[totMax],      %[tmp32_1],     %[r1]           \n\t"
+      "slt    %[r2],          %[totMax],      %[tmp32_2]      \n\t"
+      "movn   %[totMax],      %[tmp32_2],     %[r2]           \n\t"
+      "slt    %[r3],          %[totMax],      %[tmp32_3]      \n\t"
+      "movn   %[totMax],      %[tmp32_3],     %[r3]           \n\t"
+
+      "addiu  %[tmpvector],   %[tmpvector],   32              \n\t"
+      : [tmp32_0] "=&r" (tmp32_0), [tmp32_1] "=&r" (tmp32_1),
+        [tmp32_2] "=&r" (tmp32_2), [tmp32_3] "=&r" (tmp32_3),
+        [totMax] "+r" (totMax), [r] "=&r" (r), [tmpvector] "+r" (tmpvector),
+        [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3)
+      :
+      : "memory"
+    );
+  }
+  loop_size = length & 0xf;
+  for (i = 0; i < loop_size; i++) {
+    __asm__ volatile (
+      "lh         %[tmp32_0],     0(%[tmpvector])             \n\t"
+      "addiu      %[tmpvector],   %[tmpvector],    2          \n\t"
+      "abs        %[tmp32_0],     %[tmp32_0]                  \n\t"
+      "slt        %[tmp32_1],     %[totMax],       %[tmp32_0] \n\t"
+      "movn       %[totMax],      %[tmp32_0],      %[tmp32_1] \n\t"
+      : [tmp32_0] "=&r" (tmp32_0), [tmp32_1] "=&r" (tmp32_1),
+        [tmpvector] "+r" (tmpvector), [totMax] "+r" (totMax)
+      :
+      : "memory"
+    );
+  }
+
+  __asm__ volatile (
+    "slt    %[r],       %[v16MaxMax],   %[totMax]   \n\t"
+    "movn   %[totMax],  %[v16MaxMax],   %[r]        \n\t"
+    : [totMax] "+r" (totMax), [r] "=&r" (r)
+    : [v16MaxMax] "r" (v16MaxMax)
+  );
+#endif  // #if defined(MIPS_DSP_R1)
+  return (int16_t)totMax;
+}
+
+#if defined(MIPS_DSP_R1_LE)
+// Maximum absolute value of word32 vector. Version for MIPS platform.
+int32_t WebRtcSpl_MaxAbsValueW32_mips(const int32_t* vector, size_t length) {
+  // Use uint32_t for the local variables, to accommodate the return value
+  // of abs(0x80000000), which is 0x80000000.
+
+  uint32_t absolute = 0, maximum = 0;
+  int tmp1 = 0, max_value = 0x7fffffff;
+
+  RTC_DCHECK_GT(length, 0);
+
+  __asm__ volatile (
+    ".set push                                                        \n\t"
+    ".set noreorder                                                   \n\t"
+
+   "1:                                                                \n\t"
+    "lw         %[absolute],      0(%[vector])                        \n\t"
+    "absq_s.w   %[absolute],      %[absolute]                         \n\t"
+    "addiu      %[length],        %[length],          -1              \n\t"
+    "slt        %[tmp1],          %[maximum],         %[absolute]     \n\t"
+    "movn       %[maximum],       %[absolute],        %[tmp1]         \n\t"
+    "bgtz       %[length],        1b                                  \n\t"
+    " addiu     %[vector],        %[vector],          4               \n\t"
+    "slt        %[tmp1],          %[max_value],       %[maximum]      \n\t"
+    "movn       %[maximum],       %[max_value],       %[tmp1]         \n\t"
+
+    ".set pop                                                         \n\t"
+
+    : [tmp1] "=&r" (tmp1), [maximum] "+r" (maximum), [absolute] "+r" (absolute)
+    : [vector] "r" (vector), [length] "r" (length), [max_value] "r" (max_value)
+    : "memory"
+  );
+
+  return (int32_t)maximum;
+}
+#endif  // #if defined(MIPS_DSP_R1_LE)
+
+// Maximum value of word16 vector. Version for MIPS platform.
+int16_t WebRtcSpl_MaxValueW16_mips(const int16_t* vector, size_t length) {
+  int16_t maximum = WEBRTC_SPL_WORD16_MIN;
+  int tmp1;
+  int16_t value;
+
+  RTC_DCHECK_GT(length, 0);
+
+  __asm__ volatile (
+    ".set push                                                        \n\t"
+    ".set noreorder                                                   \n\t"
+
+   "1:                                                                \n\t"
+    "lh         %[value],         0(%[vector])                        \n\t"
+    "addiu      %[length],        %[length],          -1              \n\t"
+    "slt        %[tmp1],          %[maximum],         %[value]        \n\t"
+    "movn       %[maximum],       %[value],           %[tmp1]         \n\t"
+    "bgtz       %[length],        1b                                  \n\t"
+    " addiu     %[vector],        %[vector],          2               \n\t"
+    ".set pop                                                         \n\t"
+
+    : [tmp1] "=&r" (tmp1), [maximum] "+r" (maximum), [value] "=&r" (value)
+    : [vector] "r" (vector), [length] "r" (length)
+    : "memory"
+  );
+
+  return maximum;
+}
+
+// Maximum value of word32 vector. Version for MIPS platform.
+int32_t WebRtcSpl_MaxValueW32_mips(const int32_t* vector, size_t length) {
+  int32_t maximum = WEBRTC_SPL_WORD32_MIN;
+  int tmp1, value;
+
+  RTC_DCHECK_GT(length, 0);
+
+  __asm__ volatile (
+    ".set push                                                        \n\t"
+    ".set noreorder                                                   \n\t"
+
+   "1:                                                                \n\t"
+    "lw         %[value],         0(%[vector])                        \n\t"
+    "addiu      %[length],        %[length],          -1              \n\t"
+    "slt        %[tmp1],          %[maximum],         %[value]        \n\t"
+    "movn       %[maximum],       %[value],           %[tmp1]         \n\t"
+    "bgtz       %[length],        1b                                  \n\t"
+    " addiu     %[vector],        %[vector],          4               \n\t"
+
+    ".set pop                                                         \n\t"
+
+    : [tmp1] "=&r" (tmp1), [maximum] "+r" (maximum), [value] "=&r" (value)
+    : [vector] "r" (vector), [length] "r" (length)
+    : "memory"
+  );
+
+  return maximum;
+}
+
+// Minimum value of word16 vector. Version for MIPS platform.
+int16_t WebRtcSpl_MinValueW16_mips(const int16_t* vector, size_t length) {
+  int16_t minimum = WEBRTC_SPL_WORD16_MAX;
+  int tmp1;
+  int16_t value;
+
+  RTC_DCHECK_GT(length, 0);
+
+  __asm__ volatile (
+    ".set push                                                        \n\t"
+    ".set noreorder                                                   \n\t"
+
+   "1:                                                                \n\t"
+    "lh         %[value],         0(%[vector])                        \n\t"
+    "addiu      %[length],        %[length],          -1              \n\t"
+    "slt        %[tmp1],          %[value],           %[minimum]      \n\t"
+    "movn       %[minimum],       %[value],           %[tmp1]         \n\t"
+    "bgtz       %[length],        1b                                  \n\t"
+    " addiu     %[vector],        %[vector],          2               \n\t"
+
+    ".set pop                                                         \n\t"
+
+    : [tmp1] "=&r" (tmp1), [minimum] "+r" (minimum), [value] "=&r" (value)
+    : [vector] "r" (vector), [length] "r" (length)
+    : "memory"
+  );
+
+  return minimum;
+}
+
+// Minimum value of word32 vector. Version for MIPS platform.
+int32_t WebRtcSpl_MinValueW32_mips(const int32_t* vector, size_t length) {
+  int32_t minimum = WEBRTC_SPL_WORD32_MAX;
+  int tmp1, value;
+
+  RTC_DCHECK_GT(length, 0);
+
+  __asm__ volatile (
+    ".set push                                                        \n\t"
+    ".set noreorder                                                   \n\t"
+
+   "1:                                                                \n\t"
+    "lw         %[value],         0(%[vector])                        \n\t"
+    "addiu      %[length],        %[length],          -1              \n\t"
+    "slt        %[tmp1],          %[value],           %[minimum]      \n\t"
+    "movn       %[minimum],       %[value],           %[tmp1]         \n\t"
+    "bgtz       %[length],        1b                                  \n\t"
+    " addiu     %[vector],        %[vector],          4               \n\t"
+
+    ".set pop                                                         \n\t"
+
+    : [tmp1] "=&r" (tmp1), [minimum] "+r" (minimum), [value] "=&r" (value)
+    : [vector] "r" (vector), [length] "r" (length)
+    : "memory"
+  );
+
+  return minimum;
+}
diff --git a/common_audio/signal_processing/min_max_operations_neon.c b/common_audio/signal_processing/min_max_operations_neon.c
new file mode 100644
index 0000000..53217df
--- /dev/null
+++ b/common_audio/signal_processing/min_max_operations_neon.c
@@ -0,0 +1,283 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+#include <stdlib.h>
+
+#include "rtc_base/checks.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+// Maximum absolute value of word16 vector. C version for generic platforms.
+int16_t WebRtcSpl_MaxAbsValueW16Neon(const int16_t* vector, size_t length) {
+  int absolute = 0, maximum = 0;
+
+  RTC_DCHECK_GT(length, 0);
+
+  const int16_t* p_start = vector;
+  size_t rest = length & 7;
+  const int16_t* p_end = vector + length - rest;
+
+  int16x8_t v;
+  uint16x8_t max_qv;
+  max_qv = vdupq_n_u16(0);
+
+  while (p_start < p_end) {
+    v = vld1q_s16(p_start);
+    // Note vabs doesn't change the value of -32768.
+    v = vabsq_s16(v);
+    // Use u16 so we don't lose the value -32768.
+    max_qv = vmaxq_u16(max_qv, vreinterpretq_u16_s16(v));
+    p_start += 8;
+  }
+
+#ifdef WEBRTC_ARCH_ARM64
+  maximum = (int)vmaxvq_u16(max_qv);
+#else
+  uint16x4_t max_dv;
+  max_dv = vmax_u16(vget_low_u16(max_qv), vget_high_u16(max_qv));
+  max_dv = vpmax_u16(max_dv, max_dv);
+  max_dv = vpmax_u16(max_dv, max_dv);
+
+  maximum = (int)vget_lane_u16(max_dv, 0);
+#endif
+
+  p_end = vector + length;
+  while (p_start < p_end) {
+    absolute = abs((int)(*p_start));
+
+    if (absolute > maximum) {
+      maximum = absolute;
+    }
+    p_start++;
+  }
+
+  // Guard the case for abs(-32768).
+  if (maximum > WEBRTC_SPL_WORD16_MAX) {
+    maximum = WEBRTC_SPL_WORD16_MAX;
+  }
+
+  return (int16_t)maximum;
+}
+
+// Maximum absolute value of word32 vector. NEON intrinsics version for
+// ARM 32-bit/64-bit platforms.
+int32_t WebRtcSpl_MaxAbsValueW32Neon(const int32_t* vector, size_t length) {
+  // Use uint32_t for the local variables, to accommodate the return value
+  // of abs(0x80000000), which is 0x80000000.
+
+  uint32_t absolute = 0, maximum = 0;
+  size_t i = 0;
+  size_t residual = length & 0x7;
+
+  RTC_DCHECK_GT(length, 0);
+
+  const int32_t* p_start = vector;
+  uint32x4_t max32x4_0 = vdupq_n_u32(0);
+  uint32x4_t max32x4_1 = vdupq_n_u32(0);
+
+  // First part, unroll the loop 8 times.
+  for (i = 0; i < length - residual; i += 8) {
+    int32x4_t in32x4_0 = vld1q_s32(p_start);
+    p_start += 4;
+    int32x4_t in32x4_1 = vld1q_s32(p_start);
+    p_start += 4;
+    in32x4_0 = vabsq_s32(in32x4_0);
+    in32x4_1 = vabsq_s32(in32x4_1);
+    // vabs doesn't change the value of 0x80000000.
+    // Use u32 so we don't lose the value 0x80000000.
+    max32x4_0 = vmaxq_u32(max32x4_0, vreinterpretq_u32_s32(in32x4_0));
+    max32x4_1 = vmaxq_u32(max32x4_1, vreinterpretq_u32_s32(in32x4_1));
+  }
+
+  uint32x4_t max32x4 = vmaxq_u32(max32x4_0, max32x4_1);
+#if defined(WEBRTC_ARCH_ARM64)
+  maximum = vmaxvq_u32(max32x4);
+#else
+  uint32x2_t max32x2 = vmax_u32(vget_low_u32(max32x4), vget_high_u32(max32x4));
+  max32x2 = vpmax_u32(max32x2, max32x2);
+
+  maximum = vget_lane_u32(max32x2, 0);
+#endif
+
+  // Second part, do the remaining iterations (if any).
+  for (i = residual; i > 0; i--) {
+    absolute = abs((int)(*p_start));
+    if (absolute > maximum) {
+      maximum = absolute;
+    }
+    p_start++;
+  }
+
+  // Guard against the case for 0x80000000.
+  maximum = WEBRTC_SPL_MIN(maximum, WEBRTC_SPL_WORD32_MAX);
+
+  return (int32_t)maximum;
+}
+
+// Maximum value of word16 vector. NEON intrinsics version for
+// ARM 32-bit/64-bit platforms.
+int16_t WebRtcSpl_MaxValueW16Neon(const int16_t* vector, size_t length) {
+  int16_t maximum = WEBRTC_SPL_WORD16_MIN;
+  size_t i = 0;
+  size_t residual = length & 0x7;
+
+  RTC_DCHECK_GT(length, 0);
+
+  const int16_t* p_start = vector;
+  int16x8_t max16x8 = vdupq_n_s16(WEBRTC_SPL_WORD16_MIN);
+
+  // First part, unroll the loop 8 times.
+  for (i = 0; i < length - residual; i += 8) {
+    int16x8_t in16x8 = vld1q_s16(p_start);
+    max16x8 = vmaxq_s16(max16x8, in16x8);
+    p_start += 8;
+  }
+
+#if defined(WEBRTC_ARCH_ARM64)
+  maximum = vmaxvq_s16(max16x8);
+#else
+  int16x4_t max16x4 = vmax_s16(vget_low_s16(max16x8), vget_high_s16(max16x8));
+  max16x4 = vpmax_s16(max16x4, max16x4);
+  max16x4 = vpmax_s16(max16x4, max16x4);
+
+  maximum = vget_lane_s16(max16x4, 0);
+#endif
+
+  // Second part, do the remaining iterations (if any).
+  for (i = residual; i > 0; i--) {
+    if (*p_start > maximum)
+      maximum = *p_start;
+    p_start++;
+  }
+  return maximum;
+}
+
+// Maximum value of word32 vector. NEON intrinsics version for
+// ARM 32-bit/64-bit platforms.
+int32_t WebRtcSpl_MaxValueW32Neon(const int32_t* vector, size_t length) {
+  int32_t maximum = WEBRTC_SPL_WORD32_MIN;
+  size_t i = 0;
+  size_t residual = length & 0x7;
+
+  RTC_DCHECK_GT(length, 0);
+
+  const int32_t* p_start = vector;
+  int32x4_t max32x4_0 = vdupq_n_s32(WEBRTC_SPL_WORD32_MIN);
+  int32x4_t max32x4_1 = vdupq_n_s32(WEBRTC_SPL_WORD32_MIN);
+
+  // First part, unroll the loop 8 times.
+  for (i = 0; i < length - residual; i += 8) {
+    int32x4_t in32x4_0 = vld1q_s32(p_start);
+    p_start += 4;
+    int32x4_t in32x4_1 = vld1q_s32(p_start);
+    p_start += 4;
+    max32x4_0 = vmaxq_s32(max32x4_0, in32x4_0);
+    max32x4_1 = vmaxq_s32(max32x4_1, in32x4_1);
+  }
+
+  int32x4_t max32x4 = vmaxq_s32(max32x4_0, max32x4_1);
+#if defined(WEBRTC_ARCH_ARM64)
+  maximum = vmaxvq_s32(max32x4);
+#else
+  int32x2_t max32x2 = vmax_s32(vget_low_s32(max32x4), vget_high_s32(max32x4));
+  max32x2 = vpmax_s32(max32x2, max32x2);
+
+  maximum = vget_lane_s32(max32x2, 0);
+#endif
+
+  // Second part, do the remaining iterations (if any).
+  for (i = residual; i > 0; i--) {
+    if (*p_start > maximum)
+      maximum = *p_start;
+    p_start++;
+  }
+  return maximum;
+}
+
+// Minimum value of word16 vector. NEON intrinsics version for
+// ARM 32-bit/64-bit platforms.
+int16_t WebRtcSpl_MinValueW16Neon(const int16_t* vector, size_t length) {
+  int16_t minimum = WEBRTC_SPL_WORD16_MAX;
+  size_t i = 0;
+  size_t residual = length & 0x7;
+
+  RTC_DCHECK_GT(length, 0);
+
+  const int16_t* p_start = vector;
+  int16x8_t min16x8 = vdupq_n_s16(WEBRTC_SPL_WORD16_MAX);
+
+  // First part, unroll the loop 8 times.
+  for (i = 0; i < length - residual; i += 8) {
+    int16x8_t in16x8 = vld1q_s16(p_start);
+    min16x8 = vminq_s16(min16x8, in16x8);
+    p_start += 8;
+  }
+
+#if defined(WEBRTC_ARCH_ARM64)
+  minimum = vminvq_s16(min16x8);
+#else
+  int16x4_t min16x4 = vmin_s16(vget_low_s16(min16x8), vget_high_s16(min16x8));
+  min16x4 = vpmin_s16(min16x4, min16x4);
+  min16x4 = vpmin_s16(min16x4, min16x4);
+
+  minimum = vget_lane_s16(min16x4, 0);
+#endif
+
+  // Second part, do the remaining iterations (if any).
+  for (i = residual; i > 0; i--) {
+    if (*p_start < minimum)
+      minimum = *p_start;
+    p_start++;
+  }
+  return minimum;
+}
+
+// Minimum value of word32 vector. NEON intrinsics version for
+// ARM 32-bit/64-bit platforms.
+int32_t WebRtcSpl_MinValueW32Neon(const int32_t* vector, size_t length) {
+  int32_t minimum = WEBRTC_SPL_WORD32_MAX;
+  size_t i = 0;
+  size_t residual = length & 0x7;
+
+  RTC_DCHECK_GT(length, 0);
+
+  const int32_t* p_start = vector;
+  int32x4_t min32x4_0 = vdupq_n_s32(WEBRTC_SPL_WORD32_MAX);
+  int32x4_t min32x4_1 = vdupq_n_s32(WEBRTC_SPL_WORD32_MAX);
+
+  // First part, unroll the loop 8 times.
+  for (i = 0; i < length - residual; i += 8) {
+    int32x4_t in32x4_0 = vld1q_s32(p_start);
+    p_start += 4;
+    int32x4_t in32x4_1 = vld1q_s32(p_start);
+    p_start += 4;
+    min32x4_0 = vminq_s32(min32x4_0, in32x4_0);
+    min32x4_1 = vminq_s32(min32x4_1, in32x4_1);
+  }
+
+  int32x4_t min32x4 = vminq_s32(min32x4_0, min32x4_1);
+#if defined(WEBRTC_ARCH_ARM64)
+  minimum = vminvq_s32(min32x4);
+#else
+  int32x2_t min32x2 = vmin_s32(vget_low_s32(min32x4), vget_high_s32(min32x4));
+  min32x2 = vpmin_s32(min32x2, min32x2);
+
+  minimum = vget_lane_s32(min32x2, 0);
+#endif
+
+  // Second part, do the remaining iterations (if any).
+  for (i = residual; i > 0; i--) {
+    if (*p_start < minimum)
+      minimum = *p_start;
+    p_start++;
+  }
+  return minimum;
+}
+
diff --git a/common_audio/signal_processing/randomization_functions.c b/common_audio/signal_processing/randomization_functions.c
new file mode 100644
index 0000000..a445c57
--- /dev/null
+++ b/common_audio/signal_processing/randomization_functions.c
@@ -0,0 +1,115 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains implementations of the randomization functions
+ * WebRtcSpl_RandU()
+ * WebRtcSpl_RandN()
+ * WebRtcSpl_RandUArray()
+ *
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+static const uint32_t kMaxSeedUsed = 0x80000000;
+
+static const int16_t kRandNTable[] = {
+    9178,    -7260,       40,    10189,     4894,    -3531,   -13779,    14764,
+   -4008,    -8884,    -8990,     1008,     7368,     5184,     3251,    -5817,
+   -9786,     5963,     1770,     8066,    -7135,    10772,    -2298,     1361,
+    6484,     2241,    -8633,      792,      199,    -3344,     6553,   -10079,
+  -15040,       95,    11608,   -12469,    14161,    -4176,     2476,     6403,
+   13685,   -16005,     6646,     2239,    10916,    -3004,     -602,    -3141,
+    2142,    14144,    -5829,     5305,     8209,     4713,     2697,    -5112,
+   16092,    -1210,    -2891,    -6631,    -5360,   -11878,    -6781,    -2739,
+   -6392,      536,    10923,    10872,     5059,    -4748,    -7770,     5477,
+      38,    -1025,    -2892,     1638,     6304,    14375,   -11028,     1553,
+   -1565,    10762,     -393,     4040,     5257,    12310,     6554,    -4799,
+    4899,    -6354,     1603,    -1048,    -2220,     8247,     -186,    -8944,
+  -12004,     2332,     4801,    -4933,     6371,      131,     8614,    -5927,
+   -8287,   -22760,     4033,   -15162,     3385,     3246,     3153,    -5250,
+    3766,      784,     6494,      -62,     3531,    -1582,    15572,      662,
+   -3952,     -330,    -3196,      669,     7236,    -2678,    -6569,    23319,
+   -8645,     -741,    14830,   -15976,     4903,      315,   -11342,    10311,
+    1858,    -7777,     2145,     5436,     5677,     -113,   -10033,      826,
+   -1353,    17210,     7768,      986,    -1471,     8291,    -4982,     8207,
+  -14911,    -6255,    -2449,   -11881,    -7059,   -11703,    -4338,     8025,
+    7538,    -2823,   -12490,     9470,    -1613,    -2529,   -10092,    -7807,
+    9480,     6970,   -12844,     5123,     3532,     4816,     4803,    -8455,
+   -5045,    14032,    -4378,    -1643,     5756,   -11041,    -2732,   -16618,
+   -6430,   -18375,    -3320,     6098,     5131,    -4269,    -8840,     2482,
+   -7048,     1547,   -21890,    -6505,    -7414,     -424,   -11722,     7955,
+    1653,   -17299,     1823,      473,    -9232,     3337,     1111,      873,
+    4018,    -8982,     9889,     3531,   -11763,    -3799,     7373,    -4539,
+    3231,     7054,    -8537,     7616,     6244,    16635,      447,    -2915,
+   13967,      705,    -2669,    -1520,    -1771,   -16188,     5956,     5117,
+    6371,    -9936,    -1448,     2480,     5128,     7550,    -8130,     5236,
+    8213,    -6443,     7707,    -1950,   -13811,     7218,     7031,    -3883,
+      67,     5731,    -2874,    13480,    -3743,     9298,    -3280,     3552,
+   -4425,      -18,    -3785,    -9988,    -5357,     5477,   -11794,     2117,
+    1416,    -9935,     3376,      802,    -5079,    -8243,    12652,       66,
+    3653,    -2368,     6781,   -21895,    -7227,     2487,     7839,     -385,
+    6646,    -7016,    -4658,     5531,    -1705,      834,      129,     3694,
+   -1343,     2238,   -22640,    -6417,   -11139,    11301,    -2945,    -3494,
+   -5626,      185,    -3615,    -2041,    -7972,    -3106,      -60,   -23497,
+   -1566,    17064,     3519,     2518,      304,    -6805,   -10269,     2105,
+    1936,     -426,     -736,    -8122,    -1467,     4238,    -6939,   -13309,
+     360,     7402,    -7970,    12576,     3287,    12194,    -6289,   -16006,
+    9171,     4042,    -9193,     9123,    -2512,     6388,    -4734,    -8739,
+    1028,    -5406,    -1696,     5889,     -666,    -4736,     4971,     3565,
+    9362,    -6292,     3876,    -3652,   -19666,     7523,    -4061,      391,
+  -11773,     7502,    -3763,     4929,    -9478,    13278,     2805,     4496,
+    7814,    16419,    12455,   -14773,     2127,    -2746,     3763,     4847,
+    3698,     6978,     4751,    -6957,    -3581,      -45,     6252,     1513,
+   -4797,    -7925,    11270,    16188,    -2359,    -5269,     9376,   -10777,
+    7262,    20031,    -6515,    -2208,    -5353,     8085,    -1341,    -1303,
+    7333,     5576,     3625,     5763,    -7931,     9833,    -3371,   -10305,
+    6534,   -13539,    -9971,      997,     8464,    -4064,    -1495,     1857,
+   13624,     5458,     9490,   -11086,    -4524,    12022,     -550,     -198,
+     408,    -8455,    -7068,    10289,     9712,    -3366,     9028,    -7621,
+   -5243,     2362,     6909,     4672,    -4933,    -1799,     4709,    -4563,
+     -62,     -566,     1624,    -7010,    14730,   -17791,    -3697,    -2344,
+   -1741,     7099,    -9509,    -6855,    -1989,     3495,    -2289,     2031,
+   12784,      891,    14189,    -3963,    -5683,      421,   -12575,     1724,
+  -12682,    -5970,    -8169,     3143,    -1824,    -5488,    -5130,     8536,
+   12799,      794,     5738,     3459,   -11689,     -258,    -3738,    -3775,
+   -8742,     2333,     8312,    -9383,    10331,    13119,     8398,    10644,
+  -19433,    -6446,   -16277,   -11793,    16284,     9345,    15222,    15834,
+    2009,    -7349,      130,   -14547,      338,    -5998,     3337,    21492,
+    2406,     7703,     -951,    11196,     -564,     3406,     2217,     4806,
+    2374,    -5797,    11839,     8940,   -11874,    18213,     2855,    10492
+};
+
+static uint32_t IncreaseSeed(uint32_t* seed) {
+  seed[0] = (seed[0] * ((int32_t)69069) + 1) & (kMaxSeedUsed - 1);
+  return seed[0];
+}
+
+int16_t WebRtcSpl_RandU(uint32_t* seed) {
+  return (int16_t)(IncreaseSeed(seed) >> 16);
+}
+
+int16_t WebRtcSpl_RandN(uint32_t* seed) {
+  return kRandNTable[IncreaseSeed(seed) >> 23];
+}
+
+// Creates an array of uniformly distributed variables.
+int16_t WebRtcSpl_RandUArray(int16_t* vector,
+                             int16_t vector_length,
+                             uint32_t* seed) {
+  int i;
+  for (i = 0; i < vector_length; i++) {
+    vector[i] = WebRtcSpl_RandU(seed);
+  }
+  return vector_length;
+}
diff --git a/common_audio/signal_processing/real_fft.c b/common_audio/signal_processing/real_fft.c
new file mode 100644
index 0000000..780e517
--- /dev/null
+++ b/common_audio/signal_processing/real_fft.c
@@ -0,0 +1,102 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/signal_processing/include/real_fft.h"
+
+#include <stdlib.h>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+struct RealFFT {
+  int order;
+};
+
+struct RealFFT* WebRtcSpl_CreateRealFFT(int order) {
+  struct RealFFT* self = NULL;
+
+  if (order > kMaxFFTOrder || order < 0) {
+    return NULL;
+  }
+
+  self = malloc(sizeof(struct RealFFT));
+  if (self == NULL) {
+    return NULL;
+  }
+  self->order = order;
+
+  return self;
+}
+
+void WebRtcSpl_FreeRealFFT(struct RealFFT* self) {
+  if (self != NULL) {
+    free(self);
+  }
+}
+
+// The C version FFT functions (i.e. WebRtcSpl_RealForwardFFT and
+// WebRtcSpl_RealInverseFFT) are real-valued FFT wrappers for complex-valued
+// FFT implementation in SPL.
+
+int WebRtcSpl_RealForwardFFT(struct RealFFT* self,
+                             const int16_t* real_data_in,
+                             int16_t* complex_data_out) {
+  int i = 0;
+  int j = 0;
+  int result = 0;
+  int n = 1 << self->order;
+  // The complex-value FFT implementation needs a buffer to hold 2^order
+  // 16-bit COMPLEX numbers, for both time and frequency data.
+  int16_t complex_buffer[2 << kMaxFFTOrder];
+
+  // Insert zeros to the imaginary parts for complex forward FFT input.
+  for (i = 0, j = 0; i < n; i += 1, j += 2) {
+    complex_buffer[j] = real_data_in[i];
+    complex_buffer[j + 1] = 0;
+  };
+
+  WebRtcSpl_ComplexBitReverse(complex_buffer, self->order);
+  result = WebRtcSpl_ComplexFFT(complex_buffer, self->order, 1);
+
+  // For real FFT output, use only the first N + 2 elements from
+  // complex forward FFT.
+  memcpy(complex_data_out, complex_buffer, sizeof(int16_t) * (n + 2));
+
+  return result;
+}
+
+int WebRtcSpl_RealInverseFFT(struct RealFFT* self,
+                             const int16_t* complex_data_in,
+                             int16_t* real_data_out) {
+  int i = 0;
+  int j = 0;
+  int result = 0;
+  int n = 1 << self->order;
+  // Create the buffer specific to complex-valued FFT implementation.
+  int16_t complex_buffer[2 << kMaxFFTOrder];
+
+  // For n-point FFT, first copy the first n + 2 elements into complex
+  // FFT, then construct the remaining n - 2 elements by real FFT's
+  // conjugate-symmetric properties.
+  memcpy(complex_buffer, complex_data_in, sizeof(int16_t) * (n + 2));
+  for (i = n + 2; i < 2 * n; i += 2) {
+    complex_buffer[i] = complex_data_in[2 * n - i];
+    complex_buffer[i + 1] = -complex_data_in[2 * n - i + 1];
+  }
+
+  WebRtcSpl_ComplexBitReverse(complex_buffer, self->order);
+  result = WebRtcSpl_ComplexIFFT(complex_buffer, self->order, 1);
+
+  // Strip out the imaginary parts of the complex inverse FFT output.
+  for (i = 0, j = 0; i < n; i += 1, j += 2) {
+    real_data_out[i] = complex_buffer[j];
+  }
+
+  return result;
+}
diff --git a/common_audio/signal_processing/real_fft_unittest.cc b/common_audio/signal_processing/real_fft_unittest.cc
new file mode 100644
index 0000000..4f5b5c7
--- /dev/null
+++ b/common_audio/signal_processing/real_fft_unittest.cc
@@ -0,0 +1,106 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/signal_processing/include/real_fft.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace {
+
+// FFT order.
+const int kOrder = 5;
+// Lengths for real FFT's time and frequency bufffers.
+// For N-point FFT, the length requirements from API are N and N+2 respectively.
+const int kTimeDataLength = 1 << kOrder;
+const int kFreqDataLength = (1 << kOrder) + 2;
+// For complex FFT's time and freq buffer. The implementation requires
+// 2*N 16-bit words.
+const int kComplexFftDataLength = 2 << kOrder;
+// Reference data for time signal.
+const int16_t kRefData[kTimeDataLength] = {
+  11739, 6848, -8688, 31980, -30295, 25242, 27085, 19410,
+  -26299, 15607, -10791, 11778, -23819, 14498, -25772, 10076,
+  1173, 6848, -8688, 31980, -30295, 2522, 27085, 19410,
+  -2629, 5607, -3, 1178, -23819, 1498, -25772, 10076
+};
+
+class RealFFTTest : public ::testing::Test {
+ protected:
+  RealFFTTest() {
+    WebRtcSpl_Init();
+  }
+};
+
+TEST_F(RealFFTTest, CreateFailsOnBadInput) {
+  RealFFT* fft = WebRtcSpl_CreateRealFFT(11);
+  EXPECT_TRUE(fft == nullptr);
+  fft = WebRtcSpl_CreateRealFFT(-1);
+  EXPECT_TRUE(fft == nullptr);
+}
+
+TEST_F(RealFFTTest, RealAndComplexMatch) {
+  int i = 0;
+  int j = 0;
+  int16_t real_fft_time[kTimeDataLength] = {0};
+  int16_t real_fft_freq[kFreqDataLength] = {0};
+  // One common buffer for complex FFT's time and frequency data.
+  int16_t complex_fft_buff[kComplexFftDataLength] = {0};
+
+  // Prepare the inputs to forward FFT's.
+  memcpy(real_fft_time, kRefData, sizeof(kRefData));
+  for (i = 0, j = 0; i < kTimeDataLength; i += 1, j += 2) {
+    complex_fft_buff[j] = kRefData[i];
+    complex_fft_buff[j + 1] = 0;  // Insert zero's to imaginary parts.
+  }
+
+  // Create and run real forward FFT.
+  RealFFT* fft = WebRtcSpl_CreateRealFFT(kOrder);
+  EXPECT_TRUE(fft != nullptr);
+  EXPECT_EQ(0, WebRtcSpl_RealForwardFFT(fft, real_fft_time, real_fft_freq));
+
+  // Run complex forward FFT.
+  WebRtcSpl_ComplexBitReverse(complex_fft_buff, kOrder);
+  EXPECT_EQ(0, WebRtcSpl_ComplexFFT(complex_fft_buff, kOrder, 1));
+
+  // Verify the results between complex and real forward FFT.
+  for (i = 0; i < kFreqDataLength; i++) {
+    EXPECT_EQ(real_fft_freq[i], complex_fft_buff[i]);
+  }
+
+  // Prepare the inputs to inverse real FFT.
+  // We use whatever data in complex_fft_buff[] since we don't care
+  // about data contents. Only kFreqDataLength 16-bit words are copied
+  // from complex_fft_buff to real_fft_freq since remaining words (2nd half)
+  // are conjugate-symmetric to the first half in theory.
+  memcpy(real_fft_freq, complex_fft_buff, sizeof(real_fft_freq));
+
+  // Run real inverse FFT.
+  int real_scale = WebRtcSpl_RealInverseFFT(fft, real_fft_freq, real_fft_time);
+  EXPECT_GE(real_scale, 0);
+
+  // Run complex inverse FFT.
+  WebRtcSpl_ComplexBitReverse(complex_fft_buff, kOrder);
+  int complex_scale = WebRtcSpl_ComplexIFFT(complex_fft_buff, kOrder, 1);
+
+  // Verify the results between complex and real inverse FFT.
+  // They are not bit-exact, since complex IFFT doesn't produce
+  // exactly conjugate-symmetric data (between first and second half).
+  EXPECT_EQ(real_scale, complex_scale);
+  for (i = 0, j = 0; i < kTimeDataLength; i += 1, j += 2) {
+    EXPECT_LE(abs(real_fft_time[i] - complex_fft_buff[j]), 1);
+  }
+
+  WebRtcSpl_FreeRealFFT(fft);
+}
+
+}  // namespace
+}  // namespace webrtc
diff --git a/common_audio/signal_processing/refl_coef_to_lpc.c b/common_audio/signal_processing/refl_coef_to_lpc.c
new file mode 100644
index 0000000..b0858b2
--- /dev/null
+++ b/common_audio/signal_processing/refl_coef_to_lpc.c
@@ -0,0 +1,59 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains the function WebRtcSpl_ReflCoefToLpc().
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+void WebRtcSpl_ReflCoefToLpc(const int16_t *k, int use_order, int16_t *a)
+{
+    int16_t any[WEBRTC_SPL_MAX_LPC_ORDER + 1];
+    int16_t *aptr, *aptr2, *anyptr;
+    const int16_t *kptr;
+    int m, i;
+
+    kptr = k;
+    *a = 4096; // i.e., (Word16_MAX >> 3)+1.
+    *any = *a;
+    a[1] = *k >> 3;
+
+    for (m = 1; m < use_order; m++)
+    {
+        kptr++;
+        aptr = a;
+        aptr++;
+        aptr2 = &a[m];
+        anyptr = any;
+        anyptr++;
+
+        any[m + 1] = *kptr >> 3;
+        for (i = 0; i < m; i++)
+        {
+            *anyptr = *aptr + (int16_t)((*aptr2 * *kptr) >> 15);
+            anyptr++;
+            aptr++;
+            aptr2--;
+        }
+
+        aptr = a;
+        anyptr = any;
+        for (i = 0; i < (m + 2); i++)
+        {
+            *aptr = *anyptr;
+            aptr++;
+            anyptr++;
+        }
+    }
+}
diff --git a/common_audio/signal_processing/resample.c b/common_audio/signal_processing/resample.c
new file mode 100644
index 0000000..d4b2736
--- /dev/null
+++ b/common_audio/signal_processing/resample.c
@@ -0,0 +1,505 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains the resampling functions for 22 kHz.
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "common_audio/signal_processing/resample_by_2_internal.h"
+
+// Declaration of internally used functions
+static void WebRtcSpl_32khzTo22khzIntToShort(const int32_t *In, int16_t *Out,
+                                             int32_t K);
+
+void WebRtcSpl_32khzTo22khzIntToInt(const int32_t *In, int32_t *Out,
+                                    int32_t K);
+
+// interpolation coefficients
+static const int16_t kCoefficients32To22[5][9] = {
+        {127, -712,  2359, -6333, 23456, 16775, -3695,  945, -154},
+        {-39,  230,  -830,  2785, 32366, -2324,   760, -218,   38},
+        {117, -663,  2222, -6133, 26634, 13070, -3174,  831, -137},
+        {-77,  457, -1677,  5958, 31175, -4136,  1405, -408,   71},
+        { 98, -560,  1900, -5406, 29240,  9423, -2480,  663, -110}
+};
+
+//////////////////////
+// 22 kHz -> 16 kHz //
+//////////////////////
+
+// number of subblocks; options: 1, 2, 4, 5, 10
+#define SUB_BLOCKS_22_16    5
+
+// 22 -> 16 resampler
+void WebRtcSpl_Resample22khzTo16khz(const int16_t* in, int16_t* out,
+                                    WebRtcSpl_State22khzTo16khz* state, int32_t* tmpmem)
+{
+    int k;
+
+    // process two blocks of 10/SUB_BLOCKS_22_16 ms (to reduce temp buffer size)
+    for (k = 0; k < SUB_BLOCKS_22_16; k++)
+    {
+        ///// 22 --> 44 /////
+        // int16_t  in[220/SUB_BLOCKS_22_16]
+        // int32_t out[440/SUB_BLOCKS_22_16]
+        /////
+        WebRtcSpl_UpBy2ShortToInt(in, 220 / SUB_BLOCKS_22_16, tmpmem + 16, state->S_22_44);
+
+        ///// 44 --> 32 /////
+        // int32_t  in[440/SUB_BLOCKS_22_16]
+        // int32_t out[320/SUB_BLOCKS_22_16]
+        /////
+        // copy state to and from input array
+        tmpmem[8] = state->S_44_32[0];
+        tmpmem[9] = state->S_44_32[1];
+        tmpmem[10] = state->S_44_32[2];
+        tmpmem[11] = state->S_44_32[3];
+        tmpmem[12] = state->S_44_32[4];
+        tmpmem[13] = state->S_44_32[5];
+        tmpmem[14] = state->S_44_32[6];
+        tmpmem[15] = state->S_44_32[7];
+        state->S_44_32[0] = tmpmem[440 / SUB_BLOCKS_22_16 + 8];
+        state->S_44_32[1] = tmpmem[440 / SUB_BLOCKS_22_16 + 9];
+        state->S_44_32[2] = tmpmem[440 / SUB_BLOCKS_22_16 + 10];
+        state->S_44_32[3] = tmpmem[440 / SUB_BLOCKS_22_16 + 11];
+        state->S_44_32[4] = tmpmem[440 / SUB_BLOCKS_22_16 + 12];
+        state->S_44_32[5] = tmpmem[440 / SUB_BLOCKS_22_16 + 13];
+        state->S_44_32[6] = tmpmem[440 / SUB_BLOCKS_22_16 + 14];
+        state->S_44_32[7] = tmpmem[440 / SUB_BLOCKS_22_16 + 15];
+
+        WebRtcSpl_Resample44khzTo32khz(tmpmem + 8, tmpmem, 40 / SUB_BLOCKS_22_16);
+
+        ///// 32 --> 16 /////
+        // int32_t  in[320/SUB_BLOCKS_22_16]
+        // int32_t out[160/SUB_BLOCKS_22_16]
+        /////
+        WebRtcSpl_DownBy2IntToShort(tmpmem, 320 / SUB_BLOCKS_22_16, out, state->S_32_16);
+
+        // move input/output pointers 10/SUB_BLOCKS_22_16 ms seconds ahead
+        in += 220 / SUB_BLOCKS_22_16;
+        out += 160 / SUB_BLOCKS_22_16;
+    }
+}
+
+// initialize state of 22 -> 16 resampler
+void WebRtcSpl_ResetResample22khzTo16khz(WebRtcSpl_State22khzTo16khz* state)
+{
+    int k;
+    for (k = 0; k < 8; k++)
+    {
+        state->S_22_44[k] = 0;
+        state->S_44_32[k] = 0;
+        state->S_32_16[k] = 0;
+    }
+}
+
+//////////////////////
+// 16 kHz -> 22 kHz //
+//////////////////////
+
+// number of subblocks; options: 1, 2, 4, 5, 10
+#define SUB_BLOCKS_16_22    4
+
+// 16 -> 22 resampler
+void WebRtcSpl_Resample16khzTo22khz(const int16_t* in, int16_t* out,
+                                    WebRtcSpl_State16khzTo22khz* state, int32_t* tmpmem)
+{
+    int k;
+
+    // process two blocks of 10/SUB_BLOCKS_16_22 ms (to reduce temp buffer size)
+    for (k = 0; k < SUB_BLOCKS_16_22; k++)
+    {
+        ///// 16 --> 32 /////
+        // int16_t  in[160/SUB_BLOCKS_16_22]
+        // int32_t out[320/SUB_BLOCKS_16_22]
+        /////
+        WebRtcSpl_UpBy2ShortToInt(in, 160 / SUB_BLOCKS_16_22, tmpmem + 8, state->S_16_32);
+
+        ///// 32 --> 22 /////
+        // int32_t  in[320/SUB_BLOCKS_16_22]
+        // int32_t out[220/SUB_BLOCKS_16_22]
+        /////
+        // copy state to and from input array
+        tmpmem[0] = state->S_32_22[0];
+        tmpmem[1] = state->S_32_22[1];
+        tmpmem[2] = state->S_32_22[2];
+        tmpmem[3] = state->S_32_22[3];
+        tmpmem[4] = state->S_32_22[4];
+        tmpmem[5] = state->S_32_22[5];
+        tmpmem[6] = state->S_32_22[6];
+        tmpmem[7] = state->S_32_22[7];
+        state->S_32_22[0] = tmpmem[320 / SUB_BLOCKS_16_22];
+        state->S_32_22[1] = tmpmem[320 / SUB_BLOCKS_16_22 + 1];
+        state->S_32_22[2] = tmpmem[320 / SUB_BLOCKS_16_22 + 2];
+        state->S_32_22[3] = tmpmem[320 / SUB_BLOCKS_16_22 + 3];
+        state->S_32_22[4] = tmpmem[320 / SUB_BLOCKS_16_22 + 4];
+        state->S_32_22[5] = tmpmem[320 / SUB_BLOCKS_16_22 + 5];
+        state->S_32_22[6] = tmpmem[320 / SUB_BLOCKS_16_22 + 6];
+        state->S_32_22[7] = tmpmem[320 / SUB_BLOCKS_16_22 + 7];
+
+        WebRtcSpl_32khzTo22khzIntToShort(tmpmem, out, 20 / SUB_BLOCKS_16_22);
+
+        // move input/output pointers 10/SUB_BLOCKS_16_22 ms seconds ahead
+        in += 160 / SUB_BLOCKS_16_22;
+        out += 220 / SUB_BLOCKS_16_22;
+    }
+}
+
+// initialize state of 16 -> 22 resampler
+void WebRtcSpl_ResetResample16khzTo22khz(WebRtcSpl_State16khzTo22khz* state)
+{
+    int k;
+    for (k = 0; k < 8; k++)
+    {
+        state->S_16_32[k] = 0;
+        state->S_32_22[k] = 0;
+    }
+}
+
+//////////////////////
+// 22 kHz ->  8 kHz //
+//////////////////////
+
+// number of subblocks; options: 1, 2, 5, 10
+#define SUB_BLOCKS_22_8     2
+
+// 22 -> 8 resampler
+void WebRtcSpl_Resample22khzTo8khz(const int16_t* in, int16_t* out,
+                                   WebRtcSpl_State22khzTo8khz* state, int32_t* tmpmem)
+{
+    int k;
+
+    // process two blocks of 10/SUB_BLOCKS_22_8 ms (to reduce temp buffer size)
+    for (k = 0; k < SUB_BLOCKS_22_8; k++)
+    {
+        ///// 22 --> 22 lowpass /////
+        // int16_t  in[220/SUB_BLOCKS_22_8]
+        // int32_t out[220/SUB_BLOCKS_22_8]
+        /////
+        WebRtcSpl_LPBy2ShortToInt(in, 220 / SUB_BLOCKS_22_8, tmpmem + 16, state->S_22_22);
+
+        ///// 22 --> 16 /////
+        // int32_t  in[220/SUB_BLOCKS_22_8]
+        // int32_t out[160/SUB_BLOCKS_22_8]
+        /////
+        // copy state to and from input array
+        tmpmem[8] = state->S_22_16[0];
+        tmpmem[9] = state->S_22_16[1];
+        tmpmem[10] = state->S_22_16[2];
+        tmpmem[11] = state->S_22_16[3];
+        tmpmem[12] = state->S_22_16[4];
+        tmpmem[13] = state->S_22_16[5];
+        tmpmem[14] = state->S_22_16[6];
+        tmpmem[15] = state->S_22_16[7];
+        state->S_22_16[0] = tmpmem[220 / SUB_BLOCKS_22_8 + 8];
+        state->S_22_16[1] = tmpmem[220 / SUB_BLOCKS_22_8 + 9];
+        state->S_22_16[2] = tmpmem[220 / SUB_BLOCKS_22_8 + 10];
+        state->S_22_16[3] = tmpmem[220 / SUB_BLOCKS_22_8 + 11];
+        state->S_22_16[4] = tmpmem[220 / SUB_BLOCKS_22_8 + 12];
+        state->S_22_16[5] = tmpmem[220 / SUB_BLOCKS_22_8 + 13];
+        state->S_22_16[6] = tmpmem[220 / SUB_BLOCKS_22_8 + 14];
+        state->S_22_16[7] = tmpmem[220 / SUB_BLOCKS_22_8 + 15];
+
+        WebRtcSpl_Resample44khzTo32khz(tmpmem + 8, tmpmem, 20 / SUB_BLOCKS_22_8);
+
+        ///// 16 --> 8 /////
+        // int32_t in[160/SUB_BLOCKS_22_8]
+        // int32_t out[80/SUB_BLOCKS_22_8]
+        /////
+        WebRtcSpl_DownBy2IntToShort(tmpmem, 160 / SUB_BLOCKS_22_8, out, state->S_16_8);
+
+        // move input/output pointers 10/SUB_BLOCKS_22_8 ms seconds ahead
+        in += 220 / SUB_BLOCKS_22_8;
+        out += 80 / SUB_BLOCKS_22_8;
+    }
+}
+
+// initialize state of 22 -> 8 resampler
+void WebRtcSpl_ResetResample22khzTo8khz(WebRtcSpl_State22khzTo8khz* state)
+{
+    int k;
+    for (k = 0; k < 8; k++)
+    {
+        state->S_22_22[k] = 0;
+        state->S_22_22[k + 8] = 0;
+        state->S_22_16[k] = 0;
+        state->S_16_8[k] = 0;
+    }
+}
+
+//////////////////////
+//  8 kHz -> 22 kHz //
+//////////////////////
+
+// number of subblocks; options: 1, 2, 5, 10
+#define SUB_BLOCKS_8_22     2
+
+// 8 -> 22 resampler
+void WebRtcSpl_Resample8khzTo22khz(const int16_t* in, int16_t* out,
+                                   WebRtcSpl_State8khzTo22khz* state, int32_t* tmpmem)
+{
+    int k;
+
+    // process two blocks of 10/SUB_BLOCKS_8_22 ms (to reduce temp buffer size)
+    for (k = 0; k < SUB_BLOCKS_8_22; k++)
+    {
+        ///// 8 --> 16 /////
+        // int16_t  in[80/SUB_BLOCKS_8_22]
+        // int32_t out[160/SUB_BLOCKS_8_22]
+        /////
+        WebRtcSpl_UpBy2ShortToInt(in, 80 / SUB_BLOCKS_8_22, tmpmem + 18, state->S_8_16);
+
+        ///// 16 --> 11 /////
+        // int32_t  in[160/SUB_BLOCKS_8_22]
+        // int32_t out[110/SUB_BLOCKS_8_22]
+        /////
+        // copy state to and from input array
+        tmpmem[10] = state->S_16_11[0];
+        tmpmem[11] = state->S_16_11[1];
+        tmpmem[12] = state->S_16_11[2];
+        tmpmem[13] = state->S_16_11[3];
+        tmpmem[14] = state->S_16_11[4];
+        tmpmem[15] = state->S_16_11[5];
+        tmpmem[16] = state->S_16_11[6];
+        tmpmem[17] = state->S_16_11[7];
+        state->S_16_11[0] = tmpmem[160 / SUB_BLOCKS_8_22 + 10];
+        state->S_16_11[1] = tmpmem[160 / SUB_BLOCKS_8_22 + 11];
+        state->S_16_11[2] = tmpmem[160 / SUB_BLOCKS_8_22 + 12];
+        state->S_16_11[3] = tmpmem[160 / SUB_BLOCKS_8_22 + 13];
+        state->S_16_11[4] = tmpmem[160 / SUB_BLOCKS_8_22 + 14];
+        state->S_16_11[5] = tmpmem[160 / SUB_BLOCKS_8_22 + 15];
+        state->S_16_11[6] = tmpmem[160 / SUB_BLOCKS_8_22 + 16];
+        state->S_16_11[7] = tmpmem[160 / SUB_BLOCKS_8_22 + 17];
+
+        WebRtcSpl_32khzTo22khzIntToInt(tmpmem + 10, tmpmem, 10 / SUB_BLOCKS_8_22);
+
+        ///// 11 --> 22 /////
+        // int32_t  in[110/SUB_BLOCKS_8_22]
+        // int16_t out[220/SUB_BLOCKS_8_22]
+        /////
+        WebRtcSpl_UpBy2IntToShort(tmpmem, 110 / SUB_BLOCKS_8_22, out, state->S_11_22);
+
+        // move input/output pointers 10/SUB_BLOCKS_8_22 ms seconds ahead
+        in += 80 / SUB_BLOCKS_8_22;
+        out += 220 / SUB_BLOCKS_8_22;
+    }
+}
+
+// initialize state of 8 -> 22 resampler
+void WebRtcSpl_ResetResample8khzTo22khz(WebRtcSpl_State8khzTo22khz* state)
+{
+    int k;
+    for (k = 0; k < 8; k++)
+    {
+        state->S_8_16[k] = 0;
+        state->S_16_11[k] = 0;
+        state->S_11_22[k] = 0;
+    }
+}
+
+// compute two inner-products and store them to output array
+static void WebRtcSpl_DotProdIntToInt(const int32_t* in1, const int32_t* in2,
+                                      const int16_t* coef_ptr, int32_t* out1,
+                                      int32_t* out2)
+{
+    int32_t tmp1 = 16384;
+    int32_t tmp2 = 16384;
+    int16_t coef;
+
+    coef = coef_ptr[0];
+    tmp1 += coef * in1[0];
+    tmp2 += coef * in2[-0];
+
+    coef = coef_ptr[1];
+    tmp1 += coef * in1[1];
+    tmp2 += coef * in2[-1];
+
+    coef = coef_ptr[2];
+    tmp1 += coef * in1[2];
+    tmp2 += coef * in2[-2];
+
+    coef = coef_ptr[3];
+    tmp1 += coef * in1[3];
+    tmp2 += coef * in2[-3];
+
+    coef = coef_ptr[4];
+    tmp1 += coef * in1[4];
+    tmp2 += coef * in2[-4];
+
+    coef = coef_ptr[5];
+    tmp1 += coef * in1[5];
+    tmp2 += coef * in2[-5];
+
+    coef = coef_ptr[6];
+    tmp1 += coef * in1[6];
+    tmp2 += coef * in2[-6];
+
+    coef = coef_ptr[7];
+    tmp1 += coef * in1[7];
+    tmp2 += coef * in2[-7];
+
+    coef = coef_ptr[8];
+    *out1 = tmp1 + coef * in1[8];
+    *out2 = tmp2 + coef * in2[-8];
+}
+
+// compute two inner-products and store them to output array
+static void WebRtcSpl_DotProdIntToShort(const int32_t* in1, const int32_t* in2,
+                                        const int16_t* coef_ptr, int16_t* out1,
+                                        int16_t* out2)
+{
+    int32_t tmp1 = 16384;
+    int32_t tmp2 = 16384;
+    int16_t coef;
+
+    coef = coef_ptr[0];
+    tmp1 += coef * in1[0];
+    tmp2 += coef * in2[-0];
+
+    coef = coef_ptr[1];
+    tmp1 += coef * in1[1];
+    tmp2 += coef * in2[-1];
+
+    coef = coef_ptr[2];
+    tmp1 += coef * in1[2];
+    tmp2 += coef * in2[-2];
+
+    coef = coef_ptr[3];
+    tmp1 += coef * in1[3];
+    tmp2 += coef * in2[-3];
+
+    coef = coef_ptr[4];
+    tmp1 += coef * in1[4];
+    tmp2 += coef * in2[-4];
+
+    coef = coef_ptr[5];
+    tmp1 += coef * in1[5];
+    tmp2 += coef * in2[-5];
+
+    coef = coef_ptr[6];
+    tmp1 += coef * in1[6];
+    tmp2 += coef * in2[-6];
+
+    coef = coef_ptr[7];
+    tmp1 += coef * in1[7];
+    tmp2 += coef * in2[-7];
+
+    coef = coef_ptr[8];
+    tmp1 += coef * in1[8];
+    tmp2 += coef * in2[-8];
+
+    // scale down, round and saturate
+    tmp1 >>= 15;
+    if (tmp1 > (int32_t)0x00007FFF)
+        tmp1 = 0x00007FFF;
+    if (tmp1 < (int32_t)0xFFFF8000)
+        tmp1 = 0xFFFF8000;
+    tmp2 >>= 15;
+    if (tmp2 > (int32_t)0x00007FFF)
+        tmp2 = 0x00007FFF;
+    if (tmp2 < (int32_t)0xFFFF8000)
+        tmp2 = 0xFFFF8000;
+    *out1 = (int16_t)tmp1;
+    *out2 = (int16_t)tmp2;
+}
+
+//   Resampling ratio: 11/16
+// input:  int32_t (normalized, not saturated) :: size 16 * K
+// output: int32_t (shifted 15 positions to the left, + offset 16384) :: size 11 * K
+//      K: Number of blocks
+
+void WebRtcSpl_32khzTo22khzIntToInt(const int32_t* In,
+                                    int32_t* Out,
+                                    int32_t K)
+{
+    /////////////////////////////////////////////////////////////
+    // Filter operation:
+    //
+    // Perform resampling (16 input samples -> 11 output samples);
+    // process in sub blocks of size 16 samples.
+    int32_t m;
+
+    for (m = 0; m < K; m++)
+    {
+        // first output sample
+        Out[0] = ((int32_t)In[3] << 15) + (1 << 14);
+
+        // sum and accumulate filter coefficients and input samples
+        WebRtcSpl_DotProdIntToInt(&In[0], &In[22], kCoefficients32To22[0], &Out[1], &Out[10]);
+
+        // sum and accumulate filter coefficients and input samples
+        WebRtcSpl_DotProdIntToInt(&In[2], &In[20], kCoefficients32To22[1], &Out[2], &Out[9]);
+
+        // sum and accumulate filter coefficients and input samples
+        WebRtcSpl_DotProdIntToInt(&In[3], &In[19], kCoefficients32To22[2], &Out[3], &Out[8]);
+
+        // sum and accumulate filter coefficients and input samples
+        WebRtcSpl_DotProdIntToInt(&In[5], &In[17], kCoefficients32To22[3], &Out[4], &Out[7]);
+
+        // sum and accumulate filter coefficients and input samples
+        WebRtcSpl_DotProdIntToInt(&In[6], &In[16], kCoefficients32To22[4], &Out[5], &Out[6]);
+
+        // update pointers
+        In += 16;
+        Out += 11;
+    }
+}
+
+//   Resampling ratio: 11/16
+// input:  int32_t (normalized, not saturated) :: size 16 * K
+// output: int16_t (saturated) :: size 11 * K
+//      K: Number of blocks
+
+void WebRtcSpl_32khzTo22khzIntToShort(const int32_t *In,
+                                      int16_t *Out,
+                                      int32_t K)
+{
+    /////////////////////////////////////////////////////////////
+    // Filter operation:
+    //
+    // Perform resampling (16 input samples -> 11 output samples);
+    // process in sub blocks of size 16 samples.
+    int32_t tmp;
+    int32_t m;
+
+    for (m = 0; m < K; m++)
+    {
+        // first output sample
+        tmp = In[3];
+        if (tmp > (int32_t)0x00007FFF)
+            tmp = 0x00007FFF;
+        if (tmp < (int32_t)0xFFFF8000)
+            tmp = 0xFFFF8000;
+        Out[0] = (int16_t)tmp;
+
+        // sum and accumulate filter coefficients and input samples
+        WebRtcSpl_DotProdIntToShort(&In[0], &In[22], kCoefficients32To22[0], &Out[1], &Out[10]);
+
+        // sum and accumulate filter coefficients and input samples
+        WebRtcSpl_DotProdIntToShort(&In[2], &In[20], kCoefficients32To22[1], &Out[2], &Out[9]);
+
+        // sum and accumulate filter coefficients and input samples
+        WebRtcSpl_DotProdIntToShort(&In[3], &In[19], kCoefficients32To22[2], &Out[3], &Out[8]);
+
+        // sum and accumulate filter coefficients and input samples
+        WebRtcSpl_DotProdIntToShort(&In[5], &In[17], kCoefficients32To22[3], &Out[4], &Out[7]);
+
+        // sum and accumulate filter coefficients and input samples
+        WebRtcSpl_DotProdIntToShort(&In[6], &In[16], kCoefficients32To22[4], &Out[5], &Out[6]);
+
+        // update pointers
+        In += 16;
+        Out += 11;
+    }
+}
diff --git a/common_audio/signal_processing/resample_48khz.c b/common_audio/signal_processing/resample_48khz.c
new file mode 100644
index 0000000..8518e7b
--- /dev/null
+++ b/common_audio/signal_processing/resample_48khz.c
@@ -0,0 +1,186 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains resampling functions between 48 kHz and nb/wb.
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#include <string.h>
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "common_audio/signal_processing/resample_by_2_internal.h"
+
+////////////////////////////
+///// 48 kHz -> 16 kHz /////
+////////////////////////////
+
+// 48 -> 16 resampler
+void WebRtcSpl_Resample48khzTo16khz(const int16_t* in, int16_t* out,
+                                    WebRtcSpl_State48khzTo16khz* state, int32_t* tmpmem)
+{
+    ///// 48 --> 48(LP) /////
+    // int16_t  in[480]
+    // int32_t out[480]
+    /////
+    WebRtcSpl_LPBy2ShortToInt(in, 480, tmpmem + 16, state->S_48_48);
+
+    ///// 48 --> 32 /////
+    // int32_t  in[480]
+    // int32_t out[320]
+    /////
+    // copy state to and from input array
+    memcpy(tmpmem + 8, state->S_48_32, 8 * sizeof(int32_t));
+    memcpy(state->S_48_32, tmpmem + 488, 8 * sizeof(int32_t));
+    WebRtcSpl_Resample48khzTo32khz(tmpmem + 8, tmpmem, 160);
+
+    ///// 32 --> 16 /////
+    // int32_t  in[320]
+    // int16_t out[160]
+    /////
+    WebRtcSpl_DownBy2IntToShort(tmpmem, 320, out, state->S_32_16);
+}
+
+// initialize state of 48 -> 16 resampler
+void WebRtcSpl_ResetResample48khzTo16khz(WebRtcSpl_State48khzTo16khz* state)
+{
+    memset(state->S_48_48, 0, 16 * sizeof(int32_t));
+    memset(state->S_48_32, 0, 8 * sizeof(int32_t));
+    memset(state->S_32_16, 0, 8 * sizeof(int32_t));
+}
+
+////////////////////////////
+///// 16 kHz -> 48 kHz /////
+////////////////////////////
+
+// 16 -> 48 resampler
+void WebRtcSpl_Resample16khzTo48khz(const int16_t* in, int16_t* out,
+                                    WebRtcSpl_State16khzTo48khz* state, int32_t* tmpmem)
+{
+    ///// 16 --> 32 /////
+    // int16_t  in[160]
+    // int32_t out[320]
+    /////
+    WebRtcSpl_UpBy2ShortToInt(in, 160, tmpmem + 16, state->S_16_32);
+
+    ///// 32 --> 24 /////
+    // int32_t  in[320]
+    // int32_t out[240]
+    // copy state to and from input array
+    /////
+    memcpy(tmpmem + 8, state->S_32_24, 8 * sizeof(int32_t));
+    memcpy(state->S_32_24, tmpmem + 328, 8 * sizeof(int32_t));
+    WebRtcSpl_Resample32khzTo24khz(tmpmem + 8, tmpmem, 80);
+
+    ///// 24 --> 48 /////
+    // int32_t  in[240]
+    // int16_t out[480]
+    /////
+    WebRtcSpl_UpBy2IntToShort(tmpmem, 240, out, state->S_24_48);
+}
+
+// initialize state of 16 -> 48 resampler
+void WebRtcSpl_ResetResample16khzTo48khz(WebRtcSpl_State16khzTo48khz* state)
+{
+    memset(state->S_16_32, 0, 8 * sizeof(int32_t));
+    memset(state->S_32_24, 0, 8 * sizeof(int32_t));
+    memset(state->S_24_48, 0, 8 * sizeof(int32_t));
+}
+
+////////////////////////////
+///// 48 kHz ->  8 kHz /////
+////////////////////////////
+
+// 48 -> 8 resampler
+void WebRtcSpl_Resample48khzTo8khz(const int16_t* in, int16_t* out,
+                                   WebRtcSpl_State48khzTo8khz* state, int32_t* tmpmem)
+{
+    ///// 48 --> 24 /////
+    // int16_t  in[480]
+    // int32_t out[240]
+    /////
+    WebRtcSpl_DownBy2ShortToInt(in, 480, tmpmem + 256, state->S_48_24);
+
+    ///// 24 --> 24(LP) /////
+    // int32_t  in[240]
+    // int32_t out[240]
+    /////
+    WebRtcSpl_LPBy2IntToInt(tmpmem + 256, 240, tmpmem + 16, state->S_24_24);
+
+    ///// 24 --> 16 /////
+    // int32_t  in[240]
+    // int32_t out[160]
+    /////
+    // copy state to and from input array
+    memcpy(tmpmem + 8, state->S_24_16, 8 * sizeof(int32_t));
+    memcpy(state->S_24_16, tmpmem + 248, 8 * sizeof(int32_t));
+    WebRtcSpl_Resample48khzTo32khz(tmpmem + 8, tmpmem, 80);
+
+    ///// 16 --> 8 /////
+    // int32_t  in[160]
+    // int16_t out[80]
+    /////
+    WebRtcSpl_DownBy2IntToShort(tmpmem, 160, out, state->S_16_8);
+}
+
+// initialize state of 48 -> 8 resampler
+void WebRtcSpl_ResetResample48khzTo8khz(WebRtcSpl_State48khzTo8khz* state)
+{
+    memset(state->S_48_24, 0, 8 * sizeof(int32_t));
+    memset(state->S_24_24, 0, 16 * sizeof(int32_t));
+    memset(state->S_24_16, 0, 8 * sizeof(int32_t));
+    memset(state->S_16_8, 0, 8 * sizeof(int32_t));
+}
+
+////////////////////////////
+/////  8 kHz -> 48 kHz /////
+////////////////////////////
+
+// 8 -> 48 resampler
+void WebRtcSpl_Resample8khzTo48khz(const int16_t* in, int16_t* out,
+                                   WebRtcSpl_State8khzTo48khz* state, int32_t* tmpmem)
+{
+    ///// 8 --> 16 /////
+    // int16_t  in[80]
+    // int32_t out[160]
+    /////
+    WebRtcSpl_UpBy2ShortToInt(in, 80, tmpmem + 264, state->S_8_16);
+
+    ///// 16 --> 12 /////
+    // int32_t  in[160]
+    // int32_t out[120]
+    /////
+    // copy state to and from input array
+    memcpy(tmpmem + 256, state->S_16_12, 8 * sizeof(int32_t));
+    memcpy(state->S_16_12, tmpmem + 416, 8 * sizeof(int32_t));
+    WebRtcSpl_Resample32khzTo24khz(tmpmem + 256, tmpmem + 240, 40);
+
+    ///// 12 --> 24 /////
+    // int32_t  in[120]
+    // int16_t out[240]
+    /////
+    WebRtcSpl_UpBy2IntToInt(tmpmem + 240, 120, tmpmem, state->S_12_24);
+
+    ///// 24 --> 48 /////
+    // int32_t  in[240]
+    // int16_t out[480]
+    /////
+    WebRtcSpl_UpBy2IntToShort(tmpmem, 240, out, state->S_24_48);
+}
+
+// initialize state of 8 -> 48 resampler
+void WebRtcSpl_ResetResample8khzTo48khz(WebRtcSpl_State8khzTo48khz* state)
+{
+    memset(state->S_8_16, 0, 8 * sizeof(int32_t));
+    memset(state->S_16_12, 0, 8 * sizeof(int32_t));
+    memset(state->S_12_24, 0, 8 * sizeof(int32_t));
+    memset(state->S_24_48, 0, 8 * sizeof(int32_t));
+}
diff --git a/common_audio/signal_processing/resample_by_2.c b/common_audio/signal_processing/resample_by_2.c
new file mode 100644
index 0000000..73e1950
--- /dev/null
+++ b/common_audio/signal_processing/resample_by_2.c
@@ -0,0 +1,183 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains the resampling by two functions.
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+#ifdef WEBRTC_ARCH_ARM_V7
+
+// allpass filter coefficients.
+static const uint32_t kResampleAllpass1[3] = {3284, 24441, 49528 << 15};
+static const uint32_t kResampleAllpass2[3] =
+  {12199, 37471 << 15, 60255 << 15};
+
+// Multiply two 32-bit values and accumulate to another input value.
+// Return: state + ((diff * tbl_value) >> 16)
+
+static __inline int32_t MUL_ACCUM_1(int32_t tbl_value,
+                                    int32_t diff,
+                                    int32_t state) {
+  int32_t result;
+  __asm __volatile ("smlawb %0, %1, %2, %3": "=r"(result): "r"(diff),
+                                   "r"(tbl_value), "r"(state));
+  return result;
+}
+
+// Multiply two 32-bit values and accumulate to another input value.
+// Return: Return: state + (((diff << 1) * tbl_value) >> 32)
+//
+// The reason to introduce this function is that, in case we can't use smlawb
+// instruction (in MUL_ACCUM_1) due to input value range, we can still use 
+// smmla to save some cycles.
+
+static __inline int32_t MUL_ACCUM_2(int32_t tbl_value,
+                                    int32_t diff,
+                                    int32_t state) {
+  int32_t result;
+  __asm __volatile ("smmla %0, %1, %2, %3": "=r"(result): "r"(diff << 1),
+                                  "r"(tbl_value), "r"(state));
+  return result;
+}
+
+#else
+
+// allpass filter coefficients.
+static const uint16_t kResampleAllpass1[3] = {3284, 24441, 49528};
+static const uint16_t kResampleAllpass2[3] = {12199, 37471, 60255};
+
+// Multiply a 32-bit value with a 16-bit value and accumulate to another input:
+#define MUL_ACCUM_1(a, b, c) WEBRTC_SPL_SCALEDIFF32(a, b, c)
+#define MUL_ACCUM_2(a, b, c) WEBRTC_SPL_SCALEDIFF32(a, b, c)
+
+#endif  // WEBRTC_ARCH_ARM_V7
+
+
+// decimator
+#if !defined(MIPS32_LE)
+void WebRtcSpl_DownsampleBy2(const int16_t* in, size_t len,
+                             int16_t* out, int32_t* filtState) {
+  int32_t tmp1, tmp2, diff, in32, out32;
+  size_t i;
+
+  register int32_t state0 = filtState[0];
+  register int32_t state1 = filtState[1];
+  register int32_t state2 = filtState[2];
+  register int32_t state3 = filtState[3];
+  register int32_t state4 = filtState[4];
+  register int32_t state5 = filtState[5];
+  register int32_t state6 = filtState[6];
+  register int32_t state7 = filtState[7];
+
+  for (i = (len >> 1); i > 0; i--) {
+    // lower allpass filter
+    in32 = (int32_t)(*in++) * (1 << 10);
+    diff = in32 - state1;
+    tmp1 = MUL_ACCUM_1(kResampleAllpass2[0], diff, state0);
+    state0 = in32;
+    diff = tmp1 - state2;
+    tmp2 = MUL_ACCUM_2(kResampleAllpass2[1], diff, state1);
+    state1 = tmp1;
+    diff = tmp2 - state3;
+    state3 = MUL_ACCUM_2(kResampleAllpass2[2], diff, state2);
+    state2 = tmp2;
+
+    // upper allpass filter
+    in32 = (int32_t)(*in++) * (1 << 10);
+    diff = in32 - state5;
+    tmp1 = MUL_ACCUM_1(kResampleAllpass1[0], diff, state4);
+    state4 = in32;
+    diff = tmp1 - state6;
+    tmp2 = MUL_ACCUM_1(kResampleAllpass1[1], diff, state5);
+    state5 = tmp1;
+    diff = tmp2 - state7;
+    state7 = MUL_ACCUM_2(kResampleAllpass1[2], diff, state6);
+    state6 = tmp2;
+
+    // add two allpass outputs, divide by two and round
+    out32 = (state3 + state7 + 1024) >> 11;
+
+    // limit amplitude to prevent wrap-around, and write to output array
+    *out++ = WebRtcSpl_SatW32ToW16(out32);
+  }
+
+  filtState[0] = state0;
+  filtState[1] = state1;
+  filtState[2] = state2;
+  filtState[3] = state3;
+  filtState[4] = state4;
+  filtState[5] = state5;
+  filtState[6] = state6;
+  filtState[7] = state7;
+}
+#endif  // #if defined(MIPS32_LE)
+
+
+void WebRtcSpl_UpsampleBy2(const int16_t* in, size_t len,
+                           int16_t* out, int32_t* filtState) {
+  int32_t tmp1, tmp2, diff, in32, out32;
+  size_t i;
+
+  register int32_t state0 = filtState[0];
+  register int32_t state1 = filtState[1];
+  register int32_t state2 = filtState[2];
+  register int32_t state3 = filtState[3];
+  register int32_t state4 = filtState[4];
+  register int32_t state5 = filtState[5];
+  register int32_t state6 = filtState[6];
+  register int32_t state7 = filtState[7];
+
+  for (i = len; i > 0; i--) {
+    // lower allpass filter
+    in32 = (int32_t)(*in++) * (1 << 10);
+    diff = in32 - state1;
+    tmp1 = MUL_ACCUM_1(kResampleAllpass1[0], diff, state0);
+    state0 = in32;
+    diff = tmp1 - state2;
+    tmp2 = MUL_ACCUM_1(kResampleAllpass1[1], diff, state1);
+    state1 = tmp1;
+    diff = tmp2 - state3;
+    state3 = MUL_ACCUM_2(kResampleAllpass1[2], diff, state2);
+    state2 = tmp2;
+
+    // round; limit amplitude to prevent wrap-around; write to output array
+    out32 = (state3 + 512) >> 10;
+    *out++ = WebRtcSpl_SatW32ToW16(out32);
+
+    // upper allpass filter
+    diff = in32 - state5;
+    tmp1 = MUL_ACCUM_1(kResampleAllpass2[0], diff, state4);
+    state4 = in32;
+    diff = tmp1 - state6;
+    tmp2 = MUL_ACCUM_2(kResampleAllpass2[1], diff, state5);
+    state5 = tmp1;
+    diff = tmp2 - state7;
+    state7 = MUL_ACCUM_2(kResampleAllpass2[2], diff, state6);
+    state6 = tmp2;
+
+    // round; limit amplitude to prevent wrap-around; write to output array
+    out32 = (state7 + 512) >> 10;
+    *out++ = WebRtcSpl_SatW32ToW16(out32);
+  }
+
+  filtState[0] = state0;
+  filtState[1] = state1;
+  filtState[2] = state2;
+  filtState[3] = state3;
+  filtState[4] = state4;
+  filtState[5] = state5;
+  filtState[6] = state6;
+  filtState[7] = state7;
+}
diff --git a/common_audio/signal_processing/resample_by_2_internal.c b/common_audio/signal_processing/resample_by_2_internal.c
new file mode 100644
index 0000000..99592b2
--- /dev/null
+++ b/common_audio/signal_processing/resample_by_2_internal.c
@@ -0,0 +1,689 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This header file contains some internal resampling functions.
+ *
+ */
+
+#include "common_audio/signal_processing/resample_by_2_internal.h"
+#include "rtc_base/sanitizer.h"
+
+// allpass filter coefficients.
+static const int16_t kResampleAllpass[2][3] = {
+        {821, 6110, 12382},
+        {3050, 9368, 15063}
+};
+
+//
+//   decimator
+// input:  int32_t (shifted 15 positions to the left, + offset 16384) OVERWRITTEN!
+// output: int16_t (saturated) (of length len/2)
+// state:  filter state array; length = 8
+
+void RTC_NO_SANITIZE("signed-integer-overflow")  // bugs.webrtc.org/5486
+WebRtcSpl_DownBy2IntToShort(int32_t *in, int32_t len, int16_t *out,
+                            int32_t *state)
+{
+    int32_t tmp0, tmp1, diff;
+    int32_t i;
+
+    len >>= 1;
+
+    // lower allpass filter (operates on even input samples)
+    for (i = 0; i < len; i++)
+    {
+        tmp0 = in[i << 1];
+        diff = tmp0 - state[1];
+        // UBSan: -1771017321 - 999586185 cannot be represented in type 'int'
+
+        // scale down and round
+        diff = (diff + (1 << 13)) >> 14;
+        tmp1 = state[0] + diff * kResampleAllpass[1][0];
+        state[0] = tmp0;
+        diff = tmp1 - state[2];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        tmp0 = state[1] + diff * kResampleAllpass[1][1];
+        state[1] = tmp1;
+        diff = tmp0 - state[3];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        state[3] = state[2] + diff * kResampleAllpass[1][2];
+        state[2] = tmp0;
+
+        // divide by two and store temporarily
+        in[i << 1] = (state[3] >> 1);
+    }
+
+    in++;
+
+    // upper allpass filter (operates on odd input samples)
+    for (i = 0; i < len; i++)
+    {
+        tmp0 = in[i << 1];
+        diff = tmp0 - state[5];
+        // scale down and round
+        diff = (diff + (1 << 13)) >> 14;
+        tmp1 = state[4] + diff * kResampleAllpass[0][0];
+        state[4] = tmp0;
+        diff = tmp1 - state[6];
+        // scale down and round
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        tmp0 = state[5] + diff * kResampleAllpass[0][1];
+        state[5] = tmp1;
+        diff = tmp0 - state[7];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        state[7] = state[6] + diff * kResampleAllpass[0][2];
+        state[6] = tmp0;
+
+        // divide by two and store temporarily
+        in[i << 1] = (state[7] >> 1);
+    }
+
+    in--;
+
+    // combine allpass outputs
+    for (i = 0; i < len; i += 2)
+    {
+        // divide by two, add both allpass outputs and round
+        tmp0 = (in[i << 1] + in[(i << 1) + 1]) >> 15;
+        tmp1 = (in[(i << 1) + 2] + in[(i << 1) + 3]) >> 15;
+        if (tmp0 > (int32_t)0x00007FFF)
+            tmp0 = 0x00007FFF;
+        if (tmp0 < (int32_t)0xFFFF8000)
+            tmp0 = 0xFFFF8000;
+        out[i] = (int16_t)tmp0;
+        if (tmp1 > (int32_t)0x00007FFF)
+            tmp1 = 0x00007FFF;
+        if (tmp1 < (int32_t)0xFFFF8000)
+            tmp1 = 0xFFFF8000;
+        out[i + 1] = (int16_t)tmp1;
+    }
+}
+
+//
+//   decimator
+// input:  int16_t
+// output: int32_t (shifted 15 positions to the left, + offset 16384) (of length len/2)
+// state:  filter state array; length = 8
+
+void RTC_NO_SANITIZE("signed-integer-overflow")  // bugs.webrtc.org/5486
+WebRtcSpl_DownBy2ShortToInt(const int16_t *in,
+                            int32_t len,
+                            int32_t *out,
+                            int32_t *state)
+{
+    int32_t tmp0, tmp1, diff;
+    int32_t i;
+
+    len >>= 1;
+
+    // lower allpass filter (operates on even input samples)
+    for (i = 0; i < len; i++)
+    {
+        tmp0 = ((int32_t)in[i << 1] << 15) + (1 << 14);
+        diff = tmp0 - state[1];
+        // scale down and round
+        diff = (diff + (1 << 13)) >> 14;
+        tmp1 = state[0] + diff * kResampleAllpass[1][0];
+        state[0] = tmp0;
+        diff = tmp1 - state[2];
+        // UBSan: -1379909682 - 834099714 cannot be represented in type 'int'
+
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        tmp0 = state[1] + diff * kResampleAllpass[1][1];
+        state[1] = tmp1;
+        diff = tmp0 - state[3];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        state[3] = state[2] + diff * kResampleAllpass[1][2];
+        state[2] = tmp0;
+
+        // divide by two and store temporarily
+        out[i] = (state[3] >> 1);
+    }
+
+    in++;
+
+    // upper allpass filter (operates on odd input samples)
+    for (i = 0; i < len; i++)
+    {
+        tmp0 = ((int32_t)in[i << 1] << 15) + (1 << 14);
+        diff = tmp0 - state[5];
+        // scale down and round
+        diff = (diff + (1 << 13)) >> 14;
+        tmp1 = state[4] + diff * kResampleAllpass[0][0];
+        state[4] = tmp0;
+        diff = tmp1 - state[6];
+        // scale down and round
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        tmp0 = state[5] + diff * kResampleAllpass[0][1];
+        state[5] = tmp1;
+        diff = tmp0 - state[7];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        state[7] = state[6] + diff * kResampleAllpass[0][2];
+        state[6] = tmp0;
+
+        // divide by two and store temporarily
+        out[i] += (state[7] >> 1);
+    }
+
+    in--;
+}
+
+//
+//   interpolator
+// input:  int16_t
+// output: int32_t (normalized, not saturated) (of length len*2)
+// state:  filter state array; length = 8
+void WebRtcSpl_UpBy2ShortToInt(const int16_t *in, int32_t len, int32_t *out,
+                               int32_t *state)
+{
+    int32_t tmp0, tmp1, diff;
+    int32_t i;
+
+    // upper allpass filter (generates odd output samples)
+    for (i = 0; i < len; i++)
+    {
+        tmp0 = ((int32_t)in[i] << 15) + (1 << 14);
+        diff = tmp0 - state[5];
+        // scale down and round
+        diff = (diff + (1 << 13)) >> 14;
+        tmp1 = state[4] + diff * kResampleAllpass[0][0];
+        state[4] = tmp0;
+        diff = tmp1 - state[6];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        tmp0 = state[5] + diff * kResampleAllpass[0][1];
+        state[5] = tmp1;
+        diff = tmp0 - state[7];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        state[7] = state[6] + diff * kResampleAllpass[0][2];
+        state[6] = tmp0;
+
+        // scale down, round and store
+        out[i << 1] = state[7] >> 15;
+    }
+
+    out++;
+
+    // lower allpass filter (generates even output samples)
+    for (i = 0; i < len; i++)
+    {
+        tmp0 = ((int32_t)in[i] << 15) + (1 << 14);
+        diff = tmp0 - state[1];
+        // scale down and round
+        diff = (diff + (1 << 13)) >> 14;
+        tmp1 = state[0] + diff * kResampleAllpass[1][0];
+        state[0] = tmp0;
+        diff = tmp1 - state[2];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        tmp0 = state[1] + diff * kResampleAllpass[1][1];
+        state[1] = tmp1;
+        diff = tmp0 - state[3];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        state[3] = state[2] + diff * kResampleAllpass[1][2];
+        state[2] = tmp0;
+
+        // scale down, round and store
+        out[i << 1] = state[3] >> 15;
+    }
+}
+
+//
+//   interpolator
+// input:  int32_t (shifted 15 positions to the left, + offset 16384)
+// output: int32_t (shifted 15 positions to the left, + offset 16384) (of length len*2)
+// state:  filter state array; length = 8
+void WebRtcSpl_UpBy2IntToInt(const int32_t *in, int32_t len, int32_t *out,
+                             int32_t *state)
+{
+    int32_t tmp0, tmp1, diff;
+    int32_t i;
+
+    // upper allpass filter (generates odd output samples)
+    for (i = 0; i < len; i++)
+    {
+        tmp0 = in[i];
+        diff = tmp0 - state[5];
+        // scale down and round
+        diff = (diff + (1 << 13)) >> 14;
+        tmp1 = state[4] + diff * kResampleAllpass[0][0];
+        state[4] = tmp0;
+        diff = tmp1 - state[6];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        tmp0 = state[5] + diff * kResampleAllpass[0][1];
+        state[5] = tmp1;
+        diff = tmp0 - state[7];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        state[7] = state[6] + diff * kResampleAllpass[0][2];
+        state[6] = tmp0;
+
+        // scale down, round and store
+        out[i << 1] = state[7];
+    }
+
+    out++;
+
+    // lower allpass filter (generates even output samples)
+    for (i = 0; i < len; i++)
+    {
+        tmp0 = in[i];
+        diff = tmp0 - state[1];
+        // scale down and round
+        diff = (diff + (1 << 13)) >> 14;
+        tmp1 = state[0] + diff * kResampleAllpass[1][0];
+        state[0] = tmp0;
+        diff = tmp1 - state[2];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        tmp0 = state[1] + diff * kResampleAllpass[1][1];
+        state[1] = tmp1;
+        diff = tmp0 - state[3];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        state[3] = state[2] + diff * kResampleAllpass[1][2];
+        state[2] = tmp0;
+
+        // scale down, round and store
+        out[i << 1] = state[3];
+    }
+}
+
+//
+//   interpolator
+// input:  int32_t (shifted 15 positions to the left, + offset 16384)
+// output: int16_t (saturated) (of length len*2)
+// state:  filter state array; length = 8
+void WebRtcSpl_UpBy2IntToShort(const int32_t *in, int32_t len, int16_t *out,
+                               int32_t *state)
+{
+    int32_t tmp0, tmp1, diff;
+    int32_t i;
+
+    // upper allpass filter (generates odd output samples)
+    for (i = 0; i < len; i++)
+    {
+        tmp0 = in[i];
+        diff = tmp0 - state[5];
+        // scale down and round
+        diff = (diff + (1 << 13)) >> 14;
+        tmp1 = state[4] + diff * kResampleAllpass[0][0];
+        state[4] = tmp0;
+        diff = tmp1 - state[6];
+        // scale down and round
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        tmp0 = state[5] + diff * kResampleAllpass[0][1];
+        state[5] = tmp1;
+        diff = tmp0 - state[7];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        state[7] = state[6] + diff * kResampleAllpass[0][2];
+        state[6] = tmp0;
+
+        // scale down, saturate and store
+        tmp1 = state[7] >> 15;
+        if (tmp1 > (int32_t)0x00007FFF)
+            tmp1 = 0x00007FFF;
+        if (tmp1 < (int32_t)0xFFFF8000)
+            tmp1 = 0xFFFF8000;
+        out[i << 1] = (int16_t)tmp1;
+    }
+
+    out++;
+
+    // lower allpass filter (generates even output samples)
+    for (i = 0; i < len; i++)
+    {
+        tmp0 = in[i];
+        diff = tmp0 - state[1];
+        // scale down and round
+        diff = (diff + (1 << 13)) >> 14;
+        tmp1 = state[0] + diff * kResampleAllpass[1][0];
+        state[0] = tmp0;
+        diff = tmp1 - state[2];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        tmp0 = state[1] + diff * kResampleAllpass[1][1];
+        state[1] = tmp1;
+        diff = tmp0 - state[3];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        state[3] = state[2] + diff * kResampleAllpass[1][2];
+        state[2] = tmp0;
+
+        // scale down, saturate and store
+        tmp1 = state[3] >> 15;
+        if (tmp1 > (int32_t)0x00007FFF)
+            tmp1 = 0x00007FFF;
+        if (tmp1 < (int32_t)0xFFFF8000)
+            tmp1 = 0xFFFF8000;
+        out[i << 1] = (int16_t)tmp1;
+    }
+}
+
+//   lowpass filter
+// input:  int16_t
+// output: int32_t (normalized, not saturated)
+// state:  filter state array; length = 8
+void WebRtcSpl_LPBy2ShortToInt(const int16_t* in, int32_t len, int32_t* out,
+                               int32_t* state)
+{
+    int32_t tmp0, tmp1, diff;
+    int32_t i;
+
+    len >>= 1;
+
+    // lower allpass filter: odd input -> even output samples
+    in++;
+    // initial state of polyphase delay element
+    tmp0 = state[12];
+    for (i = 0; i < len; i++)
+    {
+        diff = tmp0 - state[1];
+        // scale down and round
+        diff = (diff + (1 << 13)) >> 14;
+        tmp1 = state[0] + diff * kResampleAllpass[1][0];
+        state[0] = tmp0;
+        diff = tmp1 - state[2];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        tmp0 = state[1] + diff * kResampleAllpass[1][1];
+        state[1] = tmp1;
+        diff = tmp0 - state[3];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        state[3] = state[2] + diff * kResampleAllpass[1][2];
+        state[2] = tmp0;
+
+        // scale down, round and store
+        out[i << 1] = state[3] >> 1;
+        tmp0 = ((int32_t)in[i << 1] << 15) + (1 << 14);
+    }
+    in--;
+
+    // upper allpass filter: even input -> even output samples
+    for (i = 0; i < len; i++)
+    {
+        tmp0 = ((int32_t)in[i << 1] << 15) + (1 << 14);
+        diff = tmp0 - state[5];
+        // scale down and round
+        diff = (diff + (1 << 13)) >> 14;
+        tmp1 = state[4] + diff * kResampleAllpass[0][0];
+        state[4] = tmp0;
+        diff = tmp1 - state[6];
+        // scale down and round
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        tmp0 = state[5] + diff * kResampleAllpass[0][1];
+        state[5] = tmp1;
+        diff = tmp0 - state[7];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        state[7] = state[6] + diff * kResampleAllpass[0][2];
+        state[6] = tmp0;
+
+        // average the two allpass outputs, scale down and store
+        out[i << 1] = (out[i << 1] + (state[7] >> 1)) >> 15;
+    }
+
+    // switch to odd output samples
+    out++;
+
+    // lower allpass filter: even input -> odd output samples
+    for (i = 0; i < len; i++)
+    {
+        tmp0 = ((int32_t)in[i << 1] << 15) + (1 << 14);
+        diff = tmp0 - state[9];
+        // scale down and round
+        diff = (diff + (1 << 13)) >> 14;
+        tmp1 = state[8] + diff * kResampleAllpass[1][0];
+        state[8] = tmp0;
+        diff = tmp1 - state[10];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        tmp0 = state[9] + diff * kResampleAllpass[1][1];
+        state[9] = tmp1;
+        diff = tmp0 - state[11];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        state[11] = state[10] + diff * kResampleAllpass[1][2];
+        state[10] = tmp0;
+
+        // scale down, round and store
+        out[i << 1] = state[11] >> 1;
+    }
+
+    // upper allpass filter: odd input -> odd output samples
+    in++;
+    for (i = 0; i < len; i++)
+    {
+        tmp0 = ((int32_t)in[i << 1] << 15) + (1 << 14);
+        diff = tmp0 - state[13];
+        // scale down and round
+        diff = (diff + (1 << 13)) >> 14;
+        tmp1 = state[12] + diff * kResampleAllpass[0][0];
+        state[12] = tmp0;
+        diff = tmp1 - state[14];
+        // scale down and round
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        tmp0 = state[13] + diff * kResampleAllpass[0][1];
+        state[13] = tmp1;
+        diff = tmp0 - state[15];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        state[15] = state[14] + diff * kResampleAllpass[0][2];
+        state[14] = tmp0;
+
+        // average the two allpass outputs, scale down and store
+        out[i << 1] = (out[i << 1] + (state[15] >> 1)) >> 15;
+    }
+}
+
+//   lowpass filter
+// input:  int32_t (shifted 15 positions to the left, + offset 16384)
+// output: int32_t (normalized, not saturated)
+// state:  filter state array; length = 8
+void RTC_NO_SANITIZE("signed-integer-overflow")  // bugs.webrtc.org/5486
+WebRtcSpl_LPBy2IntToInt(const int32_t* in, int32_t len, int32_t* out,
+                        int32_t* state)
+{
+    int32_t tmp0, tmp1, diff;
+    int32_t i;
+
+    len >>= 1;
+
+    // lower allpass filter: odd input -> even output samples
+    in++;
+    // initial state of polyphase delay element
+    tmp0 = state[12];
+    for (i = 0; i < len; i++)
+    {
+        diff = tmp0 - state[1];
+        // scale down and round
+        diff = (diff + (1 << 13)) >> 14;
+        tmp1 = state[0] + diff * kResampleAllpass[1][0];
+        state[0] = tmp0;
+        diff = tmp1 - state[2];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        tmp0 = state[1] + diff * kResampleAllpass[1][1];
+        state[1] = tmp1;
+        diff = tmp0 - state[3];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        state[3] = state[2] + diff * kResampleAllpass[1][2];
+        state[2] = tmp0;
+
+        // scale down, round and store
+        out[i << 1] = state[3] >> 1;
+        tmp0 = in[i << 1];
+    }
+    in--;
+
+    // upper allpass filter: even input -> even output samples
+    for (i = 0; i < len; i++)
+    {
+        tmp0 = in[i << 1];
+        diff = tmp0 - state[5];
+        // UBSan: -794814117 - 1566149201 cannot be represented in type 'int'
+
+        // scale down and round
+        diff = (diff + (1 << 13)) >> 14;
+        tmp1 = state[4] + diff * kResampleAllpass[0][0];
+        state[4] = tmp0;
+        diff = tmp1 - state[6];
+        // scale down and round
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        tmp0 = state[5] + diff * kResampleAllpass[0][1];
+        state[5] = tmp1;
+        diff = tmp0 - state[7];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        state[7] = state[6] + diff * kResampleAllpass[0][2];
+        state[6] = tmp0;
+
+        // average the two allpass outputs, scale down and store
+        out[i << 1] = (out[i << 1] + (state[7] >> 1)) >> 15;
+    }
+
+    // switch to odd output samples
+    out++;
+
+    // lower allpass filter: even input -> odd output samples
+    for (i = 0; i < len; i++)
+    {
+        tmp0 = in[i << 1];
+        diff = tmp0 - state[9];
+        // scale down and round
+        diff = (diff + (1 << 13)) >> 14;
+        tmp1 = state[8] + diff * kResampleAllpass[1][0];
+        state[8] = tmp0;
+        diff = tmp1 - state[10];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        tmp0 = state[9] + diff * kResampleAllpass[1][1];
+        state[9] = tmp1;
+        diff = tmp0 - state[11];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        state[11] = state[10] + diff * kResampleAllpass[1][2];
+        state[10] = tmp0;
+
+        // scale down, round and store
+        out[i << 1] = state[11] >> 1;
+    }
+
+    // upper allpass filter: odd input -> odd output samples
+    in++;
+    for (i = 0; i < len; i++)
+    {
+        tmp0 = in[i << 1];
+        diff = tmp0 - state[13];
+        // scale down and round
+        diff = (diff + (1 << 13)) >> 14;
+        tmp1 = state[12] + diff * kResampleAllpass[0][0];
+        state[12] = tmp0;
+        diff = tmp1 - state[14];
+        // scale down and round
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        tmp0 = state[13] + diff * kResampleAllpass[0][1];
+        state[13] = tmp1;
+        diff = tmp0 - state[15];
+        // scale down and truncate
+        diff = diff >> 14;
+        if (diff < 0)
+            diff += 1;
+        state[15] = state[14] + diff * kResampleAllpass[0][2];
+        state[14] = tmp0;
+
+        // average the two allpass outputs, scale down and store
+        out[i << 1] = (out[i << 1] + (state[15] >> 1)) >> 15;
+    }
+}
diff --git a/common_audio/signal_processing/resample_by_2_internal.h b/common_audio/signal_processing/resample_by_2_internal.h
new file mode 100644
index 0000000..b0d1969
--- /dev/null
+++ b/common_audio/signal_processing/resample_by_2_internal.h
@@ -0,0 +1,47 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This header file contains some internal resampling functions.
+ *
+ */
+
+#ifndef COMMON_AUDIO_SIGNAL_PROCESSING_RESAMPLE_BY_2_INTERNAL_H_
+#define COMMON_AUDIO_SIGNAL_PROCESSING_RESAMPLE_BY_2_INTERNAL_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+/*******************************************************************
+ * resample_by_2_fast.c
+ * Functions for internal use in the other resample functions
+ ******************************************************************/
+void WebRtcSpl_DownBy2IntToShort(int32_t *in, int32_t len, int16_t *out,
+                                 int32_t *state);
+
+void WebRtcSpl_DownBy2ShortToInt(const int16_t *in, int32_t len,
+                                 int32_t *out, int32_t *state);
+
+void WebRtcSpl_UpBy2ShortToInt(const int16_t *in, int32_t len,
+                               int32_t *out, int32_t *state);
+
+void WebRtcSpl_UpBy2IntToInt(const int32_t *in, int32_t len, int32_t *out,
+                             int32_t *state);
+
+void WebRtcSpl_UpBy2IntToShort(const int32_t *in, int32_t len,
+                               int16_t *out, int32_t *state);
+
+void WebRtcSpl_LPBy2ShortToInt(const int16_t* in, int32_t len,
+                               int32_t* out, int32_t* state);
+
+void WebRtcSpl_LPBy2IntToInt(const int32_t* in, int32_t len, int32_t* out,
+                             int32_t* state);
+
+#endif  // COMMON_AUDIO_SIGNAL_PROCESSING_RESAMPLE_BY_2_INTERNAL_H_
diff --git a/common_audio/signal_processing/resample_by_2_mips.c b/common_audio/signal_processing/resample_by_2_mips.c
new file mode 100644
index 0000000..f41bab7
--- /dev/null
+++ b/common_audio/signal_processing/resample_by_2_mips.c
@@ -0,0 +1,292 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains the resampling by two functions.
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#if defined(MIPS32_LE)
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+#if !defined(MIPS_DSP_R2_LE)
+// allpass filter coefficients.
+static const uint16_t kResampleAllpass1[3] = {3284, 24441, 49528};
+static const uint16_t kResampleAllpass2[3] = {12199, 37471, 60255};
+#endif
+
+// Multiply a 32-bit value with a 16-bit value and accumulate to another input:
+#define MUL_ACCUM_1(a, b, c) WEBRTC_SPL_SCALEDIFF32(a, b, c)
+#define MUL_ACCUM_2(a, b, c) WEBRTC_SPL_SCALEDIFF32(a, b, c)
+
+// decimator
+void WebRtcSpl_DownsampleBy2(const int16_t* in,
+                             size_t len,
+                             int16_t* out,
+                             int32_t* filtState) {
+  int32_t out32;
+  size_t i, len1;
+
+  register int32_t state0 = filtState[0];
+  register int32_t state1 = filtState[1];
+  register int32_t state2 = filtState[2];
+  register int32_t state3 = filtState[3];
+  register int32_t state4 = filtState[4];
+  register int32_t state5 = filtState[5];
+  register int32_t state6 = filtState[6];
+  register int32_t state7 = filtState[7];
+
+#if defined(MIPS_DSP_R2_LE)
+  int32_t k1Res0, k1Res1, k1Res2, k2Res0, k2Res1, k2Res2;
+
+  k1Res0= 3284;
+  k1Res1= 24441;
+  k1Res2= 49528;
+  k2Res0= 12199;
+  k2Res1= 37471;
+  k2Res2= 60255;
+  len1 = (len >> 1);
+
+  const int32_t* inw = (int32_t*)in;
+  int32_t tmp11, tmp12, tmp21, tmp22;
+  int32_t in322, in321;
+  int32_t diff1, diff2;
+  for (i = len1; i > 0; i--) {
+    __asm__ volatile (
+      "lh         %[in321],    0(%[inw])                  \n\t"
+      "lh         %[in322],    2(%[inw])                  \n\t"
+
+      "sll        %[in321],    %[in321],      10          \n\t"
+      "sll        %[in322],    %[in322],      10          \n\t"
+
+      "addiu      %[inw],      %[inw],        4           \n\t"
+
+      "subu       %[diff1],    %[in321],      %[state1]   \n\t"
+      "subu       %[diff2],    %[in322],      %[state5]   \n\t"
+
+      : [in322] "=&r" (in322), [in321] "=&r" (in321),
+        [diff1] "=&r" (diff1), [diff2] "=r" (diff2), [inw] "+r" (inw)
+      : [state1] "r" (state1), [state5] "r" (state5)
+      : "memory"
+    );
+
+    __asm__ volatile (
+      "mult       $ac0,       %[diff1],       %[k2Res0]   \n\t"
+      "mult       $ac1,       %[diff2],       %[k1Res0]   \n\t"
+
+      "extr.w     %[tmp11],   $ac0,           16          \n\t"
+      "extr.w     %[tmp12],   $ac1,           16          \n\t"
+
+      "addu       %[tmp11],   %[state0],      %[tmp11]    \n\t"
+      "addu       %[tmp12],   %[state4],      %[tmp12]    \n\t"
+
+      "addiu      %[state0],  %[in321],       0           \n\t"
+      "addiu      %[state4],  %[in322],       0           \n\t"
+
+      "subu       %[diff1],   %[tmp11],       %[state2]   \n\t"
+      "subu       %[diff2],   %[tmp12],       %[state6]   \n\t"
+
+      "mult       $ac0,       %[diff1],       %[k2Res1]   \n\t"
+      "mult       $ac1,       %[diff2],       %[k1Res1]   \n\t"
+
+      "extr.w     %[tmp21],   $ac0,           16          \n\t"
+      "extr.w     %[tmp22],   $ac1,           16          \n\t"
+
+      "addu       %[tmp21],   %[state1],      %[tmp21]    \n\t"
+      "addu       %[tmp22],   %[state5],      %[tmp22]    \n\t"
+
+      "addiu      %[state1],  %[tmp11],       0           \n\t"
+      "addiu      %[state5],  %[tmp12],       0           \n\t"
+      : [tmp22] "=r" (tmp22), [tmp21] "=&r" (tmp21),
+        [tmp11] "=&r" (tmp11), [state0] "+r" (state0),
+        [state1] "+r" (state1),
+        [state2] "+r" (state2),
+        [state4] "+r" (state4), [tmp12] "=&r" (tmp12),
+        [state6] "+r" (state6), [state5] "+r" (state5)
+      : [k1Res1] "r" (k1Res1), [k2Res1] "r" (k2Res1), [k2Res0] "r" (k2Res0),
+        [diff2] "r" (diff2), [diff1] "r" (diff1), [in322] "r" (in322),
+        [in321] "r" (in321), [k1Res0] "r" (k1Res0)
+      : "hi", "lo", "$ac1hi", "$ac1lo"
+    );
+
+    // upper allpass filter
+    __asm__ volatile (
+      "subu       %[diff1],   %[tmp21],       %[state3]   \n\t"
+      "subu       %[diff2],   %[tmp22],       %[state7]   \n\t"
+
+      "mult       $ac0,       %[diff1],       %[k2Res2]   \n\t"
+      "mult       $ac1,       %[diff2],       %[k1Res2]   \n\t"
+      "extr.w     %[state3],  $ac0,           16          \n\t"
+      "extr.w     %[state7],  $ac1,           16          \n\t"
+      "addu       %[state3],  %[state2],      %[state3]   \n\t"
+      "addu       %[state7],  %[state6],      %[state7]   \n\t"
+
+      "addiu      %[state2],  %[tmp21],       0           \n\t"
+      "addiu      %[state6],  %[tmp22],       0           \n\t"
+
+      // add two allpass outputs, divide by two and round
+      "addu       %[out32],   %[state3],      %[state7]   \n\t"
+      "addiu      %[out32],   %[out32],       1024        \n\t"
+      "sra        %[out32],   %[out32],       11          \n\t"
+      : [state3] "+r" (state3), [state6] "+r" (state6),
+        [state2] "+r" (state2), [diff2] "=&r" (diff2),
+        [out32] "=r" (out32), [diff1] "=&r" (diff1), [state7] "+r" (state7)
+      : [tmp22] "r" (tmp22), [tmp21] "r" (tmp21),
+        [k1Res2] "r" (k1Res2), [k2Res2] "r" (k2Res2)
+      : "hi", "lo", "$ac1hi", "$ac1lo"
+    );
+
+    // limit amplitude to prevent wrap-around, and write to output array
+    *out++ = WebRtcSpl_SatW32ToW16(out32);
+  }
+#else  // #if defined(MIPS_DSP_R2_LE)
+  int32_t tmp1, tmp2, diff;
+  int32_t in32;
+  len1 = (len >> 1)/4;
+  for (i = len1; i > 0; i--) {
+    // lower allpass filter
+    in32 = (int32_t)(*in++) << 10;
+    diff = in32 - state1;
+    tmp1 = MUL_ACCUM_1(kResampleAllpass2[0], diff, state0);
+    state0 = in32;
+    diff = tmp1 - state2;
+    tmp2 = MUL_ACCUM_2(kResampleAllpass2[1], diff, state1);
+    state1 = tmp1;
+    diff = tmp2 - state3;
+    state3 = MUL_ACCUM_2(kResampleAllpass2[2], diff, state2);
+    state2 = tmp2;
+
+    // upper allpass filter
+    in32 = (int32_t)(*in++) << 10;
+    diff = in32 - state5;
+    tmp1 = MUL_ACCUM_1(kResampleAllpass1[0], diff, state4);
+    state4 = in32;
+    diff = tmp1 - state6;
+    tmp2 = MUL_ACCUM_1(kResampleAllpass1[1], diff, state5);
+    state5 = tmp1;
+    diff = tmp2 - state7;
+    state7 = MUL_ACCUM_2(kResampleAllpass1[2], diff, state6);
+    state6 = tmp2;
+
+    // add two allpass outputs, divide by two and round
+    out32 = (state3 + state7 + 1024) >> 11;
+
+    // limit amplitude to prevent wrap-around, and write to output array
+    *out++ = WebRtcSpl_SatW32ToW16(out32);
+    // lower allpass filter
+    in32 = (int32_t)(*in++) << 10;
+    diff = in32 - state1;
+    tmp1 = MUL_ACCUM_1(kResampleAllpass2[0], diff, state0);
+    state0 = in32;
+    diff = tmp1 - state2;
+    tmp2 = MUL_ACCUM_2(kResampleAllpass2[1], diff, state1);
+    state1 = tmp1;
+    diff = tmp2 - state3;
+    state3 = MUL_ACCUM_2(kResampleAllpass2[2], diff, state2);
+    state2 = tmp2;
+
+    // upper allpass filter
+    in32 = (int32_t)(*in++) << 10;
+    diff = in32 - state5;
+    tmp1 = MUL_ACCUM_1(kResampleAllpass1[0], diff, state4);
+    state4 = in32;
+    diff = tmp1 - state6;
+    tmp2 = MUL_ACCUM_1(kResampleAllpass1[1], diff, state5);
+    state5 = tmp1;
+    diff = tmp2 - state7;
+    state7 = MUL_ACCUM_2(kResampleAllpass1[2], diff, state6);
+    state6 = tmp2;
+
+    // add two allpass outputs, divide by two and round
+    out32 = (state3 + state7 + 1024) >> 11;
+
+    // limit amplitude to prevent wrap-around, and write to output array
+    *out++ = WebRtcSpl_SatW32ToW16(out32);
+    // lower allpass filter
+    in32 = (int32_t)(*in++) << 10;
+    diff = in32 - state1;
+    tmp1 = MUL_ACCUM_1(kResampleAllpass2[0], diff, state0);
+    state0 = in32;
+    diff = tmp1 - state2;
+    tmp2 = MUL_ACCUM_2(kResampleAllpass2[1], diff, state1);
+    state1 = tmp1;
+    diff = tmp2 - state3;
+    state3 = MUL_ACCUM_2(kResampleAllpass2[2], diff, state2);
+    state2 = tmp2;
+
+    // upper allpass filter
+    in32 = (int32_t)(*in++) << 10;
+    diff = in32 - state5;
+    tmp1 = MUL_ACCUM_1(kResampleAllpass1[0], diff, state4);
+    state4 = in32;
+    diff = tmp1 - state6;
+    tmp2 = MUL_ACCUM_1(kResampleAllpass1[1], diff, state5);
+    state5 = tmp1;
+    diff = tmp2 - state7;
+    state7 = MUL_ACCUM_2(kResampleAllpass1[2], diff, state6);
+    state6 = tmp2;
+
+    // add two allpass outputs, divide by two and round
+    out32 = (state3 + state7 + 1024) >> 11;
+
+    // limit amplitude to prevent wrap-around, and write to output array
+    *out++ = WebRtcSpl_SatW32ToW16(out32);
+    // lower allpass filter
+    in32 = (int32_t)(*in++) << 10;
+    diff = in32 - state1;
+    tmp1 = MUL_ACCUM_1(kResampleAllpass2[0], diff, state0);
+    state0 = in32;
+    diff = tmp1 - state2;
+    tmp2 = MUL_ACCUM_2(kResampleAllpass2[1], diff, state1);
+    state1 = tmp1;
+    diff = tmp2 - state3;
+    state3 = MUL_ACCUM_2(kResampleAllpass2[2], diff, state2);
+    state2 = tmp2;
+
+    // upper allpass filter
+    in32 = (int32_t)(*in++) << 10;
+    diff = in32 - state5;
+    tmp1 = MUL_ACCUM_1(kResampleAllpass1[0], diff, state4);
+    state4 = in32;
+    diff = tmp1 - state6;
+    tmp2 = MUL_ACCUM_1(kResampleAllpass1[1], diff, state5);
+    state5 = tmp1;
+    diff = tmp2 - state7;
+    state7 = MUL_ACCUM_2(kResampleAllpass1[2], diff, state6);
+    state6 = tmp2;
+
+    // add two allpass outputs, divide by two and round
+    out32 = (state3 + state7 + 1024) >> 11;
+
+    // limit amplitude to prevent wrap-around, and write to output array
+    *out++ = WebRtcSpl_SatW32ToW16(out32);
+  }
+#endif  // #if defined(MIPS_DSP_R2_LE)
+  __asm__ volatile (
+    "sw       %[state0],      0(%[filtState])     \n\t"
+    "sw       %[state1],      4(%[filtState])     \n\t"
+    "sw       %[state2],      8(%[filtState])     \n\t"
+    "sw       %[state3],      12(%[filtState])    \n\t"
+    "sw       %[state4],      16(%[filtState])    \n\t"
+    "sw       %[state5],      20(%[filtState])    \n\t"
+    "sw       %[state6],      24(%[filtState])    \n\t"
+    "sw       %[state7],      28(%[filtState])    \n\t"
+    :
+    : [state0] "r" (state0), [state1] "r" (state1), [state2] "r" (state2),
+      [state3] "r" (state3), [state4] "r" (state4), [state5] "r" (state5),
+      [state6] "r" (state6), [state7] "r" (state7), [filtState] "r" (filtState)
+    : "memory"
+  );
+}
+
+#endif  // #if defined(MIPS32_LE)
diff --git a/common_audio/signal_processing/resample_fractional.c b/common_audio/signal_processing/resample_fractional.c
new file mode 100644
index 0000000..9ffe0ac
--- /dev/null
+++ b/common_audio/signal_processing/resample_fractional.c
@@ -0,0 +1,239 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains the resampling functions between 48, 44, 32 and 24 kHz.
+ * The description headers can be found in signal_processing_library.h
+ *
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+// interpolation coefficients
+static const int16_t kCoefficients48To32[2][8] = {
+        {778, -2050, 1087, 23285, 12903, -3783, 441, 222},
+        {222, 441, -3783, 12903, 23285, 1087, -2050, 778}
+};
+
+static const int16_t kCoefficients32To24[3][8] = {
+        {767, -2362, 2434, 24406, 10620, -3838, 721, 90},
+        {386, -381, -2646, 19062, 19062, -2646, -381, 386},
+        {90, 721, -3838, 10620, 24406, 2434, -2362, 767}
+};
+
+static const int16_t kCoefficients44To32[4][9] = {
+        {117, -669, 2245, -6183, 26267, 13529, -3245, 845, -138},
+        {-101, 612, -2283, 8532, 29790, -5138, 1789, -524, 91},
+        {50, -292, 1016, -3064, 32010, 3933, -1147, 315, -53},
+        {-156, 974, -3863, 18603, 21691, -6246, 2353, -712, 126}
+};
+
+//   Resampling ratio: 2/3
+// input:  int32_t (normalized, not saturated) :: size 3 * K
+// output: int32_t (shifted 15 positions to the left, + offset 16384) :: size 2 * K
+//      K: number of blocks
+
+void WebRtcSpl_Resample48khzTo32khz(const int32_t *In, int32_t *Out, size_t K)
+{
+    /////////////////////////////////////////////////////////////
+    // Filter operation:
+    //
+    // Perform resampling (3 input samples -> 2 output samples);
+    // process in sub blocks of size 3 samples.
+    int32_t tmp;
+    size_t m;
+
+    for (m = 0; m < K; m++)
+    {
+        tmp = 1 << 14;
+        tmp += kCoefficients48To32[0][0] * In[0];
+        tmp += kCoefficients48To32[0][1] * In[1];
+        tmp += kCoefficients48To32[0][2] * In[2];
+        tmp += kCoefficients48To32[0][3] * In[3];
+        tmp += kCoefficients48To32[0][4] * In[4];
+        tmp += kCoefficients48To32[0][5] * In[5];
+        tmp += kCoefficients48To32[0][6] * In[6];
+        tmp += kCoefficients48To32[0][7] * In[7];
+        Out[0] = tmp;
+
+        tmp = 1 << 14;
+        tmp += kCoefficients48To32[1][0] * In[1];
+        tmp += kCoefficients48To32[1][1] * In[2];
+        tmp += kCoefficients48To32[1][2] * In[3];
+        tmp += kCoefficients48To32[1][3] * In[4];
+        tmp += kCoefficients48To32[1][4] * In[5];
+        tmp += kCoefficients48To32[1][5] * In[6];
+        tmp += kCoefficients48To32[1][6] * In[7];
+        tmp += kCoefficients48To32[1][7] * In[8];
+        Out[1] = tmp;
+
+        // update pointers
+        In += 3;
+        Out += 2;
+    }
+}
+
+//   Resampling ratio: 3/4
+// input:  int32_t (normalized, not saturated) :: size 4 * K
+// output: int32_t (shifted 15 positions to the left, + offset 16384) :: size 3 * K
+//      K: number of blocks
+
+void WebRtcSpl_Resample32khzTo24khz(const int32_t *In, int32_t *Out, size_t K)
+{
+    /////////////////////////////////////////////////////////////
+    // Filter operation:
+    //
+    // Perform resampling (4 input samples -> 3 output samples);
+    // process in sub blocks of size 4 samples.
+    size_t m;
+    int32_t tmp;
+
+    for (m = 0; m < K; m++)
+    {
+        tmp = 1 << 14;
+        tmp += kCoefficients32To24[0][0] * In[0];
+        tmp += kCoefficients32To24[0][1] * In[1];
+        tmp += kCoefficients32To24[0][2] * In[2];
+        tmp += kCoefficients32To24[0][3] * In[3];
+        tmp += kCoefficients32To24[0][4] * In[4];
+        tmp += kCoefficients32To24[0][5] * In[5];
+        tmp += kCoefficients32To24[0][6] * In[6];
+        tmp += kCoefficients32To24[0][7] * In[7];
+        Out[0] = tmp;
+
+        tmp = 1 << 14;
+        tmp += kCoefficients32To24[1][0] * In[1];
+        tmp += kCoefficients32To24[1][1] * In[2];
+        tmp += kCoefficients32To24[1][2] * In[3];
+        tmp += kCoefficients32To24[1][3] * In[4];
+        tmp += kCoefficients32To24[1][4] * In[5];
+        tmp += kCoefficients32To24[1][5] * In[6];
+        tmp += kCoefficients32To24[1][6] * In[7];
+        tmp += kCoefficients32To24[1][7] * In[8];
+        Out[1] = tmp;
+
+        tmp = 1 << 14;
+        tmp += kCoefficients32To24[2][0] * In[2];
+        tmp += kCoefficients32To24[2][1] * In[3];
+        tmp += kCoefficients32To24[2][2] * In[4];
+        tmp += kCoefficients32To24[2][3] * In[5];
+        tmp += kCoefficients32To24[2][4] * In[6];
+        tmp += kCoefficients32To24[2][5] * In[7];
+        tmp += kCoefficients32To24[2][6] * In[8];
+        tmp += kCoefficients32To24[2][7] * In[9];
+        Out[2] = tmp;
+
+        // update pointers
+        In += 4;
+        Out += 3;
+    }
+}
+
+//
+// fractional resampling filters
+//   Fout = 11/16 * Fin
+//   Fout =  8/11 * Fin
+//
+
+// compute two inner-products and store them to output array
+static void WebRtcSpl_ResampDotProduct(const int32_t *in1, const int32_t *in2,
+                                       const int16_t *coef_ptr, int32_t *out1,
+                                       int32_t *out2)
+{
+    int32_t tmp1 = 16384;
+    int32_t tmp2 = 16384;
+    int16_t coef;
+
+    coef = coef_ptr[0];
+    tmp1 += coef * in1[0];
+    tmp2 += coef * in2[-0];
+
+    coef = coef_ptr[1];
+    tmp1 += coef * in1[1];
+    tmp2 += coef * in2[-1];
+
+    coef = coef_ptr[2];
+    tmp1 += coef * in1[2];
+    tmp2 += coef * in2[-2];
+
+    coef = coef_ptr[3];
+    tmp1 += coef * in1[3];
+    tmp2 += coef * in2[-3];
+
+    coef = coef_ptr[4];
+    tmp1 += coef * in1[4];
+    tmp2 += coef * in2[-4];
+
+    coef = coef_ptr[5];
+    tmp1 += coef * in1[5];
+    tmp2 += coef * in2[-5];
+
+    coef = coef_ptr[6];
+    tmp1 += coef * in1[6];
+    tmp2 += coef * in2[-6];
+
+    coef = coef_ptr[7];
+    tmp1 += coef * in1[7];
+    tmp2 += coef * in2[-7];
+
+    coef = coef_ptr[8];
+    *out1 = tmp1 + coef * in1[8];
+    *out2 = tmp2 + coef * in2[-8];
+}
+
+//   Resampling ratio: 8/11
+// input:  int32_t (normalized, not saturated) :: size 11 * K
+// output: int32_t (shifted 15 positions to the left, + offset 16384) :: size  8 * K
+//      K: number of blocks
+
+void WebRtcSpl_Resample44khzTo32khz(const int32_t *In, int32_t *Out, size_t K)
+{
+    /////////////////////////////////////////////////////////////
+    // Filter operation:
+    //
+    // Perform resampling (11 input samples -> 8 output samples);
+    // process in sub blocks of size 11 samples.
+    int32_t tmp;
+    size_t m;
+
+    for (m = 0; m < K; m++)
+    {
+        tmp = 1 << 14;
+
+        // first output sample
+        Out[0] = ((int32_t)In[3] << 15) + tmp;
+
+        // sum and accumulate filter coefficients and input samples
+        tmp += kCoefficients44To32[3][0] * In[5];
+        tmp += kCoefficients44To32[3][1] * In[6];
+        tmp += kCoefficients44To32[3][2] * In[7];
+        tmp += kCoefficients44To32[3][3] * In[8];
+        tmp += kCoefficients44To32[3][4] * In[9];
+        tmp += kCoefficients44To32[3][5] * In[10];
+        tmp += kCoefficients44To32[3][6] * In[11];
+        tmp += kCoefficients44To32[3][7] * In[12];
+        tmp += kCoefficients44To32[3][8] * In[13];
+        Out[4] = tmp;
+
+        // sum and accumulate filter coefficients and input samples
+        WebRtcSpl_ResampDotProduct(&In[0], &In[17], kCoefficients44To32[0], &Out[1], &Out[7]);
+
+        // sum and accumulate filter coefficients and input samples
+        WebRtcSpl_ResampDotProduct(&In[2], &In[15], kCoefficients44To32[1], &Out[2], &Out[6]);
+
+        // sum and accumulate filter coefficients and input samples
+        WebRtcSpl_ResampDotProduct(&In[3], &In[14], kCoefficients44To32[2], &Out[3], &Out[5]);
+
+        // update pointers
+        In += 11;
+        Out += 8;
+    }
+}
diff --git a/common_audio/signal_processing/signal_processing_unittest.cc b/common_audio/signal_processing/signal_processing_unittest.cc
new file mode 100644
index 0000000..b9efe01
--- /dev/null
+++ b/common_audio/signal_processing/signal_processing_unittest.cc
@@ -0,0 +1,608 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+#include <sstream>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "test/gtest.h"
+
+static const size_t kVector16Size = 9;
+static const int16_t vector16[kVector16Size] = {1, -15511, 4323, 1963,
+  WEBRTC_SPL_WORD16_MAX, 0, WEBRTC_SPL_WORD16_MIN + 5, -3333, 345};
+
+class SplTest : public testing::Test {
+ protected:
+  SplTest() {
+    WebRtcSpl_Init();
+  }
+  virtual ~SplTest() {
+  }
+};
+
+TEST_F(SplTest, MacroTest) {
+    // Macros with inputs.
+    int A = 10;
+    int B = 21;
+    int a = -3;
+    int b = WEBRTC_SPL_WORD32_MAX;
+
+    EXPECT_EQ(10, WEBRTC_SPL_MIN(A, B));
+    EXPECT_EQ(21, WEBRTC_SPL_MAX(A, B));
+
+    EXPECT_EQ(3, WEBRTC_SPL_ABS_W16(a));
+    EXPECT_EQ(3, WEBRTC_SPL_ABS_W32(a));
+
+    EXPECT_EQ(-63, WEBRTC_SPL_MUL(a, B));
+    EXPECT_EQ(2147483651u, WEBRTC_SPL_UMUL(a, b));
+    b = WEBRTC_SPL_WORD16_MAX >> 1;
+    EXPECT_EQ(4294918147u, WEBRTC_SPL_UMUL_32_16(a, b));
+    EXPECT_EQ(-49149, WEBRTC_SPL_MUL_16_U16(a, b));
+
+    a = b;
+    b = -3;
+
+    EXPECT_EQ(-1, WEBRTC_SPL_MUL_16_32_RSFT16(a, b));
+    EXPECT_EQ(-1, WEBRTC_SPL_MUL_16_32_RSFT15(a, b));
+    EXPECT_EQ(-3, WEBRTC_SPL_MUL_16_32_RSFT14(a, b));
+    EXPECT_EQ(-24, WEBRTC_SPL_MUL_16_32_RSFT11(a, b));
+
+    EXPECT_EQ(-12288, WEBRTC_SPL_MUL_16_16_RSFT(a, b, 2));
+    EXPECT_EQ(-12287, WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(a, b, 2));
+
+    EXPECT_EQ(21, WEBRTC_SPL_SAT(a, A, B));
+    EXPECT_EQ(21, WEBRTC_SPL_SAT(a, B, A));
+
+    // Shifting with negative numbers allowed
+    int shift_amount = 1;  // Workaround compiler warning using variable here.
+    // Positive means left shift
+    EXPECT_EQ(32766, WEBRTC_SPL_SHIFT_W32(a, shift_amount));
+
+    // Shifting with negative numbers not allowed
+    // We cannot do casting here due to signed/unsigned problem
+    EXPECT_EQ(32766, WEBRTC_SPL_LSHIFT_W32(a, 1));
+
+    EXPECT_EQ(8191u, WEBRTC_SPL_RSHIFT_U32(a, 1));
+
+    EXPECT_EQ(1470, WEBRTC_SPL_RAND(A));
+
+    EXPECT_EQ(-49149, WEBRTC_SPL_MUL_16_16(a, b));
+    EXPECT_EQ(1073676289, WEBRTC_SPL_MUL_16_16(WEBRTC_SPL_WORD16_MAX,
+                                               WEBRTC_SPL_WORD16_MAX));
+    EXPECT_EQ(1073709055, WEBRTC_SPL_MUL_16_32_RSFT16(WEBRTC_SPL_WORD16_MAX,
+                                                      WEBRTC_SPL_WORD32_MAX));
+    EXPECT_EQ(1073741824, WEBRTC_SPL_MUL_16_32_RSFT16(WEBRTC_SPL_WORD16_MIN,
+                                                      WEBRTC_SPL_WORD32_MIN));
+#ifdef WEBRTC_ARCH_ARM_V7
+    EXPECT_EQ(-1073741824,
+              WEBRTC_SPL_MUL_16_32_RSFT16(WEBRTC_SPL_WORD16_MIN,
+                                          WEBRTC_SPL_WORD32_MAX));
+#else
+    EXPECT_EQ(-1073741823,
+              WEBRTC_SPL_MUL_16_32_RSFT16(WEBRTC_SPL_WORD16_MIN,
+                                          WEBRTC_SPL_WORD32_MAX));
+#endif
+}
+
+TEST_F(SplTest, InlineTest) {
+    int16_t a16 = 121;
+    int16_t b16 = -17;
+    int32_t a32 = 111121;
+    int32_t b32 = -1711;
+
+    EXPECT_EQ(17, WebRtcSpl_GetSizeInBits(a32));
+
+    EXPECT_EQ(0, WebRtcSpl_NormW32(0));
+    EXPECT_EQ(31, WebRtcSpl_NormW32(-1));
+    EXPECT_EQ(0, WebRtcSpl_NormW32(WEBRTC_SPL_WORD32_MIN));
+    EXPECT_EQ(14, WebRtcSpl_NormW32(a32));
+
+    EXPECT_EQ(0, WebRtcSpl_NormW16(0));
+    EXPECT_EQ(15, WebRtcSpl_NormW16(-1));
+    EXPECT_EQ(0, WebRtcSpl_NormW16(WEBRTC_SPL_WORD16_MIN));
+    EXPECT_EQ(4, WebRtcSpl_NormW16(b32));
+    for (int ii = 0; ii < 15; ++ii) {
+      int16_t value = 1 << ii;
+      EXPECT_EQ(14 - ii, WebRtcSpl_NormW16(value));
+      EXPECT_EQ(15 - ii, WebRtcSpl_NormW16(-value));
+    }
+
+    EXPECT_EQ(0, WebRtcSpl_NormU32(0u));
+    EXPECT_EQ(0, WebRtcSpl_NormU32(0xffffffff));
+    EXPECT_EQ(15, WebRtcSpl_NormU32(static_cast<uint32_t>(a32)));
+
+    EXPECT_EQ(104, WebRtcSpl_AddSatW16(a16, b16));
+    EXPECT_EQ(138, WebRtcSpl_SubSatW16(a16, b16));
+}
+
+TEST_F(SplTest, AddSubSatW32) {
+  static constexpr int32_t kAddSubArgs[] = {
+      INT32_MIN, INT32_MIN + 1, -3,       -2, -1, 0, 1, -1, 2,
+      3,         INT32_MAX - 1, INT32_MAX};
+  for (int32_t a : kAddSubArgs) {
+    for (int32_t b : kAddSubArgs) {
+      const int64_t sum = std::max<int64_t>(
+          INT32_MIN, std::min<int64_t>(INT32_MAX, static_cast<int64_t>(a) + b));
+      const int64_t diff = std::max<int64_t>(
+          INT32_MIN, std::min<int64_t>(INT32_MAX, static_cast<int64_t>(a) - b));
+      std::ostringstream ss;
+      ss << a << " +/- " << b << ": sum " << sum << ", diff " << diff;
+      SCOPED_TRACE(ss.str());
+      EXPECT_EQ(sum, WebRtcSpl_AddSatW32(a, b));
+      EXPECT_EQ(diff, WebRtcSpl_SubSatW32(a, b));
+    }
+  }
+}
+
+TEST_F(SplTest, CountLeadingZeros32) {
+  EXPECT_EQ(32, WebRtcSpl_CountLeadingZeros32(0));
+  EXPECT_EQ(32, WebRtcSpl_CountLeadingZeros32_NotBuiltin(0));
+  for (int i = 0; i < 32; ++i) {
+    const uint32_t single_one = uint32_t{1} << i;
+    const uint32_t all_ones = 2 * single_one - 1;
+    EXPECT_EQ(31 - i, WebRtcSpl_CountLeadingZeros32(single_one));
+    EXPECT_EQ(31 - i, WebRtcSpl_CountLeadingZeros32_NotBuiltin(single_one));
+    EXPECT_EQ(31 - i, WebRtcSpl_CountLeadingZeros32(all_ones));
+    EXPECT_EQ(31 - i, WebRtcSpl_CountLeadingZeros32_NotBuiltin(all_ones));
+  }
+}
+
+TEST_F(SplTest, CountLeadingZeros64) {
+  EXPECT_EQ(64, WebRtcSpl_CountLeadingZeros64(0));
+  EXPECT_EQ(64, WebRtcSpl_CountLeadingZeros64_NotBuiltin(0));
+  for (int i = 0; i < 64; ++i) {
+    const uint64_t single_one = uint64_t{1} << i;
+    const uint64_t all_ones = 2 * single_one - 1;
+    EXPECT_EQ(63 - i, WebRtcSpl_CountLeadingZeros64(single_one));
+    EXPECT_EQ(63 - i, WebRtcSpl_CountLeadingZeros64_NotBuiltin(single_one));
+    EXPECT_EQ(63 - i, WebRtcSpl_CountLeadingZeros64(all_ones));
+    EXPECT_EQ(63 - i, WebRtcSpl_CountLeadingZeros64_NotBuiltin(all_ones));
+  }
+}
+
+TEST_F(SplTest, MathOperationsTest) {
+    int A = 1134567892;
+    int32_t num = 117;
+    int32_t den = -5;
+    uint16_t denU = 5;
+    EXPECT_EQ(33700, WebRtcSpl_Sqrt(A));
+    EXPECT_EQ(33683, WebRtcSpl_SqrtFloor(A));
+
+
+    EXPECT_EQ(-91772805, WebRtcSpl_DivResultInQ31(den, num));
+    EXPECT_EQ(-23, WebRtcSpl_DivW32W16ResW16(num, (int16_t)den));
+    EXPECT_EQ(-23, WebRtcSpl_DivW32W16(num, (int16_t)den));
+    EXPECT_EQ(23u, WebRtcSpl_DivU32U16(num, denU));
+    EXPECT_EQ(0, WebRtcSpl_DivW32HiLow(128, 0, 256));
+}
+
+TEST_F(SplTest, BasicArrayOperationsTest) {
+    const size_t kVectorSize = 4;
+    int B[] = {4, 12, 133, 1100};
+    int16_t b16[kVectorSize];
+    int32_t b32[kVectorSize];
+
+    int16_t bTmp16[kVectorSize];
+    int32_t bTmp32[kVectorSize];
+
+    WebRtcSpl_MemSetW16(b16, 3, kVectorSize);
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(3, b16[kk]);
+    }
+    WebRtcSpl_ZerosArrayW16(b16, kVectorSize);
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(0, b16[kk]);
+    }
+    WebRtcSpl_MemSetW32(b32, 3, kVectorSize);
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(3, b32[kk]);
+    }
+    WebRtcSpl_ZerosArrayW32(b32, kVectorSize);
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(0, b32[kk]);
+    }
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        bTmp16[kk] = (int16_t)kk;
+        bTmp32[kk] = (int32_t)kk;
+    }
+    WEBRTC_SPL_MEMCPY_W16(b16, bTmp16, kVectorSize);
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(b16[kk], bTmp16[kk]);
+    }
+//    WEBRTC_SPL_MEMCPY_W32(b32, bTmp32, kVectorSize);
+//    for (int kk = 0; kk < kVectorSize; ++kk) {
+//        EXPECT_EQ(b32[kk], bTmp32[kk]);
+//    }
+    WebRtcSpl_CopyFromEndW16(b16, kVectorSize, 2, bTmp16);
+    for (size_t kk = 0; kk < 2; ++kk) {
+        EXPECT_EQ(static_cast<int16_t>(kk+2), bTmp16[kk]);
+    }
+
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        b32[kk] = B[kk];
+        b16[kk] = (int16_t)B[kk];
+    }
+    WebRtcSpl_VectorBitShiftW32ToW16(bTmp16, kVectorSize, b32, 1);
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ((B[kk]>>1), bTmp16[kk]);
+    }
+    WebRtcSpl_VectorBitShiftW16(bTmp16, kVectorSize, b16, 1);
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ((B[kk]>>1), bTmp16[kk]);
+    }
+    WebRtcSpl_VectorBitShiftW32(bTmp32, kVectorSize, b32, 1);
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ((B[kk]>>1), bTmp32[kk]);
+    }
+
+    WebRtcSpl_MemCpyReversedOrder(&bTmp16[3], b16, kVectorSize);
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(b16[3-kk], bTmp16[kk]);
+    }
+}
+
+TEST_F(SplTest, MinMaxOperationsTest) {
+  const size_t kVectorSize = 17;
+
+  // Vectors to test the cases where minimum values have to be caught
+  // outside of the unrolled loops in ARM-Neon.
+  int16_t vector16[kVectorSize] = {-1, 7485, 0, 3333,
+      -18283, 0, 12334, -29871, 988, -3333,
+      345, -456, 222, 999,  888, 8774, WEBRTC_SPL_WORD16_MIN};
+  int32_t vector32[kVectorSize] = {-1, 0, 283211, 3333,
+      8712345, 0, -3333, 89345, -374585456, 222, 999, 122345334,
+      -12389756, -987329871, 888, -2, WEBRTC_SPL_WORD32_MIN};
+
+  EXPECT_EQ(WEBRTC_SPL_WORD16_MIN,
+            WebRtcSpl_MinValueW16(vector16, kVectorSize));
+  EXPECT_EQ(WEBRTC_SPL_WORD32_MIN,
+            WebRtcSpl_MinValueW32(vector32, kVectorSize));
+  EXPECT_EQ(kVectorSize - 1, WebRtcSpl_MinIndexW16(vector16, kVectorSize));
+  EXPECT_EQ(kVectorSize - 1, WebRtcSpl_MinIndexW32(vector32, kVectorSize));
+
+  // Test the cases where maximum values have to be caught
+  // outside of the unrolled loops in ARM-Neon.
+  vector16[kVectorSize - 1] = WEBRTC_SPL_WORD16_MAX;
+  vector32[kVectorSize - 1] = WEBRTC_SPL_WORD32_MAX;
+
+  EXPECT_EQ(WEBRTC_SPL_WORD16_MAX,
+            WebRtcSpl_MaxAbsValueW16(vector16, kVectorSize));
+  EXPECT_EQ(WEBRTC_SPL_WORD16_MAX,
+            WebRtcSpl_MaxValueW16(vector16, kVectorSize));
+  EXPECT_EQ(WEBRTC_SPL_WORD32_MAX,
+            WebRtcSpl_MaxAbsValueW32(vector32, kVectorSize));
+  EXPECT_EQ(WEBRTC_SPL_WORD32_MAX,
+            WebRtcSpl_MaxValueW32(vector32, kVectorSize));
+  EXPECT_EQ(kVectorSize - 1, WebRtcSpl_MaxAbsIndexW16(vector16, kVectorSize));
+  EXPECT_EQ(kVectorSize - 1, WebRtcSpl_MaxIndexW16(vector16, kVectorSize));
+  EXPECT_EQ(kVectorSize - 1, WebRtcSpl_MaxIndexW32(vector32, kVectorSize));
+
+  // Test the cases where multiple maximum and minimum values are present.
+  vector16[1] = WEBRTC_SPL_WORD16_MAX;
+  vector16[6] = WEBRTC_SPL_WORD16_MIN;
+  vector16[11] = WEBRTC_SPL_WORD16_MIN;
+  vector32[1] = WEBRTC_SPL_WORD32_MAX;
+  vector32[6] = WEBRTC_SPL_WORD32_MIN;
+  vector32[11] = WEBRTC_SPL_WORD32_MIN;
+
+  EXPECT_EQ(WEBRTC_SPL_WORD16_MAX,
+            WebRtcSpl_MaxAbsValueW16(vector16, kVectorSize));
+  EXPECT_EQ(WEBRTC_SPL_WORD16_MAX,
+            WebRtcSpl_MaxValueW16(vector16, kVectorSize));
+  EXPECT_EQ(WEBRTC_SPL_WORD16_MIN,
+            WebRtcSpl_MinValueW16(vector16, kVectorSize));
+  EXPECT_EQ(WEBRTC_SPL_WORD32_MAX,
+            WebRtcSpl_MaxAbsValueW32(vector32, kVectorSize));
+  EXPECT_EQ(WEBRTC_SPL_WORD32_MAX,
+            WebRtcSpl_MaxValueW32(vector32, kVectorSize));
+  EXPECT_EQ(WEBRTC_SPL_WORD32_MIN,
+            WebRtcSpl_MinValueW32(vector32, kVectorSize));
+  EXPECT_EQ(6u, WebRtcSpl_MaxAbsIndexW16(vector16, kVectorSize));
+  EXPECT_EQ(1u, WebRtcSpl_MaxIndexW16(vector16, kVectorSize));
+  EXPECT_EQ(1u, WebRtcSpl_MaxIndexW32(vector32, kVectorSize));
+  EXPECT_EQ(6u, WebRtcSpl_MinIndexW16(vector16, kVectorSize));
+  EXPECT_EQ(6u, WebRtcSpl_MinIndexW32(vector32, kVectorSize));
+}
+
+TEST_F(SplTest, VectorOperationsTest) {
+    const size_t kVectorSize = 4;
+    int B[] = {4, 12, 133, 1100};
+    int16_t a16[kVectorSize];
+    int16_t b16[kVectorSize];
+    int16_t bTmp16[kVectorSize];
+
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        a16[kk] = B[kk];
+        b16[kk] = B[kk];
+    }
+
+    WebRtcSpl_AffineTransformVector(bTmp16, b16, 3, 7, 2, kVectorSize);
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ((B[kk]*3+7)>>2, bTmp16[kk]);
+    }
+    WebRtcSpl_ScaleAndAddVectorsWithRound(b16, 3, b16, 2, 2, bTmp16,
+                                          kVectorSize);
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ((B[kk]*3+B[kk]*2+2)>>2, bTmp16[kk]);
+    }
+
+    WebRtcSpl_AddAffineVectorToVector(bTmp16, b16, 3, 7, 2, kVectorSize);
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(((B[kk]*3+B[kk]*2+2)>>2)+((b16[kk]*3+7)>>2), bTmp16[kk]);
+    }
+
+    WebRtcSpl_ScaleVector(b16, bTmp16, 13, kVectorSize, 2);
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ((b16[kk]*13)>>2, bTmp16[kk]);
+    }
+    WebRtcSpl_ScaleVectorWithSat(b16, bTmp16, 13, kVectorSize, 2);
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ((b16[kk]*13)>>2, bTmp16[kk]);
+    }
+    WebRtcSpl_ScaleAndAddVectors(a16, 13, 2, b16, 7, 2, bTmp16, kVectorSize);
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(((a16[kk]*13)>>2)+((b16[kk]*7)>>2), bTmp16[kk]);
+    }
+
+    WebRtcSpl_AddVectorsAndShift(bTmp16, a16, b16, kVectorSize, 2);
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(B[kk] >> 1, bTmp16[kk]);
+    }
+    WebRtcSpl_ReverseOrderMultArrayElements(bTmp16, a16, &b16[3],
+                                            kVectorSize, 2);
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ((a16[kk]*b16[3-kk])>>2, bTmp16[kk]);
+    }
+    WebRtcSpl_ElementwiseVectorMult(bTmp16, a16, b16, kVectorSize, 6);
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ((a16[kk]*b16[kk])>>6, bTmp16[kk]);
+    }
+
+    WebRtcSpl_SqrtOfOneMinusXSquared(b16, kVectorSize, bTmp16);
+    for (size_t kk = 0; kk < kVectorSize - 1; ++kk) {
+        EXPECT_EQ(32767, bTmp16[kk]);
+    }
+    EXPECT_EQ(32749, bTmp16[kVectorSize - 1]);
+
+    EXPECT_EQ(0, WebRtcSpl_GetScalingSquare(b16, kVectorSize, 1));
+}
+
+TEST_F(SplTest, EstimatorsTest) {
+  const size_t kOrder = 2;
+  const int32_t unstable_filter[] = { 4, 12, 133, 1100 };
+  const int32_t stable_filter[] = { 1100, 133, 12, 4 };
+  int16_t lpc[kOrder + 2] = { 0 };
+  int16_t refl[kOrder + 2] = { 0 };
+  int16_t lpc_result[] = { 4096, -497, 15, 0 };
+  int16_t refl_result[] = { -3962, 123, 0, 0 };
+
+  EXPECT_EQ(0, WebRtcSpl_LevinsonDurbin(unstable_filter, lpc, refl, kOrder));
+  EXPECT_EQ(1, WebRtcSpl_LevinsonDurbin(stable_filter, lpc, refl, kOrder));
+  for (size_t i = 0; i < kOrder + 2; ++i) {
+    EXPECT_EQ(lpc_result[i], lpc[i]);
+    EXPECT_EQ(refl_result[i], refl[i]);
+  }
+}
+
+TEST_F(SplTest, FilterTest) {
+    const size_t kVectorSize = 4;
+    const size_t kFilterOrder = 3;
+    int16_t A[] = {1, 2, 33, 100};
+    int16_t A5[] = {1, 2, 33, 100, -5};
+    int16_t B[] = {4, 12, 133, 110};
+    int16_t data_in[kVectorSize];
+    int16_t data_out[kVectorSize];
+    int16_t bTmp16Low[kVectorSize];
+    int16_t bState[kVectorSize];
+    int16_t bStateLow[kVectorSize];
+
+    WebRtcSpl_ZerosArrayW16(bState, kVectorSize);
+    WebRtcSpl_ZerosArrayW16(bStateLow, kVectorSize);
+
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        data_in[kk] = A[kk];
+        data_out[kk] = 0;
+    }
+
+    // MA filters.
+    // Note that the input data has |kFilterOrder| states before the actual
+    // data (one sample).
+    WebRtcSpl_FilterMAFastQ12(&data_in[kFilterOrder], data_out, B,
+                              kFilterOrder + 1, 1);
+    EXPECT_EQ(0, data_out[0]);
+    // AR filters.
+    // Note that the output data has |kFilterOrder| states before the actual
+    // data (one sample).
+    WebRtcSpl_FilterARFastQ12(data_in, &data_out[kFilterOrder], A,
+                              kFilterOrder + 1, 1);
+    EXPECT_EQ(0, data_out[kFilterOrder]);
+
+    EXPECT_EQ(kVectorSize, WebRtcSpl_FilterAR(A5,
+                                              5,
+                                              data_in,
+                                              kVectorSize,
+                                              bState,
+                                              kVectorSize,
+                                              bStateLow,
+                                              kVectorSize,
+                                              data_out,
+                                              bTmp16Low,
+                                              kVectorSize));
+}
+
+TEST_F(SplTest, RandTest) {
+    const int kVectorSize = 4;
+    int16_t BU[] = {3653, 12446, 8525, 30691};
+    int16_t b16[kVectorSize];
+    uint32_t bSeed = 100000;
+
+    EXPECT_EQ(7086, WebRtcSpl_RandU(&bSeed));
+    EXPECT_EQ(31565, WebRtcSpl_RandU(&bSeed));
+    EXPECT_EQ(-9786, WebRtcSpl_RandN(&bSeed));
+    EXPECT_EQ(kVectorSize, WebRtcSpl_RandUArray(b16, kVectorSize, &bSeed));
+    for (int kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(BU[kk], b16[kk]);
+    }
+}
+
+TEST_F(SplTest, DotProductWithScaleTest) {
+  EXPECT_EQ(605362796, WebRtcSpl_DotProductWithScale(vector16,
+      vector16, kVector16Size, 2));
+}
+
+TEST_F(SplTest, CrossCorrelationTest) {
+  // Note the function arguments relation specificed by API.
+  const size_t kCrossCorrelationDimension = 3;
+  const int kShift = 2;
+  const int kStep = 1;
+  const size_t kSeqDimension = 6;
+
+  const int16_t kVector16[kVector16Size] = {1, 4323, 1963,
+    WEBRTC_SPL_WORD16_MAX, WEBRTC_SPL_WORD16_MIN + 5, -3333, -876, 8483, 142};
+  int32_t vector32[kCrossCorrelationDimension] = {0};
+
+  WebRtcSpl_CrossCorrelation(vector32, vector16, kVector16, kSeqDimension,
+                             kCrossCorrelationDimension, kShift, kStep);
+
+  // WebRtcSpl_CrossCorrelationC() and WebRtcSpl_CrossCorrelationNeon()
+  // are not bit-exact.
+  const int32_t kExpected[kCrossCorrelationDimension] =
+      {-266947903, -15579555, -171282001};
+  const int32_t* expected = kExpected;
+#if !defined(MIPS32_LE)
+  const int32_t kExpectedNeon[kCrossCorrelationDimension] =
+      {-266947901, -15579553, -171281999};
+  if (WebRtcSpl_CrossCorrelation != WebRtcSpl_CrossCorrelationC) {
+    expected = kExpectedNeon;
+  }
+#endif
+  for (size_t i = 0; i < kCrossCorrelationDimension; ++i) {
+    EXPECT_EQ(expected[i], vector32[i]);
+  }
+}
+
+TEST_F(SplTest, AutoCorrelationTest) {
+  int scale = 0;
+  int32_t vector32[kVector16Size];
+  const int32_t expected[kVector16Size] = {302681398, 14223410, -121705063,
+    -85221647, -17104971, 61806945, 6644603, -669329, 43};
+
+  EXPECT_EQ(kVector16Size,
+            WebRtcSpl_AutoCorrelation(vector16, kVector16Size,
+                                      kVector16Size - 1, vector32, &scale));
+  EXPECT_EQ(3, scale);
+  for (size_t i = 0; i < kVector16Size; ++i) {
+    EXPECT_EQ(expected[i], vector32[i]);
+  }
+}
+
+TEST_F(SplTest, SignalProcessingTest) {
+    const size_t kVectorSize = 4;
+    int A[] = {1, 2, 33, 100};
+    const int16_t kHanning[4] = { 2399, 8192, 13985, 16384 };
+    int16_t b16[kVectorSize];
+
+    int16_t bTmp16[kVectorSize];
+
+    int bScale = 0;
+
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        b16[kk] = A[kk];
+    }
+
+    // TODO(bjornv): Activate the Reflection Coefficient tests when refactoring.
+//    WebRtcSpl_ReflCoefToLpc(b16, kVectorSize, bTmp16);
+////    for (int kk = 0; kk < kVectorSize; ++kk) {
+////        EXPECT_EQ(aTmp16[kk], bTmp16[kk]);
+////    }
+//    WebRtcSpl_LpcToReflCoef(bTmp16, kVectorSize, b16);
+////    for (int kk = 0; kk < kVectorSize; ++kk) {
+////        EXPECT_EQ(a16[kk], b16[kk]);
+////    }
+//    WebRtcSpl_AutoCorrToReflCoef(b32, kVectorSize, bTmp16);
+////    for (int kk = 0; kk < kVectorSize; ++kk) {
+////        EXPECT_EQ(aTmp16[kk], bTmp16[kk]);
+////    }
+
+    WebRtcSpl_GetHanningWindow(bTmp16, kVectorSize);
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        EXPECT_EQ(kHanning[kk], bTmp16[kk]);
+    }
+
+    for (size_t kk = 0; kk < kVectorSize; ++kk) {
+        b16[kk] = A[kk];
+    }
+    EXPECT_EQ(11094 , WebRtcSpl_Energy(b16, kVectorSize, &bScale));
+    EXPECT_EQ(0, bScale);
+}
+
+TEST_F(SplTest, FFTTest) {
+    int16_t B[] = {1, 2, 33, 100,
+            2, 3, 34, 101,
+            3, 4, 35, 102,
+            4, 5, 36, 103};
+
+    EXPECT_EQ(0, WebRtcSpl_ComplexFFT(B, 3, 1));
+//    for (int kk = 0; kk < 16; ++kk) {
+//        EXPECT_EQ(A[kk], B[kk]);
+//    }
+    EXPECT_EQ(0, WebRtcSpl_ComplexIFFT(B, 3, 1));
+//    for (int kk = 0; kk < 16; ++kk) {
+//        EXPECT_EQ(A[kk], B[kk]);
+//    }
+    WebRtcSpl_ComplexBitReverse(B, 3);
+    for (int kk = 0; kk < 16; ++kk) {
+//      EXPECT_EQ(A[kk], B[kk]);
+    }
+}
+
+TEST_F(SplTest, Resample48WithSaturationTest) {
+  // The test resamples 3*kBlockSize number of samples to 2*kBlockSize number
+  // of samples.
+  const size_t kBlockSize = 16;
+
+  // Saturated input vector of 48 samples.
+  const int32_t kVectorSaturated[3 * kBlockSize + 7] = {
+     -32768, -32768, -32768, -32768, -32768, -32768, -32768, -32768,
+     -32768, -32768, -32768, -32768, -32768, -32768, -32768, -32768,
+     -32768, -32768, -32768, -32768, -32768, -32768, -32768, -32768,
+     32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767,
+     32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767,
+     32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767,
+     32767, 32767, 32767, 32767, 32767, 32767, 32767
+  };
+
+  // All values in |out_vector| should be |kRefValue32kHz|.
+  const int32_t kRefValue32kHz1 = -1077493760;
+  const int32_t kRefValue32kHz2 = 1077493645;
+
+  // After bit shift with saturation, |out_vector_w16| is saturated.
+
+  const int16_t kRefValue16kHz1 = -32768;
+  const int16_t kRefValue16kHz2 = 32767;
+  // Vector for storing output.
+  int32_t out_vector[2 * kBlockSize];
+  int16_t out_vector_w16[2 * kBlockSize];
+
+  WebRtcSpl_Resample48khzTo32khz(kVectorSaturated, out_vector, kBlockSize);
+  WebRtcSpl_VectorBitShiftW32ToW16(out_vector_w16, 2 * kBlockSize, out_vector,
+                                   15);
+
+  // Comparing output values against references. The values at position
+  // 12-15 are skipped to account for the filter lag.
+  for (size_t i = 0; i < 12; ++i) {
+    EXPECT_EQ(kRefValue32kHz1, out_vector[i]);
+    EXPECT_EQ(kRefValue16kHz1, out_vector_w16[i]);
+  }
+  for (size_t i = 16; i < 2 * kBlockSize; ++i) {
+    EXPECT_EQ(kRefValue32kHz2, out_vector[i]);
+    EXPECT_EQ(kRefValue16kHz2, out_vector_w16[i]);
+  }
+}
diff --git a/common_audio/signal_processing/spl_init.c b/common_audio/signal_processing/spl_init.c
new file mode 100644
index 0000000..0f41bc1
--- /dev/null
+++ b/common_audio/signal_processing/spl_init.c
@@ -0,0 +1,133 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/* The global function contained in this file initializes SPL function
+ * pointers, currently only for ARM platforms.
+ *
+ * Some code came from common/rtcd.c in the WebM project.
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+
+/* Declare function pointers. */
+MaxAbsValueW16 WebRtcSpl_MaxAbsValueW16;
+MaxAbsValueW32 WebRtcSpl_MaxAbsValueW32;
+MaxValueW16 WebRtcSpl_MaxValueW16;
+MaxValueW32 WebRtcSpl_MaxValueW32;
+MinValueW16 WebRtcSpl_MinValueW16;
+MinValueW32 WebRtcSpl_MinValueW32;
+CrossCorrelation WebRtcSpl_CrossCorrelation;
+DownsampleFast WebRtcSpl_DownsampleFast;
+ScaleAndAddVectorsWithRound WebRtcSpl_ScaleAndAddVectorsWithRound;
+
+#if (!defined(WEBRTC_HAS_NEON)) && !defined(MIPS32_LE)
+/* Initialize function pointers to the generic C version. */
+static void InitPointersToC() {
+  WebRtcSpl_MaxAbsValueW16 = WebRtcSpl_MaxAbsValueW16C;
+  WebRtcSpl_MaxAbsValueW32 = WebRtcSpl_MaxAbsValueW32C;
+  WebRtcSpl_MaxValueW16 = WebRtcSpl_MaxValueW16C;
+  WebRtcSpl_MaxValueW32 = WebRtcSpl_MaxValueW32C;
+  WebRtcSpl_MinValueW16 = WebRtcSpl_MinValueW16C;
+  WebRtcSpl_MinValueW32 = WebRtcSpl_MinValueW32C;
+  WebRtcSpl_CrossCorrelation = WebRtcSpl_CrossCorrelationC;
+  WebRtcSpl_DownsampleFast = WebRtcSpl_DownsampleFastC;
+  WebRtcSpl_ScaleAndAddVectorsWithRound =
+      WebRtcSpl_ScaleAndAddVectorsWithRoundC;
+}
+#endif
+
+#if defined(WEBRTC_HAS_NEON)
+/* Initialize function pointers to the Neon version. */
+static void InitPointersToNeon() {
+  WebRtcSpl_MaxAbsValueW16 = WebRtcSpl_MaxAbsValueW16Neon;
+  WebRtcSpl_MaxAbsValueW32 = WebRtcSpl_MaxAbsValueW32Neon;
+  WebRtcSpl_MaxValueW16 = WebRtcSpl_MaxValueW16Neon;
+  WebRtcSpl_MaxValueW32 = WebRtcSpl_MaxValueW32Neon;
+  WebRtcSpl_MinValueW16 = WebRtcSpl_MinValueW16Neon;
+  WebRtcSpl_MinValueW32 = WebRtcSpl_MinValueW32Neon;
+  WebRtcSpl_CrossCorrelation = WebRtcSpl_CrossCorrelationNeon;
+  WebRtcSpl_DownsampleFast = WebRtcSpl_DownsampleFastNeon;
+  WebRtcSpl_ScaleAndAddVectorsWithRound =
+      WebRtcSpl_ScaleAndAddVectorsWithRoundC;
+}
+#endif
+
+#if defined(MIPS32_LE)
+/* Initialize function pointers to the MIPS version. */
+static void InitPointersToMIPS() {
+  WebRtcSpl_MaxAbsValueW16 = WebRtcSpl_MaxAbsValueW16_mips;
+  WebRtcSpl_MaxValueW16 = WebRtcSpl_MaxValueW16_mips;
+  WebRtcSpl_MaxValueW32 = WebRtcSpl_MaxValueW32_mips;
+  WebRtcSpl_MinValueW16 = WebRtcSpl_MinValueW16_mips;
+  WebRtcSpl_MinValueW32 = WebRtcSpl_MinValueW32_mips;
+  WebRtcSpl_CrossCorrelation = WebRtcSpl_CrossCorrelation_mips;
+  WebRtcSpl_DownsampleFast = WebRtcSpl_DownsampleFast_mips;
+#if defined(MIPS_DSP_R1_LE)
+  WebRtcSpl_MaxAbsValueW32 = WebRtcSpl_MaxAbsValueW32_mips;
+  WebRtcSpl_ScaleAndAddVectorsWithRound =
+      WebRtcSpl_ScaleAndAddVectorsWithRound_mips;
+#else
+  WebRtcSpl_MaxAbsValueW32 = WebRtcSpl_MaxAbsValueW32C;
+  WebRtcSpl_ScaleAndAddVectorsWithRound =
+      WebRtcSpl_ScaleAndAddVectorsWithRoundC;
+#endif
+}
+#endif
+
+static void InitFunctionPointers(void) {
+#if defined(WEBRTC_HAS_NEON)
+  InitPointersToNeon();
+#elif defined(MIPS32_LE)
+  InitPointersToMIPS();
+#else
+  InitPointersToC();
+#endif  /* WEBRTC_HAS_NEON */
+}
+
+#if defined(WEBRTC_POSIX)
+#include <pthread.h>
+
+static void once(void (*func)(void)) {
+  static pthread_once_t lock = PTHREAD_ONCE_INIT;
+  pthread_once(&lock, func);
+}
+
+#elif defined(_WIN32)
+#include <windows.h>
+
+static void once(void (*func)(void)) {
+  /* Didn't use InitializeCriticalSection() since there's no race-free context
+   * in which to execute it.
+   *
+   * TODO(kma): Change to different implementation (e.g.
+   * InterlockedCompareExchangePointer) to avoid issues similar to
+   * http://code.google.com/p/webm/issues/detail?id=467.
+   */
+  static CRITICAL_SECTION lock = {(void *)((size_t)-1), -1, 0, 0, 0, 0};
+  static int done = 0;
+
+  EnterCriticalSection(&lock);
+  if (!done) {
+    func();
+    done = 1;
+  }
+  LeaveCriticalSection(&lock);
+}
+
+/* There's no fallback version as an #else block here to ensure thread safety.
+ * In case of neither pthread for WEBRTC_POSIX nor _WIN32 is present, build
+ * system should pick it up.
+ */
+#endif  /* WEBRTC_POSIX */
+
+void WebRtcSpl_Init() {
+  once(InitFunctionPointers);
+}
diff --git a/common_audio/signal_processing/spl_inl.c b/common_audio/signal_processing/spl_inl.c
new file mode 100644
index 0000000..d09e308
--- /dev/null
+++ b/common_audio/signal_processing/spl_inl.c
@@ -0,0 +1,24 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdint.h>
+
+#include "common_audio/signal_processing/include/spl_inl.h"
+
+// Table used by WebRtcSpl_CountLeadingZeros32_NotBuiltin. For each uint32_t n
+// that's a sequence of 0 bits followed by a sequence of 1 bits, the entry at
+// index (n * 0x8c0b2891) >> 26 in this table gives the number of zero bits in
+// n.
+const int8_t kWebRtcSpl_CountLeadingZeros32_Table[64] = {
+    32, 8,  17, -1, -1, 14, -1, -1, -1, 20, -1, -1, -1, 28, -1, 18,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0,  26, 25, 24,
+    4,  11, 23, 31, 3,  7,  10, 16, 22, 30, -1, -1, 2,  6,  13, 9,
+    -1, 15, -1, 21, -1, 29, 19, -1, -1, -1, -1, -1, 1,  27, 5,  12,
+};
diff --git a/common_audio/signal_processing/spl_sqrt.c b/common_audio/signal_processing/spl_sqrt.c
new file mode 100644
index 0000000..cf9448a
--- /dev/null
+++ b/common_audio/signal_processing/spl_sqrt.c
@@ -0,0 +1,194 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains the function WebRtcSpl_Sqrt().
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#include "rtc_base/checks.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+int32_t WebRtcSpl_SqrtLocal(int32_t in);
+
+int32_t WebRtcSpl_SqrtLocal(int32_t in)
+{
+
+    int16_t x_half, t16;
+    int32_t A, B, x2;
+
+    /* The following block performs:
+     y=in/2
+     x=y-2^30
+     x_half=x/2^31
+     t = 1 + (x_half) - 0.5*((x_half)^2) + 0.5*((x_half)^3) - 0.625*((x_half)^4)
+         + 0.875*((x_half)^5)
+     */
+
+    B = in / 2;
+
+    B = B - ((int32_t)0x40000000); // B = in/2 - 1/2
+    x_half = (int16_t)(B >> 16);  // x_half = x/2 = (in-1)/2
+    B = B + ((int32_t)0x40000000); // B = 1 + x/2
+    B = B + ((int32_t)0x40000000); // Add 0.5 twice (since 1.0 does not exist in Q31)
+
+    x2 = ((int32_t)x_half) * ((int32_t)x_half) * 2; // A = (x/2)^2
+    A = -x2; // A = -(x/2)^2
+    B = B + (A >> 1); // B = 1 + x/2 - 0.5*(x/2)^2
+
+    A >>= 16;
+    A = A * A * 2; // A = (x/2)^4
+    t16 = (int16_t)(A >> 16);
+    B += -20480 * t16 * 2;  // B = B - 0.625*A
+    // After this, B = 1 + x/2 - 0.5*(x/2)^2 - 0.625*(x/2)^4
+
+    A = x_half * t16 * 2;  // A = (x/2)^5
+    t16 = (int16_t)(A >> 16);
+    B += 28672 * t16 * 2;  // B = B + 0.875*A
+    // After this, B = 1 + x/2 - 0.5*(x/2)^2 - 0.625*(x/2)^4 + 0.875*(x/2)^5
+
+    t16 = (int16_t)(x2 >> 16);
+    A = x_half * t16 * 2;  // A = x/2^3
+
+    B = B + (A >> 1); // B = B + 0.5*A
+    // After this, B = 1 + x/2 - 0.5*(x/2)^2 + 0.5*(x/2)^3 - 0.625*(x/2)^4 + 0.875*(x/2)^5
+
+    B = B + ((int32_t)32768); // Round off bit
+
+    return B;
+}
+
+int32_t WebRtcSpl_Sqrt(int32_t value)
+{
+    /*
+     Algorithm:
+
+     Six term Taylor Series is used here to compute the square root of a number
+     y^0.5 = (1+x)^0.5 where x = y-1
+     = 1+(x/2)-0.5*((x/2)^2+0.5*((x/2)^3-0.625*((x/2)^4+0.875*((x/2)^5)
+     0.5 <= x < 1
+
+     Example of how the algorithm works, with ut=sqrt(in), and
+     with in=73632 and ut=271 (even shift value case):
+
+     in=73632
+     y= in/131072
+     x=y-1
+     t = 1 + (x/2) - 0.5*((x/2)^2) + 0.5*((x/2)^3) - 0.625*((x/2)^4) + 0.875*((x/2)^5)
+     ut=t*(1/sqrt(2))*512
+
+     or:
+
+     in=73632
+     in2=73632*2^14
+     y= in2/2^31
+     x=y-1
+     t = 1 + (x/2) - 0.5*((x/2)^2) + 0.5*((x/2)^3) - 0.625*((x/2)^4) + 0.875*((x/2)^5)
+     ut=t*(1/sqrt(2))
+     ut2=ut*2^9
+
+     which gives:
+
+     in  = 73632
+     in2 = 1206386688
+     y   = 0.56176757812500
+     x   = -0.43823242187500
+     t   = 0.74973506527313
+     ut  = 0.53014274874797
+     ut2 = 2.714330873589594e+002
+
+     or:
+
+     in=73632
+     in2=73632*2^14
+     y=in2/2
+     x=y-2^30
+     x_half=x/2^31
+     t = 1 + (x_half) - 0.5*((x_half)^2) + 0.5*((x_half)^3) - 0.625*((x_half)^4)
+         + 0.875*((x_half)^5)
+     ut=t*(1/sqrt(2))
+     ut2=ut*2^9
+
+     which gives:
+
+     in  = 73632
+     in2 = 1206386688
+     y   = 603193344
+     x   = -470548480
+     x_half =  -0.21911621093750
+     t   = 0.74973506527313
+     ut  = 0.53014274874797
+     ut2 = 2.714330873589594e+002
+
+     */
+
+    int16_t x_norm, nshift, t16, sh;
+    int32_t A;
+
+    int16_t k_sqrt_2 = 23170; // 1/sqrt2 (==5a82)
+
+    A = value;
+
+    // The convention in this function is to calculate sqrt(abs(A)). Negate the
+    // input if it is negative.
+    if (A < 0) {
+        if (A == WEBRTC_SPL_WORD32_MIN) {
+            // This number cannot be held in an int32_t after negating.
+            // Map it to the maximum positive value.
+            A = WEBRTC_SPL_WORD32_MAX;
+        } else {
+            A = -A;
+        }
+    } else if (A == 0) {
+        return 0;  // sqrt(0) = 0
+    }
+
+    sh = WebRtcSpl_NormW32(A); // # shifts to normalize A
+    A = WEBRTC_SPL_LSHIFT_W32(A, sh); // Normalize A
+    if (A < (WEBRTC_SPL_WORD32_MAX - 32767))
+    {
+        A = A + ((int32_t)32768); // Round off bit
+    } else
+    {
+        A = WEBRTC_SPL_WORD32_MAX;
+    }
+
+    x_norm = (int16_t)(A >> 16);  // x_norm = AH
+
+    nshift = (sh / 2);
+    RTC_DCHECK_GE(nshift, 0);
+
+    A = (int32_t)WEBRTC_SPL_LSHIFT_W32((int32_t)x_norm, 16);
+    A = WEBRTC_SPL_ABS_W32(A); // A = abs(x_norm<<16)
+    A = WebRtcSpl_SqrtLocal(A); // A = sqrt(A)
+
+    if (2 * nshift == sh) {
+        // Even shift value case
+
+        t16 = (int16_t)(A >> 16);  // t16 = AH
+
+        A = k_sqrt_2 * t16 * 2;  // A = 1/sqrt(2)*t16
+        A = A + ((int32_t)32768); // Round off
+        A = A & ((int32_t)0x7fff0000); // Round off
+
+        A >>= 15;  // A = A>>16
+
+    } else
+    {
+        A >>= 16;  // A = A>>16
+    }
+
+    A = A & ((int32_t)0x0000ffff);
+    A >>= nshift;  // De-normalize the result.
+
+    return A;
+}
diff --git a/common_audio/signal_processing/spl_sqrt_floor.c b/common_audio/signal_processing/spl_sqrt_floor.c
new file mode 100644
index 0000000..7141386
--- /dev/null
+++ b/common_audio/signal_processing/spl_sqrt_floor.c
@@ -0,0 +1,77 @@
+/*
+ * Written by Wilco Dijkstra, 1996. The following email exchange establishes the
+ * license.
+ *
+ * From: Wilco Dijkstra <Wilco.Dijkstra@ntlworld.com>
+ * Date: Fri, Jun 24, 2011 at 3:20 AM
+ * Subject: Re: sqrt routine
+ * To: Kevin Ma <kma@google.com>
+ * Hi Kevin,
+ * Thanks for asking. Those routines are public domain (originally posted to
+ * comp.sys.arm a long time ago), so you can use them freely for any purpose.
+ * Cheers,
+ * Wilco
+ *
+ * ----- Original Message -----
+ * From: "Kevin Ma" <kma@google.com>
+ * To: <Wilco.Dijkstra@ntlworld.com>
+ * Sent: Thursday, June 23, 2011 11:44 PM
+ * Subject: Fwd: sqrt routine
+ * Hi Wilco,
+ * I saw your sqrt routine from several web sites, including
+ * http://www.finesse.demon.co.uk/steven/sqrt.html.
+ * Just wonder if there's any copyright information with your Successive
+ * approximation routines, or if I can freely use it for any purpose.
+ * Thanks.
+ * Kevin
+ */
+
+// Minor modifications in code style for WebRTC, 2012.
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+/*
+ * Algorithm:
+ * Successive approximation of the equation (root + delta) ^ 2 = N
+ * until delta < 1. If delta < 1 we have the integer part of SQRT (N).
+ * Use delta = 2^i for i = 15 .. 0.
+ *
+ * Output precision is 16 bits. Note for large input values (close to
+ * 0x7FFFFFFF), bit 15 (the highest bit of the low 16-bit half word)
+ * contains the MSB information (a non-sign value). Do with caution
+ * if you need to cast the output to int16_t type.
+ *
+ * If the input value is negative, it returns 0.
+ */
+
+#define WEBRTC_SPL_SQRT_ITER(N)                 \
+  try1 = root + (1 << (N));                     \
+  if (value >= try1 << (N))                     \
+  {                                             \
+    value -= try1 << (N);                       \
+    root |= 2 << (N);                           \
+  }
+
+int32_t WebRtcSpl_SqrtFloor(int32_t value)
+{
+  int32_t root = 0, try1;
+
+  WEBRTC_SPL_SQRT_ITER (15);
+  WEBRTC_SPL_SQRT_ITER (14);
+  WEBRTC_SPL_SQRT_ITER (13);
+  WEBRTC_SPL_SQRT_ITER (12);
+  WEBRTC_SPL_SQRT_ITER (11);
+  WEBRTC_SPL_SQRT_ITER (10);
+  WEBRTC_SPL_SQRT_ITER ( 9);
+  WEBRTC_SPL_SQRT_ITER ( 8);
+  WEBRTC_SPL_SQRT_ITER ( 7);
+  WEBRTC_SPL_SQRT_ITER ( 6);
+  WEBRTC_SPL_SQRT_ITER ( 5);
+  WEBRTC_SPL_SQRT_ITER ( 4);
+  WEBRTC_SPL_SQRT_ITER ( 3);
+  WEBRTC_SPL_SQRT_ITER ( 2);
+  WEBRTC_SPL_SQRT_ITER ( 1);
+  WEBRTC_SPL_SQRT_ITER ( 0);
+
+  return root >> 1;
+}
diff --git a/common_audio/signal_processing/spl_sqrt_floor_arm.S b/common_audio/signal_processing/spl_sqrt_floor_arm.S
new file mode 100644
index 0000000..29e6d4d
--- /dev/null
+++ b/common_audio/signal_processing/spl_sqrt_floor_arm.S
@@ -0,0 +1,110 @@
+@
+@ Written by Wilco Dijkstra, 1996. The following email exchange establishes the
+@ license.
+@
+@ From: Wilco Dijkstra <Wilco.Dijkstra@ntlworld.com>
+@ Date: Fri, Jun 24, 2011 at 3:20 AM
+@ Subject: Re: sqrt routine
+@ To: Kevin Ma <kma@google.com>
+@ Hi Kevin,
+@ Thanks for asking. Those routines are public domain (originally posted to
+@ comp.sys.arm a long time ago), so you can use them freely for any purpose.
+@ Cheers,
+@ Wilco
+@
+@ ----- Original Message -----
+@ From: "Kevin Ma" <kma@google.com>
+@ To: <Wilco.Dijkstra@ntlworld.com>
+@ Sent: Thursday, June 23, 2011 11:44 PM
+@ Subject: Fwd: sqrt routine
+@ Hi Wilco,
+@ I saw your sqrt routine from several web sites, including
+@ http://www.finesse.demon.co.uk/steven/sqrt.html.
+@ Just wonder if there's any copyright information with your Successive
+@ approximation routines, or if I can freely use it for any purpose.
+@ Thanks.
+@ Kevin
+
+@ Minor modifications in code style for WebRTC, 2012.
+@ Output is bit-exact with the reference C code in spl_sqrt_floor.c.
+
+@ Input :             r0 32 bit unsigned integer
+@ Output:             r0 = INT (SQRT (r0)), precision is 16 bits
+@ Registers touched:  r1, r2
+
+#include "system_wrappers/include/asm_defines.h"
+
+GLOBAL_FUNCTION WebRtcSpl_SqrtFloor
+.align  2
+DEFINE_FUNCTION WebRtcSpl_SqrtFloor
+  mov    r1, #3 << 30
+  mov    r2, #1 << 30
+
+  @ unroll for i = 0 .. 15
+
+  cmp    r0, r2, ror #2 * 0
+  subhs  r0, r0, r2, ror #2 * 0
+  adc    r2, r1, r2, lsl #1
+
+  cmp    r0, r2, ror #2 * 1
+  subhs  r0, r0, r2, ror #2 * 1
+  adc    r2, r1, r2, lsl #1
+
+  cmp    r0, r2, ror #2 * 2
+  subhs  r0, r0, r2, ror #2 * 2
+  adc    r2, r1, r2, lsl #1
+
+  cmp    r0, r2, ror #2 * 3
+  subhs  r0, r0, r2, ror #2 * 3
+  adc    r2, r1, r2, lsl #1
+
+  cmp    r0, r2, ror #2 * 4
+  subhs  r0, r0, r2, ror #2 * 4
+  adc    r2, r1, r2, lsl #1
+
+  cmp    r0, r2, ror #2 * 5
+  subhs  r0, r0, r2, ror #2 * 5
+  adc    r2, r1, r2, lsl #1
+
+  cmp    r0, r2, ror #2 * 6
+  subhs  r0, r0, r2, ror #2 * 6
+  adc    r2, r1, r2, lsl #1
+
+  cmp    r0, r2, ror #2 * 7
+  subhs  r0, r0, r2, ror #2 * 7
+  adc    r2, r1, r2, lsl #1
+
+  cmp    r0, r2, ror #2 * 8
+  subhs  r0, r0, r2, ror #2 * 8
+  adc    r2, r1, r2, lsl #1
+
+  cmp    r0, r2, ror #2 * 9
+  subhs  r0, r0, r2, ror #2 * 9
+  adc    r2, r1, r2, lsl #1
+
+  cmp    r0, r2, ror #2 * 10
+  subhs  r0, r0, r2, ror #2 * 10
+  adc    r2, r1, r2, lsl #1
+
+  cmp    r0, r2, ror #2 * 11
+  subhs  r0, r0, r2, ror #2 * 11
+  adc    r2, r1, r2, lsl #1
+
+  cmp    r0, r2, ror #2 * 12
+  subhs  r0, r0, r2, ror #2 * 12
+  adc    r2, r1, r2, lsl #1
+
+  cmp    r0, r2, ror #2 * 13
+  subhs  r0, r0, r2, ror #2 * 13
+  adc    r2, r1, r2, lsl #1
+
+  cmp    r0, r2, ror #2 * 14
+  subhs  r0, r0, r2, ror #2 * 14
+  adc    r2, r1, r2, lsl #1
+
+  cmp    r0, r2, ror #2 * 15
+  subhs  r0, r0, r2, ror #2 * 15
+  adc    r2, r1, r2, lsl #1
+
+  bic    r0, r2, #3 << 30  @ for rounding add: cmp r0, r2  adc r2, #1
+  bx lr
diff --git a/common_audio/signal_processing/spl_sqrt_floor_mips.c b/common_audio/signal_processing/spl_sqrt_floor_mips.c
new file mode 100644
index 0000000..7128fbd
--- /dev/null
+++ b/common_audio/signal_processing/spl_sqrt_floor_mips.c
@@ -0,0 +1,207 @@
+/*
+ * Written by Wilco Dijkstra, 1996. The following email exchange establishes the
+ * license.
+ *
+ * From: Wilco Dijkstra <Wilco.Dijkstra@ntlworld.com>
+ * Date: Fri, Jun 24, 2011 at 3:20 AM
+ * Subject: Re: sqrt routine
+ * To: Kevin Ma <kma@google.com>
+ * Hi Kevin,
+ * Thanks for asking. Those routines are public domain (originally posted to
+ * comp.sys.arm a long time ago), so you can use them freely for any purpose.
+ * Cheers,
+ * Wilco
+ *
+ * ----- Original Message -----
+ * From: "Kevin Ma" <kma@google.com>
+ * To: <Wilco.Dijkstra@ntlworld.com>
+ * Sent: Thursday, June 23, 2011 11:44 PM
+ * Subject: Fwd: sqrt routine
+ * Hi Wilco,
+ * I saw your sqrt routine from several web sites, including
+ * http://www.finesse.demon.co.uk/steven/sqrt.html.
+ * Just wonder if there's any copyright information with your Successive
+ * approximation routines, or if I can freely use it for any purpose.
+ * Thanks.
+ * Kevin
+ */
+
+// Minor modifications in code style for WebRTC, 2012.
+// Code optimizations for MIPS, 2013.
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+/*
+ * Algorithm:
+ * Successive approximation of the equation (root + delta) ^ 2 = N
+ * until delta < 1. If delta < 1 we have the integer part of SQRT (N).
+ * Use delta = 2^i for i = 15 .. 0.
+ *
+ * Output precision is 16 bits. Note for large input values (close to
+ * 0x7FFFFFFF), bit 15 (the highest bit of the low 16-bit half word)
+ * contains the MSB information (a non-sign value). Do with caution
+ * if you need to cast the output to int16_t type.
+ *
+ * If the input value is negative, it returns 0.
+ */
+
+
+int32_t WebRtcSpl_SqrtFloor(int32_t value)
+{
+  int32_t root = 0, tmp1, tmp2, tmp3, tmp4;
+
+  __asm __volatile(
+    ".set   push                                       \n\t"
+    ".set   noreorder                                  \n\t"
+
+    "lui    %[tmp1],      0x4000                       \n\t"
+    "slt    %[tmp2],      %[value],     %[tmp1]        \n\t"
+    "sub    %[tmp3],      %[value],     %[tmp1]        \n\t"
+    "lui    %[tmp1],      0x1                          \n\t"
+    "or     %[tmp4],      %[root],      %[tmp1]        \n\t"
+    "movz   %[value],     %[tmp3],      %[tmp2]        \n\t"
+    "movz   %[root],      %[tmp4],      %[tmp2]        \n\t"
+
+    "addiu  %[tmp1],      $0,           0x4000         \n\t"
+    "addu   %[tmp1],      %[tmp1],      %[root]        \n\t"
+    "sll    %[tmp1],      14                           \n\t"
+    "slt    %[tmp2],      %[value],     %[tmp1]        \n\t"
+    "subu   %[tmp3],      %[value],     %[tmp1]        \n\t"
+    "ori    %[tmp4],      %[root],      0x8000         \n\t"
+    "movz   %[value],     %[tmp3],      %[tmp2]        \n\t"
+    "movz   %[root],      %[tmp4],      %[tmp2]        \n\t"
+
+    "addiu  %[tmp1],      $0,           0x2000         \n\t"
+    "addu   %[tmp1],      %[tmp1],      %[root]        \n\t"
+    "sll    %[tmp1],      13                           \n\t"
+    "slt    %[tmp2],      %[value],     %[tmp1]        \n\t"
+    "subu   %[tmp3],      %[value],     %[tmp1]        \n\t"
+    "ori    %[tmp4],      %[root],      0x4000         \n\t"
+    "movz   %[value],     %[tmp3],      %[tmp2]        \n\t"
+    "movz   %[root],      %[tmp4],      %[tmp2]        \n\t"
+
+    "addiu  %[tmp1],      $0,           0x1000         \n\t"
+    "addu   %[tmp1],      %[tmp1],      %[root]        \n\t"
+    "sll    %[tmp1],      12                           \n\t"
+    "slt    %[tmp2],      %[value],     %[tmp1]        \n\t"
+    "subu   %[tmp3],      %[value],     %[tmp1]        \n\t"
+    "ori    %[tmp4],      %[root],      0x2000         \n\t"
+    "movz   %[value],     %[tmp3],      %[tmp2]        \n\t"
+    "movz   %[root],      %[tmp4],      %[tmp2]        \n\t"
+
+    "addiu  %[tmp1],      $0,           0x800          \n\t"
+    "addu   %[tmp1],      %[tmp1],      %[root]        \n\t"
+    "sll    %[tmp1],      11                           \n\t"
+    "slt    %[tmp2],      %[value],     %[tmp1]        \n\t"
+    "subu   %[tmp3],      %[value],     %[tmp1]        \n\t"
+    "ori    %[tmp4],      %[root],      0x1000         \n\t"
+    "movz   %[value],     %[tmp3],      %[tmp2]        \n\t"
+    "movz   %[root],      %[tmp4],      %[tmp2]        \n\t"
+
+    "addiu  %[tmp1],      $0,           0x400          \n\t"
+    "addu   %[tmp1],      %[tmp1],      %[root]        \n\t"
+    "sll    %[tmp1],      10                           \n\t"
+    "slt    %[tmp2],      %[value],     %[tmp1]        \n\t"
+    "subu   %[tmp3],      %[value],     %[tmp1]        \n\t"
+    "ori    %[tmp4],      %[root],      0x800          \n\t"
+    "movz   %[value],     %[tmp3],      %[tmp2]        \n\t"
+    "movz   %[root],      %[tmp4],      %[tmp2]        \n\t"
+
+    "addiu  %[tmp1],      $0,           0x200          \n\t"
+    "addu   %[tmp1],      %[tmp1],      %[root]        \n\t"
+    "sll    %[tmp1],      9                            \n\t"
+    "slt    %[tmp2],      %[value],     %[tmp1]        \n\t"
+    "subu   %[tmp3],      %[value],     %[tmp1]        \n\t"
+    "ori    %[tmp4],      %[root],       0x400         \n\t"
+    "movz   %[value],     %[tmp3],      %[tmp2]        \n\t"
+    "movz   %[root],      %[tmp4],      %[tmp2]        \n\t"
+
+    "addiu  %[tmp1],      $0,           0x100          \n\t"
+    "addu   %[tmp1],      %[tmp1],      %[root]        \n\t"
+    "sll    %[tmp1],      8                            \n\t"
+    "slt    %[tmp2],      %[value],     %[tmp1]        \n\t"
+    "subu   %[tmp3],      %[value],     %[tmp1]        \n\t"
+    "ori    %[tmp4],      %[root],      0x200          \n\t"
+    "movz   %[value],     %[tmp3],      %[tmp2]        \n\t"
+    "movz   %[root],      %[tmp4],      %[tmp2]        \n\t"
+
+    "addiu  %[tmp1],      $0,           0x80           \n\t"
+    "addu   %[tmp1],      %[tmp1],      %[root]        \n\t"
+    "sll    %[tmp1],      7                            \n\t"
+    "slt    %[tmp2],      %[value],     %[tmp1]        \n\t"
+    "subu   %[tmp3],      %[value],     %[tmp1]        \n\t"
+    "ori    %[tmp4],      %[root],      0x100          \n\t"
+    "movz   %[value],     %[tmp3],      %[tmp2]        \n\t"
+    "movz   %[root],      %[tmp4],      %[tmp2]        \n\t"
+
+    "addiu  %[tmp1],      $0,           0x40           \n\t"
+    "addu   %[tmp1],      %[tmp1],      %[root]        \n\t"
+    "sll    %[tmp1],      6                            \n\t"
+    "slt    %[tmp2],      %[value],     %[tmp1]        \n\t"
+    "subu   %[tmp3],      %[value],     %[tmp1]        \n\t"
+    "ori    %[tmp4],      %[root],      0x80           \n\t"
+    "movz   %[value],     %[tmp3],      %[tmp2]        \n\t"
+    "movz   %[root],      %[tmp4],      %[tmp2]        \n\t"
+
+    "addiu  %[tmp1],      $0,           0x20           \n\t"
+    "addu   %[tmp1],      %[tmp1],      %[root]        \n\t"
+    "sll    %[tmp1],      5                            \n\t"
+    "slt    %[tmp2],      %[value],     %[tmp1]        \n\t"
+    "subu   %[tmp3],      %[value],     %[tmp1]        \n\t"
+    "ori    %[tmp4],      %[root],      0x40           \n\t"
+    "movz   %[value],     %[tmp3],      %[tmp2]        \n\t"
+    "movz   %[root],      %[tmp4],      %[tmp2]        \n\t"
+
+    "addiu  %[tmp1],      $0,           0x10           \n\t"
+    "addu   %[tmp1],      %[tmp1],      %[root]        \n\t"
+    "sll    %[tmp1],      4                            \n\t"
+    "slt    %[tmp2],      %[value],     %[tmp1]        \n\t"
+    "subu   %[tmp3],      %[value],     %[tmp1]        \n\t"
+    "ori    %[tmp4],      %[root],      0x20           \n\t"
+    "movz   %[value],     %[tmp3],      %[tmp2]        \n\t"
+    "movz   %[root],      %[tmp4],      %[tmp2]        \n\t"
+
+    "addiu  %[tmp1],      $0,           0x8            \n\t"
+    "addu   %[tmp1],      %[tmp1],      %[root]        \n\t"
+    "sll    %[tmp1],      3                            \n\t"
+    "slt    %[tmp2],      %[value],     %[tmp1]        \n\t"
+    "subu   %[tmp3],      %[value],     %[tmp1]        \n\t"
+    "ori    %[tmp4],      %[root],      0x10           \n\t"
+    "movz   %[value],     %[tmp3],      %[tmp2]        \n\t"
+    "movz   %[root],      %[tmp4],      %[tmp2]        \n\t"
+
+    "addiu  %[tmp1],      $0,           0x4            \n\t"
+    "addu   %[tmp1],      %[tmp1],      %[root]        \n\t"
+    "sll    %[tmp1],      2                            \n\t"
+    "slt    %[tmp2],      %[value],     %[tmp1]        \n\t"
+    "subu   %[tmp3],      %[value],     %[tmp1]        \n\t"
+    "ori    %[tmp4],      %[root],      0x8            \n\t"
+    "movz   %[value],     %[tmp3],      %[tmp2]        \n\t"
+    "movz   %[root],      %[tmp4],      %[tmp2]        \n\t"
+
+    "addiu  %[tmp1],      $0,           0x2            \n\t"
+    "addu   %[tmp1],      %[tmp1],      %[root]        \n\t"
+    "sll    %[tmp1],      1                            \n\t"
+    "slt    %[tmp2],      %[value],     %[tmp1]        \n\t"
+    "subu   %[tmp3],      %[value],     %[tmp1]        \n\t"
+    "ori    %[tmp4],      %[root],      0x4            \n\t"
+    "movz   %[value],     %[tmp3],      %[tmp2]        \n\t"
+    "movz   %[root],      %[tmp4],      %[tmp2]        \n\t"
+
+    "addiu  %[tmp1],      $0,           0x1            \n\t"
+    "addu   %[tmp1],      %[tmp1],      %[root]        \n\t"
+    "slt    %[tmp2],      %[value],     %[tmp1]        \n\t"
+    "ori    %[tmp4],      %[root],      0x2            \n\t"
+    "movz   %[root],      %[tmp4],      %[tmp2]        \n\t"
+
+    ".set   pop                                        \n\t"
+
+    : [root] "+r" (root), [value] "+r" (value),
+      [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2),
+      [tmp3] "=&r" (tmp3), [tmp4] "=&r" (tmp4)
+    :
+  );
+
+  return root >> 1;
+}
+
diff --git a/common_audio/signal_processing/splitting_filter.c b/common_audio/signal_processing/splitting_filter.c
new file mode 100644
index 0000000..399433f
--- /dev/null
+++ b/common_audio/signal_processing/splitting_filter.c
@@ -0,0 +1,207 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file contains the splitting filter functions.
+ *
+ */
+
+#include "rtc_base/checks.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+// Maximum number of samples in a low/high-band frame.
+enum
+{
+    kMaxBandFrameLength = 320  // 10 ms at 64 kHz.
+};
+
+// QMF filter coefficients in Q16.
+static const uint16_t WebRtcSpl_kAllPassFilter1[3] = {6418, 36982, 57261};
+static const uint16_t WebRtcSpl_kAllPassFilter2[3] = {21333, 49062, 63010};
+
+///////////////////////////////////////////////////////////////////////////////////////////////
+// WebRtcSpl_AllPassQMF(...)
+//
+// Allpass filter used by the analysis and synthesis parts of the QMF filter.
+//
+// Input:
+//    - in_data             : Input data sequence (Q10)
+//    - data_length         : Length of data sequence (>2)
+//    - filter_coefficients : Filter coefficients (length 3, Q16)
+//
+// Input & Output:
+//    - filter_state        : Filter state (length 6, Q10).
+//
+// Output:
+//    - out_data            : Output data sequence (Q10), length equal to
+//                            |data_length|
+//
+
+void WebRtcSpl_AllPassQMF(int32_t* in_data, size_t data_length,
+                          int32_t* out_data, const uint16_t* filter_coefficients,
+                          int32_t* filter_state)
+{
+    // The procedure is to filter the input with three first order all pass filters
+    // (cascade operations).
+    //
+    //         a_3 + q^-1    a_2 + q^-1    a_1 + q^-1
+    // y[n] =  -----------   -----------   -----------   x[n]
+    //         1 + a_3q^-1   1 + a_2q^-1   1 + a_1q^-1
+    //
+    // The input vector |filter_coefficients| includes these three filter coefficients.
+    // The filter state contains the in_data state, in_data[-1], followed by
+    // the out_data state, out_data[-1]. This is repeated for each cascade.
+    // The first cascade filter will filter the |in_data| and store the output in
+    // |out_data|. The second will the take the |out_data| as input and make an
+    // intermediate storage in |in_data|, to save memory. The third, and final, cascade
+    // filter operation takes the |in_data| (which is the output from the previous cascade
+    // filter) and store the output in |out_data|.
+    // Note that the input vector values are changed during the process.
+    size_t k;
+    int32_t diff;
+    // First all-pass cascade; filter from in_data to out_data.
+
+    // Let y_i[n] indicate the output of cascade filter i (with filter coefficient a_i) at
+    // vector position n. Then the final output will be y[n] = y_3[n]
+
+    // First loop, use the states stored in memory.
+    // "diff" should be safe from wrap around since max values are 2^25
+    // diff = (x[0] - y_1[-1])
+    diff = WebRtcSpl_SubSatW32(in_data[0], filter_state[1]);
+    // y_1[0] =  x[-1] + a_1 * (x[0] - y_1[-1])
+    out_data[0] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[0], diff, filter_state[0]);
+
+    // For the remaining loops, use previous values.
+    for (k = 1; k < data_length; k++)
+    {
+        // diff = (x[n] - y_1[n-1])
+        diff = WebRtcSpl_SubSatW32(in_data[k], out_data[k - 1]);
+        // y_1[n] =  x[n-1] + a_1 * (x[n] - y_1[n-1])
+        out_data[k] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[0], diff, in_data[k - 1]);
+    }
+
+    // Update states.
+    filter_state[0] = in_data[data_length - 1]; // x[N-1], becomes x[-1] next time
+    filter_state[1] = out_data[data_length - 1]; // y_1[N-1], becomes y_1[-1] next time
+
+    // Second all-pass cascade; filter from out_data to in_data.
+    // diff = (y_1[0] - y_2[-1])
+    diff = WebRtcSpl_SubSatW32(out_data[0], filter_state[3]);
+    // y_2[0] =  y_1[-1] + a_2 * (y_1[0] - y_2[-1])
+    in_data[0] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[1], diff, filter_state[2]);
+    for (k = 1; k < data_length; k++)
+    {
+        // diff = (y_1[n] - y_2[n-1])
+        diff = WebRtcSpl_SubSatW32(out_data[k], in_data[k - 1]);
+        // y_2[0] =  y_1[-1] + a_2 * (y_1[0] - y_2[-1])
+        in_data[k] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[1], diff, out_data[k-1]);
+    }
+
+    filter_state[2] = out_data[data_length - 1]; // y_1[N-1], becomes y_1[-1] next time
+    filter_state[3] = in_data[data_length - 1]; // y_2[N-1], becomes y_2[-1] next time
+
+    // Third all-pass cascade; filter from in_data to out_data.
+    // diff = (y_2[0] - y[-1])
+    diff = WebRtcSpl_SubSatW32(in_data[0], filter_state[5]);
+    // y[0] =  y_2[-1] + a_3 * (y_2[0] - y[-1])
+    out_data[0] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[2], diff, filter_state[4]);
+    for (k = 1; k < data_length; k++)
+    {
+        // diff = (y_2[n] - y[n-1])
+        diff = WebRtcSpl_SubSatW32(in_data[k], out_data[k - 1]);
+        // y[n] =  y_2[n-1] + a_3 * (y_2[n] - y[n-1])
+        out_data[k] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[2], diff, in_data[k-1]);
+    }
+    filter_state[4] = in_data[data_length - 1]; // y_2[N-1], becomes y_2[-1] next time
+    filter_state[5] = out_data[data_length - 1]; // y[N-1], becomes y[-1] next time
+}
+
+void WebRtcSpl_AnalysisQMF(const int16_t* in_data, size_t in_data_length,
+                           int16_t* low_band, int16_t* high_band,
+                           int32_t* filter_state1, int32_t* filter_state2)
+{
+    size_t i;
+    int16_t k;
+    int32_t tmp;
+    int32_t half_in1[kMaxBandFrameLength];
+    int32_t half_in2[kMaxBandFrameLength];
+    int32_t filter1[kMaxBandFrameLength];
+    int32_t filter2[kMaxBandFrameLength];
+    const size_t band_length = in_data_length / 2;
+    RTC_DCHECK_EQ(0, in_data_length % 2);
+    RTC_DCHECK_LE(band_length, kMaxBandFrameLength);
+
+    // Split even and odd samples. Also shift them to Q10.
+    for (i = 0, k = 0; i < band_length; i++, k += 2)
+    {
+        half_in2[i] = ((int32_t)in_data[k]) * (1 << 10);
+        half_in1[i] = ((int32_t)in_data[k + 1]) * (1 << 10);
+    }
+
+    // All pass filter even and odd samples, independently.
+    WebRtcSpl_AllPassQMF(half_in1, band_length, filter1,
+                         WebRtcSpl_kAllPassFilter1, filter_state1);
+    WebRtcSpl_AllPassQMF(half_in2, band_length, filter2,
+                         WebRtcSpl_kAllPassFilter2, filter_state2);
+
+    // Take the sum and difference of filtered version of odd and even
+    // branches to get upper & lower band.
+    for (i = 0; i < band_length; i++)
+    {
+        tmp = (filter1[i] + filter2[i] + 1024) >> 11;
+        low_band[i] = WebRtcSpl_SatW32ToW16(tmp);
+
+        tmp = (filter1[i] - filter2[i] + 1024) >> 11;
+        high_band[i] = WebRtcSpl_SatW32ToW16(tmp);
+    }
+}
+
+void WebRtcSpl_SynthesisQMF(const int16_t* low_band, const int16_t* high_band,
+                            size_t band_length, int16_t* out_data,
+                            int32_t* filter_state1, int32_t* filter_state2)
+{
+    int32_t tmp;
+    int32_t half_in1[kMaxBandFrameLength];
+    int32_t half_in2[kMaxBandFrameLength];
+    int32_t filter1[kMaxBandFrameLength];
+    int32_t filter2[kMaxBandFrameLength];
+    size_t i;
+    int16_t k;
+    RTC_DCHECK_LE(band_length, kMaxBandFrameLength);
+
+    // Obtain the sum and difference channels out of upper and lower-band channels.
+    // Also shift to Q10 domain.
+    for (i = 0; i < band_length; i++)
+    {
+        tmp = (int32_t)low_band[i] + (int32_t)high_band[i];
+        half_in1[i] = tmp * (1 << 10);
+        tmp = (int32_t)low_band[i] - (int32_t)high_band[i];
+        half_in2[i] = tmp * (1 << 10);
+    }
+
+    // all-pass filter the sum and difference channels
+    WebRtcSpl_AllPassQMF(half_in1, band_length, filter1,
+                         WebRtcSpl_kAllPassFilter2, filter_state1);
+    WebRtcSpl_AllPassQMF(half_in2, band_length, filter2,
+                         WebRtcSpl_kAllPassFilter1, filter_state2);
+
+    // The filtered signals are even and odd samples of the output. Combine
+    // them. The signals are Q10 should shift them back to Q0 and take care of
+    // saturation.
+    for (i = 0, k = 0; i < band_length; i++)
+    {
+        tmp = (filter2[i] + 512) >> 10;
+        out_data[k++] = WebRtcSpl_SatW32ToW16(tmp);
+
+        tmp = (filter1[i] + 512) >> 10;
+        out_data[k++] = WebRtcSpl_SatW32ToW16(tmp);
+    }
+
+}
diff --git a/common_audio/signal_processing/sqrt_of_one_minus_x_squared.c b/common_audio/signal_processing/sqrt_of_one_minus_x_squared.c
new file mode 100644
index 0000000..a77fd40
--- /dev/null
+++ b/common_audio/signal_processing/sqrt_of_one_minus_x_squared.c
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains the function WebRtcSpl_SqrtOfOneMinusXSquared().
+ * The description header can be found in signal_processing_library.h
+ *
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+void WebRtcSpl_SqrtOfOneMinusXSquared(int16_t *xQ15, size_t vector_length,
+                                      int16_t *yQ15)
+{
+    int32_t sq;
+    size_t m;
+    int16_t tmp;
+
+    for (m = 0; m < vector_length; m++)
+    {
+        tmp = xQ15[m];
+        sq = tmp * tmp;  // x^2 in Q30
+        sq = 1073741823 - sq; // 1-x^2, where 1 ~= 0.99999999906 is 1073741823 in Q30
+        sq = WebRtcSpl_Sqrt(sq); // sqrt(1-x^2) in Q15
+        yQ15[m] = (int16_t)sq;
+    }
+}
diff --git a/common_audio/signal_processing/vector_scaling_operations.c b/common_audio/signal_processing/vector_scaling_operations.c
new file mode 100644
index 0000000..7307dc7
--- /dev/null
+++ b/common_audio/signal_processing/vector_scaling_operations.c
@@ -0,0 +1,165 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains implementations of the functions
+ * WebRtcSpl_VectorBitShiftW16()
+ * WebRtcSpl_VectorBitShiftW32()
+ * WebRtcSpl_VectorBitShiftW32ToW16()
+ * WebRtcSpl_ScaleVector()
+ * WebRtcSpl_ScaleVectorWithSat()
+ * WebRtcSpl_ScaleAndAddVectors()
+ * WebRtcSpl_ScaleAndAddVectorsWithRoundC()
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+void WebRtcSpl_VectorBitShiftW16(int16_t *res, size_t length,
+                                 const int16_t *in, int16_t right_shifts)
+{
+    size_t i;
+
+    if (right_shifts > 0)
+    {
+        for (i = length; i > 0; i--)
+        {
+            (*res++) = ((*in++) >> right_shifts);
+        }
+    } else
+    {
+        for (i = length; i > 0; i--)
+        {
+            (*res++) = ((*in++) * (1 << (-right_shifts)));
+        }
+    }
+}
+
+void WebRtcSpl_VectorBitShiftW32(int32_t *out_vector,
+                                 size_t vector_length,
+                                 const int32_t *in_vector,
+                                 int16_t right_shifts)
+{
+    size_t i;
+
+    if (right_shifts > 0)
+    {
+        for (i = vector_length; i > 0; i--)
+        {
+            (*out_vector++) = ((*in_vector++) >> right_shifts);
+        }
+    } else
+    {
+        for (i = vector_length; i > 0; i--)
+        {
+            (*out_vector++) = ((*in_vector++) << (-right_shifts));
+        }
+    }
+}
+
+void WebRtcSpl_VectorBitShiftW32ToW16(int16_t* out, size_t length,
+                                      const int32_t* in, int right_shifts) {
+  size_t i;
+  int32_t tmp_w32;
+
+  if (right_shifts >= 0) {
+    for (i = length; i > 0; i--) {
+      tmp_w32 = (*in++) >> right_shifts;
+      (*out++) = WebRtcSpl_SatW32ToW16(tmp_w32);
+    }
+  } else {
+    int left_shifts = -right_shifts;
+    for (i = length; i > 0; i--) {
+      tmp_w32 = (*in++) << left_shifts;
+      (*out++) = WebRtcSpl_SatW32ToW16(tmp_w32);
+    }
+  }
+}
+
+void WebRtcSpl_ScaleVector(const int16_t *in_vector, int16_t *out_vector,
+                           int16_t gain, size_t in_vector_length,
+                           int16_t right_shifts)
+{
+    // Performs vector operation: out_vector = (gain*in_vector)>>right_shifts
+    size_t i;
+    const int16_t *inptr;
+    int16_t *outptr;
+
+    inptr = in_vector;
+    outptr = out_vector;
+
+    for (i = 0; i < in_vector_length; i++)
+    {
+      *outptr++ = (int16_t)((*inptr++ * gain) >> right_shifts);
+    }
+}
+
+void WebRtcSpl_ScaleVectorWithSat(const int16_t *in_vector, int16_t *out_vector,
+                                 int16_t gain, size_t in_vector_length,
+                                 int16_t right_shifts)
+{
+    // Performs vector operation: out_vector = (gain*in_vector)>>right_shifts
+    size_t i;
+    const int16_t *inptr;
+    int16_t *outptr;
+
+    inptr = in_vector;
+    outptr = out_vector;
+
+    for (i = 0; i < in_vector_length; i++) {
+      *outptr++ = WebRtcSpl_SatW32ToW16((*inptr++ * gain) >> right_shifts);
+    }
+}
+
+void WebRtcSpl_ScaleAndAddVectors(const int16_t *in1, int16_t gain1, int shift1,
+                                  const int16_t *in2, int16_t gain2, int shift2,
+                                  int16_t *out, size_t vector_length)
+{
+    // Performs vector operation: out = (gain1*in1)>>shift1 + (gain2*in2)>>shift2
+    size_t i;
+    const int16_t *in1ptr;
+    const int16_t *in2ptr;
+    int16_t *outptr;
+
+    in1ptr = in1;
+    in2ptr = in2;
+    outptr = out;
+
+    for (i = 0; i < vector_length; i++)
+    {
+      *outptr++ = (int16_t)((gain1 * *in1ptr++) >> shift1) +
+          (int16_t)((gain2 * *in2ptr++) >> shift2);
+    }
+}
+
+// C version of WebRtcSpl_ScaleAndAddVectorsWithRound() for generic platforms.
+int WebRtcSpl_ScaleAndAddVectorsWithRoundC(const int16_t* in_vector1,
+                                           int16_t in_vector1_scale,
+                                           const int16_t* in_vector2,
+                                           int16_t in_vector2_scale,
+                                           int right_shifts,
+                                           int16_t* out_vector,
+                                           size_t length) {
+  size_t i = 0;
+  int round_value = (1 << right_shifts) >> 1;
+
+  if (in_vector1 == NULL || in_vector2 == NULL || out_vector == NULL ||
+      length == 0 || right_shifts < 0) {
+    return -1;
+  }
+
+  for (i = 0; i < length; i++) {
+    out_vector[i] = (int16_t)((
+        in_vector1[i] * in_vector1_scale + in_vector2[i] * in_vector2_scale +
+        round_value) >> right_shifts);
+  }
+
+  return 0;
+}
diff --git a/common_audio/signal_processing/vector_scaling_operations_mips.c b/common_audio/signal_processing/vector_scaling_operations_mips.c
new file mode 100644
index 0000000..ba2d26d
--- /dev/null
+++ b/common_audio/signal_processing/vector_scaling_operations_mips.c
@@ -0,0 +1,57 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This file contains implementations of the functions
+ * WebRtcSpl_ScaleAndAddVectorsWithRound_mips()
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+int WebRtcSpl_ScaleAndAddVectorsWithRound_mips(const int16_t* in_vector1,
+                                               int16_t in_vector1_scale,
+                                               const int16_t* in_vector2,
+                                               int16_t in_vector2_scale,
+                                               int right_shifts,
+                                               int16_t* out_vector,
+                                               size_t length) {
+  int16_t r0 = 0, r1 = 0;
+  int16_t *in1 = (int16_t*)in_vector1;
+  int16_t *in2 = (int16_t*)in_vector2;
+  int16_t *out = out_vector;
+  size_t i = 0;
+  int value32 = 0;
+
+  if (in_vector1 == NULL || in_vector2 == NULL || out_vector == NULL ||
+      length == 0 || right_shifts < 0) {
+    return -1;
+  }
+  for (i = 0; i < length; i++) {
+    __asm __volatile (
+      "lh         %[r0],          0(%[in1])                               \n\t"
+      "lh         %[r1],          0(%[in2])                               \n\t"
+      "mult       %[r0],          %[in_vector1_scale]                     \n\t"
+      "madd       %[r1],          %[in_vector2_scale]                     \n\t"
+      "extrv_r.w  %[value32],     $ac0,               %[right_shifts]     \n\t"
+      "addiu      %[in1],         %[in1],             2                   \n\t"
+      "addiu      %[in2],         %[in2],             2                   \n\t"
+      "sh         %[value32],     0(%[out])                               \n\t"
+      "addiu      %[out],         %[out],             2                   \n\t"
+      : [value32] "=&r" (value32), [out] "+r" (out), [in1] "+r" (in1),
+        [in2] "+r" (in2), [r0] "=&r" (r0), [r1] "=&r" (r1)
+      : [in_vector1_scale] "r" (in_vector1_scale),
+        [in_vector2_scale] "r" (in_vector2_scale),
+        [right_shifts] "r" (right_shifts)
+      : "hi", "lo", "memory"
+    );
+  }
+  return 0;
+}
diff --git a/common_audio/smoothing_filter.cc b/common_audio/smoothing_filter.cc
new file mode 100644
index 0000000..ecfb5c2
--- /dev/null
+++ b/common_audio/smoothing_filter.cc
@@ -0,0 +1,144 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/smoothing_filter.h"
+
+#include <cmath>
+
+#include "rtc_base/timeutils.h"
+
+namespace webrtc {
+
+SmoothingFilterImpl::SmoothingFilterImpl(int init_time_ms)
+    : init_time_ms_(init_time_ms),
+      // Duing the initalization time, we use an increasing alpha. Specifically,
+      //   alpha(n) = exp(-powf(init_factor_, n)),
+      // where |init_factor_| is chosen such that
+      //   alpha(init_time_ms_) = exp(-1.0f / init_time_ms_),
+      init_factor_(init_time_ms_ == 0
+                       ? 0.0f
+                       : powf(init_time_ms_, -1.0f / init_time_ms_)),
+      // |init_const_| is to a factor to help the calculation during
+      // initialization phase.
+      init_const_(init_time_ms_ == 0
+                      ? 0.0f
+                      : init_time_ms_ -
+                            powf(init_time_ms_, 1.0f - 1.0f / init_time_ms_)) {
+  UpdateAlpha(init_time_ms_);
+}
+
+SmoothingFilterImpl::~SmoothingFilterImpl() = default;
+
+void SmoothingFilterImpl::AddSample(float sample) {
+  const int64_t now_ms = rtc::TimeMillis();
+
+  if (!init_end_time_ms_) {
+    // This is equivalent to assuming the filter has been receiving the same
+    // value as the first sample since time -infinity.
+    state_ = last_sample_ = sample;
+    init_end_time_ms_ = now_ms + init_time_ms_;
+    last_state_time_ms_ = now_ms;
+    return;
+  }
+
+  ExtrapolateLastSample(now_ms);
+  last_sample_ = sample;
+}
+
+rtc::Optional<float> SmoothingFilterImpl::GetAverage() {
+  if (!init_end_time_ms_) {
+    // |init_end_time_ms_| undefined since we have not received any sample.
+    return rtc::nullopt;
+  }
+  ExtrapolateLastSample(rtc::TimeMillis());
+  return state_;
+}
+
+bool SmoothingFilterImpl::SetTimeConstantMs(int time_constant_ms) {
+  if (!init_end_time_ms_ || last_state_time_ms_ < *init_end_time_ms_) {
+    return false;
+  }
+  UpdateAlpha(time_constant_ms);
+  return true;
+}
+
+void SmoothingFilterImpl::UpdateAlpha(int time_constant_ms) {
+  alpha_ = time_constant_ms == 0 ? 0.0f : exp(-1.0f / time_constant_ms);
+}
+
+void SmoothingFilterImpl::ExtrapolateLastSample(int64_t time_ms) {
+  RTC_DCHECK_GE(time_ms, last_state_time_ms_);
+  RTC_DCHECK(init_end_time_ms_);
+
+  float multiplier = 0.0f;
+
+  if (time_ms <= *init_end_time_ms_) {
+    // Current update is to be made during initialization phase.
+    // We update the state as if the |alpha| has been increased according
+    //   alpha(n) = exp(-powf(init_factor_, n)),
+    // where n is the time (in millisecond) since the first sample received.
+    // With algebraic derivation as shown in the Appendix, we can find that the
+    // state can be updated in a similar manner as if alpha is a constant,
+    // except for a different multiplier.
+    if (init_time_ms_ == 0) {
+      // This means |init_factor_| = 0.
+      multiplier = 0.0f;
+    } else if (init_time_ms_ == 1) {
+      // This means |init_factor_| = 1.
+      multiplier = exp(last_state_time_ms_ - time_ms);
+    } else {
+      multiplier =
+          exp(-(powf(init_factor_, last_state_time_ms_ - *init_end_time_ms_) -
+                powf(init_factor_, time_ms - *init_end_time_ms_)) /
+              init_const_);
+    }
+  } else {
+    if (last_state_time_ms_ < *init_end_time_ms_) {
+      // The latest state update was made during initialization phase.
+      // We first extrapolate to the initialization time.
+      ExtrapolateLastSample(*init_end_time_ms_);
+      // Then extrapolate the rest by the following.
+    }
+    multiplier = powf(alpha_, time_ms - last_state_time_ms_);
+  }
+
+  state_ = multiplier * state_ + (1.0f - multiplier) * last_sample_;
+  last_state_time_ms_ = time_ms;
+}
+
+}  // namespace webrtc
+
+// Appendix: derivation of extrapolation during initialization phase.
+// (LaTeX syntax)
+// Assuming
+//   \begin{align}
+//     y(n) &= \alpha_{n-1} y(n-1) + \left(1 - \alpha_{n-1}\right) x(m) \\*
+//          &= \left(\prod_{i=m}^{n-1} \alpha_i\right) y(m) +
+//             \left(1 - \prod_{i=m}^{n-1} \alpha_i \right) x(m)
+//   \end{align}
+// Taking $\alpha_{n} = \exp(-\gamma^n)$, $\gamma$ denotes init\_factor\_, the
+// multiplier becomes
+//   \begin{align}
+//     \prod_{i=m}^{n-1} \alpha_i
+//     &= \exp\left(-\sum_{i=m}^{n-1} \gamma^i \right) \\*
+//     &= \begin{cases}
+//          \exp\left(-\frac{\gamma^m - \gamma^n}{1 - \gamma} \right)
+//          & \gamma \neq 1 \\*
+//          m-n & \gamma = 1
+//        \end{cases}
+//   \end{align}
+// We know $\gamma = T^{-\frac{1}{T}}$, where $T$ denotes init\_time\_ms\_. Then
+// $1 - \gamma$ approaches zero when $T$ increases. This can cause numerical
+// difficulties. We multiply $T$ (if $T > 0$) to both numerator and denominator
+// in the fraction. See.
+//   \begin{align}
+//     \frac{\gamma^m - \gamma^n}{1 - \gamma}
+//     &= \frac{T^\frac{T-m}{T} - T^\frac{T-n}{T}}{T - T^{1-\frac{1}{T}}}
+//   \end{align}
diff --git a/common_audio/smoothing_filter.h b/common_audio/smoothing_filter.h
new file mode 100644
index 0000000..b8ab4e5
--- /dev/null
+++ b/common_audio/smoothing_filter.h
@@ -0,0 +1,72 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_SMOOTHING_FILTER_H_
+#define COMMON_AUDIO_SMOOTHING_FILTER_H_
+
+#include "api/optional.h"
+#include "rtc_base/constructormagic.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+class SmoothingFilter {
+ public:
+  virtual ~SmoothingFilter() = default;
+  virtual void AddSample(float sample) = 0;
+  virtual rtc::Optional<float> GetAverage() = 0;
+  virtual bool SetTimeConstantMs(int time_constant_ms) = 0;
+};
+
+// SmoothingFilterImpl applies an exponential filter
+//   alpha = exp(-1.0 / time_constant_ms);
+//   y[t] = alpha * y[t-1] + (1 - alpha) * sample;
+// This implies a sample rate of 1000 Hz, i.e., 1 sample / ms.
+// But SmoothingFilterImpl allows sparse samples. All missing samples will be
+// assumed to equal the last received sample.
+class SmoothingFilterImpl final : public SmoothingFilter {
+ public:
+  // |init_time_ms| is initialization time. It defines a period starting from
+  // the arriving time of the first sample. During this period, the exponential
+  // filter uses a varying time constant so that a smaller time constant will be
+  // applied to the earlier samples. This is to allow the the filter to adapt to
+  // earlier samples quickly. After the initialization period, the time constant
+  // will be set to |init_time_ms| first and can be changed through
+  // |SetTimeConstantMs|.
+  explicit SmoothingFilterImpl(int init_time_ms);
+  ~SmoothingFilterImpl() override;
+
+  void AddSample(float sample) override;
+  rtc::Optional<float> GetAverage() override;
+  bool SetTimeConstantMs(int time_constant_ms) override;
+
+  // Methods used for unittests.
+  float alpha() const { return alpha_; }
+
+ private:
+  void UpdateAlpha(int time_constant_ms);
+  void ExtrapolateLastSample(int64_t time_ms);
+
+  const int init_time_ms_;
+  const float init_factor_;
+  const float init_const_;
+
+  rtc::Optional<int64_t> init_end_time_ms_;
+  float last_sample_;
+  float alpha_;
+  float state_;
+  int64_t last_state_time_ms_;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(SmoothingFilterImpl);
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_SMOOTHING_FILTER_H_
diff --git a/common_audio/smoothing_filter_unittest.cc b/common_audio/smoothing_filter_unittest.cc
new file mode 100644
index 0000000..d173c9a
--- /dev/null
+++ b/common_audio/smoothing_filter_unittest.cc
@@ -0,0 +1,163 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cmath>
+#include <memory>
+
+#include "common_audio/smoothing_filter.h"
+#include "rtc_base/fakeclock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr float kMaxAbsError = 1e-5f;
+constexpr int64_t kClockInitialTime = 123456;
+
+struct SmoothingFilterStates {
+  explicit SmoothingFilterStates(int init_time_ms)
+      : smoothing_filter(init_time_ms) {
+    fake_clock.AdvanceTime(rtc::TimeDelta::FromMilliseconds(kClockInitialTime));
+  }
+  rtc::ScopedFakeClock fake_clock;
+  SmoothingFilterImpl smoothing_filter;
+};
+
+// This function does the following:
+//   1. Add a sample to filter at current clock,
+//   2. Advance the clock by |advance_time_ms|,
+//   3. Get the output of both SmoothingFilter and verify that it equals to an
+//      expected value.
+void CheckOutput(SmoothingFilterStates* states,
+                 float sample,
+                 int advance_time_ms,
+                 float expected_ouput) {
+  states->smoothing_filter.AddSample(sample);
+  states->fake_clock.AdvanceTime(
+      rtc::TimeDelta::FromMilliseconds(advance_time_ms));
+  auto output = states->smoothing_filter.GetAverage();
+  EXPECT_TRUE(output);
+  EXPECT_NEAR(expected_ouput, *output, kMaxAbsError);
+}
+
+}  // namespace
+
+TEST(SmoothingFilterTest, NoOutputWhenNoSampleAdded) {
+  constexpr int kInitTimeMs = 100;
+  SmoothingFilterStates states(kInitTimeMs);
+  EXPECT_FALSE(states.smoothing_filter.GetAverage());
+}
+
+// Python script to calculate the reference values used in this test.
+//   import math
+//
+//   class ExpFilter:
+//     def add_sample(self, new_value):
+//       self.state = self.state * self.alpha + (1.0 - self.alpha) * new_value
+//
+//   filter = ExpFilter()
+//   init_time = 795
+//   init_factor = (1.0 / init_time) ** (1.0 / init_time)
+//
+//   filter.state = 1.0
+//
+//   for time_now in range(1, 500):
+//     filter.alpha = math.exp(-init_factor ** time_now)
+//     filter.add_sample(1.0)
+//   print filter.state
+//
+//   for time_now in range(500, 600):
+//     filter.alpha = math.exp(-init_factor ** time_now)
+//     filter.add_sample(0.5)
+//   print filter.state
+//
+//   for time_now in range(600, 700):
+//     filter.alpha = math.exp(-init_factor ** time_now)
+//     filter.add_sample(1.0)
+//   print filter.state
+//
+//   for time_now in range(700, init_time):
+//     filter.alpha = math.exp(-init_factor ** time_now)
+//     filter.add_sample(1.0)
+//
+//   filter.alpha = math.exp(-1.0 / init_time)
+//   for time_now in range(init_time, 800):
+//     filter.add_sample(1.0)
+//   print filter.state
+//
+//   for i in range(800, 900):
+//     filter.add_sample(0.5)
+//   print filter.state
+//
+//   for i in range(900, 1000):
+//     filter.add_sample(1.0)
+//   print filter.state
+TEST(SmoothingFilterTest, CheckBehaviorAroundInitTime) {
+  constexpr int kInitTimeMs = 795;
+  SmoothingFilterStates states(kInitTimeMs);
+  CheckOutput(&states, 1.0f, 500, 1.0f);
+  CheckOutput(&states, 0.5f, 100, 0.680562264029f);
+  CheckOutput(&states, 1.0f, 100, 0.794207139813f);
+  // Next step will go across initialization time.
+  CheckOutput(&states, 1.0f, 100, 0.829803409752f);
+  CheckOutput(&states, 0.5f, 100, 0.790821764210f);
+  CheckOutput(&states, 1.0f, 100, 0.815545922911f);
+}
+
+TEST(SmoothingFilterTest, InitTimeEqualsZero) {
+  constexpr int kInitTimeMs = 0;
+  SmoothingFilterStates states(kInitTimeMs);
+  CheckOutput(&states, 1.0f, 1, 1.0f);
+  CheckOutput(&states, 0.5f, 1, 0.5f);
+}
+
+TEST(SmoothingFilterTest, InitTimeEqualsOne) {
+  constexpr int kInitTimeMs = 1;
+  SmoothingFilterStates states(kInitTimeMs);
+  CheckOutput(&states, 1.0f, 1, 1.0f);
+  CheckOutput(&states, 0.5f, 1, 1.0f * exp(-1.0f) + (1.0f - exp(-1.0f)) * 0.5f);
+}
+
+TEST(SmoothingFilterTest, GetAverageOutputsEmptyBeforeFirstSample) {
+  constexpr int kInitTimeMs = 100;
+  SmoothingFilterStates states(kInitTimeMs);
+  EXPECT_FALSE(states.smoothing_filter.GetAverage());
+  constexpr float kFirstSample = 1.2345f;
+  states.smoothing_filter.AddSample(kFirstSample);
+  EXPECT_EQ(kFirstSample, states.smoothing_filter.GetAverage());
+}
+
+TEST(SmoothingFilterTest, CannotChangeTimeConstantDuringInitialization) {
+  constexpr int kInitTimeMs = 100;
+  SmoothingFilterStates states(kInitTimeMs);
+  states.smoothing_filter.AddSample(0.0);
+
+  // During initialization, |SetTimeConstantMs| does not take effect.
+  states.fake_clock.AdvanceTime(
+      rtc::TimeDelta::FromMilliseconds(kInitTimeMs - 1));
+  states.smoothing_filter.AddSample(0.0);
+
+  EXPECT_FALSE(states.smoothing_filter.SetTimeConstantMs(kInitTimeMs * 2));
+  EXPECT_NE(exp(-1.0f / (kInitTimeMs * 2)), states.smoothing_filter.alpha());
+
+  states.fake_clock.AdvanceTime(rtc::TimeDelta::FromMilliseconds(1));
+  states.smoothing_filter.AddSample(0.0);
+  // When initialization finishes, the time constant should be come
+  // |kInitTimeConstantMs|.
+  EXPECT_FLOAT_EQ(exp(-1.0f / kInitTimeMs), states.smoothing_filter.alpha());
+
+  // After initialization, |SetTimeConstantMs| takes effect.
+  EXPECT_TRUE(states.smoothing_filter.SetTimeConstantMs(kInitTimeMs * 2));
+  EXPECT_FLOAT_EQ(exp(-1.0f / (kInitTimeMs * 2)),
+                  states.smoothing_filter.alpha());
+}
+
+}  // namespace webrtc
diff --git a/common_audio/sparse_fir_filter.cc b/common_audio/sparse_fir_filter.cc
new file mode 100644
index 0000000..ed2d79b
--- /dev/null
+++ b/common_audio/sparse_fir_filter.cc
@@ -0,0 +1,62 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/sparse_fir_filter.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+SparseFIRFilter::SparseFIRFilter(const float* nonzero_coeffs,
+                                 size_t num_nonzero_coeffs,
+                                 size_t sparsity,
+                                 size_t offset)
+    : sparsity_(sparsity),
+      offset_(offset),
+      nonzero_coeffs_(nonzero_coeffs, nonzero_coeffs + num_nonzero_coeffs),
+      state_(sparsity_ * (num_nonzero_coeffs - 1) + offset_, 0.f) {
+  RTC_CHECK_GE(num_nonzero_coeffs, 1);
+  RTC_CHECK_GE(sparsity, 1);
+}
+
+SparseFIRFilter::~SparseFIRFilter() = default;
+
+void SparseFIRFilter::Filter(const float* in, size_t length, float* out) {
+  // Convolves the input signal |in| with the filter kernel |nonzero_coeffs_|
+  // taking into account the previous state.
+  for (size_t i = 0; i < length; ++i) {
+    out[i] = 0.f;
+    size_t j;
+    for (j = 0; i >= j * sparsity_ + offset_ &&
+                j < nonzero_coeffs_.size(); ++j) {
+      out[i] += in[i - j * sparsity_ - offset_] * nonzero_coeffs_[j];
+    }
+    for (; j < nonzero_coeffs_.size(); ++j) {
+      out[i] += state_[i + (nonzero_coeffs_.size() - j - 1) * sparsity_] *
+                nonzero_coeffs_[j];
+    }
+  }
+
+  // Update current state.
+  if (state_.size() > 0u) {
+    if (length >= state_.size()) {
+      std::memcpy(&state_[0],
+                  &in[length - state_.size()],
+                  state_.size() * sizeof(*in));
+    } else {
+      std::memmove(&state_[0],
+                   &state_[length],
+                   (state_.size() - length) * sizeof(state_[0]));
+      std::memcpy(&state_[state_.size() - length], in, length * sizeof(*in));
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/common_audio/sparse_fir_filter.h b/common_audio/sparse_fir_filter.h
new file mode 100644
index 0000000..22bcdff
--- /dev/null
+++ b/common_audio/sparse_fir_filter.h
@@ -0,0 +1,53 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_SPARSE_FIR_FILTER_H_
+#define COMMON_AUDIO_SPARSE_FIR_FILTER_H_
+
+#include <cstring>
+#include <vector>
+
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// A Finite Impulse Response filter implementation which takes advantage of a
+// sparse structure with uniformly distributed non-zero coefficients.
+class SparseFIRFilter final {
+ public:
+  // |num_nonzero_coeffs| is the number of non-zero coefficients,
+  // |nonzero_coeffs|. They are assumed to be uniformly distributed every
+  // |sparsity| samples and with an initial |offset|. The rest of the filter
+  // coefficients will be assumed zeros. For example, with sparsity = 3, and
+  // offset = 1 the filter coefficients will be:
+  // B = [0 coeffs[0] 0 0 coeffs[1] 0 0 coeffs[2] ... ]
+  // All initial state values will be zeros.
+  SparseFIRFilter(const float* nonzero_coeffs,
+                  size_t num_nonzero_coeffs,
+                  size_t sparsity,
+                  size_t offset);
+  ~SparseFIRFilter();
+
+  // Filters the |in| data supplied.
+  // |out| must be previously allocated and it must be at least of |length|.
+  void Filter(const float* in, size_t length, float* out);
+
+ private:
+  const size_t sparsity_;
+  const size_t offset_;
+  const std::vector<float> nonzero_coeffs_;
+  std::vector<float> state_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(SparseFIRFilter);
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_SPARSE_FIR_FILTER_H_
diff --git a/common_audio/sparse_fir_filter_unittest.cc b/common_audio/sparse_fir_filter_unittest.cc
new file mode 100644
index 0000000..434daaa
--- /dev/null
+++ b/common_audio/sparse_fir_filter_unittest.cc
@@ -0,0 +1,232 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "common_audio/sparse_fir_filter.h"
+
+#include "common_audio/fir_filter.h"
+#include "common_audio/fir_filter_factory.h"
+#include "rtc_base/arraysize.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+static const float kCoeffs[] = {0.2f, 0.3f, 0.5f, 0.7f, 0.11f};
+static const float kInput[] =
+    {1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f};
+
+template <size_t N>
+void VerifyOutput(const float (&expected_output)[N], const float (&output)[N]) {
+  EXPECT_EQ(0, memcmp(expected_output, output, sizeof(output)));
+}
+
+}  // namespace
+
+TEST(SparseFIRFilterTest, FilterAsIdentity) {
+  const float kCoeff = 1.f;
+  const size_t kNumCoeff = 1;
+  const size_t kSparsity = 3;
+  const size_t kOffset = 0;
+  float output[arraysize(kInput)];
+  SparseFIRFilter filter(&kCoeff, kNumCoeff, kSparsity, kOffset);
+  filter.Filter(kInput, arraysize(kInput), output);
+  VerifyOutput(kInput, output);
+}
+
+TEST(SparseFIRFilterTest, SameOutputForScalarCoefficientAndDifferentSparsity) {
+  const float kCoeff = 2.f;
+  const size_t kNumCoeff = 1;
+  const size_t kLowSparsity = 1;
+  const size_t kHighSparsity = 7;
+  const size_t kOffset = 0;
+  float low_sparsity_output[arraysize(kInput)];
+  float high_sparsity_output[arraysize(kInput)];
+  SparseFIRFilter low_sparsity_filter(&kCoeff,
+                                      kNumCoeff,
+                                      kLowSparsity,
+                                      kOffset);
+  SparseFIRFilter high_sparsity_filter(&kCoeff,
+                                       kNumCoeff,
+                                       kHighSparsity,
+                                       kOffset);
+  low_sparsity_filter.Filter(kInput, arraysize(kInput), low_sparsity_output);
+  high_sparsity_filter.Filter(kInput, arraysize(kInput), high_sparsity_output);
+  VerifyOutput(low_sparsity_output, high_sparsity_output);
+}
+
+TEST(SparseFIRFilterTest, FilterUsedAsScalarMultiplication) {
+  const float kCoeff = 5.f;
+  const size_t kNumCoeff = 1;
+  const size_t kSparsity = 5;
+  const size_t kOffset = 0;
+  float output[arraysize(kInput)];
+  SparseFIRFilter filter(&kCoeff, kNumCoeff, kSparsity, kOffset);
+  filter.Filter(kInput, arraysize(kInput), output);
+  EXPECT_FLOAT_EQ(5.f, output[0]);
+  EXPECT_FLOAT_EQ(20.f, output[3]);
+  EXPECT_FLOAT_EQ(25.f, output[4]);
+  EXPECT_FLOAT_EQ(50.f, output[arraysize(kInput) - 1]);
+}
+
+TEST(SparseFIRFilterTest, FilterUsedAsInputShifting) {
+  const float kCoeff = 1.f;
+  const size_t kNumCoeff = 1;
+  const size_t kSparsity = 1;
+  const size_t kOffset = 4;
+  float output[arraysize(kInput)];
+  SparseFIRFilter filter(&kCoeff, kNumCoeff, kSparsity, kOffset);
+  filter.Filter(kInput, arraysize(kInput), output);
+  EXPECT_FLOAT_EQ(0.f, output[0]);
+  EXPECT_FLOAT_EQ(0.f, output[3]);
+  EXPECT_FLOAT_EQ(1.f, output[4]);
+  EXPECT_FLOAT_EQ(2.f, output[5]);
+  EXPECT_FLOAT_EQ(6.f, output[arraysize(kInput) - 1]);
+}
+
+TEST(SparseFIRFilterTest, FilterUsedAsArbitraryWeighting) {
+  const size_t kSparsity = 2;
+  const size_t kOffset = 1;
+  float output[arraysize(kInput)];
+  SparseFIRFilter filter(kCoeffs, arraysize(kCoeffs), kSparsity, kOffset);
+  filter.Filter(kInput, arraysize(kInput), output);
+  EXPECT_FLOAT_EQ(0.f, output[0]);
+  EXPECT_FLOAT_EQ(0.9f, output[3]);
+  EXPECT_FLOAT_EQ(1.4f, output[4]);
+  EXPECT_FLOAT_EQ(2.4f, output[5]);
+  EXPECT_FLOAT_EQ(8.61f, output[arraysize(kInput) - 1]);
+}
+
+TEST(SparseFIRFilterTest, FilterInLengthLesserOrEqualToCoefficientsLength) {
+  const size_t kSparsity = 1;
+  const size_t kOffset = 0;
+  float output[arraysize(kInput)];
+  SparseFIRFilter filter(kCoeffs, arraysize(kCoeffs), kSparsity, kOffset);
+  filter.Filter(kInput, 2, output);
+  EXPECT_FLOAT_EQ(0.2f, output[0]);
+  EXPECT_FLOAT_EQ(0.7f, output[1]);
+}
+
+TEST(SparseFIRFilterTest, MultipleFilterCalls) {
+  const size_t kSparsity = 1;
+  const size_t kOffset = 0;
+  float output[arraysize(kInput)];
+  SparseFIRFilter filter(kCoeffs, arraysize(kCoeffs), kSparsity, kOffset);
+  filter.Filter(kInput, 2, output);
+  EXPECT_FLOAT_EQ(0.2f, output[0]);
+  EXPECT_FLOAT_EQ(0.7f, output[1]);
+  filter.Filter(kInput, 2, output);
+  EXPECT_FLOAT_EQ(1.3f, output[0]);
+  EXPECT_FLOAT_EQ(2.4f, output[1]);
+  filter.Filter(kInput, 2, output);
+  EXPECT_FLOAT_EQ(2.81f, output[0]);
+  EXPECT_FLOAT_EQ(2.62f, output[1]);
+  filter.Filter(kInput, 2, output);
+  EXPECT_FLOAT_EQ(2.81f, output[0]);
+  EXPECT_FLOAT_EQ(2.62f, output[1]);
+  filter.Filter(&kInput[3], 3, output);
+  EXPECT_FLOAT_EQ(3.41f, output[0]);
+  EXPECT_FLOAT_EQ(4.12f, output[1]);
+  EXPECT_FLOAT_EQ(6.21f, output[2]);
+  filter.Filter(&kInput[3], 3, output);
+  EXPECT_FLOAT_EQ(8.12f, output[0]);
+  EXPECT_FLOAT_EQ(9.14f, output[1]);
+  EXPECT_FLOAT_EQ(9.45f, output[2]);
+}
+
+TEST(SparseFIRFilterTest, VerifySampleBasedVsBlockBasedFiltering) {
+  const size_t kSparsity = 3;
+  const size_t kOffset = 1;
+  float output_block_based[arraysize(kInput)];
+  SparseFIRFilter filter_block(kCoeffs,
+                               arraysize(kCoeffs),
+                               kSparsity,
+                               kOffset);
+  filter_block.Filter(kInput, arraysize(kInput), output_block_based);
+  float output_sample_based[arraysize(kInput)];
+  SparseFIRFilter filter_sample(kCoeffs,
+                                arraysize(kCoeffs),
+                                kSparsity,
+                                kOffset);
+  for (size_t i = 0; i < arraysize(kInput); ++i)
+    filter_sample.Filter(&kInput[i], 1, &output_sample_based[i]);
+  VerifyOutput(output_block_based, output_sample_based);
+}
+
+TEST(SparseFIRFilterTest, SimpleHighPassFilter) {
+  const size_t kSparsity = 2;
+  const size_t kOffset = 2;
+  const float kHPCoeffs[] = {1.f, -1.f};
+  const float kConstantInput[] =
+      {1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f};
+  float output[arraysize(kConstantInput)];
+  SparseFIRFilter filter(kHPCoeffs, arraysize(kHPCoeffs), kSparsity, kOffset);
+  filter.Filter(kConstantInput, arraysize(kConstantInput), output);
+  EXPECT_FLOAT_EQ(0.f, output[0]);
+  EXPECT_FLOAT_EQ(0.f, output[1]);
+  EXPECT_FLOAT_EQ(1.f, output[2]);
+  EXPECT_FLOAT_EQ(1.f, output[3]);
+  for (size_t i = kSparsity + kOffset; i < arraysize(kConstantInput); ++i)
+    EXPECT_FLOAT_EQ(0.f, output[i]);
+}
+
+TEST(SparseFIRFilterTest, SimpleLowPassFilter) {
+  const size_t kSparsity = 2;
+  const size_t kOffset = 2;
+  const float kLPCoeffs[] = {1.f, 1.f};
+  const float kHighFrequencyInput[] =
+      {1.f, 1.f, -1.f, -1.f, 1.f, 1.f, -1.f, -1.f, 1.f, 1.f};
+  float output[arraysize(kHighFrequencyInput)];
+  SparseFIRFilter filter(kLPCoeffs, arraysize(kLPCoeffs), kSparsity, kOffset);
+  filter.Filter(kHighFrequencyInput, arraysize(kHighFrequencyInput), output);
+  EXPECT_FLOAT_EQ(0.f, output[0]);
+  EXPECT_FLOAT_EQ(0.f, output[1]);
+  EXPECT_FLOAT_EQ(1.f, output[2]);
+  EXPECT_FLOAT_EQ(1.f, output[3]);
+  for (size_t i = kSparsity + kOffset; i < arraysize(kHighFrequencyInput); ++i)
+    EXPECT_FLOAT_EQ(0.f, output[i]);
+}
+
+TEST(SparseFIRFilterTest, SameOutputWhenSwappedCoefficientsAndInput) {
+  const size_t kSparsity = 1;
+  const size_t kOffset = 0;
+  float output[arraysize(kCoeffs)];
+  float output_swapped[arraysize(kCoeffs)];
+  SparseFIRFilter filter(kCoeffs, arraysize(kCoeffs), kSparsity, kOffset);
+  // Use arraysize(kCoeffs) for in_length to get same-length outputs.
+  filter.Filter(kInput, arraysize(kCoeffs), output);
+  SparseFIRFilter filter_swapped(kInput,
+                                 arraysize(kCoeffs),
+                                 kSparsity,
+                                 kOffset);
+  filter_swapped.Filter(kCoeffs, arraysize(kCoeffs), output_swapped);
+  VerifyOutput(output, output_swapped);
+}
+
+TEST(SparseFIRFilterTest, SameOutputAsFIRFilterWhenSparsityOneAndOffsetZero) {
+  const size_t kSparsity = 1;
+  const size_t kOffset = 0;
+  float output[arraysize(kInput)];
+  float sparse_output[arraysize(kInput)];
+  std::unique_ptr<FIRFilter> filter(
+      CreateFirFilter(kCoeffs, arraysize(kCoeffs), arraysize(kInput)));
+  SparseFIRFilter sparse_filter(kCoeffs,
+                                arraysize(kCoeffs),
+                                kSparsity,
+                                kOffset);
+  filter->Filter(kInput, arraysize(kInput), output);
+  sparse_filter.Filter(kInput, arraysize(kInput), sparse_output);
+  for (size_t i = 0; i < arraysize(kInput); ++i) {
+    EXPECT_FLOAT_EQ(output[i], sparse_output[i]);
+  }
+}
+
+}  // namespace webrtc
diff --git a/common_audio/vad/include/vad.h b/common_audio/vad/include/vad.h
new file mode 100644
index 0000000..bd10756
--- /dev/null
+++ b/common_audio/vad/include/vad.h
@@ -0,0 +1,51 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_VAD_INCLUDE_VAD_H_
+#define COMMON_AUDIO_VAD_INCLUDE_VAD_H_
+
+#include <memory>
+
+#include "common_audio/vad/include/webrtc_vad.h"
+#include "rtc_base/checks.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class Vad {
+ public:
+  enum Aggressiveness {
+    kVadNormal = 0,
+    kVadLowBitrate = 1,
+    kVadAggressive = 2,
+    kVadVeryAggressive = 3
+  };
+
+  enum Activity { kPassive = 0, kActive = 1, kError = -1 };
+
+  virtual ~Vad() = default;
+
+  // Calculates a VAD decision for the given audio frame. Valid sample rates
+  // are 8000, 16000, and 32000 Hz; the number of samples must be such that the
+  // frame is 10, 20, or 30 ms long.
+  virtual Activity VoiceActivity(const int16_t* audio,
+                                 size_t num_samples,
+                                 int sample_rate_hz) = 0;
+
+  // Resets VAD state.
+  virtual void Reset() = 0;
+};
+
+// Returns a Vad instance that's implemented on top of WebRtcVad.
+std::unique_ptr<Vad> CreateVad(Vad::Aggressiveness aggressiveness);
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_VAD_INCLUDE_VAD_H_
diff --git a/common_audio/vad/include/webrtc_vad.h b/common_audio/vad/include/webrtc_vad.h
new file mode 100644
index 0000000..7d71b9b
--- /dev/null
+++ b/common_audio/vad/include/webrtc_vad.h
@@ -0,0 +1,86 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This header file includes the VAD API calls. Specific function calls are given below.
+ */
+
+#ifndef COMMON_AUDIO_VAD_INCLUDE_WEBRTC_VAD_H_  // NOLINT
+#define COMMON_AUDIO_VAD_INCLUDE_WEBRTC_VAD_H_
+
+#include <stddef.h>
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+typedef struct WebRtcVadInst VadInst;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Creates an instance to the VAD structure.
+VadInst* WebRtcVad_Create();
+
+// Frees the dynamic memory of a specified VAD instance.
+//
+// - handle [i] : Pointer to VAD instance that should be freed.
+void WebRtcVad_Free(VadInst* handle);
+
+// Initializes a VAD instance.
+//
+// - handle [i/o] : Instance that should be initialized.
+//
+// returns        : 0 - (OK),
+//                 -1 - (null pointer or Default mode could not be set).
+int WebRtcVad_Init(VadInst* handle);
+
+// Sets the VAD operating mode. A more aggressive (higher mode) VAD is more
+// restrictive in reporting speech. Put in other words the probability of being
+// speech when the VAD returns 1 is increased with increasing mode. As a
+// consequence also the missed detection rate goes up.
+//
+// - handle [i/o] : VAD instance.
+// - mode   [i]   : Aggressiveness mode (0, 1, 2, or 3).
+//
+// returns        : 0 - (OK),
+//                 -1 - (null pointer, mode could not be set or the VAD instance
+//                       has not been initialized).
+int WebRtcVad_set_mode(VadInst* handle, int mode);
+
+// Calculates a VAD decision for the |audio_frame|. For valid sampling rates
+// frame lengths, see the description of WebRtcVad_ValidRatesAndFrameLengths().
+//
+// - handle       [i/o] : VAD Instance. Needs to be initialized by
+//                        WebRtcVad_Init() before call.
+// - fs           [i]   : Sampling frequency (Hz): 8000, 16000, or 32000
+// - audio_frame  [i]   : Audio frame buffer.
+// - frame_length [i]   : Length of audio frame buffer in number of samples.
+//
+// returns              : 1 - (Active Voice),
+//                        0 - (Non-active Voice),
+//                       -1 - (Error)
+int WebRtcVad_Process(VadInst* handle, int fs, const int16_t* audio_frame,
+                      size_t frame_length);
+
+// Checks for valid combinations of |rate| and |frame_length|. We support 10,
+// 20 and 30 ms frames and the rates 8000, 16000 and 32000 Hz.
+//
+// - rate         [i] : Sampling frequency (Hz).
+// - frame_length [i] : Speech frame buffer length in number of samples.
+//
+// returns            : 0 - (valid combination), -1 - (invalid combination)
+int WebRtcVad_ValidRateAndFrameLength(int rate, size_t frame_length);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // COMMON_AUDIO_VAD_INCLUDE_WEBRTC_VAD_H_  // NOLINT
diff --git a/common_audio/vad/mock/mock_vad.h b/common_audio/vad/mock/mock_vad.h
new file mode 100644
index 0000000..afe80ef
--- /dev/null
+++ b/common_audio/vad/mock/mock_vad.h
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_VAD_MOCK_MOCK_VAD_H_
+#define COMMON_AUDIO_VAD_MOCK_MOCK_VAD_H_
+
+#include "common_audio/vad/include/vad.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockVad : public Vad {
+ public:
+  virtual ~MockVad() { Die(); }
+  MOCK_METHOD0(Die, void());
+
+  MOCK_METHOD3(VoiceActivity,
+               enum Activity(const int16_t* audio,
+                             size_t num_samples,
+                             int sample_rate_hz));
+  MOCK_METHOD0(Reset, void());
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_VAD_MOCK_MOCK_VAD_H_
diff --git a/common_audio/vad/vad.cc b/common_audio/vad/vad.cc
new file mode 100644
index 0000000..1cb332a
--- /dev/null
+++ b/common_audio/vad/vad.cc
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/vad/include/vad.h"
+
+#include <memory>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+class VadImpl final : public Vad {
+ public:
+  explicit VadImpl(Aggressiveness aggressiveness)
+      : handle_(nullptr), aggressiveness_(aggressiveness) {
+    Reset();
+  }
+
+  ~VadImpl() override { WebRtcVad_Free(handle_); }
+
+  Activity VoiceActivity(const int16_t* audio,
+                         size_t num_samples,
+                         int sample_rate_hz) override {
+    int ret = WebRtcVad_Process(handle_, sample_rate_hz, audio, num_samples);
+    switch (ret) {
+      case 0:
+        return kPassive;
+      case 1:
+        return kActive;
+      default:
+        RTC_NOTREACHED() << "WebRtcVad_Process returned an error.";
+        return kError;
+    }
+  }
+
+  void Reset() override {
+    if (handle_)
+      WebRtcVad_Free(handle_);
+    handle_ = WebRtcVad_Create();
+    RTC_CHECK(handle_);
+    RTC_CHECK_EQ(WebRtcVad_Init(handle_), 0);
+    RTC_CHECK_EQ(WebRtcVad_set_mode(handle_, aggressiveness_), 0);
+  }
+
+ private:
+  VadInst* handle_;
+  Aggressiveness aggressiveness_;
+};
+
+}  // namespace
+
+std::unique_ptr<Vad> CreateVad(Vad::Aggressiveness aggressiveness) {
+  return std::unique_ptr<Vad>(new VadImpl(aggressiveness));
+}
+
+}  // namespace webrtc
diff --git a/common_audio/vad/vad_core.c b/common_audio/vad/vad_core.c
new file mode 100644
index 0000000..7316b45
--- /dev/null
+++ b/common_audio/vad/vad_core.c
@@ -0,0 +1,686 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/vad/vad_core.h"
+
+#include "rtc_base/sanitizer.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "common_audio/vad/vad_filterbank.h"
+#include "common_audio/vad/vad_gmm.h"
+#include "common_audio/vad/vad_sp.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+// Spectrum Weighting
+static const int16_t kSpectrumWeight[kNumChannels] = { 6, 8, 10, 12, 14, 16 };
+static const int16_t kNoiseUpdateConst = 655; // Q15
+static const int16_t kSpeechUpdateConst = 6554; // Q15
+static const int16_t kBackEta = 154; // Q8
+// Minimum difference between the two models, Q5
+static const int16_t kMinimumDifference[kNumChannels] = {
+    544, 544, 576, 576, 576, 576 };
+// Upper limit of mean value for speech model, Q7
+static const int16_t kMaximumSpeech[kNumChannels] = {
+    11392, 11392, 11520, 11520, 11520, 11520 };
+// Minimum value for mean value
+static const int16_t kMinimumMean[kNumGaussians] = { 640, 768 };
+// Upper limit of mean value for noise model, Q7
+static const int16_t kMaximumNoise[kNumChannels] = {
+    9216, 9088, 8960, 8832, 8704, 8576 };
+// Start values for the Gaussian models, Q7
+// Weights for the two Gaussians for the six channels (noise)
+static const int16_t kNoiseDataWeights[kTableSize] = {
+    34, 62, 72, 66, 53, 25, 94, 66, 56, 62, 75, 103 };
+// Weights for the two Gaussians for the six channels (speech)
+static const int16_t kSpeechDataWeights[kTableSize] = {
+    48, 82, 45, 87, 50, 47, 80, 46, 83, 41, 78, 81 };
+// Means for the two Gaussians for the six channels (noise)
+static const int16_t kNoiseDataMeans[kTableSize] = {
+    6738, 4892, 7065, 6715, 6771, 3369, 7646, 3863, 7820, 7266, 5020, 4362 };
+// Means for the two Gaussians for the six channels (speech)
+static const int16_t kSpeechDataMeans[kTableSize] = {
+    8306, 10085, 10078, 11823, 11843, 6309, 9473, 9571, 10879, 7581, 8180, 7483
+};
+// Stds for the two Gaussians for the six channels (noise)
+static const int16_t kNoiseDataStds[kTableSize] = {
+    378, 1064, 493, 582, 688, 593, 474, 697, 475, 688, 421, 455 };
+// Stds for the two Gaussians for the six channels (speech)
+static const int16_t kSpeechDataStds[kTableSize] = {
+    555, 505, 567, 524, 585, 1231, 509, 828, 492, 1540, 1079, 850 };
+
+// Constants used in GmmProbability().
+//
+// Maximum number of counted speech (VAD = 1) frames in a row.
+static const int16_t kMaxSpeechFrames = 6;
+// Minimum standard deviation for both speech and noise.
+static const int16_t kMinStd = 384;
+
+// Constants in WebRtcVad_InitCore().
+// Default aggressiveness mode.
+static const short kDefaultMode = 0;
+static const int kInitCheck = 42;
+
+// Constants used in WebRtcVad_set_mode_core().
+//
+// Thresholds for different frame lengths (10 ms, 20 ms and 30 ms).
+//
+// Mode 0, Quality.
+static const int16_t kOverHangMax1Q[3] = { 8, 4, 3 };
+static const int16_t kOverHangMax2Q[3] = { 14, 7, 5 };
+static const int16_t kLocalThresholdQ[3] = { 24, 21, 24 };
+static const int16_t kGlobalThresholdQ[3] = { 57, 48, 57 };
+// Mode 1, Low bitrate.
+static const int16_t kOverHangMax1LBR[3] = { 8, 4, 3 };
+static const int16_t kOverHangMax2LBR[3] = { 14, 7, 5 };
+static const int16_t kLocalThresholdLBR[3] = { 37, 32, 37 };
+static const int16_t kGlobalThresholdLBR[3] = { 100, 80, 100 };
+// Mode 2, Aggressive.
+static const int16_t kOverHangMax1AGG[3] = { 6, 3, 2 };
+static const int16_t kOverHangMax2AGG[3] = { 9, 5, 3 };
+static const int16_t kLocalThresholdAGG[3] = { 82, 78, 82 };
+static const int16_t kGlobalThresholdAGG[3] = { 285, 260, 285 };
+// Mode 3, Very aggressive.
+static const int16_t kOverHangMax1VAG[3] = { 6, 3, 2 };
+static const int16_t kOverHangMax2VAG[3] = { 9, 5, 3 };
+static const int16_t kLocalThresholdVAG[3] = { 94, 94, 94 };
+static const int16_t kGlobalThresholdVAG[3] = { 1100, 1050, 1100 };
+
+// Calculates the weighted average w.r.t. number of Gaussians. The |data| are
+// updated with an |offset| before averaging.
+//
+// - data     [i/o] : Data to average.
+// - offset   [i]   : An offset added to |data|.
+// - weights  [i]   : Weights used for averaging.
+//
+// returns          : The weighted average.
+static int32_t WeightedAverage(int16_t* data, int16_t offset,
+                               const int16_t* weights) {
+  int k;
+  int32_t weighted_average = 0;
+
+  for (k = 0; k < kNumGaussians; k++) {
+    data[k * kNumChannels] += offset;
+    weighted_average += data[k * kNumChannels] * weights[k * kNumChannels];
+  }
+  return weighted_average;
+}
+
+// An s16 x s32 -> s32 multiplication that's allowed to overflow. (It's still
+// undefined behavior, so not a good idea; this just makes UBSan ignore the
+// violation, so that our old code can continue to do what it's always been
+// doing.)
+static inline int32_t RTC_NO_SANITIZE("signed-integer-overflow")
+    OverflowingMulS16ByS32ToS32(int16_t a, int32_t b) {
+  return a * b;
+}
+
+// Calculates the probabilities for both speech and background noise using
+// Gaussian Mixture Models (GMM). A hypothesis-test is performed to decide which
+// type of signal is most probable.
+//
+// - self           [i/o] : Pointer to VAD instance
+// - features       [i]   : Feature vector of length |kNumChannels|
+//                          = log10(energy in frequency band)
+// - total_power    [i]   : Total power in audio frame.
+// - frame_length   [i]   : Number of input samples
+//
+// - returns              : the VAD decision (0 - noise, 1 - speech).
+static int16_t GmmProbability(VadInstT* self, int16_t* features,
+                              int16_t total_power, size_t frame_length) {
+  int channel, k;
+  int16_t feature_minimum;
+  int16_t h0, h1;
+  int16_t log_likelihood_ratio;
+  int16_t vadflag = 0;
+  int16_t shifts_h0, shifts_h1;
+  int16_t tmp_s16, tmp1_s16, tmp2_s16;
+  int16_t diff;
+  int gaussian;
+  int16_t nmk, nmk2, nmk3, smk, smk2, nsk, ssk;
+  int16_t delt, ndelt;
+  int16_t maxspe, maxmu;
+  int16_t deltaN[kTableSize], deltaS[kTableSize];
+  int16_t ngprvec[kTableSize] = { 0 };  // Conditional probability = 0.
+  int16_t sgprvec[kTableSize] = { 0 };  // Conditional probability = 0.
+  int32_t h0_test, h1_test;
+  int32_t tmp1_s32, tmp2_s32;
+  int32_t sum_log_likelihood_ratios = 0;
+  int32_t noise_global_mean, speech_global_mean;
+  int32_t noise_probability[kNumGaussians], speech_probability[kNumGaussians];
+  int16_t overhead1, overhead2, individualTest, totalTest;
+
+  // Set various thresholds based on frame lengths (80, 160 or 240 samples).
+  if (frame_length == 80) {
+    overhead1 = self->over_hang_max_1[0];
+    overhead2 = self->over_hang_max_2[0];
+    individualTest = self->individual[0];
+    totalTest = self->total[0];
+  } else if (frame_length == 160) {
+    overhead1 = self->over_hang_max_1[1];
+    overhead2 = self->over_hang_max_2[1];
+    individualTest = self->individual[1];
+    totalTest = self->total[1];
+  } else {
+    overhead1 = self->over_hang_max_1[2];
+    overhead2 = self->over_hang_max_2[2];
+    individualTest = self->individual[2];
+    totalTest = self->total[2];
+  }
+
+  if (total_power > kMinEnergy) {
+    // The signal power of current frame is large enough for processing. The
+    // processing consists of two parts:
+    // 1) Calculating the likelihood of speech and thereby a VAD decision.
+    // 2) Updating the underlying model, w.r.t., the decision made.
+
+    // The detection scheme is an LRT with hypothesis
+    // H0: Noise
+    // H1: Speech
+    //
+    // We combine a global LRT with local tests, for each frequency sub-band,
+    // here defined as |channel|.
+    for (channel = 0; channel < kNumChannels; channel++) {
+      // For each channel we model the probability with a GMM consisting of
+      // |kNumGaussians|, with different means and standard deviations depending
+      // on H0 or H1.
+      h0_test = 0;
+      h1_test = 0;
+      for (k = 0; k < kNumGaussians; k++) {
+        gaussian = channel + k * kNumChannels;
+        // Probability under H0, that is, probability of frame being noise.
+        // Value given in Q27 = Q7 * Q20.
+        tmp1_s32 = WebRtcVad_GaussianProbability(features[channel],
+                                                 self->noise_means[gaussian],
+                                                 self->noise_stds[gaussian],
+                                                 &deltaN[gaussian]);
+        noise_probability[k] = kNoiseDataWeights[gaussian] * tmp1_s32;
+        h0_test += noise_probability[k];  // Q27
+
+        // Probability under H1, that is, probability of frame being speech.
+        // Value given in Q27 = Q7 * Q20.
+        tmp1_s32 = WebRtcVad_GaussianProbability(features[channel],
+                                                 self->speech_means[gaussian],
+                                                 self->speech_stds[gaussian],
+                                                 &deltaS[gaussian]);
+        speech_probability[k] = kSpeechDataWeights[gaussian] * tmp1_s32;
+        h1_test += speech_probability[k];  // Q27
+      }
+
+      // Calculate the log likelihood ratio: log2(Pr{X|H1} / Pr{X|H1}).
+      // Approximation:
+      // log2(Pr{X|H1} / Pr{X|H1}) = log2(Pr{X|H1}*2^Q) - log2(Pr{X|H1}*2^Q)
+      //                           = log2(h1_test) - log2(h0_test)
+      //                           = log2(2^(31-shifts_h1)*(1+b1))
+      //                             - log2(2^(31-shifts_h0)*(1+b0))
+      //                           = shifts_h0 - shifts_h1
+      //                             + log2(1+b1) - log2(1+b0)
+      //                          ~= shifts_h0 - shifts_h1
+      //
+      // Note that b0 and b1 are values less than 1, hence, 0 <= log2(1+b0) < 1.
+      // Further, b0 and b1 are independent and on the average the two terms
+      // cancel.
+      shifts_h0 = WebRtcSpl_NormW32(h0_test);
+      shifts_h1 = WebRtcSpl_NormW32(h1_test);
+      if (h0_test == 0) {
+        shifts_h0 = 31;
+      }
+      if (h1_test == 0) {
+        shifts_h1 = 31;
+      }
+      log_likelihood_ratio = shifts_h0 - shifts_h1;
+
+      // Update |sum_log_likelihood_ratios| with spectrum weighting. This is
+      // used for the global VAD decision.
+      sum_log_likelihood_ratios +=
+          (int32_t) (log_likelihood_ratio * kSpectrumWeight[channel]);
+
+      // Local VAD decision.
+      if ((log_likelihood_ratio * 4) > individualTest) {
+        vadflag = 1;
+      }
+
+      // TODO(bjornv): The conditional probabilities below are applied on the
+      // hard coded number of Gaussians set to two. Find a way to generalize.
+      // Calculate local noise probabilities used later when updating the GMM.
+      h0 = (int16_t) (h0_test >> 12);  // Q15
+      if (h0 > 0) {
+        // High probability of noise. Assign conditional probabilities for each
+        // Gaussian in the GMM.
+        tmp1_s32 = (noise_probability[0] & 0xFFFFF000) << 2;  // Q29
+        ngprvec[channel] = (int16_t) WebRtcSpl_DivW32W16(tmp1_s32, h0);  // Q14
+        ngprvec[channel + kNumChannels] = 16384 - ngprvec[channel];
+      } else {
+        // Low noise probability. Assign conditional probability 1 to the first
+        // Gaussian and 0 to the rest (which is already set at initialization).
+        ngprvec[channel] = 16384;
+      }
+
+      // Calculate local speech probabilities used later when updating the GMM.
+      h1 = (int16_t) (h1_test >> 12);  // Q15
+      if (h1 > 0) {
+        // High probability of speech. Assign conditional probabilities for each
+        // Gaussian in the GMM. Otherwise use the initialized values, i.e., 0.
+        tmp1_s32 = (speech_probability[0] & 0xFFFFF000) << 2;  // Q29
+        sgprvec[channel] = (int16_t) WebRtcSpl_DivW32W16(tmp1_s32, h1);  // Q14
+        sgprvec[channel + kNumChannels] = 16384 - sgprvec[channel];
+      }
+    }
+
+    // Make a global VAD decision.
+    vadflag |= (sum_log_likelihood_ratios >= totalTest);
+
+    // Update the model parameters.
+    maxspe = 12800;
+    for (channel = 0; channel < kNumChannels; channel++) {
+
+      // Get minimum value in past which is used for long term correction in Q4.
+      feature_minimum = WebRtcVad_FindMinimum(self, features[channel], channel);
+
+      // Compute the "global" mean, that is the sum of the two means weighted.
+      noise_global_mean = WeightedAverage(&self->noise_means[channel], 0,
+                                          &kNoiseDataWeights[channel]);
+      tmp1_s16 = (int16_t) (noise_global_mean >> 6);  // Q8
+
+      for (k = 0; k < kNumGaussians; k++) {
+        gaussian = channel + k * kNumChannels;
+
+        nmk = self->noise_means[gaussian];
+        smk = self->speech_means[gaussian];
+        nsk = self->noise_stds[gaussian];
+        ssk = self->speech_stds[gaussian];
+
+        // Update noise mean vector if the frame consists of noise only.
+        nmk2 = nmk;
+        if (!vadflag) {
+          // deltaN = (x-mu)/sigma^2
+          // ngprvec[k] = |noise_probability[k]| /
+          //   (|noise_probability[0]| + |noise_probability[1]|)
+
+          // (Q14 * Q11 >> 11) = Q14.
+          delt = (int16_t)((ngprvec[gaussian] * deltaN[gaussian]) >> 11);
+          // Q7 + (Q14 * Q15 >> 22) = Q7.
+          nmk2 = nmk + (int16_t)((delt * kNoiseUpdateConst) >> 22);
+        }
+
+        // Long term correction of the noise mean.
+        // Q8 - Q8 = Q8.
+        ndelt = (feature_minimum << 4) - tmp1_s16;
+        // Q7 + (Q8 * Q8) >> 9 = Q7.
+        nmk3 = nmk2 + (int16_t)((ndelt * kBackEta) >> 9);
+
+        // Control that the noise mean does not drift to much.
+        tmp_s16 = (int16_t) ((k + 5) << 7);
+        if (nmk3 < tmp_s16) {
+          nmk3 = tmp_s16;
+        }
+        tmp_s16 = (int16_t) ((72 + k - channel) << 7);
+        if (nmk3 > tmp_s16) {
+          nmk3 = tmp_s16;
+        }
+        self->noise_means[gaussian] = nmk3;
+
+        if (vadflag) {
+          // Update speech mean vector:
+          // |deltaS| = (x-mu)/sigma^2
+          // sgprvec[k] = |speech_probability[k]| /
+          //   (|speech_probability[0]| + |speech_probability[1]|)
+
+          // (Q14 * Q11) >> 11 = Q14.
+          delt = (int16_t)((sgprvec[gaussian] * deltaS[gaussian]) >> 11);
+          // Q14 * Q15 >> 21 = Q8.
+          tmp_s16 = (int16_t)((delt * kSpeechUpdateConst) >> 21);
+          // Q7 + (Q8 >> 1) = Q7. With rounding.
+          smk2 = smk + ((tmp_s16 + 1) >> 1);
+
+          // Control that the speech mean does not drift to much.
+          maxmu = maxspe + 640;
+          if (smk2 < kMinimumMean[k]) {
+            smk2 = kMinimumMean[k];
+          }
+          if (smk2 > maxmu) {
+            smk2 = maxmu;
+          }
+          self->speech_means[gaussian] = smk2;  // Q7.
+
+          // (Q7 >> 3) = Q4. With rounding.
+          tmp_s16 = ((smk + 4) >> 3);
+
+          tmp_s16 = features[channel] - tmp_s16;  // Q4
+          // (Q11 * Q4 >> 3) = Q12.
+          tmp1_s32 = (deltaS[gaussian] * tmp_s16) >> 3;
+          tmp2_s32 = tmp1_s32 - 4096;
+          tmp_s16 = sgprvec[gaussian] >> 2;
+          // (Q14 >> 2) * Q12 = Q24.
+          tmp1_s32 = tmp_s16 * tmp2_s32;
+
+          tmp2_s32 = tmp1_s32 >> 4;  // Q20
+
+          // 0.1 * Q20 / Q7 = Q13.
+          if (tmp2_s32 > 0) {
+            tmp_s16 = (int16_t) WebRtcSpl_DivW32W16(tmp2_s32, ssk * 10);
+          } else {
+            tmp_s16 = (int16_t) WebRtcSpl_DivW32W16(-tmp2_s32, ssk * 10);
+            tmp_s16 = -tmp_s16;
+          }
+          // Divide by 4 giving an update factor of 0.025 (= 0.1 / 4).
+          // Note that division by 4 equals shift by 2, hence,
+          // (Q13 >> 8) = (Q13 >> 6) / 4 = Q7.
+          tmp_s16 += 128;  // Rounding.
+          ssk += (tmp_s16 >> 8);
+          if (ssk < kMinStd) {
+            ssk = kMinStd;
+          }
+          self->speech_stds[gaussian] = ssk;
+        } else {
+          // Update GMM variance vectors.
+          // deltaN * (features[channel] - nmk) - 1
+          // Q4 - (Q7 >> 3) = Q4.
+          tmp_s16 = features[channel] - (nmk >> 3);
+          // (Q11 * Q4 >> 3) = Q12.
+          tmp1_s32 = (deltaN[gaussian] * tmp_s16) >> 3;
+          tmp1_s32 -= 4096;
+
+          // (Q14 >> 2) * Q12 = Q24.
+          tmp_s16 = (ngprvec[gaussian] + 2) >> 2;
+          tmp2_s32 = OverflowingMulS16ByS32ToS32(tmp_s16, tmp1_s32);
+          // Q20  * approx 0.001 (2^-10=0.0009766), hence,
+          // (Q24 >> 14) = (Q24 >> 4) / 2^10 = Q20.
+          tmp1_s32 = tmp2_s32 >> 14;
+
+          // Q20 / Q7 = Q13.
+          if (tmp1_s32 > 0) {
+            tmp_s16 = (int16_t) WebRtcSpl_DivW32W16(tmp1_s32, nsk);
+          } else {
+            tmp_s16 = (int16_t) WebRtcSpl_DivW32W16(-tmp1_s32, nsk);
+            tmp_s16 = -tmp_s16;
+          }
+          tmp_s16 += 32;  // Rounding
+          nsk += tmp_s16 >> 6;  // Q13 >> 6 = Q7.
+          if (nsk < kMinStd) {
+            nsk = kMinStd;
+          }
+          self->noise_stds[gaussian] = nsk;
+        }
+      }
+
+      // Separate models if they are too close.
+      // |noise_global_mean| in Q14 (= Q7 * Q7).
+      noise_global_mean = WeightedAverage(&self->noise_means[channel], 0,
+                                          &kNoiseDataWeights[channel]);
+
+      // |speech_global_mean| in Q14 (= Q7 * Q7).
+      speech_global_mean = WeightedAverage(&self->speech_means[channel], 0,
+                                           &kSpeechDataWeights[channel]);
+
+      // |diff| = "global" speech mean - "global" noise mean.
+      // (Q14 >> 9) - (Q14 >> 9) = Q5.
+      diff = (int16_t) (speech_global_mean >> 9) -
+          (int16_t) (noise_global_mean >> 9);
+      if (diff < kMinimumDifference[channel]) {
+        tmp_s16 = kMinimumDifference[channel] - diff;
+
+        // |tmp1_s16| = ~0.8 * (kMinimumDifference - diff) in Q7.
+        // |tmp2_s16| = ~0.2 * (kMinimumDifference - diff) in Q7.
+        tmp1_s16 = (int16_t)((13 * tmp_s16) >> 2);
+        tmp2_s16 = (int16_t)((3 * tmp_s16) >> 2);
+
+        // Move Gaussian means for speech model by |tmp1_s16| and update
+        // |speech_global_mean|. Note that |self->speech_means[channel]| is
+        // changed after the call.
+        speech_global_mean = WeightedAverage(&self->speech_means[channel],
+                                             tmp1_s16,
+                                             &kSpeechDataWeights[channel]);
+
+        // Move Gaussian means for noise model by -|tmp2_s16| and update
+        // |noise_global_mean|. Note that |self->noise_means[channel]| is
+        // changed after the call.
+        noise_global_mean = WeightedAverage(&self->noise_means[channel],
+                                            -tmp2_s16,
+                                            &kNoiseDataWeights[channel]);
+      }
+
+      // Control that the speech & noise means do not drift to much.
+      maxspe = kMaximumSpeech[channel];
+      tmp2_s16 = (int16_t) (speech_global_mean >> 7);
+      if (tmp2_s16 > maxspe) {
+        // Upper limit of speech model.
+        tmp2_s16 -= maxspe;
+
+        for (k = 0; k < kNumGaussians; k++) {
+          self->speech_means[channel + k * kNumChannels] -= tmp2_s16;
+        }
+      }
+
+      tmp2_s16 = (int16_t) (noise_global_mean >> 7);
+      if (tmp2_s16 > kMaximumNoise[channel]) {
+        tmp2_s16 -= kMaximumNoise[channel];
+
+        for (k = 0; k < kNumGaussians; k++) {
+          self->noise_means[channel + k * kNumChannels] -= tmp2_s16;
+        }
+      }
+    }
+    self->frame_counter++;
+  }
+
+  // Smooth with respect to transition hysteresis.
+  if (!vadflag) {
+    if (self->over_hang > 0) {
+      vadflag = 2 + self->over_hang;
+      self->over_hang--;
+    }
+    self->num_of_speech = 0;
+  } else {
+    self->num_of_speech++;
+    if (self->num_of_speech > kMaxSpeechFrames) {
+      self->num_of_speech = kMaxSpeechFrames;
+      self->over_hang = overhead2;
+    } else {
+      self->over_hang = overhead1;
+    }
+  }
+  return vadflag;
+}
+
+// Initialize the VAD. Set aggressiveness mode to default value.
+int WebRtcVad_InitCore(VadInstT* self) {
+  int i;
+
+  if (self == NULL) {
+    return -1;
+  }
+
+  // Initialization of general struct variables.
+  self->vad = 1;  // Speech active (=1).
+  self->frame_counter = 0;
+  self->over_hang = 0;
+  self->num_of_speech = 0;
+
+  // Initialization of downsampling filter state.
+  memset(self->downsampling_filter_states, 0,
+         sizeof(self->downsampling_filter_states));
+
+  // Initialization of 48 to 8 kHz downsampling.
+  WebRtcSpl_ResetResample48khzTo8khz(&self->state_48_to_8);
+
+  // Read initial PDF parameters.
+  for (i = 0; i < kTableSize; i++) {
+    self->noise_means[i] = kNoiseDataMeans[i];
+    self->speech_means[i] = kSpeechDataMeans[i];
+    self->noise_stds[i] = kNoiseDataStds[i];
+    self->speech_stds[i] = kSpeechDataStds[i];
+  }
+
+  // Initialize Index and Minimum value vectors.
+  for (i = 0; i < 16 * kNumChannels; i++) {
+    self->low_value_vector[i] = 10000;
+    self->index_vector[i] = 0;
+  }
+
+  // Initialize splitting filter states.
+  memset(self->upper_state, 0, sizeof(self->upper_state));
+  memset(self->lower_state, 0, sizeof(self->lower_state));
+
+  // Initialize high pass filter states.
+  memset(self->hp_filter_state, 0, sizeof(self->hp_filter_state));
+
+  // Initialize mean value memory, for WebRtcVad_FindMinimum().
+  for (i = 0; i < kNumChannels; i++) {
+    self->mean_value[i] = 1600;
+  }
+
+  // Set aggressiveness mode to default (=|kDefaultMode|).
+  if (WebRtcVad_set_mode_core(self, kDefaultMode) != 0) {
+    return -1;
+  }
+
+  self->init_flag = kInitCheck;
+
+  return 0;
+}
+
+// Set aggressiveness mode
+int WebRtcVad_set_mode_core(VadInstT* self, int mode) {
+  int return_value = 0;
+
+  switch (mode) {
+    case 0:
+      // Quality mode.
+      memcpy(self->over_hang_max_1, kOverHangMax1Q,
+             sizeof(self->over_hang_max_1));
+      memcpy(self->over_hang_max_2, kOverHangMax2Q,
+             sizeof(self->over_hang_max_2));
+      memcpy(self->individual, kLocalThresholdQ,
+             sizeof(self->individual));
+      memcpy(self->total, kGlobalThresholdQ,
+             sizeof(self->total));
+      break;
+    case 1:
+      // Low bitrate mode.
+      memcpy(self->over_hang_max_1, kOverHangMax1LBR,
+             sizeof(self->over_hang_max_1));
+      memcpy(self->over_hang_max_2, kOverHangMax2LBR,
+             sizeof(self->over_hang_max_2));
+      memcpy(self->individual, kLocalThresholdLBR,
+             sizeof(self->individual));
+      memcpy(self->total, kGlobalThresholdLBR,
+             sizeof(self->total));
+      break;
+    case 2:
+      // Aggressive mode.
+      memcpy(self->over_hang_max_1, kOverHangMax1AGG,
+             sizeof(self->over_hang_max_1));
+      memcpy(self->over_hang_max_2, kOverHangMax2AGG,
+             sizeof(self->over_hang_max_2));
+      memcpy(self->individual, kLocalThresholdAGG,
+             sizeof(self->individual));
+      memcpy(self->total, kGlobalThresholdAGG,
+             sizeof(self->total));
+      break;
+    case 3:
+      // Very aggressive mode.
+      memcpy(self->over_hang_max_1, kOverHangMax1VAG,
+             sizeof(self->over_hang_max_1));
+      memcpy(self->over_hang_max_2, kOverHangMax2VAG,
+             sizeof(self->over_hang_max_2));
+      memcpy(self->individual, kLocalThresholdVAG,
+             sizeof(self->individual));
+      memcpy(self->total, kGlobalThresholdVAG,
+             sizeof(self->total));
+      break;
+    default:
+      return_value = -1;
+      break;
+  }
+
+  return return_value;
+}
+
+// Calculate VAD decision by first extracting feature values and then calculate
+// probability for both speech and background noise.
+
+int WebRtcVad_CalcVad48khz(VadInstT* inst, const int16_t* speech_frame,
+                           size_t frame_length) {
+  int vad;
+  size_t i;
+  int16_t speech_nb[240];  // 30 ms in 8 kHz.
+  // |tmp_mem| is a temporary memory used by resample function, length is
+  // frame length in 10 ms (480 samples) + 256 extra.
+  int32_t tmp_mem[480 + 256] = { 0 };
+  const size_t kFrameLen10ms48khz = 480;
+  const size_t kFrameLen10ms8khz = 80;
+  size_t num_10ms_frames = frame_length / kFrameLen10ms48khz;
+
+  for (i = 0; i < num_10ms_frames; i++) {
+    WebRtcSpl_Resample48khzTo8khz(speech_frame,
+                                  &speech_nb[i * kFrameLen10ms8khz],
+                                  &inst->state_48_to_8,
+                                  tmp_mem);
+  }
+
+  // Do VAD on an 8 kHz signal
+  vad = WebRtcVad_CalcVad8khz(inst, speech_nb, frame_length / 6);
+
+  return vad;
+}
+
+int WebRtcVad_CalcVad32khz(VadInstT* inst, const int16_t* speech_frame,
+                           size_t frame_length)
+{
+    size_t len;
+    int vad;
+    int16_t speechWB[480]; // Downsampled speech frame: 960 samples (30ms in SWB)
+    int16_t speechNB[240]; // Downsampled speech frame: 480 samples (30ms in WB)
+
+
+    // Downsample signal 32->16->8 before doing VAD
+    WebRtcVad_Downsampling(speech_frame, speechWB, &(inst->downsampling_filter_states[2]),
+                           frame_length);
+    len = frame_length / 2;
+
+    WebRtcVad_Downsampling(speechWB, speechNB, inst->downsampling_filter_states, len);
+    len /= 2;
+
+    // Do VAD on an 8 kHz signal
+    vad = WebRtcVad_CalcVad8khz(inst, speechNB, len);
+
+    return vad;
+}
+
+int WebRtcVad_CalcVad16khz(VadInstT* inst, const int16_t* speech_frame,
+                           size_t frame_length)
+{
+    size_t len;
+    int vad;
+    int16_t speechNB[240]; // Downsampled speech frame: 480 samples (30ms in WB)
+
+    // Wideband: Downsample signal before doing VAD
+    WebRtcVad_Downsampling(speech_frame, speechNB, inst->downsampling_filter_states,
+                           frame_length);
+
+    len = frame_length / 2;
+    vad = WebRtcVad_CalcVad8khz(inst, speechNB, len);
+
+    return vad;
+}
+
+int WebRtcVad_CalcVad8khz(VadInstT* inst, const int16_t* speech_frame,
+                          size_t frame_length)
+{
+    int16_t feature_vector[kNumChannels], total_power;
+
+    // Get power in the bands
+    total_power = WebRtcVad_CalculateFeatures(inst, speech_frame, frame_length,
+                                              feature_vector);
+
+    // Make a VAD
+    inst->vad = GmmProbability(inst, feature_vector, total_power, frame_length);
+
+    return inst->vad;
+}
diff --git a/common_audio/vad/vad_core.h b/common_audio/vad/vad_core.h
new file mode 100644
index 0000000..6541819
--- /dev/null
+++ b/common_audio/vad/vad_core.h
@@ -0,0 +1,112 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+/*
+ * This header file includes the descriptions of the core VAD calls.
+ */
+
+#ifndef COMMON_AUDIO_VAD_VAD_CORE_H_
+#define COMMON_AUDIO_VAD_VAD_CORE_H_
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+enum { kNumChannels = 6 };  // Number of frequency bands (named channels).
+enum { kNumGaussians = 2 };  // Number of Gaussians per channel in the GMM.
+enum { kTableSize = kNumChannels * kNumGaussians };
+enum { kMinEnergy = 10 };  // Minimum energy required to trigger audio signal.
+
+typedef struct VadInstT_ {
+    int vad;
+    int32_t downsampling_filter_states[4];
+    WebRtcSpl_State48khzTo8khz state_48_to_8;
+    int16_t noise_means[kTableSize];
+    int16_t speech_means[kTableSize];
+    int16_t noise_stds[kTableSize];
+    int16_t speech_stds[kTableSize];
+    // TODO(bjornv): Change to |frame_count|.
+    int32_t frame_counter;
+    int16_t over_hang;  // Over Hang
+    int16_t num_of_speech;
+    // TODO(bjornv): Change to |age_vector|.
+    int16_t index_vector[16 * kNumChannels];
+    int16_t low_value_vector[16 * kNumChannels];
+    // TODO(bjornv): Change to |median|.
+    int16_t mean_value[kNumChannels];
+    int16_t upper_state[5];
+    int16_t lower_state[5];
+    int16_t hp_filter_state[4];
+    int16_t over_hang_max_1[3];
+    int16_t over_hang_max_2[3];
+    int16_t individual[3];
+    int16_t total[3];
+
+    int init_flag;
+} VadInstT;
+
+// Initializes the core VAD component. The default aggressiveness mode is
+// controlled by |kDefaultMode| in vad_core.c.
+//
+// - self [i/o] : Instance that should be initialized
+//
+// returns      : 0 (OK), -1 (null pointer in or if the default mode can't be
+//                set)
+int WebRtcVad_InitCore(VadInstT* self);
+
+/****************************************************************************
+ * WebRtcVad_set_mode_core(...)
+ *
+ * This function changes the VAD settings
+ *
+ * Input:
+ *      - inst      : VAD instance
+ *      - mode      : Aggressiveness degree
+ *                    0 (High quality) - 3 (Highly aggressive)
+ *
+ * Output:
+ *      - inst      : Changed  instance
+ *
+ * Return value     :  0 - Ok
+ *                    -1 - Error
+ */
+
+int WebRtcVad_set_mode_core(VadInstT* self, int mode);
+
+/****************************************************************************
+ * WebRtcVad_CalcVad48khz(...)
+ * WebRtcVad_CalcVad32khz(...)
+ * WebRtcVad_CalcVad16khz(...)
+ * WebRtcVad_CalcVad8khz(...)
+ *
+ * Calculate probability for active speech and make VAD decision.
+ *
+ * Input:
+ *      - inst          : Instance that should be initialized
+ *      - speech_frame  : Input speech frame
+ *      - frame_length  : Number of input samples
+ *
+ * Output:
+ *      - inst          : Updated filter states etc.
+ *
+ * Return value         : VAD decision
+ *                        0 - No active speech
+ *                        1-6 - Active speech
+ */
+int WebRtcVad_CalcVad48khz(VadInstT* inst, const int16_t* speech_frame,
+                           size_t frame_length);
+int WebRtcVad_CalcVad32khz(VadInstT* inst, const int16_t* speech_frame,
+                           size_t frame_length);
+int WebRtcVad_CalcVad16khz(VadInstT* inst, const int16_t* speech_frame,
+                           size_t frame_length);
+int WebRtcVad_CalcVad8khz(VadInstT* inst, const int16_t* speech_frame,
+                          size_t frame_length);
+
+#endif  // COMMON_AUDIO_VAD_VAD_CORE_H_
diff --git a/common_audio/vad/vad_core_unittest.cc b/common_audio/vad/vad_core_unittest.cc
new file mode 100644
index 0000000..0587878
--- /dev/null
+++ b/common_audio/vad/vad_core_unittest.cc
@@ -0,0 +1,107 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+
+#include "common_audio/vad/vad_unittest.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+extern "C" {
+#include "common_audio/vad/vad_core.h"
+}
+
+namespace webrtc {
+namespace test {
+
+TEST_F(VadTest, InitCore) {
+  // Test WebRtcVad_InitCore().
+  VadInstT* self = reinterpret_cast<VadInstT*>(malloc(sizeof(VadInstT)));
+
+  // null pointer test.
+  EXPECT_EQ(-1, WebRtcVad_InitCore(nullptr));
+
+  // Verify return = 0 for non-null pointer.
+  EXPECT_EQ(0, WebRtcVad_InitCore(self));
+  // Verify init_flag is set.
+  EXPECT_EQ(42, self->init_flag);
+
+  free(self);
+}
+
+TEST_F(VadTest, set_mode_core) {
+  VadInstT* self = reinterpret_cast<VadInstT*>(malloc(sizeof(VadInstT)));
+
+  // TODO(bjornv): Add null pointer check if we take care of it in
+  // vad_core.c
+
+  ASSERT_EQ(0, WebRtcVad_InitCore(self));
+  // Test WebRtcVad_set_mode_core().
+  // Invalid modes should return -1.
+  EXPECT_EQ(-1, WebRtcVad_set_mode_core(self, -1));
+  EXPECT_EQ(-1, WebRtcVad_set_mode_core(self, 1000));
+  // Valid modes should return 0.
+  for (size_t j = 0; j < kModesSize; ++j) {
+    EXPECT_EQ(0, WebRtcVad_set_mode_core(self, kModes[j]));
+  }
+
+  free(self);
+}
+
+TEST_F(VadTest, CalcVad) {
+  VadInstT* self = reinterpret_cast<VadInstT*>(malloc(sizeof(VadInstT)));
+  int16_t speech[kMaxFrameLength];
+
+  // TODO(bjornv): Add null pointer check if we take care of it in
+  // vad_core.c
+
+  // Test WebRtcVad_CalcVadXXkhz()
+  // Verify that all zeros in gives VAD = 0 out.
+  memset(speech, 0, sizeof(speech));
+  ASSERT_EQ(0, WebRtcVad_InitCore(self));
+  for (size_t j = 0; j < kFrameLengthsSize; ++j) {
+    if (ValidRatesAndFrameLengths(8000, kFrameLengths[j])) {
+      EXPECT_EQ(0, WebRtcVad_CalcVad8khz(self, speech, kFrameLengths[j]));
+    }
+    if (ValidRatesAndFrameLengths(16000, kFrameLengths[j])) {
+      EXPECT_EQ(0, WebRtcVad_CalcVad16khz(self, speech, kFrameLengths[j]));
+    }
+    if (ValidRatesAndFrameLengths(32000, kFrameLengths[j])) {
+      EXPECT_EQ(0, WebRtcVad_CalcVad32khz(self, speech, kFrameLengths[j]));
+    }
+    if (ValidRatesAndFrameLengths(48000, kFrameLengths[j])) {
+      EXPECT_EQ(0, WebRtcVad_CalcVad48khz(self, speech, kFrameLengths[j]));
+    }
+  }
+
+  // Construct a speech signal that will trigger the VAD in all modes. It is
+  // known that (i * i) will wrap around, but that doesn't matter in this case.
+  for (size_t i = 0; i < kMaxFrameLength; ++i) {
+    speech[i] = static_cast<int16_t>(i * i);
+  }
+  for (size_t j = 0; j < kFrameLengthsSize; ++j) {
+    if (ValidRatesAndFrameLengths(8000, kFrameLengths[j])) {
+      EXPECT_EQ(1, WebRtcVad_CalcVad8khz(self, speech, kFrameLengths[j]));
+    }
+    if (ValidRatesAndFrameLengths(16000, kFrameLengths[j])) {
+      EXPECT_EQ(1, WebRtcVad_CalcVad16khz(self, speech, kFrameLengths[j]));
+    }
+    if (ValidRatesAndFrameLengths(32000, kFrameLengths[j])) {
+      EXPECT_EQ(1, WebRtcVad_CalcVad32khz(self, speech, kFrameLengths[j]));
+    }
+    if (ValidRatesAndFrameLengths(48000, kFrameLengths[j])) {
+      EXPECT_EQ(1, WebRtcVad_CalcVad48khz(self, speech, kFrameLengths[j]));
+    }
+  }
+
+  free(self);
+}
+}  // namespace test
+}  // namespace webrtc
diff --git a/common_audio/vad/vad_filterbank.c b/common_audio/vad/vad_filterbank.c
new file mode 100644
index 0000000..82cff25
--- /dev/null
+++ b/common_audio/vad/vad_filterbank.c
@@ -0,0 +1,330 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/vad/vad_filterbank.h"
+
+#include "rtc_base/checks.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+// Constants used in LogOfEnergy().
+static const int16_t kLogConst = 24660;  // 160*log10(2) in Q9.
+static const int16_t kLogEnergyIntPart = 14336;  // 14 in Q10
+
+// Coefficients used by HighPassFilter, Q14.
+static const int16_t kHpZeroCoefs[3] = { 6631, -13262, 6631 };
+static const int16_t kHpPoleCoefs[3] = { 16384, -7756, 5620 };
+
+// Allpass filter coefficients, upper and lower, in Q15.
+// Upper: 0.64, Lower: 0.17
+static const int16_t kAllPassCoefsQ15[2] = { 20972, 5571 };
+
+// Adjustment for division with two in SplitFilter.
+static const int16_t kOffsetVector[6] = { 368, 368, 272, 176, 176, 176 };
+
+// High pass filtering, with a cut-off frequency at 80 Hz, if the |data_in| is
+// sampled at 500 Hz.
+//
+// - data_in      [i]   : Input audio data sampled at 500 Hz.
+// - data_length  [i]   : Length of input and output data.
+// - filter_state [i/o] : State of the filter.
+// - data_out     [o]   : Output audio data in the frequency interval
+//                        80 - 250 Hz.
+static void HighPassFilter(const int16_t* data_in, size_t data_length,
+                           int16_t* filter_state, int16_t* data_out) {
+  size_t i;
+  const int16_t* in_ptr = data_in;
+  int16_t* out_ptr = data_out;
+  int32_t tmp32 = 0;
+
+
+  // The sum of the absolute values of the impulse response:
+  // The zero/pole-filter has a max amplification of a single sample of: 1.4546
+  // Impulse response: 0.4047 -0.6179 -0.0266  0.1993  0.1035  -0.0194
+  // The all-zero section has a max amplification of a single sample of: 1.6189
+  // Impulse response: 0.4047 -0.8094  0.4047  0       0        0
+  // The all-pole section has a max amplification of a single sample of: 1.9931
+  // Impulse response: 1.0000  0.4734 -0.1189 -0.2187 -0.0627   0.04532
+
+  for (i = 0; i < data_length; i++) {
+    // All-zero section (filter coefficients in Q14).
+    tmp32 = kHpZeroCoefs[0] * *in_ptr;
+    tmp32 += kHpZeroCoefs[1] * filter_state[0];
+    tmp32 += kHpZeroCoefs[2] * filter_state[1];
+    filter_state[1] = filter_state[0];
+    filter_state[0] = *in_ptr++;
+
+    // All-pole section (filter coefficients in Q14).
+    tmp32 -= kHpPoleCoefs[1] * filter_state[2];
+    tmp32 -= kHpPoleCoefs[2] * filter_state[3];
+    filter_state[3] = filter_state[2];
+    filter_state[2] = (int16_t) (tmp32 >> 14);
+    *out_ptr++ = filter_state[2];
+  }
+}
+
+// All pass filtering of |data_in|, used before splitting the signal into two
+// frequency bands (low pass vs high pass).
+// Note that |data_in| and |data_out| can NOT correspond to the same address.
+//
+// - data_in            [i]   : Input audio signal given in Q0.
+// - data_length        [i]   : Length of input and output data.
+// - filter_coefficient [i]   : Given in Q15.
+// - filter_state       [i/o] : State of the filter given in Q(-1).
+// - data_out           [o]   : Output audio signal given in Q(-1).
+static void AllPassFilter(const int16_t* data_in, size_t data_length,
+                          int16_t filter_coefficient, int16_t* filter_state,
+                          int16_t* data_out) {
+  // The filter can only cause overflow (in the w16 output variable)
+  // if more than 4 consecutive input numbers are of maximum value and
+  // has the the same sign as the impulse responses first taps.
+  // First 6 taps of the impulse response:
+  // 0.6399 0.5905 -0.3779 0.2418 -0.1547 0.0990
+
+  size_t i;
+  int16_t tmp16 = 0;
+  int32_t tmp32 = 0;
+  int32_t state32 = ((int32_t) (*filter_state) * (1 << 16));  // Q15
+
+  for (i = 0; i < data_length; i++) {
+    tmp32 = state32 + filter_coefficient * *data_in;
+    tmp16 = (int16_t) (tmp32 >> 16);  // Q(-1)
+    *data_out++ = tmp16;
+    state32 = (*data_in * (1 << 14)) - filter_coefficient * tmp16;  // Q14
+    state32 *= 2;  // Q15.
+    data_in += 2;
+  }
+
+  *filter_state = (int16_t) (state32 >> 16);  // Q(-1)
+}
+
+// Splits |data_in| into |hp_data_out| and |lp_data_out| corresponding to
+// an upper (high pass) part and a lower (low pass) part respectively.
+//
+// - data_in      [i]   : Input audio data to be split into two frequency bands.
+// - data_length  [i]   : Length of |data_in|.
+// - upper_state  [i/o] : State of the upper filter, given in Q(-1).
+// - lower_state  [i/o] : State of the lower filter, given in Q(-1).
+// - hp_data_out  [o]   : Output audio data of the upper half of the spectrum.
+//                        The length is |data_length| / 2.
+// - lp_data_out  [o]   : Output audio data of the lower half of the spectrum.
+//                        The length is |data_length| / 2.
+static void SplitFilter(const int16_t* data_in, size_t data_length,
+                        int16_t* upper_state, int16_t* lower_state,
+                        int16_t* hp_data_out, int16_t* lp_data_out) {
+  size_t i;
+  size_t half_length = data_length >> 1;  // Downsampling by 2.
+  int16_t tmp_out;
+
+  // All-pass filtering upper branch.
+  AllPassFilter(&data_in[0], half_length, kAllPassCoefsQ15[0], upper_state,
+                hp_data_out);
+
+  // All-pass filtering lower branch.
+  AllPassFilter(&data_in[1], half_length, kAllPassCoefsQ15[1], lower_state,
+                lp_data_out);
+
+  // Make LP and HP signals.
+  for (i = 0; i < half_length; i++) {
+    tmp_out = *hp_data_out;
+    *hp_data_out++ -= *lp_data_out;
+    *lp_data_out++ += tmp_out;
+  }
+}
+
+// Calculates the energy of |data_in| in dB, and also updates an overall
+// |total_energy| if necessary.
+//
+// - data_in      [i]   : Input audio data for energy calculation.
+// - data_length  [i]   : Length of input data.
+// - offset       [i]   : Offset value added to |log_energy|.
+// - total_energy [i/o] : An external energy updated with the energy of
+//                        |data_in|.
+//                        NOTE: |total_energy| is only updated if
+//                        |total_energy| <= |kMinEnergy|.
+// - log_energy   [o]   : 10 * log10("energy of |data_in|") given in Q4.
+static void LogOfEnergy(const int16_t* data_in, size_t data_length,
+                        int16_t offset, int16_t* total_energy,
+                        int16_t* log_energy) {
+  // |tot_rshifts| accumulates the number of right shifts performed on |energy|.
+  int tot_rshifts = 0;
+  // The |energy| will be normalized to 15 bits. We use unsigned integer because
+  // we eventually will mask out the fractional part.
+  uint32_t energy = 0;
+
+  RTC_DCHECK(data_in);
+  RTC_DCHECK_GT(data_length, 0);
+
+  energy = (uint32_t) WebRtcSpl_Energy((int16_t*) data_in, data_length,
+                                       &tot_rshifts);
+
+  if (energy != 0) {
+    // By construction, normalizing to 15 bits is equivalent with 17 leading
+    // zeros of an unsigned 32 bit value.
+    int normalizing_rshifts = 17 - WebRtcSpl_NormU32(energy);
+    // In a 15 bit representation the leading bit is 2^14. log2(2^14) in Q10 is
+    // (14 << 10), which is what we initialize |log2_energy| with. For a more
+    // detailed derivations, see below.
+    int16_t log2_energy = kLogEnergyIntPart;
+
+    tot_rshifts += normalizing_rshifts;
+    // Normalize |energy| to 15 bits.
+    // |tot_rshifts| is now the total number of right shifts performed on
+    // |energy| after normalization. This means that |energy| is in
+    // Q(-tot_rshifts).
+    if (normalizing_rshifts < 0) {
+      energy <<= -normalizing_rshifts;
+    } else {
+      energy >>= normalizing_rshifts;
+    }
+
+    // Calculate the energy of |data_in| in dB, in Q4.
+    //
+    // 10 * log10("true energy") in Q4 = 2^4 * 10 * log10("true energy") =
+    // 160 * log10(|energy| * 2^|tot_rshifts|) =
+    // 160 * log10(2) * log2(|energy| * 2^|tot_rshifts|) =
+    // 160 * log10(2) * (log2(|energy|) + log2(2^|tot_rshifts|)) =
+    // (160 * log10(2)) * (log2(|energy|) + |tot_rshifts|) =
+    // |kLogConst| * (|log2_energy| + |tot_rshifts|)
+    //
+    // We know by construction that |energy| is normalized to 15 bits. Hence,
+    // |energy| = 2^14 + frac_Q15, where frac_Q15 is a fractional part in Q15.
+    // Further, we'd like |log2_energy| in Q10
+    // log2(|energy|) in Q10 = 2^10 * log2(2^14 + frac_Q15) =
+    // 2^10 * log2(2^14 * (1 + frac_Q15 * 2^-14)) =
+    // 2^10 * (14 + log2(1 + frac_Q15 * 2^-14)) ~=
+    // (14 << 10) + 2^10 * (frac_Q15 * 2^-14) =
+    // (14 << 10) + (frac_Q15 * 2^-4) = (14 << 10) + (frac_Q15 >> 4)
+    //
+    // Note that frac_Q15 = (|energy| & 0x00003FFF)
+
+    // Calculate and add the fractional part to |log2_energy|.
+    log2_energy += (int16_t) ((energy & 0x00003FFF) >> 4);
+
+    // |kLogConst| is in Q9, |log2_energy| in Q10 and |tot_rshifts| in Q0.
+    // Note that we in our derivation above have accounted for an output in Q4.
+    *log_energy = (int16_t)(((kLogConst * log2_energy) >> 19) +
+        ((tot_rshifts * kLogConst) >> 9));
+
+    if (*log_energy < 0) {
+      *log_energy = 0;
+    }
+  } else {
+    *log_energy = offset;
+    return;
+  }
+
+  *log_energy += offset;
+
+  // Update the approximate |total_energy| with the energy of |data_in|, if
+  // |total_energy| has not exceeded |kMinEnergy|. |total_energy| is used as an
+  // energy indicator in WebRtcVad_GmmProbability() in vad_core.c.
+  if (*total_energy <= kMinEnergy) {
+    if (tot_rshifts >= 0) {
+      // We know by construction that the |energy| > |kMinEnergy| in Q0, so add
+      // an arbitrary value such that |total_energy| exceeds |kMinEnergy|.
+      *total_energy += kMinEnergy + 1;
+    } else {
+      // By construction |energy| is represented by 15 bits, hence any number of
+      // right shifted |energy| will fit in an int16_t. In addition, adding the
+      // value to |total_energy| is wrap around safe as long as
+      // |kMinEnergy| < 8192.
+      *total_energy += (int16_t) (energy >> -tot_rshifts);  // Q0.
+    }
+  }
+}
+
+int16_t WebRtcVad_CalculateFeatures(VadInstT* self, const int16_t* data_in,
+                                    size_t data_length, int16_t* features) {
+  int16_t total_energy = 0;
+  // We expect |data_length| to be 80, 160 or 240 samples, which corresponds to
+  // 10, 20 or 30 ms in 8 kHz. Therefore, the intermediate downsampled data will
+  // have at most 120 samples after the first split and at most 60 samples after
+  // the second split.
+  int16_t hp_120[120], lp_120[120];
+  int16_t hp_60[60], lp_60[60];
+  const size_t half_data_length = data_length >> 1;
+  size_t length = half_data_length;  // |data_length| / 2, corresponds to
+                                     // bandwidth = 2000 Hz after downsampling.
+
+  // Initialize variables for the first SplitFilter().
+  int frequency_band = 0;
+  const int16_t* in_ptr = data_in;  // [0 - 4000] Hz.
+  int16_t* hp_out_ptr = hp_120;  // [2000 - 4000] Hz.
+  int16_t* lp_out_ptr = lp_120;  // [0 - 2000] Hz.
+
+  RTC_DCHECK_LE(data_length, 240);
+  RTC_DCHECK_LT(4, kNumChannels - 1);  // Checking maximum |frequency_band|.
+
+  // Split at 2000 Hz and downsample.
+  SplitFilter(in_ptr, data_length, &self->upper_state[frequency_band],
+              &self->lower_state[frequency_band], hp_out_ptr, lp_out_ptr);
+
+  // For the upper band (2000 Hz - 4000 Hz) split at 3000 Hz and downsample.
+  frequency_band = 1;
+  in_ptr = hp_120;  // [2000 - 4000] Hz.
+  hp_out_ptr = hp_60;  // [3000 - 4000] Hz.
+  lp_out_ptr = lp_60;  // [2000 - 3000] Hz.
+  SplitFilter(in_ptr, length, &self->upper_state[frequency_band],
+              &self->lower_state[frequency_band], hp_out_ptr, lp_out_ptr);
+
+  // Energy in 3000 Hz - 4000 Hz.
+  length >>= 1;  // |data_length| / 4 <=> bandwidth = 1000 Hz.
+
+  LogOfEnergy(hp_60, length, kOffsetVector[5], &total_energy, &features[5]);
+
+  // Energy in 2000 Hz - 3000 Hz.
+  LogOfEnergy(lp_60, length, kOffsetVector[4], &total_energy, &features[4]);
+
+  // For the lower band (0 Hz - 2000 Hz) split at 1000 Hz and downsample.
+  frequency_band = 2;
+  in_ptr = lp_120;  // [0 - 2000] Hz.
+  hp_out_ptr = hp_60;  // [1000 - 2000] Hz.
+  lp_out_ptr = lp_60;  // [0 - 1000] Hz.
+  length = half_data_length;  // |data_length| / 2 <=> bandwidth = 2000 Hz.
+  SplitFilter(in_ptr, length, &self->upper_state[frequency_band],
+              &self->lower_state[frequency_band], hp_out_ptr, lp_out_ptr);
+
+  // Energy in 1000 Hz - 2000 Hz.
+  length >>= 1;  // |data_length| / 4 <=> bandwidth = 1000 Hz.
+  LogOfEnergy(hp_60, length, kOffsetVector[3], &total_energy, &features[3]);
+
+  // For the lower band (0 Hz - 1000 Hz) split at 500 Hz and downsample.
+  frequency_band = 3;
+  in_ptr = lp_60;  // [0 - 1000] Hz.
+  hp_out_ptr = hp_120;  // [500 - 1000] Hz.
+  lp_out_ptr = lp_120;  // [0 - 500] Hz.
+  SplitFilter(in_ptr, length, &self->upper_state[frequency_band],
+              &self->lower_state[frequency_band], hp_out_ptr, lp_out_ptr);
+
+  // Energy in 500 Hz - 1000 Hz.
+  length >>= 1;  // |data_length| / 8 <=> bandwidth = 500 Hz.
+  LogOfEnergy(hp_120, length, kOffsetVector[2], &total_energy, &features[2]);
+
+  // For the lower band (0 Hz - 500 Hz) split at 250 Hz and downsample.
+  frequency_band = 4;
+  in_ptr = lp_120;  // [0 - 500] Hz.
+  hp_out_ptr = hp_60;  // [250 - 500] Hz.
+  lp_out_ptr = lp_60;  // [0 - 250] Hz.
+  SplitFilter(in_ptr, length, &self->upper_state[frequency_band],
+              &self->lower_state[frequency_band], hp_out_ptr, lp_out_ptr);
+
+  // Energy in 250 Hz - 500 Hz.
+  length >>= 1;  // |data_length| / 16 <=> bandwidth = 250 Hz.
+  LogOfEnergy(hp_60, length, kOffsetVector[1], &total_energy, &features[1]);
+
+  // Remove 0 Hz - 80 Hz, by high pass filtering the lower band.
+  HighPassFilter(lp_60, length, self->hp_filter_state, hp_120);
+
+  // Energy in 80 Hz - 250 Hz.
+  LogOfEnergy(hp_120, length, kOffsetVector[0], &total_energy, &features[0]);
+
+  return total_energy;
+}
diff --git a/common_audio/vad/vad_filterbank.h b/common_audio/vad/vad_filterbank.h
new file mode 100644
index 0000000..620f96a
--- /dev/null
+++ b/common_audio/vad/vad_filterbank.h
@@ -0,0 +1,44 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file includes feature calculating functionality used in vad_core.c.
+ */
+
+#ifndef COMMON_AUDIO_VAD_VAD_FILTERBANK_H_
+#define COMMON_AUDIO_VAD_VAD_FILTERBANK_H_
+
+#include "common_audio/vad/vad_core.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+// Takes |data_length| samples of |data_in| and calculates the logarithm of the
+// energy of each of the |kNumChannels| = 6 frequency bands used by the VAD:
+//        80 Hz - 250 Hz
+//        250 Hz - 500 Hz
+//        500 Hz - 1000 Hz
+//        1000 Hz - 2000 Hz
+//        2000 Hz - 3000 Hz
+//        3000 Hz - 4000 Hz
+//
+// The values are given in Q4 and written to |features|. Further, an approximate
+// overall energy is returned. The return value is used in
+// WebRtcVad_GmmProbability() as a signal indicator, hence it is arbitrary above
+// the threshold |kMinEnergy|.
+//
+// - self         [i/o] : State information of the VAD.
+// - data_in      [i]   : Input audio data, for feature extraction.
+// - data_length  [i]   : Audio data size, in number of samples.
+// - features     [o]   : 10 * log10(energy in each frequency band), Q4.
+// - returns            : Total energy of the signal (NOTE! This value is not
+//                        exact. It is only used in a comparison.)
+int16_t WebRtcVad_CalculateFeatures(VadInstT* self, const int16_t* data_in,
+                                    size_t data_length, int16_t* features);
+
+#endif  // COMMON_AUDIO_VAD_VAD_FILTERBANK_H_
diff --git a/common_audio/vad/vad_filterbank_unittest.cc b/common_audio/vad/vad_filterbank_unittest.cc
new file mode 100644
index 0000000..55b1279
--- /dev/null
+++ b/common_audio/vad/vad_filterbank_unittest.cc
@@ -0,0 +1,94 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+
+#include "common_audio/vad/vad_unittest.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+extern "C" {
+#include "common_audio/vad/vad_core.h"
+#include "common_audio/vad/vad_filterbank.h"
+}
+
+namespace webrtc {
+namespace test {
+
+const int kNumValidFrameLengths = 3;
+
+TEST_F(VadTest, vad_filterbank) {
+  VadInstT* self = reinterpret_cast<VadInstT*>(malloc(sizeof(VadInstT)));
+  static const int16_t kReference[kNumValidFrameLengths] = { 48, 11, 11 };
+  static const int16_t kFeatures[kNumValidFrameLengths * kNumChannels] = {
+      1213, 759, 587, 462, 434, 272,
+      1479, 1385, 1291, 1200, 1103, 1099,
+      1732, 1692, 1681, 1629, 1436, 1436
+  };
+  static const int16_t kOffsetVector[kNumChannels] = {
+      368, 368, 272, 176, 176, 176 };
+  int16_t features[kNumChannels];
+
+  // Construct a speech signal that will trigger the VAD in all modes. It is
+  // known that (i * i) will wrap around, but that doesn't matter in this case.
+  int16_t speech[kMaxFrameLength];
+  for (size_t i = 0; i < kMaxFrameLength; ++i) {
+    speech[i] = static_cast<int16_t>(i * i);
+  }
+
+  int frame_length_index = 0;
+  ASSERT_EQ(0, WebRtcVad_InitCore(self));
+  for (size_t j = 0; j < kFrameLengthsSize; ++j) {
+    if (ValidRatesAndFrameLengths(8000, kFrameLengths[j])) {
+      EXPECT_EQ(kReference[frame_length_index],
+                WebRtcVad_CalculateFeatures(self, speech, kFrameLengths[j],
+                                            features));
+      for (int k = 0; k < kNumChannels; ++k) {
+        EXPECT_EQ(kFeatures[k + frame_length_index * kNumChannels],
+                  features[k]);
+      }
+      frame_length_index++;
+    }
+  }
+  EXPECT_EQ(kNumValidFrameLengths, frame_length_index);
+
+  // Verify that all zeros in gives kOffsetVector out.
+  memset(speech, 0, sizeof(speech));
+  ASSERT_EQ(0, WebRtcVad_InitCore(self));
+  for (size_t j = 0; j < kFrameLengthsSize; ++j) {
+    if (ValidRatesAndFrameLengths(8000, kFrameLengths[j])) {
+      EXPECT_EQ(0, WebRtcVad_CalculateFeatures(self, speech, kFrameLengths[j],
+                                               features));
+      for (int k = 0; k < kNumChannels; ++k) {
+        EXPECT_EQ(kOffsetVector[k], features[k]);
+      }
+    }
+  }
+
+  // Verify that all ones in gives kOffsetVector out. Any other constant input
+  // will have a small impact in the sub bands.
+  for (size_t i = 0; i < kMaxFrameLength; ++i) {
+    speech[i] = 1;
+  }
+  for (size_t j = 0; j < kFrameLengthsSize; ++j) {
+    if (ValidRatesAndFrameLengths(8000, kFrameLengths[j])) {
+      ASSERT_EQ(0, WebRtcVad_InitCore(self));
+      EXPECT_EQ(0, WebRtcVad_CalculateFeatures(self, speech, kFrameLengths[j],
+                                               features));
+      for (int k = 0; k < kNumChannels; ++k) {
+        EXPECT_EQ(kOffsetVector[k], features[k]);
+      }
+    }
+  }
+
+  free(self);
+}
+}  // namespace test
+}  // namespace webrtc
diff --git a/common_audio/vad/vad_gmm.c b/common_audio/vad/vad_gmm.c
new file mode 100644
index 0000000..b746fd5
--- /dev/null
+++ b/common_audio/vad/vad_gmm.c
@@ -0,0 +1,83 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/vad/vad_gmm.h"
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+static const int32_t kCompVar = 22005;
+static const int16_t kLog2Exp = 5909;  // log2(exp(1)) in Q12.
+
+// For a normal distribution, the probability of |input| is calculated and
+// returned (in Q20). The formula for normal distributed probability is
+//
+// 1 / s * exp(-(x - m)^2 / (2 * s^2))
+//
+// where the parameters are given in the following Q domains:
+// m = |mean| (Q7)
+// s = |std| (Q7)
+// x = |input| (Q4)
+// in addition to the probability we output |delta| (in Q11) used when updating
+// the noise/speech model.
+int32_t WebRtcVad_GaussianProbability(int16_t input,
+                                      int16_t mean,
+                                      int16_t std,
+                                      int16_t* delta) {
+  int16_t tmp16, inv_std, inv_std2, exp_value = 0;
+  int32_t tmp32;
+
+  // Calculate |inv_std| = 1 / s, in Q10.
+  // 131072 = 1 in Q17, and (|std| >> 1) is for rounding instead of truncation.
+  // Q-domain: Q17 / Q7 = Q10.
+  tmp32 = (int32_t) 131072 + (int32_t) (std >> 1);
+  inv_std = (int16_t) WebRtcSpl_DivW32W16(tmp32, std);
+
+  // Calculate |inv_std2| = 1 / s^2, in Q14.
+  tmp16 = (inv_std >> 2);  // Q10 -> Q8.
+  // Q-domain: (Q8 * Q8) >> 2 = Q14.
+  inv_std2 = (int16_t)((tmp16 * tmp16) >> 2);
+  // TODO(bjornv): Investigate if changing to
+  // inv_std2 = (int16_t)((inv_std * inv_std) >> 6);
+  // gives better accuracy.
+
+  tmp16 = (input << 3);  // Q4 -> Q7
+  tmp16 = tmp16 - mean;  // Q7 - Q7 = Q7
+
+  // To be used later, when updating noise/speech model.
+  // |delta| = (x - m) / s^2, in Q11.
+  // Q-domain: (Q14 * Q7) >> 10 = Q11.
+  *delta = (int16_t)((inv_std2 * tmp16) >> 10);
+
+  // Calculate the exponent |tmp32| = (x - m)^2 / (2 * s^2), in Q10. Replacing
+  // division by two with one shift.
+  // Q-domain: (Q11 * Q7) >> 8 = Q10.
+  tmp32 = (*delta * tmp16) >> 9;
+
+  // If the exponent is small enough to give a non-zero probability we calculate
+  // |exp_value| ~= exp(-(x - m)^2 / (2 * s^2))
+  //             ~= exp2(-log2(exp(1)) * |tmp32|).
+  if (tmp32 < kCompVar) {
+    // Calculate |tmp16| = log2(exp(1)) * |tmp32|, in Q10.
+    // Q-domain: (Q12 * Q10) >> 12 = Q10.
+    tmp16 = (int16_t)((kLog2Exp * tmp32) >> 12);
+    tmp16 = -tmp16;
+    exp_value = (0x0400 | (tmp16 & 0x03FF));
+    tmp16 ^= 0xFFFF;
+    tmp16 >>= 10;
+    tmp16 += 1;
+    // Get |exp_value| = exp(-|tmp32|) in Q10.
+    exp_value >>= tmp16;
+  }
+
+  // Calculate and return (1 / s) * exp(-(x - m)^2 / (2 * s^2)), in Q20.
+  // Q-domain: Q10 * Q10 = Q20.
+  return inv_std * exp_value;
+}
diff --git a/common_audio/vad/vad_gmm.h b/common_audio/vad/vad_gmm.h
new file mode 100644
index 0000000..79f15c8
--- /dev/null
+++ b/common_audio/vad/vad_gmm.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Gaussian probability calculations internally used in vad_core.c.
+
+#ifndef COMMON_AUDIO_VAD_VAD_GMM_H_
+#define COMMON_AUDIO_VAD_VAD_GMM_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+// Calculates the probability for |input|, given that |input| comes from a
+// normal distribution with mean and standard deviation (|mean|, |std|).
+//
+// Inputs:
+//      - input         : input sample in Q4.
+//      - mean          : mean input in the statistical model, Q7.
+//      - std           : standard deviation, Q7.
+//
+// Output:
+//
+//      - delta         : input used when updating the model, Q11.
+//                        |delta| = (|input| - |mean|) / |std|^2.
+//
+// Return:
+//   (probability for |input|) =
+//    1 / |std| * exp(-(|input| - |mean|)^2 / (2 * |std|^2));
+int32_t WebRtcVad_GaussianProbability(int16_t input,
+                                      int16_t mean,
+                                      int16_t std,
+                                      int16_t* delta);
+
+#endif  // COMMON_AUDIO_VAD_VAD_GMM_H_
diff --git a/common_audio/vad/vad_gmm_unittest.cc b/common_audio/vad/vad_gmm_unittest.cc
new file mode 100644
index 0000000..e77603d
--- /dev/null
+++ b/common_audio/vad/vad_gmm_unittest.cc
@@ -0,0 +1,45 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/vad/vad_unittest.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+extern "C" {
+#include "common_audio/vad/vad_gmm.h"
+}
+
+namespace webrtc {
+namespace test {
+
+TEST_F(VadTest, vad_gmm) {
+  int16_t delta = 0;
+  // Input value at mean.
+  EXPECT_EQ(1048576, WebRtcVad_GaussianProbability(0, 0, 128, &delta));
+  EXPECT_EQ(0, delta);
+  EXPECT_EQ(1048576, WebRtcVad_GaussianProbability(16, 128, 128, &delta));
+  EXPECT_EQ(0, delta);
+  EXPECT_EQ(1048576, WebRtcVad_GaussianProbability(-16, -128, 128, &delta));
+  EXPECT_EQ(0, delta);
+
+  // Largest possible input to give non-zero probability.
+  EXPECT_EQ(1024, WebRtcVad_GaussianProbability(59, 0, 128, &delta));
+  EXPECT_EQ(7552, delta);
+  EXPECT_EQ(1024, WebRtcVad_GaussianProbability(75, 128, 128, &delta));
+  EXPECT_EQ(7552, delta);
+  EXPECT_EQ(1024, WebRtcVad_GaussianProbability(-75, -128, 128, &delta));
+  EXPECT_EQ(-7552, delta);
+
+  // Too large input, should give zero probability.
+  EXPECT_EQ(0, WebRtcVad_GaussianProbability(105, 0, 128, &delta));
+  EXPECT_EQ(13440, delta);
+}
+}  // namespace test
+}  // namespace webrtc
diff --git a/common_audio/vad/vad_sp.c b/common_audio/vad/vad_sp.c
new file mode 100644
index 0000000..915fb37
--- /dev/null
+++ b/common_audio/vad/vad_sp.c
@@ -0,0 +1,177 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/vad/vad_sp.h"
+
+#include "rtc_base/checks.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "common_audio/vad/vad_core.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+// Allpass filter coefficients, upper and lower, in Q13.
+// Upper: 0.64, Lower: 0.17.
+static const int16_t kAllPassCoefsQ13[2] = { 5243, 1392 };  // Q13.
+static const int16_t kSmoothingDown = 6553;  // 0.2 in Q15.
+static const int16_t kSmoothingUp = 32439;  // 0.99 in Q15.
+
+// TODO(bjornv): Move this function to vad_filterbank.c.
+// Downsampling filter based on splitting filter and allpass functions.
+void WebRtcVad_Downsampling(const int16_t* signal_in,
+                            int16_t* signal_out,
+                            int32_t* filter_state,
+                            size_t in_length) {
+  int16_t tmp16_1 = 0, tmp16_2 = 0;
+  int32_t tmp32_1 = filter_state[0];
+  int32_t tmp32_2 = filter_state[1];
+  size_t n = 0;
+  // Downsampling by 2 gives half length.
+  size_t half_length = (in_length >> 1);
+
+  // Filter coefficients in Q13, filter state in Q0.
+  for (n = 0; n < half_length; n++) {
+    // All-pass filtering upper branch.
+    tmp16_1 = (int16_t) ((tmp32_1 >> 1) +
+        ((kAllPassCoefsQ13[0] * *signal_in) >> 14));
+    *signal_out = tmp16_1;
+    tmp32_1 = (int32_t)(*signal_in++) - ((kAllPassCoefsQ13[0] * tmp16_1) >> 12);
+
+    // All-pass filtering lower branch.
+    tmp16_2 = (int16_t) ((tmp32_2 >> 1) +
+        ((kAllPassCoefsQ13[1] * *signal_in) >> 14));
+    *signal_out++ += tmp16_2;
+    tmp32_2 = (int32_t)(*signal_in++) - ((kAllPassCoefsQ13[1] * tmp16_2) >> 12);
+  }
+  // Store the filter states.
+  filter_state[0] = tmp32_1;
+  filter_state[1] = tmp32_2;
+}
+
+// Inserts |feature_value| into |low_value_vector|, if it is one of the 16
+// smallest values the last 100 frames. Then calculates and returns the median
+// of the five smallest values.
+int16_t WebRtcVad_FindMinimum(VadInstT* self,
+                              int16_t feature_value,
+                              int channel) {
+  int i = 0, j = 0;
+  int position = -1;
+  // Offset to beginning of the 16 minimum values in memory.
+  const int offset = (channel << 4);
+  int16_t current_median = 1600;
+  int16_t alpha = 0;
+  int32_t tmp32 = 0;
+  // Pointer to memory for the 16 minimum values and the age of each value of
+  // the |channel|.
+  int16_t* age = &self->index_vector[offset];
+  int16_t* smallest_values = &self->low_value_vector[offset];
+
+  RTC_DCHECK_LT(channel, kNumChannels);
+
+  // Each value in |smallest_values| is getting 1 loop older. Update |age|, and
+  // remove old values.
+  for (i = 0; i < 16; i++) {
+    if (age[i] != 100) {
+      age[i]++;
+    } else {
+      // Too old value. Remove from memory and shift larger values downwards.
+      for (j = i; j < 16; j++) {
+        smallest_values[j] = smallest_values[j + 1];
+        age[j] = age[j + 1];
+      }
+      age[15] = 101;
+      smallest_values[15] = 10000;
+    }
+  }
+
+  // Check if |feature_value| is smaller than any of the values in
+  // |smallest_values|. If so, find the |position| where to insert the new value
+  // (|feature_value|).
+  if (feature_value < smallest_values[7]) {
+    if (feature_value < smallest_values[3]) {
+      if (feature_value < smallest_values[1]) {
+        if (feature_value < smallest_values[0]) {
+          position = 0;
+        } else {
+          position = 1;
+        }
+      } else if (feature_value < smallest_values[2]) {
+        position = 2;
+      } else {
+        position = 3;
+      }
+    } else if (feature_value < smallest_values[5]) {
+      if (feature_value < smallest_values[4]) {
+        position = 4;
+      } else {
+        position = 5;
+      }
+    } else if (feature_value < smallest_values[6]) {
+      position = 6;
+    } else {
+      position = 7;
+    }
+  } else if (feature_value < smallest_values[15]) {
+    if (feature_value < smallest_values[11]) {
+      if (feature_value < smallest_values[9]) {
+        if (feature_value < smallest_values[8]) {
+          position = 8;
+        } else {
+          position = 9;
+        }
+      } else if (feature_value < smallest_values[10]) {
+        position = 10;
+      } else {
+        position = 11;
+      }
+    } else if (feature_value < smallest_values[13]) {
+      if (feature_value < smallest_values[12]) {
+        position = 12;
+      } else {
+        position = 13;
+      }
+    } else if (feature_value < smallest_values[14]) {
+      position = 14;
+    } else {
+      position = 15;
+    }
+  }
+
+  // If we have detected a new small value, insert it at the correct position
+  // and shift larger values up.
+  if (position > -1) {
+    for (i = 15; i > position; i--) {
+      smallest_values[i] = smallest_values[i - 1];
+      age[i] = age[i - 1];
+    }
+    smallest_values[position] = feature_value;
+    age[position] = 1;
+  }
+
+  // Get |current_median|.
+  if (self->frame_counter > 2) {
+    current_median = smallest_values[2];
+  } else if (self->frame_counter > 0) {
+    current_median = smallest_values[0];
+  }
+
+  // Smooth the median value.
+  if (self->frame_counter > 0) {
+    if (current_median < self->mean_value[channel]) {
+      alpha = kSmoothingDown;  // 0.2 in Q15.
+    } else {
+      alpha = kSmoothingUp;  // 0.99 in Q15.
+    }
+  }
+  tmp32 = (alpha + 1) * self->mean_value[channel];
+  tmp32 += (WEBRTC_SPL_WORD16_MAX - alpha) * current_median;
+  tmp32 += 16384;
+  self->mean_value[channel] = (int16_t) (tmp32 >> 15);
+
+  return self->mean_value[channel];
+}
diff --git a/common_audio/vad/vad_sp.h b/common_audio/vad/vad_sp.h
new file mode 100644
index 0000000..21fed11
--- /dev/null
+++ b/common_audio/vad/vad_sp.h
@@ -0,0 +1,56 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+// This file includes specific signal processing tools used in vad_core.c.
+
+#ifndef COMMON_AUDIO_VAD_VAD_SP_H_
+#define COMMON_AUDIO_VAD_VAD_SP_H_
+
+#include "common_audio/vad/vad_core.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+// Downsamples the signal by a factor 2, eg. 32->16 or 16->8.
+//
+// Inputs:
+//      - signal_in     : Input signal.
+//      - in_length     : Length of input signal in samples.
+//
+// Input & Output:
+//      - filter_state  : Current filter states of the two all-pass filters. The
+//                        |filter_state| is updated after all samples have been
+//                        processed.
+//
+// Output:
+//      - signal_out    : Downsampled signal (of length |in_length| / 2).
+void WebRtcVad_Downsampling(const int16_t* signal_in,
+                            int16_t* signal_out,
+                            int32_t* filter_state,
+                            size_t in_length);
+
+// Updates and returns the smoothed feature minimum. As minimum we use the
+// median of the five smallest feature values in a 100 frames long window.
+// As long as |handle->frame_counter| is zero, that is, we haven't received any
+// "valid" data, FindMinimum() outputs the default value of 1600.
+//
+// Inputs:
+//      - feature_value : New feature value to update with.
+//      - channel       : Channel number.
+//
+// Input & Output:
+//      - handle        : State information of the VAD.
+//
+// Returns:
+//                      : Smoothed minimum value for a moving window.
+int16_t WebRtcVad_FindMinimum(VadInstT* handle,
+                              int16_t feature_value,
+                              int channel);
+
+#endif  // COMMON_AUDIO_VAD_VAD_SP_H_
diff --git a/common_audio/vad/vad_sp_unittest.cc b/common_audio/vad/vad_sp_unittest.cc
new file mode 100644
index 0000000..7eb6794
--- /dev/null
+++ b/common_audio/vad/vad_sp_unittest.cc
@@ -0,0 +1,76 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+
+#include "common_audio/vad/vad_unittest.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+extern "C" {
+#include "common_audio/vad/vad_core.h"
+#include "common_audio/vad/vad_sp.h"
+}
+
+namespace webrtc {
+namespace test {
+
+TEST_F(VadTest, vad_sp) {
+  VadInstT* self = reinterpret_cast<VadInstT*>(malloc(sizeof(VadInstT)));
+  const size_t kMaxFrameLenSp = 960;  // Maximum frame length in this unittest.
+  int16_t zeros[kMaxFrameLenSp] = { 0 };
+  int32_t state[2] = { 0 };
+  int16_t data_in[kMaxFrameLenSp];
+  int16_t data_out[kMaxFrameLenSp];
+
+  // We expect the first value to be 1600 as long as |frame_counter| is zero,
+  // which is true for the first iteration.
+  static const int16_t kReferenceMin[32] = {
+      1600, 720, 509, 512, 532, 552, 570, 588,
+       606, 624, 642, 659, 675, 691, 707, 723,
+      1600, 544, 502, 522, 542, 561, 579, 597,
+       615, 633, 651, 667, 683, 699, 715, 731
+  };
+
+  // Construct a speech signal that will trigger the VAD in all modes. It is
+  // known that (i * i) will wrap around, but that doesn't matter in this case.
+  for (size_t i = 0; i < kMaxFrameLenSp; ++i) {
+    data_in[i] = static_cast<int16_t>(i * i);
+  }
+  // Input values all zeros, expect all zeros out.
+  WebRtcVad_Downsampling(zeros, data_out, state, kMaxFrameLenSp);
+  EXPECT_EQ(0, state[0]);
+  EXPECT_EQ(0, state[1]);
+  for (size_t i = 0; i < kMaxFrameLenSp / 2; ++i) {
+    EXPECT_EQ(0, data_out[i]);
+  }
+  // Make a simple non-zero data test.
+  WebRtcVad_Downsampling(data_in, data_out, state, kMaxFrameLenSp);
+  EXPECT_EQ(207, state[0]);
+  EXPECT_EQ(2270, state[1]);
+
+  ASSERT_EQ(0, WebRtcVad_InitCore(self));
+  // TODO(bjornv): Replace this part of the test with taking values from an
+  // array and calculate the reference value here. Make sure the values are not
+  // ordered.
+  for (int16_t i = 0; i < 16; ++i) {
+    int16_t value = 500 * (i + 1);
+    for (int j = 0; j < kNumChannels; ++j) {
+      // Use values both above and below initialized value.
+      EXPECT_EQ(kReferenceMin[i], WebRtcVad_FindMinimum(self, value, j));
+      EXPECT_EQ(kReferenceMin[i + 16], WebRtcVad_FindMinimum(self, 12000, j));
+    }
+    self->frame_counter++;
+  }
+
+  free(self);
+}
+}  // namespace test
+}  // namespace webrtc
diff --git a/common_audio/vad/vad_unittest.cc b/common_audio/vad/vad_unittest.cc
new file mode 100644
index 0000000..f79678b
--- /dev/null
+++ b/common_audio/vad/vad_unittest.cc
@@ -0,0 +1,157 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/vad/vad_unittest.h"
+
+#include <stdlib.h>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "common_audio/vad/include/webrtc_vad.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+VadTest::VadTest() {}
+
+void VadTest::SetUp() {}
+
+void VadTest::TearDown() {}
+
+// Returns true if the rate and frame length combination is valid.
+bool VadTest::ValidRatesAndFrameLengths(int rate, size_t frame_length) {
+  if (rate == 8000) {
+    if (frame_length == 80 || frame_length == 160 || frame_length == 240) {
+      return true;
+    }
+    return false;
+  } else if (rate == 16000) {
+    if (frame_length == 160 || frame_length == 320 || frame_length == 480) {
+      return true;
+    }
+    return false;
+  } else if (rate == 32000) {
+    if (frame_length == 320 || frame_length == 640 || frame_length == 960) {
+      return true;
+    }
+    return false;
+  } else if (rate == 48000) {
+    if (frame_length == 480 || frame_length == 960 || frame_length == 1440) {
+      return true;
+    }
+    return false;
+  }
+
+  return false;
+}
+
+namespace webrtc {
+namespace test {
+
+TEST_F(VadTest, ApiTest) {
+  // This API test runs through the APIs for all possible valid and invalid
+  // combinations.
+
+  VadInst* handle = WebRtcVad_Create();
+  int16_t zeros[kMaxFrameLength] = { 0 };
+
+  // Construct a speech signal that will trigger the VAD in all modes. It is
+  // known that (i * i) will wrap around, but that doesn't matter in this case.
+  int16_t speech[kMaxFrameLength];
+  for (size_t i = 0; i < kMaxFrameLength; i++) {
+    speech[i] = static_cast<int16_t>(i * i);
+  }
+
+  // nullptr instance tests
+  EXPECT_EQ(-1, WebRtcVad_Init(nullptr));
+  EXPECT_EQ(-1, WebRtcVad_set_mode(nullptr, kModes[0]));
+  EXPECT_EQ(-1,
+            WebRtcVad_Process(nullptr, kRates[0], speech, kFrameLengths[0]));
+
+  // WebRtcVad_Create()
+  RTC_CHECK(handle);
+
+  // Not initialized tests
+  EXPECT_EQ(-1, WebRtcVad_Process(handle, kRates[0], speech, kFrameLengths[0]));
+  EXPECT_EQ(-1, WebRtcVad_set_mode(handle, kModes[0]));
+
+  // WebRtcVad_Init() test
+  ASSERT_EQ(0, WebRtcVad_Init(handle));
+
+  // WebRtcVad_set_mode() invalid modes tests. Tries smallest supported value
+  // minus one and largest supported value plus one.
+  EXPECT_EQ(-1, WebRtcVad_set_mode(handle,
+                                   WebRtcSpl_MinValueW32(kModes,
+                                                         kModesSize) - 1));
+  EXPECT_EQ(-1, WebRtcVad_set_mode(handle,
+                                   WebRtcSpl_MaxValueW32(kModes,
+                                                         kModesSize) + 1));
+
+  // WebRtcVad_Process() tests
+  // nullptr as speech pointer
+  EXPECT_EQ(-1,
+            WebRtcVad_Process(handle, kRates[0], nullptr, kFrameLengths[0]));
+  // Invalid sampling rate
+  EXPECT_EQ(-1, WebRtcVad_Process(handle, 9999, speech, kFrameLengths[0]));
+  // All zeros as input should work
+  EXPECT_EQ(0, WebRtcVad_Process(handle, kRates[0], zeros, kFrameLengths[0]));
+  for (size_t k = 0; k < kModesSize; k++) {
+    // Test valid modes
+    EXPECT_EQ(0, WebRtcVad_set_mode(handle, kModes[k]));
+    // Loop through sampling rate and frame length combinations
+    for (size_t i = 0; i < kRatesSize; i++) {
+      for (size_t j = 0; j < kFrameLengthsSize; j++) {
+        if (ValidRatesAndFrameLengths(kRates[i], kFrameLengths[j])) {
+          EXPECT_EQ(1, WebRtcVad_Process(handle,
+                                         kRates[i],
+                                         speech,
+                                         kFrameLengths[j]));
+        } else {
+          EXPECT_EQ(-1, WebRtcVad_Process(handle,
+                                          kRates[i],
+                                          speech,
+                                          kFrameLengths[j]));
+        }
+      }
+    }
+  }
+
+  WebRtcVad_Free(handle);
+}
+
+TEST_F(VadTest, ValidRatesFrameLengths) {
+  // This test verifies valid and invalid rate/frame_length combinations. We
+  // loop through some sampling rates and frame lengths from negative values to
+  // values larger than possible.
+  const int kRates[] = {
+    -8000, -4000, 0, 4000, 8000, 8001, 15999, 16000, 32000, 48000, 48001, 96000
+  };
+
+  const size_t kFrameLengths[] = {
+    0, 80, 81, 159, 160, 240, 320, 480, 640, 960, 1440, 2000
+  };
+
+  for (size_t i = 0; i < arraysize(kRates); i++) {
+    for (size_t j = 0; j < arraysize(kFrameLengths); j++) {
+      if (ValidRatesAndFrameLengths(kRates[i], kFrameLengths[j])) {
+        EXPECT_EQ(0, WebRtcVad_ValidRateAndFrameLength(kRates[i],
+                                                       kFrameLengths[j]));
+      } else {
+        EXPECT_EQ(-1, WebRtcVad_ValidRateAndFrameLength(kRates[i],
+                                                        kFrameLengths[j]));
+      }
+    }
+  }
+}
+
+// TODO(bjornv): Add a process test, run on file.
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/common_audio/vad/vad_unittest.h b/common_audio/vad/vad_unittest.h
new file mode 100644
index 0000000..f982f52
--- /dev/null
+++ b/common_audio/vad/vad_unittest.h
@@ -0,0 +1,49 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_VAD_VAD_UNITTEST_H_
+#define COMMON_AUDIO_VAD_VAD_UNITTEST_H_
+
+#include <stddef.h>  // size_t
+
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace test {
+
+// Modes we support
+const int kModes[] = { 0, 1, 2, 3 };
+const size_t kModesSize = sizeof(kModes) / sizeof(*kModes);
+
+// Rates we support.
+const int kRates[] = { 8000, 12000, 16000, 24000, 32000, 48000 };
+const size_t kRatesSize = sizeof(kRates) / sizeof(*kRates);
+
+// Frame lengths we support.
+const size_t kMaxFrameLength = 1440;
+const size_t kFrameLengths[] = { 80, 120, 160, 240, 320, 480, 640, 960,
+    kMaxFrameLength };
+const size_t kFrameLengthsSize = sizeof(kFrameLengths) / sizeof(*kFrameLengths);
+
+}  // namespace test
+}  // namespace webrtc
+
+class VadTest : public ::testing::Test {
+ protected:
+  VadTest();
+  virtual void SetUp();
+  virtual void TearDown();
+
+  // Returns true if the rate and frame length combination is valid.
+  bool ValidRatesAndFrameLengths(int rate, size_t frame_length);
+};
+
+#endif  // COMMON_AUDIO_VAD_VAD_UNITTEST_H_
diff --git a/common_audio/vad/webrtc_vad.c b/common_audio/vad/webrtc_vad.c
new file mode 100644
index 0000000..7fc4d65
--- /dev/null
+++ b/common_audio/vad/webrtc_vad.c
@@ -0,0 +1,116 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/vad/include/webrtc_vad.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "common_audio/vad/vad_core.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+static const int kInitCheck = 42;
+static const int kValidRates[] = { 8000, 16000, 32000, 48000 };
+static const size_t kRatesSize = sizeof(kValidRates) / sizeof(*kValidRates);
+static const int kMaxFrameLengthMs = 30;
+
+VadInst* WebRtcVad_Create() {
+  VadInstT* self = (VadInstT*)malloc(sizeof(VadInstT));
+
+  WebRtcSpl_Init();
+  self->init_flag = 0;
+
+  return (VadInst*)self;
+}
+
+void WebRtcVad_Free(VadInst* handle) {
+  free(handle);
+}
+
+// TODO(bjornv): Move WebRtcVad_InitCore() code here.
+int WebRtcVad_Init(VadInst* handle) {
+  // Initialize the core VAD component.
+  return WebRtcVad_InitCore((VadInstT*) handle);
+}
+
+// TODO(bjornv): Move WebRtcVad_set_mode_core() code here.
+int WebRtcVad_set_mode(VadInst* handle, int mode) {
+  VadInstT* self = (VadInstT*) handle;
+
+  if (handle == NULL) {
+    return -1;
+  }
+  if (self->init_flag != kInitCheck) {
+    return -1;
+  }
+
+  return WebRtcVad_set_mode_core(self, mode);
+}
+
+int WebRtcVad_Process(VadInst* handle, int fs, const int16_t* audio_frame,
+                      size_t frame_length) {
+  int vad = -1;
+  VadInstT* self = (VadInstT*) handle;
+
+  if (handle == NULL) {
+    return -1;
+  }
+
+  if (self->init_flag != kInitCheck) {
+    return -1;
+  }
+  if (audio_frame == NULL) {
+    return -1;
+  }
+  if (WebRtcVad_ValidRateAndFrameLength(fs, frame_length) != 0) {
+    return -1;
+  }
+
+  if (fs == 48000) {
+      vad = WebRtcVad_CalcVad48khz(self, audio_frame, frame_length);
+  } else if (fs == 32000) {
+    vad = WebRtcVad_CalcVad32khz(self, audio_frame, frame_length);
+  } else if (fs == 16000) {
+    vad = WebRtcVad_CalcVad16khz(self, audio_frame, frame_length);
+  } else if (fs == 8000) {
+    vad = WebRtcVad_CalcVad8khz(self, audio_frame, frame_length);
+  }
+
+  if (vad > 0) {
+    vad = 1;
+  }
+  return vad;
+}
+
+int WebRtcVad_ValidRateAndFrameLength(int rate, size_t frame_length) {
+  int return_value = -1;
+  size_t i;
+  int valid_length_ms;
+  size_t valid_length;
+
+  // We only allow 10, 20 or 30 ms frames. Loop through valid frame rates and
+  // see if we have a matching pair.
+  for (i = 0; i < kRatesSize; i++) {
+    if (kValidRates[i] == rate) {
+      for (valid_length_ms = 10; valid_length_ms <= kMaxFrameLengthMs;
+          valid_length_ms += 10) {
+        valid_length = (size_t)(kValidRates[i] / 1000 * valid_length_ms);
+        if (frame_length == valid_length) {
+          return_value = 0;
+          break;
+        }
+      }
+      break;
+    }
+  }
+
+  return return_value;
+}
diff --git a/common_audio/wav_file.cc b/common_audio/wav_file.cc
new file mode 100644
index 0000000..37f249e
--- /dev/null
+++ b/common_audio/wav_file.cc
@@ -0,0 +1,205 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/wav_file.h"
+
+#include <algorithm>
+#include <cstdio>
+#include <limits>
+#include <sstream>
+
+#include "common_audio/include/audio_util.h"
+#include "common_audio/wav_header.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+// We write 16-bit PCM WAV files.
+static const WavFormat kWavFormat = kWavFormatPcm;
+static const size_t kBytesPerSample = 2;
+
+// Doesn't take ownership of the file handle and won't close it.
+class ReadableWavFile : public ReadableWav {
+ public:
+  explicit ReadableWavFile(FILE* file) : file_(file) {}
+  virtual size_t Read(void* buf, size_t num_bytes) {
+    return fread(buf, 1, num_bytes, file_);
+  }
+
+ private:
+  FILE* file_;
+};
+
+std::string WavFile::FormatAsString() const {
+  std::ostringstream s;
+  s << "Sample rate: " << sample_rate() << " Hz, Channels: " << num_channels()
+    << ", Duration: "
+    << (1.f * num_samples()) / (num_channels() * sample_rate()) << " s";
+  return s.str();
+}
+
+WavReader::WavReader(const std::string& filename)
+    : file_handle_(fopen(filename.c_str(), "rb")) {
+  RTC_CHECK(file_handle_) << "Could not open wav file for reading.";
+
+  ReadableWavFile readable(file_handle_);
+  WavFormat format;
+  size_t bytes_per_sample;
+  RTC_CHECK(ReadWavHeader(&readable, &num_channels_, &sample_rate_, &format,
+                          &bytes_per_sample, &num_samples_));
+  num_samples_remaining_ = num_samples_;
+  RTC_CHECK_EQ(kWavFormat, format);
+  RTC_CHECK_EQ(kBytesPerSample, bytes_per_sample);
+}
+
+WavReader::~WavReader() {
+  Close();
+}
+
+int WavReader::sample_rate() const {
+  return sample_rate_;
+}
+
+size_t WavReader::num_channels() const {
+  return num_channels_;
+}
+
+size_t WavReader::num_samples() const {
+  return num_samples_;
+}
+
+size_t WavReader::ReadSamples(size_t num_samples, int16_t* samples) {
+#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
+#error "Need to convert samples to big-endian when reading from WAV file"
+#endif
+  // There could be metadata after the audio; ensure we don't read it.
+  num_samples = std::min(num_samples, num_samples_remaining_);
+  const size_t read =
+      fread(samples, sizeof(*samples), num_samples, file_handle_);
+  // If we didn't read what was requested, ensure we've reached the EOF.
+  RTC_CHECK(read == num_samples || feof(file_handle_));
+  RTC_CHECK_LE(read, num_samples_remaining_);
+  num_samples_remaining_ -= read;
+  return read;
+}
+
+size_t WavReader::ReadSamples(size_t num_samples, float* samples) {
+  static const size_t kChunksize = 4096 / sizeof(uint16_t);
+  size_t read = 0;
+  for (size_t i = 0; i < num_samples; i += kChunksize) {
+    int16_t isamples[kChunksize];
+    size_t chunk = std::min(kChunksize, num_samples - i);
+    chunk = ReadSamples(chunk, isamples);
+    for (size_t j = 0; j < chunk; ++j)
+      samples[i + j] = isamples[j];
+    read += chunk;
+  }
+  return read;
+}
+
+void WavReader::Close() {
+  RTC_CHECK_EQ(0, fclose(file_handle_));
+  file_handle_ = nullptr;
+}
+
+WavWriter::WavWriter(const std::string& filename, int sample_rate,
+                     size_t num_channels)
+    : sample_rate_(sample_rate),
+      num_channels_(num_channels),
+      num_samples_(0),
+      file_handle_(fopen(filename.c_str(), "wb")) {
+  RTC_CHECK(file_handle_) << "Could not open wav file for writing.";
+  RTC_CHECK(CheckWavParameters(num_channels_, sample_rate_, kWavFormat,
+                               kBytesPerSample, num_samples_));
+
+  // Write a blank placeholder header, since we need to know the total number
+  // of samples before we can fill in the real data.
+  static const uint8_t blank_header[kWavHeaderSize] = {0};
+  RTC_CHECK_EQ(1, fwrite(blank_header, kWavHeaderSize, 1, file_handle_));
+}
+
+WavWriter::~WavWriter() {
+  Close();
+}
+
+int WavWriter::sample_rate() const {
+  return sample_rate_;
+}
+
+size_t WavWriter::num_channels() const {
+  return num_channels_;
+}
+
+size_t WavWriter::num_samples() const {
+  return num_samples_;
+}
+
+void WavWriter::WriteSamples(const int16_t* samples, size_t num_samples) {
+#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
+#error "Need to convert samples to little-endian when writing to WAV file"
+#endif
+  const size_t written =
+      fwrite(samples, sizeof(*samples), num_samples, file_handle_);
+  RTC_CHECK_EQ(num_samples, written);
+  num_samples_ += written;
+  RTC_CHECK(num_samples_ >= written);  // detect size_t overflow
+}
+
+void WavWriter::WriteSamples(const float* samples, size_t num_samples) {
+  static const size_t kChunksize = 4096 / sizeof(uint16_t);
+  for (size_t i = 0; i < num_samples; i += kChunksize) {
+    int16_t isamples[kChunksize];
+    const size_t chunk = std::min(kChunksize, num_samples - i);
+    FloatS16ToS16(samples + i, chunk, isamples);
+    WriteSamples(isamples, chunk);
+  }
+}
+
+void WavWriter::Close() {
+  RTC_CHECK_EQ(0, fseek(file_handle_, 0, SEEK_SET));
+  uint8_t header[kWavHeaderSize];
+  WriteWavHeader(header, num_channels_, sample_rate_, kWavFormat,
+                 kBytesPerSample, num_samples_);
+  RTC_CHECK_EQ(1, fwrite(header, kWavHeaderSize, 1, file_handle_));
+  RTC_CHECK_EQ(0, fclose(file_handle_));
+  file_handle_ = nullptr;
+}
+
+}  // namespace webrtc
+
+rtc_WavWriter* rtc_WavOpen(const char* filename,
+                           int sample_rate,
+                           size_t num_channels) {
+  return reinterpret_cast<rtc_WavWriter*>(
+      new webrtc::WavWriter(filename, sample_rate, num_channels));
+}
+
+void rtc_WavClose(rtc_WavWriter* wf) {
+  delete reinterpret_cast<webrtc::WavWriter*>(wf);
+}
+
+void rtc_WavWriteSamples(rtc_WavWriter* wf,
+                         const float* samples,
+                         size_t num_samples) {
+  reinterpret_cast<webrtc::WavWriter*>(wf)->WriteSamples(samples, num_samples);
+}
+
+int rtc_WavSampleRate(const rtc_WavWriter* wf) {
+  return reinterpret_cast<const webrtc::WavWriter*>(wf)->sample_rate();
+}
+
+size_t rtc_WavNumChannels(const rtc_WavWriter* wf) {
+  return reinterpret_cast<const webrtc::WavWriter*>(wf)->num_channels();
+}
+
+size_t rtc_WavNumSamples(const rtc_WavWriter* wf) {
+  return reinterpret_cast<const webrtc::WavWriter*>(wf)->num_samples();
+}
diff --git a/common_audio/wav_file.h b/common_audio/wav_file.h
new file mode 100644
index 0000000..f7afe92
--- /dev/null
+++ b/common_audio/wav_file.h
@@ -0,0 +1,118 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_WAV_FILE_H_
+#define COMMON_AUDIO_WAV_FILE_H_
+
+#ifdef __cplusplus
+
+#include <stdint.h>
+#include <cstddef>
+#include <string>
+
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Interface to provide access to WAV file parameters.
+class WavFile {
+ public:
+  virtual ~WavFile() {}
+
+  virtual int sample_rate() const = 0;
+  virtual size_t num_channels() const = 0;
+  virtual size_t num_samples() const = 0;
+
+  // Returns a human-readable string containing the audio format.
+  std::string FormatAsString() const;
+};
+
+// Simple C++ class for writing 16-bit PCM WAV files. All error handling is
+// by calls to RTC_CHECK(), making it unsuitable for anything but debug code.
+class WavWriter final : public WavFile {
+ public:
+  // Open a new WAV file for writing.
+  WavWriter(const std::string& filename, int sample_rate, size_t num_channels);
+
+  // Close the WAV file, after writing its header.
+  ~WavWriter() override;
+
+  // Write additional samples to the file. Each sample is in the range
+  // [-32768,32767], and there must be the previously specified number of
+  // interleaved channels.
+  void WriteSamples(const float* samples, size_t num_samples);
+  void WriteSamples(const int16_t* samples, size_t num_samples);
+
+  int sample_rate() const override;
+  size_t num_channels() const override;
+  size_t num_samples() const override;
+
+ private:
+  void Close();
+  const int sample_rate_;
+  const size_t num_channels_;
+  size_t num_samples_;  // Total number of samples written to file.
+  FILE* file_handle_;  // Output file, owned by this class
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(WavWriter);
+};
+
+// Follows the conventions of WavWriter.
+class WavReader final : public WavFile {
+ public:
+  // Opens an existing WAV file for reading.
+  explicit WavReader(const std::string& filename);
+
+  // Close the WAV file.
+  ~WavReader() override;
+
+  // Returns the number of samples read. If this is less than requested,
+  // verifies that the end of the file was reached.
+  size_t ReadSamples(size_t num_samples, float* samples);
+  size_t ReadSamples(size_t num_samples, int16_t* samples);
+
+  int sample_rate() const override;
+  size_t num_channels() const override;
+  size_t num_samples() const override;
+
+ private:
+  void Close();
+  int sample_rate_;
+  size_t num_channels_;
+  size_t num_samples_;  // Total number of samples in the file.
+  size_t num_samples_remaining_;
+  FILE* file_handle_;  // Input file, owned by this class.
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(WavReader);
+};
+
+}  // namespace webrtc
+
+extern "C" {
+#endif  // __cplusplus
+
+// C wrappers for the WavWriter class.
+typedef struct rtc_WavWriter rtc_WavWriter;
+rtc_WavWriter* rtc_WavOpen(const char* filename,
+                           int sample_rate,
+                           size_t num_channels);
+void rtc_WavClose(rtc_WavWriter* wf);
+void rtc_WavWriteSamples(rtc_WavWriter* wf,
+                         const float* samples,
+                         size_t num_samples);
+int rtc_WavSampleRate(const rtc_WavWriter* wf);
+size_t rtc_WavNumChannels(const rtc_WavWriter* wf);
+size_t rtc_WavNumSamples(const rtc_WavWriter* wf);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // COMMON_AUDIO_WAV_FILE_H_
diff --git a/common_audio/wav_file_unittest.cc b/common_audio/wav_file_unittest.cc
new file mode 100644
index 0000000..7113b47
--- /dev/null
+++ b/common_audio/wav_file_unittest.cc
@@ -0,0 +1,177 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+
+#include <cmath>
+#include <limits>
+
+#include "common_audio/wav_file.h"
+#include "common_audio/wav_header.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+static const float kSamples[] = {0.0, 10.0, 4e4, -1e9};
+
+// Write a tiny WAV file with the C++ interface and verify the result.
+TEST(WavWriterTest, CPP) {
+  const std::string outfile = test::OutputPath() + "wavtest1.wav";
+  static const size_t kNumSamples = 3;
+  {
+    WavWriter w(outfile, 14099, 1);
+    EXPECT_EQ(14099, w.sample_rate());
+    EXPECT_EQ(1u, w.num_channels());
+    EXPECT_EQ(0u, w.num_samples());
+    w.WriteSamples(kSamples, kNumSamples);
+    EXPECT_EQ(kNumSamples, w.num_samples());
+  }
+  // Write some extra "metadata" to the file that should be silently ignored
+  // by WavReader. We don't use WavWriter directly for this because it doesn't
+  // support metadata.
+  static const uint8_t kMetadata[] = {101, 202};
+  {
+    FILE* f = fopen(outfile.c_str(), "ab");
+    ASSERT_TRUE(f);
+    ASSERT_EQ(1u, fwrite(kMetadata, sizeof(kMetadata), 1, f));
+    fclose(f);
+  }
+  static const uint8_t kExpectedContents[] = {
+    'R', 'I', 'F', 'F',
+    42, 0, 0, 0,  // size of whole file - 8: 6 + 44 - 8
+    'W', 'A', 'V', 'E',
+    'f', 'm', 't', ' ',
+    16, 0, 0, 0,  // size of fmt block - 8: 24 - 8
+    1, 0,  // format: PCM (1)
+    1, 0,  // channels: 1
+    0x13, 0x37, 0, 0,  // sample rate: 14099
+    0x26, 0x6e, 0, 0,  // byte rate: 2 * 14099
+    2, 0,  // block align: NumChannels * BytesPerSample
+    16, 0,  // bits per sample: 2 * 8
+    'd', 'a', 't', 'a',
+    6, 0, 0, 0,  // size of payload: 6
+    0, 0,  // first sample: 0.0
+    10, 0,  // second sample: 10.0
+    0xff, 0x7f,  // third sample: 4e4 (saturated)
+    kMetadata[0], kMetadata[1],
+  };
+  static const size_t kContentSize =
+      kWavHeaderSize + kNumSamples * sizeof(int16_t) + sizeof(kMetadata);
+  static_assert(sizeof(kExpectedContents) == kContentSize, "content size");
+  EXPECT_EQ(kContentSize, test::GetFileSize(outfile));
+  FILE* f = fopen(outfile.c_str(), "rb");
+  ASSERT_TRUE(f);
+  uint8_t contents[kContentSize];
+  ASSERT_EQ(1u, fread(contents, kContentSize, 1, f));
+  EXPECT_EQ(0, fclose(f));
+  EXPECT_EQ(0, memcmp(kExpectedContents, contents, kContentSize));
+
+  {
+    WavReader r(outfile);
+    EXPECT_EQ(14099, r.sample_rate());
+    EXPECT_EQ(1u, r.num_channels());
+    EXPECT_EQ(kNumSamples, r.num_samples());
+    static const float kTruncatedSamples[] = {0.0, 10.0, 32767.0};
+    float samples[kNumSamples];
+    EXPECT_EQ(kNumSamples, r.ReadSamples(kNumSamples, samples));
+    EXPECT_EQ(0, memcmp(kTruncatedSamples, samples, sizeof(samples)));
+    EXPECT_EQ(0u, r.ReadSamples(kNumSamples, samples));
+  }
+}
+
+// Write a tiny WAV file with the C interface and verify the result.
+TEST(WavWriterTest, C) {
+  const std::string outfile = test::OutputPath() + "wavtest2.wav";
+  rtc_WavWriter* w = rtc_WavOpen(outfile.c_str(), 11904, 2);
+  EXPECT_EQ(11904, rtc_WavSampleRate(w));
+  EXPECT_EQ(2u, rtc_WavNumChannels(w));
+  EXPECT_EQ(0u, rtc_WavNumSamples(w));
+  static const size_t kNumSamples = 4;
+  rtc_WavWriteSamples(w, &kSamples[0], 2);
+  EXPECT_EQ(2u, rtc_WavNumSamples(w));
+  rtc_WavWriteSamples(w, &kSamples[2], kNumSamples - 2);
+  EXPECT_EQ(kNumSamples, rtc_WavNumSamples(w));
+  rtc_WavClose(w);
+  static const uint8_t kExpectedContents[] = {
+    'R', 'I', 'F', 'F',
+    44, 0, 0, 0,  // size of whole file - 8: 8 + 44 - 8
+    'W', 'A', 'V', 'E',
+    'f', 'm', 't', ' ',
+    16, 0, 0, 0,  // size of fmt block - 8: 24 - 8
+    1, 0,  // format: PCM (1)
+    2, 0,  // channels: 2
+    0x80, 0x2e, 0, 0,  // sample rate: 11904
+    0, 0xba, 0, 0,  // byte rate: 2 * 2 * 11904
+    4, 0,  // block align: NumChannels * BytesPerSample
+    16, 0,  // bits per sample: 2 * 8
+    'd', 'a', 't', 'a',
+    8, 0, 0, 0,  // size of payload: 8
+    0, 0,  // first sample: 0.0
+    10, 0,  // second sample: 10.0
+    0xff, 0x7f,  // third sample: 4e4 (saturated)
+    0, 0x80,  // fourth sample: -1e9 (saturated)
+  };
+  static const size_t kContentSize =
+      kWavHeaderSize + kNumSamples * sizeof(int16_t);
+  static_assert(sizeof(kExpectedContents) == kContentSize, "content size");
+  EXPECT_EQ(kContentSize, test::GetFileSize(outfile));
+  FILE* f = fopen(outfile.c_str(), "rb");
+  ASSERT_TRUE(f);
+  uint8_t contents[kContentSize];
+  ASSERT_EQ(1u, fread(contents, kContentSize, 1, f));
+  EXPECT_EQ(0, fclose(f));
+  EXPECT_EQ(0, memcmp(kExpectedContents, contents, kContentSize));
+}
+
+// Write a larger WAV file. You can listen to this file to sanity-check it.
+TEST(WavWriterTest, LargeFile) {
+  std::string outfile = test::OutputPath() + "wavtest3.wav";
+  static const int kSampleRate = 8000;
+  static const size_t kNumChannels = 2;
+  static const size_t kNumSamples = 3 * kSampleRate * kNumChannels;
+  float samples[kNumSamples];
+  for (size_t i = 0; i < kNumSamples; i += kNumChannels) {
+    // A nice periodic beeping sound.
+    static const double kToneHz = 440;
+    const double t = static_cast<double>(i) / (kNumChannels * kSampleRate);
+    const double x =
+        std::numeric_limits<int16_t>::max() * std::sin(t * kToneHz * 2 * M_PI);
+    samples[i] = std::pow(std::sin(t * 2 * 2 * M_PI), 10) * x;
+    samples[i + 1] = std::pow(std::cos(t * 2 * 2 * M_PI), 10) * x;
+  }
+  {
+    WavWriter w(outfile, kSampleRate, kNumChannels);
+    EXPECT_EQ(kSampleRate, w.sample_rate());
+    EXPECT_EQ(kNumChannels, w.num_channels());
+    EXPECT_EQ(0u, w.num_samples());
+    w.WriteSamples(samples, kNumSamples);
+    EXPECT_EQ(kNumSamples, w.num_samples());
+  }
+  EXPECT_EQ(sizeof(int16_t) * kNumSamples + kWavHeaderSize,
+            test::GetFileSize(outfile));
+
+  {
+    WavReader r(outfile);
+    EXPECT_EQ(kSampleRate, r.sample_rate());
+    EXPECT_EQ(kNumChannels, r.num_channels());
+    EXPECT_EQ(kNumSamples, r.num_samples());
+
+    float read_samples[kNumSamples];
+    EXPECT_EQ(kNumSamples, r.ReadSamples(kNumSamples, read_samples));
+    for (size_t i = 0; i < kNumSamples; ++i)
+      EXPECT_NEAR(samples[i], read_samples[i], 1);
+
+    EXPECT_EQ(0u, r.ReadSamples(kNumSamples, read_samples));
+  }
+}
+
+}  // namespace webrtc
diff --git a/common_audio/wav_header.cc b/common_audio/wav_header.cc
new file mode 100644
index 0000000..a57e917
--- /dev/null
+++ b/common_audio/wav_header.cc
@@ -0,0 +1,243 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Based on the WAV file format documentation at
+// https://ccrma.stanford.edu/courses/422/projects/WaveFormat/ and
+// http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html
+
+#include "common_audio/wav_header.h"
+
+#include <algorithm>
+#include <cstring>
+#include <limits>
+#include <string>
+
+#include "common_audio/include/audio_util.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+struct ChunkHeader {
+  uint32_t ID;
+  uint32_t Size;
+};
+static_assert(sizeof(ChunkHeader) == 8, "ChunkHeader size");
+
+// We can't nest this definition in WavHeader, because VS2013 gives an error
+// on sizeof(WavHeader::fmt): "error C2070: 'unknown': illegal sizeof operand".
+struct FmtSubchunk {
+  ChunkHeader header;
+  uint16_t AudioFormat;
+  uint16_t NumChannels;
+  uint32_t SampleRate;
+  uint32_t ByteRate;
+  uint16_t BlockAlign;
+  uint16_t BitsPerSample;
+};
+static_assert(sizeof(FmtSubchunk) == 24, "FmtSubchunk size");
+const uint32_t kFmtSubchunkSize = sizeof(FmtSubchunk) - sizeof(ChunkHeader);
+
+struct WavHeader {
+  struct {
+    ChunkHeader header;
+    uint32_t Format;
+  } riff;
+  FmtSubchunk fmt;
+  struct {
+    ChunkHeader header;
+  } data;
+};
+static_assert(sizeof(WavHeader) == kWavHeaderSize, "no padding in header");
+
+}  // namespace
+
+bool CheckWavParameters(size_t num_channels,
+                        int sample_rate,
+                        WavFormat format,
+                        size_t bytes_per_sample,
+                        size_t num_samples) {
+  // num_channels, sample_rate, and bytes_per_sample must be positive, must fit
+  // in their respective fields, and their product must fit in the 32-bit
+  // ByteRate field.
+  if (num_channels == 0 || sample_rate <= 0 || bytes_per_sample == 0)
+    return false;
+  if (static_cast<uint64_t>(sample_rate) > std::numeric_limits<uint32_t>::max())
+    return false;
+  if (num_channels > std::numeric_limits<uint16_t>::max())
+    return false;
+  if (static_cast<uint64_t>(bytes_per_sample) * 8 >
+      std::numeric_limits<uint16_t>::max())
+    return false;
+  if (static_cast<uint64_t>(sample_rate) * num_channels * bytes_per_sample >
+      std::numeric_limits<uint32_t>::max())
+    return false;
+
+  // format and bytes_per_sample must agree.
+  switch (format) {
+    case kWavFormatPcm:
+      // Other values may be OK, but for now we're conservative:
+      if (bytes_per_sample != 1 && bytes_per_sample != 2)
+        return false;
+      break;
+    case kWavFormatALaw:
+    case kWavFormatMuLaw:
+      if (bytes_per_sample != 1)
+        return false;
+      break;
+    default:
+      return false;
+  }
+
+  // The number of bytes in the file, not counting the first ChunkHeader, must
+  // be less than 2^32; otherwise, the ChunkSize field overflows.
+  const size_t header_size = kWavHeaderSize - sizeof(ChunkHeader);
+  const size_t max_samples =
+      (std::numeric_limits<uint32_t>::max() - header_size) / bytes_per_sample;
+  if (num_samples > max_samples)
+    return false;
+
+  // Each channel must have the same number of samples.
+  if (num_samples % num_channels != 0)
+    return false;
+
+  return true;
+}
+
+#ifdef WEBRTC_ARCH_LITTLE_ENDIAN
+static inline void WriteLE16(uint16_t* f, uint16_t x) { *f = x; }
+static inline void WriteLE32(uint32_t* f, uint32_t x) { *f = x; }
+static inline void WriteFourCC(uint32_t* f, char a, char b, char c, char d) {
+  *f = static_cast<uint32_t>(a)
+      | static_cast<uint32_t>(b) << 8
+      | static_cast<uint32_t>(c) << 16
+      | static_cast<uint32_t>(d) << 24;
+}
+
+static inline uint16_t ReadLE16(uint16_t x) { return x; }
+static inline uint32_t ReadLE32(uint32_t x) { return x; }
+static inline std::string ReadFourCC(uint32_t x) {
+  return std::string(reinterpret_cast<char*>(&x), 4);
+}
+#else
+#error "Write be-to-le conversion functions"
+#endif
+
+static inline uint32_t RiffChunkSize(size_t bytes_in_payload) {
+  return static_cast<uint32_t>(
+      bytes_in_payload + kWavHeaderSize - sizeof(ChunkHeader));
+}
+
+static inline uint32_t ByteRate(size_t num_channels, int sample_rate,
+                                size_t bytes_per_sample) {
+  return static_cast<uint32_t>(num_channels * sample_rate * bytes_per_sample);
+}
+
+static inline uint16_t BlockAlign(size_t num_channels,
+                                  size_t bytes_per_sample) {
+  return static_cast<uint16_t>(num_channels * bytes_per_sample);
+}
+
+void WriteWavHeader(uint8_t* buf,
+                    size_t num_channels,
+                    int sample_rate,
+                    WavFormat format,
+                    size_t bytes_per_sample,
+                    size_t num_samples) {
+  RTC_CHECK(CheckWavParameters(num_channels, sample_rate, format,
+                               bytes_per_sample, num_samples));
+
+  WavHeader header;
+  const size_t bytes_in_payload = bytes_per_sample * num_samples;
+
+  WriteFourCC(&header.riff.header.ID, 'R', 'I', 'F', 'F');
+  WriteLE32(&header.riff.header.Size, RiffChunkSize(bytes_in_payload));
+  WriteFourCC(&header.riff.Format, 'W', 'A', 'V', 'E');
+
+  WriteFourCC(&header.fmt.header.ID, 'f', 'm', 't', ' ');
+  WriteLE32(&header.fmt.header.Size, kFmtSubchunkSize);
+  WriteLE16(&header.fmt.AudioFormat, format);
+  WriteLE16(&header.fmt.NumChannels, static_cast<uint16_t>(num_channels));
+  WriteLE32(&header.fmt.SampleRate, sample_rate);
+  WriteLE32(&header.fmt.ByteRate, ByteRate(num_channels, sample_rate,
+                                           bytes_per_sample));
+  WriteLE16(&header.fmt.BlockAlign, BlockAlign(num_channels, bytes_per_sample));
+  WriteLE16(&header.fmt.BitsPerSample,
+            static_cast<uint16_t>(8 * bytes_per_sample));
+
+  WriteFourCC(&header.data.header.ID, 'd', 'a', 't', 'a');
+  WriteLE32(&header.data.header.Size, static_cast<uint32_t>(bytes_in_payload));
+
+  // Do an extra copy rather than writing everything to buf directly, since buf
+  // might not be correctly aligned.
+  memcpy(buf, &header, kWavHeaderSize);
+}
+
+bool ReadWavHeader(ReadableWav* readable,
+                   size_t* num_channels,
+                   int* sample_rate,
+                   WavFormat* format,
+                   size_t* bytes_per_sample,
+                   size_t* num_samples) {
+  WavHeader header;
+  if (readable->Read(&header, kWavHeaderSize - sizeof(header.data)) !=
+      kWavHeaderSize - sizeof(header.data))
+    return false;
+
+  const uint32_t fmt_size = ReadLE32(header.fmt.header.Size);
+  if (fmt_size != kFmtSubchunkSize) {
+    // There is an optional two-byte extension field permitted to be present
+    // with PCM, but which must be zero.
+    int16_t ext_size;
+    if (kFmtSubchunkSize + sizeof(ext_size) != fmt_size)
+      return false;
+    if (readable->Read(&ext_size, sizeof(ext_size)) != sizeof(ext_size))
+      return false;
+    if (ext_size != 0)
+      return false;
+  }
+  if (readable->Read(&header.data, sizeof(header.data)) != sizeof(header.data))
+    return false;
+
+  // Parse needed fields.
+  *format = static_cast<WavFormat>(ReadLE16(header.fmt.AudioFormat));
+  *num_channels = ReadLE16(header.fmt.NumChannels);
+  *sample_rate = ReadLE32(header.fmt.SampleRate);
+  *bytes_per_sample = ReadLE16(header.fmt.BitsPerSample) / 8;
+  const size_t bytes_in_payload = ReadLE32(header.data.header.Size);
+  if (*bytes_per_sample == 0)
+    return false;
+  *num_samples = bytes_in_payload / *bytes_per_sample;
+
+  // Sanity check remaining fields.
+  if (ReadFourCC(header.riff.header.ID) != "RIFF")
+    return false;
+  if (ReadFourCC(header.riff.Format) != "WAVE")
+    return false;
+  if (ReadFourCC(header.fmt.header.ID) != "fmt ")
+    return false;
+  if (ReadFourCC(header.data.header.ID) != "data")
+    return false;
+
+  if (ReadLE32(header.riff.header.Size) < RiffChunkSize(bytes_in_payload))
+    return false;
+  if (ReadLE32(header.fmt.ByteRate) !=
+      ByteRate(*num_channels, *sample_rate, *bytes_per_sample))
+    return false;
+  if (ReadLE16(header.fmt.BlockAlign) !=
+      BlockAlign(*num_channels, *bytes_per_sample))
+    return false;
+
+  return CheckWavParameters(*num_channels, *sample_rate, *format,
+                            *bytes_per_sample, *num_samples);
+}
+
+
+}  // namespace webrtc
diff --git a/common_audio/wav_header.h b/common_audio/wav_header.h
new file mode 100644
index 0000000..2295fbe
--- /dev/null
+++ b/common_audio/wav_header.h
@@ -0,0 +1,64 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_WAV_HEADER_H_
+#define COMMON_AUDIO_WAV_HEADER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace webrtc {
+
+static const size_t kWavHeaderSize = 44;
+
+class ReadableWav {
+ public:
+  // Returns the number of bytes read.
+  size_t virtual Read(void* buf, size_t num_bytes) = 0;
+  virtual ~ReadableWav() {}
+};
+
+enum WavFormat {
+  kWavFormatPcm   = 1,  // PCM, each sample of size bytes_per_sample
+  kWavFormatALaw  = 6,  // 8-bit ITU-T G.711 A-law
+  kWavFormatMuLaw = 7,  // 8-bit ITU-T G.711 mu-law
+};
+
+// Return true if the given parameters will make a well-formed WAV header.
+bool CheckWavParameters(size_t num_channels,
+                        int sample_rate,
+                        WavFormat format,
+                        size_t bytes_per_sample,
+                        size_t num_samples);
+
+// Write a kWavHeaderSize bytes long WAV header to buf. The payload that
+// follows the header is supposed to have the specified number of interleaved
+// channels and contain the specified total number of samples of the specified
+// type. CHECKs the input parameters for validity.
+void WriteWavHeader(uint8_t* buf,
+                    size_t num_channels,
+                    int sample_rate,
+                    WavFormat format,
+                    size_t bytes_per_sample,
+                    size_t num_samples);
+
+// Read a WAV header from an implemented ReadableWav and parse the values into
+// the provided output parameters. ReadableWav is used because the header can
+// be variably sized. Returns false if the header is invalid.
+bool ReadWavHeader(ReadableWav* readable,
+                   size_t* num_channels,
+                   int* sample_rate,
+                   WavFormat* format,
+                   size_t* bytes_per_sample,
+                   size_t* num_samples);
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_WAV_HEADER_H_
diff --git a/common_audio/wav_header_unittest.cc b/common_audio/wav_header_unittest.cc
new file mode 100644
index 0000000..c6f605f
--- /dev/null
+++ b/common_audio/wav_header_unittest.cc
@@ -0,0 +1,323 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits>
+
+#include "common_audio/wav_header.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+// Doesn't take ownership of the buffer.
+class ReadableWavBuffer : public ReadableWav {
+ public:
+  ReadableWavBuffer(const uint8_t* buf, size_t size)
+      : buf_(buf),
+        size_(size),
+        pos_(0),
+        buf_exhausted_(false),
+        check_read_size_(true) {}
+  ReadableWavBuffer(const uint8_t* buf, size_t size, bool check_read_size)
+      : buf_(buf),
+        size_(size),
+        pos_(0),
+        buf_exhausted_(false),
+        check_read_size_(check_read_size) {}
+
+  virtual ~ReadableWavBuffer() {
+    // Verify the entire buffer has been read.
+    if (check_read_size_)
+      EXPECT_EQ(size_, pos_);
+  }
+
+  virtual size_t Read(void* buf, size_t num_bytes) {
+    // Verify we don't try to read outside of a properly sized header.
+    if (size_ >= kWavHeaderSize)
+      EXPECT_GE(size_, pos_ + num_bytes);
+    EXPECT_FALSE(buf_exhausted_);
+
+    const size_t bytes_remaining = size_ - pos_;
+    if (num_bytes > bytes_remaining) {
+      // The caller is signalled about an exhausted buffer when we return fewer
+      // bytes than requested. There should not be another read attempt after
+      // this point.
+      buf_exhausted_ = true;
+      num_bytes = bytes_remaining;
+    }
+    memcpy(buf, &buf_[pos_], num_bytes);
+    pos_ += num_bytes;
+    return num_bytes;
+  }
+
+ private:
+  const uint8_t* buf_;
+  const size_t size_;
+  size_t pos_;
+  bool buf_exhausted_;
+  const bool check_read_size_;
+};
+
+// Try various choices of WAV header parameters, and make sure that the good
+// ones are accepted and the bad ones rejected.
+TEST(WavHeaderTest, CheckWavParameters) {
+  // Try some really stupid values for one parameter at a time.
+  EXPECT_TRUE(CheckWavParameters(1, 8000, kWavFormatPcm, 1, 0));
+  EXPECT_FALSE(CheckWavParameters(0, 8000, kWavFormatPcm, 1, 0));
+  EXPECT_FALSE(CheckWavParameters(0x10000, 8000, kWavFormatPcm, 1, 0));
+  EXPECT_FALSE(CheckWavParameters(1, 0, kWavFormatPcm, 1, 0));
+  EXPECT_FALSE(CheckWavParameters(1, 8000, WavFormat(0), 1, 0));
+  EXPECT_FALSE(CheckWavParameters(1, 8000, kWavFormatPcm, 0, 0));
+
+  // Try invalid format/bytes-per-sample combinations.
+  EXPECT_TRUE(CheckWavParameters(1, 8000, kWavFormatPcm, 2, 0));
+  EXPECT_FALSE(CheckWavParameters(1, 8000, kWavFormatPcm, 4, 0));
+  EXPECT_FALSE(CheckWavParameters(1, 8000, kWavFormatALaw, 2, 0));
+  EXPECT_FALSE(CheckWavParameters(1, 8000, kWavFormatMuLaw, 2, 0));
+
+  // Too large values.
+  EXPECT_FALSE(CheckWavParameters(1 << 20, 1 << 20, kWavFormatPcm, 1, 0));
+  EXPECT_FALSE(CheckWavParameters(
+      1, 8000, kWavFormatPcm, 1, std::numeric_limits<uint32_t>::max()));
+
+  // Not the same number of samples for each channel.
+  EXPECT_FALSE(CheckWavParameters(3, 8000, kWavFormatPcm, 1, 5));
+}
+
+TEST(WavHeaderTest, ReadWavHeaderWithErrors) {
+  size_t num_channels = 0;
+  int sample_rate = 0;
+  WavFormat format = kWavFormatPcm;
+  size_t bytes_per_sample = 0;
+  size_t num_samples = 0;
+
+  // Test a few ways the header can be invalid. We start with the valid header
+  // used in WriteAndReadWavHeader, and invalidate one field per test. The
+  // invalid field is indicated in the array name, and in the comments with
+  // *BAD*.
+  {
+    static const uint8_t kBadRiffID[] = {
+      'R', 'i', 'f', 'f',  // *BAD*
+      0xbd, 0xd0, 0x5b, 0x07,  // size of whole file - 8: 123457689 + 44 - 8
+      'W', 'A', 'V', 'E',
+      'f', 'm', 't', ' ',
+      16, 0, 0, 0,  // size of fmt block - 8: 24 - 8
+      6, 0,  // format: A-law (6)
+      17, 0,  // channels: 17
+      0x39, 0x30, 0, 0,  // sample rate: 12345
+      0xc9, 0x33, 0x03, 0,  // byte rate: 1 * 17 * 12345
+      17, 0,  // block align: NumChannels * BytesPerSample
+      8, 0,  // bits per sample: 1 * 8
+      'd', 'a', 't', 'a',
+      0x99, 0xd0, 0x5b, 0x07,  // size of payload: 123457689
+    };
+    ReadableWavBuffer r(kBadRiffID, sizeof(kBadRiffID));
+    EXPECT_FALSE(
+        ReadWavHeader(&r, &num_channels, &sample_rate, &format,
+                      &bytes_per_sample, &num_samples));
+  }
+  {
+    static const uint8_t kBadBitsPerSample[] = {
+      'R', 'I', 'F', 'F',
+      0xbd, 0xd0, 0x5b, 0x07,  // size of whole file - 8: 123457689 + 44 - 8
+      'W', 'A', 'V', 'E',
+      'f', 'm', 't', ' ',
+      16, 0, 0, 0,  // size of fmt block - 8: 24 - 8
+      6, 0,  // format: A-law (6)
+      17, 0,  // channels: 17
+      0x39, 0x30, 0, 0,  // sample rate: 12345
+      0xc9, 0x33, 0x03, 0,  // byte rate: 1 * 17 * 12345
+      17, 0,  // block align: NumChannels * BytesPerSample
+      1, 0,  // bits per sample: *BAD*
+      'd', 'a', 't', 'a',
+      0x99, 0xd0, 0x5b, 0x07,  // size of payload: 123457689
+    };
+    ReadableWavBuffer r(kBadBitsPerSample, sizeof(kBadBitsPerSample));
+    EXPECT_FALSE(
+        ReadWavHeader(&r, &num_channels, &sample_rate, &format,
+                      &bytes_per_sample, &num_samples));
+  }
+  {
+    static const uint8_t kBadByteRate[] = {
+      'R', 'I', 'F', 'F',
+      0xbd, 0xd0, 0x5b, 0x07,  // size of whole file - 8: 123457689 + 44 - 8
+      'W', 'A', 'V', 'E',
+      'f', 'm', 't', ' ',
+      16, 0, 0, 0,  // size of fmt block - 8: 24 - 8
+      6, 0,  // format: A-law (6)
+      17, 0,  // channels: 17
+      0x39, 0x30, 0, 0,  // sample rate: 12345
+      0x00, 0x33, 0x03, 0,  // byte rate: *BAD*
+      17, 0,  // block align: NumChannels * BytesPerSample
+      8, 0,  // bits per sample: 1 * 8
+      'd', 'a', 't', 'a',
+      0x99, 0xd0, 0x5b, 0x07,  // size of payload: 123457689
+    };
+    ReadableWavBuffer r(kBadByteRate, sizeof(kBadByteRate));
+    EXPECT_FALSE(
+        ReadWavHeader(&r, &num_channels, &sample_rate, &format,
+                      &bytes_per_sample, &num_samples));
+  }
+  {
+    static const uint8_t kBadFmtHeaderSize[] = {
+      'R', 'I', 'F', 'F',
+      0xbd, 0xd0, 0x5b, 0x07,  // size of whole file - 8: 123457689 + 44 - 8
+      'W', 'A', 'V', 'E',
+      'f', 'm', 't', ' ',
+      17, 0, 0, 0,  // size of fmt block *BAD*. Only 16 and 18 permitted.
+      6, 0,  // format: A-law (6)
+      17, 0,  // channels: 17
+      0x39, 0x30, 0, 0,  // sample rate: 12345
+      0xc9, 0x33, 0x03, 0,  // byte rate: 1 * 17 * 12345
+      17, 0,  // block align: NumChannels * BytesPerSample
+      8, 0,  // bits per sample: 1 * 8
+      0,  // extra (though invalid) header byte
+      'd', 'a', 't', 'a',
+      0x99, 0xd0, 0x5b, 0x07,  // size of payload: 123457689
+    };
+    ReadableWavBuffer r(kBadFmtHeaderSize, sizeof(kBadFmtHeaderSize), false);
+    EXPECT_FALSE(
+        ReadWavHeader(&r, &num_channels, &sample_rate, &format,
+                      &bytes_per_sample, &num_samples));
+  }
+  {
+    static const uint8_t kNonZeroExtensionField[] = {
+      'R', 'I', 'F', 'F',
+      0xbd, 0xd0, 0x5b, 0x07,  // size of whole file - 8: 123457689 + 44 - 8
+      'W', 'A', 'V', 'E',
+      'f', 'm', 't', ' ',
+      18, 0, 0, 0,  // size of fmt block - 8: 24 - 8
+      6, 0,  // format: A-law (6)
+      17, 0,  // channels: 17
+      0x39, 0x30, 0, 0,  // sample rate: 12345
+      0xc9, 0x33, 0x03, 0,  // byte rate: 1 * 17 * 12345
+      17, 0,  // block align: NumChannels * BytesPerSample
+      8, 0,  // bits per sample: 1 * 8
+      1, 0,  // non-zero extension field *BAD*
+      'd', 'a', 't', 'a',
+      0x99, 0xd0, 0x5b, 0x07,  // size of payload: 123457689
+    };
+    ReadableWavBuffer r(kNonZeroExtensionField, sizeof(kNonZeroExtensionField),
+                        false);
+    EXPECT_FALSE(
+        ReadWavHeader(&r, &num_channels, &sample_rate, &format,
+                      &bytes_per_sample, &num_samples));
+  }
+  {
+    static const uint8_t kMissingDataChunk[] = {
+      'R', 'I', 'F', 'F',
+      0xbd, 0xd0, 0x5b, 0x07,  // size of whole file - 8: 123457689 + 44 - 8
+      'W', 'A', 'V', 'E',
+      'f', 'm', 't', ' ',
+      16, 0, 0, 0,  // size of fmt block - 8: 24 - 8
+      6, 0,  // format: A-law (6)
+      17, 0,  // channels: 17
+      0x39, 0x30, 0, 0,  // sample rate: 12345
+      0xc9, 0x33, 0x03, 0,  // byte rate: 1 * 17 * 12345
+      17, 0,  // block align: NumChannels * BytesPerSample
+      8, 0,  // bits per sample: 1 * 8
+    };
+    ReadableWavBuffer r(kMissingDataChunk, sizeof(kMissingDataChunk));
+    EXPECT_FALSE(
+        ReadWavHeader(&r, &num_channels, &sample_rate, &format,
+                      &bytes_per_sample, &num_samples));
+  }
+  {
+    static const uint8_t kMissingFmtAndDataChunks[] = {
+      'R', 'I', 'F', 'F',
+      0xbd, 0xd0, 0x5b, 0x07,  // size of whole file - 8: 123457689 + 44 - 8
+      'W', 'A', 'V', 'E',
+    };
+    ReadableWavBuffer r(kMissingFmtAndDataChunks,
+                        sizeof(kMissingFmtAndDataChunks));
+    EXPECT_FALSE(
+        ReadWavHeader(&r, &num_channels, &sample_rate, &format,
+                      &bytes_per_sample, &num_samples));
+  }
+}
+
+// Try writing and reading a valid WAV header and make sure it looks OK.
+TEST(WavHeaderTest, WriteAndReadWavHeader) {
+  static const int kSize = 4 + kWavHeaderSize + 4;
+  uint8_t buf[kSize];
+  memset(buf, 0xa4, sizeof(buf));
+  WriteWavHeader(buf + 4, 17, 12345, kWavFormatALaw, 1, 123457689);
+  static const uint8_t kExpectedBuf[] = {
+    0xa4, 0xa4, 0xa4, 0xa4,  // untouched bytes before header
+    'R', 'I', 'F', 'F',
+    0xbd, 0xd0, 0x5b, 0x07,  // size of whole file - 8: 123457689 + 44 - 8
+    'W', 'A', 'V', 'E',
+    'f', 'm', 't', ' ',
+    16, 0, 0, 0,  // size of fmt block - 8: 24 - 8
+    6, 0,  // format: A-law (6)
+    17, 0,  // channels: 17
+    0x39, 0x30, 0, 0,  // sample rate: 12345
+    0xc9, 0x33, 0x03, 0,  // byte rate: 1 * 17 * 12345
+    17, 0,  // block align: NumChannels * BytesPerSample
+    8, 0,  // bits per sample: 1 * 8
+    'd', 'a', 't', 'a',
+    0x99, 0xd0, 0x5b, 0x07,  // size of payload: 123457689
+    0xa4, 0xa4, 0xa4, 0xa4,  // untouched bytes after header
+  };
+  static_assert(sizeof(kExpectedBuf) == kSize, "buffer size");
+  EXPECT_EQ(0, memcmp(kExpectedBuf, buf, kSize));
+
+  size_t num_channels = 0;
+  int sample_rate = 0;
+  WavFormat format = kWavFormatPcm;
+  size_t bytes_per_sample = 0;
+  size_t num_samples = 0;
+  ReadableWavBuffer r(buf + 4, sizeof(buf) - 8);
+  EXPECT_TRUE(
+      ReadWavHeader(&r, &num_channels, &sample_rate, &format,
+                    &bytes_per_sample, &num_samples));
+  EXPECT_EQ(17u, num_channels);
+  EXPECT_EQ(12345, sample_rate);
+  EXPECT_EQ(kWavFormatALaw, format);
+  EXPECT_EQ(1u, bytes_per_sample);
+  EXPECT_EQ(123457689u, num_samples);
+}
+
+// Try reading an atypical but valid WAV header and make sure it's parsed OK.
+TEST(WavHeaderTest, ReadAtypicalWavHeader) {
+  static const uint8_t kBuf[] = {
+    'R', 'I', 'F', 'F',
+    0x3d, 0xd1, 0x5b, 0x07,  // size of whole file - 8 + an extra 128 bytes of
+                             // "metadata": 123457689 + 44 - 8 + 128. (atypical)
+    'W', 'A', 'V', 'E',
+    'f', 'm', 't', ' ',
+    18, 0, 0, 0,  // size of fmt block (with an atypical extension size field)
+    6, 0,  // format: A-law (6)
+    17, 0,  // channels: 17
+    0x39, 0x30, 0, 0,  // sample rate: 12345
+    0xc9, 0x33, 0x03, 0,  // byte rate: 1 * 17 * 12345
+    17, 0,  // block align: NumChannels * BytesPerSample
+    8, 0,  // bits per sample: 1 * 8
+    0, 0,  // zero extension size field (atypical)
+    'd', 'a', 't', 'a',
+    0x99, 0xd0, 0x5b, 0x07,  // size of payload: 123457689
+  };
+
+  size_t num_channels = 0;
+  int sample_rate = 0;
+  WavFormat format = kWavFormatPcm;
+  size_t bytes_per_sample = 0;
+  size_t num_samples = 0;
+  ReadableWavBuffer r(kBuf, sizeof(kBuf));
+  EXPECT_TRUE(
+      ReadWavHeader(&r, &num_channels, &sample_rate, &format,
+                    &bytes_per_sample, &num_samples));
+  EXPECT_EQ(17u, num_channels);
+  EXPECT_EQ(12345, sample_rate);
+  EXPECT_EQ(kWavFormatALaw, format);
+  EXPECT_EQ(1u, bytes_per_sample);
+  EXPECT_EQ(123457689u, num_samples);
+}
+
+}  // namespace webrtc
diff --git a/common_audio/window_generator.cc b/common_audio/window_generator.cc
new file mode 100644
index 0000000..823d2b7
--- /dev/null
+++ b/common_audio/window_generator.cc
@@ -0,0 +1,72 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#define _USE_MATH_DEFINES
+
+#include "common_audio/window_generator.h"
+
+#include <cmath>
+#include <complex>
+
+#include "rtc_base/checks.h"
+
+using std::complex;
+
+namespace {
+
+// Modified Bessel function of order 0 for complex inputs.
+complex<float> I0(complex<float> x) {
+  complex<float> y = x / 3.75f;
+  y *= y;
+  return 1.0f + y * (
+    3.5156229f + y * (
+      3.0899424f + y * (
+        1.2067492f + y * (
+          0.2659732f + y * (
+            0.360768e-1f + y * 0.45813e-2f)))));
+}
+
+}  // namespace
+
+namespace webrtc {
+
+void WindowGenerator::Hanning(int length, float* window) {
+  RTC_CHECK_GT(length, 1);
+  RTC_CHECK(window != nullptr);
+  for (int i = 0; i < length; ++i) {
+    window[i] = 0.5f * (1 - cosf(2 * static_cast<float>(M_PI) * i /
+                                 (length - 1)));
+  }
+}
+
+void WindowGenerator::KaiserBesselDerived(float alpha, size_t length,
+                                          float* window) {
+  RTC_CHECK_GT(length, 1U);
+  RTC_CHECK(window != nullptr);
+
+  const size_t half = (length + 1) / 2;
+  float sum = 0.0f;
+
+  for (size_t i = 0; i <= half; ++i) {
+    complex<float> r = (4.0f * i) / length - 1.0f;
+    sum += I0(static_cast<float>(M_PI) * alpha * sqrt(1.0f - r * r)).real();
+    window[i] = sum;
+  }
+  for (size_t i = length - 1; i >= half; --i) {
+    window[length - i - 1] = sqrtf(window[length - i - 1] / sum);
+    window[i] = window[length - i - 1];
+  }
+  if (length % 2 == 1) {
+    window[half - 1] = sqrtf(window[half - 1] / sum);
+  }
+}
+
+}  // namespace webrtc
+
diff --git a/common_audio/window_generator.h b/common_audio/window_generator.h
new file mode 100644
index 0000000..5fc738e
--- /dev/null
+++ b/common_audio/window_generator.h
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_AUDIO_WINDOW_GENERATOR_H_
+#define COMMON_AUDIO_WINDOW_GENERATOR_H_
+
+#include <stddef.h>
+
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Helper class with generators for various signal transform windows.
+class WindowGenerator {
+ public:
+  static void Hanning(int length, float* window);
+  static void KaiserBesselDerived(float alpha, size_t length, float* window);
+
+ private:
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(WindowGenerator);
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_AUDIO_WINDOW_GENERATOR_H_
+
diff --git a/common_audio/window_generator_unittest.cc b/common_audio/window_generator_unittest.cc
new file mode 100644
index 0000000..b2089d4
--- /dev/null
+++ b/common_audio/window_generator_unittest.cc
@@ -0,0 +1,92 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/window_generator.h"
+
+#include <cstring>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(WindowGeneratorTest, KaiserBesselDerived) {
+  float window[7];
+
+  memset(window, 0, sizeof(window));
+
+  WindowGenerator::KaiserBesselDerived(0.397856f, 2, window);
+  ASSERT_NEAR(window[0], 0.707106f, 1e-6f);
+  ASSERT_NEAR(window[1], 0.707106f, 1e-6f);
+  ASSERT_NEAR(window[2], 0.0f, 1e-6f);
+  ASSERT_NEAR(window[3], 0.0f, 1e-6f);
+  ASSERT_NEAR(window[4], 0.0f, 1e-6f);
+  ASSERT_NEAR(window[5], 0.0f, 1e-6f);
+  ASSERT_NEAR(window[6], 0.0f, 1e-6f);
+
+  WindowGenerator::KaiserBesselDerived(0.397856f, 3, window);
+  ASSERT_NEAR(window[0], 0.598066f, 1e-6f);
+  ASSERT_NEAR(window[1], 0.922358f, 1e-6f);
+  ASSERT_NEAR(window[2], 0.598066f, 1e-6f);
+  ASSERT_NEAR(window[3], 0.0f, 1e-6f);
+  ASSERT_NEAR(window[4], 0.0f, 1e-6f);
+  ASSERT_NEAR(window[5], 0.0f, 1e-6f);
+  ASSERT_NEAR(window[6], 0.0f, 1e-6f);
+
+  WindowGenerator::KaiserBesselDerived(0.397856f, 6, window);
+  ASSERT_NEAR(window[0], 0.458495038865344f, 1e-6f);
+  ASSERT_NEAR(window[1], 0.707106781186548f, 1e-6f);
+  ASSERT_NEAR(window[2], 0.888696967101760f, 1e-6f);
+  ASSERT_NEAR(window[3], 0.888696967101760f, 1e-6f);
+  ASSERT_NEAR(window[4], 0.707106781186548f, 1e-6f);
+  ASSERT_NEAR(window[5], 0.458495038865344f, 1e-6f);
+  ASSERT_NEAR(window[6], 0.0f, 1e-6f);
+}
+
+TEST(WindowGeneratorTest, Hanning) {
+  float window[7];
+
+  memset(window, 0, sizeof(window));
+
+  window[0] = -1.0f;
+  window[1] = -1.0f;
+  WindowGenerator::Hanning(2, window);
+  ASSERT_NEAR(window[0], 0.0f, 1e-6f);
+  ASSERT_NEAR(window[1], 0.0f, 1e-6f);
+  ASSERT_NEAR(window[2], 0.0f, 1e-6f);
+  ASSERT_NEAR(window[3], 0.0f, 1e-6f);
+  ASSERT_NEAR(window[4], 0.0f, 1e-6f);
+  ASSERT_NEAR(window[5], 0.0f, 1e-6f);
+  ASSERT_NEAR(window[6], 0.0f, 1e-6f);
+
+  window[0] = -1.0f;
+  window[2] = -1.0f;
+  WindowGenerator::Hanning(3, window);
+  ASSERT_NEAR(window[0], 0.0f, 1e-6f);
+  ASSERT_NEAR(window[1], 1.0f, 1e-6f);
+  ASSERT_NEAR(window[2], 0.0f, 1e-6f);
+  ASSERT_NEAR(window[3], 0.0f, 1e-6f);
+  ASSERT_NEAR(window[4], 0.0f, 1e-6f);
+  ASSERT_NEAR(window[5], 0.0f, 1e-6f);
+  ASSERT_NEAR(window[6], 0.0f, 1e-6f);
+
+  window[0] = -1.0f;
+  window[5] = -1.0f;
+  WindowGenerator::Hanning(6, window);
+  ASSERT_NEAR(window[0], 0.0f, 1e-6f);
+  ASSERT_NEAR(window[1], 0.345491f, 1e-6f);
+  ASSERT_NEAR(window[2], 0.904508f, 1e-6f);
+  ASSERT_NEAR(window[3], 0.904508f, 1e-6f);
+  ASSERT_NEAR(window[4], 0.345491f, 1e-6f);
+  ASSERT_NEAR(window[5], 0.0f, 1e-6f);
+  ASSERT_NEAR(window[6], 0.0f, 1e-6f);
+}
+
+}  // namespace webrtc
+
diff --git a/common_types.h b/common_types.h
new file mode 100644
index 0000000..ee4ddf7
--- /dev/null
+++ b/common_types.h
@@ -0,0 +1,682 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef COMMON_TYPES_H_
+#define COMMON_TYPES_H_
+
+#include <stddef.h>
+#include <string.h>
+#include <ostream>
+#include <string>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/optional.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/deprecation.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+#if defined(_MSC_VER)
+// Disable "new behavior: elements of array will be default initialized"
+// warning. Affects OverUseDetectorOptions.
+#pragma warning(disable : 4351)
+#endif
+
+#if defined(WEBRTC_EXPORT)
+#define WEBRTC_DLLEXPORT _declspec(dllexport)
+#elif defined(WEBRTC_DLL)
+#define WEBRTC_DLLEXPORT _declspec(dllimport)
+#else
+#define WEBRTC_DLLEXPORT
+#endif
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#define RTP_PAYLOAD_NAME_SIZE 32u
+
+#if defined(WEBRTC_WIN) || defined(WIN32)
+// Compares two strings without regard to case.
+#define STR_CASE_CMP(s1, s2) ::_stricmp(s1, s2)
+// Compares characters of two strings without regard to case.
+#define STR_NCASE_CMP(s1, s2, n) ::_strnicmp(s1, s2, n)
+#else
+#define STR_CASE_CMP(s1, s2) ::strcasecmp(s1, s2)
+#define STR_NCASE_CMP(s1, s2, n) ::strncasecmp(s1, s2, n)
+#endif
+
+namespace webrtc {
+
+class RewindableStream {
+ public:
+  virtual ~RewindableStream() {}
+  virtual int Rewind() = 0;
+};
+
+class InStream : public RewindableStream {
+ public:
+  // Reads |len| bytes from file to |buf|. Returns the number of bytes read
+  // or -1 on error.
+  virtual int Read(void* buf, size_t len) = 0;
+};
+
+class OutStream : public RewindableStream {
+ public:
+  // Writes |len| bytes from |buf| to file. The actual writing may happen
+  // some time later. Call Flush() to force a write.
+  virtual bool Write(const void* buf, size_t len) = 0;
+};
+
+// For the deprecated MediaFile module.
+enum FileFormats {
+  kFileFormatWavFile = 1,
+  kFileFormatPcm16kHzFile = 7,
+  kFileFormatPcm8kHzFile = 8,
+  kFileFormatPcm32kHzFile = 9,
+};
+
+enum FrameType {
+  kEmptyFrame = 0,
+  kAudioFrameSpeech = 1,
+  kAudioFrameCN = 2,
+  kVideoFrameKey = 3,
+  kVideoFrameDelta = 4,
+};
+
+// Statistics for an RTCP channel
+struct RtcpStatistics {
+  RtcpStatistics()
+      : fraction_lost(0),
+        packets_lost(0),
+        extended_highest_sequence_number(0),
+        jitter(0) {}
+
+  uint8_t fraction_lost;
+  union {
+    int32_t packets_lost;  // Defined as a 24 bit signed integer in RTCP
+    RTC_DEPRECATED uint32_t cumulative_lost;
+  };
+  union {
+    uint32_t extended_highest_sequence_number;
+    RTC_DEPRECATED uint32_t extended_max_sequence_number;
+  };
+  uint32_t jitter;
+};
+
+class RtcpStatisticsCallback {
+ public:
+  virtual ~RtcpStatisticsCallback() {}
+
+  virtual void StatisticsUpdated(const RtcpStatistics& statistics,
+                                 uint32_t ssrc) = 0;
+  virtual void CNameChanged(const char* cname, uint32_t ssrc) = 0;
+};
+
+// Statistics for RTCP packet types.
+struct RtcpPacketTypeCounter {
+  RtcpPacketTypeCounter()
+      : first_packet_time_ms(-1),
+        nack_packets(0),
+        fir_packets(0),
+        pli_packets(0),
+        nack_requests(0),
+        unique_nack_requests(0) {}
+
+  void Add(const RtcpPacketTypeCounter& other) {
+    nack_packets += other.nack_packets;
+    fir_packets += other.fir_packets;
+    pli_packets += other.pli_packets;
+    nack_requests += other.nack_requests;
+    unique_nack_requests += other.unique_nack_requests;
+    if (other.first_packet_time_ms != -1 &&
+        (other.first_packet_time_ms < first_packet_time_ms ||
+         first_packet_time_ms == -1)) {
+      // Use oldest time.
+      first_packet_time_ms = other.first_packet_time_ms;
+    }
+  }
+
+  void Subtract(const RtcpPacketTypeCounter& other) {
+    nack_packets -= other.nack_packets;
+    fir_packets -= other.fir_packets;
+    pli_packets -= other.pli_packets;
+    nack_requests -= other.nack_requests;
+    unique_nack_requests -= other.unique_nack_requests;
+    if (other.first_packet_time_ms != -1 &&
+        (other.first_packet_time_ms > first_packet_time_ms ||
+         first_packet_time_ms == -1)) {
+      // Use youngest time.
+      first_packet_time_ms = other.first_packet_time_ms;
+    }
+  }
+
+  int64_t TimeSinceFirstPacketInMs(int64_t now_ms) const {
+    return (first_packet_time_ms == -1) ? -1 : (now_ms - first_packet_time_ms);
+  }
+
+  int UniqueNackRequestsInPercent() const {
+    if (nack_requests == 0) {
+      return 0;
+    }
+    return static_cast<int>((unique_nack_requests * 100.0f / nack_requests) +
+                            0.5f);
+  }
+
+  int64_t first_packet_time_ms;   // Time when first packet is sent/received.
+  uint32_t nack_packets;          // Number of RTCP NACK packets.
+  uint32_t fir_packets;           // Number of RTCP FIR packets.
+  uint32_t pli_packets;           // Number of RTCP PLI packets.
+  uint32_t nack_requests;         // Number of NACKed RTP packets.
+  uint32_t unique_nack_requests;  // Number of unique NACKed RTP packets.
+};
+
+class RtcpPacketTypeCounterObserver {
+ public:
+  virtual ~RtcpPacketTypeCounterObserver() {}
+  virtual void RtcpPacketTypesCounterUpdated(
+      uint32_t ssrc,
+      const RtcpPacketTypeCounter& packet_counter) = 0;
+};
+
+// Rate statistics for a stream.
+struct BitrateStatistics {
+  BitrateStatistics() : bitrate_bps(0), packet_rate(0) {}
+
+  uint32_t bitrate_bps;  // Bitrate in bits per second.
+  uint32_t packet_rate;  // Packet rate in packets per second.
+};
+
+// Callback, used to notify an observer whenever new rates have been estimated.
+class BitrateStatisticsObserver {
+ public:
+  virtual ~BitrateStatisticsObserver() {}
+
+  virtual void Notify(uint32_t total_bitrate_bps,
+                      uint32_t retransmit_bitrate_bps,
+                      uint32_t ssrc) = 0;
+};
+
+struct FrameCounts {
+  FrameCounts() : key_frames(0), delta_frames(0) {}
+  int key_frames;
+  int delta_frames;
+};
+
+// Callback, used to notify an observer whenever frame counts have been updated.
+class FrameCountObserver {
+ public:
+  virtual ~FrameCountObserver() {}
+  virtual void FrameCountUpdated(const FrameCounts& frame_counts,
+                                 uint32_t ssrc) = 0;
+};
+
+// Callback, used to notify an observer whenever the send-side delay is updated.
+class SendSideDelayObserver {
+ public:
+  virtual ~SendSideDelayObserver() {}
+  virtual void SendSideDelayUpdated(int avg_delay_ms,
+                                    int max_delay_ms,
+                                    uint32_t ssrc) = 0;
+};
+
+// Callback, used to notify an observer whenever a packet is sent to the
+// transport.
+// TODO(asapersson): This class will remove the need for SendSideDelayObserver.
+// Remove SendSideDelayObserver once possible.
+class SendPacketObserver {
+ public:
+  virtual ~SendPacketObserver() {}
+  virtual void OnSendPacket(uint16_t packet_id,
+                            int64_t capture_time_ms,
+                            uint32_t ssrc) = 0;
+};
+
+// Callback, used to notify an observer when the overhead per packet
+// has changed.
+class OverheadObserver {
+ public:
+  virtual ~OverheadObserver() = default;
+  virtual void OnOverheadChanged(size_t overhead_bytes_per_packet) = 0;
+};
+
+// ==================================================================
+// Voice specific types
+// ==================================================================
+
+// Each codec supported can be described by this structure.
+struct CodecInst {
+  int pltype;
+  char plname[RTP_PAYLOAD_NAME_SIZE];
+  int plfreq;
+  int pacsize;
+  size_t channels;
+  int rate;  // bits/sec unlike {start,min,max}Bitrate elsewhere in this file!
+
+  bool operator==(const CodecInst& other) const {
+    return pltype == other.pltype &&
+           (STR_CASE_CMP(plname, other.plname) == 0) &&
+           plfreq == other.plfreq && pacsize == other.pacsize &&
+           channels == other.channels && rate == other.rate;
+  }
+
+  bool operator!=(const CodecInst& other) const { return !(*this == other); }
+
+  friend std::ostream& operator<<(std::ostream& os, const CodecInst& ci) {
+    os << "{pltype: " << ci.pltype;
+    os << ", plname: " << ci.plname;
+    os << ", plfreq: " << ci.plfreq;
+    os << ", pacsize: " << ci.pacsize;
+    os << ", channels: " << ci.channels;
+    os << ", rate: " << ci.rate << "}";
+    return os;
+  }
+};
+
+// RTP
+enum { kRtpCsrcSize = 15 };  // RFC 3550 page 13
+
+enum PayloadFrequencies {
+  kFreq8000Hz = 8000,
+  kFreq16000Hz = 16000,
+  kFreq32000Hz = 32000
+};
+
+// Degree of bandwidth reduction.
+enum VadModes {
+  kVadConventional = 0,  // lowest reduction
+  kVadAggressiveLow,
+  kVadAggressiveMid,
+  kVadAggressiveHigh  // highest reduction
+};
+
+// NETEQ statistics.
+struct NetworkStatistics {
+  // current jitter buffer size in ms
+  uint16_t currentBufferSize;
+  // preferred (optimal) buffer size in ms
+  uint16_t preferredBufferSize;
+  // adding extra delay due to "peaky jitter"
+  bool jitterPeaksFound;
+  // Stats below correspond to similarly-named fields in the WebRTC stats spec.
+  // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats
+  uint64_t totalSamplesReceived;
+  uint64_t concealedSamples;
+  uint64_t concealmentEvents;
+  uint64_t jitterBufferDelayMs;
+  // Stats below DO NOT correspond directly to anything in the WebRTC stats
+  // Loss rate (network + late); fraction between 0 and 1, scaled to Q14.
+  uint16_t currentPacketLossRate;
+  // Late loss rate; fraction between 0 and 1, scaled to Q14.
+  union {
+    RTC_DEPRECATED uint16_t currentDiscardRate;
+  };
+  // fraction (of original stream) of synthesized audio inserted through
+  // expansion (in Q14)
+  uint16_t currentExpandRate;
+  // fraction (of original stream) of synthesized speech inserted through
+  // expansion (in Q14)
+  uint16_t currentSpeechExpandRate;
+  // fraction of synthesized speech inserted through pre-emptive expansion
+  // (in Q14)
+  uint16_t currentPreemptiveRate;
+  // fraction of data removed through acceleration (in Q14)
+  uint16_t currentAccelerateRate;
+  // fraction of data coming from secondary decoding (in Q14)
+  uint16_t currentSecondaryDecodedRate;
+  // Fraction of secondary data, including FEC and RED, that is discarded (in
+  // Q14). Discarding of secondary data can be caused by the reception of the
+  // primary data, obsoleting the secondary data. It can also be caused by early
+  // or late arrival of secondary data.
+  uint16_t currentSecondaryDiscardedRate;
+  // clock-drift in parts-per-million (negative or positive)
+  int32_t clockDriftPPM;
+  // average packet waiting time in the jitter buffer (ms)
+  int meanWaitingTimeMs;
+  // median packet waiting time in the jitter buffer (ms)
+  int medianWaitingTimeMs;
+  // min packet waiting time in the jitter buffer (ms)
+  int minWaitingTimeMs;
+  // max packet waiting time in the jitter buffer (ms)
+  int maxWaitingTimeMs;
+  // added samples in off mode due to packet loss
+  size_t addedSamples;
+};
+
+// Statistics for calls to AudioCodingModule::PlayoutData10Ms().
+struct AudioDecodingCallStats {
+  AudioDecodingCallStats()
+      : calls_to_silence_generator(0),
+        calls_to_neteq(0),
+        decoded_normal(0),
+        decoded_plc(0),
+        decoded_cng(0),
+        decoded_plc_cng(0),
+        decoded_muted_output(0) {}
+
+  int calls_to_silence_generator;  // Number of calls where silence generated,
+                                   // and NetEq was disengaged from decoding.
+  int calls_to_neteq;              // Number of calls to NetEq.
+  int decoded_normal;  // Number of calls where audio RTP packet decoded.
+  int decoded_plc;     // Number of calls resulted in PLC.
+  int decoded_cng;  // Number of calls where comfort noise generated due to DTX.
+  int decoded_plc_cng;  // Number of calls resulted where PLC faded to CNG.
+  int decoded_muted_output;  // Number of calls returning a muted state output.
+};
+
+// ==================================================================
+// Video specific types
+// ==================================================================
+
+// TODO(nisse): Delete, and switch to fourcc values everywhere?
+// Supported video types.
+enum class VideoType {
+  kUnknown,
+  kI420,
+  kIYUV,
+  kRGB24,
+  kABGR,
+  kARGB,
+  kARGB4444,
+  kRGB565,
+  kARGB1555,
+  kYUY2,
+  kYV12,
+  kUYVY,
+  kMJPEG,
+  kNV21,
+  kNV12,
+  kBGRA,
+};
+
+// Video codec
+enum { kMaxSimulcastStreams = 4 };
+enum { kMaxSpatialLayers = 5 };
+enum { kMaxTemporalStreams = 4 };
+
+enum VideoCodecComplexity {
+  kComplexityNormal = 0,
+  kComplexityHigh = 1,
+  kComplexityHigher = 2,
+  kComplexityMax = 3
+};
+
+enum VP8ResilienceMode {
+  kResilienceOff,    // The stream produced by the encoder requires a
+                     // recovery frame (typically a key frame) to be
+                     // decodable after a packet loss.
+  kResilientStream,  // A stream produced by the encoder is resilient to
+                     // packet losses, but packets within a frame subsequent
+                     // to a loss can't be decoded.
+  kResilientFrames   // Same as kResilientStream but with added resilience
+                     // within a frame.
+};
+
+class TemporalLayersFactory;
+// VP8 specific
+struct VideoCodecVP8 {
+  VideoCodecComplexity complexity;
+  VP8ResilienceMode resilience;
+  unsigned char numberOfTemporalLayers;
+  bool denoisingOn;
+  bool automaticResizeOn;
+  bool frameDroppingOn;
+  int keyFrameInterval;
+  TemporalLayersFactory* tl_factory;
+};
+
+// VP9 specific.
+struct VideoCodecVP9 {
+  VideoCodecComplexity complexity;
+  bool resilienceOn;
+  unsigned char numberOfTemporalLayers;
+  bool denoisingOn;
+  bool frameDroppingOn;
+  int keyFrameInterval;
+  bool adaptiveQpMode;
+  bool automaticResizeOn;
+  unsigned char numberOfSpatialLayers;
+  bool flexibleMode;
+};
+
+// TODO(magjed): Move this and other H264 related classes out to their own file.
+namespace H264 {
+
+enum Profile {
+  kProfileConstrainedBaseline,
+  kProfileBaseline,
+  kProfileMain,
+  kProfileConstrainedHigh,
+  kProfileHigh,
+};
+
+}  // namespace H264
+
+// H264 specific.
+struct VideoCodecH264 {
+  bool frameDroppingOn;
+  int keyFrameInterval;
+  // These are NULL/0 if not externally negotiated.
+  const uint8_t* spsData;
+  size_t spsLen;
+  const uint8_t* ppsData;
+  size_t ppsLen;
+  H264::Profile profile;
+};
+
+// Video codec types
+enum VideoCodecType {
+  kVideoCodecVP8,
+  kVideoCodecVP9,
+  kVideoCodecH264,
+  kVideoCodecI420,
+  kVideoCodecRED,
+  kVideoCodecULPFEC,
+  kVideoCodecFlexfec,
+  kVideoCodecGeneric,
+  kVideoCodecMultiplex,
+  kVideoCodecUnknown
+};
+
+// Translates from name of codec to codec type and vice versa.
+const char* CodecTypeToPayloadString(VideoCodecType type);
+VideoCodecType PayloadStringToCodecType(const std::string& name);
+
+union VideoCodecUnion {
+  VideoCodecVP8 VP8;
+  VideoCodecVP9 VP9;
+  VideoCodecH264 H264;
+};
+
+struct SpatialLayer {
+  unsigned short width;
+  unsigned short height;
+  unsigned char numberOfTemporalLayers;
+  unsigned int maxBitrate;     // kilobits/sec.
+  unsigned int targetBitrate;  // kilobits/sec.
+  unsigned int minBitrate;     // kilobits/sec.
+  unsigned int qpMax;          // minimum quality
+  bool active;                 // encoded and sent.
+};
+
+// Simulcast is when the same stream is encoded multiple times with different
+// settings such as resolution.
+typedef SpatialLayer SimulcastStream;
+
+enum VideoCodecMode { kRealtimeVideo, kScreensharing };
+
+// Common video codec properties
+class VideoCodec {
+ public:
+  VideoCodec();
+
+  // Public variables. TODO(hta): Make them private with accessors.
+  VideoCodecType codecType;
+  unsigned char plType;
+
+  unsigned short width;
+  unsigned short height;
+
+  unsigned int startBitrate;   // kilobits/sec.
+  unsigned int maxBitrate;     // kilobits/sec.
+  unsigned int minBitrate;     // kilobits/sec.
+  unsigned int targetBitrate;  // kilobits/sec.
+
+  uint32_t maxFramerate;
+
+  // This enables/disables encoding and sending when there aren't multiple
+  // simulcast streams,by allocating 0 bitrate if inactive.
+  bool active;
+
+  unsigned int qpMax;
+  unsigned char numberOfSimulcastStreams;
+  SimulcastStream simulcastStream[kMaxSimulcastStreams];
+  SpatialLayer spatialLayers[kMaxSpatialLayers];
+
+  VideoCodecMode mode;
+  bool expect_encode_from_texture;
+
+  // Timing frames configuration. There is delay of delay_ms between two
+  // consequent timing frames, excluding outliers. Frame is always made a
+  // timing frame if it's at least outlier_ratio in percent of "ideal" average
+  // frame given bitrate and framerate, i.e. if it's bigger than
+  // |outlier_ratio / 100.0 * bitrate_bps / fps| in bits. This way, timing
+  // frames will not be sent too often usually. Yet large frames will always
+  // have timing information for debug purposes because they are more likely to
+  // cause extra delays.
+  struct TimingFrameTriggerThresholds {
+    int64_t delay_ms;
+    uint16_t outlier_ratio_percent;
+  } timing_frame_thresholds;
+
+  bool operator==(const VideoCodec& other) const = delete;
+  bool operator!=(const VideoCodec& other) const = delete;
+
+  // Accessors for codec specific information.
+  // There is a const version of each that returns a reference,
+  // and a non-const version that returns a pointer, in order
+  // to allow modification of the parameters.
+  VideoCodecVP8* VP8();
+  const VideoCodecVP8& VP8() const;
+  VideoCodecVP9* VP9();
+  const VideoCodecVP9& VP9() const;
+  VideoCodecH264* H264();
+  const VideoCodecH264& H264() const;
+
+ private:
+  // TODO(hta): Consider replacing the union with a pointer type.
+  // This will allow removing the VideoCodec* types from this file.
+  VideoCodecUnion codec_specific_;
+};
+
+class BitrateAllocation {
+ public:
+  static const uint32_t kMaxBitrateBps;
+  BitrateAllocation();
+
+  bool SetBitrate(size_t spatial_index,
+                  size_t temporal_index,
+                  uint32_t bitrate_bps);
+
+  bool HasBitrate(size_t spatial_index, size_t temporal_index) const;
+
+  uint32_t GetBitrate(size_t spatial_index, size_t temporal_index) const;
+
+  // Whether the specific spatial layers has the bitrate set in any of its
+  // temporal layers.
+  bool IsSpatialLayerUsed(size_t spatial_index) const;
+
+  // Get the sum of all the temporal layer for a specific spatial layer.
+  uint32_t GetSpatialLayerSum(size_t spatial_index) const;
+
+  uint32_t get_sum_bps() const { return sum_; }  // Sum of all bitrates.
+  uint32_t get_sum_kbps() const { return (sum_ + 500) / 1000; }
+
+  inline bool operator==(const BitrateAllocation& other) const {
+    return memcmp(bitrates_, other.bitrates_, sizeof(bitrates_)) == 0;
+  }
+  inline bool operator!=(const BitrateAllocation& other) const {
+    return !(*this == other);
+  }
+
+  // Expensive, please use only in tests.
+  std::string ToString() const;
+  std::ostream& operator<<(std::ostream& os) const;
+
+ private:
+  uint32_t sum_;
+  uint32_t bitrates_[kMaxSpatialLayers][kMaxTemporalStreams];
+  bool has_bitrate_[kMaxSpatialLayers][kMaxTemporalStreams];
+};
+
+// Bandwidth over-use detector options.  These are used to drive
+// experimentation with bandwidth estimation parameters.
+// See modules/remote_bitrate_estimator/overuse_detector.h
+// TODO(terelius): This is only used in overuse_estimator.cc, and only in the
+// default constructed state. Can we move the relevant variables into that
+// class and delete this? See also disabled warning at line 27
+struct OverUseDetectorOptions {
+  OverUseDetectorOptions()
+      : initial_slope(8.0 / 512.0),
+        initial_offset(0),
+        initial_e(),
+        initial_process_noise(),
+        initial_avg_noise(0.0),
+        initial_var_noise(50) {
+    initial_e[0][0] = 100;
+    initial_e[1][1] = 1e-1;
+    initial_e[0][1] = initial_e[1][0] = 0;
+    initial_process_noise[0] = 1e-13;
+    initial_process_noise[1] = 1e-3;
+  }
+  double initial_slope;
+  double initial_offset;
+  double initial_e[2][2];
+  double initial_process_noise[2];
+  double initial_avg_noise;
+  double initial_var_noise;
+};
+
+// This structure will have the information about when packet is actually
+// received by socket.
+struct PacketTime {
+  PacketTime() : timestamp(-1), not_before(-1) {}
+  PacketTime(int64_t timestamp, int64_t not_before)
+      : timestamp(timestamp), not_before(not_before) {}
+
+  int64_t timestamp;   // Receive time after socket delivers the data.
+  int64_t not_before;  // Earliest possible time the data could have arrived,
+                       // indicating the potential error in the |timestamp|
+                       // value,in case the system is busy.
+                       // For example, the time of the last select() call.
+                       // If unknown, this value will be set to zero.
+};
+
+// Minimum and maximum playout delay values from capture to render.
+// These are best effort values.
+//
+// A value < 0 indicates no change from previous valid value.
+//
+// min = max = 0 indicates that the receiver should try and render
+// frame as soon as possible.
+//
+// min = x, max = y indicates that the receiver is free to adapt
+// in the range (x, y) based on network jitter.
+//
+// Note: Given that this gets embedded in a union, it is up-to the owner to
+// initialize these values.
+struct PlayoutDelay {
+  int min_ms;
+  int max_ms;
+};
+
+}  // namespace webrtc
+
+#endif  // COMMON_TYPES_H_
diff --git a/modules/audio_coding/BUILD.gn b/modules/audio_coding/BUILD.gn
new file mode 100644
index 0000000..c20bd2b
--- /dev/null
+++ b/modules/audio_coding/BUILD.gn
@@ -0,0 +1,2286 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+import("audio_coding.gni")
+import("//build/config/arm.gni")
+if (!build_with_mozilla) {
+  import("//third_party/protobuf/proto_library.gni")
+}
+
+visibility = [ ":*" ]
+
+audio_codec_deps = [
+  ":cng",
+  ":g711",
+  ":pcm16b",
+]
+if (rtc_include_ilbc) {
+  audio_codec_deps += [ ":ilbc" ]
+}
+if (rtc_include_opus) {
+  audio_codec_deps += [ ":webrtc_opus" ]
+}
+if (current_cpu == "arm") {
+  audio_codec_deps += [ ":isac_fix" ]
+} else {
+  audio_codec_deps += [ ":isac" ]
+}
+audio_codec_deps += [ ":g722" ]
+if (!build_with_mozilla && !build_with_chromium) {
+  audio_codec_deps += [ ":red" ]
+}
+audio_coding_deps = audio_codec_deps + [
+                      "../..:webrtc_common",
+                      "../../common_audio",
+                      "../../system_wrappers",
+                    ]
+
+rtc_static_library("audio_format_conversion") {
+  visibility += webrtc_default_visibility
+  sources = [
+    "codecs/audio_format_conversion.cc",
+    "codecs/audio_format_conversion.h",
+  ]
+  deps = [
+    "../..:webrtc_common",
+    "../../api:array_view",
+    "../../api:optional",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../rtc_base:checks",
+    "../../rtc_base:rtc_base_approved",
+    "../../rtc_base:sanitizer",
+  ]
+}
+
+rtc_static_library("rent_a_codec") {
+  visibility += webrtc_default_visibility
+  sources = [
+    "acm2/acm_codec_database.cc",
+    "acm2/acm_codec_database.h",
+    "acm2/rent_a_codec.cc",
+    "acm2/rent_a_codec.h",
+  ]
+  deps = [
+           "../..:typedefs",
+           "../../rtc_base:checks",
+           "../../api:array_view",
+           "../../api:optional",
+           "../../api/audio_codecs:audio_codecs_api",
+           "../..:webrtc_common",
+           "../../rtc_base:protobuf_utils",
+           "../../rtc_base:rtc_base_approved",
+           "../../system_wrappers",
+           ":audio_coding_module_typedefs",
+           ":isac_common",
+           ":isac_fix_c",
+           ":neteq_decoder_enum",
+         ] + audio_codec_deps
+
+  defines = audio_codec_defines
+}
+
+config("audio_coding_config") {
+  include_dirs = [
+    "include",
+    "../include",
+  ]
+}
+
+rtc_source_set("audio_coding_module_typedefs") {
+  sources = [
+    "include/audio_coding_module_typedefs.h",
+  ]
+  deps = [
+    "..:module_api",
+    "../..:typedefs",
+    "../..:webrtc_common",
+  ]
+}
+
+rtc_static_library("audio_coding") {
+  visibility += [ "*" ]
+  sources = [
+    "acm2/acm_receiver.cc",
+    "acm2/acm_receiver.h",
+    "acm2/acm_resampler.cc",
+    "acm2/acm_resampler.h",
+    "acm2/audio_coding_module.cc",
+    "acm2/call_statistics.cc",
+    "acm2/call_statistics.h",
+    "acm2/codec_manager.cc",
+    "acm2/codec_manager.h",
+    "include/audio_coding_module.h",
+  ]
+
+  defines = []
+
+  public_configs = [ ":audio_coding_config" ]
+
+  if (rtc_include_opus) {
+    public_deps = [
+      ":webrtc_opus",
+    ]
+  }
+
+  if (is_win) {
+    cflags = [
+      # TODO(kjellander): Bug 261: fix this warning.
+      "/wd4373",  # virtual function override.
+    ]
+  }
+
+  deps = audio_coding_deps + [
+           "../../common_audio:common_audio_c",
+           "../..:typedefs",
+           "../../rtc_base:deprecation",
+           "../../rtc_base:checks",
+           "../../system_wrappers:metrics_api",
+           "..:module_api",
+           "../../api:array_view",
+           "../../api/audio_codecs:audio_codecs_api",
+           "../../api/audio_codecs:builtin_audio_decoder_factory",
+           ":audio_coding_module_typedefs",
+           ":neteq",
+           ":rent_a_codec",
+           "../../rtc_base:rtc_base_approved",
+           "../../api:optional",
+           "../../logging:rtc_event_log_api",
+         ]
+  defines = audio_coding_defines
+}
+
+rtc_static_library("legacy_encoded_audio_frame") {
+  sources = [
+    "codecs/legacy_encoded_audio_frame.cc",
+    "codecs/legacy_encoded_audio_frame.h",
+  ]
+  deps = [
+    "../../api:array_view",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../rtc_base:rtc_base_approved",
+  ]
+}
+
+config("cng_config") {
+  include_dirs = [ "codecs/cng/include" ]
+}
+
+rtc_static_library("cng") {
+  visibility += [ "*" ]
+  sources = [
+    "codecs/cng/audio_encoder_cng.cc",
+    "codecs/cng/audio_encoder_cng.h",
+    "codecs/cng/webrtc_cng.cc",
+    "codecs/cng/webrtc_cng.h",
+  ]
+
+  public_configs = [ ":cng_config" ]
+
+  deps = [
+    "../..:typedefs",
+    "../..:webrtc_common",
+    "../../api:array_view",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../common_audio",
+    "../../common_audio:common_audio_c",
+    "../../rtc_base:rtc_base_approved",
+  ]
+}
+
+config("red_config") {
+  include_dirs = [ "codecs/red" ]
+}
+
+rtc_static_library("red") {
+  visibility += [ "*" ]
+  sources = [
+    "codecs/red/audio_encoder_copy_red.cc",
+    "codecs/red/audio_encoder_copy_red.h",
+  ]
+
+  public_configs = [ ":red_config" ]
+
+  deps = [
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../common_audio",
+    "../../rtc_base:checks",
+    "../../rtc_base:rtc_base_approved",
+  ]
+}
+
+config("g711_config") {
+  include_dirs = [ "codecs/g711/include" ]
+}
+
+rtc_static_library("g711") {
+  visibility += [ "*" ]
+  sources = [
+    "codecs/g711/audio_decoder_pcm.cc",
+    "codecs/g711/audio_decoder_pcm.h",
+    "codecs/g711/audio_encoder_pcm.cc",
+    "codecs/g711/audio_encoder_pcm.h",
+  ]
+
+  public_configs = [ ":g711_config" ]
+
+  deps = [
+    ":legacy_encoded_audio_frame",
+    "../..:webrtc_common",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../rtc_base:checks",
+    "../../rtc_base:rtc_base_approved",
+  ]
+  public_deps = [
+    ":g711_c",
+  ]
+}
+
+rtc_source_set("g711_c") {
+  sources = [
+    "codecs/g711/g711.c",
+    "codecs/g711/g711.h",
+    "codecs/g711/g711_interface.c",
+    "codecs/g711/g711_interface.h",
+  ]
+  deps = [
+    "../..:typedefs",
+    "../..:webrtc_common",
+  ]
+}
+
+config("g722_config") {
+  include_dirs = [ "codecs/g722/include" ]
+}
+
+rtc_static_library("g722") {
+  visibility += [ "*" ]
+  sources = [
+    "codecs/g722/audio_decoder_g722.cc",
+    "codecs/g722/audio_decoder_g722.h",
+    "codecs/g722/audio_encoder_g722.cc",
+    "codecs/g722/audio_encoder_g722.h",
+  ]
+
+  public_configs = [ ":g722_config" ]
+
+  deps = [
+    ":legacy_encoded_audio_frame",
+    "../..:webrtc_common",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../api/audio_codecs/g722:audio_encoder_g722_config",
+    "../../rtc_base:checks",
+    "../../rtc_base:rtc_base_approved",
+  ]
+  public_deps = [
+    ":g722_c",
+  ]
+}
+
+rtc_source_set("g722_c") {
+  sources = [
+    "codecs/g722/g722_decode.c",
+    "codecs/g722/g722_enc_dec.h",
+    "codecs/g722/g722_encode.c",
+    "codecs/g722/g722_interface.c",
+    "codecs/g722/g722_interface.h",
+  ]
+  deps = [
+    "../..:typedefs",
+    "../..:webrtc_common",
+  ]
+}
+
+config("ilbc_config") {
+  include_dirs = [ "codecs/ilbc/include" ]
+}
+
+rtc_static_library("ilbc") {
+  visibility += webrtc_default_visibility
+  sources = [
+    "codecs/ilbc/audio_decoder_ilbc.cc",
+    "codecs/ilbc/audio_decoder_ilbc.h",
+    "codecs/ilbc/audio_encoder_ilbc.cc",
+    "codecs/ilbc/audio_encoder_ilbc.h",
+  ]
+
+  public_configs = [ ":ilbc_config" ]
+
+  deps = [
+    ":legacy_encoded_audio_frame",
+    "../..:webrtc_common",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../api/audio_codecs/ilbc:audio_encoder_ilbc_config",
+    "../../common_audio",
+    "../../rtc_base:checks",
+    "../../rtc_base:rtc_base_approved",
+  ]
+  public_deps = [
+    ":ilbc_c",
+  ]
+}
+
+rtc_source_set("ilbc_c") {
+  sources = [
+    "codecs/ilbc/abs_quant.c",
+    "codecs/ilbc/abs_quant.h",
+    "codecs/ilbc/abs_quant_loop.c",
+    "codecs/ilbc/abs_quant_loop.h",
+    "codecs/ilbc/augmented_cb_corr.c",
+    "codecs/ilbc/augmented_cb_corr.h",
+    "codecs/ilbc/bw_expand.c",
+    "codecs/ilbc/bw_expand.h",
+    "codecs/ilbc/cb_construct.c",
+    "codecs/ilbc/cb_construct.h",
+    "codecs/ilbc/cb_mem_energy.c",
+    "codecs/ilbc/cb_mem_energy.h",
+    "codecs/ilbc/cb_mem_energy_augmentation.c",
+    "codecs/ilbc/cb_mem_energy_augmentation.h",
+    "codecs/ilbc/cb_mem_energy_calc.c",
+    "codecs/ilbc/cb_mem_energy_calc.h",
+    "codecs/ilbc/cb_search.c",
+    "codecs/ilbc/cb_search.h",
+    "codecs/ilbc/cb_search_core.c",
+    "codecs/ilbc/cb_search_core.h",
+    "codecs/ilbc/cb_update_best_index.c",
+    "codecs/ilbc/cb_update_best_index.h",
+    "codecs/ilbc/chebyshev.c",
+    "codecs/ilbc/chebyshev.h",
+    "codecs/ilbc/comp_corr.c",
+    "codecs/ilbc/comp_corr.h",
+    "codecs/ilbc/constants.c",
+    "codecs/ilbc/constants.h",
+    "codecs/ilbc/create_augmented_vec.c",
+    "codecs/ilbc/create_augmented_vec.h",
+    "codecs/ilbc/decode.c",
+    "codecs/ilbc/decode.h",
+    "codecs/ilbc/decode_residual.c",
+    "codecs/ilbc/decode_residual.h",
+    "codecs/ilbc/decoder_interpolate_lsf.c",
+    "codecs/ilbc/decoder_interpolate_lsf.h",
+    "codecs/ilbc/defines.h",
+    "codecs/ilbc/do_plc.c",
+    "codecs/ilbc/do_plc.h",
+    "codecs/ilbc/encode.c",
+    "codecs/ilbc/encode.h",
+    "codecs/ilbc/energy_inverse.c",
+    "codecs/ilbc/energy_inverse.h",
+    "codecs/ilbc/enh_upsample.c",
+    "codecs/ilbc/enh_upsample.h",
+    "codecs/ilbc/enhancer.c",
+    "codecs/ilbc/enhancer.h",
+    "codecs/ilbc/enhancer_interface.c",
+    "codecs/ilbc/enhancer_interface.h",
+    "codecs/ilbc/filtered_cb_vecs.c",
+    "codecs/ilbc/filtered_cb_vecs.h",
+    "codecs/ilbc/frame_classify.c",
+    "codecs/ilbc/frame_classify.h",
+    "codecs/ilbc/gain_dequant.c",
+    "codecs/ilbc/gain_dequant.h",
+    "codecs/ilbc/gain_quant.c",
+    "codecs/ilbc/gain_quant.h",
+    "codecs/ilbc/get_cd_vec.c",
+    "codecs/ilbc/get_cd_vec.h",
+    "codecs/ilbc/get_lsp_poly.c",
+    "codecs/ilbc/get_lsp_poly.h",
+    "codecs/ilbc/get_sync_seq.c",
+    "codecs/ilbc/get_sync_seq.h",
+    "codecs/ilbc/hp_input.c",
+    "codecs/ilbc/hp_input.h",
+    "codecs/ilbc/hp_output.c",
+    "codecs/ilbc/hp_output.h",
+    "codecs/ilbc/ilbc.c",
+    "codecs/ilbc/ilbc.h",
+    "codecs/ilbc/index_conv_dec.c",
+    "codecs/ilbc/index_conv_dec.h",
+    "codecs/ilbc/index_conv_enc.c",
+    "codecs/ilbc/index_conv_enc.h",
+    "codecs/ilbc/init_decode.c",
+    "codecs/ilbc/init_decode.h",
+    "codecs/ilbc/init_encode.c",
+    "codecs/ilbc/init_encode.h",
+    "codecs/ilbc/interpolate.c",
+    "codecs/ilbc/interpolate.h",
+    "codecs/ilbc/interpolate_samples.c",
+    "codecs/ilbc/interpolate_samples.h",
+    "codecs/ilbc/lpc_encode.c",
+    "codecs/ilbc/lpc_encode.h",
+    "codecs/ilbc/lsf_check.c",
+    "codecs/ilbc/lsf_check.h",
+    "codecs/ilbc/lsf_interpolate_to_poly_dec.c",
+    "codecs/ilbc/lsf_interpolate_to_poly_dec.h",
+    "codecs/ilbc/lsf_interpolate_to_poly_enc.c",
+    "codecs/ilbc/lsf_interpolate_to_poly_enc.h",
+    "codecs/ilbc/lsf_to_lsp.c",
+    "codecs/ilbc/lsf_to_lsp.h",
+    "codecs/ilbc/lsf_to_poly.c",
+    "codecs/ilbc/lsf_to_poly.h",
+    "codecs/ilbc/lsp_to_lsf.c",
+    "codecs/ilbc/lsp_to_lsf.h",
+    "codecs/ilbc/my_corr.c",
+    "codecs/ilbc/my_corr.h",
+    "codecs/ilbc/nearest_neighbor.c",
+    "codecs/ilbc/nearest_neighbor.h",
+    "codecs/ilbc/pack_bits.c",
+    "codecs/ilbc/pack_bits.h",
+    "codecs/ilbc/poly_to_lsf.c",
+    "codecs/ilbc/poly_to_lsf.h",
+    "codecs/ilbc/poly_to_lsp.c",
+    "codecs/ilbc/poly_to_lsp.h",
+    "codecs/ilbc/refiner.c",
+    "codecs/ilbc/refiner.h",
+    "codecs/ilbc/simple_interpolate_lsf.c",
+    "codecs/ilbc/simple_interpolate_lsf.h",
+    "codecs/ilbc/simple_lpc_analysis.c",
+    "codecs/ilbc/simple_lpc_analysis.h",
+    "codecs/ilbc/simple_lsf_dequant.c",
+    "codecs/ilbc/simple_lsf_dequant.h",
+    "codecs/ilbc/simple_lsf_quant.c",
+    "codecs/ilbc/simple_lsf_quant.h",
+    "codecs/ilbc/smooth.c",
+    "codecs/ilbc/smooth.h",
+    "codecs/ilbc/smooth_out_data.c",
+    "codecs/ilbc/smooth_out_data.h",
+    "codecs/ilbc/sort_sq.c",
+    "codecs/ilbc/sort_sq.h",
+    "codecs/ilbc/split_vq.c",
+    "codecs/ilbc/split_vq.h",
+    "codecs/ilbc/state_construct.c",
+    "codecs/ilbc/state_construct.h",
+    "codecs/ilbc/state_search.c",
+    "codecs/ilbc/state_search.h",
+    "codecs/ilbc/swap_bytes.c",
+    "codecs/ilbc/swap_bytes.h",
+    "codecs/ilbc/unpack_bits.c",
+    "codecs/ilbc/unpack_bits.h",
+    "codecs/ilbc/vq3.c",
+    "codecs/ilbc/vq3.h",
+    "codecs/ilbc/vq4.c",
+    "codecs/ilbc/vq4.h",
+    "codecs/ilbc/window32_w32.c",
+    "codecs/ilbc/window32_w32.h",
+    "codecs/ilbc/xcorr_coef.c",
+    "codecs/ilbc/xcorr_coef.h",
+  ]
+
+  public_configs = [ ":ilbc_config" ]
+
+  deps = [
+    "../..:typedefs",
+    "../..:webrtc_common",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../common_audio",
+    "../../common_audio:common_audio_c",
+    "../../rtc_base:checks",
+    "../../rtc_base:rtc_base_approved",
+    "../../rtc_base:sanitizer",
+  ]
+}
+
+rtc_static_library("isac_common") {
+  sources = [
+    "codecs/isac/audio_decoder_isac_t.h",
+    "codecs/isac/audio_decoder_isac_t_impl.h",
+    "codecs/isac/audio_encoder_isac_t.h",
+    "codecs/isac/audio_encoder_isac_t_impl.h",
+    "codecs/isac/bandwidth_info.h",
+    "codecs/isac/locked_bandwidth_info.cc",
+    "codecs/isac/locked_bandwidth_info.h",
+  ]
+  deps = [
+    "../..:typedefs",
+    "../..:webrtc_common",
+    "../../api:optional",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../rtc_base:checks",
+    "../../rtc_base:rtc_base_approved",
+  ]
+}
+
+config("isac_config") {
+  include_dirs = [ "codecs/isac/main/include" ]
+}
+
+rtc_static_library("isac") {
+  visibility += [ "*" ]
+  sources = [
+    "codecs/isac/main/include/audio_decoder_isac.h",
+    "codecs/isac/main/include/audio_encoder_isac.h",
+    "codecs/isac/main/source/audio_decoder_isac.cc",
+    "codecs/isac/main/source/audio_encoder_isac.cc",
+  ]
+
+  deps = [
+    ":isac_common",
+    "../../api/audio_codecs:audio_codecs_api",
+  ]
+  public_deps = [
+    ":isac_c",
+  ]
+}
+
+rtc_static_library("isac_c") {
+  sources = [
+    "codecs/isac/main/include/isac.h",
+    "codecs/isac/main/source/arith_routines.c",
+    "codecs/isac/main/source/arith_routines.h",
+    "codecs/isac/main/source/arith_routines_hist.c",
+    "codecs/isac/main/source/arith_routines_logist.c",
+    "codecs/isac/main/source/bandwidth_estimator.c",
+    "codecs/isac/main/source/bandwidth_estimator.h",
+    "codecs/isac/main/source/codec.h",
+    "codecs/isac/main/source/crc.c",
+    "codecs/isac/main/source/crc.h",
+    "codecs/isac/main/source/decode.c",
+    "codecs/isac/main/source/decode_bwe.c",
+    "codecs/isac/main/source/encode.c",
+    "codecs/isac/main/source/encode_lpc_swb.c",
+    "codecs/isac/main/source/encode_lpc_swb.h",
+    "codecs/isac/main/source/entropy_coding.c",
+    "codecs/isac/main/source/entropy_coding.h",
+    "codecs/isac/main/source/fft.c",
+    "codecs/isac/main/source/fft.h",
+    "codecs/isac/main/source/filter_functions.c",
+    "codecs/isac/main/source/filterbank_tables.c",
+    "codecs/isac/main/source/filterbank_tables.h",
+    "codecs/isac/main/source/filterbanks.c",
+    "codecs/isac/main/source/intialize.c",
+    "codecs/isac/main/source/isac.c",
+    "codecs/isac/main/source/isac_float_type.h",
+    "codecs/isac/main/source/lattice.c",
+    "codecs/isac/main/source/lpc_analysis.c",
+    "codecs/isac/main/source/lpc_analysis.h",
+    "codecs/isac/main/source/lpc_gain_swb_tables.c",
+    "codecs/isac/main/source/lpc_gain_swb_tables.h",
+    "codecs/isac/main/source/lpc_shape_swb12_tables.c",
+    "codecs/isac/main/source/lpc_shape_swb12_tables.h",
+    "codecs/isac/main/source/lpc_shape_swb16_tables.c",
+    "codecs/isac/main/source/lpc_shape_swb16_tables.h",
+    "codecs/isac/main/source/lpc_tables.c",
+    "codecs/isac/main/source/lpc_tables.h",
+    "codecs/isac/main/source/os_specific_inline.h",
+    "codecs/isac/main/source/pitch_estimator.c",
+    "codecs/isac/main/source/pitch_estimator.h",
+    "codecs/isac/main/source/pitch_filter.c",
+    "codecs/isac/main/source/pitch_gain_tables.c",
+    "codecs/isac/main/source/pitch_gain_tables.h",
+    "codecs/isac/main/source/pitch_lag_tables.c",
+    "codecs/isac/main/source/pitch_lag_tables.h",
+    "codecs/isac/main/source/settings.h",
+    "codecs/isac/main/source/spectrum_ar_model_tables.c",
+    "codecs/isac/main/source/spectrum_ar_model_tables.h",
+    "codecs/isac/main/source/structs.h",
+    "codecs/isac/main/source/transform.c",
+  ]
+
+  if (is_linux) {
+    libs = [ "m" ]
+  }
+
+  public_configs = [ ":isac_config" ]
+
+  deps = [
+    ":isac_common",
+    "../..:typedefs",
+    "../..:webrtc_common",
+    "../../common_audio",
+    "../../common_audio:common_audio_c",
+    "../../rtc_base:checks",
+    "../../rtc_base:compile_assert_c",
+    "../../rtc_base:rtc_base_approved",
+  ]
+}
+
+config("isac_fix_config") {
+  include_dirs = [ "codecs/isac/fix/include" ]
+}
+
+rtc_static_library("isac_fix") {
+  visibility += [ "*" ]
+  sources = [
+    "codecs/isac/fix/source/audio_decoder_isacfix.cc",
+    "codecs/isac/fix/source/audio_encoder_isacfix.cc",
+  ]
+
+  public_configs = [ ":isac_fix_config" ]
+
+  deps = [
+    ":isac_common",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../common_audio",
+    "../../system_wrappers",
+  ]
+  public_deps = [
+    ":isac_fix_c",
+  ]
+
+  if (rtc_build_with_neon) {
+    deps += [ ":isac_neon" ]
+  }
+}
+
+rtc_source_set("isac_fix_common") {
+  sources = [
+    "codecs/isac/fix/source/codec.h",
+    "codecs/isac/fix/source/entropy_coding.h",
+    "codecs/isac/fix/source/fft.c",
+    "codecs/isac/fix/source/fft.h",
+    "codecs/isac/fix/source/filterbank_internal.h",
+    "codecs/isac/fix/source/settings.h",
+    "codecs/isac/fix/source/structs.h",
+  ]
+  public_configs = [ ":isac_fix_config" ]
+  deps = [
+    ":isac_common",
+    "../..:typedefs",
+    "../../common_audio",
+    "../../common_audio:common_audio_c",
+  ]
+}
+
+rtc_source_set("isac_fix_c_arm_asm") {
+  sources = []
+  if (current_cpu == "arm" && arm_version >= 7) {
+    sources += [
+      "codecs/isac/fix/source/lattice_armv7.S",
+      "codecs/isac/fix/source/pitch_filter_armv6.S",
+    ]
+    deps = [
+      ":isac_fix_common",
+      "../../system_wrappers:asm_defines",
+    ]
+  }
+}
+
+rtc_source_set("isac_fix_c") {
+  sources = [
+    "codecs/isac/fix/include/audio_decoder_isacfix.h",
+    "codecs/isac/fix/include/audio_encoder_isacfix.h",
+    "codecs/isac/fix/include/isacfix.h",
+    "codecs/isac/fix/source/arith_routines.c",
+    "codecs/isac/fix/source/arith_routines_hist.c",
+    "codecs/isac/fix/source/arith_routines_logist.c",
+    "codecs/isac/fix/source/arith_routins.h",
+    "codecs/isac/fix/source/bandwidth_estimator.c",
+    "codecs/isac/fix/source/bandwidth_estimator.h",
+    "codecs/isac/fix/source/decode.c",
+    "codecs/isac/fix/source/decode_bwe.c",
+    "codecs/isac/fix/source/decode_plc.c",
+    "codecs/isac/fix/source/encode.c",
+    "codecs/isac/fix/source/entropy_coding.c",
+    "codecs/isac/fix/source/filterbank_tables.c",
+    "codecs/isac/fix/source/filterbank_tables.h",
+    "codecs/isac/fix/source/filterbanks.c",
+    "codecs/isac/fix/source/filters.c",
+    "codecs/isac/fix/source/initialize.c",
+    "codecs/isac/fix/source/isac_fix_type.h",
+    "codecs/isac/fix/source/isacfix.c",
+    "codecs/isac/fix/source/lattice.c",
+    "codecs/isac/fix/source/lattice_c.c",
+    "codecs/isac/fix/source/lpc_masking_model.c",
+    "codecs/isac/fix/source/lpc_masking_model.h",
+    "codecs/isac/fix/source/lpc_tables.c",
+    "codecs/isac/fix/source/lpc_tables.h",
+    "codecs/isac/fix/source/pitch_estimator.c",
+    "codecs/isac/fix/source/pitch_estimator.h",
+    "codecs/isac/fix/source/pitch_estimator_c.c",
+    "codecs/isac/fix/source/pitch_filter.c",
+    "codecs/isac/fix/source/pitch_filter_c.c",
+    "codecs/isac/fix/source/pitch_gain_tables.c",
+    "codecs/isac/fix/source/pitch_gain_tables.h",
+    "codecs/isac/fix/source/pitch_lag_tables.c",
+    "codecs/isac/fix/source/pitch_lag_tables.h",
+    "codecs/isac/fix/source/spectrum_ar_model_tables.c",
+    "codecs/isac/fix/source/spectrum_ar_model_tables.h",
+    "codecs/isac/fix/source/transform.c",
+    "codecs/isac/fix/source/transform_tables.c",
+  ]
+
+  public_configs = [ ":isac_fix_config" ]
+
+  deps = [
+    ":isac_common",
+    "../..:typedefs",
+    "../..:webrtc_common",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../common_audio",
+    "../../common_audio:common_audio_c",
+    "../../rtc_base:checks",
+    "../../rtc_base:compile_assert_c",
+    "../../rtc_base:rtc_base_approved",
+    "../../rtc_base:sanitizer",
+    "../../system_wrappers:cpu_features_api",
+  ]
+
+  public_deps = [
+    ":isac_fix_common",
+  ]
+
+  if (rtc_build_with_neon) {
+    deps += [ ":isac_neon" ]
+  }
+
+  if (current_cpu == "arm" && arm_version >= 7) {
+    sources -= [
+      "codecs/isac/fix/source/lattice_c.c",
+      "codecs/isac/fix/source/pitch_filter_c.c",
+    ]
+    deps += [ ":isac_fix_c_arm_asm" ]
+  }
+
+  if (current_cpu == "mipsel") {
+    sources += [
+      "codecs/isac/fix/source/entropy_coding_mips.c",
+      "codecs/isac/fix/source/filters_mips.c",
+      "codecs/isac/fix/source/lattice_mips.c",
+      "codecs/isac/fix/source/pitch_estimator_mips.c",
+      "codecs/isac/fix/source/transform_mips.c",
+    ]
+    sources -= [
+      "codecs/isac/fix/source/lattice_c.c",
+      "codecs/isac/fix/source/pitch_estimator_c.c",
+    ]
+    if (mips_dsp_rev > 0) {
+      sources += [ "codecs/isac/fix/source/filterbanks_mips.c" ]
+    }
+    if (mips_dsp_rev > 1) {
+      sources += [
+        "codecs/isac/fix/source/lpc_masking_model_mips.c",
+        "codecs/isac/fix/source/pitch_filter_mips.c",
+      ]
+      sources -= [ "codecs/isac/fix/source/pitch_filter_c.c" ]
+    }
+  }
+}
+
+if (rtc_build_with_neon) {
+  rtc_static_library("isac_neon") {
+    sources = [
+      "codecs/isac/fix/source/entropy_coding_neon.c",
+      "codecs/isac/fix/source/filterbanks_neon.c",
+      "codecs/isac/fix/source/filters_neon.c",
+      "codecs/isac/fix/source/lattice_neon.c",
+      "codecs/isac/fix/source/transform_neon.c",
+    ]
+
+    if (current_cpu != "arm64") {
+      # Enable compilation for the NEON instruction set. This is needed
+      # since //build/config/arm.gni only enables NEON for iOS, not Android.
+      # This provides the same functionality as webrtc/build/arm_neon.gypi.
+      suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ]
+      cflags = [ "-mfpu=neon" ]
+    }
+
+    # Disable LTO on NEON targets due to compiler bug.
+    # TODO(fdegans): Enable this. See crbug.com/408997.
+    if (rtc_use_lto) {
+      cflags -= [
+        "-flto",
+        "-ffat-lto-objects",
+      ]
+    }
+
+    deps = [
+      ":isac_fix_common",
+      "../../common_audio",
+      "../../common_audio:common_audio_c",
+      "../../rtc_base:checks",
+      "../../rtc_base:rtc_base_approved",
+    ]
+  }
+}
+
+config("pcm16b_config") {
+  include_dirs = [ "codecs/pcm16b/include" ]
+}
+
+rtc_static_library("pcm16b") {
+  visibility += [ "*" ]
+  sources = [
+    "codecs/pcm16b/audio_decoder_pcm16b.cc",
+    "codecs/pcm16b/audio_decoder_pcm16b.h",
+    "codecs/pcm16b/audio_encoder_pcm16b.cc",
+    "codecs/pcm16b/audio_encoder_pcm16b.h",
+    "codecs/pcm16b/pcm16b_common.cc",
+    "codecs/pcm16b/pcm16b_common.h",
+  ]
+
+  deps = [
+    ":g711",
+    ":legacy_encoded_audio_frame",
+    "../..:webrtc_common",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../rtc_base:checks",
+    "../../rtc_base:rtc_base_approved",
+  ]
+  public_deps = [
+    ":pcm16b_c",
+  ]
+  public_configs = [ ":pcm16b_config" ]
+}
+
+rtc_source_set("pcm16b_c") {
+  sources = [
+    "codecs/pcm16b/pcm16b.c",
+    "codecs/pcm16b/pcm16b.h",
+  ]
+
+  public_configs = [ ":pcm16b_config" ]
+  deps = [
+    "../..:typedefs",
+    "../..:webrtc_common",
+  ]
+}
+
+rtc_static_library("webrtc_opus") {
+  visibility += webrtc_default_visibility
+  sources = [
+    "codecs/opus/audio_decoder_opus.cc",
+    "codecs/opus/audio_decoder_opus.h",
+    "codecs/opus/audio_encoder_opus.cc",
+    "codecs/opus/audio_encoder_opus.h",
+  ]
+
+  deps = [
+    ":audio_network_adaptor",
+    "../..:webrtc_common",
+    "../../api:optional",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../api/audio_codecs/opus:audio_encoder_opus_config",
+    "../../common_audio",
+    "../../rtc_base:checks",
+    "../../rtc_base:rtc_base_approved",
+    "../../rtc_base:rtc_numerics",
+    "../../rtc_base:safe_minmax",
+    "../../system_wrappers:field_trial_api",
+  ]
+  public_deps = [
+    ":webrtc_opus_c",
+    "../../rtc_base:protobuf_utils",
+  ]
+
+  defines = audio_codec_defines
+
+  if (rtc_build_opus) {
+    public_deps += [ rtc_opus_dir ]
+  } else if (build_with_mozilla) {
+    include_dirs = [ "/media/libopus/include" ]
+  }
+}
+
+rtc_source_set("webrtc_opus_c") {
+  sources = [
+    "codecs/opus/opus_inst.h",
+    "codecs/opus/opus_interface.c",
+    "codecs/opus/opus_interface.h",
+  ]
+
+  defines = audio_coding_defines
+
+  if (rtc_build_opus) {
+    public_deps = [
+      rtc_opus_dir,
+    ]
+  } else if (build_with_mozilla) {
+    include_dirs = [ getenv("DIST") + "/include/opus" ]
+  }
+
+  deps = [
+    "../..:typedefs",
+    "../..:webrtc_common",
+    "../../rtc_base:checks",
+    "../../rtc_base:rtc_base_approved",
+  ]
+}
+
+if (rtc_enable_protobuf) {
+  proto_library("ana_debug_dump_proto") {
+    visibility += webrtc_default_visibility
+    sources = [
+      "audio_network_adaptor/debug_dump.proto",
+    ]
+    deps = [
+      ":ana_config_proto",
+    ]
+    proto_out_dir = "modules/audio_coding/audio_network_adaptor"
+  }
+  proto_library("ana_config_proto") {
+    sources = [
+      "audio_network_adaptor/config.proto",
+    ]
+    proto_out_dir = "modules/audio_coding/audio_network_adaptor"
+  }
+}
+
+rtc_static_library("audio_network_adaptor_config") {
+  visibility += webrtc_default_visibility
+  sources = [
+    "audio_network_adaptor/audio_network_adaptor_config.cc",
+    "audio_network_adaptor/include/audio_network_adaptor_config.h",
+  ]
+  deps = [
+    "../../api:optional",
+  ]
+}
+
+rtc_static_library("audio_network_adaptor") {
+  visibility += webrtc_default_visibility
+  sources = [
+    "audio_network_adaptor/audio_network_adaptor_impl.cc",
+    "audio_network_adaptor/audio_network_adaptor_impl.h",
+    "audio_network_adaptor/bitrate_controller.cc",
+    "audio_network_adaptor/bitrate_controller.h",
+    "audio_network_adaptor/channel_controller.cc",
+    "audio_network_adaptor/channel_controller.h",
+    "audio_network_adaptor/controller.cc",
+    "audio_network_adaptor/controller.h",
+    "audio_network_adaptor/controller_manager.cc",
+    "audio_network_adaptor/controller_manager.h",
+    "audio_network_adaptor/debug_dump_writer.cc",
+    "audio_network_adaptor/debug_dump_writer.h",
+    "audio_network_adaptor/dtx_controller.cc",
+    "audio_network_adaptor/dtx_controller.h",
+    "audio_network_adaptor/event_log_writer.cc",
+    "audio_network_adaptor/event_log_writer.h",
+    "audio_network_adaptor/fec_controller_plr_based.cc",
+    "audio_network_adaptor/fec_controller_plr_based.h",
+    "audio_network_adaptor/fec_controller_rplr_based.cc",
+    "audio_network_adaptor/fec_controller_rplr_based.h",
+    "audio_network_adaptor/frame_length_controller.cc",
+    "audio_network_adaptor/frame_length_controller.h",
+    "audio_network_adaptor/include/audio_network_adaptor.h",
+    "audio_network_adaptor/util/threshold_curve.h",
+  ]
+
+  public_deps = [
+    ":audio_network_adaptor_config",
+  ]
+
+  deps = [
+    "../..:webrtc_common",
+    "../../api:optional",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../common_audio",
+    "../../logging:rtc_event_audio",
+    "../../logging:rtc_event_log_api",
+    "../../rtc_base:checks",
+    "../../rtc_base:protobuf_utils",
+    "../../rtc_base:rtc_base_approved",
+    "../../system_wrappers",
+    "../../system_wrappers:field_trial_api",
+  ]
+
+  if (rtc_enable_protobuf) {
+    deps += [
+      ":ana_config_proto",
+      ":ana_debug_dump_proto",
+    ]
+  }
+
+  if (!build_with_chromium && is_clang) {
+    # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+    suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+  }
+}
+
+rtc_source_set("neteq_decoder_enum") {
+  sources = [
+    "neteq/neteq_decoder_enum.cc",
+    "neteq/neteq_decoder_enum.h",
+  ]
+  deps = [
+    "../../api:optional",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../rtc_base:rtc_base_approved",
+  ]
+}
+
+rtc_static_library("neteq") {
+  visibility += webrtc_default_visibility
+  sources = [
+    "neteq/accelerate.cc",
+    "neteq/accelerate.h",
+    "neteq/audio_multi_vector.cc",
+    "neteq/audio_multi_vector.h",
+    "neteq/audio_vector.cc",
+    "neteq/audio_vector.h",
+    "neteq/background_noise.cc",
+    "neteq/background_noise.h",
+    "neteq/buffer_level_filter.cc",
+    "neteq/buffer_level_filter.h",
+    "neteq/comfort_noise.cc",
+    "neteq/comfort_noise.h",
+    "neteq/cross_correlation.cc",
+    "neteq/cross_correlation.h",
+    "neteq/decision_logic.cc",
+    "neteq/decision_logic.h",
+    "neteq/decision_logic_fax.cc",
+    "neteq/decision_logic_fax.h",
+    "neteq/decision_logic_normal.cc",
+    "neteq/decision_logic_normal.h",
+    "neteq/decoder_database.cc",
+    "neteq/decoder_database.h",
+    "neteq/defines.h",
+    "neteq/delay_manager.cc",
+    "neteq/delay_manager.h",
+    "neteq/delay_peak_detector.cc",
+    "neteq/delay_peak_detector.h",
+    "neteq/dsp_helper.cc",
+    "neteq/dsp_helper.h",
+    "neteq/dtmf_buffer.cc",
+    "neteq/dtmf_buffer.h",
+    "neteq/dtmf_tone_generator.cc",
+    "neteq/dtmf_tone_generator.h",
+    "neteq/expand.cc",
+    "neteq/expand.h",
+    "neteq/include/neteq.h",
+    "neteq/merge.cc",
+    "neteq/merge.h",
+    "neteq/nack_tracker.cc",
+    "neteq/nack_tracker.h",
+    "neteq/neteq.cc",
+    "neteq/neteq_impl.cc",
+    "neteq/neteq_impl.h",
+    "neteq/normal.cc",
+    "neteq/normal.h",
+    "neteq/packet.cc",
+    "neteq/packet.h",
+    "neteq/packet_buffer.cc",
+    "neteq/packet_buffer.h",
+    "neteq/post_decode_vad.cc",
+    "neteq/post_decode_vad.h",
+    "neteq/preemptive_expand.cc",
+    "neteq/preemptive_expand.h",
+    "neteq/random_vector.cc",
+    "neteq/random_vector.h",
+    "neteq/red_payload_splitter.cc",
+    "neteq/red_payload_splitter.h",
+    "neteq/rtcp.cc",
+    "neteq/rtcp.h",
+    "neteq/statistics_calculator.cc",
+    "neteq/statistics_calculator.h",
+    "neteq/sync_buffer.cc",
+    "neteq/sync_buffer.h",
+    "neteq/tick_timer.cc",
+    "neteq/tick_timer.h",
+    "neteq/time_stretch.cc",
+    "neteq/time_stretch.h",
+    "neteq/timestamp_scaler.cc",
+    "neteq/timestamp_scaler.h",
+  ]
+
+  deps = [
+    ":audio_coding_module_typedefs",
+    ":cng",
+    ":neteq_decoder_enum",
+    "..:module_api",
+    "../..:typedefs",
+    "../..:webrtc_common",
+    "../../api:libjingle_peerconnection_api",
+    "../../api:optional",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../common_audio",
+    "../../common_audio:common_audio_c",
+    "../../rtc_base:checks",
+    "../../rtc_base:gtest_prod",
+    "../../rtc_base:rtc_base_approved",
+    "../../rtc_base:safe_minmax",
+    "../../rtc_base:sanitizer",
+    "../../rtc_base/system:fallthrough",
+    "../../system_wrappers:field_trial_api",
+    "../../system_wrappers:metrics_api",
+  ]
+}
+
+# Although providing only test support, this target must be outside of the
+# rtc_include_tests conditional. The reason is that it supports fuzzer tests
+# that ultimately are built and run as a part of the Chromium ecosystem, which
+# does not set the rtc_include_tests flag.
+rtc_source_set("neteq_tools_minimal") {
+  visibility += webrtc_default_visibility
+  sources = [
+    "neteq/tools/audio_sink.cc",
+    "neteq/tools/audio_sink.h",
+    "neteq/tools/encode_neteq_input.cc",
+    "neteq/tools/encode_neteq_input.h",
+    "neteq/tools/neteq_input.cc",
+    "neteq/tools/neteq_input.h",
+    "neteq/tools/neteq_test.cc",
+    "neteq/tools/neteq_test.h",
+    "neteq/tools/packet.cc",
+    "neteq/tools/packet.h",
+    "neteq/tools/packet_source.cc",
+    "neteq/tools/packet_source.h",
+  ]
+
+  if (!build_with_chromium && is_clang) {
+    # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+    suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+  }
+
+  deps = [
+    ":neteq",
+    "..:module_api",
+    "../..:typedefs",
+    "../..:webrtc_common",
+    "../../api:libjingle_peerconnection_api",
+    "../../api:optional",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../api/audio_codecs:builtin_audio_decoder_factory",
+    "../../rtc_base:checks",
+    "../../rtc_base:rtc_base_approved",
+    "../rtp_rtcp",
+  ]
+}
+
+rtc_source_set("neteq_test_tools") {
+  visibility += webrtc_default_visibility
+  testonly = true
+  sources = [
+    "neteq/tools/audio_checksum.h",
+    "neteq/tools/audio_loop.cc",
+    "neteq/tools/audio_loop.h",
+    "neteq/tools/constant_pcm_packet_source.cc",
+    "neteq/tools/constant_pcm_packet_source.h",
+    "neteq/tools/output_audio_file.h",
+    "neteq/tools/output_wav_file.h",
+    "neteq/tools/rtp_file_source.cc",
+    "neteq/tools/rtp_file_source.h",
+    "neteq/tools/rtp_generator.cc",
+    "neteq/tools/rtp_generator.h",
+  ]
+
+  public_configs = [ ":neteq_tools_config" ]
+
+  if (!build_with_chromium && is_clang) {
+    # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+    suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+  }
+
+  deps = [
+    ":pcm16b",
+    "..:module_api",
+    "../..:typedefs",
+    "../..:webrtc_common",
+    "../../api:array_view",
+    "../../api:libjingle_peerconnection_api",
+    "../../common_audio",
+    "../../rtc_base:checks",
+    "../../rtc_base:rtc_base",
+    "../../rtc_base:rtc_base_approved",
+    "../../rtc_base:rtc_base_tests_utils",
+    "../../test:rtp_test_utils",
+    "../rtp_rtcp",
+    "../rtp_rtcp:rtp_rtcp_format",
+  ]
+
+  public_deps = [
+    ":neteq_tools",
+    ":neteq_tools_minimal",
+  ]
+
+  if (rtc_enable_protobuf) {
+    sources += [
+      "neteq/tools/neteq_packet_source_input.cc",
+      "neteq/tools/neteq_packet_source_input.h",
+    ]
+    deps += [ ":rtc_event_log_source" ]
+  }
+}
+
+config("neteq_tools_config") {
+  include_dirs = [ "tools" ]
+}
+
+rtc_source_set("neteq_tools") {
+  visibility += webrtc_default_visibility
+  sources = [
+    "neteq/tools/fake_decode_from_file.cc",
+    "neteq/tools/fake_decode_from_file.h",
+    "neteq/tools/neteq_delay_analyzer.cc",
+    "neteq/tools/neteq_delay_analyzer.h",
+    "neteq/tools/neteq_replacement_input.cc",
+    "neteq/tools/neteq_replacement_input.h",
+  ]
+
+  public_configs = [ ":neteq_tools_config" ]
+
+  if (!build_with_chromium && is_clang) {
+    # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+    suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+  }
+
+  deps = [
+    "../..:typedefs",
+    "../..:webrtc_common",
+    "../../api:array_view",
+    "../../api:optional",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../rtc_base:checks",
+    "../../rtc_base:rtc_base_approved",
+    "../rtp_rtcp",
+    "../rtp_rtcp:rtp_rtcp_format",
+  ]
+
+  public_deps = [
+    ":neteq_input_audio_tools",
+    ":neteq_tools_minimal",
+  ]
+}
+
+rtc_source_set("neteq_input_audio_tools") {
+  visibility += webrtc_default_visibility
+  sources = [
+    "neteq/tools/input_audio_file.cc",
+    "neteq/tools/input_audio_file.h",
+    "neteq/tools/resample_input_audio_file.cc",
+    "neteq/tools/resample_input_audio_file.h",
+  ]
+
+  deps = [
+    "../..:typedefs",
+    "../..:webrtc_common",
+    "../../common_audio",
+    "../../rtc_base:checks",
+    "../../rtc_base:rtc_base_approved",
+  ]
+}
+
+if (rtc_enable_protobuf) {
+  rtc_static_library("rtc_event_log_source") {
+    testonly = true
+
+    sources = [
+      "neteq/tools/rtc_event_log_source.cc",
+      "neteq/tools/rtc_event_log_source.h",
+    ]
+
+    if (!build_with_chromium && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+
+    deps = [
+      ":neteq_tools_minimal",
+      "../../logging:rtc_event_log_parser",
+      "../../rtc_base:checks",
+      "../../rtc_base:rtc_base_approved",
+      "../rtp_rtcp",
+      "../rtp_rtcp:rtp_rtcp_format",
+    ]
+    public_deps = [
+      "../../logging:rtc_event_log_proto",
+    ]
+  }
+}
+
+if (rtc_include_tests) {
+  rtc_source_set("mocks") {
+    testonly = true
+    sources = [
+      "audio_network_adaptor/mock/mock_audio_network_adaptor.h",
+      "audio_network_adaptor/mock/mock_controller.h",
+      "audio_network_adaptor/mock/mock_controller_manager.h",
+      "audio_network_adaptor/mock/mock_debug_dump_writer.h",
+    ]
+    deps = [
+      ":audio_network_adaptor",
+      "../../test:test_support",
+    ]
+  }
+
+  group("audio_coding_tests") {
+    visibility += webrtc_default_visibility
+    testonly = true
+    public_deps = [
+      ":acm_receive_test",
+      ":acm_send_test",
+      ":audio_codec_speed_tests",
+      ":audio_decoder_unittests",
+      ":audio_decoder_unittests",
+      ":delay_test",
+      ":g711_test",
+      ":g722_test",
+      ":ilbc_test",
+      ":insert_packet_with_timing",
+      ":isac_api_test",
+      ":isac_fix_test",
+      ":isac_switch_samprate_test",
+      ":isac_test",
+      ":neteq_ilbc_quality_test",
+      ":neteq_isac_quality_test",
+      ":neteq_opus_quality_test",
+      ":neteq_pcm16b_quality_test",
+      ":neteq_pcmu_quality_test",
+      ":neteq_speed_test",
+      ":rtp_analyze",
+      ":rtp_encode",
+      ":rtp_jitter",
+      ":rtpcat",
+      ":webrtc_opus_fec_test",
+    ]
+    if (rtc_enable_protobuf) {
+      public_deps += [ ":neteq_rtpplay" ]
+    }
+  }
+
+  rtc_source_set("audio_coding_modules_tests") {
+    testonly = true
+    visibility += webrtc_default_visibility
+
+    sources = [
+      "test/ACMTest.h",
+      "test/APITest.cc",
+      "test/APITest.h",
+      "test/Channel.cc",
+      "test/Channel.h",
+      "test/EncodeDecodeTest.cc",
+      "test/EncodeDecodeTest.h",
+      "test/PCMFile.cc",
+      "test/PCMFile.h",
+      "test/PacketLossTest.cc",
+      "test/PacketLossTest.h",
+      "test/RTPFile.cc",
+      "test/RTPFile.h",
+      "test/TestAllCodecs.cc",
+      "test/TestAllCodecs.h",
+      "test/TestRedFec.cc",
+      "test/TestRedFec.h",
+      "test/TestStereo.cc",
+      "test/TestStereo.h",
+      "test/TestVADDTX.cc",
+      "test/TestVADDTX.h",
+      "test/Tester.cc",
+      "test/TwoWayCommunication.cc",
+      "test/TwoWayCommunication.h",
+      "test/iSACTest.cc",
+      "test/iSACTest.h",
+      "test/opus_test.cc",
+      "test/opus_test.h",
+      "test/target_delay_unittest.cc",
+      "test/utility.cc",
+      "test/utility.h",
+    ]
+    deps = [
+      ":audio_coding",
+      ":audio_coding_module_typedefs",
+      ":audio_format_conversion",
+      ":pcm16b_c",
+      "..:module_api",
+      "../..:typedefs",
+      "../..:webrtc_common",
+      "../../api:optional",
+      "../../api/audio_codecs:builtin_audio_decoder_factory",
+      "../../rtc_base:rtc_base_approved",
+      "../../system_wrappers",
+      "../../test:fileutils",
+      "../../test:test_support",
+    ]
+    defines = audio_coding_defines
+    if (is_win) {
+      cflags = [
+        # TODO(kjellander): bugs.webrtc.org/261: Fix this warning.
+        "/wd4373",  # virtual function override.
+      ]
+    }
+    if (!build_with_chromium && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+  }
+
+  rtc_source_set("audio_coding_perf_tests") {
+    testonly = true
+    visibility += webrtc_default_visibility
+
+    sources = [
+      "codecs/opus/opus_complexity_unittest.cc",
+      "neteq/test/neteq_performance_unittest.cc",
+    ]
+    deps = [
+      ":neteq_test_support",
+      ":neteq_test_tools",
+      "../..:typedefs",
+      "../..:webrtc_common",
+      "../../api/audio_codecs/opus:audio_encoder_opus",
+      "../../rtc_base:protobuf_utils",
+      "../../rtc_base:rtc_base_approved",
+      "../../system_wrappers",
+      "../../system_wrappers:field_trial_api",
+      "../../test:fileutils",
+      "../../test:perf_test",
+      "../../test:test_support",
+    ]
+
+    if (!build_with_chromium && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+  }
+
+  rtc_source_set("acm_receive_test") {
+    testonly = true
+    sources = [
+      "acm2/acm_receive_test.cc",
+      "acm2/acm_receive_test.h",
+    ]
+
+    defines = audio_coding_defines
+
+    deps = audio_coding_deps + [
+             ":audio_coding",
+             ":audio_format_conversion",
+             "../../api/audio_codecs:audio_codecs_api",
+             "../../api/audio_codecs:builtin_audio_decoder_factory",
+             ":neteq_tools",
+             "../../rtc_base:rtc_base_approved",
+             "../../test:test_support",
+             "//testing/gtest",
+           ]
+  }
+
+  rtc_source_set("acm_send_test") {
+    testonly = true
+    sources = [
+      "acm2/acm_send_test.cc",
+      "acm2/acm_send_test.h",
+    ]
+
+    defines = audio_coding_defines
+
+    deps = audio_coding_deps + [
+             "../../rtc_base:checks",
+             ":audio_coding",
+             ":neteq_tools",
+             "../../api/audio_codecs:audio_codecs_api",
+             "../../rtc_base:rtc_base_approved",
+             "../../test:test_support",
+             "//testing/gtest",
+           ]
+  }
+
+  rtc_executable("delay_test") {
+    testonly = true
+    sources = [
+      "test/Channel.cc",
+      "test/Channel.h",
+      "test/PCMFile.cc",
+      "test/PCMFile.h",
+      "test/delay_test.cc",
+      "test/utility.cc",
+      "test/utility.h",
+    ]
+
+    deps = [
+      ":audio_coding",
+      ":audio_coding_module_typedefs",
+      ":audio_format_conversion",
+      "..:module_api",
+      "../..:typedefs",
+      "../../:webrtc_common",
+      "../../api:optional",
+      "../../rtc_base:rtc_base_approved",
+      "../../system_wrappers",
+      "../../system_wrappers:system_wrappers_default",
+      "../../test:fileutils",
+      "../../test:test_support",
+      "../rtp_rtcp",
+      "//testing/gtest",
+    ]
+  }  # delay_test
+
+  rtc_executable("insert_packet_with_timing") {
+    testonly = true
+    sources = [
+      "test/Channel.cc",
+      "test/Channel.h",
+      "test/PCMFile.cc",
+      "test/PCMFile.h",
+      "test/insert_packet_with_timing.cc",
+    ]
+
+    if (!build_with_chromium && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+
+    deps = [
+      ":audio_coding",
+      ":audio_format_conversion",
+      "..:module_api",
+      "../..:typedefs",
+      "../../:webrtc_common",
+      "../../api:optional",
+      "../../rtc_base:rtc_base_approved",
+      "../../system_wrappers",
+      "../../system_wrappers:system_wrappers_default",
+      "../../test:fileutils",
+      "../../test:test_support",
+      "../rtp_rtcp",
+      "//testing/gtest",
+    ]
+  }  # insert_packet_with_timing
+
+  audio_decoder_unittests_resources =
+      [ "../../resources/audio_coding/testfile32kHz.pcm" ]
+
+  if (is_ios) {
+    bundle_data("audio_decoder_unittests_bundle_data") {
+      testonly = true
+      sources = audio_decoder_unittests_resources
+      outputs = [
+        "{{bundle_resources_dir}}/{{source_file_part}}",
+      ]
+    }
+  }
+
+  rtc_test("audio_decoder_unittests") {
+    testonly = true
+    sources = [
+      "neteq/audio_decoder_unittest.cc",
+    ]
+
+    if (!build_with_chromium && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+
+    deps = [
+      "../../test:fileutils",
+    ]
+
+    defines = neteq_defines
+
+    deps += audio_coding_deps
+    deps += [
+      ":ilbc",
+      ":isac",
+      ":isac_fix",
+      ":neteq",
+      ":neteq_tools",
+      "../../api/audio_codecs:audio_codecs_api",
+      "../../api/audio_codecs/opus:audio_encoder_opus",
+      "../../common_audio",
+      "../../rtc_base:protobuf_utils",
+      "../../test:test_main",
+      "//testing/gtest",
+    ]
+
+    data = audio_decoder_unittests_resources
+
+    if (is_android) {
+      deps += [ "//testing/android/native_test:native_test_native_code" ]
+      shard_timeout = 900
+    }
+    if (is_ios) {
+      deps += [ ":audio_decoder_unittests_bundle_data" ]
+    }
+  }  # audio_decoder_unittests
+
+  if (rtc_enable_protobuf) {
+    proto_library("neteq_unittest_proto") {
+      sources = [
+        "neteq/neteq_unittest.proto",
+      ]
+      proto_out_dir = "modules/audio_coding/neteq"
+    }
+
+    rtc_test("neteq_rtpplay") {
+      testonly = true
+      defines = []
+      deps = [
+        "..:module_api",
+        "../..:typedefs",
+        "../../rtc_base:checks",
+        "../../test:fileutils",
+      ]
+      sources = [
+        "neteq/tools/neteq_rtpplay.cc",
+      ]
+
+      if (!build_with_chromium && is_clang) {
+        # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+        suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+      }
+
+      if (is_win) {
+        cflags = [
+          # TODO(kjellander): bugs.webrtc.org/261: Fix this warning.
+          "/wd4373",  # virtual function override.
+        ]
+      }
+
+      deps += [
+        ":neteq",
+        ":neteq_test_tools",
+        "../..:webrtc_common",
+        "../../rtc_base:rtc_base_approved",
+        "../../system_wrappers:system_wrappers_default",
+        "../../test:test_support",
+      ]
+    }
+  }
+
+  audio_codec_speed_tests_resources = [
+    "//resources/audio_coding/music_stereo_48kHz.pcm",
+    "//resources/audio_coding/speech_mono_16kHz.pcm",
+    "//resources/audio_coding/speech_mono_32_48kHz.pcm",
+  ]
+
+  if (is_ios) {
+    bundle_data("audio_codec_speed_tests_data") {
+      testonly = true
+      sources = audio_codec_speed_tests_resources
+      outputs = [
+        "{{bundle_resources_dir}}/{{source_file_part}}",
+      ]
+    }
+  }
+
+  rtc_test("audio_codec_speed_tests") {
+    testonly = true
+    defines = []
+    deps = [
+      "../..:typedefs",
+      "../../test:fileutils",
+    ]
+    sources = [
+      "codecs/isac/fix/test/isac_speed_test.cc",
+      "codecs/opus/opus_speed_test.cc",
+      "codecs/tools/audio_codec_speed_test.cc",
+      "codecs/tools/audio_codec_speed_test.h",
+    ]
+
+    if (!build_with_chromium && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+
+    data = audio_codec_speed_tests_resources
+
+    if (is_android) {
+      deps += [ "//testing/android/native_test:native_test_native_code" ]
+      shard_timeout = 900
+    }
+
+    if (is_ios) {
+      deps += [ ":audio_codec_speed_tests_data" ]
+    }
+
+    deps += [
+      ":isac_fix",
+      ":webrtc_opus",
+      "../..:webrtc_common",
+      "../../api:libjingle_peerconnection_api",
+      "../../rtc_base:rtc_base_approved",
+      "../../system_wrappers:metrics_default",
+      "../../system_wrappers:system_wrappers_default",
+      "../../test:field_trial",
+      "../../test:test_main",
+      "../audio_processing",
+      "//testing/gtest",
+    ]
+  }
+
+  rtc_source_set("neteq_test_support") {
+    testonly = true
+    sources = [
+      "neteq/tools/neteq_external_decoder_test.cc",
+      "neteq/tools/neteq_external_decoder_test.h",
+      "neteq/tools/neteq_performance_test.cc",
+      "neteq/tools/neteq_performance_test.h",
+    ]
+
+    if (!build_with_chromium && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+
+    deps = [
+      ":neteq",
+      ":neteq_test_tools",
+      ":pcm16b",
+      "..:module_api",
+      "../..:typedefs",
+      "../..:webrtc_common",
+      "../../api/audio_codecs:audio_codecs_api",
+      "../../api/audio_codecs:builtin_audio_decoder_factory",
+      "../../rtc_base:checks",
+      "../../rtc_base:rtc_base_approved",
+      "../../system_wrappers",
+      "../../test:fileutils",
+      "../../test:test_support",
+      "//testing/gtest",
+    ]
+  }
+
+  rtc_source_set("neteq_quality_test_support") {
+    testonly = true
+    sources = [
+      "neteq/tools/neteq_quality_test.cc",
+      "neteq/tools/neteq_quality_test.h",
+    ]
+
+    if (!build_with_chromium && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+
+    deps = [
+      ":neteq",
+      ":neteq_test_tools",
+      "..:module_api",
+      "../..:typedefs",
+      "../..:webrtc_common",
+      "../../api/audio_codecs:builtin_audio_decoder_factory",
+      "../../rtc_base:checks",
+      "../../rtc_base:rtc_base_approved",
+      "../../test:fileutils",
+      "../../test:test_support",
+      "//testing/gtest",
+    ]
+  }
+
+  rtc_executable("rtp_encode") {
+    testonly = true
+
+    deps = audio_coding_deps + [
+             "../..:typedefs",
+             ":audio_coding",
+             ":neteq_input_audio_tools",
+             "../../api/audio_codecs/g711:audio_encoder_g711",
+             "../../api/audio_codecs/L16:audio_encoder_L16",
+             "../../api/audio_codecs/g722:audio_encoder_g722",
+             "../../api/audio_codecs/ilbc:audio_encoder_ilbc",
+             "../../system_wrappers:system_wrappers_default",
+             "../../api/audio_codecs/isac:audio_encoder_isac",
+             "../../api/audio_codecs/opus:audio_encoder_opus",
+             "../../rtc_base:rtc_base_approved",
+           ]
+
+    sources = [
+      "neteq/tools/rtp_encode.cc",
+    ]
+
+    defines = audio_coding_defines
+  }
+
+  rtc_executable("rtp_jitter") {
+    testonly = true
+
+    deps = audio_coding_deps + [
+             "../..:typedefs",
+             "../../system_wrappers:system_wrappers_default",
+             "../rtp_rtcp:rtp_rtcp_format",
+             "../../api:array_view",
+             "../../rtc_base:rtc_base_approved",
+           ]
+
+    sources = [
+      "neteq/tools/rtp_jitter.cc",
+    ]
+
+    defines = audio_coding_defines
+  }
+
+  rtc_executable("rtpcat") {
+    testonly = true
+
+    sources = [
+      "neteq/tools/rtpcat.cc",
+    ]
+
+    deps = [
+      "../../rtc_base:checks",
+      "../../rtc_base:rtc_base_approved",
+      "../../system_wrappers:system_wrappers_default",
+      "../../test:rtp_test_utils",
+      "//testing/gtest",
+    ]
+  }
+
+  rtc_executable("rtp_analyze") {
+    testonly = true
+
+    sources = [
+      "neteq/tools/rtp_analyze.cc",
+    ]
+
+    deps = [
+      ":neteq",
+      ":neteq_test_tools",
+      ":pcm16b",
+      "../../rtc_base:rtc_base_approved",
+      "../../system_wrappers:system_wrappers_default",
+      "//testing/gtest",
+    ]
+
+    if (!build_with_chromium && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+  }
+
+  rtc_executable("neteq_opus_quality_test") {
+    testonly = true
+
+    sources = [
+      "neteq/test/neteq_opus_quality_test.cc",
+    ]
+
+    deps = [
+      ":neteq",
+      ":neteq_quality_test_support",
+      ":neteq_tools",
+      ":webrtc_opus",
+      "../../rtc_base:rtc_base_approved",
+      "../../test:test_main",
+      "//testing/gtest",
+    ]
+  }
+
+  rtc_executable("neteq_speed_test") {
+    testonly = true
+
+    sources = [
+      "neteq/test/neteq_speed_test.cc",
+    ]
+
+    deps = [
+      ":neteq",
+      ":neteq_test_support",
+      "../..:typedefs",
+      "../..:webrtc_common",
+      "../../rtc_base:rtc_base_approved",
+      "../../system_wrappers:system_wrappers_default",
+      "../../test:fileutils",
+      "../../test:test_support",
+    ]
+  }
+
+  rtc_executable("neteq_ilbc_quality_test") {
+    testonly = true
+
+    sources = [
+      "neteq/test/neteq_ilbc_quality_test.cc",
+    ]
+
+    deps = [
+      ":ilbc",
+      ":neteq",
+      ":neteq_quality_test_support",
+      ":neteq_tools",
+      "../..:webrtc_common",
+      "../../rtc_base:checks",
+      "../../rtc_base:rtc_base_approved",
+      "../../system_wrappers:system_wrappers_default",
+      "../../test:fileutils",
+      "../../test:test_main",
+      "//testing/gtest",
+    ]
+  }
+
+  rtc_executable("neteq_isac_quality_test") {
+    testonly = true
+
+    sources = [
+      "neteq/test/neteq_isac_quality_test.cc",
+    ]
+
+    deps = [
+      ":isac_fix",
+      ":neteq",
+      ":neteq_quality_test_support",
+      "../../rtc_base:rtc_base_approved",
+      "../../test:test_main",
+      "//testing/gtest",
+    ]
+  }
+
+  rtc_executable("neteq_pcmu_quality_test") {
+    testonly = true
+
+    sources = [
+      "neteq/test/neteq_pcmu_quality_test.cc",
+    ]
+
+    deps = [
+      ":g711",
+      ":neteq",
+      ":neteq_quality_test_support",
+      "../../rtc_base:checks",
+      "../../rtc_base:rtc_base_approved",
+      "../../test:fileutils",
+      "../../test:test_main",
+      "//testing/gtest",
+    ]
+  }
+
+  rtc_executable("neteq_pcm16b_quality_test") {
+    testonly = true
+
+    sources = [
+      "neteq/test/neteq_pcm16b_quality_test.cc",
+    ]
+
+    deps = [
+      ":neteq",
+      ":neteq_quality_test_support",
+      ":pcm16b",
+      "../../rtc_base:checks",
+      "../../rtc_base:rtc_base_approved",
+      "../../test:fileutils",
+      "../../test:test_main",
+      "//testing/gtest",
+    ]
+  }
+
+  rtc_executable("isac_fix_test") {
+    testonly = true
+
+    sources = [
+      "codecs/isac/fix/test/kenny.cc",
+    ]
+
+    deps = [
+      ":isac_fix",
+      "../../test:perf_test",
+      "../../test:test_support",
+    ]
+
+    data = [
+      "../../resources/speech_and_misc_wb.pcm",
+    ]
+
+    if (is_win) {
+      cflags = [
+        # Disable warnings to enable Win64 build, issue 1323.
+        "/wd4267",  # size_t to int truncation
+      ]
+    }
+  }
+
+  config("isac_test_warnings_config") {
+    if (is_win && is_clang) {
+      cflags = [
+        # Disable warnings failing when compiling with Clang on Windows.
+        # https://bugs.chromium.org/p/webrtc/issues/detail?id=5366
+        "-Wno-format",
+      ]
+    }
+  }
+
+  rtc_source_set("isac_test_util") {
+    testonly = true
+    sources = [
+      "codecs/isac/main/util/utility.c",
+      "codecs/isac/main/util/utility.h",
+    ]
+  }
+
+  rtc_executable("isac_test") {
+    testonly = true
+
+    sources = [
+      "codecs/isac/main/test/simpleKenny.c",
+    ]
+
+    include_dirs = [
+      "codecs/isac/main/include",
+      "codecs/isac/main/test",
+      "codecs/isac/main/util",
+    ]
+
+    deps = [
+      ":isac",
+      ":isac_test_util",
+      "../../rtc_base:rtc_base_approved",
+    ]
+
+    configs += [ ":isac_test_warnings_config" ]
+  }
+
+  rtc_executable("g711_test") {
+    testonly = true
+
+    sources = [
+      "codecs/g711/test/testG711.cc",
+    ]
+
+    deps = [
+      ":g711",
+    ]
+  }
+
+  rtc_executable("g722_test") {
+    testonly = true
+
+    sources = [
+      "codecs/g722/test/testG722.cc",
+    ]
+
+    deps = [
+      ":g722",
+      "../..:typedefs",
+      "../..:webrtc_common",
+    ]
+  }
+
+  rtc_executable("isac_api_test") {
+    testonly = true
+
+    sources = [
+      "codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc",
+    ]
+
+    deps = [
+      ":isac",
+      ":isac_test_util",
+      "../../rtc_base:rtc_base_approved",
+    ]
+
+    include_dirs = [
+      "codecs/isac/main/include",
+      "codecs/isac/main/test",
+      "codecs/isac/main/util",
+    ]
+  }
+
+  rtc_executable("isac_switch_samprate_test") {
+    testonly = true
+
+    sources = [
+      "codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc",
+    ]
+
+    deps = [
+      ":isac",
+      ":isac_test_util",
+      "../../common_audio",
+      "../../common_audio:common_audio_c",
+    ]
+
+    include_dirs = [
+      "codecs/isac/main/include",
+      "codecs/isac/main/test",
+      "codecs/isac/main/util",
+      "../../common_audio/signal_processing/include",
+    ]
+  }
+
+  rtc_executable("ilbc_test") {
+    testonly = true
+
+    sources = [
+      "codecs/ilbc/test/iLBC_test.c",
+    ]
+
+    deps = [
+      ":ilbc",
+    ]
+  }
+
+  rtc_executable("webrtc_opus_fec_test") {
+    testonly = true
+
+    sources = [
+      "codecs/opus/opus_fec_test.cc",
+    ]
+
+    deps = [
+      ":webrtc_opus",
+      "../../common_audio",
+      "../../rtc_base:rtc_base_approved",
+      "../../test:fileutils",
+      "../../test:test_main",
+      "//testing/gtest",
+    ]
+
+    if (!build_with_chromium && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+  }
+
+  rtc_source_set("audio_coding_unittests") {
+    testonly = true
+    visibility += webrtc_default_visibility
+
+    sources = [
+      "acm2/acm_receiver_unittest.cc",
+      "acm2/audio_coding_module_unittest.cc",
+      "acm2/call_statistics_unittest.cc",
+      "acm2/codec_manager_unittest.cc",
+      "acm2/rent_a_codec_unittest.cc",
+      "audio_network_adaptor/audio_network_adaptor_impl_unittest.cc",
+      "audio_network_adaptor/bitrate_controller_unittest.cc",
+      "audio_network_adaptor/channel_controller_unittest.cc",
+      "audio_network_adaptor/controller_manager_unittest.cc",
+      "audio_network_adaptor/dtx_controller_unittest.cc",
+      "audio_network_adaptor/event_log_writer_unittest.cc",
+      "audio_network_adaptor/fec_controller_plr_based_unittest.cc",
+      "audio_network_adaptor/fec_controller_rplr_based_unittest.cc",
+      "audio_network_adaptor/frame_length_controller_unittest.cc",
+      "audio_network_adaptor/util/threshold_curve_unittest.cc",
+      "codecs/builtin_audio_decoder_factory_unittest.cc",
+      "codecs/builtin_audio_encoder_factory_unittest.cc",
+      "codecs/cng/audio_encoder_cng_unittest.cc",
+      "codecs/cng/cng_unittest.cc",
+      "codecs/ilbc/ilbc_unittest.cc",
+      "codecs/isac/fix/source/filterbanks_unittest.cc",
+      "codecs/isac/fix/source/filters_unittest.cc",
+      "codecs/isac/fix/source/lpc_masking_model_unittest.cc",
+      "codecs/isac/fix/source/transform_unittest.cc",
+      "codecs/isac/main/source/audio_encoder_isac_unittest.cc",
+      "codecs/isac/main/source/isac_unittest.cc",
+      "codecs/isac/unittest.cc",
+      "codecs/legacy_encoded_audio_frame_unittest.cc",
+      "codecs/opus/audio_encoder_opus_unittest.cc",
+      "codecs/opus/opus_bandwidth_unittest.cc",
+      "codecs/opus/opus_unittest.cc",
+      "codecs/red/audio_encoder_copy_red_unittest.cc",
+      "neteq/audio_multi_vector_unittest.cc",
+      "neteq/audio_vector_unittest.cc",
+      "neteq/background_noise_unittest.cc",
+      "neteq/buffer_level_filter_unittest.cc",
+      "neteq/comfort_noise_unittest.cc",
+      "neteq/decision_logic_unittest.cc",
+      "neteq/decoder_database_unittest.cc",
+      "neteq/delay_manager_unittest.cc",
+      "neteq/delay_peak_detector_unittest.cc",
+      "neteq/dsp_helper_unittest.cc",
+      "neteq/dtmf_buffer_unittest.cc",
+      "neteq/dtmf_tone_generator_unittest.cc",
+      "neteq/expand_unittest.cc",
+      "neteq/merge_unittest.cc",
+      "neteq/mock/mock_buffer_level_filter.h",
+      "neteq/mock/mock_decoder_database.h",
+      "neteq/mock/mock_delay_manager.h",
+      "neteq/mock/mock_delay_peak_detector.h",
+      "neteq/mock/mock_dtmf_buffer.h",
+      "neteq/mock/mock_dtmf_tone_generator.h",
+      "neteq/mock/mock_expand.h",
+      "neteq/mock/mock_external_decoder_pcm16b.h",
+      "neteq/mock/mock_packet_buffer.h",
+      "neteq/mock/mock_red_payload_splitter.h",
+      "neteq/mock/mock_statistics_calculator.h",
+      "neteq/nack_tracker_unittest.cc",
+      "neteq/neteq_external_decoder_unittest.cc",
+      "neteq/neteq_impl_unittest.cc",
+      "neteq/neteq_network_stats_unittest.cc",
+      "neteq/neteq_stereo_unittest.cc",
+      "neteq/neteq_unittest.cc",
+      "neteq/normal_unittest.cc",
+      "neteq/packet_buffer_unittest.cc",
+      "neteq/post_decode_vad_unittest.cc",
+      "neteq/random_vector_unittest.cc",
+      "neteq/red_payload_splitter_unittest.cc",
+      "neteq/statistics_calculator_unittest.cc",
+      "neteq/sync_buffer_unittest.cc",
+      "neteq/tick_timer_unittest.cc",
+      "neteq/time_stretch_unittest.cc",
+      "neteq/timestamp_scaler_unittest.cc",
+      "neteq/tools/input_audio_file_unittest.cc",
+      "neteq/tools/packet_unittest.cc",
+    ]
+
+    deps = [
+      ":acm_receive_test",
+      ":acm_send_test",
+      ":audio_coding",
+      ":audio_coding_module_typedefs",
+      ":audio_format_conversion",
+      ":audio_network_adaptor",
+      ":cng",
+      ":g711",
+      ":ilbc",
+      ":isac",
+      ":isac_c",
+      ":isac_fix",
+      ":legacy_encoded_audio_frame",
+      ":mocks",
+      ":neteq",
+      ":neteq_test_support",
+      ":neteq_test_tools",
+      ":pcm16b",
+      ":red",
+      ":rent_a_codec",
+      ":webrtc_opus",
+      "..:module_api",
+      "../..:typedefs",
+      "../..:webrtc_common",
+      "../../api/audio_codecs:audio_codecs_api",
+      "../../api/audio_codecs:builtin_audio_decoder_factory",
+      "../../api/audio_codecs:builtin_audio_encoder_factory",
+      "../../api/audio_codecs/opus:audio_decoder_opus",
+      "../../api/audio_codecs/opus:audio_encoder_opus",
+      "../../common_audio",
+      "../../common_audio:common_audio_c",
+      "../../common_audio:mock_common_audio",
+      "../../logging:mocks",
+      "../../logging:rtc_event_audio",
+      "../../logging:rtc_event_log_api",
+      "../../rtc_base:checks",
+      "../../rtc_base:protobuf_utils",
+      "../../rtc_base:rtc_base",
+      "../../rtc_base:rtc_base_approved",
+      "../../rtc_base:rtc_base_tests_utils",
+      "../../rtc_base:sanitizer",
+      "../../system_wrappers",
+      "../../system_wrappers:cpu_features_api",
+      "../../test:audio_codec_mocks",
+      "../../test:field_trial",
+      "../../test:fileutils",
+      "../../test:rtp_test_utils",
+      "../../test:test_common",
+      "../../test:test_support",
+      "//testing/gtest",
+    ]
+
+    defines = audio_coding_defines
+
+    if (rtc_enable_protobuf) {
+      defines += [ "WEBRTC_NETEQ_UNITTEST_BITEXACT" ]
+      deps += [
+        ":ana_config_proto",
+        ":neteq_unittest_proto",
+      ]
+    }
+
+    if (!build_with_chromium && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+  }
+}
+
+# For backwards compatibility only! Use
+# webrtc/api/audio_codecs:audio_codecs_api instead.
+# TODO(kwiberg): Remove this.
+rtc_source_set("audio_decoder_interface") {
+  visibility += [ "*" ]
+  sources = [
+    "codecs/audio_decoder.h",
+  ]
+  deps = [
+    "../../api/audio_codecs:audio_codecs_api",
+  ]
+}
+
+# For backwards compatibility only! Use
+# webrtc/api/audio_codecs:audio_codecs_api instead.
+# TODO(ossu): Remove this.
+rtc_source_set("audio_encoder_interface") {
+  visibility += [ "*" ]
+  sources = [
+    "codecs/audio_encoder.h",
+  ]
+  deps = [
+    "../../api/audio_codecs:audio_codecs_api",
+  ]
+}
diff --git a/modules/audio_coding/DEPS b/modules/audio_coding/DEPS
new file mode 100644
index 0000000..3dc9624
--- /dev/null
+++ b/modules/audio_coding/DEPS
@@ -0,0 +1,7 @@
+include_rules = [
+  "+call",
+  "+common_audio",
+  "+logging/rtc_event_log",
+  "+audio_coding/neteq/neteq_unittest.pb.h",  # Different path.
+  "+system_wrappers",
+]
diff --git a/modules/audio_coding/OWNERS b/modules/audio_coding/OWNERS
new file mode 100644
index 0000000..c1adc56
--- /dev/null
+++ b/modules/audio_coding/OWNERS
@@ -0,0 +1,11 @@
+turaj@webrtc.org
+henrik.lundin@webrtc.org
+kwiberg@webrtc.org
+minyue@webrtc.org
+jan.skoglund@webrtc.org
+ossu@webrtc.org
+
+# These are for the common case of adding or renaming files. If you're doing
+# structural changes, please get a review from a reviewer in this file.
+per-file *.gn=*
+per-file *.gni=*
diff --git a/modules/audio_coding/acm2/acm_codec_database.cc b/modules/audio_coding/acm2/acm_codec_database.cc
new file mode 100644
index 0000000..4553b52
--- /dev/null
+++ b/modules/audio_coding/acm2/acm_codec_database.cc
@@ -0,0 +1,343 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file generates databases with information about all supported audio
+ * codecs.
+ */
+
+// TODO(tlegrand): Change constant input pointers in all functions to constant
+// references, where appropriate.
+#include "modules/audio_coding/acm2/acm_codec_database.h"
+
+#include <assert.h>
+
+#include "rtc_base/checks.h"
+
+#if ((defined WEBRTC_CODEC_ISAC) && (defined WEBRTC_CODEC_ISACFX))
+#error iSAC and iSACFX codecs cannot be enabled at the same time
+#endif
+
+namespace webrtc {
+
+namespace acm2 {
+
+namespace {
+
+// Checks if the bitrate is valid for iSAC.
+bool IsISACRateValid(int rate) {
+  return (rate == -1) || ((rate <= 56000) && (rate >= 10000));
+}
+
+// Checks if the bitrate is valid for iLBC.
+bool IsILBCRateValid(int rate, int frame_size_samples) {
+  if (((frame_size_samples == 240) || (frame_size_samples == 480)) &&
+      (rate == 13300)) {
+    return true;
+  } else if (((frame_size_samples == 160) || (frame_size_samples == 320)) &&
+      (rate == 15200)) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+// Checks if the bitrate is valid for Opus.
+bool IsOpusRateValid(int rate) {
+  return (rate >= 6000) && (rate <= 510000);
+}
+
+}  // namespace
+
+// Not yet used payload-types.
+// 83,  82,  81, 80, 79,  78,  77,  76,  75,  74,  73,  72,  71,  70,  69, 68,
+// 67, 66, 65
+
+const CodecInst ACMCodecDB::database_[] = {
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
+  {103, "ISAC", 16000, 480, 1, 32000},
+# if (defined(WEBRTC_CODEC_ISAC))
+  {104, "ISAC", 32000, 960, 1, 56000},
+# endif
+#endif
+  // Mono
+  {107, "L16", 8000, 80, 1, 128000},
+  {108, "L16", 16000, 160, 1, 256000},
+  {109, "L16", 32000, 320, 1, 512000},
+  // Stereo
+  {111, "L16", 8000, 80, 2, 128000},
+  {112, "L16", 16000, 160, 2, 256000},
+  {113, "L16", 32000, 320, 2, 512000},
+  // G.711, PCM mu-law and A-law.
+  // Mono
+  {0, "PCMU", 8000, 160, 1, 64000},
+  {8, "PCMA", 8000, 160, 1, 64000},
+  // Stereo
+  {110, "PCMU", 8000, 160, 2, 64000},
+  {118, "PCMA", 8000, 160, 2, 64000},
+#ifdef WEBRTC_CODEC_ILBC
+  {102, "ILBC", 8000, 240, 1, 13300},
+#endif
+  // Mono
+  {9, "G722", 16000, 320, 1, 64000},
+  // Stereo
+  {119, "G722", 16000, 320, 2, 64000},
+#ifdef WEBRTC_CODEC_OPUS
+  // Opus internally supports 48, 24, 16, 12, 8 kHz.
+  // Mono and stereo.
+  {120, "opus", 48000, 960, 2, 64000},
+#endif
+  // Comfort noise for four different sampling frequencies.
+  {13, "CN", 8000, 240, 1, 0},
+  {98, "CN", 16000, 480, 1, 0},
+  {99, "CN", 32000, 960, 1, 0},
+#ifdef ENABLE_48000_HZ
+  {100, "CN", 48000, 1440, 1, 0},
+#endif
+  {106, "telephone-event", 8000, 240, 1, 0},
+  {114, "telephone-event", 16000, 240, 1, 0},
+  {115, "telephone-event", 32000, 240, 1, 0},
+  {116, "telephone-event", 48000, 240, 1, 0},
+#ifdef WEBRTC_CODEC_RED
+  {127, "red", 8000, 0, 1, 0},
+#endif
+  // To prevent compile errors due to trailing commas.
+  {-1, "Null", -1, -1, 0, -1}
+};
+
+// Create database with all codec settings at compile time.
+// Each entry needs the following parameters in the given order:
+// Number of allowed packet sizes, a vector with the allowed packet sizes,
+// Basic block samples, max number of channels that are supported.
+const ACMCodecDB::CodecSettings ACMCodecDB::codec_settings_[] = {
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
+    {2, {480, 960}, 0, 1},
+# if (defined(WEBRTC_CODEC_ISAC))
+    {1, {960}, 0, 1},
+# endif
+#endif
+    // Mono
+    {4, {80, 160, 240, 320}, 0, 2},
+    {4, {160, 320, 480, 640}, 0, 2},
+    {2, {320, 640}, 0, 2},
+    // Stereo
+    {4, {80, 160, 240, 320}, 0, 2},
+    {4, {160, 320, 480, 640}, 0, 2},
+    {2, {320, 640}, 0, 2},
+    // G.711, PCM mu-law and A-law.
+    // Mono
+    {6, {80, 160, 240, 320, 400, 480}, 0, 2},
+    {6, {80, 160, 240, 320, 400, 480}, 0, 2},
+    // Stereo
+    {6, {80, 160, 240, 320, 400, 480}, 0, 2},
+    {6, {80, 160, 240, 320, 400, 480}, 0, 2},
+#ifdef WEBRTC_CODEC_ILBC
+    {4, {160, 240, 320, 480}, 0, 1},
+#endif
+    // Mono
+    {6, {160, 320, 480, 640, 800, 960}, 0, 2},
+    // Stereo
+    {6, {160, 320, 480, 640, 800, 960}, 0, 2},
+#ifdef WEBRTC_CODEC_OPUS
+    // Opus supports frames shorter than 10ms,
+    // but it doesn't help us to use them.
+    // Mono and stereo.
+#if WEBRTC_OPUS_SUPPORT_120MS_PTIME
+    {5, {480, 960, 1920, 2880, 5760}, 0, 2},
+#else
+    {4, {480, 960, 1920, 2880}, 0, 2},
+#endif
+#endif
+    // Comfort noise for three different sampling frequencies.
+    {1, {240}, 240, 1},
+    {1, {480}, 480, 1},
+    {1, {960}, 960, 1},
+// TODO(solenberg): What is this flag? It is never set in the build files.
+#ifdef ENABLE_48000_HZ
+    {1, {1440}, 1440, 1},
+#endif
+    {1, {240}, 240, 1},
+    {1, {240}, 240, 1},
+    {1, {240}, 240, 1},
+    {1, {240}, 240, 1},
+#ifdef WEBRTC_CODEC_RED
+    {1, {0}, 0, 1},
+#endif
+    // To prevent compile errors due to trailing commas.
+    {-1, {-1}, -1, 0}
+};
+
+// Create a database of all NetEQ decoders at compile time.
+const NetEqDecoder ACMCodecDB::neteq_decoders_[] = {
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
+    NetEqDecoder::kDecoderISAC,
+# if (defined(WEBRTC_CODEC_ISAC))
+    NetEqDecoder::kDecoderISACswb,
+# endif
+#endif
+    // Mono
+    NetEqDecoder::kDecoderPCM16B, NetEqDecoder::kDecoderPCM16Bwb,
+    NetEqDecoder::kDecoderPCM16Bswb32kHz,
+    // Stereo
+    NetEqDecoder::kDecoderPCM16B_2ch, NetEqDecoder::kDecoderPCM16Bwb_2ch,
+    NetEqDecoder::kDecoderPCM16Bswb32kHz_2ch,
+    // G.711, PCM mu-las and A-law.
+    // Mono
+    NetEqDecoder::kDecoderPCMu, NetEqDecoder::kDecoderPCMa,
+    // Stereo
+    NetEqDecoder::kDecoderPCMu_2ch, NetEqDecoder::kDecoderPCMa_2ch,
+#ifdef WEBRTC_CODEC_ILBC
+    NetEqDecoder::kDecoderILBC,
+#endif
+    // Mono
+    NetEqDecoder::kDecoderG722,
+    // Stereo
+    NetEqDecoder::kDecoderG722_2ch,
+#ifdef WEBRTC_CODEC_OPUS
+    // Mono and stereo.
+    NetEqDecoder::kDecoderOpus,
+#endif
+    // Comfort noise for three different sampling frequencies.
+    NetEqDecoder::kDecoderCNGnb, NetEqDecoder::kDecoderCNGwb,
+    NetEqDecoder::kDecoderCNGswb32kHz,
+#ifdef ENABLE_48000_HZ
+    NetEqDecoder::kDecoderCNGswb48kHz,
+#endif
+    NetEqDecoder::kDecoderAVT,
+    NetEqDecoder::kDecoderAVT16kHz,
+    NetEqDecoder::kDecoderAVT32kHz,
+    NetEqDecoder::kDecoderAVT48kHz,
+#ifdef WEBRTC_CODEC_RED
+    NetEqDecoder::kDecoderRED,
+#endif
+};
+
+// Enumerator for error codes when asking for codec database id.
+enum {
+  kInvalidCodec = -10,
+  kInvalidPayloadtype = -30,
+  kInvalidPacketSize = -40,
+  kInvalidRate = -50
+};
+
+// Gets the codec id number from the database. If there is some mismatch in
+// the codec settings, the function will return an error code.
+// NOTE! The first mismatch found will generate the return value.
+int ACMCodecDB::CodecNumber(const CodecInst& codec_inst) {
+  // Look for a matching codec in the database.
+  int codec_id = CodecId(codec_inst);
+
+  // Checks if we found a matching codec.
+  if (codec_id == -1) {
+    return kInvalidCodec;
+  }
+
+  // Checks the validity of payload type
+  if (!RentACodec::IsPayloadTypeValid(codec_inst.pltype)) {
+    return kInvalidPayloadtype;
+  }
+
+  // Comfort Noise is special case, packet-size & rate is not checked.
+  if (STR_CASE_CMP(database_[codec_id].plname, "CN") == 0) {
+    return codec_id;
+  }
+
+  // RED is special case, packet-size & rate is not checked.
+  if (STR_CASE_CMP(database_[codec_id].plname, "red") == 0) {
+    return codec_id;
+  }
+
+  // Checks the validity of packet size.
+  if (codec_settings_[codec_id].num_packet_sizes > 0) {
+    bool packet_size_ok = false;
+    int i;
+    int packet_size_samples;
+    for (i = 0; i < codec_settings_[codec_id].num_packet_sizes; i++) {
+      packet_size_samples =
+          codec_settings_[codec_id].packet_sizes_samples[i];
+      if (codec_inst.pacsize == packet_size_samples) {
+        packet_size_ok = true;
+        break;
+      }
+    }
+
+    if (!packet_size_ok) {
+      return kInvalidPacketSize;
+    }
+  }
+
+  if (codec_inst.pacsize < 1) {
+    return kInvalidPacketSize;
+  }
+
+  // Check the validity of rate. Codecs with multiple rates have their own
+  // function for this.
+  if (STR_CASE_CMP("isac", codec_inst.plname) == 0) {
+    return IsISACRateValid(codec_inst.rate) ? codec_id : kInvalidRate;
+  } else if (STR_CASE_CMP("ilbc", codec_inst.plname) == 0) {
+    return IsILBCRateValid(codec_inst.rate, codec_inst.pacsize)
+        ? codec_id : kInvalidRate;
+  } else if (STR_CASE_CMP("opus", codec_inst.plname) == 0) {
+    return IsOpusRateValid(codec_inst.rate)
+        ? codec_id : kInvalidRate;
+  }
+
+  return database_[codec_id].rate == codec_inst.rate ? codec_id : kInvalidRate;
+}
+
+// Looks for a matching payload name, frequency, and channels in the
+// codec list. Need to check all three since some codecs have several codec
+// entries with different frequencies and/or channels.
+// Does not check other codec settings, such as payload type and packet size.
+// Returns the id of the codec, or -1 if no match is found.
+int ACMCodecDB::CodecId(const CodecInst& codec_inst) {
+  return (CodecId(codec_inst.plname, codec_inst.plfreq,
+                  codec_inst.channels));
+}
+
+int ACMCodecDB::CodecId(const char* payload_name,
+                        int frequency,
+                        size_t channels) {
+  for (const CodecInst& ci : RentACodec::Database()) {
+    bool name_match = false;
+    bool frequency_match = false;
+    bool channels_match = false;
+
+    // Payload name, sampling frequency and number of channels need to match.
+    // NOTE! If |frequency| is -1, the frequency is not applicable, and is
+    // always treated as true, like for RED.
+    name_match = (STR_CASE_CMP(ci.plname, payload_name) == 0);
+    frequency_match = (frequency == ci.plfreq) || (frequency == -1);
+    // The number of channels must match for all codecs but Opus.
+    if (STR_CASE_CMP(payload_name, "opus") != 0) {
+      channels_match = (channels == ci.channels);
+    } else {
+      // For opus we just check that number of channels is valid.
+      channels_match = (channels == 1 || channels == 2);
+    }
+
+    if (name_match && frequency_match && channels_match) {
+      // We have found a matching codec in the list.
+      return &ci - RentACodec::Database().data();
+    }
+  }
+
+  // We didn't find a matching codec.
+  return -1;
+}
+// Gets codec id number from database for the receiver.
+int ACMCodecDB::ReceiverCodecNumber(const CodecInst& codec_inst) {
+  // Look for a matching codec in the database.
+  return CodecId(codec_inst);
+}
+
+}  // namespace acm2
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/acm2/acm_codec_database.h b/modules/audio_coding/acm2/acm_codec_database.h
new file mode 100644
index 0000000..81cd4be
--- /dev/null
+++ b/modules/audio_coding/acm2/acm_codec_database.h
@@ -0,0 +1,82 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file generates databases with information about all supported audio
+ * codecs.
+ */
+
+#ifndef MODULES_AUDIO_CODING_ACM2_ACM_CODEC_DATABASE_H_
+#define MODULES_AUDIO_CODING_ACM2_ACM_CODEC_DATABASE_H_
+
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/acm2/rent_a_codec.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+namespace acm2 {
+
+// TODO(tlegrand): replace class ACMCodecDB with a namespace.
+class ACMCodecDB {
+ public:
+  // kMaxNumCodecs - Maximum number of codecs that can be activated in one
+  //                 build.
+  // kMaxNumPacketSize - Maximum number of allowed packet sizes for one codec.
+  // These might need to be increased if adding a new codec to the database
+  static const int kMaxNumCodecs =  50;
+  static const int kMaxNumPacketSize = 6;
+
+  // Codec specific settings
+  //
+  // num_packet_sizes     - number of allowed packet sizes.
+  // packet_sizes_samples - list of the allowed packet sizes.
+  // basic_block_samples  - assigned a value different from 0 if the codec
+  //                        requires to be fed with a specific number of samples
+  //                        that can be different from packet size.
+  // channel_support      - number of channels supported to encode;
+  //                        1 = mono, 2 = stereo, etc.
+  struct CodecSettings {
+    int num_packet_sizes;
+    int packet_sizes_samples[kMaxNumPacketSize];
+    int basic_block_samples;
+    size_t channel_support;
+  };
+
+  // Returns codec id from database, given the information received in the input
+  // [codec_inst].
+  // Input:
+  //   [codec_inst] - Information about the codec for which we require the
+  //                  database id.
+  // Return:
+  //   codec id if successful, otherwise < 0.
+  static int CodecNumber(const CodecInst& codec_inst);
+  static int CodecId(const CodecInst& codec_inst);
+  static int CodecId(const char* payload_name, int frequency, size_t channels);
+  static int ReceiverCodecNumber(const CodecInst& codec_inst);
+
+  // Databases with information about the supported codecs
+  // database_ - stored information about all codecs: payload type, name,
+  //             sampling frequency, packet size in samples, default channel
+  //             support, and default rate.
+  // codec_settings_ - stored codec settings: number of allowed packet sizes,
+  //                   a vector with the allowed packet sizes, basic block
+  //                   samples, and max number of channels that are supported.
+  // neteq_decoders_ - list of supported decoders in NetEQ.
+  static const CodecInst database_[kMaxNumCodecs];
+  static const CodecSettings codec_settings_[kMaxNumCodecs];
+  static const NetEqDecoder neteq_decoders_[kMaxNumCodecs];
+};
+
+}  // namespace acm2
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_ACM2_ACM_CODEC_DATABASE_H_
diff --git a/modules/audio_coding/acm2/acm_receive_test.cc b/modules/audio_coding/acm2/acm_receive_test.cc
new file mode 100644
index 0000000..082506a
--- /dev/null
+++ b/modules/audio_coding/acm2/acm_receive_test.cc
@@ -0,0 +1,242 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/acm2/acm_receive_test.h"
+
+#include <assert.h>
+#include <stdio.h>
+
+#include <memory>
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "modules/audio_coding/codecs/audio_format_conversion.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_coding/neteq/tools/audio_sink.h"
+#include "modules/audio_coding/neteq/tools/packet.h"
+#include "modules/audio_coding/neteq/tools/packet_source.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+// Returns true if the codec should be registered, otherwise false. Changes
+// the number of channels for the Opus codec to always be 1.
+bool ModifyAndUseThisCodec(CodecInst* codec_param) {
+  if (STR_CASE_CMP(codec_param->plname, "CN") == 0 &&
+      codec_param->plfreq == 48000)
+    return false;  // Skip 48 kHz comfort noise.
+
+  if (STR_CASE_CMP(codec_param->plname, "telephone-event") == 0)
+    return false;  // Skip DTFM.
+
+  return true;
+}
+
+// Remaps payload types from ACM's default to those used in the resource file
+// neteq_universal_new.rtp. Returns true if the codec should be registered,
+// otherwise false. The payload types are set as follows (all are mono codecs):
+// PCMu = 0;
+// PCMa = 8;
+// Comfort noise 8 kHz = 13
+// Comfort noise 16 kHz = 98
+// Comfort noise 32 kHz = 99
+// iLBC = 102
+// iSAC wideband = 103
+// iSAC super-wideband = 104
+// AVT/DTMF = 106
+// RED = 117
+// PCM16b 8 kHz = 93
+// PCM16b 16 kHz = 94
+// PCM16b 32 kHz = 95
+// G.722 = 94
+bool RemapPltypeAndUseThisCodec(const char* plname,
+                                int plfreq,
+                                size_t channels,
+                                int* pltype) {
+  if (channels != 1)
+    return false;  // Don't use non-mono codecs.
+
+  // Re-map pltypes to those used in the NetEq test files.
+  if (STR_CASE_CMP(plname, "PCMU") == 0 && plfreq == 8000) {
+    *pltype = 0;
+  } else if (STR_CASE_CMP(plname, "PCMA") == 0 && plfreq == 8000) {
+    *pltype = 8;
+  } else if (STR_CASE_CMP(plname, "CN") == 0 && plfreq == 8000) {
+    *pltype = 13;
+  } else if (STR_CASE_CMP(plname, "CN") == 0 && plfreq == 16000) {
+    *pltype = 98;
+  } else if (STR_CASE_CMP(plname, "CN") == 0 && plfreq == 32000) {
+    *pltype = 99;
+  } else if (STR_CASE_CMP(plname, "ILBC") == 0) {
+    *pltype = 102;
+  } else if (STR_CASE_CMP(plname, "ISAC") == 0 && plfreq == 16000) {
+    *pltype = 103;
+  } else if (STR_CASE_CMP(plname, "ISAC") == 0 && plfreq == 32000) {
+    *pltype = 104;
+  } else if (STR_CASE_CMP(plname, "telephone-event") == 0 && plfreq == 8000) {
+    *pltype = 106;
+  } else if (STR_CASE_CMP(plname, "telephone-event") == 0 && plfreq == 16000) {
+    *pltype = 114;
+  } else if (STR_CASE_CMP(plname, "telephone-event") == 0 && plfreq == 32000) {
+    *pltype = 115;
+  } else if (STR_CASE_CMP(plname, "telephone-event") == 0 && plfreq == 48000) {
+    *pltype = 116;
+  } else if (STR_CASE_CMP(plname, "red") == 0) {
+    *pltype = 117;
+  } else if (STR_CASE_CMP(plname, "L16") == 0 && plfreq == 8000) {
+    *pltype = 93;
+  } else if (STR_CASE_CMP(plname, "L16") == 0 && plfreq == 16000) {
+    *pltype = 94;
+  } else if (STR_CASE_CMP(plname, "L16") == 0 && plfreq == 32000) {
+    *pltype = 95;
+  } else if (STR_CASE_CMP(plname, "G722") == 0) {
+    *pltype = 9;
+  } else {
+    // Don't use any other codecs.
+    return false;
+  }
+  return true;
+}
+
+AudioCodingModule::Config MakeAcmConfig(
+    Clock* clock,
+    rtc::scoped_refptr<AudioDecoderFactory> decoder_factory) {
+  AudioCodingModule::Config config;
+  config.clock = clock;
+  config.decoder_factory = std::move(decoder_factory);
+  return config;
+}
+
+}  // namespace
+
+AcmReceiveTestOldApi::AcmReceiveTestOldApi(
+    PacketSource* packet_source,
+    AudioSink* audio_sink,
+    int output_freq_hz,
+    NumOutputChannels exptected_output_channels,
+    rtc::scoped_refptr<AudioDecoderFactory> decoder_factory)
+    : clock_(0),
+      acm_(webrtc::AudioCodingModule::Create(
+          MakeAcmConfig(&clock_, std::move(decoder_factory)))),
+      packet_source_(packet_source),
+      audio_sink_(audio_sink),
+      output_freq_hz_(output_freq_hz),
+      exptected_output_channels_(exptected_output_channels) {}
+
+AcmReceiveTestOldApi::~AcmReceiveTestOldApi() = default;
+
+void AcmReceiveTestOldApi::RegisterDefaultCodecs() {
+  CodecInst my_codec_param;
+  for (int n = 0; n < acm_->NumberOfCodecs(); n++) {
+    ASSERT_EQ(0, acm_->Codec(n, &my_codec_param)) << "Failed to get codec.";
+    if (ModifyAndUseThisCodec(&my_codec_param)) {
+      ASSERT_EQ(true,
+                acm_->RegisterReceiveCodec(my_codec_param.pltype,
+                                           CodecInstToSdp(my_codec_param)))
+          << "Couldn't register receive codec.\n";
+    }
+  }
+}
+
+void AcmReceiveTestOldApi::RegisterNetEqTestCodecs() {
+  CodecInst my_codec_param;
+  for (int n = 0; n < acm_->NumberOfCodecs(); n++) {
+    ASSERT_EQ(0, acm_->Codec(n, &my_codec_param)) << "Failed to get codec.";
+    if (!ModifyAndUseThisCodec(&my_codec_param)) {
+      // Skip this codec.
+      continue;
+    }
+
+    if (RemapPltypeAndUseThisCodec(my_codec_param.plname,
+                                   my_codec_param.plfreq,
+                                   my_codec_param.channels,
+                                   &my_codec_param.pltype)) {
+      ASSERT_EQ(true,
+                acm_->RegisterReceiveCodec(my_codec_param.pltype,
+                                           CodecInstToSdp(my_codec_param)))
+          << "Couldn't register receive codec.\n";
+    }
+  }
+}
+
+void AcmReceiveTestOldApi::Run() {
+  for (std::unique_ptr<Packet> packet(packet_source_->NextPacket()); packet;
+       packet = packet_source_->NextPacket()) {
+    // Pull audio until time to insert packet.
+    while (clock_.TimeInMilliseconds() < packet->time_ms()) {
+      AudioFrame output_frame;
+      bool muted;
+      EXPECT_EQ(0,
+                acm_->PlayoutData10Ms(output_freq_hz_, &output_frame, &muted));
+      ASSERT_EQ(output_freq_hz_, output_frame.sample_rate_hz_);
+      ASSERT_FALSE(muted);
+      const size_t samples_per_block =
+          static_cast<size_t>(output_freq_hz_ * 10 / 1000);
+      EXPECT_EQ(samples_per_block, output_frame.samples_per_channel_);
+      if (exptected_output_channels_ != kArbitraryChannels) {
+        if (output_frame.speech_type_ == webrtc::AudioFrame::kPLC) {
+          // Don't check number of channels for PLC output, since each test run
+          // usually starts with a short period of mono PLC before decoding the
+          // first packet.
+        } else {
+          EXPECT_EQ(exptected_output_channels_, output_frame.num_channels_);
+        }
+      }
+      ASSERT_TRUE(audio_sink_->WriteAudioFrame(output_frame));
+      clock_.AdvanceTimeMilliseconds(10);
+      AfterGetAudio();
+    }
+
+    // Insert packet after converting from RTPHeader to WebRtcRTPHeader.
+    WebRtcRTPHeader header;
+    header.header = packet->header();
+    header.frameType = kAudioFrameSpeech;
+    memset(&header.type.Audio, 0, sizeof(RTPAudioHeader));
+    EXPECT_EQ(0,
+              acm_->IncomingPacket(
+                  packet->payload(),
+                  static_cast<int32_t>(packet->payload_length_bytes()),
+                  header))
+        << "Failure when inserting packet:" << std::endl
+        << "  PT = " << static_cast<int>(header.header.payloadType) << std::endl
+        << "  TS = " << header.header.timestamp << std::endl
+        << "  SN = " << header.header.sequenceNumber;
+  }
+}
+
+AcmReceiveTestToggleOutputFreqOldApi::AcmReceiveTestToggleOutputFreqOldApi(
+    PacketSource* packet_source,
+    AudioSink* audio_sink,
+    int output_freq_hz_1,
+    int output_freq_hz_2,
+    int toggle_period_ms,
+    NumOutputChannels exptected_output_channels)
+    : AcmReceiveTestOldApi(packet_source,
+                           audio_sink,
+                           output_freq_hz_1,
+                           exptected_output_channels,
+                           CreateBuiltinAudioDecoderFactory()),
+      output_freq_hz_1_(output_freq_hz_1),
+      output_freq_hz_2_(output_freq_hz_2),
+      toggle_period_ms_(toggle_period_ms),
+      last_toggle_time_ms_(clock_.TimeInMilliseconds()) {}
+
+void AcmReceiveTestToggleOutputFreqOldApi::AfterGetAudio() {
+  if (clock_.TimeInMilliseconds() >= last_toggle_time_ms_ + toggle_period_ms_) {
+    output_freq_hz_ = (output_freq_hz_ == output_freq_hz_1_)
+                          ? output_freq_hz_2_
+                          : output_freq_hz_1_;
+    last_toggle_time_ms_ = clock_.TimeInMilliseconds();
+  }
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/acm2/acm_receive_test.h b/modules/audio_coding/acm2/acm_receive_test.h
new file mode 100644
index 0000000..c7e7da6
--- /dev/null
+++ b/modules/audio_coding/acm2/acm_receive_test.h
@@ -0,0 +1,97 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_ACM2_ACM_RECEIVE_TEST_H_
+#define MODULES_AUDIO_CODING_ACM2_ACM_RECEIVE_TEST_H_
+
+#include <stddef.h> // for size_t
+#include <memory>
+#include <string>
+
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+class AudioCodingModule;
+class AudioDecoder;
+struct CodecInst;
+
+namespace test {
+class AudioSink;
+class PacketSource;
+
+class AcmReceiveTestOldApi {
+ public:
+  enum NumOutputChannels : size_t {
+    kArbitraryChannels = 0,
+    kMonoOutput = 1,
+    kStereoOutput = 2
+  };
+
+  AcmReceiveTestOldApi(PacketSource* packet_source,
+                       AudioSink* audio_sink,
+                       int output_freq_hz,
+                       NumOutputChannels exptected_output_channels,
+                       rtc::scoped_refptr<AudioDecoderFactory> decoder_factory);
+  virtual ~AcmReceiveTestOldApi();
+
+  // Registers the codecs with default parameters from ACM.
+  void RegisterDefaultCodecs();
+
+  // Registers codecs with payload types matching the pre-encoded NetEq test
+  // files.
+  void RegisterNetEqTestCodecs();
+
+  // Runs the test and returns true if successful.
+  void Run();
+
+  AudioCodingModule* get_acm() { return acm_.get(); }
+
+ protected:
+  // Method is called after each block of output audio is received from ACM.
+  virtual void AfterGetAudio() {}
+
+  SimulatedClock clock_;
+  std::unique_ptr<AudioCodingModule> acm_;
+  PacketSource* packet_source_;
+  AudioSink* audio_sink_;
+  int output_freq_hz_;
+  NumOutputChannels exptected_output_channels_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(AcmReceiveTestOldApi);
+};
+
+// This test toggles the output frequency every |toggle_period_ms|. The test
+// starts with |output_freq_hz_1|. Except for the toggling, it does the same
+// thing as AcmReceiveTestOldApi.
+class AcmReceiveTestToggleOutputFreqOldApi : public AcmReceiveTestOldApi {
+ public:
+  AcmReceiveTestToggleOutputFreqOldApi(
+      PacketSource* packet_source,
+      AudioSink* audio_sink,
+      int output_freq_hz_1,
+      int output_freq_hz_2,
+      int toggle_period_ms,
+      NumOutputChannels exptected_output_channels);
+
+ protected:
+  void AfterGetAudio() override;
+
+  const int output_freq_hz_1_;
+  const int output_freq_hz_2_;
+  const int toggle_period_ms_;
+  int64_t last_toggle_time_ms_;
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_ACM2_ACM_RECEIVE_TEST_H_
diff --git a/modules/audio_coding/acm2/acm_receiver.cc b/modules/audio_coding/acm2/acm_receiver.cc
new file mode 100644
index 0000000..0d5dcae
--- /dev/null
+++ b/modules/audio_coding/acm2/acm_receiver.cc
@@ -0,0 +1,415 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/acm2/acm_receiver.h"
+
+#include <stdlib.h>  // malloc
+
+#include <algorithm>  // sort
+#include <vector>
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/acm2/acm_resampler.h"
+#include "modules/audio_coding/acm2/call_statistics.h"
+#include "modules/audio_coding/acm2/rent_a_codec.h"
+#include "modules/audio_coding/neteq/include/neteq.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/format_macros.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+namespace acm2 {
+
+AcmReceiver::AcmReceiver(const AudioCodingModule::Config& config)
+    : last_audio_buffer_(new int16_t[AudioFrame::kMaxDataSizeSamples]),
+      neteq_(NetEq::Create(config.neteq_config, config.decoder_factory)),
+      clock_(config.clock),
+      resampled_last_output_frame_(true) {
+  RTC_DCHECK(clock_);
+  memset(last_audio_buffer_.get(), 0, AudioFrame::kMaxDataSizeSamples);
+}
+
+AcmReceiver::~AcmReceiver() = default;
+
+int AcmReceiver::SetMinimumDelay(int delay_ms) {
+  if (neteq_->SetMinimumDelay(delay_ms))
+    return 0;
+  RTC_LOG(LERROR) << "AcmReceiver::SetExtraDelay " << delay_ms;
+  return -1;
+}
+
+int AcmReceiver::SetMaximumDelay(int delay_ms) {
+  if (neteq_->SetMaximumDelay(delay_ms))
+    return 0;
+  RTC_LOG(LERROR) << "AcmReceiver::SetExtraDelay " << delay_ms;
+  return -1;
+}
+
+int AcmReceiver::LeastRequiredDelayMs() const {
+  return neteq_->LeastRequiredDelayMs();
+}
+
+rtc::Optional<int> AcmReceiver::last_packet_sample_rate_hz() const {
+  rtc::CritScope lock(&crit_sect_);
+  return last_packet_sample_rate_hz_;
+}
+
+int AcmReceiver::last_output_sample_rate_hz() const {
+  return neteq_->last_output_sample_rate_hz();
+}
+
+int AcmReceiver::InsertPacket(const WebRtcRTPHeader& rtp_header,
+                              rtc::ArrayView<const uint8_t> incoming_payload) {
+  uint32_t receive_timestamp = 0;
+  const RTPHeader* header = &rtp_header.header;  // Just a shorthand.
+
+  if (incoming_payload.empty()) {
+    neteq_->InsertEmptyPacket(rtp_header.header);
+    return 0;
+  }
+
+  {
+    rtc::CritScope lock(&crit_sect_);
+
+    const rtc::Optional<CodecInst> ci =
+        RtpHeaderToDecoder(*header, incoming_payload[0]);
+    if (!ci) {
+      RTC_LOG_F(LS_ERROR) << "Payload-type "
+                          << static_cast<int>(header->payloadType)
+                          << " is not registered.";
+      return -1;
+    }
+    receive_timestamp = NowInTimestamp(ci->plfreq);
+
+    if (STR_CASE_CMP(ci->plname, "cn") == 0) {
+      if (last_audio_decoder_ && last_audio_decoder_->channels > 1) {
+        // This is a CNG and the audio codec is not mono, so skip pushing in
+        // packets into NetEq.
+        return 0;
+      }
+    } else {
+      last_audio_decoder_ = ci;
+      last_audio_format_ = neteq_->GetDecoderFormat(ci->pltype);
+      RTC_DCHECK(last_audio_format_);
+      last_packet_sample_rate_hz_ = ci->plfreq;
+    }
+  }  // |crit_sect_| is released.
+
+  if (neteq_->InsertPacket(rtp_header.header, incoming_payload,
+                           receive_timestamp) < 0) {
+    RTC_LOG(LERROR) << "AcmReceiver::InsertPacket "
+                    << static_cast<int>(header->payloadType)
+                    << " Failed to insert packet";
+    return -1;
+  }
+  return 0;
+}
+
+int AcmReceiver::GetAudio(int desired_freq_hz,
+                          AudioFrame* audio_frame,
+                          bool* muted) {
+  RTC_DCHECK(muted);
+  // Accessing members, take the lock.
+  rtc::CritScope lock(&crit_sect_);
+
+  if (neteq_->GetAudio(audio_frame, muted) != NetEq::kOK) {
+    RTC_LOG(LERROR) << "AcmReceiver::GetAudio - NetEq Failed.";
+    return -1;
+  }
+
+  const int current_sample_rate_hz = neteq_->last_output_sample_rate_hz();
+
+  // Update if resampling is required.
+  const bool need_resampling =
+      (desired_freq_hz != -1) && (current_sample_rate_hz != desired_freq_hz);
+
+  if (need_resampling && !resampled_last_output_frame_) {
+    // Prime the resampler with the last frame.
+    int16_t temp_output[AudioFrame::kMaxDataSizeSamples];
+    int samples_per_channel_int = resampler_.Resample10Msec(
+        last_audio_buffer_.get(), current_sample_rate_hz, desired_freq_hz,
+        audio_frame->num_channels_, AudioFrame::kMaxDataSizeSamples,
+        temp_output);
+    if (samples_per_channel_int < 0) {
+      RTC_LOG(LERROR) << "AcmReceiver::GetAudio - "
+                         "Resampling last_audio_buffer_ failed.";
+      return -1;
+    }
+  }
+
+  // TODO(henrik.lundin) Glitches in the output may appear if the output rate
+  // from NetEq changes. See WebRTC issue 3923.
+  if (need_resampling) {
+    // TODO(yujo): handle this more efficiently for muted frames.
+    int samples_per_channel_int = resampler_.Resample10Msec(
+        audio_frame->data(), current_sample_rate_hz, desired_freq_hz,
+        audio_frame->num_channels_, AudioFrame::kMaxDataSizeSamples,
+        audio_frame->mutable_data());
+    if (samples_per_channel_int < 0) {
+      RTC_LOG(LERROR)
+          << "AcmReceiver::GetAudio - Resampling audio_buffer_ failed.";
+      return -1;
+    }
+    audio_frame->samples_per_channel_ =
+        static_cast<size_t>(samples_per_channel_int);
+    audio_frame->sample_rate_hz_ = desired_freq_hz;
+    RTC_DCHECK_EQ(
+        audio_frame->sample_rate_hz_,
+        rtc::dchecked_cast<int>(audio_frame->samples_per_channel_ * 100));
+    resampled_last_output_frame_ = true;
+  } else {
+    resampled_last_output_frame_ = false;
+    // We might end up here ONLY if codec is changed.
+  }
+
+  // Store current audio in |last_audio_buffer_| for next time.
+  memcpy(last_audio_buffer_.get(), audio_frame->data(),
+         sizeof(int16_t) * audio_frame->samples_per_channel_ *
+             audio_frame->num_channels_);
+
+  call_stats_.DecodedByNetEq(audio_frame->speech_type_, *muted);
+  return 0;
+}
+
+void AcmReceiver::SetCodecs(const std::map<int, SdpAudioFormat>& codecs) {
+  neteq_->SetCodecs(codecs);
+}
+
+int32_t AcmReceiver::AddCodec(int acm_codec_id,
+                              uint8_t payload_type,
+                              size_t channels,
+                              int /*sample_rate_hz*/,
+                              AudioDecoder* audio_decoder,
+                              const std::string& name) {
+  // TODO(kwiberg): This function has been ignoring the |sample_rate_hz|
+  // argument for a long time. Arguably, it should simply be removed.
+
+  const auto neteq_decoder = [acm_codec_id, channels]() -> NetEqDecoder {
+    if (acm_codec_id == -1)
+      return NetEqDecoder::kDecoderArbitrary;  // External decoder.
+    const rtc::Optional<RentACodec::CodecId> cid =
+        RentACodec::CodecIdFromIndex(acm_codec_id);
+    RTC_DCHECK(cid) << "Invalid codec index: " << acm_codec_id;
+    const rtc::Optional<NetEqDecoder> ned =
+        RentACodec::NetEqDecoderFromCodecId(*cid, channels);
+    RTC_DCHECK(ned) << "Invalid codec ID: " << static_cast<int>(*cid);
+    return *ned;
+  }();
+  const rtc::Optional<SdpAudioFormat> new_format =
+      NetEqDecoderToSdpAudioFormat(neteq_decoder);
+
+  rtc::CritScope lock(&crit_sect_);
+
+  const auto old_format = neteq_->GetDecoderFormat(payload_type);
+  if (old_format && new_format && *old_format == *new_format) {
+    // Re-registering the same codec. Do nothing and return.
+    return 0;
+  }
+
+  if (neteq_->RemovePayloadType(payload_type) != NetEq::kOK) {
+    RTC_LOG(LERROR) << "Cannot remove payload "
+                    << static_cast<int>(payload_type);
+    return -1;
+  }
+
+  int ret_val;
+  if (!audio_decoder) {
+    ret_val = neteq_->RegisterPayloadType(neteq_decoder, name, payload_type);
+  } else {
+    ret_val = neteq_->RegisterExternalDecoder(
+        audio_decoder, neteq_decoder, name, payload_type);
+  }
+  if (ret_val != NetEq::kOK) {
+    RTC_LOG(LERROR) << "AcmReceiver::AddCodec " << acm_codec_id
+                    << static_cast<int>(payload_type)
+                    << " channels: " << channels;
+    return -1;
+  }
+  return 0;
+}
+
+bool AcmReceiver::AddCodec(int rtp_payload_type,
+                           const SdpAudioFormat& audio_format) {
+  const auto old_format = neteq_->GetDecoderFormat(rtp_payload_type);
+  if (old_format && *old_format == audio_format) {
+    // Re-registering the same codec. Do nothing and return.
+    return true;
+  }
+
+  if (neteq_->RemovePayloadType(rtp_payload_type) != NetEq::kOK) {
+    RTC_LOG(LERROR)
+        << "AcmReceiver::AddCodec: Could not remove existing decoder"
+           " for payload type "
+        << rtp_payload_type;
+    return false;
+  }
+
+  const bool success =
+      neteq_->RegisterPayloadType(rtp_payload_type, audio_format);
+  if (!success) {
+    RTC_LOG(LERROR) << "AcmReceiver::AddCodec failed for payload type "
+                    << rtp_payload_type << ", decoder format " << audio_format;
+  }
+  return success;
+}
+
+void AcmReceiver::FlushBuffers() {
+  neteq_->FlushBuffers();
+}
+
+void AcmReceiver::RemoveAllCodecs() {
+  rtc::CritScope lock(&crit_sect_);
+  neteq_->RemoveAllPayloadTypes();
+  last_audio_decoder_ = rtc::nullopt;
+  last_audio_format_ = rtc::nullopt;
+  last_packet_sample_rate_hz_ = rtc::nullopt;
+}
+
+int AcmReceiver::RemoveCodec(uint8_t payload_type) {
+  rtc::CritScope lock(&crit_sect_);
+  if (neteq_->RemovePayloadType(payload_type) != NetEq::kOK) {
+    RTC_LOG(LERROR) << "AcmReceiver::RemoveCodec "
+                    << static_cast<int>(payload_type);
+    return -1;
+  }
+  if (last_audio_decoder_ && payload_type == last_audio_decoder_->pltype) {
+    last_audio_decoder_ = rtc::nullopt;
+    last_audio_format_ = rtc::nullopt;
+    last_packet_sample_rate_hz_ = rtc::nullopt;
+  }
+  return 0;
+}
+
+rtc::Optional<uint32_t> AcmReceiver::GetPlayoutTimestamp() {
+  return neteq_->GetPlayoutTimestamp();
+}
+
+int AcmReceiver::FilteredCurrentDelayMs() const {
+  return neteq_->FilteredCurrentDelayMs();
+}
+
+int AcmReceiver::TargetDelayMs() const {
+  return neteq_->TargetDelayMs();
+}
+
+int AcmReceiver::LastAudioCodec(CodecInst* codec) const {
+  rtc::CritScope lock(&crit_sect_);
+  if (!last_audio_decoder_) {
+    return -1;
+  }
+  *codec = *last_audio_decoder_;
+  return 0;
+}
+
+rtc::Optional<SdpAudioFormat> AcmReceiver::LastAudioFormat() const {
+  rtc::CritScope lock(&crit_sect_);
+  return last_audio_format_;
+}
+
+void AcmReceiver::GetNetworkStatistics(NetworkStatistics* acm_stat) {
+  NetEqNetworkStatistics neteq_stat;
+  // NetEq function always returns zero, so we don't check the return value.
+  neteq_->NetworkStatistics(&neteq_stat);
+
+  acm_stat->currentBufferSize = neteq_stat.current_buffer_size_ms;
+  acm_stat->preferredBufferSize = neteq_stat.preferred_buffer_size_ms;
+  acm_stat->jitterPeaksFound = neteq_stat.jitter_peaks_found ? true : false;
+  acm_stat->currentPacketLossRate = neteq_stat.packet_loss_rate;
+  acm_stat->currentExpandRate = neteq_stat.expand_rate;
+  acm_stat->currentSpeechExpandRate = neteq_stat.speech_expand_rate;
+  acm_stat->currentPreemptiveRate = neteq_stat.preemptive_rate;
+  acm_stat->currentAccelerateRate = neteq_stat.accelerate_rate;
+  acm_stat->currentSecondaryDecodedRate = neteq_stat.secondary_decoded_rate;
+  acm_stat->currentSecondaryDiscardedRate = neteq_stat.secondary_discarded_rate;
+  acm_stat->clockDriftPPM = neteq_stat.clockdrift_ppm;
+  acm_stat->addedSamples = neteq_stat.added_zero_samples;
+  acm_stat->meanWaitingTimeMs = neteq_stat.mean_waiting_time_ms;
+  acm_stat->medianWaitingTimeMs = neteq_stat.median_waiting_time_ms;
+  acm_stat->minWaitingTimeMs = neteq_stat.min_waiting_time_ms;
+  acm_stat->maxWaitingTimeMs = neteq_stat.max_waiting_time_ms;
+
+  NetEqLifetimeStatistics neteq_lifetime_stat = neteq_->GetLifetimeStatistics();
+  acm_stat->totalSamplesReceived = neteq_lifetime_stat.total_samples_received;
+  acm_stat->concealedSamples = neteq_lifetime_stat.concealed_samples;
+  acm_stat->concealmentEvents = neteq_lifetime_stat.concealment_events;
+  acm_stat->jitterBufferDelayMs = neteq_lifetime_stat.jitter_buffer_delay_ms;
+}
+
+int AcmReceiver::DecoderByPayloadType(uint8_t payload_type,
+                                      CodecInst* codec) const {
+  rtc::CritScope lock(&crit_sect_);
+  const rtc::Optional<CodecInst> ci = neteq_->GetDecoder(payload_type);
+  if (ci) {
+    *codec = *ci;
+    return 0;
+  } else {
+    RTC_LOG(LERROR) << "AcmReceiver::DecoderByPayloadType "
+                    << static_cast<int>(payload_type);
+    return -1;
+  }
+}
+
+int AcmReceiver::EnableNack(size_t max_nack_list_size) {
+  neteq_->EnableNack(max_nack_list_size);
+  return 0;
+}
+
+void AcmReceiver::DisableNack() {
+  neteq_->DisableNack();
+}
+
+std::vector<uint16_t> AcmReceiver::GetNackList(
+    int64_t round_trip_time_ms) const {
+  return neteq_->GetNackList(round_trip_time_ms);
+}
+
+void AcmReceiver::ResetInitialDelay() {
+  neteq_->SetMinimumDelay(0);
+  // TODO(turajs): Should NetEq Buffer be flushed?
+}
+
+const rtc::Optional<CodecInst> AcmReceiver::RtpHeaderToDecoder(
+    const RTPHeader& rtp_header,
+    uint8_t first_payload_byte) const {
+  const rtc::Optional<CodecInst> ci =
+      neteq_->GetDecoder(rtp_header.payloadType);
+  if (ci && STR_CASE_CMP(ci->plname, "red") == 0) {
+    // This is a RED packet. Get the payload of the audio codec.
+    return neteq_->GetDecoder(first_payload_byte & 0x7f);
+  } else {
+    return ci;
+  }
+}
+
+uint32_t AcmReceiver::NowInTimestamp(int decoder_sampling_rate) const {
+  // Down-cast the time to (32-6)-bit since we only care about
+  // the least significant bits. (32-6) bits cover 2^(32-6) = 67108864 ms.
+  // We masked 6 most significant bits of 32-bit so there is no overflow in
+  // the conversion from milliseconds to timestamp.
+  const uint32_t now_in_ms = static_cast<uint32_t>(
+      clock_->TimeInMilliseconds() & 0x03ffffff);
+  return static_cast<uint32_t>(
+      (decoder_sampling_rate / 1000) * now_in_ms);
+}
+
+void AcmReceiver::GetDecodingCallStatistics(
+    AudioDecodingCallStats* stats) const {
+  rtc::CritScope lock(&crit_sect_);
+  *stats = call_stats_.GetDecodingStatistics();
+}
+
+}  // namespace acm2
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/acm2/acm_receiver.h b/modules/audio_coding/acm2/acm_receiver.h
new file mode 100644
index 0000000..5c6b36f
--- /dev/null
+++ b/modules/audio_coding/acm2/acm_receiver.h
@@ -0,0 +1,298 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_ACM2_ACM_RECEIVER_H_
+#define MODULES_AUDIO_CODING_ACM2_ACM_RECEIVER_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/optional.h"
+#include "common_audio/vad/include/webrtc_vad.h"
+#include "modules/audio_coding/acm2/acm_resampler.h"
+#include "modules/audio_coding/acm2/call_statistics.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_coding/neteq/include/neteq.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/thread_annotations.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+struct CodecInst;
+class NetEq;
+
+namespace acm2 {
+
+class AcmReceiver {
+ public:
+  // Constructor of the class
+  explicit AcmReceiver(const AudioCodingModule::Config& config);
+
+  // Destructor of the class.
+  ~AcmReceiver();
+
+  //
+  // Inserts a payload with its associated RTP-header into NetEq.
+  //
+  // Input:
+  //   - rtp_header           : RTP header for the incoming payload containing
+  //                            information about payload type, sequence number,
+  //                            timestamp, SSRC and marker bit.
+  //   - incoming_payload     : Incoming audio payload.
+  //   - length_payload       : Length of incoming audio payload in bytes.
+  //
+  // Return value             : 0 if OK.
+  //                           <0 if NetEq returned an error.
+  //
+  int InsertPacket(const WebRtcRTPHeader& rtp_header,
+                   rtc::ArrayView<const uint8_t> incoming_payload);
+
+  //
+  // Asks NetEq for 10 milliseconds of decoded audio.
+  //
+  // Input:
+  //   -desired_freq_hz       : specifies the sampling rate [Hz] of the output
+  //                            audio. If set -1 indicates to resampling is
+  //                            is required and the audio returned at the
+  //                            sampling rate of the decoder.
+  //
+  // Output:
+  //   -audio_frame           : an audio frame were output data and
+  //                            associated parameters are written to.
+  //   -muted                 : if true, the sample data in audio_frame is not
+  //                            populated, and must be interpreted as all zero.
+  //
+  // Return value             : 0 if OK.
+  //                           -1 if NetEq returned an error.
+  //
+  int GetAudio(int desired_freq_hz, AudioFrame* audio_frame, bool* muted);
+
+  // Replace the current set of decoders with the specified set.
+  void SetCodecs(const std::map<int, SdpAudioFormat>& codecs);
+
+  //
+  // Adds a new codec to the NetEq codec database.
+  //
+  // Input:
+  //   - acm_codec_id        : ACM codec ID; -1 means external decoder.
+  //   - payload_type        : payload type.
+  //   - sample_rate_hz      : sample rate.
+  //   - audio_decoder       : pointer to a decoder object. If it's null, then
+  //                           NetEq will internally create a decoder object
+  //                           based on the value of |acm_codec_id| (which
+  //                           mustn't be -1). Otherwise, NetEq will use the
+  //                           given decoder for the given payload type. NetEq
+  //                           won't take ownership of the decoder; it's up to
+  //                           the caller to delete it when it's no longer
+  //                           needed.
+  //
+  //                           Providing an existing decoder object here is
+  //                           necessary for external decoders, but may also be
+  //                           used for built-in decoders if NetEq doesn't have
+  //                           all the info it needs to construct them properly
+  //                           (e.g. iSAC, where the decoder needs to be paired
+  //                           with an encoder).
+  //
+  // Return value             : 0 if OK.
+  //                           <0 if NetEq returned an error.
+  //
+  int AddCodec(int acm_codec_id,
+               uint8_t payload_type,
+               size_t channels,
+               int sample_rate_hz,
+               AudioDecoder* audio_decoder,
+               const std::string& name);
+
+  // Adds a new decoder to the NetEq codec database. Returns true iff
+  // successful.
+  bool AddCodec(int rtp_payload_type, const SdpAudioFormat& audio_format);
+
+  //
+  // Sets a minimum delay for packet buffer. The given delay is maintained,
+  // unless channel condition dictates a higher delay.
+  //
+  // Input:
+  //   - delay_ms             : minimum delay in milliseconds.
+  //
+  // Return value             : 0 if OK.
+  //                           <0 if NetEq returned an error.
+  //
+  int SetMinimumDelay(int delay_ms);
+
+  //
+  // Sets a maximum delay [ms] for the packet buffer. The target delay does not
+  // exceed the given value, even if channel condition requires so.
+  //
+  // Input:
+  //   - delay_ms             : maximum delay in milliseconds.
+  //
+  // Return value             : 0 if OK.
+  //                           <0 if NetEq returned an error.
+  //
+  int SetMaximumDelay(int delay_ms);
+
+  //
+  // Get least required delay computed based on channel conditions. Note that
+  // this is before applying any user-defined limits (specified by calling
+  // (SetMinimumDelay() and/or SetMaximumDelay()).
+  //
+  int LeastRequiredDelayMs() const;
+
+  //
+  // Resets the initial delay to zero.
+  //
+  void ResetInitialDelay();
+
+  // Returns the sample rate of the decoder associated with the last incoming
+  // packet. If no packet of a registered non-CNG codec has been received, the
+  // return value is empty. Also, if the decoder was unregistered since the last
+  // packet was inserted, the return value is empty.
+  rtc::Optional<int> last_packet_sample_rate_hz() const;
+
+  // Returns last_output_sample_rate_hz from the NetEq instance.
+  int last_output_sample_rate_hz() const;
+
+  //
+  // Get the current network statistics from NetEq.
+  //
+  // Output:
+  //   - statistics           : The current network statistics.
+  //
+  void GetNetworkStatistics(NetworkStatistics* statistics);
+
+  //
+  // Flushes the NetEq packet and speech buffers.
+  //
+  void FlushBuffers();
+
+  //
+  // Removes a payload-type from the NetEq codec database.
+  //
+  // Input:
+  //   - payload_type         : the payload-type to be removed.
+  //
+  // Return value             : 0 if OK.
+  //                           -1 if an error occurred.
+  //
+  int RemoveCodec(uint8_t payload_type);
+
+  //
+  // Remove all registered codecs.
+  //
+  void RemoveAllCodecs();
+
+  // Returns the RTP timestamp for the last sample delivered by GetAudio().
+  // The return value will be empty if no valid timestamp is available.
+  rtc::Optional<uint32_t> GetPlayoutTimestamp();
+
+  // Returns the current total delay from NetEq (packet buffer and sync buffer)
+  // in ms, with smoothing applied to even out short-time fluctuations due to
+  // jitter. The packet buffer part of the delay is not updated during DTX/CNG
+  // periods.
+  //
+  int FilteredCurrentDelayMs() const;
+
+  // Returns the current target delay for NetEq in ms.
+  //
+  int TargetDelayMs() const;
+
+  //
+  // Get the audio codec associated with the last non-CNG/non-DTMF received
+  // payload. If no non-CNG/non-DTMF packet is received -1 is returned,
+  // otherwise return 0.
+  //
+  int LastAudioCodec(CodecInst* codec) const;
+
+  rtc::Optional<SdpAudioFormat> LastAudioFormat() const;
+
+  //
+  // Get a decoder given its registered payload-type.
+  //
+  // Input:
+  //    -payload_type         : the payload-type of the codec to be retrieved.
+  //
+  // Output:
+  //    -codec                : codec associated with the given payload-type.
+  //
+  // Return value             : 0 if succeeded.
+  //                           -1 if failed, e.g. given payload-type is not
+  //                              registered.
+  //
+  int DecoderByPayloadType(uint8_t payload_type,
+                           CodecInst* codec) const;
+
+  //
+  // Enable NACK and set the maximum size of the NACK list. If NACK is already
+  // enabled then the maximum NACK list size is modified accordingly.
+  //
+  // Input:
+  //    -max_nack_list_size  : maximum NACK list size
+  //                           should be positive (none zero) and less than or
+  //                           equal to |Nack::kNackListSizeLimit|
+  // Return value
+  //                         : 0 if succeeded.
+  //                          -1 if failed
+  //
+  int EnableNack(size_t max_nack_list_size);
+
+  // Disable NACK.
+  void DisableNack();
+
+  //
+  // Get a list of packets to be retransmitted.
+  //
+  // Input:
+  //    -round_trip_time_ms : estimate of the round-trip-time (in milliseconds).
+  // Return value           : list of packets to be retransmitted.
+  //
+  std::vector<uint16_t> GetNackList(int64_t round_trip_time_ms) const;
+
+  //
+  // Get statistics of calls to GetAudio().
+  void GetDecodingCallStatistics(AudioDecodingCallStats* stats) const;
+
+ private:
+  struct Decoder {
+    int acm_codec_id;
+    uint8_t payload_type;
+    // This field is meaningful for codecs where both mono and
+    // stereo versions are registered under the same ID.
+    size_t channels;
+    int sample_rate_hz;
+  };
+
+  const rtc::Optional<CodecInst> RtpHeaderToDecoder(const RTPHeader& rtp_header,
+                                                    uint8_t first_payload_byte)
+      const RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  uint32_t NowInTimestamp(int decoder_sampling_rate) const;
+
+  rtc::CriticalSection crit_sect_;
+  rtc::Optional<CodecInst> last_audio_decoder_ RTC_GUARDED_BY(crit_sect_);
+  rtc::Optional<SdpAudioFormat> last_audio_format_ RTC_GUARDED_BY(crit_sect_);
+  ACMResampler resampler_ RTC_GUARDED_BY(crit_sect_);
+  std::unique_ptr<int16_t[]> last_audio_buffer_ RTC_GUARDED_BY(crit_sect_);
+  CallStatistics call_stats_ RTC_GUARDED_BY(crit_sect_);
+  const std::unique_ptr<NetEq> neteq_;  // NetEq is thread-safe; no lock needed.
+  const Clock* const clock_;
+  bool resampled_last_output_frame_ RTC_GUARDED_BY(crit_sect_);
+  rtc::Optional<int> last_packet_sample_rate_hz_ RTC_GUARDED_BY(crit_sect_);
+};
+
+}  // namespace acm2
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_ACM2_ACM_RECEIVER_H_
diff --git a/modules/audio_coding/acm2/acm_receiver_unittest.cc b/modules/audio_coding/acm2/acm_receiver_unittest.cc
new file mode 100644
index 0000000..8d0b2f1
--- /dev/null
+++ b/modules/audio_coding/acm2/acm_receiver_unittest.cc
@@ -0,0 +1,507 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/acm2/acm_receiver.h"
+
+#include <algorithm>  // std::min
+#include <memory>
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "modules/audio_coding/acm2/rent_a_codec.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_coding/neteq/tools/rtp_generator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+namespace acm2 {
+namespace {
+
+bool CodecsEqual(const CodecInst& codec_a, const CodecInst& codec_b) {
+    if (strcmp(codec_a.plname, codec_b.plname) != 0 ||
+        codec_a.plfreq != codec_b.plfreq ||
+        codec_a.pltype != codec_b.pltype ||
+        codec_b.channels != codec_a.channels)
+      return false;
+    return true;
+}
+
+struct CodecIdInst {
+  explicit CodecIdInst(RentACodec::CodecId codec_id) {
+    const auto codec_ix = RentACodec::CodecIndexFromId(codec_id);
+    EXPECT_TRUE(codec_ix);
+    id = *codec_ix;
+    const auto codec_inst = RentACodec::CodecInstById(codec_id);
+    EXPECT_TRUE(codec_inst);
+    inst = *codec_inst;
+  }
+  int id;
+  CodecInst inst;
+};
+
+}  // namespace
+
+class AcmReceiverTestOldApi : public AudioPacketizationCallback,
+                              public ::testing::Test {
+ protected:
+  AcmReceiverTestOldApi()
+      : timestamp_(0),
+        packet_sent_(false),
+        last_packet_send_timestamp_(timestamp_),
+        last_frame_type_(kEmptyFrame) {
+    config_.decoder_factory = CreateBuiltinAudioDecoderFactory();
+  }
+
+  ~AcmReceiverTestOldApi() {}
+
+  void SetUp() override {
+    acm_.reset(AudioCodingModule::Create(config_));
+    receiver_.reset(new AcmReceiver(config_));
+    ASSERT_TRUE(receiver_.get() != NULL);
+    ASSERT_TRUE(acm_.get() != NULL);
+    codecs_ = RentACodec::Database();
+
+    acm_->InitializeReceiver();
+    acm_->RegisterTransportCallback(this);
+
+    rtp_header_.header.sequenceNumber = 0;
+    rtp_header_.header.timestamp = 0;
+    rtp_header_.header.markerBit = false;
+    rtp_header_.header.ssrc = 0x12345678;  // Arbitrary.
+    rtp_header_.header.numCSRCs = 0;
+    rtp_header_.header.payloadType = 0;
+    rtp_header_.frameType = kAudioFrameSpeech;
+    rtp_header_.type.Audio.isCNG = false;
+  }
+
+  void TearDown() override {}
+
+  void InsertOnePacketOfSilence(int codec_id) {
+    CodecInst codec =
+        *RentACodec::CodecInstById(*RentACodec::CodecIdFromIndex(codec_id));
+    if (timestamp_ == 0) {  // This is the first time inserting audio.
+      ASSERT_EQ(0, acm_->RegisterSendCodec(codec));
+    } else {
+      auto current_codec = acm_->SendCodec();
+      ASSERT_TRUE(current_codec);
+      if (!CodecsEqual(codec, *current_codec))
+        ASSERT_EQ(0, acm_->RegisterSendCodec(codec));
+    }
+    AudioFrame frame;
+    // Frame setup according to the codec.
+    frame.sample_rate_hz_ = codec.plfreq;
+    frame.samples_per_channel_ = codec.plfreq / 100;  // 10 ms.
+    frame.num_channels_ = codec.channels;
+    frame.Mute();
+    packet_sent_ = false;
+    last_packet_send_timestamp_ = timestamp_;
+    while (!packet_sent_) {
+      frame.timestamp_ = timestamp_;
+      timestamp_ += rtc::checked_cast<uint32_t>(frame.samples_per_channel_);
+      ASSERT_GE(acm_->Add10MsData(frame), 0);
+    }
+  }
+
+  template <size_t N>
+  void AddSetOfCodecs(const RentACodec::CodecId(&ids)[N]) {
+    for (auto id : ids) {
+      const auto i = RentACodec::CodecIndexFromId(id);
+      ASSERT_TRUE(i);
+      ASSERT_EQ(0, receiver_->AddCodec(*i, codecs_[*i].pltype,
+                                       codecs_[*i].channels, codecs_[*i].plfreq,
+                                       nullptr, codecs_[*i].plname));
+    }
+  }
+
+  int SendData(FrameType frame_type,
+               uint8_t payload_type,
+               uint32_t timestamp,
+               const uint8_t* payload_data,
+               size_t payload_len_bytes,
+               const RTPFragmentationHeader* fragmentation) override {
+    if (frame_type == kEmptyFrame)
+      return 0;
+
+    rtp_header_.header.payloadType = payload_type;
+    rtp_header_.frameType = frame_type;
+    if (frame_type == kAudioFrameSpeech)
+      rtp_header_.type.Audio.isCNG = false;
+    else
+      rtp_header_.type.Audio.isCNG = true;
+    rtp_header_.header.timestamp = timestamp;
+
+    int ret_val = receiver_->InsertPacket(
+        rtp_header_,
+        rtc::ArrayView<const uint8_t>(payload_data, payload_len_bytes));
+    if (ret_val < 0) {
+      assert(false);
+      return -1;
+    }
+    rtp_header_.header.sequenceNumber++;
+    packet_sent_ = true;
+    last_frame_type_ = frame_type;
+    return 0;
+  }
+
+  AudioCodingModule::Config config_;
+  std::unique_ptr<AcmReceiver> receiver_;
+  rtc::ArrayView<const CodecInst> codecs_;
+  std::unique_ptr<AudioCodingModule> acm_;
+  WebRtcRTPHeader rtp_header_;
+  uint32_t timestamp_;
+  bool packet_sent_;  // Set when SendData is called reset when inserting audio.
+  uint32_t last_packet_send_timestamp_;
+  FrameType last_frame_type_;
+};
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_AddCodecGetCodec DISABLED_AddCodecGetCodec
+#else
+#define MAYBE_AddCodecGetCodec AddCodecGetCodec
+#endif
+TEST_F(AcmReceiverTestOldApi, MAYBE_AddCodecGetCodec) {
+  // Add codec.
+  for (size_t n = 0; n < codecs_.size(); ++n) {
+    if (n & 0x1) {  // Just add codecs with odd index.
+      EXPECT_EQ(
+          0, receiver_->AddCodec(rtc::checked_cast<int>(n), codecs_[n].pltype,
+                                 codecs_[n].channels, codecs_[n].plfreq, NULL,
+                                 codecs_[n].plname));
+    }
+  }
+  // Get codec and compare.
+  for (size_t n = 0; n < codecs_.size(); ++n) {
+    CodecInst my_codec;
+    if (n & 0x1) {
+      // Codecs with odd index should match the reference.
+      EXPECT_EQ(0, receiver_->DecoderByPayloadType(codecs_[n].pltype,
+                                                   &my_codec));
+      EXPECT_TRUE(CodecsEqual(codecs_[n], my_codec));
+    } else {
+      // Codecs with even index are not registered.
+      EXPECT_EQ(-1, receiver_->DecoderByPayloadType(codecs_[n].pltype,
+                                                    &my_codec));
+    }
+  }
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_AddCodecChangePayloadType DISABLED_AddCodecChangePayloadType
+#else
+#define MAYBE_AddCodecChangePayloadType AddCodecChangePayloadType
+#endif
+TEST_F(AcmReceiverTestOldApi, MAYBE_AddCodecChangePayloadType) {
+  const CodecIdInst codec1(RentACodec::CodecId::kPCMA);
+  CodecInst codec2 = codec1.inst;
+  ++codec2.pltype;
+  CodecInst test_codec;
+
+  // Register the same codec with different payloads.
+  EXPECT_EQ(0, receiver_->AddCodec(codec1.id, codec1.inst.pltype,
+                                   codec1.inst.channels, codec1.inst.plfreq,
+                                   nullptr, codec1.inst.plname));
+  EXPECT_EQ(0, receiver_->AddCodec(codec1.id, codec2.pltype, codec2.channels,
+                                   codec2.plfreq, NULL, codec2.plname));
+
+  // Both payload types should exist.
+  EXPECT_EQ(0,
+            receiver_->DecoderByPayloadType(codec1.inst.pltype, &test_codec));
+  EXPECT_EQ(true, CodecsEqual(codec1.inst, test_codec));
+  EXPECT_EQ(0, receiver_->DecoderByPayloadType(codec2.pltype, &test_codec));
+  EXPECT_EQ(true, CodecsEqual(codec2, test_codec));
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_AddCodecChangeCodecId DISABLED_AddCodecChangeCodecId
+#else
+#define MAYBE_AddCodecChangeCodecId AddCodecChangeCodecId
+#endif
+TEST_F(AcmReceiverTestOldApi, AddCodecChangeCodecId) {
+  const CodecIdInst codec1(RentACodec::CodecId::kPCMU);
+  CodecIdInst codec2(RentACodec::CodecId::kPCMA);
+  codec2.inst.pltype = codec1.inst.pltype;
+  CodecInst test_codec;
+
+  // Register the same payload type with different codec ID.
+  EXPECT_EQ(0, receiver_->AddCodec(codec1.id, codec1.inst.pltype,
+                                   codec1.inst.channels, codec1.inst.plfreq,
+                                   nullptr, codec1.inst.plname));
+  EXPECT_EQ(0, receiver_->AddCodec(codec2.id, codec2.inst.pltype,
+                                   codec2.inst.channels, codec2.inst.plfreq,
+                                   nullptr, codec2.inst.plname));
+
+  // Make sure that the last codec is used.
+  EXPECT_EQ(0,
+            receiver_->DecoderByPayloadType(codec2.inst.pltype, &test_codec));
+  EXPECT_EQ(true, CodecsEqual(codec2.inst, test_codec));
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_AddCodecRemoveCodec DISABLED_AddCodecRemoveCodec
+#else
+#define MAYBE_AddCodecRemoveCodec AddCodecRemoveCodec
+#endif
+TEST_F(AcmReceiverTestOldApi, MAYBE_AddCodecRemoveCodec) {
+  const CodecIdInst codec(RentACodec::CodecId::kPCMA);
+  const int payload_type = codec.inst.pltype;
+  EXPECT_EQ(
+      0, receiver_->AddCodec(codec.id, codec.inst.pltype, codec.inst.channels,
+                             codec.inst.plfreq, nullptr, codec.inst.plname));
+
+  // Remove non-existing codec should not fail. ACM1 legacy.
+  EXPECT_EQ(0, receiver_->RemoveCodec(payload_type + 1));
+
+  // Remove an existing codec.
+  EXPECT_EQ(0, receiver_->RemoveCodec(payload_type));
+
+  // Ask for the removed codec, must fail.
+  CodecInst ci;
+  EXPECT_EQ(-1, receiver_->DecoderByPayloadType(payload_type, &ci));
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_SampleRate DISABLED_SampleRate
+#else
+#define MAYBE_SampleRate SampleRate
+#endif
+TEST_F(AcmReceiverTestOldApi, MAYBE_SampleRate) {
+  const RentACodec::CodecId kCodecId[] = {RentACodec::CodecId::kISAC,
+                                          RentACodec::CodecId::kISACSWB};
+  AddSetOfCodecs(kCodecId);
+
+  AudioFrame frame;
+  const int kOutSampleRateHz = 8000;  // Different than codec sample rate.
+  for (const auto codec_id : kCodecId) {
+    const CodecIdInst codec(codec_id);
+    const int num_10ms_frames = codec.inst.pacsize / (codec.inst.plfreq / 100);
+    InsertOnePacketOfSilence(codec.id);
+    for (int k = 0; k < num_10ms_frames; ++k) {
+      bool muted;
+      EXPECT_EQ(0, receiver_->GetAudio(kOutSampleRateHz, &frame, &muted));
+    }
+    EXPECT_EQ(codec.inst.plfreq, receiver_->last_output_sample_rate_hz());
+  }
+}
+
+class AcmReceiverTestFaxModeOldApi : public AcmReceiverTestOldApi {
+ protected:
+  AcmReceiverTestFaxModeOldApi() {
+    config_.neteq_config.playout_mode = kPlayoutFax;
+  }
+
+  void RunVerifyAudioFrame(RentACodec::CodecId codec_id) {
+    // Make sure "fax mode" is enabled. This will avoid delay changes unless the
+    // packet-loss concealment is made. We do this in order to make the
+    // timestamp increments predictable; in normal mode, NetEq may decide to do
+    // accelerate or pre-emptive expand operations after some time, offsetting
+    // the timestamp.
+    EXPECT_EQ(kPlayoutFax, config_.neteq_config.playout_mode);
+
+    const RentACodec::CodecId kCodecId[] = {codec_id};
+    AddSetOfCodecs(kCodecId);
+
+    const CodecIdInst codec(codec_id);
+    const int output_sample_rate_hz = codec.inst.plfreq;
+    const size_t output_channels = codec.inst.channels;
+    const size_t samples_per_ms = rtc::checked_cast<size_t>(
+        rtc::CheckedDivExact(output_sample_rate_hz, 1000));
+    const int num_10ms_frames = rtc::CheckedDivExact(
+        codec.inst.pacsize, rtc::checked_cast<int>(10 * samples_per_ms));
+    const AudioFrame::VADActivity expected_vad_activity =
+        output_sample_rate_hz > 16000 ? AudioFrame::kVadActive
+                                      : AudioFrame::kVadPassive;
+
+    // Expect the first output timestamp to be 5*fs/8000 samples before the
+    // first inserted timestamp (because of NetEq's look-ahead). (This value is
+    // defined in Expand::overlap_length_.)
+    uint32_t expected_output_ts = last_packet_send_timestamp_ -
+        rtc::CheckedDivExact(5 * output_sample_rate_hz, 8000);
+
+    AudioFrame frame;
+    bool muted;
+    EXPECT_EQ(0, receiver_->GetAudio(output_sample_rate_hz, &frame, &muted));
+    // Expect timestamp = 0 before first packet is inserted.
+    EXPECT_EQ(0u, frame.timestamp_);
+    for (int i = 0; i < 5; ++i) {
+      InsertOnePacketOfSilence(codec.id);
+      for (int k = 0; k < num_10ms_frames; ++k) {
+        EXPECT_EQ(0,
+                  receiver_->GetAudio(output_sample_rate_hz, &frame, &muted));
+        EXPECT_EQ(expected_output_ts, frame.timestamp_);
+        expected_output_ts += rtc::checked_cast<uint32_t>(10 * samples_per_ms);
+        EXPECT_EQ(10 * samples_per_ms, frame.samples_per_channel_);
+        EXPECT_EQ(output_sample_rate_hz, frame.sample_rate_hz_);
+        EXPECT_EQ(output_channels, frame.num_channels_);
+        EXPECT_EQ(AudioFrame::kNormalSpeech, frame.speech_type_);
+        EXPECT_EQ(expected_vad_activity, frame.vad_activity_);
+        EXPECT_FALSE(muted);
+      }
+    }
+  }
+};
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_VerifyAudioFramePCMU DISABLED_VerifyAudioFramePCMU
+#else
+#define MAYBE_VerifyAudioFramePCMU VerifyAudioFramePCMU
+#endif
+TEST_F(AcmReceiverTestFaxModeOldApi, MAYBE_VerifyAudioFramePCMU) {
+  RunVerifyAudioFrame(RentACodec::CodecId::kPCMU);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_VerifyAudioFrameISAC DISABLED_VerifyAudioFrameISAC
+#else
+#define MAYBE_VerifyAudioFrameISAC VerifyAudioFrameISAC
+#endif
+TEST_F(AcmReceiverTestFaxModeOldApi, MAYBE_VerifyAudioFrameISAC) {
+  RunVerifyAudioFrame(RentACodec::CodecId::kISAC);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_VerifyAudioFrameOpus DISABLED_VerifyAudioFrameOpus
+#else
+#define MAYBE_VerifyAudioFrameOpus VerifyAudioFrameOpus
+#endif
+TEST_F(AcmReceiverTestFaxModeOldApi, MAYBE_VerifyAudioFrameOpus) {
+  RunVerifyAudioFrame(RentACodec::CodecId::kOpus);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_PostdecodingVad DISABLED_PostdecodingVad
+#else
+#define MAYBE_PostdecodingVad PostdecodingVad
+#endif
+TEST_F(AcmReceiverTestOldApi, MAYBE_PostdecodingVad) {
+  EXPECT_TRUE(config_.neteq_config.enable_post_decode_vad);
+  const CodecIdInst codec(RentACodec::CodecId::kPCM16Bwb);
+  ASSERT_EQ(
+      0, receiver_->AddCodec(codec.id, codec.inst.pltype, codec.inst.channels,
+                             codec.inst.plfreq, nullptr, ""));
+  const int kNumPackets = 5;
+  const int num_10ms_frames = codec.inst.pacsize / (codec.inst.plfreq / 100);
+  AudioFrame frame;
+  for (int n = 0; n < kNumPackets; ++n) {
+    InsertOnePacketOfSilence(codec.id);
+    for (int k = 0; k < num_10ms_frames; ++k) {
+      bool muted;
+      ASSERT_EQ(0, receiver_->GetAudio(codec.inst.plfreq, &frame, &muted));
+    }
+  }
+  EXPECT_EQ(AudioFrame::kVadPassive, frame.vad_activity_);
+}
+
+class AcmReceiverTestPostDecodeVadPassiveOldApi : public AcmReceiverTestOldApi {
+ protected:
+  AcmReceiverTestPostDecodeVadPassiveOldApi() {
+    config_.neteq_config.enable_post_decode_vad = false;
+  }
+};
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_PostdecodingVad DISABLED_PostdecodingVad
+#else
+#define MAYBE_PostdecodingVad PostdecodingVad
+#endif
+TEST_F(AcmReceiverTestPostDecodeVadPassiveOldApi, MAYBE_PostdecodingVad) {
+  EXPECT_FALSE(config_.neteq_config.enable_post_decode_vad);
+  const CodecIdInst codec(RentACodec::CodecId::kPCM16Bwb);
+  ASSERT_EQ(
+      0, receiver_->AddCodec(codec.id, codec.inst.pltype, codec.inst.channels,
+                             codec.inst.plfreq, nullptr, ""));
+  const int kNumPackets = 5;
+  const int num_10ms_frames = codec.inst.pacsize / (codec.inst.plfreq / 100);
+  AudioFrame frame;
+  for (int n = 0; n < kNumPackets; ++n) {
+    InsertOnePacketOfSilence(codec.id);
+    for (int k = 0; k < num_10ms_frames; ++k) {
+      bool muted;
+      ASSERT_EQ(0, receiver_->GetAudio(codec.inst.plfreq, &frame, &muted));
+    }
+  }
+  EXPECT_EQ(AudioFrame::kVadUnknown, frame.vad_activity_);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_LastAudioCodec DISABLED_LastAudioCodec
+#else
+#define MAYBE_LastAudioCodec LastAudioCodec
+#endif
+#if defined(WEBRTC_CODEC_ISAC)
+TEST_F(AcmReceiverTestOldApi, MAYBE_LastAudioCodec) {
+  const RentACodec::CodecId kCodecId[] = {
+      RentACodec::CodecId::kISAC, RentACodec::CodecId::kPCMA,
+      RentACodec::CodecId::kISACSWB, RentACodec::CodecId::kPCM16Bswb32kHz};
+  AddSetOfCodecs(kCodecId);
+
+  const RentACodec::CodecId kCngId[] = {
+      // Not including full-band.
+      RentACodec::CodecId::kCNNB, RentACodec::CodecId::kCNWB,
+      RentACodec::CodecId::kCNSWB};
+  AddSetOfCodecs(kCngId);
+
+  // Register CNG at sender side.
+  for (auto id : kCngId)
+    ASSERT_EQ(0, acm_->RegisterSendCodec(CodecIdInst(id).inst));
+
+  CodecInst codec;
+  // No audio payload is received.
+  EXPECT_EQ(-1, receiver_->LastAudioCodec(&codec));
+
+  // Start with sending DTX.
+  ASSERT_EQ(0, acm_->SetVAD(true, true, VADVeryAggr));
+  packet_sent_ = false;
+  InsertOnePacketOfSilence(CodecIdInst(kCodecId[0]).id);  // Enough to test
+                                                          // with one codec.
+  ASSERT_TRUE(packet_sent_);
+  EXPECT_EQ(kAudioFrameCN, last_frame_type_);
+
+  // Has received, only, DTX. Last Audio codec is undefined.
+  EXPECT_EQ(-1, receiver_->LastAudioCodec(&codec));
+  EXPECT_FALSE(receiver_->last_packet_sample_rate_hz());
+
+  for (auto id : kCodecId) {
+    const CodecIdInst c(id);
+
+    // Set DTX off to send audio payload.
+    acm_->SetVAD(false, false, VADAggr);
+    packet_sent_ = false;
+    InsertOnePacketOfSilence(c.id);
+
+    // Sanity check if Actually an audio payload received, and it should be
+    // of type "speech."
+    ASSERT_TRUE(packet_sent_);
+    ASSERT_EQ(kAudioFrameSpeech, last_frame_type_);
+    EXPECT_EQ(c.inst.plfreq, receiver_->last_packet_sample_rate_hz());
+
+    // Set VAD on to send DTX. Then check if the "Last Audio codec" returns
+    // the expected codec.
+    acm_->SetVAD(true, true, VADAggr);
+
+    // Do as many encoding until a DTX is sent.
+    while (last_frame_type_ != kAudioFrameCN) {
+      packet_sent_ = false;
+      InsertOnePacketOfSilence(c.id);
+      ASSERT_TRUE(packet_sent_);
+    }
+    EXPECT_EQ(c.inst.plfreq, receiver_->last_packet_sample_rate_hz());
+    EXPECT_EQ(0, receiver_->LastAudioCodec(&codec));
+    EXPECT_TRUE(CodecsEqual(c.inst, codec));
+  }
+}
+#endif
+
+}  // namespace acm2
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/acm2/acm_resampler.cc b/modules/audio_coding/acm2/acm_resampler.cc
new file mode 100644
index 0000000..b97ced2
--- /dev/null
+++ b/modules/audio_coding/acm2/acm_resampler.cc
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/acm2/acm_resampler.h"
+
+#include <assert.h>
+#include <string.h>
+
+#include "common_audio/resampler/include/resampler.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace acm2 {
+
+ACMResampler::ACMResampler() {
+}
+
+ACMResampler::~ACMResampler() {
+}
+
+int ACMResampler::Resample10Msec(const int16_t* in_audio,
+                                 int in_freq_hz,
+                                 int out_freq_hz,
+                                 size_t num_audio_channels,
+                                 size_t out_capacity_samples,
+                                 int16_t* out_audio) {
+  size_t in_length = in_freq_hz * num_audio_channels / 100;
+  if (in_freq_hz == out_freq_hz) {
+    if (out_capacity_samples < in_length) {
+      assert(false);
+      return -1;
+    }
+    memcpy(out_audio, in_audio, in_length * sizeof(int16_t));
+    return static_cast<int>(in_length / num_audio_channels);
+  }
+
+  if (resampler_.InitializeIfNeeded(in_freq_hz, out_freq_hz,
+                                    num_audio_channels) != 0) {
+    RTC_LOG(LS_ERROR) << "InitializeIfNeeded(" << in_freq_hz << ", "
+                      << out_freq_hz << ", " << num_audio_channels
+                      << ") failed.";
+    return -1;
+  }
+
+  int out_length =
+      resampler_.Resample(in_audio, in_length, out_audio, out_capacity_samples);
+  if (out_length == -1) {
+    RTC_LOG(LS_ERROR) << "Resample(" << in_audio << ", " << in_length << ", "
+                      << out_audio << ", " << out_capacity_samples
+                      << ") failed.";
+    return -1;
+  }
+
+  return static_cast<int>(out_length / num_audio_channels);
+}
+
+}  // namespace acm2
+}  // namespace webrtc
diff --git a/modules/audio_coding/acm2/acm_resampler.h b/modules/audio_coding/acm2/acm_resampler.h
new file mode 100644
index 0000000..d7d7bcf
--- /dev/null
+++ b/modules/audio_coding/acm2/acm_resampler.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_ACM2_ACM_RESAMPLER_H_
+#define MODULES_AUDIO_CODING_ACM2_ACM_RESAMPLER_H_
+
+#include "common_audio/resampler/include/push_resampler.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace acm2 {
+
+class ACMResampler {
+ public:
+  ACMResampler();
+  ~ACMResampler();
+
+  int Resample10Msec(const int16_t* in_audio,
+                     int in_freq_hz,
+                     int out_freq_hz,
+                     size_t num_audio_channels,
+                     size_t out_capacity_samples,
+                     int16_t* out_audio);
+
+ private:
+  PushResampler<int16_t> resampler_;
+};
+
+}  // namespace acm2
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_ACM2_ACM_RESAMPLER_H_
diff --git a/modules/audio_coding/acm2/acm_send_test.cc b/modules/audio_coding/acm2/acm_send_test.cc
new file mode 100644
index 0000000..307c906
--- /dev/null
+++ b/modules/audio_coding/acm2/acm_send_test.cc
@@ -0,0 +1,160 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/acm2/acm_send_test.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "api/audio_codecs/audio_encoder.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "modules/audio_coding/neteq/tools/packet.h"
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+AcmSendTestOldApi::AcmSendTestOldApi(InputAudioFile* audio_source,
+                                     int source_rate_hz,
+                                     int test_duration_ms)
+    : clock_(0),
+      acm_(webrtc::AudioCodingModule::Create(&clock_)),
+      audio_source_(audio_source),
+      source_rate_hz_(source_rate_hz),
+      input_block_size_samples_(
+          static_cast<size_t>(source_rate_hz_ * kBlockSizeMs / 1000)),
+      codec_registered_(false),
+      test_duration_ms_(test_duration_ms),
+      frame_type_(kAudioFrameSpeech),
+      payload_type_(0),
+      timestamp_(0),
+      sequence_number_(0) {
+  input_frame_.sample_rate_hz_ = source_rate_hz_;
+  input_frame_.num_channels_ = 1;
+  input_frame_.samples_per_channel_ = input_block_size_samples_;
+  assert(input_block_size_samples_ * input_frame_.num_channels_ <=
+         AudioFrame::kMaxDataSizeSamples);
+  acm_->RegisterTransportCallback(this);
+}
+
+AcmSendTestOldApi::~AcmSendTestOldApi() = default;
+
+bool AcmSendTestOldApi::RegisterCodec(const char* payload_name,
+                                      int sampling_freq_hz,
+                                      int channels,
+                                      int payload_type,
+                                      int frame_size_samples) {
+  CodecInst codec;
+  RTC_CHECK_EQ(0, AudioCodingModule::Codec(payload_name, &codec,
+                                           sampling_freq_hz, channels));
+  codec.pltype = payload_type;
+  codec.pacsize = frame_size_samples;
+  codec_registered_ = (acm_->RegisterSendCodec(codec) == 0);
+  input_frame_.num_channels_ = channels;
+  assert(input_block_size_samples_ * input_frame_.num_channels_ <=
+         AudioFrame::kMaxDataSizeSamples);
+  return codec_registered_;
+}
+
+bool AcmSendTestOldApi::RegisterExternalCodec(
+    AudioEncoder* external_speech_encoder) {
+  acm_->RegisterExternalSendCodec(external_speech_encoder);
+  input_frame_.num_channels_ = external_speech_encoder->NumChannels();
+  assert(input_block_size_samples_ * input_frame_.num_channels_ <=
+         AudioFrame::kMaxDataSizeSamples);
+  return codec_registered_ = true;
+}
+
+std::unique_ptr<Packet> AcmSendTestOldApi::NextPacket() {
+  assert(codec_registered_);
+  if (filter_.test(static_cast<size_t>(payload_type_))) {
+    // This payload type should be filtered out. Since the payload type is the
+    // same throughout the whole test run, no packet at all will be delivered.
+    // We can just as well signal that the test is over by returning NULL.
+    return nullptr;
+  }
+  // Insert audio and process until one packet is produced.
+  while (clock_.TimeInMilliseconds() < test_duration_ms_) {
+    clock_.AdvanceTimeMilliseconds(kBlockSizeMs);
+    RTC_CHECK(audio_source_->Read(input_block_size_samples_,
+                                  input_frame_.mutable_data()));
+    if (input_frame_.num_channels_ > 1) {
+      InputAudioFile::DuplicateInterleaved(input_frame_.data(),
+                                           input_block_size_samples_,
+                                           input_frame_.num_channels_,
+                                           input_frame_.mutable_data());
+    }
+    data_to_send_ = false;
+    RTC_CHECK_GE(acm_->Add10MsData(input_frame_), 0);
+    input_frame_.timestamp_ += static_cast<uint32_t>(input_block_size_samples_);
+    if (data_to_send_) {
+      // Encoded packet received.
+      return CreatePacket();
+    }
+  }
+  // Test ended.
+  return nullptr;
+}
+
+// This method receives the callback from ACM when a new packet is produced.
+int32_t AcmSendTestOldApi::SendData(
+    FrameType frame_type,
+    uint8_t payload_type,
+    uint32_t timestamp,
+    const uint8_t* payload_data,
+    size_t payload_len_bytes,
+    const RTPFragmentationHeader* fragmentation) {
+  // Store the packet locally.
+  frame_type_ = frame_type;
+  payload_type_ = payload_type;
+  timestamp_ = timestamp;
+  last_payload_vec_.assign(payload_data, payload_data + payload_len_bytes);
+  assert(last_payload_vec_.size() == payload_len_bytes);
+  data_to_send_ = true;
+  return 0;
+}
+
+std::unique_ptr<Packet> AcmSendTestOldApi::CreatePacket() {
+  const size_t kRtpHeaderSize = 12;
+  size_t allocated_bytes = last_payload_vec_.size() + kRtpHeaderSize;
+  uint8_t* packet_memory = new uint8_t[allocated_bytes];
+  // Populate the header bytes.
+  packet_memory[0] = 0x80;
+  packet_memory[1] = static_cast<uint8_t>(payload_type_);
+  packet_memory[2] = (sequence_number_ >> 8) & 0xFF;
+  packet_memory[3] = (sequence_number_) & 0xFF;
+  packet_memory[4] = (timestamp_ >> 24) & 0xFF;
+  packet_memory[5] = (timestamp_ >> 16) & 0xFF;
+  packet_memory[6] = (timestamp_ >> 8) & 0xFF;
+  packet_memory[7] = timestamp_ & 0xFF;
+  // Set SSRC to 0x12345678.
+  packet_memory[8] = 0x12;
+  packet_memory[9] = 0x34;
+  packet_memory[10] = 0x56;
+  packet_memory[11] = 0x78;
+
+  ++sequence_number_;
+
+  // Copy the payload data.
+  memcpy(packet_memory + kRtpHeaderSize,
+         &last_payload_vec_[0],
+         last_payload_vec_.size());
+  std::unique_ptr<Packet> packet(
+      new Packet(packet_memory, allocated_bytes, clock_.TimeInMilliseconds()));
+  RTC_DCHECK(packet);
+  RTC_DCHECK(packet->valid_header());
+  return packet;
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/acm2/acm_send_test.h b/modules/audio_coding/acm2/acm_send_test.h
new file mode 100644
index 0000000..6aea0f1
--- /dev/null
+++ b/modules/audio_coding/acm2/acm_send_test.h
@@ -0,0 +1,88 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_ACM2_ACM_SEND_TEST_H_
+#define MODULES_AUDIO_CODING_ACM2_ACM_SEND_TEST_H_
+
+#include <memory>
+#include <vector>
+
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_coding/neteq/tools/packet_source.h"
+#include "rtc_base/constructormagic.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+class AudioEncoder;
+
+namespace test {
+class InputAudioFile;
+class Packet;
+
+class AcmSendTestOldApi : public AudioPacketizationCallback,
+                          public PacketSource {
+ public:
+  AcmSendTestOldApi(InputAudioFile* audio_source,
+                    int source_rate_hz,
+                    int test_duration_ms);
+  ~AcmSendTestOldApi() override;
+
+  // Registers the send codec. Returns true on success, false otherwise.
+  bool RegisterCodec(const char* payload_name,
+                     int sampling_freq_hz,
+                     int channels,
+                     int payload_type,
+                     int frame_size_samples);
+
+  // Registers an external send codec. Returns true on success, false otherwise.
+  bool RegisterExternalCodec(AudioEncoder* external_speech_encoder);
+
+  // Inherited from PacketSource.
+  std::unique_ptr<Packet> NextPacket() override;
+
+  // Inherited from AudioPacketizationCallback.
+  int32_t SendData(FrameType frame_type,
+                   uint8_t payload_type,
+                   uint32_t timestamp,
+                   const uint8_t* payload_data,
+                   size_t payload_len_bytes,
+                   const RTPFragmentationHeader* fragmentation) override;
+
+  AudioCodingModule* acm() { return acm_.get(); }
+
+ private:
+  static const int kBlockSizeMs = 10;
+
+  // Creates a Packet object from the last packet produced by ACM (and received
+  // through the SendData method as a callback).
+  std::unique_ptr<Packet> CreatePacket();
+
+  SimulatedClock clock_;
+  std::unique_ptr<AudioCodingModule> acm_;
+  InputAudioFile* audio_source_;
+  int source_rate_hz_;
+  const size_t input_block_size_samples_;
+  AudioFrame input_frame_;
+  bool codec_registered_;
+  int test_duration_ms_;
+  // The following member variables are set whenever SendData() is called.
+  FrameType frame_type_;
+  int payload_type_;
+  uint32_t timestamp_;
+  uint16_t sequence_number_;
+  std::vector<uint8_t> last_payload_vec_;
+  bool data_to_send_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(AcmSendTestOldApi);
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_ACM2_ACM_SEND_TEST_H_
diff --git a/modules/audio_coding/acm2/audio_coding_module.cc b/modules/audio_coding/acm2/audio_coding_module.cc
new file mode 100644
index 0000000..53b9177
--- /dev/null
+++ b/modules/audio_coding/acm2/audio_coding_module.cc
@@ -0,0 +1,1343 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/include/audio_coding_module.h"
+
+#include <algorithm>
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "modules/audio_coding/acm2/acm_receiver.h"
+#include "modules/audio_coding/acm2/acm_resampler.h"
+#include "modules/audio_coding/acm2/codec_manager.h"
+#include "modules/audio_coding/acm2/rent_a_codec.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+
+struct EncoderFactory {
+  AudioEncoder* external_speech_encoder = nullptr;
+  acm2::CodecManager codec_manager;
+  acm2::RentACodec rent_a_codec;
+};
+
+class AudioCodingModuleImpl final : public AudioCodingModule {
+ public:
+  explicit AudioCodingModuleImpl(const AudioCodingModule::Config& config);
+  ~AudioCodingModuleImpl() override;
+
+  /////////////////////////////////////////
+  //   Sender
+  //
+
+  // Can be called multiple times for Codec, CNG, RED.
+  int RegisterSendCodec(const CodecInst& send_codec) override;
+
+  void RegisterExternalSendCodec(
+      AudioEncoder* external_speech_encoder) override;
+
+  void ModifyEncoder(rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)>
+                         modifier) override;
+
+  void QueryEncoder(
+      rtc::FunctionView<void(const AudioEncoder*)> query) override;
+
+  // Get current send codec.
+  rtc::Optional<CodecInst> SendCodec() const override;
+
+  // Get current send frequency.
+  int SendFrequency() const override;
+
+  // Sets the bitrate to the specified value in bits/sec. In case the codec does
+  // not support the requested value it will choose an appropriate value
+  // instead.
+  void SetBitRate(int bitrate_bps) override;
+
+  // Register a transport callback which will be
+  // called to deliver the encoded buffers.
+  int RegisterTransportCallback(AudioPacketizationCallback* transport) override;
+
+  // Add 10 ms of raw (PCM) audio data to the encoder.
+  int Add10MsData(const AudioFrame& audio_frame) override;
+
+  /////////////////////////////////////////
+  // (RED) Redundant Coding
+  //
+
+  // Configure RED status i.e. on/off.
+  int SetREDStatus(bool enable_red) override;
+
+  // Get RED status.
+  bool REDStatus() const override;
+
+  /////////////////////////////////////////
+  // (FEC) Forward Error Correction (codec internal)
+  //
+
+  // Configure FEC status i.e. on/off.
+  int SetCodecFEC(bool enabled_codec_fec) override;
+
+  // Get FEC status.
+  bool CodecFEC() const override;
+
+  // Set target packet loss rate
+  int SetPacketLossRate(int loss_rate) override;
+
+  /////////////////////////////////////////
+  //   (VAD) Voice Activity Detection
+  //   and
+  //   (CNG) Comfort Noise Generation
+  //
+
+  int SetVAD(bool enable_dtx = true,
+             bool enable_vad = false,
+             ACMVADMode mode = VADNormal) override;
+
+  int VAD(bool* dtx_enabled,
+          bool* vad_enabled,
+          ACMVADMode* mode) const override;
+
+  int RegisterVADCallback(ACMVADCallback* vad_callback) override;
+
+  /////////////////////////////////////////
+  //   Receiver
+  //
+
+  // Initialize receiver, resets codec database etc.
+  int InitializeReceiver() override;
+
+  // Get current receive frequency.
+  int ReceiveFrequency() const override;
+
+  // Get current playout frequency.
+  int PlayoutFrequency() const override;
+
+  void SetReceiveCodecs(const std::map<int, SdpAudioFormat>& codecs) override;
+
+  bool RegisterReceiveCodec(int rtp_payload_type,
+                            const SdpAudioFormat& audio_format) override;
+
+  int RegisterReceiveCodec(const CodecInst& receive_codec) override;
+  int RegisterReceiveCodec(
+      const CodecInst& receive_codec,
+      rtc::FunctionView<std::unique_ptr<AudioDecoder>()> isac_factory) override;
+
+  int RegisterExternalReceiveCodec(int rtp_payload_type,
+                                   AudioDecoder* external_decoder,
+                                   int sample_rate_hz,
+                                   int num_channels,
+                                   const std::string& name) override;
+
+  // Get current received codec.
+  int ReceiveCodec(CodecInst* current_codec) const override;
+
+  rtc::Optional<SdpAudioFormat> ReceiveFormat() const override;
+
+  // Incoming packet from network parsed and ready for decode.
+  int IncomingPacket(const uint8_t* incoming_payload,
+                     const size_t payload_length,
+                     const WebRtcRTPHeader& rtp_info) override;
+
+  // Minimum playout delay.
+  int SetMinimumPlayoutDelay(int time_ms) override;
+
+  // Maximum playout delay.
+  int SetMaximumPlayoutDelay(int time_ms) override;
+
+  // Smallest latency NetEq will maintain.
+  int LeastRequiredDelayMs() const override;
+
+  RTC_DEPRECATED int32_t PlayoutTimestamp(uint32_t* timestamp) override;
+
+  rtc::Optional<uint32_t> PlayoutTimestamp() override;
+
+  int FilteredCurrentDelayMs() const override;
+
+  int TargetDelayMs() const override;
+
+  // Get 10 milliseconds of raw audio data to play out, and
+  // automatic resample to the requested frequency if > 0.
+  int PlayoutData10Ms(int desired_freq_hz,
+                      AudioFrame* audio_frame,
+                      bool* muted) override;
+  int PlayoutData10Ms(int desired_freq_hz, AudioFrame* audio_frame) override;
+
+  /////////////////////////////////////////
+  //   Statistics
+  //
+
+  int GetNetworkStatistics(NetworkStatistics* statistics) override;
+
+  int SetOpusApplication(OpusApplicationMode application) override;
+
+  // If current send codec is Opus, informs it about the maximum playback rate
+  // the receiver will render.
+  int SetOpusMaxPlaybackRate(int frequency_hz) override;
+
+  int EnableOpusDtx() override;
+
+  int DisableOpusDtx() override;
+
+  int UnregisterReceiveCodec(uint8_t payload_type) override;
+
+  int EnableNack(size_t max_nack_list_size) override;
+
+  void DisableNack() override;
+
+  std::vector<uint16_t> GetNackList(int64_t round_trip_time_ms) const override;
+
+  void GetDecodingCallStatistics(AudioDecodingCallStats* stats) const override;
+
+  ANAStats GetANAStats() const override;
+
+ private:
+  struct InputData {
+    uint32_t input_timestamp;
+    const int16_t* audio;
+    size_t length_per_channel;
+    size_t audio_channel;
+    // If a re-mix is required (up or down), this buffer will store a re-mixed
+    // version of the input.
+    int16_t buffer[WEBRTC_10MS_PCM_AUDIO];
+  };
+
+  // This member class writes values to the named UMA histogram, but only if
+  // the value has changed since the last time (and always for the first call).
+  class ChangeLogger {
+   public:
+    explicit ChangeLogger(const std::string& histogram_name)
+        : histogram_name_(histogram_name) {}
+    // Logs the new value if it is different from the last logged value, or if
+    // this is the first call.
+    void MaybeLog(int value);
+
+   private:
+    int last_value_ = 0;
+    int first_time_ = true;
+    const std::string histogram_name_;
+  };
+
+  int RegisterReceiveCodecUnlocked(
+      const CodecInst& codec,
+      rtc::FunctionView<std::unique_ptr<AudioDecoder>()> isac_factory)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
+
+  int Add10MsDataInternal(const AudioFrame& audio_frame, InputData* input_data)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
+  int Encode(const InputData& input_data)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
+
+  int InitializeReceiverSafe() RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
+
+  bool HaveValidEncoder(const char* caller_name) const
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
+
+  // Preprocessing of input audio, including resampling and down-mixing if
+  // required, before pushing audio into encoder's buffer.
+  //
+  // in_frame: input audio-frame
+  // ptr_out: pointer to output audio_frame. If no preprocessing is required
+  //          |ptr_out| will be pointing to |in_frame|, otherwise pointing to
+  //          |preprocess_frame_|.
+  //
+  // Return value:
+  //   -1: if encountering an error.
+  //    0: otherwise.
+  int PreprocessToAddData(const AudioFrame& in_frame,
+                          const AudioFrame** ptr_out)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
+
+  // Change required states after starting to receive the codec corresponding
+  // to |index|.
+  int UpdateUponReceivingCodec(int index);
+
+  rtc::CriticalSection acm_crit_sect_;
+  rtc::Buffer encode_buffer_ RTC_GUARDED_BY(acm_crit_sect_);
+  uint32_t expected_codec_ts_ RTC_GUARDED_BY(acm_crit_sect_);
+  uint32_t expected_in_ts_ RTC_GUARDED_BY(acm_crit_sect_);
+  acm2::ACMResampler resampler_ RTC_GUARDED_BY(acm_crit_sect_);
+  acm2::AcmReceiver receiver_;  // AcmReceiver has it's own internal lock.
+  ChangeLogger bitrate_logger_ RTC_GUARDED_BY(acm_crit_sect_);
+
+  std::unique_ptr<EncoderFactory> encoder_factory_
+      RTC_GUARDED_BY(acm_crit_sect_);
+
+  // Current encoder stack, either obtained from
+  // encoder_factory_->rent_a_codec.RentEncoderStack or provided by a call to
+  // RegisterEncoder.
+  std::unique_ptr<AudioEncoder> encoder_stack_ RTC_GUARDED_BY(acm_crit_sect_);
+
+  std::unique_ptr<AudioDecoder> isac_decoder_16k_
+      RTC_GUARDED_BY(acm_crit_sect_);
+  std::unique_ptr<AudioDecoder> isac_decoder_32k_
+      RTC_GUARDED_BY(acm_crit_sect_);
+
+  // This is to keep track of CN instances where we can send DTMFs.
+  uint8_t previous_pltype_ RTC_GUARDED_BY(acm_crit_sect_);
+
+  bool receiver_initialized_ RTC_GUARDED_BY(acm_crit_sect_);
+
+  AudioFrame preprocess_frame_ RTC_GUARDED_BY(acm_crit_sect_);
+  bool first_10ms_data_ RTC_GUARDED_BY(acm_crit_sect_);
+
+  bool first_frame_ RTC_GUARDED_BY(acm_crit_sect_);
+  uint32_t last_timestamp_ RTC_GUARDED_BY(acm_crit_sect_);
+  uint32_t last_rtp_timestamp_ RTC_GUARDED_BY(acm_crit_sect_);
+
+  rtc::CriticalSection callback_crit_sect_;
+  AudioPacketizationCallback* packetization_callback_
+      RTC_GUARDED_BY(callback_crit_sect_);
+  ACMVADCallback* vad_callback_ RTC_GUARDED_BY(callback_crit_sect_);
+
+  int codec_histogram_bins_log_[static_cast<size_t>(
+      AudioEncoder::CodecType::kMaxLoggedAudioCodecTypes)];
+  int number_of_consecutive_empty_packets_;
+};
+
+// Adds a codec usage sample to the histogram.
+void UpdateCodecTypeHistogram(size_t codec_type) {
+  RTC_HISTOGRAM_ENUMERATION(
+      "WebRTC.Audio.Encoder.CodecType", static_cast<int>(codec_type),
+      static_cast<int>(
+          webrtc::AudioEncoder::CodecType::kMaxLoggedAudioCodecTypes));
+}
+
+// Stereo-to-mono can be used as in-place.
+int DownMix(const AudioFrame& frame,
+            size_t length_out_buff,
+            int16_t* out_buff) {
+  RTC_DCHECK_EQ(frame.num_channels_, 2);
+  RTC_DCHECK_GE(length_out_buff, frame.samples_per_channel_);
+
+  if (!frame.muted()) {
+    const int16_t* frame_data = frame.data();
+    for (size_t n = 0; n < frame.samples_per_channel_; ++n) {
+      out_buff[n] = static_cast<int16_t>(
+          (static_cast<int32_t>(frame_data[2 * n]) +
+           static_cast<int32_t>(frame_data[2 * n + 1])) >> 1);
+    }
+  } else {
+    std::fill(out_buff, out_buff + frame.samples_per_channel_, 0);
+  }
+  return 0;
+}
+
+// Mono-to-stereo can be used as in-place.
+int UpMix(const AudioFrame& frame, size_t length_out_buff, int16_t* out_buff) {
+  RTC_DCHECK_EQ(frame.num_channels_, 1);
+  RTC_DCHECK_GE(length_out_buff, 2 * frame.samples_per_channel_);
+
+  if (!frame.muted()) {
+    const int16_t* frame_data = frame.data();
+    for (size_t n = frame.samples_per_channel_; n != 0; --n) {
+      size_t i = n - 1;
+      int16_t sample = frame_data[i];
+      out_buff[2 * i + 1] = sample;
+      out_buff[2 * i] = sample;
+    }
+  } else {
+    std::fill(out_buff, out_buff + frame.samples_per_channel_ * 2, 0);
+  }
+  return 0;
+}
+
+void ConvertEncodedInfoToFragmentationHeader(
+    const AudioEncoder::EncodedInfo& info,
+    RTPFragmentationHeader* frag) {
+  if (info.redundant.empty()) {
+    frag->fragmentationVectorSize = 0;
+    return;
+  }
+
+  frag->VerifyAndAllocateFragmentationHeader(
+      static_cast<uint16_t>(info.redundant.size()));
+  frag->fragmentationVectorSize = static_cast<uint16_t>(info.redundant.size());
+  size_t offset = 0;
+  for (size_t i = 0; i < info.redundant.size(); ++i) {
+    frag->fragmentationOffset[i] = offset;
+    offset += info.redundant[i].encoded_bytes;
+    frag->fragmentationLength[i] = info.redundant[i].encoded_bytes;
+    frag->fragmentationTimeDiff[i] = rtc::dchecked_cast<uint16_t>(
+        info.encoded_timestamp - info.redundant[i].encoded_timestamp);
+    frag->fragmentationPlType[i] = info.redundant[i].payload_type;
+  }
+}
+
+// Wraps a raw AudioEncoder pointer. The idea is that you can put one of these
+// in a unique_ptr, to protect the contained raw pointer from being deleted
+// when the unique_ptr expires. (This is of course a bad idea in general, but
+// backwards compatibility.)
+class RawAudioEncoderWrapper final : public AudioEncoder {
+ public:
+  RawAudioEncoderWrapper(AudioEncoder* enc) : enc_(enc) {}
+  int SampleRateHz() const override { return enc_->SampleRateHz(); }
+  size_t NumChannels() const override { return enc_->NumChannels(); }
+  int RtpTimestampRateHz() const override { return enc_->RtpTimestampRateHz(); }
+  size_t Num10MsFramesInNextPacket() const override {
+    return enc_->Num10MsFramesInNextPacket();
+  }
+  size_t Max10MsFramesInAPacket() const override {
+    return enc_->Max10MsFramesInAPacket();
+  }
+  int GetTargetBitrate() const override { return enc_->GetTargetBitrate(); }
+  EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
+                         rtc::ArrayView<const int16_t> audio,
+                         rtc::Buffer* encoded) override {
+    return enc_->Encode(rtp_timestamp, audio, encoded);
+  }
+  void Reset() override { return enc_->Reset(); }
+  bool SetFec(bool enable) override { return enc_->SetFec(enable); }
+  bool SetDtx(bool enable) override { return enc_->SetDtx(enable); }
+  bool SetApplication(Application application) override {
+    return enc_->SetApplication(application);
+  }
+  void SetMaxPlaybackRate(int frequency_hz) override {
+    return enc_->SetMaxPlaybackRate(frequency_hz);
+  }
+
+ private:
+  AudioEncoder* enc_;
+};
+
+// Return false on error.
+bool CreateSpeechEncoderIfNecessary(EncoderFactory* ef) {
+  auto* sp = ef->codec_manager.GetStackParams();
+  if (sp->speech_encoder) {
+    // Do nothing; we already have a speech encoder.
+  } else if (ef->codec_manager.GetCodecInst()) {
+    RTC_DCHECK(!ef->external_speech_encoder);
+    // We have no speech encoder, but we have a specification for making one.
+    std::unique_ptr<AudioEncoder> enc =
+        ef->rent_a_codec.RentEncoder(*ef->codec_manager.GetCodecInst());
+    if (!enc)
+      return false;  // Encoder spec was bad.
+    sp->speech_encoder = std::move(enc);
+  } else if (ef->external_speech_encoder) {
+    RTC_DCHECK(!ef->codec_manager.GetCodecInst());
+    // We have an external speech encoder.
+    sp->speech_encoder = std::unique_ptr<AudioEncoder>(
+        new RawAudioEncoderWrapper(ef->external_speech_encoder));
+  }
+  return true;
+}
+
+void AudioCodingModuleImpl::ChangeLogger::MaybeLog(int value) {
+  if (value != last_value_ || first_time_) {
+    first_time_ = false;
+    last_value_ = value;
+    RTC_HISTOGRAM_COUNTS_SPARSE_100(histogram_name_, value);
+  }
+}
+
+AudioCodingModuleImpl::AudioCodingModuleImpl(
+    const AudioCodingModule::Config& config)
+    : expected_codec_ts_(0xD87F3F9F),
+      expected_in_ts_(0xD87F3F9F),
+      receiver_(config),
+      bitrate_logger_("WebRTC.Audio.TargetBitrateInKbps"),
+      encoder_factory_(new EncoderFactory),
+      encoder_stack_(nullptr),
+      previous_pltype_(255),
+      receiver_initialized_(false),
+      first_10ms_data_(false),
+      first_frame_(true),
+      packetization_callback_(NULL),
+      vad_callback_(NULL),
+      codec_histogram_bins_log_(),
+      number_of_consecutive_empty_packets_(0) {
+  if (InitializeReceiverSafe() < 0) {
+    RTC_LOG(LS_ERROR) << "Cannot initialize receiver";
+  }
+  RTC_LOG(LS_INFO) << "Created";
+}
+
+AudioCodingModuleImpl::~AudioCodingModuleImpl() = default;
+
+int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
+  AudioEncoder::EncodedInfo encoded_info;
+  uint8_t previous_pltype;
+
+  // Check if there is an encoder before.
+  if (!HaveValidEncoder("Process"))
+    return -1;
+
+  if(!first_frame_) {
+    RTC_DCHECK(IsNewerTimestamp(input_data.input_timestamp, last_timestamp_))
+        << "Time should not move backwards";
+  }
+
+  // Scale the timestamp to the codec's RTP timestamp rate.
+  uint32_t rtp_timestamp =
+      first_frame_ ? input_data.input_timestamp
+                   : last_rtp_timestamp_ +
+                         rtc::CheckedDivExact(
+                             input_data.input_timestamp - last_timestamp_,
+                             static_cast<uint32_t>(rtc::CheckedDivExact(
+                                 encoder_stack_->SampleRateHz(),
+                                 encoder_stack_->RtpTimestampRateHz())));
+  last_timestamp_ = input_data.input_timestamp;
+  last_rtp_timestamp_ = rtp_timestamp;
+  first_frame_ = false;
+
+  // Clear the buffer before reuse - encoded data will get appended.
+  encode_buffer_.Clear();
+  encoded_info = encoder_stack_->Encode(
+      rtp_timestamp, rtc::ArrayView<const int16_t>(
+                         input_data.audio, input_data.audio_channel *
+                                               input_data.length_per_channel),
+      &encode_buffer_);
+
+  bitrate_logger_.MaybeLog(encoder_stack_->GetTargetBitrate() / 1000);
+  if (encode_buffer_.size() == 0 && !encoded_info.send_even_if_empty) {
+    // Not enough data.
+    return 0;
+  }
+  previous_pltype = previous_pltype_;  // Read it while we have the critsect.
+
+  // Log codec type to histogram once every 500 packets.
+  if (encoded_info.encoded_bytes == 0) {
+    ++number_of_consecutive_empty_packets_;
+  } else {
+    size_t codec_type = static_cast<size_t>(encoded_info.encoder_type);
+    codec_histogram_bins_log_[codec_type] +=
+        number_of_consecutive_empty_packets_ + 1;
+    number_of_consecutive_empty_packets_ = 0;
+    if (codec_histogram_bins_log_[codec_type] >= 500) {
+      codec_histogram_bins_log_[codec_type] -= 500;
+      UpdateCodecTypeHistogram(codec_type);
+    }
+  }
+
+  RTPFragmentationHeader my_fragmentation;
+  ConvertEncodedInfoToFragmentationHeader(encoded_info, &my_fragmentation);
+  FrameType frame_type;
+  if (encode_buffer_.size() == 0 && encoded_info.send_even_if_empty) {
+    frame_type = kEmptyFrame;
+    encoded_info.payload_type = previous_pltype;
+  } else {
+    RTC_DCHECK_GT(encode_buffer_.size(), 0);
+    frame_type = encoded_info.speech ? kAudioFrameSpeech : kAudioFrameCN;
+  }
+
+  {
+    rtc::CritScope lock(&callback_crit_sect_);
+    if (packetization_callback_) {
+      packetization_callback_->SendData(
+          frame_type, encoded_info.payload_type, encoded_info.encoded_timestamp,
+          encode_buffer_.data(), encode_buffer_.size(),
+          my_fragmentation.fragmentationVectorSize > 0 ? &my_fragmentation
+                                                       : nullptr);
+    }
+
+    if (vad_callback_) {
+      // Callback with VAD decision.
+      vad_callback_->InFrameType(frame_type);
+    }
+  }
+  previous_pltype_ = encoded_info.payload_type;
+  return static_cast<int32_t>(encode_buffer_.size());
+}
+
+/////////////////////////////////////////
+//   Sender
+//
+
+// Can be called multiple times for Codec, CNG, RED.
+int AudioCodingModuleImpl::RegisterSendCodec(const CodecInst& send_codec) {
+  rtc::CritScope lock(&acm_crit_sect_);
+  if (!encoder_factory_->codec_manager.RegisterEncoder(send_codec)) {
+    return -1;
+  }
+  if (encoder_factory_->codec_manager.GetCodecInst()) {
+    encoder_factory_->external_speech_encoder = nullptr;
+  }
+  if (!CreateSpeechEncoderIfNecessary(encoder_factory_.get())) {
+    return -1;
+  }
+  auto* sp = encoder_factory_->codec_manager.GetStackParams();
+  if (sp->speech_encoder)
+    encoder_stack_ = encoder_factory_->rent_a_codec.RentEncoderStack(sp);
+  return 0;
+}
+
+void AudioCodingModuleImpl::RegisterExternalSendCodec(
+    AudioEncoder* external_speech_encoder) {
+  rtc::CritScope lock(&acm_crit_sect_);
+  encoder_factory_->codec_manager.UnsetCodecInst();
+  encoder_factory_->external_speech_encoder = external_speech_encoder;
+  RTC_CHECK(CreateSpeechEncoderIfNecessary(encoder_factory_.get()));
+  auto* sp = encoder_factory_->codec_manager.GetStackParams();
+  RTC_CHECK(sp->speech_encoder);
+  encoder_stack_ = encoder_factory_->rent_a_codec.RentEncoderStack(sp);
+}
+
+void AudioCodingModuleImpl::ModifyEncoder(
+    rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier) {
+  rtc::CritScope lock(&acm_crit_sect_);
+
+  // Wipe the encoder factory, so that everything that relies on it will fail.
+  // We don't want the complexity of supporting swapping back and forth.
+  if (encoder_factory_) {
+    encoder_factory_.reset();
+    RTC_CHECK(!encoder_stack_);  // Ensure we hadn't started using the factory.
+  }
+
+  modifier(&encoder_stack_);
+}
+
+void AudioCodingModuleImpl::QueryEncoder(
+    rtc::FunctionView<void(const AudioEncoder*)> query) {
+  rtc::CritScope lock(&acm_crit_sect_);
+  query(encoder_stack_.get());
+}
+
+// Get current send codec.
+rtc::Optional<CodecInst> AudioCodingModuleImpl::SendCodec() const {
+  rtc::CritScope lock(&acm_crit_sect_);
+  if (encoder_factory_) {
+    auto* ci = encoder_factory_->codec_manager.GetCodecInst();
+    if (ci) {
+      return *ci;
+    }
+    CreateSpeechEncoderIfNecessary(encoder_factory_.get());
+    const std::unique_ptr<AudioEncoder>& enc =
+        encoder_factory_->codec_manager.GetStackParams()->speech_encoder;
+    if (enc) {
+      return acm2::CodecManager::ForgeCodecInst(enc.get());
+    }
+    return rtc::nullopt;
+  } else {
+    return encoder_stack_
+               ? rtc::Optional<CodecInst>(
+                     acm2::CodecManager::ForgeCodecInst(encoder_stack_.get()))
+               : rtc::nullopt;
+  }
+}
+
+// Get current send frequency.
+int AudioCodingModuleImpl::SendFrequency() const {
+  rtc::CritScope lock(&acm_crit_sect_);
+
+  if (!encoder_stack_) {
+    RTC_LOG(LS_ERROR) << "SendFrequency Failed, no codec is registered";
+    return -1;
+  }
+
+  return encoder_stack_->SampleRateHz();
+}
+
+void AudioCodingModuleImpl::SetBitRate(int bitrate_bps) {
+  rtc::CritScope lock(&acm_crit_sect_);
+  if (encoder_stack_) {
+    encoder_stack_->OnReceivedUplinkBandwidth(bitrate_bps, rtc::nullopt);
+  }
+}
+
+// Register a transport callback which will be called to deliver
+// the encoded buffers.
+int AudioCodingModuleImpl::RegisterTransportCallback(
+    AudioPacketizationCallback* transport) {
+  rtc::CritScope lock(&callback_crit_sect_);
+  packetization_callback_ = transport;
+  return 0;
+}
+
+// Add 10MS of raw (PCM) audio data to the encoder.
+int AudioCodingModuleImpl::Add10MsData(const AudioFrame& audio_frame) {
+  InputData input_data;
+  rtc::CritScope lock(&acm_crit_sect_);
+  int r = Add10MsDataInternal(audio_frame, &input_data);
+  return r < 0 ? r : Encode(input_data);
+}
+
+int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
+                                               InputData* input_data) {
+  if (audio_frame.samples_per_channel_ == 0) {
+    assert(false);
+    RTC_LOG(LS_ERROR) << "Cannot Add 10 ms audio, payload length is zero";
+    return -1;
+  }
+
+  if (audio_frame.sample_rate_hz_ > 48000) {
+    assert(false);
+    RTC_LOG(LS_ERROR) << "Cannot Add 10 ms audio, input frequency not valid";
+    return -1;
+  }
+
+  // If the length and frequency matches. We currently just support raw PCM.
+  if (static_cast<size_t>(audio_frame.sample_rate_hz_ / 100) !=
+      audio_frame.samples_per_channel_) {
+    RTC_LOG(LS_ERROR)
+        << "Cannot Add 10 ms audio, input frequency and length doesn't match";
+    return -1;
+  }
+
+  if (audio_frame.num_channels_ != 1 && audio_frame.num_channels_ != 2) {
+    RTC_LOG(LS_ERROR) << "Cannot Add 10 ms audio, invalid number of channels.";
+    return -1;
+  }
+
+  // Do we have a codec registered?
+  if (!HaveValidEncoder("Add10MsData")) {
+    return -1;
+  }
+
+  const AudioFrame* ptr_frame;
+  // Perform a resampling, also down-mix if it is required and can be
+  // performed before resampling (a down mix prior to resampling will take
+  // place if both primary and secondary encoders are mono and input is in
+  // stereo).
+  if (PreprocessToAddData(audio_frame, &ptr_frame) < 0) {
+    return -1;
+  }
+
+  // Check whether we need an up-mix or down-mix?
+  const size_t current_num_channels = encoder_stack_->NumChannels();
+  const bool same_num_channels =
+      ptr_frame->num_channels_ == current_num_channels;
+
+  if (!same_num_channels) {
+    if (ptr_frame->num_channels_ == 1) {
+      if (UpMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0)
+        return -1;
+    } else {
+      if (DownMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0)
+        return -1;
+    }
+  }
+
+  // When adding data to encoders this pointer is pointing to an audio buffer
+  // with correct number of channels.
+  const int16_t* ptr_audio = ptr_frame->data();
+
+  // For pushing data to primary, point the |ptr_audio| to correct buffer.
+  if (!same_num_channels)
+    ptr_audio = input_data->buffer;
+
+  // TODO(yujo): Skip encode of muted frames.
+  input_data->input_timestamp = ptr_frame->timestamp_;
+  input_data->audio = ptr_audio;
+  input_data->length_per_channel = ptr_frame->samples_per_channel_;
+  input_data->audio_channel = current_num_channels;
+
+  return 0;
+}
+
+// Perform a resampling and down-mix if required. We down-mix only if
+// encoder is mono and input is stereo. In case of dual-streaming, both
+// encoders has to be mono for down-mix to take place.
+// |*ptr_out| will point to the pre-processed audio-frame. If no pre-processing
+// is required, |*ptr_out| points to |in_frame|.
+// TODO(yujo): Make this more efficient for muted frames.
+int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
+                                               const AudioFrame** ptr_out) {
+  const bool resample =
+      in_frame.sample_rate_hz_ != encoder_stack_->SampleRateHz();
+
+  // This variable is true if primary codec and secondary codec (if exists)
+  // are both mono and input is stereo.
+  // TODO(henrik.lundin): This condition should probably be
+  //   in_frame.num_channels_ > encoder_stack_->NumChannels()
+  const bool down_mix =
+      in_frame.num_channels_ == 2 && encoder_stack_->NumChannels() == 1;
+
+  if (!first_10ms_data_) {
+    expected_in_ts_ = in_frame.timestamp_;
+    expected_codec_ts_ = in_frame.timestamp_;
+    first_10ms_data_ = true;
+  } else if (in_frame.timestamp_ != expected_in_ts_) {
+    RTC_LOG(LS_WARNING) << "Unexpected input timestamp: " << in_frame.timestamp_
+                        << ", expected: " << expected_in_ts_;
+    expected_codec_ts_ +=
+        (in_frame.timestamp_ - expected_in_ts_) *
+        static_cast<uint32_t>(
+            static_cast<double>(encoder_stack_->SampleRateHz()) /
+            static_cast<double>(in_frame.sample_rate_hz_));
+    expected_in_ts_ = in_frame.timestamp_;
+  }
+
+
+  if (!down_mix && !resample) {
+    // No pre-processing is required.
+    if (expected_in_ts_ == expected_codec_ts_) {
+      // If we've never resampled, we can use the input frame as-is
+      *ptr_out = &in_frame;
+    } else {
+      // Otherwise we'll need to alter the timestamp. Since in_frame is const,
+      // we'll have to make a copy of it.
+      preprocess_frame_.CopyFrom(in_frame);
+      preprocess_frame_.timestamp_ = expected_codec_ts_;
+      *ptr_out = &preprocess_frame_;
+    }
+
+    expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
+    expected_codec_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
+    return 0;
+  }
+
+  *ptr_out = &preprocess_frame_;
+  preprocess_frame_.num_channels_ = in_frame.num_channels_;
+  int16_t audio[WEBRTC_10MS_PCM_AUDIO];
+  const int16_t* src_ptr_audio = in_frame.data();
+  if (down_mix) {
+    // If a resampling is required the output of a down-mix is written into a
+    // local buffer, otherwise, it will be written to the output frame.
+    int16_t* dest_ptr_audio = resample ?
+        audio : preprocess_frame_.mutable_data();
+    if (DownMix(in_frame, WEBRTC_10MS_PCM_AUDIO, dest_ptr_audio) < 0)
+      return -1;
+    preprocess_frame_.num_channels_ = 1;
+    // Set the input of the resampler is the down-mixed signal.
+    src_ptr_audio = audio;
+  }
+
+  preprocess_frame_.timestamp_ = expected_codec_ts_;
+  preprocess_frame_.samples_per_channel_ = in_frame.samples_per_channel_;
+  preprocess_frame_.sample_rate_hz_ = in_frame.sample_rate_hz_;
+  // If it is required, we have to do a resampling.
+  if (resample) {
+    // The result of the resampler is written to output frame.
+    int16_t* dest_ptr_audio = preprocess_frame_.mutable_data();
+
+    int samples_per_channel = resampler_.Resample10Msec(
+        src_ptr_audio, in_frame.sample_rate_hz_, encoder_stack_->SampleRateHz(),
+        preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples,
+        dest_ptr_audio);
+
+    if (samples_per_channel < 0) {
+      RTC_LOG(LS_ERROR) << "Cannot add 10 ms audio, resampling failed";
+      return -1;
+    }
+    preprocess_frame_.samples_per_channel_ =
+        static_cast<size_t>(samples_per_channel);
+    preprocess_frame_.sample_rate_hz_ = encoder_stack_->SampleRateHz();
+  }
+
+  expected_codec_ts_ +=
+      static_cast<uint32_t>(preprocess_frame_.samples_per_channel_);
+  expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
+
+  return 0;
+}
+
+/////////////////////////////////////////
+//   (RED) Redundant Coding
+//
+
+bool AudioCodingModuleImpl::REDStatus() const {
+  rtc::CritScope lock(&acm_crit_sect_);
+  return encoder_factory_->codec_manager.GetStackParams()->use_red;
+}
+
+// Configure RED status i.e on/off.
+int AudioCodingModuleImpl::SetREDStatus(bool enable_red) {
+#ifdef WEBRTC_CODEC_RED
+  rtc::CritScope lock(&acm_crit_sect_);
+  CreateSpeechEncoderIfNecessary(encoder_factory_.get());
+  if (!encoder_factory_->codec_manager.SetCopyRed(enable_red)) {
+    return -1;
+  }
+  auto* sp = encoder_factory_->codec_manager.GetStackParams();
+  if (sp->speech_encoder)
+    encoder_stack_ = encoder_factory_->rent_a_codec.RentEncoderStack(sp);
+  return 0;
+#else
+  RTC_LOG(LS_WARNING) << "  WEBRTC_CODEC_RED is undefined";
+  return -1;
+#endif
+}
+
+/////////////////////////////////////////
+//   (FEC) Forward Error Correction (codec internal)
+//
+
+bool AudioCodingModuleImpl::CodecFEC() const {
+  rtc::CritScope lock(&acm_crit_sect_);
+  return encoder_factory_->codec_manager.GetStackParams()->use_codec_fec;
+}
+
+int AudioCodingModuleImpl::SetCodecFEC(bool enable_codec_fec) {
+  rtc::CritScope lock(&acm_crit_sect_);
+  CreateSpeechEncoderIfNecessary(encoder_factory_.get());
+  if (!encoder_factory_->codec_manager.SetCodecFEC(enable_codec_fec)) {
+    return -1;
+  }
+  auto* sp = encoder_factory_->codec_manager.GetStackParams();
+  if (sp->speech_encoder)
+    encoder_stack_ = encoder_factory_->rent_a_codec.RentEncoderStack(sp);
+  if (enable_codec_fec) {
+    return sp->use_codec_fec ? 0 : -1;
+  } else {
+    RTC_DCHECK(!sp->use_codec_fec);
+    return 0;
+  }
+}
+
+int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) {
+  rtc::CritScope lock(&acm_crit_sect_);
+  if (HaveValidEncoder("SetPacketLossRate")) {
+    encoder_stack_->OnReceivedUplinkPacketLossFraction(loss_rate / 100.0);
+  }
+  return 0;
+}
+
+/////////////////////////////////////////
+//   (VAD) Voice Activity Detection
+//
+int AudioCodingModuleImpl::SetVAD(bool enable_dtx,
+                                  bool enable_vad,
+                                  ACMVADMode mode) {
+  // Note: |enable_vad| is not used; VAD is enabled based on the DTX setting.
+  RTC_DCHECK_EQ(enable_dtx, enable_vad);
+  rtc::CritScope lock(&acm_crit_sect_);
+  CreateSpeechEncoderIfNecessary(encoder_factory_.get());
+  if (!encoder_factory_->codec_manager.SetVAD(enable_dtx, mode)) {
+    return -1;
+  }
+  auto* sp = encoder_factory_->codec_manager.GetStackParams();
+  if (sp->speech_encoder)
+    encoder_stack_ = encoder_factory_->rent_a_codec.RentEncoderStack(sp);
+  return 0;
+}
+
+// Get VAD/DTX settings.
+int AudioCodingModuleImpl::VAD(bool* dtx_enabled, bool* vad_enabled,
+                               ACMVADMode* mode) const {
+  rtc::CritScope lock(&acm_crit_sect_);
+  const auto* sp = encoder_factory_->codec_manager.GetStackParams();
+  *dtx_enabled = *vad_enabled = sp->use_cng;
+  *mode = sp->vad_mode;
+  return 0;
+}
+
+/////////////////////////////////////////
+//   Receiver
+//
+
+int AudioCodingModuleImpl::InitializeReceiver() {
+  rtc::CritScope lock(&acm_crit_sect_);
+  return InitializeReceiverSafe();
+}
+
+// Initialize receiver, resets codec database etc.
+int AudioCodingModuleImpl::InitializeReceiverSafe() {
+  // If the receiver is already initialized then we want to destroy any
+  // existing decoders. After a call to this function, we should have a clean
+  // start-up.
+  if (receiver_initialized_)
+    receiver_.RemoveAllCodecs();
+  receiver_.ResetInitialDelay();
+  receiver_.SetMinimumDelay(0);
+  receiver_.SetMaximumDelay(0);
+  receiver_.FlushBuffers();
+
+  receiver_initialized_ = true;
+  return 0;
+}
+
+// Get current receive frequency.
+int AudioCodingModuleImpl::ReceiveFrequency() const {
+  const auto last_packet_sample_rate = receiver_.last_packet_sample_rate_hz();
+  return last_packet_sample_rate ? *last_packet_sample_rate
+                                 : receiver_.last_output_sample_rate_hz();
+}
+
+// Get current playout frequency.
+int AudioCodingModuleImpl::PlayoutFrequency() const {
+  return receiver_.last_output_sample_rate_hz();
+}
+
+void AudioCodingModuleImpl::SetReceiveCodecs(
+    const std::map<int, SdpAudioFormat>& codecs) {
+  rtc::CritScope lock(&acm_crit_sect_);
+  receiver_.SetCodecs(codecs);
+}
+
+bool AudioCodingModuleImpl::RegisterReceiveCodec(
+    int rtp_payload_type,
+    const SdpAudioFormat& audio_format) {
+  rtc::CritScope lock(&acm_crit_sect_);
+  RTC_DCHECK(receiver_initialized_);
+
+  if (!acm2::RentACodec::IsPayloadTypeValid(rtp_payload_type)) {
+    RTC_LOG_F(LS_ERROR) << "Invalid payload-type " << rtp_payload_type
+                        << " for decoder.";
+    return false;
+  }
+
+  return receiver_.AddCodec(rtp_payload_type, audio_format);
+}
+
+int AudioCodingModuleImpl::RegisterReceiveCodec(const CodecInst& codec) {
+  rtc::CritScope lock(&acm_crit_sect_);
+  auto* ef = encoder_factory_.get();
+  return RegisterReceiveCodecUnlocked(
+      codec, [&] { return ef->rent_a_codec.RentIsacDecoder(codec.plfreq); });
+}
+
+int AudioCodingModuleImpl::RegisterReceiveCodec(
+    const CodecInst& codec,
+    rtc::FunctionView<std::unique_ptr<AudioDecoder>()> isac_factory) {
+  rtc::CritScope lock(&acm_crit_sect_);
+  return RegisterReceiveCodecUnlocked(codec, isac_factory);
+}
+
+int AudioCodingModuleImpl::RegisterReceiveCodecUnlocked(
+    const CodecInst& codec,
+    rtc::FunctionView<std::unique_ptr<AudioDecoder>()> isac_factory) {
+  RTC_DCHECK(receiver_initialized_);
+  if (codec.channels > 2) {
+    RTC_LOG_F(LS_ERROR) << "Unsupported number of channels: " << codec.channels;
+    return -1;
+  }
+
+  auto codec_id = acm2::RentACodec::CodecIdByParams(codec.plname, codec.plfreq,
+                                                    codec.channels);
+  if (!codec_id) {
+    RTC_LOG_F(LS_ERROR)
+        << "Wrong codec params to be registered as receive codec";
+    return -1;
+  }
+  auto codec_index = acm2::RentACodec::CodecIndexFromId(*codec_id);
+  RTC_CHECK(codec_index) << "Invalid codec ID: " << static_cast<int>(*codec_id);
+
+  // Check if the payload-type is valid.
+  if (!acm2::RentACodec::IsPayloadTypeValid(codec.pltype)) {
+    RTC_LOG_F(LS_ERROR) << "Invalid payload type " << codec.pltype << " for "
+                        << codec.plname;
+    return -1;
+  }
+
+  AudioDecoder* isac_decoder = nullptr;
+  if (STR_CASE_CMP(codec.plname, "isac") == 0) {
+    std::unique_ptr<AudioDecoder>& saved_isac_decoder =
+        codec.plfreq == 16000 ? isac_decoder_16k_ : isac_decoder_32k_;
+    if (!saved_isac_decoder) {
+      saved_isac_decoder = isac_factory();
+    }
+    isac_decoder = saved_isac_decoder.get();
+  }
+  return receiver_.AddCodec(*codec_index, codec.pltype, codec.channels,
+                            codec.plfreq, isac_decoder, codec.plname);
+}
+
+int AudioCodingModuleImpl::RegisterExternalReceiveCodec(
+    int rtp_payload_type,
+    AudioDecoder* external_decoder,
+    int sample_rate_hz,
+    int num_channels,
+    const std::string& name) {
+  rtc::CritScope lock(&acm_crit_sect_);
+  RTC_DCHECK(receiver_initialized_);
+  if (num_channels > 2 || num_channels < 0) {
+    RTC_LOG_F(LS_ERROR) << "Unsupported number of channels: " << num_channels;
+    return -1;
+  }
+
+  // Check if the payload-type is valid.
+  if (!acm2::RentACodec::IsPayloadTypeValid(rtp_payload_type)) {
+    RTC_LOG_F(LS_ERROR) << "Invalid payload-type " << rtp_payload_type
+                        << " for external decoder.";
+    return -1;
+  }
+
+  return receiver_.AddCodec(-1 /* external */, rtp_payload_type, num_channels,
+                            sample_rate_hz, external_decoder, name);
+}
+
+// Get current received codec.
+int AudioCodingModuleImpl::ReceiveCodec(CodecInst* current_codec) const {
+  rtc::CritScope lock(&acm_crit_sect_);
+  return receiver_.LastAudioCodec(current_codec);
+}
+
+rtc::Optional<SdpAudioFormat> AudioCodingModuleImpl::ReceiveFormat() const {
+  rtc::CritScope lock(&acm_crit_sect_);
+  return receiver_.LastAudioFormat();
+}
+
+// Incoming packet from network parsed and ready for decode.
+int AudioCodingModuleImpl::IncomingPacket(const uint8_t* incoming_payload,
+                                          const size_t payload_length,
+                                          const WebRtcRTPHeader& rtp_header) {
+  RTC_DCHECK_EQ(payload_length == 0, incoming_payload == nullptr);
+  return receiver_.InsertPacket(
+      rtp_header,
+      rtc::ArrayView<const uint8_t>(incoming_payload, payload_length));
+}
+
+// Minimum playout delay (Used for lip-sync).
+int AudioCodingModuleImpl::SetMinimumPlayoutDelay(int time_ms) {
+  if ((time_ms < 0) || (time_ms > 10000)) {
+    RTC_LOG(LS_ERROR) << "Delay must be in the range of 0-10000 milliseconds.";
+    return -1;
+  }
+  return receiver_.SetMinimumDelay(time_ms);
+}
+
+int AudioCodingModuleImpl::SetMaximumPlayoutDelay(int time_ms) {
+  if ((time_ms < 0) || (time_ms > 10000)) {
+    RTC_LOG(LS_ERROR) << "Delay must be in the range of 0-10000 milliseconds.";
+    return -1;
+  }
+  return receiver_.SetMaximumDelay(time_ms);
+}
+
+// Get 10 milliseconds of raw audio data to play out.
+// Automatic resample to the requested frequency.
+int AudioCodingModuleImpl::PlayoutData10Ms(int desired_freq_hz,
+                                           AudioFrame* audio_frame,
+                                           bool* muted) {
+  // GetAudio always returns 10 ms, at the requested sample rate.
+  if (receiver_.GetAudio(desired_freq_hz, audio_frame, muted) != 0) {
+    RTC_LOG(LS_ERROR) << "PlayoutData failed, RecOut Failed";
+    return -1;
+  }
+  return 0;
+}
+
+int AudioCodingModuleImpl::PlayoutData10Ms(int desired_freq_hz,
+                                           AudioFrame* audio_frame) {
+  bool muted;
+  int ret = PlayoutData10Ms(desired_freq_hz, audio_frame, &muted);
+  RTC_DCHECK(!muted);
+  return ret;
+}
+
+/////////////////////////////////////////
+//   Statistics
+//
+
+// TODO(turajs) change the return value to void. Also change the corresponding
+// NetEq function.
+int AudioCodingModuleImpl::GetNetworkStatistics(NetworkStatistics* statistics) {
+  receiver_.GetNetworkStatistics(statistics);
+  return 0;
+}
+
+int AudioCodingModuleImpl::RegisterVADCallback(ACMVADCallback* vad_callback) {
+  RTC_LOG(LS_VERBOSE) << "RegisterVADCallback()";
+  rtc::CritScope lock(&callback_crit_sect_);
+  vad_callback_ = vad_callback;
+  return 0;
+}
+
+int AudioCodingModuleImpl::SetOpusApplication(OpusApplicationMode application) {
+  rtc::CritScope lock(&acm_crit_sect_);
+  if (!HaveValidEncoder("SetOpusApplication")) {
+    return -1;
+  }
+  AudioEncoder::Application app;
+  switch (application) {
+    case kVoip:
+      app = AudioEncoder::Application::kSpeech;
+      break;
+    case kAudio:
+      app = AudioEncoder::Application::kAudio;
+      break;
+    default:
+      FATAL();
+      return 0;
+  }
+  return encoder_stack_->SetApplication(app) ? 0 : -1;
+}
+
+// Informs Opus encoder of the maximum playback rate the receiver will render.
+int AudioCodingModuleImpl::SetOpusMaxPlaybackRate(int frequency_hz) {
+  rtc::CritScope lock(&acm_crit_sect_);
+  if (!HaveValidEncoder("SetOpusMaxPlaybackRate")) {
+    return -1;
+  }
+  encoder_stack_->SetMaxPlaybackRate(frequency_hz);
+  return 0;
+}
+
+int AudioCodingModuleImpl::EnableOpusDtx() {
+  rtc::CritScope lock(&acm_crit_sect_);
+  if (!HaveValidEncoder("EnableOpusDtx")) {
+    return -1;
+  }
+  return encoder_stack_->SetDtx(true) ? 0 : -1;
+}
+
+int AudioCodingModuleImpl::DisableOpusDtx() {
+  rtc::CritScope lock(&acm_crit_sect_);
+  if (!HaveValidEncoder("DisableOpusDtx")) {
+    return -1;
+  }
+  return encoder_stack_->SetDtx(false) ? 0 : -1;
+}
+
+int32_t AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) {
+  rtc::Optional<uint32_t> ts = PlayoutTimestamp();
+  if (!ts)
+    return -1;
+  *timestamp = *ts;
+  return 0;
+}
+
+rtc::Optional<uint32_t> AudioCodingModuleImpl::PlayoutTimestamp() {
+  return receiver_.GetPlayoutTimestamp();
+}
+
+int AudioCodingModuleImpl::FilteredCurrentDelayMs() const {
+  return receiver_.FilteredCurrentDelayMs();
+}
+
+int AudioCodingModuleImpl::TargetDelayMs() const {
+  return receiver_.TargetDelayMs();
+}
+
+bool AudioCodingModuleImpl::HaveValidEncoder(const char* caller_name) const {
+  if (!encoder_stack_) {
+    RTC_LOG(LS_ERROR) << caller_name << " failed: No send codec is registered.";
+    return false;
+  }
+  return true;
+}
+
+int AudioCodingModuleImpl::UnregisterReceiveCodec(uint8_t payload_type) {
+  return receiver_.RemoveCodec(payload_type);
+}
+
+int AudioCodingModuleImpl::EnableNack(size_t max_nack_list_size) {
+  return receiver_.EnableNack(max_nack_list_size);
+}
+
+void AudioCodingModuleImpl::DisableNack() {
+  receiver_.DisableNack();
+}
+
+std::vector<uint16_t> AudioCodingModuleImpl::GetNackList(
+    int64_t round_trip_time_ms) const {
+  return receiver_.GetNackList(round_trip_time_ms);
+}
+
+int AudioCodingModuleImpl::LeastRequiredDelayMs() const {
+  return receiver_.LeastRequiredDelayMs();
+}
+
+void AudioCodingModuleImpl::GetDecodingCallStatistics(
+      AudioDecodingCallStats* call_stats) const {
+  receiver_.GetDecodingCallStatistics(call_stats);
+}
+
+ANAStats AudioCodingModuleImpl::GetANAStats() const {
+  rtc::CritScope lock(&acm_crit_sect_);
+  if (encoder_stack_)
+    return encoder_stack_->GetANAStats();
+  // If no encoder is set, return default stats.
+  return ANAStats();
+}
+
+}  // namespace
+
+AudioCodingModule::Config::Config()
+    : neteq_config(), clock(Clock::GetRealTimeClock()) {
+  // Post-decode VAD is disabled by default in NetEq, however, Audio
+  // Conference Mixer relies on VAD decisions and fails without them.
+  neteq_config.enable_post_decode_vad = true;
+}
+
+AudioCodingModule::Config::Config(const Config&) = default;
+AudioCodingModule::Config::~Config() = default;
+
+AudioCodingModule* AudioCodingModule::Create(int id) {
+  RTC_UNUSED(id);
+  return Create();
+}
+
+// Create module
+AudioCodingModule* AudioCodingModule::Create() {
+  Config config;
+  config.clock = Clock::GetRealTimeClock();
+  config.decoder_factory = CreateBuiltinAudioDecoderFactory();
+  return Create(config);
+}
+
+AudioCodingModule* AudioCodingModule::Create(Clock* clock) {
+  Config config;
+  config.clock = clock;
+  config.decoder_factory = CreateBuiltinAudioDecoderFactory();
+  return Create(config);
+}
+
+AudioCodingModule* AudioCodingModule::Create(const Config& config) {
+  if (!config.decoder_factory) {
+    // TODO(ossu): Backwards compatibility. Will be removed after a deprecation
+    // cycle.
+    Config config_copy = config;
+    config_copy.decoder_factory = CreateBuiltinAudioDecoderFactory();
+    return new AudioCodingModuleImpl(config_copy);
+  }
+  return new AudioCodingModuleImpl(config);
+}
+
+int AudioCodingModule::NumberOfCodecs() {
+  return static_cast<int>(acm2::RentACodec::NumberOfCodecs());
+}
+
+int AudioCodingModule::Codec(int list_id, CodecInst* codec) {
+  auto codec_id = acm2::RentACodec::CodecIdFromIndex(list_id);
+  if (!codec_id)
+    return -1;
+  auto ci = acm2::RentACodec::CodecInstById(*codec_id);
+  if (!ci)
+    return -1;
+  *codec = *ci;
+  return 0;
+}
+
+int AudioCodingModule::Codec(const char* payload_name,
+                             CodecInst* codec,
+                             int sampling_freq_hz,
+                             size_t channels) {
+  rtc::Optional<CodecInst> ci = acm2::RentACodec::CodecInstByParams(
+      payload_name, sampling_freq_hz, channels);
+  if (ci) {
+    *codec = *ci;
+    return 0;
+  } else {
+    // We couldn't find a matching codec, so set the parameters to unacceptable
+    // values and return.
+    codec->plname[0] = '\0';
+    codec->pltype = -1;
+    codec->pacsize = 0;
+    codec->rate = 0;
+    codec->plfreq = 0;
+    return -1;
+  }
+}
+
+int AudioCodingModule::Codec(const char* payload_name,
+                             int sampling_freq_hz,
+                             size_t channels) {
+  rtc::Optional<acm2::RentACodec::CodecId> ci =
+      acm2::RentACodec::CodecIdByParams(payload_name, sampling_freq_hz,
+                                        channels);
+  if (!ci)
+    return -1;
+  rtc::Optional<int> i = acm2::RentACodec::CodecIndexFromId(*ci);
+  return i ? *i : -1;
+}
+
+// Checks the validity of the parameters of the given codec
+bool AudioCodingModule::IsCodecValid(const CodecInst& codec) {
+  bool valid = acm2::RentACodec::IsCodecValid(codec);
+  if (!valid)
+    RTC_LOG(LS_ERROR) << "Invalid codec setting";
+  return valid;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/acm2/audio_coding_module_unittest.cc b/modules/audio_coding/acm2/audio_coding_module_unittest.cc
new file mode 100644
index 0000000..aaa4230
--- /dev/null
+++ b/modules/audio_coding/acm2/audio_coding_module_unittest.cc
@@ -0,0 +1,1968 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <memory>
+#include <vector>
+
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "api/audio_codecs/opus/audio_encoder_opus.h"
+#include "modules/audio_coding/acm2/acm_receive_test.h"
+#include "modules/audio_coding/acm2/acm_send_test.h"
+#include "modules/audio_coding/codecs/audio_format_conversion.h"
+#include "modules/audio_coding/codecs/g711/audio_decoder_pcm.h"
+#include "modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
+#include "modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "modules/audio_coding/neteq/tools/audio_checksum.h"
+#include "modules/audio_coding/neteq/tools/audio_loop.h"
+#include "modules/audio_coding/neteq/tools/constant_pcm_packet_source.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "modules/audio_coding/neteq/tools/output_audio_file.h"
+#include "modules/audio_coding/neteq/tools/output_wav_file.h"
+#include "modules/audio_coding/neteq/tools/packet.h"
+#include "modules/audio_coding/neteq/tools/rtp_file_source.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/messagedigest.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/refcountedobject.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/event_wrapper.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/gtest.h"
+#include "test/mock_audio_decoder.h"
+#include "test/mock_audio_encoder.h"
+#include "test/testsupport/fileutils.h"
+
+using ::testing::AtLeast;
+using ::testing::Invoke;
+using ::testing::_;
+
+namespace webrtc {
+
+namespace {
+const int kSampleRateHz = 16000;
+const int kNumSamples10ms = kSampleRateHz / 100;
+const int kFrameSizeMs = 10;  // Multiple of 10.
+const int kFrameSizeSamples = kFrameSizeMs / 10 * kNumSamples10ms;
+const int kPayloadSizeBytes = kFrameSizeSamples * sizeof(int16_t);
+const uint8_t kPayloadType = 111;
+}  // namespace
+
+class RtpUtility {
+ public:
+  RtpUtility(int samples_per_packet, uint8_t payload_type)
+      : samples_per_packet_(samples_per_packet), payload_type_(payload_type) {}
+
+  virtual ~RtpUtility() {}
+
+  void Populate(WebRtcRTPHeader* rtp_header) {
+    rtp_header->header.sequenceNumber = 0xABCD;
+    rtp_header->header.timestamp = 0xABCDEF01;
+    rtp_header->header.payloadType = payload_type_;
+    rtp_header->header.markerBit = false;
+    rtp_header->header.ssrc = 0x1234;
+    rtp_header->header.numCSRCs = 0;
+    rtp_header->frameType = kAudioFrameSpeech;
+
+    rtp_header->header.payload_type_frequency = kSampleRateHz;
+    rtp_header->type.Audio.channel = 1;
+    rtp_header->type.Audio.isCNG = false;
+  }
+
+  void Forward(WebRtcRTPHeader* rtp_header) {
+    ++rtp_header->header.sequenceNumber;
+    rtp_header->header.timestamp += samples_per_packet_;
+  }
+
+ private:
+  int samples_per_packet_;
+  uint8_t payload_type_;
+};
+
+class PacketizationCallbackStubOldApi : public AudioPacketizationCallback {
+ public:
+  PacketizationCallbackStubOldApi()
+      : num_calls_(0),
+        last_frame_type_(kEmptyFrame),
+        last_payload_type_(-1),
+        last_timestamp_(0) {}
+
+  int32_t SendData(FrameType frame_type,
+                   uint8_t payload_type,
+                   uint32_t timestamp,
+                   const uint8_t* payload_data,
+                   size_t payload_len_bytes,
+                   const RTPFragmentationHeader* fragmentation) override {
+    rtc::CritScope lock(&crit_sect_);
+    ++num_calls_;
+    last_frame_type_ = frame_type;
+    last_payload_type_ = payload_type;
+    last_timestamp_ = timestamp;
+    last_payload_vec_.assign(payload_data, payload_data + payload_len_bytes);
+    return 0;
+  }
+
+  int num_calls() const {
+    rtc::CritScope lock(&crit_sect_);
+    return num_calls_;
+  }
+
+  int last_payload_len_bytes() const {
+    rtc::CritScope lock(&crit_sect_);
+    return rtc::checked_cast<int>(last_payload_vec_.size());
+  }
+
+  FrameType last_frame_type() const {
+    rtc::CritScope lock(&crit_sect_);
+    return last_frame_type_;
+  }
+
+  int last_payload_type() const {
+    rtc::CritScope lock(&crit_sect_);
+    return last_payload_type_;
+  }
+
+  uint32_t last_timestamp() const {
+    rtc::CritScope lock(&crit_sect_);
+    return last_timestamp_;
+  }
+
+  void SwapBuffers(std::vector<uint8_t>* payload) {
+    rtc::CritScope lock(&crit_sect_);
+    last_payload_vec_.swap(*payload);
+  }
+
+ private:
+  int num_calls_ RTC_GUARDED_BY(crit_sect_);
+  FrameType last_frame_type_ RTC_GUARDED_BY(crit_sect_);
+  int last_payload_type_ RTC_GUARDED_BY(crit_sect_);
+  uint32_t last_timestamp_ RTC_GUARDED_BY(crit_sect_);
+  std::vector<uint8_t> last_payload_vec_ RTC_GUARDED_BY(crit_sect_);
+  rtc::CriticalSection crit_sect_;
+};
+
+class AudioCodingModuleTestOldApi : public ::testing::Test {
+ protected:
+  AudioCodingModuleTestOldApi()
+      : rtp_utility_(new RtpUtility(kFrameSizeSamples, kPayloadType)),
+        clock_(Clock::GetRealTimeClock()) {}
+
+  ~AudioCodingModuleTestOldApi() {}
+
+  void TearDown() {}
+
+  void SetUp() {
+    acm_.reset(AudioCodingModule::Create(clock_));
+
+    rtp_utility_->Populate(&rtp_header_);
+
+    input_frame_.sample_rate_hz_ = kSampleRateHz;
+    input_frame_.num_channels_ = 1;
+    input_frame_.samples_per_channel_ = kSampleRateHz * 10 / 1000;  // 10 ms.
+    static_assert(kSampleRateHz * 10 / 1000 <= AudioFrame::kMaxDataSizeSamples,
+                  "audio frame too small");
+    input_frame_.Mute();
+
+    ASSERT_EQ(0, acm_->RegisterTransportCallback(&packet_cb_));
+
+    SetUpL16Codec();
+  }
+
+  // Set up L16 codec.
+  virtual void SetUpL16Codec() {
+    audio_format_ = SdpAudioFormat("L16", kSampleRateHz, 1);
+    ASSERT_EQ(0, AudioCodingModule::Codec("L16", &codec_, kSampleRateHz, 1));
+    codec_.pltype = kPayloadType;
+  }
+
+  virtual void RegisterCodec() {
+    EXPECT_EQ(true, acm_->RegisterReceiveCodec(kPayloadType, *audio_format_));
+    EXPECT_EQ(0, acm_->RegisterSendCodec(codec_));
+  }
+
+  virtual void InsertPacketAndPullAudio() {
+    InsertPacket();
+    PullAudio();
+  }
+
+  virtual void InsertPacket() {
+    const uint8_t kPayload[kPayloadSizeBytes] = {0};
+    ASSERT_EQ(0,
+              acm_->IncomingPacket(kPayload, kPayloadSizeBytes, rtp_header_));
+    rtp_utility_->Forward(&rtp_header_);
+  }
+
+  virtual void PullAudio() {
+    AudioFrame audio_frame;
+    bool muted;
+    ASSERT_EQ(0, acm_->PlayoutData10Ms(-1, &audio_frame, &muted));
+    ASSERT_FALSE(muted);
+  }
+
+  virtual void InsertAudio() {
+    ASSERT_GE(acm_->Add10MsData(input_frame_), 0);
+    input_frame_.timestamp_ += kNumSamples10ms;
+  }
+
+  virtual void VerifyEncoding() {
+    int last_length = packet_cb_.last_payload_len_bytes();
+    EXPECT_TRUE(last_length == 2 * codec_.pacsize || last_length == 0)
+        << "Last encoded packet was " << last_length << " bytes.";
+  }
+
+  virtual void InsertAudioAndVerifyEncoding() {
+    InsertAudio();
+    VerifyEncoding();
+  }
+
+  std::unique_ptr<RtpUtility> rtp_utility_;
+  std::unique_ptr<AudioCodingModule> acm_;
+  PacketizationCallbackStubOldApi packet_cb_;
+  WebRtcRTPHeader rtp_header_;
+  AudioFrame input_frame_;
+
+  // These two have to be kept in sync for now. In the future, we'll be able to
+  // eliminate the CodecInst and keep only the SdpAudioFormat.
+  rtc::Optional<SdpAudioFormat> audio_format_;
+  CodecInst codec_;
+
+  Clock* clock_;
+};
+
+// Check if the statistics are initialized correctly. Before any call to ACM
+// all fields have to be zero.
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_InitializedToZero DISABLED_InitializedToZero
+#else
+#define MAYBE_InitializedToZero InitializedToZero
+#endif
+TEST_F(AudioCodingModuleTestOldApi, MAYBE_InitializedToZero) {
+  RegisterCodec();
+  AudioDecodingCallStats stats;
+  acm_->GetDecodingCallStatistics(&stats);
+  EXPECT_EQ(0, stats.calls_to_neteq);
+  EXPECT_EQ(0, stats.calls_to_silence_generator);
+  EXPECT_EQ(0, stats.decoded_normal);
+  EXPECT_EQ(0, stats.decoded_cng);
+  EXPECT_EQ(0, stats.decoded_plc);
+  EXPECT_EQ(0, stats.decoded_plc_cng);
+  EXPECT_EQ(0, stats.decoded_muted_output);
+}
+
+// Insert some packets and pull audio. Check statistics are valid. Then,
+// simulate packet loss and check if PLC and PLC-to-CNG statistics are
+// correctly updated.
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_NetEqCalls DISABLED_NetEqCalls
+#else
+#define MAYBE_NetEqCalls NetEqCalls
+#endif
+TEST_F(AudioCodingModuleTestOldApi, MAYBE_NetEqCalls) {
+  RegisterCodec();
+  AudioDecodingCallStats stats;
+  const int kNumNormalCalls = 10;
+
+  for (int num_calls = 0; num_calls < kNumNormalCalls; ++num_calls) {
+    InsertPacketAndPullAudio();
+  }
+  acm_->GetDecodingCallStatistics(&stats);
+  EXPECT_EQ(kNumNormalCalls, stats.calls_to_neteq);
+  EXPECT_EQ(0, stats.calls_to_silence_generator);
+  EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
+  EXPECT_EQ(0, stats.decoded_cng);
+  EXPECT_EQ(0, stats.decoded_plc);
+  EXPECT_EQ(0, stats.decoded_plc_cng);
+  EXPECT_EQ(0, stats.decoded_muted_output);
+
+  const int kNumPlc = 3;
+  const int kNumPlcCng = 5;
+
+  // Simulate packet-loss. NetEq first performs PLC then PLC fades to CNG.
+  for (int n = 0; n < kNumPlc + kNumPlcCng; ++n) {
+    PullAudio();
+  }
+  acm_->GetDecodingCallStatistics(&stats);
+  EXPECT_EQ(kNumNormalCalls + kNumPlc + kNumPlcCng, stats.calls_to_neteq);
+  EXPECT_EQ(0, stats.calls_to_silence_generator);
+  EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
+  EXPECT_EQ(0, stats.decoded_cng);
+  EXPECT_EQ(kNumPlc, stats.decoded_plc);
+  EXPECT_EQ(kNumPlcCng, stats.decoded_plc_cng);
+  EXPECT_EQ(0, stats.decoded_muted_output);
+  // TODO(henrik.lundin) Add a test with muted state enabled.
+}
+
+TEST_F(AudioCodingModuleTestOldApi, VerifyOutputFrame) {
+  AudioFrame audio_frame;
+  const int kSampleRateHz = 32000;
+  bool muted;
+  EXPECT_EQ(0, acm_->PlayoutData10Ms(kSampleRateHz, &audio_frame, &muted));
+  ASSERT_FALSE(muted);
+  EXPECT_EQ(0u, audio_frame.timestamp_);
+  EXPECT_GT(audio_frame.num_channels_, 0u);
+  EXPECT_EQ(static_cast<size_t>(kSampleRateHz / 100),
+            audio_frame.samples_per_channel_);
+  EXPECT_EQ(kSampleRateHz, audio_frame.sample_rate_hz_);
+}
+
+// The below test is temporarily disabled on Windows due to problems
+// with clang debug builds.
+// TODO(tommi): Re-enable when we've figured out what the problem is.
+// http://crbug.com/615050
+#if !defined(WEBRTC_WIN) && defined(__clang__) && RTC_DCHECK_IS_ON && \
+    GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST_F(AudioCodingModuleTestOldApi, FailOnZeroDesiredFrequency) {
+  AudioFrame audio_frame;
+  bool muted;
+  EXPECT_DEATH(acm_->PlayoutData10Ms(0, &audio_frame, &muted),
+               "dst_sample_rate_hz");
+}
+#endif
+
+// Checks that the transport callback is invoked once for each speech packet.
+// Also checks that the frame type is kAudioFrameSpeech.
+TEST_F(AudioCodingModuleTestOldApi, TransportCallbackIsInvokedForEachPacket) {
+  const int k10MsBlocksPerPacket = 3;
+  codec_.pacsize = k10MsBlocksPerPacket * kSampleRateHz / 100;
+  RegisterCodec();
+  const int kLoops = 10;
+  for (int i = 0; i < kLoops; ++i) {
+    EXPECT_EQ(i / k10MsBlocksPerPacket, packet_cb_.num_calls());
+    if (packet_cb_.num_calls() > 0)
+      EXPECT_EQ(kAudioFrameSpeech, packet_cb_.last_frame_type());
+    InsertAudioAndVerifyEncoding();
+  }
+  EXPECT_EQ(kLoops / k10MsBlocksPerPacket, packet_cb_.num_calls());
+  EXPECT_EQ(kAudioFrameSpeech, packet_cb_.last_frame_type());
+}
+
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+// Verifies that the RTP timestamp series is not reset when the codec is
+// changed.
+TEST_F(AudioCodingModuleTestOldApi, TimestampSeriesContinuesWhenCodecChanges) {
+  RegisterCodec();  // This registers the default codec.
+  uint32_t expected_ts = input_frame_.timestamp_;
+  int blocks_per_packet = codec_.pacsize / (kSampleRateHz / 100);
+  // Encode 5 packets of the first codec type.
+  const int kNumPackets1 = 5;
+  for (int j = 0; j < kNumPackets1; ++j) {
+    for (int i = 0; i < blocks_per_packet; ++i) {
+      EXPECT_EQ(j, packet_cb_.num_calls());
+      InsertAudio();
+    }
+    EXPECT_EQ(j + 1, packet_cb_.num_calls());
+    EXPECT_EQ(expected_ts, packet_cb_.last_timestamp());
+    expected_ts += codec_.pacsize;
+  }
+
+  // Change codec.
+  ASSERT_EQ(0, AudioCodingModule::Codec("ISAC", &codec_, kSampleRateHz, 1));
+  RegisterCodec();
+  blocks_per_packet = codec_.pacsize / (kSampleRateHz / 100);
+  // Encode another 5 packets.
+  const int kNumPackets2 = 5;
+  for (int j = 0; j < kNumPackets2; ++j) {
+    for (int i = 0; i < blocks_per_packet; ++i) {
+      EXPECT_EQ(kNumPackets1 + j, packet_cb_.num_calls());
+      InsertAudio();
+    }
+    EXPECT_EQ(kNumPackets1 + j + 1, packet_cb_.num_calls());
+    EXPECT_EQ(expected_ts, packet_cb_.last_timestamp());
+    expected_ts += codec_.pacsize;
+  }
+}
+#endif
+
+// Introduce this class to set different expectations on the number of encoded
+// bytes. This class expects all encoded packets to be 9 bytes (matching one
+// CNG SID frame) or 0 bytes. This test depends on |input_frame_| containing
+// (near-)zero values. It also introduces a way to register comfort noise with
+// a custom payload type.
+class AudioCodingModuleTestWithComfortNoiseOldApi
+    : public AudioCodingModuleTestOldApi {
+ protected:
+  void RegisterCngCodec(int rtp_payload_type) {
+    EXPECT_EQ(true,
+              acm_->RegisterReceiveCodec(
+                  rtp_payload_type, SdpAudioFormat("cn", kSampleRateHz, 1)));
+
+    CodecInst codec;
+    EXPECT_EQ(0, AudioCodingModule::Codec("CN", &codec, kSampleRateHz, 1));
+    codec.pltype = rtp_payload_type;
+    EXPECT_EQ(0, acm_->RegisterSendCodec(codec));
+  }
+
+  void VerifyEncoding() override {
+    int last_length = packet_cb_.last_payload_len_bytes();
+    EXPECT_TRUE(last_length == 9 || last_length == 0)
+        << "Last encoded packet was " << last_length << " bytes.";
+  }
+
+  void DoTest(int blocks_per_packet, int cng_pt) {
+    const int kLoops = 40;
+    // This array defines the expected frame types, and when they should arrive.
+    // We expect a frame to arrive each time the speech encoder would have
+    // produced a packet, and once every 100 ms the frame should be non-empty,
+    // that is contain comfort noise.
+    const struct {
+      int ix;
+      FrameType type;
+    } expectation[] = {{2, kAudioFrameCN},
+                       {5, kEmptyFrame},
+                       {8, kEmptyFrame},
+                       {11, kAudioFrameCN},
+                       {14, kEmptyFrame},
+                       {17, kEmptyFrame},
+                       {20, kAudioFrameCN},
+                       {23, kEmptyFrame},
+                       {26, kEmptyFrame},
+                       {29, kEmptyFrame},
+                       {32, kAudioFrameCN},
+                       {35, kEmptyFrame},
+                       {38, kEmptyFrame}};
+    for (int i = 0; i < kLoops; ++i) {
+      int num_calls_before = packet_cb_.num_calls();
+      EXPECT_EQ(i / blocks_per_packet, num_calls_before);
+      InsertAudioAndVerifyEncoding();
+      int num_calls = packet_cb_.num_calls();
+      if (num_calls == num_calls_before + 1) {
+        EXPECT_EQ(expectation[num_calls - 1].ix, i);
+        EXPECT_EQ(expectation[num_calls - 1].type, packet_cb_.last_frame_type())
+            << "Wrong frame type for lap " << i;
+        EXPECT_EQ(cng_pt, packet_cb_.last_payload_type());
+      } else {
+        EXPECT_EQ(num_calls, num_calls_before);
+      }
+    }
+  }
+};
+
+// Checks that the transport callback is invoked once per frame period of the
+// underlying speech encoder, even when comfort noise is produced.
+// Also checks that the frame type is kAudioFrameCN or kEmptyFrame.
+// This test and the next check the same thing, but differ in the order of
+// speech codec and CNG registration.
+TEST_F(AudioCodingModuleTestWithComfortNoiseOldApi,
+       TransportCallbackTestForComfortNoiseRegisterCngLast) {
+  const int k10MsBlocksPerPacket = 3;
+  codec_.pacsize = k10MsBlocksPerPacket * kSampleRateHz / 100;
+  RegisterCodec();
+  const int kCngPayloadType = 105;
+  RegisterCngCodec(kCngPayloadType);
+  ASSERT_EQ(0, acm_->SetVAD(true, true));
+  DoTest(k10MsBlocksPerPacket, kCngPayloadType);
+}
+
+TEST_F(AudioCodingModuleTestWithComfortNoiseOldApi,
+       TransportCallbackTestForComfortNoiseRegisterCngFirst) {
+  const int k10MsBlocksPerPacket = 3;
+  codec_.pacsize = k10MsBlocksPerPacket * kSampleRateHz / 100;
+  const int kCngPayloadType = 105;
+  RegisterCngCodec(kCngPayloadType);
+  RegisterCodec();
+  ASSERT_EQ(0, acm_->SetVAD(true, true));
+  DoTest(k10MsBlocksPerPacket, kCngPayloadType);
+}
+
+// A multi-threaded test for ACM. This base class is using the PCM16b 16 kHz
+// codec, while the derive class AcmIsacMtTest is using iSAC.
+class AudioCodingModuleMtTestOldApi : public AudioCodingModuleTestOldApi {
+ protected:
+  static const int kNumPackets = 500;
+  static const int kNumPullCalls = 500;
+
+  AudioCodingModuleMtTestOldApi()
+      : AudioCodingModuleTestOldApi(),
+        send_thread_(CbSendThread, this, "send"),
+        insert_packet_thread_(CbInsertPacketThread, this, "insert_packet"),
+        pull_audio_thread_(CbPullAudioThread, this, "pull_audio"),
+        test_complete_(EventWrapper::Create()),
+        send_count_(0),
+        insert_packet_count_(0),
+        pull_audio_count_(0),
+        next_insert_packet_time_ms_(0),
+        fake_clock_(new SimulatedClock(0)) {
+    clock_ = fake_clock_.get();
+  }
+
+  void SetUp() {
+    AudioCodingModuleTestOldApi::SetUp();
+    RegisterCodec();  // Must be called before the threads start below.
+    StartThreads();
+  }
+
+  void StartThreads() {
+    send_thread_.Start();
+    send_thread_.SetPriority(rtc::kRealtimePriority);
+    insert_packet_thread_.Start();
+    insert_packet_thread_.SetPriority(rtc::kRealtimePriority);
+    pull_audio_thread_.Start();
+    pull_audio_thread_.SetPriority(rtc::kRealtimePriority);
+  }
+
+  void TearDown() {
+    AudioCodingModuleTestOldApi::TearDown();
+    pull_audio_thread_.Stop();
+    send_thread_.Stop();
+    insert_packet_thread_.Stop();
+  }
+
+  EventTypeWrapper RunTest() {
+    return test_complete_->Wait(10 * 60 * 1000);  // 10 minutes' timeout.
+  }
+
+  virtual bool TestDone() {
+    if (packet_cb_.num_calls() > kNumPackets) {
+      rtc::CritScope lock(&crit_sect_);
+      if (pull_audio_count_ > kNumPullCalls) {
+        // Both conditions for completion are met. End the test.
+        return true;
+      }
+    }
+    return false;
+  }
+
+  static bool CbSendThread(void* context) {
+    return reinterpret_cast<AudioCodingModuleMtTestOldApi*>(context)
+        ->CbSendImpl();
+  }
+
+  // The send thread doesn't have to care about the current simulated time,
+  // since only the AcmReceiver is using the clock.
+  bool CbSendImpl() {
+    SleepMs(1);
+    if (HasFatalFailure()) {
+      // End the test early if a fatal failure (ASSERT_*) has occurred.
+      test_complete_->Set();
+    }
+    ++send_count_;
+    InsertAudioAndVerifyEncoding();
+    if (TestDone()) {
+      test_complete_->Set();
+    }
+    return true;
+  }
+
+  static bool CbInsertPacketThread(void* context) {
+    return reinterpret_cast<AudioCodingModuleMtTestOldApi*>(context)
+        ->CbInsertPacketImpl();
+  }
+
+  bool CbInsertPacketImpl() {
+    SleepMs(1);
+    {
+      rtc::CritScope lock(&crit_sect_);
+      if (clock_->TimeInMilliseconds() < next_insert_packet_time_ms_) {
+        return true;
+      }
+      next_insert_packet_time_ms_ += 10;
+    }
+    // Now we're not holding the crit sect when calling ACM.
+    ++insert_packet_count_;
+    InsertPacket();
+    return true;
+  }
+
+  static bool CbPullAudioThread(void* context) {
+    return reinterpret_cast<AudioCodingModuleMtTestOldApi*>(context)
+        ->CbPullAudioImpl();
+  }
+
+  bool CbPullAudioImpl() {
+    SleepMs(1);
+    {
+      rtc::CritScope lock(&crit_sect_);
+      // Don't let the insert thread fall behind.
+      if (next_insert_packet_time_ms_ < clock_->TimeInMilliseconds()) {
+        return true;
+      }
+      ++pull_audio_count_;
+    }
+    // Now we're not holding the crit sect when calling ACM.
+    PullAudio();
+    fake_clock_->AdvanceTimeMilliseconds(10);
+    return true;
+  }
+
+  rtc::PlatformThread send_thread_;
+  rtc::PlatformThread insert_packet_thread_;
+  rtc::PlatformThread pull_audio_thread_;
+  const std::unique_ptr<EventWrapper> test_complete_;
+  int send_count_;
+  int insert_packet_count_;
+  int pull_audio_count_ RTC_GUARDED_BY(crit_sect_);
+  rtc::CriticalSection crit_sect_;
+  int64_t next_insert_packet_time_ms_ RTC_GUARDED_BY(crit_sect_);
+  std::unique_ptr<SimulatedClock> fake_clock_;
+};
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_DoTest DISABLED_DoTest
+#else
+#define MAYBE_DoTest DoTest
+#endif
+TEST_F(AudioCodingModuleMtTestOldApi, MAYBE_DoTest) {
+  EXPECT_EQ(kEventSignaled, RunTest());
+}
+
+// This is a multi-threaded ACM test using iSAC. The test encodes audio
+// from a PCM file. The most recent encoded frame is used as input to the
+// receiving part. Depending on timing, it may happen that the same RTP packet
+// is inserted into the receiver multiple times, but this is a valid use-case,
+// and simplifies the test code a lot.
+class AcmIsacMtTestOldApi : public AudioCodingModuleMtTestOldApi {
+ protected:
+  static const int kNumPackets = 500;
+  static const int kNumPullCalls = 500;
+
+  AcmIsacMtTestOldApi()
+      : AudioCodingModuleMtTestOldApi(), last_packet_number_(0) {}
+
+  ~AcmIsacMtTestOldApi() {}
+
+  void SetUp() override {
+    AudioCodingModuleTestOldApi::SetUp();
+    RegisterCodec();  // Must be called before the threads start below.
+
+    // Set up input audio source to read from specified file, loop after 5
+    // seconds, and deliver blocks of 10 ms.
+    const std::string input_file_name =
+        webrtc::test::ResourcePath("audio_coding/speech_mono_16kHz", "pcm");
+    audio_loop_.Init(input_file_name, 5 * kSampleRateHz, kNumSamples10ms);
+
+    // Generate one packet to have something to insert.
+    int loop_counter = 0;
+    while (packet_cb_.last_payload_len_bytes() == 0) {
+      InsertAudio();
+      ASSERT_LT(loop_counter++, 10);
+    }
+    // Set |last_packet_number_| to one less that |num_calls| so that the packet
+    // will be fetched in the next InsertPacket() call.
+    last_packet_number_ = packet_cb_.num_calls() - 1;
+
+    StartThreads();
+  }
+
+  void RegisterCodec() override {
+    static_assert(kSampleRateHz == 16000, "test designed for iSAC 16 kHz");
+    audio_format_ = SdpAudioFormat("isac", kSampleRateHz, 1);
+    AudioCodingModule::Codec("ISAC", &codec_, kSampleRateHz, 1);
+    codec_.pltype = kPayloadType;
+
+    // Register iSAC codec in ACM, effectively unregistering the PCM16B codec
+    // registered in AudioCodingModuleTestOldApi::SetUp();
+    EXPECT_EQ(true, acm_->RegisterReceiveCodec(kPayloadType, *audio_format_));
+    EXPECT_EQ(0, acm_->RegisterSendCodec(codec_));
+  }
+
+  void InsertPacket() override {
+    int num_calls = packet_cb_.num_calls();  // Store locally for thread safety.
+    if (num_calls > last_packet_number_) {
+      // Get the new payload out from the callback handler.
+      // Note that since we swap buffers here instead of directly inserting
+      // a pointer to the data in |packet_cb_|, we avoid locking the callback
+      // for the duration of the IncomingPacket() call.
+      packet_cb_.SwapBuffers(&last_payload_vec_);
+      ASSERT_GT(last_payload_vec_.size(), 0u);
+      rtp_utility_->Forward(&rtp_header_);
+      last_packet_number_ = num_calls;
+    }
+    ASSERT_GT(last_payload_vec_.size(), 0u);
+    ASSERT_EQ(
+        0,
+        acm_->IncomingPacket(
+            &last_payload_vec_[0], last_payload_vec_.size(), rtp_header_));
+  }
+
+  void InsertAudio() override {
+    // TODO(kwiberg): Use std::copy here. Might be complications because AFAICS
+    // this call confuses the number of samples with the number of bytes, and
+    // ends up copying only half of what it should.
+    memcpy(input_frame_.mutable_data(), audio_loop_.GetNextBlock().data(),
+           kNumSamples10ms);
+    AudioCodingModuleTestOldApi::InsertAudio();
+  }
+
+  // Override the verification function with no-op, since iSAC produces variable
+  // payload sizes.
+  void VerifyEncoding() override {}
+
+  // This method is the same as AudioCodingModuleMtTestOldApi::TestDone(), but
+  // here it is using the constants defined in this class (i.e., shorter test
+  // run).
+  bool TestDone() override {
+    if (packet_cb_.num_calls() > kNumPackets) {
+      rtc::CritScope lock(&crit_sect_);
+      if (pull_audio_count_ > kNumPullCalls) {
+        // Both conditions for completion are met. End the test.
+        return true;
+      }
+    }
+    return false;
+  }
+
+  int last_packet_number_;
+  std::vector<uint8_t> last_payload_vec_;
+  test::AudioLoop audio_loop_;
+};
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_DoTest DISABLED_DoTest
+#else
+#define MAYBE_DoTest DoTest
+#endif
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+TEST_F(AcmIsacMtTestOldApi, MAYBE_DoTest) {
+  EXPECT_EQ(kEventSignaled, RunTest());
+}
+#endif
+
+class AcmReRegisterIsacMtTestOldApi : public AudioCodingModuleTestOldApi {
+ protected:
+  static const int kRegisterAfterNumPackets = 5;
+  static const int kNumPackets = 10;
+  static const int kPacketSizeMs = 30;
+  static const int kPacketSizeSamples = kPacketSizeMs * 16;
+
+  AcmReRegisterIsacMtTestOldApi()
+      : AudioCodingModuleTestOldApi(),
+        receive_thread_(CbReceiveThread, this, "receive"),
+        codec_registration_thread_(CbCodecRegistrationThread,
+                                   this,
+                                   "codec_registration"),
+        test_complete_(EventWrapper::Create()),
+        codec_registered_(false),
+        receive_packet_count_(0),
+        next_insert_packet_time_ms_(0),
+        fake_clock_(new SimulatedClock(0)) {
+    AudioEncoderIsacFloatImpl::Config config;
+    config.payload_type = kPayloadType;
+    isac_encoder_.reset(new AudioEncoderIsacFloatImpl(config));
+    clock_ = fake_clock_.get();
+  }
+
+  void SetUp() override {
+    AudioCodingModuleTestOldApi::SetUp();
+    // Set up input audio source to read from specified file, loop after 5
+    // seconds, and deliver blocks of 10 ms.
+    const std::string input_file_name =
+        webrtc::test::ResourcePath("audio_coding/speech_mono_16kHz", "pcm");
+    audio_loop_.Init(input_file_name, 5 * kSampleRateHz, kNumSamples10ms);
+    RegisterCodec();  // Must be called before the threads start below.
+    StartThreads();
+  }
+
+  void RegisterCodec() override {
+    static_assert(kSampleRateHz == 16000, "test designed for iSAC 16 kHz");
+    AudioCodingModule::Codec("ISAC", &codec_, kSampleRateHz, 1);
+    codec_.pltype = kPayloadType;
+
+    // Register iSAC codec in ACM, effectively unregistering the PCM16B codec
+    // registered in AudioCodingModuleTestOldApi::SetUp();
+    // Only register the decoder for now. The encoder is registered later.
+    ASSERT_EQ(true, acm_->RegisterReceiveCodec(codec_.pltype,
+                                               CodecInstToSdp(codec_)));
+  }
+
+  void StartThreads() {
+    receive_thread_.Start();
+    receive_thread_.SetPriority(rtc::kRealtimePriority);
+    codec_registration_thread_.Start();
+    codec_registration_thread_.SetPriority(rtc::kRealtimePriority);
+  }
+
+  void TearDown() override {
+    AudioCodingModuleTestOldApi::TearDown();
+    receive_thread_.Stop();
+    codec_registration_thread_.Stop();
+  }
+
+  EventTypeWrapper RunTest() {
+    return test_complete_->Wait(10 * 60 * 1000);  // 10 minutes' timeout.
+  }
+
+  static bool CbReceiveThread(void* context) {
+    return reinterpret_cast<AcmReRegisterIsacMtTestOldApi*>(context)
+        ->CbReceiveImpl();
+  }
+
+  bool CbReceiveImpl() {
+    SleepMs(1);
+    rtc::Buffer encoded;
+    AudioEncoder::EncodedInfo info;
+    {
+      rtc::CritScope lock(&crit_sect_);
+      if (clock_->TimeInMilliseconds() < next_insert_packet_time_ms_) {
+        return true;
+      }
+      next_insert_packet_time_ms_ += kPacketSizeMs;
+      ++receive_packet_count_;
+
+      // Encode new frame.
+      uint32_t input_timestamp = rtp_header_.header.timestamp;
+      while (info.encoded_bytes == 0) {
+        info =
+            isac_encoder_->Encode(input_timestamp, audio_loop_.GetNextBlock(),
+                                  &encoded);
+        input_timestamp += 160;  // 10 ms at 16 kHz.
+      }
+      EXPECT_EQ(rtp_header_.header.timestamp + kPacketSizeSamples,
+                input_timestamp);
+      EXPECT_EQ(rtp_header_.header.timestamp, info.encoded_timestamp);
+      EXPECT_EQ(rtp_header_.header.payloadType, info.payload_type);
+    }
+    // Now we're not holding the crit sect when calling ACM.
+
+    // Insert into ACM.
+    EXPECT_EQ(0, acm_->IncomingPacket(encoded.data(), info.encoded_bytes,
+                                      rtp_header_));
+
+    // Pull audio.
+    for (int i = 0; i < rtc::CheckedDivExact(kPacketSizeMs, 10); ++i) {
+      AudioFrame audio_frame;
+      bool muted;
+      EXPECT_EQ(0, acm_->PlayoutData10Ms(-1 /* default output frequency */,
+                                         &audio_frame, &muted));
+      if (muted) {
+        ADD_FAILURE();
+        return false;
+      }
+      fake_clock_->AdvanceTimeMilliseconds(10);
+    }
+    rtp_utility_->Forward(&rtp_header_);
+    return true;
+  }
+
+  static bool CbCodecRegistrationThread(void* context) {
+    return reinterpret_cast<AcmReRegisterIsacMtTestOldApi*>(context)
+        ->CbCodecRegistrationImpl();
+  }
+
+  bool CbCodecRegistrationImpl() {
+    SleepMs(1);
+    if (HasFatalFailure()) {
+      // End the test early if a fatal failure (ASSERT_*) has occurred.
+      test_complete_->Set();
+    }
+    rtc::CritScope lock(&crit_sect_);
+    if (!codec_registered_ &&
+        receive_packet_count_ > kRegisterAfterNumPackets) {
+      // Register the iSAC encoder.
+      EXPECT_EQ(0, acm_->RegisterSendCodec(codec_));
+      codec_registered_ = true;
+    }
+    if (codec_registered_ && receive_packet_count_ > kNumPackets) {
+      test_complete_->Set();
+    }
+    return true;
+  }
+
+  rtc::PlatformThread receive_thread_;
+  rtc::PlatformThread codec_registration_thread_;
+  const std::unique_ptr<EventWrapper> test_complete_;
+  rtc::CriticalSection crit_sect_;
+  bool codec_registered_ RTC_GUARDED_BY(crit_sect_);
+  int receive_packet_count_ RTC_GUARDED_BY(crit_sect_);
+  int64_t next_insert_packet_time_ms_ RTC_GUARDED_BY(crit_sect_);
+  std::unique_ptr<AudioEncoderIsacFloatImpl> isac_encoder_;
+  std::unique_ptr<SimulatedClock> fake_clock_;
+  test::AudioLoop audio_loop_;
+};
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_DoTest DISABLED_DoTest
+#else
+#define MAYBE_DoTest DoTest
+#endif
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+TEST_F(AcmReRegisterIsacMtTestOldApi, MAYBE_DoTest) {
+  EXPECT_EQ(kEventSignaled, RunTest());
+}
+#endif
+
+// Disabling all of these tests on iOS until file support has been added.
+// See https://code.google.com/p/webrtc/issues/detail?id=4752 for details.
+#if !defined(WEBRTC_IOS)
+
+class AcmReceiverBitExactnessOldApi : public ::testing::Test {
+ public:
+  static std::string PlatformChecksum(std::string others,
+                                      std::string win64,
+                                      std::string android_arm32,
+                                      std::string android_arm64,
+                                      std::string android_arm64_clang) {
+#if defined(_WIN32) && defined(WEBRTC_ARCH_64_BITS)
+    return win64;
+#elif defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM)
+    return android_arm32;
+#elif defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
+#if defined(__clang__)
+    // Android ARM64 with Clang compiler
+    return android_arm64_clang;
+#else
+    // Android ARM64 with non-Clang compiler
+    return android_arm64;
+#endif  // __clang__
+#else
+    return others;
+#endif
+  }
+
+ protected:
+  struct ExternalDecoder {
+    int rtp_payload_type;
+    AudioDecoder* external_decoder;
+    int sample_rate_hz;
+    int num_channels;
+    std::string name;
+  };
+
+  void Run(int output_freq_hz, const std::string& checksum_ref) {
+    Run(output_freq_hz, checksum_ref, CreateBuiltinAudioDecoderFactory(),
+        [](AudioCodingModule*) {});
+  }
+
+  void Run(int output_freq_hz,
+           const std::string& checksum_ref,
+           rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
+           rtc::FunctionView<void(AudioCodingModule*)> decoder_reg) {
+    const std::string input_file_name =
+        webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp");
+    std::unique_ptr<test::RtpFileSource> packet_source(
+        test::RtpFileSource::Create(input_file_name));
+#ifdef WEBRTC_ANDROID
+    // Filter out iLBC and iSAC-swb since they are not supported on Android.
+    packet_source->FilterOutPayloadType(102);  // iLBC.
+    packet_source->FilterOutPayloadType(104);  // iSAC-swb.
+#endif
+
+    test::AudioChecksum checksum;
+    const std::string output_file_name =
+        webrtc::test::OutputPath() +
+        ::testing::UnitTest::GetInstance()
+            ->current_test_info()
+            ->test_case_name() +
+        "_" + ::testing::UnitTest::GetInstance()->current_test_info()->name() +
+        "_output.wav";
+    test::OutputWavFile output_file(output_file_name, output_freq_hz);
+    test::AudioSinkFork output(&checksum, &output_file);
+
+    test::AcmReceiveTestOldApi test(
+        packet_source.get(), &output, output_freq_hz,
+        test::AcmReceiveTestOldApi::kArbitraryChannels,
+        std::move(decoder_factory));
+    ASSERT_NO_FATAL_FAILURE(test.RegisterNetEqTestCodecs());
+    decoder_reg(test.get_acm());
+    test.Run();
+
+    std::string checksum_string = checksum.Finish();
+    EXPECT_EQ(checksum_ref, checksum_string);
+
+    // Delete the output file.
+    remove(output_file_name.c_str());
+  }
+};
+
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \
+    defined(WEBRTC_CODEC_ILBC)
+TEST_F(AcmReceiverBitExactnessOldApi, 8kHzOutput) {
+  Run(8000, PlatformChecksum("2adede965c6f87de7142c51552111d08",
+                             "028c0fc414b1c9ab7e582dccdf381e98",
+                             "36c95170c1393d4b765d1c17b61ef977",
+                             "4598140b5e4f7ee66c5adad609e65a3e",
+                             "bac5db6dff44323be401060f1279a532"));
+}
+
+TEST_F(AcmReceiverBitExactnessOldApi, 16kHzOutput) {
+  Run(16000, PlatformChecksum("c2550a3db7632de409e8db0093df1c12",
+                              "edd31f4b6665cd5b9041fb93f2316594",
+                              "22128bca51650cb61c80bed63b595603",
+                              "f2aad418af974a3b1694d5ae5cc2c3c7",
+                              "61c3cb9386b9503feebcb829c9be54bd"));
+}
+
+TEST_F(AcmReceiverBitExactnessOldApi, 32kHzOutput) {
+  Run(32000, PlatformChecksum("85e28d7950132d56f90b099c90f82153",
+                              "7b903f5c89997f271b405e63c245ef45",
+                              "8b8fc6c6fd1dcdcfb3dd90e1ce597f10",
+                              "100869c8dcde51346c2073e52a272d98",
+                              "fdec5301dc649a47d407382b587e14da"));
+}
+
+TEST_F(AcmReceiverBitExactnessOldApi, 48kHzOutput) {
+  Run(48000, PlatformChecksum("ab611510e8fd6d5210a23cc04d3f0e8e",
+                              "d8609bc9b495d81f29779344c68bcc47",
+                              "ec5ebb90cda0ea5bb89e79d698af65de",
+                              "bd44bf97e7899186532f91235cef444d",
+                              "0baae2972cca142027d4af44f95f0bd5"));
+}
+
+TEST_F(AcmReceiverBitExactnessOldApi, 48kHzOutputExternalDecoder) {
+  class ADFactory : public AudioDecoderFactory {
+   public:
+    ADFactory()
+        : mock_decoder_(new MockAudioDecoder()),
+          pcmu_decoder_(1),
+          decode_forwarder_(&pcmu_decoder_),
+          fact_(CreateBuiltinAudioDecoderFactory()) {
+      // Set expectations on the mock decoder and also delegate the calls to
+      // the real decoder.
+      EXPECT_CALL(*mock_decoder_, IncomingPacket(_, _, _, _, _))
+          .Times(AtLeast(1))
+          .WillRepeatedly(
+              Invoke(&pcmu_decoder_, &AudioDecoderPcmU::IncomingPacket));
+      EXPECT_CALL(*mock_decoder_, SampleRateHz())
+          .Times(AtLeast(1))
+          .WillRepeatedly(
+              Invoke(&pcmu_decoder_, &AudioDecoderPcmU::SampleRateHz));
+      EXPECT_CALL(*mock_decoder_, Channels())
+          .Times(AtLeast(1))
+          .WillRepeatedly(Invoke(&pcmu_decoder_, &AudioDecoderPcmU::Channels));
+      EXPECT_CALL(*mock_decoder_, DecodeInternal(_, _, _, _, _))
+          .Times(AtLeast(1))
+          .WillRepeatedly(Invoke(&decode_forwarder_, &DecodeForwarder::Decode));
+      EXPECT_CALL(*mock_decoder_, HasDecodePlc())
+          .Times(AtLeast(1))
+          .WillRepeatedly(
+              Invoke(&pcmu_decoder_, &AudioDecoderPcmU::HasDecodePlc));
+      EXPECT_CALL(*mock_decoder_, PacketDuration(_, _))
+          .Times(AtLeast(1))
+          .WillRepeatedly(
+              Invoke(&pcmu_decoder_, &AudioDecoderPcmU::PacketDuration));
+      EXPECT_CALL(*mock_decoder_, Die());
+    }
+    std::vector<AudioCodecSpec> GetSupportedDecoders() override {
+      return fact_->GetSupportedDecoders();
+    }
+    bool IsSupportedDecoder(const SdpAudioFormat& format) override {
+      return format.name == "MockPCMu" ? true
+                                       : fact_->IsSupportedDecoder(format);
+    }
+    std::unique_ptr<AudioDecoder> MakeAudioDecoder(
+        const SdpAudioFormat& format,
+        rtc::Optional<AudioCodecPairId> codec_pair_id) override {
+      return format.name == "MockPCMu"
+                 ? std::move(mock_decoder_)
+                 : fact_->MakeAudioDecoder(format, codec_pair_id);
+    }
+
+   private:
+    // Class intended to forward a call from a mock DecodeInternal to Decode on
+    // the real decoder's Decode. DecodeInternal for the real decoder isn't
+    // public.
+    class DecodeForwarder {
+     public:
+      explicit DecodeForwarder(AudioDecoder* decoder) : decoder_(decoder) {}
+      int Decode(const uint8_t* encoded,
+                 size_t encoded_len,
+                 int sample_rate_hz,
+                 int16_t* decoded,
+                 AudioDecoder::SpeechType* speech_type) {
+        return decoder_->Decode(encoded, encoded_len, sample_rate_hz,
+                                decoder_->PacketDuration(encoded, encoded_len) *
+                                    decoder_->Channels() * sizeof(int16_t),
+                                decoded, speech_type);
+      }
+
+     private:
+      AudioDecoder* const decoder_;
+    };
+
+    std::unique_ptr<MockAudioDecoder> mock_decoder_;
+    AudioDecoderPcmU pcmu_decoder_;
+    DecodeForwarder decode_forwarder_;
+    rtc::scoped_refptr<AudioDecoderFactory> fact_;  // Fallback factory.
+  };
+
+  rtc::scoped_refptr<rtc::RefCountedObject<ADFactory>> factory(
+      new rtc::RefCountedObject<ADFactory>);
+  Run(48000, PlatformChecksum("ab611510e8fd6d5210a23cc04d3f0e8e",
+                              "d8609bc9b495d81f29779344c68bcc47",
+                              "ec5ebb90cda0ea5bb89e79d698af65de",
+                              "bd44bf97e7899186532f91235cef444d",
+                              "0baae2972cca142027d4af44f95f0bd5"),
+      factory, [](AudioCodingModule* acm) {
+        acm->RegisterReceiveCodec(0, {"MockPCMu", 8000, 1});
+      });
+}
+#endif
+
+// This test verifies bit exactness for the send-side of ACM. The test setup is
+// a chain of three different test classes:
+//
+// test::AcmSendTest -> AcmSenderBitExactness -> test::AcmReceiveTest
+//
+// The receiver side is driving the test by requesting new packets from
+// AcmSenderBitExactness::NextPacket(). This method, in turn, asks for the
+// packet from test::AcmSendTest::NextPacket, which inserts audio from the
+// input file until one packet is produced. (The input file loops indefinitely.)
+// Before passing the packet to the receiver, this test class verifies the
+// packet header and updates a payload checksum with the new payload. The
+// decoded output from the receiver is also verified with a (separate) checksum.
+class AcmSenderBitExactnessOldApi : public ::testing::Test,
+                                    public test::PacketSource {
+ protected:
+  static const int kTestDurationMs = 1000;
+
+  AcmSenderBitExactnessOldApi()
+      : frame_size_rtp_timestamps_(0),
+        packet_count_(0),
+        payload_type_(0),
+        last_sequence_number_(0),
+        last_timestamp_(0),
+        payload_checksum_(rtc::MessageDigestFactory::Create(rtc::DIGEST_MD5)) {}
+
+  // Sets up the test::AcmSendTest object. Returns true on success, otherwise
+  // false.
+  bool SetUpSender() {
+    const std::string input_file_name =
+        webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+    // Note that |audio_source_| will loop forever. The test duration is set
+    // explicitly by |kTestDurationMs|.
+    audio_source_.reset(new test::InputAudioFile(input_file_name));
+    static const int kSourceRateHz = 32000;
+    send_test_.reset(new test::AcmSendTestOldApi(
+        audio_source_.get(), kSourceRateHz, kTestDurationMs));
+    return send_test_.get() != NULL;
+  }
+
+  // Registers a send codec in the test::AcmSendTest object. Returns true on
+  // success, false on failure.
+  bool RegisterSendCodec(const char* payload_name,
+                         int sampling_freq_hz,
+                         int channels,
+                         int payload_type,
+                         int frame_size_samples,
+                         int frame_size_rtp_timestamps) {
+    payload_type_ = payload_type;
+    frame_size_rtp_timestamps_ = frame_size_rtp_timestamps;
+    return send_test_->RegisterCodec(payload_name,
+                                     sampling_freq_hz,
+                                     channels,
+                                     payload_type,
+                                     frame_size_samples);
+  }
+
+  bool RegisterExternalSendCodec(AudioEncoder* external_speech_encoder,
+                                 int payload_type) {
+    payload_type_ = payload_type;
+    frame_size_rtp_timestamps_ = rtc::checked_cast<uint32_t>(
+        external_speech_encoder->Num10MsFramesInNextPacket() *
+        external_speech_encoder->RtpTimestampRateHz() / 100);
+    return send_test_->RegisterExternalCodec(external_speech_encoder);
+  }
+
+  // Runs the test. SetUpSender() and RegisterSendCodec() must have been called
+  // before calling this method.
+  void Run(const std::string& audio_checksum_ref,
+           const std::string& payload_checksum_ref,
+           int expected_packets,
+           test::AcmReceiveTestOldApi::NumOutputChannels expected_channels) {
+    // Set up the receiver used to decode the packets and verify the decoded
+    // output.
+    test::AudioChecksum audio_checksum;
+    const std::string output_file_name =
+        webrtc::test::OutputPath() +
+        ::testing::UnitTest::GetInstance()
+            ->current_test_info()
+            ->test_case_name() +
+        "_" + ::testing::UnitTest::GetInstance()->current_test_info()->name() +
+        "_output.wav";
+    const int kOutputFreqHz = 8000;
+    test::OutputWavFile output_file(output_file_name, kOutputFreqHz);
+    // Have the output audio sent both to file and to the checksum calculator.
+    test::AudioSinkFork output(&audio_checksum, &output_file);
+    test::AcmReceiveTestOldApi receive_test(this, &output, kOutputFreqHz,
+                                            expected_channels,
+                                            CreateBuiltinAudioDecoderFactory());
+    ASSERT_NO_FATAL_FAILURE(receive_test.RegisterDefaultCodecs());
+
+    // This is where the actual test is executed.
+    receive_test.Run();
+
+    // Extract and verify the audio checksum.
+    std::string checksum_string = audio_checksum.Finish();
+    EXPECT_EQ(audio_checksum_ref, checksum_string);
+
+    // Extract and verify the payload checksum.
+    rtc::Buffer checksum_result(payload_checksum_->Size());
+    payload_checksum_->Finish(checksum_result.data(), checksum_result.size());
+    checksum_string =
+        rtc::hex_encode(checksum_result.data<char>(), checksum_result.size());
+    EXPECT_EQ(payload_checksum_ref, checksum_string);
+
+    // Verify number of packets produced.
+    EXPECT_EQ(expected_packets, packet_count_);
+
+    // Delete the output file.
+    remove(output_file_name.c_str());
+  }
+
+  // Inherited from test::PacketSource.
+  std::unique_ptr<test::Packet> NextPacket() override {
+    auto packet = send_test_->NextPacket();
+    if (!packet)
+      return NULL;
+
+    VerifyPacket(packet.get());
+    // TODO(henrik.lundin) Save the packet to file as well.
+
+    // Pass it on to the caller. The caller becomes the owner of |packet|.
+    return packet;
+  }
+
+  // Verifies the packet.
+  void VerifyPacket(const test::Packet* packet) {
+    EXPECT_TRUE(packet->valid_header());
+    // (We can check the header fields even if valid_header() is false.)
+    EXPECT_EQ(payload_type_, packet->header().payloadType);
+    if (packet_count_ > 0) {
+      // This is not the first packet.
+      uint16_t sequence_number_diff =
+          packet->header().sequenceNumber - last_sequence_number_;
+      EXPECT_EQ(1, sequence_number_diff);
+      uint32_t timestamp_diff = packet->header().timestamp - last_timestamp_;
+      EXPECT_EQ(frame_size_rtp_timestamps_, timestamp_diff);
+    }
+    ++packet_count_;
+    last_sequence_number_ = packet->header().sequenceNumber;
+    last_timestamp_ = packet->header().timestamp;
+    // Update the checksum.
+    payload_checksum_->Update(packet->payload(),
+                              packet->payload_length_bytes());
+  }
+
+  void SetUpTest(const char* codec_name,
+                 int codec_sample_rate_hz,
+                 int channels,
+                 int payload_type,
+                 int codec_frame_size_samples,
+                 int codec_frame_size_rtp_timestamps) {
+    ASSERT_TRUE(SetUpSender());
+    ASSERT_TRUE(RegisterSendCodec(codec_name,
+                                  codec_sample_rate_hz,
+                                  channels,
+                                  payload_type,
+                                  codec_frame_size_samples,
+                                  codec_frame_size_rtp_timestamps));
+  }
+
+  void SetUpTestExternalEncoder(AudioEncoder* external_speech_encoder,
+                                int payload_type) {
+    ASSERT_TRUE(SetUpSender());
+    ASSERT_TRUE(
+        RegisterExternalSendCodec(external_speech_encoder, payload_type));
+  }
+
+  std::unique_ptr<test::AcmSendTestOldApi> send_test_;
+  std::unique_ptr<test::InputAudioFile> audio_source_;
+  uint32_t frame_size_rtp_timestamps_;
+  int packet_count_;
+  uint8_t payload_type_;
+  uint16_t last_sequence_number_;
+  uint32_t last_timestamp_;
+  std::unique_ptr<rtc::MessageDigest> payload_checksum_;
+};
+
+class AcmSenderBitExactnessNewApi : public AcmSenderBitExactnessOldApi {};
+
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+TEST_F(AcmSenderBitExactnessOldApi, IsacWb30ms) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 16000, 1, 103, 480, 480));
+  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
+          "2c9cb15d4ed55b5a0cadd04883bc73b0",
+          "9336a9b993cbd8a751f0e8958e66c89c",
+          "bd4682225f7c4ad5f2049f6769713ac2",
+          "343f1f42be0607c61e6516aece424609",
+          "2c9cb15d4ed55b5a0cadd04883bc73b0"),
+      AcmReceiverBitExactnessOldApi::PlatformChecksum(
+          "3c79f16f34218271f3dca4e2b1dfe1bb",
+          "d42cb5195463da26c8129bbfe73a22e6",
+          "83de248aea9c3c2bd680b6952401b4ca",
+          "3c79f16f34218271f3dca4e2b1dfe1bb",
+          "3c79f16f34218271f3dca4e2b1dfe1bb"),
+      33, test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, IsacWb60ms) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 16000, 1, 103, 960, 960));
+  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
+          "1ad29139a04782a33daad8c2b9b35875",
+          "14d63c5f08127d280e722e3191b73bdd",
+          "edcf26694c289e3d9691faf79b74f09f",
+          "ef75e900e6f375e3061163c53fd09a63",
+          "1ad29139a04782a33daad8c2b9b35875"),
+      AcmReceiverBitExactnessOldApi::PlatformChecksum(
+          "9e0a0ab743ad987b55b8e14802769c56",
+          "ebe04a819d3a9d83a83a17f271e1139a",
+          "97aeef98553b5a4b5a68f8b716e8eaf0",
+          "9e0a0ab743ad987b55b8e14802769c56",
+          "9e0a0ab743ad987b55b8e14802769c56"),
+      16, test::AcmReceiveTestOldApi::kMonoOutput);
+}
+#endif
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_IsacSwb30ms DISABLED_IsacSwb30ms
+#else
+#define MAYBE_IsacSwb30ms IsacSwb30ms
+#endif
+#if defined(WEBRTC_CODEC_ISAC)
+TEST_F(AcmSenderBitExactnessOldApi, MAYBE_IsacSwb30ms) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 32000, 1, 104, 960, 960));
+  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
+          "5683b58da0fbf2063c7adc2e6bfb3fb8",
+          "2b3c387d06f00b7b7aad4c9be56fb83d", "android_arm32_audio",
+          "android_arm64_audio", "android_arm64_clang_audio"),
+      AcmReceiverBitExactnessOldApi::PlatformChecksum(
+          "ce86106a93419aefb063097108ec94ab",
+          "bcc2041e7744c7ebd9f701866856849c", "android_arm32_payload",
+          "android_arm64_payload", "android_arm64_clang_payload"),
+      33, test::AcmReceiveTestOldApi::kMonoOutput);
+}
+#endif
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcm16_8000khz_10ms) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 1, 107, 80, 80));
+  Run("de4a98e1406f8b798d99cd0704e862e2",
+      "c1edd36339ce0326cc4550041ad719a0",
+      100,
+      test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcm16_16000khz_10ms) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 16000, 1, 108, 160, 160));
+  Run("ae646d7b68384a1269cc080dd4501916",
+      "ad786526383178b08d80d6eee06e9bad",
+      100,
+      test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcm16_32000khz_10ms) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 32000, 1, 109, 320, 320));
+  Run("7fe325e8fbaf755e3c5df0b11a4774fb",
+      "5ef82ea885e922263606c6fdbc49f651",
+      100,
+      test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcm16_stereo_8000khz_10ms) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 2, 111, 80, 80));
+  Run("fb263b74e7ac3de915474d77e4744ceb",
+      "62ce5adb0d4965d0a52ec98ae7f98974",
+      100,
+      test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcm16_stereo_16000khz_10ms) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 16000, 2, 112, 160, 160));
+  Run("d09e9239553649d7ac93e19d304281fd",
+      "41ca8edac4b8c71cd54fd9f25ec14870",
+      100,
+      test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcm16_stereo_32000khz_10ms) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 32000, 2, 113, 320, 320));
+  Run("5f025d4f390982cc26b3d92fe02e3044",
+      "50e58502fb04421bf5b857dda4c96879",
+      100,
+      test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcmu_20ms) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMU", 8000, 1, 0, 160, 160));
+  Run("81a9d4c0bb72e9becc43aef124c981e9",
+      "8f9b8750bd80fe26b6cbf6659b89f0f9",
+      50,
+      test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcma_20ms) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMA", 8000, 1, 8, 160, 160));
+  Run("39611f798969053925a49dc06d08de29",
+      "6ad745e55aa48981bfc790d0eeef2dd1",
+      50,
+      test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcmu_stereo_20ms) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMU", 8000, 2, 110, 160, 160));
+  Run("437bec032fdc5cbaa0d5175430af7b18",
+      "60b6f25e8d1e74cb679cfe756dd9bca5",
+      50,
+      test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Pcma_stereo_20ms) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMA", 8000, 2, 118, 160, 160));
+  Run("a5c6d83c5b7cedbeff734238220a4b0c",
+      "92b282c83efd20e7eeef52ba40842cf7",
+      50,
+      test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_Ilbc_30ms DISABLED_Ilbc_30ms
+#else
+#define MAYBE_Ilbc_30ms Ilbc_30ms
+#endif
+#if defined(WEBRTC_CODEC_ILBC)
+TEST_F(AcmSenderBitExactnessOldApi, MAYBE_Ilbc_30ms) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("ILBC", 8000, 1, 102, 240, 240));
+  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
+          "7b6ec10910debd9af08011d3ed5249f7",
+          "7b6ec10910debd9af08011d3ed5249f7", "android_arm32_audio",
+          "android_arm64_audio", "android_arm64_clang_audio"),
+      AcmReceiverBitExactnessOldApi::PlatformChecksum(
+          "cfae2e9f6aba96e145f2bcdd5050ce78",
+          "cfae2e9f6aba96e145f2bcdd5050ce78", "android_arm32_payload",
+          "android_arm64_payload", "android_arm64_clang_payload"),
+      33, test::AcmReceiveTestOldApi::kMonoOutput);
+}
+#endif
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_G722_20ms DISABLED_G722_20ms
+#else
+#define MAYBE_G722_20ms G722_20ms
+#endif
+TEST_F(AcmSenderBitExactnessOldApi, MAYBE_G722_20ms) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("G722", 16000, 1, 9, 320, 160));
+  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
+          "e99c89be49a46325d03c0d990c292d68",
+          "e99c89be49a46325d03c0d990c292d68", "android_arm32_audio",
+          "android_arm64_audio", "android_arm64_clang_audio"),
+      AcmReceiverBitExactnessOldApi::PlatformChecksum(
+          "fc68a87e1380614e658087cb35d5ca10",
+          "fc68a87e1380614e658087cb35d5ca10", "android_arm32_payload",
+          "android_arm64_payload", "android_arm64_clang_payload"),
+      50, test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_G722_stereo_20ms DISABLED_G722_stereo_20ms
+#else
+#define MAYBE_G722_stereo_20ms G722_stereo_20ms
+#endif
+TEST_F(AcmSenderBitExactnessOldApi, MAYBE_G722_stereo_20ms) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("G722", 16000, 2, 119, 320, 160));
+  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
+          "e280aed283e499d37091b481ca094807",
+          "e280aed283e499d37091b481ca094807", "android_arm32_audio",
+          "android_arm64_audio", "android_arm64_clang_audio"),
+      AcmReceiverBitExactnessOldApi::PlatformChecksum(
+          "66516152eeaa1e650ad94ff85f668dac",
+          "66516152eeaa1e650ad94ff85f668dac", "android_arm32_payload",
+          "android_arm64_payload", "android_arm64_clang_payload"),
+      50, test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Opus_stereo_20ms) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 2, 120, 960, 960));
+  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
+          "3e285b74510e62062fbd8142dacd16e9",
+          "3e285b74510e62062fbd8142dacd16e9",
+          "439e97ad1932c49923b5da029c17dd5e",
+          "038ec90f5f3fc2320f3090f8ecef6bb7",
+          "038ec90f5f3fc2320f3090f8ecef6bb7"),
+      AcmReceiverBitExactnessOldApi::PlatformChecksum(
+          "78cf8f03157358acdc69f6835caa0d9b",
+          "78cf8f03157358acdc69f6835caa0d9b",
+          "ab88b1a049c36bdfeb7e8b057ef6982a",
+          "27fef7b799393347ec3b5694369a1c36",
+          "27fef7b799393347ec3b5694369a1c36"),
+      50, test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessNewApi, MAYBE_OpusFromFormat_stereo_20ms) {
+  const auto config = AudioEncoderOpus::SdpToConfig(
+      SdpAudioFormat("opus", 48000, 2, {{"stereo", "1"}}));
+  const auto encoder = AudioEncoderOpus::MakeAudioEncoder(*config, 120);
+  ASSERT_NO_FATAL_FAILURE(SetUpTestExternalEncoder(encoder.get(), 120));
+  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
+          "3e285b74510e62062fbd8142dacd16e9",
+          "3e285b74510e62062fbd8142dacd16e9",
+          "439e97ad1932c49923b5da029c17dd5e",
+          "038ec90f5f3fc2320f3090f8ecef6bb7",
+          "038ec90f5f3fc2320f3090f8ecef6bb7"),
+      AcmReceiverBitExactnessOldApi::PlatformChecksum(
+          "78cf8f03157358acdc69f6835caa0d9b",
+          "78cf8f03157358acdc69f6835caa0d9b",
+          "ab88b1a049c36bdfeb7e8b057ef6982a",
+          "27fef7b799393347ec3b5694369a1c36",
+          "27fef7b799393347ec3b5694369a1c36"),
+      50, test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, Opus_stereo_20ms_voip) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 2, 120, 960, 960));
+  // If not set, default will be kAudio in case of stereo.
+  EXPECT_EQ(0, send_test_->acm()->SetOpusApplication(kVoip));
+  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
+          "b0325df4e8104f04e03af23c0b75800e",
+          "b0325df4e8104f04e03af23c0b75800e",
+          "1c81121f5d9286a5a865d01dbab22ce8",
+          "11d547f89142e9ef03f37d7ca7f32379",
+          "11d547f89142e9ef03f37d7ca7f32379"),
+      AcmReceiverBitExactnessOldApi::PlatformChecksum(
+          "4eab2259b6fe24c22dd242a113e0b3d9",
+          "4eab2259b6fe24c22dd242a113e0b3d9",
+          "839ea60399447268ee0f0262a50b75fd",
+          "1815fd5589cad0c6f6cf946c76b81aeb",
+          "1815fd5589cad0c6f6cf946c76b81aeb"),
+      50, test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+TEST_F(AcmSenderBitExactnessNewApi, OpusFromFormat_stereo_20ms_voip) {
+  const auto config = AudioEncoderOpus::SdpToConfig(
+      SdpAudioFormat("opus", 48000, 2, {{"stereo", "1"}}));
+  const auto encoder = AudioEncoderOpus::MakeAudioEncoder(*config, 120);
+  ASSERT_NO_FATAL_FAILURE(SetUpTestExternalEncoder(encoder.get(), 120));
+  // If not set, default will be kAudio in case of stereo.
+  EXPECT_EQ(0, send_test_->acm()->SetOpusApplication(kVoip));
+  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
+          "b0325df4e8104f04e03af23c0b75800e",
+          "b0325df4e8104f04e03af23c0b75800e",
+          "1c81121f5d9286a5a865d01dbab22ce8",
+          "11d547f89142e9ef03f37d7ca7f32379",
+          "11d547f89142e9ef03f37d7ca7f32379"),
+      AcmReceiverBitExactnessOldApi::PlatformChecksum(
+          "4eab2259b6fe24c22dd242a113e0b3d9",
+          "4eab2259b6fe24c22dd242a113e0b3d9",
+          "839ea60399447268ee0f0262a50b75fd",
+          "1815fd5589cad0c6f6cf946c76b81aeb",
+          "1815fd5589cad0c6f6cf946c76b81aeb"),
+      50, test::AcmReceiveTestOldApi::kStereoOutput);
+}
+
+// This test is for verifying the SetBitRate function. The bitrate is changed at
+// the beginning, and the number of generated bytes are checked.
+class AcmSetBitRateTest : public ::testing::Test {
+ protected:
+  static const int kTestDurationMs = 1000;
+
+  // Sets up the test::AcmSendTest object. Returns true on success, otherwise
+  // false.
+  bool SetUpSender() {
+    const std::string input_file_name =
+        webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+    // Note that |audio_source_| will loop forever. The test duration is set
+    // explicitly by |kTestDurationMs|.
+    audio_source_.reset(new test::InputAudioFile(input_file_name));
+    static const int kSourceRateHz = 32000;
+    send_test_.reset(new test::AcmSendTestOldApi(
+        audio_source_.get(), kSourceRateHz, kTestDurationMs));
+    return send_test_.get();
+  }
+
+  // Registers a send codec in the test::AcmSendTest object. Returns true on
+  // success, false on failure.
+  virtual bool RegisterSendCodec(const char* payload_name,
+                                 int sampling_freq_hz,
+                                 int channels,
+                                 int payload_type,
+                                 int frame_size_samples,
+                                 int frame_size_rtp_timestamps) {
+    return send_test_->RegisterCodec(payload_name, sampling_freq_hz, channels,
+                                     payload_type, frame_size_samples);
+  }
+
+  bool RegisterExternalSendCodec(AudioEncoder* external_speech_encoder,
+                                 int payload_type) {
+    return send_test_->RegisterExternalCodec(external_speech_encoder);
+  }
+
+  void RunInner(int expected_total_bits) {
+    int nr_bytes = 0;
+    while (std::unique_ptr<test::Packet> next_packet =
+               send_test_->NextPacket()) {
+      nr_bytes += rtc::checked_cast<int>(next_packet->payload_length_bytes());
+    }
+    EXPECT_EQ(expected_total_bits, nr_bytes * 8);
+  }
+
+  void SetUpTest(const char* codec_name,
+                 int codec_sample_rate_hz,
+                 int channels,
+                 int payload_type,
+                 int codec_frame_size_samples,
+                 int codec_frame_size_rtp_timestamps) {
+    ASSERT_TRUE(SetUpSender());
+    ASSERT_TRUE(RegisterSendCodec(codec_name, codec_sample_rate_hz, channels,
+                                  payload_type, codec_frame_size_samples,
+                                  codec_frame_size_rtp_timestamps));
+  }
+
+  std::unique_ptr<test::AcmSendTestOldApi> send_test_;
+  std::unique_ptr<test::InputAudioFile> audio_source_;
+};
+
+class AcmSetBitRateOldApi : public AcmSetBitRateTest {
+ protected:
+  // Runs the test. SetUpSender() must have been called and a codec must be set
+  // up before calling this method.
+  void Run(int target_bitrate_bps, int expected_total_bits) {
+    ASSERT_TRUE(send_test_->acm());
+    send_test_->acm()->SetBitRate(target_bitrate_bps);
+    RunInner(expected_total_bits);
+  }
+};
+
+class AcmSetBitRateNewApi : public AcmSetBitRateTest {
+ protected:
+  // Runs the test. SetUpSender() must have been called and a codec must be set
+  // up before calling this method.
+  void Run(int expected_total_bits) { RunInner(expected_total_bits); }
+};
+
+TEST_F(AcmSetBitRateOldApi, Opus_48khz_20ms_10kbps) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 1, 107, 960, 960));
+#if defined(WEBRTC_ANDROID)
+  Run(10000, 8640);
+#else
+  Run(10000, 8680);
+#endif  // WEBRTC_ANDROID
+}
+
+TEST_F(AcmSetBitRateNewApi, OpusFromFormat_48khz_20ms_10kbps) {
+  const auto config = AudioEncoderOpus::SdpToConfig(
+      SdpAudioFormat("opus", 48000, 2, {{"maxaveragebitrate", "10000"}}));
+  const auto encoder = AudioEncoderOpus::MakeAudioEncoder(*config, 107);
+  ASSERT_TRUE(SetUpSender());
+  ASSERT_TRUE(RegisterExternalSendCodec(encoder.get(), 107));
+#if defined(WEBRTC_ANDROID)
+  RunInner(8640);
+#else
+  RunInner(8680);
+#endif  // WEBRTC_ANDROID
+}
+
+TEST_F(AcmSetBitRateOldApi, Opus_48khz_20ms_50kbps) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 1, 107, 960, 960));
+#if defined(WEBRTC_ANDROID)
+  Run(50000, 45792);
+#else
+  Run(50000, 45520);
+#endif  // WEBRTC_ANDROID
+}
+
+TEST_F(AcmSetBitRateNewApi, OpusFromFormat_48khz_20ms_50kbps) {
+  const auto config = AudioEncoderOpus::SdpToConfig(
+      SdpAudioFormat("opus", 48000, 2, {{"maxaveragebitrate", "50000"}}));
+  const auto encoder = AudioEncoderOpus::MakeAudioEncoder(*config, 107);
+  ASSERT_TRUE(SetUpSender());
+  ASSERT_TRUE(RegisterExternalSendCodec(encoder.get(), 107));
+#if defined(WEBRTC_ANDROID)
+  RunInner(45792);
+#else
+  RunInner(45520);
+#endif  // WEBRTC_ANDROID
+}
+
+// The result on the Android platforms is inconsistent for this test case.
+// On android_rel the result is different from android and android arm64 rel.
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_Opus_48khz_20ms_100kbps DISABLED_Opus_48khz_20ms_100kbps
+#define MAYBE_OpusFromFormat_48khz_20ms_100kbps \
+  DISABLED_OpusFromFormat_48khz_20ms_100kbps
+#else
+#define MAYBE_Opus_48khz_20ms_100kbps Opus_48khz_20ms_100kbps
+#define MAYBE_OpusFromFormat_48khz_20ms_100kbps \
+  OpusFromFormat_48khz_20ms_100kbps
+#endif
+TEST_F(AcmSetBitRateOldApi, MAYBE_Opus_48khz_20ms_100kbps) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 1, 107, 960, 960));
+  Run(100000, 100832);
+}
+
+TEST_F(AcmSetBitRateNewApi, MAYBE_OpusFromFormat_48khz_20ms_100kbps) {
+  const auto config = AudioEncoderOpus::SdpToConfig(
+      SdpAudioFormat("opus", 48000, 2, {{"maxaveragebitrate", "100000"}}));
+  const auto encoder = AudioEncoderOpus::MakeAudioEncoder(*config, 107);
+  ASSERT_TRUE(SetUpSender());
+  ASSERT_TRUE(RegisterExternalSendCodec(encoder.get(), 107));
+  RunInner(100832);
+}
+
+// These next 2 tests ensure that the SetBitRate function has no effect on PCM
+TEST_F(AcmSetBitRateOldApi, Pcm16_8khz_10ms_8kbps) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 1, 107, 80, 80));
+  Run(8000, 128000);
+}
+
+TEST_F(AcmSetBitRateOldApi, Pcm16_8khz_10ms_32kbps) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 1, 107, 80, 80));
+  Run(32000, 128000);
+}
+
+// This test is for verifying the SetBitRate function. The bitrate is changed
+// in the middle, and the number of generated bytes are before and after the
+// change are checked.
+class AcmChangeBitRateOldApi : public AcmSetBitRateOldApi {
+ protected:
+  AcmChangeBitRateOldApi() : sampling_freq_hz_(0), frame_size_samples_(0) {}
+
+  // Registers a send codec in the test::AcmSendTest object. Returns true on
+  // success, false on failure.
+  bool RegisterSendCodec(const char* payload_name,
+                         int sampling_freq_hz,
+                         int channels,
+                         int payload_type,
+                         int frame_size_samples,
+                         int frame_size_rtp_timestamps) override {
+    frame_size_samples_ = frame_size_samples;
+    sampling_freq_hz_ = sampling_freq_hz;
+    return AcmSetBitRateOldApi::RegisterSendCodec(
+        payload_name, sampling_freq_hz, channels, payload_type,
+        frame_size_samples, frame_size_rtp_timestamps);
+  }
+
+  // Runs the test. SetUpSender() and RegisterSendCodec() must have been called
+  // before calling this method.
+  void Run(int target_bitrate_bps,
+           int expected_before_switch_bits,
+           int expected_after_switch_bits) {
+    ASSERT_TRUE(send_test_->acm());
+    int nr_packets =
+        sampling_freq_hz_ * kTestDurationMs / (frame_size_samples_ * 1000);
+    int nr_bytes_before = 0, nr_bytes_after = 0;
+    int packet_counter = 0;
+    while (std::unique_ptr<test::Packet> next_packet =
+               send_test_->NextPacket()) {
+      if (packet_counter == nr_packets / 2)
+        send_test_->acm()->SetBitRate(target_bitrate_bps);
+      if (packet_counter < nr_packets / 2)
+        nr_bytes_before += rtc::checked_cast<int>(
+            next_packet->payload_length_bytes());
+      else
+        nr_bytes_after += rtc::checked_cast<int>(
+            next_packet->payload_length_bytes());
+      packet_counter++;
+    }
+    EXPECT_EQ(expected_before_switch_bits, nr_bytes_before * 8);
+    EXPECT_EQ(expected_after_switch_bits, nr_bytes_after * 8);
+  }
+
+  uint32_t sampling_freq_hz_;
+  uint32_t frame_size_samples_;
+};
+
+TEST_F(AcmChangeBitRateOldApi, Opus_48khz_20ms_10kbps_2) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 1, 107, 960, 960));
+#if defined(WEBRTC_ANDROID)
+  Run(10000, 29512, 4800);
+#else
+  Run(10000, 32200, 5368);
+#endif  // WEBRTC_ANDROID
+}
+
+TEST_F(AcmChangeBitRateOldApi, Opus_48khz_20ms_50kbps_2) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 1, 107, 960, 960));
+#if defined(WEBRTC_ANDROID)
+  Run(50000, 29512, 23304);
+#else
+  Run(50000, 32200, 23920);
+#endif  // WEBRTC_ANDROID
+}
+
+TEST_F(AcmChangeBitRateOldApi, Opus_48khz_20ms_100kbps_2) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 1, 107, 960, 960));
+#if defined(WEBRTC_ANDROID)
+  #if defined(WEBRTC_ARCH_ARM64)
+    Run(100000, 29512, 50440);
+  #else
+    Run(100000, 29512, 50496);
+  #endif  // WEBRTC_ARCH_ARM64
+#else
+  Run(100000, 32200, 50448);
+#endif  // WEBRTC_ANDROID
+}
+
+// These next 2 tests ensure that the SetBitRate function has no effect on PCM
+TEST_F(AcmChangeBitRateOldApi, Pcm16_8khz_10ms_8kbps) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 1, 107, 80, 80));
+  Run(8000, 64000, 64000);
+}
+
+TEST_F(AcmChangeBitRateOldApi, Pcm16_8khz_10ms_32kbps) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 1, 107, 80, 80));
+  Run(32000, 64000, 64000);
+}
+
+TEST_F(AcmSenderBitExactnessOldApi, External_Pcmu_20ms) {
+  CodecInst codec_inst;
+  codec_inst.channels = 1;
+  codec_inst.pacsize = 160;
+  codec_inst.pltype = 0;
+  AudioEncoderPcmU encoder(codec_inst);
+  MockAudioEncoder mock_encoder;
+  // Set expectations on the mock encoder and also delegate the calls to the
+  // real encoder.
+  EXPECT_CALL(mock_encoder, SampleRateHz())
+      .Times(AtLeast(1))
+      .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::SampleRateHz));
+  EXPECT_CALL(mock_encoder, NumChannels())
+      .Times(AtLeast(1))
+      .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::NumChannels));
+  EXPECT_CALL(mock_encoder, RtpTimestampRateHz())
+      .Times(AtLeast(1))
+      .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::RtpTimestampRateHz));
+  EXPECT_CALL(mock_encoder, Num10MsFramesInNextPacket())
+      .Times(AtLeast(1))
+      .WillRepeatedly(
+          Invoke(&encoder, &AudioEncoderPcmU::Num10MsFramesInNextPacket));
+  EXPECT_CALL(mock_encoder, GetTargetBitrate())
+      .Times(AtLeast(1))
+      .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::GetTargetBitrate));
+  EXPECT_CALL(mock_encoder, EncodeImpl(_, _, _))
+      .Times(AtLeast(1))
+      .WillRepeatedly(Invoke(&encoder,
+                             static_cast<
+                             AudioEncoder::EncodedInfo(AudioEncoder::*)(
+                                 uint32_t,
+                                 rtc::ArrayView<const int16_t>,
+                                 rtc::Buffer*)>(&AudioEncoderPcmU::Encode)));
+  EXPECT_CALL(mock_encoder, SetFec(_))
+      .Times(AtLeast(1))
+      .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::SetFec));
+  ASSERT_NO_FATAL_FAILURE(
+      SetUpTestExternalEncoder(&mock_encoder, codec_inst.pltype));
+  Run("81a9d4c0bb72e9becc43aef124c981e9", "8f9b8750bd80fe26b6cbf6659b89f0f9",
+      50, test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+// This test fixture is implemented to run ACM and change the desired output
+// frequency during the call. The input packets are simply PCM16b-wb encoded
+// payloads with a constant value of |kSampleValue|. The test fixture itself
+// acts as PacketSource in between the receive test class and the constant-
+// payload packet source class. The output is both written to file, and analyzed
+// in this test fixture.
+class AcmSwitchingOutputFrequencyOldApi : public ::testing::Test,
+                                          public test::PacketSource,
+                                          public test::AudioSink {
+ protected:
+  static const size_t kTestNumPackets = 50;
+  static const int kEncodedSampleRateHz = 16000;
+  static const size_t kPayloadLenSamples = 30 * kEncodedSampleRateHz / 1000;
+  static const int kPayloadType = 108;  // Default payload type for PCM16b-wb.
+
+  AcmSwitchingOutputFrequencyOldApi()
+      : first_output_(true),
+        num_packets_(0),
+        packet_source_(kPayloadLenSamples,
+                       kSampleValue,
+                       kEncodedSampleRateHz,
+                       kPayloadType),
+        output_freq_2_(0),
+        has_toggled_(false) {}
+
+  void Run(int output_freq_1, int output_freq_2, int toggle_period_ms) {
+    // Set up the receiver used to decode the packets and verify the decoded
+    // output.
+    const std::string output_file_name =
+        webrtc::test::OutputPath() +
+        ::testing::UnitTest::GetInstance()
+            ->current_test_info()
+            ->test_case_name() +
+        "_" + ::testing::UnitTest::GetInstance()->current_test_info()->name() +
+        "_output.pcm";
+    test::OutputAudioFile output_file(output_file_name);
+    // Have the output audio sent both to file and to the WriteArray method in
+    // this class.
+    test::AudioSinkFork output(this, &output_file);
+    test::AcmReceiveTestToggleOutputFreqOldApi receive_test(
+        this,
+        &output,
+        output_freq_1,
+        output_freq_2,
+        toggle_period_ms,
+        test::AcmReceiveTestOldApi::kMonoOutput);
+    ASSERT_NO_FATAL_FAILURE(receive_test.RegisterDefaultCodecs());
+    output_freq_2_ = output_freq_2;
+
+    // This is where the actual test is executed.
+    receive_test.Run();
+
+    // Delete output file.
+    remove(output_file_name.c_str());
+  }
+
+  // Inherited from test::PacketSource.
+  std::unique_ptr<test::Packet> NextPacket() override {
+    // Check if it is time to terminate the test. The packet source is of type
+    // ConstantPcmPacketSource, which is infinite, so we must end the test
+    // "manually".
+    if (num_packets_++ > kTestNumPackets) {
+      EXPECT_TRUE(has_toggled_);
+      return NULL;  // Test ended.
+    }
+
+    // Get the next packet from the source.
+    return packet_source_.NextPacket();
+  }
+
+  // Inherited from test::AudioSink.
+  bool WriteArray(const int16_t* audio, size_t num_samples) override {
+    // Skip checking the first output frame, since it has a number of zeros
+    // due to how NetEq is initialized.
+    if (first_output_) {
+      first_output_ = false;
+      return true;
+    }
+    for (size_t i = 0; i < num_samples; ++i) {
+      EXPECT_EQ(kSampleValue, audio[i]);
+    }
+    if (num_samples ==
+        static_cast<size_t>(output_freq_2_ / 100))  // Size of 10 ms frame.
+      has_toggled_ = true;
+    // The return value does not say if the values match the expectation, just
+    // that the method could process the samples.
+    return true;
+  }
+
+  const int16_t kSampleValue = 1000;
+  bool first_output_;
+  size_t num_packets_;
+  test::ConstantPcmPacketSource packet_source_;
+  int output_freq_2_;
+  bool has_toggled_;
+};
+
+TEST_F(AcmSwitchingOutputFrequencyOldApi, TestWithoutToggling) {
+  Run(16000, 16000, 1000);
+}
+
+TEST_F(AcmSwitchingOutputFrequencyOldApi, Toggle16KhzTo32Khz) {
+  Run(16000, 32000, 1000);
+}
+
+TEST_F(AcmSwitchingOutputFrequencyOldApi, Toggle32KhzTo16Khz) {
+  Run(32000, 16000, 1000);
+}
+
+TEST_F(AcmSwitchingOutputFrequencyOldApi, Toggle16KhzTo8Khz) {
+  Run(16000, 8000, 1000);
+}
+
+TEST_F(AcmSwitchingOutputFrequencyOldApi, Toggle8KhzTo16Khz) {
+  Run(8000, 16000, 1000);
+}
+
+#endif
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/acm2/call_statistics.cc b/modules/audio_coding/acm2/call_statistics.cc
new file mode 100644
index 0000000..a506ead
--- /dev/null
+++ b/modules/audio_coding/acm2/call_statistics.cc
@@ -0,0 +1,59 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/acm2/call_statistics.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace acm2 {
+
+void CallStatistics::DecodedByNetEq(AudioFrame::SpeechType speech_type,
+                                    bool muted) {
+  ++decoding_stat_.calls_to_neteq;
+  if (muted) {
+    ++decoding_stat_.decoded_muted_output;
+  }
+  switch (speech_type) {
+    case AudioFrame::kNormalSpeech: {
+      ++decoding_stat_.decoded_normal;
+      break;
+    }
+    case AudioFrame::kPLC: {
+      ++decoding_stat_.decoded_plc;
+      break;
+    }
+    case AudioFrame::kCNG: {
+      ++decoding_stat_.decoded_cng;
+      break;
+    }
+    case AudioFrame::kPLCCNG: {
+      ++decoding_stat_.decoded_plc_cng;
+      break;
+    }
+    case AudioFrame::kUndefined: {
+      // If the audio is decoded by NetEq, |kUndefined| is not an option.
+      RTC_NOTREACHED();
+    }
+  }
+}
+
+void CallStatistics::DecodedBySilenceGenerator() {
+  ++decoding_stat_.calls_to_silence_generator;
+}
+
+const AudioDecodingCallStats& CallStatistics::GetDecodingStatistics() const {
+  return decoding_stat_;
+}
+
+}  // namespace acm2
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/acm2/call_statistics.h b/modules/audio_coding/acm2/call_statistics.h
new file mode 100644
index 0000000..9dd052f
--- /dev/null
+++ b/modules/audio_coding/acm2/call_statistics.h
@@ -0,0 +1,64 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_ACM2_CALL_STATISTICS_H_
+#define MODULES_AUDIO_CODING_ACM2_CALL_STATISTICS_H_
+
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/include/module_common_types.h"
+
+//
+// This class is for book keeping of calls to ACM. It is not useful to log API
+// calls which are supposed to be called every 10ms, e.g. PlayoutData10Ms(),
+// however, it is useful to know the number of such calls in a given time
+// interval. The current implementation covers calls to PlayoutData10Ms() with
+// detailed accounting of the decoded speech type.
+//
+// Thread Safety
+// =============
+// Please note that this class in not thread safe. The class must be protected
+// if different APIs are called from different threads.
+//
+
+namespace webrtc {
+
+namespace acm2 {
+
+class CallStatistics {
+ public:
+  CallStatistics() {}
+  ~CallStatistics() {}
+
+  // Call this method to indicate that NetEq engaged in decoding. |speech_type|
+  // is the audio-type according to NetEq, and |muted| indicates if the decoded
+  // frame was produced in muted state.
+  void DecodedByNetEq(AudioFrame::SpeechType speech_type, bool muted);
+
+  // Call this method to indicate that a decoding call resulted in generating
+  // silence, i.e. call to NetEq is bypassed and the output audio is zero.
+  void DecodedBySilenceGenerator();
+
+  // Get statistics for decoding. The statistics include the number of calls to
+  // NetEq and silence generator, as well as the type of speech pulled of off
+  // NetEq, c.f. declaration of AudioDecodingCallStats for detailed description.
+  const AudioDecodingCallStats& GetDecodingStatistics() const;
+
+ private:
+  // Reset the decoding statistics.
+  void ResetDecodingStatistics();
+
+  AudioDecodingCallStats decoding_stat_;
+};
+
+}  // namespace acm2
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_ACM2_CALL_STATISTICS_H_
diff --git a/modules/audio_coding/acm2/call_statistics_unittest.cc b/modules/audio_coding/acm2/call_statistics_unittest.cc
new file mode 100644
index 0000000..77c3863
--- /dev/null
+++ b/modules/audio_coding/acm2/call_statistics_unittest.cc
@@ -0,0 +1,57 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/acm2/call_statistics.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace acm2 {
+
+TEST(CallStatisticsTest, InitializedZero) {
+  CallStatistics call_stats;
+  AudioDecodingCallStats stats;
+
+  stats = call_stats.GetDecodingStatistics();
+  EXPECT_EQ(0, stats.calls_to_neteq);
+  EXPECT_EQ(0, stats.calls_to_silence_generator);
+  EXPECT_EQ(0, stats.decoded_normal);
+  EXPECT_EQ(0, stats.decoded_cng);
+  EXPECT_EQ(0, stats.decoded_plc);
+  EXPECT_EQ(0, stats.decoded_plc_cng);
+  EXPECT_EQ(0, stats.decoded_muted_output);
+}
+
+TEST(CallStatisticsTest, AllCalls) {
+  CallStatistics call_stats;
+  AudioDecodingCallStats stats;
+
+  call_stats.DecodedBySilenceGenerator();
+  call_stats.DecodedByNetEq(AudioFrame::kNormalSpeech, false);
+  call_stats.DecodedByNetEq(AudioFrame::kPLC, false);
+  call_stats.DecodedByNetEq(AudioFrame::kPLCCNG, true);  // Let this be muted.
+  call_stats.DecodedByNetEq(AudioFrame::kCNG, false);
+
+  stats = call_stats.GetDecodingStatistics();
+  EXPECT_EQ(4, stats.calls_to_neteq);
+  EXPECT_EQ(1, stats.calls_to_silence_generator);
+  EXPECT_EQ(1, stats.decoded_normal);
+  EXPECT_EQ(1, stats.decoded_cng);
+  EXPECT_EQ(1, stats.decoded_plc);
+  EXPECT_EQ(1, stats.decoded_plc_cng);
+  EXPECT_EQ(1, stats.decoded_muted_output);
+}
+
+}  // namespace acm2
+
+}  // namespace webrtc
+
+
+
diff --git a/modules/audio_coding/acm2/codec_manager.cc b/modules/audio_coding/acm2/codec_manager.cc
new file mode 100644
index 0000000..a101d3d
--- /dev/null
+++ b/modules/audio_coding/acm2/codec_manager.cc
@@ -0,0 +1,247 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/acm2/codec_manager.h"
+
+#include "rtc_base/checks.h"
+//#include "rtc_base/format_macros.h"
+#include "modules/audio_coding/acm2/rent_a_codec.h"
+#include "rtc_base/logging.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace acm2 {
+
+namespace {
+
+// Check if the given codec is a valid to be registered as send codec.
+int IsValidSendCodec(const CodecInst& send_codec) {
+  if ((send_codec.channels != 1) && (send_codec.channels != 2)) {
+    RTC_LOG(LS_ERROR) << "Wrong number of channels (" << send_codec.channels
+                      << "), only mono and stereo are supported)";
+    return -1;
+  }
+
+  auto maybe_codec_id = RentACodec::CodecIdByInst(send_codec);
+  if (!maybe_codec_id) {
+    RTC_LOG(LS_ERROR) << "Invalid codec setting for the send codec.";
+    return -1;
+  }
+
+  // Telephone-event cannot be a send codec.
+  if (!STR_CASE_CMP(send_codec.plname, "telephone-event")) {
+    RTC_LOG(LS_ERROR) << "telephone-event cannot be a send codec";
+    return -1;
+  }
+
+  if (!RentACodec::IsSupportedNumChannels(*maybe_codec_id, send_codec.channels)
+           .value_or(false)) {
+    RTC_LOG(LS_ERROR) << send_codec.channels
+                      << " number of channels not supported for "
+                      << send_codec.plname << ".";
+    return -1;
+  }
+  return RentACodec::CodecIndexFromId(*maybe_codec_id).value_or(-1);
+}
+
+bool IsOpus(const CodecInst& codec) {
+  return
+#ifdef WEBRTC_CODEC_OPUS
+      !STR_CASE_CMP(codec.plname, "opus") ||
+#endif
+      false;
+}
+
+}  // namespace
+
+CodecManager::CodecManager() {
+  thread_checker_.DetachFromThread();
+}
+
+CodecManager::~CodecManager() = default;
+
+bool CodecManager::RegisterEncoder(const CodecInst& send_codec) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  int codec_id = IsValidSendCodec(send_codec);
+
+  // Check for reported errors from function IsValidSendCodec().
+  if (codec_id < 0) {
+    return false;
+  }
+
+  switch (RentACodec::RegisterRedPayloadType(
+      &codec_stack_params_.red_payload_types, send_codec)) {
+    case RentACodec::RegistrationResult::kOk:
+      return true;
+    case RentACodec::RegistrationResult::kBadFreq:
+      RTC_LOG(LS_ERROR)
+          << "RegisterSendCodec() failed, invalid frequency for RED"
+             " registration";
+      return false;
+    case RentACodec::RegistrationResult::kSkip:
+      break;
+  }
+  switch (RentACodec::RegisterCngPayloadType(
+      &codec_stack_params_.cng_payload_types, send_codec)) {
+    case RentACodec::RegistrationResult::kOk:
+      return true;
+    case RentACodec::RegistrationResult::kBadFreq:
+      RTC_LOG(LS_ERROR)
+          << "RegisterSendCodec() failed, invalid frequency for CNG"
+             " registration";
+      return false;
+    case RentACodec::RegistrationResult::kSkip:
+      break;
+  }
+
+  if (IsOpus(send_codec)) {
+    // VAD/DTX not supported.
+    codec_stack_params_.use_cng = false;
+  }
+
+  send_codec_inst_ = send_codec;
+  recreate_encoder_ = true;  // Caller must recreate it.
+  return true;
+}
+
+CodecInst CodecManager::ForgeCodecInst(
+    const AudioEncoder* external_speech_encoder) {
+  CodecInst ci;
+  ci.channels = external_speech_encoder->NumChannels();
+  ci.plfreq = external_speech_encoder->SampleRateHz();
+  ci.pacsize = rtc::CheckedDivExact(
+      static_cast<int>(external_speech_encoder->Max10MsFramesInAPacket() *
+                       ci.plfreq),
+      100);
+  ci.pltype = -1;  // Not valid.
+  ci.rate = -1;    // Not valid.
+  static const char kName[] = "external";
+  memcpy(ci.plname, kName, sizeof(kName));
+  return ci;
+}
+
+bool CodecManager::SetCopyRed(bool enable) {
+  if (enable && codec_stack_params_.use_codec_fec) {
+    RTC_LOG(LS_WARNING) << "Codec internal FEC and RED cannot be co-enabled.";
+    return false;
+  }
+  if (enable && send_codec_inst_ &&
+      codec_stack_params_.red_payload_types.count(send_codec_inst_->plfreq) <
+          1) {
+    RTC_LOG(LS_WARNING) << "Cannot enable RED at " << send_codec_inst_->plfreq
+                        << " Hz.";
+    return false;
+  }
+  codec_stack_params_.use_red = enable;
+  return true;
+}
+
+bool CodecManager::SetVAD(bool enable, ACMVADMode mode) {
+  // Sanity check of the mode.
+  RTC_DCHECK(mode == VADNormal || mode == VADLowBitrate || mode == VADAggr ||
+             mode == VADVeryAggr);
+
+  // Check that the send codec is mono. We don't support VAD/DTX for stereo
+  // sending.
+  const bool stereo_send =
+      codec_stack_params_.speech_encoder
+          ? (codec_stack_params_.speech_encoder->NumChannels() != 1)
+          : false;
+  if (enable && stereo_send) {
+    RTC_LOG(LS_ERROR) << "VAD/DTX not supported for stereo sending";
+    return false;
+  }
+
+  // TODO(kwiberg): This doesn't protect Opus when injected as an external
+  // encoder.
+  if (send_codec_inst_ && IsOpus(*send_codec_inst_)) {
+    // VAD/DTX not supported, but don't fail.
+    enable = false;
+  }
+
+  codec_stack_params_.use_cng = enable;
+  codec_stack_params_.vad_mode = mode;
+  return true;
+}
+
+bool CodecManager::SetCodecFEC(bool enable_codec_fec) {
+  if (enable_codec_fec && codec_stack_params_.use_red) {
+    RTC_LOG(LS_WARNING) << "Codec internal FEC and RED cannot be co-enabled.";
+    return false;
+  }
+
+  codec_stack_params_.use_codec_fec = enable_codec_fec;
+  return true;
+}
+
+bool CodecManager::MakeEncoder(RentACodec* rac, AudioCodingModule* acm) {
+  RTC_DCHECK(rac);
+  RTC_DCHECK(acm);
+
+  if (!recreate_encoder_) {
+    bool error = false;
+    // Try to re-use the speech encoder we've given to the ACM.
+    acm->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+      if (!*encoder) {
+        // There is no existing encoder.
+        recreate_encoder_ = true;
+        return;
+      }
+
+      // Extract the speech encoder from the ACM.
+      std::unique_ptr<AudioEncoder> enc = std::move(*encoder);
+      while (true) {
+        auto sub_enc = enc->ReclaimContainedEncoders();
+        if (sub_enc.empty()) {
+          break;
+        }
+        RTC_CHECK_EQ(1, sub_enc.size());
+
+        // Replace enc with its sub encoder. We need to put the sub encoder in
+        // a temporary first, since otherwise the old value of enc would be
+        // destroyed before the new value got assigned, which would be bad
+        // since the new value is a part of the old value.
+        auto tmp_enc = std::move(sub_enc[0]);
+        enc = std::move(tmp_enc);
+      }
+
+      // Wrap it in a new encoder stack and put it back.
+      codec_stack_params_.speech_encoder = std::move(enc);
+      *encoder = rac->RentEncoderStack(&codec_stack_params_);
+      if (!*encoder) {
+        error = true;
+      }
+    });
+    if (error) {
+      return false;
+    }
+    if (!recreate_encoder_) {
+      return true;
+    }
+  }
+
+  if (!send_codec_inst_) {
+    // We don't have the information we need to create a new speech encoder.
+    // (This is not an error.)
+    return true;
+  }
+
+  codec_stack_params_.speech_encoder = rac->RentEncoder(*send_codec_inst_);
+  auto stack = rac->RentEncoderStack(&codec_stack_params_);
+  if (!stack) {
+    return false;
+  }
+  acm->SetEncoder(std::move(stack));
+  recreate_encoder_ = false;
+  return true;
+}
+
+}  // namespace acm2
+}  // namespace webrtc
diff --git a/modules/audio_coding/acm2/codec_manager.h b/modules/audio_coding/acm2/codec_manager.h
new file mode 100644
index 0000000..7485426
--- /dev/null
+++ b/modules/audio_coding/acm2/codec_manager.h
@@ -0,0 +1,75 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_ACM2_CODEC_MANAGER_H_
+#define MODULES_AUDIO_CODING_ACM2_CODEC_MANAGER_H_
+
+#include <map>
+
+#include "api/optional.h"
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/acm2/rent_a_codec.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/thread_checker.h"
+
+namespace webrtc {
+
+class AudioDecoder;
+class AudioEncoder;
+
+namespace acm2 {
+
+class CodecManager final {
+ public:
+  CodecManager();
+  ~CodecManager();
+
+  // Parses the given specification. On success, returns true and updates the
+  // stored CodecInst and stack parameters; on error, returns false.
+  bool RegisterEncoder(const CodecInst& send_codec);
+
+  static CodecInst ForgeCodecInst(const AudioEncoder* external_speech_encoder);
+
+  const CodecInst* GetCodecInst() const {
+    return send_codec_inst_ ? &*send_codec_inst_ : nullptr;
+  }
+
+  void UnsetCodecInst() { send_codec_inst_ = rtc::nullopt; }
+
+  const RentACodec::StackParameters* GetStackParams() const {
+    return &codec_stack_params_;
+  }
+  RentACodec::StackParameters* GetStackParams() { return &codec_stack_params_; }
+
+  bool SetCopyRed(bool enable);
+
+  bool SetVAD(bool enable, ACMVADMode mode);
+
+  bool SetCodecFEC(bool enable_codec_fec);
+
+  // Uses the provided Rent-A-Codec to create a new encoder stack, if we have a
+  // complete specification; if so, it is then passed to set_encoder. On error,
+  // returns false.
+  bool MakeEncoder(RentACodec* rac, AudioCodingModule* acm);
+
+ private:
+  rtc::ThreadChecker thread_checker_;
+  rtc::Optional<CodecInst> send_codec_inst_;
+  RentACodec::StackParameters codec_stack_params_;
+  bool recreate_encoder_ = true;  // Need to recreate encoder?
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(CodecManager);
+};
+
+}  // namespace acm2
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_ACM2_CODEC_MANAGER_H_
diff --git a/modules/audio_coding/acm2/codec_manager_unittest.cc b/modules/audio_coding/acm2/codec_manager_unittest.cc
new file mode 100644
index 0000000..6a5ea5f
--- /dev/null
+++ b/modules/audio_coding/acm2/codec_manager_unittest.cc
@@ -0,0 +1,70 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "modules/audio_coding/acm2/codec_manager.h"
+#include "modules/audio_coding/acm2/rent_a_codec.h"
+#include "test/gtest.h"
+#include "test/mock_audio_encoder.h"
+
+namespace webrtc {
+namespace acm2 {
+
+using ::testing::Return;
+
+namespace {
+
+// Create a MockAudioEncoder with some reasonable default behavior.
+std::unique_ptr<MockAudioEncoder> CreateMockEncoder() {
+  auto enc = std::unique_ptr<MockAudioEncoder>(new MockAudioEncoder);
+  EXPECT_CALL(*enc, SampleRateHz()).WillRepeatedly(Return(8000));
+  EXPECT_CALL(*enc, NumChannels()).WillRepeatedly(Return(1));
+  EXPECT_CALL(*enc, Max10MsFramesInAPacket()).WillRepeatedly(Return(1));
+  return enc;
+}
+
+}  // namespace
+
+TEST(CodecManagerTest, ExternalEncoderFec) {
+  auto enc0 = CreateMockEncoder();
+  auto enc1 = CreateMockEncoder();
+  auto enc2 = CreateMockEncoder();
+  {
+    ::testing::InSequence s;
+    EXPECT_CALL(*enc0, SetFec(false)).WillOnce(Return(true));
+    EXPECT_CALL(*enc1, SetFec(true)).WillOnce(Return(true));
+    EXPECT_CALL(*enc2, SetFec(true)).WillOnce(Return(false));
+  }
+
+  CodecManager cm;
+  RentACodec rac;
+
+  // use_codec_fec starts out false.
+  EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
+  cm.GetStackParams()->speech_encoder = std::move(enc0);
+  EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
+  EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
+
+  // Set it to true.
+  EXPECT_EQ(true, cm.SetCodecFEC(true));
+  EXPECT_TRUE(cm.GetStackParams()->use_codec_fec);
+  cm.GetStackParams()->speech_encoder = std::move(enc1);
+  EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
+  EXPECT_TRUE(cm.GetStackParams()->use_codec_fec);
+
+  // Switch to a codec that doesn't support it.
+  cm.GetStackParams()->speech_encoder = std::move(enc2);
+  EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
+  EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
+}
+
+}  // namespace acm2
+}  // namespace webrtc
diff --git a/modules/audio_coding/acm2/rent_a_codec.cc b/modules/audio_coding/acm2/rent_a_codec.cc
new file mode 100644
index 0000000..78db38d
--- /dev/null
+++ b/modules/audio_coding/acm2/rent_a_codec.cc
@@ -0,0 +1,310 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/acm2/rent_a_codec.h"
+
+#include <memory>
+#include <utility>
+
+#include "modules/audio_coding/codecs/cng/audio_encoder_cng.h"
+#include "modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
+#include "rtc_base/logging.h"
+#include "modules/audio_coding/codecs/g722/audio_encoder_g722.h"
+#ifdef WEBRTC_CODEC_ILBC
+#include "modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h"
+#endif
+#ifdef WEBRTC_CODEC_ISACFX
+#include "modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h"  // nogncheck
+#include "modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h"  // nogncheck
+#endif
+#ifdef WEBRTC_CODEC_ISAC
+#include "modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h"  // nogncheck
+#include "modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"  // nogncheck
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+#include "modules/audio_coding/codecs/opus/audio_encoder_opus.h"
+#endif
+#include "modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h"
+#ifdef WEBRTC_CODEC_RED
+#include "modules/audio_coding/codecs/red/audio_encoder_copy_red.h"
+#endif
+#include "modules/audio_coding/acm2/acm_codec_database.h"
+
+#if defined(WEBRTC_CODEC_ISACFX) || defined(WEBRTC_CODEC_ISAC)
+#include "modules/audio_coding/codecs/isac/locked_bandwidth_info.h"
+#endif
+
+namespace webrtc {
+namespace acm2 {
+
+rtc::Optional<RentACodec::CodecId> RentACodec::CodecIdByParams(
+    const char* payload_name,
+    int sampling_freq_hz,
+    size_t channels) {
+  return CodecIdFromIndex(
+      ACMCodecDB::CodecId(payload_name, sampling_freq_hz, channels));
+}
+
+rtc::Optional<CodecInst> RentACodec::CodecInstById(CodecId codec_id) {
+  rtc::Optional<int> mi = CodecIndexFromId(codec_id);
+  return mi ? rtc::Optional<CodecInst>(Database()[*mi])
+            : rtc::nullopt;
+}
+
+rtc::Optional<RentACodec::CodecId> RentACodec::CodecIdByInst(
+    const CodecInst& codec_inst) {
+  return CodecIdFromIndex(ACMCodecDB::CodecNumber(codec_inst));
+}
+
+rtc::Optional<CodecInst> RentACodec::CodecInstByParams(const char* payload_name,
+                                                       int sampling_freq_hz,
+                                                       size_t channels) {
+  rtc::Optional<CodecId> codec_id =
+      CodecIdByParams(payload_name, sampling_freq_hz, channels);
+  if (!codec_id)
+    return rtc::nullopt;
+  rtc::Optional<CodecInst> ci = CodecInstById(*codec_id);
+  RTC_DCHECK(ci);
+
+  // Keep the number of channels from the function call. For most codecs it
+  // will be the same value as in default codec settings, but not for all.
+  ci->channels = channels;
+
+  return ci;
+}
+
+bool RentACodec::IsCodecValid(const CodecInst& codec_inst) {
+  return ACMCodecDB::CodecNumber(codec_inst) >= 0;
+}
+
+rtc::Optional<bool> RentACodec::IsSupportedNumChannels(CodecId codec_id,
+                                                       size_t num_channels) {
+  auto i = CodecIndexFromId(codec_id);
+  return i ? rtc::Optional<bool>(
+                 ACMCodecDB::codec_settings_[*i].channel_support >=
+                 num_channels)
+           : rtc::nullopt;
+}
+
+rtc::ArrayView<const CodecInst> RentACodec::Database() {
+  return rtc::ArrayView<const CodecInst>(ACMCodecDB::database_,
+                                         NumberOfCodecs());
+}
+
+rtc::Optional<NetEqDecoder> RentACodec::NetEqDecoderFromCodecId(
+    CodecId codec_id,
+    size_t num_channels) {
+  rtc::Optional<int> i = CodecIndexFromId(codec_id);
+  if (!i)
+    return rtc::nullopt;
+  const NetEqDecoder ned = ACMCodecDB::neteq_decoders_[*i];
+  return (ned == NetEqDecoder::kDecoderOpus && num_channels == 2)
+             ? NetEqDecoder::kDecoderOpus_2ch
+             : ned;
+}
+
+RentACodec::RegistrationResult RentACodec::RegisterCngPayloadType(
+    std::map<int, int>* pt_map,
+    const CodecInst& codec_inst) {
+  if (STR_CASE_CMP(codec_inst.plname, "CN") != 0)
+    return RegistrationResult::kSkip;
+  switch (codec_inst.plfreq) {
+    case 8000:
+    case 16000:
+    case 32000:
+    case 48000:
+      (*pt_map)[codec_inst.plfreq] = codec_inst.pltype;
+      return RegistrationResult::kOk;
+    default:
+      return RegistrationResult::kBadFreq;
+  }
+}
+
+RentACodec::RegistrationResult RentACodec::RegisterRedPayloadType(
+    std::map<int, int>* pt_map,
+    const CodecInst& codec_inst) {
+  if (STR_CASE_CMP(codec_inst.plname, "RED") != 0)
+    return RegistrationResult::kSkip;
+  switch (codec_inst.plfreq) {
+    case 8000:
+      (*pt_map)[codec_inst.plfreq] = codec_inst.pltype;
+      return RegistrationResult::kOk;
+    default:
+      return RegistrationResult::kBadFreq;
+  }
+}
+
+namespace {
+
+// Returns a new speech encoder, or null on error.
+// TODO(kwiberg): Don't handle errors here (bug 5033)
+std::unique_ptr<AudioEncoder> CreateEncoder(
+    const CodecInst& speech_inst,
+    const rtc::scoped_refptr<LockedIsacBandwidthInfo>& bwinfo) {
+#if defined(WEBRTC_CODEC_ISACFX)
+  if (STR_CASE_CMP(speech_inst.plname, "isac") == 0)
+    return std::unique_ptr<AudioEncoder>(
+        new AudioEncoderIsacFixImpl(speech_inst, bwinfo));
+#endif
+#if defined(WEBRTC_CODEC_ISAC)
+  if (STR_CASE_CMP(speech_inst.plname, "isac") == 0)
+    return std::unique_ptr<AudioEncoder>(
+        new AudioEncoderIsacFloatImpl(speech_inst, bwinfo));
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+  if (STR_CASE_CMP(speech_inst.plname, "opus") == 0)
+    return std::unique_ptr<AudioEncoder>(new AudioEncoderOpusImpl(speech_inst));
+#endif
+  if (STR_CASE_CMP(speech_inst.plname, "pcmu") == 0)
+    return std::unique_ptr<AudioEncoder>(new AudioEncoderPcmU(speech_inst));
+  if (STR_CASE_CMP(speech_inst.plname, "pcma") == 0)
+    return std::unique_ptr<AudioEncoder>(new AudioEncoderPcmA(speech_inst));
+  if (STR_CASE_CMP(speech_inst.plname, "l16") == 0)
+    return std::unique_ptr<AudioEncoder>(new AudioEncoderPcm16B(speech_inst));
+#ifdef WEBRTC_CODEC_ILBC
+  if (STR_CASE_CMP(speech_inst.plname, "ilbc") == 0)
+    return std::unique_ptr<AudioEncoder>(new AudioEncoderIlbcImpl(speech_inst));
+#endif
+  if (STR_CASE_CMP(speech_inst.plname, "g722") == 0)
+    return std::unique_ptr<AudioEncoder>(new AudioEncoderG722Impl(speech_inst));
+  RTC_LOG_F(LS_ERROR) << "Could not create encoder of type "
+                      << speech_inst.plname;
+  return std::unique_ptr<AudioEncoder>();
+}
+
+std::unique_ptr<AudioEncoder> CreateRedEncoder(
+    std::unique_ptr<AudioEncoder> encoder,
+    int red_payload_type) {
+#ifdef WEBRTC_CODEC_RED
+  AudioEncoderCopyRed::Config config;
+  config.payload_type = red_payload_type;
+  config.speech_encoder = std::move(encoder);
+  return std::unique_ptr<AudioEncoder>(
+      new AudioEncoderCopyRed(std::move(config)));
+#else
+  return std::unique_ptr<AudioEncoder>();
+#endif
+}
+
+std::unique_ptr<AudioEncoder> CreateCngEncoder(
+    std::unique_ptr<AudioEncoder> encoder,
+    int payload_type,
+    ACMVADMode vad_mode) {
+  AudioEncoderCng::Config config;
+  config.num_channels = encoder->NumChannels();
+  config.payload_type = payload_type;
+  config.speech_encoder = std::move(encoder);
+  switch (vad_mode) {
+    case VADNormal:
+      config.vad_mode = Vad::kVadNormal;
+      break;
+    case VADLowBitrate:
+      config.vad_mode = Vad::kVadLowBitrate;
+      break;
+    case VADAggr:
+      config.vad_mode = Vad::kVadAggressive;
+      break;
+    case VADVeryAggr:
+      config.vad_mode = Vad::kVadVeryAggressive;
+      break;
+    default:
+      FATAL();
+  }
+  return std::unique_ptr<AudioEncoder>(new AudioEncoderCng(std::move(config)));
+}
+
+std::unique_ptr<AudioDecoder> CreateIsacDecoder(
+    int sample_rate_hz,
+    const rtc::scoped_refptr<LockedIsacBandwidthInfo>& bwinfo) {
+#if defined(WEBRTC_CODEC_ISACFX)
+  return std::unique_ptr<AudioDecoder>(
+      new AudioDecoderIsacFixImpl(sample_rate_hz, bwinfo));
+#elif defined(WEBRTC_CODEC_ISAC)
+  return std::unique_ptr<AudioDecoder>(
+      new AudioDecoderIsacFloatImpl(sample_rate_hz, bwinfo));
+#else
+  FATAL() << "iSAC is not supported.";
+  return std::unique_ptr<AudioDecoder>();
+#endif
+}
+
+}  // namespace
+
+RentACodec::RentACodec() {
+#if defined(WEBRTC_CODEC_ISACFX) || defined(WEBRTC_CODEC_ISAC)
+  isac_bandwidth_info_ = new LockedIsacBandwidthInfo;
+#endif
+}
+RentACodec::~RentACodec() = default;
+
+std::unique_ptr<AudioEncoder> RentACodec::RentEncoder(
+    const CodecInst& codec_inst) {
+  return CreateEncoder(codec_inst, isac_bandwidth_info_);
+}
+
+RentACodec::StackParameters::StackParameters() {
+  // Register the default payload types for RED and CNG.
+  for (const CodecInst& ci : RentACodec::Database()) {
+    RentACodec::RegisterCngPayloadType(&cng_payload_types, ci);
+    RentACodec::RegisterRedPayloadType(&red_payload_types, ci);
+  }
+}
+
+RentACodec::StackParameters::~StackParameters() = default;
+
+std::unique_ptr<AudioEncoder> RentACodec::RentEncoderStack(
+    StackParameters* param) {
+  if (!param->speech_encoder)
+    return nullptr;
+
+  if (param->use_codec_fec) {
+    // Switch FEC on. On failure, remember that FEC is off.
+    if (!param->speech_encoder->SetFec(true))
+      param->use_codec_fec = false;
+  } else {
+    // Switch FEC off. This shouldn't fail.
+    const bool success = param->speech_encoder->SetFec(false);
+    RTC_DCHECK(success);
+  }
+
+  auto pt = [&param](const std::map<int, int>& m) {
+    auto it = m.find(param->speech_encoder->SampleRateHz());
+    return it == m.end() ? rtc::nullopt
+                         : rtc::Optional<int>(it->second);
+  };
+  auto cng_pt = pt(param->cng_payload_types);
+  param->use_cng =
+      param->use_cng && cng_pt && param->speech_encoder->NumChannels() == 1;
+  auto red_pt = pt(param->red_payload_types);
+  param->use_red = param->use_red && red_pt;
+
+  if (param->use_cng || param->use_red) {
+    // The RED and CNG encoders need to be in sync with the speech encoder, so
+    // reset the latter to ensure its buffer is empty.
+    param->speech_encoder->Reset();
+  }
+  std::unique_ptr<AudioEncoder> encoder_stack =
+      std::move(param->speech_encoder);
+  if (param->use_red) {
+    encoder_stack = CreateRedEncoder(std::move(encoder_stack), *red_pt);
+  }
+  if (param->use_cng) {
+    encoder_stack =
+        CreateCngEncoder(std::move(encoder_stack), *cng_pt, param->vad_mode);
+  }
+  return encoder_stack;
+}
+
+std::unique_ptr<AudioDecoder> RentACodec::RentIsacDecoder(int sample_rate_hz) {
+  return CreateIsacDecoder(sample_rate_hz, isac_bandwidth_info_);
+}
+
+}  // namespace acm2
+}  // namespace webrtc
diff --git a/modules/audio_coding/acm2/rent_a_codec.h b/modules/audio_coding/acm2/rent_a_codec.h
new file mode 100644
index 0000000..f8fac4c
--- /dev/null
+++ b/modules/audio_coding/acm2/rent_a_codec.h
@@ -0,0 +1,201 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_ACM2_RENT_A_CODEC_H_
+#define MODULES_AUDIO_CODING_ACM2_RENT_A_CODEC_H_
+
+#include <stddef.h>
+#include <map>
+#include <memory>
+
+#include "api/array_view.h"
+#include "api/audio_codecs/audio_decoder.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/optional.h"
+#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "modules/audio_coding/neteq/neteq_decoder_enum.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+struct CodecInst;
+class LockedIsacBandwidthInfo;
+
+namespace acm2 {
+
+class RentACodec {
+ public:
+  enum class CodecId {
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+    kISAC,
+#endif
+#ifdef WEBRTC_CODEC_ISAC
+    kISACSWB,
+#endif
+    // Mono
+    kPCM16B,
+    kPCM16Bwb,
+    kPCM16Bswb32kHz,
+    // Stereo
+    kPCM16B_2ch,
+    kPCM16Bwb_2ch,
+    kPCM16Bswb32kHz_2ch,
+    // Mono
+    kPCMU,
+    kPCMA,
+    // Stereo
+    kPCMU_2ch,
+    kPCMA_2ch,
+#ifdef WEBRTC_CODEC_ILBC
+    kILBC,
+#endif
+    kG722,      // Mono
+    kG722_2ch,  // Stereo
+#ifdef WEBRTC_CODEC_OPUS
+    kOpus,  // Mono and stereo
+#endif
+    kCNNB,
+    kCNWB,
+    kCNSWB,
+#ifdef ENABLE_48000_HZ
+    kCNFB,
+#endif
+    kAVT,
+    kAVT16kHz,
+    kAVT32kHz,
+    kAVT48kHz,
+#ifdef WEBRTC_CODEC_RED
+    kRED,
+#endif
+    kNumCodecs,  // Implementation detail. Don't use.
+
+// Set unsupported codecs to -1.
+#if !defined(WEBRTC_CODEC_ISAC) && !defined(WEBRTC_CODEC_ISACFX)
+    kISAC = -1,
+#endif
+#ifndef WEBRTC_CODEC_ISAC
+    kISACSWB = -1,
+#endif
+    // 48 kHz not supported, always set to -1.
+    kPCM16Bswb48kHz = -1,
+#ifndef WEBRTC_CODEC_ILBC
+    kILBC = -1,
+#endif
+#ifndef WEBRTC_CODEC_OPUS
+    kOpus = -1,  // Mono and stereo
+#endif
+#ifndef WEBRTC_CODEC_RED
+    kRED = -1,
+#endif
+#ifndef ENABLE_48000_HZ
+    kCNFB = -1,
+#endif
+
+    kNone = -1
+  };
+
+  static inline size_t NumberOfCodecs() {
+    return static_cast<size_t>(CodecId::kNumCodecs);
+  }
+
+  static inline rtc::Optional<int> CodecIndexFromId(CodecId codec_id) {
+    const int i = static_cast<int>(codec_id);
+    return i >= 0 && i < static_cast<int>(NumberOfCodecs())
+               ? rtc::Optional<int>(i)
+               : rtc::nullopt;
+  }
+
+  static inline rtc::Optional<CodecId> CodecIdFromIndex(int codec_index) {
+    return static_cast<size_t>(codec_index) < NumberOfCodecs()
+               ? rtc::Optional<RentACodec::CodecId>(
+                     static_cast<RentACodec::CodecId>(codec_index))
+               : rtc::nullopt;
+  }
+
+  static rtc::Optional<CodecId> CodecIdByParams(const char* payload_name,
+                                                int sampling_freq_hz,
+                                                size_t channels);
+  static rtc::Optional<CodecInst> CodecInstById(CodecId codec_id);
+  static rtc::Optional<CodecId> CodecIdByInst(const CodecInst& codec_inst);
+  static rtc::Optional<CodecInst> CodecInstByParams(const char* payload_name,
+                                                    int sampling_freq_hz,
+                                                    size_t channels);
+  static bool IsCodecValid(const CodecInst& codec_inst);
+
+  static inline bool IsPayloadTypeValid(int payload_type) {
+    return payload_type >= 0 && payload_type <= 127;
+  }
+
+  static rtc::ArrayView<const CodecInst> Database();
+
+  static rtc::Optional<bool> IsSupportedNumChannels(CodecId codec_id,
+                                                    size_t num_channels);
+
+  static rtc::Optional<NetEqDecoder> NetEqDecoderFromCodecId(
+      CodecId codec_id,
+      size_t num_channels);
+
+  // Parse codec_inst and extract payload types. If the given CodecInst was for
+  // the wrong sort of codec, return kSkip; otherwise, if the rate was illegal,
+  // return kBadFreq; otherwise, update the given RTP timestamp rate (Hz) ->
+  // payload type map and return kOk.
+  enum class RegistrationResult { kOk, kSkip, kBadFreq };
+  static RegistrationResult RegisterCngPayloadType(std::map<int, int>* pt_map,
+                                                   const CodecInst& codec_inst);
+  static RegistrationResult RegisterRedPayloadType(std::map<int, int>* pt_map,
+                                                   const CodecInst& codec_inst);
+
+  RentACodec();
+  ~RentACodec();
+
+  // Creates and returns an audio encoder built to the given specification.
+  // Returns null in case of error.
+  std::unique_ptr<AudioEncoder> RentEncoder(const CodecInst& codec_inst);
+
+  struct StackParameters {
+    StackParameters();
+    ~StackParameters();
+
+    std::unique_ptr<AudioEncoder> speech_encoder;
+
+    bool use_codec_fec = false;
+    bool use_red = false;
+    bool use_cng = false;
+    ACMVADMode vad_mode = VADNormal;
+
+    // Maps from RTP timestamp rate (in Hz) to payload type.
+    std::map<int, int> cng_payload_types;
+    std::map<int, int> red_payload_types;
+  };
+
+  // Creates and returns an audio encoder stack constructed to the given
+  // specification. If the specification isn't compatible with the encoder, it
+  // will be changed to match (things will be switched off). The speech encoder
+  // will be stolen. If the specification isn't complete, returns nullptr.
+  std::unique_ptr<AudioEncoder> RentEncoderStack(StackParameters* param);
+
+  // Creates and returns an iSAC decoder.
+  std::unique_ptr<AudioDecoder> RentIsacDecoder(int sample_rate_hz);
+
+ private:
+  std::unique_ptr<AudioEncoder> speech_encoder_;
+  std::unique_ptr<AudioEncoder> cng_encoder_;
+  std::unique_ptr<AudioEncoder> red_encoder_;
+  rtc::scoped_refptr<LockedIsacBandwidthInfo> isac_bandwidth_info_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(RentACodec);
+};
+
+}  // namespace acm2
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_ACM2_RENT_A_CODEC_H_
diff --git a/modules/audio_coding/acm2/rent_a_codec_unittest.cc b/modules/audio_coding/acm2/rent_a_codec_unittest.cc
new file mode 100644
index 0000000..c949c1c
--- /dev/null
+++ b/modules/audio_coding/acm2/rent_a_codec_unittest.cc
@@ -0,0 +1,229 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "modules/audio_coding/acm2/rent_a_codec.h"
+#include "rtc_base/arraysize.h"
+#include "test/gtest.h"
+#include "test/mock_audio_encoder.h"
+
+namespace webrtc {
+namespace acm2 {
+
+using ::testing::Return;
+
+namespace {
+
+const int kDataLengthSamples = 80;
+const int kPacketSizeSamples = 2 * kDataLengthSamples;
+const int16_t kZeroData[kDataLengthSamples] = {0};
+const CodecInst kDefaultCodecInst = {0, "pcmu", 8000, kPacketSizeSamples,
+                                     1, 64000};
+const int kCngPt = 13;
+
+class Marker final {
+ public:
+  MOCK_METHOD1(Mark, void(std::string desc));
+};
+
+}  // namespace
+
+class RentACodecTestF : public ::testing::Test {
+ protected:
+  void CreateCodec() {
+    auto speech_encoder = rent_a_codec_.RentEncoder(kDefaultCodecInst);
+    ASSERT_TRUE(speech_encoder);
+    RentACodec::StackParameters param;
+    param.use_cng = true;
+    param.speech_encoder = std::move(speech_encoder);
+    encoder_ = rent_a_codec_.RentEncoderStack(&param);
+  }
+
+  void EncodeAndVerify(size_t expected_out_length,
+                       uint32_t expected_timestamp,
+                       int expected_payload_type,
+                       int expected_send_even_if_empty) {
+    rtc::Buffer out;
+    AudioEncoder::EncodedInfo encoded_info;
+    encoded_info =
+        encoder_->Encode(timestamp_, kZeroData, &out);
+    timestamp_ += kDataLengthSamples;
+    EXPECT_TRUE(encoded_info.redundant.empty());
+    EXPECT_EQ(expected_out_length, encoded_info.encoded_bytes);
+    EXPECT_EQ(expected_timestamp, encoded_info.encoded_timestamp);
+    if (expected_payload_type >= 0)
+      EXPECT_EQ(expected_payload_type, encoded_info.payload_type);
+    if (expected_send_even_if_empty >= 0)
+      EXPECT_EQ(static_cast<bool>(expected_send_even_if_empty),
+                encoded_info.send_even_if_empty);
+  }
+
+  RentACodec rent_a_codec_;
+  std::unique_ptr<AudioEncoder> encoder_;
+  uint32_t timestamp_ = 0;
+};
+
+// This test verifies that CNG frames are delivered as expected. Since the frame
+// size is set to 20 ms, we expect the first encode call to produce no output
+// (which is signaled as 0 bytes output of type kNoEncoding). The next encode
+// call should produce one SID frame of 9 bytes. The third call should not
+// result in any output (just like the first one). The fourth and final encode
+// call should produce an "empty frame", which is like no output, but with
+// AudioEncoder::EncodedInfo::send_even_if_empty set to true. (The reason to
+// produce an empty frame is to drive sending of DTMF packets in the RTP/RTCP
+// module.)
+TEST_F(RentACodecTestF, VerifyCngFrames) {
+  CreateCodec();
+  uint32_t expected_timestamp = timestamp_;
+  // Verify no frame.
+  {
+    SCOPED_TRACE("First encoding");
+    EncodeAndVerify(0, expected_timestamp, -1, -1);
+  }
+
+  // Verify SID frame delivered.
+  {
+    SCOPED_TRACE("Second encoding");
+    EncodeAndVerify(9, expected_timestamp, kCngPt, 1);
+  }
+
+  // Verify no frame.
+  {
+    SCOPED_TRACE("Third encoding");
+    EncodeAndVerify(0, expected_timestamp, -1, -1);
+  }
+
+  // Verify NoEncoding.
+  expected_timestamp += 2 * kDataLengthSamples;
+  {
+    SCOPED_TRACE("Fourth encoding");
+    EncodeAndVerify(0, expected_timestamp, kCngPt, 1);
+  }
+}
+
+TEST(RentACodecTest, ExternalEncoder) {
+  const int kSampleRateHz = 8000;
+  auto* external_encoder = new MockAudioEncoder;
+  EXPECT_CALL(*external_encoder, SampleRateHz())
+      .WillRepeatedly(Return(kSampleRateHz));
+  EXPECT_CALL(*external_encoder, NumChannels()).WillRepeatedly(Return(1));
+  EXPECT_CALL(*external_encoder, SetFec(false)).WillRepeatedly(Return(true));
+
+  RentACodec rac;
+  RentACodec::StackParameters param;
+  param.speech_encoder = std::unique_ptr<AudioEncoder>(external_encoder);
+  std::unique_ptr<AudioEncoder> encoder_stack = rac.RentEncoderStack(&param);
+  EXPECT_EQ(external_encoder, encoder_stack.get());
+  const int kPacketSizeSamples = kSampleRateHz / 100;
+  int16_t audio[kPacketSizeSamples] = {0};
+  rtc::Buffer encoded;
+  AudioEncoder::EncodedInfo info;
+
+  Marker marker;
+  {
+    ::testing::InSequence s;
+    info.encoded_timestamp = 0;
+    EXPECT_CALL(
+        *external_encoder,
+        EncodeImpl(0, rtc::ArrayView<const int16_t>(audio), &encoded))
+        .WillOnce(Return(info));
+    EXPECT_CALL(marker, Mark("A"));
+    EXPECT_CALL(marker, Mark("B"));
+    EXPECT_CALL(marker, Mark("C"));
+  }
+
+  info = encoder_stack->Encode(0, audio, &encoded);
+  EXPECT_EQ(0u, info.encoded_timestamp);
+  marker.Mark("A");
+
+  // Change to internal encoder.
+  CodecInst codec_inst = kDefaultCodecInst;
+  codec_inst.pacsize = kPacketSizeSamples;
+  param.speech_encoder = rac.RentEncoder(codec_inst);
+  ASSERT_TRUE(param.speech_encoder);
+  AudioEncoder* enc = param.speech_encoder.get();
+  std::unique_ptr<AudioEncoder> stack = rac.RentEncoderStack(&param);
+  EXPECT_EQ(enc, stack.get());
+
+  // Don't expect any more calls to the external encoder.
+  info = stack->Encode(1, audio, &encoded);
+  marker.Mark("B");
+  encoder_stack.reset();
+  marker.Mark("C");
+}
+
+// Verify that the speech encoder's Reset method is called when CNG or RED
+// (or both) are switched on, but not when they're switched off.
+void TestCngAndRedResetSpeechEncoder(bool use_cng, bool use_red) {
+  auto make_enc = [] {
+    auto speech_encoder =
+        std::unique_ptr<MockAudioEncoder>(new MockAudioEncoder);
+    EXPECT_CALL(*speech_encoder, NumChannels()).WillRepeatedly(Return(1));
+    EXPECT_CALL(*speech_encoder, Max10MsFramesInAPacket())
+        .WillRepeatedly(Return(2));
+    EXPECT_CALL(*speech_encoder, SampleRateHz()).WillRepeatedly(Return(8000));
+    EXPECT_CALL(*speech_encoder, SetFec(false)).WillRepeatedly(Return(true));
+    return speech_encoder;
+  };
+  auto speech_encoder1 = make_enc();
+  auto speech_encoder2 = make_enc();
+  Marker marker;
+  {
+    ::testing::InSequence s;
+    EXPECT_CALL(marker, Mark("disabled"));
+    EXPECT_CALL(marker, Mark("enabled"));
+    if (use_cng || use_red)
+      EXPECT_CALL(*speech_encoder2, Reset());
+  }
+
+  RentACodec::StackParameters param1, param2;
+  param1.speech_encoder = std::move(speech_encoder1);
+  param2.speech_encoder = std::move(speech_encoder2);
+  param2.use_cng = use_cng;
+  param2.use_red = use_red;
+  marker.Mark("disabled");
+  RentACodec rac;
+  rac.RentEncoderStack(&param1);
+  marker.Mark("enabled");
+  rac.RentEncoderStack(&param2);
+}
+
+TEST(RentACodecTest, CngResetsSpeechEncoder) {
+  TestCngAndRedResetSpeechEncoder(true, false);
+}
+
+TEST(RentACodecTest, RedResetsSpeechEncoder) {
+  TestCngAndRedResetSpeechEncoder(false, true);
+}
+
+TEST(RentACodecTest, CngAndRedResetsSpeechEncoder) {
+  TestCngAndRedResetSpeechEncoder(true, true);
+}
+
+TEST(RentACodecTest, NoCngAndRedNoSpeechEncoderReset) {
+  TestCngAndRedResetSpeechEncoder(false, false);
+}
+
+TEST(RentACodecTest, RentEncoderError) {
+  const CodecInst codec_inst = {
+      0, "Robert'); DROP TABLE Students;", 8000, 160, 1, 64000};
+  RentACodec rent_a_codec;
+  EXPECT_FALSE(rent_a_codec.RentEncoder(codec_inst));
+}
+
+TEST(RentACodecTest, RentEncoderStackWithoutSpeechEncoder) {
+  RentACodec::StackParameters sp;
+  EXPECT_EQ(nullptr, sp.speech_encoder);
+  EXPECT_EQ(nullptr, RentACodec().RentEncoderStack(&sp));
+}
+
+}  // namespace acm2
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_coding.gni b/modules/audio_coding/audio_coding.gni
new file mode 100644
index 0000000..9b0aba8
--- /dev/null
+++ b/modules/audio_coding/audio_coding.gni
@@ -0,0 +1,33 @@
+# Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+audio_codec_defines = []
+if (rtc_include_ilbc) {
+  audio_codec_defines += [ "WEBRTC_CODEC_ILBC" ]
+}
+if (rtc_include_opus) {
+  audio_codec_defines += [ "WEBRTC_CODEC_OPUS" ]
+}
+if (rtc_opus_support_120ms_ptime) {
+  audio_codec_defines += [ "WEBRTC_OPUS_SUPPORT_120MS_PTIME=1" ]
+} else {
+  audio_codec_defines += [ "WEBRTC_OPUS_SUPPORT_120MS_PTIME=0" ]
+}
+if (current_cpu == "arm") {
+  audio_codec_defines += [ "WEBRTC_CODEC_ISACFX" ]
+} else {
+  audio_codec_defines += [ "WEBRTC_CODEC_ISAC" ]
+}
+if (!build_with_mozilla && !build_with_chromium) {
+  audio_codec_defines += [ "WEBRTC_CODEC_RED" ]
+}
+
+audio_coding_defines = audio_codec_defines
+neteq_defines = audio_codec_defines
diff --git a/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_config.cc b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_config.cc
new file mode 100644
index 0000000..16fd2a1
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_config.cc
@@ -0,0 +1,34 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
+
+namespace webrtc {
+
+AudioEncoderRuntimeConfig::AudioEncoderRuntimeConfig() = default;
+
+AudioEncoderRuntimeConfig::AudioEncoderRuntimeConfig(
+    const AudioEncoderRuntimeConfig& other) = default;
+
+AudioEncoderRuntimeConfig::~AudioEncoderRuntimeConfig() = default;
+
+AudioEncoderRuntimeConfig& AudioEncoderRuntimeConfig::operator=(
+    const AudioEncoderRuntimeConfig& other) = default;
+
+bool AudioEncoderRuntimeConfig::operator==(
+    const AudioEncoderRuntimeConfig& other) const {
+  return bitrate_bps == other.bitrate_bps &&
+         frame_length_ms == other.frame_length_ms &&
+         uplink_packet_loss_fraction == other.uplink_packet_loss_fraction &&
+         enable_fec == other.enable_fec && enable_dtx == other.enable_dtx &&
+         num_channels == other.num_channels;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.cc b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.cc
new file mode 100644
index 0000000..55e5309
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.cc
@@ -0,0 +1,205 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h"
+
+#include <utility>
+
+#include "rtc_base/logging.h"
+#include "rtc_base/timeutils.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+namespace {
+constexpr int kEventLogMinBitrateChangeBps = 5000;
+constexpr float kEventLogMinBitrateChangeFraction = 0.25;
+constexpr float kEventLogMinPacketLossChangeFraction = 0.5;
+}  // namespace
+
+AudioNetworkAdaptorImpl::Config::Config() : event_log(nullptr){};
+
+AudioNetworkAdaptorImpl::Config::~Config() = default;
+
+AudioNetworkAdaptorImpl::AudioNetworkAdaptorImpl(
+    const Config& config,
+    std::unique_ptr<ControllerManager> controller_manager,
+    std::unique_ptr<DebugDumpWriter> debug_dump_writer)
+    : config_(config),
+      controller_manager_(std::move(controller_manager)),
+      debug_dump_writer_(std::move(debug_dump_writer)),
+      event_log_writer_(
+          config.event_log
+              ? new EventLogWriter(config.event_log,
+                                   kEventLogMinBitrateChangeBps,
+                                   kEventLogMinBitrateChangeFraction,
+                                   kEventLogMinPacketLossChangeFraction)
+              : nullptr),
+      enable_bitrate_adaptation_(
+          webrtc::field_trial::IsEnabled("WebRTC-Audio-BitrateAdaptation")),
+      enable_dtx_adaptation_(
+          webrtc::field_trial::IsEnabled("WebRTC-Audio-DtxAdaptation")),
+      enable_fec_adaptation_(
+          webrtc::field_trial::IsEnabled("WebRTC-Audio-FecAdaptation")),
+      enable_channel_adaptation_(
+          webrtc::field_trial::IsEnabled("WebRTC-Audio-ChannelAdaptation")),
+      enable_frame_length_adaptation_(webrtc::field_trial::IsEnabled(
+          "WebRTC-Audio-FrameLengthAdaptation")) {
+  RTC_DCHECK(controller_manager_);
+}
+
+AudioNetworkAdaptorImpl::~AudioNetworkAdaptorImpl() = default;
+
+void AudioNetworkAdaptorImpl::SetUplinkBandwidth(int uplink_bandwidth_bps) {
+  last_metrics_.uplink_bandwidth_bps = uplink_bandwidth_bps;
+  DumpNetworkMetrics();
+
+  Controller::NetworkMetrics network_metrics;
+  network_metrics.uplink_bandwidth_bps = uplink_bandwidth_bps;
+  UpdateNetworkMetrics(network_metrics);
+}
+
+void AudioNetworkAdaptorImpl::SetUplinkPacketLossFraction(
+    float uplink_packet_loss_fraction) {
+  last_metrics_.uplink_packet_loss_fraction = uplink_packet_loss_fraction;
+  DumpNetworkMetrics();
+
+  Controller::NetworkMetrics network_metrics;
+  network_metrics.uplink_packet_loss_fraction = uplink_packet_loss_fraction;
+  UpdateNetworkMetrics(network_metrics);
+}
+
+void AudioNetworkAdaptorImpl::SetUplinkRecoverablePacketLossFraction(
+    float uplink_recoverable_packet_loss_fraction) {
+  last_metrics_.uplink_recoverable_packet_loss_fraction =
+      uplink_recoverable_packet_loss_fraction;
+  DumpNetworkMetrics();
+
+  Controller::NetworkMetrics network_metrics;
+  network_metrics.uplink_recoverable_packet_loss_fraction =
+      uplink_recoverable_packet_loss_fraction;
+  UpdateNetworkMetrics(network_metrics);
+}
+
+void AudioNetworkAdaptorImpl::SetRtt(int rtt_ms) {
+  last_metrics_.rtt_ms = rtt_ms;
+  DumpNetworkMetrics();
+
+  Controller::NetworkMetrics network_metrics;
+  network_metrics.rtt_ms = rtt_ms;
+  UpdateNetworkMetrics(network_metrics);
+}
+
+void AudioNetworkAdaptorImpl::SetTargetAudioBitrate(
+    int target_audio_bitrate_bps) {
+  last_metrics_.target_audio_bitrate_bps = target_audio_bitrate_bps;
+  DumpNetworkMetrics();
+
+  Controller::NetworkMetrics network_metrics;
+  network_metrics.target_audio_bitrate_bps = target_audio_bitrate_bps;
+  UpdateNetworkMetrics(network_metrics);
+}
+
+void AudioNetworkAdaptorImpl::SetOverhead(size_t overhead_bytes_per_packet) {
+  last_metrics_.overhead_bytes_per_packet = overhead_bytes_per_packet;
+  DumpNetworkMetrics();
+
+  Controller::NetworkMetrics network_metrics;
+  network_metrics.overhead_bytes_per_packet = overhead_bytes_per_packet;
+  UpdateNetworkMetrics(network_metrics);
+}
+
+AudioEncoderRuntimeConfig AudioNetworkAdaptorImpl::GetEncoderRuntimeConfig() {
+  AudioEncoderRuntimeConfig config;
+  for (auto& controller :
+       controller_manager_->GetSortedControllers(last_metrics_))
+    controller->MakeDecision(&config);
+
+  // Update ANA stats.
+  auto increment_opt = [](rtc::Optional<uint32_t>& a) {
+    a = a.value_or(0) + 1;
+  };
+  if (prev_config_) {
+    if (config.bitrate_bps != prev_config_->bitrate_bps) {
+      increment_opt(stats_.bitrate_action_counter);
+    }
+    if (config.enable_dtx != prev_config_->enable_dtx) {
+      increment_opt(stats_.dtx_action_counter);
+    }
+    if (config.enable_fec != prev_config_->enable_fec) {
+      increment_opt(stats_.fec_action_counter);
+    }
+    if (config.frame_length_ms && prev_config_->frame_length_ms) {
+      if (*config.frame_length_ms > *prev_config_->frame_length_ms) {
+        increment_opt(stats_.frame_length_increase_counter);
+      } else if (*config.frame_length_ms < *prev_config_->frame_length_ms) {
+        increment_opt(stats_.frame_length_decrease_counter);
+      }
+    }
+    if (config.num_channels != prev_config_->num_channels) {
+      increment_opt(stats_.channel_action_counter);
+    }
+    if (config.uplink_packet_loss_fraction) {
+      stats_.uplink_packet_loss_fraction = *config.uplink_packet_loss_fraction;
+    }
+  }
+  prev_config_ = config;
+
+  // Prevent certain controllers from taking action (determined by field trials)
+  if (!enable_bitrate_adaptation_ && config.bitrate_bps) {
+    config.bitrate_bps.reset();
+  }
+  if (!enable_dtx_adaptation_ && config.enable_dtx) {
+    config.enable_dtx.reset();
+  }
+  if (!enable_fec_adaptation_ && config.enable_fec) {
+    config.enable_fec.reset();
+    config.uplink_packet_loss_fraction.reset();
+  }
+  if (!enable_frame_length_adaptation_ && config.frame_length_ms) {
+    config.frame_length_ms.reset();
+  }
+  if (!enable_channel_adaptation_ && config.num_channels) {
+    config.num_channels.reset();
+  }
+
+  if (debug_dump_writer_)
+    debug_dump_writer_->DumpEncoderRuntimeConfig(config, rtc::TimeMillis());
+
+  if (event_log_writer_)
+    event_log_writer_->MaybeLogEncoderConfig(config);
+
+  return config;
+}
+
+void AudioNetworkAdaptorImpl::StartDebugDump(FILE* file_handle) {
+  debug_dump_writer_ = DebugDumpWriter::Create(file_handle);
+}
+
+void AudioNetworkAdaptorImpl::StopDebugDump() {
+  debug_dump_writer_.reset(nullptr);
+}
+
+ANAStats AudioNetworkAdaptorImpl::GetStats() const {
+  return stats_;
+}
+
+void AudioNetworkAdaptorImpl::DumpNetworkMetrics() {
+  if (debug_dump_writer_)
+    debug_dump_writer_->DumpNetworkMetrics(last_metrics_, rtc::TimeMillis());
+}
+
+void AudioNetworkAdaptorImpl::UpdateNetworkMetrics(
+    const Controller::NetworkMetrics& network_metrics) {
+  for (auto& controller : controller_manager_->GetControllers())
+    controller->UpdateNetworkMetrics(network_metrics);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h
new file mode 100644
index 0000000..14000fe
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h
@@ -0,0 +1,93 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_AUDIO_NETWORK_ADAPTOR_IMPL_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_AUDIO_NETWORK_ADAPTOR_IMPL_H_
+
+#include <memory>
+
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+#include "modules/audio_coding/audio_network_adaptor/controller_manager.h"
+#include "modules/audio_coding/audio_network_adaptor/debug_dump_writer.h"
+#include "modules/audio_coding/audio_network_adaptor/event_log_writer.h"
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class RtcEventLog;
+
+class AudioNetworkAdaptorImpl final : public AudioNetworkAdaptor {
+ public:
+  struct Config {
+    Config();
+    ~Config();
+    RtcEventLog* event_log;
+  };
+
+  AudioNetworkAdaptorImpl(
+      const Config& config,
+      std::unique_ptr<ControllerManager> controller_manager,
+      std::unique_ptr<DebugDumpWriter> debug_dump_writer = nullptr);
+
+  ~AudioNetworkAdaptorImpl() override;
+
+  void SetUplinkBandwidth(int uplink_bandwidth_bps) override;
+
+  void SetUplinkPacketLossFraction(float uplink_packet_loss_fraction) override;
+
+  void SetUplinkRecoverablePacketLossFraction(
+      float uplink_recoverable_packet_loss_fraction) override;
+
+  void SetRtt(int rtt_ms) override;
+
+  void SetTargetAudioBitrate(int target_audio_bitrate_bps) override;
+
+  void SetOverhead(size_t overhead_bytes_per_packet) override;
+
+  AudioEncoderRuntimeConfig GetEncoderRuntimeConfig() override;
+
+  void StartDebugDump(FILE* file_handle) override;
+
+  void StopDebugDump() override;
+
+  ANAStats GetStats() const override;
+
+ private:
+  void DumpNetworkMetrics();
+
+  void UpdateNetworkMetrics(const Controller::NetworkMetrics& network_metrics);
+
+  const Config config_;
+
+  std::unique_ptr<ControllerManager> controller_manager_;
+
+  std::unique_ptr<DebugDumpWriter> debug_dump_writer_;
+
+  const std::unique_ptr<EventLogWriter> event_log_writer_;
+
+  Controller::NetworkMetrics last_metrics_;
+
+  rtc::Optional<AudioEncoderRuntimeConfig> prev_config_;
+
+  ANAStats stats_;
+
+  const bool enable_bitrate_adaptation_;
+  const bool enable_dtx_adaptation_;
+  const bool enable_fec_adaptation_;
+  const bool enable_channel_adaptation_;
+  const bool enable_frame_length_adaptation_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioNetworkAdaptorImpl);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_AUDIO_NETWORK_ADAPTOR_IMPL_H_
diff --git a/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl_unittest.cc b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl_unittest.cc
new file mode 100644
index 0000000..c437918
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl_unittest.cc
@@ -0,0 +1,326 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <utility>
+#include <vector>
+
+#include "logging/rtc_event_log/events/rtc_event.h"
+#include "logging/rtc_event_log/events/rtc_event_audio_network_adaptation.h"
+#include "logging/rtc_event_log/mock/mock_rtc_event_log.h"
+#include "modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h"
+#include "modules/audio_coding/audio_network_adaptor/mock/mock_controller.h"
+#include "modules/audio_coding/audio_network_adaptor/mock/mock_controller_manager.h"
+#include "modules/audio_coding/audio_network_adaptor/mock/mock_debug_dump_writer.h"
+#include "rtc_base/fakeclock.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+using ::testing::_;
+using ::testing::NiceMock;
+using ::testing::Return;
+using ::testing::SetArgPointee;
+
+namespace {
+
+constexpr size_t kNumControllers = 2;
+
+constexpr int64_t kClockInitialTimeMs = 12345678;
+
+MATCHER_P(NetworkMetricsIs, metric, "") {
+  return arg.uplink_bandwidth_bps == metric.uplink_bandwidth_bps &&
+         arg.target_audio_bitrate_bps == metric.target_audio_bitrate_bps &&
+         arg.rtt_ms == metric.rtt_ms &&
+         arg.overhead_bytes_per_packet == metric.overhead_bytes_per_packet &&
+         arg.uplink_packet_loss_fraction ==
+             metric.uplink_packet_loss_fraction &&
+         arg.uplink_recoverable_packet_loss_fraction ==
+             metric.uplink_recoverable_packet_loss_fraction;
+}
+
+MATCHER_P(IsRtcEventAnaConfigEqualTo, config, "") {
+  if (arg->GetType() != RtcEvent::Type::AudioNetworkAdaptation) {
+    return false;
+  }
+  auto ana_event = static_cast<RtcEventAudioNetworkAdaptation*>(arg);
+  return *ana_event->config_ == config;
+}
+
+MATCHER_P(EncoderRuntimeConfigIs, config, "") {
+  return arg.bitrate_bps == config.bitrate_bps &&
+         arg.frame_length_ms == config.frame_length_ms &&
+         arg.uplink_packet_loss_fraction ==
+             config.uplink_packet_loss_fraction &&
+         arg.enable_fec == config.enable_fec &&
+         arg.enable_dtx == config.enable_dtx &&
+         arg.num_channels == config.num_channels;
+}
+
+struct AudioNetworkAdaptorStates {
+  std::unique_ptr<AudioNetworkAdaptorImpl> audio_network_adaptor;
+  std::vector<std::unique_ptr<MockController>> mock_controllers;
+  std::unique_ptr<MockRtcEventLog> event_log;
+  MockDebugDumpWriter* mock_debug_dump_writer;
+};
+
+AudioNetworkAdaptorStates CreateAudioNetworkAdaptor() {
+  AudioNetworkAdaptorStates states;
+  std::vector<Controller*> controllers;
+  for (size_t i = 0; i < kNumControllers; ++i) {
+    auto controller =
+        std::unique_ptr<MockController>(new NiceMock<MockController>());
+    EXPECT_CALL(*controller, Die());
+    controllers.push_back(controller.get());
+    states.mock_controllers.push_back(std::move(controller));
+  }
+
+  auto controller_manager = std::unique_ptr<MockControllerManager>(
+      new NiceMock<MockControllerManager>());
+
+  EXPECT_CALL(*controller_manager, Die());
+  EXPECT_CALL(*controller_manager, GetControllers())
+      .WillRepeatedly(Return(controllers));
+  EXPECT_CALL(*controller_manager, GetSortedControllers(_))
+      .WillRepeatedly(Return(controllers));
+
+  states.event_log.reset(new NiceMock<MockRtcEventLog>());
+
+  auto debug_dump_writer =
+      std::unique_ptr<MockDebugDumpWriter>(new NiceMock<MockDebugDumpWriter>());
+  EXPECT_CALL(*debug_dump_writer, Die());
+  states.mock_debug_dump_writer = debug_dump_writer.get();
+
+  AudioNetworkAdaptorImpl::Config config;
+  config.event_log = states.event_log.get();
+  // AudioNetworkAdaptorImpl governs the lifetime of controller manager.
+  states.audio_network_adaptor.reset(new AudioNetworkAdaptorImpl(
+      config,
+      std::move(controller_manager), std::move(debug_dump_writer)));
+
+  return states;
+}
+
+void SetExpectCallToUpdateNetworkMetrics(
+    const std::vector<std::unique_ptr<MockController>>& controllers,
+    const Controller::NetworkMetrics& check) {
+  for (auto& mock_controller : controllers) {
+    EXPECT_CALL(*mock_controller,
+                UpdateNetworkMetrics(NetworkMetricsIs(check)));
+  }
+}
+
+}  // namespace
+
+TEST(AudioNetworkAdaptorImplTest,
+     UpdateNetworkMetricsIsCalledOnSetUplinkBandwidth) {
+  auto states = CreateAudioNetworkAdaptor();
+  constexpr int kBandwidth = 16000;
+  Controller::NetworkMetrics check;
+  check.uplink_bandwidth_bps = kBandwidth;
+  SetExpectCallToUpdateNetworkMetrics(states.mock_controllers, check);
+  states.audio_network_adaptor->SetUplinkBandwidth(kBandwidth);
+}
+
+TEST(AudioNetworkAdaptorImplTest,
+     UpdateNetworkMetricsIsCalledOnSetUplinkPacketLossFraction) {
+  auto states = CreateAudioNetworkAdaptor();
+  constexpr float kPacketLoss = 0.7f;
+  Controller::NetworkMetrics check;
+  check.uplink_packet_loss_fraction = kPacketLoss;
+  SetExpectCallToUpdateNetworkMetrics(states.mock_controllers, check);
+  states.audio_network_adaptor->SetUplinkPacketLossFraction(kPacketLoss);
+}
+
+TEST(AudioNetworkAdaptorImplTest,
+     UpdateNetworkMetricsIsCalledOnSetUplinkRecoverablePacketLossFraction) {
+  auto states = CreateAudioNetworkAdaptor();
+  constexpr float kRecoverablePacketLoss = 0.1f;
+  Controller::NetworkMetrics check;
+  check.uplink_recoverable_packet_loss_fraction = kRecoverablePacketLoss;
+  SetExpectCallToUpdateNetworkMetrics(states.mock_controllers, check);
+  states.audio_network_adaptor->SetUplinkRecoverablePacketLossFraction(
+      kRecoverablePacketLoss);
+}
+
+TEST(AudioNetworkAdaptorImplTest, UpdateNetworkMetricsIsCalledOnSetRtt) {
+  auto states = CreateAudioNetworkAdaptor();
+  constexpr int kRtt = 100;
+  Controller::NetworkMetrics check;
+  check.rtt_ms = kRtt;
+  SetExpectCallToUpdateNetworkMetrics(states.mock_controllers, check);
+  states.audio_network_adaptor->SetRtt(kRtt);
+}
+
+TEST(AudioNetworkAdaptorImplTest,
+     UpdateNetworkMetricsIsCalledOnSetTargetAudioBitrate) {
+  auto states = CreateAudioNetworkAdaptor();
+  constexpr int kTargetAudioBitrate = 15000;
+  Controller::NetworkMetrics check;
+  check.target_audio_bitrate_bps = kTargetAudioBitrate;
+  SetExpectCallToUpdateNetworkMetrics(states.mock_controllers, check);
+  states.audio_network_adaptor->SetTargetAudioBitrate(kTargetAudioBitrate);
+}
+
+TEST(AudioNetworkAdaptorImplTest, UpdateNetworkMetricsIsCalledOnSetOverhead) {
+  auto states = CreateAudioNetworkAdaptor();
+  constexpr size_t kOverhead = 64;
+  Controller::NetworkMetrics check;
+  check.overhead_bytes_per_packet = kOverhead;
+  SetExpectCallToUpdateNetworkMetrics(states.mock_controllers, check);
+  states.audio_network_adaptor->SetOverhead(kOverhead);
+}
+
+TEST(AudioNetworkAdaptorImplTest,
+     MakeDecisionIsCalledOnGetEncoderRuntimeConfig) {
+  auto states = CreateAudioNetworkAdaptor();
+  for (auto& mock_controller : states.mock_controllers)
+    EXPECT_CALL(*mock_controller, MakeDecision(_));
+  states.audio_network_adaptor->GetEncoderRuntimeConfig();
+}
+
+TEST(AudioNetworkAdaptorImplTest,
+     DumpEncoderRuntimeConfigIsCalledOnGetEncoderRuntimeConfig) {
+  test::ScopedFieldTrials override_field_trials(
+      "WebRTC-Audio-BitrateAdaptation/Enabled/WebRTC-Audio-FecAdaptation/"
+      "Enabled/");
+  rtc::ScopedFakeClock fake_clock;
+  fake_clock.AdvanceTime(rtc::TimeDelta::FromMilliseconds(kClockInitialTimeMs));
+  auto states = CreateAudioNetworkAdaptor();
+  AudioEncoderRuntimeConfig config;
+  config.bitrate_bps = 32000;
+  config.enable_fec = true;
+
+  EXPECT_CALL(*states.mock_controllers[0], MakeDecision(_))
+      .WillOnce(SetArgPointee<0>(config));
+
+  EXPECT_CALL(*states.mock_debug_dump_writer,
+              DumpEncoderRuntimeConfig(EncoderRuntimeConfigIs(config),
+                                       kClockInitialTimeMs));
+  states.audio_network_adaptor->GetEncoderRuntimeConfig();
+}
+
+TEST(AudioNetworkAdaptorImplTest,
+     DumpNetworkMetricsIsCalledOnSetNetworkMetrics) {
+  rtc::ScopedFakeClock fake_clock;
+  fake_clock.AdvanceTime(rtc::TimeDelta::FromMilliseconds(kClockInitialTimeMs));
+
+  auto states = CreateAudioNetworkAdaptor();
+
+  constexpr int kBandwidth = 16000;
+  constexpr float kPacketLoss = 0.7f;
+  const auto kRecoverablePacketLoss = 0.2f;
+  constexpr int kRtt = 100;
+  constexpr int kTargetAudioBitrate = 15000;
+  constexpr size_t kOverhead = 64;
+
+  Controller::NetworkMetrics check;
+  check.uplink_bandwidth_bps = kBandwidth;
+  int64_t timestamp_check = kClockInitialTimeMs;
+
+  EXPECT_CALL(*states.mock_debug_dump_writer,
+              DumpNetworkMetrics(NetworkMetricsIs(check), timestamp_check));
+  states.audio_network_adaptor->SetUplinkBandwidth(kBandwidth);
+
+  fake_clock.AdvanceTime(rtc::TimeDelta::FromMilliseconds(100));
+  timestamp_check += 100;
+  check.uplink_packet_loss_fraction = kPacketLoss;
+  EXPECT_CALL(*states.mock_debug_dump_writer,
+              DumpNetworkMetrics(NetworkMetricsIs(check), timestamp_check));
+  states.audio_network_adaptor->SetUplinkPacketLossFraction(kPacketLoss);
+
+  fake_clock.AdvanceTime(rtc::TimeDelta::FromMilliseconds(50));
+  timestamp_check += 50;
+  check.uplink_recoverable_packet_loss_fraction = kRecoverablePacketLoss;
+  EXPECT_CALL(*states.mock_debug_dump_writer,
+              DumpNetworkMetrics(NetworkMetricsIs(check), timestamp_check));
+  states.audio_network_adaptor->SetUplinkRecoverablePacketLossFraction(
+      kRecoverablePacketLoss);
+
+  fake_clock.AdvanceTime(rtc::TimeDelta::FromMilliseconds(200));
+  timestamp_check += 200;
+  check.rtt_ms = kRtt;
+  EXPECT_CALL(*states.mock_debug_dump_writer,
+              DumpNetworkMetrics(NetworkMetricsIs(check), timestamp_check));
+  states.audio_network_adaptor->SetRtt(kRtt);
+
+  fake_clock.AdvanceTime(rtc::TimeDelta::FromMilliseconds(150));
+  timestamp_check += 150;
+  check.target_audio_bitrate_bps = kTargetAudioBitrate;
+  EXPECT_CALL(*states.mock_debug_dump_writer,
+              DumpNetworkMetrics(NetworkMetricsIs(check), timestamp_check));
+  states.audio_network_adaptor->SetTargetAudioBitrate(kTargetAudioBitrate);
+
+  fake_clock.AdvanceTime(rtc::TimeDelta::FromMilliseconds(50));
+  timestamp_check += 50;
+  check.overhead_bytes_per_packet = kOverhead;
+  EXPECT_CALL(*states.mock_debug_dump_writer,
+              DumpNetworkMetrics(NetworkMetricsIs(check), timestamp_check));
+  states.audio_network_adaptor->SetOverhead(kOverhead);
+}
+
+TEST(AudioNetworkAdaptorImplTest, LogRuntimeConfigOnGetEncoderRuntimeConfig) {
+  test::ScopedFieldTrials override_field_trials(
+      "WebRTC-Audio-BitrateAdaptation/Enabled/WebRTC-Audio-FecAdaptation/"
+      "Enabled/");
+  auto states = CreateAudioNetworkAdaptor();
+
+  AudioEncoderRuntimeConfig config;
+  config.bitrate_bps = 32000;
+  config.enable_fec = true;
+
+  EXPECT_CALL(*states.mock_controllers[0], MakeDecision(_))
+      .WillOnce(SetArgPointee<0>(config));
+
+  EXPECT_CALL(*states.event_log, LogProxy(IsRtcEventAnaConfigEqualTo(config)))
+      .Times(1);
+  states.audio_network_adaptor->GetEncoderRuntimeConfig();
+}
+
+TEST(AudioNetworkAdaptorImplTest, TestANAStats) {
+  auto states = CreateAudioNetworkAdaptor();
+
+  // Simulate some adaptation, otherwise the stats will not show anything.
+  AudioEncoderRuntimeConfig config1, config2;
+  config1.bitrate_bps = 32000;
+  config1.num_channels = 2;
+  config1.enable_fec = true;
+  config1.enable_dtx = true;
+  config1.frame_length_ms = 120;
+  config1.uplink_packet_loss_fraction = 0.1f;
+  config2.bitrate_bps = 16000;
+  config2.num_channels = 1;
+  config2.enable_fec = false;
+  config2.enable_dtx = false;
+  config2.frame_length_ms = 60;
+  config1.uplink_packet_loss_fraction = 0.1f;
+
+  EXPECT_CALL(*states.mock_controllers[0], MakeDecision(_))
+      .WillOnce(SetArgPointee<0>(config1));
+  states.audio_network_adaptor->GetEncoderRuntimeConfig();
+  EXPECT_CALL(*states.mock_controllers[0], MakeDecision(_))
+      .WillOnce(SetArgPointee<0>(config2));
+  states.audio_network_adaptor->GetEncoderRuntimeConfig();
+  EXPECT_CALL(*states.mock_controllers[0], MakeDecision(_))
+      .WillOnce(SetArgPointee<0>(config1));
+  states.audio_network_adaptor->GetEncoderRuntimeConfig();
+
+  auto ana_stats = states.audio_network_adaptor->GetStats();
+
+  EXPECT_EQ(ana_stats.bitrate_action_counter, 2);
+  EXPECT_EQ(ana_stats.channel_action_counter, 2);
+  EXPECT_EQ(ana_stats.dtx_action_counter, 2);
+  EXPECT_EQ(ana_stats.fec_action_counter, 2);
+  EXPECT_EQ(ana_stats.frame_length_increase_counter, 1);
+  EXPECT_EQ(ana_stats.frame_length_decrease_counter, 1);
+  EXPECT_EQ(ana_stats.uplink_packet_loss_fraction, 0.1f);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/bitrate_controller.cc b/modules/audio_coding/audio_network_adaptor/bitrate_controller.cc
new file mode 100644
index 0000000..6850926
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/bitrate_controller.cc
@@ -0,0 +1,76 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/bitrate_controller.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+namespace audio_network_adaptor {
+
+BitrateController::Config::Config(int initial_bitrate_bps,
+                                  int initial_frame_length_ms,
+                                  int fl_increase_overhead_offset,
+                                  int fl_decrease_overhead_offset)
+    : initial_bitrate_bps(initial_bitrate_bps),
+      initial_frame_length_ms(initial_frame_length_ms),
+      fl_increase_overhead_offset(fl_increase_overhead_offset),
+      fl_decrease_overhead_offset(fl_decrease_overhead_offset) {}
+
+BitrateController::Config::~Config() = default;
+
+BitrateController::BitrateController(const Config& config)
+    : config_(config),
+      bitrate_bps_(config_.initial_bitrate_bps),
+      frame_length_ms_(config_.initial_frame_length_ms) {
+  RTC_DCHECK_GT(bitrate_bps_, 0);
+  RTC_DCHECK_GT(frame_length_ms_, 0);
+}
+
+BitrateController::~BitrateController() = default;
+
+void BitrateController::UpdateNetworkMetrics(
+    const NetworkMetrics& network_metrics) {
+  if (network_metrics.target_audio_bitrate_bps)
+    target_audio_bitrate_bps_ = network_metrics.target_audio_bitrate_bps;
+  if (network_metrics.overhead_bytes_per_packet)
+    overhead_bytes_per_packet_ = network_metrics.overhead_bytes_per_packet;
+}
+
+void BitrateController::MakeDecision(AudioEncoderRuntimeConfig* config) {
+  // Decision on |bitrate_bps| should not have been made.
+  RTC_DCHECK(!config->bitrate_bps);
+  if (target_audio_bitrate_bps_ && overhead_bytes_per_packet_) {
+    // Current implementation of BitrateController can only work when
+    // |metrics.target_audio_bitrate_bps| includes overhead is enabled. This is
+    // currently governed by the following field trial.
+    RTC_DCHECK(
+        webrtc::field_trial::IsEnabled("WebRTC-SendSideBwe-WithOverhead"));
+    if (config->frame_length_ms)
+      frame_length_ms_ = *config->frame_length_ms;
+    int offset = config->last_fl_change_increase
+                     ? config_.fl_increase_overhead_offset
+                     : config_.fl_decrease_overhead_offset;
+    // Check that
+    // -(*overhead_bytes_per_packet_) <= offset <= (*overhead_bytes_per_packet_)
+    RTC_DCHECK_GE(*overhead_bytes_per_packet_, -offset);
+    RTC_DCHECK_LE(offset, *overhead_bytes_per_packet_);
+    int overhead_rate_bps = static_cast<int>(
+        (*overhead_bytes_per_packet_ + offset) * 8 * 1000 / frame_length_ms_);
+    bitrate_bps_ = std::max(0, *target_audio_bitrate_bps_ - overhead_rate_bps);
+  }
+  config->bitrate_bps = bitrate_bps_;
+}
+
+}  // namespace audio_network_adaptor
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/bitrate_controller.h b/modules/audio_coding/audio_network_adaptor/bitrate_controller.h
new file mode 100644
index 0000000..601f794
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/bitrate_controller.h
@@ -0,0 +1,54 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_BITRATE_CONTROLLER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_BITRATE_CONTROLLER_H_
+
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+namespace audio_network_adaptor {
+
+class BitrateController final : public Controller {
+ public:
+  struct Config {
+    Config(int initial_bitrate_bps,
+           int initial_frame_length_ms,
+           int fl_increase_overhead_offset,
+           int fl_decrease_overhead_offset);
+    ~Config();
+    int initial_bitrate_bps;
+    int initial_frame_length_ms;
+    int fl_increase_overhead_offset;
+    int fl_decrease_overhead_offset;
+  };
+
+  explicit BitrateController(const Config& config);
+
+  ~BitrateController() override;
+
+  void UpdateNetworkMetrics(const NetworkMetrics& network_metrics) override;
+
+  void MakeDecision(AudioEncoderRuntimeConfig* config) override;
+
+ private:
+  const Config config_;
+  int bitrate_bps_;
+  int frame_length_ms_;
+  rtc::Optional<int> target_audio_bitrate_bps_;
+  rtc::Optional<size_t> overhead_bytes_per_packet_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(BitrateController);
+};
+
+}  // namespace audio_network_adaptor
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_BITRATE_CONTROLLER_H_
diff --git a/modules/audio_coding/audio_network_adaptor/bitrate_controller_unittest.cc b/modules/audio_coding/audio_network_adaptor/bitrate_controller_unittest.cc
new file mode 100644
index 0000000..9864511
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/bitrate_controller_unittest.cc
@@ -0,0 +1,249 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/bitrate_controller.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace audio_network_adaptor {
+
+namespace {
+
+void UpdateNetworkMetrics(
+    BitrateController* controller,
+    const rtc::Optional<int>& target_audio_bitrate_bps,
+    const rtc::Optional<size_t>& overhead_bytes_per_packet) {
+  // UpdateNetworkMetrics can accept multiple network metric updates at once.
+  // However, currently, the most used case is to update one metric at a time.
+  // To reflect this fact, we separate the calls.
+  if (target_audio_bitrate_bps) {
+    Controller::NetworkMetrics network_metrics;
+    network_metrics.target_audio_bitrate_bps = target_audio_bitrate_bps;
+    controller->UpdateNetworkMetrics(network_metrics);
+  }
+  if (overhead_bytes_per_packet) {
+    Controller::NetworkMetrics network_metrics;
+    network_metrics.overhead_bytes_per_packet = overhead_bytes_per_packet;
+    controller->UpdateNetworkMetrics(network_metrics);
+  }
+}
+
+void CheckDecision(BitrateController* controller,
+                   const rtc::Optional<int>& frame_length_ms,
+                   int expected_bitrate_bps) {
+  AudioEncoderRuntimeConfig config;
+  config.frame_length_ms = frame_length_ms;
+  controller->MakeDecision(&config);
+  EXPECT_EQ(expected_bitrate_bps, config.bitrate_bps);
+}
+
+}  // namespace
+
+// These tests are named AnaBitrateControllerTest to distinguish from
+// BitrateControllerTest in
+// modules/bitrate_controller/bitrate_controller_unittest.cc.
+
+TEST(AnaBitrateControllerTest, OutputInitValueWhenTargetBitrateUnknown) {
+  constexpr int kInitialBitrateBps = 32000;
+  constexpr int kInitialFrameLengthMs = 20;
+  constexpr size_t kOverheadBytesPerPacket = 64;
+  BitrateController controller(BitrateController::Config(
+      kInitialBitrateBps, kInitialFrameLengthMs, 0, 0));
+  UpdateNetworkMetrics(&controller, rtc::nullopt, kOverheadBytesPerPacket);
+  CheckDecision(&controller, kInitialFrameLengthMs * 2, kInitialBitrateBps);
+}
+
+TEST(AnaBitrateControllerTest, OutputInitValueWhenOverheadUnknown) {
+  constexpr int kInitialBitrateBps = 32000;
+  constexpr int kInitialFrameLengthMs = 20;
+  constexpr int kTargetBitrateBps = 48000;
+  BitrateController controller(BitrateController::Config(
+      kInitialBitrateBps, kInitialFrameLengthMs, 0, 0));
+  UpdateNetworkMetrics(&controller, kTargetBitrateBps, rtc::nullopt);
+  CheckDecision(&controller, kInitialFrameLengthMs * 2, kInitialBitrateBps);
+}
+
+TEST(AnaBitrateControllerTest, ChangeBitrateOnTargetBitrateChanged) {
+  test::ScopedFieldTrials override_field_trials(
+      "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+  constexpr int kInitialFrameLengthMs = 20;
+  BitrateController controller(
+      BitrateController::Config(32000, kInitialFrameLengthMs, 0, 0));
+  constexpr int kTargetBitrateBps = 48000;
+  constexpr size_t kOverheadBytesPerPacket = 64;
+  constexpr int kBitrateBps =
+      kTargetBitrateBps -
+      kOverheadBytesPerPacket * 8 * 1000 / kInitialFrameLengthMs;
+  // Frame length unchanged, bitrate changes in accordance with
+  // |metrics.target_audio_bitrate_bps| and |metrics.overhead_bytes_per_packet|.
+  UpdateNetworkMetrics(&controller, kTargetBitrateBps, kOverheadBytesPerPacket);
+  CheckDecision(&controller, kInitialFrameLengthMs, kBitrateBps);
+}
+
+TEST(AnaBitrateControllerTest, UpdateMultipleNetworkMetricsAtOnce) {
+  // This test is similar to ChangeBitrateOnTargetBitrateChanged. But instead of
+  // using ::UpdateNetworkMetrics(...), which calls
+  // BitrateController::UpdateNetworkMetrics(...) multiple times, we
+  // we call it only once. This is to verify that
+  // BitrateController::UpdateNetworkMetrics(...) can handle multiple
+  // network updates at once. This is, however, not a common use case in current
+  // audio_network_adaptor_impl.cc.
+  test::ScopedFieldTrials override_field_trials(
+      "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+  constexpr int kInitialFrameLengthMs = 20;
+  BitrateController controller(
+      BitrateController::Config(32000, kInitialFrameLengthMs, 0, 0));
+  constexpr int kTargetBitrateBps = 48000;
+  constexpr size_t kOverheadBytesPerPacket = 64;
+  constexpr int kBitrateBps =
+      kTargetBitrateBps -
+      kOverheadBytesPerPacket * 8 * 1000 / kInitialFrameLengthMs;
+  Controller::NetworkMetrics network_metrics;
+  network_metrics.target_audio_bitrate_bps = kTargetBitrateBps;
+  network_metrics.overhead_bytes_per_packet = kOverheadBytesPerPacket;
+  controller.UpdateNetworkMetrics(network_metrics);
+  CheckDecision(&controller, kInitialFrameLengthMs, kBitrateBps);
+}
+
+TEST(AnaBitrateControllerTest, TreatUnknownFrameLengthAsFrameLengthUnchanged) {
+  test::ScopedFieldTrials override_field_trials(
+      "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+  constexpr int kInitialFrameLengthMs = 20;
+  BitrateController controller(
+      BitrateController::Config(32000, kInitialFrameLengthMs, 0, 0));
+  constexpr int kTargetBitrateBps = 48000;
+  constexpr size_t kOverheadBytesPerPacket = 64;
+  constexpr int kBitrateBps =
+      kTargetBitrateBps -
+      kOverheadBytesPerPacket * 8 * 1000 / kInitialFrameLengthMs;
+  UpdateNetworkMetrics(&controller, kTargetBitrateBps, kOverheadBytesPerPacket);
+  CheckDecision(&controller, rtc::nullopt, kBitrateBps);
+}
+
+TEST(AnaBitrateControllerTest, IncreaseBitrateOnFrameLengthIncreased) {
+  test::ScopedFieldTrials override_field_trials(
+      "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+  constexpr int kInitialFrameLengthMs = 20;
+  BitrateController controller(
+      BitrateController::Config(32000, kInitialFrameLengthMs, 0, 0));
+
+  constexpr int kTargetBitrateBps = 48000;
+  constexpr size_t kOverheadBytesPerPacket = 64;
+  constexpr int kBitrateBps =
+      kTargetBitrateBps -
+      kOverheadBytesPerPacket * 8 * 1000 / kInitialFrameLengthMs;
+  UpdateNetworkMetrics(&controller, kTargetBitrateBps, kOverheadBytesPerPacket);
+  CheckDecision(&controller, rtc::nullopt, kBitrateBps);
+
+  constexpr int kFrameLengthMs = 60;
+  constexpr size_t kPacketOverheadRateDiff =
+      kOverheadBytesPerPacket * 8 * 1000 / 20 -
+      kOverheadBytesPerPacket * 8 * 1000 / 60;
+  UpdateNetworkMetrics(&controller, kTargetBitrateBps, kOverheadBytesPerPacket);
+  CheckDecision(&controller, kFrameLengthMs,
+                kBitrateBps + kPacketOverheadRateDiff);
+}
+
+TEST(AnaBitrateControllerTest, DecreaseBitrateOnFrameLengthDecreased) {
+  test::ScopedFieldTrials override_field_trials(
+      "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+  constexpr int kInitialFrameLengthMs = 60;
+  BitrateController controller(
+      BitrateController::Config(32000, kInitialFrameLengthMs, 0, 0));
+
+  constexpr int kTargetBitrateBps = 48000;
+  constexpr size_t kOverheadBytesPerPacket = 64;
+  constexpr int kBitrateBps =
+      kTargetBitrateBps -
+      kOverheadBytesPerPacket * 8 * 1000 / kInitialFrameLengthMs;
+  UpdateNetworkMetrics(&controller, kTargetBitrateBps, kOverheadBytesPerPacket);
+  CheckDecision(&controller, rtc::nullopt, kBitrateBps);
+
+  constexpr int kFrameLengthMs = 20;
+  constexpr size_t kPacketOverheadRateDiff =
+      kOverheadBytesPerPacket * 8 * 1000 / 20 -
+      kOverheadBytesPerPacket * 8 * 1000 / 60;
+  UpdateNetworkMetrics(&controller, kTargetBitrateBps, kOverheadBytesPerPacket);
+  CheckDecision(&controller, kFrameLengthMs,
+                kBitrateBps - kPacketOverheadRateDiff);
+}
+
+TEST(AnaBitrateControllerTest, BitrateNeverBecomesNegative) {
+  test::ScopedFieldTrials override_field_trials(
+      "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+  BitrateController controller(BitrateController::Config(32000, 20, 0, 0));
+  constexpr size_t kOverheadBytesPerPacket = 64;
+  constexpr int kFrameLengthMs = 60;
+  // Set a target rate smaller than overhead rate, the bitrate is bounded by 0.
+  constexpr int kTargetBitrateBps =
+      kOverheadBytesPerPacket * 8 * 1000 / kFrameLengthMs - 1;
+  UpdateNetworkMetrics(&controller, kTargetBitrateBps, kOverheadBytesPerPacket);
+  CheckDecision(&controller, kFrameLengthMs, 0);
+}
+
+TEST(AnaBitrateControllerTest, CheckBehaviorOnChangingCondition) {
+  test::ScopedFieldTrials override_field_trials(
+      "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+  BitrateController controller(BitrateController::Config(32000, 20, 0, 0));
+
+  // Start from an arbitrary overall bitrate.
+  int overall_bitrate = 34567;
+  size_t overhead_bytes_per_packet = 64;
+  int frame_length_ms = 20;
+  int current_bitrate = rtc::checked_cast<int>(
+      overall_bitrate - overhead_bytes_per_packet * 8 * 1000 / frame_length_ms);
+
+  UpdateNetworkMetrics(&controller, overall_bitrate, overhead_bytes_per_packet);
+  CheckDecision(&controller, frame_length_ms, current_bitrate);
+
+  // Next: increase overall bitrate.
+  overall_bitrate += 100;
+  current_bitrate += 100;
+  UpdateNetworkMetrics(&controller, overall_bitrate, overhead_bytes_per_packet);
+  CheckDecision(&controller, frame_length_ms, current_bitrate);
+
+  // Next: change frame length.
+  frame_length_ms = 60;
+  current_bitrate += rtc::checked_cast<int>(
+      overhead_bytes_per_packet * 8 * 1000 / 20 -
+      overhead_bytes_per_packet * 8 * 1000 / 60);
+  UpdateNetworkMetrics(&controller, overall_bitrate, overhead_bytes_per_packet);
+  CheckDecision(&controller, frame_length_ms, current_bitrate);
+
+  // Next: change overhead.
+  overhead_bytes_per_packet -= 30;
+  current_bitrate += 30 * 8 * 1000 / frame_length_ms;
+  UpdateNetworkMetrics(&controller, overall_bitrate, overhead_bytes_per_packet);
+  CheckDecision(&controller, frame_length_ms, current_bitrate);
+
+  // Next: change frame length.
+  frame_length_ms = 20;
+  current_bitrate -= rtc::checked_cast<int>(
+      overhead_bytes_per_packet * 8 * 1000 / 20 -
+      overhead_bytes_per_packet * 8 * 1000 / 60);
+  UpdateNetworkMetrics(&controller, overall_bitrate, overhead_bytes_per_packet);
+  CheckDecision(&controller, frame_length_ms, current_bitrate);
+
+  // Next: decrease overall bitrate and frame length.
+  overall_bitrate -= 100;
+  current_bitrate -= 100;
+  frame_length_ms = 60;
+  current_bitrate += rtc::checked_cast<int>(
+      overhead_bytes_per_packet * 8 * 1000 / 20 -
+      overhead_bytes_per_packet * 8 * 1000 / 60);
+
+  UpdateNetworkMetrics(&controller, overall_bitrate, overhead_bytes_per_packet);
+  CheckDecision(&controller, frame_length_ms, current_bitrate);
+}
+
+}  // namespace audio_network_adaptor
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/channel_controller.cc b/modules/audio_coding/audio_network_adaptor/channel_controller.cc
new file mode 100644
index 0000000..a1c30db
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/channel_controller.cc
@@ -0,0 +1,61 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+
+#include "modules/audio_coding/audio_network_adaptor/channel_controller.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+ChannelController::Config::Config(size_t num_encoder_channels,
+                                  size_t intial_channels_to_encode,
+                                  int channel_1_to_2_bandwidth_bps,
+                                  int channel_2_to_1_bandwidth_bps)
+    : num_encoder_channels(num_encoder_channels),
+      intial_channels_to_encode(intial_channels_to_encode),
+      channel_1_to_2_bandwidth_bps(channel_1_to_2_bandwidth_bps),
+      channel_2_to_1_bandwidth_bps(channel_2_to_1_bandwidth_bps) {}
+
+ChannelController::ChannelController(const Config& config)
+    : config_(config), channels_to_encode_(config_.intial_channels_to_encode) {
+  RTC_DCHECK_GT(config_.intial_channels_to_encode, 0lu);
+  // Currently, we require |intial_channels_to_encode| to be <= 2.
+  RTC_DCHECK_LE(config_.intial_channels_to_encode, 2lu);
+  RTC_DCHECK_GE(config_.num_encoder_channels,
+                config_.intial_channels_to_encode);
+}
+
+ChannelController::~ChannelController() = default;
+
+void ChannelController::UpdateNetworkMetrics(
+    const NetworkMetrics& network_metrics) {
+  if (network_metrics.uplink_bandwidth_bps)
+    uplink_bandwidth_bps_ = network_metrics.uplink_bandwidth_bps;
+}
+
+void ChannelController::MakeDecision(AudioEncoderRuntimeConfig* config) {
+  // Decision on |num_channels| should not have been made.
+  RTC_DCHECK(!config->num_channels);
+
+  if (uplink_bandwidth_bps_) {
+    if (channels_to_encode_ == 2 &&
+        *uplink_bandwidth_bps_ <= config_.channel_2_to_1_bandwidth_bps) {
+      channels_to_encode_ = 1;
+    } else if (channels_to_encode_ == 1 &&
+               *uplink_bandwidth_bps_ >= config_.channel_1_to_2_bandwidth_bps) {
+      channels_to_encode_ =
+          std::min(static_cast<size_t>(2), config_.num_encoder_channels);
+    }
+  }
+  config->num_channels = channels_to_encode_;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/channel_controller.h b/modules/audio_coding/audio_network_adaptor/channel_controller.h
new file mode 100644
index 0000000..f53ddd6
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/channel_controller.h
@@ -0,0 +1,53 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_CHANNEL_CONTROLLER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_CHANNEL_CONTROLLER_H_
+
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class ChannelController final : public Controller {
+ public:
+  struct Config {
+    Config(size_t num_encoder_channels,
+           size_t intial_channels_to_encode,
+           int channel_1_to_2_bandwidth_bps,
+           int channel_2_to_1_bandwidth_bps);
+    size_t num_encoder_channels;
+    size_t intial_channels_to_encode;
+    // Uplink bandwidth above which the number of encoded channels should switch
+    // from 1 to 2.
+    int channel_1_to_2_bandwidth_bps;
+    // Uplink bandwidth below which the number of encoded channels should switch
+    // from 2 to 1.
+    int channel_2_to_1_bandwidth_bps;
+  };
+
+  explicit ChannelController(const Config& config);
+
+  ~ChannelController() override;
+
+  void UpdateNetworkMetrics(const NetworkMetrics& network_metrics) override;
+
+  void MakeDecision(AudioEncoderRuntimeConfig* config) override;
+
+ private:
+  const Config config_;
+  size_t channels_to_encode_;
+  rtc::Optional<int> uplink_bandwidth_bps_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(ChannelController);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_CHANNEL_CONTROLLER_H_
diff --git a/modules/audio_coding/audio_network_adaptor/channel_controller_unittest.cc b/modules/audio_coding/audio_network_adaptor/channel_controller_unittest.cc
new file mode 100644
index 0000000..64e5dae
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/channel_controller_unittest.cc
@@ -0,0 +1,100 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "modules/audio_coding/audio_network_adaptor/channel_controller.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kNumChannels = 2;
+constexpr int kChannel1To2BandwidthBps = 31000;
+constexpr int kChannel2To1BandwidthBps = 29000;
+constexpr int kMediumBandwidthBps =
+    (kChannel1To2BandwidthBps + kChannel2To1BandwidthBps) / 2;
+
+std::unique_ptr<ChannelController> CreateChannelController(int init_channels) {
+  std::unique_ptr<ChannelController> controller(
+      new ChannelController(ChannelController::Config(
+          kNumChannels, init_channels, kChannel1To2BandwidthBps,
+          kChannel2To1BandwidthBps)));
+  return controller;
+}
+
+void CheckDecision(ChannelController* controller,
+                   const rtc::Optional<int>& uplink_bandwidth_bps,
+                   size_t expected_num_channels) {
+  if (uplink_bandwidth_bps) {
+    Controller::NetworkMetrics network_metrics;
+    network_metrics.uplink_bandwidth_bps = uplink_bandwidth_bps;
+    controller->UpdateNetworkMetrics(network_metrics);
+  }
+  AudioEncoderRuntimeConfig config;
+  controller->MakeDecision(&config);
+  EXPECT_EQ(expected_num_channels, config.num_channels);
+}
+
+}  // namespace
+
+TEST(ChannelControllerTest, OutputInitValueWhenUplinkBandwidthUnknown) {
+  constexpr int kInitChannels = 2;
+  auto controller = CreateChannelController(kInitChannels);
+  CheckDecision(controller.get(), rtc::nullopt, kInitChannels);
+}
+
+TEST(ChannelControllerTest, SwitchTo2ChannelsOnHighUplinkBandwidth) {
+  constexpr int kInitChannels = 1;
+  auto controller = CreateChannelController(kInitChannels);
+  // Use high bandwidth to check output switch to 2.
+  CheckDecision(controller.get(), kChannel1To2BandwidthBps, 2);
+}
+
+TEST(ChannelControllerTest, SwitchTo1ChannelOnLowUplinkBandwidth) {
+  constexpr int kInitChannels = 2;
+  auto controller = CreateChannelController(kInitChannels);
+  // Use low bandwidth to check output switch to 1.
+  CheckDecision(controller.get(), kChannel2To1BandwidthBps, 1);
+}
+
+TEST(ChannelControllerTest, Maintain1ChannelOnMediumUplinkBandwidth) {
+  constexpr int kInitChannels = 1;
+  auto controller = CreateChannelController(kInitChannels);
+  // Use between-thresholds bandwidth to check output remains at 1.
+  CheckDecision(controller.get(), kMediumBandwidthBps, 1);
+}
+
+TEST(ChannelControllerTest, Maintain2ChannelsOnMediumUplinkBandwidth) {
+  constexpr int kInitChannels = 2;
+  auto controller = CreateChannelController(kInitChannels);
+  // Use between-thresholds bandwidth to check output remains at 2.
+  CheckDecision(controller.get(), kMediumBandwidthBps, 2);
+}
+
+TEST(ChannelControllerTest, CheckBehaviorOnChangingUplinkBandwidth) {
+  constexpr int kInitChannels = 1;
+  auto controller = CreateChannelController(kInitChannels);
+
+  // Use between-thresholds bandwidth to check output remains at 1.
+  CheckDecision(controller.get(), kMediumBandwidthBps, 1);
+
+  // Use high bandwidth to check output switch to 2.
+  CheckDecision(controller.get(), kChannel1To2BandwidthBps, 2);
+
+  // Use between-thresholds bandwidth to check output remains at 2.
+  CheckDecision(controller.get(), kMediumBandwidthBps, 2);
+
+  // Use low bandwidth to check output switch to 1.
+  CheckDecision(controller.get(), kChannel2To1BandwidthBps, 1);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/config.proto b/modules/audio_coding/audio_network_adaptor/config.proto
new file mode 100644
index 0000000..6d1cd42
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/config.proto
@@ -0,0 +1,164 @@
+syntax = "proto2";
+option optimize_for = LITE_RUNTIME;
+option java_package = "org.webrtc.AudioNetworkAdaptor";
+option java_outer_classname = "Config";
+package webrtc.audio_network_adaptor.config;
+
+message FecController {
+  message Threshold {
+    // Threshold defines a curve in the bandwidth/packet-loss domain. The
+    // curve is characterized by the two conjunction points: A and B.
+    //
+    // packet ^  |
+    //  loss  | A|
+    //        |   \        A: (low_bandwidth_bps, low_bandwidth_packet_loss)
+    //        |    \       B: (high_bandwidth_bps, high_bandwidth_packet_loss)
+    //        |    B\________
+    //        |---------------> bandwidth
+    optional int32 low_bandwidth_bps = 1;
+    optional float low_bandwidth_packet_loss = 2;
+    optional int32 high_bandwidth_bps = 3;
+    optional float high_bandwidth_packet_loss = 4;
+  }
+
+  // |fec_enabling_threshold| defines a curve, above which FEC should be
+  // enabled. |fec_disabling_threshold| defines a curve, under which FEC
+  // should be disabled. See below
+  //
+  // packet-loss ^   |  |
+  //             |   |  |   FEC
+  //             |    \  \   ON
+  //             | FEC \  \_______ fec_enabling_threshold
+  //             | OFF  \_________ fec_disabling_threshold
+  //             |-----------------> bandwidth
+  optional Threshold fec_enabling_threshold = 1;
+  optional Threshold fec_disabling_threshold = 2;
+
+  // |time_constant_ms| is the time constant for an exponential filter, which
+  // is used for smoothing the packet loss fraction.
+  optional int32 time_constant_ms = 3;
+}
+
+message FecControllerRplrBased {
+  message Threshold {
+    // Threshold defines a curve in the bandwidth/recoverable-packet-loss
+    // domain.
+    // The curve is characterized by the two conjunction points: A and B.
+    //
+    // recoverable ^
+    // packet      |  |
+    //  loss       | A|
+    //             |   \        A: (low_bandwidth_bps,
+    //             |    \           low_bandwidth_recoverable_packet_loss)
+    //             |     \       B: (high_bandwidth_bps,
+    //             |      \          high_bandwidth_recoverable_packet_loss)
+    //             |      B\________
+    //             |---------------> bandwidth
+    optional int32 low_bandwidth_bps = 1;
+    optional float low_bandwidth_recoverable_packet_loss = 2;
+    optional int32 high_bandwidth_bps = 3;
+    optional float high_bandwidth_recoverable_packet_loss = 4;
+  }
+
+  // |fec_enabling_threshold| defines a curve, above which FEC should be
+  // enabled. |fec_disabling_threshold| defines a curve, under which FEC
+  // should be disabled. See below
+  //
+  // packet-loss ^   |  |
+  //             |   |  |   FEC
+  //             |    \  \   ON
+  //             | FEC \  \_______ fec_enabling_threshold
+  //             | OFF  \_________ fec_disabling_threshold
+  //             |-----------------> bandwidth
+  optional Threshold fec_enabling_threshold = 1;
+  optional Threshold fec_disabling_threshold = 2;
+}
+
+message FrameLengthController {
+  // Uplink packet loss fraction below which frame length can increase.
+  optional float fl_increasing_packet_loss_fraction = 1;
+
+  // Uplink packet loss fraction above which frame length should decrease.
+  optional float fl_decreasing_packet_loss_fraction = 2;
+
+  // Uplink bandwidth below which frame length can switch from 20ms to 60ms.
+  optional int32 fl_20ms_to_60ms_bandwidth_bps = 3;
+
+  // Uplink bandwidth above which frame length should switch from 60ms to 20ms.
+  optional int32 fl_60ms_to_20ms_bandwidth_bps = 4;
+
+  // Uplink bandwidth below which frame length can switch from 60ms to 120ms.
+  optional int32 fl_60ms_to_120ms_bandwidth_bps = 5;
+
+  // Uplink bandwidth above which frame length should switch from 120ms to 60ms.
+  optional int32 fl_120ms_to_60ms_bandwidth_bps = 6;
+
+  // Offset to apply to the per-packet overhead when increasing frame length.
+  optional int32 fl_increase_overhead_offset = 7;
+
+  // Offset to apply to the per-packet overhead when decreasing frame length.
+  optional int32 fl_decrease_overhead_offset = 8;
+}
+
+message ChannelController {
+  // Uplink bandwidth above which the number of encoded channels should switch
+  // from 1 to 2.
+  optional int32 channel_1_to_2_bandwidth_bps = 1;
+
+  // Uplink bandwidth below which the number of encoded channels should switch
+  // from 2 to 1.
+  optional int32 channel_2_to_1_bandwidth_bps = 2;
+}
+
+message DtxController {
+  // Uplink bandwidth below which DTX should be switched on.
+  optional int32 dtx_enabling_bandwidth_bps = 1;
+
+  // Uplink bandwidth above which DTX should be switched off.
+  optional int32 dtx_disabling_bandwidth_bps = 2;
+}
+
+message BitrateController {
+  // Offset to apply to per-packet overhead when the frame length is increased.
+  optional int32 fl_increase_overhead_offset = 1;
+  // Offset to apply to per-packet overhead when the frame length is decreased.
+  optional int32 fl_decrease_overhead_offset = 2;
+}
+
+message Controller {
+  message ScoringPoint {
+    // |ScoringPoint| is a subspace of network condition. It is used for
+    // comparing the significance of controllers.
+    optional int32 uplink_bandwidth_bps = 1;
+    optional float uplink_packet_loss_fraction = 2;
+  }
+
+  // The distance from |scoring_point| to a given network condition defines
+  // the significance of this controller with respect that network condition.
+  // Shorter distance means higher significance. The significances of
+  // controllers determine their order in the processing pipeline. Controllers
+  // without |scoring_point| follow their default order in
+  // |ControllerManager::controllers|.
+  optional ScoringPoint scoring_point = 1;
+
+  oneof controller {
+    FecController fec_controller = 21;
+    FrameLengthController frame_length_controller = 22;
+    ChannelController channel_controller = 23;
+    DtxController dtx_controller = 24;
+    BitrateController bitrate_controller = 25;
+    FecControllerRplrBased fec_controller_rplr_based = 26;
+  }
+}
+
+message ControllerManager {
+  repeated Controller controllers = 1;
+
+  // Least time since last reordering for a new reordering to be made.
+  optional int32 min_reordering_time_ms = 2;
+
+  // Least squared distance from last scoring point for a new reordering to be
+  // made.
+  optional float min_reordering_squared_distance = 3;
+}
+
diff --git a/modules/audio_coding/audio_network_adaptor/controller.cc b/modules/audio_coding/audio_network_adaptor/controller.cc
new file mode 100644
index 0000000..5e2dc85
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/controller.cc
@@ -0,0 +1,19 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+
+namespace webrtc {
+
+Controller::NetworkMetrics::NetworkMetrics() = default;
+
+Controller::NetworkMetrics::~NetworkMetrics() = default;
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/controller.h b/modules/audio_coding/audio_network_adaptor/controller.h
new file mode 100644
index 0000000..af2f569
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/controller.h
@@ -0,0 +1,43 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_CONTROLLER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_CONTROLLER_H_
+
+#include "api/optional.h"
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h"
+
+namespace webrtc {
+
+class Controller {
+ public:
+  struct NetworkMetrics {
+    NetworkMetrics();
+    ~NetworkMetrics();
+    rtc::Optional<int> uplink_bandwidth_bps;
+    rtc::Optional<float> uplink_packet_loss_fraction;
+    rtc::Optional<float> uplink_recoverable_packet_loss_fraction;
+    rtc::Optional<int> target_audio_bitrate_bps;
+    rtc::Optional<int> rtt_ms;
+    rtc::Optional<size_t> overhead_bytes_per_packet;
+  };
+
+  virtual ~Controller() = default;
+
+  // Informs network metrics update to this controller. Any non-empty field
+  // indicates an update on the corresponding network metric.
+  virtual void UpdateNetworkMetrics(const NetworkMetrics& network_metrics) = 0;
+
+  virtual void MakeDecision(AudioEncoderRuntimeConfig* config) = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_CONTROLLER_H_
diff --git a/modules/audio_coding/audio_network_adaptor/controller_manager.cc b/modules/audio_coding/audio_network_adaptor/controller_manager.cc
new file mode 100644
index 0000000..313aa62
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/controller_manager.cc
@@ -0,0 +1,441 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/controller_manager.h"
+
+#include <cmath>
+#include <utility>
+
+#include "modules/audio_coding/audio_network_adaptor/bitrate_controller.h"
+#include "modules/audio_coding/audio_network_adaptor/channel_controller.h"
+#include "modules/audio_coding/audio_network_adaptor/debug_dump_writer.h"
+#include "modules/audio_coding/audio_network_adaptor/dtx_controller.h"
+#include "modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h"
+#include "modules/audio_coding/audio_network_adaptor/fec_controller_rplr_based.h"
+#include "modules/audio_coding/audio_network_adaptor/frame_length_controller.h"
+#include "modules/audio_coding/audio_network_adaptor/util/threshold_curve.h"
+#include "rtc_base/ignore_wundef.h"
+#include "rtc_base/timeutils.h"
+
+#if WEBRTC_ENABLE_PROTOBUF
+RTC_PUSH_IGNORING_WUNDEF()
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_coding/audio_network_adaptor/config.pb.h"
+#else
+#include "modules/audio_coding/audio_network_adaptor/config.pb.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+#endif
+
+namespace webrtc {
+
+namespace {
+
+#if WEBRTC_ENABLE_PROTOBUF
+
+std::unique_ptr<FecControllerPlrBased> CreateFecControllerPlrBased(
+    const audio_network_adaptor::config::FecController& config,
+    bool initial_fec_enabled) {
+  RTC_CHECK(config.has_fec_enabling_threshold());
+  RTC_CHECK(config.has_fec_disabling_threshold());
+  RTC_CHECK(config.has_time_constant_ms());
+
+  auto& fec_enabling_threshold = config.fec_enabling_threshold();
+  RTC_CHECK(fec_enabling_threshold.has_low_bandwidth_bps());
+  RTC_CHECK(fec_enabling_threshold.has_low_bandwidth_packet_loss());
+  RTC_CHECK(fec_enabling_threshold.has_high_bandwidth_bps());
+  RTC_CHECK(fec_enabling_threshold.has_high_bandwidth_packet_loss());
+
+  auto& fec_disabling_threshold = config.fec_disabling_threshold();
+  RTC_CHECK(fec_disabling_threshold.has_low_bandwidth_bps());
+  RTC_CHECK(fec_disabling_threshold.has_low_bandwidth_packet_loss());
+  RTC_CHECK(fec_disabling_threshold.has_high_bandwidth_bps());
+  RTC_CHECK(fec_disabling_threshold.has_high_bandwidth_packet_loss());
+
+  return std::unique_ptr<FecControllerPlrBased>(
+      new FecControllerPlrBased(FecControllerPlrBased::Config(
+          initial_fec_enabled,
+          ThresholdCurve(fec_enabling_threshold.low_bandwidth_bps(),
+                         fec_enabling_threshold.low_bandwidth_packet_loss(),
+                         fec_enabling_threshold.high_bandwidth_bps(),
+                         fec_enabling_threshold.high_bandwidth_packet_loss()),
+          ThresholdCurve(fec_disabling_threshold.low_bandwidth_bps(),
+                         fec_disabling_threshold.low_bandwidth_packet_loss(),
+                         fec_disabling_threshold.high_bandwidth_bps(),
+                         fec_disabling_threshold.high_bandwidth_packet_loss()),
+          config.time_constant_ms())));
+}
+
+std::unique_ptr<FecControllerRplrBased> CreateFecControllerRplrBased(
+    const audio_network_adaptor::config::FecControllerRplrBased& config,
+    bool initial_fec_enabled) {
+  RTC_CHECK(config.has_fec_enabling_threshold());
+  RTC_CHECK(config.has_fec_disabling_threshold());
+
+  auto& fec_enabling_threshold = config.fec_enabling_threshold();
+  RTC_CHECK(fec_enabling_threshold.has_low_bandwidth_bps());
+  RTC_CHECK(fec_enabling_threshold.has_low_bandwidth_recoverable_packet_loss());
+  RTC_CHECK(fec_enabling_threshold.has_high_bandwidth_bps());
+  RTC_CHECK(
+      fec_enabling_threshold.has_high_bandwidth_recoverable_packet_loss());
+
+  auto& fec_disabling_threshold = config.fec_disabling_threshold();
+  RTC_CHECK(fec_disabling_threshold.has_low_bandwidth_bps());
+  RTC_CHECK(
+      fec_disabling_threshold.has_low_bandwidth_recoverable_packet_loss());
+  RTC_CHECK(fec_disabling_threshold.has_high_bandwidth_bps());
+  RTC_CHECK(
+      fec_disabling_threshold.has_high_bandwidth_recoverable_packet_loss());
+
+  return std::unique_ptr<FecControllerRplrBased>(
+      new FecControllerRplrBased(FecControllerRplrBased::Config(
+          initial_fec_enabled,
+          ThresholdCurve(
+              fec_enabling_threshold.low_bandwidth_bps(),
+              fec_enabling_threshold.low_bandwidth_recoverable_packet_loss(),
+              fec_enabling_threshold.high_bandwidth_bps(),
+              fec_enabling_threshold.high_bandwidth_recoverable_packet_loss()),
+          ThresholdCurve(
+              fec_disabling_threshold.low_bandwidth_bps(),
+              fec_disabling_threshold.low_bandwidth_recoverable_packet_loss(),
+              fec_disabling_threshold.high_bandwidth_bps(),
+              fec_disabling_threshold
+                  .high_bandwidth_recoverable_packet_loss()))));
+}
+
+std::unique_ptr<FrameLengthController> CreateFrameLengthController(
+    const audio_network_adaptor::config::FrameLengthController& config,
+    rtc::ArrayView<const int> encoder_frame_lengths_ms,
+    int initial_frame_length_ms,
+    int min_encoder_bitrate_bps) {
+  RTC_CHECK(config.has_fl_increasing_packet_loss_fraction());
+  RTC_CHECK(config.has_fl_decreasing_packet_loss_fraction());
+  RTC_CHECK(config.has_fl_20ms_to_60ms_bandwidth_bps());
+  RTC_CHECK(config.has_fl_60ms_to_20ms_bandwidth_bps());
+
+  std::map<FrameLengthController::Config::FrameLengthChange, int>
+      fl_changing_bandwidths_bps = {
+          {FrameLengthController::Config::FrameLengthChange(20, 60),
+           config.fl_20ms_to_60ms_bandwidth_bps()},
+          {FrameLengthController::Config::FrameLengthChange(60, 20),
+           config.fl_60ms_to_20ms_bandwidth_bps()}};
+
+  if (config.has_fl_60ms_to_120ms_bandwidth_bps() &&
+      config.has_fl_120ms_to_60ms_bandwidth_bps()) {
+    fl_changing_bandwidths_bps.insert(std::make_pair(
+        FrameLengthController::Config::FrameLengthChange(60, 120),
+        config.fl_60ms_to_120ms_bandwidth_bps()));
+    fl_changing_bandwidths_bps.insert(std::make_pair(
+        FrameLengthController::Config::FrameLengthChange(120, 60),
+        config.fl_120ms_to_60ms_bandwidth_bps()));
+  }
+
+  int fl_increase_overhead_offset = 0;
+  if (config.has_fl_increase_overhead_offset()) {
+    fl_increase_overhead_offset = config.fl_increase_overhead_offset();
+  }
+  int fl_decrease_overhead_offset = 0;
+  if (config.has_fl_decrease_overhead_offset()) {
+    fl_decrease_overhead_offset = config.fl_decrease_overhead_offset();
+  }
+
+  FrameLengthController::Config ctor_config(
+      std::vector<int>(), initial_frame_length_ms, min_encoder_bitrate_bps,
+      config.fl_increasing_packet_loss_fraction(),
+      config.fl_decreasing_packet_loss_fraction(), fl_increase_overhead_offset,
+      fl_decrease_overhead_offset, std::move(fl_changing_bandwidths_bps));
+
+  for (auto frame_length : encoder_frame_lengths_ms)
+    ctor_config.encoder_frame_lengths_ms.push_back(frame_length);
+
+  return std::unique_ptr<FrameLengthController>(
+      new FrameLengthController(ctor_config));
+}
+
+std::unique_ptr<ChannelController> CreateChannelController(
+    const audio_network_adaptor::config::ChannelController& config,
+    size_t num_encoder_channels,
+    size_t intial_channels_to_encode) {
+  RTC_CHECK(config.has_channel_1_to_2_bandwidth_bps());
+  RTC_CHECK(config.has_channel_2_to_1_bandwidth_bps());
+
+  return std::unique_ptr<ChannelController>(new ChannelController(
+      ChannelController::Config(num_encoder_channels, intial_channels_to_encode,
+                                config.channel_1_to_2_bandwidth_bps(),
+                                config.channel_2_to_1_bandwidth_bps())));
+}
+
+std::unique_ptr<DtxController> CreateDtxController(
+    const audio_network_adaptor::config::DtxController& dtx_config,
+    bool initial_dtx_enabled) {
+  RTC_CHECK(dtx_config.has_dtx_enabling_bandwidth_bps());
+  RTC_CHECK(dtx_config.has_dtx_disabling_bandwidth_bps());
+
+  return std::unique_ptr<DtxController>(new DtxController(DtxController::Config(
+      initial_dtx_enabled, dtx_config.dtx_enabling_bandwidth_bps(),
+      dtx_config.dtx_disabling_bandwidth_bps())));
+}
+
+using audio_network_adaptor::BitrateController;
+std::unique_ptr<BitrateController> CreateBitrateController(
+    const audio_network_adaptor::config::BitrateController& bitrate_config,
+    int initial_bitrate_bps,
+    int initial_frame_length_ms) {
+  int fl_increase_overhead_offset = 0;
+  if (bitrate_config.has_fl_increase_overhead_offset()) {
+    fl_increase_overhead_offset = bitrate_config.fl_increase_overhead_offset();
+  }
+  int fl_decrease_overhead_offset = 0;
+  if (bitrate_config.has_fl_decrease_overhead_offset()) {
+    fl_decrease_overhead_offset = bitrate_config.fl_decrease_overhead_offset();
+  }
+  return std::unique_ptr<BitrateController>(
+      new BitrateController(BitrateController::Config(
+          initial_bitrate_bps, initial_frame_length_ms,
+          fl_increase_overhead_offset, fl_decrease_overhead_offset)));
+}
+#endif  // WEBRTC_ENABLE_PROTOBUF
+
+}  // namespace
+
+ControllerManagerImpl::Config::Config(int min_reordering_time_ms,
+                                      float min_reordering_squared_distance)
+    : min_reordering_time_ms(min_reordering_time_ms),
+      min_reordering_squared_distance(min_reordering_squared_distance) {}
+
+ControllerManagerImpl::Config::~Config() = default;
+
+std::unique_ptr<ControllerManager> ControllerManagerImpl::Create(
+    const ProtoString& config_string,
+    size_t num_encoder_channels,
+    rtc::ArrayView<const int> encoder_frame_lengths_ms,
+    int min_encoder_bitrate_bps,
+    size_t intial_channels_to_encode,
+    int initial_frame_length_ms,
+    int initial_bitrate_bps,
+    bool initial_fec_enabled,
+    bool initial_dtx_enabled) {
+  return Create(config_string, num_encoder_channels, encoder_frame_lengths_ms,
+                min_encoder_bitrate_bps, intial_channels_to_encode,
+                initial_frame_length_ms, initial_bitrate_bps,
+                initial_fec_enabled, initial_dtx_enabled, nullptr);
+}
+
+std::unique_ptr<ControllerManager> ControllerManagerImpl::Create(
+    const ProtoString& config_string,
+    size_t num_encoder_channels,
+    rtc::ArrayView<const int> encoder_frame_lengths_ms,
+    int min_encoder_bitrate_bps,
+    size_t intial_channels_to_encode,
+    int initial_frame_length_ms,
+    int initial_bitrate_bps,
+    bool initial_fec_enabled,
+    bool initial_dtx_enabled,
+    DebugDumpWriter* debug_dump_writer) {
+#if WEBRTC_ENABLE_PROTOBUF
+  audio_network_adaptor::config::ControllerManager controller_manager_config;
+  RTC_CHECK(controller_manager_config.ParseFromString(config_string));
+  if (debug_dump_writer)
+    debug_dump_writer->DumpControllerManagerConfig(controller_manager_config,
+                                                   rtc::TimeMillis());
+
+  std::vector<std::unique_ptr<Controller>> controllers;
+  std::map<const Controller*, std::pair<int, float>> scoring_points;
+
+  for (int i = 0; i < controller_manager_config.controllers_size(); ++i) {
+    auto& controller_config = controller_manager_config.controllers(i);
+    std::unique_ptr<Controller> controller;
+    switch (controller_config.controller_case()) {
+      case audio_network_adaptor::config::Controller::kFecController:
+        controller = CreateFecControllerPlrBased(
+            controller_config.fec_controller(), initial_fec_enabled);
+        break;
+      case audio_network_adaptor::config::Controller::kFecControllerRplrBased:
+        controller = CreateFecControllerRplrBased(
+            controller_config.fec_controller_rplr_based(), initial_fec_enabled);
+        break;
+      case audio_network_adaptor::config::Controller::kFrameLengthController:
+        controller = CreateFrameLengthController(
+            controller_config.frame_length_controller(),
+            encoder_frame_lengths_ms, initial_frame_length_ms,
+            min_encoder_bitrate_bps);
+        break;
+      case audio_network_adaptor::config::Controller::kChannelController:
+        controller = CreateChannelController(
+            controller_config.channel_controller(), num_encoder_channels,
+            intial_channels_to_encode);
+        break;
+      case audio_network_adaptor::config::Controller::kDtxController:
+        controller = CreateDtxController(controller_config.dtx_controller(),
+                                         initial_dtx_enabled);
+        break;
+      case audio_network_adaptor::config::Controller::kBitrateController:
+        controller = CreateBitrateController(
+            controller_config.bitrate_controller(), initial_bitrate_bps,
+            initial_frame_length_ms);
+        break;
+      default:
+        RTC_NOTREACHED();
+    }
+    if (controller_config.has_scoring_point()) {
+      auto& scoring_point = controller_config.scoring_point();
+      RTC_CHECK(scoring_point.has_uplink_bandwidth_bps());
+      RTC_CHECK(scoring_point.has_uplink_packet_loss_fraction());
+      scoring_points[controller.get()] = std::make_pair<int, float>(
+          scoring_point.uplink_bandwidth_bps(),
+          scoring_point.uplink_packet_loss_fraction());
+    }
+    controllers.push_back(std::move(controller));
+  }
+
+  if (scoring_points.size() == 0) {
+    return std::unique_ptr<ControllerManagerImpl>(new ControllerManagerImpl(
+        ControllerManagerImpl::Config(0, 0), std::move(controllers),
+        scoring_points));
+  } else {
+    RTC_CHECK(controller_manager_config.has_min_reordering_time_ms());
+    RTC_CHECK(controller_manager_config.has_min_reordering_squared_distance());
+    return std::unique_ptr<ControllerManagerImpl>(new ControllerManagerImpl(
+        ControllerManagerImpl::Config(
+            controller_manager_config.min_reordering_time_ms(),
+            controller_manager_config.min_reordering_squared_distance()),
+        std::move(controllers), scoring_points));
+  }
+
+#else
+  RTC_NOTREACHED();
+  return nullptr;
+#endif  // WEBRTC_ENABLE_PROTOBUF
+}
+
+ControllerManagerImpl::ControllerManagerImpl(const Config& config)
+    : ControllerManagerImpl(
+          config,
+          std::vector<std::unique_ptr<Controller>>(),
+          std::map<const Controller*, std::pair<int, float>>()) {}
+
+ControllerManagerImpl::ControllerManagerImpl(
+    const Config& config,
+    std::vector<std::unique_ptr<Controller>> controllers,
+    const std::map<const Controller*, std::pair<int, float>>& scoring_points)
+    : config_(config),
+      controllers_(std::move(controllers)),
+      last_reordering_time_ms_(rtc::nullopt),
+      last_scoring_point_(0, 0.0) {
+  for (auto& controller : controllers_)
+    default_sorted_controllers_.push_back(controller.get());
+  sorted_controllers_ = default_sorted_controllers_;
+  for (auto& controller_point : scoring_points) {
+    controller_scoring_points_.insert(std::make_pair(
+        controller_point.first, ScoringPoint(controller_point.second.first,
+                                             controller_point.second.second)));
+  }
+}
+
+ControllerManagerImpl::~ControllerManagerImpl() = default;
+
+std::vector<Controller*> ControllerManagerImpl::GetSortedControllers(
+    const Controller::NetworkMetrics& metrics) {
+  if (controller_scoring_points_.size() == 0)
+    return default_sorted_controllers_;
+
+  if (!metrics.uplink_bandwidth_bps || !metrics.uplink_packet_loss_fraction)
+    return sorted_controllers_;
+
+  const int64_t now_ms = rtc::TimeMillis();
+  if (last_reordering_time_ms_ &&
+      now_ms - *last_reordering_time_ms_ < config_.min_reordering_time_ms)
+    return sorted_controllers_;
+
+  ScoringPoint scoring_point(*metrics.uplink_bandwidth_bps,
+                             *metrics.uplink_packet_loss_fraction);
+
+  if (last_reordering_time_ms_ &&
+      last_scoring_point_.SquaredDistanceTo(scoring_point) <
+          config_.min_reordering_squared_distance)
+    return sorted_controllers_;
+
+  // Sort controllers according to the distances of |scoring_point| to the
+  // scoring points of controllers.
+  //
+  // A controller that does not associate with any scoring point
+  // are treated as if
+  // 1) they are less important than any controller that has a scoring point,
+  // 2) they are equally important to any controller that has no scoring point,
+  //    and their relative order will follow |default_sorted_controllers_|.
+  std::vector<Controller*> sorted_controllers(default_sorted_controllers_);
+  std::stable_sort(
+      sorted_controllers.begin(), sorted_controllers.end(),
+      [this, &scoring_point](const Controller* lhs, const Controller* rhs) {
+        auto lhs_scoring_point = controller_scoring_points_.find(lhs);
+        auto rhs_scoring_point = controller_scoring_points_.find(rhs);
+
+        if (lhs_scoring_point == controller_scoring_points_.end())
+          return false;
+
+        if (rhs_scoring_point == controller_scoring_points_.end())
+          return true;
+
+        return lhs_scoring_point->second.SquaredDistanceTo(scoring_point) <
+               rhs_scoring_point->second.SquaredDistanceTo(scoring_point);
+      });
+
+  if (sorted_controllers_ != sorted_controllers) {
+    sorted_controllers_ = sorted_controllers;
+    last_reordering_time_ms_ = now_ms;
+    last_scoring_point_ = scoring_point;
+  }
+  return sorted_controllers_;
+}
+
+std::vector<Controller*> ControllerManagerImpl::GetControllers() const {
+  return default_sorted_controllers_;
+}
+
+ControllerManagerImpl::ScoringPoint::ScoringPoint(
+    int uplink_bandwidth_bps,
+    float uplink_packet_loss_fraction)
+    : uplink_bandwidth_bps(uplink_bandwidth_bps),
+      uplink_packet_loss_fraction(uplink_packet_loss_fraction) {}
+
+namespace {
+
+constexpr int kMinUplinkBandwidthBps = 0;
+constexpr int kMaxUplinkBandwidthBps = 120000;
+
+float NormalizeUplinkBandwidth(int uplink_bandwidth_bps) {
+  uplink_bandwidth_bps =
+      std::min(kMaxUplinkBandwidthBps,
+               std::max(kMinUplinkBandwidthBps, uplink_bandwidth_bps));
+  return static_cast<float>(uplink_bandwidth_bps - kMinUplinkBandwidthBps) /
+         (kMaxUplinkBandwidthBps - kMinUplinkBandwidthBps);
+}
+
+float NormalizePacketLossFraction(float uplink_packet_loss_fraction) {
+  // |uplink_packet_loss_fraction| is seldom larger than 0.3, so we scale it up
+  // by 3.3333f.
+  return std::min(uplink_packet_loss_fraction * 3.3333f, 1.0f);
+}
+
+}  // namespace
+
+float ControllerManagerImpl::ScoringPoint::SquaredDistanceTo(
+    const ScoringPoint& scoring_point) const {
+  float diff_normalized_bitrate_bps =
+      NormalizeUplinkBandwidth(scoring_point.uplink_bandwidth_bps) -
+      NormalizeUplinkBandwidth(uplink_bandwidth_bps);
+  float diff_normalized_packet_loss =
+      NormalizePacketLossFraction(scoring_point.uplink_packet_loss_fraction) -
+      NormalizePacketLossFraction(uplink_packet_loss_fraction);
+  return std::pow(diff_normalized_bitrate_bps, 2) +
+         std::pow(diff_normalized_packet_loss, 2);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/controller_manager.h b/modules/audio_coding/audio_network_adaptor/controller_manager.h
new file mode 100644
index 0000000..5c63f2f
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/controller_manager.h
@@ -0,0 +1,123 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_CONTROLLER_MANAGER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_CONTROLLER_MANAGER_H_
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/protobuf_utils.h"
+
+namespace webrtc {
+
+class DebugDumpWriter;
+
+class ControllerManager {
+ public:
+  virtual ~ControllerManager() = default;
+
+  // Sort controllers based on their significance.
+  virtual std::vector<Controller*> GetSortedControllers(
+      const Controller::NetworkMetrics& metrics) = 0;
+
+  virtual std::vector<Controller*> GetControllers() const = 0;
+};
+
+class ControllerManagerImpl final : public ControllerManager {
+ public:
+  struct Config {
+    Config(int min_reordering_time_ms, float min_reordering_squared_distance);
+    ~Config();
+    // Least time since last reordering for a new reordering to be made.
+    int min_reordering_time_ms;
+    // Least squared distance from last scoring point for a new reordering to be
+    // made.
+    float min_reordering_squared_distance;
+  };
+
+  static std::unique_ptr<ControllerManager> Create(
+      const ProtoString& config_string,
+      size_t num_encoder_channels,
+      rtc::ArrayView<const int> encoder_frame_lengths_ms,
+      int min_encoder_bitrate_bps,
+      size_t intial_channels_to_encode,
+      int initial_frame_length_ms,
+      int initial_bitrate_bps,
+      bool initial_fec_enabled,
+      bool initial_dtx_enabled);
+
+  static std::unique_ptr<ControllerManager> Create(
+      const ProtoString& config_string,
+      size_t num_encoder_channels,
+      rtc::ArrayView<const int> encoder_frame_lengths_ms,
+      int min_encoder_bitrate_bps,
+      size_t intial_channels_to_encode,
+      int initial_frame_length_ms,
+      int initial_bitrate_bps,
+      bool initial_fec_enabled,
+      bool initial_dtx_enabled,
+      DebugDumpWriter* debug_dump_writer);
+
+  explicit ControllerManagerImpl(const Config& config);
+
+  // Dependency injection for testing.
+  ControllerManagerImpl(
+      const Config& config,
+      std::vector<std::unique_ptr<Controller>> controllers,
+      const std::map<const Controller*, std::pair<int, float>>&
+          chracteristic_points);
+
+  ~ControllerManagerImpl() override;
+
+  // Sort controllers based on their significance.
+  std::vector<Controller*> GetSortedControllers(
+      const Controller::NetworkMetrics& metrics) override;
+
+  std::vector<Controller*> GetControllers() const override;
+
+ private:
+  // Scoring point is a subset of NetworkMetrics that is used for comparing the
+  // significance of controllers.
+  struct ScoringPoint {
+    // TODO(eladalon): Do we want to experiment with RPLR-based scoring?
+    ScoringPoint(int uplink_bandwidth_bps, float uplink_packet_loss_fraction);
+
+    // Calculate the normalized [0,1] distance between two scoring points.
+    float SquaredDistanceTo(const ScoringPoint& scoring_point) const;
+
+    int uplink_bandwidth_bps;
+    float uplink_packet_loss_fraction;
+  };
+
+  const Config config_;
+
+  std::vector<std::unique_ptr<Controller>> controllers_;
+
+  rtc::Optional<int64_t> last_reordering_time_ms_;
+  ScoringPoint last_scoring_point_;
+
+  std::vector<Controller*> default_sorted_controllers_;
+
+  std::vector<Controller*> sorted_controllers_;
+
+  // |scoring_points_| saves the scoring points of various
+  // controllers.
+  std::map<const Controller*, ScoringPoint> controller_scoring_points_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(ControllerManagerImpl);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_CONTROLLER_MANAGER_H_
diff --git a/modules/audio_coding/audio_network_adaptor/controller_manager_unittest.cc b/modules/audio_coding/audio_network_adaptor/controller_manager_unittest.cc
new file mode 100644
index 0000000..576661c
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/controller_manager_unittest.cc
@@ -0,0 +1,472 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <utility>
+
+#include "modules/audio_coding/audio_network_adaptor/controller_manager.h"
+#include "modules/audio_coding/audio_network_adaptor/mock/mock_controller.h"
+#include "modules/audio_coding/audio_network_adaptor/mock/mock_debug_dump_writer.h"
+#include "rtc_base/fakeclock.h"
+#include "rtc_base/ignore_wundef.h"
+#include "rtc_base/protobuf_utils.h"
+#include "test/gtest.h"
+
+#if WEBRTC_ENABLE_PROTOBUF
+RTC_PUSH_IGNORING_WUNDEF()
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_coding/audio_network_adaptor/config.pb.h"
+#else
+#include "modules/audio_coding/audio_network_adaptor/config.pb.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+#endif
+
+namespace webrtc {
+
+using ::testing::_;
+using ::testing::NiceMock;
+
+namespace {
+
+constexpr size_t kNumControllers = 4;
+constexpr int kChracteristicBandwithBps[2] = {15000, 0};
+constexpr float kChracteristicPacketLossFraction[2] = {0.2f, 0.0f};
+constexpr int kMinReorderingTimeMs = 200;
+constexpr int kFactor = 100;
+constexpr float kMinReorderingSquareDistance = 1.0f / kFactor / kFactor;
+
+// |kMinUplinkBandwidthBps| and |kMaxUplinkBandwidthBps| are copied from
+// controller_manager.cc
+constexpr int kMinUplinkBandwidthBps = 0;
+constexpr int kMaxUplinkBandwidthBps = 120000;
+constexpr int kMinBandwithChangeBps =
+    (kMaxUplinkBandwidthBps - kMinUplinkBandwidthBps) / kFactor;
+
+struct ControllerManagerStates {
+  std::unique_ptr<ControllerManager> controller_manager;
+  std::vector<MockController*> mock_controllers;
+};
+
+ControllerManagerStates CreateControllerManager() {
+  ControllerManagerStates states;
+  std::vector<std::unique_ptr<Controller>> controllers;
+  std::map<const Controller*, std::pair<int, float>> chracteristic_points;
+  for (size_t i = 0; i < kNumControllers; ++i) {
+    auto controller =
+        std::unique_ptr<MockController>(new NiceMock<MockController>());
+    EXPECT_CALL(*controller, Die());
+    states.mock_controllers.push_back(controller.get());
+    controllers.push_back(std::move(controller));
+  }
+
+  // Assign characteristic points to the last two controllers.
+  chracteristic_points[states.mock_controllers[kNumControllers - 2]] =
+      std::make_pair(kChracteristicBandwithBps[0],
+                     kChracteristicPacketLossFraction[0]);
+  chracteristic_points[states.mock_controllers[kNumControllers - 1]] =
+      std::make_pair(kChracteristicBandwithBps[1],
+                     kChracteristicPacketLossFraction[1]);
+
+  states.controller_manager.reset(new ControllerManagerImpl(
+      ControllerManagerImpl::Config(kMinReorderingTimeMs,
+                                    kMinReorderingSquareDistance),
+      std::move(controllers), chracteristic_points));
+  return states;
+}
+
+// |expected_order| contains the expected indices of all controllers in the
+// vector of controllers returned by GetSortedControllers(). A negative index
+// means that we do not care about its exact place, but we do check that it
+// exists in the vector.
+void CheckControllersOrder(
+    ControllerManagerStates* states,
+    const rtc::Optional<int>& uplink_bandwidth_bps,
+    const rtc::Optional<float>& uplink_packet_loss_fraction,
+    const std::vector<int>& expected_order) {
+  RTC_DCHECK_EQ(kNumControllers, expected_order.size());
+  Controller::NetworkMetrics metrics;
+  metrics.uplink_bandwidth_bps = uplink_bandwidth_bps;
+  metrics.uplink_packet_loss_fraction = uplink_packet_loss_fraction;
+  auto check = states->controller_manager->GetSortedControllers(metrics);
+  EXPECT_EQ(states->mock_controllers.size(), check.size());
+  for (size_t i = 0; i < states->mock_controllers.size(); ++i) {
+    if (expected_order[i] >= 0) {
+      EXPECT_EQ(states->mock_controllers[i], check[expected_order[i]]);
+    } else {
+      EXPECT_NE(check.end(), std::find(check.begin(), check.end(),
+                                       states->mock_controllers[i]));
+    }
+  }
+}
+
+}  // namespace
+
+TEST(ControllerManagerTest, GetControllersReturnAllControllers) {
+  auto states = CreateControllerManager();
+  auto check = states.controller_manager->GetControllers();
+  // Verify that controllers in |check| are one-to-one mapped to those in
+  // |mock_controllers_|.
+  EXPECT_EQ(states.mock_controllers.size(), check.size());
+  for (auto& controller : check)
+    EXPECT_NE(states.mock_controllers.end(),
+              std::find(states.mock_controllers.begin(),
+                        states.mock_controllers.end(), controller));
+}
+
+TEST(ControllerManagerTest, ControllersInDefaultOrderOnEmptyNetworkMetrics) {
+  auto states = CreateControllerManager();
+  // |network_metrics| are empty, and the controllers are supposed to follow the
+  // default order.
+  CheckControllersOrder(&states, rtc::nullopt, rtc::nullopt,
+                        {0, 1, 2, 3});
+}
+
+TEST(ControllerManagerTest, ControllersWithoutCharPointAtEndAndInDefaultOrder) {
+  auto states = CreateControllerManager();
+  CheckControllersOrder(&states, 0,
+                        0.0,
+                        {kNumControllers - 2, kNumControllers - 1, -1, -1});
+}
+
+TEST(ControllerManagerTest, ControllersWithCharPointDependOnNetworkMetrics) {
+  auto states = CreateControllerManager();
+  CheckControllersOrder(&states, kChracteristicBandwithBps[1],
+                        kChracteristicPacketLossFraction[1],
+                        {kNumControllers - 2, kNumControllers - 1, 1, 0});
+}
+
+TEST(ControllerManagerTest, DoNotReorderBeforeMinReordingTime) {
+  rtc::ScopedFakeClock fake_clock;
+  auto states = CreateControllerManager();
+  CheckControllersOrder(&states, kChracteristicBandwithBps[0],
+                        kChracteristicPacketLossFraction[0],
+                        {kNumControllers - 2, kNumControllers - 1, 0, 1});
+  fake_clock.AdvanceTime(
+      rtc::TimeDelta::FromMilliseconds(kMinReorderingTimeMs - 1));
+  // Move uplink bandwidth and packet loss fraction to the other controller's
+  // characteristic point, which would cause controller manager to reorder the
+  // controllers if time had reached min reordering time.
+  CheckControllersOrder(&states, kChracteristicBandwithBps[1],
+                        kChracteristicPacketLossFraction[1],
+                        {kNumControllers - 2, kNumControllers - 1, 0, 1});
+}
+
+TEST(ControllerManagerTest, ReorderBeyondMinReordingTimeAndMinDistance) {
+  rtc::ScopedFakeClock fake_clock;
+  auto states = CreateControllerManager();
+  constexpr int kBandwidthBps =
+      (kChracteristicBandwithBps[0] + kChracteristicBandwithBps[1]) / 2;
+  constexpr float kPacketLossFraction = (kChracteristicPacketLossFraction[0] +
+                                         kChracteristicPacketLossFraction[1]) /
+                                        2.0f;
+  // Set network metrics to be in the middle between the characteristic points
+  // of two controllers.
+  CheckControllersOrder(&states, kBandwidthBps, kPacketLossFraction,
+                        {kNumControllers - 2, kNumControllers - 1, 0, 1});
+  fake_clock.AdvanceTime(
+      rtc::TimeDelta::FromMilliseconds(kMinReorderingTimeMs));
+  // Then let network metrics move a little towards the other controller.
+  CheckControllersOrder(&states, kBandwidthBps - kMinBandwithChangeBps - 1,
+                        kPacketLossFraction,
+                        {kNumControllers - 2, kNumControllers - 1, 1, 0});
+}
+
+TEST(ControllerManagerTest, DoNotReorderIfNetworkMetricsChangeTooSmall) {
+  rtc::ScopedFakeClock fake_clock;
+  auto states = CreateControllerManager();
+  constexpr int kBandwidthBps =
+      (kChracteristicBandwithBps[0] + kChracteristicBandwithBps[1]) / 2;
+  constexpr float kPacketLossFraction = (kChracteristicPacketLossFraction[0] +
+                                         kChracteristicPacketLossFraction[1]) /
+                                        2.0f;
+  // Set network metrics to be in the middle between the characteristic points
+  // of two controllers.
+  CheckControllersOrder(&states, kBandwidthBps, kPacketLossFraction,
+                        {kNumControllers - 2, kNumControllers - 1, 0, 1});
+  fake_clock.AdvanceTime(
+      rtc::TimeDelta::FromMilliseconds(kMinReorderingTimeMs));
+  // Then let network metrics move a little towards the other controller.
+  CheckControllersOrder(&states, kBandwidthBps - kMinBandwithChangeBps + 1,
+                        kPacketLossFraction,
+                        {kNumControllers - 2, kNumControllers - 1, 0, 1});
+}
+
+#if WEBRTC_ENABLE_PROTOBUF
+
+namespace {
+
+void AddBitrateControllerConfig(
+    audio_network_adaptor::config::ControllerManager* config) {
+  config->add_controllers()->mutable_bitrate_controller();
+}
+
+void AddChannelControllerConfig(
+    audio_network_adaptor::config::ControllerManager* config) {
+  auto controller_config =
+      config->add_controllers()->mutable_channel_controller();
+  controller_config->set_channel_1_to_2_bandwidth_bps(31000);
+  controller_config->set_channel_2_to_1_bandwidth_bps(29000);
+}
+
+void AddDtxControllerConfig(
+    audio_network_adaptor::config::ControllerManager* config) {
+  auto controller_config = config->add_controllers()->mutable_dtx_controller();
+  controller_config->set_dtx_enabling_bandwidth_bps(55000);
+  controller_config->set_dtx_disabling_bandwidth_bps(65000);
+}
+
+void AddFecControllerConfig(
+    audio_network_adaptor::config::ControllerManager* config) {
+  auto controller_config_ext = config->add_controllers();
+  auto controller_config = controller_config_ext->mutable_fec_controller();
+  auto fec_enabling_threshold =
+      controller_config->mutable_fec_enabling_threshold();
+  fec_enabling_threshold->set_low_bandwidth_bps(17000);
+  fec_enabling_threshold->set_low_bandwidth_packet_loss(0.1f);
+  fec_enabling_threshold->set_high_bandwidth_bps(64000);
+  fec_enabling_threshold->set_high_bandwidth_packet_loss(0.05f);
+  auto fec_disabling_threshold =
+      controller_config->mutable_fec_disabling_threshold();
+  fec_disabling_threshold->set_low_bandwidth_bps(15000);
+  fec_disabling_threshold->set_low_bandwidth_packet_loss(0.08f);
+  fec_disabling_threshold->set_high_bandwidth_bps(64000);
+  fec_disabling_threshold->set_high_bandwidth_packet_loss(0.01f);
+  controller_config->set_time_constant_ms(500);
+
+  auto scoring_point = controller_config_ext->mutable_scoring_point();
+  scoring_point->set_uplink_bandwidth_bps(kChracteristicBandwithBps[0]);
+  scoring_point->set_uplink_packet_loss_fraction(
+      kChracteristicPacketLossFraction[0]);
+}
+
+void AddFrameLengthControllerConfig(
+    audio_network_adaptor::config::ControllerManager* config) {
+  auto controller_config_ext = config->add_controllers();
+  auto controller_config =
+      controller_config_ext->mutable_frame_length_controller();
+  controller_config->set_fl_decreasing_packet_loss_fraction(0.05f);
+  controller_config->set_fl_increasing_packet_loss_fraction(0.04f);
+  controller_config->set_fl_20ms_to_60ms_bandwidth_bps(72000);
+  controller_config->set_fl_60ms_to_20ms_bandwidth_bps(88000);
+
+  auto scoring_point = controller_config_ext->mutable_scoring_point();
+  scoring_point->set_uplink_bandwidth_bps(kChracteristicBandwithBps[1]);
+  scoring_point->set_uplink_packet_loss_fraction(
+      kChracteristicPacketLossFraction[1]);
+}
+
+constexpr int kInitialBitrateBps = 24000;
+constexpr size_t kIntialChannelsToEncode = 1;
+constexpr bool kInitialDtxEnabled = true;
+constexpr bool kInitialFecEnabled = true;
+constexpr int kInitialFrameLengthMs = 60;
+constexpr int kMinBitrateBps = 6000;
+
+ControllerManagerStates CreateControllerManager(
+    const ProtoString& config_string) {
+  ControllerManagerStates states;
+  constexpr size_t kNumEncoderChannels = 2;
+  const std::vector<int> encoder_frame_lengths_ms = {20, 60};
+  states.controller_manager = ControllerManagerImpl::Create(
+      config_string, kNumEncoderChannels, encoder_frame_lengths_ms,
+      kMinBitrateBps, kIntialChannelsToEncode, kInitialFrameLengthMs,
+      kInitialBitrateBps, kInitialFecEnabled, kInitialDtxEnabled);
+  return states;
+}
+
+enum class ControllerType : int8_t {
+  FEC,
+  CHANNEL,
+  DTX,
+  FRAME_LENGTH,
+  BIT_RATE
+};
+
+void CheckControllersOrder(const std::vector<Controller*>& controllers,
+                           const std::vector<ControllerType>& expected_types) {
+  ASSERT_EQ(expected_types.size(), controllers.size());
+
+  // We also check that the controllers follow the initial settings.
+  AudioEncoderRuntimeConfig encoder_config;
+
+  for (size_t i = 0; i < controllers.size(); ++i) {
+    AudioEncoderRuntimeConfig encoder_config;
+    // We check the order of |controllers| by judging their decisions.
+    controllers[i]->MakeDecision(&encoder_config);
+
+    // Since controllers are not provided with network metrics, they give the
+    // initial values.
+    switch (expected_types[i]) {
+      case ControllerType::FEC:
+        EXPECT_EQ(kInitialFecEnabled, encoder_config.enable_fec);
+        break;
+      case ControllerType::CHANNEL:
+        EXPECT_EQ(kIntialChannelsToEncode, encoder_config.num_channels);
+        break;
+      case ControllerType::DTX:
+        EXPECT_EQ(kInitialDtxEnabled, encoder_config.enable_dtx);
+        break;
+      case ControllerType::FRAME_LENGTH:
+        EXPECT_EQ(kInitialFrameLengthMs, encoder_config.frame_length_ms);
+        break;
+      case ControllerType::BIT_RATE:
+        EXPECT_EQ(kInitialBitrateBps, encoder_config.bitrate_bps);
+    }
+  }
+}
+
+MATCHER_P(ControllerManagerEqual, value, "") {
+  ProtoString value_string;
+  ProtoString arg_string;
+  EXPECT_TRUE(arg.SerializeToString(&arg_string));
+  EXPECT_TRUE(value.SerializeToString(&value_string));
+  return arg_string == value_string;
+}
+
+}  // namespace
+
+TEST(ControllerManagerTest, DebugDumpLoggedWhenCreateFromConfigString) {
+  audio_network_adaptor::config::ControllerManager config;
+  config.set_min_reordering_time_ms(kMinReorderingTimeMs);
+  config.set_min_reordering_squared_distance(kMinReorderingSquareDistance);
+
+  AddFecControllerConfig(&config);
+  AddChannelControllerConfig(&config);
+  AddDtxControllerConfig(&config);
+  AddFrameLengthControllerConfig(&config);
+  AddBitrateControllerConfig(&config);
+
+  ProtoString config_string;
+  config.SerializeToString(&config_string);
+
+  constexpr size_t kNumEncoderChannels = 2;
+  const std::vector<int> encoder_frame_lengths_ms = {20, 60};
+
+  constexpr int64_t kClockInitialTimeMs = 12345678;
+  rtc::ScopedFakeClock fake_clock;
+  fake_clock.AdvanceTime(rtc::TimeDelta::FromMilliseconds(kClockInitialTimeMs));
+  auto debug_dump_writer =
+      std::unique_ptr<MockDebugDumpWriter>(new NiceMock<MockDebugDumpWriter>());
+  EXPECT_CALL(*debug_dump_writer, Die());
+  EXPECT_CALL(*debug_dump_writer,
+              DumpControllerManagerConfig(ControllerManagerEqual(config),
+                                          kClockInitialTimeMs));
+
+  ControllerManagerImpl::Create(config_string, kNumEncoderChannels,
+                                encoder_frame_lengths_ms, kMinBitrateBps,
+                                kIntialChannelsToEncode, kInitialFrameLengthMs,
+                                kInitialBitrateBps, kInitialFecEnabled,
+                                kInitialDtxEnabled, debug_dump_writer.get());
+}
+
+TEST(ControllerManagerTest, CreateFromConfigStringAndCheckDefaultOrder) {
+  audio_network_adaptor::config::ControllerManager config;
+  config.set_min_reordering_time_ms(kMinReorderingTimeMs);
+  config.set_min_reordering_squared_distance(kMinReorderingSquareDistance);
+
+  AddFecControllerConfig(&config);
+  AddChannelControllerConfig(&config);
+  AddDtxControllerConfig(&config);
+  AddFrameLengthControllerConfig(&config);
+  AddBitrateControllerConfig(&config);
+
+  ProtoString config_string;
+  config.SerializeToString(&config_string);
+
+  auto states = CreateControllerManager(config_string);
+  Controller::NetworkMetrics metrics;
+
+  auto controllers = states.controller_manager->GetSortedControllers(metrics);
+  CheckControllersOrder(
+      controllers,
+      std::vector<ControllerType>{
+          ControllerType::FEC, ControllerType::CHANNEL, ControllerType::DTX,
+          ControllerType::FRAME_LENGTH, ControllerType::BIT_RATE});
+}
+
+TEST(ControllerManagerTest, CreateCharPointFreeConfigAndCheckDefaultOrder) {
+  audio_network_adaptor::config::ControllerManager config;
+
+  // Following controllers have no characteristic points.
+  AddChannelControllerConfig(&config);
+  AddDtxControllerConfig(&config);
+  AddBitrateControllerConfig(&config);
+
+  ProtoString config_string;
+  config.SerializeToString(&config_string);
+
+  auto states = CreateControllerManager(config_string);
+  Controller::NetworkMetrics metrics;
+
+  auto controllers = states.controller_manager->GetSortedControllers(metrics);
+  CheckControllersOrder(
+      controllers,
+      std::vector<ControllerType>{ControllerType::CHANNEL, ControllerType::DTX,
+                                  ControllerType::BIT_RATE});
+}
+
+TEST(ControllerManagerTest, CreateFromConfigStringAndCheckReordering) {
+  rtc::ScopedFakeClock fake_clock;
+  audio_network_adaptor::config::ControllerManager config;
+  config.set_min_reordering_time_ms(kMinReorderingTimeMs);
+  config.set_min_reordering_squared_distance(kMinReorderingSquareDistance);
+
+  AddChannelControllerConfig(&config);
+
+  // Internally associated with characteristic point 0.
+  AddFecControllerConfig(&config);
+
+  AddDtxControllerConfig(&config);
+
+  // Internally associated with characteristic point 1.
+  AddFrameLengthControllerConfig(&config);
+
+  AddBitrateControllerConfig(&config);
+
+  ProtoString config_string;
+  config.SerializeToString(&config_string);
+
+  auto states = CreateControllerManager(config_string);
+
+  Controller::NetworkMetrics metrics;
+  metrics.uplink_bandwidth_bps = kChracteristicBandwithBps[0];
+  metrics.uplink_packet_loss_fraction = kChracteristicPacketLossFraction[0];
+
+  auto controllers = states.controller_manager->GetSortedControllers(metrics);
+  CheckControllersOrder(controllers,
+                        std::vector<ControllerType>{
+                            ControllerType::FEC, ControllerType::FRAME_LENGTH,
+                            ControllerType::CHANNEL, ControllerType::DTX,
+                            ControllerType::BIT_RATE});
+
+  metrics.uplink_bandwidth_bps = kChracteristicBandwithBps[1];
+  metrics.uplink_packet_loss_fraction = kChracteristicPacketLossFraction[1];
+  fake_clock.AdvanceTime(
+      rtc::TimeDelta::FromMilliseconds(kMinReorderingTimeMs - 1));
+  controllers = states.controller_manager->GetSortedControllers(metrics);
+  // Should not reorder since min reordering time is not met.
+  CheckControllersOrder(controllers,
+                        std::vector<ControllerType>{
+                            ControllerType::FEC, ControllerType::FRAME_LENGTH,
+                            ControllerType::CHANNEL, ControllerType::DTX,
+                            ControllerType::BIT_RATE});
+
+  fake_clock.AdvanceTime(rtc::TimeDelta::FromMilliseconds(1));
+  controllers = states.controller_manager->GetSortedControllers(metrics);
+  // Reorder now.
+  CheckControllersOrder(controllers,
+                        std::vector<ControllerType>{
+                            ControllerType::FRAME_LENGTH, ControllerType::FEC,
+                            ControllerType::CHANNEL, ControllerType::DTX,
+                            ControllerType::BIT_RATE});
+}
+#endif  // WEBRTC_ENABLE_PROTOBUF
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/debug_dump.proto b/modules/audio_coding/audio_network_adaptor/debug_dump.proto
new file mode 100644
index 0000000..93b31c3
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/debug_dump.proto
@@ -0,0 +1,42 @@
+syntax = "proto2";
+option optimize_for = LITE_RUNTIME;
+package webrtc.audio_network_adaptor.debug_dump;
+
+import "config.proto";
+
+message NetworkMetrics {
+  optional int32 uplink_bandwidth_bps = 1;
+  optional float uplink_packet_loss_fraction = 2;
+  optional int32 target_audio_bitrate_bps = 3;
+  optional int32 rtt_ms = 4;
+  optional int32 uplink_recoverable_packet_loss_fraction = 5;
+}
+
+message EncoderRuntimeConfig {
+  optional int32 bitrate_bps = 1;
+  optional int32 frame_length_ms = 2;
+  // Note: This is what we tell the encoder. It doesn't have to reflect
+  // the actual NetworkMetrics; it's subject to our decision.
+  optional float uplink_packet_loss_fraction = 3;
+  optional bool enable_fec = 4;
+  optional bool enable_dtx = 5;
+  // Some encoders can encode fewer channels than the actual input to make
+  // better use of the bandwidth. |num_channels| sets the number of channels
+  // to encode.
+  optional uint32 num_channels = 6;
+}
+
+message Event {
+  enum Type {
+    NETWORK_METRICS = 0;
+    ENCODER_RUNTIME_CONFIG = 1;
+    CONTROLLER_MANAGER_CONFIG = 2;
+  }
+  required Type type = 1;
+  required uint32 timestamp = 2;
+  optional NetworkMetrics network_metrics = 3;
+  optional EncoderRuntimeConfig encoder_runtime_config = 4;
+  optional webrtc.audio_network_adaptor.config.ControllerManager
+      controller_manager_config = 5;
+}
+
diff --git a/modules/audio_coding/audio_network_adaptor/debug_dump_writer.cc b/modules/audio_coding/audio_network_adaptor/debug_dump_writer.cc
new file mode 100644
index 0000000..818362e
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/debug_dump_writer.cc
@@ -0,0 +1,166 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/debug_dump_writer.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/ignore_wundef.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/protobuf_utils.h"
+
+#if WEBRTC_ENABLE_PROTOBUF
+RTC_PUSH_IGNORING_WUNDEF()
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_coding/audio_network_adaptor/debug_dump.pb.h"
+#else
+#include "modules/audio_coding/audio_network_adaptor/debug_dump.pb.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+#endif
+
+namespace webrtc {
+
+#if WEBRTC_ENABLE_PROTOBUF
+namespace {
+
+using audio_network_adaptor::debug_dump::Event;
+using audio_network_adaptor::debug_dump::NetworkMetrics;
+using audio_network_adaptor::debug_dump::EncoderRuntimeConfig;
+
+void DumpEventToFile(const Event& event, FileWrapper* dump_file) {
+  RTC_CHECK(dump_file->is_open());
+  ProtoString dump_data;
+  event.SerializeToString(&dump_data);
+  int32_t size = rtc::checked_cast<int32_t>(event.ByteSizeLong());
+  dump_file->Write(&size, sizeof(size));
+  dump_file->Write(dump_data.data(), dump_data.length());
+}
+
+}  // namespace
+#endif  // WEBRTC_ENABLE_PROTOBUF
+
+class DebugDumpWriterImpl final : public DebugDumpWriter {
+ public:
+  explicit DebugDumpWriterImpl(FILE* file_handle);
+  ~DebugDumpWriterImpl() override = default;
+
+  void DumpEncoderRuntimeConfig(const AudioEncoderRuntimeConfig& config,
+                                int64_t timestamp) override;
+
+  void DumpNetworkMetrics(const Controller::NetworkMetrics& metrics,
+                          int64_t timestamp) override;
+
+#if WEBRTC_ENABLE_PROTOBUF
+  void DumpControllerManagerConfig(
+      const audio_network_adaptor::config::ControllerManager&
+          controller_manager_config,
+      int64_t timestamp) override;
+#endif
+
+ private:
+  std::unique_ptr<FileWrapper> dump_file_;
+};
+
+DebugDumpWriterImpl::DebugDumpWriterImpl(FILE* file_handle)
+    : dump_file_(FileWrapper::Create()) {
+#if WEBRTC_ENABLE_PROTOBUF
+  dump_file_->OpenFromFileHandle(file_handle);
+  RTC_CHECK(dump_file_->is_open());
+#else
+  RTC_NOTREACHED();
+#endif
+}
+
+void DebugDumpWriterImpl::DumpNetworkMetrics(
+    const Controller::NetworkMetrics& metrics,
+    int64_t timestamp) {
+#if WEBRTC_ENABLE_PROTOBUF
+  Event event;
+  event.set_timestamp(timestamp);
+  event.set_type(Event::NETWORK_METRICS);
+  auto dump_metrics = event.mutable_network_metrics();
+
+  if (metrics.uplink_bandwidth_bps)
+    dump_metrics->set_uplink_bandwidth_bps(*metrics.uplink_bandwidth_bps);
+
+  if (metrics.uplink_packet_loss_fraction) {
+    dump_metrics->set_uplink_packet_loss_fraction(
+        *metrics.uplink_packet_loss_fraction);
+  }
+
+  if (metrics.target_audio_bitrate_bps) {
+    dump_metrics->set_target_audio_bitrate_bps(
+        *metrics.target_audio_bitrate_bps);
+  }
+
+  if (metrics.rtt_ms)
+    dump_metrics->set_rtt_ms(*metrics.rtt_ms);
+
+  if (metrics.uplink_recoverable_packet_loss_fraction) {
+    dump_metrics->set_uplink_recoverable_packet_loss_fraction(
+        *metrics.uplink_recoverable_packet_loss_fraction);
+  }
+
+  DumpEventToFile(event, dump_file_.get());
+#endif  // WEBRTC_ENABLE_PROTOBUF
+}
+
+void DebugDumpWriterImpl::DumpEncoderRuntimeConfig(
+    const AudioEncoderRuntimeConfig& config,
+    int64_t timestamp) {
+#if WEBRTC_ENABLE_PROTOBUF
+  Event event;
+  event.set_timestamp(timestamp);
+  event.set_type(Event::ENCODER_RUNTIME_CONFIG);
+  auto dump_config = event.mutable_encoder_runtime_config();
+
+  if (config.bitrate_bps)
+    dump_config->set_bitrate_bps(*config.bitrate_bps);
+
+  if (config.frame_length_ms)
+    dump_config->set_frame_length_ms(*config.frame_length_ms);
+
+  if (config.uplink_packet_loss_fraction) {
+    dump_config->set_uplink_packet_loss_fraction(
+        *config.uplink_packet_loss_fraction);
+  }
+
+  if (config.enable_fec)
+    dump_config->set_enable_fec(*config.enable_fec);
+
+  if (config.enable_dtx)
+    dump_config->set_enable_dtx(*config.enable_dtx);
+
+  if (config.num_channels)
+    dump_config->set_num_channels(*config.num_channels);
+
+  DumpEventToFile(event, dump_file_.get());
+#endif  // WEBRTC_ENABLE_PROTOBUF
+}
+
+#if WEBRTC_ENABLE_PROTOBUF
+void DebugDumpWriterImpl::DumpControllerManagerConfig(
+    const audio_network_adaptor::config::ControllerManager&
+        controller_manager_config,
+    int64_t timestamp) {
+  Event event;
+  event.set_timestamp(timestamp);
+  event.set_type(Event::CONTROLLER_MANAGER_CONFIG);
+  event.mutable_controller_manager_config()->CopyFrom(
+      controller_manager_config);
+  DumpEventToFile(event, dump_file_.get());
+}
+#endif  // WEBRTC_ENABLE_PROTOBUF
+
+std::unique_ptr<DebugDumpWriter> DebugDumpWriter::Create(FILE* file_handle) {
+  return std::unique_ptr<DebugDumpWriter>(new DebugDumpWriterImpl(file_handle));
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/debug_dump_writer.h b/modules/audio_coding/audio_network_adaptor/debug_dump_writer.h
new file mode 100644
index 0000000..e40c832
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/debug_dump_writer.h
@@ -0,0 +1,55 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_DEBUG_DUMP_WRITER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_DEBUG_DUMP_WRITER_H_
+
+#include <memory>
+
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/ignore_wundef.h"
+#include "system_wrappers/include/file_wrapper.h"
+#if WEBRTC_ENABLE_PROTOBUF
+RTC_PUSH_IGNORING_WUNDEF()
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_coding/audio_network_adaptor/config.pb.h"
+#else
+#include "modules/audio_coding/audio_network_adaptor/config.pb.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+#endif
+
+namespace webrtc {
+
+class DebugDumpWriter {
+ public:
+  static std::unique_ptr<DebugDumpWriter> Create(FILE* file_handle);
+
+  virtual ~DebugDumpWriter() = default;
+
+  virtual void DumpEncoderRuntimeConfig(const AudioEncoderRuntimeConfig& config,
+                                        int64_t timestamp) = 0;
+
+  virtual void DumpNetworkMetrics(const Controller::NetworkMetrics& metrics,
+                                  int64_t timestamp) = 0;
+
+#if WEBRTC_ENABLE_PROTOBUF
+  virtual void DumpControllerManagerConfig(
+      const audio_network_adaptor::config::ControllerManager&
+          controller_manager_config,
+      int64_t timestamp) = 0;
+#endif
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_DEBUG_DUMP_WRITER_H_
diff --git a/modules/audio_coding/audio_network_adaptor/dtx_controller.cc b/modules/audio_coding/audio_network_adaptor/dtx_controller.cc
new file mode 100644
index 0000000..cbfea95
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/dtx_controller.cc
@@ -0,0 +1,50 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/dtx_controller.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+DtxController::Config::Config(bool initial_dtx_enabled,
+                              int dtx_enabling_bandwidth_bps,
+                              int dtx_disabling_bandwidth_bps)
+    : initial_dtx_enabled(initial_dtx_enabled),
+      dtx_enabling_bandwidth_bps(dtx_enabling_bandwidth_bps),
+      dtx_disabling_bandwidth_bps(dtx_disabling_bandwidth_bps) {}
+
+DtxController::DtxController(const Config& config)
+    : config_(config), dtx_enabled_(config_.initial_dtx_enabled) {}
+
+DtxController::~DtxController() = default;
+
+void DtxController::UpdateNetworkMetrics(
+    const NetworkMetrics& network_metrics) {
+  if (network_metrics.uplink_bandwidth_bps)
+    uplink_bandwidth_bps_ = network_metrics.uplink_bandwidth_bps;
+}
+
+void DtxController::MakeDecision(AudioEncoderRuntimeConfig* config) {
+  // Decision on |enable_dtx| should not have been made.
+  RTC_DCHECK(!config->enable_dtx);
+
+  if (uplink_bandwidth_bps_) {
+    if (dtx_enabled_ &&
+        *uplink_bandwidth_bps_ >= config_.dtx_disabling_bandwidth_bps) {
+      dtx_enabled_ = false;
+    } else if (!dtx_enabled_ &&
+               *uplink_bandwidth_bps_ <= config_.dtx_enabling_bandwidth_bps) {
+      dtx_enabled_ = true;
+    }
+  }
+  config->enable_dtx = dtx_enabled_;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/dtx_controller.h b/modules/audio_coding/audio_network_adaptor/dtx_controller.h
new file mode 100644
index 0000000..8a2427e
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/dtx_controller.h
@@ -0,0 +1,49 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_DTX_CONTROLLER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_DTX_CONTROLLER_H_
+
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class DtxController final : public Controller {
+ public:
+  struct Config {
+    Config(bool initial_dtx_enabled,
+           int dtx_enabling_bandwidth_bps,
+           int dtx_disabling_bandwidth_bps);
+    bool initial_dtx_enabled;
+    // Uplink bandwidth below which DTX should be switched on.
+    int dtx_enabling_bandwidth_bps;
+    // Uplink bandwidth above which DTX should be switched off.
+    int dtx_disabling_bandwidth_bps;
+  };
+
+  explicit DtxController(const Config& config);
+
+  ~DtxController() override;
+
+  void UpdateNetworkMetrics(const NetworkMetrics& network_metrics) override;
+
+  void MakeDecision(AudioEncoderRuntimeConfig* config) override;
+
+ private:
+  const Config config_;
+  bool dtx_enabled_;
+  rtc::Optional<int> uplink_bandwidth_bps_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(DtxController);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_DTX_CONTROLLER_H_
diff --git a/modules/audio_coding/audio_network_adaptor/dtx_controller_unittest.cc b/modules/audio_coding/audio_network_adaptor/dtx_controller_unittest.cc
new file mode 100644
index 0000000..e38e65d
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/dtx_controller_unittest.cc
@@ -0,0 +1,81 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "modules/audio_coding/audio_network_adaptor/dtx_controller.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kDtxEnablingBandwidthBps = 55000;
+constexpr int kDtxDisablingBandwidthBps = 65000;
+constexpr int kMediumBandwidthBps =
+    (kDtxEnablingBandwidthBps + kDtxDisablingBandwidthBps) / 2;
+
+std::unique_ptr<DtxController> CreateController(int initial_dtx_enabled) {
+  std::unique_ptr<DtxController> controller(new DtxController(
+      DtxController::Config(initial_dtx_enabled, kDtxEnablingBandwidthBps,
+                            kDtxDisablingBandwidthBps)));
+  return controller;
+}
+
+void CheckDecision(DtxController* controller,
+                   const rtc::Optional<int>& uplink_bandwidth_bps,
+                   bool expected_dtx_enabled) {
+  if (uplink_bandwidth_bps) {
+    Controller::NetworkMetrics network_metrics;
+    network_metrics.uplink_bandwidth_bps = uplink_bandwidth_bps;
+    controller->UpdateNetworkMetrics(network_metrics);
+  }
+  AudioEncoderRuntimeConfig config;
+  controller->MakeDecision(&config);
+  EXPECT_EQ(expected_dtx_enabled, config.enable_dtx);
+}
+
+}  // namespace
+
+TEST(DtxControllerTest, OutputInitValueWhenUplinkBandwidthUnknown) {
+  constexpr bool kInitialDtxEnabled = true;
+  auto controller = CreateController(kInitialDtxEnabled);
+  CheckDecision(controller.get(), rtc::nullopt, kInitialDtxEnabled);
+}
+
+TEST(DtxControllerTest, TurnOnDtxForLowUplinkBandwidth) {
+  auto controller = CreateController(false);
+  CheckDecision(controller.get(), kDtxEnablingBandwidthBps, true);
+}
+
+TEST(DtxControllerTest, TurnOffDtxForHighUplinkBandwidth) {
+  auto controller = CreateController(true);
+  CheckDecision(controller.get(), kDtxDisablingBandwidthBps, false);
+}
+
+TEST(DtxControllerTest, MaintainDtxOffForMediumUplinkBandwidth) {
+  auto controller = CreateController(false);
+  CheckDecision(controller.get(), kMediumBandwidthBps, false);
+}
+
+TEST(DtxControllerTest, MaintainDtxOnForMediumUplinkBandwidth) {
+  auto controller = CreateController(true);
+  CheckDecision(controller.get(), kMediumBandwidthBps, true);
+}
+
+TEST(DtxControllerTest, CheckBehaviorOnChangingUplinkBandwidth) {
+  auto controller = CreateController(false);
+  CheckDecision(controller.get(), kMediumBandwidthBps, false);
+  CheckDecision(controller.get(), kDtxEnablingBandwidthBps, true);
+  CheckDecision(controller.get(), kMediumBandwidthBps, true);
+  CheckDecision(controller.get(), kDtxDisablingBandwidthBps, false);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/event_log_writer.cc b/modules/audio_coding/audio_network_adaptor/event_log_writer.cc
new file mode 100644
index 0000000..9cdbc54
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/event_log_writer.cc
@@ -0,0 +1,72 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+
+#include <algorithm>
+
+#include "logging/rtc_event_log/events/rtc_event_audio_network_adaptation.h"
+#include "logging/rtc_event_log/rtc_event_log.h"
+#include "modules/audio_coding/audio_network_adaptor/event_log_writer.h"
+#include "rtc_base/ptr_util.h"
+
+namespace webrtc {
+
+EventLogWriter::EventLogWriter(RtcEventLog* event_log,
+                               int min_bitrate_change_bps,
+                               float min_bitrate_change_fraction,
+                               float min_packet_loss_change_fraction)
+    : event_log_(event_log),
+      min_bitrate_change_bps_(min_bitrate_change_bps),
+      min_bitrate_change_fraction_(min_bitrate_change_fraction),
+      min_packet_loss_change_fraction_(min_packet_loss_change_fraction) {
+  RTC_DCHECK(event_log_);
+}
+
+EventLogWriter::~EventLogWriter() = default;
+
+void EventLogWriter::MaybeLogEncoderConfig(
+    const AudioEncoderRuntimeConfig& config) {
+  if (last_logged_config_.num_channels != config.num_channels)
+    return LogEncoderConfig(config);
+  if (last_logged_config_.enable_dtx != config.enable_dtx)
+    return LogEncoderConfig(config);
+  if (last_logged_config_.enable_fec != config.enable_fec)
+    return LogEncoderConfig(config);
+  if (last_logged_config_.frame_length_ms != config.frame_length_ms)
+    return LogEncoderConfig(config);
+  if ((!last_logged_config_.bitrate_bps && config.bitrate_bps) ||
+      (last_logged_config_.bitrate_bps && config.bitrate_bps &&
+       std::abs(*last_logged_config_.bitrate_bps - *config.bitrate_bps) >=
+           std::min(static_cast<int>(*last_logged_config_.bitrate_bps *
+                                     min_bitrate_change_fraction_),
+                    min_bitrate_change_bps_))) {
+    return LogEncoderConfig(config);
+  }
+  if ((!last_logged_config_.uplink_packet_loss_fraction &&
+       config.uplink_packet_loss_fraction) ||
+      (last_logged_config_.uplink_packet_loss_fraction &&
+       config.uplink_packet_loss_fraction &&
+       fabs(*last_logged_config_.uplink_packet_loss_fraction -
+            *config.uplink_packet_loss_fraction) >=
+           min_packet_loss_change_fraction_ *
+               *last_logged_config_.uplink_packet_loss_fraction)) {
+    return LogEncoderConfig(config);
+  }
+}
+
+void EventLogWriter::LogEncoderConfig(const AudioEncoderRuntimeConfig& config) {
+  auto config_copy = rtc::MakeUnique<AudioEncoderRuntimeConfig>(config);
+  event_log_->Log(
+      rtc::MakeUnique<RtcEventAudioNetworkAdaptation>(std::move(config_copy)));
+  last_logged_config_ = config;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/event_log_writer.h b/modules/audio_coding/audio_network_adaptor/event_log_writer.h
new file mode 100644
index 0000000..fca8e53
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/event_log_writer.h
@@ -0,0 +1,42 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_EVENT_LOG_WRITER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_EVENT_LOG_WRITER_H_
+
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+class RtcEventLog;
+
+class EventLogWriter final {
+ public:
+  EventLogWriter(RtcEventLog* event_log,
+                 int min_bitrate_change_bps,
+                 float min_bitrate_change_fraction,
+                 float min_packet_loss_change_fraction);
+  ~EventLogWriter();
+  void MaybeLogEncoderConfig(const AudioEncoderRuntimeConfig& config);
+
+ private:
+  void LogEncoderConfig(const AudioEncoderRuntimeConfig& config);
+
+  RtcEventLog* const event_log_;
+  const int min_bitrate_change_bps_;
+  const float min_bitrate_change_fraction_;
+  const float min_packet_loss_change_fraction_;
+  AudioEncoderRuntimeConfig last_logged_config_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(EventLogWriter);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_EVENT_LOG_WRITER_H_
diff --git a/modules/audio_coding/audio_network_adaptor/event_log_writer_unittest.cc b/modules/audio_coding/audio_network_adaptor/event_log_writer_unittest.cc
new file mode 100644
index 0000000..df97594
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/event_log_writer_unittest.cc
@@ -0,0 +1,238 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "logging/rtc_event_log/events/rtc_event_audio_network_adaptation.h"
+#include "logging/rtc_event_log/mock/mock_rtc_event_log.h"
+#include "modules/audio_coding/audio_network_adaptor/event_log_writer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kMinBitrateChangeBps = 5000;
+constexpr float kMinPacketLossChangeFraction = 0.5;
+constexpr float kMinBitrateChangeFraction = 0.25;
+
+constexpr int kHighBitrateBps = 70000;
+constexpr int kLowBitrateBps = 10000;
+constexpr int kFrameLengthMs = 60;
+constexpr bool kEnableFec = true;
+constexpr bool kEnableDtx = true;
+constexpr float kPacketLossFraction = 0.05f;
+constexpr size_t kNumChannels = 1;
+
+MATCHER_P(IsRtcEventAnaConfigEqualTo, config, "") {
+  if (arg->GetType() != RtcEvent::Type::AudioNetworkAdaptation) {
+    return false;
+  }
+  auto ana_event = static_cast<RtcEventAudioNetworkAdaptation*>(arg);
+  return *ana_event->config_ == config;
+}
+
+struct EventLogWriterStates {
+  std::unique_ptr<EventLogWriter> event_log_writer;
+  std::unique_ptr<testing::StrictMock<MockRtcEventLog>> event_log;
+  AudioEncoderRuntimeConfig runtime_config;
+};
+
+EventLogWriterStates CreateEventLogWriter() {
+  EventLogWriterStates state;
+  state.event_log.reset(new testing::StrictMock<MockRtcEventLog>());
+  state.event_log_writer.reset(new EventLogWriter(
+      state.event_log.get(), kMinBitrateChangeBps, kMinBitrateChangeFraction,
+      kMinPacketLossChangeFraction));
+  state.runtime_config.bitrate_bps = kHighBitrateBps;
+  state.runtime_config.frame_length_ms = kFrameLengthMs;
+  state.runtime_config.uplink_packet_loss_fraction = kPacketLossFraction;
+  state.runtime_config.enable_fec = kEnableFec;
+  state.runtime_config.enable_dtx = kEnableDtx;
+  state.runtime_config.num_channels = kNumChannels;
+  return state;
+}
+}  // namespace
+
+TEST(EventLogWriterTest, FirstConfigIsLogged) {
+  auto state = CreateEventLogWriter();
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, SameConfigIsNotLogged) {
+  auto state = CreateEventLogWriter();
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, LogFecStateChange) {
+  auto state = CreateEventLogWriter();
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+
+  state.runtime_config.enable_fec = !kEnableFec;
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, LogDtxStateChange) {
+  auto state = CreateEventLogWriter();
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+
+  state.runtime_config.enable_dtx = !kEnableDtx;
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, LogChannelChange) {
+  auto state = CreateEventLogWriter();
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+
+  state.runtime_config.num_channels = kNumChannels + 1;
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, LogFrameLengthChange) {
+  auto state = CreateEventLogWriter();
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+
+  state.runtime_config.frame_length_ms = 20;
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, DoNotLogSmallBitrateChange) {
+  auto state = CreateEventLogWriter();
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+  state.runtime_config.bitrate_bps = kHighBitrateBps + kMinBitrateChangeBps - 1;
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, LogLargeBitrateChange) {
+  auto state = CreateEventLogWriter();
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+  // At high bitrate, the min fraction rule requires a larger change than the
+  // min change rule. We make sure that the min change rule applies.
+  RTC_DCHECK_GT(kHighBitrateBps * kMinBitrateChangeFraction,
+                kMinBitrateChangeBps);
+  state.runtime_config.bitrate_bps = kHighBitrateBps + kMinBitrateChangeBps;
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, LogMinBitrateChangeFractionOnLowBitrateChange) {
+  auto state = CreateEventLogWriter();
+  state.runtime_config.bitrate_bps = kLowBitrateBps;
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+  // At high bitrate, the min change rule requires a larger change than the min
+  // fraction rule. We make sure that the min fraction rule applies.
+  state.runtime_config.bitrate_bps =
+      kLowBitrateBps + kLowBitrateBps * kMinBitrateChangeFraction;
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, DoNotLogSmallPacketLossFractionChange) {
+  auto state = CreateEventLogWriter();
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+  state.runtime_config.uplink_packet_loss_fraction =
+      kPacketLossFraction + kMinPacketLossChangeFraction * kPacketLossFraction -
+      0.001f;
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, LogLargePacketLossFractionChange) {
+  auto state = CreateEventLogWriter();
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+  state.runtime_config.uplink_packet_loss_fraction =
+      kPacketLossFraction + kMinPacketLossChangeFraction * kPacketLossFraction;
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, LogJustOnceOnMultipleChanges) {
+  auto state = CreateEventLogWriter();
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+  state.runtime_config.uplink_packet_loss_fraction =
+      kPacketLossFraction + kMinPacketLossChangeFraction * kPacketLossFraction;
+  state.runtime_config.frame_length_ms = 20;
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+}
+
+TEST(EventLogWriterTest, LogAfterGradualChange) {
+  auto state = CreateEventLogWriter();
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+  state.runtime_config.bitrate_bps = kHighBitrateBps + kMinBitrateChangeBps;
+  EXPECT_CALL(*state.event_log,
+              LogProxy(IsRtcEventAnaConfigEqualTo(state.runtime_config)))
+      .Times(1);
+  for (int bitrate_bps = kHighBitrateBps;
+       bitrate_bps <= kHighBitrateBps + kMinBitrateChangeBps; bitrate_bps++) {
+    state.runtime_config.bitrate_bps = bitrate_bps;
+    state.event_log_writer->MaybeLogEncoderConfig(state.runtime_config);
+  }
+}
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.cc b/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.cc
new file mode 100644
index 0000000..62f356d
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.cc
@@ -0,0 +1,113 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h"
+
+#include <limits>
+#include <utility>
+
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+namespace {
+class NullSmoothingFilter final : public SmoothingFilter {
+ public:
+  void AddSample(float sample) override { last_sample_ = sample; }
+
+  rtc::Optional<float> GetAverage() override { return last_sample_; }
+
+  bool SetTimeConstantMs(int time_constant_ms) override {
+    RTC_NOTREACHED();
+    return false;
+  }
+
+ private:
+  rtc::Optional<float> last_sample_;
+};
+}
+
+FecControllerPlrBased::Config::Config(
+    bool initial_fec_enabled,
+    const ThresholdCurve& fec_enabling_threshold,
+    const ThresholdCurve& fec_disabling_threshold,
+    int time_constant_ms)
+    : initial_fec_enabled(initial_fec_enabled),
+      fec_enabling_threshold(fec_enabling_threshold),
+      fec_disabling_threshold(fec_disabling_threshold),
+      time_constant_ms(time_constant_ms) {}
+
+FecControllerPlrBased::FecControllerPlrBased(
+    const Config& config,
+    std::unique_ptr<SmoothingFilter> smoothing_filter)
+    : config_(config),
+      fec_enabled_(config.initial_fec_enabled),
+      packet_loss_smoother_(std::move(smoothing_filter)) {
+  RTC_DCHECK(config_.fec_disabling_threshold <= config_.fec_enabling_threshold);
+}
+
+FecControllerPlrBased::FecControllerPlrBased(const Config& config)
+    : FecControllerPlrBased(
+          config,
+          webrtc::field_trial::FindFullName("UseTwccPlrForAna") == "Enabled"
+              ? std::unique_ptr<NullSmoothingFilter>(new NullSmoothingFilter())
+              : std::unique_ptr<SmoothingFilter>(
+                    new SmoothingFilterImpl(config.time_constant_ms))) {}
+
+FecControllerPlrBased::~FecControllerPlrBased() = default;
+
+void FecControllerPlrBased::UpdateNetworkMetrics(
+    const NetworkMetrics& network_metrics) {
+  if (network_metrics.uplink_bandwidth_bps)
+    uplink_bandwidth_bps_ = network_metrics.uplink_bandwidth_bps;
+  if (network_metrics.uplink_packet_loss_fraction) {
+    packet_loss_smoother_->AddSample(
+        *network_metrics.uplink_packet_loss_fraction);
+  }
+}
+
+void FecControllerPlrBased::MakeDecision(AudioEncoderRuntimeConfig* config) {
+  RTC_DCHECK(!config->enable_fec);
+  RTC_DCHECK(!config->uplink_packet_loss_fraction);
+
+  const auto& packet_loss = packet_loss_smoother_->GetAverage();
+
+  fec_enabled_ = fec_enabled_ ? !FecDisablingDecision(packet_loss)
+                              : FecEnablingDecision(packet_loss);
+
+  config->enable_fec = fec_enabled_;
+
+  config->uplink_packet_loss_fraction = packet_loss ? *packet_loss : 0.0;
+}
+
+bool FecControllerPlrBased::FecEnablingDecision(
+    const rtc::Optional<float>& packet_loss) const {
+  if (!uplink_bandwidth_bps_ || !packet_loss) {
+    return false;
+  } else {
+    // Enable when above the curve or exactly on it.
+    return !config_.fec_enabling_threshold.IsBelowCurve(
+        {static_cast<float>(*uplink_bandwidth_bps_), *packet_loss});
+  }
+}
+
+bool FecControllerPlrBased::FecDisablingDecision(
+    const rtc::Optional<float>& packet_loss) const {
+  if (!uplink_bandwidth_bps_ || !packet_loss) {
+    return false;
+  } else {
+    // Disable when below the curve.
+    return config_.fec_disabling_threshold.IsBelowCurve(
+        {static_cast<float>(*uplink_bandwidth_bps_), *packet_loss});
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h b/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h
new file mode 100644
index 0000000..c273537
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h
@@ -0,0 +1,72 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_FEC_CONTROLLER_PLR_BASED_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_FEC_CONTROLLER_PLR_BASED_H_
+
+#include <memory>
+
+#include "common_audio/smoothing_filter.h"
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+#include "modules/audio_coding/audio_network_adaptor/util/threshold_curve.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class FecControllerPlrBased final : public Controller {
+ public:
+  struct Config {
+    // |fec_enabling_threshold| defines a curve, above which FEC should be
+    // enabled. |fec_disabling_threshold| defines a curve, under which FEC
+    // should be disabled. See below
+    //
+    // packet-loss ^   |  |
+    //             |   |  |   FEC
+    //             |    \  \   ON
+    //             | FEC \  \_______ fec_enabling_threshold
+    //             | OFF  \_________ fec_disabling_threshold
+    //             |-----------------> bandwidth
+    Config(bool initial_fec_enabled,
+           const ThresholdCurve& fec_enabling_threshold,
+           const ThresholdCurve& fec_disabling_threshold,
+           int time_constant_ms);
+    bool initial_fec_enabled;
+    ThresholdCurve fec_enabling_threshold;
+    ThresholdCurve fec_disabling_threshold;
+    int time_constant_ms;
+  };
+
+  // Dependency injection for testing.
+  FecControllerPlrBased(const Config& config,
+                        std::unique_ptr<SmoothingFilter> smoothing_filter);
+
+  explicit FecControllerPlrBased(const Config& config);
+
+  ~FecControllerPlrBased() override;
+
+  void UpdateNetworkMetrics(const NetworkMetrics& network_metrics) override;
+
+  void MakeDecision(AudioEncoderRuntimeConfig* config) override;
+
+ private:
+  bool FecEnablingDecision(const rtc::Optional<float>& packet_loss) const;
+  bool FecDisablingDecision(const rtc::Optional<float>& packet_loss) const;
+
+  const Config config_;
+  bool fec_enabled_;
+  rtc::Optional<int> uplink_bandwidth_bps_;
+  const std::unique_ptr<SmoothingFilter> packet_loss_smoother_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(FecControllerPlrBased);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_FEC_CONTROLLER_PLR_BASED_H_
diff --git a/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based_unittest.cc b/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based_unittest.cc
new file mode 100644
index 0000000..8636aa9
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/fec_controller_plr_based_unittest.cc
@@ -0,0 +1,488 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <utility>
+
+#include "common_audio/mocks/mock_smoothing_filter.h"
+#include "modules/audio_coding/audio_network_adaptor/fec_controller_plr_based.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+using ::testing::NiceMock;
+using ::testing::Return;
+using ::testing::_;
+
+namespace {
+
+// The test uses the following settings:
+//
+// packet-loss ^   |  |
+//             |  A| C|   FEC
+//             |    \  \   ON
+//             | FEC \ D\_______
+//             | OFF B\_________
+//             |-----------------> bandwidth
+//
+// A : (kDisablingBandwidthLow, kDisablingPacketLossAtLowBw)
+// B : (kDisablingBandwidthHigh, kDisablingPacketLossAtHighBw)
+// C : (kEnablingBandwidthLow, kEnablingPacketLossAtLowBw)
+// D : (kEnablingBandwidthHigh, kEnablingPacketLossAtHighBw)
+
+constexpr int kDisablingBandwidthLow = 15000;
+constexpr float kDisablingPacketLossAtLowBw = 0.08f;
+constexpr int kDisablingBandwidthHigh = 64000;
+constexpr float kDisablingPacketLossAtHighBw = 0.01f;
+constexpr int kEnablingBandwidthLow = 17000;
+constexpr float kEnablingPacketLossAtLowBw = 0.1f;
+constexpr int kEnablingBandwidthHigh = 64000;
+constexpr float kEnablingPacketLossAtHighBw = 0.05f;
+
+constexpr float kEpsilon = 1e-5f;
+
+struct FecControllerPlrBasedTestStates {
+  std::unique_ptr<FecControllerPlrBased> controller;
+  MockSmoothingFilter* packet_loss_smoother;
+};
+
+FecControllerPlrBasedTestStates CreateFecControllerPlrBased(
+    bool initial_fec_enabled,
+    const ThresholdCurve& enabling_curve,
+    const ThresholdCurve& disabling_curve) {
+  FecControllerPlrBasedTestStates states;
+  std::unique_ptr<MockSmoothingFilter> mock_smoothing_filter(
+      new NiceMock<MockSmoothingFilter>());
+  states.packet_loss_smoother = mock_smoothing_filter.get();
+  states.controller.reset(new FecControllerPlrBased(
+      FecControllerPlrBased::Config(initial_fec_enabled, enabling_curve,
+                                    disabling_curve, 0),
+      std::move(mock_smoothing_filter)));
+  return states;
+}
+
+FecControllerPlrBasedTestStates CreateFecControllerPlrBased(
+    bool initial_fec_enabled) {
+  return CreateFecControllerPlrBased(
+      initial_fec_enabled,
+      ThresholdCurve(kEnablingBandwidthLow, kEnablingPacketLossAtLowBw,
+                     kEnablingBandwidthHigh, kEnablingPacketLossAtHighBw),
+      ThresholdCurve(kDisablingBandwidthLow, kDisablingPacketLossAtLowBw,
+                     kDisablingBandwidthHigh, kDisablingPacketLossAtHighBw));
+}
+
+void UpdateNetworkMetrics(FecControllerPlrBasedTestStates* states,
+                          const rtc::Optional<int>& uplink_bandwidth_bps,
+                          const rtc::Optional<float>& uplink_packet_loss) {
+  // UpdateNetworkMetrics can accept multiple network metric updates at once.
+  // However, currently, the most used case is to update one metric at a time.
+  // To reflect this fact, we separate the calls.
+  if (uplink_bandwidth_bps) {
+    Controller::NetworkMetrics network_metrics;
+    network_metrics.uplink_bandwidth_bps = uplink_bandwidth_bps;
+    states->controller->UpdateNetworkMetrics(network_metrics);
+  }
+  if (uplink_packet_loss) {
+    Controller::NetworkMetrics network_metrics;
+    network_metrics.uplink_packet_loss_fraction = uplink_packet_loss;
+    EXPECT_CALL(*states->packet_loss_smoother, AddSample(*uplink_packet_loss));
+    states->controller->UpdateNetworkMetrics(network_metrics);
+    // This is called during CheckDecision().
+    EXPECT_CALL(*states->packet_loss_smoother, GetAverage())
+        .WillOnce(Return(*uplink_packet_loss));
+  }
+}
+
+// Checks that the FEC decision and |uplink_packet_loss_fraction| given by
+// |states->controller->MakeDecision| matches |expected_enable_fec| and
+// |expected_uplink_packet_loss_fraction|, respectively.
+void CheckDecision(FecControllerPlrBasedTestStates* states,
+                   bool expected_enable_fec,
+                   float expected_uplink_packet_loss_fraction) {
+  AudioEncoderRuntimeConfig config;
+  states->controller->MakeDecision(&config);
+  EXPECT_EQ(expected_enable_fec, config.enable_fec);
+  EXPECT_EQ(expected_uplink_packet_loss_fraction,
+            config.uplink_packet_loss_fraction);
+}
+
+}  // namespace
+
+TEST(FecControllerPlrBasedTest, OutputInitValueBeforeAnyInputsAreReceived) {
+  for (bool initial_fec_enabled : {false, true}) {
+    auto states = CreateFecControllerPlrBased(initial_fec_enabled);
+    CheckDecision(&states, initial_fec_enabled, 0);
+  }
+}
+
+TEST(FecControllerPlrBasedTest, OutputInitValueWhenUplinkBandwidthUnknown) {
+  // Regardless of the initial FEC state and the packet-loss rate,
+  // the initial FEC state is maintained as long as the BWE is unknown.
+  for (bool initial_fec_enabled : {false, true}) {
+    for (float packet_loss :
+         {kDisablingPacketLossAtLowBw - kEpsilon, kDisablingPacketLossAtLowBw,
+          kDisablingPacketLossAtLowBw + kEpsilon,
+          kEnablingPacketLossAtLowBw - kEpsilon, kEnablingPacketLossAtLowBw,
+          kEnablingPacketLossAtLowBw + kEpsilon}) {
+      auto states = CreateFecControllerPlrBased(initial_fec_enabled);
+      UpdateNetworkMetrics(&states, rtc::nullopt, packet_loss);
+      CheckDecision(&states, initial_fec_enabled, packet_loss);
+    }
+  }
+}
+
+TEST(FecControllerPlrBasedTest,
+     OutputInitValueWhenUplinkPacketLossFractionUnknown) {
+  // Regardless of the initial FEC state and the BWE, the initial FEC state
+  // is maintained as long as the packet-loss rate is unknown.
+  for (bool initial_fec_enabled : {false, true}) {
+    for (int bandwidth : {kDisablingBandwidthLow - 1, kDisablingBandwidthLow,
+                          kDisablingBandwidthLow + 1, kEnablingBandwidthLow - 1,
+                          kEnablingBandwidthLow, kEnablingBandwidthLow + 1}) {
+      auto states = CreateFecControllerPlrBased(initial_fec_enabled);
+      UpdateNetworkMetrics(&states, bandwidth, rtc::nullopt);
+      CheckDecision(&states, initial_fec_enabled, 0.0);
+    }
+  }
+}
+
+TEST(FecControllerPlrBasedTest, EnableFecForHighBandwidth) {
+  auto states = CreateFecControllerPlrBased(false);
+  UpdateNetworkMetrics(&states, kEnablingBandwidthHigh,
+                       kEnablingPacketLossAtHighBw);
+  CheckDecision(&states, true, kEnablingPacketLossAtHighBw);
+}
+
+TEST(FecControllerPlrBasedTest, UpdateMultipleNetworkMetricsAtOnce) {
+  // This test is similar to EnableFecForHighBandwidth. But instead of
+  // using ::UpdateNetworkMetrics(...), which calls
+  // FecControllerPlrBased::UpdateNetworkMetrics(...) multiple times, we
+  // we call it only once. This is to verify that
+  // FecControllerPlrBased::UpdateNetworkMetrics(...) can handle multiple
+  // network updates at once. This is, however, not a common use case in current
+  // audio_network_adaptor_impl.cc.
+  auto states = CreateFecControllerPlrBased(false);
+  Controller::NetworkMetrics network_metrics;
+  network_metrics.uplink_bandwidth_bps = kEnablingBandwidthHigh;
+  network_metrics.uplink_packet_loss_fraction = kEnablingPacketLossAtHighBw;
+  EXPECT_CALL(*states.packet_loss_smoother, GetAverage())
+      .WillOnce(Return(kEnablingPacketLossAtHighBw));
+  states.controller->UpdateNetworkMetrics(network_metrics);
+  CheckDecision(&states, true, kEnablingPacketLossAtHighBw);
+}
+
+TEST(FecControllerPlrBasedTest, MaintainFecOffForHighBandwidth) {
+  auto states = CreateFecControllerPlrBased(false);
+  constexpr float kPacketLoss = kEnablingPacketLossAtHighBw * 0.99f;
+  UpdateNetworkMetrics(&states, kEnablingBandwidthHigh, kPacketLoss);
+  CheckDecision(&states, false, kPacketLoss);
+}
+
+TEST(FecControllerPlrBasedTest, EnableFecForMediumBandwidth) {
+  auto states = CreateFecControllerPlrBased(false);
+  constexpr float kPacketLoss =
+      (kEnablingPacketLossAtLowBw + kEnablingPacketLossAtHighBw) / 2.0;
+  UpdateNetworkMetrics(&states,
+                       (kEnablingBandwidthHigh + kEnablingBandwidthLow) / 2,
+                       kPacketLoss);
+  CheckDecision(&states, true, kPacketLoss);
+}
+
+TEST(FecControllerPlrBasedTest, MaintainFecOffForMediumBandwidth) {
+  auto states = CreateFecControllerPlrBased(false);
+  constexpr float kPacketLoss =
+      kEnablingPacketLossAtLowBw * 0.49f + kEnablingPacketLossAtHighBw * 0.51f;
+  UpdateNetworkMetrics(&states,
+                       (kEnablingBandwidthHigh + kEnablingBandwidthLow) / 2,
+                       kPacketLoss);
+  CheckDecision(&states, false, kPacketLoss);
+}
+
+TEST(FecControllerPlrBasedTest, EnableFecForLowBandwidth) {
+  auto states = CreateFecControllerPlrBased(false);
+  UpdateNetworkMetrics(&states, kEnablingBandwidthLow,
+                       kEnablingPacketLossAtLowBw);
+  CheckDecision(&states, true, kEnablingPacketLossAtLowBw);
+}
+
+TEST(FecControllerPlrBasedTest, MaintainFecOffForLowBandwidth) {
+  auto states = CreateFecControllerPlrBased(false);
+  constexpr float kPacketLoss = kEnablingPacketLossAtLowBw * 0.99f;
+  UpdateNetworkMetrics(&states, kEnablingBandwidthLow, kPacketLoss);
+  CheckDecision(&states, false, kPacketLoss);
+}
+
+TEST(FecControllerPlrBasedTest, MaintainFecOffForVeryLowBandwidth) {
+  auto states = CreateFecControllerPlrBased(false);
+  // Below |kEnablingBandwidthLow|, no packet loss fraction can cause FEC to
+  // turn on.
+  UpdateNetworkMetrics(&states, kEnablingBandwidthLow - 1, 1.0);
+  CheckDecision(&states, false, 1.0);
+}
+
+TEST(FecControllerPlrBasedTest, DisableFecForHighBandwidth) {
+  auto states = CreateFecControllerPlrBased(true);
+  constexpr float kPacketLoss = kDisablingPacketLossAtHighBw - kEpsilon;
+  UpdateNetworkMetrics(&states, kDisablingBandwidthHigh, kPacketLoss);
+  CheckDecision(&states, false, kPacketLoss);
+}
+
+TEST(FecControllerPlrBasedTest, MaintainFecOnForHighBandwidth) {
+  // Note: Disabling happens when the value is strictly below the threshold.
+  auto states = CreateFecControllerPlrBased(true);
+  UpdateNetworkMetrics(&states, kDisablingBandwidthHigh,
+                       kDisablingPacketLossAtHighBw);
+  CheckDecision(&states, true, kDisablingPacketLossAtHighBw);
+}
+
+TEST(FecControllerPlrBasedTest, DisableFecOnMediumBandwidth) {
+  auto states = CreateFecControllerPlrBased(true);
+  constexpr float kPacketLoss =
+      (kDisablingPacketLossAtLowBw + kDisablingPacketLossAtHighBw) / 2.0f -
+      kEpsilon;
+  UpdateNetworkMetrics(&states,
+                       (kDisablingBandwidthHigh + kDisablingBandwidthLow) / 2,
+                       kPacketLoss);
+  CheckDecision(&states, false, kPacketLoss);
+}
+
+TEST(FecControllerPlrBasedTest, MaintainFecOnForMediumBandwidth) {
+  auto states = CreateFecControllerPlrBased(true);
+  constexpr float kPacketLoss = kDisablingPacketLossAtLowBw * 0.51f +
+                                kDisablingPacketLossAtHighBw * 0.49f - kEpsilon;
+  UpdateNetworkMetrics(&states,
+                       (kEnablingBandwidthHigh + kDisablingBandwidthLow) / 2,
+                       kPacketLoss);
+  CheckDecision(&states, true, kPacketLoss);
+}
+
+TEST(FecControllerPlrBasedTest, DisableFecForLowBandwidth) {
+  auto states = CreateFecControllerPlrBased(true);
+  constexpr float kPacketLoss = kDisablingPacketLossAtLowBw - kEpsilon;
+  UpdateNetworkMetrics(&states, kDisablingBandwidthLow, kPacketLoss);
+  CheckDecision(&states, false, kPacketLoss);
+}
+
+TEST(FecControllerPlrBasedTest, DisableFecForVeryLowBandwidth) {
+  auto states = CreateFecControllerPlrBased(true);
+  // Below |kEnablingBandwidthLow|, any packet loss fraction can cause FEC to
+  // turn off.
+  UpdateNetworkMetrics(&states, kDisablingBandwidthLow - 1, 1.0);
+  CheckDecision(&states, false, 1.0);
+}
+
+TEST(FecControllerPlrBasedTest, CheckBehaviorOnChangingNetworkMetrics) {
+  // In this test, we let the network metrics to traverse from 1 to 5.
+  // packet-loss ^ 1 |  |
+  //             |   | 2|
+  //             |    \  \ 3
+  //             |     \4 \_______
+  //             |      \_________
+  //             |---------5-------> bandwidth
+
+  auto states = CreateFecControllerPlrBased(true);
+  UpdateNetworkMetrics(&states, kDisablingBandwidthLow - 1, 1.0);
+  CheckDecision(&states, false, 1.0);
+
+  UpdateNetworkMetrics(&states, kEnablingBandwidthLow,
+                       kEnablingPacketLossAtLowBw * 0.99f);
+  CheckDecision(&states, false, kEnablingPacketLossAtLowBw * 0.99f);
+
+  UpdateNetworkMetrics(&states, kEnablingBandwidthHigh,
+                       kEnablingPacketLossAtHighBw);
+  CheckDecision(&states, true, kEnablingPacketLossAtHighBw);
+
+  UpdateNetworkMetrics(&states, kDisablingBandwidthHigh,
+                       kDisablingPacketLossAtHighBw);
+  CheckDecision(&states, true, kDisablingPacketLossAtHighBw);
+
+  UpdateNetworkMetrics(&states, kDisablingBandwidthHigh + 1, 0.0);
+  CheckDecision(&states, false, 0.0);
+}
+
+TEST(FecControllerPlrBasedTest, CheckBehaviorOnSpecialCurves) {
+  // We test a special configuration, where the points to define the FEC
+  // enabling/disabling curves are placed like the following, otherwise the test
+  // is the same as CheckBehaviorOnChangingNetworkMetrics.
+  //
+  // packet-loss ^   |  |
+  //             |   | C|
+  //             |   |  |
+  //             |   | D|_______
+  //             |  A|___B______
+  //             |-----------------> bandwidth
+
+  constexpr int kEnablingBandwidthHigh = kEnablingBandwidthLow;
+  constexpr float kDisablingPacketLossAtLowBw = kDisablingPacketLossAtHighBw;
+  FecControllerPlrBasedTestStates states;
+  std::unique_ptr<MockSmoothingFilter> mock_smoothing_filter(
+      new NiceMock<MockSmoothingFilter>());
+  states.packet_loss_smoother = mock_smoothing_filter.get();
+  states.controller.reset(new FecControllerPlrBased(
+      FecControllerPlrBased::Config(
+          true,
+          ThresholdCurve(kEnablingBandwidthLow, kEnablingPacketLossAtLowBw,
+                         kEnablingBandwidthHigh, kEnablingPacketLossAtHighBw),
+          ThresholdCurve(kDisablingBandwidthLow, kDisablingPacketLossAtLowBw,
+                         kDisablingBandwidthHigh, kDisablingPacketLossAtHighBw),
+          0),
+      std::move(mock_smoothing_filter)));
+
+  UpdateNetworkMetrics(&states, kDisablingBandwidthLow - 1, 1.0);
+  CheckDecision(&states, false, 1.0);
+
+  UpdateNetworkMetrics(&states, kEnablingBandwidthLow,
+                       kEnablingPacketLossAtHighBw * 0.99f);
+  CheckDecision(&states, false, kEnablingPacketLossAtHighBw * 0.99f);
+
+  UpdateNetworkMetrics(&states, kEnablingBandwidthHigh,
+                       kEnablingPacketLossAtHighBw);
+  CheckDecision(&states, true, kEnablingPacketLossAtHighBw);
+
+  UpdateNetworkMetrics(&states, kDisablingBandwidthHigh,
+                       kDisablingPacketLossAtHighBw);
+  CheckDecision(&states, true, kDisablingPacketLossAtHighBw);
+
+  UpdateNetworkMetrics(&states, kDisablingBandwidthHigh + 1, 0.0);
+  CheckDecision(&states, false, 0.0);
+}
+
+TEST(FecControllerPlrBasedTest, SingleThresholdCurveForEnablingAndDisabling) {
+  // Note: To avoid numerical errors, keep kPacketLossAtLowBw and
+  // kPacketLossAthighBw as (negative) integer powers of 2.
+  // This is mostly relevant for the O3 case.
+  constexpr int kBandwidthLow = 10000;
+  constexpr float kPacketLossAtLowBw = 0.25f;
+  constexpr int kBandwidthHigh = 20000;
+  constexpr float kPacketLossAtHighBw = 0.125f;
+  auto curve = ThresholdCurve(kBandwidthLow, kPacketLossAtLowBw, kBandwidthHigh,
+                              kPacketLossAtHighBw);
+
+  // B* stands for "below-curve", O* for "on-curve", and A* for "above-curve".
+  //
+  //                                            //
+  // packet-loss ^                              //
+  //             |    |                         //
+  //             | B1 O1                        //
+  //             |    |                         //
+  //             |    O2                        //
+  //             |     \ A1                     //
+  //             |      \                       //
+  //             |       O3   A2                //
+  //             |     B2 \                     //
+  //             |         \                    //
+  //             |          O4--O5----          //
+  //             |                              //
+  //             |            B3                //
+  //             |-----------------> bandwidth  //
+
+  struct NetworkState {
+    int bandwidth;
+    float packet_loss;
+  };
+
+  std::vector<NetworkState> below{
+      {kBandwidthLow - 1, kPacketLossAtLowBw + 0.1f},  // B1
+      {(kBandwidthLow + kBandwidthHigh) / 2,
+       (kPacketLossAtLowBw + kPacketLossAtHighBw) / 2 - kEpsilon},  // B2
+      {kBandwidthHigh + 1, kPacketLossAtHighBw - kEpsilon}          // B3
+  };
+
+  std::vector<NetworkState> on{
+      {kBandwidthLow, kPacketLossAtLowBw + 0.1f},  // O1
+      {kBandwidthLow, kPacketLossAtLowBw},         // O2
+      {(kBandwidthLow + kBandwidthHigh) / 2,
+       (kPacketLossAtLowBw + kPacketLossAtHighBw) / 2},  // O3
+      {kBandwidthHigh, kPacketLossAtHighBw},             // O4
+      {kBandwidthHigh + 1, kPacketLossAtHighBw},         // O5
+  };
+
+  std::vector<NetworkState> above{
+      {(kBandwidthLow + kBandwidthHigh) / 2,
+       (kPacketLossAtLowBw + kPacketLossAtHighBw) / 2 + kEpsilon},  // A1
+      {kBandwidthHigh + 1, kPacketLossAtHighBw + kEpsilon},         // A2
+  };
+
+  // Test that FEC is turned off whenever we're below the curve, independent
+  // of the starting FEC state.
+  for (NetworkState net_state : below) {
+    for (bool initial_fec_enabled : {false, true}) {
+      auto states =
+          CreateFecControllerPlrBased(initial_fec_enabled, curve, curve);
+      UpdateNetworkMetrics(&states, net_state.bandwidth, net_state.packet_loss);
+      CheckDecision(&states, false, net_state.packet_loss);
+    }
+  }
+
+  // Test that FEC is turned on whenever we're on the curve or above it,
+  // independent of the starting FEC state.
+  for (std::vector<NetworkState> states_list : {on, above}) {
+    for (NetworkState net_state : states_list) {
+      for (bool initial_fec_enabled : {false, true}) {
+        auto states =
+            CreateFecControllerPlrBased(initial_fec_enabled, curve, curve);
+        UpdateNetworkMetrics(&states, net_state.bandwidth,
+                             net_state.packet_loss);
+        CheckDecision(&states, true, net_state.packet_loss);
+      }
+    }
+  }
+}
+
+TEST(FecControllerPlrBasedTest, FecAlwaysOff) {
+  ThresholdCurve always_off_curve(0, 1.0f + kEpsilon, 0, 1.0f + kEpsilon);
+  for (bool initial_fec_enabled : {false, true}) {
+    for (int bandwidth : {0, 10000}) {
+      for (float packet_loss : {0.0f, 0.5f, 1.0f}) {
+        auto states = CreateFecControllerPlrBased(
+            initial_fec_enabled, always_off_curve, always_off_curve);
+        UpdateNetworkMetrics(&states, bandwidth, packet_loss);
+        CheckDecision(&states, false, packet_loss);
+      }
+    }
+  }
+}
+
+TEST(FecControllerPlrBasedTest, FecAlwaysOn) {
+  ThresholdCurve always_on_curve(0, 0.0f, 0, 0.0f);
+  for (bool initial_fec_enabled : {false, true}) {
+    for (int bandwidth : {0, 10000}) {
+      for (float packet_loss : {0.0f, 0.5f, 1.0f}) {
+        auto states = CreateFecControllerPlrBased(
+            initial_fec_enabled, always_on_curve, always_on_curve);
+        UpdateNetworkMetrics(&states, bandwidth, packet_loss);
+        CheckDecision(&states, true, packet_loss);
+      }
+    }
+  }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST(FecControllerPlrBasedDeathTest, InvalidConfig) {
+  FecControllerPlrBasedTestStates states;
+  std::unique_ptr<MockSmoothingFilter> mock_smoothing_filter(
+      new NiceMock<MockSmoothingFilter>());
+  states.packet_loss_smoother = mock_smoothing_filter.get();
+  EXPECT_DEATH(
+      states.controller.reset(new FecControllerPlrBased(
+          FecControllerPlrBased::Config(
+              true,
+              ThresholdCurve(kDisablingBandwidthLow - 1,
+                             kEnablingPacketLossAtLowBw, kEnablingBandwidthHigh,
+                             kEnablingPacketLossAtHighBw),
+              ThresholdCurve(
+                  kDisablingBandwidthLow, kDisablingPacketLossAtLowBw,
+                  kDisablingBandwidthHigh, kDisablingPacketLossAtHighBw),
+              0),
+          std::move(mock_smoothing_filter))),
+      "Check failed");
+}
+#endif
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/fec_controller_rplr_based.cc b/modules/audio_coding/audio_network_adaptor/fec_controller_rplr_based.cc
new file mode 100644
index 0000000..c8cfd31
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/fec_controller_rplr_based.cc
@@ -0,0 +1,78 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/fec_controller_rplr_based.h"
+
+#include <limits>
+#include <utility>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+FecControllerRplrBased::Config::Config(
+    bool initial_fec_enabled,
+    const ThresholdCurve& fec_enabling_threshold,
+    const ThresholdCurve& fec_disabling_threshold)
+    : initial_fec_enabled(initial_fec_enabled),
+      fec_enabling_threshold(fec_enabling_threshold),
+      fec_disabling_threshold(fec_disabling_threshold) {}
+
+FecControllerRplrBased::FecControllerRplrBased(const Config& config)
+    : config_(config), fec_enabled_(config.initial_fec_enabled) {
+  RTC_DCHECK(config_.fec_disabling_threshold <= config_.fec_enabling_threshold);
+}
+
+FecControllerRplrBased::~FecControllerRplrBased() = default;
+
+void FecControllerRplrBased::UpdateNetworkMetrics(
+    const NetworkMetrics& network_metrics) {
+  if (network_metrics.uplink_bandwidth_bps)
+    uplink_bandwidth_bps_ = network_metrics.uplink_bandwidth_bps;
+  if (network_metrics.uplink_recoverable_packet_loss_fraction) {
+    uplink_recoverable_packet_loss_ =
+        network_metrics.uplink_recoverable_packet_loss_fraction;
+  }
+}
+
+void FecControllerRplrBased::MakeDecision(AudioEncoderRuntimeConfig* config) {
+  RTC_DCHECK(!config->enable_fec);
+  RTC_DCHECK(!config->uplink_packet_loss_fraction);
+
+  fec_enabled_ = fec_enabled_ ? !FecDisablingDecision() : FecEnablingDecision();
+
+  config->enable_fec = fec_enabled_;
+  config->uplink_packet_loss_fraction =
+      uplink_recoverable_packet_loss_ ? *uplink_recoverable_packet_loss_ : 0.0;
+}
+
+bool FecControllerRplrBased::FecEnablingDecision() const {
+  if (!uplink_bandwidth_bps_ || !uplink_recoverable_packet_loss_) {
+    return false;
+  } else {
+    // Enable when above the curve or exactly on it.
+    return !config_.fec_enabling_threshold.IsBelowCurve(
+        {static_cast<float>(*uplink_bandwidth_bps_),
+         *uplink_recoverable_packet_loss_});
+  }
+}
+
+bool FecControllerRplrBased::FecDisablingDecision() const {
+  if (!uplink_bandwidth_bps_ || !uplink_recoverable_packet_loss_) {
+    return false;
+  } else {
+    // Disable when below the curve.
+    return config_.fec_disabling_threshold.IsBelowCurve(
+        {static_cast<float>(*uplink_bandwidth_bps_),
+         *uplink_recoverable_packet_loss_});
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/fec_controller_rplr_based.h b/modules/audio_coding/audio_network_adaptor/fec_controller_rplr_based.h
new file mode 100644
index 0000000..ade55ae
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/fec_controller_rplr_based.h
@@ -0,0 +1,66 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_FEC_CONTROLLER_RPLR_BASED_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_FEC_CONTROLLER_RPLR_BASED_H_
+
+#include <memory>
+
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+#include "modules/audio_coding/audio_network_adaptor/util/threshold_curve.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class FecControllerRplrBased final : public Controller {
+ public:
+  struct Config {
+    // |fec_enabling_threshold| defines a curve, above which FEC should be
+    // enabled. |fec_disabling_threshold| defines a curve, under which FEC
+    // should be disabled. See below
+    //
+    // recoverable
+    // packet-loss ^   |  |
+    //             |   |  |   FEC
+    //             |    \  \   ON
+    //             | FEC \  \_______ fec_enabling_threshold
+    //             | OFF  \_________ fec_disabling_threshold
+    //             |-----------------> bandwidth
+    Config(bool initial_fec_enabled,
+           const ThresholdCurve& fec_enabling_threshold,
+           const ThresholdCurve& fec_disabling_threshold);
+    bool initial_fec_enabled;
+    ThresholdCurve fec_enabling_threshold;
+    ThresholdCurve fec_disabling_threshold;
+  };
+
+  explicit FecControllerRplrBased(const Config& config);
+
+  ~FecControllerRplrBased() override;
+
+  void UpdateNetworkMetrics(const NetworkMetrics& network_metrics) override;
+
+  void MakeDecision(AudioEncoderRuntimeConfig* config) override;
+
+ private:
+  bool FecEnablingDecision() const;
+  bool FecDisablingDecision() const;
+
+  const Config config_;
+  bool fec_enabled_;
+  rtc::Optional<int> uplink_bandwidth_bps_;
+  rtc::Optional<float> uplink_recoverable_packet_loss_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(FecControllerRplrBased);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_FEC_CONTROLLER_RPLR_BASED_H_
diff --git a/modules/audio_coding/audio_network_adaptor/fec_controller_rplr_based_unittest.cc b/modules/audio_coding/audio_network_adaptor/fec_controller_rplr_based_unittest.cc
new file mode 100644
index 0000000..0fc003b
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/fec_controller_rplr_based_unittest.cc
@@ -0,0 +1,520 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <random>
+#include <utility>
+
+#include "modules/audio_coding/audio_network_adaptor/fec_controller_rplr_based.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+// The test uses the following settings:
+//
+// recoverable ^
+// packet-loss |   |  |
+//             |  A| C|   FEC
+//             |    \  \   ON
+//             | FEC \ D\_______
+//             | OFF B\_________
+//             |-----------------> bandwidth
+//
+// A : (kDisablingBandwidthLow, kDisablingRecoverablePacketLossAtLowBw)
+// B : (kDisablingBandwidthHigh, kDisablingRecoverablePacketLossAtHighBw)
+// C : (kEnablingBandwidthLow, kEnablingRecoverablePacketLossAtLowBw)
+// D : (kEnablingBandwidthHigh, kEnablingRecoverablePacketLossAtHighBw)
+
+constexpr int kDisablingBandwidthLow = 15000;
+constexpr float kDisablingRecoverablePacketLossAtLowBw = 0.08f;
+constexpr int kDisablingBandwidthHigh = 64000;
+constexpr float kDisablingRecoverablePacketLossAtHighBw = 0.01f;
+constexpr int kEnablingBandwidthLow = 17000;
+constexpr float kEnablingRecoverablePacketLossAtLowBw = 0.1f;
+constexpr int kEnablingBandwidthHigh = 64000;
+constexpr float kEnablingRecoverablePacketLossAtHighBw = 0.05f;
+
+constexpr float kEpsilon = 1e-5f;
+
+rtc::Optional<float> GetRandomProbabilityOrUnknown() {
+  std::random_device rd;
+  std::mt19937 generator(rd());
+  std::uniform_real_distribution<> distribution(0, 1);
+
+  return (distribution(generator) < 0.2)
+             ? rtc::nullopt
+             : rtc::Optional<float>(distribution(generator));
+}
+
+std::unique_ptr<FecControllerRplrBased> CreateFecControllerRplrBased(
+    bool initial_fec_enabled) {
+  return std::unique_ptr<FecControllerRplrBased>(
+      new FecControllerRplrBased(FecControllerRplrBased::Config(
+          initial_fec_enabled,
+          ThresholdCurve(
+              kEnablingBandwidthLow, kEnablingRecoverablePacketLossAtLowBw,
+              kEnablingBandwidthHigh, kEnablingRecoverablePacketLossAtHighBw),
+          ThresholdCurve(kDisablingBandwidthLow,
+                         kDisablingRecoverablePacketLossAtLowBw,
+                         kDisablingBandwidthHigh,
+                         kDisablingRecoverablePacketLossAtHighBw))));
+}
+
+void UpdateNetworkMetrics(
+    FecControllerRplrBased* controller,
+    const rtc::Optional<int>& uplink_bandwidth_bps,
+    const rtc::Optional<float>& uplink_packet_loss,
+    const rtc::Optional<float>& uplink_recoveralbe_packet_loss) {
+  // UpdateNetworkMetrics can accept multiple network metric updates at once.
+  // However, currently, the most used case is to update one metric at a time.
+  // To reflect this fact, we separate the calls.
+  if (uplink_bandwidth_bps) {
+    Controller::NetworkMetrics network_metrics;
+    network_metrics.uplink_bandwidth_bps = uplink_bandwidth_bps;
+    controller->UpdateNetworkMetrics(network_metrics);
+  }
+  if (uplink_packet_loss) {
+    Controller::NetworkMetrics network_metrics;
+    network_metrics.uplink_packet_loss_fraction = uplink_packet_loss;
+    controller->UpdateNetworkMetrics(network_metrics);
+  }
+  if (uplink_recoveralbe_packet_loss) {
+    Controller::NetworkMetrics network_metrics;
+    network_metrics.uplink_recoverable_packet_loss_fraction =
+        uplink_recoveralbe_packet_loss;
+    controller->UpdateNetworkMetrics(network_metrics);
+  }
+}
+
+void UpdateNetworkMetrics(
+    FecControllerRplrBased* controller,
+    const rtc::Optional<int>& uplink_bandwidth_bps,
+    const rtc::Optional<float>& uplink_recoveralbe_packet_loss) {
+  // FecControllerRplrBased doesn't currently use the PLR (general packet-loss
+  // rate) at all. (This might be changed in the future.) The unit-tests will
+  // use a random value (including unknown), to show this does not interfere.
+  UpdateNetworkMetrics(controller, uplink_bandwidth_bps,
+                       GetRandomProbabilityOrUnknown(),
+                       uplink_recoveralbe_packet_loss);
+}
+
+// Checks that the FEC decision and |uplink_packet_loss_fraction| given by
+// |states->controller->MakeDecision| matches |expected_enable_fec| and
+// |expected_uplink_packet_loss_fraction|, respectively.
+void CheckDecision(FecControllerRplrBased* controller,
+                   bool expected_enable_fec,
+                   float expected_uplink_packet_loss_fraction) {
+  AudioEncoderRuntimeConfig config;
+  controller->MakeDecision(&config);
+
+  // Less compact than comparing optionals, but yields more readable errors.
+  EXPECT_TRUE(config.enable_fec);
+  if (config.enable_fec) {
+    EXPECT_EQ(expected_enable_fec, *config.enable_fec);
+  }
+  EXPECT_TRUE(config.uplink_packet_loss_fraction);
+  if (config.uplink_packet_loss_fraction) {
+    EXPECT_EQ(expected_uplink_packet_loss_fraction,
+              *config.uplink_packet_loss_fraction);
+  }
+}
+
+}  // namespace
+
+TEST(FecControllerRplrBasedTest, OutputInitValueBeforeAnyInputsAreReceived) {
+  for (bool initial_fec_enabled : {false, true}) {
+    auto controller = CreateFecControllerRplrBased(initial_fec_enabled);
+    CheckDecision(controller.get(), initial_fec_enabled, 0);
+  }
+}
+
+TEST(FecControllerRplrBasedTest, OutputInitValueWhenUplinkBandwidthUnknown) {
+  // Regardless of the initial FEC state and the recoverable-packet-loss
+  // rate, the initial FEC state is maintained as long as the BWE is unknown.
+  for (bool initial_fec_enabled : {false, true}) {
+    for (float recoverable_packet_loss :
+         {kDisablingRecoverablePacketLossAtHighBw - kEpsilon,
+          kDisablingRecoverablePacketLossAtHighBw,
+          kDisablingRecoverablePacketLossAtHighBw + kEpsilon,
+          kEnablingRecoverablePacketLossAtHighBw - kEpsilon,
+          kEnablingRecoverablePacketLossAtHighBw,
+          kEnablingRecoverablePacketLossAtHighBw + kEpsilon}) {
+      auto controller = CreateFecControllerRplrBased(initial_fec_enabled);
+      UpdateNetworkMetrics(controller.get(), rtc::nullopt,
+                           recoverable_packet_loss);
+      CheckDecision(controller.get(), initial_fec_enabled,
+                    recoverable_packet_loss);
+    }
+  }
+}
+
+TEST(FecControllerRplrBasedTest,
+     OutputInitValueWhenUplinkRecoverablePacketLossFractionUnknown) {
+  // Regardless of the initial FEC state and the BWE, the initial FEC state
+  // is maintained as long as the recoverable-packet-loss rate is unknown.
+  for (bool initial_fec_enabled : {false, true}) {
+    for (int bandwidth : {kDisablingBandwidthLow - 1, kDisablingBandwidthLow,
+                          kDisablingBandwidthLow + 1, kEnablingBandwidthLow - 1,
+                          kEnablingBandwidthLow, kEnablingBandwidthLow + 1}) {
+      auto controller = CreateFecControllerRplrBased(initial_fec_enabled);
+      UpdateNetworkMetrics(controller.get(), bandwidth, rtc::nullopt);
+      CheckDecision(controller.get(), initial_fec_enabled, 0.0);
+    }
+  }
+}
+
+TEST(FecControllerRplrBasedTest, EnableFecForHighBandwidth) {
+  auto controller = CreateFecControllerRplrBased(false);
+  UpdateNetworkMetrics(controller.get(), kEnablingBandwidthHigh,
+                       kEnablingRecoverablePacketLossAtHighBw);
+  CheckDecision(controller.get(), true, kEnablingRecoverablePacketLossAtHighBw);
+}
+
+TEST(FecControllerRplrBasedTest, UpdateMultipleNetworkMetricsAtOnce) {
+  // This test is similar to EnableFecForHighBandwidth. But instead of
+  // using ::UpdateNetworkMetrics(...), which calls
+  // FecControllerRplrBasedTest::UpdateNetworkMetrics(...) multiple times, we
+  // we call it only once. This is to verify that
+  // FecControllerRplrBasedTest::UpdateNetworkMetrics(...) can handle multiple
+  // network updates at once. This is, however, not a common use case in current
+  // audio_network_adaptor_impl.cc.
+  auto controller = CreateFecControllerRplrBased(false);
+  Controller::NetworkMetrics network_metrics;
+  network_metrics.uplink_bandwidth_bps = kEnablingBandwidthHigh;
+  network_metrics.uplink_packet_loss_fraction = GetRandomProbabilityOrUnknown();
+  network_metrics.uplink_recoverable_packet_loss_fraction =
+      kEnablingRecoverablePacketLossAtHighBw;
+  controller->UpdateNetworkMetrics(network_metrics);
+  CheckDecision(controller.get(), true, kEnablingRecoverablePacketLossAtHighBw);
+}
+
+TEST(FecControllerRplrBasedTest, MaintainFecOffForHighBandwidth) {
+  auto controller = CreateFecControllerRplrBased(false);
+  constexpr float kRecoverablePacketLoss =
+      kEnablingRecoverablePacketLossAtHighBw * 0.99f;
+  UpdateNetworkMetrics(controller.get(), kEnablingBandwidthHigh,
+                       kRecoverablePacketLoss);
+  CheckDecision(controller.get(), false, kRecoverablePacketLoss);
+}
+
+TEST(FecControllerRplrBasedTest, EnableFecForMediumBandwidth) {
+  auto controller = CreateFecControllerRplrBased(false);
+  constexpr float kRecoverablePacketLoss =
+      (kEnablingRecoverablePacketLossAtLowBw +
+       kEnablingRecoverablePacketLossAtHighBw) / 2.0;
+  UpdateNetworkMetrics(
+      controller.get(),
+      (kEnablingBandwidthHigh + kEnablingBandwidthLow) / 2,
+      kRecoverablePacketLoss);
+  CheckDecision(controller.get(), true, kRecoverablePacketLoss);
+}
+
+TEST(FecControllerRplrBasedTest, MaintainFecOffForMediumBandwidth) {
+  auto controller = CreateFecControllerRplrBased(false);
+  constexpr float kRecoverablePacketLoss =
+      kEnablingRecoverablePacketLossAtLowBw * 0.49f +
+      kEnablingRecoverablePacketLossAtHighBw * 0.51f;
+  UpdateNetworkMetrics(controller.get(),
+                       (kEnablingBandwidthHigh + kEnablingBandwidthLow) / 2,
+                       kRecoverablePacketLoss);
+  CheckDecision(controller.get(), false, kRecoverablePacketLoss);
+}
+
+TEST(FecControllerRplrBasedTest, EnableFecForLowBandwidth) {
+  auto controller = CreateFecControllerRplrBased(false);
+  UpdateNetworkMetrics(controller.get(), kEnablingBandwidthLow,
+                       kEnablingRecoverablePacketLossAtLowBw);
+  CheckDecision(controller.get(), true, kEnablingRecoverablePacketLossAtLowBw);
+}
+
+TEST(FecControllerRplrBasedTest, MaintainFecOffForLowBandwidth) {
+  auto controller = CreateFecControllerRplrBased(false);
+  constexpr float kRecoverablePacketLoss =
+      kEnablingRecoverablePacketLossAtLowBw * 0.99f;
+  UpdateNetworkMetrics(controller.get(), kEnablingBandwidthLow,
+                       kRecoverablePacketLoss);
+  CheckDecision(controller.get(), false, kRecoverablePacketLoss);
+}
+
+TEST(FecControllerRplrBasedTest, MaintainFecOffForVeryLowBandwidth) {
+  auto controller = CreateFecControllerRplrBased(false);
+  // Below |kEnablingBandwidthLow|, no recoverable packet loss fraction can
+  // cause FEC to turn on.
+  UpdateNetworkMetrics(controller.get(), kEnablingBandwidthLow - 1, 1.0);
+  CheckDecision(controller.get(), false, 1.0);
+}
+
+TEST(FecControllerRplrBasedTest, DisableFecForHighBandwidth) {
+  auto controller = CreateFecControllerRplrBased(true);
+  constexpr float kRecoverablePacketLoss =
+      kDisablingRecoverablePacketLossAtHighBw - kEpsilon;
+  UpdateNetworkMetrics(controller.get(), kDisablingBandwidthHigh,
+                       kRecoverablePacketLoss);
+  CheckDecision(controller.get(), false, kRecoverablePacketLoss);
+}
+
+TEST(FecControllerRplrBasedTest, MaintainFecOnForHighBandwidth) {
+  // Note: Disabling happens when the value is strictly below the threshold.
+  auto controller = CreateFecControllerRplrBased(true);
+  UpdateNetworkMetrics(controller.get(), kDisablingBandwidthHigh,
+                       kDisablingRecoverablePacketLossAtHighBw);
+  CheckDecision(controller.get(), true,
+                kDisablingRecoverablePacketLossAtHighBw);
+}
+
+TEST(FecControllerRplrBasedTest, DisableFecOnMediumBandwidth) {
+  auto controller = CreateFecControllerRplrBased(true);
+  constexpr float kRecoverablePacketLoss =
+      ((kDisablingRecoverablePacketLossAtLowBw +
+        kDisablingRecoverablePacketLossAtHighBw) / 2.0f) - kEpsilon;
+  UpdateNetworkMetrics(
+      controller.get(),
+      (kDisablingBandwidthHigh + kDisablingBandwidthLow) / 2,
+      kRecoverablePacketLoss);
+  CheckDecision(controller.get(), false, kRecoverablePacketLoss);
+}
+
+TEST(FecControllerRplrBasedTest, MaintainFecOnForMediumBandwidth) {
+  auto controller = CreateFecControllerRplrBased(true);
+  constexpr float kRecoverablePacketLoss =
+      kDisablingRecoverablePacketLossAtLowBw * 0.51f +
+      kDisablingRecoverablePacketLossAtHighBw * 0.49f - kEpsilon;
+  UpdateNetworkMetrics(controller.get(),
+                       (kEnablingBandwidthHigh + kDisablingBandwidthLow) / 2,
+                       kRecoverablePacketLoss);
+  CheckDecision(controller.get(), true, kRecoverablePacketLoss);
+}
+
+TEST(FecControllerRplrBasedTest, DisableFecForLowBandwidth) {
+  auto controller = CreateFecControllerRplrBased(true);
+  constexpr float kRecoverablePacketLoss =
+      kDisablingRecoverablePacketLossAtLowBw - kEpsilon;
+  UpdateNetworkMetrics(controller.get(), kDisablingBandwidthLow,
+                       kRecoverablePacketLoss);
+  CheckDecision(controller.get(), false, kRecoverablePacketLoss);
+}
+
+TEST(FecControllerRplrBasedTest, DisableFecForVeryLowBandwidth) {
+  auto controller = CreateFecControllerRplrBased(true);
+  // Below |kEnablingBandwidthLow|, any recoverable packet loss fraction can
+  // cause FEC to turn off.
+  UpdateNetworkMetrics(controller.get(), kDisablingBandwidthLow - 1, 1.0);
+  CheckDecision(controller.get(), false, 1.0);
+}
+
+TEST(FecControllerRplrBasedTest, CheckBehaviorOnChangingNetworkMetrics) {
+  // In this test, we let the network metrics to traverse from 1 to 5.
+  //
+  // recoverable ^
+  // packet-loss | 1 |  |
+  //             |   | 2|
+  //             |    \  \ 3
+  //             |     \4 \_______
+  //             |      \_________
+  //             |---------5-------> bandwidth
+
+  auto controller = CreateFecControllerRplrBased(true);
+  UpdateNetworkMetrics(controller.get(), kDisablingBandwidthLow - 1, 1.0);
+  CheckDecision(controller.get(), false, 1.0);
+
+  UpdateNetworkMetrics(controller.get(), kEnablingBandwidthLow,
+                       kEnablingRecoverablePacketLossAtLowBw * 0.99f);
+  CheckDecision(controller.get(), false,
+                kEnablingRecoverablePacketLossAtLowBw * 0.99f);
+
+  UpdateNetworkMetrics(controller.get(), kEnablingBandwidthHigh,
+                       kEnablingRecoverablePacketLossAtHighBw);
+  CheckDecision(controller.get(), true, kEnablingRecoverablePacketLossAtHighBw);
+
+  UpdateNetworkMetrics(controller.get(), kDisablingBandwidthHigh,
+                       kDisablingRecoverablePacketLossAtHighBw);
+  CheckDecision(controller.get(), true,
+                kDisablingRecoverablePacketLossAtHighBw);
+
+  UpdateNetworkMetrics(controller.get(), kDisablingBandwidthHigh + 1, 0.0);
+  CheckDecision(controller.get(), false, 0.0);
+}
+
+TEST(FecControllerRplrBasedTest, CheckBehaviorOnSpecialCurves) {
+  // We test a special configuration, where the points to define the FEC
+  // enabling/disabling curves are placed like the following, otherwise the test
+  // is the same as CheckBehaviorOnChangingNetworkMetrics.
+  //
+  // recoverable ^
+  // packet-loss |   |  |
+  //             |   | C|
+  //             |   |  |
+  //             |   | D|_______
+  //             |  A|___B______
+  //             |-----------------> bandwidth
+
+  constexpr int kEnablingBandwidthHigh = kEnablingBandwidthLow;
+  constexpr float kDisablingRecoverablePacketLossAtLowBw =
+      kDisablingRecoverablePacketLossAtHighBw;
+  FecControllerRplrBased controller(FecControllerRplrBased::Config(
+      true,
+      ThresholdCurve(
+          kEnablingBandwidthLow, kEnablingRecoverablePacketLossAtLowBw,
+          kEnablingBandwidthHigh, kEnablingRecoverablePacketLossAtHighBw),
+      ThresholdCurve(
+          kDisablingBandwidthLow, kDisablingRecoverablePacketLossAtLowBw,
+          kDisablingBandwidthHigh, kDisablingRecoverablePacketLossAtHighBw)));
+
+  UpdateNetworkMetrics(&controller, kDisablingBandwidthLow - 1, 1.0);
+  CheckDecision(&controller, false, 1.0);
+
+  UpdateNetworkMetrics(&controller, kEnablingBandwidthLow,
+                       kEnablingRecoverablePacketLossAtHighBw * 0.99f);
+  CheckDecision(&controller, false,
+                kEnablingRecoverablePacketLossAtHighBw * 0.99f);
+
+  UpdateNetworkMetrics(&controller, kEnablingBandwidthHigh,
+                       kEnablingRecoverablePacketLossAtHighBw);
+  CheckDecision(&controller, true, kEnablingRecoverablePacketLossAtHighBw);
+
+  UpdateNetworkMetrics(&controller, kDisablingBandwidthHigh,
+                       kDisablingRecoverablePacketLossAtHighBw);
+  CheckDecision(&controller, true, kDisablingRecoverablePacketLossAtHighBw);
+
+  UpdateNetworkMetrics(&controller, kDisablingBandwidthHigh + 1, 0.0);
+  CheckDecision(&controller, false, 0.0);
+}
+
+TEST(FecControllerRplrBasedTest, SingleThresholdCurveForEnablingAndDisabling) {
+  // Note: To avoid numerical errors, keep kRecoverablePacketLossAtLowBw and
+  // kRecoverablePacketLossAthighBw as (negative) integer powers of 2.
+  // This is mostly relevant for the O3 case.
+  constexpr int kBandwidthLow = 10000;
+  constexpr float kRecoverablePacketLossAtLowBw = 0.25f;
+  constexpr int kBandwidthHigh = 20000;
+  constexpr float kRecoverablePacketLossAtHighBw = 0.125f;
+  auto curve = ThresholdCurve(kBandwidthLow, kRecoverablePacketLossAtLowBw,
+                              kBandwidthHigh, kRecoverablePacketLossAtHighBw);
+
+  // B* stands for "below-curve", O* for "on-curve", and A* for "above-curve".
+  //
+  //                                            //
+  // recoverable ^                              //
+  // packet-loss |    |                         //
+  //             | B1 O1                        //
+  //             |    |                         //
+  //             |    O2                        //
+  //             |     \ A1                     //
+  //             |      \                       //
+  //             |       O3   A2                //
+  //             |     B2 \                     //
+  //             |         \                    //
+  //             |          O4--O5----          //
+  //             |                              //
+  //             |            B3                //
+  //             |-----------------> bandwidth  //
+
+  struct NetworkState {
+    int bandwidth;
+    float recoverable_packet_loss;
+  };
+
+  std::vector<NetworkState> below{
+      {kBandwidthLow - 1, kRecoverablePacketLossAtLowBw + 0.1f},  // B1
+      {(kBandwidthLow + kBandwidthHigh) / 2,
+       (kRecoverablePacketLossAtLowBw + kRecoverablePacketLossAtHighBw) / 2 -
+           kEpsilon},                                                  // B2
+      {kBandwidthHigh + 1, kRecoverablePacketLossAtHighBw - kEpsilon}  // B3
+  };
+
+  std::vector<NetworkState> on{
+      {kBandwidthLow, kRecoverablePacketLossAtLowBw + 0.1f},  // O1
+      {kBandwidthLow, kRecoverablePacketLossAtLowBw},         // O2
+      {(kBandwidthLow + kBandwidthHigh) / 2,
+       (kRecoverablePacketLossAtLowBw + kRecoverablePacketLossAtHighBw) /
+           2},                                               // O3
+      {kBandwidthHigh, kRecoverablePacketLossAtHighBw},      // O4
+      {kBandwidthHigh + 1, kRecoverablePacketLossAtHighBw},  // O5
+  };
+
+  std::vector<NetworkState> above{
+      {(kBandwidthLow + kBandwidthHigh) / 2,
+       (kRecoverablePacketLossAtLowBw + kRecoverablePacketLossAtHighBw) / 2 +
+           kEpsilon},                                                   // A1
+      {kBandwidthHigh + 1, kRecoverablePacketLossAtHighBw + kEpsilon},  // A2
+  };
+
+  // Test that FEC is turned off whenever we're below the curve, independent
+  // of the starting FEC state.
+  for (NetworkState net_state : below) {
+    for (bool initial_fec_enabled : {false, true}) {
+      FecControllerRplrBased controller(
+          FecControllerRplrBased::Config(initial_fec_enabled, curve, curve));
+      UpdateNetworkMetrics(&controller, net_state.bandwidth,
+                           net_state.recoverable_packet_loss);
+      CheckDecision(&controller, false, net_state.recoverable_packet_loss);
+    }
+  }
+
+  // Test that FEC is turned on whenever we're on the curve or above it,
+  // independent of the starting FEC state.
+  for (std::vector<NetworkState> states_list : {on, above}) {
+    for (NetworkState net_state : states_list) {
+      for (bool initial_fec_enabled : {false, true}) {
+        FecControllerRplrBased controller(
+            FecControllerRplrBased::Config(initial_fec_enabled, curve, curve));
+        UpdateNetworkMetrics(&controller, net_state.bandwidth,
+                             net_state.recoverable_packet_loss);
+        CheckDecision(&controller, true, net_state.recoverable_packet_loss);
+      }
+    }
+  }
+}
+
+TEST(FecControllerRplrBasedTest, FecAlwaysOff) {
+  ThresholdCurve always_off_curve(0, 1.0f + kEpsilon, 0, 1.0f + kEpsilon);
+  for (bool initial_fec_enabled : {false, true}) {
+    for (int bandwidth : {0, 10000}) {
+      for (float recoverable_packet_loss : {0.0f, 0.5f, 1.0f}) {
+        FecControllerRplrBased controller(FecControllerRplrBased::Config(
+            initial_fec_enabled, always_off_curve, always_off_curve));
+        UpdateNetworkMetrics(&controller, bandwidth, recoverable_packet_loss);
+        CheckDecision(&controller, false, recoverable_packet_loss);
+      }
+    }
+  }
+}
+
+TEST(FecControllerRplrBasedTest, FecAlwaysOn) {
+  ThresholdCurve always_on_curve(0, 0.0f, 0, 0.0f);
+  for (bool initial_fec_enabled : {false, true}) {
+    for (int bandwidth : {0, 10000}) {
+      for (float recoverable_packet_loss : {0.0f, 0.5f, 1.0f}) {
+        FecControllerRplrBased controller(FecControllerRplrBased::Config(
+            initial_fec_enabled, always_on_curve, always_on_curve));
+        UpdateNetworkMetrics(&controller, bandwidth, recoverable_packet_loss);
+        CheckDecision(&controller, true, recoverable_packet_loss);
+      }
+    }
+  }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST(FecControllerRplrBasedDeathTest, InvalidConfig) {
+  EXPECT_DEATH(
+      FecControllerRplrBased controller(FecControllerRplrBased::Config(
+          true,
+          ThresholdCurve(
+              kDisablingBandwidthLow - 1, kEnablingRecoverablePacketLossAtLowBw,
+              kEnablingBandwidthHigh, kEnablingRecoverablePacketLossAtHighBw),
+          ThresholdCurve(kDisablingBandwidthLow,
+                         kDisablingRecoverablePacketLossAtLowBw,
+                         kDisablingBandwidthHigh,
+                         kDisablingRecoverablePacketLossAtHighBw))),
+      "Check failed");
+}
+#endif
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/frame_length_controller.cc b/modules/audio_coding/audio_network_adaptor/frame_length_controller.cc
new file mode 100644
index 0000000..6c3cae0
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/frame_length_controller.cc
@@ -0,0 +1,183 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/audio_network_adaptor/frame_length_controller.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+constexpr int kPreventOveruseMarginBps = 5000;
+
+int OverheadRateBps(size_t overhead_bytes_per_packet, int frame_length_ms) {
+  return static_cast<int>(overhead_bytes_per_packet * 8 * 1000 /
+                          frame_length_ms);
+}
+}
+
+FrameLengthController::Config::Config(
+    const std::vector<int>& encoder_frame_lengths_ms,
+    int initial_frame_length_ms,
+    int min_encoder_bitrate_bps,
+    float fl_increasing_packet_loss_fraction,
+    float fl_decreasing_packet_loss_fraction,
+    int fl_increase_overhead_offset,
+    int fl_decrease_overhead_offset,
+    std::map<FrameLengthChange, int> fl_changing_bandwidths_bps)
+    : encoder_frame_lengths_ms(encoder_frame_lengths_ms),
+      initial_frame_length_ms(initial_frame_length_ms),
+      min_encoder_bitrate_bps(min_encoder_bitrate_bps),
+      fl_increasing_packet_loss_fraction(fl_increasing_packet_loss_fraction),
+      fl_decreasing_packet_loss_fraction(fl_decreasing_packet_loss_fraction),
+      fl_increase_overhead_offset(fl_increase_overhead_offset),
+      fl_decrease_overhead_offset(fl_decrease_overhead_offset),
+      fl_changing_bandwidths_bps(std::move(fl_changing_bandwidths_bps)) {}
+
+FrameLengthController::Config::Config(const Config& other) = default;
+
+FrameLengthController::Config::~Config() = default;
+
+FrameLengthController::FrameLengthController(const Config& config)
+    : config_(config) {
+  frame_length_ms_ = std::find(config_.encoder_frame_lengths_ms.begin(),
+                               config_.encoder_frame_lengths_ms.end(),
+                               config_.initial_frame_length_ms);
+  // |encoder_frame_lengths_ms| must contain |initial_frame_length_ms|.
+  RTC_DCHECK(frame_length_ms_ != config_.encoder_frame_lengths_ms.end());
+}
+
+FrameLengthController::~FrameLengthController() = default;
+
+void FrameLengthController::UpdateNetworkMetrics(
+    const NetworkMetrics& network_metrics) {
+  if (network_metrics.uplink_bandwidth_bps)
+    uplink_bandwidth_bps_ = network_metrics.uplink_bandwidth_bps;
+  if (network_metrics.uplink_packet_loss_fraction)
+    uplink_packet_loss_fraction_ = network_metrics.uplink_packet_loss_fraction;
+  if (network_metrics.overhead_bytes_per_packet)
+    overhead_bytes_per_packet_ = network_metrics.overhead_bytes_per_packet;
+}
+
+void FrameLengthController::MakeDecision(AudioEncoderRuntimeConfig* config) {
+  // Decision on |frame_length_ms| should not have been made.
+  RTC_DCHECK(!config->frame_length_ms);
+
+  if (FrameLengthIncreasingDecision(*config)) {
+    ++frame_length_ms_;
+    prev_decision_increase_ = true;
+  } else if (FrameLengthDecreasingDecision(*config)) {
+    --frame_length_ms_;
+    prev_decision_increase_ = false;
+  }
+  config->last_fl_change_increase = prev_decision_increase_;
+  config->frame_length_ms = *frame_length_ms_;
+}
+
+FrameLengthController::Config::FrameLengthChange::FrameLengthChange(
+    int from_frame_length_ms,
+    int to_frame_length_ms)
+    : from_frame_length_ms(from_frame_length_ms),
+      to_frame_length_ms(to_frame_length_ms) {}
+
+bool FrameLengthController::Config::FrameLengthChange::operator<(
+    const FrameLengthChange& rhs) const {
+  return from_frame_length_ms < rhs.from_frame_length_ms ||
+         (from_frame_length_ms == rhs.from_frame_length_ms &&
+          to_frame_length_ms < rhs.to_frame_length_ms);
+}
+
+bool FrameLengthController::FrameLengthIncreasingDecision(
+    const AudioEncoderRuntimeConfig& config) const {
+  // Increase frame length if
+  // 1. |uplink_bandwidth_bps| is known to be smaller or equal than
+  //    |min_encoder_bitrate_bps| plus |prevent_overuse_margin_bps| plus the
+  //    current overhead rate OR all the following:
+  // 2. longer frame length is available AND
+  // 3. |uplink_bandwidth_bps| is known to be smaller than a threshold AND
+  // 4. |uplink_packet_loss_fraction| is known to be smaller than a threshold.
+
+  auto longer_frame_length_ms = std::next(frame_length_ms_);
+  if (longer_frame_length_ms == config_.encoder_frame_lengths_ms.end())
+    return false;
+
+  auto increase_threshold = config_.fl_changing_bandwidths_bps.find(
+      Config::FrameLengthChange(*frame_length_ms_, *longer_frame_length_ms));
+
+  if (increase_threshold == config_.fl_changing_bandwidths_bps.end())
+    return false;
+
+  // Check that
+  // -(*overhead_bytes_per_packet_) <= offset <= (*overhead_bytes_per_packet_)
+  RTC_DCHECK(
+      !overhead_bytes_per_packet_ ||
+      (overhead_bytes_per_packet_ &&
+       static_cast<size_t>(std::max(0, -config_.fl_increase_overhead_offset)) <=
+           *overhead_bytes_per_packet_ &&
+       static_cast<size_t>(std::max(0, config_.fl_increase_overhead_offset)) <=
+           *overhead_bytes_per_packet_));
+
+  if (uplink_bandwidth_bps_ && overhead_bytes_per_packet_ &&
+      *uplink_bandwidth_bps_ <=
+          config_.min_encoder_bitrate_bps + kPreventOveruseMarginBps +
+              OverheadRateBps(*overhead_bytes_per_packet_ +
+                                  config_.fl_increase_overhead_offset,
+                              *frame_length_ms_)) {
+    return true;
+  }
+
+  return (uplink_bandwidth_bps_ &&
+          *uplink_bandwidth_bps_ <= increase_threshold->second) &&
+         (uplink_packet_loss_fraction_ &&
+          *uplink_packet_loss_fraction_ <=
+              config_.fl_increasing_packet_loss_fraction);
+}
+
+bool FrameLengthController::FrameLengthDecreasingDecision(
+    const AudioEncoderRuntimeConfig& config) const {
+  // Decrease frame length if
+  // 1. shorter frame length is available AND
+  // 2. |uplink_bandwidth_bps| is known to be bigger than
+  // |min_encoder_bitrate_bps| plus |prevent_overuse_margin_bps| plus the
+  // overhead which would be produced with the shorter frame length AND
+  // one or more of the followings:
+  // 3. |uplink_bandwidth_bps| is known to be larger than a threshold,
+  // 4. |uplink_packet_loss_fraction| is known to be larger than a threshold,
+  if (frame_length_ms_ == config_.encoder_frame_lengths_ms.begin())
+    return false;
+
+  auto shorter_frame_length_ms = std::prev(frame_length_ms_);
+  auto decrease_threshold = config_.fl_changing_bandwidths_bps.find(
+      Config::FrameLengthChange(*frame_length_ms_, *shorter_frame_length_ms));
+
+  if (decrease_threshold == config_.fl_changing_bandwidths_bps.end())
+    return false;
+
+  if (uplink_bandwidth_bps_ && overhead_bytes_per_packet_ &&
+      *uplink_bandwidth_bps_ <=
+          config_.min_encoder_bitrate_bps + kPreventOveruseMarginBps +
+              OverheadRateBps(*overhead_bytes_per_packet_ +
+                                  config_.fl_decrease_overhead_offset,
+                              *shorter_frame_length_ms)) {
+    return false;
+  }
+
+  return (uplink_bandwidth_bps_ &&
+          *uplink_bandwidth_bps_ >= decrease_threshold->second) ||
+         (uplink_packet_loss_fraction_ &&
+          *uplink_packet_loss_fraction_ >=
+              config_.fl_decreasing_packet_loss_fraction);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/frame_length_controller.h b/modules/audio_coding/audio_network_adaptor/frame_length_controller.h
new file mode 100644
index 0000000..c254b3d
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/frame_length_controller.h
@@ -0,0 +1,91 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_FRAME_LENGTH_CONTROLLER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_FRAME_LENGTH_CONTROLLER_H_
+
+#include <map>
+#include <vector>
+
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Determines target frame length based on the network metrics and the decision
+// of FEC controller.
+class FrameLengthController final : public Controller {
+ public:
+  struct Config {
+    struct FrameLengthChange {
+      FrameLengthChange(int from_frame_length_ms, int to_frame_length_ms);
+      bool operator<(const FrameLengthChange& rhs) const;
+      int from_frame_length_ms;
+      int to_frame_length_ms;
+    };
+    Config(const std::vector<int>& encoder_frame_lengths_ms,
+           int initial_frame_length_ms,
+           int min_encoder_bitrate_bps,
+           float fl_increasing_packet_loss_fraction,
+           float fl_decreasing_packet_loss_fraction,
+           int fl_increase_overhead_offset,
+           int fl_decrease_overhead_offset,
+           std::map<FrameLengthChange, int> fl_changing_bandwidths_bps);
+    Config(const Config& other);
+    ~Config();
+    std::vector<int> encoder_frame_lengths_ms;
+    int initial_frame_length_ms;
+    int min_encoder_bitrate_bps;
+    // Uplink packet loss fraction below which frame length can increase.
+    float fl_increasing_packet_loss_fraction;
+    // Uplink packet loss fraction below which frame length should decrease.
+    float fl_decreasing_packet_loss_fraction;
+    // Offset to apply to overhead calculation when increasing frame length.
+    int fl_increase_overhead_offset;
+    // Offset to apply to overhead calculation when decreasing frame length.
+    int fl_decrease_overhead_offset;
+    std::map<FrameLengthChange, int> fl_changing_bandwidths_bps;
+  };
+
+  explicit FrameLengthController(const Config& config);
+
+  ~FrameLengthController() override;
+
+  void UpdateNetworkMetrics(const NetworkMetrics& network_metrics) override;
+
+  void MakeDecision(AudioEncoderRuntimeConfig* config) override;
+
+ private:
+  bool FrameLengthIncreasingDecision(
+      const AudioEncoderRuntimeConfig& config) const;
+
+  bool FrameLengthDecreasingDecision(
+      const AudioEncoderRuntimeConfig& config) const;
+
+  const Config config_;
+
+  std::vector<int>::const_iterator frame_length_ms_;
+
+  rtc::Optional<int> uplink_bandwidth_bps_;
+
+  rtc::Optional<float> uplink_packet_loss_fraction_;
+
+  rtc::Optional<size_t> overhead_bytes_per_packet_;
+
+  // True if the previous frame length decision was an increase, otherwise
+  // false.
+  bool prev_decision_increase_ = false;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(FrameLengthController);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_FRAME_LENGTH_CONTROLLER_H_
diff --git a/modules/audio_coding/audio_network_adaptor/frame_length_controller_unittest.cc b/modules/audio_coding/audio_network_adaptor/frame_length_controller_unittest.cc
new file mode 100644
index 0000000..1f98447
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/frame_length_controller_unittest.cc
@@ -0,0 +1,381 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <utility>
+
+#include "modules/audio_coding/audio_network_adaptor/frame_length_controller.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr float kFlIncreasingPacketLossFraction = 0.04f;
+constexpr float kFlDecreasingPacketLossFraction = 0.05f;
+constexpr int kFlIncreaseOverheadOffset = 0;
+constexpr int kFlDecreaseOverheadOffset = 0;
+constexpr int kMinEncoderBitrateBps = 6000;
+constexpr int kPreventOveruseMarginBps = 5000;
+constexpr size_t kOverheadBytesPerPacket = 20;
+constexpr int kFl20msTo60msBandwidthBps = 40000;
+constexpr int kFl60msTo20msBandwidthBps = 50000;
+constexpr int kFl60msTo120msBandwidthBps = 30000;
+constexpr int kFl120msTo60msBandwidthBps = 40000;
+constexpr int kMediumBandwidthBps =
+    (kFl60msTo20msBandwidthBps + kFl20msTo60msBandwidthBps) / 2;
+constexpr float kMediumPacketLossFraction =
+    (kFlDecreasingPacketLossFraction + kFlIncreasingPacketLossFraction) / 2;
+
+int VeryLowBitrate(int frame_length_ms) {
+  return kMinEncoderBitrateBps + kPreventOveruseMarginBps +
+         (kOverheadBytesPerPacket * 8 * 1000 / frame_length_ms);
+}
+
+std::unique_ptr<FrameLengthController> CreateController(
+    const std::map<FrameLengthController::Config::FrameLengthChange, int>&
+        frame_length_change_criteria,
+    const std::vector<int>& encoder_frame_lengths_ms,
+    int initial_frame_length_ms) {
+  std::unique_ptr<FrameLengthController> controller(
+      new FrameLengthController(FrameLengthController::Config(
+          encoder_frame_lengths_ms, initial_frame_length_ms,
+          kMinEncoderBitrateBps, kFlIncreasingPacketLossFraction,
+          kFlDecreasingPacketLossFraction, kFlIncreaseOverheadOffset,
+          kFlDecreaseOverheadOffset, frame_length_change_criteria)));
+
+  return controller;
+}
+
+std::map<FrameLengthController::Config::FrameLengthChange, int>
+CreateChangeCriteriaFor20msAnd60ms() {
+  return std::map<FrameLengthController::Config::FrameLengthChange, int>{
+      {FrameLengthController::Config::FrameLengthChange(20, 60),
+       kFl20msTo60msBandwidthBps},
+      {FrameLengthController::Config::FrameLengthChange(60, 20),
+       kFl60msTo20msBandwidthBps}};
+}
+
+std::map<FrameLengthController::Config::FrameLengthChange, int>
+CreateChangeCriteriaFor20ms60msAnd120ms() {
+  return std::map<FrameLengthController::Config::FrameLengthChange, int>{
+      {FrameLengthController::Config::FrameLengthChange(20, 60),
+       kFl20msTo60msBandwidthBps},
+      {FrameLengthController::Config::FrameLengthChange(60, 20),
+       kFl60msTo20msBandwidthBps},
+      {FrameLengthController::Config::FrameLengthChange(60, 120),
+       kFl60msTo120msBandwidthBps},
+      {FrameLengthController::Config::FrameLengthChange(120, 60),
+       kFl120msTo60msBandwidthBps}};
+}
+
+void UpdateNetworkMetrics(
+    FrameLengthController* controller,
+    const rtc::Optional<int>& uplink_bandwidth_bps,
+    const rtc::Optional<float>& uplink_packet_loss_fraction,
+    const rtc::Optional<size_t>& overhead_bytes_per_packet) {
+  // UpdateNetworkMetrics can accept multiple network metric updates at once.
+  // However, currently, the most used case is to update one metric at a time.
+  // To reflect this fact, we separate the calls.
+  if (uplink_bandwidth_bps) {
+    Controller::NetworkMetrics network_metrics;
+    network_metrics.uplink_bandwidth_bps = uplink_bandwidth_bps;
+    controller->UpdateNetworkMetrics(network_metrics);
+  }
+  if (uplink_packet_loss_fraction) {
+    Controller::NetworkMetrics network_metrics;
+    network_metrics.uplink_packet_loss_fraction = uplink_packet_loss_fraction;
+    controller->UpdateNetworkMetrics(network_metrics);
+  }
+  if (overhead_bytes_per_packet) {
+    Controller::NetworkMetrics network_metrics;
+    network_metrics.overhead_bytes_per_packet = overhead_bytes_per_packet;
+    controller->UpdateNetworkMetrics(network_metrics);
+  }
+}
+
+void CheckDecision(FrameLengthController* controller,
+                   int expected_frame_length_ms) {
+  AudioEncoderRuntimeConfig config;
+  controller->MakeDecision(&config);
+  EXPECT_EQ(expected_frame_length_ms, config.frame_length_ms);
+}
+
+}  // namespace
+
+TEST(FrameLengthControllerTest, DecreaseTo20MsOnHighUplinkBandwidth) {
+  auto controller =
+      CreateController(CreateChangeCriteriaFor20msAnd60ms(), {20, 60}, 60);
+  UpdateNetworkMetrics(controller.get(), kFl60msTo20msBandwidthBps,
+                       rtc::nullopt, kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 20);
+}
+
+TEST(FrameLengthControllerTest, DecreaseTo20MsOnHighUplinkPacketLossFraction) {
+  auto controller =
+      CreateController(CreateChangeCriteriaFor20msAnd60ms(), {20, 60}, 60);
+  UpdateNetworkMetrics(controller.get(), rtc::nullopt,
+                       kFlDecreasingPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 20);
+}
+
+TEST(FrameLengthControllerTest,
+     Maintain60MsIf20MsNotInReceiverFrameLengthRange) {
+  auto controller =
+      CreateController(CreateChangeCriteriaFor20msAnd60ms(), {60}, 60);
+  // Set FEC on that would cause frame length to decrease if receiver frame
+  // length range included 20ms.
+  CheckDecision(controller.get(), 60);
+}
+
+TEST(FrameLengthControllerTest, Maintain60MsOnMultipleConditions) {
+  // Maintain 60ms frame length if
+  // 1. |uplink_bandwidth_bps| is at medium level,
+  // 2. |uplink_packet_loss_fraction| is at medium,
+  // 3. FEC is not decided ON.
+  auto controller =
+      CreateController(CreateChangeCriteriaFor20msAnd60ms(), {20, 60}, 60);
+  UpdateNetworkMetrics(controller.get(), kMediumBandwidthBps,
+                       kMediumPacketLossFraction, kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 60);
+}
+
+TEST(FrameLengthControllerTest, IncreaseTo60MsOnMultipleConditions) {
+  // Increase to 60ms frame length if
+  // 1. |uplink_bandwidth_bps| is known to be smaller than a threshold AND
+  // 2. |uplink_packet_loss_fraction| is known to be smaller than a threshold
+  //    AND
+  // 3. FEC is not decided or OFF.
+  auto controller =
+      CreateController(CreateChangeCriteriaFor20msAnd60ms(), {20, 60}, 20);
+  UpdateNetworkMetrics(controller.get(), kFl20msTo60msBandwidthBps,
+                       kFlIncreasingPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 60);
+}
+
+TEST(FrameLengthControllerTest, IncreaseTo60MsOnVeryLowUplinkBandwidth) {
+  auto controller =
+      CreateController(CreateChangeCriteriaFor20msAnd60ms(), {20, 60}, 20);
+  // We set packet loss fraction to kFlDecreasingPacketLossFraction, which
+  // should have prevented frame length to increase, if the uplink bandwidth
+  // was not this low.
+  UpdateNetworkMetrics(controller.get(), VeryLowBitrate(20),
+                       kFlIncreasingPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 60);
+}
+
+TEST(FrameLengthControllerTest, Maintain60MsOnVeryLowUplinkBandwidth) {
+  auto controller =
+      CreateController(CreateChangeCriteriaFor20msAnd60ms(), {20, 60}, 60);
+  // We set packet loss fraction to FlDecreasingPacketLossFraction, which should
+  // have caused the frame length to decrease, if the uplink bandwidth was not
+  // this low.
+  UpdateNetworkMetrics(controller.get(), VeryLowBitrate(20),
+                       kFlIncreasingPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 60);
+}
+
+TEST(FrameLengthControllerTest, UpdateMultipleNetworkMetricsAtOnce) {
+  // This test is similar to IncreaseTo60MsOnMultipleConditions. But instead of
+  // using ::UpdateNetworkMetrics(...), which calls
+  // FrameLengthController::UpdateNetworkMetrics(...) multiple times, we
+  // we call it only once. This is to verify that
+  // FrameLengthController::UpdateNetworkMetrics(...) can handle multiple
+  // network updates at once. This is, however, not a common use case in current
+  // audio_network_adaptor_impl.cc.
+  auto controller =
+      CreateController(CreateChangeCriteriaFor20msAnd60ms(), {20, 60}, 20);
+  Controller::NetworkMetrics network_metrics;
+  network_metrics.uplink_bandwidth_bps = kFl20msTo60msBandwidthBps;
+  network_metrics.uplink_packet_loss_fraction = kFlIncreasingPacketLossFraction;
+  controller->UpdateNetworkMetrics(network_metrics);
+  CheckDecision(controller.get(), 60);
+}
+
+TEST(FrameLengthControllerTest,
+     Maintain20MsIf60MsNotInReceiverFrameLengthRange) {
+  auto controller =
+      CreateController(CreateChangeCriteriaFor20msAnd60ms(), {20}, 20);
+  // Use a low uplink bandwidth and a low uplink packet loss fraction that would
+  // cause frame length to increase if receiver frame length included 60ms.
+  UpdateNetworkMetrics(controller.get(),
+                       kFl20msTo60msBandwidthBps,
+                       kFlIncreasingPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 20);
+}
+
+TEST(FrameLengthControllerTest, Maintain20MsOnMediumUplinkBandwidth) {
+  auto controller =
+      CreateController(CreateChangeCriteriaFor20msAnd60ms(), {20, 60}, 20);
+  UpdateNetworkMetrics(controller.get(),
+                       kMediumBandwidthBps,
+                       kFlIncreasingPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 20);
+}
+
+TEST(FrameLengthControllerTest, Maintain20MsOnMediumUplinkPacketLossFraction) {
+  auto controller =
+      CreateController(CreateChangeCriteriaFor20msAnd60ms(), {20, 60}, 20);
+  // Use a low uplink bandwidth that would cause frame length to increase if
+  // uplink packet loss fraction was low.
+  UpdateNetworkMetrics(controller.get(),
+                       kFl20msTo60msBandwidthBps,
+                       kMediumPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 20);
+}
+
+TEST(FrameLengthControllerTest, Maintain60MsWhenNo120msCriteriaIsSet) {
+  auto controller =
+      CreateController(CreateChangeCriteriaFor20msAnd60ms(), {20, 60, 120}, 60);
+  UpdateNetworkMetrics(controller.get(),
+                       kFl60msTo120msBandwidthBps,
+                       kFlIncreasingPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 60);
+}
+
+TEST(FrameLengthControllerTest, From120MsTo20MsOnHighUplinkBandwidth) {
+  auto controller = CreateController(CreateChangeCriteriaFor20ms60msAnd120ms(),
+                                     {20, 60, 120}, 120);
+  // It takes two steps for frame length to go from 120ms to 20ms.
+  UpdateNetworkMetrics(controller.get(),
+                       kFl60msTo20msBandwidthBps,
+                       rtc::nullopt,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 60);
+
+  UpdateNetworkMetrics(controller.get(),
+                       kFl60msTo20msBandwidthBps,
+                       rtc::nullopt,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 20);
+}
+
+TEST(FrameLengthControllerTest, From120MsTo20MsOnHighUplinkPacketLossFraction) {
+  auto controller = CreateController(CreateChangeCriteriaFor20ms60msAnd120ms(),
+                                     {20, 60, 120}, 120);
+  // It takes two steps for frame length to go from 120ms to 20ms.
+  UpdateNetworkMetrics(controller.get(), rtc::nullopt,
+                       kFlDecreasingPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 60);
+
+  UpdateNetworkMetrics(controller.get(), rtc::nullopt,
+                       kFlDecreasingPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 20);
+}
+
+TEST(FrameLengthControllerTest, Maintain120MsOnVeryLowUplinkBandwidth) {
+  auto controller = CreateController(CreateChangeCriteriaFor20ms60msAnd120ms(),
+                                     {20, 60, 120}, 120);
+  // We set packet loss fraction to FlDecreasingPacketLossFraction, which should
+  // have caused the frame length to decrease, if the uplink bandwidth was not
+  // this low.
+  UpdateNetworkMetrics(controller.get(), VeryLowBitrate(60),
+                       kFlDecreasingPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 120);
+}
+
+TEST(FrameLengthControllerTest, From60MsTo120MsOnVeryLowUplinkBandwidth) {
+  auto controller = CreateController(CreateChangeCriteriaFor20ms60msAnd120ms(),
+                                     {20, 60, 120}, 60);
+  // We set packet loss fraction to FlDecreasingPacketLossFraction, which should
+  // have prevented frame length to increase, if the uplink bandwidth was not
+  // this low.
+  UpdateNetworkMetrics(controller.get(), VeryLowBitrate(60),
+                       kFlDecreasingPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 120);
+}
+
+TEST(FrameLengthControllerTest, From20MsTo120MsOnMultipleConditions) {
+  // Increase to 120ms frame length if
+  // 1. |uplink_bandwidth_bps| is known to be smaller than a threshold AND
+  // 2. |uplink_packet_loss_fraction| is known to be smaller than a threshold.
+  auto controller = CreateController(CreateChangeCriteriaFor20ms60msAnd120ms(),
+                                     {20, 60, 120}, 20);
+  // It takes two steps for frame length to go from 20ms to 120ms.
+  UpdateNetworkMetrics(controller.get(),
+                       kFl60msTo120msBandwidthBps,
+                       kFlIncreasingPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 60);
+  UpdateNetworkMetrics(controller.get(),
+                       kFl60msTo120msBandwidthBps,
+                       kFlIncreasingPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 120);
+}
+
+TEST(FrameLengthControllerTest, Stall60MsIf120MsNotInReceiverFrameLengthRange) {
+  auto controller =
+      CreateController(CreateChangeCriteriaFor20ms60msAnd120ms(), {20, 60}, 20);
+  UpdateNetworkMetrics(controller.get(),
+                       kFl60msTo120msBandwidthBps,
+                       kFlIncreasingPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 60);
+  UpdateNetworkMetrics(controller.get(),
+                       kFl60msTo120msBandwidthBps,
+                       kFlIncreasingPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 60);
+}
+
+TEST(FrameLengthControllerTest, CheckBehaviorOnChangingNetworkMetrics) {
+  auto controller = CreateController(CreateChangeCriteriaFor20ms60msAnd120ms(),
+                                     {20, 60, 120}, 20);
+  UpdateNetworkMetrics(controller.get(),
+                       kMediumBandwidthBps,
+                       kFlIncreasingPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 20);
+
+  UpdateNetworkMetrics(controller.get(),
+                       kFl20msTo60msBandwidthBps,
+                       kFlIncreasingPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 60);
+
+  UpdateNetworkMetrics(controller.get(),
+                       kFl60msTo120msBandwidthBps,
+                       kMediumPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 60);
+
+  UpdateNetworkMetrics(controller.get(),
+                       kFl60msTo120msBandwidthBps,
+                       kFlIncreasingPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 120);
+
+  UpdateNetworkMetrics(controller.get(),
+                       kFl120msTo60msBandwidthBps,
+                       kFlIncreasingPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 60);
+
+  UpdateNetworkMetrics(controller.get(),
+                       kMediumBandwidthBps,
+                       kFlDecreasingPacketLossFraction,
+                       kOverheadBytesPerPacket);
+  CheckDecision(controller.get(), 20);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h b/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h
new file mode 100644
index 0000000..7687446
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h
@@ -0,0 +1,52 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_INCLUDE_AUDIO_NETWORK_ADAPTOR_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_INCLUDE_AUDIO_NETWORK_ADAPTOR_H_
+
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/optional.h"
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
+
+namespace webrtc {
+
+// An AudioNetworkAdaptor optimizes the audio experience by suggesting a
+// suitable runtime configuration (bit rate, frame length, FEC, etc.) to the
+// encoder based on network metrics.
+class AudioNetworkAdaptor {
+ public:
+  virtual ~AudioNetworkAdaptor() = default;
+
+  virtual void SetUplinkBandwidth(int uplink_bandwidth_bps) = 0;
+
+  virtual void SetUplinkPacketLossFraction(
+      float uplink_packet_loss_fraction) = 0;
+
+  virtual void SetUplinkRecoverablePacketLossFraction(
+      float uplink_recoverable_packet_loss_fraction) = 0;
+
+  virtual void SetRtt(int rtt_ms) = 0;
+
+  virtual void SetTargetAudioBitrate(int target_audio_bitrate_bps) = 0;
+
+  virtual void SetOverhead(size_t overhead_bytes_per_packet) = 0;
+
+  virtual AudioEncoderRuntimeConfig GetEncoderRuntimeConfig() = 0;
+
+  virtual void StartDebugDump(FILE* file_handle) = 0;
+
+  virtual void StopDebugDump() = 0;
+
+  virtual ANAStats GetStats() const = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_INCLUDE_AUDIO_NETWORK_ADAPTOR_H_
diff --git a/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h b/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h
new file mode 100644
index 0000000..874fc97
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h
@@ -0,0 +1,49 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_INCLUDE_AUDIO_NETWORK_ADAPTOR_CONFIG_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_INCLUDE_AUDIO_NETWORK_ADAPTOR_CONFIG_H_
+
+#include "api/optional.h"
+
+namespace webrtc {
+
+struct AudioEncoderRuntimeConfig {
+  AudioEncoderRuntimeConfig();
+  AudioEncoderRuntimeConfig(const AudioEncoderRuntimeConfig& other);
+  ~AudioEncoderRuntimeConfig();
+  AudioEncoderRuntimeConfig& operator=(const AudioEncoderRuntimeConfig& other);
+  bool operator==(const AudioEncoderRuntimeConfig& other) const;
+  rtc::Optional<int> bitrate_bps;
+  rtc::Optional<int> frame_length_ms;
+  // Note: This is what we tell the encoder. It doesn't have to reflect
+  // the actual NetworkMetrics; it's subject to our decision.
+  rtc::Optional<float> uplink_packet_loss_fraction;
+  rtc::Optional<bool> enable_fec;
+  rtc::Optional<bool> enable_dtx;
+
+  // Some encoders can encode fewer channels than the actual input to make
+  // better use of the bandwidth. |num_channels| sets the number of channels
+  // to encode.
+  rtc::Optional<size_t> num_channels;
+
+  // This is true if the last frame length change was an increase, and otherwise
+  // false.
+  // The value of this boolean is used to apply a different offset to the
+  // per-packet overhead that is reported by the BWE. The exact offset value
+  // is most important right after a frame length change, because the frame
+  // length change affects the overhead. In the steady state, the exact value is
+  // not important because the BWE will compensate.
+  bool last_fl_change_increase = false;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_INCLUDE_AUDIO_NETWORK_ADAPTOR_CONFIG_H_
diff --git a/modules/audio_coding/audio_network_adaptor/mock/mock_audio_network_adaptor.h b/modules/audio_coding/audio_network_adaptor/mock/mock_audio_network_adaptor.h
new file mode 100644
index 0000000..15dc741
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/mock/mock_audio_network_adaptor.h
@@ -0,0 +1,49 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_AUDIO_NETWORK_ADAPTOR_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_AUDIO_NETWORK_ADAPTOR_H_
+
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockAudioNetworkAdaptor : public AudioNetworkAdaptor {
+ public:
+  virtual ~MockAudioNetworkAdaptor() { Die(); }
+  MOCK_METHOD0(Die, void());
+
+  MOCK_METHOD1(SetUplinkBandwidth, void(int uplink_bandwidth_bps));
+
+  MOCK_METHOD1(SetUplinkPacketLossFraction,
+               void(float uplink_packet_loss_fraction));
+
+  MOCK_METHOD1(SetUplinkRecoverablePacketLossFraction,
+               void(float uplink_recoverable_packet_loss_fraction));
+
+  MOCK_METHOD1(SetRtt, void(int rtt_ms));
+
+  MOCK_METHOD1(SetTargetAudioBitrate, void(int target_audio_bitrate_bps));
+
+  MOCK_METHOD1(SetOverhead, void(size_t overhead_bytes_per_packet));
+
+  MOCK_METHOD0(GetEncoderRuntimeConfig, AudioEncoderRuntimeConfig());
+
+  MOCK_METHOD1(StartDebugDump, void(FILE* file_handle));
+
+  MOCK_METHOD0(StopDebugDump, void());
+
+  MOCK_CONST_METHOD0(GetStats, ANAStats());
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_AUDIO_NETWORK_ADAPTOR_H_
diff --git a/modules/audio_coding/audio_network_adaptor/mock/mock_controller.h b/modules/audio_coding/audio_network_adaptor/mock/mock_controller.h
new file mode 100644
index 0000000..df28e9e
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/mock/mock_controller.h
@@ -0,0 +1,30 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_CONTROLLER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_CONTROLLER_H_
+
+#include "modules/audio_coding/audio_network_adaptor/controller.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockController : public Controller {
+ public:
+  virtual ~MockController() { Die(); }
+  MOCK_METHOD0(Die, void());
+  MOCK_METHOD1(UpdateNetworkMetrics,
+               void(const NetworkMetrics& network_metrics));
+  MOCK_METHOD1(MakeDecision, void(AudioEncoderRuntimeConfig* config));
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_CONTROLLER_H_
diff --git a/modules/audio_coding/audio_network_adaptor/mock/mock_controller_manager.h b/modules/audio_coding/audio_network_adaptor/mock/mock_controller_manager.h
new file mode 100644
index 0000000..8d410a7
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/mock/mock_controller_manager.h
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_CONTROLLER_MANAGER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_CONTROLLER_MANAGER_H_
+
+#include <vector>
+
+#include "modules/audio_coding/audio_network_adaptor/controller_manager.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockControllerManager : public ControllerManager {
+ public:
+  virtual ~MockControllerManager() { Die(); }
+  MOCK_METHOD0(Die, void());
+  MOCK_METHOD1(
+      GetSortedControllers,
+      std::vector<Controller*>(const Controller::NetworkMetrics& metrics));
+  MOCK_CONST_METHOD0(GetControllers, std::vector<Controller*>());
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_CONTROLLER_MANAGER_H_
diff --git a/modules/audio_coding/audio_network_adaptor/mock/mock_debug_dump_writer.h b/modules/audio_coding/audio_network_adaptor/mock/mock_debug_dump_writer.h
new file mode 100644
index 0000000..06650ab
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/mock/mock_debug_dump_writer.h
@@ -0,0 +1,40 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_DEBUG_DUMP_WRITER_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_DEBUG_DUMP_WRITER_H_
+
+#include "modules/audio_coding/audio_network_adaptor/debug_dump_writer.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockDebugDumpWriter : public DebugDumpWriter {
+ public:
+  virtual ~MockDebugDumpWriter() { Die(); }
+  MOCK_METHOD0(Die, void());
+
+  MOCK_METHOD2(DumpEncoderRuntimeConfig,
+               void(const AudioEncoderRuntimeConfig& config,
+                    int64_t timestamp));
+  MOCK_METHOD2(DumpNetworkMetrics,
+               void(const Controller::NetworkMetrics& metrics,
+                    int64_t timestamp));
+#if WEBRTC_ENABLE_PROTOBUF
+  MOCK_METHOD2(DumpControllerManagerConfig,
+               void(const audio_network_adaptor::config::ControllerManager&
+                        controller_manager_config,
+                    int64_t timestamp));
+#endif
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_MOCK_MOCK_DEBUG_DUMP_WRITER_H_
diff --git a/modules/audio_coding/audio_network_adaptor/parse_ana_dump.py b/modules/audio_coding/audio_network_adaptor/parse_ana_dump.py
new file mode 100755
index 0000000..a52b065
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/parse_ana_dump.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python2
+#  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+#  Use of this source code is governed by a BSD-style license
+#  that can be found in the LICENSE file in the root of the source
+#  tree. An additional intellectual property rights grant can be found
+#  in the file PATENTS.  All contributing project authors may
+#  be found in the AUTHORS file in the root of the source tree.
+
+#  To run this script please copy "out/<build_name>//pyproto/webrtc/modules/
+#  audio_coding/audio_network_adaptor/debug_dump_pb2.py" to this folder.
+#  The you can run this script with:
+#  "python parse_ana_dump.py -m uplink_bandwidth_bps -f dump_file.dat"
+#  You can add as may metrics or decisions to the plot as you like.
+#  form more information call:
+#  "python parse_ana_dump.py --help"
+
+import struct
+from optparse import OptionParser
+
+import matplotlib.pyplot as plt
+
+import debug_dump_pb2
+
+
+def GetNextMessageSize(file_to_parse):
+  data = file_to_parse.read(4)
+  if data == '':
+    return 0
+  return struct.unpack('<I', data)[0]
+
+
+def GetNextMessageFromFile(file_to_parse):
+  message_size = GetNextMessageSize(file_to_parse)
+  if message_size == 0:
+    return None
+  try:
+    event = debug_dump_pb2.Event()
+    event.ParseFromString(file_to_parse.read(message_size))
+  except IOError:
+    print 'Invalid message in file'
+    return None
+  return event
+
+
+def InitMetrics():
+  metrics = {}
+  event = debug_dump_pb2.Event()
+  for metric in event.network_metrics.DESCRIPTOR.fields:
+    metrics[metric.name] = {'time': [], 'value': []}
+  return metrics
+
+
+def InitDecisions():
+  decisions = {}
+  event = debug_dump_pb2.Event()
+  for decision in event.encoder_runtime_config.DESCRIPTOR.fields:
+    decisions[decision.name] = {'time': [], 'value': []}
+  return decisions
+
+
+def ParseAnaDump(dump_file_to_parse):
+  with open(dump_file_to_parse, 'rb') as file_to_parse:
+    metrics = InitMetrics()
+    decisions = InitDecisions()
+    first_time_stamp = None
+    while True:
+      event = GetNextMessageFromFile(file_to_parse)
+      if event == None:
+        break
+      if first_time_stamp == None:
+        first_time_stamp = event.timestamp
+      if event.type == debug_dump_pb2.Event.ENCODER_RUNTIME_CONFIG:
+        for decision in event.encoder_runtime_config.DESCRIPTOR.fields:
+          if event.encoder_runtime_config.HasField(decision.name):
+            decisions[decision.name]['time'].append(event.timestamp -
+                                                    first_time_stamp)
+            decisions[decision.name]['value'].append(
+                getattr(event.encoder_runtime_config, decision.name))
+      if event.type == debug_dump_pb2.Event.NETWORK_METRICS:
+        for metric in event.network_metrics.DESCRIPTOR.fields:
+          if event.network_metrics.HasField(metric.name):
+            metrics[metric.name]['time'].append(event.timestamp -
+                                                first_time_stamp)
+            metrics[metric.name]['value'].append(
+                getattr(event.network_metrics, metric.name))
+  return (metrics, decisions)
+
+
+def main():
+  parser = OptionParser()
+  parser.add_option(
+      "-f", "--dump_file", dest="dump_file_to_parse", help="dump file to parse")
+  parser.add_option(
+      '-m',
+      '--metric_plot',
+      default=[],
+      type=str,
+      help='metric key (name of the metric) to plot',
+      dest='metric_keys',
+      action='append')
+
+  parser.add_option(
+      '-d',
+      '--decision_plot',
+      default=[],
+      type=str,
+      help='decision key (name of the decision) to plot',
+      dest='decision_keys',
+      action='append')
+
+  options = parser.parse_args()[0]
+  if options.dump_file_to_parse == None:
+    print "No dump file to parse is set.\n"
+    parser.print_help()
+    exit()
+  (metrics, decisions) = ParseAnaDump(options.dump_file_to_parse)
+  metric_keys = options.metric_keys
+  decision_keys = options.decision_keys
+  plot_count = len(metric_keys) + len(decision_keys)
+  if plot_count == 0:
+    print "You have to set at least one metric or decision to plot.\n"
+    parser.print_help()
+    exit()
+  plots = []
+  if plot_count == 1:
+    f, mp_plot = plt.subplots()
+    plots.append(mp_plot)
+  else:
+    f, mp_plots = plt.subplots(plot_count, sharex=True)
+    plots.extend(mp_plots.tolist())
+
+  for key in metric_keys:
+    plot = plots.pop()
+    plot.grid(True)
+    plot.set_title(key + " (metric)")
+    plot.plot(metrics[key]['time'], metrics[key]['value'])
+  for key in decision_keys:
+    plot = plots.pop()
+    plot.grid(True)
+    plot.set_title(key + " (decision)")
+    plot.plot(decisions[key]['time'], decisions[key]['value'])
+  f.subplots_adjust(hspace=0.3)
+  plt.show()
+
+if __name__ == "__main__":
+  main()
diff --git a/modules/audio_coding/audio_network_adaptor/util/threshold_curve.h b/modules/audio_coding/audio_network_adaptor/util/threshold_curve.h
new file mode 100644
index 0000000..0375386
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/util/threshold_curve.h
@@ -0,0 +1,118 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_UTIL_THRESHOLD_CURVE_H_
+#define MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_UTIL_THRESHOLD_CURVE_H_
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+class ThresholdCurve {
+ public:
+  struct Point {
+    constexpr Point(float x, float y) : x(x), y(y) {}
+    float x;
+    float y;
+  };
+
+  // ThresholdCurve defines a curve. The curve is characterized by the two
+  // conjunction points: A and B. The curve segments the metric space into
+  // three domains - above the curve, on it and below it.
+  //
+  // y-axis ^  |
+  //        | A|
+  //        |   \    A: (a.x, a.y)
+  //        |    \   B: (b.x, b.y)
+  //        |    B\________
+  //        |---------------> bandwidth
+  //
+  // If either a.x == b.x or a.y == b.y, the curve can be defined
+  // by a single point. (We merge the two points into one - either the lower or
+  // the leftmost one - for easier treatment.)
+  //
+  // y-axis ^  |
+  //        |  |
+  //        |  |
+  //        |  |
+  //        | P|__________
+  //        |---------------> bandwidth
+  ThresholdCurve(const Point& left, const Point& right)
+      : a(GetPoint(left, right, true)),
+        b(GetPoint(left, right, false)),
+        slope(b.x - a.x == 0.0f ? 0.0f : (b.y - a.y) / (b.x - a.x)),
+        offset(a.y - slope * a.x) {
+    // TODO(eladalon): We might want to introduce some numerical validations.
+  }
+
+  ThresholdCurve(float a_x, float a_y, float b_x, float b_y)
+      : ThresholdCurve(Point{a_x, a_y}, Point{b_x, b_y}) {}
+
+  // Checks if a point is strictly below the curve.
+  bool IsBelowCurve(const Point& p) const {
+    if (p.x < a.x) {
+      return true;
+    } else if (p.x == a.x) {
+      // In principle, we could merge this into the next else, but to avoid
+      // numerical errors, we treat it separately.
+      return p.y < a.y;
+    } else if (a.x < p.x && p.x < b.x) {
+      return p.y < offset + slope * p.x;
+    } else {  // if (b.x <= p.x)
+      return p.y < b.y;
+    }
+  }
+
+  // Checks if a point is strictly above the curve.
+  bool IsAboveCurve(const Point& p) const {
+    if (p.x <= a.x) {
+      return false;
+    } else if (a.x < p.x && p.x < b.x) {
+      return p.y > offset + slope * p.x;
+    } else {  // if (b.x <= p.x)
+      return p.y > b.y;
+    }
+  }
+
+  bool operator<=(const ThresholdCurve& rhs) const {
+    // This curve is <= the rhs curve if no point from this curve is
+    // above a corresponding point from the rhs curve.
+    return !IsBelowCurve(rhs.a) && !IsBelowCurve(rhs.b) &&
+           !rhs.IsAboveCurve(a) && !rhs.IsAboveCurve(b);
+  }
+
+ private:
+  static const Point& GetPoint(const Point& left,
+                               const Point& right,
+                               bool is_for_left) {
+    RTC_DCHECK_LE(left.x, right.x);
+    RTC_DCHECK_GE(left.y, right.y);
+
+    // Same X-value or Y-value triggers merging both points to the
+    // lower and/or left of the two points, respectively.
+    if (left.x == right.x) {
+      return right;
+    } else if (left.y == right.y) {
+      return left;
+    }
+
+    // If unmerged, boolean flag determines which of the points is desired.
+    return is_for_left ? left : right;
+  }
+
+  const Point a;
+  const Point b;
+  const float slope;
+  const float offset;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_AUDIO_NETWORK_ADAPTOR_UTIL_THRESHOLD_CURVE_H_
diff --git a/modules/audio_coding/audio_network_adaptor/util/threshold_curve_unittest.cc b/modules/audio_coding/audio_network_adaptor/util/threshold_curve_unittest.cc
new file mode 100644
index 0000000..0375e76
--- /dev/null
+++ b/modules/audio_coding/audio_network_adaptor/util/threshold_curve_unittest.cc
@@ -0,0 +1,631 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "modules/audio_coding/audio_network_adaptor/util/threshold_curve.h"
+#include "test/gtest.h"
+
+// A threshold curve divides 2D space into three domains - below, on and above
+// the threshold curve.
+// The curve is defined by two points. Those points, P1 and P2, are ordered so
+// that (P1.x <= P2.x && P1.y >= P2.y).
+// The part of the curve which is between the two points is hereon referred
+// to as the "segment".
+// A "ray" extends from P1 directly upwards into infinity; that's the "vertical
+// ray". Likewise, a "horizontal ray" extends from P2 directly rightwards.
+//
+//  ^   |                         //
+//  |   | vertical ray            //
+//  |   |                         //
+//  |   |                         //
+//  | P1|                         //
+//  |    \                        //
+//  |     \ segment               //
+//  |      \                      //
+//  |       \    horizontal ray   //
+//  |     P2 ------------------   //
+//  *---------------------------> //
+
+namespace webrtc {
+
+namespace {
+enum RelativePosition { kBelow, kOn, kAbove };
+
+void CheckRelativePosition(const ThresholdCurve& curve,
+                           ThresholdCurve::Point point,
+                           RelativePosition pos) {
+  RTC_CHECK(pos == kBelow || pos == kOn || pos == kAbove);
+
+  EXPECT_EQ(pos == kBelow, curve.IsBelowCurve(point));
+  EXPECT_EQ(pos == kAbove, curve.IsAboveCurve(point));
+}
+}  // namespace
+
+// Test that the curve correctly reports the below/above position of points,
+// when the curve is a "normal" one - P1 and P2 are different in both their
+// X and Y values.
+TEST(ThresholdCurveTest, PointPositionToCommonCurve) {
+  // The points (P1-P2) define the curve.           //
+  // All other points are above/below/on the curve. //
+  //                                                //
+  //  ^                                             //
+  //  |     |                                       //
+  //  |  A  F    J  R   V                           //
+  //  |     |                                       //
+  //  |  B  P1   K  S   W                           //
+  //  |      \                                      //
+  //  |       \                                     //
+  //  |        \ L                                  //
+  //  |         \                                   //
+  //  |  C  G    M  T   X                           //
+  //  |           \                                 //
+  //  |          N \                                //
+  //  |             \                               //
+  //  |  D  H    O  P2--Y----------------           //
+  //  |  E  I    Q  U   Z                           //
+  //  *---------------------------------->          //
+  constexpr ThresholdCurve::Point p1{1000, 2000};
+  constexpr ThresholdCurve::Point p2{2000, 1000};
+
+  RTC_CHECK_GT((p1.x + p2.x) / 2, p1.x);
+  RTC_CHECK_LT((p1.x + p2.x) / 2, p2.x);
+  RTC_CHECK_LT((p1.y + p2.y) / 2, p1.y);
+  RTC_CHECK_GT((p1.y + p2.y) / 2, p2.y);
+
+  const ThresholdCurve curve(p1, p2);
+
+  {
+    // All cases where the point lies to the left of P1.
+    constexpr float x = p1.x - 1;
+    CheckRelativePosition(curve, {x, p1.y + 1}, kBelow);           // A
+    CheckRelativePosition(curve, {x, p1.y + 0}, kBelow);           // B
+    CheckRelativePosition(curve, {x, (p1.y + p2.y) / 2}, kBelow);  // C
+    CheckRelativePosition(curve, {x, p2.y + 0}, kBelow);           // D
+    CheckRelativePosition(curve, {x, p2.y - 1}, kBelow);           // E
+  }
+
+  {
+    // All cases where the point has the same x-value as P1.
+    constexpr float x = p1.x;
+    CheckRelativePosition(curve, {x, p1.y + 1}, kOn);              // F
+    CheckRelativePosition(curve, {x, p1.y + 0}, kOn);              // P1
+    CheckRelativePosition(curve, {x, (p1.y + p2.y) / 2}, kBelow);  // G
+    CheckRelativePosition(curve, {x, p2.y + 0}, kBelow);           // H
+    CheckRelativePosition(curve, {x, p2.y - 1}, kBelow);           // I
+  }
+
+  {
+    // To make sure we're really covering all of the cases, make sure that P1
+    // and P2 were chosen so that L would really be below K, and O would really
+    // be below N. (This would not hold if the Y values are too close together.)
+    RTC_CHECK_LT(((p1.y + p2.y) / 2) + 1, p1.y);
+    RTC_CHECK_LT(p2.y, ((p1.y + p2.y) / 2) - 1);
+
+    // All cases where the point's x-value is between P1 and P2.
+    constexpr float x = (p1.x + p2.x) / 2;
+    CheckRelativePosition(curve, {x, p1.y + 1}, kAbove);                 // J
+    CheckRelativePosition(curve, {x, p1.y + 0}, kAbove);                 // K
+    CheckRelativePosition(curve, {x, ((p1.y + p2.y) / 2) + 1}, kAbove);  // L
+    CheckRelativePosition(curve, {x, (p1.y + p2.y) / 2}, kOn);           // M
+    CheckRelativePosition(curve, {x, ((p1.y + p2.y) / 2) - 1}, kBelow);  // N
+    CheckRelativePosition(curve, {x, p2.y + 0}, kBelow);                 // O
+    CheckRelativePosition(curve, {x, p2.y - 1}, kBelow);                 // Q
+  }
+
+  {
+    // All cases where the point has the same x-value as P2.
+    constexpr float x = p2.x;
+    CheckRelativePosition(curve, {x, p1.y + 1}, kAbove);           // R
+    CheckRelativePosition(curve, {x, p1.y + 0}, kAbove);           // S
+    CheckRelativePosition(curve, {x, (p1.y + p2.y) / 2}, kAbove);  // T
+    CheckRelativePosition(curve, {x, p2.y + 0}, kOn);              // P2
+    CheckRelativePosition(curve, {x, p2.y - 1}, kBelow);           // U
+  }
+
+  {
+    // All cases where the point lies to the right of P2.
+    constexpr float x = p2.x + 1;
+    CheckRelativePosition(curve, {x, p1.y + 1}, kAbove);           // V
+    CheckRelativePosition(curve, {x, p1.y + 0}, kAbove);           // W
+    CheckRelativePosition(curve, {x, (p1.y + p2.y) / 2}, kAbove);  // X
+    CheckRelativePosition(curve, {x, p2.y + 0}, kOn);              // Y
+    CheckRelativePosition(curve, {x, p2.y - 1}, kBelow);           // Z
+  }
+}
+
+// Test that the curve correctly reports the below/above position of points,
+// when the curve is defined by two points with the same Y value.
+TEST(ThresholdCurveTest, PointPositionToCurveWithHorizaontalSegment) {
+  // The points (P1-P2) define the curve.
+  // All other points are above/below/on the curve.
+  //
+  //  ^
+  //  |    |
+  //  |    |
+  //  | A  D   F  I  K
+  //  |    |
+  //  |    |
+  //  | B  P1--G--P2-L--
+  //  | C  E   H  J  M
+  //  *------------------>
+
+  constexpr ThresholdCurve::Point p1{100, 200};
+  constexpr ThresholdCurve::Point p2{p1.x + 1, p1.y};
+
+  RTC_CHECK_GT((p1.x + p2.x) / 2, p1.x);
+  RTC_CHECK_LT((p1.x + p2.x) / 2, p2.x);
+
+  const ThresholdCurve curve(p1, p2);
+
+  {
+    // All cases where the point lies to the left of P1.
+    constexpr float x = p1.x - 1;
+    CheckRelativePosition(curve, {x, p1.y + 1}, kBelow);  // A
+    CheckRelativePosition(curve, {x, p1.y + 0}, kBelow);  // B
+    CheckRelativePosition(curve, {x, p1.y - 1}, kBelow);  // C
+  }
+
+  {
+    // All cases where the point has the same x-value as P1.
+    constexpr float x = p1.x;
+    CheckRelativePosition(curve, {x, p1.y + 1}, kOn);     // D
+    CheckRelativePosition(curve, {x, p1.y + 0}, kOn);     // P1
+    CheckRelativePosition(curve, {x, p1.y - 1}, kBelow);  // E
+  }
+
+  {
+    // All cases where the point's x-value is between P1 and P2.
+    constexpr float x = (p1.x + p2.x) / 2;
+    CheckRelativePosition(curve, {x, p1.y + 1}, kAbove);  // F
+    CheckRelativePosition(curve, {x, p1.y + 0}, kOn);     // G
+    CheckRelativePosition(curve, {x, p1.y - 1}, kBelow);  // H
+  }
+
+  {
+    // All cases where the point has the same x-value as P2.
+    constexpr float x = p2.x;
+    CheckRelativePosition(curve, {x, p1.y + 1}, kAbove);  // I
+    CheckRelativePosition(curve, {x, p1.y + 0}, kOn);     // P2
+    CheckRelativePosition(curve, {x, p1.y - 1}, kBelow);  // J
+  }
+
+  {
+    // All cases where the point lies to the right of P2.
+    constexpr float x = p2.x + 1;
+    CheckRelativePosition(curve, {x, p1.y + 1}, kAbove);  // K
+    CheckRelativePosition(curve, {x, p1.y + 0}, kOn);     // L
+    CheckRelativePosition(curve, {x, p1.y - 1}, kBelow);  // M
+  }
+}
+
+// Test that the curve correctly reports the below/above position of points,
+// when the curve is defined by two points with the same X value.
+TEST(ThresholdCurveTest, PointPositionToCurveWithVerticalSegment) {
+  // The points (P1-P2) define the curve.
+  // All other points are above/below/on the curve.
+  //
+  //  ^
+  //  |    |
+  //  | A  B   C
+  //  |    |
+  //  | D  P1  E
+  //  |    |
+  //  | F  G   H
+  //  |    |
+  //  | I  P2--J------
+  //  | K  L   M
+  //  *------------------>
+
+  constexpr ThresholdCurve::Point p1{100, 200};
+  constexpr ThresholdCurve::Point p2{p1.x, p1.y - 1};
+
+  constexpr float left = p1.x - 1;
+  constexpr float on = p1.x;
+  constexpr float right = p1.x + 1;
+
+  RTC_CHECK_LT((p1.y + p2.y) / 2, p1.y);
+  RTC_CHECK_GT((p1.y + p2.y) / 2, p2.y);
+
+  const ThresholdCurve curve(p1, p2);
+
+  {
+    // All cases where the point lies above P1.
+    constexpr float y = p1.y + 1;
+    CheckRelativePosition(curve, {left, y}, kBelow);   // A
+    CheckRelativePosition(curve, {on, y}, kOn);        // B
+    CheckRelativePosition(curve, {right, y}, kAbove);  // C
+  }
+
+  {
+    // All cases where the point has the same y-value as P1.
+    constexpr float y = p1.y;
+    CheckRelativePosition(curve, {left, y}, kBelow);   // D
+    CheckRelativePosition(curve, {on, y}, kOn);        // P1
+    CheckRelativePosition(curve, {right, y}, kAbove);  // E
+  }
+
+  {
+    // All cases where the point's y-value is between P1 and P2.
+    constexpr float y = (p1.y + p2.y) / 2;
+    CheckRelativePosition(curve, {left, y}, kBelow);   // F
+    CheckRelativePosition(curve, {on, y}, kOn);        // G
+    CheckRelativePosition(curve, {right, y}, kAbove);  // H
+  }
+
+  {
+    // All cases where the point has the same y-value as P2.
+    constexpr float y = p2.y;
+    CheckRelativePosition(curve, {left, y}, kBelow);  // I
+    CheckRelativePosition(curve, {on, y}, kOn);       // P2
+    CheckRelativePosition(curve, {right, y}, kOn);    // J
+  }
+
+  {
+    // All cases where the point lies below P2.
+    constexpr float y = p2.y - 1;
+    CheckRelativePosition(curve, {left, y}, kBelow);   // K
+    CheckRelativePosition(curve, {on, y}, kBelow);     // L
+    CheckRelativePosition(curve, {right, y}, kBelow);  // M
+  }
+}
+
+// Test that the curve correctly reports the below/above position of points,
+// when the curve is defined by two points which are identical.
+TEST(ThresholdCurveTest, PointPositionCurveWithNullSegment) {
+  // The points (P1-P2) define the curve.
+  // All other points are above/below/on the curve.
+  //
+  //  ^
+  //  |    |
+  //  | A  D   F
+  //  |    |
+  //  | B  P---G------
+  //  | C  E   H
+  //  *------------------>
+
+  constexpr ThresholdCurve::Point p{100, 200};
+
+  const ThresholdCurve curve(p, p);
+
+  {
+    // All cases where the point lies to the left of P.
+    constexpr float x = p.x - 1;
+    CheckRelativePosition(curve, {x, p.y + 1}, kBelow);  // A
+    CheckRelativePosition(curve, {x, p.y + 0}, kBelow);  // B
+    CheckRelativePosition(curve, {x, p.y - 1}, kBelow);  // C
+  }
+
+  {
+    // All cases where the point has the same x-value as P.
+    constexpr float x = p.x + 0;
+    CheckRelativePosition(curve, {x, p.y + 1}, kOn);     // D
+    CheckRelativePosition(curve, {x, p.y + 0}, kOn);     // P
+    CheckRelativePosition(curve, {x, p.y - 1}, kBelow);  // E
+  }
+
+  {
+    // All cases where the point lies to the right of P.
+    constexpr float x = p.x + 1;
+    CheckRelativePosition(curve, {x, p.y + 1}, kAbove);  // F
+    CheckRelativePosition(curve, {x, p.y + 0}, kOn);     // G
+    CheckRelativePosition(curve, {x, p.y - 1}, kBelow);  // H
+  }
+}
+
+// Test that the relative position of two curves is computed correctly when
+// the two curves have the same projection on the X-axis.
+TEST(ThresholdCurveTest, TwoCurvesSegmentHasSameProjectionAxisX) {
+  //  ^                        //
+  //  | C1 + C2                //
+  //  |  |                     //
+  //  |  |\                    //
+  //  |  | \                   //
+  //  |   \ \                  //
+  //  |    \ \                 //
+  //  |     \ \                //
+  //  |      \ -------- C2     //
+  //  |       --------- C1     //
+  //  *--------------------->  //
+
+  constexpr ThresholdCurve::Point c1_left{5, 10};
+  constexpr ThresholdCurve::Point c1_right{10, 5};
+  const ThresholdCurve c1_curve(c1_left, c1_right);
+
+  // Same x-values, but higher on Y. (Can be parallel, but doesn't have to be.)
+  constexpr ThresholdCurve::Point c2_left{c1_left.x, c1_left.y + 20};
+  constexpr ThresholdCurve::Point c2_right{c1_right.x, c1_right.y + 10};
+  const ThresholdCurve c2_curve(c2_left, c2_right);
+
+  EXPECT_TRUE(c1_curve <= c2_curve);
+  EXPECT_FALSE(c2_curve <= c1_curve);
+}
+
+// Test that the relative position of two curves is computed correctly when
+// the higher curve's projection on the X-axis is a strict subset of the
+// lower curve's projection on the X-axis (on both ends).
+TEST(ThresholdCurveTest, TwoCurvesSegmentOfHigherSubsetProjectionAxisX) {
+  //  ^                       //
+  //  | C1    C2              //
+  //  |  |    |               //
+  //  |  |    |               //
+  //  |   \   |               //
+  //  |    \  |               //
+  //  |     \ \               //
+  //  |      \ \              //
+  //  |       \ --------- C2  //
+  //  |        \              //
+  //  |         \             //
+  //  |          ---------C1  //
+  //  *---------------------> //
+
+  constexpr ThresholdCurve::Point c1_left{5, 10};
+  constexpr ThresholdCurve::Point c1_right{10, 5};
+  const ThresholdCurve c1_curve(c1_left, c1_right);
+
+  constexpr ThresholdCurve::Point c2_left{6, 11};
+  constexpr ThresholdCurve::Point c2_right{9, 7};
+  const ThresholdCurve c2_curve(c2_left, c2_right);
+
+  EXPECT_TRUE(c1_curve <= c2_curve);
+  EXPECT_FALSE(c2_curve <= c1_curve);
+}
+
+// Test that the relative position of two curves is computed correctly when
+// the higher curve's right point is above lower curve's horizontal ray (meaning
+// the higher curve's projection on the X-axis extends further right than
+// the lower curve's).
+TEST(ThresholdCurveTest,
+     TwoCurvesRightPointOfHigherCurveAboveHorizontalRayOfLower) {
+  //  ^                        //
+  //  | C1 + C2                //
+  //  |  |                     //
+  //  |  |\                    //
+  //  |  | \                   //
+  //  |  |  \                  //
+  //  |  |   \                 //
+  //  |  |    \                //
+  //  |   \    \               //
+  //  |    \    \              //
+  //  |     \    \             //
+  //  |      \    ----- C2     //
+  //  |       --------- C1     //
+  //  *--------------------->  //
+
+  constexpr ThresholdCurve::Point c1_left{5, 10};
+  constexpr ThresholdCurve::Point c1_right{10, 5};
+  const ThresholdCurve c1_curve(c1_left, c1_right);
+
+  constexpr ThresholdCurve::Point c2_left{c1_left.x, c1_left.y + 1};
+  constexpr ThresholdCurve::Point c2_right{c1_right.x + 1, c1_right.y + 1};
+  const ThresholdCurve c2_curve(c2_left, c2_right);
+
+  EXPECT_TRUE(c1_curve <= c2_curve);
+  EXPECT_FALSE(c2_curve <= c1_curve);
+}
+
+// Test that the relative position of two curves is computed correctly when
+// the higher curve's points are on the lower curve's rays (left point on the
+// veritcal ray, right point on the horizontal ray).
+TEST(ThresholdCurveTest, TwoCurvesPointsOfHigherOnRaysOfLower) {
+  //  ^
+  //  | C1 + C2               //
+  //  |  |                    //
+  //  |  |\                   //
+  //  |  | \                  //
+  //  |   \ \                 //
+  //  |    \ \                //
+  //  |     \ \               //
+  //  |      \ \              //
+  //  |       ----- C1 + C2   //
+  //  *---------------------> //
+
+  constexpr ThresholdCurve::Point c1_left{5, 10};
+  constexpr ThresholdCurve::Point c1_right{10, 5};
+  const ThresholdCurve c1_curve(c1_left, c1_right);
+
+  // Same x-values, but one of the points is higher on Y (the other isn't).
+  constexpr ThresholdCurve::Point c2_left{c1_left.x, c1_left.y + 2};
+  constexpr ThresholdCurve::Point c2_right{c1_right.x + 3, c1_right.y};
+  const ThresholdCurve c2_curve(c2_left, c2_right);
+
+  EXPECT_TRUE(c1_curve <= c2_curve);
+  EXPECT_FALSE(c2_curve <= c1_curve);
+}
+
+// Test that the relative position of two curves is computed correctly when
+// the second curve's segment intersects the first curve's vertical ray.
+TEST(ThresholdCurveTest, SecondCurveCrossesVerticalRayOfFirstCurve) {
+  //  ^                       //
+  //  | C2 C1                 //
+  //  |  | |                  //
+  //  |   \|                  //
+  //  |    |                  //
+  //  |    |\                 //
+  //  |    | \                //
+  //  |     \ \               //
+  //  |      \ \              //
+  //  |       \ \             //
+  //  |        \ ------- C2   //
+  //  |         -------- C1   //
+  //  *---------------------> //
+
+  constexpr ThresholdCurve::Point c1_left{5, 10};
+  constexpr ThresholdCurve::Point c1_right{10, 5};
+  const ThresholdCurve c1_curve(c1_left, c1_right);
+
+  constexpr ThresholdCurve::Point c2_left{c1_left.x - 1, c1_left.y + 1};
+  constexpr ThresholdCurve::Point c2_right{c1_right.x, c1_right.y + 1};
+  const ThresholdCurve c2_curve(c2_left, c2_right);
+
+  EXPECT_FALSE(c1_curve <= c2_curve);
+  EXPECT_FALSE(c2_curve <= c1_curve);
+}
+
+// Test that the relative position of two curves is computed correctly when
+// the second curve's segment intersects the first curve's horizontal ray.
+TEST(ThresholdCurveTest, SecondCurveCrossesHorizontalRayOfFirstCurve) {
+  //  ^                      //
+  //  | C1 +  C2             //
+  //  |  |                   //
+  //  |  |\                  //
+  //  |  \ \                 //
+  //  |   \ \                //
+  //  |    \ \               //
+  //  |     \ \              //
+  //  |      ----------- C1  //
+  //  |         \            //
+  //  |          ------- C2  //
+  //  *--------------------> //
+
+  constexpr ThresholdCurve::Point c1_left{5, 10};
+  constexpr ThresholdCurve::Point c1_right{10, 5};
+  const ThresholdCurve c1_curve(c1_left, c1_right);
+
+  constexpr ThresholdCurve::Point c2_left{c1_left.x, c1_left.y + 1};
+  constexpr ThresholdCurve::Point c2_right{c1_right.x + 2, c1_right.y - 1};
+  const ThresholdCurve c2_curve(c2_left, c2_right);
+
+  EXPECT_FALSE(c1_curve <= c2_curve);
+  EXPECT_FALSE(c2_curve <= c1_curve);
+}
+
+// Test that the relative position of two curves is computed correctly when
+// the second curve's segment intersects the first curve's segment.
+TEST(ThresholdCurveTest, TwoCurvesWithCrossingSegments) {
+  //  ^                           //
+  //  | C2 C1                     //
+  //  | |  |                      //
+  //  | |  |                      //
+  //  | |  \                      //
+  //  | |   \                     //
+  //  |  -_  \                    //
+  //  |    -_ \                   //
+  //  |      -_\                  //
+  //  |        -_                 //
+  //  |          \-_              //
+  //  |           \ ---------- C2 //
+  //  |            ----------- C1 //
+  //  |                           //
+  //  |                           //
+  //  *-------------------------> //
+
+  constexpr ThresholdCurve::Point c1_left{5, 10};
+  constexpr ThresholdCurve::Point c1_right{10, 5};
+  const ThresholdCurve c1_curve(c1_left, c1_right);
+
+  constexpr ThresholdCurve::Point c2_left{4, 9};
+  constexpr ThresholdCurve::Point c2_right{10, 6};
+  const ThresholdCurve c2_curve(c2_left, c2_right);
+
+  // The test is structured so that the two curves intersect at (8, 7).
+  RTC_CHECK(!c1_curve.IsAboveCurve({8, 7}));
+  RTC_CHECK(!c1_curve.IsBelowCurve({8, 7}));
+  RTC_CHECK(!c2_curve.IsAboveCurve({8, 7}));
+  RTC_CHECK(!c2_curve.IsBelowCurve({8, 7}));
+
+  EXPECT_FALSE(c1_curve <= c2_curve);
+  EXPECT_FALSE(c2_curve <= c1_curve);
+}
+
+// Test that the relative position of two curves is computed correctly when
+// both curves are identical.
+TEST(ThresholdCurveTest, IdenticalCurves) {
+  //  ^                       //
+  //  |  C1 + C2              //
+  //  |  |                    //
+  //  |  |                    //
+  //  |   \                   //
+  //  |    \                  //
+  //  |     \                 //
+  //  |      ------- C1 + C2  //
+  //  *---------------------> //
+
+  constexpr ThresholdCurve::Point left{5, 10};
+  constexpr ThresholdCurve::Point right{10, 5};
+
+  const ThresholdCurve c1_curve(left, right);
+  const ThresholdCurve c2_curve(left, right);
+
+  EXPECT_TRUE(c1_curve <= c2_curve);
+  EXPECT_TRUE(c2_curve <= c1_curve);
+}
+
+// Test that the relative position of two curves is computed correctly when
+// they are "nearly identical" - the first curve's segment is contained within
+// the second curve's segment, but the second curve's segment extends further
+// to the left (which also produces separate vertical rays for the curves).
+TEST(ThresholdCurveTest, NearlyIdenticalCurvesSecondContinuesOnOtherLeftSide) {
+  //  ^                       //
+  //  | C2 C1                 //
+  //  |  | |                  //
+  //  |  | |                  //
+  //  |   \|                  //
+  //  |    |                  //
+  //  |     \                 //
+  //  |      \                //
+  //  |       \               //
+  //  |        ----- C1 + C2  //
+  //  *---------------------> //
+
+  constexpr ThresholdCurve::Point c1_left{5, 10};
+  constexpr ThresholdCurve::Point c1_right{10, 5};
+  const ThresholdCurve c1_curve(c1_left, c1_left);
+
+  constexpr ThresholdCurve::Point c2_left{c1_left.x - 1, c1_left.y + 1};
+  constexpr ThresholdCurve::Point c2_right = c1_right;
+  const ThresholdCurve c2_curve(c2_left, c2_right);
+
+  EXPECT_FALSE(c1_curve <= c2_curve);
+  EXPECT_TRUE(c2_curve <= c1_curve);
+}
+
+// Test that the relative position of two curves is computed correctly when
+// they are "nearly identical" - the first curve's segment is contained within
+// the second curve's segment, but the second curve's segment extends further
+// to the right (which also produces separate horizontal rays for the curves).
+TEST(ThresholdCurveTest, NearlyIdenticalCurvesSecondContinuesOnOtherRightSide) {
+  //  ^                       //
+  //  | C1 + C2               //
+  //  |  |                    //
+  //  |  |                    //
+  //  |   \                   //
+  //  |    \                  //
+  //  |     \                 //
+  //  |      \----------- C1  //
+  //  |       \               //
+  //  |        ---------- C2  //
+  //  *---------------------> //
+
+  constexpr ThresholdCurve::Point c1_left{5, 10};
+  constexpr ThresholdCurve::Point c1_right{10, 5};
+  const ThresholdCurve c1_curve(c1_left, c1_left);
+
+  constexpr ThresholdCurve::Point c2_left = c1_left;
+  constexpr ThresholdCurve::Point c2_right{c1_right.x + 1, c1_right.y - 1};
+  const ThresholdCurve c2_curve(c2_left, c2_right);
+
+  EXPECT_FALSE(c1_curve <= c2_curve);
+  EXPECT_TRUE(c2_curve <= c1_curve);
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// The higher-left point must be given as the first point, and the lower-right
+// point must be given as the second.
+// This necessarily produces a non-positive slope.
+TEST(ThresholdCurveTest, WrongOrderPoints) {
+  std::unique_ptr<ThresholdCurve> curve;
+  constexpr ThresholdCurve::Point left{5, 10};
+  constexpr ThresholdCurve::Point right{10, 5};
+  EXPECT_DEATH(curve.reset(new ThresholdCurve(right, left)), "");
+}
+#endif
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/OWNERS b/modules/audio_coding/codecs/OWNERS
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/modules/audio_coding/codecs/OWNERS
diff --git a/modules/audio_coding/codecs/audio_decoder.h b/modules/audio_coding/codecs/audio_decoder.h
new file mode 100644
index 0000000..b7b15cd
--- /dev/null
+++ b/modules/audio_coding/codecs/audio_decoder.h
@@ -0,0 +1,20 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file is for backwards compatibility only! Use
+// webrtc/api/audio_codecs/audio_decoder.h instead!
+// TODO(kwiberg): Remove it.
+
+#ifndef MODULES_AUDIO_CODING_CODECS_AUDIO_DECODER_H_
+#define MODULES_AUDIO_CODING_CODECS_AUDIO_DECODER_H_
+
+#include "api/audio_codecs/audio_decoder.h"
+
+#endif  // MODULES_AUDIO_CODING_CODECS_AUDIO_DECODER_H_
diff --git a/modules/audio_coding/codecs/audio_encoder.h b/modules/audio_coding/codecs/audio_encoder.h
new file mode 100644
index 0000000..010ae67
--- /dev/null
+++ b/modules/audio_coding/codecs/audio_encoder.h
@@ -0,0 +1,20 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file is for backwards compatibility only! Use
+// webrtc/api/audio_codecs/audio_encoder.h instead!
+// TODO(ossu): Remove it.
+
+#ifndef MODULES_AUDIO_CODING_CODECS_AUDIO_ENCODER_H_
+#define MODULES_AUDIO_CODING_CODECS_AUDIO_ENCODER_H_
+
+#include "api/audio_codecs/audio_encoder.h"
+
+#endif  // MODULES_AUDIO_CODING_CODECS_AUDIO_ENCODER_H_
diff --git a/modules/audio_coding/codecs/audio_format_conversion.cc b/modules/audio_coding/codecs/audio_format_conversion.cc
new file mode 100644
index 0000000..a99a28c
--- /dev/null
+++ b/modules/audio_coding/codecs/audio_format_conversion.cc
@@ -0,0 +1,88 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/audio_format_conversion.h"
+
+#include <string.h>
+
+#include "api/array_view.h"
+#include "api/optional.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/sanitizer.h"
+
+namespace webrtc {
+
+namespace {
+
+CodecInst MakeCodecInst(int payload_type,
+                        const char* name,
+                        int sample_rate,
+                        size_t num_channels) {
+  // Create a CodecInst with some fields set. The remaining fields are zeroed,
+  // but we tell MSan to consider them uninitialized.
+  CodecInst ci = {0};
+  rtc::MsanMarkUninitialized(rtc::MakeArrayView(&ci, 1));
+  ci.pltype = payload_type;
+  strncpy(ci.plname, name, sizeof(ci.plname));
+  ci.plname[sizeof(ci.plname) - 1] = '\0';
+  ci.plfreq = sample_rate;
+  ci.channels = num_channels;
+  return ci;
+}
+
+}  // namespace
+
+SdpAudioFormat CodecInstToSdp(const CodecInst& ci) {
+  if (STR_CASE_CMP(ci.plname, "g722") == 0) {
+    RTC_CHECK_EQ(16000, ci.plfreq);
+    RTC_CHECK(ci.channels == 1 || ci.channels == 2);
+    return {"g722", 8000, ci.channels};
+  } else if (STR_CASE_CMP(ci.plname, "opus") == 0) {
+    RTC_CHECK_EQ(48000, ci.plfreq);
+    RTC_CHECK(ci.channels == 1 || ci.channels == 2);
+    return ci.channels == 1
+               ? SdpAudioFormat("opus", 48000, 2)
+               : SdpAudioFormat("opus", 48000, 2, {{"stereo", "1"}});
+  } else {
+    return {ci.plname, ci.plfreq, ci.channels};
+  }
+}
+
+CodecInst SdpToCodecInst(int payload_type, const SdpAudioFormat& audio_format) {
+  if (STR_CASE_CMP(audio_format.name.c_str(), "g722") == 0) {
+    RTC_CHECK_EQ(8000, audio_format.clockrate_hz);
+    RTC_CHECK(audio_format.num_channels == 1 || audio_format.num_channels == 2);
+    return MakeCodecInst(payload_type, "g722", 16000,
+                         audio_format.num_channels);
+  } else if (STR_CASE_CMP(audio_format.name.c_str(), "opus") == 0) {
+    RTC_CHECK_EQ(48000, audio_format.clockrate_hz);
+    RTC_CHECK_EQ(2, audio_format.num_channels);
+    const int num_channels = [&] {
+      auto stereo = audio_format.parameters.find("stereo");
+      if (stereo != audio_format.parameters.end()) {
+        if (stereo->second == "0") {
+          return 1;
+        } else if (stereo->second == "1") {
+          return 2;
+        } else {
+          RTC_CHECK(false);  // Bad stereo parameter.
+        }
+      }
+      return 1;  // Default to mono.
+    }();
+    return MakeCodecInst(payload_type, "opus", 48000, num_channels);
+  } else {
+    return MakeCodecInst(payload_type, audio_format.name.c_str(),
+                         audio_format.clockrate_hz, audio_format.num_channels);
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/audio_format_conversion.h b/modules/audio_coding/codecs/audio_format_conversion.h
new file mode 100644
index 0000000..d981741
--- /dev/null
+++ b/modules/audio_coding/codecs/audio_format_conversion.h
@@ -0,0 +1,24 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_AUDIO_FORMAT_CONVERSION_H_
+#define MODULES_AUDIO_CODING_CODECS_AUDIO_FORMAT_CONVERSION_H_
+
+#include "api/audio_codecs/audio_format.h"
+#include "common_types.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+SdpAudioFormat CodecInstToSdp(const CodecInst& codec_inst);
+CodecInst SdpToCodecInst(int payload_type, const SdpAudioFormat& audio_format);
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_CODECS_AUDIO_FORMAT_CONVERSION_H_
diff --git a/modules/audio_coding/codecs/builtin_audio_decoder_factory_unittest.cc b/modules/audio_coding/codecs/builtin_audio_decoder_factory_unittest.cc
new file mode 100644
index 0000000..158a58b
--- /dev/null
+++ b/modules/audio_coding/codecs/builtin_audio_decoder_factory_unittest.cc
@@ -0,0 +1,167 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(AudioDecoderFactoryTest, CreateUnknownDecoder) {
+  rtc::scoped_refptr<AudioDecoderFactory> adf =
+      CreateBuiltinAudioDecoderFactory();
+  ASSERT_TRUE(adf);
+  EXPECT_FALSE(
+      adf->MakeAudioDecoder(SdpAudioFormat("rey", 8000, 1), rtc::nullopt));
+}
+
+TEST(AudioDecoderFactoryTest, CreatePcmu) {
+  rtc::scoped_refptr<AudioDecoderFactory> adf =
+      CreateBuiltinAudioDecoderFactory();
+  ASSERT_TRUE(adf);
+  // PCMu supports 8 kHz, and any number of channels.
+  EXPECT_FALSE(
+      adf->MakeAudioDecoder(SdpAudioFormat("pcmu", 8000, 0), rtc::nullopt));
+  EXPECT_TRUE(
+      adf->MakeAudioDecoder(SdpAudioFormat("pcmu", 8000, 1), rtc::nullopt));
+  EXPECT_TRUE(
+      adf->MakeAudioDecoder(SdpAudioFormat("pcmu", 8000, 2), rtc::nullopt));
+  EXPECT_TRUE(
+      adf->MakeAudioDecoder(SdpAudioFormat("pcmu", 8000, 3), rtc::nullopt));
+  EXPECT_FALSE(
+      adf->MakeAudioDecoder(SdpAudioFormat("pcmu", 16000, 1), rtc::nullopt));
+}
+
+TEST(AudioDecoderFactoryTest, CreatePcma) {
+  rtc::scoped_refptr<AudioDecoderFactory> adf =
+      CreateBuiltinAudioDecoderFactory();
+  ASSERT_TRUE(adf);
+  // PCMa supports 8 kHz, and any number of channels.
+  EXPECT_FALSE(
+      adf->MakeAudioDecoder(SdpAudioFormat("pcma", 8000, 0), rtc::nullopt));
+  EXPECT_TRUE(
+      adf->MakeAudioDecoder(SdpAudioFormat("pcma", 8000, 1), rtc::nullopt));
+  EXPECT_TRUE(
+      adf->MakeAudioDecoder(SdpAudioFormat("pcma", 8000, 2), rtc::nullopt));
+  EXPECT_TRUE(
+      adf->MakeAudioDecoder(SdpAudioFormat("pcma", 8000, 3), rtc::nullopt));
+  EXPECT_FALSE(
+      adf->MakeAudioDecoder(SdpAudioFormat("pcma", 16000, 1), rtc::nullopt));
+}
+
+TEST(AudioDecoderFactoryTest, CreateIlbc) {
+  rtc::scoped_refptr<AudioDecoderFactory> adf =
+      CreateBuiltinAudioDecoderFactory();
+  ASSERT_TRUE(adf);
+  // iLBC supports 8 kHz, 1 channel.
+  EXPECT_FALSE(
+      adf->MakeAudioDecoder(SdpAudioFormat("ilbc", 8000, 0), rtc::nullopt));
+#ifdef WEBRTC_CODEC_ILBC
+  EXPECT_TRUE(
+      adf->MakeAudioDecoder(SdpAudioFormat("ilbc", 8000, 1), rtc::nullopt));
+#endif
+  EXPECT_FALSE(
+      adf->MakeAudioDecoder(SdpAudioFormat("ilbc", 8000, 2), rtc::nullopt));
+  EXPECT_FALSE(
+      adf->MakeAudioDecoder(SdpAudioFormat("ilbc", 16000, 1), rtc::nullopt));
+}
+
+TEST(AudioDecoderFactoryTest, CreateIsac) {
+  rtc::scoped_refptr<AudioDecoderFactory> adf =
+      CreateBuiltinAudioDecoderFactory();
+  ASSERT_TRUE(adf);
+  // iSAC supports 16 kHz, 1 channel. The float implementation additionally
+  // supports 32 kHz, 1 channel.
+  EXPECT_FALSE(
+      adf->MakeAudioDecoder(SdpAudioFormat("isac", 16000, 0), rtc::nullopt));
+  EXPECT_TRUE(
+      adf->MakeAudioDecoder(SdpAudioFormat("isac", 16000, 1), rtc::nullopt));
+  EXPECT_FALSE(
+      adf->MakeAudioDecoder(SdpAudioFormat("isac", 16000, 2), rtc::nullopt));
+  EXPECT_FALSE(
+      adf->MakeAudioDecoder(SdpAudioFormat("isac", 8000, 1), rtc::nullopt));
+  EXPECT_FALSE(
+      adf->MakeAudioDecoder(SdpAudioFormat("isac", 48000, 1), rtc::nullopt));
+#ifdef WEBRTC_ARCH_ARM
+  EXPECT_FALSE(
+      adf->MakeAudioDecoder(SdpAudioFormat("isac", 32000, 1), rtc::nullopt));
+#else
+  EXPECT_TRUE(
+      adf->MakeAudioDecoder(SdpAudioFormat("isac", 32000, 1), rtc::nullopt));
+#endif
+}
+
+TEST(AudioDecoderFactoryTest, CreateL16) {
+  rtc::scoped_refptr<AudioDecoderFactory> adf =
+      CreateBuiltinAudioDecoderFactory();
+  ASSERT_TRUE(adf);
+  // L16 supports any clock rate, any number of channels.
+  const int clockrates[] = {8000, 16000, 32000, 48000};
+  const int num_channels[] = {1, 2, 3, 4711};
+  for (int clockrate : clockrates) {
+    EXPECT_FALSE(adf->MakeAudioDecoder(SdpAudioFormat("l16", clockrate, 0),
+                                       rtc::nullopt));
+    for (int channels : num_channels) {
+      EXPECT_TRUE(adf->MakeAudioDecoder(
+          SdpAudioFormat("l16", clockrate, channels), rtc::nullopt));
+    }
+  }
+}
+
+TEST(AudioDecoderFactoryTest, CreateG722) {
+  rtc::scoped_refptr<AudioDecoderFactory> adf =
+      CreateBuiltinAudioDecoderFactory();
+  ASSERT_TRUE(adf);
+  // g722 supports 8 kHz, 1-2 channels.
+  EXPECT_FALSE(
+      adf->MakeAudioDecoder(SdpAudioFormat("g722", 8000, 0), rtc::nullopt));
+  EXPECT_TRUE(
+      adf->MakeAudioDecoder(SdpAudioFormat("g722", 8000, 1), rtc::nullopt));
+  EXPECT_TRUE(
+      adf->MakeAudioDecoder(SdpAudioFormat("g722", 8000, 2), rtc::nullopt));
+  EXPECT_FALSE(
+      adf->MakeAudioDecoder(SdpAudioFormat("g722", 8000, 3), rtc::nullopt));
+  EXPECT_FALSE(
+      adf->MakeAudioDecoder(SdpAudioFormat("g722", 16000, 1), rtc::nullopt));
+  EXPECT_FALSE(
+      adf->MakeAudioDecoder(SdpAudioFormat("g722", 32000, 1), rtc::nullopt));
+
+  // g722 actually uses a 16 kHz sample rate instead of the nominal 8 kHz.
+  std::unique_ptr<AudioDecoder> dec =
+      adf->MakeAudioDecoder(SdpAudioFormat("g722", 8000, 1), rtc::nullopt);
+  EXPECT_EQ(16000, dec->SampleRateHz());
+}
+
+TEST(AudioDecoderFactoryTest, CreateOpus) {
+  rtc::scoped_refptr<AudioDecoderFactory> adf =
+      CreateBuiltinAudioDecoderFactory();
+  ASSERT_TRUE(adf);
+  // Opus supports 48 kHz, 2 channels, and wants a "stereo" parameter whose
+  // value is either "0" or "1".
+  for (int hz : {8000, 16000, 32000, 48000}) {
+    for (int channels : {0, 1, 2, 3}) {
+      for (std::string stereo : {"XX", "0", "1", "2"}) {
+        std::map<std::string, std::string> params;
+        if (stereo != "XX") {
+          params["stereo"] = stereo;
+        }
+        const bool good = (hz == 48000 && channels == 2 &&
+                           (stereo == "XX" || stereo == "0" || stereo == "1"));
+        EXPECT_EQ(good,
+                  static_cast<bool>(adf->MakeAudioDecoder(
+                      SdpAudioFormat("opus", hz, channels, std::move(params)),
+                      rtc::nullopt)));
+      }
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/builtin_audio_encoder_factory_unittest.cc b/modules/audio_coding/codecs/builtin_audio_encoder_factory_unittest.cc
new file mode 100644
index 0000000..d371149
--- /dev/null
+++ b/modules/audio_coding/codecs/builtin_audio_encoder_factory_unittest.cc
@@ -0,0 +1,145 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits>
+#include <memory>
+#include <vector>
+
+#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class AudioEncoderFactoryTest
+    : public ::testing::TestWithParam<rtc::scoped_refptr<AudioEncoderFactory>> {
+};
+
+TEST_P(AudioEncoderFactoryTest, SupportsAtLeastOneFormat) {
+  auto factory = GetParam();
+  auto supported_encoders = factory->GetSupportedEncoders();
+  EXPECT_FALSE(supported_encoders.empty());
+}
+
+TEST_P(AudioEncoderFactoryTest, CanQueryAllSupportedFormats) {
+  auto factory = GetParam();
+  auto supported_encoders = factory->GetSupportedEncoders();
+  for (const auto& spec : supported_encoders) {
+    auto info = factory->QueryAudioEncoder(spec.format);
+    EXPECT_TRUE(info);
+  }
+}
+
+TEST_P(AudioEncoderFactoryTest, CanConstructAllSupportedEncoders) {
+  auto factory = GetParam();
+  auto supported_encoders = factory->GetSupportedEncoders();
+  for (const auto& spec : supported_encoders) {
+    auto info = factory->QueryAudioEncoder(spec.format);
+    auto encoder = factory->MakeAudioEncoder(127, spec.format, rtc::nullopt);
+    EXPECT_TRUE(encoder);
+    EXPECT_EQ(encoder->SampleRateHz(), info->sample_rate_hz);
+    EXPECT_EQ(encoder->NumChannels(), info->num_channels);
+    EXPECT_EQ(encoder->RtpTimestampRateHz(), spec.format.clockrate_hz);
+  }
+}
+
+TEST_P(AudioEncoderFactoryTest, CanRunAllSupportedEncoders) {
+  constexpr int kTestPayloadType = 127;
+  auto factory = GetParam();
+  auto supported_encoders = factory->GetSupportedEncoders();
+  for (const auto& spec : supported_encoders) {
+    auto encoder =
+        factory->MakeAudioEncoder(kTestPayloadType, spec.format, rtc::nullopt);
+    EXPECT_TRUE(encoder);
+    encoder->Reset();
+    const int num_samples = rtc::checked_cast<int>(
+        encoder->SampleRateHz() * encoder->NumChannels() / 100);
+    rtc::Buffer out;
+    rtc::BufferT<int16_t> audio;
+    audio.SetData(num_samples, [](rtc::ArrayView<int16_t> audio) {
+      for (size_t i = 0; i != audio.size(); ++i) {
+        // Just put some numbers in there, ensure they're within range.
+        audio[i] =
+            static_cast<int16_t>(i & std::numeric_limits<int16_t>::max());
+      }
+      return audio.size();
+    });
+    // This is here to stop the test going forever with a broken encoder.
+    constexpr int kMaxEncodeCalls = 100;
+    int blocks = 0;
+    for (; blocks < kMaxEncodeCalls; ++blocks) {
+      AudioEncoder::EncodedInfo info = encoder->Encode(
+          blocks * encoder->RtpTimestampRateHz() / 100, audio, &out);
+      EXPECT_EQ(info.encoded_bytes, out.size());
+      if (info.encoded_bytes > 0) {
+        EXPECT_EQ(0u, info.encoded_timestamp);
+        EXPECT_EQ(kTestPayloadType, info.payload_type);
+        break;
+      }
+    }
+    ASSERT_LT(blocks, kMaxEncodeCalls);
+    const unsigned int next_timestamp =
+        blocks * encoder->RtpTimestampRateHz() / 100;
+    out.Clear();
+    for (; blocks < kMaxEncodeCalls; ++blocks) {
+      AudioEncoder::EncodedInfo info = encoder->Encode(
+          blocks * encoder->RtpTimestampRateHz() / 100, audio, &out);
+      EXPECT_EQ(info.encoded_bytes, out.size());
+      if (info.encoded_bytes > 0) {
+        EXPECT_EQ(next_timestamp, info.encoded_timestamp);
+        EXPECT_EQ(kTestPayloadType, info.payload_type);
+        break;
+      }
+    }
+    ASSERT_LT(blocks, kMaxEncodeCalls);
+  }
+}
+
+INSTANTIATE_TEST_CASE_P(BuiltinAudioEncoderFactoryTest,
+                        AudioEncoderFactoryTest,
+                        ::testing::Values(CreateBuiltinAudioEncoderFactory()));
+
+TEST(BuiltinAudioEncoderFactoryTest, SupportsTheExpectedFormats) {
+  using ::testing::ElementsAreArray;
+  // Check that we claim to support the formats we expect from build flags, and
+  // we've ordered them correctly.
+  auto factory = CreateBuiltinAudioEncoderFactory();
+  auto specs = factory->GetSupportedEncoders();
+
+  const std::vector<SdpAudioFormat> supported_formats = [&specs] {
+    std::vector<SdpAudioFormat> formats;
+    for (const auto& spec : specs) {
+      formats.push_back(spec.format);
+    }
+    return formats;
+  }();
+
+  const std::vector<SdpAudioFormat> expected_formats = {
+#ifdef WEBRTC_CODEC_OPUS
+    {"opus", 48000, 2, {{"minptime", "10"}, {"useinbandfec", "1"}}},
+#endif
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+    {"isac", 16000, 1},
+#endif
+#ifdef WEBRTC_CODEC_ISAC
+    {"isac", 32000, 1},
+#endif
+    {"G722", 8000, 1},
+#ifdef WEBRTC_CODEC_ILBC
+    {"ilbc", 8000, 1},
+#endif
+    {"pcmu", 8000, 1},
+    {"pcma", 8000, 1}
+  };
+
+  ASSERT_THAT(supported_formats, ElementsAreArray(expected_formats));
+}
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/cng/audio_encoder_cng.cc b/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
new file mode 100644
index 0000000..78148ab
--- /dev/null
+++ b/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
@@ -0,0 +1,267 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/cng/audio_encoder_cng.h"
+
+#include <algorithm>
+#include <memory>
+#include <limits>
+#include <utility>
+
+namespace webrtc {
+
+namespace {
+
+const int kMaxFrameSizeMs = 60;
+
+}  // namespace
+
+AudioEncoderCng::Config::Config() = default;
+AudioEncoderCng::Config::Config(Config&&) = default;
+AudioEncoderCng::Config::~Config() = default;
+
+bool AudioEncoderCng::Config::IsOk() const {
+  if (num_channels != 1)
+    return false;
+  if (!speech_encoder)
+    return false;
+  if (num_channels != speech_encoder->NumChannels())
+    return false;
+  if (sid_frame_interval_ms <
+      static_cast<int>(speech_encoder->Max10MsFramesInAPacket() * 10))
+    return false;
+  if (num_cng_coefficients > WEBRTC_CNG_MAX_LPC_ORDER ||
+      num_cng_coefficients <= 0)
+    return false;
+  return true;
+}
+
+AudioEncoderCng::AudioEncoderCng(Config&& config)
+    : speech_encoder_(
+          ([&] { RTC_CHECK(config.IsOk()) << "Invalid configuration."; }(),
+           std::move(config.speech_encoder))),
+      cng_payload_type_(config.payload_type),
+      num_cng_coefficients_(config.num_cng_coefficients),
+      sid_frame_interval_ms_(config.sid_frame_interval_ms),
+      last_frame_active_(true),
+      vad_(config.vad ? std::unique_ptr<Vad>(config.vad)
+           : CreateVad(config.vad_mode)),
+      cng_encoder_(new ComfortNoiseEncoder(SampleRateHz(),
+                                           sid_frame_interval_ms_,
+                                           num_cng_coefficients_)) {
+}
+
+AudioEncoderCng::~AudioEncoderCng() = default;
+
+int AudioEncoderCng::SampleRateHz() const {
+  return speech_encoder_->SampleRateHz();
+}
+
+size_t AudioEncoderCng::NumChannels() const {
+  return 1;
+}
+
+int AudioEncoderCng::RtpTimestampRateHz() const {
+  return speech_encoder_->RtpTimestampRateHz();
+}
+
+size_t AudioEncoderCng::Num10MsFramesInNextPacket() const {
+  return speech_encoder_->Num10MsFramesInNextPacket();
+}
+
+size_t AudioEncoderCng::Max10MsFramesInAPacket() const {
+  return speech_encoder_->Max10MsFramesInAPacket();
+}
+
+int AudioEncoderCng::GetTargetBitrate() const {
+  return speech_encoder_->GetTargetBitrate();
+}
+
+AudioEncoder::EncodedInfo AudioEncoderCng::EncodeImpl(
+    uint32_t rtp_timestamp,
+    rtc::ArrayView<const int16_t> audio,
+    rtc::Buffer* encoded) {
+  const size_t samples_per_10ms_frame = SamplesPer10msFrame();
+  RTC_CHECK_EQ(speech_buffer_.size(),
+               rtp_timestamps_.size() * samples_per_10ms_frame);
+  rtp_timestamps_.push_back(rtp_timestamp);
+  RTC_DCHECK_EQ(samples_per_10ms_frame, audio.size());
+  speech_buffer_.insert(speech_buffer_.end(), audio.cbegin(), audio.cend());
+  const size_t frames_to_encode = speech_encoder_->Num10MsFramesInNextPacket();
+  if (rtp_timestamps_.size() < frames_to_encode) {
+    return EncodedInfo();
+  }
+  RTC_CHECK_LE(frames_to_encode * 10, kMaxFrameSizeMs)
+      << "Frame size cannot be larger than " << kMaxFrameSizeMs
+      << " ms when using VAD/CNG.";
+
+  // Group several 10 ms blocks per VAD call. Call VAD once or twice using the
+  // following split sizes:
+  // 10 ms = 10 + 0 ms; 20 ms = 20 + 0 ms; 30 ms = 30 + 0 ms;
+  // 40 ms = 20 + 20 ms; 50 ms = 30 + 20 ms; 60 ms = 30 + 30 ms.
+  size_t blocks_in_first_vad_call =
+      (frames_to_encode > 3 ? 3 : frames_to_encode);
+  if (frames_to_encode == 4)
+    blocks_in_first_vad_call = 2;
+  RTC_CHECK_GE(frames_to_encode, blocks_in_first_vad_call);
+  const size_t blocks_in_second_vad_call =
+      frames_to_encode - blocks_in_first_vad_call;
+
+  // Check if all of the buffer is passive speech. Start with checking the first
+  // block.
+  Vad::Activity activity = vad_->VoiceActivity(
+      &speech_buffer_[0], samples_per_10ms_frame * blocks_in_first_vad_call,
+      SampleRateHz());
+  if (activity == Vad::kPassive && blocks_in_second_vad_call > 0) {
+    // Only check the second block if the first was passive.
+    activity = vad_->VoiceActivity(
+        &speech_buffer_[samples_per_10ms_frame * blocks_in_first_vad_call],
+        samples_per_10ms_frame * blocks_in_second_vad_call, SampleRateHz());
+  }
+
+  EncodedInfo info;
+  switch (activity) {
+    case Vad::kPassive: {
+      info = EncodePassive(frames_to_encode, encoded);
+      last_frame_active_ = false;
+      break;
+    }
+    case Vad::kActive: {
+      info = EncodeActive(frames_to_encode, encoded);
+      last_frame_active_ = true;
+      break;
+    }
+    case Vad::kError: {
+      FATAL();  // Fails only if fed invalid data.
+      break;
+    }
+  }
+
+  speech_buffer_.erase(
+      speech_buffer_.begin(),
+      speech_buffer_.begin() + frames_to_encode * samples_per_10ms_frame);
+  rtp_timestamps_.erase(rtp_timestamps_.begin(),
+                        rtp_timestamps_.begin() + frames_to_encode);
+  return info;
+}
+
+void AudioEncoderCng::Reset() {
+  speech_encoder_->Reset();
+  speech_buffer_.clear();
+  rtp_timestamps_.clear();
+  last_frame_active_ = true;
+  vad_->Reset();
+  cng_encoder_.reset(
+      new ComfortNoiseEncoder(SampleRateHz(), sid_frame_interval_ms_,
+                              num_cng_coefficients_));
+}
+
+bool AudioEncoderCng::SetFec(bool enable) {
+  return speech_encoder_->SetFec(enable);
+}
+
+bool AudioEncoderCng::SetDtx(bool enable) {
+  return speech_encoder_->SetDtx(enable);
+}
+
+bool AudioEncoderCng::SetApplication(Application application) {
+  return speech_encoder_->SetApplication(application);
+}
+
+void AudioEncoderCng::SetMaxPlaybackRate(int frequency_hz) {
+  speech_encoder_->SetMaxPlaybackRate(frequency_hz);
+}
+
+rtc::ArrayView<std::unique_ptr<AudioEncoder>>
+AudioEncoderCng::ReclaimContainedEncoders() {
+  return rtc::ArrayView<std::unique_ptr<AudioEncoder>>(&speech_encoder_, 1);
+}
+
+void AudioEncoderCng::OnReceivedUplinkPacketLossFraction(
+    float uplink_packet_loss_fraction) {
+  speech_encoder_->OnReceivedUplinkPacketLossFraction(
+      uplink_packet_loss_fraction);
+}
+
+void AudioEncoderCng::OnReceivedUplinkRecoverablePacketLossFraction(
+    float uplink_recoverable_packet_loss_fraction) {
+  speech_encoder_->OnReceivedUplinkRecoverablePacketLossFraction(
+      uplink_recoverable_packet_loss_fraction);
+}
+
+void AudioEncoderCng::OnReceivedUplinkBandwidth(
+    int target_audio_bitrate_bps,
+    rtc::Optional<int64_t> bwe_period_ms) {
+  speech_encoder_->OnReceivedUplinkBandwidth(target_audio_bitrate_bps,
+                                             bwe_period_ms);
+}
+
+AudioEncoder::EncodedInfo AudioEncoderCng::EncodePassive(
+    size_t frames_to_encode,
+    rtc::Buffer* encoded) {
+  bool force_sid = last_frame_active_;
+  bool output_produced = false;
+  const size_t samples_per_10ms_frame = SamplesPer10msFrame();
+  AudioEncoder::EncodedInfo info;
+
+  for (size_t i = 0; i < frames_to_encode; ++i) {
+    // It's important not to pass &info.encoded_bytes directly to
+    // WebRtcCng_Encode(), since later loop iterations may return zero in
+    // that value, in which case we don't want to overwrite any value from
+    // an earlier iteration.
+    size_t encoded_bytes_tmp =
+        cng_encoder_->Encode(
+            rtc::ArrayView<const int16_t>(
+                &speech_buffer_[i * samples_per_10ms_frame],
+                samples_per_10ms_frame),
+            force_sid, encoded);
+
+    if (encoded_bytes_tmp > 0) {
+      RTC_CHECK(!output_produced);
+      info.encoded_bytes = encoded_bytes_tmp;
+      output_produced = true;
+      force_sid = false;
+    }
+  }
+
+  info.encoded_timestamp = rtp_timestamps_.front();
+  info.payload_type = cng_payload_type_;
+  info.send_even_if_empty = true;
+  info.speech = false;
+  return info;
+}
+
+AudioEncoder::EncodedInfo AudioEncoderCng::EncodeActive(
+    size_t frames_to_encode,
+    rtc::Buffer* encoded) {
+  const size_t samples_per_10ms_frame = SamplesPer10msFrame();
+  AudioEncoder::EncodedInfo info;
+  for (size_t i = 0; i < frames_to_encode; ++i) {
+    info =
+        speech_encoder_->Encode(rtp_timestamps_.front(),
+                                rtc::ArrayView<const int16_t>(
+                                    &speech_buffer_[i * samples_per_10ms_frame],
+                                    samples_per_10ms_frame),
+                                encoded);
+    if (i + 1 == frames_to_encode) {
+      RTC_CHECK_GT(info.encoded_bytes, 0) << "Encoder didn't deliver data.";
+    } else {
+      RTC_CHECK_EQ(info.encoded_bytes, 0)
+          << "Encoder delivered data too early.";
+    }
+  }
+  return info;
+}
+
+size_t AudioEncoderCng::SamplesPer10msFrame() const {
+  return rtc::CheckedDivExact(10 * SampleRateHz(), 1000);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/cng/audio_encoder_cng.h b/modules/audio_coding/codecs/cng/audio_encoder_cng.h
new file mode 100644
index 0000000..4491289
--- /dev/null
+++ b/modules/audio_coding/codecs/cng/audio_encoder_cng.h
@@ -0,0 +1,96 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_CNG_AUDIO_ENCODER_CNG_H_
+#define MODULES_AUDIO_CODING_CODECS_CNG_AUDIO_ENCODER_CNG_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/audio_codecs/audio_encoder.h"
+#include "common_audio/vad/include/vad.h"
+#include "modules/audio_coding/codecs/cng/webrtc_cng.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class Vad;
+
+class AudioEncoderCng final : public AudioEncoder {
+ public:
+  struct Config {
+    Config();
+    Config(Config&&);
+    ~Config();
+    bool IsOk() const;
+
+    size_t num_channels = 1;
+    int payload_type = 13;
+    std::unique_ptr<AudioEncoder> speech_encoder;
+    Vad::Aggressiveness vad_mode = Vad::kVadNormal;
+    int sid_frame_interval_ms = 100;
+    int num_cng_coefficients = 8;
+    // The Vad pointer is mainly for testing. If a NULL pointer is passed, the
+    // AudioEncoderCng creates (and destroys) a Vad object internally. If an
+    // object is passed, the AudioEncoderCng assumes ownership of the Vad
+    // object.
+    Vad* vad = nullptr;
+  };
+
+  explicit AudioEncoderCng(Config&& config);
+  ~AudioEncoderCng() override;
+
+  int SampleRateHz() const override;
+  size_t NumChannels() const override;
+  int RtpTimestampRateHz() const override;
+  size_t Num10MsFramesInNextPacket() const override;
+  size_t Max10MsFramesInAPacket() const override;
+  int GetTargetBitrate() const override;
+  EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
+                         rtc::ArrayView<const int16_t> audio,
+                         rtc::Buffer* encoded) override;
+  void Reset() override;
+  bool SetFec(bool enable) override;
+  bool SetDtx(bool enable) override;
+  bool SetApplication(Application application) override;
+  void SetMaxPlaybackRate(int frequency_hz) override;
+  rtc::ArrayView<std::unique_ptr<AudioEncoder>> ReclaimContainedEncoders()
+      override;
+  void OnReceivedUplinkPacketLossFraction(
+      float uplink_packet_loss_fraction) override;
+  void OnReceivedUplinkRecoverablePacketLossFraction(
+      float uplink_recoverable_packet_loss_fraction) override;
+  void OnReceivedUplinkBandwidth(
+      int target_audio_bitrate_bps,
+      rtc::Optional<int64_t> bwe_period_ms) override;
+
+ private:
+  EncodedInfo EncodePassive(size_t frames_to_encode,
+                            rtc::Buffer* encoded);
+  EncodedInfo EncodeActive(size_t frames_to_encode,
+                           rtc::Buffer* encoded);
+  size_t SamplesPer10msFrame() const;
+
+  std::unique_ptr<AudioEncoder> speech_encoder_;
+  const int cng_payload_type_;
+  const int num_cng_coefficients_;
+  const int sid_frame_interval_ms_;
+  std::vector<int16_t> speech_buffer_;
+  std::vector<uint32_t> rtp_timestamps_;
+  bool last_frame_active_;
+  std::unique_ptr<Vad> vad_;
+  std::unique_ptr<ComfortNoiseEncoder> cng_encoder_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderCng);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_CODECS_CNG_AUDIO_ENCODER_CNG_H_
diff --git a/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc b/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
new file mode 100644
index 0000000..f85abe2
--- /dev/null
+++ b/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
@@ -0,0 +1,507 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <vector>
+
+#include "common_audio/vad/mock/mock_vad.h"
+#include "modules/audio_coding/codecs/cng/audio_encoder_cng.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+#include "test/mock_audio_encoder.h"
+
+using ::testing::Return;
+using ::testing::_;
+using ::testing::SetArgPointee;
+using ::testing::InSequence;
+using ::testing::Invoke;
+
+namespace webrtc {
+
+namespace {
+static const size_t kMaxNumSamples = 48 * 10 * 2;  // 10 ms @ 48 kHz stereo.
+static const size_t kMockReturnEncodedBytes = 17;
+static const int kCngPayloadType = 18;
+}
+
+class AudioEncoderCngTest : public ::testing::Test {
+ protected:
+  AudioEncoderCngTest()
+      : mock_encoder_owner_(new MockAudioEncoder),
+        mock_encoder_(mock_encoder_owner_.get()),
+        mock_vad_(new MockVad),
+        timestamp_(4711),
+        num_audio_samples_10ms_(0),
+        sample_rate_hz_(8000) {
+    memset(audio_, 0, kMaxNumSamples * 2);
+    EXPECT_CALL(*mock_encoder_, NumChannels()).WillRepeatedly(Return(1));
+  }
+
+  void TearDown() override {
+    EXPECT_CALL(*mock_vad_, Die()).Times(1);
+    cng_.reset();
+  }
+
+  AudioEncoderCng::Config MakeCngConfig() {
+    AudioEncoderCng::Config config;
+    config.speech_encoder = std::move(mock_encoder_owner_);
+    EXPECT_TRUE(config.speech_encoder);
+
+    // Let the AudioEncoderCng object use a MockVad instead of its internally
+    // created Vad object.
+    config.vad = mock_vad_;
+    config.payload_type = kCngPayloadType;
+
+    return config;
+  }
+
+  void CreateCng(AudioEncoderCng::Config&& config) {
+    num_audio_samples_10ms_ = static_cast<size_t>(10 * sample_rate_hz_ / 1000);
+    ASSERT_LE(num_audio_samples_10ms_, kMaxNumSamples);
+    if (config.speech_encoder) {
+      EXPECT_CALL(*mock_encoder_, SampleRateHz())
+          .WillRepeatedly(Return(sample_rate_hz_));
+      // Max10MsFramesInAPacket() is just used to verify that the SID frame
+      // period is not too small. The return value does not matter that much,
+      // as long as it is smaller than 10.
+      EXPECT_CALL(*mock_encoder_, Max10MsFramesInAPacket())
+          .WillOnce(Return(1u));
+    }
+    cng_.reset(new AudioEncoderCng(std::move(config)));
+  }
+
+  void Encode() {
+    ASSERT_TRUE(cng_) << "Must call CreateCng() first.";
+    encoded_info_ = cng_->Encode(
+        timestamp_,
+        rtc::ArrayView<const int16_t>(audio_, num_audio_samples_10ms_),
+        &encoded_);
+    timestamp_ += static_cast<uint32_t>(num_audio_samples_10ms_);
+  }
+
+  // Expect |num_calls| calls to the encoder, all successful. The last call
+  // claims to have encoded |kMockReturnEncodedBytes| bytes, and all the
+  // preceding ones 0 bytes.
+  void ExpectEncodeCalls(size_t num_calls) {
+    InSequence s;
+    AudioEncoder::EncodedInfo info;
+    for (size_t j = 0; j < num_calls - 1; ++j) {
+      EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+          .WillOnce(Return(info));
+    }
+    info.encoded_bytes = kMockReturnEncodedBytes;
+    EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+        .WillOnce(
+            Invoke(MockAudioEncoder::FakeEncoding(kMockReturnEncodedBytes)));
+  }
+
+  // Verifies that the cng_ object waits until it has collected
+  // |blocks_per_frame| blocks of audio, and then dispatches all of them to
+  // the underlying codec (speech or cng).
+  void CheckBlockGrouping(size_t blocks_per_frame, bool active_speech) {
+    EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
+        .WillRepeatedly(Return(blocks_per_frame));
+    auto config = MakeCngConfig();
+    const int num_cng_coefficients = config.num_cng_coefficients;
+    CreateCng(std::move(config));
+    EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
+        .WillRepeatedly(Return(active_speech ? Vad::kActive : Vad::kPassive));
+
+    // Don't expect any calls to the encoder yet.
+    EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _)).Times(0);
+    for (size_t i = 0; i < blocks_per_frame - 1; ++i) {
+      Encode();
+      EXPECT_EQ(0u, encoded_info_.encoded_bytes);
+    }
+    if (active_speech)
+      ExpectEncodeCalls(blocks_per_frame);
+    Encode();
+    if (active_speech) {
+      EXPECT_EQ(kMockReturnEncodedBytes, encoded_info_.encoded_bytes);
+    } else {
+      EXPECT_EQ(static_cast<size_t>(num_cng_coefficients + 1),
+                encoded_info_.encoded_bytes);
+    }
+  }
+
+  // Verifies that the audio is partitioned into larger blocks before calling
+  // the VAD.
+  void CheckVadInputSize(int input_frame_size_ms,
+                         int expected_first_block_size_ms,
+                         int expected_second_block_size_ms) {
+    const size_t blocks_per_frame =
+        static_cast<size_t>(input_frame_size_ms / 10);
+
+    EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
+        .WillRepeatedly(Return(blocks_per_frame));
+
+    // Expect nothing to happen before the last block is sent to cng_.
+    EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _)).Times(0);
+    for (size_t i = 0; i < blocks_per_frame - 1; ++i) {
+      Encode();
+    }
+
+    // Let the VAD decision be passive, since an active decision may lead to
+    // early termination of the decision loop.
+    InSequence s;
+    EXPECT_CALL(
+        *mock_vad_,
+        VoiceActivity(_, expected_first_block_size_ms * sample_rate_hz_ / 1000,
+                      sample_rate_hz_)).WillOnce(Return(Vad::kPassive));
+    if (expected_second_block_size_ms > 0) {
+      EXPECT_CALL(*mock_vad_,
+                  VoiceActivity(
+                      _, expected_second_block_size_ms * sample_rate_hz_ / 1000,
+                      sample_rate_hz_)).WillOnce(Return(Vad::kPassive));
+    }
+
+    // With this call to Encode(), |mock_vad_| should be called according to the
+    // above expectations.
+    Encode();
+  }
+
+  // Tests a frame with both active and passive speech. Returns true if the
+  // decision was active speech, false if it was passive.
+  bool CheckMixedActivePassive(Vad::Activity first_type,
+                               Vad::Activity second_type) {
+    // Set the speech encoder frame size to 60 ms, to ensure that the VAD will
+    // be called twice.
+    const size_t blocks_per_frame = 6;
+    EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
+        .WillRepeatedly(Return(blocks_per_frame));
+    InSequence s;
+    EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
+        .WillOnce(Return(first_type));
+    if (first_type == Vad::kPassive) {
+      // Expect a second call to the VAD only if the first frame was passive.
+      EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
+          .WillOnce(Return(second_type));
+    }
+    encoded_info_.payload_type = 0;
+    for (size_t i = 0; i < blocks_per_frame; ++i) {
+      Encode();
+    }
+    return encoded_info_.payload_type != kCngPayloadType;
+  }
+
+  std::unique_ptr<AudioEncoderCng> cng_;
+  std::unique_ptr<MockAudioEncoder> mock_encoder_owner_;
+  MockAudioEncoder* mock_encoder_;
+  MockVad* mock_vad_;  // Ownership is transferred to |cng_|.
+  uint32_t timestamp_;
+  int16_t audio_[kMaxNumSamples];
+  size_t num_audio_samples_10ms_;
+  rtc::Buffer encoded_;
+  AudioEncoder::EncodedInfo encoded_info_;
+  int sample_rate_hz_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderCngTest);
+};
+
+TEST_F(AudioEncoderCngTest, CreateAndDestroy) {
+  CreateCng(MakeCngConfig());
+}
+
+TEST_F(AudioEncoderCngTest, CheckFrameSizePropagation) {
+  CreateCng(MakeCngConfig());
+  EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
+      .WillOnce(Return(17U));
+  EXPECT_EQ(17U, cng_->Num10MsFramesInNextPacket());
+}
+
+TEST_F(AudioEncoderCngTest, CheckTargetAudioBitratePropagation) {
+  CreateCng(MakeCngConfig());
+  EXPECT_CALL(*mock_encoder_,
+              OnReceivedUplinkBandwidth(4711, rtc::Optional<int64_t>()));
+  cng_->OnReceivedUplinkBandwidth(4711, rtc::nullopt);
+}
+
+TEST_F(AudioEncoderCngTest, CheckPacketLossFractionPropagation) {
+  CreateCng(MakeCngConfig());
+  EXPECT_CALL(*mock_encoder_, OnReceivedUplinkPacketLossFraction(0.5));
+  cng_->OnReceivedUplinkPacketLossFraction(0.5);
+}
+
+TEST_F(AudioEncoderCngTest, EncodeCallsVad) {
+  EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
+      .WillRepeatedly(Return(1U));
+  CreateCng(MakeCngConfig());
+  EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
+      .WillOnce(Return(Vad::kPassive));
+  Encode();
+}
+
+TEST_F(AudioEncoderCngTest, EncodeCollects1BlockPassiveSpeech) {
+  CheckBlockGrouping(1, false);
+}
+
+TEST_F(AudioEncoderCngTest, EncodeCollects2BlocksPassiveSpeech) {
+  CheckBlockGrouping(2, false);
+}
+
+TEST_F(AudioEncoderCngTest, EncodeCollects3BlocksPassiveSpeech) {
+  CheckBlockGrouping(3, false);
+}
+
+TEST_F(AudioEncoderCngTest, EncodeCollects1BlockActiveSpeech) {
+  CheckBlockGrouping(1, true);
+}
+
+TEST_F(AudioEncoderCngTest, EncodeCollects2BlocksActiveSpeech) {
+  CheckBlockGrouping(2, true);
+}
+
+TEST_F(AudioEncoderCngTest, EncodeCollects3BlocksActiveSpeech) {
+  CheckBlockGrouping(3, true);
+}
+
+TEST_F(AudioEncoderCngTest, EncodePassive) {
+  const size_t kBlocksPerFrame = 3;
+  EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
+      .WillRepeatedly(Return(kBlocksPerFrame));
+  auto config = MakeCngConfig();
+  const auto sid_frame_interval_ms = config.sid_frame_interval_ms;
+  const auto num_cng_coefficients = config.num_cng_coefficients;
+  CreateCng(std::move(config));
+  EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
+      .WillRepeatedly(Return(Vad::kPassive));
+  // Expect no calls at all to the speech encoder mock.
+  EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _)).Times(0);
+  uint32_t expected_timestamp = timestamp_;
+  for (size_t i = 0; i < 100; ++i) {
+    Encode();
+    // Check if it was time to call the cng encoder. This is done once every
+    // |kBlocksPerFrame| calls.
+    if ((i + 1) % kBlocksPerFrame == 0) {
+      // Now check if a SID interval has elapsed.
+      if ((i % (sid_frame_interval_ms / 10)) < kBlocksPerFrame) {
+        // If so, verify that we got a CNG encoding.
+        EXPECT_EQ(kCngPayloadType, encoded_info_.payload_type);
+        EXPECT_FALSE(encoded_info_.speech);
+        EXPECT_EQ(static_cast<size_t>(num_cng_coefficients) + 1,
+                  encoded_info_.encoded_bytes);
+        EXPECT_EQ(expected_timestamp, encoded_info_.encoded_timestamp);
+      }
+      expected_timestamp += rtc::checked_cast<uint32_t>(
+          kBlocksPerFrame * num_audio_samples_10ms_);
+    } else {
+      // Otherwise, expect no output.
+      EXPECT_EQ(0u, encoded_info_.encoded_bytes);
+    }
+  }
+}
+
+// Verifies that the correct action is taken for frames with both active and
+// passive speech.
+TEST_F(AudioEncoderCngTest, MixedActivePassive) {
+  CreateCng(MakeCngConfig());
+
+  // All of the frame is active speech.
+  ExpectEncodeCalls(6);
+  EXPECT_TRUE(CheckMixedActivePassive(Vad::kActive, Vad::kActive));
+  EXPECT_TRUE(encoded_info_.speech);
+
+  // First half of the frame is active speech.
+  ExpectEncodeCalls(6);
+  EXPECT_TRUE(CheckMixedActivePassive(Vad::kActive, Vad::kPassive));
+  EXPECT_TRUE(encoded_info_.speech);
+
+  // Second half of the frame is active speech.
+  ExpectEncodeCalls(6);
+  EXPECT_TRUE(CheckMixedActivePassive(Vad::kPassive, Vad::kActive));
+  EXPECT_TRUE(encoded_info_.speech);
+
+  // All of the frame is passive speech. Expect no calls to |mock_encoder_|.
+  EXPECT_FALSE(CheckMixedActivePassive(Vad::kPassive, Vad::kPassive));
+  EXPECT_FALSE(encoded_info_.speech);
+}
+
+// These tests verify that the audio is partitioned into larger blocks before
+// calling the VAD.
+// The parameters for CheckVadInputSize are:
+// CheckVadInputSize(frame_size, expected_first_block_size,
+//                   expected_second_block_size);
+TEST_F(AudioEncoderCngTest, VadInputSize10Ms) {
+  CreateCng(MakeCngConfig());
+  CheckVadInputSize(10, 10, 0);
+}
+TEST_F(AudioEncoderCngTest, VadInputSize20Ms) {
+  CreateCng(MakeCngConfig());
+  CheckVadInputSize(20, 20, 0);
+}
+TEST_F(AudioEncoderCngTest, VadInputSize30Ms) {
+  CreateCng(MakeCngConfig());
+  CheckVadInputSize(30, 30, 0);
+}
+TEST_F(AudioEncoderCngTest, VadInputSize40Ms) {
+  CreateCng(MakeCngConfig());
+  CheckVadInputSize(40, 20, 20);
+}
+TEST_F(AudioEncoderCngTest, VadInputSize50Ms) {
+  CreateCng(MakeCngConfig());
+  CheckVadInputSize(50, 30, 20);
+}
+TEST_F(AudioEncoderCngTest, VadInputSize60Ms) {
+  CreateCng(MakeCngConfig());
+  CheckVadInputSize(60, 30, 30);
+}
+
+// Verifies that the correct payload type is set when CNG is encoded.
+TEST_F(AudioEncoderCngTest, VerifyCngPayloadType) {
+  CreateCng(MakeCngConfig());
+  EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _)).Times(0);
+  EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1U));
+  EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
+      .WillOnce(Return(Vad::kPassive));
+  encoded_info_.payload_type = 0;
+  Encode();
+  EXPECT_EQ(kCngPayloadType, encoded_info_.payload_type);
+}
+
+// Verifies that a SID frame is encoded immediately as the signal changes from
+// active speech to passive.
+TEST_F(AudioEncoderCngTest, VerifySidFrameAfterSpeech) {
+  auto config = MakeCngConfig();
+  const auto num_cng_coefficients = config.num_cng_coefficients;
+  CreateCng(std::move(config));
+  EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
+      .WillRepeatedly(Return(1U));
+  // Start with encoding noise.
+  EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
+      .Times(2)
+      .WillRepeatedly(Return(Vad::kPassive));
+  Encode();
+  EXPECT_EQ(kCngPayloadType, encoded_info_.payload_type);
+  EXPECT_EQ(static_cast<size_t>(num_cng_coefficients) + 1,
+            encoded_info_.encoded_bytes);
+  // Encode again, and make sure we got no frame at all (since the SID frame
+  // period is 100 ms by default).
+  Encode();
+  EXPECT_EQ(0u, encoded_info_.encoded_bytes);
+
+  // Now encode active speech.
+  encoded_info_.payload_type = 0;
+  EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
+      .WillOnce(Return(Vad::kActive));
+  EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+      .WillOnce(
+          Invoke(MockAudioEncoder::FakeEncoding(kMockReturnEncodedBytes)));
+  Encode();
+  EXPECT_EQ(kMockReturnEncodedBytes, encoded_info_.encoded_bytes);
+
+  // Go back to noise again, and verify that a SID frame is emitted.
+  EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
+      .WillOnce(Return(Vad::kPassive));
+  Encode();
+  EXPECT_EQ(kCngPayloadType, encoded_info_.payload_type);
+  EXPECT_EQ(static_cast<size_t>(num_cng_coefficients) + 1,
+            encoded_info_.encoded_bytes);
+}
+
+// Resetting the CNG should reset both the VAD and the encoder.
+TEST_F(AudioEncoderCngTest, Reset) {
+  CreateCng(MakeCngConfig());
+  EXPECT_CALL(*mock_encoder_, Reset()).Times(1);
+  EXPECT_CALL(*mock_vad_, Reset()).Times(1);
+  cng_->Reset();
+}
+
+#if GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// This test fixture tests various error conditions that makes the
+// AudioEncoderCng die via CHECKs.
+class AudioEncoderCngDeathTest : public AudioEncoderCngTest {
+ protected:
+  AudioEncoderCngDeathTest() : AudioEncoderCngTest() {
+    EXPECT_CALL(*mock_vad_, Die()).Times(1);
+    delete mock_vad_;
+    mock_vad_ = nullptr;
+  }
+
+  // Override AudioEncoderCngTest::TearDown, since that one expects a call to
+  // the destructor of |mock_vad_|. In this case, that object is already
+  // deleted.
+  void TearDown() override {
+    cng_.reset();
+  }
+
+  AudioEncoderCng::Config MakeCngConfig() {
+    // Don't provide a Vad mock object, since it would leak when the test dies.
+    auto config = AudioEncoderCngTest::MakeCngConfig();
+    config.vad = nullptr;
+    return config;
+  }
+
+  void TryWrongNumCoefficients(int num) {
+    EXPECT_DEATH(
+        [&] {
+          auto config = MakeCngConfig();
+          config.num_cng_coefficients = num;
+          CreateCng(std::move(config));
+        }(),
+        "Invalid configuration");
+  }
+};
+
+TEST_F(AudioEncoderCngDeathTest, WrongFrameSize) {
+  CreateCng(MakeCngConfig());
+  num_audio_samples_10ms_ *= 2;  // 20 ms frame.
+  EXPECT_DEATH(Encode(), "");
+  num_audio_samples_10ms_ = 0;  // Zero samples.
+  EXPECT_DEATH(Encode(), "");
+}
+
+TEST_F(AudioEncoderCngDeathTest, WrongNumCoefficientsA) {
+  TryWrongNumCoefficients(-1);
+}
+
+TEST_F(AudioEncoderCngDeathTest, WrongNumCoefficientsB) {
+  TryWrongNumCoefficients(0);
+}
+
+TEST_F(AudioEncoderCngDeathTest, WrongNumCoefficientsC) {
+  TryWrongNumCoefficients(13);
+}
+
+TEST_F(AudioEncoderCngDeathTest, NullSpeechEncoder) {
+  auto config = MakeCngConfig();
+  config.speech_encoder = nullptr;
+  EXPECT_DEATH(CreateCng(std::move(config)), "");
+}
+
+TEST_F(AudioEncoderCngDeathTest, StereoEncoder) {
+  EXPECT_CALL(*mock_encoder_, NumChannels()).WillRepeatedly(Return(2));
+  EXPECT_DEATH(CreateCng(MakeCngConfig()), "Invalid configuration");
+}
+
+TEST_F(AudioEncoderCngDeathTest, StereoConfig) {
+  EXPECT_DEATH(
+      [&] {
+        auto config = MakeCngConfig();
+        config.num_channels = 2;
+        CreateCng(std::move(config));
+      }(),
+      "Invalid configuration");
+}
+
+TEST_F(AudioEncoderCngDeathTest, EncoderFrameSizeTooLarge) {
+  CreateCng(MakeCngConfig());
+  EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
+      .WillRepeatedly(Return(7U));
+  for (int i = 0; i < 6; ++i)
+    Encode();
+  EXPECT_DEATH(Encode(),
+               "Frame size cannot be larger than 60 ms when using VAD/CNG.");
+}
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/cng/cng_unittest.cc b/modules/audio_coding/codecs/cng/cng_unittest.cc
new file mode 100644
index 0000000..54e5189
--- /dev/null
+++ b/modules/audio_coding/codecs/cng/cng_unittest.cc
@@ -0,0 +1,241 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include <memory>
+#include <string>
+
+#include "modules/audio_coding/codecs/cng/webrtc_cng.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+enum {
+  kSidShortIntervalUpdate = 1,
+  kSidNormalIntervalUpdate = 100,
+  kSidLongIntervalUpdate = 10000
+};
+
+enum : size_t {
+  kCNGNumParamsLow = 0,
+  kCNGNumParamsNormal = 8,
+  kCNGNumParamsHigh = WEBRTC_CNG_MAX_LPC_ORDER,
+  kCNGNumParamsTooHigh = WEBRTC_CNG_MAX_LPC_ORDER + 1
+};
+
+enum {
+  kNoSid,
+  kForceSid
+};
+
+class CngTest : public ::testing::Test {
+ protected:
+  virtual void SetUp();
+
+  void TestCngEncode(int sample_rate_hz, int quality);
+
+  int16_t speech_data_[640];  // Max size of CNG internal buffers.
+};
+
+void CngTest::SetUp() {
+  FILE* input_file;
+  const std::string file_name =
+        webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+  input_file = fopen(file_name.c_str(), "rb");
+  ASSERT_TRUE(input_file != NULL);
+  ASSERT_EQ(640, static_cast<int32_t>(fread(speech_data_, sizeof(int16_t),
+                                             640, input_file)));
+  fclose(input_file);
+  input_file = NULL;
+}
+
+void CngTest::TestCngEncode(int sample_rate_hz, int quality) {
+  const size_t num_samples_10ms = rtc::CheckedDivExact(sample_rate_hz, 100);
+  rtc::Buffer sid_data;
+
+  ComfortNoiseEncoder cng_encoder(sample_rate_hz, kSidNormalIntervalUpdate,
+                                  quality);
+  EXPECT_EQ(0U, cng_encoder.Encode(rtc::ArrayView<const int16_t>(
+                                       speech_data_, num_samples_10ms),
+                                   kNoSid, &sid_data));
+  EXPECT_EQ(static_cast<size_t>(quality + 1),
+            cng_encoder.Encode(
+                rtc::ArrayView<const int16_t>(speech_data_, num_samples_10ms),
+                kForceSid, &sid_data));
+}
+
+#if GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// Create CNG encoder, init with faulty values, free CNG encoder.
+TEST_F(CngTest, CngInitFail) {
+  // Call with too few parameters.
+  EXPECT_DEATH({ ComfortNoiseEncoder(8000, kSidNormalIntervalUpdate,
+                                     kCNGNumParamsLow); }, "");
+  // Call with too many parameters.
+  EXPECT_DEATH({ ComfortNoiseEncoder(8000, kSidNormalIntervalUpdate,
+                                     kCNGNumParamsTooHigh); }, "");
+}
+
+// Encode Cng with too long input vector.
+TEST_F(CngTest, CngEncodeTooLong) {
+  rtc::Buffer sid_data;
+
+  // Create encoder.
+  ComfortNoiseEncoder cng_encoder(8000, kSidNormalIntervalUpdate,
+                                  kCNGNumParamsNormal);
+  // Run encoder with too much data.
+  EXPECT_DEATH(
+      cng_encoder.Encode(rtc::ArrayView<const int16_t>(speech_data_, 641),
+                         kNoSid, &sid_data),
+      "");
+}
+#endif  // GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+TEST_F(CngTest, CngEncode8000) {
+  TestCngEncode(8000, kCNGNumParamsNormal);
+}
+
+TEST_F(CngTest, CngEncode16000) {
+  TestCngEncode(16000, kCNGNumParamsNormal);
+}
+
+TEST_F(CngTest, CngEncode32000) {
+  TestCngEncode(32000, kCNGNumParamsHigh);
+}
+
+TEST_F(CngTest, CngEncode48000) {
+  TestCngEncode(48000, kCNGNumParamsNormal);
+}
+
+TEST_F(CngTest, CngEncode64000) {
+  TestCngEncode(64000, kCNGNumParamsNormal);
+}
+
+// Update SID parameters, for both 9 and 16 parameters.
+TEST_F(CngTest, CngUpdateSid) {
+  rtc::Buffer sid_data;
+
+  // Create and initialize encoder and decoder.
+  ComfortNoiseEncoder cng_encoder(16000, kSidNormalIntervalUpdate,
+                                  kCNGNumParamsNormal);
+  ComfortNoiseDecoder cng_decoder;
+
+  // Run normal Encode and UpdateSid.
+  EXPECT_EQ(kCNGNumParamsNormal + 1,
+            cng_encoder.Encode(rtc::ArrayView<const int16_t>(speech_data_, 160),
+                               kForceSid, &sid_data));
+  cng_decoder.UpdateSid(sid_data);
+
+  // Reinit with new length.
+  cng_encoder.Reset(16000, kSidNormalIntervalUpdate, kCNGNumParamsHigh);
+  cng_decoder.Reset();
+
+  // Expect 0 because of unstable parameters after switching length.
+  EXPECT_EQ(0U,
+            cng_encoder.Encode(rtc::ArrayView<const int16_t>(speech_data_, 160),
+                               kForceSid, &sid_data));
+  EXPECT_EQ(
+      kCNGNumParamsHigh + 1,
+      cng_encoder.Encode(rtc::ArrayView<const int16_t>(speech_data_ + 160, 160),
+                         kForceSid, &sid_data));
+  cng_decoder.UpdateSid(
+      rtc::ArrayView<const uint8_t>(sid_data.data(), kCNGNumParamsNormal + 1));
+}
+
+// Update SID parameters, with wrong parameters or without calling decode.
+TEST_F(CngTest, CngUpdateSidErroneous) {
+  rtc::Buffer sid_data;
+
+  // Encode.
+  ComfortNoiseEncoder cng_encoder(16000, kSidNormalIntervalUpdate,
+                                  kCNGNumParamsNormal);
+  ComfortNoiseDecoder cng_decoder;
+  EXPECT_EQ(kCNGNumParamsNormal + 1,
+            cng_encoder.Encode(rtc::ArrayView<const int16_t>(speech_data_, 160),
+                               kForceSid, &sid_data));
+
+  // First run with valid parameters, then with too many CNG parameters.
+  // The function will operate correctly by only reading the maximum number of
+  // parameters, skipping the extra.
+  EXPECT_EQ(kCNGNumParamsNormal + 1, sid_data.size());
+  cng_decoder.UpdateSid(sid_data);
+
+  // Make sure the input buffer is large enough. Since Encode() appends data, we
+  // need to set the size manually only afterwards, or the buffer will be bigger
+  // than anticipated.
+  sid_data.SetSize(kCNGNumParamsTooHigh + 1);
+  cng_decoder.UpdateSid(sid_data);
+}
+
+// Test to generate cng data, by forcing SID. Both normal and faulty condition.
+TEST_F(CngTest, CngGenerate) {
+  rtc::Buffer sid_data;
+  int16_t out_data[640];
+
+  // Create and initialize encoder and decoder.
+  ComfortNoiseEncoder cng_encoder(16000, kSidNormalIntervalUpdate,
+                                  kCNGNumParamsNormal);
+  ComfortNoiseDecoder cng_decoder;
+
+  // Normal Encode.
+  EXPECT_EQ(kCNGNumParamsNormal + 1,
+            cng_encoder.Encode(rtc::ArrayView<const int16_t>(speech_data_, 160),
+                               kForceSid, &sid_data));
+
+  // Normal UpdateSid.
+  cng_decoder.UpdateSid(sid_data);
+
+  // Two normal Generate, one with new_period.
+  EXPECT_TRUE(cng_decoder.Generate(rtc::ArrayView<int16_t>(out_data, 640), 1));
+  EXPECT_TRUE(cng_decoder.Generate(rtc::ArrayView<int16_t>(out_data, 640), 0));
+
+  // Call Genereate with too much data.
+  EXPECT_FALSE(cng_decoder.Generate(rtc::ArrayView<int16_t>(out_data, 641), 0));
+}
+
+// Test automatic SID.
+TEST_F(CngTest, CngAutoSid) {
+  rtc::Buffer sid_data;
+
+  // Create and initialize encoder and decoder.
+  ComfortNoiseEncoder cng_encoder(16000, kSidNormalIntervalUpdate,
+                                  kCNGNumParamsNormal);
+  ComfortNoiseDecoder cng_decoder;
+
+  // Normal Encode, 100 msec, where no SID data should be generated.
+  for (int i = 0; i < 10; i++) {
+    EXPECT_EQ(0U, cng_encoder.Encode(
+        rtc::ArrayView<const int16_t>(speech_data_, 160), kNoSid, &sid_data));
+  }
+
+  // We have reached 100 msec, and SID data should be generated.
+  EXPECT_EQ(kCNGNumParamsNormal + 1, cng_encoder.Encode(
+      rtc::ArrayView<const int16_t>(speech_data_, 160), kNoSid, &sid_data));
+}
+
+// Test automatic SID, with very short interval.
+TEST_F(CngTest, CngAutoSidShort) {
+  rtc::Buffer sid_data;
+
+  // Create and initialize encoder and decoder.
+  ComfortNoiseEncoder cng_encoder(16000, kSidShortIntervalUpdate,
+                                  kCNGNumParamsNormal);
+  ComfortNoiseDecoder cng_decoder;
+
+  // First call will never generate SID, unless forced to.
+  EXPECT_EQ(0U, cng_encoder.Encode(
+      rtc::ArrayView<const int16_t>(speech_data_, 160), kNoSid, &sid_data));
+
+  // Normal Encode, 100 msec, SID data should be generated all the time.
+  for (int i = 0; i < 10; i++) {
+    EXPECT_EQ(kCNGNumParamsNormal + 1, cng_encoder.Encode(
+        rtc::ArrayView<const int16_t>(speech_data_, 160), kNoSid, &sid_data));
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/cng/webrtc_cng.cc b/modules/audio_coding/codecs/cng/webrtc_cng.cc
new file mode 100644
index 0000000..bd17a61
--- /dev/null
+++ b/modules/audio_coding/codecs/cng/webrtc_cng.cc
@@ -0,0 +1,444 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/cng/webrtc_cng.h"
+
+#include <algorithm>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+namespace {
+
+const size_t kCngMaxOutsizeOrder = 640;
+
+// TODO(ossu): Rename the left-over WebRtcCng according to style guide.
+void WebRtcCng_K2a16(int16_t* k, int useOrder, int16_t* a);
+
+const int32_t WebRtcCng_kDbov[94] = {
+  1081109975, 858756178, 682134279, 541838517, 430397633, 341876992,
+  271562548,  215709799, 171344384, 136103682, 108110997, 85875618,
+  68213428,   54183852,  43039763,  34187699,  27156255,  21570980,
+  17134438,   13610368,  10811100,  8587562,   6821343,   5418385,
+  4303976,    3418770,   2715625,   2157098,   1713444,   1361037,
+  1081110,    858756,    682134,    541839,    430398,    341877,
+  271563,     215710,    171344,    136104,    108111,    85876,
+  68213,      54184,     43040,     34188,     27156,     21571,
+  17134,      13610,     10811,     8588,      6821,      5418,
+  4304,       3419,      2716,      2157,      1713,      1361,
+  1081,       859,       682,       542,       430,       342,
+  272,        216,       171,       136,       108,       86,
+  68,         54,        43,        34,        27,        22,
+  17,         14,        11,        9,         7,         5,
+  4,          3,         3,         2,         2,         1,
+  1,          1,         1,         1
+};
+
+const int16_t WebRtcCng_kCorrWindow[WEBRTC_CNG_MAX_LPC_ORDER] = {
+  32702, 32636, 32570, 32505, 32439, 32374,
+  32309, 32244, 32179, 32114, 32049, 31985
+};
+
+}  // namespace
+
+ComfortNoiseDecoder::ComfortNoiseDecoder() {
+  /* Needed to get the right function pointers in SPLIB. */
+  WebRtcSpl_Init();
+  Reset();
+}
+
+void ComfortNoiseDecoder::Reset() {
+  dec_seed_ = 7777;  /* For debugging only. */
+  dec_target_energy_ = 0;
+  dec_used_energy_ = 0;
+  for (auto& c : dec_target_reflCoefs_)
+    c = 0;
+  for (auto& c : dec_used_reflCoefs_)
+    c = 0;
+  for (auto& c : dec_filtstate_)
+    c = 0;
+  for (auto& c : dec_filtstateLow_)
+    c = 0;
+  dec_order_ = 5;
+  dec_target_scale_factor_ = 0;
+  dec_used_scale_factor_ = 0;
+}
+
+void ComfortNoiseDecoder::UpdateSid(rtc::ArrayView<const uint8_t> sid) {
+  int16_t refCs[WEBRTC_CNG_MAX_LPC_ORDER];
+  int32_t targetEnergy;
+  size_t length = sid.size();
+  /* Throw away reflection coefficients of higher order than we can handle. */
+  if (length > (WEBRTC_CNG_MAX_LPC_ORDER + 1))
+    length = WEBRTC_CNG_MAX_LPC_ORDER + 1;
+
+  dec_order_ = static_cast<uint16_t>(length - 1);
+
+  uint8_t sid0 = std::min<uint8_t>(sid[0], 93);
+  targetEnergy = WebRtcCng_kDbov[sid0];
+  /* Take down target energy to 75%. */
+  targetEnergy = targetEnergy >> 1;
+  targetEnergy += targetEnergy >> 2;
+
+  dec_target_energy_ = targetEnergy;
+
+  /* Reconstruct coeffs with tweak for WebRtc implementation of RFC3389. */
+  if (dec_order_ == WEBRTC_CNG_MAX_LPC_ORDER) {
+    for (size_t i = 0; i < (dec_order_); i++) {
+      refCs[i] = sid[i + 1] << 8; /* Q7 to Q15*/
+      dec_target_reflCoefs_[i] = refCs[i];
+    }
+  } else {
+    for (size_t i = 0; i < (dec_order_); i++) {
+      refCs[i] = (sid[i + 1] - 127) * (1 << 8); /* Q7 to Q15. */
+      dec_target_reflCoefs_[i] = refCs[i];
+    }
+  }
+
+  for (size_t i = (dec_order_); i < WEBRTC_CNG_MAX_LPC_ORDER; i++) {
+    refCs[i] = 0;
+    dec_target_reflCoefs_[i] = refCs[i];
+  }
+}
+
+bool ComfortNoiseDecoder::Generate(rtc::ArrayView<int16_t> out_data,
+                                   bool new_period) {
+  int16_t excitation[kCngMaxOutsizeOrder];
+  int16_t low[kCngMaxOutsizeOrder];
+  int16_t lpPoly[WEBRTC_CNG_MAX_LPC_ORDER + 1];
+  int16_t ReflBetaStd = 26214;  /* 0.8 in q15. */
+  int16_t ReflBetaCompStd = 6553;  /* 0.2 in q15. */
+  int16_t ReflBetaNewP = 19661;  /* 0.6 in q15. */
+  int16_t ReflBetaCompNewP = 13107;  /* 0.4 in q15. */
+  int16_t Beta, BetaC;  /* These are in Q15. */
+  int32_t targetEnergy;
+  int16_t En;
+  int16_t temp16;
+  const size_t num_samples = out_data.size();
+
+  if (num_samples > kCngMaxOutsizeOrder) {
+    return false;
+  }
+
+  if (new_period) {
+    dec_used_scale_factor_ = dec_target_scale_factor_;
+    Beta = ReflBetaNewP;
+    BetaC = ReflBetaCompNewP;
+  } else {
+    Beta = ReflBetaStd;
+    BetaC = ReflBetaCompStd;
+  }
+
+  /* Calculate new scale factor in Q13 */
+  dec_used_scale_factor_ =
+      rtc::checked_cast<int16_t>(
+          WEBRTC_SPL_MUL_16_16_RSFT(dec_used_scale_factor_, Beta >> 2, 13) +
+          WEBRTC_SPL_MUL_16_16_RSFT(dec_target_scale_factor_, BetaC >> 2, 13));
+
+  dec_used_energy_  = dec_used_energy_ >> 1;
+  dec_used_energy_ += dec_target_energy_ >> 1;
+
+  /* Do the same for the reflection coeffs, albeit in Q15. */
+  for (size_t i = 0; i < WEBRTC_CNG_MAX_LPC_ORDER; i++) {
+    dec_used_reflCoefs_[i] = (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(
+        dec_used_reflCoefs_[i], Beta, 15);
+    dec_used_reflCoefs_[i] += (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(
+        dec_target_reflCoefs_[i], BetaC, 15);
+  }
+
+  /* Compute the polynomial coefficients. */
+  WebRtcCng_K2a16(dec_used_reflCoefs_, WEBRTC_CNG_MAX_LPC_ORDER, lpPoly);
+
+
+  targetEnergy = dec_used_energy_;
+
+  /* Calculate scaling factor based on filter energy. */
+  En = 8192;  /* 1.0 in Q13. */
+  for (size_t i = 0; i < (WEBRTC_CNG_MAX_LPC_ORDER); i++) {
+    /* Floating point value for reference.
+       E *= 1.0 - (dec_used_reflCoefs_[i] / 32768.0) *
+       (dec_used_reflCoefs_[i] / 32768.0);
+     */
+
+    /* Same in fixed point. */
+    /* K(i).^2 in Q15. */
+    temp16 = (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(
+        dec_used_reflCoefs_[i], dec_used_reflCoefs_[i], 15);
+    /* 1 - K(i).^2 in Q15. */
+    temp16 = 0x7fff - temp16;
+    En = (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(En, temp16, 15);
+  }
+
+  /* float scaling= sqrt(E * dec_target_energy_ / (1 << 24)); */
+
+  /* Calculate sqrt(En * target_energy / excitation energy) */
+  targetEnergy = WebRtcSpl_Sqrt(dec_used_energy_);
+
+  En = (int16_t) WebRtcSpl_Sqrt(En) << 6;
+  En = (En * 3) >> 1;  /* 1.5 estimates sqrt(2). */
+  dec_used_scale_factor_ = (int16_t)((En * targetEnergy) >> 12);
+
+  /* Generate excitation. */
+  /* Excitation energy per sample is 2.^24 - Q13 N(0,1). */
+  for (size_t i = 0; i < num_samples; i++) {
+    excitation[i] = WebRtcSpl_RandN(&dec_seed_) >> 1;
+  }
+
+  /* Scale to correct energy. */
+  WebRtcSpl_ScaleVector(excitation, excitation, dec_used_scale_factor_,
+                        num_samples, 13);
+
+  /* |lpPoly| - Coefficients in Q12.
+   * |excitation| - Speech samples.
+   * |nst->dec_filtstate| - State preservation.
+   * |out_data| - Filtered speech samples. */
+  WebRtcSpl_FilterAR(lpPoly, WEBRTC_CNG_MAX_LPC_ORDER + 1, excitation,
+                     num_samples, dec_filtstate_, WEBRTC_CNG_MAX_LPC_ORDER,
+                     dec_filtstateLow_, WEBRTC_CNG_MAX_LPC_ORDER,
+                     out_data.data(), low, num_samples);
+
+  return true;
+}
+
+ComfortNoiseEncoder::ComfortNoiseEncoder(int fs, int interval, int quality)
+    : enc_nrOfCoefs_(quality),
+      enc_sampfreq_(fs),
+      enc_interval_(interval),
+      enc_msSinceSid_(0),
+      enc_Energy_(0),
+      enc_reflCoefs_{0},
+      enc_corrVector_{0},
+      enc_seed_(7777)  /* For debugging only. */ {
+  RTC_CHECK_GT(quality, 0);
+  RTC_CHECK_LE(quality, WEBRTC_CNG_MAX_LPC_ORDER);
+  /* Needed to get the right function pointers in SPLIB. */
+  WebRtcSpl_Init();
+}
+
+void ComfortNoiseEncoder::Reset(int fs, int interval, int quality) {
+  RTC_CHECK_GT(quality, 0);
+  RTC_CHECK_LE(quality, WEBRTC_CNG_MAX_LPC_ORDER);
+  enc_nrOfCoefs_ = quality;
+  enc_sampfreq_ = fs;
+  enc_interval_ = interval;
+  enc_msSinceSid_ = 0;
+  enc_Energy_ = 0;
+  for (auto& c : enc_reflCoefs_)
+    c = 0;
+  for (auto& c : enc_corrVector_)
+    c = 0;
+  enc_seed_ = 7777;  /* For debugging only. */
+}
+
+size_t ComfortNoiseEncoder::Encode(rtc::ArrayView<const int16_t> speech,
+                                   bool force_sid,
+                                   rtc::Buffer* output) {
+  int16_t arCoefs[WEBRTC_CNG_MAX_LPC_ORDER + 1];
+  int32_t corrVector[WEBRTC_CNG_MAX_LPC_ORDER + 1];
+  int16_t refCs[WEBRTC_CNG_MAX_LPC_ORDER + 1];
+  int16_t hanningW[kCngMaxOutsizeOrder];
+  int16_t ReflBeta = 19661;     /* 0.6 in q15. */
+  int16_t ReflBetaComp = 13107; /* 0.4 in q15. */
+  int32_t outEnergy;
+  int outShifts;
+  size_t i;
+  int stab;
+  int acorrScale;
+  size_t index;
+  size_t ind, factor;
+  int32_t* bptr;
+  int32_t blo, bhi;
+  int16_t negate;
+  const int16_t* aptr;
+  int16_t speechBuf[kCngMaxOutsizeOrder];
+
+  const size_t num_samples = speech.size();
+  RTC_CHECK_LE(num_samples, kCngMaxOutsizeOrder);
+
+  for (i = 0; i < num_samples; i++) {
+    speechBuf[i] = speech[i];
+  }
+
+  factor = num_samples;
+
+  /* Calculate energy and a coefficients. */
+  outEnergy = WebRtcSpl_Energy(speechBuf, num_samples, &outShifts);
+  while (outShifts > 0) {
+    /* We can only do 5 shifts without destroying accuracy in
+     * division factor. */
+    if (outShifts > 5) {
+      outEnergy <<= (outShifts - 5);
+      outShifts = 5;
+    } else {
+      factor /= 2;
+      outShifts--;
+    }
+  }
+  outEnergy = WebRtcSpl_DivW32W16(outEnergy, (int16_t)factor);
+
+  if (outEnergy > 1) {
+    /* Create Hanning Window. */
+    WebRtcSpl_GetHanningWindow(hanningW, num_samples / 2);
+    for (i = 0; i < (num_samples / 2); i++)
+      hanningW[num_samples - i - 1] = hanningW[i];
+
+    WebRtcSpl_ElementwiseVectorMult(speechBuf, hanningW, speechBuf, num_samples,
+                                    14);
+
+    WebRtcSpl_AutoCorrelation(speechBuf, num_samples, enc_nrOfCoefs_,
+                              corrVector, &acorrScale);
+
+    if (*corrVector == 0)
+      *corrVector = WEBRTC_SPL_WORD16_MAX;
+
+    /* Adds the bandwidth expansion. */
+    aptr = WebRtcCng_kCorrWindow;
+    bptr = corrVector;
+
+    /* (zzz) lpc16_1 = 17+1+820+2+2 = 842 (ordo2=700). */
+    for (ind = 0; ind < enc_nrOfCoefs_; ind++) {
+      /* The below code multiplies the 16 b corrWindow values (Q15) with
+       * the 32 b corrvector (Q0) and shifts the result down 15 steps. */
+      negate = *bptr < 0;
+      if (negate)
+        *bptr = -*bptr;
+
+      blo = (int32_t) * aptr * (*bptr & 0xffff);
+      bhi = ((blo >> 16) & 0xffff)
+          + ((int32_t)(*aptr++) * ((*bptr >> 16) & 0xffff));
+      blo = (blo & 0xffff) | ((bhi & 0xffff) << 16);
+
+      *bptr = (((bhi >> 16) & 0x7fff) << 17) | ((uint32_t) blo >> 15);
+      if (negate)
+        *bptr = -*bptr;
+      bptr++;
+    }
+    /* End of bandwidth expansion. */
+
+    stab = WebRtcSpl_LevinsonDurbin(corrVector, arCoefs, refCs,
+                                    enc_nrOfCoefs_);
+
+    if (!stab) {
+      /* Disregard from this frame */
+      return 0;
+    }
+
+  } else {
+    for (i = 0; i < enc_nrOfCoefs_; i++)
+      refCs[i] = 0;
+  }
+
+  if (force_sid) {
+    /* Read instantaneous values instead of averaged. */
+    for (i = 0; i < enc_nrOfCoefs_; i++)
+      enc_reflCoefs_[i] = refCs[i];
+    enc_Energy_ = outEnergy;
+  } else {
+    /* Average history with new values. */
+    for (i = 0; i < enc_nrOfCoefs_; i++) {
+      enc_reflCoefs_[i] = (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(
+          enc_reflCoefs_[i], ReflBeta, 15);
+      enc_reflCoefs_[i] +=
+          (int16_t) WEBRTC_SPL_MUL_16_16_RSFT(refCs[i], ReflBetaComp, 15);
+    }
+    enc_Energy_ =
+        (outEnergy >> 2) + (enc_Energy_ >> 1) + (enc_Energy_ >> 2);
+  }
+
+  if (enc_Energy_ < 1) {
+    enc_Energy_ = 1;
+  }
+
+  if ((enc_msSinceSid_ > (enc_interval_ - 1)) || force_sid) {
+    /* Search for best dbov value. */
+    index = 0;
+    for (i = 1; i < 93; i++) {
+      /* Always round downwards. */
+      if ((enc_Energy_ - WebRtcCng_kDbov[i]) > 0) {
+        index = i;
+        break;
+      }
+    }
+    if ((i == 93) && (index == 0))
+      index = 94;
+
+    const size_t output_coefs = enc_nrOfCoefs_ + 1;
+    output->AppendData(output_coefs, [&] (rtc::ArrayView<uint8_t> output) {
+        output[0] = (uint8_t)index;
+
+        /* Quantize coefficients with tweak for WebRtc implementation of
+         * RFC3389. */
+        if (enc_nrOfCoefs_ == WEBRTC_CNG_MAX_LPC_ORDER) {
+          for (i = 0; i < enc_nrOfCoefs_; i++) {
+            /* Q15 to Q7 with rounding. */
+            output[i + 1] = ((enc_reflCoefs_[i] + 128) >> 8);
+          }
+        } else {
+          for (i = 0; i < enc_nrOfCoefs_; i++) {
+            /* Q15 to Q7 with rounding. */
+            output[i + 1] = (127 + ((enc_reflCoefs_[i] + 128) >> 8));
+          }
+        }
+
+        return output_coefs;
+      });
+
+    enc_msSinceSid_ =
+        static_cast<int16_t>((1000 * num_samples) / enc_sampfreq_);
+    return output_coefs;
+  } else {
+    enc_msSinceSid_ +=
+        static_cast<int16_t>((1000 * num_samples) / enc_sampfreq_);
+    return 0;
+  }
+}
+
+namespace {
+/* Values in |k| are Q15, and |a| Q12. */
+void WebRtcCng_K2a16(int16_t* k, int useOrder, int16_t* a) {
+  int16_t any[WEBRTC_SPL_MAX_LPC_ORDER + 1];
+  int16_t* aptr;
+  int16_t* aptr2;
+  int16_t* anyptr;
+  const int16_t* kptr;
+  int m, i;
+
+  kptr = k;
+  *a = 4096; /* i.e., (Word16_MAX >> 3) + 1 */
+  *any = *a;
+  a[1] = (*k + 4) >> 3;
+  for (m = 1; m < useOrder; m++) {
+    kptr++;
+    aptr = a;
+    aptr++;
+    aptr2 = &a[m];
+    anyptr = any;
+    anyptr++;
+
+    any[m + 1] = (*kptr + 4) >> 3;
+    for (i = 0; i < m; i++) {
+      *anyptr++ =
+          (*aptr++) +
+          (int16_t)((((int32_t)(*aptr2--) * (int32_t)*kptr) + 16384) >> 15);
+    }
+
+    aptr = a;
+    anyptr = any;
+    for (i = 0; i < (m + 2); i++) {
+      *aptr++ = *anyptr++;
+    }
+  }
+}
+
+}  // namespace
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/cng/webrtc_cng.h b/modules/audio_coding/codecs/cng/webrtc_cng.h
new file mode 100644
index 0000000..5e21b8f
--- /dev/null
+++ b/modules/audio_coding/codecs/cng/webrtc_cng.h
@@ -0,0 +1,99 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef MODULES_AUDIO_CODING_CODECS_CNG_WEBRTC_CNG_H_
+#define MODULES_AUDIO_CODING_CODECS_CNG_WEBRTC_CNG_H_
+
+#include <cstddef>
+
+#include "api/array_view.h"
+#include "rtc_base/buffer.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+#define WEBRTC_CNG_MAX_LPC_ORDER 12
+
+namespace webrtc {
+
+class ComfortNoiseDecoder {
+ public:
+  ComfortNoiseDecoder();
+  ~ComfortNoiseDecoder() = default;
+
+  ComfortNoiseDecoder(const ComfortNoiseDecoder&) = delete;
+  ComfortNoiseDecoder& operator=(const ComfortNoiseDecoder&) = delete;
+
+  void Reset();
+
+  // Updates the CN state when a new SID packet arrives.
+  // |sid| is a view of the SID packet without the headers.
+  void UpdateSid(rtc::ArrayView<const uint8_t> sid);
+
+  // Generates comfort noise.
+  // |out_data| will be filled with samples - its size determines the number of
+  // samples generated. When |new_period| is true, CNG history will be reset
+  // before any audio is generated.  Returns |false| if outData is too large -
+  // currently 640 bytes (equalling 10ms at 64kHz).
+  // TODO(ossu): Specify better limits for the size of out_data. Either let it
+  //             be unbounded or limit to 10ms in the current sample rate.
+  bool Generate(rtc::ArrayView<int16_t> out_data, bool new_period);
+
+ private:
+  uint32_t dec_seed_;
+  int32_t dec_target_energy_;
+  int32_t dec_used_energy_;
+  int16_t dec_target_reflCoefs_[WEBRTC_CNG_MAX_LPC_ORDER + 1];
+  int16_t dec_used_reflCoefs_[WEBRTC_CNG_MAX_LPC_ORDER + 1];
+  int16_t dec_filtstate_[WEBRTC_CNG_MAX_LPC_ORDER + 1];
+  int16_t dec_filtstateLow_[WEBRTC_CNG_MAX_LPC_ORDER + 1];
+  uint16_t dec_order_;
+  int16_t dec_target_scale_factor_;  /* Q29 */
+  int16_t dec_used_scale_factor_;  /* Q29 */
+};
+
+class ComfortNoiseEncoder {
+ public:
+  // Creates a comfort noise encoder.
+  // |fs| selects sample rate: 8000 for narrowband or 16000 for wideband.
+  // |interval| sets the interval at which to generate SID data (in ms).
+  // |quality| selects the number of refl. coeffs. Maximum allowed is 12.
+  ComfortNoiseEncoder(int fs, int interval, int quality);
+  ~ComfortNoiseEncoder() = default;
+
+  ComfortNoiseEncoder(const ComfortNoiseEncoder&) = delete;
+  ComfortNoiseEncoder& operator=(const ComfortNoiseEncoder&) = delete;
+
+  // Resets the comfort noise encoder to its initial state.
+  // Parameters are set as during construction.
+  void Reset(int fs, int interval, int quality);
+
+  // Analyzes background noise from |speech| and appends coefficients to
+  // |output|.  Returns the number of coefficients generated.  If |force_sid| is
+  // true, a SID frame is forced and the internal sid interval counter is reset.
+  // Will fail if the input size is too large (> 640 samples, see
+  // ComfortNoiseDecoder::Generate).
+  size_t Encode(rtc::ArrayView<const int16_t> speech,
+                bool force_sid,
+                rtc::Buffer* output);
+
+ private:
+  size_t enc_nrOfCoefs_;
+  int enc_sampfreq_;
+  int16_t enc_interval_;
+  int16_t enc_msSinceSid_;
+  int32_t enc_Energy_;
+  int16_t enc_reflCoefs_[WEBRTC_CNG_MAX_LPC_ORDER + 1];
+  int32_t enc_corrVector_[WEBRTC_CNG_MAX_LPC_ORDER + 1];
+  uint32_t enc_seed_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_CODECS_CNG_WEBRTC_CNG_H_
diff --git a/modules/audio_coding/codecs/g711/OWNERS b/modules/audio_coding/codecs/g711/OWNERS
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/modules/audio_coding/codecs/g711/OWNERS
diff --git a/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc b/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc
new file mode 100644
index 0000000..a620a3e
--- /dev/null
+++ b/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc
@@ -0,0 +1,88 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/g711/audio_decoder_pcm.h"
+
+#include "modules/audio_coding/codecs/legacy_encoded_audio_frame.h"
+#include "modules/audio_coding/codecs/g711/g711_interface.h"
+
+namespace webrtc {
+
+void AudioDecoderPcmU::Reset() {}
+
+std::vector<AudioDecoder::ParseResult> AudioDecoderPcmU::ParsePayload(
+    rtc::Buffer&& payload,
+    uint32_t timestamp) {
+  return LegacyEncodedAudioFrame::SplitBySamples(
+      this, std::move(payload), timestamp, 8 * num_channels_, 8);
+}
+
+int AudioDecoderPcmU::SampleRateHz() const {
+  return 8000;
+}
+
+size_t AudioDecoderPcmU::Channels() const {
+  return num_channels_;
+}
+
+int AudioDecoderPcmU::DecodeInternal(const uint8_t* encoded,
+                                     size_t encoded_len,
+                                     int sample_rate_hz,
+                                     int16_t* decoded,
+                                     SpeechType* speech_type) {
+  RTC_DCHECK_EQ(SampleRateHz(), sample_rate_hz);
+  int16_t temp_type = 1;  // Default is speech.
+  size_t ret = WebRtcG711_DecodeU(encoded, encoded_len, decoded, &temp_type);
+  *speech_type = ConvertSpeechType(temp_type);
+  return static_cast<int>(ret);
+}
+
+int AudioDecoderPcmU::PacketDuration(const uint8_t* encoded,
+                                     size_t encoded_len) const {
+  // One encoded byte per sample per channel.
+  return static_cast<int>(encoded_len / Channels());
+}
+
+void AudioDecoderPcmA::Reset() {}
+
+std::vector<AudioDecoder::ParseResult> AudioDecoderPcmA::ParsePayload(
+    rtc::Buffer&& payload,
+    uint32_t timestamp) {
+  return LegacyEncodedAudioFrame::SplitBySamples(
+      this, std::move(payload), timestamp, 8 * num_channels_, 8);
+}
+
+int AudioDecoderPcmA::SampleRateHz() const {
+  return 8000;
+}
+
+size_t AudioDecoderPcmA::Channels() const {
+  return num_channels_;
+}
+
+int AudioDecoderPcmA::DecodeInternal(const uint8_t* encoded,
+                                     size_t encoded_len,
+                                     int sample_rate_hz,
+                                     int16_t* decoded,
+                                     SpeechType* speech_type) {
+  RTC_DCHECK_EQ(SampleRateHz(), sample_rate_hz);
+  int16_t temp_type = 1;  // Default is speech.
+  size_t ret = WebRtcG711_DecodeA(encoded, encoded_len, decoded, &temp_type);
+  *speech_type = ConvertSpeechType(temp_type);
+  return static_cast<int>(ret);
+}
+
+int AudioDecoderPcmA::PacketDuration(const uint8_t* encoded,
+                                     size_t encoded_len) const {
+  // One encoded byte per sample per channel.
+  return static_cast<int>(encoded_len / Channels());
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/g711/audio_decoder_pcm.h b/modules/audio_coding/codecs/g711/audio_decoder_pcm.h
new file mode 100644
index 0000000..29e4fa6
--- /dev/null
+++ b/modules/audio_coding/codecs/g711/audio_decoder_pcm.h
@@ -0,0 +1,70 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_G711_AUDIO_DECODER_PCM_H_
+#define MODULES_AUDIO_CODING_CODECS_G711_AUDIO_DECODER_PCM_H_
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class AudioDecoderPcmU final : public AudioDecoder {
+ public:
+  explicit AudioDecoderPcmU(size_t num_channels) : num_channels_(num_channels) {
+    RTC_DCHECK_GE(num_channels, 1);
+  }
+  void Reset() override;
+  std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
+                                        uint32_t timestamp) override;
+  int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+  int SampleRateHz() const override;
+  size_t Channels() const override;
+
+ protected:
+  int DecodeInternal(const uint8_t* encoded,
+                     size_t encoded_len,
+                     int sample_rate_hz,
+                     int16_t* decoded,
+                     SpeechType* speech_type) override;
+
+ private:
+  const size_t num_channels_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderPcmU);
+};
+
+class AudioDecoderPcmA final : public AudioDecoder {
+ public:
+  explicit AudioDecoderPcmA(size_t num_channels) : num_channels_(num_channels) {
+    RTC_DCHECK_GE(num_channels, 1);
+  }
+  void Reset() override;
+  std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
+                                        uint32_t timestamp) override;
+  int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+  int SampleRateHz() const override;
+  size_t Channels() const override;
+
+ protected:
+  int DecodeInternal(const uint8_t* encoded,
+                     size_t encoded_len,
+                     int sample_rate_hz,
+                     int16_t* decoded,
+                     SpeechType* speech_type) override;
+
+ private:
+  const size_t num_channels_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderPcmA);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_CODECS_G711_AUDIO_DECODER_PCM_H_
diff --git a/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc b/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
new file mode 100644
index 0000000..9fb94fd
--- /dev/null
+++ b/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
@@ -0,0 +1,142 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/codecs/g711/g711_interface.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+template <typename T>
+typename T::Config CreateConfig(const CodecInst& codec_inst) {
+  typename T::Config config;
+  config.frame_size_ms = codec_inst.pacsize / 8;
+  config.num_channels = codec_inst.channels;
+  config.payload_type = codec_inst.pltype;
+  return config;
+}
+
+}  // namespace
+
+bool AudioEncoderPcm::Config::IsOk() const {
+  return (frame_size_ms % 10 == 0) && (num_channels >= 1);
+}
+
+AudioEncoderPcm::AudioEncoderPcm(const Config& config, int sample_rate_hz)
+    : sample_rate_hz_(sample_rate_hz),
+      num_channels_(config.num_channels),
+      payload_type_(config.payload_type),
+      num_10ms_frames_per_packet_(
+          static_cast<size_t>(config.frame_size_ms / 10)),
+      full_frame_samples_(
+          config.num_channels * config.frame_size_ms * sample_rate_hz / 1000),
+      first_timestamp_in_buffer_(0) {
+  RTC_CHECK_GT(sample_rate_hz, 0) << "Sample rate must be larger than 0 Hz";
+  RTC_CHECK_EQ(config.frame_size_ms % 10, 0)
+      << "Frame size must be an integer multiple of 10 ms.";
+  speech_buffer_.reserve(full_frame_samples_);
+}
+
+AudioEncoderPcm::~AudioEncoderPcm() = default;
+
+int AudioEncoderPcm::SampleRateHz() const {
+  return sample_rate_hz_;
+}
+
+size_t AudioEncoderPcm::NumChannels() const {
+  return num_channels_;
+}
+
+size_t AudioEncoderPcm::Num10MsFramesInNextPacket() const {
+  return num_10ms_frames_per_packet_;
+}
+
+size_t AudioEncoderPcm::Max10MsFramesInAPacket() const {
+  return num_10ms_frames_per_packet_;
+}
+
+int AudioEncoderPcm::GetTargetBitrate() const {
+  return static_cast<int>(
+      8 * BytesPerSample() * SampleRateHz() * NumChannels());
+}
+
+AudioEncoder::EncodedInfo AudioEncoderPcm::EncodeImpl(
+    uint32_t rtp_timestamp,
+    rtc::ArrayView<const int16_t> audio,
+    rtc::Buffer* encoded) {
+  if (speech_buffer_.empty()) {
+    first_timestamp_in_buffer_ = rtp_timestamp;
+  }
+  speech_buffer_.insert(speech_buffer_.end(), audio.begin(), audio.end());
+  if (speech_buffer_.size() < full_frame_samples_) {
+    return EncodedInfo();
+  }
+  RTC_CHECK_EQ(speech_buffer_.size(), full_frame_samples_);
+  EncodedInfo info;
+  info.encoded_timestamp = first_timestamp_in_buffer_;
+  info.payload_type = payload_type_;
+  info.encoded_bytes =
+      encoded->AppendData(full_frame_samples_ * BytesPerSample(),
+                          [&] (rtc::ArrayView<uint8_t> encoded) {
+                            return EncodeCall(&speech_buffer_[0],
+                                              full_frame_samples_,
+                                              encoded.data());
+                          });
+  speech_buffer_.clear();
+  info.encoder_type = GetCodecType();
+  return info;
+}
+
+void AudioEncoderPcm::Reset() {
+  speech_buffer_.clear();
+}
+
+AudioEncoderPcmA::AudioEncoderPcmA(const CodecInst& codec_inst)
+    : AudioEncoderPcmA(CreateConfig<AudioEncoderPcmA>(codec_inst)) {}
+
+size_t AudioEncoderPcmA::EncodeCall(const int16_t* audio,
+                                    size_t input_len,
+                                    uint8_t* encoded) {
+  return WebRtcG711_EncodeA(audio, input_len, encoded);
+}
+
+size_t AudioEncoderPcmA::BytesPerSample() const {
+  return 1;
+}
+
+AudioEncoder::CodecType AudioEncoderPcmA::GetCodecType() const {
+  return AudioEncoder::CodecType::kPcmA;
+}
+
+AudioEncoderPcmU::AudioEncoderPcmU(const CodecInst& codec_inst)
+    : AudioEncoderPcmU(CreateConfig<AudioEncoderPcmU>(codec_inst)) {}
+
+size_t AudioEncoderPcmU::EncodeCall(const int16_t* audio,
+                                    size_t input_len,
+                                    uint8_t* encoded) {
+  return WebRtcG711_EncodeU(audio, input_len, encoded);
+}
+
+size_t AudioEncoderPcmU::BytesPerSample() const {
+  return 1;
+}
+
+AudioEncoder::CodecType AudioEncoderPcmU::GetCodecType() const {
+  return AudioEncoder::CodecType::kPcmU;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/g711/audio_encoder_pcm.h b/modules/audio_coding/codecs/g711/audio_encoder_pcm.h
new file mode 100644
index 0000000..37b67cf
--- /dev/null
+++ b/modules/audio_coding/codecs/g711/audio_encoder_pcm.h
@@ -0,0 +1,124 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_G711_AUDIO_ENCODER_PCM_H_
+#define MODULES_AUDIO_CODING_CODECS_G711_AUDIO_ENCODER_PCM_H_
+
+#include <vector>
+
+#include "api/audio_codecs/audio_encoder.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class AudioEncoderPcm : public AudioEncoder {
+ public:
+  struct Config {
+   public:
+    bool IsOk() const;
+
+    int frame_size_ms;
+    size_t num_channels;
+    int payload_type;
+
+   protected:
+    explicit Config(int pt)
+        : frame_size_ms(20), num_channels(1), payload_type(pt) {}
+  };
+
+  ~AudioEncoderPcm() override;
+
+  int SampleRateHz() const override;
+  size_t NumChannels() const override;
+  size_t Num10MsFramesInNextPacket() const override;
+  size_t Max10MsFramesInAPacket() const override;
+  int GetTargetBitrate() const override;
+  void Reset() override;
+
+ protected:
+  AudioEncoderPcm(const Config& config, int sample_rate_hz);
+
+  EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
+                         rtc::ArrayView<const int16_t> audio,
+                         rtc::Buffer* encoded) override;
+
+  virtual size_t EncodeCall(const int16_t* audio,
+                            size_t input_len,
+                            uint8_t* encoded) = 0;
+
+  virtual size_t BytesPerSample() const = 0;
+
+  // Used to set EncodedInfoLeaf::encoder_type in
+  // AudioEncoderPcm::EncodeImpl
+  virtual AudioEncoder::CodecType GetCodecType() const = 0;
+
+ private:
+  const int sample_rate_hz_;
+  const size_t num_channels_;
+  const int payload_type_;
+  const size_t num_10ms_frames_per_packet_;
+  const size_t full_frame_samples_;
+  std::vector<int16_t> speech_buffer_;
+  uint32_t first_timestamp_in_buffer_;
+};
+
+struct CodecInst;
+
+class AudioEncoderPcmA final : public AudioEncoderPcm {
+ public:
+  struct Config : public AudioEncoderPcm::Config {
+    Config() : AudioEncoderPcm::Config(8) {}
+  };
+
+  explicit AudioEncoderPcmA(const Config& config)
+      : AudioEncoderPcm(config, kSampleRateHz) {}
+  explicit AudioEncoderPcmA(const CodecInst& codec_inst);
+
+ protected:
+  size_t EncodeCall(const int16_t* audio,
+                    size_t input_len,
+                    uint8_t* encoded) override;
+
+  size_t BytesPerSample() const override;
+
+  AudioEncoder::CodecType GetCodecType() const override;
+
+ private:
+  static const int kSampleRateHz = 8000;
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderPcmA);
+};
+
+class AudioEncoderPcmU final : public AudioEncoderPcm {
+ public:
+  struct Config : public AudioEncoderPcm::Config {
+    Config() : AudioEncoderPcm::Config(0) {}
+  };
+
+  explicit AudioEncoderPcmU(const Config& config)
+      : AudioEncoderPcm(config, kSampleRateHz) {}
+  explicit AudioEncoderPcmU(const CodecInst& codec_inst);
+
+ protected:
+  size_t EncodeCall(const int16_t* audio,
+                    size_t input_len,
+                    uint8_t* encoded) override;
+
+  size_t BytesPerSample() const override;
+
+  AudioEncoder::CodecType GetCodecType() const override;
+
+ private:
+  static const int kSampleRateHz = 8000;
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderPcmU);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_CODECS_G711_AUDIO_ENCODER_PCM_H_
diff --git a/modules/audio_coding/codecs/g711/g711.c b/modules/audio_coding/codecs/g711/g711.c
new file mode 100644
index 0000000..0c65764
--- /dev/null
+++ b/modules/audio_coding/codecs/g711/g711.c
@@ -0,0 +1,73 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * g711.c - A-law and u-law transcoding routines
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *
+ * Copyright (C) 2006 Steve Underwood
+ *
+ *  Despite my general liking of the GPL, I place this code in the
+ *  public domain for the benefit of all mankind - even the slimy
+ *  ones who might try to proprietize my work and use it to my
+ *  detriment.
+ *
+ * $Id: g711.c,v 1.1 2006/06/07 15:46:39 steveu Exp $
+ *
+ * Modifications for WebRtc, 2011/04/28, by tlegrand:
+ * -Removed unused include files
+ * -Changed to use WebRtc types
+ * -Added option to run encoder bitexact with ITU-T reference implementation
+ */
+
+#include "modules/audio_coding/codecs/g711/g711.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+/* Copied from the CCITT G.711 specification */
+static const uint8_t ulaw_to_alaw_table[256] = {
+   42,  43,  40,  41,  46,  47,  44,  45,  34,  35,  32,  33,  38,  39,  36,
+   37,  58,  59,  56,  57,  62,  63,  60,  61,  50,  51,  48,  49,  54,  55,
+   52,  53,  10,  11,   8,   9,  14,  15,  12,  13,   2,   3,   0,   1,   6,
+    7,   4,  26,  27,  24,  25,  30,  31,  28,  29,  18,  19,  16,  17,  22,
+   23,  20,  21, 106, 104, 105, 110, 111, 108, 109,  98,  99,  96,  97, 102,
+  103, 100, 101, 122, 120, 126, 127, 124, 125, 114, 115, 112, 113, 118, 119,
+  116, 117,  75,  73,  79,  77,  66,  67,  64,  65,  70,  71,  68,  69,  90,
+   91,  88,  89,  94,  95,  92,  93,  82,  82,  83,  83,  80,  80,  81,  81,
+   86,  86,  87,  87,  84,  84,  85,  85, 170, 171, 168, 169, 174, 175, 172,
+  173, 162, 163, 160, 161, 166, 167, 164, 165, 186, 187, 184, 185, 190, 191,
+  188, 189, 178, 179, 176, 177, 182, 183, 180, 181, 138, 139, 136, 137, 142,
+  143, 140, 141, 130, 131, 128, 129, 134, 135, 132, 154, 155, 152, 153, 158,
+  159, 156, 157, 146, 147, 144, 145, 150, 151, 148, 149, 234, 232, 233, 238,
+  239, 236, 237, 226, 227, 224, 225, 230, 231, 228, 229, 250, 248, 254, 255,
+  252, 253, 242, 243, 240, 241, 246, 247, 244, 245, 203, 201, 207, 205, 194,
+  195, 192, 193, 198, 199, 196, 197, 218, 219, 216, 217, 222, 223, 220, 221,
+  210, 210, 211, 211, 208, 208, 209, 209, 214, 214, 215, 215, 212, 212, 213,
+  213
+};
+
+/* These transcoding tables are copied from the CCITT G.711 specification. To
+   achieve optimal results, do not change them. */
+static const uint8_t alaw_to_ulaw_table[256] = {
+   42,  43,  40,  41,  46,  47,  44,  45,  34,  35,  32,  33,  38,  39,  36,
+   37,  57,  58,  55,  56,  61,  62,  59,  60,  49,  50,  47,  48,  53,  54,
+   51,  52,  10,  11,   8,   9,  14,  15,  12,  13,   2,   3,   0,   1,   6,
+    7,   4,   5,  26,  27,  24,  25,  30,  31,  28,  29,  18,  19,  16,  17,
+   22,  23,  20,  21,  98,  99,  96,  97, 102, 103, 100, 101,  93,  93,  92,
+   92,  95,  95,  94,  94, 116, 118, 112, 114, 124, 126, 120, 122, 106, 107,
+  104, 105, 110, 111, 108, 109,  72,  73,  70,  71,  76,  77,  74,  75,  64,
+   65,  63,  63,  68,  69,  66,  67,  86,  87,  84,  85,  90,  91,  88,  89,
+   79,  79,  78,  78,  82,  83,  80,  81, 170, 171, 168, 169, 174, 175, 172,
+  173, 162, 163, 160, 161, 166, 167, 164, 165, 185, 186, 183, 184, 189, 190,
+  187, 188, 177, 178, 175, 176, 181, 182, 179, 180, 138, 139, 136, 137, 142,
+  143, 140, 141, 130, 131, 128, 129, 134, 135, 132, 133, 154, 155, 152, 153,
+  158, 159, 156, 157, 146, 147, 144, 145, 150, 151, 148, 149, 226, 227, 224,
+  225, 230, 231, 228, 229, 221, 221, 220, 220, 223, 223, 222, 222, 244, 246,
+  240, 242, 252, 254, 248, 250, 234, 235, 232, 233, 238, 239, 236, 237, 200,
+  201, 198, 199, 204, 205, 202, 203, 192, 193, 191, 191, 196, 197, 194, 195,
+  214, 215, 212, 213, 218, 219, 216, 217, 207, 207, 206, 206, 210, 211, 208,
+  209
+};
+
+uint8_t alaw_to_ulaw(uint8_t alaw) { return alaw_to_ulaw_table[alaw]; }
+
+uint8_t ulaw_to_alaw(uint8_t ulaw) { return ulaw_to_alaw_table[ulaw]; }
diff --git a/modules/audio_coding/codecs/g711/g711.h b/modules/audio_coding/codecs/g711/g711.h
new file mode 100644
index 0000000..8b1fc81
--- /dev/null
+++ b/modules/audio_coding/codecs/g711/g711.h
@@ -0,0 +1,344 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * g711.h - In line A-law and u-law conversion routines
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *
+ * Copyright (C) 2001 Steve Underwood
+ *
+ *  Despite my general liking of the GPL, I place this code in the
+ *  public domain for the benefit of all mankind - even the slimy
+ *  ones who might try to proprietize my work and use it to my
+ *  detriment.
+ *
+ * $Id: g711.h,v 1.1 2006/06/07 15:46:39 steveu Exp $
+ *
+ * Modifications for WebRtc, 2011/04/28, by tlegrand:
+ * -Changed to use WebRtc types
+ * -Changed __inline__ to __inline
+ * -Two changes to make implementation bitexact with ITU-T reference implementation
+ */
+
+/*! \page g711_page A-law and mu-law handling
+Lookup tables for A-law and u-law look attractive, until you consider the impact
+on the CPU cache. If it causes a substantial area of your processor cache to get
+hit too often, cache sloshing will severely slow things down. The main reason
+these routines are slow in C, is the lack of direct access to the CPU's "find
+the first 1" instruction. A little in-line assembler fixes that, and the
+conversion routines can be faster than lookup tables, in most real world usage.
+A "find the first 1" instruction is available on most modern CPUs, and is a
+much underused feature.
+
+If an assembly language method of bit searching is not available, these routines
+revert to a method that can be a little slow, so the cache thrashing might not
+seem so bad :(
+
+Feel free to submit patches to add fast "find the first 1" support for your own
+favourite processor.
+
+Look up tables are used for transcoding between A-law and u-law, since it is
+difficult to achieve the precise transcoding procedure laid down in the G.711
+specification by other means.
+*/
+
+#if !defined(_G711_H_)
+#define _G711_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+#if defined(__i386__)
+/*! \brief Find the bit position of the highest set bit in a word
+    \param bits The word to be searched
+    \return The bit number of the highest set bit, or -1 if the word is zero. */
+static __inline__ int top_bit(unsigned int bits) {
+  int res;
+
+  __asm__ __volatile__(" movl $-1,%%edx;\n"
+                       " bsrl %%eax,%%edx;\n"
+                       : "=d" (res)
+                       : "a" (bits));
+  return res;
+}
+
+/*! \brief Find the bit position of the lowest set bit in a word
+    \param bits The word to be searched
+    \return The bit number of the lowest set bit, or -1 if the word is zero. */
+static __inline__ int bottom_bit(unsigned int bits) {
+  int res;
+
+  __asm__ __volatile__(" movl $-1,%%edx;\n"
+                       " bsfl %%eax,%%edx;\n"
+                       : "=d" (res)
+                       : "a" (bits));
+  return res;
+}
+#elif defined(__x86_64__)
+static __inline__ int top_bit(unsigned int bits) {
+  int res;
+
+  __asm__ __volatile__(" movq $-1,%%rdx;\n"
+                       " bsrq %%rax,%%rdx;\n"
+                       : "=d" (res)
+                       : "a" (bits));
+  return res;
+}
+
+static __inline__ int bottom_bit(unsigned int bits) {
+  int res;
+
+  __asm__ __volatile__(" movq $-1,%%rdx;\n"
+                       " bsfq %%rax,%%rdx;\n"
+                       : "=d" (res)
+                       : "a" (bits));
+  return res;
+}
+#else
+static __inline int top_bit(unsigned int bits) {
+  int i;
+
+  if (bits == 0) {
+    return -1;
+  }
+  i = 0;
+  if (bits & 0xFFFF0000) {
+    bits &= 0xFFFF0000;
+    i += 16;
+  }
+  if (bits & 0xFF00FF00) {
+    bits &= 0xFF00FF00;
+    i += 8;
+  }
+  if (bits & 0xF0F0F0F0) {
+    bits &= 0xF0F0F0F0;
+    i += 4;
+  }
+  if (bits & 0xCCCCCCCC) {
+    bits &= 0xCCCCCCCC;
+    i += 2;
+  }
+  if (bits & 0xAAAAAAAA) {
+    bits &= 0xAAAAAAAA;
+    i += 1;
+  }
+  return i;
+}
+
+static __inline int bottom_bit(unsigned int bits) {
+  int i;
+
+  if (bits == 0) {
+    return -1;
+  }
+  i = 32;
+  if (bits & 0x0000FFFF) {
+    bits &= 0x0000FFFF;
+    i -= 16;
+  }
+  if (bits & 0x00FF00FF) {
+    bits &= 0x00FF00FF;
+    i -= 8;
+  }
+  if (bits & 0x0F0F0F0F) {
+    bits &= 0x0F0F0F0F;
+    i -= 4;
+  }
+  if (bits & 0x33333333) {
+    bits &= 0x33333333;
+    i -= 2;
+  }
+  if (bits & 0x55555555) {
+    bits &= 0x55555555;
+    i -= 1;
+  }
+  return i;
+}
+#endif
+
+/* N.B. It is tempting to use look-up tables for A-law and u-law conversion.
+ *      However, you should consider the cache footprint.
+ *
+ *      A 64K byte table for linear to x-law and a 512 byte table for x-law to
+ *      linear sound like peanuts these days, and shouldn't an array lookup be
+ *      real fast? No! When the cache sloshes as badly as this one will, a tight
+ *      calculation may be better. The messiest part is normally finding the
+ *      segment, but a little inline assembly can fix that on an i386, x86_64 and
+ *      many other modern processors.
+ */
+
+/*
+ * Mu-law is basically as follows:
+ *
+ *      Biased Linear Input Code        Compressed Code
+ *      ------------------------        ---------------
+ *      00000001wxyza                   000wxyz
+ *      0000001wxyzab                   001wxyz
+ *      000001wxyzabc                   010wxyz
+ *      00001wxyzabcd                   011wxyz
+ *      0001wxyzabcde                   100wxyz
+ *      001wxyzabcdef                   101wxyz
+ *      01wxyzabcdefg                   110wxyz
+ *      1wxyzabcdefgh                   111wxyz
+ *
+ * Each biased linear code has a leading 1 which identifies the segment
+ * number. The value of the segment number is equal to 7 minus the number
+ * of leading 0's. The quantization interval is directly available as the
+ * four bits wxyz.  * The trailing bits (a - h) are ignored.
+ *
+ * Ordinarily the complement of the resulting code word is used for
+ * transmission, and so the code word is complemented before it is returned.
+ *
+ * For further information see John C. Bellamy's Digital Telephony, 1982,
+ * John Wiley & Sons, pps 98-111 and 472-476.
+ */
+
+//#define ULAW_ZEROTRAP                 /* turn on the trap as per the MIL-STD */
+#define ULAW_BIAS 0x84  /* Bias for linear code. */
+
+/*! \brief Encode a linear sample to u-law
+    \param linear The sample to encode.
+    \return The u-law value.
+*/
+static __inline uint8_t linear_to_ulaw(int linear) {
+  uint8_t u_val;
+  int mask;
+  int seg;
+
+  /* Get the sign and the magnitude of the value. */
+  if (linear < 0) {
+    /* WebRtc, tlegrand: -1 added to get bitexact to reference implementation */
+    linear = ULAW_BIAS - linear - 1;
+    mask = 0x7F;
+  } else {
+    linear = ULAW_BIAS + linear;
+    mask = 0xFF;
+  }
+
+  seg = top_bit(linear | 0xFF) - 7;
+
+  /*
+   * Combine the sign, segment, quantization bits,
+   * and complement the code word.
+   */
+  if (seg >= 8)
+    u_val = (uint8_t)(0x7F ^ mask);
+  else
+    u_val = (uint8_t)(((seg << 4) | ((linear >> (seg + 3)) & 0xF)) ^ mask);
+#ifdef ULAW_ZEROTRAP
+  /* Optional ITU trap */
+  if (u_val == 0)
+    u_val = 0x02;
+#endif
+  return u_val;
+}
+
+/*! \brief Decode an u-law sample to a linear value.
+    \param ulaw The u-law sample to decode.
+    \return The linear value.
+*/
+static __inline int16_t ulaw_to_linear(uint8_t ulaw) {
+  int t;
+
+  /* Complement to obtain normal u-law value. */
+  ulaw = ~ulaw;
+  /*
+   * Extract and bias the quantization bits. Then
+   * shift up by the segment number and subtract out the bias.
+   */
+  t = (((ulaw & 0x0F) << 3) + ULAW_BIAS) << (((int) ulaw & 0x70) >> 4);
+  return (int16_t)((ulaw & 0x80) ? (ULAW_BIAS - t) : (t - ULAW_BIAS));
+}
+
+/*
+ * A-law is basically as follows:
+ *
+ *      Linear Input Code        Compressed Code
+ *      -----------------        ---------------
+ *      0000000wxyza             000wxyz
+ *      0000001wxyza             001wxyz
+ *      000001wxyzab             010wxyz
+ *      00001wxyzabc             011wxyz
+ *      0001wxyzabcd             100wxyz
+ *      001wxyzabcde             101wxyz
+ *      01wxyzabcdef             110wxyz
+ *      1wxyzabcdefg             111wxyz
+ *
+ * For further information see John C. Bellamy's Digital Telephony, 1982,
+ * John Wiley & Sons, pps 98-111 and 472-476.
+ */
+
+#define ALAW_AMI_MASK 0x55
+
+/*! \brief Encode a linear sample to A-law
+    \param linear The sample to encode.
+    \return The A-law value.
+*/
+static __inline uint8_t linear_to_alaw(int linear) {
+  int mask;
+  int seg;
+
+  if (linear >= 0) {
+    /* Sign (bit 7) bit = 1 */
+    mask = ALAW_AMI_MASK | 0x80;
+  } else {
+    /* Sign (bit 7) bit = 0 */
+    mask = ALAW_AMI_MASK;
+    /* WebRtc, tlegrand: Changed from -8 to -1 to get bitexact to reference
+     * implementation */
+    linear = -linear - 1;
+  }
+
+  /* Convert the scaled magnitude to segment number. */
+  seg = top_bit(linear | 0xFF) - 7;
+  if (seg >= 8) {
+    if (linear >= 0) {
+      /* Out of range. Return maximum value. */
+      return (uint8_t)(0x7F ^ mask);
+    }
+    /* We must be just a tiny step below zero */
+    return (uint8_t)(0x00 ^ mask);
+  }
+  /* Combine the sign, segment, and quantization bits. */
+  return (uint8_t)(((seg << 4) | ((linear >> ((seg) ? (seg + 3) : 4)) & 0x0F)) ^
+                   mask);
+}
+
+/*! \brief Decode an A-law sample to a linear value.
+    \param alaw The A-law sample to decode.
+    \return The linear value.
+*/
+static __inline int16_t alaw_to_linear(uint8_t alaw) {
+  int i;
+  int seg;
+
+  alaw ^= ALAW_AMI_MASK;
+  i = ((alaw & 0x0F) << 4);
+  seg = (((int) alaw & 0x70) >> 4);
+  if (seg)
+    i = (i + 0x108) << (seg - 1);
+  else
+    i += 8;
+  return (int16_t)((alaw & 0x80) ? i : -i);
+}
+
+/*! \brief Transcode from A-law to u-law, using the procedure defined in G.711.
+    \param alaw The A-law sample to transcode.
+    \return The best matching u-law value.
+*/
+uint8_t alaw_to_ulaw(uint8_t alaw);
+
+/*! \brief Transcode from u-law to A-law, using the procedure defined in G.711.
+    \param alaw The u-law sample to transcode.
+    \return The best matching A-law value.
+*/
+uint8_t ulaw_to_alaw(uint8_t ulaw);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/modules/audio_coding/codecs/g711/g711_interface.c b/modules/audio_coding/codecs/g711/g711_interface.c
new file mode 100644
index 0000000..9c31cbc
--- /dev/null
+++ b/modules/audio_coding/codecs/g711/g711_interface.c
@@ -0,0 +1,60 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+
+#include "modules/audio_coding/codecs/g711/g711.h"
+#include "modules/audio_coding/codecs/g711/g711_interface.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+size_t WebRtcG711_EncodeA(const int16_t* speechIn,
+                          size_t len,
+                          uint8_t* encoded) {
+  size_t n;
+  for (n = 0; n < len; n++)
+    encoded[n] = linear_to_alaw(speechIn[n]);
+  return len;
+}
+
+size_t WebRtcG711_EncodeU(const int16_t* speechIn,
+                          size_t len,
+                          uint8_t* encoded) {
+  size_t n;
+  for (n = 0; n < len; n++)
+    encoded[n] = linear_to_ulaw(speechIn[n]);
+  return len;
+}
+
+size_t WebRtcG711_DecodeA(const uint8_t* encoded,
+                          size_t len,
+                          int16_t* decoded,
+                          int16_t* speechType) {
+  size_t n;
+  for (n = 0; n < len; n++)
+    decoded[n] = alaw_to_linear(encoded[n]);
+  *speechType = 1;
+  return len;
+}
+
+size_t WebRtcG711_DecodeU(const uint8_t* encoded,
+                          size_t len,
+                          int16_t* decoded,
+                          int16_t* speechType) {
+  size_t n;
+  for (n = 0; n < len; n++)
+    decoded[n] = ulaw_to_linear(encoded[n]);
+  *speechType = 1;
+  return len;
+}
+
+int16_t WebRtcG711_Version(char* version, int16_t lenBytes) {
+  strncpy(version, "2.0.0", lenBytes);
+  return 0;
+}
diff --git a/modules/audio_coding/codecs/g711/g711_interface.h b/modules/audio_coding/codecs/g711/g711_interface.h
new file mode 100644
index 0000000..1f23da6
--- /dev/null
+++ b/modules/audio_coding/codecs/g711/g711_interface.h
@@ -0,0 +1,135 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_G711_G711_INTERFACE_H_
+#define MODULES_AUDIO_CODING_CODECS_G711_G711_INTERFACE_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+// Comfort noise constants
+#define G711_WEBRTC_SPEECH 1
+#define G711_WEBRTC_CNG 2
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/****************************************************************************
+ * WebRtcG711_EncodeA(...)
+ *
+ * This function encodes a G711 A-law frame and inserts it into a packet.
+ * Input speech length has be of any length.
+ *
+ * Input:
+ *      - speechIn           : Input speech vector
+ *      - len                : Samples in speechIn
+ *
+ * Output:
+ *      - encoded            : The encoded data vector
+ *
+ * Return value              : Length (in bytes) of coded data.
+ *                             Always equal to len input parameter.
+ */
+
+size_t WebRtcG711_EncodeA(const int16_t* speechIn,
+                          size_t len,
+                          uint8_t* encoded);
+
+/****************************************************************************
+ * WebRtcG711_EncodeU(...)
+ *
+ * This function encodes a G711 U-law frame and inserts it into a packet.
+ * Input speech length has be of any length.
+ *
+ * Input:
+ *      - speechIn           : Input speech vector
+ *      - len                : Samples in speechIn
+ *
+ * Output:
+ *      - encoded            : The encoded data vector
+ *
+ * Return value              : Length (in bytes) of coded data.
+ *                             Always equal to len input parameter.
+ */
+
+size_t WebRtcG711_EncodeU(const int16_t* speechIn,
+                          size_t len,
+                          uint8_t* encoded);
+
+/****************************************************************************
+ * WebRtcG711_DecodeA(...)
+ *
+ * This function decodes a packet G711 A-law frame.
+ *
+ * Input:
+ *      - encoded            : Encoded data
+ *      - len                : Bytes in encoded vector
+ *
+ * Output:
+ *      - decoded            : The decoded vector
+ *      - speechType         : 1 normal, 2 CNG (for G711 it should
+ *                             always return 1 since G711 does not have a
+ *                             built-in DTX/CNG scheme)
+ *
+ * Return value              : >0 - Samples in decoded vector
+ *                             -1 - Error
+ */
+
+size_t WebRtcG711_DecodeA(const uint8_t* encoded,
+                          size_t len,
+                          int16_t* decoded,
+                          int16_t* speechType);
+
+/****************************************************************************
+ * WebRtcG711_DecodeU(...)
+ *
+ * This function decodes a packet G711 U-law frame.
+ *
+ * Input:
+ *      - encoded            : Encoded data
+ *      - len                : Bytes in encoded vector
+ *
+ * Output:
+ *      - decoded            : The decoded vector
+ *      - speechType         : 1 normal, 2 CNG (for G711 it should
+ *                             always return 1 since G711 does not have a
+ *                             built-in DTX/CNG scheme)
+ *
+ * Return value              : >0 - Samples in decoded vector
+ *                             -1 - Error
+ */
+
+size_t WebRtcG711_DecodeU(const uint8_t* encoded,
+                          size_t len,
+                          int16_t* decoded,
+                          int16_t* speechType);
+
+/**********************************************************************
+* WebRtcG711_Version(...)
+*
+* This function gives the version string of the G.711 codec.
+*
+* Input:
+*      - lenBytes:     the size of Allocated space (in Bytes) where
+*                      the version number is written to (in string format).
+*
+* Output:
+*      - version:      Pointer to a buffer where the version number is
+*                      written to.
+*
+*/
+
+int16_t WebRtcG711_Version(char* version, int16_t lenBytes);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // MODULES_AUDIO_CODING_CODECS_G711_G711_INTERFACE_H_
diff --git a/modules/audio_coding/codecs/g722/audio_decoder_g722.cc b/modules/audio_coding/codecs/g722/audio_decoder_g722.cc
new file mode 100644
index 0000000..ea4a721
--- /dev/null
+++ b/modules/audio_coding/codecs/g722/audio_decoder_g722.cc
@@ -0,0 +1,162 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/g722/audio_decoder_g722.h"
+
+#include <string.h>
+
+#include "modules/audio_coding/codecs/g722/g722_interface.h"
+#include "modules/audio_coding/codecs/legacy_encoded_audio_frame.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+AudioDecoderG722Impl::AudioDecoderG722Impl() {
+  WebRtcG722_CreateDecoder(&dec_state_);
+  WebRtcG722_DecoderInit(dec_state_);
+}
+
+AudioDecoderG722Impl::~AudioDecoderG722Impl() {
+  WebRtcG722_FreeDecoder(dec_state_);
+}
+
+bool AudioDecoderG722Impl::HasDecodePlc() const {
+  return false;
+}
+
+int AudioDecoderG722Impl::DecodeInternal(const uint8_t* encoded,
+                                         size_t encoded_len,
+                                         int sample_rate_hz,
+                                         int16_t* decoded,
+                                         SpeechType* speech_type) {
+  RTC_DCHECK_EQ(SampleRateHz(), sample_rate_hz);
+  int16_t temp_type = 1;  // Default is speech.
+  size_t ret =
+      WebRtcG722_Decode(dec_state_, encoded, encoded_len, decoded, &temp_type);
+  *speech_type = ConvertSpeechType(temp_type);
+  return static_cast<int>(ret);
+}
+
+void AudioDecoderG722Impl::Reset() {
+  WebRtcG722_DecoderInit(dec_state_);
+}
+
+std::vector<AudioDecoder::ParseResult> AudioDecoderG722Impl::ParsePayload(
+    rtc::Buffer&& payload,
+    uint32_t timestamp) {
+  return LegacyEncodedAudioFrame::SplitBySamples(this, std::move(payload),
+                                                 timestamp, 8, 16);
+}
+
+int AudioDecoderG722Impl::PacketDuration(const uint8_t* encoded,
+                                         size_t encoded_len) const {
+  // 1/2 encoded byte per sample per channel.
+  return static_cast<int>(2 * encoded_len / Channels());
+}
+
+int AudioDecoderG722Impl::SampleRateHz() const {
+  return 16000;
+}
+
+size_t AudioDecoderG722Impl::Channels() const {
+  return 1;
+}
+
+AudioDecoderG722StereoImpl::AudioDecoderG722StereoImpl() {
+  WebRtcG722_CreateDecoder(&dec_state_left_);
+  WebRtcG722_CreateDecoder(&dec_state_right_);
+  WebRtcG722_DecoderInit(dec_state_left_);
+  WebRtcG722_DecoderInit(dec_state_right_);
+}
+
+AudioDecoderG722StereoImpl::~AudioDecoderG722StereoImpl() {
+  WebRtcG722_FreeDecoder(dec_state_left_);
+  WebRtcG722_FreeDecoder(dec_state_right_);
+}
+
+int AudioDecoderG722StereoImpl::DecodeInternal(const uint8_t* encoded,
+                                               size_t encoded_len,
+                                               int sample_rate_hz,
+                                               int16_t* decoded,
+                                               SpeechType* speech_type) {
+  RTC_DCHECK_EQ(SampleRateHz(), sample_rate_hz);
+  int16_t temp_type = 1;  // Default is speech.
+  // De-interleave the bit-stream into two separate payloads.
+  uint8_t* encoded_deinterleaved = new uint8_t[encoded_len];
+  SplitStereoPacket(encoded, encoded_len, encoded_deinterleaved);
+  // Decode left and right.
+  size_t decoded_len = WebRtcG722_Decode(dec_state_left_, encoded_deinterleaved,
+                                         encoded_len / 2, decoded, &temp_type);
+  size_t ret = WebRtcG722_Decode(
+      dec_state_right_, &encoded_deinterleaved[encoded_len / 2],
+      encoded_len / 2, &decoded[decoded_len], &temp_type);
+  if (ret == decoded_len) {
+    ret += decoded_len;  // Return total number of samples.
+    // Interleave output.
+    for (size_t k = ret / 2; k < ret; k++) {
+      int16_t temp = decoded[k];
+      memmove(&decoded[2 * k - ret + 2], &decoded[2 * k - ret + 1],
+              (ret - k - 1) * sizeof(int16_t));
+      decoded[2 * k - ret + 1] = temp;
+    }
+  }
+  *speech_type = ConvertSpeechType(temp_type);
+  delete[] encoded_deinterleaved;
+  return static_cast<int>(ret);
+}
+
+int AudioDecoderG722StereoImpl::SampleRateHz() const {
+  return 16000;
+}
+
+size_t AudioDecoderG722StereoImpl::Channels() const {
+  return 2;
+}
+
+void AudioDecoderG722StereoImpl::Reset() {
+  WebRtcG722_DecoderInit(dec_state_left_);
+  WebRtcG722_DecoderInit(dec_state_right_);
+}
+
+std::vector<AudioDecoder::ParseResult> AudioDecoderG722StereoImpl::ParsePayload(
+    rtc::Buffer&& payload,
+    uint32_t timestamp) {
+  return LegacyEncodedAudioFrame::SplitBySamples(this, std::move(payload),
+                                                 timestamp, 2 * 8, 16);
+}
+
+// Split the stereo packet and place left and right channel after each other
+// in the output array.
+void AudioDecoderG722StereoImpl::SplitStereoPacket(
+    const uint8_t* encoded,
+    size_t encoded_len,
+    uint8_t* encoded_deinterleaved) {
+  // Regroup the 4 bits/sample so |l1 l2| |r1 r2| |l3 l4| |r3 r4| ...,
+  // where "lx" is 4 bits representing left sample number x, and "rx" right
+  // sample. Two samples fit in one byte, represented with |...|.
+  for (size_t i = 0; i + 1 < encoded_len; i += 2) {
+    uint8_t right_byte = ((encoded[i] & 0x0F) << 4) + (encoded[i + 1] & 0x0F);
+    encoded_deinterleaved[i] = (encoded[i] & 0xF0) + (encoded[i + 1] >> 4);
+    encoded_deinterleaved[i + 1] = right_byte;
+  }
+
+  // Move one byte representing right channel each loop, and place it at the
+  // end of the bytestream vector. After looping the data is reordered to:
+  // |l1 l2| |l3 l4| ... |l(N-1) lN| |r1 r2| |r3 r4| ... |r(N-1) r(N)|,
+  // where N is the total number of samples.
+  for (size_t i = 0; i < encoded_len / 2; i++) {
+    uint8_t right_byte = encoded_deinterleaved[i + 1];
+    memmove(&encoded_deinterleaved[i + 1], &encoded_deinterleaved[i + 2],
+            encoded_len - i - 2);
+    encoded_deinterleaved[encoded_len - 1] = right_byte;
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/g722/audio_decoder_g722.h b/modules/audio_coding/codecs/g722/audio_decoder_g722.h
new file mode 100644
index 0000000..3240448
--- /dev/null
+++ b/modules/audio_coding/codecs/g722/audio_decoder_g722.h
@@ -0,0 +1,79 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_G722_AUDIO_DECODER_G722_H_
+#define MODULES_AUDIO_CODING_CODECS_G722_AUDIO_DECODER_G722_H_
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "rtc_base/constructormagic.h"
+
+typedef struct WebRtcG722DecInst G722DecInst;
+
+namespace webrtc {
+
+class AudioDecoderG722Impl final : public AudioDecoder {
+ public:
+  AudioDecoderG722Impl();
+  ~AudioDecoderG722Impl() override;
+  bool HasDecodePlc() const override;
+  void Reset() override;
+  std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
+                                        uint32_t timestamp) override;
+  int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+  int SampleRateHz() const override;
+  size_t Channels() const override;
+
+ protected:
+  int DecodeInternal(const uint8_t* encoded,
+                     size_t encoded_len,
+                     int sample_rate_hz,
+                     int16_t* decoded,
+                     SpeechType* speech_type) override;
+
+ private:
+  G722DecInst* dec_state_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderG722Impl);
+};
+
+class AudioDecoderG722StereoImpl final : public AudioDecoder {
+ public:
+  AudioDecoderG722StereoImpl();
+  ~AudioDecoderG722StereoImpl() override;
+  void Reset() override;
+  std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
+                                        uint32_t timestamp) override;
+  int SampleRateHz() const override;
+  size_t Channels() const override;
+
+ protected:
+  int DecodeInternal(const uint8_t* encoded,
+                     size_t encoded_len,
+                     int sample_rate_hz,
+                     int16_t* decoded,
+                     SpeechType* speech_type) override;
+
+ private:
+  // Splits the stereo-interleaved payload in |encoded| into separate payloads
+  // for left and right channels. The separated payloads are written to
+  // |encoded_deinterleaved|, which must hold at least |encoded_len| samples.
+  // The left channel starts at offset 0, while the right channel starts at
+  // offset encoded_len / 2 into |encoded_deinterleaved|.
+  void SplitStereoPacket(const uint8_t* encoded,
+                         size_t encoded_len,
+                         uint8_t* encoded_deinterleaved);
+
+  G722DecInst* dec_state_left_;
+  G722DecInst* dec_state_right_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderG722StereoImpl);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_CODECS_G722_AUDIO_DECODER_G722_H_
diff --git a/modules/audio_coding/codecs/g722/audio_encoder_g722.cc b/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
new file mode 100644
index 0000000..ec97ee3
--- /dev/null
+++ b/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
@@ -0,0 +1,162 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/g722/audio_encoder_g722.h"
+
+#include <algorithm>
+
+#include <limits>
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/codecs/g722/g722_interface.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+namespace {
+
+const size_t kSampleRateHz = 16000;
+
+AudioEncoderG722Config CreateConfig(const CodecInst& codec_inst) {
+  AudioEncoderG722Config config;
+  config.num_channels = rtc::dchecked_cast<int>(codec_inst.channels);
+  config.frame_size_ms = codec_inst.pacsize / 16;
+  return config;
+}
+
+}  // namespace
+
+AudioEncoderG722Impl::AudioEncoderG722Impl(const AudioEncoderG722Config& config,
+                                           int payload_type)
+    : num_channels_(config.num_channels),
+      payload_type_(payload_type),
+      num_10ms_frames_per_packet_(
+          static_cast<size_t>(config.frame_size_ms / 10)),
+      num_10ms_frames_buffered_(0),
+      first_timestamp_in_buffer_(0),
+      encoders_(new EncoderState[num_channels_]),
+      interleave_buffer_(2 * num_channels_) {
+  RTC_CHECK(config.IsOk());
+  const size_t samples_per_channel =
+      kSampleRateHz / 100 * num_10ms_frames_per_packet_;
+  for (size_t i = 0; i < num_channels_; ++i) {
+    encoders_[i].speech_buffer.reset(new int16_t[samples_per_channel]);
+    encoders_[i].encoded_buffer.SetSize(samples_per_channel / 2);
+  }
+  Reset();
+}
+
+AudioEncoderG722Impl::AudioEncoderG722Impl(const CodecInst& codec_inst)
+    : AudioEncoderG722Impl(CreateConfig(codec_inst), codec_inst.pltype) {}
+
+AudioEncoderG722Impl::~AudioEncoderG722Impl() = default;
+
+int AudioEncoderG722Impl::SampleRateHz() const {
+  return kSampleRateHz;
+}
+
+size_t AudioEncoderG722Impl::NumChannels() const {
+  return num_channels_;
+}
+
+int AudioEncoderG722Impl::RtpTimestampRateHz() const {
+  // The RTP timestamp rate for G.722 is 8000 Hz, even though it is a 16 kHz
+  // codec.
+  return kSampleRateHz / 2;
+}
+
+size_t AudioEncoderG722Impl::Num10MsFramesInNextPacket() const {
+  return num_10ms_frames_per_packet_;
+}
+
+size_t AudioEncoderG722Impl::Max10MsFramesInAPacket() const {
+  return num_10ms_frames_per_packet_;
+}
+
+int AudioEncoderG722Impl::GetTargetBitrate() const {
+  // 4 bits/sample, 16000 samples/s/channel.
+  return static_cast<int>(64000 * NumChannels());
+}
+
+void AudioEncoderG722Impl::Reset() {
+  num_10ms_frames_buffered_ = 0;
+  for (size_t i = 0; i < num_channels_; ++i)
+    RTC_CHECK_EQ(0, WebRtcG722_EncoderInit(encoders_[i].encoder));
+}
+
+AudioEncoder::EncodedInfo AudioEncoderG722Impl::EncodeImpl(
+    uint32_t rtp_timestamp,
+    rtc::ArrayView<const int16_t> audio,
+    rtc::Buffer* encoded) {
+  if (num_10ms_frames_buffered_ == 0)
+    first_timestamp_in_buffer_ = rtp_timestamp;
+
+  // Deinterleave samples and save them in each channel's buffer.
+  const size_t start = kSampleRateHz / 100 * num_10ms_frames_buffered_;
+  for (size_t i = 0; i < kSampleRateHz / 100; ++i)
+    for (size_t j = 0; j < num_channels_; ++j)
+      encoders_[j].speech_buffer[start + i] = audio[i * num_channels_ + j];
+
+  // If we don't yet have enough samples for a packet, we're done for now.
+  if (++num_10ms_frames_buffered_ < num_10ms_frames_per_packet_) {
+    return EncodedInfo();
+  }
+
+  // Encode each channel separately.
+  RTC_CHECK_EQ(num_10ms_frames_buffered_, num_10ms_frames_per_packet_);
+  num_10ms_frames_buffered_ = 0;
+  const size_t samples_per_channel = SamplesPerChannel();
+  for (size_t i = 0; i < num_channels_; ++i) {
+    const size_t bytes_encoded = WebRtcG722_Encode(
+        encoders_[i].encoder, encoders_[i].speech_buffer.get(),
+        samples_per_channel, encoders_[i].encoded_buffer.data());
+    RTC_CHECK_EQ(bytes_encoded, samples_per_channel / 2);
+  }
+
+  const size_t bytes_to_encode = samples_per_channel / 2 * num_channels_;
+  EncodedInfo info;
+  info.encoded_bytes = encoded->AppendData(
+      bytes_to_encode, [&] (rtc::ArrayView<uint8_t> encoded) {
+        // Interleave the encoded bytes of the different channels. Each separate
+        // channel and the interleaved stream encodes two samples per byte, most
+        // significant half first.
+        for (size_t i = 0; i < samples_per_channel / 2; ++i) {
+          for (size_t j = 0; j < num_channels_; ++j) {
+            uint8_t two_samples = encoders_[j].encoded_buffer.data()[i];
+            interleave_buffer_.data()[j] = two_samples >> 4;
+            interleave_buffer_.data()[num_channels_ + j] = two_samples & 0xf;
+          }
+          for (size_t j = 0; j < num_channels_; ++j)
+            encoded[i * num_channels_ + j] =
+                interleave_buffer_.data()[2 * j] << 4 |
+                interleave_buffer_.data()[2 * j + 1];
+        }
+
+        return bytes_to_encode;
+      });
+  info.encoded_timestamp = first_timestamp_in_buffer_;
+  info.payload_type = payload_type_;
+  info.encoder_type = CodecType::kG722;
+  return info;
+}
+
+AudioEncoderG722Impl::EncoderState::EncoderState() {
+  RTC_CHECK_EQ(0, WebRtcG722_CreateEncoder(&encoder));
+}
+
+AudioEncoderG722Impl::EncoderState::~EncoderState() {
+  RTC_CHECK_EQ(0, WebRtcG722_FreeEncoder(encoder));
+}
+
+size_t AudioEncoderG722Impl::SamplesPerChannel() const {
+  return kSampleRateHz / 100 * num_10ms_frames_per_packet_;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/g722/audio_encoder_g722.h b/modules/audio_coding/codecs/g722/audio_encoder_g722.h
new file mode 100644
index 0000000..1f4b943
--- /dev/null
+++ b/modules/audio_coding/codecs/g722/audio_encoder_g722.h
@@ -0,0 +1,68 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_G722_AUDIO_ENCODER_G722_H_
+#define MODULES_AUDIO_CODING_CODECS_G722_AUDIO_ENCODER_G722_H_
+
+#include <memory>
+
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/audio_codecs/g722/audio_encoder_g722_config.h"
+#include "modules/audio_coding/codecs/g722/g722_interface.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+struct CodecInst;
+
+class AudioEncoderG722Impl final : public AudioEncoder {
+ public:
+  AudioEncoderG722Impl(const AudioEncoderG722Config& config, int payload_type);
+  explicit AudioEncoderG722Impl(const CodecInst& codec_inst);
+  ~AudioEncoderG722Impl() override;
+
+  int SampleRateHz() const override;
+  size_t NumChannels() const override;
+  int RtpTimestampRateHz() const override;
+  size_t Num10MsFramesInNextPacket() const override;
+  size_t Max10MsFramesInAPacket() const override;
+  int GetTargetBitrate() const override;
+  void Reset() override;
+
+ protected:
+  EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
+                         rtc::ArrayView<const int16_t> audio,
+                         rtc::Buffer* encoded) override;
+
+ private:
+  // The encoder state for one channel.
+  struct EncoderState {
+    G722EncInst* encoder;
+    std::unique_ptr<int16_t[]> speech_buffer;   // Queued up for encoding.
+    rtc::Buffer encoded_buffer;                 // Already encoded.
+    EncoderState();
+    ~EncoderState();
+  };
+
+  size_t SamplesPerChannel() const;
+
+  const size_t num_channels_;
+  const int payload_type_;
+  const size_t num_10ms_frames_per_packet_;
+  size_t num_10ms_frames_buffered_;
+  uint32_t first_timestamp_in_buffer_;
+  const std::unique_ptr<EncoderState[]> encoders_;
+  rtc::Buffer interleave_buffer_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderG722Impl);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_CODECS_G722_AUDIO_ENCODER_G722_H_
diff --git a/modules/audio_coding/codecs/g722/g722_decode.c b/modules/audio_coding/codecs/g722/g722_decode.c
new file mode 100644
index 0000000..06b3485
--- /dev/null
+++ b/modules/audio_coding/codecs/g722/g722_decode.c
@@ -0,0 +1,400 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * g722_decode.c - The ITU G.722 codec, decode part.
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *
+ * Copyright (C) 2005 Steve Underwood
+ *
+ *  Despite my general liking of the GPL, I place my own contributions
+ *  to this code in the public domain for the benefit of all mankind -
+ *  even the slimy ones who might try to proprietize my work and use it
+ *  to my detriment.
+ *
+ * Based in part on a single channel G.722 codec which is:
+ *
+ * Copyright (c) CMU 1993
+ * Computer Science, Speech Group
+ * Chengxiang Lu and Alex Hauptmann
+ *
+ * $Id: g722_decode.c,v 1.15 2006/07/07 16:37:49 steveu Exp $
+ *
+ * Modifications for WebRtc, 2011/04/28, by tlegrand:
+ * -Removed usage of inttypes.h and tgmath.h
+ * -Changed to use WebRtc types
+ * -Changed __inline__ to __inline
+ * -Added saturation check on output
+ */
+
+/*! \file */
+
+
+#include <memory.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "modules/audio_coding/codecs/g722/g722_enc_dec.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+#if !defined(FALSE)
+#define FALSE 0
+#endif
+#if !defined(TRUE)
+#define TRUE (!FALSE)
+#endif
+
+static __inline int16_t saturate(int32_t amp)
+{
+    int16_t amp16;
+
+    /* Hopefully this is optimised for the common case - not clipping */
+    amp16 = (int16_t) amp;
+    if (amp == amp16)
+        return amp16;
+    if (amp > WEBRTC_INT16_MAX)
+        return  WEBRTC_INT16_MAX;
+    return  WEBRTC_INT16_MIN;
+}
+/*- End of function --------------------------------------------------------*/
+
+static void block4(G722DecoderState *s, int band, int d);
+
+static void block4(G722DecoderState *s, int band, int d)
+{
+    int wd1;
+    int wd2;
+    int wd3;
+    int i;
+
+    /* Block 4, RECONS */
+    s->band[band].d[0] = d;
+    s->band[band].r[0] = saturate(s->band[band].s + d);
+
+    /* Block 4, PARREC */
+    s->band[band].p[0] = saturate(s->band[band].sz + d);
+
+    /* Block 4, UPPOL2 */
+    for (i = 0;  i < 3;  i++)
+        s->band[band].sg[i] = s->band[band].p[i] >> 15;
+    wd1 = saturate(s->band[band].a[1] * 4);
+
+    wd2 = (s->band[band].sg[0] == s->band[band].sg[1])  ?  -wd1  :  wd1;
+    if (wd2 > 32767)
+        wd2 = 32767;
+    wd3 = (s->band[band].sg[0] == s->band[band].sg[2])  ?  128  :  -128;
+    wd3 += (wd2 >> 7);
+    wd3 += (s->band[band].a[2]*32512) >> 15;
+    if (wd3 > 12288)
+        wd3 = 12288;
+    else if (wd3 < -12288)
+        wd3 = -12288;
+    s->band[band].ap[2] = wd3;
+
+    /* Block 4, UPPOL1 */
+    s->band[band].sg[0] = s->band[band].p[0] >> 15;
+    s->band[band].sg[1] = s->band[band].p[1] >> 15;
+    wd1 = (s->band[band].sg[0] == s->band[band].sg[1])  ?  192  :  -192;
+    wd2 = (s->band[band].a[1]*32640) >> 15;
+
+    s->band[band].ap[1] = saturate(wd1 + wd2);
+    wd3 = saturate(15360 - s->band[band].ap[2]);
+    if (s->band[band].ap[1] > wd3)
+        s->band[band].ap[1] = wd3;
+    else if (s->band[band].ap[1] < -wd3)
+        s->band[band].ap[1] = -wd3;
+
+    /* Block 4, UPZERO */
+    wd1 = (d == 0)  ?  0  :  128;
+    s->band[band].sg[0] = d >> 15;
+    for (i = 1;  i < 7;  i++)
+    {
+        s->band[band].sg[i] = s->band[band].d[i] >> 15;
+        wd2 = (s->band[band].sg[i] == s->band[band].sg[0])  ?  wd1  :  -wd1;
+        wd3 = (s->band[band].b[i]*32640) >> 15;
+        s->band[band].bp[i] = saturate(wd2 + wd3);
+    }
+
+    /* Block 4, DELAYA */
+    for (i = 6;  i > 0;  i--)
+    {
+        s->band[band].d[i] = s->band[band].d[i - 1];
+        s->band[band].b[i] = s->band[band].bp[i];
+    }
+
+    for (i = 2;  i > 0;  i--)
+    {
+        s->band[band].r[i] = s->band[band].r[i - 1];
+        s->band[band].p[i] = s->band[band].p[i - 1];
+        s->band[band].a[i] = s->band[band].ap[i];
+    }
+
+    /* Block 4, FILTEP */
+    wd1 = saturate(s->band[band].r[1] + s->band[band].r[1]);
+    wd1 = (s->band[band].a[1]*wd1) >> 15;
+    wd2 = saturate(s->band[band].r[2] + s->band[band].r[2]);
+    wd2 = (s->band[band].a[2]*wd2) >> 15;
+    s->band[band].sp = saturate(wd1 + wd2);
+
+    /* Block 4, FILTEZ */
+    s->band[band].sz = 0;
+    for (i = 6;  i > 0;  i--)
+    {
+        wd1 = saturate(s->band[band].d[i] + s->band[band].d[i]);
+        s->band[band].sz += (s->band[band].b[i]*wd1) >> 15;
+    }
+    s->band[band].sz = saturate(s->band[band].sz);
+
+    /* Block 4, PREDIC */
+    s->band[band].s = saturate(s->band[band].sp + s->band[band].sz);
+}
+/*- End of function --------------------------------------------------------*/
+
+G722DecoderState* WebRtc_g722_decode_init(G722DecoderState* s,
+                                          int rate,
+                                          int options) {
+    s = s ? s : malloc(sizeof(*s));
+    memset(s, 0, sizeof(*s));
+    if (rate == 48000)
+        s->bits_per_sample = 6;
+    else if (rate == 56000)
+        s->bits_per_sample = 7;
+    else
+        s->bits_per_sample = 8;
+    if ((options & G722_SAMPLE_RATE_8000))
+        s->eight_k = TRUE;
+    if ((options & G722_PACKED)  &&  s->bits_per_sample != 8)
+        s->packed = TRUE;
+    else
+        s->packed = FALSE;
+    s->band[0].det = 32;
+    s->band[1].det = 8;
+    return s;
+}
+/*- End of function --------------------------------------------------------*/
+
+int WebRtc_g722_decode_release(G722DecoderState *s)
+{
+    free(s);
+    return 0;
+}
+/*- End of function --------------------------------------------------------*/
+
+size_t WebRtc_g722_decode(G722DecoderState *s, int16_t amp[],
+                          const uint8_t g722_data[], size_t len)
+{
+    static const int wl[8] = {-60, -30, 58, 172, 334, 538, 1198, 3042 };
+    static const int rl42[16] = {0, 7, 6, 5, 4, 3, 2, 1,
+                                 7, 6, 5, 4, 3,  2, 1, 0 };
+    static const int ilb[32] =
+    {
+        2048, 2093, 2139, 2186, 2233, 2282, 2332,
+        2383, 2435, 2489, 2543, 2599, 2656, 2714,
+        2774, 2834, 2896, 2960, 3025, 3091, 3158,
+        3228, 3298, 3371, 3444, 3520, 3597, 3676,
+        3756, 3838, 3922, 4008
+    };
+    static const int wh[3] = {0, -214, 798};
+    static const int rh2[4] = {2, 1, 2, 1};
+    static const int qm2[4] = {-7408, -1616,  7408,   1616};
+    static const int qm4[16] =
+    {
+              0, -20456, -12896,  -8968,
+          -6288,  -4240,  -2584,  -1200,
+          20456,  12896,   8968,   6288,
+           4240,   2584,   1200,      0
+    };
+    static const int qm5[32] =
+    {
+           -280,   -280, -23352, -17560,
+         -14120, -11664,  -9752,  -8184,
+          -6864,  -5712,  -4696,  -3784,
+          -2960,  -2208,  -1520,   -880,
+          23352,  17560,  14120,  11664,
+           9752,   8184,   6864,   5712,
+           4696,   3784,   2960,   2208,
+           1520,    880,    280,   -280
+    };
+    static const int qm6[64] =
+    {
+           -136,   -136,   -136,   -136,
+         -24808, -21904, -19008, -16704,
+         -14984, -13512, -12280, -11192,
+         -10232,  -9360,  -8576,  -7856,
+          -7192,  -6576,  -6000,  -5456,
+          -4944,  -4464,  -4008,  -3576,
+          -3168,  -2776,  -2400,  -2032,
+          -1688,  -1360,  -1040,   -728,
+          24808,  21904,  19008,  16704,
+          14984,  13512,  12280,  11192,
+          10232,   9360,   8576,   7856,
+           7192,   6576,   6000,   5456,
+           4944,   4464,   4008,   3576,
+           3168,   2776,   2400,   2032,
+           1688,   1360,   1040,    728,
+            432,    136,   -432,   -136
+    };
+    static const int qmf_coeffs[12] =
+    {
+           3,  -11,   12,   32, -210,  951, 3876, -805,  362, -156,   53,  -11,
+    };
+
+    int dlowt;
+    int rlow;
+    int ihigh;
+    int dhigh;
+    int rhigh;
+    int xout1;
+    int xout2;
+    int wd1;
+    int wd2;
+    int wd3;
+    int code;
+    size_t outlen;
+    int i;
+    size_t j;
+
+    outlen = 0;
+    rhigh = 0;
+    for (j = 0;  j < len;  )
+    {
+        if (s->packed)
+        {
+            /* Unpack the code bits */
+            if (s->in_bits < s->bits_per_sample)
+            {
+                s->in_buffer |= (g722_data[j++] << s->in_bits);
+                s->in_bits += 8;
+            }
+            code = s->in_buffer & ((1 << s->bits_per_sample) - 1);
+            s->in_buffer >>= s->bits_per_sample;
+            s->in_bits -= s->bits_per_sample;
+        }
+        else
+        {
+            code = g722_data[j++];
+        }
+
+        switch (s->bits_per_sample)
+        {
+        default:
+        case 8:
+            wd1 = code & 0x3F;
+            ihigh = (code >> 6) & 0x03;
+            wd2 = qm6[wd1];
+            wd1 >>= 2;
+            break;
+        case 7:
+            wd1 = code & 0x1F;
+            ihigh = (code >> 5) & 0x03;
+            wd2 = qm5[wd1];
+            wd1 >>= 1;
+            break;
+        case 6:
+            wd1 = code & 0x0F;
+            ihigh = (code >> 4) & 0x03;
+            wd2 = qm4[wd1];
+            break;
+        }
+        /* Block 5L, LOW BAND INVQBL */
+        wd2 = (s->band[0].det*wd2) >> 15;
+        /* Block 5L, RECONS */
+        rlow = s->band[0].s + wd2;
+        /* Block 6L, LIMIT */
+        if (rlow > 16383)
+            rlow = 16383;
+        else if (rlow < -16384)
+            rlow = -16384;
+
+        /* Block 2L, INVQAL */
+        wd2 = qm4[wd1];
+        dlowt = (s->band[0].det*wd2) >> 15;
+
+        /* Block 3L, LOGSCL */
+        wd2 = rl42[wd1];
+        wd1 = (s->band[0].nb*127) >> 7;
+        wd1 += wl[wd2];
+        if (wd1 < 0)
+            wd1 = 0;
+        else if (wd1 > 18432)
+            wd1 = 18432;
+        s->band[0].nb = wd1;
+
+        /* Block 3L, SCALEL */
+        wd1 = (s->band[0].nb >> 6) & 31;
+        wd2 = 8 - (s->band[0].nb >> 11);
+        wd3 = (wd2 < 0)  ?  (ilb[wd1] << -wd2)  :  (ilb[wd1] >> wd2);
+        s->band[0].det = wd3 << 2;
+
+        block4(s, 0, dlowt);
+
+        if (!s->eight_k)
+        {
+            /* Block 2H, INVQAH */
+            wd2 = qm2[ihigh];
+            dhigh = (s->band[1].det*wd2) >> 15;
+            /* Block 5H, RECONS */
+            rhigh = dhigh + s->band[1].s;
+            /* Block 6H, LIMIT */
+            if (rhigh > 16383)
+                rhigh = 16383;
+            else if (rhigh < -16384)
+                rhigh = -16384;
+
+            /* Block 2H, INVQAH */
+            wd2 = rh2[ihigh];
+            wd1 = (s->band[1].nb*127) >> 7;
+            wd1 += wh[wd2];
+            if (wd1 < 0)
+                wd1 = 0;
+            else if (wd1 > 22528)
+                wd1 = 22528;
+            s->band[1].nb = wd1;
+
+            /* Block 3H, SCALEH */
+            wd1 = (s->band[1].nb >> 6) & 31;
+            wd2 = 10 - (s->band[1].nb >> 11);
+            wd3 = (wd2 < 0)  ?  (ilb[wd1] << -wd2)  :  (ilb[wd1] >> wd2);
+            s->band[1].det = wd3 << 2;
+
+            block4(s, 1, dhigh);
+        }
+
+        if (s->itu_test_mode)
+        {
+            amp[outlen++] = (int16_t) (rlow << 1);
+            amp[outlen++] = (int16_t) (rhigh << 1);
+        }
+        else
+        {
+            if (s->eight_k)
+            {
+                amp[outlen++] = (int16_t) (rlow << 1);
+            }
+            else
+            {
+                /* Apply the receive QMF */
+                for (i = 0;  i < 22;  i++)
+                    s->x[i] = s->x[i + 2];
+                s->x[22] = rlow + rhigh;
+                s->x[23] = rlow - rhigh;
+
+                xout1 = 0;
+                xout2 = 0;
+                for (i = 0;  i < 12;  i++)
+                {
+                    xout2 += s->x[2*i]*qmf_coeffs[i];
+                    xout1 += s->x[2*i + 1]*qmf_coeffs[11 - i];
+                }
+                /* We shift by 12 to allow for the QMF filters (DC gain = 4096), less 1
+                   to allow for the 15 bit input to the G.722 algorithm. */
+                /* WebRtc, tlegrand: added saturation */
+                amp[outlen++] = saturate(xout1 >> 11);
+                amp[outlen++] = saturate(xout2 >> 11);
+            }
+        }
+    }
+    return outlen;
+}
+/*- End of function --------------------------------------------------------*/
+/*- End of file ------------------------------------------------------------*/
diff --git a/modules/audio_coding/codecs/g722/g722_enc_dec.h b/modules/audio_coding/codecs/g722/g722_enc_dec.h
new file mode 100644
index 0000000..ccda09b
--- /dev/null
+++ b/modules/audio_coding/codecs/g722/g722_enc_dec.h
@@ -0,0 +1,160 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * g722.h - The ITU G.722 codec.
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *
+ * Copyright (C) 2005 Steve Underwood
+ *
+ *  Despite my general liking of the GPL, I place my own contributions 
+ *  to this code in the public domain for the benefit of all mankind -
+ *  even the slimy ones who might try to proprietize my work and use it
+ *  to my detriment.
+ *
+ * Based on a single channel G.722 codec which is:
+ *
+ *****    Copyright (c) CMU    1993      *****
+ * Computer Science, Speech Group
+ * Chengxiang Lu and Alex Hauptmann
+ *
+ * $Id: g722.h,v 1.10 2006/06/16 12:45:53 steveu Exp $
+ *
+ * Modifications for WebRtc, 2011/04/28, by tlegrand:
+ * -Changed to use WebRtc types
+ * -Added new defines for minimum and maximum values of short int
+ */
+
+
+/*! \file */
+
+#if !defined(_G722_ENC_DEC_H_)
+#define _G722_ENC_DEC_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+/*! \page g722_page G.722 encoding and decoding
+\section g722_page_sec_1 What does it do?
+The G.722 module is a bit exact implementation of the ITU G.722 specification for all three
+specified bit rates - 64000bps, 56000bps and 48000bps. It passes the ITU tests.
+
+To allow fast and flexible interworking with narrow band telephony, the encoder and decoder
+support an option for the linear audio to be an 8k samples/second stream. In this mode the
+codec is considerably faster, and still fully compatible with wideband terminals using G.722.
+
+\section g722_page_sec_2 How does it work?
+???.
+*/
+
+#define WEBRTC_INT16_MAX 32767
+#define WEBRTC_INT16_MIN -32768
+
+enum
+{
+    G722_SAMPLE_RATE_8000 = 0x0001,
+    G722_PACKED = 0x0002
+};
+
+typedef struct
+{
+    /*! TRUE if the operating in the special ITU test mode, with the band split filters
+             disabled. */
+    int itu_test_mode;
+    /*! TRUE if the G.722 data is packed */
+    int packed;
+    /*! TRUE if encode from 8k samples/second */
+    int eight_k;
+    /*! 6 for 48000kbps, 7 for 56000kbps, or 8 for 64000kbps. */
+    int bits_per_sample;
+
+    /*! Signal history for the QMF */
+    int x[24];
+
+    struct
+    {
+        int s;
+        int sp;
+        int sz;
+        int r[3];
+        int a[3];
+        int ap[3];
+        int p[3];
+        int d[7];
+        int b[7];
+        int bp[7];
+        int sg[7];
+        int nb;
+        int det;
+    } band[2];
+
+    unsigned int in_buffer;
+    int in_bits;
+    unsigned int out_buffer;
+    int out_bits;
+} G722EncoderState;
+
+typedef struct
+{
+    /*! TRUE if the operating in the special ITU test mode, with the band split filters
+             disabled. */
+    int itu_test_mode;
+    /*! TRUE if the G.722 data is packed */
+    int packed;
+    /*! TRUE if decode to 8k samples/second */
+    int eight_k;
+    /*! 6 for 48000kbps, 7 for 56000kbps, or 8 for 64000kbps. */
+    int bits_per_sample;
+
+    /*! Signal history for the QMF */
+    int x[24];
+
+    struct
+    {
+        int s;
+        int sp;
+        int sz;
+        int r[3];
+        int a[3];
+        int ap[3];
+        int p[3];
+        int d[7];
+        int b[7];
+        int bp[7];
+        int sg[7];
+        int nb;
+        int det;
+    } band[2];
+    
+    unsigned int in_buffer;
+    int in_bits;
+    unsigned int out_buffer;
+    int out_bits;
+} G722DecoderState;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+G722EncoderState* WebRtc_g722_encode_init(G722EncoderState* s,
+                                          int rate,
+                                          int options);
+int WebRtc_g722_encode_release(G722EncoderState *s);
+size_t WebRtc_g722_encode(G722EncoderState *s,
+                          uint8_t g722_data[],
+                          const int16_t amp[],
+                          size_t len);
+
+G722DecoderState* WebRtc_g722_decode_init(G722DecoderState* s,
+                                          int rate,
+                                          int options);
+int WebRtc_g722_decode_release(G722DecoderState *s);
+size_t WebRtc_g722_decode(G722DecoderState *s,
+                          int16_t amp[],
+                          const uint8_t g722_data[],
+                          size_t len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/modules/audio_coding/codecs/g722/g722_encode.c b/modules/audio_coding/codecs/g722/g722_encode.c
new file mode 100644
index 0000000..eeb7649
--- /dev/null
+++ b/modules/audio_coding/codecs/g722/g722_encode.c
@@ -0,0 +1,430 @@
+/*
+ * SpanDSP - a series of DSP components for telephony
+ *
+ * g722_encode.c - The ITU G.722 codec, encode part.
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ *
+ * Copyright (C) 2005 Steve Underwood
+ *
+ * All rights reserved.
+ *
+ *  Despite my general liking of the GPL, I place my own contributions
+ *  to this code in the public domain for the benefit of all mankind -
+ *  even the slimy ones who might try to proprietize my work and use it
+ *  to my detriment.
+ *
+ * Based on a single channel 64kbps only G.722 codec which is:
+ *
+ *****    Copyright (c) CMU    1993      *****
+ * Computer Science, Speech Group
+ * Chengxiang Lu and Alex Hauptmann
+ *
+ * $Id: g722_encode.c,v 1.14 2006/07/07 16:37:49 steveu Exp $
+ *
+ * Modifications for WebRtc, 2011/04/28, by tlegrand:
+ * -Removed usage of inttypes.h and tgmath.h
+ * -Changed to use WebRtc types
+ * -Added option to run encoder bitexact with ITU-T reference implementation
+ */
+
+/*! \file */
+
+#include <memory.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "modules/audio_coding/codecs/g722/g722_enc_dec.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+#if !defined(FALSE)
+#define FALSE 0
+#endif
+#if !defined(TRUE)
+#define TRUE (!FALSE)
+#endif
+
+static __inline int16_t saturate(int32_t amp)
+{
+    int16_t amp16;
+
+    /* Hopefully this is optimised for the common case - not clipping */
+    amp16 = (int16_t) amp;
+    if (amp == amp16)
+        return amp16;
+    if (amp > WEBRTC_INT16_MAX)
+        return  WEBRTC_INT16_MAX;
+    return  WEBRTC_INT16_MIN;
+}
+/*- End of function --------------------------------------------------------*/
+
+static void block4(G722EncoderState *s, int band, int d)
+{
+    int wd1;
+    int wd2;
+    int wd3;
+    int i;
+
+    /* Block 4, RECONS */
+    s->band[band].d[0] = d;
+    s->band[band].r[0] = saturate(s->band[band].s + d);
+
+    /* Block 4, PARREC */
+    s->band[band].p[0] = saturate(s->band[band].sz + d);
+
+    /* Block 4, UPPOL2 */
+    for (i = 0;  i < 3;  i++)
+        s->band[band].sg[i] = s->band[band].p[i] >> 15;
+    wd1 = saturate(s->band[band].a[1] << 2);
+
+    wd2 = (s->band[band].sg[0] == s->band[band].sg[1])  ?  -wd1  :  wd1;
+    if (wd2 > 32767)
+        wd2 = 32767;
+    wd3 = (wd2 >> 7) + ((s->band[band].sg[0] == s->band[band].sg[2])  ?  128  :  -128);
+    wd3 += (s->band[band].a[2]*32512) >> 15;
+    if (wd3 > 12288)
+        wd3 = 12288;
+    else if (wd3 < -12288)
+        wd3 = -12288;
+    s->band[band].ap[2] = wd3;
+
+    /* Block 4, UPPOL1 */
+    s->band[band].sg[0] = s->band[band].p[0] >> 15;
+    s->band[band].sg[1] = s->band[band].p[1] >> 15;
+    wd1 = (s->band[band].sg[0] == s->band[band].sg[1])  ?  192  :  -192;
+    wd2 = (s->band[band].a[1]*32640) >> 15;
+
+    s->band[band].ap[1] = saturate(wd1 + wd2);
+    wd3 = saturate(15360 - s->band[band].ap[2]);
+    if (s->band[band].ap[1] > wd3)
+        s->band[band].ap[1] = wd3;
+    else if (s->band[band].ap[1] < -wd3)
+        s->band[band].ap[1] = -wd3;
+
+    /* Block 4, UPZERO */
+    wd1 = (d == 0)  ?  0  :  128;
+    s->band[band].sg[0] = d >> 15;
+    for (i = 1;  i < 7;  i++)
+    {
+        s->band[band].sg[i] = s->band[band].d[i] >> 15;
+        wd2 = (s->band[band].sg[i] == s->band[band].sg[0])  ?  wd1  :  -wd1;
+        wd3 = (s->band[band].b[i]*32640) >> 15;
+        s->band[band].bp[i] = saturate(wd2 + wd3);
+    }
+
+    /* Block 4, DELAYA */
+    for (i = 6;  i > 0;  i--)
+    {
+        s->band[band].d[i] = s->band[band].d[i - 1];
+        s->band[band].b[i] = s->band[band].bp[i];
+    }
+
+    for (i = 2;  i > 0;  i--)
+    {
+        s->band[band].r[i] = s->band[band].r[i - 1];
+        s->band[band].p[i] = s->band[band].p[i - 1];
+        s->band[band].a[i] = s->band[band].ap[i];
+    }
+
+    /* Block 4, FILTEP */
+    wd1 = saturate(s->band[band].r[1] + s->band[band].r[1]);
+    wd1 = (s->band[band].a[1]*wd1) >> 15;
+    wd2 = saturate(s->band[band].r[2] + s->band[band].r[2]);
+    wd2 = (s->band[band].a[2]*wd2) >> 15;
+    s->band[band].sp = saturate(wd1 + wd2);
+
+    /* Block 4, FILTEZ */
+    s->band[band].sz = 0;
+    for (i = 6;  i > 0;  i--)
+    {
+        wd1 = saturate(s->band[band].d[i] + s->band[band].d[i]);
+        s->band[band].sz += (s->band[band].b[i]*wd1) >> 15;
+    }
+    s->band[band].sz = saturate(s->band[band].sz);
+
+    /* Block 4, PREDIC */
+    s->band[band].s = saturate(s->band[band].sp + s->band[band].sz);
+}
+/*- End of function --------------------------------------------------------*/
+
+G722EncoderState* WebRtc_g722_encode_init(G722EncoderState* s,
+                                          int rate,
+                                          int options) {
+    if (s == NULL)
+    {
+        if ((s = (G722EncoderState *) malloc(sizeof(*s))) == NULL)
+            return NULL;
+    }
+    memset(s, 0, sizeof(*s));
+    if (rate == 48000)
+        s->bits_per_sample = 6;
+    else if (rate == 56000)
+        s->bits_per_sample = 7;
+    else
+        s->bits_per_sample = 8;
+    if ((options & G722_SAMPLE_RATE_8000))
+        s->eight_k = TRUE;
+    if ((options & G722_PACKED)  &&  s->bits_per_sample != 8)
+        s->packed = TRUE;
+    else
+        s->packed = FALSE;
+    s->band[0].det = 32;
+    s->band[1].det = 8;
+    return s;
+}
+/*- End of function --------------------------------------------------------*/
+
+int WebRtc_g722_encode_release(G722EncoderState *s)
+{
+    free(s);
+    return 0;
+}
+/*- End of function --------------------------------------------------------*/
+
+/* WebRtc, tlegrand:
+ * Only define the following if bit-exactness with reference implementation
+ * is needed. Will only have any effect if input signal is saturated.
+ */
+//#define RUN_LIKE_REFERENCE_G722
+#ifdef RUN_LIKE_REFERENCE_G722
+int16_t limitValues (int16_t rl)
+{
+
+    int16_t yl;
+
+    yl = (rl > 16383) ? 16383 : ((rl < -16384) ? -16384 : rl);
+
+    return (yl);
+}
+#endif
+
+size_t WebRtc_g722_encode(G722EncoderState *s, uint8_t g722_data[],
+                          const int16_t amp[], size_t len)
+{
+    static const int q6[32] =
+    {
+           0,   35,   72,  110,  150,  190,  233,  276,
+         323,  370,  422,  473,  530,  587,  650,  714,
+         786,  858,  940, 1023, 1121, 1219, 1339, 1458,
+        1612, 1765, 1980, 2195, 2557, 2919,    0,    0
+    };
+    static const int iln[32] =
+    {
+         0, 63, 62, 31, 30, 29, 28, 27,
+        26, 25, 24, 23, 22, 21, 20, 19,
+        18, 17, 16, 15, 14, 13, 12, 11,
+        10,  9,  8,  7,  6,  5,  4,  0
+    };
+    static const int ilp[32] =
+    {
+         0, 61, 60, 59, 58, 57, 56, 55,
+        54, 53, 52, 51, 50, 49, 48, 47,
+        46, 45, 44, 43, 42, 41, 40, 39,
+        38, 37, 36, 35, 34, 33, 32,  0
+    };
+    static const int wl[8] =
+    {
+        -60, -30, 58, 172, 334, 538, 1198, 3042
+    };
+    static const int rl42[16] =
+    {
+        0, 7, 6, 5, 4, 3, 2, 1, 7, 6, 5, 4, 3, 2, 1, 0
+    };
+    static const int ilb[32] =
+    {
+        2048, 2093, 2139, 2186, 2233, 2282, 2332,
+        2383, 2435, 2489, 2543, 2599, 2656, 2714,
+        2774, 2834, 2896, 2960, 3025, 3091, 3158,
+        3228, 3298, 3371, 3444, 3520, 3597, 3676,
+        3756, 3838, 3922, 4008
+    };
+    static const int qm4[16] =
+    {
+             0, -20456, -12896, -8968,
+         -6288,  -4240,  -2584, -1200,
+         20456,  12896,   8968,  6288,
+          4240,   2584,   1200,     0
+    };
+    static const int qm2[4] =
+    {
+        -7408,  -1616,   7408,   1616
+    };
+    static const int qmf_coeffs[12] =
+    {
+           3,  -11,   12,   32, -210,  951, 3876, -805,  362, -156,   53,  -11,
+    };
+    static const int ihn[3] = {0, 1, 0};
+    static const int ihp[3] = {0, 3, 2};
+    static const int wh[3] = {0, -214, 798};
+    static const int rh2[4] = {2, 1, 2, 1};
+
+    int dlow;
+    int dhigh;
+    int el;
+    int wd;
+    int wd1;
+    int ril;
+    int wd2;
+    int il4;
+    int ih2;
+    int wd3;
+    int eh;
+    int mih;
+    int i;
+    size_t j;
+    /* Low and high band PCM from the QMF */
+    int xlow;
+    int xhigh;
+    size_t g722_bytes;
+    /* Even and odd tap accumulators */
+    int sumeven;
+    int sumodd;
+    int ihigh;
+    int ilow;
+    int code;
+
+    g722_bytes = 0;
+    xhigh = 0;
+    for (j = 0;  j < len;  )
+    {
+        if (s->itu_test_mode)
+        {
+            xlow =
+            xhigh = amp[j++] >> 1;
+        }
+        else
+        {
+            if (s->eight_k)
+            {
+                /* We shift by 1 to allow for the 15 bit input to the G.722 algorithm. */
+                xlow = amp[j++] >> 1;
+            }
+            else
+            {
+                /* Apply the transmit QMF */
+                /* Shuffle the buffer down */
+                for (i = 0;  i < 22;  i++)
+                    s->x[i] = s->x[i + 2];
+                s->x[22] = amp[j++];
+                s->x[23] = amp[j++];
+
+                /* Discard every other QMF output */
+                sumeven = 0;
+                sumodd = 0;
+                for (i = 0;  i < 12;  i++)
+                {
+                    sumodd += s->x[2*i]*qmf_coeffs[i];
+                    sumeven += s->x[2*i + 1]*qmf_coeffs[11 - i];
+                }
+                /* We shift by 12 to allow for the QMF filters (DC gain = 4096), plus 1
+                   to allow for us summing two filters, plus 1 to allow for the 15 bit
+                   input to the G.722 algorithm. */
+                xlow = (sumeven + sumodd) >> 14;
+                xhigh = (sumeven - sumodd) >> 14;
+
+#ifdef RUN_LIKE_REFERENCE_G722
+                /* The following lines are only used to verify bit-exactness
+                 * with reference implementation of G.722. Higher precision
+                 * is achieved without limiting the values.
+                 */
+                xlow = limitValues(xlow);
+                xhigh = limitValues(xhigh);
+#endif
+            }
+        }
+        /* Block 1L, SUBTRA */
+        el = saturate(xlow - s->band[0].s);
+
+        /* Block 1L, QUANTL */
+        wd = (el >= 0)  ?  el  :  -(el + 1);
+
+        for (i = 1;  i < 30;  i++)
+        {
+            wd1 = (q6[i]*s->band[0].det) >> 12;
+            if (wd < wd1)
+                break;
+        }
+        ilow = (el < 0)  ?  iln[i]  :  ilp[i];
+
+        /* Block 2L, INVQAL */
+        ril = ilow >> 2;
+        wd2 = qm4[ril];
+        dlow = (s->band[0].det*wd2) >> 15;
+
+        /* Block 3L, LOGSCL */
+        il4 = rl42[ril];
+        wd = (s->band[0].nb*127) >> 7;
+        s->band[0].nb = wd + wl[il4];
+        if (s->band[0].nb < 0)
+            s->band[0].nb = 0;
+        else if (s->band[0].nb > 18432)
+            s->band[0].nb = 18432;
+
+        /* Block 3L, SCALEL */
+        wd1 = (s->band[0].nb >> 6) & 31;
+        wd2 = 8 - (s->band[0].nb >> 11);
+        wd3 = (wd2 < 0)  ?  (ilb[wd1] << -wd2)  :  (ilb[wd1] >> wd2);
+        s->band[0].det = wd3 << 2;
+
+        block4(s, 0, dlow);
+
+        if (s->eight_k)
+        {
+            /* Just leave the high bits as zero */
+            code = (0xC0 | ilow) >> (8 - s->bits_per_sample);
+        }
+        else
+        {
+            /* Block 1H, SUBTRA */
+            eh = saturate(xhigh - s->band[1].s);
+
+            /* Block 1H, QUANTH */
+            wd = (eh >= 0)  ?  eh  :  -(eh + 1);
+            wd1 = (564*s->band[1].det) >> 12;
+            mih = (wd >= wd1)  ?  2  :  1;
+            ihigh = (eh < 0)  ?  ihn[mih]  :  ihp[mih];
+
+            /* Block 2H, INVQAH */
+            wd2 = qm2[ihigh];
+            dhigh = (s->band[1].det*wd2) >> 15;
+
+            /* Block 3H, LOGSCH */
+            ih2 = rh2[ihigh];
+            wd = (s->band[1].nb*127) >> 7;
+            s->band[1].nb = wd + wh[ih2];
+            if (s->band[1].nb < 0)
+                s->band[1].nb = 0;
+            else if (s->band[1].nb > 22528)
+                s->band[1].nb = 22528;
+
+            /* Block 3H, SCALEH */
+            wd1 = (s->band[1].nb >> 6) & 31;
+            wd2 = 10 - (s->band[1].nb >> 11);
+            wd3 = (wd2 < 0)  ?  (ilb[wd1] << -wd2)  :  (ilb[wd1] >> wd2);
+            s->band[1].det = wd3 << 2;
+
+            block4(s, 1, dhigh);
+            code = ((ihigh << 6) | ilow) >> (8 - s->bits_per_sample);
+        }
+
+        if (s->packed)
+        {
+            /* Pack the code bits */
+            s->out_buffer |= (code << s->out_bits);
+            s->out_bits += s->bits_per_sample;
+            if (s->out_bits >= 8)
+            {
+                g722_data[g722_bytes++] = (uint8_t) (s->out_buffer & 0xFF);
+                s->out_bits -= 8;
+                s->out_buffer >>= 8;
+            }
+        }
+        else
+        {
+            g722_data[g722_bytes++] = (uint8_t) code;
+        }
+    }
+    return g722_bytes;
+}
+/*- End of function --------------------------------------------------------*/
+/*- End of file ------------------------------------------------------------*/
diff --git a/modules/audio_coding/codecs/g722/g722_interface.c b/modules/audio_coding/codecs/g722/g722_interface.c
new file mode 100644
index 0000000..fb25049
--- /dev/null
+++ b/modules/audio_coding/codecs/g722/g722_interface.c
@@ -0,0 +1,105 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "modules/audio_coding/codecs/g722/g722_enc_dec.h"
+#include "modules/audio_coding/codecs/g722/g722_interface.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+int16_t WebRtcG722_CreateEncoder(G722EncInst **G722enc_inst)
+{
+    *G722enc_inst=(G722EncInst*)malloc(sizeof(G722EncoderState));
+    if (*G722enc_inst!=NULL) {
+      return(0);
+    } else {
+      return(-1);
+    }
+}
+
+int16_t WebRtcG722_EncoderInit(G722EncInst *G722enc_inst)
+{
+    // Create and/or reset the G.722 encoder
+    // Bitrate 64 kbps and wideband mode (2)
+    G722enc_inst = (G722EncInst *) WebRtc_g722_encode_init(
+        (G722EncoderState*) G722enc_inst, 64000, 2);
+    if (G722enc_inst == NULL) {
+        return -1;
+    } else {
+        return 0;
+    }
+}
+
+int WebRtcG722_FreeEncoder(G722EncInst *G722enc_inst)
+{
+    // Free encoder memory
+    return WebRtc_g722_encode_release((G722EncoderState*) G722enc_inst);
+}
+
+size_t WebRtcG722_Encode(G722EncInst *G722enc_inst,
+                         const int16_t* speechIn,
+                         size_t len,
+                         uint8_t* encoded)
+{
+    unsigned char *codechar = (unsigned char*) encoded;
+    // Encode the input speech vector
+    return WebRtc_g722_encode((G722EncoderState*) G722enc_inst, codechar,
+                              speechIn, len);
+}
+
+int16_t WebRtcG722_CreateDecoder(G722DecInst **G722dec_inst)
+{
+    *G722dec_inst=(G722DecInst*)malloc(sizeof(G722DecoderState));
+    if (*G722dec_inst!=NULL) {
+      return(0);
+    } else {
+      return(-1);
+    }
+}
+
+void WebRtcG722_DecoderInit(G722DecInst* inst) {
+  // Create and/or reset the G.722 decoder
+  // Bitrate 64 kbps and wideband mode (2)
+  WebRtc_g722_decode_init((G722DecoderState*)inst, 64000, 2);
+}
+
+int WebRtcG722_FreeDecoder(G722DecInst *G722dec_inst)
+{
+    // Free encoder memory
+    return WebRtc_g722_decode_release((G722DecoderState*) G722dec_inst);
+}
+
+size_t WebRtcG722_Decode(G722DecInst *G722dec_inst,
+                         const uint8_t *encoded,
+                         size_t len,
+                         int16_t *decoded,
+                         int16_t *speechType)
+{
+    // Decode the G.722 encoder stream
+    *speechType=G722_WEBRTC_SPEECH;
+    return WebRtc_g722_decode((G722DecoderState*) G722dec_inst, decoded,
+                              encoded, len);
+}
+
+int16_t WebRtcG722_Version(char *versionStr, short len)
+{
+    // Get version string
+    char version[30] = "2.0.0\n";
+    if (strlen(version) < (unsigned int)len)
+    {
+        strcpy(versionStr, version);
+        return 0;
+    }
+    else
+    {
+        return -1;
+    }
+}
diff --git a/modules/audio_coding/codecs/g722/g722_interface.h b/modules/audio_coding/codecs/g722/g722_interface.h
new file mode 100644
index 0000000..d957223
--- /dev/null
+++ b/modules/audio_coding/codecs/g722/g722_interface.h
@@ -0,0 +1,182 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_G722_G722_INTERFACE_H_
+#define MODULES_AUDIO_CODING_CODECS_G722_G722_INTERFACE_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+/*
+ * Solution to support multiple instances
+ */
+
+typedef struct WebRtcG722EncInst    G722EncInst;
+typedef struct WebRtcG722DecInst    G722DecInst;
+
+/*
+ * Comfort noise constants
+ */
+
+#define G722_WEBRTC_SPEECH     1
+#define G722_WEBRTC_CNG        2
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/****************************************************************************
+ * WebRtcG722_CreateEncoder(...)
+ *
+ * Create memory used for G722 encoder
+ *
+ * Input:
+ *     - G722enc_inst         : G722 instance for encoder
+ *
+ * Return value               :  0 - Ok
+ *                              -1 - Error
+ */
+int16_t WebRtcG722_CreateEncoder(G722EncInst **G722enc_inst);
+
+
+/****************************************************************************
+ * WebRtcG722_EncoderInit(...)
+ *
+ * This function initializes a G722 instance
+ *
+ * Input:
+ *     - G722enc_inst         : G722 instance, i.e. the user that should receive
+ *                             be initialized
+ *
+ * Return value               :  0 - Ok
+ *                              -1 - Error
+ */
+
+int16_t WebRtcG722_EncoderInit(G722EncInst *G722enc_inst);
+
+
+/****************************************************************************
+ * WebRtcG722_FreeEncoder(...)
+ *
+ * Free the memory used for G722 encoder
+ *
+ * Input:
+ *     - G722enc_inst         : G722 instance for encoder
+ *
+ * Return value               :  0 - Ok
+ *                              -1 - Error
+ */
+int WebRtcG722_FreeEncoder(G722EncInst *G722enc_inst);
+
+
+
+/****************************************************************************
+ * WebRtcG722_Encode(...)
+ *
+ * This function encodes G722 encoded data.
+ *
+ * Input:
+ *     - G722enc_inst         : G722 instance, i.e. the user that should encode
+ *                              a packet
+ *     - speechIn             : Input speech vector
+ *     - len                  : Samples in speechIn
+ *
+ * Output:
+ *        - encoded           : The encoded data vector
+ *
+ * Return value               : Length (in bytes) of coded data
+ */
+
+size_t WebRtcG722_Encode(G722EncInst* G722enc_inst,
+                         const int16_t* speechIn,
+                         size_t len,
+                         uint8_t* encoded);
+
+
+/****************************************************************************
+ * WebRtcG722_CreateDecoder(...)
+ *
+ * Create memory used for G722 encoder
+ *
+ * Input:
+ *     - G722dec_inst         : G722 instance for decoder
+ *
+ * Return value               :  0 - Ok
+ *                              -1 - Error
+ */
+int16_t WebRtcG722_CreateDecoder(G722DecInst **G722dec_inst);
+
+/****************************************************************************
+ * WebRtcG722_DecoderInit(...)
+ *
+ * This function initializes a G722 instance
+ *
+ * Input:
+ *     - inst      : G722 instance
+ */
+
+void WebRtcG722_DecoderInit(G722DecInst* inst);
+
+/****************************************************************************
+ * WebRtcG722_FreeDecoder(...)
+ *
+ * Free the memory used for G722 decoder
+ *
+ * Input:
+ *     - G722dec_inst         : G722 instance for decoder
+ *
+ * Return value               :  0 - Ok
+ *                              -1 - Error
+ */
+
+int WebRtcG722_FreeDecoder(G722DecInst *G722dec_inst);
+
+
+/****************************************************************************
+ * WebRtcG722_Decode(...)
+ *
+ * This function decodes a packet with G729 frame(s). Output speech length
+ * will be a multiple of 80 samples (80*frames/packet).
+ *
+ * Input:
+ *     - G722dec_inst       : G722 instance, i.e. the user that should decode
+ *                            a packet
+ *     - encoded            : Encoded G722 frame(s)
+ *     - len                : Bytes in encoded vector
+ *
+ * Output:
+ *        - decoded         : The decoded vector
+ *      - speechType        : 1 normal, 2 CNG (Since G722 does not have its own
+ *                            DTX/CNG scheme it should always return 1)
+ *
+ * Return value             : Samples in decoded vector
+ */
+
+size_t WebRtcG722_Decode(G722DecInst *G722dec_inst,
+                         const uint8_t* encoded,
+                         size_t len,
+                         int16_t *decoded,
+                         int16_t *speechType);
+
+/****************************************************************************
+ * WebRtcG722_Version(...)
+ *
+ * Get a string with the current version of the codec
+ */
+
+int16_t WebRtcG722_Version(char *versionStr, short len);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* MODULES_AUDIO_CODING_CODECS_G722_G722_INTERFACE_H_ */
diff --git a/modules/audio_coding/codecs/ilbc/abs_quant.c b/modules/audio_coding/codecs/ilbc/abs_quant.c
new file mode 100644
index 0000000..308902f
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/abs_quant.c
@@ -0,0 +1,80 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_AbsQuant.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/abs_quant_loop.h"
+
+
+/*----------------------------------------------------------------*
+ *  predictive noise shaping encoding of scaled start state
+ *  (subrutine for WebRtcIlbcfix_StateSearch)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_AbsQuant(
+    IlbcEncoder *iLBCenc_inst,
+    /* (i) Encoder instance */
+    iLBC_bits *iLBC_encbits, /* (i/o) Encoded bits (outputs idxForMax
+                                   and idxVec, uses state_first as
+                                   input) */
+    int16_t *in,     /* (i) vector to encode */
+    int16_t *weightDenum   /* (i) denominator of synthesis filter */
+                            ) {
+  int16_t *syntOut;
+  size_t quantLen[2];
+
+  /* Stack based */
+  int16_t syntOutBuf[LPC_FILTERORDER+STATE_SHORT_LEN_30MS];
+  int16_t in_weightedVec[STATE_SHORT_LEN_30MS+LPC_FILTERORDER];
+  int16_t *in_weighted = &in_weightedVec[LPC_FILTERORDER];
+
+  /* Initialize the buffers */
+  WebRtcSpl_MemSetW16(syntOutBuf, 0, LPC_FILTERORDER+STATE_SHORT_LEN_30MS);
+  syntOut = &syntOutBuf[LPC_FILTERORDER];
+  /* Start with zero state */
+  WebRtcSpl_MemSetW16(in_weightedVec, 0, LPC_FILTERORDER);
+
+  /* Perform the quantization loop in two sections of length quantLen[i],
+     where the perceptual weighting filter is updated at the subframe
+     border */
+
+  if (iLBC_encbits->state_first) {
+    quantLen[0]=SUBL;
+    quantLen[1]=iLBCenc_inst->state_short_len-SUBL;
+  } else {
+    quantLen[0]=iLBCenc_inst->state_short_len-SUBL;
+    quantLen[1]=SUBL;
+  }
+
+  /* Calculate the weighted residual, switch perceptual weighting
+     filter at the subframe border */
+  WebRtcSpl_FilterARFastQ12(
+      in, in_weighted,
+      weightDenum, LPC_FILTERORDER+1, quantLen[0]);
+  WebRtcSpl_FilterARFastQ12(
+      &in[quantLen[0]], &in_weighted[quantLen[0]],
+      &weightDenum[LPC_FILTERORDER+1], LPC_FILTERORDER+1, quantLen[1]);
+
+  WebRtcIlbcfix_AbsQuantLoop(
+      syntOut,
+      in_weighted,
+      weightDenum,
+      quantLen,
+      iLBC_encbits->idxVec);
+
+}
diff --git a/modules/audio_coding/codecs/ilbc/abs_quant.h b/modules/audio_coding/codecs/ilbc/abs_quant.h
new file mode 100644
index 0000000..3a98a6e
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/abs_quant.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_AbsQuant.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ABS_QUANT_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ABS_QUANT_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  predictive noise shaping encoding of scaled start state
+ *  (subrutine for WebRtcIlbcfix_StateSearch)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_AbsQuant(
+    IlbcEncoder *iLBCenc_inst,
+    /* (i) Encoder instance */
+    iLBC_bits *iLBC_encbits, /* (i/o) Encoded bits (outputs idxForMax
+                                   and idxVec, uses state_first as
+                                   input) */
+    int16_t *in,     /* (i) vector to encode */
+    int16_t *weightDenum   /* (i) denominator of synthesis filter */
+                            );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/abs_quant_loop.c b/modules/audio_coding/codecs/ilbc/abs_quant_loop.c
new file mode 100644
index 0000000..2d8a998
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/abs_quant_loop.c
@@ -0,0 +1,87 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_AbsQuantLoop.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/sort_sq.h"
+
+void WebRtcIlbcfix_AbsQuantLoop(int16_t *syntOutIN, int16_t *in_weightedIN,
+                                int16_t *weightDenumIN, size_t *quantLenIN,
+                                int16_t *idxVecIN ) {
+  size_t k1, k2;
+  int16_t index;
+  int32_t toQW32;
+  int32_t toQ32;
+  int16_t tmp16a;
+  int16_t xq;
+
+  int16_t *syntOut   = syntOutIN;
+  int16_t *in_weighted  = in_weightedIN;
+  int16_t *weightDenum  = weightDenumIN;
+  size_t *quantLen  = quantLenIN;
+  int16_t *idxVec   = idxVecIN;
+
+  for(k1=0;k1<2;k1++) {
+    for(k2=0;k2<quantLen[k1];k2++){
+
+      /* Filter to get the predicted value */
+      WebRtcSpl_FilterARFastQ12(
+          syntOut, syntOut,
+          weightDenum, LPC_FILTERORDER+1, 1);
+
+      /* the quantizer */
+      toQW32 = (int32_t)(*in_weighted) - (int32_t)(*syntOut);
+
+      toQ32 = (((int32_t)toQW32)<<2);
+
+      if (toQ32 > 32767) {
+        toQ32 = (int32_t) 32767;
+      } else if (toQ32 < -32768) {
+        toQ32 = (int32_t) -32768;
+      }
+
+      /* Quantize the state */
+      if (toQW32<(-7577)) {
+        /* To prevent negative overflow */
+        index=0;
+      } else if (toQW32>8151) {
+        /* To prevent positive overflow */
+        index=7;
+      } else {
+        /* Find the best quantization index
+           (state_sq3Tbl is in Q13 and toQ is in Q11)
+        */
+        WebRtcIlbcfix_SortSq(&xq, &index,
+                             (int16_t)toQ32,
+                             WebRtcIlbcfix_kStateSq3, 8);
+      }
+
+      /* Store selected index */
+      (*idxVec++) = index;
+
+      /* Compute decoded sample and update of the prediction filter */
+      tmp16a = ((WebRtcIlbcfix_kStateSq3[index] + 2 ) >> 2);
+
+      *syntOut     = (int16_t) (tmp16a + (int32_t)(*in_weighted) - toQW32);
+
+      syntOut++; in_weighted++;
+    }
+    /* Update perceptual weighting filter at subframe border */
+    weightDenum += 11;
+  }
+}
diff --git a/modules/audio_coding/codecs/ilbc/abs_quant_loop.h b/modules/audio_coding/codecs/ilbc/abs_quant_loop.h
new file mode 100644
index 0000000..5116bfd
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/abs_quant_loop.h
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_AbsQuantLoop.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ABS_QUANT_LOOP_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ABS_QUANT_LOOP_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  predictive noise shaping encoding of scaled start state
+ *  (subrutine for WebRtcIlbcfix_StateSearch)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_AbsQuantLoop(int16_t *syntOutIN, int16_t *in_weightedIN,
+                                int16_t *weightDenumIN, size_t *quantLenIN,
+                                int16_t *idxVecIN);
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc b/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc
new file mode 100644
index 0000000..08d21f4
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc
@@ -0,0 +1,110 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h"
+
+#include <utility>
+
+#include "modules/audio_coding/codecs/ilbc/ilbc.h"
+#include "modules/audio_coding/codecs/legacy_encoded_audio_frame.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+AudioDecoderIlbcImpl::AudioDecoderIlbcImpl() {
+  WebRtcIlbcfix_DecoderCreate(&dec_state_);
+  WebRtcIlbcfix_Decoderinit30Ms(dec_state_);
+}
+
+AudioDecoderIlbcImpl::~AudioDecoderIlbcImpl() {
+  WebRtcIlbcfix_DecoderFree(dec_state_);
+}
+
+bool AudioDecoderIlbcImpl::HasDecodePlc() const {
+  return true;
+}
+
+int AudioDecoderIlbcImpl::DecodeInternal(const uint8_t* encoded,
+                                     size_t encoded_len,
+                                     int sample_rate_hz,
+                                     int16_t* decoded,
+                                     SpeechType* speech_type) {
+  RTC_DCHECK_EQ(sample_rate_hz, 8000);
+  int16_t temp_type = 1;  // Default is speech.
+  int ret = WebRtcIlbcfix_Decode(dec_state_, encoded, encoded_len, decoded,
+                                 &temp_type);
+  *speech_type = ConvertSpeechType(temp_type);
+  return ret;
+}
+
+size_t AudioDecoderIlbcImpl::DecodePlc(size_t num_frames, int16_t* decoded) {
+  return WebRtcIlbcfix_NetEqPlc(dec_state_, decoded, num_frames);
+}
+
+void AudioDecoderIlbcImpl::Reset() {
+  WebRtcIlbcfix_Decoderinit30Ms(dec_state_);
+}
+
+std::vector<AudioDecoder::ParseResult> AudioDecoderIlbcImpl::ParsePayload(
+    rtc::Buffer&& payload,
+    uint32_t timestamp) {
+  std::vector<ParseResult> results;
+  size_t bytes_per_frame;
+  int timestamps_per_frame;
+  if (payload.size() >= 950) {
+    RTC_LOG(LS_WARNING)
+        << "AudioDecoderIlbcImpl::ParsePayload: Payload too large";
+    return results;
+  }
+  if (payload.size() % 38 == 0) {
+    // 20 ms frames.
+    bytes_per_frame = 38;
+    timestamps_per_frame = 160;
+  } else if (payload.size() % 50 == 0) {
+    // 30 ms frames.
+    bytes_per_frame = 50;
+    timestamps_per_frame = 240;
+  } else {
+    RTC_LOG(LS_WARNING)
+        << "AudioDecoderIlbcImpl::ParsePayload: Invalid payload";
+    return results;
+  }
+
+  RTC_DCHECK_EQ(0, payload.size() % bytes_per_frame);
+  if (payload.size() == bytes_per_frame) {
+    std::unique_ptr<EncodedAudioFrame> frame(
+        new LegacyEncodedAudioFrame(this, std::move(payload)));
+    results.emplace_back(timestamp, 0, std::move(frame));
+  } else {
+    size_t byte_offset;
+    uint32_t timestamp_offset;
+    for (byte_offset = 0, timestamp_offset = 0;
+         byte_offset < payload.size();
+         byte_offset += bytes_per_frame,
+             timestamp_offset += timestamps_per_frame) {
+      std::unique_ptr<EncodedAudioFrame> frame(new LegacyEncodedAudioFrame(
+          this, rtc::Buffer(payload.data() + byte_offset, bytes_per_frame)));
+      results.emplace_back(timestamp + timestamp_offset, 0, std::move(frame));
+    }
+  }
+
+  return results;
+}
+
+int AudioDecoderIlbcImpl::SampleRateHz() const {
+  return 8000;
+}
+
+size_t AudioDecoderIlbcImpl::Channels() const {
+  return 1;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h b/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h
new file mode 100644
index 0000000..edb65d0
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h
@@ -0,0 +1,46 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_AUDIO_DECODER_ILBC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_AUDIO_DECODER_ILBC_H_
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "rtc_base/constructormagic.h"
+
+typedef struct iLBC_decinst_t_ IlbcDecoderInstance;
+
+namespace webrtc {
+
+class AudioDecoderIlbcImpl final : public AudioDecoder {
+ public:
+  AudioDecoderIlbcImpl();
+  ~AudioDecoderIlbcImpl() override;
+  bool HasDecodePlc() const override;
+  size_t DecodePlc(size_t num_frames, int16_t* decoded) override;
+  void Reset() override;
+  std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
+                                        uint32_t timestamp) override;
+  int SampleRateHz() const override;
+  size_t Channels() const override;
+
+ protected:
+  int DecodeInternal(const uint8_t* encoded,
+                     size_t encoded_len,
+                     int sample_rate_hz,
+                     int16_t* decoded,
+                     SpeechType* speech_type) override;
+
+ private:
+  IlbcDecoderInstance* dec_state_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderIlbcImpl);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_CODECS_ILBC_AUDIO_DECODER_ILBC_H_
diff --git a/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc b/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
new file mode 100644
index 0000000..6ddc078
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
@@ -0,0 +1,155 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h"
+
+#include <algorithm>
+#include <limits>
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/codecs/ilbc/ilbc.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+namespace {
+
+const int kSampleRateHz = 8000;
+
+AudioEncoderIlbcConfig CreateConfig(const CodecInst& codec_inst) {
+  AudioEncoderIlbcConfig config;
+  config.frame_size_ms = codec_inst.pacsize / 8;
+  return config;
+}
+
+int GetIlbcBitrate(int ptime) {
+  switch (ptime) {
+    case 20:
+    case 40:
+      // 38 bytes per frame of 20 ms => 15200 bits/s.
+      return 15200;
+    case 30:
+    case 60:
+      // 50 bytes per frame of 30 ms => (approx) 13333 bits/s.
+      return 13333;
+    default:
+      FATAL();
+  }
+}
+
+}  // namespace
+
+AudioEncoderIlbcImpl::AudioEncoderIlbcImpl(const AudioEncoderIlbcConfig& config,
+                                           int payload_type)
+    : frame_size_ms_(config.frame_size_ms),
+      payload_type_(payload_type),
+      num_10ms_frames_per_packet_(
+          static_cast<size_t>(config.frame_size_ms / 10)),
+      encoder_(nullptr) {
+  RTC_CHECK(config.IsOk());
+  Reset();
+}
+
+AudioEncoderIlbcImpl::AudioEncoderIlbcImpl(const CodecInst& codec_inst)
+    : AudioEncoderIlbcImpl(CreateConfig(codec_inst), codec_inst.pltype) {}
+
+AudioEncoderIlbcImpl::~AudioEncoderIlbcImpl() {
+  RTC_CHECK_EQ(0, WebRtcIlbcfix_EncoderFree(encoder_));
+}
+
+int AudioEncoderIlbcImpl::SampleRateHz() const {
+  return kSampleRateHz;
+}
+
+size_t AudioEncoderIlbcImpl::NumChannels() const {
+  return 1;
+}
+
+size_t AudioEncoderIlbcImpl::Num10MsFramesInNextPacket() const {
+  return num_10ms_frames_per_packet_;
+}
+
+size_t AudioEncoderIlbcImpl::Max10MsFramesInAPacket() const {
+  return num_10ms_frames_per_packet_;
+}
+
+int AudioEncoderIlbcImpl::GetTargetBitrate() const {
+  return GetIlbcBitrate(rtc::dchecked_cast<int>(num_10ms_frames_per_packet_) *
+                        10);
+}
+
+AudioEncoder::EncodedInfo AudioEncoderIlbcImpl::EncodeImpl(
+    uint32_t rtp_timestamp,
+    rtc::ArrayView<const int16_t> audio,
+    rtc::Buffer* encoded) {
+
+  // Save timestamp if starting a new packet.
+  if (num_10ms_frames_buffered_ == 0)
+    first_timestamp_in_buffer_ = rtp_timestamp;
+
+  // Buffer input.
+  std::copy(audio.cbegin(), audio.cend(),
+            input_buffer_ + kSampleRateHz / 100 * num_10ms_frames_buffered_);
+
+  // If we don't yet have enough buffered input for a whole packet, we're done
+  // for now.
+  if (++num_10ms_frames_buffered_ < num_10ms_frames_per_packet_) {
+    return EncodedInfo();
+  }
+
+  // Encode buffered input.
+  RTC_DCHECK_EQ(num_10ms_frames_buffered_, num_10ms_frames_per_packet_);
+  num_10ms_frames_buffered_ = 0;
+  size_t encoded_bytes =
+      encoded->AppendData(
+          RequiredOutputSizeBytes(),
+          [&] (rtc::ArrayView<uint8_t> encoded) {
+            const int r = WebRtcIlbcfix_Encode(
+                encoder_,
+                input_buffer_,
+                kSampleRateHz / 100 * num_10ms_frames_per_packet_,
+                encoded.data());
+            RTC_CHECK_GE(r, 0);
+
+            return static_cast<size_t>(r);
+          });
+
+  RTC_DCHECK_EQ(encoded_bytes, RequiredOutputSizeBytes());
+
+  EncodedInfo info;
+  info.encoded_bytes = encoded_bytes;
+  info.encoded_timestamp = first_timestamp_in_buffer_;
+  info.payload_type = payload_type_;
+  info.encoder_type = CodecType::kIlbc;
+  return info;
+}
+
+void AudioEncoderIlbcImpl::Reset() {
+  if (encoder_)
+    RTC_CHECK_EQ(0, WebRtcIlbcfix_EncoderFree(encoder_));
+  RTC_CHECK_EQ(0, WebRtcIlbcfix_EncoderCreate(&encoder_));
+  const int encoder_frame_size_ms = frame_size_ms_ > 30
+                                        ? frame_size_ms_ / 2
+                                        : frame_size_ms_;
+  RTC_CHECK_EQ(0, WebRtcIlbcfix_EncoderInit(encoder_, encoder_frame_size_ms));
+  num_10ms_frames_buffered_ = 0;
+}
+
+size_t AudioEncoderIlbcImpl::RequiredOutputSizeBytes() const {
+  switch (num_10ms_frames_per_packet_) {
+    case 2:   return 38;
+    case 3:   return 50;
+    case 4:   return 2 * 38;
+    case 6:   return 2 * 50;
+    default:  FATAL();
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h b/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h
new file mode 100644
index 0000000..a238689
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h
@@ -0,0 +1,54 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_AUDIO_ENCODER_ILBC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_AUDIO_ENCODER_ILBC_H_
+
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/audio_codecs/ilbc/audio_encoder_ilbc_config.h"
+#include "modules/audio_coding/codecs/ilbc/ilbc.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+struct CodecInst;
+
+class AudioEncoderIlbcImpl final : public AudioEncoder {
+ public:
+  AudioEncoderIlbcImpl(const AudioEncoderIlbcConfig& config, int payload_type);
+  explicit AudioEncoderIlbcImpl(const CodecInst& codec_inst);
+  ~AudioEncoderIlbcImpl() override;
+
+  int SampleRateHz() const override;
+  size_t NumChannels() const override;
+  size_t Num10MsFramesInNextPacket() const override;
+  size_t Max10MsFramesInAPacket() const override;
+  int GetTargetBitrate() const override;
+  EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
+                         rtc::ArrayView<const int16_t> audio,
+                         rtc::Buffer* encoded) override;
+  void Reset() override;
+
+ private:
+  size_t RequiredOutputSizeBytes() const;
+
+  static constexpr size_t kMaxSamplesPerPacket = 480;
+  const int frame_size_ms_;
+  const int payload_type_;
+  const size_t num_10ms_frames_per_packet_;
+  size_t num_10ms_frames_buffered_;
+  uint32_t first_timestamp_in_buffer_;
+  int16_t input_buffer_[kMaxSamplesPerPacket];
+  IlbcEncoderInstance* encoder_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderIlbcImpl);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_CODECS_ILBC_AUDIO_ENCODER_ILBC_H_
diff --git a/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c b/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c
new file mode 100644
index 0000000..77b0f7f
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c
@@ -0,0 +1,63 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_AugmentedCbCorr.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/augmented_cb_corr.h"
+
+void WebRtcIlbcfix_AugmentedCbCorr(
+    int16_t *target,   /* (i) Target vector */
+    int16_t *buffer,   /* (i) Memory buffer */
+    int16_t *interpSamples, /* (i) buffer with
+                                     interpolated samples */
+    int32_t *crossDot,  /* (o) The cross correlation between
+                                 the target and the Augmented
+                                 vector */
+    size_t low,    /* (i) Lag to start from (typically
+                             20) */
+    size_t high,   /* (i) Lag to end at (typically 39) */
+    int scale)   /* (i) Scale factor to use for
+                              the crossDot */
+{
+  size_t lagcount;
+  size_t ilow;
+  int16_t *targetPtr;
+  int32_t *crossDotPtr;
+  int16_t *iSPtr=interpSamples;
+
+  /* Calculate the correlation between the target and the
+     interpolated codebook. The correlation is calculated in
+     3 sections with the interpolated part in the middle */
+  crossDotPtr=crossDot;
+  for (lagcount=low; lagcount<=high; lagcount++) {
+
+    ilow = lagcount - 4;
+
+    /* Compute dot product for the first (lagcount-4) samples */
+    (*crossDotPtr) = WebRtcSpl_DotProductWithScale(target, buffer-lagcount, ilow, scale);
+
+    /* Compute dot product on the interpolated samples */
+    (*crossDotPtr) += WebRtcSpl_DotProductWithScale(target+ilow, iSPtr, 4, scale);
+    targetPtr = target + lagcount;
+    iSPtr += lagcount-ilow;
+
+    /* Compute dot product for the remaining samples */
+    (*crossDotPtr) += WebRtcSpl_DotProductWithScale(targetPtr, buffer-lagcount, SUBL-lagcount, scale);
+    crossDotPtr++;
+  }
+}
diff --git a/modules/audio_coding/codecs/ilbc/augmented_cb_corr.h b/modules/audio_coding/codecs/ilbc/augmented_cb_corr.h
new file mode 100644
index 0000000..581f0d6
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/augmented_cb_corr.h
@@ -0,0 +1,41 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_AugmentedCbCorr.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_AUGMENTED_CB_CORR_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_AUGMENTED_CB_CORR_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  Calculate correlation between target and Augmented codebooks
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_AugmentedCbCorr(
+    int16_t *target,   /* (i) Target vector */
+    int16_t *buffer,   /* (i) Memory buffer */
+    int16_t *interpSamples, /* (i) buffer with
+                                           interpolated samples */
+    int32_t *crossDot,  /* (o) The cross correlation between
+                                           the target and the Augmented
+                                           vector */
+    size_t low,    /* (i) Lag to start from (typically
+                                                   20) */
+    size_t high,   /* (i) Lag to end at (typically 39 */
+    int scale);   /* (i) Scale factor to use for the crossDot */
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/bw_expand.c b/modules/audio_coding/codecs/ilbc/bw_expand.c
new file mode 100644
index 0000000..566af7d
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/bw_expand.c
@@ -0,0 +1,42 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_BwExpand.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  lpc bandwidth expansion
+ *---------------------------------------------------------------*/
+
+/* The output is in the same domain as the input */
+void WebRtcIlbcfix_BwExpand(
+    int16_t *out, /* (o) the bandwidth expanded lpc coefficients */
+    int16_t *in,  /* (i) the lpc coefficients before bandwidth
+                                   expansion */
+    int16_t *coef, /* (i) the bandwidth expansion factor Q15 */
+    int16_t length /* (i) the length of lpc coefficient vectors */
+                            ) {
+  int i;
+
+  out[0] = in[0];
+  for (i = 1; i < length; i++) {
+    /* out[i] = coef[i] * in[i] with rounding.
+       in[] and out[] are in Q12 and coef[] is in Q15
+    */
+    out[i] = (int16_t)((coef[i] * in[i] + 16384) >> 15);
+  }
+}
diff --git a/modules/audio_coding/codecs/ilbc/bw_expand.h b/modules/audio_coding/codecs/ilbc/bw_expand.h
new file mode 100644
index 0000000..ee9e45a
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/bw_expand.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_BwExpand.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_BW_EXPAND_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_BW_EXPAND_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  lpc bandwidth expansion
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_BwExpand(
+    int16_t *out, /* (o) the bandwidth expanded lpc coefficients */
+    int16_t *in,  /* (i) the lpc coefficients before bandwidth
+                                   expansion */
+    int16_t *coef, /* (i) the bandwidth expansion factor Q15 */
+    int16_t length /* (i) the length of lpc coefficient vectors */
+                            );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/cb_construct.c b/modules/audio_coding/codecs/ilbc/cb_construct.c
new file mode 100644
index 0000000..e2ae361
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/cb_construct.c
@@ -0,0 +1,71 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbConstruct.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/cb_construct.h"
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/gain_dequant.h"
+#include "modules/audio_coding/codecs/ilbc/get_cd_vec.h"
+
+/*----------------------------------------------------------------*
+ *  Construct decoded vector from codebook and gains.
+ *---------------------------------------------------------------*/
+
+bool WebRtcIlbcfix_CbConstruct(
+    int16_t* decvector,        /* (o) Decoded vector */
+    const int16_t* index,      /* (i) Codebook indices */
+    const int16_t* gain_index, /* (i) Gain quantization indices */
+    int16_t* mem,              /* (i) Buffer for codevector construction */
+    size_t lMem,               /* (i) Length of buffer */
+    size_t veclen) {           /* (i) Length of vector */
+  size_t j;
+  int16_t gain[CB_NSTAGES];
+  /* Stack based */
+  int16_t cbvec0[SUBL];
+  int16_t cbvec1[SUBL];
+  int16_t cbvec2[SUBL];
+  int32_t a32;
+  int16_t *gainPtr;
+
+  /* gain de-quantization */
+
+  gain[0] = WebRtcIlbcfix_GainDequant(gain_index[0], 16384, 0);
+  gain[1] = WebRtcIlbcfix_GainDequant(gain_index[1], gain[0], 1);
+  gain[2] = WebRtcIlbcfix_GainDequant(gain_index[2], gain[1], 2);
+
+  /* codebook vector construction and construction of total vector */
+
+  /* Stack based */
+  if (!WebRtcIlbcfix_GetCbVec(cbvec0, mem, (size_t)index[0], lMem, veclen))
+    return false;  // Failure.
+  if (!WebRtcIlbcfix_GetCbVec(cbvec1, mem, (size_t)index[1], lMem, veclen))
+    return false;  // Failure.
+  if (!WebRtcIlbcfix_GetCbVec(cbvec2, mem, (size_t)index[2], lMem, veclen))
+    return false;  // Failure.
+
+  gainPtr = &gain[0];
+  for (j=0;j<veclen;j++) {
+    a32 = (*gainPtr++) * cbvec0[j];
+    a32 += (*gainPtr++) * cbvec1[j];
+    a32 += (*gainPtr) * cbvec2[j];
+    gainPtr -= 2;
+    decvector[j] = (int16_t)((a32 + 8192) >> 14);
+  }
+
+  return true;  // Success.
+}
diff --git a/modules/audio_coding/codecs/ilbc/cb_construct.h b/modules/audio_coding/codecs/ilbc/cb_construct.h
new file mode 100644
index 0000000..b200990
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/cb_construct.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbConstruct.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_CONSTRUCT_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_CONSTRUCT_H_
+
+#include <stdbool.h>
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  Construct decoded vector from codebook and gains.
+ *---------------------------------------------------------------*/
+
+// Returns true on success, false on failure.
+bool WebRtcIlbcfix_CbConstruct(
+    int16_t* decvector,        /* (o) Decoded vector */
+    const int16_t* index,      /* (i) Codebook indices */
+    const int16_t* gain_index, /* (i) Gain quantization indices */
+    int16_t* mem,              /* (i) Buffer for codevector construction */
+    size_t lMem,               /* (i) Length of buffer */
+    size_t veclen              /* (i) Length of vector */
+    ) RTC_WARN_UNUSED_RESULT;
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/cb_mem_energy.c b/modules/audio_coding/codecs/ilbc/cb_mem_energy.c
new file mode 100644
index 0000000..9304a91
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/cb_mem_energy.c
@@ -0,0 +1,79 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbMemEnergy.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h"
+
+/*----------------------------------------------------------------*
+ *  Function WebRtcIlbcfix_CbMemEnergy computes the energy of all
+ * the vectors in the codebook memory that will be used in the
+ * following search for the best match.
+ *----------------------------------------------------------------*/
+
+void WebRtcIlbcfix_CbMemEnergy(
+    size_t range,
+    int16_t *CB,   /* (i) The CB memory (1:st section) */
+    int16_t *filteredCB,  /* (i) The filtered CB memory (2:nd section) */
+    size_t lMem,   /* (i) Length of the CB memory */
+    size_t lTarget,   /* (i) Length of the target vector */
+    int16_t *energyW16,  /* (o) Energy in the CB vectors */
+    int16_t *energyShifts, /* (o) Shift value of the energy */
+    int scale,   /* (i) The scaling of all energy values */
+    size_t base_size  /* (i) Index to where energy values should be stored */
+                               ) {
+  int16_t *ppi, *ppo, *pp;
+  int32_t energy, tmp32;
+
+  /* Compute the energy and store it in a vector. Also the
+   * corresponding shift values are stored. The energy values
+   * are reused in all three stages. */
+
+  /* Calculate the energy in the first block of 'lTarget' sampels. */
+  ppi = CB+lMem-lTarget-1;
+  ppo = CB+lMem-1;
+
+  pp=CB+lMem-lTarget;
+  energy = WebRtcSpl_DotProductWithScale( pp, pp, lTarget, scale);
+
+  /* Normalize the energy and store the number of shifts */
+  energyShifts[0] = (int16_t)WebRtcSpl_NormW32(energy);
+  tmp32 = energy << energyShifts[0];
+  energyW16[0] = (int16_t)(tmp32 >> 16);
+
+  /* Compute the energy of the rest of the cb memory
+   * by step wise adding and subtracting the next
+   * sample and the last sample respectively. */
+  WebRtcIlbcfix_CbMemEnergyCalc(energy, range, ppi, ppo, energyW16, energyShifts, scale, 0);
+
+  /* Next, precompute the energy values for the filtered cb section */
+  energy=0;
+  pp=filteredCB+lMem-lTarget;
+
+  energy = WebRtcSpl_DotProductWithScale( pp, pp, lTarget, scale);
+
+  /* Normalize the energy and store the number of shifts */
+  energyShifts[base_size] = (int16_t)WebRtcSpl_NormW32(energy);
+  tmp32 = energy << energyShifts[base_size];
+  energyW16[base_size] = (int16_t)(tmp32 >> 16);
+
+  ppi = filteredCB + lMem - 1 - lTarget;
+  ppo = filteredCB + lMem - 1;
+
+  WebRtcIlbcfix_CbMemEnergyCalc(energy, range, ppi, ppo, energyW16, energyShifts, scale, base_size);
+}
diff --git a/modules/audio_coding/codecs/ilbc/cb_mem_energy.h b/modules/audio_coding/codecs/ilbc/cb_mem_energy.h
new file mode 100644
index 0000000..e8e2fe9
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/cb_mem_energy.h
@@ -0,0 +1,34 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbMemEnergy.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_H_
+
+void WebRtcIlbcfix_CbMemEnergy(
+    size_t range,
+    int16_t *CB,   /* (i) The CB memory (1:st section) */
+    int16_t *filteredCB,  /* (i) The filtered CB memory (2:nd section) */
+    size_t lMem,   /* (i) Length of the CB memory */
+    size_t lTarget,   /* (i) Length of the target vector */
+    int16_t *energyW16,  /* (o) Energy in the CB vectors */
+    int16_t *energyShifts, /* (o) Shift value of the energy */
+    int scale,   /* (i) The scaling of all energy values */
+    size_t base_size  /* (i) Index to where energy values should be stored */
+                               );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c b/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c
new file mode 100644
index 0000000..df9ff45
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c
@@ -0,0 +1,67 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbMemEnergyAugmentation.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+void WebRtcIlbcfix_CbMemEnergyAugmentation(
+    int16_t *interpSamples, /* (i) The interpolated samples */
+    int16_t *CBmem,   /* (i) The CB memory */
+    int scale,   /* (i) The scaling of all energy values */
+    size_t base_size,  /* (i) Index to where energy values should be stored */
+    int16_t *energyW16,  /* (o) Energy in the CB vectors */
+    int16_t *energyShifts /* (o) Shift value of the energy */
+                                           ){
+  int32_t energy, tmp32;
+  int16_t *ppe, *pp, *interpSamplesPtr;
+  int16_t *CBmemPtr;
+  size_t lagcount;
+  int16_t *enPtr=&energyW16[base_size-20];
+  int16_t *enShPtr=&energyShifts[base_size-20];
+  int32_t nrjRecursive;
+
+  CBmemPtr = CBmem+147;
+  interpSamplesPtr = interpSamples;
+
+  /* Compute the energy for the first (low-5) noninterpolated samples */
+  nrjRecursive = WebRtcSpl_DotProductWithScale( CBmemPtr-19, CBmemPtr-19, 15, scale);
+  ppe = CBmemPtr - 20;
+
+  for (lagcount=20; lagcount<=39; lagcount++) {
+
+    /* Update the energy recursively to save complexity */
+    nrjRecursive += (*ppe * *ppe) >> scale;
+    ppe--;
+    energy = nrjRecursive;
+
+    /* interpolation */
+    energy += WebRtcSpl_DotProductWithScale(interpSamplesPtr, interpSamplesPtr, 4, scale);
+    interpSamplesPtr += 4;
+
+    /* Compute energy for the remaining samples */
+    pp = CBmemPtr - lagcount;
+    energy += WebRtcSpl_DotProductWithScale(pp, pp, SUBL-lagcount, scale);
+
+    /* Normalize the energy and store the number of shifts */
+    (*enShPtr) = (int16_t)WebRtcSpl_NormW32(energy);
+    tmp32 = energy << *enShPtr;
+    *enPtr = (int16_t)(tmp32 >> 16);
+    enShPtr++;
+    enPtr++;
+  }
+}
diff --git a/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h b/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h
new file mode 100644
index 0000000..00eb017
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h
@@ -0,0 +1,31 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbMemEnergyAugmentation.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_AUGMENTATION_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_AUGMENTATION_H_
+
+void WebRtcIlbcfix_CbMemEnergyAugmentation(
+    int16_t *interpSamples, /* (i) The interpolated samples */
+    int16_t *CBmem,   /* (i) The CB memory */
+    int scale,   /* (i) The scaling of all energy values */
+    size_t base_size,  /* (i) Index to where energy values should be stored */
+    int16_t *energyW16,  /* (o) Energy in the CB vectors */
+    int16_t *energyShifts /* (o) Shift value of the energy */
+                                           );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c b/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c
new file mode 100644
index 0000000..35d3ce4
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbMemEnergyCalc.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/* Compute the energy of the rest of the cb memory
+ * by step wise adding and subtracting the next
+ * sample and the last sample respectively */
+void WebRtcIlbcfix_CbMemEnergyCalc(
+    int32_t energy,   /* (i) input start energy */
+    size_t range,   /* (i) number of iterations */
+    int16_t *ppi,   /* (i) input pointer 1 */
+    int16_t *ppo,   /* (i) input pointer 2 */
+    int16_t *energyW16,  /* (o) Energy in the CB vectors */
+    int16_t *energyShifts, /* (o) Shift value of the energy */
+    int scale,   /* (i) The scaling of all energy values */
+    size_t base_size  /* (i) Index to where energy values should be stored */
+                                   )
+{
+  size_t j;
+  int16_t shft;
+  int32_t tmp;
+  int16_t *eSh_ptr;
+  int16_t *eW16_ptr;
+
+
+  eSh_ptr  = &energyShifts[1+base_size];
+  eW16_ptr = &energyW16[1+base_size];
+
+  for (j = 0; j + 1 < range; j++) {
+
+    /* Calculate next energy by a +/-
+       operation on the edge samples */
+    tmp = (*ppi) * (*ppi) - (*ppo) * (*ppo);
+    energy += tmp >> scale;
+    energy = WEBRTC_SPL_MAX(energy, 0);
+
+    ppi--;
+    ppo--;
+
+    /* Normalize the energy into a int16_t and store
+       the number of shifts */
+
+    shft = (int16_t)WebRtcSpl_NormW32(energy);
+    *eSh_ptr++ = shft;
+
+    tmp = energy << shft;
+    *eW16_ptr++ = (int16_t)(tmp >> 16);
+  }
+}
diff --git a/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h b/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h
new file mode 100644
index 0000000..af8e658
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbMemEnergyCalc.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_CALC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_CALC_H_
+
+void WebRtcIlbcfix_CbMemEnergyCalc(
+    int32_t energy,   /* (i) input start energy */
+    size_t range,   /* (i) number of iterations */
+    int16_t *ppi,   /* (i) input pointer 1 */
+    int16_t *ppo,   /* (i) input pointer 2 */
+    int16_t *energyW16,  /* (o) Energy in the CB vectors */
+    int16_t *energyShifts, /* (o) Shift value of the energy */
+    int scale,   /* (i) The scaling of all energy values */
+    size_t base_size  /* (i) Index to where energy values should be stored */
+                                   );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/cb_search.c b/modules/audio_coding/codecs/ilbc/cb_search.c
new file mode 100644
index 0000000..88b2f01
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/cb_search.c
@@ -0,0 +1,403 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbSearch.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/gain_quant.h"
+#include "modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/cb_mem_energy.h"
+#include "modules/audio_coding/codecs/ilbc/interpolate_samples.h"
+#include "modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h"
+#include "modules/audio_coding/codecs/ilbc/cb_search_core.h"
+#include "modules/audio_coding/codecs/ilbc/energy_inverse.h"
+#include "modules/audio_coding/codecs/ilbc/augmented_cb_corr.h"
+#include "modules/audio_coding/codecs/ilbc/cb_update_best_index.h"
+#include "modules/audio_coding/codecs/ilbc/create_augmented_vec.h"
+
+/*----------------------------------------------------------------*
+ *  Search routine for codebook encoding and gain quantization.
+ *----------------------------------------------------------------*/
+
+void WebRtcIlbcfix_CbSearch(
+    IlbcEncoder *iLBCenc_inst,
+    /* (i) the encoder state structure */
+    int16_t *index,  /* (o) Codebook indices */
+    int16_t *gain_index, /* (o) Gain quantization indices */
+    int16_t *intarget, /* (i) Target vector for encoding */
+    int16_t *decResidual,/* (i) Decoded residual for codebook construction */
+    size_t lMem,  /* (i) Length of buffer */
+    size_t lTarget,  /* (i) Length of vector */
+    int16_t *weightDenum,/* (i) weighting filter coefficients in Q12 */
+    size_t block  /* (i) the subblock number */
+                            ) {
+  size_t i, range;
+  int16_t ii, j, stage;
+  int16_t *pp;
+  int16_t tmp;
+  int scale;
+  int16_t bits, temp1, temp2;
+  size_t base_size;
+  int32_t codedEner, targetEner;
+  int16_t gains[CB_NSTAGES+1];
+  int16_t *cb_vecPtr;
+  size_t indexOffset, sInd, eInd;
+  int32_t CritMax=0;
+  int16_t shTotMax=WEBRTC_SPL_WORD16_MIN;
+  size_t bestIndex=0;
+  int16_t bestGain=0;
+  size_t indexNew;
+  int16_t CritNewSh;
+  int32_t CritNew;
+  int32_t *cDotPtr;
+  size_t noOfZeros;
+  int16_t *gainPtr;
+  int32_t t32, tmpW32;
+  int16_t *WebRtcIlbcfix_kGainSq5_ptr;
+  /* Stack based */
+  int16_t CBbuf[CB_MEML+LPC_FILTERORDER+CB_HALFFILTERLEN];
+  int32_t cDot[128];
+  int32_t Crit[128];
+  int16_t targetVec[SUBL+LPC_FILTERORDER];
+  int16_t cbvectors[CB_MEML + 1];  /* Adding one extra position for
+                                            Coverity warnings. */
+  int16_t codedVec[SUBL];
+  int16_t interpSamples[20*4];
+  int16_t interpSamplesFilt[20*4];
+  int16_t energyW16[CB_EXPAND*128];
+  int16_t energyShifts[CB_EXPAND*128];
+  int16_t *inverseEnergy=energyW16;   /* Reuse memory */
+  int16_t *inverseEnergyShifts=energyShifts; /* Reuse memory */
+  int16_t *buf = &CBbuf[LPC_FILTERORDER];
+  int16_t *target = &targetVec[LPC_FILTERORDER];
+  int16_t *aug_vec = (int16_t*)cDot;   /* length [SUBL], reuse memory */
+
+  /* Determine size of codebook sections */
+
+  base_size=lMem-lTarget+1;
+  if (lTarget==SUBL) {
+    base_size=lMem-19;
+  }
+
+  /* weighting of the CB memory */
+  noOfZeros=lMem-WebRtcIlbcfix_kFilterRange[block];
+  WebRtcSpl_MemSetW16(&buf[-LPC_FILTERORDER], 0, noOfZeros+LPC_FILTERORDER);
+  WebRtcSpl_FilterARFastQ12(
+      decResidual+noOfZeros, buf+noOfZeros,
+      weightDenum, LPC_FILTERORDER+1, WebRtcIlbcfix_kFilterRange[block]);
+
+  /* weighting of the target vector */
+  WEBRTC_SPL_MEMCPY_W16(&target[-LPC_FILTERORDER], buf+noOfZeros+WebRtcIlbcfix_kFilterRange[block]-LPC_FILTERORDER, LPC_FILTERORDER);
+  WebRtcSpl_FilterARFastQ12(
+      intarget, target,
+      weightDenum, LPC_FILTERORDER+1, lTarget);
+
+  /* Store target, towards the end codedVec is calculated as
+     the initial target minus the remaining target */
+  WEBRTC_SPL_MEMCPY_W16(codedVec, target, lTarget);
+
+  /* Find the highest absolute value to calculate proper
+     vector scale factor (so that it uses 12 bits) */
+  temp1 = WebRtcSpl_MaxAbsValueW16(buf, lMem);
+  temp2 = WebRtcSpl_MaxAbsValueW16(target, lTarget);
+
+  if ((temp1>0)&&(temp2>0)) {
+    temp1 = WEBRTC_SPL_MAX(temp1, temp2);
+    scale = WebRtcSpl_GetSizeInBits((uint32_t)(temp1 * temp1));
+  } else {
+    /* temp1 or temp2 is negative (maximum was -32768) */
+    scale = 30;
+  }
+
+  /* Scale to so that a mul-add 40 times does not overflow */
+  scale = scale - 25;
+  scale = WEBRTC_SPL_MAX(0, scale);
+
+  /* Compute energy of the original target */
+  targetEner = WebRtcSpl_DotProductWithScale(target, target, lTarget, scale);
+
+  /* Prepare search over one more codebook section. This section
+     is created by filtering the original buffer with a filter. */
+  WebRtcIlbcfix_FilteredCbVecs(cbvectors, buf, lMem, WebRtcIlbcfix_kFilterRange[block]);
+
+  range = WebRtcIlbcfix_kSearchRange[block][0];
+
+  if(lTarget == SUBL) {
+    /* Create the interpolated samples and store them for use in all stages */
+
+    /* First section, non-filtered half of the cb */
+    WebRtcIlbcfix_InterpolateSamples(interpSamples, buf, lMem);
+
+    /* Second section, filtered half of the cb */
+    WebRtcIlbcfix_InterpolateSamples(interpSamplesFilt, cbvectors, lMem);
+
+    /* Compute the CB vectors' energies for the first cb section (non-filtered) */
+    WebRtcIlbcfix_CbMemEnergyAugmentation(interpSamples, buf,
+                                          scale, 20, energyW16, energyShifts);
+
+    /* Compute the CB vectors' energies for the second cb section (filtered cb) */
+    WebRtcIlbcfix_CbMemEnergyAugmentation(interpSamplesFilt, cbvectors, scale,
+                                          base_size + 20, energyW16,
+                                          energyShifts);
+
+    /* Compute the CB vectors' energies and store them in the vector
+     * energyW16. Also the corresponding shift values are stored. The
+     * energy values are used in all three stages. */
+    WebRtcIlbcfix_CbMemEnergy(range, buf, cbvectors, lMem,
+                              lTarget, energyW16+20, energyShifts+20, scale, base_size);
+
+  } else {
+    /* Compute the CB vectors' energies and store them in the vector
+     * energyW16. Also the corresponding shift values are stored. The
+     * energy values are used in all three stages. */
+    WebRtcIlbcfix_CbMemEnergy(range, buf, cbvectors, lMem,
+                              lTarget, energyW16, energyShifts, scale, base_size);
+
+    /* Set the energy positions 58-63 and 122-127 to zero
+       (otherwise they are uninitialized) */
+    WebRtcSpl_MemSetW16(energyW16+range, 0, (base_size-range));
+    WebRtcSpl_MemSetW16(energyW16+range+base_size, 0, (base_size-range));
+  }
+
+  /* Calculate Inverse Energy (energyW16 is already normalized
+     and will contain the inverse energy in Q29 after this call */
+  WebRtcIlbcfix_EnergyInverse(energyW16, base_size*CB_EXPAND);
+
+  /* The gain value computed in the previous stage is used
+   * as an upper limit to what the next stage gain value
+   * is allowed to be. In stage 0, 16384 (1.0 in Q14) is used as
+   * the upper limit. */
+  gains[0] = 16384;
+
+  for (stage=0; stage<CB_NSTAGES; stage++) {
+
+    /* Set up memories */
+    range = WebRtcIlbcfix_kSearchRange[block][stage];
+
+    /* initialize search measures */
+    CritMax=0;
+    shTotMax=-100;
+    bestIndex=0;
+    bestGain=0;
+
+    /* loop over lags 40+ in the first codebook section, full search */
+    cb_vecPtr = buf+lMem-lTarget;
+
+    /* Calculate all the cross correlations (augmented part of CB) */
+    if (lTarget==SUBL) {
+      WebRtcIlbcfix_AugmentedCbCorr(target, buf+lMem,
+                                    interpSamples, cDot,
+                                    20, 39, scale);
+      cDotPtr=&cDot[20];
+    } else {
+      cDotPtr=cDot;
+    }
+    /* Calculate all the cross correlations (main part of CB) */
+    WebRtcSpl_CrossCorrelation(cDotPtr, target, cb_vecPtr, lTarget, range, scale, -1);
+
+    /* Adjust the search range for the augmented vectors */
+    if (lTarget==SUBL) {
+      range=WebRtcIlbcfix_kSearchRange[block][stage]+20;
+    } else {
+      range=WebRtcIlbcfix_kSearchRange[block][stage];
+    }
+
+    indexOffset=0;
+
+    /* Search for best index in this part of the vector */
+    WebRtcIlbcfix_CbSearchCore(
+        cDot, range, stage, inverseEnergy,
+        inverseEnergyShifts, Crit,
+        &indexNew, &CritNew, &CritNewSh);
+
+    /* Update the global best index and the corresponding gain */
+    WebRtcIlbcfix_CbUpdateBestIndex(
+        CritNew, CritNewSh, indexNew+indexOffset, cDot[indexNew+indexOffset],
+        inverseEnergy[indexNew+indexOffset], inverseEnergyShifts[indexNew+indexOffset],
+        &CritMax, &shTotMax, &bestIndex, &bestGain);
+
+    sInd = ((CB_RESRANGE >> 1) > bestIndex) ?
+        0 : (bestIndex - (CB_RESRANGE >> 1));
+    eInd=sInd+CB_RESRANGE;
+    if (eInd>=range) {
+      eInd=range-1;
+      sInd=eInd-CB_RESRANGE;
+    }
+
+    range = WebRtcIlbcfix_kSearchRange[block][stage];
+
+    if (lTarget==SUBL) {
+      i=sInd;
+      if (sInd<20) {
+        WebRtcIlbcfix_AugmentedCbCorr(target, cbvectors + lMem,
+                                      interpSamplesFilt, cDot, sInd + 20,
+                                      WEBRTC_SPL_MIN(39, (eInd + 20)), scale);
+        i=20;
+        cDotPtr = &cDot[20 - sInd];
+      } else {
+        cDotPtr = cDot;
+      }
+
+      cb_vecPtr = cbvectors+lMem-20-i;
+
+      /* Calculate the cross correlations (main part of the filtered CB) */
+      WebRtcSpl_CrossCorrelation(cDotPtr, target, cb_vecPtr, lTarget,
+                                 eInd - i + 1, scale, -1);
+
+    } else {
+      cDotPtr = cDot;
+      cb_vecPtr = cbvectors+lMem-lTarget-sInd;
+
+      /* Calculate the cross correlations (main part of the filtered CB) */
+      WebRtcSpl_CrossCorrelation(cDotPtr, target, cb_vecPtr, lTarget,
+                                 eInd - sInd + 1, scale, -1);
+
+    }
+
+    /* Adjust the search range for the augmented vectors */
+    indexOffset=base_size+sInd;
+
+    /* Search for best index in this part of the vector */
+    WebRtcIlbcfix_CbSearchCore(
+        cDot, eInd-sInd+1, stage, inverseEnergy+indexOffset,
+        inverseEnergyShifts+indexOffset, Crit,
+        &indexNew, &CritNew, &CritNewSh);
+
+    /* Update the global best index and the corresponding gain */
+    WebRtcIlbcfix_CbUpdateBestIndex(
+        CritNew, CritNewSh, indexNew+indexOffset, cDot[indexNew],
+        inverseEnergy[indexNew+indexOffset], inverseEnergyShifts[indexNew+indexOffset],
+        &CritMax, &shTotMax, &bestIndex, &bestGain);
+
+    index[stage] = (int16_t)bestIndex;
+
+
+    bestGain = WebRtcIlbcfix_GainQuant(bestGain,
+                                       (int16_t)WEBRTC_SPL_ABS_W16(gains[stage]), stage, &gain_index[stage]);
+
+    /* Extract the best (according to measure) codebook vector
+       Also adjust the index, so that the augmented vectors are last.
+       Above these vectors were first...
+    */
+
+    if(lTarget==(STATE_LEN-iLBCenc_inst->state_short_len)) {
+
+      if((size_t)index[stage]<base_size) {
+        pp=buf+lMem-lTarget-index[stage];
+      } else {
+        pp=cbvectors+lMem-lTarget-
+            index[stage]+base_size;
+      }
+
+    } else {
+
+      if ((size_t)index[stage]<base_size) {
+        if (index[stage]>=20) {
+          /* Adjust index and extract vector */
+          index[stage]-=20;
+          pp=buf+lMem-lTarget-index[stage];
+        } else {
+          /* Adjust index and extract vector */
+          index[stage]+=(int16_t)(base_size-20);
+
+          WebRtcIlbcfix_CreateAugmentedVec(index[stage]-base_size+40,
+                                           buf+lMem, aug_vec);
+          pp = aug_vec;
+
+        }
+      } else {
+
+        if ((index[stage] - base_size) >= 20) {
+          /* Adjust index and extract vector */
+          index[stage]-=20;
+          pp=cbvectors+lMem-lTarget-
+              index[stage]+base_size;
+        } else {
+          /* Adjust index and extract vector */
+          index[stage]+=(int16_t)(base_size-20);
+          WebRtcIlbcfix_CreateAugmentedVec(index[stage]-2*base_size+40,
+                                           cbvectors+lMem, aug_vec);
+          pp = aug_vec;
+        }
+      }
+    }
+
+    /* Subtract the best codebook vector, according
+       to measure, from the target vector */
+
+    WebRtcSpl_AddAffineVectorToVector(target, pp, (int16_t)(-bestGain),
+                                      (int32_t)8192, (int16_t)14, lTarget);
+
+    /* record quantized gain */
+    gains[stage+1] = bestGain;
+
+  } /* end of Main Loop. for (stage=0;... */
+
+  /* Calculte the coded vector (original target - what's left) */
+  for (i=0;i<lTarget;i++) {
+    codedVec[i]-=target[i];
+  }
+
+  /* Gain adjustment for energy matching */
+  codedEner = WebRtcSpl_DotProductWithScale(codedVec, codedVec, lTarget, scale);
+
+  j=gain_index[0];
+
+  temp1 = (int16_t)WebRtcSpl_NormW32(codedEner);
+  temp2 = (int16_t)WebRtcSpl_NormW32(targetEner);
+
+  if(temp1 < temp2) {
+    bits = 16 - temp1;
+  } else {
+    bits = 16 - temp2;
+  }
+
+  tmp = (int16_t)((gains[1] * gains[1]) >> 14);
+
+  targetEner = (int16_t)WEBRTC_SPL_SHIFT_W32(targetEner, -bits) * tmp;
+
+  tmpW32 = ((int32_t)(gains[1]-1))<<1;
+
+  /* Pointer to the table that contains
+     gain_sq5TblFIX * gain_sq5TblFIX in Q14 */
+  gainPtr=(int16_t*)WebRtcIlbcfix_kGainSq5Sq+gain_index[0];
+  temp1 = (int16_t)WEBRTC_SPL_SHIFT_W32(codedEner, -bits);
+
+  WebRtcIlbcfix_kGainSq5_ptr = (int16_t*)&WebRtcIlbcfix_kGainSq5[j];
+
+  /* targetEner and codedEner are in Q(-2*scale) */
+  for (ii=gain_index[0];ii<32;ii++) {
+
+    /* Change the index if
+       (codedEnergy*gainTbl[i]*gainTbl[i])<(targetEn*gain[0]*gain[0]) AND
+       gainTbl[i] < 2*gain[0]
+    */
+
+    t32 = temp1 * *gainPtr;
+    t32 = t32 - targetEner;
+    if (t32 < 0) {
+      if ((*WebRtcIlbcfix_kGainSq5_ptr) < tmpW32) {
+        j=ii;
+        WebRtcIlbcfix_kGainSq5_ptr = (int16_t*)&WebRtcIlbcfix_kGainSq5[ii];
+      }
+    }
+    gainPtr++;
+  }
+  gain_index[0]=j;
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/cb_search.h b/modules/audio_coding/codecs/ilbc/cb_search.h
new file mode 100644
index 0000000..c8626c5
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/cb_search.h
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbSearch.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_SEARCH_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_SEARCH_H_
+
+void WebRtcIlbcfix_CbSearch(
+    IlbcEncoder *iLBCenc_inst,
+    /* (i) the encoder state structure */
+    int16_t *index,  /* (o) Codebook indices */
+    int16_t *gain_index, /* (o) Gain quantization indices */
+    int16_t *intarget, /* (i) Target vector for encoding */
+    int16_t *decResidual,/* (i) Decoded residual for codebook construction */
+    size_t lMem,  /* (i) Length of buffer */
+    size_t lTarget,  /* (i) Length of vector */
+    int16_t *weightDenum,/* (i) weighting filter coefficients in Q12 */
+    size_t block  /* (i) the subblock number */
+                            );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/cb_search_core.c b/modules/audio_coding/codecs/ilbc/cb_search_core.c
new file mode 100644
index 0000000..09d26d3
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/cb_search_core.c
@@ -0,0 +1,113 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbSearchCore.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+void WebRtcIlbcfix_CbSearchCore(
+    int32_t *cDot,    /* (i) Cross Correlation */
+    size_t range,    /* (i) Search range */
+    int16_t stage,    /* (i) Stage of this search */
+    int16_t *inverseEnergy,  /* (i) Inversed energy */
+    int16_t *inverseEnergyShift, /* (i) Shifts of inversed energy
+                                           with the offset 2*16-29 */
+    int32_t *Crit,    /* (o) The criteria */
+    size_t *bestIndex,   /* (o) Index that corresponds to
+                                                   maximum criteria (in this
+                                                   vector) */
+    int32_t *bestCrit,   /* (o) Value of critera for the
+                                                   chosen index */
+    int16_t *bestCritSh)   /* (o) The domain of the chosen
+                                                   criteria */
+{
+  int32_t maxW32, tmp32;
+  int16_t max, sh, tmp16;
+  size_t i;
+  int32_t *cDotPtr;
+  int16_t cDotSqW16;
+  int16_t *inverseEnergyPtr;
+  int32_t *critPtr;
+  int16_t *inverseEnergyShiftPtr;
+
+  /* Don't allow negative values for stage 0 */
+  if (stage==0) {
+    cDotPtr=cDot;
+    for (i=0;i<range;i++) {
+      *cDotPtr=WEBRTC_SPL_MAX(0, (*cDotPtr));
+      cDotPtr++;
+    }
+  }
+
+  /* Normalize cDot to int16_t, calculate the square of cDot and store the upper int16_t */
+  maxW32 = WebRtcSpl_MaxAbsValueW32(cDot, range);
+
+  sh = (int16_t)WebRtcSpl_NormW32(maxW32);
+  cDotPtr = cDot;
+  inverseEnergyPtr = inverseEnergy;
+  critPtr = Crit;
+  inverseEnergyShiftPtr=inverseEnergyShift;
+  max=WEBRTC_SPL_WORD16_MIN;
+
+  for (i=0;i<range;i++) {
+    /* Calculate cDot*cDot and put the result in a int16_t */
+    tmp32 = *cDotPtr << sh;
+    tmp16 = (int16_t)(tmp32 >> 16);
+    cDotSqW16 = (int16_t)(((int32_t)(tmp16)*(tmp16))>>16);
+
+    /* Calculate the criteria (cDot*cDot/energy) */
+    *critPtr = cDotSqW16 * *inverseEnergyPtr;
+
+    /* Extract the maximum shift value under the constraint
+       that the criteria is not zero */
+    if ((*critPtr)!=0) {
+      max = WEBRTC_SPL_MAX((*inverseEnergyShiftPtr), max);
+    }
+
+    inverseEnergyPtr++;
+    inverseEnergyShiftPtr++;
+    critPtr++;
+    cDotPtr++;
+  }
+
+  /* If no max shifts still at initialization value, set shift to zero */
+  if (max==WEBRTC_SPL_WORD16_MIN) {
+    max = 0;
+  }
+
+  /* Modify the criterias, so that all of them use the same Q domain */
+  critPtr=Crit;
+  inverseEnergyShiftPtr=inverseEnergyShift;
+  for (i=0;i<range;i++) {
+    /* Guarantee that the shift value is less than 16
+       in order to simplify for DSP's (and guard against >31) */
+    tmp16 = WEBRTC_SPL_MIN(16, max-(*inverseEnergyShiftPtr));
+
+    (*critPtr)=WEBRTC_SPL_SHIFT_W32((*critPtr),-tmp16);
+    critPtr++;
+    inverseEnergyShiftPtr++;
+  }
+
+  /* Find the index of the best value */
+  *bestIndex = WebRtcSpl_MaxIndexW32(Crit, range);
+  *bestCrit = Crit[*bestIndex];
+
+  /* Calculate total shifts of this criteria */
+  *bestCritSh = 32 - 2*sh + max;
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/cb_search_core.h b/modules/audio_coding/codecs/ilbc/cb_search_core.h
new file mode 100644
index 0000000..3210668
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/cb_search_core.h
@@ -0,0 +1,40 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbSearchCore.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_SEARCH_CORE_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_SEARCH_CORE_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+void WebRtcIlbcfix_CbSearchCore(
+    int32_t *cDot,    /* (i) Cross Correlation */
+    size_t range,    /* (i) Search range */
+    int16_t stage,    /* (i) Stage of this search */
+    int16_t *inverseEnergy,  /* (i) Inversed energy */
+    int16_t *inverseEnergyShift, /* (i) Shifts of inversed energy
+                                          with the offset 2*16-29 */
+    int32_t *Crit,    /* (o) The criteria */
+    size_t *bestIndex,   /* (o) Index that corresponds to
+                                   maximum criteria (in this
+                                   vector) */
+    int32_t *bestCrit,   /* (o) Value of critera for the
+                                  chosen index */
+    int16_t *bestCritSh);  /* (o) The domain of the chosen
+                                    criteria */
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/cb_update_best_index.c b/modules/audio_coding/codecs/ilbc/cb_update_best_index.c
new file mode 100644
index 0000000..ed20c46
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/cb_update_best_index.c
@@ -0,0 +1,88 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbUpdateBestIndex.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/cb_update_best_index.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+void WebRtcIlbcfix_CbUpdateBestIndex(
+    int32_t CritNew,    /* (i) New Potentially best Criteria */
+    int16_t CritNewSh,   /* (i) Shift value of above Criteria */
+    size_t IndexNew,   /* (i) Index of new Criteria */
+    int32_t cDotNew,    /* (i) Cross dot of new index */
+    int16_t invEnergyNew,  /* (i) Inversed energy new index */
+    int16_t energyShiftNew,  /* (i) Energy shifts of new index */
+    int32_t *CritMax,   /* (i/o) Maximum Criteria (so far) */
+    int16_t *shTotMax,   /* (i/o) Shifts of maximum criteria */
+    size_t *bestIndex,   /* (i/o) Index that corresponds to
+                                                   maximum criteria */
+    int16_t *bestGain)   /* (i/o) Gain in Q14 that corresponds
+                                                   to maximum criteria */
+{
+  int16_t shOld, shNew, tmp16;
+  int16_t scaleTmp;
+  int32_t gainW32;
+
+  /* Normalize the new and old Criteria to the same domain */
+  if (CritNewSh>(*shTotMax)) {
+    shOld=WEBRTC_SPL_MIN(31,CritNewSh-(*shTotMax));
+    shNew=0;
+  } else {
+    shOld=0;
+    shNew=WEBRTC_SPL_MIN(31,(*shTotMax)-CritNewSh);
+  }
+
+  /* Compare the two criterias. If the new one is better,
+     calculate the gain and store this index as the new best one
+  */
+
+  if ((CritNew >> shNew) > (*CritMax >> shOld)) {
+
+    tmp16 = (int16_t)WebRtcSpl_NormW32(cDotNew);
+    tmp16 = 16 - tmp16;
+
+    /* Calculate the gain in Q14
+       Compensate for inverseEnergyshift in Q29 and that the energy
+       value was stored in a int16_t (shifted down 16 steps)
+       => 29-14+16 = 31 */
+
+    scaleTmp = -energyShiftNew-tmp16+31;
+    scaleTmp = WEBRTC_SPL_MIN(31, scaleTmp);
+
+    gainW32 = ((int16_t)WEBRTC_SPL_SHIFT_W32(cDotNew, -tmp16) * invEnergyNew) >>
+        scaleTmp;
+
+    /* Check if criteria satisfies Gain criteria (max 1.3)
+       if it is larger set the gain to 1.3
+       (slightly different from FLP version)
+    */
+    if (gainW32>21299) {
+      *bestGain=21299;
+    } else if (gainW32<-21299) {
+      *bestGain=-21299;
+    } else {
+      *bestGain=(int16_t)gainW32;
+    }
+
+    *CritMax=CritNew;
+    *shTotMax=CritNewSh;
+    *bestIndex = IndexNew;
+  }
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/cb_update_best_index.h b/modules/audio_coding/codecs/ilbc/cb_update_best_index.h
new file mode 100644
index 0000000..a4a4cde
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/cb_update_best_index.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CbUpdateBestIndex.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_UPDATE_BEST_INDEX_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_UPDATE_BEST_INDEX_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+void WebRtcIlbcfix_CbUpdateBestIndex(
+    int32_t CritNew,    /* (i) New Potentially best Criteria */
+    int16_t CritNewSh,   /* (i) Shift value of above Criteria */
+    size_t IndexNew,   /* (i) Index of new Criteria */
+    int32_t cDotNew,    /* (i) Cross dot of new index */
+    int16_t invEnergyNew,  /* (i) Inversed energy new index */
+    int16_t energyShiftNew,  /* (i) Energy shifts of new index */
+    int32_t *CritMax,   /* (i/o) Maximum Criteria (so far) */
+    int16_t *shTotMax,   /* (i/o) Shifts of maximum criteria */
+    size_t *bestIndex,   /* (i/o) Index that corresponds to
+                                   maximum criteria */
+    int16_t *bestGain);   /* (i/o) Gain in Q14 that corresponds
+                                   to maximum criteria */
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/chebyshev.c b/modules/audio_coding/codecs/ilbc/chebyshev.c
new file mode 100644
index 0000000..38a3069
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/chebyshev.c
@@ -0,0 +1,74 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Chebyshev.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*------------------------------------------------------------------*
+ *  Calculate the Chevyshev polynomial series
+ *  F(w) = 2*exp(-j5w)*C(x)
+ *   C(x) = (T_0(x) + f(1)T_1(x) + ... + f(4)T_1(x) + f(5)/2)
+ *   T_i(x) is the i:th order Chebyshev polynomial
+ *------------------------------------------------------------------*/
+
+int16_t WebRtcIlbcfix_Chebyshev(
+    /* (o) Result of C(x) */
+    int16_t x,  /* (i) Value to the Chevyshev polynomial */
+    int16_t *f  /* (i) The coefficients in the polynomial */
+                                      ) {
+  int16_t b1_high, b1_low; /* Use the high, low format to increase the accuracy */
+  int32_t b2;
+  int32_t tmp1W32;
+  int32_t tmp2W32;
+  int i;
+
+  b2 = (int32_t)0x1000000; /* b2 = 1.0 (Q23) */
+  /* Calculate b1 = 2*x + f[1] */
+  tmp1W32 = (x << 10) + (f[1] << 14);
+
+  for (i = 2; i < 5; i++) {
+    tmp2W32 = tmp1W32;
+
+    /* Split b1 (in tmp1W32) into a high and low part */
+    b1_high = (int16_t)(tmp1W32 >> 16);
+    b1_low = (int16_t)((tmp1W32 - ((int32_t)b1_high << 16)) >> 1);
+
+    /* Calculate 2*x*b1-b2+f[i] */
+    tmp1W32 = ((b1_high * x + ((b1_low * x) >> 15)) << 2) - b2 + (f[i] << 14);
+
+    /* Update b2 for next round */
+    b2 = tmp2W32;
+  }
+
+  /* Split b1 (in tmp1W32) into a high and low part */
+  b1_high = (int16_t)(tmp1W32 >> 16);
+  b1_low = (int16_t)((tmp1W32 - ((int32_t)b1_high << 16)) >> 1);
+
+  /* tmp1W32 = x*b1 - b2 + f[i]/2 */
+  tmp1W32 = ((b1_high * x) << 1) + (((b1_low * x) >> 15) << 1) -
+      b2 + (f[i] << 13);
+
+  /* Handle overflows and set to maximum or minimum int16_t instead */
+  if (tmp1W32>((int32_t)33553408)) {
+    return(WEBRTC_SPL_WORD16_MAX);
+  } else if (tmp1W32<((int32_t)-33554432)) {
+    return(WEBRTC_SPL_WORD16_MIN);
+  } else {
+    return (int16_t)(tmp1W32 >> 10);
+  }
+}
diff --git a/modules/audio_coding/codecs/ilbc/chebyshev.h b/modules/audio_coding/codecs/ilbc/chebyshev.h
new file mode 100644
index 0000000..46eef6b
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/chebyshev.h
@@ -0,0 +1,37 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Chebyshev.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CHEBYSHEV_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CHEBYSHEV_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*------------------------------------------------------------------*
+ *  Calculate the Chevyshev polynomial series
+ *  F(w) = 2*exp(-j5w)*C(x)
+ *   C(x) = (T_0(x) + f(1)T_1(x) + ... + f(4)T_1(x) + f(5)/2)
+ *   T_i(x) is the i:th order Chebyshev polynomial
+ *------------------------------------------------------------------*/
+
+int16_t WebRtcIlbcfix_Chebyshev(
+    /* (o) Result of C(x) */
+    int16_t x,  /* (i) Value to the Chevyshev polynomial */
+    int16_t *f  /* (i) The coefficients in the polynomial */
+                                      );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/comp_corr.c b/modules/audio_coding/codecs/ilbc/comp_corr.c
new file mode 100644
index 0000000..b43f2fc
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/comp_corr.c
@@ -0,0 +1,49 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CompCorr.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  Compute cross correlation and pitch gain for pitch prediction
+ *  of last subframe at given lag.
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_CompCorr(
+    int32_t *corr, /* (o) cross correlation */
+    int32_t *ener, /* (o) energy */
+    int16_t *buffer, /* (i) signal buffer */
+    size_t lag,  /* (i) pitch lag */
+    size_t bLen, /* (i) length of buffer */
+    size_t sRange, /* (i) correlation search length */
+    int16_t scale /* (i) number of rightshifts to use */
+                            ){
+  int16_t *w16ptr;
+
+  w16ptr=&buffer[bLen-sRange-lag];
+
+  /* Calculate correlation and energy */
+  (*corr)=WebRtcSpl_DotProductWithScale(&buffer[bLen-sRange], w16ptr, sRange, scale);
+  (*ener)=WebRtcSpl_DotProductWithScale(w16ptr, w16ptr, sRange, scale);
+
+  /* For zero energy set the energy to 0 in order to avoid potential
+     problems for coming divisions */
+  if (*ener == 0) {
+    *corr = 0;
+    *ener = 1;
+  }
+}
diff --git a/modules/audio_coding/codecs/ilbc/comp_corr.h b/modules/audio_coding/codecs/ilbc/comp_corr.h
new file mode 100644
index 0000000..f54dca2
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/comp_corr.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CompCorr.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_COMP_CORR_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_COMP_CORR_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  Compute cross correlation and pitch gain for pitch prediction
+ *  of last subframe at given lag.
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_CompCorr(
+    int32_t *corr, /* (o) cross correlation */
+    int32_t *ener, /* (o) energy */
+    int16_t *buffer, /* (i) signal buffer */
+    size_t lag,  /* (i) pitch lag */
+    size_t bLen, /* (i) length of buffer */
+    size_t sRange, /* (i) correlation search length */
+    int16_t scale /* (i) number of rightshifts to use */
+                            );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/complexityMeasures.m b/modules/audio_coding/codecs/ilbc/complexityMeasures.m
new file mode 100644
index 0000000..5c39e36
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/complexityMeasures.m
@@ -0,0 +1,59 @@
+%
+%  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+%
+%  Use of this source code is governed by a BSD-style license
+%  that can be found in the LICENSE file in the root of the source
+%  tree. An additional intellectual property rights grant can be found
+%  in the file PATENTS.  All contributing project authors may
+%  be found in the AUTHORS file in the root of the source tree.
+%
+
+clear;
+pack;
+%
+% Enter the path to YOUR executable and remember to define the perprocessor
+% variable PRINT_MIPS te get the instructions printed to the screen.
+%
+command = '!iLBCtest.exe 30 speechAndBGnoise.pcm out1.bit out1.pcm tlm10_30ms.dat';
+cout=' > st.txt';   %saves to matlab variable 'st'
+eval(strcat(command,cout));
+if(length(cout)>3)
+    load st.txt
+else
+    disp('No cout file to load')
+end
+
+% initialize vector to zero
+index = find(st(1:end,1)==-1);
+indexnonzero = find(st(1:end,1)>0);
+frames = length(index)-indexnonzero(1)+1;
+start = indexnonzero(1) - 1;
+functionOrder=max(st(:,2));
+new=zeros(frames,functionOrder);
+
+for i = 1:frames,
+    for j = index(start-1+i)+1:(index(start+i)-1),
+        new(i,st(j,2)) = new(i,st(j,2)) + st(j,1);
+    end
+end
+
+result=zeros(functionOrder,3);
+for i=1:functionOrder
+    nonzeroelements = find(new(1:end,i)>0);
+    result(i,1)=i;
+    
+    % Compute each function's mean complexity
+    % result(i,2)=(sum(new(nonzeroelements,i))/(length(nonzeroelements)*0.03))/1000000;
+    
+    % Compute each function's maximum complexity in encoding
+    % and decoding respectively and then add it together:
+    % result(i,3)=(max(new(1:end,i))/0.03)/1000000;
+    result(i,3)=(max(new(1:size(new,1)/2,i))/0.03)/1000000 + (max(new(size(new,1)/2+1:end,i))/0.03)/1000000;
+end
+
+result
+
+% Compute maximum complexity for a single frame (enc/dec separately and together)
+maxEncComplexityInAFrame = (max(sum(new(1:size(new,1)/2,:),2))/0.03)/1000000
+maxDecComplexityInAFrame = (max(sum(new(size(new,1)/2+1:end,:),2))/0.03)/1000000
+totalComplexity = maxEncComplexityInAFrame + maxDecComplexityInAFrame
\ No newline at end of file
diff --git a/modules/audio_coding/codecs/ilbc/constants.c b/modules/audio_coding/codecs/ilbc/constants.c
new file mode 100644
index 0000000..8efa6ae
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/constants.c
@@ -0,0 +1,666 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ constants.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/* HP Filters {b[0] b[1] b[2] -a[1] -a[2]} */
+
+const int16_t WebRtcIlbcfix_kHpInCoefs[5] = {3798, -7596, 3798, 7807, -3733};
+const int16_t WebRtcIlbcfix_kHpOutCoefs[5] = {3849, -7699, 3849, 7918, -3833};
+
+/* Window in Q11 to window the energies of the 5 choises (3 for 20ms) in the choise for
+   the 80 sample start state
+*/
+const int16_t WebRtcIlbcfix_kStartSequenceEnrgWin[NSUB_MAX-1]= {
+  1638, 1843, 2048, 1843, 1638
+};
+
+/* LP Filter coeffs used for downsampling */
+const int16_t WebRtcIlbcfix_kLpFiltCoefs[FILTERORDER_DS_PLUS1]= {
+  -273, 512, 1297, 1696, 1297, 512, -273
+};
+
+/* Constants used in the LPC calculations */
+
+/* Hanning LPC window (in Q15) */
+const int16_t WebRtcIlbcfix_kLpcWin[BLOCKL_MAX] = {
+  6, 22, 50, 89, 139, 200, 272, 355, 449, 554, 669, 795,
+  932, 1079, 1237, 1405, 1583, 1771, 1969, 2177, 2395, 2622, 2858, 3104,
+  3359, 3622, 3894, 4175, 4464, 4761, 5066, 5379, 5699, 6026, 6361, 6702,
+  7050, 7404, 7764, 8130, 8502, 8879, 9262, 9649, 10040, 10436, 10836, 11240,
+  11647, 12058, 12471, 12887, 13306, 13726, 14148, 14572, 14997, 15423, 15850, 16277,
+  16704, 17131, 17558, 17983, 18408, 18831, 19252, 19672, 20089, 20504, 20916, 21325,
+  21730, 22132, 22530, 22924, 23314, 23698, 24078, 24452, 24821, 25185, 25542, 25893,
+  26238, 26575, 26906, 27230, 27547, 27855, 28156, 28450, 28734, 29011, 29279, 29538,
+  29788, 30029, 30261, 30483, 30696, 30899, 31092, 31275, 31448, 31611, 31764, 31906,
+  32037, 32158, 32268, 32367, 32456, 32533, 32600, 32655, 32700, 32733, 32755, 32767,
+  32767, 32755, 32733, 32700, 32655, 32600, 32533, 32456, 32367, 32268, 32158, 32037,
+  31906, 31764, 31611, 31448, 31275, 31092, 30899, 30696, 30483, 30261, 30029, 29788,
+  29538, 29279, 29011, 28734, 28450, 28156, 27855, 27547, 27230, 26906, 26575, 26238,
+  25893, 25542, 25185, 24821, 24452, 24078, 23698, 23314, 22924, 22530, 22132, 21730,
+  21325, 20916, 20504, 20089, 19672, 19252, 18831, 18408, 17983, 17558, 17131, 16704,
+  16277, 15850, 15423, 14997, 14572, 14148, 13726, 13306, 12887, 12471, 12058, 11647,
+  11240, 10836, 10436, 10040, 9649, 9262, 8879, 8502, 8130, 7764, 7404, 7050,
+  6702, 6361, 6026, 5699, 5379, 5066, 4761, 4464, 4175, 3894, 3622, 3359,
+  3104, 2858, 2622, 2395, 2177, 1969, 1771, 1583, 1405, 1237, 1079, 932,
+  795, 669, 554, 449, 355, 272, 200, 139, 89, 50, 22, 6
+};
+
+/* Asymmetric LPC window (in Q15)*/
+const int16_t WebRtcIlbcfix_kLpcAsymWin[BLOCKL_MAX] = {
+  2, 7, 15, 27, 42, 60, 81, 106, 135, 166, 201, 239,
+  280, 325, 373, 424, 478, 536, 597, 661, 728, 798, 872, 949,
+  1028, 1111, 1197, 1287, 1379, 1474, 1572, 1674, 1778, 1885, 1995, 2108,
+  2224, 2343, 2465, 2589, 2717, 2847, 2980, 3115, 3254, 3395, 3538, 3684,
+  3833, 3984, 4138, 4295, 4453, 4615, 4778, 4944, 5112, 5283, 5456, 5631,
+  5808, 5987, 6169, 6352, 6538, 6725, 6915, 7106, 7300, 7495, 7692, 7891,
+  8091, 8293, 8497, 8702, 8909, 9118, 9328, 9539, 9752, 9966, 10182, 10398,
+  10616, 10835, 11055, 11277, 11499, 11722, 11947, 12172, 12398, 12625, 12852, 13080,
+  13309, 13539, 13769, 14000, 14231, 14463, 14695, 14927, 15160, 15393, 15626, 15859,
+  16092, 16326, 16559, 16792, 17026, 17259, 17492, 17725, 17957, 18189, 18421, 18653,
+  18884, 19114, 19344, 19573, 19802, 20030, 20257, 20483, 20709, 20934, 21157, 21380,
+  21602, 21823, 22042, 22261, 22478, 22694, 22909, 23123, 23335, 23545, 23755, 23962,
+  24168, 24373, 24576, 24777, 24977, 25175, 25371, 25565, 25758, 25948, 26137, 26323,
+  26508, 26690, 26871, 27049, 27225, 27399, 27571, 27740, 27907, 28072, 28234, 28394,
+  28552, 28707, 28860, 29010, 29157, 29302, 29444, 29584, 29721, 29855, 29987, 30115,
+  30241, 30364, 30485, 30602, 30717, 30828, 30937, 31043, 31145, 31245, 31342, 31436,
+  31526, 31614, 31699, 31780, 31858, 31933, 32005, 32074, 32140, 32202, 32261, 32317,
+  32370, 32420, 32466, 32509, 32549, 32585, 32618, 32648, 32675, 32698, 32718, 32734,
+  32748, 32758, 32764, 32767, 32767, 32667, 32365, 31863, 31164, 30274, 29197, 27939,
+  26510, 24917, 23170, 21281, 19261, 17121, 14876, 12540, 10126, 7650, 5126, 2571
+};
+
+/* Lag window for LPC (Q31) */
+const int32_t WebRtcIlbcfix_kLpcLagWin[LPC_FILTERORDER + 1]={
+  2147483647,   2144885453,   2137754373,   2125918626,   2109459810,
+  2088483140,   2063130336,   2033564590,   1999977009,   1962580174,
+  1921610283};
+
+/* WebRtcIlbcfix_kLpcChirpSyntDenum vector in Q15 corresponding
+ * floating point vector {1 0.9025 0.9025^2 0.9025^3 ...}
+ */
+const int16_t WebRtcIlbcfix_kLpcChirpSyntDenum[LPC_FILTERORDER + 1] = {
+  32767, 29573, 26690, 24087,
+  21739, 19619, 17707, 15980,
+  14422, 13016, 11747};
+
+/* WebRtcIlbcfix_kLpcChirpWeightDenum in Q15 corresponding to
+ * floating point vector {1 0.4222 0.4222^2... }
+ */
+const int16_t WebRtcIlbcfix_kLpcChirpWeightDenum[LPC_FILTERORDER + 1] = {
+  32767, 13835, 5841, 2466, 1041, 440,
+  186, 78,  33,  14,  6};
+
+/* LSF quantization Q13 domain */
+const int16_t WebRtcIlbcfix_kLsfCb[64 * 3 + 128 * 3 + 128 * 4] = {
+  1273,       2238,       3696,
+  3199,       5309,       8209,
+  3606,       5671,       7829,
+  2815,       5262,       8778,
+  2608,       4027,       5493,
+  1582,       3076,       5945,
+  2983,       4181,       5396,
+  2437,       4322,       6902,
+  1861,       2998,       4613,
+  2007,       3250,       5214,
+  1388,       2459,       4262,
+  2563,       3805,       5269,
+  2036,       3522,       5129,
+  1935,       4025,       6694,
+  2744,       5121,       7338,
+  2810,       4248,       5723,
+  3054,       5405,       7745,
+  1449,       2593,       4763,
+  3411,       5128,       6596,
+  2484,       4659,       7496,
+  1668,       2879,       4818,
+  1812,       3072,       5036,
+  1638,       2649,       3900,
+  2464,       3550,       4644,
+  1853,       2900,       4158,
+  2458,       4163,       5830,
+  2556,       4036,       6254,
+  2703,       4432,       6519,
+  3062,       4953,       7609,
+  1725,       3703,       6187,
+  2221,       3877,       5427,
+  2339,       3579,       5197,
+  2021,       4633,       7037,
+  2216,       3328,       4535,
+  2961,       4739,       6667,
+  2807,       3955,       5099,
+  2788,       4501,       6088,
+  1642,       2755,       4431,
+  3341,       5282,       7333,
+  2414,       3726,       5727,
+  1582,       2822,       5269,
+  2259,       3447,       4905,
+  3117,       4986,       7054,
+  1825,       3491,       5542,
+  3338,       5736,       8627,
+  1789,       3090,       5488,
+  2566,       3720,       4923,
+  2846,       4682,       7161,
+  1950,       3321,       5976,
+  1834,       3383,       6734,
+  3238,       4769,       6094,
+  2031,       3978,       5903,
+  1877,       4068,       7436,
+  2131,       4644,       8296,
+  2764,       5010,       8013,
+  2194,       3667,       6302,
+  2053,       3127,       4342,
+  3523,       6595,      10010,
+  3134,       4457,       5748,
+  3142,       5819,       9414,
+  2223,       4334,       6353,
+  2022,       3224,       4822,
+  2186,       3458,       5544,
+  2552,       4757,       6870,
+  10905,      12917,      14578,
+  9503,      11485,      14485,
+  9518,      12494,      14052,
+  6222,       7487,       9174,
+  7759,       9186,      10506,
+  8315,      12755,      14786,
+  9609,      11486,      13866,
+  8909,      12077,      13643,
+  7369,       9054,      11520,
+  9408,      12163,      14715,
+  6436,       9911,      12843,
+  7109,       9556,      11884,
+  7557,      10075,      11640,
+  6482,       9202,      11547,
+  6463,       7914,      10980,
+  8611,      10427,      12752,
+  7101,       9676,      12606,
+  7428,      11252,      13172,
+  10197,      12955,      15842,
+  7487,      10955,      12613,
+  5575,       7858,      13621,
+  7268,      11719,      14752,
+  7476,      11744,      13795,
+  7049,       8686,      11922,
+  8234,      11314,      13983,
+  6560,      11173,      14984,
+  6405,       9211,      12337,
+  8222,      12054,      13801,
+  8039,      10728,      13255,
+  10066,      12733,      14389,
+  6016,       7338,      10040,
+  6896,       8648,      10234,
+  7538,       9170,      12175,
+  7327,      12608,      14983,
+  10516,      12643,      15223,
+  5538,       7644,      12213,
+  6728,      12221,      14253,
+  7563,       9377,      12948,
+  8661,      11023,      13401,
+  7280,       8806,      11085,
+  7723,       9793,      12333,
+  12225,      14648,      16709,
+  8768,      13389,      15245,
+  10267,      12197,      13812,
+  5301,       7078,      11484,
+  7100,      10280,      11906,
+  8716,      12555,      14183,
+  9567,      12464,      15434,
+  7832,      12305,      14300,
+  7608,      10556,      12121,
+  8913,      11311,      12868,
+  7414,       9722,      11239,
+  8666,      11641,      13250,
+  9079,      10752,      12300,
+  8024,      11608,      13306,
+  10453,      13607,      16449,
+  8135,       9573,      10909,
+  6375,       7741,      10125,
+  10025,      12217,      14874,
+  6985,      11063,      14109,
+  9296,      13051,      14642,
+  8613,      10975,      12542,
+  6583,      10414,      13534,
+  6191,       9368,      13430,
+  5742,       6859,       9260,
+  7723,       9813,      13679,
+  8137,      11291,      12833,
+  6562,       8973,      10641,
+  6062,       8462,      11335,
+  6928,       8784,      12647,
+  7501,       8784,      10031,
+  8372,      10045,      12135,
+  8191,       9864,      12746,
+  5917,       7487,      10979,
+  5516,       6848,      10318,
+  6819,       9899,      11421,
+  7882,      12912,      15670,
+  9558,      11230,      12753,
+  7752,       9327,      11472,
+  8479,       9980,      11358,
+  11418,      14072,      16386,
+  7968,      10330,      14423,
+  8423,      10555,      12162,
+  6337,      10306,      14391,
+  8850,      10879,      14276,
+  6750,      11885,      15710,
+  7037,       8328,       9764,
+  6914,       9266,      13476,
+  9746,      13949,      15519,
+  11032,      14444,      16925,
+  8032,      10271,      11810,
+  10962,      13451,      15833,
+  10021,      11667,      13324,
+  6273,       8226,      12936,
+  8543,      10397,      13496,
+  7936,      10302,      12745,
+  6769,       8138,      10446,
+  6081,       7786,      11719,
+  8637,      11795,      14975,
+  8790,      10336,      11812,
+  7040,       8490,      10771,
+  7338,      10381,      13153,
+  6598,       7888,       9358,
+  6518,       8237,      12030,
+  9055,      10763,      12983,
+  6490,      10009,      12007,
+  9589,      12023,      13632,
+  6867,       9447,      10995,
+  7930,       9816,      11397,
+  10241,      13300,      14939,
+  5830,       8670,      12387,
+  9870,      11915,      14247,
+  9318,      11647,      13272,
+  6721,      10836,      12929,
+  6543,       8233,       9944,
+  8034,      10854,      12394,
+  9112,      11787,      14218,
+  9302,      11114,      13400,
+  9022,      11366,      13816,
+  6962,      10461,      12480,
+  11288,      13333,      15222,
+  7249,       8974,      10547,
+  10566,      12336,      14390,
+  6697,      11339,      13521,
+  11851,      13944,      15826,
+  6847,       8381,      11349,
+  7509,       9331,      10939,
+  8029,       9618,      11909,
+  13973,      17644,      19647,      22474,
+  14722,      16522,      20035,      22134,
+  16305,      18179,      21106,      23048,
+  15150,      17948,      21394,      23225,
+  13582,      15191,      17687,      22333,
+  11778,      15546,      18458,      21753,
+  16619,      18410,      20827,      23559,
+  14229,      15746,      17907,      22474,
+  12465,      15327,      20700,      22831,
+  15085,      16799,      20182,      23410,
+  13026,      16935,      19890,      22892,
+  14310,      16854,      19007,      22944,
+  14210,      15897,      18891,      23154,
+  14633,      18059,      20132,      22899,
+  15246,      17781,      19780,      22640,
+  16396,      18904,      20912,      23035,
+  14618,      17401,      19510,      21672,
+  15473,      17497,      19813,      23439,
+  18851,      20736,      22323,      23864,
+  15055,      16804,      18530,      20916,
+  16490,      18196,      19990,      21939,
+  11711,      15223,      21154,      23312,
+  13294,      15546,      19393,      21472,
+  12956,      16060,      20610,      22417,
+  11628,      15843,      19617,      22501,
+  14106,      16872,      19839,      22689,
+  15655,      18192,      20161,      22452,
+  12953,      15244,      20619,      23549,
+  15322,      17193,      19926,      21762,
+  16873,      18676,      20444,      22359,
+  14874,      17871,      20083,      21959,
+  11534,      14486,      19194,      21857,
+  17766,      19617,      21338,      23178,
+  13404,      15284,      19080,      23136,
+  15392,      17527,      19470,      21953,
+  14462,      16153,      17985,      21192,
+  17734,      19750,      21903,      23783,
+  16973,      19096,      21675,      23815,
+  16597,      18936,      21257,      23461,
+  15966,      17865,      20602,      22920,
+  15416,      17456,      20301,      22972,
+  18335,      20093,      21732,      23497,
+  15548,      17217,      20679,      23594,
+  15208,      16995,      20816,      22870,
+  13890,      18015,      20531,      22468,
+  13211,      15377,      19951,      22388,
+  12852,      14635,      17978,      22680,
+  16002,      17732,      20373,      23544,
+  11373,      14134,      19534,      22707,
+  17329,      19151,      21241,      23462,
+  15612,      17296,      19362,      22850,
+  15422,      19104,      21285,      23164,
+  13792,      17111,      19349,      21370,
+  15352,      17876,      20776,      22667,
+  15253,      16961,      18921,      22123,
+  14108,      17264,      20294,      23246,
+  15785,      17897,      20010,      21822,
+  17399,      19147,      20915,      22753,
+  13010,      15659,      18127,      20840,
+  16826,      19422,      22218,      24084,
+  18108,      20641,      22695,      24237,
+  18018,      20273,      22268,      23920,
+  16057,      17821,      21365,      23665,
+  16005,      17901,      19892,      23016,
+  13232,      16683,      21107,      23221,
+  13280,      16615,      19915,      21829,
+  14950,      18575,      20599,      22511,
+  16337,      18261,      20277,      23216,
+  14306,      16477,      21203,      23158,
+  12803,      17498,      20248,      22014,
+  14327,      17068,      20160,      22006,
+  14402,      17461,      21599,      23688,
+  16968,      18834,      20896,      23055,
+  15070,      17157,      20451,      22315,
+  15419,      17107,      21601,      23946,
+  16039,      17639,      19533,      21424,
+  16326,      19261,      21745,      23673,
+  16489,      18534,      21658,      23782,
+  16594,      18471,      20549,      22807,
+  18973,      21212,      22890,      24278,
+  14264,      18674,      21123,      23071,
+  15117,      16841,      19239,      23118,
+  13762,      15782,      20478,      23230,
+  14111,      15949,      20058,      22354,
+  14990,      16738,      21139,      23492,
+  13735,      16971,      19026,      22158,
+  14676,      17314,      20232,      22807,
+  16196,      18146,      20459,      22339,
+  14747,      17258,      19315,      22437,
+  14973,      17778,      20692,      23367,
+  15715,      17472,      20385,      22349,
+  15702,      18228,      20829,      23410,
+  14428,      16188,      20541,      23630,
+  16824,      19394,      21365,      23246,
+  13069,      16392,      18900,      21121,
+  12047,      16640,      19463,      21689,
+  14757,      17433,      19659,      23125,
+  15185,      16930,      19900,      22540,
+  16026,      17725,      19618,      22399,
+  16086,      18643,      21179,      23472,
+  15462,      17248,      19102,      21196,
+  17368,      20016,      22396,      24096,
+  12340,      14475,      19665,      23362,
+  13636,      16229,      19462,      22728,
+  14096,      16211,      19591,      21635,
+  12152,      14867,      19943,      22301,
+  14492,      17503,      21002,      22728,
+  14834,      16788,      19447,      21411,
+  14650,      16433,      19326,      22308,
+  14624,      16328,      19659,      23204,
+  13888,      16572,      20665,      22488,
+  12977,      16102,      18841,      22246,
+  15523,      18431,      21757,      23738,
+  14095,      16349,      18837,      20947,
+  13266,      17809,      21088,      22839,
+  15427,      18190,      20270,      23143,
+  11859,      16753,      20935,      22486,
+  12310,      17667,      21736,      23319,
+  14021,      15926,      18702,      22002,
+  12286,      15299,      19178,      21126,
+  15703,      17491,      21039,      23151,
+  12272,      14018,      18213,      22570,
+  14817,      16364,      18485,      22598,
+  17109,      19683,      21851,      23677,
+  12657,      14903,      19039,      22061,
+  14713,      16487,      20527,      22814,
+  14635,      16726,      18763,      21715,
+  15878,      18550,      20718,      22906
+};
+
+const int16_t WebRtcIlbcfix_kLsfDimCb[LSF_NSPLIT] = {3, 3, 4};
+const int16_t WebRtcIlbcfix_kLsfSizeCb[LSF_NSPLIT] = {64,128,128};
+
+const int16_t WebRtcIlbcfix_kLsfMean[LPC_FILTERORDER] = {
+  2308,       3652,       5434,       7885,
+  10255,      12559,      15160,      17513,
+  20328,      22752};
+
+const int16_t WebRtcIlbcfix_kLspMean[LPC_FILTERORDER] = {
+  31476, 29565, 25819, 18725, 10276,
+  1236, -9049, -17600, -25884, -30618
+};
+
+/* Q14 */
+const int16_t WebRtcIlbcfix_kLsfWeight20ms[4] = {12288, 8192, 4096, 0};
+const int16_t WebRtcIlbcfix_kLsfWeight30ms[6] = {8192, 16384, 10923, 5461, 0, 0};
+
+/*
+   cos(x) in Q15
+   WebRtcIlbcfix_kCos[i] = cos(pi*i/64.0)
+   used in WebRtcIlbcfix_Lsp2Lsf()
+*/
+
+const int16_t WebRtcIlbcfix_kCos[64] = {
+  32767,  32729,  32610,  32413,  32138,  31786,  31357,  30853,
+  30274,  29622,  28899,  28106,  27246,  26320,  25330,  24279,
+  23170,  22006,  20788,  19520,  18205,  16846,  15447,  14010,
+  12540,  11039,   9512,   7962,   6393,   4808,   3212,   1608,
+  0,  -1608,  -3212,  -4808,  -6393,  -7962,  -9512, -11039,
+  -12540, -14010, -15447, -16846, -18205, -19520, -20788, -22006,
+  -23170, -24279, -25330, -26320, -27246, -28106, -28899, -29622,
+  -30274, -30853, -31357, -31786, -32138, -32413, -32610, -32729
+};
+
+/*
+   Derivative in Q19, used to interpolate between the
+   WebRtcIlbcfix_kCos[] values to get a more exact y = cos(x)
+*/
+const int16_t WebRtcIlbcfix_kCosDerivative[64] = {
+  -632,  -1893,  -3150,  -4399,  -5638,  -6863,  -8072,  -9261,
+  -10428, -11570, -12684, -13767, -14817, -15832, -16808, -17744,
+  -18637, -19486, -20287, -21039, -21741, -22390, -22986, -23526,
+  -24009, -24435, -24801, -25108, -25354, -25540, -25664, -25726,
+  -25726, -25664, -25540, -25354, -25108, -24801, -24435, -24009,
+  -23526, -22986, -22390, -21741, -21039, -20287, -19486, -18637,
+  -17744, -16808, -15832, -14817, -13767, -12684, -11570, -10428,
+  -9261,  -8072,  -6863,  -5638,  -4399,  -3150,  -1893,   -632};
+
+/*
+  Table in Q15, used for a2lsf conversion
+  WebRtcIlbcfix_kCosGrid[i] = cos((2*pi*i)/(float)(2*COS_GRID_POINTS));
+*/
+
+const int16_t WebRtcIlbcfix_kCosGrid[COS_GRID_POINTS + 1] = {
+  32760, 32723, 32588, 32364, 32051, 31651, 31164, 30591,
+  29935, 29196, 28377, 27481, 26509, 25465, 24351, 23170,
+  21926, 20621, 19260, 17846, 16384, 14876, 13327, 11743,
+  10125, 8480, 6812, 5126, 3425, 1714, 0, -1714, -3425,
+  -5126, -6812, -8480, -10125, -11743, -13327, -14876,
+  -16384, -17846, -19260, -20621, -21926, -23170, -24351,
+  -25465, -26509, -27481, -28377, -29196, -29935, -30591,
+  -31164, -31651, -32051, -32364, -32588, -32723, -32760
+};
+
+/*
+   Derivative of y = acos(x) in Q12
+   used in WebRtcIlbcfix_Lsp2Lsf()
+*/
+
+const int16_t WebRtcIlbcfix_kAcosDerivative[64] = {
+  -26887, -8812, -5323, -3813, -2979, -2444, -2081, -1811,
+  -1608, -1450, -1322, -1219, -1132, -1059, -998, -946,
+  -901, -861, -827, -797, -772, -750, -730, -713,
+  -699, -687, -677, -668, -662, -657, -654, -652,
+  -652, -654, -657, -662, -668, -677, -687, -699,
+  -713, -730, -750, -772, -797, -827, -861, -901,
+  -946, -998, -1059, -1132, -1219, -1322, -1450, -1608,
+  -1811, -2081, -2444, -2979, -3813, -5323, -8812, -26887
+};
+
+
+/* Tables for quantization of start state */
+
+/* State quantization tables */
+const int16_t WebRtcIlbcfix_kStateSq3[8] = { /* Values in Q13 */
+  -30473, -17838, -9257, -2537,
+  3639, 10893, 19958, 32636
+};
+
+/* This table defines the limits for the selection of the freqg
+   less or equal than value 0 => index = 0
+   less or equal than value k => index = k
+*/
+const int32_t WebRtcIlbcfix_kChooseFrgQuant[64] = {
+  118, 163, 222, 305, 425, 604,
+  851, 1174, 1617, 2222, 3080, 4191,
+  5525, 7215, 9193, 11540, 14397, 17604,
+  21204, 25209, 29863, 35720, 42531, 50375,
+  59162, 68845, 80108, 93754, 110326, 129488,
+  150654, 174328, 201962, 233195, 267843, 308239,
+  354503, 405988, 464251, 531550, 608652, 697516,
+  802526, 928793, 1080145, 1258120, 1481106, 1760881,
+  2111111, 2546619, 3078825, 3748642, 4563142, 5573115,
+  6887601, 8582108, 10797296, 14014513, 18625760, 25529599,
+  37302935, 58819185, 109782723, WEBRTC_SPL_WORD32_MAX
+};
+
+const int16_t WebRtcIlbcfix_kScale[64] = {
+  /* Values in Q16 */
+  29485, 25003, 21345, 18316, 15578, 13128, 10973, 9310, 7955,
+  6762, 5789, 4877, 4255, 3699, 3258, 2904, 2595, 2328,
+  2123, 1932, 1785, 1631, 1493, 1370, 1260, 1167, 1083,
+  /* Values in Q21 */
+  32081, 29611, 27262, 25229, 23432, 21803, 20226, 18883, 17609,
+  16408, 15311, 14327, 13390, 12513, 11693, 10919, 10163, 9435,
+  8739, 8100, 7424, 6813, 6192, 5648, 5122, 4639, 4207, 3798,
+  3404, 3048, 2706, 2348, 2036, 1713, 1393, 1087, 747
+};
+
+/*frgq in fixpoint, but already computed like this:
+  for(i=0; i<64; i++){
+  a = (pow(10,frgq[i])/4.5);
+  WebRtcIlbcfix_kFrgQuantMod[i] = round(a);
+  }
+
+  Value 0 :36 in Q8
+  37:58 in Q5
+  59:63 in Q3
+*/
+const int16_t WebRtcIlbcfix_kFrgQuantMod[64] = {
+  /* First 37 values in Q8 */
+  569, 671, 786, 916, 1077, 1278,
+  1529, 1802, 2109, 2481, 2898, 3440,
+  3943, 4535, 5149, 5778, 6464, 7208,
+  7904, 8682, 9397, 10285, 11240, 12246,
+  13313, 14382, 15492, 16735, 18131, 19693,
+  21280, 22912, 24624, 26544, 28432, 30488,
+  32720,
+  /* 22 values in Q5 */
+  4383, 4684, 5012, 5363, 5739, 6146,
+  6603, 7113, 7679, 8285, 9040, 9850,
+  10838, 11882, 13103, 14467, 15950, 17669,
+  19712, 22016, 24800, 28576,
+  /* 5 values in Q3 */
+  8240, 9792, 12040, 15440, 22472
+};
+
+/* Constants for codebook search and creation */
+
+/* Expansion filter to get additional cb section.
+ * Q12 and reversed compared to flp
+ */
+const int16_t WebRtcIlbcfix_kCbFiltersRev[CB_FILTERLEN]={
+  -140, 446, -755, 3302, 2922, -590, 343, -138};
+
+/* Weighting coefficients for short lags.
+ * [0.2 0.4 0.6 0.8] in Q15 */
+const int16_t WebRtcIlbcfix_kAlpha[4]={
+  6554, 13107, 19661, 26214};
+
+/* Ranges for search and filters at different subframes */
+
+const size_t WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES]={
+  {58,58,58}, {108,44,44}, {108,108,108}, {108,108,108}, {108,108,108}};
+
+const size_t WebRtcIlbcfix_kFilterRange[5]={63, 85, 125, 147, 147};
+
+/* Gain Quantization for the codebook gains of the 3 stages */
+
+/* Q14 (one extra value (max int16_t) to simplify for the search) */
+const int16_t WebRtcIlbcfix_kGainSq3[9]={
+  -16384, -10813, -5407, 0, 4096, 8192,
+  12288, 16384, 32767};
+
+/* Q14 (one extra value (max int16_t) to simplify for the search) */
+const int16_t WebRtcIlbcfix_kGainSq4[17]={
+  -17203, -14746, -12288, -9830, -7373, -4915,
+  -2458, 0, 2458, 4915, 7373, 9830,
+  12288, 14746, 17203, 19661, 32767};
+
+/* Q14 (one extra value (max int16_t) to simplify for the search) */
+const int16_t WebRtcIlbcfix_kGainSq5[33]={
+  614,        1229,        1843,        2458,        3072,       3686,
+  4301,        4915,        5530,        6144,        6758,        7373,
+  7987,        8602,        9216,        9830,       10445,       11059,
+  11674,       12288,       12902,       13517,       14131,       14746,
+  15360,       15974,       16589,       17203,       17818,       18432,
+  19046,       19661,    32767};
+
+/* Q14 gain_sq5Tbl squared in Q14 */
+const int16_t WebRtcIlbcfix_kGainSq5Sq[32] = {
+  23,   92,    207,  368,  576,  829,
+  1129,  1474,   1866,  2304,  2787,  3317,
+  3893,  4516,   5184,  5897,  6658,  7464,
+  8318,  9216,   10160,  11151,  12187,  13271,
+  14400,  15574,   16796,  18062,  19377,  20736,
+  22140,  23593
+};
+
+const int16_t* const WebRtcIlbcfix_kGain[3] =
+{WebRtcIlbcfix_kGainSq5, WebRtcIlbcfix_kGainSq4, WebRtcIlbcfix_kGainSq3};
+
+
+/* Tables for the Enhancer, using upsamling factor 4 (ENH_UPS0 = 4) */
+
+const int16_t WebRtcIlbcfix_kEnhPolyPhaser[ENH_UPS0][ENH_FLO_MULT2_PLUS1]={
+  {0,    0,    0, 4096,    0,  0,   0},
+  {64, -315, 1181, 3531, -436, 77, -64},
+  {97, -509, 2464, 2464, -509, 97, -97},
+  {77, -436, 3531, 1181, -315, 64, -77}
+};
+
+const int16_t WebRtcIlbcfix_kEnhWt[3] = {
+  4800, 16384, 27968 /* Q16 */
+};
+
+const size_t WebRtcIlbcfix_kEnhPlocs[ENH_NBLOCKS_TOT] = {
+  160, 480, 800, 1120, 1440, 1760, 2080, 2400  /* Q(-2) */
+};
+
+/* PLC table */
+
+const int16_t WebRtcIlbcfix_kPlcPerSqr[6] = { /* Grid points for square of periodiciy in Q15 */
+  839, 1343, 2048, 2998, 4247, 5849
+};
+
+const int16_t WebRtcIlbcfix_kPlcPitchFact[6] = { /* Value of y=(x^4-0.4)/(0.7-0.4) in grid points in Q15 */
+  0, 5462, 10922, 16384, 21846, 27306
+};
+
+const int16_t WebRtcIlbcfix_kPlcPfSlope[6] = { /* Slope of y=(x^4-0.4)/(0.7-0.4) in Q11 */
+  26667, 18729, 13653, 10258, 7901, 6214
+};
diff --git a/modules/audio_coding/codecs/ilbc/constants.h b/modules/audio_coding/codecs/ilbc/constants.h
new file mode 100644
index 0000000..6864f16
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/constants.h
@@ -0,0 +1,92 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ constants.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CONSTANTS_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CONSTANTS_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+/* high pass filters */
+
+extern const int16_t WebRtcIlbcfix_kHpInCoefs[];
+extern const int16_t WebRtcIlbcfix_kHpOutCoefs[];
+
+/* Window for start state decision */
+extern const int16_t WebRtcIlbcfix_kStartSequenceEnrgWin[];
+
+/* low pass filter used for downsampling */
+extern const int16_t WebRtcIlbcfix_kLpFiltCoefs[];
+
+/* LPC analysis and quantization */
+
+extern const int16_t WebRtcIlbcfix_kLpcWin[];
+extern const int16_t WebRtcIlbcfix_kLpcAsymWin[];
+extern const int32_t WebRtcIlbcfix_kLpcLagWin[];
+extern const int16_t WebRtcIlbcfix_kLpcChirpSyntDenum[];
+extern const int16_t WebRtcIlbcfix_kLpcChirpWeightDenum[];
+extern const int16_t WebRtcIlbcfix_kLsfDimCb[];
+extern const int16_t WebRtcIlbcfix_kLsfSizeCb[];
+extern const int16_t WebRtcIlbcfix_kLsfCb[];
+extern const int16_t WebRtcIlbcfix_kLsfWeight20ms[];
+extern const int16_t WebRtcIlbcfix_kLsfWeight30ms[];
+extern const int16_t WebRtcIlbcfix_kLsfMean[];
+extern const int16_t WebRtcIlbcfix_kLspMean[];
+extern const int16_t WebRtcIlbcfix_kCos[];
+extern const int16_t WebRtcIlbcfix_kCosDerivative[];
+extern const int16_t WebRtcIlbcfix_kCosGrid[];
+extern const int16_t WebRtcIlbcfix_kAcosDerivative[];
+
+/* state quantization tables */
+
+extern const int16_t WebRtcIlbcfix_kStateSq3[];
+extern const int32_t WebRtcIlbcfix_kChooseFrgQuant[];
+extern const int16_t WebRtcIlbcfix_kScale[];
+extern const int16_t WebRtcIlbcfix_kFrgQuantMod[];
+
+/* Ranges for search and filters at different subframes */
+
+extern const size_t WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES];
+extern const size_t WebRtcIlbcfix_kFilterRange[];
+
+/* gain quantization tables */
+
+extern const int16_t WebRtcIlbcfix_kGainSq3[];
+extern const int16_t WebRtcIlbcfix_kGainSq4[];
+extern const int16_t WebRtcIlbcfix_kGainSq5[];
+extern const int16_t WebRtcIlbcfix_kGainSq5Sq[];
+extern const int16_t* const WebRtcIlbcfix_kGain[];
+
+/* adaptive codebook definitions */
+
+extern const int16_t WebRtcIlbcfix_kCbFiltersRev[];
+extern const int16_t WebRtcIlbcfix_kAlpha[];
+
+/* enhancer definitions */
+
+extern const int16_t WebRtcIlbcfix_kEnhPolyPhaser[ENH_UPS0][ENH_FLO_MULT2_PLUS1];
+extern const int16_t WebRtcIlbcfix_kEnhWt[];
+extern const size_t WebRtcIlbcfix_kEnhPlocs[];
+
+/* PLC tables */
+
+extern const int16_t WebRtcIlbcfix_kPlcPerSqr[];
+extern const int16_t WebRtcIlbcfix_kPlcPitchFact[];
+extern const int16_t WebRtcIlbcfix_kPlcPfSlope[];
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/create_augmented_vec.c b/modules/audio_coding/codecs/ilbc/create_augmented_vec.c
new file mode 100644
index 0000000..6a4d058
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/create_augmented_vec.c
@@ -0,0 +1,81 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CreateAugmentedVec.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "rtc_base/sanitizer.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+/*----------------------------------------------------------------*
+ *  Recreate a specific codebook vector from the augmented part.
+ *
+ *----------------------------------------------------------------*/
+
+void WebRtcIlbcfix_CreateAugmentedVec(
+    size_t index,          /* (i) Index for the augmented vector to be
+                              created */
+    const int16_t* buffer, /* (i) Pointer to the end of the codebook memory
+                              that is used for creation of the augmented
+                              codebook */
+    int16_t* cbVec) {      /* (o) The constructed codebook vector */
+  size_t ilow;
+  const int16_t *ppo, *ppi;
+  int16_t cbVecTmp[4];
+  /* Interpolation starts 4 elements before cbVec+index, but must not start
+     outside |cbVec|; clamping interp_len to stay within |cbVec|.
+   */
+  size_t interp_len = WEBRTC_SPL_MIN(index, 4);
+
+  rtc_MsanCheckInitialized(buffer - index - interp_len, sizeof(buffer[0]),
+                           index + interp_len);
+
+  ilow = index - interp_len;
+
+  /* copy the first noninterpolated part */
+  ppo = buffer-index;
+  WEBRTC_SPL_MEMCPY_W16(cbVec, ppo, index);
+
+  /* interpolation */
+  ppo = buffer - interp_len;
+  ppi = buffer - index - interp_len;
+
+  /* perform cbVec[ilow+k] = ((ppi[k]*alphaTbl[k])>>15) +
+                             ((ppo[k]*alphaTbl[interp_len-1-k])>>15);
+     for k = 0..interp_len-1
+  */
+  WebRtcSpl_ElementwiseVectorMult(&cbVec[ilow], ppi, WebRtcIlbcfix_kAlpha,
+                                  interp_len, 15);
+  WebRtcSpl_ReverseOrderMultArrayElements(
+      cbVecTmp, ppo, &WebRtcIlbcfix_kAlpha[interp_len - 1], interp_len, 15);
+  WebRtcSpl_AddVectorsAndShift(&cbVec[ilow], &cbVec[ilow], cbVecTmp, interp_len,
+                               0);
+
+  /* copy the second noninterpolated part */
+  ppo = buffer - index;
+  /* |tempbuff2| is declared in WebRtcIlbcfix_GetCbVec and is SUBL+5 elements
+     long. |buffer| points one element past the end of that vector, i.e., at
+     tempbuff2+SUBL+5. Since ppo=buffer-index, we cannot read any more than
+     |index| elements from |ppo|.
+
+     |cbVec| is declared to be SUBL elements long in WebRtcIlbcfix_CbConstruct.
+     Therefore, we can only write SUBL-index elements to cbVec+index.
+
+     These two conditions limit the number of elements to copy.
+   */
+  WEBRTC_SPL_MEMCPY_W16(cbVec+index, ppo, WEBRTC_SPL_MIN(SUBL-index, index));
+}
diff --git a/modules/audio_coding/codecs/ilbc/create_augmented_vec.h b/modules/audio_coding/codecs/ilbc/create_augmented_vec.h
new file mode 100644
index 0000000..ca8b371
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/create_augmented_vec.h
@@ -0,0 +1,37 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_CreateAugmentedVec.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CREATE_AUGMENTED_VEC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CREATE_AUGMENTED_VEC_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  Recreate a specific codebook vector from the augmented part.
+ *
+ *----------------------------------------------------------------*/
+
+void WebRtcIlbcfix_CreateAugmentedVec(
+    size_t index,          /* (i) Index for the augmented vector to be
+                              created */
+    const int16_t* buffer, /* (i) Pointer to the end of the codebook memory
+                              that is used for creation of the augmented
+                              codebook */
+    int16_t* cbVec);       /* (o) The construced codebook vector */
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/decode.c b/modules/audio_coding/codecs/ilbc/decode.c
new file mode 100644
index 0000000..7cba418
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/decode.c
@@ -0,0 +1,257 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Decode.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/simple_lsf_dequant.h"
+#include "modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.h"
+#include "modules/audio_coding/codecs/ilbc/index_conv_dec.h"
+#include "modules/audio_coding/codecs/ilbc/do_plc.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/enhancer_interface.h"
+#include "modules/audio_coding/codecs/ilbc/xcorr_coef.h"
+#include "modules/audio_coding/codecs/ilbc/lsf_check.h"
+#include "modules/audio_coding/codecs/ilbc/decode_residual.h"
+#include "modules/audio_coding/codecs/ilbc/unpack_bits.h"
+#include "modules/audio_coding/codecs/ilbc/hp_output.h"
+#include "modules/audio_coding/codecs/ilbc/init_decode.h"
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+#include "modules/audio_coding/codecs/ilbc/swap_bytes.h"
+#endif
+
+/*----------------------------------------------------------------*
+ *  main decoder function
+ *---------------------------------------------------------------*/
+
+int WebRtcIlbcfix_DecodeImpl(
+    int16_t *decblock,    /* (o) decoded signal block */
+    const uint16_t *bytes, /* (i) encoded signal bits */
+    IlbcDecoder *iLBCdec_inst, /* (i/o) the decoder state
+                                           structure */
+    int16_t mode      /* (i) 0: bad packet, PLC,
+                                                                   1: normal */
+                           ) {
+  const int old_mode = iLBCdec_inst->mode;
+  const int old_use_enhancer = iLBCdec_inst->use_enhancer;
+
+  size_t i;
+  int16_t order_plus_one;
+
+  int16_t last_bit;
+  int16_t *data;
+  /* Stack based */
+  int16_t decresidual[BLOCKL_MAX];
+  int16_t PLCresidual[BLOCKL_MAX + LPC_FILTERORDER];
+  int16_t syntdenum[NSUB_MAX*(LPC_FILTERORDER+1)];
+  int16_t PLClpc[LPC_FILTERORDER + 1];
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+  uint16_t swapped[NO_OF_WORDS_30MS];
+#endif
+  iLBC_bits *iLBCbits_inst = (iLBC_bits*)PLCresidual;
+
+  /* Reuse some buffers that are non overlapping in order to save stack memory */
+  data = &PLCresidual[LPC_FILTERORDER];
+
+  if (mode) { /* the data are good */
+
+    /* decode data */
+
+    /* Unpacketize bits into parameters */
+
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+    WebRtcIlbcfix_SwapBytes(bytes, iLBCdec_inst->no_of_words, swapped);
+    last_bit = WebRtcIlbcfix_UnpackBits(swapped, iLBCbits_inst, iLBCdec_inst->mode);
+#else
+    last_bit = WebRtcIlbcfix_UnpackBits(bytes, iLBCbits_inst, iLBCdec_inst->mode);
+#endif
+
+    /* Check for bit errors */
+    if (iLBCbits_inst->startIdx<1)
+      mode = 0;
+    if ((iLBCdec_inst->mode==20) && (iLBCbits_inst->startIdx>3))
+      mode = 0;
+    if ((iLBCdec_inst->mode==30) && (iLBCbits_inst->startIdx>5))
+      mode = 0;
+    if (last_bit==1)
+      mode = 0;
+
+    if (mode) { /* No bit errors was detected, continue decoding */
+      /* Stack based */
+      int16_t lsfdeq[LPC_FILTERORDER*LPC_N_MAX];
+      int16_t weightdenum[(LPC_FILTERORDER + 1)*NSUB_MAX];
+
+      /* adjust index */
+      WebRtcIlbcfix_IndexConvDec(iLBCbits_inst->cb_index);
+
+      /* decode the lsf */
+      WebRtcIlbcfix_SimpleLsfDeQ(lsfdeq, (int16_t*)(iLBCbits_inst->lsf), iLBCdec_inst->lpc_n);
+      WebRtcIlbcfix_LsfCheck(lsfdeq, LPC_FILTERORDER, iLBCdec_inst->lpc_n);
+      WebRtcIlbcfix_DecoderInterpolateLsp(syntdenum, weightdenum,
+                                          lsfdeq, LPC_FILTERORDER, iLBCdec_inst);
+
+      /* Decode the residual using the cb and gain indexes */
+      if (!WebRtcIlbcfix_DecodeResidual(iLBCdec_inst, iLBCbits_inst,
+                                        decresidual, syntdenum))
+        goto error;
+
+      /* preparing the plc for a future loss! */
+      WebRtcIlbcfix_DoThePlc(
+          PLCresidual, PLClpc, 0, decresidual,
+          syntdenum + (LPC_FILTERORDER + 1) * (iLBCdec_inst->nsub - 1),
+          iLBCdec_inst->last_lag, iLBCdec_inst);
+
+      /* Use the output from doThePLC */
+      WEBRTC_SPL_MEMCPY_W16(decresidual, PLCresidual, iLBCdec_inst->blockl);
+    }
+
+  }
+
+  if (mode == 0) {
+    /* the data is bad (either a PLC call
+     * was made or a bit error was detected)
+     */
+
+    /* packet loss conceal */
+
+    WebRtcIlbcfix_DoThePlc(PLCresidual, PLClpc, 1, decresidual, syntdenum,
+                           iLBCdec_inst->last_lag, iLBCdec_inst);
+
+    WEBRTC_SPL_MEMCPY_W16(decresidual, PLCresidual, iLBCdec_inst->blockl);
+
+    order_plus_one = LPC_FILTERORDER + 1;
+
+    for (i = 0; i < iLBCdec_inst->nsub; i++) {
+      WEBRTC_SPL_MEMCPY_W16(syntdenum+(i*order_plus_one),
+                            PLClpc, order_plus_one);
+    }
+  }
+
+  if ((*iLBCdec_inst).use_enhancer == 1) { /* Enhancer activated */
+
+    /* Update the filter and filter coefficients if there was a packet loss */
+    if (iLBCdec_inst->prev_enh_pl==2) {
+      for (i=0;i<iLBCdec_inst->nsub;i++) {
+        WEBRTC_SPL_MEMCPY_W16(&(iLBCdec_inst->old_syntdenum[i*(LPC_FILTERORDER+1)]),
+                              syntdenum, (LPC_FILTERORDER+1));
+      }
+    }
+
+    /* post filtering */
+    (*iLBCdec_inst).last_lag =
+        WebRtcIlbcfix_EnhancerInterface(data, decresidual, iLBCdec_inst);
+
+    /* synthesis filtering */
+
+    /* Set up the filter state */
+    WEBRTC_SPL_MEMCPY_W16(&data[-LPC_FILTERORDER], iLBCdec_inst->syntMem, LPC_FILTERORDER);
+
+    if (iLBCdec_inst->mode==20) {
+      /* Enhancer has 40 samples delay */
+      i=0;
+      WebRtcSpl_FilterARFastQ12(
+          data, data,
+          iLBCdec_inst->old_syntdenum + (i+iLBCdec_inst->nsub-1)*(LPC_FILTERORDER+1),
+          LPC_FILTERORDER+1, SUBL);
+
+      for (i=1; i < iLBCdec_inst->nsub; i++) {
+        WebRtcSpl_FilterARFastQ12(
+            data+i*SUBL, data+i*SUBL,
+            syntdenum+(i-1)*(LPC_FILTERORDER+1),
+            LPC_FILTERORDER+1, SUBL);
+      }
+
+    } else if (iLBCdec_inst->mode==30) {
+      /* Enhancer has 80 samples delay */
+      for (i=0; i < 2; i++) {
+        WebRtcSpl_FilterARFastQ12(
+            data+i*SUBL, data+i*SUBL,
+            iLBCdec_inst->old_syntdenum + (i+4)*(LPC_FILTERORDER+1),
+            LPC_FILTERORDER+1, SUBL);
+      }
+      for (i=2; i < iLBCdec_inst->nsub; i++) {
+        WebRtcSpl_FilterARFastQ12(
+            data+i*SUBL, data+i*SUBL,
+            syntdenum+(i-2)*(LPC_FILTERORDER+1),
+            LPC_FILTERORDER+1, SUBL);
+      }
+    }
+
+    /* Save the filter state */
+    WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->syntMem, &data[iLBCdec_inst->blockl-LPC_FILTERORDER], LPC_FILTERORDER);
+
+  } else { /* Enhancer not activated */
+    size_t lag;
+
+    /* Find last lag (since the enhancer is not called to give this info) */
+    lag = 20;
+    if (iLBCdec_inst->mode==20) {
+      lag = WebRtcIlbcfix_XcorrCoef(
+          &decresidual[iLBCdec_inst->blockl-60],
+          &decresidual[iLBCdec_inst->blockl-60-lag],
+          60,
+          80, lag, -1);
+    } else {
+      lag = WebRtcIlbcfix_XcorrCoef(
+          &decresidual[iLBCdec_inst->blockl-ENH_BLOCKL],
+          &decresidual[iLBCdec_inst->blockl-ENH_BLOCKL-lag],
+          ENH_BLOCKL,
+          100, lag, -1);
+    }
+
+    /* Store lag (it is needed if next packet is lost) */
+    (*iLBCdec_inst).last_lag = lag;
+
+    /* copy data and run synthesis filter */
+    WEBRTC_SPL_MEMCPY_W16(data, decresidual, iLBCdec_inst->blockl);
+
+    /* Set up the filter state */
+    WEBRTC_SPL_MEMCPY_W16(&data[-LPC_FILTERORDER], iLBCdec_inst->syntMem, LPC_FILTERORDER);
+
+    for (i=0; i < iLBCdec_inst->nsub; i++) {
+      WebRtcSpl_FilterARFastQ12(
+          data+i*SUBL, data+i*SUBL,
+          syntdenum + i*(LPC_FILTERORDER+1),
+          LPC_FILTERORDER+1, SUBL);
+    }
+
+    /* Save the filter state */
+    WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->syntMem, &data[iLBCdec_inst->blockl-LPC_FILTERORDER], LPC_FILTERORDER);
+  }
+
+  WEBRTC_SPL_MEMCPY_W16(decblock,data,iLBCdec_inst->blockl);
+
+  /* High pass filter the signal (with upscaling a factor 2 and saturation) */
+  WebRtcIlbcfix_HpOutput(decblock, (int16_t*)WebRtcIlbcfix_kHpOutCoefs,
+                         iLBCdec_inst->hpimemy, iLBCdec_inst->hpimemx,
+                         iLBCdec_inst->blockl);
+
+  WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->old_syntdenum,
+                        syntdenum, iLBCdec_inst->nsub*(LPC_FILTERORDER+1));
+
+  iLBCdec_inst->prev_enh_pl=0;
+
+  if (mode==0) { /* PLC was used */
+    iLBCdec_inst->prev_enh_pl=1;
+  }
+
+  return 0;  // Success.
+
+error:
+  // The decoder got sick from eating that data. Reset it and return.
+  WebRtcIlbcfix_InitDecode(iLBCdec_inst, old_mode, old_use_enhancer);
+  return -1;  // Error
+}
diff --git a/modules/audio_coding/codecs/ilbc/decode.h b/modules/audio_coding/codecs/ilbc/decode.h
new file mode 100644
index 0000000..ecc968e
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/decode.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Decode.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DECODE_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DECODE_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  main decoder function
+ *---------------------------------------------------------------*/
+
+// Returns 0 on success, -1 on error.
+int WebRtcIlbcfix_DecodeImpl(
+    int16_t* decblock,         /* (o) decoded signal block */
+    const uint16_t* bytes,     /* (i) encoded signal bits */
+    IlbcDecoder* iLBCdec_inst, /* (i/o) the decoder state
+                                           structure */
+    int16_t mode               /* (i) 0: bad packet, PLC,
+                                      1: normal */
+    ) RTC_WARN_UNUSED_RESULT;
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/decode_residual.c b/modules/audio_coding/codecs/ilbc/decode_residual.c
new file mode 100644
index 0000000..3c113ae
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/decode_residual.c
@@ -0,0 +1,185 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_DecodeResidual.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/decode_residual.h"
+
+#include <string.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/state_construct.h"
+#include "modules/audio_coding/codecs/ilbc/cb_construct.h"
+#include "modules/audio_coding/codecs/ilbc/index_conv_dec.h"
+#include "modules/audio_coding/codecs/ilbc/do_plc.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/enhancer_interface.h"
+#include "modules/audio_coding/codecs/ilbc/xcorr_coef.h"
+#include "modules/audio_coding/codecs/ilbc/lsf_check.h"
+
+/*----------------------------------------------------------------*
+ *  frame residual decoder function (subrutine to iLBC_decode)
+ *---------------------------------------------------------------*/
+
+bool WebRtcIlbcfix_DecodeResidual(
+    IlbcDecoder *iLBCdec_inst,
+    /* (i/o) the decoder state structure */
+    iLBC_bits *iLBC_encbits, /* (i/o) Encoded bits, which are used
+                                for the decoding  */
+    int16_t *decresidual,  /* (o) decoded residual frame */
+    int16_t *syntdenum   /* (i) the decoded synthesis filter
+                                  coefficients */
+                                  ) {
+  size_t meml_gotten, diff, start_pos;
+  size_t subcount, subframe;
+  int16_t *reverseDecresidual = iLBCdec_inst->enh_buf; /* Reversed decoded data, used for decoding backwards in time (reuse memory in state) */
+  int16_t *memVec = iLBCdec_inst->prevResidual;  /* Memory for codebook and filter state (reuse memory in state) */
+  int16_t *mem = &memVec[CB_HALFFILTERLEN];   /* Memory for codebook */
+
+  diff = STATE_LEN - iLBCdec_inst->state_short_len;
+
+  if (iLBC_encbits->state_first == 1) {
+    start_pos = (iLBC_encbits->startIdx-1)*SUBL;
+  } else {
+    start_pos = (iLBC_encbits->startIdx-1)*SUBL + diff;
+  }
+
+  /* decode scalar part of start state */
+
+  WebRtcIlbcfix_StateConstruct(iLBC_encbits->idxForMax,
+                               iLBC_encbits->idxVec, &syntdenum[(iLBC_encbits->startIdx-1)*(LPC_FILTERORDER+1)],
+                               &decresidual[start_pos], iLBCdec_inst->state_short_len
+                               );
+
+  if (iLBC_encbits->state_first) { /* put adaptive part in the end */
+
+    /* setup memory */
+
+    WebRtcSpl_MemSetW16(mem, 0, CB_MEML - iLBCdec_inst->state_short_len);
+    WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-iLBCdec_inst->state_short_len, decresidual+start_pos,
+                          iLBCdec_inst->state_short_len);
+
+    /* construct decoded vector */
+
+    if (!WebRtcIlbcfix_CbConstruct(
+            &decresidual[start_pos + iLBCdec_inst->state_short_len],
+            iLBC_encbits->cb_index, iLBC_encbits->gain_index,
+            mem + CB_MEML - ST_MEM_L_TBL, ST_MEM_L_TBL, diff))
+      return false;  // Error.
+
+  }
+  else {/* put adaptive part in the beginning */
+
+    /* setup memory */
+
+    meml_gotten = iLBCdec_inst->state_short_len;
+    WebRtcSpl_MemCpyReversedOrder(mem+CB_MEML-1,
+                                  decresidual+start_pos, meml_gotten);
+    WebRtcSpl_MemSetW16(mem, 0, CB_MEML - meml_gotten);
+
+    /* construct decoded vector */
+
+    if (!WebRtcIlbcfix_CbConstruct(reverseDecresidual, iLBC_encbits->cb_index,
+                                   iLBC_encbits->gain_index,
+                                   mem + CB_MEML - ST_MEM_L_TBL, ST_MEM_L_TBL,
+                                   diff))
+      return false;  // Error.
+
+    /* get decoded residual from reversed vector */
+
+    WebRtcSpl_MemCpyReversedOrder(&decresidual[start_pos-1],
+                                  reverseDecresidual, diff);
+  }
+
+  /* counter for predicted subframes */
+
+  subcount=1;
+
+  /* forward prediction of subframes */
+
+  if (iLBCdec_inst->nsub > iLBC_encbits->startIdx + 1) {
+
+    /* setup memory */
+    WebRtcSpl_MemSetW16(mem, 0, CB_MEML-STATE_LEN);
+    WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-STATE_LEN,
+                          decresidual+(iLBC_encbits->startIdx-1)*SUBL, STATE_LEN);
+
+    /* loop over subframes to encode */
+
+    size_t Nfor = iLBCdec_inst->nsub - iLBC_encbits->startIdx - 1;
+    for (subframe=0; subframe<Nfor; subframe++) {
+
+      /* construct decoded vector */
+      if (!WebRtcIlbcfix_CbConstruct(
+              &decresidual[(iLBC_encbits->startIdx + 1 + subframe) * SUBL],
+              iLBC_encbits->cb_index + subcount * CB_NSTAGES,
+              iLBC_encbits->gain_index + subcount * CB_NSTAGES, mem, MEM_LF_TBL,
+              SUBL))
+        return false;  // Error;
+
+      /* update memory */
+      memmove(mem, mem + SUBL, (CB_MEML - SUBL) * sizeof(*mem));
+      WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-SUBL,
+                            &decresidual[(iLBC_encbits->startIdx+1+subframe)*SUBL], SUBL);
+
+      subcount++;
+    }
+
+  }
+
+  /* backward prediction of subframes */
+
+  if (iLBC_encbits->startIdx > 1) {
+
+    /* setup memory */
+
+    meml_gotten = SUBL*(iLBCdec_inst->nsub+1-iLBC_encbits->startIdx);
+    if( meml_gotten > CB_MEML ) {
+      meml_gotten=CB_MEML;
+    }
+
+    WebRtcSpl_MemCpyReversedOrder(mem+CB_MEML-1,
+                                  decresidual+(iLBC_encbits->startIdx-1)*SUBL, meml_gotten);
+    WebRtcSpl_MemSetW16(mem, 0, CB_MEML - meml_gotten);
+
+    /* loop over subframes to decode */
+
+    size_t Nback = iLBC_encbits->startIdx - 1;
+    for (subframe=0; subframe<Nback; subframe++) {
+
+      /* construct decoded vector */
+      if (!WebRtcIlbcfix_CbConstruct(
+              &reverseDecresidual[subframe * SUBL],
+              iLBC_encbits->cb_index + subcount * CB_NSTAGES,
+              iLBC_encbits->gain_index + subcount * CB_NSTAGES, mem, MEM_LF_TBL,
+              SUBL))
+        return false;  // Error.
+
+      /* update memory */
+      memmove(mem, mem + SUBL, (CB_MEML - SUBL) * sizeof(*mem));
+      WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-SUBL,
+                            &reverseDecresidual[subframe*SUBL], SUBL);
+
+      subcount++;
+    }
+
+    /* get decoded residual from reversed vector */
+    WebRtcSpl_MemCpyReversedOrder(decresidual+SUBL*Nback-1,
+                                  reverseDecresidual, SUBL*Nback);
+  }
+
+  return true;  // Success.
+}
diff --git a/modules/audio_coding/codecs/ilbc/decode_residual.h b/modules/audio_coding/codecs/ilbc/decode_residual.h
new file mode 100644
index 0000000..7468e5f
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/decode_residual.h
@@ -0,0 +1,40 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_DecodeResidual.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DECODE_RESIDUAL_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DECODE_RESIDUAL_H_
+
+#include <stdbool.h>
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  frame residual decoder function (subrutine to iLBC_decode)
+ *---------------------------------------------------------------*/
+
+// Returns true on success, false on failure. In case of failure, the decoder
+// state may be corrupted and needs resetting.
+bool WebRtcIlbcfix_DecodeResidual(
+    IlbcDecoder* iLBCdec_inst, /* (i/o) the decoder state structure */
+    iLBC_bits* iLBC_encbits,   /* (i/o) Encoded bits, which are used
+                                        for the decoding  */
+    int16_t* decresidual,      /* (o) decoded residual frame */
+    int16_t* syntdenum         /* (i) the decoded synthesis filter
+                                                         coefficients */
+    ) RTC_WARN_UNUSED_RESULT;
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c b/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c
new file mode 100644
index 0000000..8413a73
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c
@@ -0,0 +1,83 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_DecoderInterpolateLsp.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_dec.h"
+#include "modules/audio_coding/codecs/ilbc/bw_expand.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ *  obtain synthesis and weighting filters form lsf coefficients
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_DecoderInterpolateLsp(
+    int16_t *syntdenum,  /* (o) synthesis filter coefficients */
+    int16_t *weightdenum, /* (o) weighting denumerator
+                                   coefficients */
+    int16_t *lsfdeq,   /* (i) dequantized lsf coefficients */
+    int16_t length,   /* (i) length of lsf coefficient vector */
+    IlbcDecoder *iLBCdec_inst
+    /* (i) the decoder state structure */
+                                          ){
+  size_t i;
+  int pos, lp_length;
+  int16_t  lp[LPC_FILTERORDER + 1], *lsfdeq2;
+
+  lsfdeq2 = lsfdeq + length;
+  lp_length = length + 1;
+
+  if (iLBCdec_inst->mode==30) {
+    /* subframe 1: Interpolation between old and first LSF */
+
+    WebRtcIlbcfix_LspInterpolate2PolyDec(lp, (*iLBCdec_inst).lsfdeqold, lsfdeq,
+                                         WebRtcIlbcfix_kLsfWeight30ms[0], length);
+    WEBRTC_SPL_MEMCPY_W16(syntdenum,lp,lp_length);
+    WebRtcIlbcfix_BwExpand(weightdenum, lp, (int16_t*)WebRtcIlbcfix_kLpcChirpSyntDenum, (int16_t)lp_length);
+
+    /* subframes 2 to 6: interpolation between first and last LSF */
+
+    pos = lp_length;
+    for (i = 1; i < 6; i++) {
+      WebRtcIlbcfix_LspInterpolate2PolyDec(lp, lsfdeq, lsfdeq2,
+                                           WebRtcIlbcfix_kLsfWeight30ms[i], length);
+      WEBRTC_SPL_MEMCPY_W16(syntdenum + pos,lp,lp_length);
+      WebRtcIlbcfix_BwExpand(weightdenum + pos, lp,
+                             (int16_t*)WebRtcIlbcfix_kLpcChirpSyntDenum, (int16_t)lp_length);
+      pos += lp_length;
+    }
+  } else { /* iLBCdec_inst->mode=20 */
+    /* subframes 1 to 4: interpolation between old and new LSF */
+    pos = 0;
+    for (i = 0; i < iLBCdec_inst->nsub; i++) {
+      WebRtcIlbcfix_LspInterpolate2PolyDec(lp, iLBCdec_inst->lsfdeqold, lsfdeq,
+                                           WebRtcIlbcfix_kLsfWeight20ms[i], length);
+      WEBRTC_SPL_MEMCPY_W16(syntdenum+pos,lp,lp_length);
+      WebRtcIlbcfix_BwExpand(weightdenum+pos, lp,
+                             (int16_t*)WebRtcIlbcfix_kLpcChirpSyntDenum, (int16_t)lp_length);
+      pos += lp_length;
+    }
+  }
+
+  /* update memory */
+
+  if (iLBCdec_inst->mode==30) {
+    WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->lsfdeqold, lsfdeq2, length);
+  } else {
+    WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->lsfdeqold, lsfdeq, length);
+  }
+}
diff --git a/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.h b/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.h
new file mode 100644
index 0000000..416fc36
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_DecoderInterpolateLsp.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DECODER_INTERPOLATE_LSF_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DECODER_INTERPOLATE_LSF_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  obtain synthesis and weighting filters form lsf coefficients
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_DecoderInterpolateLsp(
+    int16_t *syntdenum,  /* (o) synthesis filter coefficients */
+    int16_t *weightdenum, /* (o) weighting denumerator
+                                   coefficients */
+    int16_t *lsfdeq,   /* (i) dequantized lsf coefficients */
+    int16_t length,   /* (i) length of lsf coefficient vector */
+    IlbcDecoder *iLBCdec_inst
+    /* (i) the decoder state structure */
+                                          );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/defines.h b/modules/audio_coding/codecs/ilbc/defines.h
new file mode 100644
index 0000000..6100801
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/defines.h
@@ -0,0 +1,221 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ define.h
+
+******************************************************************/
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DEFINES_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DEFINES_H_
+
+#include <string.h>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+/* general codec settings */
+
+#define FS       8000
+#define BLOCKL_20MS     160
+#define BLOCKL_30MS     240
+#define BLOCKL_MAX     240
+#define NSUB_20MS     4
+#define NSUB_30MS     6
+#define NSUB_MAX     6
+#define NASUB_20MS     2
+#define NASUB_30MS     4
+#define NASUB_MAX     4
+#define SUBL      40
+#define STATE_LEN     80
+#define STATE_SHORT_LEN_30MS  58
+#define STATE_SHORT_LEN_20MS  57
+
+/* LPC settings */
+
+#define LPC_FILTERORDER    10
+#define LPC_LOOKBACK    60
+#define LPC_N_20MS     1
+#define LPC_N_30MS     2
+#define LPC_N_MAX     2
+#define LPC_ASYMDIFF    20
+#define LSF_NSPLIT     3
+#define LSF_NUMBER_OF_STEPS   4
+#define LPC_HALFORDER    5
+#define COS_GRID_POINTS 60
+
+/* cb settings */
+
+#define CB_NSTAGES     3
+#define CB_EXPAND     2
+#define CB_MEML      147
+#define CB_FILTERLEN    (2*4)
+#define CB_HALFFILTERLEN   4
+#define CB_RESRANGE     34
+#define CB_MAXGAIN_FIXQ6   83 /* error = -0.24% */
+#define CB_MAXGAIN_FIXQ14   21299
+
+/* enhancer */
+
+#define ENH_BLOCKL     80  /* block length */
+#define ENH_BLOCKL_HALF    (ENH_BLOCKL/2)
+#define ENH_HL      3  /* 2*ENH_HL+1 is number blocks
+                                                                           in said second sequence */
+#define ENH_SLOP     2  /* max difference estimated and
+                                                                           correct pitch period */
+#define ENH_PLOCSL     8  /* pitch-estimates and
+                                                                           pitch-locations buffer length */
+#define ENH_OVERHANG    2
+#define ENH_UPS0     4  /* upsampling rate */
+#define ENH_FL0      3  /* 2*FLO+1 is the length of each filter */
+#define ENH_FLO_MULT2_PLUS1   7
+#define ENH_VECTL     (ENH_BLOCKL+2*ENH_FL0)
+#define ENH_CORRDIM     (2*ENH_SLOP+1)
+#define ENH_NBLOCKS     (BLOCKL/ENH_BLOCKL)
+#define ENH_NBLOCKS_EXTRA   5
+#define ENH_NBLOCKS_TOT    8 /* ENH_NBLOCKS+ENH_NBLOCKS_EXTRA */
+#define ENH_BUFL     (ENH_NBLOCKS_TOT)*ENH_BLOCKL
+#define ENH_BUFL_FILTEROVERHEAD  3
+#define ENH_A0      819   /* Q14 */
+#define ENH_A0_MINUS_A0A0DIV4  848256041 /* Q34 */
+#define ENH_A0DIV2     26843546 /* Q30 */
+
+/* PLC */
+
+/* Down sampling */
+
+#define FILTERORDER_DS_PLUS1  7
+#define DELAY_DS     3
+#define FACTOR_DS     2
+
+/* bit stream defs */
+
+#define NO_OF_BYTES_20MS   38
+#define NO_OF_BYTES_30MS   50
+#define NO_OF_WORDS_20MS   19
+#define NO_OF_WORDS_30MS   25
+#define STATE_BITS     3
+#define BYTE_LEN     8
+#define ULP_CLASSES     3
+
+/* help parameters */
+
+#define TWO_PI_FIX     25736 /* Q12 */
+
+/* Constants for codebook search and creation */
+
+#define ST_MEM_L_TBL  85
+#define MEM_LF_TBL  147
+
+
+/* Struct for the bits */
+typedef struct iLBC_bits_t_ {
+  int16_t lsf[LSF_NSPLIT*LPC_N_MAX];
+  int16_t cb_index[CB_NSTAGES*(NASUB_MAX+1)];  /* First CB_NSTAGES values contains extra CB index */
+  int16_t gain_index[CB_NSTAGES*(NASUB_MAX+1)]; /* First CB_NSTAGES values contains extra CB gain */
+  size_t idxForMax;
+  int16_t state_first;
+  int16_t idxVec[STATE_SHORT_LEN_30MS];
+  int16_t firstbits;
+  size_t startIdx;
+} iLBC_bits;
+
+/* type definition encoder instance */
+typedef struct IlbcEncoder_ {
+
+  /* flag for frame size mode */
+  int16_t mode;
+
+  /* basic parameters for different frame sizes */
+  size_t blockl;
+  size_t nsub;
+  int16_t nasub;
+  size_t no_of_bytes, no_of_words;
+  int16_t lpc_n;
+  size_t state_short_len;
+
+  /* analysis filter state */
+  int16_t anaMem[LPC_FILTERORDER];
+
+  /* Fix-point old lsf parameters for interpolation */
+  int16_t lsfold[LPC_FILTERORDER];
+  int16_t lsfdeqold[LPC_FILTERORDER];
+
+  /* signal buffer for LP analysis */
+  int16_t lpc_buffer[LPC_LOOKBACK + BLOCKL_MAX];
+
+  /* state of input HP filter */
+  int16_t hpimemx[2];
+  int16_t hpimemy[4];
+
+#ifdef SPLIT_10MS
+  int16_t weightdenumbuf[66];
+  int16_t past_samples[160];
+  uint16_t bytes[25];
+  int16_t section;
+  int16_t Nfor_flag;
+  int16_t Nback_flag;
+  int16_t start_pos;
+  size_t diff;
+#endif
+
+} IlbcEncoder;
+
+/* type definition decoder instance */
+typedef struct IlbcDecoder_ {
+
+  /* flag for frame size mode */
+  int16_t mode;
+
+  /* basic parameters for different frame sizes */
+  size_t blockl;
+  size_t nsub;
+  int16_t nasub;
+  size_t no_of_bytes, no_of_words;
+  int16_t lpc_n;
+  size_t state_short_len;
+
+  /* synthesis filter state */
+  int16_t syntMem[LPC_FILTERORDER];
+
+  /* old LSF for interpolation */
+  int16_t lsfdeqold[LPC_FILTERORDER];
+
+  /* pitch lag estimated in enhancer and used in PLC */
+  size_t last_lag;
+
+  /* PLC state information */
+  int consPLICount, prev_enh_pl;
+  int16_t perSquare;
+
+  int16_t prevScale, prevPLI;
+  size_t prevLag;
+  int16_t prevLpc[LPC_FILTERORDER+1];
+  int16_t prevResidual[NSUB_MAX*SUBL];
+  int16_t seed;
+
+  /* previous synthesis filter parameters */
+
+  int16_t old_syntdenum[(LPC_FILTERORDER + 1)*NSUB_MAX];
+
+  /* state of output HP filter */
+  int16_t hpimemx[2];
+  int16_t hpimemy[4];
+
+  /* enhancer state information */
+  int use_enhancer;
+  int16_t enh_buf[ENH_BUFL+ENH_BUFL_FILTEROVERHEAD];
+  size_t enh_period[ENH_NBLOCKS_TOT];
+
+} IlbcDecoder;
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/do_plc.c b/modules/audio_coding/codecs/ilbc/do_plc.c
new file mode 100644
index 0000000..5d3e896
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/do_plc.c
@@ -0,0 +1,307 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_DoThePlc.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/comp_corr.h"
+#include "modules/audio_coding/codecs/ilbc/bw_expand.h"
+
+/*----------------------------------------------------------------*
+ *  Packet loss concealment routine. Conceals a residual signal
+ *  and LP parameters. If no packet loss, update state.
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_DoThePlc(
+    int16_t *PLCresidual,  /* (o) concealed residual */
+    int16_t *PLClpc,    /* (o) concealed LP parameters */
+    int16_t PLI,     /* (i) packet loss indicator
+                                                           0 - no PL, 1 = PL */
+    int16_t *decresidual,  /* (i) decoded residual */
+    int16_t *lpc,    /* (i) decoded LPC (only used for no PL) */
+    size_t inlag,    /* (i) pitch lag */
+    IlbcDecoder *iLBCdec_inst
+    /* (i/o) decoder instance */
+                            ){
+  size_t i;
+  int32_t cross, ener, cross_comp, ener_comp = 0;
+  int32_t measure, maxMeasure, energy;
+  int16_t max, crossSquareMax, crossSquare;
+  size_t j, lag, randlag;
+  int16_t tmp1, tmp2;
+  int16_t shift1, shift2, shift3, shiftMax;
+  int16_t scale3;
+  size_t corrLen;
+  int32_t tmpW32, tmp2W32;
+  int16_t use_gain;
+  int16_t tot_gain;
+  int16_t max_perSquare;
+  int16_t scale1, scale2;
+  int16_t totscale;
+  int32_t nom;
+  int16_t denom;
+  int16_t pitchfact;
+  size_t use_lag;
+  int ind;
+  int16_t randvec[BLOCKL_MAX];
+
+  /* Packet Loss */
+  if (PLI == 1) {
+
+    (*iLBCdec_inst).consPLICount += 1;
+
+    /* if previous frame not lost,
+       determine pitch pred. gain */
+
+    if (iLBCdec_inst->prevPLI != 1) {
+
+      /* Maximum 60 samples are correlated, preserve as high accuracy
+         as possible without getting overflow */
+      max = WebRtcSpl_MaxAbsValueW16((*iLBCdec_inst).prevResidual,
+                                     iLBCdec_inst->blockl);
+      scale3 = (WebRtcSpl_GetSizeInBits(max)<<1) - 25;
+      if (scale3 < 0) {
+        scale3 = 0;
+      }
+
+      /* Store scale for use when interpolating between the
+       * concealment and the received packet */
+      iLBCdec_inst->prevScale = scale3;
+
+      /* Search around the previous lag +/-3 to find the
+         best pitch period */
+      lag = inlag - 3;
+
+      /* Guard against getting outside the frame */
+      corrLen = (size_t)WEBRTC_SPL_MIN(60, iLBCdec_inst->blockl-(inlag+3));
+
+      WebRtcIlbcfix_CompCorr( &cross, &ener,
+                              iLBCdec_inst->prevResidual, lag, iLBCdec_inst->blockl, corrLen, scale3);
+
+      /* Normalize and store cross^2 and the number of shifts */
+      shiftMax = WebRtcSpl_GetSizeInBits(WEBRTC_SPL_ABS_W32(cross))-15;
+      crossSquareMax = (int16_t)((
+          (int16_t)WEBRTC_SPL_SHIFT_W32(cross, -shiftMax) *
+          (int16_t)WEBRTC_SPL_SHIFT_W32(cross, -shiftMax)) >> 15);
+
+      for (j=inlag-2;j<=inlag+3;j++) {
+        WebRtcIlbcfix_CompCorr( &cross_comp, &ener_comp,
+                                iLBCdec_inst->prevResidual, j, iLBCdec_inst->blockl, corrLen, scale3);
+
+        /* Use the criteria (corr*corr)/energy to compare if
+           this lag is better or not. To avoid the division,
+           do a cross multiplication */
+        shift1 = WebRtcSpl_GetSizeInBits(WEBRTC_SPL_ABS_W32(cross_comp))-15;
+        crossSquare = (int16_t)((
+            (int16_t)WEBRTC_SPL_SHIFT_W32(cross_comp, -shift1) *
+            (int16_t)WEBRTC_SPL_SHIFT_W32(cross_comp, -shift1)) >> 15);
+
+        shift2 = WebRtcSpl_GetSizeInBits(ener)-15;
+        measure = (int16_t)WEBRTC_SPL_SHIFT_W32(ener, -shift2) * crossSquare;
+
+        shift3 = WebRtcSpl_GetSizeInBits(ener_comp)-15;
+        maxMeasure = (int16_t)WEBRTC_SPL_SHIFT_W32(ener_comp, -shift3) *
+            crossSquareMax;
+
+        /* Calculate shift value, so that the two measures can
+           be put in the same Q domain */
+        if(2 * shiftMax + shift3 > 2 * shift1 + shift2) {
+          tmp1 =
+              WEBRTC_SPL_MIN(31, 2 * shiftMax + shift3 - 2 * shift1 - shift2);
+          tmp2 = 0;
+        } else {
+          tmp1 = 0;
+          tmp2 =
+              WEBRTC_SPL_MIN(31, 2 * shift1 + shift2 - 2 * shiftMax - shift3);
+        }
+
+        if ((measure>>tmp1) > (maxMeasure>>tmp2)) {
+          /* New lag is better => record lag, measure and domain */
+          lag = j;
+          crossSquareMax = crossSquare;
+          cross = cross_comp;
+          shiftMax = shift1;
+          ener = ener_comp;
+        }
+      }
+
+      /* Calculate the periodicity for the lag with the maximum correlation.
+
+         Definition of the periodicity:
+         abs(corr(vec1, vec2))/(sqrt(energy(vec1))*sqrt(energy(vec2)))
+
+         Work in the Square domain to simplify the calculations
+         max_perSquare is less than 1 (in Q15)
+      */
+      tmp2W32=WebRtcSpl_DotProductWithScale(&iLBCdec_inst->prevResidual[iLBCdec_inst->blockl-corrLen],
+                                            &iLBCdec_inst->prevResidual[iLBCdec_inst->blockl-corrLen],
+                                            corrLen, scale3);
+
+      if ((tmp2W32>0)&&(ener_comp>0)) {
+        /* norm energies to int16_t, compute the product of the energies and
+           use the upper int16_t as the denominator */
+
+        scale1=(int16_t)WebRtcSpl_NormW32(tmp2W32)-16;
+        tmp1=(int16_t)WEBRTC_SPL_SHIFT_W32(tmp2W32, scale1);
+
+        scale2=(int16_t)WebRtcSpl_NormW32(ener)-16;
+        tmp2=(int16_t)WEBRTC_SPL_SHIFT_W32(ener, scale2);
+        denom = (int16_t)((tmp1 * tmp2) >> 16);  /* in Q(scale1+scale2-16) */
+
+        /* Square the cross correlation and norm it such that max_perSquare
+           will be in Q15 after the division */
+
+        totscale = scale1+scale2-1;
+        tmp1 = (int16_t)WEBRTC_SPL_SHIFT_W32(cross, (totscale>>1));
+        tmp2 = (int16_t)WEBRTC_SPL_SHIFT_W32(cross, totscale-(totscale>>1));
+
+        nom = tmp1 * tmp2;
+        max_perSquare = (int16_t)WebRtcSpl_DivW32W16(nom, denom);
+
+      } else {
+        max_perSquare = 0;
+      }
+    }
+
+    /* previous frame lost, use recorded lag and gain */
+
+    else {
+      lag = iLBCdec_inst->prevLag;
+      max_perSquare = iLBCdec_inst->perSquare;
+    }
+
+    /* Attenuate signal and scale down pitch pred gain if
+       several frames lost consecutively */
+
+    use_gain = 32767;   /* 1.0 in Q15 */
+
+    if (iLBCdec_inst->consPLICount*iLBCdec_inst->blockl>320) {
+      use_gain = 29491;  /* 0.9 in Q15 */
+    } else if (iLBCdec_inst->consPLICount*iLBCdec_inst->blockl>640) {
+      use_gain = 22938;  /* 0.7 in Q15 */
+    } else if (iLBCdec_inst->consPLICount*iLBCdec_inst->blockl>960) {
+      use_gain = 16384;  /* 0.5 in Q15 */
+    } else if (iLBCdec_inst->consPLICount*iLBCdec_inst->blockl>1280) {
+      use_gain = 0;   /* 0.0 in Q15 */
+    }
+
+    /* Compute mixing factor of picth repeatition and noise:
+       for max_per>0.7 set periodicity to 1.0
+       0.4<max_per<0.7 set periodicity to (maxper-0.4)/0.7-0.4)
+       max_per<0.4 set periodicity to 0.0
+    */
+
+    if (max_perSquare>7868) { /* periodicity > 0.7  (0.7^4=0.2401 in Q15) */
+      pitchfact = 32767;
+    } else if (max_perSquare>839) { /* 0.4 < periodicity < 0.7 (0.4^4=0.0256 in Q15) */
+      /* find best index and interpolate from that */
+      ind = 5;
+      while ((max_perSquare<WebRtcIlbcfix_kPlcPerSqr[ind])&&(ind>0)) {
+        ind--;
+      }
+      /* pitch fact is approximated by first order */
+      tmpW32 = (int32_t)WebRtcIlbcfix_kPlcPitchFact[ind] +
+          ((WebRtcIlbcfix_kPlcPfSlope[ind] *
+              (max_perSquare - WebRtcIlbcfix_kPlcPerSqr[ind])) >> 11);
+
+      pitchfact = (int16_t)WEBRTC_SPL_MIN(tmpW32, 32767); /* guard against overflow */
+
+    } else { /* periodicity < 0.4 */
+      pitchfact = 0;
+    }
+
+    /* avoid repetition of same pitch cycle (buzzyness) */
+    use_lag = lag;
+    if (lag<80) {
+      use_lag = 2*lag;
+    }
+
+    /* compute concealed residual */
+    energy = 0;
+
+    for (i=0; i<iLBCdec_inst->blockl; i++) {
+
+      /* noise component -  52 < randlagFIX < 117 */
+      iLBCdec_inst->seed = (int16_t)(iLBCdec_inst->seed * 31821 + 13849);
+      randlag = 53 + (iLBCdec_inst->seed & 63);
+      if (randlag > i) {
+        randvec[i] =
+            iLBCdec_inst->prevResidual[iLBCdec_inst->blockl + i - randlag];
+      } else {
+        randvec[i] = iLBCdec_inst->prevResidual[i - randlag];
+      }
+
+      /* pitch repeatition component */
+      if (use_lag > i) {
+        PLCresidual[i] =
+            iLBCdec_inst->prevResidual[iLBCdec_inst->blockl + i - use_lag];
+      } else {
+        PLCresidual[i] = PLCresidual[i - use_lag];
+      }
+
+      /* Attinuate total gain for each 10 ms */
+      if (i<80) {
+        tot_gain=use_gain;
+      } else if (i<160) {
+        tot_gain = (int16_t)((31130 * use_gain) >> 15);  /* 0.95*use_gain */
+      } else {
+        tot_gain = (int16_t)((29491 * use_gain) >> 15);  /* 0.9*use_gain */
+      }
+
+
+      /* mix noise and pitch repeatition */
+      PLCresidual[i] = (int16_t)((tot_gain *
+          ((pitchfact * PLCresidual[i] + (32767 - pitchfact) * randvec[i] +
+              16384) >> 15)) >> 15);
+
+      /* Shifting down the result one step extra to ensure that no overflow
+         will occur */
+      energy += (PLCresidual[i] * PLCresidual[i]) >>
+          (iLBCdec_inst->prevScale + 1);
+    }
+
+    /* less than 30 dB, use only noise */
+    if (energy < (WEBRTC_SPL_SHIFT_W32(((int32_t)iLBCdec_inst->blockl*900),-(iLBCdec_inst->prevScale+1)))) {
+      energy = 0;
+      for (i=0; i<iLBCdec_inst->blockl; i++) {
+        PLCresidual[i] = randvec[i];
+      }
+    }
+
+    /* use the old LPC */
+    WEBRTC_SPL_MEMCPY_W16(PLClpc, (*iLBCdec_inst).prevLpc, LPC_FILTERORDER+1);
+
+    /* Update state in case there are multiple frame losses */
+    iLBCdec_inst->prevLag = lag;
+    iLBCdec_inst->perSquare = max_perSquare;
+  }
+
+  /* no packet loss, copy input */
+
+  else {
+    WEBRTC_SPL_MEMCPY_W16(PLCresidual, decresidual, iLBCdec_inst->blockl);
+    WEBRTC_SPL_MEMCPY_W16(PLClpc, lpc, (LPC_FILTERORDER+1));
+    iLBCdec_inst->consPLICount = 0;
+  }
+
+  /* update state */
+  iLBCdec_inst->prevPLI = PLI;
+  WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->prevLpc, PLClpc, (LPC_FILTERORDER+1));
+  WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->prevResidual, PLCresidual, iLBCdec_inst->blockl);
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/do_plc.h b/modules/audio_coding/codecs/ilbc/do_plc.h
new file mode 100644
index 0000000..37af305
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/do_plc.h
@@ -0,0 +1,41 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_DoThePlc.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DO_PLC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_DO_PLC_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  Packet loss concealment routine. Conceals a residual signal
+ *  and LP parameters. If no packet loss, update state.
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_DoThePlc(
+    int16_t *PLCresidual,  /* (o) concealed residual */
+    int16_t *PLClpc,    /* (o) concealed LP parameters */
+    int16_t PLI,     /* (i) packet loss indicator
+                                                           0 - no PL, 1 = PL */
+    int16_t *decresidual,  /* (i) decoded residual */
+    int16_t *lpc,    /* (i) decoded LPC (only used for no PL) */
+    size_t inlag,    /* (i) pitch lag */
+    IlbcDecoder *iLBCdec_inst
+    /* (i/o) decoder instance */
+                            );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/encode.c b/modules/audio_coding/codecs/ilbc/encode.c
new file mode 100644
index 0000000..3631c65
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/encode.c
@@ -0,0 +1,513 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Encode.c
+
+******************************************************************/
+
+#include <string.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/lpc_encode.h"
+#include "modules/audio_coding/codecs/ilbc/frame_classify.h"
+#include "modules/audio_coding/codecs/ilbc/state_search.h"
+#include "modules/audio_coding/codecs/ilbc/state_construct.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/cb_search.h"
+#include "modules/audio_coding/codecs/ilbc/cb_construct.h"
+#include "modules/audio_coding/codecs/ilbc/index_conv_enc.h"
+#include "modules/audio_coding/codecs/ilbc/pack_bits.h"
+#include "modules/audio_coding/codecs/ilbc/hp_input.h"
+#include "rtc_base/checks.h"
+
+#ifdef SPLIT_10MS
+#include "modules/audio_coding/codecs/ilbc/unpack_bits.h"
+#include "modules/audio_coding/codecs/ilbc/index_conv_dec.h"
+#endif
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+#include "modules/audio_coding/codecs/ilbc/swap_bytes.h"
+#endif
+
+/*----------------------------------------------------------------*
+ *  main encoder function
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_EncodeImpl(
+    uint16_t *bytes,     /* (o) encoded data bits iLBC */
+    const int16_t *block, /* (i) speech vector to encode */
+    IlbcEncoder *iLBCenc_inst /* (i/o) the general encoder
+                                     state */
+                          ){
+  size_t n, meml_gotten, Nfor;
+  size_t diff, start_pos;
+  size_t index;
+  size_t subcount, subframe;
+  size_t start_count, end_count;
+  int16_t *residual;
+  int32_t en1, en2;
+  int16_t scale, max;
+  int16_t *syntdenum;
+  int16_t *decresidual;
+  int16_t *reverseResidual;
+  int16_t *reverseDecresidual;
+  /* Stack based */
+  int16_t weightdenum[(LPC_FILTERORDER + 1)*NSUB_MAX];
+  int16_t dataVec[BLOCKL_MAX + LPC_FILTERORDER];
+  int16_t memVec[CB_MEML+CB_FILTERLEN];
+  int16_t bitsMemory[sizeof(iLBC_bits)/sizeof(int16_t)];
+  iLBC_bits *iLBCbits_inst = (iLBC_bits*)bitsMemory;
+
+
+#ifdef SPLIT_10MS
+  int16_t *weightdenumbuf = iLBCenc_inst->weightdenumbuf;
+  int16_t last_bit;
+#endif
+
+  int16_t *data = &dataVec[LPC_FILTERORDER];
+  int16_t *mem = &memVec[CB_HALFFILTERLEN];
+
+  /* Reuse som buffers to save stack memory */
+  residual = &iLBCenc_inst->lpc_buffer[LPC_LOOKBACK+BLOCKL_MAX-iLBCenc_inst->blockl];
+  syntdenum = mem;      /* syntdenum[(LPC_FILTERORDER + 1)*NSUB_MAX] and mem are used non overlapping in the code */
+  decresidual = residual;     /* Already encoded residual is overwritten by the decoded version */
+  reverseResidual = data;     /* data and reverseResidual are used non overlapping in the code */
+  reverseDecresidual = reverseResidual; /* Already encoded residual is overwritten by the decoded version */
+
+#ifdef SPLIT_10MS
+
+  WebRtcSpl_MemSetW16 (  (int16_t *) iLBCbits_inst, 0,
+                         sizeof(iLBC_bits) / sizeof(int16_t)  );
+
+  start_pos = iLBCenc_inst->start_pos;
+  diff = iLBCenc_inst->diff;
+
+  if (iLBCenc_inst->section != 0){
+    WEBRTC_SPL_MEMCPY_W16 (weightdenum, weightdenumbuf,
+                           SCRATCH_ENCODE_DATAVEC - SCRATCH_ENCODE_WEIGHTDENUM);
+    /* Un-Packetize the frame into parameters */
+    last_bit = WebRtcIlbcfix_UnpackBits (iLBCenc_inst->bytes, iLBCbits_inst, iLBCenc_inst->mode);
+    if (last_bit)
+      return;
+    /* adjust index */
+    WebRtcIlbcfix_IndexConvDec (iLBCbits_inst->cb_index);
+
+    if (iLBCenc_inst->section == 1){
+      /* Save first 80 samples of a 160/240 sample frame for 20/30msec */
+      WEBRTC_SPL_MEMCPY_W16 (iLBCenc_inst->past_samples, block, 80);
+    }
+    else{ // iLBCenc_inst->section == 2 AND mode = 30ms
+      /* Save second 80 samples of a 240 sample frame for 30msec */
+      WEBRTC_SPL_MEMCPY_W16 (iLBCenc_inst->past_samples + 80, block, 80);
+    }
+  }
+  else{ // iLBCenc_inst->section == 0
+    /* form a complete frame of 160/240 for 20msec/30msec mode */
+    WEBRTC_SPL_MEMCPY_W16 (data + (iLBCenc_inst->mode * 8) - 80, block, 80);
+    WEBRTC_SPL_MEMCPY_W16 (data, iLBCenc_inst->past_samples,
+                           (iLBCenc_inst->mode * 8) - 80);
+    iLBCenc_inst->Nfor_flag = 0;
+    iLBCenc_inst->Nback_flag = 0;
+#else
+    /* copy input block to data*/
+    WEBRTC_SPL_MEMCPY_W16(data,block,iLBCenc_inst->blockl);
+#endif
+
+    /* high pass filtering of input signal and scale down the residual (*0.5) */
+    WebRtcIlbcfix_HpInput(data, (int16_t*)WebRtcIlbcfix_kHpInCoefs,
+                          iLBCenc_inst->hpimemy, iLBCenc_inst->hpimemx,
+                          iLBCenc_inst->blockl);
+
+    /* LPC of hp filtered input data */
+    WebRtcIlbcfix_LpcEncode(syntdenum, weightdenum, iLBCbits_inst->lsf, data,
+                            iLBCenc_inst);
+
+    /* Set up state */
+    WEBRTC_SPL_MEMCPY_W16(dataVec, iLBCenc_inst->anaMem, LPC_FILTERORDER);
+
+    /* inverse filter to get residual */
+    for (n=0; n<iLBCenc_inst->nsub; n++ ) {
+      WebRtcSpl_FilterMAFastQ12(
+          &data[n*SUBL], &residual[n*SUBL],
+          &syntdenum[n*(LPC_FILTERORDER+1)],
+          LPC_FILTERORDER+1, SUBL);
+    }
+
+    /* Copy the state for next frame */
+    WEBRTC_SPL_MEMCPY_W16(iLBCenc_inst->anaMem, &data[iLBCenc_inst->blockl-LPC_FILTERORDER], LPC_FILTERORDER);
+
+    /* find state location */
+
+    iLBCbits_inst->startIdx = WebRtcIlbcfix_FrameClassify(iLBCenc_inst,residual);
+
+    /* check if state should be in first or last part of the
+       two subframes */
+
+    index = (iLBCbits_inst->startIdx-1)*SUBL;
+    max=WebRtcSpl_MaxAbsValueW16(&residual[index], 2*SUBL);
+    scale = WebRtcSpl_GetSizeInBits((uint32_t)(max * max));
+
+    /* Scale to maximum 25 bits so that the MAC won't cause overflow */
+    scale = scale - 25;
+    if(scale < 0) {
+      scale = 0;
+    }
+
+    diff = STATE_LEN - iLBCenc_inst->state_short_len;
+    en1=WebRtcSpl_DotProductWithScale(&residual[index], &residual[index],
+                                      iLBCenc_inst->state_short_len, scale);
+    index += diff;
+    en2=WebRtcSpl_DotProductWithScale(&residual[index], &residual[index],
+                                      iLBCenc_inst->state_short_len, scale);
+    if (en1 > en2) {
+      iLBCbits_inst->state_first = 1;
+      start_pos = (iLBCbits_inst->startIdx-1)*SUBL;
+    } else {
+      iLBCbits_inst->state_first = 0;
+      start_pos = (iLBCbits_inst->startIdx-1)*SUBL + diff;
+    }
+
+    /* scalar quantization of state */
+
+    WebRtcIlbcfix_StateSearch(iLBCenc_inst, iLBCbits_inst, &residual[start_pos],
+                              &syntdenum[(iLBCbits_inst->startIdx-1)*(LPC_FILTERORDER+1)],
+                              &weightdenum[(iLBCbits_inst->startIdx-1)*(LPC_FILTERORDER+1)]);
+
+    WebRtcIlbcfix_StateConstruct(iLBCbits_inst->idxForMax, iLBCbits_inst->idxVec,
+                                 &syntdenum[(iLBCbits_inst->startIdx-1)*(LPC_FILTERORDER+1)],
+                                 &decresidual[start_pos], iLBCenc_inst->state_short_len
+                                 );
+
+    /* predictive quantization in state */
+
+    if (iLBCbits_inst->state_first) { /* put adaptive part in the end */
+
+      /* setup memory */
+
+      WebRtcSpl_MemSetW16(mem, 0, CB_MEML - iLBCenc_inst->state_short_len);
+      WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-iLBCenc_inst->state_short_len,
+                            decresidual+start_pos, iLBCenc_inst->state_short_len);
+
+      /* encode subframes */
+
+      WebRtcIlbcfix_CbSearch(iLBCenc_inst, iLBCbits_inst->cb_index, iLBCbits_inst->gain_index,
+                             &residual[start_pos+iLBCenc_inst->state_short_len],
+                             mem+CB_MEML-ST_MEM_L_TBL, ST_MEM_L_TBL, diff,
+                             &weightdenum[iLBCbits_inst->startIdx*(LPC_FILTERORDER+1)], 0);
+
+      /* construct decoded vector */
+
+      RTC_CHECK(WebRtcIlbcfix_CbConstruct(
+          &decresidual[start_pos + iLBCenc_inst->state_short_len],
+          iLBCbits_inst->cb_index, iLBCbits_inst->gain_index,
+          mem + CB_MEML - ST_MEM_L_TBL, ST_MEM_L_TBL, diff));
+
+    }
+    else { /* put adaptive part in the beginning */
+
+      /* create reversed vectors for prediction */
+
+      WebRtcSpl_MemCpyReversedOrder(&reverseResidual[diff-1],
+                                    &residual[(iLBCbits_inst->startIdx+1)*SUBL-STATE_LEN], diff);
+
+      /* setup memory */
+
+      meml_gotten = iLBCenc_inst->state_short_len;
+      WebRtcSpl_MemCpyReversedOrder(&mem[CB_MEML-1], &decresidual[start_pos], meml_gotten);
+      WebRtcSpl_MemSetW16(mem, 0, CB_MEML - iLBCenc_inst->state_short_len);
+
+      /* encode subframes */
+      WebRtcIlbcfix_CbSearch(iLBCenc_inst, iLBCbits_inst->cb_index, iLBCbits_inst->gain_index,
+                             reverseResidual, mem+CB_MEML-ST_MEM_L_TBL, ST_MEM_L_TBL, diff,
+                             &weightdenum[(iLBCbits_inst->startIdx-1)*(LPC_FILTERORDER+1)],
+                             0);
+
+      /* construct decoded vector */
+      RTC_CHECK(WebRtcIlbcfix_CbConstruct(
+            reverseDecresidual, iLBCbits_inst->cb_index,
+            iLBCbits_inst->gain_index, mem + CB_MEML - ST_MEM_L_TBL,
+            ST_MEM_L_TBL, diff));
+
+      /* get decoded residual from reversed vector */
+
+      WebRtcSpl_MemCpyReversedOrder(&decresidual[start_pos-1], reverseDecresidual, diff);
+    }
+
+#ifdef SPLIT_10MS
+    iLBCenc_inst->start_pos = start_pos;
+    iLBCenc_inst->diff = diff;
+    iLBCenc_inst->section++;
+    /* adjust index */
+    WebRtcIlbcfix_IndexConvEnc (iLBCbits_inst->cb_index);
+    /* Packetize the parameters into the frame */
+    WebRtcIlbcfix_PackBits (iLBCenc_inst->bytes, iLBCbits_inst, iLBCenc_inst->mode);
+    WEBRTC_SPL_MEMCPY_W16 (weightdenumbuf, weightdenum,
+                           SCRATCH_ENCODE_DATAVEC - SCRATCH_ENCODE_WEIGHTDENUM);
+    return;
+  }
+#endif
+
+  /* forward prediction of subframes */
+
+  Nfor = iLBCenc_inst->nsub-iLBCbits_inst->startIdx-1;
+
+  /* counter for predicted subframes */
+#ifdef SPLIT_10MS
+  if (iLBCenc_inst->mode == 20)
+  {
+    subcount = 1;
+  }
+  if (iLBCenc_inst->mode == 30)
+  {
+    if (iLBCenc_inst->section == 1)
+    {
+      subcount = 1;
+    }
+    if (iLBCenc_inst->section == 2)
+    {
+      subcount = 3;
+    }
+  }
+#else
+  subcount=1;
+#endif
+
+  if( Nfor > 0 ){
+
+    /* setup memory */
+
+    WebRtcSpl_MemSetW16(mem, 0, CB_MEML-STATE_LEN);
+    WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-STATE_LEN,
+                          decresidual+(iLBCbits_inst->startIdx-1)*SUBL, STATE_LEN);
+
+#ifdef SPLIT_10MS
+    if (iLBCenc_inst->Nfor_flag > 0)
+    {
+      for (subframe = 0; subframe < WEBRTC_SPL_MIN (Nfor, 2); subframe++)
+      {
+        /* update memory */
+        WEBRTC_SPL_MEMCPY_W16 (mem, mem + SUBL, (CB_MEML - SUBL));
+        WEBRTC_SPL_MEMCPY_W16 (mem + CB_MEML - SUBL,
+                               &decresidual[(iLBCbits_inst->startIdx + 1 +
+                                             subframe) * SUBL], SUBL);
+      }
+    }
+
+    iLBCenc_inst->Nfor_flag++;
+
+    if (iLBCenc_inst->mode == 20)
+    {
+      start_count = 0;
+      end_count = Nfor;
+    }
+    if (iLBCenc_inst->mode == 30)
+    {
+      if (iLBCenc_inst->section == 1)
+      {
+        start_count = 0;
+        end_count = WEBRTC_SPL_MIN (Nfor, (size_t)2);
+      }
+      if (iLBCenc_inst->section == 2)
+      {
+        start_count = WEBRTC_SPL_MIN (Nfor, (size_t)2);
+        end_count = Nfor;
+      }
+    }
+#else
+    start_count = 0;
+    end_count = Nfor;
+#endif
+
+    /* loop over subframes to encode */
+
+    for (subframe = start_count; subframe < end_count; subframe++){
+
+      /* encode subframe */
+
+      WebRtcIlbcfix_CbSearch(iLBCenc_inst, iLBCbits_inst->cb_index+subcount*CB_NSTAGES,
+                             iLBCbits_inst->gain_index+subcount*CB_NSTAGES,
+                             &residual[(iLBCbits_inst->startIdx+1+subframe)*SUBL],
+                             mem, MEM_LF_TBL, SUBL,
+                             &weightdenum[(iLBCbits_inst->startIdx+1+subframe)*(LPC_FILTERORDER+1)],
+                             subcount);
+
+      /* construct decoded vector */
+      RTC_CHECK(WebRtcIlbcfix_CbConstruct(
+            &decresidual[(iLBCbits_inst->startIdx + 1 + subframe) * SUBL],
+            iLBCbits_inst->cb_index + subcount * CB_NSTAGES,
+            iLBCbits_inst->gain_index + subcount * CB_NSTAGES, mem, MEM_LF_TBL,
+            SUBL));
+
+      /* update memory */
+
+      memmove(mem, mem + SUBL, (CB_MEML - SUBL) * sizeof(*mem));
+      WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-SUBL,
+                            &decresidual[(iLBCbits_inst->startIdx+1+subframe)*SUBL], SUBL);
+
+      subcount++;
+    }
+  }
+
+#ifdef SPLIT_10MS
+  if ((iLBCenc_inst->section == 1) &&
+      (iLBCenc_inst->mode == 30) && (Nfor > 0) && (end_count == 2))
+  {
+    iLBCenc_inst->section++;
+    /* adjust index */
+    WebRtcIlbcfix_IndexConvEnc (iLBCbits_inst->cb_index);
+    /* Packetize the parameters into the frame */
+    WebRtcIlbcfix_PackBits (iLBCenc_inst->bytes, iLBCbits_inst, iLBCenc_inst->mode);
+    WEBRTC_SPL_MEMCPY_W16 (weightdenumbuf, weightdenum,
+                           SCRATCH_ENCODE_DATAVEC - SCRATCH_ENCODE_WEIGHTDENUM);
+    return;
+  }
+#endif
+
+  /* backward prediction of subframes */
+
+  if (iLBCbits_inst->startIdx > 1) {
+
+    /* create reverse order vectors
+       (The decresidual does not need to be copied since it is
+       contained in the same vector as the residual)
+    */
+
+    size_t Nback = iLBCbits_inst->startIdx - 1;
+    WebRtcSpl_MemCpyReversedOrder(&reverseResidual[Nback*SUBL-1], residual, Nback*SUBL);
+
+    /* setup memory */
+
+    meml_gotten = SUBL*(iLBCenc_inst->nsub+1-iLBCbits_inst->startIdx);
+    if( meml_gotten > CB_MEML ) {
+      meml_gotten=CB_MEML;
+    }
+
+    WebRtcSpl_MemCpyReversedOrder(&mem[CB_MEML-1], &decresidual[Nback*SUBL], meml_gotten);
+    WebRtcSpl_MemSetW16(mem, 0, CB_MEML - meml_gotten);
+
+#ifdef SPLIT_10MS
+    if (iLBCenc_inst->Nback_flag > 0)
+    {
+      for (subframe = 0; subframe < WEBRTC_SPL_MAX (2 - Nfor, 0); subframe++)
+      {
+        /* update memory */
+        WEBRTC_SPL_MEMCPY_W16 (mem, mem + SUBL, (CB_MEML - SUBL));
+        WEBRTC_SPL_MEMCPY_W16 (mem + CB_MEML - SUBL,
+                               &reverseDecresidual[subframe * SUBL], SUBL);
+      }
+    }
+
+    iLBCenc_inst->Nback_flag++;
+
+
+    if (iLBCenc_inst->mode == 20)
+    {
+      start_count = 0;
+      end_count = Nback;
+    }
+    if (iLBCenc_inst->mode == 30)
+    {
+      if (iLBCenc_inst->section == 1)
+      {
+        start_count = 0;
+        end_count = (Nfor >= 2) ? 0 : (2 - NFor);
+      }
+      if (iLBCenc_inst->section == 2)
+      {
+        start_count = (Nfor >= 2) ? 0 : (2 - NFor);
+        end_count = Nback;
+      }
+    }
+#else
+    start_count = 0;
+    end_count = Nback;
+#endif
+
+    /* loop over subframes to encode */
+
+    for (subframe = start_count; subframe < end_count; subframe++){
+
+      /* encode subframe */
+
+      WebRtcIlbcfix_CbSearch(iLBCenc_inst, iLBCbits_inst->cb_index+subcount*CB_NSTAGES,
+                             iLBCbits_inst->gain_index+subcount*CB_NSTAGES, &reverseResidual[subframe*SUBL],
+                             mem, MEM_LF_TBL, SUBL,
+                             &weightdenum[(iLBCbits_inst->startIdx-2-subframe)*(LPC_FILTERORDER+1)],
+                             subcount);
+
+      /* construct decoded vector */
+      RTC_CHECK(WebRtcIlbcfix_CbConstruct(
+            &reverseDecresidual[subframe * SUBL],
+            iLBCbits_inst->cb_index + subcount * CB_NSTAGES,
+            iLBCbits_inst->gain_index + subcount * CB_NSTAGES, mem, MEM_LF_TBL,
+            SUBL));
+
+      /* update memory */
+      memmove(mem, mem + SUBL, (CB_MEML - SUBL) * sizeof(*mem));
+      WEBRTC_SPL_MEMCPY_W16(mem+CB_MEML-SUBL,
+                            &reverseDecresidual[subframe*SUBL], SUBL);
+
+      subcount++;
+
+    }
+
+    /* get decoded residual from reversed vector */
+
+    WebRtcSpl_MemCpyReversedOrder(&decresidual[SUBL*Nback-1], reverseDecresidual, SUBL*Nback);
+  }
+  /* end encoding part */
+
+  /* adjust index */
+
+  WebRtcIlbcfix_IndexConvEnc(iLBCbits_inst->cb_index);
+
+  /* Packetize the parameters into the frame */
+
+#ifdef SPLIT_10MS
+  if( (iLBCenc_inst->mode==30) && (iLBCenc_inst->section==1) ){
+    WebRtcIlbcfix_PackBits(iLBCenc_inst->bytes, iLBCbits_inst, iLBCenc_inst->mode);
+  }
+  else{
+    WebRtcIlbcfix_PackBits(bytes, iLBCbits_inst, iLBCenc_inst->mode);
+  }
+#else
+  WebRtcIlbcfix_PackBits(bytes, iLBCbits_inst, iLBCenc_inst->mode);
+#endif
+
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+  /* Swap bytes for LITTLE ENDIAN since the packbits()
+     function assumes BIG_ENDIAN machine */
+#ifdef SPLIT_10MS
+  if (( (iLBCenc_inst->section == 1) && (iLBCenc_inst->mode == 20) ) ||
+      ( (iLBCenc_inst->section == 2) && (iLBCenc_inst->mode == 30) )){
+    WebRtcIlbcfix_SwapBytes(bytes, iLBCenc_inst->no_of_words, bytes);
+  }
+#else
+  WebRtcIlbcfix_SwapBytes(bytes, iLBCenc_inst->no_of_words, bytes);
+#endif
+#endif
+
+#ifdef SPLIT_10MS
+  if (subcount == (iLBCenc_inst->nsub - 1))
+  {
+    iLBCenc_inst->section = 0;
+  }
+  else
+  {
+    iLBCenc_inst->section++;
+    WEBRTC_SPL_MEMCPY_W16 (weightdenumbuf, weightdenum,
+                           SCRATCH_ENCODE_DATAVEC - SCRATCH_ENCODE_WEIGHTDENUM);
+  }
+#endif
+
+}
diff --git a/modules/audio_coding/codecs/ilbc/encode.h b/modules/audio_coding/codecs/ilbc/encode.h
new file mode 100644
index 0000000..8a3928c
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/encode.h
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Encode.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENCODE_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENCODE_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  main encoder function
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_EncodeImpl(
+    uint16_t *bytes,     /* (o) encoded data bits iLBC */
+    const int16_t *block, /* (i) speech vector to encode */
+    IlbcEncoder *iLBCenc_inst /* (i/o) the general encoder
+                                           state */
+                          );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/energy_inverse.c b/modules/audio_coding/codecs/ilbc/energy_inverse.c
new file mode 100644
index 0000000..7f00254
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/energy_inverse.c
@@ -0,0 +1,46 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_EnergyInverse.c
+
+******************************************************************/
+
+/* Inverses the in vector in into Q29 domain */
+
+#include "modules/audio_coding/codecs/ilbc/energy_inverse.h"
+
+void WebRtcIlbcfix_EnergyInverse(
+    int16_t *energy,    /* (i/o) Energy and inverse
+                                                           energy (in Q29) */
+    size_t noOfEnergies)  /* (i)   The length of the energy
+                                   vector */
+{
+  int32_t Nom=(int32_t)0x1FFFFFFF;
+  int16_t *energyPtr;
+  size_t i;
+
+  /* Set the minimum energy value to 16384 to avoid overflow */
+  energyPtr=energy;
+  for (i=0; i<noOfEnergies; i++) {
+    (*energyPtr)=WEBRTC_SPL_MAX((*energyPtr),16384);
+    energyPtr++;
+  }
+
+  /* Calculate inverse energy in Q29 */
+  energyPtr=energy;
+  for (i=0; i<noOfEnergies; i++) {
+    (*energyPtr) = (int16_t)WebRtcSpl_DivW32W16(Nom, (*energyPtr));
+    energyPtr++;
+  }
+}
diff --git a/modules/audio_coding/codecs/ilbc/energy_inverse.h b/modules/audio_coding/codecs/ilbc/energy_inverse.h
new file mode 100644
index 0000000..0404f7d
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/energy_inverse.h
@@ -0,0 +1,32 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_EnergyInverse.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENERGY_INVERSE_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENERGY_INVERSE_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/* Inverses the in vector in into Q29 domain */
+
+void WebRtcIlbcfix_EnergyInverse(
+    int16_t *energy,     /* (i/o) Energy and inverse
+                                                                   energy (in Q29) */
+    size_t noOfEnergies);   /* (i)   The length of the energy
+                                   vector */
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/enh_upsample.c b/modules/audio_coding/codecs/ilbc/enh_upsample.c
new file mode 100644
index 0000000..0a5f044
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/enh_upsample.c
@@ -0,0 +1,110 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_EnhUpsample.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ * upsample finite array assuming zeros outside bounds
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_EnhUpsample(
+    int32_t *useq1, /* (o) upsampled output sequence */
+    int16_t *seq1 /* (i) unupsampled sequence */
+                                ){
+  int j;
+  int32_t *pu1, *pu11;
+  int16_t *ps, *w16tmp;
+  const int16_t *pp;
+
+  /* filtering: filter overhangs left side of sequence */
+  pu1=useq1;
+  for (j=0;j<ENH_UPS0; j++) {
+    pu11=pu1;
+    /* i = 2 */
+    pp=WebRtcIlbcfix_kEnhPolyPhaser[j]+1;
+    ps=seq1+2;
+    *pu11 = (*ps--) * *pp++;
+    *pu11 += (*ps--) * *pp++;
+    *pu11 += (*ps--) * *pp++;
+    pu11+=ENH_UPS0;
+    /* i = 3 */
+    pp=WebRtcIlbcfix_kEnhPolyPhaser[j]+1;
+    ps=seq1+3;
+    *pu11 = (*ps--) * *pp++;
+    *pu11 += (*ps--) * *pp++;
+    *pu11 += (*ps--) * *pp++;
+    *pu11 += (*ps--) * *pp++;
+    pu11+=ENH_UPS0;
+    /* i = 4 */
+    pp=WebRtcIlbcfix_kEnhPolyPhaser[j]+1;
+    ps=seq1+4;
+    *pu11 = (*ps--) * *pp++;
+    *pu11 += (*ps--) * *pp++;
+    *pu11 += (*ps--) * *pp++;
+    *pu11 += (*ps--) * *pp++;
+    *pu11 += (*ps--) * *pp++;
+    pu1++;
+  }
+
+  /* filtering: simple convolution=inner products
+     (not needed since the sequence is so short)
+  */
+
+  /* filtering: filter overhangs right side of sequence */
+
+  /* Code with loops, which is equivivalent to the expanded version below
+
+     filterlength = 5;
+     hf1 = 2;
+     for(j=0;j<ENH_UPS0; j++){
+     pu = useq1 + (filterlength-hfl)*ENH_UPS0 + j;
+     for(i=1; i<=hfl; i++){
+     *pu=0;
+     pp = polyp[j]+i;
+     ps = seq1+dim1-1;
+     for(k=0;k<filterlength-i;k++) {
+     *pu += (*ps--) * *pp++;
+     }
+     pu+=ENH_UPS0;
+     }
+     }
+  */
+  pu1 = useq1 + 12;
+  w16tmp = seq1+4;
+  for (j=0;j<ENH_UPS0; j++) {
+    pu11 = pu1;
+    /* i = 1 */
+    pp = WebRtcIlbcfix_kEnhPolyPhaser[j]+2;
+    ps = w16tmp;
+    *pu11 = (*ps--) * *pp++;
+    *pu11 += (*ps--) * *pp++;
+    *pu11 += (*ps--) * *pp++;
+    *pu11 += (*ps--) * *pp++;
+    pu11+=ENH_UPS0;
+    /* i = 2 */
+    pp = WebRtcIlbcfix_kEnhPolyPhaser[j]+3;
+    ps = w16tmp;
+    *pu11 = (*ps--) * *pp++;
+    *pu11 += (*ps--) * *pp++;
+    *pu11 += (*ps--) * *pp++;
+    pu11+=ENH_UPS0;
+
+    pu1++;
+  }
+}
diff --git a/modules/audio_coding/codecs/ilbc/enh_upsample.h b/modules/audio_coding/codecs/ilbc/enh_upsample.h
new file mode 100644
index 0000000..e9a68f4
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/enh_upsample.h
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_EnhUpsample.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENH_UPSAMPLE_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENH_UPSAMPLE_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * upsample finite array assuming zeros outside bounds
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_EnhUpsample(
+    int32_t *useq1, /* (o) upsampled output sequence */
+    int16_t *seq1 /* (i) unupsampled sequence */
+                                );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/enhancer.c b/modules/audio_coding/codecs/ilbc/enhancer.c
new file mode 100644
index 0000000..d5cd977
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/enhancer.c
@@ -0,0 +1,51 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Enhancer.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/get_sync_seq.h"
+#include "modules/audio_coding/codecs/ilbc/smooth.h"
+
+/*----------------------------------------------------------------*
+ * perform enhancement on idata+centerStartPos through
+ * idata+centerStartPos+ENH_BLOCKL-1
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Enhancer(
+    int16_t *odata,   /* (o) smoothed block, dimension blockl */
+    int16_t *idata,   /* (i) data buffer used for enhancing */
+    size_t idatal,   /* (i) dimension idata */
+    size_t centerStartPos, /* (i) first sample current block within idata */
+    size_t *period,   /* (i) pitch period array (pitch bward-in time) */
+    const size_t *plocs,   /* (i) locations where period array values valid */
+    size_t periodl   /* (i) dimension of period and plocs */
+                            ){
+  /* Stack based */
+  int16_t surround[ENH_BLOCKL];
+
+  WebRtcSpl_MemSetW16(surround, 0, ENH_BLOCKL);
+
+  /* get said second sequence of segments */
+
+  WebRtcIlbcfix_GetSyncSeq(idata, idatal, centerStartPos, period, plocs,
+                           periodl, ENH_HL, surround);
+
+  /* compute the smoothed output from said second sequence */
+
+  WebRtcIlbcfix_Smooth(odata, idata + centerStartPos, surround);
+}
diff --git a/modules/audio_coding/codecs/ilbc/enhancer.h b/modules/audio_coding/codecs/ilbc/enhancer.h
new file mode 100644
index 0000000..7e20eb1
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/enhancer.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Enhancer.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENHANCER_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENHANCER_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * perform enhancement on idata+centerStartPos through
+ * idata+centerStartPos+ENH_BLOCKL-1
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Enhancer(
+    int16_t *odata,   /* (o) smoothed block, dimension blockl */
+    int16_t *idata,   /* (i) data buffer used for enhancing */
+    size_t idatal,   /* (i) dimension idata */
+    size_t centerStartPos, /* (i) first sample current block within idata */
+    size_t *period,   /* (i) pitch period array (pitch bward-in time) */
+    const size_t *plocs,   /* (i) locations where period array values valid */
+    size_t periodl   /* (i) dimension of period and plocs */
+                            );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/enhancer_interface.c b/modules/audio_coding/codecs/ilbc/enhancer_interface.c
new file mode 100644
index 0000000..f85df6d
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/enhancer_interface.c
@@ -0,0 +1,377 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_EnhancerInterface.c
+
+******************************************************************/
+
+#include <string.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/xcorr_coef.h"
+#include "modules/audio_coding/codecs/ilbc/enhancer.h"
+#include "modules/audio_coding/codecs/ilbc/hp_output.h"
+
+
+
+/*----------------------------------------------------------------*
+ * interface for enhancer
+ *---------------------------------------------------------------*/
+
+size_t  // (o) Estimated lag in end of in[]
+    WebRtcIlbcfix_EnhancerInterface(
+        int16_t* out,                 // (o) enhanced signal
+        const int16_t* in,            // (i) unenhanced signal
+        IlbcDecoder* iLBCdec_inst) {  // (i) buffers etc
+  size_t iblock;
+  size_t lag=20, tlag=20;
+  size_t inLen=iLBCdec_inst->blockl+120;
+  int16_t scale, scale1;
+  size_t plc_blockl;
+  int16_t *enh_buf;
+  size_t *enh_period;
+  int32_t tmp1, tmp2, max;
+  size_t new_blocks;
+  int16_t *enh_bufPtr1;
+  size_t i;
+  size_t k;
+  int16_t EnChange;
+  int16_t SqrtEnChange;
+  int16_t inc;
+  int16_t win;
+  int16_t *tmpW16ptr;
+  size_t startPos;
+  int16_t *plc_pred;
+  const int16_t *target, *regressor;
+  int16_t max16;
+  int shifts;
+  int32_t ener;
+  int16_t enerSh;
+  int16_t corrSh;
+  size_t ind;
+  int16_t sh;
+  size_t start, stop;
+  /* Stack based */
+  int16_t totsh[3];
+  int16_t downsampled[(BLOCKL_MAX+120)>>1]; /* length 180 */
+  int32_t corr32[50];
+  int32_t corrmax[3];
+  int16_t corr16[3];
+  int16_t en16[3];
+  size_t lagmax[3];
+
+  plc_pred = downsampled; /* Reuse memory since plc_pred[ENH_BLOCKL] and
+                              downsampled are non overlapping */
+  enh_buf=iLBCdec_inst->enh_buf;
+  enh_period=iLBCdec_inst->enh_period;
+
+  /* Copy in the new data into the enhancer buffer */
+  memmove(enh_buf, &enh_buf[iLBCdec_inst->blockl],
+          (ENH_BUFL - iLBCdec_inst->blockl) * sizeof(*enh_buf));
+
+  WEBRTC_SPL_MEMCPY_W16(&enh_buf[ENH_BUFL-iLBCdec_inst->blockl], in,
+                        iLBCdec_inst->blockl);
+
+  /* Set variables that are dependent on frame size */
+  if (iLBCdec_inst->mode==30) {
+    plc_blockl=ENH_BLOCKL;
+    new_blocks=3;
+    startPos=320;  /* Start position for enhancement
+                     (640-new_blocks*ENH_BLOCKL-80) */
+  } else {
+    plc_blockl=40;
+    new_blocks=2;
+    startPos=440;  /* Start position for enhancement
+                    (640-new_blocks*ENH_BLOCKL-40) */
+  }
+
+  /* Update the pitch prediction for each enhancer block, move the old ones */
+  memmove(enh_period, &enh_period[new_blocks],
+          (ENH_NBLOCKS_TOT - new_blocks) * sizeof(*enh_period));
+
+  WebRtcSpl_DownsampleFast(
+      enh_buf+ENH_BUFL-inLen,    /* Input samples */
+      inLen + ENH_BUFL_FILTEROVERHEAD,
+      downsampled,
+      inLen / 2,
+      (int16_t*)WebRtcIlbcfix_kLpFiltCoefs,  /* Coefficients in Q12 */
+      FILTERORDER_DS_PLUS1,    /* Length of filter (order-1) */
+      FACTOR_DS,
+      DELAY_DS);
+
+  /* Estimate the pitch in the down sampled domain. */
+  for(iblock = 0; iblock<new_blocks; iblock++){
+
+    /* references */
+    target = downsampled + 60 + iblock * ENH_BLOCKL_HALF;
+    regressor = target - 10;
+
+    /* scaling */
+    max16 = WebRtcSpl_MaxAbsValueW16(&regressor[-50], ENH_BLOCKL_HALF + 50 - 1);
+    shifts = WebRtcSpl_GetSizeInBits((uint32_t)(max16 * max16)) - 25;
+    shifts = WEBRTC_SPL_MAX(0, shifts);
+
+    /* compute cross correlation */
+    WebRtcSpl_CrossCorrelation(corr32, target, regressor, ENH_BLOCKL_HALF, 50,
+                               shifts, -1);
+
+    /* Find 3 highest correlations that should be compared for the
+       highest (corr*corr)/ener */
+
+    for (i=0;i<2;i++) {
+      lagmax[i] = WebRtcSpl_MaxIndexW32(corr32, 50);
+      corrmax[i] = corr32[lagmax[i]];
+      start = WEBRTC_SPL_MAX(2, lagmax[i]) - 2;
+      stop = WEBRTC_SPL_MIN(47, lagmax[i]) + 2;
+      for (k = start; k <= stop; k++) {
+        corr32[k] = 0;
+      }
+    }
+    lagmax[2] = WebRtcSpl_MaxIndexW32(corr32, 50);
+    corrmax[2] = corr32[lagmax[2]];
+
+    /* Calculate normalized corr^2 and ener */
+    for (i=0;i<3;i++) {
+      corrSh = 15-WebRtcSpl_GetSizeInBits(corrmax[i]);
+      ener = WebRtcSpl_DotProductWithScale(regressor - lagmax[i],
+                                           regressor - lagmax[i],
+                                           ENH_BLOCKL_HALF, shifts);
+      enerSh = 15-WebRtcSpl_GetSizeInBits(ener);
+      corr16[i] = (int16_t)WEBRTC_SPL_SHIFT_W32(corrmax[i], corrSh);
+      corr16[i] = (int16_t)((corr16[i] * corr16[i]) >> 16);
+      en16[i] = (int16_t)WEBRTC_SPL_SHIFT_W32(ener, enerSh);
+      totsh[i] = enerSh - 2 * corrSh;
+    }
+
+    /* Compare lagmax[0..3] for the (corr^2)/ener criteria */
+    ind = 0;
+    for (i=1; i<3; i++) {
+      if (totsh[ind] > totsh[i]) {
+        sh = WEBRTC_SPL_MIN(31, totsh[ind]-totsh[i]);
+        if (corr16[ind] * en16[i] < (corr16[i] * en16[ind]) >> sh) {
+          ind = i;
+        }
+      } else {
+        sh = WEBRTC_SPL_MIN(31, totsh[i]-totsh[ind]);
+        if ((corr16[ind] * en16[i]) >> sh < corr16[i] * en16[ind]) {
+          ind = i;
+        }
+      }
+    }
+
+    lag = lagmax[ind] + 10;
+
+    /* Store the estimated lag in the non-downsampled domain */
+    enh_period[ENH_NBLOCKS_TOT - new_blocks + iblock] = lag * 8;
+
+    /* Store the estimated lag for backward PLC */
+    if (iLBCdec_inst->prev_enh_pl==1) {
+      if (!iblock) {
+        tlag = lag * 2;
+      }
+    } else {
+      if (iblock==1) {
+        tlag = lag * 2;
+      }
+    }
+
+    lag *= 2;
+  }
+
+  if ((iLBCdec_inst->prev_enh_pl==1)||(iLBCdec_inst->prev_enh_pl==2)) {
+
+    /* Calculate the best lag of the new frame
+       This is used to interpolate backwards and mix with the PLC'd data
+    */
+
+    /* references */
+    target=in;
+    regressor=in+tlag-1;
+
+    /* scaling */
+    max16 = WebRtcSpl_MaxAbsValueW16(regressor, plc_blockl + 3 - 1);
+    if (max16>5000)
+      shifts=2;
+    else
+      shifts=0;
+
+    /* compute cross correlation */
+    WebRtcSpl_CrossCorrelation(corr32, target, regressor, plc_blockl, 3, shifts,
+                               1);
+
+    /* find lag */
+    lag=WebRtcSpl_MaxIndexW32(corr32, 3);
+    lag+=tlag-1;
+
+    /* Copy the backward PLC to plc_pred */
+
+    if (iLBCdec_inst->prev_enh_pl==1) {
+      if (lag>plc_blockl) {
+        WEBRTC_SPL_MEMCPY_W16(plc_pred, &in[lag-plc_blockl], plc_blockl);
+      } else {
+        WEBRTC_SPL_MEMCPY_W16(&plc_pred[plc_blockl-lag], in, lag);
+        WEBRTC_SPL_MEMCPY_W16(
+            plc_pred, &enh_buf[ENH_BUFL-iLBCdec_inst->blockl-plc_blockl+lag],
+            (plc_blockl-lag));
+      }
+    } else {
+      size_t pos;
+
+      pos = plc_blockl;
+
+      while (lag<pos) {
+        WEBRTC_SPL_MEMCPY_W16(&plc_pred[pos-lag], in, lag);
+        pos = pos - lag;
+      }
+      WEBRTC_SPL_MEMCPY_W16(plc_pred, &in[lag-pos], pos);
+
+    }
+
+    if (iLBCdec_inst->prev_enh_pl==1) {
+      /* limit energy change
+         if energy in backward PLC is more than 4 times higher than the forward
+         PLC, then reduce the energy in the backward PLC vector:
+         sample 1...len-16 set energy of the to 4 times forward PLC
+         sample len-15..len interpolate between 4 times fw PLC and bw PLC energy
+
+         Note: Compared to floating point code there is a slight change,
+         the window is 16 samples long instead of 10 samples to simplify the
+         calculations
+      */
+
+      max=WebRtcSpl_MaxAbsValueW16(
+          &enh_buf[ENH_BUFL-iLBCdec_inst->blockl-plc_blockl], plc_blockl);
+      max16=WebRtcSpl_MaxAbsValueW16(plc_pred, plc_blockl);
+      max = WEBRTC_SPL_MAX(max, max16);
+      scale=22-(int16_t)WebRtcSpl_NormW32(max);
+      scale=WEBRTC_SPL_MAX(scale,0);
+
+      tmp2 = WebRtcSpl_DotProductWithScale(
+          &enh_buf[ENH_BUFL-iLBCdec_inst->blockl-plc_blockl],
+          &enh_buf[ENH_BUFL-iLBCdec_inst->blockl-plc_blockl],
+          plc_blockl, scale);
+      tmp1 = WebRtcSpl_DotProductWithScale(plc_pred, plc_pred,
+                                           plc_blockl, scale);
+
+      /* Check the energy difference */
+      if ((tmp1>0)&&((tmp1>>2)>tmp2)) {
+        /* EnChange is now guaranteed to be <0.5
+           Calculate EnChange=tmp2/tmp1 in Q16
+        */
+
+        scale1=(int16_t)WebRtcSpl_NormW32(tmp1);
+        tmp1=WEBRTC_SPL_SHIFT_W32(tmp1, (scale1-16)); /* using 15 bits */
+
+        tmp2=WEBRTC_SPL_SHIFT_W32(tmp2, (scale1));
+        EnChange = (int16_t)WebRtcSpl_DivW32W16(tmp2,
+                                                      (int16_t)tmp1);
+
+        /* Calculate the Sqrt of the energy in Q15 ((14+16)/2) */
+        SqrtEnChange = (int16_t)WebRtcSpl_SqrtFloor(EnChange << 14);
+
+
+        /* Multiply first part of vector with 2*SqrtEnChange */
+        WebRtcSpl_ScaleVector(plc_pred, plc_pred, SqrtEnChange, plc_blockl-16,
+                              14);
+
+        /* Calculate increase parameter for window part (16 last samples) */
+        /* (1-2*SqrtEnChange)/16 in Q15 */
+        inc = 2048 - (SqrtEnChange >> 3);
+
+        win=0;
+        tmpW16ptr=&plc_pred[plc_blockl-16];
+
+        for (i=16;i>0;i--) {
+          *tmpW16ptr = (int16_t)(
+              (*tmpW16ptr * (SqrtEnChange + (win >> 1))) >> 14);
+          /* multiply by (2.0*SqrtEnChange+win) */
+
+          win += inc;
+          tmpW16ptr++;
+        }
+      }
+
+      /* Make the linear interpolation between the forward PLC'd data
+         and the backward PLC'd data (from the new frame)
+      */
+
+      if (plc_blockl==40) {
+        inc=400; /* 1/41 in Q14 */
+      } else { /* plc_blockl==80 */
+        inc=202; /* 1/81 in Q14 */
+      }
+      win=0;
+      enh_bufPtr1=&enh_buf[ENH_BUFL-1-iLBCdec_inst->blockl];
+      for (i=0; i<plc_blockl; i++) {
+        win+=inc;
+        *enh_bufPtr1 = (int16_t)((*enh_bufPtr1 * win) >> 14);
+        *enh_bufPtr1 += (int16_t)(
+            ((16384 - win) * plc_pred[plc_blockl - 1 - i]) >> 14);
+        enh_bufPtr1--;
+      }
+    } else {
+      int16_t *synt = &downsampled[LPC_FILTERORDER];
+
+      enh_bufPtr1=&enh_buf[ENH_BUFL-iLBCdec_inst->blockl-plc_blockl];
+      WEBRTC_SPL_MEMCPY_W16(enh_bufPtr1, plc_pred, plc_blockl);
+
+      /* Clear fileter memory */
+      WebRtcSpl_MemSetW16(iLBCdec_inst->syntMem, 0, LPC_FILTERORDER);
+      WebRtcSpl_MemSetW16(iLBCdec_inst->hpimemy, 0, 4);
+      WebRtcSpl_MemSetW16(iLBCdec_inst->hpimemx, 0, 2);
+
+      /* Initialize filter memory by filtering through 2 lags */
+      WEBRTC_SPL_MEMCPY_W16(&synt[-LPC_FILTERORDER], iLBCdec_inst->syntMem,
+                            LPC_FILTERORDER);
+      WebRtcSpl_FilterARFastQ12(
+          enh_bufPtr1,
+          synt,
+          &iLBCdec_inst->old_syntdenum[
+              (iLBCdec_inst->nsub-1)*(LPC_FILTERORDER+1)],
+          LPC_FILTERORDER+1, lag);
+
+      WEBRTC_SPL_MEMCPY_W16(&synt[-LPC_FILTERORDER], &synt[lag-LPC_FILTERORDER],
+                            LPC_FILTERORDER);
+      WebRtcIlbcfix_HpOutput(synt, (int16_t*)WebRtcIlbcfix_kHpOutCoefs,
+                             iLBCdec_inst->hpimemy, iLBCdec_inst->hpimemx,
+                             lag);
+      WebRtcSpl_FilterARFastQ12(
+          enh_bufPtr1, synt,
+          &iLBCdec_inst->old_syntdenum[
+              (iLBCdec_inst->nsub-1)*(LPC_FILTERORDER+1)],
+          LPC_FILTERORDER+1, lag);
+
+      WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->syntMem, &synt[lag-LPC_FILTERORDER],
+                            LPC_FILTERORDER);
+      WebRtcIlbcfix_HpOutput(synt, (int16_t*)WebRtcIlbcfix_kHpOutCoefs,
+                             iLBCdec_inst->hpimemy, iLBCdec_inst->hpimemx,
+                             lag);
+    }
+  }
+
+
+  /* Perform enhancement block by block */
+
+  for (iblock = 0; iblock<new_blocks; iblock++) {
+    WebRtcIlbcfix_Enhancer(out + iblock * ENH_BLOCKL,
+                           enh_buf,
+                           ENH_BUFL,
+                           iblock * ENH_BLOCKL + startPos,
+                           enh_period,
+                           WebRtcIlbcfix_kEnhPlocs, ENH_NBLOCKS_TOT);
+  }
+
+  return (lag);
+}
diff --git a/modules/audio_coding/codecs/ilbc/enhancer_interface.h b/modules/audio_coding/codecs/ilbc/enhancer_interface.h
new file mode 100644
index 0000000..e305161
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/enhancer_interface.h
@@ -0,0 +1,34 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_EnhancerInterface.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENHANCER_INTERFACE_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_ENHANCER_INTERFACE_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * interface for enhancer
+ *---------------------------------------------------------------*/
+
+size_t  // (o) Estimated lag in end of in[]
+    WebRtcIlbcfix_EnhancerInterface(
+        int16_t* out,                // (o) enhanced signal
+        const int16_t* in,           // (i) unenhanced signal
+        IlbcDecoder* iLBCdec_inst);  // (i) buffers etc
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c b/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c
new file mode 100644
index 0000000..4624211
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c
@@ -0,0 +1,48 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_FilteredCbVecs.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ *  Construct an additional codebook vector by filtering the
+ *  initial codebook buffer. This vector is then used to expand
+ *  the codebook with an additional section.
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_FilteredCbVecs(
+    int16_t *cbvectors, /* (o) Codebook vector for the higher section */
+    int16_t *CBmem,  /* (i) Codebook memory that is filtered to create a
+                                           second CB section */
+    size_t lMem,  /* (i) Length of codebook memory */
+    size_t samples    /* (i) Number of samples to filter */
+                                  ) {
+
+  /* Set up the memory, start with zero state */
+  WebRtcSpl_MemSetW16(CBmem+lMem, 0, CB_HALFFILTERLEN);
+  WebRtcSpl_MemSetW16(CBmem-CB_HALFFILTERLEN, 0, CB_HALFFILTERLEN);
+  WebRtcSpl_MemSetW16(cbvectors, 0, lMem-samples);
+
+  /* Filter to obtain the filtered CB memory */
+
+  WebRtcSpl_FilterMAFastQ12(
+      CBmem+CB_HALFFILTERLEN+lMem-samples, cbvectors+lMem-samples,
+      (int16_t*)WebRtcIlbcfix_kCbFiltersRev, CB_FILTERLEN, samples);
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h b/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h
new file mode 100644
index 0000000..f57e9c4
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_FilteredCbVecs.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_FILTERED_CB_VECS_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_FILTERED_CB_VECS_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  Construct an additional codebook vector by filtering the
+ *  initial codebook buffer. This vector is then used to expand
+ *  the codebook with an additional section.
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_FilteredCbVecs(
+    int16_t *cbvectors, /* (o) Codebook vector for the higher section */
+    int16_t *CBmem,  /* (i) Codebook memory that is filtered to create a
+                                           second CB section */
+    size_t lMem,  /* (i) Length of codebook memory */
+    size_t samples    /* (i) Number of samples to filter */
+                                  );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/frame_classify.c b/modules/audio_coding/codecs/ilbc/frame_classify.c
new file mode 100644
index 0000000..6edf921
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/frame_classify.c
@@ -0,0 +1,88 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_FrameClassify.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ *  Classification of subframes to localize start state
+ *---------------------------------------------------------------*/
+
+size_t WebRtcIlbcfix_FrameClassify(
+    /* (o) Index to the max-energy sub frame */
+    IlbcEncoder *iLBCenc_inst,
+    /* (i/o) the encoder state structure */
+    int16_t *residualFIX /* (i) lpc residual signal */
+                                                ){
+  int16_t max, scale;
+  int32_t ssqEn[NSUB_MAX-1];
+  int16_t *ssqPtr;
+  int32_t *seqEnPtr;
+  int32_t maxW32;
+  int16_t scale1;
+  size_t pos;
+  size_t n;
+
+  /*
+    Calculate the energy of each of the 80 sample blocks
+    in the draft the 4 first and last samples are windowed with 1/5...4/5
+    and 4/5...1/5 respectively. To simplify for the fixpoint we have changed
+    this to 0 0 1 1 and 1 1 0 0
+  */
+
+  max = WebRtcSpl_MaxAbsValueW16(residualFIX, iLBCenc_inst->blockl);
+  scale = WebRtcSpl_GetSizeInBits((uint32_t)(max * max));
+
+  /* Scale to maximum 24 bits so that it won't overflow for 76 samples */
+  scale = scale-24;
+  scale1 = WEBRTC_SPL_MAX(0, scale);
+
+  /* Calculate energies */
+  ssqPtr=residualFIX + 2;
+  seqEnPtr=ssqEn;
+  for (n=(iLBCenc_inst->nsub-1); n>0; n--) {
+    (*seqEnPtr) = WebRtcSpl_DotProductWithScale(ssqPtr, ssqPtr, 76, scale1);
+    ssqPtr += 40;
+    seqEnPtr++;
+  }
+
+  /* Scale to maximum 20 bits in order to allow for the 11 bit window */
+  maxW32 = WebRtcSpl_MaxValueW32(ssqEn, iLBCenc_inst->nsub - 1);
+  scale = WebRtcSpl_GetSizeInBits(maxW32) - 20;
+  scale1 = WEBRTC_SPL_MAX(0, scale);
+
+  /* Window each 80 block with the ssqEn_winTbl window to give higher probability for
+     the blocks in the middle
+  */
+  seqEnPtr=ssqEn;
+  if (iLBCenc_inst->mode==20) {
+    ssqPtr=(int16_t*)WebRtcIlbcfix_kStartSequenceEnrgWin+1;
+  } else {
+    ssqPtr=(int16_t*)WebRtcIlbcfix_kStartSequenceEnrgWin;
+  }
+  for (n=(iLBCenc_inst->nsub-1); n>0; n--) {
+    (*seqEnPtr)=WEBRTC_SPL_MUL(((*seqEnPtr)>>scale1), (*ssqPtr));
+    seqEnPtr++;
+    ssqPtr++;
+  }
+
+  /* Extract the best choise of start state */
+  pos = WebRtcSpl_MaxIndexW32(ssqEn, iLBCenc_inst->nsub - 1) + 1;
+
+  return(pos);
+}
diff --git a/modules/audio_coding/codecs/ilbc/frame_classify.h b/modules/audio_coding/codecs/ilbc/frame_classify.h
new file mode 100644
index 0000000..60b3249
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/frame_classify.h
@@ -0,0 +1,29 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_FrameClassify.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_FRAME_CLASSIFY_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_FRAME_CLASSIFY_H_
+
+size_t WebRtcIlbcfix_FrameClassify(
+    /* (o) Index to the max-energy sub frame */
+    IlbcEncoder *iLBCenc_inst,
+    /* (i/o) the encoder state structure */
+    int16_t *residualFIX /* (i) lpc residual signal */
+                                                );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/gain_dequant.c b/modules/audio_coding/codecs/ilbc/gain_dequant.c
new file mode 100644
index 0000000..cb405ae
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/gain_dequant.c
@@ -0,0 +1,45 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_GainDequant.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ *  decoder for quantized gains in the gain-shape coding of
+ *  residual
+ *---------------------------------------------------------------*/
+
+int16_t WebRtcIlbcfix_GainDequant(
+    /* (o) quantized gain value (Q14) */
+    int16_t index, /* (i) quantization index */
+    int16_t maxIn, /* (i) maximum of unquantized gain (Q14) */
+    int16_t stage /* (i) The stage of the search */
+                                                ){
+  int16_t scale;
+  const int16_t *gain;
+
+  /* obtain correct scale factor */
+
+  scale=WEBRTC_SPL_ABS_W16(maxIn);
+  scale = WEBRTC_SPL_MAX(1638, scale);  /* if lower than 0.1, set it to 0.1 */
+
+  /* select the quantization table and return the decoded value */
+  gain = WebRtcIlbcfix_kGain[stage];
+
+  return (int16_t)((scale * gain[index] + 8192) >> 14);
+}
diff --git a/modules/audio_coding/codecs/ilbc/gain_dequant.h b/modules/audio_coding/codecs/ilbc/gain_dequant.h
new file mode 100644
index 0000000..6989372
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/gain_dequant.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_GainDequant.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GAIN_DEQUANT_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GAIN_DEQUANT_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  decoder for quantized gains in the gain-shape coding of
+ *  residual
+ *---------------------------------------------------------------*/
+
+int16_t WebRtcIlbcfix_GainDequant(
+    /* (o) quantized gain value (Q14) */
+    int16_t index, /* (i) quantization index */
+    int16_t maxIn, /* (i) maximum of unquantized gain (Q14) */
+    int16_t stage /* (i) The stage of the search */
+                                         );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/gain_quant.c b/modules/audio_coding/codecs/ilbc/gain_quant.c
new file mode 100644
index 0000000..2472239
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/gain_quant.c
@@ -0,0 +1,103 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_GainQuant.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ *  quantizer for the gain in the gain-shape coding of residual
+ *---------------------------------------------------------------*/
+
+int16_t WebRtcIlbcfix_GainQuant( /* (o) quantized gain value */
+    int16_t gain, /* (i) gain value Q14 */
+    int16_t maxIn, /* (i) maximum of gain value Q14 */
+    int16_t stage, /* (i) The stage of the search */
+    int16_t *index /* (o) quantization index */
+                                        ) {
+
+  int16_t scale, cblen;
+  int32_t gainW32, measure1, measure2;
+  const int16_t *cbPtr, *cb;
+  int loc, noMoves, noChecks, i;
+
+  /* ensure a lower bound (0.1) on the scaling factor */
+
+  scale = WEBRTC_SPL_MAX(1638, maxIn);
+
+  /* select the quantization table and calculate
+     the length of the table and the number of
+     steps in the binary search that are needed */
+  cb = WebRtcIlbcfix_kGain[stage];
+  cblen = 32>>stage;
+  noChecks = 4-stage;
+
+  /* Multiply the gain with 2^14 to make the comparison
+     easier and with higher precision */
+  gainW32 = gain << 14;
+
+  /* Do a binary search, starting in the middle of the CB
+     loc - defines the current position in the table
+     noMoves - defines the number of steps to move in the CB in order
+     to get next CB location
+  */
+
+  loc = cblen>>1;
+  noMoves = loc;
+  cbPtr = cb + loc; /* Centre of CB */
+
+  for (i=noChecks;i>0;i--) {
+    noMoves>>=1;
+    measure1 = scale * *cbPtr;
+
+    /* Move up if gain is larger, otherwise move down in table */
+    measure1 = measure1 - gainW32;
+
+    if (0>measure1) {
+      cbPtr+=noMoves;
+      loc+=noMoves;
+    } else {
+      cbPtr-=noMoves;
+      loc-=noMoves;
+    }
+  }
+
+  /* Check which value is the closest one: loc-1, loc or loc+1 */
+
+  measure1 = scale * *cbPtr;
+  if (gainW32>measure1) {
+    /* Check against value above loc */
+    measure2 = scale * cbPtr[1];
+    if ((measure2-gainW32)<(gainW32-measure1)) {
+      loc+=1;
+    }
+  } else {
+    /* Check against value below loc */
+    measure2 = scale * cbPtr[-1];
+    if ((gainW32-measure2)<=(measure1-gainW32)) {
+      loc-=1;
+    }
+  }
+
+  /* Guard against getting outside the table. The calculation above can give a location
+     which is one above the maximum value (in very rare cases) */
+  loc=WEBRTC_SPL_MIN(loc, (cblen-1));
+  *index=loc;
+
+  /* Calculate and return the quantized gain value (in Q14) */
+  return (int16_t)((scale * cb[loc] + 8192) >> 14);
+}
diff --git a/modules/audio_coding/codecs/ilbc/gain_quant.h b/modules/audio_coding/codecs/ilbc/gain_quant.h
new file mode 100644
index 0000000..bc5a936
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/gain_quant.h
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_GainQuant.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GAIN_QUANT_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GAIN_QUANT_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  quantizer for the gain in the gain-shape coding of residual
+ *---------------------------------------------------------------*/
+
+int16_t WebRtcIlbcfix_GainQuant( /* (o) quantized gain value */
+    int16_t gain, /* (i) gain value Q14 */
+    int16_t maxIn, /* (i) maximum of gain value Q14 */
+    int16_t stage, /* (i) The stage of the search */
+    int16_t *index /* (o) quantization index */
+                                       );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/get_cd_vec.c b/modules/audio_coding/codecs/ilbc/get_cd_vec.c
new file mode 100644
index 0000000..d3479ec
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/get_cd_vec.c
@@ -0,0 +1,126 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_GetCbVec.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/get_cd_vec.h"
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/create_augmented_vec.h"
+
+/*----------------------------------------------------------------*
+ *  Construct codebook vector for given index.
+ *---------------------------------------------------------------*/
+
+bool WebRtcIlbcfix_GetCbVec(
+    int16_t *cbvec,   /* (o) Constructed codebook vector */
+    int16_t *mem,   /* (i) Codebook buffer */
+    size_t index,   /* (i) Codebook index */
+    size_t lMem,   /* (i) Length of codebook buffer */
+    size_t cbveclen   /* (i) Codebook vector length */
+                            ){
+  size_t k, base_size;
+  size_t lag;
+  /* Stack based */
+  int16_t tempbuff2[SUBL+5];
+
+  /* Determine size of codebook sections */
+
+  base_size=lMem-cbveclen+1;
+
+  if (cbveclen==SUBL) {
+    base_size += cbveclen / 2;
+  }
+
+  /* No filter -> First codebook section */
+
+  if (index<lMem-cbveclen+1) {
+
+    /* first non-interpolated vectors */
+
+    k=index+cbveclen;
+    /* get vector */
+    WEBRTC_SPL_MEMCPY_W16(cbvec, mem+lMem-k, cbveclen);
+
+  } else if (index < base_size) {
+
+    /* Calculate lag */
+
+    k = (2 * (index - (lMem - cbveclen + 1))) + cbveclen;
+
+    lag = k / 2;
+
+    WebRtcIlbcfix_CreateAugmentedVec(lag, mem+lMem, cbvec);
+
+  }
+
+  /* Higher codebbok section based on filtering */
+
+  else {
+
+    size_t memIndTest;
+
+    /* first non-interpolated vectors */
+
+    if (index-base_size<lMem-cbveclen+1) {
+
+      /* Set up filter memory, stuff zeros outside memory buffer */
+
+      memIndTest = lMem-(index-base_size+cbveclen);
+
+      WebRtcSpl_MemSetW16(mem-CB_HALFFILTERLEN, 0, CB_HALFFILTERLEN);
+      WebRtcSpl_MemSetW16(mem+lMem, 0, CB_HALFFILTERLEN);
+
+      /* do filtering to get the codebook vector */
+
+      WebRtcSpl_FilterMAFastQ12(
+          &mem[memIndTest+4], cbvec, (int16_t*)WebRtcIlbcfix_kCbFiltersRev,
+          CB_FILTERLEN, cbveclen);
+    }
+
+    /* interpolated vectors */
+
+    else {
+      if (cbveclen < SUBL) {
+        // We're going to fill in cbveclen + 5 elements of tempbuff2 in
+        // WebRtcSpl_FilterMAFastQ12, less than the SUBL + 5 elements we'll be
+        // using in WebRtcIlbcfix_CreateAugmentedVec. This error is caused by
+        // bad values in |index| (which come from the encoded stream). Tell the
+        // caller that things went south, and that the decoder state is now
+        // corrupt (because it's half-way through an update that we can't
+        // complete).
+        return false;
+      }
+
+      /* Stuff zeros outside memory buffer  */
+      memIndTest = lMem-cbveclen-CB_FILTERLEN;
+      WebRtcSpl_MemSetW16(mem+lMem, 0, CB_HALFFILTERLEN);
+
+      /* do filtering */
+      WebRtcSpl_FilterMAFastQ12(
+          &mem[memIndTest+7], tempbuff2, (int16_t*)WebRtcIlbcfix_kCbFiltersRev,
+          CB_FILTERLEN, cbveclen+5);
+
+      /* Calculate lag index */
+      lag = (cbveclen<<1)-20+index-base_size-lMem-1;
+
+      WebRtcIlbcfix_CreateAugmentedVec(lag, tempbuff2+SUBL+5, cbvec);
+    }
+  }
+
+  return true;  // Success.
+}
diff --git a/modules/audio_coding/codecs/ilbc/get_cd_vec.h b/modules/audio_coding/codecs/ilbc/get_cd_vec.h
new file mode 100644
index 0000000..76e1a56
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/get_cd_vec.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_GetCbVec.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GET_CD_VEC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GET_CD_VEC_H_
+
+#include <stdbool.h>
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+// Returns true on success, false on failure. In case of failure, the decoder
+// state may be corrupted and needs resetting.
+bool WebRtcIlbcfix_GetCbVec(
+    int16_t* cbvec, /* (o) Constructed codebook vector */
+    int16_t* mem,   /* (i) Codebook buffer */
+    size_t index,   /* (i) Codebook index */
+    size_t lMem,    /* (i) Length of codebook buffer */
+    size_t cbveclen /* (i) Codebook vector length */
+    ) RTC_WARN_UNUSED_RESULT;
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/get_lsp_poly.c b/modules/audio_coding/codecs/ilbc/get_lsp_poly.c
new file mode 100644
index 0000000..ecf5770
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/get_lsp_poly.c
@@ -0,0 +1,82 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_GetLspPoly.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * Construct the polynomials F1(z) and F2(z) from the LSP
+ * (Computations are done in Q24)
+ *
+ * The expansion is performed using the following recursion:
+ *
+ * f[0] = 1;
+ * tmp = -2.0 * lsp[0];
+ * f[1] = tmp;
+ * for (i=2; i<=5; i++) {
+ *    b = -2.0 * lsp[2*i-2];
+ *    f[i] = tmp*f[i-1] + 2.0*f[i-2];
+ *    for (j=i; j>=2; j--) {
+ *       f[j] = f[j] + tmp*f[j-1] + f[j-2];
+ *    }
+ *    f[i] = f[i] + tmp;
+ * }
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_GetLspPoly(
+    int16_t *lsp, /* (i) LSP in Q15 */
+    int32_t *f)  /* (o) polonymial in Q24 */
+{
+  int32_t tmpW32;
+  int i, j;
+  int16_t high, low;
+  int16_t *lspPtr;
+  int32_t *fPtr;
+
+  lspPtr = lsp;
+  fPtr = f;
+  /* f[0] = 1.0 (Q24) */
+  (*fPtr) = (int32_t)16777216;
+  fPtr++;
+
+  (*fPtr) = WEBRTC_SPL_MUL((*lspPtr), -1024);
+  fPtr++;
+  lspPtr+=2;
+
+  for(i=2; i<=5; i++)
+  {
+    (*fPtr) = fPtr[-2];
+
+    for(j=i; j>1; j--)
+    {
+      /* Compute f[j] = f[j] + tmp*f[j-1] + f[j-2]; */
+      high = (int16_t)(fPtr[-1] >> 16);
+      low = (int16_t)((fPtr[-1] & 0xffff) >> 1);
+
+      tmpW32 = 4 * high * *lspPtr + 4 * ((low * *lspPtr) >> 15);
+
+      (*fPtr) += fPtr[-2];
+      (*fPtr) -= tmpW32;
+      fPtr--;
+    }
+    *fPtr -= *lspPtr * (1 << 10);
+
+    fPtr+=i;
+    lspPtr+=2;
+  }
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/get_lsp_poly.h b/modules/audio_coding/codecs/ilbc/get_lsp_poly.h
new file mode 100644
index 0000000..1351b8b
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/get_lsp_poly.h
@@ -0,0 +1,47 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_GetLspPoly.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GET_LSP_POLY_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GET_LSP_POLY_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * Construct the polynomials F1(z) and F2(z) from the LSP
+ * (Computations are done in Q24)
+ *
+ * The expansion is performed using the following recursion:
+ *
+ * f[0] = 1;
+ * tmp = -2.0 * lsp[0];
+ * f[1] = tmp;
+ * for (i=2; i<=5; i++) {
+ *    b = -2.0 * lsp[2*i-2];
+ *    f[i] = tmp*f[i-1] + 2.0*f[i-2];
+ *    for (j=i; j>=2; j--) {
+ *       f[j] = f[j] + tmp*f[j-1] + f[j-2];
+ *    }
+ *    f[i] = f[i] + tmp;
+ * }
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_GetLspPoly(
+    int16_t *lsp, /* (i) LSP in Q15 */
+    int32_t *f);  /* (o) polonymial in Q24 */
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/get_sync_seq.c b/modules/audio_coding/codecs/ilbc/get_sync_seq.c
new file mode 100644
index 0000000..c5b11f1
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/get_sync_seq.c
@@ -0,0 +1,109 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_GetSyncSeq.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/refiner.h"
+#include "modules/audio_coding/codecs/ilbc/nearest_neighbor.h"
+
+/*----------------------------------------------------------------*
+ * get the pitch-synchronous sample sequence
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_GetSyncSeq(
+    int16_t *idata,   /* (i) original data */
+    size_t idatal,   /* (i) dimension of data */
+    size_t centerStartPos, /* (i) where current block starts */
+    size_t *period,   /* (i) rough-pitch-period array       (Q-2) */
+    const size_t *plocs, /* (i) where periods of period array are taken (Q-2) */
+    size_t periodl,   /* (i) dimension period array */
+    size_t hl,    /* (i) 2*hl+1 is the number of sequences */
+    int16_t *surround  /* (i/o) The contribution from this sequence
+                                summed with earlier contributions */
+                              ){
+  size_t i, centerEndPos, q;
+  /* Stack based */
+  size_t lagBlock[2 * ENH_HL + 1];
+  size_t blockStartPos[2 * ENH_HL + 1]; /* The position to search around (Q2) */
+  size_t plocs2[ENH_PLOCSL];
+
+  centerEndPos = centerStartPos + ENH_BLOCKL - 1;
+
+  /* present (find predicted lag from this position) */
+
+  WebRtcIlbcfix_NearestNeighbor(lagBlock + hl,
+                                plocs,
+                                2 * (centerStartPos + centerEndPos),
+                                periodl);
+
+  blockStartPos[hl] = 4 * centerStartPos;
+
+  /* past (find predicted position and perform a refined
+     search to find the best sequence) */
+
+  for (q = hl; q > 0; q--) {
+    size_t qq = q - 1;
+    size_t period_q = period[lagBlock[q]];
+    /* Stop if this sequence would be outside the buffer; that means all
+       further-past sequences would also be outside the buffer. */
+    if (blockStartPos[q] < period_q + (4 * ENH_OVERHANG))
+      break;
+    blockStartPos[qq] = blockStartPos[q] - period_q;
+
+    size_t value = blockStartPos[qq] + 4 * ENH_BLOCKL_HALF;
+    value = (value > period_q) ? (value - period_q) : 0;
+    WebRtcIlbcfix_NearestNeighbor(lagBlock + qq, plocs, value, periodl);
+
+    /* Find the best possible sequence in the 4 times upsampled
+        domain around blockStartPos+q */
+    WebRtcIlbcfix_Refiner(blockStartPos + qq, idata, idatal, centerStartPos,
+                          blockStartPos[qq], surround,
+                          WebRtcIlbcfix_kEnhWt[qq]);
+  }
+
+  /* future (find predicted position and perform a refined
+     search to find the best sequence) */
+
+  for (i = 0; i < periodl; i++) {
+    plocs2[i] = plocs[i] - period[i];
+  }
+
+  for (q = hl + 1; q <= (2 * hl); q++) {
+
+    WebRtcIlbcfix_NearestNeighbor(
+        lagBlock + q,
+        plocs2,
+        blockStartPos[q - 1] + 4 * ENH_BLOCKL_HALF,
+        periodl);
+
+    blockStartPos[q]=blockStartPos[q-1]+period[lagBlock[q]];
+
+    if (blockStartPos[q] + 4 * (ENH_BLOCKL + ENH_OVERHANG) < 4 * idatal) {
+
+      /* Find the best possible sequence in the 4 times upsampled
+         domain around blockStartPos+q */
+      WebRtcIlbcfix_Refiner(blockStartPos + q, idata, idatal, centerStartPos,
+                            blockStartPos[q], surround,
+                            WebRtcIlbcfix_kEnhWt[2 * hl - q]);
+
+    } else {
+      /* Don't add anything since this sequence would
+         be outside the buffer */
+    }
+  }
+}
diff --git a/modules/audio_coding/codecs/ilbc/get_sync_seq.h b/modules/audio_coding/codecs/ilbc/get_sync_seq.h
new file mode 100644
index 0000000..5c72956
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/get_sync_seq.h
@@ -0,0 +1,40 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_GetSyncSeq.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GET_SYNC_SEQ_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_GET_SYNC_SEQ_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * get the pitch-synchronous sample sequence
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_GetSyncSeq(
+    int16_t *idata,   /* (i) original data */
+    size_t idatal,   /* (i) dimension of data */
+    size_t centerStartPos, /* (i) where current block starts */
+    size_t *period,   /* (i) rough-pitch-period array       (Q-2) */
+    const size_t *plocs, /* (i) where periods of period array are taken (Q-2) */
+    size_t periodl,   /* (i) dimension period array */
+    size_t hl,    /* (i) 2*hl+1 is the number of sequences */
+    int16_t *surround  /* (i/o) The contribution from this sequence
+                                summed with earlier contributions */
+                              );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/hp_input.c b/modules/audio_coding/codecs/ilbc/hp_input.c
new file mode 100644
index 0000000..dd6e20b
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/hp_input.c
@@ -0,0 +1,88 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_HpInput.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  high-pass filter of input with *0.5 and saturation
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_HpInput(
+    int16_t *signal,     /* (i/o) signal vector */
+    int16_t *ba,      /* (i)   B- and A-coefficients (2:nd order)
+                                                                   {b[0] b[1] b[2] -a[1] -a[2]} a[0]
+                                                                   is assumed to be 1.0 */
+    int16_t *y,      /* (i/o) Filter state yhi[n-1] ylow[n-1]
+                                                                   yhi[n-2] ylow[n-2] */
+    int16_t *x,      /* (i/o) Filter state x[n-1] x[n-2] */
+    size_t len)      /* (i)   Number of samples to filter */
+{
+  size_t i;
+  int32_t tmpW32;
+  int32_t tmpW32b;
+
+  for (i=0; i<len; i++) {
+
+    /*
+        y[i] = b[0]*x[i] + b[1]*x[i-1] + b[2]*x[i-2]
+        + (-a[1])*y[i-1] + (-a[2])*y[i-2];
+    */
+
+    tmpW32 = y[1] * ba[3];  /* (-a[1])*y[i-1] (low part) */
+    tmpW32 += y[3] * ba[4];  /* (-a[2])*y[i-2] (low part) */
+    tmpW32 = (tmpW32>>15);
+    tmpW32 += y[0] * ba[3];  /* (-a[1])*y[i-1] (high part) */
+    tmpW32 += y[2] * ba[4];  /* (-a[2])*y[i-2] (high part) */
+    tmpW32 = (tmpW32<<1);
+
+    tmpW32 += signal[i] * ba[0];  /* b[0]*x[0] */
+    tmpW32 += x[0] * ba[1];  /* b[1]*x[i-1] */
+    tmpW32 += x[1] * ba[2];  /* b[2]*x[i-2] */
+
+    /* Update state (input part) */
+    x[1] = x[0];
+    x[0] = signal[i];
+
+    /* Rounding in Q(12+1), i.e. add 2^12 */
+    tmpW32b = tmpW32 + 4096;
+
+    /* Saturate (to 2^28) so that the HP filtered signal does not overflow */
+    tmpW32b = WEBRTC_SPL_SAT((int32_t)268435455, tmpW32b, (int32_t)-268435456);
+
+    /* Convert back to Q0 and multiply with 0.5 */
+    signal[i] = (int16_t)(tmpW32b >> 13);
+
+    /* Update state (filtered part) */
+    y[2] = y[0];
+    y[3] = y[1];
+
+    /* upshift tmpW32 by 3 with saturation */
+    if (tmpW32>268435455) {
+      tmpW32 = WEBRTC_SPL_WORD32_MAX;
+    } else if (tmpW32<-268435456) {
+      tmpW32 = WEBRTC_SPL_WORD32_MIN;
+    } else {
+      tmpW32 <<= 3;
+    }
+
+    y[0] = (int16_t)(tmpW32 >> 16);
+    y[1] = (int16_t)((tmpW32 - (y[0] << 16)) >> 1);
+  }
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/hp_input.h b/modules/audio_coding/codecs/ilbc/hp_input.h
new file mode 100644
index 0000000..f354dd9
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/hp_input.h
@@ -0,0 +1,34 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_HpInput.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_HP_INPUT_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_HP_INPUT_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+void WebRtcIlbcfix_HpInput(
+    int16_t *signal,     /* (i/o) signal vector */
+    int16_t *ba,      /* (i)   B- and A-coefficients (2:nd order)
+                                                                   {b[0] b[1] b[2] -a[1] -a[2]} a[0]
+                                                                   is assumed to be 1.0 */
+    int16_t *y,      /* (i/o) Filter state yhi[n-1] ylow[n-1]
+                                                                   yhi[n-2] ylow[n-2] */
+    int16_t *x,      /* (i/o) Filter state x[n-1] x[n-2] */
+    size_t len);     /* (i)   Number of samples to filter */
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/hp_output.c b/modules/audio_coding/codecs/ilbc/hp_output.c
new file mode 100644
index 0000000..0628e58
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/hp_output.c
@@ -0,0 +1,89 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_HpOutput.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  high-pass filter of output and *2 with saturation
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_HpOutput(
+    int16_t *signal,     /* (i/o) signal vector */
+    int16_t *ba,      /* (i)   B- and A-coefficients (2:nd order)
+                                                                   {b[0] b[1] b[2] -a[1] -a[2]} a[0]
+                                                                   is assumed to be 1.0 */
+    int16_t *y,      /* (i/o) Filter state yhi[n-1] ylow[n-1]
+                                                                   yhi[n-2] ylow[n-2] */
+    int16_t *x,      /* (i/o) Filter state x[n-1] x[n-2] */
+    size_t len)      /* (i)   Number of samples to filter */
+{
+  size_t i;
+  int32_t tmpW32;
+  int32_t tmpW32b;
+
+  for (i=0; i<len; i++) {
+
+    /*
+      y[i] = b[0]*x[i] + b[1]*x[i-1] + b[2]*x[i-2]
+      + (-a[1])*y[i-1] + (-a[2])*y[i-2];
+    */
+
+    tmpW32 = y[1] * ba[3];  /* (-a[1])*y[i-1] (low part) */
+    tmpW32 += y[3] * ba[4];  /* (-a[2])*y[i-2] (low part) */
+    tmpW32 = (tmpW32>>15);
+    tmpW32 += y[0] * ba[3];  /* (-a[1])*y[i-1] (high part) */
+    tmpW32 += y[2] * ba[4];  /* (-a[2])*y[i-2] (high part) */
+    tmpW32 *= 2;
+
+    tmpW32 += signal[i] * ba[0];  /* b[0]*x[0] */
+    tmpW32 += x[0] * ba[1];  /* b[1]*x[i-1] */
+    tmpW32 += x[1] * ba[2];  /* b[2]*x[i-2] */
+
+    /* Update state (input part) */
+    x[1] = x[0];
+    x[0] = signal[i];
+
+    /* Rounding in Q(12-1), i.e. add 2^10 */
+    tmpW32b = tmpW32 + 1024;
+
+    /* Saturate (to 2^26) so that the HP filtered signal does not overflow */
+    tmpW32b = WEBRTC_SPL_SAT((int32_t)67108863, tmpW32b, (int32_t)-67108864);
+
+    /* Convert back to Q0 and multiply with 2 */
+    signal[i] = (int16_t)(tmpW32b >> 11);
+
+    /* Update state (filtered part) */
+    y[2] = y[0];
+    y[3] = y[1];
+
+    /* upshift tmpW32 by 3 with saturation */
+    if (tmpW32>268435455) {
+      tmpW32 = WEBRTC_SPL_WORD32_MAX;
+    } else if (tmpW32<-268435456) {
+      tmpW32 = WEBRTC_SPL_WORD32_MIN;
+    } else {
+      tmpW32 *= 8;
+    }
+
+    y[0] = (int16_t)(tmpW32 >> 16);
+    y[1] = (int16_t)((tmpW32 & 0xffff) >> 1);
+
+  }
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/hp_output.h b/modules/audio_coding/codecs/ilbc/hp_output.h
new file mode 100644
index 0000000..a060a9d
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/hp_output.h
@@ -0,0 +1,34 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_HpOutput.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_HP_OUTPUT_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_HP_OUTPUT_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+void WebRtcIlbcfix_HpOutput(
+    int16_t *signal,     /* (i/o) signal vector */
+    int16_t *ba,      /* (i)   B- and A-coefficients (2:nd order)
+                               {b[0] b[1] b[2] -a[1] -a[2]} a[0]
+                               is assumed to be 1.0 */
+    int16_t *y,      /* (i/o) Filter state yhi[n-1] ylow[n-1]
+                              yhi[n-2] ylow[n-2] */
+    int16_t *x,      /* (i/o) Filter state x[n-1] x[n-2] */
+    size_t len);      /* (i)   Number of samples to filter */
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/ilbc.c b/modules/audio_coding/codecs/ilbc/ilbc.c
new file mode 100644
index 0000000..21fc3f8
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/ilbc.c
@@ -0,0 +1,288 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ iLBCInterface.c
+
+******************************************************************/
+
+#include <stdlib.h>
+
+#include "modules/audio_coding/codecs/ilbc/ilbc.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/init_encode.h"
+#include "modules/audio_coding/codecs/ilbc/encode.h"
+#include "modules/audio_coding/codecs/ilbc/init_decode.h"
+#include "modules/audio_coding/codecs/ilbc/decode.h"
+#include "rtc_base/checks.h"
+
+int16_t WebRtcIlbcfix_EncoderAssign(IlbcEncoderInstance** iLBC_encinst,
+                                    int16_t* ILBCENC_inst_Addr,
+                                    int16_t* size) {
+  *iLBC_encinst=(IlbcEncoderInstance*)ILBCENC_inst_Addr;
+  *size=sizeof(IlbcEncoder)/sizeof(int16_t);
+  if (*iLBC_encinst!=NULL) {
+    return(0);
+  } else {
+    return(-1);
+  }
+}
+
+int16_t WebRtcIlbcfix_DecoderAssign(IlbcDecoderInstance** iLBC_decinst,
+                                    int16_t* ILBCDEC_inst_Addr,
+                                    int16_t* size) {
+  *iLBC_decinst=(IlbcDecoderInstance*)ILBCDEC_inst_Addr;
+  *size=sizeof(IlbcDecoder)/sizeof(int16_t);
+  if (*iLBC_decinst!=NULL) {
+    return(0);
+  } else {
+    return(-1);
+  }
+}
+
+int16_t WebRtcIlbcfix_EncoderCreate(IlbcEncoderInstance **iLBC_encinst) {
+  *iLBC_encinst=(IlbcEncoderInstance*)malloc(sizeof(IlbcEncoder));
+  if (*iLBC_encinst!=NULL) {
+    WebRtcSpl_Init();
+    return(0);
+  } else {
+    return(-1);
+  }
+}
+
+int16_t WebRtcIlbcfix_DecoderCreate(IlbcDecoderInstance **iLBC_decinst) {
+  *iLBC_decinst=(IlbcDecoderInstance*)malloc(sizeof(IlbcDecoder));
+  if (*iLBC_decinst!=NULL) {
+    WebRtcSpl_Init();
+    return(0);
+  } else {
+    return(-1);
+  }
+}
+
+int16_t WebRtcIlbcfix_EncoderFree(IlbcEncoderInstance *iLBC_encinst) {
+  free(iLBC_encinst);
+  return(0);
+}
+
+int16_t WebRtcIlbcfix_DecoderFree(IlbcDecoderInstance *iLBC_decinst) {
+  free(iLBC_decinst);
+  return(0);
+}
+
+int16_t WebRtcIlbcfix_EncoderInit(IlbcEncoderInstance* iLBCenc_inst,
+                                  int16_t mode) {
+  if ((mode==20)||(mode==30)) {
+    WebRtcIlbcfix_InitEncode((IlbcEncoder*) iLBCenc_inst, mode);
+    return(0);
+  } else {
+    return(-1);
+  }
+}
+
+int WebRtcIlbcfix_Encode(IlbcEncoderInstance* iLBCenc_inst,
+                         const int16_t* speechIn,
+                         size_t len,
+                         uint8_t* encoded) {
+  size_t pos = 0;
+  size_t encpos = 0;
+
+  if ((len != ((IlbcEncoder*)iLBCenc_inst)->blockl) &&
+#ifdef SPLIT_10MS
+      (len != 80) &&
+#endif
+      (len != 2*((IlbcEncoder*)iLBCenc_inst)->blockl) &&
+      (len != 3*((IlbcEncoder*)iLBCenc_inst)->blockl))
+  {
+    /* A maximum of 3 frames/packet is allowed */
+    return(-1);
+  } else {
+
+    /* call encoder */
+    while (pos<len) {
+      WebRtcIlbcfix_EncodeImpl((uint16_t*)&encoded[2 * encpos], &speechIn[pos],
+                               (IlbcEncoder*)iLBCenc_inst);
+#ifdef SPLIT_10MS
+      pos += 80;
+      if(((IlbcEncoder*)iLBCenc_inst)->section == 0)
+#else
+        pos += ((IlbcEncoder*)iLBCenc_inst)->blockl;
+#endif
+      encpos += ((IlbcEncoder*)iLBCenc_inst)->no_of_words;
+    }
+    return (int)(encpos*2);
+  }
+}
+
+int16_t WebRtcIlbcfix_DecoderInit(IlbcDecoderInstance* iLBCdec_inst,
+                                  int16_t mode) {
+  if ((mode==20)||(mode==30)) {
+    WebRtcIlbcfix_InitDecode((IlbcDecoder*) iLBCdec_inst, mode, 1);
+    return(0);
+  } else {
+    return(-1);
+  }
+}
+void WebRtcIlbcfix_DecoderInit20Ms(IlbcDecoderInstance* iLBCdec_inst) {
+  WebRtcIlbcfix_InitDecode((IlbcDecoder*) iLBCdec_inst, 20, 1);
+}
+void WebRtcIlbcfix_Decoderinit30Ms(IlbcDecoderInstance* iLBCdec_inst) {
+  WebRtcIlbcfix_InitDecode((IlbcDecoder*) iLBCdec_inst, 30, 1);
+}
+
+
+int WebRtcIlbcfix_Decode(IlbcDecoderInstance* iLBCdec_inst,
+                         const uint8_t* encoded,
+                         size_t len,
+                         int16_t* decoded,
+                         int16_t* speechType)
+{
+  size_t i=0;
+  /* Allow for automatic switching between the frame sizes
+     (although you do get some discontinuity) */
+  if ((len==((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
+      (len==2*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
+      (len==3*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)) {
+    /* ok, do nothing */
+  } else {
+    /* Test if the mode has changed */
+    if (((IlbcDecoder*)iLBCdec_inst)->mode==20) {
+      if ((len==NO_OF_BYTES_30MS)||
+          (len==2*NO_OF_BYTES_30MS)||
+          (len==3*NO_OF_BYTES_30MS)) {
+        WebRtcIlbcfix_InitDecode(
+            ((IlbcDecoder*)iLBCdec_inst), 30,
+            ((IlbcDecoder*)iLBCdec_inst)->use_enhancer);
+      } else {
+        /* Unsupported frame length */
+        return(-1);
+      }
+    } else {
+      if ((len==NO_OF_BYTES_20MS)||
+          (len==2*NO_OF_BYTES_20MS)||
+          (len==3*NO_OF_BYTES_20MS)) {
+        WebRtcIlbcfix_InitDecode(
+            ((IlbcDecoder*)iLBCdec_inst), 20,
+            ((IlbcDecoder*)iLBCdec_inst)->use_enhancer);
+      } else {
+        /* Unsupported frame length */
+        return(-1);
+      }
+    }
+  }
+
+  while ((i*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)<len) {
+    if (WebRtcIlbcfix_DecodeImpl(
+            &decoded[i * ((IlbcDecoder*)iLBCdec_inst)->blockl],
+            (const uint16_t*)&encoded
+                [2 * i * ((IlbcDecoder*)iLBCdec_inst)->no_of_words],
+            (IlbcDecoder*)iLBCdec_inst, 1) == -1)
+      return -1;
+    i++;
+  }
+  /* iLBC does not support VAD/CNG yet */
+  *speechType=1;
+  return (int)(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
+}
+
+int WebRtcIlbcfix_Decode20Ms(IlbcDecoderInstance* iLBCdec_inst,
+                             const uint8_t* encoded,
+                             size_t len,
+                             int16_t* decoded,
+                             int16_t* speechType)
+{
+  size_t i=0;
+  if ((len==((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
+      (len==2*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
+      (len==3*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)) {
+    /* ok, do nothing */
+  } else {
+    return(-1);
+  }
+
+  while ((i*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)<len) {
+    if (!WebRtcIlbcfix_DecodeImpl(
+        &decoded[i * ((IlbcDecoder*)iLBCdec_inst)->blockl],
+        (const uint16_t*)&encoded
+            [2 * i * ((IlbcDecoder*)iLBCdec_inst)->no_of_words],
+        (IlbcDecoder*)iLBCdec_inst, 1))
+      return -1;
+    i++;
+  }
+  /* iLBC does not support VAD/CNG yet */
+  *speechType=1;
+  return (int)(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
+}
+
+int WebRtcIlbcfix_Decode30Ms(IlbcDecoderInstance* iLBCdec_inst,
+                             const uint8_t* encoded,
+                             size_t len,
+                             int16_t* decoded,
+                             int16_t* speechType)
+{
+  size_t i=0;
+  if ((len==((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
+      (len==2*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
+      (len==3*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)) {
+    /* ok, do nothing */
+  } else {
+    return(-1);
+  }
+
+  while ((i*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)<len) {
+    if (!WebRtcIlbcfix_DecodeImpl(
+        &decoded[i * ((IlbcDecoder*)iLBCdec_inst)->blockl],
+        (const uint16_t*)&encoded
+            [2 * i * ((IlbcDecoder*)iLBCdec_inst)->no_of_words],
+        (IlbcDecoder*)iLBCdec_inst, 1))
+      return -1;
+    i++;
+  }
+  /* iLBC does not support VAD/CNG yet */
+  *speechType=1;
+  return (int)(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
+}
+
+size_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance* iLBCdec_inst,
+                               int16_t* decoded,
+                               size_t noOfLostFrames) {
+  size_t i;
+  uint16_t dummy;
+
+  for (i=0;i<noOfLostFrames;i++) {
+    // PLC decoding shouldn't fail, because there is no external input data
+    // that can be bad.
+    RTC_CHECK(WebRtcIlbcfix_DecodeImpl(
+        &decoded[i * ((IlbcDecoder*)iLBCdec_inst)->blockl], &dummy,
+        (IlbcDecoder*)iLBCdec_inst, 0));
+  }
+  return (noOfLostFrames*((IlbcDecoder*)iLBCdec_inst)->blockl);
+}
+
+size_t WebRtcIlbcfix_NetEqPlc(IlbcDecoderInstance* iLBCdec_inst,
+                              int16_t* decoded,
+                              size_t noOfLostFrames) {
+  /* Two input parameters not used, but needed for function pointers in NetEQ */
+  (void)(decoded = NULL);
+  (void)(noOfLostFrames = 0);
+
+  WebRtcSpl_MemSetW16(((IlbcDecoder*)iLBCdec_inst)->enh_buf, 0, ENH_BUFL);
+  ((IlbcDecoder*)iLBCdec_inst)->prev_enh_pl = 2;
+
+  return (0);
+}
+
+void WebRtcIlbcfix_version(char *version)
+{
+  strcpy((char*)version, "1.1.1");
+}
diff --git a/modules/audio_coding/codecs/ilbc/ilbc.h b/modules/audio_coding/codecs/ilbc/ilbc.h
new file mode 100644
index 0000000..7836489
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/ilbc.h
@@ -0,0 +1,258 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * ilbc.h
+ *
+ * This header file contains all of the API's for iLBC.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_ILBC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_ILBC_H_
+
+#include <stddef.h>
+
+/*
+ * Define the fixpoint numeric formats
+ */
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+/*
+ * Solution to support multiple instances
+ * Customer has to cast instance to proper type
+ */
+
+typedef struct iLBC_encinst_t_ IlbcEncoderInstance;
+
+typedef struct iLBC_decinst_t_ IlbcDecoderInstance;
+
+/*
+ * Comfort noise constants
+ */
+
+#define ILBC_SPEECH 1
+#define ILBC_CNG  2
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+  /****************************************************************************
+   * WebRtcIlbcfix_XxxAssign(...)
+   *
+   * These functions assigns the encoder/decoder instance to the specified
+   * memory location
+   *
+   * Input:
+   *     - XXX_xxxinst       : Pointer to created instance that should be
+   *                           assigned
+   *     - ILBCXXX_inst_Addr : Pointer to the desired memory space
+   *     - size              : The size that this structure occupies (in Word16)
+   *
+   * Return value             :  0 - Ok
+   *                            -1 - Error
+   */
+
+  int16_t WebRtcIlbcfix_EncoderAssign(IlbcEncoderInstance **iLBC_encinst,
+                                      int16_t *ILBCENC_inst_Addr,
+                                      int16_t *size);
+  int16_t WebRtcIlbcfix_DecoderAssign(IlbcDecoderInstance **iLBC_decinst,
+                                      int16_t *ILBCDEC_inst_Addr,
+                                      int16_t *size);
+
+
+  /****************************************************************************
+   * WebRtcIlbcfix_XxxAssign(...)
+   *
+   * These functions create a instance to the specified structure
+   *
+   * Input:
+   *      - XXX_inst        : Pointer to created instance that should be created
+   *
+   * Return value           :  0 - Ok
+   *                          -1 - Error
+   */
+
+  int16_t WebRtcIlbcfix_EncoderCreate(IlbcEncoderInstance **iLBC_encinst);
+  int16_t WebRtcIlbcfix_DecoderCreate(IlbcDecoderInstance **iLBC_decinst);
+
+  /****************************************************************************
+   * WebRtcIlbcfix_XxxFree(...)
+   *
+   * These functions frees the dynamic memory of a specified instance
+   *
+   * Input:
+   *      - XXX_inst          : Pointer to created instance that should be freed
+   *
+   * Return value             :  0 - Ok
+   *                            -1 - Error
+   */
+
+  int16_t WebRtcIlbcfix_EncoderFree(IlbcEncoderInstance *iLBC_encinst);
+  int16_t WebRtcIlbcfix_DecoderFree(IlbcDecoderInstance *iLBC_decinst);
+
+
+  /****************************************************************************
+   * WebRtcIlbcfix_EncoderInit(...)
+   *
+   * This function initializes a iLBC instance
+   *
+   * Input:
+   *      - iLBCenc_inst      : iLBC instance, i.e. the user that should receive
+   *                            be initialized
+   *      - frameLen          : The frame length of the codec 20/30 (ms)
+   *
+   * Return value             :  0 - Ok
+   *                            -1 - Error
+   */
+
+  int16_t WebRtcIlbcfix_EncoderInit(IlbcEncoderInstance *iLBCenc_inst,
+                                    int16_t frameLen);
+
+  /****************************************************************************
+   * WebRtcIlbcfix_Encode(...)
+   *
+   * This function encodes one iLBC frame. Input speech length has be a
+   * multiple of the frame length.
+   *
+   * Input:
+   *      - iLBCenc_inst      : iLBC instance, i.e. the user that should encode
+   *                            a package
+   *      - speechIn          : Input speech vector
+   *      - len               : Samples in speechIn (160, 240, 320 or 480)
+   *
+   * Output:
+   *  - encoded               : The encoded data vector
+   *
+   * Return value             : >0 - Length (in bytes) of coded data
+   *                            -1 - Error
+   */
+
+  int WebRtcIlbcfix_Encode(IlbcEncoderInstance *iLBCenc_inst,
+                           const int16_t *speechIn,
+                           size_t len,
+                           uint8_t* encoded);
+
+  /****************************************************************************
+   * WebRtcIlbcfix_DecoderInit(...)
+   *
+   * This function initializes a iLBC instance with either 20 or 30 ms frames
+   * Alternatively the WebRtcIlbcfix_DecoderInit_XXms can be used. Then it's
+   * not needed to specify the frame length with a variable.
+   *
+   * Input:
+   *      - IlbcDecoderInstance : iLBC decoder instance
+   *      - frameLen            : The frame length of the codec 20/30 (ms)
+   *
+   * Return value               :  0 - Ok
+   *                              -1 - Error
+   */
+
+  int16_t WebRtcIlbcfix_DecoderInit(IlbcDecoderInstance *iLBCdec_inst,
+                                    int16_t frameLen);
+  void WebRtcIlbcfix_DecoderInit20Ms(IlbcDecoderInstance* iLBCdec_inst);
+  void WebRtcIlbcfix_Decoderinit30Ms(IlbcDecoderInstance* iLBCdec_inst);
+
+  /****************************************************************************
+   * WebRtcIlbcfix_Decode(...)
+   *
+   * This function decodes a packet with iLBC frame(s). Output speech length
+   * will be a multiple of 160 or 240 samples ((160 or 240)*frames/packet).
+   *
+   * Input:
+   *      - iLBCdec_inst      : iLBC instance, i.e. the user that should decode
+   *                            a packet
+   *      - encoded           : Encoded iLBC frame(s)
+   *      - len               : Bytes in encoded vector
+   *
+   * Output:
+   *      - decoded           : The decoded vector
+   *      - speechType        : 1 normal, 2 CNG
+   *
+   * Return value             : >0 - Samples in decoded vector
+   *                            -1 - Error
+   */
+
+  int WebRtcIlbcfix_Decode(IlbcDecoderInstance* iLBCdec_inst,
+                           const uint8_t* encoded,
+                           size_t len,
+                           int16_t* decoded,
+                           int16_t* speechType);
+  int WebRtcIlbcfix_Decode20Ms(IlbcDecoderInstance* iLBCdec_inst,
+                               const uint8_t* encoded,
+                               size_t len,
+                               int16_t* decoded,
+                               int16_t* speechType);
+  int WebRtcIlbcfix_Decode30Ms(IlbcDecoderInstance* iLBCdec_inst,
+                               const uint8_t* encoded,
+                               size_t len,
+                               int16_t* decoded,
+                               int16_t* speechType);
+
+  /****************************************************************************
+   * WebRtcIlbcfix_DecodePlc(...)
+   *
+   * This function conducts PLC for iLBC frame(s). Output speech length
+   * will be a multiple of 160 or 240 samples.
+   *
+   * Input:
+   *      - iLBCdec_inst      : iLBC instance, i.e. the user that should perform
+   *                            a PLC
+   *      - noOfLostFrames    : Number of PLC frames to produce
+   *
+   * Output:
+   *      - decoded           : The "decoded" vector
+   *
+   * Return value             : Samples in decoded PLC vector
+   */
+
+  size_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance *iLBCdec_inst,
+                                 int16_t *decoded,
+                                 size_t noOfLostFrames);
+
+  /****************************************************************************
+   * WebRtcIlbcfix_NetEqPlc(...)
+   *
+   * This function updates the decoder when a packet loss has occured, but it
+   * does not produce any PLC data. Function can be used if another PLC method
+   * is used (i.e NetEq).
+   *
+   * Input:
+   *      - iLBCdec_inst      : iLBC instance that should be updated
+   *      - noOfLostFrames    : Number of lost frames
+   *
+   * Output:
+   *      - decoded           : The "decoded" vector (nothing in this case)
+   *
+   * Return value             : Samples in decoded PLC vector
+   */
+
+  size_t WebRtcIlbcfix_NetEqPlc(IlbcDecoderInstance *iLBCdec_inst,
+                                int16_t *decoded,
+                                size_t noOfLostFrames);
+
+  /****************************************************************************
+   * WebRtcIlbcfix_version(...)
+   *
+   * This function returns the version number of iLBC
+   *
+   * Output:
+   *      - version           : Version number of iLBC (maximum 20 char)
+   */
+
+  void WebRtcIlbcfix_version(char *version);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // MODULES_AUDIO_CODING_CODECS_ILBC_ILBC_H_
diff --git a/modules/audio_coding/codecs/ilbc/ilbc_unittest.cc b/modules/audio_coding/codecs/ilbc/ilbc_unittest.cc
new file mode 100644
index 0000000..b8d3c7c
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/ilbc_unittest.cc
@@ -0,0 +1,139 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h"
+#include "modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h"
+#include "modules/audio_coding/codecs/legacy_encoded_audio_frame.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(IlbcTest, BadPacket) {
+  // Get a good packet.
+  AudioEncoderIlbcConfig config;
+  config.frame_size_ms = 20;  // We need 20 ms rather than the default 30 ms;
+                              // otherwise, all possible values of cb_index[2]
+                              // are valid.
+  AudioEncoderIlbcImpl encoder(config, 102);
+  std::vector<int16_t> samples(encoder.SampleRateHz() / 100, 4711);
+  rtc::Buffer packet;
+  int num_10ms_chunks = 0;
+  while (packet.size() == 0) {
+    encoder.Encode(0, samples, &packet);
+    num_10ms_chunks += 1;
+  }
+
+  // Break the packet by setting all bits of the unsigned 7-bit number
+  // cb_index[2] to 1, giving it a value of 127. For a 20 ms packet, this is
+  // too large.
+  EXPECT_EQ(38u, packet.size());
+  rtc::Buffer bad_packet(packet.data(), packet.size());
+  bad_packet[29] |= 0x3f;  // Bits 1-6.
+  bad_packet[30] |= 0x80;  // Bit 0.
+
+  // Decode the bad packet. We expect the decoder to respond by returning -1.
+  AudioDecoderIlbcImpl decoder;
+  std::vector<int16_t> decoded_samples(num_10ms_chunks * samples.size());
+  AudioDecoder::SpeechType speech_type;
+  EXPECT_EQ(-1, decoder.Decode(bad_packet.data(), bad_packet.size(),
+                               encoder.SampleRateHz(),
+                               sizeof(int16_t) * decoded_samples.size(),
+                               decoded_samples.data(), &speech_type));
+
+  // Decode the good packet. This should work, because the failed decoding
+  // should not have left the decoder in a broken state.
+  EXPECT_EQ(static_cast<int>(decoded_samples.size()),
+            decoder.Decode(packet.data(), packet.size(), encoder.SampleRateHz(),
+                           sizeof(int16_t) * decoded_samples.size(),
+                           decoded_samples.data(), &speech_type));
+}
+
+class SplitIlbcTest : public ::testing::TestWithParam<std::pair<int, int> > {
+ protected:
+  virtual void SetUp() {
+    const std::pair<int, int> parameters = GetParam();
+    num_frames_ = parameters.first;
+    frame_length_ms_ = parameters.second;
+    frame_length_bytes_ = (frame_length_ms_ == 20) ? 38 : 50;
+  }
+  size_t num_frames_;
+  int frame_length_ms_;
+  size_t frame_length_bytes_;
+};
+
+TEST_P(SplitIlbcTest, NumFrames) {
+  AudioDecoderIlbcImpl decoder;
+  const size_t frame_length_samples = frame_length_ms_ * 8;
+  const auto generate_payload = [] (size_t payload_length_bytes) {
+    rtc::Buffer payload(payload_length_bytes);
+    // Fill payload with increasing integers {0, 1, 2, ...}.
+    for (size_t i = 0; i < payload.size(); ++i) {
+      payload[i] = static_cast<uint8_t>(i);
+    }
+    return payload;
+  };
+
+  const auto results = decoder.ParsePayload(
+      generate_payload(frame_length_bytes_ * num_frames_), 0);
+  EXPECT_EQ(num_frames_, results.size());
+
+  size_t frame_num = 0;
+  uint8_t payload_value = 0;
+  for (const auto& result : results) {
+    EXPECT_EQ(frame_length_samples * frame_num, result.timestamp);
+    const LegacyEncodedAudioFrame* frame =
+        static_cast<const LegacyEncodedAudioFrame*>(result.frame.get());
+    const rtc::Buffer& payload = frame->payload();
+    EXPECT_EQ(frame_length_bytes_, payload.size());
+    for (size_t i = 0; i < payload.size(); ++i, ++payload_value) {
+      EXPECT_EQ(payload_value, payload[i]);
+    }
+    ++frame_num;
+  }
+}
+
+// Test 1 through 5 frames of 20 and 30 ms size.
+// Also test the maximum number of frames in one packet for 20 and 30 ms.
+// The maximum is defined by the largest payload length that can be uniquely
+// resolved to a frame size of either 38 bytes (20 ms) or 50 bytes (30 ms).
+INSTANTIATE_TEST_CASE_P(
+    IlbcTest, SplitIlbcTest,
+    ::testing::Values(std::pair<int, int>(1, 20),  // 1 frame, 20 ms.
+                      std::pair<int, int>(2, 20),  // 2 frames, 20 ms.
+                      std::pair<int, int>(3, 20),  // And so on.
+                      std::pair<int, int>(4, 20),
+                      std::pair<int, int>(5, 20),
+                      std::pair<int, int>(24, 20),
+                      std::pair<int, int>(1, 30),
+                      std::pair<int, int>(2, 30),
+                      std::pair<int, int>(3, 30),
+                      std::pair<int, int>(4, 30),
+                      std::pair<int, int>(5, 30),
+                      std::pair<int, int>(18, 30)));
+
+// Test too large payload size.
+TEST(IlbcTest, SplitTooLargePayload) {
+  AudioDecoderIlbcImpl decoder;
+  constexpr size_t kPayloadLengthBytes = 950;
+  const auto results =
+      decoder.ParsePayload(rtc::Buffer(kPayloadLengthBytes), 0);
+  EXPECT_TRUE(results.empty());
+}
+
+// Payload not an integer number of frames.
+TEST(IlbcTest, SplitUnevenPayload) {
+  AudioDecoderIlbcImpl decoder;
+  constexpr size_t kPayloadLengthBytes = 39;  // Not an even number of frames.
+  const auto results =
+      decoder.ParsePayload(rtc::Buffer(kPayloadLengthBytes), 0);
+  EXPECT_TRUE(results.empty());
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/ilbc/index_conv_dec.c b/modules/audio_coding/codecs/ilbc/index_conv_dec.c
new file mode 100644
index 0000000..ad12cee
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/index_conv_dec.c
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_IndexConvDec.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+void WebRtcIlbcfix_IndexConvDec(
+    int16_t *index   /* (i/o) Codebook indexes */
+                                ){
+  int k;
+
+  for (k=4;k<6;k++) {
+    /* Readjust the second and third codebook index for the first 40 sample
+       so that they look the same as the first (in terms of lag)
+    */
+    if ((index[k]>=44)&&(index[k]<108)) {
+      index[k]+=64;
+    } else if ((index[k]>=108)&&(index[k]<128)) {
+      index[k]+=128;
+    } else {
+      /* ERROR */
+    }
+  }
+}
diff --git a/modules/audio_coding/codecs/ilbc/index_conv_dec.h b/modules/audio_coding/codecs/ilbc/index_conv_dec.h
new file mode 100644
index 0000000..03a721b
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/index_conv_dec.h
@@ -0,0 +1,28 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_IndexConvDec.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INDEX_CONV_DEC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INDEX_CONV_DEC_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+void WebRtcIlbcfix_IndexConvDec(
+    int16_t *index   /* (i/o) Codebook indexes */
+                                );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/index_conv_enc.c b/modules/audio_coding/codecs/ilbc/index_conv_enc.c
new file mode 100644
index 0000000..6cf164e
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/index_conv_enc.c
@@ -0,0 +1,42 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ IiLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_IndexConvEnc.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+/*----------------------------------------------------------------*
+ *  Convert the codebook indexes to make the search easier
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_IndexConvEnc(
+    int16_t *index   /* (i/o) Codebook indexes */
+                                ){
+  int k;
+
+  for (k=4;k<6;k++) {
+    /* Readjust the second and third codebook index so that it is
+       packetized into 7 bits (before it was put in lag-wise the same
+       way as for the first codebook which uses 8 bits)
+    */
+    if ((index[k]>=108)&&(index[k]<172)) {
+      index[k]-=64;
+    } else if (index[k]>=236) {
+      index[k]-=128;
+    } else {
+      /* ERROR */
+    }
+  }
+}
diff --git a/modules/audio_coding/codecs/ilbc/index_conv_enc.h b/modules/audio_coding/codecs/ilbc/index_conv_enc.h
new file mode 100644
index 0000000..9938448
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/index_conv_enc.h
@@ -0,0 +1,32 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_IndexConvEnc.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INDEX_CONV_ENC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INDEX_CONV_ENC_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  Convert the codebook indexes to make the search easier
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_IndexConvEnc(
+    int16_t *index   /* (i/o) Codebook indexes */
+                                );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/init_decode.c b/modules/audio_coding/codecs/ilbc/init_decode.c
new file mode 100644
index 0000000..c63cc7c
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/init_decode.c
@@ -0,0 +1,96 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+	iLBC Speech Coder ANSI-C Source Code
+
+	WebRtcIlbcfix_InitDecode.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ *  Initiation of decoder instance.
+ *---------------------------------------------------------------*/
+
+int WebRtcIlbcfix_InitDecode(  /* (o) Number of decoded samples */
+    IlbcDecoder *iLBCdec_inst,  /* (i/o) Decoder instance */
+    int16_t mode,  /* (i) frame size mode */
+    int use_enhancer) {  /* (i) 1: use enhancer, 0: no enhancer */
+  int i;
+
+  iLBCdec_inst->mode = mode;
+
+  /* Set all the variables that are dependent on the frame size mode */
+  if (mode==30) {
+    iLBCdec_inst->blockl = BLOCKL_30MS;
+    iLBCdec_inst->nsub = NSUB_30MS;
+    iLBCdec_inst->nasub = NASUB_30MS;
+    iLBCdec_inst->lpc_n = LPC_N_30MS;
+    iLBCdec_inst->no_of_bytes = NO_OF_BYTES_30MS;
+    iLBCdec_inst->no_of_words = NO_OF_WORDS_30MS;
+    iLBCdec_inst->state_short_len=STATE_SHORT_LEN_30MS;
+  }
+  else if (mode==20) {
+    iLBCdec_inst->blockl = BLOCKL_20MS;
+    iLBCdec_inst->nsub = NSUB_20MS;
+    iLBCdec_inst->nasub = NASUB_20MS;
+    iLBCdec_inst->lpc_n = LPC_N_20MS;
+    iLBCdec_inst->no_of_bytes = NO_OF_BYTES_20MS;
+    iLBCdec_inst->no_of_words = NO_OF_WORDS_20MS;
+    iLBCdec_inst->state_short_len=STATE_SHORT_LEN_20MS;
+  }
+  else {
+    return(-1);
+  }
+
+  /* Reset all the previous LSF to mean LSF */
+  WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->lsfdeqold, WebRtcIlbcfix_kLsfMean, LPC_FILTERORDER);
+
+  /* Clear the synthesis filter memory */
+  WebRtcSpl_MemSetW16(iLBCdec_inst->syntMem, 0, LPC_FILTERORDER);
+
+  /* Set the old synthesis filter to {1.0 0.0 ... 0.0} */
+  WebRtcSpl_MemSetW16(iLBCdec_inst->old_syntdenum, 0, ((LPC_FILTERORDER + 1)*NSUB_MAX));
+  for (i=0; i<NSUB_MAX; i++) {
+    iLBCdec_inst->old_syntdenum[i*(LPC_FILTERORDER+1)] = 4096;
+  }
+
+  /* Clear the variables that are used for the PLC */
+  iLBCdec_inst->last_lag = 20;
+  iLBCdec_inst->consPLICount = 0;
+  iLBCdec_inst->prevPLI = 0;
+  iLBCdec_inst->perSquare = 0;
+  iLBCdec_inst->prevLag = 120;
+  iLBCdec_inst->prevLpc[0] = 4096;
+  WebRtcSpl_MemSetW16(iLBCdec_inst->prevLpc+1, 0, LPC_FILTERORDER);
+  WebRtcSpl_MemSetW16(iLBCdec_inst->prevResidual, 0, BLOCKL_MAX);
+
+  /* Initialize the seed for the random number generator */
+  iLBCdec_inst->seed = 777;
+
+  /* Set the filter state of the HP filter to 0 */
+  WebRtcSpl_MemSetW16(iLBCdec_inst->hpimemx, 0, 2);
+  WebRtcSpl_MemSetW16(iLBCdec_inst->hpimemy, 0, 4);
+
+  /* Set the variables that are used in the ehnahcer */
+  iLBCdec_inst->use_enhancer = use_enhancer;
+  WebRtcSpl_MemSetW16(iLBCdec_inst->enh_buf, 0, (ENH_BUFL+ENH_BUFL_FILTEROVERHEAD));
+  for (i=0;i<ENH_NBLOCKS_TOT;i++) {
+    iLBCdec_inst->enh_period[i]=160; /* Q(-4) */
+  }
+
+  iLBCdec_inst->prev_enh_pl = 0;
+
+  return (int)(iLBCdec_inst->blockl);
+}
diff --git a/modules/audio_coding/codecs/ilbc/init_decode.h b/modules/audio_coding/codecs/ilbc/init_decode.h
new file mode 100644
index 0000000..49bd61c
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/init_decode.h
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_InitDecode.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INIT_DECODE_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INIT_DECODE_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  Initiation of decoder instance.
+ *---------------------------------------------------------------*/
+
+int WebRtcIlbcfix_InitDecode(  /* (o) Number of decoded samples */
+    IlbcDecoder *iLBCdec_inst, /* (i/o) Decoder instance */
+    int16_t mode,     /* (i) frame size mode */
+    int use_enhancer           /* (i) 1 to use enhancer
+                                  0 to run without enhancer */
+                                         );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/init_encode.c b/modules/audio_coding/codecs/ilbc/init_encode.c
new file mode 100644
index 0000000..b21f77e
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/init_encode.c
@@ -0,0 +1,71 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_InitEncode.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ *  Initiation of encoder instance.
+ *---------------------------------------------------------------*/
+
+int WebRtcIlbcfix_InitEncode(  /* (o) Number of bytes encoded */
+    IlbcEncoder *iLBCenc_inst,  /* (i/o) Encoder instance */
+    int16_t mode) {  /* (i) frame size mode */
+  iLBCenc_inst->mode = mode;
+
+  /* Set all the variables that are dependent on the frame size mode */
+  if (mode==30) {
+    iLBCenc_inst->blockl = BLOCKL_30MS;
+    iLBCenc_inst->nsub = NSUB_30MS;
+    iLBCenc_inst->nasub = NASUB_30MS;
+    iLBCenc_inst->lpc_n = LPC_N_30MS;
+    iLBCenc_inst->no_of_bytes = NO_OF_BYTES_30MS;
+    iLBCenc_inst->no_of_words = NO_OF_WORDS_30MS;
+    iLBCenc_inst->state_short_len=STATE_SHORT_LEN_30MS;
+  }
+  else if (mode==20) {
+    iLBCenc_inst->blockl = BLOCKL_20MS;
+    iLBCenc_inst->nsub = NSUB_20MS;
+    iLBCenc_inst->nasub = NASUB_20MS;
+    iLBCenc_inst->lpc_n = LPC_N_20MS;
+    iLBCenc_inst->no_of_bytes = NO_OF_BYTES_20MS;
+    iLBCenc_inst->no_of_words = NO_OF_WORDS_20MS;
+    iLBCenc_inst->state_short_len=STATE_SHORT_LEN_20MS;
+  }
+  else {
+    return(-1);
+  }
+
+  /* Clear the buffers and set the previous LSF and LSP to the mean value */
+  WebRtcSpl_MemSetW16(iLBCenc_inst->anaMem, 0, LPC_FILTERORDER);
+  WEBRTC_SPL_MEMCPY_W16(iLBCenc_inst->lsfold, WebRtcIlbcfix_kLsfMean, LPC_FILTERORDER);
+  WEBRTC_SPL_MEMCPY_W16(iLBCenc_inst->lsfdeqold, WebRtcIlbcfix_kLsfMean, LPC_FILTERORDER);
+  WebRtcSpl_MemSetW16(iLBCenc_inst->lpc_buffer, 0, LPC_LOOKBACK + BLOCKL_MAX);
+
+  /* Set the filter state of the HP filter to 0 */
+  WebRtcSpl_MemSetW16(iLBCenc_inst->hpimemx, 0, 2);
+  WebRtcSpl_MemSetW16(iLBCenc_inst->hpimemy, 0, 4);
+
+#ifdef SPLIT_10MS
+  /*Zeroing the past samples for 10msec Split*/
+  WebRtcSpl_MemSetW16(iLBCenc_inst->past_samples,0,160);
+  iLBCenc_inst->section = 0;
+#endif
+
+  return (int)(iLBCenc_inst->no_of_bytes);
+}
diff --git a/modules/audio_coding/codecs/ilbc/init_encode.h b/modules/audio_coding/codecs/ilbc/init_encode.h
new file mode 100644
index 0000000..d9b2971
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/init_encode.h
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_InitEncode.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INIT_ENCODE_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INIT_ENCODE_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  Initiation of encoder instance.
+ *---------------------------------------------------------------*/
+
+int WebRtcIlbcfix_InitEncode(  /* (o) Number of bytes encoded */
+    IlbcEncoder *iLBCenc_inst, /* (i/o) Encoder instance */
+    int16_t mode     /* (i) frame size mode */
+                                         );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/interpolate.c b/modules/audio_coding/codecs/ilbc/interpolate.c
new file mode 100644
index 0000000..3ce480e
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/interpolate.c
@@ -0,0 +1,46 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Interpolate.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ *  interpolation between vectors
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Interpolate(
+    int16_t *out, /* (o) output vector */
+    int16_t *in1, /* (i) first input vector */
+    int16_t *in2, /* (i) second input vector */
+    int16_t coef, /* (i) weight coefficient in Q14 */
+    int16_t length)  /* (i) number of sample is vectors */
+{
+  int i;
+  int16_t invcoef;
+
+  /*
+    Performs the operation out[i] = in[i]*coef + (1-coef)*in2[i] (with rounding)
+  */
+
+  invcoef = 16384 - coef; /* 16384 = 1.0 (Q14)*/
+  for (i = 0; i < length; i++) {
+    out[i] = (int16_t)((coef * in1[i] + invcoef * in2[i] + 8192) >> 14);
+  }
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/interpolate.h b/modules/audio_coding/codecs/ilbc/interpolate.h
new file mode 100644
index 0000000..fc360b4
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/interpolate.h
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Interpolate.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INTERPOLATE_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INTERPOLATE_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  interpolation between vectors
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Interpolate(
+    int16_t *out, /* (o) output vector */
+    int16_t *in1, /* (i) first input vector */
+    int16_t *in2, /* (i) second input vector */
+    int16_t coef, /* (i) weight coefficient in Q14 */
+    int16_t length); /* (i) number of sample is vectors */
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/interpolate_samples.c b/modules/audio_coding/codecs/ilbc/interpolate_samples.c
new file mode 100644
index 0000000..9ca38a4
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/interpolate_samples.c
@@ -0,0 +1,51 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_InterpolateSamples.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+void WebRtcIlbcfix_InterpolateSamples(
+    int16_t *interpSamples, /* (o) The interpolated samples */
+    int16_t *CBmem,   /* (i) The CB memory */
+    size_t lMem    /* (i) Length of the CB memory */
+                                      ) {
+  int16_t *ppi, *ppo, i, j, temp1, temp2;
+  int16_t *tmpPtr;
+
+  /* Calculate the 20 vectors of interpolated samples (4 samples each)
+     that are used in the codebooks for lag 20 to 39 */
+  tmpPtr = interpSamples;
+  for (j=0; j<20; j++) {
+    temp1 = 0;
+    temp2 = 3;
+    ppo = CBmem+lMem-4;
+    ppi = CBmem+lMem-j-24;
+    for (i=0; i<4; i++) {
+
+      *tmpPtr++ = (int16_t)((WebRtcIlbcfix_kAlpha[temp2] * *ppo) >> 15) +
+          (int16_t)((WebRtcIlbcfix_kAlpha[temp1] * *ppi) >> 15);
+
+      ppo++;
+      ppi++;
+      temp1++;
+      temp2--;
+    }
+  }
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/interpolate_samples.h b/modules/audio_coding/codecs/ilbc/interpolate_samples.h
new file mode 100644
index 0000000..f522f93
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/interpolate_samples.h
@@ -0,0 +1,34 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_InterpolateSamples.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INTERPOLATE_SAMPLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_INTERPOLATE_SAMPLES_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  Construct the interpolated samples for the Augmented CB
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_InterpolateSamples(
+    int16_t *interpSamples, /* (o) The interpolated samples */
+    int16_t *CBmem,   /* (i) The CB memory */
+    size_t lMem    /* (i) Length of the CB memory */
+                                      );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/lpc_encode.c b/modules/audio_coding/codecs/ilbc/lpc_encode.c
new file mode 100644
index 0000000..9b2a0c0
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/lpc_encode.c
@@ -0,0 +1,60 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_LpcEncode.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/simple_lpc_analysis.h"
+#include "modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.h"
+#include "modules/audio_coding/codecs/ilbc/simple_lsf_quant.h"
+#include "modules/audio_coding/codecs/ilbc/lsf_check.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ *  lpc encoder
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_LpcEncode(
+    int16_t *syntdenum,  /* (i/o) synthesis filter coefficients
+                                           before/after encoding */
+    int16_t *weightdenum, /* (i/o) weighting denumerator coefficients
+                                   before/after encoding */
+    int16_t *lsf_index,  /* (o) lsf quantization index */
+    int16_t *data,   /* (i) Speech to do LPC analysis on */
+    IlbcEncoder *iLBCenc_inst
+    /* (i/o) the encoder state structure */
+                              ) {
+  /* Stack based */
+  int16_t lsf[LPC_FILTERORDER * LPC_N_MAX];
+  int16_t lsfdeq[LPC_FILTERORDER * LPC_N_MAX];
+
+  /* Calculate LSF's from the input speech */
+  WebRtcIlbcfix_SimpleLpcAnalysis(lsf, data, iLBCenc_inst);
+
+  /* Quantize the LSF's */
+  WebRtcIlbcfix_SimpleLsfQ(lsfdeq, lsf_index, lsf, iLBCenc_inst->lpc_n);
+
+  /* Stableize the LSF's if needed */
+  WebRtcIlbcfix_LsfCheck(lsfdeq, LPC_FILTERORDER, iLBCenc_inst->lpc_n);
+
+  /* Calculate the synthesis and weighting filter coefficients from
+     the optimal LSF and the dequantized LSF */
+  WebRtcIlbcfix_SimpleInterpolateLsf(syntdenum, weightdenum,
+                                     lsf, lsfdeq, iLBCenc_inst->lsfold,
+                                     iLBCenc_inst->lsfdeqold, LPC_FILTERORDER, iLBCenc_inst);
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/lpc_encode.h b/modules/audio_coding/codecs/ilbc/lpc_encode.h
new file mode 100644
index 0000000..7255705
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/lpc_encode.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_LpcEncode.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LPC_ENCODE_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LPC_ENCODE_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  lpc encoder
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_LpcEncode(
+    int16_t *syntdenum,  /* (i/o) synthesis filter coefficients
+                                  before/after encoding */
+    int16_t *weightdenum, /* (i/o) weighting denumerator coefficients
+                                   before/after encoding */
+    int16_t *lsf_index,  /* (o) lsf quantization index */
+    int16_t *data,   /* (i) Speech to do LPC analysis on */
+    IlbcEncoder *iLBCenc_inst
+    /* (i/o) the encoder state structure */
+                             );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/lsf_check.c b/modules/audio_coding/codecs/ilbc/lsf_check.c
new file mode 100644
index 0000000..684b2ce
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/lsf_check.c
@@ -0,0 +1,71 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_LsfCheck.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ *  check for stability of lsf coefficients
+ *---------------------------------------------------------------*/
+
+int WebRtcIlbcfix_LsfCheck(
+    int16_t *lsf, /* LSF parameters */
+    int dim, /* dimension of LSF */
+    int NoAn)  /* No of analysis per frame */
+{
+  int k,n,m, Nit=2, change=0,pos;
+  const int16_t eps=319;  /* 0.039 in Q13 (50 Hz)*/
+  const int16_t eps2=160;  /* eps/2.0 in Q13;*/
+  const int16_t maxlsf=25723; /* 3.14; (4000 Hz)*/
+  const int16_t minlsf=82;  /* 0.01; (0 Hz)*/
+
+  /* LSF separation check*/
+  for (n=0;n<Nit;n++) {  /* Run through a 2 times */
+    for (m=0;m<NoAn;m++) { /* Number of analyses per frame */
+      for (k=0;k<(dim-1);k++) {
+        pos=m*dim+k;
+
+        /* Seperate coefficients with a safety margin of 50 Hz */
+        if ((lsf[pos+1]-lsf[pos])<eps) {
+
+          if (lsf[pos+1]<lsf[pos]) {
+            lsf[pos+1]= lsf[pos]+eps2;
+            lsf[pos]= lsf[pos+1]-eps2;
+          } else {
+            lsf[pos]-=eps2;
+            lsf[pos+1]+=eps2;
+          }
+          change=1;
+        }
+
+        /* Limit minimum and maximum LSF */
+        if (lsf[pos]<minlsf) {
+          lsf[pos]=minlsf;
+          change=1;
+        }
+
+        if (lsf[pos]>maxlsf) {
+          lsf[pos]=maxlsf;
+          change=1;
+        }
+      }
+    }
+  }
+
+  return change;
+}
diff --git a/modules/audio_coding/codecs/ilbc/lsf_check.h b/modules/audio_coding/codecs/ilbc/lsf_check.h
new file mode 100644
index 0000000..f92e0cc
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/lsf_check.h
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_LsfCheck.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSF_CHECK_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSF_CHECK_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  check for stability of lsf coefficients
+ *---------------------------------------------------------------*/
+
+int WebRtcIlbcfix_LsfCheck(
+    int16_t *lsf, /* LSF parameters */
+    int dim, /* dimension of LSF */
+    int NoAn); /* No of analysis per frame */
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_dec.c b/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_dec.c
new file mode 100644
index 0000000..e501f3c
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_dec.c
@@ -0,0 +1,42 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_LspInterpolate2PolyDec.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/interpolate.h"
+#include "modules/audio_coding/codecs/ilbc/lsf_to_poly.h"
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  interpolation of lsf coefficients for the decoder
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_LspInterpolate2PolyDec(
+    int16_t *a,   /* (o) lpc coefficients Q12 */
+    int16_t *lsf1,  /* (i) first set of lsf coefficients Q13 */
+    int16_t *lsf2,  /* (i) second set of lsf coefficients Q13 */
+    int16_t coef,  /* (i) weighting coefficient to use between
+                                   lsf1 and lsf2 Q14 */
+    int16_t length  /* (i) length of coefficient vectors */
+                                          ){
+  int16_t lsftmp[LPC_FILTERORDER];
+
+  /* interpolate LSF */
+  WebRtcIlbcfix_Interpolate(lsftmp, lsf1, lsf2, coef, length);
+
+  /* Compute the filter coefficients from the LSF */
+  WebRtcIlbcfix_Lsf2Poly(a, lsftmp);
+}
diff --git a/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_dec.h b/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_dec.h
new file mode 100644
index 0000000..4a6c0d5
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_dec.h
@@ -0,0 +1,37 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_LspInterpolate2PolyDec.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSF_INTERPOLATE_TO_POLY_DEC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSF_INTERPOLATE_TO_POLY_DEC_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  interpolation of lsf coefficients for the decoder
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_LspInterpolate2PolyDec(
+    int16_t *a,   /* (o) lpc coefficients Q12 */
+    int16_t *lsf1,  /* (i) first set of lsf coefficients Q13 */
+    int16_t *lsf2,  /* (i) second set of lsf coefficients Q13 */
+    int16_t coef,  /* (i) weighting coefficient to use between
+                                   lsf1 and lsf2 Q14 */
+    int16_t length  /* (i) length of coefficient vectors */
+                                          );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_enc.c b/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_enc.c
new file mode 100644
index 0000000..a660c4f
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_enc.c
@@ -0,0 +1,46 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_LsfInterpolate2PloyEnc.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/interpolate.h"
+#include "modules/audio_coding/codecs/ilbc/lsf_to_poly.h"
+
+/*----------------------------------------------------------------*
+ *  lsf interpolator and conversion from lsf to a coefficients
+ *  (subrutine to SimpleInterpolateLSF)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_LsfInterpolate2PloyEnc(
+    int16_t *a,  /* (o) lpc coefficients Q12 */
+    int16_t *lsf1, /* (i) first set of lsf coefficients Q13 */
+    int16_t *lsf2, /* (i) second set of lsf coefficients Q13 */
+    int16_t coef, /* (i) weighting coefficient to use between
+                           lsf1 and lsf2 Q14 */
+    int16_t length /* (i) length of coefficient vectors */
+                                          ) {
+  /* Stack based */
+  int16_t lsftmp[LPC_FILTERORDER];
+
+  /* interpolate LSF */
+  WebRtcIlbcfix_Interpolate(lsftmp, lsf1, lsf2, coef, length);
+
+  /* Compute the filter coefficients from the LSF */
+  WebRtcIlbcfix_Lsf2Poly(a, lsftmp);
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_enc.h b/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_enc.h
new file mode 100644
index 0000000..74863c6
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_enc.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_LsfInterpolate2PloyEnc.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSF_INTERPOLATE_TO_POLY_ENC_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSF_INTERPOLATE_TO_POLY_ENC_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  lsf interpolator and conversion from lsf to a coefficients
+ *  (subrutine to SimpleInterpolateLSF)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_LsfInterpolate2PloyEnc(
+    int16_t *a,  /* (o) lpc coefficients Q12 */
+    int16_t *lsf1, /* (i) first set of lsf coefficients Q13 */
+    int16_t *lsf2, /* (i) second set of lsf coefficients Q13 */
+    int16_t coef, /* (i) weighting coefficient to use between
+                           lsf1 and lsf2 Q14 */
+    int16_t length /* (i) length of coefficient vectors */
+                                          );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/lsf_to_lsp.c b/modules/audio_coding/codecs/ilbc/lsf_to_lsp.c
new file mode 100644
index 0000000..8767e2d
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/lsf_to_lsp.c
@@ -0,0 +1,61 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Lsf2Lsp.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ *  conversion from lsf to lsp coefficients
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Lsf2Lsp(
+    int16_t *lsf, /* (i) lsf in Q13 values between 0 and pi */
+    int16_t *lsp, /* (o) lsp in Q15 values between -1 and 1 */
+    int16_t m  /* (i) number of coefficients */
+                           ) {
+  int16_t i, k;
+  int16_t diff; /* difference, which is used for the
+                           linear approximation (Q8) */
+  int16_t freq; /* normalized frequency in Q15 (0..1) */
+  int32_t tmpW32;
+
+  for(i=0; i<m; i++)
+  {
+    freq = (int16_t)((lsf[i] * 20861) >> 15);
+    /* 20861: 1.0/(2.0*PI) in Q17 */
+    /*
+       Upper 8 bits give the index k and
+       Lower 8 bits give the difference, which needs
+       to be approximated linearly
+    */
+    k = freq >> 8;
+    diff = (freq&0x00ff);
+
+    /* Guard against getting outside table */
+
+    if (k>63) {
+      k = 63;
+    }
+
+    /* Calculate linear approximation */
+    tmpW32 = WebRtcIlbcfix_kCosDerivative[k] * diff;
+    lsp[i] = WebRtcIlbcfix_kCos[k] + (int16_t)(tmpW32 >> 12);
+  }
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/lsf_to_lsp.h b/modules/audio_coding/codecs/ilbc/lsf_to_lsp.h
new file mode 100644
index 0000000..80c0798
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/lsf_to_lsp.h
@@ -0,0 +1,34 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Lsf2Lsp.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSF_TO_LSP_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSF_TO_LSP_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  conversion from lsf to lsp coefficients
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Lsf2Lsp(
+    int16_t *lsf, /* (i) lsf in Q13 values between 0 and pi */
+    int16_t *lsp, /* (o) lsp in Q15 values between -1 and 1 */
+    int16_t m     /* (i) number of coefficients */
+                           );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/lsf_to_poly.c b/modules/audio_coding/codecs/ilbc/lsf_to_poly.c
new file mode 100644
index 0000000..4dbf96d
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/lsf_to_poly.c
@@ -0,0 +1,86 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Lsf2Poly.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/lsf_to_lsp.h"
+#include "modules/audio_coding/codecs/ilbc/get_lsp_poly.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+void WebRtcIlbcfix_Lsf2Poly(
+    int16_t *a,     /* (o) predictor coefficients (order = 10) in Q12 */
+    int16_t *lsf    /* (i) line spectral frequencies in Q13 */
+                            ) {
+  int32_t f[2][6]; /* f[0][] and f[1][] corresponds to
+                            F1(z) and F2(z) respectivly */
+  int32_t *f1ptr, *f2ptr;
+  int16_t *a1ptr, *a2ptr;
+  int32_t tmpW32;
+  int16_t lsp[10];
+  int i;
+
+  /* Convert lsf to lsp */
+  WebRtcIlbcfix_Lsf2Lsp(lsf, lsp, LPC_FILTERORDER);
+
+  /* Get F1(z) and F2(z) from the lsp */
+  f1ptr=f[0];
+  f2ptr=f[1];
+  WebRtcIlbcfix_GetLspPoly(&lsp[0],f1ptr);
+  WebRtcIlbcfix_GetLspPoly(&lsp[1],f2ptr);
+
+  /* for i = 5 down to 1
+     Compute f1[i] += f1[i-1];
+     and     f2[i] += f2[i-1];
+  */
+  f1ptr=&f[0][5];
+  f2ptr=&f[1][5];
+  for (i=5; i>0; i--)
+  {
+    (*f1ptr) += (*(f1ptr-1));
+    (*f2ptr) -= (*(f2ptr-1));
+    f1ptr--;
+    f2ptr--;
+  }
+
+  /* Get the A(z) coefficients
+     a[0] = 1.0
+     for i = 1 to 5
+     a[i] = (f1[i] + f2[i] + round)>>13;
+     for i = 1 to 5
+     a[11-i] = (f1[i] - f2[i] + round)>>13;
+  */
+  a[0]=4096;
+  a1ptr=&a[1];
+  a2ptr=&a[10];
+  f1ptr=&f[0][1];
+  f2ptr=&f[1][1];
+  for (i=5; i>0; i--)
+  {
+    tmpW32 = (*f1ptr) + (*f2ptr);
+    *a1ptr = (int16_t)((tmpW32 + 4096) >> 13);
+
+    tmpW32 = (*f1ptr) - (*f2ptr);
+    *a2ptr = (int16_t)((tmpW32 + 4096) >> 13);
+
+    a1ptr++;
+    a2ptr--;
+    f1ptr++;
+    f2ptr++;
+  }
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/lsf_to_poly.h b/modules/audio_coding/codecs/ilbc/lsf_to_poly.h
new file mode 100644
index 0000000..68c4dd0
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/lsf_to_poly.h
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Lsf2Poly.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSF_TO_POLY_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSF_TO_POLY_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  Convert from LSF coefficients to A coefficients
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Lsf2Poly(
+    int16_t *a,     /* (o) predictor coefficients (order = 10) in Q12 */
+    int16_t *lsf    /* (i) line spectral frequencies in Q13 */
+                            );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/lsp_to_lsf.c b/modules/audio_coding/codecs/ilbc/lsp_to_lsf.c
new file mode 100644
index 0000000..db11cfe
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/lsp_to_lsf.c
@@ -0,0 +1,84 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Lsp2Lsf.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ *  conversion from LSP coefficients to LSF coefficients
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Lsp2Lsf(
+    int16_t *lsp, /* (i) lsp vector -1...+1 in Q15 */
+    int16_t *lsf, /* (o) Lsf vector 0...Pi in Q13
+                           (ordered, so that lsf[i]<lsf[i+1]) */
+    int16_t m  /* (i) Number of coefficients */
+                           )
+{
+  int16_t i, k;
+  int16_t diff; /* diff between table value and desired value (Q15) */
+  int16_t freq; /* lsf/(2*pi) (Q16) */
+  int16_t *lspPtr, *lsfPtr, *cosTblPtr;
+  int16_t tmp;
+
+  /* set the index to maximum index value in WebRtcIlbcfix_kCos */
+  k = 63;
+
+  /*
+     Start with the highest LSP and then work the way down
+     For each LSP the lsf is calculated by first order approximation
+     of the acos(x) function
+  */
+  lspPtr = &lsp[9];
+  lsfPtr = &lsf[9];
+  cosTblPtr=(int16_t*)&WebRtcIlbcfix_kCos[k];
+  for(i=m-1; i>=0; i--)
+  {
+    /*
+       locate value in the table, which is just above lsp[i],
+       basically an approximation to acos(x)
+    */
+    while( (((int32_t)(*cosTblPtr)-(*lspPtr)) < 0)&&(k>0) )
+    {
+      k-=1;
+      cosTblPtr--;
+    }
+
+    /* Calculate diff, which is used in the linear approximation of acos(x) */
+    diff = (*lspPtr)-(*cosTblPtr);
+
+    /*
+       The linear approximation of acos(lsp[i]) :
+       acos(lsp[i])= k*512 + (WebRtcIlbcfix_kAcosDerivative[ind]*offset >> 11)
+    */
+
+    /* tmp (linear offset) in Q16 */
+    tmp = (int16_t)((WebRtcIlbcfix_kAcosDerivative[k] * diff) >> 11);
+
+    /* freq in Q16 */
+    freq = (k << 9) + tmp;
+
+    /* lsf = freq*2*pi */
+    (*lsfPtr) = (int16_t)(((int32_t)freq*25736)>>15);
+
+    lsfPtr--;
+    lspPtr--;
+  }
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/lsp_to_lsf.h b/modules/audio_coding/codecs/ilbc/lsp_to_lsf.h
new file mode 100644
index 0000000..666a99a
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/lsp_to_lsf.h
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Lsp2Lsf.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSP_TO_LSF_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_LSP_TO_LSF_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  conversion from LSP coefficients to LSF coefficients
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Lsp2Lsf(
+    int16_t *lsp, /* (i) lsp vector -1...+1 in Q15 */
+    int16_t *lsf, /* (o) Lsf vector 0...Pi in Q13
+                           (ordered, so that lsf[i]<lsf[i+1]) */
+    int16_t m  /* (i) Number of coefficients */
+                           );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/my_corr.c b/modules/audio_coding/codecs/ilbc/my_corr.c
new file mode 100644
index 0000000..2f2a058
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/my_corr.c
@@ -0,0 +1,54 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_MyCorr.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * compute cross correlation between sequences
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_MyCorr(
+    int32_t* corr,  /* (o) correlation of seq1 and seq2 */
+    const int16_t* seq1,  /* (i) first sequence */
+    size_t dim1,  /* (i) dimension first seq1 */
+    const int16_t* seq2, /* (i) second sequence */
+    size_t dim2   /* (i) dimension seq2 */
+                          ){
+  uint32_t max1, max2;
+  size_t loops;
+  int right_shift;
+
+  // Calculate a right shift that will let us sum dim2 pairwise products of
+  // values from the two sequences without overflowing an int32_t. (The +1 in
+  // max1 and max2 are because WebRtcSpl_MaxAbsValueW16 will return 2**15 - 1
+  // if the input array contains -2**15.)
+  max1 = WebRtcSpl_MaxAbsValueW16(seq1, dim1) + 1;
+  max2 = WebRtcSpl_MaxAbsValueW16(seq2, dim2) + 1;
+  right_shift =
+      (64 - 31) - WebRtcSpl_CountLeadingZeros64((max1 * max2) * (uint64_t)dim2);
+  if (right_shift < 0) {
+    right_shift = 0;
+  }
+
+  loops=dim1-dim2+1;
+
+  /* Calculate the cross correlations */
+  WebRtcSpl_CrossCorrelation(corr, seq2, seq1, dim2, loops, right_shift, 1);
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/my_corr.h b/modules/audio_coding/codecs/ilbc/my_corr.h
new file mode 100644
index 0000000..7c6eb19
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/my_corr.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_MyCorr.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_MY_CORR_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_MY_CORR_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * compute cross correlation between sequences
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_MyCorr(
+    int32_t* corr,  /* (o) correlation of seq1 and seq2 */
+    const int16_t* seq1,  /* (i) first sequence */
+    size_t dim1,  /* (i) dimension first seq1 */
+    const int16_t* seq2, /* (i) second sequence */
+    size_t dim2   /* (i) dimension seq2 */
+                          );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/nearest_neighbor.c b/modules/audio_coding/codecs/ilbc/nearest_neighbor.c
new file mode 100644
index 0000000..9d78528
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/nearest_neighbor.c
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_NearestNeighbor.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+void WebRtcIlbcfix_NearestNeighbor(size_t* index,
+                                   const size_t* array,
+                                   size_t value,
+                                   size_t arlength) {
+  size_t i;
+  size_t min_diff = (size_t)-1;
+  for (i = 0; i < arlength; i++) {
+    const size_t diff =
+        (array[i] < value) ? (value - array[i]) : (array[i] - value);
+    if (diff < min_diff) {
+      *index = i;
+      min_diff = diff;
+    }
+  }
+}
diff --git a/modules/audio_coding/codecs/ilbc/nearest_neighbor.h b/modules/audio_coding/codecs/ilbc/nearest_neighbor.h
new file mode 100644
index 0000000..d541fb7
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/nearest_neighbor.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_NearestNeighbor.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_NEAREST_NEIGHBOR_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_NEAREST_NEIGHBOR_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * Find index in array such that the array element with said
+ * index is the element of said array closest to "value"
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_NearestNeighbor(
+    size_t* index, /* (o) index of array element closest to value */
+    const size_t* array, /* (i) data array (Q2) */
+    size_t value, /* (i) value (Q2) */
+    size_t arlength /* (i) dimension of data array (==ENH_NBLOCKS_TOT) */
+                                   );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/pack_bits.c b/modules/audio_coding/codecs/ilbc/pack_bits.c
new file mode 100644
index 0000000..507e25e
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/pack_bits.c
@@ -0,0 +1,251 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_PackBits.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  unpacking of bits from bitstream, i.e., vector of bytes
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_PackBits(
+    uint16_t *bitstream,   /* (o) The packetized bitstream */
+    iLBC_bits *enc_bits,  /* (i) Encoded bits */
+    int16_t mode     /* (i) Codec mode (20 or 30) */
+                             ){
+  uint16_t *bitstreamPtr;
+  int i, k;
+  int16_t *tmpPtr;
+
+  bitstreamPtr=bitstream;
+
+  /* Class 1 bits of ULP */
+  /* First int16_t */
+  (*bitstreamPtr)  = ((uint16_t)enc_bits->lsf[0])<<10;   /* Bit 0..5  */
+  (*bitstreamPtr) |= (enc_bits->lsf[1])<<3;     /* Bit 6..12 */
+  (*bitstreamPtr) |= (enc_bits->lsf[2]&0x70)>>4;    /* Bit 13..15 */
+  bitstreamPtr++;
+  /* Second int16_t */
+  (*bitstreamPtr)  = ((uint16_t)enc_bits->lsf[2]&0xF)<<12;  /* Bit 0..3  */
+
+  if (mode==20) {
+    (*bitstreamPtr) |= (enc_bits->startIdx)<<10;    /* Bit 4..5  */
+    (*bitstreamPtr) |= (enc_bits->state_first)<<9;    /* Bit 6  */
+    (*bitstreamPtr) |= (enc_bits->idxForMax)<<3;    /* Bit 7..12 */
+    (*bitstreamPtr) |= ((enc_bits->cb_index[0])&0x70)>>4;  /* Bit 13..15 */
+    bitstreamPtr++;
+    /* Third int16_t */
+    (*bitstreamPtr) = ((enc_bits->cb_index[0])&0xE)<<12;  /* Bit 0..2  */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[0])&0x18)<<8;  /* Bit 3..4  */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[1])&0x8)<<7;  /* Bit 5  */
+    (*bitstreamPtr) |= ((enc_bits->cb_index[3])&0xFE)<<2;  /* Bit 6..12 */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[3])&0x10)>>2;  /* Bit 13  */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[4])&0x8)>>2;  /* Bit 14  */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[6])&0x10)>>4;  /* Bit 15  */
+  } else { /* mode==30 */
+    (*bitstreamPtr) |= (enc_bits->lsf[3])<<6;     /* Bit 4..9  */
+    (*bitstreamPtr) |= (enc_bits->lsf[4]&0x7E)>>1;    /* Bit 10..15 */
+    bitstreamPtr++;
+    /* Third int16_t */
+    (*bitstreamPtr)  = ((uint16_t)enc_bits->lsf[4]&0x1)<<15;  /* Bit 0  */
+    (*bitstreamPtr) |= (enc_bits->lsf[5])<<8;     /* Bit 1..7  */
+    (*bitstreamPtr) |= (enc_bits->startIdx)<<5;     /* Bit 8..10 */
+    (*bitstreamPtr) |= (enc_bits->state_first)<<4;    /* Bit 11  */
+    (*bitstreamPtr) |= ((enc_bits->idxForMax)&0x3C)>>2;   /* Bit 12..15 */
+    bitstreamPtr++;
+    /* 4:th int16_t */
+    (*bitstreamPtr)  = ((uint16_t)enc_bits->idxForMax&0x3)<<14; /* Bit 0..1  */
+    (*bitstreamPtr) |= (enc_bits->cb_index[0]&0x78)<<7;   /* Bit 2..5  */
+    (*bitstreamPtr) |= (enc_bits->gain_index[0]&0x10)<<5;  /* Bit 6  */
+    (*bitstreamPtr) |= (enc_bits->gain_index[1]&0x8)<<5;  /* Bit 7  */
+    (*bitstreamPtr) |= (enc_bits->cb_index[3]&0xFC);   /* Bit 8..13 */
+    (*bitstreamPtr) |= (enc_bits->gain_index[3]&0x10)>>3;  /* Bit 14  */
+    (*bitstreamPtr) |= (enc_bits->gain_index[4]&0x8)>>3;  /* Bit 15  */
+  }
+  /* Class 2 bits of ULP */
+  /* 4:th to 6:th int16_t for 20 ms case
+     5:th to 7:th int16_t for 30 ms case */
+  bitstreamPtr++;
+  tmpPtr=enc_bits->idxVec;
+  for (k=0; k<3; k++) {
+    (*bitstreamPtr) = 0;
+    for (i=15; i>=0; i--) {
+      (*bitstreamPtr) |= ((uint16_t)((*tmpPtr)&0x4)>>2)<<i;
+      /* Bit 15-i  */
+      tmpPtr++;
+    }
+    bitstreamPtr++;
+  }
+
+  if (mode==20) {
+    /* 7:th int16_t */
+    (*bitstreamPtr) = 0;
+    for (i=15; i>6; i--) {
+      (*bitstreamPtr) |= ((uint16_t)((*tmpPtr)&0x4)>>2)<<i;
+      /* Bit 15-i  */
+      tmpPtr++;
+    }
+    (*bitstreamPtr) |= (enc_bits->gain_index[1]&0x4)<<4;  /* Bit 9  */
+    (*bitstreamPtr) |= (enc_bits->gain_index[3]&0xC)<<2;  /* Bit 10..11 */
+    (*bitstreamPtr) |= (enc_bits->gain_index[4]&0x4)<<1;  /* Bit 12  */
+    (*bitstreamPtr) |= (enc_bits->gain_index[6]&0x8)>>1;  /* Bit 13  */
+    (*bitstreamPtr) |= (enc_bits->gain_index[7]&0xC)>>2;  /* Bit 14..15 */
+
+  } else { /* mode==30 */
+    /* 8:th int16_t */
+    (*bitstreamPtr) = 0;
+    for (i=15; i>5; i--) {
+      (*bitstreamPtr) |= ((uint16_t)((*tmpPtr)&0x4)>>2)<<i;
+      /* Bit 15-i  */
+      tmpPtr++;
+    }
+    (*bitstreamPtr) |= (enc_bits->cb_index[0]&0x6)<<3;   /* Bit 10..11 */
+    (*bitstreamPtr) |= (enc_bits->gain_index[0]&0x8);   /* Bit 12  */
+    (*bitstreamPtr) |= (enc_bits->gain_index[1]&0x4);   /* Bit 13  */
+    (*bitstreamPtr) |= (enc_bits->cb_index[3]&0x2);    /* Bit 14  */
+    (*bitstreamPtr) |= (enc_bits->cb_index[6]&0x80)>>7;   /* Bit 15  */
+    bitstreamPtr++;
+    /* 9:th int16_t */
+    (*bitstreamPtr)  = ((uint16_t)enc_bits->cb_index[6]&0x7E)<<9;/* Bit 0..5  */
+    (*bitstreamPtr) |= (enc_bits->cb_index[9]&0xFE)<<2;   /* Bit 6..12 */
+    (*bitstreamPtr) |= (enc_bits->cb_index[12]&0xE0)>>5;  /* Bit 13..15 */
+    bitstreamPtr++;
+    /* 10:th int16_t */
+    (*bitstreamPtr)  = ((uint16_t)enc_bits->cb_index[12]&0x1E)<<11;/* Bit 0..3 */
+    (*bitstreamPtr) |= (enc_bits->gain_index[3]&0xC)<<8;  /* Bit 4..5  */
+    (*bitstreamPtr) |= (enc_bits->gain_index[4]&0x6)<<7;  /* Bit 6..7  */
+    (*bitstreamPtr) |= (enc_bits->gain_index[6]&0x18)<<3;  /* Bit 8..9  */
+    (*bitstreamPtr) |= (enc_bits->gain_index[7]&0xC)<<2;  /* Bit 10..11 */
+    (*bitstreamPtr) |= (enc_bits->gain_index[9]&0x10)>>1;  /* Bit 12  */
+    (*bitstreamPtr) |= (enc_bits->gain_index[10]&0x8)>>1;  /* Bit 13  */
+    (*bitstreamPtr) |= (enc_bits->gain_index[12]&0x10)>>3;  /* Bit 14  */
+    (*bitstreamPtr) |= (enc_bits->gain_index[13]&0x8)>>3;  /* Bit 15  */
+  }
+  bitstreamPtr++;
+  /* Class 3 bits of ULP */
+  /*  8:th to 14:th int16_t for 20 ms case
+      11:th to 17:th int16_t for 30 ms case */
+  tmpPtr=enc_bits->idxVec;
+  for (k=0; k<7; k++) {
+    (*bitstreamPtr) = 0;
+    for (i=14; i>=0; i-=2) {
+      (*bitstreamPtr) |= ((uint16_t)((*tmpPtr)&0x3))<<i; /* Bit 15-i..14-i*/
+      tmpPtr++;
+    }
+    bitstreamPtr++;
+  }
+
+  if (mode==20) {
+    /* 15:th int16_t */
+    (*bitstreamPtr)  = ((uint16_t)((enc_bits->idxVec[56])&0x3))<<14;/* Bit 0..1 */
+    (*bitstreamPtr) |= (((enc_bits->cb_index[0])&1))<<13;  /* Bit 2  */
+    (*bitstreamPtr) |= ((enc_bits->cb_index[1]))<<6;   /* Bit 3..9  */
+    (*bitstreamPtr) |= ((enc_bits->cb_index[2])&0x7E)>>1;  /* Bit 10..15 */
+    bitstreamPtr++;
+    /* 16:th int16_t */
+    (*bitstreamPtr) = ((uint16_t)((enc_bits->cb_index[2])&0x1))<<15;
+    /* Bit 0  */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[0])&0x7)<<12;  /* Bit 1..3  */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[1])&0x3)<<10;  /* Bit 4..5  */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[2]))<<7;   /* Bit 6..8  */
+    (*bitstreamPtr) |= ((enc_bits->cb_index[3])&0x1)<<6;  /* Bit 9  */
+    (*bitstreamPtr) |= ((enc_bits->cb_index[4])&0x7E)>>1;  /* Bit 10..15 */
+    bitstreamPtr++;
+    /* 17:th int16_t */
+    (*bitstreamPtr) = ((uint16_t)((enc_bits->cb_index[4])&0x1))<<15;
+    /* Bit 0  */
+    (*bitstreamPtr) |= (enc_bits->cb_index[5])<<8;    /* Bit 1..7  */
+    (*bitstreamPtr) |= (enc_bits->cb_index[6]);     /* Bit 8..15 */
+    bitstreamPtr++;
+    /* 18:th int16_t */
+    (*bitstreamPtr) = ((uint16_t)(enc_bits->cb_index[7]))<<8; /* Bit 0..7  */
+    (*bitstreamPtr) |= (enc_bits->cb_index[8]);     /* Bit 8..15 */
+    bitstreamPtr++;
+    /* 19:th int16_t */
+    (*bitstreamPtr) = ((uint16_t)((enc_bits->gain_index[3])&0x3))<<14;
+    /* Bit 0..1  */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[4])&0x3)<<12;  /* Bit 2..3  */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[5]))<<9;   /* Bit 4..6  */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[6])&0x7)<<6;  /* Bit 7..9  */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[7])&0x3)<<4;  /* Bit 10..11 */
+    (*bitstreamPtr) |= (enc_bits->gain_index[8])<<1;   /* Bit 12..14 */
+  } else { /* mode==30 */
+    /* 18:th int16_t */
+    (*bitstreamPtr)  = ((uint16_t)((enc_bits->idxVec[56])&0x3))<<14;/* Bit 0..1 */
+    (*bitstreamPtr) |= (((enc_bits->idxVec[57])&0x3))<<12;  /* Bit 2..3  */
+    (*bitstreamPtr) |= (((enc_bits->cb_index[0])&1))<<11;  /* Bit 4  */
+    (*bitstreamPtr) |= ((enc_bits->cb_index[1]))<<4;   /* Bit 5..11 */
+    (*bitstreamPtr) |= ((enc_bits->cb_index[2])&0x78)>>3;  /* Bit 12..15 */
+    bitstreamPtr++;
+    /* 19:th int16_t */
+    (*bitstreamPtr)  = ((uint16_t)(enc_bits->cb_index[2])&0x7)<<13;
+    /* Bit 0..2  */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[0])&0x7)<<10;  /* Bit 3..5  */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[1])&0x3)<<8;  /* Bit 6..7  */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[2])&0x7)<<5;  /* Bit 8..10 */
+    (*bitstreamPtr) |= ((enc_bits->cb_index[3])&0x1)<<4;  /* Bit 11  */
+    (*bitstreamPtr) |= ((enc_bits->cb_index[4])&0x78)>>3;  /* Bit 12..15 */
+    bitstreamPtr++;
+    /* 20:th int16_t */
+    (*bitstreamPtr)  = ((uint16_t)(enc_bits->cb_index[4])&0x7)<<13;
+    /* Bit 0..2  */
+    (*bitstreamPtr) |= ((enc_bits->cb_index[5]))<<6;   /* Bit 3..9  */
+    (*bitstreamPtr) |= ((enc_bits->cb_index[6])&0x1)<<5;  /* Bit 10  */
+    (*bitstreamPtr) |= ((enc_bits->cb_index[7])&0xF8)>>3;  /* Bit 11..15 */
+    bitstreamPtr++;
+    /* 21:st int16_t */
+    (*bitstreamPtr)  = ((uint16_t)(enc_bits->cb_index[7])&0x7)<<13;
+    /* Bit 0..2  */
+    (*bitstreamPtr) |= ((enc_bits->cb_index[8]))<<5;   /* Bit 3..10 */
+    (*bitstreamPtr) |= ((enc_bits->cb_index[9])&0x1)<<4;  /* Bit 11  */
+    (*bitstreamPtr) |= ((enc_bits->cb_index[10])&0xF0)>>4;  /* Bit 12..15 */
+    bitstreamPtr++;
+    /* 22:nd int16_t */
+    (*bitstreamPtr)  = ((uint16_t)(enc_bits->cb_index[10])&0xF)<<12;
+    /* Bit 0..3  */
+    (*bitstreamPtr) |= ((enc_bits->cb_index[11]))<<4;   /* Bit 4..11 */
+    (*bitstreamPtr) |= ((enc_bits->cb_index[12])&0x1)<<3;  /* Bit 12  */
+    (*bitstreamPtr) |= ((enc_bits->cb_index[13])&0xE0)>>5;  /* Bit 13..15 */
+    bitstreamPtr++;
+    /* 23:rd int16_t */
+    (*bitstreamPtr)  = ((uint16_t)(enc_bits->cb_index[13])&0x1F)<<11;
+    /* Bit 0..4  */
+    (*bitstreamPtr) |= ((enc_bits->cb_index[14]))<<3;   /* Bit 5..12 */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[3])&0x3)<<1;  /* Bit 13..14 */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[4])&0x1);   /* Bit 15  */
+    bitstreamPtr++;
+    /* 24:rd int16_t */
+    (*bitstreamPtr)  = ((uint16_t)(enc_bits->gain_index[5]))<<13;
+    /* Bit 0..2  */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[6])&0x7)<<10;  /* Bit 3..5  */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[7])&0x3)<<8;  /* Bit 6..7  */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[8]))<<5;   /* Bit 8..10 */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[9])&0xF)<<1;  /* Bit 11..14 */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[10])&0x4)>>2;  /* Bit 15  */
+    bitstreamPtr++;
+    /* 25:rd int16_t */
+    (*bitstreamPtr)  = ((uint16_t)(enc_bits->gain_index[10])&0x3)<<14;
+    /* Bit 0..1  */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[11]))<<11;  /* Bit 2..4  */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[12])&0xF)<<7;  /* Bit 5..8  */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[13])&0x7)<<4;  /* Bit 9..11 */
+    (*bitstreamPtr) |= ((enc_bits->gain_index[14]))<<1;   /* Bit 12..14 */
+  }
+  /* Last bit is automatically zero */
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/pack_bits.h b/modules/audio_coding/codecs/ilbc/pack_bits.h
new file mode 100644
index 0000000..8ae3013
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/pack_bits.h
@@ -0,0 +1,34 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_PackBits.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_PACK_BITS_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_PACK_BITS_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  unpacking of bits from bitstream, i.e., vector of bytes
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_PackBits( 
+    uint16_t *bitstream,   /* (o) The packetized bitstream */
+    iLBC_bits *enc_bits,  /* (i) Encoded bits */
+    int16_t mode     /* (i) Codec mode (20 or 30) */
+                             );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/poly_to_lsf.c b/modules/audio_coding/codecs/ilbc/poly_to_lsf.c
new file mode 100644
index 0000000..92aa165
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/poly_to_lsf.c
@@ -0,0 +1,31 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Poly2Lsf.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/poly_to_lsp.h"
+#include "modules/audio_coding/codecs/ilbc/lsp_to_lsf.h"
+
+void WebRtcIlbcfix_Poly2Lsf(
+    int16_t *lsf,   /* (o) lsf coefficients (Q13) */
+    int16_t *a    /* (i) A coefficients (Q12) */
+                            ) {
+  int16_t lsp[10];
+  WebRtcIlbcfix_Poly2Lsp(a, lsp, (int16_t*)WebRtcIlbcfix_kLspMean);
+  WebRtcIlbcfix_Lsp2Lsf(lsp, lsf, 10);
+}
diff --git a/modules/audio_coding/codecs/ilbc/poly_to_lsf.h b/modules/audio_coding/codecs/ilbc/poly_to_lsf.h
new file mode 100644
index 0000000..f930c45
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/poly_to_lsf.h
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Poly2Lsf.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_POLY_TO_LSF_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_POLY_TO_LSF_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  conversion from lpc coefficients to lsf coefficients
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Poly2Lsf(
+    int16_t *lsf,   /* (o) lsf coefficients (Q13) */
+    int16_t *a    /* (i) A coefficients (Q12) */
+                            );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/poly_to_lsp.c b/modules/audio_coding/codecs/ilbc/poly_to_lsp.c
new file mode 100644
index 0000000..88df639
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/poly_to_lsp.c
@@ -0,0 +1,158 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Poly2Lsp.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/chebyshev.h"
+
+/*----------------------------------------------------------------*
+ * conversion from lpc coefficients to lsp coefficients
+ * function is only for 10:th order LPC
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Poly2Lsp(
+    int16_t *a,  /* (o) A coefficients in Q12 */
+    int16_t *lsp, /* (i) LSP coefficients in Q15 */
+    int16_t *old_lsp /* (i) old LSP coefficients that are used if the new
+                              coefficients turn out to be unstable */
+                            ) {
+  int16_t f[2][6]; /* f[0][] represents f1 and f[1][] represents f2 */
+  int16_t *a_i_ptr, *a_10mi_ptr;
+  int16_t *f1ptr, *f2ptr;
+  int32_t tmpW32;
+  int16_t x, y, xlow, ylow, xmid, ymid, xhigh, yhigh, xint;
+  int16_t shifts, sign;
+  int i, j;
+  int foundFreqs;
+  int fi_select;
+
+  /*
+     Calculate the two polynomials f1(z) and f2(z)
+     (the sum and the diff polynomial)
+     f1[0] = f2[0] = 1.0;
+     f1[i+1] = a[i+1] + a[10-i] - f1[i];
+     f2[i+1] = a[i+1] - a[10-i] - f1[i];
+  */
+
+  a_i_ptr = a + 1;
+  a_10mi_ptr = a + 10;
+  f1ptr = f[0];
+  f2ptr = f[1];
+  (*f1ptr) = 1024; /* 1.0 in Q10 */
+  (*f2ptr) = 1024; /* 1.0 in Q10 */
+  for (i = 0; i < 5; i++) {
+    *(f1ptr + 1) =
+        (int16_t)((((int32_t)(*a_i_ptr) + *a_10mi_ptr) >> 2) - *f1ptr);
+    *(f2ptr + 1) =
+        (int16_t)((((int32_t)(*a_i_ptr) - *a_10mi_ptr) >> 2) + *f2ptr);
+    a_i_ptr++;
+    a_10mi_ptr--;
+    f1ptr++;
+    f2ptr++;
+  }
+
+  /*
+    find the LSPs using the Chebychev pol. evaluation
+  */
+
+  fi_select = 0; /* selector between f1 and f2, start with f1 */
+
+  foundFreqs = 0;
+
+  xlow = WebRtcIlbcfix_kCosGrid[0];
+  ylow = WebRtcIlbcfix_Chebyshev(xlow, f[fi_select]);
+
+  /*
+     Iterate until all the 10 LSP's have been found or
+     all the grid points have been tried. If the 10 LSP's can
+     not be found, set the LSP vector to previous LSP
+  */
+
+  for (j = 1; j < COS_GRID_POINTS && foundFreqs < 10; j++) {
+    xhigh = xlow;
+    yhigh = ylow;
+    xlow = WebRtcIlbcfix_kCosGrid[j];
+    ylow = WebRtcIlbcfix_Chebyshev(xlow, f[fi_select]);
+
+    if (ylow * yhigh <= 0) {
+      /* Run 4 times to reduce the interval */
+      for (i = 0; i < 4; i++) {
+        /* xmid =(xlow + xhigh)/2 */
+        xmid = (xlow >> 1) + (xhigh >> 1);
+        ymid = WebRtcIlbcfix_Chebyshev(xmid, f[fi_select]);
+
+        if (ylow * ymid <= 0) {
+          yhigh = ymid;
+          xhigh = xmid;
+        } else {
+          ylow = ymid;
+          xlow = xmid;
+        }
+      }
+
+      /*
+        Calculater xint by linear interpolation:
+        xint = xlow - ylow*(xhigh-xlow)/(yhigh-ylow);
+      */
+
+      x = xhigh - xlow;
+      y = yhigh - ylow;
+
+      if (y == 0) {
+        xint = xlow;
+      } else {
+        sign = y;
+        y = WEBRTC_SPL_ABS_W16(y);
+        shifts = (int16_t)WebRtcSpl_NormW32(y)-16;
+        y <<= shifts;
+        y = (int16_t)WebRtcSpl_DivW32W16(536838144, y); /* 1/(yhigh-ylow) */
+
+        tmpW32 = (x * y) >> (19 - shifts);
+
+        /* y=(xhigh-xlow)/(yhigh-ylow) */
+        y = (int16_t)(tmpW32&0xFFFF);
+
+        if (sign < 0) {
+          y = -y;
+        }
+        /* tmpW32 = ylow*(xhigh-xlow)/(yhigh-ylow) */
+        tmpW32 = (ylow * y) >> 10;
+        xint = xlow-(int16_t)(tmpW32&0xFFFF);
+      }
+
+      /* Store the calculated lsp */
+      lsp[foundFreqs] = (int16_t)xint;
+      foundFreqs++;
+
+      /* if needed, set xlow and ylow for next recursion */
+      if (foundFreqs<10) {
+        xlow = xint;
+        /* Swap between f1 and f2 (f[0][] and f[1][]) */
+        fi_select = ((fi_select+1)&0x1);
+
+        ylow = WebRtcIlbcfix_Chebyshev(xlow, f[fi_select]);
+      }
+    }
+  }
+
+  /* Check if M roots found, if not then use the old LSP */
+  if (foundFreqs < 10) {
+    WEBRTC_SPL_MEMCPY_W16(lsp, old_lsp, 10);
+  }
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/poly_to_lsp.h b/modules/audio_coding/codecs/ilbc/poly_to_lsp.h
new file mode 100644
index 0000000..e53aa20
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/poly_to_lsp.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Poly2Lsp.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_POLY_TO_LSP_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_POLY_TO_LSP_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * conversion from lpc coefficients to lsp coefficients
+ * function is only for 10:th order LPC
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Poly2Lsp(
+    int16_t *a,  /* (o) A coefficients in Q12 */
+    int16_t *lsp, /* (i) LSP coefficients in Q15 */
+    int16_t *old_lsp /* (i) old LSP coefficients that are used if the new
+                              coefficients turn out to be unstable */
+                            );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/refiner.c b/modules/audio_coding/codecs/ilbc/refiner.c
new file mode 100644
index 0000000..d8a9bfb
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/refiner.c
@@ -0,0 +1,140 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Refiner.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/enh_upsample.h"
+#include "modules/audio_coding/codecs/ilbc/my_corr.h"
+
+/*----------------------------------------------------------------*
+ * find segment starting near idata+estSegPos that has highest
+ * correlation with idata+centerStartPos through
+ * idata+centerStartPos+ENH_BLOCKL-1 segment is found at a
+ * resolution of ENH_UPSO times the original of the original
+ * sampling rate
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Refiner(
+    size_t *updStartPos, /* (o) updated start point (Q-2) */
+    int16_t *idata,   /* (i) original data buffer */
+    size_t idatal,   /* (i) dimension of idata */
+    size_t centerStartPos, /* (i) beginning center segment */
+    size_t estSegPos,  /* (i) estimated beginning other segment (Q-2) */
+    int16_t *surround,  /* (i/o) The contribution from this sequence
+                                           summed with earlier contributions */
+    int16_t gain    /* (i) Gain to use for this sequence */
+                           ){
+  size_t estSegPosRounded, searchSegStartPos, searchSegEndPos, corrdim;
+  size_t tloc, tloc2, i;
+
+  int32_t maxtemp, scalefact;
+  int16_t *filtStatePtr, *polyPtr;
+  /* Stack based */
+  int16_t filt[7];
+  int32_t corrVecUps[ENH_CORRDIM*ENH_UPS0];
+  int32_t corrVecTemp[ENH_CORRDIM];
+  int16_t vect[ENH_VECTL];
+  int16_t corrVec[ENH_CORRDIM];
+
+  /* defining array bounds */
+
+  estSegPosRounded = (estSegPos - 2) >> 2;
+
+  searchSegStartPos =
+      (estSegPosRounded < ENH_SLOP) ? 0 : (estSegPosRounded - ENH_SLOP);
+
+  searchSegEndPos = estSegPosRounded + ENH_SLOP;
+  if ((searchSegEndPos + ENH_BLOCKL) >= idatal) {
+    searchSegEndPos = idatal - ENH_BLOCKL - 1;
+  }
+
+  corrdim = searchSegEndPos + 1 - searchSegStartPos;
+
+  /* compute upsampled correlation and find
+     location of max */
+
+  WebRtcIlbcfix_MyCorr(corrVecTemp, idata + searchSegStartPos,
+                       corrdim + ENH_BLOCKL - 1, idata + centerStartPos,
+                       ENH_BLOCKL);
+
+  /* Calculate the rescaling factor for the correlation in order to
+     put the correlation in a int16_t vector instead */
+  maxtemp = WebRtcSpl_MaxAbsValueW32(corrVecTemp, corrdim);
+
+  scalefact = WebRtcSpl_GetSizeInBits(maxtemp) - 15;
+
+  if (scalefact > 0) {
+    for (i = 0; i < corrdim; i++) {
+      corrVec[i] = (int16_t)(corrVecTemp[i] >> scalefact);
+    }
+  } else {
+    for (i = 0; i < corrdim; i++) {
+      corrVec[i] = (int16_t)corrVecTemp[i];
+    }
+  }
+  /* In order to guarantee that all values are initialized */
+  for (i = corrdim; i < ENH_CORRDIM; i++) {
+    corrVec[i] = 0;
+  }
+
+  /* Upsample the correlation */
+  WebRtcIlbcfix_EnhUpsample(corrVecUps, corrVec);
+
+  /* Find maximum */
+  tloc = WebRtcSpl_MaxIndexW32(corrVecUps, ENH_UPS0 * corrdim);
+
+  /* make vector can be upsampled without ever running outside
+     bounds */
+  *updStartPos = searchSegStartPos * 4 + tloc + 4;
+
+  tloc2 = (tloc + 3) >> 2;
+
+  /* initialize the vector to be filtered, stuff with zeros
+     when data is outside idata buffer */
+  if (ENH_FL0 > (searchSegStartPos + tloc2)) {
+    const size_t st = ENH_FL0 - searchSegStartPos - tloc2;
+    WebRtcSpl_MemSetW16(vect, 0, st);
+    WEBRTC_SPL_MEMCPY_W16(&vect[st], idata, ENH_VECTL - st);
+  } else {
+    const size_t st = searchSegStartPos + tloc2 - ENH_FL0;
+    if ((st + ENH_VECTL) > idatal) {
+      const size_t en = st + ENH_VECTL - idatal;
+      WEBRTC_SPL_MEMCPY_W16(vect, &idata[st], ENH_VECTL - en);
+      WebRtcSpl_MemSetW16(&vect[ENH_VECTL - en], 0, en);
+    } else {
+      WEBRTC_SPL_MEMCPY_W16(vect, &idata[st], ENH_VECTL);
+    }
+  }
+
+  /* compute the segment (this is actually a convolution) */
+  filtStatePtr = filt + 6;
+  polyPtr = (int16_t*)WebRtcIlbcfix_kEnhPolyPhaser[tloc2 * ENH_UPS0 - tloc];
+  for (i = 0; i < 7; i++) {
+    *filtStatePtr-- = *polyPtr++;
+  }
+
+  WebRtcSpl_FilterMAFastQ12(&vect[6], vect, filt, ENH_FLO_MULT2_PLUS1,
+                            ENH_BLOCKL);
+
+  /* Add the contribution from this vector (scaled with gain) to the total
+     surround vector */
+  WebRtcSpl_AddAffineVectorToVector(surround, vect, gain, 32768, 16,
+                                    ENH_BLOCKL);
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/refiner.h b/modules/audio_coding/codecs/ilbc/refiner.h
new file mode 100644
index 0000000..707be7f
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/refiner.h
@@ -0,0 +1,43 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Refiner.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_REFINER_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_REFINER_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * find segment starting near idata+estSegPos that has highest
+ * correlation with idata+centerStartPos through
+ * idata+centerStartPos+ENH_BLOCKL-1 segment is found at a
+ * resolution of ENH_UPSO times the original of the original
+ * sampling rate
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Refiner(
+    size_t *updStartPos, /* (o) updated start point (Q-2) */
+    int16_t *idata,   /* (i) original data buffer */
+    size_t idatal,   /* (i) dimension of idata */
+    size_t centerStartPos, /* (i) beginning center segment */
+    size_t estSegPos,  /* (i) estimated beginning other segment (Q-2) */
+    int16_t *surround,  /* (i/o) The contribution from this sequence
+                                 summed with earlier contributions */
+    int16_t gain    /* (i) Gain to use for this sequence */
+                           );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c b/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c
new file mode 100644
index 0000000..df41b28
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c
@@ -0,0 +1,131 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SimpleInterpolateLsf.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/lsf_interpolate_to_poly_enc.h"
+#include "modules/audio_coding/codecs/ilbc/bw_expand.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ *  lsf interpolator (subrutine to LPCencode)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SimpleInterpolateLsf(
+    int16_t *syntdenum, /* (o) the synthesis filter denominator
+                                   resulting from the quantized
+                                   interpolated lsf Q12 */
+    int16_t *weightdenum, /* (o) the weighting filter denominator
+                                   resulting from the unquantized
+                                   interpolated lsf Q12 */
+    int16_t *lsf,  /* (i) the unquantized lsf coefficients Q13 */
+    int16_t *lsfdeq,  /* (i) the dequantized lsf coefficients Q13 */
+    int16_t *lsfold,  /* (i) the unquantized lsf coefficients of
+                                           the previous signal frame Q13 */
+    int16_t *lsfdeqold, /* (i) the dequantized lsf coefficients of the
+                                   previous signal frame Q13 */
+    int16_t length,  /* (i) should equate FILTERORDER */
+    IlbcEncoder *iLBCenc_inst
+    /* (i/o) the encoder state structure */
+                                        ) {
+  size_t i;
+  int pos, lp_length;
+
+  int16_t *lsf2, *lsfdeq2;
+  /* Stack based */
+  int16_t lp[LPC_FILTERORDER + 1];
+
+  lsf2 = lsf + length;
+  lsfdeq2 = lsfdeq + length;
+  lp_length = length + 1;
+
+  if (iLBCenc_inst->mode==30) {
+    /* subframe 1: Interpolation between old and first set of
+       lsf coefficients */
+
+    /* Calculate Analysis/Syntehsis filter from quantized LSF */
+    WebRtcIlbcfix_LsfInterpolate2PloyEnc(lp, lsfdeqold, lsfdeq,
+                                         WebRtcIlbcfix_kLsfWeight30ms[0],
+                                         length);
+    WEBRTC_SPL_MEMCPY_W16(syntdenum, lp, lp_length);
+
+    /* Calculate Weighting filter from quantized LSF */
+    WebRtcIlbcfix_LsfInterpolate2PloyEnc(lp, lsfold, lsf,
+                                         WebRtcIlbcfix_kLsfWeight30ms[0],
+                                         length);
+    WebRtcIlbcfix_BwExpand(weightdenum, lp,
+                           (int16_t*)WebRtcIlbcfix_kLpcChirpWeightDenum,
+                           (int16_t)lp_length);
+
+    /* subframe 2 to 6: Interpolation between first and second
+       set of lsf coefficients */
+
+    pos = lp_length;
+    for (i = 1; i < iLBCenc_inst->nsub; i++) {
+
+      /* Calculate Analysis/Syntehsis filter from quantized LSF */
+      WebRtcIlbcfix_LsfInterpolate2PloyEnc(lp, lsfdeq, lsfdeq2,
+                                           WebRtcIlbcfix_kLsfWeight30ms[i],
+                                           length);
+      WEBRTC_SPL_MEMCPY_W16(syntdenum + pos, lp, lp_length);
+
+      /* Calculate Weighting filter from quantized LSF */
+      WebRtcIlbcfix_LsfInterpolate2PloyEnc(lp, lsf, lsf2,
+                                           WebRtcIlbcfix_kLsfWeight30ms[i],
+                                           length);
+      WebRtcIlbcfix_BwExpand(weightdenum + pos, lp,
+                             (int16_t*)WebRtcIlbcfix_kLpcChirpWeightDenum,
+                             (int16_t)lp_length);
+
+      pos += lp_length;
+    }
+
+    /* update memory */
+
+    WEBRTC_SPL_MEMCPY_W16(lsfold, lsf2, length);
+    WEBRTC_SPL_MEMCPY_W16(lsfdeqold, lsfdeq2, length);
+
+  } else { /* iLBCenc_inst->mode==20 */
+    pos = 0;
+    for (i = 0; i < iLBCenc_inst->nsub; i++) {
+
+      /* Calculate Analysis/Syntehsis filter from quantized LSF */
+      WebRtcIlbcfix_LsfInterpolate2PloyEnc(lp, lsfdeqold, lsfdeq,
+                                           WebRtcIlbcfix_kLsfWeight20ms[i],
+                                           length);
+      WEBRTC_SPL_MEMCPY_W16(syntdenum + pos, lp, lp_length);
+
+      /* Calculate Weighting filter from quantized LSF */
+      WebRtcIlbcfix_LsfInterpolate2PloyEnc(lp, lsfold, lsf,
+                                           WebRtcIlbcfix_kLsfWeight20ms[i],
+                                           length);
+      WebRtcIlbcfix_BwExpand(weightdenum+pos, lp,
+                             (int16_t*)WebRtcIlbcfix_kLpcChirpWeightDenum,
+                             (int16_t)lp_length);
+
+      pos += lp_length;
+    }
+
+    /* update memory */
+
+    WEBRTC_SPL_MEMCPY_W16(lsfold, lsf, length);
+    WEBRTC_SPL_MEMCPY_W16(lsfdeqold, lsfdeq, length);
+
+  }
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.h b/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.h
new file mode 100644
index 0000000..61a5625
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.h
@@ -0,0 +1,46 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SimpleInterpolateLsf.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SIMPLE_INTERPOLATE_LSF_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SIMPLE_INTERPOLATE_LSF_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  lsf interpolator (subrutine to LPCencode)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SimpleInterpolateLsf(
+    int16_t *syntdenum, /* (o) the synthesis filter denominator
+                                   resulting from the quantized
+                                   interpolated lsf Q12 */
+    int16_t *weightdenum, /* (o) the weighting filter denominator
+                                   resulting from the unquantized
+                                   interpolated lsf Q12 */
+    int16_t *lsf,  /* (i) the unquantized lsf coefficients Q13 */
+    int16_t *lsfdeq,  /* (i) the dequantized lsf coefficients Q13 */
+    int16_t *lsfold,  /* (i) the unquantized lsf coefficients of
+                                           the previous signal frame Q13 */
+    int16_t *lsfdeqold, /* (i) the dequantized lsf coefficients of the
+                                   previous signal frame Q13 */
+    int16_t length,  /* (i) should equate FILTERORDER */
+    IlbcEncoder *iLBCenc_inst
+    /* (i/o) the encoder state structure */
+                                        );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c b/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c
new file mode 100644
index 0000000..09e64ac
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c
@@ -0,0 +1,94 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SimpleLpcAnalysis.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/window32_w32.h"
+#include "modules/audio_coding/codecs/ilbc/bw_expand.h"
+#include "modules/audio_coding/codecs/ilbc/poly_to_lsf.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ *  lpc analysis (subrutine to LPCencode)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SimpleLpcAnalysis(
+    int16_t *lsf,   /* (o) lsf coefficients */
+    int16_t *data,   /* (i) new block of speech */
+    IlbcEncoder *iLBCenc_inst
+    /* (i/o) the encoder state structure */
+                                     ) {
+  int k;
+  int scale;
+  size_t is;
+  int16_t stability;
+  /* Stack based */
+  int16_t A[LPC_FILTERORDER + 1];
+  int32_t R[LPC_FILTERORDER + 1];
+  int16_t windowedData[BLOCKL_MAX];
+  int16_t rc[LPC_FILTERORDER];
+
+  is=LPC_LOOKBACK+BLOCKL_MAX-iLBCenc_inst->blockl;
+  WEBRTC_SPL_MEMCPY_W16(iLBCenc_inst->lpc_buffer+is,data,iLBCenc_inst->blockl);
+
+  /* No lookahead, last window is asymmetric */
+
+  for (k = 0; k < iLBCenc_inst->lpc_n; k++) {
+
+    is = LPC_LOOKBACK;
+
+    if (k < (iLBCenc_inst->lpc_n - 1)) {
+
+      /* Hanning table WebRtcIlbcfix_kLpcWin[] is in Q15-domain so the output is right-shifted 15 */
+      WebRtcSpl_ElementwiseVectorMult(windowedData, iLBCenc_inst->lpc_buffer, WebRtcIlbcfix_kLpcWin, BLOCKL_MAX, 15);
+    } else {
+
+      /* Hanning table WebRtcIlbcfix_kLpcAsymWin[] is in Q15-domain so the output is right-shifted 15 */
+      WebRtcSpl_ElementwiseVectorMult(windowedData, iLBCenc_inst->lpc_buffer+is, WebRtcIlbcfix_kLpcAsymWin, BLOCKL_MAX, 15);
+    }
+
+    /* Compute autocorrelation */
+    WebRtcSpl_AutoCorrelation(windowedData, BLOCKL_MAX, LPC_FILTERORDER, R, &scale);
+
+    /* Window autocorrelation vector */
+    WebRtcIlbcfix_Window32W32(R, R, WebRtcIlbcfix_kLpcLagWin, LPC_FILTERORDER + 1 );
+
+    /* Calculate the A coefficients from the Autocorrelation using Levinson Durbin algorithm */
+    stability=WebRtcSpl_LevinsonDurbin(R, A, rc, LPC_FILTERORDER);
+
+    /*
+       Set the filter to {1.0, 0.0, 0.0,...} if filter from Levinson Durbin algorithm is unstable
+       This should basically never happen...
+    */
+    if (stability!=1) {
+      A[0]=4096;
+      WebRtcSpl_MemSetW16(&A[1], 0, LPC_FILTERORDER);
+    }
+
+    /* Bandwidth expand the filter coefficients */
+    WebRtcIlbcfix_BwExpand(A, A, (int16_t*)WebRtcIlbcfix_kLpcChirpSyntDenum, LPC_FILTERORDER+1);
+
+    /* Convert from A to LSF representation */
+    WebRtcIlbcfix_Poly2Lsf(lsf + k*LPC_FILTERORDER, A);
+  }
+
+  is=LPC_LOOKBACK+BLOCKL_MAX-iLBCenc_inst->blockl;
+  WEBRTC_SPL_MEMCPY_W16(iLBCenc_inst->lpc_buffer,
+                        iLBCenc_inst->lpc_buffer+LPC_LOOKBACK+BLOCKL_MAX-is, is);
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.h b/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.h
new file mode 100644
index 0000000..5eaa3d7
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.h
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SimpleLpcAnalysis.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SIMPLE_LPC_ANALYSIS_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SIMPLE_LPC_ANALYSIS_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  lpc analysis (subrutine to LPCencode)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SimpleLpcAnalysis(
+    int16_t *lsf,   /* (o) lsf coefficients */
+    int16_t *data,   /* (i) new block of speech */
+    IlbcEncoder *iLBCenc_inst
+    /* (i/o) the encoder state structure */
+                                     );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/simple_lsf_dequant.c b/modules/audio_coding/codecs/ilbc/simple_lsf_dequant.c
new file mode 100644
index 0000000..90673a2
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/simple_lsf_dequant.c
@@ -0,0 +1,60 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SimpleLsfDeQ.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ *  obtain dequantized lsf coefficients from quantization index
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SimpleLsfDeQ(
+    int16_t *lsfdeq,  /* (o) dequantized lsf coefficients */
+    int16_t *index,  /* (i) quantization index */
+    int16_t lpc_n  /* (i) number of LPCs */
+                                ){
+  int i, j, pos, cb_pos;
+
+  /* decode first LSF */
+
+  pos = 0;
+  cb_pos = 0;
+  for (i = 0; i < LSF_NSPLIT; i++) {
+    for (j = 0; j < WebRtcIlbcfix_kLsfDimCb[i]; j++) {
+      lsfdeq[pos + j] = WebRtcIlbcfix_kLsfCb[cb_pos + j + index[i] *
+                                             WebRtcIlbcfix_kLsfDimCb[i]];
+    }
+    pos += WebRtcIlbcfix_kLsfDimCb[i];
+    cb_pos += WebRtcIlbcfix_kLsfSizeCb[i] * WebRtcIlbcfix_kLsfDimCb[i];
+  }
+
+  if (lpc_n>1) {
+    /* decode last LSF */
+    pos = 0;
+    cb_pos = 0;
+    for (i = 0; i < LSF_NSPLIT; i++) {
+      for (j = 0; j < WebRtcIlbcfix_kLsfDimCb[i]; j++) {
+        lsfdeq[LPC_FILTERORDER + pos + j] = WebRtcIlbcfix_kLsfCb[
+            cb_pos + index[LSF_NSPLIT + i] * WebRtcIlbcfix_kLsfDimCb[i] + j];
+      }
+      pos += WebRtcIlbcfix_kLsfDimCb[i];
+      cb_pos += WebRtcIlbcfix_kLsfSizeCb[i] * WebRtcIlbcfix_kLsfDimCb[i];
+    }
+  }
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/simple_lsf_dequant.h b/modules/audio_coding/codecs/ilbc/simple_lsf_dequant.h
new file mode 100644
index 0000000..d78d714
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/simple_lsf_dequant.h
@@ -0,0 +1,34 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SimpleLsfDeQ.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SIMPLE_LSF_DEQUANT_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SIMPLE_LSF_DEQUANT_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  obtain dequantized lsf coefficients from quantization index
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SimpleLsfDeQ(
+    int16_t *lsfdeq,  /* (o) dequantized lsf coefficients */
+    int16_t *index,  /* (i) quantization index */
+    int16_t lpc_n  /* (i) number of LPCs */
+                                );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/simple_lsf_quant.c b/modules/audio_coding/codecs/ilbc/simple_lsf_quant.c
new file mode 100644
index 0000000..45373a9
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/simple_lsf_quant.c
@@ -0,0 +1,47 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SimpleLsfQ.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/split_vq.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ *  lsf quantizer (subrutine to LPCencode)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SimpleLsfQ(
+    int16_t *lsfdeq, /* (o) dequantized lsf coefficients
+                                   (dimension FILTERORDER) Q13 */
+    int16_t *index, /* (o) quantization index */
+    int16_t *lsf, /* (i) the lsf coefficient vector to be
+                           quantized (dimension FILTERORDER) Q13 */
+    int16_t lpc_n /* (i) number of lsf sets to quantize */
+                              ){
+
+  /* Quantize first LSF with memoryless split VQ */
+  WebRtcIlbcfix_SplitVq( lsfdeq, index, lsf,
+                         (int16_t*)WebRtcIlbcfix_kLsfCb, (int16_t*)WebRtcIlbcfix_kLsfDimCb, (int16_t*)WebRtcIlbcfix_kLsfSizeCb);
+
+  if (lpc_n==2) {
+    /* Quantize second LSF with memoryless split VQ */
+    WebRtcIlbcfix_SplitVq( lsfdeq + LPC_FILTERORDER, index + LSF_NSPLIT,
+                           lsf + LPC_FILTERORDER, (int16_t*)WebRtcIlbcfix_kLsfCb,
+                           (int16_t*)WebRtcIlbcfix_kLsfDimCb, (int16_t*)WebRtcIlbcfix_kLsfSizeCb);
+  }
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/simple_lsf_quant.h b/modules/audio_coding/codecs/ilbc/simple_lsf_quant.h
new file mode 100644
index 0000000..5e4e6f1
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/simple_lsf_quant.h
@@ -0,0 +1,37 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SimpleLsfQ.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SIMPLE_LSF_QUANT_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SIMPLE_LSF_QUANT_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  lsf quantizer (subrutine to LPCencode)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SimpleLsfQ(
+    int16_t *lsfdeq, /* (o) dequantized lsf coefficients
+                                   (dimension FILTERORDER) Q13 */
+    int16_t *index, /* (o) quantization index */
+    int16_t *lsf, /* (i) the lsf coefficient vector to be
+                           quantized (dimension FILTERORDER) Q13 */
+    int16_t lpc_n /* (i) number of lsf sets to quantize */
+                              );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/smooth.c b/modules/audio_coding/codecs/ilbc/smooth.c
new file mode 100644
index 0000000..edafb0c
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/smooth.c
@@ -0,0 +1,210 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Smooth.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/smooth_out_data.h"
+
+/*----------------------------------------------------------------*
+ * find the smoothed output data
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Smooth(
+    int16_t *odata,   /* (o) smoothed output */
+    int16_t *current,  /* (i) the un enhanced residual for
+                                this block */
+    int16_t *surround  /* (i) The approximation from the
+                                surrounding sequences */
+                          ) {
+  int16_t scale, scale1, scale2;
+  int16_t A, B, C, denomW16;
+  int32_t B_W32, denom, num;
+  int32_t errs;
+  int32_t w00,w10,w11, endiff, crit;
+  int32_t w00prim, w10prim, w11_div_w00;
+  int16_t w11prim;
+  int16_t bitsw00, bitsw10, bitsw11;
+  int32_t w11w00, w10w10, w00w00;
+  uint32_t max1, max2, max12;
+
+  /* compute some inner products (ensure no overflow by first calculating proper scale factor) */
+
+  w00 = w10 = w11 = 0;
+
+  // Calculate a right shift that will let us sum ENH_BLOCKL pairwise products
+  // of values from the two sequences without overflowing an int32_t. (The +1
+  // in max1 and max2 are because WebRtcSpl_MaxAbsValueW16 will return 2**15 -
+  // 1 if the input array contains -2**15.)
+  max1 = WebRtcSpl_MaxAbsValueW16(current, ENH_BLOCKL) + 1;
+  max2 = WebRtcSpl_MaxAbsValueW16(surround, ENH_BLOCKL) + 1;
+  max12 = WEBRTC_SPL_MAX(max1, max2);
+  scale = (64 - 31) -
+          WebRtcSpl_CountLeadingZeros64((max12 * max12) * (uint64_t)ENH_BLOCKL);
+  scale=WEBRTC_SPL_MAX(0, scale);
+
+  w00=WebRtcSpl_DotProductWithScale(current,current,ENH_BLOCKL,scale);
+  w11=WebRtcSpl_DotProductWithScale(surround,surround,ENH_BLOCKL,scale);
+  w10=WebRtcSpl_DotProductWithScale(surround,current,ENH_BLOCKL,scale);
+
+  if (w00<0) w00 = WEBRTC_SPL_WORD32_MAX;
+  if (w11<0) w11 = WEBRTC_SPL_WORD32_MAX;
+
+  /* Rescale w00 and w11 to w00prim and w11prim, so that w00prim/w11prim
+     is in Q16 */
+
+  bitsw00 = WebRtcSpl_GetSizeInBits(w00);
+  bitsw11 = WebRtcSpl_GetSizeInBits(w11);
+  bitsw10 = WebRtcSpl_GetSizeInBits(WEBRTC_SPL_ABS_W32(w10));
+  scale1 = 31 - bitsw00;
+  scale2 = 15 - bitsw11;
+
+  if (scale2>(scale1-16)) {
+    scale2 = scale1 - 16;
+  } else {
+    scale1 = scale2 + 16;
+  }
+
+  w00prim = w00 << scale1;
+  w11prim = (int16_t) WEBRTC_SPL_SHIFT_W32(w11, scale2);
+
+  /* Perform C = sqrt(w11/w00) (C is in Q11 since (16+6)/2=11) */
+  if (w11prim>64) {
+    endiff = WebRtcSpl_DivW32W16(w00prim, w11prim) << 6;
+    C = (int16_t)WebRtcSpl_SqrtFloor(endiff); /* C is in Q11 */
+  } else {
+    C = 1;
+  }
+
+  /* first try enhancement without power-constraint */
+
+  errs = WebRtcIlbcfix_Smooth_odata(odata, current, surround, C);
+
+
+
+  /* if constraint violated by first try, add constraint */
+
+  if ( (6-scale+scale1) > 31) {
+    crit=0;
+  } else {
+    /* crit = 0.05 * w00 (Result in Q-6) */
+    crit = WEBRTC_SPL_SHIFT_W32(
+        WEBRTC_SPL_MUL(ENH_A0, w00prim >> 14),
+        -(6-scale+scale1));
+  }
+
+  if (errs > crit) {
+
+    if( w00 < 1) {
+      w00=1;
+    }
+
+    /* Calculate w11*w00, w10*w10 and w00*w00 in the same Q domain */
+
+    scale1 = bitsw00-15;
+    scale2 = bitsw11-15;
+
+    if (scale2>scale1) {
+      scale = scale2;
+    } else {
+      scale = scale1;
+    }
+
+    w11w00 = (int16_t)WEBRTC_SPL_SHIFT_W32(w11, -scale) *
+        (int16_t)WEBRTC_SPL_SHIFT_W32(w00, -scale);
+
+    w10w10 = (int16_t)WEBRTC_SPL_SHIFT_W32(w10, -scale) *
+        (int16_t)WEBRTC_SPL_SHIFT_W32(w10, -scale);
+
+    w00w00 = (int16_t)WEBRTC_SPL_SHIFT_W32(w00, -scale) *
+        (int16_t)WEBRTC_SPL_SHIFT_W32(w00, -scale);
+
+    /* Calculate (w11*w00-w10*w10)/(w00*w00) in Q16 */
+    if (w00w00>65536) {
+      endiff = (w11w00-w10w10);
+      endiff = WEBRTC_SPL_MAX(0, endiff);
+      /* denom is in Q16 */
+      denom = WebRtcSpl_DivW32W16(endiff, (int16_t)(w00w00 >> 16));
+    } else {
+      denom = 65536;
+    }
+
+    if( denom > 7){ /* eliminates numerical problems
+                       for if smooth */
+
+      scale=WebRtcSpl_GetSizeInBits(denom)-15;
+
+      if (scale>0) {
+        /* denomW16 is in Q(16+scale) */
+        denomW16 = (int16_t)(denom >> scale);
+
+        /* num in Q(34-scale) */
+        num = ENH_A0_MINUS_A0A0DIV4 >> scale;
+      } else {
+        /* denomW16 is in Q16 */
+        denomW16=(int16_t)denom;
+
+        /* num in Q34 */
+        num=ENH_A0_MINUS_A0A0DIV4;
+      }
+
+      /* A sqrt( (ENH_A0-(ENH_A0^2)/4)*(w00*w00)/(w11*w00 + w10*w10) ) in Q9 */
+      A = (int16_t)WebRtcSpl_SqrtFloor(WebRtcSpl_DivW32W16(num, denomW16));
+
+      /* B_W32 is in Q30 ( B = 1 - ENH_A0/2 - A * w10/w00 ) */
+      scale1 = 31-bitsw10;
+      scale2 = 21-scale1;
+      w10prim = w10 == 0 ? 0 : w10 * (1 << scale1);
+      w00prim = WEBRTC_SPL_SHIFT_W32(w00, -scale2);
+      scale = bitsw00-scale2-15;
+
+      if (scale>0) {
+        w10prim >>= scale;
+        w00prim >>= scale;
+      }
+
+      if ((w00prim>0)&&(w10prim>0)) {
+        w11_div_w00=WebRtcSpl_DivW32W16(w10prim, (int16_t)w00prim);
+
+        if (WebRtcSpl_GetSizeInBits(w11_div_w00)+WebRtcSpl_GetSizeInBits(A)>31) {
+          B_W32 = 0;
+        } else {
+          B_W32 = (int32_t)1073741824 - (int32_t)ENH_A0DIV2 -
+              WEBRTC_SPL_MUL(A, w11_div_w00);
+        }
+        B = (int16_t)(B_W32 >> 16);  /* B in Q14. */
+      } else {
+        /* No smoothing */
+        A = 0;
+        B = 16384; /* 1 in Q14 */
+      }
+    }
+    else{ /* essentially no difference between cycles;
+             smoothing not needed */
+
+      A = 0;
+      B = 16384; /* 1 in Q14 */
+    }
+
+    /* create smoothed sequence */
+
+    WebRtcSpl_ScaleAndAddVectors(surround, A, 9,
+                                current, B, 14,
+                                odata, ENH_BLOCKL);
+  }
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/smooth.h b/modules/audio_coding/codecs/ilbc/smooth.h
new file mode 100644
index 0000000..a8d1706
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/smooth.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Smooth.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SMOOTH_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SMOOTH_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * find the smoothed output data
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Smooth(
+    int16_t *odata,   /* (o) smoothed output */
+    int16_t *current,  /* (i) the un enhanced residual for
+                                this block */
+    int16_t *surround  /* (i) The approximation from the
+                                surrounding sequences */
+                          );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/smooth_out_data.c b/modules/audio_coding/codecs/ilbc/smooth_out_data.c
new file mode 100644
index 0000000..1aa1e0a
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/smooth_out_data.c
@@ -0,0 +1,44 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Smooth_odata.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+int32_t WebRtcIlbcfix_Smooth_odata(
+    int16_t *odata,
+    int16_t *psseq,
+    int16_t *surround,
+    int16_t C)
+{
+  int i;
+
+  int16_t err;
+  int32_t errs;
+
+  for(i=0;i<80;i++) {
+    odata[i]= (int16_t)((C * surround[i] + 1024) >> 11);
+  }
+
+  errs=0;
+  for(i=0;i<80;i++) {
+    err = (psseq[i] - odata[i]) >> 3;
+    errs += err * err;  /* errs in Q-6 */
+  }
+
+  return errs;
+}
diff --git a/modules/audio_coding/codecs/ilbc/smooth_out_data.h b/modules/audio_coding/codecs/ilbc/smooth_out_data.h
new file mode 100644
index 0000000..6370d10
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/smooth_out_data.h
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Smooth_odata.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SMOOTH_OUT_DATA_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SMOOTH_OUT_DATA_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * help function to WebRtcIlbcfix_Smooth()
+ *---------------------------------------------------------------*/
+
+int32_t WebRtcIlbcfix_Smooth_odata(
+    int16_t *odata,
+    int16_t *psseq,
+    int16_t *surround,
+    int16_t C);
+
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/sort_sq.c b/modules/audio_coding/codecs/ilbc/sort_sq.c
new file mode 100644
index 0000000..dd3ca80
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/sort_sq.c
@@ -0,0 +1,51 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SortSq.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  scalar quantization
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SortSq(
+    int16_t *xq,   /* (o) the quantized value */
+    int16_t *index,  /* (o) the quantization index */
+    int16_t x,   /* (i) the value to quantize */
+    const int16_t *cb, /* (i) the quantization codebook */
+    int16_t cb_size  /* (i) the size of the quantization codebook */
+                          ){
+  int i;
+
+  if (x <= cb[0]) {
+    *index = 0;
+    *xq = cb[0];
+  } else {
+    i = 0;
+    while ((x > cb[i]) && (i < (cb_size-1))) {
+      i++;
+    }
+
+    if (x > (((int32_t)cb[i] + cb[i - 1] + 1) >> 1)) {
+      *index = i;
+      *xq = cb[i];
+    } else {
+      *index = i - 1;
+      *xq = cb[i - 1];
+    }
+  }
+}
diff --git a/modules/audio_coding/codecs/ilbc/sort_sq.h b/modules/audio_coding/codecs/ilbc/sort_sq.h
new file mode 100644
index 0000000..f3c01ef
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/sort_sq.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SortSq.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SORT_SQ_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SORT_SQ_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  scalar quantization
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SortSq(
+    int16_t *xq,   /* (o) the quantized value */
+    int16_t *index,  /* (o) the quantization index */
+    int16_t x,   /* (i) the value to quantize */
+    const int16_t *cb, /* (i) the quantization codebook */
+    int16_t cb_size  /* (i) the size of the quantization codebook */
+                           );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/split_vq.c b/modules/audio_coding/codecs/ilbc/split_vq.c
new file mode 100644
index 0000000..2f218ed
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/split_vq.c
@@ -0,0 +1,61 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SplitVq.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/vq3.h"
+#include "modules/audio_coding/codecs/ilbc/vq4.h"
+
+/*----------------------------------------------------------------*
+ *  split vector quantization
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SplitVq(
+    int16_t *qX,  /* (o) the quantized vector in Q13 */
+    int16_t *index, /* (o) a vector of indexes for all vector
+                                   codebooks in the split */
+    int16_t *X,  /* (i) the vector to quantize */
+    int16_t *CB,  /* (i) the quantizer codebook in Q13 */
+    int16_t *dim, /* (i) the dimension of X and qX */
+    int16_t *cbsize /* (i) the number of vectors in the codebook */
+                           ) {
+
+  int16_t *qXPtr, *indexPtr, *CBPtr, *XPtr;
+
+  /* Quantize X with the 3 vectror quantization tables */
+
+  qXPtr=qX;
+  indexPtr=index;
+  CBPtr=CB;
+  XPtr=X;
+  WebRtcIlbcfix_Vq3(qXPtr, indexPtr, CBPtr, XPtr, cbsize[0]);
+
+  qXPtr+=3;
+  indexPtr+=1;
+  CBPtr+=(dim[0]*cbsize[0]);
+  XPtr+=3;
+  WebRtcIlbcfix_Vq3(qXPtr, indexPtr, CBPtr, XPtr, cbsize[1]);
+
+  qXPtr+=3;
+  indexPtr+=1;
+  CBPtr+=(dim[1]*cbsize[1]);
+  XPtr+=3;
+  WebRtcIlbcfix_Vq4(qXPtr, indexPtr, CBPtr, XPtr, cbsize[2]);
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/split_vq.h b/modules/audio_coding/codecs/ilbc/split_vq.h
new file mode 100644
index 0000000..a758159
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/split_vq.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SplitVq.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SPLIT_VQ_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SPLIT_VQ_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  split vector quantization
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SplitVq(
+    int16_t *qX,  /* (o) the quantized vector in Q13 */
+    int16_t *index, /* (o) a vector of indexes for all vector
+                                   codebooks in the split */
+    int16_t *X,  /* (i) the vector to quantize */
+    int16_t *CB,  /* (i) the quantizer codebook in Q13 */
+    int16_t *dim, /* (i) the dimension of X and qX */
+    int16_t *cbsize /* (i) the number of vectors in the codebook */
+                           );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/state_construct.c b/modules/audio_coding/codecs/ilbc/state_construct.c
new file mode 100644
index 0000000..753415b
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/state_construct.c
@@ -0,0 +1,114 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_StateConstruct.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ *  decoding of the start state
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_StateConstruct(
+    size_t idxForMax,   /* (i) 6-bit index for the quantization of
+                                           max amplitude */
+    int16_t *idxVec,   /* (i) vector of quantization indexes */
+    int16_t *syntDenum,  /* (i) synthesis filter denumerator */
+    int16_t *Out_fix,  /* (o) the decoded state vector */
+    size_t len    /* (i) length of a state vector */
+                                  ) {
+  size_t k;
+  int16_t maxVal;
+  int16_t *tmp1, *tmp2, *tmp3;
+  /* Stack based */
+  int16_t numerator[1+LPC_FILTERORDER];
+  int16_t sampleValVec[2*STATE_SHORT_LEN_30MS+LPC_FILTERORDER];
+  int16_t sampleMaVec[2*STATE_SHORT_LEN_30MS+LPC_FILTERORDER];
+  int16_t *sampleVal = &sampleValVec[LPC_FILTERORDER];
+  int16_t *sampleMa = &sampleMaVec[LPC_FILTERORDER];
+  int16_t *sampleAr = &sampleValVec[LPC_FILTERORDER];
+
+  /* initialization of coefficients */
+
+  for (k=0; k<LPC_FILTERORDER+1; k++){
+    numerator[k] = syntDenum[LPC_FILTERORDER-k];
+  }
+
+  /* decoding of the maximum value */
+
+  maxVal = WebRtcIlbcfix_kFrgQuantMod[idxForMax];
+
+  /* decoding of the sample values */
+  tmp1 = sampleVal;
+  tmp2 = &idxVec[len-1];
+
+  if (idxForMax<37) {
+    for(k=0; k<len; k++){
+      /*the shifting is due to the Q13 in sq4_fixQ13[i], also the adding of 2097152 (= 0.5 << 22)
+        maxVal is in Q8 and result is in Q(-1) */
+      *tmp1 = (int16_t)((maxVal * WebRtcIlbcfix_kStateSq3[*tmp2] + 2097152) >>
+          22);
+      tmp1++;
+      tmp2--;
+    }
+  } else if (idxForMax<59) {
+    for(k=0; k<len; k++){
+      /*the shifting is due to the Q13 in sq4_fixQ13[i], also the adding of 262144 (= 0.5 << 19)
+        maxVal is in Q5 and result is in Q(-1) */
+      *tmp1 = (int16_t)((maxVal * WebRtcIlbcfix_kStateSq3[*tmp2] + 262144) >>
+          19);
+      tmp1++;
+      tmp2--;
+    }
+  } else {
+    for(k=0; k<len; k++){
+      /*the shifting is due to the Q13 in sq4_fixQ13[i], also the adding of 65536 (= 0.5 << 17)
+        maxVal is in Q3 and result is in Q(-1) */
+      *tmp1 = (int16_t)((maxVal * WebRtcIlbcfix_kStateSq3[*tmp2] + 65536) >>
+          17);
+      tmp1++;
+      tmp2--;
+    }
+  }
+
+  /* Set the rest of the data to zero */
+  WebRtcSpl_MemSetW16(&sampleVal[len], 0, len);
+
+  /* circular convolution with all-pass filter */
+
+  /* Set the state to zero */
+  WebRtcSpl_MemSetW16(sampleValVec, 0, (LPC_FILTERORDER));
+
+  /* Run MA filter + AR filter */
+  WebRtcSpl_FilterMAFastQ12(
+      sampleVal, sampleMa,
+      numerator, LPC_FILTERORDER+1, len + LPC_FILTERORDER);
+  WebRtcSpl_MemSetW16(&sampleMa[len + LPC_FILTERORDER], 0, (len - LPC_FILTERORDER));
+  WebRtcSpl_FilterARFastQ12(
+      sampleMa, sampleAr,
+      syntDenum, LPC_FILTERORDER+1, 2 * len);
+
+  tmp1 = &sampleAr[len-1];
+  tmp2 = &sampleAr[2*len-1];
+  tmp3 = Out_fix;
+  for(k=0;k<len;k++){
+    (*tmp3) = (*tmp1) + (*tmp2);
+    tmp1--;
+    tmp2--;
+    tmp3++;
+  }
+}
diff --git a/modules/audio_coding/codecs/ilbc/state_construct.h b/modules/audio_coding/codecs/ilbc/state_construct.h
new file mode 100644
index 0000000..9339f65
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/state_construct.h
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_StateConstruct.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_STATE_CONSTRUCT_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_STATE_CONSTRUCT_H_
+
+/*----------------------------------------------------------------*
+ *  Generate the start state from the quantized indexes
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_StateConstruct(
+    size_t idxForMax,   /* (i) 6-bit index for the quantization of
+                                           max amplitude */
+    int16_t *idxVec,   /* (i) vector of quantization indexes */
+    int16_t *syntDenum,  /* (i) synthesis filter denumerator */
+    int16_t *Out_fix,  /* (o) the decoded state vector */
+    size_t len    /* (i) length of a state vector */
+                                  );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/state_search.c b/modules/audio_coding/codecs/ilbc/state_search.c
new file mode 100644
index 0000000..5e8a2f5
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/state_search.c
@@ -0,0 +1,119 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_StateSearch.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+#include "modules/audio_coding/codecs/ilbc/abs_quant.h"
+
+/*----------------------------------------------------------------*
+ *  encoding of start state
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_StateSearch(
+    IlbcEncoder *iLBCenc_inst,
+    /* (i) Encoder instance */
+    iLBC_bits *iLBC_encbits,/* (i/o) Encoded bits (output idxForMax
+                               and idxVec, input state_first) */
+    int16_t *residual,   /* (i) target residual vector */
+    int16_t *syntDenum,  /* (i) lpc synthesis filter */
+    int16_t *weightDenum  /* (i) weighting filter denuminator */
+                               ) {
+  size_t k, index;
+  int16_t maxVal;
+  int16_t scale, shift;
+  int32_t maxValsq;
+  int16_t scaleRes;
+  int16_t max;
+  int i;
+  /* Stack based */
+  int16_t numerator[1+LPC_FILTERORDER];
+  int16_t residualLongVec[2*STATE_SHORT_LEN_30MS+LPC_FILTERORDER];
+  int16_t sampleMa[2*STATE_SHORT_LEN_30MS];
+  int16_t *residualLong = &residualLongVec[LPC_FILTERORDER];
+  int16_t *sampleAr = residualLong;
+
+  /* Scale to maximum 12 bits to avoid saturation in circular convolution filter */
+  max = WebRtcSpl_MaxAbsValueW16(residual, iLBCenc_inst->state_short_len);
+  scaleRes = WebRtcSpl_GetSizeInBits(max)-12;
+  scaleRes = WEBRTC_SPL_MAX(0, scaleRes);
+  /* Set up the filter coefficients for the circular convolution */
+  for (i=0; i<LPC_FILTERORDER+1; i++) {
+    numerator[i] = (syntDenum[LPC_FILTERORDER-i]>>scaleRes);
+  }
+
+  /* Copy the residual to a temporary buffer that we can filter
+   * and set the remaining samples to zero.
+   */
+  WEBRTC_SPL_MEMCPY_W16(residualLong, residual, iLBCenc_inst->state_short_len);
+  WebRtcSpl_MemSetW16(residualLong + iLBCenc_inst->state_short_len, 0, iLBCenc_inst->state_short_len);
+
+  /* Run the Zero-Pole filter (Ciurcular convolution) */
+  WebRtcSpl_MemSetW16(residualLongVec, 0, LPC_FILTERORDER);
+  WebRtcSpl_FilterMAFastQ12(residualLong, sampleMa, numerator,
+                            LPC_FILTERORDER + 1,
+                            iLBCenc_inst->state_short_len + LPC_FILTERORDER);
+  WebRtcSpl_MemSetW16(&sampleMa[iLBCenc_inst->state_short_len + LPC_FILTERORDER], 0, iLBCenc_inst->state_short_len - LPC_FILTERORDER);
+
+  WebRtcSpl_FilterARFastQ12(
+      sampleMa, sampleAr,
+      syntDenum, LPC_FILTERORDER+1, 2 * iLBCenc_inst->state_short_len);
+
+  for(k=0;k<iLBCenc_inst->state_short_len;k++){
+    sampleAr[k] += sampleAr[k+iLBCenc_inst->state_short_len];
+  }
+
+  /* Find maximum absolute value in the vector */
+  maxVal=WebRtcSpl_MaxAbsValueW16(sampleAr, iLBCenc_inst->state_short_len);
+
+  /* Find the best index */
+
+  if ((((int32_t)maxVal)<<scaleRes)<23170) {
+    maxValsq=((int32_t)maxVal*maxVal)<<(2+2*scaleRes);
+  } else {
+    maxValsq=(int32_t)WEBRTC_SPL_WORD32_MAX;
+  }
+
+  index=0;
+  for (i=0;i<63;i++) {
+
+    if (maxValsq>=WebRtcIlbcfix_kChooseFrgQuant[i]) {
+      index=i+1;
+    } else {
+      i=63;
+    }
+  }
+  iLBC_encbits->idxForMax=index;
+
+  /* Rescale the vector before quantization */
+  scale=WebRtcIlbcfix_kScale[index];
+
+  if (index<27) { /* scale table is in Q16, fout[] is in Q(-1) and we want the result to be in Q11 */
+    shift=4;
+  } else { /* scale table is in Q21, fout[] is in Q(-1) and we want the result to be in Q11 */
+    shift=9;
+  }
+
+  /* Set up vectors for AbsQuant and rescale it with the scale factor */
+  WebRtcSpl_ScaleVectorWithSat(sampleAr, sampleAr, scale,
+                              iLBCenc_inst->state_short_len, (int16_t)(shift-scaleRes));
+
+  /* Quantize the values in fout[] */
+  WebRtcIlbcfix_AbsQuant(iLBCenc_inst, iLBC_encbits, sampleAr, weightDenum);
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/state_search.h b/modules/audio_coding/codecs/ilbc/state_search.h
new file mode 100644
index 0000000..976edca
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/state_search.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_StateSearch.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_STATE_SEARCH_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_STATE_SEARCH_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  encoding of start state
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_StateSearch(
+    IlbcEncoder *iLBCenc_inst,
+    /* (i) Encoder instance */
+    iLBC_bits *iLBC_encbits,/* (i/o) Encoded bits (output idxForMax
+                               and idxVec, input state_first) */
+    int16_t *residual,   /* (i) target residual vector */
+    int16_t *syntDenum,  /* (i) lpc synthesis filter */
+    int16_t *weightDenum  /* (i) weighting filter denuminator */
+                               );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/swap_bytes.c b/modules/audio_coding/codecs/ilbc/swap_bytes.c
new file mode 100644
index 0000000..806cc2a
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/swap_bytes.c
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SwapBytes.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * Swap bytes (to simplify operations on Little Endian machines)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SwapBytes(
+    const uint16_t* input,   /* (i) the sequence to swap */
+    size_t wordLength,      /* (i) number or uint16_t to swap */
+    uint16_t* output         /* (o) the swapped sequence */
+                              ) {
+  size_t k;
+  for (k = wordLength; k > 0; k--) {
+    *output++ = (*input >> 8)|(*input << 8);
+    input++;
+  }
+}
diff --git a/modules/audio_coding/codecs/ilbc/swap_bytes.h b/modules/audio_coding/codecs/ilbc/swap_bytes.h
new file mode 100644
index 0000000..63930d4
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/swap_bytes.h
@@ -0,0 +1,34 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_SwapBytes.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SWAP_BYTES_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_SWAP_BYTES_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * Swap bytes (to simplify operations on Little Endian machines)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_SwapBytes(
+    const uint16_t* input,   /* (i) the sequence to swap */
+    size_t wordLength,      /* (i) number or uint16_t to swap */
+    uint16_t* output         /* (o) the swapped sequence */
+                              );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/unpack_bits.c b/modules/audio_coding/codecs/ilbc/unpack_bits.c
new file mode 100644
index 0000000..ad6a7ee
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/unpack_bits.c
@@ -0,0 +1,239 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_UnpackBits.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  unpacking of bits from bitstream, i.e., vector of bytes
+ *---------------------------------------------------------------*/
+
+int16_t WebRtcIlbcfix_UnpackBits( /* (o) "Empty" frame indicator */
+    const uint16_t *bitstream,    /* (i) The packatized bitstream */
+    iLBC_bits *enc_bits,  /* (o) Paramerers from bitstream */
+    int16_t mode     /* (i) Codec mode (20 or 30) */
+                                        ) {
+  const uint16_t *bitstreamPtr;
+  int i, k;
+  int16_t *tmpPtr;
+
+  bitstreamPtr=bitstream;
+
+  /* First int16_t */
+  enc_bits->lsf[0]  =  (*bitstreamPtr)>>10;       /* Bit 0..5  */
+  enc_bits->lsf[1]  = ((*bitstreamPtr)>>3)&0x7F;      /* Bit 6..12 */
+  enc_bits->lsf[2]  = ((*bitstreamPtr)&0x7)<<4;      /* Bit 13..15 */
+  bitstreamPtr++;
+  /* Second int16_t */
+  enc_bits->lsf[2] |= ((*bitstreamPtr)>>12)&0xF;      /* Bit 0..3  */
+
+  if (mode==20) {
+    enc_bits->startIdx             = ((*bitstreamPtr)>>10)&0x3;  /* Bit 4..5  */
+    enc_bits->state_first          = ((*bitstreamPtr)>>9)&0x1;  /* Bit 6  */
+    enc_bits->idxForMax            = ((*bitstreamPtr)>>3)&0x3F;  /* Bit 7..12 */
+    enc_bits->cb_index[0]          = ((*bitstreamPtr)&0x7)<<4;  /* Bit 13..15 */
+    bitstreamPtr++;
+    /* Third int16_t */
+    enc_bits->cb_index[0]         |= ((*bitstreamPtr)>>12)&0xE;  /* Bit 0..2  */
+    enc_bits->gain_index[0]        = ((*bitstreamPtr)>>8)&0x18;  /* Bit 3..4  */
+    enc_bits->gain_index[1]        = ((*bitstreamPtr)>>7)&0x8;  /* Bit 5  */
+    enc_bits->cb_index[3]          = ((*bitstreamPtr)>>2)&0xFE;  /* Bit 6..12 */
+    enc_bits->gain_index[3]        = ((*bitstreamPtr)<<2)&0x10;  /* Bit 13  */
+    enc_bits->gain_index[4]        = ((*bitstreamPtr)<<2)&0x8;  /* Bit 14  */
+    enc_bits->gain_index[6]        = ((*bitstreamPtr)<<4)&0x10;  /* Bit 15  */
+  } else { /* mode==30 */
+    enc_bits->lsf[3]               = ((*bitstreamPtr)>>6)&0x3F;  /* Bit 4..9  */
+    enc_bits->lsf[4]               = ((*bitstreamPtr)<<1)&0x7E;  /* Bit 10..15 */
+    bitstreamPtr++;
+    /* Third int16_t */
+    enc_bits->lsf[4]              |= ((*bitstreamPtr)>>15)&0x1;  /* Bit 0  */
+    enc_bits->lsf[5]               = ((*bitstreamPtr)>>8)&0x7F;  /* Bit 1..7  */
+    enc_bits->startIdx             = ((*bitstreamPtr)>>5)&0x7;  /* Bit 8..10 */
+    enc_bits->state_first          = ((*bitstreamPtr)>>4)&0x1;  /* Bit 11  */
+    enc_bits->idxForMax            = ((*bitstreamPtr)<<2)&0x3C;  /* Bit 12..15 */
+    bitstreamPtr++;
+    /* 4:th int16_t */
+    enc_bits->idxForMax           |= ((*bitstreamPtr)>>14)&0x3;  /* Bit 0..1  */
+    enc_bits->cb_index[0]        = ((*bitstreamPtr)>>7)&0x78;  /* Bit 2..5  */
+    enc_bits->gain_index[0]        = ((*bitstreamPtr)>>5)&0x10;  /* Bit 6  */
+    enc_bits->gain_index[1]        = ((*bitstreamPtr)>>5)&0x8;  /* Bit 7  */
+    enc_bits->cb_index[3]          = ((*bitstreamPtr))&0xFC;  /* Bit 8..13 */
+    enc_bits->gain_index[3]        = ((*bitstreamPtr)<<3)&0x10;  /* Bit 14  */
+    enc_bits->gain_index[4]        = ((*bitstreamPtr)<<3)&0x8;  /* Bit 15  */
+  }
+  /* Class 2 bits of ULP */
+  /* 4:th to 6:th int16_t for 20 ms case
+     5:th to 7:th int16_t for 30 ms case */
+  bitstreamPtr++;
+  tmpPtr=enc_bits->idxVec;
+  for (k=0; k<3; k++) {
+    for (i=15; i>=0; i--) {
+      (*tmpPtr)                  = (((*bitstreamPtr)>>i)<<2)&0x4;
+      /* Bit 15-i  */
+      tmpPtr++;
+    }
+    bitstreamPtr++;
+  }
+
+  if (mode==20) {
+    /* 7:th int16_t */
+    for (i=15; i>6; i--) {
+      (*tmpPtr)                  = (((*bitstreamPtr)>>i)<<2)&0x4;
+      /* Bit 15-i  */
+      tmpPtr++;
+    }
+    enc_bits->gain_index[1]       |= ((*bitstreamPtr)>>4)&0x4; /* Bit 9  */
+    enc_bits->gain_index[3]       |= ((*bitstreamPtr)>>2)&0xC; /* Bit 10..11 */
+    enc_bits->gain_index[4]       |= ((*bitstreamPtr)>>1)&0x4; /* Bit 12  */
+    enc_bits->gain_index[6]       |= ((*bitstreamPtr)<<1)&0x8; /* Bit 13  */
+    enc_bits->gain_index[7]        = ((*bitstreamPtr)<<2)&0xC; /* Bit 14..15 */
+
+  } else { /* mode==30 */
+    /* 8:th int16_t */
+    for (i=15; i>5; i--) {
+      (*tmpPtr)                  = (((*bitstreamPtr)>>i)<<2)&0x4;
+      /* Bit 15-i  */
+      tmpPtr++;
+    }
+    enc_bits->cb_index[0]         |= ((*bitstreamPtr)>>3)&0x6; /* Bit 10..11 */
+    enc_bits->gain_index[0]       |= ((*bitstreamPtr))&0x8;  /* Bit 12  */
+    enc_bits->gain_index[1]       |= ((*bitstreamPtr))&0x4;  /* Bit 13  */
+    enc_bits->cb_index[3]         |= ((*bitstreamPtr))&0x2;  /* Bit 14  */
+    enc_bits->cb_index[6]          = ((*bitstreamPtr)<<7)&0x80; /* Bit 15  */
+    bitstreamPtr++;
+    /* 9:th int16_t */
+    enc_bits->cb_index[6]         |= ((*bitstreamPtr)>>9)&0x7E; /* Bit 0..5  */
+    enc_bits->cb_index[9]          = ((*bitstreamPtr)>>2)&0xFE; /* Bit 6..12 */
+    enc_bits->cb_index[12]         = ((*bitstreamPtr)<<5)&0xE0; /* Bit 13..15 */
+    bitstreamPtr++;
+    /* 10:th int16_t */
+    enc_bits->cb_index[12]         |= ((*bitstreamPtr)>>11)&0x1E;/* Bit 0..3 */
+    enc_bits->gain_index[3]       |= ((*bitstreamPtr)>>8)&0xC; /* Bit 4..5  */
+    enc_bits->gain_index[4]       |= ((*bitstreamPtr)>>7)&0x6; /* Bit 6..7  */
+    enc_bits->gain_index[6]        = ((*bitstreamPtr)>>3)&0x18; /* Bit 8..9  */
+    enc_bits->gain_index[7]        = ((*bitstreamPtr)>>2)&0xC; /* Bit 10..11 */
+    enc_bits->gain_index[9]        = ((*bitstreamPtr)<<1)&0x10; /* Bit 12  */
+    enc_bits->gain_index[10]       = ((*bitstreamPtr)<<1)&0x8; /* Bit 13  */
+    enc_bits->gain_index[12]       = ((*bitstreamPtr)<<3)&0x10; /* Bit 14  */
+    enc_bits->gain_index[13]       = ((*bitstreamPtr)<<3)&0x8; /* Bit 15  */
+  }
+  bitstreamPtr++;
+  /* Class 3 bits of ULP */
+  /*  8:th to 14:th int16_t for 20 ms case
+      11:th to 17:th int16_t for 30 ms case */
+  tmpPtr=enc_bits->idxVec;
+  for (k=0; k<7; k++) {
+    for (i=14; i>=0; i-=2) {
+      (*tmpPtr)                 |= ((*bitstreamPtr)>>i)&0x3; /* Bit 15-i..14-i*/
+      tmpPtr++;
+    }
+    bitstreamPtr++;
+  }
+
+  if (mode==20) {
+    /* 15:th int16_t */
+    enc_bits->idxVec[56]          |= ((*bitstreamPtr)>>14)&0x3; /* Bit 0..1  */
+    enc_bits->cb_index[0]         |= ((*bitstreamPtr)>>13)&0x1; /* Bit 2  */
+    enc_bits->cb_index[1]          = ((*bitstreamPtr)>>6)&0x7F; /* Bit 3..9  */
+    enc_bits->cb_index[2]          = ((*bitstreamPtr)<<1)&0x7E; /* Bit 10..15 */
+    bitstreamPtr++;
+    /* 16:th int16_t */
+    enc_bits->cb_index[2]         |= ((*bitstreamPtr)>>15)&0x1; /* Bit 0  */
+    enc_bits->gain_index[0]       |= ((*bitstreamPtr)>>12)&0x7; /* Bit 1..3  */
+    enc_bits->gain_index[1]       |= ((*bitstreamPtr)>>10)&0x3; /* Bit 4..5  */
+    enc_bits->gain_index[2]        = ((*bitstreamPtr)>>7)&0x7; /* Bit 6..8  */
+    enc_bits->cb_index[3]         |= ((*bitstreamPtr)>>6)&0x1; /* Bit 9  */
+    enc_bits->cb_index[4]          = ((*bitstreamPtr)<<1)&0x7E; /* Bit 10..15 */
+    bitstreamPtr++;
+    /* 17:th int16_t */
+    enc_bits->cb_index[4]         |= ((*bitstreamPtr)>>15)&0x1; /* Bit 0  */
+    enc_bits->cb_index[5]          = ((*bitstreamPtr)>>8)&0x7F; /* Bit 1..7  */
+    enc_bits->cb_index[6]          = ((*bitstreamPtr))&0xFF; /* Bit 8..15 */
+    bitstreamPtr++;
+    /* 18:th int16_t */
+    enc_bits->cb_index[7]          = (*bitstreamPtr)>>8;  /* Bit 0..7  */
+    enc_bits->cb_index[8]          = (*bitstreamPtr)&0xFF;  /* Bit 8..15 */
+    bitstreamPtr++;
+    /* 19:th int16_t */
+    enc_bits->gain_index[3]       |= ((*bitstreamPtr)>>14)&0x3; /* Bit 0..1  */
+    enc_bits->gain_index[4]       |= ((*bitstreamPtr)>>12)&0x3; /* Bit 2..3  */
+    enc_bits->gain_index[5]        = ((*bitstreamPtr)>>9)&0x7; /* Bit 4..6  */
+    enc_bits->gain_index[6]       |= ((*bitstreamPtr)>>6)&0x7; /* Bit 7..9  */
+    enc_bits->gain_index[7]       |= ((*bitstreamPtr)>>4)&0x3; /* Bit 10..11 */
+    enc_bits->gain_index[8]        = ((*bitstreamPtr)>>1)&0x7; /* Bit 12..14 */
+  } else { /* mode==30 */
+    /* 18:th int16_t */
+    enc_bits->idxVec[56]          |= ((*bitstreamPtr)>>14)&0x3; /* Bit 0..1  */
+    enc_bits->idxVec[57]          |= ((*bitstreamPtr)>>12)&0x3; /* Bit 2..3  */
+    enc_bits->cb_index[0]         |= ((*bitstreamPtr)>>11)&1; /* Bit 4  */
+    enc_bits->cb_index[1]          = ((*bitstreamPtr)>>4)&0x7F; /* Bit 5..11 */
+    enc_bits->cb_index[2]          = ((*bitstreamPtr)<<3)&0x78; /* Bit 12..15 */
+    bitstreamPtr++;
+    /* 19:th int16_t */
+    enc_bits->cb_index[2]         |= ((*bitstreamPtr)>>13)&0x7; /* Bit 0..2  */
+    enc_bits->gain_index[0]       |= ((*bitstreamPtr)>>10)&0x7; /* Bit 3..5  */
+    enc_bits->gain_index[1]       |= ((*bitstreamPtr)>>8)&0x3; /* Bit 6..7  */
+    enc_bits->gain_index[2]        = ((*bitstreamPtr)>>5)&0x7; /* Bit 8..10 */
+    enc_bits->cb_index[3]         |= ((*bitstreamPtr)>>4)&0x1; /* Bit 11  */
+    enc_bits->cb_index[4]          = ((*bitstreamPtr)<<3)&0x78; /* Bit 12..15 */
+    bitstreamPtr++;
+    /* 20:th int16_t */
+    enc_bits->cb_index[4]         |= ((*bitstreamPtr)>>13)&0x7; /* Bit 0..2  */
+    enc_bits->cb_index[5]          = ((*bitstreamPtr)>>6)&0x7F; /* Bit 3..9  */
+    enc_bits->cb_index[6]         |= ((*bitstreamPtr)>>5)&0x1; /* Bit 10  */
+    enc_bits->cb_index[7]          = ((*bitstreamPtr)<<3)&0xF8; /* Bit 11..15 */
+    bitstreamPtr++;
+    /* 21:st int16_t */
+    enc_bits->cb_index[7]         |= ((*bitstreamPtr)>>13)&0x7; /* Bit 0..2  */
+    enc_bits->cb_index[8]          = ((*bitstreamPtr)>>5)&0xFF; /* Bit 3..10 */
+    enc_bits->cb_index[9]         |= ((*bitstreamPtr)>>4)&0x1; /* Bit 11  */
+    enc_bits->cb_index[10]         = ((*bitstreamPtr)<<4)&0xF0; /* Bit 12..15 */
+    bitstreamPtr++;
+    /* 22:nd int16_t */
+    enc_bits->cb_index[10]        |= ((*bitstreamPtr)>>12)&0xF; /* Bit 0..3  */
+    enc_bits->cb_index[11]         = ((*bitstreamPtr)>>4)&0xFF; /* Bit 4..11 */
+    enc_bits->cb_index[12]        |= ((*bitstreamPtr)>>3)&0x1; /* Bit 12  */
+    enc_bits->cb_index[13]         = ((*bitstreamPtr)<<5)&0xE0; /* Bit 13..15 */
+    bitstreamPtr++;
+    /* 23:rd int16_t */
+    enc_bits->cb_index[13]        |= ((*bitstreamPtr)>>11)&0x1F;/* Bit 0..4  */
+    enc_bits->cb_index[14]         = ((*bitstreamPtr)>>3)&0xFF; /* Bit 5..12 */
+    enc_bits->gain_index[3]       |= ((*bitstreamPtr)>>1)&0x3; /* Bit 13..14 */
+    enc_bits->gain_index[4]       |= ((*bitstreamPtr)&0x1);  /* Bit 15  */
+    bitstreamPtr++;
+    /* 24:rd int16_t */
+    enc_bits->gain_index[5]        = ((*bitstreamPtr)>>13)&0x7; /* Bit 0..2  */
+    enc_bits->gain_index[6]       |= ((*bitstreamPtr)>>10)&0x7; /* Bit 3..5  */
+    enc_bits->gain_index[7]       |= ((*bitstreamPtr)>>8)&0x3; /* Bit 6..7  */
+    enc_bits->gain_index[8]        = ((*bitstreamPtr)>>5)&0x7; /* Bit 8..10 */
+    enc_bits->gain_index[9]       |= ((*bitstreamPtr)>>1)&0xF; /* Bit 11..14 */
+    enc_bits->gain_index[10]      |= ((*bitstreamPtr)<<2)&0x4; /* Bit 15  */
+    bitstreamPtr++;
+    /* 25:rd int16_t */
+    enc_bits->gain_index[10]      |= ((*bitstreamPtr)>>14)&0x3; /* Bit 0..1  */
+    enc_bits->gain_index[11]       = ((*bitstreamPtr)>>11)&0x7; /* Bit 2..4  */
+    enc_bits->gain_index[12]      |= ((*bitstreamPtr)>>7)&0xF; /* Bit 5..8  */
+    enc_bits->gain_index[13]      |= ((*bitstreamPtr)>>4)&0x7; /* Bit 9..11 */
+    enc_bits->gain_index[14]       = ((*bitstreamPtr)>>1)&0x7; /* Bit 12..14 */
+  }
+  /* Last bit should be zero, otherwise it's an "empty" frame */
+  if (((*bitstreamPtr)&0x1) == 1) {
+    return(1);
+  } else {
+    return(0);
+  }
+}
diff --git a/modules/audio_coding/codecs/ilbc/unpack_bits.h b/modules/audio_coding/codecs/ilbc/unpack_bits.h
new file mode 100644
index 0000000..b2e622f
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/unpack_bits.h
@@ -0,0 +1,34 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_UnpackBits.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_UNPACK_BITS_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_UNPACK_BITS_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  unpacking of bits from bitstream, i.e., vector of bytes
+ *---------------------------------------------------------------*/
+
+int16_t WebRtcIlbcfix_UnpackBits( /* (o) "Empty" frame indicator */
+    const uint16_t *bitstream,    /* (i) The packatized bitstream */
+    iLBC_bits *enc_bits,  /* (o) Paramerers from bitstream */
+    int16_t mode     /* (i) Codec mode (20 or 30) */
+                                        );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/vq3.c b/modules/audio_coding/codecs/ilbc/vq3.c
new file mode 100644
index 0000000..b63a7a8
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/vq3.c
@@ -0,0 +1,63 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Vq3.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/vq3.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ *  vector quantization
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Vq3(
+    int16_t *Xq, /* quantized vector (Q13) */
+    int16_t *index,
+    int16_t *CB, /* codebook in Q13 */
+    int16_t *X,  /* vector to quantize (Q13) */
+    int16_t n_cb
+                       ){
+  int16_t i, j;
+  int16_t pos, minindex=0;
+  int16_t tmp;
+  int32_t dist, mindist;
+
+  pos = 0;
+  mindist = WEBRTC_SPL_WORD32_MAX; /* start value */
+
+  /* Find the codebook with the lowest square distance */
+  for (j = 0; j < n_cb; j++) {
+    tmp = X[0] - CB[pos];
+    dist = tmp * tmp;
+    for (i = 1; i < 3; i++) {
+      tmp = X[i] - CB[pos + i];
+      dist += tmp * tmp;
+    }
+
+    if (dist < mindist) {
+      mindist = dist;
+      minindex = j;
+    }
+    pos += 3;
+  }
+
+  /* Store the quantized codebook and the index */
+  for (i = 0; i < 3; i++) {
+    Xq[i] = CB[minindex*3 + i];
+  }
+  *index = minindex;
+
+}
diff --git a/modules/audio_coding/codecs/ilbc/vq3.h b/modules/audio_coding/codecs/ilbc/vq3.h
new file mode 100644
index 0000000..6d3dc3a
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/vq3.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Vq3.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_VQ3_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_VQ3_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+/*----------------------------------------------------------------*
+ *  Vector quantization of order 3 (based on MSE)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Vq3(
+    int16_t *Xq,  /* (o) the quantized vector (Q13) */
+    int16_t *index, /* (o) the quantization index */
+    int16_t *CB,  /* (i) the vector quantization codebook (Q13) */
+    int16_t *X,  /* (i) the vector to quantize (Q13) */
+    int16_t n_cb  /* (i) the number of vectors in the codebook */
+                       );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/vq4.c b/modules/audio_coding/codecs/ilbc/vq4.c
new file mode 100644
index 0000000..2522ac2
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/vq4.c
@@ -0,0 +1,62 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Vq4.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/vq4.h"
+#include "modules/audio_coding/codecs/ilbc/constants.h"
+
+/*----------------------------------------------------------------*
+ *  vector quantization
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Vq4(
+    int16_t *Xq, /* quantized vector (Q13) */
+    int16_t *index,
+    int16_t *CB, /* codebook in Q13 */
+    int16_t *X,  /* vector to quantize (Q13) */
+    int16_t n_cb
+                       ){
+  int16_t i, j;
+  int16_t pos, minindex=0;
+  int16_t tmp;
+  int32_t dist, mindist;
+
+  pos = 0;
+  mindist = WEBRTC_SPL_WORD32_MAX; /* start value */
+
+  /* Find the codebook with the lowest square distance */
+  for (j = 0; j < n_cb; j++) {
+    tmp = X[0] - CB[pos];
+    dist = tmp * tmp;
+    for (i = 1; i < 4; i++) {
+      tmp = X[i] - CB[pos + i];
+      dist += tmp * tmp;
+    }
+
+    if (dist < mindist) {
+      mindist = dist;
+      minindex = j;
+    }
+    pos += 4;
+  }
+
+  /* Store the quantized codebook and the index */
+  for (i = 0; i < 4; i++) {
+    Xq[i] = CB[minindex*4 + i];
+  }
+  *index = minindex;
+}
diff --git a/modules/audio_coding/codecs/ilbc/vq4.h b/modules/audio_coding/codecs/ilbc/vq4.h
new file mode 100644
index 0000000..c7f5271
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/vq4.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Vq4.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_VQ4_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_VQ4_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+/*----------------------------------------------------------------*
+ *  Vector quantization of order 4 (based on MSE)
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Vq4(
+    int16_t *Xq,  /* (o) the quantized vector (Q13) */
+    int16_t *index, /* (o) the quantization index */
+    int16_t *CB,  /* (i) the vector quantization codebook (Q13) */
+    int16_t *X,  /* (i) the vector to quantize (Q13) */
+    int16_t n_cb  /* (i) the number of vectors in the codebook */
+                       );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/window32_w32.c b/modules/audio_coding/codecs/ilbc/window32_w32.c
new file mode 100644
index 0000000..fe19de6
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/window32_w32.c
@@ -0,0 +1,62 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Window32W32.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  window multiplication
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Window32W32(
+    int32_t *z,    /* Output */
+    int32_t *x,    /* Input (same domain as Output)*/
+    const int32_t  *y,  /* Q31 Window */
+    size_t N     /* length to process */
+                               ) {
+  size_t i;
+  int16_t x_low, x_hi, y_low, y_hi;
+  int16_t left_shifts;
+  int32_t temp;
+
+  left_shifts = (int16_t)WebRtcSpl_NormW32(x[0]);
+  WebRtcSpl_VectorBitShiftW32(x, N, x, (int16_t)(-left_shifts));
+
+
+  /* The double precision numbers use a special representation:
+   * w32 = hi<<16 + lo<<1
+   */
+  for (i = 0; i < N; i++) {
+    /* Extract higher bytes */
+    x_hi = (int16_t)(x[i] >> 16);
+    y_hi = (int16_t)(y[i] >> 16);
+
+    /* Extract lower bytes, defined as (w32 - hi<<16)>>1 */
+    x_low = (int16_t)((x[i] - (x_hi << 16)) >> 1);
+
+    y_low = (int16_t)((y[i] - (y_hi << 16)) >> 1);
+
+    /* Calculate z by a 32 bit multiplication using both low and high from x and y */
+    temp = ((x_hi * y_hi) << 1) + ((x_hi * y_low) >> 14);
+
+    z[i] = temp + ((x_low * y_hi) >> 14);
+  }
+
+  WebRtcSpl_VectorBitShiftW32(z, N, z, left_shifts);
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/ilbc/window32_w32.h b/modules/audio_coding/codecs/ilbc/window32_w32.h
new file mode 100644
index 0000000..c348d1d
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/window32_w32.h
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_Window32W32.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_WINDOW32_W32_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_WINDOW32_W32_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ *  window multiplication
+ *---------------------------------------------------------------*/
+
+void WebRtcIlbcfix_Window32W32(
+    int32_t *z,    /* Output */
+    int32_t *x,    /* Input (same domain as Output)*/
+    const int32_t  *y,  /* Q31 Window */
+    size_t N     /* length to process */
+                               );
+
+#endif
diff --git a/modules/audio_coding/codecs/ilbc/xcorr_coef.c b/modules/audio_coding/codecs/ilbc/xcorr_coef.c
new file mode 100644
index 0000000..f6375df
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/xcorr_coef.c
@@ -0,0 +1,140 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_XcorrCoef.c
+
+******************************************************************/
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * cross correlation which finds the optimal lag for the
+ * crossCorr*crossCorr/(energy) criteria
+ *---------------------------------------------------------------*/
+
+size_t WebRtcIlbcfix_XcorrCoef(
+    int16_t *target,  /* (i) first array */
+    int16_t *regressor, /* (i) second array */
+    size_t subl,  /* (i) dimension arrays */
+    size_t searchLen, /* (i) the search lenght */
+    size_t offset,  /* (i) samples offset between arrays */
+    int16_t step   /* (i) +1 or -1 */
+                            ){
+  size_t k;
+  size_t maxlag;
+  int16_t pos;
+  int16_t max;
+  int16_t crossCorrScale, Energyscale;
+  int16_t crossCorrSqMod, crossCorrSqMod_Max;
+  int32_t crossCorr, Energy;
+  int16_t crossCorrmod, EnergyMod, EnergyMod_Max;
+  int16_t *tp, *rp;
+  int16_t *rp_beg, *rp_end;
+  int16_t totscale, totscale_max;
+  int16_t scalediff;
+  int32_t newCrit, maxCrit;
+  int shifts;
+
+  /* Initializations, to make sure that the first one is selected */
+  crossCorrSqMod_Max=0;
+  EnergyMod_Max=WEBRTC_SPL_WORD16_MAX;
+  totscale_max=-500;
+  maxlag=0;
+  pos=0;
+
+  /* Find scale value and start position */
+  if (step==1) {
+    max=WebRtcSpl_MaxAbsValueW16(regressor, subl + searchLen - 1);
+    rp_beg = regressor;
+    rp_end = regressor + subl;
+  } else { /* step==-1 */
+    max = WebRtcSpl_MaxAbsValueW16(regressor - searchLen, subl + searchLen - 1);
+    rp_beg = regressor - 1;
+    rp_end = regressor + subl - 1;
+  }
+
+  /* Introduce a scale factor on the Energy in int32_t in
+     order to make sure that the calculation does not
+     overflow */
+
+  if (max>5000) {
+    shifts=2;
+  } else {
+    shifts=0;
+  }
+
+  /* Calculate the first energy, then do a +/- to get the other energies */
+  Energy=WebRtcSpl_DotProductWithScale(regressor, regressor, subl, shifts);
+
+  for (k=0;k<searchLen;k++) {
+    tp = target;
+    rp = &regressor[pos];
+
+    crossCorr=WebRtcSpl_DotProductWithScale(tp, rp, subl, shifts);
+
+    if ((Energy>0)&&(crossCorr>0)) {
+
+      /* Put cross correlation and energy on 16 bit word */
+      crossCorrScale=(int16_t)WebRtcSpl_NormW32(crossCorr)-16;
+      crossCorrmod=(int16_t)WEBRTC_SPL_SHIFT_W32(crossCorr, crossCorrScale);
+      Energyscale=(int16_t)WebRtcSpl_NormW32(Energy)-16;
+      EnergyMod=(int16_t)WEBRTC_SPL_SHIFT_W32(Energy, Energyscale);
+
+      /* Square cross correlation and store upper int16_t */
+      crossCorrSqMod = (int16_t)((crossCorrmod * crossCorrmod) >> 16);
+
+      /* Calculate the total number of (dynamic) right shifts that have
+         been performed on (crossCorr*crossCorr)/energy
+      */
+      totscale=Energyscale-(crossCorrScale<<1);
+
+      /* Calculate the shift difference in order to be able to compare the two
+         (crossCorr*crossCorr)/energy in the same domain
+      */
+      scalediff=totscale-totscale_max;
+      scalediff=WEBRTC_SPL_MIN(scalediff,31);
+      scalediff=WEBRTC_SPL_MAX(scalediff,-31);
+
+      /* Compute the cross multiplication between the old best criteria
+         and the new one to be able to compare them without using a
+         division */
+
+      if (scalediff<0) {
+        newCrit = ((int32_t)crossCorrSqMod*EnergyMod_Max)>>(-scalediff);
+        maxCrit = ((int32_t)crossCorrSqMod_Max*EnergyMod);
+      } else {
+        newCrit = ((int32_t)crossCorrSqMod*EnergyMod_Max);
+        maxCrit = ((int32_t)crossCorrSqMod_Max*EnergyMod)>>scalediff;
+      }
+
+      /* Store the new lag value if the new criteria is larger
+         than previous largest criteria */
+
+      if (newCrit > maxCrit) {
+        crossCorrSqMod_Max = crossCorrSqMod;
+        EnergyMod_Max = EnergyMod;
+        totscale_max = totscale;
+        maxlag = k;
+      }
+    }
+    pos+=step;
+
+    /* Do a +/- to get the next energy */
+    Energy += step * ((*rp_end * *rp_end - *rp_beg * *rp_beg) >> shifts);
+    rp_beg+=step;
+    rp_end+=step;
+  }
+
+  return(maxlag+offset);
+}
diff --git a/modules/audio_coding/codecs/ilbc/xcorr_coef.h b/modules/audio_coding/codecs/ilbc/xcorr_coef.h
new file mode 100644
index 0000000..cd58b60
--- /dev/null
+++ b/modules/audio_coding/codecs/ilbc/xcorr_coef.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/******************************************************************
+
+ iLBC Speech Coder ANSI-C Source Code
+
+ WebRtcIlbcfix_XcorrCoef.h
+
+******************************************************************/
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_XCORR_COEF_H_
+#define MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_XCORR_COEF_H_
+
+#include "modules/audio_coding/codecs/ilbc/defines.h"
+
+/*----------------------------------------------------------------*
+ * cross correlation which finds the optimal lag for the
+ * crossCorr*crossCorr/(energy) criteria
+ *---------------------------------------------------------------*/
+
+size_t WebRtcIlbcfix_XcorrCoef(
+    int16_t *target,  /* (i) first array */
+    int16_t *regressor, /* (i) second array */
+    size_t subl,  /* (i) dimension arrays */
+    size_t searchLen, /* (i) the search lenght */
+    size_t offset,  /* (i) samples offset between arrays */
+    int16_t step   /* (i) +1 or -1 */
+                            );
+
+#endif
diff --git a/modules/audio_coding/codecs/isac/audio_decoder_isac_t.h b/modules/audio_coding/codecs/isac/audio_decoder_isac_t.h
new file mode 100644
index 0000000..eda1cfa
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/audio_decoder_isac_t.h
@@ -0,0 +1,59 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_H_
+
+#include <vector>
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "api/optional.h"
+#include "modules/audio_coding/codecs/isac/locked_bandwidth_info.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/scoped_ref_ptr.h"
+
+namespace webrtc {
+
+template <typename T>
+class AudioDecoderIsacT final : public AudioDecoder {
+ public:
+  explicit AudioDecoderIsacT(int sample_rate_hz);
+  AudioDecoderIsacT(int sample_rate_hz,
+                    const rtc::scoped_refptr<LockedIsacBandwidthInfo>& bwinfo);
+  ~AudioDecoderIsacT() override;
+
+  bool HasDecodePlc() const override;
+  size_t DecodePlc(size_t num_frames, int16_t* decoded) override;
+  void Reset() override;
+  int IncomingPacket(const uint8_t* payload,
+                     size_t payload_len,
+                     uint16_t rtp_sequence_number,
+                     uint32_t rtp_timestamp,
+                     uint32_t arrival_timestamp) override;
+  int ErrorCode() override;
+  int SampleRateHz() const override;
+  size_t Channels() const override;
+  int DecodeInternal(const uint8_t* encoded,
+                     size_t encoded_len,
+                     int sample_rate_hz,
+                     int16_t* decoded,
+                     SpeechType* speech_type) override;
+
+ private:
+  typename T::instance_type* isac_state_;
+  int sample_rate_hz_;
+  rtc::scoped_refptr<LockedIsacBandwidthInfo> bwinfo_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderIsacT);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_H_
diff --git a/modules/audio_coding/codecs/isac/audio_decoder_isac_t_impl.h b/modules/audio_coding/codecs/isac/audio_decoder_isac_t_impl.h
new file mode 100644
index 0000000..b2783c6
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/audio_decoder_isac_t_impl.h
@@ -0,0 +1,107 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_IMPL_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_IMPL_H_
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+template <typename T>
+AudioDecoderIsacT<T>::AudioDecoderIsacT(int sample_rate_hz)
+    : AudioDecoderIsacT(sample_rate_hz, nullptr) {}
+
+template <typename T>
+AudioDecoderIsacT<T>::AudioDecoderIsacT(
+    int sample_rate_hz,
+    const rtc::scoped_refptr<LockedIsacBandwidthInfo>& bwinfo)
+    : sample_rate_hz_(sample_rate_hz), bwinfo_(bwinfo) {
+  RTC_CHECK(sample_rate_hz == 16000 || sample_rate_hz == 32000)
+      << "Unsupported sample rate " << sample_rate_hz;
+  RTC_CHECK_EQ(0, T::Create(&isac_state_));
+  T::DecoderInit(isac_state_);
+  if (bwinfo_) {
+    IsacBandwidthInfo bi;
+    T::GetBandwidthInfo(isac_state_, &bi);
+    bwinfo_->Set(bi);
+  }
+  RTC_CHECK_EQ(0, T::SetDecSampRate(isac_state_, sample_rate_hz_));
+}
+
+template <typename T>
+AudioDecoderIsacT<T>::~AudioDecoderIsacT() {
+  RTC_CHECK_EQ(0, T::Free(isac_state_));
+}
+
+template <typename T>
+int AudioDecoderIsacT<T>::DecodeInternal(const uint8_t* encoded,
+                                         size_t encoded_len,
+                                         int sample_rate_hz,
+                                         int16_t* decoded,
+                                         SpeechType* speech_type) {
+  RTC_CHECK_EQ(sample_rate_hz_, sample_rate_hz);
+  int16_t temp_type = 1;  // Default is speech.
+  int ret =
+      T::DecodeInternal(isac_state_, encoded, encoded_len, decoded, &temp_type);
+  *speech_type = ConvertSpeechType(temp_type);
+  return ret;
+}
+
+template <typename T>
+bool AudioDecoderIsacT<T>::HasDecodePlc() const {
+  return false;
+}
+
+template <typename T>
+size_t AudioDecoderIsacT<T>::DecodePlc(size_t num_frames, int16_t* decoded) {
+  return T::DecodePlc(isac_state_, decoded, num_frames);
+}
+
+template <typename T>
+void AudioDecoderIsacT<T>::Reset() {
+  T::DecoderInit(isac_state_);
+}
+
+template <typename T>
+int AudioDecoderIsacT<T>::IncomingPacket(const uint8_t* payload,
+                                         size_t payload_len,
+                                         uint16_t rtp_sequence_number,
+                                         uint32_t rtp_timestamp,
+                                         uint32_t arrival_timestamp) {
+  int ret = T::UpdateBwEstimate(isac_state_, payload, payload_len,
+                                rtp_sequence_number, rtp_timestamp,
+                                arrival_timestamp);
+  if (bwinfo_) {
+    IsacBandwidthInfo bwinfo;
+    T::GetBandwidthInfo(isac_state_, &bwinfo);
+    bwinfo_->Set(bwinfo);
+  }
+  return ret;
+}
+
+template <typename T>
+int AudioDecoderIsacT<T>::ErrorCode() {
+  return T::GetErrorCode(isac_state_);
+}
+
+template <typename T>
+int AudioDecoderIsacT<T>::SampleRateHz() const {
+  return sample_rate_hz_;
+}
+
+template <typename T>
+size_t AudioDecoderIsacT<T>::Channels() const {
+  return 1;
+}
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_IMPL_H_
diff --git a/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h b/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
new file mode 100644
index 0000000..541b90c
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
@@ -0,0 +1,98 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_ENCODER_ISAC_T_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_ENCODER_ISAC_T_H_
+
+#include <vector>
+
+#include "api/audio_codecs/audio_encoder.h"
+#include "modules/audio_coding/codecs/isac/locked_bandwidth_info.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/scoped_ref_ptr.h"
+
+namespace webrtc {
+
+struct CodecInst;
+
+template <typename T>
+class AudioEncoderIsacT final : public AudioEncoder {
+ public:
+  // Allowed combinations of sample rate, frame size, and bit rate are
+  //  - 16000 Hz, 30 ms, 10000-32000 bps
+  //  - 16000 Hz, 60 ms, 10000-32000 bps
+  //  - 32000 Hz, 30 ms, 10000-56000 bps (if T has super-wideband support)
+  struct Config {
+    bool IsOk() const;
+
+    rtc::scoped_refptr<LockedIsacBandwidthInfo> bwinfo;
+
+    int payload_type = 103;
+    int sample_rate_hz = 16000;
+    int frame_size_ms = 30;
+    int bit_rate = kDefaultBitRate;  // Limit on the short-term average bit
+                                     // rate, in bits/s.
+    int max_payload_size_bytes = -1;
+    int max_bit_rate = -1;
+
+    // If true, the encoder will dynamically adjust frame size and bit rate;
+    // the configured values are then merely the starting point.
+    bool adaptive_mode = false;
+
+    // In adaptive mode, prevent adaptive changes to the frame size. (Not used
+    // in nonadaptive mode.)
+    bool enforce_frame_size = false;
+  };
+
+  explicit AudioEncoderIsacT(const Config& config);
+  explicit AudioEncoderIsacT(
+      const CodecInst& codec_inst,
+      const rtc::scoped_refptr<LockedIsacBandwidthInfo>& bwinfo);
+  ~AudioEncoderIsacT() override;
+
+  int SampleRateHz() const override;
+  size_t NumChannels() const override;
+  size_t Num10MsFramesInNextPacket() const override;
+  size_t Max10MsFramesInAPacket() const override;
+  int GetTargetBitrate() const override;
+  EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
+                         rtc::ArrayView<const int16_t> audio,
+                         rtc::Buffer* encoded) override;
+  void Reset() override;
+
+ private:
+  // This value is taken from STREAM_SIZE_MAX_60 for iSAC float (60 ms) and
+  // STREAM_MAXW16_60MS for iSAC fix (60 ms).
+  static const size_t kSufficientEncodeBufferSizeBytes = 400;
+
+  static const int kDefaultBitRate = 32000;
+
+  // Recreate the iSAC encoder instance with the given settings, and save them.
+  void RecreateEncoderInstance(const Config& config);
+
+  Config config_;
+  typename T::instance_type* isac_state_ = nullptr;
+  rtc::scoped_refptr<LockedIsacBandwidthInfo> bwinfo_;
+
+  // Have we accepted input but not yet emitted it in a packet?
+  bool packet_in_progress_ = false;
+
+  // Timestamp of the first input of the currently in-progress packet.
+  uint32_t packet_timestamp_;
+
+  // Timestamp of the previously encoded packet.
+  uint32_t last_encoded_timestamp_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderIsacT);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_ENCODER_ISAC_T_H_
diff --git a/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h b/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
new file mode 100644
index 0000000..696b799
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
@@ -0,0 +1,189 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_ENCODER_ISAC_T_IMPL_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_ENCODER_ISAC_T_IMPL_H_
+
+#include "common_types.h"  // NOLINT(build/include)
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+template <typename T>
+typename AudioEncoderIsacT<T>::Config CreateIsacConfig(
+    const CodecInst& codec_inst,
+    const rtc::scoped_refptr<LockedIsacBandwidthInfo>& bwinfo) {
+  typename AudioEncoderIsacT<T>::Config config;
+  config.bwinfo = bwinfo;
+  config.payload_type = codec_inst.pltype;
+  config.sample_rate_hz = codec_inst.plfreq;
+  config.frame_size_ms =
+      rtc::CheckedDivExact(1000 * codec_inst.pacsize, config.sample_rate_hz);
+  config.adaptive_mode = (codec_inst.rate == -1);
+  if (codec_inst.rate != -1)
+    config.bit_rate = codec_inst.rate;
+  return config;
+}
+
+template <typename T>
+bool AudioEncoderIsacT<T>::Config::IsOk() const {
+  if (max_bit_rate < 32000 && max_bit_rate != -1)
+    return false;
+  if (max_payload_size_bytes < 120 && max_payload_size_bytes != -1)
+    return false;
+  if (adaptive_mode && !bwinfo)
+    return false;
+  switch (sample_rate_hz) {
+    case 16000:
+      if (max_bit_rate > 53400)
+        return false;
+      if (max_payload_size_bytes > 400)
+        return false;
+      return (frame_size_ms == 30 || frame_size_ms == 60) &&
+             (bit_rate == 0 || (bit_rate >= 10000 && bit_rate <= 32000));
+    case 32000:
+      if (max_bit_rate > 160000)
+        return false;
+      if (max_payload_size_bytes > 600)
+        return false;
+      return T::has_swb &&
+             (frame_size_ms == 30 &&
+              (bit_rate == 0 || (bit_rate >= 10000 && bit_rate <= 56000)));
+    default:
+      return false;
+  }
+}
+
+template <typename T>
+AudioEncoderIsacT<T>::AudioEncoderIsacT(const Config& config) {
+  RecreateEncoderInstance(config);
+}
+
+template <typename T>
+AudioEncoderIsacT<T>::AudioEncoderIsacT(
+    const CodecInst& codec_inst,
+    const rtc::scoped_refptr<LockedIsacBandwidthInfo>& bwinfo)
+    : AudioEncoderIsacT(CreateIsacConfig<T>(codec_inst, bwinfo)) {}
+
+template <typename T>
+AudioEncoderIsacT<T>::~AudioEncoderIsacT() {
+  RTC_CHECK_EQ(0, T::Free(isac_state_));
+}
+
+template <typename T>
+int AudioEncoderIsacT<T>::SampleRateHz() const {
+  return T::EncSampRate(isac_state_);
+}
+
+template <typename T>
+size_t AudioEncoderIsacT<T>::NumChannels() const {
+  return 1;
+}
+
+template <typename T>
+size_t AudioEncoderIsacT<T>::Num10MsFramesInNextPacket() const {
+  const int samples_in_next_packet = T::GetNewFrameLen(isac_state_);
+  return static_cast<size_t>(
+      rtc::CheckedDivExact(samples_in_next_packet,
+                           rtc::CheckedDivExact(SampleRateHz(), 100)));
+}
+
+template <typename T>
+size_t AudioEncoderIsacT<T>::Max10MsFramesInAPacket() const {
+  return 6;  // iSAC puts at most 60 ms in a packet.
+}
+
+template <typename T>
+int AudioEncoderIsacT<T>::GetTargetBitrate() const {
+  if (config_.adaptive_mode)
+    return -1;
+  return config_.bit_rate == 0 ? kDefaultBitRate : config_.bit_rate;
+}
+
+template <typename T>
+AudioEncoder::EncodedInfo AudioEncoderIsacT<T>::EncodeImpl(
+    uint32_t rtp_timestamp,
+    rtc::ArrayView<const int16_t> audio,
+    rtc::Buffer* encoded) {
+  if (!packet_in_progress_) {
+    // Starting a new packet; remember the timestamp for later.
+    packet_in_progress_ = true;
+    packet_timestamp_ = rtp_timestamp;
+  }
+  if (bwinfo_) {
+    IsacBandwidthInfo bwinfo = bwinfo_->Get();
+    T::SetBandwidthInfo(isac_state_, &bwinfo);
+  }
+
+  size_t encoded_bytes = encoded->AppendData(
+      kSufficientEncodeBufferSizeBytes,
+      [&] (rtc::ArrayView<uint8_t> encoded) {
+        int r = T::Encode(isac_state_, audio.data(), encoded.data());
+
+        RTC_CHECK_GE(r, 0) << "Encode failed (error code "
+                           << T::GetErrorCode(isac_state_) << ")";
+
+        return static_cast<size_t>(r);
+      });
+
+  if (encoded_bytes == 0)
+    return EncodedInfo();
+
+  // Got enough input to produce a packet. Return the saved timestamp from
+  // the first chunk of input that went into the packet.
+  packet_in_progress_ = false;
+  EncodedInfo info;
+  info.encoded_bytes = encoded_bytes;
+  info.encoded_timestamp = packet_timestamp_;
+  info.payload_type = config_.payload_type;
+  info.encoder_type = CodecType::kIsac;
+  return info;
+}
+
+template <typename T>
+void AudioEncoderIsacT<T>::Reset() {
+  RecreateEncoderInstance(config_);
+}
+
+template <typename T>
+void AudioEncoderIsacT<T>::RecreateEncoderInstance(const Config& config) {
+  RTC_CHECK(config.IsOk());
+  packet_in_progress_ = false;
+  bwinfo_ = config.bwinfo;
+  if (isac_state_)
+    RTC_CHECK_EQ(0, T::Free(isac_state_));
+  RTC_CHECK_EQ(0, T::Create(&isac_state_));
+  RTC_CHECK_EQ(0, T::EncoderInit(isac_state_, config.adaptive_mode ? 0 : 1));
+  RTC_CHECK_EQ(0, T::SetEncSampRate(isac_state_, config.sample_rate_hz));
+  const int bit_rate = config.bit_rate == 0 ? kDefaultBitRate : config.bit_rate;
+  if (config.adaptive_mode) {
+    RTC_CHECK_EQ(0, T::ControlBwe(isac_state_, bit_rate, config.frame_size_ms,
+                                  config.enforce_frame_size));
+  } else {
+    RTC_CHECK_EQ(0, T::Control(isac_state_, bit_rate, config.frame_size_ms));
+  }
+  if (config.max_payload_size_bytes != -1)
+    RTC_CHECK_EQ(
+        0, T::SetMaxPayloadSize(isac_state_, config.max_payload_size_bytes));
+  if (config.max_bit_rate != -1)
+    RTC_CHECK_EQ(0, T::SetMaxRate(isac_state_, config.max_bit_rate));
+
+  // Set the decoder sample rate even though we just use the encoder. This
+  // doesn't appear to be necessary to produce a valid encoding, but without it
+  // we get an encoding that isn't bit-for-bit identical with what a combined
+  // encoder+decoder object produces.
+  RTC_CHECK_EQ(0, T::SetDecSampRate(isac_state_, config.sample_rate_hz));
+
+  config_ = config;
+}
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_ENCODER_ISAC_T_IMPL_H_
diff --git a/modules/audio_coding/codecs/isac/bandwidth_info.h b/modules/audio_coding/codecs/isac/bandwidth_info.h
new file mode 100644
index 0000000..0539780
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/bandwidth_info.h
@@ -0,0 +1,24 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_BANDWIDTH_INFO_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_BANDWIDTH_INFO_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+typedef struct {
+  int in_use;
+  int32_t send_bw_avg;
+  int32_t send_max_delay_avg;
+  int16_t bottleneck_idx;
+  int16_t jitter_info;
+} IsacBandwidthInfo;
+
+#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_BANDWIDTH_INFO_H_
diff --git a/modules/audio_coding/codecs/isac/empty.cc b/modules/audio_coding/codecs/isac/empty.cc
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/empty.cc
diff --git a/modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h b/modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h
new file mode 100644
index 0000000..0b4eadd
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h
@@ -0,0 +1,22 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_AUDIO_DECODER_ISACFIX_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_AUDIO_DECODER_ISACFIX_H_
+
+#include "modules/audio_coding/codecs/isac/audio_decoder_isac_t.h"
+#include "modules/audio_coding/codecs/isac/fix/source/isac_fix_type.h"
+
+namespace webrtc {
+
+using AudioDecoderIsacFixImpl = AudioDecoderIsacT<IsacFix>;
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_AUDIO_DECODER_ISACFIX_H_
diff --git a/modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h b/modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h
new file mode 100644
index 0000000..f0cc038
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h
@@ -0,0 +1,22 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_AUDIO_ENCODER_ISACFIX_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_AUDIO_ENCODER_ISACFIX_H_
+
+#include "modules/audio_coding/codecs/isac/audio_encoder_isac_t.h"
+#include "modules/audio_coding/codecs/isac/fix/source/isac_fix_type.h"
+
+namespace webrtc {
+
+using AudioEncoderIsacFixImpl = AudioEncoderIsacT<IsacFix>;
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_AUDIO_ENCODER_ISACFIX_H_
diff --git a/modules/audio_coding/codecs/isac/fix/include/isacfix.h b/modules/audio_coding/codecs/isac/fix/include/isacfix.h
new file mode 100644
index 0000000..ef194ca
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/include/isacfix.h
@@ -0,0 +1,637 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_ISACFIX_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_ISACFIX_H_
+
+#include <stddef.h>
+
+#include "modules/audio_coding/codecs/isac/bandwidth_info.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+typedef struct {
+  void *dummy;
+} ISACFIX_MainStruct;
+
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+
+  /**************************************************************************
+   * WebRtcIsacfix_AssignSize(...)
+   *
+   *  Functions used when malloc is not allowed
+   *  Output the number of bytes needed to allocate for iSAC struct.
+   *
+   */
+
+  int16_t WebRtcIsacfix_AssignSize(int *sizeinbytes);
+
+  /**************************************************************************
+   * WebRtcIsacfix_Assign(...)
+   *
+   * Functions used when malloc is not allowed, it
+   * places a struct at the given address.
+   *
+   * Input:
+   *      - *ISAC_main_inst   : a pointer to the coder instance.
+   *      - ISACFIX_inst_Addr : address of the memory where a space is
+   *                            for iSAC structure.
+   *
+   * Return value             : 0 - Ok
+   *                           -1 - Error
+   */
+
+  int16_t WebRtcIsacfix_Assign(ISACFIX_MainStruct **inst,
+                                     void *ISACFIX_inst_Addr);
+
+  /****************************************************************************
+   * WebRtcIsacfix_Create(...)
+   *
+   * This function creates an ISAC instance, which will contain the state
+   * information for one coding/decoding channel.
+   *
+   * Input:
+   *      - *ISAC_main_inst   : a pointer to the coder instance.
+   *
+   * Return value             : 0 - Ok
+   *                           -1 - Error
+   */
+
+  int16_t WebRtcIsacfix_Create(ISACFIX_MainStruct **ISAC_main_inst);
+
+
+  /****************************************************************************
+   * WebRtcIsacfix_Free(...)
+   *
+   * This function frees the ISAC instance created at the beginning.
+   *
+   * Input:
+   *      - ISAC_main_inst    : a ISAC instance.
+   *
+   * Return value             :  0 - Ok
+   *                            -1 - Error
+   */
+
+  int16_t WebRtcIsacfix_Free(ISACFIX_MainStruct *ISAC_main_inst);
+
+
+  /****************************************************************************
+   * WebRtcIsacfix_EncoderInit(...)
+   *
+   * This function initializes an ISAC instance prior to the encoder calls.
+   *
+   * Input:
+   *     - ISAC_main_inst     : ISAC instance.
+   *     - CodingMode         : 0 - Bit rate and frame length are automatically
+   *                                adjusted to available bandwidth on
+   *                                transmission channel.
+   *                            1 - User sets a frame length and a target bit
+   *                                rate which is taken as the maximum short-term
+   *                                average bit rate.
+   *
+   * Return value             :  0 - Ok
+   *                            -1 - Error
+   */
+
+  int16_t WebRtcIsacfix_EncoderInit(ISACFIX_MainStruct *ISAC_main_inst,
+                                    int16_t  CodingMode);
+
+
+  /****************************************************************************
+   * WebRtcIsacfix_Encode(...)
+   *
+   * This function encodes 10ms frame(s) and inserts it into a package.
+   * Input speech length has to be 160 samples (10ms). The encoder buffers those
+   * 10ms frames until it reaches the chosen Framesize (480 or 960 samples
+   * corresponding to 30 or 60 ms frames), and then proceeds to the encoding.
+   *
+   * Input:
+   *      - ISAC_main_inst    : ISAC instance.
+   *      - speechIn          : input speech vector.
+   *
+   * Output:
+   *      - encoded           : the encoded data vector
+   *
+   * Return value             : >0 - Length (in bytes) of coded data
+   *                             0 - The buffer didn't reach the chosen framesize
+   *                                 so it keeps buffering speech samples.
+   *                            -1 - Error
+   */
+
+  int WebRtcIsacfix_Encode(ISACFIX_MainStruct *ISAC_main_inst,
+                           const int16_t *speechIn,
+                           uint8_t* encoded);
+
+
+
+  /****************************************************************************
+   * WebRtcIsacfix_EncodeNb(...)
+   *
+   * This function encodes 10ms narrow band (8 kHz sampling) frame(s) and inserts
+   * it into a package. Input speech length has to be 80 samples (10ms). The encoder
+   * interpolates into wide-band (16 kHz sampling) buffers those
+   * 10ms frames until it reaches the chosen Framesize (480 or 960 wide-band samples
+   * corresponding to 30 or 60 ms frames), and then proceeds to the encoding.
+   *
+   * The function is enabled if WEBRTC_ISAC_FIX_NB_CALLS_ENABLED is defined
+   *
+   * Input:
+   *      - ISAC_main_inst    : ISAC instance.
+   *      - speechIn          : input speech vector.
+   *
+   * Output:
+   *      - encoded           : the encoded data vector
+   *
+   * Return value             : >0 - Length (in bytes) of coded data
+   *                             0 - The buffer didn't reach the chosen framesize
+   *                                 so it keeps buffering speech samples.
+   *                            -1 - Error
+   */
+
+
+#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
+  int16_t WebRtcIsacfix_EncodeNb(ISACFIX_MainStruct *ISAC_main_inst,
+                                 const int16_t *speechIn,
+                                 int16_t *encoded);
+#endif //  WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
+
+
+
+  /****************************************************************************
+   * WebRtcIsacfix_DecoderInit(...)
+   *
+   * This function initializes an ISAC instance prior to the decoder calls.
+   *
+   * Input:
+   *  - ISAC_main_inst : ISAC instance.
+   */
+
+  void WebRtcIsacfix_DecoderInit(ISACFIX_MainStruct* ISAC_main_inst);
+
+  /****************************************************************************
+   * WebRtcIsacfix_UpdateBwEstimate1(...)
+   *
+   * This function updates the estimate of the bandwidth.
+   *
+   * Input:
+   *      - ISAC_main_inst    : ISAC instance.
+   *      - encoded           : encoded ISAC frame(s).
+   *      - packet_size       : size of the packet in bytes.
+   *      - rtp_seq_number    : the RTP number of the packet.
+   *      - arr_ts            : the arrival time of the packet (from NetEq)
+   *                            in samples.
+   *
+   * Return value             : 0 - Ok
+   *                           -1 - Error
+   */
+
+  int16_t WebRtcIsacfix_UpdateBwEstimate1(ISACFIX_MainStruct *ISAC_main_inst,
+                                          const uint8_t* encoded,
+                                          size_t packet_size,
+                                          uint16_t rtp_seq_number,
+                                          uint32_t arr_ts);
+
+  /****************************************************************************
+   * WebRtcIsacfix_UpdateBwEstimate(...)
+   *
+   * This function updates the estimate of the bandwidth.
+   *
+   * Input:
+   *      - ISAC_main_inst    : ISAC instance.
+   *      - encoded           : encoded ISAC frame(s).
+   *      - packet_size       : size of the packet in bytes.
+   *      - rtp_seq_number    : the RTP number of the packet.
+   *      - send_ts           : the send time of the packet from RTP header,
+   *                            in samples.
+   *      - arr_ts            : the arrival time of the packet (from NetEq)
+   *                            in samples.
+   *
+   * Return value             :  0 - Ok
+   *                            -1 - Error
+   */
+
+  int16_t WebRtcIsacfix_UpdateBwEstimate(ISACFIX_MainStruct *ISAC_main_inst,
+                                         const uint8_t* encoded,
+                                         size_t packet_size,
+                                         uint16_t rtp_seq_number,
+                                         uint32_t send_ts,
+                                         uint32_t arr_ts);
+
+  /****************************************************************************
+   * WebRtcIsacfix_Decode(...)
+   *
+   * This function decodes an ISAC frame. Output speech length
+   * will be a multiple of 480 samples: 480 or 960 samples,
+   * depending on the framesize (30 or 60 ms).
+   *
+   * Input:
+   *      - ISAC_main_inst    : ISAC instance.
+   *      - encoded           : encoded ISAC frame(s)
+   *      - len               : bytes in encoded vector
+   *
+   * Output:
+   *      - decoded           : The decoded vector
+   *
+   * Return value             : >0 - number of samples in decoded vector
+   *                            -1 - Error
+   */
+
+  int WebRtcIsacfix_Decode(ISACFIX_MainStruct *ISAC_main_inst,
+                           const uint8_t* encoded,
+                           size_t len,
+                           int16_t *decoded,
+                           int16_t *speechType);
+
+
+  /****************************************************************************
+   * WebRtcIsacfix_DecodeNb(...)
+   *
+   * This function decodes a ISAC frame in narrow-band (8 kHz sampling).
+   * Output speech length will be a multiple of 240 samples: 240 or 480 samples,
+   * depending on the framesize (30 or 60 ms).
+   *
+   * The function is enabled if WEBRTC_ISAC_FIX_NB_CALLS_ENABLED is defined
+   *
+   * Input:
+   *      - ISAC_main_inst    : ISAC instance.
+   *      - encoded           : encoded ISAC frame(s)
+   *      - len               : bytes in encoded vector
+   *
+   * Output:
+   *      - decoded           : The decoded vector
+   *
+   * Return value             : >0 - number of samples in decoded vector
+   *                            -1 - Error
+   */
+
+#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
+  int WebRtcIsacfix_DecodeNb(ISACFIX_MainStruct *ISAC_main_inst,
+                             const uint16_t *encoded,
+                             size_t len,
+                             int16_t *decoded,
+                             int16_t *speechType);
+#endif //  WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
+
+
+  /****************************************************************************
+   * WebRtcIsacfix_DecodePlcNb(...)
+   *
+   * This function conducts PLC for ISAC frame(s) in narrow-band (8kHz sampling).
+   * Output speech length  will be "240*noOfLostFrames" samples
+   * that equevalent of "30*noOfLostFrames" millisecond.
+   *
+   * The function is enabled if WEBRTC_ISAC_FIX_NB_CALLS_ENABLED is defined
+   *
+   * Input:
+   *      - ISAC_main_inst    : ISAC instance.
+   *      - noOfLostFrames    : Number of PLC frames (240 sample=30ms) to produce
+   *                            NOTE! Maximum number is 2 (480 samples = 60ms)
+   *
+   * Output:
+   *      - decoded           : The decoded vector
+   *
+   * Return value             : Number of samples in decoded PLC vector
+   */
+
+#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
+  size_t WebRtcIsacfix_DecodePlcNb(ISACFIX_MainStruct *ISAC_main_inst,
+                                   int16_t *decoded,
+                                   size_t noOfLostFrames);
+#endif // WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
+
+
+
+
+  /****************************************************************************
+   * WebRtcIsacfix_DecodePlc(...)
+   *
+   * This function conducts PLC for ISAC frame(s) in wide-band (16kHz sampling).
+   * Output speech length  will be "480*noOfLostFrames" samples
+   * that is equevalent of "30*noOfLostFrames" millisecond.
+   *
+   * Input:
+   *      - ISAC_main_inst    : ISAC instance.
+   *      - noOfLostFrames    : Number of PLC frames (480sample = 30ms)
+   *                            to produce
+   *                            NOTE! Maximum number is 2 (960 samples = 60ms)
+   *
+   * Output:
+   *      - decoded           : The decoded vector
+   *
+   * Return value             : Number of samples in decoded PLC vector
+   */
+
+  size_t WebRtcIsacfix_DecodePlc(ISACFIX_MainStruct *ISAC_main_inst,
+                                 int16_t *decoded,
+                                 size_t noOfLostFrames );
+
+
+  /****************************************************************************
+   * WebRtcIsacfix_ReadFrameLen(...)
+   *
+   * This function returns the length of the frame represented in the packet.
+   *
+   * Input:
+   *      - encoded           : Encoded bitstream
+   *      - encoded_len_bytes : Length of the bitstream in bytes.
+   *
+   * Output:
+   *      - frameLength       : Length of frame in packet (in samples)
+   *
+   */
+
+  int16_t WebRtcIsacfix_ReadFrameLen(const uint8_t* encoded,
+                                     size_t encoded_len_bytes,
+                                     size_t* frameLength);
+
+  /****************************************************************************
+   * WebRtcIsacfix_Control(...)
+   *
+   * This function sets the limit on the short-term average bit rate and the
+   * frame length. Should be used only in Instantaneous mode.
+   *
+   * Input:
+   *      - ISAC_main_inst    : ISAC instance.
+   *      - rate              : limit on the short-term average bit rate,
+   *                            in bits/second (between 10000 and 32000)
+   *      - framesize         : number of milliseconds per frame (30 or 60)
+   *
+   * Return value             : 0  - ok
+   *                           -1 - Error
+   */
+
+  int16_t WebRtcIsacfix_Control(ISACFIX_MainStruct *ISAC_main_inst,
+                                int16_t rate,
+                                int framesize);
+
+  void WebRtcIsacfix_SetInitialBweBottleneck(ISACFIX_MainStruct* ISAC_main_inst,
+                                             int bottleneck_bits_per_second);
+
+  /****************************************************************************
+   * WebRtcIsacfix_ControlBwe(...)
+   *
+   * This function sets the initial values of bottleneck and frame-size if
+   * iSAC is used in channel-adaptive mode. Through this API, users can
+   * enforce a frame-size for all values of bottleneck. Then iSAC will not
+   * automatically change the frame-size.
+   *
+   *
+   * Input:
+   *      - ISAC_main_inst    : ISAC instance.
+   *      - rateBPS           : initial value of bottleneck in bits/second
+   *                            10000 <= rateBPS <= 32000 is accepted
+   *      - frameSizeMs       : number of milliseconds per frame (30 or 60)
+   *      - enforceFrameSize  : 1 to enforce the given frame-size through out
+   *                            the adaptation process, 0 to let iSAC change
+   *                            the frame-size if required.
+   *
+   * Return value             : 0  - ok
+   *                           -1 - Error
+   */
+
+  int16_t WebRtcIsacfix_ControlBwe(ISACFIX_MainStruct *ISAC_main_inst,
+                                   int16_t rateBPS,
+                                   int frameSizeMs,
+                                   int16_t enforceFrameSize);
+
+
+
+  /****************************************************************************
+   * WebRtcIsacfix_version(...)
+   *
+   * This function returns the version number.
+   *
+   * Output:
+   *      - version      : Pointer to character string
+   *
+   */
+
+  void WebRtcIsacfix_version(char *version);
+
+
+  /****************************************************************************
+   * WebRtcIsacfix_GetErrorCode(...)
+   *
+   * This function can be used to check the error code of an iSAC instance. When
+   * a function returns -1 a error code will be set for that instance. The
+   * function below extract the code of the last error that occured in the
+   * specified instance.
+   *
+   * Input:
+   *  - ISAC_main_inst        : ISAC instance
+   *
+   * Return value             : Error code
+   */
+
+  int16_t WebRtcIsacfix_GetErrorCode(ISACFIX_MainStruct *ISAC_main_inst);
+
+
+  /****************************************************************************
+   * WebRtcIsacfix_GetUplinkBw(...)
+   *
+   * This function return iSAC send bitrate
+   *
+   * Input:
+   *      - ISAC_main_inst    : iSAC instance
+   *
+   * Return value             : <0 Error code
+   *                            else bitrate
+   */
+
+  int32_t WebRtcIsacfix_GetUplinkBw(ISACFIX_MainStruct *ISAC_main_inst);
+
+
+  /****************************************************************************
+   * WebRtcIsacfix_SetMaxPayloadSize(...)
+   *
+   * This function sets a limit for the maximum payload size of iSAC. The same
+   * value is used both for 30 and 60 msec packets.
+   * The absolute max will be valid until next time the function is called.
+   * NOTE! This function may override the function WebRtcIsacfix_SetMaxRate()
+   *
+   * Input:
+   *      - ISAC_main_inst    : iSAC instance
+   *      - maxPayloadBytes   : maximum size of the payload in bytes
+   *                            valid values are between 100 and 400 bytes
+   *
+   *
+   * Return value             : 0 if sucessful
+   *                           -1 if error happens
+   */
+
+  int16_t WebRtcIsacfix_SetMaxPayloadSize(ISACFIX_MainStruct *ISAC_main_inst,
+                                          int16_t maxPayloadBytes);
+
+
+  /****************************************************************************
+   * WebRtcIsacfix_SetMaxRate(...)
+   *
+   * This function sets the maximum rate which the codec may not exceed for a
+   * singel packet. The maximum rate is set in bits per second.
+   * The codec has an absolute maximum rate of 53400 bits per second (200 bytes
+   * per 30 msec).
+   * It is possible to set a maximum rate between 32000 and 53400 bits per second.
+   *
+   * The rate limit is valid until next time the function is called.
+   *
+   * NOTE! Packet size will never go above the value set if calling
+   * WebRtcIsacfix_SetMaxPayloadSize() (default max packet size is 400 bytes).
+   *
+   * Input:
+   *      - ISAC_main_inst    : iSAC instance
+   *      - maxRateInBytes    : maximum rate in bits per second,
+   *                            valid values are 32000 to 53400 bits
+   *
+   * Return value             : 0 if sucessful
+   *                           -1 if error happens
+   */
+
+  int16_t WebRtcIsacfix_SetMaxRate(ISACFIX_MainStruct *ISAC_main_inst,
+                                   int32_t maxRate);
+
+  /****************************************************************************
+   * WebRtcIsacfix_CreateInternal(...)
+   *
+   * This function creates the memory that is used to store data in the encoder
+   *
+   * Input:
+   *      - *ISAC_main_inst   : a pointer to the coder instance.
+   *
+   * Return value             : 0 - Ok
+   *                           -1 - Error
+   */
+
+  int16_t WebRtcIsacfix_CreateInternal(ISACFIX_MainStruct *ISAC_main_inst);
+
+
+  /****************************************************************************
+   * WebRtcIsacfix_FreeInternal(...)
+   *
+   * This function frees the internal memory for storing encoder data.
+   *
+   * Input:
+   *      - ISAC_main_inst        : an ISAC instance.
+   *
+   * Return value                 :  0 - Ok
+   *                                -1 - Error
+   */
+
+  int16_t WebRtcIsacfix_FreeInternal(ISACFIX_MainStruct *ISAC_main_inst);
+
+
+  /****************************************************************************
+   * WebRtcIsacfix_GetNewBitStream(...)
+   *
+   * This function returns encoded data, with the recieved bwe-index in the
+   * stream. It should always return a complete packet, i.e. only called once
+   * even for 60 msec frames
+   *
+   * Input:
+   *      - ISAC_main_inst    : ISAC instance.
+   *      - bweIndex          : index of bandwidth estimate to put in new bitstream
+   *      - scale             : factor for rate change (0.4 ~=> half the rate, 1 no change).
+   *
+   * Output:
+   *      - encoded           : the encoded data vector
+   *
+   * Return value             : >0 - Length (in bytes) of coded data
+   *                            -1 - Error
+   */
+
+  int16_t WebRtcIsacfix_GetNewBitStream(ISACFIX_MainStruct *ISAC_main_inst,
+                                        int16_t          bweIndex,
+                                        float              scale,
+                                        uint8_t* encoded);
+
+
+  /****************************************************************************
+   * WebRtcIsacfix_GetDownLinkBwIndex(...)
+   *
+   * This function returns index representing the Bandwidth estimate from
+   * other side to this side.
+   *
+   * Input:
+   *      - ISAC_main_inst    : iSAC struct
+   *
+   * Output:
+   *      - rateIndex         : Bandwidth estimate to transmit to other side.
+   *
+   */
+
+  int16_t WebRtcIsacfix_GetDownLinkBwIndex(ISACFIX_MainStruct* ISAC_main_inst,
+                                           int16_t*     rateIndex);
+
+
+  /****************************************************************************
+   * WebRtcIsacfix_UpdateUplinkBw(...)
+   *
+   * This function takes an index representing the Bandwidth estimate from
+   * this side to other side and updates BWE.
+   *
+   * Input:
+   *      - ISAC_main_inst    : iSAC struct
+   *      - rateIndex         : Bandwidth estimate from other side.
+   *
+   */
+
+  int16_t WebRtcIsacfix_UpdateUplinkBw(ISACFIX_MainStruct* ISAC_main_inst,
+                                       int16_t     rateIndex);
+
+
+  /****************************************************************************
+   * WebRtcIsacfix_ReadBwIndex(...)
+   *
+   * This function returns the index of the Bandwidth estimate from the bitstream.
+   *
+   * Input:
+   *      - encoded           : Encoded bitstream
+   *      - encoded_len_bytes : Length of the bitstream in bytes.
+   *
+   * Output:
+   *      - rateIndex         : Bandwidth estimate in bitstream
+   *
+   */
+
+  int16_t WebRtcIsacfix_ReadBwIndex(const uint8_t* encoded,
+                                    size_t encoded_len_bytes,
+                                    int16_t* rateIndex);
+
+
+  /****************************************************************************
+   * WebRtcIsacfix_GetNewFrameLen(...)
+   *
+   * This function return the next frame length (in samples) of iSAC.
+   *
+   * Input:
+   *      -ISAC_main_inst     : iSAC instance
+   *
+   * Return value             : frame lenght in samples
+   */
+
+  int16_t WebRtcIsacfix_GetNewFrameLen(ISACFIX_MainStruct *ISAC_main_inst);
+
+  /* Fills in an IsacBandwidthInfo struct. */
+  void WebRtcIsacfix_GetBandwidthInfo(ISACFIX_MainStruct* ISAC_main_inst,
+                                      IsacBandwidthInfo* bwinfo);
+
+  /* Uses the values from an IsacBandwidthInfo struct. */
+  void WebRtcIsacfix_SetBandwidthInfo(ISACFIX_MainStruct* ISAC_main_inst,
+                                      const IsacBandwidthInfo* bwinfo);
+
+#if defined(__cplusplus)
+}
+#endif
+
+
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INCLUDE_ISACFIX_H_ */
diff --git a/modules/audio_coding/codecs/isac/fix/source/arith_routines.c b/modules/audio_coding/codecs/isac/fix/source/arith_routines.c
new file mode 100644
index 0000000..eaeef50
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/arith_routines.c
@@ -0,0 +1,122 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * arith_routins.c
+ *
+ * This C file contains a function for finalizing the bitstream
+ * after arithmetic coding.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/arith_routins.h"
+
+
+/****************************************************************************
+ * WebRtcIsacfix_EncTerminate(...)
+ *
+ * Final call to the arithmetic coder for an encoder call. This function
+ * terminates and return byte stream.
+ *
+ * Input:
+ *      - streamData        : in-/output struct containing bitstream
+ *
+ * Return value             : number of bytes in the stream
+ */
+int16_t WebRtcIsacfix_EncTerminate(Bitstr_enc *streamData)
+{
+  uint16_t *streamPtr;
+  uint16_t negCarry;
+
+  /* point to the right place in the stream buffer */
+  streamPtr = streamData->stream + streamData->stream_index;
+
+  /* find minimum length (determined by current interval width) */
+  if ( streamData->W_upper > 0x01FFFFFF )
+  {
+    streamData->streamval += 0x01000000;
+
+    /* if result is less than the added value we must take care of the carry */
+    if (streamData->streamval < 0x01000000)
+    {
+      /* propagate carry */
+      if (streamData->full == 0) {
+        /* Add value to current value */
+        negCarry = *streamPtr;
+        negCarry += 0x0100;
+        *streamPtr = negCarry;
+
+        /* if value is too big, propagate carry to next byte, and so on */
+        while (!(negCarry))
+        {
+          negCarry = *--streamPtr;
+          negCarry++;
+          *streamPtr = negCarry;
+        }
+      } else {
+        /* propagate carry by adding one to the previous byte in the
+         * stream if that byte is 0xFFFF we need to propagate the carry
+         * furhter back in the stream */
+        while ( !(++(*--streamPtr)) );
+      }
+
+      /* put pointer back to the old value */
+      streamPtr = streamData->stream + streamData->stream_index;
+    }
+    /* write remaining data to bitstream, if "full == 0" first byte has data */
+    if (streamData->full == 0) {
+      *streamPtr++ += (uint16_t)(streamData->streamval >> 24);
+      streamData->full = 1;
+    } else {
+      *streamPtr = (uint16_t)((streamData->streamval >> 24) << 8);
+      streamData->full = 0;
+    }
+  }
+  else
+  {
+    streamData->streamval += 0x00010000;
+
+    /* if result is less than the added value we must take care of the carry */
+    if (streamData->streamval < 0x00010000)
+    {
+      /* propagate carry */
+      if (streamData->full == 0) {
+        /* Add value to current value */
+        negCarry = *streamPtr;
+        negCarry += 0x0100;
+        *streamPtr = negCarry;
+
+        /* if value to big, propagate carry to next byte, and so on */
+        while (!(negCarry))
+        {
+          negCarry = *--streamPtr;
+          negCarry++;
+          *streamPtr = negCarry;
+        }
+      } else {
+        /* Add carry to previous byte */
+        while ( !(++(*--streamPtr)) );
+      }
+
+      /* put pointer back to the old value */
+      streamPtr = streamData->stream + streamData->stream_index;
+    }
+    /* write remaining data (2 bytes) to bitstream */
+    if (streamData->full) {
+      *streamPtr++ = (uint16_t)(streamData->streamval >> 16);
+    } else {
+      *streamPtr++ |= (uint16_t)(streamData->streamval >> 24);
+      *streamPtr = (uint16_t)(streamData->streamval >> 8) & 0xFF00;
+    }
+  }
+
+  /* calculate stream length in bytes */
+  return (((streamPtr - streamData->stream)<<1) + !(streamData->full));
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/arith_routines_hist.c b/modules/audio_coding/codecs/isac/fix/source/arith_routines_hist.c
new file mode 100644
index 0000000..cad3056
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/arith_routines_hist.c
@@ -0,0 +1,401 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * arith_routinshist.c
+ *
+ * This C file contains arithmetic encoding and decoding.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/arith_routins.h"
+
+
+/****************************************************************************
+ * WebRtcIsacfix_EncHistMulti(...)
+ *
+ * Encode the histogram interval
+ *
+ * Input:
+ *      - streamData        : in-/output struct containing bitstream
+ *      - data              : data vector
+ *      - cdf               : array of cdf arrays
+ *      - lenData           : data vector length
+ *
+ * Return value             : 0 if ok
+ *                            <0 if error detected
+ */
+int WebRtcIsacfix_EncHistMulti(Bitstr_enc *streamData,
+                               const int16_t *data,
+                               const uint16_t *const *cdf,
+                               const int16_t lenData)
+{
+  uint32_t W_lower;
+  uint32_t W_upper;
+  uint32_t W_upper_LSB;
+  uint32_t W_upper_MSB;
+  uint16_t *streamPtr;
+  uint16_t negCarry;
+  uint16_t *maxStreamPtr;
+  uint16_t *streamPtrCarry;
+  uint32_t cdfLo;
+  uint32_t cdfHi;
+  int k;
+
+
+  /* point to beginning of stream buffer
+   * and set maximum streamPtr value */
+  streamPtr = streamData->stream + streamData->stream_index;
+  maxStreamPtr = streamData->stream + STREAM_MAXW16_60MS - 1;
+
+  W_upper = streamData->W_upper;
+
+  for (k = lenData; k > 0; k--)
+  {
+    /* fetch cdf_lower and cdf_upper from cdf tables */
+    cdfLo = (uint32_t) *(*cdf + (uint32_t)*data);
+    cdfHi = (uint32_t) *(*cdf++ + (uint32_t)*data++ + 1);
+
+    /* update interval */
+    W_upper_LSB = W_upper & 0x0000FFFF;
+    W_upper_MSB = W_upper >> 16;
+    W_lower = WEBRTC_SPL_UMUL(W_upper_MSB, cdfLo);
+    W_lower += ((W_upper_LSB * cdfLo) >> 16);
+    W_upper = WEBRTC_SPL_UMUL(W_upper_MSB, cdfHi);
+    W_upper += ((W_upper_LSB * cdfHi) >> 16);
+
+    /* shift interval such that it begins at zero */
+    W_upper -= ++W_lower;
+
+    /* add integer to bitstream */
+    streamData->streamval += W_lower;
+
+    /* handle carry */
+    if (streamData->streamval < W_lower)
+    {
+      /* propagate carry */
+      streamPtrCarry = streamPtr;
+      if (streamData->full == 0) {
+        negCarry = *streamPtrCarry;
+        negCarry += 0x0100;
+        *streamPtrCarry = negCarry;
+        while (!(negCarry))
+        {
+          negCarry = *--streamPtrCarry;
+          negCarry++;
+          *streamPtrCarry = negCarry;
+        }
+      } else {
+        while ( !(++(*--streamPtrCarry)) );
+      }
+    }
+
+    /* renormalize interval, store most significant byte of streamval and update streamval
+     * W_upper < 2^24 */
+    while ( !(W_upper & 0xFF000000) )
+    {
+      W_upper <<= 8;
+      if (streamData->full == 0) {
+        *streamPtr++ += (uint16_t)(streamData->streamval >> 24);
+        streamData->full = 1;
+      } else {
+        *streamPtr = (uint16_t)((streamData->streamval >> 24) << 8);
+        streamData->full = 0;
+      }
+
+      if( streamPtr > maxStreamPtr ) {
+        return -ISAC_DISALLOWED_BITSTREAM_LENGTH;
+      }
+      streamData->streamval <<= 8;
+    }
+  }
+
+  /* calculate new stream_index */
+  streamData->stream_index = streamPtr - streamData->stream;
+  streamData->W_upper = W_upper;
+
+  return 0;
+}
+
+
+/****************************************************************************
+ * WebRtcIsacfix_DecHistBisectMulti(...)
+ *
+ * Function to decode more symbols from the arithmetic bytestream, using
+ * method of bisection cdf tables should be of size 2^k-1 (which corresponds
+ * to an alphabet size of 2^k-2)
+ *
+ * Input:
+ *      - streamData        : in-/output struct containing bitstream
+ *      - cdf               : array of cdf arrays
+ *      - cdfSize           : array of cdf table sizes+1 (power of two: 2^k)
+ *      - lenData           : data vector length
+ *
+ * Output:
+ *      - data              : data vector
+ *
+ * Return value             : number of bytes in the stream
+ *                            <0 if error detected
+ */
+int16_t WebRtcIsacfix_DecHistBisectMulti(int16_t *data,
+                                         Bitstr_dec *streamData,
+                                         const uint16_t *const *cdf,
+                                         const uint16_t *cdfSize,
+                                         const int16_t lenData)
+{
+  uint32_t    W_lower = 0;
+  uint32_t    W_upper;
+  uint32_t    W_tmp;
+  uint32_t    W_upper_LSB;
+  uint32_t    W_upper_MSB;
+  uint32_t    streamval;
+  const uint16_t *streamPtr;
+  const uint16_t *cdfPtr;
+  int16_t     sizeTmp;
+  int             k;
+
+
+  streamPtr = streamData->stream + streamData->stream_index;
+  W_upper = streamData->W_upper;
+
+  /* Error check: should not be possible in normal operation */
+  if (W_upper == 0) {
+    return -2;
+  }
+
+  /* first time decoder is called for this stream */
+  if (streamData->stream_index == 0)
+  {
+    /* read first word from bytestream */
+    streamval = (uint32_t)*streamPtr++ << 16;
+    streamval |= *streamPtr++;
+  } else {
+    streamval = streamData->streamval;
+  }
+
+  for (k = lenData; k > 0; k--)
+  {
+    /* find the integer *data for which streamval lies in [W_lower+1, W_upper] */
+    W_upper_LSB = W_upper & 0x0000FFFF;
+    W_upper_MSB = W_upper >> 16;
+
+    /* start halfway the cdf range */
+    sizeTmp = *cdfSize++ / 2;
+    cdfPtr = *cdf + (sizeTmp - 1);
+
+    /* method of bisection */
+    for ( ;; )
+    {
+      W_tmp = WEBRTC_SPL_UMUL_32_16(W_upper_MSB, *cdfPtr);
+      W_tmp += (W_upper_LSB * (*cdfPtr)) >> 16;
+      sizeTmp /= 2;
+      if (sizeTmp == 0) {
+        break;
+      }
+
+      if (streamval > W_tmp)
+      {
+        W_lower = W_tmp;
+        cdfPtr += sizeTmp;
+      } else {
+        W_upper = W_tmp;
+        cdfPtr -= sizeTmp;
+      }
+    }
+    if (streamval > W_tmp)
+    {
+      W_lower = W_tmp;
+      *data++ = cdfPtr - *cdf++;
+    } else {
+      W_upper = W_tmp;
+      *data++ = cdfPtr - *cdf++ - 1;
+    }
+
+    /* shift interval to start at zero */
+    W_upper -= ++W_lower;
+
+    /* add integer to bitstream */
+    streamval -= W_lower;
+
+    /* renormalize interval and update streamval */
+    /* W_upper < 2^24 */
+    while ( !(W_upper & 0xFF000000) )
+    {
+      /* read next byte from stream */
+      if (streamData->full == 0) {
+        streamval = (streamval << 8) | (*streamPtr++ & 0x00FF);
+        streamData->full = 1;
+      } else {
+        streamval = (streamval << 8) | (*streamPtr >> 8);
+        streamData->full = 0;
+      }
+      W_upper <<= 8;
+    }
+
+
+    /* Error check: should not be possible in normal operation */
+    if (W_upper == 0) {
+      return -2;
+    }
+
+  }
+
+  streamData->stream_index = streamPtr - streamData->stream;
+  streamData->W_upper = W_upper;
+  streamData->streamval = streamval;
+
+  if ( W_upper > 0x01FFFFFF ) {
+    return (streamData->stream_index*2 - 3 + !streamData->full);
+  } else {
+    return (streamData->stream_index*2 - 2 + !streamData->full);
+  }
+}
+
+
+/****************************************************************************
+ * WebRtcIsacfix_DecHistOneStepMulti(...)
+ *
+ * Function to decode more symbols from the arithmetic bytestream, taking
+ * single step up or down at a time.
+ * cdf tables can be of arbitrary size, but large tables may take a lot of
+ * iterations.
+ *
+ * Input:
+ *      - streamData        : in-/output struct containing bitstream
+ *      - cdf               : array of cdf arrays
+ *      - initIndex         : vector of initial cdf table search entries
+ *      - lenData           : data vector length
+ *
+ * Output:
+ *      - data              : data vector
+ *
+ * Return value             : number of bytes in original stream
+ *                            <0 if error detected
+ */
+int16_t WebRtcIsacfix_DecHistOneStepMulti(int16_t *data,
+                                          Bitstr_dec *streamData,
+                                          const uint16_t *const *cdf,
+                                          const uint16_t *initIndex,
+                                          const int16_t lenData)
+{
+  uint32_t    W_lower;
+  uint32_t    W_upper;
+  uint32_t    W_tmp;
+  uint32_t    W_upper_LSB;
+  uint32_t    W_upper_MSB;
+  uint32_t    streamval;
+  const uint16_t *streamPtr;
+  const uint16_t *cdfPtr;
+  int             k;
+
+
+  streamPtr = streamData->stream + streamData->stream_index;
+  W_upper = streamData->W_upper;
+  /* Error check: Should not be possible in normal operation */
+  if (W_upper == 0) {
+    return -2;
+  }
+
+  /* Check if it is the first time decoder is called for this stream */
+  if (streamData->stream_index == 0)
+  {
+    /* read first word from bytestream */
+    streamval = (uint32_t)(*streamPtr++) << 16;
+    streamval |= *streamPtr++;
+  } else {
+    streamval = streamData->streamval;
+  }
+
+  for (k = lenData; k > 0; k--)
+  {
+    /* find the integer *data for which streamval lies in [W_lower+1, W_upper] */
+    W_upper_LSB = W_upper & 0x0000FFFF;
+    W_upper_MSB = WEBRTC_SPL_RSHIFT_U32(W_upper, 16);
+
+    /* start at the specified table entry */
+    cdfPtr = *cdf + (*initIndex++);
+    W_tmp = WEBRTC_SPL_UMUL_32_16(W_upper_MSB, *cdfPtr);
+    W_tmp += (W_upper_LSB * (*cdfPtr)) >> 16;
+
+    if (streamval > W_tmp)
+    {
+      for ( ;; )
+      {
+        W_lower = W_tmp;
+
+        /* range check */
+        if (cdfPtr[0] == 65535) {
+          return -3;
+        }
+
+        W_tmp = WEBRTC_SPL_UMUL_32_16(W_upper_MSB, *++cdfPtr);
+        W_tmp += (W_upper_LSB * (*cdfPtr)) >> 16;
+
+        if (streamval <= W_tmp) {
+          break;
+        }
+      }
+      W_upper = W_tmp;
+      *data++ = cdfPtr - *cdf++ - 1;
+    } else {
+      for ( ;; )
+      {
+        W_upper = W_tmp;
+        --cdfPtr;
+
+        /* range check */
+        if (cdfPtr < *cdf) {
+          return -3;
+        }
+
+        W_tmp = WEBRTC_SPL_UMUL_32_16(W_upper_MSB, *cdfPtr);
+        W_tmp += (W_upper_LSB * (*cdfPtr)) >> 16;
+
+        if (streamval > W_tmp) {
+          break;
+        }
+      }
+      W_lower = W_tmp;
+      *data++ = cdfPtr - *cdf++;
+    }
+
+    /* shift interval to start at zero */
+    W_upper -= ++W_lower;
+
+    /* add integer to bitstream */
+    streamval -= W_lower;
+
+    /* renormalize interval and update streamval */
+    /* W_upper < 2^24 */
+    while ( !(W_upper & 0xFF000000) )
+    {
+      /* read next byte from stream */
+      if (streamData->full == 0) {
+        streamval = (streamval << 8) | (*streamPtr++ & 0x00FF);
+        streamData->full = 1;
+      } else {
+        streamval = (streamval << 8) | (*streamPtr >> 8);
+        streamData->full = 0;
+      }
+      W_upper <<= 8;
+    }
+  }
+
+  streamData->stream_index = streamPtr - streamData->stream;
+  streamData->W_upper = W_upper;
+  streamData->streamval = streamval;
+
+  /* find number of bytes in original stream (determined by current interval width) */
+  if ( W_upper > 0x01FFFFFF ) {
+    return (streamData->stream_index*2 - 3 + !streamData->full);
+  } else {
+    return (streamData->stream_index*2 - 2 + !streamData->full);
+  }
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/arith_routines_logist.c b/modules/audio_coding/codecs/isac/fix/source/arith_routines_logist.c
new file mode 100644
index 0000000..8e97960
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/arith_routines_logist.c
@@ -0,0 +1,413 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * arith_routinslogist.c
+ *
+ * This C file contains arithmetic encode and decode logistic
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/arith_routins.h"
+
+/* Tables for piecewise linear cdf functions: y = k*x */
+
+/* x Points for function piecewise() in Q15 */
+static const int32_t kHistEdges[51] = {
+  -327680, -314573, -301466, -288359, -275252, -262144, -249037, -235930, -222823, -209716,
+  -196608, -183501, -170394, -157287, -144180, -131072, -117965, -104858,  -91751,  -78644,
+  -65536,  -52429,  -39322,  -26215,  -13108,       0,   13107,   26214,   39321,   52428,
+  65536,   78643,   91750,  104857,  117964,  131072,  144179,  157286,  170393,  183500,
+  196608,  209715,  222822,  235929,  249036,  262144,  275251,  288358,  301465,  314572,
+  327680
+};
+
+
+/* k Points for function piecewise() in Q0 */
+static const uint16_t kCdfSlope[51] = {
+  5,    5,     5,     5,     5,     5,     5,     5,    5,    5,
+  5,    5,    13,    23,    47,    87,   154,   315,  700, 1088,
+  2471, 6064, 14221, 21463, 36634, 36924, 19750, 13270, 5806, 2312,
+  1095,  660,   316,   145,    86,    41,    32,     5,    5,    5,
+  5,    5,     5,     5,     5,     5,     5,     5,    5,    2,
+  0
+};
+
+/* y Points for function piecewise() in Q0 */
+static const uint16_t kCdfLogistic[51] = {
+  0,     2,     4,     6,     8,    10,    12,    14,    16,    18,
+  20,    22,    24,    29,    38,    57,    92,   153,   279,   559,
+  994,  1983,  4408, 10097, 18682, 33336, 48105, 56005, 61313, 63636,
+  64560, 64998, 65262, 65389, 65447, 65481, 65497, 65510, 65512, 65514,
+  65516, 65518, 65520, 65522, 65524, 65526, 65528, 65530, 65532, 65534,
+  65535
+};
+
+
+/****************************************************************************
+ * WebRtcIsacfix_Piecewise(...)
+ *
+ * Piecewise linear function
+ *
+ * Input:
+ *      - xinQ15           : input value x in Q15
+ *
+ * Return value            : korresponding y-value in Q0
+ */
+
+
+static __inline uint16_t WebRtcIsacfix_Piecewise(int32_t xinQ15) {
+  int32_t ind;
+  int32_t qtmp1;
+  uint16_t qtmp2;
+
+  /* Find index for x-value */
+  qtmp1 = WEBRTC_SPL_SAT(kHistEdges[50],xinQ15,kHistEdges[0]);
+  ind = WEBRTC_SPL_MUL(5, qtmp1 - kHistEdges[0]);
+  ind >>= 16;
+
+  /* Calculate corresponding y-value ans return*/
+  qtmp1 = qtmp1 - kHistEdges[ind];
+  qtmp2 = (uint16_t)WEBRTC_SPL_RSHIFT_U32(
+      WEBRTC_SPL_UMUL_32_16(qtmp1,kCdfSlope[ind]), 15);
+  return (kCdfLogistic[ind] + qtmp2);
+}
+
+/****************************************************************************
+ * WebRtcIsacfix_EncLogisticMulti2(...)
+ *
+ * Arithmetic coding of spectrum.
+ *
+ * Input:
+ *      - streamData        : in-/output struct containing bitstream
+ *      - dataQ7            : data vector in Q7
+ *      - envQ8             : side info vector defining the width of the pdf
+ *                            in Q8
+ *      - lenData           : data vector length
+ *
+ * Return value             :  0 if ok,
+ *                            <0 otherwise.
+ */
+int WebRtcIsacfix_EncLogisticMulti2(Bitstr_enc *streamData,
+                                   int16_t *dataQ7,
+                                   const uint16_t *envQ8,
+                                   const int16_t lenData)
+{
+  uint32_t W_lower;
+  uint32_t W_upper;
+  uint16_t W_upper_LSB;
+  uint16_t W_upper_MSB;
+  uint16_t *streamPtr;
+  uint16_t *maxStreamPtr;
+  uint16_t *streamPtrCarry;
+  uint16_t negcarry;
+  uint32_t cdfLo;
+  uint32_t cdfHi;
+  int k;
+
+  /* point to beginning of stream buffer
+   * and set maximum streamPtr value */
+  streamPtr = streamData->stream + streamData->stream_index;
+  maxStreamPtr = streamData->stream + STREAM_MAXW16_60MS - 1;
+  W_upper = streamData->W_upper;
+
+  for (k = 0; k < lenData; k++)
+  {
+    /* compute cdf_lower and cdf_upper by evaluating the
+     * WebRtcIsacfix_Piecewise linear cdf */
+    cdfLo = WebRtcIsacfix_Piecewise(WEBRTC_SPL_MUL_16_U16(*dataQ7 - 64, *envQ8));
+    cdfHi = WebRtcIsacfix_Piecewise(WEBRTC_SPL_MUL_16_U16(*dataQ7 + 64, *envQ8));
+
+    /* test and clip if probability gets too small */
+    while ((cdfLo + 1) >= cdfHi) {
+      /* clip */
+      if (*dataQ7 > 0) {
+        *dataQ7 -= 128;
+        cdfHi = cdfLo;
+        cdfLo = WebRtcIsacfix_Piecewise(
+            WEBRTC_SPL_MUL_16_U16(*dataQ7 - 64, *envQ8));
+      } else {
+        *dataQ7 += 128;
+        cdfLo = cdfHi;
+        cdfHi = WebRtcIsacfix_Piecewise(
+            WEBRTC_SPL_MUL_16_U16(*dataQ7 + 64, *envQ8));
+      }
+    }
+
+    dataQ7++;
+    /* increment only once per 4 iterations */
+    envQ8 += (k & 1) & (k >> 1);
+
+
+    /* update interval */
+    W_upper_LSB = (uint16_t)W_upper;
+    W_upper_MSB = (uint16_t)WEBRTC_SPL_RSHIFT_U32(W_upper, 16);
+    W_lower = WEBRTC_SPL_UMUL_32_16(cdfLo, W_upper_MSB);
+    W_lower += (cdfLo * W_upper_LSB) >> 16;
+    W_upper = WEBRTC_SPL_UMUL_32_16(cdfHi, W_upper_MSB);
+    W_upper += (cdfHi * W_upper_LSB) >> 16;
+
+    /* shift interval such that it begins at zero */
+    W_upper -= ++W_lower;
+
+    /* add integer to bitstream */
+    streamData->streamval += W_lower;
+
+    /* handle carry */
+    if (streamData->streamval < W_lower)
+    {
+      /* propagate carry */
+      streamPtrCarry = streamPtr;
+      if (streamData->full == 0) {
+        negcarry = *streamPtrCarry;
+        negcarry += 0x0100;
+        *streamPtrCarry = negcarry;
+        while (!(negcarry))
+        {
+          negcarry = *--streamPtrCarry;
+          negcarry++;
+          *streamPtrCarry = negcarry;
+        }
+      } else {
+        while (!(++(*--streamPtrCarry)));
+      }
+    }
+
+    /* renormalize interval, store most significant byte of streamval and update streamval
+     * W_upper < 2^24 */
+    while ( !(W_upper & 0xFF000000) )
+    {
+      W_upper <<= 8;
+      if (streamData->full == 0) {
+        *streamPtr++ += (uint16_t) WEBRTC_SPL_RSHIFT_U32(
+            streamData->streamval, 24);
+        streamData->full = 1;
+      } else {
+        *streamPtr = (uint16_t)((streamData->streamval >> 24) << 8);
+        streamData->full = 0;
+      }
+
+      if( streamPtr > maxStreamPtr )
+        return -ISAC_DISALLOWED_BITSTREAM_LENGTH;
+
+      streamData->streamval <<= 8;
+    }
+  }
+
+  /* calculate new stream_index */
+  streamData->stream_index = streamPtr - streamData->stream;
+  streamData->W_upper = W_upper;
+
+  return 0;
+}
+
+
+/****************************************************************************
+ * WebRtcIsacfix_DecLogisticMulti2(...)
+ *
+ * Arithmetic decoding of spectrum.
+ *
+ * Input:
+ *      - streamData        : in-/output struct containing bitstream
+ *      - envQ8             : side info vector defining the width of the pdf
+ *                            in Q8
+ *      - lenData           : data vector length
+ *
+ * Input/Output:
+ *      - dataQ7            : input: dither vector, output: data vector
+ *
+ * Return value             : number of bytes in the stream so far
+ *                            -1 if error detected
+ */
+int WebRtcIsacfix_DecLogisticMulti2(int16_t *dataQ7,
+                                    Bitstr_dec *streamData,
+                                    const int32_t *envQ8,
+                                    const int16_t lenData)
+{
+  uint32_t    W_lower;
+  uint32_t    W_upper;
+  uint32_t    W_tmp;
+  uint16_t    W_upper_LSB;
+  uint16_t    W_upper_MSB;
+  uint32_t    streamVal;
+  uint16_t    cdfTmp;
+  int32_t     res;
+  int32_t     inSqrt;
+  int32_t     newRes;
+  const uint16_t *streamPtr;
+  int16_t     candQ7;
+  int16_t     envCount;
+  uint16_t    tmpARSpecQ8 = 0;
+  int             k, i;
+  int offset = 0;
+
+  /* point to beginning of stream buffer */
+  streamPtr = streamData->stream + streamData->stream_index;
+  W_upper = streamData->W_upper;
+
+  /* Check if it is first time decoder is called for this stream */
+  if (streamData->stream_index == 0)
+  {
+    /* read first word from bytestream */
+    streamVal = (uint32_t)(*streamPtr++) << 16;
+    streamVal |= *streamPtr++;
+
+  } else {
+    streamVal = streamData->streamval;
+  }
+
+
+  res = 1 << (WebRtcSpl_GetSizeInBits(envQ8[0]) >> 1);
+  envCount = 0;
+
+  /* code assumes lenData%4 == 0 */
+  for (k = 0; k < lenData; k += 4)
+  {
+    int k4;
+
+    /* convert to magnitude spectrum, by doing square-roots (modified from SPLIB) */
+    inSqrt = envQ8[envCount];
+    i = 10;
+
+    /* For safty reasons */
+    if (inSqrt < 0)
+      inSqrt=-inSqrt;
+
+    newRes = (inSqrt / res + res) >> 1;
+    do
+    {
+      res = newRes;
+      newRes = (inSqrt / res + res) >> 1;
+    } while (newRes != res && i-- > 0);
+
+    tmpARSpecQ8 = (uint16_t)newRes;
+
+    for(k4 = 0; k4 < 4; k4++)
+    {
+      /* find the integer *data for which streamVal lies in [W_lower+1, W_upper] */
+      W_upper_LSB = (uint16_t) (W_upper & 0x0000FFFF);
+      W_upper_MSB = (uint16_t) WEBRTC_SPL_RSHIFT_U32(W_upper, 16);
+
+      /* find first candidate by inverting the logistic cdf
+       * Input dither value collected from io-stream */
+      candQ7 = - *dataQ7 + 64;
+      cdfTmp = WebRtcIsacfix_Piecewise(WEBRTC_SPL_MUL_16_U16(candQ7, tmpARSpecQ8));
+
+      W_tmp = (uint32_t)cdfTmp * W_upper_MSB;
+      W_tmp += ((uint32_t)cdfTmp * (uint32_t)W_upper_LSB) >> 16;
+
+      if (streamVal > W_tmp)
+      {
+        W_lower = W_tmp;
+        candQ7 += 128;
+        cdfTmp = WebRtcIsacfix_Piecewise(WEBRTC_SPL_MUL_16_U16(candQ7, tmpARSpecQ8));
+
+        W_tmp = (uint32_t)cdfTmp * W_upper_MSB;
+        W_tmp += ((uint32_t)cdfTmp * (uint32_t)W_upper_LSB) >> 16;
+
+        while (streamVal > W_tmp)
+        {
+          W_lower = W_tmp;
+          candQ7 += 128;
+          cdfTmp = WebRtcIsacfix_Piecewise(
+              WEBRTC_SPL_MUL_16_U16(candQ7, tmpARSpecQ8));
+
+          W_tmp = (uint32_t)cdfTmp * W_upper_MSB;
+          W_tmp += ((uint32_t)cdfTmp * (uint32_t)W_upper_LSB) >> 16;
+
+          /* error check */
+          if (W_lower == W_tmp) {
+            return -1;
+          }
+        }
+        W_upper = W_tmp;
+
+        /* Output value put in dataQ7: another sample decoded */
+        *dataQ7 = candQ7 - 64;
+      }
+      else
+      {
+        W_upper = W_tmp;
+        candQ7 -= 128;
+        cdfTmp = WebRtcIsacfix_Piecewise(WEBRTC_SPL_MUL_16_U16(candQ7, tmpARSpecQ8));
+
+        W_tmp = (uint32_t)cdfTmp * W_upper_MSB;
+        W_tmp += ((uint32_t)cdfTmp * (uint32_t)W_upper_LSB) >> 16;
+
+        while ( !(streamVal > W_tmp) )
+        {
+          W_upper = W_tmp;
+          candQ7 -= 128;
+          cdfTmp = WebRtcIsacfix_Piecewise(
+              WEBRTC_SPL_MUL_16_U16(candQ7, tmpARSpecQ8));
+
+          W_tmp = (uint32_t)cdfTmp * W_upper_MSB;
+          W_tmp += ((uint32_t)cdfTmp * (uint32_t)W_upper_LSB) >> 16;
+
+          /* error check */
+          if (W_upper == W_tmp){
+            return -1;
+          }
+        }
+        W_lower = W_tmp;
+
+        /* Output value put in dataQ7: another sample decoded */
+        *dataQ7 = candQ7 + 64;
+      }
+
+      dataQ7++;
+
+      /* shift interval to start at zero */
+      W_upper -= ++W_lower;
+
+      /* add integer to bitstream */
+      streamVal -= W_lower;
+
+      /* renormalize interval and update streamVal
+       * W_upper < 2^24 */
+      while ( !(W_upper & 0xFF000000) )
+      {
+        if (streamPtr < streamData->stream + streamData->stream_size) {
+          /* read next byte from stream */
+          if (streamData->full == 0) {
+            streamVal = (streamVal << 8) | (*streamPtr++ & 0x00FF);
+            streamData->full = 1;
+          } else {
+            streamVal = (streamVal << 8) | (*streamPtr >> 8);
+            streamData->full = 0;
+          }
+        } else {
+          /* Intending to read outside the stream. This can happen for the last
+           * two or three bytes. It is how the algorithm is implemented. Do
+           * not read from the bit stream and insert zeros instead. */
+          streamVal <<= 8;
+          if (streamData->full == 0) {
+            offset++;  // We would have incremented the pointer in this case.
+            streamData->full = 1;
+          } else {
+            streamData->full = 0;
+          }
+        }
+        W_upper <<= 8;
+      }
+    }
+    envCount++;
+  }
+
+  streamData->stream_index = streamPtr + offset - streamData->stream;
+  streamData->W_upper = W_upper;
+  streamData->streamval = streamVal;
+
+  /* find number of bytes in original stream (determined by current interval width) */
+  if ( W_upper > 0x01FFFFFF )
+    return (streamData->stream_index*2 - 3 + !streamData->full);
+  else
+    return (streamData->stream_index*2 - 2 + !streamData->full);
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/arith_routins.h b/modules/audio_coding/codecs/isac/fix/source/arith_routins.h
new file mode 100644
index 0000000..25eeecf
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/arith_routins.h
@@ -0,0 +1,159 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * arith_routins.h
+ *
+ * Functions for arithmetic coding.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ARITH_ROUTINS_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ARITH_ROUTINS_H_
+
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+/****************************************************************************
+ * WebRtcIsacfix_EncLogisticMulti2(...)
+ *
+ * Arithmetic coding of spectrum.
+ *
+ * Input:
+ *      - streamData        : in-/output struct containing bitstream
+ *      - dataQ7            : data vector in Q7
+ *      - envQ8             : side info vector defining the width of the pdf
+ *                            in Q8
+ *      - lenData           : data vector length
+ *
+ * Return value             :  0 if ok,
+ *                             <0 otherwise.
+ */
+int WebRtcIsacfix_EncLogisticMulti2(
+    Bitstr_enc *streamData,
+    int16_t *dataQ7,
+    const uint16_t *env,
+    const int16_t lenData);
+
+
+/****************************************************************************
+ * WebRtcIsacfix_EncTerminate(...)
+ *
+ * Final call to the arithmetic coder for an encoder call. This function
+ * terminates and return byte stream.
+ *
+ * Input:
+ *      - streamData        : in-/output struct containing bitstream
+ *
+ * Return value             : number of bytes in the stream
+ */
+int16_t WebRtcIsacfix_EncTerminate(Bitstr_enc *streamData);
+
+
+/****************************************************************************
+ * WebRtcIsacfix_DecLogisticMulti2(...)
+ *
+ * Arithmetic decoding of spectrum.
+ *
+ * Input:
+ *      - streamData        : in-/output struct containing bitstream
+ *      - envQ8             : side info vector defining the width of the pdf
+ *                            in Q8
+ *      - lenData           : data vector length
+ *
+ * Input/Output:
+ *      - dataQ7            : input: dither vector, output: data vector, in Q7
+ *
+ * Return value             : number of bytes in the stream so far
+ *                            <0 if error detected
+ */
+int WebRtcIsacfix_DecLogisticMulti2(
+    int16_t *data,
+    Bitstr_dec *streamData,
+    const int32_t *env,
+    const int16_t lenData);
+
+
+/****************************************************************************
+ * WebRtcIsacfix_EncHistMulti(...)
+ *
+ * Encode the histogram interval
+ *
+ * Input:
+ *      - streamData        : in-/output struct containing bitstream
+ *      - data              : data vector
+ *      - cdf               : array of cdf arrays
+ *      - lenData           : data vector length
+ *
+ * Return value             : 0 if ok
+ *                            <0 if error detected
+ */
+int WebRtcIsacfix_EncHistMulti(
+    Bitstr_enc *streamData,
+    const int16_t *data,
+    const uint16_t *const *cdf,
+    const int16_t lenData);
+
+
+/****************************************************************************
+ * WebRtcIsacfix_DecHistBisectMulti(...)
+ *
+ * Function to decode more symbols from the arithmetic bytestream, using
+ * method of bisection.
+ * C df tables should be of size 2^k-1 (which corresponds to an
+ * alphabet size of 2^k-2)
+ *
+ * Input:
+ *      - streamData        : in-/output struct containing bitstream
+ *      - cdf               : array of cdf arrays
+ *      - cdfSize           : array of cdf table sizes+1 (power of two: 2^k)
+ *      - lenData           : data vector length
+ *
+ * Output:
+ *      - data              : data vector
+ *
+ * Return value             : number of bytes in the stream
+ *                            <0 if error detected
+ */
+int16_t WebRtcIsacfix_DecHistBisectMulti(
+    int16_t *data,
+    Bitstr_dec *streamData,
+    const uint16_t *const *cdf,
+    const uint16_t *cdfSize,
+    const int16_t lenData);
+
+
+/****************************************************************************
+ * WebRtcIsacfix_DecHistOneStepMulti(...)
+ *
+ * Function to decode more symbols from the arithmetic bytestream, taking
+ * single step up or down at a time.
+ * cdf tables can be of arbitrary size, but large tables may take a lot of
+ * iterations.
+ *
+ * Input:
+ *      - streamData        : in-/output struct containing bitstream
+ *      - cdf               : array of cdf arrays
+ *      - initIndex         : vector of initial cdf table search entries
+ *      - lenData           : data vector length
+ *
+ * Output:
+ *      - data              : data vector
+ *
+ * Return value             : number of bytes in original stream
+ *                            <0 if error detected
+ */
+int16_t WebRtcIsacfix_DecHistOneStepMulti(
+    int16_t *data,
+    Bitstr_dec *streamData,
+    const uint16_t *const *cdf,
+    const uint16_t *initIndex,
+    const int16_t lenData);
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ARITH_ROUTINS_H_ */
diff --git a/modules/audio_coding/codecs/isac/fix/source/audio_decoder_isacfix.cc b/modules/audio_coding/codecs/isac/fix/source/audio_decoder_isacfix.cc
new file mode 100644
index 0000000..21259ee
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/audio_decoder_isacfix.cc
@@ -0,0 +1,20 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h"
+
+#include "modules/audio_coding/codecs/isac/audio_decoder_isac_t_impl.h"
+
+namespace webrtc {
+
+// Explicit instantiation:
+template class AudioDecoderIsacT<IsacFix>;
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/isac/fix/source/audio_encoder_isacfix.cc b/modules/audio_coding/codecs/isac/fix/source/audio_encoder_isacfix.cc
new file mode 100644
index 0000000..0190ab9
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/audio_encoder_isacfix.cc
@@ -0,0 +1,20 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h"
+
+#include "modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h"
+
+namespace webrtc {
+
+// Explicit instantiation:
+template class AudioEncoderIsacT<IsacFix>;
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c b/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c
new file mode 100644
index 0000000..354ae3b
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c
@@ -0,0 +1,1037 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * bandwidth_estimator.c
+ *
+ * This file contains the code for the Bandwidth Estimator designed
+ * for iSAC.
+ *
+ * NOTE! Castings needed for C55, do not remove!
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h"
+
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "rtc_base/checks.h"
+
+/* array of quantization levels for bottle neck info; Matlab code: */
+/* sprintf('%4.1ff, ', logspace(log10(5000), log10(40000), 12)) */
+static const int16_t kQRateTable[12] = {
+  10000, 11115, 12355, 13733, 15265, 16967,
+  18860, 20963, 23301, 25900, 28789, 32000
+};
+
+/* 0.1 times the values in the table kQRateTable */
+/* values are in Q16                                         */
+static const int32_t KQRate01[12] = {
+  65536000,  72843264,  80969728,  90000589,  100040704, 111194931,
+  123600896, 137383117, 152705434, 169738240, 188671590, 209715200
+};
+
+/* Bits per Bytes Seconds
+ * 8 bits/byte * 1000 msec/sec * 1/framelength (in msec)->bits/byte*sec
+ * frame length will either be 30 or 60 msec. 8738 is 1/60 in Q19 and 1/30 in Q18
+ * The following number is either in Q15 or Q14 depending on the current frame length */
+static const int32_t kBitsByteSec = 4369000;
+
+/* Received header rate. First value is for 30 ms packets and second for 60 ms */
+static const int16_t kRecHeaderRate[2] = {
+  9333, 4666
+};
+
+/* Inverted minimum and maximum bandwidth in Q30.
+   minBwInv 30 ms, maxBwInv 30 ms,
+   minBwInv 60 ms, maxBwInv 69 ms
+*/
+static const int32_t kInvBandwidth[4] = {
+  55539, 25978,
+  73213, 29284
+};
+
+/* Number of samples in 25 msec */
+static const int32_t kSamplesIn25msec = 400;
+
+
+/****************************************************************************
+ * WebRtcIsacfix_InitBandwidthEstimator(...)
+ *
+ * This function initializes the struct for the bandwidth estimator
+ *
+ * Input/Output:
+ *      - bweStr        : Struct containing bandwidth information.
+ *
+ * Return value            : 0
+ */
+int32_t WebRtcIsacfix_InitBandwidthEstimator(BwEstimatorstr *bweStr)
+{
+  bweStr->prevFrameSizeMs       = INIT_FRAME_LEN;
+  bweStr->prevRtpNumber         = 0;
+  bweStr->prevSendTime          = 0;
+  bweStr->prevArrivalTime       = 0;
+  bweStr->prevRtpRate           = 1;
+  bweStr->lastUpdate            = 0;
+  bweStr->lastReduction         = 0;
+  bweStr->countUpdates          = -9;
+
+  /* INIT_BN_EST = 20000
+   * INIT_BN_EST_Q7 = 2560000
+   * INIT_HDR_RATE = 4666
+   * INIT_REC_BN_EST_Q5 = 789312
+   *
+   * recBwInv = 1/(INIT_BN_EST + INIT_HDR_RATE) in Q30
+   * recBwAvg = INIT_BN_EST + INIT_HDR_RATE in Q5
+   */
+  bweStr->recBwInv              = 43531;
+  bweStr->recBw                 = INIT_BN_EST;
+  bweStr->recBwAvgQ             = INIT_BN_EST_Q7;
+  bweStr->recBwAvg              = INIT_REC_BN_EST_Q5;
+  bweStr->recJitter             = (int32_t) 327680;   /* 10 in Q15 */
+  bweStr->recJitterShortTerm    = 0;
+  bweStr->recJitterShortTermAbs = (int32_t) 40960;    /* 5 in Q13 */
+  bweStr->recMaxDelay           = (int32_t) 10;
+  bweStr->recMaxDelayAvgQ       = (int32_t) 5120;     /* 10 in Q9 */
+  bweStr->recHeaderRate         = INIT_HDR_RATE;
+  bweStr->countRecPkts          = 0;
+  bweStr->sendBwAvg             = INIT_BN_EST_Q7;
+  bweStr->sendMaxDelayAvg       = (int32_t) 5120;     /* 10 in Q9 */
+
+  bweStr->countHighSpeedRec     = 0;
+  bweStr->highSpeedRec          = 0;
+  bweStr->countHighSpeedSent    = 0;
+  bweStr->highSpeedSend         = 0;
+  bweStr->inWaitPeriod          = 0;
+
+  /* Find the inverse of the max bw and min bw in Q30
+   *  (1 / (MAX_ISAC_BW + INIT_HDR_RATE) in Q30
+   *  (1 / (MIN_ISAC_BW + INIT_HDR_RATE) in Q30
+   */
+  bweStr->maxBwInv              = kInvBandwidth[3];
+  bweStr->minBwInv              = kInvBandwidth[2];
+
+  bweStr->external_bw_info.in_use = 0;
+
+  return 0;
+}
+
+/****************************************************************************
+ * WebRtcIsacfix_UpdateUplinkBwImpl(...)
+ *
+ * This function updates bottle neck rate received from other side in payload
+ * and calculates a new bottle neck to send to the other side.
+ *
+ * Input/Output:
+ *      - bweStr           : struct containing bandwidth information.
+ *      - rtpNumber        : value from RTP packet, from NetEq
+ *      - frameSize        : length of signal frame in ms, from iSAC decoder
+ *      - sendTime         : value in RTP header giving send time in samples
+ *      - arrivalTime      : value given by timeGetTime() time of arrival in
+ *                           samples of packet from NetEq
+ *      - pksize           : size of packet in bytes, from NetEq
+ *      - Index            : integer (range 0...23) indicating bottle neck &
+ *                           jitter as estimated by other side
+ *
+ * Return value            : 0 if everything went fine,
+ *                           -1 otherwise
+ */
+int32_t WebRtcIsacfix_UpdateUplinkBwImpl(BwEstimatorstr *bweStr,
+                                         const uint16_t rtpNumber,
+                                         const int16_t  frameSize,
+                                         const uint32_t sendTime,
+                                         const uint32_t arrivalTime,
+                                         const size_t   pksize,
+                                         const uint16_t Index)
+{
+  uint16_t  weight = 0;
+  uint32_t  currBwInv = 0;
+  uint16_t  recRtpRate;
+  uint32_t  arrTimeProj;
+  int32_t   arrTimeDiff;
+  int32_t   arrTimeNoise;
+  int32_t   arrTimeNoiseAbs;
+  int32_t   sendTimeDiff;
+
+  int32_t delayCorrFactor = DELAY_CORRECTION_MED;
+  int32_t lateDiff = 0;
+  int16_t immediateSet = 0;
+  int32_t frameSizeSampl;
+
+  int32_t  temp;
+  int32_t  msec;
+  uint32_t exponent;
+  uint32_t reductionFactor;
+  uint32_t numBytesInv;
+  int32_t  sign;
+
+  uint32_t byteSecondsPerBit;
+  uint32_t tempLower;
+  uint32_t tempUpper;
+  int32_t recBwAvgInv;
+  int32_t numPktsExpected;
+
+  int16_t errCode;
+
+  RTC_DCHECK(!bweStr->external_bw_info.in_use);
+
+  /* UPDATE ESTIMATES FROM OTHER SIDE */
+
+  /* The function also checks if Index has a valid value */
+  errCode = WebRtcIsacfix_UpdateUplinkBwRec(bweStr, Index);
+  if (errCode <0) {
+    return(errCode);
+  }
+
+
+  /* UPDATE ESTIMATES ON THIS SIDE */
+
+  /* Bits per second per byte * 1/30 or 1/60 */
+  if (frameSize == 60) {
+    /* If frameSize changed since last call, from 30 to 60, recalculate some values */
+    if ( (frameSize != bweStr->prevFrameSizeMs) && (bweStr->countUpdates > 0)) {
+      bweStr->countUpdates = 10;
+      bweStr->recHeaderRate = kRecHeaderRate[1];
+
+      bweStr->maxBwInv = kInvBandwidth[3];
+      bweStr->minBwInv = kInvBandwidth[2];
+      bweStr->recBwInv = 1073741824 / (bweStr->recBw + bweStr->recHeaderRate);
+    }
+
+    /* kBitsByteSec is in Q15 */
+    recRtpRate = (int16_t)((kBitsByteSec * pksize) >> 15) +
+        bweStr->recHeaderRate;
+
+  } else {
+    /* If frameSize changed since last call, from 60 to 30, recalculate some values */
+    if ( (frameSize != bweStr->prevFrameSizeMs) && (bweStr->countUpdates > 0)) {
+      bweStr->countUpdates = 10;
+      bweStr->recHeaderRate = kRecHeaderRate[0];
+
+      bweStr->maxBwInv = kInvBandwidth[1];
+      bweStr->minBwInv = kInvBandwidth[0];
+      bweStr->recBwInv = 1073741824 / (bweStr->recBw + bweStr->recHeaderRate);
+    }
+
+    /* kBitsByteSec is in Q14 */
+    recRtpRate = (uint16_t)((kBitsByteSec * pksize) >> 14) +
+        bweStr->recHeaderRate;
+  }
+
+
+  /* Check for timer wrap-around */
+  if (arrivalTime < bweStr->prevArrivalTime) {
+    bweStr->prevArrivalTime = arrivalTime;
+    bweStr->lastUpdate      = arrivalTime;
+    bweStr->lastReduction   = arrivalTime + FS3;
+
+    bweStr->countRecPkts      = 0;
+
+    /* store frame size */
+    bweStr->prevFrameSizeMs = frameSize;
+
+    /* store far-side transmission rate */
+    bweStr->prevRtpRate = recRtpRate;
+
+    /* store far-side RTP time stamp */
+    bweStr->prevRtpNumber = rtpNumber;
+
+    return 0;
+  }
+
+  bweStr->countRecPkts++;
+
+  /* Calculate framesize in msec */
+  frameSizeSampl = SAMPLES_PER_MSEC * frameSize;
+
+  /* Check that it's not one of the first 9 packets */
+  if ( bweStr->countUpdates > 0 ) {
+
+    /* Stay in Wait Period for 1.5 seconds (no updates in wait period) */
+    if(bweStr->inWaitPeriod) {
+      if ((arrivalTime - bweStr->startWaitPeriod)> FS_1_HALF) {
+        bweStr->inWaitPeriod = 0;
+      }
+    }
+
+    /* If not been updated for a long time, reduce the BN estimate */
+
+    /* Check send time difference between this packet and previous received      */
+    sendTimeDiff = sendTime - bweStr->prevSendTime;
+    if (sendTimeDiff <= frameSizeSampl * 2) {
+
+      /* Only update if 3 seconds has past since last update */
+      if ((arrivalTime - bweStr->lastUpdate) > FS3) {
+
+        /* Calculate expected number of received packets since last update */
+        numPktsExpected = (arrivalTime - bweStr->lastUpdate) / frameSizeSampl;
+
+        /* If received number of packets is more than 90% of expected (922 = 0.9 in Q10): */
+        /* do the update, else not                                                        */
+        if ((int32_t)bweStr->countRecPkts << 10 > 922 * numPktsExpected) {
+          /* Q4 chosen to approx dividing by 16 */
+          msec = (arrivalTime - bweStr->lastReduction);
+
+          /* the number below represents 13 seconds, highly unlikely
+             but to insure no overflow when reduction factor is multiplied by recBw inverse */
+          if (msec > 208000) {
+            msec = 208000;
+          }
+
+          /* Q20 2^(negative number: - 76/1048576) = .99995
+             product is Q24 */
+          exponent = WEBRTC_SPL_UMUL(0x0000004C, msec);
+
+          /* do the approx with positive exponent so that value is actually rf^-1
+             and multiply by bw inverse */
+          reductionFactor = WEBRTC_SPL_RSHIFT_U32(0x01000000 | (exponent & 0x00FFFFFF),
+                                                  WEBRTC_SPL_RSHIFT_U32(exponent, 24));
+
+          /* reductionFactor in Q13 */
+          reductionFactor = WEBRTC_SPL_RSHIFT_U32(reductionFactor, 11);
+
+          if ( reductionFactor != 0 ) {
+            bweStr->recBwInv = WEBRTC_SPL_MUL((int32_t)bweStr->recBwInv, (int32_t)reductionFactor);
+            bweStr->recBwInv = (int32_t)bweStr->recBwInv >> 13;
+
+          } else {
+            static const uint32_t kInitRate = INIT_BN_EST + INIT_HDR_RATE;
+            /* recBwInv = 1 / kInitRate  in Q26 (Q30??)*/
+            bweStr->recBwInv = (1073741824 + kInitRate / 2) / kInitRate;
+          }
+
+          /* reset time-since-update counter */
+          bweStr->lastReduction = arrivalTime;
+        } else {
+          /* Delay last reduction with 3 seconds */
+          bweStr->lastReduction = arrivalTime + FS3;
+          bweStr->lastUpdate    = arrivalTime;
+          bweStr->countRecPkts  = 0;
+        }
+      }
+    } else {
+      bweStr->lastReduction = arrivalTime + FS3;
+      bweStr->lastUpdate    = arrivalTime;
+      bweStr->countRecPkts  = 0;
+    }
+
+
+    /*   update only if previous packet was not lost */
+    if ( rtpNumber == bweStr->prevRtpNumber + 1 ) {
+      arrTimeDiff = arrivalTime - bweStr->prevArrivalTime;
+
+      if (!(bweStr->highSpeedSend && bweStr->highSpeedRec)) {
+        if (arrTimeDiff > frameSizeSampl) {
+          if (sendTimeDiff > 0) {
+            lateDiff = arrTimeDiff - sendTimeDiff - frameSizeSampl * 2;
+          } else {
+            lateDiff = arrTimeDiff - frameSizeSampl;
+          }
+
+          /* 8000 is 1/2 second (in samples at FS) */
+          if (lateDiff > 8000) {
+            delayCorrFactor = (int32_t) DELAY_CORRECTION_MAX;
+            bweStr->inWaitPeriod = 1;
+            bweStr->startWaitPeriod = arrivalTime;
+            immediateSet = 1;
+          } else if (lateDiff > 5120) {
+            delayCorrFactor = (int32_t) DELAY_CORRECTION_MED;
+            immediateSet = 1;
+            bweStr->inWaitPeriod = 1;
+            bweStr->startWaitPeriod = arrivalTime;
+          }
+        }
+      }
+
+      if ((bweStr->prevRtpRate > (int32_t)bweStr->recBwAvg >> 5) &&
+          (recRtpRate > (int32_t)bweStr->recBwAvg >> 5) &&
+          !bweStr->inWaitPeriod) {
+
+        /* test if still in initiation period and increment counter */
+        if (bweStr->countUpdates++ > 99) {
+          /* constant weight after initiation part, 0.01 in Q13 */
+          weight = (uint16_t) 82;
+        } else {
+          /* weight decreases with number of updates, 1/countUpdates in Q13  */
+          weight = (uint16_t) WebRtcSpl_DivW32W16(
+              8192 + (bweStr->countUpdates >> 1),
+              (int16_t)bweStr->countUpdates);
+        }
+
+        /* Bottle Neck Estimation */
+
+        /* limit outliers, if more than 25 ms too much */
+        if (arrTimeDiff > frameSizeSampl + kSamplesIn25msec) {
+          arrTimeDiff = frameSizeSampl + kSamplesIn25msec;
+        }
+
+        /* don't allow it to be less than frame rate - 10 ms */
+        if (arrTimeDiff < frameSizeSampl - FRAMESAMPLES_10ms) {
+          arrTimeDiff = frameSizeSampl - FRAMESAMPLES_10ms;
+        }
+
+        /* compute inverse receiving rate for last packet, in Q19 */
+        numBytesInv = (uint16_t) WebRtcSpl_DivW32W16(
+            (int32_t)(524288 + ((pksize + HEADER_SIZE) >> 1)),
+            (int16_t)(pksize + HEADER_SIZE));
+
+        /* 8389 is  ~ 1/128000 in Q30 */
+        byteSecondsPerBit = (uint32_t)(arrTimeDiff * 8389);
+
+        /* get upper N bits */
+        tempUpper = WEBRTC_SPL_RSHIFT_U32(byteSecondsPerBit, 15);
+
+        /* get lower 15 bits */
+        tempLower = byteSecondsPerBit & 0x00007FFF;
+
+        tempUpper = WEBRTC_SPL_MUL(tempUpper, numBytesInv);
+        tempLower = WEBRTC_SPL_MUL(tempLower, numBytesInv);
+        tempLower = WEBRTC_SPL_RSHIFT_U32(tempLower, 15);
+
+        currBwInv = tempUpper + tempLower;
+        currBwInv = WEBRTC_SPL_RSHIFT_U32(currBwInv, 4);
+
+        /* Limit inv rate. Note that minBwInv > maxBwInv! */
+        if(currBwInv < bweStr->maxBwInv) {
+          currBwInv = bweStr->maxBwInv;
+        } else if(currBwInv > bweStr->minBwInv) {
+          currBwInv = bweStr->minBwInv;
+        }
+
+        /* update bottle neck rate estimate */
+        bweStr->recBwInv = WEBRTC_SPL_UMUL(weight, currBwInv) +
+            WEBRTC_SPL_UMUL((uint32_t) 8192 - weight, bweStr->recBwInv);
+
+        /* Shift back to Q30 from Q40 (actual used bits shouldn't be more than 27 based on minBwInv)
+           up to 30 bits used with Q13 weight */
+        bweStr->recBwInv = WEBRTC_SPL_RSHIFT_U32(bweStr->recBwInv, 13);
+
+        /* reset time-since-update counter */
+        bweStr->lastUpdate    = arrivalTime;
+        bweStr->lastReduction = arrivalTime + FS3;
+        bweStr->countRecPkts  = 0;
+
+        /* to save resolution compute the inverse of recBwAvg in Q26 by left shifting numerator to 2^31
+           and NOT right shifting recBwAvg 5 bits to an integer
+           At max 13 bits are used
+           shift to Q5 */
+        recBwAvgInv = (0x80000000 + bweStr->recBwAvg / 2) / bweStr->recBwAvg;
+
+        /* Calculate Projected arrival time difference */
+
+        /* The numerator of the quotient can be 22 bits so right shift inv by 4 to avoid overflow
+           result in Q22 */
+        arrTimeProj = WEBRTC_SPL_MUL((int32_t)8000, recBwAvgInv);
+        /* shift to Q22 */
+        arrTimeProj = WEBRTC_SPL_RSHIFT_U32(arrTimeProj, 4);
+        /* complete calulation */
+        arrTimeProj = WEBRTC_SPL_MUL(((int32_t)pksize + HEADER_SIZE), arrTimeProj);
+        /* shift to Q10 */
+        arrTimeProj = WEBRTC_SPL_RSHIFT_U32(arrTimeProj, 12);
+
+        /* difference between projected and actual arrival time differences */
+        /* Q9 (only shift arrTimeDiff by 5 to simulate divide by 16 (need to revisit if change sampling rate) DH */
+        if ((arrTimeDiff << 6) > (int32_t)arrTimeProj) {
+          arrTimeNoise = (arrTimeDiff << 6) - arrTimeProj;
+          sign = 1;
+        } else {
+          arrTimeNoise = arrTimeProj - (arrTimeDiff << 6);
+          sign = -1;
+        }
+
+        /* Q9 */
+        arrTimeNoiseAbs = arrTimeNoise;
+
+        /* long term averaged absolute jitter, Q15 */
+        weight >>= 3;
+        bweStr->recJitter = weight * (arrTimeNoiseAbs << 5) +
+            (1024 - weight) * bweStr->recJitter;
+
+        /* remove the fractional portion */
+        bweStr->recJitter >>= 10;
+
+        /* Maximum jitter is 10 msec in Q15 */
+        if (bweStr->recJitter > (int32_t)327680) {
+          bweStr->recJitter = (int32_t)327680;
+        }
+
+        /* short term averaged absolute jitter */
+        /* Calculation in Q13 products in Q23 */
+        bweStr->recJitterShortTermAbs = 51 * (arrTimeNoiseAbs << 3) +
+            WEBRTC_SPL_MUL(973, bweStr->recJitterShortTermAbs);
+        bweStr->recJitterShortTermAbs >>= 10;
+
+        /* short term averaged jitter */
+        /* Calculation in Q13 products in Q23 */
+        bweStr->recJitterShortTerm = 205 * (arrTimeNoise << 3) * sign +
+            WEBRTC_SPL_MUL(3891, bweStr->recJitterShortTerm);
+
+        if (bweStr->recJitterShortTerm < 0) {
+          temp = -bweStr->recJitterShortTerm;
+          temp >>= 12;
+          bweStr->recJitterShortTerm = -temp;
+        } else {
+          bweStr->recJitterShortTerm >>= 12;
+        }
+      }
+    }
+  } else {
+    /* reset time-since-update counter when receiving the first 9 packets */
+    bweStr->lastUpdate    = arrivalTime;
+    bweStr->lastReduction = arrivalTime + FS3;
+    bweStr->countRecPkts  = 0;
+    bweStr->countUpdates++;
+  }
+
+  /* Limit to minimum or maximum bottle neck rate (in Q30) */
+  if (bweStr->recBwInv > bweStr->minBwInv) {
+    bweStr->recBwInv = bweStr->minBwInv;
+  } else if (bweStr->recBwInv < bweStr->maxBwInv) {
+    bweStr->recBwInv = bweStr->maxBwInv;
+  }
+
+
+  /* store frame length */
+  bweStr->prevFrameSizeMs = frameSize;
+
+  /* store far-side transmission rate */
+  bweStr->prevRtpRate = recRtpRate;
+
+  /* store far-side RTP time stamp */
+  bweStr->prevRtpNumber = rtpNumber;
+
+  /* Replace bweStr->recMaxDelay by the new value (atomic operation) */
+  if (bweStr->prevArrivalTime != 0xffffffff) {
+    bweStr->recMaxDelay = WEBRTC_SPL_MUL(3, bweStr->recJitter);
+  }
+
+  /* store arrival time stamp */
+  bweStr->prevArrivalTime = arrivalTime;
+  bweStr->prevSendTime = sendTime;
+
+  /* Replace bweStr->recBw by the new value */
+  bweStr->recBw = 1073741824 / bweStr->recBwInv - bweStr->recHeaderRate;
+
+  if (immediateSet) {
+    /* delay correction factor is in Q10 */
+    bweStr->recBw = WEBRTC_SPL_UMUL(delayCorrFactor, bweStr->recBw);
+    bweStr->recBw = WEBRTC_SPL_RSHIFT_U32(bweStr->recBw, 10);
+
+    if (bweStr->recBw < (int32_t) MIN_ISAC_BW) {
+      bweStr->recBw = (int32_t) MIN_ISAC_BW;
+    }
+
+    bweStr->recBwAvg = (bweStr->recBw + bweStr->recHeaderRate) << 5;
+
+    bweStr->recBwAvgQ = bweStr->recBw << 7;
+
+    bweStr->recJitterShortTerm = 0;
+
+    bweStr->recBwInv = 1073741824 / (bweStr->recBw + bweStr->recHeaderRate);
+
+    immediateSet = 0;
+  }
+
+
+  return 0;
+}
+
+/* This function updates the send bottle neck rate                                                   */
+/* Index         - integer (range 0...23) indicating bottle neck & jitter as estimated by other side */
+/* returns 0 if everything went fine, -1 otherwise                                                   */
+int16_t WebRtcIsacfix_UpdateUplinkBwRec(BwEstimatorstr *bweStr,
+                                        const int16_t Index)
+{
+  uint16_t RateInd;
+
+  RTC_DCHECK(!bweStr->external_bw_info.in_use);
+
+  if ( (Index < 0) || (Index > 23) ) {
+    return -ISAC_RANGE_ERROR_BW_ESTIMATOR;
+  }
+
+  /* UPDATE ESTIMATES FROM OTHER SIDE */
+
+  if ( Index > 11 ) {
+    RateInd = Index - 12;
+    /* compute the jitter estimate as decoded on the other side in Q9 */
+    /* sendMaxDelayAvg = 0.9 * sendMaxDelayAvg + 0.1 * MAX_ISAC_MD */
+    bweStr->sendMaxDelayAvg = WEBRTC_SPL_MUL(461, bweStr->sendMaxDelayAvg) +
+        51 * (MAX_ISAC_MD << 9);
+    bweStr->sendMaxDelayAvg >>= 9;
+
+  } else {
+    RateInd = Index;
+    /* compute the jitter estimate as decoded on the other side in Q9 */
+    /* sendMaxDelayAvg = 0.9 * sendMaxDelayAvg + 0.1 * MIN_ISAC_MD */
+    bweStr->sendMaxDelayAvg = WEBRTC_SPL_MUL(461, bweStr->sendMaxDelayAvg) +
+        51 * (MIN_ISAC_MD << 9);
+    bweStr->sendMaxDelayAvg >>= 9;
+
+  }
+
+
+  /* compute the BN estimate as decoded on the other side */
+  /* sendBwAvg = 0.9 * sendBwAvg + 0.1 * kQRateTable[RateInd]; */
+  bweStr->sendBwAvg = 461 * bweStr->sendBwAvg +
+      51 * ((uint32_t)kQRateTable[RateInd] << 7);
+  bweStr->sendBwAvg = WEBRTC_SPL_RSHIFT_U32(bweStr->sendBwAvg, 9);
+
+
+  if (WEBRTC_SPL_RSHIFT_U32(bweStr->sendBwAvg, 7) > 28000 && !bweStr->highSpeedSend) {
+    bweStr->countHighSpeedSent++;
+
+    /* approx 2 seconds with 30ms frames */
+    if (bweStr->countHighSpeedSent >= 66) {
+      bweStr->highSpeedSend = 1;
+    }
+  } else if (!bweStr->highSpeedSend) {
+    bweStr->countHighSpeedSent = 0;
+  }
+
+  return 0;
+}
+
+/****************************************************************************
+ * WebRtcIsacfix_GetDownlinkBwIndexImpl(...)
+ *
+ * This function calculates and returns the bandwidth/jitter estimation code
+ * (integer 0...23) to put in the sending iSAC payload.
+ *
+ * Input:
+ *      - bweStr       : BWE struct
+ *
+ * Return:
+ *      bandwith and jitter index (0..23)
+ */
+uint16_t WebRtcIsacfix_GetDownlinkBwIndexImpl(BwEstimatorstr *bweStr)
+{
+  int32_t  rate;
+  int32_t  maxDelay;
+  uint16_t rateInd;
+  uint16_t maxDelayBit;
+  int32_t  tempTerm1;
+  int32_t  tempTerm2;
+  int32_t  tempTermX;
+  int32_t  tempTermY;
+  int32_t  tempMin;
+  int32_t  tempMax;
+
+  if (bweStr->external_bw_info.in_use)
+    return bweStr->external_bw_info.bottleneck_idx;
+
+  /* Get Rate Index */
+
+  /* Get unquantized rate. Always returns 10000 <= rate <= 32000 */
+  rate = WebRtcIsacfix_GetDownlinkBandwidth(bweStr);
+
+  /* Compute the averaged BN estimate on this side */
+
+  /* recBwAvg = 0.9 * recBwAvg + 0.1 * (rate + bweStr->recHeaderRate), 0.9 and 0.1 in Q9 */
+  bweStr->recBwAvg = 922 * bweStr->recBwAvg +
+      102 * (((uint32_t)rate + bweStr->recHeaderRate) << 5);
+  bweStr->recBwAvg = WEBRTC_SPL_RSHIFT_U32(bweStr->recBwAvg, 10);
+
+  /* Find quantization index that gives the closest rate after averaging.
+   * Note that we don't need to check the last value, rate <= kQRateTable[11],
+   * because we will use rateInd = 11 even if rate > kQRateTable[11]. */
+  for (rateInd = 1; rateInd < 11; rateInd++) {
+    if (rate <= kQRateTable[rateInd]){
+      break;
+    }
+  }
+
+  /* find closest quantization index, and update quantized average by taking: */
+  /* 0.9*recBwAvgQ + 0.1*kQRateTable[rateInd] */
+
+  /* 0.9 times recBwAvgQ in Q16 */
+  /* 461/512 - 25/65536 =0.900009 */
+  tempTerm1 = WEBRTC_SPL_MUL(bweStr->recBwAvgQ, 25);
+  tempTerm1 >>= 7;
+  tempTermX = WEBRTC_SPL_UMUL(461, bweStr->recBwAvgQ) - tempTerm1;
+
+  /* rate in Q16 */
+  tempTermY = rate << 16;
+
+  /* 0.1 * kQRateTable[rateInd] = KQRate01[rateInd] */
+  tempTerm1 = tempTermX + KQRate01[rateInd] - tempTermY;
+  tempTerm2 = tempTermY - tempTermX - KQRate01[rateInd-1];
+
+  /* Compare (0.9 * recBwAvgQ + 0.1 * kQRateTable[rateInd] - rate) >
+     (rate - 0.9 * recBwAvgQ - 0.1 * kQRateTable[rateInd-1]) */
+  if (tempTerm1  > tempTerm2) {
+    rateInd--;
+  }
+
+  /* Update quantized average by taking:                  */
+  /* 0.9*recBwAvgQ + 0.1*kQRateTable[rateInd] */
+
+  /* Add 0.1 times kQRateTable[rateInd], in Q16 */
+  tempTermX += KQRate01[rateInd];
+
+  /* Shift back to Q7 */
+  bweStr->recBwAvgQ = tempTermX >> 9;
+
+  /* Count consecutive received bandwidth above 28000 kbps (28000 in Q7 = 3584000) */
+  /* If 66 high estimates in a row, set highSpeedRec to one */
+  /* 66 corresponds to ~2 seconds in 30 msec mode */
+  if ((bweStr->recBwAvgQ > 3584000) && !bweStr->highSpeedRec) {
+    bweStr->countHighSpeedRec++;
+    if (bweStr->countHighSpeedRec >= 66) {
+      bweStr->highSpeedRec = 1;
+    }
+  } else if (!bweStr->highSpeedRec)    {
+    bweStr->countHighSpeedRec = 0;
+  }
+
+  /* Get Max Delay Bit */
+
+  /* get unquantized max delay */
+  maxDelay = WebRtcIsacfix_GetDownlinkMaxDelay(bweStr);
+
+  /* Update quantized max delay average */
+  tempMax = 652800; /* MAX_ISAC_MD * 0.1 in Q18 */
+  tempMin = 130560; /* MIN_ISAC_MD * 0.1 in Q18 */
+  tempTermX = WEBRTC_SPL_MUL((int32_t)bweStr->recMaxDelayAvgQ, (int32_t)461);
+  tempTermY = maxDelay << 18;
+
+  tempTerm1 = tempTermX + tempMax - tempTermY;
+  tempTerm2 = tempTermY - tempTermX - tempMin;
+
+  if ( tempTerm1 > tempTerm2) {
+    maxDelayBit = 0;
+    tempTerm1 = tempTermX + tempMin;
+
+    /* update quantized average, shift back to Q9 */
+    bweStr->recMaxDelayAvgQ = tempTerm1 >> 9;
+  } else {
+    maxDelayBit = 12;
+    tempTerm1 =  tempTermX + tempMax;
+
+    /* update quantized average, shift back to Q9 */
+    bweStr->recMaxDelayAvgQ = tempTerm1 >> 9;
+  }
+
+  /* Return bandwitdh and jitter index (0..23) */
+  return (uint16_t)(rateInd + maxDelayBit);
+}
+
+/* get the bottle neck rate from far side to here, as estimated on this side */
+uint16_t WebRtcIsacfix_GetDownlinkBandwidth(const BwEstimatorstr *bweStr)
+{
+  uint32_t  recBw;
+  int32_t   jitter_sign; /* Q8 */
+  int32_t   bw_adjust;   /* Q16 */
+  int32_t   rec_jitter_short_term_abs_inv; /* Q18 */
+  int32_t   temp;
+
+  RTC_DCHECK(!bweStr->external_bw_info.in_use);
+
+  /* Q18  rec jitter short term abs is in Q13, multiply it by 2^13 to save precision
+     2^18 then needs to be shifted 13 bits to 2^31 */
+  rec_jitter_short_term_abs_inv = 0x80000000u / bweStr->recJitterShortTermAbs;
+
+  /* Q27 = 9 + 18 */
+  jitter_sign = (bweStr->recJitterShortTerm >> 4) *
+      rec_jitter_short_term_abs_inv;
+
+  if (jitter_sign < 0) {
+    temp = -jitter_sign;
+    temp >>= 19;
+    jitter_sign = -temp;
+  } else {
+    jitter_sign >>= 19;
+  }
+
+  /* adjust bw proportionally to negative average jitter sign */
+  //bw_adjust = 1.0f - jitter_sign * (0.15f + 0.15f * jitter_sign * jitter_sign);
+  //Q8 -> Q16 .15 +.15 * jitter^2 first term is .15 in Q16 latter term is Q8*Q8*Q8
+  //38 in Q8 ~.15 9830 in Q16 ~.15
+  temp = 9830 + ((38 * jitter_sign * jitter_sign) >> 8);
+
+  if (jitter_sign < 0) {
+    temp = WEBRTC_SPL_MUL(jitter_sign, temp);
+    temp = -temp;
+    temp >>= 8;
+    bw_adjust = (uint32_t)65536 + temp; /* (1 << 16) + temp; */
+  } else {
+    /* (1 << 16) - ((jitter_sign * temp) >> 8); */
+    bw_adjust = 65536 - ((jitter_sign * temp) >> 8);
+  }
+
+  //make sure following multiplication won't overflow
+  //bw adjust now Q14
+  bw_adjust >>= 2;  // See if good resolution is maintained.
+
+  /* adjust Rate if jitter sign is mostly constant */
+  recBw = WEBRTC_SPL_UMUL(bweStr->recBw, bw_adjust);
+
+  recBw >>= 14;
+
+  /* limit range of bottle neck rate */
+  if (recBw < MIN_ISAC_BW) {
+    recBw = MIN_ISAC_BW;
+  } else if (recBw > MAX_ISAC_BW) {
+    recBw = MAX_ISAC_BW;
+  }
+
+  return  (uint16_t) recBw;
+}
+
+/* Returns the mmax delay (in ms) */
+int16_t WebRtcIsacfix_GetDownlinkMaxDelay(const BwEstimatorstr *bweStr)
+{
+  int16_t recMaxDelay = (int16_t)(bweStr->recMaxDelay >> 15);
+
+  RTC_DCHECK(!bweStr->external_bw_info.in_use);
+
+  /* limit range of jitter estimate */
+  if (recMaxDelay < MIN_ISAC_MD) {
+    recMaxDelay = MIN_ISAC_MD;
+  } else if (recMaxDelay > MAX_ISAC_MD) {
+    recMaxDelay = MAX_ISAC_MD;
+  }
+
+  return recMaxDelay;
+}
+
+/* Clamp val to the closed interval [min,max]. */
+static int16_t clamp(int16_t val, int16_t min, int16_t max) {
+  RTC_DCHECK_LE(min, max);
+  return val < min ? min : (val > max ? max : val);
+}
+
+int16_t WebRtcIsacfix_GetUplinkBandwidth(const BwEstimatorstr* bweStr) {
+  return bweStr->external_bw_info.in_use
+             ? bweStr->external_bw_info.send_bw_avg
+             : clamp(bweStr->sendBwAvg >> 7, MIN_ISAC_BW, MAX_ISAC_BW);
+}
+
+int16_t WebRtcIsacfix_GetUplinkMaxDelay(const BwEstimatorstr* bweStr) {
+  return bweStr->external_bw_info.in_use
+             ? bweStr->external_bw_info.send_max_delay_avg
+             : clamp(bweStr->sendMaxDelayAvg >> 9, MIN_ISAC_MD, MAX_ISAC_MD);
+}
+
+void WebRtcIsacfixBw_GetBandwidthInfo(BwEstimatorstr* bweStr,
+                                   IsacBandwidthInfo* bwinfo) {
+  RTC_DCHECK(!bweStr->external_bw_info.in_use);
+  bwinfo->in_use = 1;
+  bwinfo->send_bw_avg = WebRtcIsacfix_GetUplinkBandwidth(bweStr);
+  bwinfo->send_max_delay_avg = WebRtcIsacfix_GetUplinkMaxDelay(bweStr);
+  bwinfo->bottleneck_idx = WebRtcIsacfix_GetDownlinkBwIndexImpl(bweStr);
+  bwinfo->jitter_info = 0;  // Not used.
+}
+
+void WebRtcIsacfixBw_SetBandwidthInfo(BwEstimatorstr* bweStr,
+                                   const IsacBandwidthInfo* bwinfo) {
+  memcpy(&bweStr->external_bw_info, bwinfo,
+         sizeof bweStr->external_bw_info);
+}
+
+/*
+ * update long-term average bitrate and amount of data in buffer
+ * returns minimum payload size (bytes)
+ */
+uint16_t WebRtcIsacfix_GetMinBytes(RateModel *State,
+                                   int16_t StreamSize,                    /* bytes in bitstream */
+                                   const int16_t FrameSamples,            /* samples per frame */
+                                   const int16_t BottleNeck,        /* bottle neck rate; excl headers (bps) */
+                                   const int16_t DelayBuildUp)      /* max delay from bottle neck buffering (ms) */
+{
+  int32_t MinRate = 0;
+  uint16_t    MinBytes;
+  int16_t TransmissionTime;
+  int32_t inv_Q12;
+  int32_t den;
+
+
+  /* first 10 packets @ low rate, then INIT_BURST_LEN packets @ fixed rate of INIT_RATE bps */
+  if (State->InitCounter > 0) {
+    if (State->InitCounter-- <= INIT_BURST_LEN) {
+      MinRate = INIT_RATE;
+    } else {
+      MinRate = 0;
+    }
+  } else {
+    /* handle burst */
+    if (State->BurstCounter) {
+      if (State->StillBuffered <
+          (((512 - 512 / BURST_LEN) * DelayBuildUp) >> 9)) {
+        /* max bps derived from BottleNeck and DelayBuildUp values */
+        inv_Q12 = 4096 / (BURST_LEN * FrameSamples);
+        MinRate = (512 + SAMPLES_PER_MSEC * ((DelayBuildUp * inv_Q12) >> 3)) *
+            BottleNeck;
+      } else {
+        /* max bps derived from StillBuffered and DelayBuildUp values */
+        inv_Q12 = 4096 / FrameSamples;
+        if (DelayBuildUp > State->StillBuffered) {
+          MinRate = (512 + SAMPLES_PER_MSEC * (((DelayBuildUp -
+              State->StillBuffered) * inv_Q12) >> 3)) * BottleNeck;
+        } else if ((den = WEBRTC_SPL_MUL(SAMPLES_PER_MSEC, (State->StillBuffered - DelayBuildUp))) >= FrameSamples) {
+          /* MinRate will be negative here */
+          MinRate = 0;
+        } else {
+          MinRate = (512 - ((den * inv_Q12) >> 3)) * BottleNeck;
+        }
+        //if (MinRate < 1.04 * BottleNeck)
+        //    MinRate = 1.04 * BottleNeck;
+        //Q9
+        if (MinRate < WEBRTC_SPL_MUL(532, BottleNeck)) {
+          MinRate += WEBRTC_SPL_MUL(22, BottleNeck);
+        }
+      }
+
+      State->BurstCounter--;
+    }
+  }
+
+
+  /* convert rate from bits/second to bytes/packet */
+  //round and shift before conversion
+  MinRate += 256;
+  MinRate >>= 9;
+  MinBytes = MinRate * FrameSamples / FS8;
+
+  /* StreamSize will be adjusted if less than MinBytes */
+  if (StreamSize < MinBytes) {
+    StreamSize = MinBytes;
+  }
+
+  /* keep track of when bottle neck was last exceeded by at least 1% */
+  //517/512 ~ 1.01
+  if ((StreamSize * (int32_t)FS8) / FrameSamples > (517 * BottleNeck) >> 9) {
+    if (State->PrevExceed) {
+      /* bottle_neck exceded twice in a row, decrease ExceedAgo */
+      State->ExceedAgo -= BURST_INTERVAL / (BURST_LEN - 1);
+      if (State->ExceedAgo < 0) {
+        State->ExceedAgo = 0;
+      }
+    } else {
+      State->ExceedAgo += FrameSamples / SAMPLES_PER_MSEC;  /* ms */
+      State->PrevExceed = 1;
+    }
+  } else {
+    State->PrevExceed = 0;
+    State->ExceedAgo += FrameSamples / SAMPLES_PER_MSEC;  /* ms */
+  }
+
+  /* set burst flag if bottle neck not exceeded for long time */
+  if ((State->ExceedAgo > BURST_INTERVAL) && (State->BurstCounter == 0)) {
+    if (State->PrevExceed) {
+      State->BurstCounter = BURST_LEN - 1;
+    } else {
+      State->BurstCounter = BURST_LEN;
+    }
+  }
+
+
+  /* Update buffer delay */
+  TransmissionTime = (StreamSize * 8000) / BottleNeck;  /* ms */
+  State->StillBuffered += TransmissionTime;
+  State->StillBuffered -= FrameSamples / SAMPLES_PER_MSEC;   /* ms */
+  if (State->StillBuffered < 0) {
+    State->StillBuffered = 0;
+  }
+
+  if (State->StillBuffered > 2000) {
+    State->StillBuffered = 2000;
+  }
+
+  return MinBytes;
+}
+
+
+/*
+ * update long-term average bitrate and amount of data in buffer
+ */
+void WebRtcIsacfix_UpdateRateModel(RateModel *State,
+                                   int16_t StreamSize,                    /* bytes in bitstream */
+                                   const int16_t FrameSamples,            /* samples per frame */
+                                   const int16_t BottleNeck)        /* bottle neck rate; excl headers (bps) */
+{
+  const int16_t TransmissionTime = (StreamSize * 8000) / BottleNeck;  /* ms */
+
+  /* avoid the initial "high-rate" burst */
+  State->InitCounter = 0;
+
+  /* Update buffer delay */
+  State->StillBuffered += TransmissionTime;
+  State->StillBuffered -= FrameSamples >> 4;  /* ms */
+  if (State->StillBuffered < 0) {
+    State->StillBuffered = 0;
+  }
+
+}
+
+
+void WebRtcIsacfix_InitRateModel(RateModel *State)
+{
+  State->PrevExceed      = 0;                        /* boolean */
+  State->ExceedAgo       = 0;                        /* ms */
+  State->BurstCounter    = 0;                        /* packets */
+  State->InitCounter     = INIT_BURST_LEN + 10;    /* packets */
+  State->StillBuffered   = 1;                    /* ms */
+}
+
+
+
+
+
+int16_t WebRtcIsacfix_GetNewFrameLength(int16_t bottle_neck, int16_t current_framesamples)
+{
+  int16_t new_framesamples;
+
+  new_framesamples = current_framesamples;
+
+  /* find new framelength */
+  switch(current_framesamples) {
+    case 480:
+      if (bottle_neck < Thld_30_60) {
+        new_framesamples = 960;
+      }
+      break;
+    case 960:
+      if (bottle_neck >= Thld_60_30) {
+        new_framesamples = 480;
+      }
+      break;
+    default:
+      new_framesamples = -1; /* Error */
+  }
+
+  return new_framesamples;
+}
+
+int16_t WebRtcIsacfix_GetSnr(int16_t bottle_neck, int16_t framesamples)
+{
+  int16_t s2nr = 0;
+
+  /* find new SNR value */
+  //consider BottleNeck to be in Q10 ( * 1 in Q10)
+  switch(framesamples) {
+  // TODO(bjornv): The comments below confuses me. I don't know if there is a
+  // difference between frame lengths (in which case the implementation is
+  // wrong), or if it is frame length independent in which case we should
+  // correct the comment and simplify the implementation.
+    case 480:
+      /*s2nr = -1*(a_30 << 10) + ((b_30 * bottle_neck) >> 10);*/
+      s2nr = -22500 + (int16_t)(500 * bottle_neck >> 10);
+      break;
+    case 960:
+      /*s2nr = -1*(a_60 << 10) + ((b_60 * bottle_neck) >> 10);*/
+      s2nr = -22500 + (int16_t)(500 * bottle_neck >> 10);
+      break;
+    default:
+      s2nr = -1; /* Error */
+  }
+
+  return s2nr; //return in Q10
+
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h b/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h
new file mode 100644
index 0000000..67f8d07
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h
@@ -0,0 +1,134 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * bandwidth_estimator.h
+ *
+ * This header file contains the API for the Bandwidth Estimator
+ * designed for iSAC.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_BANDWIDTH_ESTIMATOR_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_BANDWIDTH_ESTIMATOR_H_
+
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+/****************************************************************************
+ * WebRtcIsacfix_InitBandwidthEstimator(...)
+ *
+ * This function initializes the struct for the bandwidth estimator
+ *
+ * Input/Output:
+ *      - bwest_str        : Struct containing bandwidth information.
+ *
+ * Return value            : 0
+ */
+
+int32_t WebRtcIsacfix_InitBandwidthEstimator(BwEstimatorstr *bwest_str);
+
+
+/****************************************************************************
+ * WebRtcIsacfix_UpdateUplinkBwImpl(...)
+ *
+ * This function updates bottle neck rate received from other side in payload
+ * and calculates a new bottle neck to send to the other side.
+ *
+ * Input/Output:
+ *      - bweStr           : struct containing bandwidth information.
+ *      - rtpNumber        : value from RTP packet, from NetEq
+ *      - frameSize        : length of signal frame in ms, from iSAC decoder
+ *      - sendTime         : value in RTP header giving send time in samples
+ *      - arrivalTime      : value given by timeGetTime() time of arrival in
+ *                           samples of packet from NetEq
+ *      - pksize           : size of packet in bytes, from NetEq
+ *      - Index            : integer (range 0...23) indicating bottle neck &
+ *                           jitter as estimated by other side
+ *
+ * Return value            : 0 if everything went fine,
+ *                           -1 otherwise
+ */
+
+int32_t WebRtcIsacfix_UpdateUplinkBwImpl(BwEstimatorstr       *bwest_str,
+                                         const uint16_t        rtp_number,
+                                         const int16_t         frameSize,
+                                         const uint32_t        send_ts,
+                                         const uint32_t        arr_ts,
+                                         const size_t          pksize,
+                                         const uint16_t        Index);
+
+/* Update receiving estimates. Used when we only receive BWE index, no iSAC data packet. */
+int16_t WebRtcIsacfix_UpdateUplinkBwRec(BwEstimatorstr *bwest_str,
+                                        const int16_t Index);
+
+/****************************************************************************
+ * WebRtcIsacfix_GetDownlinkBwIndexImpl(...)
+ *
+ * This function calculates and returns the bandwidth/jitter estimation code
+ * (integer 0...23) to put in the sending iSAC payload.
+ *
+ * Input:
+ *      - bweStr       : BWE struct
+ *
+ * Return:
+ *      bandwith and jitter index (0..23)
+ */
+uint16_t WebRtcIsacfix_GetDownlinkBwIndexImpl(BwEstimatorstr *bwest_str);
+
+/* Returns the bandwidth estimation (in bps) */
+uint16_t WebRtcIsacfix_GetDownlinkBandwidth(const BwEstimatorstr *bwest_str);
+
+/* Returns the bandwidth that iSAC should send with in bps */
+int16_t WebRtcIsacfix_GetUplinkBandwidth(const BwEstimatorstr *bwest_str);
+
+/* Returns the max delay (in ms) */
+int16_t WebRtcIsacfix_GetDownlinkMaxDelay(const BwEstimatorstr *bwest_str);
+
+/* Returns the max delay value from the other side in ms */
+int16_t WebRtcIsacfix_GetUplinkMaxDelay(const BwEstimatorstr *bwest_str);
+
+/* Fills in an IsacExternalBandwidthInfo struct. */
+void WebRtcIsacfixBw_GetBandwidthInfo(BwEstimatorstr* bwest_str,
+                                      IsacBandwidthInfo* bwinfo);
+
+/* Uses the values from an IsacExternalBandwidthInfo struct. */
+void WebRtcIsacfixBw_SetBandwidthInfo(BwEstimatorstr* bwest_str,
+                                      const IsacBandwidthInfo* bwinfo);
+
+/*
+ * update amount of data in bottle neck buffer and burst handling
+ * returns minimum payload size (bytes)
+ */
+uint16_t WebRtcIsacfix_GetMinBytes(RateModel *State,
+                                   int16_t StreamSize,     /* bytes in bitstream */
+                                   const int16_t FrameLen,    /* ms per frame */
+                                   const int16_t BottleNeck,        /* bottle neck rate; excl headers (bps) */
+                                   const int16_t DelayBuildUp);     /* max delay from bottle neck buffering (ms) */
+
+/*
+ * update long-term average bitrate and amount of data in buffer
+ */
+void WebRtcIsacfix_UpdateRateModel(RateModel *State,
+                                   int16_t StreamSize,    /* bytes in bitstream */
+                                   const int16_t FrameSamples,  /* samples per frame */
+                                   const int16_t BottleNeck);       /* bottle neck rate; excl headers (bps) */
+
+
+void WebRtcIsacfix_InitRateModel(RateModel *State);
+
+/* Returns the new framelength value (input argument: bottle_neck) */
+int16_t WebRtcIsacfix_GetNewFrameLength(int16_t bottle_neck, int16_t current_framelength);
+
+/* Returns the new SNR value (input argument: bottle_neck) */
+//returns snr in Q10
+int16_t WebRtcIsacfix_GetSnr(int16_t bottle_neck, int16_t framesamples);
+
+
+#endif /*  MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_BANDWIDTH_ESTIMATOR_H_ */
diff --git a/modules/audio_coding/codecs/isac/fix/source/codec.h b/modules/audio_coding/codecs/isac/fix/source/codec.h
new file mode 100644
index 0000000..9b87c40
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/codec.h
@@ -0,0 +1,228 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * codec.h
+ *
+ * This header file contains the calls to the internal encoder
+ * and decoder functions.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_CODEC_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_CODEC_H_
+
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int WebRtcIsacfix_EstimateBandwidth(BwEstimatorstr* bwest_str,
+                                    Bitstr_dec* streamdata,
+                                    size_t packet_size,
+                                    uint16_t rtp_seq_number,
+                                    uint32_t send_ts,
+                                    uint32_t arr_ts);
+
+int WebRtcIsacfix_DecodeImpl(int16_t* signal_out16,
+                             IsacFixDecoderInstance* ISACdec_obj,
+                             size_t* current_framesamples);
+
+void WebRtcIsacfix_DecodePlcImpl(int16_t* decoded,
+                                 IsacFixDecoderInstance* ISACdec_obj,
+                                 size_t* current_framesample );
+
+int WebRtcIsacfix_EncodeImpl(int16_t* in,
+                             IsacFixEncoderInstance* ISACenc_obj,
+                             BwEstimatorstr* bw_estimatordata,
+                             int16_t CodingMode);
+
+int WebRtcIsacfix_EncodeStoredData(IsacFixEncoderInstance* ISACenc_obj,
+                                   int BWnumber,
+                                   float scale);
+
+/* initialization functions */
+
+void WebRtcIsacfix_InitMaskingEnc(MaskFiltstr_enc* maskdata);
+void WebRtcIsacfix_InitMaskingDec(MaskFiltstr_dec* maskdata);
+
+void WebRtcIsacfix_InitPreFilterbank(PreFiltBankstr* prefiltdata);
+
+void WebRtcIsacfix_InitPostFilterbank(PostFiltBankstr* postfiltdata);
+
+void WebRtcIsacfix_InitPitchFilter(PitchFiltstr* pitchfiltdata);
+
+void WebRtcIsacfix_InitPitchAnalysis(PitchAnalysisStruct* State);
+
+void WebRtcIsacfix_InitPlc(PLCstr* State);
+
+
+/* transform functions */
+
+void WebRtcIsacfix_InitTransform();
+
+typedef void (*Time2Spec)(int16_t* inre1Q9,
+                          int16_t* inre2Q9,
+                          int16_t* outre,
+                          int16_t* outim);
+typedef void (*Spec2Time)(int16_t* inreQ7,
+                          int16_t* inimQ7,
+                          int32_t* outre1Q16,
+                          int32_t* outre2Q16);
+
+extern Time2Spec WebRtcIsacfix_Time2Spec;
+extern Spec2Time WebRtcIsacfix_Spec2Time;
+
+void WebRtcIsacfix_Time2SpecC(int16_t* inre1Q9,
+                              int16_t* inre2Q9,
+                              int16_t* outre,
+                              int16_t* outim);
+void WebRtcIsacfix_Spec2TimeC(int16_t* inreQ7,
+                              int16_t* inimQ7,
+                              int32_t* outre1Q16,
+                              int32_t* outre2Q16);
+
+#if defined(WEBRTC_HAS_NEON)
+void WebRtcIsacfix_Time2SpecNeon(int16_t* inre1Q9,
+                                 int16_t* inre2Q9,
+                                 int16_t* outre,
+                                 int16_t* outim);
+void WebRtcIsacfix_Spec2TimeNeon(int16_t* inreQ7,
+                                 int16_t* inimQ7,
+                                 int32_t* outre1Q16,
+                                 int32_t* outre2Q16);
+#endif
+
+#if defined(MIPS32_LE)
+void WebRtcIsacfix_Time2SpecMIPS(int16_t* inre1Q9,
+                                 int16_t* inre2Q9,
+                                 int16_t* outre,
+                                 int16_t* outim);
+void WebRtcIsacfix_Spec2TimeMIPS(int16_t* inreQ7,
+                                 int16_t* inimQ7,
+                                 int32_t* outre1Q16,
+                                 int32_t* outre2Q16);
+#endif
+
+/* filterbank functions */
+
+void WebRtcIsacfix_SplitAndFilter1(int16_t* in,
+                                   int16_t* LP16,
+                                   int16_t* HP16,
+                                   PreFiltBankstr* prefiltdata);
+
+void WebRtcIsacfix_FilterAndCombine1(int16_t* tempin_ch1,
+                                     int16_t* tempin_ch2,
+                                     int16_t* out16,
+                                     PostFiltBankstr* postfiltdata);
+
+#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
+
+void WebRtcIsacfix_SplitAndFilter2(int16_t* in,
+                                   int16_t* LP16,
+                                   int16_t* HP16,
+                                   PreFiltBankstr* prefiltdata);
+
+void WebRtcIsacfix_FilterAndCombine2(int16_t* tempin_ch1,
+                                     int16_t* tempin_ch2,
+                                     int16_t* out16,
+                                     PostFiltBankstr* postfiltdata,
+                                     int16_t len);
+
+#endif
+
+/* normalized lattice filters */
+
+void WebRtcIsacfix_NormLatticeFilterMa(size_t orderCoef,
+                                       int32_t* stateGQ15,
+                                       int16_t* lat_inQ0,
+                                       int16_t* filt_coefQ15,
+                                       int32_t* gain_lo_hiQ17,
+                                       int16_t lo_hi,
+                                       int16_t* lat_outQ9);
+
+void WebRtcIsacfix_NormLatticeFilterAr(size_t orderCoef,
+                                       int16_t* stateGQ0,
+                                       int32_t* lat_inQ25,
+                                       int16_t* filt_coefQ15,
+                                       int32_t* gain_lo_hiQ17,
+                                       int16_t lo_hi,
+                                       int16_t* lat_outQ0);
+
+/* TODO(kma): Remove the following functions into individual header files. */
+
+/* Internal functions in both C and ARM Neon versions */
+
+int WebRtcIsacfix_AutocorrC(int32_t* __restrict r,
+                            const int16_t* __restrict x,
+                            int16_t N,
+                            int16_t order,
+                            int16_t* __restrict scale);
+
+void WebRtcIsacfix_FilterMaLoopC(int16_t input0,
+                                 int16_t input1,
+                                 int32_t input2,
+                                 int32_t* ptr0,
+                                 int32_t* ptr1,
+                                 int32_t* ptr2);
+
+#if defined(WEBRTC_HAS_NEON)
+int WebRtcIsacfix_AutocorrNeon(int32_t* __restrict r,
+                               const int16_t* __restrict x,
+                               int16_t N,
+                               int16_t order,
+                               int16_t* __restrict scale);
+
+void WebRtcIsacfix_FilterMaLoopNeon(int16_t input0,
+                                    int16_t input1,
+                                    int32_t input2,
+                                    int32_t* ptr0,
+                                    int32_t* ptr1,
+                                    int32_t* ptr2);
+#endif
+
+#if defined(MIPS32_LE)
+int WebRtcIsacfix_AutocorrMIPS(int32_t* __restrict r,
+                               const int16_t* __restrict x,
+                               int16_t N,
+                               int16_t order,
+                               int16_t* __restrict scale);
+
+void WebRtcIsacfix_FilterMaLoopMIPS(int16_t input0,
+                                    int16_t input1,
+                                    int32_t input2,
+                                    int32_t* ptr0,
+                                    int32_t* ptr1,
+                                    int32_t* ptr2);
+#endif
+
+/* Function pointers associated with the above functions. */
+
+typedef int (*AutocorrFix)(int32_t* __restrict r,
+                           const int16_t* __restrict x,
+                           int16_t N,
+                           int16_t order,
+                           int16_t* __restrict scale);
+extern AutocorrFix WebRtcIsacfix_AutocorrFix;
+
+typedef void (*FilterMaLoopFix)(int16_t input0,
+                                int16_t input1,
+                                int32_t input2,
+                                int32_t* ptr0,
+                                int32_t* ptr1,
+                                int32_t* ptr2);
+extern FilterMaLoopFix WebRtcIsacfix_FilterMaLoopFix;
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_CODEC_H_ */
diff --git a/modules/audio_coding/codecs/isac/fix/source/decode.c b/modules/audio_coding/codecs/isac/fix/source/decode.c
new file mode 100644
index 0000000..1442088
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/decode.c
@@ -0,0 +1,221 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * decode.c
+ *
+ * This C file contains the internal decoding function.
+ *
+ */
+
+#include <string.h>
+
+#include "modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h"
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+
+
+
+int WebRtcIsacfix_DecodeImpl(int16_t* signal_out16,
+                             IsacFixDecoderInstance* ISACdec_obj,
+                             size_t* current_framesamples)
+{
+  int k;
+  int err;
+  int16_t BWno;
+  int len = 0;
+
+  int16_t model;
+
+
+  int16_t Vector_Word16_1[FRAMESAMPLES/2];
+  int16_t Vector_Word16_2[FRAMESAMPLES/2];
+
+  int32_t Vector_Word32_1[FRAMESAMPLES/2];
+  int32_t Vector_Word32_2[FRAMESAMPLES/2];
+
+  int16_t lofilt_coefQ15[ORDERLO*SUBFRAMES]; //refl. coeffs
+  int16_t hifilt_coefQ15[ORDERHI*SUBFRAMES]; //refl. coeffs
+  int32_t gain_lo_hiQ17[2*SUBFRAMES];
+
+  int16_t PitchLags_Q7[PITCH_SUBFRAMES];
+  int16_t PitchGains_Q12[PITCH_SUBFRAMES];
+  int16_t AvgPitchGain_Q12;
+
+  int16_t tmp_1, tmp_2;
+  int32_t tmp32a;
+  int16_t gainQ13;
+
+
+  size_t frame_nb; /* counter */
+  size_t frame_mode; /* 0 for 30ms, 1 for 60ms */
+  static const size_t kProcessedSamples = 480; /* 480 (for both 30, 60 ms) */
+
+  /* PLC */
+  int16_t overlapWin[ 240 ];
+
+  (ISACdec_obj->bitstr_obj).W_upper = 0xFFFFFFFF;
+  (ISACdec_obj->bitstr_obj).streamval = 0;
+  (ISACdec_obj->bitstr_obj).stream_index = 0;
+  (ISACdec_obj->bitstr_obj).full = 1;
+
+
+  /* decode framelength and BW estimation - not used, only for stream pointer*/
+  err = WebRtcIsacfix_DecodeFrameLen(&ISACdec_obj->bitstr_obj, current_framesamples);
+  if (err<0)  // error check
+    return err;
+
+  frame_mode = *current_framesamples / MAX_FRAMESAMPLES;  /* 0, or 1 */
+
+  err = WebRtcIsacfix_DecodeSendBandwidth(&ISACdec_obj->bitstr_obj, &BWno);
+  if (err<0)  // error check
+    return err;
+
+  /* one loop if it's one frame (30ms), two loops if two frames bundled together
+   * (60ms) */
+  for (frame_nb = 0; frame_nb <= frame_mode; frame_nb++) {
+
+    /* decode & dequantize pitch parameters */
+    err = WebRtcIsacfix_DecodePitchGain(&(ISACdec_obj->bitstr_obj), PitchGains_Q12);
+    if (err<0)  // error check
+      return err;
+
+    err = WebRtcIsacfix_DecodePitchLag(&ISACdec_obj->bitstr_obj, PitchGains_Q12, PitchLags_Q7);
+    if (err<0)  // error check
+      return err;
+
+    AvgPitchGain_Q12 = (int16_t)(((int32_t)PitchGains_Q12[0] + PitchGains_Q12[1] + PitchGains_Q12[2] + PitchGains_Q12[3])>>2);
+
+    /* decode & dequantize FiltCoef */
+    err = WebRtcIsacfix_DecodeLpc(gain_lo_hiQ17, lofilt_coefQ15, hifilt_coefQ15,
+                                  &ISACdec_obj->bitstr_obj, &model);
+
+    if (err<0)  // error check
+      return err;
+
+    /* decode & dequantize spectrum */
+    len = WebRtcIsacfix_DecodeSpec(&ISACdec_obj->bitstr_obj, Vector_Word16_1, Vector_Word16_2, AvgPitchGain_Q12);
+    if (len < 0)  // error check
+      return len;
+
+    // Why does this need Q16 in and out? /JS
+    WebRtcIsacfix_Spec2Time(Vector_Word16_1, Vector_Word16_2, Vector_Word32_1, Vector_Word32_2);
+
+    for (k=0; k<FRAMESAMPLES/2; k++) {
+      // Q16 -> Q9.
+      Vector_Word16_1[k] = (int16_t)((Vector_Word32_1[k] + 64) >> 7);
+    }
+
+    /* ----  If this is recovery frame ---- */
+    if( (ISACdec_obj->plcstr_obj).used == PLC_WAS_USED )
+    {
+      (ISACdec_obj->plcstr_obj).used = PLC_NOT_USED;
+      if( (ISACdec_obj->plcstr_obj).B < 1000 )
+      {
+        (ISACdec_obj->plcstr_obj).decayCoeffPriodic = 4000;
+      }
+
+      ISACdec_obj->plcstr_obj.decayCoeffPriodic = WEBRTC_SPL_WORD16_MAX;    /* DECAY_RATE is in Q15 */
+      ISACdec_obj->plcstr_obj.decayCoeffNoise = WEBRTC_SPL_WORD16_MAX;    /* DECAY_RATE is in Q15 */
+      ISACdec_obj->plcstr_obj.pitchCycles = 0;
+
+      PitchGains_Q12[0] = (int16_t)(PitchGains_Q12[0] * 700 >> 10);
+
+      /* ---- Add-overlap ---- */
+      WebRtcSpl_GetHanningWindow( overlapWin, RECOVERY_OVERLAP );
+      for( k = 0; k < RECOVERY_OVERLAP; k++ )
+        Vector_Word16_1[k] = WebRtcSpl_AddSatW16(
+            (int16_t)(ISACdec_obj->plcstr_obj.overlapLP[k] *
+                overlapWin[RECOVERY_OVERLAP - k - 1] >> 14),
+            (int16_t)(Vector_Word16_1[k] * overlapWin[k] >> 14));
+
+
+
+    }
+
+    /* --- Store side info --- */
+    if( frame_nb == frame_mode )
+    {
+      /* --- LPC info */
+      WEBRTC_SPL_MEMCPY_W16( (ISACdec_obj->plcstr_obj).lofilt_coefQ15, &lofilt_coefQ15[(SUBFRAMES-1)*ORDERLO], ORDERLO );
+      WEBRTC_SPL_MEMCPY_W16( (ISACdec_obj->plcstr_obj).hifilt_coefQ15, &hifilt_coefQ15[(SUBFRAMES-1)*ORDERHI], ORDERHI );
+      (ISACdec_obj->plcstr_obj).gain_lo_hiQ17[0] = gain_lo_hiQ17[(SUBFRAMES-1) * 2];
+      (ISACdec_obj->plcstr_obj).gain_lo_hiQ17[1] = gain_lo_hiQ17[(SUBFRAMES-1) * 2 + 1];
+
+      /* --- LTP info */
+      (ISACdec_obj->plcstr_obj).AvgPitchGain_Q12 = PitchGains_Q12[3];
+      (ISACdec_obj->plcstr_obj).lastPitchGain_Q12 = PitchGains_Q12[3];
+      (ISACdec_obj->plcstr_obj).lastPitchLag_Q7 = PitchLags_Q7[3];
+
+      if( PitchLags_Q7[3] < 3000 )
+        (ISACdec_obj->plcstr_obj).lastPitchLag_Q7 += PitchLags_Q7[3];
+
+      WEBRTC_SPL_MEMCPY_W16( (ISACdec_obj->plcstr_obj).prevPitchInvIn, Vector_Word16_1, FRAMESAMPLES/2 );
+
+    }
+    /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
+
+    /* inverse pitch filter */
+    WebRtcIsacfix_PitchFilter(Vector_Word16_1, Vector_Word16_2, &ISACdec_obj->pitchfiltstr_obj, PitchLags_Q7, PitchGains_Q12, 4);
+
+    if( frame_nb == frame_mode )
+    {
+      WEBRTC_SPL_MEMCPY_W16( (ISACdec_obj->plcstr_obj).prevPitchInvOut, &(Vector_Word16_2[FRAMESAMPLES/2 - (PITCH_MAX_LAG + 10)]), PITCH_MAX_LAG );
+    }
+
+
+    /* reduce gain to compensate for pitch enhancer */
+    /* gain = 1.0f - 0.45f * AvgPitchGain; */
+    tmp32a = AvgPitchGain_Q12 * 29;  // Q18
+    gainQ13 = (int16_t)((262144 - tmp32a) >> 5);  // Q18 -> Q13.
+
+    for (k = 0; k < FRAMESAMPLES/2; k++)
+    {
+      Vector_Word32_1[k] = (Vector_Word16_2[k] * gainQ13) * (1 << 3);  // Q25
+    }
+
+
+    /* perceptual post-filtering (using normalized lattice filter) */
+    WebRtcIsacfix_NormLatticeFilterAr(ORDERLO, (ISACdec_obj->maskfiltstr_obj).PostStateLoGQ0,
+                                      Vector_Word32_1, lofilt_coefQ15, gain_lo_hiQ17, 0, Vector_Word16_1);
+
+    /* --- Store Highpass Residual --- */
+    for (k = 0; k < FRAMESAMPLES/2; k++)
+      Vector_Word32_1[k] = Vector_Word32_2[k] * (1 << 9);  // Q16 -> Q25
+
+    for( k = 0; k < PITCH_MAX_LAG + 10; k++ )
+      (ISACdec_obj->plcstr_obj).prevHP[k] = Vector_Word32_1[FRAMESAMPLES/2 - (PITCH_MAX_LAG + 10) + k];
+
+
+    WebRtcIsacfix_NormLatticeFilterAr(ORDERHI, (ISACdec_obj->maskfiltstr_obj).PostStateHiGQ0,
+                                      Vector_Word32_1, hifilt_coefQ15, gain_lo_hiQ17, 1, Vector_Word16_2);
+
+    /* recombine the 2 bands */
+
+    /* Form the polyphase signals, and compensate for DC offset */
+    for (k=0;k<FRAMESAMPLES/2;k++) {
+      tmp_1 = (int16_t)WebRtcSpl_SatW32ToW16(((int32_t)Vector_Word16_1[k]+Vector_Word16_2[k] + 1)); /* Construct a new upper channel signal*/
+      tmp_2 = (int16_t)WebRtcSpl_SatW32ToW16(((int32_t)Vector_Word16_1[k]-Vector_Word16_2[k])); /* Construct a new lower channel signal*/
+      Vector_Word16_1[k] = tmp_1;
+      Vector_Word16_2[k] = tmp_2;
+    }
+
+    WebRtcIsacfix_FilterAndCombine1(Vector_Word16_1,
+                                    Vector_Word16_2,
+                                    signal_out16 + frame_nb * kProcessedSamples,
+                                    &ISACdec_obj->postfiltbankstr_obj);
+
+  }
+  return len;
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c b/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c
new file mode 100644
index 0000000..9967650
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c
@@ -0,0 +1,69 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * decode_bwe.c
+ *
+ * This C file contains the internal decode bandwidth estimate function.
+ *
+ */
+
+
+#include "modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h"
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+
+
+
+int WebRtcIsacfix_EstimateBandwidth(BwEstimatorstr *bwest_str,
+                                    Bitstr_dec  *streamdata,
+                                    size_t packet_size,
+                                    uint16_t rtp_seq_number,
+                                    uint32_t send_ts,
+                                    uint32_t arr_ts)
+{
+  int16_t index;
+  size_t frame_samples;
+  int err;
+
+  /* decode framelength */
+  err = WebRtcIsacfix_DecodeFrameLen(streamdata, &frame_samples);
+  /* error check */
+  if (err<0) {
+    return err;
+  }
+
+  /* decode BW estimation */
+  err = WebRtcIsacfix_DecodeSendBandwidth(streamdata, &index);
+  /* error check */
+  if (err<0) {
+    return err;
+  }
+
+  /* Update BWE with received data */
+  err = WebRtcIsacfix_UpdateUplinkBwImpl(
+      bwest_str,
+      rtp_seq_number,
+      (int16_t)(frame_samples * 1000 / FS),
+      send_ts,
+      arr_ts,
+      packet_size,  /* in bytes */
+      index);
+
+  /* error check */
+  if (err<0) {
+    return err;
+  }
+
+  /* Succesful */
+  return 0;
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/decode_plc.c b/modules/audio_coding/codecs/isac/fix/source/decode_plc.c
new file mode 100644
index 0000000..873cf95
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/decode_plc.c
@@ -0,0 +1,805 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * decode_plc.c
+ *
+ * Packet Loss Concealment.
+ *
+ */
+
+#include <string.h>
+
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+#include "modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h"
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+
+
+#define NO_OF_PRIMES 8
+#define NOISE_FILTER_LEN 30
+
+/*
+ * function to decode the bitstream
+ * returns the total number of bytes in the stream
+ */
+
+static int16_t plc_filterma_Fast(
+    int16_t *In,  /* (i)   Vector to be filtered. InOut[-orderCoef+1]
+                           to InOut[-1] contains state */
+    int16_t *Out,  /* (o)   Filtered vector */
+    int16_t *B,   /* (i)   The filter coefficients (in Q0) */
+    int16_t Blen,  /* (i)   Number of B coefficients */
+    int16_t len,   /* (i)  Number of samples to be filtered */
+    int16_t reduceDecay,
+    int16_t decay,
+    int16_t rshift )
+{
+  int i, j;
+  int32_t o;
+  int32_t lim = (1 << (15 + rshift)) - 1;
+
+  for (i = 0; i < len; i++)
+  {
+    const int16_t *b_ptr = &B[0];
+    const int16_t *x_ptr = &In[i];
+
+    o = (int32_t)0;
+
+    for (j = 0;j < Blen; j++)
+    {
+      o = WebRtcSpl_AddSatW32(o, *b_ptr * *x_ptr);
+      b_ptr++;
+      x_ptr--;
+    }
+
+    /* to round off correctly */
+    o = WebRtcSpl_AddSatW32(o, 1 << (rshift - 1));
+
+    /* saturate according to the domain of the filter coefficients */
+    o = WEBRTC_SPL_SAT((int32_t)lim, o, (int32_t)-lim);
+
+    /* o should be in the range of int16_t */
+    o >>= rshift;
+
+    /* decay the output signal; this is specific to plc */
+    *Out++ = (int16_t)((int16_t)o * decay >> 15);
+
+    /* change the decay */
+    decay -= reduceDecay;
+    if( decay < 0 )
+      decay = 0;
+  }
+  return( decay );
+}
+
+
+
+
+
+
+
+
+static __inline int32_t log2_Q8_T( uint32_t x ) {
+
+  int32_t zeros;
+  int16_t frac;
+
+  zeros=WebRtcSpl_NormU32(x);
+  frac = (int16_t)(((x << zeros) & 0x7FFFFFFF) >> 23);
+
+  /* log2(magn(i)) */
+  return ((31 - zeros) << 8) + frac;
+}
+
+static __inline int16_t  exp2_Q10_T(int16_t x) { // Both in and out in Q10
+
+  int16_t tmp16_1, tmp16_2;
+
+  tmp16_2=(int16_t)(0x0400|(x&0x03FF));
+  tmp16_1 = -(x >> 10);
+  if(tmp16_1>0)
+    return tmp16_2 >> tmp16_1;
+  else
+    return tmp16_2 << -tmp16_1;
+
+}
+
+
+/*
+  This is a fixed-point version of the above code with limLow = 700 and limHigh = 5000,
+  hard-coded. The values 700 and 5000 were experimentally obtained.
+
+  The function implements membership values for two sets. The mebership functions are
+  of second orders corresponding to half-bell-shapped pulses.
+*/
+static void MemshipValQ15( int16_t in, int16_t *A, int16_t *B )
+{
+  int16_t x;
+
+  in -= 700;    /* translate the lowLim to 0, limHigh = 5000 - 700, M = 2150 */
+
+  if( in <= 2150 )
+  {
+    if( in > 0 )
+    {
+      /* b = in^2 / (2 * M^2), a = 1 - b in Q0.
+         We have to compute in Q15 */
+
+      /* x = in / 2150 {in Q15} = x * 15.2409 {in Q15} =
+         x*15 + (x*983)/(2^12); note that 983/2^12 = 0.23999     */
+
+      /* we are sure that x is in the range of int16_t            */
+      x = (int16_t)(in * 15 + (in * 983 >> 12));
+      /* b = x^2 / 2 {in Q15} so a shift of 16 is required to
+         be in correct domain and one more for the division by 2 */
+      *B = (int16_t)((x * x + 0x00010000) >> 17);
+      *A = WEBRTC_SPL_WORD16_MAX - *B;
+    }
+    else
+    {
+      *B = 0;
+      *A = WEBRTC_SPL_WORD16_MAX;
+    }
+  }
+  else
+  {
+    if( in < 4300 )
+    {
+      /* This is a mirror case of the above */
+      in = 4300 - in;
+      x = (int16_t)(in * 15 + (in * 983 >> 12));
+      /* b = x^2 / 2 {in Q15} so a shift of 16 is required to
+         be in correct domain and one more for the division by 2 */
+      *A = (int16_t)((x * x + 0x00010000) >> 17);
+      *B = WEBRTC_SPL_WORD16_MAX - *A;
+
+    }
+    else
+    {
+      *A = 0;
+      *B = WEBRTC_SPL_WORD16_MAX;
+    }
+  }
+}
+
+
+
+
+static void LinearResampler(int16_t* in,
+                            int16_t* out,
+                            size_t lenIn,
+                            size_t lenOut)
+{
+  size_t n = (lenIn - 1) * RESAMP_RES;
+  int16_t resOut, relativePos, diff; /* */
+  size_t i, j;
+  uint16_t udiff;
+
+  if( lenIn == lenOut )
+  {
+    WEBRTC_SPL_MEMCPY_W16( out, in, lenIn );
+    return;
+  }
+
+  resOut = WebRtcSpl_DivW32W16ResW16( (int32_t)n, (int16_t)(lenOut-1) );
+
+  out[0] = in[0];
+  for( i = 1, j = 0, relativePos = 0; i < lenOut; i++ )
+  {
+
+    relativePos += resOut;
+    while( relativePos > RESAMP_RES )
+    {
+      j++;
+      relativePos -= RESAMP_RES;
+    }
+
+
+    /* an overflow may happen and the differce in sample values may
+     * require more than 16 bits. We like to avoid 32 bit arithmatic
+     * as much as possible */
+
+    if( (in[ j ] > 0) && (in[j + 1] < 0) )
+    {
+      udiff = (uint16_t)(in[ j ] - in[j + 1]);
+      out[ i ] = in[ j ] - (uint16_t)( ((int32_t)( udiff * relativePos )) >> RESAMP_RES_BIT);
+    }
+    else
+    {
+      if( (in[j] < 0) && (in[j+1] > 0) )
+      {
+        udiff = (uint16_t)( in[j + 1] - in[ j ] );
+        out[ i ] = in[ j ] + (uint16_t)( ((int32_t)( udiff * relativePos )) >> RESAMP_RES_BIT);
+      }
+      else
+      {
+        diff = in[ j + 1 ] - in[ j ];
+        out[i] = in[j] + (int16_t)(diff * relativePos >> RESAMP_RES_BIT);
+      }
+    }
+  }
+}
+
+
+
+
+
+void WebRtcIsacfix_DecodePlcImpl(int16_t *signal_out16,
+                                 IsacFixDecoderInstance *ISACdec_obj,
+                                 size_t *current_framesamples )
+{
+  int subframecnt;
+
+  int16_t* Vector_Word16_1;
+  int16_t  Vector_Word16_Extended_1[FRAMESAMPLES_HALF + NOISE_FILTER_LEN];
+  int16_t* Vector_Word16_2;
+  int16_t  Vector_Word16_Extended_2[FRAMESAMPLES_HALF + NOISE_FILTER_LEN];
+
+  int32_t Vector_Word32_1[FRAMESAMPLES_HALF];
+  int32_t Vector_Word32_2[FRAMESAMPLES_HALF];
+
+  int16_t lofilt_coefQ15[ORDERLO*SUBFRAMES]; //refl. coeffs
+  int16_t hifilt_coefQ15[ORDERHI*SUBFRAMES]; //refl. coeffs
+
+  int16_t pitchLags_Q7[PITCH_SUBFRAMES];
+  int16_t pitchGains_Q12[PITCH_SUBFRAMES];
+
+  int16_t tmp_1, tmp_2;
+  int32_t tmp32a, tmp32b;
+  int16_t gainQ13;
+
+  int16_t myDecayRate;
+
+  /* ---------- PLC variables ------------ */
+  size_t lag0, i, k;
+  int16_t noiseIndex;
+  int16_t stretchPitchLP[PITCH_MAX_LAG + 10], stretchPitchLP1[PITCH_MAX_LAG + 10];
+
+  int32_t gain_lo_hiQ17[2*SUBFRAMES];
+
+  int16_t nLP, pLP, wNoisyLP, wPriodicLP, tmp16;
+  size_t minIdx;
+  int32_t nHP, pHP, wNoisyHP, wPriodicHP, corr, minCorr, maxCoeff;
+  int16_t noise1, rshift;
+
+
+  int16_t ltpGain, pitchGain, myVoiceIndicator, myAbs, maxAbs;
+  int32_t varIn, varOut, logVarIn, logVarOut, Q, logMaxAbs;
+  int rightShiftIn, rightShiftOut;
+
+
+  /* ------------------------------------- */
+
+
+  myDecayRate = (DECAY_RATE);
+  Vector_Word16_1 = &Vector_Word16_Extended_1[NOISE_FILTER_LEN];
+  Vector_Word16_2 = &Vector_Word16_Extended_2[NOISE_FILTER_LEN];
+
+
+  /* ----- Simply Copy Previous LPC parameters ------ */
+  for( subframecnt = 0; subframecnt < SUBFRAMES; subframecnt++ )
+  {
+    /* lower Band */
+    WEBRTC_SPL_MEMCPY_W16(&lofilt_coefQ15[ subframecnt * ORDERLO ],
+                          (ISACdec_obj->plcstr_obj).lofilt_coefQ15, ORDERLO);
+    gain_lo_hiQ17[2*subframecnt] = (ISACdec_obj->plcstr_obj).gain_lo_hiQ17[0];
+
+    /* Upper Band */
+    WEBRTC_SPL_MEMCPY_W16(&hifilt_coefQ15[ subframecnt * ORDERHI ],
+                          (ISACdec_obj->plcstr_obj).hifilt_coefQ15, ORDERHI);
+    gain_lo_hiQ17[2*subframecnt + 1] = (ISACdec_obj->plcstr_obj).gain_lo_hiQ17[1];
+  }
+
+
+
+
+  lag0 = (size_t)(((ISACdec_obj->plcstr_obj.lastPitchLag_Q7 + 64) >> 7) + 1);
+
+
+  if( (ISACdec_obj->plcstr_obj).used != PLC_WAS_USED )
+  {
+    (ISACdec_obj->plcstr_obj).pitchCycles = 0;
+
+    (ISACdec_obj->plcstr_obj).lastPitchLP =
+        &((ISACdec_obj->plcstr_obj).prevPitchInvIn[FRAMESAMPLES_HALF - lag0]);
+    minCorr = WEBRTC_SPL_WORD32_MAX;
+
+    if ((FRAMESAMPLES_HALF - 10) > 2 * lag0)
+    {
+      minIdx = 11;
+      for( i = 0; i < 21; i++ )
+      {
+        corr = 0;
+        for( k = 0; k < lag0; k++ )
+        {
+          corr = WebRtcSpl_AddSatW32(corr, WEBRTC_SPL_ABS_W32(
+              WebRtcSpl_SubSatW16(
+                  (ISACdec_obj->plcstr_obj).lastPitchLP[k],
+                  (ISACdec_obj->plcstr_obj).prevPitchInvIn[
+                      FRAMESAMPLES_HALF - 2*lag0 - 10 + i + k ] ) ) );
+        }
+        if( corr < minCorr )
+        {
+          minCorr = corr;
+          minIdx = i;
+        }
+      }
+      (ISACdec_obj->plcstr_obj).prevPitchLP =
+          &( (ISACdec_obj->plcstr_obj).prevPitchInvIn[
+              FRAMESAMPLES_HALF - lag0*2 - 10 + minIdx] );
+    }
+    else
+    {
+      (ISACdec_obj->plcstr_obj).prevPitchLP =
+          (ISACdec_obj->plcstr_obj).lastPitchLP;
+    }
+    pitchGain = (ISACdec_obj->plcstr_obj).lastPitchGain_Q12;
+
+    WebRtcSpl_AutoCorrelation(
+        &(ISACdec_obj->plcstr_obj).prevPitchInvIn[FRAMESAMPLES_HALF - lag0],
+        lag0, 0, &varIn, &rightShiftIn);
+    WebRtcSpl_AutoCorrelation(
+        &(ISACdec_obj->plcstr_obj).prevPitchInvOut[PITCH_MAX_LAG + 10 - lag0],
+        lag0, 0, &varOut, &rightShiftOut);
+
+    maxAbs = 0;
+    for( i = 0; i< lag0; i++)
+    {
+      myAbs = WEBRTC_SPL_ABS_W16(
+          (ISACdec_obj->plcstr_obj).prevPitchInvOut[
+              PITCH_MAX_LAG + 10 - lag0 + i] );
+      maxAbs = (myAbs > maxAbs)? myAbs:maxAbs;
+    }
+    logVarIn = log2_Q8_T( (uint32_t)( varIn ) ) +
+        (int32_t)(rightShiftIn << 8);
+    logVarOut = log2_Q8_T( (uint32_t)( varOut ) ) +
+        (int32_t)(rightShiftOut << 8);
+    logMaxAbs = log2_Q8_T( (uint32_t)( maxAbs ) );
+
+    ltpGain = (int16_t)(logVarOut - logVarIn);
+    Q = 2 * logMaxAbs - ( logVarOut - 1512 );
+
+    /*
+     * ---
+     * We are computing sqrt( (VarIn/lag0) / var( noise ) )
+     * var( noise ) is almost 256. we have already computed log2( VarIn ) in Q8
+     * so we actually compute 2^( 0.5*(log2( VarIn ) - log2( lag0 ) - log2( var(noise ) )  ).
+     * Note that put log function is in Q8 but the exponential function is in Q10.
+     * --
+     */
+
+    logVarIn -= log2_Q8_T( (uint32_t)( lag0 ) );
+    tmp16 = (int16_t)((logVarIn<<1) - (4<<10) );
+    rightShiftIn = 0;
+    if( tmp16 > 4096 )
+    {
+      tmp16 -= 4096;
+      tmp16 = exp2_Q10_T( tmp16 );
+      tmp16 >>= 6;
+    }
+    else
+      tmp16 = exp2_Q10_T( tmp16 )>>10;
+
+    (ISACdec_obj->plcstr_obj).std = tmp16 - 4;
+
+    if( (ltpGain < 110) || (ltpGain > 230) )
+    {
+      if( ltpGain < 100 && (pitchGain < 1800) )
+      {
+        (ISACdec_obj->plcstr_obj).A = WEBRTC_SPL_WORD16_MAX;
+      }
+      else
+      {
+        (ISACdec_obj->plcstr_obj).A = ((ltpGain < 110) && (Q < 800)
+                                       )? WEBRTC_SPL_WORD16_MAX:0;
+      }
+      (ISACdec_obj->plcstr_obj).B = WEBRTC_SPL_WORD16_MAX -
+          (ISACdec_obj->plcstr_obj).A;
+    }
+    else
+    {
+      if( (pitchGain < 450) || (pitchGain > 1600) )
+      {
+        (ISACdec_obj->plcstr_obj).A = ((pitchGain < 450)
+                                       )? WEBRTC_SPL_WORD16_MAX:0;
+        (ISACdec_obj->plcstr_obj).B = WEBRTC_SPL_WORD16_MAX -
+            (ISACdec_obj->plcstr_obj).A;
+      }
+      else
+      {
+        myVoiceIndicator = ltpGain * 2 + pitchGain;
+        MemshipValQ15( myVoiceIndicator,
+                       &(ISACdec_obj->plcstr_obj).A, &(ISACdec_obj->plcstr_obj).B );
+      }
+    }
+
+
+
+    myVoiceIndicator = ltpGain * 16 + pitchGain * 2 + (pitchGain >> 8);
+    MemshipValQ15( myVoiceIndicator,
+                   &(ISACdec_obj->plcstr_obj).A, &(ISACdec_obj->plcstr_obj).B );
+
+
+
+    (ISACdec_obj->plcstr_obj).stretchLag = lag0;
+    (ISACdec_obj->plcstr_obj).pitchIndex = 0;
+
+  }
+  else
+  {
+    myDecayRate = (DECAY_RATE<<2);
+  }
+
+  if( (ISACdec_obj->plcstr_obj).B < 1000 )
+  {
+    myDecayRate += (DECAY_RATE<<3);
+  }
+
+  /* ------------ reconstructing the residual signal ------------------ */
+
+  LinearResampler( (ISACdec_obj->plcstr_obj).lastPitchLP,
+                   stretchPitchLP, lag0, (ISACdec_obj->plcstr_obj).stretchLag );
+  /* inverse pitch filter */
+
+  pitchLags_Q7[0] = pitchLags_Q7[1] = pitchLags_Q7[2] = pitchLags_Q7[3] =
+      (int16_t)((ISACdec_obj->plcstr_obj).stretchLag<<7);
+  pitchGains_Q12[3] = ( (ISACdec_obj->plcstr_obj).lastPitchGain_Q12);
+  pitchGains_Q12[2] = (int16_t)(pitchGains_Q12[3] * 1010 >> 10);
+  pitchGains_Q12[1] = (int16_t)(pitchGains_Q12[2] * 1010 >> 10);
+  pitchGains_Q12[0] = (int16_t)(pitchGains_Q12[1] * 1010 >> 10);
+
+
+  /* most of the time either B or A are zero so seperating */
+  if( (ISACdec_obj->plcstr_obj).B == 0 )
+  {
+    for( i = 0; i < FRAMESAMPLES_HALF; i++ )
+    {
+      /* --- Low Pass                                             */
+      (ISACdec_obj->plcstr_obj).seed = WEBRTC_SPL_RAND(
+          (ISACdec_obj->plcstr_obj).seed );
+      Vector_Word16_1[i] = (ISACdec_obj->plcstr_obj.seed >> 10) - 16;
+
+      /* --- Highpass                                              */
+      (ISACdec_obj->plcstr_obj).seed = WEBRTC_SPL_RAND(
+          (ISACdec_obj->plcstr_obj).seed );
+      Vector_Word16_2[i] = (ISACdec_obj->plcstr_obj.seed >> 10) - 16;
+
+    }
+    for( i = 1; i < NOISE_FILTER_LEN; i++ )
+    {
+      (ISACdec_obj->plcstr_obj).seed = WEBRTC_SPL_RAND(
+          (ISACdec_obj->plcstr_obj).seed );
+      Vector_Word16_Extended_1[i] = (ISACdec_obj->plcstr_obj.seed >> 10) - 16;
+
+      (ISACdec_obj->plcstr_obj).seed = WEBRTC_SPL_RAND(
+          (ISACdec_obj->plcstr_obj).seed );
+      Vector_Word16_Extended_2[i] = (ISACdec_obj->plcstr_obj.seed >> 10) - 16;
+    }
+    plc_filterma_Fast(Vector_Word16_1, Vector_Word16_Extended_1,
+                      &(ISACdec_obj->plcstr_obj).prevPitchInvIn[FRAMESAMPLES_HALF -
+                                                                NOISE_FILTER_LEN], (int16_t) NOISE_FILTER_LEN,
+                      (int16_t) FRAMESAMPLES_HALF, (int16_t)(5),
+                      (ISACdec_obj->plcstr_obj).decayCoeffNoise, (int16_t)(6));
+
+    maxCoeff = WebRtcSpl_MaxAbsValueW32(
+        &(ISACdec_obj->plcstr_obj).prevHP[
+            PITCH_MAX_LAG + 10 - NOISE_FILTER_LEN], NOISE_FILTER_LEN );
+
+    rshift = 0;
+    while( maxCoeff > WEBRTC_SPL_WORD16_MAX )
+    {
+      maxCoeff >>= 1;
+      rshift++;
+    }
+    for( i = 0; i < NOISE_FILTER_LEN; i++ ) {
+      Vector_Word16_1[FRAMESAMPLES_HALF - NOISE_FILTER_LEN + i] =(int16_t)(
+          ISACdec_obj->plcstr_obj.prevHP[PITCH_MAX_LAG + 10 - NOISE_FILTER_LEN +
+                                         i] >> rshift);
+    }
+    (ISACdec_obj->plcstr_obj).decayCoeffNoise = plc_filterma_Fast(
+        Vector_Word16_2,
+        Vector_Word16_Extended_2,
+        &Vector_Word16_1[FRAMESAMPLES_HALF - NOISE_FILTER_LEN],
+        (int16_t) NOISE_FILTER_LEN,
+        (int16_t) FRAMESAMPLES_HALF,
+        (int16_t) (5),
+        (ISACdec_obj->plcstr_obj).decayCoeffNoise,
+        (int16_t) (7) );
+
+    for( i = 0; i < FRAMESAMPLES_HALF; i++ )
+      Vector_Word32_2[i] = Vector_Word16_Extended_2[i] << rshift;
+
+    Vector_Word16_1 = Vector_Word16_Extended_1;
+  }
+  else
+  {
+    if( (ISACdec_obj->plcstr_obj).A == 0 )
+    {
+      /* ------ Periodic Vector ---                                */
+      for( i = 0, noiseIndex = 0; i < FRAMESAMPLES_HALF; i++, noiseIndex++ )
+      {
+        /* --- Lowpass                                               */
+        pLP = (int16_t)(stretchPitchLP[ISACdec_obj->plcstr_obj.pitchIndex] *
+            ISACdec_obj->plcstr_obj.decayCoeffPriodic >> 15);
+
+        /* --- Highpass                                              */
+        pHP = (int32_t)WEBRTC_SPL_MUL_16_32_RSFT15(
+            (ISACdec_obj->plcstr_obj).decayCoeffPriodic,
+            (ISACdec_obj->plcstr_obj).prevHP[PITCH_MAX_LAG + 10 -
+                                             (ISACdec_obj->plcstr_obj).stretchLag +
+                                             (ISACdec_obj->plcstr_obj).pitchIndex] );
+
+        /* --- lower the muliplier (more decay at next sample) --- */
+        (ISACdec_obj->plcstr_obj).decayCoeffPriodic -= (myDecayRate);
+        if( (ISACdec_obj->plcstr_obj).decayCoeffPriodic < 0 )
+          (ISACdec_obj->plcstr_obj).decayCoeffPriodic = 0;
+
+        (ISACdec_obj->plcstr_obj).pitchIndex++;
+
+        if( (ISACdec_obj->plcstr_obj).pitchIndex ==
+            (ISACdec_obj->plcstr_obj).stretchLag )
+        {
+          (ISACdec_obj->plcstr_obj).pitchIndex = 0;
+          (ISACdec_obj->plcstr_obj).pitchCycles++;
+
+          if( (ISACdec_obj->plcstr_obj).stretchLag != (lag0 + 1) )
+          {
+            (ISACdec_obj->plcstr_obj).stretchLag = lag0 + 1;
+          }
+          else
+          {
+            (ISACdec_obj->plcstr_obj).stretchLag = lag0;
+          }
+
+          (ISACdec_obj->plcstr_obj).stretchLag = (
+              (ISACdec_obj->plcstr_obj).stretchLag > PITCH_MAX_LAG
+                                                  )? (PITCH_MAX_LAG):(ISACdec_obj->plcstr_obj).stretchLag;
+
+          LinearResampler( (ISACdec_obj->plcstr_obj).lastPitchLP,
+                           stretchPitchLP, lag0, (ISACdec_obj->plcstr_obj).stretchLag );
+
+          LinearResampler( (ISACdec_obj->plcstr_obj).prevPitchLP,
+                           stretchPitchLP1, lag0, (ISACdec_obj->plcstr_obj).stretchLag );
+
+          switch( (ISACdec_obj->plcstr_obj).pitchCycles )
+          {
+            case 1:
+              {
+                for( k=0; k<(ISACdec_obj->plcstr_obj).stretchLag; k++ )
+                {
+                  stretchPitchLP[k] = (int16_t)((
+                      (int32_t)stretchPitchLP[k]* 3 +
+                      (int32_t)stretchPitchLP1[k])>>2);
+                }
+                break;
+              }
+            case 2:
+              {
+                for( k=0; k<(ISACdec_obj->plcstr_obj).stretchLag; k++ )
+                {
+                  stretchPitchLP[k] = (int16_t)((
+                      (int32_t)stretchPitchLP[k] +
+                      (int32_t)stretchPitchLP1[k] )>>1);
+                }
+                break;
+              }
+            case 3:
+              {
+                for( k=0; k<(ISACdec_obj->plcstr_obj).stretchLag; k++ )
+                {
+                  stretchPitchLP[k] = (int16_t)((stretchPitchLP[k] +
+                                                       (int32_t)stretchPitchLP1[k]*3 )>>2);
+                }
+                break;
+              }
+          }
+
+          if( (ISACdec_obj->plcstr_obj).pitchCycles == 3 )
+          {
+            myDecayRate += 35; //(myDecayRate>>1);
+            (ISACdec_obj->plcstr_obj).pitchCycles = 0;
+          }
+
+        }
+
+        /* ------ Sum the noisy and periodic signals  ------ */
+        Vector_Word16_1[i] = pLP;
+        Vector_Word32_2[i] = pHP;
+      }
+    }
+    else
+    {
+      for( i = 0, noiseIndex = 0; i < FRAMESAMPLES_HALF; i++, noiseIndex++ )
+      {
+
+        (ISACdec_obj->plcstr_obj).seed = WEBRTC_SPL_RAND(
+            (ISACdec_obj->plcstr_obj).seed );
+
+        noise1 = (ISACdec_obj->plcstr_obj.seed >> 10) - 16;
+
+        nLP = (int16_t)((int16_t)(noise1 * ISACdec_obj->plcstr_obj.std) *
+            ISACdec_obj->plcstr_obj.decayCoeffNoise >> 15);
+
+        /* --- Highpass                                              */
+        (ISACdec_obj->plcstr_obj).seed = WEBRTC_SPL_RAND(
+            (ISACdec_obj->plcstr_obj).seed );
+        noise1 = (ISACdec_obj->plcstr_obj.seed >> 11) - 8;
+
+        nHP = (int32_t)WEBRTC_SPL_MUL_16_32_RSFT15(
+            (ISACdec_obj->plcstr_obj).decayCoeffNoise,
+            (int32_t)(noise1*(ISACdec_obj->plcstr_obj).std) );
+
+        /* --- lower the muliplier (more decay at next sample) --- */
+        (ISACdec_obj->plcstr_obj).decayCoeffNoise -= (myDecayRate);
+        if( (ISACdec_obj->plcstr_obj).decayCoeffNoise < 0 )
+          (ISACdec_obj->plcstr_obj).decayCoeffNoise = 0;
+
+        /* ------ Periodic Vector ---                                */
+        /* --- Lowpass                                               */
+        pLP = (int16_t)(stretchPitchLP[ISACdec_obj->plcstr_obj.pitchIndex] *
+            ISACdec_obj->plcstr_obj.decayCoeffPriodic >> 15);
+
+        /* --- Highpass                                              */
+        pHP = (int32_t)WEBRTC_SPL_MUL_16_32_RSFT15(
+            (ISACdec_obj->plcstr_obj).decayCoeffPriodic,
+            (ISACdec_obj->plcstr_obj).prevHP[PITCH_MAX_LAG + 10 -
+                                             (ISACdec_obj->plcstr_obj).stretchLag +
+                                             (ISACdec_obj->plcstr_obj).pitchIndex] );
+
+        /* --- lower the muliplier (more decay at next sample) --- */
+        (ISACdec_obj->plcstr_obj).decayCoeffPriodic -= (myDecayRate);
+        if( (ISACdec_obj->plcstr_obj).decayCoeffPriodic < 0 )
+        {
+          (ISACdec_obj->plcstr_obj).decayCoeffPriodic = 0;
+        }
+
+        /* ------ Weighting the noisy and periodic vectors -------   */
+        wNoisyLP = (int16_t)(ISACdec_obj->plcstr_obj.A * nLP >> 15);
+        wNoisyHP = (int32_t)(WEBRTC_SPL_MUL_16_32_RSFT15(
+            (ISACdec_obj->plcstr_obj).A, (nHP) ) );
+
+        wPriodicLP = (int16_t)(ISACdec_obj->plcstr_obj.B * pLP >> 15);
+        wPriodicHP = (int32_t)(WEBRTC_SPL_MUL_16_32_RSFT15(
+            (ISACdec_obj->plcstr_obj).B, pHP));
+
+        (ISACdec_obj->plcstr_obj).pitchIndex++;
+
+        if((ISACdec_obj->plcstr_obj).pitchIndex ==
+           (ISACdec_obj->plcstr_obj).stretchLag)
+        {
+          (ISACdec_obj->plcstr_obj).pitchIndex = 0;
+          (ISACdec_obj->plcstr_obj).pitchCycles++;
+
+          if( (ISACdec_obj->plcstr_obj).stretchLag != (lag0 + 1) )
+            (ISACdec_obj->plcstr_obj).stretchLag = lag0 + 1;
+          else
+            (ISACdec_obj->plcstr_obj).stretchLag = lag0;
+
+          (ISACdec_obj->plcstr_obj).stretchLag = (
+              (ISACdec_obj->plcstr_obj).stretchLag > PITCH_MAX_LAG
+                                                  )? (PITCH_MAX_LAG):(ISACdec_obj->plcstr_obj).stretchLag;
+          LinearResampler(
+              (ISACdec_obj->plcstr_obj).lastPitchLP,
+              stretchPitchLP, lag0, (ISACdec_obj->plcstr_obj).stretchLag );
+
+          LinearResampler((ISACdec_obj->plcstr_obj).prevPitchLP,
+                          stretchPitchLP1, lag0, (ISACdec_obj->plcstr_obj).stretchLag );
+
+          switch((ISACdec_obj->plcstr_obj).pitchCycles)
+          {
+            case 1:
+              {
+                for( k=0; k<(ISACdec_obj->plcstr_obj).stretchLag; k++ )
+                {
+                  stretchPitchLP[k] = (int16_t)((
+                      (int32_t)stretchPitchLP[k]* 3 +
+                      (int32_t)stretchPitchLP1[k] )>>2);
+                }
+                break;
+              }
+            case 2:
+              {
+                for( k=0; k<(ISACdec_obj->plcstr_obj).stretchLag; k++ )
+                {
+                  stretchPitchLP[k] = (int16_t)((
+                      (int32_t)stretchPitchLP[k] +
+                      (int32_t)stretchPitchLP1[k])>>1);
+                }
+                break;
+              }
+            case 3:
+              {
+                for( k=0; k<(ISACdec_obj->plcstr_obj).stretchLag; k++ )
+                {
+                  stretchPitchLP[k] = (int16_t)(
+                      (stretchPitchLP[k] +
+                       (int32_t)stretchPitchLP1[k]*3 )>>2);
+                }
+                break;
+              }
+          }
+
+          if( (ISACdec_obj->plcstr_obj).pitchCycles == 3 )
+          {
+            myDecayRate += 55; //(myDecayRate>>1);
+            (ISACdec_obj->plcstr_obj).pitchCycles = 0;
+          }
+        }
+
+        /* ------ Sum the noisy and periodic signals  ------ */
+        Vector_Word16_1[i] = WebRtcSpl_AddSatW16(wNoisyLP, wPriodicLP);
+        Vector_Word32_2[i] = WebRtcSpl_AddSatW32(wNoisyHP, wPriodicHP);
+      }
+    }
+  }
+  /* ----------------- residual signal is reconstructed ------------------ */
+
+  k = (ISACdec_obj->plcstr_obj).pitchIndex;
+  /* --- Write one pitch cycle for recovery block --- */
+
+  for( i = 0; i < RECOVERY_OVERLAP; i++ )
+  {
+    ISACdec_obj->plcstr_obj.overlapLP[i] = (int16_t)(
+        stretchPitchLP[k] * ISACdec_obj->plcstr_obj.decayCoeffPriodic >> 15);
+    k = ( k < ((ISACdec_obj->plcstr_obj).stretchLag - 1) )? (k+1):0;
+  }
+
+  (ISACdec_obj->plcstr_obj).lastPitchLag_Q7 =
+      (int16_t)((ISACdec_obj->plcstr_obj).stretchLag << 7);
+
+
+  /* --- Inverse Pitch Filter --- */
+  WebRtcIsacfix_PitchFilter(Vector_Word16_1, Vector_Word16_2,
+                            &ISACdec_obj->pitchfiltstr_obj, pitchLags_Q7, pitchGains_Q12, 4);
+
+  /* reduce gain to compensate for pitch enhancer */
+  /* gain = 1.0f - 0.45f * AvgPitchGain; */
+  tmp32a = ISACdec_obj->plcstr_obj.AvgPitchGain_Q12 * 29;  // Q18
+  tmp32b = 262144 - tmp32a;  // Q18
+  gainQ13 = (int16_t) (tmp32b >> 5); // Q13
+
+  /* perceptual post-filtering (using normalized lattice filter) */
+  for (k = 0; k < FRAMESAMPLES_HALF; k++)
+    Vector_Word32_1[k] = (Vector_Word16_2[k] * gainQ13) << 3;  // Q25
+
+
+  WebRtcIsacfix_NormLatticeFilterAr(ORDERLO,
+                                    (ISACdec_obj->maskfiltstr_obj).PostStateLoGQ0,
+                                    Vector_Word32_1, lofilt_coefQ15, gain_lo_hiQ17, 0, Vector_Word16_1);
+
+  WebRtcIsacfix_NormLatticeFilterAr(ORDERHI,
+                                    (ISACdec_obj->maskfiltstr_obj).PostStateHiGQ0,
+                                    Vector_Word32_2, hifilt_coefQ15, gain_lo_hiQ17, 1, Vector_Word16_2);
+
+  /* recombine the 2 bands */
+
+  /* Form the polyphase signals, and compensate for DC offset */
+  for (k=0;k<FRAMESAMPLES_HALF;k++)
+  {
+    /* Construct a new upper channel signal*/
+    tmp_1 = (int16_t)WebRtcSpl_SatW32ToW16(
+                                           ((int32_t)Vector_Word16_1[k]+Vector_Word16_2[k] + 1));
+    /* Construct a new lower channel signal*/
+    tmp_2 = (int16_t)WebRtcSpl_SatW32ToW16(
+                                           ((int32_t)Vector_Word16_1[k]-Vector_Word16_2[k]));
+    Vector_Word16_1[k] = tmp_1;
+    Vector_Word16_2[k] = tmp_2;
+  }
+
+
+  WebRtcIsacfix_FilterAndCombine1(Vector_Word16_1,
+                                  Vector_Word16_2, signal_out16, &ISACdec_obj->postfiltbankstr_obj);
+
+  (ISACdec_obj->plcstr_obj).used = PLC_WAS_USED;
+  *current_framesamples = 480;
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/encode.c b/modules/audio_coding/codecs/isac/fix/source/encode.c
new file mode 100644
index 0000000..ef3e320
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/encode.c
@@ -0,0 +1,635 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * encode.c
+ *
+ * Encoding function for the iSAC coder.
+ *
+ */
+
+#include "rtc_base/checks.h"
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+
+#include <stdio.h>
+
+#include "modules/audio_coding/codecs/isac/fix/source/arith_routins.h"
+#include "modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h"
+#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h"
+#include "modules/audio_coding/codecs/isac/fix/source/lpc_tables.h"
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.h"
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.h"
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+
+int WebRtcIsacfix_EncodeImpl(int16_t      *in,
+                             IsacFixEncoderInstance  *ISACenc_obj,
+                             BwEstimatorstr      *bw_estimatordata,
+                             int16_t         CodingMode)
+{
+  int16_t stream_length = 0;
+  int16_t usefulstr_len = 0;
+  int k;
+  int16_t BWno;
+
+  int16_t lofilt_coefQ15[(ORDERLO)*SUBFRAMES];
+  int16_t hifilt_coefQ15[(ORDERHI)*SUBFRAMES];
+  int32_t gain_lo_hiQ17[2*SUBFRAMES];
+
+  int16_t LPandHP[FRAMESAMPLES/2 + QLOOKAHEAD];
+  int16_t LP16a[FRAMESAMPLES/2 + QLOOKAHEAD];
+  int16_t HP16a[FRAMESAMPLES/2 + QLOOKAHEAD];
+
+  int16_t PitchLags_Q7[PITCH_SUBFRAMES];
+  int16_t PitchGains_Q12[PITCH_SUBFRAMES];
+  int16_t AvgPitchGain_Q12;
+
+  int16_t frame_mode; /* 0 for 30ms, 1 for 60ms */
+  int16_t processed_samples;
+  int status;
+
+  int32_t bits_gainsQ11;
+  int16_t MinBytes;
+  int16_t bmodel;
+
+  transcode_obj transcodingParam;
+  int16_t payloadLimitBytes;
+  int16_t arithLenBeforeEncodingDFT;
+  int16_t iterCntr;
+
+  /* copy new frame length and bottle neck rate only for the first 10 ms data */
+  if (ISACenc_obj->buffer_index == 0) {
+    /* set the framelength for the next packet */
+    ISACenc_obj->current_framesamples = ISACenc_obj->new_framelength;
+  }
+
+  frame_mode = ISACenc_obj->current_framesamples/MAX_FRAMESAMPLES; /* 0 (30 ms) or 1 (60 ms)  */
+  processed_samples = ISACenc_obj->current_framesamples/(frame_mode+1); /* 480 (30, 60 ms) */
+
+  /* buffer speech samples (by 10ms packet) until the framelength is reached (30 or 60 ms) */
+  /**************************************************************************************/
+  /* fill the buffer with 10ms input data */
+  for(k=0; k<FRAMESAMPLES_10ms; k++) {
+    ISACenc_obj->data_buffer_fix[k + ISACenc_obj->buffer_index] = in[k];
+  }
+  /* if buffersize is not equal to current framesize, and end of file is not reached yet, */
+  /* increase index and go back to main to get more speech samples */
+  if (ISACenc_obj->buffer_index + FRAMESAMPLES_10ms != processed_samples) {
+    ISACenc_obj->buffer_index = ISACenc_obj->buffer_index + FRAMESAMPLES_10ms;
+    return 0;
+  }
+  /* if buffer reached the right size, reset index and continue with encoding the frame */
+  ISACenc_obj->buffer_index = 0;
+
+  /* end of buffer function */
+  /**************************/
+
+  /* encoding */
+  /************/
+
+  if (frame_mode == 0 || ISACenc_obj->frame_nb == 0 )
+  {
+    /* reset bitstream */
+    ISACenc_obj->bitstr_obj.W_upper = 0xFFFFFFFF;
+    ISACenc_obj->bitstr_obj.streamval = 0;
+    ISACenc_obj->bitstr_obj.stream_index = 0;
+    ISACenc_obj->bitstr_obj.full = 1;
+
+    if (CodingMode == 0) {
+      ISACenc_obj->BottleNeck =  WebRtcIsacfix_GetUplinkBandwidth(bw_estimatordata);
+      ISACenc_obj->MaxDelay =  WebRtcIsacfix_GetUplinkMaxDelay(bw_estimatordata);
+    }
+    if (CodingMode == 0 && frame_mode == 0 && (ISACenc_obj->enforceFrameSize == 0)) {
+      ISACenc_obj->new_framelength = WebRtcIsacfix_GetNewFrameLength(ISACenc_obj->BottleNeck,
+                                                                     ISACenc_obj->current_framesamples);
+    }
+
+    // multiply the bottleneck by 0.88 before computing SNR, 0.88 is tuned by experimenting on TIMIT
+    // 901/1024 is 0.87988281250000
+    ISACenc_obj->s2nr = WebRtcIsacfix_GetSnr(
+        (int16_t)(ISACenc_obj->BottleNeck * 901 >> 10),
+        ISACenc_obj->current_framesamples);
+
+    /* encode frame length */
+    status = WebRtcIsacfix_EncodeFrameLen(ISACenc_obj->current_framesamples, &ISACenc_obj->bitstr_obj);
+    if (status < 0)
+    {
+      /* Wrong frame size */
+      if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
+      {
+        // If this is the second 30ms of a 60ms frame reset this such that in the next call
+        // encoder starts fresh.
+        ISACenc_obj->frame_nb = 0;
+      }
+      return status;
+    }
+
+    /* Save framelength for multiple packets memory */
+    if (ISACenc_obj->SaveEnc_ptr != NULL) {
+      (ISACenc_obj->SaveEnc_ptr)->framelength=ISACenc_obj->current_framesamples;
+    }
+
+    /* bandwidth estimation and coding */
+    BWno = WebRtcIsacfix_GetDownlinkBwIndexImpl(bw_estimatordata);
+    status = WebRtcIsacfix_EncodeReceiveBandwidth(&BWno, &ISACenc_obj->bitstr_obj);
+    if (status < 0)
+    {
+      if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
+      {
+        // If this is the second 30ms of a 60ms frame reset this such that in the next call
+        // encoder starts fresh.
+        ISACenc_obj->frame_nb = 0;
+      }
+      return status;
+    }
+  }
+
+  /* split signal in two bands */
+  WebRtcIsacfix_SplitAndFilter1(ISACenc_obj->data_buffer_fix, LP16a, HP16a, &ISACenc_obj->prefiltbankstr_obj );
+
+  /* estimate pitch parameters and pitch-filter lookahead signal */
+  WebRtcIsacfix_PitchAnalysis(LP16a+QLOOKAHEAD, LPandHP,
+                              &ISACenc_obj->pitchanalysisstr_obj,  PitchLags_Q7, PitchGains_Q12); /* LPandHP = LP_lookahead_pfQ0, */
+
+  /* Set where to store data in multiple packets memory */
+  if (ISACenc_obj->SaveEnc_ptr != NULL) {
+    if (frame_mode == 0 || ISACenc_obj->frame_nb == 0)
+    {
+      (ISACenc_obj->SaveEnc_ptr)->startIdx = 0;
+    }
+    else
+    {
+      (ISACenc_obj->SaveEnc_ptr)->startIdx = 1;
+    }
+  }
+
+  /* quantize & encode pitch parameters */
+  status = WebRtcIsacfix_EncodePitchGain(PitchGains_Q12, &ISACenc_obj->bitstr_obj,  ISACenc_obj->SaveEnc_ptr);
+  if (status < 0)
+  {
+    if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
+    {
+      // If this is the second 30ms of a 60ms frame reset this such that in the next call
+      // encoder starts fresh.
+      ISACenc_obj->frame_nb = 0;
+    }
+    return status;
+  }
+  status = WebRtcIsacfix_EncodePitchLag(PitchLags_Q7 , PitchGains_Q12, &ISACenc_obj->bitstr_obj,  ISACenc_obj->SaveEnc_ptr);
+  if (status < 0)
+  {
+    if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
+    {
+      // If this is the second 30ms of a 60ms frame reset this such that in the next call
+      // encoder starts fresh.
+      ISACenc_obj->frame_nb = 0;
+    }
+    return status;
+  }
+  AvgPitchGain_Q12 = (PitchGains_Q12[0] + PitchGains_Q12[1] +
+      PitchGains_Q12[2] + PitchGains_Q12[3]) >> 2;
+
+  /* find coefficients for perceptual pre-filters */
+  WebRtcIsacfix_GetLpcCoef(LPandHP, HP16a+QLOOKAHEAD, &ISACenc_obj->maskfiltstr_obj,
+                           ISACenc_obj->s2nr, PitchGains_Q12,
+                           gain_lo_hiQ17, lofilt_coefQ15, hifilt_coefQ15); /*LPandHP = LP_lookahead_pfQ0*/
+
+  // record LPC Gains for possible bit-rate reduction
+  for(k = 0; k < KLT_ORDER_GAIN; k++)
+  {
+    transcodingParam.lpcGains[k] = gain_lo_hiQ17[k];
+  }
+
+  /* code LPC model and shape - gains not quantized yet */
+  status = WebRtcIsacfix_EncodeLpc(gain_lo_hiQ17, lofilt_coefQ15, hifilt_coefQ15,
+                                   &bmodel, &bits_gainsQ11, &ISACenc_obj->bitstr_obj, ISACenc_obj->SaveEnc_ptr, &transcodingParam);
+  if (status < 0)
+  {
+    if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
+    {
+      // If this is the second 30ms of a 60ms frame reset this such that in the next call
+      // encoder starts fresh.
+      ISACenc_obj->frame_nb = 0;
+    }
+    return status;
+  }
+  arithLenBeforeEncodingDFT = (ISACenc_obj->bitstr_obj.stream_index << 1) + (1-ISACenc_obj->bitstr_obj.full);
+
+  /* low-band filtering */
+  WebRtcIsacfix_NormLatticeFilterMa(ORDERLO, ISACenc_obj->maskfiltstr_obj.PreStateLoGQ15,
+                                    LP16a, lofilt_coefQ15, gain_lo_hiQ17, 0, LPandHP);/* LPandHP = LP16b */
+
+  /* pitch filter */
+  WebRtcIsacfix_PitchFilter(LPandHP, LP16a, &ISACenc_obj->pitchfiltstr_obj, PitchLags_Q7, PitchGains_Q12, 1);/* LPandHP = LP16b */
+
+  /* high-band filtering */
+  WebRtcIsacfix_NormLatticeFilterMa(ORDERHI, ISACenc_obj->maskfiltstr_obj.PreStateHiGQ15,
+                                    HP16a, hifilt_coefQ15, gain_lo_hiQ17, 1, LPandHP);/*LPandHP = HP16b*/
+
+  /* transform */
+  WebRtcIsacfix_Time2Spec(LP16a, LPandHP, LP16a, LPandHP); /*LPandHP = HP16b*/
+
+  /* Save data for multiple packets memory */
+  if (ISACenc_obj->SaveEnc_ptr != NULL) {
+    for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+      (ISACenc_obj->SaveEnc_ptr)->fre[k + (ISACenc_obj->SaveEnc_ptr)->startIdx*FRAMESAMPLES_HALF] = LP16a[k];
+      (ISACenc_obj->SaveEnc_ptr)->fim[k + (ISACenc_obj->SaveEnc_ptr)->startIdx*FRAMESAMPLES_HALF] = LPandHP[k];
+    }
+    (ISACenc_obj->SaveEnc_ptr)->AvgPitchGain[(ISACenc_obj->SaveEnc_ptr)->startIdx] = AvgPitchGain_Q12;
+  }
+
+  /* quantization and lossless coding */
+  status = WebRtcIsacfix_EncodeSpec(LP16a, LPandHP, &ISACenc_obj->bitstr_obj, AvgPitchGain_Q12);
+  if((status <= -1) && (status != -ISAC_DISALLOWED_BITSTREAM_LENGTH)) /*LPandHP = HP16b*/
+  {
+    if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
+    {
+      // If this is the second 30ms of a 60ms frame reset this such that in the next call
+      // encoder starts fresh.
+      ISACenc_obj->frame_nb = 0;
+    }
+    return status;
+  }
+
+  if((frame_mode == 1) && (ISACenc_obj->frame_nb == 0))
+  {
+    // it is a 60ms and we are in the first 30ms
+    // then the limit at this point should be half of the assigned value
+    payloadLimitBytes = ISACenc_obj->payloadLimitBytes60 >> 1;
+  }
+  else if (frame_mode == 0)
+  {
+    // it is a 30ms frame
+    payloadLimitBytes = (ISACenc_obj->payloadLimitBytes30) - 3;
+  }
+  else
+  {
+    // this is the second half of a 60ms frame.
+    payloadLimitBytes = ISACenc_obj->payloadLimitBytes60 - 3; // subract 3 because termination process may add 3 bytes
+  }
+
+  iterCntr = 0;
+  while((((ISACenc_obj->bitstr_obj.stream_index) << 1) > payloadLimitBytes) ||
+        (status == -ISAC_DISALLOWED_BITSTREAM_LENGTH))
+  {
+    int16_t arithLenDFTByte;
+    int16_t bytesLeftQ5;
+    int16_t ratioQ5[8] = {0, 6, 9, 12, 16, 19, 22, 25};
+
+    // According to experiments on TIMIT the following is proper for audio, but it is not agressive enough for tonal inputs
+    // such as DTMF, sweep-sine, ...
+    //
+    // (0.55 - (0.8 - ratio[i]/32) * 5 / 6) * 2^14
+    // int16_t scaleQ14[8] = {0, 648, 1928, 3208, 4915, 6195, 7475, 8755};
+
+
+    // This is a supper-agressive scaling passed the tests (tonal inputs) tone with one iteration for payload limit
+    // of 120 (32kbps bottleneck), number of frames needed a rate-reduction was 58403
+    //
+    int16_t scaleQ14[8] = {0, 348, 828, 1408, 2015, 3195, 3500, 3500};
+    int16_t idx;
+
+    if(iterCntr >= MAX_PAYLOAD_LIMIT_ITERATION)
+    {
+      // We were not able to limit the payload size
+
+      if((frame_mode == 1) && (ISACenc_obj->frame_nb == 0))
+      {
+        // This was the first 30ms of a 60ms frame. Although the payload is larger than it
+        // should be but we let the second 30ms be encoded. Maybe togetehr we won't exceed
+        // the limit.
+        ISACenc_obj->frame_nb = 1;
+        return 0;
+      }
+      else if((frame_mode == 1) && (ISACenc_obj->frame_nb == 1))
+      {
+        ISACenc_obj->frame_nb = 0;
+      }
+
+      if(status != -ISAC_DISALLOWED_BITSTREAM_LENGTH)
+      {
+        return -ISAC_PAYLOAD_LARGER_THAN_LIMIT;
+      }
+      else
+      {
+        return status;
+      }
+    }
+    if(status != -ISAC_DISALLOWED_BITSTREAM_LENGTH)
+    {
+      arithLenDFTByte = (ISACenc_obj->bitstr_obj.stream_index << 1) + (1-ISACenc_obj->bitstr_obj.full) - arithLenBeforeEncodingDFT;
+      bytesLeftQ5 = (payloadLimitBytes - arithLenBeforeEncodingDFT) << 5;
+
+      // bytesLeft / arithLenDFTBytes indicates how much scaling is required a rough estimate (agressive)
+      // scale = 0.55 - (0.8 - bytesLeft / arithLenDFTBytes) * 5 / 6
+      // bytesLeft / arithLenDFTBytes below 0.2 will have a scale of zero and above 0.8 are treated as 0.8
+      // to avoid division we do more simplification.
+      //
+      // values of (bytesLeft / arithLenDFTBytes)*32 between ratioQ5[i] and ratioQ5[i+1] are rounded to ratioQ5[i]
+      // and the corresponding scale is chosen
+
+      // we compare bytesLeftQ5 with ratioQ5[]*arithLenDFTByte;
+      idx = 4;
+      idx += (bytesLeftQ5 >= ratioQ5[idx] * arithLenDFTByte) ? 2 : -2;
+      idx += (bytesLeftQ5 >= ratioQ5[idx] * arithLenDFTByte) ? 1 : -1;
+      idx += (bytesLeftQ5 >= ratioQ5[idx] * arithLenDFTByte) ? 0 : -1;
+    }
+    else
+    {
+      // we are here because the bit-stream did not fit into the buffer, in this case, the stream_index is not
+      // trustable, especially if the is the first 30ms of a packet. Thereforem, we will go for the most agressive
+      // case.
+      idx = 0;
+    }
+    // scale FFT coefficients to reduce the bit-rate
+    for(k = 0; k < FRAMESAMPLES_HALF; k++)
+    {
+      LP16a[k] = (int16_t)(LP16a[k] * scaleQ14[idx] >> 14);
+      LPandHP[k] = (int16_t)(LPandHP[k] * scaleQ14[idx] >> 14);
+    }
+
+    // Save data for multiple packets memory
+    if (ISACenc_obj->SaveEnc_ptr != NULL)
+    {
+      for(k = 0; k < FRAMESAMPLES_HALF; k++)
+      {
+        (ISACenc_obj->SaveEnc_ptr)->fre[k + (ISACenc_obj->SaveEnc_ptr)->startIdx*FRAMESAMPLES_HALF] = LP16a[k];
+        (ISACenc_obj->SaveEnc_ptr)->fim[k + (ISACenc_obj->SaveEnc_ptr)->startIdx*FRAMESAMPLES_HALF] = LPandHP[k];
+      }
+    }
+
+    // scale the unquantized LPC gains and save the scaled version for the future use
+    for(k = 0; k < KLT_ORDER_GAIN; k++)
+    {
+      gain_lo_hiQ17[k] = WEBRTC_SPL_MUL_16_32_RSFT14(scaleQ14[idx], transcodingParam.lpcGains[k]);//transcodingParam.lpcGains[k]; //
+      transcodingParam.lpcGains[k] = gain_lo_hiQ17[k];
+    }
+
+    // reset the bit-stream object to the state which it had before encoding LPC Gains
+    ISACenc_obj->bitstr_obj.full = transcodingParam.full;
+    ISACenc_obj->bitstr_obj.stream_index = transcodingParam.stream_index;
+    ISACenc_obj->bitstr_obj.streamval = transcodingParam.streamval;
+    ISACenc_obj->bitstr_obj.W_upper = transcodingParam.W_upper;
+    ISACenc_obj->bitstr_obj.stream[transcodingParam.stream_index-1] = transcodingParam.beforeLastWord;
+    ISACenc_obj->bitstr_obj.stream[transcodingParam.stream_index] = transcodingParam.lastWord;
+
+
+    // quantize and encode LPC gain
+    WebRtcIsacfix_EstCodeLpcGain(gain_lo_hiQ17, &ISACenc_obj->bitstr_obj, ISACenc_obj->SaveEnc_ptr);
+    arithLenBeforeEncodingDFT = (ISACenc_obj->bitstr_obj.stream_index << 1) + (1-ISACenc_obj->bitstr_obj.full);
+    status = WebRtcIsacfix_EncodeSpec(LP16a, LPandHP, &ISACenc_obj->bitstr_obj, AvgPitchGain_Q12);
+    if((status <= -1) && (status != -ISAC_DISALLOWED_BITSTREAM_LENGTH)) /*LPandHP = HP16b*/
+    {
+      if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
+      {
+        // If this is the second 30ms of a 60ms frame reset this such that in the next call
+        // encoder starts fresh.
+        ISACenc_obj->frame_nb = 0;
+      }
+      return status;
+    }
+    iterCntr++;
+  }
+
+  if (frame_mode == 1 && ISACenc_obj->frame_nb == 0)
+    /* i.e. 60 ms framesize and just processed the first 30ms, */
+    /* go back to main function to buffer the other 30ms speech frame */
+  {
+    ISACenc_obj->frame_nb = 1;
+    return 0;
+  }
+  else if (frame_mode == 1 && ISACenc_obj->frame_nb == 1)
+  {
+    ISACenc_obj->frame_nb = 0;
+    /* also update the framelength for next packet, in Adaptive mode only */
+    if (CodingMode == 0 && (ISACenc_obj->enforceFrameSize == 0)) {
+      ISACenc_obj->new_framelength = WebRtcIsacfix_GetNewFrameLength(ISACenc_obj->BottleNeck,
+                                                                     ISACenc_obj->current_framesamples);
+    }
+  }
+
+
+  /* complete arithmetic coding */
+  stream_length = WebRtcIsacfix_EncTerminate(&ISACenc_obj->bitstr_obj);
+  /* can this be negative? */
+
+  if(CodingMode == 0)
+  {
+
+    /* update rate model and get minimum number of bytes in this packet */
+    MinBytes = WebRtcIsacfix_GetMinBytes(&ISACenc_obj->rate_data_obj, (int16_t) stream_length,
+                                         ISACenc_obj->current_framesamples, ISACenc_obj->BottleNeck, ISACenc_obj->MaxDelay);
+
+    /* if bitstream is too short, add garbage at the end */
+
+    /* Store length of coded data */
+    usefulstr_len = stream_length;
+
+    /* Make sure MinBytes does not exceed packet size limit */
+    if ((ISACenc_obj->frame_nb == 0) && (MinBytes > ISACenc_obj->payloadLimitBytes30)) {
+      MinBytes = ISACenc_obj->payloadLimitBytes30;
+    } else if ((ISACenc_obj->frame_nb == 1) && (MinBytes > ISACenc_obj->payloadLimitBytes60)) {
+      MinBytes = ISACenc_obj->payloadLimitBytes60;
+    }
+
+    /* Make sure we don't allow more than 255 bytes of garbage data.
+       We store the length of the garbage data in 8 bits in the bitstream,
+       255 is the max garbage lenght we can signal using 8 bits. */
+    if( MinBytes > usefulstr_len + 255 ) {
+      MinBytes = usefulstr_len + 255;
+    }
+
+    /* Save data for creation of multiple bitstreams */
+    if (ISACenc_obj->SaveEnc_ptr != NULL) {
+      (ISACenc_obj->SaveEnc_ptr)->minBytes = MinBytes;
+    }
+
+    while (stream_length < MinBytes)
+    {
+      RTC_DCHECK_GE(stream_length, 0);
+      if (stream_length & 0x0001){
+        ISACenc_obj->bitstr_seed = WEBRTC_SPL_RAND( ISACenc_obj->bitstr_seed );
+        ISACenc_obj->bitstr_obj.stream[stream_length / 2] |=
+            (uint16_t)(ISACenc_obj->bitstr_seed & 0xFF);
+      } else {
+        ISACenc_obj->bitstr_seed = WEBRTC_SPL_RAND( ISACenc_obj->bitstr_seed );
+        ISACenc_obj->bitstr_obj.stream[stream_length / 2] =
+            ((uint16_t)ISACenc_obj->bitstr_seed << 8);
+      }
+      stream_length++;
+    }
+
+    /* to get the real stream_length, without garbage */
+    if (usefulstr_len & 0x0001) {
+      ISACenc_obj->bitstr_obj.stream[usefulstr_len>>1] &= 0xFF00;
+      ISACenc_obj->bitstr_obj.stream[usefulstr_len>>1] += (MinBytes - usefulstr_len) & 0x00FF;
+    }
+    else {
+      ISACenc_obj->bitstr_obj.stream[usefulstr_len>>1] &= 0x00FF;
+      ISACenc_obj->bitstr_obj.stream[usefulstr_len >> 1] +=
+          ((uint16_t)((MinBytes - usefulstr_len) & 0x00FF) << 8);
+    }
+  }
+  else
+  {
+    /* update rate model */
+    WebRtcIsacfix_UpdateRateModel(&ISACenc_obj->rate_data_obj, (int16_t) stream_length,
+                                  ISACenc_obj->current_framesamples, ISACenc_obj->BottleNeck);
+  }
+  return stream_length;
+}
+
+/* This function is used to create a new bitstream with new BWE.
+   The same data as previously encoded with the fucntion WebRtcIsacfix_EncodeImpl()
+   is used. The data needed is taken from the struct, where it was stored
+   when calling the encoder. */
+int WebRtcIsacfix_EncodeStoredData(IsacFixEncoderInstance  *ISACenc_obj,
+                                   int     BWnumber,
+                                   float              scale)
+{
+  int ii;
+  int status;
+  int16_t BWno = (int16_t)BWnumber;
+  int stream_length = 0;
+
+  int16_t model;
+  const uint16_t *Q_PitchGain_cdf_ptr[1];
+  const uint16_t **cdf;
+  const IsacSaveEncoderData *SaveEnc_str;
+  int32_t tmpLPCcoeffs_g[KLT_ORDER_GAIN<<1];
+  int16_t tmpLPCindex_g[KLT_ORDER_GAIN<<1];
+  int16_t tmp_fre[FRAMESAMPLES];
+  int16_t tmp_fim[FRAMESAMPLES];
+
+  SaveEnc_str = ISACenc_obj->SaveEnc_ptr;
+
+  /* Check if SaveEnc memory exists */
+  if (SaveEnc_str == NULL) {
+    return (-1);
+  }
+
+  /* Sanity Check - possible values for BWnumber is 0 - 23 */
+  if ((BWnumber < 0) || (BWnumber > 23)) {
+    return -ISAC_RANGE_ERROR_BW_ESTIMATOR;
+  }
+
+  /* reset bitstream */
+  ISACenc_obj->bitstr_obj.W_upper = 0xFFFFFFFF;
+  ISACenc_obj->bitstr_obj.streamval = 0;
+  ISACenc_obj->bitstr_obj.stream_index = 0;
+  ISACenc_obj->bitstr_obj.full = 1;
+
+  /* encode frame length */
+  status = WebRtcIsacfix_EncodeFrameLen(SaveEnc_str->framelength, &ISACenc_obj->bitstr_obj);
+  if (status < 0) {
+    /* Wrong frame size */
+    return status;
+  }
+
+  /* encode bandwidth estimate */
+  status = WebRtcIsacfix_EncodeReceiveBandwidth(&BWno, &ISACenc_obj->bitstr_obj);
+  if (status < 0) {
+    return status;
+  }
+
+  /* Transcoding                                                 */
+  /* If scale < 1, rescale data to produce lower bitrate signal  */
+  if ((0.0 < scale) && (scale < 1.0)) {
+    /* Compensate LPC gain */
+    for (ii = 0; ii < (KLT_ORDER_GAIN*(1+SaveEnc_str->startIdx)); ii++) {
+      tmpLPCcoeffs_g[ii] = (int32_t) ((scale) * (float) SaveEnc_str->LPCcoeffs_g[ii]);
+    }
+
+    /* Scale DFT */
+    for (ii = 0; ii < (FRAMESAMPLES_HALF*(1+SaveEnc_str->startIdx)); ii++) {
+      tmp_fre[ii] = (int16_t) ((scale) * (float) SaveEnc_str->fre[ii]) ;
+      tmp_fim[ii] = (int16_t) ((scale) * (float) SaveEnc_str->fim[ii]) ;
+    }
+  } else {
+    for (ii = 0; ii < (KLT_ORDER_GAIN*(1+SaveEnc_str->startIdx)); ii++) {
+      tmpLPCindex_g[ii] =  SaveEnc_str->LPCindex_g[ii];
+    }
+
+    for (ii = 0; ii < (FRAMESAMPLES_HALF*(1+SaveEnc_str->startIdx)); ii++) {
+      tmp_fre[ii] = SaveEnc_str->fre[ii];
+      tmp_fim[ii] = SaveEnc_str->fim[ii];
+    }
+  }
+
+  /* Loop over number of 30 msec */
+  for (ii = 0; ii <= SaveEnc_str->startIdx; ii++)
+  {
+
+    /* encode pitch gains */
+    *Q_PitchGain_cdf_ptr = WebRtcIsacfix_kPitchGainCdf;
+    status = WebRtcIsacfix_EncHistMulti(&ISACenc_obj->bitstr_obj, &SaveEnc_str->pitchGain_index[ii],
+                                       Q_PitchGain_cdf_ptr, 1);
+    if (status < 0) {
+      return status;
+    }
+
+    /* entropy coding of quantization pitch lags */
+    /* voicing classificiation */
+    if (SaveEnc_str->meanGain[ii] <= 819) {
+      cdf = WebRtcIsacfix_kPitchLagPtrLo;
+    } else if (SaveEnc_str->meanGain[ii] <= 1638) {
+      cdf = WebRtcIsacfix_kPitchLagPtrMid;
+    } else {
+      cdf = WebRtcIsacfix_kPitchLagPtrHi;
+    }
+    status = WebRtcIsacfix_EncHistMulti(&ISACenc_obj->bitstr_obj,
+                                       &SaveEnc_str->pitchIndex[PITCH_SUBFRAMES*ii], cdf, PITCH_SUBFRAMES);
+    if (status < 0) {
+      return status;
+    }
+
+    /* LPC */
+    /* entropy coding of model number */
+    model = 0;
+    status = WebRtcIsacfix_EncHistMulti(&ISACenc_obj->bitstr_obj,  &model,
+                                       WebRtcIsacfix_kModelCdfPtr, 1);
+    if (status < 0) {
+      return status;
+    }
+
+    /* entropy coding of quantization indices - LPC shape only */
+    status = WebRtcIsacfix_EncHistMulti(&ISACenc_obj->bitstr_obj, &SaveEnc_str->LPCindex_s[KLT_ORDER_SHAPE*ii],
+                                       WebRtcIsacfix_kCdfShapePtr[0], KLT_ORDER_SHAPE);
+    if (status < 0) {
+      return status;
+    }
+
+    /* If transcoding, get new LPC gain indices */
+    if (scale < 1.0) {
+      WebRtcIsacfix_TranscodeLpcCoef(&tmpLPCcoeffs_g[KLT_ORDER_GAIN*ii], &tmpLPCindex_g[KLT_ORDER_GAIN*ii]);
+    }
+
+    /* entropy coding of quantization indices - LPC gain */
+    status = WebRtcIsacfix_EncHistMulti(&ISACenc_obj->bitstr_obj, &tmpLPCindex_g[KLT_ORDER_GAIN*ii],
+                                       WebRtcIsacfix_kCdfGainPtr[0], KLT_ORDER_GAIN);
+    if (status < 0) {
+      return status;
+    }
+
+    /* quantization and lossless coding */
+    status = WebRtcIsacfix_EncodeSpec(&tmp_fre[ii*FRAMESAMPLES_HALF], &tmp_fim[ii*FRAMESAMPLES_HALF],
+                                      &ISACenc_obj->bitstr_obj, SaveEnc_str->AvgPitchGain[ii]);
+    if (status < 0) {
+      return status;
+    }
+  }
+
+  /* complete arithmetic coding */
+  stream_length = WebRtcIsacfix_EncTerminate(&ISACenc_obj->bitstr_obj);
+
+  return stream_length;
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c b/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c
new file mode 100644
index 0000000..cb15445
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c
@@ -0,0 +1,2056 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * entropy_coding.c
+ *
+ * This file contains all functions used to arithmetically
+ * encode the iSAC bistream.
+ *
+ */
+
+#include <stddef.h>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/codecs/isac/fix/source/arith_routins.h"
+#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/fix/source/lpc_tables.h"
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.h"
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "modules/audio_coding/codecs/isac/fix/source/spectrum_ar_model_tables.h"
+#include "rtc_base/sanitizer.h"
+
+/*
+ * Eenumerations for arguments to functions WebRtcIsacfix_MatrixProduct1()
+ * and WebRtcIsacfix_MatrixProduct2().
+*/
+
+enum matrix_index_factor {
+  kTIndexFactor1 = 1,
+  kTIndexFactor2 = 2,
+  kTIndexFactor3 = SUBFRAMES,
+  kTIndexFactor4 = LPC_SHAPE_ORDER
+};
+
+enum matrix_index_step {
+  kTIndexStep1 = 1,
+  kTIndexStep2 = SUBFRAMES,
+  kTIndexStep3 = LPC_SHAPE_ORDER
+};
+
+enum matrixprod_loop_count {
+  kTLoopCount1 = SUBFRAMES,
+  kTLoopCount2 = 2,
+  kTLoopCount3 = LPC_SHAPE_ORDER
+};
+
+enum matrix1_shift_value {
+  kTMatrix1_shift0 = 0,
+  kTMatrix1_shift1 = 1,
+  kTMatrix1_shift5 = 5
+};
+
+enum matrixprod_init_case {
+  kTInitCase0 = 0,
+  kTInitCase1 = 1
+};
+
+/*
+  This function implements the fix-point correspondant function to lrint.
+
+  FLP: (int32_t)floor(flt+.499999999999)
+  FIP: (fixVal+roundVal)>>qDomain
+
+  where roundVal = 2^(qDomain-1) = 1<<(qDomain-1)
+
+*/
+static __inline int32_t CalcLrIntQ(int32_t fixVal, int16_t qDomain) {
+  return (fixVal + (1 << (qDomain - 1))) >> qDomain;
+}
+
+/*
+  __inline uint32_t stepwise(int32_t dinQ10) {
+
+  int32_t ind, diQ10, dtQ10;
+
+  diQ10 = dinQ10;
+  if (diQ10 < DPMIN_Q10)
+  diQ10 = DPMIN_Q10;
+  if (diQ10 >= DPMAX_Q10)
+  diQ10 = DPMAX_Q10 - 1;
+
+  dtQ10 = diQ10 - DPMIN_Q10;*/ /* Q10 + Q10 = Q10 */
+/* ind = (dtQ10 * 5) >> 10;  */ /* 2^10 / 5 = 0.2 in Q10  */
+/* Q10 -> Q0 */
+
+/* return rpointsFIX_Q10[ind];
+
+   }
+*/
+
+/* logN(x) = logN(2)*log2(x) = 0.6931*log2(x). Output in Q8. */
+/* The input argument X to logN(X) is 2^17 times higher than the
+   input floating point argument Y to log(Y), since the X value
+   is a Q17 value. This can be compensated for after the call, by
+   subraction a value Z for each Q-step. One Q-step means that
+   X gets 2 thimes higher, i.e. Z = logN(2)*256 = 0.693147180559*256 =
+   177.445678 should be subtracted (since logN() returns a Q8 value).
+   For a X value in Q17, the value 177.445678*17 = 3017 should be
+   subtracted */
+static int16_t CalcLogN(int32_t arg) {
+  int16_t zeros, log2, frac, logN;
+
+  zeros=WebRtcSpl_NormU32(arg);
+  frac = (int16_t)((uint32_t)((arg << zeros) & 0x7FFFFFFF) >> 23);
+  log2 = (int16_t)(((31 - zeros) << 8) + frac);  // log2(x) in Q8
+  logN = (int16_t)(log2 * 22713 >> 15);  // log(2) = 0.693147 = 22713 in Q15
+  logN=logN+11; //Scalar compensation which minimizes the (log(x)-logN(x))^2 error over all x.
+
+  return logN;
+}
+
+
+/*
+  expN(x) = 2^(a*x), where a = log2(e) ~= 1.442695
+
+  Input:  Q8  (int16_t)
+  Output: Q17 (int32_t)
+
+  a = log2(e) = log2(exp(1)) ~= 1.442695  ==>  a = 23637 in Q14 (1.442688)
+  To this value, 700 is added or subtracted in order to get an average error
+  nearer zero, instead of always same-sign.
+*/
+
+static int32_t CalcExpN(int16_t x) {
+  int16_t axINT, axFRAC;
+  int16_t exp16;
+  int32_t exp;
+  int16_t ax = (int16_t)(x * 23637 >> 14);  // Q8
+
+  if (x>=0) {
+    axINT = ax >> 8;  //Q0
+    axFRAC = ax&0x00FF;
+    exp16 = 1 << axINT;  // Q0
+    axFRAC = axFRAC+256; //Q8
+    exp = exp16 * axFRAC;  // Q0*Q8 = Q8
+    exp <<= 9;  // Q17
+  } else {
+    ax = -ax;
+    axINT = 1 + (ax >> 8);  //Q0
+    axFRAC = 0x00FF - (ax&0x00FF);
+    exp16 = (int16_t)(32768 >> axINT);  // Q15
+    axFRAC = axFRAC+256; //Q8
+    exp = exp16 * axFRAC;  // Q15*Q8 = Q23
+    exp >>= 6;  // Q17
+  }
+
+  return exp;
+}
+
+
+/* compute correlation from power spectrum */
+static void CalcCorrelation(int32_t *PSpecQ12, int32_t *CorrQ7)
+{
+  int32_t summ[FRAMESAMPLES/8];
+  int32_t diff[FRAMESAMPLES/8];
+  int32_t sum;
+  int k, n;
+
+  for (k = 0; k < FRAMESAMPLES/8; k++) {
+    summ[k] = (PSpecQ12[k] + PSpecQ12[FRAMESAMPLES / 4 - 1 - k] + 16) >> 5;
+    diff[k] = (PSpecQ12[k] - PSpecQ12[FRAMESAMPLES / 4 - 1 - k] + 16) >> 5;
+  }
+
+  sum = 2;
+  for (n = 0; n < FRAMESAMPLES/8; n++)
+    sum += summ[n];
+  CorrQ7[0] = sum;
+
+  for (k = 0; k < AR_ORDER; k += 2) {
+    sum = 0;
+    for (n = 0; n < FRAMESAMPLES/8; n++)
+      sum += (WebRtcIsacfix_kCos[k][n] * diff[n] + 256) >> 9;
+    CorrQ7[k+1] = sum;
+  }
+
+  for (k=1; k<AR_ORDER; k+=2) {
+    sum = 0;
+    for (n = 0; n < FRAMESAMPLES/8; n++)
+      sum += (WebRtcIsacfix_kCos[k][n] * summ[n] + 256) >> 9;
+    CorrQ7[k+1] = sum;
+  }
+}
+
+// Some arithmetic operations that are allowed to overflow. (It's still
+// undefined behavior, so not a good idea; this just makes UBSan ignore the
+// violations, so that our old code can continue to do what it's always been
+// doing.)
+static inline int32_t RTC_NO_SANITIZE("signed-integer-overflow")
+    OverflowingMulS16S32ToS32(int16_t a, int32_t b) {
+  return a * b;
+}
+static inline int32_t RTC_NO_SANITIZE("signed-integer-overflow")
+    OverflowingAddS32S32ToS32(int32_t a, int32_t b) {
+  return a + b;
+}
+static inline int32_t RTC_NO_SANITIZE("signed-integer-overflow")
+    OverflowingSubS32S32ToS32(int32_t a, int32_t b) {
+  return a - b;
+}
+
+/* compute inverse AR power spectrum */
+static void CalcInvArSpec(const int16_t *ARCoefQ12,
+                          const int32_t gainQ10,
+                          int32_t *CurveQ16)
+{
+  int32_t CorrQ11[AR_ORDER+1];
+  int32_t sum, tmpGain;
+  int32_t diffQ16[FRAMESAMPLES/8];
+  const int16_t *CS_ptrQ9;
+  int k, n;
+  int16_t round, shftVal = 0, sh;
+
+  sum = 0;
+  for (n = 0; n < AR_ORDER+1; n++)
+    sum += WEBRTC_SPL_MUL(ARCoefQ12[n], ARCoefQ12[n]);    /* Q24 */
+  sum = ((sum >> 6) * 65 + 32768) >> 16;  /* Result in Q8. */
+  CorrQ11[0] = (sum * gainQ10 + 256) >> 9;
+
+  /* To avoid overflow, we shift down gainQ10 if it is large. We will not lose any precision */
+  if(gainQ10>400000){
+    tmpGain = gainQ10 >> 3;
+    round = 32;
+    shftVal = 6;
+  } else {
+    tmpGain = gainQ10;
+    round = 256;
+    shftVal = 9;
+  }
+
+  for (k = 1; k < AR_ORDER+1; k++) {
+    sum = 16384;
+    for (n = k; n < AR_ORDER+1; n++)
+      sum += WEBRTC_SPL_MUL(ARCoefQ12[n-k], ARCoefQ12[n]);  /* Q24 */
+    sum >>= 15;
+    CorrQ11[k] = (sum * tmpGain + round) >> shftVal;
+  }
+  sum = CorrQ11[0] << 7;
+  for (n = 0; n < FRAMESAMPLES/8; n++)
+    CurveQ16[n] = sum;
+
+  for (k = 1; k < AR_ORDER; k += 2) {
+    for (n = 0; n < FRAMESAMPLES/8; n++)
+      CurveQ16[n] +=
+          (OverflowingMulS16S32ToS32(WebRtcIsacfix_kCos[k][n], CorrQ11[k + 1]) +
+           2) >>
+          2;
+  }
+
+  CS_ptrQ9 = WebRtcIsacfix_kCos[0];
+
+  /* If CorrQ11[1] too large we avoid getting overflow in the calculation by shifting */
+  sh=WebRtcSpl_NormW32(CorrQ11[1]);
+  if (CorrQ11[1]==0) /* Use next correlation */
+    sh=WebRtcSpl_NormW32(CorrQ11[2]);
+
+  if (sh<9)
+    shftVal = 9 - sh;
+  else
+    shftVal = 0;
+
+  for (n = 0; n < FRAMESAMPLES/8; n++)
+    diffQ16[n] = (CS_ptrQ9[n] * (CorrQ11[1] >> shftVal) + 2) >> 2;
+  for (k = 2; k < AR_ORDER; k += 2) {
+    CS_ptrQ9 = WebRtcIsacfix_kCos[k];
+    for (n = 0; n < FRAMESAMPLES/8; n++)
+      diffQ16[n] += (CS_ptrQ9[n] * (CorrQ11[k + 1] >> shftVal) + 2) >> 2;
+  }
+
+  for (k=0; k<FRAMESAMPLES/8; k++) {
+    int32_t diff_q16 = diffQ16[k] * (1 << shftVal);
+    CurveQ16[FRAMESAMPLES / 4 - 1 - k] =
+        OverflowingSubS32S32ToS32(CurveQ16[k], diff_q16);
+    CurveQ16[k] = OverflowingAddS32S32ToS32(CurveQ16[k], diff_q16);
+  }
+}
+
+static void CalcRootInvArSpec(const int16_t *ARCoefQ12,
+                              const int32_t gainQ10,
+                              uint16_t *CurveQ8)
+{
+  int32_t CorrQ11[AR_ORDER+1];
+  int32_t sum, tmpGain;
+  int32_t summQ16[FRAMESAMPLES/8];
+  int32_t diffQ16[FRAMESAMPLES/8];
+
+  const int16_t *CS_ptrQ9;
+  int k, n, i;
+  int16_t round, shftVal = 0, sh;
+  int32_t res, in_sqrt, newRes;
+
+  sum = 0;
+  for (n = 0; n < AR_ORDER+1; n++)
+    sum += WEBRTC_SPL_MUL(ARCoefQ12[n], ARCoefQ12[n]);    /* Q24 */
+  sum = ((sum >> 6) * 65 + 32768) >> 16;  /* Result in Q8. */
+  CorrQ11[0] = (sum * gainQ10 + 256) >> 9;
+
+  /* To avoid overflow, we shift down gainQ10 if it is large. We will not lose any precision */
+  if(gainQ10>400000){
+    tmpGain = gainQ10 >> 3;
+    round = 32;
+    shftVal = 6;
+  } else {
+    tmpGain = gainQ10;
+    round = 256;
+    shftVal = 9;
+  }
+
+  for (k = 1; k < AR_ORDER+1; k++) {
+    sum = 16384;
+    for (n = k; n < AR_ORDER+1; n++)
+      sum += WEBRTC_SPL_MUL(ARCoefQ12[n-k], ARCoefQ12[n]);  /* Q24 */
+    sum >>= 15;
+    CorrQ11[k] = (sum * tmpGain + round) >> shftVal;
+  }
+  sum = CorrQ11[0] << 7;
+  for (n = 0; n < FRAMESAMPLES/8; n++)
+    summQ16[n] = sum;
+
+  for (k = 1; k < (AR_ORDER); k += 2) {
+    for (n = 0; n < FRAMESAMPLES/8; n++)
+      summQ16[n] += ((CorrQ11[k + 1] * WebRtcIsacfix_kCos[k][n]) + 2) >> 2;
+  }
+
+  CS_ptrQ9 = WebRtcIsacfix_kCos[0];
+
+  /* If CorrQ11[1] too large we avoid getting overflow in the calculation by shifting */
+  sh=WebRtcSpl_NormW32(CorrQ11[1]);
+  if (CorrQ11[1]==0) /* Use next correlation */
+    sh=WebRtcSpl_NormW32(CorrQ11[2]);
+
+  if (sh<9)
+    shftVal = 9 - sh;
+  else
+    shftVal = 0;
+
+  for (n = 0; n < FRAMESAMPLES/8; n++)
+    diffQ16[n] = (CS_ptrQ9[n] * (CorrQ11[1] >> shftVal) + 2) >> 2;
+  for (k = 2; k < AR_ORDER; k += 2) {
+    CS_ptrQ9 = WebRtcIsacfix_kCos[k];
+    for (n = 0; n < FRAMESAMPLES/8; n++)
+      diffQ16[n] += (CS_ptrQ9[n] * (CorrQ11[k + 1] >> shftVal) + 2) >> 2;
+  }
+
+  in_sqrt = summQ16[0] + (diffQ16[0] << shftVal);
+
+  /* convert to magnitude spectrum, by doing square-roots (modified from SPLIB)  */
+  res = 1 << (WebRtcSpl_GetSizeInBits(in_sqrt) >> 1);
+
+  for (k = 0; k < FRAMESAMPLES/8; k++)
+  {
+    in_sqrt = summQ16[k] + (diffQ16[k] << shftVal);
+    i = 10;
+
+    /* make in_sqrt positive to prohibit sqrt of negative values */
+    if(in_sqrt<0)
+      in_sqrt=-in_sqrt;
+
+    newRes = (in_sqrt / res + res) >> 1;
+    do
+    {
+      res = newRes;
+      newRes = (in_sqrt / res + res) >> 1;
+    } while (newRes != res && i-- > 0);
+
+    CurveQ8[k] = (int16_t)newRes;
+  }
+  for (k = FRAMESAMPLES/8; k < FRAMESAMPLES/4; k++) {
+
+    in_sqrt = summQ16[FRAMESAMPLES / 4 - 1 - k] -
+        (diffQ16[FRAMESAMPLES / 4 - 1 - k] << shftVal);
+    i = 10;
+
+    /* make in_sqrt positive to prohibit sqrt of negative values */
+    if(in_sqrt<0)
+      in_sqrt=-in_sqrt;
+
+    newRes = (in_sqrt / res + res) >> 1;
+    do
+    {
+      res = newRes;
+      newRes = (in_sqrt / res + res) >> 1;
+    } while (newRes != res && i-- > 0);
+
+    CurveQ8[k] = (int16_t)newRes;
+  }
+
+}
+
+
+
+/* generate array of dither samples in Q7 */
+static void GenerateDitherQ7(int16_t *bufQ7,
+                             uint32_t seed,
+                             int16_t length,
+                             int16_t AvgPitchGain_Q12)
+{
+  int   k;
+  int16_t dither1_Q7, dither2_Q7, dither_gain_Q14, shft;
+
+  if (AvgPitchGain_Q12 < 614)  /* this threshold should be equal to that in decode_spec() */
+  {
+    for (k = 0; k < length-2; k += 3)
+    {
+      /* new random unsigned int32_t */
+      seed = WEBRTC_SPL_UMUL(seed, 196314165) + 907633515;
+
+      /* fixed-point dither sample between -64 and 64 (Q7) */
+      dither1_Q7 = (int16_t)(((int32_t)(seed + 16777216)) >> 25);
+
+      /* new random unsigned int32_t */
+      seed = WEBRTC_SPL_UMUL(seed, 196314165) + 907633515;
+
+      /* fixed-point dither sample between -64 and 64 */
+      dither2_Q7 = (int16_t)(((int32_t)(seed + 16777216)) >> 25);
+
+      shft = (int16_t)(WEBRTC_SPL_RSHIFT_U32(seed, 25) & 15);
+      if (shft < 5)
+      {
+        bufQ7[k]   = dither1_Q7;
+        bufQ7[k+1] = dither2_Q7;
+        bufQ7[k+2] = 0;
+      }
+      else if (shft < 10)
+      {
+        bufQ7[k]   = dither1_Q7;
+        bufQ7[k+1] = 0;
+        bufQ7[k+2] = dither2_Q7;
+      }
+      else
+      {
+        bufQ7[k]   = 0;
+        bufQ7[k+1] = dither1_Q7;
+        bufQ7[k+2] = dither2_Q7;
+      }
+    }
+  }
+  else
+  {
+    dither_gain_Q14 = (int16_t)(22528 - WEBRTC_SPL_MUL(10, AvgPitchGain_Q12));
+
+    /* dither on half of the coefficients */
+    for (k = 0; k < length-1; k += 2)
+    {
+      /* new random unsigned int32_t */
+      seed = WEBRTC_SPL_UMUL(seed, 196314165) + 907633515;
+
+      /* fixed-point dither sample between -64 and 64 */
+      dither1_Q7 = (int16_t)(((int32_t)(seed + 16777216)) >> 25);
+
+      /* dither sample is placed in either even or odd index */
+      shft = (int16_t)(WEBRTC_SPL_RSHIFT_U32(seed, 25) & 1);     /* either 0 or 1 */
+
+      bufQ7[k + shft] = (int16_t)((dither_gain_Q14 * dither1_Q7 + 8192) >> 14);
+      bufQ7[k + 1 - shft] = 0;
+    }
+  }
+}
+
+
+
+
+/*
+ * function to decode the complex spectrum from the bitstream
+ * returns the total number of bytes in the stream
+ */
+int WebRtcIsacfix_DecodeSpec(Bitstr_dec *streamdata,
+                             int16_t *frQ7,
+                             int16_t *fiQ7,
+                             int16_t AvgPitchGain_Q12)
+{
+  int16_t  data[FRAMESAMPLES];
+  int32_t  invARSpec2_Q16[FRAMESAMPLES/4];
+  int16_t  ARCoefQ12[AR_ORDER+1];
+  int16_t  RCQ15[AR_ORDER];
+  int16_t  gainQ10;
+  int32_t  gain2_Q10;
+  int len;
+  int          k;
+
+  /* create dither signal */
+  GenerateDitherQ7(data, streamdata->W_upper, FRAMESAMPLES, AvgPitchGain_Q12); /* Dither is output in vector 'Data' */
+
+  /* decode model parameters */
+  if (WebRtcIsacfix_DecodeRcCoef(streamdata, RCQ15) < 0)
+    return -ISAC_RANGE_ERROR_DECODE_SPECTRUM;
+
+
+  WebRtcSpl_ReflCoefToLpc(RCQ15, AR_ORDER, ARCoefQ12);
+
+  if (WebRtcIsacfix_DecodeGain2(streamdata, &gain2_Q10) < 0)
+    return -ISAC_RANGE_ERROR_DECODE_SPECTRUM;
+
+  /* compute inverse AR power spectrum */
+  CalcInvArSpec(ARCoefQ12, gain2_Q10, invARSpec2_Q16);
+
+  /* arithmetic decoding of spectrum */
+  /* 'data' input and output. Input = Dither */
+  len = WebRtcIsacfix_DecLogisticMulti2(data, streamdata, invARSpec2_Q16, (int16_t)FRAMESAMPLES);
+
+  if (len<1)
+    return -ISAC_RANGE_ERROR_DECODE_SPECTRUM;
+
+  /* subtract dither and scale down spectral samples with low SNR */
+  if (AvgPitchGain_Q12 <= 614)
+  {
+    for (k = 0; k < FRAMESAMPLES; k += 4)
+    {
+      gainQ10 = WebRtcSpl_DivW32W16ResW16(30 << 10,
+          (int16_t)((uint32_t)(invARSpec2_Q16[k >> 2] + 2195456) >> 16));
+      *frQ7++ = (int16_t)((data[k] * gainQ10 + 512) >> 10);
+      *fiQ7++ = (int16_t)((data[k + 1] * gainQ10 + 512) >> 10);
+      *frQ7++ = (int16_t)((data[k + 2] * gainQ10 + 512) >> 10);
+      *fiQ7++ = (int16_t)((data[k + 3] * gainQ10 + 512) >> 10);
+    }
+  }
+  else
+  {
+    for (k = 0; k < FRAMESAMPLES; k += 4)
+    {
+      gainQ10 = WebRtcSpl_DivW32W16ResW16(36 << 10,
+          (int16_t)((uint32_t)(invARSpec2_Q16[k >> 2] + 2654208) >> 16));
+      *frQ7++ = (int16_t)((data[k] * gainQ10 + 512) >> 10);
+      *fiQ7++ = (int16_t)((data[k + 1] * gainQ10 + 512) >> 10);
+      *frQ7++ = (int16_t)((data[k + 2] * gainQ10 + 512) >> 10);
+      *fiQ7++ = (int16_t)((data[k + 3] * gainQ10 + 512) >> 10);
+    }
+  }
+
+  return len;
+}
+
+
+int WebRtcIsacfix_EncodeSpec(const int16_t *fr,
+                             const int16_t *fi,
+                             Bitstr_enc *streamdata,
+                             int16_t AvgPitchGain_Q12)
+{
+  int16_t  dataQ7[FRAMESAMPLES];
+  int32_t  PSpec[FRAMESAMPLES/4];
+  uint16_t invARSpecQ8[FRAMESAMPLES/4];
+  int32_t  CorrQ7[AR_ORDER+1];
+  int32_t  CorrQ7_norm[AR_ORDER+1];
+  int16_t  RCQ15[AR_ORDER];
+  int16_t  ARCoefQ12[AR_ORDER+1];
+  int32_t  gain2_Q10;
+  int16_t  val;
+  int32_t  nrg;
+  uint32_t sum;
+  int16_t  lft_shft;
+  int16_t  status;
+  int          k, n, j;
+
+
+  /* create dither_float signal */
+  GenerateDitherQ7(dataQ7, streamdata->W_upper, FRAMESAMPLES, AvgPitchGain_Q12);
+
+  /* add dither and quantize, and compute power spectrum */
+  /* Vector dataQ7 contains Dither in Q7 */
+  for (k = 0; k < FRAMESAMPLES; k += 4)
+  {
+    val = ((*fr++ + dataQ7[k]   + 64) & 0xFF80) - dataQ7[k]; /* Data = Dither */
+    dataQ7[k] = val;            /* New value in Data */
+    sum = WEBRTC_SPL_UMUL(val, val);
+
+    val = ((*fi++ + dataQ7[k+1] + 64) & 0xFF80) - dataQ7[k+1]; /* Data = Dither */
+    dataQ7[k+1] = val;            /* New value in Data */
+    sum += WEBRTC_SPL_UMUL(val, val);
+
+    val = ((*fr++ + dataQ7[k+2] + 64) & 0xFF80) - dataQ7[k+2]; /* Data = Dither */
+    dataQ7[k+2] = val;            /* New value in Data */
+    sum += WEBRTC_SPL_UMUL(val, val);
+
+    val = ((*fi++ + dataQ7[k+3] + 64) & 0xFF80) - dataQ7[k+3]; /* Data = Dither */
+    dataQ7[k+3] = val;            /* New value in Data */
+    sum += WEBRTC_SPL_UMUL(val, val);
+
+    PSpec[k>>2] = WEBRTC_SPL_RSHIFT_U32(sum, 2);
+  }
+
+  /* compute correlation from power spectrum */
+  CalcCorrelation(PSpec, CorrQ7);
+
+
+  /* find AR coefficients */
+  /* number of bit shifts to 14-bit normalize CorrQ7[0] (leaving room for sign) */
+  lft_shft = WebRtcSpl_NormW32(CorrQ7[0]) - 18;
+
+  if (lft_shft > 0) {
+    for (k=0; k<AR_ORDER+1; k++)
+      CorrQ7_norm[k] = CorrQ7[k] << lft_shft;
+  } else {
+    for (k=0; k<AR_ORDER+1; k++)
+      CorrQ7_norm[k] = CorrQ7[k] >> -lft_shft;
+  }
+
+  /* find RC coefficients */
+  WebRtcSpl_AutoCorrToReflCoef(CorrQ7_norm, AR_ORDER, RCQ15);
+
+  /* quantize & code RC Coef */
+  status = WebRtcIsacfix_EncodeRcCoef(RCQ15, streamdata);
+  if (status < 0) {
+    return status;
+  }
+
+  /* RC -> AR coefficients */
+  WebRtcSpl_ReflCoefToLpc(RCQ15, AR_ORDER, ARCoefQ12);
+
+  /* compute ARCoef' * Corr * ARCoef in Q19 */
+  nrg = 0;
+  for (j = 0; j <= AR_ORDER; j++) {
+    for (n = 0; n <= j; n++)
+      nrg += (ARCoefQ12[j] * ((CorrQ7_norm[j - n] * ARCoefQ12[n] + 256) >> 9) +
+          4) >> 3;
+    for (n = j+1; n <= AR_ORDER; n++)
+      nrg += (ARCoefQ12[j] * ((CorrQ7_norm[n - j] * ARCoefQ12[n] + 256) >> 9) +
+          4) >> 3;
+  }
+
+  if (lft_shft > 0)
+    nrg >>= lft_shft;
+  else
+    nrg <<= -lft_shft;
+
+  if(nrg>131072)
+    gain2_Q10 = WebRtcSpl_DivResultInQ31(FRAMESAMPLES >> 2, nrg);  /* also shifts 31 bits to the left! */
+  else
+    gain2_Q10 = FRAMESAMPLES >> 2;
+
+  /* quantize & code gain2_Q10 */
+  if (WebRtcIsacfix_EncodeGain2(&gain2_Q10, streamdata))
+    return -1;
+
+  /* compute inverse AR magnitude spectrum */
+  CalcRootInvArSpec(ARCoefQ12, gain2_Q10, invARSpecQ8);
+
+
+  /* arithmetic coding of spectrum */
+  status = WebRtcIsacfix_EncLogisticMulti2(streamdata, dataQ7, invARSpecQ8, (int16_t)FRAMESAMPLES);
+  if ( status )
+    return( status );
+
+  return 0;
+}
+
+
+/* Matlab's LAR definition */
+static void Rc2LarFix(const int16_t *rcQ15, int32_t *larQ17, int16_t order) {
+
+  /*
+
+    This is a piece-wise implemenetation of a rc2lar-function (all values in the comment
+    are Q15 values and  are based on [0 24956/32768 30000/32768 32500/32768], i.e.
+    [0.76159667968750   0.91552734375000   0.99182128906250]
+
+    x0  x1           a                 k              x0(again)         b
+    ==================================================================================
+    0.00 0.76:   0                  2.625997508581   0                  0
+    0.76 0.91:   2.000012018559     7.284502668663   0.761596679688    -3.547841027073
+    0.91 0.99:   3.121320351712    31.115835041229   0.915527343750   -25.366077452148
+    0.99 1.00:   5.495270168700   686.663805654056   0.991821289063  -675.552510708011
+
+    The implementation is y(x)= a + (x-x0)*k, but this can be simplified to
+
+    y(x) = a-x0*k + x*k = b + x*k, where b = a-x0*k
+
+    akx=[0                 2.625997508581   0
+    2.000012018559     7.284502668663   0.761596679688
+    3.121320351712    31.115835041229   0.915527343750
+    5.495270168700   686.663805654056   0.991821289063];
+
+    b = akx(:,1) - akx(:,3).*akx(:,2)
+
+    [ 0.0
+    -3.547841027073
+    -25.366077452148
+    -675.552510708011]
+
+  */
+
+  int k;
+  int16_t rc;
+  int32_t larAbsQ17;
+
+  for (k = 0; k < order; k++) {
+
+    rc = WEBRTC_SPL_ABS_W16(rcQ15[k]); //Q15
+
+    /* Calculate larAbsQ17 in Q17 from rc in Q15 */
+
+    if (rc<24956) {  //0.7615966 in Q15
+      // (Q15*Q13)>>11 = Q17
+      larAbsQ17 = rc * 21512 >> 11;
+    } else if (rc<30000) { //0.91552734375 in Q15
+      // Q17 + (Q15*Q12)>>10 = Q17
+      larAbsQ17 = -465024 + (rc * 29837 >> 10);
+    } else if (rc<32500) { //0.99182128906250 in Q15
+      // Q17 + (Q15*Q10)>>8 = Q17
+      larAbsQ17 = -3324784 + (rc * 31863 >> 8);
+    } else  {
+      // Q17 + (Q15*Q5)>>3 = Q17
+      larAbsQ17 = -88546020 + (rc * 21973 >> 3);
+    }
+
+    if (rcQ15[k]>0) {
+      larQ17[k] = larAbsQ17;
+    } else {
+      larQ17[k] = -larAbsQ17;
+    }
+  }
+}
+
+
+static void Lar2RcFix(const int32_t *larQ17, int16_t *rcQ15,  int16_t order) {
+
+  /*
+    This is a piece-wise implemenetation of a lar2rc-function
+    See comment in Rc2LarFix() about details.
+  */
+
+  int k;
+  int16_t larAbsQ11;
+  int32_t rc;
+
+  for (k = 0; k < order; k++) {
+
+    larAbsQ11 = (int16_t)WEBRTC_SPL_ABS_W32((larQ17[k] + 32) >> 6);  // Q11
+
+    if (larAbsQ11<4097) { //2.000012018559 in Q11
+      // Q11*Q16>>12 = Q15
+      rc = larAbsQ11 * 24957 >> 12;
+    } else if (larAbsQ11<6393) { //3.121320351712 in Q11
+      // (Q11*Q17 + Q13)>>13 = Q15
+      rc = (larAbsQ11 * 17993 + 130738688) >> 13;
+    } else if (larAbsQ11<11255) { //5.495270168700 in Q11
+      // (Q11*Q19 + Q30)>>15 = Q15
+      rc = (larAbsQ11 * 16850 + 875329820) >> 15;
+    } else  {
+      // (Q11*Q24>>16 + Q19)>>4 = Q15
+      rc = (((larAbsQ11 * 24433) >> 16) + 515804) >> 4;
+    }
+
+    if (larQ17[k]<=0) {
+      rc = -rc;
+    }
+
+    rcQ15[k] = (int16_t) rc;  // Q15
+  }
+}
+
+static void Poly2LarFix(int16_t *lowbandQ15,
+                        int16_t orderLo,
+                        int16_t *hibandQ15,
+                        int16_t orderHi,
+                        int16_t Nsub,
+                        int32_t *larsQ17) {
+
+  int k, n;
+  int32_t *outpQ17;
+  int16_t orderTot;
+  int32_t larQ17[MAX_ORDER];   // Size 7+6 is enough
+
+  orderTot = (orderLo + orderHi);
+  outpQ17 = larsQ17;
+  for (k = 0; k < Nsub; k++) {
+
+    Rc2LarFix(lowbandQ15, larQ17, orderLo);
+
+    for (n = 0; n < orderLo; n++)
+      outpQ17[n] = larQ17[n]; //Q17
+
+    Rc2LarFix(hibandQ15, larQ17, orderHi);
+
+    for (n = 0; n < orderHi; n++)
+      outpQ17[n + orderLo] = larQ17[n]; //Q17;
+
+    outpQ17 += orderTot;
+    lowbandQ15 += orderLo;
+    hibandQ15 += orderHi;
+  }
+}
+
+
+static void Lar2polyFix(int32_t *larsQ17,
+                        int16_t *lowbandQ15,
+                        int16_t orderLo,
+                        int16_t *hibandQ15,
+                        int16_t orderHi,
+                        int16_t Nsub) {
+
+  int k, n;
+  int16_t orderTot;
+  int16_t *outplQ15, *outphQ15;
+  int32_t *inpQ17;
+  int16_t rcQ15[7+6];
+
+  orderTot = (orderLo + orderHi);
+  outplQ15 = lowbandQ15;
+  outphQ15 = hibandQ15;
+  inpQ17 = larsQ17;
+  for (k = 0; k < Nsub; k++) {
+
+    /* gains not handled here as in the FLP version */
+
+    /* Low band */
+    Lar2RcFix(&inpQ17[0], rcQ15, orderLo);
+    for (n = 0; n < orderLo; n++)
+      outplQ15[n] = rcQ15[n]; // Refl. coeffs
+
+    /* High band */
+    Lar2RcFix(&inpQ17[orderLo], rcQ15, orderHi);
+    for (n = 0; n < orderHi; n++)
+      outphQ15[n] = rcQ15[n]; // Refl. coeffs
+
+    inpQ17 += orderTot;
+    outplQ15 += orderLo;
+    outphQ15 += orderHi;
+  }
+}
+
+/*
+Function WebRtcIsacfix_MatrixProduct1C() does one form of matrix multiplication.
+It first shifts input data of one matrix, determines the right indexes for the
+two matrixes, multiply them, and write the results into an output buffer.
+
+Note that two factors (or, multipliers) determine the initialization values of
+the variable |matrix1_index| in the code. The relationship is
+|matrix1_index| = |matrix1_index_factor1| * |matrix1_index_factor2|, where
+|matrix1_index_factor1| is given by the argument while |matrix1_index_factor2|
+is determined by the value of argument |matrix1_index_init_case|;
+|matrix1_index_factor2| is the value of the outmost loop counter j (when
+|matrix1_index_init_case| is 0), or the value of the middle loop counter k (when
+|matrix1_index_init_case| is non-zero).
+
+|matrix0_index| is determined the same way.
+
+Arguments:
+  matrix0[]:                 matrix0 data in Q15 domain.
+  matrix1[]:                 matrix1 data.
+  matrix_product[]:          output data (matrix product).
+  matrix1_index_factor1:     The first of two factors determining the
+                             initialization value of matrix1_index.
+  matrix0_index_factor1:     The first of two factors determining the
+                             initialization value of matrix0_index.
+  matrix1_index_init_case:   Case number for selecting the second of two
+                             factors determining the initialization value
+                             of matrix1_index and matrix0_index.
+  matrix1_index_step:        Incremental step for matrix1_index.
+  matrix0_index_step:        Incremental step for matrix0_index.
+  inner_loop_count:          Maximum count of the inner loop.
+  mid_loop_count:            Maximum count of the intermediate loop.
+  shift:                     Left shift value for matrix1.
+*/
+void WebRtcIsacfix_MatrixProduct1C(const int16_t matrix0[],
+                                   const int32_t matrix1[],
+                                   int32_t matrix_product[],
+                                   const int matrix1_index_factor1,
+                                   const int matrix0_index_factor1,
+                                   const int matrix1_index_init_case,
+                                   const int matrix1_index_step,
+                                   const int matrix0_index_step,
+                                   const int inner_loop_count,
+                                   const int mid_loop_count,
+                                   const int shift) {
+  int j = 0, k = 0, n = 0;
+  int matrix0_index = 0, matrix1_index = 0, matrix_prod_index = 0;
+  int* matrix0_index_factor2 = &k;
+  int* matrix1_index_factor2 = &j;
+  if (matrix1_index_init_case != 0) {
+    matrix0_index_factor2 = &j;
+    matrix1_index_factor2 = &k;
+  }
+
+  for (j = 0; j < SUBFRAMES; j++) {
+    matrix_prod_index = mid_loop_count * j;
+    for (k = 0; k < mid_loop_count; k++) {
+      int32_t sum32 = 0;
+      matrix0_index = matrix0_index_factor1 * (*matrix0_index_factor2);
+      matrix1_index = matrix1_index_factor1 * (*matrix1_index_factor2);
+      for (n = 0; n < inner_loop_count; n++) {
+        sum32 += WEBRTC_SPL_MUL_16_32_RSFT16(
+            matrix0[matrix0_index], matrix1[matrix1_index] * (1 << shift));
+        matrix0_index += matrix0_index_step;
+        matrix1_index += matrix1_index_step;
+      }
+      matrix_product[matrix_prod_index] = sum32;
+      matrix_prod_index++;
+    }
+  }
+}
+
+/*
+Function WebRtcIsacfix_MatrixProduct2C() returns the product of two matrixes,
+one of which has two columns. It first has to determine the correct index of
+the first matrix before doing the actual element multiplication.
+
+Arguments:
+  matrix0[]:                 A matrix in Q15 domain.
+  matrix1[]:                 A matrix in Q21 domain.
+  matrix_product[]:          Output data in Q17 domain.
+  matrix0_index_factor:      A factor determining the initialization value
+                             of matrix0_index.
+  matrix0_index_step:        Incremental step for matrix0_index.
+*/
+void WebRtcIsacfix_MatrixProduct2C(const int16_t matrix0[],
+                                   const int32_t matrix1[],
+                                   int32_t matrix_product[],
+                                   const int matrix0_index_factor,
+                                   const int matrix0_index_step) {
+  int j = 0, n = 0;
+  int matrix1_index = 0, matrix0_index = 0, matrix_prod_index = 0;
+  for (j = 0; j < SUBFRAMES; j++) {
+    int32_t sum32 = 0, sum32_2 = 0;
+    matrix1_index = 0;
+    matrix0_index = matrix0_index_factor * j;
+    for (n = SUBFRAMES; n > 0; n--) {
+      sum32 += (WEBRTC_SPL_MUL_16_32_RSFT16(matrix0[matrix0_index],
+                                            matrix1[matrix1_index]));
+      sum32_2 += (WEBRTC_SPL_MUL_16_32_RSFT16(matrix0[matrix0_index],
+                                            matrix1[matrix1_index + 1]));
+      matrix1_index += 2;
+      matrix0_index += matrix0_index_step;
+    }
+    matrix_product[matrix_prod_index] = sum32 >> 3;
+    matrix_product[matrix_prod_index + 1] = sum32_2 >> 3;
+    matrix_prod_index += 2;
+  }
+}
+
+int WebRtcIsacfix_DecodeLpc(int32_t *gain_lo_hiQ17,
+                            int16_t *LPCCoef_loQ15,
+                            int16_t *LPCCoef_hiQ15,
+                            Bitstr_dec *streamdata,
+                            int16_t *outmodel) {
+
+  int32_t larsQ17[KLT_ORDER_SHAPE]; // KLT_ORDER_GAIN+KLT_ORDER_SHAPE == (ORDERLO+ORDERHI)*SUBFRAMES
+  int err;
+
+  err = WebRtcIsacfix_DecodeLpcCoef(streamdata, larsQ17, gain_lo_hiQ17, outmodel);
+  if (err<0)  // error check
+    return -ISAC_RANGE_ERROR_DECODE_LPC;
+
+  Lar2polyFix(larsQ17, LPCCoef_loQ15, ORDERLO, LPCCoef_hiQ15, ORDERHI, SUBFRAMES);
+
+  return 0;
+}
+
+/* decode & dequantize LPC Coef */
+int WebRtcIsacfix_DecodeLpcCoef(Bitstr_dec *streamdata,
+                                int32_t *LPCCoefQ17,
+                                int32_t *gain_lo_hiQ17,
+                                int16_t *outmodel)
+{
+  int j, k, n;
+  int err;
+  int16_t pos, pos2, posg, poss;
+  int16_t gainpos;
+  int16_t model;
+  int16_t index_QQ[KLT_ORDER_SHAPE];
+  int32_t tmpcoeffs_gQ17[KLT_ORDER_GAIN];
+  int32_t tmpcoeffs2_gQ21[KLT_ORDER_GAIN];
+  int16_t tmpcoeffs_sQ10[KLT_ORDER_SHAPE];
+  int32_t tmpcoeffs_sQ17[KLT_ORDER_SHAPE];
+  int32_t tmpcoeffs2_sQ18[KLT_ORDER_SHAPE];
+  int32_t sumQQ;
+  int16_t sumQQ16;
+  int32_t tmp32;
+
+
+
+  /* entropy decoding of model number */
+  err = WebRtcIsacfix_DecHistOneStepMulti(&model, streamdata, WebRtcIsacfix_kModelCdfPtr, WebRtcIsacfix_kModelInitIndex, 1);
+  if (err<0)  // error check
+    return err;
+
+  /* entropy decoding of quantization indices */
+  err = WebRtcIsacfix_DecHistOneStepMulti(index_QQ, streamdata, WebRtcIsacfix_kCdfShapePtr[model], WebRtcIsacfix_kInitIndexShape[model], KLT_ORDER_SHAPE);
+  if (err<0)  // error check
+    return err;
+  /* find quantization levels for coefficients */
+  for (k=0; k<KLT_ORDER_SHAPE; k++) {
+    tmpcoeffs_sQ10[WebRtcIsacfix_kSelIndShape[k]] = WebRtcIsacfix_kLevelsShapeQ10[WebRtcIsacfix_kOfLevelsShape[model]+WebRtcIsacfix_kOffsetShape[model][k] + index_QQ[k]];
+  }
+
+  err = WebRtcIsacfix_DecHistOneStepMulti(index_QQ, streamdata, WebRtcIsacfix_kCdfGainPtr[model], WebRtcIsacfix_kInitIndexGain[model], KLT_ORDER_GAIN);
+  if (err<0)  // error check
+    return err;
+  /* find quantization levels for coefficients */
+  for (k=0; k<KLT_ORDER_GAIN; k++) {
+    tmpcoeffs_gQ17[WebRtcIsacfix_kSelIndGain[k]] = WebRtcIsacfix_kLevelsGainQ17[WebRtcIsacfix_kOfLevelsGain[model]+ WebRtcIsacfix_kOffsetGain[model][k] + index_QQ[k]];
+  }
+
+
+  /* inverse KLT  */
+
+  /* left transform */  // Transpose matrix!
+  WebRtcIsacfix_MatrixProduct1(WebRtcIsacfix_kT1GainQ15[model], tmpcoeffs_gQ17,
+                               tmpcoeffs2_gQ21, kTIndexFactor2, kTIndexFactor2,
+                               kTInitCase0, kTIndexStep1, kTIndexStep1,
+                               kTLoopCount2, kTLoopCount2, kTMatrix1_shift5);
+
+  poss = 0;
+  for (j=0; j<SUBFRAMES; j++) {
+    for (k=0; k<LPC_SHAPE_ORDER; k++) {
+      sumQQ = 0;
+      pos = LPC_SHAPE_ORDER * j;
+      pos2 = LPC_SHAPE_ORDER * k;
+      for (n=0; n<LPC_SHAPE_ORDER; n++) {
+        sumQQ += tmpcoeffs_sQ10[pos] *
+            WebRtcIsacfix_kT1ShapeQ15[model][pos2] >> 7;  // (Q10*Q15)>>7 = Q18
+        pos++;
+        pos2++;
+      }
+      tmpcoeffs2_sQ18[poss] = sumQQ; //Q18
+      poss++;
+    }
+  }
+
+  /* right transform */ // Transpose matrix
+  WebRtcIsacfix_MatrixProduct2(WebRtcIsacfix_kT2GainQ15[0], tmpcoeffs2_gQ21,
+                               tmpcoeffs_gQ17, kTIndexFactor1, kTIndexStep2);
+  WebRtcIsacfix_MatrixProduct1(WebRtcIsacfix_kT2ShapeQ15[model],
+      tmpcoeffs2_sQ18, tmpcoeffs_sQ17, kTIndexFactor1, kTIndexFactor1,
+      kTInitCase1, kTIndexStep3, kTIndexStep2, kTLoopCount1, kTLoopCount3,
+      kTMatrix1_shift0);
+
+  /* scaling, mean addition, and gain restoration */
+  gainpos = 0;
+  posg = 0;poss = 0;pos=0;
+  for (k=0; k<SUBFRAMES; k++) {
+
+    /* log gains */
+    // Divide by 4 and get Q17 to Q8, i.e. shift 2+9.
+    sumQQ16 = (int16_t)(tmpcoeffs_gQ17[posg] >> 11);
+    sumQQ16 += WebRtcIsacfix_kMeansGainQ8[model][posg];
+    sumQQ = CalcExpN(sumQQ16); // Q8 in and Q17 out
+    gain_lo_hiQ17[gainpos] = sumQQ; //Q17
+    gainpos++;
+    posg++;
+
+    // Divide by 4 and get Q17 to Q8, i.e. shift 2+9.
+    sumQQ16 = (int16_t)(tmpcoeffs_gQ17[posg] >> 11);
+    sumQQ16 += WebRtcIsacfix_kMeansGainQ8[model][posg];
+    sumQQ = CalcExpN(sumQQ16); // Q8 in and Q17 out
+    gain_lo_hiQ17[gainpos] = sumQQ; //Q17
+    gainpos++;
+    posg++;
+
+    /* lo band LAR coeffs */
+    for (n=0; n<ORDERLO; n++, pos++, poss++) {
+      tmp32 = WEBRTC_SPL_MUL_16_32_RSFT16(31208, tmpcoeffs_sQ17[poss]); // (Q16*Q17)>>16 = Q17, with 1/2.1 = 0.47619047619 ~= 31208 in Q16
+      tmp32 = tmp32 + WebRtcIsacfix_kMeansShapeQ17[model][poss]; // Q17+Q17 = Q17
+      LPCCoefQ17[pos] = tmp32;
+    }
+
+    /* hi band LAR coeffs */
+    for (n=0; n<ORDERHI; n++, pos++, poss++) {
+      // ((Q13*Q17)>>16)<<3 = Q17, with 1/0.45 = 2.222222222222 ~= 18204 in Q13
+      tmp32 =
+          WEBRTC_SPL_MUL_16_32_RSFT16(18204, tmpcoeffs_sQ17[poss]) * (1 << 3);
+      tmp32 = tmp32 + WebRtcIsacfix_kMeansShapeQ17[model][poss]; // Q17+Q17 = Q17
+      LPCCoefQ17[pos] = tmp32;
+    }
+  }
+
+
+  *outmodel=model;
+
+  return 0;
+}
+
+/* estimate codel length of LPC Coef */
+static int EstCodeLpcCoef(int32_t *LPCCoefQ17,
+                          int32_t *gain_lo_hiQ17,
+                          int16_t *model,
+                          int32_t *sizeQ11,
+                          Bitstr_enc *streamdata,
+                          IsacSaveEncoderData* encData,
+                          transcode_obj *transcodingParam) {
+  int j, k, n;
+  int16_t posQQ, pos2QQ, gainpos;
+  int16_t  pos, poss, posg, offsg;
+  int16_t index_gQQ[KLT_ORDER_GAIN], index_sQQ[KLT_ORDER_SHAPE];
+  int16_t index_ovr_gQQ[KLT_ORDER_GAIN], index_ovr_sQQ[KLT_ORDER_SHAPE];
+  int32_t BitsQQ;
+
+  int16_t tmpcoeffs_gQ6[KLT_ORDER_GAIN];
+  int32_t tmpcoeffs_gQ17[KLT_ORDER_GAIN];
+  int32_t tmpcoeffs_sQ17[KLT_ORDER_SHAPE];
+  int32_t tmpcoeffs2_gQ21[KLT_ORDER_GAIN];
+  int32_t tmpcoeffs2_sQ17[KLT_ORDER_SHAPE];
+  int32_t sumQQ;
+  int32_t tmp32;
+  int16_t sumQQ16;
+  int status = 0;
+
+  /* write LAR coefficients to statistics file */
+  /* Save data for creation of multiple bitstreams (and transcoding) */
+  if (encData != NULL) {
+    for (k=0; k<KLT_ORDER_GAIN; k++) {
+      encData->LPCcoeffs_g[KLT_ORDER_GAIN*encData->startIdx + k] = gain_lo_hiQ17[k];
+    }
+  }
+
+  /* log gains, mean removal and scaling */
+  posg = 0;poss = 0;pos=0; gainpos=0;
+
+  for (k=0; k<SUBFRAMES; k++) {
+    /* log gains */
+
+    /* The input argument X to logN(X) is 2^17 times higher than the
+       input floating point argument Y to log(Y), since the X value
+       is a Q17 value. This can be compensated for after the call, by
+       subraction a value Z for each Q-step. One Q-step means that
+       X gets 2 times higher, i.e. Z = logN(2)*256 = 0.693147180559*256 =
+       177.445678 should be subtracted (since logN() returns a Q8 value).
+       For a X value in Q17, the value 177.445678*17 = 3017 should be
+       subtracted */
+    tmpcoeffs_gQ6[posg] = CalcLogN(gain_lo_hiQ17[gainpos])-3017; //Q8
+    tmpcoeffs_gQ6[posg] -= WebRtcIsacfix_kMeansGainQ8[0][posg]; //Q8, but Q6 after not-needed mult. by 4
+    posg++; gainpos++;
+
+    tmpcoeffs_gQ6[posg] = CalcLogN(gain_lo_hiQ17[gainpos])-3017; //Q8
+    tmpcoeffs_gQ6[posg] -= WebRtcIsacfix_kMeansGainQ8[0][posg]; //Q8, but Q6 after not-needed mult. by 4
+    posg++; gainpos++;
+
+    /* lo band LAR coeffs */
+    for (n=0; n<ORDERLO; n++, poss++, pos++) {
+      tmp32 = LPCCoefQ17[pos] - WebRtcIsacfix_kMeansShapeQ17[0][poss]; //Q17
+      tmp32 = WEBRTC_SPL_MUL_16_32_RSFT16(17203, tmp32<<3); // tmp32 = 2.1*tmp32
+      tmpcoeffs_sQ17[poss] = tmp32; //Q17
+    }
+
+    /* hi band LAR coeffs */
+    for (n=0; n<ORDERHI; n++, poss++, pos++) {
+      tmp32 = LPCCoefQ17[pos] - WebRtcIsacfix_kMeansShapeQ17[0][poss]; //Q17
+      tmp32 = WEBRTC_SPL_MUL_16_32_RSFT16(14746, tmp32<<1); // tmp32 = 0.45*tmp32
+      tmpcoeffs_sQ17[poss] = tmp32; //Q17
+    }
+
+  }
+
+
+  /* KLT  */
+
+  /* left transform */
+  offsg = 0;
+  posg = 0;
+  for (j=0; j<SUBFRAMES; j++) {
+    // Q21 = Q6 * Q15
+    sumQQ = tmpcoeffs_gQ6[offsg] * WebRtcIsacfix_kT1GainQ15[0][0] +
+        tmpcoeffs_gQ6[offsg + 1] * WebRtcIsacfix_kT1GainQ15[0][2];
+    tmpcoeffs2_gQ21[posg] = sumQQ;
+    posg++;
+
+    // Q21 = Q6 * Q15
+    sumQQ = tmpcoeffs_gQ6[offsg] * WebRtcIsacfix_kT1GainQ15[0][1] +
+        tmpcoeffs_gQ6[offsg + 1] * WebRtcIsacfix_kT1GainQ15[0][3];
+    tmpcoeffs2_gQ21[posg] = sumQQ;
+    posg++;
+
+    offsg += 2;
+  }
+
+  WebRtcIsacfix_MatrixProduct1(WebRtcIsacfix_kT1ShapeQ15[0], tmpcoeffs_sQ17,
+      tmpcoeffs2_sQ17, kTIndexFactor4, kTIndexFactor1, kTInitCase0,
+      kTIndexStep1, kTIndexStep3, kTLoopCount3, kTLoopCount3, kTMatrix1_shift1);
+
+  /* right transform */
+  WebRtcIsacfix_MatrixProduct2(WebRtcIsacfix_kT2GainQ15[0], tmpcoeffs2_gQ21,
+                               tmpcoeffs_gQ17, kTIndexFactor3, kTIndexStep1);
+
+  WebRtcIsacfix_MatrixProduct1(WebRtcIsacfix_kT2ShapeQ15[0], tmpcoeffs2_sQ17,
+      tmpcoeffs_sQ17, kTIndexFactor1, kTIndexFactor3, kTInitCase1, kTIndexStep3,
+      kTIndexStep1, kTLoopCount1, kTLoopCount3, kTMatrix1_shift1);
+
+  /* quantize coefficients */
+
+  BitsQQ = 0;
+  for (k=0; k<KLT_ORDER_GAIN; k++) //ATTN: ok?
+  {
+    posQQ = WebRtcIsacfix_kSelIndGain[k];
+    pos2QQ= (int16_t)CalcLrIntQ(tmpcoeffs_gQ17[posQQ], 17);
+
+    index_gQQ[k] = pos2QQ + WebRtcIsacfix_kQuantMinGain[k]; //ATTN: ok?
+    if (index_gQQ[k] < 0) {
+      index_gQQ[k] = 0;
+    }
+    else if (index_gQQ[k] > WebRtcIsacfix_kMaxIndGain[k]) {
+      index_gQQ[k] = WebRtcIsacfix_kMaxIndGain[k];
+    }
+    index_ovr_gQQ[k] = WebRtcIsacfix_kOffsetGain[0][k]+index_gQQ[k];
+    posQQ = WebRtcIsacfix_kOfLevelsGain[0] + index_ovr_gQQ[k];
+
+    /* Save data for creation of multiple bitstreams */
+    if (encData != NULL) {
+      encData->LPCindex_g[KLT_ORDER_GAIN*encData->startIdx + k] = index_gQQ[k];
+    }
+
+    /* determine number of bits */
+    sumQQ = WebRtcIsacfix_kCodeLenGainQ11[posQQ]; //Q11
+    BitsQQ += sumQQ;
+  }
+
+  for (k=0; k<KLT_ORDER_SHAPE; k++) //ATTN: ok?
+  {
+    index_sQQ[k] = (int16_t)(CalcLrIntQ(tmpcoeffs_sQ17[WebRtcIsacfix_kSelIndShape[k]], 17) + WebRtcIsacfix_kQuantMinShape[k]); //ATTN: ok?
+
+    if (index_sQQ[k] < 0)
+      index_sQQ[k] = 0;
+    else if (index_sQQ[k] > WebRtcIsacfix_kMaxIndShape[k])
+      index_sQQ[k] = WebRtcIsacfix_kMaxIndShape[k];
+    index_ovr_sQQ[k] = WebRtcIsacfix_kOffsetShape[0][k]+index_sQQ[k];
+
+    posQQ = WebRtcIsacfix_kOfLevelsShape[0] + index_ovr_sQQ[k];
+    sumQQ = WebRtcIsacfix_kCodeLenShapeQ11[posQQ]; //Q11
+    BitsQQ += sumQQ;
+  }
+
+
+
+  *model = 0;
+  *sizeQ11=BitsQQ;
+
+  /* entropy coding of model number */
+  status = WebRtcIsacfix_EncHistMulti(streamdata, model, WebRtcIsacfix_kModelCdfPtr, 1);
+  if (status < 0) {
+    return status;
+  }
+
+  /* entropy coding of quantization indices - shape only */
+  status = WebRtcIsacfix_EncHistMulti(streamdata, index_sQQ, WebRtcIsacfix_kCdfShapePtr[0], KLT_ORDER_SHAPE);
+  if (status < 0) {
+    return status;
+  }
+
+  /* Save data for creation of multiple bitstreams */
+  if (encData != NULL) {
+    for (k=0; k<KLT_ORDER_SHAPE; k++)
+    {
+      encData->LPCindex_s[KLT_ORDER_SHAPE*encData->startIdx + k] = index_sQQ[k];
+    }
+  }
+  /* save the state of the bitstream object 'streamdata' for the possible bit-rate reduction */
+  transcodingParam->full         = streamdata->full;
+  transcodingParam->stream_index = streamdata->stream_index;
+  transcodingParam->streamval    = streamdata->streamval;
+  transcodingParam->W_upper      = streamdata->W_upper;
+  transcodingParam->beforeLastWord     = streamdata->stream[streamdata->stream_index-1];
+  transcodingParam->lastWord     = streamdata->stream[streamdata->stream_index];
+
+  /* entropy coding of index */
+  status = WebRtcIsacfix_EncHistMulti(streamdata, index_gQQ, WebRtcIsacfix_kCdfGainPtr[0], KLT_ORDER_GAIN);
+  if (status < 0) {
+    return status;
+  }
+
+  /* find quantization levels for shape coefficients */
+  for (k=0; k<KLT_ORDER_SHAPE; k++) {
+    tmpcoeffs_sQ17[WebRtcIsacfix_kSelIndShape[k]] = WEBRTC_SPL_MUL(128, WebRtcIsacfix_kLevelsShapeQ10[WebRtcIsacfix_kOfLevelsShape[0]+index_ovr_sQQ[k]]);
+
+  }
+  /* inverse KLT  */
+
+  /* left transform */  // Transpose matrix!
+  WebRtcIsacfix_MatrixProduct1(WebRtcIsacfix_kT1ShapeQ15[0], tmpcoeffs_sQ17,
+      tmpcoeffs2_sQ17, kTIndexFactor4, kTIndexFactor4, kTInitCase0,
+      kTIndexStep1, kTIndexStep1, kTLoopCount3, kTLoopCount3, kTMatrix1_shift1);
+
+  /* right transform */ // Transpose matrix
+  WebRtcIsacfix_MatrixProduct1(WebRtcIsacfix_kT2ShapeQ15[0], tmpcoeffs2_sQ17,
+      tmpcoeffs_sQ17, kTIndexFactor1, kTIndexFactor1, kTInitCase1, kTIndexStep3,
+      kTIndexStep2, kTLoopCount1, kTLoopCount3, kTMatrix1_shift1);
+
+  /* scaling, mean addition, and gain restoration */
+  poss = 0;pos=0;
+  for (k=0; k<SUBFRAMES; k++) {
+
+    /* lo band LAR coeffs */
+    for (n=0; n<ORDERLO; n++, pos++, poss++) {
+      tmp32 = WEBRTC_SPL_MUL_16_32_RSFT16(31208, tmpcoeffs_sQ17[poss]); // (Q16*Q17)>>16 = Q17, with 1/2.1 = 0.47619047619 ~= 31208 in Q16
+      tmp32 = tmp32 + WebRtcIsacfix_kMeansShapeQ17[0][poss]; // Q17+Q17 = Q17
+      LPCCoefQ17[pos] = tmp32;
+    }
+
+    /* hi band LAR coeffs */
+    for (n=0; n<ORDERHI; n++, pos++, poss++) {
+      // ((Q13*Q17)>>16)<<3 = Q17, with 1/0.45 = 2.222222222222 ~= 18204 in Q13
+      tmp32 = WEBRTC_SPL_MUL_16_32_RSFT16(18204, tmpcoeffs_sQ17[poss]) << 3;
+      tmp32 = tmp32 + WebRtcIsacfix_kMeansShapeQ17[0][poss]; // Q17+Q17 = Q17
+      LPCCoefQ17[pos] = tmp32;
+    }
+
+  }
+
+  //to update tmpcoeffs_gQ17 to the proper state
+  for (k=0; k<KLT_ORDER_GAIN; k++) {
+    tmpcoeffs_gQ17[WebRtcIsacfix_kSelIndGain[k]] = WebRtcIsacfix_kLevelsGainQ17[WebRtcIsacfix_kOfLevelsGain[0]+index_ovr_gQQ[k]];
+  }
+
+
+
+  /* find quantization levels for coefficients */
+
+  /* left transform */
+  offsg = 0;
+  posg = 0;
+  for (j=0; j<SUBFRAMES; j++) {
+    // (Q15 * Q17) >> (16 - 1) = Q17; Q17 << 4 = Q21.
+    sumQQ = (WEBRTC_SPL_MUL_16_32_RSFT16(WebRtcIsacfix_kT1GainQ15[0][0],
+                                         tmpcoeffs_gQ17[offsg]) << 1);
+    sumQQ += (WEBRTC_SPL_MUL_16_32_RSFT16(WebRtcIsacfix_kT1GainQ15[0][1],
+                                          tmpcoeffs_gQ17[offsg + 1]) << 1);
+    tmpcoeffs2_gQ21[posg] = sumQQ << 4;
+    posg++;
+
+    sumQQ = (WEBRTC_SPL_MUL_16_32_RSFT16(WebRtcIsacfix_kT1GainQ15[0][2],
+                                         tmpcoeffs_gQ17[offsg]) << 1);
+    sumQQ += (WEBRTC_SPL_MUL_16_32_RSFT16(WebRtcIsacfix_kT1GainQ15[0][3],
+                                          tmpcoeffs_gQ17[offsg + 1]) << 1);
+    tmpcoeffs2_gQ21[posg] = sumQQ << 4;
+    posg++;
+    offsg += 2;
+  }
+
+  /* right transform */ // Transpose matrix
+  WebRtcIsacfix_MatrixProduct2(WebRtcIsacfix_kT2GainQ15[0], tmpcoeffs2_gQ21,
+                               tmpcoeffs_gQ17, kTIndexFactor1, kTIndexStep2);
+
+  /* scaling, mean addition, and gain restoration */
+  posg = 0;
+  gainpos = 0;
+  for (k=0; k<2*SUBFRAMES; k++) {
+
+    // Divide by 4 and get Q17 to Q8, i.e. shift 2+9.
+    sumQQ16 = (int16_t)(tmpcoeffs_gQ17[posg] >> 11);
+    sumQQ16 += WebRtcIsacfix_kMeansGainQ8[0][posg];
+    sumQQ = CalcExpN(sumQQ16); // Q8 in and Q17 out
+    gain_lo_hiQ17[gainpos] = sumQQ; //Q17
+
+    gainpos++;
+    pos++;posg++;
+  }
+
+  return 0;
+}
+
+int WebRtcIsacfix_EstCodeLpcGain(int32_t *gain_lo_hiQ17,
+                                 Bitstr_enc *streamdata,
+                                 IsacSaveEncoderData* encData) {
+  int j, k;
+  int16_t posQQ, pos2QQ, gainpos;
+  int16_t posg;
+  int16_t index_gQQ[KLT_ORDER_GAIN];
+
+  int16_t tmpcoeffs_gQ6[KLT_ORDER_GAIN];
+  int32_t tmpcoeffs_gQ17[KLT_ORDER_GAIN];
+  int32_t tmpcoeffs2_gQ21[KLT_ORDER_GAIN];
+  int32_t sumQQ;
+  int status = 0;
+
+  /* write LAR coefficients to statistics file */
+  /* Save data for creation of multiple bitstreams (and transcoding) */
+  if (encData != NULL) {
+    for (k=0; k<KLT_ORDER_GAIN; k++) {
+      encData->LPCcoeffs_g[KLT_ORDER_GAIN*encData->startIdx + k] = gain_lo_hiQ17[k];
+    }
+  }
+
+  /* log gains, mean removal and scaling */
+  posg = 0; gainpos = 0;
+
+  for (k=0; k<SUBFRAMES; k++) {
+    /* log gains */
+
+    /* The input argument X to logN(X) is 2^17 times higher than the
+       input floating point argument Y to log(Y), since the X value
+       is a Q17 value. This can be compensated for after the call, by
+       subraction a value Z for each Q-step. One Q-step means that
+       X gets 2 times higher, i.e. Z = logN(2)*256 = 0.693147180559*256 =
+       177.445678 should be subtracted (since logN() returns a Q8 value).
+       For a X value in Q17, the value 177.445678*17 = 3017 should be
+       subtracted */
+    tmpcoeffs_gQ6[posg] = CalcLogN(gain_lo_hiQ17[gainpos])-3017; //Q8
+    tmpcoeffs_gQ6[posg] -= WebRtcIsacfix_kMeansGainQ8[0][posg]; //Q8, but Q6 after not-needed mult. by 4
+    posg++; gainpos++;
+
+    tmpcoeffs_gQ6[posg] = CalcLogN(gain_lo_hiQ17[gainpos])-3017; //Q8
+    tmpcoeffs_gQ6[posg] -= WebRtcIsacfix_kMeansGainQ8[0][posg]; //Q8, but Q6 after not-needed mult. by 4
+    posg++; gainpos++;
+  }
+
+
+  /* KLT  */
+
+  /* left transform */
+  posg = 0;
+  for (j=0; j<SUBFRAMES; j++) {
+      // Q21 = Q6 * Q15
+      sumQQ = tmpcoeffs_gQ6[j * 2] * WebRtcIsacfix_kT1GainQ15[0][0] +
+          tmpcoeffs_gQ6[j * 2 + 1] * WebRtcIsacfix_kT1GainQ15[0][2];
+      tmpcoeffs2_gQ21[posg] = sumQQ;
+      posg++;
+
+      sumQQ = tmpcoeffs_gQ6[j * 2] * WebRtcIsacfix_kT1GainQ15[0][1] +
+          tmpcoeffs_gQ6[j * 2 + 1] * WebRtcIsacfix_kT1GainQ15[0][3];
+      tmpcoeffs2_gQ21[posg] = sumQQ;
+      posg++;
+  }
+
+  /* right transform */
+  WebRtcIsacfix_MatrixProduct2(WebRtcIsacfix_kT2GainQ15[0], tmpcoeffs2_gQ21,
+                               tmpcoeffs_gQ17, kTIndexFactor3, kTIndexStep1);
+
+  /* quantize coefficients */
+
+  for (k=0; k<KLT_ORDER_GAIN; k++) //ATTN: ok?
+  {
+    posQQ = WebRtcIsacfix_kSelIndGain[k];
+    pos2QQ= (int16_t)CalcLrIntQ(tmpcoeffs_gQ17[posQQ], 17);
+
+    index_gQQ[k] = pos2QQ + WebRtcIsacfix_kQuantMinGain[k]; //ATTN: ok?
+    if (index_gQQ[k] < 0) {
+      index_gQQ[k] = 0;
+    }
+    else if (index_gQQ[k] > WebRtcIsacfix_kMaxIndGain[k]) {
+      index_gQQ[k] = WebRtcIsacfix_kMaxIndGain[k];
+    }
+
+    /* Save data for creation of multiple bitstreams */
+    if (encData != NULL) {
+      encData->LPCindex_g[KLT_ORDER_GAIN*encData->startIdx + k] = index_gQQ[k];
+    }
+  }
+
+  /* entropy coding of index */
+  status = WebRtcIsacfix_EncHistMulti(streamdata, index_gQQ, WebRtcIsacfix_kCdfGainPtr[0], KLT_ORDER_GAIN);
+  if (status < 0) {
+    return status;
+  }
+
+  return 0;
+}
+
+
+int WebRtcIsacfix_EncodeLpc(int32_t *gain_lo_hiQ17,
+                            int16_t *LPCCoef_loQ15,
+                            int16_t *LPCCoef_hiQ15,
+                            int16_t *model,
+                            int32_t *sizeQ11,
+                            Bitstr_enc *streamdata,
+                            IsacSaveEncoderData* encData,
+                            transcode_obj *transcodeParam)
+{
+  int status = 0;
+  int32_t larsQ17[KLT_ORDER_SHAPE]; // KLT_ORDER_SHAPE == (ORDERLO+ORDERHI)*SUBFRAMES
+  // = (6+12)*6 == 108
+
+  Poly2LarFix(LPCCoef_loQ15, ORDERLO, LPCCoef_hiQ15, ORDERHI, SUBFRAMES, larsQ17);
+
+  status = EstCodeLpcCoef(larsQ17, gain_lo_hiQ17, model, sizeQ11,
+                          streamdata, encData, transcodeParam);
+  if (status < 0) {
+    return (status);
+  }
+
+  Lar2polyFix(larsQ17, LPCCoef_loQ15, ORDERLO, LPCCoef_hiQ15, ORDERHI, SUBFRAMES);
+
+  return 0;
+}
+
+
+/* decode & dequantize RC */
+int WebRtcIsacfix_DecodeRcCoef(Bitstr_dec *streamdata, int16_t *RCQ15)
+{
+  int k, err;
+  int16_t index[AR_ORDER];
+
+  /* entropy decoding of quantization indices */
+  err = WebRtcIsacfix_DecHistOneStepMulti(index, streamdata, WebRtcIsacfix_kRcCdfPtr, WebRtcIsacfix_kRcInitInd, AR_ORDER);
+  if (err<0)  // error check
+    return err;
+
+  /* find quantization levels for reflection coefficients */
+  for (k=0; k<AR_ORDER; k++)
+  {
+    RCQ15[k] = *(WebRtcIsacfix_kRcLevPtr[k] + index[k]);
+  }
+
+  return 0;
+}
+
+
+
+/* quantize & code RC */
+int WebRtcIsacfix_EncodeRcCoef(int16_t *RCQ15, Bitstr_enc *streamdata)
+{
+  int k;
+  int16_t index[AR_ORDER];
+  int status;
+
+  /* quantize reflection coefficients (add noise feedback?) */
+  for (k=0; k<AR_ORDER; k++)
+  {
+    index[k] = WebRtcIsacfix_kRcInitInd[k];
+
+    if (RCQ15[k] > WebRtcIsacfix_kRcBound[index[k]])
+    {
+      while (RCQ15[k] > WebRtcIsacfix_kRcBound[index[k] + 1])
+        index[k]++;
+    }
+    else
+    {
+      while (RCQ15[k] < WebRtcIsacfix_kRcBound[--index[k]]) ;
+    }
+
+    RCQ15[k] = *(WebRtcIsacfix_kRcLevPtr[k] + index[k]);
+  }
+
+
+  /* entropy coding of quantization indices */
+  status = WebRtcIsacfix_EncHistMulti(streamdata, index, WebRtcIsacfix_kRcCdfPtr, AR_ORDER);
+
+  /* If error in WebRtcIsacfix_EncHistMulti(), status will be negative, otherwise 0 */
+  return status;
+}
+
+
+/* decode & dequantize squared Gain */
+int WebRtcIsacfix_DecodeGain2(Bitstr_dec *streamdata, int32_t *gainQ10)
+{
+  int err;
+  int16_t index;
+
+  /* entropy decoding of quantization index */
+  err = WebRtcIsacfix_DecHistOneStepMulti(
+      &index,
+      streamdata,
+      WebRtcIsacfix_kGainPtr,
+      WebRtcIsacfix_kGainInitInd,
+      1);
+  /* error check */
+  if (err<0) {
+    return err;
+  }
+
+  /* find quantization level */
+  *gainQ10 = WebRtcIsacfix_kGain2Lev[index];
+
+  return 0;
+}
+
+
+
+/* quantize & code squared Gain */
+int WebRtcIsacfix_EncodeGain2(int32_t *gainQ10, Bitstr_enc *streamdata)
+{
+  int16_t index;
+  int status = 0;
+
+  /* find quantization index */
+  index = WebRtcIsacfix_kGainInitInd[0];
+  if (*gainQ10 > WebRtcIsacfix_kGain2Bound[index])
+  {
+    while (*gainQ10 > WebRtcIsacfix_kGain2Bound[index + 1])
+      index++;
+  }
+  else
+  {
+    while (*gainQ10 < WebRtcIsacfix_kGain2Bound[--index]) ;
+  }
+
+  /* dequantize */
+  *gainQ10 = WebRtcIsacfix_kGain2Lev[index];
+
+  /* entropy coding of quantization index */
+  status = WebRtcIsacfix_EncHistMulti(streamdata, &index, WebRtcIsacfix_kGainPtr, 1);
+
+  /* If error in WebRtcIsacfix_EncHistMulti(), status will be negative, otherwise 0 */
+  return status;
+}
+
+
+/* code and decode Pitch Gains and Lags functions */
+
+/* decode & dequantize Pitch Gains */
+int WebRtcIsacfix_DecodePitchGain(Bitstr_dec *streamdata, int16_t *PitchGains_Q12)
+{
+  int err;
+  int16_t index_comb;
+  const uint16_t *pitch_gain_cdf_ptr[1];
+
+  /* entropy decoding of quantization indices */
+  *pitch_gain_cdf_ptr = WebRtcIsacfix_kPitchGainCdf;
+  err = WebRtcIsacfix_DecHistBisectMulti(&index_comb, streamdata, pitch_gain_cdf_ptr, WebRtcIsacfix_kCdfTableSizeGain, 1);
+  /* error check, Q_mean_Gain.. tables are of size 144 */
+  if ((err < 0) || (index_comb < 0) || (index_comb >= 144))
+    return -ISAC_RANGE_ERROR_DECODE_PITCH_GAIN;
+
+  /* unquantize back to pitch gains by table look-up */
+  PitchGains_Q12[0] = WebRtcIsacfix_kPitchGain1[index_comb];
+  PitchGains_Q12[1] = WebRtcIsacfix_kPitchGain2[index_comb];
+  PitchGains_Q12[2] = WebRtcIsacfix_kPitchGain3[index_comb];
+  PitchGains_Q12[3] = WebRtcIsacfix_kPitchGain4[index_comb];
+
+  return 0;
+}
+
+
+/* quantize & code Pitch Gains */
+int WebRtcIsacfix_EncodePitchGain(int16_t* PitchGains_Q12,
+                                  Bitstr_enc* streamdata,
+                                  IsacSaveEncoderData* encData) {
+  int k,j;
+  int16_t SQ15[PITCH_SUBFRAMES];
+  int16_t index[3];
+  int16_t index_comb;
+  const uint16_t *pitch_gain_cdf_ptr[1];
+  int32_t CQ17;
+  int status = 0;
+
+
+  /* get the approximate arcsine (almost linear)*/
+  for (k=0; k<PITCH_SUBFRAMES; k++)
+    SQ15[k] = (int16_t)(PitchGains_Q12[k] * 33 >> 2);  // Q15
+
+
+  /* find quantization index; only for the first three transform coefficients */
+  for (k=0; k<3; k++)
+  {
+    /*  transform */
+    CQ17=0;
+    for (j=0; j<PITCH_SUBFRAMES; j++) {
+      CQ17 += WebRtcIsacfix_kTransform[k][j] * SQ15[j] >> 10;  // Q17
+    }
+
+    index[k] = (int16_t)((CQ17 + 8192)>>14); // Rounding and scaling with stepsize (=1/0.125=8)
+
+    /* check that the index is not outside the boundaries of the table */
+    if (index[k] < WebRtcIsacfix_kLowerlimiGain[k]) index[k] = WebRtcIsacfix_kLowerlimiGain[k];
+    else if (index[k] > WebRtcIsacfix_kUpperlimitGain[k]) index[k] = WebRtcIsacfix_kUpperlimitGain[k];
+    index[k] -= WebRtcIsacfix_kLowerlimiGain[k];
+  }
+
+  /* calculate unique overall index */
+  index_comb = (int16_t)(WEBRTC_SPL_MUL(WebRtcIsacfix_kMultsGain[0], index[0]) +
+                               WEBRTC_SPL_MUL(WebRtcIsacfix_kMultsGain[1], index[1]) + index[2]);
+
+  /* unquantize back to pitch gains by table look-up */
+  // (Y)
+  PitchGains_Q12[0] = WebRtcIsacfix_kPitchGain1[index_comb];
+  PitchGains_Q12[1] = WebRtcIsacfix_kPitchGain2[index_comb];
+  PitchGains_Q12[2] = WebRtcIsacfix_kPitchGain3[index_comb];
+  PitchGains_Q12[3] = WebRtcIsacfix_kPitchGain4[index_comb];
+
+
+  /* entropy coding of quantization pitch gains */
+  *pitch_gain_cdf_ptr = WebRtcIsacfix_kPitchGainCdf;
+  status = WebRtcIsacfix_EncHistMulti(streamdata, &index_comb, pitch_gain_cdf_ptr, 1);
+  if (status < 0) {
+    return status;
+  }
+
+  /* Save data for creation of multiple bitstreams */
+  if (encData != NULL) {
+    encData->pitchGain_index[encData->startIdx] = index_comb;
+  }
+
+  return 0;
+}
+
+
+
+/* Pitch LAG */
+
+
+/* decode & dequantize Pitch Lags */
+int WebRtcIsacfix_DecodePitchLag(Bitstr_dec *streamdata,
+                                 int16_t *PitchGain_Q12,
+                                 int16_t *PitchLags_Q7)
+{
+  int k, err;
+  int16_t index[PITCH_SUBFRAMES];
+  const int16_t *mean_val2Q10, *mean_val4Q10;
+
+  const int16_t *lower_limit;
+  const uint16_t *init_index;
+  const uint16_t *cdf_size;
+  const uint16_t **cdf;
+
+  int32_t meangainQ12;
+  int32_t CQ11, CQ10,tmp32a,tmp32b;
+  int16_t shft;
+
+  meangainQ12=0;
+  for (k = 0; k < 4; k++)
+    meangainQ12 += PitchGain_Q12[k];
+
+  meangainQ12 >>= 2;  // Get average.
+
+  /* voicing classificiation */
+  if (meangainQ12 <= 819) {                 // mean_gain < 0.2
+    shft = -1;        // StepSize=2.0;
+    cdf = WebRtcIsacfix_kPitchLagPtrLo;
+    cdf_size = WebRtcIsacfix_kPitchLagSizeLo;
+    mean_val2Q10 = WebRtcIsacfix_kMeanLag2Lo;
+    mean_val4Q10 = WebRtcIsacfix_kMeanLag4Lo;
+    lower_limit = WebRtcIsacfix_kLowerLimitLo;
+    init_index = WebRtcIsacfix_kInitIndLo;
+  } else if (meangainQ12 <= 1638) {            // mean_gain < 0.4
+    shft = 0;        // StepSize=1.0;
+    cdf = WebRtcIsacfix_kPitchLagPtrMid;
+    cdf_size = WebRtcIsacfix_kPitchLagSizeMid;
+    mean_val2Q10 = WebRtcIsacfix_kMeanLag2Mid;
+    mean_val4Q10 = WebRtcIsacfix_kMeanLag4Mid;
+    lower_limit = WebRtcIsacfix_kLowerLimitMid;
+    init_index = WebRtcIsacfix_kInitIndMid;
+  } else {
+    shft = 1;        // StepSize=0.5;
+    cdf = WebRtcIsacfix_kPitchLagPtrHi;
+    cdf_size = WebRtcIsacfix_kPitchLagSizeHi;
+    mean_val2Q10 = WebRtcIsacfix_kMeanLag2Hi;
+    mean_val4Q10 = WebRtcIsacfix_kMeanLag4Hi;
+    lower_limit = WebRtcIsacfix_kLowerLimitHi;
+    init_index = WebRtcIsacfix_kInitIndHi;
+  }
+
+  /* entropy decoding of quantization indices */
+  err = WebRtcIsacfix_DecHistBisectMulti(index, streamdata, cdf, cdf_size, 1);
+  if ((err<0) || (index[0]<0))  // error check
+    return -ISAC_RANGE_ERROR_DECODE_PITCH_LAG;
+
+  err = WebRtcIsacfix_DecHistOneStepMulti(index+1, streamdata, cdf+1, init_index, 3);
+  if (err<0)  // error check
+    return -ISAC_RANGE_ERROR_DECODE_PITCH_LAG;
+
+
+  /* unquantize back to transform coefficients and do the inverse transform: S = T'*C */
+  CQ11 = ((int32_t)index[0] + lower_limit[0]);  // Q0
+  CQ11 = WEBRTC_SPL_SHIFT_W32(CQ11,11-shft); // Scale with StepSize, Q11
+  for (k=0; k<PITCH_SUBFRAMES; k++) {
+    tmp32a =  WEBRTC_SPL_MUL_16_32_RSFT11(WebRtcIsacfix_kTransform[0][k], CQ11);
+    PitchLags_Q7[k] = (int16_t)(tmp32a >> 5);
+  }
+
+  CQ10 = mean_val2Q10[index[1]];
+  for (k=0; k<PITCH_SUBFRAMES; k++) {
+    tmp32b = WebRtcIsacfix_kTransform[1][k] * (int16_t)CQ10 >> 10;
+    PitchLags_Q7[k] += (int16_t)(tmp32b >> 5);
+  }
+
+  CQ10 = mean_val4Q10[index[3]];
+  for (k=0; k<PITCH_SUBFRAMES; k++) {
+    tmp32b = WebRtcIsacfix_kTransform[3][k] * (int16_t)CQ10 >> 10;
+    PitchLags_Q7[k] += (int16_t)(tmp32b >> 5);
+  }
+
+  return 0;
+}
+
+
+
+/* quantize & code Pitch Lags */
+int WebRtcIsacfix_EncodePitchLag(int16_t* PitchLagsQ7,
+                                 int16_t* PitchGain_Q12,
+                                 Bitstr_enc* streamdata,
+                                 IsacSaveEncoderData* encData) {
+  int k, j;
+  int16_t index[PITCH_SUBFRAMES];
+  int32_t meangainQ12, CQ17;
+  int32_t CQ11, CQ10,tmp32a;
+
+  const int16_t *mean_val2Q10,*mean_val4Q10;
+  const int16_t *lower_limit, *upper_limit;
+  const uint16_t **cdf;
+  int16_t shft, tmp16b;
+  int32_t tmp32b;
+  int status = 0;
+
+  /* compute mean pitch gain */
+  meangainQ12=0;
+  for (k = 0; k < 4; k++)
+    meangainQ12 += PitchGain_Q12[k];
+
+  meangainQ12 >>= 2;
+
+  /* Save data for creation of multiple bitstreams */
+  if (encData != NULL) {
+    encData->meanGain[encData->startIdx] = meangainQ12;
+  }
+
+  /* voicing classificiation */
+  if (meangainQ12 <= 819) {                 // mean_gain < 0.2
+    shft = -1;        // StepSize=2.0;
+    cdf = WebRtcIsacfix_kPitchLagPtrLo;
+    mean_val2Q10 = WebRtcIsacfix_kMeanLag2Lo;
+    mean_val4Q10 = WebRtcIsacfix_kMeanLag4Lo;
+    lower_limit = WebRtcIsacfix_kLowerLimitLo;
+    upper_limit = WebRtcIsacfix_kUpperLimitLo;
+  } else if (meangainQ12 <= 1638) {            // mean_gain < 0.4
+    shft = 0;        // StepSize=1.0;
+    cdf = WebRtcIsacfix_kPitchLagPtrMid;
+    mean_val2Q10 = WebRtcIsacfix_kMeanLag2Mid;
+    mean_val4Q10 = WebRtcIsacfix_kMeanLag4Mid;
+    lower_limit = WebRtcIsacfix_kLowerLimitMid;
+    upper_limit = WebRtcIsacfix_kUpperLimitMid;
+  } else {
+    shft = 1;        // StepSize=0.5;
+    cdf = WebRtcIsacfix_kPitchLagPtrHi;
+    mean_val2Q10 = WebRtcIsacfix_kMeanLag2Hi;
+    mean_val4Q10 = WebRtcIsacfix_kMeanLag4Hi;
+    lower_limit = WebRtcIsacfix_kLowerLimitHi;
+    upper_limit = WebRtcIsacfix_kUpperLimitHi;
+  }
+
+  /* find quantization index */
+  for (k=0; k<4; k++)
+  {
+    /*  transform */
+    CQ17=0;
+    for (j=0; j<PITCH_SUBFRAMES; j++)
+      CQ17 += WebRtcIsacfix_kTransform[k][j] * PitchLagsQ7[j] >> 2;  // Q17
+
+    CQ17 = WEBRTC_SPL_SHIFT_W32(CQ17,shft); // Scale with StepSize
+
+    /* quantize */
+    tmp16b = (int16_t)((CQ17 + 65536) >> 17);
+    index[k] =  tmp16b;
+
+    /* check that the index is not outside the boundaries of the table */
+    if (index[k] < lower_limit[k]) index[k] = lower_limit[k];
+    else if (index[k] > upper_limit[k]) index[k] = upper_limit[k];
+    index[k] -= lower_limit[k];
+
+    /* Save data for creation of multiple bitstreams */
+    if(encData != NULL) {
+      encData->pitchIndex[PITCH_SUBFRAMES*encData->startIdx + k] = index[k];
+    }
+  }
+
+  /* unquantize back to transform coefficients and do the inverse transform: S = T'*C */
+  CQ11 = (index[0] + lower_limit[0]);  // Q0
+  CQ11 = WEBRTC_SPL_SHIFT_W32(CQ11,11-shft); // Scale with StepSize, Q11
+
+  for (k=0; k<PITCH_SUBFRAMES; k++) {
+    tmp32a =  WEBRTC_SPL_MUL_16_32_RSFT11(WebRtcIsacfix_kTransform[0][k], CQ11); // Q12
+    PitchLagsQ7[k] = (int16_t)(tmp32a >> 5);  // Q7.
+  }
+
+  CQ10 = mean_val2Q10[index[1]];
+  for (k=0; k<PITCH_SUBFRAMES; k++) {
+    tmp32b = WebRtcIsacfix_kTransform[1][k] * (int16_t)CQ10 >> 10;
+    PitchLagsQ7[k] += (int16_t)(tmp32b >> 5);  // Q7.
+  }
+
+  CQ10 = mean_val4Q10[index[3]];
+  for (k=0; k<PITCH_SUBFRAMES; k++) {
+    tmp32b = WebRtcIsacfix_kTransform[3][k] * (int16_t)CQ10 >> 10;
+    PitchLagsQ7[k] += (int16_t)(tmp32b >> 5);  // Q7.
+  }
+
+  /* entropy coding of quantization pitch lags */
+  status = WebRtcIsacfix_EncHistMulti(streamdata, index, cdf, PITCH_SUBFRAMES);
+
+  /* If error in WebRtcIsacfix_EncHistMulti(), status will be negative, otherwise 0 */
+  return status;
+}
+
+
+
+/* Routines for inband signaling of bandwitdh estimation */
+/* Histograms based on uniform distribution of indices */
+/* Move global variables later! */
+
+
+/* cdf array for frame length indicator */
+const uint16_t kFrameLenCdf[4] = {
+  0, 21845, 43690, 65535};
+
+/* pointer to cdf array for frame length indicator */
+const uint16_t * const kFrameLenCdfPtr[1] = {kFrameLenCdf};
+
+/* initial cdf index for decoder of frame length indicator */
+const uint16_t kFrameLenInitIndex[1] = {1};
+
+
+int WebRtcIsacfix_DecodeFrameLen(Bitstr_dec *streamdata,
+                                 size_t *framesamples)
+{
+
+  int err;
+  int16_t frame_mode;
+
+  err = 0;
+  /* entropy decoding of frame length [1:30ms,2:60ms] */
+  err = WebRtcIsacfix_DecHistOneStepMulti(&frame_mode, streamdata, kFrameLenCdfPtr, kFrameLenInitIndex, 1);
+  if (err<0)  // error check
+    return -ISAC_RANGE_ERROR_DECODE_FRAME_LENGTH;
+
+  switch(frame_mode) {
+    case 1:
+      *framesamples = 480; /* 30ms */
+      break;
+    case 2:
+      *framesamples = 960; /* 60ms */
+      break;
+    default:
+      err = -ISAC_DISALLOWED_FRAME_MODE_DECODER;
+  }
+
+  return err;
+}
+
+
+int WebRtcIsacfix_EncodeFrameLen(int16_t framesamples, Bitstr_enc *streamdata) {
+
+  int status;
+  int16_t frame_mode;
+
+  status = 0;
+  frame_mode = 0;
+  /* entropy coding of frame length [1:480 samples,2:960 samples] */
+  switch(framesamples) {
+    case 480:
+      frame_mode = 1;
+      break;
+    case 960:
+      frame_mode = 2;
+      break;
+    default:
+      status = - ISAC_DISALLOWED_FRAME_MODE_ENCODER;
+  }
+
+  if (status < 0)
+    return status;
+
+  status = WebRtcIsacfix_EncHistMulti(streamdata, &frame_mode, kFrameLenCdfPtr, 1);
+
+  return status;
+}
+
+/* cdf array for estimated bandwidth */
+const uint16_t kBwCdf[25] = {
+  0, 2731, 5461, 8192, 10923, 13653, 16384, 19114, 21845, 24576, 27306, 30037,
+  32768, 35498, 38229, 40959, 43690, 46421, 49151, 51882, 54613, 57343, 60074,
+  62804, 65535};
+
+/* pointer to cdf array for estimated bandwidth */
+const uint16_t * const kBwCdfPtr[1] = {kBwCdf};
+
+/* initial cdf index for decoder of estimated bandwidth*/
+const uint16_t kBwInitIndex[1] = {7};
+
+
+int WebRtcIsacfix_DecodeSendBandwidth(Bitstr_dec *streamdata, int16_t *BWno) {
+
+  int err;
+  int16_t BWno32;
+
+  /* entropy decoding of sender's BW estimation [0..23] */
+  err = WebRtcIsacfix_DecHistOneStepMulti(&BWno32, streamdata, kBwCdfPtr, kBwInitIndex, 1);
+  if (err<0)  // error check
+    return -ISAC_RANGE_ERROR_DECODE_BANDWIDTH;
+  *BWno = (int16_t)BWno32;
+  return err;
+
+}
+
+
+int WebRtcIsacfix_EncodeReceiveBandwidth(int16_t *BWno, Bitstr_enc *streamdata)
+{
+  int status = 0;
+  /* entropy encoding of receiver's BW estimation [0..23] */
+  status = WebRtcIsacfix_EncHistMulti(streamdata, BWno, kBwCdfPtr, 1);
+
+  return status;
+}
+
+/* estimate codel length of LPC Coef */
+void WebRtcIsacfix_TranscodeLpcCoef(int32_t *gain_lo_hiQ17,
+                                    int16_t *index_gQQ) {
+  int j, k;
+  int16_t posQQ, pos2QQ;
+  int16_t posg, offsg, gainpos;
+  int32_t tmpcoeffs_gQ6[KLT_ORDER_GAIN];
+  int32_t tmpcoeffs_gQ17[KLT_ORDER_GAIN];
+  int32_t tmpcoeffs2_gQ21[KLT_ORDER_GAIN];
+  int32_t sumQQ;
+
+
+  /* log gains, mean removal and scaling */
+  posg = 0; gainpos=0;
+
+  for (k=0; k<SUBFRAMES; k++) {
+    /* log gains */
+
+    /* The input argument X to logN(X) is 2^17 times higher than the
+       input floating point argument Y to log(Y), since the X value
+       is a Q17 value. This can be compensated for after the call, by
+       subraction a value Z for each Q-step. One Q-step means that
+       X gets 2 times higher, i.e. Z = logN(2)*256 = 0.693147180559*256 =
+       177.445678 should be subtracted (since logN() returns a Q8 value).
+       For a X value in Q17, the value 177.445678*17 = 3017 should be
+       subtracted */
+    tmpcoeffs_gQ6[posg] = CalcLogN(gain_lo_hiQ17[gainpos])-3017; //Q8
+    tmpcoeffs_gQ6[posg] -= WebRtcIsacfix_kMeansGainQ8[0][posg]; //Q8, but Q6 after not-needed mult. by 4
+    posg++; gainpos++;
+
+    tmpcoeffs_gQ6[posg] = CalcLogN(gain_lo_hiQ17[gainpos])-3017; //Q8
+    tmpcoeffs_gQ6[posg] -= WebRtcIsacfix_kMeansGainQ8[0][posg]; //Q8, but Q6 after not-needed mult. by 4
+    posg++; gainpos++;
+
+  }
+
+
+  /* KLT  */
+
+  /* left transform */
+  for (j = 0, offsg = 0; j < SUBFRAMES; j++, offsg += 2) {
+    // Q21 = Q6 * Q15
+    sumQQ = tmpcoeffs_gQ6[offsg] * WebRtcIsacfix_kT1GainQ15[0][0] +
+        tmpcoeffs_gQ6[offsg + 1] * WebRtcIsacfix_kT1GainQ15[0][2];
+    tmpcoeffs2_gQ21[offsg] = sumQQ;
+
+    // Q21 = Q6 * Q15
+    sumQQ = tmpcoeffs_gQ6[offsg] * WebRtcIsacfix_kT1GainQ15[0][1] +
+        tmpcoeffs_gQ6[offsg + 1] * WebRtcIsacfix_kT1GainQ15[0][3];
+    tmpcoeffs2_gQ21[offsg + 1] = sumQQ;
+  }
+
+  /* right transform */
+  WebRtcIsacfix_MatrixProduct2(WebRtcIsacfix_kT2GainQ15[0], tmpcoeffs2_gQ21,
+                               tmpcoeffs_gQ17, kTIndexFactor3, kTIndexStep1);
+
+  /* quantize coefficients */
+  for (k=0; k<KLT_ORDER_GAIN; k++) //ATTN: ok?
+  {
+    posQQ = WebRtcIsacfix_kSelIndGain[k];
+    pos2QQ= (int16_t)CalcLrIntQ(tmpcoeffs_gQ17[posQQ], 17);
+
+    index_gQQ[k] = pos2QQ + WebRtcIsacfix_kQuantMinGain[k]; //ATTN: ok?
+    if (index_gQQ[k] < 0) {
+      index_gQQ[k] = 0;
+    }
+    else if (index_gQQ[k] > WebRtcIsacfix_kMaxIndGain[k]) {
+      index_gQQ[k] = WebRtcIsacfix_kMaxIndGain[k];
+    }
+  }
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h b/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h
new file mode 100644
index 0000000..ba7bcde
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h
@@ -0,0 +1,189 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * entropy_coding.h
+ *
+ * This header file contains all of the functions used to arithmetically
+ * encode the iSAC bistream
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ENTROPY_CODING_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ENTROPY_CODING_H_
+
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+/* decode complex spectrum (return number of bytes in stream) */
+int WebRtcIsacfix_DecodeSpec(Bitstr_dec  *streamdata,
+                             int16_t *frQ7,
+                             int16_t *fiQ7,
+                             int16_t AvgPitchGain_Q12);
+
+/* encode complex spectrum */
+int WebRtcIsacfix_EncodeSpec(const int16_t *fr,
+                             const int16_t *fi,
+                             Bitstr_enc *streamdata,
+                             int16_t AvgPitchGain_Q12);
+
+
+/* decode & dequantize LPC Coef */
+int WebRtcIsacfix_DecodeLpcCoef(Bitstr_dec  *streamdata,
+                                int32_t *LPCCoefQ17,
+                                int32_t *gain_lo_hiQ17,
+                                int16_t *outmodel);
+
+int WebRtcIsacfix_DecodeLpc(int32_t *gain_lo_hiQ17,
+                            int16_t *LPCCoef_loQ15,
+                            int16_t *LPCCoef_hiQ15,
+                            Bitstr_dec  *streamdata,
+                            int16_t *outmodel);
+
+/* quantize & code LPC Coef */
+int WebRtcIsacfix_EncodeLpc(int32_t *gain_lo_hiQ17,
+                            int16_t *LPCCoef_loQ15,
+                            int16_t *LPCCoef_hiQ15,
+                            int16_t *model,
+                            int32_t *sizeQ11,
+                            Bitstr_enc *streamdata,
+                            IsacSaveEncoderData* encData,
+                            transcode_obj *transcodeParam);
+
+int WebRtcIsacfix_EstCodeLpcGain(int32_t *gain_lo_hiQ17,
+                                 Bitstr_enc *streamdata,
+                                 IsacSaveEncoderData* encData);
+/* decode & dequantize RC */
+int WebRtcIsacfix_DecodeRcCoef(Bitstr_dec *streamdata,
+                               int16_t *RCQ15);
+
+/* quantize & code RC */
+int WebRtcIsacfix_EncodeRcCoef(int16_t *RCQ15,
+                               Bitstr_enc *streamdata);
+
+/* decode & dequantize squared Gain */
+int WebRtcIsacfix_DecodeGain2(Bitstr_dec *streamdata,
+                              int32_t *Gain2);
+
+/* quantize & code squared Gain (input is squared gain) */
+int WebRtcIsacfix_EncodeGain2(int32_t *gain2,
+                              Bitstr_enc *streamdata);
+
+int WebRtcIsacfix_EncodePitchGain(int16_t *PitchGains_Q12,
+                                  Bitstr_enc *streamdata,
+                                  IsacSaveEncoderData* encData);
+
+int WebRtcIsacfix_EncodePitchLag(int16_t *PitchLagQ7,
+                                 int16_t *PitchGain_Q12,
+                                 Bitstr_enc *streamdata,
+                                 IsacSaveEncoderData* encData);
+
+int WebRtcIsacfix_DecodePitchGain(Bitstr_dec *streamdata,
+                                  int16_t *PitchGain_Q12);
+
+int WebRtcIsacfix_DecodePitchLag(Bitstr_dec *streamdata,
+                                 int16_t *PitchGain_Q12,
+                                 int16_t *PitchLagQ7);
+
+int WebRtcIsacfix_DecodeFrameLen(Bitstr_dec *streamdata,
+                                 size_t *framelength);
+
+
+int WebRtcIsacfix_EncodeFrameLen(int16_t framelength,
+                                 Bitstr_enc *streamdata);
+
+int WebRtcIsacfix_DecodeSendBandwidth(Bitstr_dec *streamdata,
+                                      int16_t *BWno);
+
+
+int WebRtcIsacfix_EncodeReceiveBandwidth(int16_t *BWno,
+                                         Bitstr_enc *streamdata);
+
+void WebRtcIsacfix_TranscodeLpcCoef(int32_t *tmpcoeffs_gQ6,
+                                    int16_t *index_gQQ);
+
+// Pointer functions for LPC transforms.
+
+typedef void (*MatrixProduct1)(const int16_t matrix0[],
+                               const int32_t matrix1[],
+                               int32_t matrix_product[],
+                               const int matrix1_index_factor1,
+                               const int matrix0_index_factor1,
+                               const int matrix1_index_init_case,
+                               const int matrix1_index_step,
+                               const int matrix0_index_step,
+                               const int inner_loop_count,
+                               const int mid_loop_count,
+                               const int shift);
+typedef void (*MatrixProduct2)(const int16_t matrix0[],
+                               const int32_t matrix1[],
+                               int32_t matrix_product[],
+                               const int matrix0_index_factor,
+                               const int matrix0_index_step);
+
+extern MatrixProduct1 WebRtcIsacfix_MatrixProduct1;
+extern MatrixProduct2 WebRtcIsacfix_MatrixProduct2;
+
+void WebRtcIsacfix_MatrixProduct1C(const int16_t matrix0[],
+                                   const int32_t matrix1[],
+                                   int32_t matrix_product[],
+                                   const int matrix1_index_factor1,
+                                   const int matrix0_index_factor1,
+                                   const int matrix1_index_init_case,
+                                   const int matrix1_index_step,
+                                   const int matrix0_index_step,
+                                   const int inner_loop_count,
+                                   const int mid_loop_count,
+                                   const int shift);
+void WebRtcIsacfix_MatrixProduct2C(const int16_t matrix0[],
+                                   const int32_t matrix1[],
+                                   int32_t matrix_product[],
+                                   const int matrix0_index_factor,
+                                   const int matrix0_index_step);
+
+#if defined(WEBRTC_HAS_NEON)
+void WebRtcIsacfix_MatrixProduct1Neon(const int16_t matrix0[],
+                                      const int32_t matrix1[],
+                                      int32_t matrix_product[],
+                                      const int matrix1_index_factor1,
+                                      const int matrix0_index_factor1,
+                                      const int matrix1_index_init_case,
+                                      const int matrix1_index_step,
+                                      const int matrix0_index_step,
+                                      const int inner_loop_count,
+                                      const int mid_loop_count,
+                                      const int shift);
+void WebRtcIsacfix_MatrixProduct2Neon(const int16_t matrix0[],
+                                      const int32_t matrix1[],
+                                      int32_t matrix_product[],
+                                      const int matrix0_index_factor,
+                                      const int matrix0_index_step);
+#endif
+
+#if defined(MIPS32_LE)
+void WebRtcIsacfix_MatrixProduct1MIPS(const int16_t matrix0[],
+                                      const int32_t matrix1[],
+                                      int32_t matrix_product[],
+                                      const int matrix1_index_factor1,
+                                      const int matrix0_index_factor1,
+                                      const int matrix1_index_init_case,
+                                      const int matrix1_index_step,
+                                      const int matrix0_index_step,
+                                      const int inner_loop_count,
+                                      const int mid_loop_count,
+                                      const int shift);
+
+void WebRtcIsacfix_MatrixProduct2MIPS(const int16_t matrix0[],
+                                      const int32_t matrix1[],
+                                      int32_t matrix_product[],
+                                      const int matrix0_index_factor,
+                                      const int matrix0_index_step);
+#endif
+
+#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ENTROPY_CODING_H_
diff --git a/modules/audio_coding/codecs/isac/fix/source/entropy_coding_mips.c b/modules/audio_coding/codecs/isac/fix/source/entropy_coding_mips.c
new file mode 100644
index 0000000..a66a43e
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/entropy_coding_mips.c
@@ -0,0 +1,249 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+// MIPS optimization of the function WebRtcIsacfix_MatrixProduct1.
+// Bit-exact with the function WebRtcIsacfix_MatrixProduct1C from
+// entropy_coding.c file.
+void WebRtcIsacfix_MatrixProduct1MIPS(const int16_t matrix0[],
+                                      const int32_t matrix1[],
+                                      int32_t matrix_product[],
+                                      const int matrix1_index_factor1,
+                                      const int matrix0_index_factor1,
+                                      const int matrix1_index_init_case,
+                                      const int matrix1_index_step,
+                                      const int matrix0_index_step,
+                                      const int inner_loop_count,
+                                      const int mid_loop_count,
+                                      const int shift) {
+  if (matrix1_index_init_case != 0) {
+    int j = SUBFRAMES, k = 0, n = 0;
+    int32_t r0, r1, r2, sum32;
+    int32_t* product_start = matrix_product;
+    int32_t* product_ptr;
+    const uint32_t product_step = 4 * mid_loop_count;
+    const uint32_t matrix0_step = 2 * matrix0_index_step;
+    const uint32_t matrix1_step = 4 * matrix1_index_step;
+    const uint32_t matrix0_step2 = 2 * matrix0_index_factor1;
+    const uint32_t matrix1_step2 = 4 * matrix1_index_factor1;
+    const int16_t* matrix0_start = matrix0;
+    const int32_t* matrix1_start = matrix1;
+    int16_t* matrix0_ptr;
+    int32_t* matrix1_ptr;
+
+    __asm __volatile (
+      ".set     push                                                       \n\t"
+      ".set     noreorder                                                  \n\t"
+     "1:                                                                   \n\t"
+      "addu     %[product_ptr],     %[product_start],     $0               \n\t"
+      "addu     %[k],               %[product_step],      $0               \n\t"
+      "addiu    %[j],               %[j],                 -1               \n\t"
+      "addu     %[matrix1_start],   %[matrix1],           $0               \n\t"
+     "2:                                                                   \n\t"
+      "addu     %[matrix1_ptr],     %[matrix1_start],     $0               \n\t"
+      "addu     %[matrix0_ptr],     %[matrix0_start],     $0               \n\t"
+      "addu     %[n],               %[inner_loop_count],  $0               \n\t"
+      "mul      %[sum32],           $0,                   $0               \n\t"
+     "3:                                                                   \n\t"
+      "lw       %[r0],              0(%[matrix1_ptr])                      \n\t"
+      "lh       %[r1],              0(%[matrix0_ptr])                      \n\t"
+      "addu     %[matrix1_ptr],     %[matrix1_ptr],       %[matrix1_step]  \n\t"
+      "sllv     %[r0],              %[r0],                %[shift]         \n\t"
+      "andi     %[r2],              %[r0],                0xffff           \n\t"
+      "sra      %[r2],              %[r2],                1                \n\t"
+      "mul      %[r2],              %[r2],                %[r1]            \n\t"
+      "sra      %[r0],              %[r0],                16               \n\t"
+      "mul      %[r0],              %[r0],                %[r1]            \n\t"
+      "addu     %[matrix0_ptr],     %[matrix0_ptr],       %[matrix0_step]  \n\t"
+      "addiu    %[n],               %[n],                 -1               \n\t"
+#if defined(MIPS_DSP_R1_LE)
+      "shra_r.w %[r2],              %[r2],                15               \n\t"
+#else
+      "addiu    %[r2],              %[r2],                0x4000           \n\t"
+      "sra      %[r2],              %[r2],                15               \n\t"
+#endif
+      "addu     %[sum32],           %[sum32],             %[r2]            \n\t"
+      "bgtz     %[n],               3b                                     \n\t"
+      " addu    %[sum32],           %[sum32],             %[r0]            \n\t"
+      "addiu    %[k],               %[k],                 -4               \n\t"
+      "addu     %[matrix1_start],   %[matrix1_start],     %[matrix1_step2] \n\t"
+      "sw       %[sum32],           0(%[product_ptr])                      \n\t"
+      "bgtz     %[k],               2b                                     \n\t"
+      " addiu   %[product_ptr],     %[product_ptr],       4                \n\t"
+      "addu     %[matrix0_start],   %[matrix0_start],     %[matrix0_step2] \n\t"
+      "bgtz     %[j],               1b                                     \n\t"
+      " addu    %[product_start],   %[product_start],     %[product_step]  \n\t"
+      ".set     pop                                                        \n\t"
+      : [product_ptr] "=&r" (product_ptr), [product_start] "+r" (product_start),
+        [k] "=&r" (k), [j] "+r" (j), [matrix1_start] "=&r"(matrix1_start),
+        [matrix1_ptr] "=&r" (matrix1_ptr), [matrix0_ptr] "=&r" (matrix0_ptr),
+        [matrix0_start] "+r" (matrix0_start), [n] "=&r" (n), [r0] "=&r" (r0),
+        [sum32] "=&r" (sum32), [r1] "=&r" (r1),[r2] "=&r" (r2)
+      : [product_step] "r" (product_step), [matrix1] "r" (matrix1),
+        [inner_loop_count] "r" (inner_loop_count),
+        [matrix1_step] "r" (matrix1_step), [shift] "r" (shift),
+        [matrix0_step] "r" (matrix0_step), [matrix1_step2] "r" (matrix1_step2),
+        [matrix0_step2] "r" (matrix0_step2)
+      : "hi", "lo", "memory"
+    );
+  } else {
+    int j = SUBFRAMES, k = 0, n = 0;
+    int32_t r0, r1, r2, sum32;
+    int32_t* product_start = matrix_product;
+    int32_t* product_ptr;
+    const uint32_t product_step = 4 * mid_loop_count;
+    const uint32_t matrix0_step = 2 * matrix0_index_step;
+    const uint32_t matrix1_step = 4 * matrix1_index_step;
+    const uint32_t matrix0_step2 = 2 * matrix0_index_factor1;
+    const uint32_t matrix1_step2 = 4 * matrix1_index_factor1;
+    const int16_t* matrix0_start = matrix0;
+    const int32_t* matrix1_start = matrix1;
+    int16_t* matrix0_ptr;
+    int32_t* matrix1_ptr;
+
+    __asm __volatile (
+      ".set     push                                                       \n\t"
+      ".set     noreorder                                                  \n\t"
+     "1:                                                                   \n\t"
+      "addu     %[product_ptr],     %[product_start],     $0               \n\t"
+      "addu     %[k],               %[product_step],      $0               \n\t"
+      "addiu    %[j],               %[j],                 -1               \n\t"
+      "addu     %[matrix0_start],   %[matrix0],           $0               \n\t"
+     "2:                                                                   \n\t"
+      "addu     %[matrix1_ptr],     %[matrix1_start],     $0               \n\t"
+      "addu     %[matrix0_ptr],     %[matrix0_start],     $0               \n\t"
+      "addu     %[n],               %[inner_loop_count],  $0               \n\t"
+      "mul      %[sum32],           $0,                   $0               \n\t"
+     "3:                                                                   \n\t"
+      "lw       %[r0],              0(%[matrix1_ptr])                      \n\t"
+      "lh       %[r1],              0(%[matrix0_ptr])                      \n\t"
+      "addu     %[matrix1_ptr],     %[matrix1_ptr],       %[matrix1_step]  \n\t"
+      "sllv     %[r0],              %[r0],                %[shift]         \n\t"
+      "andi     %[r2],              %[r0],                0xffff           \n\t"
+      "sra      %[r2],              %[r2],                1                \n\t"
+      "mul      %[r2],              %[r2],                %[r1]            \n\t"
+      "sra      %[r0],              %[r0],                16               \n\t"
+      "mul      %[r0],              %[r0],                %[r1]            \n\t"
+      "addu     %[matrix0_ptr],     %[matrix0_ptr],       %[matrix0_step]  \n\t"
+      "addiu    %[n],               %[n],                 -1               \n\t"
+#if defined(MIPS_DSP_R1_LE)
+      "shra_r.w %[r2],              %[r2],                15               \n\t"
+#else
+      "addiu    %[r2],              %[r2],                0x4000           \n\t"
+      "sra      %[r2],              %[r2],                15               \n\t"
+#endif
+      "addu     %[sum32],           %[sum32],             %[r2]            \n\t"
+      "bgtz     %[n],               3b                                     \n\t"
+      " addu    %[sum32],           %[sum32],             %[r0]            \n\t"
+      "addiu    %[k],               %[k],                 -4               \n\t"
+      "addu     %[matrix0_start],   %[matrix0_start],     %[matrix0_step2] \n\t"
+      "sw       %[sum32],           0(%[product_ptr])                      \n\t"
+      "bgtz     %[k],               2b                                     \n\t"
+      " addiu   %[product_ptr],     %[product_ptr],       4                \n\t"
+      "addu     %[matrix1_start],   %[matrix1_start],     %[matrix1_step2] \n\t"
+      "bgtz     %[j],               1b                                     \n\t"
+      " addu    %[product_start],   %[product_start],     %[product_step]  \n\t"
+      ".set     pop                                                        \n\t"
+      : [product_ptr] "=&r" (product_ptr), [product_start] "+r" (product_start),
+        [k] "=&r" (k), [j] "+r" (j), [matrix1_start] "+r"(matrix1_start),
+        [matrix1_ptr] "=&r" (matrix1_ptr), [matrix0_ptr] "=&r" (matrix0_ptr),
+        [matrix0_start] "=&r" (matrix0_start), [n] "=&r" (n), [r0] "=&r" (r0),
+        [sum32] "=&r" (sum32), [r1] "=&r" (r1),[r2] "=&r" (r2)
+      : [product_step] "r" (product_step), [matrix0] "r" (matrix0),
+        [inner_loop_count] "r" (inner_loop_count),
+        [matrix1_step] "r" (matrix1_step), [shift] "r" (shift),
+        [matrix0_step] "r" (matrix0_step), [matrix1_step2] "r" (matrix1_step2),
+        [matrix0_step2] "r" (matrix0_step2)
+      : "hi", "lo", "memory"
+    );
+  }
+}
+
+// MIPS optimization of the function WebRtcIsacfix_MatrixProduct2.
+// Bit-exact with the function WebRtcIsacfix_MatrixProduct2C from
+// entropy_coding.c file.
+void WebRtcIsacfix_MatrixProduct2MIPS(const int16_t matrix0[],
+                                      const int32_t matrix1[],
+                                      int32_t matrix_product[],
+                                      const int matrix0_index_factor,
+                                      const int matrix0_index_step) {
+  int j = 0, n = 0;
+  int loop_count = SUBFRAMES;
+  const int16_t* matrix0_ptr;
+  const int32_t* matrix1_ptr;
+  const int16_t* matrix0_start = matrix0;
+  const int matrix0_step = 2 * matrix0_index_step;
+  const int matrix0_step2 = 2 * matrix0_index_factor;
+  int32_t r0, r1, r2, r3, r4, sum32, sum32_2;
+
+  __asm __volatile (
+    ".set       push                                                   \n\t"
+    ".set       noreorder                                              \n\t"
+    "addu       %[j],              %[loop_count],     $0               \n\t"
+    "addu       %[matrix0_start],  %[matrix0],        $0               \n\t"
+   "1:                                                                 \n\t"
+    "addu       %[matrix1_ptr],    %[matrix1],        $0               \n\t"
+    "addu       %[matrix0_ptr],    %[matrix0_start],  $0               \n\t"
+    "addu       %[n],              %[loop_count],     $0               \n\t"
+    "mul        %[sum32],          $0,                $0               \n\t"
+    "mul        %[sum32_2],        $0,                $0               \n\t"
+   "2:                                                                 \n\t"
+    "lw         %[r0],             0(%[matrix1_ptr])                   \n\t"
+    "lw         %[r1],             4(%[matrix1_ptr])                   \n\t"
+    "lh         %[r2],             0(%[matrix0_ptr])                   \n\t"
+    "andi       %[r3],             %[r0],             0xffff           \n\t"
+    "sra        %[r3],             %[r3],             1                \n\t"
+    "mul        %[r3],             %[r3],             %[r2]            \n\t"
+    "andi       %[r4],             %[r1],             0xffff           \n\t"
+    "sra        %[r4],             %[r4],             1                \n\t"
+    "mul        %[r4],             %[r4],             %[r2]            \n\t"
+    "sra        %[r0],             %[r0],             16               \n\t"
+    "mul        %[r0],             %[r0],             %[r2]            \n\t"
+    "sra        %[r1],             %[r1],             16               \n\t"
+    "mul        %[r1],             %[r1],             %[r2]            \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w   %[r3],             %[r3],             15               \n\t"
+    "shra_r.w   %[r4],             %[r4],             15               \n\t"
+#else
+    "addiu      %[r3],             %[r3],             0x4000           \n\t"
+    "sra        %[r3],             %[r3],             15               \n\t"
+    "addiu      %[r4],             %[r4],             0x4000           \n\t"
+    "sra        %[r4],             %[r4],             15               \n\t"
+#endif
+    "addiu      %[matrix1_ptr],    %[matrix1_ptr],    8                \n\t"
+    "addu       %[matrix0_ptr],    %[matrix0_ptr],    %[matrix0_step]  \n\t"
+    "addiu      %[n],              %[n],              -1               \n\t"
+    "addu       %[sum32],          %[sum32],          %[r3]            \n\t"
+    "addu       %[sum32_2],        %[sum32_2],        %[r4]            \n\t"
+    "addu       %[sum32],          %[sum32],          %[r0]            \n\t"
+    "bgtz       %[n],              2b                                  \n\t"
+    " addu      %[sum32_2],        %[sum32_2],        %[r1]            \n\t"
+    "sra        %[sum32],          %[sum32],          3                \n\t"
+    "sra        %[sum32_2],        %[sum32_2],        3                \n\t"
+    "addiu      %[j],              %[j],              -1               \n\t"
+    "addu       %[matrix0_start],  %[matrix0_start],  %[matrix0_step2] \n\t"
+    "sw         %[sum32],          0(%[matrix_product])                \n\t"
+    "sw         %[sum32_2],        4(%[matrix_product])                \n\t"
+    "bgtz       %[j],              1b                                  \n\t"
+    " addiu     %[matrix_product], %[matrix_product], 8                \n\t"
+    ".set       pop                                                    \n\t"
+    : [j] "=&r" (j), [matrix0_start] "=&r" (matrix0_start),
+      [matrix1_ptr] "=&r" (matrix1_ptr), [matrix0_ptr] "=&r" (matrix0_ptr),
+      [n] "=&r" (n), [sum32] "=&r" (sum32), [sum32_2] "=&r" (sum32_2),
+      [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+      [r4] "=&r" (r4), [matrix_product] "+r" (matrix_product)
+    : [loop_count] "r" (loop_count), [matrix0] "r" (matrix0),
+      [matrix1] "r" (matrix1), [matrix0_step] "r" (matrix0_step),
+      [matrix0_step2] "r" (matrix0_step2)
+    : "hi", "lo", "memory"
+  );
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/entropy_coding_neon.c b/modules/audio_coding/codecs/isac/fix/source/entropy_coding_neon.c
new file mode 100644
index 0000000..0200567
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/entropy_coding_neon.c
@@ -0,0 +1,217 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/* This file contains WebRtcIsacfix_MatrixProduct1Neon() and
+ * WebRtcIsacfix_MatrixProduct2Neon() for ARM Neon platform. API's are in
+ * entropy_coding.c. Results are bit exact with the c code for
+ * generic platforms.
+ */
+
+#include <arm_neon.h>
+#include <stddef.h>
+
+#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "rtc_base/checks.h"
+
+void WebRtcIsacfix_MatrixProduct1Neon(const int16_t matrix0[],
+                                      const int32_t matrix1[],
+                                      int32_t matrix_product[],
+                                      const int matrix1_index_factor1,
+                                      const int matrix0_index_factor1,
+                                      const int matrix1_index_init_case,
+                                      const int matrix1_index_step,
+                                      const int matrix0_index_step,
+                                      const int inner_loop_count,
+                                      const int mid_loop_count,
+                                      const int shift) {
+  int j = 0, k = 0, n = 0;
+  int matrix1_index = 0, matrix0_index = 0, matrix_prod_index = 0;
+  int* matrix1_index_factor2 = &j;
+  int* matrix0_index_factor2 = &k;
+  if (matrix1_index_init_case != 0) {
+    matrix1_index_factor2 = &k;
+    matrix0_index_factor2 = &j;
+  }
+  int32x4_t shift32x4 = vdupq_n_s32(shift);
+  int32x2_t shift32x2 = vdup_n_s32(shift);
+  int32x4_t sum_32x4 =  vdupq_n_s32(0);
+  int32x2_t sum_32x2 =  vdup_n_s32(0);
+
+  RTC_DCHECK_EQ(0, inner_loop_count % 2);
+  RTC_DCHECK_EQ(0, mid_loop_count % 2);
+
+  if (matrix1_index_init_case != 0 && matrix1_index_factor1 == 1) {
+    for (j = 0; j < SUBFRAMES; j++) {
+      matrix_prod_index = mid_loop_count * j;
+      for (k = 0; k < (mid_loop_count >> 2) << 2; k += 4) {
+        sum_32x4 = veorq_s32(sum_32x4, sum_32x4);  // Initialize to zeros.
+        matrix1_index = k;
+        matrix0_index = matrix0_index_factor1 * j;
+        for (n = 0; n < inner_loop_count; n++) {
+          int32x4_t matrix0_32x4 =
+              vdupq_n_s32((int32_t)(matrix0[matrix0_index]) << 15);
+          int32x4_t matrix1_32x4 =
+              vshlq_s32(vld1q_s32(&matrix1[matrix1_index]), shift32x4);
+          int32x4_t multi_32x4 = vqdmulhq_s32(matrix0_32x4, matrix1_32x4);
+          sum_32x4 = vqaddq_s32(sum_32x4, multi_32x4);
+          matrix1_index += matrix1_index_step;
+          matrix0_index += matrix0_index_step;
+        }
+        vst1q_s32(&matrix_product[matrix_prod_index], sum_32x4);
+        matrix_prod_index += 4;
+      }
+      if (mid_loop_count % 4 > 1) {
+        sum_32x2 = veor_s32(sum_32x2, sum_32x2);  // Initialize to zeros.
+        matrix1_index = k;
+        k += 2;
+        matrix0_index = matrix0_index_factor1 * j;
+        for (n = 0; n < inner_loop_count; n++) {
+          int32x2_t matrix0_32x2 =
+              vdup_n_s32((int32_t)(matrix0[matrix0_index]) << 15);
+          int32x2_t matrix1_32x2 =
+              vshl_s32(vld1_s32(&matrix1[matrix1_index]), shift32x2);
+          int32x2_t multi_32x2 = vqdmulh_s32(matrix0_32x2, matrix1_32x2);
+          sum_32x2 = vqadd_s32(sum_32x2, multi_32x2);
+          matrix1_index += matrix1_index_step;
+          matrix0_index += matrix0_index_step;
+        }
+        vst1_s32(&matrix_product[matrix_prod_index], sum_32x2);
+        matrix_prod_index += 2;
+      }
+    }
+  }
+  else if (matrix1_index_init_case == 0 && matrix0_index_factor1 == 1) {
+    int32x2_t multi_32x2 = vdup_n_s32(0);
+    int32x2_t matrix0_32x2 = vdup_n_s32(0);
+    for (j = 0; j < SUBFRAMES; j++) {
+      matrix_prod_index = mid_loop_count * j;
+      for (k = 0; k < (mid_loop_count >> 2) << 2; k += 4) {
+        sum_32x4 = veorq_s32(sum_32x4, sum_32x4);  // Initialize to zeros.
+        matrix1_index = matrix1_index_factor1 * j;
+        matrix0_index = k;
+        for (n = 0; n < inner_loop_count; n++) {
+          int32x4_t matrix1_32x4 = vdupq_n_s32(matrix1[matrix1_index] << shift);
+          int32x4_t matrix0_32x4 =
+              vshll_n_s16(vld1_s16(&matrix0[matrix0_index]), 15);
+          int32x4_t multi_32x4 = vqdmulhq_s32(matrix0_32x4, matrix1_32x4);
+          sum_32x4 = vqaddq_s32(sum_32x4, multi_32x4);
+          matrix1_index += matrix1_index_step;
+          matrix0_index += matrix0_index_step;
+        }
+        vst1q_s32(&matrix_product[matrix_prod_index], sum_32x4);
+        matrix_prod_index += 4;
+      }
+      if (mid_loop_count % 4 > 1) {
+        sum_32x2 = veor_s32(sum_32x2, sum_32x2);  // Initialize to zeros.
+        matrix1_index = matrix1_index_factor1 * j;
+        matrix0_index = k;
+        for (n = 0; n < inner_loop_count; n++) {
+          int32x2_t matrix1_32x2 = vdup_n_s32(matrix1[matrix1_index] << shift);
+          matrix0_32x2 =
+              vset_lane_s32((int32_t)matrix0[matrix0_index], matrix0_32x2, 0);
+          matrix0_32x2 = vset_lane_s32((int32_t)matrix0[matrix0_index + 1],
+                                     matrix0_32x2, 1);
+          matrix0_32x2 = vshl_n_s32(matrix0_32x2, 15);
+          multi_32x2 = vqdmulh_s32(matrix1_32x2, matrix0_32x2);
+          sum_32x2 = vqadd_s32(sum_32x2, multi_32x2);
+          matrix1_index += matrix1_index_step;
+          matrix0_index += matrix0_index_step;
+        }
+        vst1_s32(&matrix_product[matrix_prod_index], sum_32x2);
+        matrix_prod_index += 2;
+      }
+    }
+  }
+  else if (matrix1_index_init_case == 0 &&
+           matrix1_index_step == 1 &&
+           matrix0_index_step == 1) {
+    int32x2_t multi_32x2 = vdup_n_s32(0);
+    int32x2_t matrix0_32x2 = vdup_n_s32(0);
+    for (j = 0; j < SUBFRAMES; j++) {
+      matrix_prod_index = mid_loop_count * j;
+      for (k = 0; k < mid_loop_count; k++) {
+        sum_32x4 = veorq_s32(sum_32x4, sum_32x4);  // Initialize to zeros.
+        matrix1_index = matrix1_index_factor1 * j;
+        matrix0_index = matrix0_index_factor1 * k;
+        for (n = 0; n < (inner_loop_count >> 2) << 2; n += 4) {
+          int32x4_t matrix1_32x4 =
+              vshlq_s32(vld1q_s32(&matrix1[matrix1_index]), shift32x4);
+          int32x4_t matrix0_32x4 =
+              vshll_n_s16(vld1_s16(&matrix0[matrix0_index]), 15);
+          int32x4_t multi_32x4 = vqdmulhq_s32(matrix0_32x4, matrix1_32x4);
+          sum_32x4 = vqaddq_s32(sum_32x4, multi_32x4);
+          matrix1_index += 4;
+          matrix0_index += 4;
+        }
+        sum_32x2 = vqadd_s32(vget_low_s32(sum_32x4), vget_high_s32(sum_32x4));
+        if (inner_loop_count % 4 > 1) {
+          int32x2_t matrix1_32x2 =
+              vshl_s32(vld1_s32(&matrix1[matrix1_index]), shift32x2);
+          matrix0_32x2 =
+              vset_lane_s32((int32_t)matrix0[matrix0_index], matrix0_32x2, 0);
+          matrix0_32x2 = vset_lane_s32((int32_t)matrix0[matrix0_index + 1],
+                                     matrix0_32x2, 1);
+          matrix0_32x2 = vshl_n_s32(matrix0_32x2, 15);
+          multi_32x2 = vqdmulh_s32(matrix1_32x2, matrix0_32x2);
+          sum_32x2 = vqadd_s32(sum_32x2, multi_32x2);
+        }
+        sum_32x2 = vpadd_s32(sum_32x2, sum_32x2);
+        vst1_lane_s32(&matrix_product[matrix_prod_index], sum_32x2, 0);
+        matrix_prod_index++;
+      }
+    }
+  }
+  else {
+    for (j = 0; j < SUBFRAMES; j++) {
+      matrix_prod_index = mid_loop_count * j;
+      for (k=0; k < mid_loop_count; k++) {
+        int32_t sum32 = 0;
+        matrix1_index = matrix1_index_factor1 * (*matrix1_index_factor2);
+        matrix0_index = matrix0_index_factor1 * (*matrix0_index_factor2);
+        for (n = 0; n < inner_loop_count; n++) {
+          sum32 += (WEBRTC_SPL_MUL_16_32_RSFT16(matrix0[matrix0_index],
+              matrix1[matrix1_index] << shift));
+          matrix1_index += matrix1_index_step;
+          matrix0_index += matrix0_index_step;
+        }
+        matrix_product[matrix_prod_index] = sum32;
+        matrix_prod_index++;
+      }
+    }
+  }
+}
+
+void WebRtcIsacfix_MatrixProduct2Neon(const int16_t matrix0[],
+                                      const int32_t matrix1[],
+                                      int32_t matrix_product[],
+                                      const int matrix0_index_factor,
+                                      const int matrix0_index_step) {
+  int j = 0, n = 0;
+  int matrix1_index = 0, matrix0_index = 0, matrix_prod_index = 0;
+  int32x2_t sum_32x2 = vdup_n_s32(0);
+  for (j = 0; j < SUBFRAMES; j++) {
+    sum_32x2 = veor_s32(sum_32x2, sum_32x2);  // Initialize to zeros.
+    matrix1_index = 0;
+    matrix0_index = matrix0_index_factor * j;
+    for (n = SUBFRAMES; n > 0; n--) {
+      int32x2_t matrix0_32x2 =
+          vdup_n_s32((int32_t)(matrix0[matrix0_index]) << 15);
+      int32x2_t matrix1_32x2 = vld1_s32(&matrix1[matrix1_index]);
+      int32x2_t multi_32x2 = vqdmulh_s32(matrix0_32x2, matrix1_32x2);
+      sum_32x2 = vqadd_s32(sum_32x2, multi_32x2);
+      matrix1_index += 2;
+      matrix0_index += matrix0_index_step;
+    }
+    sum_32x2 = vshr_n_s32(sum_32x2, 3);
+    vst1_s32(&matrix_product[matrix_prod_index], sum_32x2);
+    matrix_prod_index += 2;
+  }
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/fft.c b/modules/audio_coding/codecs/isac/fix/source/fft.c
new file mode 100644
index 0000000..a0ed3f8
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/fft.c
@@ -0,0 +1,415 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * fft.c
+ *
+ * Fast Fourier Transform
+ *
+ */
+
+
+#include "modules/audio_coding/codecs/isac/fix/source/fft.h"
+
+static const int16_t kSortTabFft[240] = {
+  0, 60, 120, 180, 20, 80, 140, 200, 40, 100, 160, 220,
+  4, 64, 124, 184, 24, 84, 144, 204, 44, 104, 164, 224,
+  8, 68, 128, 188, 28, 88, 148, 208, 48, 108, 168, 228,
+  12, 72, 132, 192, 32, 92, 152, 212, 52, 112, 172, 232,
+  16, 76, 136, 196, 36, 96, 156, 216, 56, 116, 176, 236,
+  1, 61, 121, 181, 21, 81, 141, 201, 41, 101, 161, 221,
+  5, 65, 125, 185, 25, 85, 145, 205, 45, 105, 165, 225,
+  9, 69, 129, 189, 29, 89, 149, 209, 49, 109, 169, 229,
+  13, 73, 133, 193, 33, 93, 153, 213, 53, 113, 173, 233,
+  17, 77, 137, 197, 37, 97, 157, 217, 57, 117, 177, 237,
+  2, 62, 122, 182, 22, 82, 142, 202, 42, 102, 162, 222,
+  6, 66, 126, 186, 26, 86, 146, 206, 46, 106, 166, 226,
+  10, 70, 130, 190, 30, 90, 150, 210, 50, 110, 170, 230,
+  14, 74, 134, 194, 34, 94, 154, 214, 54, 114, 174, 234,
+  18, 78, 138, 198, 38, 98, 158, 218, 58, 118, 178, 238,
+  3, 63, 123, 183, 23, 83, 143, 203, 43, 103, 163, 223,
+  7, 67, 127, 187, 27, 87, 147, 207, 47, 107, 167, 227,
+  11, 71, 131, 191, 31, 91, 151, 211, 51, 111, 171, 231,
+  15, 75, 135, 195, 35, 95, 155, 215, 55, 115, 175, 235,
+  19, 79, 139, 199, 39, 99, 159, 219, 59, 119, 179, 239
+};
+
+/* Cosine table in Q14 */
+static const int16_t kCosTabFfftQ14[240] = {
+  16384,  16378, 16362,   16333,  16294,  16244,  16182,  16110,  16026,  15931,  15826,  15709,
+  15582,  15444, 15296,   15137,  14968,  14788,  14598,  14399,  14189,  13970,  13741,  13502,
+  13255,  12998, 12733,   12458,  12176,  11885,  11585,  11278,  10963,  10641,  10311,   9974,
+  9630,   9280,  8923,    8561,   8192,   7818,   7438,   7053,   6664,   6270,   5872,   5469,
+  5063,   4653,  4240,    3825,   3406,   2986,   2563,   2139,   1713,   1285,    857,    429,
+  0,   -429,  -857,   -1285,  -1713,  -2139,  -2563,  -2986,  -3406,  -3825,  -4240,  -4653,
+  -5063,  -5469, -5872,   -6270,  -6664,  -7053,  -7438,  -7818,  -8192,  -8561,  -8923,  -9280,
+  -9630,  -9974, -10311, -10641, -10963, -11278, -11585, -11885, -12176, -12458, -12733, -12998,
+  -13255, -13502, -13741, -13970, -14189, -14399, -14598, -14788, -14968, -15137, -15296, -15444,
+  -15582, -15709, -15826, -15931, -16026, -16110, -16182, -16244, -16294, -16333, -16362, -16378,
+  -16384, -16378, -16362, -16333, -16294, -16244, -16182, -16110, -16026, -15931, -15826, -15709,
+  -15582, -15444, -15296, -15137, -14968, -14788, -14598, -14399, -14189, -13970, -13741, -13502,
+  -13255, -12998, -12733, -12458, -12176, -11885, -11585, -11278, -10963, -10641, -10311,  -9974,
+  -9630,  -9280,  -8923,  -8561,  -8192,  -7818,  -7438,  -7053,  -6664,  -6270,  -5872,  -5469,
+  -5063,  -4653,  -4240,  -3825,  -3406,  -2986,  -2563,  -2139,  -1713,  -1285,   -857,   -429,
+  0,    429,    857,   1285,   1713,   2139,   2563,   2986,   3406,   3825,   4240,   4653,
+  5063,   5469,   5872,   6270,   6664,   7053,   7438,   7818,   8192,   8561,   8923,   9280,
+  9630,   9974,  10311,  10641,  10963,  11278,  11585,  11885,  12176,  12458,  12733,  12998,
+  13255,  13502,  13741,  13970,  14189,  14399,  14598,  14788,  14968,  15137,  15296,  15444,
+  15582,  15709,  15826,  15931,  16026,  16110,  16182,  16244,  16294,  16333,  16362,  16378
+};
+
+
+
+/* Uses 16x16 mul, without rounding, which is faster. Uses WEBRTC_SPL_MUL_16_16_RSFT */
+int16_t WebRtcIsacfix_FftRadix16Fastest(int16_t RexQx[], int16_t ImxQx[], int16_t iSign) {
+
+  int16_t dd, ee, ff, gg, hh, ii;
+  int16_t k0, k1, k2, k3, k4, kk;
+  int16_t tmp116, tmp216;
+
+  int16_t ccc1Q14, ccc2Q14, ccc3Q14, sss1Q14, sss2Q14, sss3Q14;
+  int16_t sss60Q14, ccc72Q14, sss72Q14;
+  int16_t aaQx, ajQx, akQx, ajmQx, ajpQx, akmQx, akpQx;
+  int16_t bbQx, bjQx, bkQx, bjmQx, bjpQx, bkmQx, bkpQx;
+
+  int16_t ReDATAQx[240],  ImDATAQx[240];
+
+  sss60Q14 = kCosTabFfftQ14[20];
+  ccc72Q14 = kCosTabFfftQ14[48];
+  sss72Q14 = kCosTabFfftQ14[12];
+
+  if (iSign < 0) {
+    sss72Q14 = -sss72Q14;
+    sss60Q14 = -sss60Q14;
+  }
+  /* Complexity is: 10 cycles */
+
+  /* compute fourier transform */
+
+  // transform for factor of 4
+  for (kk=0; kk<60; kk++) {
+    k0 = kk;
+    k1 = k0 + 60;
+    k2 = k1 + 60;
+    k3 = k2 + 60;
+
+    akpQx = RexQx[k0] + RexQx[k2];
+    akmQx = RexQx[k0] - RexQx[k2];
+    ajpQx = RexQx[k1] + RexQx[k3];
+    ajmQx = RexQx[k1] - RexQx[k3];
+    bkpQx = ImxQx[k0] + ImxQx[k2];
+    bkmQx = ImxQx[k0] - ImxQx[k2];
+    bjpQx = ImxQx[k1] + ImxQx[k3];
+    bjmQx = ImxQx[k1] - ImxQx[k3];
+
+    RexQx[k0] = akpQx + ajpQx;
+    ImxQx[k0] = bkpQx + bjpQx;
+    ajpQx = akpQx - ajpQx;
+    bjpQx = bkpQx - bjpQx;
+    if (iSign < 0) {
+      akpQx = akmQx + bjmQx;
+      bkpQx = bkmQx - ajmQx;
+      akmQx -= bjmQx;
+      bkmQx += ajmQx;
+    } else {
+      akpQx = akmQx - bjmQx;
+      bkpQx = bkmQx + ajmQx;
+      akmQx += bjmQx;
+      bkmQx -= ajmQx;
+    }
+
+    ccc1Q14 = kCosTabFfftQ14[kk];
+    ccc2Q14 = kCosTabFfftQ14[2 * kk];
+    ccc3Q14 = kCosTabFfftQ14[3 * kk];
+    sss1Q14 = kCosTabFfftQ14[kk + 60];
+    sss2Q14 = kCosTabFfftQ14[2 * kk + 60];
+    sss3Q14 = kCosTabFfftQ14[3 * kk + 60];
+    if (iSign==1) {
+      sss1Q14 = -sss1Q14;
+      sss2Q14 = -sss2Q14;
+      sss3Q14 = -sss3Q14;
+    }
+
+    //Do several multiplications like Q14*Q16>>14 = Q16
+    // RexQ16[k1] = akpQ16 * ccc1Q14 - bkpQ16 * sss1Q14;
+    // RexQ16[k2] = ajpQ16 * ccc2Q14 - bjpQ16 * sss2Q14;
+    // RexQ16[k3] = akmQ16 * ccc3Q14 - bkmQ16 * sss3Q14;
+    // ImxQ16[k1] = akpQ16 * sss1Q14 + bkpQ16 * ccc1Q14;
+    // ImxQ16[k2] = ajpQ16 * sss2Q14 + bjpQ16 * ccc2Q14;
+    // ImxQ16[k3] = akmQ16 * sss3Q14 + bkmQ16 * ccc3Q14;
+
+    RexQx[k1] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc1Q14, akpQx, 14) -
+        (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss1Q14, bkpQx, 14); // 6 non-mul + 2 mul cycles, i.e. 8 cycles (6+2*7=20 cycles if 16x32mul)
+    RexQx[k2] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, ajpQx, 14) -
+        (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, bjpQx, 14);
+    RexQx[k3] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc3Q14, akmQx, 14) -
+        (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss3Q14, bkmQx, 14);
+    ImxQx[k1] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss1Q14, akpQx, 14) +
+        (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc1Q14, bkpQx, 14);
+    ImxQx[k2] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, ajpQx, 14) +
+        (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, bjpQx, 14);
+    ImxQx[k3] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss3Q14, akmQx, 14) +
+        (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc3Q14, bkmQx, 14);
+    //This mul segment needs 6*8 = 48 cycles for 16x16 muls, but 6*20 = 120 cycles for 16x32 muls
+
+
+  }
+  /* Complexity is: 51+48 = 99 cycles for 16x16 muls, but 51+120 = 171 cycles for 16x32 muls*/
+
+  // transform for factor of 3
+  kk=0;
+  k1=20;
+  k2=40;
+
+  for (hh=0; hh<4; hh++) {
+    for (ii=0; ii<20; ii++) {
+      akQx = RexQx[kk];
+      bkQx = ImxQx[kk];
+      ajQx = RexQx[k1] + RexQx[k2];
+      bjQx = ImxQx[k1] + ImxQx[k2];
+      RexQx[kk] = akQx + ajQx;
+      ImxQx[kk] = bkQx + bjQx;
+      tmp116 = ajQx >> 1;
+      tmp216 = bjQx >> 1;
+      akQx = akQx - tmp116;
+      bkQx = bkQx - tmp216;
+      tmp116 = RexQx[k1] - RexQx[k2];
+      tmp216 = ImxQx[k1] - ImxQx[k2];
+
+      ajQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss60Q14, tmp116, 14); // Q14*Qx>>14 = Qx
+      bjQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss60Q14, tmp216, 14); // Q14*Qx>>14 = Qx
+      RexQx[k1] = akQx - bjQx;
+      RexQx[k2] = akQx + bjQx;
+      ImxQx[k1] = bkQx + ajQx;
+      ImxQx[k2] = bkQx - ajQx;
+
+      kk++;
+      k1++;
+      k2++;
+    }
+    /* Complexity : (31+6)*20 = 740 cycles for 16x16 muls, but (31+18)*20 = 980 cycles for 16x32 muls*/
+    kk=kk+40;
+    k1=k1+40;
+    k2=k2+40;
+  }
+  /* Complexity : 4*(740+3) = 2972 cycles for 16x16 muls, but 4*(980+3) = 3932 cycles for 16x32 muls*/
+
+  /* multiply by rotation factor for odd factor 3 or 5 (not for 4)
+     Same code (duplicated) for both ii=2 and ii=3 */
+  kk = 1;
+  ee = 0;
+  ff = 0;
+
+  for (gg=0; gg<19; gg++) {
+    kk += 20;
+    ff = ff+4;
+    for (hh=0; hh<2; hh++) {
+      ee = ff + hh * ff;
+      dd = ee + 60;
+      ccc2Q14 = kCosTabFfftQ14[ee];
+      sss2Q14 = kCosTabFfftQ14[dd];
+      if (iSign==1) {
+        sss2Q14 = -sss2Q14;
+      }
+      for (ii=0; ii<4; ii++) {
+        akQx = RexQx[kk];
+        bkQx = ImxQx[kk];
+        RexQx[kk] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, akQx, 14) - // Q14*Qx>>14 = Qx
+            (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, bkQx, 14);
+        ImxQx[kk] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, akQx, 14) + // Q14*Qx>>14 = Qx
+            (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, bkQx, 14);
+
+
+        kk += 60;
+      }
+      kk = kk - 220;
+    }
+    // Complexity: 2*(13+5+4*13+2) = 144 for 16x16 muls, but 2*(13+5+4*33+2) = 304 cycles for 16x32 muls
+    kk = kk - 59;
+  }
+  // Complexity: 19*144 = 2736 for 16x16 muls, but 19*304 = 5776 cycles for 16x32 muls
+
+  // transform for factor of 5
+  kk = 0;
+  ccc2Q14 = kCosTabFfftQ14[96];
+  sss2Q14 = kCosTabFfftQ14[84];
+  if (iSign==1) {
+    sss2Q14 = -sss2Q14;
+  }
+
+  for (hh=0; hh<4; hh++) {
+    for (ii=0; ii<12; ii++) {
+      k1 = kk + 4;
+      k2 = k1 + 4;
+      k3 = k2 + 4;
+      k4 = k3 + 4;
+
+      akpQx = RexQx[k1] + RexQx[k4];
+      akmQx = RexQx[k1] - RexQx[k4];
+      bkpQx = ImxQx[k1] + ImxQx[k4];
+      bkmQx = ImxQx[k1] - ImxQx[k4];
+      ajpQx = RexQx[k2] + RexQx[k3];
+      ajmQx = RexQx[k2] - RexQx[k3];
+      bjpQx = ImxQx[k2] + ImxQx[k3];
+      bjmQx = ImxQx[k2] - ImxQx[k3];
+      aaQx = RexQx[kk];
+      bbQx = ImxQx[kk];
+      RexQx[kk] = aaQx + akpQx + ajpQx;
+      ImxQx[kk] = bbQx + bkpQx + bjpQx;
+
+      akQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc72Q14, akpQx, 14) +
+          (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, ajpQx, 14)  + aaQx;
+      bkQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc72Q14, bkpQx, 14) +
+          (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, bjpQx, 14)  + bbQx;
+      ajQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss72Q14, akmQx, 14) +
+          (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, ajmQx, 14);
+      bjQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss72Q14, bkmQx, 14) +
+          (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, bjmQx, 14);
+      // 32+4*8=64 or 32+4*20=112
+
+      RexQx[k1] = akQx - bjQx;
+      RexQx[k4] = akQx + bjQx;
+      ImxQx[k1] = bkQx + ajQx;
+      ImxQx[k4] = bkQx - ajQx;
+
+      akQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, akpQx, 14)  +
+          (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc72Q14, ajpQx, 14) + aaQx;
+      bkQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, bkpQx, 14)  +
+          (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc72Q14, bjpQx, 14) + bbQx;
+      ajQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, akmQx, 14) -
+          (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss72Q14, ajmQx, 14);
+      bjQx = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, bkmQx, 14) -
+          (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss72Q14, bjmQx, 14);
+      // 8+4*8=40 or 8+4*20=88
+
+      RexQx[k2] = akQx - bjQx;
+      RexQx[k3] = akQx + bjQx;
+      ImxQx[k2] = bkQx + ajQx;
+      ImxQx[k3] = bkQx - ajQx;
+
+      kk = k4 + 4;
+    }
+    // Complexity: 12*(64+40+10) = 1368 for 16x16 muls, but 12*(112+88+10) = 2520 cycles for 16x32 muls
+    kk -= 239;
+  }
+  // Complexity: 4*1368 = 5472 for 16x16 muls, but 4*2520 = 10080 cycles for 16x32 muls
+
+  /* multiply by rotation factor for odd factor 3 or 5 (not for 4)
+     Same code (duplicated) for both ii=2 and ii=3 */
+  kk = 1;
+  ee=0;
+
+  for (gg=0; gg<3; gg++) {
+    kk += 4;
+    dd = 12 + 12 * gg;
+    ff = 0;
+    for (hh=0; hh<4; hh++) {
+      ff = ff+dd;
+      ee = ff+60;
+      for (ii=0; ii<12; ii++) {
+        akQx = RexQx[kk];
+        bkQx = ImxQx[kk];
+
+        ccc2Q14 = kCosTabFfftQ14[ff];
+        sss2Q14 = kCosTabFfftQ14[ee];
+
+        if (iSign==1) {
+          sss2Q14 = -sss2Q14;
+        }
+
+        RexQx[kk] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, akQx, 14) -
+            (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, bkQx, 14);
+        ImxQx[kk] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(sss2Q14, akQx, 14) +
+            (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(ccc2Q14, bkQx, 14);
+
+        kk += 20;
+      }
+      kk = kk - 236;
+      // Complexity: 12*(12+12) = 288 for 16x16 muls, but 12*(12+32) = 528 cycles for 16x32 muls
+    }
+    kk = kk - 19;
+    // Complexity: 4*288+6 for 16x16 muls, but 4*528+6 cycles for 16x32 muls
+  }
+  // Complexity: 3*4*288+6 = 3462 for 16x16 muls, but 3*4*528+6 = 6342 cycles for 16x32 muls
+
+
+  // last transform for factor of 4 */
+  for (kk=0; kk<240; kk=kk+4) {
+    k1 = kk + 1;
+    k2 = k1 + 1;
+    k3 = k2 + 1;
+
+    akpQx = RexQx[kk] + RexQx[k2];
+    akmQx = RexQx[kk] - RexQx[k2];
+    ajpQx = RexQx[k1] + RexQx[k3];
+    ajmQx = RexQx[k1] - RexQx[k3];
+    bkpQx = ImxQx[kk] + ImxQx[k2];
+    bkmQx = ImxQx[kk] - ImxQx[k2];
+    bjpQx = ImxQx[k1] + ImxQx[k3];
+    bjmQx = ImxQx[k1] - ImxQx[k3];
+    RexQx[kk] = akpQx + ajpQx;
+    ImxQx[kk] = bkpQx + bjpQx;
+    ajpQx = akpQx - ajpQx;
+    bjpQx = bkpQx - bjpQx;
+    if (iSign < 0) {
+      akpQx = akmQx + bjmQx;
+      bkpQx = bkmQx - ajmQx;
+      akmQx -= bjmQx;
+      bkmQx += ajmQx;
+    } else {
+      akpQx = akmQx - bjmQx;
+      bkpQx = bkmQx + ajmQx;
+      akmQx += bjmQx;
+      bkmQx -= ajmQx;
+    }
+    RexQx[k1] = akpQx;
+    RexQx[k2] = ajpQx;
+    RexQx[k3] = akmQx;
+    ImxQx[k1] = bkpQx;
+    ImxQx[k2] = bjpQx;
+    ImxQx[k3] = bkmQx;
+  }
+  // Complexity: 60*45 = 2700 for 16x16 muls, but 60*45 = 2700 cycles for 16x32 muls
+
+  /* permute the results to normal order */
+  for (ii=0; ii<240; ii++) {
+    ReDATAQx[ii]=RexQx[ii];
+    ImDATAQx[ii]=ImxQx[ii];
+  }
+  // Complexity: 240*2=480 cycles
+
+  for (ii=0; ii<240; ii++) {
+    RexQx[ii]=ReDATAQx[kSortTabFft[ii]];
+    ImxQx[ii]=ImDATAQx[kSortTabFft[ii]];
+  }
+  // Complexity: 240*2*2=960 cycles
+
+  // Total complexity:
+  //            16x16 16x32
+  // Complexity:   10    10
+  // Complexity:   99   171
+  // Complexity: 2972  3932
+  // Complexity: 2736  5776
+  // Complexity: 5472 10080
+  // Complexity: 3462  6342
+  // Complexity: 2700  2700
+  // Complexity:  480   480
+  // Complexity:  960   960
+  // =======================
+  //            18891 30451
+  //
+  // If this FFT is called 2 time each frame, i.e. 67 times per second, it will correspond to
+  // a C54 complexity of 67*18891/1000000 = 1.27 MIPS with 16x16-muls, and 67*30451/1000000 =
+  // = 2.04 MIPS with 16x32-muls. Note that this routine somtimes is called 6 times during the
+  // encoding of a frame, i.e. the max complexity would be 7/2*1.27 = 4.4 MIPS for the 16x16 mul case.
+
+
+  return 0;
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/fft.h b/modules/audio_coding/codecs/isac/fix/source/fft.h
new file mode 100644
index 0000000..61ec515
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/fft.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*--------------------------------*-C-*---------------------------------*
+ * File:
+ * fft.h
+ * ---------------------------------------------------------------------*
+ * Re[]: real value array
+ * Im[]: imaginary value array
+ * nTotal: total number of complex values
+ * nPass: number of elements involved in this pass of transform
+ * nSpan: nspan/nPass = number of bytes to increment pointer
+ *  in Re[] and Im[]
+ * isign: exponent: +1 = forward  -1 = reverse
+ * scaling: normalizing constant by which the final result is *divided*
+ * scaling == -1, normalize by total dimension of the transform
+ * scaling <  -1, normalize by the square-root of the total dimension
+ *
+ * ----------------------------------------------------------------------
+ * See the comments in the code for correct usage!
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FFT_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FFT_H_
+
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+int16_t WebRtcIsacfix_FftRadix16Fastest(int16_t RexQx[], int16_t ImxQx[], int16_t iSign);
+
+
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FFT_H_ */
diff --git a/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h b/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h
new file mode 100644
index 0000000..1c34969
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h
@@ -0,0 +1,90 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FILTERBANK_INTERNAL_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FILTERBANK_INTERNAL_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+/* Arguments:
+ *   io:  Input/output, in Q0.
+ *   len: Input, sample length.
+ *   coefficient: Input.
+ *   state: Input/output, filter state, in Q4.
+ */
+typedef void (*HighpassFilterFixDec32)(int16_t* io,
+                                       int16_t len,
+                                       const int16_t* coefficient,
+                                       int32_t* state);
+extern HighpassFilterFixDec32 WebRtcIsacfix_HighpassFilterFixDec32;
+
+void WebRtcIsacfix_HighpassFilterFixDec32C(int16_t* io,
+                                           int16_t len,
+                                           const int16_t* coefficient,
+                                           int32_t* state);
+
+#if defined(MIPS_DSP_R1_LE)
+void WebRtcIsacfix_HighpassFilterFixDec32MIPS(int16_t* io,
+                                              int16_t len,
+                                              const int16_t* coefficient,
+                                              int32_t* state);
+#endif
+
+typedef void (*AllpassFilter2FixDec16)(
+    int16_t *data_ch1,           // Input and output in channel 1, in Q0
+    int16_t *data_ch2,           // Input and output in channel 2, in Q0
+    const int16_t *factor_ch1,   // Scaling factor for channel 1, in Q15
+    const int16_t *factor_ch2,   // Scaling factor for channel 2, in Q15
+    const int length,            // Length of the data buffers
+    int32_t *filter_state_ch1,   // Filter state for channel 1, in Q16
+    int32_t *filter_state_ch2);  // Filter state for channel 2, in Q16
+extern AllpassFilter2FixDec16 WebRtcIsacfix_AllpassFilter2FixDec16;
+
+void WebRtcIsacfix_AllpassFilter2FixDec16C(
+   int16_t *data_ch1,
+   int16_t *data_ch2,
+   const int16_t *factor_ch1,
+   const int16_t *factor_ch2,
+   const int length,
+   int32_t *filter_state_ch1,
+   int32_t *filter_state_ch2);
+
+#if defined(WEBRTC_HAS_NEON)
+void WebRtcIsacfix_AllpassFilter2FixDec16Neon(
+   int16_t *data_ch1,
+   int16_t *data_ch2,
+   const int16_t *factor_ch1,
+   const int16_t *factor_ch2,
+   const int length,
+   int32_t *filter_state_ch1,
+   int32_t *filter_state_ch2);
+#endif
+
+#if defined(MIPS_DSP_R1_LE)
+void WebRtcIsacfix_AllpassFilter2FixDec16MIPS(
+   int16_t *data_ch1,
+   int16_t *data_ch2,
+   const int16_t *factor_ch1,
+   const int16_t *factor_ch2,
+   const int length,
+   int32_t *filter_state_ch1,
+   int32_t *filter_state_ch2);
+#endif
+
+#if defined(__cplusplus) || defined(c_plusplus)
+}
+#endif
+
+#endif
+/* WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FILTERBANK_INTERNAL_H_ */
diff --git a/modules/audio_coding/codecs/isac/fix/source/filterbank_tables.c b/modules/audio_coding/codecs/isac/fix/source/filterbank_tables.c
new file mode 100644
index 0000000..f2dec79
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/filterbank_tables.c
@@ -0,0 +1,62 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * filterbank_tables.c
+ *
+ * This file contains variables that are used in
+ * filterbanks.c
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/filterbank_tables.h"
+
+/* HPstcoeff_in_Q14 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2};
+ * In float, they are: {-1.94895953203325f, 0.94984516000000f,
+ * -0.05101826139794f, 0.05015484000000f};
+ */
+const int16_t WebRtcIsacfix_kHpStCoeffInQ30[8] = {
+  16189, -31932,  /* Q30 lo/hi pair */
+  17243, 15562,  /* Q30 lo/hi pair */
+  -17186, -26748,  /* Q35 lo/hi pair */
+  -27476, 26296  /* Q35 lo/hi pair */
+};
+
+/* HPstcoeff_out_1_Q14 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2};
+ * In float, they are: {-1.99701049409000f, 0.99714204490000f,
+ * 0.01701049409000f, -0.01704204490000f};
+ */
+const int16_t WebRtcIsacfix_kHPStCoeffOut1Q30[8] = {
+  -1306, -32719,  /* Q30 lo/hi pair */
+  11486, 16337,  /* Q30 lo/hi pair */
+  26078, 8918,  /* Q35 lo/hi pair */
+  3956, -8935  /* Q35 lo/hi pair */
+};
+
+/* HPstcoeff_out_2_Q14 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2};
+ * In float, they are: {-1.98645294509837f, 0.98672435560000f,
+ * 0.00645294509837f, -0.00662435560000f};
+ */
+const int16_t WebRtcIsacfix_kHPStCoeffOut2Q30[8] = {
+  -2953, -32546,  /* Q30 lo/hi pair */
+  32233, 16166,  /* Q30 lo/hi pair */
+  13217, 3383,  /* Q35 lo/hi pair */
+  -4597, -3473  /* Q35 lo/hi pair */
+};
+
+/* The upper channel all-pass filter factors */
+const int16_t WebRtcIsacfix_kUpperApFactorsQ15[2] = {
+  1137, 12537
+};
+
+/* The lower channel all-pass filter factors */
+const int16_t WebRtcIsacfix_kLowerApFactorsQ15[2] = {
+  5059, 24379
+};
diff --git a/modules/audio_coding/codecs/isac/fix/source/filterbank_tables.h b/modules/audio_coding/codecs/isac/fix/source/filterbank_tables.h
new file mode 100644
index 0000000..55e1f44
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/filterbank_tables.h
@@ -0,0 +1,52 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * filterbank_tables.h
+ *
+ * Header file for variables that are defined in
+ * filterbank_tables.c.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FILTERBANK_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FILTERBANK_TABLES_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+/********************* Coefficient Tables ************************/
+
+/* HPstcoeff_in_Q14 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2}; */
+/* [Q30lo Q30hi Q30lo Q30hi Q35lo Q35hi Q35lo Q35hi] */
+extern const int16_t WebRtcIsacfix_kHpStCoeffInQ30[8];
+
+/* HPstcoeff_out_1_Q14 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2}; */
+/* [Q30lo Q30hi Q30lo Q30hi Q35lo Q35hi Q35lo Q35hi] */
+extern const int16_t WebRtcIsacfix_kHPStCoeffOut1Q30[8];
+
+/* HPstcoeff_out_2_Q14 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2}; */
+/* [Q30lo Q30hi Q30lo Q30hi Q35lo Q35hi Q35lo Q35hi] */
+extern const int16_t WebRtcIsacfix_kHPStCoeffOut2Q30[8];
+
+/* The upper channel all-pass filter factors */
+extern const int16_t WebRtcIsacfix_kUpperApFactorsQ15[2];
+
+/* The lower channel all-pass filter factors */
+extern const int16_t WebRtcIsacfix_kLowerApFactorsQ15[2];
+
+#if defined(__cplusplus) || defined(c_plusplus)
+}
+#endif
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_FILTERBANK_TABLES_H_ */
diff --git a/modules/audio_coding/codecs/isac/fix/source/filterbanks.c b/modules/audio_coding/codecs/isac/fix/source/filterbanks.c
new file mode 100644
index 0000000..6aba8b6
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/filterbanks.c
@@ -0,0 +1,421 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * filterbanks.c
+ *
+ * This file contains function 
+ * WebRtcIsacfix_SplitAndFilter, and WebRtcIsacfix_FilterAndCombine
+ * which implement filterbanks that produce decimated lowpass and
+ * highpass versions of a signal, and performs reconstruction.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h"
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/filterbank_tables.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "rtc_base/checks.h"
+
+// Declare a function pointer.
+AllpassFilter2FixDec16 WebRtcIsacfix_AllpassFilter2FixDec16;
+
+void WebRtcIsacfix_AllpassFilter2FixDec16C(
+    int16_t *data_ch1,  // Input and output in channel 1, in Q0
+    int16_t *data_ch2,  // Input and output in channel 2, in Q0
+    const int16_t *factor_ch1,  // Scaling factor for channel 1, in Q15
+    const int16_t *factor_ch2,  // Scaling factor for channel 2, in Q15
+    const int length,  // Length of the data buffers
+    int32_t *filter_state_ch1,  // Filter state for channel 1, in Q16
+    int32_t *filter_state_ch2) {  // Filter state for channel 2, in Q16
+  int n = 0;
+  int32_t state0_ch1 = filter_state_ch1[0], state1_ch1 = filter_state_ch1[1];
+  int32_t state0_ch2 = filter_state_ch2[0], state1_ch2 = filter_state_ch2[1];
+  int16_t in_out = 0;
+  int32_t a = 0, b = 0;
+
+  // Assembly file assumption.
+  RTC_DCHECK_EQ(0, length % 2);
+
+  for (n = 0; n < length; n++) {
+    // Process channel 1:
+    in_out = data_ch1[n];
+    a = factor_ch1[0] * in_out;  // Q15 * Q0 = Q15
+    a *= 1 << 1;  // Q15 -> Q16
+    b = WebRtcSpl_AddSatW32(a, state0_ch1);
+    a = -factor_ch1[0] * (int16_t)(b >> 16);  // Q15
+    state0_ch1 =
+        WebRtcSpl_AddSatW32(a * (1 << 1), (int32_t)in_out * (1 << 16));  // Q16
+    in_out = (int16_t) (b >> 16);  // Save as Q0
+
+    a = factor_ch1[1] * in_out;  // Q15 * Q0 = Q15
+    a *= 1 << 1; // Q15 -> Q16
+    b = WebRtcSpl_AddSatW32(a, state1_ch1);  // Q16
+    a = -factor_ch1[1] * (int16_t)(b >> 16);  // Q15
+    state1_ch1 =
+        WebRtcSpl_AddSatW32(a * (1 << 1), (int32_t)in_out * (1 << 16));  // Q16
+    data_ch1[n] = (int16_t) (b >> 16);  // Save as Q0
+
+    // Process channel 2:
+    in_out = data_ch2[n];
+    a = factor_ch2[0] * in_out;  // Q15 * Q0 = Q15
+    a *= 1 << 1;  // Q15 -> Q16
+    b = WebRtcSpl_AddSatW32(a, state0_ch2);  // Q16
+    a = -factor_ch2[0] * (int16_t)(b >> 16);  // Q15
+    state0_ch2 =
+        WebRtcSpl_AddSatW32(a * (1 << 1), (int32_t)in_out * (1 << 16));  // Q16
+    in_out = (int16_t) (b >> 16);  // Save as Q0
+
+    a = factor_ch2[1] * in_out;  // Q15 * Q0 = Q15
+    a *= (1 << 1);  // Q15 -> Q16
+    b = WebRtcSpl_AddSatW32(a, state1_ch2);  // Q16
+    a = -factor_ch2[1] * (int16_t)(b >> 16);  // Q15
+    state1_ch2 =
+        WebRtcSpl_AddSatW32(a * (1 << 1), (int32_t)in_out * (1 << 16));  // Q16
+    data_ch2[n] = (int16_t) (b >> 16);  // Save as Q0
+  }
+
+  filter_state_ch1[0] = state0_ch1;
+  filter_state_ch1[1] = state1_ch1;
+  filter_state_ch2[0] = state0_ch2;
+  filter_state_ch2[1] = state1_ch2;
+}
+
+// Declare a function pointer.
+HighpassFilterFixDec32 WebRtcIsacfix_HighpassFilterFixDec32;
+
+void WebRtcIsacfix_HighpassFilterFixDec32C(int16_t *io,
+                                           int16_t len,
+                                           const int16_t *coefficient,
+                                           int32_t *state)
+{
+  int k;
+  int32_t a1 = 0, b1 = 0, c = 0, in = 0;
+  int32_t a2 = 0, b2 = 0;
+  int32_t state0 = state[0];
+  int32_t state1 = state[1];
+
+  for (k=0; k<len; k++) {
+    in = (int32_t)io[k];
+
+#ifdef WEBRTC_ARCH_ARM_V7
+    {
+      register int tmp_coeff0;
+      register int tmp_coeff1;
+      __asm __volatile(
+        "ldr %[tmp_coeff0], [%[coeff]]\n\t"
+        "ldr %[tmp_coeff1], [%[coeff], #4]\n\t"
+        "smmulr %[a2], %[tmp_coeff0], %[state0]\n\t"
+        "smmulr %[b2], %[tmp_coeff1], %[state1]\n\t"
+        "ldr %[tmp_coeff0], [%[coeff], #8]\n\t"
+        "ldr %[tmp_coeff1], [%[coeff], #12]\n\t"
+        "smmulr %[a1], %[tmp_coeff0], %[state0]\n\t"
+        "smmulr %[b1], %[tmp_coeff1], %[state1]\n\t"
+        :[a2]"=&r"(a2),
+         [b2]"=&r"(b2),
+         [a1]"=&r"(a1),
+         [b1]"=r"(b1),
+         [tmp_coeff0]"=&r"(tmp_coeff0),
+         [tmp_coeff1]"=&r"(tmp_coeff1)
+        :[coeff]"r"(coefficient),
+         [state0]"r"(state0),
+         [state1]"r"(state1)
+      );
+    }
+#else
+    /* Q35 * Q4 = Q39 ; shift 32 bit => Q7 */
+    a1 = WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[5], state0) +
+        (WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[4], state0) >> 16);
+    b1 = WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[7], state1) +
+        (WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[6], state1) >> 16);
+
+    /* Q30 * Q4 = Q34 ; shift 32 bit => Q2 */
+    a2 = WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[1], state0) +
+        (WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[0], state0) >> 16);
+    b2 = WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[3], state1) +
+        (WEBRTC_SPL_MUL_16_32_RSFT16(coefficient[2], state1) >> 16);
+#endif
+
+    c = in + ((a1 + b1) >> 7);  // Q0.
+    io[k] = (int16_t)WebRtcSpl_SatW32ToW16(c);  // Write output as Q0.
+
+    c = in * (1 << 2) - a2 - b2;  // In Q2.
+    c = (int32_t)WEBRTC_SPL_SAT(536870911, c, -536870912);
+
+    state1 = state0;
+    state0 = c * (1 << 2);  // Write state as Q4
+  }
+  state[0] = state0;
+  state[1] = state1;
+}
+
+
+void WebRtcIsacfix_SplitAndFilter1(int16_t *pin,
+                                   int16_t *LP16,
+                                   int16_t *HP16,
+                                   PreFiltBankstr *prefiltdata)
+{
+  /* Function WebRtcIsacfix_SplitAndFilter */
+  /* This function creates low-pass and high-pass decimated versions of part of
+     the input signal, and part of the signal in the input 'lookahead buffer'. */
+
+  int k;
+
+  int16_t tempin_ch1[FRAMESAMPLES/2 + QLOOKAHEAD];
+  int16_t tempin_ch2[FRAMESAMPLES/2 + QLOOKAHEAD];
+  int32_t tmpState_ch1[2 * (QORDER-1)]; /* 4 */
+  int32_t tmpState_ch2[2 * (QORDER-1)]; /* 4 */
+
+  /* High pass filter */
+  WebRtcIsacfix_HighpassFilterFixDec32(pin, FRAMESAMPLES, WebRtcIsacfix_kHpStCoeffInQ30, prefiltdata->HPstates_fix);
+
+
+  /* First Channel */
+  for (k=0;k<FRAMESAMPLES/2;k++) {
+    tempin_ch1[QLOOKAHEAD + k] = pin[1 + 2 * k];
+  }
+  for (k=0;k<QLOOKAHEAD;k++) {
+    tempin_ch1[k]=prefiltdata->INLABUF1_fix[k];
+    prefiltdata->INLABUF1_fix[k] = pin[FRAMESAMPLES + 1 - 2 * (QLOOKAHEAD - k)];
+  }
+
+  /* Second Channel.  This is exactly like the first channel, except that the
+     even samples are now filtered instead (lower channel). */
+  for (k=0;k<FRAMESAMPLES/2;k++) {
+    tempin_ch2[QLOOKAHEAD + k] = pin[2 * k];
+  }
+  for (k=0;k<QLOOKAHEAD;k++) {
+    tempin_ch2[k]=prefiltdata->INLABUF2_fix[k];
+    prefiltdata->INLABUF2_fix[k] = pin[FRAMESAMPLES - 2 * (QLOOKAHEAD - k)];
+  }
+
+
+  /*obtain polyphase components by forward all-pass filtering through each channel */
+  /* The all pass filtering automatically updates the filter states which are exported in the
+     prefiltdata structure */
+  WebRtcIsacfix_AllpassFilter2FixDec16(tempin_ch1,
+                                       tempin_ch2,
+                                       WebRtcIsacfix_kUpperApFactorsQ15,
+                                       WebRtcIsacfix_kLowerApFactorsQ15,
+                                       FRAMESAMPLES/2,
+                                       prefiltdata->INSTAT1_fix,
+                                       prefiltdata->INSTAT2_fix);
+
+  for (k = 0; k < 2 * (QORDER - 1); k++) {
+    tmpState_ch1[k] = prefiltdata->INSTAT1_fix[k];
+    tmpState_ch2[k] = prefiltdata->INSTAT2_fix[k];
+  }
+  WebRtcIsacfix_AllpassFilter2FixDec16(tempin_ch1 + FRAMESAMPLES/2,
+                                       tempin_ch2 + FRAMESAMPLES/2,
+                                       WebRtcIsacfix_kUpperApFactorsQ15,
+                                       WebRtcIsacfix_kLowerApFactorsQ15,
+                                       QLOOKAHEAD,
+                                       tmpState_ch1,
+                                       tmpState_ch2);
+
+  /* Now Construct low-pass and high-pass signals as combinations of polyphase components */
+  for (k=0; k<FRAMESAMPLES/2 + QLOOKAHEAD; k++) {
+    int32_t tmp1, tmp2, tmp3;
+    tmp1 = (int32_t)tempin_ch1[k]; // Q0 -> Q0
+    tmp2 = (int32_t)tempin_ch2[k]; // Q0 -> Q0
+    tmp3 = (tmp1 + tmp2) >> 1;  /* Low pass signal. */
+    LP16[k] = (int16_t)WebRtcSpl_SatW32ToW16(tmp3); /*low pass */
+    tmp3 = (tmp1 - tmp2) >> 1;  /* High pass signal. */
+    HP16[k] = (int16_t)WebRtcSpl_SatW32ToW16(tmp3); /*high pass */
+  }
+
+}/*end of WebRtcIsacfix_SplitAndFilter */
+
+
+#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
+
+/* Without lookahead */
+void WebRtcIsacfix_SplitAndFilter2(int16_t *pin,
+                                   int16_t *LP16,
+                                   int16_t *HP16,
+                                   PreFiltBankstr *prefiltdata)
+{
+  /* Function WebRtcIsacfix_SplitAndFilter2 */
+  /* This function creates low-pass and high-pass decimated versions of part of
+     the input signal. */
+
+  int k;
+
+  int16_t tempin_ch1[FRAMESAMPLES/2];
+  int16_t tempin_ch2[FRAMESAMPLES/2];
+
+
+  /* High pass filter */
+  WebRtcIsacfix_HighpassFilterFixDec32(pin, FRAMESAMPLES, WebRtcIsacfix_kHpStCoeffInQ30, prefiltdata->HPstates_fix);
+
+
+  /* First Channel */
+  for (k=0;k<FRAMESAMPLES/2;k++) {
+    tempin_ch1[k] = pin[1 + 2 * k];
+  }
+
+  /* Second Channel.  This is exactly like the first channel, except that the
+     even samples are now filtered instead (lower channel). */
+  for (k=0;k<FRAMESAMPLES/2;k++) {
+    tempin_ch2[k] = pin[2 * k];
+  }
+
+
+  /*obtain polyphase components by forward all-pass filtering through each channel */
+  /* The all pass filtering automatically updates the filter states which are exported in the
+     prefiltdata structure */
+  WebRtcIsacfix_AllpassFilter2FixDec16(tempin_ch1,
+                                       tempin_ch2,
+                                       WebRtcIsacfix_kUpperApFactorsQ15,
+                                       WebRtcIsacfix_kLowerApFactorsQ15,
+                                       FRAMESAMPLES/2,
+                                       prefiltdata->INSTAT1_fix,
+                                       prefiltdata->INSTAT2_fix);
+
+  /* Now Construct low-pass and high-pass signals as combinations of polyphase components */
+  for (k=0; k<FRAMESAMPLES/2; k++) {
+    int32_t tmp1, tmp2, tmp3;
+    tmp1 = (int32_t)tempin_ch1[k]; // Q0 -> Q0
+    tmp2 = (int32_t)tempin_ch2[k]; // Q0 -> Q0
+    tmp3 = (tmp1 + tmp2) >> 1;  /* Low pass signal. */
+    LP16[k] = (int16_t)WebRtcSpl_SatW32ToW16(tmp3); /*low pass */
+    tmp3 = (tmp1 - tmp2) >> 1;  /* High pass signal. */
+    HP16[k] = (int16_t)WebRtcSpl_SatW32ToW16(tmp3); /*high pass */
+  }
+
+}/*end of WebRtcIsacfix_SplitAndFilter */
+
+#endif
+
+
+
+//////////////////////////////////////////////////////////
+////////// Combining
+/* Function WebRtcIsacfix_FilterAndCombine */
+/* This is a decoder function that takes the decimated
+   length FRAMESAMPLES/2 input low-pass and
+   high-pass signals and creates a reconstructed fullband
+   output signal of length FRAMESAMPLES. WebRtcIsacfix_FilterAndCombine
+   is the sibling function of WebRtcIsacfix_SplitAndFilter */
+/* INPUTS:
+   inLP: a length FRAMESAMPLES/2 array of input low-pass
+   samples.
+   inHP: a length FRAMESAMPLES/2 array of input high-pass
+   samples.
+   postfiltdata: input data structure containing the filterbank
+   states from the previous decoding iteration.
+   OUTPUTS:
+   Out: a length FRAMESAMPLES array of output reconstructed
+   samples (fullband) based on the input low-pass and
+   high-pass signals.
+   postfiltdata: the input data structure containing the filterbank
+   states is updated for the next decoding iteration */
+void WebRtcIsacfix_FilterAndCombine1(int16_t *tempin_ch1,
+                                     int16_t *tempin_ch2,
+                                     int16_t *out16,
+                                     PostFiltBankstr *postfiltdata)
+{
+  int k;
+  int16_t in[FRAMESAMPLES];
+
+  /* all-pass filter the new upper and lower channel signal.
+     For upper channel, use the all-pass filter factors that were used as a
+     lower channel at the encoding side. So at the decoder, the corresponding
+     all-pass filter factors for each channel are swapped.
+     For lower channel signal, since all-pass filter factors at the decoder are
+     swapped from the ones at the encoder, the 'upper' channel all-pass filter
+     factors (kUpperApFactors) are used to filter this new lower channel signal.
+  */
+  WebRtcIsacfix_AllpassFilter2FixDec16(tempin_ch1,
+                                       tempin_ch2,
+                                       WebRtcIsacfix_kLowerApFactorsQ15,
+                                       WebRtcIsacfix_kUpperApFactorsQ15,
+                                       FRAMESAMPLES/2,
+                                       postfiltdata->STATE_0_UPPER_fix,
+                                       postfiltdata->STATE_0_LOWER_fix);
+
+  /* Merge outputs to form the full length output signal.*/
+  for (k=0;k<FRAMESAMPLES/2;k++) {
+    in[2 * k] = tempin_ch2[k];
+    in[2 * k + 1] = tempin_ch1[k];
+  }
+
+  /* High pass filter */
+  WebRtcIsacfix_HighpassFilterFixDec32(in, FRAMESAMPLES, WebRtcIsacfix_kHPStCoeffOut1Q30, postfiltdata->HPstates1_fix);
+  WebRtcIsacfix_HighpassFilterFixDec32(in, FRAMESAMPLES, WebRtcIsacfix_kHPStCoeffOut2Q30, postfiltdata->HPstates2_fix);
+
+  for (k=0;k<FRAMESAMPLES;k++) {
+    out16[k] = in[k];
+  }
+}
+
+
+#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
+/* Function WebRtcIsacfix_FilterAndCombine */
+/* This is a decoder function that takes the decimated
+   length len/2 input low-pass and
+   high-pass signals and creates a reconstructed fullband
+   output signal of length len. WebRtcIsacfix_FilterAndCombine
+   is the sibling function of WebRtcIsacfix_SplitAndFilter */
+/* INPUTS:
+   inLP: a length len/2 array of input low-pass
+   samples.
+   inHP: a length len/2 array of input high-pass
+   samples.
+   postfiltdata: input data structure containing the filterbank
+   states from the previous decoding iteration.
+   OUTPUTS:
+   Out: a length len array of output reconstructed
+   samples (fullband) based on the input low-pass and
+   high-pass signals.
+   postfiltdata: the input data structure containing the filterbank
+   states is updated for the next decoding iteration */
+void WebRtcIsacfix_FilterAndCombine2(int16_t *tempin_ch1,
+                                     int16_t *tempin_ch2,
+                                     int16_t *out16,
+                                     PostFiltBankstr *postfiltdata,
+                                     int16_t len)
+{
+  int k;
+  int16_t in[FRAMESAMPLES];
+
+  /* all-pass filter the new upper and lower channel signal.
+     For upper channel, use the all-pass filter factors that were used as a
+     lower channel at the encoding side. So at the decoder, the corresponding
+     all-pass filter factors for each channel are swapped.
+     For lower channel signal, since all-pass filter factors at the decoder are
+     swapped from the ones at the encoder, the 'upper' channel all-pass filter
+     factors (kUpperApFactors) are used to filter this new lower channel signal.
+  */
+  WebRtcIsacfix_AllpassFilter2FixDec16(tempin_ch1,
+                                       tempin_ch2,
+                                       WebRtcIsacfix_kLowerApFactorsQ15,
+                                       WebRtcIsacfix_kUpperApFactorsQ15,
+                                       len / 2,
+                                       postfiltdata->STATE_0_UPPER_fix,
+                                       postfiltdata->STATE_0_LOWER_fix);
+
+  /* Merge outputs to form the full length output signal.*/
+  for (k=0;k<len/2;k++) {
+    in[2 * k] = tempin_ch2[k];
+    in[2 * k + 1] = tempin_ch1[k];
+  }
+
+  /* High pass filter */
+  WebRtcIsacfix_HighpassFilterFixDec32(in, len, WebRtcIsacfix_kHPStCoeffOut1Q30, postfiltdata->HPstates1_fix);
+  WebRtcIsacfix_HighpassFilterFixDec32(in, len, WebRtcIsacfix_kHPStCoeffOut2Q30, postfiltdata->HPstates2_fix);
+
+  for (k=0;k<len;k++) {
+    out16[k] = in[k];
+  }
+}
+
+#endif
diff --git a/modules/audio_coding/codecs/isac/fix/source/filterbanks_mips.c b/modules/audio_coding/codecs/isac/fix/source/filterbanks_mips.c
new file mode 100644
index 0000000..949bca7
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/filterbanks_mips.c
@@ -0,0 +1,242 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h"
+
+// WebRtcIsacfix_AllpassFilter2FixDec16 function optimized for MIPSDSP platform.
+// Bit-exact with WebRtcIsacfix_AllpassFilter2FixDec16C from filterbanks.c.
+void WebRtcIsacfix_AllpassFilter2FixDec16MIPS(
+    int16_t* data_ch1,            // Input and output in channel 1, in Q0.
+    int16_t* data_ch2,            // Input and output in channel 2, in Q0.
+    const int16_t* factor_ch1,    // Scaling factor for channel 1, in Q15.
+    const int16_t* factor_ch2,    // Scaling factor for channel 2, in Q15.
+    const int length,             // Length of the data buffers.
+    int32_t* filter_state_ch1,    // Filter state for channel 1, in Q16.
+    int32_t* filter_state_ch2) {  // Filter state for channel 2, in Q16.
+
+  int32_t st0_ch1, st1_ch1;                // channel1 state variables.
+  int32_t st0_ch2, st1_ch2;                // channel2 state variables.
+  int32_t f_ch10, f_ch11, f_ch20, f_ch21;  // factor variables.
+  int32_t r0, r1, r2, r3, r4, r5;          // temporary register variables.
+
+  __asm __volatile (
+    ".set           push                                                  \n\t"
+    ".set           noreorder                                             \n\t"
+    // Load all the state and factor variables.
+    "lh             %[f_ch10],      0(%[factor_ch1])                      \n\t"
+    "lh             %[f_ch20],      0(%[factor_ch2])                      \n\t"
+    "lh             %[f_ch11],      2(%[factor_ch1])                      \n\t"
+    "lh             %[f_ch21],      2(%[factor_ch2])                      \n\t"
+    "lw             %[st0_ch1],     0(%[filter_state_ch1])                \n\t"
+    "lw             %[st1_ch1],     4(%[filter_state_ch1])                \n\t"
+    "lw             %[st0_ch2],     0(%[filter_state_ch2])                \n\t"
+    "lw             %[st1_ch2],     4(%[filter_state_ch2])                \n\t"
+    // Allpass filtering loop.
+   "1:                                                                    \n\t"
+    "lh             %[r0],          0(%[data_ch1])                        \n\t"
+    "lh             %[r1],          0(%[data_ch2])                        \n\t"
+    "addiu          %[length],      %[length],              -1            \n\t"
+    "mul            %[r2],          %[r0],                  %[f_ch10]     \n\t"
+    "mul            %[r3],          %[r1],                  %[f_ch20]     \n\t"
+    "sll            %[r0],          %[r0],                  16            \n\t"
+    "sll            %[r1],          %[r1],                  16            \n\t"
+    "sll            %[r2],          %[r2],                  1             \n\t"
+    "addq_s.w       %[r2],          %[r2],                  %[st0_ch1]    \n\t"
+    "sll            %[r3],          %[r3],                  1             \n\t"
+    "addq_s.w       %[r3],          %[r3],                  %[st0_ch2]    \n\t"
+    "sra            %[r2],          %[r2],                  16            \n\t"
+    "mul            %[st0_ch1],     %[f_ch10],              %[r2]         \n\t"
+    "sra            %[r3],          %[r3],                  16            \n\t"
+    "mul            %[st0_ch2],     %[f_ch20],              %[r3]         \n\t"
+    "mul            %[r4],          %[r2],                  %[f_ch11]     \n\t"
+    "mul            %[r5],          %[r3],                  %[f_ch21]     \n\t"
+    "sll            %[st0_ch1],     %[st0_ch1],             1             \n\t"
+    "subq_s.w       %[st0_ch1],     %[r0],                  %[st0_ch1]    \n\t"
+    "sll            %[st0_ch2],     %[st0_ch2],             1             \n\t"
+    "subq_s.w       %[st0_ch2],     %[r1],                  %[st0_ch2]    \n\t"
+    "sll            %[r4],          %[r4],                  1             \n\t"
+    "addq_s.w       %[r4],          %[r4],                  %[st1_ch1]    \n\t"
+    "sll            %[r5],          %[r5],                  1             \n\t"
+    "addq_s.w       %[r5],          %[r5],                  %[st1_ch2]    \n\t"
+    "sra            %[r4],          %[r4],                  16            \n\t"
+    "mul            %[r0],          %[r4],                  %[f_ch11]     \n\t"
+    "sra            %[r5],          %[r5],                  16            \n\t"
+    "mul            %[r1],          %[r5],                  %[f_ch21]     \n\t"
+    "sh             %[r4],          0(%[data_ch1])                        \n\t"
+    "sh             %[r5],          0(%[data_ch2])                        \n\t"
+    "addiu          %[data_ch1],    %[data_ch1],            2             \n\t"
+    "sll            %[r2],          %[r2],                  16            \n\t"
+    "sll            %[r0],          %[r0],                  1             \n\t"
+    "subq_s.w       %[st1_ch1],     %[r2],                  %[r0]         \n\t"
+    "sll            %[r3],          %[r3],                  16            \n\t"
+    "sll            %[r1],          %[r1],                  1             \n\t"
+    "subq_s.w       %[st1_ch2],     %[r3],                  %[r1]         \n\t"
+    "bgtz           %[length],      1b                                    \n\t"
+    " addiu         %[data_ch2],    %[data_ch2],            2             \n\t"
+    // Store channel states.
+    "sw             %[st0_ch1],     0(%[filter_state_ch1])                \n\t"
+    "sw             %[st1_ch1],     4(%[filter_state_ch1])                \n\t"
+    "sw             %[st0_ch2],     0(%[filter_state_ch2])                \n\t"
+    "sw             %[st1_ch2],     4(%[filter_state_ch2])                \n\t"
+    ".set           pop                                                   \n\t"
+    : [f_ch10] "=&r" (f_ch10), [f_ch20] "=&r" (f_ch20),
+      [f_ch11] "=&r" (f_ch11), [f_ch21] "=&r" (f_ch21),
+      [st0_ch1] "=&r" (st0_ch1), [st1_ch1] "=&r" (st1_ch1),
+      [st0_ch2] "=&r" (st0_ch2), [st1_ch2] "=&r" (st1_ch2),
+      [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2),
+      [r3] "=&r" (r3), [r4] "=&r" (r4), [r5] "=&r" (r5)
+    : [factor_ch1] "r" (factor_ch1), [factor_ch2] "r" (factor_ch2),
+      [filter_state_ch1] "r" (filter_state_ch1),
+      [filter_state_ch2] "r" (filter_state_ch2),
+      [data_ch1] "r" (data_ch1), [data_ch2] "r" (data_ch2),
+      [length] "r" (length)
+    : "memory", "hi", "lo"
+  );
+}
+
+// WebRtcIsacfix_HighpassFilterFixDec32 function optimized for MIPSDSP platform.
+// Bit-exact with WebRtcIsacfix_HighpassFilterFixDec32C from filterbanks.c.
+void WebRtcIsacfix_HighpassFilterFixDec32MIPS(int16_t* io,
+                                              int16_t len,
+                                              const int16_t* coefficient,
+                                              int32_t* state) {
+  int k;
+  int32_t a1, a2, b1, b2, in;
+  int32_t state0 = state[0];
+  int32_t state1 = state[1];
+
+  int32_t c0, c1, c2, c3;
+  int32_t c4, c5, c6, c7;
+  int32_t state0_lo, state0_hi;
+  int32_t state1_lo, state1_hi;
+  int32_t t0, t1, t2, t3, t4, t5;
+
+  __asm  __volatile (
+    "lh         %[c0],         0(%[coeff_ptr])            \n\t"
+    "lh         %[c1],         2(%[coeff_ptr])            \n\t"
+    "lh         %[c2],         4(%[coeff_ptr])            \n\t"
+    "lh         %[c3],         6(%[coeff_ptr])            \n\t"
+    "sra        %[state0_hi],  %[state0],        16       \n\t"
+    "sra        %[state1_hi],  %[state1],        16       \n\t"
+    "andi       %[state0_lo],  %[state0],        0xFFFF   \n\t"
+    "andi       %[state1_lo],  %[state1],        0xFFFF   \n\t"
+    "lh         %[c4],         8(%[coeff_ptr])            \n\t"
+    "lh         %[c5],         10(%[coeff_ptr])           \n\t"
+    "lh         %[c6],         12(%[coeff_ptr])           \n\t"
+    "lh         %[c7],         14(%[coeff_ptr])           \n\t"
+    "sra        %[state0_lo],  %[state0_lo],     1        \n\t"
+    "sra        %[state1_lo],  %[state1_lo],     1        \n\t"
+    : [c0] "=&r" (c0), [c1] "=&r" (c1), [c2] "=&r" (c2), [c3] "=&r" (c3),
+      [c4] "=&r" (c4), [c5] "=&r" (c5), [c6] "=&r" (c6), [c7] "=&r" (c7),
+      [state0_hi] "=&r" (state0_hi), [state0_lo] "=&r" (state0_lo),
+      [state1_hi] "=&r" (state1_hi), [state1_lo] "=&r" (state1_lo)
+    : [coeff_ptr] "r" (coefficient), [state0] "r" (state0),
+      [state1] "r" (state1)
+    : "memory"
+  );
+
+  for (k = 0; k < len; k++) {
+    in = (int32_t)io[k];
+
+    __asm __volatile (
+      ".set      push                                      \n\t"
+      ".set      noreorder                                 \n\t"
+      "mul       %[t2],        %[c4],        %[state0_lo]  \n\t"
+      "mul       %[t0],        %[c5],        %[state0_lo]  \n\t"
+      "mul       %[t1],        %[c4],        %[state0_hi]  \n\t"
+      "mul       %[a1],        %[c5],        %[state0_hi]  \n\t"
+      "mul       %[t5],        %[c6],        %[state1_lo]  \n\t"
+      "mul       %[t3],        %[c7],        %[state1_lo]  \n\t"
+      "mul       %[t4],        %[c6],        %[state1_hi]  \n\t"
+      "mul       %[b1],        %[c7],        %[state1_hi]  \n\t"
+      "shra_r.w  %[t2],        %[t2],        15            \n\t"
+      "shra_r.w  %[t0],        %[t0],        15            \n\t"
+      "addu      %[t1],        %[t1],        %[t2]         \n\t"
+      "addu      %[a1],        %[a1],        %[t0]         \n\t"
+      "sra       %[t1],        %[t1],        16            \n\t"
+      "addu      %[a1],        %[a1],        %[t1]         \n\t"
+      "shra_r.w  %[t5],        %[t5],        15            \n\t"
+      "shra_r.w  %[t3],        %[t3],        15            \n\t"
+      "addu      %[t4],        %[t4],        %[t5]         \n\t"
+      "addu      %[b1],        %[b1],        %[t3]         \n\t"
+      "sra       %[t4],        %[t4],        16            \n\t"
+      "addu      %[b1],        %[b1],        %[t4]         \n\t"
+      "mul       %[t2],        %[c0],        %[state0_lo]  \n\t"
+      "mul       %[t0],        %[c1],        %[state0_lo]  \n\t"
+      "mul       %[t1],        %[c0],        %[state0_hi]  \n\t"
+      "mul       %[a2],        %[c1],        %[state0_hi]  \n\t"
+      "mul       %[t5],        %[c2],        %[state1_lo]  \n\t"
+      "mul       %[t3],        %[c3],        %[state1_lo]  \n\t"
+      "mul       %[t4],        %[c2],        %[state1_hi]  \n\t"
+      "mul       %[b2],        %[c3],        %[state1_hi]  \n\t"
+      "shra_r.w  %[t2],        %[t2],        15            \n\t"
+      "shra_r.w  %[t0],        %[t0],        15            \n\t"
+      "addu      %[t1],        %[t1],        %[t2]         \n\t"
+      "addu      %[a2],        %[a2],        %[t0]         \n\t"
+      "sra       %[t1],        %[t1],        16            \n\t"
+      "addu      %[a2],        %[a2],        %[t1]         \n\t"
+      "shra_r.w  %[t5],        %[t5],        15            \n\t"
+      "shra_r.w  %[t3],        %[t3],        15            \n\t"
+      "addu      %[t4],        %[t4],        %[t5]         \n\t"
+      "addu      %[b2],        %[b2],        %[t3]         \n\t"
+      "sra       %[t4],        %[t4],        16            \n\t"
+      "addu      %[b2],        %[b2],        %[t4]         \n\t"
+      "addu      %[a1],        %[a1],        %[b1]         \n\t"
+      "sra       %[a1],        %[a1],        7             \n\t"
+      "addu      %[a1],        %[a1],        %[in]         \n\t"
+      "sll       %[t0],        %[in],        2             \n\t"
+      "addu      %[a2],        %[a2],        %[b2]         \n\t"
+      "subu      %[t0],        %[t0],        %[a2]         \n\t"
+      "shll_s.w  %[a1],        %[a1],        16            \n\t"
+      "shll_s.w  %[t0],        %[t0],        2             \n\t"
+      "sra       %[a1],        %[a1],        16            \n\t"
+      "addu      %[state1_hi], %[state0_hi], $0            \n\t"
+      "addu      %[state1_lo], %[state0_lo], $0            \n\t"
+      "sra       %[state0_hi], %[t0],        16            \n\t"
+      "andi      %[state0_lo], %[t0],        0xFFFF        \n\t"
+      "sra       %[state0_lo], %[state0_lo], 1             \n\t"
+      ".set      pop                                       \n\t"
+      : [a1] "=&r" (a1), [b1] "=&r" (b1), [a2] "=&r" (a2), [b2] "=&r" (b2),
+        [state0_hi] "+r" (state0_hi), [state0_lo] "+r" (state0_lo),
+        [state1_hi] "+r" (state1_hi), [state1_lo] "+r" (state1_lo),
+        [t0] "=&r" (t0), [t1] "=&r" (t1), [t2] "=&r" (t2),
+        [t3] "=&r" (t3), [t4] "=&r" (t4), [t5] "=&r" (t5)
+      : [c0] "r" (c0), [c1] "r" (c1), [c2] "r" (c2), [c3] "r" (c3),
+        [c4] "r" (c4), [c5] "r" (c5), [c6] "r" (c6), [c7] "r" (c7),
+        [in] "r" (in)
+      : "hi", "lo"
+    );
+    io[k] = (int16_t)a1;
+  }
+  __asm __volatile (
+    ".set            push                                            \n\t"
+    ".set            noreorder                                       \n\t"
+#if !defined(MIPS_DSP_R2_LE)
+    "sll             %[state0_hi],   %[state0_hi],   16              \n\t"
+    "sll             %[state0_lo],   %[state0_lo],   1               \n\t"
+    "sll             %[state1_hi],   %[state1_hi],   16              \n\t"
+    "sll             %[state1_lo],   %[state1_lo],   1               \n\t"
+    "or              %[state0_hi],   %[state0_hi],   %[state0_lo]    \n\t"
+    "or              %[state1_hi],   %[state1_hi],   %[state1_lo]    \n\t"
+#else
+    "sll             %[state0_lo],   %[state0_lo],   1               \n\t"
+    "sll             %[state1_lo],   %[state1_lo],   1               \n\t"
+    "precr_sra.ph.w  %[state0_hi],   %[state0_lo],   0               \n\t"
+    "precr_sra.ph.w  %[state1_hi],   %[state1_lo],   0               \n\t"
+#endif
+    "sw              %[state0_hi],   0(%[state])                     \n\t"
+    "sw              %[state1_hi],   4(%[state])                     \n\t"
+    ".set            pop                                             \n\t"
+    : [state0_hi] "+r" (state0_hi), [state0_lo] "+r" (state0_lo),
+      [state1_hi] "+r" (state1_hi), [state1_lo] "+r" (state1_lo)
+    : [state] "r" (state)
+    : "memory"
+  );
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/filterbanks_neon.c b/modules/audio_coding/codecs/isac/fix/source/filterbanks_neon.c
new file mode 100644
index 0000000..fd29ccb
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/filterbanks_neon.c
@@ -0,0 +1,277 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Contains a function for WebRtcIsacfix_AllpassFilter2FixDec16Neon()
+// in iSAC codec, optimized for ARM Neon platform. Bit exact with function
+// WebRtcIsacfix_AllpassFilter2FixDec16C() in filterbanks.c. Prototype
+// C code is at end of this file.
+
+#include <arm_neon.h>
+
+#include "rtc_base/checks.h"
+
+void WebRtcIsacfix_AllpassFilter2FixDec16Neon(
+    int16_t* data_ch1,  // Input and output in channel 1, in Q0
+    int16_t* data_ch2,  // Input and output in channel 2, in Q0
+    const int16_t* factor_ch1,  // Scaling factor for channel 1, in Q15
+    const int16_t* factor_ch2,  // Scaling factor for channel 2, in Q15
+    const int length,  // Length of the data buffers
+    int32_t* filter_state_ch1,  // Filter state for channel 1, in Q16
+    int32_t* filter_state_ch2) {  // Filter state for channel 2, in Q16
+  RTC_DCHECK_EQ(0, length % 2);
+  int n = 0;
+  int16x4_t factorv;
+  int16x4_t datav;
+  int32x4_t statev;
+
+  // Load factor_ch1 and factor_ch2.
+  factorv = vld1_dup_s16(factor_ch1);
+  factorv = vld1_lane_s16(factor_ch1 + 1, factorv, 1);
+  factorv = vld1_lane_s16(factor_ch2, factorv, 2);
+  factorv = vld1_lane_s16(factor_ch2 + 1, factorv, 3);
+
+  // Load filter_state_ch1[0] and filter_state_ch2[0].
+  statev = vld1q_dup_s32(filter_state_ch1);
+  statev = vld1q_lane_s32(filter_state_ch2, statev, 2);
+
+  // Loop unrolling preprocessing.
+  int32x4_t a;
+  int16x4_t tmp1, tmp2;
+
+  // Load data_ch1[0] and data_ch2[0].
+  datav = vld1_dup_s16(data_ch1);
+  datav = vld1_lane_s16(data_ch2, datav, 2);
+
+  a = vqdmlal_s16(statev, datav, factorv);
+  tmp1 = vshrn_n_s32(a, 16);
+
+  // Update filter_state_ch1[0] and filter_state_ch2[0].
+  statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp1, factorv);
+
+  // Load filter_state_ch1[1] and filter_state_ch2[1].
+  statev = vld1q_lane_s32(filter_state_ch1 + 1, statev, 1);
+  statev = vld1q_lane_s32(filter_state_ch2 + 1, statev, 3);
+
+  // Load data_ch1[1] and data_ch2[1].
+  tmp1 = vld1_lane_s16(data_ch1 + 1, tmp1, 1);
+  tmp1 = vld1_lane_s16(data_ch2 + 1, tmp1, 3);
+  datav = vrev32_s16(tmp1);
+
+  // Loop unrolling processing.
+  for (n = 0; n < length - 2; n += 2) {
+    a = vqdmlal_s16(statev, datav, factorv);
+    tmp1 = vshrn_n_s32(a, 16);
+    // Store data_ch1[n] and data_ch2[n].
+    vst1_lane_s16(data_ch1 + n, tmp1, 1);
+    vst1_lane_s16(data_ch2 + n, tmp1, 3);
+
+    // Update filter_state_ch1[0], filter_state_ch1[1]
+    // and filter_state_ch2[0], filter_state_ch2[1].
+    statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp1, factorv);
+
+    // Load data_ch1[n + 2] and data_ch2[n + 2].
+    tmp1 = vld1_lane_s16(data_ch1 + n + 2, tmp1, 1);
+    tmp1 = vld1_lane_s16(data_ch2 + n + 2, tmp1, 3);
+    datav = vrev32_s16(tmp1);
+
+    a = vqdmlal_s16(statev, datav, factorv);
+    tmp2 = vshrn_n_s32(a, 16);
+    // Store data_ch1[n + 1] and data_ch2[n + 1].
+    vst1_lane_s16(data_ch1 + n + 1, tmp2, 1);
+    vst1_lane_s16(data_ch2 + n + 1, tmp2, 3);
+
+    // Update filter_state_ch1[0], filter_state_ch1[1]
+    // and filter_state_ch2[0], filter_state_ch2[1].
+    statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp2, factorv);
+
+    // Load data_ch1[n + 3] and data_ch2[n + 3].
+    tmp2 = vld1_lane_s16(data_ch1 + n + 3, tmp2, 1);
+    tmp2 = vld1_lane_s16(data_ch2 + n + 3, tmp2, 3);
+    datav = vrev32_s16(tmp2);
+  }
+
+  // Loop unrolling post-processing.
+  a = vqdmlal_s16(statev, datav, factorv);
+  tmp1 = vshrn_n_s32(a, 16);
+  // Store data_ch1[n] and data_ch2[n].
+  vst1_lane_s16(data_ch1 + n, tmp1, 1);
+  vst1_lane_s16(data_ch2 + n, tmp1, 3);
+
+  // Update filter_state_ch1[0], filter_state_ch1[1]
+  // and filter_state_ch2[0], filter_state_ch2[1].
+  statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp1, factorv);
+  // Store filter_state_ch1[0] and filter_state_ch2[0].
+  vst1q_lane_s32(filter_state_ch1, statev, 0);
+  vst1q_lane_s32(filter_state_ch2, statev, 2);
+
+  datav = vrev32_s16(tmp1);
+  a = vqdmlal_s16(statev, datav, factorv);
+  tmp2 = vshrn_n_s32(a, 16);
+  // Store data_ch1[n + 1] and data_ch2[n + 1].
+  vst1_lane_s16(data_ch1 + n + 1, tmp2, 1);
+  vst1_lane_s16(data_ch2 + n + 1, tmp2, 3);
+
+  // Update filter_state_ch1[1] and filter_state_ch2[1].
+  statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp2, factorv);
+  // Store filter_state_ch1[1] and filter_state_ch2[1].
+  vst1q_lane_s32(filter_state_ch1 + 1, statev, 1);
+  vst1q_lane_s32(filter_state_ch2 + 1, statev, 3);
+}
+
+// This function is the prototype for above neon optimized function.
+//void AllpassFilter2FixDec16BothChannels(
+//    int16_t *data_ch1,  // Input and output in channel 1, in Q0
+//    int16_t *data_ch2,  // Input and output in channel 2, in Q0
+//    const int16_t *factor_ch1,  // Scaling factor for channel 1, in Q15
+//    const int16_t *factor_ch2,  // Scaling factor for channel 2, in Q15
+//    const int length,  // Length of the data buffers
+//    int32_t *filter_state_ch1,  // Filter state for channel 1, in Q16
+//    int32_t *filter_state_ch2) {  // Filter state for channel 2, in Q16
+//  int n = 0;
+//  int32_t state0_ch1 = filter_state_ch1[0], state1_ch1 = filter_state_ch1[1];
+//  int32_t state0_ch2 = filter_state_ch2[0], state1_ch2 = filter_state_ch2[1];
+//  int16_t sample0_ch1 = 0, sample0_ch2 = 0;
+//  int16_t sample1_ch1 = 0, sample1_ch2  = 0;
+//  int32_t a0_ch1 = 0, a0_ch2 = 0;
+//  int32_t b0_ch1 = 0, b0_ch2 = 0;
+//
+//  int32_t a1_ch1 = 0, a1_ch2 = 0;
+//  int32_t b1_ch1 = 0, b1_ch2 = 0;
+//  int32_t b2_ch1  = 0, b2_ch2 = 0;
+//
+//  // Loop unrolling preprocessing.
+//
+//  sample0_ch1 = data_ch1[n];
+//  sample0_ch2 = data_ch2[n];
+//
+//  a0_ch1 = (factor_ch1[0] * sample0_ch1) << 1;
+//  a0_ch2 = (factor_ch2[0] * sample0_ch2) << 1;
+//
+//  b0_ch1 = WebRtcSpl_AddSatW32(a0_ch1, state0_ch1);
+//  b0_ch2 = WebRtcSpl_AddSatW32(a0_ch2, state0_ch2); //Q16+Q16=Q16
+//
+//  a0_ch1 = -factor_ch1[0] * (int16_t)(b0_ch1 >> 16);
+//  a0_ch2 = -factor_ch2[0] * (int16_t)(b0_ch2 >> 16);
+//
+//  state0_ch1 = WebRtcSpl_AddSatW32(a0_ch1 <<1, (uint32_t)sample0_ch1 << 16);
+//  state0_ch2 = WebRtcSpl_AddSatW32(a0_ch2 <<1, (uint32_t)sample0_ch2 << 16);
+//
+//  sample1_ch1 = data_ch1[n + 1];
+//  sample0_ch1 = (int16_t) (b0_ch1 >> 16); //Save as Q0
+//  sample1_ch2  = data_ch2[n + 1];
+//  sample0_ch2 = (int16_t) (b0_ch2 >> 16); //Save as Q0
+//
+//
+//  for (n = 0; n < length - 2; n += 2) {
+//    a1_ch1 = (factor_ch1[0] * sample1_ch1) << 1;
+//    a0_ch1 = (factor_ch1[1] * sample0_ch1) << 1;
+//    a1_ch2 = (factor_ch2[0] * sample1_ch2) << 1;
+//    a0_ch2 = (factor_ch2[1] * sample0_ch2) << 1;
+//
+//    b1_ch1 = WebRtcSpl_AddSatW32(a1_ch1, state0_ch1);
+//    b0_ch1 = WebRtcSpl_AddSatW32(a0_ch1, state1_ch1); //Q16+Q16=Q16
+//    b1_ch2 = WebRtcSpl_AddSatW32(a1_ch2, state0_ch2); //Q16+Q16=Q16
+//    b0_ch2 = WebRtcSpl_AddSatW32(a0_ch2, state1_ch2); //Q16+Q16=Q16
+//
+//    a1_ch1 = -factor_ch1[0] * (int16_t)(b1_ch1 >> 16);
+//    a0_ch1 = -factor_ch1[1] * (int16_t)(b0_ch1 >> 16);
+//    a1_ch2 = -factor_ch2[0] * (int16_t)(b1_ch2 >> 16);
+//    a0_ch2 = -factor_ch2[1] * (int16_t)(b0_ch2 >> 16);
+//
+//    state0_ch1 = WebRtcSpl_AddSatW32(a1_ch1<<1, (uint32_t)sample1_ch1 <<16);
+//    state1_ch1 = WebRtcSpl_AddSatW32(a0_ch1<<1, (uint32_t)sample0_ch1 <<16);
+//    state0_ch2 = WebRtcSpl_AddSatW32(a1_ch2<<1, (uint32_t)sample1_ch2 <<16);
+//    state1_ch2 = WebRtcSpl_AddSatW32(a0_ch2<<1, (uint32_t)sample0_ch2 <<16);
+//
+//    sample0_ch1 = data_ch1[n + 2];
+//    sample1_ch1 = (int16_t) (b1_ch1 >> 16); //Save as Q0
+//    sample0_ch2 = data_ch2[n + 2];
+//    sample1_ch2  = (int16_t) (b1_ch2 >> 16); //Save as Q0
+//
+//    a0_ch1 = (factor_ch1[0] * sample0_ch1) << 1;
+//    a1_ch1 = (factor_ch1[1] * sample1_ch1) << 1;
+//    a0_ch2 = (factor_ch2[0] * sample0_ch2) << 1;
+//    a1_ch2 = (factor_ch2[1] * sample1_ch2) << 1;
+//
+//    b2_ch1 = WebRtcSpl_AddSatW32(a0_ch1, state0_ch1);
+//    b1_ch1 = WebRtcSpl_AddSatW32(a1_ch1, state1_ch1); //Q16+Q16=Q16
+//    b2_ch2 = WebRtcSpl_AddSatW32(a0_ch2, state0_ch2); //Q16+Q16=Q16
+//    b1_ch2 = WebRtcSpl_AddSatW32(a1_ch2, state1_ch2); //Q16+Q16=Q16
+//
+//    a0_ch1 = -factor_ch1[0] * (int16_t)(b2_ch1 >> 16);
+//    a1_ch1 = -factor_ch1[1] * (int16_t)(b1_ch1 >> 16);
+//    a0_ch2 = -factor_ch2[0] * (int16_t)(b2_ch2 >> 16);
+//    a1_ch2 = -factor_ch2[1] * (int16_t)(b1_ch2 >> 16);
+//
+//    state0_ch1 = WebRtcSpl_AddSatW32(a0_ch1<<1, (uint32_t)sample0_ch1<<16);
+//    state1_ch1 = WebRtcSpl_AddSatW32(a1_ch1<<1, (uint32_t)sample1_ch1<<16);
+//    state0_ch2 = WebRtcSpl_AddSatW32(a0_ch2<<1, (uint32_t)sample0_ch2<<16);
+//    state1_ch2 = WebRtcSpl_AddSatW32(a1_ch2<<1, (uint32_t)sample1_ch2<<16);
+//
+//
+//    sample1_ch1 = data_ch1[n + 3];
+//    sample0_ch1 = (int16_t) (b2_ch1  >> 16); //Save as Q0
+//    sample1_ch2 = data_ch2[n + 3];
+//    sample0_ch2 = (int16_t) (b2_ch2 >> 16); //Save as Q0
+//
+//    data_ch1[n]     = (int16_t) (b0_ch1 >> 16); //Save as Q0
+//    data_ch1[n + 1] = (int16_t) (b1_ch1 >> 16); //Save as Q0
+//    data_ch2[n]     = (int16_t) (b0_ch2 >> 16);
+//    data_ch2[n + 1] = (int16_t) (b1_ch2 >> 16);
+//  }
+//
+//  // Loop unrolling post-processing.
+//
+//  a1_ch1 = (factor_ch1[0] * sample1_ch1) << 1;
+//  a0_ch1 = (factor_ch1[1] * sample0_ch1) << 1;
+//  a1_ch2 = (factor_ch2[0] * sample1_ch2) << 1;
+//  a0_ch2 = (factor_ch2[1] * sample0_ch2) << 1;
+//
+//  b1_ch1 = WebRtcSpl_AddSatW32(a1_ch1, state0_ch1);
+//  b0_ch1 = WebRtcSpl_AddSatW32(a0_ch1, state1_ch1);
+//  b1_ch2 = WebRtcSpl_AddSatW32(a1_ch2, state0_ch2);
+//  b0_ch2 = WebRtcSpl_AddSatW32(a0_ch2, state1_ch2);
+//
+//  a1_ch1 = -factor_ch1[0] * (int16_t)(b1_ch1 >> 16);
+//  a0_ch1 = -factor_ch1[1] * (int16_t)(b0_ch1 >> 16);
+//  a1_ch2 = -factor_ch2[0] * (int16_t)(b1_ch2 >> 16);
+//  a0_ch2 = -factor_ch2[1] * (int16_t)(b0_ch2 >> 16);
+//
+//  state0_ch1 = WebRtcSpl_AddSatW32(a1_ch1<<1, (uint32_t)sample1_ch1 << 16);
+//  state1_ch1 = WebRtcSpl_AddSatW32(a0_ch1<<1, (uint32_t)sample0_ch1 << 16);
+//  state0_ch2 = WebRtcSpl_AddSatW32(a1_ch2<<1, (uint32_t)sample1_ch2 << 16);
+//  state1_ch2 = WebRtcSpl_AddSatW32(a0_ch2<<1, (uint32_t)sample0_ch2 << 16);
+//
+//  data_ch1[n] = (int16_t) (b0_ch1 >> 16); //Save as Q0
+//  data_ch2[n] = (int16_t) (b0_ch2 >> 16);
+//
+//  sample1_ch1 = (int16_t) (b1_ch1 >> 16); //Save as Q0
+//  sample1_ch2  = (int16_t) (b1_ch2 >> 16); //Save as Q0
+//
+//  a1_ch1 = (factor_ch1[1] * sample1_ch1) << 1;
+//  a1_ch2 = (factor_ch2[1] * sample1_ch2) << 1;
+//
+//  b1_ch1 = WebRtcSpl_AddSatW32(a1_ch1, state1_ch1); //Q16+Q16=Q16
+//  b1_ch2 = WebRtcSpl_AddSatW32(a1_ch2, state1_ch2); //Q16+Q16=Q16
+//
+//  a1_ch1 = -factor_ch1[1] * (int16_t)(b1_ch1 >> 16);
+//  a1_ch2 = -factor_ch2[1] * (int16_t)(b1_ch2 >> 16);
+//
+//  state1_ch1 = WebRtcSpl_AddSatW32(a1_ch1<<1, (uint32_t)sample1_ch1<<16);
+//  state1_ch2 = WebRtcSpl_AddSatW32(a1_ch2<<1, (uint32_t)sample1_ch2<<16);
+//
+//  data_ch1[n + 1] = (int16_t) (b1_ch1 >> 16); //Save as Q0
+//  data_ch2[n + 1] = (int16_t) (b1_ch2 >> 16);
+//
+//  filter_state_ch1[0] = state0_ch1;
+//  filter_state_ch1[1] = state1_ch1;
+//  filter_state_ch2[0] = state0_ch2;
+//  filter_state_ch2[1] = state1_ch2;
+//}
diff --git a/modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc b/modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc
new file mode 100644
index 0000000..d17f4a5
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc
@@ -0,0 +1,106 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h"
+#include "modules/audio_coding/codecs/isac/fix/source/filterbank_tables.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "rtc_base/sanitizer.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+class FilterBanksTest : public testing::Test {
+ protected:
+  // Pass a function pointer to the Tester function.
+  void RTC_NO_SANITIZE("signed-integer-overflow")  // bugs.webrtc.org/5513
+  CalculateResidualEnergyTester(AllpassFilter2FixDec16
+                                AllpassFilter2FixDec16Function) {
+    const int kSamples = QLOOKAHEAD;
+    const int kState = 2;
+    int16_t data_ch1[kSamples] = {0};
+    int16_t data_ch2[kSamples] = {0};
+    int32_t state_ch1[kState] = {0};
+    int32_t state_ch2[kState] = {0};
+    const int32_t out_state_ch1[kState] = {-809122714, 1645972152};
+    const int32_t out_state_ch2[kState] = {428019288, 1057309936};
+    const int32_t out_data_ch1[kSamples] = {0, 0, 347, 10618, 16718, -7089,
+        32767, 16913, 27042, 8377, -22973, -28372, -27603, -14804, 398, -25332,
+        -11200, 18044, 25223, -6839, 1116, -23984, 32717, 7364};
+    const int32_t out_data_ch2[kSamples] = {0, 0, 3010, 22351, 21106, 16969,
+        -2095, -664, 3513, -30980, 32767, -23839, 13335, 20289, -6831, 339,
+        -17207, 32767, 4959, 6177, 32767, 16599, -4747, 20504};
+    int sign = 1;
+
+    for (int i = 0; i < kSamples; i++) {
+      sign *= -1;
+      data_ch1[i] = sign * WEBRTC_SPL_WORD32_MAX / (i * i + 1);
+      data_ch2[i] = sign * WEBRTC_SPL_WORD32_MIN / (i * i + 1);
+      // UBSan: -1 * -2147483648 cannot be represented in type 'int'
+    };
+
+    AllpassFilter2FixDec16Function(data_ch1,
+                                   data_ch2,
+                                   WebRtcIsacfix_kUpperApFactorsQ15,
+                                   WebRtcIsacfix_kLowerApFactorsQ15,
+                                   kSamples,
+                                   state_ch1,
+                                   state_ch2);
+
+    for (int i = 0; i < kSamples; i++) {
+      EXPECT_EQ(out_data_ch1[i], data_ch1[i]);
+      EXPECT_EQ(out_data_ch2[i], data_ch2[i]);
+    }
+    for (int i = 0; i < kState; i++) {
+      EXPECT_EQ(out_state_ch1[i], state_ch1[i]);
+      EXPECT_EQ(out_state_ch2[i], state_ch2[i]);
+    }
+  }
+};
+
+TEST_F(FilterBanksTest, AllpassFilter2FixDec16Test) {
+  CalculateResidualEnergyTester(WebRtcIsacfix_AllpassFilter2FixDec16C);
+#if defined(WEBRTC_HAS_NEON)
+  CalculateResidualEnergyTester(WebRtcIsacfix_AllpassFilter2FixDec16Neon);
+#endif
+}
+
+TEST_F(FilterBanksTest, HighpassFilterFixDec32Test) {
+  const int kSamples = 20;
+  int16_t in[kSamples];
+  int32_t state[2] = {12345, 987654};
+#ifdef WEBRTC_ARCH_ARM_V7
+  int32_t out[kSamples] = {-1040, -1035, -22875, -1397, -27604, 20018, 7917,
+    -1279, -8552, -14494, -7558, -23537, -27258, -30554, -32768, -3432, -32768,
+    25215, -27536, 22436};
+#else
+  int32_t out[kSamples] = {-1040, -1035, -22875, -1397, -27604, 20017, 7915,
+    -1280, -8554, -14496, -7561, -23541, -27263, -30560, -32768, -3441, -32768,
+    25203, -27550, 22419};
+#endif
+  HighpassFilterFixDec32 WebRtcIsacfix_HighpassFilterFixDec32;
+#if defined(MIPS_DSP_R1_LE)
+  WebRtcIsacfix_HighpassFilterFixDec32 =
+      WebRtcIsacfix_HighpassFilterFixDec32MIPS;
+#else
+  WebRtcIsacfix_HighpassFilterFixDec32 = WebRtcIsacfix_HighpassFilterFixDec32C;
+#endif
+
+  for (int i = 0; i < kSamples; i++) {
+    in[i] = WEBRTC_SPL_WORD32_MAX / (i + 1);
+  }
+
+  WebRtcIsacfix_HighpassFilterFixDec32(in, kSamples,
+      WebRtcIsacfix_kHPStCoeffOut1Q30, state);
+
+  for (int i = 0; i < kSamples; i++) {
+    EXPECT_EQ(out[i], in[i]);
+  }
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/filters.c b/modules/audio_coding/codecs/isac/fix/source/filters.c
new file mode 100644
index 0000000..85860f7
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/filters.c
@@ -0,0 +1,112 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/checks.h"
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+
+// Autocorrelation function in fixed point.
+// NOTE! Different from SPLIB-version in how it scales the signal.
+int WebRtcIsacfix_AutocorrC(int32_t* __restrict r,
+                            const int16_t* __restrict x,
+                            int16_t N,
+                            int16_t order,
+                            int16_t* __restrict scale) {
+  int i = 0;
+  int j = 0;
+  int16_t scaling = 0;
+  int32_t sum = 0;
+  uint32_t temp = 0;
+  int64_t prod = 0;
+
+  // The ARM assembly code assumptoins.
+  RTC_DCHECK_EQ(0, N % 4);
+  RTC_DCHECK_GE(N, 8);
+
+  // Calculate r[0].
+  for (i = 0; i < N; i++) {
+    prod += x[i] * x[i];
+  }
+
+  // Calculate scaling (the value of shifting).
+  temp = (uint32_t)(prod >> 31);
+  if(temp == 0) {
+    scaling = 0;
+  } else {
+    scaling = 32 - WebRtcSpl_NormU32(temp);
+  }
+  r[0] = (int32_t)(prod >> scaling);
+
+  // Perform the actual correlation calculation.
+  for (i = 1; i < order + 1; i++) {
+    prod = 0;
+    for (j = 0; j < N - i; j++) {
+      prod += x[j] * x[i + j];
+    }
+    sum = (int32_t)(prod >> scaling);
+    r[i] = sum;
+  }
+
+  *scale = scaling;
+
+  return(order + 1);
+}
+
+static const int32_t kApUpperQ15[ALLPASSSECTIONS] = { 1137, 12537 };
+static const int32_t kApLowerQ15[ALLPASSSECTIONS] = { 5059, 24379 };
+
+
+static void AllpassFilterForDec32(int16_t         *InOut16, //Q0
+                                  const int32_t   *APSectionFactors, //Q15
+                                  int16_t         lengthInOut,
+                                  int32_t          *FilterState) //Q16
+{
+  int n, j;
+  int32_t a, b;
+
+  for (j=0; j<ALLPASSSECTIONS; j++) {
+    for (n=0;n<lengthInOut;n+=2){
+      a = WEBRTC_SPL_MUL_16_32_RSFT16(InOut16[n], APSectionFactors[j]); //Q0*Q31=Q31 shifted 16 gives Q15
+      a <<= 1;  // Q15 -> Q16
+      b = WebRtcSpl_AddSatW32(a, FilterState[j]);  //Q16+Q16=Q16
+      // |a| in Q15 (Q0*Q31=Q31 shifted 16 gives Q15).
+      a = WEBRTC_SPL_MUL_16_32_RSFT16(b >> 16, -APSectionFactors[j]);
+      // FilterState[j]: Q15<<1 + Q0<<16 = Q16 + Q16 = Q16
+      FilterState[j] = WebRtcSpl_AddSatW32(a << 1, (uint32_t)InOut16[n] << 16);
+      InOut16[n] = (int16_t)(b >> 16);  // Save as Q0.
+    }
+  }
+}
+
+
+
+
+void WebRtcIsacfix_DecimateAllpass32(const int16_t *in,
+                                     int32_t *state_in,        /* array of size: 2*ALLPASSSECTIONS+1 */
+                                     int16_t N,                /* number of input samples */
+                                     int16_t *out)             /* array of size N/2 */
+{
+  int n;
+  int16_t data_vec[PITCH_FRAME_LEN];
+
+  /* copy input */
+  memcpy(data_vec + 1, in, sizeof(int16_t) * (N - 1));
+
+  data_vec[0] = (int16_t)(state_in[2 * ALLPASSSECTIONS] >> 16);  // z^-1 state.
+  state_in[2 * ALLPASSSECTIONS] = (uint32_t)in[N - 1] << 16;
+
+
+
+  AllpassFilterForDec32(data_vec+1, kApUpperQ15, N, state_in);
+  AllpassFilterForDec32(data_vec, kApLowerQ15, N, state_in+ALLPASSSECTIONS);
+
+  for (n=0;n<N/2;n++) {
+    out[n] = WebRtcSpl_AddSatW16(data_vec[2 * n], data_vec[2 * n + 1]);
+  }
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/filters_mips.c b/modules/audio_coding/codecs/isac/fix/source/filters_mips.c
new file mode 100644
index 0000000..ded3d03
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/filters_mips.c
@@ -0,0 +1,365 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+
+// MIPS optimized implementation of the Autocorrelation function in fixed point.
+// NOTE! Different from SPLIB-version in how it scales the signal.
+int WebRtcIsacfix_AutocorrMIPS(int32_t* __restrict r,
+                               const int16_t* __restrict x,
+                               int16_t N,
+                               int16_t order,
+                               int16_t* __restrict scale) {
+  int i = 0;
+  int16_t scaling = 0;
+  int16_t* in = (int16_t*)x;
+  int loop_size = (int)(N >> 3);
+  int count = (int)(N & 7);
+  // Declare temporary variables used as registry values.
+  int32_t r0, r1, r2, r3;
+#if !defined(MIPS_DSP_R2_LE)
+  // For non-DSPR2 optimizations 4 more registers are used.
+  int32_t r4, r5, r6, r7;
+#endif
+
+  // Calculate r[0] and scaling needed.
+  __asm __volatile (
+    ".set          push                                            \n\t"
+    ".set          noreorder                                       \n\t"
+    "mult          $0,             $0                              \n\t"
+    // Loop is unrolled 8 times, set accumulator to zero in branch delay slot.
+    "beqz          %[loop_size],   2f                              \n\t"
+    " mult         $0,             $0                              \n\t"
+   "1:                                                             \n\t"
+    // Load 8 samples per loop iteration.
+#if defined(MIPS_DSP_R2_LE)
+    "ulw           %[r0],          0(%[in])                        \n\t"
+    "ulw           %[r1],          4(%[in])                        \n\t"
+    "ulw           %[r2],          8(%[in])                        \n\t"
+    "ulw           %[r3],          12(%[in])                       \n\t"
+#else
+    "lh            %[r0],          0(%[in])                        \n\t"
+    "lh            %[r1],          2(%[in])                        \n\t"
+    "lh            %[r2],          4(%[in])                        \n\t"
+    "lh            %[r3],          6(%[in])                        \n\t"
+    "lh            %[r4],          8(%[in])                        \n\t"
+    "lh            %[r5],          10(%[in])                       \n\t"
+    "lh            %[r6],          12(%[in])                       \n\t"
+    "lh            %[r7],          14(%[in])                       \n\t"
+#endif
+    "addiu         %[loop_size],   %[loop_size],   -1              \n\t"
+    // Multiply and accumulate.
+#if defined(MIPS_DSP_R2_LE)
+    "dpa.w.ph      $ac0,           %[r0],          %[r0]           \n\t"
+    "dpa.w.ph      $ac0,           %[r1],          %[r1]           \n\t"
+    "dpa.w.ph      $ac0,           %[r2],          %[r2]           \n\t"
+    "dpa.w.ph      $ac0,           %[r3],          %[r3]           \n\t"
+#else
+    "madd          %[r0],          %[r0]                           \n\t"
+    "madd          %[r1],          %[r1]                           \n\t"
+    "madd          %[r2],          %[r2]                           \n\t"
+    "madd          %[r3],          %[r3]                           \n\t"
+    "madd          %[r4],          %[r4]                           \n\t"
+    "madd          %[r5],          %[r5]                           \n\t"
+    "madd          %[r6],          %[r6]                           \n\t"
+    "madd          %[r7],          %[r7]                           \n\t"
+#endif
+    "bnez          %[loop_size],   1b                              \n\t"
+    " addiu        %[in],          %[in],          16              \n\t"
+   "2:                                                             \n\t"
+    "beqz          %[count],       4f                              \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    " extr.w       %[r0],          $ac0,           31              \n\t"
+#else
+    " mfhi         %[r2]                                           \n\t"
+#endif
+    // Process remaining samples (if any).
+   "3:                                                             \n\t"
+    "lh            %[r0],          0(%[in])                        \n\t"
+    "addiu         %[count],       %[count],       -1              \n\t"
+    "madd          %[r0],          %[r0]                           \n\t"
+    "bnez          %[count],       3b                              \n\t"
+    " addiu        %[in],          %[in],          2               \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "extr.w        %[r0],          $ac0,           31              \n\t"
+#else
+    "mfhi          %[r2]                                           \n\t"
+#endif
+   "4:                                                             \n\t"
+#if !defined(MIPS_DSP_R1_LE)
+    "mflo          %[r3]                                           \n\t"
+    "sll           %[r0],          %[r2],          1               \n\t"
+    "srl           %[r1],          %[r3],          31              \n\t"
+    "addu          %[r0],          %[r0],          %[r1]           \n\t"
+#endif
+    // Calculate scaling (the value of shifting).
+    "clz           %[r1],          %[r0]                           \n\t"
+    "addiu         %[r1],          %[r1],          -32             \n\t"
+    "subu          %[scaling],     $0,             %[r1]           \n\t"
+    "slti          %[r1],          %[r0],          0x1             \n\t"
+    "movn          %[scaling],     $0,             %[r1]           \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "extrv.w       %[r0],          $ac0,           %[scaling]      \n\t"
+    "mfhi          %[r2],          $ac0                            \n\t"
+#else
+    "addiu         %[r1],          %[scaling],     -32             \n\t"
+    "subu          %[r1],          $0,             %[r1]           \n\t"
+    "sllv          %[r1],          %[r2],          %[r1]           \n\t"
+    "srlv          %[r0],          %[r3],          %[scaling]      \n\t"
+    "addu          %[r0],          %[r0],          %[r1]           \n\t"
+#endif
+    "slti          %[r1],          %[scaling],     32              \n\t"
+    "movz          %[r0],          %[r2],          %[r1]           \n\t"
+    ".set          pop                                             \n\t"
+    : [loop_size] "+r" (loop_size), [in] "+r" (in), [r0] "=&r" (r0),
+      [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+#if !defined(MIPS_DSP_R2_LE)
+      [r4] "=&r" (r4), [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
+#endif
+      [count] "+r" (count), [scaling] "=r" (scaling)
+    : [N] "r" (N)
+    : "memory", "hi", "lo"
+  );
+  r[0] = r0;
+
+  // Correlation calculation is divided in 3 cases depending on the scaling
+  // value (different accumulator manipulation needed). Three slightly different
+  // loops are written in order to avoid branches inside the loop.
+  if (scaling == 0) {
+    // In this case, the result will be in low part of the accumulator.
+    for (i = 1; i < order + 1; i++) {
+      in = (int16_t*)x;
+      int16_t* in1 = (int16_t*)x + i;
+      count = N - i;
+      loop_size = (count) >> 2;
+      __asm  __volatile (
+        ".set        push                                          \n\t"
+        ".set        noreorder                                     \n\t"
+        "mult        $0,             $0                            \n\t"
+        "beqz        %[loop_size],   2f                            \n\t"
+        " andi       %[count],       %[count],       0x3           \n\t"
+        // Loop processing 4 pairs of samples per iteration.
+       "1:                                                         \n\t"
+#if defined(MIPS_DSP_R2_LE)
+        "ulw         %[r0],          0(%[in])                      \n\t"
+        "ulw         %[r1],          0(%[in1])                     \n\t"
+        "ulw         %[r2],          4(%[in])                      \n\t"
+        "ulw         %[r3],          4(%[in1])                     \n\t"
+#else
+        "lh          %[r0],          0(%[in])                      \n\t"
+        "lh          %[r1],          0(%[in1])                     \n\t"
+        "lh          %[r2],          2(%[in])                      \n\t"
+        "lh          %[r3],          2(%[in1])                     \n\t"
+        "lh          %[r4],          4(%[in])                      \n\t"
+        "lh          %[r5],          4(%[in1])                     \n\t"
+        "lh          %[r6],          6(%[in])                      \n\t"
+        "lh          %[r7],          6(%[in1])                     \n\t"
+#endif
+        "addiu       %[loop_size],   %[loop_size],   -1            \n\t"
+#if defined(MIPS_DSP_R2_LE)
+        "dpa.w.ph    $ac0,           %[r0],          %[r1]         \n\t"
+        "dpa.w.ph    $ac0,           %[r2],          %[r3]         \n\t"
+#else
+        "madd        %[r0],          %[r1]                         \n\t"
+        "madd        %[r2],          %[r3]                         \n\t"
+        "madd        %[r4],          %[r5]                         \n\t"
+        "madd        %[r6],          %[r7]                         \n\t"
+#endif
+        "addiu       %[in],          %[in],          8             \n\t"
+        "bnez        %[loop_size],   1b                            \n\t"
+        " addiu      %[in1],         %[in1],         8             \n\t"
+       "2:                                                         \n\t"
+        "beqz        %[count],       4f                            \n\t"
+        " mflo       %[r0]                                         \n\t"
+        // Process remaining samples (if any).
+       "3:                                                         \n\t"
+        "lh          %[r0],          0(%[in])                      \n\t"
+        "lh          %[r1],          0(%[in1])                     \n\t"
+        "addiu       %[count],       %[count],       -1            \n\t"
+        "addiu       %[in],          %[in],          2             \n\t"
+        "madd        %[r0],          %[r1]                         \n\t"
+        "bnez        %[count],       3b                            \n\t"
+        " addiu      %[in1],         %[in1],         2             \n\t"
+        "mflo        %[r0]                                         \n\t"
+       "4:                                                         \n\t"
+        ".set        pop                                           \n\t"
+        : [loop_size] "+r" (loop_size), [in] "+r" (in), [in1] "+r" (in1),
+#if !defined(MIPS_DSP_R2_LE)
+          [r4] "=&r" (r4), [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
+#endif
+          [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+          [count] "+r" (count)
+        :
+        : "memory", "hi", "lo"
+      );
+      r[i] = r0;
+    }
+  } else if (scaling == 32) {
+    // In this case, the result will be high part of the accumulator.
+    for (i = 1; i < order + 1; i++) {
+      in = (int16_t*)x;
+      int16_t* in1 = (int16_t*)x + i;
+      count = N - i;
+      loop_size = (count) >> 2;
+      __asm __volatile (
+        ".set        push                                          \n\t"
+        ".set        noreorder                                     \n\t"
+        "mult        $0,             $0                            \n\t"
+        "beqz        %[loop_size],   2f                            \n\t"
+        " andi       %[count],       %[count],       0x3           \n\t"
+        // Loop processing 4 pairs of samples per iteration.
+       "1:                                                         \n\t"
+#if defined(MIPS_DSP_R2_LE)
+        "ulw         %[r0],          0(%[in])                      \n\t"
+        "ulw         %[r1],          0(%[in1])                     \n\t"
+        "ulw         %[r2],          4(%[in])                      \n\t"
+        "ulw         %[r3],          4(%[in1])                     \n\t"
+#else
+        "lh          %[r0],          0(%[in])                      \n\t"
+        "lh          %[r1],          0(%[in1])                     \n\t"
+        "lh          %[r2],          2(%[in])                      \n\t"
+        "lh          %[r3],          2(%[in1])                     \n\t"
+        "lh          %[r4],          4(%[in])                      \n\t"
+        "lh          %[r5],          4(%[in1])                     \n\t"
+        "lh          %[r6],          6(%[in])                      \n\t"
+        "lh          %[r7],          6(%[in1])                     \n\t"
+#endif
+        "addiu       %[loop_size],   %[loop_size],   -1            \n\t"
+#if defined(MIPS_DSP_R2_LE)
+        "dpa.w.ph    $ac0,           %[r0],          %[r1]         \n\t"
+        "dpa.w.ph    $ac0,           %[r2],          %[r3]         \n\t"
+#else
+        "madd        %[r0],          %[r1]                         \n\t"
+        "madd        %[r2],          %[r3]                         \n\t"
+        "madd        %[r4],          %[r5]                         \n\t"
+        "madd        %[r6],          %[r7]                         \n\t"
+#endif
+        "addiu       %[in],          %[in],          8             \n\t"
+        "bnez        %[loop_size],   1b                            \n\t"
+        " addiu      %[in1],         %[in1],         8             \n\t"
+       "2:                                                         \n\t"
+        "beqz        %[count],       4f                            \n\t"
+        " mfhi       %[r0]                                         \n\t"
+        // Process remaining samples (if any).
+       "3:                                                         \n\t"
+        "lh          %[r0],          0(%[in])                      \n\t"
+        "lh          %[r1],          0(%[in1])                     \n\t"
+        "addiu       %[count],       %[count],       -1            \n\t"
+        "addiu       %[in],          %[in],          2             \n\t"
+        "madd        %[r0],          %[r1]                         \n\t"
+        "bnez        %[count],       3b                            \n\t"
+        " addiu      %[in1],         %[in1],         2             \n\t"
+        "mfhi        %[r0]                                         \n\t"
+       "4:                                                         \n\t"
+        ".set        pop                                           \n\t"
+        : [loop_size] "+r" (loop_size), [in] "+r" (in), [in1] "+r" (in1),
+#if !defined(MIPS_DSP_R2_LE)
+          [r4] "=&r" (r4), [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
+#endif
+          [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+          [count] "+r" (count)
+        :
+        : "memory", "hi", "lo"
+      );
+      r[i] = r0;
+    }
+  } else {
+    // In this case, the result is obtained by combining low and high parts
+    // of the accumulator.
+#if !defined(MIPS_DSP_R1_LE)
+    int32_t tmp_shift = 32 - scaling;
+#endif
+    for (i = 1; i < order + 1; i++) {
+      in = (int16_t*)x;
+      int16_t* in1 = (int16_t*)x + i;
+      count = N - i;
+      loop_size = (count) >> 2;
+      __asm __volatile (
+        ".set        push                                          \n\t"
+        ".set        noreorder                                     \n\t"
+        "mult        $0,             $0                            \n\t"
+        "beqz        %[loop_size],   2f                            \n\t"
+        " andi       %[count],       %[count],       0x3           \n\t"
+       "1:                                                         \n\t"
+#if defined(MIPS_DSP_R2_LE)
+        "ulw         %[r0],          0(%[in])                      \n\t"
+        "ulw         %[r1],          0(%[in1])                     \n\t"
+        "ulw         %[r2],          4(%[in])                      \n\t"
+        "ulw         %[r3],          4(%[in1])                     \n\t"
+#else
+        "lh          %[r0],          0(%[in])                      \n\t"
+        "lh          %[r1],          0(%[in1])                     \n\t"
+        "lh          %[r2],          2(%[in])                      \n\t"
+        "lh          %[r3],          2(%[in1])                     \n\t"
+        "lh          %[r4],          4(%[in])                      \n\t"
+        "lh          %[r5],          4(%[in1])                     \n\t"
+        "lh          %[r6],          6(%[in])                      \n\t"
+        "lh          %[r7],          6(%[in1])                     \n\t"
+#endif
+        "addiu       %[loop_size],   %[loop_size],   -1            \n\t"
+#if defined(MIPS_DSP_R2_LE)
+        "dpa.w.ph    $ac0,           %[r0],          %[r1]         \n\t"
+        "dpa.w.ph    $ac0,           %[r2],          %[r3]         \n\t"
+#else
+        "madd        %[r0],          %[r1]                         \n\t"
+        "madd        %[r2],          %[r3]                         \n\t"
+        "madd        %[r4],          %[r5]                         \n\t"
+        "madd        %[r6],          %[r7]                         \n\t"
+#endif
+        "addiu       %[in],          %[in],          8             \n\t"
+        "bnez        %[loop_size],   1b                            \n\t"
+        " addiu      %[in1],         %[in1],         8             \n\t"
+       "2:                                                         \n\t"
+        "beqz        %[count],       4f                            \n\t"
+#if defined(MIPS_DSP_R1_LE)
+        " extrv.w    %[r0],          $ac0,           %[scaling]    \n\t"
+#else
+        " mfhi       %[r0]                                         \n\t"
+#endif
+       "3:                                                         \n\t"
+        "lh          %[r0],          0(%[in])                      \n\t"
+        "lh          %[r1],          0(%[in1])                     \n\t"
+        "addiu       %[count],       %[count],       -1            \n\t"
+        "addiu       %[in],          %[in],          2             \n\t"
+        "madd        %[r0],          %[r1]                         \n\t"
+        "bnez        %[count],       3b                            \n\t"
+        " addiu      %[in1],         %[in1],         2             \n\t"
+#if defined(MIPS_DSP_R1_LE)
+        "extrv.w     %[r0],          $ac0,           %[scaling]    \n\t"
+#else
+        "mfhi        %[r0]                                         \n\t"
+#endif
+       "4:                                                         \n\t"
+#if !defined(MIPS_DSP_R1_LE)
+        "mflo        %[r1]                                         \n\t"
+        "sllv        %[r0],          %[r0],          %[tmp_shift]  \n\t"
+        "srlv        %[r1],          %[r1],          %[scaling]    \n\t"
+        "addu        %[r0],          %[r0],          %[r1]         \n\t"
+#endif
+        ".set        pop                                           \n\t"
+        : [loop_size] "+r" (loop_size), [in] "+r" (in), [in1] "+r" (in1),
+#if !defined(MIPS_DSP_R2_LE)
+          [r4] "=&r" (r4), [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
+#endif
+          [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+          [count] "+r" (count)
+        : [scaling] "r" (scaling)
+#if !defined(MIPS_DSP_R1_LE)
+        , [tmp_shift] "r" (tmp_shift)
+#endif
+        : "memory", "hi", "lo"
+      );
+      r[i] = r0;
+    }
+  }
+  *scale = scaling;
+
+  return (order + 1);
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/filters_neon.c b/modules/audio_coding/codecs/isac/fix/source/filters_neon.c
new file mode 100644
index 0000000..1734a96
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/filters_neon.c
@@ -0,0 +1,114 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "rtc_base/checks.h"
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+
+// Autocorrelation function in fixed point.
+// NOTE! Different from SPLIB-version in how it scales the signal.
+int WebRtcIsacfix_AutocorrNeon(int32_t* __restrict r,
+                               const int16_t* x,
+                               int16_t n,
+                               int16_t order,
+                               int16_t* __restrict scale) {
+  int i = 0;
+  int16_t scaling = 0;
+  uint32_t temp = 0;
+  int64_t prod = 0;
+  int64_t prod_tail = 0;
+
+  RTC_DCHECK_EQ(0, n % 4);
+  RTC_DCHECK_GE(n, 8);
+
+  // Calculate r[0].
+  int16x4_t x0_v;
+  int32x4_t tmpa0_v;
+  int64x2_t tmpb_v;
+
+  tmpb_v = vdupq_n_s64(0);
+  const int16_t* x_start = x;
+  const int16_t* x_end0 = x_start + n;
+  while (x_start < x_end0) {
+    x0_v = vld1_s16(x_start);
+    tmpa0_v = vmull_s16(x0_v, x0_v);
+    tmpb_v = vpadalq_s32(tmpb_v, tmpa0_v);
+    x_start += 4;
+  }
+
+#ifdef WEBRTC_ARCH_ARM64
+  prod = vaddvq_s64(tmpb_v);
+#else
+  prod = vget_lane_s64(vadd_s64(vget_low_s64(tmpb_v), vget_high_s64(tmpb_v)),
+                       0);
+#endif
+  // Calculate scaling (the value of shifting).
+  temp = (uint32_t)(prod >> 31);
+
+  scaling = temp ? 32 - WebRtcSpl_NormU32(temp) : 0;
+  r[0] = (int32_t)(prod >> scaling);
+
+  int16x8_t x1_v;
+  int16x8_t y_v;
+  int32x4_t tmpa1_v;
+  // Perform the actual correlation calculation.
+  for (i = 1; i < order + 1; i++) {
+    tmpb_v = vdupq_n_s64(0);
+    int rest = (n - i) % 8;
+    x_start = x;
+    x_end0 = x_start + n - i - rest;
+    const int16_t* y_start = x_start + i;
+    while (x_start < x_end0) {
+      x1_v = vld1q_s16(x_start);
+      y_v = vld1q_s16(y_start);
+      tmpa0_v = vmull_s16(vget_low_s16(x1_v), vget_low_s16(y_v));
+#ifdef WEBRTC_ARCH_ARM64
+      tmpa1_v = vmull_high_s16(x1_v, y_v);
+#else
+      tmpa1_v = vmull_s16(vget_high_s16(x1_v), vget_high_s16(y_v));
+#endif
+      tmpb_v = vpadalq_s32(tmpb_v, tmpa0_v);
+      tmpb_v = vpadalq_s32(tmpb_v, tmpa1_v);
+      x_start += 8;
+      y_start += 8;
+    }
+    // The remaining calculation.
+    const int16_t* x_end1 = x + n - i;
+    if (rest >= 4) {
+        int16x4_t x2_v = vld1_s16(x_start);
+        int16x4_t y2_v = vld1_s16(y_start);
+        tmpa0_v = vmull_s16(x2_v, y2_v);
+        tmpb_v = vpadalq_s32(tmpb_v, tmpa0_v);
+        x_start += 4;
+        y_start += 4;
+    }
+#ifdef WEBRTC_ARCH_ARM64
+    prod = vaddvq_s64(tmpb_v);
+#else
+    prod = vget_lane_s64(vadd_s64(vget_low_s64(tmpb_v), vget_high_s64(tmpb_v)),
+                         0);
+#endif
+
+    prod_tail = 0;
+    while (x_start < x_end1) {
+      prod_tail += *x_start * *y_start;
+      ++x_start;
+      ++y_start;
+    }
+
+    r[i] = (int32_t)((prod + prod_tail) >> scaling);
+  }
+
+  *scale = scaling;
+
+  return order + 1;
+}
+
diff --git a/modules/audio_coding/codecs/isac/fix/source/filters_unittest.cc b/modules/audio_coding/codecs/isac/fix/source/filters_unittest.cc
new file mode 100644
index 0000000..fa52986
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/filters_unittest.cc
@@ -0,0 +1,66 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+class FiltersTest : public testing::Test {
+ protected:
+  // Pass a function pointer to the Tester function.
+  void FiltersTester(AutocorrFix WebRtcIsacfix_AutocorrFixFunction) {
+    const int kOrder = 12;
+    const int kBuffer = 40;
+    int16_t scale = 0;
+    int32_t r_buffer[kOrder + 2] = {0};
+
+    // Test an overflow case.
+    const int16_t x_buffer_0[kBuffer] = {0, 0, 3010, 22351, 21106, 16969, -2095,
+        -664, 3513, -30980, 32767, -23839, 13335, 20289, -6831, 339, -17207,
+        32767, 4959, 6177, 32767, 16599, -4747, 20504, 3513, -30980, 32767,
+        -23839, 13335, 20289, 0, -16969, -2095, -664, 3513, 31981, 32767,
+        -13839, 23336, 30281};
+    const int32_t r_expected_0[kOrder + 2] = {1872498461, -224288754, 203789985,
+        483400487, -208272635, 2436500, 137785322, 266600814, -208486262,
+        329510080, 137949184, -161738972, -26894267, 237630192};
+
+    WebRtcIsacfix_AutocorrFixFunction(r_buffer, x_buffer_0,
+                                      kBuffer, kOrder + 1, &scale);
+    for (int i = 0; i < kOrder + 2; i++) {
+      EXPECT_EQ(r_expected_0[i], r_buffer[i]);
+    }
+    EXPECT_EQ(3, scale);
+
+    // Test a no-overflow case.
+    const int16_t x_buffer_1[kBuffer] = {0, 0, 300, 21, 206, 169, -295,
+        -664, 3513, -300, 327, -29, 15, 289, -6831, 339, -107,
+        37, 59, 6177, 327, 169, -4747, 204, 313, -980, 767,
+        -9, 135, 289, 0, -6969, -2095, -664, 0, 1, 7,
+        -39, 236, 281};
+    const int32_t r_expected_1[kOrder + 2] = {176253864, 8126617, 1983287,
+        -26196788, -3487363, -42839676, -24644043, 3469813, 30559879, 31905045,
+        5101567, 29328896, -55787438, -13163978};
+
+    WebRtcIsacfix_AutocorrFixFunction(r_buffer, x_buffer_1,
+                                      kBuffer, kOrder + 1, &scale);
+    for (int i = 0; i < kOrder + 2; i++) {
+      EXPECT_EQ(r_expected_1[i], r_buffer[i]);
+    }
+    EXPECT_EQ(0, scale);
+  }
+};
+
+TEST_F(FiltersTest, AutocorrFixTest) {
+  FiltersTester(WebRtcIsacfix_AutocorrC);
+#if defined(WEBRTC_HAS_NEON)
+  FiltersTester(WebRtcIsacfix_AutocorrNeon);
+#endif
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/initialize.c b/modules/audio_coding/codecs/isac/fix/source/initialize.c
new file mode 100644
index 0000000..1b82958
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/initialize.c
@@ -0,0 +1,173 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * initialize.c
+ *
+ * Internal initfunctions
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+
+
+void WebRtcIsacfix_InitMaskingEnc(MaskFiltstr_enc *maskdata) {
+
+  int k;
+
+  for (k = 0; k < WINLEN; k++) {
+    maskdata->DataBufferLoQ0[k] = (int16_t) 0;
+    maskdata->DataBufferHiQ0[k] = (int16_t) 0;
+  }
+  for (k = 0; k < ORDERLO+1; k++) {
+    maskdata->CorrBufLoQQ[k] = (int32_t) 0;
+    maskdata->CorrBufLoQdom[k] = 0;
+
+    maskdata->PreStateLoGQ15[k] = 0;
+
+  }
+  for (k = 0; k < ORDERHI+1; k++) {
+    maskdata->CorrBufHiQQ[k] = (int32_t) 0;
+    maskdata->CorrBufHiQdom[k] = 0;
+    maskdata->PreStateHiGQ15[k] = 0;
+  }
+
+  maskdata->OldEnergy = 10;
+
+  return;
+}
+
+void WebRtcIsacfix_InitMaskingDec(MaskFiltstr_dec *maskdata) {
+
+  int k;
+
+  for (k = 0; k < ORDERLO+1; k++)
+  {
+    maskdata->PostStateLoGQ0[k] = 0;
+  }
+  for (k = 0; k < ORDERHI+1; k++)
+  {
+    maskdata->PostStateHiGQ0[k] = 0;
+  }
+
+  maskdata->OldEnergy = 10;
+
+  return;
+}
+
+
+
+
+
+
+
+void WebRtcIsacfix_InitPreFilterbank(PreFiltBankstr *prefiltdata)
+{
+  int k;
+
+  for (k = 0; k < QLOOKAHEAD; k++) {
+    prefiltdata->INLABUF1_fix[k] = 0;
+    prefiltdata->INLABUF2_fix[k] = 0;
+  }
+  for (k = 0; k < 2 * (QORDER - 1); k++) {
+    prefiltdata->INSTAT1_fix[k] = 0;
+    prefiltdata->INSTAT2_fix[k] = 0;
+  }
+
+  /* High pass filter states */
+  prefiltdata->HPstates_fix[0] = 0;
+  prefiltdata->HPstates_fix[1] = 0;
+
+  return;
+}
+
+void WebRtcIsacfix_InitPostFilterbank(PostFiltBankstr *postfiltdata)
+{
+  int k;
+
+  for (k = 0; k < 2 * POSTQORDER; k++) {
+    postfiltdata->STATE_0_LOWER_fix[k] = 0;
+    postfiltdata->STATE_0_UPPER_fix[k] = 0;
+  }
+
+  /* High pass filter states */
+
+  postfiltdata->HPstates1_fix[0] = 0;
+  postfiltdata->HPstates1_fix[1] = 0;
+
+  postfiltdata->HPstates2_fix[0] = 0;
+  postfiltdata->HPstates2_fix[1] = 0;
+
+  return;
+}
+
+
+void WebRtcIsacfix_InitPitchFilter(PitchFiltstr *pitchfiltdata)
+{
+  int k;
+
+  for (k = 0; k < PITCH_BUFFSIZE; k++)
+    pitchfiltdata->ubufQQ[k] = 0;
+  for (k = 0; k < (PITCH_DAMPORDER); k++)
+    pitchfiltdata->ystateQQ[k] = 0;
+
+  pitchfiltdata->oldlagQ7 = 6400; /* 50.0 in Q7 */
+  pitchfiltdata->oldgainQ12 = 0;
+}
+
+void WebRtcIsacfix_InitPitchAnalysis(PitchAnalysisStruct *State)
+{
+  int k;
+
+  for (k = 0; k < PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2; k++) {
+    State->dec_buffer16[k] = 0;
+  }
+  for (k = 0; k < 2 * ALLPASSSECTIONS + 1; k++) {
+    State->decimator_state32[k] = 0;
+  }
+
+  for (k = 0; k < QLOOKAHEAD; k++)
+    State->inbuf[k] = 0;
+
+  WebRtcIsacfix_InitPitchFilter(&(State->PFstr_wght));
+
+  WebRtcIsacfix_InitPitchFilter(&(State->PFstr));
+}
+
+
+void WebRtcIsacfix_InitPlc( PLCstr *State )
+{
+  State->decayCoeffPriodic = WEBRTC_SPL_WORD16_MAX;
+  State->decayCoeffNoise = WEBRTC_SPL_WORD16_MAX;
+
+  State->used = PLC_WAS_USED;
+
+  WebRtcSpl_ZerosArrayW16(State->overlapLP, RECOVERY_OVERLAP);
+  WebRtcSpl_ZerosArrayW16(State->lofilt_coefQ15, ORDERLO);
+  WebRtcSpl_ZerosArrayW16(State->hifilt_coefQ15, ORDERHI );
+
+  State->AvgPitchGain_Q12 = 0;
+  State->lastPitchGain_Q12 = 0;
+  State->lastPitchLag_Q7 = 0;
+  State->gain_lo_hiQ17[0]=State->gain_lo_hiQ17[1] = 0;
+  WebRtcSpl_ZerosArrayW16(State->prevPitchInvIn, FRAMESAMPLES/2);
+  WebRtcSpl_ZerosArrayW16(State->prevPitchInvOut, PITCH_MAX_LAG + 10 );
+  WebRtcSpl_ZerosArrayW32(State->prevHP, PITCH_MAX_LAG + 10 );
+  State->pitchCycles = 0;
+  State->A = 0;
+  State->B = 0;
+  State->pitchIndex = 0;
+  State->stretchLag = 240;
+  State->seed = 4447;
+
+
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/isac_fix_type.h b/modules/audio_coding/codecs/isac/fix/source/isac_fix_type.h
new file mode 100644
index 0000000..5f0f822
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/isac_fix_type.h
@@ -0,0 +1,123 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ISAC_FIX_TYPE_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ISAC_FIX_TYPE_H_
+
+#include "modules/audio_coding/codecs/isac/fix/include/isacfix.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+class IsacFix {
+ public:
+  using instance_type = ISACFIX_MainStruct;
+  static const bool has_swb = false;
+  static inline int16_t Control(instance_type* inst,
+                                int32_t rate,
+                                int framesize) {
+    return WebRtcIsacfix_Control(inst, rate, framesize);
+  }
+  static inline int16_t ControlBwe(instance_type* inst,
+                                   int32_t rate_bps,
+                                   int frame_size_ms,
+                                   int16_t enforce_frame_size) {
+    return WebRtcIsacfix_ControlBwe(inst, rate_bps, frame_size_ms,
+                                    enforce_frame_size);
+  }
+  static inline int16_t Create(instance_type** inst) {
+    return WebRtcIsacfix_Create(inst);
+  }
+  static inline int DecodeInternal(instance_type* inst,
+                                   const uint8_t* encoded,
+                                   size_t len,
+                                   int16_t* decoded,
+                                   int16_t* speech_type) {
+    return WebRtcIsacfix_Decode(inst, encoded, len, decoded, speech_type);
+  }
+  static inline size_t DecodePlc(instance_type* inst,
+                                 int16_t* decoded,
+                                 size_t num_lost_frames) {
+    return WebRtcIsacfix_DecodePlc(inst, decoded, num_lost_frames);
+  }
+  static inline void DecoderInit(instance_type* inst) {
+    WebRtcIsacfix_DecoderInit(inst);
+  }
+  static inline int Encode(instance_type* inst,
+                           const int16_t* speech_in,
+                           uint8_t* encoded) {
+    return WebRtcIsacfix_Encode(inst, speech_in, encoded);
+  }
+  static inline int16_t EncoderInit(instance_type* inst, int16_t coding_mode) {
+    return WebRtcIsacfix_EncoderInit(inst, coding_mode);
+  }
+  static inline uint16_t EncSampRate(instance_type* inst) {
+    return kFixSampleRate;
+  }
+
+  static inline int16_t Free(instance_type* inst) {
+    return WebRtcIsacfix_Free(inst);
+  }
+  static inline void GetBandwidthInfo(instance_type* inst,
+                                      IsacBandwidthInfo* bwinfo) {
+    WebRtcIsacfix_GetBandwidthInfo(inst, bwinfo);
+  }
+  static inline int16_t GetErrorCode(instance_type* inst) {
+    return WebRtcIsacfix_GetErrorCode(inst);
+  }
+
+  static inline int16_t GetNewFrameLen(instance_type* inst) {
+    return WebRtcIsacfix_GetNewFrameLen(inst);
+  }
+  static inline void SetBandwidthInfo(instance_type* inst,
+                                      const IsacBandwidthInfo* bwinfo) {
+    WebRtcIsacfix_SetBandwidthInfo(inst, bwinfo);
+  }
+  static inline int16_t SetDecSampRate(instance_type* inst,
+                                       uint16_t sample_rate_hz) {
+    RTC_DCHECK_EQ(sample_rate_hz, kFixSampleRate);
+    return 0;
+  }
+  static inline int16_t SetEncSampRate(instance_type* inst,
+                                       uint16_t sample_rate_hz) {
+    RTC_DCHECK_EQ(sample_rate_hz, kFixSampleRate);
+    return 0;
+  }
+  static inline void SetEncSampRateInDecoder(instance_type* inst,
+                                             uint16_t sample_rate_hz) {
+    RTC_DCHECK_EQ(sample_rate_hz, kFixSampleRate);
+  }
+  static inline void SetInitialBweBottleneck(instance_type* inst,
+                                             int bottleneck_bits_per_second) {
+    WebRtcIsacfix_SetInitialBweBottleneck(inst, bottleneck_bits_per_second);
+  }
+  static inline int16_t UpdateBwEstimate(instance_type* inst,
+                                         const uint8_t* encoded,
+                                         size_t packet_size,
+                                         uint16_t rtp_seq_number,
+                                         uint32_t send_ts,
+                                         uint32_t arr_ts) {
+    return WebRtcIsacfix_UpdateBwEstimate(inst, encoded, packet_size,
+                                          rtp_seq_number, send_ts, arr_ts);
+  }
+  static inline int16_t SetMaxPayloadSize(instance_type* inst,
+                                          int16_t max_payload_size_bytes) {
+    return WebRtcIsacfix_SetMaxPayloadSize(inst, max_payload_size_bytes);
+  }
+  static inline int16_t SetMaxRate(instance_type* inst, int32_t max_bit_rate) {
+    return WebRtcIsacfix_SetMaxRate(inst, max_bit_rate);
+  }
+
+ private:
+  enum { kFixSampleRate = 16000 };
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ISAC_FIX_TYPE_H_
diff --git a/modules/audio_coding/codecs/isac/fix/source/isacfix.c b/modules/audio_coding/codecs/isac/fix/source/isacfix.c
new file mode 100644
index 0000000..bbe9098
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/isacfix.c
@@ -0,0 +1,1551 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * isacfix.c
+ *
+ * This C file contains the functions for the ISAC API
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/include/isacfix.h"
+
+#include <stdlib.h>
+
+#include "rtc_base/checks.h"
+#include "modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h"
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h"
+#include "modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h"
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+
+// Declare function pointers.
+FilterMaLoopFix WebRtcIsacfix_FilterMaLoopFix;
+Spec2Time WebRtcIsacfix_Spec2Time;
+Time2Spec WebRtcIsacfix_Time2Spec;
+MatrixProduct1 WebRtcIsacfix_MatrixProduct1;
+MatrixProduct2 WebRtcIsacfix_MatrixProduct2;
+
+/* This method assumes that |stream_size_bytes| is in valid range,
+ * i.e. >= 0 && <=  STREAM_MAXW16_60MS
+ */
+static void InitializeDecoderBitstream(size_t stream_size_bytes,
+                                       Bitstr_dec* bitstream) {
+  bitstream->W_upper = 0xFFFFFFFF;
+  bitstream->streamval = 0;
+  bitstream->stream_index = 0;
+  bitstream->full = 1;
+  bitstream->stream_size = (stream_size_bytes + 1) >> 1;
+  memset(bitstream->stream, 0, sizeof(bitstream->stream));
+}
+
+/**************************************************************************
+ * WebRtcIsacfix_AssignSize(...)
+ *
+ * Functions used when malloc is not allowed
+ * Returns number of bytes needed to allocate for iSAC struct.
+ *
+ */
+
+int16_t WebRtcIsacfix_AssignSize(int *sizeinbytes) {
+  *sizeinbytes=sizeof(ISACFIX_SubStruct)*2/sizeof(int16_t);
+  return(0);
+}
+
+/***************************************************************************
+ * WebRtcIsacfix_Assign(...)
+ *
+ * Functions used when malloc is not allowed
+ * Place struct at given address
+ *
+ * If successful, Return 0, else Return -1
+ */
+
+int16_t WebRtcIsacfix_Assign(ISACFIX_MainStruct **inst, void *ISACFIX_inst_Addr) {
+  if (ISACFIX_inst_Addr!=NULL) {
+    ISACFIX_SubStruct* self = ISACFIX_inst_Addr;
+    *inst = (ISACFIX_MainStruct*)self;
+    self->errorcode = 0;
+    self->initflag = 0;
+    self->ISACenc_obj.SaveEnc_ptr = NULL;
+    WebRtcIsacfix_InitBandwidthEstimator(&self->bwestimator_obj);
+    return(0);
+  } else {
+    return(-1);
+  }
+}
+
+
+#ifndef ISACFIX_NO_DYNAMIC_MEM
+
+/****************************************************************************
+ * WebRtcIsacfix_Create(...)
+ *
+ * This function creates a ISAC instance, which will contain the state
+ * information for one coding/decoding channel.
+ *
+ * Input:
+ *      - *ISAC_main_inst   : a pointer to the coder instance.
+ *
+ * Return value             :  0 - Ok
+ *                            -1 - Error
+ */
+
+int16_t WebRtcIsacfix_Create(ISACFIX_MainStruct **ISAC_main_inst)
+{
+  ISACFIX_SubStruct *tempo;
+  tempo = malloc(1 * sizeof(ISACFIX_SubStruct));
+  *ISAC_main_inst = (ISACFIX_MainStruct *)tempo;
+  if (*ISAC_main_inst!=NULL) {
+    (*(ISACFIX_SubStruct**)ISAC_main_inst)->errorcode = 0;
+    (*(ISACFIX_SubStruct**)ISAC_main_inst)->initflag = 0;
+    (*(ISACFIX_SubStruct**)ISAC_main_inst)->ISACenc_obj.SaveEnc_ptr = NULL;
+    WebRtcSpl_Init();
+    WebRtcIsacfix_InitBandwidthEstimator(&tempo->bwestimator_obj);
+    return(0);
+  } else {
+    return(-1);
+  }
+}
+
+
+/****************************************************************************
+ * WebRtcIsacfix_CreateInternal(...)
+ *
+ * This function creates the memory that is used to store data in the encoder
+ *
+ * Input:
+ *      - *ISAC_main_inst   : a pointer to the coder instance.
+ *
+ * Return value             :  0 - Ok
+ *                            -1 - Error
+ */
+
+int16_t WebRtcIsacfix_CreateInternal(ISACFIX_MainStruct *ISAC_main_inst)
+{
+  ISACFIX_SubStruct *ISAC_inst;
+
+  /* typecast pointer to real structure */
+  ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+  /* Allocate memory for storing encoder data */
+  ISAC_inst->ISACenc_obj.SaveEnc_ptr = malloc(1 * sizeof(IsacSaveEncoderData));
+
+  if (ISAC_inst->ISACenc_obj.SaveEnc_ptr!=NULL) {
+    return(0);
+  } else {
+    return(-1);
+  }
+}
+
+
+#endif
+
+
+
+/****************************************************************************
+ * WebRtcIsacfix_Free(...)
+ *
+ * This function frees the ISAC instance created at the beginning.
+ *
+ * Input:
+ *      - ISAC_main_inst    : a ISAC instance.
+ *
+ * Return value             :  0 - Ok
+ *                            -1 - Error
+ */
+
+int16_t WebRtcIsacfix_Free(ISACFIX_MainStruct *ISAC_main_inst)
+{
+  free(ISAC_main_inst);
+  return(0);
+}
+
+/****************************************************************************
+ * WebRtcIsacfix_FreeInternal(...)
+ *
+ * This function frees the internal memory for storing encoder data.
+ *
+ * Input:
+ *       - ISAC_main_inst    : a ISAC instance.
+ *
+ * Return value              :  0 - Ok
+ *                             -1 - Error
+ */
+
+int16_t WebRtcIsacfix_FreeInternal(ISACFIX_MainStruct *ISAC_main_inst)
+{
+  ISACFIX_SubStruct *ISAC_inst;
+
+  /* typecast pointer to real structure */
+  ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+  /* Release memory */
+  free(ISAC_inst->ISACenc_obj.SaveEnc_ptr);
+
+  return(0);
+}
+
+/****************************************************************************
+ * WebRtcIsacfix_InitNeon(...)
+ *
+ * This function initializes function pointers for ARM Neon platform.
+ */
+
+#if defined(WEBRTC_HAS_NEON)
+static void WebRtcIsacfix_InitNeon(void) {
+  WebRtcIsacfix_AutocorrFix = WebRtcIsacfix_AutocorrNeon;
+  WebRtcIsacfix_FilterMaLoopFix = WebRtcIsacfix_FilterMaLoopNeon;
+  WebRtcIsacfix_Spec2Time = WebRtcIsacfix_Spec2TimeNeon;
+  WebRtcIsacfix_Time2Spec = WebRtcIsacfix_Time2SpecNeon;
+  WebRtcIsacfix_AllpassFilter2FixDec16 =
+      WebRtcIsacfix_AllpassFilter2FixDec16Neon;
+  WebRtcIsacfix_MatrixProduct1 = WebRtcIsacfix_MatrixProduct1Neon;
+  WebRtcIsacfix_MatrixProduct2 = WebRtcIsacfix_MatrixProduct2Neon;
+}
+#endif
+
+/****************************************************************************
+ * WebRtcIsacfix_InitMIPS(...)
+ *
+ * This function initializes function pointers for MIPS platform.
+ */
+
+#if defined(MIPS32_LE)
+static void WebRtcIsacfix_InitMIPS(void) {
+  WebRtcIsacfix_AutocorrFix = WebRtcIsacfix_AutocorrMIPS;
+  WebRtcIsacfix_FilterMaLoopFix = WebRtcIsacfix_FilterMaLoopMIPS;
+  WebRtcIsacfix_Spec2Time = WebRtcIsacfix_Spec2TimeMIPS;
+  WebRtcIsacfix_Time2Spec = WebRtcIsacfix_Time2SpecMIPS;
+  WebRtcIsacfix_MatrixProduct1 = WebRtcIsacfix_MatrixProduct1MIPS;
+  WebRtcIsacfix_MatrixProduct2 = WebRtcIsacfix_MatrixProduct2MIPS;
+#if defined(MIPS_DSP_R1_LE)
+  WebRtcIsacfix_AllpassFilter2FixDec16 =
+      WebRtcIsacfix_AllpassFilter2FixDec16MIPS;
+  WebRtcIsacfix_HighpassFilterFixDec32 =
+      WebRtcIsacfix_HighpassFilterFixDec32MIPS;
+#endif
+#if defined(MIPS_DSP_R2_LE)
+  WebRtcIsacfix_CalculateResidualEnergy =
+      WebRtcIsacfix_CalculateResidualEnergyMIPS;
+#endif
+}
+#endif
+
+static void InitFunctionPointers(void) {
+  WebRtcIsacfix_AutocorrFix = WebRtcIsacfix_AutocorrC;
+  WebRtcIsacfix_FilterMaLoopFix = WebRtcIsacfix_FilterMaLoopC;
+  WebRtcIsacfix_CalculateResidualEnergy =
+      WebRtcIsacfix_CalculateResidualEnergyC;
+  WebRtcIsacfix_AllpassFilter2FixDec16 = WebRtcIsacfix_AllpassFilter2FixDec16C;
+  WebRtcIsacfix_HighpassFilterFixDec32 = WebRtcIsacfix_HighpassFilterFixDec32C;
+  WebRtcIsacfix_Time2Spec = WebRtcIsacfix_Time2SpecC;
+  WebRtcIsacfix_Spec2Time = WebRtcIsacfix_Spec2TimeC;
+  WebRtcIsacfix_MatrixProduct1 = WebRtcIsacfix_MatrixProduct1C;
+  WebRtcIsacfix_MatrixProduct2 = WebRtcIsacfix_MatrixProduct2C;
+
+#if defined(WEBRTC_HAS_NEON)
+  WebRtcIsacfix_InitNeon();
+#endif
+
+#if defined(MIPS32_LE)
+  WebRtcIsacfix_InitMIPS();
+#endif
+}
+
+/****************************************************************************
+ * WebRtcIsacfix_EncoderInit(...)
+ *
+ * This function initializes a ISAC instance prior to the encoder calls.
+ *
+ * Input:
+ *      - ISAC_main_inst    : ISAC instance.
+ *      - CodingMode        : 0 -> Bit rate and frame length are automatically
+ *                                 adjusted to available bandwidth on
+ *                                 transmission channel.
+ *                            1 -> User sets a frame length and a target bit
+ *                                 rate which is taken as the maximum short-term
+ *                                 average bit rate.
+ *
+ * Return value             :  0 - Ok
+ *                            -1 - Error
+ */
+
+int16_t WebRtcIsacfix_EncoderInit(ISACFIX_MainStruct *ISAC_main_inst,
+                                  int16_t  CodingMode)
+{
+  int k;
+  int16_t statusInit;
+  ISACFIX_SubStruct *ISAC_inst;
+
+  statusInit = 0;
+  /* typecast pointer to rela structure */
+  ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+  /* flag encoder init */
+  ISAC_inst->initflag |= 2;
+
+  if (CodingMode == 0)
+    /* Adaptive mode */
+    ISAC_inst->ISACenc_obj.new_framelength  = INITIAL_FRAMESAMPLES;
+  else if (CodingMode == 1)
+    /* Instantaneous mode */
+    ISAC_inst->ISACenc_obj.new_framelength = 480;    /* default for I-mode */
+  else {
+    ISAC_inst->errorcode = ISAC_DISALLOWED_CODING_MODE;
+    statusInit = -1;
+  }
+
+  ISAC_inst->CodingMode = CodingMode;
+
+  WebRtcIsacfix_InitMaskingEnc(&ISAC_inst->ISACenc_obj.maskfiltstr_obj);
+  WebRtcIsacfix_InitPreFilterbank(&ISAC_inst->ISACenc_obj.prefiltbankstr_obj);
+  WebRtcIsacfix_InitPitchFilter(&ISAC_inst->ISACenc_obj.pitchfiltstr_obj);
+  WebRtcIsacfix_InitPitchAnalysis(&ISAC_inst->ISACenc_obj.pitchanalysisstr_obj);
+
+  WebRtcIsacfix_InitRateModel(&ISAC_inst->ISACenc_obj.rate_data_obj);
+
+
+  ISAC_inst->ISACenc_obj.buffer_index   = 0;
+  ISAC_inst->ISACenc_obj.frame_nb    = 0;
+  ISAC_inst->ISACenc_obj.BottleNeck      = 32000; /* default for I-mode */
+  ISAC_inst->ISACenc_obj.MaxDelay    = 10;    /* default for I-mode */
+  ISAC_inst->ISACenc_obj.current_framesamples = 0;
+  ISAC_inst->ISACenc_obj.s2nr     = 0;
+  ISAC_inst->ISACenc_obj.MaxBits    = 0;
+  ISAC_inst->ISACenc_obj.bitstr_seed   = 4447;
+  ISAC_inst->ISACenc_obj.payloadLimitBytes30  = STREAM_MAXW16_30MS << 1;
+  ISAC_inst->ISACenc_obj.payloadLimitBytes60  = STREAM_MAXW16_60MS << 1;
+  ISAC_inst->ISACenc_obj.maxPayloadBytes      = STREAM_MAXW16_60MS << 1;
+  ISAC_inst->ISACenc_obj.maxRateInBytes       = STREAM_MAXW16_30MS << 1;
+  ISAC_inst->ISACenc_obj.enforceFrameSize     = 0;
+
+  /* Init the bistream data area to zero */
+  for (k=0; k<STREAM_MAXW16_60MS; k++){
+    ISAC_inst->ISACenc_obj.bitstr_obj.stream[k] = 0;
+  }
+
+#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
+  WebRtcIsacfix_InitPostFilterbank(&ISAC_inst->ISACenc_obj.interpolatorstr_obj);
+#endif
+
+  InitFunctionPointers();
+
+  return statusInit;
+}
+
+/* Read the given number of bytes of big-endian 16-bit integers from |src| and
+   write them to |dest| in host endian. If |nbytes| is odd, the number of
+   output elements is rounded up, and the least significant byte of the last
+   element is set to 0. */
+static void read_be16(const uint8_t* src, size_t nbytes, uint16_t* dest) {
+  size_t i;
+  for (i = 0; i < nbytes / 2; ++i)
+    dest[i] = src[2 * i] << 8 | src[2 * i + 1];
+  if (nbytes % 2 == 1)
+    dest[nbytes / 2] = src[nbytes - 1] << 8;
+}
+
+/* Read the given number of bytes of host-endian 16-bit integers from |src| and
+   write them to |dest| in big endian. If |nbytes| is odd, the number of source
+   elements is rounded up (but only the most significant byte of the last
+   element is used), and the number of output bytes written will be
+   nbytes + 1. */
+static void write_be16(const uint16_t* src, size_t nbytes, uint8_t* dest) {
+  size_t i;
+  for (i = 0; i < nbytes / 2; ++i) {
+    dest[2 * i] = src[i] >> 8;
+    dest[2 * i + 1] = src[i];
+  }
+  if (nbytes % 2 == 1) {
+    dest[nbytes - 1] = src[nbytes / 2] >> 8;
+    dest[nbytes] = 0;
+  }
+}
+
+/****************************************************************************
+ * WebRtcIsacfix_Encode(...)
+ *
+ * This function encodes 10ms frame(s) and inserts it into a package.
+ * Input speech length has to be 160 samples (10ms). The encoder buffers those
+ * 10ms frames until it reaches the chosen Framesize (480 or 960 samples
+ * corresponding to 30 or 60 ms frames), and then proceeds to the encoding.
+ *
+ * Input:
+ *      - ISAC_main_inst    : ISAC instance.
+ *      - speechIn          : input speech vector.
+ *
+ * Output:
+ *      - encoded           : the encoded data vector
+ *
+ * Return value:
+ *                          : >0 - Length (in bytes) of coded data
+ *                          :  0 - The buffer didn't reach the chosen framesize
+ *                            so it keeps buffering speech samples.
+ *                          : -1 - Error
+ */
+
+int WebRtcIsacfix_Encode(ISACFIX_MainStruct *ISAC_main_inst,
+                         const int16_t    *speechIn,
+                         uint8_t* encoded)
+{
+  ISACFIX_SubStruct *ISAC_inst;
+  int stream_len;
+
+  /* typecast pointer to rela structure */
+  ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+
+  /* check if encoder initiated */
+  if ((ISAC_inst->initflag & 2) != 2) {
+    ISAC_inst->errorcode = ISAC_ENCODER_NOT_INITIATED;
+    return (-1);
+  }
+
+  stream_len = WebRtcIsacfix_EncodeImpl((int16_t*)speechIn,
+                                        &ISAC_inst->ISACenc_obj,
+                                        &ISAC_inst->bwestimator_obj,
+                                        ISAC_inst->CodingMode);
+  if (stream_len<0) {
+    ISAC_inst->errorcode = -(int16_t)stream_len;
+    return -1;
+  }
+
+  write_be16(ISAC_inst->ISACenc_obj.bitstr_obj.stream, (size_t)stream_len,
+             encoded);
+  return stream_len;
+
+}
+
+
+
+
+/****************************************************************************
+ * WebRtcIsacfix_EncodeNb(...)
+ *
+ * This function encodes 10ms narrow band (8 kHz sampling) frame(s) and inserts
+ * it into a package. Input speech length has to be 80 samples (10ms). The encoder
+ * interpolates into wide-band (16 kHz sampling) buffers those
+ * 10ms frames until it reaches the chosen Framesize (480 or 960 wide-band samples
+ * corresponding to 30 or 60 ms frames), and then proceeds to the encoding.
+ *
+ * The function is enabled if WEBRTC_ISAC_FIX_NB_CALLS_ENABLED is defined
+ *
+ * Input:
+ *      - ISAC_main_inst    : ISAC instance.
+ *      - speechIn          : input speech vector.
+ *
+ * Output:
+ *      - encoded           : the encoded data vector
+ *
+ * Return value:
+ *                          : >0 - Length (in bytes) of coded data
+ *                          :  0 - The buffer didn't reach the chosen framesize
+ *                            so it keeps buffering speech samples.
+ *                          : -1 - Error
+ */
+#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
+int16_t WebRtcIsacfix_EncodeNb(ISACFIX_MainStruct *ISAC_main_inst,
+                               const int16_t    *speechIn,
+                               int16_t          *encoded)
+{
+  ISACFIX_SubStruct *ISAC_inst;
+  int16_t stream_len;
+  int16_t speechInWB[FRAMESAMPLES_10ms];
+  int16_t Vector_Word16_1[FRAMESAMPLES_10ms/2];
+  int16_t Vector_Word16_2[FRAMESAMPLES_10ms/2];
+
+  int k;
+
+
+  /* typecast pointer to rela structure */
+  ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+
+  /* check if encoder initiated */
+  if ((ISAC_inst->initflag & 2) != 2) {
+    ISAC_inst->errorcode = ISAC_ENCODER_NOT_INITIATED;
+    return (-1);
+  }
+
+
+  /* Oversample to WB */
+
+  /* Form polyphase signals, and compensate for DC offset */
+  for (k=0;k<FRAMESAMPLES_10ms/2;k++) {
+    Vector_Word16_1[k] = speechIn[k] + 1;
+    Vector_Word16_2[k] = speechIn[k];
+  }
+  WebRtcIsacfix_FilterAndCombine2(Vector_Word16_1, Vector_Word16_2, speechInWB, &ISAC_inst->ISACenc_obj.interpolatorstr_obj, FRAMESAMPLES_10ms);
+
+
+  /* Encode WB signal */
+  stream_len = WebRtcIsacfix_EncodeImpl((int16_t*)speechInWB,
+                                        &ISAC_inst->ISACenc_obj,
+                                        &ISAC_inst->bwestimator_obj,
+                                        ISAC_inst->CodingMode);
+  if (stream_len<0) {
+    ISAC_inst->errorcode = - stream_len;
+    return -1;
+  }
+
+  write_be16(ISAC_inst->ISACenc_obj.bitstr_obj.stream,
+             stream_len,
+             (uint8_t*)encoded);
+  return stream_len;
+}
+#endif  /* WEBRTC_ISAC_FIX_NB_CALLS_ENABLED */
+
+
+/****************************************************************************
+ * WebRtcIsacfix_GetNewBitStream(...)
+ *
+ * This function returns encoded data, with the recieved bwe-index in the
+ * stream. It should always return a complete packet, i.e. only called once
+ * even for 60 msec frames
+ *
+ * Input:
+ *      - ISAC_main_inst    : ISAC instance.
+ *      - bweIndex          : index of bandwidth estimate to put in new bitstream
+ *
+ * Output:
+ *      - encoded           : the encoded data vector
+ *
+ * Return value:
+ *                          : >0 - Length (in bytes) of coded data
+ *                          : -1 - Error
+ */
+
+int16_t WebRtcIsacfix_GetNewBitStream(ISACFIX_MainStruct *ISAC_main_inst,
+                                      int16_t      bweIndex,
+                                      float              scale,
+                                      uint8_t* encoded)
+{
+  ISACFIX_SubStruct *ISAC_inst;
+  int16_t stream_len;
+
+  /* typecast pointer to rela structure */
+  ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+
+  /* check if encoder initiated */
+  if ((ISAC_inst->initflag & 2) != 2) {
+    ISAC_inst->errorcode = ISAC_ENCODER_NOT_INITIATED;
+    return (-1);
+  }
+
+  stream_len = WebRtcIsacfix_EncodeStoredData(&ISAC_inst->ISACenc_obj,
+                                              bweIndex,
+                                              scale);
+  if (stream_len<0) {
+    ISAC_inst->errorcode = - stream_len;
+    return -1;
+  }
+
+  write_be16(ISAC_inst->ISACenc_obj.bitstr_obj.stream, stream_len, encoded);
+  return stream_len;
+}
+
+
+
+/****************************************************************************
+ * WebRtcIsacfix_DecoderInit(...)
+ *
+ * This function initializes a ISAC instance prior to the decoder calls.
+ *
+ * Input:
+ *      - ISAC_main_inst    : ISAC instance.
+ */
+
+void WebRtcIsacfix_DecoderInit(ISACFIX_MainStruct *ISAC_main_inst)
+{
+  ISACFIX_SubStruct *ISAC_inst;
+
+  InitFunctionPointers();
+
+  /* typecast pointer to real structure */
+  ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+  /* flag decoder init */
+  ISAC_inst->initflag |= 1;
+
+  WebRtcIsacfix_InitMaskingDec(&ISAC_inst->ISACdec_obj.maskfiltstr_obj);
+  WebRtcIsacfix_InitPostFilterbank(&ISAC_inst->ISACdec_obj.postfiltbankstr_obj);
+  WebRtcIsacfix_InitPitchFilter(&ISAC_inst->ISACdec_obj.pitchfiltstr_obj);
+
+  /* TS */
+  WebRtcIsacfix_InitPlc( &ISAC_inst->ISACdec_obj.plcstr_obj );
+
+
+#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
+  WebRtcIsacfix_InitPreFilterbank(&ISAC_inst->ISACdec_obj.decimatorstr_obj);
+#endif
+}
+
+
+/****************************************************************************
+ * WebRtcIsacfix_UpdateBwEstimate1(...)
+ *
+ * This function updates the estimate of the bandwidth.
+ *
+ * Input:
+ *      - ISAC_main_inst    : ISAC instance.
+ *      - encoded           : encoded ISAC frame(s).
+ *      - packet_size       : size of the packet.
+ *      - rtp_seq_number    : the RTP number of the packet.
+ *      - arr_ts            : the arrival time of the packet (from NetEq)
+ *                            in samples.
+ *
+ * Return value             :  0 - Ok
+ *                            -1 - Error
+ */
+
+int16_t WebRtcIsacfix_UpdateBwEstimate1(ISACFIX_MainStruct *ISAC_main_inst,
+                                        const uint8_t* encoded,
+                                        size_t packet_size,
+                                        uint16_t rtp_seq_number,
+                                        uint32_t arr_ts)
+{
+  ISACFIX_SubStruct *ISAC_inst;
+  Bitstr_dec streamdata;
+  int16_t err;
+  const size_t kRequiredEncodedLenBytes = 10;
+
+  /* typecast pointer to real structure */
+  ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+  /* Sanity check of packet length */
+  if (packet_size == 0) {
+    /* return error code if the packet length is null or less */
+    ISAC_inst->errorcode = ISAC_EMPTY_PACKET;
+    return -1;
+  } else if (packet_size > (STREAM_MAXW16<<1)) {
+    /* return error code if length of stream is too long */
+    ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH;
+    return -1;
+  }
+
+  /* check if decoder initiated */
+  if ((ISAC_inst->initflag & 1) != 1) {
+    ISAC_inst->errorcode = ISAC_DECODER_NOT_INITIATED;
+    return (-1);
+  }
+
+  InitializeDecoderBitstream(packet_size, &streamdata);
+
+  read_be16(encoded, kRequiredEncodedLenBytes, streamdata.stream);
+
+  err = WebRtcIsacfix_EstimateBandwidth(&ISAC_inst->bwestimator_obj,
+                                        &streamdata,
+                                        packet_size,
+                                        rtp_seq_number,
+                                        0,
+                                        arr_ts);
+
+
+  if (err < 0)
+  {
+    /* return error code if something went wrong */
+    ISAC_inst->errorcode = -err;
+    return -1;
+  }
+
+
+  return 0;
+}
+
+/****************************************************************************
+ * WebRtcIsacfix_UpdateBwEstimate(...)
+ *
+ * This function updates the estimate of the bandwidth.
+ *
+ * Input:
+ *      - ISAC_main_inst    : ISAC instance.
+ *      - encoded           : encoded ISAC frame(s).
+ *      - packet_size       : size of the packet.
+ *      - rtp_seq_number    : the RTP number of the packet.
+ *      - send_ts           : Send Time Stamp from RTP header
+ *      - arr_ts            : the arrival time of the packet (from NetEq)
+ *                            in samples.
+ *
+ * Return value             :  0 - Ok
+ *                            -1 - Error
+ */
+
+int16_t WebRtcIsacfix_UpdateBwEstimate(ISACFIX_MainStruct *ISAC_main_inst,
+                                       const uint8_t* encoded,
+                                       size_t packet_size,
+                                       uint16_t rtp_seq_number,
+                                       uint32_t send_ts,
+                                       uint32_t arr_ts)
+{
+  ISACFIX_SubStruct *ISAC_inst;
+  Bitstr_dec streamdata;
+  int16_t err;
+  const size_t kRequiredEncodedLenBytes = 10;
+
+  /* typecast pointer to real structure */
+  ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+  /* Sanity check of packet length */
+  if (packet_size == 0) {
+    /* return error code if the packet length is null  or less */
+    ISAC_inst->errorcode = ISAC_EMPTY_PACKET;
+    return -1;
+  } else if (packet_size < kRequiredEncodedLenBytes) {
+    ISAC_inst->errorcode = ISAC_PACKET_TOO_SHORT;
+    return -1;
+  } else if (packet_size > (STREAM_MAXW16<<1)) {
+    /* return error code if length of stream is too long */
+    ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH;
+    return -1;
+  }
+
+  /* check if decoder initiated */
+  if ((ISAC_inst->initflag & 1) != 1) {
+    ISAC_inst->errorcode = ISAC_DECODER_NOT_INITIATED;
+    return (-1);
+  }
+
+  InitializeDecoderBitstream(packet_size, &streamdata);
+
+  read_be16(encoded, kRequiredEncodedLenBytes, streamdata.stream);
+
+  err = WebRtcIsacfix_EstimateBandwidth(&ISAC_inst->bwestimator_obj,
+                                        &streamdata,
+                                        packet_size,
+                                        rtp_seq_number,
+                                        send_ts,
+                                        arr_ts);
+
+  if (err < 0)
+  {
+    /* return error code if something went wrong */
+    ISAC_inst->errorcode = -err;
+    return -1;
+  }
+
+
+  return 0;
+}
+
+/****************************************************************************
+ * WebRtcIsacfix_Decode(...)
+ *
+ * This function decodes a ISAC frame. Output speech length
+ * will be a multiple of 480 samples: 480 or 960 samples,
+ * depending on the framesize (30 or 60 ms).
+ *
+ * Input:
+ *      - ISAC_main_inst    : ISAC instance.
+ *      - encoded           : encoded ISAC frame(s)
+ *      - len               : bytes in encoded vector
+ *
+ * Output:
+ *      - decoded           : The decoded vector
+ *
+ * Return value             : >0 - number of samples in decoded vector
+ *                            -1 - Error
+ */
+
+
+int WebRtcIsacfix_Decode(ISACFIX_MainStruct* ISAC_main_inst,
+                         const uint8_t* encoded,
+                         size_t len,
+                         int16_t* decoded,
+                         int16_t* speechType)
+{
+  ISACFIX_SubStruct *ISAC_inst;
+  /* number of samples (480 or 960), output from decoder */
+  /* that were actually used in the encoder/decoder (determined on the fly) */
+  size_t number_of_samples;
+  int declen_int = 0;
+  size_t declen;
+
+  /* typecast pointer to real structure */
+  ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+  /* check if decoder initiated */
+  if ((ISAC_inst->initflag & 1) != 1) {
+    ISAC_inst->errorcode = ISAC_DECODER_NOT_INITIATED;
+    return (-1);
+  }
+
+  /* Sanity check of packet length */
+  if (len == 0) {
+    /* return error code if the packet length is null  or less */
+    ISAC_inst->errorcode = ISAC_EMPTY_PACKET;
+    return -1;
+  } else if (len > (STREAM_MAXW16<<1)) {
+    /* return error code if length of stream is too long */
+    ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH;
+    return -1;
+  }
+
+  InitializeDecoderBitstream(len, &ISAC_inst->ISACdec_obj.bitstr_obj);
+
+  read_be16(encoded, len, ISAC_inst->ISACdec_obj.bitstr_obj.stream);
+
+  /* added for NetEq purposes (VAD/DTX related) */
+  *speechType=1;
+
+  declen_int = WebRtcIsacfix_DecodeImpl(decoded, &ISAC_inst->ISACdec_obj,
+                                        &number_of_samples);
+  if (declen_int < 0) {
+    /* Some error inside the decoder */
+    ISAC_inst->errorcode = -(int16_t)declen_int;
+    memset(decoded, 0, sizeof(int16_t) * MAX_FRAMESAMPLES);
+    return -1;
+  }
+  declen = (size_t)declen_int;
+
+  /* error check */
+
+  if (declen & 1) {
+    if (len != declen &&
+        len != declen +
+            ((ISAC_inst->ISACdec_obj.bitstr_obj.stream[declen >> 1]) & 0xFF)) {
+      ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH;
+      memset(decoded, 0, sizeof(int16_t) * number_of_samples);
+      return -1;
+    }
+  } else {
+    if (len != declen &&
+        len != declen +
+            ((ISAC_inst->ISACdec_obj.bitstr_obj.stream[declen >> 1]) >> 8)) {
+      ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH;
+      memset(decoded, 0, sizeof(int16_t) * number_of_samples);
+      return -1;
+    }
+  }
+
+  return (int)number_of_samples;
+}
+
+
+
+
+
+/****************************************************************************
+ * WebRtcIsacfix_DecodeNb(...)
+ *
+ * This function decodes a ISAC frame in narrow-band (8 kHz sampling).
+ * Output speech length will be a multiple of 240 samples: 240 or 480 samples,
+ * depending on the framesize (30 or 60 ms).
+ *
+ * The function is enabled if WEBRTC_ISAC_FIX_NB_CALLS_ENABLED is defined
+ *
+ * Input:
+ *      - ISAC_main_inst    : ISAC instance.
+ *      - encoded           : encoded ISAC frame(s)
+ *      - len               : bytes in encoded vector
+ *
+ * Output:
+ *      - decoded           : The decoded vector
+ *
+ * Return value             : >0 - number of samples in decoded vector
+ *                            -1 - Error
+ */
+
+#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
+int WebRtcIsacfix_DecodeNb(ISACFIX_MainStruct* ISAC_main_inst,
+                           const uint16_t* encoded,
+                           size_t len,
+                           int16_t* decoded,
+                           int16_t* speechType)
+{
+  ISACFIX_SubStruct *ISAC_inst;
+  /* twice the number of samples (480 or 960), output from decoder */
+  /* that were actually used in the encoder/decoder (determined on the fly) */
+  size_t number_of_samples;
+  int declen_int = 0;
+  size_t declen;
+  int16_t dummy[FRAMESAMPLES/2];
+
+
+  /* typecast pointer to real structure */
+  ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+  /* check if decoder initiated */
+  if ((ISAC_inst->initflag & 1) != 1) {
+    ISAC_inst->errorcode = ISAC_DECODER_NOT_INITIATED;
+    return (-1);
+  }
+
+  if (len == 0) {
+    /* return error code if the packet length is null  or less */
+    ISAC_inst->errorcode = ISAC_EMPTY_PACKET;
+    return -1;
+  } else if (len > (STREAM_MAXW16<<1)) {
+    /* return error code if length of stream is too long */
+    ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH;
+    return -1;
+  }
+
+  InitializeDecoderBitstream(len, &ISAC_inst->ISACdec_obj.bitstr_obj);
+
+  read_be16(encoded, len, ISAC_inst->ISACdec_obj.bitstr_obj.stream);
+
+  /* added for NetEq purposes (VAD/DTX related) */
+  *speechType=1;
+
+  declen_int = WebRtcIsacfix_DecodeImpl(decoded, &ISAC_inst->ISACdec_obj,
+                                        &number_of_samples);
+  if (declen_int < 0) {
+    /* Some error inside the decoder */
+    ISAC_inst->errorcode = -(int16_t)declen_int;
+    memset(decoded, 0, sizeof(int16_t) * FRAMESAMPLES);
+    return -1;
+  }
+  declen = (size_t)declen_int;
+
+  /* error check */
+
+  if (declen & 1) {
+    if (len != declen &&
+        len != declen +
+            ((ISAC_inst->ISACdec_obj.bitstr_obj.stream[declen >> 1]) & 0xFF)) {
+      ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH;
+      memset(decoded, 0, sizeof(int16_t) * number_of_samples);
+      return -1;
+    }
+  } else {
+    if (len != declen &&
+        len != declen +
+            ((ISAC_inst->ISACdec_obj.bitstr_obj.stream[declen >>1]) >> 8)) {
+      ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH;
+      memset(decoded, 0, sizeof(int16_t) * number_of_samples);
+      return -1;
+    }
+  }
+
+  WebRtcIsacfix_SplitAndFilter2(decoded, decoded, dummy, &ISAC_inst->ISACdec_obj.decimatorstr_obj);
+
+  if (number_of_samples>FRAMESAMPLES) {
+    WebRtcIsacfix_SplitAndFilter2(decoded + FRAMESAMPLES, decoded + FRAMESAMPLES/2,
+                                  dummy, &ISAC_inst->ISACdec_obj.decimatorstr_obj);
+  }
+
+  return (int)(number_of_samples / 2);
+}
+#endif /* WEBRTC_ISAC_FIX_NB_CALLS_ENABLED */
+
+
+/****************************************************************************
+ * WebRtcIsacfix_DecodePlcNb(...)
+ *
+ * This function conducts PLC for ISAC frame(s) in narrow-band (8kHz sampling).
+ * Output speech length  will be "240*noOfLostFrames" samples
+ * that is equevalent of "30*noOfLostFrames" millisecond.
+ *
+ * The function is enabled if WEBRTC_ISAC_FIX_NB_CALLS_ENABLED is defined
+ *
+ * Input:
+ *      - ISAC_main_inst    : ISAC instance.
+ *      - noOfLostFrames    : Number of PLC frames (240 sample=30ms) to produce
+ *
+ * Output:
+ *      - decoded           : The decoded vector
+ *
+ * Return value             : Number of samples in decoded PLC vector
+ */
+
+#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
+size_t WebRtcIsacfix_DecodePlcNb(ISACFIX_MainStruct* ISAC_main_inst,
+                                 int16_t* decoded,
+                                 size_t noOfLostFrames )
+{
+  size_t no_of_samples, declen, k;
+  int16_t outframeNB[FRAMESAMPLES];
+  int16_t outframeWB[FRAMESAMPLES];
+  int16_t dummy[FRAMESAMPLES/2];
+
+
+  ISACFIX_SubStruct *ISAC_inst;
+  /* typecast pointer to real structure */
+  ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+  /* Limit number of frames to two = 60 msec. Otherwise we exceed data vectors */
+  if (noOfLostFrames > 2){
+    noOfLostFrames = 2;
+  }
+
+  k = 0;
+  declen = 0;
+  while( noOfLostFrames > 0 )
+  {
+    WebRtcIsacfix_DecodePlcImpl(outframeWB, &ISAC_inst->ISACdec_obj,
+                                &no_of_samples);
+
+    WebRtcIsacfix_SplitAndFilter2(outframeWB, &(outframeNB[k*240]), dummy, &ISAC_inst->ISACdec_obj.decimatorstr_obj);
+
+    declen += no_of_samples;
+    noOfLostFrames--;
+    k++;
+  }
+
+  declen>>=1;
+
+  for (k=0;k<declen;k++) {
+    decoded[k] = outframeNB[k];
+  }
+
+  return declen;
+}
+#endif /* WEBRTC_ISAC_FIX_NB_CALLS_ENABLED */
+
+
+
+
+/****************************************************************************
+ * WebRtcIsacfix_DecodePlc(...)
+ *
+ * This function conducts PLC for ISAC frame(s) in wide-band (16kHz sampling).
+ * Output speech length  will be "480*noOfLostFrames" samples
+ * that is equevalent of "30*noOfLostFrames" millisecond.
+ *
+ * Input:
+ *      - ISAC_main_inst    : ISAC instance.
+ *      - noOfLostFrames    : Number of PLC frames (480sample = 30ms)
+ *                                to produce
+ *
+ * Output:
+ *      - decoded           : The decoded vector
+ *
+ * Return value             : Number of samples in decoded PLC vector
+ */
+
+size_t WebRtcIsacfix_DecodePlc(ISACFIX_MainStruct* ISAC_main_inst,
+                               int16_t* decoded,
+                               size_t noOfLostFrames)
+{
+
+  size_t no_of_samples, declen, k;
+  int16_t outframe16[MAX_FRAMESAMPLES];
+
+  ISACFIX_SubStruct *ISAC_inst;
+  /* typecast pointer to real structure */
+  ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+  /* Limit number of frames to two = 60 msec. Otherwise we exceed data vectors */
+  if (noOfLostFrames > 2) {
+    noOfLostFrames = 2;
+  }
+  k = 0;
+  declen = 0;
+  while( noOfLostFrames > 0 )
+  {
+    WebRtcIsacfix_DecodePlcImpl(&(outframe16[k*480]), &ISAC_inst->ISACdec_obj,
+                                &no_of_samples);
+    declen += no_of_samples;
+    noOfLostFrames--;
+    k++;
+  }
+
+  for (k=0;k<declen;k++) {
+    decoded[k] = outframe16[k];
+  }
+
+  return declen;
+}
+
+
+/****************************************************************************
+ * WebRtcIsacfix_Control(...)
+ *
+ * This function sets the limit on the short-term average bit rate and the
+ * frame length. Should be used only in Instantaneous mode.
+ *
+ * Input:
+ *      - ISAC_main_inst    : ISAC instance.
+ *      - rate              : limit on the short-term average bit rate,
+ *                            in bits/second (between 10000 and 32000)
+ *      - framesize         : number of milliseconds per frame (30 or 60)
+ *
+ * Return value             : 0  - ok
+ *                            -1 - Error
+ */
+
+int16_t WebRtcIsacfix_Control(ISACFIX_MainStruct *ISAC_main_inst,
+                              int16_t rate,
+                              int framesize)
+{
+  ISACFIX_SubStruct *ISAC_inst;
+  /* typecast pointer to real structure */
+  ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+  if (ISAC_inst->CodingMode == 0)
+  {
+    /* in adaptive mode */
+    ISAC_inst->errorcode = ISAC_MODE_MISMATCH;
+    return -1;
+  }
+
+
+  if (rate >= 10000 && rate <= 32000)
+    ISAC_inst->ISACenc_obj.BottleNeck = rate;
+  else {
+    ISAC_inst->errorcode = ISAC_DISALLOWED_BOTTLENECK;
+    return -1;
+  }
+
+
+
+  if (framesize  == 30 || framesize == 60)
+    ISAC_inst->ISACenc_obj.new_framelength = (int16_t)((FS/1000) * framesize);
+  else {
+    ISAC_inst->errorcode = ISAC_DISALLOWED_FRAME_LENGTH;
+    return -1;
+  }
+
+  return 0;
+}
+
+void WebRtcIsacfix_SetInitialBweBottleneck(ISACFIX_MainStruct* ISAC_main_inst,
+                                           int bottleneck_bits_per_second) {
+  ISACFIX_SubStruct* inst = (ISACFIX_SubStruct*)ISAC_main_inst;
+  RTC_DCHECK_GE(bottleneck_bits_per_second, 10000);
+  RTC_DCHECK_LE(bottleneck_bits_per_second, 32000);
+  inst->bwestimator_obj.sendBwAvg = ((uint32_t)bottleneck_bits_per_second) << 7;
+}
+
+/****************************************************************************
+ * WebRtcIsacfix_ControlBwe(...)
+ *
+ * This function sets the initial values of bottleneck and frame-size if
+ * iSAC is used in channel-adaptive mode. Through this API, users can
+ * enforce a frame-size for all values of bottleneck. Then iSAC will not
+ * automatically change the frame-size.
+ *
+ *
+ * Input:
+ *  - ISAC_main_inst : ISAC instance.
+ *      - rateBPS           : initial value of bottleneck in bits/second
+ *                            10000 <= rateBPS <= 32000 is accepted
+ *                            For default bottleneck set rateBPS = 0
+ *      - frameSizeMs       : number of milliseconds per frame (30 or 60)
+ *      - enforceFrameSize  : 1 to enforce the given frame-size through out
+ *                            the adaptation process, 0 to let iSAC change
+ *                            the frame-size if required.
+ *
+ * Return value    : 0  - ok
+ *         -1 - Error
+ */
+
+int16_t WebRtcIsacfix_ControlBwe(ISACFIX_MainStruct *ISAC_main_inst,
+                                 int16_t rateBPS,
+                                 int frameSizeMs,
+                                 int16_t enforceFrameSize)
+{
+  ISACFIX_SubStruct *ISAC_inst;
+  /* Typecast pointer to real structure */
+  ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+  /* check if encoder initiated */
+  if ((ISAC_inst->initflag & 2) != 2) {
+    ISAC_inst->errorcode = ISAC_ENCODER_NOT_INITIATED;
+    return (-1);
+  }
+
+  /* Check that we are in channel-adaptive mode, otherwise, return -1 */
+  if (ISAC_inst->CodingMode != 0) {
+    ISAC_inst->errorcode = ISAC_MODE_MISMATCH;
+    return (-1);
+  }
+
+  /* Set struct variable if enforceFrameSize is set. ISAC will then keep the */
+  /* chosen frame size.                                                      */
+  ISAC_inst->ISACenc_obj.enforceFrameSize = (enforceFrameSize != 0)? 1:0;
+
+  /* Set initial rate, if value between 10000 and 32000,                */
+  /* if rateBPS is 0, keep the default initial bottleneck value (15000) */
+  if ((rateBPS >= 10000) && (rateBPS <= 32000)) {
+    ISAC_inst->bwestimator_obj.sendBwAvg = (((uint32_t)rateBPS) << 7);
+  } else if (rateBPS != 0) {
+    ISAC_inst->errorcode = ISAC_DISALLOWED_BOTTLENECK;
+    return -1;
+  }
+
+  /* Set initial framesize. If enforceFrameSize is set the frame size will not change */
+  if ((frameSizeMs  == 30) || (frameSizeMs == 60)) {
+    ISAC_inst->ISACenc_obj.new_framelength = (int16_t)((FS/1000) * frameSizeMs);
+  } else {
+    ISAC_inst->errorcode = ISAC_DISALLOWED_FRAME_LENGTH;
+    return -1;
+  }
+
+  return 0;
+}
+
+
+
+
+
+/****************************************************************************
+ * WebRtcIsacfix_GetDownLinkBwIndex(...)
+ *
+ * This function returns index representing the Bandwidth estimate from
+ * other side to this side.
+ *
+ * Input:
+ *      - ISAC_main_inst: iSAC struct
+ *
+ * Output:
+ *      - rateIndex     : Bandwidth estimate to transmit to other side.
+ *
+ */
+
+int16_t WebRtcIsacfix_GetDownLinkBwIndex(ISACFIX_MainStruct* ISAC_main_inst,
+                                         int16_t*     rateIndex)
+{
+  ISACFIX_SubStruct *ISAC_inst;
+
+  /* typecast pointer to real structure */
+  ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+  /* Call function to get Bandwidth Estimate */
+  *rateIndex = WebRtcIsacfix_GetDownlinkBwIndexImpl(&ISAC_inst->bwestimator_obj);
+
+  return 0;
+}
+
+
+/****************************************************************************
+ * WebRtcIsacfix_UpdateUplinkBw(...)
+ *
+ * This function takes an index representing the Bandwidth estimate from
+ * this side to other side and updates BWE.
+ *
+ * Input:
+ *      - ISAC_main_inst: iSAC struct
+ *      - rateIndex     : Bandwidth estimate from other side.
+ *
+ */
+
+int16_t WebRtcIsacfix_UpdateUplinkBw(ISACFIX_MainStruct* ISAC_main_inst,
+                                     int16_t     rateIndex)
+{
+  int16_t err = 0;
+  ISACFIX_SubStruct *ISAC_inst;
+
+  /* typecast pointer to real structure */
+  ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+  /* Call function to update BWE with received Bandwidth Estimate */
+  err = WebRtcIsacfix_UpdateUplinkBwRec(&ISAC_inst->bwestimator_obj, rateIndex);
+  if (err < 0) {
+    ISAC_inst->errorcode = -err;
+    return (-1);
+  }
+
+  return 0;
+}
+
+/****************************************************************************
+ * WebRtcIsacfix_ReadFrameLen(...)
+ *
+ * This function returns the length of the frame represented in the packet.
+ *
+ * Input:
+ *      - encoded       : Encoded bitstream
+ *
+ * Output:
+ *      - frameLength   : Length of frame in packet (in samples)
+ *
+ */
+
+int16_t WebRtcIsacfix_ReadFrameLen(const uint8_t* encoded,
+                                   size_t encoded_len_bytes,
+                                   size_t* frameLength)
+{
+  Bitstr_dec streamdata;
+  int16_t err;
+  const size_t kRequiredEncodedLenBytes = 10;
+
+  if (encoded_len_bytes < kRequiredEncodedLenBytes) {
+    return -1;
+  }
+
+  InitializeDecoderBitstream(encoded_len_bytes, &streamdata);
+
+  read_be16(encoded, kRequiredEncodedLenBytes, streamdata.stream);
+
+  /* decode frame length */
+  err = WebRtcIsacfix_DecodeFrameLen(&streamdata, frameLength);
+  if (err<0)  // error check
+    return err;
+
+  return 0;
+}
+
+
+/****************************************************************************
+ * WebRtcIsacfix_ReadBwIndex(...)
+ *
+ * This function returns the index of the Bandwidth estimate from the bitstream.
+ *
+ * Input:
+ *      - encoded       : Encoded bitstream
+ *
+ * Output:
+ *      - frameLength   : Length of frame in packet (in samples)
+ *      - rateIndex     : Bandwidth estimate in bitstream
+ *
+ */
+
+int16_t WebRtcIsacfix_ReadBwIndex(const uint8_t* encoded,
+                                  size_t encoded_len_bytes,
+                                  int16_t* rateIndex)
+{
+  Bitstr_dec streamdata;
+  int16_t err;
+  const size_t kRequiredEncodedLenBytes = 10;
+
+  if (encoded_len_bytes < kRequiredEncodedLenBytes) {
+    return -1;
+  }
+
+  InitializeDecoderBitstream(encoded_len_bytes, &streamdata);
+
+  read_be16(encoded, kRequiredEncodedLenBytes, streamdata.stream);
+
+  /* decode frame length, needed to get to the rateIndex in the bitstream */
+  size_t frameLength;
+  err = WebRtcIsacfix_DecodeFrameLen(&streamdata, &frameLength);
+  if (err<0)  // error check
+    return err;
+
+  /* decode BW estimation */
+  err = WebRtcIsacfix_DecodeSendBandwidth(&streamdata, rateIndex);
+  if (err<0)  // error check
+    return err;
+
+  return 0;
+}
+
+
+
+
+/****************************************************************************
+ * WebRtcIsacfix_GetErrorCode(...)
+ *
+ * This function can be used to check the error code of an iSAC instance. When
+ * a function returns -1 a error code will be set for that instance. The
+ * function below extract the code of the last error that occured in the
+ * specified instance.
+ *
+ * Input:
+ *      - ISAC_main_inst    : ISAC instance
+ *
+ * Return value             : Error code
+ */
+
+int16_t WebRtcIsacfix_GetErrorCode(ISACFIX_MainStruct *ISAC_main_inst)
+{
+  ISACFIX_SubStruct *ISAC_inst;
+  /* typecast pointer to real structure */
+  ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+  return ISAC_inst->errorcode;
+}
+
+
+
+/****************************************************************************
+ * WebRtcIsacfix_GetUplinkBw(...)
+ *
+ * This function returns the inst quantized iSAC send bitrate
+ *
+ * Input:
+ *      - ISAC_main_inst    : iSAC instance
+ *
+ * Return value             : bitrate
+ */
+
+int32_t WebRtcIsacfix_GetUplinkBw(ISACFIX_MainStruct *ISAC_main_inst)
+{
+  ISACFIX_SubStruct *ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+  BwEstimatorstr * bw = (BwEstimatorstr*)&(ISAC_inst->bwestimator_obj);
+
+  return (int32_t) WebRtcIsacfix_GetUplinkBandwidth(bw);
+}
+
+/****************************************************************************
+ * WebRtcIsacfix_GetNewFrameLen(...)
+ *
+ * This function return the next frame length (in samples) of iSAC.
+ *
+ * Input:
+ *      - ISAC_main_inst    : iSAC instance
+ *
+ * Return value             :  frame lenght in samples
+ */
+
+int16_t WebRtcIsacfix_GetNewFrameLen(ISACFIX_MainStruct *ISAC_main_inst)
+{
+  ISACFIX_SubStruct *ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+  return ISAC_inst->ISACenc_obj.new_framelength;
+}
+
+
+/****************************************************************************
+ * WebRtcIsacfix_SetMaxPayloadSize(...)
+ *
+ * This function sets a limit for the maximum payload size of iSAC. The same
+ * value is used both for 30 and 60 msec packets.
+ * The absolute max will be valid until next time the function is called.
+ * NOTE! This function may override the function WebRtcIsacfix_SetMaxRate()
+ *
+ * Input:
+ *      - ISAC_main_inst    : iSAC instance
+ *      - maxPayloadBytes   : maximum size of the payload in bytes
+ *                            valid values are between 100 and 400 bytes
+ *
+ *
+ * Return value             : 0 if sucessful
+ *                           -1 if error happens
+ */
+
+int16_t WebRtcIsacfix_SetMaxPayloadSize(ISACFIX_MainStruct *ISAC_main_inst,
+                                        int16_t maxPayloadBytes)
+{
+  ISACFIX_SubStruct *ISAC_inst;
+
+  /* typecast pointer to real structure */
+  ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+  if((maxPayloadBytes < 100) || (maxPayloadBytes > 400))
+  {
+    /* maxPayloadBytes is out of valid range */
+    return -1;
+  }
+  else
+  {
+    /* Set new absolute max, which will not change unless this function
+       is called again with a new value */
+    ISAC_inst->ISACenc_obj.maxPayloadBytes = maxPayloadBytes;
+
+    /* Set new maximum values for 30 and 60 msec packets */
+    if (maxPayloadBytes < ISAC_inst->ISACenc_obj.maxRateInBytes) {
+      ISAC_inst->ISACenc_obj.payloadLimitBytes30 = maxPayloadBytes;
+    } else {
+      ISAC_inst->ISACenc_obj.payloadLimitBytes30 = ISAC_inst->ISACenc_obj.maxRateInBytes;
+    }
+
+    if ( maxPayloadBytes < (ISAC_inst->ISACenc_obj.maxRateInBytes << 1)) {
+      ISAC_inst->ISACenc_obj.payloadLimitBytes60 = maxPayloadBytes;
+    } else {
+      ISAC_inst->ISACenc_obj.payloadLimitBytes60 = (ISAC_inst->ISACenc_obj.maxRateInBytes << 1);
+    }
+  }
+  return 0;
+}
+
+
+/****************************************************************************
+ * WebRtcIsacfix_SetMaxRate(...)
+ *
+ * This function sets the maximum rate which the codec may not exceed for a
+ * singel packet. The maximum rate is set in bits per second.
+ * The codec has an absolute maximum rate of 53400 bits per second (200 bytes
+ * per 30 msec).
+ * It is possible to set a maximum rate between 32000 and 53400 bits per second.
+ *
+ * The rate limit is valid until next time the function is called.
+ *
+ * NOTE! Packet size will never go above the value set if calling
+ * WebRtcIsacfix_SetMaxPayloadSize() (default max packet size is 400 bytes).
+ *
+ * Input:
+ *      - ISAC_main_inst    : iSAC instance
+ *      - maxRateInBytes    : maximum rate in bits per second,
+ *                            valid values are 32000 to 53400 bits
+ *
+ * Return value             : 0 if sucessful
+ *                           -1 if error happens
+ */
+
+int16_t WebRtcIsacfix_SetMaxRate(ISACFIX_MainStruct *ISAC_main_inst,
+                                 int32_t maxRate)
+{
+  ISACFIX_SubStruct *ISAC_inst;
+  int16_t maxRateInBytes;
+
+  /* typecast pointer to real structure */
+  ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
+
+  if((maxRate < 32000) || (maxRate > 53400))
+  {
+    /* maxRate is out of valid range */
+    return -1;
+  }
+  else
+  {
+    /* Calculate maximum number of bytes per 30 msec packets for the given
+       maximum rate. Multiply with 30/1000 to get number of bits per 30 msec,
+       divide by 8 to get number of bytes per 30 msec:
+       maxRateInBytes = floor((maxRate * 30/1000) / 8); */
+    maxRateInBytes = (int16_t)( WebRtcSpl_DivW32W16ResW16(WEBRTC_SPL_MUL(maxRate, 3), 800) );
+
+    /* Store the value for usage in the WebRtcIsacfix_SetMaxPayloadSize-function */
+    ISAC_inst->ISACenc_obj.maxRateInBytes = maxRateInBytes;
+
+    /* For 30 msec packets: if the new limit is below the maximum
+       payload size, set a new limit */
+    if (maxRateInBytes < ISAC_inst->ISACenc_obj.maxPayloadBytes) {
+      ISAC_inst->ISACenc_obj.payloadLimitBytes30 = maxRateInBytes;
+    } else {
+      ISAC_inst->ISACenc_obj.payloadLimitBytes30 = ISAC_inst->ISACenc_obj.maxPayloadBytes;
+    }
+
+    /* For 60 msec packets: if the new limit (times 2) is below the
+       maximum payload size, set a new limit */
+    if ( (maxRateInBytes << 1) < ISAC_inst->ISACenc_obj.maxPayloadBytes) {
+      ISAC_inst->ISACenc_obj.payloadLimitBytes60 = (maxRateInBytes << 1);
+    } else {
+      ISAC_inst->ISACenc_obj.payloadLimitBytes60 = ISAC_inst->ISACenc_obj.maxPayloadBytes;
+    }
+  }
+
+  return 0;
+}
+
+
+
+/****************************************************************************
+ * WebRtcIsacfix_version(...)
+ *
+ * This function returns the version number.
+ *
+ * Output:
+ *      - version  : Pointer to character string
+ *
+ */
+
+void WebRtcIsacfix_version(char *version)
+{
+  strcpy(version, "3.6.0");
+}
+
+void WebRtcIsacfix_GetBandwidthInfo(ISACFIX_MainStruct* ISAC_main_inst,
+                                    IsacBandwidthInfo* bwinfo) {
+  ISACFIX_SubStruct* inst = (ISACFIX_SubStruct*)ISAC_main_inst;
+  RTC_DCHECK_NE(0, inst->initflag & 1);  // Decoder initialized.
+  WebRtcIsacfixBw_GetBandwidthInfo(&inst->bwestimator_obj, bwinfo);
+}
+
+void WebRtcIsacfix_SetBandwidthInfo(ISACFIX_MainStruct* ISAC_main_inst,
+                                    const IsacBandwidthInfo* bwinfo) {
+  ISACFIX_SubStruct* inst = (ISACFIX_SubStruct*)ISAC_main_inst;
+  RTC_DCHECK_NE(0, inst->initflag & 2);  // Encoder initialized.
+  WebRtcIsacfixBw_SetBandwidthInfo(&inst->bwestimator_obj, bwinfo);
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/lattice.c b/modules/audio_coding/codecs/isac/fix/source/lattice.c
new file mode 100644
index 0000000..1089549
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/lattice.c
@@ -0,0 +1,322 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lattice.c
+ *
+ * Contains the normalized lattice filter routines (MA and AR) for iSAC codec
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "rtc_base/sanitizer.h"
+
+#define LATTICE_MUL_32_32_RSFT16(a32a, a32b, b32)                  \
+  ((int32_t)(WEBRTC_SPL_MUL(a32a, b32) + (WEBRTC_SPL_MUL_16_32_RSFT16(a32b, b32))))
+/* This macro is FORBIDDEN to use elsewhere than in a function in this file and
+   its corresponding neon version. It might give unpredictable results, since a
+   general int32_t*int32_t multiplication results in a 64 bit value.
+   The result is then shifted just 16 steps to the right, giving need for 48
+   bits, i.e. in the generel case, it will NOT fit in a int32_t. In the
+   cases used in here, the int32_t will be enough, since (for a good
+   reason) the involved multiplicands aren't big enough to overflow a
+   int32_t after shifting right 16 bits. I have compared the result of a
+   multiplication between t32 and tmp32, done in two ways:
+   1) Using (int32_t) (((float)(tmp32))*((float)(tmp32b))/65536.0);
+   2) Using LATTICE_MUL_32_32_RSFT16(t16a, t16b, tmp32b);
+   By running 25 files, I haven't found any bigger diff than 64 - this was in the
+   case when  method 1) gave 650235648 and 2) gave 650235712.
+*/
+
+/* Function prototype: filtering ar_g_Q0[] and ar_f_Q0[] through an AR filter
+   with coefficients cth_Q15[] and sth_Q15[].
+   Implemented for both generic and ARMv7 platforms.
+ */
+void WebRtcIsacfix_FilterArLoop(int16_t* ar_g_Q0,
+                                int16_t* ar_f_Q0,
+                                int16_t* cth_Q15,
+                                int16_t* sth_Q15,
+                                size_t order_coef);
+
+/* Inner loop used for function WebRtcIsacfix_NormLatticeFilterMa(). It does:
+   for 0 <= n < HALF_SUBFRAMELEN - 1:
+     *ptr2 = input2 * (*ptr2) + input0 * (*ptr0));
+     *ptr1 = input1 * (*ptr0) + input0 * (*ptr2);
+   Note, function WebRtcIsacfix_FilterMaLoopNeon and WebRtcIsacfix_FilterMaLoopC
+   are not bit-exact. The accuracy by the ARM Neon function is same or better.
+*/
+void WebRtcIsacfix_FilterMaLoopC(int16_t input0,  // Filter coefficient
+                                 int16_t input1,  // Filter coefficient
+                                 int32_t input2,  // Inverse coeff. (1/input1)
+                                 int32_t* ptr0,   // Sample buffer
+                                 int32_t* ptr1,   // Sample buffer
+                                 int32_t* ptr2) { // Sample buffer
+  int n = 0;
+
+  // Separate the 32-bit variable input2 into two 16-bit integers (high 16 and
+  // low 16 bits), for using LATTICE_MUL_32_32_RSFT16 in the loop.
+  int16_t t16a = (int16_t)(input2 >> 16);
+  int16_t t16b = (int16_t)input2;
+  if (t16b < 0) t16a++;
+
+  // The loop filtering the samples *ptr0, *ptr1, *ptr2 with filter coefficients
+  // input0, input1, and input2.
+  for(n = 0; n < HALF_SUBFRAMELEN - 1; n++, ptr0++, ptr1++, ptr2++) {
+    int32_t tmp32a = 0;
+    int32_t tmp32b = 0;
+
+    // Calculate *ptr2 = input2 * (*ptr2 + input0 * (*ptr0));
+    tmp32a = WEBRTC_SPL_MUL_16_32_RSFT15(input0, *ptr0); // Q15 * Q15 >> 15 = Q15
+    tmp32b = *ptr2 + tmp32a; // Q15 + Q15 = Q15
+    *ptr2 = LATTICE_MUL_32_32_RSFT16(t16a, t16b, tmp32b);
+
+    // Calculate *ptr1 = input1 * (*ptr0) + input0 * (*ptr2);
+    tmp32a = WEBRTC_SPL_MUL_16_32_RSFT15(input1, *ptr0); // Q15*Q15>>15 = Q15
+    tmp32b = WEBRTC_SPL_MUL_16_32_RSFT15(input0, *ptr2); // Q15*Q15>>15 = Q15
+    *ptr1 = tmp32a + tmp32b; // Q15 + Q15 = Q15
+  }
+}
+
+/* filter the signal using normalized lattice filter */
+/* MA filter */
+void WebRtcIsacfix_NormLatticeFilterMa(size_t orderCoef,
+                                       int32_t *stateGQ15,
+                                       int16_t *lat_inQ0,
+                                       int16_t *filt_coefQ15,
+                                       int32_t *gain_lo_hiQ17,
+                                       int16_t lo_hi,
+                                       int16_t *lat_outQ9)
+{
+  int16_t sthQ15[MAX_AR_MODEL_ORDER];
+  int16_t cthQ15[MAX_AR_MODEL_ORDER];
+
+  int u, n;
+  size_t i, k;
+  int16_t temp2,temp3;
+  size_t ord_1 = orderCoef+1;
+  int32_t inv_cthQ16[MAX_AR_MODEL_ORDER];
+
+  int32_t gain32, fQtmp;
+  int16_t gain16;
+  int16_t gain_sh;
+
+  int32_t tmp32, tmp32b;
+  int32_t fQ15vec[HALF_SUBFRAMELEN];
+  int32_t gQ15[MAX_AR_MODEL_ORDER+1][HALF_SUBFRAMELEN];
+  int16_t sh;
+  int16_t t16a;
+  int16_t t16b;
+
+  for (u=0;u<SUBFRAMES;u++)
+  {
+    int32_t temp1 = u * HALF_SUBFRAMELEN;
+
+    /* set the Direct Form coefficients */
+    temp2 = (int16_t)(u * orderCoef);
+    temp3 = (int16_t)(2 * u + lo_hi);
+
+    /* compute lattice filter coefficients */
+    memcpy(sthQ15, &filt_coefQ15[temp2], orderCoef * sizeof(int16_t));
+
+    WebRtcSpl_SqrtOfOneMinusXSquared(sthQ15, orderCoef, cthQ15);
+
+    /* compute the gain */
+    gain32 = gain_lo_hiQ17[temp3];
+    gain_sh = WebRtcSpl_NormW32(gain32);
+    gain32 <<= gain_sh;  // Q(17+gain_sh)
+
+    for (k=0;k<orderCoef;k++)
+    {
+      gain32 = WEBRTC_SPL_MUL_16_32_RSFT15(cthQ15[k], gain32); //Q15*Q(17+gain_sh)>>15 = Q(17+gain_sh)
+      inv_cthQ16[k] = WebRtcSpl_DivW32W16((int32_t)2147483647, cthQ15[k]); // 1/cth[k] in Q31/Q15 = Q16
+    }
+    gain16 = (int16_t)(gain32 >> 16);  // Q(1+gain_sh).
+
+    /* normalized lattice filter */
+    /*****************************/
+
+    /* initial conditions */
+    for (i=0;i<HALF_SUBFRAMELEN;i++)
+    {
+      fQ15vec[i] = lat_inQ0[i + temp1] << 15;  // Q15
+      gQ15[0][i] = lat_inQ0[i + temp1] << 15;  // Q15
+    }
+
+
+    fQtmp = fQ15vec[0];
+
+    /* get the state of f&g for the first input, for all orders */
+    for (i=1;i<ord_1;i++)
+    {
+      // Calculate f[i][0] = inv_cth[i-1]*(f[i-1][0] + sth[i-1]*stateG[i-1]);
+      tmp32 = WEBRTC_SPL_MUL_16_32_RSFT15(sthQ15[i-1], stateGQ15[i-1]);//Q15*Q15>>15 = Q15
+      tmp32b= fQtmp + tmp32; //Q15+Q15=Q15
+      tmp32 = inv_cthQ16[i-1]; //Q16
+      t16a = (int16_t)(tmp32 >> 16);
+      t16b = (int16_t)(tmp32 - (t16a << 16));
+      if (t16b<0) t16a++;
+      tmp32 = LATTICE_MUL_32_32_RSFT16(t16a, t16b, tmp32b);
+      fQtmp = tmp32; // Q15
+
+      // Calculate g[i][0] = cth[i-1]*stateG[i-1] + sth[i-1]* f[i][0];
+      tmp32  = WEBRTC_SPL_MUL_16_32_RSFT15(cthQ15[i-1], stateGQ15[i-1]); //Q15*Q15>>15 = Q15
+      tmp32b = WEBRTC_SPL_MUL_16_32_RSFT15(sthQ15[i-1], fQtmp); //Q15*Q15>>15 = Q15
+      tmp32  = tmp32 + tmp32b;//Q15+Q15 = Q15
+      gQ15[i][0] = tmp32; // Q15
+    }
+
+    /* filtering */
+    /* save the states */
+    for(k=0;k<orderCoef;k++)
+    {
+      // for 0 <= n < HALF_SUBFRAMELEN - 1:
+      //   f[k+1][n+1] = inv_cth[k]*(f[k][n+1] + sth[k]*g[k][n]);
+      //   g[k+1][n+1] = cth[k]*g[k][n] + sth[k]* f[k+1][n+1];
+      WebRtcIsacfix_FilterMaLoopFix(sthQ15[k], cthQ15[k], inv_cthQ16[k],
+                                    &gQ15[k][0], &gQ15[k+1][1], &fQ15vec[1]);
+    }
+
+    fQ15vec[0] = fQtmp;
+
+    for(n=0;n<HALF_SUBFRAMELEN;n++)
+    {
+      //gain32 >>= gain_sh; // Q(17+gain_sh) -> Q17
+      tmp32 = WEBRTC_SPL_MUL_16_32_RSFT16(gain16, fQ15vec[n]); //Q(1+gain_sh)*Q15>>16 = Q(gain_sh)
+      sh = 9-gain_sh; //number of needed shifts to reach Q9
+      t16a = (int16_t) WEBRTC_SPL_SHIFT_W32(tmp32, sh);
+      lat_outQ9[n + temp1] = t16a;
+    }
+
+    /* save the states */
+    for (i=0;i<ord_1;i++)
+    {
+      stateGQ15[i] = gQ15[i][HALF_SUBFRAMELEN-1];
+    }
+    //process next frame
+  }
+
+  return;
+}
+
+// Left shift of an int32_t that's allowed to overflow. (It's still undefined
+// behavior, so not a good idea; this just makes UBSan ignore the violation, so
+// that our old code can continue to do what it's always been doing.)
+static inline int32_t RTC_NO_SANITIZE("shift")
+    OverflowingLShiftS32(int32_t x, int shift) {
+  return x << shift;
+}
+
+/* ----------------AR filter-------------------------*/
+/* filter the signal using normalized lattice filter */
+void WebRtcIsacfix_NormLatticeFilterAr(size_t orderCoef,
+                                       int16_t *stateGQ0,
+                                       int32_t *lat_inQ25,
+                                       int16_t *filt_coefQ15,
+                                       int32_t *gain_lo_hiQ17,
+                                       int16_t lo_hi,
+                                       int16_t *lat_outQ0)
+{
+  size_t ii, k, i;
+  int n, u;
+  int16_t sthQ15[MAX_AR_MODEL_ORDER];
+  int16_t cthQ15[MAX_AR_MODEL_ORDER];
+  int32_t tmp32;
+
+
+  int16_t tmpAR;
+  int16_t ARfQ0vec[HALF_SUBFRAMELEN];
+  int16_t ARgQ0vec[MAX_AR_MODEL_ORDER+1];
+
+  int32_t inv_gain32;
+  int16_t inv_gain16;
+  int16_t den16;
+  int16_t sh;
+
+  int16_t temp2,temp3;
+  size_t ord_1 = orderCoef+1;
+
+  for (u=0;u<SUBFRAMES;u++)
+  {
+    int32_t temp1 = u * HALF_SUBFRAMELEN;
+
+    //set the denominator and numerator of the Direct Form
+    temp2 = (int16_t)(u * orderCoef);
+    temp3 = (int16_t)(2 * u + lo_hi);
+
+    for (ii=0; ii<orderCoef; ii++) {
+      sthQ15[ii] = filt_coefQ15[temp2+ii];
+    }
+
+    WebRtcSpl_SqrtOfOneMinusXSquared(sthQ15, orderCoef, cthQ15);
+
+    // Originally, this line was assumed to never overflow, since "[s]imulation
+    // of the 25 files shows that maximum value in the vector gain_lo_hiQ17[]
+    // is 441344, which means that it is log2((2^31)/441344) = 12.2 shifting
+    // bits from saturation. Therefore, it should be safe to use Q27 instead of
+    // Q17." However, a fuzzer test succeeded in provoking an overflow here,
+    // which we ignore on the theory that only "abnormal" inputs cause
+    // overflow.
+    tmp32 = OverflowingLShiftS32(gain_lo_hiQ17[temp3], 10);  // Q27
+
+    for (k=0;k<orderCoef;k++) {
+      tmp32 = WEBRTC_SPL_MUL_16_32_RSFT15(cthQ15[k], tmp32); // Q15*Q27>>15 = Q27
+    }
+
+    sh = WebRtcSpl_NormW32(tmp32); // tmp32 is the gain
+    den16 = (int16_t) WEBRTC_SPL_SHIFT_W32(tmp32, sh-16); //Q(27+sh-16) = Q(sh+11) (all 16 bits are value bits)
+    inv_gain32 = WebRtcSpl_DivW32W16((int32_t)2147483647, den16); // 1/gain in Q31/Q(sh+11) = Q(20-sh)
+
+    //initial conditions
+    inv_gain16 = (int16_t)(inv_gain32 >> 2);  // 1/gain in Q(20-sh-2) = Q(18-sh)
+
+    for (i=0;i<HALF_SUBFRAMELEN;i++)
+    {
+
+      tmp32 = lat_inQ25[i + temp1] * (1 << 1);  // Q25->Q26
+      tmp32 = WEBRTC_SPL_MUL_16_32_RSFT16(inv_gain16, tmp32); //lat_in[]*inv_gain in (Q(18-sh)*Q26)>>16 = Q(28-sh)
+      tmp32 = WEBRTC_SPL_SHIFT_W32(tmp32, -(28-sh)); // lat_in[]*inv_gain in Q0
+
+      ARfQ0vec[i] = (int16_t)WebRtcSpl_SatW32ToW16(tmp32); // Q0
+    }
+
+    // Get the state of f & g for the first input, for all orders.
+    for (i = orderCoef; i > 0; i--)
+    {
+      tmp32 = (cthQ15[i - 1] * ARfQ0vec[0] - sthQ15[i - 1] * stateGQ0[i - 1] +
+               16384) >> 15;
+      tmpAR = (int16_t)WebRtcSpl_SatW32ToW16(tmp32); // Q0
+
+      tmp32 = (sthQ15[i - 1] * ARfQ0vec[0] + cthQ15[i - 1] * stateGQ0[i - 1] +
+               16384) >> 15;
+      ARgQ0vec[i] = (int16_t)WebRtcSpl_SatW32ToW16(tmp32); // Q0
+      ARfQ0vec[0] = tmpAR;
+    }
+    ARgQ0vec[0] = ARfQ0vec[0];
+
+    // Filter ARgQ0vec[] and ARfQ0vec[] through coefficients cthQ15[] and sthQ15[].
+    WebRtcIsacfix_FilterArLoop(ARgQ0vec, ARfQ0vec, cthQ15, sthQ15, orderCoef);
+
+    for(n=0;n<HALF_SUBFRAMELEN;n++)
+    {
+      lat_outQ0[n + temp1] = ARfQ0vec[n];
+    }
+
+
+    /* cannot use memcpy in the following */
+
+    for (i=0;i<ord_1;i++)
+    {
+      stateGQ0[i] = ARgQ0vec[i];
+    }
+  }
+
+  return;
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/lattice_armv7.S b/modules/audio_coding/codecs/isac/fix/source/lattice_armv7.S
new file mode 100644
index 0000000..67ca4a4
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/lattice_armv7.S
@@ -0,0 +1,77 @@
+@
+@ Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+@
+@ Use of this source code is governed by a BSD-style license
+@ that can be found in the LICENSE file in the root of the source
+@ tree. An additional intellectual property rights grant can be found
+@ in the file PATENTS.  All contributing project authors may
+@ be found in the AUTHORS file in the root of the source tree.
+@
+
+@ Contains a function for the core loop in the normalized lattice AR
+@ filter routine for iSAC codec, optimized for ARMv7 platforms.
+@
+@ Output is bit-exact with the reference C code in lattic_c.c
+@
+@ Register usage:
+@
+@ r0:  &ar_g_Q0
+@ r1:  &ar_f_Q0
+@ r2:  &cth_Q15
+@ r3:  &sth_Q15
+@ r4:  out loop counter
+@ r5:  tmpAR
+@ r9:  inner loop counter
+@ r12: constant #16384
+@ r6, r7, r8, r10, r11: scratch
+
+#include "system_wrappers/include/asm_defines.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+GLOBAL_FUNCTION WebRtcIsacfix_FilterArLoop
+.align  2
+DEFINE_FUNCTION WebRtcIsacfix_FilterArLoop
+  push    {r4-r11}
+
+  add     r1, #2                 @ &ar_f_Q0[1]
+  mov     r12, #16384
+  mov     r4, #HALF_SUBFRAMELEN
+  sub     r4, #1                 @ Outer loop counter = HALF_SUBFRAMELEN - 1
+
+HALF_SUBFRAME_LOOP:  @ for (n = 0; n < HALF_SUBFRAMELEN - 1; n++)
+
+  ldr     r9, [sp, #32]          @ Restore the inner loop counter to order_coef
+  ldrh    r5, [r1]               @ tmpAR = ar_f_Q0[n+1]
+  add     r0, r9, asl #1         @ Restore r0 to &ar_g_Q0[order_coef]
+  add     r2, r9, asl #1         @ Restore r2 to &cth_Q15[order_coef]
+  add     r3, r9, asl #1         @ Restore r3 to &sth_Q15[order_coef]
+
+ORDER_COEF_LOOP:  @ for (k = order_coef; k > 0; k--)
+
+  ldrh    r7, [r3, #-2]!         @ sth_Q15[k - 1]
+  ldrh    r6, [r2, #-2]!         @ cth_Q15[k - 1]
+
+  ldrh    r8, [r0, #-2]          @ ar_g_Q0[k - 1]
+  smlabb  r11, r7, r5, r12       @ sth_Q15[k - 1] * tmpAR + 16384
+  smlabb  r10, r6, r5, r12       @ cth_Q15[k - 1] * tmpAR + 16384
+  smulbb  r7, r7, r8             @ sth_Q15[k - 1] * ar_g_Q0[k - 1]
+  smlabb  r11, r6, r8, r11       @ cth_Q15[k - 1] * ar_g_Q0[k - 1] +
+                                 @     (sth_Q15[k - 1] * tmpAR + 16384)
+
+  sub     r10, r10, r7           @ cth_Q15[k - 1] * tmpAR + 16384 -
+                                 @     (sth_Q15[k - 1] * ar_g_Q0[k - 1])
+  ssat    r11, #16, r11, asr #15
+  ssat    r5, #16, r10, asr #15
+  strh    r11, [r0], #-2         @ Output: ar_g_Q0[k]
+
+  subs    r9, #1
+  bgt     ORDER_COEF_LOOP
+
+  strh    r5, [r0]               @ Output: ar_g_Q0[0] = tmpAR;
+  strh    r5, [r1], #2           @ Output: ar_f_Q0[n+1] = tmpAR;
+
+  subs    r4, #1
+  bne     HALF_SUBFRAME_LOOP
+
+  pop     {r4-r11}
+  bx      lr
diff --git a/modules/audio_coding/codecs/isac/fix/source/lattice_c.c b/modules/audio_coding/codecs/isac/fix/source/lattice_c.c
new file mode 100644
index 0000000..d9849d6
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/lattice_c.c
@@ -0,0 +1,49 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * Contains the core loop function for the lattice filter AR routine
+ * for iSAC codec.
+ *
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+/* Filter ar_g_Q0[] and ar_f_Q0[] through an AR filter with coefficients
+ * cth_Q15[] and sth_Q15[].
+ */
+void WebRtcIsacfix_FilterArLoop(int16_t* ar_g_Q0,     // Input samples
+                                int16_t* ar_f_Q0,     // Input samples
+                                int16_t* cth_Q15,     // Filter coefficients
+                                int16_t* sth_Q15,     // Filter coefficients
+                                size_t order_coef) { // order of the filter
+  int n = 0;
+
+  for (n = 0; n < HALF_SUBFRAMELEN - 1; n++) {
+    size_t k = 0;
+    int16_t tmpAR = 0;
+    int32_t tmp32 = 0;
+    int32_t tmp32_2 = 0;
+
+    tmpAR = ar_f_Q0[n + 1];
+    for (k = order_coef; k > 0; k--) {
+      tmp32 = (cth_Q15[k - 1] * tmpAR - sth_Q15[k - 1] * ar_g_Q0[k - 1] +
+               16384) >> 15;
+      tmp32_2 = (sth_Q15[k - 1] * tmpAR + cth_Q15[k - 1] * ar_g_Q0[k - 1] +
+                 16384) >> 15;
+      tmpAR   = (int16_t)WebRtcSpl_SatW32ToW16(tmp32);
+      ar_g_Q0[k] = (int16_t)WebRtcSpl_SatW32ToW16(tmp32_2);
+    }
+    ar_f_Q0[n + 1] = tmpAR;
+    ar_g_Q0[0] = tmpAR;
+  }
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/lattice_mips.c b/modules/audio_coding/codecs/isac/fix/source/lattice_mips.c
new file mode 100644
index 0000000..613c07c
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/lattice_mips.c
@@ -0,0 +1,329 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h>
+
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+// Filter ar_g_Q0[] and ar_f_Q0[] through an AR filter with coefficients
+// cth_Q15[] and sth_Q15[].
+void WebRtcIsacfix_FilterArLoop(int16_t* ar_g_Q0,     // Input samples
+                                int16_t* ar_f_Q0,     // Input samples
+                                int16_t* cth_Q15,     // Filter coefficients
+                                int16_t* sth_Q15,     // Filter coefficients
+                                size_t order_coef) { // order of the filter
+  int n = 0;
+
+  for (n = 0; n < HALF_SUBFRAMELEN - 1; n++) {
+    int count = (int)(order_coef - 1);
+    int offset;
+#if !defined(MIPS_DSP_R1_LE)
+    int16_t* tmp_cth;
+    int16_t* tmp_sth;
+    int16_t* tmp_arg;
+    int32_t max_q16 = 0x7fff;
+    int32_t min_q16 = 0xffff8000;
+#endif
+    // Declare variables used as temporary registers.
+    int32_t r0, r1, r2, t0, t1, t2, t_ar;
+
+    __asm __volatile (
+      ".set          push                                                \n\t"
+      ".set          noreorder                                           \n\t"
+      "bltz          %[count],     2f                                    \n\t"
+      " lh           %[t_ar],      0(%[tmp])                             \n\t"
+      // Inner loop
+     "1:                                                                 \n\t"
+      "sll           %[offset],    %[count],               1             \n\t"
+#if defined(MIPS_DSP_R1_LE)
+      "lhx           %[r0],        %[offset](%[cth_Q15])                 \n\t"
+      "lhx           %[r1],        %[offset](%[sth_Q15])                 \n\t"
+      "lhx           %[r2],        %[offset](%[ar_g_Q0])                 \n\t"
+#else
+      "addu          %[tmp_cth],   %[cth_Q15],             %[offset]     \n\t"
+      "addu          %[tmp_sth],   %[sth_Q15],             %[offset]     \n\t"
+      "addu          %[tmp_arg],   %[ar_g_Q0],             %[offset]     \n\t"
+      "lh            %[r0],        0(%[tmp_cth])                         \n\t"
+      "lh            %[r1],        0(%[tmp_sth])                         \n\t"
+      "lh            %[r2],        0(%[tmp_arg])                         \n\t"
+#endif
+      "mul           %[t0],        %[r0],                  %[t_ar]       \n\t"
+      "mul           %[t1],        %[r1],                  %[t_ar]       \n\t"
+      "mul           %[t2],        %[r1],                  %[r2]         \n\t"
+      "mul           %[r0],        %[r0],                  %[r2]         \n\t"
+      "subu          %[t0],        %[t0],                  %[t2]         \n\t"
+      "addu          %[t1],        %[t1],                  %[r0]         \n\t"
+#if defined(MIPS_DSP_R1_LE)
+      "shra_r.w      %[t1],        %[t1],                  15            \n\t"
+      "shra_r.w      %[t0],        %[t0],                  15            \n\t"
+#else
+      "addiu         %[t1],        %[t1],                  0x4000        \n\t"
+      "sra           %[t1],        %[t1],                  15            \n\t"
+      "addiu         %[t0],        %[t0],                  0x4000        \n\t"
+      "sra           %[t0],        %[t0],                  15            \n\t"
+#endif
+      "addiu         %[offset],    %[offset],              2             \n\t"
+#if defined(MIPS_DSP_R1_LE)
+      "shll_s.w      %[t1],        %[t1],                  16            \n\t"
+      "shll_s.w      %[t_ar],      %[t0],                  16            \n\t"
+#else
+      "slt           %[r0],        %[t1],                  %[max_q16]    \n\t"
+      "slt           %[r1],        %[t0],                  %[max_q16]    \n\t"
+      "movz          %[t1],        %[max_q16],             %[r0]         \n\t"
+      "movz          %[t0],        %[max_q16],             %[r1]         \n\t"
+#endif
+      "addu          %[offset],    %[offset],              %[ar_g_Q0]    \n\t"
+#if defined(MIPS_DSP_R1_LE)
+      "sra           %[t1],        %[t1],                  16            \n\t"
+      "sra           %[t_ar],      %[t_ar],                16            \n\t"
+#else
+      "slt           %[r0],        %[t1],                  %[min_q16]    \n\t"
+      "slt           %[r1],        %[t0],                  %[min_q16]    \n\t"
+      "movn          %[t1],        %[min_q16],             %[r0]         \n\t"
+      "movn          %[t0],        %[min_q16],             %[r1]         \n\t"
+      "addu          %[t_ar],      $zero,                  %[t0]         \n\t"
+#endif
+      "sh            %[t1],        0(%[offset])                          \n\t"
+      "bgtz          %[count],     1b                                    \n\t"
+      " addiu        %[count],     %[count],               -1            \n\t"
+     "2:                                                                 \n\t"
+      "sh            %[t_ar],      0(%[tmp])                             \n\t"
+      "sh            %[t_ar],      0(%[ar_g_Q0])                         \n\t"
+      ".set          pop                                                 \n\t"
+      : [t_ar] "=&r" (t_ar), [count] "+r" (count), [offset] "=&r" (offset),
+        [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [t0] "=&r" (t0),
+#if !defined(MIPS_DSP_R1_LE)
+        [tmp_cth] "=&r" (tmp_cth), [tmp_sth] "=&r" (tmp_sth),
+        [tmp_arg] "=&r" (tmp_arg),
+#endif
+        [t1] "=&r" (t1), [t2] "=&r" (t2)
+      : [tmp] "r" (&ar_f_Q0[n+1]), [cth_Q15] "r" (cth_Q15),
+#if !defined(MIPS_DSP_R1_LE)
+        [max_q16] "r" (max_q16), [min_q16] "r" (min_q16),
+#endif
+        [sth_Q15] "r" (sth_Q15), [ar_g_Q0] "r" (ar_g_Q0)
+      : "memory", "hi", "lo"
+    );
+  }
+}
+
+// MIPS optimization of the inner loop used for function
+// WebRtcIsacfix_NormLatticeFilterMa(). It does:
+//
+// for 0 <= n < HALF_SUBFRAMELEN - 1:
+//   *ptr2 = input2 * (*ptr2) + input0 * (*ptr0));
+//   *ptr1 = input1 * (*ptr0) + input0 * (*ptr2);
+//
+// Note, function WebRtcIsacfix_FilterMaLoopMIPS and WebRtcIsacfix_FilterMaLoopC
+// are not bit-exact. The accuracy of the MIPS function is same or better.
+void WebRtcIsacfix_FilterMaLoopMIPS(int16_t input0,  // Filter coefficient
+                                    int16_t input1,  // Filter coefficient
+                                    int32_t input2,  // Inverse coeff (1/input1)
+                                    int32_t* ptr0,   // Sample buffer
+                                    int32_t* ptr1,   // Sample buffer
+                                    int32_t* ptr2) { // Sample buffer
+#if defined(MIPS_DSP_R2_LE)
+  // MIPS DSPR2 version. 4 available accumulators allows loop unrolling 4 times.
+  // This variant is not bit-exact with WebRtcIsacfix_FilterMaLoopC, since we
+  // are exploiting 64-bit accumulators. The accuracy of the MIPS DSPR2 function
+  // is same or better.
+  int n = (HALF_SUBFRAMELEN - 1) >> 2;
+  int m = (HALF_SUBFRAMELEN - 1) & 3;
+
+  int r0, r1, r2, r3;
+  int t0, t1, t2, t3;
+  int s0, s1, s2, s3;
+
+  __asm __volatile (
+    ".set          push                                      \n\t"
+    ".set          noreorder                                 \n\t"
+   "1:                                                       \n\t"
+    "lw            %[r0],        0(%[ptr0])                  \n\t"
+    "lw            %[r1],        4(%[ptr0])                  \n\t"
+    "lw            %[r2],        8(%[ptr0])                  \n\t"
+    "lw            %[r3],        12(%[ptr0])                 \n\t"
+    "mult          $ac0,         %[r0],        %[input0]     \n\t"
+    "mult          $ac1,         %[r1],        %[input0]     \n\t"
+    "mult          $ac2,         %[r2],        %[input0]     \n\t"
+    "mult          $ac3,         %[r3],        %[input0]     \n\t"
+    "lw            %[t0],        0(%[ptr2])                  \n\t"
+    "extr_rs.w     %[s0],        $ac0,         15            \n\t"
+    "extr_rs.w     %[s1],        $ac1,         15            \n\t"
+    "extr_rs.w     %[s2],        $ac2,         15            \n\t"
+    "extr_rs.w     %[s3],        $ac3,         15            \n\t"
+    "lw            %[t1],        4(%[ptr2])                  \n\t"
+    "lw            %[t2],        8(%[ptr2])                  \n\t"
+    "lw            %[t3],        12(%[ptr2])                 \n\t"
+    "addu          %[t0],        %[t0],        %[s0]         \n\t"
+    "addu          %[t1],        %[t1],        %[s1]         \n\t"
+    "addu          %[t2],        %[t2],        %[s2]         \n\t"
+    "addu          %[t3],        %[t3],        %[s3]         \n\t"
+    "mult          $ac0,         %[t0],        %[input2]     \n\t"
+    "mult          $ac1,         %[t1],        %[input2]     \n\t"
+    "mult          $ac2,         %[t2],        %[input2]     \n\t"
+    "mult          $ac3,         %[t3],        %[input2]     \n\t"
+    "addiu         %[ptr0],      %[ptr0],      16            \n\t"
+    "extr_rs.w     %[t0],        $ac0,         16            \n\t"
+    "extr_rs.w     %[t1],        $ac1,         16            \n\t"
+    "extr_rs.w     %[t2],        $ac2,         16            \n\t"
+    "extr_rs.w     %[t3],        $ac3,         16            \n\t"
+    "addiu         %[n],         %[n],         -1            \n\t"
+    "mult          $ac0,         %[r0],        %[input1]     \n\t"
+    "mult          $ac1,         %[r1],        %[input1]     \n\t"
+    "mult          $ac2,         %[r2],        %[input1]     \n\t"
+    "mult          $ac3,         %[r3],        %[input1]     \n\t"
+    "sw            %[t0],        0(%[ptr2])                  \n\t"
+    "extr_rs.w     %[s0],        $ac0,         15            \n\t"
+    "extr_rs.w     %[s1],        $ac1,         15            \n\t"
+    "extr_rs.w     %[s2],        $ac2,         15            \n\t"
+    "extr_rs.w     %[s3],        $ac3,         15            \n\t"
+    "sw            %[t1],        4(%[ptr2])                  \n\t"
+    "sw            %[t2],        8(%[ptr2])                  \n\t"
+    "sw            %[t3],        12(%[ptr2])                 \n\t"
+    "mult          $ac0,         %[t0],        %[input0]     \n\t"
+    "mult          $ac1,         %[t1],        %[input0]     \n\t"
+    "mult          $ac2,         %[t2],        %[input0]     \n\t"
+    "mult          $ac3,         %[t3],        %[input0]     \n\t"
+    "addiu         %[ptr2],      %[ptr2],      16            \n\t"
+    "extr_rs.w     %[t0],        $ac0,         15            \n\t"
+    "extr_rs.w     %[t1],        $ac1,         15            \n\t"
+    "extr_rs.w     %[t2],        $ac2,         15            \n\t"
+    "extr_rs.w     %[t3],        $ac3,         15            \n\t"
+    "addu          %[t0],        %[t0],        %[s0]         \n\t"
+    "addu          %[t1],        %[t1],        %[s1]         \n\t"
+    "addu          %[t2],        %[t2],        %[s2]         \n\t"
+    "addu          %[t3],        %[t3],        %[s3]         \n\t"
+    "sw            %[t0],        0(%[ptr1])                  \n\t"
+    "sw            %[t1],        4(%[ptr1])                  \n\t"
+    "sw            %[t2],        8(%[ptr1])                  \n\t"
+    "sw            %[t3],        12(%[ptr1])                 \n\t"
+    "bgtz          %[n],         1b                          \n\t"
+    " addiu        %[ptr1],      %[ptr1],      16            \n\t"
+    "beq           %[m],         %0,           3f            \n\t"
+    " nop                                                    \n\t"
+   "2:                                                       \n\t"
+    "lw            %[r0],        0(%[ptr0])                  \n\t"
+    "lw            %[t0],        0(%[ptr2])                  \n\t"
+    "addiu         %[ptr0],      %[ptr0],      4             \n\t"
+    "mult          $ac0,         %[r0],        %[input0]     \n\t"
+    "mult          $ac1,         %[r0],        %[input1]     \n\t"
+    "extr_rs.w     %[r1],        $ac0,         15            \n\t"
+    "extr_rs.w     %[t1],        $ac1,         15            \n\t"
+    "addu          %[t0],        %[t0],        %[r1]         \n\t"
+    "mult          $ac0,         %[t0],        %[input2]     \n\t"
+    "extr_rs.w     %[t0],        $ac0,         16            \n\t"
+    "sw            %[t0],        0(%[ptr2])                  \n\t"
+    "mult          $ac0,         %[t0],        %[input0]     \n\t"
+    "addiu         %[ptr2],      %[ptr2],      4             \n\t"
+    "addiu         %[m],         %[m],         -1            \n\t"
+    "extr_rs.w     %[t0],        $ac0,         15            \n\t"
+    "addu          %[t0],        %[t0],        %[t1]         \n\t"
+    "sw            %[t0],        0(%[ptr1])                  \n\t"
+    "bgtz          %[m],         2b                          \n\t"
+    " addiu        %[ptr1],      %[ptr1],      4             \n\t"
+   "3:                                                       \n\t"
+    ".set          pop                                       \n\t"
+    : [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2),
+      [r3] "=&r" (r3), [t0] "=&r" (t0), [t1] "=&r" (t1),
+      [t2] "=&r" (t2), [t3] "=&r" (t3), [s0] "=&r" (s0),
+      [s1] "=&r" (s1), [s2] "=&r" (s2), [s3] "=&r" (s3),
+      [ptr0] "+r" (ptr0), [ptr1] "+r" (ptr1), [m] "+r" (m),
+      [ptr2] "+r" (ptr2), [n] "+r" (n)
+    : [input0] "r" (input0), [input1] "r" (input1),
+      [input2] "r" (input2)
+    : "memory", "hi", "lo", "$ac1hi", "$ac1lo", "$ac2hi",
+      "$ac2lo", "$ac3hi", "$ac3lo"
+  );
+#else
+  // Non-DSPR2 version of the function. Avoiding the accumulator usage due to
+  // large latencies. This variant is bit-exact with C code.
+  int n = HALF_SUBFRAMELEN - 1;
+  int32_t t16a, t16b;
+  int32_t r0, r1, r2, r3, r4;
+
+  __asm __volatile (
+    ".set          push                                      \n\t"
+    ".set          noreorder                                 \n\t"
+    "sra           %[t16a],      %[input2],     16           \n\t"
+    "andi          %[t16b],      %[input2],     0xFFFF       \n\t"
+#if defined(MIPS32R2_LE)
+    "seh           %[t16b],      %[t16b]                     \n\t"
+    "seh           %[input0],    %[input0]                   \n\t"
+    "seh           %[input1],    %[input1]                   \n\t"
+#else
+    "sll           %[t16b],      %[t16b],       16           \n\t"
+    "sra           %[t16b],      %[t16b],       16           \n\t"
+    "sll           %[input0],    %[input0],     16           \n\t"
+    "sra           %[input0],    %[input0],     16           \n\t"
+    "sll           %[input1],    %[input1],     16           \n\t"
+    "sra           %[input1],    %[input1],     16           \n\t"
+#endif
+    "addiu         %[r0],        %[t16a],       1            \n\t"
+    "slt           %[r1],        %[t16b],       $zero        \n\t"
+    "movn          %[t16a],      %[r0],         %[r1]        \n\t"
+   "1:                                                       \n\t"
+    "lw            %[r0],        0(%[ptr0])                  \n\t"
+    "lw            %[r1],        0(%[ptr2])                  \n\t"
+    "addiu         %[ptr0],      %[ptr0],       4            \n\t"
+    "sra           %[r2],        %[r0],         16           \n\t"
+    "andi          %[r0],        %[r0],         0xFFFF       \n\t"
+    "mul           %[r3],        %[r2],         %[input0]    \n\t"
+    "mul           %[r4],        %[r0],         %[input0]    \n\t"
+    "mul           %[r2],        %[r2],         %[input1]    \n\t"
+    "mul           %[r0],        %[r0],         %[input1]    \n\t"
+    "addiu         %[ptr2],      %[ptr2],       4            \n\t"
+    "sll           %[r3],        %[r3],         1            \n\t"
+    "sra           %[r4],        %[r4],         1            \n\t"
+    "addiu         %[r4],        %[r4],         0x2000       \n\t"
+    "sra           %[r4],        %[r4],         14           \n\t"
+    "addu          %[r3],        %[r3],         %[r4]        \n\t"
+    "addu          %[r1],        %[r1],         %[r3]        \n\t"
+    "sra           %[r3],        %[r1],         16           \n\t"
+    "andi          %[r4],        %[r1],         0xFFFF       \n\t"
+    "sra           %[r4],        %[r4],         1            \n\t"
+    "mul           %[r1],        %[r1],         %[t16a]      \n\t"
+    "mul           %[r3],        %[r3],         %[t16b]      \n\t"
+    "mul           %[r4],        %[r4],         %[t16b]      \n\t"
+    "sll           %[r2],        %[r2],         1            \n\t"
+    "sra           %[r0],        %[r0],         1            \n\t"
+    "addiu         %[r0],        %[r0],         0x2000       \n\t"
+    "sra           %[r0],        %[r0],         14           \n\t"
+    "addu          %[r0],        %[r0],         %[r2]        \n\t"
+    "addiu         %[n],         %[n],          -1           \n\t"
+    "addu          %[r1],        %[r1],         %[r3]        \n\t"
+    "addiu         %[r4],        %[r4],         0x4000       \n\t"
+    "sra           %[r4],        %[r4],         15           \n\t"
+    "addu          %[r1],        %[r1],         %[r4]        \n\t"
+    "sra           %[r2],        %[r1],         16           \n\t"
+    "andi          %[r3],        %[r1],         0xFFFF       \n\t"
+    "mul           %[r3],        %[r3],         %[input0]    \n\t"
+    "mul           %[r2],        %[r2],         %[input0]    \n\t"
+    "sw            %[r1],        -4(%[ptr2])                 \n\t"
+    "sra           %[r3],        %[r3],         1            \n\t"
+    "addiu         %[r3],        %[r3],         0x2000       \n\t"
+    "sra           %[r3],        %[r3],         14           \n\t"
+    "addu          %[r0],        %[r0],         %[r3]        \n\t"
+    "sll           %[r2],        %[r2],         1            \n\t"
+    "addu          %[r0],        %[r0],         %[r2]        \n\t"
+    "sw            %[r0],        0(%[ptr1])                  \n\t"
+    "bgtz          %[n],         1b                          \n\t"
+    " addiu        %[ptr1],      %[ptr1],       4            \n\t"
+    ".set          pop                                       \n\t"
+    : [t16a] "=&r" (t16a), [t16b] "=&r" (t16b), [r0] "=&r" (r0),
+      [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+      [r4] "=&r" (r4), [ptr0] "+r" (ptr0), [ptr1] "+r" (ptr1),
+      [ptr2] "+r" (ptr2), [n] "+r" (n)
+    : [input0] "r" (input0), [input1] "r" (input1),
+      [input2] "r" (input2)
+    : "hi", "lo", "memory"
+  );
+#endif
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/lattice_neon.c b/modules/audio_coding/codecs/isac/fix/source/lattice_neon.c
new file mode 100644
index 0000000..8ea9b63
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/lattice_neon.c
@@ -0,0 +1,195 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+// Contains a function for the core loop in the normalized lattice MA
+// filter routine for iSAC codec, optimized for ARM Neon platform.
+// It does:
+//  for 0 <= n < HALF_SUBFRAMELEN - 1:
+//    *ptr2 = input2 * ((*ptr2) + input0 * (*ptr0));
+//    *ptr1 = input1 * (*ptr0) + input0 * (*ptr2);
+// Output is not bit-exact with the reference C code, due to the replacement
+// of WEBRTC_SPL_MUL_16_32_RSFT15 and LATTICE_MUL_32_32_RSFT16 with Neon
+// instructions. The difference should not be bigger than 1.
+void WebRtcIsacfix_FilterMaLoopNeon(int16_t input0,  // Filter coefficient
+                                    int16_t input1,  // Filter coefficient
+                                    int32_t input2,  // Inverse coefficient
+                                    int32_t* ptr0,   // Sample buffer
+                                    int32_t* ptr1,   // Sample buffer
+                                    int32_t* ptr2)   // Sample buffer
+{
+  int n = 0;
+  int loop = (HALF_SUBFRAMELEN - 1) >> 3;
+  int loop_tail = (HALF_SUBFRAMELEN - 1) & 0x7;
+
+  int32x4_t input0_v = vdupq_n_s32((int32_t)input0 << 16);
+  int32x4_t input1_v = vdupq_n_s32((int32_t)input1 << 16);
+  int32x4_t input2_v = vdupq_n_s32(input2);
+  int32x4_t tmp0a, tmp1a, tmp2a, tmp3a;
+  int32x4_t tmp0b, tmp1b, tmp2b, tmp3b;
+  int32x4_t ptr0va, ptr1va, ptr2va;
+  int32x4_t ptr0vb, ptr1vb, ptr2vb;
+
+  int64x2_t tmp2al_low, tmp2al_high, tmp2bl_low, tmp2bl_high;
+  // Unroll to process 8 samples at once.
+  for (n = 0; n < loop; n++) {
+    ptr0va = vld1q_s32(ptr0);
+    ptr0vb = vld1q_s32(ptr0 + 4);
+    ptr0 += 8;
+
+    ptr2va = vld1q_s32(ptr2);
+    ptr2vb = vld1q_s32(ptr2 + 4);
+
+    // Calculate tmp0 = (*ptr0) * input0.
+    tmp0a = vqrdmulhq_s32(ptr0va, input0_v);
+    tmp0b = vqrdmulhq_s32(ptr0vb, input0_v);
+
+    // Calculate tmp1 = (*ptr0) * input1.
+    tmp1a = vqrdmulhq_s32(ptr0va, input1_v);
+    tmp1b = vqrdmulhq_s32(ptr0vb, input1_v);
+
+    // Calculate tmp2 = tmp0 + *(ptr2).
+    tmp2a = vaddq_s32(tmp0a, ptr2va);
+    tmp2b = vaddq_s32(tmp0b, ptr2vb);
+
+    // Calculate *ptr2 = input2 * tmp2.
+    tmp2al_low = vmull_s32(vget_low_s32(tmp2a), vget_low_s32(input2_v));
+#if defined(WEBRTC_ARCH_ARM64)
+    tmp2al_high = vmull_high_s32(tmp2a, input2_v);
+#else
+    tmp2al_high = vmull_s32(vget_high_s32(tmp2a), vget_high_s32(input2_v));
+#endif
+    ptr2va = vcombine_s32(vrshrn_n_s64(tmp2al_low, 16),
+                          vrshrn_n_s64(tmp2al_high, 16));
+
+    tmp2bl_low = vmull_s32(vget_low_s32(tmp2b), vget_low_s32(input2_v));
+#if defined(WEBRTC_ARCH_ARM64)
+    tmp2bl_high = vmull_high_s32(tmp2b, input2_v);
+#else
+    tmp2bl_high = vmull_s32(vget_high_s32(tmp2b), vget_high_s32(input2_v));
+#endif
+    ptr2vb = vcombine_s32(vrshrn_n_s64(tmp2bl_low, 16),
+                          vrshrn_n_s64(tmp2bl_high, 16));
+
+    vst1q_s32(ptr2, ptr2va);
+    vst1q_s32(ptr2 + 4, ptr2vb);
+    ptr2 += 8;
+
+    // Calculate tmp3 = ptr2v * input0.
+    tmp3a = vqrdmulhq_s32(ptr2va, input0_v);
+    tmp3b = vqrdmulhq_s32(ptr2vb, input0_v);
+
+    // Calculate *ptr1 = tmp1 + tmp3.
+    ptr1va = vaddq_s32(tmp1a, tmp3a);
+    ptr1vb = vaddq_s32(tmp1b, tmp3b);
+
+    vst1q_s32(ptr1, ptr1va);
+    vst1q_s32(ptr1 + 4, ptr1vb);
+    ptr1 += 8;
+  }
+
+  // Process four more samples.
+  if (loop_tail & 0x4) {
+    ptr0va = vld1q_s32(ptr0);
+    ptr2va = vld1q_s32(ptr2);
+    ptr0 += 4;
+
+    // Calculate tmp0 = (*ptr0) * input0.
+    tmp0a = vqrdmulhq_s32(ptr0va, input0_v);
+
+    // Calculate tmp1 = (*ptr0) * input1.
+    tmp1a = vqrdmulhq_s32(ptr0va, input1_v);
+
+    // Calculate tmp2 = tmp0 + *(ptr2).
+    tmp2a = vaddq_s32(tmp0a, ptr2va);
+
+    // Calculate *ptr2 = input2 * tmp2.
+    tmp2al_low = vmull_s32(vget_low_s32(tmp2a), vget_low_s32(input2_v));
+
+#if defined(WEBRTC_ARCH_ARM64)
+    tmp2al_high = vmull_high_s32(tmp2a, input2_v);
+#else
+    tmp2al_high = vmull_s32(vget_high_s32(tmp2a), vget_high_s32(input2_v));
+#endif
+    ptr2va = vcombine_s32(vrshrn_n_s64(tmp2al_low, 16),
+                          vrshrn_n_s64(tmp2al_high, 16));
+
+    vst1q_s32(ptr2, ptr2va);
+    ptr2 += 4;
+
+    // Calculate tmp3 = *(ptr2) * input0.
+    tmp3a = vqrdmulhq_s32(ptr2va, input0_v);
+
+    // Calculate *ptr1 = tmp1 + tmp3.
+    ptr1va = vaddq_s32(tmp1a, tmp3a);
+
+    vst1q_s32(ptr1, ptr1va);
+    ptr1 += 4;
+  }
+
+  // Process two more samples.
+  if (loop_tail & 0x2) {
+    int32x2_t ptr0v_tail, ptr2v_tail, ptr1v_tail;
+    int32x2_t tmp0_tail, tmp1_tail, tmp2_tail, tmp3_tail;
+    int64x2_t tmp2l_tail;
+    ptr0v_tail = vld1_s32(ptr0);
+    ptr2v_tail = vld1_s32(ptr2);
+    ptr0 += 2;
+
+    // Calculate tmp0 = (*ptr0) * input0.
+    tmp0_tail = vqrdmulh_s32(ptr0v_tail, vget_low_s32(input0_v));
+
+    // Calculate tmp1 = (*ptr0) * input1.
+    tmp1_tail = vqrdmulh_s32(ptr0v_tail, vget_low_s32(input1_v));
+
+    // Calculate tmp2 = tmp0 + *(ptr2).
+    tmp2_tail = vadd_s32(tmp0_tail, ptr2v_tail);
+
+    // Calculate *ptr2 = input2 * tmp2.
+    tmp2l_tail = vmull_s32(tmp2_tail, vget_low_s32(input2_v));
+    ptr2v_tail = vrshrn_n_s64(tmp2l_tail, 16);
+
+    vst1_s32(ptr2, ptr2v_tail);
+    ptr2 += 2;
+
+    // Calculate tmp3 = *(ptr2) * input0.
+    tmp3_tail = vqrdmulh_s32(ptr2v_tail, vget_low_s32(input0_v));
+
+    // Calculate *ptr1 = tmp1 + tmp3.
+    ptr1v_tail = vadd_s32(tmp1_tail, tmp3_tail);
+
+    vst1_s32(ptr1, ptr1v_tail);
+    ptr1 += 2;
+  }
+
+  // Process one more sample.
+  if (loop_tail & 0x1) {
+    int16_t t16a = (int16_t)(input2 >> 16);
+    int16_t t16b = (int16_t)input2;
+    if (t16b < 0) t16a++;
+    int32_t tmp32a;
+    int32_t tmp32b;
+
+    // Calculate *ptr2 = input2 * (*ptr2 + input0 * (*ptr0)).
+    tmp32a = WEBRTC_SPL_MUL_16_32_RSFT15(input0, *ptr0);
+    tmp32b = *ptr2 + tmp32a;
+    *ptr2 = (int32_t)(WEBRTC_SPL_MUL(t16a, tmp32b) +
+                       (WEBRTC_SPL_MUL_16_32_RSFT16(t16b, tmp32b)));
+
+    // Calculate *ptr1 = input1 * (*ptr0) + input0 * (*ptr2).
+    tmp32a = WEBRTC_SPL_MUL_16_32_RSFT15(input1, *ptr0);
+    tmp32b = WEBRTC_SPL_MUL_16_32_RSFT15(input0, *ptr2);
+    *ptr1 = tmp32a + tmp32b;
+  }
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.c b/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.c
new file mode 100644
index 0000000..b538085
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.c
@@ -0,0 +1,949 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lpc_masking_model.c
+ *
+ * LPC analysis and filtering functions
+ *
+ */
+
+#include "lpc_masking_model.h"
+
+#include <limits.h>  /* For LLONG_MAX and LLONG_MIN. */
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+/* The conversion is implemented by the step-down algorithm */
+void WebRtcSpl_AToK_JSK(
+    int16_t *a16, /* Q11 */
+    int16_t useOrder,
+    int16_t *k16  /* Q15 */
+                        )
+{
+  int m, k;
+  int32_t tmp32[MAX_AR_MODEL_ORDER];
+  int32_t tmp32b;
+  int32_t tmp_inv_denum32;
+  int16_t tmp_inv_denum16;
+
+  k16[useOrder-1] = a16[useOrder] << 4;  // Q11<<4 => Q15
+
+  for (m=useOrder-1; m>0; m--) {
+    // (1 - k^2) in Q30
+    tmp_inv_denum32 = 1073741823 - k16[m] * k16[m];
+    tmp_inv_denum16 = (int16_t)(tmp_inv_denum32 >> 15);  // (1 - k^2) in Q15.
+
+    for (k=1; k<=m; k++) {
+      tmp32b = (a16[k] << 16) - ((k16[m] * a16[m - k + 1]) << 1);
+
+      tmp32[k] = WebRtcSpl_DivW32W16(tmp32b, tmp_inv_denum16); //Q27/Q15 = Q12
+    }
+
+    for (k=1; k<m; k++) {
+      a16[k] = (int16_t)(tmp32[k] >> 1);  // Q12>>1 => Q11
+    }
+
+    tmp32[m] = WEBRTC_SPL_SAT(4092, tmp32[m], -4092);
+    k16[m - 1] = (int16_t)(tmp32[m] << 3);  // Q12<<3 => Q15
+  }
+
+  return;
+}
+
+
+
+
+
+int16_t WebRtcSpl_LevinsonW32_JSK(
+    int32_t *R,  /* (i) Autocorrelation of length >= order+1 */
+    int16_t *A,  /* (o) A[0..order] LPC coefficients (Q11) */
+    int16_t *K,  /* (o) K[0...order-1] Reflection coefficients (Q15) */
+    int16_t order /* (i) filter order */
+                                        ) {
+  int16_t i, j;
+  int16_t R_hi[LEVINSON_MAX_ORDER+1], R_low[LEVINSON_MAX_ORDER+1];
+  /* Aurocorr coefficients in high precision */
+  int16_t A_hi[LEVINSON_MAX_ORDER+1], A_low[LEVINSON_MAX_ORDER+1];
+  /* LPC coefficients in high precicion */
+  int16_t A_upd_hi[LEVINSON_MAX_ORDER+1], A_upd_low[LEVINSON_MAX_ORDER+1];
+  /* LPC coefficients for next iteration */
+  int16_t K_hi, K_low;      /* reflection coefficient in high precision */
+  int16_t Alpha_hi, Alpha_low, Alpha_exp; /* Prediction gain Alpha in high precision
+                                                   and with scale factor */
+  int16_t tmp_hi, tmp_low;
+  int32_t temp1W32, temp2W32, temp3W32;
+  int16_t norm;
+
+  /* Normalize the autocorrelation R[0]...R[order+1] */
+
+  norm = WebRtcSpl_NormW32(R[0]);
+
+  for (i=order;i>=0;i--) {
+    temp1W32 = R[i] << norm;
+    /* Put R in hi and low format */
+    R_hi[i] = (int16_t)(temp1W32 >> 16);
+    R_low[i] = (int16_t)((temp1W32 - ((int32_t)R_hi[i] << 16)) >> 1);
+  }
+
+  /* K = A[1] = -R[1] / R[0] */
+
+  temp2W32 = (R_hi[1] << 16) + (R_low[1] << 1);  /* R[1] in Q31      */
+  temp3W32  = WEBRTC_SPL_ABS_W32(temp2W32);      /* abs R[1]         */
+  temp1W32  = WebRtcSpl_DivW32HiLow(temp3W32, R_hi[0], R_low[0]); /* abs(R[1])/R[0] in Q31 */
+  /* Put back the sign on R[1] */
+  if (temp2W32 > 0) {
+    temp1W32 = -temp1W32;
+  }
+
+  /* Put K in hi and low format */
+  K_hi = (int16_t)(temp1W32 >> 16);
+  K_low = (int16_t)((temp1W32 - ((int32_t)K_hi << 16)) >> 1);
+
+  /* Store first reflection coefficient */
+  K[0] = K_hi;
+
+  temp1W32 >>= 4;  /* A[1] in Q27. */
+
+  /* Put A[1] in hi and low format */
+  A_hi[1] = (int16_t)(temp1W32 >> 16);
+  A_low[1] = (int16_t)((temp1W32 - ((int32_t)A_hi[1] << 16)) >> 1);
+
+  /*  Alpha = R[0] * (1-K^2) */
+
+  temp1W32  = (((K_hi * K_low) >> 14) + K_hi * K_hi) << 1;  /* = k^2 in Q31 */
+
+  temp1W32 = WEBRTC_SPL_ABS_W32(temp1W32);    /* Guard against <0 */
+  temp1W32 = (int32_t)0x7fffffffL - temp1W32;    /* temp1W32 = (1 - K[0]*K[0]) in Q31 */
+
+  /* Store temp1W32 = 1 - K[0]*K[0] on hi and low format */
+  tmp_hi = (int16_t)(temp1W32 >> 16);
+  tmp_low = (int16_t)((temp1W32 - ((int32_t)tmp_hi << 16)) >> 1);
+
+  /* Calculate Alpha in Q31 */
+  temp1W32 = (R_hi[0] * tmp_hi + ((R_hi[0] * tmp_low) >> 15) +
+      ((R_low[0] * tmp_hi) >> 15)) << 1;
+
+  /* Normalize Alpha and put it in hi and low format */
+
+  Alpha_exp = WebRtcSpl_NormW32(temp1W32);
+  temp1W32 <<= Alpha_exp;
+  Alpha_hi = (int16_t)(temp1W32 >> 16);
+  Alpha_low = (int16_t)((temp1W32 - ((int32_t)Alpha_hi<< 16)) >> 1);
+
+  /* Perform the iterative calculations in the
+     Levinson Durbin algorithm */
+
+  for (i=2; i<=order; i++)
+  {
+
+    /*                    ----
+                          \
+        temp1W32 =  R[i] + > R[j]*A[i-j]
+                          /
+                          ----
+                          j=1..i-1
+    */
+
+    temp1W32 = 0;
+
+    for(j=1; j<i; j++) {
+      /* temp1W32 is in Q31 */
+      temp1W32 += ((R_hi[j] * A_hi[i - j]) << 1) +
+          ((((R_hi[j] * A_low[i - j]) >> 15) +
+              ((R_low[j] * A_hi[i - j]) >> 15)) << 1);
+    }
+
+    temp1W32 <<= 4;
+    temp1W32 += (R_hi[i] << 16) + (R_low[i] << 1);
+
+    /* K = -temp1W32 / Alpha */
+    temp2W32 = WEBRTC_SPL_ABS_W32(temp1W32);      /* abs(temp1W32) */
+    temp3W32 = WebRtcSpl_DivW32HiLow(temp2W32, Alpha_hi, Alpha_low); /* abs(temp1W32)/Alpha */
+
+    /* Put the sign of temp1W32 back again */
+    if (temp1W32 > 0) {
+      temp3W32 = -temp3W32;
+    }
+
+    /* Use the Alpha shifts from earlier to denormalize */
+    norm = WebRtcSpl_NormW32(temp3W32);
+    if ((Alpha_exp <= norm)||(temp3W32==0)) {
+      temp3W32 <<= Alpha_exp;
+    } else {
+      if (temp3W32 > 0)
+      {
+        temp3W32 = (int32_t)0x7fffffffL;
+      } else
+      {
+        temp3W32 = (int32_t)0x80000000L;
+      }
+    }
+
+    /* Put K on hi and low format */
+    K_hi = (int16_t)(temp3W32 >> 16);
+    K_low = (int16_t)((temp3W32 - ((int32_t)K_hi << 16)) >> 1);
+
+    /* Store Reflection coefficient in Q15 */
+    K[i-1] = K_hi;
+
+    /* Test for unstable filter. If unstable return 0 and let the
+       user decide what to do in that case
+    */
+
+    if ((int32_t)WEBRTC_SPL_ABS_W16(K_hi) > (int32_t)32740) {
+      return(-i); /* Unstable filter */
+    }
+
+    /*
+      Compute updated LPC coefficient: Anew[i]
+      Anew[j]= A[j] + K*A[i-j]   for j=1..i-1
+      Anew[i]= K
+    */
+
+    for(j=1; j<i; j++)
+    {
+      temp1W32 = (A_hi[j] << 16) + (A_low[j] << 1);  // temp1W32 = A[j] in Q27
+
+      temp1W32 += (K_hi * A_hi[i - j] + ((K_hi * A_low[i - j]) >> 15) +
+          ((K_low * A_hi[i - j]) >> 15)) << 1;  // temp1W32 += K*A[i-j] in Q27.
+
+      /* Put Anew in hi and low format */
+      A_upd_hi[j] = (int16_t)(temp1W32 >> 16);
+      A_upd_low[j] = (int16_t)((temp1W32 - ((int32_t)A_upd_hi[j] << 16)) >> 1);
+    }
+
+    temp3W32 >>= 4;  /* temp3W32 = K in Q27 (Convert from Q31 to Q27) */
+
+    /* Store Anew in hi and low format */
+    A_upd_hi[i] = (int16_t)(temp3W32 >> 16);
+    A_upd_low[i] = (int16_t)((temp3W32 - ((int32_t)A_upd_hi[i] << 16)) >> 1);
+
+    /*  Alpha = Alpha * (1-K^2) */
+
+    temp1W32 = (((K_hi * K_low) >> 14) + K_hi * K_hi) << 1;  /* K*K in Q31 */
+
+    temp1W32 = WEBRTC_SPL_ABS_W32(temp1W32);      /* Guard against <0 */
+    temp1W32 = (int32_t)0x7fffffffL - temp1W32;      /* 1 - K*K  in Q31 */
+
+    /* Convert 1- K^2 in hi and low format */
+    tmp_hi = (int16_t)(temp1W32 >> 16);
+    tmp_low = (int16_t)((temp1W32 - ((int32_t)tmp_hi << 16)) >> 1);
+
+    /* Calculate Alpha = Alpha * (1-K^2) in Q31 */
+    temp1W32 = (Alpha_hi * tmp_hi + ((Alpha_hi * tmp_low) >> 15) +
+        ((Alpha_low * tmp_hi) >> 15)) << 1;
+
+    /* Normalize Alpha and store it on hi and low format */
+
+    norm = WebRtcSpl_NormW32(temp1W32);
+    temp1W32 <<= norm;
+
+    Alpha_hi = (int16_t)(temp1W32 >> 16);
+    Alpha_low = (int16_t)((temp1W32 - ((int32_t)Alpha_hi << 16)) >> 1);
+
+    /* Update the total nomalization of Alpha */
+    Alpha_exp = Alpha_exp + norm;
+
+    /* Update A[] */
+
+    for(j=1; j<=i; j++)
+    {
+      A_hi[j] =A_upd_hi[j];
+      A_low[j] =A_upd_low[j];
+    }
+  }
+
+  /*
+    Set A[0] to 1.0 and store the A[i] i=1...order in Q12
+    (Convert from Q27 and use rounding)
+  */
+
+  A[0] = 2048;
+
+  for(i=1; i<=order; i++) {
+    /* temp1W32 in Q27 */
+    temp1W32 = (A_hi[i] << 16) + (A_low[i] << 1);
+    /* Round and store upper word */
+    A[i] = (int16_t)((temp1W32 + 32768) >> 16);
+  }
+  return(1); /* Stable filters */
+}
+
+
+
+
+
+/* window */
+/* Matlab generation of floating point code:
+ *  t = (1:256)/257; r = 1-(1-t).^.45; w = sin(r*pi).^3; w = w/sum(w); plot((1:256)/8, w); grid;
+ *  for k=1:16, fprintf(1, '%.8f, ', w(k*16 + (-15:0))); fprintf(1, '\n'); end
+ * All values are multiplyed with 2^21 in fixed point code.
+ */
+static const int16_t kWindowAutocorr[WINLEN] = {
+  0,     0,     0,     0,     0,     1,     1,     2,     2,     3,     5,     6,
+  8,    10,    12,    14,    17,    20,    24,    28,    33,    38,    43,    49,
+  56,    63,    71,    79,    88,    98,   108,   119,   131,   143,   157,   171,
+  186,   202,   219,   237,   256,   275,   296,   318,   341,   365,   390,   416,
+  444,   472,   502,   533,   566,   600,   635,   671,   709,   748,   789,   831,
+  875,   920,   967,  1015,  1065,  1116,  1170,  1224,  1281,  1339,  1399,  1461,
+  1525,  1590,  1657,  1726,  1797,  1870,  1945,  2021,  2100,  2181,  2263,  2348,
+  2434,  2523,  2614,  2706,  2801,  2898,  2997,  3099,  3202,  3307,  3415,  3525,
+  3637,  3751,  3867,  3986,  4106,  4229,  4354,  4481,  4611,  4742,  4876,  5012,
+  5150,  5291,  5433,  5578,  5725,  5874,  6025,  6178,  6333,  6490,  6650,  6811,
+  6974,  7140,  7307,  7476,  7647,  7820,  7995,  8171,  8349,  8529,  8711,  8894,
+  9079,  9265,  9453,  9642,  9833, 10024, 10217, 10412, 10607, 10803, 11000, 11199,
+  11398, 11597, 11797, 11998, 12200, 12401, 12603, 12805, 13008, 13210, 13412, 13614,
+  13815, 14016, 14216, 14416, 14615, 14813, 15009, 15205, 15399, 15591, 15782, 15971,
+  16157, 16342, 16524, 16704, 16881, 17056, 17227, 17395, 17559, 17720, 17877, 18030,
+  18179, 18323, 18462, 18597, 18727, 18851, 18970, 19082, 19189, 19290, 19384, 19471,
+  19551, 19623, 19689, 19746, 19795, 19835, 19867, 19890, 19904, 19908, 19902, 19886,
+  19860, 19823, 19775, 19715, 19644, 19561, 19465, 19357, 19237, 19102, 18955, 18793,
+  18618, 18428, 18223, 18004, 17769, 17518, 17252, 16970, 16672, 16357, 16025, 15677,
+  15311, 14929, 14529, 14111, 13677, 13225, 12755, 12268, 11764, 11243, 10706, 10152,
+  9583,  8998,  8399,  7787,  7162,  6527,  5883,  5231,  4576,  3919,  3265,  2620,
+  1990,  1386,   825,   333
+};
+
+
+/* By using a hearing threshold level in dB of -28 dB (higher value gives more noise),
+   the H_T_H (in float) can be calculated as:
+   H_T_H = pow(10.0, 0.05 * (-28.0)) = 0.039810717055350
+   In Q19, H_T_H becomes round(0.039810717055350*2^19) ~= 20872, i.e.
+   H_T_H = 20872/524288.0, and H_T_HQ19 = 20872;
+*/
+
+
+/* The bandwidth expansion vectors are created from:
+   kPolyVecLo=[0.900000,0.810000,0.729000,0.656100,0.590490,0.531441,0.478297,0.430467,0.387420,0.348678,0.313811,0.282430];
+   kPolyVecHi=[0.800000,0.640000,0.512000,0.409600,0.327680,0.262144];
+   round(kPolyVecLo*32768)
+   round(kPolyVecHi*32768)
+*/
+static const int16_t kPolyVecLo[12] = {
+  29491, 26542, 23888, 21499, 19349, 17414, 15673, 14106, 12695, 11425, 10283, 9255
+};
+static const int16_t kPolyVecHi[6] = {
+  26214, 20972, 16777, 13422, 10737, 8590
+};
+
+static __inline int32_t log2_Q8_LPC( uint32_t x ) {
+
+  int32_t zeros;
+  int16_t frac;
+
+  zeros=WebRtcSpl_NormU32(x);
+  frac = (int16_t)(((x << zeros) & 0x7FFFFFFF) >> 23);
+
+  /* log2(x) */
+  return ((31 - zeros) << 8) + frac;
+}
+
+static const int16_t kMulPitchGain = -25; /* 200/256 in Q5 */
+static const int16_t kChngFactor = 3523; /* log10(2)*10/4*0.4/1.4=log10(2)/1.4= 0.2150 in Q14 */
+static const int16_t kExp2 = 11819; /* 1/log(2) */
+const int kShiftLowerBand = 11;  /* Shift value for lower band in Q domain. */
+const int kShiftHigherBand = 12;  /* Shift value for higher band in Q domain. */
+
+void WebRtcIsacfix_GetVars(const int16_t *input, const int16_t *pitchGains_Q12,
+                           uint32_t *oldEnergy, int16_t *varscale)
+{
+  int k;
+  uint32_t nrgQ[4];
+  int16_t nrgQlog[4];
+  int16_t tmp16, chng1, chng2, chng3, chng4, tmp, chngQ, oldNrgQlog, pgQ, pg3;
+  int32_t expPg32;
+  int16_t expPg, divVal;
+  int16_t tmp16_1, tmp16_2;
+
+  /* Calculate energies of first and second frame halfs */
+  nrgQ[0]=0;
+  for (k = QLOOKAHEAD/2; k < (FRAMESAMPLES/4 + QLOOKAHEAD) / 2; k++) {
+    nrgQ[0] += (uint32_t)(input[k] * input[k]);
+  }
+  nrgQ[1]=0;
+  for ( ; k < (FRAMESAMPLES/2 + QLOOKAHEAD) / 2; k++) {
+    nrgQ[1] += (uint32_t)(input[k] * input[k]);
+  }
+  nrgQ[2]=0;
+  for ( ; k < (FRAMESAMPLES * 3 / 4 + QLOOKAHEAD) / 2; k++) {
+    nrgQ[2] += (uint32_t)(input[k] * input[k]);
+  }
+  nrgQ[3]=0;
+  for ( ; k < (FRAMESAMPLES + QLOOKAHEAD) / 2; k++) {
+    nrgQ[3] += (uint32_t)(input[k] * input[k]);
+  }
+
+  for ( k=0; k<4; k++) {
+    nrgQlog[k] = (int16_t)log2_Q8_LPC(nrgQ[k]); /* log2(nrgQ) */
+  }
+  oldNrgQlog = (int16_t)log2_Q8_LPC(*oldEnergy);
+
+  /* Calculate average level change */
+  chng1 = WEBRTC_SPL_ABS_W16(nrgQlog[3]-nrgQlog[2]);
+  chng2 = WEBRTC_SPL_ABS_W16(nrgQlog[2]-nrgQlog[1]);
+  chng3 = WEBRTC_SPL_ABS_W16(nrgQlog[1]-nrgQlog[0]);
+  chng4 = WEBRTC_SPL_ABS_W16(nrgQlog[0]-oldNrgQlog);
+  tmp = chng1+chng2+chng3+chng4;
+  chngQ = (int16_t)(tmp * kChngFactor >> 10);  /* Q12 */
+  chngQ += 2926; /* + 1.0/1.4 in Q12 */
+
+  /* Find average pitch gain */
+  pgQ = 0;
+  for (k=0; k<4; k++)
+  {
+    pgQ += pitchGains_Q12[k];
+  }
+
+  pg3 = (int16_t)(pgQ * pgQ >> 11);  // pgQ in Q(12+2)=Q14. Q14*Q14>>11 => Q17
+  pg3 = (int16_t)(pgQ * pg3 >> 13);  /* Q14*Q17>>13 =>Q18  */
+  /* kMulPitchGain = -25 = -200 in Q-3. */
+  pg3 = (int16_t)(pg3 * kMulPitchGain >> 5);  // Q10
+  tmp16=(int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(kExp2,pg3,13);/* Q13*Q10>>13 => Q10*/
+  if (tmp16<0) {
+    tmp16_2 = (0x0400 | (tmp16 & 0x03FF));
+    tmp16_1 = ((uint16_t)(tmp16 ^ 0xFFFF) >> 10) - 3;  /* Gives result in Q14 */
+    if (tmp16_1<0)
+      expPg = -(tmp16_2 << -tmp16_1);
+    else
+      expPg = -(tmp16_2 >> tmp16_1);
+  } else
+    expPg = (int16_t) -16384; /* 1 in Q14, since 2^0=1 */
+
+  expPg32 = (int32_t)expPg << 8;  /* Q22 */
+  divVal = WebRtcSpl_DivW32W16ResW16(expPg32, chngQ); /* Q22/Q12=Q10 */
+
+  tmp16=(int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(kExp2,divVal,13);/* Q13*Q10>>13 => Q10*/
+  if (tmp16<0) {
+    tmp16_2 = (0x0400 | (tmp16 & 0x03FF));
+    tmp16_1 = ((uint16_t)(tmp16 ^ 0xFFFF) >> 10) - 3;  /* Gives result in Q14 */
+    if (tmp16_1<0)
+      expPg = tmp16_2 << -tmp16_1;
+    else
+      expPg = tmp16_2 >> tmp16_1;
+  } else
+    expPg = (int16_t) 16384; /* 1 in Q14, since 2^0=1 */
+
+  *varscale = expPg-1;
+  *oldEnergy = nrgQ[3];
+}
+
+
+
+static __inline int16_t  exp2_Q10_T(int16_t x) { // Both in and out in Q10
+
+  int16_t tmp16_1, tmp16_2;
+
+  tmp16_2=(int16_t)(0x0400|(x&0x03FF));
+  tmp16_1 = -(x >> 10);
+  if(tmp16_1>0)
+    return tmp16_2 >> tmp16_1;
+  else
+    return tmp16_2 << -tmp16_1;
+
+}
+
+
+// Declare function pointers.
+AutocorrFix WebRtcIsacfix_AutocorrFix;
+CalculateResidualEnergy WebRtcIsacfix_CalculateResidualEnergy;
+
+/* This routine calculates the residual energy for LPC.
+ * Formula as shown in comments inside.
+ */
+int32_t WebRtcIsacfix_CalculateResidualEnergyC(int lpc_order,
+                                               int32_t q_val_corr,
+                                               int q_val_polynomial,
+                                               int16_t* a_polynomial,
+                                               int32_t* corr_coeffs,
+                                               int* q_val_residual_energy) {
+  int i = 0, j = 0;
+  int shift_internal = 0, shift_norm = 0;
+  int32_t tmp32 = 0, word32_high = 0, word32_low = 0, residual_energy = 0;
+  int64_t sum64 = 0, sum64_tmp = 0;
+
+  for (i = 0; i <= lpc_order; i++) {
+    for (j = i; j <= lpc_order; j++) {
+      /* For the case of i == 0: residual_energy +=
+       *    a_polynomial[j] * corr_coeffs[i] * a_polynomial[j - i];
+       * For the case of i != 0: residual_energy +=
+       *    a_polynomial[j] * corr_coeffs[i] * a_polynomial[j - i] * 2;
+       */
+
+      tmp32 = a_polynomial[j] * a_polynomial[j - i];
+                                   /* tmp32 in Q(q_val_polynomial * 2). */
+      if (i != 0) {
+        tmp32 <<= 1;
+      }
+      sum64_tmp = (int64_t)tmp32 * (int64_t)corr_coeffs[i];
+      sum64_tmp >>= shift_internal;
+
+      /* Test overflow and sum the result. */
+      if(((sum64_tmp > 0 && sum64 > 0) && (LLONG_MAX - sum64 < sum64_tmp)) ||
+         ((sum64_tmp < 0 && sum64 < 0) && (LLONG_MIN - sum64 > sum64_tmp))) {
+        /* Shift right for overflow. */
+        shift_internal += 1;
+        sum64 >>= 1;
+        sum64 += sum64_tmp >> 1;
+      } else {
+        sum64 += sum64_tmp;
+      }
+    }
+  }
+
+  word32_high = (int32_t)(sum64 >> 32);
+  word32_low = (int32_t)sum64;
+
+  // Calculate the value of shifting (shift_norm) for the 64-bit sum.
+  if(word32_high != 0) {
+    shift_norm = 32 - WebRtcSpl_NormW32(word32_high);
+    residual_energy = (int32_t)(sum64 >> shift_norm);
+  } else {
+    if((word32_low & 0x80000000) != 0) {
+      shift_norm = 1;
+      residual_energy = (uint32_t)word32_low >> 1;
+    } else {
+      shift_norm = WebRtcSpl_NormW32(word32_low);
+      residual_energy = word32_low << shift_norm;
+      shift_norm = -shift_norm;
+    }
+  }
+
+  /* Q(q_val_polynomial * 2) * Q(q_val_corr) >> shift_internal >> shift_norm
+   *   = Q(q_val_corr - shift_internal - shift_norm + q_val_polynomial * 2)
+   */
+  *q_val_residual_energy = q_val_corr - shift_internal - shift_norm
+                           + q_val_polynomial * 2;
+
+  return residual_energy;
+}
+
+void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0,
+                              int16_t *inHiQ0,
+                              MaskFiltstr_enc *maskdata,
+                              int16_t snrQ10,
+                              const int16_t *pitchGains_Q12,
+                              int32_t *gain_lo_hiQ17,
+                              int16_t *lo_coeffQ15,
+                              int16_t *hi_coeffQ15)
+{
+  int k, n, ii;
+  int pos1, pos2;
+  int sh_lo, sh_hi, sh, ssh, shMem;
+  int16_t varscaleQ14;
+
+  int16_t tmpQQlo, tmpQQhi;
+  int32_t tmp32;
+  int16_t tmp16,tmp16b;
+
+  int16_t polyHI[ORDERHI+1];
+  int16_t rcQ15_lo[ORDERLO], rcQ15_hi[ORDERHI];
+
+
+  int16_t DataLoQ6[WINLEN], DataHiQ6[WINLEN];
+  int32_t corrloQQ[ORDERLO+2];
+  int32_t corrhiQQ[ORDERHI+1];
+  int32_t corrlo2QQ[ORDERLO+1];
+  int16_t scale;
+  int16_t QdomLO, QdomHI, newQdomHI, newQdomLO;
+
+  int32_t res_nrgQQ;
+  int32_t sqrt_nrg;
+
+  /* less-noise-at-low-frequencies factor */
+  int16_t aaQ14;
+
+  /* Multiplication with 1/sqrt(12) ~= 0.28901734104046 can be done by convertion to
+     Q15, i.e. round(0.28901734104046*32768) = 9471, and use 9471/32768.0 ~= 0.289032
+  */
+  int16_t snrq;
+  int shft;
+
+  int16_t tmp16a;
+  int32_t tmp32a, tmp32b, tmp32c;
+
+  int16_t a_LOQ11[ORDERLO+1];
+  int16_t k_vecloQ15[ORDERLO];
+  int16_t a_HIQ12[ORDERHI+1];
+  int16_t k_vechiQ15[ORDERHI];
+
+  int16_t stab;
+
+  snrq=snrQ10;
+
+  /* SNR= C * 2 ^ (D * snrq) ; C=0.289, D=0.05*log2(10)=0.166 (~=172 in Q10)*/
+  tmp16 = (int16_t)(snrq * 172 >> 10);  // Q10
+  tmp16b = exp2_Q10_T(tmp16); // Q10
+  snrq = (int16_t)(tmp16b * 285 >> 10);  // Q10
+
+  /* change quallevel depending on pitch gains and level fluctuations */
+  WebRtcIsacfix_GetVars(inLoQ0, pitchGains_Q12, &(maskdata->OldEnergy), &varscaleQ14);
+
+  /* less-noise-at-low-frequencies factor */
+  /* Calculation of 0.35 * (0.5 + 0.5 * varscale) in fixpoint:
+     With 0.35 in Q16 (0.35 ~= 22938/65536.0 = 0.3500061) and varscaleQ14 in Q14,
+     we get Q16*Q14>>16 = Q14
+  */
+  aaQ14 = (int16_t)((22938 * (8192 + (varscaleQ14 >> 1)) + 32768) >> 16);
+
+  /* Calculate tmp = (1.0 + aa*aa); in Q12 */
+  tmp16 = (int16_t)(aaQ14 * aaQ14 >> 15);  // Q14*Q14>>15 = Q13
+  tmpQQlo = 4096 + (tmp16 >> 1);  // Q12 + Q13>>1 = Q12.
+
+  /* Calculate tmp = (1.0+aa) * (1.0+aa); */
+  tmp16 = 8192 + (aaQ14 >> 1);  // 1+a in Q13.
+  tmpQQhi = (int16_t)(tmp16 * tmp16 >> 14);  // Q13*Q13>>14 = Q12
+
+  /* replace data in buffer by new look-ahead data */
+  for (pos1 = 0; pos1 < QLOOKAHEAD; pos1++) {
+    maskdata->DataBufferLoQ0[pos1 + WINLEN - QLOOKAHEAD] = inLoQ0[pos1];
+  }
+
+  for (k = 0; k < SUBFRAMES; k++) {
+
+    /* Update input buffer and multiply signal with window */
+    for (pos1 = 0; pos1 < WINLEN - UPDATE/2; pos1++) {
+      maskdata->DataBufferLoQ0[pos1] = maskdata->DataBufferLoQ0[pos1 + UPDATE/2];
+      maskdata->DataBufferHiQ0[pos1] = maskdata->DataBufferHiQ0[pos1 + UPDATE/2];
+      DataLoQ6[pos1] = (int16_t)(maskdata->DataBufferLoQ0[pos1] *
+          kWindowAutocorr[pos1] >> 15);  // Q0*Q21>>15 = Q6
+      DataHiQ6[pos1] = (int16_t)(maskdata->DataBufferHiQ0[pos1] *
+          kWindowAutocorr[pos1] >> 15);  // Q0*Q21>>15 = Q6
+    }
+    pos2 = (int16_t)(k * UPDATE / 2);
+    for (n = 0; n < UPDATE/2; n++, pos1++) {
+      maskdata->DataBufferLoQ0[pos1] = inLoQ0[QLOOKAHEAD + pos2];
+      maskdata->DataBufferHiQ0[pos1] = inHiQ0[pos2++];
+      DataLoQ6[pos1] = (int16_t)(maskdata->DataBufferLoQ0[pos1] *
+          kWindowAutocorr[pos1] >> 15);  // Q0*Q21>>15 = Q6
+      DataHiQ6[pos1] = (int16_t)(maskdata->DataBufferHiQ0[pos1] *
+          kWindowAutocorr[pos1] >> 15);  // Q0*Q21>>15 = Q6
+    }
+
+    /* Get correlation coefficients */
+    /* The highest absolute value measured inside DataLo in the test set
+       For DataHi, corresponding value was 160.
+
+       This means that it should be possible to represent the input values
+       to WebRtcSpl_AutoCorrelation() as Q6 values (since 307*2^6 =
+       19648). Of course, Q0 will also work, but due to the low energy in
+       DataLo and DataHi, the outputted autocorrelation will be more accurate
+       and mimic the floating point code better, by being in an high as possible
+       Q-domain.
+    */
+
+    WebRtcIsacfix_AutocorrFix(corrloQQ,DataLoQ6,WINLEN, ORDERLO+1, &scale);
+    QdomLO = 12-scale; // QdomLO is the Q-domain of corrloQQ
+    sh_lo = WebRtcSpl_NormW32(corrloQQ[0]);
+    QdomLO += sh_lo;
+    for (ii=0; ii<ORDERLO+2; ii++) {
+      corrloQQ[ii] <<= sh_lo;
+    }
+    /* It is investigated whether it was possible to use 16 bits for the
+       32-bit vector corrloQQ, but it didn't work. */
+
+    WebRtcIsacfix_AutocorrFix(corrhiQQ,DataHiQ6,WINLEN, ORDERHI, &scale);
+
+    QdomHI = 12-scale; // QdomHI is the Q-domain of corrhiQQ
+    sh_hi = WebRtcSpl_NormW32(corrhiQQ[0]);
+    QdomHI += sh_hi;
+    for (ii=0; ii<ORDERHI+1; ii++) {
+      corrhiQQ[ii] <<= sh_hi;
+    }
+
+    /* less noise for lower frequencies, by filtering/scaling autocorrelation sequences */
+
+    /* Calculate corrlo2[0] = tmpQQlo * corrlo[0] - 2.0*tmpQQlo * corrlo[1];*/
+    // |corrlo2QQ| in Q(QdomLO-5).
+    corrlo2QQ[0] = (WEBRTC_SPL_MUL_16_32_RSFT16(tmpQQlo, corrloQQ[0]) >> 1) -
+        (WEBRTC_SPL_MUL_16_32_RSFT16(aaQ14, corrloQQ[1]) >> 2);
+
+    /* Calculate corrlo2[n] = tmpQQlo * corrlo[n] - tmpQQlo * (corrlo[n-1] + corrlo[n+1]);*/
+    for (n = 1; n <= ORDERLO; n++) {
+
+      tmp32 = (corrloQQ[n - 1] >> 1) + (corrloQQ[n + 1] >> 1);  // Q(QdomLO-1).
+      corrlo2QQ[n] = (WEBRTC_SPL_MUL_16_32_RSFT16(tmpQQlo, corrloQQ[n]) >> 1) -
+          (WEBRTC_SPL_MUL_16_32_RSFT16(aaQ14, tmp32) >> 2);
+    }
+    QdomLO -= 5;
+
+    /* Calculate corrhi[n] = tmpQQhi * corrhi[n]; */
+    for (n = 0; n <= ORDERHI; n++) {
+      corrhiQQ[n] = WEBRTC_SPL_MUL_16_32_RSFT16(tmpQQhi, corrhiQQ[n]); // Q(12+QdomHI-16) = Q(QdomHI-4)
+    }
+    QdomHI -= 4;
+
+    /* add white noise floor */
+    /* corrlo2QQ is in Q(QdomLO) and corrhiQQ is in Q(QdomHI) */
+    /* Calculate corrlo2[0] += 9.5367431640625e-7; and
+       corrhi[0]  += 9.5367431640625e-7, where the constant is 1/2^20 */
+
+    tmp32 = WEBRTC_SPL_SHIFT_W32((int32_t) 1, QdomLO-20);
+    corrlo2QQ[0] += tmp32;
+    tmp32 = WEBRTC_SPL_SHIFT_W32((int32_t) 1, QdomHI-20);
+    corrhiQQ[0]  += tmp32;
+
+    /* corrlo2QQ is in Q(QdomLO) and corrhiQQ is in Q(QdomHI) before the following
+       code segment, where we want to make sure we get a 1-bit margin */
+    for (n = 0; n <= ORDERLO; n++) {
+      corrlo2QQ[n] >>= 1;  // Make sure we have a 1-bit margin.
+    }
+    QdomLO -= 1; // Now, corrlo2QQ is in Q(QdomLO), with a 1-bit margin
+
+    for (n = 0; n <= ORDERHI; n++) {
+      corrhiQQ[n] >>= 1;  // Make sure we have a 1-bit margin.
+    }
+    QdomHI -= 1; // Now, corrhiQQ is in Q(QdomHI), with a 1-bit margin
+
+
+    newQdomLO = QdomLO;
+
+    for (n = 0; n <= ORDERLO; n++) {
+      int32_t tmp, tmpB, tmpCorr;
+      int16_t alpha=328; //0.01 in Q15
+      int16_t beta=324; //(1-0.01)*0.01=0.0099 in Q15
+      int16_t gamma=32440; //(1-0.01)=0.99 in Q15
+
+      if (maskdata->CorrBufLoQQ[n] != 0) {
+        shMem=WebRtcSpl_NormW32(maskdata->CorrBufLoQQ[n]);
+        sh = QdomLO - maskdata->CorrBufLoQdom[n];
+        if (sh<=shMem) {
+          tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufLoQQ[n], sh); // Get CorrBufLoQQ to same domain as corrlo2
+          tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha, tmp);
+        } else if ((sh-shMem)<7){
+          tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufLoQQ[n], shMem); // Shift up CorrBufLoQQ as much as possible
+          // Shift |alpha| the number of times required to get |tmp| in QdomLO.
+          tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << (sh - shMem), tmp);
+        } else {
+          tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufLoQQ[n], shMem); // Shift up CorrBufHiQQ as much as possible
+          // Shift |alpha| as much as possible without overflow the number of
+          // times required to get |tmp| in QdomLO.
+          tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << 6, tmp);
+          tmpCorr = corrloQQ[n] >> (sh - shMem - 6);
+          tmp = tmp + tmpCorr;
+          maskdata->CorrBufLoQQ[n] = tmp;
+          newQdomLO = QdomLO-(sh-shMem-6);
+          maskdata->CorrBufLoQdom[n] = newQdomLO;
+        }
+      } else
+        tmp = 0;
+
+      tmp = tmp + corrlo2QQ[n];
+
+      maskdata->CorrBufLoQQ[n] = tmp;
+      maskdata->CorrBufLoQdom[n] = QdomLO;
+
+      tmp=WEBRTC_SPL_MUL_16_32_RSFT15(beta, tmp);
+      tmpB=WEBRTC_SPL_MUL_16_32_RSFT15(gamma, corrlo2QQ[n]);
+      corrlo2QQ[n] = tmp + tmpB;
+    }
+    if( newQdomLO!=QdomLO) {
+      for (n = 0; n <= ORDERLO; n++) {
+        if (maskdata->CorrBufLoQdom[n] != newQdomLO)
+          corrloQQ[n] >>= maskdata->CorrBufLoQdom[n] - newQdomLO;
+      }
+      QdomLO = newQdomLO;
+    }
+
+
+    newQdomHI = QdomHI;
+
+    for (n = 0; n <= ORDERHI; n++) {
+      int32_t tmp, tmpB, tmpCorr;
+      int16_t alpha=328; //0.01 in Q15
+      int16_t beta=324; //(1-0.01)*0.01=0.0099 in Q15
+      int16_t gamma=32440; //(1-0.01)=0.99 in Q1
+      if (maskdata->CorrBufHiQQ[n] != 0) {
+        shMem=WebRtcSpl_NormW32(maskdata->CorrBufHiQQ[n]);
+        sh = QdomHI - maskdata->CorrBufHiQdom[n];
+        if (sh<=shMem) {
+          tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufHiQQ[n], sh); // Get CorrBufHiQQ to same domain as corrhi
+          tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha, tmp);
+          tmpCorr = corrhiQQ[n];
+          tmp = tmp + tmpCorr;
+          maskdata->CorrBufHiQQ[n] = tmp;
+          maskdata->CorrBufHiQdom[n] = QdomHI;
+        } else if ((sh-shMem)<7) {
+          tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufHiQQ[n], shMem); // Shift up CorrBufHiQQ as much as possible
+          // Shift |alpha| the number of times required to get |tmp| in QdomHI.
+          tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << (sh - shMem), tmp);
+          tmpCorr = corrhiQQ[n];
+          tmp = tmp + tmpCorr;
+          maskdata->CorrBufHiQQ[n] = tmp;
+          maskdata->CorrBufHiQdom[n] = QdomHI;
+        } else {
+          tmp = WEBRTC_SPL_SHIFT_W32(maskdata->CorrBufHiQQ[n], shMem); // Shift up CorrBufHiQQ as much as possible
+          // Shift |alpha| as much as possible without overflow the number of
+          // times required to get |tmp| in QdomHI.
+          tmp = WEBRTC_SPL_MUL_16_32_RSFT15(alpha << 6, tmp);
+          tmpCorr = corrhiQQ[n] >> (sh - shMem - 6);
+          tmp = tmp + tmpCorr;
+          maskdata->CorrBufHiQQ[n] = tmp;
+          newQdomHI = QdomHI-(sh-shMem-6);
+          maskdata->CorrBufHiQdom[n] = newQdomHI;
+        }
+      } else {
+        tmp = corrhiQQ[n];
+        tmpCorr = tmp;
+        maskdata->CorrBufHiQQ[n] = tmp;
+        maskdata->CorrBufHiQdom[n] = QdomHI;
+      }
+
+      tmp=WEBRTC_SPL_MUL_16_32_RSFT15(beta, tmp);
+      tmpB=WEBRTC_SPL_MUL_16_32_RSFT15(gamma, tmpCorr);
+      corrhiQQ[n] = tmp + tmpB;
+    }
+
+    if( newQdomHI!=QdomHI) {
+      for (n = 0; n <= ORDERHI; n++) {
+        if (maskdata->CorrBufHiQdom[n] != newQdomHI)
+          corrhiQQ[n] >>= maskdata->CorrBufHiQdom[n] - newQdomHI;
+      }
+      QdomHI = newQdomHI;
+    }
+
+    stab=WebRtcSpl_LevinsonW32_JSK(corrlo2QQ, a_LOQ11, k_vecloQ15, ORDERLO);
+
+    if (stab<0) {  // If unstable use lower order
+      a_LOQ11[0]=2048;
+      for (n = 1; n <= ORDERLO; n++) {
+        a_LOQ11[n]=0;
+      }
+
+      stab=WebRtcSpl_LevinsonW32_JSK(corrlo2QQ, a_LOQ11, k_vecloQ15, 8);
+    }
+
+
+    WebRtcSpl_LevinsonDurbin(corrhiQQ,  a_HIQ12,  k_vechiQ15, ORDERHI);
+
+    /* bandwidth expansion */
+    for (n = 1; n <= ORDERLO; n++) {
+      a_LOQ11[n] = (int16_t)((kPolyVecLo[n - 1] * a_LOQ11[n] + (1 << 14)) >>
+          15);
+    }
+
+
+    polyHI[0] = a_HIQ12[0];
+    for (n = 1; n <= ORDERHI; n++) {
+      a_HIQ12[n] = (int16_t)(((int32_t)(kPolyVecHi[n - 1] * a_HIQ12[n]) +
+        (1 << 14)) >> 15);
+      polyHI[n] = a_HIQ12[n];
+    }
+
+    /* Normalize the corrlo2 vector */
+    sh = WebRtcSpl_NormW32(corrlo2QQ[0]);
+    for (n = 0; n <= ORDERLO; n++) {
+      corrlo2QQ[n] <<= sh;
+    }
+    QdomLO += sh; /* Now, corrlo2QQ is still in Q(QdomLO) */
+
+
+    /* residual energy */
+
+    sh_lo = 31;
+    res_nrgQQ = WebRtcIsacfix_CalculateResidualEnergy(ORDERLO, QdomLO,
+        kShiftLowerBand, a_LOQ11, corrlo2QQ, &sh_lo);
+
+    /* Convert to reflection coefficients */
+    WebRtcSpl_AToK_JSK(a_LOQ11, ORDERLO, rcQ15_lo);
+
+    if (sh_lo & 0x0001) {
+      res_nrgQQ >>= 1;
+      sh_lo-=1;
+    }
+
+
+    if( res_nrgQQ > 0 )
+    {
+      sqrt_nrg=WebRtcSpl_Sqrt(res_nrgQQ);
+
+      /* add hearing threshold and compute the gain */
+      /* lo_coeff = varscale * S_N_R / (sqrt_nrg + varscale * H_T_H); */
+
+      tmp32a = varscaleQ14 >> 1;  // H_T_HQ19=65536 (16-17=-1)
+      ssh = sh_lo >> 1;  // sqrt_nrg is in Qssh.
+      sh = ssh - 14;
+      tmp32b = WEBRTC_SPL_SHIFT_W32(tmp32a, sh); // Q14->Qssh
+      tmp32c = sqrt_nrg + tmp32b;  // Qssh  (denominator)
+      tmp32a = varscaleQ14 * snrq;  // Q24 (numerator)
+
+      sh = WebRtcSpl_NormW32(tmp32c);
+      shft = 16 - sh;
+      tmp16a = (int16_t) WEBRTC_SPL_SHIFT_W32(tmp32c, -shft); // Q(ssh-shft)  (denominator)
+
+      tmp32b = WebRtcSpl_DivW32W16(tmp32a, tmp16a); // Q(24-ssh+shft)
+      sh = ssh-shft-7;
+      *gain_lo_hiQ17 = WEBRTC_SPL_SHIFT_W32(tmp32b, sh);  // Gains in Q17
+    }
+    else
+    {
+      *gain_lo_hiQ17 = 100;  // Gains in Q17
+    }
+    gain_lo_hiQ17++;
+
+    /* copy coefficients to output array */
+    for (n = 0; n < ORDERLO; n++) {
+      *lo_coeffQ15 = (int16_t) (rcQ15_lo[n]);
+      lo_coeffQ15++;
+    }
+    /* residual energy */
+    sh_hi = 31;
+    res_nrgQQ = WebRtcIsacfix_CalculateResidualEnergy(ORDERHI, QdomHI,
+        kShiftHigherBand, a_HIQ12, corrhiQQ, &sh_hi);
+
+    /* Convert to reflection coefficients */
+    WebRtcSpl_LpcToReflCoef(polyHI, ORDERHI, rcQ15_hi);
+
+    if (sh_hi & 0x0001) {
+      res_nrgQQ >>= 1;
+      sh_hi-=1;
+    }
+
+
+    if( res_nrgQQ > 0 )
+    {
+      sqrt_nrg=WebRtcSpl_Sqrt(res_nrgQQ);
+
+
+      /* add hearing threshold and compute the gain */
+      /* hi_coeff = varscale * S_N_R / (sqrt_nrg + varscale * H_T_H); */
+
+      tmp32a = varscaleQ14 >> 1;  // H_T_HQ19=65536 (16-17=-1)
+
+      ssh = sh_hi >> 1;  // |sqrt_nrg| is in Qssh.
+      sh = ssh - 14;
+      tmp32b = WEBRTC_SPL_SHIFT_W32(tmp32a, sh); // Q14->Qssh
+      tmp32c = sqrt_nrg + tmp32b;  // Qssh  (denominator)
+      tmp32a = varscaleQ14 * snrq;  // Q24 (numerator)
+
+      sh = WebRtcSpl_NormW32(tmp32c);
+      shft = 16 - sh;
+      tmp16a = (int16_t) WEBRTC_SPL_SHIFT_W32(tmp32c, -shft); // Q(ssh-shft)  (denominator)
+
+      tmp32b = WebRtcSpl_DivW32W16(tmp32a, tmp16a); // Q(24-ssh+shft)
+      sh = ssh-shft-7;
+      *gain_lo_hiQ17 = WEBRTC_SPL_SHIFT_W32(tmp32b, sh);  // Gains in Q17
+    }
+    else
+    {
+      *gain_lo_hiQ17 = 100;  // Gains in Q17
+    }
+    gain_lo_hiQ17++;
+
+
+    /* copy coefficients to output array */
+    for (n = 0; n < ORDERHI; n++) {
+      *hi_coeffQ15 = rcQ15_hi[n];
+      hi_coeffQ15++;
+    }
+  }
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h b/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h
new file mode 100644
index 0000000..d6d1e8f
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h
@@ -0,0 +1,69 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lpc_masking_model.h
+ *
+ * LPC functions
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_LPC_MASKING_MODEL_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_LPC_MASKING_MODEL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+void WebRtcIsacfix_GetVars(const int16_t *input,
+                           const int16_t *pitchGains_Q12,
+                           uint32_t *oldEnergy,
+                           int16_t *varscale);
+
+void WebRtcIsacfix_GetLpcCoef(int16_t *inLoQ0,
+                              int16_t *inHiQ0,
+                              MaskFiltstr_enc *maskdata,
+                              int16_t snrQ10,
+                              const int16_t *pitchGains_Q12,
+                              int32_t *gain_lo_hiQ17,
+                              int16_t *lo_coeffQ15,
+                              int16_t *hi_coeffQ15);
+
+typedef int32_t (*CalculateResidualEnergy)(int lpc_order,
+                                           int32_t q_val_corr,
+                                           int q_val_polynomial,
+                                           int16_t* a_polynomial,
+                                           int32_t* corr_coeffs,
+                                           int* q_val_residual_energy);
+extern CalculateResidualEnergy WebRtcIsacfix_CalculateResidualEnergy;
+
+int32_t WebRtcIsacfix_CalculateResidualEnergyC(int lpc_order,
+                                               int32_t q_val_corr,
+                                               int q_val_polynomial,
+                                               int16_t* a_polynomial,
+                                               int32_t* corr_coeffs,
+                                               int* q_val_residual_energy);
+
+#if defined(MIPS_DSP_R2_LE)
+int32_t WebRtcIsacfix_CalculateResidualEnergyMIPS(int lpc_order,
+                                                  int32_t q_val_corr,
+                                                  int q_val_polynomial,
+                                                  int16_t* a_polynomial,
+                                                  int32_t* corr_coeffs,
+                                                  int* q_val_residual_energy);
+#endif
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_LPC_MASKING_MODEL_H_ */
diff --git a/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_mips.c b/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_mips.c
new file mode 100644
index 0000000..727008d
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_mips.c
@@ -0,0 +1,237 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h"
+
+// MIPS DSPR2 optimization for function WebRtcIsacfix_CalculateResidualEnergy
+// Bit-exact with WebRtcIsacfix_CalculateResidualEnergyC from file
+// lpc_masking_model.c
+int32_t WebRtcIsacfix_CalculateResidualEnergyMIPS(int lpc_order,
+                                                  int32_t q_val_corr,
+                                                  int q_val_polynomial,
+                                                  int16_t* a_polynomial,
+                                                  int32_t* corr_coeffs,
+                                                  int* q_val_residual_energy) {
+
+  int i = 0, j = 0;
+  int shift_internal = 0, shift_norm = 0;
+  int32_t tmp32 = 0, word32_high = 0, word32_low = 0, residual_energy = 0;
+  int32_t tmp_corr_c = corr_coeffs[0];
+  int16_t* tmp_a_poly = &a_polynomial[0];
+  int32_t sum64_hi = 0;
+  int32_t sum64_lo = 0;
+
+  for (j = 0; j <= lpc_order; j++) {
+    // For the case of i == 0:
+    //   residual_energy +=
+    //     a_polynomial[j] * corr_coeffs[i] * a_polynomial[j - i];
+
+    int32_t tmp2, tmp3;
+    int16_t sign_1;
+    int16_t sign_2;
+    int16_t sign_3;
+
+    __asm __volatile (
+      ".set      push                                                \n\t"
+      ".set      noreorder                                           \n\t"
+      "lh        %[tmp2],         0(%[tmp_a_poly])                   \n\t"
+      "mul       %[tmp32],        %[tmp2],            %[tmp2]        \n\t"
+      "addiu     %[tmp_a_poly],   %[tmp_a_poly],      2              \n\t"
+      "sra       %[sign_2],       %[sum64_hi],        31             \n\t"
+      "mult      $ac0,            %[tmp32],           %[tmp_corr_c]  \n\t"
+      "shilov    $ac0,            %[shift_internal]                  \n\t"
+      "mfhi      %[tmp2],         $ac0                               \n\t"
+      "mflo      %[tmp3],         $ac0                               \n\t"
+      "sra       %[sign_1],       %[tmp2],            31             \n\t"
+      "xor       %[sign_3],       %[sign_1],          %[sign_2]      \n\t"
+      ".set      pop                                                 \n\t"
+      : [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3), [tmp32] "=&r" (tmp32),
+        [tmp_a_poly] "+r" (tmp_a_poly), [sign_1] "=&r" (sign_1),
+        [sign_3] "=&r" (sign_3), [sign_2] "=&r" (sign_2),
+        [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+      : [tmp_corr_c] "r" (tmp_corr_c), [shift_internal] "r" (shift_internal)
+      : "hi", "lo", "memory"
+    );
+
+    if (sign_3 != 0) {
+      __asm __volatile (
+        ".set      push                                      \n\t"
+        ".set      noreorder                                 \n\t"
+        "addsc     %[sum64_lo],   %[sum64_lo],    %[tmp3]    \n\t"
+        "addwc     %[sum64_hi],   %[sum64_hi],    %[tmp2]    \n\t"
+        ".set      pop                                       \n\t"
+        : [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+        : [tmp2] "r" (tmp2), [tmp3] "r" (tmp3)
+        : "hi", "lo", "memory"
+      );
+    } else {
+      if (((!(sign_1 || sign_2)) && (0x7FFFFFFF - sum64_hi < tmp2)) ||
+          ((sign_1 && sign_2) && (sum64_hi + tmp2 > 0))) {
+        // Shift right for overflow.
+        __asm __volatile (
+          ".set      push                                             \n\t"
+          ".set      noreorder                                        \n\t"
+          "addiu     %[shift_internal], %[shift_internal],  1         \n\t"
+          "prepend   %[sum64_lo],       %[sum64_hi],        1         \n\t"
+          "sra       %[sum64_hi],       %[sum64_hi],        1         \n\t"
+          "prepend   %[tmp3],           %[tmp2],            1         \n\t"
+          "sra       %[tmp2],           %[tmp2],            1         \n\t"
+          "addsc     %[sum64_lo],       %[sum64_lo],        %[tmp3]   \n\t"
+          "addwc     %[sum64_hi],       %[sum64_hi],        %[tmp2]   \n\t"
+          ".set      pop                                              \n\t"
+          : [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3),
+            [shift_internal] "+r" (shift_internal),
+            [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+          :
+          : "hi", "lo", "memory"
+        );
+      } else {
+        __asm __volatile (
+          ".set      push                                      \n\t"
+          ".set      noreorder                                 \n\t"
+          "addsc     %[sum64_lo],   %[sum64_lo],    %[tmp3]    \n\t"
+          "addwc     %[sum64_hi],   %[sum64_hi],    %[tmp2]    \n\t"
+          ".set      pop                                       \n\t"
+          : [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+          : [tmp2] "r" (tmp2), [tmp3] "r" (tmp3)
+          : "hi", "lo", "memory"
+        );
+      }
+    }
+  }
+
+  for (i = 1; i <= lpc_order; i++) {
+    tmp_corr_c = corr_coeffs[i];
+    int16_t* tmp_a_poly_j = &a_polynomial[i];
+    int16_t* tmp_a_poly_j_i = &a_polynomial[0];
+    for (j = i; j <= lpc_order; j++) {
+      // For the case of i = 1 .. lpc_order:
+      //   residual_energy +=
+      //     a_polynomial[j] * corr_coeffs[i] * a_polynomial[j - i] * 2;
+
+      int32_t tmp2, tmp3;
+      int16_t sign_1;
+      int16_t sign_2;
+      int16_t sign_3;
+
+      __asm __volatile (
+        ".set      push                                                   \n\t"
+        ".set      noreorder                                              \n\t"
+        "lh        %[tmp3],           0(%[tmp_a_poly_j])                  \n\t"
+        "lh        %[tmp2],           0(%[tmp_a_poly_j_i])                \n\t"
+        "addiu     %[tmp_a_poly_j],   %[tmp_a_poly_j],    2               \n\t"
+        "addiu     %[tmp_a_poly_j_i], %[tmp_a_poly_j_i],  2               \n\t"
+        "mul       %[tmp32],          %[tmp3],            %[tmp2]         \n\t"
+        "sll       %[tmp32],          %[tmp32],           1               \n\t"
+        "mult      $ac0,              %[tmp32],           %[tmp_corr_c]   \n\t"
+        "shilov    $ac0,              %[shift_internal]                   \n\t"
+        "mfhi      %[tmp2],           $ac0                                \n\t"
+        "mflo      %[tmp3],           $ac0                                \n\t"
+        "sra       %[sign_1],         %[tmp2],            31              \n\t"
+        "sra       %[sign_2],         %[sum64_hi],        31              \n\t"
+        "xor       %[sign_3],         %[sign_1],          %[sign_2]       \n\t"
+        ".set      pop                                                    \n\t"
+        : [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3), [tmp32] "=&r" (tmp32),
+          [tmp_a_poly_j] "+r" (tmp_a_poly_j), [sign_1] "=&r" (sign_1),
+          [tmp_a_poly_j_i] "+r" (tmp_a_poly_j_i), [sign_2] "=&r" (sign_2),
+          [sign_3] "=&r" (sign_3), [sum64_hi] "+r" (sum64_hi),
+          [sum64_lo] "+r" (sum64_lo)
+        : [tmp_corr_c] "r" (tmp_corr_c), [shift_internal] "r" (shift_internal)
+        : "hi", "lo", "memory"
+      );
+      if (sign_3 != 0) {
+        __asm __volatile (
+          ".set      push                                     \n\t"
+          ".set      noreorder                                \n\t"
+          "addsc     %[sum64_lo],   %[sum64_lo],   %[tmp3]    \n\t"
+          "addwc     %[sum64_hi],   %[sum64_hi],   %[tmp2]    \n\t"
+          ".set      pop                                      \n\t"
+          : [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3), [sum64_hi] "+r" (sum64_hi),
+            [sum64_lo] "+r" (sum64_lo)
+          :
+          :"memory"
+        );
+      } else {
+        // Test overflow and sum the result.
+        if (((!(sign_1 || sign_2)) && (0x7FFFFFFF - sum64_hi < tmp2)) ||
+            ((sign_1 && sign_2) && (sum64_hi + tmp2 > 0))) {
+          // Shift right for overflow.
+          __asm __volatile (
+            ".set      push                                              \n\t"
+            ".set      noreorder                                         \n\t"
+            "addiu     %[shift_internal],  %[shift_internal],  1         \n\t"
+            "prepend   %[sum64_lo],        %[sum64_hi],        1         \n\t"
+            "sra       %[sum64_hi],        %[sum64_hi],        1         \n\t"
+            "prepend   %[tmp3],            %[tmp2],            1         \n\t"
+            "sra       %[tmp2],            %[tmp2],            1         \n\t"
+            "addsc     %[sum64_lo],        %[sum64_lo],        %[tmp3]   \n\t"
+            "addwc     %[sum64_hi],        %[sum64_hi],        %[tmp2]   \n\t"
+            ".set      pop                                               \n\t"
+            : [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3),
+              [shift_internal] "+r" (shift_internal),
+              [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+            :
+            : "hi", "lo", "memory"
+          );
+        } else {
+          __asm __volatile (
+            ".set      push                                      \n\t"
+            ".set      noreorder                                 \n\t"
+            "addsc     %[sum64_lo],    %[sum64_lo],   %[tmp3]    \n\t"
+            "addwc     %[sum64_hi],    %[sum64_hi],   %[tmp2]    \n\t"
+            ".set      pop                                       \n\t"
+            : [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3),
+              [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+            :
+            : "hi", "lo", "memory"
+          );
+        }
+      }
+    }
+  }
+  word32_high = sum64_hi;
+  word32_low = sum64_lo;
+
+  // Calculate the value of shifting (shift_norm) for the 64-bit sum.
+  if (word32_high != 0) {
+    shift_norm = 32 - WebRtcSpl_NormW32(word32_high);
+    int tmp1;
+    __asm __volatile (
+      ".set    push                                                     \n\t"
+      ".set    noreorder                                                \n\t"
+      "srl     %[residual_energy],  %[sum64_lo],         %[shift_norm]  \n\t"
+      "li      %[tmp1],             32                                  \n\t"
+      "subu    %[tmp1],             %[tmp1],             %[shift_norm]  \n\t"
+      "sll     %[tmp1],             %[sum64_hi],         %[tmp1]        \n\t"
+      "or      %[residual_energy],  %[residual_energy],  %[tmp1]        \n\t"
+      ".set    pop                                                      \n\t"
+      : [residual_energy] "=&r" (residual_energy), [tmp1]"=&r"(tmp1),
+        [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+      : [shift_norm] "r" (shift_norm)
+      : "memory"
+    );
+  } else {
+    if ((word32_low & 0x80000000) != 0) {
+      shift_norm = 1;
+      residual_energy = (uint32_t)word32_low >> 1;
+    } else {
+      shift_norm = WebRtcSpl_NormW32(word32_low);
+      residual_energy = word32_low << shift_norm;
+      shift_norm = -shift_norm;
+    }
+  }
+
+  // Q(q_val_polynomial * 2) * Q(q_val_corr) >> shift_internal >> shift_norm
+  //   = Q(q_val_corr - shift_internal - shift_norm + q_val_polynomial * 2)
+  *q_val_residual_energy =
+      q_val_corr - shift_internal - shift_norm + q_val_polynomial * 2;
+
+  return residual_energy;
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_unittest.cc b/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_unittest.cc
new file mode 100644
index 0000000..1604cc4
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_unittest.cc
@@ -0,0 +1,61 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+class LpcMaskingModelTest : public testing::Test {
+ protected:
+  // Pass a function pointer to the Tester function.
+  void CalculateResidualEnergyTester(CalculateResidualEnergy
+                                     CalculateResidualEnergyFunction) {
+    const int kIntOrder = 10;
+    const int32_t kInt32QDomain = 5;
+    const int kIntShift = 11;
+    int16_t a[kIntOrder + 1] = {32760, 122, 7, 0, -32760, -3958,
+        -48, 18745, 498, 9, 23456};
+    int32_t corr[kIntOrder + 1] = {11443647, -27495, 0,
+        98745, -11443600, 1, 1, 498, 9, 888, 23456};
+    int q_shift_residual = 0;
+    int32_t residual_energy = 0;
+
+    // Test the code path where (residual_energy >= 0x10000).
+    residual_energy = CalculateResidualEnergyFunction(kIntOrder,
+        kInt32QDomain, kIntShift, a, corr, &q_shift_residual);
+    EXPECT_EQ(1789023310, residual_energy);
+    EXPECT_EQ(2, q_shift_residual);
+
+    // Test the code path where (residual_energy < 0x10000)
+    // and ((energy & 0x8000) != 0).
+    for (int i = 0; i < kIntOrder + 1; i++) {
+      a[i] = 24575 >> i;
+      corr[i] = i;
+    }
+    residual_energy = CalculateResidualEnergyFunction(kIntOrder,
+        kInt32QDomain, kIntShift, a, corr, &q_shift_residual);
+    EXPECT_EQ(1595279092, residual_energy);
+    EXPECT_EQ(26, q_shift_residual);
+
+    // Test the code path where (residual_energy <= 0x7fff).
+    for (int i = 0; i < kIntOrder + 1; i++) {
+      a[i] = 2457 >> i;
+    }
+    residual_energy = CalculateResidualEnergyFunction(kIntOrder,
+        kInt32QDomain, kIntShift, a, corr, &q_shift_residual);
+    EXPECT_EQ(2029266944, residual_energy);
+    EXPECT_EQ(33, q_shift_residual);
+  }
+};
+
+TEST_F(LpcMaskingModelTest, CalculateResidualEnergyTest) {
+  CalculateResidualEnergyTester(WebRtcIsacfix_CalculateResidualEnergyC);
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/lpc_tables.c b/modules/audio_coding/codecs/isac/fix/source/lpc_tables.c
new file mode 100644
index 0000000..df723fd
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/lpc_tables.c
@@ -0,0 +1,1280 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lpc_tables.c
+ *
+ * Coding tables for the KLT coefficients
+ *
+ */
+
+
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "modules/audio_coding/codecs/isac/fix/source/lpc_tables.h"
+
+/* indices of KLT coefficients used */
+const uint16_t WebRtcIsacfix_kSelIndGain[12] = {
+  0,  1,  2,  3,  4,  5,  6,  7,  8,  9,
+  10,  11};
+
+const uint16_t WebRtcIsacfix_kSelIndShape[108] = {
+  0,  1,  2,  3,  4,  5,  6,  7,  8,  9,
+  10,  11,  12,  13,  14,  15,  16,  17,  18,  19,
+  20,  21,  22,  23,  24,  25,  26,  27,  28,  29,
+  30,  31,  32,  33,  34,  35,  36,  37,  38,  39,
+  40,  41,  42,  43,  44,  45,  46,  47,  48,  49,
+  50,  51,  52,  53,  54,  55,  56,  57,  58,  59,
+  60,  61,  62,  63,  64,  65,  66,  67,  68,  69,
+  70,  71,  72,  73,  74,  75,  76,  77,  78,  79,
+  80,  81,  82,  83,  84,  85,  86,  87,  88,  89,
+  90,  91,  92,  93,  94,  95,  96,  97,  98,  99,
+  100,  101,  102,  103,  104,  105,  106,  107
+};
+
+/* cdf array for model indicator */
+const uint16_t WebRtcIsacfix_kModelCdf[4] = {
+  0,  15434,  37548,  65535
+};
+
+/* pointer to cdf array for model indicator */
+const uint16_t *WebRtcIsacfix_kModelCdfPtr[1] = {
+  WebRtcIsacfix_kModelCdf
+};
+
+/* initial cdf index for decoder of model indicator */
+const uint16_t WebRtcIsacfix_kModelInitIndex[1] = {
+  1
+};
+
+/* offset to go from rounded value to quantization index */
+const int16_t WebRtcIsacfix_kQuantMinGain[12] ={
+  3,  6,  4,  6,  6,  9,  5,  16,  11,  34,  32,  47
+};
+
+const int16_t WebRtcIsacfix_kQuantMinShape[108] = {
+  0,  0,  0,  0,  0,  0,  0,  0,  0,  1,
+  1,  1,  1,  1,  2,  2,  2,  3,  0,  0,
+  0,  0,  1,  0,  0,  0,  0,  1,  1,  1,
+  1,  1,  1,  2,  2,  3,  0,  0,  0,  0,
+  1,  0,  1,  1,  1,  1,  1,  1,  1,  2,
+  2,  4,  3,  5,  0,  0,  0,  0,  1,  1,
+  1,  1,  1,  1,  2,  1,  2,  2,  3,  4,
+  4,  7,  0,  0,  1,  1,  1,  1,  1,  1,
+  1,  2,  3,  2,  3,  4,  4,  5,  7,  13,
+  0,  1,  1,  2,  3,  2,  2,  2,  4,  4,
+  5,  6,  7,  11, 9, 13, 12, 26
+};
+
+/* maximum quantization index */
+const uint16_t WebRtcIsacfix_kMaxIndGain[12] = {
+  6,  12,  8,  14,  10,  19,  12,  31,  22,  56,  52,  138
+};
+
+const uint16_t WebRtcIsacfix_kMaxIndShape[108] = {
+  0,  0,  0,  0,  0,  0,  0,  0,  0,  1,
+  2,  2,  2,  2,  4,  4,  5,  6,  0,  0,
+  0,  0,  1,  0,  0,  0,  0,  1,  2,  2,
+  2,  2,  3,  4,  5,  7,  0,  0,  0,  0,
+  2,  0,  2,  2,  2,  2,  3,  2,  2,  4,
+  4,  6,  6,  9,  0,  0,  0,  0,  2,  2,
+  2,  2,  2,  2,  3,  2,  4,  4,  7,  7,
+  9,  13, 0,  0,  2,  2,  2,  2,  2,  2,
+  3,  4,  5,  4,  6,  8,  8, 10, 16, 25,
+  0,  2,  2,  4,  5,  4,  4,  4,  7,  8,
+  9, 10, 13, 19, 17, 23, 25, 49
+};
+
+/* index offset */
+const uint16_t WebRtcIsacfix_kOffsetGain[3][12] = {
+  { 0,  7,  20,  29,  44,  55,  75,  88,  120,  143,  200,  253},
+  { 0,  7,  19,  27,  42,  53,  73,  86,  117,  140,  197,  249},
+  { 0,  7,  20,  28,  44,  55,  75,  89,  121,  145,  202,  257}
+};
+
+const uint16_t WebRtcIsacfix_kOffsetShape[3][108] = {
+  {
+    0,  1,  2,  3,  4,  5,  6,  7,  8,  9,
+    11,  14,  17,  20,  23,  28,  33,  39,  46,  47,
+    48,  49,  50,  52,  53,  54,  55,  56,  58,  61,
+    64,  67,  70,  74,  79,  85,  93,  94,  95,  96,
+    97,  100,  101,  104,  107,  110,  113,  117,  120,  123,
+    128,  133,  140,  147,  157,  158,  159,  160,  161,  164,
+    167,  170,  173,  176,  179,  183,  186,  191,  196,  204,
+    212,  222,  236,  237,  238,  241,  244,  247,  250,  253,
+    256,  260,  265,  271,  276,  283,  292,  301,  312,  329,
+    355,  356,  359,  362,  367,  373,  378,  383,  388,  396,
+    405,  415,  426,  440,  460,  478,  502,  528
+  },
+  {
+    0,  1,  2,  3,  4,  6,  7,  8,  9,  11,
+    13,  16,  19,  22,  26,  29,  34,  39,  45,  46,
+    47,  48,  49,  50,  51,  52,  53,  55,  57,  60,
+    63,  66,  70,  73,  78,  84,  91,  92,  93,  94,
+    95,  96,  97,  99,  102,  105,  108,  111,  114,  118,
+    123,  128,  134,  141,  151,  152,  153,  154,  156,  159,
+    162,  165,  168,  171,  174,  177,  181,  186,  194,  200,
+    208,  218,  233,  234,  235,  236,  239,  242,  245,  248,
+    251,  254,  258,  263,  270,  277,  288,  297,  308,  324,
+    349,  351,  354,  357,  361,  366,  372,  378,  383,  390,
+    398,  407,  420,  431,  450,  472,  496,  524
+  },
+  {
+    0,  1,  2,  3,  4,  5,  6,  7,  8,  11,
+    14,  17,  20,  23,  26,  29,  34,  40,  47,  48,
+    49,  50,  51,  52,  53,  54,  55,  58,  61,  64,
+    67,  70,  73,  77,  82,  88,  96,  97,  98,  99,
+    101,  102,  104,  107,  110,  113,  116,  119,  122,  125,
+    129,  134,  141,  150,  160,  161,  162,  163,  166,  168,
+    171,  174,  177,  180,  183,  186,  190,  195,  201,  208,
+    216,  226,  243,  244,  245,  248,  251,  254,  257,  260,
+    263,  268,  273,  278,  284,  291,  299,  310,  323,  340,
+    366,  368,  371,  374,  379,  383,  389,  394,  399,  406,
+    414,  422,  433,  445,  461,  480,  505,  533
+  }
+};
+
+/* initial cdf index for KLT coefficients */
+const uint16_t WebRtcIsacfix_kInitIndexGain[3][12] = {
+  { 3,  6,  4,  7,  5,  10,  6,  16,  11,  28,  26,  69},
+  { 3,  6,  4,  7,  5,  10,  6,  15,  11,  28,  26,  69},
+  { 3,  6,  4,  8,  5,  10,  7,  16,  12,  28,  27,  70}
+};
+
+const uint16_t WebRtcIsacfix_kInitIndexShape[3][108] = {
+  {
+    0,  0,  0,  0,  0,  0,  0,  0,  0,  1,
+    1,  1,  1,  1,  2,  2,  3,  3,  0,  0,
+    0,  0,  1,  0,  0,  0,  0,  1,  1,  1,
+    1,  1,  2,  2,  3,  4,  0,  0,  0,  0,
+    1,  0,  1,  1,  1,  1,  2,  1,  1,  2,
+    2,  3,  3,  5,  0,  0,  0,  0,  1,  1,
+    1,  1,  1,  1,  2,  1,  2,  2,  4,  4,
+    5,  7,  0,  0,  1,  1,  1,  1,  1,  1,
+    2,  2,  3,  2,  3,  4,  4,  5,  8,  13,
+    0,  1,  1,  2,  3,  2,  2,  2,  4,  4,
+    5,  5,  7,  10,  9,  12,  13,  25
+  },
+  {
+    0,  0,  0,  0,  1,  0,  0,  0,  1,  1,
+    1,  1,  1,  2,  1,  2,  2,  3,  0,  0,
+    0,  0,  0,  0,  0,  0,  1,  1,  1,  1,
+    1,  2,  1,  2,  3,  3,  0,  0,  0,  0,
+    0,  0,  1,  1,  1,  1,  1,  1,  2,  2,
+    2,  3,  3,  5,  0,  0,  0,  1,  1,  1,
+    1,  1,  1,  1,  1,  2,  2,  4,  3,  4,
+    5,  7,  0,  0,  0,  1,  1,  1,  1,  1,
+    1,  2,  2,  3,  3,  5,  4,  5,  8,  12,
+    1,  1,  1,  2,  2,  3,  3,  2,  3,  4,
+    4,  6,  5,  9,  11,  12,  14,  25
+  },
+  {
+    0,  0,  0,  0,  0,  0,  0,  0,  1,  1,
+    1,  1,  1,  1,  1,  2,  3,  3,  0,  0,
+    0,  0,  0,  0,  0,  0,  1,  1,  1,  1,
+    1,  1,  2,  2,  3,  4,  0,  0,  0,  1,
+    0,  1,  1,  1,  1,  1,  1,  1,  1,  2,
+    2,  3,  4,  5,  0,  0,  0,  1,  1,  1,
+    1,  1,  1,  1,  1,  2,  2,  3,  3,  4,
+    5,  8,  0,  0,  1,  1,  1,  1,  1,  1,
+    2,  2,  2,  3,  3,  4,  5,  6,  8,  13,
+    1,  1,  1,  2,  2,  3,  2,  2,  3,  4,
+    4,  5,  6,  8,  9,  12,  14,  25
+  }
+};
+
+/* offsets for quantizer representation levels*/
+const uint16_t WebRtcIsacfix_kOfLevelsGain[3] = {
+  0,  392,  779
+};
+
+const uint16_t WebRtcIsacfix_kOfLevelsShape[3] = {
+  0,  578,  1152
+};
+
+/* quantizer representation levels */
+
+
+
+const int32_t WebRtcIsacfix_kLevelsGainQ17[1176] = {
+  -364547,-231664,-102123,-573,104807,238257,368823,-758583,-640135,-510291
+  ,-377662,-252785,-113177,2627,112906,248601,389461,522691,644517,763974
+  ,-538963,-368179,-245823,-106095,-890,104299,241111,350730,493190,-800763
+  ,-646230,-510239,-382115,-248623,-111829,-2983,113852,251105,388114,519757
+  ,644048,774712,896334,1057931,-770009,-635310,-503690,-375087,-248106,-108525
+  ,-105,108259,243472,377948,519271,-1160885,-1032044,-914636,-777593,-647891
+  ,-518408,-388028,-254321,-115293,-598,117849,251296,385367,515510,652727
+  ,777432,920363,1038625,1153637,1316836,-632287,-505430,-379430,-248458,-118013
+  ,-888,118762,250266,381650,513327,652169,766471,932113,-2107480,-1971030
+  ,-1827020,-1698263,-1558670,-1436314,-1305377,-1172252,-1047355,-914202,-779651,-651001
+  ,-520999,-390394,-255761,-123490,-1893,126839,256703,385493,518607,651760
+  ,782750,908693,1044131,1163865,1311066,1424177,1582628,1709823,1831740,1955391
+  ,-1423044,-1288917,-1181281,-1043222,-911770,-780354,-646799,-522664,-386721,-258266
+  ,-128060,-1101,128233,259996,390336,519590,649290,778701,908010,1040796
+  ,1161235,1306889,1441882,-4446002,-4301031,-4194304,-4080591,-3947740,-3808975,-3686530
+  ,-3567839,-3383251,-3287089,-3136577,-3017405,-2869860,-2751321,-2619984,-2482932,-2354790
+  ,-2223147,-2090669,-1964135,-1831208,-1706697,-1570817,-1446008,-1305386,-1175773,-1046066
+  ,-915356,-785120,-653614,-524331,-393767,-260442,-130187,-799,128841,261466
+  ,393616,520542,652117,784613,914159,1045399,1181072,1308971,1442502,1570346
+  ,1693912,1843986,1966014,2090474,2224869,2364593,2475934,2628403,2752512,2856640
+  ,-4192441,-4063232,-3917821,-3799195,-3666233,-3519199,-3411021,-3269192,-3135684,-3008826
+  ,-2880875,-2747342,-2620981,-2494872,-2354979,-2229718,-2098939,-1964971,-1835399,-1703452
+  ,-1572806,-1440482,-1311794,-1179338,-1046521,-919823,-785914,-655335,-523416,-395507
+  ,-264833,-132184,-2546,131698,256217,391372,522688,651248,789964,909618
+  ,1035305,1179145,1313824,1436934,1552353,1693722,1815508,1972826,2096328,2228224
+  ,2359296,2490368,2598848,-6160384,-6029312,-5881382,-5767168,-5636096,-5505024,-5373952
+  ,-5228418,-5110384,-4954923,-4880576,-4710990,-4587364,-4471340,-4333905,-4211513,-4051293
+  ,-3907927,-3800105,-3675961,-3538640,-3413663,-3271148,-3152105,-3019103,-2869647,-2744015
+  ,-2620639,-2479385,-2364211,-2227611,-2095427,-1974497,-1834168,-1703561,-1568961,-1439826
+  ,-1309192,-1174050,-1050191,-917836,-786015,-656943,-518934,-394831,-257708,-128041
+  ,1610,128991,264442,393977,521383,653849,788164,918641,1049122,1181971
+  ,1308934,1439505,1571808,1706305,1836318,1966235,2097269,2228990,2357005,2490292
+  ,2617400,2749680,2881234,3014880,3145637,3276467,3409099,3536637,3671493,3802918
+  ,3929740,4065036,4194143,4325999,4456126,4586857,4717194,4843923,4978676,5110913
+  ,5245281,5371394,5499780,5633779,5762611,5897682,6028688,6167546,6296465,6421682
+  ,6548882,6682074,6809432,6941956,7078143,7204509,7334296,7475137,7609896,7732044
+  ,7861604,8002039,8131670,8259222,8390299,8522399,8650037,8782348,8908402,9037815
+  ,9164594,9300338,9434679,9574500,9699702,9833934,9948152,10083972,10244937,10332822
+  ,10485760,10600122,10760754,10892964,11010048,11111004,11272192,11403264,11525091,11624984
+  ,11796480,11915146,-393216,-262144,-101702,-740,100568,262144,393216,-786432
+  ,-655360,-524288,-383907,-243301,-94956,-156,95547,269629,416691,524288
+  ,655360,-393216,-262144,-88448,-37,87318,262144,393216,524288,-917504
+  ,-786432,-655360,-495894,-373308,-267503,-93211,4119,91308,250895,393216
+  ,526138,655360,786432,917504,-786432,-655360,-524288,-393216,-262144,-83497
+  ,222,86893,240922,393216,524288,-1048576,-917504,-790472,-655360,-508639
+  ,-383609,-262016,-95550,-3775,96692,256797,364847,534906,655360,786432
+  ,889679,1048576,1179648,1310720,1441792,-655360,-524288,-377684,-248408,-93690
+  ,1261,95441,227519,393216,524288,655360,786432,917504,-2097152,-1966080
+  ,-1809470,-1703936,-1572864,-1441792,-1314289,-1195149,-1056205,-917504,-809951,-657769
+  ,-521072,-383788,-248747,-106350,-2944,105550,243408,388548,521064,628732
+  ,786432,885456,1064548,1179648,1310720,1441792,1572864,1703936,1835008,-1441792
+  ,-1310720,-1179648,-1037570,-888492,-767774,-646634,-519935,-373458,-248029,-111915
+  ,760,111232,247735,379432,507672,672699,786432,917504,1048576,1179648
+  ,1310720,1441792,-4456448,-4325376,-4194304,-4063232,-3932160,-3801088,-3670016,-3538944
+  ,-3407872,-3276800,-3145728,-3014656,-2883584,-2752512,-2647002,-2490368,-2359296,-2228224
+  ,-2097152,-1951753,-1835008,-1703936,-1594177,-1462001,-1289150,-1160774,-1025917,-924928
+  ,-782509,-641294,-516191,-386630,-251910,-118886,5210,121226,253949,386008
+  ,517973,649374,780064,917783,1052462,1183856,1290593,1419389,1556641,1699884
+  ,1835008,1988314,2090470,2228224,2359296,2490368,2621440,2752512,2883584,-3801088
+  ,-3643514,-3539937,-3409931,-3263294,-3145658,-3012952,-2879230,-2752359,-2622556,-2483471
+  ,-2357556,-2226500,-2093112,-1965892,-1833664,-1701035,-1567767,-1440320,-1310556,-1178339
+  ,-1049625,-916812,-786477,-655277,-525050,-393773,-264828,-130696,-480,132126
+  ,260116,394197,527846,652294,785563,917183,1049511,1175958,1308161,1438759
+  ,1572253,1698835,1828535,1967072,2089391,2212798,2348901,2461547,2621440,2752512
+  ,2883584,-7309870,-7203780,-7062699,-6939106,-6790819,-6672036,-6553600,-6422317,-6288422
+  ,-6164694,-6026456,-5901410,-5754168,-5621459,-5502710,-5369686,-5240454,-5120712,-4976140
+  ,-4847970,-4723070,-4589083,-4450923,-4324680,-4189892,-4065551,-3931803,-3800209,-3668539
+  ,-3539395,-3404801,-3277470,-3141389,-3016710,-2885724,-2752612,-2618541,-2486762,-2354153
+  ,-2225059,-2094984,-1968194,-1830895,-1699508,-1575743,-1444516,-1308683,-1179714,-1053088
+  ,-917981,-783707,-653900,-524980,-395409,-260309,-131948,-3452,132113,263241
+  ,392185,522597,654134,788288,919810,1045795,1179210,1314201,1444235,1574447
+  ,1705193,1834009,1967332,2098102,2229019,2359147,2489859,2619878,2754966,2879671
+  ,3014438,3146143,3276733,3405958,3542196,3667493,3798815,3932961,4062458,4187125
+  ,4322346,4454875,4587752,4716809,4848274,4975027,5111957,5242215,5373085,5501158
+  ,5640140,5762918,5895358,6024008,6157906,6290628,6422713,6546339,6675888,6815606
+  ,6955288,7077501,7211630,7337893,7473635,7607175,7728310,7866475,7999658,8127888
+  ,8241758,8386483,8522550,8641582,8771915,8922139,9038632,9179385,9313426,9437184
+  ,9568256,9699328,9830400,9952933,10120004,10223616,10354688,10474645,10616832,-393216
+  ,-262144,-85425,-121,82533,262144,393216,-786432,-655360,-524288,-379928
+  ,-222821,-95200,287,95541,227093,393216,493567,655360,786432,-393216
+  ,-262144,-86805,510,86722,262144,393216,524288,-1048576,-917504,-786432
+  ,-624456,-529951,-395071,-241627,-101168,81,99975,241605,393216,524288
+  ,655360,786432,917504,-786432,-655360,-524288,-393216,-230359,-95619,-137
+  ,94425,226222,393216,524288,-1179648,-1048576,-917504,-773841,-655360,-492258
+  ,-379715,-244707,-103621,-434,104523,242680,381575,523659,650565,786432
+  ,917504,1048576,1179648,1310720,-786432,-629344,-524288,-376757,-242858,-101932
+  ,-2715,107155,239212,366480,514943,655360,786432,917504,-2228224,-2097152
+  ,-1966080,-1835008,-1703936,-1572864,-1441792,-1284584,-1179648,-1048819,-934658,-777181
+  ,-626371,-515660,-377493,-248975,-113036,436,113584,248354,379718,512475
+  ,653932,796494,917504,1048576,1179648,1310720,1441792,1572864,1703936,1835008
+  ,-1572864,-1441792,-1297608,-1161159,-1032316,-917092,-779770,-647384,-515529,-384269
+  ,-250003,-119252,1053,118111,249512,380545,512039,648101,770656,907003
+  ,1021725,1178082,1310720,1441792,-4587520,-4456448,-4325376,-4194304,-4063232,-3932160
+  ,-3801088,-3670016,-3538944,-3407872,-3276800,-3145728,-2999335,-2883584,-2752512,-2621440
+  ,-2490368,-2359296,-2228224,-2112691,-1966080,-1848781,-1709830,-1566109,-1438427,-1303530
+  ,-1176124,-1040936,-913876,-784585,-652025,-518361,-385267,-256342,-127297,-2733
+  ,125422,257792,389363,519911,651106,783805,909407,1044143,1174156,1309267
+  ,1436173,1553771,1708958,1814083,1967036,2095386,2255169,2359296,2478303,2621440
+  ,2752512,-4456448,-4325376,-4194304,-4063232,-3932160,-3797524,-3670016,-3560250,-3413217
+  ,-3257719,-3166416,-2986626,-2878000,-2781144,-2625383,-2495465,-2346792,-2230930,-2077063
+  ,-1949225,-1819274,-1697261,-1568664,-1443074,-1304302,-1175289,-1043794,-913423,-785561
+  ,-652104,-522835,-392667,-260517,-130088,-2,129509,260990,391931,522470
+  ,655770,784902,917093,1046445,1176951,1303121,1441362,1565401,1702022,1822856
+  ,1952852,2090384,2214607,2338436,2457483,2621440,-8781824,-8650752,-8519680,-8388608
+  ,-8260828,-8126464,-8003337,-7859030,-7750057,-7602176,-7471104,-7340032,-7193045,-7090588
+  ,-6946816,-6843344,-6676635,-6557575,-6447804,-6277614,-6159736,-6035729,-5884723,-5739567
+  ,-5634818,-5489867,-5372864,-5243300,-5098939,-4988639,-4856258,-4728494,-4591717,-4447428
+  ,-4322409,-4192918,-4062638,-3934141,-3797545,-3673373,-3531587,-3407391,-3277404,-3147797
+  ,-3013578,-2886548,-2749811,-2616428,-2490949,-2361301,-2228482,-2096883,-1964343,-1831754
+  ,-1702201,-1572495,-1442012,-1309242,-1182451,-1048996,-916905,-786510,-657079,-524730
+  ,-393672,-261313,-128743,166,130678,261334,393287,524155,655570,786839
+  ,917353,1052167,1179013,1309360,1442634,1571153,1703961,1832027,1965014,2097912
+  ,2224861,2355341,2490455,2623051,2753484,2877015,3015783,3144157,3273705,3405255
+  ,3542006,3669580,3802417,3935413,4065088,4190896,4333521,4456355,4579781,4713832
+  ,4845707,4978625,5113278,5243817,5382318,5500592,5638135,5761179,5900822,6029270
+  ,6186398,6297816,6436435,6559163,6666389,6806548,6950461,7086078,7195777,7350973
+  ,7480132,7614852,7743514,7847288,8014762,8126464,8257536,8388608,8519680,8650752
+  ,8781824,8912896,9043968,9175040,9306112,9437184
+};
+
+
+
+const int16_t WebRtcIsacfix_kLevelsShapeQ10[1735] = {
+  0,     0,    -1,     0,     0,     1,     0,     1,     0,  -821
+  ,     1,  -763,    -1,   656,  -620,     0,   633,  -636,     4,   615
+  ,  -630,     1,   649, -1773,  -670,     5,   678,  1810, -1876,  -676
+  ,     0,   691,  1843, -1806,  -743,    -1,   749,  1795,  2920, -2872
+  , -1761,  -772,    -3,   790,  1763,  2942,     0,     0,     0,     0
+  ,  -792,     2,     0,     0,     1,     0,  -854,     0,  -702,    -1
+  ,   662,  -624,    -5,   638,  -611,    -6,   638,  -647,     0,   651
+  ,  -685,    -4,   679,  2123, -1814,  -693,     0,   664,  1791, -1735
+  ,  -737,     0,   771,  1854,  2873, -2867, -1842,  -793,    -1,   821
+  ,  1826,  2805,  3922,     0,     0,     0,    -1,  -779,     1,   786
+  ,     1,  -708,     0,   789,  -799,     1,   797,  -663,     2,   646
+  ,  -600,     3,   609,  -600,     1,   658,  1807,  -627,    -3,   612
+  ,  -625,     3,   632, -1732,  -674,     1,   672,  2048, -1768,  -715
+  ,     0,   724,  1784, -3881, -3072, -1774,  -719,    -1,   730,  1811
+  , -2963, -1829,  -806,    -1,   816,  1795,  3050, -5389, -3784, -2942
+  , -1893,  -865,   -12,   867,  1885,  2945,  3928,    -2,     1,     4
+  ,     0,  -694,     2,   665,  -598,     5,   587,  -599,    -1,   661
+  ,  -656,    -7,   611,  -607,     5,   603,  -618,    -4,   620, -1794
+  ,  -645,    -2,   654,  -655,    -1,   658, -1801,  -700,     5,   707
+  ,  1927, -1752,  -745,    -8,   752,  1843, -2838, -1781,  -801,    11
+  ,   796,  1811,  2942,  3866, -3849, -3026, -1848,  -819,     2,   827
+  ,  1825,  2963, -3873, -2904, -1869,  -910,    -6,   903,  1902,  2885
+  ,  3978,  5286, -7168, -6081, -4989, -3968, -2963, -1970,  -943,    -2
+  ,   953,  1951,  2968,  3974,  5009,  6032,    -2,     3, -1024,     2
+  ,  1024,  -637,     1,   669,  -613,    -7,   630,  -603,     4,   612
+  ,  -612,     0,   590,  -645,   -11,   627,  -657,    -2,   671,  1849
+  , -1853,  -694,     2,   702,  1838, -3304, -1780,  -736,    -8,   732
+  ,  1772, -1709,  -755,    -6,   760,  1780, -2994, -1780,  -800,     8
+  ,   819,  1830,  2816, -4096, -2822, -1881,  -851,    -4,   855,  1872
+  ,  2840,  3899, -3908, -2904, -1878,  -887,     6,   897,  1872,  2942
+  ,  4008, -4992, -3881, -2933, -1915,  -928,     1,   937,  1919,  2900
+  ,  4009,  4881, -6848, -6157, -5065, -3981, -2983, -1972,  -978,    -1
+  ,   968,  1979,  2988,  4008,  5007,  6108,  7003,  8051,  9027,-13272
+  ,-12012,-11228,-10213, -9261, -8084, -7133, -6075, -5052, -4050, -3036
+  , -2014,  -996,    -4,  1007,  2031,  3038,  4049,  5074,  6134,  7069
+  ,  8094,  9069, 10212, 11049, 12104,    51, -1024,   -13,  1024,  -609
+  ,  -107,   613, -2048,  -687,   -95,   667,  2048, -3072, -1724,  -785
+  ,   -34,   732,  1819, -2048,  -703,   -26,   681,  2048, -2048,  -686
+  ,    -9,   665,  2048, -2048,  -702,    37,   748,  1723, -4096, -2786
+  , -1844,  -837,    37,   811,  1742,  3072, -4096, -2783, -1848,  -881
+  ,    39,   898,  1843,  2792,  3764, -5120, -4096, -2923, -1833,  -852
+  ,   -14,   862,  1824,  2834,  4096, -6144, -5120, -3914, -2842, -1870
+  ,  -886,   -27,   888,  1929,  2931,  4051, -7168, -6144, -5120, -3866
+  , -2933, -1915,  -927,    64,   933,  1902,  2929,  3912,  5063,  6144
+  ,-11264,-10240, -9216, -8192, -7086, -6144, -5039, -3972, -2943, -1929
+  ,  -941,     3,   938,  1942,  2959,  3933,  4905,  6088,  6983,  8192
+  , -9216, -8192, -7202, -6088, -4983, -4019, -2955, -1975,  -966,    17
+  ,   997,  1981,  2967,  3990,  4948,  6022,  6967,  8192,-13312,-12288
+  ,-11264,-10240, -9216, -8049, -6997, -6040, -5026, -4043, -3029, -2034
+  , -1015,   -23,   984,  1997,  3010,  4038,  5002,  6015,  6946,  8061
+  ,  9216, 10240,-12381,-11264,-10240, -9060, -8058, -7153, -6085, -5075
+  , -4051, -3042, -2037, -1017,    -5,  1007,  2028,  3035,  4050,  5088
+  ,  6111,  7160,  8156,  9215, 10095, 11229, 12202, 13016,-26624,-25600
+  ,-24582,-23671,-22674,-21400,-20355,-19508,-18315,-17269,-16361,-15299
+  ,-14363,-13294,-12262,-11237,-10203, -9227, -8165, -7156, -6116, -5122
+  , -4076, -3056, -2043, -1020,    -8,  1027,  2047,  3065,  4110,  5130
+  ,  6125,  7168,  8195,  9206, 10230, 11227, 12256, 13304, 14281, 15316
+  , 16374, 17382, 18428, 19388, 20361, 21468, 22448, 23781,     0,     0
+  ,    -1,     0,    -2,  1024,     0,     0,     0,    -1,  1024, -1024
+  ,     1, -1024,     4,  1024, -1024,     2,  1024, -1024,     2,  1024
+  , -2048, -1024,    -4,  1024, -1024,     2,  1024, -2048, -1024,    -3
+  ,  1024,  2048, -2048, -1024,     4,  1024,  2048, -3072, -2048, -1024
+  ,    -1,   662,  2048,     0,     1,     0,     0,     1,    -2,    -2
+  ,     0,     2,  1024,    -1,  1024, -1024,     4,  1024, -1024,     1
+  ,  1024, -1024,     1,  1024, -2048,  -781,    -4,   844,  -807,    -5
+  ,   866, -2048,  -726,   -13,   777,  2048, -2048,  -643,    -4,   617
+  ,  2048,  3072, -3072, -2048,  -629,     1,   630,  2048,  3072,     0
+  ,    -1,     1,    -2,     2,     1, -1024,     5, -1024,     6,  1024
+  , -1024,     4,  1024, -1024,     1,  1024, -1024,    -9,  1024,  -673
+  ,    -7,   655, -2048,  -665,   -15,   716, -2048,  -647,     4,   640
+  ,  2048, -2048,  -615,    -1,   635,  2048, -2048,  -613,    10,   637
+  ,  2048,  3072, -3072, -2048,  -647,    -3,   641,  2048,  3072, -5120
+  , -4096, -3072, -2048,  -681,     6,   685,  2048,  3072,  4096,     1
+  ,     1,     0,    -1,  1024, -1024,    -3,  1024, -1024,     6,  1024
+  , -1024,    -1,   769,  -733,     0,  1024,  -876,    -2,   653, -1024
+  ,    -4,   786,  -596,   -13,   595,  -634,    -2,   638,  2048, -2048
+  ,  -620,    -5,   620,  2048, -4096, -3072, -2048,  -639,    11,   655
+  ,  2048,  3072, -3072, -2048,  -659,     5,   663,  2048, -3072, -1823
+  ,  -687,    22,   695,  2048,  3072,  4096, -4096, -3072, -1848,  -715
+  ,    -3,   727,  1816,  3072,  4096,  5120, -8192, -7168, -6144, -5120
+  , -4096, -2884, -1771,  -756,   -14,   775,  1844,  3072,  4096,  5120
+  ,  6144,    -1,     1,     0, -1024,     2,   815,  -768,     2,   708
+  , -1024,    -3,   693,  -661,    -7,   607,  -643,    -5,   609,  -624
+  ,     3,   631,  -682,    -3,   691,  2048, -2048,  -640,     5,   650
+  ,  2048, -3072, -2048,  -701,     9,   704,  2048,  3072, -3072, -2048
+  ,  -670,    10,   674,  2048,  3072, -5120, -4096, -3072, -1749,  -738
+  ,     0,   733,  1811,  3072,  4096,  5120, -4096, -3072, -1873,  -753
+  ,     0,   756,  1874,  3072,  4096, -5120, -4096, -2900, -1838,  -793
+  ,    -6,   793,  1868,  2837,  4096,  5120, -7168, -6144, -5120, -4096
+  , -2832, -1891,  -828,     1,   828,  1901,  2823,  3912,  5120,  6144
+  ,  7168,  8192,-13312,-12288,-11264,-10240, -9216, -8192, -7168, -6144
+  , -5120, -3976, -3004, -1911,  -869,     7,   869,  1932,  3024,  3992
+  ,  5009,  6144,  7168,  8192,  9216, 10240, 11264,    -4,  1024,  -629
+  ,   -22,   609,  -623,     9,   640, -2048,  -768,     1,   682, -2048
+  ,  -741,    49,   722,  2048, -3072, -1706,  -808,   -20,   768,  1750
+  , -1684,  -727,   -29,   788,  1840,  3033, -1758,  -784,     0,   801
+  ,  1702, -3072, -1813,  -814,    38,   820,  1884,  2927, -4096, -3241
+  , -1839,  -922,    25,   882,  1886,  2812, -4096, -2982, -1923,  -894
+  ,    84,   912,  1869,  2778,  4096, -4928, -3965, -2902, -1920,  -883
+  ,     3,   917,  1953,  2921,  3957,  4922,  6144,  7168, -5120, -3916
+  , -2897, -1949,  -930,    31,   959,  1934,  2901,  3851,  5120, -9216
+  , -8192, -7046, -6029, -5030, -4034, -2980, -1969, -1013,   -76,   963
+  ,  1963,  2901,  3929,  4893,  6270,  7168,  8192,  9216,-12288,-11264
+  ,-10240, -9216, -8192, -6846, -6123, -5108, -4008, -3000, -1963,  -954
+  ,    -6,   958,  1992,  3009,  4020,  5085,  6097,  7168,  8192,  9216
+  ,-11264,-10139, -9194, -8127, -7156, -6102, -5053, -4049, -3036, -2025
+  , -1009,   -34,   974,  1984,  3034,  4028,  5138,  6000,  7057,  8166
+  ,  9070, 10033, 11360, 12288,-13312,-12288,-10932,-10190, -9120, -8123
+  , -7128, -6103, -5074, -4081, -3053, -2029,  -989,    -4,  1010,  2028
+  ,  3051,  4073,  5071,  6099,  7132,  8147,  9295, 10159, 11023, 12263
+  , 13312, 14336,-25600,-24576,-23552,-22529,-21504,-20480,-19456,-18637
+  ,-17425,-16165,-15316,-14327,-13606,-12135,-11182,-10107, -9153, -8144
+  , -7146, -6160, -5129, -4095, -3064, -2038, -1025,     1,  1031,  2072
+  ,  3074,  4088,  5123,  6149,  7157,  8173,  9198, 10244, 11250, 12268
+  , 13263, 14289, 15351, 16370, 17402, 18413, 19474, 20337, 21386, 22521
+  , 23367, 24350,     0,     0,     0,     0,     0,     0,     0,     0
+  , -1024,     0,  1024, -1024,     0,  1024, -1024,     0,  1024, -1024
+  ,     0,  1024, -1024,     0,  1024,  -773,     0,  1024,  -674,     0
+  ,   645, -2048,  -745,     0,   628,  2048, -2048,  -712,     0,   681
+  ,  2048,  3072, -3072, -2048,  -673,     0,   682,  1964,  3257,     0
+  ,     0,     0,     0,     0,     0,     0,     0, -1024,     0,  1024
+  , -1024,     0,  1024, -1024,     0,  1024,  -705,     0,   623,  -771
+  ,     0,  1024,  -786,     0,   688,  -631,     0,   652,  2048, -2048
+  ,  -627,    -1,   666,  2048, -3072, -1756,  -694,     0,   674,  2048
+  , -3098, -1879,  -720,     5,   694,  1886,  2958,  4096,     0,     0
+  ,     0,     0,  1024,     0,     0,  1024,  -769,     0,  1024, -1024
+  ,     0,  1024, -1024,     0,  1024,  -817,     0,   734,  -786,     0
+  ,   651,  -638,     0,   637,  -623,     0,   671,  -652,     0,   619
+  ,  2048, -2048,  -670,    -1,   663,  2048, -1908,  -680,     1,   686
+  ,  2048,  3072,  4096, -4096, -3072, -1833,  -711,     0,   727,  1747
+  ,  3072,  4096, -4096, -2971, -1826,  -762,     2,   766,  1832,  2852
+  ,  3928,  5079,     0,     0,     0, -1024,     0,  1024, -1024,     0
+  ,  -656,     0,  1024,  -599,     0,   620, -1024,     0,  1024,  -603
+  ,     0,   622,  -643,     0,   660,  -599,     0,   611,  -641,    -1
+  ,   651,  2048, -2048,  -648,    -2,   647,  1798, -3072, -2048,  -672
+  ,     2,   670,  2048, -3072, -1780,  -694,    -1,   706,  1751,  3072
+  , -3072, -1862,  -757,     7,   739,  1798,  3072,  4096, -5120, -4096
+  , -3253, -1811,  -787,     3,   782,  1887,  3123,  4096, -7252, -6144
+  , -5354, -4060, -2864, -1863,  -820,   -11,   847,  1903,  2970,  3851
+  ,  4921,  5957,  7168,  8192,  9306,     0,     0, -1024,     0,  1024
+  ,  -726,     0,   706,  -692,     0,   593,  -598,     0,   616,  -624
+  ,     0,   616,  -605,     0,   613, -2048,  -652,     1,   635,  2048
+  , -2048,  -647,    -1,   660,  2048, -1811,  -668,    -2,   685,  2048
+  , -1796,  -731,    -2,   730,  1702,  3072, -3072, -1766,  -747,    -4
+  ,   756,  1770,  3072, -4096, -3024, -1762,  -783,     4,   771,  1781
+  ,  3072, -5120, -4057, -2807, -1832,  -822,     0,   816,  1804,  2851
+  ,  3949,  5120, -6144, -4899, -3927, -2920, -1893,  -874,    -2,   868
+  ,  1881,  2905,  3960,  4912,  6144, -9216, -8192, -7168, -6225, -4963
+  , -3943, -2956, -1890,  -902,     0,   897,  1914,  2916,  3984,  4990
+  ,  6050,  7168,-11264,-10217, -9114, -8132, -7035, -5988, -4984, -4000
+  , -2980, -1962,  -927,     7,   931,  1956,  2981,  4031,  4972,  6213
+  ,  7227,  8192,  9216, 10240, 11170, 12288, 13312, 14336,     0,  1024
+  ,  -557,     1,   571,  -606,    -4,   612, -1676,  -707,    10,   673
+  ,  2048, -2048,  -727,     5,   686, -3072, -1772,  -755,    12,   716
+  ,  1877, -1856,  -786,     2,   786,  1712, -1685,  -818,   -16,   863
+  ,  1729, -3072, -1762,  -857,     3,   866,  1838,  2841, -3862, -2816
+  , -1864,  -925,    -2,   923,  1897,  2779, -2782, -1838,  -920,   -28
+  ,   931,  1951,  2835,  3804, -4815, -4001, -2940, -1934,  -959,   -22
+  ,   975,  1957,  2904,  3971,  4835, -5148, -3892, -2944, -1953,  -986
+  ,   -11,   989,  1968,  2939,  3949,  4947,  5902, -9216, -8192, -6915
+  , -6004, -4965, -4013, -3009, -1977,  -987,    -1,   982,  1972,  3000
+  ,  3960,  4939,  5814, -8976, -7888, -7084, -5955, -5043, -4009, -2991
+  , -2002, -1000,    -8,   993,  2011,  3023,  4026,  5028,  6023,  7052
+  ,  8014,  9216,-11240,-10036, -9125, -8118, -7105, -6062, -5048, -4047
+  , -3044, -2025, -1009,    -1,  1011,  2023,  3042,  4074,  5085,  6108
+  ,  7119,  8142,  9152, 10114, 11141, 12250, 13307,-15360,-14099,-13284
+  ,-12291,-11223,-10221, -9152, -8147, -7128, -6104, -5077, -4072, -3062
+  , -2033, -1020,     7,  1018,  2038,  3059,  4081,  5084,  6109,  7102
+  ,  8128,  9134, 10125, 11239, 12080,-23552,-22528,-21504,-20480,-19456
+  ,-18159,-17240,-16291,-15364,-14285,-13305,-12271,-11233,-10217, -9198
+  , -8175, -7157, -6134, -5122, -4089, -3071, -2047, -1018,     3,  1026
+  ,  2041,  3077,  4090,  5108,  6131,  7150,  8172,  9175, 10196, 11272
+  , 12303, 13273, 14328, 15332, 16334, 17381, 18409, 19423, 20423, 21451
+  , 22679, 23391, 24568, 25600, 26589
+};
+
+/* cdf tables for quantizer indices */
+const uint16_t WebRtcIsacfix_kCdfGain[1212] = {
+  0,  13,  301,  3730,  61784,  65167,  65489,  65535,  0,  17,
+  142,  314,  929,  2466,  7678,  56450,  63463,  64740,  65204,  65426,
+  65527,  65535,  0,  8,  100,  724,  6301,  60105,  65125,  65510,
+  65531,  65535,  0,  13,  117,  368,  1068,  3010,  11928,  53603,
+  61177,  63404,  64505,  65108,  65422,  65502,  65531,  65535,  0,  4,
+  17,  96,  410,  1859,  12125,  54361,  64103,  65305,  65497,  65535,
+  0,  4,  88,  230,  469,  950,  1746,  3228,  6092,  16592,
+  44756,  56848,  61256,  63308,  64325,  64920,  65309,  65460,  65502,  65522,
+  65535,  0,  88,  352,  1675,  6339,  20749,  46686,  59284,  63525,
+  64949,  65359,  65502,  65527,  65535,  0,  13,  38,  63,  117,
+  234,  381,  641,  929,  1407,  2043,  2809,  4032,  5753,  8792,
+  14407,  24308,  38941,  48947,  55403,  59293,  61411,  62688,  63630,  64329,
+  64840,  65188,  65376,  65472,  65506,  65527,  65531,  65535,  0,  8,
+  29,  75,  222,  615,  1327,  2801,  5623,  9931,  16094,  24966,
+  34419,  43458,  50676,  56186,  60055,  62500,  63936,  64765,  65225,  65435,
+  65514,  65535,  0,  8,  13,  15,  17,  21,  33,  59,
+  71,  92,  151,  243,  360,  456,  674,  934,  1223,  1583,
+  1989,  2504,  3031,  3617,  4354,  5154,  6163,  7411,  8780,  10747,
+  12874,  15591,  18974,  23027,  27436,  32020,  36948,  41830,  46205,  49797,
+  53042,  56094,  58418,  60360,  61763,  62818,  63559,  64103,  64509,  64798,
+  65045,  65162,  65288,  65363,  65447,  65506,  65522,  65531,  65533,  65535,
+  0,  4,  6,  25,  38,  71,  138,  264,  519,  808,
+  1227,  1825,  2516,  3408,  4279,  5560,  7092,  9197,  11420,  14108,
+  16947,  20300,  23926,  27459,  31164,  34827,  38575,  42178,  45540,  48747,
+  51444,  54090,  56426,  58460,  60080,  61595,  62734,  63668,  64275,  64673,
+  64936,  65112,  65217,  65334,  65426,  65464,  65477,  65489,  65518,  65527,
+  65529,  65531,  65533,  65535,  0,  2,  4,  8,  10,  12,
+  14,  16,  21,  33,  50,  71,  84,  92,  105,  138,
+  180,  255,  318,  377,  435,  473,  511,  590,  682,  758,
+  913,  1097,  1256,  1449,  1671,  1884,  2169,  2445,  2772,  3157,
+  3563,  3944,  4375,  4848,  5334,  5820,  6448,  7101,  7716,  8378,
+  9102,  9956,  10752,  11648,  12707,  13670,  14758,  15910,  17187,  18472,
+  19627,  20649,  21951,  23169,  24283,  25552,  26862,  28227,  29391,  30764,
+  31882,  33213,  34432,  35600,  36910,  38116,  39464,  40729,  41872,  43144,
+  44371,  45514,  46762,  47813,  48968,  50069,  51032,  51974,  52908,  53737,
+  54603,  55445,  56282,  56990,  57572,  58191,  58840,  59410,  59887,  60264,
+  60607,  60946,  61269,  61516,  61771,  61960,  62198,  62408,  62558,  62776,
+  62985,  63207,  63408,  63546,  63739,  63906,  64070,  64237,  64371,  64551,
+  64677,  64836,  64999,  65095,  65213,  65284,  65338,  65380,  65426,  65447,
+  65472,  65485,  65487,  65489,  65502,  65510,  65512,  65514,  65516,  65518,
+  65522,  65531,  65533,  65535,  0,  2,  4,  6,  65528,  65531,
+  65533,  65535,  0,  2,  4,  6,  8,  10,  222,  65321,
+  65513,  65528,  65531,  65533,  65535,  0,  2,  4,  50,  65476,
+  65529,  65531,  65533,  65535,  0,  2,  4,  6,  8,  12,
+  38,  544,  64936,  65509,  65523,  65525,  65529,  65531,  65533,  65535,
+  0,  2,  4,  6,  8,  10,  1055,  64508,  65528,  65531,
+  65533,  65535,  0,  2,  4,  6,  8,  10,  12,  123,
+  3956,  62999,  65372,  65495,  65515,  65521,  65523,  65525,  65527,  65529,
+  65531,  65533,  65535,  0,  2,  4,  12,  53,  4707,  59445,
+  65467,  65525,  65527,  65529,  65531,  65533,  65535,  0,  2,  4,
+  6,  8,  10,  12,  14,  16,  38,  40,  50,  67,
+  96,  234,  929,  14345,  55750,  64866,  65389,  65462,  65514,  65517,
+  65519,  65521,  65523,  65525,  65527,  65529,  65531,  65533,  65535,  0,
+  2,  4,  6,  8,  10,  15,  35,  91,  377,  1946,
+  13618,  52565,  63714,  65184,  65465,  65520,  65523,  65525,  65527,  65529,
+  65531,  65533,  65535,  0,  2,  4,  6,  8,  10,  12,
+  14,  16,  18,  20,  22,  24,  26,  28,  30,  32,
+  34,  36,  38,  40,  42,  44,  46,  48,  50,  52,
+  54,  82,  149,  362,  751,  1701,  4239,  12893,  38627,  55072,
+  60875,  63071,  64158,  64702,  65096,  65283,  65412,  65473,  65494,  65505,
+  65508,  65517,  65519,  65521,  65523,  65525,  65527,  65529,  65531,  65533,
+  65535,  0,  2,  15,  23,  53,  143,  260,  418,  698,
+  988,  1353,  1812,  2411,  3144,  4015,  5143,  6401,  7611,  8999,
+  10653,  12512,  14636,  16865,  19404,  22154,  24798,  27521,  30326,  33102,
+  35790,  38603,  41415,  43968,  46771,  49435,  52152,  54715,  57143,  59481,
+  61178,  62507,  63603,  64489,  64997,  65257,  65427,  65473,  65503,  65520,
+  65529,  65531,  65533,  65535,  0,  3,  6,  9,  26,  32,
+  44,  46,  64,  94,  111,  164,  205,  254,  327,  409,
+  506,  608,  733,  885,  1093,  1292,  1482,  1742,  1993,  2329,
+  2615,  3029,  3374,  3798,  4257,  4870,  5405,  5992,  6618,  7225,
+  7816,  8418,  9051,  9761,  10532,  11380,  12113,  13010,  13788,  14594,
+  15455,  16361,  17182,  18088,  18997,  20046,  20951,  21968,  22947,  24124,
+  25296,  26547,  27712,  28775,  29807,  30835,  31709,  32469,  33201,  34014,
+  34876,  35773,  36696,  37620,  38558,  39547,  40406,  41277,  42367,  43290,
+  44445,  45443,  46510,  47684,  48973,  50157,  51187,  52242,  53209,  54083,
+  55006,  55871,  56618,  57293,  57965,  58556,  59222,  59722,  60180,  60554,
+  60902,  61250,  61554,  61837,  62100,  62372,  62631,  62856,  63078,  63324,
+  63557,  63768,  63961,  64089,  64235,  64352,  64501,  64633,  64770,  64887,
+  65001,  65059,  65121,  65188,  65246,  65302,  65346,  65390,  65428,  65463,
+  65477,  65506,  65515,  65517,  65519,  65521,  65523,  65525,  65527,  65529,
+  65531,  65533,  65535,  0,  2,  4,  109,  65332,  65531,  65533,
+  65535,  0,  2,  4,  6,  8,  25,  1817,  63874,  65511,
+  65527,  65529,  65531,  65533,  65535,  0,  2,  4,  907,  65014,
+  65529,  65531,  65533,  65535,  0,  2,  4,  6,  8,  10,
+  12,  132,  2743,  62708,  65430,  65525,  65527,  65529,  65531,  65533,
+  65535,  0,  2,  4,  6,  8,  35,  3743,  61666,  65485,
+  65531,  65533,  65535,  0,  2,  4,  6,  8,  10,  23,
+  109,  683,  6905,  58417,  64911,  65398,  65497,  65518,  65525,  65527,
+  65529,  65531,  65533,  65535,  0,  2,  4,  6,  53,  510,
+  10209,  55212,  64573,  65441,  65522,  65529,  65531,  65533,  65535,  0,
+  2,  4,  6,  8,  10,  12,  14,  16,  18,  20,
+  22,  32,  90,  266,  1037,  3349,  14468,  50488,  62394,  64685,
+  65341,  65480,  65514,  65519,  65521,  65523,  65525,  65527,  65529,  65531,
+  65533,  65535,  0,  2,  4,  6,  9,  16,  37,  106,
+  296,  748,  1868,  5733,  18897,  45553,  60165,  63949,  64926,  65314,
+  65441,  65508,  65524,  65529,  65531,  65533,  65535,  0,  2,  4,
+  6,  8,  10,  12,  14,  16,  18,  20,  22,  24,
+  26,  28,  30,  32,  34,  36,  38,  40,  42,  44,
+  46,  48,  50,  83,  175,  344,  667,  1293,  2337,  4357,
+  8033,  14988,  28600,  43244,  52011,  57042,  59980,  61779,  63065,  63869,
+  64390,  64753,  64988,  65164,  65326,  65422,  65462,  65492,  65506,  65522,
+  65524,  65526,  65531,  65533,  65535,  0,  2,  4,  6,  8,
+  10,  12,  14,  16,  25,  39,  48,  55,  62,  65,
+  85,  106,  139,  169,  194,  252,  323,  485,  688,  1074,
+  1600,  2544,  3863,  5733,  8303,  11397,  15529,  20273,  25734,  31455,
+  36853,  41891,  46410,  50306,  53702,  56503,  58673,  60479,  61880,  62989,
+  63748,  64404,  64852,  65124,  65309,  65424,  65480,  65524,  65528,  65533,
+  65535,  0,  2,  4,  6,  8,  10,  12,  14,  21,
+  23,  25,  27,  29,  31,  39,  41,  43,  48,  60,
+  72,  79,  106,  136,  166,  187,  224,  252,  323,  381,
+  427,  478,  568,  660,  783,  912,  1046,  1175,  1365,  1567,
+  1768,  2024,  2347,  2659,  3049,  3529,  4033,  4623,  5281,  5925,
+  6726,  7526,  8417,  9468,  10783,  12141,  13571,  15222,  16916,  18659,
+  20350,  22020,  23725,  25497,  27201,  29026,  30867,  32632,  34323,  36062,
+  37829,  39466,  41144,  42654,  43981,  45343,  46579,  47759,  49013,  50171,
+  51249,  52283,  53245,  54148,  54938,  55669,  56421,  57109,  57791,  58464,
+  59092,  59674,  60105,  60653,  61083,  61407,  61757,  62095,  62388,  62649,
+  62873,  63157,  63358,  63540,  63725,  63884,  64046,  64155,  64278,  64426,
+  64548,  64654,  64806,  64906,  64994,  65077,  65137,  65215,  65277,  65324,
+  65354,  65409,  65437,  65455,  65462,  65490,  65495,  65499,  65508,  65511,
+  65513,  65515,  65517,  65519,  65521,  65523,  65525,  65527,  65529,  65531,
+  65533,  65535
+};
+
+const uint16_t WebRtcIsacfix_kCdfShape[2059] = {
+  0,  65535,  0,  65535,  0,  65535,  0,  65535,  0,  65535,
+  0,  65535,  0,  65535,  0,  65535,  0,  65535,  0,  4,
+  65535,  0,  8,  65514,  65535,  0,  29,  65481,  65535,  0,
+  121,  65439,  65535,  0,  239,  65284,  65535,  0,  8,  779,
+  64999,  65527,  65535,  0,  8,  888,  64693,  65522,  65535,  0,
+  29,  2604,  62843,  65497,  65531,  65535,  0,  25,  176,  4576,
+  61164,  65275,  65527,  65535,  0,  65535,  0,  65535,  0,  65535,
+  0,  65535,  0,  4,  65535,  0,  65535,  0,  65535,  0,
+  65535,  0,  65535,  0,  4,  65535,  0,  33,  65502,  65535,
+  0,  54,  65481,  65535,  0,  251,  65309,  65535,  0,  611,
+  65074,  65535,  0,  1273,  64292,  65527,  65535,  0,  4,  1809,
+  63940,  65518,  65535,  0,  88,  4392,  60603,  65426,  65531,  65535,
+  0,  25,  419,  7046,  57756,  64961,  65514,  65531,  65535,  0,
+  65535,  0,  65535,  0,  65535,  0,  65535,  0,  4,  65531,
+  65535,  0,  65535,  0,  8,  65531,  65535,  0,  4,  65527,
+  65535,  0,  17,  65510,  65535,  0,  42,  65481,  65535,  0,
+  197,  65342,  65531,  65535,  0,  385,  65154,  65535,  0,  1005,
+  64522,  65535,  0,  8,  1985,  63469,  65533,  65535,  0,  38,
+  3119,  61884,  65514,  65535,  0,  4,  6,  67,  4961,  60804,
+  65472,  65535,  0,  17,  565,  9182,  56538,  65087,  65514,  65535,
+  0,  8,  63,  327,  2118,  14490,  52774,  63839,  65376,  65522,
+  65535,  0,  65535,  0,  65535,  0,  65535,  0,  65535,  0,
+  17,  65522,  65535,  0,  59,  65489,  65535,  0,  50,  65522,
+  65535,  0,  54,  65489,  65535,  0,  310,  65179,  65535,  0,
+  615,  64836,  65535,  0,  4,  1503,  63965,  65535,  0,  2780,
+  63383,  65535,  0,  21,  3919,  61051,  65527,  65535,  0,  84,
+  6674,  59929,  65435,  65535,  0,  4,  255,  7976,  55784,  65150,
+  65518,  65531,  65535,  0,  4,  8,  582,  10726,  53465,  64949,
+  65518,  65535,  0,  29,  339,  3006,  17555,  49517,  62956,  65200,
+  65497,  65531,  65535,  0,  2,  33,  138,  565,  2324,  7670,
+  22089,  45966,  58949,  63479,  64966,  65380,  65518,  65535,  0,  65535,
+  0,  65535,  0,  2,  65533,  65535,  0,  46,  65514,  65535,
+  0,  414,  65091,  65535,  0,  540,  64911,  65535,  0,  419,
+  65162,  65535,  0,  976,  64790,  65535,  0,  2977,  62495,  65531,
+  65535,  0,  4,  3852,  61034,  65527,  65535,  0,  4,  29,
+  6021,  60243,  65468,  65535,  0,  84,  6711,  58066,  65418,  65535,
+  0,  13,  281,  9550,  54917,  65125,  65506,  65535,  0,  2,
+  63,  984,  12108,  52644,  64342,  65435,  65527,  65535,  0,  29,
+  251,  2014,  14871,  47553,  62881,  65229,  65518,  65535,  0,  13,
+  142,  749,  4220,  18497,  45200,  60913,  64823,  65426,  65527,  65535,
+  0,  13,  71,  264,  1176,  3789,  10500,  24480,  43488,  56324,
+  62315,  64493,  65242,  65464,  65514,  65522,  65531,  65535,  0,  4,
+  13,  38,  109,  205,  448,  850,  1708,  3429,  6276,  11371,
+  19221,  29734,  40955,  49391,  55411,  59460,  62102,  63793,  64656,  65150,
+  65401,  65485,  65522,  65531,  65535,  0,  65535,  0,  2,  65533,
+  65535,  0,  1160,  65476,  65535,  0,  2,  6640,  64763,  65533,
+  65535,  0,  2,  38,  9923,  61009,  65527,  65535,  0,  2,
+  4949,  63092,  65533,  65535,  0,  2,  3090,  63398,  65533,  65535,
+  0,  2,  2520,  58744,  65510,  65535,  0,  2,  13,  544,
+  8784,  51403,  65148,  65533,  65535,  0,  2,  25,  1017,  10412,
+  43550,  63651,  65489,  65527,  65535,  0,  2,  4,  29,  783,
+  13377,  52462,  64524,  65495,  65533,  65535,  0,  2,  4,  6,
+  100,  1817,  18451,  52590,  63559,  65376,  65531,  65535,  0,  2,
+  4,  6,  46,  385,  2562,  11225,  37416,  60488,  65026,  65487,
+  65529,  65533,  65535,  0,  2,  4,  6,  8,  10,  12,
+  42,  222,  971,  5221,  19811,  45048,  60312,  64486,  65294,  65474,
+  65525,  65529,  65533,  65535,  0,  2,  4,  8,  71,  167,
+  666,  2533,  7875,  19622,  38082,  54359,  62108,  64633,  65290,  65495,
+  65529,  65533,  65535,  0,  2,  4,  6,  8,  10,  13,
+  109,  586,  1930,  4949,  11600,  22641,  36125,  48312,  56899,  61495,
+  63927,  64932,  65389,  65489,  65518,  65531,  65533,  65535,  0,  4,
+  6,  8,  67,  209,  712,  1838,  4195,  8432,  14432,  22834,
+  31723,  40523,  48139,  53929,  57865,  60657,  62403,  63584,  64363,  64907,
+  65167,  65372,  65472,  65514,  65535,  0,  2,  4,  13,  25,
+  42,  46,  50,  75,  113,  147,  281,  448,  657,  909,
+  1185,  1591,  1976,  2600,  3676,  5317,  7398,  9914,  12941,  16169,
+  19477,  22885,  26464,  29851,  33360,  37228,  41139,  44802,  48654,  52058,
+  55181,  57676,  59581,  61022,  62190,  63107,  63676,  64199,  64547,  64924,
+  65158,  65313,  65430,  65481,  65518,  65535,  0,  65535,  0,  65535,
+  0,  65535,  0,  65535,  0,  65533,  65535,  0,  65535,  0,
+  65535,  0,  65535,  0,  65533,  65535,  0,  2,  65535,  0,
+  2,  65533,  65535,  0,  2,  65533,  65535,  0,  2,  65533,
+  65535,  0,  2,  4,  65533,  65535,  0,  2,  65533,  65535,
+  0,  2,  4,  65531,  65533,  65535,  0,  2,  4,  65531,
+  65533,  65535,  0,  2,  4,  6,  65524,  65533,  65535,  0,
+  65535,  0,  65535,  0,  65535,  0,  65535,  0,  65535,  0,
+  65535,  0,  65535,  0,  65535,  0,  65533,  65535,  0,  65533,
+  65535,  0,  2,  65533,  65535,  0,  2,  65533,  65535,  0,
+  2,  65533,  65535,  0,  2,  4,  65532,  65535,  0,  6,
+  65523,  65535,  0,  2,  15,  65530,  65533,  65535,  0,  2,
+  35,  65493,  65531,  65533,  65535,  0,  2,  4,  158,  65382,
+  65531,  65533,  65535,  0,  65535,  0,  65535,  0,  65535,  0,
+  65535,  0,  65535,  0,  65535,  0,  2,  65535,  0,  2,
+  65533,  65535,  0,  2,  65533,  65535,  0,  2,  65533,  65535,
+  0,  2,  65533,  65535,  0,  9,  65512,  65535,  0,  2,
+  12,  65529,  65535,  0,  2,  73,  65434,  65533,  65535,  0,
+  2,  240,  65343,  65533,  65535,  0,  2,  476,  65017,  65531,
+  65533,  65535,  0,  2,  4,  1046,  64686,  65531,  65533,  65535,
+  0,  2,  4,  6,  8,  1870,  63898,  65529,  65531,  65533,
+  65535,  0,  65535,  0,  65535,  0,  65535,  0,  65533,  65535,
+  0,  2,  65533,  65535,  0,  2,  65533,  65535,  0,  2,
+  65532,  65535,  0,  6,  65533,  65535,  0,  6,  65523,  65535,
+  0,  2,  65532,  65535,  0,  137,  65439,  65535,  0,  576,
+  64899,  65533,  65535,  0,  2,  289,  65299,  65533,  65535,  0,
+  2,  4,  6,  880,  64134,  65531,  65533,  65535,  0,  2,
+  4,  1853,  63347,  65533,  65535,  0,  2,  6,  2516,  61762,
+  65529,  65531,  65533,  65535,  0,  2,  4,  9,  3980,  61380,
+  65503,  65529,  65531,  65533,  65535,  0,  2,  4,  6,  8,
+  10,  12,  61,  6393,  59859,  65466,  65527,  65529,  65531,  65533,
+  65535,  0,  65535,  0,  65535,  0,  65535,  0,  2,  65532,
+  65535,  0,  3,  65529,  65535,  0,  2,  65529,  65535,  0,
+  61,  65453,  65535,  0,  234,  65313,  65535,  0,  503,  65138,
+  65535,  0,  155,  65402,  65533,  65535,  0,  2,  1058,  64554,
+  65533,  65535,  0,  2,  4,  3138,  62109,  65531,  65533,  65535,
+  0,  2,  4,  2031,  63339,  65531,  65533,  65535,  0,  2,
+  4,  6,  9,  4155,  60778,  65523,  65529,  65531,  65533,  65535,
+  0,  2,  4,  41,  6189,  59269,  65490,  65531,  65533,  65535,
+  0,  2,  4,  6,  210,  8789,  57043,  65400,  65528,  65531,
+  65533,  65535,  0,  2,  4,  6,  8,  26,  453,  10086,
+  55499,  64948,  65483,  65524,  65527,  65529,  65531,  65533,  65535,  0,
+  2,  4,  6,  8,  10,  12,  14,  16,  18,  20,
+  114,  1014,  11202,  52670,  64226,  65356,  65503,  65514,  65523,  65525,
+  65527,  65529,  65531,  65533,  65535,  0,  65533,  65535,  0,  15,
+  65301,  65535,  0,  152,  64807,  65535,  0,  2,  3328,  63308,
+  65535,  0,  2,  4050,  59730,  65533,  65535,  0,  2,  164,
+  10564,  61894,  65529,  65535,  0,  15,  6712,  59831,  65076,  65532,
+  65535,  0,  32,  7712,  57449,  65459,  65535,  0,  2,  210,
+  7849,  53110,  65021,  65523,  65535,  0,  2,  12,  1081,  13883,
+  48262,  62870,  65477,  65535,  0,  2,  88,  847,  6145,  37852,
+  62012,  65454,  65533,  65535,  0,  9,  47,  207,  1823,  14522,
+  45521,  61069,  64891,  65481,  65528,  65531,  65533,  65535,  0,  2,
+  9,  488,  2881,  12758,  38703,  58412,  64420,  65410,  65533,  65535,
+  0,  2,  4,  6,  61,  333,  1891,  6486,  19720,  43188,
+  57547,  62472,  64796,  65421,  65497,  65523,  65529,  65531,  65533,  65535,
+  0,  2,  4,  6,  8,  10,  12,  29,  117,  447,
+  1528,  6138,  21242,  43133,  56495,  62432,  64746,  65362,  65500,  65529,
+  65531,  65533,  65535,  0,  2,  18,  105,  301,  760,  1490,
+  3472,  7568,  15002,  26424,  40330,  53029,  60048,  62964,  64274,  64890,
+  65337,  65445,  65489,  65513,  65527,  65530,  65533,  65535,  0,  2,
+  4,  6,  41,  102,  409,  853,  2031,  4316,  7302,  11328,
+  16869,  24825,  34926,  43481,  50877,  56126,  59874,  62103,  63281,  63857,
+  64166,  64675,  65382,  65522,  65531,  65533,  65535,  0,  2,  4,
+  6,  8,  10,  12,  14,  16,  18,  29,  38,  53,
+  58,  96,  181,  503,  1183,  2849,  5590,  8600,  11379,  13942,
+  16478,  19453,  22638,  26039,  29411,  32921,  37596,  41433,  44998,  48560,
+  51979,  55106,  57666,  59892,  61485,  62616,  63484,  64018,  64375,  64685,
+  64924,  65076,  65278,  65395,  65471,  65509,  65529,  65535,  0,  65535,
+  0,  65535,  0,  65535,  0,  65535,  0,  65535,  0,  65535,
+  0,  65535,  0,  65535,  0,  2,  65533,  65535,  0,  2,
+  65533,  65535,  0,  2,  65533,  65535,  0,  2,  65533,  65535,
+  0,  2,  65533,  65535,  0,  2,  65533,  65535,  0,  7,
+  65519,  65535,  0,  2,  14,  65491,  65533,  65535,  0,  2,
+  81,  65427,  65531,  65533,  65535,  0,  2,  4,  312,  65293,
+  65528,  65533,  65535,  0,  65535,  0,  65535,  0,  65535,  0,
+  65535,  0,  65535,  0,  65535,  0,  65535,  0,  65535,  0,
+  2,  65533,  65535,  0,  2,  65533,  65535,  0,  2,  65533,
+  65535,  0,  5,  65523,  65535,  0,  2,  65533,  65535,  0,
+  7,  65526,  65535,  0,  46,  65464,  65533,  65535,  0,  2,
+  120,  65309,  65533,  65535,  0,  2,  5,  362,  65097,  65533,
+  65535,  0,  2,  18,  1164,  64785,  65528,  65531,  65533,  65535,
+  0,  65535,  0,  65535,  0,  65535,  0,  65533,  65535,  0,
+  65535,  0,  65533,  65535,  0,  2,  65533,  65535,  0,  2,
+  65533,  65535,  0,  2,  65533,  65535,  0,  2,  65530,  65535,
+  0,  2,  65523,  65535,  0,  69,  65477,  65535,  0,  141,
+  65459,  65535,  0,  194,  65325,  65533,  65535,  0,  2,  543,
+  64912,  65533,  65535,  0,  5,  1270,  64301,  65529,  65531,  65533,
+  65535,  0,  2,  4,  12,  2055,  63538,  65508,  65531,  65533,
+  65535,  0,  2,  7,  102,  3775,  61970,  65429,  65526,  65528,
+  65533,  65535,  0,  65535,  0,  65535,  0,  65535,  0,  2,
+  65533,  65535,  0,  2,  65535,  0,  9,  65533,  65535,  0,
+  25,  65512,  65535,  0,  2,  65533,  65535,  0,  44,  65480,
+  65535,  0,  48,  65475,  65535,  0,  162,  65373,  65535,  0,
+  637,  64806,  65533,  65535,  0,  2,  935,  64445,  65533,  65535,
+  0,  2,  4,  1662,  64083,  65533,  65535,  0,  2,  12,
+  3036,  62469,  65521,  65533,  65535,  0,  2,  120,  5405,  60468,
+  65469,  65531,  65533,  65535,  0,  2,  4,  18,  254,  6663,
+  58999,  65272,  65528,  65533,  65535,  0,  2,  4,  9,  12,
+  67,  591,  8981,  56781,  64564,  65365,  65508,  65524,  65526,  65529,
+  65531,  65533,  65535,  0,  65535,  0,  65535,  0,  2,  65533,
+  65535,  0,  9,  65526,  65535,  0,  14,  65503,  65535,  0,
+  127,  65390,  65535,  0,  517,  64990,  65535,  0,  178,  65330,
+  65535,  0,  2,  1055,  64533,  65533,  65535,  0,  2,  1558,
+  63942,  65533,  65535,  0,  2,  2205,  63173,  65533,  65535,  0,
+  25,  4493,  60862,  65505,  65533,  65535,  0,  2,  48,  5890,
+  59442,  65482,  65533,  65535,  0,  2,  4,  127,  7532,  58191,
+  65394,  65533,  65535,  0,  2,  5,  32,  550,  10388,  54924,
+  65046,  65510,  65531,  65533,  65535,  0,  2,  4,  30,  150,
+  1685,  14340,  51375,  63619,  65288,  65503,  65528,  65533,  65535,  0,
+  2,  4,  6,  8,  28,  97,  473,  2692,  15407,  50020,
+  62880,  65064,  65445,  65508,  65531,  65533,  65535,  0,  2,  4,
+  12,  32,  79,  150,  372,  907,  2184,  5868,  18207,  45431,
+  59856,  64031,  65096,  65401,  65481,  65507,  65521,  65523,  65525,  65527,
+  65529,  65531,  65533,  65535,  0,  65533,  65535,  0,  182,  65491,
+  65535,  0,  877,  64286,  65535,  0,  9,  2708,  63612,  65533,
+  65535,  0,  2,  6038,  59532,  65535,  0,  2,  92,  5500,
+  60539,  65533,  65535,  0,  268,  8908,  56512,  65385,  65535,  0,
+  129,  13110,  52742,  65036,  65535,  0,  2,  806,  14003,  51929,
+  64732,  65523,  65535,  0,  7,  92,  2667,  18159,  47678,  62610,
+  65355,  65535,  0,  32,  1836,  19676,  48237,  61677,  64960,  65526,
+  65535,  0,  21,  159,  967,  5668,  22782,  44709,  58317,  64020,
+  65406,  65528,  65535,  0,  7,  162,  1838,  8328,  23929,  43014,
+  56394,  63374,  65216,  65484,  65521,  65535,  0,  2,  4,  6,
+  28,  268,  1120,  3613,  10688,  24185,  40989,  54917,  61684,  64510,
+  65403,  65530,  65535,  0,  2,  16,  44,  139,  492,  1739,
+  5313,  13558,  26766,  41566,  52446,  58937,  62815,  64480,  65201,  65454,
+  65524,  65533,  65535,  0,  7,  25,  76,  263,  612,  1466,
+  3325,  6832,  12366,  20152,  29466,  39255,  47360,  53506,  57740,  60726,
+  62845,  64131,  64882,  65260,  65459,  65521,  65528,  65530,  65535,  0,
+  2,  4,  14,  48,  136,  312,  653,  1240,  2369,  4327,
+  7028,  10759,  15449,  21235,  28027,  35386,  42938,  49562,  54990,  59119,
+  62086,  63916,  64863,  65249,  65445,  65493,  65523,  65535,  0,  2,
+  4,  6,  8,  10,  12,  21,  83,  208,  409,  723,
+  1152,  1868,  2951,  4463,  6460,  8979,  11831,  15195,  18863,  22657,
+  26762,  30881,  34963,  39098,  43054,  47069,  50620,  53871,  56821,  59386,
+  61340,  62670,  63512,  64023,  64429,  64750,  64944,  65126,  65279,  65366,
+  65413,  65445,  65473,  65505,  65510,  65521,  65528,  65530,  65535
+};
+
+/* pointers to cdf tables for quantizer indices */
+const uint16_t *WebRtcIsacfix_kCdfGainPtr[3][12] = {
+  { WebRtcIsacfix_kCdfGain +0 +0,   WebRtcIsacfix_kCdfGain +0 +8,   WebRtcIsacfix_kCdfGain +0 +22,
+    WebRtcIsacfix_kCdfGain +0 +32,  WebRtcIsacfix_kCdfGain +0 +48,  WebRtcIsacfix_kCdfGain +0 +60,
+    WebRtcIsacfix_kCdfGain +0 +81,  WebRtcIsacfix_kCdfGain +0 +95,  WebRtcIsacfix_kCdfGain +0 +128,
+    WebRtcIsacfix_kCdfGain +0 +152, WebRtcIsacfix_kCdfGain +0 +210, WebRtcIsacfix_kCdfGain +0 +264
+  },
+  { WebRtcIsacfix_kCdfGain +404 +0,   WebRtcIsacfix_kCdfGain +404 +8,   WebRtcIsacfix_kCdfGain +404 +21,
+    WebRtcIsacfix_kCdfGain +404 +30,  WebRtcIsacfix_kCdfGain +404 +46,  WebRtcIsacfix_kCdfGain +404 +58,
+    WebRtcIsacfix_kCdfGain +404 +79,  WebRtcIsacfix_kCdfGain +404 +93,  WebRtcIsacfix_kCdfGain +404 +125,
+    WebRtcIsacfix_kCdfGain +404 +149, WebRtcIsacfix_kCdfGain +404 +207, WebRtcIsacfix_kCdfGain +404 +260
+  },
+  { WebRtcIsacfix_kCdfGain +803 +0,   WebRtcIsacfix_kCdfGain +803 +8,   WebRtcIsacfix_kCdfGain +803 +22,
+    WebRtcIsacfix_kCdfGain +803 +31,  WebRtcIsacfix_kCdfGain +803 +48,  WebRtcIsacfix_kCdfGain +803 +60,
+    WebRtcIsacfix_kCdfGain +803 +81,  WebRtcIsacfix_kCdfGain +803 +96,  WebRtcIsacfix_kCdfGain +803 +129,
+    WebRtcIsacfix_kCdfGain +803 +154, WebRtcIsacfix_kCdfGain +803 +212, WebRtcIsacfix_kCdfGain +803 +268
+  }
+};
+
+const uint16_t *WebRtcIsacfix_kCdfShapePtr[3][108] = {
+  { WebRtcIsacfix_kCdfShape +0 +0,   WebRtcIsacfix_kCdfShape +0 +2,   WebRtcIsacfix_kCdfShape +0 +4,
+    WebRtcIsacfix_kCdfShape +0 +6,   WebRtcIsacfix_kCdfShape +0 +8,   WebRtcIsacfix_kCdfShape +0 +10,
+    WebRtcIsacfix_kCdfShape +0 +12,  WebRtcIsacfix_kCdfShape +0 +14,  WebRtcIsacfix_kCdfShape +0 +16,
+    WebRtcIsacfix_kCdfShape +0 +18,  WebRtcIsacfix_kCdfShape +0 +21,  WebRtcIsacfix_kCdfShape +0 +25,
+    WebRtcIsacfix_kCdfShape +0 +29,  WebRtcIsacfix_kCdfShape +0 +33,  WebRtcIsacfix_kCdfShape +0 +37,
+    WebRtcIsacfix_kCdfShape +0 +43,  WebRtcIsacfix_kCdfShape +0 +49,  WebRtcIsacfix_kCdfShape +0 +56,
+    WebRtcIsacfix_kCdfShape +0 +64,  WebRtcIsacfix_kCdfShape +0 +66,  WebRtcIsacfix_kCdfShape +0 +68,
+    WebRtcIsacfix_kCdfShape +0 +70,  WebRtcIsacfix_kCdfShape +0 +72,  WebRtcIsacfix_kCdfShape +0 +75,
+    WebRtcIsacfix_kCdfShape +0 +77,  WebRtcIsacfix_kCdfShape +0 +79,  WebRtcIsacfix_kCdfShape +0 +81,
+    WebRtcIsacfix_kCdfShape +0 +83,  WebRtcIsacfix_kCdfShape +0 +86,  WebRtcIsacfix_kCdfShape +0 +90,
+    WebRtcIsacfix_kCdfShape +0 +94,  WebRtcIsacfix_kCdfShape +0 +98,  WebRtcIsacfix_kCdfShape +0 +102,
+    WebRtcIsacfix_kCdfShape +0 +107, WebRtcIsacfix_kCdfShape +0 +113, WebRtcIsacfix_kCdfShape +0 +120,
+    WebRtcIsacfix_kCdfShape +0 +129, WebRtcIsacfix_kCdfShape +0 +131, WebRtcIsacfix_kCdfShape +0 +133,
+    WebRtcIsacfix_kCdfShape +0 +135, WebRtcIsacfix_kCdfShape +0 +137, WebRtcIsacfix_kCdfShape +0 +141,
+    WebRtcIsacfix_kCdfShape +0 +143, WebRtcIsacfix_kCdfShape +0 +147, WebRtcIsacfix_kCdfShape +0 +151,
+    WebRtcIsacfix_kCdfShape +0 +155, WebRtcIsacfix_kCdfShape +0 +159, WebRtcIsacfix_kCdfShape +0 +164,
+    WebRtcIsacfix_kCdfShape +0 +168, WebRtcIsacfix_kCdfShape +0 +172, WebRtcIsacfix_kCdfShape +0 +178,
+    WebRtcIsacfix_kCdfShape +0 +184, WebRtcIsacfix_kCdfShape +0 +192, WebRtcIsacfix_kCdfShape +0 +200,
+    WebRtcIsacfix_kCdfShape +0 +211, WebRtcIsacfix_kCdfShape +0 +213, WebRtcIsacfix_kCdfShape +0 +215,
+    WebRtcIsacfix_kCdfShape +0 +217, WebRtcIsacfix_kCdfShape +0 +219, WebRtcIsacfix_kCdfShape +0 +223,
+    WebRtcIsacfix_kCdfShape +0 +227, WebRtcIsacfix_kCdfShape +0 +231, WebRtcIsacfix_kCdfShape +0 +235,
+    WebRtcIsacfix_kCdfShape +0 +239, WebRtcIsacfix_kCdfShape +0 +243, WebRtcIsacfix_kCdfShape +0 +248,
+    WebRtcIsacfix_kCdfShape +0 +252, WebRtcIsacfix_kCdfShape +0 +258, WebRtcIsacfix_kCdfShape +0 +264,
+    WebRtcIsacfix_kCdfShape +0 +273, WebRtcIsacfix_kCdfShape +0 +282, WebRtcIsacfix_kCdfShape +0 +293,
+    WebRtcIsacfix_kCdfShape +0 +308, WebRtcIsacfix_kCdfShape +0 +310, WebRtcIsacfix_kCdfShape +0 +312,
+    WebRtcIsacfix_kCdfShape +0 +316, WebRtcIsacfix_kCdfShape +0 +320, WebRtcIsacfix_kCdfShape +0 +324,
+    WebRtcIsacfix_kCdfShape +0 +328, WebRtcIsacfix_kCdfShape +0 +332, WebRtcIsacfix_kCdfShape +0 +336,
+    WebRtcIsacfix_kCdfShape +0 +341, WebRtcIsacfix_kCdfShape +0 +347, WebRtcIsacfix_kCdfShape +0 +354,
+    WebRtcIsacfix_kCdfShape +0 +360, WebRtcIsacfix_kCdfShape +0 +368, WebRtcIsacfix_kCdfShape +0 +378,
+    WebRtcIsacfix_kCdfShape +0 +388, WebRtcIsacfix_kCdfShape +0 +400, WebRtcIsacfix_kCdfShape +0 +418,
+    WebRtcIsacfix_kCdfShape +0 +445, WebRtcIsacfix_kCdfShape +0 +447, WebRtcIsacfix_kCdfShape +0 +451,
+    WebRtcIsacfix_kCdfShape +0 +455, WebRtcIsacfix_kCdfShape +0 +461, WebRtcIsacfix_kCdfShape +0 +468,
+    WebRtcIsacfix_kCdfShape +0 +474, WebRtcIsacfix_kCdfShape +0 +480, WebRtcIsacfix_kCdfShape +0 +486,
+    WebRtcIsacfix_kCdfShape +0 +495, WebRtcIsacfix_kCdfShape +0 +505, WebRtcIsacfix_kCdfShape +0 +516,
+    WebRtcIsacfix_kCdfShape +0 +528, WebRtcIsacfix_kCdfShape +0 +543, WebRtcIsacfix_kCdfShape +0 +564,
+    WebRtcIsacfix_kCdfShape +0 +583, WebRtcIsacfix_kCdfShape +0 +608, WebRtcIsacfix_kCdfShape +0 +635
+  },
+  { WebRtcIsacfix_kCdfShape +686 +0,   WebRtcIsacfix_kCdfShape +686 +2,   WebRtcIsacfix_kCdfShape +686 +4,
+    WebRtcIsacfix_kCdfShape +686 +6,   WebRtcIsacfix_kCdfShape +686 +8,   WebRtcIsacfix_kCdfShape +686 +11,
+    WebRtcIsacfix_kCdfShape +686 +13,  WebRtcIsacfix_kCdfShape +686 +15,  WebRtcIsacfix_kCdfShape +686 +17,
+    WebRtcIsacfix_kCdfShape +686 +20,  WebRtcIsacfix_kCdfShape +686 +23,  WebRtcIsacfix_kCdfShape +686 +27,
+    WebRtcIsacfix_kCdfShape +686 +31,  WebRtcIsacfix_kCdfShape +686 +35,  WebRtcIsacfix_kCdfShape +686 +40,
+    WebRtcIsacfix_kCdfShape +686 +44,  WebRtcIsacfix_kCdfShape +686 +50,  WebRtcIsacfix_kCdfShape +686 +56,
+    WebRtcIsacfix_kCdfShape +686 +63,  WebRtcIsacfix_kCdfShape +686 +65,  WebRtcIsacfix_kCdfShape +686 +67,
+    WebRtcIsacfix_kCdfShape +686 +69,  WebRtcIsacfix_kCdfShape +686 +71,  WebRtcIsacfix_kCdfShape +686 +73,
+    WebRtcIsacfix_kCdfShape +686 +75,  WebRtcIsacfix_kCdfShape +686 +77,  WebRtcIsacfix_kCdfShape +686 +79,
+    WebRtcIsacfix_kCdfShape +686 +82,  WebRtcIsacfix_kCdfShape +686 +85,  WebRtcIsacfix_kCdfShape +686 +89,
+    WebRtcIsacfix_kCdfShape +686 +93,  WebRtcIsacfix_kCdfShape +686 +97,  WebRtcIsacfix_kCdfShape +686 +102,
+    WebRtcIsacfix_kCdfShape +686 +106, WebRtcIsacfix_kCdfShape +686 +112, WebRtcIsacfix_kCdfShape +686 +119,
+    WebRtcIsacfix_kCdfShape +686 +127, WebRtcIsacfix_kCdfShape +686 +129, WebRtcIsacfix_kCdfShape +686 +131,
+    WebRtcIsacfix_kCdfShape +686 +133, WebRtcIsacfix_kCdfShape +686 +135, WebRtcIsacfix_kCdfShape +686 +137,
+    WebRtcIsacfix_kCdfShape +686 +139, WebRtcIsacfix_kCdfShape +686 +142, WebRtcIsacfix_kCdfShape +686 +146,
+    WebRtcIsacfix_kCdfShape +686 +150, WebRtcIsacfix_kCdfShape +686 +154, WebRtcIsacfix_kCdfShape +686 +158,
+    WebRtcIsacfix_kCdfShape +686 +162, WebRtcIsacfix_kCdfShape +686 +167, WebRtcIsacfix_kCdfShape +686 +173,
+    WebRtcIsacfix_kCdfShape +686 +179, WebRtcIsacfix_kCdfShape +686 +186, WebRtcIsacfix_kCdfShape +686 +194,
+    WebRtcIsacfix_kCdfShape +686 +205, WebRtcIsacfix_kCdfShape +686 +207, WebRtcIsacfix_kCdfShape +686 +209,
+    WebRtcIsacfix_kCdfShape +686 +211, WebRtcIsacfix_kCdfShape +686 +214, WebRtcIsacfix_kCdfShape +686 +218,
+    WebRtcIsacfix_kCdfShape +686 +222, WebRtcIsacfix_kCdfShape +686 +226, WebRtcIsacfix_kCdfShape +686 +230,
+    WebRtcIsacfix_kCdfShape +686 +234, WebRtcIsacfix_kCdfShape +686 +238, WebRtcIsacfix_kCdfShape +686 +242,
+    WebRtcIsacfix_kCdfShape +686 +247, WebRtcIsacfix_kCdfShape +686 +253, WebRtcIsacfix_kCdfShape +686 +262,
+    WebRtcIsacfix_kCdfShape +686 +269, WebRtcIsacfix_kCdfShape +686 +278, WebRtcIsacfix_kCdfShape +686 +289,
+    WebRtcIsacfix_kCdfShape +686 +305, WebRtcIsacfix_kCdfShape +686 +307, WebRtcIsacfix_kCdfShape +686 +309,
+    WebRtcIsacfix_kCdfShape +686 +311, WebRtcIsacfix_kCdfShape +686 +315, WebRtcIsacfix_kCdfShape +686 +319,
+    WebRtcIsacfix_kCdfShape +686 +323, WebRtcIsacfix_kCdfShape +686 +327, WebRtcIsacfix_kCdfShape +686 +331,
+    WebRtcIsacfix_kCdfShape +686 +335, WebRtcIsacfix_kCdfShape +686 +340, WebRtcIsacfix_kCdfShape +686 +346,
+    WebRtcIsacfix_kCdfShape +686 +354, WebRtcIsacfix_kCdfShape +686 +362, WebRtcIsacfix_kCdfShape +686 +374,
+    WebRtcIsacfix_kCdfShape +686 +384, WebRtcIsacfix_kCdfShape +686 +396, WebRtcIsacfix_kCdfShape +686 +413,
+    WebRtcIsacfix_kCdfShape +686 +439, WebRtcIsacfix_kCdfShape +686 +442, WebRtcIsacfix_kCdfShape +686 +446,
+    WebRtcIsacfix_kCdfShape +686 +450, WebRtcIsacfix_kCdfShape +686 +455, WebRtcIsacfix_kCdfShape +686 +461,
+    WebRtcIsacfix_kCdfShape +686 +468, WebRtcIsacfix_kCdfShape +686 +475, WebRtcIsacfix_kCdfShape +686 +481,
+    WebRtcIsacfix_kCdfShape +686 +489, WebRtcIsacfix_kCdfShape +686 +498, WebRtcIsacfix_kCdfShape +686 +508,
+    WebRtcIsacfix_kCdfShape +686 +522, WebRtcIsacfix_kCdfShape +686 +534, WebRtcIsacfix_kCdfShape +686 +554,
+    WebRtcIsacfix_kCdfShape +686 +577, WebRtcIsacfix_kCdfShape +686 +602, WebRtcIsacfix_kCdfShape +686 +631
+  },
+  { WebRtcIsacfix_kCdfShape +1368 +0,   WebRtcIsacfix_kCdfShape +1368 +2,   WebRtcIsacfix_kCdfShape +1368 +4,
+    WebRtcIsacfix_kCdfShape +1368 +6,   WebRtcIsacfix_kCdfShape +1368 +8,   WebRtcIsacfix_kCdfShape +1368 +10,
+    WebRtcIsacfix_kCdfShape +1368 +12,  WebRtcIsacfix_kCdfShape +1368 +14,  WebRtcIsacfix_kCdfShape +1368 +16,
+    WebRtcIsacfix_kCdfShape +1368 +20,  WebRtcIsacfix_kCdfShape +1368 +24,  WebRtcIsacfix_kCdfShape +1368 +28,
+    WebRtcIsacfix_kCdfShape +1368 +32,  WebRtcIsacfix_kCdfShape +1368 +36,  WebRtcIsacfix_kCdfShape +1368 +40,
+    WebRtcIsacfix_kCdfShape +1368 +44,  WebRtcIsacfix_kCdfShape +1368 +50,  WebRtcIsacfix_kCdfShape +1368 +57,
+    WebRtcIsacfix_kCdfShape +1368 +65,  WebRtcIsacfix_kCdfShape +1368 +67,  WebRtcIsacfix_kCdfShape +1368 +69,
+    WebRtcIsacfix_kCdfShape +1368 +71,  WebRtcIsacfix_kCdfShape +1368 +73,  WebRtcIsacfix_kCdfShape +1368 +75,
+    WebRtcIsacfix_kCdfShape +1368 +77,  WebRtcIsacfix_kCdfShape +1368 +79,  WebRtcIsacfix_kCdfShape +1368 +81,
+    WebRtcIsacfix_kCdfShape +1368 +85,  WebRtcIsacfix_kCdfShape +1368 +89,  WebRtcIsacfix_kCdfShape +1368 +93,
+    WebRtcIsacfix_kCdfShape +1368 +97,  WebRtcIsacfix_kCdfShape +1368 +101, WebRtcIsacfix_kCdfShape +1368 +105,
+    WebRtcIsacfix_kCdfShape +1368 +110, WebRtcIsacfix_kCdfShape +1368 +116, WebRtcIsacfix_kCdfShape +1368 +123,
+    WebRtcIsacfix_kCdfShape +1368 +132, WebRtcIsacfix_kCdfShape +1368 +134, WebRtcIsacfix_kCdfShape +1368 +136,
+    WebRtcIsacfix_kCdfShape +1368 +138, WebRtcIsacfix_kCdfShape +1368 +141, WebRtcIsacfix_kCdfShape +1368 +143,
+    WebRtcIsacfix_kCdfShape +1368 +146, WebRtcIsacfix_kCdfShape +1368 +150, WebRtcIsacfix_kCdfShape +1368 +154,
+    WebRtcIsacfix_kCdfShape +1368 +158, WebRtcIsacfix_kCdfShape +1368 +162, WebRtcIsacfix_kCdfShape +1368 +166,
+    WebRtcIsacfix_kCdfShape +1368 +170, WebRtcIsacfix_kCdfShape +1368 +174, WebRtcIsacfix_kCdfShape +1368 +179,
+    WebRtcIsacfix_kCdfShape +1368 +185, WebRtcIsacfix_kCdfShape +1368 +193, WebRtcIsacfix_kCdfShape +1368 +203,
+    WebRtcIsacfix_kCdfShape +1368 +214, WebRtcIsacfix_kCdfShape +1368 +216, WebRtcIsacfix_kCdfShape +1368 +218,
+    WebRtcIsacfix_kCdfShape +1368 +220, WebRtcIsacfix_kCdfShape +1368 +224, WebRtcIsacfix_kCdfShape +1368 +227,
+    WebRtcIsacfix_kCdfShape +1368 +231, WebRtcIsacfix_kCdfShape +1368 +235, WebRtcIsacfix_kCdfShape +1368 +239,
+    WebRtcIsacfix_kCdfShape +1368 +243, WebRtcIsacfix_kCdfShape +1368 +247, WebRtcIsacfix_kCdfShape +1368 +251,
+    WebRtcIsacfix_kCdfShape +1368 +256, WebRtcIsacfix_kCdfShape +1368 +262, WebRtcIsacfix_kCdfShape +1368 +269,
+    WebRtcIsacfix_kCdfShape +1368 +277, WebRtcIsacfix_kCdfShape +1368 +286, WebRtcIsacfix_kCdfShape +1368 +297,
+    WebRtcIsacfix_kCdfShape +1368 +315, WebRtcIsacfix_kCdfShape +1368 +317, WebRtcIsacfix_kCdfShape +1368 +319,
+    WebRtcIsacfix_kCdfShape +1368 +323, WebRtcIsacfix_kCdfShape +1368 +327, WebRtcIsacfix_kCdfShape +1368 +331,
+    WebRtcIsacfix_kCdfShape +1368 +335, WebRtcIsacfix_kCdfShape +1368 +339, WebRtcIsacfix_kCdfShape +1368 +343,
+    WebRtcIsacfix_kCdfShape +1368 +349, WebRtcIsacfix_kCdfShape +1368 +355, WebRtcIsacfix_kCdfShape +1368 +361,
+    WebRtcIsacfix_kCdfShape +1368 +368, WebRtcIsacfix_kCdfShape +1368 +376, WebRtcIsacfix_kCdfShape +1368 +385,
+    WebRtcIsacfix_kCdfShape +1368 +397, WebRtcIsacfix_kCdfShape +1368 +411, WebRtcIsacfix_kCdfShape +1368 +429,
+    WebRtcIsacfix_kCdfShape +1368 +456, WebRtcIsacfix_kCdfShape +1368 +459, WebRtcIsacfix_kCdfShape +1368 +463,
+    WebRtcIsacfix_kCdfShape +1368 +467, WebRtcIsacfix_kCdfShape +1368 +473, WebRtcIsacfix_kCdfShape +1368 +478,
+    WebRtcIsacfix_kCdfShape +1368 +485, WebRtcIsacfix_kCdfShape +1368 +491, WebRtcIsacfix_kCdfShape +1368 +497,
+    WebRtcIsacfix_kCdfShape +1368 +505, WebRtcIsacfix_kCdfShape +1368 +514, WebRtcIsacfix_kCdfShape +1368 +523,
+    WebRtcIsacfix_kCdfShape +1368 +535, WebRtcIsacfix_kCdfShape +1368 +548, WebRtcIsacfix_kCdfShape +1368 +565,
+    WebRtcIsacfix_kCdfShape +1368 +585, WebRtcIsacfix_kCdfShape +1368 +611, WebRtcIsacfix_kCdfShape +1368 +640
+  }
+};
+
+/* code length for all coefficients using different models */
+
+const int16_t WebRtcIsacfix_kCodeLenGainQ11[392] = {
+  25189, 16036,  8717,   358,  8757, 15706, 21456, 24397, 18502, 17559
+  , 13794, 11088,  7480,   873,  6603, 11636, 14627, 16805, 19132, 26624
+  , 26624, 19408, 13751,  7280,   583,  7591, 15178, 23773, 28672, 25189
+  , 19045, 16442, 13412, 10397,  5893,  1338,  6376,  9992, 12074, 13853
+  , 15781, 19821, 22819, 28672, 28672, 25189, 19858, 15781, 11262,  5477
+  ,  1298,  5632, 11814, 17234, 22020, 28672, 19677, 18125, 16587, 14521
+  , 13032, 11196,  9249,  5411,  2495,  4994,  7975, 10234, 12308, 13892
+  , 15148, 17944, 21725, 23917, 25189, 19539, 16293, 11531,  7808,  4475
+  ,  2739,  4872,  8089, 11314, 14992, 18105, 23257, 26624, 25189, 23257
+  , 23257, 20982, 18697, 18023, 16338, 16036, 14539, 13695, 13146, 11763
+  , 10754,  9074,  7260,  5584,  4430,  5553,  6848,  8344, 10141, 11636
+  , 12535, 13416, 14342, 15477, 17296, 19282, 22349, 23773, 28672, 28672
+  , 26624, 23773, 21456, 18023, 15118, 13362, 11212,  9293,  8043,  6985
+  ,  5908,  5721,  5853,  6518,  7316,  8360,  9716, 11289, 12912, 14652
+  , 16969, 19858, 23773, 26624, 28013, 30720, 30720, 28672, 25426, 23141
+  , 25426, 23773, 20720, 19408, 18697, 19282, 16859, 16338, 16026, 15377
+  , 15021, 14319, 14251, 13937, 13260, 13017, 12332, 11703, 11430, 10359
+  , 10128,  9405,  8757,  8223,  7974,  7859,  7646,  7673,  7997,  8580
+  ,  8880,  9061,  9866, 10397, 11358, 12200, 13244, 14157, 15021, 16026
+  , 16490, 18697, 18479, 20011, 19677, 20720, 24576, 26276, 30720, 30720
+  , 28672, 30720, 24068, 25189, 22437, 20345, 18479, 16396, 16026, 14928
+  , 13877, 13450, 12696, 12766, 11626, 11098, 10159,  9998,  9437,  9275
+  ,  8783,  8552,  8629,  8488,  8522,  8454,  8571,  8775,  8915,  9427
+  ,  9483,  9851, 10260, 10933, 11131, 11974, 12560, 13833, 15080, 16304
+  , 17491, 19017, 18697, 19408, 22020, 25189, 25426, 22819, 26276, 30720
+  , 30720, 30720, 30720, 30720, 30720, 28672, 30720, 30720, 30720, 30720
+  , 28013, 25426, 24397, 23773, 25189, 26624, 25189, 22437, 21725, 20011
+  , 20527, 20720, 20771, 22020, 22020, 19858, 19408, 19972, 17866, 17360
+  , 17791, 17219, 16805, 16927, 16067, 16162, 15661, 15178, 15021, 15209
+  , 14845, 14570, 14490, 14490, 13733, 13617, 13794, 13577, 13312, 12824
+  , 13032, 12683, 12189, 12469, 12109, 11940, 11636, 11617, 11932, 12294
+  , 11578, 11775, 12039, 11654, 11560, 11439, 11909, 11421, 12029, 11513
+  , 11773, 11899, 11560, 11805, 11476, 11664, 11963, 11647, 11754, 11963
+  , 11703, 12211, 11932, 12074, 12469, 12535, 12560, 12912, 12783, 12866
+  , 12884, 13378, 13957, 13775, 13635, 14019, 14545, 15240, 15520, 15554
+  , 15697, 16490, 16396, 17281, 16599, 16969, 17963, 16859, 16983, 16805
+  , 17099, 18210, 17219, 17646, 17700, 17646, 18297, 17425, 18479, 17791
+  , 17718, 19282, 18672, 20173, 20982, 21725, 21456, 23773, 23257, 25189
+  , 30720, 30720, 25189, 26624, 30720, 30720, 30720, 30720, 28672, 26276
+  , 30720, 30720
+};
+
+const int16_t WebRtcIsacfix_kCodeLenShapeQ11[577] = {
+  0,     0,     0,     0,     0,     0,     0,     0,     0, 28672
+  ,     0, 26624,     1, 23773, 22819,     4, 20982, 18598,    10, 19282
+  , 16587,    22, 16442, 26624, 13126,    60, 14245, 26624, 26624, 12736
+  ,    79, 12912, 25189, 22819,  9563,   249,  9474, 22349, 28672, 23257
+  , 17944,  7980,   434,  8181, 16431, 26624,     0,     0,     0,     0
+  , 28672,     0,     0,     0,     0,     0, 28672,     0, 22437,     3
+  , 22437, 20982,     5, 20982, 16442,    22, 16752, 13814,    49, 14646
+  , 11645,   116, 11734, 26624, 28672, 10613,   158, 11010, 24397, 19539
+  ,  8046,   453,  7709, 19017, 28672, 23257, 15110,  6770,   758,  6523
+  , 14108, 24397, 28672,     0,     0,     0,     0, 28672,     0, 28672
+  ,     0, 26624,     1, 28672, 28672,     1, 26624, 24397,     2, 23257
+  , 21725,     4, 20982, 17158,    18, 17281, 28672, 15178,    35, 15209
+  , 12343,    92, 12320, 26624, 10344,   189, 10217, 30720, 22020,  9033
+  ,   322,  8549, 23773, 28672, 30720, 20622,  7666,   473,  7806, 20527
+  , 24397, 14135,  5995,   960,  6018, 14872, 23773, 26624, 20928, 16293
+  , 10636,  4926,  1588,  5256, 11088, 18043, 25189,     0,     0,     0
+  ,     0, 24397,     1, 25189, 20720,     5, 21456, 21209,     3, 25189
+  , 20982,     5, 21456, 15818,    30, 15410, 13794,    60, 13416, 28672
+  , 11162,   142, 11025,  9337,   231, 10094, 23773,  8338,   405,  7930
+  , 26624, 19677,  6787,   613,  7318, 19161, 28672, 16442,  6319,   932
+  ,  5748, 15312, 25189, 28672, 28672, 28672, 13998,  5513,  1263,  5146
+  , 14024, 24397, 22819, 15818,  9460,  4447,  2122,  4681,  9970, 15945
+  , 22349, 28672, 30720, 22622, 19017, 14872, 10689,  7405,  4473,  2983
+  ,  4783,  7894, 11186, 14964, 18210, 24397,     0,     0, 30720,     0
+  , 30720, 21456,     3, 23773, 14964,    39, 14757, 14179,    53, 13751
+  , 14928,    36, 15272, 12430,    79, 13228,  9135,   285,  9077, 28672
+  , 28672,  8377,   403,  7919, 26624, 28672, 23257,  7068,   560,  7473
+  , 20345, 19677,  6770,   720,  6464, 18697, 25189, 16249,  5779,  1087
+  ,  5494, 15209, 22819, 30720, 20622, 12601,  5240,  1419,  5091, 12095
+  , 19408, 26624, 22819, 16805, 10683,  4812,  2056,  4293,  9836, 16026
+  , 24397, 25189, 18409, 13833,  8681,  4503,  2653,  4220,  8329, 13853
+  , 19132, 26624, 25189, 20771, 17219, 12630,  9520,  6733,  4565,  3657
+  ,  4817,  7069, 10058, 13212, 16805, 21209, 26624, 26276, 28672, 28672
+  , 26276, 23257, 20173, 19282, 16538, 15051, 12811, 10754,  9267,  7547
+  ,  6270,  5407,  5214,  6057,  7054,  8226,  9488, 10806, 12793, 14442
+  , 16442, 19677, 22099, 26276, 28672,     0, 30720,     0, 30720, 11920
+  ,    56, 20720, 30720,  6766,   355, 13130, 30720, 30720, 22180,  5589
+  ,   736,  7902, 26624, 30720,  7634,   354,  9721, 30720, 30720,  9027
+  ,   246, 10117, 30720, 30720,  9630,   453,  6709, 23257, 30720, 25683
+  , 14228,  6127,  1271,  4615, 15178, 30720, 30720, 23504, 12382,  5739
+  ,  2015,  3492, 10560, 22020, 26624, 30720, 30720, 23257, 13192,  4873
+  ,  1527,  5001, 12445, 22020, 30720, 30720, 30720, 30720, 19344, 10761
+  ,  4051,  1927,  5281, 10594, 17866, 28672, 30720, 30720, 30720, 21869
+  , 15554, 10060,  5979,  2710,  3085,  7889, 14646, 21725, 28672, 30720
+  , 30720, 30720, 30720, 30720, 30720, 30720, 22719, 17425, 13212,  8083
+  ,  4439,  2820,  4305,  8136, 12988, 17425, 21151, 28672, 28672, 30720
+  , 30720, 30720, 28672, 20527, 19282, 14412, 10513,  7407,  5079,  3744
+  ,  4115,  6308,  9621, 13599, 17040, 22349, 28672, 30720, 30720, 30720
+  , 30720, 30720, 30720, 29522, 19282, 14545, 11485,  9093,  6760,  5262
+  ,  4672,  4970,  6005,  7852,  9732, 12343, 14672, 19161, 22819, 25189
+  , 30720, 30720, 28672, 30720, 30720, 20720, 18125, 14388, 12007,  9825
+  ,  8092,  7064,  6069,  5903,  5932,  6359,  7169,  8310,  9324, 10711
+  , 11867, 13096, 14157, 16338, 17040, 19161, 21725, 23773, 30720, 30720
+  , 26276, 25426, 24397, 28672, 28672, 23257, 22020, 22349, 18297, 17646
+  , 16983, 16431, 16162, 15021, 15178, 13751, 12142, 10895, 10193,  9632
+  ,  9086,  8896,  8823,  8735,  8591,  8754,  8649,  8361,  8329,  8522
+  ,  8373,  8739,  8993,  9657, 10454, 11279, 11899, 12614, 14024, 14273
+  , 15477, 15240, 16649, 17866, 18697, 21151, 22099
+};
+
+/* left KLT transforms */
+const int16_t WebRtcIsacfix_kT1GainQ15[3][4] = {
+  { -26130, 19773, 19773, 26130 },
+  { -26664, 19046, 19046, 26664 },
+  { -23538, 22797, 22797, 23538 }
+};
+
+
+
+const int16_t WebRtcIsacfix_kT1ShapeQ15[3][324] = {
+  { 52,16,168,7,439,-138,-89,306,671,882,
+    157,1301,291,1598,-3571,-1943,-1119,32404,96,-12,
+    379,-64,-307,345,-836,539,1045,2541,-2865,-992,
+    1683,-4717,5808,7427,30599,2319,183,-73,451,481,
+    933,-198,781,-397,1244,-777,3690,-2414,149,-1356,
+    -2593,-31140,8289,-1737,-202,-14,-214,360,501,450,
+    -245,-7,797,3638,-2804,3042,-337,22137,-22103,2264,
+    6838,-3381,305,172,263,-195,-355,351,179,513,
+    2234,3343,5509,7531,19075,-17740,-16836,2244,-629,-1505,
+    -153,108,124,-324,2694,-124,1492,-850,5347,4285,
+    7439,-10229,-22822,-12467,-12891,3645,822,-232,131,13,
+    374,565,536,4681,1294,-1935,1926,-5734,-10643,26462,
+    -12480,-5589,-1038,-2468,964,-704,-247,-106,186,-558,
+    -4050,3760,2972,2141,-7393,6294,26740,11991,-3251,5461,
+    5341,1574,2208,-51,-552,-297,-753,-154,2068,-5371,
+    3578,4106,28043,-10533,8041,2353,2389,4609,3410,1906,
+    351,-249,18,-15,1117,539,2870,9084,17585,-24528,
+    -366,-6490,2009,-3170,2942,1116,-232,1672,1065,606,
+    -399,-388,-518,38,3728,28948,-11936,4543,4104,-4441,
+    1545,-4044,1485,622,-68,186,-473,135,-280,125,
+    -546,-1813,6989,6606,23711,19376,-2636,2870,-4553,-1687,
+    878,-375,205,-208,-409,-108,-200,-45,-1670,-337,
+    8213,-5524,-2334,5240,-12939,-26205,5937,-1582,-592,-959,
+    -5374,2449,3400,559,349,-492,668,12379,-27684,3419,
+    5117,4415,-297,-8270,-1252,-3490,-1272,-1199,-3159,191,
+    630,488,-797,-3071,12912,-27783,-10249,1047,647,619,
+    111,-3722,-915,-1055,-502,5,-1384,-306,221,68,
+    5219,13173,-26474,-11663,-5626,927,806,-1127,236,-589,
+    -522,-230,-312,-315,-428,-573,426,192,-11830,-26883,
+    -14121,-2785,-1429,-109,410,-832,-302,539,-459,104,
+    1,-530,-202,-289,153,116,30082,-12944,-671,20,
+    649,98,103,215,234,0,280,-51,-169,298,
+    31,230,-73,-51
+  },
+  { -154,-7,-192,61,-739,-389,-947,-162,-60,94,
+    511,-716,1520,-1428,4168,-2214,1816,32270,-123,-77,
+    -199,-99,-42,-588,203,-240,-930,-35,1580,234,
+    3206,-5507,-1495,-10946,30000,-2667,-136,-176,-240,-175,
+    -204,-661,-1796,-1039,-1271,498,3143,734,2663,2699,
+    -8127,29333,10495,2356,-72,113,-91,118,-2840,-723,
+    -1733,-1158,-389,-2116,-3054,-3,-5179,8071,29546,6308,
+    5657,-3178,-186,-294,-473,-635,1213,-983,-1437,-1715,
+    -1094,1280,-92,-9573,948,29576,-7060,-5921,2954,1349,
+    -337,-108,-1099,962,418,-413,-1149,-334,1241,3975,
+    -6825,26725,-14377,7051,-4772,-1707,2335,2008,-150,570,
+    1371,42,-1649,-619,2039,3369,-1225,1583,-2755,-15207,
+    -27504,-4855,-4304,1495,2733,1324,15,-448,403,353,
+    3016,-1242,2338,2673,2064,-7496,-30447,-3686,5833,-1301,
+    -2455,2122,1519,608,43,-653,773,-3072,912,-1537,
+    4505,10284,30237,1549,3200,-691,205,1702,658,1014,
+    1499,148,79,-322,-1162,-4639,-813,7536,3204,29109,
+    -10747,-26,1611,2286,2114,2561,1022,372,348,207,
+    1062,-1088,-443,-9849,2381,5671,29097,-7612,-2927,3853,
+    194,1155,275,1438,1438,1312,581,888,-784,906,
+    112,-11103,25104,14438,-9311,-3068,1210,368,370,-940,
+    -2434,-1148,1925,392,657,258,-526,1475,-2281,-4265,
+    -1880,1534,2185,-1472,959,-30934,6306,3114,-4109,1768,
+    -2612,-703,45,644,2185,2033,5670,7211,19114,-22427,
+    6432,5150,-4090,-2694,3860,1245,-596,293,1829,369,
+    -319,229,-3256,2170,-6374,-26216,-4570,-16053,-5766,-262,
+    -2006,2873,-1477,147,378,-1544,-344,-544,-985,-481,
+    4210,4542,30757,-7291,-4863,1529,-2079,-628,-603,-783,
+    -408,1646,697,808,-620,-292,181,158,-13313,-29173,
+    5984,-1262,859,-1776,-558,-24,-883,-1421,739,210,
+    -531,-285,131,-160,-246,-56,29345,-13706,-2859,-2966,
+    -300,-970,-2382,-268,-103,-636,-12,-62,-691,-253,
+    -147,-127,27,66
+  },
+  { 55,-212,-198,489,-274,81,682,399,328,-934,
+    -389,-37,1357,-3632,5276,6581,-9493,-29921,29,-45,
+    2,190,172,-15,311,-130,-1085,-25,324,-684,
+    3223,-6580,4485,-5280,-29521,9933,82,-320,-530,229,
+    -705,-533,-414,848,-1842,-4473,1390,-857,6717,-6692,
+    4648,29397,576,8339,-68,-85,238,-330,264,-1012,
+    -381,-203,-3384,-3329,3906,6810,3790,-6250,28312,-8078,
+    8089,1565,160,-569,-612,-613,-1063,-1928,-1125,3421,
+    -7481,-7484,4942,-6984,4330,-25591,-10574,-6982,5682,-1781,
+    -308,89,178,-1715,-420,-3530,-5776,1219,-8617,-7137,
+    7015,4981,24875,12657,-5408,-3356,-785,-1972,326,-858,
+    -506,-3382,-986,-6258,-2259,4015,-8374,-10482,3127,23826,
+    -14126,-514,-5417,2178,-2912,-17,-587,80,67,-5881,
+    -1702,-5351,-4481,398,-10156,-225,20727,-15460,-11603,7752,
+    3660,1714,-2001,-359,499,-527,-1225,-7820,-1297,-6326,
+    -8526,7900,-18328,13311,-17488,-2926,-196,-17,2281,873,
+    480,-160,-624,471,780,-8729,1707,-14262,-20647,1721,
+    18590,-2206,-1214,-1066,312,-2602,783,-412,-113,49,
+    -119,1305,-2371,-15132,-1833,-18252,20295,-8316,2227,341,
+    -2074,-702,3082,-262,-465,-198,430,30,-70,-788,
+    2342,-25132,-4863,19783,-484,2137,2811,-1906,799,1586,
+    962,-734,-191,-30,-129,-93,-1126,1729,5860,-2030,
+    8953,603,-3338,-10869,-1144,22070,12130,10513,3191,-6881,
+    -3514,2090,711,-666,1843,-5997,-5681,2921,-17641,-2801,
+    4969,18590,7169,12214,8587,4405,3008,-1074,-371,-77,
+    253,331,-5611,5014,13152,-1985,18483,-1696,8043,20463,
+    2381,-393,1688,-1205,618,1220,457,248,-83,176,
+    7920,-13676,-22139,-3038,17402,2036,844,3258,994,719,
+    2087,-44,426,494,12,-91,46,5,-14204,22912,
+    -18156,-361,442,2298,-829,2229,386,1433,1335,1323,
+    55,-592,-139,49,-12,-57,27783,17134,350,-282,
+    552,158,142,2488,465,329,1087,118,143,10,
+    56,65,-15,-31
+  }
+};
+
+/* right KLT transforms */
+const int16_t WebRtcIsacfix_kT2GainQ15[3][36] = {
+  {   4775, -14892,  20313, -17104,  10533,  -3613,  -6782,  16044,  -8889,
+      -11019,  21330, -10720,  13193, -15678, -11101,  14461,  12250, -13096,
+      -16951,   2167,  16066,  15569,   -702, -16754, -19195, -12823,  -4321,
+      5128,    13348,  17825,  13232,  13404,  13494,  13490,  13383,  13261
+  },
+  {  -3725,  11408, -18493,  20031, -13097,   3865,   9344, -19294,  10740,
+     8856, -18432,   8982,  13975, -14444, -11930,  11774,  14285, -13594,
+     -16323,     -4,  16340,  15609,    359, -17220, -18401, -13471,  -4643,
+     5225,  13375,  18053,  13124,  13463,  13621,  13583,  13393,  13072
+  },
+  {  -3513,  11402, -17883,  19504, -14399,   4885,   8702, -19513,  12046,
+     8533, -18110,   8447,  12778, -14838, -12444,  13177,  14107, -12759,
+     -17268,    914,  15822,  15661,    838, -16686, -18907, -12936,  -4820,
+     4175,  12398,  18830,  12913,  13215,  13433,  13572,  13601,  13518
+  }
+};
+
+const int16_t WebRtcIsacfix_kT2ShapeQ15[3][36] = {
+  {   4400, -11512,  17205, -19470,  14770,  -5345,   9784, -19222,  11228,
+      6842, -18371,   9909,  14191, -13496, -11563,  14015,  11827, -14839,
+      -15439,    948,  17802,  14827,  -2053, -17132,  18723,  14516,   4135,
+      -6822, -13869, -16016,  12975,  13341,  13563,  13603,  13478,  13296
+  },
+  {   5420, -14215,  19060, -18073,  11709,  -3911,   9645, -18335,   7717,
+      10842, -19283,   9777,  14898, -12555, -13661,  11668,  13520, -13733,
+      -15936,  -1358,  15671,  16728,    328, -17100,  17527,  13973,   5587,
+      -5194, -14165, -17677,  12970,  13446,  13693,  13660,  13462,  13015
+  },
+  {   4386, -12426,  18019, -18895,  13894,  -5034,   9713, -19270,  10283,
+      8692, -18439,   9317,  13992, -13454, -13241,  12850,  13366, -13336,
+      -16334,   -498,  15976,  16213,   -114, -16987,  18191,  13659,   4958,
+      -5116, -13444, -18021,  12911,  13424,  13718,  13674,  13464,  13054
+  }
+};
+
+/* means of log gains and LAR coefficients*/
+const int16_t WebRtcIsacfix_kMeansGainQ8[3][12] = {
+  { -1758, -1370, -1758, -1373, -1757, -1375,
+    -1758, -1374, -1758, -1373, -1755, -1370
+  },
+  { -1569, -1224, -1569, -1225, -1569, -1227,
+    -1569, -1226, -1567, -1225, -1565, -1224
+  },
+  { -1452,  -957, -1447,  -951, -1438,  -944,
+    -1431,  -938, -1419,  -931, -1406,  -926
+  }
+};
+
+
+const int32_t WebRtcIsacfix_kMeansShapeQ17[3][108] = {
+  { -119581, 34418, -44193, 11112, -4428, 18906, 9222, 8068, 1953, 5425,
+    1871, 1689, 109933, 33751, 10471, -2566, 1090, 2320, -119219, 33728,
+    -43759, 11450, -4870, 19117, 9174, 8037, 1972, 5331, 1872, 1843,
+    109899, 34301, 10629, -2316, 1272, 2562, -118608, 32318, -44012, 11591,
+    -4914, 18932, 9456, 8088, 1900, 5419, 1723, 1853, 109963, 35059,
+    10745, -2335, 1161, 2520, -119174, 32107, -44462, 11635, -4694, 18611,
+    9757, 8108, 1969, 5486, 1673, 1777, 109636, 34907, 10643, -2406,
+    1034, 2420, -118597, 32320, -44590, 10854, -4569, 18821, 9701, 7866,
+    2003, 5577, 1732, 1626, 109913, 34448, 10714, -2752, 990, 2228,
+    -118138, 32996, -44352, 10334, -3772, 18488, 9464, 7865, 2208, 5540,
+    1745, 1664, 109880, 33381, 10640, -2779, 980, 2054
+  },
+  { -146328, 46370, 1047, 26431, 10035, 13933, 6415, 14359, -2368, 6661,
+    2269, 1764, 96623, 7802, 4163, 10742, 1643, 2954, -146871, 46561, 1127,
+    26225, 10113, 14096, 6771, 14323, -2037, 6788, 2297, 1761, 96324, 8382,
+    4309, 10450, 1695, 3016, -146502, 46475, 1580, 26118, 10487, 14179, 6622,
+    14439, -2034, 6757, 2342, 1761, 95869, 8966, 4347, 10358, 1999, 2855,
+    -146958, 47717, 826, 25952, 10263, 14061, 5266, 13681, -2417, 6582, 2047,
+    1608, 96257, 9107, 4452, 10301, 1792, 2676, -146992, 47123, 446, 25822,
+    10405, 14292, 5140, 13804, -2403, 6496, 1834, 1735, 97489, 9253, 4414,
+    10684, 1549, 2721, -145811, 46182, 901, 26482, 10241, 14524, 6075, 14514,
+    -2147, 6691, 2196, 1899, 97011, 8178, 4102, 10758, 1638, 2869
+  },
+  { -166617, 46969, -43908, 17726, 6330, 25615, 6913, 5450, -2301, 1984,
+    507, 2883, 149998, 28709, 19333, 16703, 11093, 8965, -168254, 46604,
+    -44315, 17862, 6474, 25746, 7018, 5373, -2343, 1930, 513, 2819, 150391,
+    28627, 19194, 16678, 10998, 8929, -169093, 46084, -44767, 17427, 6401,
+    25674, 7147, 5472, -2336, 1820, 491, 2802, 149860, 28430, 19064, 16524,
+    10898, 8875, -170205, 46189, -44877, 17403, 6190, 25209, 7035, 5673, -2173,
+    1894, 574, 2756, 148830, 28230, 18819, 16418, 10789, 8811, -171263, 45045,
+    -44834, 16858, 6103, 24726, 7014, 5713, -2103, 1877, 518, 2729, 147073,
+    27744, 18629, 16277, 10690, 8703, -171720, 44153, -45062, 15951, 5872,
+    24429, 7044, 5585, -2082, 1807, 519, 2769, 144791, 27402, 18490, 16126,
+    10548, 8635
+  }
+};
diff --git a/modules/audio_coding/codecs/isac/fix/source/lpc_tables.h b/modules/audio_coding/codecs/isac/fix/source/lpc_tables.h
new file mode 100644
index 0000000..05c53dd
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/lpc_tables.h
@@ -0,0 +1,97 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lpc_tables.h
+ *
+ * header file for coding tables for the LPC coefficients
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_LPC_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_LPC_TABLES_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+/* indices of KLT coefficients used */
+extern const uint16_t WebRtcIsacfix_kSelIndGain[12];
+
+extern const uint16_t WebRtcIsacfix_kSelIndShape[108];
+
+/* cdf array for model indicator */
+extern const uint16_t WebRtcIsacfix_kModelCdf[KLT_NUM_MODELS+1];
+
+/* pointer to cdf array for model indicator */
+extern const uint16_t *WebRtcIsacfix_kModelCdfPtr[1];
+
+/* initial cdf index for decoder of model indicator */
+extern const uint16_t WebRtcIsacfix_kModelInitIndex[1];
+
+/* offset to go from rounded value to quantization index */
+extern const int16_t WebRtcIsacfix_kQuantMinGain[12];
+
+extern const int16_t WebRtcIsacfix_kQuantMinShape[108];
+
+/* maximum quantization index */
+extern const uint16_t WebRtcIsacfix_kMaxIndGain[12];
+
+extern const uint16_t WebRtcIsacfix_kMaxIndShape[108];
+
+/* index offset */
+extern const uint16_t WebRtcIsacfix_kOffsetGain[KLT_NUM_MODELS][12];
+
+extern const uint16_t WebRtcIsacfix_kOffsetShape[KLT_NUM_MODELS][108];
+
+/* initial cdf index for KLT coefficients */
+extern const uint16_t WebRtcIsacfix_kInitIndexGain[KLT_NUM_MODELS][12];
+
+extern const uint16_t WebRtcIsacfix_kInitIndexShape[KLT_NUM_MODELS][108];
+
+/* offsets for quantizer representation levels */
+extern const uint16_t WebRtcIsacfix_kOfLevelsGain[3];
+
+extern const uint16_t WebRtcIsacfix_kOfLevelsShape[3];
+
+/* quantizer representation levels */
+extern const int32_t WebRtcIsacfix_kLevelsGainQ17[1176];
+
+extern const int16_t WebRtcIsacfix_kLevelsShapeQ10[1735];
+
+/* cdf tables for quantizer indices */
+extern const uint16_t WebRtcIsacfix_kCdfGain[1212];
+
+extern const uint16_t WebRtcIsacfix_kCdfShape[2059];
+
+/* pointers to cdf tables for quantizer indices */
+extern const uint16_t *WebRtcIsacfix_kCdfGainPtr[KLT_NUM_MODELS][12];
+
+extern const uint16_t *WebRtcIsacfix_kCdfShapePtr[KLT_NUM_MODELS][108];
+
+/* code length for all coefficients using different models */
+extern const int16_t WebRtcIsacfix_kCodeLenGainQ11[392];
+
+extern const int16_t WebRtcIsacfix_kCodeLenShapeQ11[577];
+
+/* left KLT transforms */
+extern const int16_t WebRtcIsacfix_kT1GainQ15[KLT_NUM_MODELS][4];
+
+extern const int16_t WebRtcIsacfix_kT1ShapeQ15[KLT_NUM_MODELS][324];
+
+/* right KLT transforms */
+extern const int16_t WebRtcIsacfix_kT2GainQ15[KLT_NUM_MODELS][36];
+
+extern const int16_t WebRtcIsacfix_kT2ShapeQ15[KLT_NUM_MODELS][36];
+
+/* means of log gains and LAR coefficients */
+extern const int16_t WebRtcIsacfix_kMeansGainQ8[KLT_NUM_MODELS][12];
+
+extern const int32_t WebRtcIsacfix_kMeansShapeQ17[3][108];
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_LPC_TABLES_H_ */
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c
new file mode 100644
index 0000000..78cb93f
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c
@@ -0,0 +1,435 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "rtc_base/compile_assert_c.h"
+
+/* log2[0.2, 0.5, 0.98] in Q8 */
+static const int16_t kLogLagWinQ8[3] = {
+  -594, -256, -7
+};
+
+/* [1 -0.75 0.25] in Q12 */
+static const int16_t kACoefQ12[3] = {
+  4096, -3072, 1024
+};
+
+int32_t WebRtcIsacfix_Log2Q8(uint32_t x) {
+  int32_t zeros;
+  int16_t frac;
+
+  zeros=WebRtcSpl_NormU32(x);
+  frac = (int16_t)(((x << zeros) & 0x7FFFFFFF) >> 23);
+  /* log2(magn(i)) */
+
+  return ((31 - zeros) << 8) + frac;
+}
+
+static __inline int16_t Exp2Q10(int16_t x) { // Both in and out in Q10
+
+  int16_t tmp16_1, tmp16_2;
+
+  tmp16_2=(int16_t)(0x0400|(x&0x03FF));
+  tmp16_1 = -(x >> 10);
+  if(tmp16_1>0)
+    return tmp16_2 >> tmp16_1;
+  else
+    return tmp16_2 << -tmp16_1;
+
+}
+
+
+
+/* 1D parabolic interpolation . All input and output values are in Q8 */
+static __inline void Intrp1DQ8(int32_t *x, int32_t *fx, int32_t *y, int32_t *fy) {
+
+  int16_t sign1=1, sign2=1;
+  int32_t r32, q32, t32, nom32, den32;
+  int16_t t16, tmp16, tmp16_1;
+
+  if ((fx[0]>0) && (fx[2]>0)) {
+    r32=fx[1]-fx[2];
+    q32=fx[0]-fx[1];
+    nom32=q32+r32;
+    den32 = (q32 - r32) * 2;
+    if (nom32<0)
+      sign1=-1;
+    if (den32<0)
+      sign2=-1;
+
+    /* t = (q32+r32)/(2*(q32-r32)) = (fx[0]-fx[1] + fx[1]-fx[2])/(2 * fx[0]-fx[1] - (fx[1]-fx[2]))*/
+    /* (Signs are removed because WebRtcSpl_DivResultInQ31 can't handle negative numbers) */
+    /* t in Q31, without signs */
+    t32 = WebRtcSpl_DivResultInQ31(nom32 * sign1, den32 * sign2);
+
+    t16 = (int16_t)(t32 >> 23);  /* Q8 */
+    t16=t16*sign1*sign2;        /* t in Q8 with signs */
+
+    *y = x[0]+t16;          /* Q8 */
+    // *y = x[1]+t16;          /* Q8 */
+
+    /* The following code calculates fy in three steps */
+    /* fy = 0.5 * t * (t-1) * fx[0] + (1-t*t) * fx[1] + 0.5 * t * (t+1) * fx[2]; */
+
+    /* Part I: 0.5 * t * (t-1) * fx[0] */
+    tmp16_1 = (int16_t)(t16 * t16);  /* Q8*Q8=Q16 */
+    tmp16_1 >>= 2;  /* Q16>>2 = Q14 */
+    t16 <<= 6;  /* Q8<<6 = Q14  */
+    tmp16 = tmp16_1-t16;
+    *fy = WEBRTC_SPL_MUL_16_32_RSFT15(tmp16, fx[0]); /* (Q14 * Q8 >>15)/2 = Q8 */
+
+    /* Part II: (1-t*t) * fx[1] */
+    tmp16 = 16384-tmp16_1;        /* 1 in Q14 - Q14 */
+    *fy += WEBRTC_SPL_MUL_16_32_RSFT14(tmp16, fx[1]);/* Q14 * Q8 >> 14 = Q8 */
+
+    /* Part III: 0.5 * t * (t+1) * fx[2] */
+    tmp16 = tmp16_1+t16;
+    *fy += WEBRTC_SPL_MUL_16_32_RSFT15(tmp16, fx[2]);/* (Q14 * Q8 >>15)/2 = Q8 */
+  } else {
+    *y = x[0];
+    *fy= fx[1];
+  }
+}
+
+
+static void FindFour32(int32_t *in, int16_t length, int16_t *bestind)
+{
+  int32_t best[4]= {-100, -100, -100, -100};
+  int16_t k;
+
+  for (k=0; k<length; k++) {
+    if (in[k] > best[3]) {
+      if (in[k] > best[2]) {
+        if (in[k] > best[1]) {
+          if (in[k] > best[0]) { // The Best
+            best[3] = best[2];
+            bestind[3] = bestind[2];
+            best[2] = best[1];
+            bestind[2] = bestind[1];
+            best[1] = best[0];
+            bestind[1] = bestind[0];
+            best[0] = in[k];
+            bestind[0] = k;
+          } else { // 2nd best
+            best[3] = best[2];
+            bestind[3] = bestind[2];
+            best[2] = best[1];
+            bestind[2] = bestind[1];
+            best[1] = in[k];
+            bestind[1] = k;
+          }
+        } else { // 3rd best
+          best[3] = best[2];
+          bestind[3] = bestind[2];
+          best[2] = in[k];
+          bestind[2] = k;
+        }
+      } else {  // 4th best
+        best[3] = in[k];
+        bestind[3] = k;
+      }
+    }
+  }
+}
+
+
+
+
+
+extern void WebRtcIsacfix_PCorr2Q32(const int16_t *in, int32_t *logcorQ8);
+
+
+
+void WebRtcIsacfix_InitialPitch(const int16_t *in, /* Q0 */
+                                PitchAnalysisStruct *State,
+                                int16_t *lagsQ7                   /* Q7 */
+                                )
+{
+  int16_t buf_dec16[PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2+2];
+  int32_t *crrvecQ8_1,*crrvecQ8_2;
+  int32_t cv1q[PITCH_LAG_SPAN2+2],cv2q[PITCH_LAG_SPAN2+2], peakvq[PITCH_LAG_SPAN2+2];
+  int k;
+  int16_t peaks_indq;
+  int16_t peakiq[PITCH_LAG_SPAN2];
+  int32_t corr;
+  int32_t corr32, corr_max32, corr_max_o32;
+  int16_t npkq;
+  int16_t best4q[4]={0,0,0,0};
+  int32_t xq[3],yq[1],fyq[1];
+  int32_t *fxq;
+  int32_t best_lag1q, best_lag2q;
+  int32_t tmp32a,tmp32b,lag32,ratq;
+  int16_t start;
+  int16_t oldgQ12, tmp16a, tmp16b, gain_bias16,tmp16c, tmp16d, bias16;
+  int32_t tmp32c,tmp32d, tmp32e;
+  int16_t old_lagQ;
+  int32_t old_lagQ8;
+  int32_t lagsQ8[4];
+
+  old_lagQ = State->PFstr_wght.oldlagQ7; // Q7
+  old_lagQ8 = old_lagQ << 1;  // Q8
+
+  oldgQ12= State->PFstr_wght.oldgainQ12;
+
+  crrvecQ8_1=&cv1q[1];
+  crrvecQ8_2=&cv2q[1];
+
+
+  /* copy old values from state buffer */
+  memcpy(buf_dec16, State->dec_buffer16, sizeof(State->dec_buffer16));
+
+  /* decimation; put result after the old values */
+  WebRtcIsacfix_DecimateAllpass32(in, State->decimator_state32, PITCH_FRAME_LEN,
+                                  &buf_dec16[PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2]);
+
+  /* low-pass filtering */
+  start= PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2;
+  WebRtcSpl_FilterARFastQ12(&buf_dec16[start],&buf_dec16[start],(int16_t*)kACoefQ12,3, PITCH_FRAME_LEN/2);
+
+  /* copy end part back into state buffer */
+  for (k = 0; k < (PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2); k++)
+    State->dec_buffer16[k] = buf_dec16[k+PITCH_FRAME_LEN/2];
+
+
+  /* compute correlation for first and second half of the frame */
+  WebRtcIsacfix_PCorr2Q32(buf_dec16, crrvecQ8_1);
+  WebRtcIsacfix_PCorr2Q32(buf_dec16 + PITCH_CORR_STEP2, crrvecQ8_2);
+
+
+  /* bias towards pitch lag of previous frame */
+  tmp32a = WebRtcIsacfix_Log2Q8((uint32_t) old_lagQ8) - 2304;
+      // log2(0.5*oldlag) in Q8
+  tmp32b = oldgQ12 * oldgQ12 >> 10;  // Q12 & * 4.0;
+  gain_bias16 = (int16_t) tmp32b;  //Q12
+  if (gain_bias16 > 3276) gain_bias16 = 3276; // 0.8 in Q12
+
+
+  for (k = 0; k < PITCH_LAG_SPAN2; k++)
+  {
+    if (crrvecQ8_1[k]>0) {
+      tmp32b = WebRtcIsacfix_Log2Q8((uint32_t) (k + (PITCH_MIN_LAG/2-2)));
+      tmp16a = (int16_t) (tmp32b - tmp32a); // Q8 & fabs(ratio)<4
+      tmp32c = tmp16a * tmp16a >> 6;  // Q10
+      tmp16b = (int16_t) tmp32c; // Q10 & <8
+      tmp32d = tmp16b * 177 >> 8;  // mult with ln2 in Q8
+      tmp16c = (int16_t) tmp32d; // Q10 & <4
+      tmp16d = Exp2Q10((int16_t) -tmp16c); //Q10
+      tmp32c = gain_bias16 * tmp16d >> 13;  // Q10  & * 0.5
+      bias16 = (int16_t) (1024 + tmp32c); // Q10
+      tmp32b = WebRtcIsacfix_Log2Q8((uint32_t)bias16) - 2560;
+          // Q10 in -> Q8 out with 10*2^8 offset
+      crrvecQ8_1[k] += tmp32b ; // -10*2^8 offset
+    }
+  }
+
+  /* taper correlation functions */
+  for (k = 0; k < 3; k++) {
+    crrvecQ8_1[k] += kLogLagWinQ8[k];
+    crrvecQ8_2[k] += kLogLagWinQ8[k];
+
+    crrvecQ8_1[PITCH_LAG_SPAN2-1-k] += kLogLagWinQ8[k];
+    crrvecQ8_2[PITCH_LAG_SPAN2-1-k] += kLogLagWinQ8[k];
+  }
+
+
+  /* Make zeropadded corr vectors */
+  cv1q[0]=0;
+  cv2q[0]=0;
+  cv1q[PITCH_LAG_SPAN2+1]=0;
+  cv2q[PITCH_LAG_SPAN2+1]=0;
+  corr_max32 = 0;
+
+  for (k = 1; k <= PITCH_LAG_SPAN2; k++)
+  {
+
+
+    corr32=crrvecQ8_1[k-1];
+    if (corr32 > corr_max32)
+      corr_max32 = corr32;
+
+    corr32=crrvecQ8_2[k-1];
+    corr32 += -4; // Compensate for later (log2(0.99))
+
+    if (corr32 > corr_max32)
+      corr_max32 = corr32;
+
+  }
+
+  /* threshold value to qualify as a peak */
+  // corr_max32 += -726; // log(0.14)/log(2.0) in Q8
+  corr_max32 += -1000; // log(0.14)/log(2.0) in Q8
+  corr_max_o32 = corr_max32;
+
+
+  /* find peaks in corr1 */
+  peaks_indq = 0;
+  for (k = 1; k <= PITCH_LAG_SPAN2; k++)
+  {
+    corr32=cv1q[k];
+    if (corr32>corr_max32) { // Disregard small peaks
+      if ((corr32>=cv1q[k-1]) && (corr32>cv1q[k+1])) { // Peak?
+        peakvq[peaks_indq] = corr32;
+        peakiq[peaks_indq++] = k;
+      }
+    }
+  }
+
+
+  /* find highest interpolated peak */
+  corr_max32=0;
+  best_lag1q =0;
+  if (peaks_indq > 0) {
+    FindFour32(peakvq, (int16_t) peaks_indq, best4q);
+    npkq = WEBRTC_SPL_MIN(peaks_indq, 4);
+
+    for (k=0;k<npkq;k++) {
+
+      lag32 =  peakiq[best4q[k]];
+      fxq = &cv1q[peakiq[best4q[k]]-1];
+      xq[0]= lag32;
+      xq[0] <<= 8;
+      Intrp1DQ8(xq, fxq, yq, fyq);
+
+      tmp32a= WebRtcIsacfix_Log2Q8((uint32_t) *yq) - 2048; // offset 8*2^8
+      /* Bias towards short lags */
+      /* log(pow(0.8, log(2.0 * *y )))/log(2.0) */
+      tmp32b = (int16_t)tmp32a * -42 >> 8;
+      tmp32c= tmp32b + 256;
+      *fyq += tmp32c;
+      if (*fyq > corr_max32) {
+        corr_max32 = *fyq;
+        best_lag1q = *yq;
+      }
+    }
+    tmp32b = (best_lag1q - OFFSET_Q8) * 2;
+    lagsQ8[0] = tmp32b + PITCH_MIN_LAG_Q8;
+    lagsQ8[1] = lagsQ8[0];
+  } else {
+    lagsQ8[0] = old_lagQ8;
+    lagsQ8[1] = lagsQ8[0];
+  }
+
+  /* Bias towards constant pitch */
+  tmp32a = lagsQ8[0] - PITCH_MIN_LAG_Q8;
+  ratq = (tmp32a >> 1) + OFFSET_Q8;
+
+  for (k = 1; k <= PITCH_LAG_SPAN2; k++)
+  {
+    tmp32a = k << 7; // 0.5*k Q8
+    tmp32b = tmp32a * 2 - ratq;  // Q8
+    tmp32c = (int16_t)tmp32b * (int16_t)tmp32b >> 8;  // Q8
+
+    tmp32b = tmp32c + (ratq >> 1);
+        // (k-r)^2 + 0.5 * r  Q8
+    tmp32c = WebRtcIsacfix_Log2Q8((uint32_t)tmp32a) - 2048;
+        // offset 8*2^8 , log2(0.5*k) Q8
+    tmp32d = WebRtcIsacfix_Log2Q8((uint32_t)tmp32b) - 2048;
+        // offset 8*2^8 , log2(0.5*k) Q8
+    tmp32e =  tmp32c - tmp32d;
+
+    cv2q[k] += tmp32e >> 1;
+
+  }
+
+  /* find peaks in corr2 */
+  corr_max32 = corr_max_o32;
+  peaks_indq = 0;
+
+  for (k = 1; k <= PITCH_LAG_SPAN2; k++)
+  {
+    corr=cv2q[k];
+    if (corr>corr_max32) { // Disregard small peaks
+      if ((corr>=cv2q[k-1]) && (corr>cv2q[k+1])) { // Peak?
+        peakvq[peaks_indq] = corr;
+        peakiq[peaks_indq++] = k;
+      }
+    }
+  }
+
+
+
+  /* find highest interpolated peak */
+  corr_max32 = 0;
+  best_lag2q =0;
+  if (peaks_indq > 0) {
+
+    FindFour32(peakvq, (int16_t) peaks_indq, best4q);
+    npkq = WEBRTC_SPL_MIN(peaks_indq, 4);
+    for (k=0;k<npkq;k++) {
+
+      lag32 =  peakiq[best4q[k]];
+      fxq = &cv2q[peakiq[best4q[k]]-1];
+
+      xq[0]= lag32;
+      xq[0] <<= 8;
+      Intrp1DQ8(xq, fxq, yq, fyq);
+
+      /* Bias towards short lags */
+      /* log(pow(0.8, log(2.0f * *y )))/log(2.0f) */
+      tmp32a= WebRtcIsacfix_Log2Q8((uint32_t) *yq) - 2048; // offset 8*2^8
+      tmp32b = (int16_t)tmp32a * -82 >> 8;
+      tmp32c= tmp32b + 256;
+      *fyq += tmp32c;
+      if (*fyq > corr_max32) {
+        corr_max32 = *fyq;
+        best_lag2q = *yq;
+      }
+    }
+
+    tmp32b = (best_lag2q - OFFSET_Q8) * 2;
+    lagsQ8[2] = tmp32b + PITCH_MIN_LAG_Q8;
+    lagsQ8[3] = lagsQ8[2];
+  } else {
+    lagsQ8[2] = lagsQ8[0];
+    lagsQ8[3] = lagsQ8[0];
+  }
+
+  lagsQ7[0] = (int16_t)(lagsQ8[0] >> 1);
+  lagsQ7[1] = (int16_t)(lagsQ8[1] >> 1);
+  lagsQ7[2] = (int16_t)(lagsQ8[2] >> 1);
+  lagsQ7[3] = (int16_t)(lagsQ8[3] >> 1);
+}
+
+
+
+void WebRtcIsacfix_PitchAnalysis(const int16_t *inn,               /* PITCH_FRAME_LEN samples */
+                                 int16_t *outQ0,                  /* PITCH_FRAME_LEN+QLOOKAHEAD samples */
+                                 PitchAnalysisStruct *State,
+                                 int16_t *PitchLags_Q7,
+                                 int16_t *PitchGains_Q12)
+{
+  int16_t inbufQ0[PITCH_FRAME_LEN + QLOOKAHEAD];
+  int16_t k;
+
+  /* inital pitch estimate */
+  WebRtcIsacfix_InitialPitch(inn, State,  PitchLags_Q7);
+
+
+  /* Calculate gain */
+  WebRtcIsacfix_PitchFilterGains(inn, &(State->PFstr_wght), PitchLags_Q7, PitchGains_Q12);
+
+  /* concatenate previous input's end and current input */
+  for (k = 0; k < QLOOKAHEAD; k++) {
+    inbufQ0[k] = State->inbuf[k];
+  }
+  for (k = 0; k < PITCH_FRAME_LEN; k++) {
+    inbufQ0[k+QLOOKAHEAD] = (int16_t) inn[k];
+  }
+
+  /* lookahead pitch filtering for masking analysis */
+  WebRtcIsacfix_PitchFilter(inbufQ0, outQ0, &(State->PFstr), PitchLags_Q7,PitchGains_Q12, 2);
+
+
+  /* store last part of input */
+  for (k = 0; k < QLOOKAHEAD; k++) {
+    State->inbuf[k] = inbufQ0[k + PITCH_FRAME_LEN];
+  }
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h
new file mode 100644
index 0000000..994cce7
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * pitch_estimator.h
+ *
+ * Pitch functions
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_ESTIMATOR_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_ESTIMATOR_H_
+
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+
+void WebRtcIsacfix_PitchAnalysis(const int16_t *in,               /* PITCH_FRAME_LEN samples */
+                                 int16_t *outQ0,                  /* PITCH_FRAME_LEN+QLOOKAHEAD samples */
+                                 PitchAnalysisStruct *State,
+                                 int16_t *lagsQ7,
+                                 int16_t *PitchGains_Q12);
+
+void WebRtcIsacfix_InitialPitch(const int16_t *in,
+                                PitchAnalysisStruct *State,
+                                int16_t *qlags);
+
+void WebRtcIsacfix_PitchFilter(int16_t *indatFix,
+                               int16_t *outdatQQ,
+                               PitchFiltstr *pfp,
+                               int16_t *lagsQ7,
+                               int16_t *gainsQ12,
+                               int16_t type);
+
+void WebRtcIsacfix_PitchFilterCore(int loopNumber,
+                                   int16_t gain,
+                                   size_t index,
+                                   int16_t sign,
+                                   int16_t* inputState,
+                                   int16_t* outputBuff2,
+                                   const int16_t* coefficient,
+                                   int16_t* inputBuf,
+                                   int16_t* outputBuf,
+                                   int* index2);
+
+void WebRtcIsacfix_PitchFilterGains(const int16_t *indatQ0,
+                                    PitchFiltstr *pfp,
+                                    int16_t *lagsQ7,
+                                    int16_t *gainsQ12);
+
+void WebRtcIsacfix_DecimateAllpass32(const int16_t *in,
+                                     int32_t *state_in,        /* array of size: 2*ALLPASSSECTIONS+1 */
+                                     int16_t N,                   /* number of input samples */
+                                     int16_t *out);             /* array of size N/2 */
+
+int32_t WebRtcIsacfix_Log2Q8( uint32_t x );
+
+void WebRtcIsacfix_PCorr2Q32(const int16_t* in, int32_t* logcorQ8);
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_ESTIMATOR_H_ */
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_c.c b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_c.c
new file mode 100644
index 0000000..1214e23
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_c.c
@@ -0,0 +1,112 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+
+#ifdef WEBRTC_HAS_NEON
+#include <arm_neon.h>
+#endif
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "rtc_base/compile_assert_c.h"
+
+extern int32_t WebRtcIsacfix_Log2Q8(uint32_t x);
+
+void WebRtcIsacfix_PCorr2Q32(const int16_t* in, int32_t* logcorQ8) {
+  int16_t scaling,n,k;
+  int32_t ysum32,csum32, lys, lcs;
+  const int32_t oneQ8 = 1 << 8;  // 1.00 in Q8
+  const int16_t* x;
+  const int16_t* inptr;
+
+  x = in + PITCH_MAX_LAG / 2 + 2;
+  scaling = WebRtcSpl_GetScalingSquare((int16_t*)in,
+                                       PITCH_CORR_LEN2,
+                                       PITCH_CORR_LEN2);
+  ysum32 = 1;
+  csum32 = 0;
+  x = in + PITCH_MAX_LAG / 2 + 2;
+  for (n = 0; n < PITCH_CORR_LEN2; n++) {
+    ysum32 += in[n] * in[n] >> scaling;  // Q0
+    csum32 += x[n] * in[n] >> scaling;  // Q0
+  }
+  logcorQ8 += PITCH_LAG_SPAN2 - 1;
+  lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum32) >> 1; // Q8, sqrt(ysum)
+  if (csum32 > 0) {
+    lcs = WebRtcIsacfix_Log2Q8((uint32_t)csum32);  // 2log(csum) in Q8
+    if (lcs > (lys + oneQ8)) {          // csum/sqrt(ysum) > 2 in Q8
+      *logcorQ8 = lcs - lys;            // log2(csum/sqrt(ysum))
+    } else {
+      *logcorQ8 = oneQ8;                // 1.00
+    }
+  } else {
+    *logcorQ8 = 0;
+  }
+
+
+  for (k = 1; k < PITCH_LAG_SPAN2; k++) {
+    inptr = &in[k];
+    ysum32 -= in[k - 1] * in[k - 1] >> scaling;
+    ysum32 += in[PITCH_CORR_LEN2 + k - 1] * in[PITCH_CORR_LEN2 + k - 1] >>
+        scaling;
+
+#ifdef WEBRTC_HAS_NEON
+    {
+      int32_t vbuff[4];
+      int32x4_t int_32x4_sum = vmovq_n_s32(0);
+      // Can't shift a Neon register to right with a non-constant shift value.
+      int32x4_t int_32x4_scale = vdupq_n_s32(-scaling);
+      // Assert a codition used in loop unrolling at compile-time.
+      RTC_COMPILE_ASSERT(PITCH_CORR_LEN2 %4 == 0);
+
+      for (n = 0; n < PITCH_CORR_LEN2; n += 4) {
+        int16x4_t int_16x4_x = vld1_s16(&x[n]);
+        int16x4_t int_16x4_in = vld1_s16(&inptr[n]);
+        int32x4_t int_32x4 = vmull_s16(int_16x4_x, int_16x4_in);
+        int_32x4 = vshlq_s32(int_32x4, int_32x4_scale);
+        int_32x4_sum = vaddq_s32(int_32x4_sum, int_32x4);
+      }
+
+      // Use vector store to avoid long stall from data trasferring
+      // from vector to general register.
+      vst1q_s32(vbuff, int_32x4_sum);
+      csum32 = vbuff[0] + vbuff[1];
+      csum32 += vbuff[2];
+      csum32 += vbuff[3];
+    }
+#else
+    csum32 = 0;
+    if(scaling == 0) {
+      for (n = 0; n < PITCH_CORR_LEN2; n++) {
+        csum32 += x[n] * inptr[n];
+      }
+    } else {
+      for (n = 0; n < PITCH_CORR_LEN2; n++) {
+        csum32 += (x[n] * inptr[n]) >> scaling;
+      }
+    }
+#endif
+
+    logcorQ8--;
+
+    lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum32) >> 1; // Q8, sqrt(ysum)
+
+    if (csum32 > 0) {
+      lcs = WebRtcIsacfix_Log2Q8((uint32_t)csum32);  // 2log(csum) in Q8
+      if (lcs > (lys + oneQ8)) {          // csum/sqrt(ysum) > 2
+        *logcorQ8 = lcs - lys;            // log2(csum/sqrt(ysum))
+      } else {
+        *logcorQ8 = oneQ8;                // 1.00
+      }
+    } else {
+      *logcorQ8 = 0;
+    }
+  }
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_mips.c b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_mips.c
new file mode 100644
index 0000000..4ead84c
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_mips.c
@@ -0,0 +1,193 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "rtc_base/compile_assert_c.h"
+
+extern int32_t WebRtcIsacfix_Log2Q8(uint32_t x);
+
+void WebRtcIsacfix_PCorr2Q32(const int16_t* in, int32_t* logcorQ8) {
+  int16_t scaling,n,k;
+  int32_t ysum32,csum32, lys, lcs;
+  const int32_t oneQ8 = 1 << 8;  // 1.00 in Q8
+  const int16_t* x;
+  const int16_t* inptr;
+
+  x = in + PITCH_MAX_LAG / 2 + 2;
+  scaling = WebRtcSpl_GetScalingSquare((int16_t*)in,
+                                       PITCH_CORR_LEN2,
+                                       PITCH_CORR_LEN2);
+  ysum32 = 1;
+  csum32 = 0;
+  x = in + PITCH_MAX_LAG / 2 + 2;
+  {
+    const int16_t* tmp_x = x;
+    const int16_t* tmp_in = in;
+    int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
+    n = PITCH_CORR_LEN2;
+    RTC_COMPILE_ASSERT(PITCH_CORR_LEN2 % 4 == 0);
+    __asm __volatile (
+      ".set       push                                          \n\t"
+      ".set       noreorder                                     \n\t"
+     "1:                                                        \n\t"
+      "lh         %[tmp1],       0(%[tmp_in])                   \n\t"
+      "lh         %[tmp2],       2(%[tmp_in])                   \n\t"
+      "lh         %[tmp3],       4(%[tmp_in])                   \n\t"
+      "lh         %[tmp4],       6(%[tmp_in])                   \n\t"
+      "lh         %[tmp5],       0(%[tmp_x])                    \n\t"
+      "lh         %[tmp6],       2(%[tmp_x])                    \n\t"
+      "lh         %[tmp7],       4(%[tmp_x])                    \n\t"
+      "lh         %[tmp8],       6(%[tmp_x])                    \n\t"
+      "mul        %[tmp5],       %[tmp1],        %[tmp5]        \n\t"
+      "mul        %[tmp1],       %[tmp1],        %[tmp1]        \n\t"
+      "mul        %[tmp6],       %[tmp2],        %[tmp6]        \n\t"
+      "mul        %[tmp2],       %[tmp2],        %[tmp2]        \n\t"
+      "mul        %[tmp7],       %[tmp3],        %[tmp7]        \n\t"
+      "mul        %[tmp3],       %[tmp3],        %[tmp3]        \n\t"
+      "mul        %[tmp8],       %[tmp4],        %[tmp8]        \n\t"
+      "mul        %[tmp4],       %[tmp4],        %[tmp4]        \n\t"
+      "addiu      %[n],          %[n],           -4             \n\t"
+      "srav       %[tmp5],       %[tmp5],        %[scaling]     \n\t"
+      "srav       %[tmp1],       %[tmp1],        %[scaling]     \n\t"
+      "srav       %[tmp6],       %[tmp6],        %[scaling]     \n\t"
+      "srav       %[tmp2],       %[tmp2],        %[scaling]     \n\t"
+      "srav       %[tmp7],       %[tmp7],        %[scaling]     \n\t"
+      "srav       %[tmp3],       %[tmp3],        %[scaling]     \n\t"
+      "srav       %[tmp8],       %[tmp8],        %[scaling]     \n\t"
+      "srav       %[tmp4],       %[tmp4],        %[scaling]     \n\t"
+      "addu       %[ysum32],     %[ysum32],      %[tmp1]        \n\t"
+      "addu       %[csum32],     %[csum32],      %[tmp5]        \n\t"
+      "addu       %[ysum32],     %[ysum32],      %[tmp2]        \n\t"
+      "addu       %[csum32],     %[csum32],      %[tmp6]        \n\t"
+      "addu       %[ysum32],     %[ysum32],      %[tmp3]        \n\t"
+      "addu       %[csum32],     %[csum32],      %[tmp7]        \n\t"
+      "addu       %[ysum32],     %[ysum32],      %[tmp4]        \n\t"
+      "addu       %[csum32],     %[csum32],      %[tmp8]        \n\t"
+      "addiu      %[tmp_in],     %[tmp_in],      8              \n\t"
+      "bgtz       %[n],          1b                             \n\t"
+      " addiu     %[tmp_x],      %[tmp_x],       8              \n\t"
+      ".set       pop                                           \n\t"
+      : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3),
+        [tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6),
+        [tmp7] "=&r" (tmp7), [tmp8] "=&r" (tmp8), [tmp_in] "+r" (tmp_in),
+        [ysum32] "+r" (ysum32), [tmp_x] "+r" (tmp_x), [csum32] "+r" (csum32),
+        [n] "+r" (n)
+      : [scaling] "r" (scaling)
+      : "memory", "hi", "lo"
+    );
+  }
+  logcorQ8 += PITCH_LAG_SPAN2 - 1;
+  lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum32) >> 1; // Q8, sqrt(ysum)
+  if (csum32 > 0) {
+    lcs = WebRtcIsacfix_Log2Q8((uint32_t)csum32);  // 2log(csum) in Q8
+    if (lcs > (lys + oneQ8)) {  // csum/sqrt(ysum) > 2 in Q8
+      *logcorQ8 = lcs - lys;  // log2(csum/sqrt(ysum))
+    } else {
+      *logcorQ8 = oneQ8;  // 1.00
+    }
+  } else {
+    *logcorQ8 = 0;
+  }
+
+  for (k = 1; k < PITCH_LAG_SPAN2; k++) {
+    inptr = &in[k];
+    const int16_t* tmp_in1 = &in[k - 1];
+    const int16_t* tmp_in2 = &in[PITCH_CORR_LEN2 + k - 1];
+    const int16_t* tmp_x = x;
+    int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
+    n = PITCH_CORR_LEN2;
+    csum32 = 0;
+    __asm __volatile (
+      ".set       push                                             \n\t"
+      ".set       noreorder                                        \n\t"
+      "lh         %[tmp1],        0(%[tmp_in1])                    \n\t"
+      "lh         %[tmp2],        0(%[tmp_in2])                    \n\t"
+      "mul        %[tmp1],        %[tmp1],         %[tmp1]         \n\t"
+      "mul        %[tmp2],        %[tmp2],         %[tmp2]         \n\t"
+      "srav       %[tmp1],        %[tmp1],         %[scaling]      \n\t"
+      "srav       %[tmp2],        %[tmp2],         %[scaling]      \n\t"
+      "subu       %[ysum32],      %[ysum32],       %[tmp1]         \n\t"
+      "bnez       %[scaling],     2f                               \n\t"
+      " addu      %[ysum32],      %[ysum32],       %[tmp2]         \n\t"
+     "1:                                                           \n\t"
+      "lh         %[tmp1],        0(%[inptr])                      \n\t"
+      "lh         %[tmp2],        0(%[tmp_x])                      \n\t"
+      "lh         %[tmp3],        2(%[inptr])                      \n\t"
+      "lh         %[tmp4],        2(%[tmp_x])                      \n\t"
+      "lh         %[tmp5],        4(%[inptr])                      \n\t"
+      "lh         %[tmp6],        4(%[tmp_x])                      \n\t"
+      "lh         %[tmp7],        6(%[inptr])                      \n\t"
+      "lh         %[tmp8],        6(%[tmp_x])                      \n\t"
+      "mul        %[tmp1],        %[tmp1],         %[tmp2]         \n\t"
+      "mul        %[tmp2],        %[tmp3],         %[tmp4]         \n\t"
+      "mul        %[tmp3],        %[tmp5],         %[tmp6]         \n\t"
+      "mul        %[tmp4],        %[tmp7],         %[tmp8]         \n\t"
+      "addiu      %[n],           %[n],            -4              \n\t"
+      "addiu      %[inptr],       %[inptr],        8               \n\t"
+      "addiu      %[tmp_x],       %[tmp_x],        8               \n\t"
+      "addu       %[csum32],      %[csum32],       %[tmp1]         \n\t"
+      "addu       %[csum32],      %[csum32],       %[tmp2]         \n\t"
+      "addu       %[csum32],      %[csum32],       %[tmp3]         \n\t"
+      "bgtz       %[n],           1b                               \n\t"
+      " addu      %[csum32],      %[csum32],       %[tmp4]         \n\t"
+      "b          3f                                               \n\t"
+      " nop                                                        \n\t"
+     "2:                                                           \n\t"
+      "lh         %[tmp1],        0(%[inptr])                      \n\t"
+      "lh         %[tmp2],        0(%[tmp_x])                      \n\t"
+      "lh         %[tmp3],        2(%[inptr])                      \n\t"
+      "lh         %[tmp4],        2(%[tmp_x])                      \n\t"
+      "lh         %[tmp5],        4(%[inptr])                      \n\t"
+      "lh         %[tmp6],        4(%[tmp_x])                      \n\t"
+      "lh         %[tmp7],        6(%[inptr])                      \n\t"
+      "lh         %[tmp8],        6(%[tmp_x])                      \n\t"
+      "mul        %[tmp1],        %[tmp1],         %[tmp2]         \n\t"
+      "mul        %[tmp2],        %[tmp3],         %[tmp4]         \n\t"
+      "mul        %[tmp3],        %[tmp5],         %[tmp6]         \n\t"
+      "mul        %[tmp4],        %[tmp7],         %[tmp8]         \n\t"
+      "addiu      %[n],           %[n],            -4              \n\t"
+      "addiu      %[inptr],       %[inptr],        8               \n\t"
+      "addiu      %[tmp_x],       %[tmp_x],        8               \n\t"
+      "srav       %[tmp1],        %[tmp1],         %[scaling]      \n\t"
+      "srav       %[tmp2],        %[tmp2],         %[scaling]      \n\t"
+      "srav       %[tmp3],        %[tmp3],         %[scaling]      \n\t"
+      "srav       %[tmp4],        %[tmp4],         %[scaling]      \n\t"
+      "addu       %[csum32],      %[csum32],       %[tmp1]         \n\t"
+      "addu       %[csum32],      %[csum32],       %[tmp2]         \n\t"
+      "addu       %[csum32],      %[csum32],       %[tmp3]         \n\t"
+      "bgtz       %[n],           2b                               \n\t"
+      " addu      %[csum32],      %[csum32],       %[tmp4]         \n\t"
+     "3:                                                           \n\t"
+      ".set       pop                                              \n\t"
+      : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3),
+        [tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6),
+        [tmp7] "=&r" (tmp7), [tmp8] "=&r" (tmp8), [inptr] "+r" (inptr),
+        [csum32] "+r" (csum32), [tmp_x] "+r" (tmp_x), [ysum32] "+r" (ysum32),
+        [n] "+r" (n)
+      : [tmp_in1] "r" (tmp_in1), [tmp_in2] "r" (tmp_in2),
+        [scaling] "r" (scaling)
+      : "memory", "hi", "lo"
+    );
+
+    logcorQ8--;
+    lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum32) >> 1; // Q8, sqrt(ysum)
+    if (csum32 > 0) {
+      lcs = WebRtcIsacfix_Log2Q8((uint32_t)csum32); // 2log(csum) in Q8
+      if (lcs > (lys + oneQ8)) { // csum/sqrt(ysum) > 2
+        *logcorQ8 = lcs - lys;  // log2(csum/sqrt(ysum))
+      } else {
+        *logcorQ8 = oneQ8;  // 1.00
+      }
+    } else {
+      *logcorQ8 = 0;
+    }
+  }
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c b/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c
new file mode 100644
index 0000000..e565e85
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c
@@ -0,0 +1,248 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "modules/audio_coding/codecs/isac/fix/source/structs.h"
+#include "rtc_base/compile_assert_c.h"
+
+// Number of segments in a pitch subframe.
+static const int kSegments = 5;
+
+// A division factor of 1/5 in Q15.
+static const int16_t kDivFactor = 6553;
+
+// Interpolation coefficients; generated by design_pitch_filter.m.
+// Coefficients are stored in Q14.
+static const int16_t kIntrpCoef[PITCH_FRACS][PITCH_FRACORDER] = {
+  {-367, 1090, -2706,  9945, 10596, -3318,  1626, -781,  287},
+  {-325,  953, -2292,  7301, 12963, -3320,  1570, -743,  271},
+  {-240,  693, -1622,  4634, 14809, -2782,  1262, -587,  212},
+  {-125,  358,  -817,  2144, 15982, -1668,   721, -329,  118},
+  {   0,    0,    -1,     1, 16380,     1,    -1,    0,    0},
+  { 118, -329,   721, -1668, 15982,  2144,  -817,  358, -125},
+  { 212, -587,  1262, -2782, 14809,  4634, -1622,  693, -240},
+  { 271, -743,  1570, -3320, 12963,  7301, -2292,  953, -325}
+};
+
+static __inline size_t CalcLrIntQ(int16_t fixVal,
+                                  int16_t qDomain) {
+  int32_t roundVal = 1 << (qDomain - 1);
+
+  return (fixVal + roundVal) >> qDomain;
+}
+
+void WebRtcIsacfix_PitchFilter(int16_t* indatQQ, // Q10 if type is 1 or 4,
+                                                       // Q0 if type is 2.
+                               int16_t* outdatQQ,
+                               PitchFiltstr* pfp,
+                               int16_t* lagsQ7,
+                               int16_t* gainsQ12,
+                               int16_t type) {
+  int    k, ind, cnt;
+  int16_t sign = 1;
+  int16_t inystateQQ[PITCH_DAMPORDER];
+  int16_t ubufQQ[PITCH_INTBUFFSIZE + QLOOKAHEAD];
+  const int16_t Gain = 21299;     // 1.3 in Q14
+  int16_t oldLagQ7;
+  int16_t oldGainQ12, lagdeltaQ7, curLagQ7, gaindeltaQ12, curGainQ12;
+  size_t indW32 = 0, frcQQ = 0;
+  const int16_t* fracoeffQQ = NULL;
+
+  // Assumptions in ARM assembly for WebRtcIsacfix_PitchFilterCoreARM().
+  RTC_COMPILE_ASSERT(PITCH_FRACORDER == 9);
+  RTC_COMPILE_ASSERT(PITCH_DAMPORDER == 5);
+
+  // Set up buffer and states.
+  memcpy(ubufQQ, pfp->ubufQQ, sizeof(pfp->ubufQQ));
+  memcpy(inystateQQ, pfp->ystateQQ, sizeof(inystateQQ));
+
+  // Get old lag and gain value from memory.
+  oldLagQ7 = pfp->oldlagQ7;
+  oldGainQ12 = pfp->oldgainQ12;
+
+  if (type == 4) {
+    sign = -1;
+
+    // Make output more periodic.
+    for (k = 0; k < PITCH_SUBFRAMES; k++) {
+      gainsQ12[k] = (int16_t)(gainsQ12[k] * Gain >> 14);
+    }
+  }
+
+  // No interpolation if pitch lag step is big.
+  if (((lagsQ7[0] * 3 >> 1) < oldLagQ7) || (lagsQ7[0] > (oldLagQ7 * 3 >> 1))) {
+    oldLagQ7 = lagsQ7[0];
+    oldGainQ12 = gainsQ12[0];
+  }
+
+  ind = 0;
+
+  for (k = 0; k < PITCH_SUBFRAMES; k++) {
+    // Calculate interpolation steps.
+    lagdeltaQ7 = lagsQ7[k] - oldLagQ7;
+    lagdeltaQ7 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+                  lagdeltaQ7, kDivFactor, 15);
+    curLagQ7 = oldLagQ7;
+    gaindeltaQ12 = gainsQ12[k] - oldGainQ12;
+    gaindeltaQ12 = (int16_t)(gaindeltaQ12 * kDivFactor >> 15);
+
+    curGainQ12 = oldGainQ12;
+    oldLagQ7 = lagsQ7[k];
+    oldGainQ12 = gainsQ12[k];
+
+    // Each frame has 4 60-sample pitch subframes, and each subframe has 5
+    // 12-sample segments. Each segment need to be processed with
+    // newly-updated parameters, so we break the pitch filtering into
+    // two for-loops (5 x 12) below. It's also why kDivFactor = 0.2 (in Q15).
+    for (cnt = 0; cnt < kSegments; cnt++) {
+      // Update parameters for each segment.
+      curGainQ12 += gaindeltaQ12;
+      curLagQ7 += lagdeltaQ7;
+      indW32 = CalcLrIntQ(curLagQ7, 7);
+      if (indW32 < PITCH_FRACORDER - 2) {
+        // WebRtcIsacfix_PitchFilterCore requires indW32 >= PITCH_FRACORDER -
+        // 2; otherwise, it will read from entries of ubufQQ that haven't been
+        // written yet. (This problem has only been seen in fuzzer tests, not
+        // in real life.) See Chromium bug 581901.
+        indW32 = PITCH_FRACORDER - 2;
+      }
+      frcQQ = ((indW32 << 7) + 64 - curLagQ7) >> 4;
+
+      if (frcQQ >= PITCH_FRACS) {
+        frcQQ = 0;
+      }
+      fracoeffQQ = kIntrpCoef[frcQQ];
+
+      // Pitch filtering.
+      WebRtcIsacfix_PitchFilterCore(PITCH_SUBFRAME_LEN / kSegments, curGainQ12,
+        indW32, sign, inystateQQ, ubufQQ, fracoeffQQ, indatQQ, outdatQQ, &ind);
+    }
+  }
+
+  // Export buffer and states.
+  memcpy(pfp->ubufQQ, ubufQQ + PITCH_FRAME_LEN, sizeof(pfp->ubufQQ));
+  memcpy(pfp->ystateQQ, inystateQQ, sizeof(pfp->ystateQQ));
+
+  pfp->oldlagQ7 = oldLagQ7;
+  pfp->oldgainQ12 = oldGainQ12;
+
+  if (type == 2) {
+    // Filter look-ahead segment.
+    WebRtcIsacfix_PitchFilterCore(QLOOKAHEAD, curGainQ12, indW32, 1, inystateQQ,
+                ubufQQ, fracoeffQQ, indatQQ, outdatQQ, &ind);
+  }
+}
+
+
+void WebRtcIsacfix_PitchFilterGains(const int16_t* indatQ0,
+                                    PitchFiltstr* pfp,
+                                    int16_t* lagsQ7,
+                                    int16_t* gainsQ12) {
+  int  k, n, m;
+  size_t ind, pos, pos3QQ;
+
+  int16_t ubufQQ[PITCH_INTBUFFSIZE];
+  int16_t oldLagQ7, lagdeltaQ7, curLagQ7;
+  const int16_t* fracoeffQQ = NULL;
+  int16_t scale;
+  int16_t cnt = 0, tmpW16;
+  size_t frcQQ, indW16 = 0;
+  int32_t tmpW32, tmp2W32, csum1QQ, esumxQQ;
+
+  // Set up buffer and states.
+  memcpy(ubufQQ, pfp->ubufQQ, sizeof(pfp->ubufQQ));
+  oldLagQ7 = pfp->oldlagQ7;
+
+  // No interpolation if pitch lag step is big.
+  if (((lagsQ7[0] * 3 >> 1) < oldLagQ7) || (lagsQ7[0] > (oldLagQ7 * 3 >> 1))) {
+    oldLagQ7 = lagsQ7[0];
+  }
+
+  ind = 0;
+  pos = ind + PITCH_BUFFSIZE;
+  scale = 0;
+  for (k = 0; k < PITCH_SUBFRAMES; k++) {
+
+    // Calculate interpolation steps.
+    lagdeltaQ7 = lagsQ7[k] - oldLagQ7;
+    lagdeltaQ7 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+                   lagdeltaQ7, kDivFactor, 15);
+    curLagQ7 = oldLagQ7;
+    oldLagQ7 = lagsQ7[k];
+
+    csum1QQ = 1;
+    esumxQQ = 1;
+
+    // Same as function WebRtcIsacfix_PitchFilter(), we break the pitch
+    // filtering into two for-loops (5 x 12) below.
+    for (cnt = 0; cnt < kSegments; cnt++) {
+      // Update parameters for each segment.
+      curLagQ7 += lagdeltaQ7;
+      indW16 = CalcLrIntQ(curLagQ7, 7);
+      frcQQ = ((indW16 << 7) + 64 - curLagQ7) >> 4;
+
+      if (frcQQ >= PITCH_FRACS) {
+        frcQQ = 0;
+      }
+      fracoeffQQ = kIntrpCoef[frcQQ];
+
+      pos3QQ = pos - (indW16 + 4);
+
+      for (n = 0; n < PITCH_SUBFRAME_LEN / kSegments; n++) {
+        // Filter to get fractional pitch.
+
+        tmpW32 = 0;
+        for (m = 0; m < PITCH_FRACORDER; m++) {
+          tmpW32 += ubufQQ[pos3QQ + m] * fracoeffQQ[m];
+        }
+
+        // Subtract from input and update buffer.
+        ubufQQ[pos] = indatQ0[ind];
+
+        tmp2W32 = WEBRTC_SPL_MUL_16_32_RSFT14(indatQ0[ind], tmpW32);
+        tmpW32 += 8192;
+        tmpW16 = tmpW32 >> 14;
+        tmpW32 = tmpW16 * tmpW16;
+
+        if ((tmp2W32 > 1073700000) || (csum1QQ > 1073700000) ||
+            (tmpW32 > 1073700000) || (esumxQQ > 1073700000)) {  // 2^30
+          scale++;
+          csum1QQ >>= 1;
+          esumxQQ >>= 1;
+        }
+        csum1QQ += tmp2W32 >> scale;
+        esumxQQ += tmpW32 >> scale;
+
+        ind++;
+        pos++;
+        pos3QQ++;
+      }
+    }
+
+    if (csum1QQ < esumxQQ) {
+      tmp2W32 = WebRtcSpl_DivResultInQ31(csum1QQ, esumxQQ);
+
+      // Gain should be half the correlation.
+      tmpW32 = tmp2W32 >> 20;
+    } else {
+      tmpW32 = 4096;
+    }
+    gainsQ12[k] = (int16_t)WEBRTC_SPL_SAT(PITCH_MAX_GAIN_Q12, tmpW32, 0);
+  }
+
+  // Export buffer and states.
+  memcpy(pfp->ubufQQ, ubufQQ + PITCH_FRAME_LEN, sizeof(pfp->ubufQQ));
+  pfp->oldlagQ7 = lagsQ7[PITCH_SUBFRAMES - 1];
+  pfp->oldgainQ12 = gainsQ12[PITCH_SUBFRAMES - 1];
+
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S b/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S
new file mode 100644
index 0000000..d5b5541
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S
@@ -0,0 +1,143 @@
+@
+@ Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+@
+@ Use of this source code is governed by a BSD-style license
+@ that can be found in the LICENSE file in the root of the source
+@ tree. An additional intellectual property rights grant can be found
+@ in the file PATENTS.  All contributing project authors may
+@ be found in the AUTHORS file in the root of the source tree.
+@
+
+@ Contains the core loop routine for the pitch filter function in iSAC,
+@ optimized for ARMv7 platforms.
+@
+@ Output is bit-exact with the reference C code in pitch_filter.c.
+
+#include "system_wrappers/include/asm_defines.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+GLOBAL_FUNCTION WebRtcIsacfix_PitchFilterCore
+.align  2
+
+@ void WebRtcIsacfix_PitchFilterCore(int loopNumber,
+@                                    int16_t gain,
+@                                    size_t index,
+@                                    int16_t sign,
+@                                    int16_t* inputState,
+@                                    int16_t* outputBuf2,
+@                                    const int16_t* coefficient,
+@                                    int16_t* inputBuf,
+@                                    int16_t* outputBuf,
+@                                    int* index2) {
+DEFINE_FUNCTION WebRtcIsacfix_PitchFilterCore
+  push {r4-r11}
+  sub sp, #8
+
+  str r0, [sp]                @ loopNumber
+  str r3, [sp, #4]            @ sign
+  ldr r3, [sp, #44]           @ outputBuf2
+  ldr r6, [sp, #60]           @ index2
+  ldr r7, [r6]                @ *index2
+  ldr r8, [sp, #52]           @ inputBuf
+  ldr r12, [sp, #56]          @ outputBuf
+
+  add r4, r7, r0
+  str r4, [r6]                @ Store return value to index2.
+
+  mov r10, r7, asl #1
+  add r12, r10                @ &outputBuf[*index2]
+  add r8, r10                 @ &inputBuf[*index2]
+
+  add r4, r7, #PITCH_BUFFSIZE @ *index2 + PITCH_BUFFSIZE
+  add r6, r3, r4, lsl #1      @ &outputBuf2[*index2 + PITCH_BUFFSIZE]
+  sub r4, r2                  @ r2: index
+  sub r4, #2                  @ *index2 + PITCH_BUFFSIZE - index - 2
+  add r3, r4, lsl #1          @ &ubufQQpos2[*index2]
+  ldr r9, [sp, #48]           @ coefficient
+
+LOOP:
+@ Usage of registers in the loop:
+@  r0: loop counter
+@  r1: gain
+@  r2: tmpW32
+@  r3: &ubufQQpos2[]
+@  r6: &outputBuf2[]
+@  r8: &inputBuf[]
+@  r9: &coefficient[]
+@  r12: &outputBuf[]
+@  r4, r5, r7, r10, r11: scratch
+
+  @ Filter to get fractional pitch.
+  @ The pitch filter loop here is unrolled with 9 multipications.
+  pld [r3]
+  ldr r10, [r3], #4           @ ubufQQpos2[*index2 + 0, *index2 + 1]
+  ldr r4, [r9], #4            @ coefficient[0, 1]
+  ldr r11, [r3], #4
+  ldr r5, [r9], #4
+  smuad r2, r10, r4
+  smlad r2, r11, r5, r2
+
+  ldr r10, [r3], #4
+  ldr r4, [r9], #4
+  ldr r11, [r3], #4
+  ldr r5, [r9], #4
+  smlad r2, r10, r4, r2
+  ldrh r10, [r3], #-14        @ r3 back to &ubufQQpos2[*index2].
+  ldrh  r4, [r9], #-16        @ r9 back to &coefficient[0].
+  smlad r2, r11, r5, r2
+  smlabb r2, r10, r4, r2
+
+  @ Saturate to avoid overflow in tmpW16.
+  asr r2, #1
+  add r4, r2, #0x1000
+  ssat r7, #16, r4, asr #13
+
+  @ Shift low pass filter state, and excute the low pass filter.
+  @ The memmove() and the low pass filter loop are unrolled and mixed.
+  smulbb r5, r1, r7
+  add r7, r5, #0x800
+  asr r7, #12                 @ Get the value for inputState[0].
+  ldr r11, [sp, #40]          @ inputState
+  pld [r11]
+  adr r10, kDampFilter
+  ldrsh r4, [r10], #2         @ kDampFilter[0]
+  mul r2, r7, r4
+  ldr r4, [r11]               @ inputState[0, 1], before shift.
+  strh r7, [r11]              @ inputState[0], after shift.
+  ldr r5, [r11, #4]           @ inputState[2, 3], before shift.
+  ldr r7, [r10], #4           @ kDampFilter[1, 2]
+  ldr r10, [r10]              @ kDampFilter[3, 4]
+  str r4, [r11, #2]           @ inputState[1, 2], after shift.
+  str r5, [r11, #6]           @ inputState[3, 4], after shift.
+  smlad r2, r4, r7, r2
+  smlad r2, r5, r10, r2
+
+  @ Saturate to avoid overflow.
+  @ First shift the sample to the range of [0xC0000000, 0x3FFFFFFF],
+  @ to avoid overflow in the next saturation step.
+  asr r2, #1
+  add r10, r2, #0x2000
+  ssat r10, #16, r10, asr #14
+
+  @ Subtract from input and update buffer.
+  ldr r11, [sp, #4]           @ sign
+  ldrsh r4, [r8]
+  ldrsh r7, [r8], #2          @ inputBuf[*index2]
+  smulbb r5, r11, r10
+  subs r0, #1
+  sub r4, r5
+  ssat r2, #16, r4
+  strh  r2, [r12], #2         @ outputBuf[*index2]
+
+  add r2, r7
+  ssat r2, #16, r2
+  strh  r2, [r6], #2          @ outputBuff2[*index2 + PITCH_BUFFSIZE]
+  bgt LOOP
+
+  add sp, #8
+  pop {r4-r11}
+  bx  lr
+
+.align  2
+kDampFilter:
+  .short  -2294, 8192, 20972, 8192, -2294
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c b/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c
new file mode 100644
index 0000000..f23d19d
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c
@@ -0,0 +1,73 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+
+/* Filter coefficicients in Q15. */
+static const int16_t kDampFilter[PITCH_DAMPORDER] = {
+  -2294, 8192, 20972, 8192, -2294
+};
+
+void WebRtcIsacfix_PitchFilterCore(int loopNumber,
+                                   int16_t gain,
+                                   size_t index,
+                                   int16_t sign,
+                                   int16_t* inputState,
+                                   int16_t* outputBuf2,
+                                   const int16_t* coefficient,
+                                   int16_t* inputBuf,
+                                   int16_t* outputBuf,
+                                   int* index2) {
+  int i = 0, j = 0;  /* Loop counters. */
+  int16_t* ubufQQpos2 = &outputBuf2[PITCH_BUFFSIZE - (index + 2)];
+  int16_t tmpW16 = 0;
+
+  for (i = 0; i < loopNumber; i++) {
+    int32_t tmpW32 = 0;
+
+    /* Filter to get fractional pitch. */
+    for (j = 0; j < PITCH_FRACORDER; j++) {
+      tmpW32 += ubufQQpos2[*index2 + j] * coefficient[j];
+    }
+
+    /* Saturate to avoid overflow in tmpW16. */
+    tmpW32 = WEBRTC_SPL_SAT(536862719, tmpW32, -536879104);
+    tmpW32 += 8192;
+    tmpW16 = (int16_t)(tmpW32 >> 14);
+
+    /* Shift low pass filter state. */
+    memmove(&inputState[1], &inputState[0],
+            (PITCH_DAMPORDER - 1) * sizeof(int16_t));
+    inputState[0] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+                      gain, tmpW16, 12);
+
+    /* Low pass filter. */
+    tmpW32 = 0;
+    /* TODO(kma): Define a static inline function WebRtcSpl_DotProduct()
+       in spl_inl.h to replace this and other similar loops. */
+    for (j = 0; j < PITCH_DAMPORDER; j++) {
+      tmpW32 += inputState[j] * kDampFilter[j];
+    }
+
+    /* Saturate to avoid overflow in tmpW16. */
+    tmpW32 = WEBRTC_SPL_SAT(1073725439, tmpW32, -1073758208);
+    tmpW32 += 16384;
+    tmpW16 = (int16_t)(tmpW32 >> 15);
+
+    /* Subtract from input and update buffer. */
+    tmpW32 = inputBuf[*index2] - sign * tmpW16;
+    outputBuf[*index2] = WebRtcSpl_SatW32ToW16(tmpW32);
+    tmpW32 = inputBuf[*index2] + outputBuf[*index2];
+    outputBuf2[*index2 + PITCH_BUFFSIZE] = WebRtcSpl_SatW32ToW16(tmpW32);
+
+    (*index2)++;
+  }
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_filter_mips.c b/modules/audio_coding/codecs/isac/fix/source/pitch_filter_mips.c
new file mode 100644
index 0000000..785fd94
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_filter_mips.c
@@ -0,0 +1,133 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+
+void WebRtcIsacfix_PitchFilterCore(int loopNumber,
+                                   int16_t gain,
+                                   size_t index,
+                                   int16_t sign,
+                                   int16_t* inputState,
+                                   int16_t* outputBuf2,
+                                   const int16_t* coefficient,
+                                   int16_t* inputBuf,
+                                   int16_t* outputBuf,
+                                   int* index2) {
+  int ind2t = *index2;
+  int i = 0;
+  int16_t* out2_pos2 = &outputBuf2[PITCH_BUFFSIZE - (index + 2)] + ind2t;
+  int32_t w1, w2, w3, w4, w5, gain32, sign32;
+  int32_t coef1, coef2, coef3, coef4, coef5 = 0;
+  // Define damp factors as int32_t (pair of int16_t)
+  int32_t kDampF0 = 0x0000F70A;
+  int32_t kDampF1 = 0x51EC2000;
+  int32_t kDampF2 = 0xF70A2000;
+  int16_t* input1 = inputBuf + ind2t;
+  int16_t* output1 = outputBuf + ind2t;
+  int16_t* output2 = outputBuf2 + ind2t + PITCH_BUFFSIZE;
+
+  // Load coefficients outside the loop and sign-extend gain and sign
+  __asm __volatile (
+    ".set     push                                        \n\t"
+    ".set     noreorder                                   \n\t"
+    "lwl      %[coef1],       3(%[coefficient])           \n\t"
+    "lwl      %[coef2],       7(%[coefficient])           \n\t"
+    "lwl      %[coef3],       11(%[coefficient])          \n\t"
+    "lwl      %[coef4],       15(%[coefficient])          \n\t"
+    "lwr      %[coef1],       0(%[coefficient])           \n\t"
+    "lwr      %[coef2],       4(%[coefficient])           \n\t"
+    "lwr      %[coef3],       8(%[coefficient])           \n\t"
+    "lwr      %[coef4],       12(%[coefficient])          \n\t"
+    "lhu      %[coef5],       16(%[coefficient])          \n\t"
+    "seh      %[gain32],      %[gain]                     \n\t"
+    "seh      %[sign32],      %[sign]                     \n\t"
+    ".set     pop                                         \n\t"
+    : [coef1] "=&r" (coef1), [coef2] "=&r" (coef2), [coef3] "=&r" (coef3),
+      [coef4] "=&r" (coef4), [coef5] "=&r" (coef5), [gain32] "=&r" (gain32),
+      [sign32] "=&r" (sign32)
+    : [coefficient] "r" (coefficient), [gain] "r" (gain),
+      [sign] "r" (sign)
+    : "memory"
+  );
+
+  for (i = 0; i < loopNumber; i++) {
+    __asm __volatile (
+      ".set       push                                            \n\t"
+      ".set       noreorder                                       \n\t"
+      // Filter to get fractional pitch
+      "li         %[w1],          8192                            \n\t"
+      "mtlo       %[w1]                                           \n\t"
+      "mthi       $0                                              \n\t"
+      "lwl        %[w1],          3(%[out2_pos2])                 \n\t"
+      "lwl        %[w2],          7(%[out2_pos2])                 \n\t"
+      "lwl        %[w3],          11(%[out2_pos2])                \n\t"
+      "lwl        %[w4],          15(%[out2_pos2])                \n\t"
+      "lwr        %[w1],          0(%[out2_pos2])                 \n\t"
+      "lwr        %[w2],          4(%[out2_pos2])                 \n\t"
+      "lwr        %[w3],          8(%[out2_pos2])                 \n\t"
+      "lwr        %[w4],          12(%[out2_pos2])                \n\t"
+      "lhu        %[w5],          16(%[out2_pos2])                \n\t"
+      "dpa.w.ph   $ac0,           %[w1],              %[coef1]    \n\t"
+      "dpa.w.ph   $ac0,           %[w2],              %[coef2]    \n\t"
+      "dpa.w.ph   $ac0,           %[w3],              %[coef3]    \n\t"
+      "dpa.w.ph   $ac0,           %[w4],              %[coef4]    \n\t"
+      "dpa.w.ph   $ac0,           %[w5],              %[coef5]    \n\t"
+      "addiu      %[out2_pos2],   %[out2_pos2],       2           \n\t"
+      "mthi       $0,             $ac1                            \n\t"
+      "lwl        %[w2],          3(%[inputState])                \n\t"
+      "lwl        %[w3],          7(%[inputState])                \n\t"
+      // Fractional pitch shift & saturation
+      "extr_s.h   %[w1],          $ac0,               14          \n\t"
+      "li         %[w4],          16384                           \n\t"
+      "lwr        %[w2],          0(%[inputState])                \n\t"
+      "lwr        %[w3],          4(%[inputState])                \n\t"
+      "mtlo       %[w4],          $ac1                            \n\t"
+      // Shift low pass filter state
+      "swl        %[w2],          5(%[inputState])                \n\t"
+      "swl        %[w3],          9(%[inputState])                \n\t"
+      "mul        %[w1],          %[gain32],          %[w1]       \n\t"
+      "swr        %[w2],          2(%[inputState])                \n\t"
+      "swr        %[w3],          6(%[inputState])                \n\t"
+      // Low pass filter accumulation
+      "dpa.w.ph   $ac1,           %[kDampF1],         %[w2]       \n\t"
+      "dpa.w.ph   $ac1,           %[kDampF2],         %[w3]       \n\t"
+      "lh         %[w4],          0(%[input1])                    \n\t"
+      "addiu      %[input1],      %[input1],          2           \n\t"
+      "shra_r.w   %[w1],          %[w1],              12          \n\t"
+      "sh         %[w1],          0(%[inputState])                \n\t"
+      "dpa.w.ph   $ac1,           %[kDampF0],         %[w1]       \n\t"
+      // Low pass filter shift & saturation
+      "extr_s.h   %[w2],          $ac1,               15          \n\t"
+      "mul        %[w2],          %[w2],              %[sign32]   \n\t"
+      // Buffer update
+      "subu       %[w2],          %[w4],              %[w2]       \n\t"
+      "shll_s.w   %[w2],          %[w2],              16          \n\t"
+      "sra        %[w2],          %[w2],              16          \n\t"
+      "sh         %[w2],          0(%[output1])                   \n\t"
+      "addu       %[w2],          %[w2],              %[w4]       \n\t"
+      "shll_s.w   %[w2],          %[w2],              16          \n\t"
+      "addiu      %[output1],     %[output1],         2           \n\t"
+      "sra        %[w2],          %[w2],              16          \n\t"
+      "sh         %[w2],          0(%[output2])                   \n\t"
+      "addiu      %[output2],     %[output2],         2           \n\t"
+      ".set       pop                                             \n\t"
+      : [w1] "=&r" (w1), [w2] "=&r" (w2), [w3] "=&r" (w3), [w4] "=&r" (w4),
+        [w5] "=&r" (w5), [input1] "+r" (input1), [out2_pos2] "+r" (out2_pos2),
+        [output1] "+r" (output1), [output2] "+r" (output2)
+      : [coefficient] "r" (coefficient), [inputState] "r" (inputState),
+        [gain32] "r" (gain32), [sign32] "r" (sign32), [kDampF0] "r" (kDampF0),
+        [kDampF1] "r" (kDampF1), [kDampF2] "r" (kDampF2),
+        [coef1] "r" (coef1), [coef2] "r" (coef2), [coef3] "r" (coef3),
+        [coef4] "r" (coef4), [coef5] "r" (coef5)
+      : "hi", "lo", "$ac1hi", "$ac1lo", "memory"
+    );
+  }
+  (*index2) += loopNumber;
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.c b/modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.c
new file mode 100644
index 0000000..bfbab19
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.c
@@ -0,0 +1,149 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * pitch_gain_tables.c
+ *
+ * This file contains tables for the pitch filter side-info in the entropy coder.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.h"
+
+
+/********************* Pitch Filter Gain Coefficient Tables ************************/
+
+/* cdf for quantized pitch filter gains */
+const uint16_t WebRtcIsacfix_kPitchGainCdf[255] = {
+  0,  2,  4,  6,  64,  901,  903,  905,  16954,  16956,
+  16961,  17360,  17362,  17364,  17366,  17368,  17370,  17372,  17374,  17411,
+  17514,  17516,  17583,  18790,  18796,  18802,  20760,  20777,  20782,  21722,
+  21724,  21728,  21738,  21740,  21742,  21744,  21746,  21748,  22224,  22227,
+  22230,  23214,  23229,  23239,  25086,  25108,  25120,  26088,  26094,  26098,
+  26175,  26177,  26179,  26181,  26183,  26185,  26484,  26507,  26522,  27705,
+  27731,  27750,  29767,  29799,  29817,  30866,  30883,  30885,  31025,  31029,
+  31031,  31033,  31035,  31037,  31114,  31126,  31134,  32687,  32722,  32767,
+  35718,  35742,  35757,  36943,  36952,  36954,  37115,  37128,  37130,  37132,
+  37134,  37136,  37143,  37145,  37152,  38843,  38863,  38897,  47458,  47467,
+  47474,  49040,  49061,  49063,  49145,  49157,  49159,  49161,  49163,  49165,
+  49167,  49169,  49171,  49757,  49770,  49782,  61333,  61344,  61346,  62860,
+  62883,  62885,  62887,  62889,  62891,  62893,  62895,  62897,  62899,  62901,
+  62903,  62905,  62907,  62909,  65496,  65498,  65500,  65521,  65523,  65525,
+  65527,  65529,  65531,  65533,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535
+};
+
+/* index limits and ranges */
+const int16_t WebRtcIsacfix_kLowerlimiGain[3] = {
+  -7, -2, -1
+};
+
+const int16_t WebRtcIsacfix_kUpperlimitGain[3] = {
+  0,  3,  1
+};
+
+const uint16_t WebRtcIsacfix_kMultsGain[2] = {
+  18,  3
+};
+
+/* size of cdf table */
+const uint16_t WebRtcIsacfix_kCdfTableSizeGain[1] = {
+  256
+};
+
+/* mean values of pitch filter gains in FIXED point Q12 */
+const int16_t WebRtcIsacfix_kPitchGain1[144] = {
+  843, 1092, 1336, 1222, 1405, 1656, 1500, 1815, 1843, 1838, 1839,
+  1843, 1843, 1843, 1843, 1843,   1843, 1843, 814, 846, 1092, 1013,
+  1174, 1383, 1391, 1511, 1584, 1734, 1753, 1843, 1843, 1843,   1843,
+  1843, 1843, 1843, 524, 689, 777, 845, 947, 1069, 1090, 1263,
+  1380, 1447, 1559, 1676,   1645, 1749, 1843, 1843, 1843, 1843, 81,
+  477, 563, 611, 706, 806, 849, 1012, 1192, 1128,   1330, 1489,
+  1425, 1576, 1826, 1741, 1843, 1843, 0,     290, 305, 356, 488,
+  575, 602, 741,    890, 835, 1079, 1196, 1182, 1376, 1519, 1506,
+  1680, 1843, 0,     47,  97,  69,  289, 381,    385, 474, 617,
+  664, 803, 1079, 935, 1160, 1269, 1265, 1506, 1741, 0,      0,
+  0,      0,      112, 120, 190, 283, 442, 343, 526, 809, 684,
+  935, 1134, 1020, 1265, 1506, 0,      0,      0,      0,      0,      0,
+  0,      111,    256, 87,  373, 597, 430, 684, 935, 770, 1020,
+  1265
+};
+
+const int16_t WebRtcIsacfix_kPitchGain2[144] = {
+  1760, 1525, 1285, 1747, 1671, 1393, 1843, 1826, 1555, 1843, 1784,
+  1606, 1843, 1843, 1711, 1843,   1843, 1814, 1389, 1275, 1040, 1564,
+  1414, 1252, 1610, 1495, 1343, 1753, 1592, 1405, 1804, 1720,   1475,
+  1843, 1814, 1581, 1208, 1061, 856, 1349, 1148, 994, 1390, 1253,
+  1111, 1495, 1343, 1178,   1770, 1465, 1234, 1814, 1581, 1342, 1040,
+  793, 713, 1053, 895, 737, 1128, 1003, 861, 1277,   1094, 981,
+  1475, 1192, 1019, 1581, 1342, 1098, 855, 570, 483, 833, 648,
+  540, 948, 744,    572, 1009, 844, 636, 1234, 934, 685, 1342,
+  1217, 984, 537, 318, 124, 603, 423, 350,    687, 479, 322,
+  791, 581, 430, 987, 671, 488, 1098, 849, 597, 283, 27,
+  0,     397,    222, 38,  513, 271, 124, 624, 325, 157, 737,
+  484, 233, 849, 597, 343, 27,  0,      0,   141, 0,     0,
+  256, 69,  0,     370, 87,  0,     484, 229, 0,     597, 343,
+  87
+};
+
+const int16_t WebRtcIsacfix_kPitchGain3[144] = {
+  1843, 1843, 1711, 1843, 1818, 1606, 1843, 1827, 1511, 1814, 1639,
+  1393, 1760, 1525, 1285, 1656,   1419, 1176, 1835, 1718, 1475, 1841,
+  1650, 1387, 1648, 1498, 1287, 1600, 1411, 1176, 1522, 1299,   1040,
+  1419, 1176, 928, 1773, 1461, 1128, 1532, 1355, 1202, 1429, 1260,
+  1115, 1398, 1151, 1025,   1172, 1080, 790, 1176, 928, 677, 1475,
+  1147, 1019, 1276, 1096, 922, 1214, 1010, 901, 1057,   893, 800,
+  1040, 796, 734, 928, 677, 424, 1137, 897, 753, 1120, 830,
+  710, 875, 751,    601, 795, 642, 583, 790, 544, 475, 677,
+  474, 140, 987, 750, 482, 697, 573, 450,    691, 487, 303,
+  661, 394, 332, 537, 303, 220, 424, 168, 0,     737, 484,
+  229, 624,    348, 153, 441, 261, 136, 397, 166, 51,  283,
+  27,  0,     168, 0,     0,     484, 229,    0,   370, 57,  0,
+  256, 43,  0,     141, 0,  0,   27,  0,   0,   0,   0,
+  0
+};
+
+
+const int16_t WebRtcIsacfix_kPitchGain4[144] = {
+  1843, 1843, 1843, 1843, 1841, 1843, 1500, 1821, 1843, 1222, 1434,
+  1656, 843, 1092, 1336, 504,    757, 1007, 1843, 1843, 1843, 1838,
+  1791, 1843, 1265, 1505, 1599, 965, 1219, 1425, 730, 821,    1092,
+  249, 504, 757, 1783, 1819, 1843, 1351, 1567, 1727, 1096, 1268,
+  1409, 805, 961, 1131,   444, 670, 843, 0,  249, 504, 1425,
+  1655, 1743, 1096, 1324, 1448, 822, 1019, 1199, 490,    704, 867,
+  81,  450, 555, 0,     0,  249, 1247, 1428, 1530, 881, 1073,
+  1283, 610, 759,    939, 278, 464, 645, 0,     200, 270, 0,
+  0,   0,  935, 1163, 1410, 528, 790, 1068,   377, 499, 717,
+  173, 240, 274, 0,   43,  62,  0,   0,   0,   684, 935,
+  1182, 343,    551, 735, 161, 262, 423, 0,      55,  27,  0,
+  0,   0,   0,   0,   0,   430, 684,    935, 87,  377, 597,
+  0,   46,  256, 0,   0,   0,   0,   0,   0,   0,   0,
+  0
+};
+
+
+
+/* transform matrix in Q12*/
+const int16_t WebRtcIsacfix_kTransform[4][4] = {
+  { -2048, -2048, -2048, -2048 },
+  {  2748,   916,  -916, -2748 },
+  {  2048, -2048, -2048,  2048 },
+  {   916, -2748,  2748,  -916 }
+};
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.h b/modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.h
new file mode 100644
index 0000000..fe4d288
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_gain_tables.h
@@ -0,0 +1,44 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * pitch_gain_tables.h
+ *
+ * This file contains tables for the pitch filter side-info in the entropy coder.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_GAIN_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_GAIN_TABLES_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+/********************* Pitch Filter Gain Coefficient Tables ************************/
+/* cdf for quantized pitch filter gains */
+extern const uint16_t WebRtcIsacfix_kPitchGainCdf[255];
+
+/* index limits and ranges */
+extern const int16_t WebRtcIsacfix_kLowerlimiGain[3];
+extern const int16_t WebRtcIsacfix_kUpperlimitGain[3];
+extern const uint16_t WebRtcIsacfix_kMultsGain[2];
+
+/* mean values of pitch filter gains in Q12*/
+extern const int16_t WebRtcIsacfix_kPitchGain1[144];
+extern const int16_t WebRtcIsacfix_kPitchGain2[144];
+extern const int16_t WebRtcIsacfix_kPitchGain3[144];
+extern const int16_t WebRtcIsacfix_kPitchGain4[144];
+
+/* size of cdf table */
+extern const uint16_t WebRtcIsacfix_kCdfTableSizeGain[1];
+
+/* transform matrix */
+extern const int16_t WebRtcIsacfix_kTransform[4][4];
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_GAIN_TABLES_H_ */
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.c b/modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.c
new file mode 100644
index 0000000..894716e
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.c
@@ -0,0 +1,306 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * pitch_lag_tables.c
+ *
+ * This file contains tables for the pitch filter side-info in the entropy coder.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.h"
+
+
+/********************* Pitch Filter Gain Coefficient Tables ************************/
+
+/* tables for use with small pitch gain */
+
+/* cdf for quantized pitch filter lags */
+const uint16_t WebRtcIsacfix_kPitchLagCdf1Lo[127] = {
+  0,  134,  336,  549,  778,  998,  1264,  1512,  1777,  2070,
+  2423,  2794,  3051,  3361,  3708,  3979,  4315,  4610,  4933,  5269,
+  5575,  5896,  6155,  6480,  6816,  7129,  7477,  7764,  8061,  8358,
+  8718,  9020,  9390,  9783,  10177,  10543,  10885,  11342,  11795,  12213,
+  12680,  13096,  13524,  13919,  14436,  14903,  15349,  15795,  16267,  16734,
+  17266,  17697,  18130,  18632,  19080,  19447,  19884,  20315,  20735,  21288,
+  21764,  22264,  22723,  23193,  23680,  24111,  24557,  25022,  25537,  26082,
+  26543,  27090,  27620,  28139,  28652,  29149,  29634,  30175,  30692,  31273,
+  31866,  32506,  33059,  33650,  34296,  34955,  35629,  36295,  36967,  37726,
+  38559,  39458,  40364,  41293,  42256,  43215,  44231,  45253,  46274,  47359,
+  48482,  49678,  50810,  51853,  53016,  54148,  55235,  56263,  57282,  58363,
+  59288,  60179,  61076,  61806,  62474,  63129,  63656,  64160,  64533,  64856,
+  65152,  65535,  65535,  65535,  65535,  65535,  65535
+};
+
+const uint16_t WebRtcIsacfix_kPitchLagCdf2Lo[20] = {
+  0,  429,  3558,  5861,  8558,  11639,  15210,  19502,  24773,  31983,
+  42602,  48567,  52601,  55676,  58160,  60172,  61889,  63235,  65383,  65535
+};
+
+const uint16_t WebRtcIsacfix_kPitchLagCdf3Lo[2] = {
+  0,  65535
+};
+
+const uint16_t WebRtcIsacfix_kPitchLagCdf4Lo[10] = {
+  0,  2966,  6368,  11182,  19431,  37793,  48532,  55353,  60626,  65535
+};
+
+const uint16_t *WebRtcIsacfix_kPitchLagPtrLo[4] = {
+  WebRtcIsacfix_kPitchLagCdf1Lo,
+  WebRtcIsacfix_kPitchLagCdf2Lo,
+  WebRtcIsacfix_kPitchLagCdf3Lo,
+  WebRtcIsacfix_kPitchLagCdf4Lo
+};
+
+/* size of first cdf table */
+const uint16_t WebRtcIsacfix_kPitchLagSizeLo[1] = {
+  128
+};
+
+/* index limits and ranges */
+const int16_t WebRtcIsacfix_kLowerLimitLo[4] = {
+  -140, -9,  0, -4
+};
+
+const int16_t WebRtcIsacfix_kUpperLimitLo[4] = {
+  -20,  9,  0,  4
+};
+
+/* initial index for arithmetic decoder */
+const uint16_t WebRtcIsacfix_kInitIndLo[3] = {
+  10,  1,  5
+};
+
+/* mean values of pitch filter lags in Q10 */
+
+const int16_t WebRtcIsacfix_kMeanLag2Lo[19] = {
+  -17627, -16207, -14409, -12319, -10253, -8200, -6054, -3986, -1948, -19,
+  1937, 3974, 6064, 8155, 10229, 12270, 14296, 16127, 17520
+};
+
+const int16_t WebRtcIsacfix_kMeanLag4Lo[9] = {
+  -7949, -6063, -4036, -1941, 38, 1977, 4060, 6059
+};
+
+
+
+/* tables for use with medium pitch gain */
+
+/* cdf for quantized pitch filter lags */
+const uint16_t WebRtcIsacfix_kPitchLagCdf1Mid[255] = {
+  0,  28,  61,  88,  121,  149,  233,  331,  475,  559,
+  624,  661,  689,  712,  745,  791,  815,  843,  866,  922,
+  959,  1024,  1061,  1117,  1178,  1238,  1280,  1350,  1453,  1513,
+  1564,  1625,  1671,  1741,  1788,  1904,  2072,  2421,  2626,  2770,
+  2840,  2900,  2942,  3012,  3068,  3115,  3147,  3194,  3254,  3319,
+  3366,  3520,  3678,  3780,  3850,  3911,  3957,  4032,  4106,  4185,
+  4292,  4474,  4683,  4842,  5019,  5191,  5321,  5428,  5540,  5675,
+  5763,  5847,  5959,  6127,  6304,  6564,  6839,  7090,  7263,  7421,
+  7556,  7728,  7872,  7984,  8142,  8361,  8580,  8743,  8938,  9227,
+  9409,  9539,  9674,  9795,  9930,  10060,  10177,  10382,  10614,  10861,
+  11038,  11271,  11415,  11629,  11792,  12044,  12193,  12416,  12574,  12821,
+  13007,  13235,  13445,  13654,  13901,  14134,  14488,  15000,  15703,  16285,
+  16504,  16797,  17086,  17328,  17579,  17807,  17998,  18268,  18538,  18836,
+  19087,  19274,  19474,  19716,  19935,  20270,  20833,  21303,  21532,  21741,
+  21978,  22207,  22523,  22770,  23054,  23613,  23943,  24204,  24399,  24651,
+  24832,  25074,  25270,  25549,  25759,  26015,  26150,  26424,  26713,  27048,
+  27342,  27504,  27681,  27854,  28021,  28207,  28412,  28664,  28859,  29064,
+  29278,  29548,  29748,  30107,  30377,  30656,  30856,  31164,  31452,  31755,
+  32011,  32328,  32626,  32919,  33319,  33789,  34329,  34925,  35396,  35973,
+  36443,  36964,  37551,  38156,  38724,  39357,  40023,  40908,  41587,  42602,
+  43924,  45037,  45810,  46597,  47421,  48291,  49092,  50051,  51448,  52719,
+  53440,  54241,  54944,  55977,  56676,  57299,  57872,  58389,  59059,  59688,
+  60237,  60782,  61094,  61573,  61890,  62290,  62658,  63030,  63217,  63454,
+  63622,  63882,  64003,  64273,  64427,  64529,  64581,  64697,  64758,  64902,
+  65414,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535
+};
+
+const uint16_t WebRtcIsacfix_kPitchLagCdf2Mid[36] = {
+  0,  71,  335,  581,  836,  1039,  1323,  1795,  2258,  2608,
+  3005,  3591,  4243,  5344,  7163,  10583,  16848,  28078,  49448,  57007,
+  60357,  61850,  62837,  63437,  63872,  64188,  64377,  64614,  64774,  64949,
+  65039,  65115,  65223,  65360,  65474,  65535
+};
+
+const uint16_t WebRtcIsacfix_kPitchLagCdf3Mid[2] = {
+  0,  65535
+};
+
+const uint16_t WebRtcIsacfix_kPitchLagCdf4Mid[20] = {
+  0,  28,  246,  459,  667,  1045,  1523,  2337,  4337,  11347,
+  44231,  56709,  60781,  62243,  63161,  63969,  64608,  65062,  65502,  65535
+};
+
+const uint16_t *WebRtcIsacfix_kPitchLagPtrMid[4] = {
+  WebRtcIsacfix_kPitchLagCdf1Mid,
+  WebRtcIsacfix_kPitchLagCdf2Mid,
+  WebRtcIsacfix_kPitchLagCdf3Mid,
+  WebRtcIsacfix_kPitchLagCdf4Mid
+};
+
+/* size of first cdf table */
+const uint16_t WebRtcIsacfix_kPitchLagSizeMid[1] = {
+  256
+};
+
+/* index limits and ranges */
+const int16_t WebRtcIsacfix_kLowerLimitMid[4] = {
+  -280, -17,  0, -9
+};
+
+const int16_t WebRtcIsacfix_kUpperLimitMid[4] = {
+  -40,  17,  0,  9
+};
+
+/* initial index for arithmetic decoder */
+const uint16_t WebRtcIsacfix_kInitIndMid[3] = {
+  18,  1,  10
+};
+
+/* mean values of pitch filter lags in Q10 */
+
+const int16_t WebRtcIsacfix_kMeanLag2Mid[35] = {
+  -17297, -16250, -15416, -14343, -13341, -12363, -11270,
+  -10355, -9122, -8217, -7172, -6083, -5102, -4004, -3060,
+  -1982, -952, -18, 935, 1976, 3040, 4032,
+  5082, 6065, 7257, 8202, 9264, 10225, 11242,
+  12234, 13337, 14336, 15374, 16187, 17347
+};
+
+
+const int16_t WebRtcIsacfix_kMeanLag4Mid[19] = {
+  -8811, -8081, -7203, -6003, -5057, -4025, -2983, -1964,
+  -891, 29, 921, 1920, 2988, 4064, 5187, 6079, 7173, 8074, 8849
+};
+
+
+/* tables for use with large pitch gain */
+
+/* cdf for quantized pitch filter lags */
+const uint16_t WebRtcIsacfix_kPitchLagCdf1Hi[511] = {
+  0,  7,  18,  33,  69,  105,  156,  228,  315,  612,
+  680,  691,  709,  724,  735,  738,  742,  746,  749,  753,
+  756,  760,  764,  774,  782,  785,  789,  796,  800,  803,
+  807,  814,  818,  822,  829,  832,  847,  854,  858,  869,
+  876,  883,  898,  908,  934,  977,  1010,  1050,  1060,  1064,
+  1075,  1078,  1086,  1089,  1093,  1104,  1111,  1122,  1133,  1136,
+  1151,  1162,  1183,  1209,  1252,  1281,  1339,  1364,  1386,  1401,
+  1411,  1415,  1426,  1430,  1433,  1440,  1448,  1455,  1462,  1477,
+  1487,  1495,  1502,  1506,  1509,  1516,  1524,  1531,  1535,  1542,
+  1553,  1556,  1578,  1589,  1611,  1625,  1639,  1643,  1654,  1665,
+  1672,  1687,  1694,  1705,  1708,  1719,  1730,  1744,  1752,  1759,
+  1791,  1795,  1820,  1867,  1886,  1915,  1936,  1943,  1965,  1987,
+  2041,  2099,  2161,  2175,  2200,  2211,  2226,  2233,  2244,  2251,
+  2266,  2280,  2287,  2298,  2309,  2316,  2331,  2342,  2356,  2378,
+  2403,  2418,  2447,  2497,  2544,  2602,  2863,  2895,  2903,  2935,
+  2950,  2971,  3004,  3011,  3018,  3029,  3040,  3062,  3087,  3127,
+  3152,  3170,  3199,  3243,  3293,  3322,  3340,  3377,  3402,  3427,
+  3474,  3518,  3543,  3579,  3601,  3637,  3659,  3706,  3731,  3760,
+  3818,  3847,  3869,  3901,  3920,  3952,  4068,  4169,  4220,  4271,
+  4524,  4571,  4604,  4632,  4672,  4730,  4777,  4806,  4857,  4904,
+  4951,  5002,  5031,  5060,  5107,  5150,  5212,  5266,  5331,  5382,
+  5432,  5490,  5544,  5610,  5700,  5762,  5812,  5874,  5972,  6022,
+  6091,  6163,  6232,  6305,  6402,  6540,  6685,  6880,  7090,  7271,
+  7379,  7452,  7542,  7625,  7687,  7770,  7843,  7911,  7966,  8024,
+  8096,  8190,  8252,  8320,  8411,  8501,  8585,  8639,  8751,  8842,
+  8918,  8986,  9066,  9127,  9203,  9269,  9345,  9406,  9464,  9536,
+  9612,  9667,  9735,  9844,  9931,  10036,  10119,  10199,  10260,  10358,
+  10441,  10514,  10666,  10734,  10872,  10951,  11053,  11125,  11223,  11324,
+  11516,  11664,  11737,  11816,  11892,  12008,  12120,  12200,  12280,  12392,
+  12490,  12576,  12685,  12812,  12917,  13003,  13108,  13210,  13300,  13384,
+  13470,  13579,  13673,  13771,  13879,  13999,  14136,  14201,  14368,  14614,
+  14759,  14867,  14958,  15030,  15121,  15189,  15280,  15385,  15461,  15555,
+  15653,  15768,  15884,  15971,  16069,  16145,  16210,  16279,  16380,  16463,
+  16539,  16615,  16688,  16818,  16919,  17017,  18041,  18338,  18523,  18649,
+  18790,  18917,  19047,  19167,  19315,  19460,  19601,  19731,  19858,  20068,
+  20173,  20318,  20466,  20625,  20741,  20911,  21045,  21201,  21396,  21588,
+  21816,  22022,  22305,  22547,  22786,  23072,  23322,  23600,  23879,  24168,
+  24433,  24769,  25120,  25511,  25895,  26289,  26792,  27219,  27683,  28077,
+  28566,  29094,  29546,  29977,  30491,  30991,  31573,  32105,  32594,  33173,
+  33788,  34497,  35181,  35833,  36488,  37255,  37921,  38645,  39275,  39894,
+  40505,  41167,  41790,  42431,  43096,  43723,  44385,  45134,  45858,  46607,
+  47349,  48091,  48768,  49405,  49955,  50555,  51167,  51985,  52611,  53078,
+  53494,  53965,  54435,  54996,  55601,  56125,  56563,  56838,  57244,  57566,
+  57967,  58297,  58771,  59093,  59419,  59647,  59886,  60143,  60461,  60693,
+  60917,  61170,  61416,  61634,  61891,  62122,  62310,  62455,  62632,  62839,
+  63103,  63436,  63639,  63805,  63906,  64015,  64192,  64355,  64475,  64558,
+  64663,  64742,  64811,  64865,  64916,  64956,  64981,  65025,  65068,  65115,
+  65195,  65314,  65419,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535
+};
+
+const uint16_t WebRtcIsacfix_kPitchLagCdf2Hi[68] = {
+  0,  7,  11,  22,  37,  52,  56,  59,  81,  85,
+  89,  96,  115,  130,  137,  152,  170,  181,  193,  200,
+  207,  233,  237,  259,  289,  318,  363,  433,  592,  992,
+  1607,  3062,  6149,  12206,  25522,  48368,  58223,  61918,  63640,  64584,
+  64943,  65098,  65206,  65268,  65294,  65335,  65350,  65372,  65387,  65402,
+  65413,  65420,  65428,  65435,  65439,  65450,  65454,  65468,  65472,  65476,
+  65483,  65491,  65498,  65505,  65516,  65520,  65528,  65535
+};
+
+const uint16_t WebRtcIsacfix_kPitchLagCdf3Hi[2] = {
+  0,  65535
+};
+
+const uint16_t WebRtcIsacfix_kPitchLagCdf4Hi[35] = {
+  0,  7,  19,  30,  41,  48,  63,  74,  82,  96,
+  122,  152,  215,  330,  701,  2611,  10931,  48106,  61177,  64341,
+  65112,  65238,  65309,  65338,  65364,  65379,  65401,  65427,  65453,
+  65465,  65476,  65490,  65509,  65528,  65535
+};
+
+const uint16_t *WebRtcIsacfix_kPitchLagPtrHi[4] = {
+  WebRtcIsacfix_kPitchLagCdf1Hi,
+  WebRtcIsacfix_kPitchLagCdf2Hi,
+  WebRtcIsacfix_kPitchLagCdf3Hi,
+  WebRtcIsacfix_kPitchLagCdf4Hi
+};
+
+/* size of first cdf table */
+const uint16_t WebRtcIsacfix_kPitchLagSizeHi[1] = {
+  512
+};
+
+/* index limits and ranges */
+const int16_t WebRtcIsacfix_kLowerLimitHi[4] = {
+  -552, -34,  0, -16
+};
+
+const int16_t WebRtcIsacfix_kUpperLimitHi[4] = {
+  -80,  32,  0,  17
+};
+
+/* initial index for arithmetic decoder */
+const uint16_t WebRtcIsacfix_kInitIndHi[3] = {
+  34,  1,  18
+};
+
+/* mean values of pitch filter lags */
+
+const int16_t WebRtcIsacfix_kMeanLag2Hi[67] = {
+  -17482, -16896, -16220, -15929, -15329, -14848, -14336, -13807, -13312, -12800, -12218, -11720,
+  -11307, -10649, -10396, -9742, -9148, -8668, -8297, -7718, -7155, -6656, -6231, -5600, -5129,
+  -4610, -4110, -3521, -3040, -2525, -2016, -1506, -995, -477, -5, 469, 991, 1510, 2025, 2526, 3079,
+  3555, 4124, 4601, 5131, 5613, 6194, 6671, 7140, 7645, 8207, 8601, 9132, 9728, 10359, 10752, 11302,
+  11776, 12288, 12687, 13204, 13759, 14295, 14810, 15360, 15764, 16350
+};
+
+
+const int16_t WebRtcIsacfix_kMeanLag4Hi[34] = {
+  -8175, -7659, -7205, -6684, -6215, -5651, -5180, -4566, -4087, -3536, -3096,
+  -2532, -1990, -1482, -959, -440, 11, 451, 954, 1492, 2020, 2562, 3059,
+  3577, 4113, 4618, 5134, 5724, 6060, 6758, 7015, 7716, 8066, 8741
+};
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.h b/modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.h
new file mode 100644
index 0000000..a8c0c3a
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_lag_tables.h
@@ -0,0 +1,101 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * pitch_lag_tables.h
+ *
+ * This file contains tables for the pitch filter side-info in the entropy coder.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_LAG_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_LAG_TABLES_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+/********************* Pitch Filter Lag Coefficient Tables ************************/
+
+/* tables for use with small pitch gain */
+
+/* cdfs for quantized pitch lags */
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf1Lo[127];
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf2Lo[20];
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf3Lo[2];
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf4Lo[10];
+
+extern const uint16_t *WebRtcIsacfix_kPitchLagPtrLo[4];
+
+/* size of first cdf table */
+extern const uint16_t WebRtcIsacfix_kPitchLagSizeLo[1];
+
+/* index limits and ranges */
+extern const int16_t WebRtcIsacfix_kLowerLimitLo[4];
+extern const int16_t WebRtcIsacfix_kUpperLimitLo[4];
+
+/* initial index for arithmetic decoder */
+extern const uint16_t WebRtcIsacfix_kInitIndLo[3];
+
+/* mean values of pitch filter lags */
+extern const int16_t WebRtcIsacfix_kMeanLag2Lo[19];
+extern const int16_t WebRtcIsacfix_kMeanLag4Lo[9];
+
+
+
+/* tables for use with medium pitch gain */
+
+/* cdfs for quantized pitch lags */
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf1Mid[255];
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf2Mid[36];
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf3Mid[2];
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf4Mid[20];
+
+extern const uint16_t *WebRtcIsacfix_kPitchLagPtrMid[4];
+
+/* size of first cdf table */
+extern const uint16_t WebRtcIsacfix_kPitchLagSizeMid[1];
+
+/* index limits and ranges */
+extern const int16_t WebRtcIsacfix_kLowerLimitMid[4];
+extern const int16_t WebRtcIsacfix_kUpperLimitMid[4];
+
+/* initial index for arithmetic decoder */
+extern const uint16_t WebRtcIsacfix_kInitIndMid[3];
+
+/* mean values of pitch filter lags */
+extern const int16_t WebRtcIsacfix_kMeanLag2Mid[35];
+extern const int16_t WebRtcIsacfix_kMeanLag4Mid[19];
+
+
+/* tables for use with large pitch gain */
+
+/* cdfs for quantized pitch lags */
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf1Hi[511];
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf2Hi[68];
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf3Hi[2];
+extern const uint16_t WebRtcIsacfix_kPitchLagCdf4Hi[35];
+
+extern const uint16_t *WebRtcIsacfix_kPitchLagPtrHi[4];
+
+/* size of first cdf table */
+extern const uint16_t WebRtcIsacfix_kPitchLagSizeHi[1];
+
+/* index limits and ranges */
+extern const int16_t WebRtcIsacfix_kLowerLimitHi[4];
+extern const int16_t WebRtcIsacfix_kUpperLimitHi[4];
+
+/* initial index for arithmetic decoder */
+extern const uint16_t WebRtcIsacfix_kInitIndHi[3];
+
+/* mean values of pitch filter lags */
+extern const int16_t WebRtcIsacfix_kMeanLag2Hi[67];
+extern const int16_t WebRtcIsacfix_kMeanLag4Hi[34];
+
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_LAG_TABLES_H_ */
diff --git a/modules/audio_coding/codecs/isac/fix/source/settings.h b/modules/audio_coding/codecs/isac/fix/source/settings.h
new file mode 100644
index 0000000..34c0efe
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/settings.h
@@ -0,0 +1,215 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * settings.h
+ *
+ * Declaration of #defines used in the iSAC codec
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_SETTINGS_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_SETTINGS_H_
+
+
+/* sampling frequency (Hz) */
+#define FS                                      16000
+/* 1.5 times Sampling frequency */
+#define FS_1_HALF        (uint32_t) 24000
+/* Three times Sampling frequency */
+#define FS3          (uint32_t) 48000
+/* Eight times Sampling frequency */
+#define FS8          (uint32_t) 128000
+
+/* number of samples per frame (either 480 (30ms) or 960 (60ms)) */
+#define INITIAL_FRAMESAMPLES     960
+
+/* miliseconds */
+#define FRAMESIZE                               30
+/* number of samples per frame processed in the encoder (30ms) */
+#define FRAMESAMPLES                            480     /* ((FRAMESIZE*FS)/1000) */
+#define FRAMESAMPLES_HALF       240
+/* max number of samples per frame (= 60 ms frame) */
+#define MAX_FRAMESAMPLES      960
+/* number of samples per 10ms frame */
+#define FRAMESAMPLES_10ms                       160      /* ((10*FS)/1000) */
+/* Number of samples per 1 ms */
+#define SAMPLES_PER_MSEC      16
+/* number of subframes */
+#define SUBFRAMES                               6
+/* length of a subframe */
+#define UPDATE                                  80
+/* length of half a subframe (low/high band) */
+#define HALF_SUBFRAMELEN                        40    /* (UPDATE/2) */
+/* samples of look ahead (in a half-band, so actually half the samples of look ahead @ FS) */
+#define QLOOKAHEAD                              24    /* 3 ms */
+
+/* order of AR model in spectral entropy coder */
+#define AR_ORDER                                6
+#define MAX_ORDER                               13
+#define LEVINSON_MAX_ORDER                  12
+
+/* window length (masking analysis) */
+#define WINLEN                                  256
+/* order of low-band pole filter used to approximate masking curve */
+#define ORDERLO                                 12
+/* order of hi-band pole filter used to approximate masking curve */
+#define ORDERHI                                 6
+
+#define KLT_NUM_AVG_GAIN                        0
+#define KLT_NUM_AVG_SHAPE                       0
+#define KLT_NUM_MODELS                          3
+#define LPC_SHAPE_ORDER                         18    /* (ORDERLO + ORDERHI) */
+
+#define KLT_ORDER_GAIN                          12    /* (2 * SUBFRAMES) */
+#define KLT_ORDER_SHAPE                         108   /*  (LPC_SHAPE_ORDER * SUBFRAMES) */
+
+
+
+/* order for post_filter_bank */
+#define POSTQORDER                              3
+/* order for pre-filterbank */
+#define QORDER                                  3
+/* for decimator */
+#define ALLPASSSECTIONS                         2
+/* The number of composite all-pass filter factors */
+#define NUMBEROFCOMPOSITEAPSECTIONS             4
+
+/* The number of all-pass filter factors in an upper or lower channel*/
+#define NUMBEROFCHANNELAPSECTIONS               2
+
+
+
+#define DPMIN_Q10                            -10240   /* -10.00 in Q10 */
+#define DPMAX_Q10                             10240   /* 10.00 in Q10 */
+#define MINBITS_Q10                           10240   /* 10.0 in Q10 */
+
+
+/* array size for byte stream in number of Word16. */
+#define STREAM_MAXW16       300 /* The old maximum size still needed for the decoding */
+#define STREAM_MAXW16_30MS  100 /* 100 Word16 = 200 bytes = 53.4 kbit/s @ 30 ms.framelength */
+#define STREAM_MAXW16_60MS  200 /* 200 Word16 = 400 bytes = 53.4 kbit/s @ 60 ms.framelength */
+/* This is used only at the decoder bit-stream struct.
+ * - The encoder and decoder bitstream containers are of different size because
+ *   old iSAC limited the encoded bitstream to 600 bytes. But newer versions
+ *   restrict to shorter bitstream.
+ * - We add 10 bytes of guards to the internal bitstream container. The reason
+ *   is that entropy decoder might read few bytes (3 according to our
+ *   observations) more than the actual size of the bitstream. To avoid reading
+ *   outside memory, in rare occasion of full-size bitstream we add 10 bytes
+ *   of guard. */
+#define INTERNAL_STREAM_SIZE_W16 (STREAM_MAXW16 + 5)
+
+/* storage size for bit counts */
+//#define BIT_COUNTER_SIZE                        30
+/* maximum order of any AR model or filter */
+#define MAX_AR_MODEL_ORDER                      12
+
+/* Maximum number of iterations allowed to limit payload size */
+#define MAX_PAYLOAD_LIMIT_ITERATION           1
+
+/* Bandwidth estimator */
+
+#define MIN_ISAC_BW                           10000     /* Minimum bandwidth in bits per sec */
+#define MAX_ISAC_BW                           32000     /* Maxmum bandwidth in bits per sec */
+#define MIN_ISAC_MD                           5         /* Minimum Max Delay in ?? */
+#define MAX_ISAC_MD                           25        /* Maxmum Max Delay in ?? */
+#define DELAY_CORRECTION_MAX      717
+#define DELAY_CORRECTION_MED      819
+#define Thld_30_60         18000
+#define Thld_60_30         27000
+
+/* assumed header size; we don't know the exact number (header compression may be used) */
+#define HEADER_SIZE                           35       /* bytes */
+#define INIT_FRAME_LEN                        60
+#define INIT_BN_EST                           20000
+#define INIT_BN_EST_Q7                        2560000  /* 20 kbps in Q7 */
+#define INIT_REC_BN_EST_Q5                    789312   /* INIT_BN_EST + INIT_HDR_RATE in Q5 */
+
+/* 8738 in Q18 is ~ 1/30 */
+/* #define INIT_HDR_RATE (((HEADER_SIZE * 8 * 1000) * 8738) >> NUM_BITS_TO_SHIFT (INIT_FRAME_LEN)) */
+#define INIT_HDR_RATE                    4666
+/* number of packets in a row for a high rate burst */
+#define BURST_LEN                             3
+/* ms, max time between two full bursts */
+#define BURST_INTERVAL                        800
+/* number of packets in a row for initial high rate burst */
+#define INIT_BURST_LEN                        5
+/* bits/s, rate for the first BURST_LEN packets */
+#define INIT_RATE                             10240000 /* INIT_BN_EST in Q9 */
+
+
+/* For pitch analysis */
+#define PITCH_FRAME_LEN                         240  /* (FRAMESAMPLES/2) 30 ms  */
+#define PITCH_MAX_LAG                           140       /* 57 Hz  */
+#define PITCH_MIN_LAG                           20                /* 400 Hz */
+#define PITCH_MIN_LAG_Q8                        5120 /* 256 * PITCH_MIN_LAG */
+#define OFFSET_Q8                               768  /* 256 * 3 */
+
+#define PITCH_MAX_GAIN_Q12      1843                  /* 0.45 */
+#define PITCH_LAG_SPAN2                         65   /* (PITCH_MAX_LAG/2-PITCH_MIN_LAG/2+5) */
+#define PITCH_CORR_LEN2                         60     /* 15 ms  */
+#define PITCH_CORR_STEP2                        60   /* (PITCH_FRAME_LEN/4) */
+#define PITCH_SUBFRAMES                         4
+#define PITCH_SUBFRAME_LEN                      60   /* (PITCH_FRAME_LEN/PITCH_SUBFRAMES) */
+
+/* For pitch filter */
+#define PITCH_BUFFSIZE                   190  /* (PITCH_MAX_LAG + 50) Extra 50 for fraction and LP filters */
+#define PITCH_INTBUFFSIZE               430  /* (PITCH_FRAME_LEN+PITCH_BUFFSIZE) */
+#define PITCH_FRACS                             8
+#define PITCH_FRACORDER                         9
+#define PITCH_DAMPORDER                         5
+
+
+/* Order of high pass filter */
+#define HPORDER                                 2
+
+
+/* PLC */
+#define DECAY_RATE               10               /* Q15, 20% of decay every lost frame apllied linearly sample by sample*/
+#define PLC_WAS_USED              1
+#define PLC_NOT_USED              3
+#define RECOVERY_OVERLAP         80
+#define RESAMP_RES              256
+#define RESAMP_RES_BIT            8
+
+
+
+/* Define Error codes */
+/* 6000 General */
+#define ISAC_MEMORY_ALLOCATION_FAILED    6010
+#define ISAC_MODE_MISMATCH       6020
+#define ISAC_DISALLOWED_BOTTLENECK     6030
+#define ISAC_DISALLOWED_FRAME_LENGTH    6040
+/* 6200 Bandwidth estimator */
+#define ISAC_RANGE_ERROR_BW_ESTIMATOR    6240
+/* 6400 Encoder */
+#define ISAC_ENCODER_NOT_INITIATED     6410
+#define ISAC_DISALLOWED_CODING_MODE     6420
+#define ISAC_DISALLOWED_FRAME_MODE_ENCODER   6430
+#define ISAC_DISALLOWED_BITSTREAM_LENGTH            6440
+#define ISAC_PAYLOAD_LARGER_THAN_LIMIT              6450
+/* 6600 Decoder */
+#define ISAC_DECODER_NOT_INITIATED     6610
+#define ISAC_EMPTY_PACKET       6620
+#define ISAC_PACKET_TOO_SHORT 6625
+#define ISAC_DISALLOWED_FRAME_MODE_DECODER   6630
+#define ISAC_RANGE_ERROR_DECODE_FRAME_LENGTH  6640
+#define ISAC_RANGE_ERROR_DECODE_BANDWIDTH   6650
+#define ISAC_RANGE_ERROR_DECODE_PITCH_GAIN   6660
+#define ISAC_RANGE_ERROR_DECODE_PITCH_LAG   6670
+#define ISAC_RANGE_ERROR_DECODE_LPC     6680
+#define ISAC_RANGE_ERROR_DECODE_SPECTRUM   6690
+#define ISAC_LENGTH_MISMATCH      6730
+/* 6800 Call setup formats */
+#define ISAC_INCOMPATIBLE_FORMATS     6810
+
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_SETTINGS_H_ */
diff --git a/modules/audio_coding/codecs/isac/fix/source/spectrum_ar_model_tables.c b/modules/audio_coding/codecs/isac/fix/source/spectrum_ar_model_tables.c
new file mode 100644
index 0000000..4ef9a33
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/spectrum_ar_model_tables.c
@@ -0,0 +1,193 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * spectrum_ar_model_tables.c
+ *
+ * This file contains tables with AR coefficients, Gain coefficients
+ * and cosine tables.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/spectrum_ar_model_tables.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+/********************* AR Coefficient Tables ************************/
+
+/* cdf for quantized reflection coefficient 1 */
+const uint16_t WebRtcIsacfix_kRc1Cdf[12] = {
+  0,  2,  4,  129,  7707,  57485,  65495,  65527,  65529,  65531,
+  65533,  65535
+};
+
+/* cdf for quantized reflection coefficient 2 */
+const uint16_t WebRtcIsacfix_kRc2Cdf[12] = {
+  0,  2,  4,  7,  531,  25298,  64525,  65526,  65529,  65531,
+  65533,  65535
+};
+
+/* cdf for quantized reflection coefficient 3 */
+const uint16_t WebRtcIsacfix_kRc3Cdf[12] = {
+  0,  2,  4,  6,  620,  22898,  64843,  65527,  65529,  65531,
+  65533,  65535
+};
+
+/* cdf for quantized reflection coefficient 4 */
+const uint16_t WebRtcIsacfix_kRc4Cdf[12] = {
+  0,  2,  4,  6,  35,  10034,  60733,  65506,  65529,  65531,
+  65533,  65535
+};
+
+/* cdf for quantized reflection coefficient 5 */
+const uint16_t WebRtcIsacfix_kRc5Cdf[12] = {
+  0,  2,  4,  6,  36,  7567,  56727,  65385,  65529,  65531,
+  65533,  65535
+};
+
+/* cdf for quantized reflection coefficient 6 */
+const uint16_t WebRtcIsacfix_kRc6Cdf[12] = {
+  0,  2,  4,  6,  14,  6579,  57360,  65409,  65529,  65531,
+  65533,  65535
+};
+
+/* representation levels for quantized reflection coefficient 1 */
+const int16_t WebRtcIsacfix_kRc1Levels[11] = {
+  -32104, -29007, -23202, -15496, -9279, -2577, 5934, 17535, 24512, 29503, 32104
+};
+
+/* representation levels for quantized reflection coefficient 2 */
+const int16_t WebRtcIsacfix_kRc2Levels[11] = {
+  -32104, -29503, -23494, -15261, -7309, -1399, 6158, 16381, 24512, 29503, 32104
+};
+
+/* representation levels for quantized reflection coefficient 3 */
+const int16_t WebRtcIsacfix_kRc3Levels[11] = {
+  -32104, -29503, -23157, -15186, -7347, -1359, 5829, 17535, 24512, 29503, 32104
+};
+
+/* representation levels for quantized reflection coefficient 4 */
+const int16_t WebRtcIsacfix_kRc4Levels[11] = {
+  -32104, -29503, -24512, -15362, -6665, -342, 6596, 14585, 24512, 29503, 32104
+};
+
+/* representation levels for quantized reflection coefficient 5 */
+const int16_t WebRtcIsacfix_kRc5Levels[11] = {
+  -32104, -29503, -24512, -15005, -6564, -106, 7123, 14920, 24512, 29503, 32104
+};
+
+/* representation levels for quantized reflection coefficient 6 */
+const int16_t WebRtcIsacfix_kRc6Levels[11] = {
+  -32104, -29503, -24512, -15096, -6656, -37, 7036, 14847, 24512, 29503, 32104
+};
+
+/* quantization boundary levels for reflection coefficients */
+const int16_t WebRtcIsacfix_kRcBound[12] = {
+  -32768, -31441, -27566, -21458, -13612, -4663,
+  4663, 13612, 21458, 27566, 31441, 32767
+};
+
+/* initial index for AR reflection coefficient quantizer and cdf table search */
+const uint16_t WebRtcIsacfix_kRcInitInd[6] = {
+  5,  5,  5,  5,  5,  5
+};
+
+/* pointers to AR cdf tables */
+const uint16_t *WebRtcIsacfix_kRcCdfPtr[AR_ORDER] = {
+  WebRtcIsacfix_kRc1Cdf,
+  WebRtcIsacfix_kRc2Cdf,
+  WebRtcIsacfix_kRc3Cdf,
+  WebRtcIsacfix_kRc4Cdf,
+  WebRtcIsacfix_kRc5Cdf,
+  WebRtcIsacfix_kRc6Cdf
+};
+
+/* pointers to AR representation levels tables */
+const int16_t *WebRtcIsacfix_kRcLevPtr[AR_ORDER] = {
+  WebRtcIsacfix_kRc1Levels,
+  WebRtcIsacfix_kRc2Levels,
+  WebRtcIsacfix_kRc3Levels,
+  WebRtcIsacfix_kRc4Levels,
+  WebRtcIsacfix_kRc5Levels,
+  WebRtcIsacfix_kRc6Levels
+};
+
+
+/******************** GAIN Coefficient Tables ***********************/
+
+/* cdf for Gain coefficient */
+const uint16_t WebRtcIsacfix_kGainCdf[19] = {
+  0,  2,  4,  6,  8,  10,  12,  14,  16,  1172,
+  11119,  29411,  51699,  64445,  65527,  65529,  65531,  65533,  65535
+};
+
+/* representation levels for quantized squared Gain coefficient */
+const int32_t WebRtcIsacfix_kGain2Lev[18] = {
+  128, 128, 128, 128, 128, 215, 364, 709, 1268,
+  1960, 3405, 6078, 11286, 17827, 51918, 134498, 487432, 2048000
+};
+
+/* quantization boundary levels for squared Gain coefficient */
+const int32_t WebRtcIsacfix_kGain2Bound[19] = {
+  0, 21, 35, 59, 99, 166, 280, 475, 815, 1414,
+  2495, 4505, 8397, 16405, 34431, 81359, 240497, 921600, 0x7FFFFFFF
+};
+
+/* pointers to Gain cdf table */
+const uint16_t *WebRtcIsacfix_kGainPtr[1] = {
+  WebRtcIsacfix_kGainCdf
+};
+
+/* gain initial index for gain quantizer and cdf table search */
+const uint16_t WebRtcIsacfix_kGainInitInd[1] = {
+  11
+};
+
+
+/************************* Cosine Tables ****************************/
+
+/* cosine table */
+const int16_t WebRtcIsacfix_kCos[6][60] = {
+  { 512,   512,   511,   510,   508,   507,   505,   502,   499,   496,
+        493,   489,   485,   480,   476,   470,   465,   459,   453,   447,
+ 440,   433,   426,   418,   410,   402,   394,   385,   376,   367,
+        357,   348,   338,   327,   317,   306,   295,   284,   273,   262,
+ 250,   238,   226,   214,   202,   190,   177,   165,   152,   139,
+        126,   113,   100,   87,   73,   60,   47,   33,   20,   7       },
+  { 512,   510,   508,   503,   498,   491,   483,   473,   462,   450,
+        437,   422,   406,   389,   371,   352,   333,   312,   290,   268,
+ 244,   220,   196,   171,   145,   120,   93,   67,   40,   13,
+        -13,   -40,   -67,   -93,   -120,   -145,   -171,   -196,   -220,   -244,
+ -268,   -290,   -312,   -333,   -352,   -371,   -389,   -406,   -422,   -437,
+        -450,   -462,   -473,   -483,   -491,   -498,   -503,   -508,   -510,   -512    },
+  { 512,   508,   502,   493,   480,   465,   447,   426,   402,   376,
+        348,   317,   284,   250,   214,   177,   139,   100,   60,   20,
+ -20,   -60,   -100,   -139,   -177,   -214,   -250,   -284,   -317,   -348,
+        -376,   -402,   -426,   -447,   -465,   -480,   -493,   -502,   -508,   -512,
+ -512,   -508,   -502,   -493,   -480,   -465,   -447,   -426,   -402,   -376,
+        -348,   -317,   -284,   -250,   -214,   -177,   -139,   -100,   -60,   -20     },
+  { 511,   506,   495,   478,   456,   429,   398,   362,   322,   279,
+        232,   183,   133,   80,   27,   -27,   -80,   -133,   -183,   -232,
+ -279,   -322,   -362,   -398,   -429,   -456,   -478,   -495,   -506,   -511,
+        -511,   -506,   -495,   -478,   -456,   -429,   -398,   -362,   -322,   -279,
+ -232,   -183,   -133,   -80,   -27,   27,   80,   133,   183,   232,
+        279,   322,   362,   398,   429,   456,   478,   495,   506,   511     },
+  { 511,   502,   485,   459,   426,   385,   338,   284,   226,   165,
+        100,   33,   -33,   -100,   -165,   -226,   -284,   -338,   -385,   -426,
+ -459,   -485,   -502,   -511,   -511,   -502,   -485,   -459,   -426,   -385,
+        -338,   -284,   -226,   -165,   -100,   -33,   33,   100,   165,   226,
+ 284,   338,   385,   426,   459,   485,   502,   511,   511,   502,
+        485,   459,   426,   385,   338,   284,   226,   165,   100,   33      },
+  { 510,   498,   473,   437,   389,   333,   268,   196,   120,   40,
+        -40,   -120,   -196,   -268,   -333,   -389,   -437,   -473,   -498,   -510,
+ -510,   -498,   -473,   -437,   -389,   -333,   -268,   -196,   -120,   -40,
+        40,   120,   196,   268,   333,   389,   437,   473,   498,   510,
+ 510,   498,   473,   437,   389,   333,   268,   196,   120,   40,
+        -40,   -120,   -196,   -268,   -333,   -389,   -437,   -473,   -498,   -510    }
+};
diff --git a/modules/audio_coding/codecs/isac/fix/source/spectrum_ar_model_tables.h b/modules/audio_coding/codecs/isac/fix/source/spectrum_ar_model_tables.h
new file mode 100644
index 0000000..04fddf5
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/spectrum_ar_model_tables.h
@@ -0,0 +1,95 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * spectrum_ar_model_tables.h
+ *
+ * This file contains definitions of tables with AR coefficients,
+ * Gain coefficients and cosine tables.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_SPECTRUM_AR_MODEL_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_SPECTRUM_AR_MODEL_TABLES_H_
+
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+/********************* AR Coefficient Tables ************************/
+/* cdf for quantized reflection coefficient 1 */
+extern const uint16_t WebRtcIsacfix_kRc1Cdf[12];
+
+/* cdf for quantized reflection coefficient 2 */
+extern const uint16_t WebRtcIsacfix_kRc2Cdf[12];
+
+/* cdf for quantized reflection coefficient 3 */
+extern const uint16_t WebRtcIsacfix_kRc3Cdf[12];
+
+/* cdf for quantized reflection coefficient 4 */
+extern const uint16_t WebRtcIsacfix_kRc4Cdf[12];
+
+/* cdf for quantized reflection coefficient 5 */
+extern const uint16_t WebRtcIsacfix_kRc5Cdf[12];
+
+/* cdf for quantized reflection coefficient 6 */
+extern const uint16_t WebRtcIsacfix_kRc6Cdf[12];
+
+/* representation levels for quantized reflection coefficient 1 */
+extern const int16_t WebRtcIsacfix_kRc1Levels[11];
+
+/* representation levels for quantized reflection coefficient 2 */
+extern const int16_t WebRtcIsacfix_kRc2Levels[11];
+
+/* representation levels for quantized reflection coefficient 3 */
+extern const int16_t WebRtcIsacfix_kRc3Levels[11];
+
+/* representation levels for quantized reflection coefficient 4 */
+extern const int16_t WebRtcIsacfix_kRc4Levels[11];
+
+/* representation levels for quantized reflection coefficient 5 */
+extern const int16_t WebRtcIsacfix_kRc5Levels[11];
+
+/* representation levels for quantized reflection coefficient 6 */
+extern const int16_t WebRtcIsacfix_kRc6Levels[11];
+
+/* quantization boundary levels for reflection coefficients */
+extern const int16_t WebRtcIsacfix_kRcBound[12];
+
+/* initial indices for AR reflection coefficient quantizer and cdf table search */
+extern const uint16_t WebRtcIsacfix_kRcInitInd[AR_ORDER];
+
+/* pointers to AR cdf tables */
+extern const uint16_t *WebRtcIsacfix_kRcCdfPtr[AR_ORDER];
+
+/* pointers to AR representation levels tables */
+extern const int16_t *WebRtcIsacfix_kRcLevPtr[AR_ORDER];
+
+
+/******************** GAIN Coefficient Tables ***********************/
+/* cdf for Gain coefficient */
+extern const uint16_t WebRtcIsacfix_kGainCdf[19];
+
+/* representation levels for quantized Gain coefficient */
+extern const int32_t WebRtcIsacfix_kGain2Lev[18];
+
+/* squared quantization boundary levels for Gain coefficient */
+extern const int32_t WebRtcIsacfix_kGain2Bound[19];
+
+/* pointer to Gain cdf table */
+extern const uint16_t *WebRtcIsacfix_kGainPtr[1];
+
+/* Gain initial index for gain quantizer and cdf table search */
+extern const uint16_t WebRtcIsacfix_kGainInitInd[1];
+
+/************************* Cosine Tables ****************************/
+/* Cosine table */
+extern const int16_t WebRtcIsacfix_kCos[6][60];
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_SPECTRUM_AR_MODEL_TABLES_H_ */
diff --git a/modules/audio_coding/codecs/isac/fix/source/structs.h b/modules/audio_coding/codecs/isac/fix/source/structs.h
new file mode 100644
index 0000000..7a14e5c
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/structs.h
@@ -0,0 +1,382 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * structs.h
+ *
+ * This header file contains all the structs used in the ISAC codec
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_STRUCTS_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_STRUCTS_H_
+
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/codecs/isac/bandwidth_info.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+/* Bitstream struct for decoder */
+typedef struct Bitstreamstruct_dec {
+
+  uint16_t  stream[INTERNAL_STREAM_SIZE_W16];  /* Array bytestream to decode */
+  uint32_t  W_upper;          /* Upper boundary of interval W */
+  uint32_t  streamval;
+  uint16_t  stream_index;     /* Index to the current position in bytestream */
+  int16_t   full;             /* 0 - first byte in memory filled, second empty*/
+  /* 1 - both bytes are empty (we just filled the previous memory */
+
+  size_t stream_size;  /* The size of stream in bytes. */
+} Bitstr_dec;
+
+/* Bitstream struct for encoder */
+typedef struct Bitstreamstruct_enc {
+
+  uint16_t  stream[STREAM_MAXW16_60MS];   /* Vector for adding encoded bytestream */
+  uint32_t  W_upper;          /* Upper boundary of interval W */
+  uint32_t  streamval;
+  uint16_t  stream_index;     /* Index to the current position in bytestream */
+  int16_t   full;             /* 0 - first byte in memory filled, second empty*/
+  /* 1 - both bytes are empty (we just filled the previous memory */
+
+} Bitstr_enc;
+
+
+typedef struct {
+
+  int16_t DataBufferLoQ0[WINLEN];
+  int16_t DataBufferHiQ0[WINLEN];
+
+  int32_t CorrBufLoQQ[ORDERLO+1];
+  int32_t CorrBufHiQQ[ORDERHI+1];
+
+  int16_t CorrBufLoQdom[ORDERLO+1];
+  int16_t CorrBufHiQdom[ORDERHI+1];
+
+  int32_t PreStateLoGQ15[ORDERLO+1];
+  int32_t PreStateHiGQ15[ORDERHI+1];
+
+  uint32_t OldEnergy;
+
+} MaskFiltstr_enc;
+
+
+
+typedef struct {
+
+  int16_t PostStateLoGQ0[ORDERLO+1];
+  int16_t PostStateHiGQ0[ORDERHI+1];
+
+  uint32_t OldEnergy;
+
+} MaskFiltstr_dec;
+
+
+
+
+
+
+
+
+typedef struct {
+
+  //state vectors for each of the two analysis filters
+
+  int32_t INSTAT1_fix[2*(QORDER-1)];
+  int32_t INSTAT2_fix[2*(QORDER-1)];
+  int16_t INLABUF1_fix[QLOOKAHEAD];
+  int16_t INLABUF2_fix[QLOOKAHEAD];
+
+  /* High pass filter */
+  int32_t HPstates_fix[HPORDER];
+
+} PreFiltBankstr;
+
+
+typedef struct {
+
+  //state vectors for each of the two analysis filters
+  int32_t STATE_0_LOWER_fix[2*POSTQORDER];
+  int32_t STATE_0_UPPER_fix[2*POSTQORDER];
+
+  /* High pass filter */
+
+  int32_t HPstates1_fix[HPORDER];
+  int32_t HPstates2_fix[HPORDER];
+
+} PostFiltBankstr;
+
+typedef struct {
+
+
+  /* data buffer for pitch filter */
+  int16_t ubufQQ[PITCH_BUFFSIZE];
+
+  /* low pass state vector */
+  int16_t ystateQQ[PITCH_DAMPORDER];
+
+  /* old lag and gain */
+  int16_t oldlagQ7;
+  int16_t oldgainQ12;
+
+} PitchFiltstr;
+
+
+
+typedef struct {
+
+  //for inital estimator
+  int16_t   dec_buffer16[PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2];
+  int32_t   decimator_state32[2*ALLPASSSECTIONS+1];
+  int16_t   inbuf[QLOOKAHEAD];
+
+  PitchFiltstr  PFstr_wght;
+  PitchFiltstr  PFstr;
+
+
+} PitchAnalysisStruct;
+
+
+typedef struct {
+  /* Parameters used in PLC to avoid re-computation       */
+
+  /* --- residual signals --- */
+  int16_t prevPitchInvIn[FRAMESAMPLES/2];
+  int16_t prevPitchInvOut[PITCH_MAX_LAG + 10];            // [FRAMESAMPLES/2]; save 90
+  int32_t prevHP[PITCH_MAX_LAG + 10];                     // [FRAMESAMPLES/2]; save 90
+
+
+  int16_t decayCoeffPriodic; /* how much to supress a sample */
+  int16_t decayCoeffNoise;
+  int16_t used;       /* if PLC is used */
+
+
+  int16_t *lastPitchLP;                                  // [FRAMESAMPLES/2]; saved 240;
+
+
+  /* --- LPC side info --- */
+  int16_t lofilt_coefQ15[ ORDERLO ];
+  int16_t hifilt_coefQ15[ ORDERHI ];
+  int32_t gain_lo_hiQ17[2];
+
+  /* --- LTP side info --- */
+  int16_t AvgPitchGain_Q12;
+  int16_t lastPitchGain_Q12;
+  int16_t lastPitchLag_Q7;
+
+  /* --- Add-overlap in recovery packet --- */
+  int16_t overlapLP[ RECOVERY_OVERLAP ];                 // [FRAMESAMPLES/2]; saved 160
+
+  int16_t pitchCycles;
+  int16_t A;
+  int16_t B;
+  size_t pitchIndex;
+  size_t stretchLag;
+  int16_t *prevPitchLP;                                  // [ FRAMESAMPLES/2 ]; saved 240
+  int16_t seed;
+
+  int16_t std;
+} PLCstr;
+
+
+
+/* Have instance of struct together with other iSAC structs */
+typedef struct {
+
+  int16_t   prevFrameSizeMs;      /* Previous frame size (in ms) */
+  uint16_t  prevRtpNumber;      /* Previous RTP timestamp from received packet */
+  /* (in samples relative beginning)  */
+  uint32_t  prevSendTime;   /* Send time for previous packet, from RTP header */
+  uint32_t  prevArrivalTime;      /* Arrival time for previous packet (in ms using timeGetTime()) */
+  uint16_t  prevRtpRate;          /* rate of previous packet, derived from RTP timestamps (in bits/s) */
+  uint32_t  lastUpdate;           /* Time since the last update of the Bottle Neck estimate (in samples) */
+  uint32_t  lastReduction;        /* Time sinse the last reduction (in samples) */
+  int32_t   countUpdates;         /* How many times the estimate was update in the beginning */
+
+  /* The estimated bottle neck rate from there to here (in bits/s)                */
+  uint32_t  recBw;
+  uint32_t  recBwInv;
+  uint32_t  recBwAvg;
+  uint32_t  recBwAvgQ;
+
+  uint32_t  minBwInv;
+  uint32_t  maxBwInv;
+
+  /* The estimated mean absolute jitter value, as seen on this side (in ms)       */
+  int32_t   recJitter;
+  int32_t   recJitterShortTerm;
+  int32_t   recJitterShortTermAbs;
+  int32_t   recMaxDelay;
+  int32_t   recMaxDelayAvgQ;
+
+
+  int16_t   recHeaderRate;         /* (assumed) bitrate for headers (bps) */
+
+  uint32_t  sendBwAvg;           /* The estimated bottle neck rate from here to there (in bits/s) */
+  int32_t   sendMaxDelayAvg;    /* The estimated mean absolute jitter value, as seen on the other siee (in ms)  */
+
+
+  int16_t   countRecPkts;          /* number of packets received since last update */
+  int16_t   highSpeedRec;        /* flag for marking that a high speed network has been detected downstream */
+
+  /* number of consecutive pkts sent during which the bwe estimate has
+     remained at a value greater than the downstream threshold for determining highspeed network */
+  int16_t   countHighSpeedRec;
+
+  /* flag indicating bwe should not adjust down immediately for very late pckts */
+  int16_t   inWaitPeriod;
+
+  /* variable holding the time of the start of a window of time when
+     bwe should not adjust down immediately for very late pckts */
+  uint32_t  startWaitPeriod;
+
+  /* number of consecutive pkts sent during which the bwe estimate has
+     remained at a value greater than the upstream threshold for determining highspeed network */
+  int16_t   countHighSpeedSent;
+
+  /* flag indicated the desired number of packets over threshold rate have been sent and
+     bwe will assume the connection is over broadband network */
+  int16_t   highSpeedSend;
+
+  IsacBandwidthInfo external_bw_info;
+} BwEstimatorstr;
+
+
+typedef struct {
+
+  /* boolean, flags if previous packet exceeded B.N. */
+  int16_t    PrevExceed;
+  /* ms */
+  int16_t    ExceedAgo;
+  /* packets left to send in current burst */
+  int16_t    BurstCounter;
+  /* packets */
+  int16_t    InitCounter;
+  /* ms remaining in buffer when next packet will be sent */
+  int16_t    StillBuffered;
+
+} RateModel;
+
+/* The following strutc is used to store data from encoding, to make it
+   fast and easy to construct a new bitstream with a different Bandwidth
+   estimate. All values (except framelength and minBytes) is double size to
+   handle 60 ms of data.
+*/
+typedef struct {
+
+  /* Used to keep track of if it is first or second part of 60 msec packet */
+  int     startIdx;
+
+  /* Frame length in samples */
+  int16_t         framelength;
+
+  /* Pitch Gain */
+  int16_t   pitchGain_index[2];
+
+  /* Pitch Lag */
+  int32_t   meanGain[2];
+  int16_t   pitchIndex[PITCH_SUBFRAMES*2];
+
+  /* LPC */
+  int32_t         LPCcoeffs_g[12*2]; /* KLT_ORDER_GAIN = 12 */
+  int16_t   LPCindex_s[108*2]; /* KLT_ORDER_SHAPE = 108 */
+  int16_t   LPCindex_g[12*2];  /* KLT_ORDER_GAIN = 12 */
+
+  /* Encode Spec */
+  int16_t   fre[FRAMESAMPLES];
+  int16_t   fim[FRAMESAMPLES];
+  int16_t   AvgPitchGain[2];
+
+  /* Used in adaptive mode only */
+  int     minBytes;
+
+} IsacSaveEncoderData;
+
+typedef struct {
+
+  Bitstr_enc          bitstr_obj;
+  MaskFiltstr_enc     maskfiltstr_obj;
+  PreFiltBankstr      prefiltbankstr_obj;
+  PitchFiltstr        pitchfiltstr_obj;
+  PitchAnalysisStruct pitchanalysisstr_obj;
+  RateModel           rate_data_obj;
+
+  int16_t         buffer_index;
+  int16_t         current_framesamples;
+
+  int16_t      data_buffer_fix[FRAMESAMPLES]; // the size was MAX_FRAMESAMPLES
+
+  int16_t         frame_nb;
+  int16_t         BottleNeck;
+  int16_t         MaxDelay;
+  int16_t         new_framelength;
+  int16_t         s2nr;
+  uint16_t        MaxBits;
+
+  int16_t         bitstr_seed;
+#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
+  PostFiltBankstr     interpolatorstr_obj;
+#endif
+
+  IsacSaveEncoderData *SaveEnc_ptr;
+  int16_t         payloadLimitBytes30; /* Maximum allowed number of bits for a 30 msec packet */
+  int16_t         payloadLimitBytes60; /* Maximum allowed number of bits for a 30 msec packet */
+  int16_t         maxPayloadBytes;     /* Maximum allowed number of bits for both 30 and 60 msec packet */
+  int16_t         maxRateInBytes;      /* Maximum allowed rate in bytes per 30 msec packet */
+  int16_t         enforceFrameSize;    /* If set iSAC will never change packet size */
+
+} IsacFixEncoderInstance;
+
+
+typedef struct {
+
+  Bitstr_dec          bitstr_obj;
+  MaskFiltstr_dec     maskfiltstr_obj;
+  PostFiltBankstr     postfiltbankstr_obj;
+  PitchFiltstr        pitchfiltstr_obj;
+  PLCstr              plcstr_obj;               /* TS; for packet loss concealment */
+
+#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
+  PreFiltBankstr      decimatorstr_obj;
+#endif
+
+} IsacFixDecoderInstance;
+
+
+
+typedef struct {
+
+  IsacFixEncoderInstance ISACenc_obj;
+  IsacFixDecoderInstance ISACdec_obj;
+  BwEstimatorstr     bwestimator_obj;
+  int16_t         CodingMode;       /* 0 = adaptive; 1 = instantaneous */
+  int16_t   errorcode;
+  int16_t   initflag;  /* 0 = nothing initiated; 1 = encoder or decoder */
+  /* not initiated; 2 = all initiated */
+} ISACFIX_SubStruct;
+
+
+typedef struct {
+  int32_t   lpcGains[12];     /* 6 lower-band & 6 upper-band we may need to double it for 60*/
+  /* */
+  uint32_t  W_upper;          /* Upper boundary of interval W */
+  uint32_t  streamval;
+  uint16_t  stream_index;     /* Index to the current position in bytestream */
+  int16_t   full;             /* 0 - first byte in memory filled, second empty*/
+  /* 1 - both bytes are empty (we just filled the previous memory */
+  uint16_t  beforeLastWord;
+  uint16_t  lastWord;
+} transcode_obj;
+
+
+//Bitstr_enc myBitStr;
+
+#endif  /* MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_STRUCTS_H_ */
diff --git a/modules/audio_coding/codecs/isac/fix/source/transform.c b/modules/audio_coding/codecs/isac/fix/source/transform.c
new file mode 100644
index 0000000..2f1275d
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/transform.c
@@ -0,0 +1,213 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * WebRtcIsacfix_kTransform.c
+ *
+ * Transform functions
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/fft.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+/* Tables are defined in transform_tables.c file or ARM assembly files. */
+/* Cosine table 1 in Q14 */
+extern const int16_t WebRtcIsacfix_kCosTab1[FRAMESAMPLES/2];
+/* Sine table 1 in Q14 */
+extern const int16_t WebRtcIsacfix_kSinTab1[FRAMESAMPLES/2];
+/* Sine table 2 in Q14 */
+extern const int16_t WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4];
+
+void WebRtcIsacfix_Time2SpecC(int16_t *inre1Q9,
+                              int16_t *inre2Q9,
+                              int16_t *outreQ7,
+                              int16_t *outimQ7)
+{
+
+  int k;
+  int32_t tmpreQ16[FRAMESAMPLES/2], tmpimQ16[FRAMESAMPLES/2];
+  int16_t tmp1rQ14, tmp1iQ14;
+  int32_t xrQ16, xiQ16, yrQ16, yiQ16;
+  int32_t v1Q16, v2Q16;
+  int16_t factQ19, sh;
+
+  /* Multiply with complex exponentials and combine into one complex vector */
+  factQ19 = 16921; // 0.5/sqrt(240) in Q19 is round(.5/sqrt(240)*(2^19)) = 16921
+  for (k = 0; k < FRAMESAMPLES/2; k++) {
+    tmp1rQ14 = WebRtcIsacfix_kCosTab1[k];
+    tmp1iQ14 = WebRtcIsacfix_kSinTab1[k];
+    xrQ16 = (tmp1rQ14 * inre1Q9[k] + tmp1iQ14 * inre2Q9[k]) >> 7;
+    xiQ16 = (tmp1rQ14 * inre2Q9[k] - tmp1iQ14 * inre1Q9[k]) >> 7;
+    // Q-domains below: (Q16*Q19>>16)>>3 = Q16
+    tmpreQ16[k] = (WEBRTC_SPL_MUL_16_32_RSFT16(factQ19, xrQ16) + 4) >> 3;
+    tmpimQ16[k] = (WEBRTC_SPL_MUL_16_32_RSFT16(factQ19, xiQ16) + 4) >> 3;
+  }
+
+
+  xrQ16  = WebRtcSpl_MaxAbsValueW32(tmpreQ16, FRAMESAMPLES/2);
+  yrQ16 = WebRtcSpl_MaxAbsValueW32(tmpimQ16, FRAMESAMPLES/2);
+  if (yrQ16>xrQ16) {
+    xrQ16 = yrQ16;
+  }
+
+  sh = WebRtcSpl_NormW32(xrQ16);
+  sh = sh-24; //if sh becomes >=0, then we should shift sh steps to the left, and the domain will become Q(16+sh)
+  //if sh becomes <0, then we should shift -sh steps to the right, and the domain will become Q(16+sh)
+
+  //"Fastest" vectors
+  if (sh>=0) {
+    for (k=0; k<FRAMESAMPLES/2; k++) {
+      inre1Q9[k] = (int16_t)(tmpreQ16[k] << sh);  // Q(16+sh)
+      inre2Q9[k] = (int16_t)(tmpimQ16[k] << sh);  // Q(16+sh)
+    }
+  } else {
+    int32_t round = 1 << (-sh - 1);
+    for (k=0; k<FRAMESAMPLES/2; k++) {
+      inre1Q9[k] = (int16_t)((tmpreQ16[k] + round) >> -sh);  // Q(16+sh)
+      inre2Q9[k] = (int16_t)((tmpimQ16[k] + round) >> -sh);  // Q(16+sh)
+    }
+  }
+
+  /* Get DFT */
+  WebRtcIsacfix_FftRadix16Fastest(inre1Q9, inre2Q9, -1); // real call
+
+  //"Fastest" vectors
+  if (sh>=0) {
+    for (k=0; k<FRAMESAMPLES/2; k++) {
+      tmpreQ16[k] = inre1Q9[k] >> sh;  // Q(16+sh) -> Q16
+      tmpimQ16[k] = inre2Q9[k] >> sh;  // Q(16+sh) -> Q16
+    }
+  } else {
+    for (k=0; k<FRAMESAMPLES/2; k++) {
+      tmpreQ16[k] = inre1Q9[k] << -sh;  // Q(16+sh) -> Q16
+      tmpimQ16[k] = inre2Q9[k] << -sh;  // Q(16+sh) -> Q16
+    }
+  }
+
+
+  /* Use symmetry to separate into two complex vectors and center frames in time around zero */
+  for (k = 0; k < FRAMESAMPLES/4; k++) {
+    xrQ16 = tmpreQ16[k] + tmpreQ16[FRAMESAMPLES/2 - 1 - k];
+    yiQ16 = -tmpreQ16[k] + tmpreQ16[FRAMESAMPLES/2 - 1 - k];
+    xiQ16 = tmpimQ16[k] - tmpimQ16[FRAMESAMPLES/2 - 1 - k];
+    yrQ16 = tmpimQ16[k] + tmpimQ16[FRAMESAMPLES/2 - 1 - k];
+    tmp1rQ14 = -WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4 - 1 - k];
+    tmp1iQ14 = WebRtcIsacfix_kSinTab2[k];
+    v1Q16 = WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, xrQ16) - WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, xiQ16);
+    v2Q16 = WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, xrQ16) + WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, xiQ16);
+    outreQ7[k] = (int16_t)(v1Q16 >> 9);
+    outimQ7[k] = (int16_t)(v2Q16 >> 9);
+    v1Q16 = -WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, yrQ16) - WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, yiQ16);
+    v2Q16 = -WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, yrQ16) + WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, yiQ16);
+    // CalcLrIntQ(v1Q16, 9);
+    outreQ7[FRAMESAMPLES / 2 - 1 - k] = (int16_t)(v1Q16 >> 9);
+    // CalcLrIntQ(v2Q16, 9);
+    outimQ7[FRAMESAMPLES / 2 - 1 - k] = (int16_t)(v2Q16 >> 9);
+
+  }
+}
+
+
+void WebRtcIsacfix_Spec2TimeC(int16_t *inreQ7, int16_t *inimQ7, int32_t *outre1Q16, int32_t *outre2Q16)
+{
+
+  int k;
+  int16_t tmp1rQ14, tmp1iQ14;
+  int32_t xrQ16, xiQ16, yrQ16, yiQ16;
+  int32_t tmpInRe, tmpInIm, tmpInRe2, tmpInIm2;
+  int16_t factQ11;
+  int16_t sh;
+
+  for (k = 0; k < FRAMESAMPLES/4; k++) {
+    /* Move zero in time to beginning of frames */
+    tmp1rQ14 = -WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4 - 1 - k];
+    tmp1iQ14 = WebRtcIsacfix_kSinTab2[k];
+
+    tmpInRe = inreQ7[k] * (1 << 9);  // Q7 -> Q16
+    tmpInIm = inimQ7[k] * (1 << 9);  // Q7 -> Q16
+    tmpInRe2 = inreQ7[FRAMESAMPLES / 2 - 1 - k] * (1 << 9);  // Q7 -> Q16
+    tmpInIm2 = inimQ7[FRAMESAMPLES / 2 - 1 - k] * (1 << 9);  // Q7 -> Q16
+
+    xrQ16 = WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, tmpInRe) + WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, tmpInIm);
+    xiQ16 = WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, tmpInIm) - WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, tmpInRe);
+    yrQ16 = -WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, tmpInIm2) - WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, tmpInRe2);
+    yiQ16 = -WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, tmpInRe2) + WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, tmpInIm2);
+
+    /* Combine into one vector,  z = x + j * y */
+    outre1Q16[k] = xrQ16 - yiQ16;
+    outre1Q16[FRAMESAMPLES/2 - 1 - k] = xrQ16 + yiQ16;
+    outre2Q16[k] = xiQ16 + yrQ16;
+    outre2Q16[FRAMESAMPLES/2 - 1 - k] = -xiQ16 + yrQ16;
+  }
+
+  /* Get IDFT */
+  tmpInRe  = WebRtcSpl_MaxAbsValueW32(outre1Q16, 240);
+  tmpInIm = WebRtcSpl_MaxAbsValueW32(outre2Q16, 240);
+  if (tmpInIm>tmpInRe) {
+    tmpInRe = tmpInIm;
+  }
+
+  sh = WebRtcSpl_NormW32(tmpInRe);
+  sh = sh-24; //if sh becomes >=0, then we should shift sh steps to the left, and the domain will become Q(16+sh)
+  //if sh becomes <0, then we should shift -sh steps to the right, and the domain will become Q(16+sh)
+
+  //"Fastest" vectors
+  if (sh>=0) {
+    for (k=0; k<240; k++) {
+      inreQ7[k] = (int16_t)(outre1Q16[k] << sh);  // Q(16+sh)
+      inimQ7[k] = (int16_t)(outre2Q16[k] << sh);  // Q(16+sh)
+    }
+  } else {
+    int32_t round = 1 << (-sh - 1);
+    for (k=0; k<240; k++) {
+      inreQ7[k] = (int16_t)((outre1Q16[k] + round) >> -sh);  // Q(16+sh)
+      inimQ7[k] = (int16_t)((outre2Q16[k] + round) >> -sh);  // Q(16+sh)
+    }
+  }
+
+  WebRtcIsacfix_FftRadix16Fastest(inreQ7, inimQ7, 1); // real call
+
+  //"Fastest" vectors
+  if (sh>=0) {
+    for (k=0; k<240; k++) {
+      outre1Q16[k] = inreQ7[k] >> sh;  // Q(16+sh) -> Q16
+      outre2Q16[k] = inimQ7[k] >> sh;  // Q(16+sh) -> Q16
+    }
+  } else {
+    for (k=0; k<240; k++) {
+      outre1Q16[k] = inreQ7[k] * (1 << -sh);  // Q(16+sh) -> Q16
+      outre2Q16[k] = inimQ7[k] * (1 << -sh);  // Q(16+sh) -> Q16
+    }
+  }
+
+  /* Divide through by the normalizing constant: */
+  /* scale all values with 1/240, i.e. with 273 in Q16 */
+  /* 273/65536 ~= 0.0041656                            */
+  /*     1/240 ~= 0.0041666                            */
+  for (k=0; k<240; k++) {
+    outre1Q16[k] = WEBRTC_SPL_MUL_16_32_RSFT16(273, outre1Q16[k]);
+    outre2Q16[k] = WEBRTC_SPL_MUL_16_32_RSFT16(273, outre2Q16[k]);
+  }
+
+  /* Demodulate and separate */
+  factQ11 = 31727; // sqrt(240) in Q11 is round(15.49193338482967*2048) = 31727
+  for (k = 0; k < FRAMESAMPLES/2; k++) {
+    tmp1rQ14 = WebRtcIsacfix_kCosTab1[k];
+    tmp1iQ14 = WebRtcIsacfix_kSinTab1[k];
+    xrQ16 = WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, outre1Q16[k]) - WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, outre2Q16[k]);
+    xiQ16 = WEBRTC_SPL_MUL_16_32_RSFT14(tmp1rQ14, outre2Q16[k]) + WEBRTC_SPL_MUL_16_32_RSFT14(tmp1iQ14, outre1Q16[k]);
+    xrQ16 = WEBRTC_SPL_MUL_16_32_RSFT11(factQ11, xrQ16);
+    xiQ16 = WEBRTC_SPL_MUL_16_32_RSFT11(factQ11, xiQ16);
+    outre2Q16[k] = xiQ16;
+    outre1Q16[k] = xrQ16;
+  }
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/transform_mips.c b/modules/audio_coding/codecs/isac/fix/source/transform_mips.c
new file mode 100644
index 0000000..a87b3b5
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/transform_mips.c
@@ -0,0 +1,1294 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/fft.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+// The tables are defined in transform_tables.c file.
+extern const int16_t WebRtcIsacfix_kCosTab1[FRAMESAMPLES/2];
+extern const int16_t WebRtcIsacfix_kSinTab1[FRAMESAMPLES/2];
+extern const int16_t WebRtcIsacfix_kCosTab2[FRAMESAMPLES/4];
+extern const int16_t WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4];
+
+// MIPS DSPr2 version of the WebRtcIsacfix_Time2Spec function
+// is not bit-exact with the C version.
+// The accuracy of the MIPS DSPr2 version is same or better.
+void WebRtcIsacfix_Time2SpecMIPS(int16_t* inre1Q9,
+                                 int16_t* inre2Q9,
+                                 int16_t* outreQ7,
+                                 int16_t* outimQ7) {
+  int k = FRAMESAMPLES / 2;
+  int32_t tmpreQ16[FRAMESAMPLES / 2], tmpimQ16[FRAMESAMPLES / 2];
+  int32_t r0, r1, r2, r3, r4, r5, r6, r7, r8, r9;
+  int32_t inre1, inre2, tmpre, tmpim, factor, max, max1;
+  int16_t* cosptr;
+  int16_t* sinptr;
+
+  cosptr = (int16_t*)WebRtcIsacfix_kCosTab1;
+  sinptr = (int16_t*)WebRtcIsacfix_kSinTab1;
+
+  __asm __volatile (
+    ".set           push                                      \n\t"
+    ".set           noreorder                                 \n\t"
+    "addiu          %[inre1],     %[inre1Q9],   0             \n\t"
+    "addiu          %[inre2],     %[inre2Q9],   0             \n\t"
+    "addiu          %[tmpre],     %[tmpreQ16],  0             \n\t"
+    "addiu          %[tmpim],     %[tmpimQ16],  0             \n\t"
+    "addiu          %[factor],    $zero,        16921         \n\t"
+    "mul            %[max],       $zero,        $zero         \n\t"
+    // Multiply with complex exponentials and combine into one complex vector.
+    // Also, calculate the maximal absolute value in the same loop.
+   "1:                                                        \n\t"
+#if defined(MIPS_DSP_R2_LE)
+    "lwl            %[r0],        0(%[inre1])                 \n\t"
+    "lwl            %[r2],        0(%[cosptr])                \n\t"
+    "lwl            %[r3],        0(%[sinptr])                \n\t"
+    "lwl            %[r1],        0(%[inre2])                 \n\t"
+    "lwr            %[r0],        0(%[inre1])                 \n\t"
+    "lwr            %[r2],        0(%[cosptr])                \n\t"
+    "lwr            %[r3],        0(%[sinptr])                \n\t"
+    "lwr            %[r1],        0(%[inre2])                 \n\t"
+    "muleq_s.w.phr  %[r4],        %[r2],        %[r0]         \n\t"
+    "muleq_s.w.phr  %[r5],        %[r3],        %[r0]         \n\t"
+    "muleq_s.w.phr  %[r6],        %[r3],        %[r1]         \n\t"
+    "muleq_s.w.phr  %[r7],        %[r2],        %[r1]         \n\t"
+    "muleq_s.w.phl  %[r8],        %[r2],        %[r0]         \n\t"
+    "muleq_s.w.phl  %[r0],        %[r3],        %[r0]         \n\t"
+    "muleq_s.w.phl  %[r3],        %[r3],        %[r1]         \n\t"
+    "muleq_s.w.phl  %[r1],        %[r2],        %[r1]         \n\t"
+    "addiu          %[k],         %[k],         -2            \n\t"
+    "addu           %[r4],        %[r4],        %[r6]         \n\t"
+    "subu           %[r5],        %[r7],        %[r5]         \n\t"
+    "sra            %[r4],        %[r4],        8             \n\t"
+    "sra            %[r5],        %[r5],        8             \n\t"
+    "mult           $ac0,         %[factor],    %[r4]         \n\t"
+    "mult           $ac1,         %[factor],    %[r5]         \n\t"
+    "addu           %[r3],        %[r8],        %[r3]         \n\t"
+    "subu           %[r0],        %[r1],        %[r0]         \n\t"
+    "sra            %[r3],        %[r3],        8             \n\t"
+    "sra            %[r0],        %[r0],        8             \n\t"
+    "mult           $ac2,         %[factor],    %[r3]         \n\t"
+    "mult           $ac3,         %[factor],    %[r0]         \n\t"
+    "extr_r.w       %[r4],        $ac0,         16            \n\t"
+    "extr_r.w       %[r5],        $ac1,         16            \n\t"
+    "addiu          %[inre1],     %[inre1],     4             \n\t"
+    "addiu          %[inre2],     %[inre2],     4             \n\t"
+    "extr_r.w       %[r6],        $ac2,         16            \n\t"
+    "extr_r.w       %[r7],        $ac3,         16            \n\t"
+    "addiu          %[cosptr],    %[cosptr],    4             \n\t"
+    "addiu          %[sinptr],    %[sinptr],    4             \n\t"
+    "shra_r.w       %[r4],        %[r4],        3             \n\t"
+    "shra_r.w       %[r5],        %[r5],        3             \n\t"
+    "sw             %[r4],        0(%[tmpre])                 \n\t"
+    "absq_s.w       %[r4],        %[r4]                       \n\t"
+    "sw             %[r5],        0(%[tmpim])                 \n\t"
+    "absq_s.w       %[r5],        %[r5]                       \n\t"
+    "shra_r.w       %[r6],        %[r6],        3             \n\t"
+    "shra_r.w       %[r7],        %[r7],        3             \n\t"
+    "sw             %[r6],        4(%[tmpre])                 \n\t"
+    "absq_s.w       %[r6],        %[r6]                       \n\t"
+    "sw             %[r7],        4(%[tmpim])                 \n\t"
+    "absq_s.w       %[r7],        %[r7]                       \n\t"
+    "slt            %[r0],        %[r4],        %[r5]         \n\t"
+    "movn           %[r4],        %[r5],        %[r0]         \n\t"
+    "slt            %[r1],        %[r6],        %[r7]         \n\t"
+    "movn           %[r6],        %[r7],        %[r1]         \n\t"
+    "slt            %[r0],        %[max],       %[r4]         \n\t"
+    "movn           %[max],       %[r4],        %[r0]         \n\t"
+    "slt            %[r1],        %[max],       %[r6]         \n\t"
+    "movn           %[max],       %[r6],        %[r1]         \n\t"
+    "addiu          %[tmpre],     %[tmpre],     8             \n\t"
+    "bgtz           %[k],         1b                          \n\t"
+    " addiu         %[tmpim],     %[tmpim],     8             \n\t"
+#else  // #if defined(MIPS_DSP_R2_LE)
+    "lh             %[r0],        0(%[inre1])                 \n\t"
+    "lh             %[r1],        0(%[inre2])                 \n\t"
+    "lh             %[r2],        0(%[cosptr])                \n\t"
+    "lh             %[r3],        0(%[sinptr])                \n\t"
+    "addiu          %[k],         %[k],         -1            \n\t"
+    "mul            %[r4],        %[r0],        %[r2]         \n\t"
+    "mul            %[r5],        %[r1],        %[r3]         \n\t"
+    "mul            %[r0],        %[r0],        %[r3]         \n\t"
+    "mul            %[r2],        %[r1],        %[r2]         \n\t"
+    "addiu          %[inre1],     %[inre1],     2             \n\t"
+    "addiu          %[inre2],     %[inre2],     2             \n\t"
+    "addiu          %[cosptr],    %[cosptr],    2             \n\t"
+    "addiu          %[sinptr],    %[sinptr],    2             \n\t"
+    "addu           %[r1],        %[r4],        %[r5]         \n\t"
+    "sra            %[r1],        %[r1],        7             \n\t"
+    "sra            %[r3],        %[r1],        16            \n\t"
+    "andi           %[r1],        %[r1],        0xFFFF        \n\t"
+    "sra            %[r1],        %[r1],        1             \n\t"
+    "mul            %[r1],        %[factor],    %[r1]         \n\t"
+    "mul            %[r3],        %[factor],    %[r3]         \n\t"
+    "subu           %[r0],        %[r2],        %[r0]         \n\t"
+    "sra            %[r0],        %[r0],        7             \n\t"
+    "sra            %[r2],        %[r0],        16            \n\t"
+    "andi           %[r0],        %[r0],        0xFFFF        \n\t"
+    "sra            %[r0],        %[r0],        1             \n\t"
+    "mul            %[r0],        %[factor],    %[r0]         \n\t"
+    "mul            %[r2],        %[factor],    %[r2]         \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r1],        %[r1],        15            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r1],        %[r1],        0x4000        \n\t"
+    "sra            %[r1],        %[r1],        15            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "addu           %[r1],        %[r3],        %[r1]         \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r1],        %[r1],        3             \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r1],        %[r1],        4             \n\t"
+    "sra            %[r1],        %[r1],        3             \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sw             %[r1],        0(%[tmpre])                 \n\t"
+    "addiu          %[tmpre],     %[tmpre],     4             \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "absq_s.w       %[r1],        %[r1]                       \n\t"
+    "shra_r.w       %[r0],        %[r0],        15            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "negu           %[r4],        %[r1]                       \n\t"
+    "slt            %[r3],        %[r1],        $zero         \n\t"
+    "movn           %[r1],        %[r4],        %[r3]         \n\t"
+    "addiu          %[r0],        %[r0],        0x4000        \n\t"
+    "sra            %[r0],        %[r0],        15            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "addu           %[r0],        %[r0],        %[r2]         \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r0],        %[r0],        3             \n\t"
+    "sw             %[r0],        0(%[tmpim])                 \n\t"
+    "absq_s.w       %[r0],        %[r0]                       \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r0],        %[r0],        4             \n\t"
+    "sra            %[r0],        %[r0],        3             \n\t"
+    "sw             %[r0],        0(%[tmpim])                 \n\t"
+    "negu           %[r2],        %[r0]                       \n\t"
+    "slt            %[r3],        %[r0],        $zero         \n\t"
+    "movn           %[r0],        %[r2],        %[r3]         \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "slt            %[r2],        %[max],       %[r1]         \n\t"
+    "movn           %[max],       %[r1],        %[r2]         \n\t"
+    "slt            %[r2],        %[max],       %[r0]         \n\t"
+    "movn           %[max],       %[r0],        %[r2]         \n\t"
+    "bgtz           %[k],         1b                          \n\t"
+    " addiu         %[tmpim],     %[tmpim],     4             \n\t"
+#endif  // #if defined(MIPS_DSP_R2_LE)
+    // Calculate WebRtcSpl_NormW32(max).
+    // If max gets value >=0, we should shift max steps to the left, and the
+    // domain will be Q(16+shift). If max gets value <0, we should shift -max
+    // steps to the right, and the domain will be Q(16+max)
+    "clz            %[max],       %[max]                      \n\t"
+    "addiu          %[max],       %[max],       -25           \n\t"
+    ".set           pop                                       \n\t"
+    : [k] "+r" (k), [inre1] "=&r" (inre1), [inre2] "=&r" (inre2),
+      [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2),
+      [r3] "=&r" (r3), [r4] "=&r" (r4), [tmpre] "=&r" (tmpre),
+      [tmpim] "=&r" (tmpim), [max] "=&r" (max), [factor] "=&r" (factor),
+#if defined(MIPS_DSP_R2_LE)
+      [r6] "=&r" (r6), [r7] "=&r" (r7), [r8] "=&r" (r8),
+#endif  // #if defined(MIPS_DSP_R2_LE)
+      [r5] "=&r" (r5)
+    : [inre1Q9] "r" (inre1Q9), [inre2Q9] "r" (inre2Q9),
+      [tmpreQ16] "r" (tmpreQ16), [tmpimQ16] "r" (tmpimQ16),
+      [cosptr] "r" (cosptr), [sinptr] "r" (sinptr)
+    : "hi", "lo", "memory"
+#if defined(MIPS_DSP_R2_LE)
+    , "$ac1hi", "$ac1lo", "$ac2hi", "$ac2lo", "$ac3hi", "$ac3lo"
+#endif  // #if defined(MIPS_DSP_R2_LE)
+  );
+
+  // "Fastest" vectors
+  k = FRAMESAMPLES / 4;
+  __asm __volatile (
+    ".set           push                                      \n\t"
+    ".set           noreorder                                 \n\t"
+    "addiu          %[tmpre],     %[tmpreQ16],  0             \n\t"
+    "addiu          %[tmpim],     %[tmpimQ16],  0             \n\t"
+    "addiu          %[inre1],     %[inre1Q9],   0             \n\t"
+    "addiu          %[inre2],     %[inre2Q9],   0             \n\t"
+    "blez           %[max],       2f                          \n\t"
+    " subu          %[max1],      $zero,        %[max]        \n\t"
+   "1:                                                        \n\t"
+    "lw             %[r0],        0(%[tmpre])                 \n\t"
+    "lw             %[r1],        0(%[tmpim])                 \n\t"
+    "lw             %[r2],        4(%[tmpre])                 \n\t"
+    "lw             %[r3],        4(%[tmpim])                 \n\t"
+    "addiu          %[k],         %[k],         -1            \n\t"
+    "sllv           %[r0],        %[r0],        %[max]        \n\t"
+    "sllv           %[r1],        %[r1],        %[max]        \n\t"
+    "sllv           %[r2],        %[r2],        %[max]        \n\t"
+    "sllv           %[r3],        %[r3],        %[max]        \n\t"
+    "addiu          %[tmpre],     %[tmpre],     8             \n\t"
+    "addiu          %[tmpim],     %[tmpim],     8             \n\t"
+    "sh             %[r0],        0(%[inre1])                 \n\t"
+    "sh             %[r1],        0(%[inre2])                 \n\t"
+    "sh             %[r2],        2(%[inre1])                 \n\t"
+    "sh             %[r3],        2(%[inre2])                 \n\t"
+    "addiu          %[inre1],     %[inre1],     4             \n\t"
+    "bgtz           %[k],         1b                          \n\t"
+    " addiu         %[inre2],     %[inre2],     4             \n\t"
+    "b              4f                                        \n\t"
+    " nop                                                     \n\t"
+   "2:                                                        \n\t"
+#if !defined(MIPS_DSP_R1_LE)
+    "addiu          %[r4],        %[max1],      -1            \n\t"
+    "addiu          %[r5],        $zero,        1             \n\t"
+    "sllv           %[r4],        %[r5],        %[r4]         \n\t"
+#endif // #if !defined(MIPS_DSP_R1_LE)
+   "3:                                                        \n\t"
+    "lw             %[r0],        0(%[tmpre])                 \n\t"
+    "lw             %[r1],        0(%[tmpim])                 \n\t"
+    "lw             %[r2],        4(%[tmpre])                 \n\t"
+    "lw             %[r3],        4(%[tmpim])                 \n\t"
+    "addiu          %[k],         %[k],         -1            \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shrav_r.w      %[r0],        %[r0],        %[max1]       \n\t"
+    "shrav_r.w      %[r1],        %[r1],        %[max1]       \n\t"
+    "shrav_r.w      %[r2],        %[r2],        %[max1]       \n\t"
+    "shrav_r.w      %[r3],        %[r3],        %[max1]       \n\t"
+#else // #if !defined(MIPS_DSP_R1_LE)
+    "addu           %[r0],        %[r0],        %[r4]         \n\t"
+    "addu           %[r1],        %[r1],        %[r4]         \n\t"
+    "addu           %[r2],        %[r2],        %[r4]         \n\t"
+    "addu           %[r3],        %[r3],        %[r4]         \n\t"
+    "srav           %[r0],        %[r0],        %[max1]       \n\t"
+    "srav           %[r1],        %[r1],        %[max1]       \n\t"
+    "srav           %[r2],        %[r2],        %[max1]       \n\t"
+    "srav           %[r3],        %[r3],        %[max1]       \n\t"
+#endif // #if !defined(MIPS_DSP_R1_LE)
+    "addiu          %[tmpre],     %[tmpre],     8             \n\t"
+    "addiu          %[tmpim],     %[tmpim],     8             \n\t"
+    "sh             %[r0],        0(%[inre1])                 \n\t"
+    "sh             %[r1],        0(%[inre2])                 \n\t"
+    "sh             %[r2],        2(%[inre1])                 \n\t"
+    "sh             %[r3],        2(%[inre2])                 \n\t"
+    "addiu          %[inre1],     %[inre1],     4             \n\t"
+    "bgtz           %[k],         3b                          \n\t"
+    " addiu         %[inre2],     %[inre2],     4             \n\t"
+   "4:                                                        \n\t"
+    ".set           pop                                       \n\t"
+    : [tmpre] "=&r" (tmpre), [tmpim] "=&r" (tmpim), [inre1] "=&r" (inre1),
+      [inre2] "=&r" (inre2), [k] "+r" (k), [max1] "=&r" (max1),
+#if !defined(MIPS_DSP_R1_LE)
+      [r4] "=&r" (r4), [r5] "=&r" (r5),
+#endif // #if !defined(MIPS_DSP_R1_LE)
+      [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3)
+    : [tmpreQ16] "r" (tmpreQ16), [tmpimQ16] "r" (tmpimQ16),
+      [inre1Q9] "r" (inre1Q9), [inre2Q9] "r" (inre2Q9), [max] "r" (max)
+    : "memory"
+  );
+
+  // Get DFT
+  WebRtcIsacfix_FftRadix16Fastest(inre1Q9, inre2Q9, -1); // real call
+
+  // "Fastest" vectors and
+  // Use symmetry to separate into two complex vectors
+  // and center frames in time around zero
+  // merged into one loop
+  cosptr = (int16_t*)WebRtcIsacfix_kCosTab2;
+  sinptr = (int16_t*)WebRtcIsacfix_kSinTab2;
+  k = FRAMESAMPLES / 4;
+  factor = FRAMESAMPLES - 2;  // offset for FRAMESAMPLES / 2 - 1 array member
+
+  __asm __volatile (
+    ".set           push                                      \n\t"
+    ".set           noreorder                                 \n\t"
+    "addiu          %[inre1],     %[inre1Q9],   0             \n\t"
+    "addiu          %[inre2],     %[inre2Q9],   0             \n\t"
+    "addiu          %[tmpre],     %[outreQ7],   0             \n\t"
+    "addiu          %[tmpim],     %[outimQ7],   0             \n\t"
+    "bltz           %[max],       2f                          \n\t"
+    " subu          %[max1],      $zero,        %[max]        \n\t"
+   "1:                                                        \n\t"
+#if !defined(MIPS_DSP_R1_LE)
+    "addu           %[r4],        %[inre1],     %[offset]     \n\t"
+    "addu           %[r5],        %[inre2],     %[offset]     \n\t"
+#endif  // #if !defined(MIPS_DSP_R1_LE)
+    "lh             %[r0],        0(%[inre1])                 \n\t"
+    "lh             %[r1],        0(%[inre2])                 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "lhx            %[r2],        %[offset](%[inre1])         \n\t"
+    "lhx            %[r3],        %[offset](%[inre2])         \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "lh             %[r2],        0(%[r4])                    \n\t"
+    "lh             %[r3],        0(%[r5])                    \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "srav           %[r0],        %[r0],        %[max]        \n\t"
+    "srav           %[r1],        %[r1],        %[max]        \n\t"
+    "srav           %[r2],        %[r2],        %[max]        \n\t"
+    "srav           %[r3],        %[r3],        %[max]        \n\t"
+    "addu           %[r4],        %[r0],        %[r2]         \n\t"
+    "subu           %[r0],        %[r2],        %[r0]         \n\t"
+    "subu           %[r2],        %[r1],        %[r3]         \n\t"
+    "addu           %[r1],        %[r1],        %[r3]         \n\t"
+    "lh             %[r3],        0(%[cosptr])                \n\t"
+    "lh             %[r5],        0(%[sinptr])                \n\t"
+    "andi           %[r6],        %[r4],        0xFFFF        \n\t"
+    "sra            %[r4],        %[r4],        16            \n\t"
+    "mul            %[r7],        %[r3],        %[r6]         \n\t"
+    "mul            %[r8],        %[r3],        %[r4]         \n\t"
+    "mul            %[r6],        %[r5],        %[r6]         \n\t"
+    "mul            %[r4],        %[r5],        %[r4]         \n\t"
+    "addiu          %[k],         %[k],         -1            \n\t"
+    "addiu          %[inre1],     %[inre1],     2             \n\t"
+    "addiu          %[inre2],     %[inre2],     2             \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r7],        %[r7],        14            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r7],        %[r7],        0x2000        \n\t"
+    "sra            %[r7],        %[r7],        14            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sll            %[r8],        %[r8],        2             \n\t"
+    "addu           %[r8],        %[r8],        %[r7]         \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r6],        %[r6],        14            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r6],        %[r6],        0x2000        \n\t"
+    "sra            %[r6],        %[r6],        14            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sll            %[r4],        %[r4],        2             \n\t"
+    "addu           %[r4],        %[r4],        %[r6]         \n\t"
+    "andi           %[r6],        %[r2],        0xFFFF        \n\t"
+    "sra            %[r2],        %[r2],        16            \n\t"
+    "mul            %[r7],        %[r5],        %[r6]         \n\t"
+    "mul            %[r9],        %[r5],        %[r2]         \n\t"
+    "mul            %[r6],        %[r3],        %[r6]         \n\t"
+    "mul            %[r2],        %[r3],        %[r2]         \n\t"
+    "addiu          %[cosptr],    %[cosptr],    2             \n\t"
+    "addiu          %[sinptr],    %[sinptr],    2             \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r7],        %[r7],        14            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r7],        %[r7],        0x2000        \n\t"
+    "sra            %[r7],        %[r7],        14            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sll            %[r9],        %[r9],        2             \n\t"
+    "addu           %[r9],        %[r7],        %[r9]         \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r6],        %[r6],        14            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r6],        %[r6],        0x2000        \n\t"
+    "sra            %[r6],        %[r6],        14            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sll            %[r2],        %[r2],        2             \n\t"
+    "addu           %[r2],        %[r6],        %[r2]         \n\t"
+    "subu           %[r8],        %[r8],        %[r9]         \n\t"
+    "sra            %[r8],        %[r8],        9             \n\t"
+    "addu           %[r2],        %[r4],        %[r2]         \n\t"
+    "sra            %[r2],        %[r2],        9             \n\t"
+    "sh             %[r8],        0(%[tmpre])                 \n\t"
+    "sh             %[r2],        0(%[tmpim])                 \n\t"
+
+    "andi           %[r4],        %[r1],        0xFFFF        \n\t"
+    "sra            %[r1],        %[r1],        16            \n\t"
+    "andi           %[r6],        %[r0],        0xFFFF        \n\t"
+    "sra            %[r0],        %[r0],        16            \n\t"
+    "mul            %[r7],        %[r5],        %[r4]         \n\t"
+    "mul            %[r9],        %[r5],        %[r1]         \n\t"
+    "mul            %[r4],        %[r3],        %[r4]         \n\t"
+    "mul            %[r1],        %[r3],        %[r1]         \n\t"
+    "mul            %[r8],        %[r3],        %[r0]         \n\t"
+    "mul            %[r3],        %[r3],        %[r6]         \n\t"
+    "mul            %[r6],        %[r5],        %[r6]         \n\t"
+    "mul            %[r0],        %[r5],        %[r0]         \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r7],        %[r7],        14            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r7],        %[r7],        0x2000        \n\t"
+    "sra            %[r7],        %[r7],        14            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sll            %[r9],        %[r9],        2             \n\t"
+    "addu           %[r9],        %[r9],        %[r7]         \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r4],        %[r4],        14            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r4],        %[r4],        0x2000        \n\t"
+    "sra            %[r4],        %[r4],        14            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sll            %[r1],        %[r1],        2             \n\t"
+    "addu           %[r1],        %[r1],        %[r4]         \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r3],        %[r3],        14            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r3],        %[r3],        0x2000        \n\t"
+    "sra            %[r3],        %[r3],        14            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sll            %[r8],        %[r8],        2             \n\t"
+    "addu           %[r8],        %[r8],        %[r3]         \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r6],        %[r6],        14            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r6],        %[r6],        0x2000        \n\t"
+    "sra            %[r6],        %[r6],        14            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sll            %[r0],        %[r0],        2             \n\t"
+    "addu           %[r0],        %[r0],        %[r6]         \n\t"
+    "addu           %[r3],        %[tmpre],     %[offset]     \n\t"
+    "addu           %[r2],        %[tmpim],     %[offset]     \n\t"
+    "addu           %[r9],        %[r9],        %[r8]         \n\t"
+    "negu           %[r9],        %[r9]                       \n\t"
+    "sra            %[r9],        %[r9],        9             \n\t"
+    "subu           %[r0],        %[r0],        %[r1]         \n\t"
+    "addiu          %[offset],    %[offset],    -4            \n\t"
+    "sh             %[r9],        0(%[r3])                    \n\t"
+    "sh             %[r0],        0(%[r2])                    \n\t"
+    "addiu          %[tmpre],     %[tmpre],     2             \n\t"
+    "bgtz           %[k],         1b                          \n\t"
+    " addiu         %[tmpim],     %[tmpim],     2             \n\t"
+    "b              3f                                        \n\t"
+    " nop                                                     \n\t"
+   "2:                                                        \n\t"
+#if !defined(MIPS_DSP_R1_LE)
+    "addu           %[r4],        %[inre1],     %[offset]     \n\t"
+    "addu           %[r5],        %[inre2],     %[offset]     \n\t"
+#endif  // #if !defined(MIPS_DSP_R1_LE)
+    "lh             %[r0],        0(%[inre1])                 \n\t"
+    "lh             %[r1],        0(%[inre2])                 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "lhx            %[r2],        %[offset](%[inre1])         \n\t"
+    "lhx            %[r3],        %[offset](%[inre2])         \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "lh             %[r2],        0(%[r4])                    \n\t"
+    "lh             %[r3],        0(%[r5])                    \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sllv           %[r0],        %[r0],        %[max1]       \n\t"
+    "sllv           %[r1],        %[r1],        %[max1]       \n\t"
+    "sllv           %[r2],        %[r2],        %[max1]       \n\t"
+    "sllv           %[r3],        %[r3],        %[max1]       \n\t"
+    "addu           %[r4],        %[r0],        %[r2]         \n\t"
+    "subu           %[r0],        %[r2],        %[r0]         \n\t"
+    "subu           %[r2],        %[r1],        %[r3]         \n\t"
+    "addu           %[r1],        %[r1],        %[r3]         \n\t"
+    "lh             %[r3],        0(%[cosptr])                \n\t"
+    "lh             %[r5],        0(%[sinptr])                \n\t"
+    "andi           %[r6],        %[r4],        0xFFFF        \n\t"
+    "sra            %[r4],        %[r4],        16            \n\t"
+    "mul            %[r7],        %[r3],        %[r6]         \n\t"
+    "mul            %[r8],        %[r3],        %[r4]         \n\t"
+    "mul            %[r6],        %[r5],        %[r6]         \n\t"
+    "mul            %[r4],        %[r5],        %[r4]         \n\t"
+    "addiu          %[k],         %[k],         -1            \n\t"
+    "addiu          %[inre1],     %[inre1],     2             \n\t"
+    "addiu          %[inre2],     %[inre2],     2             \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r7],        %[r7],        14            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r7],        %[r7],        0x2000        \n\t"
+    "sra            %[r7],        %[r7],        14            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sll            %[r8],        %[r8],        2             \n\t"
+    "addu           %[r8],        %[r8],        %[r7]         \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r6],        %[r6],        14            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r6],        %[r6],        0x2000        \n\t"
+    "sra            %[r6],        %[r6],        14            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sll            %[r4],        %[r4],        2             \n\t"
+    "addu           %[r4],        %[r4],        %[r6]         \n\t"
+    "andi           %[r6],        %[r2],        0xFFFF        \n\t"
+    "sra            %[r2],        %[r2],        16            \n\t"
+    "mul            %[r7],        %[r5],        %[r6]         \n\t"
+    "mul            %[r9],        %[r5],        %[r2]         \n\t"
+    "mul            %[r6],        %[r3],        %[r6]         \n\t"
+    "mul            %[r2],        %[r3],        %[r2]         \n\t"
+    "addiu          %[cosptr],    %[cosptr],    2             \n\t"
+    "addiu          %[sinptr],    %[sinptr],    2             \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r7],        %[r7],        14            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r7],        %[r7],        0x2000        \n\t"
+    "sra            %[r7],        %[r7],        14            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sll            %[r9],        %[r9],        2             \n\t"
+    "addu           %[r9],        %[r7],        %[r9]         \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r6],        %[r6],        14            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r6],        %[r6],        0x2000        \n\t"
+    "sra            %[r6],        %[r6],        14            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sll            %[r2],        %[r2],        2             \n\t"
+    "addu           %[r2],        %[r6],        %[r2]         \n\t"
+    "subu           %[r8],        %[r8],        %[r9]         \n\t"
+    "sra            %[r8],        %[r8],        9             \n\t"
+    "addu           %[r2],        %[r4],        %[r2]         \n\t"
+    "sra            %[r2],        %[r2],        9             \n\t"
+    "sh             %[r8],        0(%[tmpre])                 \n\t"
+    "sh             %[r2],        0(%[tmpim])                 \n\t"
+    "andi           %[r4],        %[r1],        0xFFFF        \n\t"
+    "sra            %[r1],        %[r1],        16            \n\t"
+    "andi           %[r6],        %[r0],        0xFFFF        \n\t"
+    "sra            %[r0],        %[r0],        16            \n\t"
+    "mul            %[r7],        %[r5],        %[r4]         \n\t"
+    "mul            %[r9],        %[r5],        %[r1]         \n\t"
+    "mul            %[r4],        %[r3],        %[r4]         \n\t"
+    "mul            %[r1],        %[r3],        %[r1]         \n\t"
+    "mul            %[r8],        %[r3],        %[r0]         \n\t"
+    "mul            %[r3],        %[r3],        %[r6]         \n\t"
+    "mul            %[r6],        %[r5],        %[r6]         \n\t"
+    "mul            %[r0],        %[r5],        %[r0]         \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r7],        %[r7],        14            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r7],        %[r7],        0x2000        \n\t"
+    "sra            %[r7],        %[r7],        14            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sll            %[r9],        %[r9],        2             \n\t"
+    "addu           %[r9],        %[r9],        %[r7]         \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r4],        %[r4],        14            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r4],        %[r4],        0x2000        \n\t"
+    "sra            %[r4],        %[r4],        14            \n\t"
+#endif
+    "sll            %[r1],        %[r1],        2             \n\t"
+    "addu           %[r1],        %[r1],        %[r4]         \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r3],        %[r3],        14            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r3],        %[r3],        0x2000        \n\t"
+    "sra            %[r3],        %[r3],        14            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sll            %[r8],        %[r8],        2             \n\t"
+    "addu           %[r8],        %[r8],        %[r3]         \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r6],        %[r6],        14            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r6],        %[r6],        0x2000        \n\t"
+    "sra            %[r6],        %[r6],        14            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sll            %[r0],        %[r0],        2             \n\t"
+    "addu           %[r0],        %[r0],        %[r6]         \n\t"
+    "addu           %[r3],        %[tmpre],     %[offset]     \n\t"
+    "addu           %[r2],        %[tmpim],     %[offset]     \n\t"
+    "addu           %[r9],        %[r9],        %[r8]         \n\t"
+    "negu           %[r9],        %[r9]                       \n\t"
+    "sra            %[r9],        %[r9],        9             \n\t"
+    "subu           %[r0],        %[r0],        %[r1]         \n\t"
+    "sra            %[r0],        %[r0],        9             \n\t"
+    "addiu          %[offset],    %[offset],    -4            \n\t"
+    "sh             %[r9],        0(%[r3])                    \n\t"
+    "sh             %[r0],        0(%[r2])                    \n\t"
+    "addiu          %[tmpre],     %[tmpre],     2             \n\t"
+    "bgtz           %[k],         2b                          \n\t"
+    " addiu         %[tmpim],     %[tmpim],     2             \n\t"
+   "3:                                                        \n\t"
+    ".set           pop                                       \n\t"
+    : [inre1] "=&r" (inre1), [inre2] "=&r" (inre2), [tmpre] "=&r" (tmpre),
+      [tmpim] "=&r" (tmpim), [offset] "+r" (factor), [k] "+r" (k),
+      [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+      [r4] "=&r" (r4), [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
+      [r8] "=&r" (r8), [r9] "=&r" (r9), [max1] "=&r" (max1)
+    : [inre1Q9] "r" (inre1Q9), [inre2Q9] "r" (inre2Q9),
+      [outreQ7] "r" (outreQ7), [outimQ7] "r" (outimQ7),
+      [max] "r" (max), [cosptr] "r" (cosptr), [sinptr] "r" (sinptr)
+    : "hi", "lo", "memory"
+  );
+}
+
+void WebRtcIsacfix_Spec2TimeMIPS(int16_t *inreQ7,
+                                 int16_t *inimQ7,
+                                 int32_t *outre1Q16,
+                                 int32_t *outre2Q16) {
+  int k = FRAMESAMPLES / 4;
+  int16_t* inre;
+  int16_t* inim;
+  int32_t* outre1;
+  int32_t* outre2;
+  int16_t* cosptr = (int16_t*)WebRtcIsacfix_kCosTab2;
+  int16_t* sinptr = (int16_t*)WebRtcIsacfix_kSinTab2;
+  int32_t r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, max, max1;
+#if defined(MIPS_DSP_R1_LE)
+  int32_t offset = FRAMESAMPLES - 4;
+#else  // #if defined(MIPS_DSP_R1_LE)
+  int32_t offset = FRAMESAMPLES - 2;
+#endif  // #if defined(MIPS_DSP_R1_LE)
+
+  __asm __volatile (
+    ".set           push                                      \n\t"
+    ".set           noreorder                                 \n\t"
+    "addiu          %[inre],      %[inreQ7],    0             \n\t"
+    "addiu          %[inim] ,     %[inimQ7],    0             \n\t"
+    "addiu          %[outre1],    %[outre1Q16], 0             \n\t"
+    "addiu          %[outre2],    %[outre2Q16], 0             \n\t"
+    "mul            %[max],       $zero,        $zero         \n\t"
+   "1:                                                        \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    // Process two samples in one iteration avoiding left shift before
+    // multiplication. MaxAbsValueW32 function inlined into the loop.
+    "addu           %[r8],        %[inre],      %[offset]     \n\t"
+    "addu           %[r9],        %[inim],      %[offset]     \n\t"
+    "lwl            %[r4],        0(%[r8])                    \n\t"
+    "lwl            %[r5],        0(%[r9])                    \n\t"
+    "lwl            %[r0],        0(%[inre])                  \n\t"
+    "lwl            %[r1],        0(%[inim])                  \n\t"
+    "lwl            %[r2],        0(%[cosptr])                \n\t"
+    "lwl            %[r3],        0(%[sinptr])                \n\t"
+    "lwr            %[r4],        0(%[r8])                    \n\t"
+    "lwr            %[r5],        0(%[r9])                    \n\t"
+    "lwr            %[r0],        0(%[inre])                  \n\t"
+    "lwr            %[r1],        0(%[inim])                  \n\t"
+    "lwr            %[r2],        0(%[cosptr])                \n\t"
+    "lwr            %[r3],        0(%[sinptr])                \n\t"
+    "packrl.ph      %[r4],        %[r4],        %[r4]         \n\t"
+    "packrl.ph      %[r5],        %[r5],        %[r5]         \n\t"
+    "muleq_s.w.phr  %[r6],        %[r0],        %[r2]         \n\t"
+    "muleq_s.w.phr  %[r7],        %[r1],        %[r3]         \n\t"
+    "muleq_s.w.phr  %[r8],        %[r4],        %[r2]         \n\t"
+    "muleq_s.w.phr  %[r9],        %[r5],        %[r3]         \n\t"
+    "addiu          %[k],         %[k],         -2            \n\t"
+    "addiu          %[cosptr],    %[cosptr],    4             \n\t"
+    "addiu          %[sinptr],    %[sinptr],    4             \n\t"
+    "addiu          %[inre],      %[inre],      4             \n\t"
+    "addiu          %[inim],      %[inim],      4             \n\t"
+    "shra_r.w       %[r6],        %[r6],        6             \n\t"
+    "shra_r.w       %[r7],        %[r7],        6             \n\t"
+    "shra_r.w       %[r8],        %[r8],        6             \n\t"
+    "shra_r.w       %[r9],        %[r9],        6             \n\t"
+    "addu           %[r6],        %[r6],        %[r7]         \n\t"
+    "subu           %[r9],        %[r9],        %[r8]         \n\t"
+    "subu           %[r7],        %[r6],        %[r9]         \n\t"
+    "addu           %[r6],        %[r6],        %[r9]         \n\t"
+    "sw             %[r7],        0(%[outre1])                \n\t"
+    "absq_s.w       %[r7],        %[r7]                       \n\t"
+    "slt            %[r8],        %[max],       %[r7]         \n\t"
+    "movn           %[max],       %[r7],        %[r8]         \n\t"
+    "sll            %[r7],        %[offset],    1             \n\t"
+    "addu           %[r7],        %[outre1],    %[r7]         \n\t"
+    "sw             %[r6],        4(%[r7])                    \n\t"
+    "absq_s.w       %[r6],        %[r6]                       \n\t"
+    "slt            %[r8],        %[max],       %[r6]         \n\t"
+    "movn           %[max],       %[r6],        %[r8]         \n\t"
+    "muleq_s.w.phl  %[r6],        %[r0],        %[r2]         \n\t"
+    "muleq_s.w.phl  %[r7],        %[r1],        %[r3]         \n\t"
+    "muleq_s.w.phl  %[r8],        %[r4],        %[r2]         \n\t"
+    "muleq_s.w.phl  %[r9],        %[r5],        %[r3]         \n\t"
+    "shra_r.w       %[r6],        %[r6],        6             \n\t"
+    "shra_r.w       %[r7],        %[r7],        6             \n\t"
+    "shra_r.w       %[r8],        %[r8],        6             \n\t"
+    "shra_r.w       %[r9],        %[r9],        6             \n\t"
+    "addu           %[r6],        %[r6],        %[r7]         \n\t"
+    "subu           %[r9],        %[r9],        %[r8]         \n\t"
+    "subu           %[r7],        %[r6],        %[r9]         \n\t"
+    "addu           %[r6],        %[r6],        %[r9]         \n\t"
+    "sw             %[r7],        4(%[outre1])                \n\t"
+    "absq_s.w       %[r7],        %[r7]                       \n\t"
+    "slt            %[r8],        %[max],       %[r7]         \n\t"
+    "movn           %[max],       %[r7],        %[r8]         \n\t"
+    "sll            %[r7],        %[offset],    1             \n\t"
+    "addu           %[r7],        %[outre1],    %[r7]         \n\t"
+    "sw             %[r6],        0(%[r7])                    \n\t"
+    "absq_s.w       %[r6],        %[r6]                       \n\t"
+    "slt            %[r8],        %[max],       %[r6]         \n\t"
+    "movn           %[max],       %[r6],        %[r8]         \n\t"
+    "muleq_s.w.phr  %[r6],        %[r1],        %[r2]         \n\t"
+    "muleq_s.w.phr  %[r7],        %[r0],        %[r3]         \n\t"
+    "muleq_s.w.phr  %[r8],        %[r5],        %[r2]         \n\t"
+    "muleq_s.w.phr  %[r9],        %[r4],        %[r3]         \n\t"
+    "addiu          %[outre1],    %[outre1],    8             \n\t"
+    "shra_r.w       %[r6],        %[r6],        6             \n\t"
+    "shra_r.w       %[r7],        %[r7],        6             \n\t"
+    "shra_r.w       %[r8],        %[r8],        6             \n\t"
+    "shra_r.w       %[r9],        %[r9],        6             \n\t"
+    "subu           %[r6],        %[r6],        %[r7]         \n\t"
+    "addu           %[r9],        %[r9],        %[r8]         \n\t"
+    "subu           %[r7],        %[r6],        %[r9]         \n\t"
+    "addu           %[r6],        %[r9],        %[r6]         \n\t"
+    "negu           %[r6],        %[r6]                       \n\t"
+    "sw             %[r7],        0(%[outre2])                \n\t"
+    "absq_s.w       %[r7],        %[r7]                       \n\t"
+    "slt            %[r8],        %[max],       %[r7]         \n\t"
+    "movn           %[max],       %[r7],        %[r8]         \n\t"
+    "sll            %[r7],        %[offset],    1             \n\t"
+    "addu           %[r7],        %[outre2],    %[r7]         \n\t"
+    "sw             %[r6],        4(%[r7])                    \n\t"
+    "absq_s.w       %[r6],        %[r6]                       \n\t"
+    "slt            %[r8],        %[max],       %[r6]         \n\t"
+    "movn           %[max],       %[r6],        %[r8]         \n\t"
+    "muleq_s.w.phl  %[r6],       %[r1],         %[r2]         \n\t"
+    "muleq_s.w.phl  %[r7],       %[r0],         %[r3]         \n\t"
+    "muleq_s.w.phl  %[r8],       %[r5],         %[r2]         \n\t"
+    "muleq_s.w.phl  %[r9],       %[r4],         %[r3]         \n\t"
+    "addiu          %[offset],   %[offset],     -8            \n\t"
+    "shra_r.w       %[r6],       %[r6],         6             \n\t"
+    "shra_r.w       %[r7],       %[r7],         6             \n\t"
+    "shra_r.w       %[r8],       %[r8],         6             \n\t"
+    "shra_r.w       %[r9],       %[r9],         6             \n\t"
+    "subu           %[r6],       %[r6],         %[r7]         \n\t"
+    "addu           %[r9],       %[r9],         %[r8]         \n\t"
+    "subu           %[r7],       %[r6],         %[r9]         \n\t"
+    "addu           %[r6],       %[r9],         %[r6]         \n\t"
+    "negu           %[r6],       %[r6]                        \n\t"
+    "sw             %[r7],       4(%[outre2])                 \n\t"
+    "absq_s.w       %[r7],       %[r7]                        \n\t"
+    "slt            %[r8],       %[max],        %[r7]         \n\t"
+    "movn           %[max],      %[r7],         %[r8]         \n\t"
+    "sll            %[r7],       %[offset],     1             \n\t"
+    "addu           %[r7],       %[outre2],     %[r7]         \n\t"
+    "sw             %[r6],       0(%[r7])                     \n\t"
+    "absq_s.w       %[r6],       %[r6]                        \n\t"
+    "slt            %[r8],       %[max],        %[r6]         \n\t"
+    "movn           %[max],      %[r6],         %[r8]         \n\t"
+    "bgtz           %[k],        1b                           \n\t"
+    " addiu         %[outre2],   %[outre2],     8             \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "lh             %[r0],       0(%[inre])                   \n\t"
+    "lh             %[r1],       0(%[inim])                   \n\t"
+    "lh             %[r4],       0(%[cosptr])                 \n\t"
+    "lh             %[r5],       0(%[sinptr])                 \n\t"
+    "addiu          %[k],        %[k],          -1            \n\t"
+    "mul            %[r2],       %[r0],         %[r4]         \n\t"
+    "mul            %[r0],       %[r0],         %[r5]         \n\t"
+    "mul            %[r3],       %[r1],         %[r5]         \n\t"
+    "mul            %[r1],       %[r1],         %[r4]         \n\t"
+    "addiu          %[cosptr],   %[cosptr],     2             \n\t"
+    "addiu          %[sinptr],   %[sinptr],     2             \n\t"
+    "addu           %[r8],       %[inre],       %[offset]     \n\t"
+    "addu           %[r9],       %[inim],       %[offset]     \n\t"
+    "addiu          %[r2],       %[r2],         16            \n\t"
+    "sra            %[r2],       %[r2],         5             \n\t"
+    "addiu          %[r0],       %[r0],         16            \n\t"
+    "sra            %[r0],       %[r0],         5             \n\t"
+    "addiu          %[r3],       %[r3],         16            \n\t"
+    "sra            %[r3],       %[r3],         5             \n\t"
+    "lh             %[r6],       0(%[r8])                     \n\t"
+    "lh             %[r7],       0(%[r9])                     \n\t"
+    "addiu          %[r1],       %[r1],         16            \n\t"
+    "sra            %[r1],       %[r1],         5             \n\t"
+    "mul            %[r8],       %[r7],         %[r4]         \n\t"
+    "mul            %[r7],       %[r7],         %[r5]         \n\t"
+    "mul            %[r9],       %[r6],         %[r4]         \n\t"
+    "mul            %[r6],       %[r6],         %[r5]         \n\t"
+    "addu           %[r2],       %[r2],         %[r3]         \n\t"
+    "subu           %[r1],       %[r1],         %[r0]         \n\t"
+    "sll            %[r0],       %[offset],     1             \n\t"
+    "addu           %[r4],       %[outre1],     %[r0]         \n\t"
+    "addu           %[r5],       %[outre2],     %[r0]         \n\t"
+    "addiu          %[r8],       %[r8],         16            \n\t"
+    "sra            %[r8],       %[r8],         5             \n\t"
+    "addiu          %[r7],       %[r7],         16            \n\t"
+    "sra            %[r7],       %[r7],         5             \n\t"
+    "addiu          %[r6],       %[r6],         16            \n\t"
+    "sra            %[r6],       %[r6],         5             \n\t"
+    "addiu          %[r9],       %[r9],         16            \n\t"
+    "sra            %[r9],       %[r9],         5             \n\t"
+    "addu           %[r8],       %[r8],         %[r6]         \n\t"
+    "negu           %[r8],       %[r8]                        \n\t"
+    "subu           %[r7],       %[r7],         %[r9]         \n\t"
+    "subu           %[r6],       %[r2],         %[r7]         \n\t"
+    "addu           %[r0],       %[r2],         %[r7]         \n\t"
+    "addu           %[r3],       %[r1],         %[r8]         \n\t"
+    "subu           %[r1],       %[r8],         %[r1]         \n\t"
+    "sw             %[r6],       0(%[outre1])                 \n\t"
+    "sw             %[r0],       0(%[r4])                     \n\t"
+    "sw             %[r3],       0(%[outre2])                 \n\t"
+    "sw             %[r1],       0(%[r5])                     \n\t"
+    "addiu          %[outre1],   %[outre1],     4             \n\t"
+    "addiu          %[offset],   %[offset],     -4            \n\t"
+    "addiu          %[inre],     %[inre],       2             \n\t"
+    "addiu          %[inim],     %[inim],       2             \n\t"
+    // Inlined WebRtcSpl_MaxAbsValueW32
+    "negu           %[r5],       %[r6]                        \n\t"
+    "slt            %[r2],       %[r6],         $zero         \n\t"
+    "movn           %[r6],       %[r5],         %[r2]         \n\t"
+    "negu           %[r5],       %[r0]                        \n\t"
+    "slt            %[r2],       %[r0],         $zero         \n\t"
+    "movn           %[r0],       %[r5],         %[r2]         \n\t"
+    "negu           %[r5],       %[r3]                        \n\t"
+    "slt            %[r2],       %[r3],         $zero         \n\t"
+    "movn           %[r3],       %[r5],         %[r2]         \n\t"
+    "negu           %[r5],       %[r1]                        \n\t"
+    "slt            %[r2],       %[r1],         $zero         \n\t"
+    "movn           %[r1],       %[r5],         %[r2]         \n\t"
+    "slt            %[r2],       %[r6],         %[r0]         \n\t"
+    "slt            %[r5],       %[r3],         %[r1]         \n\t"
+    "movn           %[r6],       %[r0],         %[r2]         \n\t"
+    "movn           %[r3],       %[r1],         %[r5]         \n\t"
+    "slt            %[r2],       %[r6],         %[r3]         \n\t"
+    "movn           %[r6],       %[r3],         %[r2]         \n\t"
+    "slt            %[r2],       %[max],        %[r6]         \n\t"
+    "movn           %[max],      %[r6],         %[r2]         \n\t"
+    "bgtz           %[k],        1b                           \n\t"
+    " addiu         %[outre2],   %[outre2],     4             \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "clz            %[max],      %[max]                       \n\t"
+    "addiu          %[max],      %[max],        -25           \n\t"
+    ".set           pop                                       \n\t"
+    : [inre] "=&r" (inre), [inim] "=&r" (inim),
+      [outre1] "=&r" (outre1), [outre2] "=&r" (outre2),
+      [offset] "+r" (offset), [k] "+r" (k), [r0] "=&r" (r0),
+      [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+      [r4] "=&r" (r4), [r5] "=&r" (r5), [r6] "=&r" (r6),
+      [r7] "=&r" (r7), [r8] "=&r" (r8), [r9] "=&r" (r9),
+      [max] "=&r" (max)
+    : [inreQ7] "r" (inreQ7), [inimQ7] "r" (inimQ7),
+      [cosptr] "r" (cosptr), [sinptr] "r" (sinptr),
+      [outre1Q16] "r" (outre1Q16), [outre2Q16] "r" (outre2Q16)
+    : "hi", "lo", "memory"
+  );
+
+  // "Fastest" vectors
+  k = FRAMESAMPLES / 4;
+  __asm __volatile (
+    ".set           push                                      \n\t"
+    ".set           noreorder                                 \n\t"
+    "addiu          %[inre],      %[inreQ7],    0             \n\t"
+    "addiu          %[inim],      %[inimQ7],    0             \n\t"
+    "addiu          %[outre1],    %[outre1Q16], 0             \n\t"
+    "addiu          %[outre2],    %[outre2Q16], 0             \n\t"
+    "bltz           %[max],       2f                          \n\t"
+    " subu          %[max1],      $zero,        %[max]        \n\t"
+   "1:                                                        \n\t"
+    "lw             %[r0],        0(%[outre1])                \n\t"
+    "lw             %[r1],        0(%[outre2])                \n\t"
+    "lw             %[r2],        4(%[outre1])                \n\t"
+    "lw             %[r3],        4(%[outre2])                \n\t"
+    "sllv           %[r0],        %[r0],        %[max]        \n\t"
+    "sllv           %[r1],        %[r1],        %[max]        \n\t"
+    "sllv           %[r2],        %[r2],        %[max]        \n\t"
+    "sllv           %[r3],        %[r3],        %[max]        \n\t"
+    "addiu          %[k],         %[k],         -1            \n\t"
+    "addiu          %[outre1],    %[outre1],    8             \n\t"
+    "addiu          %[outre2],    %[outre2],    8             \n\t"
+    "sh             %[r0],        0(%[inre])                  \n\t"
+    "sh             %[r1],        0(%[inim])                  \n\t"
+    "sh             %[r2],        2(%[inre])                  \n\t"
+    "sh             %[r3],        2(%[inim])                  \n\t"
+    "addiu          %[inre],      %[inre],      4             \n\t"
+    "bgtz           %[k],         1b                          \n\t"
+    " addiu         %[inim],      %[inim],      4             \n\t"
+    "b              4f                                        \n\t"
+    " nop                                                     \n\t"
+   "2:                                                        \n\t"
+#if !defined(MIPS_DSP_R1_LE)
+    "addiu          %[r4],        $zero,        1             \n\t"
+    "addiu          %[r5],        %[max1],      -1            \n\t"
+    "sllv           %[r4],        %[r4],        %[r5]         \n\t"
+#endif  // #if !defined(MIPS_DSP_R1_LE)
+   "3:                                                        \n\t"
+    "lw             %[r0],        0(%[outre1])                \n\t"
+    "lw             %[r1],        0(%[outre2])                \n\t"
+    "lw             %[r2],        4(%[outre1])                \n\t"
+    "lw             %[r3],        4(%[outre2])                \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shrav_r.w      %[r0],        %[r0],        %[max1]       \n\t"
+    "shrav_r.w      %[r1],        %[r1],        %[max1]       \n\t"
+    "shrav_r.w      %[r2],        %[r2],        %[max1]       \n\t"
+    "shrav_r.w      %[r3],        %[r3],        %[max1]       \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addu           %[r0],        %[r0],        %[r4]         \n\t"
+    "addu           %[r1],        %[r1],        %[r4]         \n\t"
+    "addu           %[r2],        %[r2],        %[r4]         \n\t"
+    "addu           %[r3],        %[r3],        %[r4]         \n\t"
+    "srav           %[r0],        %[r0],        %[max1]       \n\t"
+    "srav           %[r1],        %[r1],        %[max1]       \n\t"
+    "srav           %[r2],        %[r2],        %[max1]       \n\t"
+    "srav           %[r3],        %[r3],        %[max1]       \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[outre1],    %[outre1],    8             \n\t"
+    "addiu          %[outre2],    %[outre2],    8             \n\t"
+    "sh             %[r0],        0(%[inre])                  \n\t"
+    "sh             %[r1],        0(%[inim])                  \n\t"
+    "sh             %[r2],        2(%[inre])                  \n\t"
+    "sh             %[r3],        2(%[inim])                  \n\t"
+    "addiu          %[k],         %[k],         -1            \n\t"
+    "addiu          %[inre],      %[inre],      4             \n\t"
+    "bgtz           %[k],         3b                          \n\t"
+    " addiu         %[inim],      %[inim],      4             \n\t"
+   "4:                                                        \n\t"
+    ".set           pop                                       \n\t"
+    : [k] "+r" (k), [max1] "=&r" (max1), [r0] "=&r" (r0),
+      [inre] "=&r" (inre), [inim] "=&r" (inim),
+      [outre1] "=&r" (outre1), [outre2] "=&r" (outre2),
+#if !defined(MIPS_DSP_R1_LE)
+      [r4] "=&r" (r4), [r5] "=&r" (r5),
+#endif  // #if !defined(MIPS_DSP_R1_LE)
+      [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3)
+    : [max] "r" (max), [inreQ7] "r" (inreQ7),
+      [inimQ7] "r" (inimQ7), [outre1Q16] "r" (outre1Q16),
+      [outre2Q16] "r" (outre2Q16)
+    : "memory"
+  );
+
+  WebRtcIsacfix_FftRadix16Fastest(inreQ7, inimQ7, 1); // real call
+
+  // All the remaining processing is done inside a single loop to avoid
+  // unnecessary memory accesses. MIPS DSPr2 version processes two samples
+  // at a time.
+  cosptr = (int16_t*)WebRtcIsacfix_kCosTab1;
+  sinptr = (int16_t*)WebRtcIsacfix_kSinTab1;
+  k = FRAMESAMPLES / 2;
+  __asm __volatile (
+    ".set           push                                      \n\t"
+    ".set           noreorder                                 \n\t"
+    "addiu          %[inre],      %[inreQ7],    0             \n\t"
+    "addiu          %[inim],      %[inimQ7],    0             \n\t"
+    "addiu          %[outre1],    %[outre1Q16], 0             \n\t"
+    "addiu          %[outre2],    %[outre2Q16], 0             \n\t"
+    "addiu          %[r4],        $zero,        273           \n\t"
+    "addiu          %[r5],        $zero,        31727         \n\t"
+#if defined(MIPS_DSP_R2_LE)
+    "addiu          %[max],       %[max],       16            \n\t"
+    "replv.ph       %[r4],        %[r4]                       \n\t"
+#endif  // #if defined(MIPS_DSP_R2_LE)
+    "bltz           %[max],       2f                          \n\t"
+    " subu          %[max1],      $zero,        %[max]        \n\t"
+#if defined(MIPS_DSP_R2_LE)
+    "addiu          %[max],       %[max],       1             \n\t"
+#endif  // #if defined(MIPS_DSP_R2_LE)
+   "1:                                                        \n\t"
+#if defined(MIPS_DSP_R2_LE)
+    "lwl            %[r0],        0(%[inre])                  \n\t"
+    "lwl            %[r1],        0(%[inim])                  \n\t"
+    "lh             %[r2],        0(%[cosptr])                \n\t"
+    "lwr            %[r0],        0(%[inre])                  \n\t"
+    "lwr            %[r1],        0(%[inim])                  \n\t"
+    "lh             %[r3],        0(%[sinptr])                \n\t"
+    "muleq_s.w.phr  %[r6],        %[r0],        %[r4]         \n\t"
+    "muleq_s.w.phr  %[r7],        %[r1],        %[r4]         \n\t"
+    "muleq_s.w.phl  %[r0],        %[r0],        %[r4]         \n\t"
+    "muleq_s.w.phl  %[r1],        %[r1],        %[r4]         \n\t"
+    "addiu          %[k],         %[k],         -2            \n\t"
+    "addiu          %[inre],      %[inre],      4             \n\t"
+    "addiu          %[inim],      %[inim],      4             \n\t"
+    "shrav_r.w      %[r6],        %[r6],        %[max]        \n\t"
+    "shrav_r.w      %[r7],        %[r7],        %[max]        \n\t"
+    "mult           $ac0,         %[r2],        %[r6]         \n\t"
+    "mult           $ac1,         %[r3],        %[r7]         \n\t"
+    "mult           $ac2,         %[r2],        %[r7]         \n\t"
+    "mult           $ac3,         %[r3],        %[r6]         \n\t"
+    "lh             %[r2],        2(%[cosptr])                \n\t"
+    "lh             %[r3],        2(%[sinptr])                \n\t"
+    "extr_r.w       %[r6],        $ac0,         14            \n\t"
+    "extr_r.w       %[r7],        $ac1,         14            \n\t"
+    "extr_r.w       %[r8],        $ac2,         14            \n\t"
+    "extr_r.w       %[r9],        $ac3,         14            \n\t"
+    "shrav_r.w      %[r0],        %[r0],        %[max]        \n\t"
+    "shrav_r.w      %[r1],        %[r1],        %[max]        \n\t"
+    "mult           $ac0,         %[r2],        %[r0]         \n\t"
+    "mult           $ac1,         %[r3],        %[r1]         \n\t"
+    "mult           $ac2,         %[r2],        %[r1]         \n\t"
+    "mult           $ac3,         %[r3],        %[r0]         \n\t"
+    "addiu          %[cosptr],    %[cosptr],    4             \n\t"
+    "extr_r.w       %[r0],        $ac0,         14            \n\t"
+    "extr_r.w       %[r1],        $ac1,         14            \n\t"
+    "extr_r.w       %[r2],        $ac2,         14            \n\t"
+    "extr_r.w       %[r3],        $ac3,         14            \n\t"
+    "subu           %[r6],        %[r6],        %[r7]         \n\t"
+    "addu           %[r8],        %[r8],        %[r9]         \n\t"
+    "mult           $ac0,         %[r5],        %[r6]         \n\t"
+    "mult           $ac1,         %[r5],        %[r8]         \n\t"
+    "addiu          %[sinptr],    %[sinptr],    4             \n\t"
+    "subu           %[r0],        %[r0],        %[r1]         \n\t"
+    "addu           %[r2],        %[r2],        %[r3]         \n\t"
+    "extr_r.w       %[r1],        $ac0,         11            \n\t"
+    "extr_r.w       %[r3],        $ac1,         11            \n\t"
+    "mult           $ac2,         %[r5],        %[r0]         \n\t"
+    "mult           $ac3,         %[r5],        %[r2]         \n\t"
+    "sw             %[r1],        0(%[outre1])                \n\t"
+    "sw             %[r3],        0(%[outre2])                \n\t"
+    "addiu          %[outre1],    %[outre1],    8             \n\t"
+    "extr_r.w       %[r0],        $ac2,         11            \n\t"
+    "extr_r.w       %[r2],        $ac3,         11            \n\t"
+    "sw             %[r0],        -4(%[outre1])               \n\t"
+    "sw             %[r2],        4(%[outre2])                \n\t"
+    "bgtz           %[k],         1b                          \n\t"
+    " addiu         %[outre2],    %[outre2],    8             \n\t"
+    "b              3f                                        \n\t"
+#else  // #if defined(MIPS_DSP_R2_LE)
+    "lh             %[r0],        0(%[inre])                  \n\t"
+    "lh             %[r1],        0(%[inim])                  \n\t"
+    "addiu          %[k],         %[k],         -1            \n\t"
+    "srav           %[r0],        %[r0],        %[max]        \n\t"
+    "srav           %[r1],        %[r1],        %[max]        \n\t"
+    "sra            %[r2],        %[r0],        16            \n\t"
+    "andi           %[r0],        %[r0],        0xFFFF        \n\t"
+    "sra            %[r0],        %[r0],        1             \n\t"
+    "sra            %[r3],        %[r1],        16            \n\t"
+    "andi           %[r1],        %[r1],        0xFFFF        \n\t"
+    "sra            %[r1],        %[r1],        1             \n\t"
+    "mul            %[r2],        %[r2],        %[r4]         \n\t"
+    "mul            %[r0],        %[r0],        %[r4]         \n\t"
+    "mul            %[r3],        %[r3],        %[r4]         \n\t"
+    "mul            %[r1],        %[r1],        %[r4]         \n\t"
+    "addiu          %[inre],      %[inre],      2             \n\t"
+    "addiu          %[inim],      %[inim],      2             \n\t"
+    "lh             %[r6],        0(%[cosptr])                \n\t"
+    "lh             %[r7],        0(%[sinptr])                \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r0],        %[r0],        15            \n\t"
+    "shra_r.w       %[r1],        %[r1],        15            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r0],        %[r0],        0x4000        \n\t"
+    "addiu          %[r1],        %[r1],        0x4000        \n\t"
+    "sra            %[r0],        %[r0],        15            \n\t"
+    "sra            %[r1],        %[r1],        15            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "addu           %[r0],        %[r2],        %[r0]         \n\t"
+    "addu           %[r1],        %[r3],        %[r1]         \n\t"
+    "sra            %[r2],        %[r0],        16            \n\t"
+    "andi           %[r0],        %[r0],        0xFFFF        \n\t"
+    "mul            %[r9],        %[r2],        %[r6]         \n\t"
+    "mul            %[r2],        %[r2],        %[r7]         \n\t"
+    "mul            %[r8],        %[r0],        %[r6]         \n\t"
+    "mul            %[r0],        %[r0],        %[r7]         \n\t"
+    "sra            %[r3],        %[r3],        16            \n\t"
+    "andi           %[r1],        %[r1],        0xFFFF        \n\t"
+    "sll            %[r9],        %[r9],        2             \n\t"
+    "sll            %[r2],        %[r2],        2             \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r8],        %[r8],        14            \n\t"
+    "shra_r.w       %[r0],        %[r0],        14            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r8],        %[r8],        0x2000        \n\t"
+    "addiu          %[r0],        %[r0],        0x2000        \n\t"
+    "sra            %[r8],        %[r8],        14            \n\t"
+    "sra            %[r0],        %[r0],        14            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "addu           %[r9],        %[r9],        %[r8]         \n\t"
+    "addu           %[r2],        %[r2],        %[r0]         \n\t"
+    "mul            %[r0],        %[r3],        %[r6]         \n\t"
+    "mul            %[r3],        %[r3],        %[r7]         \n\t"
+    "mul            %[r8],        %[r1],        %[r6]         \n\t"
+    "mul            %[r1],        %[r1],        %[r8]         \n\t"
+    "addiu          %[cosptr],    %[cosptr],    2             \n\t"
+    "addiu          %[sinptr],    %[sinptr],    2             \n\t"
+    "sll            %[r0],        %[r0],        2             \n\t"
+    "sll            %[r3],        %[r3],        2             \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r8],        %[r8],        14            \n\t"
+    "shra_r.w       %[r1],        %[r1],        14            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r8],        %[r8],        0x2000        \n\t"
+    "addiu          %[r1],        %[r1],        0x2000        \n\t"
+    "sra            %[r8],        %[r8],        14            \n\t"
+    "sra            %[r1],        %[r1],        14            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "addu           %[r0],        %[r0],        %[r8]         \n\t"
+    "addu           %[r3],        %[r3],        %[r1]         \n\t"
+    "subu           %[r9],        %[r9],        %[r3]         \n\t"
+    "addu           %[r0],        %[r0],        %[r2]         \n\t"
+    "sra            %[r1],        %[r9],        16            \n\t"
+    "andi           %[r9],        %[r9],        0xFFFF        \n\t"
+    "mul            %[r1],        %[r1],        %[r5]         \n\t"
+    "mul            %[r9],        %[r9],        %[r5]         \n\t"
+    "sra            %[r2],        %[r0],        16            \n\t"
+    "andi           %[r0],        %[r0],        0xFFFF        \n\t"
+    "mul            %[r2],        %[r2],        %[r5]         \n\t"
+    "mul            %[r0],        %[r0],        %[r5]         \n\t"
+    "sll            %[r1],        %[r1],        5             \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r9],        %[r9],        11            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r9],        %[r9],        0x400         \n\t"
+    "sra            %[r9],        %[r9],        11            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "addu           %[r1],        %[r1],        %[r9]         \n\t"
+    "sll            %[r2],        %[r2],        5             \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r0],        %[r0],        11            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r0],        %[r0],        0x400         \n\t"
+    "sra            %[r0],        %[r0],        11            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "addu           %[r0],        %[r0],        %[r2]         \n\t"
+    "sw             %[r1],        0(%[outre1])                \n\t"
+    "addiu          %[outre1],    %[outre1],    4             \n\t"
+    "sw             %[r0],        0(%[outre2])                \n\t"
+    "bgtz           %[k],         1b                          \n\t"
+    " addiu         %[outre2],    %[outre2],    4             \n\t"
+    "b              3f                                        \n\t"
+    " nop                                                     \n\t"
+#endif  // #if defined(MIPS_DSP_R2_LE)
+   "2:                                                        \n\t"
+#if defined(MIPS_DSP_R2_LE)
+    "addiu          %[max1],      %[max1],      -1            \n\t"
+   "21:                                                       \n\t"
+    "lwl            %[r0],        0(%[inre])                  \n\t"
+    "lwl            %[r1],        0(%[inim])                  \n\t"
+    "lh             %[r2],        0(%[cosptr])                \n\t"
+    "lwr            %[r0],        0(%[inre])                  \n\t"
+    "lwr            %[r1],        0(%[inim])                  \n\t"
+    "lh             %[r3],        0(%[sinptr])                \n\t"
+    "muleq_s.w.phr  %[r6],        %[r0],        %[r4]         \n\t"
+    "muleq_s.w.phr  %[r7],        %[r1],        %[r4]         \n\t"
+    "muleq_s.w.phl  %[r0],        %[r0],        %[r4]         \n\t"
+    "muleq_s.w.phl  %[r1],        %[r1],        %[r4]         \n\t"
+    "addiu          %[k],         %[k],         -2            \n\t"
+    "addiu          %[inre],      %[inre],      4             \n\t"
+    "addiu          %[inim],      %[inim],      4             \n\t"
+    "sllv           %[r6],        %[r6],        %[max1]       \n\t"
+    "sllv           %[r7],        %[r7],        %[max1]       \n\t"
+    "mult           $ac0,         %[r2],        %[r6]         \n\t"
+    "mult           $ac1,         %[r3],        %[r7]         \n\t"
+    "mult           $ac2,         %[r2],        %[r7]         \n\t"
+    "mult           $ac3,         %[r3],        %[r6]         \n\t"
+    "lh             %[r2],        2(%[cosptr])                \n\t"
+    "lh             %[r3],        2(%[sinptr])                \n\t"
+    "extr_r.w       %[r6],        $ac0,         14            \n\t"
+    "extr_r.w       %[r7],        $ac1,         14            \n\t"
+    "extr_r.w       %[r8],        $ac2,         14            \n\t"
+    "extr_r.w       %[r9],        $ac3,         14            \n\t"
+    "sllv           %[r0],        %[r0],        %[max1]       \n\t"
+    "sllv           %[r1],        %[r1],        %[max1]       \n\t"
+    "mult           $ac0,         %[r2],        %[r0]         \n\t"
+    "mult           $ac1,         %[r3],        %[r1]         \n\t"
+    "mult           $ac2,         %[r2],        %[r1]         \n\t"
+    "mult           $ac3,         %[r3],        %[r0]         \n\t"
+    "addiu          %[cosptr],    %[cosptr],    4             \n\t"
+    "extr_r.w       %[r0],        $ac0,         14            \n\t"
+    "extr_r.w       %[r1],        $ac1,         14            \n\t"
+    "extr_r.w       %[r2],        $ac2,         14            \n\t"
+    "extr_r.w       %[r3],        $ac3,         14            \n\t"
+    "subu           %[r6],        %[r6],        %[r7]         \n\t"
+    "addu           %[r8],        %[r8],        %[r9]         \n\t"
+    "mult           $ac0,         %[r5],        %[r6]         \n\t"
+    "mult           $ac1,         %[r5],        %[r8]         \n\t"
+    "addiu          %[sinptr],    %[sinptr],    4             \n\t"
+    "subu           %[r0],        %[r0],        %[r1]         \n\t"
+    "addu           %[r2],        %[r2],        %[r3]         \n\t"
+    "extr_r.w       %[r1],        $ac0,         11            \n\t"
+    "extr_r.w       %[r3],        $ac1,         11            \n\t"
+    "mult           $ac2,         %[r5],        %[r0]         \n\t"
+    "mult           $ac3,         %[r5],        %[r2]         \n\t"
+    "sw             %[r1],        0(%[outre1])                \n\t"
+    "sw             %[r3],        0(%[outre2])                \n\t"
+    "addiu          %[outre1],    %[outre1],    8             \n\t"
+    "extr_r.w       %[r0],        $ac2,         11            \n\t"
+    "extr_r.w       %[r2],        $ac3,         11            \n\t"
+    "sw             %[r0],        -4(%[outre1])               \n\t"
+    "sw             %[r2],        4(%[outre2])                \n\t"
+    "bgtz           %[k],         21b                         \n\t"
+    " addiu         %[outre2],    %[outre2],    8             \n\t"
+    "b              3f                                        \n\t"
+    " nop                                                     \n\t"
+#else  // #if defined(MIPS_DSP_R2_LE)
+    "lh             %[r0],        0(%[inre])                  \n\t"
+    "lh             %[r1],        0(%[inim])                  \n\t"
+    "addiu          %[k],         %[k],         -1            \n\t"
+    "sllv           %[r0],        %[r0],        %[max1]       \n\t"
+    "sllv           %[r1],        %[r1],        %[max1]       \n\t"
+    "sra            %[r2],        %[r0],        16            \n\t"
+    "andi           %[r0],        %[r0],        0xFFFF        \n\t"
+    "sra            %[r0],        %[r0],        1             \n\t"
+    "sra            %[r3],        %[r1],        16            \n\t"
+    "andi           %[r1],        %[r1],        0xFFFF        \n\t"
+    "sra            %[r1],        %[r1],        1             \n\t"
+    "mul            %[r2],        %[r2],        %[r4]         \n\t"
+    "mul            %[r0],        %[r0],        %[r4]         \n\t"
+    "mul            %[r3],        %[r3],        %[r4]         \n\t"
+    "mul            %[r1],        %[r1],        %[r4]         \n\t"
+    "addiu          %[inre],      %[inre],      2             \n\t"
+    "addiu          %[inim],      %[inim],      2             \n\t"
+    "lh             %[r6],        0(%[cosptr])                \n\t"
+    "lh             %[r7],        0(%[sinptr])                \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r0],        %[r0],        15            \n\t"
+    "shra_r.w       %[r1],        %[r1],        15            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r0],        %[r0],        0x4000        \n\t"
+    "addiu          %[r1],        %[r1],        0x4000        \n\t"
+    "sra            %[r0],        %[r0],        15            \n\t"
+    "sra            %[r1],        %[r1],        15            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "addu           %[r0],        %[r2],        %[r0]         \n\t"
+    "addu           %[r1],        %[r3],        %[r1]         \n\t"
+    "sra            %[r2],        %[r0],        16            \n\t"
+    "andi           %[r0],        %[r0],        0xFFFF        \n\t"
+    "mul            %[r9],        %[r2],        %[r6]         \n\t"
+    "mul            %[r2],        %[r2],        %[r7]         \n\t"
+    "mul            %[r8],        %[r0],        %[r6]         \n\t"
+    "mul            %[r0],        %[r0],        %[r7]         \n\t"
+    "sra            %[r3],        %[r1],        16            \n\t"
+    "andi           %[r1],        %[r1],        0xFFFF        \n\t"
+    "sll            %[r9],        %[r9],        2             \n\t"
+    "sll            %[r2],        %[r2],        2             \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r8],        %[r8],        14            \n\t"
+    "shra_r.w       %[r0],        %[r0],        14            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r8],        %[r8],        0x2000        \n\t"
+    "addiu          %[r0],        %[r0],        0x2000        \n\t"
+    "sra            %[r8],        %[r8],        14            \n\t"
+    "sra            %[r0],        %[r0],        14            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "addu           %[r9],        %[r9],        %[r8]         \n\t"
+    "addu           %[r2],        %[r2],        %[r0]         \n\t"
+    "mul            %[r0],        %[r3],        %[r6]         \n\t"
+    "mul            %[r3],        %[r3],        %[r7]         \n\t"
+    "mul            %[r8],        %[r1],        %[r6]         \n\t"
+    "mul            %[r1],        %[r1],        %[r7]         \n\t"
+    "addiu          %[cosptr],    %[cosptr],    2             \n\t"
+    "addiu          %[sinptr],    %[sinptr],    2             \n\t"
+    "sll            %[r0],        %[r0],        2             \n\t"
+    "sll            %[r3],        %[r3],        2             \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r8],        %[r8],        14            \n\t"
+    "shra_r.w       %[r1],        %[r1],        14            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r8],        %[r8],        0x2000        \n\t"
+    "addiu          %[r1],        %[r1],        0x2000        \n\t"
+    "sra            %[r8],        %[r8],        14            \n\t"
+    "sra            %[r1],        %[r1],        14            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "addu           %[r0],        %[r0],        %[r8]         \n\t"
+    "addu           %[r3],        %[r3],        %[r1]         \n\t"
+    "subu           %[r9],        %[r9],        %[r3]         \n\t"
+    "addu           %[r0],        %[r0],        %[r2]         \n\t"
+    "sra            %[r1],        %[r9],        16            \n\t"
+    "andi           %[r9],        %[r9],        0xFFFF        \n\t"
+    "mul            %[r1],        %[r1],        %[r5]         \n\t"
+    "mul            %[r9],        %[r9],        %[r5]         \n\t"
+    "sra            %[r2],        %[r0],        16            \n\t"
+    "andi           %[r0],        %[r0],        0xFFFF        \n\t"
+    "mul            %[r2],        %[r2],        %[r5]         \n\t"
+    "mul            %[r0],        %[r0],        %[r5]         \n\t"
+    "sll            %[r1],        %[r1],        5             \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r9],        %[r9],        11            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r9],        %[r9],        0x400         \n\t"
+    "sra            %[r9],        %[r9],        11            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "addu           %[r1],        %[r1],        %[r9]         \n\t"
+    "sll            %[r2],        %[r2],        5             \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shra_r.w       %[r0],        %[r0],        11            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "addiu          %[r0],        %[r0],        0x400         \n\t"
+    "sra            %[r0],        %[r0],        11            \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "addu           %[r0],        %[r0],        %[r2]         \n\t"
+    "sw             %[r1],        0(%[outre1])                \n\t"
+    "addiu          %[outre1],    %[outre1],    4             \n\t"
+    "sw             %[r0],        0(%[outre2])                \n\t"
+    "bgtz           %[k],         2b                          \n\t"
+    " addiu         %[outre2],    %[outre2],    4             \n\t"
+#endif  // #if defined(MIPS_DSP_R2_LE)
+   "3:                                                        \n\t"
+    ".set           pop                                       \n\t"
+    : [k] "+r" (k), [r0] "=&r" (r0), [r1] "=&r" (r1),
+      [r2] "=&r" (r2), [r3] "=&r" (r3), [r4] "=&r" (r4),
+      [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
+      [r8] "=&r" (r8), [r9] "=&r" (r9), [max1] "=&r" (max1),
+      [inre] "=&r" (inre), [inim] "=&r" (inim),
+      [outre1] "=&r" (outre1), [outre2] "=&r" (outre2)
+    : [max] "r" (max), [inreQ7] "r" (inreQ7),
+      [inimQ7] "r" (inimQ7), [cosptr] "r" (cosptr),
+      [sinptr] "r" (sinptr), [outre1Q16] "r" (outre1Q16),
+      [outre2Q16] "r" (outre2Q16)
+    : "hi", "lo", "memory"
+#if defined(MIPS_DSP_R2_LE)
+    , "$ac1hi", "$ac1lo", "$ac2hi", "$ac2lo", "$ac3hi", "$ac3lo"
+#endif  // #if defined(MIPS_DSP_R2_LE)
+  );
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/transform_neon.c b/modules/audio_coding/codecs/isac/fix/source/transform_neon.c
new file mode 100644
index 0000000..79dadc4
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/transform_neon.c
@@ -0,0 +1,479 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "modules/audio_coding/codecs/isac/fix/source/fft.h"
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+// Tables are defined in transform_tables.c file.
+// Cosine table 1 in Q14.
+extern const int16_t WebRtcIsacfix_kCosTab1[FRAMESAMPLES/2];
+// Sine table 1 in Q14.
+extern const int16_t WebRtcIsacfix_kSinTab1[FRAMESAMPLES/2];
+// Sine table 2 in Q14.
+extern const int16_t WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4];
+
+static inline int32_t ComplexMulAndFindMaxNeon(int16_t* inre1Q9,
+                                               int16_t* inre2Q9,
+                                               int32_t* outreQ16,
+                                               int32_t* outimQ16) {
+  int k;
+  const int16_t* kCosTab = &WebRtcIsacfix_kCosTab1[0];
+  const int16_t* kSinTab = &WebRtcIsacfix_kSinTab1[0];
+  // 0.5 / sqrt(240) in Q19 is round((.5 / sqrt(240)) * (2^19)) = 16921.
+  // Use "16921 << 5" and vqdmulh, instead of ">> 26" as in the C code.
+  int32_t fact  = 16921 << 5;
+  int32x4_t factq = vdupq_n_s32(fact);
+  uint32x4_t max_r = vdupq_n_u32(0);
+  uint32x4_t max_i = vdupq_n_u32(0);
+
+  for (k = 0; k < FRAMESAMPLES/2; k += 8) {
+    int16x8_t tmpr = vld1q_s16(kCosTab);
+    int16x8_t tmpi = vld1q_s16(kSinTab);
+    int16x8_t inre1 = vld1q_s16(inre1Q9);
+    int16x8_t inre2 = vld1q_s16(inre2Q9);
+    kCosTab += 8;
+    kSinTab += 8;
+    inre1Q9 += 8;
+    inre2Q9 += 8;
+
+    // Use ">> 26", instead of ">> 7", ">> 16" and then ">> 3" as in the C code.
+    int32x4_t tmp0 = vmull_s16(vget_low_s16(tmpr), vget_low_s16(inre1));
+    int32x4_t tmp1 = vmull_s16(vget_low_s16(tmpr), vget_low_s16(inre2));
+    tmp0 = vmlal_s16(tmp0, vget_low_s16(tmpi), vget_low_s16(inre2));
+    tmp1 = vmlsl_s16(tmp1, vget_low_s16(tmpi), vget_low_s16(inre1));
+#if defined(WEBRTC_ARCH_ARM64)
+    int32x4_t tmp2 = vmull_high_s16(tmpr, inre1);
+    int32x4_t tmp3 = vmull_high_s16(tmpr, inre2);
+    tmp2 = vmlal_high_s16(tmp2, tmpi, inre2);
+    tmp3 = vmlsl_high_s16(tmp3, tmpi, inre1);
+#else
+    int32x4_t tmp2 = vmull_s16(vget_high_s16(tmpr), vget_high_s16(inre1));
+    int32x4_t tmp3 = vmull_s16(vget_high_s16(tmpr), vget_high_s16(inre2));
+    tmp2 = vmlal_s16(tmp2, vget_high_s16(tmpi), vget_high_s16(inre2));
+    tmp3 = vmlsl_s16(tmp3, vget_high_s16(tmpi), vget_high_s16(inre1));
+#endif
+
+    int32x4_t outr_0 = vqdmulhq_s32(tmp0, factq);
+    int32x4_t outr_1 = vqdmulhq_s32(tmp2, factq);
+    int32x4_t outi_0 = vqdmulhq_s32(tmp1, factq);
+    int32x4_t outi_1 = vqdmulhq_s32(tmp3, factq);
+    vst1q_s32(outreQ16, outr_0);
+    outreQ16 += 4;
+    vst1q_s32(outreQ16, outr_1);
+    outreQ16 += 4;
+    vst1q_s32(outimQ16, outi_0);
+    outimQ16 += 4;
+    vst1q_s32(outimQ16, outi_1);
+    outimQ16 += 4;
+
+    // Find the absolute maximum in the vectors.
+    tmp0 = vabsq_s32(outr_0);
+    tmp1 = vabsq_s32(outr_1);
+    tmp2 = vabsq_s32(outi_0);
+    tmp3 = vabsq_s32(outi_1);
+    // vabs doesn't change the value of 0x80000000.
+    // Use u32 so we don't lose the value 0x80000000.
+    max_r = vmaxq_u32(max_r, vreinterpretq_u32_s32(tmp0));
+    max_i = vmaxq_u32(max_i, vreinterpretq_u32_s32(tmp2));
+    max_r = vmaxq_u32(max_r, vreinterpretq_u32_s32(tmp1));
+    max_i = vmaxq_u32(max_i, vreinterpretq_u32_s32(tmp3));
+  }
+
+  max_r = vmaxq_u32(max_r, max_i);
+#if defined(WEBRTC_ARCH_ARM64)
+  uint32_t maximum = vmaxvq_u32(max_r);
+#else
+  uint32x2_t max32x2_r = vmax_u32(vget_low_u32(max_r), vget_high_u32(max_r));
+  max32x2_r = vpmax_u32(max32x2_r, max32x2_r);
+  uint32_t maximum = vget_lane_u32(max32x2_r, 0);
+#endif
+
+  return (int32_t)maximum;
+}
+
+static inline void PreShiftW32toW16Neon(int32_t* inre,
+                                        int32_t* inim,
+                                        int16_t* outre,
+                                        int16_t* outim,
+                                        int32_t sh) {
+  int k;
+  int32x4_t sh32x4 = vdupq_n_s32(sh);
+  for (k = 0; k < FRAMESAMPLES/2; k += 16) {
+    int32x4x4_t inre32x4x4 = vld4q_s32(inre);
+    int32x4x4_t inim32x4x4 = vld4q_s32(inim);
+    inre += 16;
+    inim += 16;
+    inre32x4x4.val[0] = vrshlq_s32(inre32x4x4.val[0], sh32x4);
+    inre32x4x4.val[1] = vrshlq_s32(inre32x4x4.val[1], sh32x4);
+    inre32x4x4.val[2] = vrshlq_s32(inre32x4x4.val[2], sh32x4);
+    inre32x4x4.val[3] = vrshlq_s32(inre32x4x4.val[3], sh32x4);
+    inim32x4x4.val[0] = vrshlq_s32(inim32x4x4.val[0], sh32x4);
+    inim32x4x4.val[1] = vrshlq_s32(inim32x4x4.val[1], sh32x4);
+    inim32x4x4.val[2] = vrshlq_s32(inim32x4x4.val[2], sh32x4);
+    inim32x4x4.val[3] = vrshlq_s32(inim32x4x4.val[3], sh32x4);
+    int16x4x4_t outre16x4x4;
+    int16x4x4_t outim16x4x4;
+    outre16x4x4.val[0]  = vmovn_s32(inre32x4x4.val[0]);
+    outre16x4x4.val[1]  = vmovn_s32(inre32x4x4.val[1]);
+    outre16x4x4.val[2]  = vmovn_s32(inre32x4x4.val[2]);
+    outre16x4x4.val[3]  = vmovn_s32(inre32x4x4.val[3]);
+    outim16x4x4.val[0]  = vmovn_s32(inim32x4x4.val[0]);
+    outim16x4x4.val[1]  = vmovn_s32(inim32x4x4.val[1]);
+    outim16x4x4.val[2]  = vmovn_s32(inim32x4x4.val[2]);
+    outim16x4x4.val[3]  = vmovn_s32(inim32x4x4.val[3]);
+    vst4_s16(outre, outre16x4x4);
+    vst4_s16(outim, outim16x4x4);
+    outre += 16;
+    outim += 16;
+  }
+}
+
+static inline void PostShiftAndSeparateNeon(int16_t* inre,
+                                            int16_t* inim,
+                                            int16_t* outre,
+                                            int16_t* outim,
+                                            int32_t sh) {
+  int k;
+  int16_t* inre1 = inre;
+  int16_t* inre2 = &inre[FRAMESAMPLES/2 - 4];
+  int16_t* inim1 = inim;
+  int16_t* inim2 = &inim[FRAMESAMPLES/2 - 4];
+  int16_t* outre1 = outre;
+  int16_t* outre2 = &outre[FRAMESAMPLES/2 - 4];
+  int16_t* outim1 = outim;
+  int16_t* outim2 = &outim[FRAMESAMPLES/2 - 4];
+  const int16_t* kSinTab1 = &WebRtcIsacfix_kSinTab2[0];
+  const int16_t* kSinTab2 = &WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4 -4];
+  // By vshl, we effectively did "<< (-sh - 23)", instead of "<< (-sh)",
+  // ">> 14" and then ">> 9" as in the C code.
+  int32x4_t shift = vdupq_n_s32(-sh - 23);
+
+  for (k = 0; k < FRAMESAMPLES/4; k += 4) {
+    int16x4_t tmpi = vld1_s16(kSinTab1);
+    kSinTab1 += 4;
+    int16x4_t tmpr = vld1_s16(kSinTab2);
+    kSinTab2 -= 4;
+    int16x4_t inre_0 = vld1_s16(inre1);
+    inre1 += 4;
+    int16x4_t inre_1 = vld1_s16(inre2);
+    inre2 -= 4;
+    int16x4_t inim_0 = vld1_s16(inim1);
+    inim1 += 4;
+    int16x4_t inim_1 = vld1_s16(inim2);
+    inim2 -= 4;
+    tmpr = vneg_s16(tmpr);
+    inre_1 = vrev64_s16(inre_1);
+    inim_1 = vrev64_s16(inim_1);
+    tmpr = vrev64_s16(tmpr);
+
+    int16x4_t xr = vqadd_s16(inre_0, inre_1);
+    int16x4_t xi = vqsub_s16(inim_0, inim_1);
+    int16x4_t yr = vqadd_s16(inim_0, inim_1);
+    int16x4_t yi = vqsub_s16(inre_1, inre_0);
+
+    int32x4_t outr0 = vmull_s16(tmpr, xr);
+    int32x4_t outi0 = vmull_s16(tmpi, xr);
+    int32x4_t outr1 = vmull_s16(tmpi, yr);
+    int32x4_t outi1 = vmull_s16(tmpi, yi);
+    outr0 = vmlsl_s16(outr0, tmpi, xi);
+    outi0 = vmlal_s16(outi0, tmpr, xi);
+    outr1 = vmlal_s16(outr1, tmpr, yi);
+    outi1 = vmlsl_s16(outi1, tmpr, yr);
+
+    outr0 = vshlq_s32(outr0, shift);
+    outi0 = vshlq_s32(outi0, shift);
+    outr1 = vshlq_s32(outr1, shift);
+    outi1 = vshlq_s32(outi1, shift);
+    outr1 = vnegq_s32(outr1);
+
+    int16x4_t outre_0  = vmovn_s32(outr0);
+    int16x4_t outim_0  = vmovn_s32(outi0);
+    int16x4_t outre_1  = vmovn_s32(outr1);
+    int16x4_t outim_1  = vmovn_s32(outi1);
+    outre_1 = vrev64_s16(outre_1);
+    outim_1 = vrev64_s16(outim_1);
+
+    vst1_s16(outre1, outre_0);
+    outre1 += 4;
+    vst1_s16(outim1, outim_0);
+    outim1 += 4;
+    vst1_s16(outre2, outre_1);
+    outre2 -= 4;
+    vst1_s16(outim2, outim_1);
+    outim2 -= 4;
+  }
+}
+
+void WebRtcIsacfix_Time2SpecNeon(int16_t* inre1Q9,
+                                 int16_t* inre2Q9,
+                                 int16_t* outreQ7,
+                                 int16_t* outimQ7) {
+  int32_t tmpreQ16[FRAMESAMPLES/2], tmpimQ16[FRAMESAMPLES/2];
+  int32_t max;
+  int32_t sh;
+
+  // Multiply with complex exponentials and combine into one complex vector.
+  // And find the maximum.
+  max = ComplexMulAndFindMaxNeon(inre1Q9, inre2Q9, tmpreQ16, tmpimQ16);
+
+  sh = (int32_t)WebRtcSpl_NormW32(max);
+  sh = sh - 24;
+
+  // If sh becomes >= 0, then we should shift sh steps to the left,
+  // and the domain will become Q(16 + sh).
+  // If sh becomes < 0, then we should shift -sh steps to the right,
+  // and the domain will become Q(16 + sh).
+  PreShiftW32toW16Neon(tmpreQ16, tmpimQ16, inre1Q9, inre2Q9, sh);
+
+  // Get DFT.
+  WebRtcIsacfix_FftRadix16Fastest(inre1Q9, inre2Q9, -1);
+
+  // If sh >= 0, shift sh steps to the right,
+  // If sh < 0, shift -sh steps to the left.
+  // Use symmetry to separate into two complex vectors
+  // and center frames in time around zero.
+  PostShiftAndSeparateNeon(inre1Q9, inre2Q9, outreQ7, outimQ7, sh);
+}
+
+static inline int32_t TransformAndFindMaxNeon(int16_t* inre,
+                                              int16_t* inim,
+                                              int32_t* outre,
+                                              int32_t* outim) {
+  int k;
+  int16_t* inre1 = inre;
+  int16_t* inre2 = &inre[FRAMESAMPLES/2 - 4];
+  int16_t* inim1 = inim;
+  int16_t* inim2 = &inim[FRAMESAMPLES/2 - 4];
+  int32_t* outre1 = outre;
+  int32_t* outre2 = &outre[FRAMESAMPLES/2 - 4];
+  int32_t* outim1 = outim;
+  int32_t* outim2 = &outim[FRAMESAMPLES/2 - 4];
+  const int16_t* kSinTab1 = &WebRtcIsacfix_kSinTab2[0];
+  const int16_t* kSinTab2 = &WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4 - 4];
+  uint32x4_t max_r = vdupq_n_u32(0);
+  uint32x4_t max_i = vdupq_n_u32(0);
+
+  // Use ">> 5", instead of "<< 9" and then ">> 14" as in the C code.
+  for (k = 0; k < FRAMESAMPLES/4; k += 4) {
+    int16x4_t tmpi = vld1_s16(kSinTab1);
+    kSinTab1 += 4;
+    int16x4_t tmpr = vld1_s16(kSinTab2);
+    kSinTab2 -= 4;
+    int16x4_t inre_0 = vld1_s16(inre1);
+    inre1 += 4;
+    int16x4_t inre_1 = vld1_s16(inre2);
+    inre2 -= 4;
+    int16x4_t inim_0 = vld1_s16(inim1);
+    inim1 += 4;
+    int16x4_t inim_1 = vld1_s16(inim2);
+    inim2 -= 4;
+    tmpr = vneg_s16(tmpr);
+    inre_1 = vrev64_s16(inre_1);
+    inim_1 = vrev64_s16(inim_1);
+    tmpr = vrev64_s16(tmpr);
+
+    int32x4_t xr = vmull_s16(tmpr, inre_0);
+    int32x4_t xi = vmull_s16(tmpr, inim_0);
+    int32x4_t yr = vmull_s16(tmpr, inim_1);
+    int32x4_t yi = vmull_s16(tmpi, inim_1);
+    xr = vmlal_s16(xr, tmpi, inim_0);
+    xi = vmlsl_s16(xi, tmpi, inre_0);
+    yr = vmlal_s16(yr, tmpi, inre_1);
+    yi = vmlsl_s16(yi, tmpr, inre_1);
+    yr = vnegq_s32(yr);
+
+    xr = vshrq_n_s32(xr, 5);
+    xi = vshrq_n_s32(xi, 5);
+    yr = vshrq_n_s32(yr, 5);
+    yi = vshrq_n_s32(yi, 5);
+
+    int32x4_t outr0 = vsubq_s32(xr, yi);
+    int32x4_t outr1 = vaddq_s32(xr, yi);
+    int32x4_t outi0 = vaddq_s32(xi, yr);
+    int32x4_t outi1 = vsubq_s32(yr, xi);
+
+    // Find the absolute maximum in the vectors.
+    int32x4_t tmp0 = vabsq_s32(outr0);
+    int32x4_t tmp1 = vabsq_s32(outr1);
+    int32x4_t tmp2 = vabsq_s32(outi0);
+    int32x4_t tmp3 = vabsq_s32(outi1);
+    // vabs doesn't change the value of 0x80000000.
+    // Use u32 so we don't lose the value 0x80000000.
+    max_r = vmaxq_u32(max_r, vreinterpretq_u32_s32(tmp0));
+    max_i = vmaxq_u32(max_i, vreinterpretq_u32_s32(tmp2));
+    max_r = vmaxq_u32(max_r, vreinterpretq_u32_s32(tmp1));
+    max_i = vmaxq_u32(max_i, vreinterpretq_u32_s32(tmp3));
+
+    // Store the vectors.
+    outr1 = vrev64q_s32(outr1);
+    outi1 = vrev64q_s32(outi1);
+    int32x4_t outr_1 = vcombine_s32(vget_high_s32(outr1), vget_low_s32(outr1));
+    int32x4_t outi_1 = vcombine_s32(vget_high_s32(outi1), vget_low_s32(outi1));
+
+    vst1q_s32(outre1, outr0);
+    outre1 += 4;
+    vst1q_s32(outim1, outi0);
+    outim1 += 4;
+    vst1q_s32(outre2, outr_1);
+    outre2 -= 4;
+    vst1q_s32(outim2, outi_1);
+    outim2 -= 4;
+  }
+
+  max_r = vmaxq_u32(max_r, max_i);
+#if defined(WEBRTC_ARCH_ARM64)
+  uint32_t maximum = vmaxvq_u32(max_r);
+#else
+  uint32x2_t max32x2_r = vmax_u32(vget_low_u32(max_r), vget_high_u32(max_r));
+  max32x2_r = vpmax_u32(max32x2_r, max32x2_r);
+  uint32_t maximum = vget_lane_u32(max32x2_r, 0);
+#endif
+
+  return (int32_t)maximum;
+}
+
+static inline void PostShiftAndDivideAndDemodulateNeon(int16_t* inre,
+                                                       int16_t* inim,
+                                                       int32_t* outre1,
+                                                       int32_t* outre2,
+                                                       int32_t sh) {
+  int k;
+  int16_t* p_inre = inre;
+  int16_t* p_inim = inim;
+  int32_t* p_outre1 = outre1;
+  int32_t* p_outre2 = outre2;
+  const int16_t* kCosTab = &WebRtcIsacfix_kCosTab1[0];
+  const int16_t* kSinTab = &WebRtcIsacfix_kSinTab1[0];
+  int32x4_t shift = vdupq_n_s32(-sh - 16);
+  // Divide through by the normalizing constant:
+  // scale all values with 1/240, i.e. with 273 in Q16.
+  // 273/65536 ~= 0.0041656
+  // 1/240 ~= 0.0041666
+  int16x8_t scale = vdupq_n_s16(273);
+  // Sqrt(240) in Q11 is round(15.49193338482967 * 2048) = 31727.
+  int factQ19 = 31727 << 16;
+  int32x4_t fact = vdupq_n_s32(factQ19);
+
+  for (k = 0; k < FRAMESAMPLES/2; k += 8) {
+    int16x8_t inre16x8 = vld1q_s16(p_inre);
+    int16x8_t inim16x8 = vld1q_s16(p_inim);
+    p_inre += 8;
+    p_inim += 8;
+    int16x8_t tmpr = vld1q_s16(kCosTab);
+    int16x8_t tmpi = vld1q_s16(kSinTab);
+    kCosTab += 8;
+    kSinTab += 8;
+    // By vshl and vmull, we effectively did "<< (-sh - 16)",
+    // instead of "<< (-sh)" and ">> 16" as in the C code.
+    int32x4_t outre1_0 = vmull_s16(vget_low_s16(inre16x8), vget_low_s16(scale));
+    int32x4_t outre2_0 = vmull_s16(vget_low_s16(inim16x8), vget_low_s16(scale));
+#if defined(WEBRTC_ARCH_ARM64)
+    int32x4_t outre1_1 = vmull_high_s16(inre16x8, scale);
+    int32x4_t outre2_1 = vmull_high_s16(inim16x8, scale);
+#else
+    int32x4_t outre1_1 = vmull_s16(vget_high_s16(inre16x8),
+                                   vget_high_s16(scale));
+    int32x4_t outre2_1 = vmull_s16(vget_high_s16(inim16x8),
+                                   vget_high_s16(scale));
+#endif
+
+    outre1_0 = vshlq_s32(outre1_0, shift);
+    outre1_1 = vshlq_s32(outre1_1, shift);
+    outre2_0 = vshlq_s32(outre2_0, shift);
+    outre2_1 = vshlq_s32(outre2_1, shift);
+
+    // Demodulate and separate.
+    int32x4_t tmpr_0 = vmovl_s16(vget_low_s16(tmpr));
+    int32x4_t tmpi_0 = vmovl_s16(vget_low_s16(tmpi));
+#if defined(WEBRTC_ARCH_ARM64)
+    int32x4_t tmpr_1 = vmovl_high_s16(tmpr);
+    int32x4_t tmpi_1 = vmovl_high_s16(tmpi);
+#else
+    int32x4_t tmpr_1 = vmovl_s16(vget_high_s16(tmpr));
+    int32x4_t tmpi_1 = vmovl_s16(vget_high_s16(tmpi));
+#endif
+
+    int64x2_t xr0 = vmull_s32(vget_low_s32(tmpr_0), vget_low_s32(outre1_0));
+    int64x2_t xi0 = vmull_s32(vget_low_s32(tmpr_0), vget_low_s32(outre2_0));
+    int64x2_t xr2 = vmull_s32(vget_low_s32(tmpr_1), vget_low_s32(outre1_1));
+    int64x2_t xi2 = vmull_s32(vget_low_s32(tmpr_1), vget_low_s32(outre2_1));
+    xr0 = vmlsl_s32(xr0, vget_low_s32(tmpi_0), vget_low_s32(outre2_0));
+    xi0 = vmlal_s32(xi0, vget_low_s32(tmpi_0), vget_low_s32(outre1_0));
+    xr2 = vmlsl_s32(xr2, vget_low_s32(tmpi_1), vget_low_s32(outre2_1));
+    xi2 = vmlal_s32(xi2, vget_low_s32(tmpi_1), vget_low_s32(outre1_1));
+
+#if defined(WEBRTC_ARCH_ARM64)
+    int64x2_t xr1 = vmull_high_s32(tmpr_0, outre1_0);
+    int64x2_t xi1 = vmull_high_s32(tmpr_0, outre2_0);
+    int64x2_t xr3 = vmull_high_s32(tmpr_1, outre1_1);
+    int64x2_t xi3 = vmull_high_s32(tmpr_1, outre2_1);
+    xr1 = vmlsl_high_s32(xr1, tmpi_0, outre2_0);
+    xi1 = vmlal_high_s32(xi1, tmpi_0, outre1_0);
+    xr3 = vmlsl_high_s32(xr3, tmpi_1, outre2_1);
+    xi3 = vmlal_high_s32(xi3, tmpi_1, outre1_1);
+#else
+    int64x2_t xr1 = vmull_s32(vget_high_s32(tmpr_0), vget_high_s32(outre1_0));
+    int64x2_t xi1 = vmull_s32(vget_high_s32(tmpr_0), vget_high_s32(outre2_0));
+    int64x2_t xr3 = vmull_s32(vget_high_s32(tmpr_1), vget_high_s32(outre1_1));
+    int64x2_t xi3 = vmull_s32(vget_high_s32(tmpr_1), vget_high_s32(outre2_1));
+    xr1 = vmlsl_s32(xr1, vget_high_s32(tmpi_0), vget_high_s32(outre2_0));
+    xi1 = vmlal_s32(xi1, vget_high_s32(tmpi_0), vget_high_s32(outre1_0));
+    xr3 = vmlsl_s32(xr3, vget_high_s32(tmpi_1), vget_high_s32(outre2_1));
+    xi3 = vmlal_s32(xi3, vget_high_s32(tmpi_1), vget_high_s32(outre1_1));
+#endif
+
+    outre1_0 = vcombine_s32(vrshrn_n_s64(xr0, 10), vrshrn_n_s64(xr1, 10));
+    outre2_0 = vcombine_s32(vrshrn_n_s64(xi0, 10), vrshrn_n_s64(xi1, 10));
+    outre1_1 = vcombine_s32(vrshrn_n_s64(xr2, 10), vrshrn_n_s64(xr3, 10));
+    outre2_1 = vcombine_s32(vrshrn_n_s64(xi2, 10), vrshrn_n_s64(xi3, 10));
+    outre1_0 = vqdmulhq_s32(outre1_0, fact);
+    outre2_0 = vqdmulhq_s32(outre2_0, fact);
+    outre1_1 = vqdmulhq_s32(outre1_1, fact);
+    outre2_1 = vqdmulhq_s32(outre2_1, fact);
+
+    vst1q_s32(p_outre1, outre1_0);
+    p_outre1 += 4;
+    vst1q_s32(p_outre1, outre1_1);
+    p_outre1 += 4;
+    vst1q_s32(p_outre2, outre2_0);
+    p_outre2 += 4;
+    vst1q_s32(p_outre2, outre2_1);
+    p_outre2 += 4;
+  }
+}
+
+void WebRtcIsacfix_Spec2TimeNeon(int16_t* inreQ7,
+                                 int16_t* inimQ7,
+                                 int32_t* outre1Q16,
+                                 int32_t* outre2Q16) {
+  int32_t max;
+  int32_t sh;
+
+  max = TransformAndFindMaxNeon(inreQ7, inimQ7, outre1Q16, outre2Q16);
+
+
+  sh = (int32_t)WebRtcSpl_NormW32(max);
+  sh = sh - 24;
+  // If sh becomes >= 0, then we should shift sh steps to the left,
+  // and the domain will become Q(16 + sh).
+  // If sh becomes < 0, then we should shift -sh steps to the right,
+  // and the domain will become Q(16 + sh).
+
+  // "Fastest" vectors.
+  PreShiftW32toW16Neon(outre1Q16, outre2Q16, inreQ7, inimQ7, sh);
+
+  // Get IDFT.
+  WebRtcIsacfix_FftRadix16Fastest(inreQ7, inimQ7, 1);
+
+  PostShiftAndDivideAndDemodulateNeon(inreQ7, inimQ7, outre1Q16, outre2Q16, sh);
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/transform_tables.c b/modules/audio_coding/codecs/isac/fix/source/transform_tables.c
new file mode 100644
index 0000000..eed88e4
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/transform_tables.c
@@ -0,0 +1,109 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file contains trigonometric functions look-up tables used in
+ * transform functions WebRtcIsacfix_Time2Spec and WebRtcIsacfix_Spec2Time.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+/* Cosine table 1 in Q14. */
+const int16_t WebRtcIsacfix_kCosTab1[FRAMESAMPLES/2] = {
+  16384,  16383,  16378,  16371,  16362,  16349,  16333,  16315,  16294,  16270,
+  16244,  16214,  16182,  16147,  16110,  16069,  16026,  15980,  15931,  15880,
+  15826,  15769,  15709,  15647,  15582,  15515,  15444,  15371,  15296,  15218,
+  15137,  15053,  14968,  14879,  14788,  14694,  14598,  14500,  14399,  14295,
+  14189,  14081,  13970,  13856,  13741,  13623,  13502,  13380,  13255,  13128,
+  12998,  12867,  12733,  12597,  12458,  12318,  12176,  12031,  11885,  11736,
+  11585,  11433,  11278,  11121,  10963,  10803,  10641,  10477,  10311,  10143,
+  9974,   9803,   9630,   9456,   9280,   9102,   8923,   8743,   8561,   8377,
+  8192,   8006,   7818,   7629,   7438,   7246,   7053,   6859,   6664,   6467,
+  6270,   6071,   5872,   5671,   5469,   5266,   5063,   4859,   4653,   4447,
+  4240,   4033,   3825,   3616,   3406,   3196,   2986,   2775,   2563,   2351,
+  2139,   1926,   1713,   1499,   1285,   1072,    857,    643,    429,    214,
+  0,   -214,   -429,   -643,   -857,  -1072,  -1285,  -1499,  -1713,  -1926,
+  -2139,  -2351,  -2563,  -2775,  -2986,  -3196,  -3406,  -3616,  -3825,  -4033,
+  -4240,  -4447,  -4653,  -4859,  -5063,  -5266,  -5469,  -5671,  -5872,  -6071,
+  -6270,  -6467,  -6664,  -6859,  -7053,  -7246,  -7438,  -7629,  -7818,  -8006,
+  -8192,  -8377,  -8561,  -8743,  -8923,  -9102,  -9280,  -9456,  -9630,  -9803,
+  -9974, -10143, -10311, -10477, -10641, -10803, -10963, -11121, -11278, -11433,
+  -11585, -11736, -11885, -12031, -12176, -12318, -12458, -12597, -12733,
+  -12867, -12998, -13128, -13255, -13380, -13502, -13623, -13741, -13856,
+  -13970, -14081, -14189, -14295, -14399, -14500, -14598, -14694, -14788,
+  -14879, -14968, -15053, -15137, -15218, -15296, -15371, -15444, -15515,
+  -15582, -15647, -15709, -15769, -15826, -15880, -15931, -15980, -16026,
+  -16069, -16110, -16147, -16182, -16214, -16244, -16270, -16294, -16315,
+  -16333, -16349, -16362, -16371, -16378, -16383
+};
+
+/* Sine table 1 in Q14. */
+const int16_t WebRtcIsacfix_kSinTab1[FRAMESAMPLES/2] = {
+  0,   214,   429,   643,   857,  1072,  1285,  1499,  1713,  1926,
+  2139,  2351,  2563,  2775,  2986,  3196,  3406,  3616,  3825,  4033,
+  4240,  4447,  4653,  4859,  5063,  5266,  5469,  5671,  5872,  6071,
+  6270,  6467,  6664,  6859,  7053,  7246,  7438,  7629,  7818,  8006,
+  8192,  8377,  8561,  8743,  8923,  9102,  9280,  9456,  9630,  9803,
+  9974, 10143, 10311, 10477, 10641, 10803, 10963, 11121, 11278, 11433,
+  11585, 11736, 11885, 12031, 12176, 12318, 12458, 12597, 12733, 12867,
+  12998, 13128, 13255, 13380, 13502, 13623, 13741, 13856, 13970, 14081,
+  14189, 14295, 14399, 14500, 14598, 14694, 14788, 14879, 14968, 15053,
+  15137, 15218, 15296, 15371, 15444, 15515, 15582, 15647, 15709, 15769,
+  15826, 15880, 15931, 15980, 16026, 16069, 16110, 16147, 16182, 16214,
+  16244, 16270, 16294, 16315, 16333, 16349, 16362, 16371, 16378, 16383,
+  16384, 16383, 16378, 16371, 16362, 16349, 16333, 16315, 16294, 16270,
+  16244, 16214, 16182, 16147, 16110, 16069, 16026, 15980, 15931, 15880,
+  15826, 15769, 15709, 15647, 15582, 15515, 15444, 15371, 15296, 15218,
+  15137, 15053, 14968, 14879, 14788, 14694, 14598, 14500, 14399, 14295,
+  14189, 14081, 13970, 13856, 13741, 13623, 13502, 13380, 13255, 13128,
+  12998, 12867, 12733, 12597, 12458, 12318, 12176, 12031, 11885, 11736,
+  11585, 11433, 11278, 11121, 10963, 10803, 10641, 10477, 10311, 10143,
+  9974,  9803,  9630,  9456,  9280,  9102,  8923,  8743,  8561,  8377,
+  8192,  8006,  7818,  7629,  7438,  7246,  7053,  6859,  6664,  6467,
+  6270,  6071,  5872,  5671,  5469,  5266,  5063,  4859,  4653,  4447,
+  4240,  4033,  3825,  3616,  3406,  3196,  2986,  2775,  2563,  2351,
+  2139,  1926,  1713,  1499,  1285,  1072,   857,   643,   429,   214
+};
+
+
+/* Sine table 2 in Q14. */
+const int16_t WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4] = {
+  16384, -16381, 16375, -16367, 16356, -16342, 16325, -16305, 16283, -16257,
+  16229, -16199, 16165, -16129, 16090, -16048, 16003, -15956, 15906, -15853,
+  15798, -15739, 15679, -15615, 15549, -15480, 15408, -15334, 15257, -15178,
+  15095, -15011, 14924, -14834, 14741, -14647, 14549, -14449, 14347, -14242,
+  14135, -14025, 13913, -13799, 13682, -13563, 13441, -13318, 13192, -13063,
+  12933, -12800, 12665, -12528, 12389, -12247, 12104, -11958, 11810, -11661,
+  11509, -11356, 11200, -11042, 10883, -10722, 10559, -10394, 10227, -10059,
+  9889,  -9717,  9543,  -9368,  9191,  -9013,  8833,  -8652,  8469,  -8285,
+  8099,  -7912,  7723,  -7534,  7342,  -7150,  6957,  -6762,  6566,  -6369,
+  6171,  -5971,  5771,  -5570,  5368,  -5165,  4961,  -4756,  4550,  -4344,
+  4137,  -3929,  3720,  -3511,  3301,  -3091,  2880,  -2669,  2457,  -2245,
+  2032,  -1819,  1606,  -1392,  1179,   -965,   750,   -536,   322,   -107
+};
+
+#if defined(MIPS32_LE)
+/* Cosine table 2 in Q14. Used only on MIPS platforms. */
+const int16_t WebRtcIsacfix_kCosTab2[FRAMESAMPLES/4] = {
+  107,   -322,   536,   -750,   965,  -1179,  1392,  -1606,  1819,  -2032,
+  2245,  -2457,  2669,  -2880,  3091,  -3301,  3511,  -3720,  3929,  -4137,
+  4344,  -4550,  4756,  -4961,  5165,  -5368,  5570,  -5771,  5971,  -6171,
+  6369,  -6566,  6762,  -6957,  7150,  -7342,  7534,  -7723,  7912,  -8099,
+  8285,  -8469,  8652,  -8833,  9013,  -9191,  9368,  -9543,  9717,  -9889,
+  10059, -10227, 10394, -10559, 10722, -10883, 11042, -11200, 11356, -11509,
+  11661, -11810, 11958, -12104, 12247, -12389, 12528, -12665, 12800, -12933,
+  13063, -13192, 13318, -13441, 13563, -13682, 13799, -13913, 14025, -14135,
+  14242, -14347, 14449, -14549, 14647, -14741, 14834, -14924, 15011, -15095,
+  15178, -15257, 15334, -15408, 15480, -15549, 15615, -15679, 15739, -15798,
+  15853, -15906, 15956, -16003, 16048, -16090, 16129, -16165, 16199, -16229,
+  16257, -16283, 16305, -16325, 16342, -16356, 16367, -16375, 16381, -16384
+};
+#endif
diff --git a/modules/audio_coding/codecs/isac/fix/source/transform_unittest.cc b/modules/audio_coding/codecs/isac/fix/source/transform_unittest.cc
new file mode 100644
index 0000000..347b049
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/transform_unittest.cc
@@ -0,0 +1,193 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+
+static const int kSamples = FRAMESAMPLES/2;
+static const int32_t spec2time_out_expected_1[kSamples] = {
+  -3366470, -2285227,
+  -3415765, -2310215, -3118030, -2222470, -3030254, -2192091, -3423170,
+  -2216041, -3305541, -2171936, -3195767, -2095779, -3153304, -2157560,
+  -3071167, -2032108, -3101190, -1972016, -3103824, -2089118, -3139811,
+  -1898337, -3102801, -2055082, -3029665, -1854140, -2962586, -1966454,
+  -3071167, -1894588, -2851743, -1917315, -2848087, -1594932, -2799242,
+  -1462184, -2845887, -1437599, -2691776, -1329637, -2770659, -1268491,
+  -2625161, -1578991, -2460299, -1186385, -2365613, -1039354, -2322608,
+  -958518, -2271749, -789860, -2254538, -850308, -2384436, -850959, -2133734,
+  -587678, -2093316, -495115, -1973364, -475177, -1801282, -173507,
+  -1848516, -158015, -1792018, -62648, -1643313, 214746, -1500758, 267077,
+  -1450193, 560521, -1521579, 675283, -1345408, 857559, -1300822, 1116332,
+  -1294533, 1241117, -1070027, 1263503, -983816, 1529821, -1019586,
+  1910421, -955420, 2073688, -836459, 2401105, -653905, 2690474, -731425,
+  2930131, -935234, 3299500, -875978, 3523432, -878906, 3924822, -1081630,
+  4561267, -1203023, 5105274, -1510983, 6052762, -2294646, 7021597,
+  -3108053, 8826736, -4935222, 11678789, -8442713, 18725700, -21526692,
+  25420577, 19589811, -28108666, 12634054, -14483066, 6263217, -9979706,
+  3665661, -7909736, 2531530, -6434896, 1700772, -5525393, 1479473,
+  -4894262, 1231760, -4353044, 1032940, -3786590, 941152, -3331614,
+  665090, -2851619, 830696, -2762201, 958007, -2483118, 788233, -2184965,
+  804825, -1967306, 1007255, -1862474, 920889, -1457506, 755406, -1405841,
+  890230, -1302124, 1161599, -701867, 1154163, -1083366, 1204743, -513581,
+  1547264, -650636, 1493384, -285543, 1771863, -277906, 1841343, -9078,
+  1751863, 230222, 1819578, 207170, 1978972, 398137, 2106468, 552155,
+  1997624, 685213, 2129520, 601078, 2238736, 944591, 2441879, 1194178,
+  2355280, 986124, 2393328, 1049005, 2417944, 1208368, 2489516, 1352023,
+  2572118, 1445283, 2856081, 1532997, 2742279, 1615877, 2915274, 1808036,
+  2856871, 1806936, 3241747, 1622461, 2978558, 1841297, 3010378, 1923666,
+  3271367, 2126700, 3070935, 1956958, 3107588, 2128405, 3288872, 2114911,
+  3315952, 2406651, 3344038, 2370199, 3368980, 2144361, 3305030, 2183803,
+  3401450, 2523102, 3405463, 2452475, 3463355, 2421678, 3551968, 2431949,
+  3477251, 2148125, 3244489, 2174090};
+static const int32_t spec2time_out_expected_2[kSamples] = {
+  1691694, -2499988, -2035547,
+  1060469, 988634, -2044502, -306271, 2041000, 201454, -2289456, 93694,
+  2129427, -369152, -1887834, 860796, 2089102, -929424, -1673956, 1395291,
+  1785651, -1619673, -1380109, 1963449, 1093311, -2111007, -840456,
+  2372786, 578119, -2242702, 89774, 2463304, -132717, -2121480, 643634,
+  2277636, -1125999, -1995858, 1543748, 2227861, -1483779, -1495491,
+  2102642, 1833876, -1920568, -958378, 2485101, 772261, -2454257, -24942,
+  2918714, 136838, -2500453, 816118, 3039735, -746560, -2365815, 1586396,
+  2714951, -1511696, -1942334, 2571792, 2182827, -2325335, -1311543,
+  3055970, 1367220, -2737182, -110626, 3889222, 631008, -3280879, 853066,
+  4122279, -706638, -3334449, 2148311, 3993512, -1846301, -3004894,
+  3426779, 3329522, -3165264, -2242423, 4756866, 2557711, -4131280,
+  -805259, 5702711, 1120592, -4852821, 743664, 6476444, -621186, -5465828,
+  2815787, 6768835, -3017442, -5338409, 5658126, 6838454, -5492288,
+  -4682382, 8874947, 6153814, -8832561, -2649251, 12817398, 4237692,
+  -13000247, 1190661, 18986363, -115738, -19693978, 9908367, 30660381,
+  -10632635, -37962068, 47022884, 89744622, -42087632, 40279224,
+  -88869341, -47542383, 38572364, 10441576, -30339718, -9926740, 19896578,
+  28009, -18886612, -1124047, 13232498, -4150304, -12770551, 2637074,
+  9051831, -6162211, -8713972, 4557937, 5489716, -6862312, -5532349,
+  5415449, 2791310, -6999367, -2790102, 5375806, 546222, -6486452,
+  -821261, 4994973, -1278840, -5645501, 1060484, 3996285, -2503954,
+  -4653629, 2220549, 3036977, -3282133, -3318585, 2780636, 1789880,
+  -4004589, -2041031, 3105373, 574819, -3992722, -971004, 3001703,
+  -676739, -3841508, 417284, 2897970, -1427018, -3058480, 1189948,
+  2210960, -2268992, -2603272, 1949785, 1576172, -2720404, -1891738,
+  2309456, 769178, -2975646, -707150, 2424652, -88039, -2966660, -65452,
+  2320780, -957557, -2798978, 744640, 1879794, -1672081, -2365319,
+  1253309, 1366383, -2204082, -1544367, 1801452, 613828, -2531994,
+  -983847, 2064842, 118326, -2613790, -203220, 2219635, -730341, -2641861,
+  563557, 1765434, -1329916, -2272927, 1037138, 1266725, -1939220,
+  -1588643, 1754528, 816552, -2376303, -1099167, 1864999, 122477,
+  -2422762, -400027, 1889228, -579916, -2490353, 287139, 2011318,
+  -1176657, -2502978, 812896, 1116502, -1940211};
+static const int16_t time2spec_out_expected_1[kSamples] = {
+  20342, 23889, -10063, -9419,
+  3242, 7280, -2012, -5029, 332, 4478, -97, -3244, -891, 3117, 773, -2204,
+  -1335, 2009, 1236, -1469, -1562, 1277, 1366, -815, -1619, 599, 1449, -177,
+  -1507, 116, 1294, 263, -1338, -244, 1059, 553, -1045, -549, 829, 826,
+  -731, -755, 516, 909, -427, -853, 189, 1004, -184, -828, -108, 888, 72,
+  -700, -280, 717, 342, -611, -534, 601, 534, -374, -646, 399, 567, -171,
+  -720, 234, 645, -11, -712, -26, 593, 215, -643, -172, 536, 361, -527,
+  -403, 388, 550, -361, -480, 208, 623, -206, -585, 41, 578, 12, -504,
+  -182, 583, 218, -437, -339, 499, 263, -354, -450, 347, 456, -193, -524,
+  212, 475, -74, -566, 94, 511, 112, -577, -201, 408, 217, -546, -295, 338,
+  387, -13, 4, -46, 2, -76, 103, -83, 108, -55, 100, -150, 131, -156, 141,
+  -171, 179, -190, 128, -227, 172, -214, 215, -189, 265, -244, 322, -335,
+  337, -352, 358, -368, 362, -355, 366, -381, 403, -395, 411, -392, 446,
+  -458, 504, -449, 507, -464, 452, -491, 481, -534, 486, -516, 560, -535,
+  525, -537, 559, -554, 570, -616, 591, -585, 627, -509, 588, -584, 547,
+  -610, 580, -614, 635, -620, 655, -554, 546, -591, 642, -590, 660, -656,
+  629, -604, 620, -580, 617, -645, 648, -573, 612, -604, 584, -571, 597,
+  -562, 627, -550, 560, -606, 529, -584, 568, -503, 532, -463, 512, -440,
+  399, -457, 437, -349, 278, -317, 257, -220, 163, -8, -61, 18, -161, 367,
+  -1306};
+static const int16_t time2spec_out_expected_2[kSamples] = {
+  14283, -11552, -15335, 6626,
+  7554, -2150, -6309, 1307, 4523, -4, -3908, -314, 3001, 914, -2715, -1042,
+  2094, 1272, -1715, -1399, 1263, 1508, -1021, -1534, 735, 1595, -439, -1447,
+  155, 1433, 22, -1325, -268, 1205, 424, -1030, -608, 950, 643, -733, -787,
+  661, 861, -502, -888, 331, 852, -144, -849, 19, 833, 99, -826, -154,
+  771, 368, -735, -459, 645, 513, -491, -604, 431, 630, -314, -598, 183,
+  622, -78, -612, -48, 641, 154, -645, -257, 610, 281, -529, -444, 450,
+  441, -327, -506, 274, 476, -232, -570, 117, 554, -86, -531, -21, 572,
+  151, -606, -221, 496, 322, -407, -388, 407, 394, -268, -428, 280, 505,
+  -115, -588, 19, 513, -29, -539, -109, 468, 173, -501, -242, 442, 278,
+  -478, -680, 656, -659, 656, -669, 602, -688, 612, -667, 612, -642, 627,
+  -648, 653, -676, 596, -680, 655, -649, 678, -672, 587, -608, 637, -645,
+  637, -620, 556, -580, 553, -635, 518, -599, 583, -501, 536, -544, 473,
+  -552, 583, -511, 541, -532, 563, -486, 461, -453, 486, -388, 424, -416,
+  432, -374, 399, -462, 364, -346, 293, -329, 331, -313, 281, -247, 309,
+  -337, 241, -190, 207, -194, 179, -163, 155, -156, 117, -135, 107, -126,
+  29, -22, 81, -8, 17, -61, -10, 8, -37, 80, -44, 72, -88, 65, -89, 130,
+  -114, 181, -215, 189, -245, 260, -288, 294, -339, 344, -396, 407, -429,
+  438, -439, 485, -556, 629, -612, 637, -645, 661, -737, 829, -830, 831,
+  -1041};
+
+class TransformTest : public testing::Test {
+ protected:
+   TransformTest() {
+     WebRtcSpl_Init();
+   }
+
+   // Pass a function pointer to the Tester function.
+   void Time2SpecTester(Time2Spec Time2SpecFunction) {
+     // WebRtcIsacfix_Time2Spec functions hard coded the buffer lengths. It's a
+     // large buffer but we have to test it here.
+     int16_t data_in_1[kSamples] = {0};
+     int16_t data_in_2[kSamples] = {0};
+     int16_t data_out_1[kSamples] = {0};
+     int16_t data_out_2[kSamples] = {0};
+
+     for(int i = 0; i < kSamples; i++) {
+       data_in_1[i] = i * i + 1777;
+       data_in_2[i] = WEBRTC_SPL_WORD16_MAX / (i + 1) + 17;
+     }
+
+     Time2SpecFunction(data_in_1, data_in_2, data_out_1, data_out_2);
+
+     for (int i = 0; i < kSamples; i++) {
+       // We don't require bit-exact for ARM assembly code.
+       EXPECT_LE(abs(time2spec_out_expected_1[i] - data_out_1[i]), 1);
+       EXPECT_LE(abs(time2spec_out_expected_2[i] - data_out_2[i]), 1);
+     }
+   }
+
+  // Pass a function pointer to the Tester function.
+  void Spec2TimeTester(Spec2Time Spec2TimeFunction) {
+    // WebRtcIsacfix_Spec2Time functions hard coded the buffer lengths. It's a
+    // large buffer but we have to test it here.
+    int16_t data_in_1[kSamples] = {0};
+    int16_t data_in_2[kSamples] = {0};
+    int32_t data_out_1[kSamples] = {0};
+    int32_t data_out_2[kSamples] = {0};
+    for(int i = 0; i < kSamples; i++) {
+      data_in_1[i] = i * i + 1777;
+      data_in_2[i] = WEBRTC_SPL_WORD16_MAX / (i + 1) + 17;
+    }
+
+    Spec2TimeFunction(data_in_1, data_in_2, data_out_1, data_out_2);
+
+    for (int i = 0; i < kSamples; i++) {
+      // We don't require bit-exact for ARM assembly code.
+      EXPECT_LE(abs(spec2time_out_expected_1[i] - data_out_1[i]), 16);
+      EXPECT_LE(abs(spec2time_out_expected_2[i] - data_out_2[i]), 16);
+    }
+  }
+
+};
+
+TEST_F(TransformTest, Time2SpecTest) {
+  Time2SpecTester(WebRtcIsacfix_Time2SpecC);
+#if defined(WEBRTC_HAS_NEON)
+  Time2SpecTester(WebRtcIsacfix_Time2SpecNeon);
+#endif
+}
+
+TEST_F(TransformTest, Spec2TimeTest) {
+  Spec2TimeTester(WebRtcIsacfix_Spec2TimeC);
+#if defined(WEBRTC_HAS_NEON)
+  Spec2TimeTester(WebRtcIsacfix_Spec2TimeNeon);
+#endif
+}
diff --git a/modules/audio_coding/codecs/isac/locked_bandwidth_info.cc b/modules/audio_coding/codecs/isac/locked_bandwidth_info.cc
new file mode 100644
index 0000000..80d10ab
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/locked_bandwidth_info.cc
@@ -0,0 +1,21 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/locked_bandwidth_info.h"
+
+namespace webrtc {
+
+LockedIsacBandwidthInfo::LockedIsacBandwidthInfo() : ref_count_(0) {
+  bwinfo_.in_use = 0;
+}
+
+LockedIsacBandwidthInfo::~LockedIsacBandwidthInfo() = default;
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/isac/locked_bandwidth_info.h b/modules/audio_coding/codecs/isac/locked_bandwidth_info.h
new file mode 100644
index 0000000..37074f8
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/locked_bandwidth_info.h
@@ -0,0 +1,56 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_LOCKED_BANDWIDTH_INFO_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_LOCKED_BANDWIDTH_INFO_H_
+
+#include "modules/audio_coding/codecs/isac/bandwidth_info.h"
+#include "rtc_base/atomicops.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+// An IsacBandwidthInfo that's safe to access from multiple threads because
+// it's protected by a mutex.
+class LockedIsacBandwidthInfo final {
+ public:
+  LockedIsacBandwidthInfo();
+  ~LockedIsacBandwidthInfo();
+
+  IsacBandwidthInfo Get() const {
+    rtc::CritScope lock(&lock_);
+    return bwinfo_;
+  }
+
+  void Set(const IsacBandwidthInfo& bwinfo) {
+    rtc::CritScope lock(&lock_);
+    bwinfo_ = bwinfo;
+  }
+
+  int AddRef() const { return rtc::AtomicOps::Increment(&ref_count_); }
+
+  int Release() const {
+    const int count = rtc::AtomicOps::Decrement(&ref_count_);
+    if (count == 0) {
+      delete this;
+    }
+    return count;
+  }
+
+ private:
+  mutable volatile int ref_count_;
+  rtc::CriticalSection lock_;
+  IsacBandwidthInfo bwinfo_ RTC_GUARDED_BY(lock_);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_LOCKED_BANDWIDTH_INFO_H_
diff --git a/modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h b/modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h
new file mode 100644
index 0000000..fae2f3d
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h
@@ -0,0 +1,22 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_DECODER_ISAC_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_DECODER_ISAC_H_
+
+#include "modules/audio_coding/codecs/isac/audio_decoder_isac_t.h"
+#include "modules/audio_coding/codecs/isac/main/source/isac_float_type.h"
+
+namespace webrtc {
+
+using AudioDecoderIsacFloatImpl = AudioDecoderIsacT<IsacFloat>;
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_ENCODER_ISAC_H_
diff --git a/modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h b/modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h
new file mode 100644
index 0000000..dc32bcd
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h
@@ -0,0 +1,22 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_ENCODER_ISAC_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_ENCODER_ISAC_H_
+
+#include "modules/audio_coding/codecs/isac/audio_encoder_isac_t.h"
+#include "modules/audio_coding/codecs/isac/main/source/isac_float_type.h"
+
+namespace webrtc {
+
+using AudioEncoderIsacFloatImpl = AudioEncoderIsacT<IsacFloat>;
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_AUDIO_ENCODER_ISAC_H_
diff --git a/modules/audio_coding/codecs/isac/main/include/isac.h b/modules/audio_coding/codecs/isac/main/include/isac.h
new file mode 100644
index 0000000..e1ee818
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/include/isac.h
@@ -0,0 +1,724 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_ISAC_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_ISAC_H_
+
+#include <stddef.h>
+
+#include "modules/audio_coding/codecs/isac/bandwidth_info.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+typedef struct WebRtcISACStruct    ISACStruct;
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+  /******************************************************************************
+   * WebRtcIsac_AssignSize(...)
+   *
+   * This function returns the size of the ISAC instance, so that the instance
+   * can be created outside iSAC.
+   *
+   * Input:
+   *        - samplingRate      : sampling rate of the input/output audio.
+   *
+   * Output:
+   *        - sizeinbytes       : number of bytes needed to allocate for the
+   *                              instance.
+   *
+   * Return value               : 0 - Ok
+   *                             -1 - Error
+   */
+
+  int16_t WebRtcIsac_AssignSize(
+      int* sizeinbytes);
+
+
+  /******************************************************************************
+   * WebRtcIsac_Assign(...)
+   *
+   * This function assignes the memory already created to the ISAC instance.
+   *
+   * Input:
+   *        - *ISAC_main_inst   : a pointer to the coder instance.
+   *        - samplingRate      : sampling rate of the input/output audio.
+   *        - ISAC_inst_Addr    : the already allocated memory, where we put the
+   *                              iSAC structure.
+   *
+   * Return value               : 0 - Ok
+   *                             -1 - Error
+   */
+
+  int16_t WebRtcIsac_Assign(
+      ISACStruct** ISAC_main_inst,
+      void*        ISAC_inst_Addr);
+
+
+  /******************************************************************************
+   * WebRtcIsac_Create(...)
+   *
+   * This function creates an ISAC instance, which will contain the state
+   * information for one coding/decoding channel.
+   *
+   * Input:
+   *        - *ISAC_main_inst   : a pointer to the coder instance.
+   *
+   * Return value               : 0 - Ok
+   *                             -1 - Error
+   */
+
+  int16_t WebRtcIsac_Create(
+      ISACStruct** ISAC_main_inst);
+
+
+  /******************************************************************************
+   * WebRtcIsac_Free(...)
+   *
+   * This function frees the ISAC instance created at the beginning.
+   *
+   * Input:
+   *        - ISAC_main_inst    : an ISAC instance.
+   *
+   * Return value               : 0 - Ok
+   *                             -1 - Error
+   */
+
+  int16_t WebRtcIsac_Free(
+      ISACStruct* ISAC_main_inst);
+
+
+  /******************************************************************************
+   * WebRtcIsac_EncoderInit(...)
+   *
+   * This function initializes an ISAC instance prior to the encoder calls.
+   *
+   * Input:
+   *        - ISAC_main_inst    : ISAC instance.
+   *        - CodingMode        : 0 -> Bit rate and frame length are
+   *                                automatically adjusted to available bandwidth
+   *                                on transmission channel, just valid if codec
+   *                                is created to work in wideband mode.
+   *                              1 -> User sets a frame length and a target bit
+   *                                rate which is taken as the maximum
+   *                                short-term average bit rate.
+   *
+   * Return value               : 0 - Ok
+   *                             -1 - Error
+   */
+
+  int16_t WebRtcIsac_EncoderInit(
+      ISACStruct* ISAC_main_inst,
+      int16_t CodingMode);
+
+
+  /******************************************************************************
+   * WebRtcIsac_Encode(...)
+   *
+   * This function encodes 10ms audio blocks and inserts it into a package.
+   * Input speech length has 160 samples if operating at 16 kHz sampling
+   * rate, or 320 if operating at 32 kHz sampling rate. The encoder buffers the
+   * input audio until the whole frame is buffered then proceeds with encoding.
+   *
+   *
+   * Input:
+   *        - ISAC_main_inst    : ISAC instance.
+   *        - speechIn          : input speech vector.
+   *
+   * Output:
+   *        - encoded           : the encoded data vector
+   *
+   * Return value:
+   *                            : >0 - Length (in bytes) of coded data
+   *                            :  0 - The buffer didn't reach the chosen
+   *                               frame-size so it keeps buffering speech
+   *                               samples.
+   *                            : -1 - Error
+   */
+
+  int WebRtcIsac_Encode(
+      ISACStruct*        ISAC_main_inst,
+      const int16_t* speechIn,
+      uint8_t* encoded);
+
+
+  /******************************************************************************
+   * WebRtcIsac_DecoderInit(...)
+   *
+   * This function initializes an ISAC instance prior to the decoder calls.
+   *
+   * Input:
+   *        - ISAC_main_inst    : ISAC instance.
+   */
+
+  void WebRtcIsac_DecoderInit(ISACStruct* ISAC_main_inst);
+
+  /******************************************************************************
+   * WebRtcIsac_UpdateBwEstimate(...)
+   *
+   * This function updates the estimate of the bandwidth.
+   *
+   * Input:
+   *        - ISAC_main_inst    : ISAC instance.
+   *        - encoded           : encoded ISAC frame(s).
+   *        - packet_size       : size of the packet.
+   *        - rtp_seq_number    : the RTP number of the packet.
+   *        - send_ts           : the RTP send timestamp, given in samples
+   *        - arr_ts            : the arrival time of the packet (from NetEq)
+   *                              in samples.
+   *
+   * Return value               : 0 - Ok
+   *                             -1 - Error
+   */
+
+  int16_t WebRtcIsac_UpdateBwEstimate(
+      ISACStruct*         ISAC_main_inst,
+      const uint8_t* encoded,
+      size_t         packet_size,
+      uint16_t        rtp_seq_number,
+      uint32_t        send_ts,
+      uint32_t        arr_ts);
+
+
+  /******************************************************************************
+   * WebRtcIsac_Decode(...)
+   *
+   * This function decodes an ISAC frame. At 16 kHz sampling rate, the length
+   * of the output audio could be either 480 or 960 samples, equivalent to
+   * 30 or 60 ms respectively. At 32 kHz sampling rate, the length of the
+   * output audio is 960 samples, which is 30 ms.
+   *
+   * Input:
+   *        - ISAC_main_inst    : ISAC instance.
+   *        - encoded           : encoded ISAC frame(s).
+   *        - len               : bytes in encoded vector.
+   *
+   * Output:
+   *        - decoded           : The decoded vector.
+   *
+   * Return value               : >0 - number of samples in decoded vector.
+   *                              -1 - Error.
+   */
+
+  int WebRtcIsac_Decode(
+      ISACStruct*           ISAC_main_inst,
+      const uint8_t* encoded,
+      size_t         len,
+      int16_t*        decoded,
+      int16_t*        speechType);
+
+
+  /******************************************************************************
+   * WebRtcIsac_DecodePlc(...)
+   *
+   * This function conducts PLC for ISAC frame(s). Output speech length
+   * will be a multiple of frames, i.e. multiples of 30 ms audio. Therefore,
+   * the output is multiple of 480 samples if operating at 16 kHz and multiple
+   * of 960 if operating at 32 kHz.
+   *
+   * Input:
+   *        - ISAC_main_inst    : ISAC instance.
+   *        - noOfLostFrames    : Number of PLC frames to produce.
+   *
+   * Output:
+   *        - decoded           : The decoded vector.
+   *
+   * Return value               : Number of samples in decoded PLC vector
+   */
+
+  size_t WebRtcIsac_DecodePlc(
+      ISACStruct*  ISAC_main_inst,
+      int16_t* decoded,
+      size_t  noOfLostFrames);
+
+
+  /******************************************************************************
+   * WebRtcIsac_Control(...)
+   *
+   * This function sets the limit on the short-term average bit-rate and the
+   * frame length. Should be used only in Instantaneous mode. At 16 kHz sampling
+   * rate, an average bit-rate between 10000 to 32000 bps is valid and a
+   * frame-size of 30 or 60 ms is acceptable. At 32 kHz, an average bit-rate
+   * between 10000 to 56000 is acceptable, and the valid frame-size is 30 ms.
+   *
+   * Input:
+   *        - ISAC_main_inst    : ISAC instance.
+   *        - rate              : limit on the short-term average bit rate,
+   *                              in bits/second.
+   *        - framesize         : frame-size in millisecond.
+   *
+   * Return value               : 0  - ok
+   *                             -1 - Error
+   */
+
+  int16_t WebRtcIsac_Control(
+      ISACStruct*   ISAC_main_inst,
+      int32_t rate,
+      int framesize);
+
+  void WebRtcIsac_SetInitialBweBottleneck(ISACStruct* ISAC_main_inst,
+                                          int bottleneck_bits_per_second);
+
+  /******************************************************************************
+   * WebRtcIsac_ControlBwe(...)
+   *
+   * This function sets the initial values of bottleneck and frame-size if
+   * iSAC is used in channel-adaptive mode. Therefore, this API is not
+   * applicable if the codec is created to operate in super-wideband mode.
+   *
+   * Through this API, users can enforce a frame-size for all values of
+   * bottleneck. Then iSAC will not automatically change the frame-size.
+   *
+   *
+   * Input:
+   *        - ISAC_main_inst    : ISAC instance.
+   *        - rateBPS           : initial value of bottleneck in bits/second
+   *                              10000 <= rateBPS <= 56000 is accepted
+   *                              For default bottleneck set rateBPS = 0
+   *        - frameSizeMs       : number of milliseconds per frame (30 or 60)
+   *        - enforceFrameSize  : 1 to enforce the given frame-size through
+   *                              out the adaptation process, 0 to let iSAC
+   *                              change the frame-size if required.
+   *
+   * Return value               : 0  - ok
+   *                             -1 - Error
+   */
+
+  int16_t WebRtcIsac_ControlBwe(
+      ISACStruct* ISAC_main_inst,
+      int32_t rateBPS,
+      int frameSizeMs,
+      int16_t enforceFrameSize);
+
+
+  /******************************************************************************
+   * WebRtcIsac_ReadFrameLen(...)
+   *
+   * This function returns the length of the frame represented in the packet.
+   *
+   * Input:
+   *        - encoded           : Encoded bit-stream
+   *
+   * Output:
+   *        - frameLength       : Length of frame in packet (in samples)
+   *
+   */
+
+  int16_t WebRtcIsac_ReadFrameLen(
+      ISACStruct*          ISAC_main_inst,
+      const uint8_t* encoded,
+      int16_t*       frameLength);
+
+
+  /******************************************************************************
+   * WebRtcIsac_version(...)
+   *
+   * This function returns the version number.
+   *
+   * Output:
+   *        - version      : Pointer to character string
+   *
+   */
+
+  void WebRtcIsac_version(
+      char *version);
+
+
+  /******************************************************************************
+   * WebRtcIsac_GetErrorCode(...)
+   *
+   * This function can be used to check the error code of an iSAC instance. When
+   * a function returns -1 a error code will be set for that instance. The
+   * function below extract the code of the last error that occurred in the
+   * specified instance.
+   *
+   * Input:
+   *        - ISAC_main_inst    : ISAC instance
+   *
+   * Return value               : Error code
+   */
+
+  int16_t WebRtcIsac_GetErrorCode(
+      ISACStruct* ISAC_main_inst);
+
+
+  /****************************************************************************
+   * WebRtcIsac_GetUplinkBw(...)
+   *
+   * This function outputs the target bottleneck of the codec. In
+   * channel-adaptive mode, the target bottleneck is specified through in-band
+   * signalling retreived by bandwidth estimator.
+   * In channel-independent, also called instantaneous mode, the target
+   * bottleneck is provided to the encoder by calling xxx_control(...). If
+   * xxx_control is never called the default values is returned. The default
+   * value for bottleneck at 16 kHz encoder sampling rate is 32000 bits/sec,
+   * and it is 56000 bits/sec for 32 kHz sampling rate.
+   * Note that the output is the iSAC internal operating bottleneck which might
+   * differ slightly from the one provided through xxx_control().
+   *
+   * Input:
+   *        - ISAC_main_inst    : iSAC instance
+   *
+   * Output:
+   *        - *bottleneck       : bottleneck in bits/sec
+   *
+   * Return value               : -1 if error happens
+   *                               0 bit-rates computed correctly.
+   */
+
+  int16_t WebRtcIsac_GetUplinkBw(
+      ISACStruct*    ISAC_main_inst,
+      int32_t* bottleneck);
+
+
+  /******************************************************************************
+   * WebRtcIsac_SetMaxPayloadSize(...)
+   *
+   * This function sets a limit for the maximum payload size of iSAC. The same
+   * value is used both for 30 and 60 ms packets. If the encoder sampling rate
+   * is 16 kHz the maximum payload size is between 120 and 400 bytes. If the
+   * encoder sampling rate is 32 kHz the maximum payload size is between 120
+   * and 600 bytes.
+   *
+   * If an out of range limit is used, the function returns -1, but the closest
+   * valid value will be applied.
+   *
+   * ---------------
+   * IMPORTANT NOTES
+   * ---------------
+   * The size of a packet is limited to the minimum of 'max-payload-size' and
+   * 'max-rate.' For instance, let's assume the max-payload-size is set to
+   * 170 bytes, and max-rate is set to 40 kbps. Note that a limit of 40 kbps
+   * translates to 150 bytes for 30ms frame-size & 300 bytes for 60ms
+   * frame-size. Then a packet with a frame-size of 30 ms is limited to 150,
+   * i.e. min(170, 150), and a packet with 60 ms frame-size is limited to
+   * 170 bytes, i.e. min(170, 300).
+   *
+   * Input:
+   *        - ISAC_main_inst    : iSAC instance
+   *        - maxPayloadBytes   : maximum size of the payload in bytes
+   *                              valid values are between 120 and 400 bytes
+   *                              if encoder sampling rate is 16 kHz. For
+   *                              32 kHz encoder sampling rate valid values
+   *                              are between 120 and 600 bytes.
+   *
+   * Return value               : 0 if successful
+   *                             -1 if error happens
+   */
+
+  int16_t WebRtcIsac_SetMaxPayloadSize(
+      ISACStruct* ISAC_main_inst,
+      int16_t maxPayloadBytes);
+
+
+  /******************************************************************************
+   * WebRtcIsac_SetMaxRate(...)
+   *
+   * This function sets the maximum rate which the codec may not exceed for
+   * any signal packet. The maximum rate is defined and payload-size per
+   * frame-size in bits per second.
+   *
+   * The codec has a maximum rate of 53400 bits per second (200 bytes per 30
+   * ms) if the encoder sampling rate is 16kHz, and 160 kbps (600 bytes/30 ms)
+   * if the encoder sampling rate is 32 kHz.
+   *
+   * It is possible to set a maximum rate between 32000 and 53400 bits/sec
+   * in wideband mode, and 32000 to 160000 bits/sec in super-wideband mode.
+   *
+   * If an out of range limit is used, the function returns -1, but the closest
+   * valid value will be applied.
+   *
+   * ---------------
+   * IMPORTANT NOTES
+   * ---------------
+   * The size of a packet is limited to the minimum of 'max-payload-size' and
+   * 'max-rate.' For instance, let's assume the max-payload-size is set to
+   * 170 bytes, and max-rate is set to 40 kbps. Note that a limit of 40 kbps
+   * translates to 150 bytes for 30ms frame-size & 300 bytes for 60ms
+   * frame-size. Then a packet with a frame-size of 30 ms is limited to 150,
+   * i.e. min(170, 150), and a packet with 60 ms frame-size is limited to
+   * 170 bytes, min(170, 300).
+   *
+   * Input:
+   *        - ISAC_main_inst    : iSAC instance
+   *        - maxRate           : maximum rate in bits per second,
+   *                              valid values are 32000 to 53400 bits/sec in
+   *                              wideband mode, and 32000 to 160000 bits/sec in
+   *                              super-wideband mode.
+   *
+   * Return value               : 0 if successful
+   *                             -1 if error happens
+   */
+
+  int16_t WebRtcIsac_SetMaxRate(
+      ISACStruct* ISAC_main_inst,
+      int32_t maxRate);
+
+
+  /******************************************************************************
+   * WebRtcIsac_DecSampRate()
+   * Return the sampling rate of the decoded audio.
+   *
+   * Input:
+   *        - ISAC_main_inst    : iSAC instance
+   *
+   * Return value               : sampling frequency in Hertz.
+   *
+   */
+
+  uint16_t WebRtcIsac_DecSampRate(ISACStruct* ISAC_main_inst);
+
+
+  /******************************************************************************
+   * WebRtcIsac_EncSampRate()
+   *
+   * Input:
+   *        - ISAC_main_inst    : iSAC instance
+   *
+   * Return value               : sampling rate in Hertz.
+   *
+   */
+
+  uint16_t WebRtcIsac_EncSampRate(ISACStruct* ISAC_main_inst);
+
+
+  /******************************************************************************
+   * WebRtcIsac_SetDecSampRate()
+   * Set the sampling rate of the decoder.  Initialization of the decoder WILL
+   * NOT overwrite the sampling rate of the encoder. The default value is 16 kHz
+   * which is set when the instance is created.
+   *
+   * Input:
+   *        - ISAC_main_inst    : iSAC instance
+   *        - sampRate          : sampling rate in Hertz.
+   *
+   * Return value               : 0 if successful
+   *                             -1 if failed.
+   */
+
+  int16_t WebRtcIsac_SetDecSampRate(ISACStruct* ISAC_main_inst,
+                                          uint16_t samp_rate_hz);
+
+
+  /******************************************************************************
+   * WebRtcIsac_SetEncSampRate()
+   * Set the sampling rate of the encoder. Initialization of the encoder WILL
+   * NOT overwrite the sampling rate of the encoder. The default value is 16 kHz
+   * which is set when the instance is created. The encoding-mode and the
+   * bottleneck remain unchanged by this call, however, the maximum rate and
+   * maximum payload-size will reset to their default value.
+   *
+   * Input:
+   *        - ISAC_main_inst    : iSAC instance
+   *        - sampRate          : sampling rate in Hertz.
+   *
+   * Return value               : 0 if successful
+   *                             -1 if failed.
+   */
+
+  int16_t WebRtcIsac_SetEncSampRate(ISACStruct* ISAC_main_inst,
+                                          uint16_t sample_rate_hz);
+
+
+
+  /******************************************************************************
+   * WebRtcIsac_GetNewBitStream(...)
+   *
+   * This function returns encoded data, with the recieved bwe-index in the
+   * stream. If the rate is set to a value less than bottleneck of codec
+   * the new bistream will be re-encoded with the given target rate.
+   * It should always return a complete packet, i.e. only called once
+   * even for 60 msec frames.
+   *
+   * NOTE 1! This function does not write in the ISACStruct, it is not allowed.
+   * NOTE 2! Currently not implemented for SWB mode.
+   * NOTE 3! Rates larger than the bottleneck of the codec will be limited
+   *         to the current bottleneck.
+   *
+   * Input:
+   *        - ISAC_main_inst    : ISAC instance.
+   *        - bweIndex          : Index of bandwidth estimate to put in new
+   *                              bitstream
+   *        - rate              : target rate of the transcoder is bits/sec.
+   *                              Valid values are the accepted rate in iSAC,
+   *                              i.e. 10000 to 56000.
+   *        - isRCU                       : if the new bit-stream is an RCU stream.
+   *                              Note that the rate parameter always indicates
+   *                              the target rate of the main payload, regardless
+   *                              of 'isRCU' value.
+   *
+   * Output:
+   *        - encoded           : The encoded data vector
+   *
+   * Return value               : >0 - Length (in bytes) of coded data
+   *                              -1 - Error  or called in SWB mode
+   *                                 NOTE! No error code is written to
+   *                                 the struct since it is only allowed to read
+   *                                 the struct.
+   */
+  int16_t WebRtcIsac_GetNewBitStream(
+      ISACStruct*    ISAC_main_inst,
+      int16_t  bweIndex,
+      int16_t  jitterInfo,
+      int32_t  rate,
+      uint8_t* encoded,
+      int16_t  isRCU);
+
+
+
+  /****************************************************************************
+   * WebRtcIsac_GetDownLinkBwIndex(...)
+   *
+   * This function returns index representing the Bandwidth estimate from
+   * other side to this side.
+   *
+   * Input:
+   *        - ISAC_main_inst    : iSAC struct
+   *
+   * Output:
+   *        - bweIndex          : Bandwidth estimate to transmit to other side.
+   *
+   */
+
+  int16_t WebRtcIsac_GetDownLinkBwIndex(
+      ISACStruct*  ISAC_main_inst,
+      int16_t* bweIndex,
+      int16_t* jitterInfo);
+
+
+  /****************************************************************************
+   * WebRtcIsac_UpdateUplinkBw(...)
+   *
+   * This function takes an index representing the Bandwidth estimate from
+   * this side to other side and updates BWE.
+   *
+   * Input:
+   *        - ISAC_main_inst    : iSAC struct
+   *        - bweIndex          : Bandwidth estimate from other side.
+   *
+   */
+
+  int16_t WebRtcIsac_UpdateUplinkBw(
+      ISACStruct* ISAC_main_inst,
+      int16_t bweIndex);
+
+
+  /****************************************************************************
+   * WebRtcIsac_ReadBwIndex(...)
+   *
+   * This function returns the index of the Bandwidth estimate from the bitstream.
+   *
+   * Input:
+   *        - encoded           : Encoded bitstream
+   *
+   * Output:
+   *        - frameLength       : Length of frame in packet (in samples)
+   *        - bweIndex         : Bandwidth estimate in bitstream
+   *
+   */
+
+  int16_t WebRtcIsac_ReadBwIndex(
+      const uint8_t* encoded,
+      int16_t*       bweIndex);
+
+
+
+  /*******************************************************************************
+   * WebRtcIsac_GetNewFrameLen(...)
+   *
+   * returns the frame lenght (in samples) of the next packet. In the case of channel-adaptive
+   * mode, iSAC decides on its frame lenght based on the estimated bottleneck
+   * this allows a user to prepare for the next packet (at the encoder)
+   *
+   * The primary usage is in CE to make the iSAC works in channel-adaptive mode
+   *
+   * Input:
+   *        - ISAC_main_inst     : iSAC struct
+   *
+   * Return Value                : frame lenght in samples
+   *
+   */
+
+  int16_t WebRtcIsac_GetNewFrameLen(
+      ISACStruct* ISAC_main_inst);
+
+
+  /****************************************************************************
+   *  WebRtcIsac_GetRedPayload(...)
+   *
+   *  Populates "encoded" with the redundant payload of the recently encoded
+   *  frame. This function has to be called once that WebRtcIsac_Encode(...)
+   *  returns a positive value. Regardless of the frame-size this function will
+   *  be called only once after encoding is completed.
+   *
+   * Input:
+   *      - ISAC_main_inst    : iSAC struct
+   *
+   * Output:
+   *        - encoded            : the encoded data vector
+   *
+   *
+   * Return value:
+   *                              : >0 - Length (in bytes) of coded data
+   *                              : -1 - Error
+   *
+   *
+   */
+  int16_t WebRtcIsac_GetRedPayload(
+      ISACStruct*    ISAC_main_inst,
+      uint8_t* encoded);
+
+
+  /****************************************************************************
+   * WebRtcIsac_DecodeRcu(...)
+   *
+   * This function decodes a redundant (RCU) iSAC frame. Function is called in
+   * NetEq with a stored RCU payload i case of packet loss. Output speech length
+   * will be a multiple of 480 samples: 480 or 960 samples,
+   * depending on the framesize (30 or 60 ms).
+   *
+   * Input:
+   *      - ISAC_main_inst     : ISAC instance.
+   *      - encoded            : encoded ISAC RCU frame(s)
+   *      - len                : bytes in encoded vector
+   *
+   * Output:
+   *      - decoded            : The decoded vector
+   *
+   * Return value              : >0 - number of samples in decoded vector
+   *                             -1 - Error
+   */
+  int WebRtcIsac_DecodeRcu(
+      ISACStruct*           ISAC_main_inst,
+      const uint8_t* encoded,
+      size_t         len,
+      int16_t*        decoded,
+      int16_t*        speechType);
+
+  /* Fills in an IsacBandwidthInfo struct. |inst| should be a decoder. */
+  void WebRtcIsac_GetBandwidthInfo(ISACStruct* inst, IsacBandwidthInfo* bwinfo);
+
+  /* Uses the values from an IsacBandwidthInfo struct. |inst| should be an
+     encoder. */
+  void WebRtcIsac_SetBandwidthInfo(ISACStruct* inst,
+                                   const IsacBandwidthInfo* bwinfo);
+
+  /* If |inst| is a decoder but not an encoder: tell it what sample rate the
+     encoder is using, for bandwidth estimation purposes. */
+  void WebRtcIsac_SetEncSampRateInDecoder(ISACStruct* inst, int sample_rate_hz);
+
+#if defined(__cplusplus)
+}
+#endif
+
+
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INCLUDE_ISAC_H_ */
diff --git a/modules/audio_coding/codecs/isac/main/source/arith_routines.c b/modules/audio_coding/codecs/isac/main/source/arith_routines.c
new file mode 100644
index 0000000..9d5c693
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/arith_routines.c
@@ -0,0 +1,60 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/arith_routines.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+
+
+/*
+ * terminate and return byte stream;
+ * returns the number of bytes in the stream
+ */
+int WebRtcIsac_EncTerminate(Bitstr *streamdata) /* in-/output struct containing bitstream */
+{
+  uint8_t *stream_ptr;
+
+
+  /* point to the right place in the stream buffer */
+  stream_ptr = streamdata->stream + streamdata->stream_index;
+
+  /* find minimum length (determined by current interval width) */
+  if ( streamdata->W_upper > 0x01FFFFFF )
+  {
+    streamdata->streamval += 0x01000000;
+    /* add carry to buffer */
+    if (streamdata->streamval < 0x01000000)
+    {
+      /* propagate carry */
+      while ( !(++(*--stream_ptr)) );
+      /* put pointer back to the old value */
+      stream_ptr = streamdata->stream + streamdata->stream_index;
+    }
+    /* write remaining data to bitstream */
+    *stream_ptr++ = (uint8_t) (streamdata->streamval >> 24);
+  }
+  else
+  {
+    streamdata->streamval += 0x00010000;
+    /* add carry to buffer */
+    if (streamdata->streamval < 0x00010000)
+    {
+      /* propagate carry */
+      while ( !(++(*--stream_ptr)) );
+      /* put pointer back to the old value */
+      stream_ptr = streamdata->stream + streamdata->stream_index;
+    }
+    /* write remaining data to bitstream */
+    *stream_ptr++ = (uint8_t) (streamdata->streamval >> 24);
+    *stream_ptr++ = (uint8_t) ((streamdata->streamval >> 16) & 0x00FF);
+  }
+
+  /* calculate stream length */
+  return (int)(stream_ptr - streamdata->stream);
+}
diff --git a/modules/audio_coding/codecs/isac/main/source/arith_routines.h b/modules/audio_coding/codecs/isac/main/source/arith_routines.h
new file mode 100644
index 0000000..d001c68
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/arith_routines.h
@@ -0,0 +1,62 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * arith_routines.h
+ *
+ * Functions for arithmetic coding.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ARITH_ROUTINES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ARITH_ROUTINES_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+int WebRtcIsac_EncLogisticMulti2(
+    Bitstr *streamdata,              /* in-/output struct containing bitstream */
+    int16_t *dataQ7,           /* input: data vector */
+    const uint16_t *env,       /* input: side info vector defining the width of the pdf */
+    const int N,                     /* input: data vector length */
+    const int16_t isSWB12kHz); /* if the codec is working in 12kHz bandwidth */
+
+/* returns the number of bytes in the stream */
+int WebRtcIsac_EncTerminate(Bitstr *streamdata); /* in-/output struct containing bitstream */
+
+/* returns the number of bytes in the stream so far */
+int WebRtcIsac_DecLogisticMulti2(
+    int16_t *data,             /* output: data vector */
+    Bitstr *streamdata,              /* in-/output struct containing bitstream */
+    const uint16_t *env,       /* input: side info vector defining the width of the pdf */
+    const int16_t *dither,     /* input: dither vector */
+    const int N,                     /* input: data vector length */
+    const int16_t isSWB12kHz); /* if the codec is working in 12kHz bandwidth */
+
+void WebRtcIsac_EncHistMulti(
+    Bitstr *streamdata,         /* in-/output struct containing bitstream */
+    const int *data,            /* input: data vector */
+    const uint16_t *const *cdf, /* input: array of cdf arrays */
+    const int N);               /* input: data vector length */
+
+int WebRtcIsac_DecHistBisectMulti(
+    int *data,                      /* output: data vector */
+    Bitstr *streamdata,             /* in-/output struct containing bitstream */
+    const uint16_t *const *cdf,     /* input: array of cdf arrays */
+    const uint16_t *cdf_size, /* input: array of cdf table sizes+1 (power of two: 2^k) */
+    const int N);                   /* input: data vector length */
+
+int WebRtcIsac_DecHistOneStepMulti(
+    int *data,                       /* output: data vector */
+    Bitstr *streamdata,              /* in-/output struct containing bitstream */
+    const uint16_t *const *cdf,      /* input: array of cdf arrays */
+    const uint16_t *init_index,/* input: vector of initial cdf table search entries */
+    const int N);                    /* input: data vector length */
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ARITH_ROUTINES_H_ */
diff --git a/modules/audio_coding/codecs/isac/main/source/arith_routines_hist.c b/modules/audio_coding/codecs/isac/main/source/arith_routines_hist.c
new file mode 100644
index 0000000..e948979
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/arith_routines_hist.c
@@ -0,0 +1,291 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/arith_routines.h"
+
+
+/*
+ * code symbols into arithmetic bytestream
+ */
+void WebRtcIsac_EncHistMulti(Bitstr *streamdata, /* in-/output struct containing bitstream */
+                             const int *data,  /* input: data vector */
+                             const uint16_t *const *cdf, /* input: array of cdf arrays */
+                             const int N)   /* input: data vector length */
+{
+  uint32_t W_lower, W_upper;
+  uint32_t W_upper_LSB, W_upper_MSB;
+  uint8_t *stream_ptr;
+  uint8_t *stream_ptr_carry;
+  uint32_t cdf_lo, cdf_hi;
+  int k;
+
+
+  /* point to beginning of stream buffer */
+  stream_ptr = streamdata->stream + streamdata->stream_index;
+  W_upper = streamdata->W_upper;
+
+  for (k=N; k>0; k--)
+  {
+    /* fetch cdf_lower and cdf_upper from cdf tables */
+    cdf_lo = (uint32_t) *(*cdf + *data);
+    cdf_hi = (uint32_t) *(*cdf++ + *data++ + 1);
+
+    /* update interval */
+    W_upper_LSB = W_upper & 0x0000FFFF;
+    W_upper_MSB = W_upper >> 16;
+    W_lower = W_upper_MSB * cdf_lo;
+    W_lower += (W_upper_LSB * cdf_lo) >> 16;
+    W_upper = W_upper_MSB * cdf_hi;
+    W_upper += (W_upper_LSB * cdf_hi) >> 16;
+
+    /* shift interval such that it begins at zero */
+    W_upper -= ++W_lower;
+
+    /* add integer to bitstream */
+    streamdata->streamval += W_lower;
+
+    /* handle carry */
+    if (streamdata->streamval < W_lower)
+    {
+      /* propagate carry */
+      stream_ptr_carry = stream_ptr;
+      while (!(++(*--stream_ptr_carry)));
+    }
+
+    /* renormalize interval, store most significant byte of streamval and update streamval */
+    while ( !(W_upper & 0xFF000000) )      /* W_upper < 2^24 */
+    {
+      W_upper <<= 8;
+      *stream_ptr++ = (uint8_t) (streamdata->streamval >> 24);
+      streamdata->streamval <<= 8;
+    }
+  }
+
+  /* calculate new stream_index */
+  streamdata->stream_index = (int)(stream_ptr - streamdata->stream);
+  streamdata->W_upper = W_upper;
+
+  return;
+}
+
+
+
+/*
+ * function to decode more symbols from the arithmetic bytestream, using method of bisection
+ * cdf tables should be of size 2^k-1 (which corresponds to an alphabet size of 2^k-2)
+ */
+int WebRtcIsac_DecHistBisectMulti(int *data,     /* output: data vector */
+                                  Bitstr *streamdata,   /* in-/output struct containing bitstream */
+                                  const uint16_t *const *cdf,  /* input: array of cdf arrays */
+                                  const uint16_t *cdf_size, /* input: array of cdf table sizes+1 (power of two: 2^k) */
+                                  const int N)    /* input: data vector length */
+{
+  uint32_t    W_lower, W_upper;
+  uint32_t    W_tmp;
+  uint32_t    W_upper_LSB, W_upper_MSB;
+  uint32_t    streamval;
+  const   uint8_t *stream_ptr;
+  const   uint16_t *cdf_ptr;
+  int     size_tmp;
+  int     k;
+
+  W_lower = 0; //to remove warning -DH
+  stream_ptr = streamdata->stream + streamdata->stream_index;
+  W_upper = streamdata->W_upper;
+  if (W_upper == 0)
+    /* Should not be possible in normal operation */
+    return -2;
+
+  if (streamdata->stream_index == 0)   /* first time decoder is called for this stream */
+  {
+    /* read first word from bytestream */
+    streamval = *stream_ptr << 24;
+    streamval |= *++stream_ptr << 16;
+    streamval |= *++stream_ptr << 8;
+    streamval |= *++stream_ptr;
+  } else {
+    streamval = streamdata->streamval;
+  }
+
+  for (k=N; k>0; k--)
+  {
+    /* find the integer *data for which streamval lies in [W_lower+1, W_upper] */
+    W_upper_LSB = W_upper & 0x0000FFFF;
+    W_upper_MSB = W_upper >> 16;
+
+    /* start halfway the cdf range */
+    size_tmp = *cdf_size++ >> 1;
+    cdf_ptr = *cdf + (size_tmp - 1);
+
+    /* method of bisection */
+    for ( ;; )
+    {
+      W_tmp = W_upper_MSB * *cdf_ptr;
+      W_tmp += (W_upper_LSB * *cdf_ptr) >> 16;
+      size_tmp >>= 1;
+      if (size_tmp == 0) break;
+      if (streamval > W_tmp)
+      {
+        W_lower = W_tmp;
+        cdf_ptr += size_tmp;
+      } else {
+        W_upper = W_tmp;
+        cdf_ptr -= size_tmp;
+      }
+    }
+    if (streamval > W_tmp)
+    {
+      W_lower = W_tmp;
+      *data++ = (int)(cdf_ptr - *cdf++);
+    } else {
+      W_upper = W_tmp;
+      *data++ = (int)(cdf_ptr - *cdf++ - 1);
+    }
+
+    /* shift interval to start at zero */
+    W_upper -= ++W_lower;
+
+    /* add integer to bitstream */
+    streamval -= W_lower;
+
+    /* renormalize interval and update streamval */
+    while ( !(W_upper & 0xFF000000) )    /* W_upper < 2^24 */
+    {
+      /* read next byte from stream */
+      streamval = (streamval << 8) | *++stream_ptr;
+      W_upper <<= 8;
+    }
+
+    if (W_upper == 0)
+      /* Should not be possible in normal operation */
+      return -2;
+
+
+  }
+
+  streamdata->stream_index = (int)(stream_ptr - streamdata->stream);
+  streamdata->W_upper = W_upper;
+  streamdata->streamval = streamval;
+
+
+  /* find number of bytes in original stream (determined by current interval width) */
+  if ( W_upper > 0x01FFFFFF )
+    return streamdata->stream_index - 2;
+  else
+    return streamdata->stream_index - 1;
+}
+
+
+
+/*
+ * function to decode more symbols from the arithmetic bytestream, taking single step up or
+ * down at a time
+ * cdf tables can be of arbitrary size, but large tables may take a lot of iterations
+ */
+int WebRtcIsac_DecHistOneStepMulti(int *data,        /* output: data vector */
+                                   Bitstr *streamdata,      /* in-/output struct containing bitstream */
+                                   const uint16_t *const *cdf,   /* input: array of cdf arrays */
+                                   const uint16_t *init_index, /* input: vector of initial cdf table search entries */
+                                   const int N)     /* input: data vector length */
+{
+  uint32_t    W_lower, W_upper;
+  uint32_t    W_tmp;
+  uint32_t    W_upper_LSB, W_upper_MSB;
+  uint32_t    streamval;
+  const   uint8_t *stream_ptr;
+  const   uint16_t *cdf_ptr;
+  int     k;
+
+
+  stream_ptr = streamdata->stream + streamdata->stream_index;
+  W_upper = streamdata->W_upper;
+  if (W_upper == 0)
+    /* Should not be possible in normal operation */
+    return -2;
+
+  if (streamdata->stream_index == 0)   /* first time decoder is called for this stream */
+  {
+    /* read first word from bytestream */
+    streamval = (uint32_t)(*stream_ptr) << 24;
+    streamval |= (uint32_t)(*++stream_ptr) << 16;
+    streamval |= (uint32_t)(*++stream_ptr) << 8;
+    streamval |= (uint32_t)(*++stream_ptr);
+  } else {
+    streamval = streamdata->streamval;
+  }
+
+
+  for (k=N; k>0; k--)
+  {
+    /* find the integer *data for which streamval lies in [W_lower+1, W_upper] */
+    W_upper_LSB = W_upper & 0x0000FFFF;
+    W_upper_MSB = W_upper >> 16;
+
+    /* start at the specified table entry */
+    cdf_ptr = *cdf + (*init_index++);
+    W_tmp = W_upper_MSB * *cdf_ptr;
+    W_tmp += (W_upper_LSB * *cdf_ptr) >> 16;
+    if (streamval > W_tmp)
+    {
+      for ( ;; )
+      {
+        W_lower = W_tmp;
+        if (cdf_ptr[0]==65535)
+          /* range check */
+          return -3;
+        W_tmp = W_upper_MSB * *++cdf_ptr;
+        W_tmp += (W_upper_LSB * *cdf_ptr) >> 16;
+        if (streamval <= W_tmp) break;
+      }
+      W_upper = W_tmp;
+      *data++ = (int)(cdf_ptr - *cdf++ - 1);
+    } else {
+      for ( ;; )
+      {
+        W_upper = W_tmp;
+        --cdf_ptr;
+        if (cdf_ptr<*cdf) {
+          /* range check */
+          return -3;
+        }
+        W_tmp = W_upper_MSB * *cdf_ptr;
+        W_tmp += (W_upper_LSB * *cdf_ptr) >> 16;
+        if (streamval > W_tmp) break;
+      }
+      W_lower = W_tmp;
+      *data++ = (int)(cdf_ptr - *cdf++);
+    }
+
+    /* shift interval to start at zero */
+    W_upper -= ++W_lower;
+    /* add integer to bitstream */
+    streamval -= W_lower;
+
+    /* renormalize interval and update streamval */
+    while ( !(W_upper & 0xFF000000) )    /* W_upper < 2^24 */
+    {
+      /* read next byte from stream */
+      streamval = (streamval << 8) | *++stream_ptr;
+      W_upper <<= 8;
+    }
+  }
+
+  streamdata->stream_index = (int)(stream_ptr - streamdata->stream);
+  streamdata->W_upper = W_upper;
+  streamdata->streamval = streamval;
+
+
+  /* find number of bytes in original stream (determined by current interval width) */
+  if ( W_upper > 0x01FFFFFF )
+    return streamdata->stream_index - 2;
+  else
+    return streamdata->stream_index - 1;
+}
diff --git a/modules/audio_coding/codecs/isac/main/source/arith_routines_logist.c b/modules/audio_coding/codecs/isac/main/source/arith_routines_logist.c
new file mode 100644
index 0000000..777780f
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/arith_routines_logist.c
@@ -0,0 +1,303 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * arith_routines.h
+ *
+ * This file contains functions for arithmatically encoding and
+ * decoding DFT coefficients.
+ *
+ */
+
+
+#include "modules/audio_coding/codecs/isac/main/source/arith_routines.h"
+
+
+
+static const int32_t kHistEdgesQ15[51] = {
+  -327680, -314573, -301466, -288359, -275252, -262144, -249037, -235930, -222823, -209716,
+  -196608, -183501, -170394, -157287, -144180, -131072, -117965, -104858, -91751, -78644,
+  -65536, -52429, -39322, -26215, -13108,  0,  13107,  26214,  39321,  52428,
+  65536,  78643,  91750,  104857,  117964,  131072,  144179,  157286,  170393,  183500,
+  196608,  209715,  222822,  235929,  249036,  262144,  275251,  288358,  301465,  314572,
+  327680};
+
+
+static const int kCdfSlopeQ0[51] = {  /* Q0 */
+  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,
+  5,  5,  13,  23,  47,  87,  154,  315,  700,  1088,
+  2471,  6064,  14221,  21463,  36634,  36924,  19750,  13270,  5806,  2312,
+  1095,  660,  316,  145,  86,  41,  32,  5,  5,  5,
+  5,  5,  5,  5,  5,  5,  5,  5,  5,  2, 0};
+
+
+static const int kCdfQ16[51] = {  /* Q16 */
+  0,  2,  4,  6,  8,  10,  12,  14,  16,  18,
+  20,  22,  24,  29,  38,  57,  92,  153,  279,  559,
+  994,  1983,  4408,  10097,  18682,  33336,  48105,  56005,  61313,  63636,
+  64560,  64998,  65262,  65389,  65447,  65481,  65497,  65510,  65512,  65514,
+  65516,  65518,  65520,  65522,  65524,  65526,  65528,  65530,  65532,  65534,
+  65535};
+
+
+
+/* function to be converted to fixed point */
+static __inline uint32_t piecewise(int32_t xinQ15) {
+
+  int32_t ind, qtmp1, qtmp2, qtmp3;
+  uint32_t tmpUW32;
+
+
+  qtmp2 = xinQ15;
+
+  if (qtmp2 < kHistEdgesQ15[0]) {
+    qtmp2 = kHistEdgesQ15[0];
+  }
+  if (qtmp2 > kHistEdgesQ15[50]) {
+    qtmp2 = kHistEdgesQ15[50];
+  }
+
+  qtmp1 = qtmp2 - kHistEdgesQ15[0];       /* Q15 - Q15 = Q15        */
+  ind = (qtmp1 * 5) >> 16;              /* 2^16 / 5 = 0.4 in Q15  */
+  /* Q15 -> Q0              */
+  qtmp1 = qtmp2 - kHistEdgesQ15[ind];     /* Q15 - Q15 = Q15        */
+  qtmp2 = kCdfSlopeQ0[ind] * qtmp1;      /* Q0 * Q15 = Q15         */
+  qtmp3 = qtmp2>>15;                    /* Q15 -> Q0              */
+
+  tmpUW32 = kCdfQ16[ind] + qtmp3;    /* Q0 + Q0 = Q0           */
+  return tmpUW32;
+}
+
+
+
+int WebRtcIsac_EncLogisticMulti2(
+    Bitstr *streamdata,      /* in-/output struct containing bitstream */
+    int16_t *dataQ7,    /* input: data vector */
+    const uint16_t *envQ8, /* input: side info vector defining the width of the pdf */
+    const int N,       /* input: data vector length / 2 */
+    const int16_t isSWB12kHz)
+{
+  uint32_t W_lower, W_upper;
+  uint32_t W_upper_LSB, W_upper_MSB;
+  uint8_t *stream_ptr;
+  uint8_t *maxStreamPtr;
+  uint8_t *stream_ptr_carry;
+  uint32_t cdf_lo, cdf_hi;
+  int k;
+
+  /* point to beginning of stream buffer */
+  stream_ptr = streamdata->stream + streamdata->stream_index;
+  W_upper = streamdata->W_upper;
+
+  maxStreamPtr = streamdata->stream + STREAM_SIZE_MAX_60 - 1;
+  for (k = 0; k < N; k++)
+  {
+    /* compute cdf_lower and cdf_upper by evaluating the piecewise linear cdf */
+    cdf_lo = piecewise((*dataQ7 - 64) * *envQ8);
+    cdf_hi = piecewise((*dataQ7 + 64) * *envQ8);
+
+    /* test and clip if probability gets too small */
+    while (cdf_lo+1 >= cdf_hi) {
+      /* clip */
+      if (*dataQ7 > 0) {
+        *dataQ7 -= 128;
+        cdf_hi = cdf_lo;
+        cdf_lo = piecewise((*dataQ7 - 64) * *envQ8);
+      } else {
+        *dataQ7 += 128;
+        cdf_lo = cdf_hi;
+        cdf_hi = piecewise((*dataQ7 + 64) * *envQ8);
+      }
+    }
+
+    dataQ7++;
+    // increment only once per 4 iterations for SWB-16kHz or WB
+    // increment only once per 2 iterations for SWB-12kHz
+    envQ8 += (isSWB12kHz)? (k & 1):((k & 1) & (k >> 1));
+
+
+    /* update interval */
+    W_upper_LSB = W_upper & 0x0000FFFF;
+    W_upper_MSB = W_upper >> 16;
+    W_lower = W_upper_MSB * cdf_lo;
+    W_lower += (W_upper_LSB * cdf_lo) >> 16;
+    W_upper = W_upper_MSB * cdf_hi;
+    W_upper += (W_upper_LSB * cdf_hi) >> 16;
+
+    /* shift interval such that it begins at zero */
+    W_upper -= ++W_lower;
+
+    /* add integer to bitstream */
+    streamdata->streamval += W_lower;
+
+    /* handle carry */
+    if (streamdata->streamval < W_lower)
+    {
+      /* propagate carry */
+      stream_ptr_carry = stream_ptr;
+      while (!(++(*--stream_ptr_carry)));
+    }
+
+    /* renormalize interval, store most significant byte of streamval and update streamval */
+    while ( !(W_upper & 0xFF000000) )      /* W_upper < 2^24 */
+    {
+      W_upper <<= 8;
+      *stream_ptr++ = (uint8_t) (streamdata->streamval >> 24);
+
+      if(stream_ptr > maxStreamPtr)
+      {
+        return -ISAC_DISALLOWED_BITSTREAM_LENGTH;
+      }
+      streamdata->streamval <<= 8;
+    }
+  }
+
+  /* calculate new stream_index */
+  streamdata->stream_index = (int)(stream_ptr - streamdata->stream);
+  streamdata->W_upper = W_upper;
+
+  return 0;
+}
+
+
+
+int WebRtcIsac_DecLogisticMulti2(
+    int16_t *dataQ7,       /* output: data vector */
+    Bitstr *streamdata,      /* in-/output struct containing bitstream */
+    const uint16_t *envQ8, /* input: side info vector defining the width of the pdf */
+    const int16_t *ditherQ7,/* input: dither vector */
+    const int N,         /* input: data vector length */
+    const int16_t isSWB12kHz)
+{
+  uint32_t    W_lower, W_upper;
+  uint32_t    W_tmp;
+  uint32_t    W_upper_LSB, W_upper_MSB;
+  uint32_t    streamval;
+  const uint8_t *stream_ptr;
+  uint32_t    cdf_tmp;
+  int16_t     candQ7;
+  int             k;
+
+  // Position just past the end of the stream. STREAM_SIZE_MAX_60 instead of
+  // STREAM_SIZE_MAX (which is the size of the allocated buffer) because that's
+  // the limit to how much data is filled in.
+  const uint8_t* const stream_end = streamdata->stream + STREAM_SIZE_MAX_60;
+
+  stream_ptr = streamdata->stream + streamdata->stream_index;
+  W_upper = streamdata->W_upper;
+  if (streamdata->stream_index == 0)   /* first time decoder is called for this stream */
+  {
+    /* read first word from bytestream */
+    if (stream_ptr + 3 >= stream_end)
+      return -1;  // Would read out of bounds. Malformed input?
+    streamval = *stream_ptr << 24;
+    streamval |= *++stream_ptr << 16;
+    streamval |= *++stream_ptr << 8;
+    streamval |= *++stream_ptr;
+  } else {
+    streamval = streamdata->streamval;
+  }
+
+
+  for (k = 0; k < N; k++)
+  {
+    /* find the integer *data for which streamval lies in [W_lower+1, W_upper] */
+    W_upper_LSB = W_upper & 0x0000FFFF;
+    W_upper_MSB = W_upper >> 16;
+
+    /* find first candidate by inverting the logistic cdf */
+    candQ7 = - *ditherQ7 + 64;
+    cdf_tmp = piecewise(candQ7 * *envQ8);
+
+    W_tmp = W_upper_MSB * cdf_tmp;
+    W_tmp += (W_upper_LSB * cdf_tmp) >> 16;
+    if (streamval > W_tmp)
+    {
+      W_lower = W_tmp;
+      candQ7 += 128;
+      cdf_tmp = piecewise(candQ7 * *envQ8);
+
+      W_tmp = W_upper_MSB * cdf_tmp;
+      W_tmp += (W_upper_LSB * cdf_tmp) >> 16;
+      while (streamval > W_tmp)
+      {
+        W_lower = W_tmp;
+        candQ7 += 128;
+        cdf_tmp = piecewise(candQ7 * *envQ8);
+
+        W_tmp = W_upper_MSB * cdf_tmp;
+        W_tmp += (W_upper_LSB * cdf_tmp) >> 16;
+
+        /* error check */
+        if (W_lower == W_tmp) return -1;
+      }
+      W_upper = W_tmp;
+
+      /* another sample decoded */
+      *dataQ7 = candQ7 - 64;
+    }
+    else
+    {
+      W_upper = W_tmp;
+      candQ7 -= 128;
+      cdf_tmp = piecewise(candQ7 * *envQ8);
+
+      W_tmp = W_upper_MSB * cdf_tmp;
+      W_tmp += (W_upper_LSB * cdf_tmp) >> 16;
+      while ( !(streamval > W_tmp) )
+      {
+        W_upper = W_tmp;
+        candQ7 -= 128;
+        cdf_tmp = piecewise(candQ7 * *envQ8);
+
+        W_tmp = W_upper_MSB * cdf_tmp;
+        W_tmp += (W_upper_LSB * cdf_tmp) >> 16;
+
+        /* error check */
+        if (W_upper == W_tmp) return -1;
+      }
+      W_lower = W_tmp;
+
+      /* another sample decoded */
+      *dataQ7 = candQ7 + 64;
+    }
+    ditherQ7++;
+    dataQ7++;
+    // increment only once per 4 iterations for SWB-16kHz or WB
+    // increment only once per 2 iterations for SWB-12kHz
+    envQ8 += (isSWB12kHz)? (k & 1):((k & 1) & (k >> 1));
+
+    /* shift interval to start at zero */
+    W_upper -= ++W_lower;
+
+    /* add integer to bitstream */
+    streamval -= W_lower;
+
+    /* renormalize interval and update streamval */
+    while ( !(W_upper & 0xFF000000) )    /* W_upper < 2^24 */
+    {
+      /* read next byte from stream */
+      if (stream_ptr + 1 >= stream_end)
+        return -1;  // Would read out of bounds. Malformed input?
+      streamval = (streamval << 8) | *++stream_ptr;
+      W_upper <<= 8;
+    }
+  }
+
+  streamdata->stream_index = (int)(stream_ptr - streamdata->stream);
+  streamdata->W_upper = W_upper;
+  streamdata->streamval = streamval;
+
+  /* find number of bytes in original stream (determined by current interval width) */
+  if ( W_upper > 0x01FFFFFF )
+    return streamdata->stream_index - 2;
+  else
+    return streamdata->stream_index - 1;
+}
diff --git a/modules/audio_coding/codecs/isac/main/source/audio_decoder_isac.cc b/modules/audio_coding/codecs/isac/main/source/audio_decoder_isac.cc
new file mode 100644
index 0000000..b671002
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/audio_decoder_isac.cc
@@ -0,0 +1,20 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h"
+
+#include "modules/audio_coding/codecs/isac/audio_decoder_isac_t_impl.h"
+
+namespace webrtc {
+
+// Explicit instantiation:
+template class AudioDecoderIsacT<IsacFloat>;
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac.cc b/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac.cc
new file mode 100644
index 0000000..b7f2c0b
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac.cc
@@ -0,0 +1,20 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
+
+#include "modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h"
+
+namespace webrtc {
+
+// Explicit instantiation:
+template class AudioEncoderIsacT<IsacFloat>;
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac_unittest.cc b/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac_unittest.cc
new file mode 100644
index 0000000..333ab52
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac_unittest.cc
@@ -0,0 +1,56 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits>
+
+#include "modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+void TestBadConfig(const AudioEncoderIsacFloatImpl::Config& config) {
+  EXPECT_FALSE(config.IsOk());
+}
+
+void TestGoodConfig(const AudioEncoderIsacFloatImpl::Config& config) {
+  EXPECT_TRUE(config.IsOk());
+  AudioEncoderIsacFloatImpl aei(config);
+}
+
+// Wrap subroutine calls that test things in this, so that the error messages
+// will be accompanied by stack traces that make it possible to tell which
+// subroutine invocation caused the failure.
+#define S(x) do { SCOPED_TRACE(#x); x; } while (0)
+
+}  // namespace
+
+TEST(AudioEncoderIsacTest, TestConfigBitrate) {
+  AudioEncoderIsacFloatImpl::Config config;
+
+  // The default value is some real, positive value.
+  EXPECT_GT(config.bit_rate, 1);
+  S(TestGoodConfig(config));
+
+  // 0 is another way to ask for the default value.
+  config.bit_rate = 0;
+  S(TestGoodConfig(config));
+
+  // Try some unreasonable values and watch them fail.
+  config.bit_rate = -1;
+  S(TestBadConfig(config));
+  config.bit_rate = 1;
+  S(TestBadConfig(config));
+  config.bit_rate = std::numeric_limits<int>::max();
+  S(TestBadConfig(config));
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c b/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c
new file mode 100644
index 0000000..673f10d
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c
@@ -0,0 +1,1031 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * BwEstimator.c
+ *
+ * This file contains the code for the Bandwidth Estimator designed
+ * for iSAC.
+ *
+ */
+
+#include <math.h>
+#include <string.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/include/isac.h"
+#include "rtc_base/checks.h"
+
+/* array of quantization levels for bottle neck info; Matlab code: */
+/* sprintf('%4.1ff, ', logspace(log10(5000), log10(40000), 12)) */
+static const float kQRateTableWb[12] =
+{
+  10000.0f, 11115.3f, 12355.1f, 13733.1f, 15264.8f, 16967.3f,
+  18859.8f, 20963.3f, 23301.4f, 25900.3f, 28789.0f, 32000.0f};
+
+
+static const float kQRateTableSwb[24] =
+{
+  10000.0f, 11115.3f, 12355.1f, 13733.1f, 15264.8f, 16967.3f,
+  18859.8f, 20963.3f, 23153.1f, 25342.9f, 27532.7f, 29722.5f,
+  31912.3f, 34102.1f, 36291.9f, 38481.7f, 40671.4f, 42861.2f,
+  45051.0f, 47240.8f, 49430.6f, 51620.4f, 53810.2f, 56000.0f,
+};
+
+
+
+
+int32_t WebRtcIsac_InitBandwidthEstimator(
+    BwEstimatorstr*              bwest_str,
+    enum IsacSamplingRate encoderSampRate,
+    enum IsacSamplingRate decoderSampRate)
+{
+  switch(encoderSampRate)
+  {
+    case kIsacWideband:
+      {
+        bwest_str->send_bw_avg       = INIT_BN_EST_WB;
+        break;
+      }
+    case kIsacSuperWideband:
+      {
+        bwest_str->send_bw_avg       = INIT_BN_EST_SWB;
+        break;
+      }
+  }
+
+  switch(decoderSampRate)
+  {
+    case kIsacWideband:
+      {
+        bwest_str->prev_frame_length = INIT_FRAME_LEN_WB;
+        bwest_str->rec_bw_inv        = 1.0f /
+            (INIT_BN_EST_WB + INIT_HDR_RATE_WB);
+        bwest_str->rec_bw            = (int32_t)INIT_BN_EST_WB;
+        bwest_str->rec_bw_avg_Q      = INIT_BN_EST_WB;
+        bwest_str->rec_bw_avg        = INIT_BN_EST_WB + INIT_HDR_RATE_WB;
+        bwest_str->rec_header_rate   = INIT_HDR_RATE_WB;
+        break;
+      }
+    case kIsacSuperWideband:
+      {
+        bwest_str->prev_frame_length = INIT_FRAME_LEN_SWB;
+        bwest_str->rec_bw_inv        = 1.0f /
+            (INIT_BN_EST_SWB + INIT_HDR_RATE_SWB);
+        bwest_str->rec_bw            = (int32_t)INIT_BN_EST_SWB;
+        bwest_str->rec_bw_avg_Q      = INIT_BN_EST_SWB;
+        bwest_str->rec_bw_avg        = INIT_BN_EST_SWB + INIT_HDR_RATE_SWB;
+        bwest_str->rec_header_rate   = INIT_HDR_RATE_SWB;
+        break;
+      }
+  }
+
+  bwest_str->prev_rec_rtp_number       = 0;
+  bwest_str->prev_rec_arr_ts           = 0;
+  bwest_str->prev_rec_send_ts          = 0;
+  bwest_str->prev_rec_rtp_rate         = 1.0f;
+  bwest_str->last_update_ts            = 0;
+  bwest_str->last_reduction_ts         = 0;
+  bwest_str->count_tot_updates_rec     = -9;
+  bwest_str->rec_jitter                = 10.0f;
+  bwest_str->rec_jitter_short_term     = 0.0f;
+  bwest_str->rec_jitter_short_term_abs = 5.0f;
+  bwest_str->rec_max_delay             = 10.0f;
+  bwest_str->rec_max_delay_avg_Q       = 10.0f;
+  bwest_str->num_pkts_rec              = 0;
+
+  bwest_str->send_max_delay_avg        = 10.0f;
+
+  bwest_str->hsn_detect_rec = 0;
+
+  bwest_str->num_consec_rec_pkts_over_30k = 0;
+
+  bwest_str->hsn_detect_snd = 0;
+
+  bwest_str->num_consec_snt_pkts_over_30k = 0;
+
+  bwest_str->in_wait_period = 0;
+
+  bwest_str->change_to_WB = 0;
+
+  bwest_str->numConsecLatePkts = 0;
+  bwest_str->consecLatency = 0;
+  bwest_str->inWaitLatePkts = 0;
+  bwest_str->senderTimestamp = 0;
+  bwest_str->receiverTimestamp = 0;
+
+  bwest_str->external_bw_info.in_use = 0;
+
+  return 0;
+}
+
+/* This function updates both bottle neck rates                                                      */
+/* Parameters:                                                                                       */
+/* rtp_number    - value from RTP packet, from NetEq                                                 */
+/* frame length  - length of signal frame in ms, from iSAC decoder                                   */
+/* send_ts       - value in RTP header giving send time in samples                                     */
+/* arr_ts        - value given by timeGetTime() time of arrival in samples of packet from NetEq      */
+/* pksize        - size of packet in bytes, from NetEq                                               */
+/* Index         - integer (range 0...23) indicating bottle neck & jitter as estimated by other side */
+/* returns 0 if everything went fine, -1 otherwise                                                   */
+int16_t WebRtcIsac_UpdateBandwidthEstimator(
+    BwEstimatorstr* bwest_str,
+    const uint16_t rtp_number,
+    const int32_t frame_length,
+    const uint32_t send_ts,
+    const uint32_t arr_ts,
+    const size_t pksize
+    /*,    const uint16_t Index*/)
+{
+  float weight = 0.0f;
+  float curr_bw_inv = 0.0f;
+  float rec_rtp_rate;
+  float t_diff_proj;
+  float arr_ts_diff;
+  float send_ts_diff;
+  float arr_time_noise;
+  float arr_time_noise_abs;
+
+  float delay_correction_factor = 1;
+  float late_diff = 0.0f;
+  int immediate_set = 0;
+  int num_pkts_expected;
+
+  RTC_DCHECK(!bwest_str->external_bw_info.in_use);
+
+  // We have to adjust the header-rate if the first packet has a
+  // frame-size different than the initialized value.
+  if ( frame_length != bwest_str->prev_frame_length )
+  {
+    bwest_str->rec_header_rate = (float)HEADER_SIZE * 8.0f *
+        1000.0f / (float)frame_length;     /* bits/s */
+  }
+
+  /* UPDATE ESTIMATES ON THIS SIDE */
+  /* compute far-side transmission rate */
+  rec_rtp_rate = ((float)pksize * 8.0f * 1000.0f / (float)frame_length) +
+      bwest_str->rec_header_rate;
+  // rec_rtp_rate packet bits/s + header bits/s
+
+  /* check for timer wrap-around */
+  if (arr_ts < bwest_str->prev_rec_arr_ts)
+  {
+    bwest_str->prev_rec_arr_ts   = arr_ts;
+    bwest_str->last_update_ts    = arr_ts;
+    bwest_str->last_reduction_ts = arr_ts + 3*FS;
+    bwest_str->num_pkts_rec      = 0;
+
+    /* store frame length */
+    bwest_str->prev_frame_length = frame_length;
+
+    /* store far-side transmission rate */
+    bwest_str->prev_rec_rtp_rate = rec_rtp_rate;
+
+    /* store far-side RTP time stamp */
+    bwest_str->prev_rec_rtp_number = rtp_number;
+
+    return 0;
+  }
+
+  bwest_str->num_pkts_rec++;
+
+  /* check that it's not one of the first 9 packets */
+  if ( bwest_str->count_tot_updates_rec > 0 )
+  {
+    if(bwest_str->in_wait_period > 0 )
+    {
+      bwest_str->in_wait_period--;
+    }
+
+    bwest_str->inWaitLatePkts -= ((bwest_str->inWaitLatePkts > 0)? 1:0);
+    send_ts_diff = (float)(send_ts - bwest_str->prev_rec_send_ts);
+
+    if (send_ts_diff <= (16 * frame_length)*2)
+      //doesn't allow for a dropped packet, not sure necessary to be
+      // that strict -DH
+    {
+      /* if not been updated for a long time, reduce the BN estimate */
+      if((uint32_t)(arr_ts - bwest_str->last_update_ts) *
+         1000.0f / FS > 3000)
+      {
+        //how many frames should have been received since the last
+        // update if too many have been dropped or there have been
+        // big delays won't allow this reduction may no longer need
+        // the send_ts_diff here
+        num_pkts_expected = (int)(((float)(arr_ts -
+                                           bwest_str->last_update_ts) * 1000.0f /(float) FS) /
+                                  (float)frame_length);
+
+        if(((float)bwest_str->num_pkts_rec/(float)num_pkts_expected) >
+           0.9)
+        {
+          float inv_bitrate = (float) pow( 0.99995,
+                                           (double)((uint32_t)(arr_ts -
+                                                                     bwest_str->last_reduction_ts)*1000.0f/FS) );
+
+          if ( inv_bitrate )
+          {
+            bwest_str->rec_bw_inv /= inv_bitrate;
+
+            //precautionary, likely never necessary
+            if (bwest_str->hsn_detect_snd &&
+                bwest_str->hsn_detect_rec)
+            {
+              if (bwest_str->rec_bw_inv > 0.000066f)
+              {
+                bwest_str->rec_bw_inv = 0.000066f;
+              }
+            }
+          }
+          else
+          {
+            bwest_str->rec_bw_inv = 1.0f /
+                (INIT_BN_EST_WB + INIT_HDR_RATE_WB);
+          }
+          /* reset time-since-update counter */
+          bwest_str->last_reduction_ts = arr_ts;
+        }
+        else
+          //reset here?
+        {
+          bwest_str->last_reduction_ts = arr_ts + 3*FS;
+          bwest_str->last_update_ts = arr_ts;
+          bwest_str->num_pkts_rec = 0;
+        }
+      }
+    }
+    else
+    {
+      bwest_str->last_reduction_ts = arr_ts + 3*FS;
+      bwest_str->last_update_ts = arr_ts;
+      bwest_str->num_pkts_rec = 0;
+    }
+
+
+    /* temporarily speed up adaptation if frame length has changed */
+    if ( frame_length != bwest_str->prev_frame_length )
+    {
+      bwest_str->count_tot_updates_rec = 10;
+      bwest_str->rec_header_rate = (float)HEADER_SIZE * 8.0f *
+          1000.0f / (float)frame_length;     /* bits/s */
+
+      bwest_str->rec_bw_inv = 1.0f /((float)bwest_str->rec_bw +
+                                     bwest_str->rec_header_rate);
+    }
+
+    ////////////////////////
+    arr_ts_diff = (float)(arr_ts - bwest_str->prev_rec_arr_ts);
+
+    if (send_ts_diff > 0 )
+    {
+      late_diff = arr_ts_diff - send_ts_diff;
+    }
+    else
+    {
+      late_diff = arr_ts_diff - (float)(16 * frame_length);
+    }
+
+    if((late_diff > 0) && !bwest_str->inWaitLatePkts)
+    {
+      bwest_str->numConsecLatePkts++;
+      bwest_str->consecLatency += late_diff;
+    }
+    else
+    {
+      bwest_str->numConsecLatePkts = 0;
+      bwest_str->consecLatency = 0;
+    }
+    if(bwest_str->numConsecLatePkts > 50)
+    {
+      float latencyMs = bwest_str->consecLatency/(FS/1000);
+      float averageLatencyMs = latencyMs / bwest_str->numConsecLatePkts;
+      delay_correction_factor = frame_length / (frame_length + averageLatencyMs);
+      immediate_set = 1;
+      bwest_str->inWaitLatePkts = (int16_t)((bwest_str->consecLatency/(FS/1000)) / 30);// + 150;
+      bwest_str->start_wait_period = arr_ts;
+    }
+    ///////////////////////////////////////////////
+
+
+
+    /*   update only if previous packet was not lost */
+    if ( rtp_number == bwest_str->prev_rec_rtp_number + 1 )
+    {
+
+
+      if (!(bwest_str->hsn_detect_snd && bwest_str->hsn_detect_rec))
+      {
+        if ((arr_ts_diff > (float)(16 * frame_length)))
+        {
+          //1/2 second
+          if ((late_diff > 8000.0f) && !bwest_str->in_wait_period)
+          {
+            delay_correction_factor = 0.7f;
+            bwest_str->in_wait_period = 55;
+            bwest_str->start_wait_period = arr_ts;
+            immediate_set = 1;
+          }
+          //320 ms
+          else if (late_diff > 5120.0f && !bwest_str->in_wait_period)
+          {
+            delay_correction_factor = 0.8f;
+            immediate_set = 1;
+            bwest_str->in_wait_period = 44;
+            bwest_str->start_wait_period = arr_ts;
+          }
+        }
+      }
+
+
+      if ((bwest_str->prev_rec_rtp_rate > bwest_str->rec_bw_avg) &&
+          (rec_rtp_rate > bwest_str->rec_bw_avg)                 &&
+          !bwest_str->in_wait_period)
+      {
+        /* test if still in initiation period and increment counter */
+        if (bwest_str->count_tot_updates_rec++ > 99)
+        {
+          /* constant weight after initiation part */
+          weight = 0.01f;
+        }
+        else
+        {
+          /* weight decreases with number of updates */
+          weight = 1.0f / (float) bwest_str->count_tot_updates_rec;
+        }
+        /* Bottle Neck Estimation */
+
+        /* limit outliers */
+        /* if more than 25 ms too much */
+        if (arr_ts_diff > frame_length * FS/1000 + 400.0f)
+        {
+          // in samples,  why 25ms??
+          arr_ts_diff = frame_length * FS/1000 + 400.0f;
+        }
+        if(arr_ts_diff < (frame_length * FS/1000) - 160.0f)
+        {
+          /* don't allow it to be less than frame rate - 10 ms */
+          arr_ts_diff = (float)frame_length * FS/1000 - 160.0f;
+        }
+
+        /* compute inverse receiving rate for last packet */
+        curr_bw_inv = arr_ts_diff / ((float)(pksize + HEADER_SIZE) *
+                                     8.0f * FS); // (180+35)*8*16000 = 27.5 Mbit....
+
+
+        if(curr_bw_inv <
+           (1.0f / (MAX_ISAC_BW + bwest_str->rec_header_rate)))
+        {
+          // don't allow inv rate to be larger than MAX
+          curr_bw_inv = (1.0f /
+                         (MAX_ISAC_BW + bwest_str->rec_header_rate));
+        }
+
+        /* update bottle neck rate estimate */
+        bwest_str->rec_bw_inv = weight * curr_bw_inv +
+            (1.0f - weight) * bwest_str->rec_bw_inv;
+
+        /* reset time-since-update counter */
+        bwest_str->last_update_ts    = arr_ts;
+        bwest_str->last_reduction_ts = arr_ts + 3 * FS;
+        bwest_str->num_pkts_rec = 0;
+
+        /* Jitter Estimation */
+        /* projected difference between arrival times */
+        t_diff_proj = ((float)(pksize + HEADER_SIZE) * 8.0f *
+                       1000.0f) / bwest_str->rec_bw_avg;
+
+
+        // difference between projected and actual
+        //   arrival time differences
+        arr_time_noise = (float)(arr_ts_diff*1000.0f/FS) -
+            t_diff_proj;
+        arr_time_noise_abs = (float) fabs( arr_time_noise );
+
+        /* long term averaged absolute jitter */
+        bwest_str->rec_jitter = weight * arr_time_noise_abs +
+            (1.0f - weight) * bwest_str->rec_jitter;
+        if (bwest_str->rec_jitter > 10.0f)
+        {
+          bwest_str->rec_jitter = 10.0f;
+        }
+        /* short term averaged absolute jitter */
+        bwest_str->rec_jitter_short_term_abs = 0.05f *
+            arr_time_noise_abs + 0.95f *
+            bwest_str->rec_jitter_short_term_abs;
+
+        /* short term averaged jitter */
+        bwest_str->rec_jitter_short_term = 0.05f * arr_time_noise +
+            0.95f * bwest_str->rec_jitter_short_term;
+      }
+    }
+  }
+  else
+  {
+    // reset time-since-update counter when
+    // receiving the first 9 packets
+    bwest_str->last_update_ts    = arr_ts;
+    bwest_str->last_reduction_ts = arr_ts + 3*FS;
+    bwest_str->num_pkts_rec = 0;
+
+    bwest_str->count_tot_updates_rec++;
+  }
+
+  /* limit minimum bottle neck rate */
+  if (bwest_str->rec_bw_inv > 1.0f / ((float)MIN_ISAC_BW +
+                                      bwest_str->rec_header_rate))
+  {
+    bwest_str->rec_bw_inv = 1.0f / ((float)MIN_ISAC_BW +
+                                    bwest_str->rec_header_rate);
+  }
+
+  // limit maximum bitrate
+  if (bwest_str->rec_bw_inv < 1.0f / ((float)MAX_ISAC_BW +
+                                      bwest_str->rec_header_rate))
+  {
+    bwest_str->rec_bw_inv = 1.0f / ((float)MAX_ISAC_BW +
+                                    bwest_str->rec_header_rate);
+  }
+
+  /* store frame length */
+  bwest_str->prev_frame_length = frame_length;
+
+  /* store far-side transmission rate */
+  bwest_str->prev_rec_rtp_rate = rec_rtp_rate;
+
+  /* store far-side RTP time stamp */
+  bwest_str->prev_rec_rtp_number = rtp_number;
+
+  // Replace bwest_str->rec_max_delay by the new
+  // value (atomic operation)
+  bwest_str->rec_max_delay = 3.0f * bwest_str->rec_jitter;
+
+  /* store send and arrival time stamp */
+  bwest_str->prev_rec_arr_ts = arr_ts ;
+  bwest_str->prev_rec_send_ts = send_ts;
+
+  /* Replace bwest_str->rec_bw by the new value (atomic operation) */
+  bwest_str->rec_bw = (int32_t)(1.0f / bwest_str->rec_bw_inv -
+                                      bwest_str->rec_header_rate);
+
+  if (immediate_set)
+  {
+    bwest_str->rec_bw = (int32_t) (delay_correction_factor *
+                                         (float) bwest_str->rec_bw);
+
+    if (bwest_str->rec_bw < (int32_t) MIN_ISAC_BW)
+    {
+      bwest_str->rec_bw = (int32_t) MIN_ISAC_BW;
+    }
+
+    bwest_str->rec_bw_avg = bwest_str->rec_bw +
+        bwest_str->rec_header_rate;
+
+    bwest_str->rec_bw_avg_Q = (float) bwest_str->rec_bw;
+
+    bwest_str->rec_jitter_short_term = 0.0f;
+
+    bwest_str->rec_bw_inv = 1.0f / (bwest_str->rec_bw +
+                                    bwest_str->rec_header_rate);
+
+    bwest_str->count_tot_updates_rec = 1;
+
+    immediate_set = 0;
+    bwest_str->consecLatency = 0;
+    bwest_str->numConsecLatePkts = 0;
+  }
+
+  return 0;
+}
+
+
+/* This function updates the send bottle neck rate                                                   */
+/* Index         - integer (range 0...23) indicating bottle neck & jitter as estimated by other side */
+/* returns 0 if everything went fine, -1 otherwise                                                   */
+int16_t WebRtcIsac_UpdateUplinkBwImpl(
+    BwEstimatorstr*           bwest_str,
+    int16_t               index,
+    enum IsacSamplingRate encoderSamplingFreq)
+{
+  RTC_DCHECK(!bwest_str->external_bw_info.in_use);
+
+  if((index < 0) || (index > 23))
+  {
+    return -ISAC_RANGE_ERROR_BW_ESTIMATOR;
+  }
+
+  /* UPDATE ESTIMATES FROM OTHER SIDE */
+  if(encoderSamplingFreq == kIsacWideband)
+  {
+    if(index > 11)
+    {
+      index -= 12;   
+      /* compute the jitter estimate as decoded on the other side */
+      bwest_str->send_max_delay_avg = 0.9f * bwest_str->send_max_delay_avg +
+          0.1f * (float)MAX_ISAC_MD;
+    }
+    else
+    {
+      /* compute the jitter estimate as decoded on the other side */
+      bwest_str->send_max_delay_avg = 0.9f * bwest_str->send_max_delay_avg +
+          0.1f * (float)MIN_ISAC_MD;
+    }
+
+    /* compute the BN estimate as decoded on the other side */
+    bwest_str->send_bw_avg = 0.9f * bwest_str->send_bw_avg +
+        0.1f * kQRateTableWb[index];
+  }
+  else
+  {
+    /* compute the BN estimate as decoded on the other side */
+    bwest_str->send_bw_avg = 0.9f * bwest_str->send_bw_avg +
+        0.1f * kQRateTableSwb[index];
+  }
+
+  if (bwest_str->send_bw_avg > (float) 28000 && !bwest_str->hsn_detect_snd)
+  {
+    bwest_str->num_consec_snt_pkts_over_30k++;
+
+    if (bwest_str->num_consec_snt_pkts_over_30k >= 66)
+    {
+      //approx 2 seconds with 30ms frames
+      bwest_str->hsn_detect_snd = 1;
+    }
+  }
+  else if (!bwest_str->hsn_detect_snd)
+  {
+    bwest_str->num_consec_snt_pkts_over_30k = 0;
+  }
+  return 0;
+}
+
+// called when there is upper-band bit-stream to update jitter
+// statistics.
+int16_t WebRtcIsac_UpdateUplinkJitter(
+    BwEstimatorstr*              bwest_str,
+    int32_t                  index)
+{
+  RTC_DCHECK(!bwest_str->external_bw_info.in_use);
+
+  if((index < 0) || (index > 23))
+  {
+    return -ISAC_RANGE_ERROR_BW_ESTIMATOR;
+  }
+
+  if(index > 0)
+  {
+    /* compute the jitter estimate as decoded on the other side */
+    bwest_str->send_max_delay_avg = 0.9f * bwest_str->send_max_delay_avg +
+        0.1f * (float)MAX_ISAC_MD;
+  }
+  else
+  {
+    /* compute the jitter estimate as decoded on the other side */
+    bwest_str->send_max_delay_avg = 0.9f * bwest_str->send_max_delay_avg +
+        0.1f * (float)MIN_ISAC_MD;
+  }
+
+  return 0;
+}
+
+
+
+// Returns the bandwidth/jitter estimation code (integer 0...23)
+// to put in the sending iSAC payload
+void
+WebRtcIsac_GetDownlinkBwJitIndexImpl(
+    BwEstimatorstr*           bwest_str,
+    int16_t*              bottleneckIndex,
+    int16_t*              jitterInfo,
+    enum IsacSamplingRate decoderSamplingFreq)
+{
+  float MaxDelay;
+  //uint16_t MaxDelayBit;
+
+  float rate;
+  float r;
+  float e1, e2;
+  const float weight = 0.1f;
+  const float* ptrQuantizationTable;
+  int16_t addJitterInfo;
+  int16_t minInd;
+  int16_t maxInd;
+  int16_t midInd;
+
+  if (bwest_str->external_bw_info.in_use) {
+    *bottleneckIndex = bwest_str->external_bw_info.bottleneck_idx;
+    *jitterInfo = bwest_str->external_bw_info.jitter_info;
+    return;
+  }
+
+  /* Get Max Delay Bit */
+  /* get unquantized max delay */
+  MaxDelay = (float)WebRtcIsac_GetDownlinkMaxDelay(bwest_str);
+
+  if ( ((1.f - weight) * bwest_str->rec_max_delay_avg_Q + weight *
+        MAX_ISAC_MD - MaxDelay) > (MaxDelay - (1.f-weight) *
+                                   bwest_str->rec_max_delay_avg_Q - weight * MIN_ISAC_MD) )
+  {
+    jitterInfo[0] = 0;
+    /* update quantized average */
+    bwest_str->rec_max_delay_avg_Q =
+        (1.f - weight) * bwest_str->rec_max_delay_avg_Q + weight *
+        (float)MIN_ISAC_MD;
+  }
+  else
+  {
+    jitterInfo[0] = 1;
+    /* update quantized average */
+    bwest_str->rec_max_delay_avg_Q =
+        (1.f-weight) * bwest_str->rec_max_delay_avg_Q + weight *
+        (float)MAX_ISAC_MD;
+  }
+
+  // Get unquantized rate.
+  rate = (float)WebRtcIsac_GetDownlinkBandwidth(bwest_str);
+
+  /* Get Rate Index */
+  if(decoderSamplingFreq == kIsacWideband)
+  {
+    ptrQuantizationTable = kQRateTableWb;
+    addJitterInfo = 1;
+    maxInd = 11;
+  }
+  else
+  {
+    ptrQuantizationTable = kQRateTableSwb;
+    addJitterInfo = 0;
+    maxInd = 23;
+  }
+
+  minInd = 0;
+  while(maxInd > minInd + 1)
+  {
+    midInd = (maxInd + minInd) >> 1;
+    if(rate > ptrQuantizationTable[midInd])
+    {
+      minInd = midInd;
+    }
+    else
+    {
+      maxInd = midInd;
+    }
+  }
+  // Chose the index which gives results an average which is closest
+  // to rate
+  r = (1 - weight) * bwest_str->rec_bw_avg_Q - rate;
+  e1 = weight * ptrQuantizationTable[minInd] + r;
+  e2 = weight * ptrQuantizationTable[maxInd] + r;
+  e1 = (e1 > 0)? e1:-e1;
+  e2 = (e2 > 0)? e2:-e2;
+  if(e1 < e2)
+  {
+    bottleneckIndex[0] = minInd;
+  }
+  else
+  {
+    bottleneckIndex[0] = maxInd;
+  }
+
+  bwest_str->rec_bw_avg_Q = (1 - weight) * bwest_str->rec_bw_avg_Q +
+      weight * ptrQuantizationTable[bottleneckIndex[0]];
+  bottleneckIndex[0] += jitterInfo[0] * 12 * addJitterInfo;
+
+  bwest_str->rec_bw_avg = (1 - weight) * bwest_str->rec_bw_avg + weight *
+      (rate + bwest_str->rec_header_rate);
+}
+
+
+
+/* get the bottle neck rate from far side to here, as estimated on this side */
+int32_t WebRtcIsac_GetDownlinkBandwidth( const BwEstimatorstr *bwest_str)
+{
+  int32_t  rec_bw;
+  float   jitter_sign;
+  float   bw_adjust;
+
+  RTC_DCHECK(!bwest_str->external_bw_info.in_use);
+
+  /* create a value between -1.0 and 1.0 indicating "average sign" of jitter */
+  jitter_sign = bwest_str->rec_jitter_short_term /
+      bwest_str->rec_jitter_short_term_abs;
+
+  /* adjust bw proportionally to negative average jitter sign */
+  bw_adjust = 1.0f - jitter_sign * (0.15f + 0.15f * jitter_sign * jitter_sign);
+
+  /* adjust Rate if jitter sign is mostly constant */
+  rec_bw = (int32_t)(bwest_str->rec_bw * bw_adjust);
+
+  /* limit range of bottle neck rate */
+  if (rec_bw < MIN_ISAC_BW)
+  {
+    rec_bw = MIN_ISAC_BW;
+  }
+  else if (rec_bw > MAX_ISAC_BW)
+  {
+    rec_bw = MAX_ISAC_BW;
+  }
+  return rec_bw;
+}
+
+/* Returns the max delay (in ms) */
+int32_t
+WebRtcIsac_GetDownlinkMaxDelay(const BwEstimatorstr *bwest_str)
+{
+  int32_t rec_max_delay;
+
+  RTC_DCHECK(!bwest_str->external_bw_info.in_use);
+
+  rec_max_delay = (int32_t)(bwest_str->rec_max_delay);
+
+  /* limit range of jitter estimate */
+  if (rec_max_delay < MIN_ISAC_MD)
+  {
+    rec_max_delay = MIN_ISAC_MD;
+  }
+  else if (rec_max_delay > MAX_ISAC_MD)
+  {
+    rec_max_delay = MAX_ISAC_MD;
+  }
+  return rec_max_delay;
+}
+
+/* Clamp val to the closed interval [min,max]. */
+static int32_t clamp(int32_t val, int32_t min, int32_t max) {
+  RTC_DCHECK_LE(min, max);
+  return val < min ? min : (val > max ? max : val);
+}
+
+int32_t WebRtcIsac_GetUplinkBandwidth(const BwEstimatorstr* bwest_str) {
+  return bwest_str->external_bw_info.in_use
+             ? bwest_str->external_bw_info.send_bw_avg
+             : clamp(bwest_str->send_bw_avg, MIN_ISAC_BW, MAX_ISAC_BW);
+}
+
+int32_t WebRtcIsac_GetUplinkMaxDelay(const BwEstimatorstr* bwest_str) {
+  return bwest_str->external_bw_info.in_use
+             ? bwest_str->external_bw_info.send_max_delay_avg
+             : clamp(bwest_str->send_max_delay_avg, MIN_ISAC_MD, MAX_ISAC_MD);
+}
+
+void WebRtcIsacBw_GetBandwidthInfo(BwEstimatorstr* bwest_str,
+                                   enum IsacSamplingRate decoder_sample_rate_hz,
+                                   IsacBandwidthInfo* bwinfo) {
+  RTC_DCHECK(!bwest_str->external_bw_info.in_use);
+  bwinfo->in_use = 1;
+  bwinfo->send_bw_avg = WebRtcIsac_GetUplinkBandwidth(bwest_str);
+  bwinfo->send_max_delay_avg = WebRtcIsac_GetUplinkMaxDelay(bwest_str);
+  WebRtcIsac_GetDownlinkBwJitIndexImpl(bwest_str, &bwinfo->bottleneck_idx,
+                                       &bwinfo->jitter_info,
+                                       decoder_sample_rate_hz);
+}
+
+void WebRtcIsacBw_SetBandwidthInfo(BwEstimatorstr* bwest_str,
+                                   const IsacBandwidthInfo* bwinfo) {
+  memcpy(&bwest_str->external_bw_info, bwinfo,
+         sizeof bwest_str->external_bw_info);
+}
+
+/*
+ * update long-term average bitrate and amount of data in buffer
+ * returns minimum payload size (bytes)
+ */
+int WebRtcIsac_GetMinBytes(
+    RateModel*         State,
+    int                StreamSize,    /* bytes in bitstream */
+    const int          FrameSamples,  /* samples per frame */
+    const double       BottleNeck,    /* bottle neck rate; excl headers (bps) */
+    const double       DelayBuildUp,  /* max delay from bottleneck buffering (ms) */
+    enum ISACBandwidth bandwidth
+    /*,int16_t        frequentLargePackets*/)
+{
+  double MinRate = 0.0;
+  int    MinBytes;
+  double TransmissionTime;
+  int    burstInterval = BURST_INTERVAL;
+
+  // first 10 packets @ low rate, then INIT_BURST_LEN packets @
+  // fixed rate of INIT_RATE bps
+  if (State->InitCounter > 0)
+  {
+    if (State->InitCounter-- <= INIT_BURST_LEN)
+    {
+      if(bandwidth == isac8kHz)
+      {
+        MinRate = INIT_RATE_WB;
+      }
+      else
+      {
+        MinRate = INIT_RATE_SWB;
+      }
+    }
+    else
+    {
+      MinRate = 0;
+    }
+  }
+  else
+  {
+    /* handle burst */
+    if (State->BurstCounter)
+    {
+      if (State->StillBuffered < (1.0 - 1.0/BURST_LEN) * DelayBuildUp)
+      {
+        /* max bps derived from BottleNeck and DelayBuildUp values */
+        MinRate = (1.0 + (FS/1000) * DelayBuildUp /
+                   (double)(BURST_LEN * FrameSamples)) * BottleNeck;
+      }
+      else
+      {
+        // max bps derived from StillBuffered and DelayBuildUp
+        // values
+        MinRate = (1.0 + (FS/1000) * (DelayBuildUp -
+                                      State->StillBuffered) / (double)FrameSamples) * BottleNeck;
+        if (MinRate < 1.04 * BottleNeck)
+        {
+          MinRate = 1.04 * BottleNeck;
+        }
+      }
+      State->BurstCounter--;
+    }
+  }
+
+
+  /* convert rate from bits/second to bytes/packet */
+  MinBytes = (int) (MinRate * FrameSamples / (8.0 * FS));
+
+  /* StreamSize will be adjusted if less than MinBytes */
+  if (StreamSize < MinBytes)
+  {
+    StreamSize = MinBytes;
+  }
+
+  /* keep track of when bottle neck was last exceeded by at least 1% */
+  if (StreamSize * 8.0 * FS / FrameSamples > 1.01 * BottleNeck) {
+    if (State->PrevExceed) {
+      /* bottle_neck exceded twice in a row, decrease ExceedAgo */
+      State->ExceedAgo -= /*BURST_INTERVAL*/ burstInterval / (BURST_LEN - 1);
+      if (State->ExceedAgo < 0)
+        State->ExceedAgo = 0;
+    }
+    else
+    {
+      State->ExceedAgo += (FrameSamples * 1000) / FS; /* ms */
+      State->PrevExceed = 1;
+    }
+  }
+  else
+  {
+    State->PrevExceed = 0;
+    State->ExceedAgo += (FrameSamples * 1000) / FS;     /* ms */
+  }
+
+  /* set burst flag if bottle neck not exceeded for long time */
+  if ((State->ExceedAgo > burstInterval) &&
+      (State->BurstCounter == 0))
+  {
+    if (State->PrevExceed)
+    {
+      State->BurstCounter = BURST_LEN - 1;
+    }
+    else
+    {
+      State->BurstCounter = BURST_LEN;
+    }
+  }
+
+
+  /* Update buffer delay */
+  TransmissionTime = StreamSize * 8.0 * 1000.0 / BottleNeck;  /* ms */
+  State->StillBuffered += TransmissionTime;
+  State->StillBuffered -= (FrameSamples * 1000) / FS;     /* ms */
+  if (State->StillBuffered < 0.0)
+  {
+    State->StillBuffered = 0.0;
+  }
+
+  return MinBytes;
+}
+
+
+/*
+ * update long-term average bitrate and amount of data in buffer
+ */
+void WebRtcIsac_UpdateRateModel(
+    RateModel *State,
+    int StreamSize,                    /* bytes in bitstream */
+    const int FrameSamples,            /* samples per frame */
+    const double BottleNeck)        /* bottle neck rate; excl headers (bps) */
+{
+  double TransmissionTime;
+
+  /* avoid the initial "high-rate" burst */
+  State->InitCounter = 0;
+
+  /* Update buffer delay */
+  TransmissionTime = StreamSize * 8.0 * 1000.0 / BottleNeck;  /* ms */
+  State->StillBuffered += TransmissionTime;
+  State->StillBuffered -= (FrameSamples * 1000) / FS;     /* ms */
+  if (State->StillBuffered < 0.0)
+    State->StillBuffered = 0.0;
+
+}
+
+
+void WebRtcIsac_InitRateModel(
+    RateModel *State)
+{
+  State->PrevExceed      = 0;                        /* boolean */
+  State->ExceedAgo       = 0;                        /* ms */
+  State->BurstCounter    = 0;                        /* packets */
+  State->InitCounter     = INIT_BURST_LEN + 10;    /* packets */
+  State->StillBuffered   = 1.0;                    /* ms */
+}
+
+int WebRtcIsac_GetNewFrameLength(
+    double bottle_neck,
+    int    current_framesamples)
+{
+  int new_framesamples;
+
+  const int Thld_20_30 = 20000;
+
+  //const int Thld_30_20 = 30000;
+  const int Thld_30_20 = 1000000;   // disable 20 ms frames
+
+  const int Thld_30_60 = 18000;
+  //const int Thld_30_60 = 0;      // disable 60 ms frames
+
+  const int Thld_60_30 = 27000;
+
+
+  new_framesamples = current_framesamples;
+
+  /* find new framelength */
+  switch(current_framesamples) {
+    case 320:
+      if (bottle_neck < Thld_20_30)
+        new_framesamples = 480;
+      break;
+    case 480:
+      if (bottle_neck < Thld_30_60)
+        new_framesamples = 960;
+      else if (bottle_neck > Thld_30_20)
+        new_framesamples = 320;
+      break;
+    case 960:
+      if (bottle_neck >= Thld_60_30)
+        new_framesamples = 480;
+      break;
+  }
+
+  return new_framesamples;
+}
+
+double WebRtcIsac_GetSnr(
+    double bottle_neck,
+    int    framesamples)
+{
+  double s2nr;
+
+  const double a_20 = -30.0;
+  const double b_20 = 0.8;
+  const double c_20 = 0.0;
+
+  const double a_30 = -23.0;
+  const double b_30 = 0.48;
+  const double c_30 = 0.0;
+
+  const double a_60 = -23.0;
+  const double b_60 = 0.53;
+  const double c_60 = 0.0;
+
+
+  /* find new SNR value */
+  switch(framesamples) {
+    case 320:
+      s2nr = a_20 + b_20 * bottle_neck * 0.001 + c_20 * bottle_neck *
+          bottle_neck * 0.000001;
+      break;
+    case 480:
+      s2nr = a_30 + b_30 * bottle_neck * 0.001 + c_30 * bottle_neck *
+          bottle_neck * 0.000001;
+      break;
+    case 960:
+      s2nr = a_60 + b_60 * bottle_neck * 0.001 + c_60 * bottle_neck *
+          bottle_neck * 0.000001;
+      break;
+    default:
+      s2nr = 0;
+  }
+
+  return s2nr;
+
+}
diff --git a/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h b/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h
new file mode 100644
index 0000000..e0ecf55
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h
@@ -0,0 +1,183 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * bandwidth_estimator.h
+ *
+ * This header file contains the API for the Bandwidth Estimator
+ * designed for iSAC.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_BANDWIDTH_ESTIMATOR_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_BANDWIDTH_ESTIMATOR_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+#define MIN_ISAC_BW     10000
+#define MIN_ISAC_BW_LB  10000
+#define MIN_ISAC_BW_UB  25000
+
+#define MAX_ISAC_BW     56000
+#define MAX_ISAC_BW_UB  32000
+#define MAX_ISAC_BW_LB  32000
+
+#define MIN_ISAC_MD     5
+#define MAX_ISAC_MD     25
+
+// assumed header size, in bytes; we don't know the exact number
+// (header compression may be used)
+#define HEADER_SIZE        35
+
+// Initial Frame-Size, in ms, for Wideband & Super-Wideband Mode
+#define INIT_FRAME_LEN_WB  60
+#define INIT_FRAME_LEN_SWB 30
+
+// Initial Bottleneck Estimate, in bits/sec, for
+// Wideband & Super-wideband mode
+#define INIT_BN_EST_WB     20e3f
+#define INIT_BN_EST_SWB    56e3f
+
+// Initial Header rate (header rate depends on frame-size),
+// in bits/sec, for Wideband & Super-Wideband mode.
+#define INIT_HDR_RATE_WB                                                \
+  ((float)HEADER_SIZE * 8.0f * 1000.0f / (float)INIT_FRAME_LEN_WB)
+#define INIT_HDR_RATE_SWB                                               \
+  ((float)HEADER_SIZE * 8.0f * 1000.0f / (float)INIT_FRAME_LEN_SWB)
+
+// number of packets in a row for a high rate burst
+#define BURST_LEN       3
+
+// ms, max time between two full bursts
+#define BURST_INTERVAL  500
+
+// number of packets in a row for initial high rate burst
+#define INIT_BURST_LEN  5
+
+// bits/s, rate for the first BURST_LEN packets
+#define INIT_RATE_WB       INIT_BN_EST_WB
+#define INIT_RATE_SWB      INIT_BN_EST_SWB
+
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+  /* This function initializes the struct                    */
+  /* to be called before using the struct for anything else  */
+  /* returns 0 if everything went fine, -1 otherwise         */
+  int32_t WebRtcIsac_InitBandwidthEstimator(
+      BwEstimatorstr*           bwest_str,
+      enum IsacSamplingRate encoderSampRate,
+      enum IsacSamplingRate decoderSampRate);
+
+  /* This function updates the receiving estimate                                                      */
+  /* Parameters:                                                                                       */
+  /* rtp_number    - value from RTP packet, from NetEq                                                 */
+  /* frame length  - length of signal frame in ms, from iSAC decoder                                   */
+  /* send_ts       - value in RTP header giving send time in samples                                   */
+  /* arr_ts        - value given by timeGetTime() time of arrival in samples of packet from NetEq      */
+  /* pksize        - size of packet in bytes, from NetEq                                               */
+  /* Index         - integer (range 0...23) indicating bottle neck & jitter as estimated by other side */
+  /* returns 0 if everything went fine, -1 otherwise                                                   */
+  int16_t WebRtcIsac_UpdateBandwidthEstimator(
+      BwEstimatorstr* bwest_str,
+      const uint16_t rtp_number,
+      const int32_t frame_length,
+      const uint32_t send_ts,
+      const uint32_t arr_ts,
+      const size_t pksize);
+
+  /* Update receiving estimates. Used when we only receive BWE index, no iSAC data packet. */
+  int16_t WebRtcIsac_UpdateUplinkBwImpl(
+      BwEstimatorstr*           bwest_str,
+      int16_t               Index,
+      enum IsacSamplingRate encoderSamplingFreq);
+
+  /* Returns the bandwidth/jitter estimation code (integer 0...23) to put in the sending iSAC payload */
+  void WebRtcIsac_GetDownlinkBwJitIndexImpl(
+      BwEstimatorstr* bwest_str,
+      int16_t* bottleneckIndex,
+      int16_t* jitterInfo,
+      enum IsacSamplingRate decoderSamplingFreq);
+
+  /* Returns the bandwidth estimation (in bps) */
+  int32_t WebRtcIsac_GetDownlinkBandwidth(
+      const BwEstimatorstr *bwest_str);
+
+  /* Returns the max delay (in ms) */
+  int32_t WebRtcIsac_GetDownlinkMaxDelay(
+      const BwEstimatorstr *bwest_str);
+
+  /* Returns the bandwidth that iSAC should send with in bps */
+  int32_t WebRtcIsac_GetUplinkBandwidth(const BwEstimatorstr* bwest_str);
+
+  /* Returns the max delay value from the other side in ms */
+  int32_t WebRtcIsac_GetUplinkMaxDelay(
+      const BwEstimatorstr *bwest_str);
+
+  /* Fills in an IsacExternalBandwidthInfo struct. */
+  void WebRtcIsacBw_GetBandwidthInfo(
+      BwEstimatorstr* bwest_str,
+      enum IsacSamplingRate decoder_sample_rate_hz,
+      IsacBandwidthInfo* bwinfo);
+
+  /* Uses the values from an IsacExternalBandwidthInfo struct. */
+  void WebRtcIsacBw_SetBandwidthInfo(BwEstimatorstr* bwest_str,
+                                     const IsacBandwidthInfo* bwinfo);
+
+  /*
+   * update amount of data in bottle neck buffer and burst handling
+   * returns minimum payload size (bytes)
+   */
+  int WebRtcIsac_GetMinBytes(
+      RateModel*         State,
+      int                StreamSize,    /* bytes in bitstream */
+      const int          FrameLen,      /* ms per frame */
+      const double       BottleNeck,    /* bottle neck rate; excl headers (bps) */
+      const double       DelayBuildUp,  /* max delay from bottleneck buffering (ms) */
+      enum ISACBandwidth bandwidth
+      /*,int16_t        frequentLargePackets*/);
+
+  /*
+   * update long-term average bitrate and amount of data in buffer
+   */
+  void WebRtcIsac_UpdateRateModel(
+      RateModel*   State,
+      int          StreamSize,                /* bytes in bitstream */
+      const int    FrameSamples,        /* samples per frame */
+      const double BottleNeck);       /* bottle neck rate; excl headers (bps) */
+
+
+  void WebRtcIsac_InitRateModel(
+      RateModel *State);
+
+  /* Returns the new framelength value (input argument: bottle_neck) */
+  int WebRtcIsac_GetNewFrameLength(
+      double bottle_neck,
+      int    current_framelength);
+
+  /* Returns the new SNR value (input argument: bottle_neck) */
+  double WebRtcIsac_GetSnr(
+      double bottle_neck,
+      int    new_framelength);
+
+
+  int16_t WebRtcIsac_UpdateUplinkJitter(
+      BwEstimatorstr*              bwest_str,
+      int32_t                  index);
+
+#if defined(__cplusplus)
+}
+#endif
+
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_BANDWIDTH_ESTIMATOR_H_ */
diff --git a/modules/audio_coding/codecs/isac/main/source/codec.h b/modules/audio_coding/codecs/isac/main/source/codec.h
new file mode 100644
index 0000000..af7efc0
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/codec.h
@@ -0,0 +1,232 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * codec.h
+ *
+ * This header file contains the calls to the internal encoder
+ * and decoder functions.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CODEC_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CODEC_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+void WebRtcIsac_ResetBitstream(Bitstr* bit_stream);
+
+int WebRtcIsac_EstimateBandwidth(BwEstimatorstr* bwest_str, Bitstr* streamdata,
+                                 size_t packet_size,
+                                 uint16_t rtp_seq_number,
+                                 uint32_t send_ts, uint32_t arr_ts,
+                                 enum IsacSamplingRate encoderSampRate,
+                                 enum IsacSamplingRate decoderSampRate);
+
+int WebRtcIsac_DecodeLb(const TransformTables* transform_tables,
+                        float* signal_out,
+                        ISACLBDecStruct* ISACdec_obj,
+                        int16_t* current_framesamples,
+                        int16_t isRCUPayload);
+
+int WebRtcIsac_DecodeRcuLb(float* signal_out, ISACLBDecStruct* ISACdec_obj,
+                           int16_t* current_framesamples);
+
+int WebRtcIsac_EncodeLb(const TransformTables* transform_tables,
+                        float* in,
+                        ISACLBEncStruct* ISACencLB_obj,
+                        int16_t codingMode,
+                        int16_t bottleneckIndex);
+
+int WebRtcIsac_EncodeStoredDataLb(const IsacSaveEncoderData* ISACSavedEnc_obj,
+                                  Bitstr* ISACBitStr_obj, int BWnumber,
+                                  float scale);
+
+int WebRtcIsac_EncodeStoredDataUb(
+    const ISACUBSaveEncDataStruct* ISACSavedEnc_obj, Bitstr* bitStream,
+    int32_t jitterInfo, float scale, enum ISACBandwidth bandwidth);
+
+int16_t WebRtcIsac_GetRedPayloadUb(
+    const ISACUBSaveEncDataStruct* ISACSavedEncObj, Bitstr* bitStreamObj,
+    enum ISACBandwidth bandwidth);
+
+/******************************************************************************
+ * WebRtcIsac_RateAllocation()
+ * Internal function to perform a rate-allocation for upper and lower-band,
+ * given a total rate.
+ *
+ * Input:
+ *   - inRateBitPerSec           : a total bit-rate in bits/sec.
+ *
+ * Output:
+ *   - rateLBBitPerSec           : a bit-rate allocated to the lower-band
+ *                                 in bits/sec.
+ *   - rateUBBitPerSec           : a bit-rate allocated to the upper-band
+ *                                 in bits/sec.
+ *
+ * Return value                  : 0 if rate allocation has been successful.
+ *                                -1 if failed to allocate rates.
+ */
+
+int16_t WebRtcIsac_RateAllocation(int32_t inRateBitPerSec,
+                                  double* rateLBBitPerSec,
+                                  double* rateUBBitPerSec,
+                                  enum ISACBandwidth* bandwidthKHz);
+
+
+/******************************************************************************
+ * WebRtcIsac_DecodeUb16()
+ *
+ * Decode the upper-band if the codec is in 0-16 kHz mode.
+ *
+ * Input/Output:
+ *       -ISACdec_obj        : pointer to the upper-band decoder object. The
+ *                             bit-stream is stored inside the decoder object.
+ *
+ * Output:
+ *       -signal_out         : decoded audio, 480 samples 30 ms.
+ *
+ * Return value              : >0 number of decoded bytes.
+ *                             <0 if an error occurred.
+ */
+int WebRtcIsac_DecodeUb16(const TransformTables* transform_tables,
+                          float* signal_out,
+                          ISACUBDecStruct* ISACdec_obj,
+                          int16_t isRCUPayload);
+
+/******************************************************************************
+ * WebRtcIsac_DecodeUb12()
+ *
+ * Decode the upper-band if the codec is in 0-12 kHz mode.
+ *
+ * Input/Output:
+ *       -ISACdec_obj        : pointer to the upper-band decoder object. The
+ *                             bit-stream is stored inside the decoder object.
+ *
+ * Output:
+ *       -signal_out         : decoded audio, 480 samples 30 ms.
+ *
+ * Return value              : >0 number of decoded bytes.
+ *                             <0 if an error occurred.
+ */
+int WebRtcIsac_DecodeUb12(const TransformTables* transform_tables,
+                          float* signal_out,
+                          ISACUBDecStruct* ISACdec_obj,
+                          int16_t isRCUPayload);
+
+/******************************************************************************
+ * WebRtcIsac_EncodeUb16()
+ *
+ * Encode the upper-band if the codec is in 0-16 kHz mode.
+ *
+ * Input:
+ *       -in                 : upper-band audio, 160 samples (10 ms).
+ *
+ * Input/Output:
+ *       -ISACdec_obj        : pointer to the upper-band encoder object. The
+ *                             bit-stream is stored inside the encoder object.
+ *
+ * Return value              : >0 number of encoded bytes.
+ *                             <0 if an error occurred.
+ */
+int WebRtcIsac_EncodeUb16(const TransformTables* transform_tables,
+                          float* in,
+                          ISACUBEncStruct* ISACenc_obj,
+                          int32_t jitterInfo);
+
+/******************************************************************************
+ * WebRtcIsac_EncodeUb12()
+ *
+ * Encode the upper-band if the codec is in 0-12 kHz mode.
+ *
+ * Input:
+ *       -in                 : upper-band audio, 160 samples (10 ms).
+ *
+ * Input/Output:
+ *       -ISACdec_obj        : pointer to the upper-band encoder object. The
+ *                             bit-stream is stored inside the encoder object.
+ *
+ * Return value              : >0 number of encoded bytes.
+ *                             <0 if an error occurred.
+ */
+int WebRtcIsac_EncodeUb12(const TransformTables* transform_tables,
+                          float* in,
+                          ISACUBEncStruct* ISACenc_obj,
+                          int32_t jitterInfo);
+
+/************************** initialization functions *************************/
+
+void WebRtcIsac_InitMasking(MaskFiltstr* maskdata);
+
+void WebRtcIsac_InitPreFilterbank(PreFiltBankstr* prefiltdata);
+
+void WebRtcIsac_InitPostFilterbank(PostFiltBankstr* postfiltdata);
+
+void WebRtcIsac_InitPitchFilter(PitchFiltstr* pitchfiltdata);
+
+void WebRtcIsac_InitPitchAnalysis(PitchAnalysisStruct* State);
+
+
+/**************************** transform functions ****************************/
+
+void WebRtcIsac_InitTransform(TransformTables* tables);
+
+void WebRtcIsac_Time2Spec(const TransformTables* tables,
+                          double* inre1,
+                          double* inre2,
+                          int16_t* outre,
+                          int16_t* outim,
+                          FFTstr* fftstr_obj);
+
+void WebRtcIsac_Spec2time(const TransformTables* tables,
+                          double* inre,
+                          double* inim,
+                          double* outre1,
+                          double* outre2,
+                          FFTstr* fftstr_obj);
+
+/******************************* filter functions ****************************/
+
+void WebRtcIsac_AllPoleFilter(double* InOut, double* Coef, size_t lengthInOut,
+                              int orderCoef);
+
+void WebRtcIsac_AllZeroFilter(double* In, double* Coef, size_t lengthInOut,
+                              int orderCoef, double* Out);
+
+void WebRtcIsac_ZeroPoleFilter(double* In, double* ZeroCoef, double* PoleCoef,
+                               size_t lengthInOut, int orderCoef, double* Out);
+
+
+/***************************** filterbank functions **************************/
+
+void WebRtcIsac_SplitAndFilterFloat(float* in, float* LP, float* HP,
+                                    double* LP_la, double* HP_la,
+                                    PreFiltBankstr* prefiltdata);
+
+
+void WebRtcIsac_FilterAndCombineFloat(float* InLP, float* InHP, float* Out,
+                                      PostFiltBankstr* postfiltdata);
+
+
+/************************* normalized lattice filters ************************/
+
+void WebRtcIsac_NormLatticeFilterMa(int orderCoef, float* stateF, float* stateG,
+                                    float* lat_in, double* filtcoeflo,
+                                    double* lat_out);
+
+void WebRtcIsac_NormLatticeFilterAr(int orderCoef, float* stateF, float* stateG,
+                                    double* lat_in, double* lo_filt_coef,
+                                    float* lat_out);
+
+void WebRtcIsac_Dir2Lat(double* a, int orderCoef, float* sth, float* cth);
+
+void WebRtcIsac_AutoCorr(double* r, const double* x, size_t N, size_t order);
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CODEC_H_ */
diff --git a/modules/audio_coding/codecs/isac/main/source/crc.c b/modules/audio_coding/codecs/isac/main/source/crc.c
new file mode 100644
index 0000000..1bb0827
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/crc.c
@@ -0,0 +1,111 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/crc.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+#define POLYNOMIAL 0x04c11db7L
+
+
+static const uint32_t kCrcTable[256] = {
+  0,          0x4c11db7,  0x9823b6e,  0xd4326d9,  0x130476dc, 0x17c56b6b,
+  0x1a864db2, 0x1e475005, 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61,
+  0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd, 0x4c11db70, 0x48d0c6c7,
+  0x4593e01e, 0x4152fda9, 0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75,
+  0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011, 0x791d4014, 0x7ddc5da3,
+  0x709f7b7a, 0x745e66cd, 0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039,
+  0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5, 0xbe2b5b58, 0xbaea46ef,
+  0xb7a96036, 0xb3687d81, 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d,
+  0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49, 0xc7361b4c, 0xc3f706fb,
+  0xceb42022, 0xca753d95, 0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1,
+  0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d, 0x34867077, 0x30476dc0,
+  0x3d044b19, 0x39c556ae, 0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072,
+  0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16, 0x18aeb13,  0x54bf6a4,
+  0x808d07d,  0xcc9cdca,  0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde,
+  0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02, 0x5e9f46bf, 0x5a5e5b08,
+  0x571d7dd1, 0x53dc6066, 0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba,
+  0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e, 0xbfa1b04b, 0xbb60adfc,
+  0xb6238b25, 0xb2e29692, 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6,
+  0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a, 0xe0b41de7, 0xe4750050,
+  0xe9362689, 0xedf73b3e, 0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2,
+  0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686, 0xd5b88683, 0xd1799b34,
+  0xdc3abded, 0xd8fba05a, 0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637,
+  0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb, 0x4f040d56, 0x4bc510e1,
+  0x46863638, 0x42472b8f, 0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53,
+  0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47, 0x36194d42, 0x32d850f5,
+  0x3f9b762c, 0x3b5a6b9b,  0x315d626, 0x7d4cb91,  0xa97ed48,  0xe56f0ff,
+  0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623, 0xf12f560e, 0xf5ee4bb9,
+  0xf8ad6d60, 0xfc6c70d7, 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b,
+  0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f, 0xc423cd6a, 0xc0e2d0dd,
+  0xcda1f604, 0xc960ebb3, 0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7,
+  0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b, 0x9b3660c6, 0x9ff77d71,
+  0x92b45ba8, 0x9675461f, 0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3,
+  0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640, 0x4e8ee645, 0x4a4ffbf2,
+  0x470cdd2b, 0x43cdc09c, 0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8,
+  0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24, 0x119b4be9, 0x155a565e,
+  0x18197087, 0x1cd86d30, 0x29f3d35,  0x65e2082,  0xb1d065b,  0xfdc1bec,
+  0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088, 0x2497d08d, 0x2056cd3a,
+  0x2d15ebe3, 0x29d4f654, 0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0,
+  0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c, 0xe3a1cbc1, 0xe760d676,
+  0xea23f0af, 0xeee2ed18, 0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4,
+  0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0, 0x9abc8bd5, 0x9e7d9662,
+  0x933eb0bb, 0x97ffad0c, 0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668,
+  0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4
+};
+
+
+
+
+/****************************************************************************
+ * WebRtcIsac_GetCrc(...)
+ *
+ * This function returns a 32 bit CRC checksum of a bit stream
+ *
+ * Input:
+ *  - bitstream              : payload bitstream
+ *  - len_bitstream_in_bytes : number of 8-bit words in the bit stream
+ *
+ * Output:
+ *  - crc                    : checksum
+ *
+ * Return value              :  0 - Ok
+ *                             -1 - Error
+ */
+
+int WebRtcIsac_GetCrc(const int16_t* bitstream,
+                      int len_bitstream_in_bytes,
+                      uint32_t* crc)
+{
+  uint8_t* bitstream_ptr_uw8;
+  uint32_t crc_state;
+  int byte_cntr;
+  int crc_tbl_indx;
+
+  /* Sanity Check. */
+  if (bitstream == NULL) {
+    return -1;
+  }
+  /* cast to UWord8 pointer */
+  bitstream_ptr_uw8 = (uint8_t *)bitstream;
+
+  /* initialize */
+  crc_state = 0xFFFFFFFF;
+
+  for (byte_cntr = 0; byte_cntr < len_bitstream_in_bytes; byte_cntr++) {
+    crc_tbl_indx = (WEBRTC_SPL_RSHIFT_U32(crc_state, 24) ^
+                       bitstream_ptr_uw8[byte_cntr]) & 0xFF;
+    crc_state = (crc_state << 8) ^ kCrcTable[crc_tbl_indx];
+  }
+
+  *crc = ~crc_state;
+  return 0;
+}
diff --git a/modules/audio_coding/codecs/isac/main/source/crc.h b/modules/audio_coding/codecs/isac/main/source/crc.h
new file mode 100644
index 0000000..b3197a1
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/crc.h
@@ -0,0 +1,46 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * crc.h
+ *
+ * Checksum functions
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CRC_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CRC_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+/****************************************************************************
+ * WebRtcIsac_GetCrc(...)
+ *
+ * This function returns a 32 bit CRC checksum of a bit stream
+ *
+ * Input:
+ *  - encoded      : payload bit stream
+ *  - no_of_word8s : number of 8-bit words in the bit stream
+ *
+ * Output:
+ *  - crc          : checksum
+ *
+ * Return value    :  0 - Ok
+ *                   -1 - Error
+ */
+
+int WebRtcIsac_GetCrc(
+    const int16_t* encoded,
+    int no_of_word8s,
+    uint32_t* crc);
+
+
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CRC_H_ */
diff --git a/modules/audio_coding/codecs/isac/main/source/decode.c b/modules/audio_coding/codecs/isac/main/source/decode.c
new file mode 100644
index 0000000..e13bc55
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/decode.c
@@ -0,0 +1,302 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * decode_B.c
+ *
+ * This file contains definition of funtions for decoding.
+ * Decoding of lower-band, including normal-decoding and RCU decoding.
+ * Decoding of upper-band, including 8-12 kHz, when the bandwidth is
+ * 0-12 kHz, and 8-16 kHz, when the bandwidth is 0-16 kHz.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/codec.h"
+#include "modules/audio_coding/codecs/isac/main/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/main/source/pitch_estimator.h"
+#include "modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h"
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+
+/*
+ * function to decode the bitstream
+ * returns the total number of bytes in the stream
+ */
+int WebRtcIsac_DecodeLb(const TransformTables* transform_tables,
+                        float* signal_out, ISACLBDecStruct* ISACdecLB_obj,
+                        int16_t* current_framesamples,
+                        int16_t isRCUPayload) {
+  int k;
+  int len, err;
+  int16_t bandwidthInd;
+
+  float LP_dec_float[FRAMESAMPLES_HALF];
+  float HP_dec_float[FRAMESAMPLES_HALF];
+
+  double LPw[FRAMESAMPLES_HALF];
+  double HPw[FRAMESAMPLES_HALF];
+  double LPw_pf[FRAMESAMPLES_HALF];
+
+  double lo_filt_coef[(ORDERLO + 1)*SUBFRAMES];
+  double hi_filt_coef[(ORDERHI + 1)*SUBFRAMES];
+
+  double real_f[FRAMESAMPLES_HALF];
+  double imag_f[FRAMESAMPLES_HALF];
+
+  double PitchLags[4];
+  double PitchGains[4];
+  double AvgPitchGain;
+  int16_t PitchGains_Q12[4];
+  int16_t AvgPitchGain_Q12;
+
+  float gain;
+
+  int frame_nb; /* counter */
+  int frame_mode; /* 0 30ms, 1 for 60ms */
+  /* Processed_samples: 480 (30, 60 ms). Cannot take other values. */
+
+  WebRtcIsac_ResetBitstream(&(ISACdecLB_obj->bitstr_obj));
+
+  len = 0;
+
+  /* Decode framelength and BW estimation - not used,
+     only for stream pointer*/
+  err = WebRtcIsac_DecodeFrameLen(&ISACdecLB_obj->bitstr_obj,
+                                  current_framesamples);
+  if (err < 0) {
+    return err;
+  }
+
+  /* Frame_mode:
+   * 0: indicates 30 ms frame (480 samples)
+   * 1: indicates 60 ms frame (960 samples) */
+  frame_mode = *current_framesamples / MAX_FRAMESAMPLES;
+
+  err = WebRtcIsac_DecodeSendBW(&ISACdecLB_obj->bitstr_obj, &bandwidthInd);
+  if (err < 0) {
+    return err;
+  }
+
+  /* One loop if it's one frame (20 or 30ms), 2 loops if 2 frames
+     bundled together (60ms). */
+  for (frame_nb = 0; frame_nb <= frame_mode; frame_nb++) {
+    /* Decode & de-quantize pitch parameters */
+    err = WebRtcIsac_DecodePitchGain(&ISACdecLB_obj->bitstr_obj,
+                                     PitchGains_Q12);
+    if (err < 0) {
+      return err;
+    }
+
+    err = WebRtcIsac_DecodePitchLag(&ISACdecLB_obj->bitstr_obj, PitchGains_Q12,
+                                    PitchLags);
+    if (err < 0) {
+      return err;
+    }
+
+    AvgPitchGain_Q12 = (PitchGains_Q12[0] + PitchGains_Q12[1] +
+        PitchGains_Q12[2] + PitchGains_Q12[3]) >> 2;
+
+    /* Decode & de-quantize filter coefficients. */
+    err = WebRtcIsac_DecodeLpc(&ISACdecLB_obj->bitstr_obj, lo_filt_coef,
+                               hi_filt_coef);
+    if (err < 0) {
+      return err;
+    }
+    /* Decode & de-quantize spectrum. */
+    len = WebRtcIsac_DecodeSpec(&ISACdecLB_obj->bitstr_obj, AvgPitchGain_Q12,
+                                kIsacLowerBand, real_f, imag_f);
+    if (len < 0) {
+      return len;
+    }
+
+    /* Inverse transform. */
+    WebRtcIsac_Spec2time(transform_tables, real_f, imag_f, LPw, HPw,
+                         &ISACdecLB_obj->fftstr_obj);
+
+    /* Convert PitchGains back to float for pitchfilter_post */
+    for (k = 0; k < 4; k++) {
+      PitchGains[k] = ((float)PitchGains_Q12[k]) / 4096;
+    }
+    if (isRCUPayload) {
+      for (k = 0; k < 240; k++) {
+        LPw[k] *= RCU_TRANSCODING_SCALE_INVERSE;
+        HPw[k] *= RCU_TRANSCODING_SCALE_INVERSE;
+      }
+    }
+
+    /* Inverse pitch filter. */
+    WebRtcIsac_PitchfilterPost(LPw, LPw_pf, &ISACdecLB_obj->pitchfiltstr_obj,
+                               PitchLags, PitchGains);
+    /* Convert AvgPitchGain back to float for computation of gain. */
+    AvgPitchGain = ((float)AvgPitchGain_Q12) / 4096;
+    gain = 1.0f - 0.45f * (float)AvgPitchGain;
+
+    for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+      /* Reduce gain to compensate for pitch enhancer. */
+      LPw_pf[k] *= gain;
+    }
+
+    if (isRCUPayload) {
+      for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+        /* Compensation for transcoding gain changes. */
+        LPw_pf[k] *= RCU_TRANSCODING_SCALE;
+        HPw[k] *= RCU_TRANSCODING_SCALE;
+      }
+    }
+    /* Perceptual post-filtering (using normalized lattice filter). */
+    WebRtcIsac_NormLatticeFilterAr(
+        ORDERLO, ISACdecLB_obj->maskfiltstr_obj.PostStateLoF,
+        (ISACdecLB_obj->maskfiltstr_obj).PostStateLoG, LPw_pf, lo_filt_coef,
+        LP_dec_float);
+    WebRtcIsac_NormLatticeFilterAr(
+        ORDERHI, ISACdecLB_obj->maskfiltstr_obj.PostStateHiF,
+        (ISACdecLB_obj->maskfiltstr_obj).PostStateHiG, HPw, hi_filt_coef,
+        HP_dec_float);
+
+    /* Recombine the 2 bands. */
+    WebRtcIsac_FilterAndCombineFloat(LP_dec_float, HP_dec_float,
+                                     signal_out + frame_nb * FRAMESAMPLES,
+                                     &ISACdecLB_obj->postfiltbankstr_obj);
+  }
+  return len;
+}
+
+
+/*
+ * This decode function is called when the codec is operating in 16 kHz
+ * bandwidth to decode the upperband, i.e. 8-16 kHz.
+ *
+ * Contrary to lower-band, the upper-band (8-16 kHz) is not split in
+ * frequency, but split to 12 sub-frames, i.e. twice as lower-band.
+ */
+int WebRtcIsac_DecodeUb16(const TransformTables* transform_tables,
+                          float* signal_out, ISACUBDecStruct* ISACdecUB_obj,
+                          int16_t isRCUPayload) {
+  int len, err;
+
+  double halfFrameFirst[FRAMESAMPLES_HALF];
+  double halfFrameSecond[FRAMESAMPLES_HALF];
+
+  double percepFilterParam[(UB_LPC_ORDER + 1) * (SUBFRAMES << 1) +
+                           (UB_LPC_ORDER + 1)];
+
+  double real_f[FRAMESAMPLES_HALF];
+  double imag_f[FRAMESAMPLES_HALF];
+  const int16_t kAveragePitchGain = 0; /* No pitch-gain for upper-band. */
+  len = 0;
+
+  /* Decode & de-quantize filter coefficients. */
+  memset(percepFilterParam, 0, sizeof(percepFilterParam));
+  err = WebRtcIsac_DecodeInterpolLpcUb(&ISACdecUB_obj->bitstr_obj,
+                                       percepFilterParam, isac16kHz);
+  if (err < 0) {
+    return err;
+  }
+
+  /* Decode & de-quantize spectrum. */
+  len = WebRtcIsac_DecodeSpec(&ISACdecUB_obj->bitstr_obj, kAveragePitchGain,
+                              kIsacUpperBand16, real_f, imag_f);
+  if (len < 0) {
+    return len;
+  }
+  if (isRCUPayload) {
+    int n;
+    for (n = 0; n < 240; n++) {
+      real_f[n] *= RCU_TRANSCODING_SCALE_UB_INVERSE;
+      imag_f[n] *= RCU_TRANSCODING_SCALE_UB_INVERSE;
+    }
+  }
+  /* Inverse transform. */
+  WebRtcIsac_Spec2time(transform_tables,
+                       real_f, imag_f, halfFrameFirst, halfFrameSecond,
+                       &ISACdecUB_obj->fftstr_obj);
+
+  /* Perceptual post-filtering (using normalized lattice filter). */
+  WebRtcIsac_NormLatticeFilterAr(
+      UB_LPC_ORDER, ISACdecUB_obj->maskfiltstr_obj.PostStateLoF,
+      (ISACdecUB_obj->maskfiltstr_obj).PostStateLoG, halfFrameFirst,
+      &percepFilterParam[(UB_LPC_ORDER + 1)], signal_out);
+
+  WebRtcIsac_NormLatticeFilterAr(
+      UB_LPC_ORDER, ISACdecUB_obj->maskfiltstr_obj.PostStateLoF,
+      (ISACdecUB_obj->maskfiltstr_obj).PostStateLoG, halfFrameSecond,
+      &percepFilterParam[(UB_LPC_ORDER + 1) * SUBFRAMES + (UB_LPC_ORDER + 1)],
+      &signal_out[FRAMESAMPLES_HALF]);
+
+  return len;
+}
+
+/*
+ * This decode function is called when the codec operates at 0-12 kHz
+ * bandwidth to decode the upperband, i.e. 8-12 kHz.
+ *
+ * At the encoder the upper-band is split into two band, 8-12 kHz & 12-16
+ * kHz, and only 8-12 kHz is encoded. At the decoder, 8-12 kHz band is
+ * reconstructed and 12-16 kHz replaced with zeros. Then two bands
+ * are combined, to reconstruct the upperband 8-16 kHz.
+ */
+int WebRtcIsac_DecodeUb12(const TransformTables* transform_tables,
+                          float* signal_out, ISACUBDecStruct* ISACdecUB_obj,
+                          int16_t isRCUPayload) {
+  int len, err;
+
+  float LP_dec_float[FRAMESAMPLES_HALF];
+  float HP_dec_float[FRAMESAMPLES_HALF];
+
+  double LPw[FRAMESAMPLES_HALF];
+  double HPw[FRAMESAMPLES_HALF];
+
+  double percepFilterParam[(UB_LPC_ORDER + 1)*SUBFRAMES];
+
+  double real_f[FRAMESAMPLES_HALF];
+  double imag_f[FRAMESAMPLES_HALF];
+  const int16_t kAveragePitchGain = 0; /* No pitch-gain for upper-band. */
+  len = 0;
+
+  /* Decode & dequantize filter coefficients. */
+  err = WebRtcIsac_DecodeInterpolLpcUb(&ISACdecUB_obj->bitstr_obj,
+                                       percepFilterParam, isac12kHz);
+  if (err < 0) {
+    return err;
+  }
+
+  /* Decode & de-quantize spectrum. */
+  len = WebRtcIsac_DecodeSpec(&ISACdecUB_obj->bitstr_obj, kAveragePitchGain,
+                              kIsacUpperBand12, real_f, imag_f);
+  if (len < 0) {
+    return len;
+  }
+
+  if (isRCUPayload) {
+    int n;
+    for (n = 0; n < 240; n++) {
+      real_f[n] *= RCU_TRANSCODING_SCALE_UB_INVERSE;
+      imag_f[n] *= RCU_TRANSCODING_SCALE_UB_INVERSE;
+    }
+  }
+  /* Inverse transform. */
+  WebRtcIsac_Spec2time(transform_tables,
+                       real_f, imag_f, LPw, HPw, &ISACdecUB_obj->fftstr_obj);
+  /* perceptual post-filtering (using normalized lattice filter) */
+  WebRtcIsac_NormLatticeFilterAr(UB_LPC_ORDER,
+                                 ISACdecUB_obj->maskfiltstr_obj.PostStateLoF,
+                                 (ISACdecUB_obj->maskfiltstr_obj).PostStateLoG,
+                                 LPw, percepFilterParam, LP_dec_float);
+  /* Zero for 12-16 kHz. */
+  memset(HP_dec_float, 0, sizeof(float) * (FRAMESAMPLES_HALF));
+  /* Recombine the 2 bands. */
+  WebRtcIsac_FilterAndCombineFloat(HP_dec_float, LP_dec_float, signal_out,
+                                   &ISACdecUB_obj->postfiltbankstr_obj);
+  return len;
+}
diff --git a/modules/audio_coding/codecs/isac/main/source/decode_bwe.c b/modules/audio_coding/codecs/isac/main/source/decode_bwe.c
new file mode 100644
index 0000000..89d970f
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/decode_bwe.c
@@ -0,0 +1,89 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+#include "modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h"
+#include "modules/audio_coding/codecs/isac/main/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/main/source/codec.h"
+
+
+int
+WebRtcIsac_EstimateBandwidth(
+    BwEstimatorstr*           bwest_str,
+    Bitstr*                   streamdata,
+    size_t                packet_size,
+    uint16_t              rtp_seq_number,
+    uint32_t              send_ts,
+    uint32_t              arr_ts,
+    enum IsacSamplingRate encoderSampRate,
+    enum IsacSamplingRate decoderSampRate)
+{
+  int16_t  index;
+  int16_t  frame_samples;
+  uint32_t sendTimestampIn16kHz;
+  uint32_t arrivalTimestampIn16kHz;
+  uint32_t diffSendTime;
+  uint32_t diffArrivalTime;
+  int err;
+
+  /* decode framelength and BW estimation */
+  err = WebRtcIsac_DecodeFrameLen(streamdata, &frame_samples);
+  if(err < 0)  // error check
+  {
+    return err;
+  }
+  err = WebRtcIsac_DecodeSendBW(streamdata, &index);
+  if(err < 0)  // error check
+  {
+    return err;
+  }
+
+  /* UPDATE ESTIMATES FROM OTHER SIDE */
+  err = WebRtcIsac_UpdateUplinkBwImpl(bwest_str, index, encoderSampRate);
+  if(err < 0)
+  {
+    return err;
+  }
+
+  // We like BWE to work at 16 kHz sampling rate,
+  // therefore, we have to change the timestamps accordingly.
+  // translate the send timestamp if required
+  diffSendTime = (uint32_t)((uint32_t)send_ts -
+                                  (uint32_t)bwest_str->senderTimestamp);
+  bwest_str->senderTimestamp = send_ts;
+
+  diffArrivalTime = (uint32_t)((uint32_t)arr_ts -
+                                     (uint32_t)bwest_str->receiverTimestamp);
+  bwest_str->receiverTimestamp = arr_ts;
+
+  if(decoderSampRate == kIsacSuperWideband)
+  {
+    diffArrivalTime = (uint32_t)diffArrivalTime >> 1;
+    diffSendTime = (uint32_t)diffSendTime >> 1;
+  }
+
+  // arrival timestamp in 16 kHz
+  arrivalTimestampIn16kHz = (uint32_t)((uint32_t)
+                                             bwest_str->prev_rec_arr_ts + (uint32_t)diffArrivalTime);
+  // send timestamp in 16 kHz
+  sendTimestampIn16kHz = (uint32_t)((uint32_t)
+                                          bwest_str->prev_rec_send_ts + (uint32_t)diffSendTime);
+
+  err = WebRtcIsac_UpdateBandwidthEstimator(bwest_str, rtp_seq_number,
+                                            (frame_samples * 1000) / FS, sendTimestampIn16kHz,
+                                            arrivalTimestampIn16kHz, packet_size);
+  // error check
+  if(err < 0)
+  {
+    return err;
+  }
+
+  return 0;
+}
diff --git a/modules/audio_coding/codecs/isac/main/source/encode.c b/modules/audio_coding/codecs/isac/main/source/encode.c
new file mode 100644
index 0000000..7963820
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/encode.c
@@ -0,0 +1,1258 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * encode.c
+ *
+ * This file contains definition of funtions for encoding.
+ * Decoding of upper-band, including 8-12 kHz, when the bandwidth is
+ * 0-12 kHz, and 8-16 kHz, when the bandwidth is 0-16 kHz.
+ *
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+#include "modules/audio_coding/codecs/isac/main/source/codec.h"
+#include "modules/audio_coding/codecs/isac/main/source/pitch_estimator.h"
+#include "modules/audio_coding/codecs/isac/main/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/main/source/arith_routines.h"
+#include "modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_analysis.h"
+#include "modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.h"
+
+
+#define UB_LOOKAHEAD 24
+
+
+/*
+  Rate allocation tables of lower and upper-band bottleneck for
+  12kHz & 16kHz bandwidth.
+
+  12 kHz bandwidth
+  -----------------
+  The overall bottleneck of the coder is between 38 kbps and 45 kbps. We have
+  considered 7 enteries, uniformly distributed in this interval, i.e. 38,
+  39.17, 40.33, 41.5, 42.67, 43.83 and 45. For every entery, the lower-band
+  and the upper-band bottlenecks are specified in
+  'kLowerBandBitRate12' and 'kUpperBandBitRate12'
+  tables, respectively. E.g. the overall rate of 41.5 kbps corresponts to a
+  bottleneck of 31 kbps for lower-band and 27 kbps for upper-band. Given an
+  overall bottleneck of the codec, we use linear interpolation to get
+  lower-band and upper-band bottlenecks.
+
+  16 kHz bandwidth
+  -----------------
+  The overall bottleneck of the coder is between 50 kbps and 56 kbps. We have
+  considered 7 enteries, uniformly distributed in this interval, i.e. 50, 51.2,
+  52.4, 53.6, 54.8 and 56. For every entery, the lower-band and the upper-band
+  bottlenecks are specified in 'kLowerBandBitRate16' and
+  'kUpperBandBitRate16' tables, respectively. E.g. the overall rate
+  of 53.6 kbps corresponts to a bottleneck of 32 kbps for lower-band and 30
+  kbps for upper-band. Given an overall bottleneck of the codec, we use linear
+  interpolation to get lower-band and upper-band bottlenecks.
+
+ */
+
+/*     38  39.17  40.33   41.5  42.67  43.83     45 */
+static const int16_t kLowerBandBitRate12[7] = {
+    29000, 30000, 30000, 31000, 31000, 32000, 32000 };
+static const int16_t kUpperBandBitRate12[7] = {
+    25000, 25000, 27000, 27000, 29000, 29000, 32000 };
+
+/*    50     51.2  52.4   53.6   54.8    56 */
+static const int16_t kLowerBandBitRate16[6] = {
+    31000, 31000, 32000, 32000, 32000, 32000 };
+static const int16_t kUpperBandBitRate16[6] = {
+    28000, 29000, 29000, 30000, 31000, 32000 };
+
+/******************************************************************************
+ * WebRtcIsac_RateAllocation()
+ * Internal function to perform a rate-allocation for upper and lower-band,
+ * given a total rate.
+ *
+ * Input:
+ *   - inRateBitPerSec           : a total bottleneck in bits/sec.
+ *
+ * Output:
+ *   - rateLBBitPerSec           : a bottleneck allocated to the lower-band
+ *                                 in bits/sec.
+ *   - rateUBBitPerSec           : a bottleneck allocated to the upper-band
+ *                                 in bits/sec.
+ *
+ * Return value                  : 0 if rate allocation has been successful.
+ *                                -1 if failed to allocate rates.
+ */
+
+int16_t WebRtcIsac_RateAllocation(int32_t inRateBitPerSec,
+                                        double* rateLBBitPerSec,
+                                        double* rateUBBitPerSec,
+                                        enum ISACBandwidth* bandwidthKHz) {
+  int16_t idx;
+  double idxD;
+  double idxErr;
+  if (inRateBitPerSec < 38000) {
+    /* If the given overall bottleneck is less than 38000 then
+     * then codec has to operate in wideband mode, i.e. 8 kHz
+     * bandwidth. */
+    *rateLBBitPerSec = (int16_t)((inRateBitPerSec > 32000) ?
+        32000 : inRateBitPerSec);
+    *rateUBBitPerSec = 0;
+    *bandwidthKHz = isac8kHz;
+  } else if ((inRateBitPerSec >= 38000) && (inRateBitPerSec < 50000)) {
+    /* At a bottleneck between 38 and 50 kbps the codec is operating
+     * at 12 kHz bandwidth. Using xxxBandBitRate12[] to calculates
+     * upper/lower bottleneck */
+
+    /* Find the bottlenecks by linear interpolation,
+     * step is (45000 - 38000)/6.0 we use the inverse of it. */
+    const double stepSizeInv = 8.5714286e-4;
+    idxD = (inRateBitPerSec - 38000) * stepSizeInv;
+    idx = (idxD >= 6) ? 6 : ((int16_t)idxD);
+    idxErr = idxD - idx;
+    *rateLBBitPerSec = kLowerBandBitRate12[idx];
+    *rateUBBitPerSec = kUpperBandBitRate12[idx];
+
+    if (idx < 6) {
+      *rateLBBitPerSec += (int16_t)(
+          idxErr * (kLowerBandBitRate12[idx + 1] - kLowerBandBitRate12[idx]));
+      *rateUBBitPerSec += (int16_t)(
+          idxErr * (kUpperBandBitRate12[idx + 1] - kUpperBandBitRate12[idx]));
+    }
+    *bandwidthKHz = isac12kHz;
+  } else if ((inRateBitPerSec >= 50000) && (inRateBitPerSec <= 56000)) {
+    /* A bottleneck between 50 and 56 kbps corresponds to bandwidth
+     * of 16 kHz. Using xxxBandBitRate16[] to calculates
+     * upper/lower bottleneck. */
+
+    /* Find the bottlenecks by linear interpolation
+     * step is (56000 - 50000)/5 we use the inverse of it. */
+    const double stepSizeInv = 8.3333333e-4;
+    idxD = (inRateBitPerSec - 50000) * stepSizeInv;
+    idx = (idxD >= 5) ? 5 : ((int16_t)idxD);
+    idxErr = idxD - idx;
+    *rateLBBitPerSec = kLowerBandBitRate16[idx];
+    *rateUBBitPerSec  = kUpperBandBitRate16[idx];
+
+    if (idx < 5) {
+      *rateLBBitPerSec += (int16_t)(idxErr *
+          (kLowerBandBitRate16[idx + 1] -
+              kLowerBandBitRate16[idx]));
+
+      *rateUBBitPerSec += (int16_t)(idxErr *
+          (kUpperBandBitRate16[idx + 1] -
+              kUpperBandBitRate16[idx]));
+    }
+    *bandwidthKHz = isac16kHz;
+  } else {
+    /* Out-of-range botlteneck value. */
+    return -1;
+  }
+
+  /* limit the values. */
+  *rateLBBitPerSec = (*rateLBBitPerSec > 32000) ? 32000 : *rateLBBitPerSec;
+  *rateUBBitPerSec = (*rateUBBitPerSec > 32000) ? 32000 : *rateUBBitPerSec;
+  return 0;
+}
+
+
+void WebRtcIsac_ResetBitstream(Bitstr* bit_stream) {
+  bit_stream->W_upper = 0xFFFFFFFF;
+  bit_stream->stream_index = 0;
+  bit_stream->streamval = 0;
+}
+
+int WebRtcIsac_EncodeLb(const TransformTables* transform_tables,
+                        float* in, ISACLBEncStruct* ISACencLB_obj,
+                        int16_t codingMode,
+                        int16_t bottleneckIndex) {
+  int stream_length = 0;
+  int err;
+  int k;
+  int iterCntr;
+
+  double lofilt_coef[(ORDERLO + 1)*SUBFRAMES];
+  double hifilt_coef[(ORDERHI + 1)*SUBFRAMES];
+  float LP[FRAMESAMPLES_HALF];
+  float HP[FRAMESAMPLES_HALF];
+
+  double LP_lookahead[FRAMESAMPLES_HALF];
+  double HP_lookahead[FRAMESAMPLES_HALF];
+  double LP_lookahead_pf[FRAMESAMPLES_HALF + QLOOKAHEAD];
+  double LPw[FRAMESAMPLES_HALF];
+
+  double HPw[FRAMESAMPLES_HALF];
+  double LPw_pf[FRAMESAMPLES_HALF];
+  int16_t fre[FRAMESAMPLES_HALF];   /* Q7 */
+  int16_t fim[FRAMESAMPLES_HALF];   /* Q7 */
+
+  double PitchLags[4];
+  double PitchGains[4];
+  int16_t PitchGains_Q12[4];
+  int16_t AvgPitchGain_Q12;
+
+  int frame_mode; /* 0 for 30ms, 1 for 60ms */
+  int status = 0;
+  int my_index;
+  transcode_obj transcodingParam;
+  double bytesLeftSpecCoding;
+  uint16_t payloadLimitBytes;
+
+  /* Copy new frame-length and bottleneck rate only for the first 10 ms data */
+  if (ISACencLB_obj->buffer_index == 0) {
+    /* Set the framelength for the next packet. */
+    ISACencLB_obj->current_framesamples = ISACencLB_obj->new_framelength;
+  }
+  /* 'frame_mode' is 0 (30 ms) or 1 (60 ms). */
+  frame_mode = ISACencLB_obj->current_framesamples / MAX_FRAMESAMPLES;
+
+  /* buffer speech samples (by 10ms packet) until the frame-length */
+  /* is reached (30 or 60 ms).                                     */
+  /*****************************************************************/
+
+  /* fill the buffer with 10ms input data */
+  for (k = 0; k < FRAMESAMPLES_10ms; k++) {
+    ISACencLB_obj->data_buffer_float[k + ISACencLB_obj->buffer_index] = in[k];
+  }
+
+  /* If buffersize is not equal to current framesize then increase index
+   * and return. We do no encoding untill we have enough audio.  */
+  if (ISACencLB_obj->buffer_index + FRAMESAMPLES_10ms != FRAMESAMPLES) {
+    ISACencLB_obj->buffer_index += FRAMESAMPLES_10ms;
+    return 0;
+  }
+  /* If buffer reached the right size, reset index and continue with
+   * encoding the frame. */
+  ISACencLB_obj->buffer_index = 0;
+
+  /* End of buffer function. */
+  /**************************/
+
+  /* Encoding */
+  /************/
+
+  if (frame_mode == 0 || ISACencLB_obj->frame_nb == 0) {
+    /* This is to avoid Linux warnings until we change 'int' to 'Word32'
+     * at all places. */
+    int intVar;
+    /* reset bitstream */
+    WebRtcIsac_ResetBitstream(&(ISACencLB_obj->bitstr_obj));
+
+    if ((codingMode == 0) && (frame_mode == 0) &&
+        (ISACencLB_obj->enforceFrameSize == 0)) {
+      ISACencLB_obj->new_framelength = WebRtcIsac_GetNewFrameLength(
+          ISACencLB_obj->bottleneck, ISACencLB_obj->current_framesamples);
+    }
+
+    ISACencLB_obj->s2nr = WebRtcIsac_GetSnr(
+        ISACencLB_obj->bottleneck, ISACencLB_obj->current_framesamples);
+
+    /* Encode frame length. */
+    status = WebRtcIsac_EncodeFrameLen(
+        ISACencLB_obj->current_framesamples, &ISACencLB_obj->bitstr_obj);
+    if (status < 0) {
+      /* Wrong frame size. */
+      return status;
+    }
+    /* Save framelength for multiple packets memory. */
+    ISACencLB_obj->SaveEnc_obj.framelength =
+        ISACencLB_obj->current_framesamples;
+
+    /* To be used for Redundant Coding. */
+    ISACencLB_obj->lastBWIdx = bottleneckIndex;
+    intVar = (int)bottleneckIndex;
+    WebRtcIsac_EncodeReceiveBw(&intVar, &ISACencLB_obj->bitstr_obj);
+  }
+
+  /* Split signal in two bands. */
+  WebRtcIsac_SplitAndFilterFloat(ISACencLB_obj->data_buffer_float, LP, HP,
+                                 LP_lookahead, HP_lookahead,
+                                 &ISACencLB_obj->prefiltbankstr_obj);
+
+  /* estimate pitch parameters and pitch-filter lookahead signal */
+  WebRtcIsac_PitchAnalysis(LP_lookahead, LP_lookahead_pf,
+                           &ISACencLB_obj->pitchanalysisstr_obj, PitchLags,
+                           PitchGains);
+
+  /* Encode in FIX Q12. */
+
+  /* Convert PitchGain to Fixed point. */
+  for (k = 0; k < PITCH_SUBFRAMES; k++) {
+    PitchGains_Q12[k] = (int16_t)(PitchGains[k] * 4096.0);
+  }
+
+  /* Set where to store data in multiple packets memory. */
+  if (frame_mode == 0 || ISACencLB_obj->frame_nb == 0) {
+    ISACencLB_obj->SaveEnc_obj.startIdx = 0;
+  } else {
+    ISACencLB_obj->SaveEnc_obj.startIdx = 1;
+  }
+
+  /* Quantize & encode pitch parameters. */
+  WebRtcIsac_EncodePitchGain(PitchGains_Q12, &ISACencLB_obj->bitstr_obj,
+                             &ISACencLB_obj->SaveEnc_obj);
+  WebRtcIsac_EncodePitchLag(PitchLags, PitchGains_Q12,
+                            &ISACencLB_obj->bitstr_obj,
+                            &ISACencLB_obj->SaveEnc_obj);
+
+  AvgPitchGain_Q12 = (PitchGains_Q12[0] + PitchGains_Q12[1] +
+      PitchGains_Q12[2] + PitchGains_Q12[3]) >> 2;
+
+  /* Find coefficients for perceptual pre-filters. */
+  WebRtcIsac_GetLpcCoefLb(LP_lookahead_pf, HP_lookahead,
+                          &ISACencLB_obj->maskfiltstr_obj, ISACencLB_obj->s2nr,
+                          PitchGains_Q12, lofilt_coef, hifilt_coef);
+
+  /* Code LPC model and shape - gains not quantized yet. */
+  WebRtcIsac_EncodeLpcLb(lofilt_coef, hifilt_coef, &ISACencLB_obj->bitstr_obj,
+                         &ISACencLB_obj->SaveEnc_obj);
+
+  /* Convert PitchGains back to FLOAT for pitchfilter_pre. */
+  for (k = 0; k < 4; k++) {
+    PitchGains[k] = ((float)PitchGains_Q12[k]) / 4096;
+  }
+
+  /* Store the state of arithmetic coder before coding LPC gains. */
+  transcodingParam.W_upper = ISACencLB_obj->bitstr_obj.W_upper;
+  transcodingParam.stream_index = ISACencLB_obj->bitstr_obj.stream_index;
+  transcodingParam.streamval = ISACencLB_obj->bitstr_obj.streamval;
+  transcodingParam.stream[0] =
+      ISACencLB_obj->bitstr_obj.stream[ISACencLB_obj->bitstr_obj.stream_index -
+                                       2];
+  transcodingParam.stream[1] =
+      ISACencLB_obj->bitstr_obj.stream[ISACencLB_obj->bitstr_obj.stream_index -
+                                       1];
+  transcodingParam.stream[2] =
+      ISACencLB_obj->bitstr_obj.stream[ISACencLB_obj->bitstr_obj.stream_index];
+
+  /* Store LPC Gains before encoding them. */
+  for (k = 0; k < SUBFRAMES; k++) {
+    transcodingParam.loFiltGain[k] = lofilt_coef[(LPC_LOBAND_ORDER + 1) * k];
+    transcodingParam.hiFiltGain[k] = hifilt_coef[(LPC_HIBAND_ORDER + 1) * k];
+  }
+
+  /* Code gains */
+  WebRtcIsac_EncodeLpcGainLb(lofilt_coef, hifilt_coef,
+                             &ISACencLB_obj->bitstr_obj,
+                             &ISACencLB_obj->SaveEnc_obj);
+
+  /* Get the correct value for the payload limit and calculate the
+   * number of bytes left for coding the spectrum. */
+  if ((frame_mode == 1) && (ISACencLB_obj->frame_nb == 0)) {
+    /* It is a 60ms and we are in the first 30ms then the limit at
+     * this point should be half of the assigned value. */
+    payloadLimitBytes = ISACencLB_obj->payloadLimitBytes60 >> 1;
+  } else if (frame_mode == 0) {
+    /* It is a 30ms frame */
+    /* Subract 3 because termination process may add 3 bytes. */
+    payloadLimitBytes = ISACencLB_obj->payloadLimitBytes30 - 3;
+  } else {
+    /* This is the second half of a 60ms frame. */
+    /* Subract 3 because termination process may add 3 bytes. */
+    payloadLimitBytes = ISACencLB_obj->payloadLimitBytes60 - 3;
+  }
+  bytesLeftSpecCoding = payloadLimitBytes - transcodingParam.stream_index;
+
+  /* Perceptual pre-filtering (using normalized lattice filter). */
+  /* Low-band filtering. */
+  WebRtcIsac_NormLatticeFilterMa(ORDERLO,
+                                 ISACencLB_obj->maskfiltstr_obj.PreStateLoF,
+                                 ISACencLB_obj->maskfiltstr_obj.PreStateLoG,
+                                 LP, lofilt_coef, LPw);
+  /* High-band filtering. */
+  WebRtcIsac_NormLatticeFilterMa(ORDERHI,
+                                 ISACencLB_obj->maskfiltstr_obj.PreStateHiF,
+                                 ISACencLB_obj->maskfiltstr_obj.PreStateHiG,
+                                 HP, hifilt_coef, HPw);
+  /* Pitch filter. */
+  WebRtcIsac_PitchfilterPre(LPw, LPw_pf, &ISACencLB_obj->pitchfiltstr_obj,
+                            PitchLags, PitchGains);
+  /* Transform */
+  WebRtcIsac_Time2Spec(transform_tables,
+                       LPw_pf, HPw, fre, fim, &ISACencLB_obj->fftstr_obj);
+
+  /* Save data for multiple packets memory. */
+  my_index = ISACencLB_obj->SaveEnc_obj.startIdx * FRAMESAMPLES_HALF;
+  memcpy(&ISACencLB_obj->SaveEnc_obj.fre[my_index], fre, sizeof(fre));
+  memcpy(&ISACencLB_obj->SaveEnc_obj.fim[my_index], fim, sizeof(fim));
+
+  ISACencLB_obj->SaveEnc_obj.AvgPitchGain[ISACencLB_obj->SaveEnc_obj.startIdx] =
+      AvgPitchGain_Q12;
+
+  /* Quantization and loss-less coding. */
+  err = WebRtcIsac_EncodeSpec(fre, fim, AvgPitchGain_Q12, kIsacLowerBand,
+                              &ISACencLB_obj->bitstr_obj);
+  if ((err < 0) && (err != -ISAC_DISALLOWED_BITSTREAM_LENGTH)) {
+    /* There has been an error but it was not too large payload
+       (we can cure too large payload). */
+    if (frame_mode == 1 && ISACencLB_obj->frame_nb == 1) {
+      /* If this is the second 30ms of a 60ms frame reset
+         this such that in the next call encoder starts fresh. */
+      ISACencLB_obj->frame_nb = 0;
+    }
+    return err;
+  }
+  iterCntr = 0;
+  while ((ISACencLB_obj->bitstr_obj.stream_index > payloadLimitBytes) ||
+      (err == -ISAC_DISALLOWED_BITSTREAM_LENGTH)) {
+    double bytesSpecCoderUsed;
+    double transcodeScale;
+
+    if (iterCntr >= MAX_PAYLOAD_LIMIT_ITERATION) {
+      /* We were not able to limit the payload size */
+      if ((frame_mode == 1) && (ISACencLB_obj->frame_nb == 0)) {
+        /* This was the first 30ms of a 60ms frame. Although
+           the payload is larger than it should be but we let
+           the second 30ms be encoded. Maybe together we
+           won't exceed the limit. */
+        ISACencLB_obj->frame_nb = 1;
+        return 0;
+      } else if ((frame_mode == 1) && (ISACencLB_obj->frame_nb == 1)) {
+        ISACencLB_obj->frame_nb = 0;
+      }
+
+      if (err != -ISAC_DISALLOWED_BITSTREAM_LENGTH) {
+        return -ISAC_PAYLOAD_LARGER_THAN_LIMIT;
+      } else {
+        return status;
+      }
+    }
+
+    if (err == -ISAC_DISALLOWED_BITSTREAM_LENGTH) {
+      bytesSpecCoderUsed = STREAM_SIZE_MAX;
+      /* Being conservative */
+      transcodeScale = bytesLeftSpecCoding / bytesSpecCoderUsed * 0.5;
+    } else {
+      bytesSpecCoderUsed = ISACencLB_obj->bitstr_obj.stream_index -
+          transcodingParam.stream_index;
+      transcodeScale = bytesLeftSpecCoding / bytesSpecCoderUsed;
+    }
+
+    /* To be safe, we reduce the scale depending on
+       the number of iterations. */
+    transcodeScale *= (1.0 - (0.9 * (double)iterCntr /
+        (double)MAX_PAYLOAD_LIMIT_ITERATION));
+
+    /* Scale the LPC Gains. */
+    for (k = 0; k < SUBFRAMES; k++) {
+      lofilt_coef[(LPC_LOBAND_ORDER + 1) * k] =
+          transcodingParam.loFiltGain[k] * transcodeScale;
+      hifilt_coef[(LPC_HIBAND_ORDER + 1) * k] =
+          transcodingParam.hiFiltGain[k] * transcodeScale;
+      transcodingParam.loFiltGain[k] = lofilt_coef[(LPC_LOBAND_ORDER + 1) * k];
+      transcodingParam.hiFiltGain[k] = hifilt_coef[(LPC_HIBAND_ORDER + 1) * k];
+    }
+
+    /* Scale DFT coefficients. */
+    for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+      fre[k] = (int16_t)(fre[k] * transcodeScale);
+      fim[k] = (int16_t)(fim[k] * transcodeScale);
+    }
+
+    /* Save data for multiple packets memory. */
+    my_index = ISACencLB_obj->SaveEnc_obj.startIdx * FRAMESAMPLES_HALF;
+    memcpy(&ISACencLB_obj->SaveEnc_obj.fre[my_index], fre, sizeof(fre));
+    memcpy(&ISACencLB_obj->SaveEnc_obj.fim[my_index], fim, sizeof(fim));
+
+    /* Re-store the state of arithmetic coder before coding LPC gains. */
+    ISACencLB_obj->bitstr_obj.W_upper = transcodingParam.W_upper;
+    ISACencLB_obj->bitstr_obj.stream_index = transcodingParam.stream_index;
+    ISACencLB_obj->bitstr_obj.streamval = transcodingParam.streamval;
+    ISACencLB_obj->bitstr_obj.stream[transcodingParam.stream_index - 2] =
+        transcodingParam.stream[0];
+    ISACencLB_obj->bitstr_obj.stream[transcodingParam.stream_index - 1] =
+        transcodingParam.stream[1];
+    ISACencLB_obj->bitstr_obj.stream[transcodingParam.stream_index] =
+        transcodingParam.stream[2];
+
+    /* Code gains. */
+    WebRtcIsac_EncodeLpcGainLb(lofilt_coef, hifilt_coef,
+                               &ISACencLB_obj->bitstr_obj,
+                               &ISACencLB_obj->SaveEnc_obj);
+
+    /* Update the number of bytes left for encoding the spectrum. */
+    bytesLeftSpecCoding = payloadLimitBytes - transcodingParam.stream_index;
+
+    /* Encode the spectrum. */
+    err = WebRtcIsac_EncodeSpec(fre, fim, AvgPitchGain_Q12, kIsacLowerBand,
+                                &ISACencLB_obj->bitstr_obj);
+
+    if ((err < 0) && (err != -ISAC_DISALLOWED_BITSTREAM_LENGTH)) {
+      /* There has been an error but it was not too large
+         payload (we can cure too large payload). */
+      if (frame_mode == 1 && ISACencLB_obj->frame_nb == 1) {
+        /* If this is the second 30 ms of a 60 ms frame reset
+           this such that in the next call encoder starts fresh. */
+        ISACencLB_obj->frame_nb = 0;
+      }
+      return err;
+    }
+    iterCntr++;
+  }
+
+  /* If 60 ms frame-size and just processed the first 30 ms, */
+  /* go back to main function to buffer the other 30 ms speech frame. */
+  if (frame_mode == 1) {
+    if (ISACencLB_obj->frame_nb == 0) {
+      ISACencLB_obj->frame_nb = 1;
+      return 0;
+    } else if (ISACencLB_obj->frame_nb == 1) {
+      ISACencLB_obj->frame_nb = 0;
+      /* Also update the frame-length for next packet,
+         in Adaptive mode only. */
+      if (codingMode == 0 && (ISACencLB_obj->enforceFrameSize == 0)) {
+        ISACencLB_obj->new_framelength =
+            WebRtcIsac_GetNewFrameLength(ISACencLB_obj->bottleneck,
+                                         ISACencLB_obj->current_framesamples);
+      }
+    }
+  } else {
+    ISACencLB_obj->frame_nb = 0;
+  }
+
+  /* Complete arithmetic coding. */
+  stream_length = WebRtcIsac_EncTerminate(&ISACencLB_obj->bitstr_obj);
+  return stream_length;
+}
+
+
+
+static int LimitPayloadUb(ISACUBEncStruct* ISACencUB_obj,
+                          uint16_t payloadLimitBytes,
+                          double bytesLeftSpecCoding,
+                          transcode_obj* transcodingParam,
+                          int16_t* fre, int16_t* fim,
+                          double* lpcGains, enum ISACBand band, int status) {
+
+  int iterCntr = 0;
+  int k;
+  double bytesSpecCoderUsed;
+  double transcodeScale;
+  const int16_t kAveragePitchGain = 0.0;
+
+  do {
+    if (iterCntr >= MAX_PAYLOAD_LIMIT_ITERATION) {
+      /* We were not able to limit the payload size. */
+      return -ISAC_PAYLOAD_LARGER_THAN_LIMIT;
+    }
+
+    if (status == -ISAC_DISALLOWED_BITSTREAM_LENGTH) {
+      bytesSpecCoderUsed = STREAM_SIZE_MAX;
+      /* Being conservative. */
+      transcodeScale = bytesLeftSpecCoding / bytesSpecCoderUsed * 0.5;
+    } else {
+      bytesSpecCoderUsed = ISACencUB_obj->bitstr_obj.stream_index -
+          transcodingParam->stream_index;
+      transcodeScale = bytesLeftSpecCoding / bytesSpecCoderUsed;
+    }
+
+    /* To be safe, we reduce the scale depending on the
+       number of iterations. */
+    transcodeScale *= (1.0 - (0.9 * (double)iterCntr /
+        (double)MAX_PAYLOAD_LIMIT_ITERATION));
+
+    /* Scale the LPC Gains. */
+    if (band == kIsacUpperBand16) {
+      /* Two sets of coefficients if 16 kHz. */
+      for (k = 0; k < SUBFRAMES; k++) {
+        transcodingParam->loFiltGain[k] *= transcodeScale;
+        transcodingParam->hiFiltGain[k] *= transcodeScale;
+      }
+    } else {
+      /* One sets of coefficients if 12 kHz. */
+      for (k = 0; k < SUBFRAMES; k++) {
+        transcodingParam->loFiltGain[k] *= transcodeScale;
+      }
+    }
+
+    /* Scale DFT coefficients. */
+    for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+      fre[k] = (int16_t)(fre[k] * transcodeScale + 0.5);
+      fim[k] = (int16_t)(fim[k] * transcodeScale + 0.5);
+    }
+    /* Store FFT coefficients for multiple encoding. */
+    memcpy(ISACencUB_obj->SaveEnc_obj.realFFT, fre,
+          sizeof(ISACencUB_obj->SaveEnc_obj.realFFT));
+    memcpy(ISACencUB_obj->SaveEnc_obj.imagFFT, fim,
+           sizeof(ISACencUB_obj->SaveEnc_obj.imagFFT));
+
+    /* Store the state of arithmetic coder before coding LPC gains */
+    ISACencUB_obj->bitstr_obj.W_upper = transcodingParam->W_upper;
+    ISACencUB_obj->bitstr_obj.stream_index = transcodingParam->stream_index;
+    ISACencUB_obj->bitstr_obj.streamval = transcodingParam->streamval;
+    ISACencUB_obj->bitstr_obj.stream[transcodingParam->stream_index - 2] =
+        transcodingParam->stream[0];
+    ISACencUB_obj->bitstr_obj.stream[transcodingParam->stream_index - 1] =
+        transcodingParam->stream[1];
+    ISACencUB_obj->bitstr_obj.stream[transcodingParam->stream_index] =
+        transcodingParam->stream[2];
+
+    /* Store the gains for multiple encoding. */
+    memcpy(ISACencUB_obj->SaveEnc_obj.lpcGain, lpcGains,
+           SUBFRAMES * sizeof(double));
+    /* Entropy Code lpc-gains, indices are stored for a later use.*/
+    WebRtcIsac_EncodeLpcGainUb(transcodingParam->loFiltGain,
+                               &ISACencUB_obj->bitstr_obj,
+                               ISACencUB_obj->SaveEnc_obj.lpcGainIndex);
+
+    /* If 16kHz should do one more set. */
+    if (band == kIsacUpperBand16) {
+      /* Store the gains for multiple encoding. */
+      memcpy(&ISACencUB_obj->SaveEnc_obj.lpcGain[SUBFRAMES],
+             &lpcGains[SUBFRAMES], SUBFRAMES * sizeof(double));
+      /* Entropy Code lpc-gains, indices are stored for a later use.*/
+      WebRtcIsac_EncodeLpcGainUb(
+          transcodingParam->hiFiltGain, &ISACencUB_obj->bitstr_obj,
+          &ISACencUB_obj->SaveEnc_obj.lpcGainIndex[SUBFRAMES]);
+    }
+
+    /* Update the number of bytes left for encoding the spectrum. */
+    bytesLeftSpecCoding = payloadLimitBytes -
+        ISACencUB_obj->bitstr_obj.stream_index;
+
+    /* Save the bit-stream object at this point for FEC. */
+    memcpy(&ISACencUB_obj->SaveEnc_obj.bitStreamObj,
+           &ISACencUB_obj->bitstr_obj, sizeof(Bitstr));
+
+    /* Encode the spectrum. */
+    status = WebRtcIsac_EncodeSpec(fre, fim, kAveragePitchGain,
+                                   band, &ISACencUB_obj->bitstr_obj);
+    if ((status < 0) && (status != -ISAC_DISALLOWED_BITSTREAM_LENGTH)) {
+      /* There has been an error but it was not too large payload
+         (we can cure too large payload). */
+      return status;
+    }
+    iterCntr++;
+  } while ((ISACencUB_obj->bitstr_obj.stream_index > payloadLimitBytes) ||
+      (status == -ISAC_DISALLOWED_BITSTREAM_LENGTH));
+  return 0;
+}
+
+int WebRtcIsac_EncodeUb16(const TransformTables* transform_tables,
+                          float* in, ISACUBEncStruct* ISACencUB_obj,
+                          int32_t jitterInfo) {
+  int err;
+  int k;
+
+  double lpcVecs[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
+  double percepFilterParams[(1 + UB_LPC_ORDER) * (SUBFRAMES << 1) +
+                            (1 + UB_LPC_ORDER)];
+
+  double LP_lookahead[FRAMESAMPLES];
+  int16_t fre[FRAMESAMPLES_HALF];   /* Q7 */
+  int16_t fim[FRAMESAMPLES_HALF];   /* Q7 */
+
+  int status = 0;
+
+  double varscale[2];
+  double corr[SUBFRAMES << 1][UB_LPC_ORDER + 1];
+  double lpcGains[SUBFRAMES << 1];
+  transcode_obj transcodingParam;
+  uint16_t payloadLimitBytes;
+  double s2nr;
+  const int16_t kAveragePitchGain = 0.0;
+  int bytesLeftSpecCoding;
+
+  /* Buffer speech samples (by 10ms packet) until the frame-length is   */
+  /* reached (30 ms).                                                   */
+  /*********************************************************************/
+
+  /* fill the buffer with 10ms input data */
+  memcpy(&ISACencUB_obj->data_buffer_float[ISACencUB_obj->buffer_index], in,
+         FRAMESAMPLES_10ms * sizeof(float));
+
+  /* If buffer size is not equal to current frame-size, and end of file is
+   * not reached yet, we don't do encoding unless we have the whole frame. */
+  if (ISACencUB_obj->buffer_index + FRAMESAMPLES_10ms < FRAMESAMPLES) {
+    ISACencUB_obj->buffer_index += FRAMESAMPLES_10ms;
+    return 0;
+  }
+
+  /* End of buffer function. */
+  /**************************/
+
+  /* Encoding */
+  /************/
+
+  /* Reset bit-stream */
+  WebRtcIsac_ResetBitstream(&(ISACencUB_obj->bitstr_obj));
+
+  /* Encoding of bandwidth information. */
+  WebRtcIsac_EncodeJitterInfo(jitterInfo, &ISACencUB_obj->bitstr_obj);
+
+  status = WebRtcIsac_EncodeBandwidth(isac16kHz, &ISACencUB_obj->bitstr_obj);
+  if (status < 0) {
+    return status;
+  }
+
+  s2nr = WebRtcIsac_GetSnr(ISACencUB_obj->bottleneck, FRAMESAMPLES);
+
+  memcpy(lpcVecs, ISACencUB_obj->lastLPCVec, UB_LPC_ORDER * sizeof(double));
+
+  for (k = 0; k < FRAMESAMPLES; k++) {
+    LP_lookahead[k] = ISACencUB_obj->data_buffer_float[UB_LOOKAHEAD + k];
+  }
+
+  /* Find coefficients for perceptual pre-filters. */
+  WebRtcIsac_GetLpcCoefUb(LP_lookahead, &ISACencUB_obj->maskfiltstr_obj,
+                          &lpcVecs[UB_LPC_ORDER], corr, varscale, isac16kHz);
+
+  memcpy(ISACencUB_obj->lastLPCVec,
+         &lpcVecs[(UB16_LPC_VEC_PER_FRAME - 1) * (UB_LPC_ORDER)],
+         sizeof(double) * UB_LPC_ORDER);
+
+  /* Code LPC model and shape - gains not quantized yet. */
+  WebRtcIsac_EncodeLpcUB(lpcVecs, &ISACencUB_obj->bitstr_obj,
+                         percepFilterParams, isac16kHz,
+                         &ISACencUB_obj->SaveEnc_obj);
+
+  /* the first set of lpc parameters are from the last sub-frame of
+   * the previous frame. so we don't care about them. */
+  WebRtcIsac_GetLpcGain(s2nr, &percepFilterParams[UB_LPC_ORDER + 1],
+                        (SUBFRAMES << 1), lpcGains, corr, varscale);
+
+  /* Store the state of arithmetic coder before coding LPC gains */
+  transcodingParam.stream_index = ISACencUB_obj->bitstr_obj.stream_index;
+  transcodingParam.W_upper = ISACencUB_obj->bitstr_obj.W_upper;
+  transcodingParam.streamval = ISACencUB_obj->bitstr_obj.streamval;
+  transcodingParam.stream[0] =
+      ISACencUB_obj->bitstr_obj.stream[ISACencUB_obj->bitstr_obj.stream_index -
+                                       2];
+  transcodingParam.stream[1] =
+      ISACencUB_obj->bitstr_obj.stream[ISACencUB_obj->bitstr_obj.stream_index -
+                                       1];
+  transcodingParam.stream[2] =
+      ISACencUB_obj->bitstr_obj.stream[ISACencUB_obj->bitstr_obj.stream_index];
+
+  /* Store LPC Gains before encoding them. */
+  for (k = 0; k < SUBFRAMES; k++) {
+    transcodingParam.loFiltGain[k] = lpcGains[k];
+    transcodingParam.hiFiltGain[k] = lpcGains[SUBFRAMES + k];
+  }
+
+  /* Store the gains for multiple encoding. */
+  memcpy(ISACencUB_obj->SaveEnc_obj.lpcGain, lpcGains,
+         (SUBFRAMES << 1) * sizeof(double));
+
+  WebRtcIsac_EncodeLpcGainUb(lpcGains, &ISACencUB_obj->bitstr_obj,
+                             ISACencUB_obj->SaveEnc_obj.lpcGainIndex);
+  WebRtcIsac_EncodeLpcGainUb(
+      &lpcGains[SUBFRAMES], &ISACencUB_obj->bitstr_obj,
+      &ISACencUB_obj->SaveEnc_obj.lpcGainIndex[SUBFRAMES]);
+
+  /* Get the correct value for the payload limit and calculate the number of
+     bytes left for coding the spectrum. It is a 30ms frame
+     Subract 3 because termination process may add 3 bytes */
+  payloadLimitBytes = ISACencUB_obj->maxPayloadSizeBytes -
+      ISACencUB_obj->numBytesUsed - 3;
+  bytesLeftSpecCoding = payloadLimitBytes -
+        ISACencUB_obj->bitstr_obj.stream_index;
+
+  for (k = 0; k < (SUBFRAMES << 1); k++) {
+    percepFilterParams[k * (UB_LPC_ORDER + 1) + (UB_LPC_ORDER + 1)] =
+        lpcGains[k];
+  }
+
+  /* LPC filtering (using normalized lattice filter), */
+  /* first half-frame. */
+  WebRtcIsac_NormLatticeFilterMa(UB_LPC_ORDER,
+                                 ISACencUB_obj->maskfiltstr_obj.PreStateLoF,
+                                 ISACencUB_obj->maskfiltstr_obj.PreStateLoG,
+                                 &ISACencUB_obj->data_buffer_float[0],
+                                 &percepFilterParams[UB_LPC_ORDER + 1],
+                                 &LP_lookahead[0]);
+
+  /* Second half-frame filtering. */
+  WebRtcIsac_NormLatticeFilterMa(
+      UB_LPC_ORDER, ISACencUB_obj->maskfiltstr_obj.PreStateLoF,
+      ISACencUB_obj->maskfiltstr_obj.PreStateLoG,
+      &ISACencUB_obj->data_buffer_float[FRAMESAMPLES_HALF],
+      &percepFilterParams[(UB_LPC_ORDER + 1) + SUBFRAMES * (UB_LPC_ORDER + 1)],
+      &LP_lookahead[FRAMESAMPLES_HALF]);
+
+  WebRtcIsac_Time2Spec(transform_tables,
+                       &LP_lookahead[0], &LP_lookahead[FRAMESAMPLES_HALF],
+                       fre, fim, &ISACencUB_obj->fftstr_obj);
+
+  /* Store FFT coefficients for multiple encoding. */
+  memcpy(ISACencUB_obj->SaveEnc_obj.realFFT, fre, sizeof(fre));
+  memcpy(ISACencUB_obj->SaveEnc_obj.imagFFT, fim, sizeof(fim));
+
+  /* Prepare the audio buffer for the next packet
+   * move the last 3 ms to the beginning of the buffer. */
+  memcpy(ISACencUB_obj->data_buffer_float,
+         &ISACencUB_obj->data_buffer_float[FRAMESAMPLES],
+         LB_TOTAL_DELAY_SAMPLES * sizeof(float));
+  /* start writing with 3 ms delay to compensate for the delay
+   * of the lower-band. */
+  ISACencUB_obj->buffer_index = LB_TOTAL_DELAY_SAMPLES;
+
+  /* Save the bit-stream object at this point for FEC. */
+  memcpy(&ISACencUB_obj->SaveEnc_obj.bitStreamObj, &ISACencUB_obj->bitstr_obj,
+         sizeof(Bitstr));
+
+  /* Qantization and lossless coding */
+  /* Note that there is no pitch-gain for this band so kAveragePitchGain = 0
+   * is passed to the function. In fact, the function ignores the 3rd parameter
+   * for this band. */
+  err = WebRtcIsac_EncodeSpec(fre, fim, kAveragePitchGain, kIsacUpperBand16,
+                              &ISACencUB_obj->bitstr_obj);
+  if ((err < 0) && (err != -ISAC_DISALLOWED_BITSTREAM_LENGTH)) {
+    return err;
+  }
+
+  if ((ISACencUB_obj->bitstr_obj.stream_index > payloadLimitBytes) ||
+      (err == -ISAC_DISALLOWED_BITSTREAM_LENGTH)) {
+    err = LimitPayloadUb(ISACencUB_obj, payloadLimitBytes, bytesLeftSpecCoding,
+                         &transcodingParam, fre, fim, lpcGains,
+                         kIsacUpperBand16, err);
+  }
+  if (err < 0) {
+    return err;
+  }
+  /* Complete arithmetic coding. */
+  return WebRtcIsac_EncTerminate(&ISACencUB_obj->bitstr_obj);
+}
+
+
+int WebRtcIsac_EncodeUb12(const TransformTables* transform_tables,
+                          float* in, ISACUBEncStruct* ISACencUB_obj,
+                          int32_t jitterInfo) {
+  int err;
+  int k;
+
+  double lpcVecs[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME];
+
+  double percepFilterParams[(1 + UB_LPC_ORDER) * SUBFRAMES];
+  float LP[FRAMESAMPLES_HALF];
+  float HP[FRAMESAMPLES_HALF];
+
+  double LP_lookahead[FRAMESAMPLES_HALF];
+  double HP_lookahead[FRAMESAMPLES_HALF];
+  double LPw[FRAMESAMPLES_HALF];
+
+  double HPw[FRAMESAMPLES_HALF];
+  int16_t fre[FRAMESAMPLES_HALF];   /* Q7 */
+  int16_t fim[FRAMESAMPLES_HALF];   /* Q7 */
+
+  int status = 0;
+
+  double varscale[1];
+
+  double corr[UB_LPC_GAIN_DIM][UB_LPC_ORDER + 1];
+  double lpcGains[SUBFRAMES];
+  transcode_obj transcodingParam;
+  uint16_t payloadLimitBytes;
+  double s2nr;
+  const int16_t kAveragePitchGain = 0.0;
+  double bytesLeftSpecCoding;
+
+  /* Buffer speech samples (by 10ms packet) until the framelength is  */
+  /* reached (30 ms).                                                 */
+  /********************************************************************/
+
+  /* Fill the buffer with 10ms input data. */
+  memcpy(&ISACencUB_obj->data_buffer_float[ISACencUB_obj->buffer_index], in,
+         FRAMESAMPLES_10ms * sizeof(float));
+
+  /* if buffer-size is not equal to current frame-size then increase the
+     index and return. We do the encoding when we have enough audio.     */
+  if (ISACencUB_obj->buffer_index + FRAMESAMPLES_10ms < FRAMESAMPLES) {
+    ISACencUB_obj->buffer_index += FRAMESAMPLES_10ms;
+    return 0;
+  }
+  /* If buffer reached the right size, reset index and continue
+     with encoding the frame */
+  ISACencUB_obj->buffer_index = 0;
+
+  /* End of buffer function */
+  /**************************/
+
+  /* Encoding */
+  /************/
+
+  /* Reset bit-stream. */
+  WebRtcIsac_ResetBitstream(&(ISACencUB_obj->bitstr_obj));
+
+  /* Encoding bandwidth information. */
+  WebRtcIsac_EncodeJitterInfo(jitterInfo, &ISACencUB_obj->bitstr_obj);
+  status = WebRtcIsac_EncodeBandwidth(isac12kHz, &ISACencUB_obj->bitstr_obj);
+  if (status < 0) {
+    return status;
+  }
+
+  s2nr = WebRtcIsac_GetSnr(ISACencUB_obj->bottleneck, FRAMESAMPLES);
+
+  /* Split signal in two bands. */
+  WebRtcIsac_SplitAndFilterFloat(ISACencUB_obj->data_buffer_float, HP, LP,
+                                 HP_lookahead, LP_lookahead,
+                                 &ISACencUB_obj->prefiltbankstr_obj);
+
+  /* Find coefficients for perceptual pre-filters. */
+  WebRtcIsac_GetLpcCoefUb(LP_lookahead, &ISACencUB_obj->maskfiltstr_obj,
+                          lpcVecs, corr, varscale, isac12kHz);
+
+  /* Code LPC model and shape - gains not quantized yet. */
+  WebRtcIsac_EncodeLpcUB(lpcVecs, &ISACencUB_obj->bitstr_obj,
+                         percepFilterParams, isac12kHz,
+                         &ISACencUB_obj->SaveEnc_obj);
+
+  WebRtcIsac_GetLpcGain(s2nr, percepFilterParams, SUBFRAMES, lpcGains, corr,
+                        varscale);
+
+  /* Store the state of arithmetic coder before coding LPC gains. */
+  transcodingParam.W_upper = ISACencUB_obj->bitstr_obj.W_upper;
+  transcodingParam.stream_index = ISACencUB_obj->bitstr_obj.stream_index;
+  transcodingParam.streamval = ISACencUB_obj->bitstr_obj.streamval;
+  transcodingParam.stream[0] =
+      ISACencUB_obj->bitstr_obj.stream[ISACencUB_obj->bitstr_obj.stream_index -
+                                       2];
+  transcodingParam.stream[1] =
+      ISACencUB_obj->bitstr_obj.stream[ISACencUB_obj->bitstr_obj.stream_index -
+                                       1];
+  transcodingParam.stream[2] =
+      ISACencUB_obj->bitstr_obj.stream[ISACencUB_obj->bitstr_obj.stream_index];
+
+  /* Store LPC Gains before encoding them. */
+  for (k = 0; k < SUBFRAMES; k++) {
+    transcodingParam.loFiltGain[k] = lpcGains[k];
+  }
+
+  /* Store the gains for multiple encoding. */
+  memcpy(ISACencUB_obj->SaveEnc_obj.lpcGain, lpcGains, SUBFRAMES *
+         sizeof(double));
+
+  WebRtcIsac_EncodeLpcGainUb(lpcGains, &ISACencUB_obj->bitstr_obj,
+                             ISACencUB_obj->SaveEnc_obj.lpcGainIndex);
+
+  for (k = 0; k < SUBFRAMES; k++) {
+    percepFilterParams[k * (UB_LPC_ORDER + 1)] = lpcGains[k];
+  }
+
+  /* perceptual pre-filtering (using normalized lattice filter) */
+  /* low-band filtering */
+  WebRtcIsac_NormLatticeFilterMa(UB_LPC_ORDER,
+                                 ISACencUB_obj->maskfiltstr_obj.PreStateLoF,
+                                 ISACencUB_obj->maskfiltstr_obj.PreStateLoG, LP,
+                                 percepFilterParams, LPw);
+
+  /* Get the correct value for the payload limit and calculate the number
+     of bytes left for coding the spectrum. It is a 30ms frame Subract 3
+     because termination process may add 3 bytes */
+  payloadLimitBytes = ISACencUB_obj->maxPayloadSizeBytes -
+      ISACencUB_obj->numBytesUsed - 3;
+  bytesLeftSpecCoding = payloadLimitBytes -
+      ISACencUB_obj->bitstr_obj.stream_index;
+
+  memset(HPw, 0, sizeof(HPw));
+
+  /* Transform */
+  WebRtcIsac_Time2Spec(transform_tables,
+                       LPw, HPw, fre, fim, &ISACencUB_obj->fftstr_obj);
+
+  /* Store FFT coefficients for multiple encoding. */
+  memcpy(ISACencUB_obj->SaveEnc_obj.realFFT, fre,
+         sizeof(ISACencUB_obj->SaveEnc_obj.realFFT));
+  memcpy(ISACencUB_obj->SaveEnc_obj.imagFFT, fim,
+         sizeof(ISACencUB_obj->SaveEnc_obj.imagFFT));
+
+  /* Save the bit-stream object at this point for FEC. */
+  memcpy(&ISACencUB_obj->SaveEnc_obj.bitStreamObj,
+         &ISACencUB_obj->bitstr_obj, sizeof(Bitstr));
+
+  /* Quantization and loss-less coding */
+  /* The 4th parameter to this function is pitch-gain, which is only used
+   * when encoding 0-8 kHz band, and irrelevant in this function, therefore,
+   * we insert zero here. */
+  err = WebRtcIsac_EncodeSpec(fre, fim, kAveragePitchGain, kIsacUpperBand12,
+                              &ISACencUB_obj->bitstr_obj);
+  if ((err < 0) && (err != -ISAC_DISALLOWED_BITSTREAM_LENGTH)) {
+    /* There has been an error but it was not too large
+       payload (we can cure too large payload) */
+    return err;
+  }
+
+  if ((ISACencUB_obj->bitstr_obj.stream_index > payloadLimitBytes) ||
+      (err == -ISAC_DISALLOWED_BITSTREAM_LENGTH)) {
+    err = LimitPayloadUb(ISACencUB_obj, payloadLimitBytes, bytesLeftSpecCoding,
+                         &transcodingParam, fre, fim, lpcGains,
+                         kIsacUpperBand12, err);
+  }
+  if (err < 0) {
+    return err;
+  }
+  /* Complete arithmetic coding. */
+  return WebRtcIsac_EncTerminate(&ISACencUB_obj->bitstr_obj);
+}
+
+
+
+
+
+
+/* This function is used to create a new bit-stream with new BWE.
+   The same data as previously encoded with the function WebRtcIsac_Encoder().
+   The data needed is taken from the structure, where it was stored
+   when calling the encoder. */
+
+int WebRtcIsac_EncodeStoredDataLb(const IsacSaveEncoderData* ISACSavedEnc_obj,
+                                  Bitstr* ISACBitStr_obj, int BWnumber,
+                                  float scale) {
+  int ii;
+  int status;
+  int BWno = BWnumber;
+
+  const uint16_t* WebRtcIsac_kQPitchGainCdf_ptr[1];
+  const uint16_t** cdf;
+
+  double tmpLPCcoeffs_lo[(ORDERLO + 1)*SUBFRAMES * 2];
+  double tmpLPCcoeffs_hi[(ORDERHI + 1)*SUBFRAMES * 2];
+  int tmpLPCindex_g[12 * 2];
+  int16_t tmp_fre[FRAMESAMPLES], tmp_fim[FRAMESAMPLES];
+  const int kModel = 0;
+
+  /* Sanity Check - possible values for BWnumber is 0 - 23. */
+  if ((BWnumber < 0) || (BWnumber > 23)) {
+    return -ISAC_RANGE_ERROR_BW_ESTIMATOR;
+  }
+
+  /* Reset bit-stream. */
+  WebRtcIsac_ResetBitstream(ISACBitStr_obj);
+
+  /* Encode frame length */
+  status = WebRtcIsac_EncodeFrameLen(ISACSavedEnc_obj->framelength,
+                                     ISACBitStr_obj);
+  if (status < 0) {
+    /* Wrong frame size. */
+    return status;
+  }
+
+  /* Transcoding */
+  if ((scale > 0.0) && (scale < 1.0)) {
+    /* Compensate LPC gain. */
+    for (ii = 0;
+        ii < ((ORDERLO + 1)* SUBFRAMES * (1 + ISACSavedEnc_obj->startIdx));
+        ii++) {
+      tmpLPCcoeffs_lo[ii] = scale *  ISACSavedEnc_obj->LPCcoeffs_lo[ii];
+    }
+    for (ii = 0;
+        ii < ((ORDERHI + 1) * SUBFRAMES * (1 + ISACSavedEnc_obj->startIdx));
+        ii++) {
+      tmpLPCcoeffs_hi[ii] = scale *  ISACSavedEnc_obj->LPCcoeffs_hi[ii];
+    }
+    /* Scale DFT. */
+    for (ii = 0;
+        ii < (FRAMESAMPLES_HALF * (1 + ISACSavedEnc_obj->startIdx));
+        ii++) {
+      tmp_fre[ii] = (int16_t)((scale) * (float)ISACSavedEnc_obj->fre[ii]);
+      tmp_fim[ii] = (int16_t)((scale) * (float)ISACSavedEnc_obj->fim[ii]);
+    }
+  } else {
+    for (ii = 0;
+        ii < (KLT_ORDER_GAIN * (1 + ISACSavedEnc_obj->startIdx));
+        ii++) {
+      tmpLPCindex_g[ii] =  ISACSavedEnc_obj->LPCindex_g[ii];
+    }
+    for (ii = 0;
+        ii < (FRAMESAMPLES_HALF * (1 + ISACSavedEnc_obj->startIdx));
+        ii++) {
+      tmp_fre[ii] = ISACSavedEnc_obj->fre[ii];
+      tmp_fim[ii] = ISACSavedEnc_obj->fim[ii];
+    }
+  }
+
+  /* Encode bandwidth estimate. */
+  WebRtcIsac_EncodeReceiveBw(&BWno, ISACBitStr_obj);
+
+  /* Loop over number of 30 msec */
+  for (ii = 0; ii <= ISACSavedEnc_obj->startIdx; ii++) {
+    /* Encode pitch gains. */
+    *WebRtcIsac_kQPitchGainCdf_ptr = WebRtcIsac_kQPitchGainCdf;
+    WebRtcIsac_EncHistMulti(ISACBitStr_obj,
+                            &ISACSavedEnc_obj->pitchGain_index[ii],
+                            WebRtcIsac_kQPitchGainCdf_ptr, 1);
+
+    /* Entropy coding of quantization pitch lags */
+    /* Voicing classification. */
+    if (ISACSavedEnc_obj->meanGain[ii] < 0.2) {
+      cdf = WebRtcIsac_kQPitchLagCdfPtrLo;
+    } else if (ISACSavedEnc_obj->meanGain[ii] < 0.4) {
+      cdf = WebRtcIsac_kQPitchLagCdfPtrMid;
+    } else {
+      cdf = WebRtcIsac_kQPitchLagCdfPtrHi;
+    }
+    WebRtcIsac_EncHistMulti(ISACBitStr_obj,
+                            &ISACSavedEnc_obj->pitchIndex[PITCH_SUBFRAMES * ii],
+                            cdf, PITCH_SUBFRAMES);
+
+    /* LPC */
+    /* Only one model exists. The entropy coding is done only for backward
+     * compatibility. */
+    WebRtcIsac_EncHistMulti(ISACBitStr_obj, &kModel,
+                            WebRtcIsac_kQKltModelCdfPtr, 1);
+    /* Entropy coding of quantization indices - LPC shape only. */
+    WebRtcIsac_EncHistMulti(ISACBitStr_obj,
+                            &ISACSavedEnc_obj->LPCindex_s[KLT_ORDER_SHAPE * ii],
+                            WebRtcIsac_kQKltCdfPtrShape,
+                            KLT_ORDER_SHAPE);
+
+    /* If transcoding, get new LPC gain indices */
+    if (scale < 1.0) {
+      WebRtcIsac_TranscodeLPCCoef(
+          &tmpLPCcoeffs_lo[(ORDERLO + 1) * SUBFRAMES * ii],
+          &tmpLPCcoeffs_hi[(ORDERHI + 1)*SUBFRAMES * ii],
+          &tmpLPCindex_g[KLT_ORDER_GAIN * ii]);
+    }
+
+    /* Entropy coding of quantization indices - LPC gain. */
+    WebRtcIsac_EncHistMulti(ISACBitStr_obj, &tmpLPCindex_g[KLT_ORDER_GAIN * ii],
+                            WebRtcIsac_kQKltCdfPtrGain, KLT_ORDER_GAIN);
+
+    /* Quantization and loss-less coding. */
+    status = WebRtcIsac_EncodeSpec(&tmp_fre[ii * FRAMESAMPLES_HALF],
+                                   &tmp_fim[ii * FRAMESAMPLES_HALF],
+                                   ISACSavedEnc_obj->AvgPitchGain[ii],
+                                   kIsacLowerBand, ISACBitStr_obj);
+    if (status < 0) {
+      return status;
+    }
+  }
+  /* Complete arithmetic coding. */
+  return WebRtcIsac_EncTerminate(ISACBitStr_obj);
+}
+
+
+int WebRtcIsac_EncodeStoredDataUb(
+    const ISACUBSaveEncDataStruct* ISACSavedEnc_obj,
+    Bitstr* bitStream,
+    int32_t jitterInfo,
+    float scale,
+    enum ISACBandwidth bandwidth) {
+  int n;
+  int err;
+  double lpcGain[SUBFRAMES];
+  int16_t realFFT[FRAMESAMPLES_HALF];
+  int16_t imagFFT[FRAMESAMPLES_HALF];
+  const uint16_t** shape_cdf;
+  int shape_len;
+  const int16_t kAveragePitchGain = 0.0;
+  enum ISACBand band;
+  /* Reset bitstream. */
+  WebRtcIsac_ResetBitstream(bitStream);
+
+  /* Encode jitter index. */
+  WebRtcIsac_EncodeJitterInfo(jitterInfo, bitStream);
+
+  err = WebRtcIsac_EncodeBandwidth(bandwidth, bitStream);
+  if (err < 0) {
+    return err;
+  }
+
+  /* Encode LPC-shape. */
+  if (bandwidth == isac12kHz) {
+    shape_cdf = WebRtcIsac_kLpcShapeCdfMatUb12;
+    shape_len = UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME;
+    band = kIsacUpperBand12;
+  } else {
+    shape_cdf = WebRtcIsac_kLpcShapeCdfMatUb16;
+    shape_len = UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME;
+    band = kIsacUpperBand16;
+  }
+  WebRtcIsac_EncHistMulti(bitStream, ISACSavedEnc_obj->indexLPCShape,
+                          shape_cdf, shape_len);
+
+  if ((scale <= 0.0) || (scale >= 1.0)) {
+    /* We only consider scales between zero and one. */
+    WebRtcIsac_EncHistMulti(bitStream, ISACSavedEnc_obj->lpcGainIndex,
+                            WebRtcIsac_kLpcGainCdfMat, UB_LPC_GAIN_DIM);
+    if (bandwidth == isac16kHz) {
+      /* Store gain indices of the second half. */
+      WebRtcIsac_EncHistMulti(bitStream,
+                              &ISACSavedEnc_obj->lpcGainIndex[SUBFRAMES],
+                              WebRtcIsac_kLpcGainCdfMat, UB_LPC_GAIN_DIM);
+    }
+    /* Store FFT coefficients. */
+    err = WebRtcIsac_EncodeSpec(ISACSavedEnc_obj->realFFT,
+                                ISACSavedEnc_obj->imagFFT, kAveragePitchGain,
+                                band, bitStream);
+  } else {
+    /* Scale LPC gain and FFT coefficients. */
+    for (n = 0; n < SUBFRAMES; n++) {
+      lpcGain[n] = scale * ISACSavedEnc_obj->lpcGain[n];
+    }
+    /* Store LPC gains. */
+    WebRtcIsac_StoreLpcGainUb(lpcGain, bitStream);
+
+    if (bandwidth == isac16kHz) {
+      /* Scale and code the gains of the second half of the frame, if 16kHz. */
+      for (n = 0; n < SUBFRAMES; n++) {
+        lpcGain[n] = scale * ISACSavedEnc_obj->lpcGain[n + SUBFRAMES];
+      }
+      WebRtcIsac_StoreLpcGainUb(lpcGain, bitStream);
+    }
+
+    for (n = 0; n < FRAMESAMPLES_HALF; n++) {
+      realFFT[n] = (int16_t)(scale * (float)ISACSavedEnc_obj->realFFT[n] +
+          0.5f);
+      imagFFT[n] = (int16_t)(scale * (float)ISACSavedEnc_obj->imagFFT[n] +
+          0.5f);
+    }
+    /* Store FFT coefficients. */
+    err = WebRtcIsac_EncodeSpec(realFFT, imagFFT, kAveragePitchGain,
+                                band, bitStream);
+  }
+  if (err < 0) {
+    /* Error happened while encoding FFT coefficients. */
+    return err;
+  }
+
+  /* Complete arithmetic coding. */
+  return WebRtcIsac_EncTerminate(bitStream);
+}
+
+int16_t WebRtcIsac_GetRedPayloadUb(
+    const ISACUBSaveEncDataStruct* ISACSavedEncObj,
+    Bitstr*                        bitStreamObj,
+    enum ISACBandwidth             bandwidth) {
+  int n;
+  int16_t status;
+  int16_t realFFT[FRAMESAMPLES_HALF];
+  int16_t imagFFT[FRAMESAMPLES_HALF];
+  enum ISACBand band;
+  const int16_t kAveragePitchGain = 0.0;
+  /* Store bit-stream object. */
+  memcpy(bitStreamObj, &ISACSavedEncObj->bitStreamObj, sizeof(Bitstr));
+
+  /* Scale FFT coefficients. */
+  for (n = 0; n < FRAMESAMPLES_HALF; n++) {
+    realFFT[n] = (int16_t)((float)ISACSavedEncObj->realFFT[n] *
+        RCU_TRANSCODING_SCALE_UB + 0.5);
+    imagFFT[n] = (int16_t)((float)ISACSavedEncObj->imagFFT[n] *
+        RCU_TRANSCODING_SCALE_UB + 0.5);
+  }
+
+  band = (bandwidth == isac12kHz) ? kIsacUpperBand12 : kIsacUpperBand16;
+  status = WebRtcIsac_EncodeSpec(realFFT, imagFFT, kAveragePitchGain, band,
+                                 bitStreamObj);
+  if (status < 0) {
+    return status;
+  } else {
+    /* Terminate entropy coding */
+    return WebRtcIsac_EncTerminate(bitStreamObj);
+  }
+}
diff --git a/modules/audio_coding/codecs/isac/main/source/encode_lpc_swb.c b/modules/audio_coding/codecs/isac/main/source/encode_lpc_swb.c
new file mode 100644
index 0000000..0ab2dc1
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/encode_lpc_swb.c
@@ -0,0 +1,707 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * code_LPC_UB.c
+ *
+ * This file contains definition of functions used to
+ * encode LPC parameters (Shape & gain) of the upper band.
+ *
+ */
+
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/encode_lpc_swb.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+/******************************************************************************
+ * WebRtcIsac_RemoveLarMean()
+ *
+ * Remove the means from LAR coefficients.
+ *
+ * Input:
+ *      -lar                : pointer to lar vectors. LAR vectors are
+ *                            concatenated.
+ *      -bandwidth          : indicates if the given LAR vectors belong
+ *                            to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -lar                : pointer to mean-removed LAR:s.
+ *
+ *
+ */
+int16_t
+WebRtcIsac_RemoveLarMean(
+    double* lar,
+    int16_t bandwidth)
+{
+  int16_t coeffCntr;
+  int16_t vecCntr;
+  int16_t numVec;
+  const double* meanLAR;
+  switch(bandwidth)
+  {
+    case isac12kHz:
+      {
+        numVec = UB_LPC_VEC_PER_FRAME;
+        meanLAR = WebRtcIsac_kMeanLarUb12;
+        break;
+      }
+    case isac16kHz:
+      {
+        numVec = UB16_LPC_VEC_PER_FRAME;
+        meanLAR = WebRtcIsac_kMeanLarUb16;
+        break;
+      }
+    default:
+      return -1;
+  }
+
+  for(vecCntr = 0; vecCntr < numVec; vecCntr++)
+  {
+    for(coeffCntr = 0; coeffCntr < UB_LPC_ORDER; coeffCntr++)
+    {
+      // REMOVE MEAN
+      *lar++ -= meanLAR[coeffCntr];
+    }
+  }
+  return 0;
+}
+
+/******************************************************************************
+ * WebRtcIsac_DecorrelateIntraVec()
+ *
+ * Remove the correlation amonge the components of LAR vectors. If LAR vectors
+ * of one frame are put in a matrix where each column is a LAR vector of a
+ * sub-frame, then this is equivalent to multiplying the LAR matrix with
+ * a decorrelting mtrix from left.
+ *
+ * Input:
+ *      -inLar              : pointer to mean-removed LAR vecrtors.
+ *      -bandwidth          : indicates if the given LAR vectors belong
+ *                            to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -out                : decorrelated LAR vectors.
+ */
+int16_t
+WebRtcIsac_DecorrelateIntraVec(
+    const double* data,
+    double*       out,
+    int16_t bandwidth)
+{
+  const double* ptrData;
+  const double* ptrRow;
+  int16_t rowCntr;
+  int16_t colCntr;
+  int16_t larVecCntr;
+  int16_t numVec;
+  const double* decorrMat;
+  switch(bandwidth)
+  {
+    case isac12kHz:
+      {
+        decorrMat = &WebRtcIsac_kIntraVecDecorrMatUb12[0][0];
+        numVec = UB_LPC_VEC_PER_FRAME;
+        break;
+      }
+    case isac16kHz:
+      {
+        decorrMat = &WebRtcIsac_kIintraVecDecorrMatUb16[0][0];
+        numVec = UB16_LPC_VEC_PER_FRAME;
+        break;
+      }
+    default:
+      return -1;
+  }
+
+  //
+  // decorrMat * data
+  //
+  // data is assumed to contain 'numVec' of LAR
+  // vectors (mean removed) each of dimension 'UB_LPC_ORDER'
+  // concatenated one after the other.
+  //
+
+  ptrData = data;
+  for(larVecCntr = 0; larVecCntr < numVec; larVecCntr++)
+  {
+    for(rowCntr = 0; rowCntr < UB_LPC_ORDER; rowCntr++)
+    {
+      ptrRow = &decorrMat[rowCntr * UB_LPC_ORDER];
+      *out = 0;
+      for(colCntr = 0; colCntr < UB_LPC_ORDER; colCntr++)
+      {
+        *out += ptrData[colCntr] * ptrRow[colCntr];
+      }
+      out++;
+    }
+    ptrData += UB_LPC_ORDER;
+  }
+  return 0;
+}
+
+/******************************************************************************
+ * WebRtcIsac_DecorrelateInterVec()
+ *
+ * Remover the correlation among mean-removed LAR vectors. If LAR vectors
+ * of one frame are put in a matrix where each column is a LAR vector of a
+ * sub-frame, then this is equivalent to multiplying the LAR matrix with
+ * a decorrelting mtrix from right.
+ *
+ * Input:
+ *      -data               : pointer to matrix of LAR vectors. The matrix
+ *                            is stored column-wise.
+ *      -bandwidth          : indicates if the given LAR vectors belong
+ *                            to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -out                : decorrelated LAR vectors.
+ */
+int16_t
+WebRtcIsac_DecorrelateInterVec(
+    const double* data,
+    double* out,
+    int16_t bandwidth)
+{
+  int16_t coeffCntr;
+  int16_t rowCntr;
+  int16_t colCntr;
+  const double* decorrMat;
+  int16_t interVecDim;
+
+  switch(bandwidth)
+  {
+    case isac12kHz:
+      {
+        decorrMat = &WebRtcIsac_kInterVecDecorrMatUb12[0][0];
+        interVecDim = UB_LPC_VEC_PER_FRAME;
+        break;
+      }
+    case isac16kHz:
+      {
+        decorrMat = &WebRtcIsac_kInterVecDecorrMatUb16[0][0];
+        interVecDim = UB16_LPC_VEC_PER_FRAME;
+        break;
+      }
+    default:
+      return -1;
+  }
+
+  //
+  // data * decorrMat
+  //
+  // data is of size 'interVecDim' * 'UB_LPC_ORDER'
+  // That is 'interVecDim' of LAR vectors (mean removed)
+  // in columns each of dimension 'UB_LPC_ORDER'.
+  // matrix is stored column-wise.
+  //
+
+  for(coeffCntr = 0; coeffCntr < UB_LPC_ORDER; coeffCntr++)
+  {
+    for(colCntr = 0; colCntr < interVecDim; colCntr++)
+    {
+      out[coeffCntr + colCntr * UB_LPC_ORDER] = 0;
+      for(rowCntr = 0; rowCntr < interVecDim; rowCntr++)
+      {
+        out[coeffCntr + colCntr * UB_LPC_ORDER] +=
+            data[coeffCntr + rowCntr * UB_LPC_ORDER] *
+            decorrMat[rowCntr * interVecDim + colCntr];
+      }
+    }
+  }
+  return 0;
+}
+
+/******************************************************************************
+ * WebRtcIsac_QuantizeUncorrLar()
+ *
+ * Quantize the uncorrelated parameters.
+ *
+ * Input:
+ *      -data               : uncorrelated LAR vectors.
+ *      -bandwidth          : indicates if the given LAR vectors belong
+ *                            to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -data               : quantized version of the input.
+ *      -idx                : pointer to quantization indices.
+ */
+double
+WebRtcIsac_QuantizeUncorrLar(
+    double* data,
+    int* recIdx,
+    int16_t bandwidth)
+{
+  int16_t cntr;
+  int32_t idx;
+  int16_t interVecDim;
+  const double* leftRecPoint;
+  double quantizationStepSize;
+  const int16_t* numQuantCell;
+  switch(bandwidth)
+  {
+    case isac12kHz:
+      {
+        leftRecPoint         = WebRtcIsac_kLpcShapeLeftRecPointUb12;
+        quantizationStepSize = WebRtcIsac_kLpcShapeQStepSizeUb12;
+        numQuantCell         = WebRtcIsac_kLpcShapeNumRecPointUb12;
+        interVecDim          = UB_LPC_VEC_PER_FRAME;
+        break;
+      }
+    case isac16kHz:
+      {
+        leftRecPoint         = WebRtcIsac_kLpcShapeLeftRecPointUb16;
+        quantizationStepSize = WebRtcIsac_kLpcShapeQStepSizeUb16;
+        numQuantCell         = WebRtcIsac_kLpcShapeNumRecPointUb16;
+        interVecDim          = UB16_LPC_VEC_PER_FRAME;
+        break;
+      }
+    default:
+      return -1;
+  }
+
+  //
+  // Quantize the parametrs.
+  //
+  for(cntr = 0; cntr < UB_LPC_ORDER * interVecDim; cntr++)
+  {
+    idx = (int32_t)floor((*data - leftRecPoint[cntr]) /
+                               quantizationStepSize + 0.5);
+    if(idx < 0)
+    {
+      idx = 0;
+    }
+    else if(idx >= numQuantCell[cntr])
+    {
+      idx = numQuantCell[cntr] - 1;
+    }
+
+    *data++ = leftRecPoint[cntr] + idx * quantizationStepSize;
+    *recIdx++ = idx;
+  }
+  return 0;
+}
+
+
+/******************************************************************************
+ * WebRtcIsac_DequantizeLpcParam()
+ *
+ * Get the quantized value of uncorrelated LARs given the quantization indices.
+ *
+ * Input:
+ *      -idx                : pointer to quantiztion indices.
+ *      -bandwidth          : indicates if the given LAR vectors belong
+ *                            to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -out                : pointer to quantized values.
+ */
+int16_t
+WebRtcIsac_DequantizeLpcParam(
+    const int* idx,
+    double*    out,
+    int16_t bandwidth)
+{
+  int16_t cntr;
+  int16_t interVecDim;
+  const double* leftRecPoint;
+  double quantizationStepSize;
+
+  switch(bandwidth)
+  {
+    case isac12kHz:
+      {
+        leftRecPoint =         WebRtcIsac_kLpcShapeLeftRecPointUb12;
+        quantizationStepSize = WebRtcIsac_kLpcShapeQStepSizeUb12;
+        interVecDim =          UB_LPC_VEC_PER_FRAME;
+        break;
+      }
+    case isac16kHz:
+      {
+        leftRecPoint =         WebRtcIsac_kLpcShapeLeftRecPointUb16;
+        quantizationStepSize = WebRtcIsac_kLpcShapeQStepSizeUb16;
+        interVecDim =          UB16_LPC_VEC_PER_FRAME;
+        break;
+      }
+    default:
+      return -1;
+  }
+
+  //
+  // Dequantize given the quantization indices
+  //
+
+  for(cntr = 0; cntr < UB_LPC_ORDER * interVecDim; cntr++)
+  {
+    *out++ = leftRecPoint[cntr] + *idx++ * quantizationStepSize;
+  }
+  return 0;
+}
+
+
+/******************************************************************************
+ * WebRtcIsac_CorrelateIntraVec()
+ *
+ * This is the inverse of WebRtcIsac_DecorrelateIntraVec().
+ *
+ * Input:
+ *      -data               : uncorrelated parameters.
+ *      -bandwidth          : indicates if the given LAR vectors belong
+ *                            to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -out                : correlated parametrs.
+ */
+int16_t
+WebRtcIsac_CorrelateIntraVec(
+    const double* data,
+    double*       out,
+    int16_t bandwidth)
+{
+  int16_t vecCntr;
+  int16_t rowCntr;
+  int16_t colCntr;
+  int16_t numVec;
+  const double* ptrData;
+  const double* intraVecDecorrMat;
+
+  switch(bandwidth)
+  {
+    case isac12kHz:
+      {
+        numVec            = UB_LPC_VEC_PER_FRAME;
+        intraVecDecorrMat = &WebRtcIsac_kIntraVecDecorrMatUb12[0][0];
+        break;
+      }
+    case isac16kHz:
+      {
+        numVec            = UB16_LPC_VEC_PER_FRAME;
+        intraVecDecorrMat = &WebRtcIsac_kIintraVecDecorrMatUb16[0][0];
+        break;
+      }
+    default:
+      return -1;
+  }
+
+
+  ptrData = data;
+  for(vecCntr = 0; vecCntr < numVec; vecCntr++)
+  {
+    for(colCntr = 0; colCntr < UB_LPC_ORDER; colCntr++)
+    {
+      *out = 0;
+      for(rowCntr = 0; rowCntr < UB_LPC_ORDER; rowCntr++)
+      {
+        *out += ptrData[rowCntr] *
+            intraVecDecorrMat[rowCntr * UB_LPC_ORDER + colCntr];
+      }
+      out++;
+    }
+    ptrData += UB_LPC_ORDER;
+  }
+  return 0;
+}
+
+/******************************************************************************
+ * WebRtcIsac_CorrelateInterVec()
+ *
+ * This is the inverse of WebRtcIsac_DecorrelateInterVec().
+ *
+ * Input:
+ *      -data
+ *      -bandwidth          : indicates if the given LAR vectors belong
+ *                            to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -out                : correlated parametrs.
+ */
+int16_t
+WebRtcIsac_CorrelateInterVec(
+    const double* data,
+    double*       out,
+    int16_t bandwidth)
+{
+  int16_t coeffCntr;
+  int16_t rowCntr;
+  int16_t colCntr;
+  int16_t interVecDim;
+  double myVec[UB16_LPC_VEC_PER_FRAME] = {0.0};
+  const double* interVecDecorrMat;
+
+  switch(bandwidth)
+  {
+    case isac12kHz:
+      {
+        interVecDim       = UB_LPC_VEC_PER_FRAME;
+        interVecDecorrMat = &WebRtcIsac_kInterVecDecorrMatUb12[0][0];
+        break;
+      }
+    case isac16kHz:
+      {
+        interVecDim       = UB16_LPC_VEC_PER_FRAME;
+        interVecDecorrMat = &WebRtcIsac_kInterVecDecorrMatUb16[0][0];
+        break;
+      }
+    default:
+      return -1;
+  }
+
+  for(coeffCntr = 0; coeffCntr < UB_LPC_ORDER; coeffCntr++)
+  {
+    for(rowCntr = 0; rowCntr < interVecDim; rowCntr++)
+    {
+      myVec[rowCntr] = 0;
+      for(colCntr = 0; colCntr < interVecDim; colCntr++)
+      {
+        myVec[rowCntr] += data[coeffCntr + colCntr * UB_LPC_ORDER] * //*ptrData *
+            interVecDecorrMat[rowCntr * interVecDim + colCntr];
+        //ptrData += UB_LPC_ORDER;
+      }
+    }
+
+    for(rowCntr = 0; rowCntr < interVecDim; rowCntr++)
+    {
+      out[coeffCntr + rowCntr * UB_LPC_ORDER] = myVec[rowCntr];
+    }
+  }
+  return 0;
+}
+
+/******************************************************************************
+ * WebRtcIsac_AddLarMean()
+ *
+ * This is the inverse of WebRtcIsac_RemoveLarMean()
+ *
+ * Input:
+ *      -data               : pointer to mean-removed LAR:s.
+ *      -bandwidth          : indicates if the given LAR vectors belong
+ *                            to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -data               : pointer to LARs.
+ */
+int16_t
+WebRtcIsac_AddLarMean(
+    double* data,
+    int16_t bandwidth)
+{
+  int16_t coeffCntr;
+  int16_t vecCntr;
+  int16_t numVec;
+  const double* meanLAR;
+
+  switch(bandwidth)
+  {
+    case isac12kHz:
+      {
+        numVec = UB_LPC_VEC_PER_FRAME;
+        meanLAR = WebRtcIsac_kMeanLarUb12;
+        break;
+      }
+    case isac16kHz:
+      {
+        numVec = UB16_LPC_VEC_PER_FRAME;
+        meanLAR = WebRtcIsac_kMeanLarUb16;
+        break;
+      }
+    default:
+      return -1;
+  }
+
+  for(vecCntr = 0; vecCntr < numVec; vecCntr++)
+  {
+    for(coeffCntr = 0; coeffCntr < UB_LPC_ORDER; coeffCntr++)
+    {
+      *data++ += meanLAR[coeffCntr];
+    }
+  }
+  return 0;
+}
+
+/******************************************************************************
+ * WebRtcIsac_ToLogDomainRemoveMean()
+ *
+ * Transform the LPC gain to log domain then remove the mean value.
+ *
+ * Input:
+ *      -lpcGain            : pointer to LPC Gain, expecting 6 LPC gains
+ *
+ * Output:
+ *      -lpcGain            : mean-removed in log domain.
+ */
+int16_t
+WebRtcIsac_ToLogDomainRemoveMean(
+    double* data)
+{
+  int16_t coeffCntr;
+  for(coeffCntr = 0; coeffCntr < UB_LPC_GAIN_DIM; coeffCntr++)
+  {
+    data[coeffCntr] = log(data[coeffCntr]) - WebRtcIsac_kMeanLpcGain;
+  }
+  return 0;
+}
+
+
+/******************************************************************************
+ * WebRtcIsac_DecorrelateLPGain()
+ *
+ * Decorrelate LPC gains. There are 6 LPC Gains per frame. This is like
+ * multiplying gain vector with decorrelating matrix.
+ *
+ * Input:
+ *      -data               : LPC gain in log-domain with mean removed.
+ *
+ * Output:
+ *      -out                : decorrelated parameters.
+ */
+int16_t WebRtcIsac_DecorrelateLPGain(
+    const double* data,
+    double* out)
+{
+  int16_t rowCntr;
+  int16_t colCntr;
+
+  for(colCntr = 0; colCntr < UB_LPC_GAIN_DIM; colCntr++)
+  {
+    *out = 0;
+    for(rowCntr = 0; rowCntr < UB_LPC_GAIN_DIM; rowCntr++)
+    {
+      *out += data[rowCntr] * WebRtcIsac_kLpcGainDecorrMat[rowCntr][colCntr];
+    }
+    out++;
+  }
+  return 0;
+}
+
+/******************************************************************************
+ * WebRtcIsac_QuantizeLpcGain()
+ *
+ * Quantize the decorrelated log-domain gains.
+ *
+ * Input:
+ *      -lpcGain            : uncorrelated LPC gains.
+ *
+ * Output:
+ *      -idx                : quantization indices
+ *      -lpcGain            : quantized value of the inpt.
+ */
+double WebRtcIsac_QuantizeLpcGain(
+    double* data,
+    int*    idx)
+{
+  int16_t coeffCntr;
+  for(coeffCntr = 0; coeffCntr < UB_LPC_GAIN_DIM; coeffCntr++)
+  {
+    *idx = (int)floor((*data - WebRtcIsac_kLeftRecPointLpcGain[coeffCntr]) /
+                                WebRtcIsac_kQSizeLpcGain + 0.5);
+
+    if(*idx < 0)
+    {
+      *idx = 0;
+    }
+    else if(*idx >= WebRtcIsac_kNumQCellLpcGain[coeffCntr])
+    {
+      *idx = WebRtcIsac_kNumQCellLpcGain[coeffCntr] - 1;
+    }
+    *data = WebRtcIsac_kLeftRecPointLpcGain[coeffCntr] + *idx *
+        WebRtcIsac_kQSizeLpcGain;
+
+    data++;
+    idx++;
+  }
+  return 0;
+}
+
+/******************************************************************************
+ * WebRtcIsac_DequantizeLpcGain()
+ *
+ * Get the quantized values given the quantization indices.
+ *
+ * Input:
+ *      -idx                : pointer to quantization indices.
+ *
+ * Output:
+ *      -lpcGains           : quantized values of the given parametes.
+ */
+int16_t WebRtcIsac_DequantizeLpcGain(
+    const int* idx,
+    double*    out)
+{
+  int16_t coeffCntr;
+  for(coeffCntr = 0; coeffCntr < UB_LPC_GAIN_DIM; coeffCntr++)
+  {
+    *out = WebRtcIsac_kLeftRecPointLpcGain[coeffCntr] + *idx *
+        WebRtcIsac_kQSizeLpcGain;
+    out++;
+    idx++;
+  }
+  return 0;
+}
+
+/******************************************************************************
+ * WebRtcIsac_CorrelateLpcGain()
+ *
+ * This is the inverse of WebRtcIsac_DecorrelateLPGain().
+ *
+ * Input:
+ *      -data               : decorrelated parameters.
+ *
+ * Output:
+ *      -out                : correlated parameters.
+ */
+int16_t WebRtcIsac_CorrelateLpcGain(
+    const double* data,
+    double* out)
+{
+  int16_t rowCntr;
+  int16_t colCntr;
+
+  for(rowCntr = 0; rowCntr < UB_LPC_GAIN_DIM; rowCntr++)
+  {
+    *out = 0;
+    for(colCntr = 0; colCntr < UB_LPC_GAIN_DIM; colCntr++)
+    {
+      *out += WebRtcIsac_kLpcGainDecorrMat[rowCntr][colCntr] * data[colCntr];
+    }
+    out++;
+  }
+
+  return 0;
+}
+
+
+/******************************************************************************
+ * WebRtcIsac_AddMeanToLinearDomain()
+ *
+ * This is the inverse of WebRtcIsac_ToLogDomainRemoveMean().
+ *
+ * Input:
+ *      -lpcGain            : LPC gain in log-domain & mean removed
+ *
+ * Output:
+ *      -lpcGain            : LPC gain in normal domain.
+ */
+int16_t WebRtcIsac_AddMeanToLinearDomain(
+    double* lpcGains)
+{
+  int16_t coeffCntr;
+  for(coeffCntr = 0; coeffCntr < UB_LPC_GAIN_DIM; coeffCntr++)
+  {
+    lpcGains[coeffCntr] = exp(lpcGains[coeffCntr] + WebRtcIsac_kMeanLpcGain);
+  }
+  return 0;
+}
diff --git a/modules/audio_coding/codecs/isac/main/source/encode_lpc_swb.h b/modules/audio_coding/codecs/isac/main/source/encode_lpc_swb.h
new file mode 100644
index 0000000..2fa1c71
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/encode_lpc_swb.h
@@ -0,0 +1,282 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * encode_lpc_swb.h
+ *
+ * This file contains declaration of functions used to
+ * encode LPC parameters (Shape & gain) of the upper band.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENCODE_LPC_SWB_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENCODE_LPC_SWB_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+/******************************************************************************
+ * WebRtcIsac_RemoveLarMean()
+ *
+ * Remove the means from LAR coefficients.
+ *
+ * Input:
+ *      -lar                : pointer to lar vectors. LAR vectors are
+ *                            concatenated.
+ *      -bandwidth          : indicates if the given LAR vectors belong
+ *                            to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -lar                : pointer to mean-removed LAR:s.
+ *
+ *
+ */
+int16_t WebRtcIsac_RemoveLarMean(
+    double*     lar,
+    int16_t bandwidth);
+
+/******************************************************************************
+ * WebRtcIsac_DecorrelateIntraVec()
+ *
+ * Remove the correlation amonge the components of LAR vectors. If LAR vectors
+ * of one frame are put in a matrix where each column is a LAR vector of a
+ * sub-frame, then this is equivalent to multiplying the LAR matrix with
+ * a decorrelting mtrix from left.
+ *
+ * Input:
+ *      -inLar              : pointer to mean-removed LAR vecrtors.
+ *      -bandwidth          : indicates if the given LAR vectors belong
+ *                            to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -out                : decorrelated LAR vectors.
+ */
+int16_t WebRtcIsac_DecorrelateIntraVec(
+    const double* inLAR,
+    double*       out,
+    int16_t   bandwidth);
+
+
+/******************************************************************************
+ * WebRtcIsac_DecorrelateInterVec()
+ *
+ * Remover the correlation among mean-removed LAR vectors. If LAR vectors
+ * of one frame are put in a matrix where each column is a LAR vector of a
+ * sub-frame, then this is equivalent to multiplying the LAR matrix with
+ * a decorrelting mtrix from right.
+ *
+ * Input:
+ *      -data               : pointer to matrix of LAR vectors. The matrix
+ *                            is stored column-wise.
+ *      -bandwidth          : indicates if the given LAR vectors belong
+ *                            to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -out                : decorrelated LAR vectors.
+ */
+int16_t WebRtcIsac_DecorrelateInterVec(
+    const double* data,
+    double*       out,
+    int16_t   bandwidth);
+
+
+/******************************************************************************
+ * WebRtcIsac_QuantizeUncorrLar()
+ *
+ * Quantize the uncorrelated parameters.
+ *
+ * Input:
+ *      -data               : uncorrelated LAR vectors.
+ *      -bandwidth          : indicates if the given LAR vectors belong
+ *                            to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -data               : quantized version of the input.
+ *      -idx                : pointer to quantization indices.
+ */
+double WebRtcIsac_QuantizeUncorrLar(
+    double*     data,
+    int*        idx,
+    int16_t bandwidth);
+
+
+/******************************************************************************
+ * WebRtcIsac_CorrelateIntraVec()
+ *
+ * This is the inverse of WebRtcIsac_DecorrelateIntraVec().
+ *
+ * Input:
+ *      -data               : uncorrelated parameters.
+ *      -bandwidth          : indicates if the given LAR vectors belong
+ *                            to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -out                : correlated parametrs.
+ */
+int16_t WebRtcIsac_CorrelateIntraVec(
+    const double* data,
+    double*       out,
+    int16_t   bandwidth);
+
+
+/******************************************************************************
+ * WebRtcIsac_CorrelateInterVec()
+ *
+ * This is the inverse of WebRtcIsac_DecorrelateInterVec().
+ *
+ * Input:
+ *      -data
+ *      -bandwidth          : indicates if the given LAR vectors belong
+ *                            to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -out                : correlated parametrs.
+ */
+int16_t WebRtcIsac_CorrelateInterVec(
+    const double* data,
+    double*       out,
+    int16_t   bandwidth);
+
+
+/******************************************************************************
+ * WebRtcIsac_AddLarMean()
+ *
+ * This is the inverse of WebRtcIsac_RemoveLarMean()
+ * 
+ * Input:
+ *      -data               : pointer to mean-removed LAR:s.
+ *      -bandwidth          : indicates if the given LAR vectors belong
+ *                            to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -data               : pointer to LARs.
+ */
+int16_t WebRtcIsac_AddLarMean(
+    double*     data,
+    int16_t bandwidth);
+
+
+/******************************************************************************
+ * WebRtcIsac_DequantizeLpcParam()
+ *
+ * Get the quantized value of uncorrelated LARs given the quantization indices.
+ *
+ * Input:
+ *      -idx                : pointer to quantiztion indices.
+ *      -bandwidth          : indicates if the given LAR vectors belong
+ *                            to SWB-12kHz or SWB-16kHz.
+ *
+ * Output:
+ *      -out                : pointer to quantized values.
+ */
+int16_t WebRtcIsac_DequantizeLpcParam(
+    const int*  idx,
+    double*     out,
+    int16_t bandwidth);
+
+
+/******************************************************************************
+ * WebRtcIsac_ToLogDomainRemoveMean()
+ *
+ * Transform the LPC gain to log domain then remove the mean value.
+ *
+ * Input:
+ *      -lpcGain            : pointer to LPC Gain, expecting 6 LPC gains
+ *
+ * Output:
+ *      -lpcGain            : mean-removed in log domain.
+ */
+int16_t WebRtcIsac_ToLogDomainRemoveMean(
+    double* lpGains);
+
+
+/******************************************************************************
+ * WebRtcIsac_DecorrelateLPGain()
+ *
+ * Decorrelate LPC gains. There are 6 LPC Gains per frame. This is like
+ * multiplying gain vector with decorrelating matrix.
+ *
+ * Input:
+ *      -data               : LPC gain in log-domain with mean removed.
+ *
+ * Output:
+ *      -out                : decorrelated parameters.
+ */
+int16_t WebRtcIsac_DecorrelateLPGain(
+    const double* data,
+    double*       out);
+
+
+/******************************************************************************
+ * WebRtcIsac_QuantizeLpcGain()
+ *
+ * Quantize the decorrelated log-domain gains.
+ * 
+ * Input:
+ *      -lpcGain            : uncorrelated LPC gains.
+ *
+ * Output:
+ *      -idx                : quantization indices
+ *      -lpcGain            : quantized value of the inpt.
+ */
+double WebRtcIsac_QuantizeLpcGain(
+    double* lpGains,
+    int*    idx);
+
+
+/******************************************************************************
+ * WebRtcIsac_DequantizeLpcGain()
+ *
+ * Get the quantized values given the quantization indices.
+ *
+ * Input:
+ *      -idx                : pointer to quantization indices.
+ *
+ * Output:
+ *      -lpcGains           : quantized values of the given parametes.
+ */
+int16_t WebRtcIsac_DequantizeLpcGain(
+    const int* idx,
+    double*    lpGains);
+
+
+/******************************************************************************
+ * WebRtcIsac_CorrelateLpcGain()
+ *
+ * This is the inverse of WebRtcIsac_DecorrelateLPGain().
+ *
+ * Input:
+ *      -data               : decorrelated parameters.
+ *
+ * Output:
+ *      -out                : correlated parameters.
+ */
+int16_t WebRtcIsac_CorrelateLpcGain(
+    const double* data,
+    double*       out);
+
+
+/******************************************************************************
+ * WebRtcIsac_AddMeanToLinearDomain()
+ *
+ * This is the inverse of WebRtcIsac_ToLogDomainRemoveMean().
+ *
+ * Input:
+ *      -lpcGain            : LPC gain in log-domain & mean removed
+ *
+ * Output:
+ *      -lpcGain            : LPC gain in normal domain.
+ */
+int16_t WebRtcIsac_AddMeanToLinearDomain(
+    double* lpcGains);
+
+
+#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENCODE_LPC_SWB_H_
diff --git a/modules/audio_coding/codecs/isac/main/source/entropy_coding.c b/modules/audio_coding/codecs/isac/main/source/entropy_coding.c
new file mode 100644
index 0000000..28767af
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/entropy_coding.c
@@ -0,0 +1,2066 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * entropy_coding.c
+ *
+ * This header file defines all of the functions used to arithmetically
+ * encode the iSAC bistream
+ *
+ */
+
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/codecs/isac/main/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/arith_routines.h"
+#include "modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/encode_lpc_swb.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/os_specific_inline.h"
+
+#include <math.h>
+#include <string.h>
+
+static const uint16_t kLpcVecPerSegmentUb12 = 5;
+static const uint16_t kLpcVecPerSegmentUb16 = 4;
+
+/* CDF array for encoder bandwidth (12 vs 16 kHz) indicator. */
+static const uint16_t kOneBitEqualProbCdf[3] = {
+    0, 32768, 65535 };
+
+/* Pointer to cdf array for encoder bandwidth (12 vs 16 kHz) indicator. */
+static const uint16_t* const kOneBitEqualProbCdf_ptr[1] = {
+    kOneBitEqualProbCdf };
+
+/*
+ * Initial cdf index for decoder of encoded bandwidth
+ * (12 vs 16 kHz) indicator.
+ */
+static const uint16_t kOneBitEqualProbInitIndex[1] = { 1 };
+
+
+static const int kIsSWB12 = 1;
+
+/* compute correlation from power spectrum */
+static void FindCorrelation(int32_t* PSpecQ12, int32_t* CorrQ7) {
+  int32_t summ[FRAMESAMPLES / 8];
+  int32_t diff[FRAMESAMPLES / 8];
+  const int16_t* CS_ptrQ9;
+  int32_t sum;
+  int k, n;
+
+  for (k = 0; k < FRAMESAMPLES / 8; k++) {
+    summ[k] = (PSpecQ12[k] + PSpecQ12[FRAMESAMPLES_QUARTER - 1 - k] + 16) >> 5;
+    diff[k] = (PSpecQ12[k] - PSpecQ12[FRAMESAMPLES_QUARTER - 1 - k] + 16) >> 5;
+  }
+
+  sum = 2;
+  for (n = 0; n < FRAMESAMPLES / 8; n++) {
+    sum += summ[n];
+  }
+  CorrQ7[0] = sum;
+
+  for (k = 0; k < AR_ORDER; k += 2) {
+    sum = 0;
+    CS_ptrQ9 = WebRtcIsac_kCos[k];
+    for (n = 0; n < FRAMESAMPLES / 8; n++)
+      sum += (CS_ptrQ9[n] * diff[n] + 256) >> 9;
+    CorrQ7[k + 1] = sum;
+  }
+
+  for (k = 1; k < AR_ORDER; k += 2) {
+    sum = 0;
+    CS_ptrQ9 = WebRtcIsac_kCos[k];
+    for (n = 0; n < FRAMESAMPLES / 8; n++)
+      sum += (CS_ptrQ9[n] * summ[n] + 256) >> 9;
+    CorrQ7[k + 1] = sum;
+  }
+}
+
+/* compute inverse AR power spectrum */
+/* Changed to the function used in iSAC FIX for compatibility reasons */
+static void FindInvArSpec(const int16_t* ARCoefQ12,
+                          const int32_t gainQ10,
+                          int32_t* CurveQ16) {
+  int32_t CorrQ11[AR_ORDER + 1];
+  int32_t sum, tmpGain;
+  int32_t diffQ16[FRAMESAMPLES / 8];
+  const int16_t* CS_ptrQ9;
+  int k, n;
+  int16_t round, shftVal = 0, sh;
+
+  sum = 0;
+  for (n = 0; n < AR_ORDER + 1; n++) {
+    sum += WEBRTC_SPL_MUL(ARCoefQ12[n], ARCoefQ12[n]);   /* Q24 */
+  }
+  sum = ((sum >> 6) * 65 + 32768) >> 16;  /* Q8 */
+  CorrQ11[0] = (sum * gainQ10 + 256) >> 9;
+
+  /* To avoid overflow, we shift down gainQ10 if it is large.
+   * We will not lose any precision */
+  if (gainQ10 > 400000) {
+    tmpGain = gainQ10 >> 3;
+    round = 32;
+    shftVal = 6;
+  } else {
+    tmpGain = gainQ10;
+    round = 256;
+    shftVal = 9;
+  }
+
+  for (k = 1; k < AR_ORDER + 1; k++) {
+    sum = 16384;
+    for (n = k; n < AR_ORDER + 1; n++)
+      sum += WEBRTC_SPL_MUL(ARCoefQ12[n - k], ARCoefQ12[n]); /* Q24 */
+    sum >>= 15;
+    CorrQ11[k] = (sum * tmpGain + round) >> shftVal;
+  }
+  sum = CorrQ11[0] << 7;
+  for (n = 0; n < FRAMESAMPLES / 8; n++) {
+    CurveQ16[n] = sum;
+  }
+  for (k = 1; k < AR_ORDER; k += 2) {
+    for (n = 0; n < FRAMESAMPLES / 8; n++) {
+      CurveQ16[n] += (WebRtcIsac_kCos[k][n] * CorrQ11[k + 1] + 2) >> 2;
+    }
+  }
+
+  CS_ptrQ9 = WebRtcIsac_kCos[0];
+
+  /* If CorrQ11[1] too large we avoid getting overflow in the
+   * calculation by shifting */
+  sh = WebRtcSpl_NormW32(CorrQ11[1]);
+  if (CorrQ11[1] == 0) { /* Use next correlation */
+    sh = WebRtcSpl_NormW32(CorrQ11[2]);
+  }
+  if (sh < 9) {
+    shftVal = 9 - sh;
+  } else {
+    shftVal = 0;
+  }
+  for (n = 0; n < FRAMESAMPLES / 8; n++) {
+    diffQ16[n] = (CS_ptrQ9[n] * (CorrQ11[1] >> shftVal) + 2) >> 2;
+  }
+  for (k = 2; k < AR_ORDER; k += 2) {
+    CS_ptrQ9 = WebRtcIsac_kCos[k];
+    for (n = 0; n < FRAMESAMPLES / 8; n++) {
+      diffQ16[n] += (CS_ptrQ9[n] * (CorrQ11[k + 1] >> shftVal) + 2) >> 2;
+    }
+  }
+
+  for (k = 0; k < FRAMESAMPLES / 8; k++) {
+    int32_t diff_q16_shifted = (int32_t)((uint32_t)(diffQ16[k]) << shftVal);
+    CurveQ16[FRAMESAMPLES_QUARTER - 1 - k] = CurveQ16[k] - diff_q16_shifted;
+    CurveQ16[k] += diff_q16_shifted;
+  }
+}
+
+/* Generate array of dither samples in Q7. */
+static void GenerateDitherQ7Lb(int16_t* bufQ7, uint32_t seed,
+                               int length, int16_t AvgPitchGain_Q12) {
+  int   k, shft;
+  int16_t dither1_Q7, dither2_Q7, dither_gain_Q14;
+
+  /* This threshold should be equal to that in decode_spec(). */
+  if (AvgPitchGain_Q12 < 614) {
+    for (k = 0; k < length - 2; k += 3) {
+      /* New random unsigned int. */
+      seed = (seed * 196314165) + 907633515;
+
+      /* Fixed-point dither sample between -64 and 64 (Q7). */
+      /* dither = seed * 128 / 4294967295 */
+      dither1_Q7 = (int16_t)(((int32_t)(seed + 16777216)) >> 25);
+
+      /* New random unsigned int. */
+      seed = (seed * 196314165) + 907633515;
+
+      /* Fixed-point dither sample between -64 and 64. */
+      dither2_Q7 = (int16_t)(((int32_t)(seed + 16777216)) >> 25);
+
+      shft = (seed >> 25) & 15;
+      if (shft < 5) {
+        bufQ7[k]   = dither1_Q7;
+        bufQ7[k + 1] = dither2_Q7;
+        bufQ7[k + 2] = 0;
+      } else if (shft < 10) {
+        bufQ7[k]   = dither1_Q7;
+        bufQ7[k + 1] = 0;
+        bufQ7[k + 2] = dither2_Q7;
+      } else {
+        bufQ7[k]   = 0;
+        bufQ7[k + 1] = dither1_Q7;
+        bufQ7[k + 2] = dither2_Q7;
+      }
+    }
+  } else {
+    dither_gain_Q14 = (int16_t)(22528 - 10 * AvgPitchGain_Q12);
+
+    /* Dither on half of the coefficients. */
+    for (k = 0; k < length - 1; k += 2) {
+      /* New random unsigned int */
+      seed = (seed * 196314165) + 907633515;
+
+      /* Fixed-point dither sample between -64 and 64. */
+      dither1_Q7 = (int16_t)(((int32_t)(seed + 16777216)) >> 25);
+
+      /* Dither sample is placed in either even or odd index. */
+      shft = (seed >> 25) & 1;     /* Either 0 or 1 */
+
+      bufQ7[k + shft] = (((dither_gain_Q14 * dither1_Q7) + 8192) >> 14);
+      bufQ7[k + 1 - shft] = 0;
+    }
+  }
+}
+
+
+
+/******************************************************************************
+ * GenerateDitherQ7LbUB()
+ *
+ * generate array of dither samples in Q7 There are less zeros in dither
+ * vector compared to GenerateDitherQ7Lb.
+ *
+ * A uniform random number generator with the range of [-64 64] is employed
+ * but the generated dithers are scaled by 0.35, a heuristic scaling.
+ *
+ * Input:
+ *      -seed               : the initial seed for the random number generator.
+ *      -length             : the number of dither values to be generated.
+ *
+ * Output:
+ *      -bufQ7              : pointer to a buffer where dithers are written to.
+ */
+static void GenerateDitherQ7LbUB(
+    int16_t* bufQ7,
+    uint32_t seed,
+    int length) {
+  int k;
+  for (k = 0; k < length; k++) {
+    /* new random unsigned int */
+    seed = (seed * 196314165) + 907633515;
+
+    /* Fixed-point dither sample between -64 and 64 (Q7). */
+    /* bufQ7 = seed * 128 / 4294967295 */
+    bufQ7[k] = (int16_t)(((int32_t)(seed + 16777216)) >> 25);
+
+    /* Scale by 0.35. */
+    bufQ7[k] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(bufQ7[k], 2048, 13);
+  }
+}
+
+/*
+ * Function to decode the complex spectrum from the bit stream
+ * returns the total number of bytes in the stream.
+ */
+int WebRtcIsac_DecodeSpec(Bitstr* streamdata, int16_t AvgPitchGain_Q12,
+                          enum ISACBand band, double* fr, double* fi) {
+  int16_t  DitherQ7[FRAMESAMPLES];
+  int16_t  data[FRAMESAMPLES];
+  int32_t  invARSpec2_Q16[FRAMESAMPLES_QUARTER];
+  uint16_t invARSpecQ8[FRAMESAMPLES_QUARTER];
+  int16_t  ARCoefQ12[AR_ORDER + 1];
+  int16_t  RCQ15[AR_ORDER];
+  int16_t  gainQ10;
+  int32_t  gain2_Q10, res;
+  int32_t  in_sqrt;
+  int32_t  newRes;
+  int k, len, i;
+  int is_12khz = !kIsSWB12;
+  int num_dft_coeff = FRAMESAMPLES;
+  /* Create dither signal. */
+  if (band == kIsacLowerBand) {
+    GenerateDitherQ7Lb(DitherQ7, streamdata->W_upper, FRAMESAMPLES,
+                       AvgPitchGain_Q12);
+  } else {
+    GenerateDitherQ7LbUB(DitherQ7, streamdata->W_upper, FRAMESAMPLES);
+    if (band == kIsacUpperBand12) {
+      is_12khz = kIsSWB12;
+      num_dft_coeff = FRAMESAMPLES_HALF;
+    }
+  }
+
+  /* Decode model parameters. */
+  if (WebRtcIsac_DecodeRc(streamdata, RCQ15) < 0)
+    return -ISAC_RANGE_ERROR_DECODE_SPECTRUM;
+
+  WebRtcSpl_ReflCoefToLpc(RCQ15, AR_ORDER, ARCoefQ12);
+
+  if (WebRtcIsac_DecodeGain2(streamdata, &gain2_Q10) < 0)
+    return -ISAC_RANGE_ERROR_DECODE_SPECTRUM;
+
+  /* Compute inverse AR power spectrum. */
+  FindInvArSpec(ARCoefQ12, gain2_Q10, invARSpec2_Q16);
+
+  /* Convert to magnitude spectrum,
+   * by doing square-roots (modified from SPLIB). */
+  res = 1 << (WebRtcSpl_GetSizeInBits(invARSpec2_Q16[0]) >> 1);
+  for (k = 0; k < FRAMESAMPLES_QUARTER; k++) {
+    in_sqrt = invARSpec2_Q16[k];
+    i = 10;
+
+    /* Negative values make no sense for a real sqrt-function. */
+    if (in_sqrt < 0)
+      in_sqrt = -in_sqrt;
+
+    newRes = (in_sqrt / res + res) >> 1;
+    do {
+      res = newRes;
+      newRes = (in_sqrt / res + res) >> 1;
+    } while (newRes != res && i-- > 0);
+
+    invARSpecQ8[k] = (int16_t)newRes;
+  }
+
+  len = WebRtcIsac_DecLogisticMulti2(data, streamdata, invARSpecQ8, DitherQ7,
+                                     num_dft_coeff, is_12khz);
+  /* Arithmetic decoding of spectrum. */
+  if (len < 1) {
+    return -ISAC_RANGE_ERROR_DECODE_SPECTRUM;
+  }
+
+  switch (band) {
+    case kIsacLowerBand: {
+      /* Scale down spectral samples with low SNR. */
+      int32_t p1;
+      int32_t p2;
+      if (AvgPitchGain_Q12 <= 614) {
+        p1 = 30 << 10;
+        p2 = 32768 + (33 << 16);
+      } else {
+        p1 = 36 << 10;
+        p2 = 32768 + (40 << 16);
+      }
+      for (k = 0; k < FRAMESAMPLES; k += 4) {
+        gainQ10 = WebRtcSpl_DivW32W16ResW16(p1, (int16_t)(
+            (invARSpec2_Q16[k >> 2] + p2) >> 16));
+        *fr++ = (double)((data[ k ] * gainQ10 + 512) >> 10) / 128.0;
+        *fi++ = (double)((data[k + 1] * gainQ10 + 512) >> 10) / 128.0;
+        *fr++ = (double)((data[k + 2] * gainQ10 + 512) >> 10) / 128.0;
+        *fi++ = (double)((data[k + 3] * gainQ10 + 512) >> 10) / 128.0;
+      }
+      break;
+    }
+    case kIsacUpperBand12: {
+      for (k = 0, i = 0; k < FRAMESAMPLES_HALF; k += 4) {
+        fr[i] = (double)data[ k ] / 128.0;
+        fi[i] = (double)data[k + 1] / 128.0;
+        i++;
+        fr[i] = (double)data[k + 2] / 128.0;
+        fi[i] = (double)data[k + 3] / 128.0;
+        i++;
+      }
+      /* The second half of real and imaginary coefficients is zero. This is
+       * due to using the old FFT module which requires two signals as input
+       * while in 0-12 kHz mode we only have 8-12 kHz band, and the second
+       * signal is set to zero. */
+      memset(&fr[FRAMESAMPLES_QUARTER], 0, FRAMESAMPLES_QUARTER *
+             sizeof(double));
+      memset(&fi[FRAMESAMPLES_QUARTER], 0, FRAMESAMPLES_QUARTER *
+             sizeof(double));
+      break;
+    }
+    case kIsacUpperBand16: {
+      for (i = 0, k = 0; k < FRAMESAMPLES; k += 4, i++) {
+        fr[i] = (double)data[ k ] / 128.0;
+        fi[i] = (double)data[k + 1] / 128.0;
+        fr[(FRAMESAMPLES_HALF) - 1 - i] = (double)data[k + 2] / 128.0;
+        fi[(FRAMESAMPLES_HALF) - 1 - i] = (double)data[k + 3] / 128.0;
+      }
+      break;
+    }
+  }
+  return len;
+}
+
+
+int WebRtcIsac_EncodeSpec(const int16_t* fr, const int16_t* fi,
+                          int16_t AvgPitchGain_Q12, enum ISACBand band,
+                          Bitstr* streamdata) {
+  int16_t ditherQ7[FRAMESAMPLES];
+  int16_t dataQ7[FRAMESAMPLES];
+  int32_t PSpec[FRAMESAMPLES_QUARTER];
+  int32_t invARSpec2_Q16[FRAMESAMPLES_QUARTER];
+  uint16_t invARSpecQ8[FRAMESAMPLES_QUARTER];
+  int32_t CorrQ7[AR_ORDER + 1];
+  int32_t CorrQ7_norm[AR_ORDER + 1];
+  int16_t RCQ15[AR_ORDER];
+  int16_t ARCoefQ12[AR_ORDER + 1];
+  int32_t gain2_Q10;
+  int16_t val;
+  int32_t nrg, res;
+  uint32_t sum;
+  int32_t in_sqrt;
+  int32_t newRes;
+  int16_t err;
+  uint32_t nrg_u32;
+  int shift_var;
+  int k, n, j, i;
+  int is_12khz = !kIsSWB12;
+  int num_dft_coeff = FRAMESAMPLES;
+
+  /* Create dither signal. */
+  if (band == kIsacLowerBand) {
+    GenerateDitherQ7Lb(ditherQ7, streamdata->W_upper, FRAMESAMPLES,
+                       AvgPitchGain_Q12);
+  } else {
+    GenerateDitherQ7LbUB(ditherQ7, streamdata->W_upper, FRAMESAMPLES);
+    if (band == kIsacUpperBand12) {
+      is_12khz = kIsSWB12;
+      num_dft_coeff = FRAMESAMPLES_HALF;
+    }
+  }
+
+  /* add dither and quantize, and compute power spectrum */
+  switch (band) {
+    case kIsacLowerBand: {
+      for (k = 0; k < FRAMESAMPLES; k += 4) {
+        val = ((*fr++ + ditherQ7[k]   + 64) & 0xFF80) - ditherQ7[k];
+        dataQ7[k] = val;
+        sum = val * val;
+
+        val = ((*fi++ + ditherQ7[k + 1] + 64) & 0xFF80) - ditherQ7[k + 1];
+        dataQ7[k + 1] = val;
+        sum += val * val;
+
+        val = ((*fr++ + ditherQ7[k + 2] + 64) & 0xFF80) - ditherQ7[k + 2];
+        dataQ7[k + 2] = val;
+        sum += val * val;
+
+        val = ((*fi++ + ditherQ7[k + 3] + 64) & 0xFF80) - ditherQ7[k + 3];
+        dataQ7[k + 3] = val;
+        sum += val * val;
+
+        PSpec[k >> 2] = sum >> 2;
+      }
+      break;
+    }
+    case kIsacUpperBand12: {
+      for (k = 0, j = 0; k < FRAMESAMPLES_HALF; k += 4) {
+        val = ((*fr++ + ditherQ7[k]   + 64) & 0xFF80) - ditherQ7[k];
+        dataQ7[k] = val;
+        sum = val * val;
+
+        val = ((*fi++ + ditherQ7[k + 1] + 64) & 0xFF80) - ditherQ7[k + 1];
+        dataQ7[k + 1] = val;
+        sum += val * val;
+
+        PSpec[j++] = sum >> 1;
+
+        val = ((*fr++ + ditherQ7[k + 2] + 64) & 0xFF80) - ditherQ7[k + 2];
+        dataQ7[k + 2] = val;
+        sum = val * val;
+
+        val = ((*fi++ + ditherQ7[k + 3] + 64) & 0xFF80) - ditherQ7[k + 3];
+        dataQ7[k + 3] = val;
+        sum += val * val;
+
+        PSpec[j++] = sum >> 1;
+      }
+      break;
+    }
+    case kIsacUpperBand16: {
+      for (j = 0, k = 0; k < FRAMESAMPLES; k += 4, j++) {
+        val = ((fr[j] + ditherQ7[k]   + 64) & 0xFF80) - ditherQ7[k];
+        dataQ7[k] = val;
+        sum = val * val;
+
+        val = ((fi[j] + ditherQ7[k + 1] + 64) & 0xFF80) - ditherQ7[k + 1];
+        dataQ7[k + 1] = val;
+        sum += val * val;
+
+        val = ((fr[(FRAMESAMPLES_HALF) - 1 - j] + ditherQ7[k + 2] + 64) &
+            0xFF80) - ditherQ7[k + 2];
+        dataQ7[k + 2] = val;
+        sum += val * val;
+
+        val = ((fi[(FRAMESAMPLES_HALF) - 1 - j] + ditherQ7[k + 3] + 64) &
+            0xFF80) - ditherQ7[k + 3];
+        dataQ7[k + 3] = val;
+        sum += val * val;
+
+        PSpec[k >> 2] = sum >> 2;
+      }
+      break;
+    }
+  }
+
+  /* compute correlation from power spectrum */
+  FindCorrelation(PSpec, CorrQ7);
+
+  /* Find AR coefficients */
+  /* Aumber of bit shifts to 14-bit normalize CorrQ7[0]
+   * (leaving room for sign) */
+  shift_var = WebRtcSpl_NormW32(CorrQ7[0]) - 18;
+
+  if (shift_var > 0) {
+    for (k = 0; k < AR_ORDER + 1; k++) {
+      CorrQ7_norm[k] = CorrQ7[k] << shift_var;
+    }
+  } else {
+    for (k = 0; k < AR_ORDER + 1; k++) {
+      CorrQ7_norm[k] = CorrQ7[k] >> (-shift_var);
+    }
+  }
+
+  /* Find RC coefficients. */
+  WebRtcSpl_AutoCorrToReflCoef(CorrQ7_norm, AR_ORDER, RCQ15);
+
+  /* Quantize & code RC Coefficient. */
+  WebRtcIsac_EncodeRc(RCQ15, streamdata);
+
+  /* RC -> AR coefficients */
+  WebRtcSpl_ReflCoefToLpc(RCQ15, AR_ORDER, ARCoefQ12);
+
+  /* Compute ARCoef' * Corr * ARCoef in Q19. */
+  nrg = 0;
+  for (j = 0; j <= AR_ORDER; j++) {
+    for (n = 0; n <= j; n++) {
+      nrg += (ARCoefQ12[j] * ((CorrQ7_norm[j - n] * ARCoefQ12[n] + 256) >> 9) +
+          4) >> 3;
+    }
+    for (n = j + 1; n <= AR_ORDER; n++) {
+      nrg += (ARCoefQ12[j] * ((CorrQ7_norm[n - j] * ARCoefQ12[n] + 256) >> 9) +
+          4) >> 3;
+    }
+  }
+
+  nrg_u32 = (uint32_t)nrg;
+  if (shift_var > 0) {
+    nrg_u32 = nrg_u32 >> shift_var;
+  } else {
+    nrg_u32 = nrg_u32 << (-shift_var);
+  }
+  if (nrg_u32 > 0x7FFFFFFF) {
+    nrg = 0x7FFFFFFF;
+  }  else {
+    nrg = (int32_t)nrg_u32;
+  }
+  /* Also shifts 31 bits to the left! */
+  gain2_Q10 = WebRtcSpl_DivResultInQ31(FRAMESAMPLES_QUARTER, nrg);
+
+  /* Quantize & code gain2_Q10. */
+  if (WebRtcIsac_EncodeGain2(&gain2_Q10, streamdata)) {
+    return -1;
+  }
+
+  /* Compute inverse AR power spectrum. */
+  FindInvArSpec(ARCoefQ12, gain2_Q10, invARSpec2_Q16);
+  /* Convert to magnitude spectrum, by doing square-roots
+   * (modified from SPLIB). */
+  res = 1 << (WebRtcSpl_GetSizeInBits(invARSpec2_Q16[0]) >> 1);
+  for (k = 0; k < FRAMESAMPLES_QUARTER; k++) {
+    in_sqrt = invARSpec2_Q16[k];
+    i = 10;
+    /* Negative values make no sense for a real sqrt-function. */
+    if (in_sqrt < 0) {
+      in_sqrt = -in_sqrt;
+    }
+    newRes = (in_sqrt / res + res) >> 1;
+    do {
+      res = newRes;
+      newRes = (in_sqrt / res + res) >> 1;
+    } while (newRes != res && i-- > 0);
+
+    invARSpecQ8[k] = (int16_t)newRes;
+  }
+  /* arithmetic coding of spectrum */
+  err = WebRtcIsac_EncLogisticMulti2(streamdata, dataQ7, invARSpecQ8,
+                                     num_dft_coeff, is_12khz);
+  if (err < 0) {
+    return (err);
+  }
+  return 0;
+}
+
+
+/* step-up */
+void WebRtcIsac_Rc2Poly(double* RC, int N, double* a) {
+  int m, k;
+  double tmp[MAX_AR_MODEL_ORDER];
+
+  a[0] = 1.0;
+  tmp[0] = 1.0;
+  for (m = 1; m <= N; m++) {
+    /* copy */
+    memcpy(&tmp[1], &a[1], (m - 1) * sizeof(double));
+    a[m] = RC[m - 1];
+    for (k = 1; k < m; k++) {
+      a[k] += RC[m - 1] * tmp[m - k];
+    }
+  }
+  return;
+}
+
+/* step-down */
+void WebRtcIsac_Poly2Rc(double* a, int N, double* RC) {
+  int m, k;
+  double tmp[MAX_AR_MODEL_ORDER];
+  double tmp_inv;
+
+  RC[N - 1] = a[N];
+  for (m = N - 1; m > 0; m--) {
+    tmp_inv = 1.0 / (1.0 - RC[m] * RC[m]);
+    for (k = 1; k <= m; k++) {
+      tmp[k] = (a[k] - RC[m] * a[m - k + 1]) * tmp_inv;
+    }
+
+    memcpy(&a[1], &tmp[1], (m - 1) * sizeof(double));
+    RC[m - 1] = tmp[m];
+  }
+  return;
+}
+
+
+#define MAX_ORDER 100
+
+/* Matlab's LAR definition */
+void WebRtcIsac_Rc2Lar(const double* refc, double* lar, int order) {
+  int k;
+  for (k = 0; k < order; k++) {
+    lar[k] = log((1 + refc[k]) / (1 - refc[k]));
+  }
+}
+
+
+void WebRtcIsac_Lar2Rc(const double* lar, double* refc,  int order) {
+  int k;
+  double tmp;
+
+  for (k = 0; k < order; k++) {
+    tmp = exp(lar[k]);
+    refc[k] = (tmp - 1) / (tmp + 1);
+  }
+}
+
+void WebRtcIsac_Poly2Lar(double* lowband, int orderLo, double* hiband,
+                         int orderHi, int Nsub, double* lars) {
+  int k;
+  double rc[MAX_ORDER], *inpl, *inph, *outp;
+
+  inpl = lowband;
+  inph = hiband;
+  outp = lars;
+  for (k = 0; k < Nsub; k++) {
+    /* gains */
+    outp[0] = inpl[0];
+    outp[1] = inph[0];
+    outp += 2;
+
+    /* Low band */
+    inpl[0] = 1.0;
+    WebRtcIsac_Poly2Rc(inpl, orderLo, rc);
+    WebRtcIsac_Rc2Lar(rc, outp, orderLo);
+    outp += orderLo;
+
+    /* High band */
+    inph[0] = 1.0;
+    WebRtcIsac_Poly2Rc(inph, orderHi, rc);
+    WebRtcIsac_Rc2Lar(rc, outp, orderHi);
+    outp += orderHi;
+
+    inpl += orderLo + 1;
+    inph += orderHi + 1;
+  }
+}
+
+
+int16_t WebRtcIsac_Poly2LarUB(double* lpcVecs, int16_t bandwidth) {
+  double      poly[MAX_ORDER];
+  double      rc[MAX_ORDER];
+  double*     ptrIO;
+  int16_t vecCntr;
+  int16_t vecSize;
+  int16_t numVec;
+
+  vecSize = UB_LPC_ORDER;
+  switch (bandwidth) {
+    case isac12kHz: {
+      numVec  = UB_LPC_VEC_PER_FRAME;
+      break;
+    }
+    case isac16kHz: {
+      numVec  = UB16_LPC_VEC_PER_FRAME;
+      break;
+    }
+    default:
+      return -1;
+  }
+
+  ptrIO = lpcVecs;
+  poly[0] = 1.0;
+  for (vecCntr = 0; vecCntr < numVec; vecCntr++) {
+    memcpy(&poly[1], ptrIO, sizeof(double) * vecSize);
+    WebRtcIsac_Poly2Rc(poly, vecSize, rc);
+    WebRtcIsac_Rc2Lar(rc, ptrIO, vecSize);
+    ptrIO += vecSize;
+  }
+  return 0;
+}
+
+
+void WebRtcIsac_Lar2Poly(double* lars, double* lowband, int orderLo,
+                         double* hiband, int orderHi, int Nsub) {
+  int k, orderTot;
+  double rc[MAX_ORDER], *outpl, *outph, *inp;
+
+  orderTot = (orderLo + orderHi + 2);
+  outpl = lowband;
+  outph = hiband;
+  /* First two elements of 'inp' store gains*/
+  inp = lars;
+  for (k = 0; k < Nsub; k++) {
+    /* Low band */
+    WebRtcIsac_Lar2Rc(&inp[2], rc, orderLo);
+    WebRtcIsac_Rc2Poly(rc, orderLo, outpl);
+
+    /* High band */
+    WebRtcIsac_Lar2Rc(&inp[orderLo + 2], rc, orderHi);
+    WebRtcIsac_Rc2Poly(rc, orderHi, outph);
+
+    /* gains */
+    outpl[0] = inp[0];
+    outph[0] = inp[1];
+
+    outpl += orderLo + 1;
+    outph += orderHi + 1;
+    inp += orderTot;
+  }
+}
+
+/*
+ *  assumes 2 LAR vectors interpolates to 'numPolyVec' A-polynomials
+ *  Note: 'numPolyVecs' includes the first and the last point of the interval
+ */
+void WebRtcIsac_Lar2PolyInterpolUB(double* larVecs, double* percepFilterParams,
+                                   int numPolyVecs) {
+  int polyCntr, coeffCntr;
+  double larInterpol[UB_LPC_ORDER];
+  double rc[UB_LPC_ORDER];
+  double delta[UB_LPC_ORDER];
+
+  /* calculate the step-size for linear interpolation coefficients */
+  for (coeffCntr = 0; coeffCntr < UB_LPC_ORDER; coeffCntr++) {
+    delta[coeffCntr] = (larVecs[UB_LPC_ORDER + coeffCntr] -
+        larVecs[coeffCntr]) / (numPolyVecs - 1);
+  }
+
+  for (polyCntr = 0; polyCntr < numPolyVecs; polyCntr++) {
+    for (coeffCntr = 0; coeffCntr < UB_LPC_ORDER; coeffCntr++) {
+      larInterpol[coeffCntr] = larVecs[coeffCntr] +
+          delta[coeffCntr] * polyCntr;
+    }
+    WebRtcIsac_Lar2Rc(larInterpol, rc, UB_LPC_ORDER);
+
+    /* convert to A-polynomial, the following function returns A[0] = 1;
+     * which is written where gains had to be written. Then we write the
+     * gain (outside this function). This way we say a memcpy. */
+    WebRtcIsac_Rc2Poly(rc, UB_LPC_ORDER, percepFilterParams);
+    percepFilterParams += (UB_LPC_ORDER + 1);
+  }
+}
+
+int WebRtcIsac_DecodeLpc(Bitstr* streamdata, double* LPCCoef_lo,
+                         double* LPCCoef_hi) {
+  double lars[KLT_ORDER_GAIN + KLT_ORDER_SHAPE];
+  int err;
+
+  err = WebRtcIsac_DecodeLpcCoef(streamdata, lars);
+  if (err < 0) {
+    return -ISAC_RANGE_ERROR_DECODE_LPC;
+  }
+  WebRtcIsac_Lar2Poly(lars, LPCCoef_lo, ORDERLO, LPCCoef_hi, ORDERHI,
+                      SUBFRAMES);
+  return 0;
+}
+
+int16_t WebRtcIsac_DecodeInterpolLpcUb(Bitstr* streamdata,
+                                       double* percepFilterParams,
+                                       int16_t bandwidth) {
+  double lpcCoeff[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
+  int err;
+  int interpolCntr;
+  int subframeCntr;
+  int16_t numSegments;
+  int16_t numVecPerSegment;
+  int16_t numGains;
+
+  double percepFilterGains[SUBFRAMES << 1];
+  double* ptrOutParam = percepFilterParams;
+
+  err = WebRtcIsac_DecodeLpcCoefUB(streamdata, lpcCoeff, percepFilterGains,
+                                   bandwidth);
+  if (err < 0) {
+    return -ISAC_RANGE_ERROR_DECODE_LPC;
+  }
+
+  switch (bandwidth) {
+    case isac12kHz: {
+      numGains = SUBFRAMES;
+      numSegments = UB_LPC_VEC_PER_FRAME - 1;
+      numVecPerSegment = kLpcVecPerSegmentUb12;
+      break;
+    }
+    case isac16kHz: {
+      numGains = SUBFRAMES << 1;
+      numSegments = UB16_LPC_VEC_PER_FRAME - 1;
+      numVecPerSegment = kLpcVecPerSegmentUb16;
+      break;
+    }
+    default:
+      return -1;
+  }
+
+  for (interpolCntr = 0; interpolCntr < numSegments; interpolCntr++) {
+    WebRtcIsac_Lar2PolyInterpolUB(&lpcCoeff[interpolCntr * UB_LPC_ORDER],
+                                  ptrOutParam, numVecPerSegment + 1);
+    ptrOutParam += (numVecPerSegment * (UB_LPC_ORDER + 1));
+  }
+
+  ptrOutParam = percepFilterParams;
+
+  if (bandwidth == isac16kHz) {
+    ptrOutParam += (1 + UB_LPC_ORDER);
+  }
+
+  for (subframeCntr = 0; subframeCntr < numGains; subframeCntr++) {
+    *ptrOutParam = percepFilterGains[subframeCntr];
+    ptrOutParam += (1 + UB_LPC_ORDER);
+  }
+  return 0;
+}
+
+
+/* decode & dequantize LPC Coef */
+int WebRtcIsac_DecodeLpcCoef(Bitstr* streamdata, double* LPCCoef) {
+  int j, k, n, pos, pos2, posg, poss, offsg, offss, offs2;
+  int index_g[KLT_ORDER_GAIN], index_s[KLT_ORDER_SHAPE];
+  double tmpcoeffs_g[KLT_ORDER_GAIN], tmpcoeffs_s[KLT_ORDER_SHAPE];
+  double tmpcoeffs2_g[KLT_ORDER_GAIN], tmpcoeffs2_s[KLT_ORDER_SHAPE];
+  double sum;
+  int err;
+  int model = 1;
+
+  /* entropy decoding of model number */
+  /* We are keeping this for backward compatibility of bit-streams. */
+  err = WebRtcIsac_DecHistOneStepMulti(&model, streamdata,
+                                       WebRtcIsac_kQKltModelCdfPtr,
+                                       WebRtcIsac_kQKltModelInitIndex, 1);
+  if (err < 0) {
+    return err;
+  }
+  /* Only accepted value of model is 0. It is kept in bit-stream for backward
+   * compatibility. */
+  if (model != 0) {
+    return -ISAC_DISALLOWED_LPC_MODEL;
+  }
+
+  /* entropy decoding of quantization indices */
+  err = WebRtcIsac_DecHistOneStepMulti(
+      index_s, streamdata, WebRtcIsac_kQKltCdfPtrShape,
+      WebRtcIsac_kQKltInitIndexShape, KLT_ORDER_SHAPE);
+  if (err < 0) {
+    return err;
+  }
+  err = WebRtcIsac_DecHistOneStepMulti(
+      index_g, streamdata, WebRtcIsac_kQKltCdfPtrGain,
+      WebRtcIsac_kQKltInitIndexGain, KLT_ORDER_GAIN);
+  if (err < 0) {
+    return err;
+  }
+
+  /* find quantization levels for coefficients */
+  for (k = 0; k < KLT_ORDER_SHAPE; k++) {
+    tmpcoeffs_s[k] =
+        WebRtcIsac_kQKltLevelsShape[WebRtcIsac_kQKltOffsetShape[k] +
+                                    index_s[k]];
+  }
+  for (k = 0; k < KLT_ORDER_GAIN; k++) {
+    tmpcoeffs_g[k] = WebRtcIsac_kQKltLevelsGain[WebRtcIsac_kQKltOffsetGain[k] +
+                                                index_g[k]];
+  }
+
+  /* Inverse KLT  */
+
+  /* Left transform, transpose matrix!  */
+  offsg = 0;
+  offss = 0;
+  posg = 0;
+  poss = 0;
+  for (j = 0; j < SUBFRAMES; j++) {
+    offs2 = 0;
+    for (k = 0; k < LPC_GAIN_ORDER; k++) {
+      sum = 0;
+      pos = offsg;
+      pos2 = offs2;
+      for (n = 0; n < LPC_GAIN_ORDER; n++) {
+        sum += tmpcoeffs_g[pos++] * WebRtcIsac_kKltT1Gain[pos2++];
+      }
+      tmpcoeffs2_g[posg++] = sum;
+      offs2 += LPC_GAIN_ORDER;
+    }
+    offs2 = 0;
+    for (k = 0; k < LPC_SHAPE_ORDER; k++) {
+      sum = 0;
+      pos = offss;
+      pos2 = offs2;
+      for (n = 0; n < LPC_SHAPE_ORDER; n++) {
+        sum += tmpcoeffs_s[pos++] * WebRtcIsac_kKltT1Shape[pos2++];
+      }
+      tmpcoeffs2_s[poss++] = sum;
+      offs2 += LPC_SHAPE_ORDER;
+    }
+    offsg += LPC_GAIN_ORDER;
+    offss += LPC_SHAPE_ORDER;
+  }
+
+  /* Right transform, transpose matrix */
+  offsg = 0;
+  offss = 0;
+  posg = 0;
+  poss = 0;
+  for (j = 0; j < SUBFRAMES; j++) {
+    posg = offsg;
+    for (k = 0; k < LPC_GAIN_ORDER; k++) {
+      sum = 0;
+      pos = k;
+      pos2 = j;
+      for (n = 0; n < SUBFRAMES; n++) {
+        sum += tmpcoeffs2_g[pos] * WebRtcIsac_kKltT2Gain[pos2];
+        pos += LPC_GAIN_ORDER;
+        pos2 += SUBFRAMES;
+
+      }
+      tmpcoeffs_g[posg++] = sum;
+    }
+    poss = offss;
+    for (k = 0; k < LPC_SHAPE_ORDER; k++) {
+      sum = 0;
+      pos = k;
+      pos2 = j;
+      for (n = 0; n < SUBFRAMES; n++) {
+        sum += tmpcoeffs2_s[pos] * WebRtcIsac_kKltT2Shape[pos2];
+        pos += LPC_SHAPE_ORDER;
+        pos2 += SUBFRAMES;
+      }
+      tmpcoeffs_s[poss++] = sum;
+    }
+    offsg += LPC_GAIN_ORDER;
+    offss += LPC_SHAPE_ORDER;
+  }
+
+  /* scaling, mean addition, and gain restoration */
+  posg = 0;
+  poss = 0;
+  pos = 0;
+  for (k = 0; k < SUBFRAMES; k++) {
+    /* log gains */
+    LPCCoef[pos] = tmpcoeffs_g[posg] / LPC_GAIN_SCALE;
+    LPCCoef[pos] += WebRtcIsac_kLpcMeansGain[posg];
+    LPCCoef[pos] = exp(LPCCoef[pos]);
+    pos++;
+    posg++;
+    LPCCoef[pos] = tmpcoeffs_g[posg] / LPC_GAIN_SCALE;
+    LPCCoef[pos] += WebRtcIsac_kLpcMeansGain[posg];
+    LPCCoef[pos] = exp(LPCCoef[pos]);
+    pos++;
+    posg++;
+
+    /* Low-band LAR coefficients. */
+    for (n = 0; n < LPC_LOBAND_ORDER; n++, pos++, poss++) {
+      LPCCoef[pos] = tmpcoeffs_s[poss] / LPC_LOBAND_SCALE;
+      LPCCoef[pos] += WebRtcIsac_kLpcMeansShape[poss];
+    }
+
+    /* High-band LAR coefficients. */
+    for (n = 0; n < LPC_HIBAND_ORDER; n++, pos++, poss++) {
+      LPCCoef[pos] = tmpcoeffs_s[poss] / LPC_HIBAND_SCALE;
+      LPCCoef[pos] += WebRtcIsac_kLpcMeansShape[poss];
+    }
+  }
+  return 0;
+}
+
+/* Encode LPC in LAR domain. */
+void WebRtcIsac_EncodeLar(double* LPCCoef, Bitstr* streamdata,
+                          IsacSaveEncoderData* encData) {
+  int j, k, n, pos, pos2, poss, offss, offs2;
+  int index_s[KLT_ORDER_SHAPE];
+  int index_ovr_s[KLT_ORDER_SHAPE];
+  double tmpcoeffs_s[KLT_ORDER_SHAPE];
+  double tmpcoeffs2_s[KLT_ORDER_SHAPE];
+  double sum;
+  const int kModel = 0;
+
+  /* Mean removal and scaling. */
+  poss = 0;
+  pos = 0;
+  for (k = 0; k < SUBFRAMES; k++) {
+    /* First two element are gains, move over them. */
+    pos += 2;
+
+    /* Low-band LAR coefficients. */
+    for (n = 0; n < LPC_LOBAND_ORDER; n++, poss++, pos++) {
+      tmpcoeffs_s[poss] = LPCCoef[pos] - WebRtcIsac_kLpcMeansShape[poss];
+      tmpcoeffs_s[poss] *= LPC_LOBAND_SCALE;
+    }
+
+    /* High-band LAR coefficients. */
+    for (n = 0; n < LPC_HIBAND_ORDER; n++, poss++, pos++) {
+      tmpcoeffs_s[poss] = LPCCoef[pos] - WebRtcIsac_kLpcMeansShape[poss];
+      tmpcoeffs_s[poss] *= LPC_HIBAND_SCALE;
+    }
+  }
+
+  /* KLT  */
+
+  /* Left transform. */
+  offss = 0;
+  for (j = 0; j < SUBFRAMES; j++) {
+    poss = offss;
+    for (k = 0; k < LPC_SHAPE_ORDER; k++) {
+      sum = 0;
+      pos = offss;
+      pos2 = k;
+      for (n = 0; n < LPC_SHAPE_ORDER; n++) {
+        sum += tmpcoeffs_s[pos++] * WebRtcIsac_kKltT1Shape[pos2];
+        pos2 += LPC_SHAPE_ORDER;
+      }
+      tmpcoeffs2_s[poss++] = sum;
+    }
+    offss += LPC_SHAPE_ORDER;
+  }
+
+  /* Right transform. */
+  offss = 0;
+  offs2 = 0;
+  for (j = 0; j < SUBFRAMES; j++) {
+    poss = offss;
+    for (k = 0; k < LPC_SHAPE_ORDER; k++) {
+      sum = 0;
+      pos = k;
+      pos2 = offs2;
+      for (n = 0; n < SUBFRAMES; n++) {
+        sum += tmpcoeffs2_s[pos] * WebRtcIsac_kKltT2Shape[pos2++];
+        pos += LPC_SHAPE_ORDER;
+      }
+      tmpcoeffs_s[poss++] = sum;
+    }
+    offs2 += SUBFRAMES;
+    offss += LPC_SHAPE_ORDER;
+  }
+
+  /* Quantize coefficients. */
+  for (k = 0; k < KLT_ORDER_SHAPE; k++) {
+    index_s[k] = (WebRtcIsac_lrint(tmpcoeffs_s[k] / KLT_STEPSIZE)) +
+        WebRtcIsac_kQKltQuantMinShape[k];
+    if (index_s[k] < 0) {
+      index_s[k] = 0;
+    } else if (index_s[k] > WebRtcIsac_kQKltMaxIndShape[k]) {
+      index_s[k] = WebRtcIsac_kQKltMaxIndShape[k];
+    }
+    index_ovr_s[k] = WebRtcIsac_kQKltOffsetShape[k] + index_s[k];
+  }
+
+
+  /* Only one model remains in this version of the code, kModel = 0. We
+   * are keeping for bit-streams to be backward compatible. */
+  /* entropy coding of model number */
+  WebRtcIsac_EncHistMulti(streamdata, &kModel, WebRtcIsac_kQKltModelCdfPtr, 1);
+
+  /* Save data for creation of multiple bit streams */
+  /* Entropy coding of quantization indices - shape only. */
+  WebRtcIsac_EncHistMulti(streamdata, index_s, WebRtcIsac_kQKltCdfPtrShape,
+                          KLT_ORDER_SHAPE);
+
+  /* Save data for creation of multiple bit streams. */
+  for (k = 0; k < KLT_ORDER_SHAPE; k++) {
+    encData->LPCindex_s[KLT_ORDER_SHAPE * encData->startIdx + k] = index_s[k];
+  }
+
+  /* Find quantization levels for shape coefficients. */
+  for (k = 0; k < KLT_ORDER_SHAPE; k++) {
+    tmpcoeffs_s[k] = WebRtcIsac_kQKltLevelsShape[index_ovr_s[k]];
+  }
+  /* Inverse KLT.  */
+  /* Left transform, transpose matrix.! */
+  offss = 0;
+  poss = 0;
+  for (j = 0; j < SUBFRAMES; j++) {
+    offs2 = 0;
+    for (k = 0; k < LPC_SHAPE_ORDER; k++) {
+      sum = 0;
+      pos = offss;
+      pos2 = offs2;
+      for (n = 0; n < LPC_SHAPE_ORDER; n++) {
+        sum += tmpcoeffs_s[pos++] * WebRtcIsac_kKltT1Shape[pos2++];
+      }
+      tmpcoeffs2_s[poss++] = sum;
+      offs2 += LPC_SHAPE_ORDER;
+    }
+    offss += LPC_SHAPE_ORDER;
+  }
+
+  /* Right transform, Transpose matrix */
+  offss = 0;
+  poss = 0;
+  for (j = 0; j < SUBFRAMES; j++) {
+    poss = offss;
+    for (k = 0; k < LPC_SHAPE_ORDER; k++) {
+      sum = 0;
+      pos = k;
+      pos2 = j;
+      for (n = 0; n < SUBFRAMES; n++) {
+        sum += tmpcoeffs2_s[pos] * WebRtcIsac_kKltT2Shape[pos2];
+        pos += LPC_SHAPE_ORDER;
+        pos2 += SUBFRAMES;
+      }
+      tmpcoeffs_s[poss++] = sum;
+    }
+    offss += LPC_SHAPE_ORDER;
+  }
+
+  /* Scaling, mean addition, and gain restoration. */
+  poss = 0;
+  pos = 0;
+  for (k = 0; k < SUBFRAMES; k++) {
+    /* Ignore gains. */
+    pos += 2;
+
+    /* Low band LAR coefficients. */
+    for (n = 0; n < LPC_LOBAND_ORDER; n++, pos++, poss++) {
+      LPCCoef[pos] = tmpcoeffs_s[poss] / LPC_LOBAND_SCALE;
+      LPCCoef[pos] += WebRtcIsac_kLpcMeansShape[poss];
+    }
+
+    /* High band LAR coefficients. */
+    for (n = 0; n < LPC_HIBAND_ORDER; n++, pos++, poss++) {
+      LPCCoef[pos] = tmpcoeffs_s[poss] / LPC_HIBAND_SCALE;
+      LPCCoef[pos] += WebRtcIsac_kLpcMeansShape[poss];
+    }
+  }
+}
+
+
+void WebRtcIsac_EncodeLpcLb(double* LPCCoef_lo, double* LPCCoef_hi,
+                            Bitstr* streamdata, IsacSaveEncoderData* encData) {
+  double lars[KLT_ORDER_GAIN + KLT_ORDER_SHAPE];
+  int k;
+
+  WebRtcIsac_Poly2Lar(LPCCoef_lo, ORDERLO, LPCCoef_hi, ORDERHI, SUBFRAMES,
+                      lars);
+  WebRtcIsac_EncodeLar(lars, streamdata, encData);
+  WebRtcIsac_Lar2Poly(lars, LPCCoef_lo, ORDERLO, LPCCoef_hi, ORDERHI,
+                      SUBFRAMES);
+  /* Save data for creation of multiple bit streams (and transcoding). */
+  for (k = 0; k < (ORDERLO + 1)*SUBFRAMES; k++) {
+    encData->LPCcoeffs_lo[(ORDERLO + 1)*SUBFRAMES * encData->startIdx + k] =
+        LPCCoef_lo[k];
+  }
+  for (k = 0; k < (ORDERHI + 1)*SUBFRAMES; k++) {
+    encData->LPCcoeffs_hi[(ORDERHI + 1)*SUBFRAMES * encData->startIdx + k] =
+        LPCCoef_hi[k];
+  }
+}
+
+
+int16_t WebRtcIsac_EncodeLpcUB(double* lpcVecs, Bitstr* streamdata,
+                               double* interpolLPCCoeff,
+                               int16_t bandwidth,
+                                     ISACUBSaveEncDataStruct* encData) {
+  double    U[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
+  int     idx[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
+  int interpolCntr;
+
+  WebRtcIsac_Poly2LarUB(lpcVecs, bandwidth);
+  WebRtcIsac_RemoveLarMean(lpcVecs, bandwidth);
+  WebRtcIsac_DecorrelateIntraVec(lpcVecs, U, bandwidth);
+  WebRtcIsac_DecorrelateInterVec(U, lpcVecs, bandwidth);
+  WebRtcIsac_QuantizeUncorrLar(lpcVecs, idx, bandwidth);
+
+  WebRtcIsac_CorrelateInterVec(lpcVecs, U, bandwidth);
+  WebRtcIsac_CorrelateIntraVec(U, lpcVecs, bandwidth);
+  WebRtcIsac_AddLarMean(lpcVecs, bandwidth);
+
+  switch (bandwidth) {
+    case isac12kHz: {
+      /* Store the indices to be used for multiple encoding. */
+      memcpy(encData->indexLPCShape, idx, UB_LPC_ORDER *
+             UB_LPC_VEC_PER_FRAME * sizeof(int));
+      WebRtcIsac_EncHistMulti(streamdata, idx, WebRtcIsac_kLpcShapeCdfMatUb12,
+                              UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME);
+      for (interpolCntr = 0; interpolCntr < UB_INTERPOL_SEGMENTS;
+          interpolCntr++) {
+        WebRtcIsac_Lar2PolyInterpolUB(lpcVecs, interpolLPCCoeff,
+                                      kLpcVecPerSegmentUb12 + 1);
+        lpcVecs += UB_LPC_ORDER;
+        interpolLPCCoeff += (kLpcVecPerSegmentUb12 * (UB_LPC_ORDER + 1));
+      }
+      break;
+    }
+    case isac16kHz: {
+      /* Store the indices to be used for multiple encoding. */
+      memcpy(encData->indexLPCShape, idx, UB_LPC_ORDER *
+             UB16_LPC_VEC_PER_FRAME * sizeof(int));
+      WebRtcIsac_EncHistMulti(streamdata, idx, WebRtcIsac_kLpcShapeCdfMatUb16,
+                              UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME);
+      for (interpolCntr = 0; interpolCntr < UB16_INTERPOL_SEGMENTS;
+          interpolCntr++) {
+        WebRtcIsac_Lar2PolyInterpolUB(lpcVecs, interpolLPCCoeff,
+                                      kLpcVecPerSegmentUb16 + 1);
+        lpcVecs += UB_LPC_ORDER;
+        interpolLPCCoeff += (kLpcVecPerSegmentUb16 * (UB_LPC_ORDER + 1));
+      }
+      break;
+    }
+    default:
+      return -1;
+  }
+  return 0;
+}
+
+void WebRtcIsac_EncodeLpcGainLb(double* LPCCoef_lo, double* LPCCoef_hi,
+                                Bitstr* streamdata,
+                                IsacSaveEncoderData* encData) {
+  int j, k, n, pos, pos2, posg, offsg, offs2;
+  int index_g[KLT_ORDER_GAIN];
+  int index_ovr_g[KLT_ORDER_GAIN];
+  double tmpcoeffs_g[KLT_ORDER_GAIN];
+  double tmpcoeffs2_g[KLT_ORDER_GAIN];
+  double sum;
+  /* log gains, mean removal and scaling */
+  posg = 0;
+  for (k = 0; k < SUBFRAMES; k++) {
+    tmpcoeffs_g[posg] = log(LPCCoef_lo[(LPC_LOBAND_ORDER + 1) * k]);
+    tmpcoeffs_g[posg] -= WebRtcIsac_kLpcMeansGain[posg];
+    tmpcoeffs_g[posg] *= LPC_GAIN_SCALE;
+    posg++;
+    tmpcoeffs_g[posg] = log(LPCCoef_hi[(LPC_HIBAND_ORDER + 1) * k]);
+    tmpcoeffs_g[posg] -= WebRtcIsac_kLpcMeansGain[posg];
+    tmpcoeffs_g[posg] *= LPC_GAIN_SCALE;
+    posg++;
+  }
+
+  /* KLT  */
+
+  /* Left transform. */
+  offsg = 0;
+  for (j = 0; j < SUBFRAMES; j++) {
+    posg = offsg;
+    for (k = 0; k < LPC_GAIN_ORDER; k++) {
+      sum = 0;
+      pos = offsg;
+      pos2 = k;
+      for (n = 0; n < LPC_GAIN_ORDER; n++) {
+        sum += tmpcoeffs_g[pos++] * WebRtcIsac_kKltT1Gain[pos2];
+        pos2 += LPC_GAIN_ORDER;
+      }
+      tmpcoeffs2_g[posg++] = sum;
+    }
+    offsg += LPC_GAIN_ORDER;
+  }
+
+  /* Right transform. */
+  offsg = 0;
+  offs2 = 0;
+  for (j = 0; j < SUBFRAMES; j++) {
+    posg = offsg;
+    for (k = 0; k < LPC_GAIN_ORDER; k++) {
+      sum = 0;
+      pos = k;
+      pos2 = offs2;
+      for (n = 0; n < SUBFRAMES; n++) {
+        sum += tmpcoeffs2_g[pos] * WebRtcIsac_kKltT2Gain[pos2++];
+        pos += LPC_GAIN_ORDER;
+      }
+      tmpcoeffs_g[posg++] = sum;
+    }
+    offs2 += SUBFRAMES;
+    offsg += LPC_GAIN_ORDER;
+  }
+
+  /* Quantize coefficients. */
+  for (k = 0; k < KLT_ORDER_GAIN; k++) {
+    /* Get index. */
+    pos2 = WebRtcIsac_lrint(tmpcoeffs_g[k] / KLT_STEPSIZE);
+    index_g[k] = (pos2) + WebRtcIsac_kQKltQuantMinGain[k];
+    if (index_g[k] < 0) {
+      index_g[k] = 0;
+    } else if (index_g[k] > WebRtcIsac_kQKltMaxIndGain[k]) {
+      index_g[k] = WebRtcIsac_kQKltMaxIndGain[k];
+    }
+    index_ovr_g[k] = WebRtcIsac_kQKltOffsetGain[k] + index_g[k];
+
+    /* Find quantization levels for coefficients. */
+    tmpcoeffs_g[k] = WebRtcIsac_kQKltLevelsGain[index_ovr_g[k]];
+
+    /* Save data for creation of multiple bit streams. */
+    encData->LPCindex_g[KLT_ORDER_GAIN * encData->startIdx + k] = index_g[k];
+  }
+
+  /* Entropy coding of quantization indices - gain. */
+  WebRtcIsac_EncHistMulti(streamdata, index_g, WebRtcIsac_kQKltCdfPtrGain,
+                          KLT_ORDER_GAIN);
+
+  /* Find quantization levels for coefficients. */
+  /* Left transform. */
+  offsg = 0;
+  posg = 0;
+  for (j = 0; j < SUBFRAMES; j++) {
+    offs2 = 0;
+    for (k = 0; k < LPC_GAIN_ORDER; k++) {
+      sum = 0;
+      pos = offsg;
+      pos2 = offs2;
+      for (n = 0; n < LPC_GAIN_ORDER; n++)
+        sum += tmpcoeffs_g[pos++] * WebRtcIsac_kKltT1Gain[pos2++];
+      tmpcoeffs2_g[posg++] = sum;
+      offs2 += LPC_GAIN_ORDER;
+    }
+    offsg += LPC_GAIN_ORDER;
+  }
+
+  /* Right transform, transpose matrix. */
+  offsg = 0;
+  posg = 0;
+  for (j = 0; j < SUBFRAMES; j++) {
+    posg = offsg;
+    for (k = 0; k < LPC_GAIN_ORDER; k++) {
+      sum = 0;
+      pos = k;
+      pos2 = j;
+      for (n = 0; n < SUBFRAMES; n++) {
+        sum += tmpcoeffs2_g[pos] * WebRtcIsac_kKltT2Gain[pos2];
+        pos += LPC_GAIN_ORDER;
+        pos2 += SUBFRAMES;
+      }
+      tmpcoeffs_g[posg++] = sum;
+    }
+    offsg += LPC_GAIN_ORDER;
+  }
+
+
+  /* Scaling, mean addition, and gain restoration. */
+  posg = 0;
+  for (k = 0; k < SUBFRAMES; k++) {
+    sum = tmpcoeffs_g[posg] / LPC_GAIN_SCALE;
+    sum += WebRtcIsac_kLpcMeansGain[posg];
+    LPCCoef_lo[k * (LPC_LOBAND_ORDER + 1)] = exp(sum);
+    pos++;
+    posg++;
+    sum = tmpcoeffs_g[posg] / LPC_GAIN_SCALE;
+    sum += WebRtcIsac_kLpcMeansGain[posg];
+    LPCCoef_hi[k * (LPC_HIBAND_ORDER + 1)] = exp(sum);
+    pos++;
+    posg++;
+  }
+
+}
+
+void WebRtcIsac_EncodeLpcGainUb(double* lpGains, Bitstr* streamdata,
+                                int* lpcGainIndex) {
+  double U[UB_LPC_GAIN_DIM];
+  int idx[UB_LPC_GAIN_DIM];
+  WebRtcIsac_ToLogDomainRemoveMean(lpGains);
+  WebRtcIsac_DecorrelateLPGain(lpGains, U);
+  WebRtcIsac_QuantizeLpcGain(U, idx);
+  /* Store the index for re-encoding for FEC. */
+  memcpy(lpcGainIndex, idx, UB_LPC_GAIN_DIM * sizeof(int));
+  WebRtcIsac_CorrelateLpcGain(U, lpGains);
+  WebRtcIsac_AddMeanToLinearDomain(lpGains);
+  WebRtcIsac_EncHistMulti(streamdata, idx, WebRtcIsac_kLpcGainCdfMat,
+                          UB_LPC_GAIN_DIM);
+}
+
+
+void WebRtcIsac_StoreLpcGainUb(double* lpGains, Bitstr* streamdata) {
+  double U[UB_LPC_GAIN_DIM];
+  int idx[UB_LPC_GAIN_DIM];
+  WebRtcIsac_ToLogDomainRemoveMean(lpGains);
+  WebRtcIsac_DecorrelateLPGain(lpGains, U);
+  WebRtcIsac_QuantizeLpcGain(U, idx);
+  WebRtcIsac_EncHistMulti(streamdata, idx, WebRtcIsac_kLpcGainCdfMat,
+                          UB_LPC_GAIN_DIM);
+}
+
+
+
+int16_t WebRtcIsac_DecodeLpcGainUb(double* lpGains, Bitstr* streamdata) {
+  double U[UB_LPC_GAIN_DIM];
+  int idx[UB_LPC_GAIN_DIM];
+  int err;
+  err = WebRtcIsac_DecHistOneStepMulti(idx, streamdata,
+                                       WebRtcIsac_kLpcGainCdfMat,
+                                       WebRtcIsac_kLpcGainEntropySearch,
+                                       UB_LPC_GAIN_DIM);
+  if (err < 0) {
+    return -1;
+  }
+  WebRtcIsac_DequantizeLpcGain(idx, U);
+  WebRtcIsac_CorrelateLpcGain(U, lpGains);
+  WebRtcIsac_AddMeanToLinearDomain(lpGains);
+  return 0;
+}
+
+
+
+/* decode & dequantize RC */
+int WebRtcIsac_DecodeRc(Bitstr* streamdata, int16_t* RCQ15) {
+  int k, err;
+  int index[AR_ORDER];
+
+  /* entropy decoding of quantization indices */
+  err = WebRtcIsac_DecHistOneStepMulti(index, streamdata,
+                                       WebRtcIsac_kQArRcCdfPtr,
+                                       WebRtcIsac_kQArRcInitIndex, AR_ORDER);
+  if (err < 0)
+    return err;
+
+  /* find quantization levels for reflection coefficients */
+  for (k = 0; k < AR_ORDER; k++) {
+    RCQ15[k] = *(WebRtcIsac_kQArRcLevelsPtr[k] + index[k]);
+  }
+  return 0;
+}
+
+
+/* quantize & code RC */
+void WebRtcIsac_EncodeRc(int16_t* RCQ15, Bitstr* streamdata) {
+  int k;
+  int index[AR_ORDER];
+
+  /* quantize reflection coefficients (add noise feedback?) */
+  for (k = 0; k < AR_ORDER; k++) {
+    index[k] = WebRtcIsac_kQArRcInitIndex[k];
+    // The safe-guards in following while conditions are to suppress gcc 4.8.3
+    // warnings, Issue 2888. Otherwise, first and last elements of
+    // |WebRtcIsac_kQArBoundaryLevels| are such that the following search
+    // *never* cause an out-of-boundary read.
+    if (RCQ15[k] > WebRtcIsac_kQArBoundaryLevels[index[k]]) {
+      while (index[k] + 1 < NUM_AR_RC_QUANT_BAUNDARY &&
+        RCQ15[k] > WebRtcIsac_kQArBoundaryLevels[index[k] + 1]) {
+        index[k]++;
+      }
+    } else {
+      while (index[k] > 0 &&
+        RCQ15[k] < WebRtcIsac_kQArBoundaryLevels[--index[k]]) ;
+    }
+    RCQ15[k] = *(WebRtcIsac_kQArRcLevelsPtr[k] + index[k]);
+  }
+
+  /* entropy coding of quantization indices */
+  WebRtcIsac_EncHistMulti(streamdata, index, WebRtcIsac_kQArRcCdfPtr, AR_ORDER);
+}
+
+
+/* decode & dequantize squared Gain */
+int WebRtcIsac_DecodeGain2(Bitstr* streamdata, int32_t* gainQ10) {
+  int index, err;
+
+  /* entropy decoding of quantization index */
+  err = WebRtcIsac_DecHistOneStepMulti(&index, streamdata,
+                                       WebRtcIsac_kQGainCdf_ptr,
+                                       WebRtcIsac_kQGainInitIndex, 1);
+  if (err < 0) {
+    return err;
+  }
+  /* find quantization level */
+  *gainQ10 = WebRtcIsac_kQGain2Levels[index];
+  return 0;
+}
+
+
+/* quantize & code squared Gain */
+int WebRtcIsac_EncodeGain2(int32_t* gainQ10, Bitstr* streamdata) {
+  int index;
+
+  /* find quantization index */
+  index = WebRtcIsac_kQGainInitIndex[0];
+  if (*gainQ10 > WebRtcIsac_kQGain2BoundaryLevels[index]) {
+    while (*gainQ10 > WebRtcIsac_kQGain2BoundaryLevels[index + 1]) {
+      index++;
+    }
+  } else {
+    while (*gainQ10 < WebRtcIsac_kQGain2BoundaryLevels[--index]) ;
+  }
+  /* De-quantize */
+  *gainQ10 = WebRtcIsac_kQGain2Levels[index];
+
+  /* entropy coding of quantization index */
+  WebRtcIsac_EncHistMulti(streamdata, &index, WebRtcIsac_kQGainCdf_ptr, 1);
+  return 0;
+}
+
+
+/* code and decode Pitch Gains and Lags functions */
+
+/* decode & dequantize Pitch Gains */
+int WebRtcIsac_DecodePitchGain(Bitstr* streamdata,
+                               int16_t* PitchGains_Q12) {
+  int index_comb, err;
+  const uint16_t* WebRtcIsac_kQPitchGainCdf_ptr[1];
+
+  /* Entropy decoding of quantization indices */
+  *WebRtcIsac_kQPitchGainCdf_ptr = WebRtcIsac_kQPitchGainCdf;
+  err = WebRtcIsac_DecHistBisectMulti(&index_comb, streamdata,
+                                      WebRtcIsac_kQPitchGainCdf_ptr,
+                                      WebRtcIsac_kQCdfTableSizeGain, 1);
+  /* Error check, Q_mean_Gain.. tables are of size 144 */
+  if ((err < 0) || (index_comb < 0) || (index_comb >= 144)) {
+    return -ISAC_RANGE_ERROR_DECODE_PITCH_GAIN;
+  }
+  /* De-quantize back to pitch gains by table look-up. */
+  PitchGains_Q12[0] = WebRtcIsac_kQMeanGain1Q12[index_comb];
+  PitchGains_Q12[1] = WebRtcIsac_kQMeanGain2Q12[index_comb];
+  PitchGains_Q12[2] = WebRtcIsac_kQMeanGain3Q12[index_comb];
+  PitchGains_Q12[3] = WebRtcIsac_kQMeanGain4Q12[index_comb];
+  return 0;
+}
+
+
+/* Quantize & code Pitch Gains. */
+void WebRtcIsac_EncodePitchGain(int16_t* PitchGains_Q12,
+                                Bitstr* streamdata,
+                                IsacSaveEncoderData* encData) {
+  int k, j;
+  double C;
+  double S[PITCH_SUBFRAMES];
+  int index[3];
+  int index_comb;
+  const uint16_t* WebRtcIsac_kQPitchGainCdf_ptr[1];
+  double PitchGains[PITCH_SUBFRAMES] = {0, 0, 0, 0};
+
+  /* Take the asin. */
+  for (k = 0; k < PITCH_SUBFRAMES; k++) {
+    PitchGains[k] = ((float)PitchGains_Q12[k]) / 4096;
+    S[k] = asin(PitchGains[k]);
+  }
+
+  /* Find quantization index; only for the first three
+   * transform coefficients. */
+  for (k = 0; k < 3; k++) {
+    /*  transform */
+    C = 0.0;
+    for (j = 0; j < PITCH_SUBFRAMES; j++) {
+      C += WebRtcIsac_kTransform[k][j] * S[j];
+    }
+    /* Quantize */
+    index[k] = WebRtcIsac_lrint(C / PITCH_GAIN_STEPSIZE);
+
+    /* Check that the index is not outside the boundaries of the table. */
+    if (index[k] < WebRtcIsac_kIndexLowerLimitGain[k]) {
+      index[k] = WebRtcIsac_kIndexLowerLimitGain[k];
+    } else if (index[k] > WebRtcIsac_kIndexUpperLimitGain[k]) {
+      index[k] = WebRtcIsac_kIndexUpperLimitGain[k];
+    }
+    index[k] -= WebRtcIsac_kIndexLowerLimitGain[k];
+  }
+
+  /* Calculate unique overall index. */
+  index_comb = WebRtcIsac_kIndexMultsGain[0] * index[0] +
+      WebRtcIsac_kIndexMultsGain[1] * index[1] + index[2];
+
+  /* unquantize back to pitch gains by table look-up */
+  PitchGains_Q12[0] = WebRtcIsac_kQMeanGain1Q12[index_comb];
+  PitchGains_Q12[1] = WebRtcIsac_kQMeanGain2Q12[index_comb];
+  PitchGains_Q12[2] = WebRtcIsac_kQMeanGain3Q12[index_comb];
+  PitchGains_Q12[3] = WebRtcIsac_kQMeanGain4Q12[index_comb];
+
+  /* entropy coding of quantization pitch gains */
+  *WebRtcIsac_kQPitchGainCdf_ptr = WebRtcIsac_kQPitchGainCdf;
+  WebRtcIsac_EncHistMulti(streamdata, &index_comb,
+                          WebRtcIsac_kQPitchGainCdf_ptr, 1);
+  encData->pitchGain_index[encData->startIdx] = index_comb;
+}
+
+
+
+/* Pitch LAG */
+/* Decode & de-quantize Pitch Lags. */
+int WebRtcIsac_DecodePitchLag(Bitstr* streamdata, int16_t* PitchGain_Q12,
+                              double* PitchLags) {
+  int k, err;
+  double StepSize;
+  double C;
+  int index[PITCH_SUBFRAMES];
+  double mean_gain;
+  const double* mean_val2, *mean_val3, *mean_val4;
+  const int16_t* lower_limit;
+  const uint16_t* init_index;
+  const uint16_t* cdf_size;
+  const uint16_t** cdf;
+  double PitchGain[4] = {0, 0, 0, 0};
+
+  /* compute mean pitch gain */
+  mean_gain = 0.0;
+  for (k = 0; k < 4; k++) {
+    PitchGain[k] = ((float)PitchGain_Q12[k]) / 4096;
+    mean_gain += PitchGain[k];
+  }
+  mean_gain /= 4.0;
+
+  /* voicing classification. */
+  if (mean_gain < 0.2) {
+    StepSize = WebRtcIsac_kQPitchLagStepsizeLo;
+    cdf = WebRtcIsac_kQPitchLagCdfPtrLo;
+    cdf_size = WebRtcIsac_kQPitchLagCdfSizeLo;
+    mean_val2 = WebRtcIsac_kQMeanLag2Lo;
+    mean_val3 = WebRtcIsac_kQMeanLag3Lo;
+    mean_val4 = WebRtcIsac_kQMeanLag4Lo;
+    lower_limit = WebRtcIsac_kQIndexLowerLimitLagLo;
+    init_index = WebRtcIsac_kQInitIndexLagLo;
+  } else if (mean_gain < 0.4) {
+    StepSize = WebRtcIsac_kQPitchLagStepsizeMid;
+    cdf = WebRtcIsac_kQPitchLagCdfPtrMid;
+    cdf_size = WebRtcIsac_kQPitchLagCdfSizeMid;
+    mean_val2 = WebRtcIsac_kQMeanLag2Mid;
+    mean_val3 = WebRtcIsac_kQMeanLag3Mid;
+    mean_val4 = WebRtcIsac_kQMeanLag4Mid;
+    lower_limit = WebRtcIsac_kQIndexLowerLimitLagMid;
+    init_index = WebRtcIsac_kQInitIndexLagMid;
+  } else {
+    StepSize = WebRtcIsac_kQPitchLagStepsizeHi;
+    cdf = WebRtcIsac_kQPitchLagCdfPtrHi;
+    cdf_size = WebRtcIsac_kQPitchLagCdfSizeHi;
+    mean_val2 = WebRtcIsac_kQMeanLag2Hi;
+    mean_val3 = WebRtcIsac_kQMeanLag3Hi;
+    mean_val4 = WebRtcIsac_kQMeanLag4Hi;
+    lower_limit = WebRtcIsac_kQindexLowerLimitLagHi;
+    init_index = WebRtcIsac_kQInitIndexLagHi;
+  }
+
+  /* Entropy decoding of quantization indices. */
+  err = WebRtcIsac_DecHistBisectMulti(index, streamdata, cdf, cdf_size, 1);
+  if ((err < 0) || (index[0] < 0)) {
+    return -ISAC_RANGE_ERROR_DECODE_PITCH_LAG;
+  }
+  err = WebRtcIsac_DecHistOneStepMulti(index + 1, streamdata, cdf + 1,
+                                       init_index, 3);
+  if (err < 0) {
+    return -ISAC_RANGE_ERROR_DECODE_PITCH_LAG;
+  }
+
+  /* Unquantize back to transform coefficients and do the inverse transform:
+   * S = T'*C. */
+  C = (index[0] + lower_limit[0]) * StepSize;
+  for (k = 0; k < PITCH_SUBFRAMES; k++) {
+    PitchLags[k] = WebRtcIsac_kTransformTranspose[k][0] * C;
+  }
+  C = mean_val2[index[1]];
+  for (k = 0; k < PITCH_SUBFRAMES; k++) {
+    PitchLags[k] += WebRtcIsac_kTransformTranspose[k][1] * C;
+  }
+  C = mean_val3[index[2]];
+  for (k = 0; k < PITCH_SUBFRAMES; k++) {
+    PitchLags[k] += WebRtcIsac_kTransformTranspose[k][2] * C;
+  }
+  C = mean_val4[index[3]];
+  for (k = 0; k < PITCH_SUBFRAMES; k++) {
+    PitchLags[k] += WebRtcIsac_kTransformTranspose[k][3] * C;
+  }
+  return 0;
+}
+
+
+
+/* Quantize & code pitch lags. */
+void WebRtcIsac_EncodePitchLag(double* PitchLags, int16_t* PitchGain_Q12,
+                               Bitstr* streamdata,
+                               IsacSaveEncoderData* encData) {
+  int k, j;
+  double StepSize;
+  double C;
+  int index[PITCH_SUBFRAMES];
+  double mean_gain;
+  const double* mean_val2, *mean_val3, *mean_val4;
+  const int16_t* lower_limit, *upper_limit;
+  const uint16_t** cdf;
+  double PitchGain[4] = {0, 0, 0, 0};
+
+  /* compute mean pitch gain */
+  mean_gain = 0.0;
+  for (k = 0; k < 4; k++) {
+    PitchGain[k] = ((float)PitchGain_Q12[k]) / 4096;
+    mean_gain += PitchGain[k];
+  }
+  mean_gain /= 4.0;
+
+  /* Save data for creation of multiple bit streams */
+  encData->meanGain[encData->startIdx] = mean_gain;
+
+  /* Voicing classification. */
+  if (mean_gain < 0.2) {
+    StepSize = WebRtcIsac_kQPitchLagStepsizeLo;
+    cdf = WebRtcIsac_kQPitchLagCdfPtrLo;
+    mean_val2 = WebRtcIsac_kQMeanLag2Lo;
+    mean_val3 = WebRtcIsac_kQMeanLag3Lo;
+    mean_val4 = WebRtcIsac_kQMeanLag4Lo;
+    lower_limit = WebRtcIsac_kQIndexLowerLimitLagLo;
+    upper_limit = WebRtcIsac_kQIndexUpperLimitLagLo;
+  } else if (mean_gain < 0.4) {
+    StepSize = WebRtcIsac_kQPitchLagStepsizeMid;
+    cdf = WebRtcIsac_kQPitchLagCdfPtrMid;
+    mean_val2 = WebRtcIsac_kQMeanLag2Mid;
+    mean_val3 = WebRtcIsac_kQMeanLag3Mid;
+    mean_val4 = WebRtcIsac_kQMeanLag4Mid;
+    lower_limit = WebRtcIsac_kQIndexLowerLimitLagMid;
+    upper_limit = WebRtcIsac_kQIndexUpperLimitLagMid;
+  } else {
+    StepSize = WebRtcIsac_kQPitchLagStepsizeHi;
+    cdf = WebRtcIsac_kQPitchLagCdfPtrHi;
+    mean_val2 = WebRtcIsac_kQMeanLag2Hi;
+    mean_val3 = WebRtcIsac_kQMeanLag3Hi;
+    mean_val4 = WebRtcIsac_kQMeanLag4Hi;
+    lower_limit = WebRtcIsac_kQindexLowerLimitLagHi;
+    upper_limit = WebRtcIsac_kQindexUpperLimitLagHi;
+  }
+
+  /* find quantization index */
+  for (k = 0; k < 4; k++) {
+    /*  transform */
+    C = 0.0;
+    for (j = 0; j < PITCH_SUBFRAMES; j++) {
+      C += WebRtcIsac_kTransform[k][j] * PitchLags[j];
+    }
+    /* quantize */
+    index[k] = WebRtcIsac_lrint(C / StepSize);
+
+    /* check that the index is not outside the boundaries of the table */
+    if (index[k] < lower_limit[k]) {
+      index[k] = lower_limit[k];
+    } else if (index[k] > upper_limit[k]) index[k] = upper_limit[k]; {
+      index[k] -= lower_limit[k];
+    }
+    /* Save data for creation of multiple bit streams */
+    encData->pitchIndex[PITCH_SUBFRAMES * encData->startIdx + k] = index[k];
+  }
+
+  /* Un-quantize back to transform coefficients and do the inverse transform:
+   * S = T'*C */
+  C = (index[0] + lower_limit[0]) * StepSize;
+  for (k = 0; k < PITCH_SUBFRAMES; k++) {
+    PitchLags[k] = WebRtcIsac_kTransformTranspose[k][0] * C;
+  }
+  C = mean_val2[index[1]];
+  for (k = 0; k < PITCH_SUBFRAMES; k++) {
+    PitchLags[k] += WebRtcIsac_kTransformTranspose[k][1] * C;
+  }
+  C = mean_val3[index[2]];
+  for (k = 0; k < PITCH_SUBFRAMES; k++) {
+    PitchLags[k] += WebRtcIsac_kTransformTranspose[k][2] * C;
+  }
+  C = mean_val4[index[3]];
+  for (k = 0; k < PITCH_SUBFRAMES; k++) {
+    PitchLags[k] += WebRtcIsac_kTransformTranspose[k][3] * C;
+  }
+  /* entropy coding of quantization pitch lags */
+  WebRtcIsac_EncHistMulti(streamdata, index, cdf, PITCH_SUBFRAMES);
+}
+
+
+
+/* Routines for in-band signaling of bandwidth estimation */
+/* Histograms based on uniform distribution of indices */
+/* Move global variables later! */
+
+
+/* cdf array for frame length indicator */
+const uint16_t WebRtcIsac_kFrameLengthCdf[4] = {
+    0, 21845, 43690, 65535 };
+
+/* pointer to cdf array for frame length indicator */
+const uint16_t* WebRtcIsac_kFrameLengthCdf_ptr[1] = {
+    WebRtcIsac_kFrameLengthCdf };
+
+/* initial cdf index for decoder of frame length indicator */
+const uint16_t WebRtcIsac_kFrameLengthInitIndex[1] = { 1 };
+
+
+int WebRtcIsac_DecodeFrameLen(Bitstr* streamdata, int16_t* framesamples) {
+  int frame_mode, err;
+  err = 0;
+  /* entropy decoding of frame length [1:30ms,2:60ms] */
+  err = WebRtcIsac_DecHistOneStepMulti(&frame_mode, streamdata,
+                                       WebRtcIsac_kFrameLengthCdf_ptr,
+                                       WebRtcIsac_kFrameLengthInitIndex, 1);
+  if (err < 0)
+    return -ISAC_RANGE_ERROR_DECODE_FRAME_LENGTH;
+
+  switch (frame_mode) {
+    case 1:
+      *framesamples = 480; /* 30ms */
+      break;
+    case 2:
+      *framesamples = 960; /* 60ms */
+      break;
+    default:
+      err = -ISAC_DISALLOWED_FRAME_MODE_DECODER;
+  }
+  return err;
+}
+
+int WebRtcIsac_EncodeFrameLen(int16_t framesamples, Bitstr* streamdata) {
+  int frame_mode, status;
+
+  status = 0;
+  frame_mode = 0;
+  /* entropy coding of frame length [1:480 samples,2:960 samples] */
+  switch (framesamples) {
+    case 480:
+      frame_mode = 1;
+      break;
+    case 960:
+      frame_mode = 2;
+      break;
+    default:
+      status = - ISAC_DISALLOWED_FRAME_MODE_ENCODER;
+  }
+
+  if (status < 0)
+    return status;
+
+  WebRtcIsac_EncHistMulti(streamdata, &frame_mode,
+                          WebRtcIsac_kFrameLengthCdf_ptr, 1);
+  return status;
+}
+
+/* cdf array for estimated bandwidth */
+static const uint16_t kBwCdf[25] = {
+    0, 2731, 5461, 8192, 10923, 13653, 16384, 19114, 21845, 24576, 27306, 30037,
+    32768, 35498, 38229, 40959, 43690, 46421, 49151, 51882, 54613, 57343, 60074,
+    62804, 65535 };
+
+/* pointer to cdf array for estimated bandwidth */
+static const uint16_t* const kBwCdfPtr[1] = { kBwCdf };
+
+/* initial cdf index for decoder of estimated bandwidth*/
+static const uint16_t kBwInitIndex[1] = { 7 };
+
+
+int WebRtcIsac_DecodeSendBW(Bitstr* streamdata, int16_t* BWno) {
+  int BWno32, err;
+
+  /* entropy decoding of sender's BW estimation [0..23] */
+  err = WebRtcIsac_DecHistOneStepMulti(&BWno32, streamdata, kBwCdfPtr,
+                                       kBwInitIndex, 1);
+  if (err < 0) {
+    return -ISAC_RANGE_ERROR_DECODE_BANDWIDTH;
+  }
+  *BWno = (int16_t)BWno32;
+  return err;
+}
+
+void WebRtcIsac_EncodeReceiveBw(int* BWno, Bitstr* streamdata) {
+  /* entropy encoding of receiver's BW estimation [0..23] */
+  WebRtcIsac_EncHistMulti(streamdata, BWno, kBwCdfPtr, 1);
+}
+
+
+/* estimate code length of LPC Coef */
+void WebRtcIsac_TranscodeLPCCoef(double* LPCCoef_lo, double* LPCCoef_hi,
+                                 int* index_g) {
+  int j, k, n, pos, pos2, posg, offsg, offs2;
+  int index_ovr_g[KLT_ORDER_GAIN];
+  double tmpcoeffs_g[KLT_ORDER_GAIN];
+  double tmpcoeffs2_g[KLT_ORDER_GAIN];
+  double sum;
+
+  /* log gains, mean removal and scaling */
+  posg = 0;
+  for (k = 0; k < SUBFRAMES; k++) {
+    tmpcoeffs_g[posg] = log(LPCCoef_lo[(LPC_LOBAND_ORDER + 1) * k]);
+    tmpcoeffs_g[posg] -= WebRtcIsac_kLpcMeansGain[posg];
+    tmpcoeffs_g[posg] *= LPC_GAIN_SCALE;
+    posg++;
+    tmpcoeffs_g[posg] = log(LPCCoef_hi[(LPC_HIBAND_ORDER + 1) * k]);
+    tmpcoeffs_g[posg] -= WebRtcIsac_kLpcMeansGain[posg];
+    tmpcoeffs_g[posg] *= LPC_GAIN_SCALE;
+    posg++;
+  }
+
+  /* KLT  */
+
+  /* Left transform. */
+  offsg = 0;
+  for (j = 0; j < SUBFRAMES; j++) {
+    posg = offsg;
+    for (k = 0; k < LPC_GAIN_ORDER; k++) {
+      sum = 0;
+      pos = offsg;
+      pos2 = k;
+      for (n = 0; n < LPC_GAIN_ORDER; n++) {
+        sum += tmpcoeffs_g[pos++] * WebRtcIsac_kKltT1Gain[pos2];
+        pos2 += LPC_GAIN_ORDER;
+      }
+      tmpcoeffs2_g[posg++] = sum;
+    }
+    offsg += LPC_GAIN_ORDER;
+  }
+
+  /* Right transform. */
+  offsg = 0;
+  offs2 = 0;
+  for (j = 0; j < SUBFRAMES; j++) {
+    posg = offsg;
+    for (k = 0; k < LPC_GAIN_ORDER; k++) {
+      sum = 0;
+      pos = k;
+      pos2 = offs2;
+      for (n = 0; n < SUBFRAMES; n++) {
+        sum += tmpcoeffs2_g[pos] * WebRtcIsac_kKltT2Gain[pos2++];
+        pos += LPC_GAIN_ORDER;
+      }
+      tmpcoeffs_g[posg++] = sum;
+    }
+    offs2 += SUBFRAMES;
+    offsg += LPC_GAIN_ORDER;
+  }
+
+
+  /* quantize coefficients */
+  for (k = 0; k < KLT_ORDER_GAIN; k++) {
+    /* Get index. */
+    pos2 = WebRtcIsac_lrint(tmpcoeffs_g[k] / KLT_STEPSIZE);
+    index_g[k] = (pos2) + WebRtcIsac_kQKltQuantMinGain[k];
+    if (index_g[k] < 0) {
+      index_g[k] = 0;
+    } else if (index_g[k] > WebRtcIsac_kQKltMaxIndGain[k]) {
+      index_g[k] = WebRtcIsac_kQKltMaxIndGain[k];
+    }
+    index_ovr_g[k] = WebRtcIsac_kQKltOffsetGain[k] + index_g[k];
+
+    /* find quantization levels for coefficients */
+    tmpcoeffs_g[k] = WebRtcIsac_kQKltLevelsGain[index_ovr_g[k]];
+  }
+}
+
+
+/* Decode & de-quantize LPC Coefficients. */
+int WebRtcIsac_DecodeLpcCoefUB(Bitstr* streamdata, double* lpcVecs,
+                               double* percepFilterGains,
+                               int16_t bandwidth) {
+  int  index_s[KLT_ORDER_SHAPE];
+
+  double U[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
+  int err;
+
+  /* Entropy decoding of quantization indices. */
+  switch (bandwidth) {
+    case isac12kHz: {
+      err = WebRtcIsac_DecHistOneStepMulti(
+          index_s, streamdata, WebRtcIsac_kLpcShapeCdfMatUb12,
+          WebRtcIsac_kLpcShapeEntropySearchUb12, UB_LPC_ORDER *
+          UB_LPC_VEC_PER_FRAME);
+      break;
+    }
+    case isac16kHz: {
+      err = WebRtcIsac_DecHistOneStepMulti(
+          index_s, streamdata, WebRtcIsac_kLpcShapeCdfMatUb16,
+          WebRtcIsac_kLpcShapeEntropySearchUb16, UB_LPC_ORDER *
+          UB16_LPC_VEC_PER_FRAME);
+      break;
+    }
+    default:
+      return -1;
+  }
+
+  if (err < 0) {
+    return err;
+  }
+
+  WebRtcIsac_DequantizeLpcParam(index_s, lpcVecs, bandwidth);
+  WebRtcIsac_CorrelateInterVec(lpcVecs, U, bandwidth);
+  WebRtcIsac_CorrelateIntraVec(U, lpcVecs, bandwidth);
+  WebRtcIsac_AddLarMean(lpcVecs, bandwidth);
+  WebRtcIsac_DecodeLpcGainUb(percepFilterGains, streamdata);
+
+  if (bandwidth == isac16kHz) {
+    /* Decode another set of Gains. */
+    WebRtcIsac_DecodeLpcGainUb(&percepFilterGains[SUBFRAMES], streamdata);
+  }
+  return 0;
+}
+
+int16_t WebRtcIsac_EncodeBandwidth(enum ISACBandwidth bandwidth,
+                                   Bitstr* streamData) {
+  int bandwidthMode;
+  switch (bandwidth) {
+    case isac12kHz: {
+      bandwidthMode = 0;
+      break;
+    }
+    case isac16kHz: {
+      bandwidthMode = 1;
+      break;
+    }
+    default:
+      return -ISAC_DISALLOWED_ENCODER_BANDWIDTH;
+  }
+  WebRtcIsac_EncHistMulti(streamData, &bandwidthMode, kOneBitEqualProbCdf_ptr,
+                          1);
+  return 0;
+}
+
+int16_t WebRtcIsac_DecodeBandwidth(Bitstr* streamData,
+                                   enum ISACBandwidth* bandwidth) {
+  int bandwidthMode;
+  if (WebRtcIsac_DecHistOneStepMulti(&bandwidthMode, streamData,
+                                     kOneBitEqualProbCdf_ptr,
+                                     kOneBitEqualProbInitIndex, 1) < 0) {
+    return -ISAC_RANGE_ERROR_DECODE_BANDWITH;
+  }
+  switch (bandwidthMode) {
+    case 0: {
+      *bandwidth = isac12kHz;
+      break;
+    }
+    case 1: {
+      *bandwidth = isac16kHz;
+      break;
+    }
+    default:
+      return -ISAC_DISALLOWED_BANDWIDTH_MODE_DECODER;
+  }
+  return 0;
+}
+
+int16_t WebRtcIsac_EncodeJitterInfo(int32_t jitterIndex,
+                                    Bitstr* streamData) {
+  /* This is to avoid LINUX warning until we change 'int' to 'Word32'. */
+  int intVar;
+
+  if ((jitterIndex < 0) || (jitterIndex > 1)) {
+    return -1;
+  }
+  intVar = (int)(jitterIndex);
+  /* Use the same CDF table as for bandwidth
+   * both take two values with equal probability.*/
+  WebRtcIsac_EncHistMulti(streamData, &intVar, kOneBitEqualProbCdf_ptr, 1);
+  return 0;
+}
+
+int16_t WebRtcIsac_DecodeJitterInfo(Bitstr* streamData,
+                                    int32_t* jitterInfo) {
+  int intVar;
+  /* Use the same CDF table as for bandwidth
+   * both take two values with equal probability. */
+  if (WebRtcIsac_DecHistOneStepMulti(&intVar, streamData,
+                                     kOneBitEqualProbCdf_ptr,
+                                     kOneBitEqualProbInitIndex, 1) < 0) {
+    return -ISAC_RANGE_ERROR_DECODE_BANDWITH;
+  }
+  *jitterInfo = (int16_t)(intVar);
+  return 0;
+}
diff --git a/modules/audio_coding/codecs/isac/main/source/entropy_coding.h b/modules/audio_coding/codecs/isac/main/source/entropy_coding.h
new file mode 100644
index 0000000..7224ad0
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/entropy_coding.h
@@ -0,0 +1,343 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * entropy_coding.h
+ *
+ * This header file declares all of the functions used to arithmetically
+ * encode the iSAC bistream
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENTROPY_CODING_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENTROPY_CODING_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+/******************************************************************************
+ * WebRtcIsac_DecodeSpec()
+ * Decode real and imaginary part of the DFT coefficients, given a bit-stream.
+ * The decoded DFT coefficient can be transformed to time domain by
+ * WebRtcIsac_Time2Spec().
+ *
+ * Input:
+ *  - streamdata            : pointer to a stucture containg the encoded
+ *                            data and theparameters needed for entropy
+ *                            coding.
+ *  - AvgPitchGain_Q12      : average pitch-gain of the frame. This is only
+ *                            relevant for 0-4 kHz band, and the input value is
+ *                            not used in other bands.
+ *  - band                  : specifies which band's DFT should be decoded.
+ *
+ * Output:
+ *   - *fr                  : pointer to a buffer where the real part of DFT
+ *                            coefficients are written to.
+ *   - *fi                  : pointer to a buffer where the imaginary part
+ *                            of DFT coefficients are written to.
+ *
+ * Return value             : < 0 if an error occures
+ *                              0 if succeeded.
+ */
+int WebRtcIsac_DecodeSpec(Bitstr* streamdata, int16_t AvgPitchGain_Q12,
+                          enum ISACBand band, double* fr, double* fi);
+
+/******************************************************************************
+ * WebRtcIsac_EncodeSpec()
+ * Encode real and imaginary part of the DFT coefficients into the given
+ * bit-stream.
+ *
+ * Input:
+ *  - *fr                   : pointer to a buffer where the real part of DFT
+ *                            coefficients are written to.
+ *  - *fi                   : pointer to a buffer where the imaginary part
+ *                            of DFT coefficients are written to.
+ *  - AvgPitchGain_Q12      : average pitch-gain of the frame. This is only
+ *                            relevant for 0-4 kHz band, and the input value is
+ *                            not used in other bands.
+ *  - band                  : specifies which band's DFT should be decoded.
+ *
+ * Output:
+ *  - streamdata            : pointer to a stucture containg the encoded
+ *                            data and theparameters needed for entropy
+ *                            coding.
+ *
+ * Return value             : < 0 if an error occures
+ *                              0 if succeeded.
+ */
+int WebRtcIsac_EncodeSpec(const int16_t* fr, const int16_t* fi,
+                          int16_t AvgPitchGain_Q12, enum ISACBand band,
+                          Bitstr* streamdata);
+
+/* decode & dequantize LPC Coef */
+int WebRtcIsac_DecodeLpcCoef(Bitstr* streamdata, double* LPCCoef);
+int WebRtcIsac_DecodeLpcCoefUB(Bitstr* streamdata, double* lpcVecs,
+                               double* percepFilterGains,
+                               int16_t bandwidth);
+
+int WebRtcIsac_DecodeLpc(Bitstr* streamdata, double* LPCCoef_lo,
+                         double* LPCCoef_hi);
+
+/* quantize & code LPC Coef */
+void WebRtcIsac_EncodeLpcLb(double* LPCCoef_lo, double* LPCCoef_hi,
+                            Bitstr* streamdata, IsacSaveEncoderData* encData);
+
+void WebRtcIsac_EncodeLpcGainLb(double* LPCCoef_lo, double* LPCCoef_hi,
+                                Bitstr* streamdata,
+                                IsacSaveEncoderData* encData);
+
+/******************************************************************************
+ * WebRtcIsac_EncodeLpcUB()
+ * Encode LPC parameters, given as A-polynomial, of upper-band. The encoding
+ * is performed in LAR domain.
+ * For the upper-band, we compute and encode LPC of some sub-frames, LPC of
+ * other sub-frames are computed by linear interpolation, in LAR domain. This
+ * function performs the interpolation and returns the LPC of all sub-frames.
+ *
+ * Inputs:
+ *  - lpcCoef               : a buffer containing A-polynomials of sub-frames
+ *                            (excluding first coefficient that is 1).
+ *  - bandwidth             : specifies if the codec is operating at 0-12 kHz
+ *                            or 0-16 kHz mode.
+ *
+ * Input/output:
+ *  - streamdata            : pointer to a structure containing the encoded
+ *                            data and the parameters needed for entropy
+ *                            coding.
+ *
+ * Output:
+ *  - interpolLPCCoeff      : Decoded and interpolated LPC (A-polynomial)
+ *                            of all sub-frames.
+ *                            If LP analysis is of order K, and there are N
+ *                            sub-frames then this is a buffer of size
+ *                            (k + 1) * N, each vector starts with the LPC gain
+ *                            of the corresponding sub-frame. The LPC gains
+ *                            are encoded and inserted after this function is
+ *                            called. The first A-coefficient which is 1 is not
+ *                            included.
+ *
+ * Return value             : 0 if encoding is successful,
+ *                           <0 if failed to encode.
+ */
+int16_t WebRtcIsac_EncodeLpcUB(double* lpcCoeff, Bitstr* streamdata,
+                               double* interpolLPCCoeff,
+                               int16_t bandwidth,
+                               ISACUBSaveEncDataStruct* encData);
+
+/******************************************************************************
+ * WebRtcIsac_DecodeInterpolLpcUb()
+ * Decode LPC coefficients and interpolate to get the coefficients fo all
+ * sub-frmaes.
+ *
+ * Inputs:
+ *  - bandwidth             : spepecifies if the codec is in 0-12 kHz or
+ *                            0-16 kHz mode.
+ *
+ * Input/output:
+ *  - streamdata            : pointer to a stucture containg the encoded
+ *                            data and theparameters needed for entropy
+ *                            coding.
+ *
+ * Output:
+ *  - percepFilterParam     : Decoded and interpolated LPC (A-polynomial) of
+ *                            all sub-frames.
+ *                            If LP analysis is of order K, and there are N
+ *                            sub-frames then this is a buffer of size
+ *                            (k + 1) * N, each vector starts with the LPC gain
+ *                            of the corresponding sub-frame. The LPC gains
+ *                            are encoded and inserted after this function is
+ *                            called. The first A-coefficient which is 1 is not
+ *                            included.
+ *
+ * Return value             : 0 if encoding is successful,
+ *                           <0 if failed to encode.
+ */
+int16_t WebRtcIsac_DecodeInterpolLpcUb(Bitstr* streamdata,
+                                       double* percepFilterParam,
+                                       int16_t bandwidth);
+
+/* Decode & dequantize RC */
+int WebRtcIsac_DecodeRc(Bitstr* streamdata, int16_t* RCQ15);
+
+/* Quantize & code RC */
+void WebRtcIsac_EncodeRc(int16_t* RCQ15, Bitstr* streamdata);
+
+/* Decode & dequantize squared Gain */
+int WebRtcIsac_DecodeGain2(Bitstr* streamdata, int32_t* Gain2);
+
+/* Quantize & code squared Gain (input is squared gain) */
+int WebRtcIsac_EncodeGain2(int32_t* gain2, Bitstr* streamdata);
+
+void WebRtcIsac_EncodePitchGain(int16_t* PitchGains_Q12,
+                                Bitstr* streamdata,
+                                IsacSaveEncoderData* encData);
+
+void WebRtcIsac_EncodePitchLag(double* PitchLags,
+                               int16_t* PitchGain_Q12,
+                               Bitstr* streamdata,
+                               IsacSaveEncoderData* encData);
+
+int WebRtcIsac_DecodePitchGain(Bitstr* streamdata,
+                               int16_t* PitchGain_Q12);
+int WebRtcIsac_DecodePitchLag(Bitstr* streamdata, int16_t* PitchGain_Q12,
+                              double* PitchLag);
+
+int WebRtcIsac_DecodeFrameLen(Bitstr* streamdata, int16_t* framelength);
+int WebRtcIsac_EncodeFrameLen(int16_t framelength, Bitstr* streamdata);
+int WebRtcIsac_DecodeSendBW(Bitstr* streamdata, int16_t* BWno);
+void WebRtcIsac_EncodeReceiveBw(int* BWno, Bitstr* streamdata);
+
+/* Step-down */
+void WebRtcIsac_Poly2Rc(double* a, int N, double* RC);
+
+/* Step-up */
+void WebRtcIsac_Rc2Poly(double* RC, int N, double* a);
+
+void WebRtcIsac_TranscodeLPCCoef(double* LPCCoef_lo, double* LPCCoef_hi,
+                                 int* index_g);
+
+
+/******************************************************************************
+ * WebRtcIsac_EncodeLpcGainUb()
+ * Encode LPC gains of sub-Frames.
+ *
+ * Input/outputs:
+ *  - lpGains               : a buffer which contains 'SUBFRAME' number of
+ *                            LP gains to be encoded. The input values are
+ *                            overwritten by the quantized values.
+ *  - streamdata            : pointer to a stucture containg the encoded
+ *                            data and theparameters needed for entropy
+ *                            coding.
+ *
+ * Output:
+ *  - lpcGainIndex          : quantization indices for lpc gains, these will
+ *                            be stored to be used  for FEC.
+ */
+void WebRtcIsac_EncodeLpcGainUb(double* lpGains, Bitstr* streamdata,
+                                int* lpcGainIndex);
+
+
+/******************************************************************************
+ * WebRtcIsac_EncodeLpcGainUb()
+ * Store LPC gains of sub-Frames in 'streamdata'.
+ *
+ * Input:
+ *  - lpGains               : a buffer which contains 'SUBFRAME' number of
+ *                            LP gains to be encoded.
+ * Input/outputs:
+ *  - streamdata            : pointer to a stucture containg the encoded
+ *                            data and theparameters needed for entropy
+ *                            coding.
+ *
+ */
+void WebRtcIsac_StoreLpcGainUb(double* lpGains, Bitstr* streamdata);
+
+
+/******************************************************************************
+ * WebRtcIsac_DecodeLpcGainUb()
+ * Decode the LPC gain of sub-frames.
+ *
+ * Input/output:
+ *  - streamdata            : pointer to a stucture containg the encoded
+ *                            data and theparameters needed for entropy
+ *                            coding.
+ *
+ * Output:
+ *  - lpGains               : a buffer where decoded LPC gians will be stored.
+ *
+ * Return value             : 0 if succeeded.
+ *                           <0 if failed.
+ */
+int16_t WebRtcIsac_DecodeLpcGainUb(double* lpGains, Bitstr* streamdata);
+
+
+/******************************************************************************
+ * WebRtcIsac_EncodeBandwidth()
+ * Encode if the bandwidth of encoded audio is 0-12 kHz or 0-16 kHz.
+ *
+ * Input:
+ *  - bandwidth             : an enumerator specifying if the codec in is
+ *                            0-12 kHz or 0-16 kHz mode.
+ *
+ * Input/output:
+ *  - streamdata            : pointer to a stucture containg the encoded
+ *                            data and theparameters needed for entropy
+ *                            coding.
+ *
+ * Return value             : 0 if succeeded.
+ *                           <0 if failed.
+ */
+int16_t WebRtcIsac_EncodeBandwidth(enum ISACBandwidth bandwidth,
+                                   Bitstr* streamData);
+
+
+/******************************************************************************
+ * WebRtcIsac_DecodeBandwidth()
+ * Decode the bandwidth of the encoded audio, i.e. if the bandwidth is 0-12 kHz
+ * or 0-16 kHz.
+ *
+ * Input/output:
+ *  - streamdata            : pointer to a stucture containg the encoded
+ *                            data and theparameters needed for entropy
+ *                            coding.
+ *
+ * Output:
+ *  - bandwidth             : an enumerator specifying if the codec is in
+ *                            0-12 kHz or 0-16 kHz mode.
+ *
+ * Return value             : 0 if succeeded.
+ *                           <0 if failed.
+ */
+int16_t WebRtcIsac_DecodeBandwidth(Bitstr* streamData,
+                                   enum ISACBandwidth* bandwidth);
+
+
+/******************************************************************************
+ * WebRtcIsac_EncodeJitterInfo()
+ * Decode the jitter information.
+ *
+ * Input/output:
+ *  - streamdata            : pointer to a stucture containg the encoded
+ *                            data and theparameters needed for entropy
+ *                            coding.
+ *
+ * Input:
+ *  - jitterInfo            : one bit of info specifying if the channel is
+ *                            in high/low jitter. Zero indicates low jitter
+ *                            and one indicates high jitter.
+ *
+ * Return value             : 0 if succeeded.
+ *                           <0 if failed.
+ */
+int16_t WebRtcIsac_EncodeJitterInfo(int32_t jitterIndex,
+                                    Bitstr* streamData);
+
+
+/******************************************************************************
+ * WebRtcIsac_DecodeJitterInfo()
+ * Decode the jitter information.
+ *
+ * Input/output:
+ *  - streamdata            : pointer to a stucture containg the encoded
+ *                            data and theparameters needed for entropy
+ *                            coding.
+ *
+ * Output:
+ *  - jitterInfo            : one bit of info specifying if the channel is
+ *                            in high/low jitter. Zero indicates low jitter
+ *                            and one indicates high jitter.
+ *
+ * Return value             : 0 if succeeded.
+ *                           <0 if failed.
+ */
+int16_t WebRtcIsac_DecodeJitterInfo(Bitstr* streamData,
+                                    int32_t* jitterInfo);
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ENTROPY_CODING_H_ */
diff --git a/modules/audio_coding/codecs/isac/main/source/fft.c b/modules/audio_coding/codecs/isac/main/source/fft.c
new file mode 100644
index 0000000..a3cbd5e
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/fft.c
@@ -0,0 +1,942 @@
+/*
+ * Copyright(c)1995,97 Mark Olesen <olesen@me.QueensU.CA>
+ *    Queen's Univ at Kingston (Canada)
+ *
+ * Permission to use, copy, modify, and distribute this software for
+ * any purpose without fee is hereby granted, provided that this
+ * entire notice is included in all copies of any software which is
+ * or includes a copy or modification of this software and in all
+ * copies of the supporting documentation for such software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY.  IN PARTICULAR, NEITHER THE AUTHOR NOR QUEEN'S
+ * UNIVERSITY AT KINGSTON MAKES ANY REPRESENTATION OR WARRANTY OF ANY
+ * KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS
+ * FITNESS FOR ANY PARTICULAR PURPOSE.
+ *
+ * All of which is to say that you can do what you like with this
+ * source code provided you don't try to sell it as your own and you
+ * include an unaltered copy of this message (including the
+ * copyright).
+ *
+ * It is also implicitly understood that bug fixes and improvements
+ * should make their way back to the general Internet community so
+ * that everyone benefits.
+ *
+ * Changes:
+ *   Trivial type modifications by the WebRTC authors.
+ */
+
+
+/*
+ * File:
+ * WebRtcIsac_Fftn.c
+ *
+ * Public:
+ * WebRtcIsac_Fftn / fftnf ();
+ *
+ * Private:
+ * WebRtcIsac_Fftradix / fftradixf ();
+ *
+ * Descript:
+ * multivariate complex Fourier transform, computed in place
+ * using mixed-radix Fast Fourier Transform algorithm.
+ *
+ * Fortran code by:
+ * RC Singleton, Stanford Research Institute, Sept. 1968
+ *
+ * translated by f2c (version 19950721).
+ *
+ * int WebRtcIsac_Fftn (int ndim, const int dims[], REAL Re[], REAL Im[],
+ *     int iSign, double scaling);
+ *
+ * NDIM = the total number dimensions
+ * DIMS = a vector of array sizes
+ * if NDIM is zero then DIMS must be zero-terminated
+ *
+ * RE and IM hold the real and imaginary components of the data, and return
+ * the resulting real and imaginary Fourier coefficients.  Multidimensional
+ * data *must* be allocated contiguously.  There is no limit on the number
+ * of dimensions.
+ *
+ * ISIGN = the sign of the complex exponential (ie, forward or inverse FFT)
+ * the magnitude of ISIGN (normally 1) is used to determine the
+ * correct indexing increment (see below).
+ *
+ * SCALING = normalizing constant by which the final result is *divided*
+ * if SCALING == -1, normalize by total dimension of the transform
+ * if SCALING <  -1, normalize by the square-root of the total dimension
+ *
+ * example:
+ * tri-variate transform with Re[n1][n2][n3], Im[n1][n2][n3]
+ *
+ * int dims[3] = {n1,n2,n3}
+ * WebRtcIsac_Fftn (3, dims, Re, Im, 1, scaling);
+ *
+ *-----------------------------------------------------------------------*
+ * int WebRtcIsac_Fftradix (REAL Re[], REAL Im[], size_t nTotal, size_t nPass,
+ *   size_t nSpan, int iSign, size_t max_factors,
+ *   size_t max_perm);
+ *
+ * RE, IM - see above documentation
+ *
+ * Although there is no limit on the number of dimensions, WebRtcIsac_Fftradix() must
+ * be called once for each dimension, but the calls may be in any order.
+ *
+ * NTOTAL = the total number of complex data values
+ * NPASS  = the dimension of the current variable
+ * NSPAN/NPASS = the spacing of consecutive data values while indexing the
+ * current variable
+ * ISIGN - see above documentation
+ *
+ * example:
+ * tri-variate transform with Re[n1][n2][n3], Im[n1][n2][n3]
+ *
+ * WebRtcIsac_Fftradix (Re, Im, n1*n2*n3, n1,       n1, 1, maxf, maxp);
+ * WebRtcIsac_Fftradix (Re, Im, n1*n2*n3, n2,    n1*n2, 1, maxf, maxp);
+ * WebRtcIsac_Fftradix (Re, Im, n1*n2*n3, n3, n1*n2*n3, 1, maxf, maxp);
+ *
+ * single-variate transform,
+ *    NTOTAL = N = NSPAN = (number of complex data values),
+ *
+ * WebRtcIsac_Fftradix (Re, Im, n, n, n, 1, maxf, maxp);
+ *
+ * The data can also be stored in a single array with alternating real and
+ * imaginary parts, the magnitude of ISIGN is changed to 2 to give correct
+ * indexing increment, and data [0] and data [1] used to pass the initial
+ * addresses for the sequences of real and imaginary values,
+ *
+ * example:
+ * REAL data [2*NTOTAL];
+ * WebRtcIsac_Fftradix ( &data[0], &data[1], NTOTAL, nPass, nSpan, 2, maxf, maxp);
+ *
+ * for temporary allocation:
+ *
+ * MAX_FACTORS >= the maximum prime factor of NPASS
+ * MAX_PERM >= the number of prime factors of NPASS.  In addition,
+ * if the square-free portion K of NPASS has two or more prime
+ * factors, then MAX_PERM >= (K-1)
+ *
+ * storage in FACTOR for a maximum of 15 prime factors of NPASS. if NPASS
+ * has more than one square-free factor, the product of the square-free
+ * factors must be <= 210 array storage for maximum prime factor of 23 the
+ * following two constants should agree with the array dimensions.
+ *
+ *----------------------------------------------------------------------*/
+
+#include <stdlib.h>
+#include <math.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/fft.h"
+
+/* double precision routine */
+static int
+WebRtcIsac_Fftradix (double Re[], double Im[],
+                    size_t nTotal, size_t nPass, size_t nSpan, int isign,
+                    int max_factors, unsigned int max_perm,
+                    FFTstr *fftstate);
+
+
+
+#ifndef M_PI
+# define M_PI 3.14159265358979323846264338327950288
+#endif
+
+#ifndef SIN60
+# define SIN60 0.86602540378443865 /* sin(60 deg) */
+# define COS72 0.30901699437494742 /* cos(72 deg) */
+# define SIN72 0.95105651629515357 /* sin(72 deg) */
+#endif
+
+# define REAL  double
+# define FFTN  WebRtcIsac_Fftn
+# define FFTNS  "fftn"
+# define FFTRADIX WebRtcIsac_Fftradix
+# define FFTRADIXS "fftradix"
+
+
+int  WebRtcIsac_Fftns(unsigned int ndim, const int dims[],
+                     double Re[],
+                     double Im[],
+                     int iSign,
+                     double scaling,
+                     FFTstr *fftstate)
+{
+
+  size_t nSpan, nPass, nTotal;
+  unsigned int i;
+  int ret, max_factors, max_perm;
+
+  /*
+   * tally the number of elements in the data array
+   * and determine the number of dimensions
+   */
+  nTotal = 1;
+  if (ndim && dims [0])
+  {
+    for (i = 0; i < ndim; i++)
+    {
+      if (dims [i] <= 0)
+      {
+        return -1;
+      }
+      nTotal *= dims [i];
+    }
+  }
+  else
+  {
+    ndim = 0;
+    for (i = 0; dims [i]; i++)
+    {
+      if (dims [i] <= 0)
+      {
+        return -1;
+      }
+      nTotal *= dims [i];
+      ndim++;
+    }
+  }
+
+  /* determine maximum number of factors and permuations */
+#if 1
+  /*
+   * follow John Beale's example, just use the largest dimension and don't
+   * worry about excess allocation.  May be someone else will do it?
+   */
+  max_factors = max_perm = 1;
+  for (i = 0; i < ndim; i++)
+  {
+    nSpan = dims [i];
+    if ((int)nSpan > max_factors)
+    {
+      max_factors = (int)nSpan;
+    }
+    if ((int)nSpan > max_perm) 
+    {
+      max_perm = (int)nSpan;
+    }
+  }
+#else
+  /* use the constants used in the original Fortran code */
+  max_factors = 23;
+  max_perm = 209;
+#endif
+  /* loop over the dimensions: */
+  nPass = 1;
+  for (i = 0; i < ndim; i++)
+  {
+    nSpan = dims [i];
+    nPass *= nSpan;
+    ret = FFTRADIX (Re, Im, nTotal, nSpan, nPass, iSign,
+                    max_factors, max_perm, fftstate);
+    /* exit, clean-up already done */
+    if (ret)
+      return ret;
+  }
+
+  /* Divide through by the normalizing constant: */
+  if (scaling && scaling != 1.0)
+  {
+    if (iSign < 0) iSign = -iSign;
+    if (scaling < 0.0)
+    {
+      scaling = (double)nTotal;
+      if (scaling < -1.0)
+        scaling = sqrt (scaling);
+    }
+    scaling = 1.0 / scaling; /* multiply is often faster */
+    for (i = 0; i < nTotal; i += iSign)
+    {
+      Re [i] *= scaling;
+      Im [i] *= scaling;
+    }
+  }
+  return 0;
+}
+
+/*
+ * singleton's mixed radix routine
+ *
+ * could move allocation out to WebRtcIsac_Fftn(), but leave it here so that it's
+ * possible to make this a standalone function
+ */
+
+static int   FFTRADIX (REAL Re[],
+                       REAL Im[],
+                       size_t nTotal,
+                       size_t nPass,
+                       size_t nSpan,
+                       int iSign,
+                       int max_factors,
+                       unsigned int max_perm,
+                       FFTstr *fftstate)
+{
+  int ii, mfactor, kspan, ispan, inc;
+  int j, jc, jf, jj, k, k1, k2, k3, k4, kk, kt, nn, ns, nt;
+
+
+  REAL radf;
+  REAL c1, c2, c3, cd, aa, aj, ak, ajm, ajp, akm, akp;
+  REAL s1, s2, s3, sd, bb, bj, bk, bjm, bjp, bkm, bkp;
+
+  REAL *Rtmp = NULL; /* temp space for real part*/
+  REAL *Itmp = NULL; /* temp space for imaginary part */
+  REAL *Cos = NULL; /* Cosine values */
+  REAL *Sin = NULL; /* Sine values */
+
+  REAL s60 = SIN60;  /* sin(60 deg) */
+  REAL c72 = COS72;  /* cos(72 deg) */
+  REAL s72 = SIN72;  /* sin(72 deg) */
+  REAL pi2 = M_PI;  /* use PI first, 2 PI later */
+
+
+  fftstate->SpaceAlloced = 0;
+  fftstate->MaxPermAlloced = 0;
+
+
+  // initialize to avoid warnings
+  k3 = c2 = c3 = s2 = s3 = 0.0;
+
+  if (nPass < 2)
+    return 0;
+
+  /*  allocate storage */
+  if (fftstate->SpaceAlloced < max_factors * sizeof (REAL))
+  {
+#ifdef SUN_BROKEN_REALLOC
+    if (!fftstate->SpaceAlloced) /* first time */
+    {
+      fftstate->SpaceAlloced = max_factors * sizeof (REAL);
+    }
+    else
+    {
+#endif
+      fftstate->SpaceAlloced = max_factors * sizeof (REAL);
+#ifdef SUN_BROKEN_REALLOC
+    }
+#endif
+  }
+  else
+  {
+    /* allow full use of alloc'd space */
+    max_factors = fftstate->SpaceAlloced / sizeof (REAL);
+  }
+  if (fftstate->MaxPermAlloced < max_perm)
+  {
+#ifdef SUN_BROKEN_REALLOC
+    if (!fftstate->MaxPermAlloced) /* first time */
+    else
+#endif
+      fftstate->MaxPermAlloced = max_perm;
+  }
+  else
+  {
+    /* allow full use of alloc'd space */
+    max_perm = fftstate->MaxPermAlloced;
+  }
+
+  /* assign pointers */
+  Rtmp = (REAL *) fftstate->Tmp0;
+  Itmp = (REAL *) fftstate->Tmp1;
+  Cos  = (REAL *) fftstate->Tmp2;
+  Sin  = (REAL *) fftstate->Tmp3;
+
+  /*
+   * Function Body
+   */
+  inc = iSign;
+  if (iSign < 0) {
+    s72 = -s72;
+    s60 = -s60;
+    pi2 = -pi2;
+    inc = -inc;  /* absolute value */
+  }
+
+  /* adjust for strange increments */
+  nt = inc * (int)nTotal;
+  ns = inc * (int)nSpan;
+  kspan = ns;
+
+  nn = nt - inc;
+  jc = ns / (int)nPass;
+  radf = pi2 * (double) jc;
+  pi2 *= 2.0;   /* use 2 PI from here on */
+
+  ii = 0;
+  jf = 0;
+  /*  determine the factors of n */
+  mfactor = 0;
+  k = (int)nPass;
+  while (k % 16 == 0) {
+    mfactor++;
+    fftstate->factor [mfactor - 1] = 4;
+    k /= 16;
+  }
+  j = 3;
+  jj = 9;
+  do {
+    while (k % jj == 0) {
+      mfactor++;
+      fftstate->factor [mfactor - 1] = j;
+      k /= jj;
+    }
+    j += 2;
+    jj = j * j;
+  } while (jj <= k);
+  if (k <= 4) {
+    kt = mfactor;
+    fftstate->factor [mfactor] = k;
+    if (k != 1)
+      mfactor++;
+  } else {
+    if (k - (k / 4 << 2) == 0) {
+      mfactor++;
+      fftstate->factor [mfactor - 1] = 2;
+      k /= 4;
+    }
+    kt = mfactor;
+    j = 2;
+    do {
+      if (k % j == 0) {
+        mfactor++;
+        fftstate->factor [mfactor - 1] = j;
+        k /= j;
+      }
+      j = ((j + 1) / 2 << 1) + 1;
+    } while (j <= k);
+  }
+  if (kt) {
+    j = kt;
+    do {
+      mfactor++;
+      fftstate->factor [mfactor - 1] = fftstate->factor [j - 1];
+      j--;
+    } while (j);
+  }
+
+  /* test that mfactors is in range */
+  if (mfactor > NFACTOR)
+  {
+    return -1;
+  }
+
+  /* compute fourier transform */
+  for (;;) {
+    sd = radf / (double) kspan;
+    cd = sin(sd);
+    cd = 2.0 * cd * cd;
+    sd = sin(sd + sd);
+    kk = 0;
+    ii++;
+
+    switch (fftstate->factor [ii - 1]) {
+      case 2:
+        /* transform for factor of 2 (including rotation factor) */
+        kspan /= 2;
+        k1 = kspan + 2;
+        do {
+          do {
+            k2 = kk + kspan;
+            ak = Re [k2];
+            bk = Im [k2];
+            Re [k2] = Re [kk] - ak;
+            Im [k2] = Im [kk] - bk;
+            Re [kk] += ak;
+            Im [kk] += bk;
+            kk = k2 + kspan;
+          } while (kk < nn);
+          kk -= nn;
+        } while (kk < jc);
+        if (kk >= kspan)
+          goto Permute_Results_Label;  /* exit infinite loop */
+        do {
+          c1 = 1.0 - cd;
+          s1 = sd;
+          do {
+            do {
+              do {
+                k2 = kk + kspan;
+                ak = Re [kk] - Re [k2];
+                bk = Im [kk] - Im [k2];
+                Re [kk] += Re [k2];
+                Im [kk] += Im [k2];
+                Re [k2] = c1 * ak - s1 * bk;
+                Im [k2] = s1 * ak + c1 * bk;
+                kk = k2 + kspan;
+              } while (kk < (nt-1));
+              k2 = kk - nt;
+              c1 = -c1;
+              kk = k1 - k2;
+            } while (kk > k2);
+            ak = c1 - (cd * c1 + sd * s1);
+            s1 = sd * c1 - cd * s1 + s1;
+            c1 = 2.0 - (ak * ak + s1 * s1);
+            s1 *= c1;
+            c1 *= ak;
+            kk += jc;
+          } while (kk < k2);
+          k1 += inc + inc;
+          kk = (k1 - kspan + 1) / 2 + jc - 1;
+        } while (kk < (jc + jc));
+        break;
+
+      case 4:   /* transform for factor of 4 */
+        ispan = kspan;
+        kspan /= 4;
+
+        do {
+          c1 = 1.0;
+          s1 = 0.0;
+          do {
+            do {
+              k1 = kk + kspan;
+              k2 = k1 + kspan;
+              k3 = k2 + kspan;
+              akp = Re [kk] + Re [k2];
+              akm = Re [kk] - Re [k2];
+              ajp = Re [k1] + Re [k3];
+              ajm = Re [k1] - Re [k3];
+              bkp = Im [kk] + Im [k2];
+              bkm = Im [kk] - Im [k2];
+              bjp = Im [k1] + Im [k3];
+              bjm = Im [k1] - Im [k3];
+              Re [kk] = akp + ajp;
+              Im [kk] = bkp + bjp;
+              ajp = akp - ajp;
+              bjp = bkp - bjp;
+              if (iSign < 0) {
+                akp = akm + bjm;
+                bkp = bkm - ajm;
+                akm -= bjm;
+                bkm += ajm;
+              } else {
+                akp = akm - bjm;
+                bkp = bkm + ajm;
+                akm += bjm;
+                bkm -= ajm;
+              }
+              /* avoid useless multiplies */
+              if (s1 == 0.0) {
+                Re [k1] = akp;
+                Re [k2] = ajp;
+                Re [k3] = akm;
+                Im [k1] = bkp;
+                Im [k2] = bjp;
+                Im [k3] = bkm;
+              } else {
+                Re [k1] = akp * c1 - bkp * s1;
+                Re [k2] = ajp * c2 - bjp * s2;
+                Re [k3] = akm * c3 - bkm * s3;
+                Im [k1] = akp * s1 + bkp * c1;
+                Im [k2] = ajp * s2 + bjp * c2;
+                Im [k3] = akm * s3 + bkm * c3;
+              }
+              kk = k3 + kspan;
+            } while (kk < nt);
+
+            c2 = c1 - (cd * c1 + sd * s1);
+            s1 = sd * c1 - cd * s1 + s1;
+            c1 = 2.0 - (c2 * c2 + s1 * s1);
+            s1 *= c1;
+            c1 *= c2;
+            /* values of c2, c3, s2, s3 that will get used next time */
+            c2 = c1 * c1 - s1 * s1;
+            s2 = 2.0 * c1 * s1;
+            c3 = c2 * c1 - s2 * s1;
+            s3 = c2 * s1 + s2 * c1;
+            kk = kk - nt + jc;
+          } while (kk < kspan);
+          kk = kk - kspan + inc;
+        } while (kk < jc);
+        if (kspan == jc)
+          goto Permute_Results_Label;  /* exit infinite loop */
+        break;
+
+      default:
+        /*  transform for odd factors */
+#ifdef FFT_RADIX4
+        return -1;
+        break;
+#else /* FFT_RADIX4 */
+        k = fftstate->factor [ii - 1];
+        ispan = kspan;
+        kspan /= k;
+
+        switch (k) {
+          case 3: /* transform for factor of 3 (optional code) */
+            do {
+              do {
+                k1 = kk + kspan;
+                k2 = k1 + kspan;
+                ak = Re [kk];
+                bk = Im [kk];
+                aj = Re [k1] + Re [k2];
+                bj = Im [k1] + Im [k2];
+                Re [kk] = ak + aj;
+                Im [kk] = bk + bj;
+                ak -= 0.5 * aj;
+                bk -= 0.5 * bj;
+                aj = (Re [k1] - Re [k2]) * s60;
+                bj = (Im [k1] - Im [k2]) * s60;
+                Re [k1] = ak - bj;
+                Re [k2] = ak + bj;
+                Im [k1] = bk + aj;
+                Im [k2] = bk - aj;
+                kk = k2 + kspan;
+              } while (kk < (nn - 1));
+              kk -= nn;
+            } while (kk < kspan);
+            break;
+
+          case 5: /*  transform for factor of 5 (optional code) */
+            c2 = c72 * c72 - s72 * s72;
+            s2 = 2.0 * c72 * s72;
+            do {
+              do {
+                k1 = kk + kspan;
+                k2 = k1 + kspan;
+                k3 = k2 + kspan;
+                k4 = k3 + kspan;
+                akp = Re [k1] + Re [k4];
+                akm = Re [k1] - Re [k4];
+                bkp = Im [k1] + Im [k4];
+                bkm = Im [k1] - Im [k4];
+                ajp = Re [k2] + Re [k3];
+                ajm = Re [k2] - Re [k3];
+                bjp = Im [k2] + Im [k3];
+                bjm = Im [k2] - Im [k3];
+                aa = Re [kk];
+                bb = Im [kk];
+                Re [kk] = aa + akp + ajp;
+                Im [kk] = bb + bkp + bjp;
+                ak = akp * c72 + ajp * c2 + aa;
+                bk = bkp * c72 + bjp * c2 + bb;
+                aj = akm * s72 + ajm * s2;
+                bj = bkm * s72 + bjm * s2;
+                Re [k1] = ak - bj;
+                Re [k4] = ak + bj;
+                Im [k1] = bk + aj;
+                Im [k4] = bk - aj;
+                ak = akp * c2 + ajp * c72 + aa;
+                bk = bkp * c2 + bjp * c72 + bb;
+                aj = akm * s2 - ajm * s72;
+                bj = bkm * s2 - bjm * s72;
+                Re [k2] = ak - bj;
+                Re [k3] = ak + bj;
+                Im [k2] = bk + aj;
+                Im [k3] = bk - aj;
+                kk = k4 + kspan;
+              } while (kk < (nn-1));
+              kk -= nn;
+            } while (kk < kspan);
+            break;
+
+          default:
+            if (k != jf) {
+              jf = k;
+              s1 = pi2 / (double) k;
+              c1 = cos(s1);
+              s1 = sin(s1);
+              if (jf > max_factors){
+                return -1;
+              }
+              Cos [jf - 1] = 1.0;
+              Sin [jf - 1] = 0.0;
+              j = 1;
+              do {
+                Cos [j - 1] = Cos [k - 1] * c1 + Sin [k - 1] * s1;
+                Sin [j - 1] = Cos [k - 1] * s1 - Sin [k - 1] * c1;
+                k--;
+                Cos [k - 1] = Cos [j - 1];
+                Sin [k - 1] = -Sin [j - 1];
+                j++;
+              } while (j < k);
+            }
+            do {
+              do {
+                k1 = kk;
+                k2 = kk + ispan;
+                ak = aa = Re [kk];
+                bk = bb = Im [kk];
+                j = 1;
+                k1 += kspan;
+                do {
+                  k2 -= kspan;
+                  j++;
+                  Rtmp [j - 1] = Re [k1] + Re [k2];
+                  ak += Rtmp [j - 1];
+                  Itmp [j - 1] = Im [k1] + Im [k2];
+                  bk += Itmp [j - 1];
+                  j++;
+                  Rtmp [j - 1] = Re [k1] - Re [k2];
+                  Itmp [j - 1] = Im [k1] - Im [k2];
+                  k1 += kspan;
+                } while (k1 < k2);
+                Re [kk] = ak;
+                Im [kk] = bk;
+                k1 = kk;
+                k2 = kk + ispan;
+                j = 1;
+                do {
+                  k1 += kspan;
+                  k2 -= kspan;
+                  jj = j;
+                  ak = aa;
+                  bk = bb;
+                  aj = 0.0;
+                  bj = 0.0;
+                  k = 1;
+                  do {
+                    k++;
+                    ak += Rtmp [k - 1] * Cos [jj - 1];
+                    bk += Itmp [k - 1] * Cos [jj - 1];
+                    k++;
+                    aj += Rtmp [k - 1] * Sin [jj - 1];
+                    bj += Itmp [k - 1] * Sin [jj - 1];
+                    jj += j;
+                    if (jj > jf) {
+                      jj -= jf;
+                    }
+                  } while (k < jf);
+                  k = jf - j;
+                  Re [k1] = ak - bj;
+                  Im [k1] = bk + aj;
+                  Re [k2] = ak + bj;
+                  Im [k2] = bk - aj;
+                  j++;
+                } while (j < k);
+                kk += ispan;
+              } while (kk < nn);
+              kk -= nn;
+            } while (kk < kspan);
+            break;
+        }
+
+        /*  multiply by rotation factor (except for factors of 2 and 4) */
+        if (ii == mfactor)
+          goto Permute_Results_Label;  /* exit infinite loop */
+        kk = jc;
+        do {
+          c2 = 1.0 - cd;
+          s1 = sd;
+          do {
+            c1 = c2;
+            s2 = s1;
+            kk += kspan;
+            do {
+              do {
+                ak = Re [kk];
+                Re [kk] = c2 * ak - s2 * Im [kk];
+                Im [kk] = s2 * ak + c2 * Im [kk];
+                kk += ispan;
+              } while (kk < nt);
+              ak = s1 * s2;
+              s2 = s1 * c2 + c1 * s2;
+              c2 = c1 * c2 - ak;
+              kk = kk - nt + kspan;
+            } while (kk < ispan);
+            c2 = c1 - (cd * c1 + sd * s1);
+            s1 += sd * c1 - cd * s1;
+            c1 = 2.0 - (c2 * c2 + s1 * s1);
+            s1 *= c1;
+            c2 *= c1;
+            kk = kk - ispan + jc;
+          } while (kk < kspan);
+          kk = kk - kspan + jc + inc;
+        } while (kk < (jc + jc));
+        break;
+#endif /* FFT_RADIX4 */
+    }
+  }
+
+  /*  permute the results to normal order---done in two stages */
+  /*  permutation for square factors of n */
+Permute_Results_Label:
+  fftstate->Perm [0] = ns;
+  if (kt) {
+    k = kt + kt + 1;
+    if (mfactor < k)
+      k--;
+    j = 1;
+    fftstate->Perm [k] = jc;
+    do {
+      fftstate->Perm [j] = fftstate->Perm [j - 1] / fftstate->factor [j - 1];
+      fftstate->Perm [k - 1] = fftstate->Perm [k] * fftstate->factor [j - 1];
+      j++;
+      k--;
+    } while (j < k);
+    k3 = fftstate->Perm [k];
+    kspan = fftstate->Perm [1];
+    kk = jc;
+    k2 = kspan;
+    j = 1;
+    if (nPass != nTotal) {
+      /*  permutation for multivariate transform */
+   Permute_Multi_Label:
+      do {
+        do {
+          k = kk + jc;
+          do {
+            /* swap Re [kk] <> Re [k2], Im [kk] <> Im [k2] */
+            ak = Re [kk]; Re [kk] = Re [k2]; Re [k2] = ak;
+            bk = Im [kk]; Im [kk] = Im [k2]; Im [k2] = bk;
+            kk += inc;
+            k2 += inc;
+          } while (kk < (k-1));
+          kk += ns - jc;
+          k2 += ns - jc;
+        } while (kk < (nt-1));
+        k2 = k2 - nt + kspan;
+        kk = kk - nt + jc;
+      } while (k2 < (ns-1));
+      do {
+        do {
+          k2 -= fftstate->Perm [j - 1];
+          j++;
+          k2 = fftstate->Perm [j] + k2;
+        } while (k2 > fftstate->Perm [j - 1]);
+        j = 1;
+        do {
+          if (kk < (k2-1))
+            goto Permute_Multi_Label;
+          kk += jc;
+          k2 += kspan;
+        } while (k2 < (ns-1));
+      } while (kk < (ns-1));
+    } else {
+      /*  permutation for single-variate transform (optional code) */
+   Permute_Single_Label:
+      do {
+        /* swap Re [kk] <> Re [k2], Im [kk] <> Im [k2] */
+        ak = Re [kk]; Re [kk] = Re [k2]; Re [k2] = ak;
+        bk = Im [kk]; Im [kk] = Im [k2]; Im [k2] = bk;
+        kk += inc;
+        k2 += kspan;
+      } while (k2 < (ns-1));
+      do {
+        do {
+          k2 -= fftstate->Perm [j - 1];
+          j++;
+          k2 = fftstate->Perm [j] + k2;
+        } while (k2 >= fftstate->Perm [j - 1]);
+        j = 1;
+        do {
+          if (kk < k2)
+            goto Permute_Single_Label;
+          kk += inc;
+          k2 += kspan;
+        } while (k2 < (ns-1));
+      } while (kk < (ns-1));
+    }
+    jc = k3;
+  }
+
+  if ((kt << 1) + 1 >= mfactor)
+    return 0;
+  ispan = fftstate->Perm [kt];
+  /* permutation for square-free factors of n */
+  j = mfactor - kt;
+  fftstate->factor [j] = 1;
+  do {
+    fftstate->factor [j - 1] *= fftstate->factor [j];
+    j--;
+  } while (j != kt);
+  kt++;
+  nn = fftstate->factor [kt - 1] - 1;
+  if (nn > (int) max_perm) {
+    return -1;
+  }
+  j = jj = 0;
+  for (;;) {
+    k = kt + 1;
+    k2 = fftstate->factor [kt - 1];
+    kk = fftstate->factor [k - 1];
+    j++;
+    if (j > nn)
+      break;    /* exit infinite loop */
+    jj += kk;
+    while (jj >= k2) {
+      jj -= k2;
+      k2 = kk;
+      k++;
+      kk = fftstate->factor [k - 1];
+      jj += kk;
+    }
+    fftstate->Perm [j - 1] = jj;
+  }
+  /*  determine the permutation cycles of length greater than 1 */
+  j = 0;
+  for (;;) {
+    do {
+      j++;
+      kk = fftstate->Perm [j - 1];
+    } while (kk < 0);
+    if (kk != j) {
+      do {
+        k = kk;
+        kk = fftstate->Perm [k - 1];
+        fftstate->Perm [k - 1] = -kk;
+      } while (kk != j);
+      k3 = kk;
+    } else {
+      fftstate->Perm [j - 1] = -j;
+      if (j == nn)
+        break;  /* exit infinite loop */
+    }
+  }
+  max_factors *= inc;
+  /*  reorder a and b, following the permutation cycles */
+  for (;;) {
+    j = k3 + 1;
+    nt -= ispan;
+    ii = nt - inc + 1;
+    if (nt < 0)
+      break;   /* exit infinite loop */
+    do {
+      do {
+        j--;
+      } while (fftstate->Perm [j - 1] < 0);
+      jj = jc;
+      do {
+        kspan = jj;
+        if (jj > max_factors) {
+          kspan = max_factors;
+        }
+        jj -= kspan;
+        k = fftstate->Perm [j - 1];
+        kk = jc * k + ii + jj;
+        k1 = kk + kspan - 1;
+        k2 = 0;
+        do {
+          k2++;
+          Rtmp [k2 - 1] = Re [k1];
+          Itmp [k2 - 1] = Im [k1];
+          k1 -= inc;
+        } while (k1 != (kk-1));
+        do {
+          k1 = kk + kspan - 1;
+          k2 = k1 - jc * (k + fftstate->Perm [k - 1]);
+          k = -fftstate->Perm [k - 1];
+          do {
+            Re [k1] = Re [k2];
+            Im [k1] = Im [k2];
+            k1 -= inc;
+            k2 -= inc;
+          } while (k1 != (kk-1));
+          kk = k2 + 1;
+        } while (k != j);
+        k1 = kk + kspan - 1;
+        k2 = 0;
+        do {
+          k2++;
+          Re [k1] = Rtmp [k2 - 1];
+          Im [k1] = Itmp [k2 - 1];
+          k1 -= inc;
+        } while (k1 != (kk-1));
+      } while (jj);
+    } while (j != 1);
+  }
+  return 0;   /* exit point here */
+}
+/* ---------------------- end-of-file (c source) ---------------------- */
+
diff --git a/modules/audio_coding/codecs/isac/main/source/fft.h b/modules/audio_coding/codecs/isac/main/source/fft.h
new file mode 100644
index 0000000..9750153
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/fft.h
@@ -0,0 +1,43 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*--------------------------------*-C-*---------------------------------*
+ * File:
+ *       fftn.h
+ * ---------------------------------------------------------------------*
+ * Re[]:        real value array
+ * Im[]:        imaginary value array
+ * nTotal:      total number of complex values
+ * nPass:       number of elements involved in this pass of transform
+ * nSpan:       nspan/nPass = number of bytes to increment pointer
+ *              in Re[] and Im[]
+ * isign: exponent: +1 = forward  -1 = reverse
+ * scaling: normalizing constant by which the final result is *divided*
+ * scaling == -1, normalize by total dimension of the transform
+ * scaling <  -1, normalize by the square-root of the total dimension
+ *
+ * ----------------------------------------------------------------------
+ * See the comments in the code for correct usage!
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_FFT_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_FFT_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+/* double precision routine */
+
+
+int WebRtcIsac_Fftns (unsigned int ndim, const int dims[], double Re[], double Im[],
+                     int isign, double scaling, FFTstr *fftstate);
+
+
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_FFT_H_ */
diff --git a/modules/audio_coding/codecs/isac/main/source/filter_functions.c b/modules/audio_coding/codecs/isac/main/source/filter_functions.c
new file mode 100644
index 0000000..7bd5a79
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/filter_functions.c
@@ -0,0 +1,263 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory.h>
+#include <string.h>
+#ifdef WEBRTC_ANDROID
+#include <stdlib.h>
+#endif
+#include "modules/audio_coding/codecs/isac/main/source/pitch_estimator.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_analysis.h"
+#include "modules/audio_coding/codecs/isac/main/source/codec.h"
+
+
+
+void WebRtcIsac_AllPoleFilter(double* InOut,
+                              double* Coef,
+                              size_t lengthInOut,
+                              int orderCoef) {
+  /* the state of filter is assumed to be in InOut[-1] to InOut[-orderCoef] */
+  double scal;
+  double sum;
+  size_t n;
+  int k;
+
+  //if (fabs(Coef[0]-1.0)<0.001) {
+  if ( (Coef[0] > 0.9999) && (Coef[0] < 1.0001) )
+  {
+    for(n = 0; n < lengthInOut; n++)
+    {
+      sum = Coef[1] * InOut[-1];
+      for(k = 2; k <= orderCoef; k++){
+        sum += Coef[k] * InOut[-k];
+      }
+      *InOut++ -= sum;
+    }
+  }
+  else
+  {
+    scal = 1.0 / Coef[0];
+    for(n=0;n<lengthInOut;n++)
+    {
+      *InOut *= scal;
+      for(k=1;k<=orderCoef;k++){
+        *InOut -= scal*Coef[k]*InOut[-k];
+      }
+      InOut++;
+    }
+  }
+}
+
+
+void WebRtcIsac_AllZeroFilter(double* In,
+                              double* Coef,
+                              size_t lengthInOut,
+                              int orderCoef,
+                              double* Out) {
+  /* the state of filter is assumed to be in In[-1] to In[-orderCoef] */
+
+  size_t n;
+  int k;
+  double tmp;
+
+  for(n = 0; n < lengthInOut; n++)
+  {
+    tmp = In[0] * Coef[0];
+
+    for(k = 1; k <= orderCoef; k++){
+      tmp += Coef[k] * In[-k];
+    }
+
+    *Out++ = tmp;
+    In++;
+  }
+}
+
+
+void WebRtcIsac_ZeroPoleFilter(double* In,
+                               double* ZeroCoef,
+                               double* PoleCoef,
+                               size_t lengthInOut,
+                               int orderCoef,
+                               double* Out) {
+  /* the state of the zero section is assumed to be in In[-1] to In[-orderCoef] */
+  /* the state of the pole section is assumed to be in Out[-1] to Out[-orderCoef] */
+
+  WebRtcIsac_AllZeroFilter(In,ZeroCoef,lengthInOut,orderCoef,Out);
+  WebRtcIsac_AllPoleFilter(Out,PoleCoef,lengthInOut,orderCoef);
+}
+
+
+void WebRtcIsac_AutoCorr(double* r, const double* x, size_t N, size_t order) {
+  size_t  lag, n;
+  double sum, prod;
+  const double *x_lag;
+
+  for (lag = 0; lag <= order; lag++)
+  {
+    sum = 0.0f;
+    x_lag = &x[lag];
+    prod = x[0] * x_lag[0];
+    for (n = 1; n < N - lag; n++) {
+      sum += prod;
+      prod = x[n] * x_lag[n];
+    }
+    sum += prod;
+    r[lag] = sum;
+  }
+
+}
+
+
+void WebRtcIsac_BwExpand(double* out, double* in, double coef, size_t length) {
+  size_t i;
+  double  chirp;
+
+  chirp = coef;
+
+  out[0] = in[0];
+  for (i = 1; i < length; i++) {
+    out[i] = chirp * in[i];
+    chirp *= coef;
+  }
+}
+
+void WebRtcIsac_WeightingFilter(const double* in,
+                                double* weiout,
+                                double* whiout,
+                                WeightFiltstr* wfdata) {
+  double  tmpbuffer[PITCH_FRAME_LEN + PITCH_WLPCBUFLEN];
+  double  corr[PITCH_WLPCORDER+1], rc[PITCH_WLPCORDER+1];
+  double apol[PITCH_WLPCORDER+1], apolr[PITCH_WLPCORDER+1];
+  double  rho=0.9, *inp, *dp, *dp2;
+  double  whoutbuf[PITCH_WLPCBUFLEN + PITCH_WLPCORDER];
+  double  weoutbuf[PITCH_WLPCBUFLEN + PITCH_WLPCORDER];
+  double  *weo, *who, opol[PITCH_WLPCORDER+1], ext[PITCH_WLPCWINLEN];
+  int     k, n, endpos, start;
+
+  /* Set up buffer and states */
+  memcpy(tmpbuffer, wfdata->buffer, sizeof(double) * PITCH_WLPCBUFLEN);
+  memcpy(tmpbuffer+PITCH_WLPCBUFLEN, in, sizeof(double) * PITCH_FRAME_LEN);
+  memcpy(wfdata->buffer, tmpbuffer+PITCH_FRAME_LEN, sizeof(double) * PITCH_WLPCBUFLEN);
+
+  dp=weoutbuf;
+  dp2=whoutbuf;
+  for (k=0;k<PITCH_WLPCORDER;k++) {
+    *dp++ = wfdata->weostate[k];
+    *dp2++ = wfdata->whostate[k];
+    opol[k]=0.0;
+  }
+  opol[0]=1.0;
+  opol[PITCH_WLPCORDER]=0.0;
+  weo=dp;
+  who=dp2;
+
+  endpos=PITCH_WLPCBUFLEN + PITCH_SUBFRAME_LEN;
+  inp=tmpbuffer + PITCH_WLPCBUFLEN;
+
+  for (n=0; n<PITCH_SUBFRAMES; n++) {
+    /* Windowing */
+    start=endpos-PITCH_WLPCWINLEN;
+    for (k=0; k<PITCH_WLPCWINLEN; k++) {
+      ext[k]=wfdata->window[k]*tmpbuffer[start+k];
+    }
+
+    /* Get LPC polynomial */
+    WebRtcIsac_AutoCorr(corr, ext, PITCH_WLPCWINLEN, PITCH_WLPCORDER);
+    corr[0]=1.01*corr[0]+1.0; /* White noise correction */
+    WebRtcIsac_LevDurb(apol, rc, corr, PITCH_WLPCORDER);
+    WebRtcIsac_BwExpand(apolr, apol, rho, PITCH_WLPCORDER+1);
+
+    /* Filtering */
+    WebRtcIsac_ZeroPoleFilter(inp, apol, apolr, PITCH_SUBFRAME_LEN, PITCH_WLPCORDER, weo);
+    WebRtcIsac_ZeroPoleFilter(inp, apolr, opol, PITCH_SUBFRAME_LEN, PITCH_WLPCORDER, who);
+
+    inp+=PITCH_SUBFRAME_LEN;
+    endpos+=PITCH_SUBFRAME_LEN;
+    weo+=PITCH_SUBFRAME_LEN;
+    who+=PITCH_SUBFRAME_LEN;
+  }
+
+  /* Export filter states */
+  for (k=0;k<PITCH_WLPCORDER;k++) {
+    wfdata->weostate[k]=weoutbuf[PITCH_FRAME_LEN+k];
+    wfdata->whostate[k]=whoutbuf[PITCH_FRAME_LEN+k];
+  }
+
+  /* Export output data */
+  memcpy(weiout, weoutbuf+PITCH_WLPCORDER, sizeof(double) * PITCH_FRAME_LEN);
+  memcpy(whiout, whoutbuf+PITCH_WLPCORDER, sizeof(double) * PITCH_FRAME_LEN);
+}
+
+
+static const double APupper[ALLPASSSECTIONS] = {0.0347, 0.3826};
+static const double APlower[ALLPASSSECTIONS] = {0.1544, 0.744};
+
+
+void WebRtcIsac_AllpassFilterForDec(double* InOut,
+                                    const double* APSectionFactors,
+                                    size_t lengthInOut,
+                                    double* FilterState) {
+  //This performs all-pass filtering--a series of first order all-pass sections are used
+  //to filter the input in a cascade manner.
+  size_t n,j;
+  double temp;
+  for (j=0; j<ALLPASSSECTIONS; j++){
+    for (n=0;n<lengthInOut;n+=2){
+      temp = InOut[n]; //store input
+      InOut[n] = FilterState[j] + APSectionFactors[j]*temp;
+      FilterState[j] = -APSectionFactors[j]*InOut[n] + temp;
+    }
+  }
+}
+
+void WebRtcIsac_DecimateAllpass(const double* in,
+                                double* state_in,
+                                size_t N,
+                                double* out) {
+  size_t n;
+  double data_vec[PITCH_FRAME_LEN];
+
+  /* copy input */
+  memcpy(data_vec+1, in, sizeof(double) * (N-1));
+
+  data_vec[0] = state_in[2*ALLPASSSECTIONS];   //the z^(-1) state
+  state_in[2*ALLPASSSECTIONS] = in[N-1];
+
+  WebRtcIsac_AllpassFilterForDec(data_vec+1, APupper, N, state_in);
+  WebRtcIsac_AllpassFilterForDec(data_vec, APlower, N, state_in+ALLPASSSECTIONS);
+
+  for (n=0;n<N/2;n++)
+    out[n] = data_vec[2*n] + data_vec[2*n+1];
+
+}
+
+
+/* create high-pass filter ocefficients
+ * z = 0.998 * exp(j*2*pi*35/8000);
+ * p = 0.94 * exp(j*2*pi*140/8000);
+ * HP_b = [1, -2*real(z), abs(z)^2];
+ * HP_a = [1, -2*real(p), abs(p)^2]; */
+static const double a_coef[2] = { 1.86864659625574, -0.88360000000000};
+static const double b_coef[2] = {-1.99524591718270,  0.99600400000000};
+
+/* second order high-pass filter */
+void WebRtcIsac_Highpass(const double* in,
+                         double* out,
+                         double* state,
+                         size_t N) {
+  size_t k;
+
+  for (k=0; k<N; k++) {
+    *out = *in + state[1];
+    state[1] = state[0] + b_coef[0] * *in + a_coef[0] * *out;
+    state[0] = b_coef[1] * *in++ + a_coef[1] * *out++;
+  }
+}
diff --git a/modules/audio_coding/codecs/isac/main/source/filterbank_tables.c b/modules/audio_coding/codecs/isac/main/source/filterbank_tables.c
new file mode 100644
index 0000000..12caee0
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/filterbank_tables.c
@@ -0,0 +1,37 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/* filterbank_tables.c*/
+/* This file contains variables that are used in filterbanks.c*/
+
+#include "modules/audio_coding/codecs/isac/main/source/filterbank_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+
+/* The composite all-pass filter factors */
+const float WebRtcIsac_kCompositeApFactorsFloat[4] = {
+ 0.03470000000000f,  0.15440000000000f,  0.38260000000000f,  0.74400000000000f};
+
+/* The upper channel all-pass filter factors */
+const float WebRtcIsac_kUpperApFactorsFloat[2] = {
+ 0.03470000000000f,  0.38260000000000f};
+
+/* The lower channel all-pass filter factors */
+const float WebRtcIsac_kLowerApFactorsFloat[2] = {
+ 0.15440000000000f,  0.74400000000000f};
+
+/* The matrix for transforming the backward composite state to upper channel state */
+const float WebRtcIsac_kTransform1Float[8] = {
+  -0.00158678506084f,  0.00127157815343f, -0.00104805672709f,  0.00084837248079f,
+  0.00134467983258f, -0.00107756549387f,  0.00088814793277f, -0.00071893072525f};
+
+/* The matrix for transforming the backward composite state to lower channel state */
+const float WebRtcIsac_kTransform2Float[8] = {
+ -0.00170686041697f,  0.00136780109829f, -0.00112736532350f,  0.00091257055385f,
+  0.00103094281812f, -0.00082615076557f,  0.00068092756088f, -0.00055119165484f};
diff --git a/modules/audio_coding/codecs/isac/main/source/filterbank_tables.h b/modules/audio_coding/codecs/isac/main/source/filterbank_tables.h
new file mode 100644
index 0000000..2edb0f0
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/filterbank_tables.h
@@ -0,0 +1,46 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * filterbank_tables.h
+ *
+ * Header file for variables that are defined in
+ * filterbank_tables.c.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_FILTERBANK_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_FILTERBANK_TABLES_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+/********************* Coefficient Tables ************************/
+/* The number of composite all-pass filter factors */
+#define NUMBEROFCOMPOSITEAPSECTIONS 4
+
+/* The number of all-pass filter factors in an upper or lower channel*/
+#define NUMBEROFCHANNELAPSECTIONS 2
+
+/* The composite all-pass filter factors */
+extern const float WebRtcIsac_kCompositeApFactorsFloat[4];
+
+/* The upper channel all-pass filter factors */
+extern const float WebRtcIsac_kUpperApFactorsFloat[2];
+
+/* The lower channel all-pass filter factors */
+extern const float WebRtcIsac_kLowerApFactorsFloat[2];
+
+/* The matrix for transforming the backward composite state to upper channel state */
+extern const float WebRtcIsac_kTransform1Float[8];
+
+/* The matrix for transforming the backward composite state to lower channel state */
+extern const float WebRtcIsac_kTransform2Float[8];
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_FILTERBANK_TABLES_H_ */
diff --git a/modules/audio_coding/codecs/isac/main/source/filterbanks.c b/modules/audio_coding/codecs/isac/main/source/filterbanks.c
new file mode 100644
index 0000000..6f1e4db
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/filterbanks.c
@@ -0,0 +1,346 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * filterbanks.c
+ *
+ * This file contains function WebRtcIsac_AllPassFilter2Float,
+ * WebRtcIsac_SplitAndFilter, and WebRtcIsac_FilterAndCombine
+ * which implement filterbanks that produce decimated lowpass and
+ * highpass versions of a signal, and performs reconstruction.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/filterbank_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/codec.h"
+
+/* This function performs all-pass filtering--a series of first order all-pass
+ * sections are used to filter the input in a cascade manner.
+ * The input is overwritten!!
+ */
+static void WebRtcIsac_AllPassFilter2Float(float *InOut, const float *APSectionFactors,
+                                           int lengthInOut, int NumberOfSections,
+                                           float *FilterState)
+{
+  int n, j;
+  float temp;
+  for (j=0; j<NumberOfSections; j++){
+    for (n=0;n<lengthInOut;n++){
+      temp = FilterState[j] + APSectionFactors[j] * InOut[n];
+      FilterState[j] = -APSectionFactors[j] * temp + InOut[n];
+      InOut[n] = temp;
+    }
+  }
+}
+
+/* HPstcoeff_in = {a1, a2, b1 - b0 * a1, b2 - b0 * a2}; */
+static const float kHpStCoefInFloat[4] =
+{-1.94895953203325f, 0.94984516000000f, -0.05101826139794f, 0.05015484000000f};
+
+/* Function WebRtcIsac_SplitAndFilter
+ * This function creates low-pass and high-pass decimated versions of part of
+ the input signal, and part of the signal in the input 'lookahead buffer'.
+
+ INPUTS:
+ in: a length FRAMESAMPLES array of input samples
+ prefiltdata: input data structure containing the filterbank states
+ and lookahead samples from the previous encoding
+ iteration.
+ OUTPUTS:
+ LP: a FRAMESAMPLES_HALF array of low-pass filtered samples that
+ have been phase equalized.  The first QLOOKAHEAD samples are
+ based on the samples in the two prefiltdata->INLABUFx arrays
+ each of length QLOOKAHEAD.
+ The remaining FRAMESAMPLES_HALF-QLOOKAHEAD samples are based
+ on the first FRAMESAMPLES_HALF-QLOOKAHEAD samples of the input
+ array in[].
+ HP: a FRAMESAMPLES_HALF array of high-pass filtered samples that
+ have been phase equalized.  The first QLOOKAHEAD samples are
+ based on the samples in the two prefiltdata->INLABUFx arrays
+ each of length QLOOKAHEAD.
+ The remaining FRAMESAMPLES_HALF-QLOOKAHEAD samples are based
+ on the first FRAMESAMPLES_HALF-QLOOKAHEAD samples of the input
+ array in[].
+
+ LP_la: a FRAMESAMPLES_HALF array of low-pass filtered samples.
+ These samples are not phase equalized. They are computed
+ from the samples in the in[] array.
+ HP_la: a FRAMESAMPLES_HALF array of high-pass filtered samples
+ that are not phase equalized. They are computed from
+ the in[] vector.
+ prefiltdata: this input data structure's filterbank state and
+ lookahead sample buffers are updated for the next
+ encoding iteration.
+*/
+void WebRtcIsac_SplitAndFilterFloat(float *pin, float *LP, float *HP,
+                                    double *LP_la, double *HP_la,
+                                    PreFiltBankstr *prefiltdata)
+{
+  int k,n;
+  float CompositeAPFilterState[NUMBEROFCOMPOSITEAPSECTIONS];
+  float ForTransform_CompositeAPFilterState[NUMBEROFCOMPOSITEAPSECTIONS];
+  float ForTransform_CompositeAPFilterState2[NUMBEROFCOMPOSITEAPSECTIONS];
+  float tempinoutvec[FRAMESAMPLES+MAX_AR_MODEL_ORDER];
+  float tempin_ch1[FRAMESAMPLES+MAX_AR_MODEL_ORDER];
+  float tempin_ch2[FRAMESAMPLES+MAX_AR_MODEL_ORDER];
+  float in[FRAMESAMPLES];
+  float ftmp;
+
+
+  /* High pass filter */
+
+  for (k=0;k<FRAMESAMPLES;k++) {
+    in[k] = pin[k] + kHpStCoefInFloat[2] * prefiltdata->HPstates_float[0] +
+        kHpStCoefInFloat[3] * prefiltdata->HPstates_float[1];
+    ftmp = pin[k] - kHpStCoefInFloat[0] * prefiltdata->HPstates_float[0] -
+        kHpStCoefInFloat[1] * prefiltdata->HPstates_float[1];
+    prefiltdata->HPstates_float[1] = prefiltdata->HPstates_float[0];
+    prefiltdata->HPstates_float[0] = ftmp;
+  }
+
+  /*
+    % backwards all-pass filtering to obtain zero-phase
+    [tmp1(N2+LA:-1:LA+1, 1), state1] = filter(Q.coef, Q.coef(end:-1:1), in(N:-2:2));
+    tmp1(LA:-1:1) = filter(Q.coef, Q.coef(end:-1:1), Q.LookAheadBuf1, state1);
+    Q.LookAheadBuf1 = in(N:-2:N-2*LA+2);
+  */
+  /*Backwards all-pass filter the odd samples of the input (upper channel)
+    to eventually obtain zero phase.  The composite all-pass filter (comprised of both
+    the upper and lower channel all-pass filsters in series) is used for the
+    filtering. */
+
+  /* First Channel */
+
+  /*initial state of composite filter is zero */
+  for (k=0;k<NUMBEROFCOMPOSITEAPSECTIONS;k++){
+    CompositeAPFilterState[k] = 0.0;
+  }
+  /* put every other sample of input into a temporary vector in reverse (backward) order*/
+  for (k=0;k<FRAMESAMPLES_HALF;k++) {
+    tempinoutvec[k] = in[FRAMESAMPLES-1-2*k];
+  }
+
+  /* now all-pass filter the backwards vector.  Output values overwrite the input vector. */
+  WebRtcIsac_AllPassFilter2Float(tempinoutvec, WebRtcIsac_kCompositeApFactorsFloat,
+                                 FRAMESAMPLES_HALF, NUMBEROFCOMPOSITEAPSECTIONS, CompositeAPFilterState);
+
+  /* save the backwards filtered output for later forward filtering,
+     but write it in forward order*/
+  for (k=0;k<FRAMESAMPLES_HALF;k++) {
+    tempin_ch1[FRAMESAMPLES_HALF+QLOOKAHEAD-1-k] = tempinoutvec[k];
+  }
+
+  /* save the backwards filter state  becaue it will be transformed
+     later into a forward state */
+  for (k=0; k<NUMBEROFCOMPOSITEAPSECTIONS; k++) {
+    ForTransform_CompositeAPFilterState[k] = CompositeAPFilterState[k];
+  }
+
+  /* now backwards filter the samples in the lookahead buffer. The samples were
+     placed there in the encoding of the previous frame.  The output samples
+     overwrite the input samples */
+  WebRtcIsac_AllPassFilter2Float(prefiltdata->INLABUF1_float,
+                                 WebRtcIsac_kCompositeApFactorsFloat, QLOOKAHEAD,
+                                 NUMBEROFCOMPOSITEAPSECTIONS, CompositeAPFilterState);
+
+  /* save the output, but write it in forward order */
+  /* write the lookahead samples for the next encoding iteration. Every other
+     sample at the end of the input frame is written in reverse order for the
+     lookahead length. Exported in the prefiltdata structure. */
+  for (k=0;k<QLOOKAHEAD;k++) {
+    tempin_ch1[QLOOKAHEAD-1-k]=prefiltdata->INLABUF1_float[k];
+    prefiltdata->INLABUF1_float[k]=in[FRAMESAMPLES-1-2*k];
+  }
+
+  /* Second Channel.  This is exactly like the first channel, except that the
+     even samples are now filtered instead (lower channel). */
+  for (k=0;k<NUMBEROFCOMPOSITEAPSECTIONS;k++){
+    CompositeAPFilterState[k] = 0.0;
+  }
+
+  for (k=0;k<FRAMESAMPLES_HALF;k++) {
+    tempinoutvec[k] = in[FRAMESAMPLES-2-2*k];
+  }
+
+  WebRtcIsac_AllPassFilter2Float(tempinoutvec, WebRtcIsac_kCompositeApFactorsFloat,
+                                 FRAMESAMPLES_HALF, NUMBEROFCOMPOSITEAPSECTIONS, CompositeAPFilterState);
+
+  for (k=0;k<FRAMESAMPLES_HALF;k++) {
+    tempin_ch2[FRAMESAMPLES_HALF+QLOOKAHEAD-1-k] = tempinoutvec[k];
+  }
+
+  for (k=0; k<NUMBEROFCOMPOSITEAPSECTIONS; k++) {
+    ForTransform_CompositeAPFilterState2[k] = CompositeAPFilterState[k];
+  }
+
+
+  WebRtcIsac_AllPassFilter2Float(prefiltdata->INLABUF2_float,
+                                 WebRtcIsac_kCompositeApFactorsFloat, QLOOKAHEAD,NUMBEROFCOMPOSITEAPSECTIONS,
+                                 CompositeAPFilterState);
+
+  for (k=0;k<QLOOKAHEAD;k++) {
+    tempin_ch2[QLOOKAHEAD-1-k]=prefiltdata->INLABUF2_float[k];
+    prefiltdata->INLABUF2_float[k]=in[FRAMESAMPLES-2-2*k];
+  }
+
+  /* Transform filter states from backward to forward */
+  /*At this point, each of the states of the backwards composite filters for the
+    two channels are transformed into forward filtering states for the corresponding
+    forward channel filters.  Each channel's forward filtering state from the previous
+    encoding iteration is added to the transformed state to get a proper forward state */
+
+  /* So the existing NUMBEROFCOMPOSITEAPSECTIONS x 1 (4x1) state vector is multiplied by a
+     NUMBEROFCHANNELAPSECTIONSxNUMBEROFCOMPOSITEAPSECTIONS (2x4) transform matrix to get the
+     new state that is added to the previous 2x1 input state */
+
+  for (k=0;k<NUMBEROFCHANNELAPSECTIONS;k++){ /* k is row variable */
+    for (n=0; n<NUMBEROFCOMPOSITEAPSECTIONS;n++){/* n is column variable */
+      prefiltdata->INSTAT1_float[k] += ForTransform_CompositeAPFilterState[n]*
+          WebRtcIsac_kTransform1Float[k*NUMBEROFCHANNELAPSECTIONS+n];
+      prefiltdata->INSTAT2_float[k] += ForTransform_CompositeAPFilterState2[n]*
+          WebRtcIsac_kTransform2Float[k*NUMBEROFCHANNELAPSECTIONS+n];
+    }
+  }
+
+  /*obtain polyphase components by forward all-pass filtering through each channel */
+  /* the backward filtered samples are now forward filtered with the corresponding channel filters */
+  /* The all pass filtering automatically updates the filter states which are exported in the
+     prefiltdata structure */
+  WebRtcIsac_AllPassFilter2Float(tempin_ch1,WebRtcIsac_kUpperApFactorsFloat,
+                                 FRAMESAMPLES_HALF, NUMBEROFCHANNELAPSECTIONS, prefiltdata->INSTAT1_float);
+  WebRtcIsac_AllPassFilter2Float(tempin_ch2,WebRtcIsac_kLowerApFactorsFloat,
+                                 FRAMESAMPLES_HALF, NUMBEROFCHANNELAPSECTIONS, prefiltdata->INSTAT2_float);
+
+  /* Now Construct low-pass and high-pass signals as combinations of polyphase components */
+  for (k=0; k<FRAMESAMPLES_HALF; k++) {
+    LP[k] = 0.5f*(tempin_ch1[k] + tempin_ch2[k]);/* low pass signal*/
+    HP[k] = 0.5f*(tempin_ch1[k] - tempin_ch2[k]);/* high pass signal*/
+  }
+
+  /* Lookahead LP and HP signals */
+  /* now create low pass and high pass signals of the input vector.  However, no
+     backwards filtering is performed, and hence no phase equalization is involved.
+     Also, the input contains some samples that are lookahead samples.  The high pass
+     and low pass signals that are created are used outside this function for analysis
+     (not encoding) purposes */
+
+  /* set up input */
+  for (k=0; k<FRAMESAMPLES_HALF; k++) {
+    tempin_ch1[k]=in[2*k+1];
+    tempin_ch2[k]=in[2*k];
+  }
+
+  /* the input filter states are passed in and updated by the all-pass filtering routine and
+     exported in the prefiltdata structure*/
+  WebRtcIsac_AllPassFilter2Float(tempin_ch1,WebRtcIsac_kUpperApFactorsFloat,
+                                 FRAMESAMPLES_HALF, NUMBEROFCHANNELAPSECTIONS, prefiltdata->INSTATLA1_float);
+  WebRtcIsac_AllPassFilter2Float(tempin_ch2,WebRtcIsac_kLowerApFactorsFloat,
+                                 FRAMESAMPLES_HALF, NUMBEROFCHANNELAPSECTIONS, prefiltdata->INSTATLA2_float);
+
+  for (k=0; k<FRAMESAMPLES_HALF; k++) {
+    LP_la[k] = (float)(0.5f*(tempin_ch1[k] + tempin_ch2[k])); /*low pass */
+    HP_la[k] = (double)(0.5f*(tempin_ch1[k] - tempin_ch2[k])); /* high pass */
+  }
+
+
+}/*end of WebRtcIsac_SplitAndFilter */
+
+
+/* Combining */
+
+/* HPstcoeff_out_1 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2}; */
+static const float kHpStCoefOut1Float[4] =
+{-1.99701049409000f, 0.99714204490000f, 0.01701049409000f, -0.01704204490000f};
+
+/* HPstcoeff_out_2 = {a1, a2, b1 - b0 * a1, b2 - b0 * a2}; */
+static const float kHpStCoefOut2Float[4] =
+{-1.98645294509837f, 0.98672435560000f, 0.00645294509837f, -0.00662435560000f};
+
+
+/* Function WebRtcIsac_FilterAndCombine */
+/* This is a decoder function that takes the decimated
+   length FRAMESAMPLES_HALF input low-pass and
+   high-pass signals and creates a reconstructed fullband
+   output signal of length FRAMESAMPLES. WebRtcIsac_FilterAndCombine
+   is the sibling function of WebRtcIsac_SplitAndFilter */
+/* INPUTS:
+   inLP: a length FRAMESAMPLES_HALF array of input low-pass
+   samples.
+   inHP: a length FRAMESAMPLES_HALF array of input high-pass
+   samples.
+   postfiltdata: input data structure containing the filterbank
+   states from the previous decoding iteration.
+   OUTPUTS:
+   Out: a length FRAMESAMPLES array of output reconstructed
+   samples (fullband) based on the input low-pass and
+   high-pass signals.
+   postfiltdata: the input data structure containing the filterbank
+   states is updated for the next decoding iteration */
+void WebRtcIsac_FilterAndCombineFloat(float *InLP,
+                                      float *InHP,
+                                      float *Out,
+                                      PostFiltBankstr *postfiltdata)
+{
+  int k;
+  float tempin_ch1[FRAMESAMPLES+MAX_AR_MODEL_ORDER];
+  float tempin_ch2[FRAMESAMPLES+MAX_AR_MODEL_ORDER];
+  float ftmp, ftmp2;
+
+  /* Form the polyphase signals*/
+  for (k=0;k<FRAMESAMPLES_HALF;k++) {
+    tempin_ch1[k]=InLP[k]+InHP[k]; /* Construct a new upper channel signal*/
+    tempin_ch2[k]=InLP[k]-InHP[k]; /* Construct a new lower channel signal*/
+  }
+
+
+  /* all-pass filter the new upper channel signal. HOWEVER, use the all-pass filter factors
+     that were used as a lower channel at the encoding side.  So at the decoder, the
+     corresponding all-pass filter factors for each channel are swapped.*/
+  WebRtcIsac_AllPassFilter2Float(tempin_ch1, WebRtcIsac_kLowerApFactorsFloat,
+                                 FRAMESAMPLES_HALF, NUMBEROFCHANNELAPSECTIONS,postfiltdata->STATE_0_UPPER_float);
+
+  /* Now, all-pass filter the new lower channel signal. But since all-pass filter factors
+     at the decoder are swapped from the ones at the encoder, the 'upper' channel
+     all-pass filter factors (WebRtcIsac_kUpperApFactorsFloat) are used to filter this new
+     lower channel signal */
+  WebRtcIsac_AllPassFilter2Float(tempin_ch2, WebRtcIsac_kUpperApFactorsFloat,
+                                 FRAMESAMPLES_HALF, NUMBEROFCHANNELAPSECTIONS,postfiltdata->STATE_0_LOWER_float);
+
+
+  /* Merge outputs to form the full length output signal.*/
+  for (k=0;k<FRAMESAMPLES_HALF;k++) {
+    Out[2*k]=tempin_ch2[k];
+    Out[2*k+1]=tempin_ch1[k];
+  }
+
+
+  /* High pass filter */
+
+  for (k=0;k<FRAMESAMPLES;k++) {
+    ftmp2 = Out[k] + kHpStCoefOut1Float[2] * postfiltdata->HPstates1_float[0] +
+        kHpStCoefOut1Float[3] * postfiltdata->HPstates1_float[1];
+    ftmp = Out[k] - kHpStCoefOut1Float[0] * postfiltdata->HPstates1_float[0] -
+        kHpStCoefOut1Float[1] * postfiltdata->HPstates1_float[1];
+    postfiltdata->HPstates1_float[1] = postfiltdata->HPstates1_float[0];
+    postfiltdata->HPstates1_float[0] = ftmp;
+    Out[k] = ftmp2;
+  }
+
+  for (k=0;k<FRAMESAMPLES;k++) {
+    ftmp2 = Out[k] + kHpStCoefOut2Float[2] * postfiltdata->HPstates2_float[0] +
+        kHpStCoefOut2Float[3] * postfiltdata->HPstates2_float[1];
+    ftmp = Out[k] - kHpStCoefOut2Float[0] * postfiltdata->HPstates2_float[0] -
+        kHpStCoefOut2Float[1] * postfiltdata->HPstates2_float[1];
+    postfiltdata->HPstates2_float[1] = postfiltdata->HPstates2_float[0];
+    postfiltdata->HPstates2_float[0] = ftmp;
+    Out[k] = ftmp2;
+  }
+}
diff --git a/modules/audio_coding/codecs/isac/main/source/intialize.c b/modules/audio_coding/codecs/isac/main/source/intialize.c
new file mode 100644
index 0000000..57025c6
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/intialize.c
@@ -0,0 +1,171 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/* encode.c  - Encoding function for the iSAC coder */
+
+#include <math.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+#include "modules/audio_coding/codecs/isac/main/source/codec.h"
+#include "modules/audio_coding/codecs/isac/main/source/pitch_estimator.h"
+
+void WebRtcIsac_InitMasking(MaskFiltstr *maskdata) {
+
+  int k;
+
+  for (k = 0; k < WINLEN; k++) {
+    maskdata->DataBufferLo[k] = 0.0;
+    maskdata->DataBufferHi[k] = 0.0;
+  }
+  for (k = 0; k < ORDERLO+1; k++) {
+    maskdata->CorrBufLo[k] = 0.0;
+    maskdata->PreStateLoF[k] = 0.0;
+    maskdata->PreStateLoG[k] = 0.0;
+    maskdata->PostStateLoF[k] = 0.0;
+    maskdata->PostStateLoG[k] = 0.0;
+  }
+  for (k = 0; k < ORDERHI+1; k++) {
+    maskdata->CorrBufHi[k] = 0.0;
+    maskdata->PreStateHiF[k] = 0.0;
+    maskdata->PreStateHiG[k] = 0.0;
+    maskdata->PostStateHiF[k] = 0.0;
+    maskdata->PostStateHiG[k] = 0.0;
+  }
+
+  maskdata->OldEnergy = 10.0;
+  return;
+}
+
+void WebRtcIsac_InitPreFilterbank(PreFiltBankstr *prefiltdata)
+{
+  int k;
+
+  for (k = 0; k < QLOOKAHEAD; k++) {
+    prefiltdata->INLABUF1[k] = 0;
+    prefiltdata->INLABUF2[k] = 0;
+
+    prefiltdata->INLABUF1_float[k] = 0;
+    prefiltdata->INLABUF2_float[k] = 0;
+  }
+  for (k = 0; k < 2*(QORDER-1); k++) {
+    prefiltdata->INSTAT1[k] = 0;
+    prefiltdata->INSTAT2[k] = 0;
+    prefiltdata->INSTATLA1[k] = 0;
+    prefiltdata->INSTATLA2[k] = 0;
+
+    prefiltdata->INSTAT1_float[k] = 0;
+    prefiltdata->INSTAT2_float[k] = 0;
+    prefiltdata->INSTATLA1_float[k] = 0;
+    prefiltdata->INSTATLA2_float[k] = 0;
+  }
+
+  /* High pass filter states */
+  prefiltdata->HPstates[0] = 0.0;
+  prefiltdata->HPstates[1] = 0.0;
+
+  prefiltdata->HPstates_float[0] = 0.0f;
+  prefiltdata->HPstates_float[1] = 0.0f;
+
+  return;
+}
+
+void WebRtcIsac_InitPostFilterbank(PostFiltBankstr *postfiltdata)
+{
+  int k;
+
+  for (k = 0; k < 2*POSTQORDER; k++) {
+    postfiltdata->STATE_0_LOWER[k] = 0;
+    postfiltdata->STATE_0_UPPER[k] = 0;
+
+    postfiltdata->STATE_0_LOWER_float[k] = 0;
+    postfiltdata->STATE_0_UPPER_float[k] = 0;
+  }
+
+  /* High pass filter states */
+  postfiltdata->HPstates1[0] = 0.0;
+  postfiltdata->HPstates1[1] = 0.0;
+
+  postfiltdata->HPstates2[0] = 0.0;
+  postfiltdata->HPstates2[1] = 0.0;
+
+  postfiltdata->HPstates1_float[0] = 0.0f;
+  postfiltdata->HPstates1_float[1] = 0.0f;
+
+  postfiltdata->HPstates2_float[0] = 0.0f;
+  postfiltdata->HPstates2_float[1] = 0.0f;
+
+  return;
+}
+
+
+void WebRtcIsac_InitPitchFilter(PitchFiltstr *pitchfiltdata)
+{
+  int k;
+
+  for (k = 0; k < PITCH_BUFFSIZE; k++) {
+    pitchfiltdata->ubuf[k] = 0.0;
+  }
+  pitchfiltdata->ystate[0] = 0.0;
+  for (k = 1; k < (PITCH_DAMPORDER); k++) {
+    pitchfiltdata->ystate[k] = 0.0;
+  }
+  pitchfiltdata->oldlagp[0] = 50.0;
+  pitchfiltdata->oldgainp[0] = 0.0;
+}
+
+void WebRtcIsac_InitWeightingFilter(WeightFiltstr *wfdata)
+{
+  int k;
+  double t, dtmp, dtmp2, denum, denum2;
+
+  for (k=0;k<PITCH_WLPCBUFLEN;k++)
+    wfdata->buffer[k]=0.0;
+
+  for (k=0;k<PITCH_WLPCORDER;k++) {
+    wfdata->istate[k]=0.0;
+    wfdata->weostate[k]=0.0;
+    wfdata->whostate[k]=0.0;
+  }
+
+  /* next part should be in Matlab, writing to a global table */
+  t = 0.5;
+  denum = 1.0 / ((double) PITCH_WLPCWINLEN);
+  denum2 = denum * denum;
+  for (k=0;k<PITCH_WLPCWINLEN;k++) {
+    dtmp = PITCH_WLPCASYM * t * denum + (1-PITCH_WLPCASYM) * t * t * denum2;
+    dtmp *= 3.14159265;
+    dtmp2 = sin(dtmp);
+    wfdata->window[k] = dtmp2 * dtmp2;
+    t++;
+  }
+}
+
+/* clear all buffers */
+void WebRtcIsac_InitPitchAnalysis(PitchAnalysisStruct *State)
+{
+  int k;
+
+  for (k = 0; k < PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2; k++)
+    State->dec_buffer[k] = 0.0;
+  for (k = 0; k < 2*ALLPASSSECTIONS+1; k++)
+    State->decimator_state[k] = 0.0;
+  for (k = 0; k < 2; k++)
+    State->hp_state[k] = 0.0;
+  for (k = 0; k < QLOOKAHEAD; k++)
+    State->whitened_buf[k] = 0.0;
+  for (k = 0; k < QLOOKAHEAD; k++)
+    State->inbuf[k] = 0.0;
+
+  WebRtcIsac_InitPitchFilter(&(State->PFstr_wght));
+
+  WebRtcIsac_InitPitchFilter(&(State->PFstr));
+
+  WebRtcIsac_InitWeightingFilter(&(State->Wghtstr));
+}
diff --git a/modules/audio_coding/codecs/isac/main/source/isac.c b/modules/audio_coding/codecs/isac/main/source/isac.c
new file mode 100644
index 0000000..79dc7e2
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/isac.c
@@ -0,0 +1,2374 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * isac.c
+ *
+ * This C file contains the functions for the ISAC API
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/main/include/isac.h"
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "rtc_base/checks.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h"
+#include "modules/audio_coding/codecs/isac/main/source/codec.h"
+#include "modules/audio_coding/codecs/isac/main/source/crc.h"
+#include "modules/audio_coding/codecs/isac/main/source/entropy_coding.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/os_specific_inline.h"
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+#define BIT_MASK_DEC_INIT 0x0001
+#define BIT_MASK_ENC_INIT 0x0002
+
+#define LEN_CHECK_SUM_WORD8     4
+#define MAX_NUM_LAYERS         10
+
+
+/****************************************************************************
+ * UpdatePayloadSizeLimit(...)
+ *
+ * Call this function to update the limit on the payload size. The limit on
+ * payload size might change i) if a user ''directly changes the limit by
+ * calling xxx_setMaxPayloadSize() or xxx_setMaxRate(), or ii) indirectly
+ * when bandwidth is changing. The latter might be the result of bandwidth
+ * adaptation, or direct change of the bottleneck in instantaneous mode.
+ *
+ * This function takes the current overall limit on payload, and translates it
+ * to the limits on lower and upper-band. If the codec is in wideband mode,
+ * then the overall limit and the limit on the lower-band is the same.
+ * Otherwise, a fraction of the limit should be allocated to lower-band
+ * leaving some room for the upper-band bit-stream. That is why an update
+ * of limit is required every time that the bandwidth is changing.
+ *
+ */
+static void UpdatePayloadSizeLimit(ISACMainStruct* instISAC) {
+  int16_t lim30MsPayloadBytes = WEBRTC_SPL_MIN(
+                          (instISAC->maxPayloadSizeBytes),
+                          (instISAC->maxRateBytesPer30Ms));
+  int16_t lim60MsPayloadBytes = WEBRTC_SPL_MIN(
+                          (instISAC->maxPayloadSizeBytes),
+                          (instISAC->maxRateBytesPer30Ms << 1));
+
+  /* The only time that iSAC will have 60 ms
+   * frame-size is when operating in wideband, so
+   * there is no upper-band bit-stream. */
+
+  if (instISAC->bandwidthKHz == isac8kHz) {
+    /* At 8 kHz there is no upper-band bit-stream,
+     * therefore, the lower-band limit is the overall limit. */
+    instISAC->instLB.ISACencLB_obj.payloadLimitBytes60 =
+      lim60MsPayloadBytes;
+    instISAC->instLB.ISACencLB_obj.payloadLimitBytes30 =
+      lim30MsPayloadBytes;
+  } else {
+    /* When in super-wideband, we only have 30 ms frames.
+     * Do a rate allocation for the given limit. */
+    if (lim30MsPayloadBytes > 250) {
+      /* 4/5 to lower-band the rest for upper-band. */
+      instISAC->instLB.ISACencLB_obj.payloadLimitBytes30 =
+        (lim30MsPayloadBytes << 2) / 5;
+    } else if (lim30MsPayloadBytes > 200) {
+      /* For the interval of 200 to 250 the share of
+       * upper-band linearly grows from 20 to 50. */
+      instISAC->instLB.ISACencLB_obj.payloadLimitBytes30 =
+        (lim30MsPayloadBytes << 1) / 5 + 100;
+    } else {
+      /* Allocate only 20 for upper-band. */
+      instISAC->instLB.ISACencLB_obj.payloadLimitBytes30 =
+        lim30MsPayloadBytes - 20;
+    }
+    instISAC->instUB.ISACencUB_obj.maxPayloadSizeBytes =
+      lim30MsPayloadBytes;
+  }
+}
+
+
+/****************************************************************************
+ * UpdateBottleneck(...)
+ *
+ * This function updates the bottleneck only if the codec is operating in
+ * channel-adaptive mode. Furthermore, as the update of bottleneck might
+ * result in an update of bandwidth, therefore, the bottlenech should be
+ * updated just right before the first 10ms of a frame is pushed into encoder.
+ *
+ */
+static void UpdateBottleneck(ISACMainStruct* instISAC) {
+  /* Read the bottleneck from bandwidth estimator for the
+   * first 10 ms audio. This way, if there is a change
+   * in bandwidth, upper and lower-band will be in sync. */
+  if ((instISAC->codingMode == 0) &&
+      (instISAC->instLB.ISACencLB_obj.buffer_index == 0) &&
+      (instISAC->instLB.ISACencLB_obj.frame_nb == 0)) {
+    int32_t bottleneck =
+        WebRtcIsac_GetUplinkBandwidth(&instISAC->bwestimator_obj);
+
+    /* Adding hysteresis when increasing signal bandwidth. */
+    if ((instISAC->bandwidthKHz == isac8kHz)
+        && (bottleneck > 37000)
+        && (bottleneck < 41000)) {
+      bottleneck = 37000;
+    }
+
+    /* Switching from 12 kHz to 16 kHz is not allowed at this revision.
+     * If we let this happen, we have to take care of buffer_index and
+     * the last LPC vector. */
+    if ((instISAC->bandwidthKHz != isac16kHz) &&
+        (bottleneck > 46000)) {
+      bottleneck = 46000;
+    }
+
+    /* We might need a rate allocation. */
+    if (instISAC->encoderSamplingRateKHz == kIsacWideband) {
+      /* Wideband is the only choice we have here. */
+      instISAC->instLB.ISACencLB_obj.bottleneck =
+        (bottleneck > 32000) ? 32000 : bottleneck;
+      instISAC->bandwidthKHz = isac8kHz;
+    } else {
+      /* Do the rate-allocation and get the new bandwidth. */
+      enum ISACBandwidth bandwidth;
+      WebRtcIsac_RateAllocation(bottleneck,
+                                &(instISAC->instLB.ISACencLB_obj.bottleneck),
+                                &(instISAC->instUB.ISACencUB_obj.bottleneck),
+                                &bandwidth);
+      if (bandwidth != isac8kHz) {
+        instISAC->instLB.ISACencLB_obj.new_framelength = 480;
+      }
+      if (bandwidth != instISAC->bandwidthKHz) {
+        /* Bandwidth is changing. */
+        instISAC->bandwidthKHz = bandwidth;
+        UpdatePayloadSizeLimit(instISAC);
+        if (bandwidth == isac12kHz) {
+          instISAC->instLB.ISACencLB_obj.buffer_index = 0;
+        }
+        /* Currently we don't let the bandwidth to switch to 16 kHz
+         * if in adaptive mode. If we let this happen, we have to take
+         * care of buffer_index and the last LPC vector. */
+      }
+    }
+  }
+}
+
+
+/****************************************************************************
+ * GetSendBandwidthInfo(...)
+ *
+ * This is called to get the bandwidth info. This info is the bandwidth and
+ * the jitter of 'there-to-here' channel, estimated 'here.' These info
+ * is signaled in an in-band fashion to the other side.
+ *
+ * The call to the bandwidth estimator triggers a recursive averaging which
+ * has to be synchronized between encoder & decoder, therefore, the call to
+ * BWE should be once per packet. As the BWE info is inserted into bit-stream
+ * We need a valid info right before the encodeLB function is going to
+ * generate a bit-stream. That is when lower-band buffer has already 20ms
+ * of audio, and the 3rd block of 10ms is going to be injected into encoder.
+ *
+ * Inputs:
+ *         - instISAC          : iSAC instance.
+ *
+ * Outputs:
+ *         - bandwidthIndex    : an index which has to be encoded in
+ *                               lower-band bit-stream, indicating the
+ *                               bandwidth of there-to-here channel.
+ *         - jitterInfo        : this indicates if the jitter is high
+ *                               or low and it is encoded in upper-band
+ *                               bit-stream.
+ *
+ */
+static void GetSendBandwidthInfo(ISACMainStruct* instISAC,
+                                 int16_t* bandwidthIndex,
+                                 int16_t* jitterInfo) {
+  if ((instISAC->instLB.ISACencLB_obj.buffer_index ==
+      (FRAMESAMPLES_10ms << 1)) &&
+      (instISAC->instLB.ISACencLB_obj.frame_nb == 0)) {
+    /* Bandwidth estimation and coding. */
+    WebRtcIsac_GetDownlinkBwJitIndexImpl(&(instISAC->bwestimator_obj),
+                                         bandwidthIndex, jitterInfo,
+                                         instISAC->decoderSamplingRateKHz);
+  }
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_AssignSize(...)
+ *
+ * This function returns the size of the ISAC instance, so that the instance
+ * can be created out side iSAC.
+ *
+ * Output:
+ *        - sizeinbytes       : number of bytes needed to allocate for the
+ *                              instance.
+ *
+ * Return value               : 0 - Ok
+ *                             -1 - Error
+ */
+int16_t WebRtcIsac_AssignSize(int* sizeInBytes) {
+  *sizeInBytes = sizeof(ISACMainStruct) * 2 / sizeof(int16_t);
+  return 0;
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_Assign(...)
+ *
+ * This function assigns the memory already created to the ISAC instance.
+ *
+ * Input:
+ *        - ISAC_main_inst    : address of the pointer to the coder instance.
+ *        - instISAC_Addr     : the already allocated memory, where we put the
+ *                              iSAC structure.
+ *
+ * Return value               : 0 - Ok
+ *                             -1 - Error
+ */
+int16_t WebRtcIsac_Assign(ISACStruct** ISAC_main_inst,
+                          void* instISAC_Addr) {
+  if (instISAC_Addr != NULL) {
+    ISACMainStruct* instISAC = (ISACMainStruct*)instISAC_Addr;
+    instISAC->errorCode = 0;
+    instISAC->initFlag = 0;
+
+    /* Assign the address. */
+    *ISAC_main_inst = (ISACStruct*)instISAC_Addr;
+
+    /* Default is wideband. */
+    instISAC->encoderSamplingRateKHz = kIsacWideband;
+    instISAC->decoderSamplingRateKHz = kIsacWideband;
+    instISAC->bandwidthKHz           = isac8kHz;
+    instISAC->in_sample_rate_hz = 16000;
+
+    WebRtcIsac_InitTransform(&instISAC->transform_tables);
+    return 0;
+  } else {
+    return -1;
+  }
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_Create(...)
+ *
+ * This function creates an ISAC instance, which will contain the state
+ * information for one coding/decoding channel.
+ *
+ * Input:
+ *        - ISAC_main_inst    : address of the pointer to the coder instance.
+ *
+ * Return value               : 0 - Ok
+ *                             -1 - Error
+ */
+int16_t WebRtcIsac_Create(ISACStruct** ISAC_main_inst) {
+  ISACMainStruct* instISAC;
+
+  if (ISAC_main_inst != NULL) {
+    instISAC = (ISACMainStruct*)malloc(sizeof(ISACMainStruct));
+    *ISAC_main_inst = (ISACStruct*)instISAC;
+    if (*ISAC_main_inst != NULL) {
+      instISAC->errorCode = 0;
+      instISAC->initFlag = 0;
+      /* Default is wideband. */
+      instISAC->bandwidthKHz = isac8kHz;
+      instISAC->encoderSamplingRateKHz = kIsacWideband;
+      instISAC->decoderSamplingRateKHz = kIsacWideband;
+      instISAC->in_sample_rate_hz = 16000;
+
+      WebRtcIsac_InitTransform(&instISAC->transform_tables);
+      return 0;
+    } else {
+      return -1;
+    }
+  } else {
+    return -1;
+  }
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_Free(...)
+ *
+ * This function frees the ISAC instance created at the beginning.
+ *
+ * Input:
+ *        - ISAC_main_inst    : a ISAC instance.
+ *
+ * Return value               : 0 - Ok
+ *                             -1 - Error
+ */
+int16_t WebRtcIsac_Free(ISACStruct* ISAC_main_inst) {
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+  free(instISAC);
+  return 0;
+}
+
+
+/****************************************************************************
+ * EncoderInitLb(...) - internal function for initialization of
+ *                                Lower Band
+ * EncoderInitUb(...) - internal function for initialization of
+ *                                Upper Band
+ * WebRtcIsac_EncoderInit(...) - API function
+ *
+ * This function initializes a ISAC instance prior to the encoder calls.
+ *
+ * Input:
+ *        - ISAC_main_inst    : ISAC instance.
+ *        - CodingMode        : 0 -> Bit rate and frame length are automatically
+ *                                 adjusted to available bandwidth on
+ *                                 transmission channel, applicable just to
+ *                                 wideband mode.
+ *                              1 -> User sets a frame length and a target bit
+ *                                 rate which is taken as the maximum
+ *                                 short-term average bit rate.
+ *
+ * Return value               :  0 - Ok
+ *                              -1 - Error
+ */
+static int16_t EncoderInitLb(ISACLBStruct* instLB,
+                             int16_t codingMode,
+                             enum IsacSamplingRate sampRate) {
+  int16_t statusInit = 0;
+  int k;
+
+  /* Init stream vector to zero */
+  for (k = 0; k < STREAM_SIZE_MAX_60; k++) {
+    instLB->ISACencLB_obj.bitstr_obj.stream[k] = 0;
+  }
+
+  if ((codingMode == 1) || (sampRate == kIsacSuperWideband)) {
+    /* 30 ms frame-size if either in super-wideband or
+     * instantaneous mode (I-mode). */
+    instLB->ISACencLB_obj.new_framelength = 480;
+  } else {
+    instLB->ISACencLB_obj.new_framelength = INITIAL_FRAMESAMPLES;
+  }
+
+  WebRtcIsac_InitMasking(&instLB->ISACencLB_obj.maskfiltstr_obj);
+  WebRtcIsac_InitPreFilterbank(&instLB->ISACencLB_obj.prefiltbankstr_obj);
+  WebRtcIsac_InitPitchFilter(&instLB->ISACencLB_obj.pitchfiltstr_obj);
+  WebRtcIsac_InitPitchAnalysis(
+    &instLB->ISACencLB_obj.pitchanalysisstr_obj);
+
+  instLB->ISACencLB_obj.buffer_index = 0;
+  instLB->ISACencLB_obj.frame_nb = 0;
+  /* Default for I-mode. */
+  instLB->ISACencLB_obj.bottleneck = 32000;
+  instLB->ISACencLB_obj.current_framesamples = 0;
+  instLB->ISACencLB_obj.s2nr = 0;
+  instLB->ISACencLB_obj.payloadLimitBytes30 = STREAM_SIZE_MAX_30;
+  instLB->ISACencLB_obj.payloadLimitBytes60 = STREAM_SIZE_MAX_60;
+  instLB->ISACencLB_obj.maxPayloadBytes = STREAM_SIZE_MAX_60;
+  instLB->ISACencLB_obj.maxRateInBytes = STREAM_SIZE_MAX_30;
+  instLB->ISACencLB_obj.enforceFrameSize = 0;
+  /* Invalid value prevents getRedPayload to
+     run before encoder is called. */
+  instLB->ISACencLB_obj.lastBWIdx            = -1;
+  return statusInit;
+}
+
+static int16_t EncoderInitUb(ISACUBStruct* instUB,
+                             int16_t bandwidth) {
+  int16_t statusInit = 0;
+  int k;
+
+  /* Init stream vector to zero. */
+  for (k = 0; k < STREAM_SIZE_MAX_60; k++) {
+    instUB->ISACencUB_obj.bitstr_obj.stream[k] = 0;
+  }
+
+  WebRtcIsac_InitMasking(&instUB->ISACencUB_obj.maskfiltstr_obj);
+  WebRtcIsac_InitPreFilterbank(&instUB->ISACencUB_obj.prefiltbankstr_obj);
+
+  if (bandwidth == isac16kHz) {
+    instUB->ISACencUB_obj.buffer_index = LB_TOTAL_DELAY_SAMPLES;
+  } else {
+    instUB->ISACencUB_obj.buffer_index = 0;
+  }
+  /* Default for I-mode. */
+  instUB->ISACencUB_obj.bottleneck = 32000;
+  /* These store the limits for the wideband + super-wideband bit-stream. */
+  instUB->ISACencUB_obj.maxPayloadSizeBytes = STREAM_SIZE_MAX_30 << 1;
+  /* This has to be updated after each lower-band encoding to guarantee
+   * a correct payload-limitation. */
+  instUB->ISACencUB_obj.numBytesUsed = 0;
+  memset(instUB->ISACencUB_obj.data_buffer_float, 0,
+         (MAX_FRAMESAMPLES + LB_TOTAL_DELAY_SAMPLES) * sizeof(float));
+
+  memcpy(&(instUB->ISACencUB_obj.lastLPCVec),
+         WebRtcIsac_kMeanLarUb16, sizeof(double) * UB_LPC_ORDER);
+
+  return statusInit;
+}
+
+
+int16_t WebRtcIsac_EncoderInit(ISACStruct* ISAC_main_inst,
+                               int16_t codingMode) {
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+  int16_t status;
+
+  if ((codingMode != 0) && (codingMode != 1)) {
+    instISAC->errorCode = ISAC_DISALLOWED_CODING_MODE;
+    return -1;
+  }
+  /* Default bottleneck. */
+  instISAC->bottleneck = MAX_ISAC_BW;
+
+  if (instISAC->encoderSamplingRateKHz == kIsacWideband) {
+    instISAC->bandwidthKHz = isac8kHz;
+    instISAC->maxPayloadSizeBytes = STREAM_SIZE_MAX_60;
+    instISAC->maxRateBytesPer30Ms = STREAM_SIZE_MAX_30;
+  } else {
+    instISAC->bandwidthKHz = isac16kHz;
+    instISAC->maxPayloadSizeBytes = STREAM_SIZE_MAX;
+    instISAC->maxRateBytesPer30Ms = STREAM_SIZE_MAX;
+  }
+
+  /* Channel-adaptive = 0; Instantaneous (Channel-independent) = 1. */
+  instISAC->codingMode = codingMode;
+
+  WebRtcIsac_InitBandwidthEstimator(&instISAC->bwestimator_obj,
+                                    instISAC->encoderSamplingRateKHz,
+                                    instISAC->decoderSamplingRateKHz);
+
+  WebRtcIsac_InitRateModel(&instISAC->rate_data_obj);
+  /* Default for I-mode. */
+  instISAC->MaxDelay = 10.0;
+
+  status = EncoderInitLb(&instISAC->instLB, codingMode,
+                         instISAC->encoderSamplingRateKHz);
+  if (status < 0) {
+    instISAC->errorCode = -status;
+    return -1;
+  }
+
+  if (instISAC->encoderSamplingRateKHz == kIsacSuperWideband) {
+    /* Initialize encoder filter-bank. */
+    memset(instISAC->analysisFBState1, 0,
+           FB_STATE_SIZE_WORD32 * sizeof(int32_t));
+    memset(instISAC->analysisFBState2, 0,
+           FB_STATE_SIZE_WORD32 * sizeof(int32_t));
+
+    status = EncoderInitUb(&(instISAC->instUB),
+                           instISAC->bandwidthKHz);
+    if (status < 0) {
+      instISAC->errorCode = -status;
+      return -1;
+    }
+  }
+  /* Initialization is successful, set the flag. */
+  instISAC->initFlag |= BIT_MASK_ENC_INIT;
+  return 0;
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_Encode(...)
+ *
+ * This function encodes 10ms frame(s) and inserts it into a package.
+ * Input speech length has to be 160 samples (10ms). The encoder buffers those
+ * 10ms frames until it reaches the chosen Framesize (480 or 960 samples
+ * corresponding to 30 or 60 ms frames), and then proceeds to the encoding.
+ *
+ * Input:
+ *        - ISAC_main_inst    : ISAC instance.
+ *        - speechIn          : input speech vector.
+ *
+ * Output:
+ *        - encoded           : the encoded data vector
+ *
+ * Return value:
+ *                            : >0 - Length (in bytes) of coded data
+ *                            :  0 - The buffer didn't reach the chosen
+ *                                  frameSize so it keeps buffering speech
+ *                                 samples.
+ *                            : -1 - Error
+ */
+int WebRtcIsac_Encode(ISACStruct* ISAC_main_inst,
+                      const int16_t* speechIn,
+                      uint8_t* encoded) {
+  float inFrame[FRAMESAMPLES_10ms];
+  int16_t speechInLB[FRAMESAMPLES_10ms];
+  int16_t speechInUB[FRAMESAMPLES_10ms];
+  int streamLenLB = 0;
+  int streamLenUB = 0;
+  int streamLen = 0;
+  size_t k = 0;
+  uint8_t garbageLen = 0;
+  int32_t bottleneck = 0;
+  int16_t bottleneckIdx = 0;
+  int16_t jitterInfo = 0;
+
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+  ISACLBStruct* instLB = &(instISAC->instLB);
+  ISACUBStruct* instUB = &(instISAC->instUB);
+
+  /* Check if encoder initiated. */
+  if ((instISAC->initFlag & BIT_MASK_ENC_INIT) !=
+      BIT_MASK_ENC_INIT) {
+    instISAC->errorCode = ISAC_ENCODER_NOT_INITIATED;
+    return -1;
+  }
+
+  if (instISAC->encoderSamplingRateKHz == kIsacSuperWideband) {
+    WebRtcSpl_AnalysisQMF(speechIn, SWBFRAMESAMPLES_10ms, speechInLB,
+                          speechInUB, instISAC->analysisFBState1,
+                          instISAC->analysisFBState2);
+
+    /* Convert from fixed to floating point. */
+    for (k = 0; k < FRAMESAMPLES_10ms; k++) {
+      inFrame[k] = (float)speechInLB[k];
+    }
+  } else {
+    for (k = 0; k < FRAMESAMPLES_10ms; k++) {
+      inFrame[k] = (float) speechIn[k];
+    }
+  }
+
+  /* Add some noise to avoid denormal numbers. */
+  inFrame[0] += (float)1.23455334e-3;
+  inFrame[1] -= (float)2.04324239e-3;
+  inFrame[2] += (float)1.90854954e-3;
+  inFrame[9] += (float)1.84854878e-3;
+
+  /* This function will update the bottleneck if required. */
+  UpdateBottleneck(instISAC);
+
+  /* Get the bandwith information which has to be sent to the other side. */
+  GetSendBandwidthInfo(instISAC, &bottleneckIdx, &jitterInfo);
+
+  /* Encode lower-band. */
+  streamLenLB = WebRtcIsac_EncodeLb(&instISAC->transform_tables,
+                                    inFrame, &instLB->ISACencLB_obj,
+                                    instISAC->codingMode, bottleneckIdx);
+  if (streamLenLB < 0) {
+    return -1;
+  }
+
+  if (instISAC->encoderSamplingRateKHz == kIsacSuperWideband) {
+    instUB = &(instISAC->instUB);
+
+    /* Convert to float. */
+    for (k = 0; k < FRAMESAMPLES_10ms; k++) {
+      inFrame[k] = (float) speechInUB[k];
+    }
+
+    /* Add some noise to avoid denormal numbers. */
+    inFrame[0] += (float)1.23455334e-3;
+    inFrame[1] -= (float)2.04324239e-3;
+    inFrame[2] += (float)1.90854954e-3;
+    inFrame[9] += (float)1.84854878e-3;
+
+    /* Tell to upper-band the number of bytes used so far.
+     * This is for payload limitation. */
+    instUB->ISACencUB_obj.numBytesUsed =
+        (int16_t)(streamLenLB + 1 + LEN_CHECK_SUM_WORD8);
+    /* Encode upper-band. */
+    switch (instISAC->bandwidthKHz) {
+      case isac12kHz: {
+        streamLenUB = WebRtcIsac_EncodeUb12(&instISAC->transform_tables,
+                                            inFrame, &instUB->ISACencUB_obj,
+                                            jitterInfo);
+        break;
+      }
+      case isac16kHz: {
+        streamLenUB = WebRtcIsac_EncodeUb16(&instISAC->transform_tables,
+                                            inFrame, &instUB->ISACencUB_obj,
+                                            jitterInfo);
+        break;
+      }
+      case isac8kHz: {
+        streamLenUB = 0;
+        break;
+      }
+    }
+
+    if ((streamLenUB < 0) && (streamLenUB != -ISAC_PAYLOAD_LARGER_THAN_LIMIT)) {
+      /* An error has happened but this is not the error due to a
+       * bit-stream larger than the limit. */
+      return -1;
+    }
+
+    if (streamLenLB == 0) {
+      return 0;
+    }
+
+    /* One byte is allocated for the length. According to older decoders
+       so the length bit-stream plus one byte for size and
+       LEN_CHECK_SUM_WORD8 for the checksum should be less than or equal
+       to 255. */
+    if ((streamLenUB > (255 - (LEN_CHECK_SUM_WORD8 + 1))) ||
+        (streamLenUB == -ISAC_PAYLOAD_LARGER_THAN_LIMIT)) {
+      /* We have got a too long bit-stream we skip the upper-band
+       * bit-stream for this frame. */
+      streamLenUB = 0;
+    }
+
+    memcpy(encoded, instLB->ISACencLB_obj.bitstr_obj.stream, streamLenLB);
+    streamLen = streamLenLB;
+    if (streamLenUB > 0) {
+      encoded[streamLenLB] = (uint8_t)(streamLenUB + 1 + LEN_CHECK_SUM_WORD8);
+      memcpy(&encoded[streamLenLB + 1],
+             instUB->ISACencUB_obj.bitstr_obj.stream,
+             streamLenUB);
+      streamLen += encoded[streamLenLB];
+    } else {
+      encoded[streamLenLB] = 0;
+    }
+  } else {
+    if (streamLenLB == 0) {
+      return 0;
+    }
+    memcpy(encoded, instLB->ISACencLB_obj.bitstr_obj.stream, streamLenLB);
+    streamLenUB = 0;
+    streamLen = streamLenLB;
+  }
+
+  /* Add Garbage if required. */
+  bottleneck = WebRtcIsac_GetUplinkBandwidth(&instISAC->bwestimator_obj);
+  if (instISAC->codingMode == 0) {
+    int minBytes;
+    int limit;
+    uint8_t* ptrGarbage;
+
+    instISAC->MaxDelay = (double)WebRtcIsac_GetUplinkMaxDelay(
+                           &instISAC->bwestimator_obj);
+
+    /* Update rate model and get minimum number of bytes in this packet. */
+    minBytes = WebRtcIsac_GetMinBytes(
+        &(instISAC->rate_data_obj), streamLen,
+        instISAC->instLB.ISACencLB_obj.current_framesamples, bottleneck,
+        instISAC->MaxDelay, instISAC->bandwidthKHz);
+
+    /* Make sure MinBytes does not exceed packet size limit. */
+    if (instISAC->bandwidthKHz == isac8kHz) {
+      if (instLB->ISACencLB_obj.current_framesamples == FRAMESAMPLES) {
+        limit = instLB->ISACencLB_obj.payloadLimitBytes30;
+      } else {
+        limit = instLB->ISACencLB_obj.payloadLimitBytes60;
+      }
+    } else {
+      limit = instUB->ISACencUB_obj.maxPayloadSizeBytes;
+    }
+    minBytes = (minBytes > limit) ? limit : minBytes;
+
+    /* Make sure we don't allow more than 255 bytes of garbage data.
+     * We store the length of the garbage data in 8 bits in the bitstream,
+     * 255 is the max garbage length we can signal using 8 bits. */
+    if ((instISAC->bandwidthKHz == isac8kHz) ||
+        (streamLenUB == 0)) {
+      ptrGarbage = &encoded[streamLenLB];
+      limit = streamLen + 255;
+    } else {
+      ptrGarbage = &encoded[streamLenLB + 1 + streamLenUB];
+      limit = streamLen + (255 - encoded[streamLenLB]);
+    }
+    minBytes = (minBytes > limit) ? limit : minBytes;
+
+    garbageLen = (minBytes > streamLen) ? (uint8_t)(minBytes - streamLen) : 0;
+
+    /* Save data for creation of multiple bit-streams. */
+    /* If bit-stream too short then add garbage at the end. */
+    if (garbageLen > 0) {
+      /* Overwrite the garbage area to avoid leaking possibly sensitive data
+         over the network. This also makes the output deterministic. */
+      memset(ptrGarbage, 0, garbageLen);
+
+      /* For a correct length of the upper-band bit-stream together
+       * with the garbage. Garbage is embeded in upper-band bit-stream.
+       * That is the only way to preserve backward compatibility. */
+      if ((instISAC->bandwidthKHz == isac8kHz) ||
+          (streamLenUB == 0)) {
+        encoded[streamLenLB] = garbageLen;
+      } else {
+        encoded[streamLenLB] += garbageLen;
+        /* Write the length of the garbage at the end of the upper-band
+         *  bit-stream, if exists. This helps for sanity check. */
+        encoded[streamLenLB + 1 + streamLenUB] = garbageLen;
+
+      }
+      streamLen += garbageLen;
+    }
+  } else {
+    /* update rate model */
+    WebRtcIsac_UpdateRateModel(
+        &instISAC->rate_data_obj, streamLen,
+        instISAC->instLB.ISACencLB_obj.current_framesamples, bottleneck);
+    garbageLen = 0;
+  }
+
+  /* Generate CRC if required. */
+  if ((instISAC->bandwidthKHz != isac8kHz) && (streamLenUB > 0)) {
+    uint32_t crc;
+
+    WebRtcIsac_GetCrc((int16_t*)(&(encoded[streamLenLB + 1])),
+                      streamLenUB + garbageLen, &crc);
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+    for (k = 0; k < LEN_CHECK_SUM_WORD8; k++) {
+      encoded[streamLen - LEN_CHECK_SUM_WORD8 + k] =
+          (uint8_t)(crc >> (24 - k * 8));
+    }
+#else
+    memcpy(&encoded[streamLenLB + streamLenUB + 1], &crc, LEN_CHECK_SUM_WORD8);
+#endif
+  }
+  return streamLen;
+}
+
+
+/******************************************************************************
+ * WebRtcIsac_GetNewBitStream(...)
+ *
+ * This function returns encoded data, with the recieved bwe-index in the
+ * stream. If the rate is set to a value less than bottleneck of codec
+ * the new bistream will be re-encoded with the given target rate.
+ * It should always return a complete packet, i.e. only called once
+ * even for 60 msec frames.
+ *
+ * NOTE 1! This function does not write in the ISACStruct, it is not allowed.
+ * NOTE 2! Rates larger than the bottleneck of the codec will be limited
+ *         to the current bottleneck.
+ *
+ * Input:
+ *        - ISAC_main_inst    : ISAC instance.
+ *        - bweIndex          : Index of bandwidth estimate to put in new
+ *                              bitstream
+ *        - rate              : target rate of the transcoder is bits/sec.
+ *                              Valid values are the accepted rate in iSAC,
+ *                              i.e. 10000 to 56000.
+ *
+ * Output:
+ *        - encoded           : The encoded data vector
+ *
+ * Return value               : >0 - Length (in bytes) of coded data
+ *                              -1 - Error  or called in SWB mode
+ *                                 NOTE! No error code is written to
+ *                                 the struct since it is only allowed to read
+ *                                 the struct.
+ */
+int16_t WebRtcIsac_GetNewBitStream(ISACStruct*  ISAC_main_inst,
+                                   int16_t  bweIndex,
+                                   int16_t  jitterInfo,
+                                   int32_t  rate,
+                                   uint8_t* encoded,
+                                   int16_t  isRCU) {
+  Bitstr iSACBitStreamInst;   /* Local struct for bitstream handling */
+  int16_t streamLenLB;
+  int16_t streamLenUB;
+  int16_t totalStreamLen;
+  double gain2;
+  double gain1;
+  float scale;
+  enum ISACBandwidth bandwidthKHz;
+  double rateLB;
+  double rateUB;
+  int32_t currentBN;
+  uint32_t crc;
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+  int16_t  k;
+#endif
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+
+  if ((instISAC->initFlag & BIT_MASK_ENC_INIT) !=
+      BIT_MASK_ENC_INIT) {
+    return -1;
+  }
+
+  /* Get the bottleneck of this iSAC and limit the
+   * given rate to the current bottleneck. */
+  WebRtcIsac_GetUplinkBw(ISAC_main_inst, &currentBN);
+  if (rate > currentBN) {
+    rate = currentBN;
+  }
+
+  if (WebRtcIsac_RateAllocation(rate, &rateLB, &rateUB, &bandwidthKHz) < 0) {
+    return -1;
+  }
+
+  /* Cannot transcode from 16 kHz to 12 kHz. */
+  if ((bandwidthKHz == isac12kHz) &&
+      (instISAC->bandwidthKHz == isac16kHz)) {
+    return -1;
+  }
+
+  /* A gain [dB] for the given rate. */
+  gain1 = WebRtcIsac_GetSnr(
+      rateLB, instISAC->instLB.ISACencLB_obj.current_framesamples);
+  /* The gain [dB] of this iSAC. */
+  gain2 = WebRtcIsac_GetSnr(
+      instISAC->instLB.ISACencLB_obj.bottleneck,
+      instISAC->instLB.ISACencLB_obj.current_framesamples);
+
+  /* Scale is the ratio of two gains in normal domain. */
+  scale = (float)pow(10, (gain1 - gain2) / 20.0);
+  /* Change the scale if this is a RCU bit-stream. */
+  scale = (isRCU) ? (scale * RCU_TRANSCODING_SCALE) : scale;
+
+  streamLenLB = WebRtcIsac_EncodeStoredDataLb(
+                  &instISAC->instLB.ISACencLB_obj.SaveEnc_obj,
+                  &iSACBitStreamInst, bweIndex, scale);
+
+  if (streamLenLB < 0) {
+    return -1;
+  }
+
+  /* Convert from bytes to int16_t. */
+  memcpy(encoded, iSACBitStreamInst.stream, streamLenLB);
+
+  if (bandwidthKHz == isac8kHz) {
+    return streamLenLB;
+  }
+
+  totalStreamLen = streamLenLB;
+  /* super-wideband is always at 30ms.
+   * These gains are in dB.
+   * Gain for the given rate. */
+  gain1 = WebRtcIsac_GetSnr(rateUB, FRAMESAMPLES);
+  /* Gain of this iSAC */
+  gain2 = WebRtcIsac_GetSnr(instISAC->instUB.ISACencUB_obj.bottleneck,
+                            FRAMESAMPLES);
+
+  /* Scale is the ratio of two gains in normal domain. */
+  scale = (float)pow(10, (gain1 - gain2) / 20.0);
+
+  /* Change the scale if this is a RCU bit-stream. */
+  scale = (isRCU)? (scale * RCU_TRANSCODING_SCALE_UB) : scale;
+
+  streamLenUB = WebRtcIsac_EncodeStoredDataUb(
+                  &(instISAC->instUB.ISACencUB_obj.SaveEnc_obj),
+                  &iSACBitStreamInst, jitterInfo, scale,
+                  instISAC->bandwidthKHz);
+
+  if (streamLenUB < 0) {
+    return -1;
+  }
+
+  if (streamLenUB + 1 + LEN_CHECK_SUM_WORD8 > 255) {
+    return streamLenLB;
+  }
+
+  totalStreamLen = streamLenLB + streamLenUB + 1 + LEN_CHECK_SUM_WORD8;
+  encoded[streamLenLB] = streamLenUB + 1 + LEN_CHECK_SUM_WORD8;
+
+  memcpy(&encoded[streamLenLB + 1], iSACBitStreamInst.stream,
+         streamLenUB);
+
+  WebRtcIsac_GetCrc((int16_t*)(&(encoded[streamLenLB + 1])),
+                    streamLenUB, &crc);
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+  for (k = 0; k < LEN_CHECK_SUM_WORD8; k++) {
+    encoded[totalStreamLen - LEN_CHECK_SUM_WORD8 + k] =
+      (uint8_t)((crc >> (24 - k * 8)) & 0xFF);
+  }
+#else
+  memcpy(&encoded[streamLenLB + streamLenUB + 1], &crc,
+         LEN_CHECK_SUM_WORD8);
+#endif
+  return totalStreamLen;
+}
+
+
+/****************************************************************************
+ * DecoderInitLb(...) - internal function for initialization of
+ *                                Lower Band
+ * DecoderInitUb(...) - internal function for initialization of
+ *                                Upper Band
+ * WebRtcIsac_DecoderInit(...) - API function
+ *
+ * This function initializes a ISAC instance prior to the decoder calls.
+ *
+ * Input:
+ *        - ISAC_main_inst    : ISAC instance.
+ */
+static void DecoderInitLb(ISACLBStruct* instISAC) {
+  int i;
+  /* Initialize stream vector to zero. */
+  for (i = 0; i < STREAM_SIZE_MAX_60; i++) {
+    instISAC->ISACdecLB_obj.bitstr_obj.stream[i] = 0;
+  }
+
+  WebRtcIsac_InitMasking(&instISAC->ISACdecLB_obj.maskfiltstr_obj);
+  WebRtcIsac_InitPostFilterbank(
+    &instISAC->ISACdecLB_obj.postfiltbankstr_obj);
+  WebRtcIsac_InitPitchFilter(&instISAC->ISACdecLB_obj.pitchfiltstr_obj);
+}
+
+static void DecoderInitUb(ISACUBStruct* instISAC) {
+  int i;
+  /* Init stream vector to zero */
+  for (i = 0; i < STREAM_SIZE_MAX_60; i++) {
+    instISAC->ISACdecUB_obj.bitstr_obj.stream[i] = 0;
+  }
+
+  WebRtcIsac_InitMasking(&instISAC->ISACdecUB_obj.maskfiltstr_obj);
+  WebRtcIsac_InitPostFilterbank(
+    &instISAC->ISACdecUB_obj.postfiltbankstr_obj);
+}
+
+void WebRtcIsac_DecoderInit(ISACStruct* ISAC_main_inst) {
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+
+  DecoderInitLb(&instISAC->instLB);
+  if (instISAC->decoderSamplingRateKHz == kIsacSuperWideband) {
+    memset(instISAC->synthesisFBState1, 0,
+           FB_STATE_SIZE_WORD32 * sizeof(int32_t));
+    memset(instISAC->synthesisFBState2, 0,
+           FB_STATE_SIZE_WORD32 * sizeof(int32_t));
+    DecoderInitUb(&(instISAC->instUB));
+  }
+  if ((instISAC->initFlag & BIT_MASK_ENC_INIT) != BIT_MASK_ENC_INIT) {
+    WebRtcIsac_InitBandwidthEstimator(&instISAC->bwestimator_obj,
+                                      instISAC->encoderSamplingRateKHz,
+                                      instISAC->decoderSamplingRateKHz);
+  }
+  instISAC->initFlag |= BIT_MASK_DEC_INIT;
+  instISAC->resetFlag_8kHz = 0;
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_UpdateBwEstimate(...)
+ *
+ * This function updates the estimate of the bandwidth.
+ *
+ * NOTE:
+ * The estimates of bandwidth is not valid if the sample rate of the far-end
+ * encoder is set to 48 kHz and send timestamps are increamented according to
+ * 48 kHz sampling rate.
+ *
+ * Input:
+ *        - ISAC_main_inst    : ISAC instance.
+ *        - encoded           : encoded ISAC frame(s).
+ *        - packet_size       : size of the packet.
+ *        - rtp_seq_number    : the RTP number of the packet.
+ *        - arr_ts            : the arrival time of the packet (from NetEq)
+ *                              in samples.
+ *
+ * Return value               :  0 - Ok
+ *                              -1 - Error
+ */
+int16_t WebRtcIsac_UpdateBwEstimate(ISACStruct* ISAC_main_inst,
+                                    const uint8_t* encoded,
+                                    size_t packet_size,
+                                    uint16_t rtp_seq_number,
+                                    uint32_t send_ts,
+                                    uint32_t arr_ts) {
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+  Bitstr streamdata;
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+  int k;
+#endif
+  int16_t err;
+
+  /* Check if decoder initiated. */
+  if ((instISAC->initFlag & BIT_MASK_DEC_INIT) != BIT_MASK_DEC_INIT) {
+    instISAC->errorCode = ISAC_DECODER_NOT_INITIATED;
+    return -1;
+  }
+
+  /* Check that the size of the packet is valid, and if not return without
+   * updating the bandwidth estimate. A valid size is at least 10 bytes. */
+  if (packet_size < 10) {
+    /* Return error code if the packet length is null. */
+    instISAC->errorCode = ISAC_EMPTY_PACKET;
+    return -1;
+  }
+
+  WebRtcIsac_ResetBitstream(&(streamdata));
+
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+  for (k = 0; k < 10; k++) {
+    uint16_t ek = ((const uint16_t*)encoded)[k >> 1];
+    streamdata.stream[k] = (uint8_t)((ek >> ((k & 1) << 3)) & 0xff);
+  }
+#else
+  memcpy(streamdata.stream, encoded, 10);
+#endif
+
+  err = WebRtcIsac_EstimateBandwidth(&instISAC->bwestimator_obj, &streamdata,
+                                     packet_size, rtp_seq_number, send_ts,
+                                     arr_ts, instISAC->encoderSamplingRateKHz,
+                                     instISAC->decoderSamplingRateKHz);
+  if (err < 0) {
+    /* Return error code if something went wrong. */
+    instISAC->errorCode = -err;
+    return -1;
+  }
+  return 0;
+}
+
+static int Decode(ISACStruct* ISAC_main_inst,
+                  const uint8_t* encoded,
+                  size_t lenEncodedBytes,
+                  int16_t* decoded,
+                  int16_t* speechType,
+                  int16_t isRCUPayload) {
+  /* Number of samples (480 or 960), output from decoder
+     that were actually used in the encoder/decoder
+     (determined on the fly). */
+  int16_t numSamplesLB;
+  int16_t numSamplesUB;
+  int16_t speechIdx;
+  float outFrame[MAX_FRAMESAMPLES];
+  int16_t outFrameLB[MAX_FRAMESAMPLES];
+  int16_t outFrameUB[MAX_FRAMESAMPLES];
+  int numDecodedBytesLBint;
+  size_t numDecodedBytesLB;
+  int numDecodedBytesUB;
+  size_t lenEncodedLBBytes;
+  int16_t validChecksum = 1;
+  int16_t k;
+  uint16_t numLayer;
+  size_t totSizeBytes;
+  int16_t err;
+
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+  ISACUBDecStruct* decInstUB = &(instISAC->instUB.ISACdecUB_obj);
+  ISACLBDecStruct* decInstLB = &(instISAC->instLB.ISACdecLB_obj);
+
+  /* Check if decoder initiated. */
+  if ((instISAC->initFlag & BIT_MASK_DEC_INIT) !=
+      BIT_MASK_DEC_INIT) {
+    instISAC->errorCode = ISAC_DECODER_NOT_INITIATED;
+    return -1;
+  }
+
+  if (lenEncodedBytes == 0) {
+    /* return error code if the packet length is null. */
+    instISAC->errorCode = ISAC_EMPTY_PACKET;
+    return -1;
+  }
+
+  /* The size of the encoded lower-band is bounded by
+   * STREAM_SIZE_MAX. If a payload with the size larger than STREAM_SIZE_MAX
+   * is received, it is not considered erroneous. */
+  lenEncodedLBBytes = (lenEncodedBytes > STREAM_SIZE_MAX) ?
+      STREAM_SIZE_MAX : lenEncodedBytes;
+
+  /* Copy to lower-band bit-stream structure. */
+  memcpy(instISAC->instLB.ISACdecLB_obj.bitstr_obj.stream, encoded,
+         lenEncodedLBBytes);
+
+  /* We need to initialize numSamplesLB to something; otherwise, in the test
+     for whether we should return -1 below, the compiler might generate code
+     that fools Memcheck (Valgrind) into thinking that the control flow depends
+     on the uninitialized value in numSamplesLB (since WebRtcIsac_DecodeLb will
+     not fill it in if it fails and returns -1). */
+  numSamplesLB = 0;
+
+  /* Regardless of that the current codec is setup to work in
+   * wideband or super-wideband, the decoding of the lower-band
+   * has to be performed. */
+  numDecodedBytesLBint = WebRtcIsac_DecodeLb(&instISAC->transform_tables,
+                                             outFrame, decInstLB,
+                                             &numSamplesLB, isRCUPayload);
+  numDecodedBytesLB = (size_t)numDecodedBytesLBint;
+  if ((numDecodedBytesLBint < 0) ||
+      (numDecodedBytesLB > lenEncodedLBBytes) ||
+      (numSamplesLB > MAX_FRAMESAMPLES)) {
+    instISAC->errorCode = ISAC_LENGTH_MISMATCH;
+    return -1;
+  }
+
+  /* Error Check, we accept multi-layer bit-stream This will limit number
+   * of iterations of the while loop. Even without this the number
+   * of iterations is limited. */
+  numLayer = 1;
+  totSizeBytes = numDecodedBytesLB;
+  while (totSizeBytes != lenEncodedBytes) {
+    if ((totSizeBytes > lenEncodedBytes) ||
+        (encoded[totSizeBytes] == 0) ||
+        (numLayer > MAX_NUM_LAYERS)) {
+      instISAC->errorCode = ISAC_LENGTH_MISMATCH;
+      return -1;
+    }
+    totSizeBytes += encoded[totSizeBytes];
+    numLayer++;
+  }
+
+  if (instISAC->decoderSamplingRateKHz == kIsacWideband) {
+    for (k = 0; k < numSamplesLB; k++) {
+      if (outFrame[k] > 32767) {
+        decoded[k] = 32767;
+      } else if (outFrame[k] < -32768) {
+        decoded[k] = -32768;
+      } else {
+        decoded[k] = (int16_t)WebRtcIsac_lrint(outFrame[k]);
+      }
+    }
+    numSamplesUB = 0;
+  } else {
+    uint32_t crc;
+    /* We don't accept larger than 30ms (480 samples at lower-band)
+     * frame-size. */
+    for (k = 0; k < numSamplesLB; k++) {
+      if (outFrame[k] > 32767) {
+        outFrameLB[k] = 32767;
+      } else if (outFrame[k] < -32768) {
+        outFrameLB[k] = -32768;
+      } else {
+        outFrameLB[k] = (int16_t)WebRtcIsac_lrint(outFrame[k]);
+      }
+    }
+
+    /* Check for possible error, and if upper-band stream exists. */
+    if (numDecodedBytesLB == lenEncodedBytes) {
+      /* Decoding was successful. No super-wideband bit-stream exists. */
+      numSamplesUB = numSamplesLB;
+      memset(outFrameUB, 0, sizeof(int16_t) *  numSamplesUB);
+
+      /* Prepare for the potential increase of signal bandwidth. */
+      instISAC->resetFlag_8kHz = 2;
+    } else {
+      /* This includes the checksum and the bytes that stores the length. */
+      int16_t lenNextStream = encoded[numDecodedBytesLB];
+
+      /* Is this garbage or valid super-wideband bit-stream?
+       * Check if checksum is valid. */
+      if (lenNextStream <= (LEN_CHECK_SUM_WORD8 + 1)) {
+        /* Such a small second layer cannot be super-wideband layer.
+         * It must be a short garbage. */
+        validChecksum = 0;
+      } else {
+        /* Run CRC to see if the checksum match. */
+        WebRtcIsac_GetCrc((int16_t*)(&encoded[numDecodedBytesLB + 1]),
+                          lenNextStream - LEN_CHECK_SUM_WORD8 - 1, &crc);
+
+        validChecksum = 1;
+        for (k = 0; k < LEN_CHECK_SUM_WORD8; k++) {
+          validChecksum &= (((crc >> (24 - k * 8)) & 0xFF) ==
+                            encoded[numDecodedBytesLB + lenNextStream -
+                                          LEN_CHECK_SUM_WORD8 + k]);
+        }
+      }
+
+      if (!validChecksum) {
+        /* This is a garbage, we have received a wideband
+         * bit-stream with garbage. */
+        numSamplesUB = numSamplesLB;
+        memset(outFrameUB, 0, sizeof(int16_t) * numSamplesUB);
+      } else {
+        /* A valid super-wideband biststream exists. */
+        enum ISACBandwidth bandwidthKHz;
+        int32_t maxDelayBit;
+
+        /* If we have super-wideband bit-stream, we cannot
+         * have 60 ms frame-size. */
+        if (numSamplesLB > FRAMESAMPLES) {
+          instISAC->errorCode = ISAC_LENGTH_MISMATCH;
+          return -1;
+        }
+
+        /* The rest of the bit-stream contains the upper-band
+         * bit-stream curently this is the only thing there,
+         * however, we might add more layers. */
+
+        /* Have to exclude one byte where the length is stored
+         * and last 'LEN_CHECK_SUM_WORD8' bytes where the
+         * checksum is stored. */
+        lenNextStream -= (LEN_CHECK_SUM_WORD8 + 1);
+
+        memcpy(decInstUB->bitstr_obj.stream,
+               &encoded[numDecodedBytesLB + 1], lenNextStream);
+
+        /* Reset bit-stream object, this is the first decoding. */
+        WebRtcIsac_ResetBitstream(&(decInstUB->bitstr_obj));
+
+        /* Decode jitter information. */
+        err = WebRtcIsac_DecodeJitterInfo(&decInstUB->bitstr_obj, &maxDelayBit);
+        if (err < 0) {
+          instISAC->errorCode = -err;
+          return -1;
+        }
+
+        /* Update jitter info which is in the upper-band bit-stream
+         * only if the encoder is in super-wideband. Otherwise,
+         * the jitter info is already embedded in bandwidth index
+         * and has been updated. */
+        if (instISAC->encoderSamplingRateKHz == kIsacSuperWideband) {
+          err = WebRtcIsac_UpdateUplinkJitter(
+                  &(instISAC->bwestimator_obj), maxDelayBit);
+          if (err < 0) {
+            instISAC->errorCode = -err;
+            return -1;
+          }
+        }
+
+        /* Decode bandwidth information. */
+        err = WebRtcIsac_DecodeBandwidth(&decInstUB->bitstr_obj,
+                                         &bandwidthKHz);
+        if (err < 0) {
+          instISAC->errorCode = -err;
+          return -1;
+        }
+
+        switch (bandwidthKHz) {
+          case isac12kHz: {
+            numDecodedBytesUB = WebRtcIsac_DecodeUb12(
+                &instISAC->transform_tables, outFrame, decInstUB, isRCUPayload);
+
+            /* Hang-over for transient alleviation -
+             * wait two frames to add the upper band going up from 8 kHz. */
+            if (instISAC->resetFlag_8kHz > 0) {
+              if (instISAC->resetFlag_8kHz == 2) {
+                /* Silence first and a half frame. */
+                memset(outFrame, 0, MAX_FRAMESAMPLES *
+                       sizeof(float));
+              } else {
+                const float rampStep = 2.0f / MAX_FRAMESAMPLES;
+                float rampVal = 0;
+                memset(outFrame, 0, (MAX_FRAMESAMPLES >> 1) *
+                       sizeof(float));
+
+                /* Ramp up second half of second frame. */
+                for (k = MAX_FRAMESAMPLES / 2; k < MAX_FRAMESAMPLES; k++) {
+                  outFrame[k] *= rampVal;
+                  rampVal += rampStep;
+                }
+              }
+              instISAC->resetFlag_8kHz -= 1;
+            }
+
+            break;
+          }
+          case isac16kHz: {
+            numDecodedBytesUB = WebRtcIsac_DecodeUb16(
+                &instISAC->transform_tables, outFrame, decInstUB, isRCUPayload);
+            break;
+          }
+          default:
+            return -1;
+        }
+
+        if (numDecodedBytesUB < 0) {
+          instISAC->errorCode = numDecodedBytesUB;
+          return -1;
+        }
+        if (numDecodedBytesLB + numDecodedBytesUB > lenEncodedBytes) {
+          // We have supposedly decoded more bytes than we were given. Likely
+          // caused by bad input data.
+          instISAC->errorCode = ISAC_LENGTH_MISMATCH;
+          return -1;
+        }
+
+        /* It might be less due to garbage. */
+        if ((numDecodedBytesUB != lenNextStream) &&
+            (numDecodedBytesUB != (lenNextStream -
+                encoded[numDecodedBytesLB + 1 + numDecodedBytesUB]))) {
+          instISAC->errorCode = ISAC_LENGTH_MISMATCH;
+          return -1;
+        }
+
+        /* If there is no error Upper-band always decodes
+         * 30 ms (480 samples). */
+        numSamplesUB = FRAMESAMPLES;
+
+        /* Convert to W16. */
+        for (k = 0; k < numSamplesUB; k++) {
+          if (outFrame[k] > 32767) {
+            outFrameUB[k] = 32767;
+          } else if (outFrame[k] < -32768) {
+            outFrameUB[k] = -32768;
+          } else {
+            outFrameUB[k] = (int16_t)WebRtcIsac_lrint(
+                              outFrame[k]);
+          }
+        }
+      }
+    }
+
+    speechIdx = 0;
+    while (speechIdx < numSamplesLB) {
+      WebRtcSpl_SynthesisQMF(&outFrameLB[speechIdx], &outFrameUB[speechIdx],
+                             FRAMESAMPLES_10ms, &decoded[(speechIdx << 1)],
+                             instISAC->synthesisFBState1,
+                             instISAC->synthesisFBState2);
+
+      speechIdx += FRAMESAMPLES_10ms;
+    }
+  }
+  *speechType = 0;
+  return (numSamplesLB + numSamplesUB);
+}
+
+
+
+
+
+
+
+/****************************************************************************
+ * WebRtcIsac_Decode(...)
+ *
+ * This function decodes a ISAC frame. Output speech length
+ * will be a multiple of 480 samples: 480 or 960 samples,
+ * depending on the  frameSize (30 or 60 ms).
+ *
+ * Input:
+ *        - ISAC_main_inst    : ISAC instance.
+ *        - encoded           : encoded ISAC frame(s)
+ *        - len               : bytes in encoded vector
+ *
+ * Output:
+ *        - decoded           : The decoded vector
+ *
+ * Return value               : >0 - number of samples in decoded vector
+ *                              -1 - Error
+ */
+
+int WebRtcIsac_Decode(ISACStruct* ISAC_main_inst,
+                      const uint8_t* encoded,
+                      size_t lenEncodedBytes,
+                      int16_t* decoded,
+                      int16_t* speechType) {
+  int16_t isRCUPayload = 0;
+  return Decode(ISAC_main_inst, encoded, lenEncodedBytes, decoded,
+                speechType, isRCUPayload);
+}
+
+/****************************************************************************
+ * WebRtcIsac_DecodeRcu(...)
+ *
+ * This function decodes a redundant (RCU) iSAC frame. Function is called in
+ * NetEq with a stored RCU payload in case of packet loss. Output speech length
+ * will be a multiple of 480 samples: 480 or 960 samples,
+ * depending on the framesize (30 or 60 ms).
+ *
+ * Input:
+ *      - ISAC_main_inst     : ISAC instance.
+ *      - encoded            : encoded ISAC RCU frame(s)
+ *      - len                : bytes in encoded vector
+ *
+ * Output:
+ *      - decoded            : The decoded vector
+ *
+ * Return value              : >0 - number of samples in decoded vector
+ *                             -1 - Error
+ */
+
+
+
+int WebRtcIsac_DecodeRcu(ISACStruct* ISAC_main_inst,
+                         const uint8_t* encoded,
+                         size_t lenEncodedBytes,
+                         int16_t* decoded,
+                         int16_t* speechType) {
+  int16_t isRCUPayload = 1;
+  return Decode(ISAC_main_inst, encoded, lenEncodedBytes, decoded,
+                speechType, isRCUPayload);
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_DecodePlc(...)
+ *
+ * This function conducts PLC for ISAC frame(s). Output speech length
+ * will be a multiple of 480 samples: 480 or 960 samples,
+ * depending on the  frameSize (30 or 60 ms).
+ *
+ * Input:
+ *        - ISAC_main_inst    : ISAC instance.
+ *        - noOfLostFrames    : Number of PLC frames to produce
+ *
+ * Output:
+ *        - decoded           : The decoded vector
+ *
+ * Return value               : Number of samples in decoded PLC vector
+ */
+size_t WebRtcIsac_DecodePlc(ISACStruct* ISAC_main_inst,
+                            int16_t* decoded,
+                            size_t noOfLostFrames) {
+  size_t numSamples = 0;
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+
+  /* Limit number of frames to two = 60 millisecond.
+   * Otherwise we exceed data vectors. */
+  if (noOfLostFrames > 2) {
+    noOfLostFrames = 2;
+  }
+
+  /* Get the number of samples per frame */
+  switch (instISAC->decoderSamplingRateKHz) {
+    case kIsacWideband: {
+      numSamples = 480 * noOfLostFrames;
+      break;
+    }
+    case kIsacSuperWideband: {
+      numSamples = 960 * noOfLostFrames;
+      break;
+    }
+  }
+
+  /* Set output samples to zero. */
+  memset(decoded, 0, numSamples * sizeof(int16_t));
+  return numSamples;
+}
+
+
+/****************************************************************************
+ * ControlLb(...) - Internal function for controlling Lower Band
+ * ControlUb(...) - Internal function for controlling Upper Band
+ * WebRtcIsac_Control(...) - API function
+ *
+ * This function sets the limit on the short-term average bit rate and the
+ * frame length. Should be used only in Instantaneous mode.
+ *
+ * Input:
+ *        - ISAC_main_inst    : ISAC instance.
+ *        - rate              : limit on the short-term average bit rate,
+ *                              in bits/second (between 10000 and 32000)
+ *        - frameSize         : number of milliseconds per frame (30 or 60)
+ *
+ * Return value               : 0 - ok
+ *                             -1 - Error
+ */
+static int16_t ControlLb(ISACLBStruct* instISAC, double rate,
+                         int16_t frameSize) {
+  if ((rate >= 10000) && (rate <= 32000)) {
+    instISAC->ISACencLB_obj.bottleneck = rate;
+  } else {
+    return -ISAC_DISALLOWED_BOTTLENECK;
+  }
+
+  if ((frameSize == 30) || (frameSize == 60)) {
+    instISAC->ISACencLB_obj.new_framelength = (FS / 1000) *  frameSize;
+  } else {
+    return -ISAC_DISALLOWED_FRAME_LENGTH;
+  }
+
+  return 0;
+}
+
+static int16_t ControlUb(ISACUBStruct* instISAC, double rate) {
+  if ((rate >= 10000) && (rate <= 32000)) {
+    instISAC->ISACencUB_obj.bottleneck = rate;
+  } else {
+    return -ISAC_DISALLOWED_BOTTLENECK;
+  }
+  return 0;
+}
+
+int16_t WebRtcIsac_Control(ISACStruct* ISAC_main_inst,
+                           int32_t bottleneckBPS,
+                           int frameSize) {
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+  int16_t status;
+  double rateLB;
+  double rateUB;
+  enum ISACBandwidth bandwidthKHz;
+
+  if (instISAC->codingMode == 0) {
+    /* In adaptive mode. */
+    instISAC->errorCode = ISAC_MODE_MISMATCH;
+    return -1;
+  }
+
+  /* Check if encoder initiated */
+  if ((instISAC->initFlag & BIT_MASK_ENC_INIT) !=
+      BIT_MASK_ENC_INIT) {
+    instISAC->errorCode = ISAC_ENCODER_NOT_INITIATED;
+    return -1;
+  }
+
+  if (instISAC->encoderSamplingRateKHz == kIsacWideband) {
+    /* If the sampling rate is 16kHz then bandwith should be 8kHz,
+     * regardless of bottleneck. */
+    bandwidthKHz = isac8kHz;
+    rateLB = (bottleneckBPS > 32000) ? 32000 : bottleneckBPS;
+    rateUB = 0;
+  } else {
+    if (WebRtcIsac_RateAllocation(bottleneckBPS, &rateLB, &rateUB,
+                                  &bandwidthKHz) < 0) {
+      return -1;
+    }
+  }
+
+  if ((instISAC->encoderSamplingRateKHz == kIsacSuperWideband) &&
+      (frameSize != 30) &&
+      (bandwidthKHz != isac8kHz)) {
+    /* Cannot have 60 ms in super-wideband. */
+    instISAC->errorCode = ISAC_DISALLOWED_FRAME_LENGTH;
+    return -1;
+  }
+
+  status = ControlLb(&instISAC->instLB, rateLB, (int16_t)frameSize);
+  if (status < 0) {
+    instISAC->errorCode = -status;
+    return -1;
+  }
+  if (bandwidthKHz != isac8kHz) {
+    status = ControlUb(&(instISAC->instUB), rateUB);
+    if (status < 0) {
+      instISAC->errorCode = -status;
+      return -1;
+    }
+  }
+
+
+  /* Check if bandwidth is changing from wideband to super-wideband
+   * then we have to synch data buffer of lower & upper-band. Also
+   * clean up the upper-band data buffer. */
+
+  if ((instISAC->bandwidthKHz == isac8kHz) && (bandwidthKHz != isac8kHz)) {
+    memset(instISAC->instUB.ISACencUB_obj.data_buffer_float, 0,
+           sizeof(float) * (MAX_FRAMESAMPLES + LB_TOTAL_DELAY_SAMPLES));
+
+    if (bandwidthKHz == isac12kHz) {
+      instISAC->instUB.ISACencUB_obj.buffer_index =
+        instISAC->instLB.ISACencLB_obj.buffer_index;
+    } else {
+      instISAC->instUB.ISACencUB_obj.buffer_index =
+          LB_TOTAL_DELAY_SAMPLES + instISAC->instLB.ISACencLB_obj.buffer_index;
+
+      memcpy(&(instISAC->instUB.ISACencUB_obj.lastLPCVec),
+             WebRtcIsac_kMeanLarUb16, sizeof(double) * UB_LPC_ORDER);
+    }
+  }
+
+  /* Update the payload limit if the bandwidth is changing. */
+  if (instISAC->bandwidthKHz != bandwidthKHz) {
+    instISAC->bandwidthKHz = bandwidthKHz;
+    UpdatePayloadSizeLimit(instISAC);
+  }
+  instISAC->bottleneck = bottleneckBPS;
+  return 0;
+}
+
+void WebRtcIsac_SetInitialBweBottleneck(ISACStruct* ISAC_main_inst,
+                                        int bottleneck_bits_per_second) {
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+  RTC_DCHECK_GE(bottleneck_bits_per_second, 10000);
+  RTC_DCHECK_LE(bottleneck_bits_per_second, 32000);
+  instISAC->bwestimator_obj.send_bw_avg = (float)bottleneck_bits_per_second;
+}
+
+/****************************************************************************
+ * WebRtcIsac_ControlBwe(...)
+ *
+ * This function sets the initial values of bottleneck and frame-size if
+ * iSAC is used in channel-adaptive mode. Through this API, users can
+ * enforce a frame-size for all values of bottleneck. Then iSAC will not
+ * automatically change the frame-size.
+ *
+ *
+ * Input:
+ *        - ISAC_main_inst    : ISAC instance.
+ *        - rateBPS           : initial value of bottleneck in bits/second
+ *                              10000 <= rateBPS <= 32000 is accepted
+ *                              For default bottleneck set rateBPS = 0
+ *        - frameSizeMs       : number of milliseconds per frame (30 or 60)
+ *        - enforceFrameSize  : 1 to enforce the given frame-size through out
+ *                              the adaptation process, 0 to let iSAC change
+ *                              the frame-size if required.
+ *
+ * Return value               : 0 - ok
+ *                             -1 - Error
+ */
+int16_t WebRtcIsac_ControlBwe(ISACStruct* ISAC_main_inst,
+                              int32_t bottleneckBPS,
+                              int frameSizeMs,
+                              int16_t enforceFrameSize) {
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+  enum ISACBandwidth bandwidth;
+
+   /* Check if encoder initiated */
+  if ((instISAC->initFlag & BIT_MASK_ENC_INIT) !=
+      BIT_MASK_ENC_INIT) {
+    instISAC->errorCode = ISAC_ENCODER_NOT_INITIATED;
+    return -1;
+  }
+
+  /* Check that we are in channel-adaptive mode, otherwise, return (-1) */
+  if (instISAC->codingMode != 0) {
+    instISAC->errorCode = ISAC_MODE_MISMATCH;
+    return -1;
+  }
+  if ((frameSizeMs != 30) &&
+      (instISAC->encoderSamplingRateKHz == kIsacSuperWideband)) {
+    return -1;
+  }
+
+  /* Set structure variable if enforceFrameSize is set. ISAC will then
+   * keep the chosen frame size. */
+  if (enforceFrameSize != 0) {
+    instISAC->instLB.ISACencLB_obj.enforceFrameSize = 1;
+  } else {
+    instISAC->instLB.ISACencLB_obj.enforceFrameSize = 0;
+  }
+
+  /* Set the initial rate. If the input value is zero then the default intial
+   * rate is used. Otehrwise, values between 10 to 32 kbps are accepted. */
+  if (bottleneckBPS != 0) {
+    double rateLB;
+    double rateUB;
+    if (WebRtcIsac_RateAllocation(bottleneckBPS, &rateLB, &rateUB,
+                                  &bandwidth) < 0) {
+      return -1;
+    }
+    instISAC->bwestimator_obj.send_bw_avg = (float)bottleneckBPS;
+    instISAC->bandwidthKHz = bandwidth;
+  }
+
+  /* Set the initial frame-size. If 'enforceFrameSize' is set, the frame-size
+   *  will not change */
+  if (frameSizeMs != 0) {
+    if ((frameSizeMs  == 30) || (frameSizeMs == 60)) {
+      instISAC->instLB.ISACencLB_obj.new_framelength =
+          (int16_t)((FS / 1000) * frameSizeMs);
+    } else {
+      instISAC->errorCode = ISAC_DISALLOWED_FRAME_LENGTH;
+      return -1;
+    }
+  }
+  return 0;
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_GetDownLinkBwIndex(...)
+ *
+ * This function returns index representing the Bandwidth estimate from
+ * the other side to this side.
+ *
+ * Input:
+ *        - ISAC_main_inst    : iSAC structure
+ *
+ * Output:
+ *        - bweIndex         : Bandwidth estimate to transmit to other side.
+ *
+ */
+int16_t WebRtcIsac_GetDownLinkBwIndex(ISACStruct* ISAC_main_inst,
+                                      int16_t* bweIndex,
+                                      int16_t* jitterInfo) {
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+
+  /* Check if encoder initialized. */
+  if ((instISAC->initFlag & BIT_MASK_DEC_INIT) !=
+      BIT_MASK_DEC_INIT) {
+    instISAC->errorCode = ISAC_ENCODER_NOT_INITIATED;
+    return -1;
+  }
+
+  /* Call function to get Bandwidth Estimate. */
+  WebRtcIsac_GetDownlinkBwJitIndexImpl(&(instISAC->bwestimator_obj), bweIndex,
+                                       jitterInfo,
+                                       instISAC->decoderSamplingRateKHz);
+  return 0;
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_UpdateUplinkBw(...)
+ *
+ * This function takes an index representing the Bandwidth estimate from
+ * this side to other side and updates BWE.
+ *
+ * Input:
+ *        - ISAC_main_inst    : iSAC structure
+ *        - rateIndex         : Bandwidth estimate from other side.
+ *
+ * Return value               : 0 - ok
+ *                             -1 - index out of range
+ */
+int16_t WebRtcIsac_UpdateUplinkBw(ISACStruct* ISAC_main_inst,
+                                  int16_t bweIndex) {
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+  int16_t returnVal;
+
+  /* Check if encoder initiated. */
+  if ((instISAC->initFlag & BIT_MASK_ENC_INIT) !=
+      BIT_MASK_ENC_INIT) {
+    instISAC->errorCode = ISAC_ENCODER_NOT_INITIATED;
+    return -1;
+  }
+
+  /* Call function to get Bandwidth Estimate. */
+  returnVal = WebRtcIsac_UpdateUplinkBwImpl(
+                &(instISAC->bwestimator_obj), bweIndex,
+                instISAC->encoderSamplingRateKHz);
+
+  if (returnVal < 0) {
+    instISAC->errorCode = -returnVal;
+    return -1;
+  } else {
+    return 0;
+  }
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_ReadBwIndex(...)
+ *
+ * This function returns the index of the Bandwidth estimate from the
+ * bit-stream.
+ *
+ * Input:
+ *        - encoded           : Encoded bit-stream
+ *
+ * Output:
+ *        - frameLength       : Length of frame in packet (in samples)
+ *        - bweIndex          : Bandwidth estimate in bit-stream
+ *
+ */
+int16_t WebRtcIsac_ReadBwIndex(const uint8_t* encoded,
+                               int16_t* bweIndex) {
+  Bitstr streamdata;
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+  int k;
+#endif
+  int16_t err;
+
+  WebRtcIsac_ResetBitstream(&(streamdata));
+
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+  for (k = 0; k < 10; k++) {
+    int16_t ek2 = ((const int16_t*)encoded)[k >> 1];
+    streamdata.stream[k] = (uint8_t)((ek2 >> ((k & 1) << 3)) & 0xff);
+  }
+#else
+  memcpy(streamdata.stream, encoded, 10);
+#endif
+
+  /* Decode frame length. */
+  err = WebRtcIsac_DecodeFrameLen(&streamdata, bweIndex);
+  if (err < 0) {
+    return err;
+  }
+
+  /* Decode BW estimation. */
+  err = WebRtcIsac_DecodeSendBW(&streamdata, bweIndex);
+  if (err < 0) {
+    return err;
+  }
+
+  return 0;
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_ReadFrameLen(...)
+ *
+ * This function returns the number of samples the decoder will generate if
+ * the given payload is decoded.
+ *
+ * Input:
+ *        - encoded           : Encoded bitstream
+ *
+ * Output:
+ *        - frameLength       : Length of frame in packet (in samples)
+ *
+ */
+int16_t WebRtcIsac_ReadFrameLen(ISACStruct* ISAC_main_inst,
+                                const uint8_t* encoded,
+                                int16_t* frameLength) {
+  Bitstr streamdata;
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+  int k;
+#endif
+  int16_t err;
+  ISACMainStruct* instISAC;
+
+  WebRtcIsac_ResetBitstream(&(streamdata));
+
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+  for (k = 0; k < 10; k++) {
+    int16_t ek2 = ((const int16_t*)encoded)[k >> 1];
+    streamdata.stream[k] = (uint8_t)((ek2 >> ((k & 1) << 3)) & 0xff);
+  }
+#else
+  memcpy(streamdata.stream, encoded, 10);
+#endif
+
+  /* Decode frame length. */
+  err = WebRtcIsac_DecodeFrameLen(&streamdata, frameLength);
+  if (err < 0) {
+    return -1;
+  }
+  instISAC = (ISACMainStruct*)ISAC_main_inst;
+
+  if (instISAC->decoderSamplingRateKHz == kIsacSuperWideband) {
+    /* The decoded frame length indicates the number of samples in
+     * lower-band in this case, multiply by 2 to get the total number
+     * of samples. */
+    *frameLength <<= 1;
+  }
+  return 0;
+}
+
+
+/*******************************************************************************
+ * WebRtcIsac_GetNewFrameLen(...)
+ *
+ * This function returns the frame length (in samples) of the next packet.
+ * In the case of channel-adaptive mode, iSAC decides on its frame length based
+ * on the estimated bottleneck, this AOI allows a user to prepare for the next
+ * packet (at the encoder).
+ *
+ * The primary usage is in CE to make the iSAC works in channel-adaptive mode
+ *
+ * Input:
+ *        - ISAC_main_inst     : iSAC struct
+ *
+ * Return Value                : frame lenght in samples
+ *
+ */
+int16_t WebRtcIsac_GetNewFrameLen(ISACStruct* ISAC_main_inst) {
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+
+  /* Return new frame length. */
+  if (instISAC->in_sample_rate_hz == 16000)
+    return (instISAC->instLB.ISACencLB_obj.new_framelength);
+  else  /* 32000 Hz */
+    return ((instISAC->instLB.ISACencLB_obj.new_framelength) * 2);
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_GetErrorCode(...)
+ *
+ * This function can be used to check the error code of an iSAC instance.
+ * When a function returns -1 an error code will be set for that instance.
+ * The function below extracts the code of the last error that occurred in
+ * the specified instance.
+ *
+ * Input:
+ *        - ISAC_main_inst    : ISAC instance
+ *
+ * Return value               : Error code
+ */
+int16_t WebRtcIsac_GetErrorCode(ISACStruct* ISAC_main_inst) {
+ return ((ISACMainStruct*)ISAC_main_inst)->errorCode;
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_GetUplinkBw(...)
+ *
+ * This function outputs the target bottleneck of the codec. In
+ * channel-adaptive mode, the target bottleneck is specified through an in-band
+ * signalling retrieved by bandwidth estimator.
+ * In channel-independent, also called instantaneous mode, the target
+ * bottleneck is provided to the encoder by calling xxx_control(...) (if
+ * xxx_control is never called, the default values are used.).
+ * Note that the output is the iSAC internal operating bottleneck which might
+ * differ slightly from the one provided through xxx_control().
+ *
+ * Input:
+ *        - ISAC_main_inst    : iSAC instance
+ *
+ * Output:
+ *        - *bottleneck       : bottleneck in bits/sec
+ *
+ * Return value               : -1 if error happens
+ *                               0 bit-rates computed correctly.
+ */
+int16_t WebRtcIsac_GetUplinkBw(ISACStruct*  ISAC_main_inst,
+                               int32_t* bottleneck) {
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+
+  if (instISAC->codingMode == 0) {
+    /* We are in adaptive mode then get the bottleneck from BWE. */
+    *bottleneck = (int32_t)instISAC->bwestimator_obj.send_bw_avg;
+  } else {
+    *bottleneck = instISAC->bottleneck;
+  }
+
+  if ((*bottleneck > 32000) && (*bottleneck < 38000)) {
+    *bottleneck = 32000;
+  } else if ((*bottleneck > 45000) && (*bottleneck < 50000)) {
+    *bottleneck = 45000;
+  } else if (*bottleneck > 56000) {
+    *bottleneck = 56000;
+  }
+  return 0;
+}
+
+
+/******************************************************************************
+ * WebRtcIsac_SetMaxPayloadSize(...)
+ *
+ * This function sets a limit for the maximum payload size of iSAC. The same
+ * value is used both for 30 and 60 ms packets. If the encoder sampling rate
+ * is 16 kHz the maximum payload size is between 120 and 400 bytes. If the
+ * encoder sampling rate is 32 kHz the maximum payload size is between 120
+ * and 600 bytes.
+ *
+ * ---------------
+ * IMPORTANT NOTES
+ * ---------------
+ * The size of a packet is limited to the minimum of 'max-payload-size' and
+ * 'max-rate.' For instance, let's assume the max-payload-size is set to
+ * 170 bytes, and max-rate is set to 40 kbps. Note that a limit of 40 kbps
+ * translates to 150 bytes for 30ms frame-size & 300 bytes for 60ms
+ * frame-size. Then a packet with a frame-size of 30 ms is limited to 150,
+ * i.e. min(170, 150), and a packet with 60 ms frame-size is limited to
+ * 170 bytes, i.e. min(170, 300).
+ *
+ * Input:
+ *        - ISAC_main_inst    : iSAC instance
+ *        - maxPayloadBytes   : maximum size of the payload in bytes
+ *                              valid values are between 100 and 400 bytes
+ *                              if encoder sampling rate is 16 kHz. For
+ *                              32 kHz encoder sampling rate valid values
+ *                              are between 100 and 600 bytes.
+ *
+ * Return value               : 0 if successful
+ *                             -1 if error happens
+ */
+int16_t WebRtcIsac_SetMaxPayloadSize(ISACStruct* ISAC_main_inst,
+                                     int16_t maxPayloadBytes) {
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+  int16_t status = 0;
+
+  /* Check if encoder initiated */
+  if ((instISAC->initFlag & BIT_MASK_ENC_INIT) !=
+      BIT_MASK_ENC_INIT) {
+    instISAC->errorCode = ISAC_ENCODER_NOT_INITIATED;
+    return -1;
+  }
+
+  if (instISAC->encoderSamplingRateKHz == kIsacSuperWideband) {
+    /* Sanity check. */
+    if (maxPayloadBytes < 120) {
+      /* 'maxRate' is out of valid range
+       * set to the acceptable value and return -1. */
+      maxPayloadBytes = 120;
+      status = -1;
+    }
+
+    /* sanity check */
+    if (maxPayloadBytes > STREAM_SIZE_MAX) {
+      /* maxRate is out of valid range,
+       * set to the acceptable value and return -1. */
+      maxPayloadBytes = STREAM_SIZE_MAX;
+      status = -1;
+    }
+  } else {
+    if (maxPayloadBytes < 120) {
+      /* Max payload-size is out of valid range
+       * set to the acceptable value and return -1. */
+      maxPayloadBytes = 120;
+      status = -1;
+    }
+    if (maxPayloadBytes > STREAM_SIZE_MAX_60) {
+      /* Max payload-size is out of valid range
+       * set to the acceptable value and return -1. */
+      maxPayloadBytes = STREAM_SIZE_MAX_60;
+      status = -1;
+    }
+  }
+  instISAC->maxPayloadSizeBytes = maxPayloadBytes;
+  UpdatePayloadSizeLimit(instISAC);
+  return status;
+}
+
+
+/******************************************************************************
+ * WebRtcIsac_SetMaxRate(...)
+ *
+ * This function sets the maximum rate which the codec may not exceed for
+ * any signal packet. The maximum rate is defined and payload-size per
+ * frame-size in bits per second.
+ *
+ * The codec has a maximum rate of 53400 bits per second (200 bytes per 30
+ * ms) if the encoder sampling rate is 16kHz, and 160 kbps (600 bytes/30 ms)
+ * if the encoder sampling rate is 32 kHz.
+ *
+ * It is possible to set a maximum rate between 32000 and 53400 bits/sec
+ * in wideband mode, and 32000 to 160000 bits/sec in super-wideband mode.
+ *
+ * ---------------
+ * IMPORTANT NOTES
+ * ---------------
+ * The size of a packet is limited to the minimum of 'max-payload-size' and
+ * 'max-rate.' For instance, let's assume the max-payload-size is set to
+ * 170 bytes, and max-rate is set to 40 kbps. Note that a limit of 40 kbps
+ * translates to 150 bytes for 30ms frame-size & 300 bytes for 60ms
+ * frame-size. Then a packet with a frame-size of 30 ms is limited to 150,
+ * i.e. min(170, 150), and a packet with 60 ms frame-size is limited to
+ * 170 bytes, min(170, 300).
+ *
+ * Input:
+ *        - ISAC_main_inst    : iSAC instance
+ *        - maxRate           : maximum rate in bits per second,
+ *                              valid values are 32000 to 53400 bits/sec in
+ *                              wideband mode, and 32000 to 160000 bits/sec in
+ *                              super-wideband mode.
+ *
+ * Return value               : 0 if successful
+ *                             -1 if error happens
+ */
+int16_t WebRtcIsac_SetMaxRate(ISACStruct* ISAC_main_inst,
+                              int32_t maxRate) {
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+  int16_t maxRateInBytesPer30Ms;
+  int16_t status = 0;
+
+  /* check if encoder initiated */
+  if ((instISAC->initFlag & BIT_MASK_ENC_INIT) != BIT_MASK_ENC_INIT) {
+    instISAC->errorCode = ISAC_ENCODER_NOT_INITIATED;
+    return -1;
+  }
+  /* Calculate maximum number of bytes per 30 msec packets for the
+     given maximum rate. Multiply with 30/1000 to get number of
+     bits per 30 ms, divide by 8 to get number of bytes per 30 ms:
+     maxRateInBytes = floor((maxRate * 30/1000) / 8); */
+  maxRateInBytesPer30Ms = (int16_t)(maxRate * 3 / 800);
+
+  if (instISAC->encoderSamplingRateKHz == kIsacWideband) {
+    if (maxRate < 32000) {
+      /* 'maxRate' is out of valid range.
+       * Set to the acceptable value and return -1. */
+      maxRateInBytesPer30Ms = 120;
+      status = -1;
+    }
+
+    if (maxRate > 53400) {
+      /* 'maxRate' is out of valid range.
+       * Set to the acceptable value and return -1. */
+      maxRateInBytesPer30Ms = 200;
+      status = -1;
+    }
+  } else {
+    if (maxRateInBytesPer30Ms < 120) {
+      /* 'maxRate' is out of valid range
+       * Set to the acceptable value and return -1. */
+      maxRateInBytesPer30Ms = 120;
+      status = -1;
+    }
+
+    if (maxRateInBytesPer30Ms > STREAM_SIZE_MAX) {
+      /* 'maxRate' is out of valid range.
+       * Set to the acceptable value and return -1. */
+      maxRateInBytesPer30Ms = STREAM_SIZE_MAX;
+      status = -1;
+    }
+  }
+  instISAC->maxRateBytesPer30Ms = maxRateInBytesPer30Ms;
+  UpdatePayloadSizeLimit(instISAC);
+  return status;
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_GetRedPayload(...)
+ *
+ * This function populates "encoded" with the redundant payload of the recently
+ * encodedframe. This function has to be called once that WebRtcIsac_Encode(...)
+ * returns a positive value. Regardless of the frame-size this function will
+ * be called only once after encoding is completed. The bit-stream is
+ * targeted for 16000 bit/sec.
+ *
+ * Input:
+ *        - ISAC_main_inst    : iSAC struct
+ *
+ * Output:
+ *        - encoded           : the encoded data vector
+ *
+ *
+ * Return value               : >0 - Length (in bytes) of coded data
+ *                            : -1 - Error
+ */
+int16_t WebRtcIsac_GetRedPayload(ISACStruct* ISAC_main_inst,
+                                 uint8_t* encoded) {
+  Bitstr iSACBitStreamInst;
+  int16_t streamLenLB;
+  int16_t streamLenUB;
+  int16_t streamLen;
+  int16_t totalLenUB;
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+  int k;
+#endif
+
+  if ((instISAC->initFlag & BIT_MASK_ENC_INIT) !=
+      BIT_MASK_ENC_INIT) {
+    instISAC->errorCode = ISAC_ENCODER_NOT_INITIATED;
+  }
+
+  WebRtcIsac_ResetBitstream(&(iSACBitStreamInst));
+
+  streamLenLB = WebRtcIsac_EncodeStoredDataLb(
+                  &instISAC->instLB.ISACencLB_obj.SaveEnc_obj,
+                  &iSACBitStreamInst,
+                  instISAC->instLB.ISACencLB_obj.lastBWIdx,
+                  RCU_TRANSCODING_SCALE);
+  if (streamLenLB < 0) {
+    return -1;
+  }
+
+  /* convert from bytes to int16_t. */
+  memcpy(encoded, iSACBitStreamInst.stream, streamLenLB);
+  streamLen = streamLenLB;
+  if (instISAC->bandwidthKHz == isac8kHz) {
+    return streamLenLB;
+  }
+
+  streamLenUB = WebRtcIsac_GetRedPayloadUb(
+                  &instISAC->instUB.ISACencUB_obj.SaveEnc_obj,
+                  &iSACBitStreamInst, instISAC->bandwidthKHz);
+  if (streamLenUB < 0) {
+    /* An error has happened but this is not the error due to a
+     * bit-stream larger than the limit. */
+    return -1;
+  }
+
+  /* We have one byte to write the total length of the upper-band.
+   * The length includes the bit-stream length, check-sum and the
+   * single byte where the length is written to. This is according to
+   * iSAC wideband and how the "garbage" is dealt. */
+  totalLenUB = streamLenUB + 1 + LEN_CHECK_SUM_WORD8;
+  if (totalLenUB > 255) {
+    streamLenUB = 0;
+  }
+
+  /* Generate CRC if required. */
+  if ((instISAC->bandwidthKHz != isac8kHz) &&
+      (streamLenUB > 0)) {
+    uint32_t crc;
+    streamLen += totalLenUB;
+    encoded[streamLenLB] = (uint8_t)totalLenUB;
+    memcpy(&encoded[streamLenLB + 1], iSACBitStreamInst.stream,
+           streamLenUB);
+
+    WebRtcIsac_GetCrc((int16_t*)(&(encoded[streamLenLB + 1])),
+                      streamLenUB, &crc);
+#ifndef WEBRTC_ARCH_BIG_ENDIAN
+    for (k = 0; k < LEN_CHECK_SUM_WORD8; k++) {
+      encoded[streamLen - LEN_CHECK_SUM_WORD8 + k] =
+        (uint8_t)((crc >> (24 - k * 8)) & 0xFF);
+    }
+#else
+    memcpy(&encoded[streamLenLB + streamLenUB + 1], &crc,
+           LEN_CHECK_SUM_WORD8);
+#endif
+  }
+  return streamLen;
+}
+
+
+/****************************************************************************
+ * WebRtcIsac_version(...)
+ *
+ * This function returns the version number.
+ *
+ * Output:
+ *        - version      : Pointer to character string
+ *
+ */
+void WebRtcIsac_version(char* version) {
+  strcpy(version, "4.3.0");
+}
+
+
+/******************************************************************************
+ * WebRtcIsac_SetEncSampRate()
+ * This function sets the sampling rate of the encoder. Initialization of the
+ * encoder WILL NOT overwrite the sampling rate of the encoder. The default
+ * value is 16 kHz which is set when the instance is created. The encoding-mode
+ * and the bottleneck remain unchanged by this call, however, the maximum rate
+ * and maximum payload-size will be reset to their default values.
+ *
+ * Input:
+ *        - ISAC_main_inst    : iSAC instance
+ *        - sample_rate_hz    : sampling rate in Hertz, valid values are 16000
+ *                              and 32000.
+ *
+ * Return value               : 0 if successful
+ *                             -1 if failed.
+ */
+int16_t WebRtcIsac_SetEncSampRate(ISACStruct* ISAC_main_inst,
+                                  uint16_t sample_rate_hz) {
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+  enum IsacSamplingRate encoder_operational_rate;
+
+  if ((sample_rate_hz != 16000) && (sample_rate_hz != 32000)) {
+    /* Sampling Frequency is not supported. */
+    instISAC->errorCode = ISAC_UNSUPPORTED_SAMPLING_FREQUENCY;
+    return -1;
+  }
+  if (sample_rate_hz == 16000) {
+    encoder_operational_rate = kIsacWideband;
+  } else {
+    encoder_operational_rate = kIsacSuperWideband;
+  }
+
+  if ((instISAC->initFlag & BIT_MASK_ENC_INIT) !=
+      BIT_MASK_ENC_INIT) {
+    if (encoder_operational_rate == kIsacWideband) {
+      instISAC->bandwidthKHz = isac8kHz;
+    } else {
+      instISAC->bandwidthKHz = isac16kHz;
+    }
+  } else {
+    ISACUBStruct* instUB = &(instISAC->instUB);
+    ISACLBStruct* instLB = &(instISAC->instLB);
+    int32_t bottleneck = instISAC->bottleneck;
+    int16_t codingMode = instISAC->codingMode;
+    int16_t frameSizeMs = instLB->ISACencLB_obj.new_framelength /
+        (FS / 1000);
+
+    if ((encoder_operational_rate == kIsacWideband) &&
+        (instISAC->encoderSamplingRateKHz == kIsacSuperWideband)) {
+      /* Changing from super-wideband to wideband.
+       * we don't need to re-initialize the encoder of the lower-band. */
+      instISAC->bandwidthKHz = isac8kHz;
+      if (codingMode == 1) {
+        ControlLb(instLB,
+                  (bottleneck > 32000) ? 32000 : bottleneck, FRAMESIZE);
+      }
+      instISAC->maxPayloadSizeBytes = STREAM_SIZE_MAX_60;
+      instISAC->maxRateBytesPer30Ms = STREAM_SIZE_MAX_30;
+    } else if ((encoder_operational_rate == kIsacSuperWideband) &&
+               (instISAC->encoderSamplingRateKHz == kIsacWideband)) {
+      double bottleneckLB = 0;
+      double bottleneckUB = 0;
+      if (codingMode == 1) {
+        WebRtcIsac_RateAllocation(bottleneck, &bottleneckLB, &bottleneckUB,
+                                  &(instISAC->bandwidthKHz));
+      }
+
+      instISAC->bandwidthKHz = isac16kHz;
+      instISAC->maxPayloadSizeBytes = STREAM_SIZE_MAX;
+      instISAC->maxRateBytesPer30Ms = STREAM_SIZE_MAX;
+
+      EncoderInitLb(instLB, codingMode, encoder_operational_rate);
+      EncoderInitUb(instUB, instISAC->bandwidthKHz);
+
+      memset(instISAC->analysisFBState1, 0,
+             FB_STATE_SIZE_WORD32 * sizeof(int32_t));
+      memset(instISAC->analysisFBState2, 0,
+             FB_STATE_SIZE_WORD32 * sizeof(int32_t));
+
+      if (codingMode == 1) {
+        instISAC->bottleneck = bottleneck;
+        ControlLb(instLB, bottleneckLB,
+                  (instISAC->bandwidthKHz == isac8kHz) ? frameSizeMs:FRAMESIZE);
+        if (instISAC->bandwidthKHz > isac8kHz) {
+          ControlUb(instUB, bottleneckUB);
+        }
+      } else {
+        instLB->ISACencLB_obj.enforceFrameSize = 0;
+        instLB->ISACencLB_obj.new_framelength = FRAMESAMPLES;
+      }
+    }
+  }
+  instISAC->encoderSamplingRateKHz = encoder_operational_rate;
+  instISAC->in_sample_rate_hz = sample_rate_hz;
+  return 0;
+}
+
+
+/******************************************************************************
+ * WebRtcIsac_SetDecSampRate()
+ * This function sets the sampling rate of the decoder. Initialization of the
+ * decoder WILL NOT overwrite the sampling rate of the encoder. The default
+ * value is 16 kHz which is set when the instance is created.
+ *
+ * Input:
+ *        - ISAC_main_inst    : iSAC instance
+ *        - sample_rate_hz    : sampling rate in Hertz, valid values are 16000
+ *                              and 32000.
+ *
+ * Return value               : 0 if successful
+ *                             -1 if failed.
+ */
+int16_t WebRtcIsac_SetDecSampRate(ISACStruct* ISAC_main_inst,
+                                  uint16_t sample_rate_hz) {
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+  enum IsacSamplingRate decoder_operational_rate;
+
+  if (sample_rate_hz == 16000) {
+    decoder_operational_rate = kIsacWideband;
+  } else if (sample_rate_hz == 32000) {
+    decoder_operational_rate = kIsacSuperWideband;
+  } else {
+    /* Sampling Frequency is not supported. */
+    instISAC->errorCode = ISAC_UNSUPPORTED_SAMPLING_FREQUENCY;
+    return -1;
+  }
+
+  if ((instISAC->decoderSamplingRateKHz == kIsacWideband) &&
+        (decoder_operational_rate == kIsacSuperWideband)) {
+      /* Switching from wideband to super-wideband at the decoder
+       * we need to reset the filter-bank and initialize upper-band decoder. */
+      memset(instISAC->synthesisFBState1, 0,
+             FB_STATE_SIZE_WORD32 * sizeof(int32_t));
+      memset(instISAC->synthesisFBState2, 0,
+             FB_STATE_SIZE_WORD32 * sizeof(int32_t));
+
+      DecoderInitUb(&instISAC->instUB);
+  }
+  instISAC->decoderSamplingRateKHz = decoder_operational_rate;
+  return 0;
+}
+
+
+/******************************************************************************
+ * WebRtcIsac_EncSampRate()
+ *
+ * Input:
+ *        - ISAC_main_inst    : iSAC instance
+ *
+ * Return value               : sampling rate in Hertz. The input to encoder
+ *                              is expected to be sampled in this rate.
+ *
+ */
+uint16_t WebRtcIsac_EncSampRate(ISACStruct* ISAC_main_inst) {
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+  return instISAC->in_sample_rate_hz;
+}
+
+
+/******************************************************************************
+ * WebRtcIsac_DecSampRate()
+ * Return the sampling rate of the decoded audio.
+ *
+ * Input:
+ *        - ISAC_main_inst    : iSAC instance
+ *
+ * Return value               : sampling rate in Hertz. Decoder output is
+ *                              sampled at this rate.
+ *
+ */
+uint16_t WebRtcIsac_DecSampRate(ISACStruct* ISAC_main_inst) {
+  ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+  return instISAC->decoderSamplingRateKHz == kIsacWideband ? 16000 : 32000;
+}
+
+void WebRtcIsac_GetBandwidthInfo(ISACStruct* inst,
+                                 IsacBandwidthInfo* bwinfo) {
+  ISACMainStruct* instISAC = (ISACMainStruct*)inst;
+  RTC_DCHECK_NE(0, instISAC->initFlag & BIT_MASK_DEC_INIT);
+  WebRtcIsacBw_GetBandwidthInfo(&instISAC->bwestimator_obj,
+                                instISAC->decoderSamplingRateKHz, bwinfo);
+}
+
+void WebRtcIsac_SetBandwidthInfo(ISACStruct* inst,
+                                 const IsacBandwidthInfo* bwinfo) {
+  ISACMainStruct* instISAC = (ISACMainStruct*)inst;
+  RTC_DCHECK_NE(0, instISAC->initFlag & BIT_MASK_ENC_INIT);
+  WebRtcIsacBw_SetBandwidthInfo(&instISAC->bwestimator_obj, bwinfo);
+}
+
+void WebRtcIsac_SetEncSampRateInDecoder(ISACStruct* inst,
+                                        int sample_rate_hz) {
+  ISACMainStruct* instISAC = (ISACMainStruct*)inst;
+  RTC_DCHECK_NE(0, instISAC->initFlag & BIT_MASK_DEC_INIT);
+  RTC_DCHECK(!(instISAC->initFlag & BIT_MASK_ENC_INIT));
+  RTC_DCHECK(sample_rate_hz == 16000 || sample_rate_hz == 32000);
+  instISAC->encoderSamplingRateKHz = sample_rate_hz / 1000;
+}
diff --git a/modules/audio_coding/codecs/isac/main/source/isac_float_type.h b/modules/audio_coding/codecs/isac/main/source/isac_float_type.h
new file mode 100644
index 0000000..59a8805
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/isac_float_type.h
@@ -0,0 +1,117 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ISAC_FLOAT_TYPE_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ISAC_FLOAT_TYPE_H_
+
+#include "modules/audio_coding/codecs/isac/main/include/isac.h"
+
+namespace webrtc {
+
+struct IsacFloat {
+  using instance_type = ISACStruct;
+  static const bool has_swb = true;
+  static inline int16_t Control(instance_type* inst,
+                                int32_t rate,
+                                int framesize) {
+    return WebRtcIsac_Control(inst, rate, framesize);
+  }
+  static inline int16_t ControlBwe(instance_type* inst,
+                                   int32_t rate_bps,
+                                   int frame_size_ms,
+                                   int16_t enforce_frame_size) {
+    return WebRtcIsac_ControlBwe(inst, rate_bps, frame_size_ms,
+                                 enforce_frame_size);
+  }
+  static inline int16_t Create(instance_type** inst) {
+    return WebRtcIsac_Create(inst);
+  }
+  static inline int DecodeInternal(instance_type* inst,
+                                   const uint8_t* encoded,
+                                   size_t len,
+                                   int16_t* decoded,
+                                   int16_t* speech_type) {
+    return WebRtcIsac_Decode(inst, encoded, len, decoded, speech_type);
+  }
+  static inline size_t DecodePlc(instance_type* inst,
+                                 int16_t* decoded,
+                                 size_t num_lost_frames) {
+    return WebRtcIsac_DecodePlc(inst, decoded, num_lost_frames);
+  }
+
+  static inline void DecoderInit(instance_type* inst) {
+    WebRtcIsac_DecoderInit(inst);
+  }
+  static inline int Encode(instance_type* inst,
+                           const int16_t* speech_in,
+                           uint8_t* encoded) {
+    return WebRtcIsac_Encode(inst, speech_in, encoded);
+  }
+  static inline int16_t EncoderInit(instance_type* inst, int16_t coding_mode) {
+    return WebRtcIsac_EncoderInit(inst, coding_mode);
+  }
+  static inline uint16_t EncSampRate(instance_type* inst) {
+    return WebRtcIsac_EncSampRate(inst);
+  }
+
+  static inline int16_t Free(instance_type* inst) {
+    return WebRtcIsac_Free(inst);
+  }
+  static inline void GetBandwidthInfo(instance_type* inst,
+                                      IsacBandwidthInfo* bwinfo) {
+    WebRtcIsac_GetBandwidthInfo(inst, bwinfo);
+  }
+  static inline int16_t GetErrorCode(instance_type* inst) {
+    return WebRtcIsac_GetErrorCode(inst);
+  }
+
+  static inline int16_t GetNewFrameLen(instance_type* inst) {
+    return WebRtcIsac_GetNewFrameLen(inst);
+  }
+  static inline void SetBandwidthInfo(instance_type* inst,
+                                      const IsacBandwidthInfo* bwinfo) {
+    WebRtcIsac_SetBandwidthInfo(inst, bwinfo);
+  }
+  static inline int16_t SetDecSampRate(instance_type* inst,
+                                       uint16_t sample_rate_hz) {
+    return WebRtcIsac_SetDecSampRate(inst, sample_rate_hz);
+  }
+  static inline int16_t SetEncSampRate(instance_type* inst,
+                                       uint16_t sample_rate_hz) {
+    return WebRtcIsac_SetEncSampRate(inst, sample_rate_hz);
+  }
+  static inline void SetEncSampRateInDecoder(instance_type* inst,
+                                             uint16_t sample_rate_hz) {
+    WebRtcIsac_SetEncSampRateInDecoder(inst, sample_rate_hz);
+  }
+  static inline void SetInitialBweBottleneck(instance_type* inst,
+                                             int bottleneck_bits_per_second) {
+    WebRtcIsac_SetInitialBweBottleneck(inst, bottleneck_bits_per_second);
+  }
+  static inline int16_t UpdateBwEstimate(instance_type* inst,
+                                         const uint8_t* encoded,
+                                         size_t packet_size,
+                                         uint16_t rtp_seq_number,
+                                         uint32_t send_ts,
+                                         uint32_t arr_ts) {
+    return WebRtcIsac_UpdateBwEstimate(inst, encoded, packet_size,
+                                       rtp_seq_number, send_ts, arr_ts);
+  }
+  static inline int16_t SetMaxPayloadSize(instance_type* inst,
+                                          int16_t max_payload_size_bytes) {
+    return WebRtcIsac_SetMaxPayloadSize(inst, max_payload_size_bytes);
+  }
+  static inline int16_t SetMaxRate(instance_type* inst, int32_t max_bit_rate) {
+    return WebRtcIsac_SetMaxRate(inst, max_bit_rate);
+  }
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ISAC_FLOAT_TYPE_H_
diff --git a/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc b/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc
new file mode 100644
index 0000000..727f0f6
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc
@@ -0,0 +1,111 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include <string>
+
+#include "modules/audio_coding/codecs/isac/main/include/isac.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+struct WebRtcISACStruct;
+
+namespace webrtc {
+
+// Number of samples in a 60 ms, sampled at 32 kHz.
+const int kIsacNumberOfSamples = 320 * 6;
+// Maximum number of bytes in output bitstream.
+const size_t kMaxBytes = 1000;
+
+class IsacTest : public ::testing::Test {
+ protected:
+  IsacTest();
+  virtual void SetUp();
+
+  WebRtcISACStruct* isac_codec_;
+
+  int16_t speech_data_[kIsacNumberOfSamples];
+  int16_t output_data_[kIsacNumberOfSamples];
+  uint8_t bitstream_[kMaxBytes];
+  uint8_t bitstream_small_[7];  // Simulate sync packets.
+};
+
+IsacTest::IsacTest()
+    : isac_codec_(NULL) {
+}
+
+void IsacTest::SetUp() {
+  // Read some samples from a speech file, to be used in the encode test.
+  FILE* input_file;
+  const std::string file_name =
+        webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+  input_file = fopen(file_name.c_str(), "rb");
+  ASSERT_TRUE(input_file != NULL);
+  ASSERT_EQ(kIsacNumberOfSamples,
+            static_cast<int32_t>(fread(speech_data_, sizeof(int16_t),
+                                       kIsacNumberOfSamples, input_file)));
+  fclose(input_file);
+  input_file = NULL;
+}
+
+// Test failing Create.
+TEST_F(IsacTest, IsacCreateFail) {
+  // Test to see that an invalid pointer is caught.
+  EXPECT_EQ(-1, WebRtcIsac_Create(NULL));
+}
+
+// Test failing Free.
+TEST_F(IsacTest, IsacFreeFail) {
+  // Test to see that free function doesn't crash.
+  EXPECT_EQ(0, WebRtcIsac_Free(NULL));
+}
+
+// Test normal Create and Free.
+TEST_F(IsacTest, IsacCreateFree) {
+  EXPECT_EQ(0, WebRtcIsac_Create(&isac_codec_));
+  EXPECT_TRUE(isac_codec_ != NULL);
+  EXPECT_EQ(0, WebRtcIsac_Free(isac_codec_));}
+
+TEST_F(IsacTest, IsacUpdateBWE) {
+  // Create encoder memory.
+  EXPECT_EQ(0, WebRtcIsac_Create(&isac_codec_));
+
+  // Init encoder (adaptive mode) and decoder.
+  WebRtcIsac_EncoderInit(isac_codec_, 0);
+  WebRtcIsac_DecoderInit(isac_codec_);
+
+  int encoded_bytes;
+
+  // Test with call with a small packet (sync packet).
+  EXPECT_EQ(-1, WebRtcIsac_UpdateBwEstimate(isac_codec_, bitstream_small_, 7, 1,
+                                            12345, 56789));
+
+  // Encode 60 ms of data (needed to create a first packet).
+  encoded_bytes =  WebRtcIsac_Encode(isac_codec_, speech_data_, bitstream_);
+  EXPECT_EQ(0, encoded_bytes);
+  encoded_bytes =  WebRtcIsac_Encode(isac_codec_, speech_data_, bitstream_);
+  EXPECT_EQ(0, encoded_bytes);
+  encoded_bytes =  WebRtcIsac_Encode(isac_codec_, speech_data_, bitstream_);
+  EXPECT_EQ(0, encoded_bytes);
+  encoded_bytes =  WebRtcIsac_Encode(isac_codec_, speech_data_, bitstream_);
+  EXPECT_EQ(0, encoded_bytes);
+  encoded_bytes =  WebRtcIsac_Encode(isac_codec_, speech_data_, bitstream_);
+  EXPECT_EQ(0, encoded_bytes);
+  encoded_bytes =  WebRtcIsac_Encode(isac_codec_, speech_data_, bitstream_);
+  EXPECT_GT(encoded_bytes, 0);
+
+  // Call to update bandwidth estimator with real data.
+  EXPECT_EQ(0, WebRtcIsac_UpdateBwEstimate(isac_codec_, bitstream_,
+                                           static_cast<size_t>(encoded_bytes),
+                                           1, 12345, 56789));
+
+  // Free memory.
+  EXPECT_EQ(0, WebRtcIsac_Free(isac_codec_));
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/isac/main/source/lattice.c b/modules/audio_coding/codecs/isac/main/source/lattice.c
new file mode 100644
index 0000000..d9d2d65
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/lattice.c
@@ -0,0 +1,219 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lattice.c
+ *
+ * contains the normalized lattice filter routines (MA and AR) for iSAC codec
+ *
+ */
+
+#include <math.h>
+#include <memory.h>
+#include <string.h>
+#ifdef WEBRTC_ANDROID
+#include <stdlib.h>
+#endif
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/codec.h"
+
+/* filter the signal using normalized lattice filter */
+/* MA filter */
+void WebRtcIsac_NormLatticeFilterMa(int orderCoef,
+                                     float *stateF,
+                                     float *stateG,
+                                     float *lat_in,
+                                     double *filtcoeflo,
+                                     double *lat_out)
+{
+  int n,k,i,u,temp1;
+  int ord_1 = orderCoef+1;
+  float sth[MAX_AR_MODEL_ORDER];
+  float cth[MAX_AR_MODEL_ORDER];
+  float inv_cth[MAX_AR_MODEL_ORDER];
+  double a[MAX_AR_MODEL_ORDER+1];
+  float f[MAX_AR_MODEL_ORDER+1][HALF_SUBFRAMELEN], g[MAX_AR_MODEL_ORDER+1][HALF_SUBFRAMELEN];
+  float gain1;
+
+  for (u=0;u<SUBFRAMES;u++)
+  {
+    /* set the Direct Form coefficients */
+    temp1 = u*ord_1;
+    a[0] = 1;
+    memcpy(a+1, filtcoeflo+temp1+1, sizeof(double) * (ord_1-1));
+
+    /* compute lattice filter coefficients */
+    WebRtcIsac_Dir2Lat(a,orderCoef,sth,cth);
+
+    /* compute the gain */
+    gain1 = (float)filtcoeflo[temp1];
+    for (k=0;k<orderCoef;k++)
+    {
+      gain1 *= cth[k];
+      inv_cth[k] = 1/cth[k];
+    }
+
+    /* normalized lattice filter */
+    /*****************************/
+
+    /* initial conditions */
+    for (i=0;i<HALF_SUBFRAMELEN;i++)
+    {
+      f[0][i] = lat_in[i + u * HALF_SUBFRAMELEN];
+      g[0][i] = lat_in[i + u * HALF_SUBFRAMELEN];
+    }
+
+    /* get the state of f&g for the first input, for all orders */
+    for (i=1;i<ord_1;i++)
+    {
+      f[i][0] = inv_cth[i-1]*(f[i-1][0] + sth[i-1]*stateG[i-1]);
+      g[i][0] = cth[i-1]*stateG[i-1] + sth[i-1]* f[i][0];
+    }
+
+    /* filtering */
+    for(k=0;k<orderCoef;k++)
+    {
+      for(n=0;n<(HALF_SUBFRAMELEN-1);n++)
+      {
+        f[k+1][n+1] = inv_cth[k]*(f[k][n+1] + sth[k]*g[k][n]);
+        g[k+1][n+1] = cth[k]*g[k][n] + sth[k]* f[k+1][n+1];
+      }
+    }
+
+    for(n=0;n<HALF_SUBFRAMELEN;n++)
+    {
+      lat_out[n + u * HALF_SUBFRAMELEN] = gain1 * f[orderCoef][n];
+    }
+
+    /* save the states */
+    for (i=0;i<ord_1;i++)
+    {
+      stateF[i] = f[i][HALF_SUBFRAMELEN-1];
+      stateG[i] = g[i][HALF_SUBFRAMELEN-1];
+    }
+    /* process next frame */
+  }
+
+  return;
+}
+
+
+/*///////////////////AR filter ///////////////////////////////*/
+/* filter the signal using normalized lattice filter */
+void WebRtcIsac_NormLatticeFilterAr(int orderCoef,
+                                     float *stateF,
+                                     float *stateG,
+                                     double *lat_in,
+                                     double *lo_filt_coef,
+                                     float *lat_out)
+{
+  int n,k,i,u,temp1;
+  int ord_1 = orderCoef+1;
+  float sth[MAX_AR_MODEL_ORDER];
+  float cth[MAX_AR_MODEL_ORDER];
+  double a[MAX_AR_MODEL_ORDER+1];
+  float ARf[MAX_AR_MODEL_ORDER+1][HALF_SUBFRAMELEN], ARg[MAX_AR_MODEL_ORDER+1][HALF_SUBFRAMELEN];
+  float gain1,inv_gain1;
+
+  for (u=0;u<SUBFRAMES;u++)
+  {
+    /* set the denominator and numerator of the Direct Form */
+    temp1 = u*ord_1;
+    a[0] = 1;
+
+    memcpy(a+1, lo_filt_coef+temp1+1, sizeof(double) * (ord_1-1));
+
+    WebRtcIsac_Dir2Lat(a,orderCoef,sth,cth);
+
+    gain1 = (float)lo_filt_coef[temp1];
+    for (k=0;k<orderCoef;k++)
+    {
+      gain1 = cth[k]*gain1;
+    }
+
+    /* initial conditions */
+    inv_gain1 = 1/gain1;
+    for (i=0;i<HALF_SUBFRAMELEN;i++)
+    {
+      ARf[orderCoef][i] = (float)lat_in[i + u * HALF_SUBFRAMELEN]*inv_gain1;
+    }
+
+
+    for (i=orderCoef-1;i>=0;i--) //get the state of f&g for the first input, for all orders
+    {
+      ARf[i][0] = cth[i]*ARf[i+1][0] - sth[i]*stateG[i];
+      ARg[i+1][0] = sth[i]*ARf[i+1][0] + cth[i]* stateG[i];
+    }
+    ARg[0][0] = ARf[0][0];
+
+    for(n=0;n<(HALF_SUBFRAMELEN-1);n++)
+    {
+      for(k=orderCoef-1;k>=0;k--)
+      {
+        ARf[k][n+1] = cth[k]*ARf[k+1][n+1] - sth[k]*ARg[k][n];
+        ARg[k+1][n+1] = sth[k]*ARf[k+1][n+1] + cth[k]* ARg[k][n];
+      }
+      ARg[0][n+1] = ARf[0][n+1];
+    }
+
+    memcpy(lat_out+u * HALF_SUBFRAMELEN, &(ARf[0][0]), sizeof(float) * HALF_SUBFRAMELEN);
+
+    /* cannot use memcpy in the following */
+    for (i=0;i<ord_1;i++)
+    {
+      stateF[i] = ARf[i][HALF_SUBFRAMELEN-1];
+      stateG[i] = ARg[i][HALF_SUBFRAMELEN-1];
+    }
+
+  }
+
+  return;
+}
+
+
+/* compute the reflection coefficients using the step-down procedure*/
+/* converts the direct form parameters to lattice form.*/
+/* a and b are vectors which contain the direct form coefficients,
+   according to
+   A(z) = a(1) + a(2)*z + a(3)*z^2 + ... + a(M+1)*z^M
+   B(z) = b(1) + b(2)*z + b(3)*z^2 + ... + b(M+1)*z^M
+*/
+
+void WebRtcIsac_Dir2Lat(double *a,
+                        int orderCoef,
+                        float *sth,
+                        float *cth)
+{
+  int m, k;
+  float tmp[MAX_AR_MODEL_ORDER];
+  float tmp_inv, cth2;
+
+  sth[orderCoef-1] = (float)a[orderCoef];
+  cth2 = 1.0f - sth[orderCoef-1] * sth[orderCoef-1];
+  cth[orderCoef-1] = (float)sqrt(cth2);
+  for (m=orderCoef-1; m>0; m--)
+  {
+    tmp_inv = 1.0f / cth2;
+    for (k=1; k<=m; k++)
+    {
+      tmp[k] = ((float)a[k] - sth[m] * (float)a[m-k+1]) * tmp_inv;
+    }
+
+    for (k=1; k<m; k++)
+    {
+      a[k] = tmp[k];
+    }
+
+    sth[m-1] = tmp[m];
+    cth2 = 1 - sth[m-1] * sth[m-1];
+    cth[m-1] = (float)sqrt(cth2);
+  }
+}
diff --git a/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c b/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c
new file mode 100644
index 0000000..dbf33fc
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c
@@ -0,0 +1,535 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <string.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/lpc_analysis.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/codec.h"
+#include "modules/audio_coding/codecs/isac/main/source/entropy_coding.h"
+
+#define LEVINSON_EPS    1.0e-10
+
+
+/* window */
+/* Matlab generation code:
+ *  t = (1:256)/257; r = 1-(1-t).^.45; w = sin(r*pi).^3; w = w/sum(w); plot((1:256)/8, w); grid;
+ *  for k=1:16, fprintf(1, '%.8f, ', w(k*16 + (-15:0))); fprintf(1, '\n'); end
+ */
+static const double kLpcCorrWindow[WINLEN] = {
+  0.00000000, 0.00000001, 0.00000004, 0.00000010, 0.00000020,
+  0.00000035, 0.00000055, 0.00000083, 0.00000118, 0.00000163,
+  0.00000218, 0.00000283, 0.00000361, 0.00000453, 0.00000558, 0.00000679,
+  0.00000817, 0.00000973, 0.00001147, 0.00001342, 0.00001558,
+  0.00001796, 0.00002058, 0.00002344, 0.00002657, 0.00002997,
+  0.00003365, 0.00003762, 0.00004190, 0.00004651, 0.00005144, 0.00005673,
+  0.00006236, 0.00006837, 0.00007476, 0.00008155, 0.00008875,
+  0.00009636, 0.00010441, 0.00011290, 0.00012186, 0.00013128,
+  0.00014119, 0.00015160, 0.00016252, 0.00017396, 0.00018594, 0.00019846,
+  0.00021155, 0.00022521, 0.00023946, 0.00025432, 0.00026978,
+  0.00028587, 0.00030260, 0.00031998, 0.00033802, 0.00035674,
+  0.00037615, 0.00039626, 0.00041708, 0.00043863, 0.00046092, 0.00048396,
+  0.00050775, 0.00053233, 0.00055768, 0.00058384, 0.00061080,
+  0.00063858, 0.00066720, 0.00069665, 0.00072696, 0.00075813,
+  0.00079017, 0.00082310, 0.00085692, 0.00089164, 0.00092728, 0.00096384,
+  0.00100133, 0.00103976, 0.00107914, 0.00111947, 0.00116077,
+  0.00120304, 0.00124630, 0.00129053, 0.00133577, 0.00138200,
+  0.00142924, 0.00147749, 0.00152676, 0.00157705, 0.00162836, 0.00168070,
+  0.00173408, 0.00178850, 0.00184395, 0.00190045, 0.00195799,
+  0.00201658, 0.00207621, 0.00213688, 0.00219860, 0.00226137,
+  0.00232518, 0.00239003, 0.00245591, 0.00252284, 0.00259079, 0.00265977,
+  0.00272977, 0.00280078, 0.00287280, 0.00294582, 0.00301984,
+  0.00309484, 0.00317081, 0.00324774, 0.00332563, 0.00340446,
+  0.00348421, 0.00356488, 0.00364644, 0.00372889, 0.00381220, 0.00389636,
+  0.00398135, 0.00406715, 0.00415374, 0.00424109, 0.00432920,
+  0.00441802, 0.00450754, 0.00459773, 0.00468857, 0.00478001,
+  0.00487205, 0.00496464, 0.00505775, 0.00515136, 0.00524542, 0.00533990,
+  0.00543476, 0.00552997, 0.00562548, 0.00572125, 0.00581725,
+  0.00591342, 0.00600973, 0.00610612, 0.00620254, 0.00629895,
+  0.00639530, 0.00649153, 0.00658758, 0.00668341, 0.00677894, 0.00687413,
+  0.00696891, 0.00706322, 0.00715699, 0.00725016, 0.00734266,
+  0.00743441, 0.00752535, 0.00761540, 0.00770449, 0.00779254,
+  0.00787947, 0.00796519, 0.00804963, 0.00813270, 0.00821431, 0.00829437,
+  0.00837280, 0.00844949, 0.00852436, 0.00859730, 0.00866822,
+  0.00873701, 0.00880358, 0.00886781, 0.00892960, 0.00898884,
+  0.00904542, 0.00909923, 0.00915014, 0.00919805, 0.00924283, 0.00928436,
+  0.00932252, 0.00935718, 0.00938821, 0.00941550, 0.00943890,
+  0.00945828, 0.00947351, 0.00948446, 0.00949098, 0.00949294,
+  0.00949020, 0.00948262, 0.00947005, 0.00945235, 0.00942938, 0.00940099,
+  0.00936704, 0.00932738, 0.00928186, 0.00923034, 0.00917268,
+  0.00910872, 0.00903832, 0.00896134, 0.00887763, 0.00878706,
+  0.00868949, 0.00858478, 0.00847280, 0.00835343, 0.00822653, 0.00809199,
+  0.00794970, 0.00779956, 0.00764145, 0.00747530, 0.00730103,
+  0.00711857, 0.00692787, 0.00672888, 0.00652158, 0.00630597,
+  0.00608208, 0.00584994, 0.00560962, 0.00536124, 0.00510493, 0.00484089,
+  0.00456935, 0.00429062, 0.00400505, 0.00371310, 0.00341532,
+  0.00311238, 0.00280511, 0.00249452, 0.00218184, 0.00186864,
+  0.00155690, 0.00124918, 0.00094895, 0.00066112, 0.00039320, 0.00015881
+};
+
+double WebRtcIsac_LevDurb(double *a, double *k, double *r, size_t order)
+{
+
+  double sum, alpha;
+  size_t m, m_h, i;
+  alpha = 0; //warning -DH
+  a[0] = 1.0;
+  if (r[0] < LEVINSON_EPS) { /* if r[0] <= 0, set LPC coeff. to zero */
+    for (i = 0; i < order; i++) {
+      k[i] = 0;
+      a[i+1] = 0;
+    }
+  } else {
+    a[1] = k[0] = -r[1]/r[0];
+    alpha = r[0] + r[1] * k[0];
+    for (m = 1; m < order; m++){
+      sum = r[m + 1];
+      for (i = 0; i < m; i++){
+        sum += a[i+1] * r[m - i];
+      }
+      k[m] = -sum / alpha;
+      alpha += k[m] * sum;
+      m_h = (m + 1) >> 1;
+      for (i = 0; i < m_h; i++){
+        sum = a[i+1] + k[m] * a[m - i];
+        a[m - i] += k[m] * a[i+1];
+        a[i+1] = sum;
+      }
+      a[m+1] = k[m];
+    }
+  }
+  return alpha;
+}
+
+
+//was static before, but didn't work with MEX file
+void WebRtcIsac_GetVars(const double *input, const int16_t *pitchGains_Q12,
+                       double *oldEnergy, double *varscale)
+{
+  double nrg[4], chng, pg;
+  int k;
+
+  double pitchGains[4]={0,0,0,0};;
+
+  /* Calculate energies of first and second frame halfs */
+  nrg[0] = 0.0001;
+  for (k = QLOOKAHEAD/2; k < (FRAMESAMPLES_QUARTER + QLOOKAHEAD) / 2; k++) {
+    nrg[0] += input[k]*input[k];
+  }
+  nrg[1] = 0.0001;
+  for ( ; k < (FRAMESAMPLES_HALF + QLOOKAHEAD) / 2; k++) {
+    nrg[1] += input[k]*input[k];
+  }
+  nrg[2] = 0.0001;
+  for ( ; k < (FRAMESAMPLES*3/4 + QLOOKAHEAD) / 2; k++) {
+    nrg[2] += input[k]*input[k];
+  }
+  nrg[3] = 0.0001;
+  for ( ; k < (FRAMESAMPLES + QLOOKAHEAD) / 2; k++) {
+    nrg[3] += input[k]*input[k];
+  }
+
+  /* Calculate average level change */
+  chng = 0.25 * (fabs(10.0 * log10(nrg[3] / nrg[2])) +
+                 fabs(10.0 * log10(nrg[2] / nrg[1])) +
+                 fabs(10.0 * log10(nrg[1] / nrg[0])) +
+                 fabs(10.0 * log10(nrg[0] / *oldEnergy)));
+
+
+  /* Find average pitch gain */
+  pg = 0.0;
+  for (k=0; k<4; k++)
+  {
+    pitchGains[k] = ((float)pitchGains_Q12[k])/4096;
+    pg += pitchGains[k];
+  }
+  pg *= 0.25;
+
+  /* If pitch gain is low and energy constant - increase noise level*/
+  /* Matlab code:
+     pg = 0:.01:.45; plot(pg, 0.0 + 1.0 * exp( -1.0 * exp(-200.0 * pg.*pg.*pg) / (1.0 + 0.4 * 0) ))
+  */
+  *varscale = 0.0 + 1.0 * exp( -1.4 * exp(-200.0 * pg*pg*pg) / (1.0 + 0.4 * chng) );
+
+  *oldEnergy = nrg[3];
+}
+
+void
+WebRtcIsac_GetVarsUB(
+    const double* input,
+    double*       oldEnergy,
+    double*       varscale)
+{
+  double nrg[4], chng;
+  int k;
+
+  /* Calculate energies of first and second frame halfs */
+  nrg[0] = 0.0001;
+  for (k = 0; k < (FRAMESAMPLES_QUARTER) / 2; k++) {
+    nrg[0] += input[k]*input[k];
+  }
+  nrg[1] = 0.0001;
+  for ( ; k < (FRAMESAMPLES_HALF) / 2; k++) {
+    nrg[1] += input[k]*input[k];
+  }
+  nrg[2] = 0.0001;
+  for ( ; k < (FRAMESAMPLES*3/4) / 2; k++) {
+    nrg[2] += input[k]*input[k];
+  }
+  nrg[3] = 0.0001;
+  for ( ; k < (FRAMESAMPLES) / 2; k++) {
+    nrg[3] += input[k]*input[k];
+  }
+
+  /* Calculate average level change */
+  chng = 0.25 * (fabs(10.0 * log10(nrg[3] / nrg[2])) +
+                 fabs(10.0 * log10(nrg[2] / nrg[1])) +
+                 fabs(10.0 * log10(nrg[1] / nrg[0])) +
+                 fabs(10.0 * log10(nrg[0] / *oldEnergy)));
+
+
+  /* If pitch gain is low and energy constant - increase noise level*/
+  /* Matlab code:
+     pg = 0:.01:.45; plot(pg, 0.0 + 1.0 * exp( -1.0 * exp(-200.0 * pg.*pg.*pg) / (1.0 + 0.4 * 0) ))
+  */
+  *varscale = exp( -1.4 / (1.0 + 0.4 * chng) );
+
+  *oldEnergy = nrg[3];
+}
+
+void WebRtcIsac_GetLpcCoefLb(double *inLo, double *inHi, MaskFiltstr *maskdata,
+                             double signal_noise_ratio, const int16_t *pitchGains_Q12,
+                             double *lo_coeff, double *hi_coeff)
+{
+  int k, n, j, pos1, pos2;
+  double varscale;
+
+  double DataLo[WINLEN], DataHi[WINLEN];
+  double corrlo[ORDERLO+2], corrlo2[ORDERLO+1];
+  double corrhi[ORDERHI+1];
+  double k_veclo[ORDERLO], k_vechi[ORDERHI];
+
+  double a_LO[ORDERLO+1], a_HI[ORDERHI+1];
+  double tmp, res_nrg;
+
+  double FwdA, FwdB;
+
+  /* hearing threshold level in dB; higher value gives more noise */
+  const double HearThresOffset = -28.0;
+
+  /* bandwdith expansion factors for low- and high band */
+  const double gammaLo = 0.9;
+  const double gammaHi = 0.8;
+
+  /* less-noise-at-low-frequencies factor */
+  double aa;
+
+
+  /* convert from dB to signal level */
+  const double H_T_H = pow(10.0, 0.05 * HearThresOffset);
+  double S_N_R = pow(10.0, 0.05 * signal_noise_ratio) / 3.46;    /* divide by sqrt(12) */
+
+  /* change quallevel depending on pitch gains and level fluctuations */
+  WebRtcIsac_GetVars(inLo, pitchGains_Q12, &(maskdata->OldEnergy), &varscale);
+
+  /* less-noise-at-low-frequencies factor */
+  aa = 0.35 * (0.5 + 0.5 * varscale);
+
+  /* replace data in buffer by new look-ahead data */
+  for (pos1 = 0; pos1 < QLOOKAHEAD; pos1++)
+    maskdata->DataBufferLo[pos1 + WINLEN - QLOOKAHEAD] = inLo[pos1];
+
+  for (k = 0; k < SUBFRAMES; k++) {
+
+    /* Update input buffer and multiply signal with window */
+    for (pos1 = 0; pos1 < WINLEN - UPDATE/2; pos1++) {
+      maskdata->DataBufferLo[pos1] = maskdata->DataBufferLo[pos1 + UPDATE/2];
+      maskdata->DataBufferHi[pos1] = maskdata->DataBufferHi[pos1 + UPDATE/2];
+      DataLo[pos1] = maskdata->DataBufferLo[pos1] * kLpcCorrWindow[pos1];
+      DataHi[pos1] = maskdata->DataBufferHi[pos1] * kLpcCorrWindow[pos1];
+    }
+    pos2 = k * UPDATE/2;
+    for (n = 0; n < UPDATE/2; n++, pos1++) {
+      maskdata->DataBufferLo[pos1] = inLo[QLOOKAHEAD + pos2];
+      maskdata->DataBufferHi[pos1] = inHi[pos2++];
+      DataLo[pos1] = maskdata->DataBufferLo[pos1] * kLpcCorrWindow[pos1];
+      DataHi[pos1] = maskdata->DataBufferHi[pos1] * kLpcCorrWindow[pos1];
+    }
+
+    /* Get correlation coefficients */
+    WebRtcIsac_AutoCorr(corrlo, DataLo, WINLEN, ORDERLO+1); /* computing autocorrelation */
+    WebRtcIsac_AutoCorr(corrhi, DataHi, WINLEN, ORDERHI);
+
+
+    /* less noise for lower frequencies, by filtering/scaling autocorrelation sequences */
+    corrlo2[0] = (1.0+aa*aa) * corrlo[0] - 2.0*aa * corrlo[1];
+    tmp = (1.0 + aa*aa);
+    for (n = 1; n <= ORDERLO; n++) {
+      corrlo2[n] = tmp * corrlo[n] - aa * (corrlo[n-1] + corrlo[n+1]);
+    }
+    tmp = (1.0+aa) * (1.0+aa);
+    for (n = 0; n <= ORDERHI; n++) {
+      corrhi[n] = tmp * corrhi[n];
+    }
+
+    /* add white noise floor */
+    corrlo2[0] += 1e-6;
+    corrhi[0] += 1e-6;
+
+
+    FwdA = 0.01;
+    FwdB = 0.01;
+
+    /* recursive filtering of correlation over subframes */
+    for (n = 0; n <= ORDERLO; n++) {
+      maskdata->CorrBufLo[n] = FwdA * maskdata->CorrBufLo[n] + corrlo2[n];
+      corrlo2[n] = ((1.0-FwdA)*FwdB) * maskdata->CorrBufLo[n] + (1.0-FwdB) * corrlo2[n];
+    }
+    for (n = 0; n <= ORDERHI; n++) {
+      maskdata->CorrBufHi[n] = FwdA * maskdata->CorrBufHi[n] + corrhi[n];
+      corrhi[n] = ((1.0-FwdA)*FwdB) * maskdata->CorrBufHi[n] + (1.0-FwdB) * corrhi[n];
+    }
+
+    /* compute prediction coefficients */
+    WebRtcIsac_LevDurb(a_LO, k_veclo, corrlo2, ORDERLO);
+    WebRtcIsac_LevDurb(a_HI, k_vechi, corrhi, ORDERHI);
+
+    /* bandwidth expansion */
+    tmp = gammaLo;
+    for (n = 1; n <= ORDERLO; n++) {
+      a_LO[n] *= tmp;
+      tmp *= gammaLo;
+    }
+
+    /* residual energy */
+    res_nrg = 0.0;
+    for (j = 0; j <= ORDERLO; j++) {
+      for (n = 0; n <= j; n++) {
+        res_nrg += a_LO[j] * corrlo2[j-n] * a_LO[n];
+      }
+      for (n = j+1; n <= ORDERLO; n++) {
+        res_nrg += a_LO[j] * corrlo2[n-j] * a_LO[n];
+      }
+    }
+
+    /* add hearing threshold and compute the gain */
+    *lo_coeff++ = S_N_R / (sqrt(res_nrg) / varscale + H_T_H);
+
+    /* copy coefficients to output array */
+    for (n = 1; n <= ORDERLO; n++) {
+      *lo_coeff++ = a_LO[n];
+    }
+
+
+    /* bandwidth expansion */
+    tmp = gammaHi;
+    for (n = 1; n <= ORDERHI; n++) {
+      a_HI[n] *= tmp;
+      tmp *= gammaHi;
+    }
+
+    /* residual energy */
+    res_nrg = 0.0;
+    for (j = 0; j <= ORDERHI; j++) {
+      for (n = 0; n <= j; n++) {
+        res_nrg += a_HI[j] * corrhi[j-n] * a_HI[n];
+      }
+      for (n = j+1; n <= ORDERHI; n++) {
+        res_nrg += a_HI[j] * corrhi[n-j] * a_HI[n];
+      }
+    }
+
+    /* add hearing threshold and compute of the gain */
+    *hi_coeff++ = S_N_R / (sqrt(res_nrg) / varscale + H_T_H);
+
+    /* copy coefficients to output array */
+    for (n = 1; n <= ORDERHI; n++) {
+      *hi_coeff++ = a_HI[n];
+    }
+  }
+}
+
+
+
+/******************************************************************************
+ * WebRtcIsac_GetLpcCoefUb()
+ *
+ * Compute LP coefficients and correlation coefficients. At 12 kHz LP
+ * coefficients of the first and the last sub-frame is computed. At 16 kHz
+ * LP coefficients of 4th, 8th and 12th sub-frames are computed. We always
+ * compute correlation coefficients of all sub-frames.
+ *
+ * Inputs:
+ *       -inSignal           : Input signal
+ *       -maskdata           : a structure keeping signal from previous frame.
+ *       -bandwidth          : specifies if the codec is in 0-16 kHz mode or
+ *                             0-12 kHz mode.
+ *
+ * Outputs:
+ *       -lpCoeff            : pointer to a buffer where A-polynomials are
+ *                             written to (first coeff is 1 and it is not
+ *                             written)
+ *       -corrMat            : a matrix where correlation coefficients of each
+ *                             sub-frame are written to one row.
+ *       -varscale           : a scale used to compute LPC gains.
+ */
+void
+WebRtcIsac_GetLpcCoefUb(
+    double*      inSignal,
+    MaskFiltstr* maskdata,
+    double*      lpCoeff,
+    double       corrMat[][UB_LPC_ORDER + 1],
+    double*      varscale,
+    int16_t  bandwidth)
+{
+  int frameCntr, activeFrameCntr, n, pos1, pos2;
+  int16_t criterion1;
+  int16_t criterion2;
+  int16_t numSubFrames = SUBFRAMES * (1 + (bandwidth == isac16kHz));
+  double data[WINLEN];
+  double corrSubFrame[UB_LPC_ORDER+2];
+  double reflecCoeff[UB_LPC_ORDER];
+
+  double aPolynom[UB_LPC_ORDER+1];
+  double tmp;
+
+  /* bandwdith expansion factors */
+  const double gamma = 0.9;
+
+  /* change quallevel depending on pitch gains and level fluctuations */
+  WebRtcIsac_GetVarsUB(inSignal, &(maskdata->OldEnergy), varscale);
+
+  /* replace data in buffer by new look-ahead data */
+  for(frameCntr = 0, activeFrameCntr = 0; frameCntr < numSubFrames;
+      frameCntr++)
+  {
+    if(frameCntr == SUBFRAMES)
+    {
+      // we are in 16 kHz
+      varscale++;
+      WebRtcIsac_GetVarsUB(&inSignal[FRAMESAMPLES_HALF],
+                          &(maskdata->OldEnergy), varscale);
+    }
+    /* Update input buffer and multiply signal with window */
+    for(pos1 = 0; pos1 < WINLEN - UPDATE/2; pos1++)
+    {
+      maskdata->DataBufferLo[pos1] = maskdata->DataBufferLo[pos1 +
+                                                            UPDATE/2];
+      data[pos1] = maskdata->DataBufferLo[pos1] * kLpcCorrWindow[pos1];
+    }
+    pos2 = frameCntr * UPDATE/2;
+    for(n = 0; n < UPDATE/2; n++, pos1++, pos2++)
+    {
+      maskdata->DataBufferLo[pos1] = inSignal[pos2];
+      data[pos1] = maskdata->DataBufferLo[pos1] * kLpcCorrWindow[pos1];
+    }
+
+    /* Get correlation coefficients */
+    /* computing autocorrelation    */
+    WebRtcIsac_AutoCorr(corrSubFrame, data, WINLEN, UB_LPC_ORDER+1);
+    memcpy(corrMat[frameCntr], corrSubFrame,
+           (UB_LPC_ORDER+1)*sizeof(double));
+
+    criterion1 = ((frameCntr == 0) || (frameCntr == (SUBFRAMES - 1))) &&
+        (bandwidth == isac12kHz);
+    criterion2 = (((frameCntr+1) % 4) == 0) &&
+        (bandwidth == isac16kHz);
+    if(criterion1 || criterion2)
+    {
+      /* add noise */
+      corrSubFrame[0] += 1e-6;
+      /* compute prediction coefficients */
+      WebRtcIsac_LevDurb(aPolynom, reflecCoeff, corrSubFrame,
+                        UB_LPC_ORDER);
+
+      /* bandwidth expansion */
+      tmp = gamma;
+      for (n = 1; n <= UB_LPC_ORDER; n++)
+      {
+        *lpCoeff++ = aPolynom[n] * tmp;
+        tmp *= gamma;
+      }
+      activeFrameCntr++;
+    }
+  }
+}
+
+
+
+/******************************************************************************
+ * WebRtcIsac_GetLpcGain()
+ *
+ * Compute the LPC gains for each sub-frame, given the LPC of each sub-frame
+ * and the corresponding correlation coefficients.
+ *
+ * Inputs:
+ *       -signal_noise_ratio : the desired SNR in dB.
+ *       -numVecs            : number of sub-frames
+ *       -corrMat             : a matrix of correlation coefficients where
+ *                             each row is a set of correlation coefficients of
+ *                             one sub-frame.
+ *       -varscale           : a scale computed when WebRtcIsac_GetLpcCoefUb()
+ *                             is called.
+ *
+ * Outputs:
+ *       -gain               : pointer to a buffer where LP gains are written.
+ *
+ */
+void
+WebRtcIsac_GetLpcGain(
+    double        signal_noise_ratio,
+    const double* filtCoeffVecs,
+    int           numVecs,
+    double*       gain,
+    double        corrMat[][UB_LPC_ORDER + 1],
+    const double* varscale)
+{
+  int16_t j, n;
+  int16_t subFrameCntr;
+  double aPolynom[ORDERLO + 1];
+  double res_nrg;
+
+  const double HearThresOffset = -28.0;
+  const double H_T_H = pow(10.0, 0.05 * HearThresOffset);
+  /* divide by sqrt(12) = 3.46 */
+  const double S_N_R = pow(10.0, 0.05 * signal_noise_ratio) / 3.46;
+
+  aPolynom[0] = 1;
+  for(subFrameCntr = 0; subFrameCntr < numVecs; subFrameCntr++)
+  {
+    if(subFrameCntr == SUBFRAMES)
+    {
+      // we are in second half of a SWB frame. use new varscale
+      varscale++;
+    }
+    memcpy(&aPolynom[1], &filtCoeffVecs[(subFrameCntr * (UB_LPC_ORDER + 1)) +
+                                        1], sizeof(double) * UB_LPC_ORDER);
+
+    /* residual energy */
+    res_nrg = 0.0;
+    for(j = 0; j <= UB_LPC_ORDER; j++)
+    {
+      for(n = 0; n <= j; n++)
+      {
+        res_nrg += aPolynom[j] * corrMat[subFrameCntr][j-n] *
+            aPolynom[n];
+      }
+      for(n = j+1; n <= UB_LPC_ORDER; n++)
+      {
+        res_nrg += aPolynom[j] * corrMat[subFrameCntr][n-j] *
+            aPolynom[n];
+      }
+    }
+
+    /* add hearing threshold and compute the gain */
+    gain[subFrameCntr] = S_N_R / (sqrt(res_nrg) / *varscale + H_T_H);
+  }
+}
diff --git a/modules/audio_coding/codecs/isac/main/source/lpc_analysis.h b/modules/audio_coding/codecs/isac/main/source/lpc_analysis.h
new file mode 100644
index 0000000..c0848ab
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/lpc_analysis.h
@@ -0,0 +1,50 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lpc_analysis.h
+ *
+ * LPC functions
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_ANALYSIS_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_ANALYSIS_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+double WebRtcIsac_LevDurb(double *a, double *k, double *r, size_t order);
+
+void WebRtcIsac_GetVars(const double *input, const int16_t *pitchGains_Q12,
+                       double *oldEnergy, double *varscale);
+
+void WebRtcIsac_GetLpcCoefLb(double *inLo, double *inHi, MaskFiltstr *maskdata,
+                             double signal_noise_ratio, const int16_t *pitchGains_Q12,
+                             double *lo_coeff, double *hi_coeff);
+
+
+void WebRtcIsac_GetLpcGain(
+    double         signal_noise_ratio,
+    const double*  filtCoeffVecs,
+    int            numVecs,
+    double*        gain,
+    double         corrLo[][UB_LPC_ORDER + 1],
+    const double*  varscale);
+
+void WebRtcIsac_GetLpcCoefUb(
+    double*      inSignal,
+    MaskFiltstr* maskdata,
+    double*      lpCoeff,
+    double       corr[][UB_LPC_ORDER + 1],
+    double*      varscale,
+    int16_t  bandwidth);
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_ANALYIS_H_ */
diff --git a/modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.c b/modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.c
new file mode 100644
index 0000000..d6e65f3
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.c
@@ -0,0 +1,137 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * SWB_KLT_Tables_LPCGain.c
+ *
+ * This file defines tables used for entropy coding of LPC Gain
+ * of upper-band.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+const double WebRtcIsac_kQSizeLpcGain = 0.100000;
+
+const double WebRtcIsac_kMeanLpcGain = -3.3822;
+
+/*
+* The smallest reconstruction points for quantiztion of
+* LPC gains.
+*/
+const double WebRtcIsac_kLeftRecPointLpcGain[SUBFRAMES] =
+{
+   -0.800000, -1.000000, -1.200000, -2.200000, -3.000000, -12.700000
+};
+
+/*
+* Number of reconstruction points of quantizers for LPC Gains.
+*/
+const int16_t WebRtcIsac_kNumQCellLpcGain[SUBFRAMES] =
+{
+    17,  20,  25,  45,  77, 170
+};
+/*
+* Starting index for entropy decoder to search for the right interval,
+* one entry per LAR coefficient
+*/
+const uint16_t WebRtcIsac_kLpcGainEntropySearch[SUBFRAMES] =
+{
+     8,  10,  12,  22,  38,  85
+};
+
+/*
+* The following 6 vectors define CDF of 6 decorrelated LPC
+* gains.
+*/
+const uint16_t WebRtcIsac_kLpcGainCdfVec0[18] =
+{
+     0,    10,    27,    83,   234,   568,  1601,  4683, 16830, 57534, 63437,
+ 64767, 65229, 65408, 65483, 65514, 65527, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcGainCdfVec1[21] =
+{
+     0,    15,    33,    84,   185,   385,   807,  1619,  3529,  7850, 19488,
+ 51365, 62437, 64548, 65088, 65304, 65409, 65484, 65507, 65522, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcGainCdfVec2[26] =
+{
+     0,    15,    29,    54,    89,   145,   228,   380,   652,  1493,  4260,
+ 12359, 34133, 50749, 57224, 60814, 62927, 64078, 64742, 65103, 65311, 65418,
+ 65473, 65509, 65521, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcGainCdfVec3[46] =
+{
+     0,     8,    12,    16,    26,    42,    56,    76,   111,   164,   247,
+   366,   508,   693,  1000,  1442,  2155,  3188,  4854,  7387, 11249, 17617,
+ 30079, 46711, 56291, 60127, 62140, 63258, 63954, 64384, 64690, 64891, 65031,
+ 65139, 65227, 65293, 65351, 65399, 65438, 65467, 65492, 65504, 65510, 65518,
+ 65523, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcGainCdfVec4[78] =
+{
+     0,    17,    29,    39,    51,    70,   104,   154,   234,   324,   443,
+   590,   760,   971,  1202,  1494,  1845,  2274,  2797,  3366,  4088,  4905,
+  5899,  7142,  8683, 10625, 12983, 16095, 20637, 28216, 38859, 47237, 51537,
+ 54150, 56066, 57583, 58756, 59685, 60458, 61103, 61659, 62144, 62550, 62886,
+ 63186, 63480, 63743, 63954, 64148, 64320, 64467, 64600, 64719, 64837, 64939,
+ 65014, 65098, 65160, 65211, 65250, 65290, 65325, 65344, 65366, 65391, 65410,
+ 65430, 65447, 65460, 65474, 65487, 65494, 65501, 65509, 65513, 65518, 65520,
+ 65535
+};
+
+const uint16_t WebRtcIsac_kLpcGainCdfVec5[171] =
+{
+     0,    10,    12,    14,    16,    18,    23,    29,    35,    42,    51,
+    58,    65,    72,    78,    87,    96,   103,   111,   122,   134,   150,
+   167,   184,   202,   223,   244,   265,   289,   315,   346,   379,   414,
+   450,   491,   532,   572,   613,   656,   700,   751,   802,   853,   905,
+   957,  1021,  1098,  1174,  1250,  1331,  1413,  1490,  1565,  1647,  1730,
+  1821,  1913,  2004,  2100,  2207,  2314,  2420,  2532,  2652,  2783,  2921,
+  3056,  3189,  3327,  3468,  3640,  3817,  3993,  4171,  4362,  4554,  4751,
+  4948,  5142,  5346,  5566,  5799,  6044,  6301,  6565,  6852,  7150,  7470,
+  7797,  8143,  8492,  8835,  9181,  9547,  9919, 10315, 10718, 11136, 11566,
+ 12015, 12482, 12967, 13458, 13953, 14432, 14903, 15416, 15936, 16452, 16967,
+ 17492, 18024, 18600, 19173, 19736, 20311, 20911, 21490, 22041, 22597, 23157,
+ 23768, 24405, 25034, 25660, 26280, 26899, 27614, 28331, 29015, 29702, 30403,
+ 31107, 31817, 32566, 33381, 34224, 35099, 36112, 37222, 38375, 39549, 40801,
+ 42074, 43350, 44626, 45982, 47354, 48860, 50361, 51845, 53312, 54739, 56026,
+ 57116, 58104, 58996, 59842, 60658, 61488, 62324, 63057, 63769, 64285, 64779,
+ 65076, 65344, 65430, 65500, 65517, 65535
+};
+
+/*
+* An array of pointers to CDFs of decorrelated LPC Gains
+*/
+const uint16_t* WebRtcIsac_kLpcGainCdfMat[SUBFRAMES] =
+{
+    WebRtcIsac_kLpcGainCdfVec0, WebRtcIsac_kLpcGainCdfVec1,
+    WebRtcIsac_kLpcGainCdfVec2, WebRtcIsac_kLpcGainCdfVec3,
+    WebRtcIsac_kLpcGainCdfVec4, WebRtcIsac_kLpcGainCdfVec5
+};
+
+/*
+* A matrix to decorrellate LPC gains of subframes.
+*/
+const double WebRtcIsac_kLpcGainDecorrMat[SUBFRAMES][SUBFRAMES] =
+{
+    {-0.150860,  0.327872,  0.367220,  0.504613,  0.559270,  0.409234},
+    { 0.457128, -0.613591, -0.289283, -0.029734,  0.393760,  0.418240},
+    {-0.626043,  0.136489, -0.439118, -0.448323,  0.135987,  0.420869},
+    { 0.526617,  0.480187,  0.242552, -0.488754, -0.158713,  0.411331},
+    {-0.302587, -0.494953,  0.588112, -0.063035, -0.404290,  0.387510},
+    { 0.086378,  0.147714, -0.428875,  0.548300, -0.570121,  0.401391}
+};
diff --git a/modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.h b/modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.h
new file mode 100644
index 0000000..7a5abfd
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/lpc_gain_swb_tables.h
@@ -0,0 +1,49 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * SWB_KLT_Tables_LPCGain.h
+ *
+ * This file declares tables used for entropy coding of LPC Gain
+ * of upper-band.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_GAIN_SWB_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_GAIN_SWB_TABLES_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+extern const double WebRtcIsac_kQSizeLpcGain;
+
+extern const double WebRtcIsac_kLeftRecPointLpcGain[SUBFRAMES];
+
+extern const int16_t WebRtcIsac_kNumQCellLpcGain[SUBFRAMES];
+
+extern const uint16_t WebRtcIsac_kLpcGainEntropySearch[SUBFRAMES];
+
+extern const uint16_t WebRtcIsac_kLpcGainCdfVec0[18];
+
+extern const uint16_t WebRtcIsac_kLpcGainCdfVec1[21];
+
+extern const uint16_t WebRtcIsac_kLpcGainCdfVec2[26];
+
+extern const uint16_t WebRtcIsac_kLpcGainCdfVec3[46];
+
+extern const uint16_t WebRtcIsac_kLpcGainCdfVec4[78];
+
+extern const uint16_t WebRtcIsac_kLpcGainCdfVec5[171];
+
+extern const uint16_t* WebRtcIsac_kLpcGainCdfMat[SUBFRAMES];
+
+extern const double WebRtcIsac_kLpcGainDecorrMat[SUBFRAMES][SUBFRAMES];
+
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_GAIN_SWB_TABLES_H_
diff --git a/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.c b/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.c
new file mode 100644
index 0000000..490866c
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.c
@@ -0,0 +1,159 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * SWB_KLT_Tables.c
+ *
+ * This file defines tables used for entropy coding of LPC shape of
+ * upper-band signal if the bandwidth is 12 kHz.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+/*
+* Mean value of LAR
+*/
+const double WebRtcIsac_kMeanLarUb12[UB_LPC_ORDER] =
+{
+  0.03748928306641, 0.09453441192543, -0.01112522344398, 0.03800237516842
+};
+
+/*
+* A rotation matrix to decorrelate intra-vector correlation,
+* i.e. correlation among components of LAR vector.
+*/
+const double WebRtcIsac_kIntraVecDecorrMatUb12[UB_LPC_ORDER][UB_LPC_ORDER] =
+{
+    {-0.00075365493856,  -0.05809964887743,  -0.23397966154116,   0.97050367376411},
+    { 0.00625021257734,  -0.17299965610679,   0.95977735920651,   0.22104179375008},
+    { 0.20543384258374,  -0.96202143495696,  -0.15301870801552,  -0.09432375099565},
+    {-0.97865075648479,  -0.20300322280841,  -0.02581111653779,  -0.01913568980258}
+};
+
+/*
+* A rotation matrix to remove correlation among LAR coefficients
+* of different LAR vectors. One might guess that decorrelation matrix
+* for the first component should differ from the second component
+* but we haven't observed a significant benefit of having different
+* decorrelation matrices for different components.
+*/
+const double WebRtcIsac_kInterVecDecorrMatUb12
+[UB_LPC_VEC_PER_FRAME][UB_LPC_VEC_PER_FRAME] =
+{
+    { 0.70650597970460,  -0.70770707262373},
+    {-0.70770707262373,  -0.70650597970460}
+};
+
+/*
+* LAR quantization step-size.
+*/
+const double WebRtcIsac_kLpcShapeQStepSizeUb12 = 0.150000;
+
+/*
+* The smallest reconstruction points for quantiztion of LAR coefficients.
+*/
+const double WebRtcIsac_kLpcShapeLeftRecPointUb12
+[UB_LPC_ORDER*UB_LPC_VEC_PER_FRAME] =
+{
+    -0.900000, -1.050000, -1.350000, -1.800000, -1.350000, -1.650000,
+    -2.250000, -3.450000
+};
+
+/*
+* Number of reconstruction points of quantizers for LAR coefficients.
+*/
+const int16_t WebRtcIsac_kLpcShapeNumRecPointUb12
+[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME] =
+{
+    13, 15, 19, 27, 19, 24, 32, 48
+};
+
+/*
+* Starting index for entropy decoder to search for the right interval,
+* one entry per LAR coefficient
+*/
+const uint16_t WebRtcIsac_kLpcShapeEntropySearchUb12
+[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME] =
+{
+     6,  7,  9, 13,  9, 12, 16, 24
+};
+
+/*
+* The following 8 vectors define CDF of 8 decorrelated LAR
+* coefficients.
+*/
+const uint16_t WebRtcIsac_kLpcShapeCdfVec0Ub12[14] =
+{
+     0,    13,    95,   418,  1687,  6498, 21317, 44200, 59029, 63849, 65147,
+ 65449, 65525, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec1Ub12[16] =
+{
+     0,    10,    59,   255,   858,  2667,  8200, 22609, 42988, 57202, 62947,
+ 64743, 65308, 65476, 65522, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec2Ub12[20] =
+{
+     0,    18,    40,   118,   332,   857,  2017,  4822, 11321, 24330, 41279,
+ 54342, 60637, 63394, 64659, 65184, 65398, 65482, 65518, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec3Ub12[28] =
+{
+     0,    21,    38,    90,   196,   398,   770,  1400,  2589,  4650,  8211,
+ 14933, 26044, 39592, 50814, 57452, 60971, 62884, 63995, 64621, 65019, 65273,
+ 65410, 65480, 65514, 65522, 65531, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec4Ub12[20] =
+{
+     0,     7,    46,   141,   403,   969,  2132,  4649, 10633, 24902, 43254,
+ 54665, 59928, 62674, 64173, 64938, 65293, 65464, 65523, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec5Ub12[25] =
+{
+     0,     7,    22,    72,   174,   411,   854,  1737,  3545,  6774, 13165,
+ 25221, 40980, 52821, 58714, 61706, 63472, 64437, 64989, 65287, 65430, 65503,
+ 65525, 65529, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec6Ub12[33] =
+{
+     0,    11,    21,    36,    65,   128,   228,   401,   707,  1241,  2126,
+  3589,  6060, 10517, 18853, 31114, 42477, 49770, 54271, 57467, 59838, 61569,
+ 62831, 63772, 64433, 64833, 65123, 65306, 65419, 65466, 65499, 65519, 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec7Ub12[49] =
+{
+     0,    14,    34,    67,   107,   167,   245,   326,   449,   645,   861,
+  1155,  1508,  2003,  2669,  3544,  4592,  5961,  7583,  9887, 13256, 18765,
+ 26519, 34077, 40034, 44349, 47795, 50663, 53262, 55473, 57458, 59122, 60592,
+ 61742, 62690, 63391, 63997, 64463, 64794, 65045, 65207, 65309, 65394, 65443,
+ 65478, 65504, 65514, 65523, 65535
+};
+
+/*
+* An array of pointers to CDFs of decorrelated LARs
+*/
+const uint16_t* WebRtcIsac_kLpcShapeCdfMatUb12
+[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME] =
+{
+    WebRtcIsac_kLpcShapeCdfVec0Ub12, WebRtcIsac_kLpcShapeCdfVec1Ub12,
+    WebRtcIsac_kLpcShapeCdfVec2Ub12, WebRtcIsac_kLpcShapeCdfVec3Ub12,
+    WebRtcIsac_kLpcShapeCdfVec4Ub12, WebRtcIsac_kLpcShapeCdfVec5Ub12,
+    WebRtcIsac_kLpcShapeCdfVec6Ub12, WebRtcIsac_kLpcShapeCdfVec7Ub12
+};
diff --git a/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.h b/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.h
new file mode 100644
index 0000000..7bae096
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb12_tables.h
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lpc_shape_swb12_tables.h
+ *
+ * This file declares tables used for entropy coding of LPC shape of
+ * upper-band signal if the bandwidth is 12 kHz.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB12_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB12_TABLES_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+extern const double WebRtcIsac_kMeanLarUb12[UB_LPC_ORDER];
+
+extern const double WebRtcIsac_kMeanLpcGain;
+
+extern const double WebRtcIsac_kIntraVecDecorrMatUb12[UB_LPC_ORDER][UB_LPC_ORDER];
+
+extern const double WebRtcIsac_kInterVecDecorrMatUb12
+[UB_LPC_VEC_PER_FRAME][UB_LPC_VEC_PER_FRAME];
+
+extern const double WebRtcIsac_kLpcShapeQStepSizeUb12;
+
+extern const double WebRtcIsac_kLpcShapeLeftRecPointUb12
+[UB_LPC_ORDER*UB_LPC_VEC_PER_FRAME];
+
+
+extern const int16_t WebRtcIsac_kLpcShapeNumRecPointUb12
+[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME];
+
+extern const uint16_t WebRtcIsac_kLpcShapeEntropySearchUb12
+[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec0Ub12[14];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec1Ub12[16];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec2Ub12[20];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec3Ub12[28];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec4Ub12[20];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec5Ub12[25];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec6Ub12[33];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec7Ub12[49];
+
+extern const uint16_t* WebRtcIsac_kLpcShapeCdfMatUb12
+[UB_LPC_ORDER * UB_LPC_VEC_PER_FRAME];
+
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB12_TABLES_H_
diff --git a/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.c b/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.c
new file mode 100644
index 0000000..d03c7b7
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.c
@@ -0,0 +1,248 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * SWB16_KLT_Tables.c
+ *
+ * This file defines tables used for entropy coding of LPC shape of
+ * upper-band signal if the bandwidth is 16 kHz.
+ *
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+/*
+* Mean value of LAR
+*/
+const double WebRtcIsac_kMeanLarUb16[UB_LPC_ORDER] =
+{
+0.454978, 0.364747, 0.102999, 0.104523
+};
+
+/*
+* A rotation matrix to decorrelate intra-vector correlation,
+* i.e. correlation among components of LAR vector.
+*/
+const double WebRtcIsac_kIintraVecDecorrMatUb16[UB_LPC_ORDER][UB_LPC_ORDER] =
+{
+    {-0.020528, -0.085858, -0.002431,  0.996093},
+    {-0.033155,  0.036102,  0.998786,  0.004866},
+    { 0.202627,  0.974853, -0.028940,  0.088132},
+    {-0.978479,  0.202454, -0.039785, -0.002811}
+};
+
+/*
+* A rotation matrix to remove correlation among LAR coefficients
+* of different LAR vectors. One might guess that decorrelation matrix
+* for the first component should differ from the second component
+* but we haven't observed a significant benefit of having different
+* decorrelation matrices for different components.
+*/
+const double WebRtcIsac_kInterVecDecorrMatUb16
+[UB16_LPC_VEC_PER_FRAME][UB16_LPC_VEC_PER_FRAME] =
+{
+    { 0.291675, -0.515786,  0.644927,  0.482658},
+    {-0.647220,  0.479712,  0.289556,  0.516856},
+    { 0.643084,  0.485489, -0.289307,  0.516763},
+    {-0.287185, -0.517823, -0.645389,  0.482553}
+};
+
+/*
+* The following 16 vectors define CDF of 16 decorrelated LAR
+* coefficients.
+*/
+const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub16[14] =
+{
+     0,      2,     20,    159,   1034,   5688,  20892,  44653,
+ 59849,  64485,  65383,  65518,  65534,  65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec1Ub16[16] =
+{
+     0,      1,      7,     43,    276,   1496,   6681,  21653,
+ 43891,  58859,  64022,  65248,  65489,  65529,  65534,  65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec2Ub16[18] =
+{
+     0,      1,      9,     54,    238,    933,   3192,   9461,
+ 23226,  42146,  56138,  62413,  64623,  65300,  65473,  65521,
+ 65533,  65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec3Ub16[30] =
+{
+     0,      2,      4,      8,     17,     36,     75,    155,
+   329,    683,   1376,   2662,   5047,   9508,  17526,  29027,
+ 40363,  48997,  55096,  59180,  61789,  63407,  64400,  64967,
+ 65273,  65429,  65497,  65526,  65534,  65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec4Ub16[16] =
+{
+     0,      1,     10,     63,    361,   1785,   7407,  22242,
+ 43337,  58125,  63729,  65181,  65472,  65527,  65534,  65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec5Ub16[17] =
+{
+     0,      1,      7,     29,    134,    599,   2443,   8590,
+ 22962,  42635,  56911,  63060,  64940,  65408,  65513,  65531,
+ 65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec6Ub16[21] =
+{
+     0,      1,      5,     16,     57,    191,    611,   1808,
+  4847,  11755,  24612,  40910,  53789,  60698,  63729,  64924,
+ 65346,  65486,  65523,  65532,  65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec7Ub16[36] =
+{
+     0,      1,      4,     12,     25,     55,    104,    184,
+   314,    539,    926,   1550,   2479,   3861,   5892,   8845,
+ 13281,  20018,  29019,  38029,  45581,  51557,  56057,  59284,
+ 61517,  63047,  64030,  64648,  65031,  65261,  65402,  65480,
+ 65518,  65530,  65534,  65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec8Ub16[21] =
+{
+     0,      1,      2,      7,     26,    103,    351,   1149,
+  3583,  10204,  23846,  41711,  55361,  61917,  64382,  65186,
+ 65433,  65506,  65528,  65534,  65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub160[21] =
+{
+     0,      6,     19,     63,    205,    638,   1799,   4784,
+ 11721,  24494,  40803,  53805,  60886,  63822,  64931,  65333,
+ 65472,  65517,  65530,  65533,  65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub161[28] =
+{
+     0,      1,      3,     11,     31,     86,    221,    506,
+  1101,   2296,   4486,   8477,  15356,  26079,  38941,  49952,
+ 57165,  61257,  63426,  64549,  65097,  65351,  65463,  65510,
+ 65526,  65532,  65534,  65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub162[55] =
+{
+     0,      3,     12,     23,     42,     65,     89,    115,
+   150,    195,    248,    327,    430,    580,    784,   1099,
+  1586,   2358,   3651,   5899,   9568,  14312,  19158,  23776,
+ 28267,  32663,  36991,  41153,  45098,  48680,  51870,  54729,
+ 57141,  59158,  60772,  62029,  63000,  63761,  64322,  64728,
+ 65000,  65192,  65321,  65411,  65463,  65496,  65514,  65523,
+ 65527,  65529,  65531,  65532,  65533,  65534,  65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub163[26] =
+{
+     0,      2,      4,     10,     21,     48,    114,    280,
+   701,   1765,   4555,  11270,  24267,  41213,  54285,  61003,
+ 63767,  64840,  65254,  65421,  65489,  65514,  65526,  65532,
+ 65534,  65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub164[28] =
+{
+     0,      1,      3,      6,     15,     36,     82,    196,
+   453,   1087,   2557,   5923,  13016,  25366,  40449,  52582,
+ 59539,  62896,  64389,  65033,  65316,  65442,  65494,  65519,
+ 65529,  65533,  65534,  65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub165[34] =
+{
+     0,      2,      4,      8,     18,     35,     73,    146,
+   279,    524,    980,   1789,   3235,   5784,  10040,  16998,
+ 27070,  38543,  48499,  55421,  59712,  62257,  63748,  64591,
+ 65041,  65278,  65410,  65474,  65508,  65522,  65530,  65533,
+ 65534,  65535
+};
+
+const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub166[71] =
+{
+     0,      1,      2,      6,     13,     26,     55,     92,
+   141,    191,    242,    296,    355,    429,    522,    636,
+   777,    947,   1162,   1428,   1753,   2137,   2605,   3140,
+  3743,   4409,   5164,   6016,   6982,   8118,   9451,  10993,
+ 12754,  14810,  17130,  19780,  22864,  26424,  30547,  35222,
+ 40140,  44716,  48698,  52056,  54850,  57162,  59068,  60643,
+ 61877,  62827,  63561,  64113,  64519,  64807,  65019,  65167,
+ 65272,  65343,  65399,  65440,  65471,  65487,  65500,  65509,
+ 65518,  65524,  65527,  65531,  65533,  65534,  65535
+};
+
+/*
+* An array of pointers to CDFs of decorrelated LARs
+*/
+const uint16_t* WebRtcIsac_kLpcShapeCdfMatUb16
+[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME] = {
+     WebRtcIsac_kLpcShapeCdfVec01Ub16,
+     WebRtcIsac_kLpcShapeCdfVec1Ub16,
+     WebRtcIsac_kLpcShapeCdfVec2Ub16,
+     WebRtcIsac_kLpcShapeCdfVec3Ub16,
+     WebRtcIsac_kLpcShapeCdfVec4Ub16,
+     WebRtcIsac_kLpcShapeCdfVec5Ub16,
+     WebRtcIsac_kLpcShapeCdfVec6Ub16,
+     WebRtcIsac_kLpcShapeCdfVec7Ub16,
+     WebRtcIsac_kLpcShapeCdfVec8Ub16,
+     WebRtcIsac_kLpcShapeCdfVec01Ub160,
+     WebRtcIsac_kLpcShapeCdfVec01Ub161,
+     WebRtcIsac_kLpcShapeCdfVec01Ub162,
+     WebRtcIsac_kLpcShapeCdfVec01Ub163,
+     WebRtcIsac_kLpcShapeCdfVec01Ub164,
+     WebRtcIsac_kLpcShapeCdfVec01Ub165,
+     WebRtcIsac_kLpcShapeCdfVec01Ub166
+};
+
+/*
+* The smallest reconstruction points for quantiztion of LAR coefficients.
+*/
+const double WebRtcIsac_kLpcShapeLeftRecPointUb16
+[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME] =
+{
+ -0.8250,  -0.9750,  -1.1250,  -2.1750,  -0.9750,  -1.1250,  -1.4250,
+ -2.6250,  -1.4250,  -1.2750,  -1.8750,  -3.6750,  -1.7250,  -1.8750,
+ -2.3250,  -5.4750
+};
+
+/*
+* Number of reconstruction points of quantizers for LAR coefficients.
+*/
+const int16_t WebRtcIsac_kLpcShapeNumRecPointUb16
+[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME] =
+{
+   13,    15,    17,    29,    15,    16,    20,    35,    20,
+   20,    27,    54,    25,    27,    33,    70
+};
+
+/*
+* Starting index for entropy decoder to search for the right interval,
+* one entry per LAR coefficient
+*/
+const uint16_t WebRtcIsac_kLpcShapeEntropySearchUb16
+[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME] =
+{
+    6,     7,     8,    14,     7,     8,    10,    17,    10,
+   10,    13,    27,    12,    13,    16,    35
+};
+
+/*
+* LAR quantization step-size.
+*/
+const double WebRtcIsac_kLpcShapeQStepSizeUb16 = 0.150000;
diff --git a/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.h b/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.h
new file mode 100644
index 0000000..d828b83
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/lpc_shape_swb16_tables.h
@@ -0,0 +1,78 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lpc_shape_swb16_tables.h
+ *
+ * This file declares tables used for entropy coding of LPC shape of
+ * upper-band signal if the bandwidth is 16 kHz.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB16_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB16_TABLES_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+extern const double WebRtcIsac_kMeanLarUb16[UB_LPC_ORDER];
+
+extern const double WebRtcIsac_kIintraVecDecorrMatUb16[UB_LPC_ORDER][UB_LPC_ORDER];
+
+extern const double WebRtcIsac_kInterVecDecorrMatUb16
+[UB16_LPC_VEC_PER_FRAME][UB16_LPC_VEC_PER_FRAME];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub16[14];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec1Ub16[16];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec2Ub16[18];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec3Ub16[30];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec4Ub16[16];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec5Ub16[17];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec6Ub16[21];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec7Ub16[36];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec8Ub16[21];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub160[21];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub161[28];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub162[55];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub163[26];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub164[28];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub165[34];
+
+extern const uint16_t WebRtcIsac_kLpcShapeCdfVec01Ub166[71];
+
+extern const uint16_t* WebRtcIsac_kLpcShapeCdfMatUb16
+[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
+
+extern const double WebRtcIsac_kLpcShapeLeftRecPointUb16
+[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
+
+extern const int16_t WebRtcIsac_kLpcShapeNumRecPointUb16
+[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
+
+extern const uint16_t WebRtcIsac_kLpcShapeEntropySearchUb16
+[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
+
+extern const double WebRtcIsac_kLpcShapeQStepSizeUb16;
+
+#endif // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_SHAPE_SWB16_TABLES_H_
diff --git a/modules/audio_coding/codecs/isac/main/source/lpc_tables.c b/modules/audio_coding/codecs/isac/main/source/lpc_tables.c
new file mode 100644
index 0000000..461b92e
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/lpc_tables.c
@@ -0,0 +1,601 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/* coding tables for the KLT coefficients */
+
+#include "modules/audio_coding/codecs/isac/main/source/lpc_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+
+/* cdf array for model indicator */
+const uint16_t WebRtcIsac_kQKltModelCdf[4] = {
+    0,  15434,  37548,  65535 };
+
+/* pointer to cdf array for model indicator */
+const uint16_t *WebRtcIsac_kQKltModelCdfPtr[1] = {
+    WebRtcIsac_kQKltModelCdf };
+
+/* initial cdf index for decoder of model indicator */
+const uint16_t WebRtcIsac_kQKltModelInitIndex[1] = { 1 };
+
+/* offset to go from rounded value to quantization index */
+const short WebRtcIsac_kQKltQuantMinGain[12] = {
+    3,  6,  4,  6,  6,  9,  5,  16,  11,  34, 32,  47 };
+
+
+const short WebRtcIsac_kQKltQuantMinShape[108] = {
+    0,  0,  0,  0,  0,  0,  0,  0,  0,  1,
+    1,  1,  1,  1,  2,  2,  2,  3,  0,  0,
+    0,  0,  1,  0,  0,  0,  0,  1,  1,  1,
+    1,  1,  1,  2,  2,  3,  0,  0,  0,  0,
+    1,  0,  1,  1,  1,  1,  1,  1,  1,  2,
+    2,  4,  3,  5,  0,  0,  0,  0,  1,  1,
+    1,  1,  1,  1,  2,  1,  2,  2,  3,  4,
+    4,  7,  0,  0,  1,  1,  1,  1,  1,  1,
+    1,  2,  3,  2,  3,  4,  4,  5,  7,  13,
+    0,  1,  1,  2,  3,  2,  2,  2,  4,  4,
+    5,  6,  7,  11,  9,  13,  12,  26 };
+
+/* maximum quantization index */
+const uint16_t WebRtcIsac_kQKltMaxIndGain[12] = {
+    6,  12,  8,  14,  10,  19,  12,  31,  22,  56, 52,  138 };
+
+const uint16_t WebRtcIsac_kQKltMaxIndShape[108] = {
+    0,  0,  0,  0,  0,  0,  0,  0,  0,  1,
+    2,  2,  2,  2,  4,  4,  5,  6,  0,  0,
+    0,  0,  1,  0,  0,  0,  0,  1,  2,  2,
+    2,  2,  3,  4,  5,  7,  0,  0,  0,  0,
+    2,  0,  2,  2,  2,  2,  3,  2,  2,  4,
+    4,  6,  6,  9,  0,  0,  0,  0,  2,  2,
+    2,  2,  2,  2,  3,  2,  4,  4,  7,  7,
+    9,  13,  0,  0,  2,  2,  2,  2,  2,  2,
+    3,  4,  5,  4,  6,  8,  8,  10,  16,  25,
+    0,  2,  2,  4,  5,  4,  4,  4,  7,  8,
+    9,  10,  13,  19,  17,  23,  25,  49 };
+
+/* index offset */
+const uint16_t WebRtcIsac_kQKltOffsetGain[12] = {
+    0,  7,  20,  29,  44,  55,  75,  88,  120,  143,  200,  253 };
+
+const uint16_t WebRtcIsac_kQKltOffsetShape[108] = {
+    0,  1,  2,  3,  4,  5,  6,  7,  8,  9,
+    11,  14,  17,  20,  23,  28,  33,  39,  46,  47,
+    48,  49,  50,  52,  53,  54,  55,  56,  58,  61,
+    64,  67,  70,  74,  79,  85,  93,  94,  95,  96,
+    97,  100,  101,  104,  107,  110,  113,  117,  120,  123,
+    128,  133,  140,  147,  157,  158,  159,  160,  161,  164,
+    167,  170,  173,  176,  179,  183,  186,  191,  196,  204,
+    212,  222,  236,  237,  238,  241,  244,  247,  250,  253,
+    256,  260,  265,  271,  276,  283,  292,  301,  312,  329,
+    355,  356,  359,  362,  367,  373,  378,  383,  388,  396,
+    405,  415,  426,  440,  460,  478,  502,  528 };
+
+/* initial cdf index for KLT coefficients */
+const uint16_t WebRtcIsac_kQKltInitIndexGain[12] = {
+    3,  6,  4,  7,  5,  10,  6,  16,  11,  28, 26,  69};
+
+const uint16_t WebRtcIsac_kQKltInitIndexShape[108] = {
+    0,  0,  0,  0,  0,  0,  0,  0,  0,  1,
+    1,  1,  1,  1,  2,  2,  3,  3,  0,  0,
+    0,  0,  1,  0,  0,  0,  0,  1,  1,  1,
+    1,  1,  2,  2,  3,  4,  0,  0,  0,  0,
+    1,  0,  1,  1,  1,  1,  2,  1,  1,  2,
+    2,  3,  3,  5,  0,  0,  0,  0,  1,  1,
+    1,  1,  1,  1,  2,  1,  2,  2,  4,  4,
+    5,  7,  0,  0,  1,  1,  1,  1,  1,  1,
+    2,  2,  3,  2,  3,  4,  4,  5,  8,  13,
+    0,  1,  1,  2,  3,  2,  2,  2,  4,  4,
+    5,  5,  7,  10,  9,  12,  13,  25 };
+
+
+/* quantizer representation levels */
+const double WebRtcIsac_kQKltLevelsGain[392] = {
+    -2.78127126, -1.76745590, -0.77913790, -0.00437329,  0.79961206,
+    1.81775776, 2.81389782, -5.78753143, -4.88384084, -3.89320940,
+    -2.88133610, -1.92859977, -0.86347396,  0.02003888,  0.86140400,
+    1.89667156,  2.97134967,  3.98781964,  4.91727277,  5.82865898,
+    -4.11195874, -2.80898424, -1.87547977, -0.80943825, -0.00679084,
+    0.79573851,  1.83953397,  2.67586037,  3.76274082, -6.10933968,
+    -4.93034581, -3.89281296, -2.91530625, -1.89684163, -0.85319130,
+    -0.02275767,  0.86862017,  1.91578276,  2.96107339,  3.96543056,
+    4.91369908,  5.91058154,  6.83848343,  8.07136925, -5.87470395,
+    -4.84703049, -3.84284597, -2.86168446, -1.89290192, -0.82798145,
+    -0.00080013,  0.82594974,  1.85754329,  2.88351798,  3.96172628,
+    -8.85684885, -7.87387461, -6.97811862, -5.93256270, -4.94301439,
+    -3.95513701, -2.96041544, -1.94031192, -0.87961478, -0.00456201,
+    0.89911505,  1.91723376,  2.94011511,  3.93302540,  4.97990967,
+    5.93133404,  7.02181199,  7.92407762,  8.80155440,  10.04665814,
+    -4.82396678, -3.85612158, -2.89482244, -1.89558408, -0.90036978,
+    -0.00677823,  0.90607989,  1.90937981,  2.91175777,  3.91637730,
+    4.97565723,  5.84771228,  7.11145863, -16.07879840, -15.03776309,
+    -13.93905670, -12.95671800, -11.89171202, -10.95820934, -9.95923714,
+    -8.94357334, -7.99068299, -6.97481009, -5.94826231, -4.96673988,
+    -3.97490466, -2.97846970, -1.95130435, -0.94215262, -0.01444043,
+    0.96770704,  1.95848598,  2.94107862,  3.95666119,  4.97253085,
+    5.97191122,  6.93277360,  7.96608727,  8.87958779,  10.00264269,
+    10.86560820,  12.07449071,  13.04491775,  13.97507061,  14.91845261,
+    -10.85696295, -9.83365357, -9.01245635, -7.95915145, -6.95625003,
+    -5.95362618, -4.93468444, -3.98760978, -2.95044407, -1.97041277,
+    -0.97701799, -0.00840234,  0.97834289,  1.98361415,  2.97802439,
+    3.96415871,  4.95369042,  5.94101770,  6.92756798,  7.94063998,
+    8.85951828,  9.97077022,  11.00068503, -33.92030406, -32.81426422,
+    -32.00000000, -31.13243639, -30.11886909, -29.06017570, -28.12598824,
+    -27.22045482, -25.81215858, -25.07849962, -23.93018013, -23.02097643,
+    -21.89529725, -20.99091085, -19.98889048, -18.94327044, -17.96562071,
+    -16.96126218, -15.95054062, -14.98516200, -13.97101012, -13.02106500,
+    -11.98438006, -11.03216748, -9.95930286, -8.97043946, -7.98085082,
+    -6.98360995, -5.98998802, -4.98668173, -4.00032906, -3.00420619,
+    -1.98701132, -0.99324682, -0.00609324,  0.98297834,  1.99483076,
+    3.00305044,  3.97142097,  4.97525759,  5.98612258,  6.97448236,
+    7.97575900,  9.01086211,  9.98665542,  11.00541438,  11.98078628,
+    12.92352471,  14.06849675,  14.99949430,  15.94904834,  16.97440321,
+    18.04040916,  18.88987609,  20.05312391,  21.00000000,  21.79443341,
+    -31.98578825, -31.00000000, -29.89060567, -28.98555686, -27.97114102,
+    -26.84935410, -26.02402230, -24.94195278, -23.92336849, -22.95552382,
+    -21.97932836, -20.96055470, -19.99649553, -19.03436122, -17.96706525,
+    -17.01139515, -16.01363516, -14.99154248, -14.00298333, -12.99630613,
+    -11.99955519, -10.99000421, -10.00819092, -8.99763648, -7.98431793,
+    -7.01769025, -5.99604690, -4.99980697, -3.99334671, -3.01748192,
+    -2.02051217, -1.00848371, -0.01942358,  1.00477757,  1.95477872,
+    2.98593031,  3.98779079,  4.96862849,  6.02694771,  6.93983733,
+    7.89874717,  8.99615862,  10.02367921,  10.96293452,  11.84351528,
+    12.92207187,  13.85122329,  15.05146877,  15.99371264,  17.00000000,
+    18.00000000,  19.00000000,  19.82763573, -47.00000000, -46.00000000,
+    -44.87138498, -44.00000000, -43.00000000, -42.00000000, -41.00000000,
+    -39.88966612, -38.98913239, -37.80306486, -37.23584325, -35.94200288,
+    -34.99881301, -34.11361858, -33.06507360, -32.13129135, -30.90891364,
+    -29.81511907, -28.99250380, -28.04535391, -26.99767800, -26.04418164,
+    -24.95687851, -24.04865595, -23.03392645, -21.89366707, -20.93517364,
+    -19.99388660, -18.91620943, -18.03749683, -16.99532379, -15.98683813,
+    -15.06421479, -13.99359211, -12.99714098, -11.97022520, -10.98500279,
+    -9.98834422, -8.95729330, -8.01232284, -7.00253661, -5.99681626,
+    -5.01207817, -3.95914904, -3.01232178, -1.96615919, -0.97687670,
+    0.01228030,  0.98412288,  2.01753544,  3.00580570,  3.97783510,
+    4.98846894,  6.01321400,  7.00867732,  8.00416375,  9.01771966,
+    9.98637729,  10.98255180,  11.99194163,  13.01807333,  14.00999545,
+    15.00118556,  16.00089224,  17.00584148,  17.98251763,  18.99942091,
+    19.96917690,  20.97839265,  21.98207297,  23.00171271,  23.99930737,
+    24.99746061,  26.00936304,  26.98240132,  28.01126868,  29.01395915,
+    29.98153507,  31.01376711,  31.99876818,  33.00475317,  33.99753994,
+    34.99493913,  35.98933585,  36.95620160,  37.98428461,  38.99317544,
+    40.01832073,  40.98048133,  41.95999283,  42.98232091,  43.96523612,
+    44.99574268,  45.99524194,  47.05464025,  48.03821548,  48.99354366,
+    49.96400411,  50.98017973,  51.95184408,  52.96291806,  54.00194392,
+    54.96603783,  55.95623778,  57.03076595,  58.05889901,  58.99081551,
+    59.97928121,  61.05071612,  62.03971580,  63.01286038,  64.01290338,
+    65.02074503,  65.99454594,  67.00399425,  67.96571257,  68.95305727,
+    69.92030664,  70.95594862,  71.98088567,  73.04764124,  74.00285480,
+    75.02696330,  75.89837673,  76.93459997,  78.16266309,  78.83317543,
+    80.00000000,  80.87251574,  82.09803524,  83.10671664,  84.00000000,
+    84.77023523,  86.00000000,  87.00000000,  87.92946897,  88.69159118,
+    90.00000000,  90.90535270 };
+
+const double WebRtcIsac_kQKltLevelsShape[578] = {
+    0.00032397,  0.00008053, -0.00061202, -0.00012620,  0.00030437,
+    0.00054764, -0.00027902,  0.00069360,  0.00029449, -0.80219239,
+    0.00091089, -0.74514927, -0.00094283,  0.64030631, -0.60509119,
+    0.00035575,  0.61851665, -0.62129957,  0.00375219,  0.60054900,
+    -0.61554359,  0.00054977,  0.63362016, -1.73118727, -0.65422341,
+    0.00524568,  0.66165298,  1.76785515, -1.83182018, -0.65997434,
+    -0.00011887,  0.67524299,  1.79933938, -1.76344480, -0.72547708,
+    -0.00133017,  0.73104704,  1.75305377,  2.85164534, -2.80423916,
+    -1.71959639, -0.75419722, -0.00329945,  0.77196760,  1.72211069,
+    2.87339653,  0.00031089, -0.00015311,  0.00018201, -0.00035035,
+    -0.77357251,  0.00154647, -0.00047625, -0.00045299,  0.00086590,
+    0.00044762, -0.83383829,  0.00024787, -0.68526258, -0.00122472,
+    0.64643255, -0.60904942, -0.00448987,  0.62309184, -0.59626442,
+    -0.00574132,  0.62296546, -0.63222115,  0.00013441,  0.63609545,
+    -0.66911055, -0.00369971,  0.66346095,  2.07281301, -1.77184694,
+    -0.67640425, -0.00010145,  0.64818392,  1.74948973, -1.69420224,
+    -0.71943894, -0.00004680,  0.75303493,  1.81075983,  2.80610041,
+    -2.80005755, -1.79866753, -0.77409777, -0.00084220,  0.80141293,
+    1.78291081,  2.73954236,  3.82994169,  0.00015140, -0.00012766,
+    -0.00034241, -0.00119125, -0.76113497,  0.00069246,  0.76722027,
+    0.00132862, -0.69107530,  0.00010656,  0.77061578, -0.78012970,
+    0.00095947,  0.77828502, -0.64787758,  0.00217168,  0.63050167,
+    -0.58601125,  0.00306596,  0.59466308, -0.58603410,  0.00059779,
+    0.64257970,  1.76512766, -0.61193600, -0.00259517,  0.59767574,
+    -0.61026273,  0.00315811,  0.61725479, -1.69169719, -0.65816029,
+    0.00067575,  0.65576890,  2.00000000, -1.72689193, -0.69780808,
+    -0.00040990,  0.70668487,  1.74198458, -3.79028154, -3.00000000,
+    -1.73194459, -0.70179341, -0.00106695,  0.71302629,  1.76849782,
+    -2.89332364, -1.78585007, -0.78731491, -0.00132610,  0.79692976,
+    1.75247009,  2.97828682, -5.26238694, -3.69559829, -2.87286122,
+    -1.84908818, -0.84434577, -0.01167975,  0.84641753,  1.84087672,
+    2.87628156,  3.83556679, -0.00190204,  0.00092642,  0.00354385,
+    -0.00012982, -0.67742785,  0.00229509,  0.64935672, -0.58444751,
+    0.00470733,  0.57299534, -0.58456202, -0.00097715,  0.64593607,
+    -0.64060330, -0.00638534,  0.59680157, -0.59287537,  0.00490772,
+    0.58919707, -0.60306173, -0.00417464,  0.60562100, -1.75218757,
+    -0.63018569, -0.00225922,  0.63863300, -0.63949939, -0.00126421,
+    0.64268914, -1.75851182, -0.68318060,  0.00510418,  0.69049211,
+    1.88178506, -1.71136148, -0.72710534, -0.00815559,  0.73412917,
+    1.79996711, -2.77111145, -1.73940498, -0.78212945,  0.01074476,
+    0.77688916,  1.76873972,  2.87281379,  3.77554698, -3.75832725,
+    -2.95463235, -1.80451491, -0.80017226,  0.00149902,  0.80729206,
+    1.78265046,  2.89391793, -3.78236148, -2.83640598, -1.82532067,
+    -0.88844327, -0.00620952,  0.88208030,  1.85757631,  2.81712391,
+    3.88430176,  5.16179367, -7.00000000, -5.93805408, -4.87172597,
+    -3.87524433, -2.89399744, -1.92359563, -0.92136341, -0.00172725,
+    0.93087018,  1.90528280,  2.89809686,  3.88085708,  4.89147740,
+    5.89078692, -0.00239502,  0.00312564, -1.00000000,  0.00178325,
+    1.00000000, -0.62198029,  0.00143254,  0.65344051, -0.59851220,
+    -0.00676987,  0.61510140, -0.58894151,  0.00385055,  0.59794203,
+    -0.59808568, -0.00038214,  0.57625703, -0.63009713, -0.01107985,
+    0.61278758, -0.64206758, -0.00154369,  0.65480598,  1.80604162,
+    -1.80909286, -0.67810514,  0.00205762,  0.68571097,  1.79453891,
+    -3.22682422, -1.73808453, -0.71870305, -0.00738594,  0.71486172,
+    1.73005326, -1.66891897, -0.73689615, -0.00616203,  0.74262409,
+    1.73807899, -2.92417482, -1.73866741, -0.78133871,  0.00764425,
+    0.80027264,  1.78668732,  2.74992588, -4.00000000, -2.75578740,
+    -1.83697516, -0.83117035, -0.00355191,  0.83527172,  1.82814700,
+    2.77377675,  3.80718693, -3.81667698, -2.83575471, -1.83372350,
+    -0.86579471,  0.00547578,  0.87582281,  1.82858793,  2.87265007,
+    3.91405377, -4.87521600, -3.78999094, -2.86437014, -1.86964365,
+    -0.90618018,  0.00128243,  0.91497811,  1.87374952,  2.83199819,
+    3.91519130,  4.76632822, -6.68713448, -6.01252467, -4.94587936,
+    -3.88795368, -2.91299088, -1.92592211, -0.95504570, -0.00089980,
+    0.94565200,  1.93239633,  2.91832808,  3.91363475,  4.88920034,
+    5.96471415,  6.83905252,  7.86195009,  8.81571018,-12.96141759,
+    -11.73039516,-10.96459719, -9.97382433, -9.04414433, -7.89460619,
+    -6.96628608, -5.93236595, -4.93337924, -3.95479990, -2.96451499,
+    -1.96635876, -0.97271229, -0.00402238,  0.98343930,  1.98348291,
+    2.96641164,  3.95456471,  4.95517089,  5.98975714,  6.90322073,
+    7.90468849,  8.85639467,  9.97255498, 10.79006309, 11.81988596,
+    0.04950500, -1.00000000, -0.01226628,  1.00000000, -0.59479469,
+    -0.10438305,  0.59822144, -2.00000000, -0.67109149, -0.09256692,
+    0.65171621,  2.00000000, -3.00000000, -1.68391999, -0.76681039,
+    -0.03354151,  0.71509146,  1.77615472, -2.00000000, -0.68661511,
+    -0.02497881,  0.66478398,  2.00000000, -2.00000000, -0.67032784,
+    -0.00920582,  0.64892756,  2.00000000, -2.00000000, -0.68561894,
+    0.03641869,  0.73021611,  1.68293863, -4.00000000, -2.72024184,
+    -1.80096059, -0.81696185,  0.03604685,  0.79232033,  1.70070730,
+    3.00000000, -4.00000000, -2.71795670, -1.80482986, -0.86001162,
+    0.03764903,  0.87723968,  1.79970771,  2.72685932,  3.67589143,
+    -5.00000000, -4.00000000, -2.85492548, -1.78996365, -0.83250358,
+    -0.01376828,  0.84195506,  1.78161105,  2.76754458,  4.00000000,
+    -6.00000000, -5.00000000, -3.82268811, -2.77563624, -1.82608163,
+    -0.86486114, -0.02671886,  0.86693165,  1.88422879,  2.86248347,
+    3.95632216, -7.00000000, -6.00000000, -5.00000000, -3.77533988,
+    -2.86391432, -1.87052039, -0.90513658,  0.06271236,  0.91083620,
+    1.85734756,  2.86031688,  3.82019418,  4.94420394,  6.00000000,
+    -11.00000000,-10.00000000, -9.00000000, -8.00000000, -6.91952415,
+    -6.00000000, -4.92044374, -3.87845165, -2.87392362, -1.88413020,
+    -0.91915740,  0.00318517,  0.91602800,  1.89664838,  2.88925058,
+    3.84123856,  4.78988651,  5.94526812,  6.81953917,  8.00000000,
+    -9.00000000, -8.00000000, -7.03319143, -5.94530963, -4.86669720,
+    -3.92438007, -2.88620396, -1.92848070, -0.94365985,  0.01671855,
+    0.97349410,  1.93419878,  2.89740109,  3.89662823,  4.83235583,
+    5.88106535,  6.80328232,  8.00000000,-13.00000000,-12.00000000,
+    -11.00000000,-10.00000000, -9.00000000, -7.86033489, -6.83344055,
+    -5.89844215, -4.90811454, -3.94841298, -2.95820490, -1.98627966,
+    -0.99161468, -0.02286136,  0.96055651,  1.95052433,  2.93969396,
+    3.94304346,  4.88522624,  5.87434241,  6.78309433,  7.87244101,
+    9.00000000, 10.00000000,-12.09117356,-11.00000000,-10.00000000,
+    -8.84766108, -7.86934236, -6.98544896, -5.94233429, -4.95583292,
+    -3.95575986, -2.97085529, -1.98955811, -0.99359873, -0.00485413,
+    0.98298870,  1.98093258,  2.96430203,  3.95540216,  4.96915010,
+    5.96775124,  6.99236918,  7.96503302,  8.99864542,  9.85857723,
+    10.96541926, 11.91647197, 12.71060069,-26.00000000,-25.00000000,
+    -24.00585596,-23.11642573,-22.14271284,-20.89800711,-19.87815799,
+    -19.05036354,-17.88555651,-16.86471209,-15.97711073,-14.94012359,
+    -14.02661226,-12.98243228,-11.97489256,-10.97402777, -9.96425624,
+    -9.01085220, -7.97372506, -6.98795002, -5.97271328, -5.00191694,
+    -3.98055849, -2.98458048, -1.99470442, -0.99656768, -0.00825666,
+    1.00272004,  1.99922218,  2.99357669,  4.01407905,  5.01003897,
+    5.98115528,  7.00018958,  8.00338125,  8.98981046,  9.98990318,
+    10.96341479, 11.96866930, 12.99175139, 13.94580443, 14.95745083,
+    15.98992869, 16.97484646, 17.99630043, 18.93396897, 19.88347741,
+    20.96532482, 21.92191032, 23.22314702 };
+
+
+/* cdf tables for quantizer indices */
+const uint16_t WebRtcIsac_kQKltCdfGain[404] = {
+    0,  13,  301,  3730,  61784,  65167,  65489,  65535,  0,  17,
+    142,  314,  929,  2466,  7678,  56450,  63463,  64740,  65204,  65426,
+    65527,  65535,  0,  8,  100,  724,  6301,  60105,  65125,  65510,
+    65531,  65535,  0,  13,  117,  368,  1068,  3010,  11928,  53603,
+    61177,  63404,  64505,  65108,  65422,  65502,  65531,  65535,  0,  4,
+    17,  96,  410,  1859,  12125,  54361,  64103,  65305,  65497,  65535,
+    0,  4,  88,  230,  469,  950,  1746,  3228,  6092,  16592,
+    44756,  56848,  61256,  63308,  64325,  64920,  65309,  65460,  65502,
+    65522, 65535,  0,  88,  352,  1675,  6339,  20749,  46686,  59284,  63525,
+    64949,  65359,  65502,  65527,  65535,  0,  13,  38,  63,  117,
+    234,  381,  641,  929,  1407,  2043,  2809,  4032,  5753,  8792,
+    14407,  24308,  38941,  48947,  55403,  59293,  61411,  62688,  63630,
+    64329, 64840,  65188,  65376,  65472,  65506,  65527,  65531,  65535,
+    0,  8, 29,  75,  222,  615,  1327,  2801,  5623,  9931,  16094,  24966,
+    34419, 43458,  50676,  56186,  60055,  62500,  63936,  64765,  65225,
+    65435, 65514,  65535,  0,  8,  13,  15,  17,  21,  33,  59,
+    71,  92,  151,  243,  360,  456,  674,  934,  1223,  1583,
+    1989,  2504,  3031,  3617,  4354,  5154,  6163,  7411,  8780,  10747,
+    12874,  15591,  18974,  23027,  27436,  32020,  36948,  41830,  46205,
+    49797,  53042,  56094,  58418,  60360,  61763,  62818,  63559,  64103,
+    64509,  64798,  65045,  65162,  65288,  65363,  65447,  65506,  65522,
+    65531,  65533,  65535,  0,  4,  6,  25,  38,  71,  138,  264,  519,  808,
+    1227,  1825,  2516,  3408,  4279,  5560,  7092,  9197,  11420,  14108,
+    16947,  20300,  23926,  27459,  31164,  34827,  38575,  42178,  45540,
+    48747,  51444,  54090,  56426,  58460,  60080,  61595,  62734,  63668,
+    64275,  64673,  64936,  65112,  65217,  65334,  65426,  65464,  65477,
+    65489,  65518,  65527,  65529,  65531,  65533,  65535,  0,  2,  4,  8,  10,
+    12,  14,  16,  21,  33,  50,  71,  84,  92,  105,  138, 180,  255,  318,
+    377,  435,  473,  511,  590,  682,  758,  913,  1097,  1256,  1449,  1671,
+    1884,  2169,  2445,  2772,  3157,  3563,  3944,  4375,  4848,  5334,  5820,
+    6448,  7101,  7716,  8378,  9102,  9956,  10752,  11648,  12707,  13670,
+    14758,  15910,  17187,  18472,  19627,  20649,  21951,  23169,  24283,
+    25552,  26862,  28227,  29391,  30764,  31882,  33213,  34432,  35600,
+    36910,  38116,  39464,  40729,  41872,  43144,  44371,  45514,  46762,
+    47813,  48968,  50069,  51032,  51974,  52908,  53737,  54603,  55445,
+    56282,  56990,  57572,  58191,  58840,  59410,  59887,  60264,  60607,
+    60946,  61269,  61516,  61771,  61960,  62198,  62408,  62558,  62776,
+    62985,  63207,  63408,  63546,  63739,  63906,  64070,  64237,  64371,
+    64551,  64677,  64836,  64999,  65095,  65213,  65284,  65338,  65380,
+    65426,  65447,  65472,  65485,  65487,  65489,  65502,  65510,  65512,
+    65514,  65516,  65518,  65522,  65531,  65533,  65535 };
+
+
+const uint16_t WebRtcIsac_kQKltCdfShape[686] = {
+    0,  65535,  0,  65535,  0,  65535,  0,  65535,  0,  65535,
+    0,  65535,  0,  65535,  0,  65535,  0,  65535,  0,  4,
+    65535,  0,  8,  65514,  65535,  0,  29,  65481,  65535,  0,
+    121,  65439,  65535,  0,  239,  65284,  65535,  0,  8,  779,
+    64999,  65527,  65535,  0,  8,  888,  64693,  65522,  65535,  0,
+    29,  2604,  62843,  65497,  65531,  65535,  0,  25,  176,  4576,
+    61164,  65275,  65527,  65535,  0,  65535,  0,  65535,  0,  65535,
+    0,  65535,  0,  4,  65535,  0,  65535,  0,  65535,  0,
+    65535,  0,  65535,  0,  4,  65535,  0,  33,  65502,  65535,
+    0,  54,  65481,  65535,  0,  251,  65309,  65535,  0,  611,
+    65074,  65535,  0,  1273,  64292,  65527,  65535,  0,  4,  1809,
+    63940,  65518,  65535,  0,  88,  4392,  60603,  65426,  65531,  65535,
+    0,  25,  419,  7046,  57756,  64961,  65514,  65531,  65535,  0,
+    65535,  0,  65535,  0,  65535,  0,  65535,  0,  4,  65531,
+    65535,  0,  65535,  0,  8,  65531,  65535,  0,  4,  65527,
+    65535,  0,  17,  65510,  65535,  0,  42,  65481,  65535,  0,
+    197,  65342,  65531,  65535,  0,  385,  65154,  65535,  0,  1005,
+    64522,  65535,  0,  8,  1985,  63469,  65533,  65535,  0,  38,
+    3119,  61884,  65514,  65535,  0,  4,  6,  67,  4961,  60804,
+    65472,  65535,  0,  17,  565,  9182,  56538,  65087,  65514,  65535,
+    0,  8,  63,  327,  2118,  14490,  52774,  63839,  65376,  65522,
+    65535,  0,  65535,  0,  65535,  0,  65535,  0,  65535,  0,
+    17,  65522,  65535,  0,  59,  65489,  65535,  0,  50,  65522,
+    65535,  0,  54,  65489,  65535,  0,  310,  65179,  65535,  0,
+    615,  64836,  65535,  0,  4,  1503,  63965,  65535,  0,  2780,
+    63383,  65535,  0,  21,  3919,  61051,  65527,  65535,  0,  84,
+    6674,  59929,  65435,  65535,  0,  4,  255,  7976,  55784,  65150,
+    65518,  65531,  65535,  0,  4,  8,  582,  10726,  53465,  64949,
+    65518,  65535,  0,  29,  339,  3006,  17555,  49517,  62956,  65200,
+    65497,  65531,  65535,  0,  2,  33,  138,  565,  2324,  7670,
+    22089,  45966,  58949,  63479,  64966,  65380,  65518,  65535,  0,  65535,
+    0,  65535,  0,  2,  65533,  65535,  0,  46,  65514,  65535,
+    0,  414,  65091,  65535,  0,  540,  64911,  65535,  0,  419,
+    65162,  65535,  0,  976,  64790,  65535,  0,  2977,  62495,  65531,
+    65535,  0,  4,  3852,  61034,  65527,  65535,  0,  4,  29,
+    6021,  60243,  65468,  65535,  0,  84,  6711,  58066,  65418,  65535,
+    0,  13,  281,  9550,  54917,  65125,  65506,  65535,  0,  2,
+    63,  984,  12108,  52644,  64342,  65435,  65527,  65535,  0,  29,
+    251,  2014,  14871,  47553,  62881,  65229,  65518,  65535,  0,  13,
+    142,  749,  4220,  18497,  45200,  60913,  64823,  65426,  65527,  65535,
+    0,  13,  71,  264,  1176,  3789,  10500,  24480,  43488,  56324,
+    62315,  64493,  65242,  65464,  65514,  65522,  65531,  65535,  0,  4,
+    13,  38,  109,  205,  448,  850,  1708,  3429,  6276,  11371,
+    19221,  29734,  40955,  49391,  55411,  59460,  62102,  63793,  64656,
+    65150, 65401,  65485,  65522,  65531,  65535,  0,  65535,  0,  2,  65533,
+    65535,  0,  1160,  65476,  65535,  0,  2,  6640,  64763,  65533,
+    65535,  0,  2,  38,  9923,  61009,  65527,  65535,  0,  2,
+    4949,  63092,  65533,  65535,  0,  2,  3090,  63398,  65533,  65535,
+    0,  2,  2520,  58744,  65510,  65535,  0,  2,  13,  544,
+    8784,  51403,  65148,  65533,  65535,  0,  2,  25,  1017,  10412,
+    43550,  63651,  65489,  65527,  65535,  0,  2,  4,  29,  783,
+    13377,  52462,  64524,  65495,  65533,  65535,  0,  2,  4,  6,
+    100,  1817,  18451,  52590,  63559,  65376,  65531,  65535,  0,  2,
+    4,  6,  46,  385,  2562,  11225,  37416,  60488,  65026,  65487,
+    65529,  65533,  65535,  0,  2,  4,  6,  8,  10,  12,
+    42,  222,  971,  5221,  19811,  45048,  60312,  64486,  65294,  65474,
+    65525,  65529,  65533,  65535,  0,  2,  4,  8,  71,  167,
+    666,  2533,  7875,  19622,  38082,  54359,  62108,  64633,  65290,  65495,
+    65529,  65533,  65535,  0,  2,  4,  6,  8,  10,  13,
+    109,  586,  1930,  4949,  11600,  22641,  36125,  48312,  56899,  61495,
+    63927,  64932,  65389,  65489,  65518,  65531,  65533,  65535,  0,  4,
+    6,  8,  67,  209,  712,  1838,  4195,  8432,  14432,  22834,
+    31723,  40523,  48139,  53929,  57865,  60657,  62403,  63584,  64363,
+    64907, 65167,  65372,  65472,  65514,  65535,  0,  2,  4,  13,  25,
+    42,  46,  50,  75,  113,  147,  281,  448,  657,  909,
+    1185,  1591,  1976,  2600,  3676,  5317,  7398,  9914,  12941,  16169,
+    19477,  22885,  26464,  29851,  33360,  37228,  41139,  44802,  48654,
+    52058, 55181,  57676,  59581,  61022,  62190,  63107,  63676,  64199,
+    64547,  64924, 65158,  65313,  65430,  65481,  65518,  65535 };
+
+
+/* pointers to cdf tables for quantizer indices */
+const uint16_t *WebRtcIsac_kQKltCdfPtrGain[12] = {
+    WebRtcIsac_kQKltCdfGain +0 +0, WebRtcIsac_kQKltCdfGain +0 +8,
+    WebRtcIsac_kQKltCdfGain +0 +22, WebRtcIsac_kQKltCdfGain +0 +32,
+    WebRtcIsac_kQKltCdfGain +0 +48, WebRtcIsac_kQKltCdfGain +0 +60,
+    WebRtcIsac_kQKltCdfGain +0 +81, WebRtcIsac_kQKltCdfGain +0 +95,
+    WebRtcIsac_kQKltCdfGain +0 +128, WebRtcIsac_kQKltCdfGain +0 +152,
+    WebRtcIsac_kQKltCdfGain +0 +210, WebRtcIsac_kQKltCdfGain +0 +264 };
+
+const uint16_t *WebRtcIsac_kQKltCdfPtrShape[108] = {
+    WebRtcIsac_kQKltCdfShape +0 +0, WebRtcIsac_kQKltCdfShape +0 +2,
+    WebRtcIsac_kQKltCdfShape +0 +4, WebRtcIsac_kQKltCdfShape +0 +6,
+    WebRtcIsac_kQKltCdfShape +0 +8, WebRtcIsac_kQKltCdfShape +0 +10,
+    WebRtcIsac_kQKltCdfShape +0 +12, WebRtcIsac_kQKltCdfShape +0 +14,
+    WebRtcIsac_kQKltCdfShape +0 +16, WebRtcIsac_kQKltCdfShape +0 +18,
+    WebRtcIsac_kQKltCdfShape +0 +21, WebRtcIsac_kQKltCdfShape +0 +25,
+    WebRtcIsac_kQKltCdfShape +0 +29, WebRtcIsac_kQKltCdfShape +0 +33,
+    WebRtcIsac_kQKltCdfShape +0 +37, WebRtcIsac_kQKltCdfShape +0 +43,
+    WebRtcIsac_kQKltCdfShape +0 +49, WebRtcIsac_kQKltCdfShape +0 +56,
+    WebRtcIsac_kQKltCdfShape +0 +64, WebRtcIsac_kQKltCdfShape +0 +66,
+    WebRtcIsac_kQKltCdfShape +0 +68, WebRtcIsac_kQKltCdfShape +0 +70,
+    WebRtcIsac_kQKltCdfShape +0 +72, WebRtcIsac_kQKltCdfShape +0 +75,
+    WebRtcIsac_kQKltCdfShape +0 +77, WebRtcIsac_kQKltCdfShape +0 +79,
+    WebRtcIsac_kQKltCdfShape +0 +81, WebRtcIsac_kQKltCdfShape +0 +83,
+    WebRtcIsac_kQKltCdfShape +0 +86, WebRtcIsac_kQKltCdfShape +0 +90,
+    WebRtcIsac_kQKltCdfShape +0 +94, WebRtcIsac_kQKltCdfShape +0 +98,
+    WebRtcIsac_kQKltCdfShape +0 +102, WebRtcIsac_kQKltCdfShape +0 +107,
+    WebRtcIsac_kQKltCdfShape +0 +113, WebRtcIsac_kQKltCdfShape +0 +120,
+    WebRtcIsac_kQKltCdfShape +0 +129, WebRtcIsac_kQKltCdfShape +0 +131,
+    WebRtcIsac_kQKltCdfShape +0 +133, WebRtcIsac_kQKltCdfShape +0 +135,
+    WebRtcIsac_kQKltCdfShape +0 +137, WebRtcIsac_kQKltCdfShape +0 +141,
+    WebRtcIsac_kQKltCdfShape +0 +143, WebRtcIsac_kQKltCdfShape +0 +147,
+    WebRtcIsac_kQKltCdfShape +0 +151, WebRtcIsac_kQKltCdfShape +0 +155,
+    WebRtcIsac_kQKltCdfShape +0 +159, WebRtcIsac_kQKltCdfShape +0 +164,
+    WebRtcIsac_kQKltCdfShape +0 +168, WebRtcIsac_kQKltCdfShape +0 +172,
+    WebRtcIsac_kQKltCdfShape +0 +178, WebRtcIsac_kQKltCdfShape +0 +184,
+    WebRtcIsac_kQKltCdfShape +0 +192, WebRtcIsac_kQKltCdfShape +0 +200,
+    WebRtcIsac_kQKltCdfShape +0 +211, WebRtcIsac_kQKltCdfShape +0 +213,
+    WebRtcIsac_kQKltCdfShape +0 +215, WebRtcIsac_kQKltCdfShape +0 +217,
+    WebRtcIsac_kQKltCdfShape +0 +219, WebRtcIsac_kQKltCdfShape +0 +223,
+    WebRtcIsac_kQKltCdfShape +0 +227, WebRtcIsac_kQKltCdfShape +0 +231,
+    WebRtcIsac_kQKltCdfShape +0 +235, WebRtcIsac_kQKltCdfShape +0 +239,
+    WebRtcIsac_kQKltCdfShape +0 +243, WebRtcIsac_kQKltCdfShape +0 +248,
+    WebRtcIsac_kQKltCdfShape +0 +252, WebRtcIsac_kQKltCdfShape +0 +258,
+    WebRtcIsac_kQKltCdfShape +0 +264, WebRtcIsac_kQKltCdfShape +0 +273,
+    WebRtcIsac_kQKltCdfShape +0 +282, WebRtcIsac_kQKltCdfShape +0 +293,
+    WebRtcIsac_kQKltCdfShape +0 +308, WebRtcIsac_kQKltCdfShape +0 +310,
+    WebRtcIsac_kQKltCdfShape +0 +312, WebRtcIsac_kQKltCdfShape +0 +316,
+    WebRtcIsac_kQKltCdfShape +0 +320, WebRtcIsac_kQKltCdfShape +0 +324,
+    WebRtcIsac_kQKltCdfShape +0 +328, WebRtcIsac_kQKltCdfShape +0 +332,
+    WebRtcIsac_kQKltCdfShape +0 +336, WebRtcIsac_kQKltCdfShape +0 +341,
+    WebRtcIsac_kQKltCdfShape +0 +347, WebRtcIsac_kQKltCdfShape +0 +354,
+    WebRtcIsac_kQKltCdfShape +0 +360, WebRtcIsac_kQKltCdfShape +0 +368,
+    WebRtcIsac_kQKltCdfShape +0 +378, WebRtcIsac_kQKltCdfShape +0 +388,
+    WebRtcIsac_kQKltCdfShape +0 +400, WebRtcIsac_kQKltCdfShape +0 +418,
+    WebRtcIsac_kQKltCdfShape +0 +445, WebRtcIsac_kQKltCdfShape +0 +447,
+    WebRtcIsac_kQKltCdfShape +0 +451, WebRtcIsac_kQKltCdfShape +0 +455,
+    WebRtcIsac_kQKltCdfShape +0 +461, WebRtcIsac_kQKltCdfShape +0 +468,
+    WebRtcIsac_kQKltCdfShape +0 +474, WebRtcIsac_kQKltCdfShape +0 +480,
+    WebRtcIsac_kQKltCdfShape +0 +486, WebRtcIsac_kQKltCdfShape +0 +495,
+    WebRtcIsac_kQKltCdfShape +0 +505, WebRtcIsac_kQKltCdfShape +0 +516,
+    WebRtcIsac_kQKltCdfShape +0 +528, WebRtcIsac_kQKltCdfShape +0 +543,
+    WebRtcIsac_kQKltCdfShape +0 +564, WebRtcIsac_kQKltCdfShape +0 +583,
+    WebRtcIsac_kQKltCdfShape +0 +608, WebRtcIsac_kQKltCdfShape +0 +635 };
+
+
+/* left KLT transforms */
+const double WebRtcIsac_kKltT1Gain[4] = {
+    -0.79742827,  0.60341375,  0.60341375,  0.79742827 };
+
+const double WebRtcIsac_kKltT1Shape[324] = {
+    0.00159597,  0.00049320,  0.00513821,  0.00021066,  0.01338581,
+    -0.00422367, -0.00272072,  0.00935107,  0.02047622,  0.02691189,
+    0.00478236,  0.03969702,  0.00886698,  0.04877604, -0.10898362,
+    -0.05930891, -0.03415047,  0.98889721,  0.00293558, -0.00035282,
+    0.01156321, -0.00195341, -0.00937631,  0.01052213, -0.02551163,
+    0.01644059,  0.03189927,  0.07754773, -0.08742313, -0.03026338,
+    0.05136248, -0.14395974,  0.17725040,  0.22664856,  0.93380230,
+    0.07076411,  0.00557890, -0.00222834,  0.01377569,  0.01466808,
+    0.02847361, -0.00603178,  0.02382480, -0.01210452,  0.03797267,
+    -0.02371480,  0.11260335, -0.07366682,  0.00453436, -0.04136941,
+    -0.07912843, -0.95031418,  0.25295337, -0.05302216, -0.00617554,
+    -0.00044040, -0.00653778,  0.01097838,  0.01529174,  0.01374431,
+    -0.00748512, -0.00020034,  0.02432713,  0.11101570, -0.08556891,
+    0.09282249, -0.01029446,  0.67556443, -0.67454300,  0.06910063,
+    0.20866865, -0.10318050,  0.00932175,  0.00524058,  0.00803610,
+    -0.00594676, -0.01082578,  0.01069906,  0.00546768,  0.01565291,
+    0.06816200,  0.10201227,  0.16812734,  0.22984074,  0.58213170,
+    -0.54138651, -0.51379962,  0.06847390, -0.01920037, -0.04592324,
+    -0.00467394,  0.00328858,  0.00377424, -0.00987448,  0.08222096,
+    -0.00377301,  0.04551941, -0.02592517,  0.16317082,  0.13077530,
+    0.22702921, -0.31215289, -0.69645962, -0.38047101, -0.39339411,
+    0.11124777,  0.02508035, -0.00708074,  0.00400344,  0.00040331,
+    0.01142402,  0.01725406,  0.01635170,  0.14285366,  0.03949233,
+    -0.05905676,  0.05877154, -0.17497577, -0.32479440,  0.80754464,
+    -0.38085603, -0.17055430, -0.03168622, -0.07531451,  0.02942002,
+    -0.02148095, -0.00754114, -0.00322372,  0.00567812, -0.01701521,
+    -0.12358320,  0.11473564,  0.09070136,  0.06533068, -0.22560802,
+    0.19209022,  0.81605094,  0.36592275, -0.09919829,  0.16667122,
+    0.16300725,  0.04803807,  0.06739263, -0.00156752, -0.01685302,
+    -0.00905240, -0.02297836, -0.00469939,  0.06310613, -0.16391930,
+    0.10919511,  0.12529293,  0.85581322, -0.32145522,  0.24539076,
+    0.07181839,  0.07289591,  0.14066759,  0.10406711,  0.05815518,
+    0.01072680, -0.00759339,  0.00053486, -0.00044865,  0.03407361,
+    0.01645348,  0.08758579,  0.27722240,  0.53665485, -0.74853376,
+    -0.01118192, -0.19805430,  0.06130619, -0.09675299,  0.08978480,
+    0.03405255, -0.00706867,  0.05102045,  0.03250746,  0.01849966,
+    -0.01216314, -0.01184187, -0.01579288,  0.00114807,  0.11376166,
+    0.88342114, -0.36425379,  0.13863190,  0.12524180, -0.13553892,
+    0.04715856, -0.12341103,  0.04531568,  0.01899360, -0.00206897,
+    0.00567768, -0.01444163,  0.00411946, -0.00855896,  0.00381663,
+    -0.01664861, -0.05534280,  0.21328278,  0.20161162,  0.72360394,
+    0.59130708, -0.08043791,  0.08757349, -0.13893918, -0.05147377,
+    0.02680690, -0.01144070,  0.00625162, -0.00634215, -0.01248947,
+    -0.00329455, -0.00609625, -0.00136305, -0.05097048, -0.01029851,
+    0.25065384, -0.16856837, -0.07123372,  0.15992623, -0.39487617,
+    -0.79972301,  0.18118185, -0.04826639, -0.01805578, -0.02927253,
+    -0.16400618,  0.07472763,  0.10376449,  0.01705406,  0.01065801,
+    -0.01500498,  0.02039914,  0.37776349, -0.84484186,  0.10434286,
+    0.15616990,  0.13474456, -0.00906238, -0.25238368, -0.03820885,
+    -0.10650905, -0.03880833, -0.03660028, -0.09640894,  0.00583314,
+    0.01922097,  0.01489911, -0.02431117, -0.09372217,  0.39404721,
+    -0.84786223, -0.31277121,  0.03193850,  0.01974060,  0.01887901,
+    0.00337911, -0.11359599, -0.02792521, -0.03220184, -0.01533311,
+    0.00015962, -0.04225043, -0.00933965,  0.00675311,  0.00206060,
+    0.15926771,  0.40199829, -0.80792558, -0.35591604, -0.17169764,
+    0.02830436,  0.02459982, -0.03438589,  0.00718705, -0.01798329,
+    -0.01594508, -0.00702430, -0.00952419, -0.00962701, -0.01307212,
+    -0.01749740,  0.01299602,  0.00587270, -0.36103108, -0.82039266,
+    -0.43092844, -0.08500097, -0.04361674, -0.00333482,  0.01250434,
+    -0.02538295, -0.00921797,  0.01645071, -0.01400872,  0.00317607,
+    0.00003277, -0.01617646, -0.00616863, -0.00882661,  0.00466157,
+    0.00353237,  0.91803104, -0.39503305, -0.02048964,  0.00060125,
+    0.01980634,  0.00300109,  0.00313880,  0.00657337,  0.00715163,
+    0.00000261,  0.00854276, -0.00154825, -0.00516128,  0.00909527,
+    0.00095609,  0.00701196, -0.00221867, -0.00156741 };
+
+/* right KLT transforms */
+const double WebRtcIsac_kKltT2Gain[36] = {
+    0.14572837, -0.45446306,  0.61990621, -0.52197033,  0.32145074,
+    -0.11026900, -0.20698282,  0.48962182, -0.27127933, -0.33627476,
+    0.65094037, -0.32715751,  0.40262573, -0.47844405, -0.33876075,
+    0.44130653,  0.37383966, -0.39964662, -0.51730480,  0.06611973,
+    0.49030187,  0.47512886, -0.02141226, -0.51129451, -0.58578569,
+    -0.39132064, -0.13187771,  0.15649421,  0.40735596,  0.54396897,
+    0.40381276,  0.40904942,  0.41179766,  0.41167576,  0.40840251,
+    0.40468132 };
+
+const double WebRtcIsac_kKltT2Shape[36] = {
+    0.13427386, -0.35132558,  0.52506528, -0.59419077,  0.45075085,
+    -0.16312057,  0.29857439, -0.58660147,  0.34265431,  0.20879510,
+    -0.56063262,  0.30238345,  0.43308283, -0.41186999, -0.35288681,
+    0.42768996,  0.36094634, -0.45284910, -0.47116680,  0.02893449,
+    0.54326135,  0.45249040, -0.06264420, -0.52283830,  0.57137758,
+    0.44298139,  0.12617554, -0.20819946, -0.42324603, -0.48876443,
+    0.39597050,  0.40713935,  0.41389880,  0.41512486,  0.41130400,
+    0.40575001 };
+
+/* means of log gains and LAR coefficients*/
+const double WebRtcIsac_kLpcMeansGain[12] = {
+    -6.86881911, -5.35075273, -6.86792680, -5.36200897, -6.86401538,
+    -5.36921533, -6.86802969, -5.36893966, -6.86538097, -5.36315063,
+    -6.85535304, -5.35155315 };
+
+const double WebRtcIsac_kLpcMeansShape[108] = {
+    -0.91232981,  0.26258634, -0.33716701,  0.08477430, -0.03378426,
+    0.14423909,  0.07036185,  0.06155019,  0.01490385,  0.04138740,
+    0.01427317,  0.01288970,  0.83872106,  0.25750199,  0.07988929,
+    -0.01957923,  0.00831390,  0.01770300, -0.90957164,  0.25732216,
+    -0.33385344,  0.08735740, -0.03715332,  0.14584917,  0.06998990,
+    0.06131968,  0.01504379,  0.04067339,  0.01428039,  0.01406460,
+    0.83846243,  0.26169862,  0.08109025, -0.01767055,  0.00970539,
+    0.01954310, -0.90490803,  0.24656405, -0.33578607,  0.08843286,
+    -0.03749139,  0.14443959,  0.07214669,  0.06170993,  0.01449947,
+    0.04134309,  0.01314762,  0.01413471,  0.83895203,  0.26748062,
+    0.08197507, -0.01781298,  0.00885967,  0.01922394, -0.90922472,
+    0.24495889, -0.33921540,  0.08877169, -0.03581332,  0.14199172,
+    0.07444032,  0.06185940,  0.01502054,  0.04185113,  0.01276579,
+    0.01355457,  0.83645358,  0.26631720,  0.08119697, -0.01835449,
+    0.00788512,  0.01846446, -0.90482253,  0.24658310, -0.34019734,
+    0.08281090, -0.03486038,  0.14359248,  0.07401336,  0.06001471,
+    0.01528421,  0.04254560,  0.01321472,  0.01240799,  0.83857127,
+    0.26281654,  0.08174380, -0.02099842,  0.00755176,  0.01699448,
+    -0.90132307,  0.25174308, -0.33838268,  0.07883863, -0.02877906,
+    0.14105407,  0.07220290,  0.06000352,  0.01684879,  0.04226844,
+    0.01331331,  0.01269244,  0.83832138,  0.25467485,  0.08118028,
+    -0.02120528,  0.00747832,  0.01567212 };
diff --git a/modules/audio_coding/codecs/isac/main/source/lpc_tables.h b/modules/audio_coding/codecs/isac/main/source/lpc_tables.h
new file mode 100644
index 0000000..2b02557
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/lpc_tables.h
@@ -0,0 +1,100 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * lpc_tables.h
+ *
+ * header file for coding tables for the LPC coefficients
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_TABLES_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+
+#define KLT_STEPSIZE         1.00000000
+#define KLT_NUM_AVG_GAIN     0
+#define KLT_NUM_AVG_SHAPE    0
+#define KLT_NUM_MODELS  3
+#define LPC_GAIN_SCALE     4.000f
+#define LPC_LOBAND_SCALE   2.100f
+#define LPC_LOBAND_ORDER   ORDERLO
+#define LPC_HIBAND_SCALE   0.450f
+#define LPC_HIBAND_ORDER   ORDERHI
+#define LPC_GAIN_ORDER     2
+
+#define LPC_SHAPE_ORDER    (LPC_LOBAND_ORDER + LPC_HIBAND_ORDER)
+
+#define KLT_ORDER_GAIN     (LPC_GAIN_ORDER * SUBFRAMES)
+#define KLT_ORDER_SHAPE    (LPC_SHAPE_ORDER * SUBFRAMES)
+
+/* cdf array for model indicator */
+extern const uint16_t WebRtcIsac_kQKltModelCdf[KLT_NUM_MODELS+1];
+
+/* pointer to cdf array for model indicator */
+extern const uint16_t *WebRtcIsac_kQKltModelCdfPtr[1];
+
+/* initial cdf index for decoder of model indicator */
+extern const uint16_t WebRtcIsac_kQKltModelInitIndex[1];
+
+/* offset to go from rounded value to quantization index */
+extern const short WebRtcIsac_kQKltQuantMinGain[12];
+
+extern const short WebRtcIsac_kQKltQuantMinShape[108];
+
+/* maximum quantization index */
+extern const uint16_t WebRtcIsac_kQKltMaxIndGain[12];
+
+extern const uint16_t WebRtcIsac_kQKltMaxIndShape[108];
+
+/* index offset */
+extern const uint16_t WebRtcIsac_kQKltOffsetGain[12];
+
+extern const uint16_t WebRtcIsac_kQKltOffsetShape[108];
+
+/* initial cdf index for KLT coefficients */
+extern const uint16_t WebRtcIsac_kQKltInitIndexGain[12];
+
+extern const uint16_t WebRtcIsac_kQKltInitIndexShape[108];
+
+/* quantizer representation levels */
+extern const double WebRtcIsac_kQKltLevelsGain[392];
+
+extern const double WebRtcIsac_kQKltLevelsShape[578];
+
+/* cdf tables for quantizer indices */
+extern const uint16_t WebRtcIsac_kQKltCdfGain[404];
+
+extern const uint16_t WebRtcIsac_kQKltCdfShape[686];
+
+/* pointers to cdf tables for quantizer indices */
+extern const uint16_t *WebRtcIsac_kQKltCdfPtrGain[12];
+
+extern const uint16_t *WebRtcIsac_kQKltCdfPtrShape[108];
+
+/* left KLT transforms */
+extern const double WebRtcIsac_kKltT1Gain[4];
+
+extern const double WebRtcIsac_kKltT1Shape[324];
+
+/* right KLT transforms */
+extern const double WebRtcIsac_kKltT2Gain[36];
+
+extern const double WebRtcIsac_kKltT2Shape[36];
+
+/* means of log gains and LAR coefficients */
+extern const double WebRtcIsac_kLpcMeansGain[12];
+
+extern const double WebRtcIsac_kLpcMeansShape[108];
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_LPC_TABLES_H_ */
diff --git a/modules/audio_coding/codecs/isac/main/source/os_specific_inline.h b/modules/audio_coding/codecs/isac/main/source/os_specific_inline.h
new file mode 100644
index 0000000..597dc21
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/os_specific_inline.h
@@ -0,0 +1,41 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_OS_SPECIFIC_INLINE_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_OS_SPECIFIC_INLINE_H_
+
+#include <math.h>
+#include "typedefs.h"  // NOLINT(build/include)
+
+#if defined(WEBRTC_POSIX)
+#define WebRtcIsac_lrint lrint
+#elif (defined(WEBRTC_ARCH_X86) && defined(WIN32))
+static __inline long int WebRtcIsac_lrint(double x_dbl) {
+  long int x_int;
+
+  __asm {
+    fld x_dbl
+    fistp x_int
+  };
+
+  return x_int;
+}
+#else // Do a slow but correct implementation of lrint
+
+static __inline long int WebRtcIsac_lrint(double x_dbl) {
+  long int x_int;
+  x_int = (long int)floor(x_dbl + 0.499999999999);
+  return x_int;
+}
+
+#endif
+
+#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_OS_SPECIFIC_INLINE_H_
diff --git a/modules/audio_coding/codecs/isac/main/source/pitch_estimator.c b/modules/audio_coding/codecs/isac/main/source/pitch_estimator.c
new file mode 100644
index 0000000..4c0a558
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/pitch_estimator.c
@@ -0,0 +1,623 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <memory.h>
+#include <string.h>
+#ifdef WEBRTC_ANDROID
+#include <stdlib.h>
+#endif
+
+#include "modules/audio_coding/codecs/isac/main/source/pitch_estimator.h"
+
+static const double kInterpolWin[8] = {-0.00067556028640,  0.02184247643159, -0.12203175715679,  0.60086484101160,
+                                       0.60086484101160, -0.12203175715679,  0.02184247643159, -0.00067556028640};
+
+/* interpolation filter */
+__inline static void IntrepolFilter(double *data_ptr, double *intrp)
+{
+  *intrp = kInterpolWin[0] * data_ptr[-3];
+  *intrp += kInterpolWin[1] * data_ptr[-2];
+  *intrp += kInterpolWin[2] * data_ptr[-1];
+  *intrp += kInterpolWin[3] * data_ptr[0];
+  *intrp += kInterpolWin[4] * data_ptr[1];
+  *intrp += kInterpolWin[5] * data_ptr[2];
+  *intrp += kInterpolWin[6] * data_ptr[3];
+  *intrp += kInterpolWin[7] * data_ptr[4];
+}
+
+
+/* 2D parabolic interpolation */
+/* probably some 0.5 factors can be eliminated, and the square-roots can be removed from the Cholesky fact. */
+__inline static void Intrpol2D(double T[3][3], double *x, double *y, double *peak_val)
+{
+  double c, b[2], A[2][2];
+  double t1, t2, d;
+  double delta1, delta2;
+
+
+  // double T[3][3] = {{-1.25, -.25,-.25}, {-.25, .75, .75}, {-.25, .75, .75}};
+  // should result in: delta1 = 0.5;  delta2 = 0.0;  peak_val = 1.0
+
+  c = T[1][1];
+  b[0] = 0.5 * (T[1][2] + T[2][1] - T[0][1] - T[1][0]);
+  b[1] = 0.5 * (T[1][0] + T[2][1] - T[0][1] - T[1][2]);
+  A[0][1] = -0.5 * (T[0][1] + T[2][1] - T[1][0] - T[1][2]);
+  t1 = 0.5 * (T[0][0] + T[2][2]) - c;
+  t2 = 0.5 * (T[2][0] + T[0][2]) - c;
+  d = (T[0][1] + T[1][2] + T[1][0] + T[2][1]) - 4.0 * c - t1 - t2;
+  A[0][0] = -t1 - 0.5 * d;
+  A[1][1] = -t2 - 0.5 * d;
+
+  /* deal with singularities or ill-conditioned cases */
+  if ( (A[0][0] < 1e-7) || ((A[0][0] * A[1][1] - A[0][1] * A[0][1]) < 1e-7) ) {
+    *peak_val = T[1][1];
+    return;
+  }
+
+  /* Cholesky decomposition: replace A by upper-triangular factor */
+  A[0][0] = sqrt(A[0][0]);
+  A[0][1] = A[0][1] / A[0][0];
+  A[1][1] = sqrt(A[1][1] - A[0][1] * A[0][1]);
+
+  /* compute [x; y] = -0.5 * inv(A) * b */
+  t1 = b[0] / A[0][0];
+  t2 = (b[1] - t1 * A[0][1]) / A[1][1];
+  delta2 = t2 / A[1][1];
+  delta1 = 0.5 * (t1 - delta2 * A[0][1]) / A[0][0];
+  delta2 *= 0.5;
+
+  /* limit norm */
+  t1 = delta1 * delta1 + delta2 * delta2;
+  if (t1 > 1.0) {
+    delta1 /= t1;
+    delta2 /= t1;
+  }
+
+  *peak_val = 0.5 * (b[0] * delta1 + b[1] * delta2) + c;
+
+  *x += delta1;
+  *y += delta2;
+}
+
+
+static void PCorr(const double *in, double *outcorr)
+{
+  double sum, ysum, prod;
+  const double *x, *inptr;
+  int k, n;
+
+  //ysum = 1e-6;          /* use this with float (i.s.o. double)! */
+  ysum = 1e-13;
+  sum = 0.0;
+  x = in + PITCH_MAX_LAG/2 + 2;
+  for (n = 0; n < PITCH_CORR_LEN2; n++) {
+    ysum += in[n] * in[n];
+    sum += x[n] * in[n];
+  }
+
+  outcorr += PITCH_LAG_SPAN2 - 1;     /* index of last element in array */
+  *outcorr = sum / sqrt(ysum);
+
+  for (k = 1; k < PITCH_LAG_SPAN2; k++) {
+    ysum -= in[k-1] * in[k-1];
+    ysum += in[PITCH_CORR_LEN2 + k - 1] * in[PITCH_CORR_LEN2 + k - 1];
+    sum = 0.0;
+    inptr = &in[k];
+    prod = x[0] * inptr[0];
+    for (n = 1; n < PITCH_CORR_LEN2; n++) {
+      sum += prod;
+      prod = x[n] * inptr[n];
+    }
+    sum += prod;
+    outcorr--;
+    *outcorr = sum / sqrt(ysum);
+  }
+}
+
+
+void WebRtcIsac_InitializePitch(const double *in,
+                                const double old_lag,
+                                const double old_gain,
+                                PitchAnalysisStruct *State,
+                                double *lags)
+{
+  double buf_dec[PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2+2];
+  double ratio, log_lag, gain_bias;
+  double bias;
+  double corrvec1[PITCH_LAG_SPAN2];
+  double corrvec2[PITCH_LAG_SPAN2];
+  int m, k;
+  // Allocating 10 extra entries at the begining of the CorrSurf
+  double corrSurfBuff[10 + (2*PITCH_BW+3)*(PITCH_LAG_SPAN2+4)];
+  double* CorrSurf[2*PITCH_BW+3];
+  double *CorrSurfPtr1, *CorrSurfPtr2;
+  double LagWin[3] = {0.2, 0.5, 0.98};
+  int ind1, ind2, peaks_ind, peak, max_ind;
+  int peaks[PITCH_MAX_NUM_PEAKS];
+  double adj, gain_tmp;
+  double corr, corr_max;
+  double intrp_a, intrp_b, intrp_c, intrp_d;
+  double peak_vals[PITCH_MAX_NUM_PEAKS];
+  double lags1[PITCH_MAX_NUM_PEAKS];
+  double lags2[PITCH_MAX_NUM_PEAKS];
+  double T[3][3];
+  int row;
+
+  for(k = 0; k < 2*PITCH_BW+3; k++)
+  {
+    CorrSurf[k] = &corrSurfBuff[10 + k * (PITCH_LAG_SPAN2+4)];
+  }
+  /* reset CorrSurf matrix */
+  memset(corrSurfBuff, 0, sizeof(double) * (10 + (2*PITCH_BW+3) * (PITCH_LAG_SPAN2+4)));
+
+  //warnings -DH
+  max_ind = 0;
+  peak = 0;
+
+  /* copy old values from state buffer */
+  memcpy(buf_dec, State->dec_buffer, sizeof(double) * (PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2));
+
+  /* decimation; put result after the old values */
+  WebRtcIsac_DecimateAllpass(in, State->decimator_state, PITCH_FRAME_LEN,
+                             &buf_dec[PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2]);
+
+  /* low-pass filtering */
+  for (k = PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2; k < PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2+2; k++)
+    buf_dec[k] += 0.75 * buf_dec[k-1] - 0.25 * buf_dec[k-2];
+
+  /* copy end part back into state buffer */
+  memcpy(State->dec_buffer, buf_dec+PITCH_FRAME_LEN/2, sizeof(double) * (PITCH_CORR_LEN2+PITCH_CORR_STEP2+PITCH_MAX_LAG/2-PITCH_FRAME_LEN/2+2));
+
+  /* compute correlation for first and second half of the frame */
+  PCorr(buf_dec, corrvec1);
+  PCorr(buf_dec + PITCH_CORR_STEP2, corrvec2);
+
+  /* bias towards pitch lag of previous frame */
+  log_lag = log(0.5 * old_lag);
+  gain_bias = 4.0 * old_gain * old_gain;
+  if (gain_bias > 0.8) gain_bias = 0.8;
+  for (k = 0; k < PITCH_LAG_SPAN2; k++)
+  {
+    ratio = log((double) (k + (PITCH_MIN_LAG/2-2))) - log_lag;
+    bias = 1.0 + gain_bias * exp(-5.0 * ratio * ratio);
+    corrvec1[k] *= bias;
+  }
+
+  /* taper correlation functions */
+  for (k = 0; k < 3; k++) {
+    gain_tmp = LagWin[k];
+    corrvec1[k] *= gain_tmp;
+    corrvec2[k] *= gain_tmp;
+    corrvec1[PITCH_LAG_SPAN2-1-k] *= gain_tmp;
+    corrvec2[PITCH_LAG_SPAN2-1-k] *= gain_tmp;
+  }
+
+  corr_max = 0.0;
+  /* fill middle row of correlation surface */
+  ind1 = 0;
+  ind2 = 0;
+  CorrSurfPtr1 = &CorrSurf[PITCH_BW][2];
+  for (k = 0; k < PITCH_LAG_SPAN2; k++) {
+    corr = corrvec1[ind1++] + corrvec2[ind2++];
+    CorrSurfPtr1[k] = corr;
+    if (corr > corr_max) {
+      corr_max = corr;  /* update maximum */
+      max_ind = (int)(&CorrSurfPtr1[k] - &CorrSurf[0][0]);
+    }
+  }
+  /* fill first and last rows of correlation surface */
+  ind1 = 0;
+  ind2 = PITCH_BW;
+  CorrSurfPtr1 = &CorrSurf[0][2];
+  CorrSurfPtr2 = &CorrSurf[2*PITCH_BW][PITCH_BW+2];
+  for (k = 0; k < PITCH_LAG_SPAN2-PITCH_BW; k++) {
+    ratio = ((double) (ind1 + 12)) / ((double) (ind2 + 12));
+    adj = 0.2 * ratio * (2.0 - ratio);   /* adjustment factor; inverse parabola as a function of ratio */
+    corr = adj * (corrvec1[ind1] + corrvec2[ind2]);
+    CorrSurfPtr1[k] = corr;
+    if (corr > corr_max) {
+      corr_max = corr;  /* update maximum */
+      max_ind = (int)(&CorrSurfPtr1[k] - &CorrSurf[0][0]);
+    }
+    corr = adj * (corrvec1[ind2++] + corrvec2[ind1++]);
+    CorrSurfPtr2[k] = corr;
+    if (corr > corr_max) {
+      corr_max = corr;  /* update maximum */
+      max_ind = (int)(&CorrSurfPtr2[k] - &CorrSurf[0][0]);
+    }
+  }
+  /* fill second and next to last rows of correlation surface */
+  ind1 = 0;
+  ind2 = PITCH_BW-1;
+  CorrSurfPtr1 = &CorrSurf[1][2];
+  CorrSurfPtr2 = &CorrSurf[2*PITCH_BW-1][PITCH_BW+1];
+  for (k = 0; k < PITCH_LAG_SPAN2-PITCH_BW+1; k++) {
+    ratio = ((double) (ind1 + 12)) / ((double) (ind2 + 12));
+    adj = 0.9 * ratio * (2.0 - ratio);   /* adjustment factor; inverse parabola as a function of ratio */
+    corr = adj * (corrvec1[ind1] + corrvec2[ind2]);
+    CorrSurfPtr1[k] = corr;
+    if (corr > corr_max) {
+      corr_max = corr;  /* update maximum */
+      max_ind = (int)(&CorrSurfPtr1[k] - &CorrSurf[0][0]);
+    }
+    corr = adj * (corrvec1[ind2++] + corrvec2[ind1++]);
+    CorrSurfPtr2[k] = corr;
+    if (corr > corr_max) {
+      corr_max = corr;  /* update maximum */
+      max_ind = (int)(&CorrSurfPtr2[k] - &CorrSurf[0][0]);
+    }
+  }
+  /* fill remainder of correlation surface */
+  for (m = 2; m < PITCH_BW; m++) {
+    ind1 = 0;
+    ind2 = PITCH_BW - m;         /* always larger than ind1 */
+    CorrSurfPtr1 = &CorrSurf[m][2];
+    CorrSurfPtr2 = &CorrSurf[2*PITCH_BW-m][PITCH_BW+2-m];
+    for (k = 0; k < PITCH_LAG_SPAN2-PITCH_BW+m; k++) {
+      ratio = ((double) (ind1 + 12)) / ((double) (ind2 + 12));
+      adj = ratio * (2.0 - ratio);    /* adjustment factor; inverse parabola as a function of ratio */
+      corr = adj * (corrvec1[ind1] + corrvec2[ind2]);
+      CorrSurfPtr1[k] = corr;
+      if (corr > corr_max) {
+        corr_max = corr;  /* update maximum */
+        max_ind = (int)(&CorrSurfPtr1[k] - &CorrSurf[0][0]);
+      }
+      corr = adj * (corrvec1[ind2++] + corrvec2[ind1++]);
+      CorrSurfPtr2[k] = corr;
+      if (corr > corr_max) {
+        corr_max = corr;  /* update maximum */
+        max_ind = (int)(&CorrSurfPtr2[k] - &CorrSurf[0][0]);
+      }
+    }
+  }
+
+  /* threshold value to qualify as a peak */
+  corr_max *= 0.6;
+
+  peaks_ind = 0;
+  /* find peaks */
+  for (m = 1; m < PITCH_BW+1; m++) {
+    if (peaks_ind == PITCH_MAX_NUM_PEAKS) break;
+    CorrSurfPtr1 = &CorrSurf[m][2];
+    for (k = 2; k < PITCH_LAG_SPAN2-PITCH_BW-2+m; k++) {
+      corr = CorrSurfPtr1[k];
+      if (corr > corr_max) {
+        if ( (corr > CorrSurfPtr1[k - (PITCH_LAG_SPAN2+5)]) && (corr > CorrSurfPtr1[k - (PITCH_LAG_SPAN2+4)]) ) {
+          if ( (corr > CorrSurfPtr1[k + (PITCH_LAG_SPAN2+4)]) && (corr > CorrSurfPtr1[k + (PITCH_LAG_SPAN2+5)]) ) {
+            /* found a peak; store index into matrix */
+            peaks[peaks_ind++] = (int)(&CorrSurfPtr1[k] - &CorrSurf[0][0]);
+            if (peaks_ind == PITCH_MAX_NUM_PEAKS) break;
+          }
+        }
+      }
+    }
+  }
+  for (m = PITCH_BW+1; m < 2*PITCH_BW; m++) {
+    if (peaks_ind == PITCH_MAX_NUM_PEAKS) break;
+    CorrSurfPtr1 = &CorrSurf[m][2];
+    for (k = 2+m-PITCH_BW; k < PITCH_LAG_SPAN2-2; k++) {
+      corr = CorrSurfPtr1[k];
+      if (corr > corr_max) {
+        if ( (corr > CorrSurfPtr1[k - (PITCH_LAG_SPAN2+5)]) && (corr > CorrSurfPtr1[k - (PITCH_LAG_SPAN2+4)]) ) {
+          if ( (corr > CorrSurfPtr1[k + (PITCH_LAG_SPAN2+4)]) && (corr > CorrSurfPtr1[k + (PITCH_LAG_SPAN2+5)]) ) {
+            /* found a peak; store index into matrix */
+            peaks[peaks_ind++] = (int)(&CorrSurfPtr1[k] - &CorrSurf[0][0]);
+            if (peaks_ind == PITCH_MAX_NUM_PEAKS) break;
+          }
+        }
+      }
+    }
+  }
+
+  if (peaks_ind > 0) {
+    /* examine each peak */
+    CorrSurfPtr1 = &CorrSurf[0][0];
+    for (k = 0; k < peaks_ind; k++) {
+      peak = peaks[k];
+
+      /* compute four interpolated values around current peak */
+      IntrepolFilter(&CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+5)], &intrp_a);
+      IntrepolFilter(&CorrSurfPtr1[peak - 1            ], &intrp_b);
+      IntrepolFilter(&CorrSurfPtr1[peak                ], &intrp_c);
+      IntrepolFilter(&CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+4)], &intrp_d);
+
+      /* determine maximum of the interpolated values */
+      corr = CorrSurfPtr1[peak];
+      corr_max = intrp_a;
+      if (intrp_b > corr_max) corr_max = intrp_b;
+      if (intrp_c > corr_max) corr_max = intrp_c;
+      if (intrp_d > corr_max) corr_max = intrp_d;
+
+      /* determine where the peak sits and fill a 3x3 matrix around it */
+      row = peak / (PITCH_LAG_SPAN2+4);
+      lags1[k] = (double) ((peak - row * (PITCH_LAG_SPAN2+4)) + PITCH_MIN_LAG/2 - 4);
+      lags2[k] = (double) (lags1[k] + PITCH_BW - row);
+      if ( corr > corr_max ) {
+        T[0][0] = CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+5)];
+        T[2][0] = CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+4)];
+        T[1][1] = corr;
+        T[0][2] = CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+4)];
+        T[2][2] = CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+5)];
+        T[1][0] = intrp_a;
+        T[0][1] = intrp_b;
+        T[2][1] = intrp_c;
+        T[1][2] = intrp_d;
+      } else {
+        if (intrp_a == corr_max) {
+          lags1[k] -= 0.5;
+          lags2[k] += 0.5;
+          IntrepolFilter(&CorrSurfPtr1[peak - 2*(PITCH_LAG_SPAN2+5)], &T[0][0]);
+          IntrepolFilter(&CorrSurfPtr1[peak - (2*PITCH_LAG_SPAN2+9)], &T[2][0]);
+          T[1][1] = intrp_a;
+          T[0][2] = intrp_b;
+          T[2][2] = intrp_c;
+          T[1][0] = CorrSurfPtr1[peak - (2*PITCH_LAG_SPAN2+9)];
+          T[0][1] = CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+5)];
+          T[2][1] = CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+4)];
+          T[1][2] = corr;
+        } else if (intrp_b == corr_max) {
+          lags1[k] -= 0.5;
+          lags2[k] -= 0.5;
+          IntrepolFilter(&CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+6)], &T[0][0]);
+          T[2][0] = intrp_a;
+          T[1][1] = intrp_b;
+          IntrepolFilter(&CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+3)], &T[0][2]);
+          T[2][2] = intrp_d;
+          T[1][0] = CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+5)];
+          T[0][1] = CorrSurfPtr1[peak - 1];
+          T[2][1] = corr;
+          T[1][2] = CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+4)];
+        } else if (intrp_c == corr_max) {
+          lags1[k] += 0.5;
+          lags2[k] += 0.5;
+          T[0][0] = intrp_a;
+          IntrepolFilter(&CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+4)], &T[2][0]);
+          T[1][1] = intrp_c;
+          T[0][2] = intrp_d;
+          IntrepolFilter(&CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+5)], &T[2][2]);
+          T[1][0] = CorrSurfPtr1[peak - (PITCH_LAG_SPAN2+4)];
+          T[0][1] = corr;
+          T[2][1] = CorrSurfPtr1[peak + 1];
+          T[1][2] = CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+5)];
+        } else {
+          lags1[k] += 0.5;
+          lags2[k] -= 0.5;
+          T[0][0] = intrp_b;
+          T[2][0] = intrp_c;
+          T[1][1] = intrp_d;
+          IntrepolFilter(&CorrSurfPtr1[peak + 2*(PITCH_LAG_SPAN2+4)], &T[0][2]);
+          IntrepolFilter(&CorrSurfPtr1[peak + (2*PITCH_LAG_SPAN2+9)], &T[2][2]);
+          T[1][0] = corr;
+          T[0][1] = CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+4)];
+          T[2][1] = CorrSurfPtr1[peak + (PITCH_LAG_SPAN2+5)];
+          T[1][2] = CorrSurfPtr1[peak + (2*PITCH_LAG_SPAN2+9)];
+        }
+      }
+
+      /* 2D parabolic interpolation gives more accurate lags and peak value */
+      Intrpol2D(T, &lags1[k], &lags2[k], &peak_vals[k]);
+    }
+
+    /* determine the highest peak, after applying a bias towards short lags */
+    corr_max = 0.0;
+    for (k = 0; k < peaks_ind; k++) {
+      corr = peak_vals[k] * pow(PITCH_PEAK_DECAY, log(lags1[k] + lags2[k]));
+      if (corr > corr_max) {
+        corr_max = corr;
+        peak = k;
+      }
+    }
+
+    lags1[peak] *= 2.0;
+    lags2[peak] *= 2.0;
+
+    if (lags1[peak] < (double) PITCH_MIN_LAG) lags1[peak] = (double) PITCH_MIN_LAG;
+    if (lags2[peak] < (double) PITCH_MIN_LAG) lags2[peak] = (double) PITCH_MIN_LAG;
+    if (lags1[peak] > (double) PITCH_MAX_LAG) lags1[peak] = (double) PITCH_MAX_LAG;
+    if (lags2[peak] > (double) PITCH_MAX_LAG) lags2[peak] = (double) PITCH_MAX_LAG;
+
+    /* store lags of highest peak in output array */
+    lags[0] = lags1[peak];
+    lags[1] = lags1[peak];
+    lags[2] = lags2[peak];
+    lags[3] = lags2[peak];
+  }
+  else
+  {
+    row = max_ind / (PITCH_LAG_SPAN2+4);
+    lags1[0] = (double) ((max_ind - row * (PITCH_LAG_SPAN2+4)) + PITCH_MIN_LAG/2 - 4);
+    lags2[0] = (double) (lags1[0] + PITCH_BW - row);
+
+    if (lags1[0] < (double) PITCH_MIN_LAG) lags1[0] = (double) PITCH_MIN_LAG;
+    if (lags2[0] < (double) PITCH_MIN_LAG) lags2[0] = (double) PITCH_MIN_LAG;
+    if (lags1[0] > (double) PITCH_MAX_LAG) lags1[0] = (double) PITCH_MAX_LAG;
+    if (lags2[0] > (double) PITCH_MAX_LAG) lags2[0] = (double) PITCH_MAX_LAG;
+
+    /* store lags of highest peak in output array */
+    lags[0] = lags1[0];
+    lags[1] = lags1[0];
+    lags[2] = lags2[0];
+    lags[3] = lags2[0];
+  }
+}
+
+
+
+/* create weighting matrix by orthogonalizing a basis of polynomials of increasing order
+ * t = (0:4)';
+ * A = [t.^0, t.^1, t.^2, t.^3, t.^4];
+ * [Q, dummy] = qr(A);
+ * P.Weight = Q * diag([0, .1, .5, 1, 1]) * Q'; */
+static const double kWeight[5][5] = {
+  { 0.29714285714286,  -0.30857142857143,  -0.05714285714286,   0.05142857142857,  0.01714285714286},
+  {-0.30857142857143,   0.67428571428571,  -0.27142857142857,  -0.14571428571429,  0.05142857142857},
+  {-0.05714285714286,  -0.27142857142857,   0.65714285714286,  -0.27142857142857, -0.05714285714286},
+  { 0.05142857142857,  -0.14571428571429,  -0.27142857142857,   0.67428571428571, -0.30857142857143},
+  { 0.01714285714286,   0.05142857142857,  -0.05714285714286,  -0.30857142857143,  0.29714285714286}
+};
+
+
+void WebRtcIsac_PitchAnalysis(const double *in,               /* PITCH_FRAME_LEN samples */
+                              double *out,                    /* PITCH_FRAME_LEN+QLOOKAHEAD samples */
+                              PitchAnalysisStruct *State,
+                              double *lags,
+                              double *gains)
+{
+  double HPin[PITCH_FRAME_LEN];
+  double Weighted[PITCH_FRAME_LEN];
+  double Whitened[PITCH_FRAME_LEN + QLOOKAHEAD];
+  double inbuf[PITCH_FRAME_LEN + QLOOKAHEAD];
+  double out_G[PITCH_FRAME_LEN + QLOOKAHEAD];          // could be removed by using out instead
+  double out_dG[4][PITCH_FRAME_LEN + QLOOKAHEAD];
+  double old_lag, old_gain;
+  double nrg_wht, tmp;
+  double Wnrg, Wfluct, Wgain;
+  double H[4][4];
+  double grad[4];
+  double dG[4];
+  int k, m, n, iter;
+
+  /* high pass filtering using second order pole-zero filter */
+  WebRtcIsac_Highpass(in, HPin, State->hp_state, PITCH_FRAME_LEN);
+
+  /* copy from state into buffer */
+  memcpy(Whitened, State->whitened_buf, sizeof(double) * QLOOKAHEAD);
+
+  /* compute weighted and whitened signals */
+  WebRtcIsac_WeightingFilter(HPin, &Weighted[0], &Whitened[QLOOKAHEAD], &(State->Wghtstr));
+
+  /* copy from buffer into state */
+  memcpy(State->whitened_buf, Whitened+PITCH_FRAME_LEN, sizeof(double) * QLOOKAHEAD);
+
+  old_lag = State->PFstr_wght.oldlagp[0];
+  old_gain = State->PFstr_wght.oldgainp[0];
+
+  /* inital pitch estimate */
+  WebRtcIsac_InitializePitch(Weighted, old_lag, old_gain, State, lags);
+
+
+  /* Iterative optimization of lags - to be done */
+
+  /* compute energy of whitened signal */
+  nrg_wht = 0.0;
+  for (k = 0; k < PITCH_FRAME_LEN + QLOOKAHEAD; k++)
+    nrg_wht += Whitened[k] * Whitened[k];
+
+
+  /* Iterative optimization of gains */
+
+  /* set weights for energy, gain fluctiation, and spectral gain penalty functions */
+  Wnrg = 1.0 / nrg_wht;
+  Wgain = 0.005;
+  Wfluct = 3.0;
+
+  /* set initial gains */
+  for (k = 0; k < 4; k++)
+    gains[k] = PITCH_MAX_GAIN_06;
+
+  /* two iterations should be enough */
+  for (iter = 0; iter < 2; iter++) {
+    /* compute Jacobian of pre-filter output towards gains */
+    WebRtcIsac_PitchfilterPre_gains(Whitened, out_G, out_dG, &(State->PFstr_wght), lags, gains);
+
+    /* gradient and approximate Hessian (lower triangle) for minimizing the filter's output power */
+    for (k = 0; k < 4; k++) {
+      tmp = 0.0;
+      for (n = 0; n < PITCH_FRAME_LEN + QLOOKAHEAD; n++)
+        tmp += out_G[n] * out_dG[k][n];
+      grad[k] = tmp * Wnrg;
+    }
+    for (k = 0; k < 4; k++) {
+      for (m = 0; m <= k; m++) {
+        tmp = 0.0;
+        for (n = 0; n < PITCH_FRAME_LEN + QLOOKAHEAD; n++)
+          tmp += out_dG[m][n] * out_dG[k][n];
+        H[k][m] = tmp * Wnrg;
+      }
+    }
+
+    /* add gradient and Hessian (lower triangle) for dampening fast gain changes */
+    for (k = 0; k < 4; k++) {
+      tmp = kWeight[k+1][0] * old_gain;
+      for (m = 0; m < 4; m++)
+        tmp += kWeight[k+1][m+1] * gains[m];
+      grad[k] += tmp * Wfluct;
+    }
+    for (k = 0; k < 4; k++) {
+      for (m = 0; m <= k; m++) {
+        H[k][m] += kWeight[k+1][m+1] * Wfluct;
+      }
+    }
+
+    /* add gradient and Hessian for dampening gain */
+    for (k = 0; k < 3; k++) {
+      tmp = 1.0 / (1 - gains[k]);
+      grad[k] += tmp * tmp * Wgain;
+      H[k][k] += 2.0 * tmp * (tmp * tmp * Wgain);
+    }
+    tmp = 1.0 / (1 - gains[3]);
+    grad[3] += 1.33 * (tmp * tmp * Wgain);
+    H[3][3] += 2.66 * tmp * (tmp * tmp * Wgain);
+
+
+    /* compute Cholesky factorization of Hessian
+     * by overwritting the upper triangle; scale factors on diagonal
+     * (for non pc-platforms store the inverse of the diagonals seperately to minimize divisions) */
+    H[0][1] = H[1][0] / H[0][0];
+    H[0][2] = H[2][0] / H[0][0];
+    H[0][3] = H[3][0] / H[0][0];
+    H[1][1] -= H[0][0] * H[0][1] * H[0][1];
+    H[1][2] = (H[2][1] - H[0][1] * H[2][0]) / H[1][1];
+    H[1][3] = (H[3][1] - H[0][1] * H[3][0]) / H[1][1];
+    H[2][2] -= H[0][0] * H[0][2] * H[0][2] + H[1][1] * H[1][2] * H[1][2];
+    H[2][3] = (H[3][2] - H[0][2] * H[3][0] - H[1][2] * H[1][1] * H[1][3]) / H[2][2];
+    H[3][3] -= H[0][0] * H[0][3] * H[0][3] + H[1][1] * H[1][3] * H[1][3] + H[2][2] * H[2][3] * H[2][3];
+
+    /* Compute update as  delta_gains = -inv(H) * grad */
+    /* copy and negate */
+    for (k = 0; k < 4; k++)
+      dG[k] = -grad[k];
+    /* back substitution */
+    dG[1] -= dG[0] * H[0][1];
+    dG[2] -= dG[0] * H[0][2] + dG[1] * H[1][2];
+    dG[3] -= dG[0] * H[0][3] + dG[1] * H[1][3] + dG[2] * H[2][3];
+    /* scale */
+    for (k = 0; k < 4; k++)
+      dG[k] /= H[k][k];
+    /* back substitution */
+    dG[2] -= dG[3] * H[2][3];
+    dG[1] -= dG[3] * H[1][3] + dG[2] * H[1][2];
+    dG[0] -= dG[3] * H[0][3] + dG[2] * H[0][2] + dG[1] * H[0][1];
+
+    /* update gains and check range */
+    for (k = 0; k < 4; k++) {
+      gains[k] += dG[k];
+      if (gains[k] > PITCH_MAX_GAIN)
+        gains[k] = PITCH_MAX_GAIN;
+      else if (gains[k] < 0.0)
+        gains[k] = 0.0;
+    }
+  }
+
+  /* update state for next frame */
+  WebRtcIsac_PitchfilterPre(Whitened, out, &(State->PFstr_wght), lags, gains);
+
+  /* concatenate previous input's end and current input */
+  memcpy(inbuf, State->inbuf, sizeof(double) * QLOOKAHEAD);
+  memcpy(inbuf+QLOOKAHEAD, in, sizeof(double) * PITCH_FRAME_LEN);
+
+  /* lookahead pitch filtering for masking analysis */
+  WebRtcIsac_PitchfilterPre_la(inbuf, out, &(State->PFstr), lags, gains);
+
+  /* store last part of input */
+  for (k = 0; k < QLOOKAHEAD; k++)
+    State->inbuf[k] = inbuf[k + PITCH_FRAME_LEN];
+}
diff --git a/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h b/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h
new file mode 100644
index 0000000..47dab0e
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h
@@ -0,0 +1,73 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * pitch_estimator.h
+ *
+ * Pitch functions
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_ESTIMATOR_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_ESTIMATOR_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+void WebRtcIsac_PitchAnalysis(const double *in,               /* PITCH_FRAME_LEN samples */
+                              double *out,                    /* PITCH_FRAME_LEN+QLOOKAHEAD samples */
+                              PitchAnalysisStruct *State,
+                              double *lags,
+                              double *gains);
+
+void WebRtcIsac_InitializePitch(const double *in,
+                                const double old_lag,
+                                const double old_gain,
+                                PitchAnalysisStruct *State,
+                                double *lags);
+
+void WebRtcIsac_PitchfilterPre(double *indat,
+                               double *outdat,
+                               PitchFiltstr *pfp,
+                               double *lags,
+                               double *gains);
+
+void WebRtcIsac_PitchfilterPost(double *indat,
+                                double *outdat,
+                                PitchFiltstr *pfp,
+                                double *lags,
+                                double *gains);
+
+void WebRtcIsac_PitchfilterPre_la(double *indat,
+                                  double *outdat,
+                                  PitchFiltstr *pfp,
+                                  double *lags,
+                                  double *gains);
+
+void WebRtcIsac_PitchfilterPre_gains(double *indat,
+                                     double *outdat,
+                                     double out_dG[][PITCH_FRAME_LEN + QLOOKAHEAD],
+                                     PitchFiltstr *pfp,
+                                     double *lags,
+                                     double *gains);
+
+void WebRtcIsac_WeightingFilter(const double *in, double *weiout, double *whiout, WeightFiltstr *wfdata);
+
+void WebRtcIsac_Highpass(const double *in,
+                         double *out,
+                         double *state,
+                         size_t N);
+
+void WebRtcIsac_DecimateAllpass(const double *in,
+                                double *state_in,  /* array of size:
+                                                    *     2*ALLPASSSECTIONS+1 */
+                                size_t N,          /* number of input samples */
+                                double *out);      /* array of size N/2 */
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_ESTIMATOR_H_ */
diff --git a/modules/audio_coding/codecs/isac/main/source/pitch_filter.c b/modules/audio_coding/codecs/isac/main/source/pitch_filter.c
new file mode 100644
index 0000000..61cd533
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/pitch_filter.c
@@ -0,0 +1,388 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <memory.h>
+#include <stdlib.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/pitch_estimator.h"
+#include "modules/audio_coding/codecs/isac/main/source/os_specific_inline.h"
+#include "rtc_base/compile_assert_c.h"
+
+/*
+ * We are implementing the following filters;
+ *
+ * Pre-filtering:
+ *   y(z) = x(z) + damper(z) * gain * (x(z) + y(z)) * z ^ (-lag);
+ *
+ * Post-filtering:
+ *   y(z) = x(z) - damper(z) * gain * (x(z) + y(z)) * z ^ (-lag);
+ *
+ * Note that |lag| is a floating number so we perform an interpolation to
+ * obtain the correct |lag|.
+ *
+ */
+
+static const double kDampFilter[PITCH_DAMPORDER] = {-0.07, 0.25, 0.64, 0.25,
+    -0.07};
+
+/* interpolation coefficients; generated by design_pitch_filter.m */
+static const double kIntrpCoef[PITCH_FRACS][PITCH_FRACORDER] = {
+    {-0.02239172458614,  0.06653315052934, -0.16515880017569,  0.60701333734125,
+     0.64671399919202, -0.20249000396417,  0.09926548334755, -0.04765933793109,
+     0.01754159521746},
+    {-0.01985640750434,  0.05816126837866, -0.13991265473714,  0.44560418147643,
+     0.79117042386876, -0.20266133815188,  0.09585268418555, -0.04533310458084,
+     0.01654127246314},
+    {-0.01463300534216,  0.04229888475060, -0.09897034715253,  0.28284326017787,
+     0.90385267956632, -0.16976950138649,  0.07704272393639, -0.03584218578311,
+     0.01295781500709},
+    {-0.00764851320885,  0.02184035544377, -0.04985561057281,  0.13083306574393,
+     0.97545011664662, -0.10177807997561,  0.04400901776474, -0.02010737175166,
+     0.00719783432422},
+    {-0.00000000000000,  0.00000000000000, -0.00000000000001,  0.00000000000001,
+     0.99999999999999,  0.00000000000001, -0.00000000000001,  0.00000000000000,
+     -0.00000000000000},
+    {0.00719783432422, -0.02010737175166,  0.04400901776474, -0.10177807997562,
+     0.97545011664663,  0.13083306574393, -0.04985561057280,  0.02184035544377,
+     -0.00764851320885},
+    {0.01295781500710, -0.03584218578312,  0.07704272393640, -0.16976950138650,
+     0.90385267956634,  0.28284326017785, -0.09897034715252,  0.04229888475059,
+     -0.01463300534216},
+    {0.01654127246315, -0.04533310458085,  0.09585268418557, -0.20266133815190,
+     0.79117042386878,  0.44560418147640, -0.13991265473712,  0.05816126837865,
+     -0.01985640750433}
+};
+
+/*
+ * Enumerating the operation of the filter.
+ * iSAC has 4 different pitch-filter which are very similar in their structure.
+ *
+ * kPitchFilterPre     : In this mode the filter is operating as pitch
+ *                       pre-filter. This is used at the encoder.
+ * kPitchFilterPost    : In this mode the filter is operating as pitch
+ *                       post-filter. This is the inverse of pre-filter and used
+ *                       in the decoder.
+ * kPitchFilterPreLa   : This is, in structure, similar to pre-filtering but
+ *                       utilizing 3 millisecond lookahead. It is used to
+ *                       obtain the signal for LPC analysis.
+ * kPitchFilterPreGain : This is, in structure, similar to pre-filtering but
+ *                       differential changes in gain is considered. This is
+ *                       used to find the optimal gain.
+ */
+typedef enum {
+  kPitchFilterPre, kPitchFilterPost, kPitchFilterPreLa, kPitchFilterPreGain
+} PitchFilterOperation;
+
+/*
+ * Structure with parameters used for pitch-filtering.
+ * buffer           : a buffer where the sum of previous inputs and outputs
+ *                    are stored.
+ * damper_state     : the state of the damping filter. The filter is defined by
+ *                    |kDampFilter|.
+ * interpol_coeff   : pointer to a set of coefficient which are used to utilize
+ *                    fractional pitch by interpolation.
+ * gain             : pitch-gain to be applied to the current segment of input.
+ * lag              : pitch-lag for the current segment of input.
+ * lag_offset       : the offset of lag w.r.t. current sample.
+ * sub_frame        : sub-frame index, there are 4 pitch sub-frames in an iSAC
+ *                    frame.
+ *                    This specifies the usage of the filter. See
+ *                    'PitchFilterOperation' for operational modes.
+ * num_samples      : number of samples to be processed in each segment.
+ * index            : index of the input and output sample.
+ * damper_state_dg  : state of damping filter for different trial gains.
+ * gain_mult        : differential changes to gain.
+ */
+typedef struct {
+  double buffer[PITCH_INTBUFFSIZE + QLOOKAHEAD];
+  double damper_state[PITCH_DAMPORDER];
+  const double *interpol_coeff;
+  double gain;
+  double lag;
+  int lag_offset;
+
+  int sub_frame;
+  PitchFilterOperation mode;
+  int num_samples;
+  int index;
+
+  double damper_state_dg[4][PITCH_DAMPORDER];
+  double gain_mult[4];
+} PitchFilterParam;
+
+/**********************************************************************
+ * FilterSegment()
+ * Filter one segment, a quarter of a frame.
+ *
+ * Inputs
+ *   in_data      : pointer to the input signal of 30 ms at 8 kHz sample-rate.
+ *   filter_param : pitch filter parameters.
+ *
+ * Outputs
+ *   out_data     : pointer to a buffer where the filtered signal is written to.
+ *   out_dg       : [only used in kPitchFilterPreGain] pointer to a buffer
+ *                  where the output of different gain values (differential
+ *                  change to gain) is written.
+ */
+static void FilterSegment(const double* in_data, PitchFilterParam* parameters,
+                          double* out_data,
+                          double out_dg[][PITCH_FRAME_LEN + QLOOKAHEAD]) {
+  int n;
+  int m;
+  int j;
+  double sum;
+  double sum2;
+  /* Index of |parameters->buffer| where the output is written to. */
+  int pos = parameters->index + PITCH_BUFFSIZE;
+  /* Index of |parameters->buffer| where samples are read for fractional-lag
+   * computation. */
+  int pos_lag = pos - parameters->lag_offset;
+
+  for (n = 0; n < parameters->num_samples; ++n) {
+    /* Shift low pass filter states. */
+    for (m = PITCH_DAMPORDER - 1; m > 0; --m) {
+      parameters->damper_state[m] = parameters->damper_state[m - 1];
+    }
+    /* Filter to get fractional pitch. */
+    sum = 0.0;
+    for (m = 0; m < PITCH_FRACORDER; ++m) {
+      sum += parameters->buffer[pos_lag + m] * parameters->interpol_coeff[m];
+    }
+    /* Multiply with gain. */
+    parameters->damper_state[0] = parameters->gain * sum;
+
+    if (parameters->mode == kPitchFilterPreGain) {
+      int lag_index = parameters->index - parameters->lag_offset;
+      int m_tmp = (lag_index < 0) ? -lag_index : 0;
+      /* Update the damper state for the new sample. */
+      for (m = PITCH_DAMPORDER - 1; m > 0; --m) {
+        for (j = 0; j < 4; ++j) {
+          parameters->damper_state_dg[j][m] =
+              parameters->damper_state_dg[j][m - 1];
+        }
+      }
+
+      for (j = 0; j < parameters->sub_frame + 1; ++j) {
+        /* Filter for fractional pitch. */
+        sum2 = 0.0;
+        for (m = PITCH_FRACORDER-1; m >= m_tmp; --m) {
+          /* |lag_index + m| is always larger than or equal to zero, see how
+           * m_tmp is computed. This is equivalent to assume samples outside
+           * |out_dg[j]| are zero. */
+          sum2 += out_dg[j][lag_index + m] * parameters->interpol_coeff[m];
+        }
+        /* Add the contribution of differential gain change. */
+        parameters->damper_state_dg[j][0] = parameters->gain_mult[j] * sum +
+            parameters->gain * sum2;
+      }
+
+      /* Filter with damping filter, and store the results. */
+      for (j = 0; j < parameters->sub_frame + 1; ++j) {
+        sum = 0.0;
+        for (m = 0; m < PITCH_DAMPORDER; ++m) {
+          sum -= parameters->damper_state_dg[j][m] * kDampFilter[m];
+        }
+        out_dg[j][parameters->index] = sum;
+      }
+    }
+    /* Filter with damping filter. */
+    sum = 0.0;
+    for (m = 0; m < PITCH_DAMPORDER; ++m) {
+      sum += parameters->damper_state[m] * kDampFilter[m];
+    }
+
+    /* Subtract from input and update buffer. */
+    out_data[parameters->index] = in_data[parameters->index] - sum;
+    parameters->buffer[pos] = in_data[parameters->index] +
+        out_data[parameters->index];
+
+    ++parameters->index;
+    ++pos;
+    ++pos_lag;
+  }
+  return;
+}
+
+/* Update filter parameters based on the pitch-gains and pitch-lags. */
+static void Update(PitchFilterParam* parameters) {
+  double fraction;
+  int fraction_index;
+  /* Compute integer lag-offset. */
+  parameters->lag_offset = WebRtcIsac_lrint(parameters->lag + PITCH_FILTDELAY +
+                                            0.5);
+  /* Find correct set of coefficients for computing fractional pitch. */
+  fraction = parameters->lag_offset - (parameters->lag + PITCH_FILTDELAY);
+  fraction_index = WebRtcIsac_lrint(PITCH_FRACS * fraction - 0.5);
+  parameters->interpol_coeff = kIntrpCoef[fraction_index];
+
+  if (parameters->mode == kPitchFilterPreGain) {
+    /* If in this mode make a differential change to pitch gain. */
+    parameters->gain_mult[parameters->sub_frame] += 0.2;
+    if (parameters->gain_mult[parameters->sub_frame] > 1.0) {
+      parameters->gain_mult[parameters->sub_frame] = 1.0;
+    }
+    if (parameters->sub_frame > 0) {
+      parameters->gain_mult[parameters->sub_frame - 1] -= 0.2;
+    }
+  }
+}
+
+/******************************************************************************
+ * FilterFrame()
+ * Filter a frame of 30 millisecond, given pitch-lags and pitch-gains.
+ *
+ * Inputs
+ *   in_data     : pointer to the input signal of 30 ms at 8 kHz sample-rate.
+ *   lags        : pointer to pitch-lags, 4 lags per frame.
+ *   gains       : pointer to pitch-gians, 4 gains per frame.
+ *   mode        : defining the functionality of the filter. It takes the
+ *                 following values.
+ *                 kPitchFilterPre:     Pitch pre-filter, used at encoder.
+ *                 kPitchFilterPost:    Pitch post-filter, used at decoder.
+ *                 kPitchFilterPreLa:   Pitch pre-filter with lookahead.
+ *                 kPitchFilterPreGain: Pitch pre-filter used to otain optimal
+ *                                      pitch-gains.
+ *
+ * Outputs
+ *   out_data    : pointer to a buffer where the filtered signal is written to.
+ *   out_dg      : [only used in kPitchFilterPreGain] pointer to a buffer
+ *                 where the output of different gain values (differential
+ *                 change to gain) is written.
+ */
+static void FilterFrame(const double* in_data, PitchFiltstr* filter_state,
+                        double* lags, double* gains, PitchFilterOperation mode,
+                        double* out_data,
+                        double out_dg[][PITCH_FRAME_LEN + QLOOKAHEAD]) {
+  PitchFilterParam filter_parameters;
+  double gain_delta, lag_delta;
+  double old_lag, old_gain;
+  int n;
+  int m;
+  const double kEnhancer = 1.3;
+
+  /* Set up buffer and states. */
+  filter_parameters.index = 0;
+  filter_parameters.lag_offset = 0;
+  filter_parameters.mode = mode;
+  /* Copy states to local variables. */
+  memcpy(filter_parameters.buffer, filter_state->ubuf,
+         sizeof(filter_state->ubuf));
+  RTC_COMPILE_ASSERT(sizeof(filter_parameters.buffer) >=
+                 sizeof(filter_state->ubuf));
+  memset(filter_parameters.buffer +
+             sizeof(filter_state->ubuf) / sizeof(filter_state->ubuf[0]),
+         0, sizeof(filter_parameters.buffer) - sizeof(filter_state->ubuf));
+  memcpy(filter_parameters.damper_state, filter_state->ystate,
+         sizeof(filter_state->ystate));
+
+  if (mode == kPitchFilterPreGain) {
+    /* Clear buffers. */
+    memset(filter_parameters.gain_mult, 0, sizeof(filter_parameters.gain_mult));
+    memset(filter_parameters.damper_state_dg, 0,
+           sizeof(filter_parameters.damper_state_dg));
+    for (n = 0; n < PITCH_SUBFRAMES; ++n) {
+      //memset(out_dg[n], 0, sizeof(double) * (PITCH_FRAME_LEN + QLOOKAHEAD));
+      memset(out_dg[n], 0, sizeof(out_dg[n]));
+    }
+  } else if (mode == kPitchFilterPost) {
+    /* Make output more periodic. Negative sign is to change the structure
+     * of the filter. */
+    for (n = 0; n < PITCH_SUBFRAMES; ++n) {
+      gains[n] *= -kEnhancer;
+    }
+  }
+
+  old_lag = *filter_state->oldlagp;
+  old_gain = *filter_state->oldgainp;
+
+  /* No interpolation if pitch lag step is big. */
+  if ((lags[0] > (PITCH_UPSTEP * old_lag)) ||
+      (lags[0] < (PITCH_DOWNSTEP * old_lag))) {
+    old_lag = lags[0];
+    old_gain = gains[0];
+
+    if (mode == kPitchFilterPreGain) {
+      filter_parameters.gain_mult[0] = 1.0;
+    }
+  }
+
+  filter_parameters.num_samples = PITCH_UPDATE;
+  for (m = 0; m < PITCH_SUBFRAMES; ++m) {
+    /* Set the sub-frame value. */
+    filter_parameters.sub_frame = m;
+    /* Calculate interpolation steps for pitch-lag and pitch-gain. */
+    lag_delta = (lags[m] - old_lag) / PITCH_GRAN_PER_SUBFRAME;
+    filter_parameters.lag = old_lag;
+    gain_delta = (gains[m] - old_gain) / PITCH_GRAN_PER_SUBFRAME;
+    filter_parameters.gain = old_gain;
+    /* Store for the next sub-frame. */
+    old_lag = lags[m];
+    old_gain = gains[m];
+
+    for (n = 0; n < PITCH_GRAN_PER_SUBFRAME; ++n) {
+      /* Step-wise interpolation of pitch gains and lags. As pitch-lag changes,
+       * some parameters of filter need to be update. */
+      filter_parameters.gain += gain_delta;
+      filter_parameters.lag += lag_delta;
+      /* Update parameters according to new lag value. */
+      Update(&filter_parameters);
+      /* Filter a segment of input. */
+      FilterSegment(in_data, &filter_parameters, out_data, out_dg);
+    }
+  }
+
+  if (mode != kPitchFilterPreGain) {
+    /* Export buffer and states. */
+    memcpy(filter_state->ubuf, &filter_parameters.buffer[PITCH_FRAME_LEN],
+           sizeof(filter_state->ubuf));
+    memcpy(filter_state->ystate, filter_parameters.damper_state,
+           sizeof(filter_state->ystate));
+
+    /* Store for the next frame. */
+    *filter_state->oldlagp = old_lag;
+    *filter_state->oldgainp = old_gain;
+  }
+
+  if ((mode == kPitchFilterPreGain) || (mode == kPitchFilterPreLa)) {
+    /* Filter the lookahead segment, this is treated as the last sub-frame. So
+     * set |pf_param| to last sub-frame. */
+    filter_parameters.sub_frame = PITCH_SUBFRAMES - 1;
+    filter_parameters.num_samples = QLOOKAHEAD;
+    FilterSegment(in_data, &filter_parameters, out_data, out_dg);
+  }
+}
+
+void WebRtcIsac_PitchfilterPre(double* in_data, double* out_data,
+                               PitchFiltstr* pf_state, double* lags,
+                               double* gains) {
+  FilterFrame(in_data, pf_state, lags, gains, kPitchFilterPre, out_data, NULL);
+}
+
+void WebRtcIsac_PitchfilterPre_la(double* in_data, double* out_data,
+                                  PitchFiltstr* pf_state, double* lags,
+                                  double* gains) {
+  FilterFrame(in_data, pf_state, lags, gains, kPitchFilterPreLa, out_data,
+              NULL);
+}
+
+void WebRtcIsac_PitchfilterPre_gains(
+    double* in_data, double* out_data,
+    double out_dg[][PITCH_FRAME_LEN + QLOOKAHEAD], PitchFiltstr *pf_state,
+    double* lags, double* gains) {
+  FilterFrame(in_data, pf_state, lags, gains, kPitchFilterPreGain, out_data,
+              out_dg);
+}
+
+void WebRtcIsac_PitchfilterPost(double* in_data, double* out_data,
+                                PitchFiltstr* pf_state, double* lags,
+                                double* gains) {
+  FilterFrame(in_data, pf_state, lags, gains, kPitchFilterPost, out_data, NULL);
+}
diff --git a/modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.c b/modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.c
new file mode 100644
index 0000000..080432c
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.c
@@ -0,0 +1,104 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+
+/* header file for coding tables for the pitch filter side-info in the entropy coder */
+/********************* Pitch Filter Gain Coefficient Tables ************************/
+/* cdf for quantized pitch filter gains */
+const uint16_t WebRtcIsac_kQPitchGainCdf[255] = {
+  0,  2,  4,  6,  64,  901,  903,  905,  16954,  16956,
+  16961,  17360,  17362,  17364,  17366,  17368,  17370,  17372,  17374,  17411,
+  17514,  17516,  17583,  18790,  18796,  18802,  20760,  20777,  20782,  21722,
+  21724,  21728,  21738,  21740,  21742,  21744,  21746,  21748,  22224,  22227,
+  22230,  23214,  23229,  23239,  25086,  25108,  25120,  26088,  26094,  26098,
+  26175,  26177,  26179,  26181,  26183,  26185,  26484,  26507,  26522,  27705,
+  27731,  27750,  29767,  29799,  29817,  30866,  30883,  30885,  31025,  31029,
+  31031,  31033,  31035,  31037,  31114,  31126,  31134,  32687,  32722,  32767,
+  35718,  35742,  35757,  36943,  36952,  36954,  37115,  37128,  37130,  37132,
+  37134,  37136,  37143,  37145,  37152,  38843,  38863,  38897,  47458,  47467,
+  47474,  49040,  49061,  49063,  49145,  49157,  49159,  49161,  49163,  49165,
+  49167,  49169,  49171,  49757,  49770,  49782,  61333,  61344,  61346,  62860,
+  62883,  62885,  62887,  62889,  62891,  62893,  62895,  62897,  62899,  62901,
+  62903,  62905,  62907,  62909,  65496,  65498,  65500,  65521,  65523,  65525,
+  65527,  65529,  65531,  65533,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+  65535,  65535,  65535,  65535,  65535};
+
+/* index limits and ranges */
+const int16_t WebRtcIsac_kIndexLowerLimitGain[3] = {
+  -7, -2, -1};
+
+const int16_t WebRtcIsac_kIndexUpperLimitGain[3] = {
+  0,  3,  1};
+
+const uint16_t WebRtcIsac_kIndexMultsGain[2] = {
+  18,  3};
+
+/* size of cdf table */
+const uint16_t WebRtcIsac_kQCdfTableSizeGain[1] = {
+  256};
+
+///////////////////////////FIXED POINT
+/* mean values of pitch filter gains in FIXED point */
+const int16_t WebRtcIsac_kQMeanGain1Q12[144] = {
+   843,    1092,    1336,    1222,    1405,    1656,    1500,    1815,    1843,    1838,    1839,    1843,    1843,    1843,    1843,    1843,
+  1843,    1843,     814,     846,    1092,    1013,    1174,    1383,    1391,    1511,    1584,    1734,    1753,    1843,    1843,    1843,
+  1843,    1843,    1843,    1843,     524,     689,     777,     845,     947,    1069,    1090,    1263,    1380,    1447,    1559,    1676,
+  1645,    1749,    1843,    1843,    1843,    1843,      81,     477,     563,     611,     706,     806,     849,    1012,    1192,    1128,
+  1330,    1489,    1425,    1576,    1826,    1741,    1843,    1843,       0,     290,     305,     356,     488,     575,     602,     741,
+   890,     835,    1079,    1196,    1182,    1376,    1519,    1506,    1680,    1843,       0,      47,      97,      69,     289,     381,
+   385,     474,     617,     664,     803,    1079,     935,    1160,    1269,    1265,    1506,    1741,       0,       0,       0,       0,
+   112,     120,     190,     283,     442,     343,     526,     809,     684,     935,    1134,    1020,    1265,    1506,       0,       0,
+     0,       0,       0,       0,       0,     111,     256,      87,     373,     597,     430,     684,     935,     770,    1020,    1265};
+
+const int16_t WebRtcIsac_kQMeanGain2Q12[144] = {
+  1760,    1525,    1285,    1747,    1671,    1393,    1843,    1826,    1555,    1843,    1784,    1606,    1843,    1843,    1711,    1843,
+  1843,    1814,    1389,    1275,    1040,    1564,    1414,    1252,    1610,    1495,    1343,    1753,    1592,    1405,    1804,    1720,
+  1475,    1843,    1814,    1581,    1208,    1061,    856,    1349,    1148,    994,    1390,    1253,    1111,    1495,    1343,    1178,
+  1770,    1465,    1234,    1814,    1581,    1342,    1040,    793,    713,    1053,    895,    737,    1128,    1003,    861,    1277,
+  1094,    981,    1475,    1192,    1019,    1581,    1342,    1098,    855,    570,    483,    833,    648,    540,    948,    744,
+  572,    1009,    844,    636,    1234,    934,    685,    1342,    1217,    984,    537,    318,    124,    603,    423,    350,
+  687,    479,    322,    791,    581,    430,    987,    671,    488,    1098,    849,    597,    283,    27,        0,    397,
+  222,    38,        513,    271,    124,    624,    325,    157,    737,    484,    233,    849,    597,    343,    27,        0,
+  0,    141,    0,    0,    256,    69,        0,    370,    87,        0,    484,    229,    0,    597,    343,    87};
+
+const int16_t WebRtcIsac_kQMeanGain3Q12[144] = {
+  1843,    1843,    1711,    1843,    1818,    1606,    1843,    1827,    1511,    1814,    1639,    1393,    1760,    1525,    1285,    1656,
+  1419,    1176,    1835,    1718,    1475,    1841,    1650,    1387,    1648,    1498,    1287,    1600,    1411,    1176,    1522,    1299,
+  1040,    1419,    1176,    928,    1773,    1461,    1128,    1532,    1355,    1202,    1429,    1260,    1115,    1398,    1151,    1025,
+  1172,    1080,    790,    1176,    928,    677,    1475,    1147,    1019,    1276,    1096,    922,    1214,    1010,    901,    1057,
+  893,    800,    1040,    796,    734,    928,    677,    424,    1137,    897,    753,    1120,    830,    710,    875,    751,
+  601,    795,    642,    583,    790,    544,    475,    677,    474,    140,    987,    750,    482,    697,    573,    450,
+  691,    487,    303,    661,    394,    332,    537,    303,    220,    424,    168,    0,    737,    484,    229,    624,
+  348,    153,    441,    261,    136,    397,    166,    51,        283,    27,        0,    168,    0,    0,    484,    229,
+  0,    370,    57,        0,    256,    43,        0,    141,    0,        0,    27,        0,    0,    0,    0,    0};
+
+
+const int16_t WebRtcIsac_kQMeanGain4Q12[144] = {
+  1843,    1843,    1843,    1843,    1841,    1843,    1500,    1821,    1843,    1222,    1434,    1656,    843,    1092,    1336,    504,
+  757,    1007,    1843,    1843,    1843,    1838,    1791,    1843,    1265,    1505,    1599,    965,    1219,    1425,    730,    821,
+  1092,    249,    504,    757,    1783,    1819,    1843,    1351,    1567,    1727,    1096,    1268,    1409,    805,    961,    1131,
+  444,    670,    843,    0,        249,    504,    1425,    1655,    1743,    1096,    1324,    1448,    822,    1019,    1199,    490,
+  704,    867,    81,        450,    555,    0,    0,        249,    1247,    1428,    1530,    881,    1073,    1283,    610,    759,
+  939,    278,    464,    645,    0,    200,    270,    0,    0,    0,        935,    1163,    1410,    528,    790,    1068,
+  377,    499,    717,    173,    240,    274,    0,    43,        62,        0,    0,    0,    684,    935,    1182,    343,
+  551,    735,    161,    262,    423,    0,    55,        27,        0,    0,    0,    0,    0,    0,    430,    684,
+  935,    87,        377,    597,    0,    46,        256,    0,    0,    0,    0,    0,    0,    0,    0,    0};
diff --git a/modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.h b/modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.h
new file mode 100644
index 0000000..fe506ee
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/pitch_gain_tables.h
@@ -0,0 +1,45 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * pitch_gain_tables.h
+ *
+ * This file contains tables for the pitch filter side-info in the entropy coder.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_GAIN_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_GAIN_TABLES_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+/* header file for coding tables for the pitch filter side-info in the entropy coder */
+/********************* Pitch Filter Gain Coefficient Tables ************************/
+/* cdf for quantized pitch filter gains */
+extern const uint16_t WebRtcIsac_kQPitchGainCdf[255];
+
+/* index limits and ranges */
+extern const int16_t WebRtcIsac_kIndexLowerLimitGain[3];
+
+extern const int16_t WebRtcIsac_kIndexUpperLimitGain[3];
+extern const uint16_t WebRtcIsac_kIndexMultsGain[2];
+
+/* mean values of pitch filter gains */
+//(Y)
+extern const int16_t WebRtcIsac_kQMeanGain1Q12[144];
+extern const int16_t WebRtcIsac_kQMeanGain2Q12[144];
+extern const int16_t WebRtcIsac_kQMeanGain3Q12[144];
+extern const int16_t WebRtcIsac_kQMeanGain4Q12[144];
+//(Y)
+
+/* size of cdf table */
+extern const uint16_t WebRtcIsac_kQCdfTableSizeGain[1];
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_GAIN_TABLES_H_ */
diff --git a/modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.c b/modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.c
new file mode 100644
index 0000000..57d1202
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.c
@@ -0,0 +1,277 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+
+/* header file for coding tables for the pitch filter side-info in the entropy coder */
+/********************* Pitch Filter Gain Coefficient Tables ************************/
+
+/* tables for use with small pitch gain */
+
+/* cdf for quantized pitch filter lags */
+const uint16_t WebRtcIsac_kQPitchLagCdf1Lo[127] = {
+ 0,  134,  336,  549,  778,  998,  1264,  1512,  1777,  2070,
+ 2423,  2794,  3051,  3361,  3708,  3979,  4315,  4610,  4933,  5269,
+ 5575,  5896,  6155,  6480,  6816,  7129,  7477,  7764,  8061,  8358,
+ 8718,  9020,  9390,  9783,  10177,  10543,  10885,  11342,  11795,  12213,
+ 12680,  13096,  13524,  13919,  14436,  14903,  15349,  15795,  16267,  16734,
+ 17266,  17697,  18130,  18632,  19080,  19447,  19884,  20315,  20735,  21288,
+ 21764,  22264,  22723,  23193,  23680,  24111,  24557,  25022,  25537,  26082,
+ 26543,  27090,  27620,  28139,  28652,  29149,  29634,  30175,  30692,  31273,
+ 31866,  32506,  33059,  33650,  34296,  34955,  35629,  36295,  36967,  37726,
+ 38559,  39458,  40364,  41293,  42256,  43215,  44231,  45253,  46274,  47359,
+ 48482,  49678,  50810,  51853,  53016,  54148,  55235,  56263,  57282,  58363,
+ 59288,  60179,  61076,  61806,  62474,  63129,  63656,  64160,  64533,  64856,
+ 65152,  65535,  65535,  65535,  65535,  65535,  65535};
+
+const uint16_t WebRtcIsac_kQPitchLagCdf2Lo[20] = {
+ 0,  429,  3558,  5861,  8558,  11639,  15210,  19502,  24773,  31983,
+ 42602,  48567,  52601,  55676,  58160,  60172,  61889,  63235,  65383,  65535};
+
+const uint16_t WebRtcIsac_kQPitchLagCdf3Lo[2] = {
+ 0,  65535};
+
+const uint16_t WebRtcIsac_kQPitchLagCdf4Lo[10] = {
+ 0,  2966,  6368,  11182,  19431,  37793,  48532,  55353,  60626,  65535};
+
+const uint16_t *WebRtcIsac_kQPitchLagCdfPtrLo[4] = {WebRtcIsac_kQPitchLagCdf1Lo, WebRtcIsac_kQPitchLagCdf2Lo, WebRtcIsac_kQPitchLagCdf3Lo, WebRtcIsac_kQPitchLagCdf4Lo};
+
+/* size of first cdf table */
+const uint16_t WebRtcIsac_kQPitchLagCdfSizeLo[1] = {128};
+
+/* index limits and ranges */
+const int16_t WebRtcIsac_kQIndexLowerLimitLagLo[4] = {
+-140, -9,  0, -4};
+
+const int16_t WebRtcIsac_kQIndexUpperLimitLagLo[4] = {
+-20,  9,  0,  4};
+
+/* initial index for arithmetic decoder */
+const uint16_t WebRtcIsac_kQInitIndexLagLo[3] = {
+ 10,  1,  5};
+
+/* mean values of pitch filter lags */
+const double WebRtcIsac_kQMeanLag2Lo[19] = {
+-17.21385070, -15.82678944, -14.07123081, -12.03003877, -10.01311864, -8.00794627, -5.91162987, -3.89231876, -1.90220980, -0.01879275,
+ 1.89144232,  3.88123171,  5.92146992,  7.96435361,  9.98923648,  11.98266347,  13.96101002,  15.74855713,  17.10976611};
+
+const double WebRtcIsac_kQMeanLag3Lo[1] = {
+ 0.00000000};
+
+const double WebRtcIsac_kQMeanLag4Lo[9] = {
+-7.76246496, -5.92083980, -3.94095226, -1.89502305,  0.03724681,  1.93054221,  3.96443467,  5.91726366,  7.78434291};
+
+const double WebRtcIsac_kQPitchLagStepsizeLo = 2.000000;
+
+
+/* tables for use with medium pitch gain */
+
+/* cdf for quantized pitch filter lags */
+const uint16_t WebRtcIsac_kQPitchLagCdf1Mid[255] = {
+ 0,  28,  61,  88,  121,  149,  233,  331,  475,  559,
+ 624,  661,  689,  712,  745,  791,  815,  843,  866,  922,
+ 959,  1024,  1061,  1117,  1178,  1238,  1280,  1350,  1453,  1513,
+ 1564,  1625,  1671,  1741,  1788,  1904,  2072,  2421,  2626,  2770,
+ 2840,  2900,  2942,  3012,  3068,  3115,  3147,  3194,  3254,  3319,
+ 3366,  3520,  3678,  3780,  3850,  3911,  3957,  4032,  4106,  4185,
+ 4292,  4474,  4683,  4842,  5019,  5191,  5321,  5428,  5540,  5675,
+ 5763,  5847,  5959,  6127,  6304,  6564,  6839,  7090,  7263,  7421,
+ 7556,  7728,  7872,  7984,  8142,  8361,  8580,  8743,  8938,  9227,
+ 9409,  9539,  9674,  9795,  9930,  10060,  10177,  10382,  10614,  10861,
+ 11038,  11271,  11415,  11629,  11792,  12044,  12193,  12416,  12574,  12821,
+ 13007,  13235,  13445,  13654,  13901,  14134,  14488,  15000,  15703,  16285,
+ 16504,  16797,  17086,  17328,  17579,  17807,  17998,  18268,  18538,  18836,
+ 19087,  19274,  19474,  19716,  19935,  20270,  20833,  21303,  21532,  21741,
+ 21978,  22207,  22523,  22770,  23054,  23613,  23943,  24204,  24399,  24651,
+ 24832,  25074,  25270,  25549,  25759,  26015,  26150,  26424,  26713,  27048,
+ 27342,  27504,  27681,  27854,  28021,  28207,  28412,  28664,  28859,  29064,
+ 29278,  29548,  29748,  30107,  30377,  30656,  30856,  31164,  31452,  31755,
+ 32011,  32328,  32626,  32919,  33319,  33789,  34329,  34925,  35396,  35973,
+ 36443,  36964,  37551,  38156,  38724,  39357,  40023,  40908,  41587,  42602,
+ 43924,  45037,  45810,  46597,  47421,  48291,  49092,  50051,  51448,  52719,
+ 53440,  54241,  54944,  55977,  56676,  57299,  57872,  58389,  59059,  59688,
+ 60237,  60782,  61094,  61573,  61890,  62290,  62658,  63030,  63217,  63454,
+ 63622,  63882,  64003,  64273,  64427,  64529,  64581,  64697,  64758,  64902,
+ 65414,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+ 65535,  65535,  65535,  65535,  65535};
+
+const uint16_t WebRtcIsac_kQPitchLagCdf2Mid[36] = {
+ 0,  71,  335,  581,  836,  1039,  1323,  1795,  2258,  2608,
+ 3005,  3591,  4243,  5344,  7163,  10583,  16848,  28078,  49448,  57007,
+ 60357,  61850,  62837,  63437,  63872,  64188,  64377,  64614,  64774,  64949,
+ 65039,  65115,  65223,  65360,  65474,  65535};
+
+const uint16_t WebRtcIsac_kQPitchLagCdf3Mid[2] = {
+ 0,  65535};
+
+const uint16_t WebRtcIsac_kQPitchLagCdf4Mid[20] = {
+ 0,  28,  246,  459,  667,  1045,  1523,  2337,  4337,  11347,
+ 44231,  56709,  60781,  62243,  63161,  63969,  64608,  65062,  65502,  65535};
+
+const uint16_t *WebRtcIsac_kQPitchLagCdfPtrMid[4] = {WebRtcIsac_kQPitchLagCdf1Mid, WebRtcIsac_kQPitchLagCdf2Mid, WebRtcIsac_kQPitchLagCdf3Mid, WebRtcIsac_kQPitchLagCdf4Mid};
+
+/* size of first cdf table */
+const uint16_t WebRtcIsac_kQPitchLagCdfSizeMid[1] = {256};
+
+/* index limits and ranges */
+const int16_t WebRtcIsac_kQIndexLowerLimitLagMid[4] = {
+-280, -17,  0, -9};
+
+const int16_t WebRtcIsac_kQIndexUpperLimitLagMid[4] = {
+-40,  17,  0,  9};
+
+/* initial index for arithmetic decoder */
+const uint16_t WebRtcIsac_kQInitIndexLagMid[3] = {
+ 18,  1,  10};
+
+/* mean values of pitch filter lags */
+const double WebRtcIsac_kQMeanLag2Mid[35] = {
+-16.89183900, -15.86949778, -15.05476653, -14.00664348, -13.02793036, -12.07324237, -11.00542532, -10.11250602, -8.90792971, -8.02474753,
+-7.00426767, -5.94055287, -4.98251338, -3.91053158, -2.98820425, -1.93524245, -0.92978085, -0.01722509,  0.91317387,  1.92973955,
+ 2.96908851,  3.93728974,  4.96308471,  5.92244151,  7.08673497,  8.00993708,  9.04656316,  9.98538742,  10.97851694,  11.94772884,
+ 13.02426166,  14.00039951,  15.01347042,  15.80758023,  16.94086895};
+
+const double WebRtcIsac_kQMeanLag3Mid[1] = {
+ 0.00000000};
+
+const double WebRtcIsac_kQMeanLag4Mid[19] = {
+-8.60409403, -7.89198395, -7.03450280, -5.86260421, -4.93822322, -3.93078706, -2.91302322, -1.91824007, -0.87003282,  0.02822649,
+ 0.89951758,  1.87495484,  2.91802604,  3.96874074,  5.06571703,  5.93618227,  7.00520185,  7.88497726,  8.64160364};
+
+const double WebRtcIsac_kQPitchLagStepsizeMid = 1.000000;
+
+
+/* tables for use with large pitch gain */
+
+/* cdf for quantized pitch filter lags */
+const uint16_t WebRtcIsac_kQPitchLagCdf1Hi[511] = {
+ 0,  7,  18,  33,  69,  105,  156,  228,  315,  612,
+ 680,  691,  709,  724,  735,  738,  742,  746,  749,  753,
+ 756,  760,  764,  774,  782,  785,  789,  796,  800,  803,
+ 807,  814,  818,  822,  829,  832,  847,  854,  858,  869,
+ 876,  883,  898,  908,  934,  977,  1010,  1050,  1060,  1064,
+ 1075,  1078,  1086,  1089,  1093,  1104,  1111,  1122,  1133,  1136,
+ 1151,  1162,  1183,  1209,  1252,  1281,  1339,  1364,  1386,  1401,
+ 1411,  1415,  1426,  1430,  1433,  1440,  1448,  1455,  1462,  1477,
+ 1487,  1495,  1502,  1506,  1509,  1516,  1524,  1531,  1535,  1542,
+ 1553,  1556,  1578,  1589,  1611,  1625,  1639,  1643,  1654,  1665,
+ 1672,  1687,  1694,  1705,  1708,  1719,  1730,  1744,  1752,  1759,
+ 1791,  1795,  1820,  1867,  1886,  1915,  1936,  1943,  1965,  1987,
+ 2041,  2099,  2161,  2175,  2200,  2211,  2226,  2233,  2244,  2251,
+ 2266,  2280,  2287,  2298,  2309,  2316,  2331,  2342,  2356,  2378,
+ 2403,  2418,  2447,  2497,  2544,  2602,  2863,  2895,  2903,  2935,
+ 2950,  2971,  3004,  3011,  3018,  3029,  3040,  3062,  3087,  3127,
+ 3152,  3170,  3199,  3243,  3293,  3322,  3340,  3377,  3402,  3427,
+ 3474,  3518,  3543,  3579,  3601,  3637,  3659,  3706,  3731,  3760,
+ 3818,  3847,  3869,  3901,  3920,  3952,  4068,  4169,  4220,  4271,
+ 4524,  4571,  4604,  4632,  4672,  4730,  4777,  4806,  4857,  4904,
+ 4951,  5002,  5031,  5060,  5107,  5150,  5212,  5266,  5331,  5382,
+ 5432,  5490,  5544,  5610,  5700,  5762,  5812,  5874,  5972,  6022,
+ 6091,  6163,  6232,  6305,  6402,  6540,  6685,  6880,  7090,  7271,
+ 7379,  7452,  7542,  7625,  7687,  7770,  7843,  7911,  7966,  8024,
+ 8096,  8190,  8252,  8320,  8411,  8501,  8585,  8639,  8751,  8842,
+ 8918,  8986,  9066,  9127,  9203,  9269,  9345,  9406,  9464,  9536,
+ 9612,  9667,  9735,  9844,  9931,  10036,  10119,  10199,  10260,  10358,
+ 10441,  10514,  10666,  10734,  10872,  10951,  11053,  11125,  11223,  11324,
+ 11516,  11664,  11737,  11816,  11892,  12008,  12120,  12200,  12280,  12392,
+ 12490,  12576,  12685,  12812,  12917,  13003,  13108,  13210,  13300,  13384,
+ 13470,  13579,  13673,  13771,  13879,  13999,  14136,  14201,  14368,  14614,
+ 14759,  14867,  14958,  15030,  15121,  15189,  15280,  15385,  15461,  15555,
+ 15653,  15768,  15884,  15971,  16069,  16145,  16210,  16279,  16380,  16463,
+ 16539,  16615,  16688,  16818,  16919,  17017,  18041,  18338,  18523,  18649,
+ 18790,  18917,  19047,  19167,  19315,  19460,  19601,  19731,  19858,  20068,
+ 20173,  20318,  20466,  20625,  20741,  20911,  21045,  21201,  21396,  21588,
+ 21816,  22022,  22305,  22547,  22786,  23072,  23322,  23600,  23879,  24168,
+ 24433,  24769,  25120,  25511,  25895,  26289,  26792,  27219,  27683,  28077,
+ 28566,  29094,  29546,  29977,  30491,  30991,  31573,  32105,  32594,  33173,
+ 33788,  34497,  35181,  35833,  36488,  37255,  37921,  38645,  39275,  39894,
+ 40505,  41167,  41790,  42431,  43096,  43723,  44385,  45134,  45858,  46607,
+ 47349,  48091,  48768,  49405,  49955,  50555,  51167,  51985,  52611,  53078,
+ 53494,  53965,  54435,  54996,  55601,  56125,  56563,  56838,  57244,  57566,
+ 57967,  58297,  58771,  59093,  59419,  59647,  59886,  60143,  60461,  60693,
+ 60917,  61170,  61416,  61634,  61891,  62122,  62310,  62455,  62632,  62839,
+ 63103,  63436,  63639,  63805,  63906,  64015,  64192,  64355,  64475,  64558,
+ 64663,  64742,  64811,  64865,  64916,  64956,  64981,  65025,  65068,  65115,
+ 65195,  65314,  65419,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+ 65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+ 65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+ 65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,  65535,
+ 65535};
+
+const uint16_t WebRtcIsac_kQPitchLagCdf2Hi[68] = {
+ 0,  7,  11,  22,  37,  52,  56,  59,  81,  85,
+ 89,  96,  115,  130,  137,  152,  170,  181,  193,  200,
+ 207,  233,  237,  259,  289,  318,  363,  433,  592,  992,
+ 1607,  3062,  6149,  12206,  25522,  48368,  58223,  61918,  63640,  64584,
+ 64943,  65098,  65206,  65268,  65294,  65335,  65350,  65372,  65387,  65402,
+ 65413,  65420,  65428,  65435,  65439,  65450,  65454,  65468,  65472,  65476,
+ 65483,  65491,  65498,  65505,  65516,  65520,  65528,  65535};
+
+const uint16_t WebRtcIsac_kQPitchLagCdf3Hi[2] = {
+ 0,  65535};
+
+const uint16_t WebRtcIsac_kQPitchLagCdf4Hi[35] = {
+ 0,  7,  19,  30,  41,  48,  63,  74,  82,  96,
+ 122,  152,  215,  330,  701,  2611,  10931,  48106,  61177,  64341,
+ 65112,  65238,  65309,  65338,  65364,  65379,  65401,  65427,  65453,  65465,
+ 65476,  65490,  65509,  65528,  65535};
+
+const uint16_t *WebRtcIsac_kQPitchLagCdfPtrHi[4] = {WebRtcIsac_kQPitchLagCdf1Hi, WebRtcIsac_kQPitchLagCdf2Hi, WebRtcIsac_kQPitchLagCdf3Hi, WebRtcIsac_kQPitchLagCdf4Hi};
+
+/* size of first cdf table */
+const uint16_t WebRtcIsac_kQPitchLagCdfSizeHi[1] = {512};
+
+/* index limits and ranges */
+const int16_t WebRtcIsac_kQindexLowerLimitLagHi[4] = {
+-552, -34,  0, -16};
+
+const int16_t WebRtcIsac_kQindexUpperLimitLagHi[4] = {
+-80,  32,  0,  17};
+
+/* initial index for arithmetic decoder */
+const uint16_t WebRtcIsac_kQInitIndexLagHi[3] = {
+ 34,  1,  18};
+
+/* mean values of pitch filter lags */
+const double WebRtcIsac_kQMeanLag2Hi[67] = {
+-17.07263295, -16.50000000, -15.83966081, -15.55613708, -14.96948007, -14.50000000, -14.00000000, -13.48377986, -13.00000000, -12.50000000,
+-11.93199636, -11.44530414, -11.04197641, -10.39910301, -10.15202337, -9.51322461, -8.93357741, -8.46456632, -8.10270672, -7.53751847,
+-6.98686404, -6.50000000, -6.08463150, -5.46872991, -5.00864717, -4.50163760, -4.01382410, -3.43856708, -2.96898001, -2.46554810,
+-1.96861004, -1.47106701, -0.97197237, -0.46561654, -0.00531409,  0.45767857,  0.96777907,  1.47507903,  1.97740425,  2.46695420,
+ 3.00695774,  3.47167185,  4.02712538,  4.49280007,  5.01087640,  5.48191963,  6.04916550,  6.51511058,  6.97297819,  7.46565499,
+ 8.01489405,  8.39912001,  8.91819757,  9.50000000,  10.11654065,  10.50000000,  11.03712583,  11.50000000,  12.00000000,  12.38964346,
+ 12.89466127,  13.43657881,  13.96013840,  14.46279912,  15.00000000,  15.39412269,  15.96662441};
+
+const double WebRtcIsac_kQMeanLag3Hi[1] = {
+ 0.00000000};
+
+const double WebRtcIsac_kQMeanLag4Hi[34] = {
+-7.98331221, -7.47988769, -7.03626557, -6.52708003, -6.06982173, -5.51856292, -5.05827033, -4.45909878, -3.99125864, -3.45308135,
+-3.02328139, -2.47297273, -1.94341995, -1.44699056, -0.93612243, -0.43012406,  0.01120357,  0.44054812,  0.93199883,  1.45669587,
+ 1.97218322,  2.50187419,  2.98748690,  3.49343202,  4.01660147,  4.50984306,  5.01402683,  5.58936797,  5.91787793,  6.59998900,
+ 6.85034315,  7.53503316,  7.87711194,  8.53631648};
+
+const double WebRtcIsac_kQPitchLagStepsizeHi = 0.500000;
+
+/* transform matrix */
+const double WebRtcIsac_kTransform[4][4] = {
+{-0.50000000, -0.50000000, -0.50000000, -0.50000000},
+{ 0.67082039,  0.22360680, -0.22360680, -0.67082039},
+{ 0.50000000, -0.50000000, -0.50000000,  0.50000000},
+{ 0.22360680, -0.67082039,  0.67082039, -0.22360680}};
+
+/* transpose transform matrix */
+const double WebRtcIsac_kTransformTranspose[4][4] = {
+{-0.50000000,  0.67082039,  0.50000000,  0.22360680},
+{-0.50000000,  0.22360680, -0.50000000, -0.67082039},
+{-0.50000000, -0.22360680, -0.50000000,  0.67082039},
+{-0.50000000, -0.67082039,  0.50000000, -0.22360680}};
+
diff --git a/modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.h b/modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.h
new file mode 100644
index 0000000..6a57c87
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/pitch_lag_tables.h
@@ -0,0 +1,114 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * pitch_lag_tables.h
+ *
+ * This file contains tables for the pitch filter side-info in the entropy coder.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_LAG_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_LAG_TABLES_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+/* header file for coding tables for the pitch filter side-info in the entropy coder */
+/********************* Pitch Filter Lag Coefficient Tables ************************/
+
+/* tables for use with small pitch gain */
+
+/* cdfs for quantized pitch lags */
+extern const uint16_t WebRtcIsac_kQPitchLagCdf1Lo[127];
+extern const uint16_t WebRtcIsac_kQPitchLagCdf2Lo[20];
+extern const uint16_t WebRtcIsac_kQPitchLagCdf3Lo[2];
+extern const uint16_t WebRtcIsac_kQPitchLagCdf4Lo[10];
+
+extern const uint16_t *WebRtcIsac_kQPitchLagCdfPtrLo[4];
+
+/* size of first cdf table */
+extern const uint16_t WebRtcIsac_kQPitchLagCdfSizeLo[1];
+
+/* index limits and ranges */
+extern const int16_t WebRtcIsac_kQIndexLowerLimitLagLo[4];
+extern const int16_t WebRtcIsac_kQIndexUpperLimitLagLo[4];
+
+/* initial index for arithmetic decoder */
+extern const uint16_t WebRtcIsac_kQInitIndexLagLo[3];
+
+/* mean values of pitch filter lags */
+extern const double WebRtcIsac_kQMeanLag2Lo[19];
+extern const double WebRtcIsac_kQMeanLag3Lo[1];
+extern const double WebRtcIsac_kQMeanLag4Lo[9];
+
+extern const double WebRtcIsac_kQPitchLagStepsizeLo;
+
+
+/* tables for use with medium pitch gain */
+
+/* cdfs for quantized pitch lags */
+extern const uint16_t WebRtcIsac_kQPitchLagCdf1Mid[255];
+extern const uint16_t WebRtcIsac_kQPitchLagCdf2Mid[36];
+extern const uint16_t WebRtcIsac_kQPitchLagCdf3Mid[2];
+extern const uint16_t WebRtcIsac_kQPitchLagCdf4Mid[20];
+
+extern const uint16_t *WebRtcIsac_kQPitchLagCdfPtrMid[4];
+
+/* size of first cdf table */
+extern const uint16_t WebRtcIsac_kQPitchLagCdfSizeMid[1];
+
+/* index limits and ranges */
+extern const int16_t WebRtcIsac_kQIndexLowerLimitLagMid[4];
+extern const int16_t WebRtcIsac_kQIndexUpperLimitLagMid[4];
+
+/* initial index for arithmetic decoder */
+extern const uint16_t WebRtcIsac_kQInitIndexLagMid[3];
+
+/* mean values of pitch filter lags */
+extern const double WebRtcIsac_kQMeanLag2Mid[35];
+extern const double WebRtcIsac_kQMeanLag3Mid[1];
+extern const double WebRtcIsac_kQMeanLag4Mid[19];
+
+extern const double WebRtcIsac_kQPitchLagStepsizeMid;
+
+
+/* tables for use with large pitch gain */
+
+/* cdfs for quantized pitch lags */
+extern const uint16_t WebRtcIsac_kQPitchLagCdf1Hi[511];
+extern const uint16_t WebRtcIsac_kQPitchLagCdf2Hi[68];
+extern const uint16_t WebRtcIsac_kQPitchLagCdf3Hi[2];
+extern const uint16_t WebRtcIsac_kQPitchLagCdf4Hi[35];
+
+extern const uint16_t *WebRtcIsac_kQPitchLagCdfPtrHi[4];
+
+/* size of first cdf table */
+extern const uint16_t WebRtcIsac_kQPitchLagCdfSizeHi[1];
+
+/* index limits and ranges */
+extern const int16_t WebRtcIsac_kQindexLowerLimitLagHi[4];
+extern const int16_t WebRtcIsac_kQindexUpperLimitLagHi[4];
+
+/* initial index for arithmetic decoder */
+extern const uint16_t WebRtcIsac_kQInitIndexLagHi[3];
+
+/* mean values of pitch filter lags */
+extern const double WebRtcIsac_kQMeanLag2Hi[67];
+extern const double WebRtcIsac_kQMeanLag3Hi[1];
+extern const double WebRtcIsac_kQMeanLag4Hi[34];
+
+extern const double WebRtcIsac_kQPitchLagStepsizeHi;
+
+/* transform matrix */
+extern const double WebRtcIsac_kTransform[4][4];
+
+/* transpose transform matrix */
+extern const double WebRtcIsac_kTransformTranspose[4][4];
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_LAG_TABLES_H_ */
diff --git a/modules/audio_coding/codecs/isac/main/source/settings.h b/modules/audio_coding/codecs/isac/main/source/settings.h
new file mode 100644
index 0000000..c08d72f
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/settings.h
@@ -0,0 +1,205 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * settings.h
+ *
+ * Declaration of #defines used in the iSAC codec
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_SETTINGS_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_SETTINGS_H_
+
+/* sampling frequency (Hz) */
+#define FS                                      16000
+
+/* number of samples per frame (either 320 (20ms), 480 (30ms) or 960 (60ms)) */
+#define INITIAL_FRAMESAMPLES     960
+
+
+#define MAXFFTSIZE 2048
+#define NFACTOR 11
+
+
+
+/* do not modify the following; this will have to be modified if we
+ * have a 20ms framesize option */
+/**********************************************************************/
+/* miliseconds */
+#define FRAMESIZE                               30
+/* number of samples per frame processed in the encoder, 480 */
+#define FRAMESAMPLES                            480 /* ((FRAMESIZE*FS)/1000) */
+#define FRAMESAMPLES_HALF      240
+#define FRAMESAMPLES_QUARTER                    120
+/**********************************************************************/
+
+
+
+/* max number of samples per frame (= 60 ms frame) */
+#define MAX_FRAMESAMPLES      960
+#define MAX_SWBFRAMESAMPLES                     (MAX_FRAMESAMPLES * 2)
+/* number of samples per 10ms frame */
+#define FRAMESAMPLES_10ms                       ((10*FS)/1000)
+#define SWBFRAMESAMPLES_10ms                    (FRAMESAMPLES_10ms * 2)
+/* number of samples in 30 ms frame */
+#define FRAMESAMPLES_30ms            480
+/* number of subframes */
+#define SUBFRAMES                               6
+/* length of a subframe */
+#define UPDATE                                  80
+/* length of half a subframe (low/high band) */
+#define HALF_SUBFRAMELEN                        (UPDATE/2)
+/* samples of look ahead (in a half-band, so actually
+ * half the samples of look ahead @ FS) */
+#define QLOOKAHEAD                              24    /* 3 ms */
+/* order of AR model in spectral entropy coder */
+#define AR_ORDER                                6
+/* order of LP model in spectral entropy coder */
+#define LP_ORDER                                0
+
+/* window length (masking analysis) */
+#define WINLEN                                  256
+/* order of low-band pole filter used to approximate masking curve */
+#define ORDERLO                                 12
+/* order of hi-band pole filter used to approximate masking curve */
+#define ORDERHI                                 6
+
+#define UB_LPC_ORDER                            4
+#define UB_LPC_VEC_PER_FRAME                    2
+#define UB16_LPC_VEC_PER_FRAME                  4
+#define UB_ACTIVE_SUBFRAMES                     2
+#define UB_MAX_LPC_ORDER                        6
+#define UB_INTERPOL_SEGMENTS                    1
+#define UB16_INTERPOL_SEGMENTS                  3
+#define LB_TOTAL_DELAY_SAMPLES                 48
+enum ISACBandwidth {isac8kHz = 8, isac12kHz = 12, isac16kHz = 16};
+enum ISACBand {kIsacLowerBand = 0, kIsacUpperBand12 = 1, kIsacUpperBand16 = 2};
+enum IsacSamplingRate {kIsacWideband = 16,  kIsacSuperWideband = 32};
+#define UB_LPC_GAIN_DIM                 SUBFRAMES
+#define FB_STATE_SIZE_WORD32                    6
+
+
+/* order for post_filter_bank */
+#define POSTQORDER                              3
+/* order for pre-filterbank */
+#define QORDER                                  3
+/* another order */
+#define QORDER_ALL                              (POSTQORDER+QORDER-1)
+/* for decimator */
+#define ALLPASSSECTIONS                         2
+
+
+/* array size for byte stream in number of bytes. */
+/* The old maximum size still needed for the decoding */
+#define STREAM_SIZE_MAX     600
+#define STREAM_SIZE_MAX_30  200 /* 200 bytes=53.4 kbps @ 30 ms.framelength */
+#define STREAM_SIZE_MAX_60  400 /* 400 bytes=53.4 kbps @ 60 ms.framelength */
+
+/* storage size for bit counts */
+#define BIT_COUNTER_SIZE                        30
+/* maximum order of any AR model or filter */
+#define MAX_AR_MODEL_ORDER                      12//50
+
+
+/* For pitch analysis */
+#define PITCH_FRAME_LEN                         (FRAMESAMPLES_HALF) /* 30 ms  */
+#define PITCH_MAX_LAG                           140     /* 57 Hz  */
+#define PITCH_MIN_LAG                           20              /* 400 Hz */
+#define PITCH_MAX_GAIN                          0.45
+#define PITCH_MAX_GAIN_06                       0.27  /* PITCH_MAX_GAIN*0.6 */
+#define PITCH_MAX_GAIN_Q12      1843
+#define PITCH_LAG_SPAN2                     (PITCH_MAX_LAG/2-PITCH_MIN_LAG/2+5)
+#define PITCH_CORR_LEN2                         60     /* 15 ms  */
+#define PITCH_CORR_STEP2                        (PITCH_FRAME_LEN/4)
+#define PITCH_BW        11     /* half the band width of correlation surface */
+#define PITCH_SUBFRAMES                         4
+#define PITCH_GRAN_PER_SUBFRAME                 5
+#define PITCH_SUBFRAME_LEN        (PITCH_FRAME_LEN/PITCH_SUBFRAMES)
+#define PITCH_UPDATE              (PITCH_SUBFRAME_LEN/PITCH_GRAN_PER_SUBFRAME)
+/* maximum number of peaks to be examined in correlation surface */
+#define PITCH_MAX_NUM_PEAKS                  10
+#define PITCH_PEAK_DECAY               0.85
+/* For weighting filter */
+#define PITCH_WLPCORDER                   6
+#define PITCH_WLPCWINLEN               PITCH_FRAME_LEN
+#define PITCH_WLPCASYM                   0.3         /* asymmetry parameter */
+#define PITCH_WLPCBUFLEN               PITCH_WLPCWINLEN
+/* For pitch filter */
+/* Extra 50 for fraction and LP filters */
+#define PITCH_BUFFSIZE                   (PITCH_MAX_LAG + 50)
+#define PITCH_INTBUFFSIZE               (PITCH_FRAME_LEN+PITCH_BUFFSIZE)
+/* Max rel. step for interpolation */
+#define PITCH_UPSTEP                1.5
+/* Max rel. step for interpolation */
+#define PITCH_DOWNSTEP                   0.67
+#define PITCH_FRACS                             8
+#define PITCH_FRACORDER                         9
+#define PITCH_DAMPORDER                         5
+#define PITCH_FILTDELAY                         1.5f
+/* stepsize for quantization of the pitch Gain */
+#define PITCH_GAIN_STEPSIZE                     0.125
+
+
+
+/* Order of high pass filter */
+#define HPORDER                                 2
+
+/* some mathematical constants */
+/* log2(exp) */
+#define LOG2EXP                                 1.44269504088896
+#define PI                                      3.14159265358979
+
+/* Maximum number of iterations allowed to limit payload size */
+#define MAX_PAYLOAD_LIMIT_ITERATION             5
+
+/* Redundant Coding */
+#define RCU_BOTTLENECK_BPS                      16000
+#define RCU_TRANSCODING_SCALE                   0.40f
+#define RCU_TRANSCODING_SCALE_INVERSE           2.5f
+
+#define RCU_TRANSCODING_SCALE_UB                0.50f
+#define RCU_TRANSCODING_SCALE_UB_INVERSE        2.0f
+
+/* Define Error codes */
+/* 6000 General */
+#define ISAC_MEMORY_ALLOCATION_FAILED    6010
+#define ISAC_MODE_MISMATCH       6020
+#define ISAC_DISALLOWED_BOTTLENECK     6030
+#define ISAC_DISALLOWED_FRAME_LENGTH    6040
+#define ISAC_UNSUPPORTED_SAMPLING_FREQUENCY         6050
+
+/* 6200 Bandwidth estimator */
+#define ISAC_RANGE_ERROR_BW_ESTIMATOR    6240
+/* 6400 Encoder */
+#define ISAC_ENCODER_NOT_INITIATED     6410
+#define ISAC_DISALLOWED_CODING_MODE     6420
+#define ISAC_DISALLOWED_FRAME_MODE_ENCODER   6430
+#define ISAC_DISALLOWED_BITSTREAM_LENGTH            6440
+#define ISAC_PAYLOAD_LARGER_THAN_LIMIT              6450
+#define ISAC_DISALLOWED_ENCODER_BANDWIDTH           6460
+/* 6600 Decoder */
+#define ISAC_DECODER_NOT_INITIATED     6610
+#define ISAC_EMPTY_PACKET       6620
+#define ISAC_DISALLOWED_FRAME_MODE_DECODER   6630
+#define ISAC_RANGE_ERROR_DECODE_FRAME_LENGTH  6640
+#define ISAC_RANGE_ERROR_DECODE_BANDWIDTH   6650
+#define ISAC_RANGE_ERROR_DECODE_PITCH_GAIN   6660
+#define ISAC_RANGE_ERROR_DECODE_PITCH_LAG   6670
+#define ISAC_RANGE_ERROR_DECODE_LPC     6680
+#define ISAC_RANGE_ERROR_DECODE_SPECTRUM   6690
+#define ISAC_LENGTH_MISMATCH      6730
+#define ISAC_RANGE_ERROR_DECODE_BANDWITH            6740
+#define ISAC_DISALLOWED_BANDWIDTH_MODE_DECODER      6750
+#define ISAC_DISALLOWED_LPC_MODEL                   6760
+/* 6800 Call setup formats */
+#define ISAC_INCOMPATIBLE_FORMATS     6810
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_SETTINGS_H_ */
diff --git a/modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.c b/modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.c
new file mode 100644
index 0000000..839d5d4
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.c
@@ -0,0 +1,139 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+
+/********************* AR Coefficient Tables ************************/
+/* cdf for quantized reflection coefficient 1 */
+const uint16_t WebRtcIsac_kQArRc1Cdf[NUM_AR_RC_QUANT_BAUNDARY] = {
+ 0,  2,  4,  129,  7707,  57485,  65495,  65527,  65529,  65531,
+ 65533,  65535};
+
+/* cdf for quantized reflection coefficient 2 */
+const uint16_t WebRtcIsac_kQArRc2Cdf[NUM_AR_RC_QUANT_BAUNDARY] = {
+ 0,  2,  4,  7,  531,  25298,  64525,  65526,  65529,  65531,
+ 65533,  65535};
+
+/* cdf for quantized reflection coefficient 3 */
+const uint16_t WebRtcIsac_kQArRc3Cdf[NUM_AR_RC_QUANT_BAUNDARY] = {
+ 0,  2,  4,  6,  620,  22898,  64843,  65527,  65529,  65531,
+ 65533,  65535};
+
+/* cdf for quantized reflection coefficient 4 */
+const uint16_t WebRtcIsac_kQArRc4Cdf[NUM_AR_RC_QUANT_BAUNDARY] = {
+ 0,  2,  4,  6,  35,  10034,  60733,  65506,  65529,  65531,
+ 65533,  65535};
+
+/* cdf for quantized reflection coefficient 5 */
+const uint16_t WebRtcIsac_kQArRc5Cdf[NUM_AR_RC_QUANT_BAUNDARY] = {
+ 0,  2,  4,  6,  36,  7567,  56727,  65385,  65529,  65531,
+ 65533,  65535};
+
+/* cdf for quantized reflection coefficient 6 */
+const uint16_t WebRtcIsac_kQArRc6Cdf[NUM_AR_RC_QUANT_BAUNDARY] = {
+ 0,  2,  4,  6,  14,  6579,  57360,  65409,  65529,  65531,
+ 65533,  65535};
+
+/* representation levels for quantized reflection coefficient 1 */
+const int16_t WebRtcIsac_kQArRc1Levels[NUM_AR_RC_QUANT_BAUNDARY - 1] = {
+ -32104, -29007, -23202, -15496, -9279, -2577, 5934, 17535, 24512, 29503, 32104
+};
+
+/* representation levels for quantized reflection coefficient 2 */
+const int16_t WebRtcIsac_kQArRc2Levels[NUM_AR_RC_QUANT_BAUNDARY - 1] = {
+ -32104, -29503, -23494, -15261, -7309, -1399, 6158, 16381, 24512, 29503, 32104
+};
+
+/* representation levels for quantized reflection coefficient 3 */
+const int16_t WebRtcIsac_kQArRc3Levels[NUM_AR_RC_QUANT_BAUNDARY - 1] = {
+-32104, -29503, -23157, -15186, -7347, -1359, 5829, 17535, 24512, 29503, 32104
+};
+
+/* representation levels for quantized reflection coefficient 4 */
+const int16_t WebRtcIsac_kQArRc4Levels[NUM_AR_RC_QUANT_BAUNDARY - 1] = {
+-32104, -29503, -24512, -15362, -6665, -342, 6596, 14585, 24512, 29503, 32104
+};
+
+/* representation levels for quantized reflection coefficient 5 */
+const int16_t WebRtcIsac_kQArRc5Levels[NUM_AR_RC_QUANT_BAUNDARY - 1] = {
+-32104, -29503, -24512, -15005, -6564, -106, 7123, 14920, 24512, 29503, 32104
+};
+
+/* representation levels for quantized reflection coefficient 6 */
+const int16_t WebRtcIsac_kQArRc6Levels[NUM_AR_RC_QUANT_BAUNDARY - 1] = {
+-32104, -29503, -24512, -15096, -6656, -37, 7036, 14847, 24512, 29503, 32104
+};
+
+/* quantization boundary levels for reflection coefficients */
+const int16_t WebRtcIsac_kQArBoundaryLevels[NUM_AR_RC_QUANT_BAUNDARY] = {
+-32768, -31441, -27566, -21458, -13612, -4663, 4663, 13612, 21458, 27566, 31441,
+32767
+};
+
+/* initial index for AR reflection coefficient quantizer and cdf table search */
+const uint16_t WebRtcIsac_kQArRcInitIndex[6] = {
+ 5,  5,  5,  5,  5,  5};
+
+/* pointers to AR cdf tables */
+const uint16_t *WebRtcIsac_kQArRcCdfPtr[AR_ORDER] = {
+  WebRtcIsac_kQArRc1Cdf, WebRtcIsac_kQArRc2Cdf, WebRtcIsac_kQArRc3Cdf,
+  WebRtcIsac_kQArRc4Cdf, WebRtcIsac_kQArRc5Cdf, WebRtcIsac_kQArRc6Cdf
+};
+
+/* pointers to AR representation levels tables */
+const int16_t *WebRtcIsac_kQArRcLevelsPtr[AR_ORDER] = {
+  WebRtcIsac_kQArRc1Levels, WebRtcIsac_kQArRc2Levels, WebRtcIsac_kQArRc3Levels,
+  WebRtcIsac_kQArRc4Levels, WebRtcIsac_kQArRc5Levels, WebRtcIsac_kQArRc6Levels
+};
+
+
+/******************** GAIN Coefficient Tables ***********************/
+/* cdf for Gain coefficient */
+const uint16_t WebRtcIsac_kQGainCdf[19] = {
+ 0,  2,  4,  6,  8,  10,  12,  14,  16,  1172,
+ 11119,  29411,  51699,  64445,  65527,  65529,  65531,  65533,  65535};
+
+/* representation levels for quantized squared Gain coefficient */
+const int32_t WebRtcIsac_kQGain2Levels[18] = {
+// 17, 28, 46, 76, 128, 215, 364, 709, 1268, 1960, 3405, 6078, 11286, 17827, 51918, 134498, 487432, 2048000};
+ 128, 128, 128, 128, 128, 215, 364, 709, 1268, 1960, 3405, 6078, 11286, 17827, 51918, 134498, 487432, 2048000};
+/* quantization boundary levels for squared Gain coefficient */
+const int32_t WebRtcIsac_kQGain2BoundaryLevels[19] = {
+0, 21, 35, 59, 99, 166, 280, 475, 815, 1414, 2495, 4505, 8397, 16405, 34431, 81359, 240497, 921600, 0x7FFFFFFF};
+
+/* pointers to Gain cdf table */
+const uint16_t *WebRtcIsac_kQGainCdf_ptr[1] = {WebRtcIsac_kQGainCdf};
+
+/* Gain initial index for gain quantizer and cdf table search */
+const uint16_t WebRtcIsac_kQGainInitIndex[1] = {11};
+
+/************************* Cosine Tables ****************************/
+/* Cosine table */
+const int16_t WebRtcIsac_kCos[6][60] = {
+{512,  512,  511,  510,  508,  507,  505,  502,  499,  496,  493,  489,  485,  480,  476,  470,  465,  459,  453,  447,
+440,  433,  426,  418,  410,  402,  394,  385,  376,  367,  357,  348,  338,  327,  317,  306,  295,  284,  273,  262,
+250,  238,  226,  214,  202,  190,  177,  165,  152,  139,  126,  113,  100,  87,  73,  60,  47,  33,  20,  7},
+{512,  510,  508,  503,  498,  491,  483,  473,  462,  450,  437,  422,  406,  389,  371,  352,  333,  312,  290,  268,
+244,  220,  196,  171,  145,  120,  93,  67,  40,  13,  -13,  -40,  -67,  -93,  -120,  -145,  -171,  -196,  -220,  -244,
+-268,  -290,  -312,  -333,  -352,  -371,  -389,  -406,  -422,  -437,  -450,  -462,  -473,  -483,  -491,  -498,  -503,  -508,  -510,  -512},
+{512,  508,  502,  493,  480,  465,  447,  426,  402,  376,  348,  317,  284,  250,  214,  177,  139,  100,  60,  20,
+-20,  -60,  -100,  -139,  -177,  -214,  -250,  -284,  -317,  -348,  -376,  -402,  -426,  -447,  -465,  -480,  -493,  -502,  -508,  -512,
+-512,  -508,  -502,  -493,  -480,  -465,  -447,  -426,  -402,  -376,  -348,  -317,  -284,  -250,  -214,  -177,  -139,  -100,  -60,  -20},
+{511,  506,  495,  478,  456,  429,  398,  362,  322,  279,  232,  183,  133,  80,  27,  -27,  -80,  -133,  -183,  -232,
+-279,  -322,  -362,  -398,  -429,  -456,  -478,  -495,  -506,  -511,  -511,  -506,  -495,  -478,  -456,  -429,  -398,  -362,  -322,  -279,
+-232,  -183,  -133,  -80,  -27,  27,  80,  133,  183,  232,  279,  322,  362,  398,  429,  456,  478,  495,  506,  511},
+{511,  502,  485,  459,  426,  385,  338,  284,  226,  165,  100,  33,  -33,  -100,  -165,  -226,  -284,  -338,  -385,  -426,
+-459,  -485,  -502,  -511,  -511,  -502,  -485,  -459,  -426,  -385,  -338,  -284,  -226,  -165,  -100,  -33,  33,  100,  165,  226,
+284,  338,  385,  426,  459,  485,  502,  511,  511,  502,  485,  459,  426,  385,  338,  284,  226,  165,  100,  33},
+{510,  498,  473,  437,  389,  333,  268,  196,  120,  40,  -40,  -120,  -196,  -268,  -333,  -389,  -437,  -473,  -498,  -510,
+-510,  -498,  -473,  -437,  -389,  -333,  -268,  -196,  -120,  -40,  40,  120,  196,  268,  333,  389,  437,  473,  498,  510,
+510,  498,  473,  437,  389,  333,  268,  196,  120,  40,  -40,  -120,  -196,  -268,  -333,  -389,  -437,  -473,  -498,  -510}
+};
diff --git a/modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.h b/modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.h
new file mode 100644
index 0000000..1e656eb
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/spectrum_ar_model_tables.h
@@ -0,0 +1,78 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * spectrum_ar_model_tables.h
+ *
+ * This file contains definitions of tables with AR coefficients, 
+ * Gain coefficients and cosine tables.
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_SPECTRUM_AR_MODEL_TABLES_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_SPECTRUM_AR_MODEL_TABLES_H_
+
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+
+#define NUM_AR_RC_QUANT_BAUNDARY 12
+
+/********************* AR Coefficient Tables ************************/
+/* cdf for quantized reflection coefficient 1 */
+extern const uint16_t WebRtcIsac_kQArRc1Cdf[NUM_AR_RC_QUANT_BAUNDARY];
+
+/* cdf for quantized reflection coefficient 2 */
+extern const uint16_t WebRtcIsac_kQArRc2Cdf[NUM_AR_RC_QUANT_BAUNDARY];
+
+/* cdf for quantized reflection coefficient 3 */
+extern const uint16_t WebRtcIsac_kQArRc3Cdf[NUM_AR_RC_QUANT_BAUNDARY];
+
+/* cdf for quantized reflection coefficient 4 */
+extern const uint16_t WebRtcIsac_kQArRc4Cdf[NUM_AR_RC_QUANT_BAUNDARY];
+
+/* cdf for quantized reflection coefficient 5 */
+extern const uint16_t WebRtcIsac_kQArRc5Cdf[NUM_AR_RC_QUANT_BAUNDARY];
+
+/* cdf for quantized reflection coefficient 6 */
+extern const uint16_t WebRtcIsac_kQArRc6Cdf[NUM_AR_RC_QUANT_BAUNDARY];
+
+/* quantization boundary levels for reflection coefficients */
+extern const int16_t WebRtcIsac_kQArBoundaryLevels[NUM_AR_RC_QUANT_BAUNDARY];
+
+/* initial indices for AR reflection coefficient quantizer and cdf table search */
+extern const uint16_t WebRtcIsac_kQArRcInitIndex[AR_ORDER];
+
+/* pointers to AR cdf tables */
+extern const uint16_t *WebRtcIsac_kQArRcCdfPtr[AR_ORDER];
+
+/* pointers to AR representation levels tables */
+extern const int16_t *WebRtcIsac_kQArRcLevelsPtr[AR_ORDER];
+
+
+/******************** GAIN Coefficient Tables ***********************/
+/* cdf for Gain coefficient */
+extern const uint16_t WebRtcIsac_kQGainCdf[19];
+
+/* representation levels for quantized Gain coefficient */
+extern const int32_t WebRtcIsac_kQGain2Levels[18];
+
+/* squared quantization boundary levels for Gain coefficient */
+extern const int32_t WebRtcIsac_kQGain2BoundaryLevels[19];
+
+/* pointer to Gain cdf table */
+extern const uint16_t *WebRtcIsac_kQGainCdf_ptr[1];
+
+/* Gain initial index for gain quantizer and cdf table search */
+extern const uint16_t WebRtcIsac_kQGainInitIndex[1];
+
+/************************* Cosine Tables ****************************/
+/* Cosine table */
+extern const int16_t WebRtcIsac_kCos[6][60];
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_SPECTRUM_AR_MODEL_TABLES_H_ */
diff --git a/modules/audio_coding/codecs/isac/main/source/structs.h b/modules/audio_coding/codecs/isac/main/source/structs.h
new file mode 100644
index 0000000..ef4282b
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/structs.h
@@ -0,0 +1,495 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * structs.h
+ *
+ * This header file contains all the structs used in the ISAC codec
+ *
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_STRUCTS_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_STRUCTS_H_
+
+#include "modules/audio_coding/codecs/isac/bandwidth_info.h"
+#include "modules/audio_coding/codecs/isac/main/include/isac.h"
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+typedef struct Bitstreamstruct {
+
+  uint8_t   stream[STREAM_SIZE_MAX];
+  uint32_t  W_upper;
+  uint32_t  streamval;
+  uint32_t  stream_index;
+
+} Bitstr;
+
+typedef struct {
+
+  double    DataBufferLo[WINLEN];
+  double    DataBufferHi[WINLEN];
+
+  double    CorrBufLo[ORDERLO+1];
+  double    CorrBufHi[ORDERHI+1];
+
+  float    PreStateLoF[ORDERLO+1];
+  float    PreStateLoG[ORDERLO+1];
+  float    PreStateHiF[ORDERHI+1];
+  float    PreStateHiG[ORDERHI+1];
+  float    PostStateLoF[ORDERLO+1];
+  float    PostStateLoG[ORDERLO+1];
+  float    PostStateHiF[ORDERHI+1];
+  float    PostStateHiG[ORDERHI+1];
+
+  double    OldEnergy;
+
+} MaskFiltstr;
+
+
+typedef struct {
+
+  //state vectors for each of the two analysis filters
+  double    INSTAT1[2*(QORDER-1)];
+  double    INSTAT2[2*(QORDER-1)];
+  double    INSTATLA1[2*(QORDER-1)];
+  double    INSTATLA2[2*(QORDER-1)];
+  double    INLABUF1[QLOOKAHEAD];
+  double    INLABUF2[QLOOKAHEAD];
+
+  float    INSTAT1_float[2*(QORDER-1)];
+  float    INSTAT2_float[2*(QORDER-1)];
+  float    INSTATLA1_float[2*(QORDER-1)];
+  float    INSTATLA2_float[2*(QORDER-1)];
+  float    INLABUF1_float[QLOOKAHEAD];
+  float    INLABUF2_float[QLOOKAHEAD];
+
+  /* High pass filter */
+  double    HPstates[HPORDER];
+  float    HPstates_float[HPORDER];
+
+} PreFiltBankstr;
+
+
+typedef struct {
+
+  //state vectors for each of the two analysis filters
+  double    STATE_0_LOWER[2*POSTQORDER];
+  double    STATE_0_UPPER[2*POSTQORDER];
+
+  /* High pass filter */
+  double    HPstates1[HPORDER];
+  double    HPstates2[HPORDER];
+
+  float    STATE_0_LOWER_float[2*POSTQORDER];
+  float    STATE_0_UPPER_float[2*POSTQORDER];
+
+  float    HPstates1_float[HPORDER];
+  float    HPstates2_float[HPORDER];
+
+} PostFiltBankstr;
+
+typedef struct {
+
+  //data buffer for pitch filter
+  double    ubuf[PITCH_BUFFSIZE];
+
+  //low pass state vector
+  double    ystate[PITCH_DAMPORDER];
+
+  //old lag and gain
+  double    oldlagp[1];
+  double    oldgainp[1];
+
+} PitchFiltstr;
+
+typedef struct {
+
+  //data buffer
+  double    buffer[PITCH_WLPCBUFLEN];
+
+  //state vectors
+  double    istate[PITCH_WLPCORDER];
+  double    weostate[PITCH_WLPCORDER];
+  double    whostate[PITCH_WLPCORDER];
+
+  //LPC window   -> should be a global array because constant
+  double    window[PITCH_WLPCWINLEN];
+
+} WeightFiltstr;
+
+typedef struct {
+
+  //for inital estimator
+  double         dec_buffer[PITCH_CORR_LEN2 + PITCH_CORR_STEP2 +
+                            PITCH_MAX_LAG/2 - PITCH_FRAME_LEN/2+2];
+  double        decimator_state[2*ALLPASSSECTIONS+1];
+  double        hp_state[2];
+
+  double        whitened_buf[QLOOKAHEAD];
+
+  double        inbuf[QLOOKAHEAD];
+
+  PitchFiltstr  PFstr_wght;
+  PitchFiltstr  PFstr;
+  WeightFiltstr Wghtstr;
+
+} PitchAnalysisStruct;
+
+
+
+/* Have instance of struct together with other iSAC structs */
+typedef struct {
+
+  /* Previous frame length (in ms)                                    */
+  int32_t    prev_frame_length;
+
+  /* Previous RTP timestamp from received
+     packet (in samples relative beginning)                           */
+  int32_t    prev_rec_rtp_number;
+
+  /* Send timestamp for previous packet (in ms using timeGetTime())   */
+  uint32_t    prev_rec_send_ts;
+
+  /* Arrival time for previous packet (in ms using timeGetTime())     */
+  uint32_t    prev_rec_arr_ts;
+
+  /* rate of previous packet, derived from RTP timestamps (in bits/s) */
+  float   prev_rec_rtp_rate;
+
+  /* Time sinse the last update of the BN estimate (in ms)            */
+  uint32_t    last_update_ts;
+
+  /* Time sinse the last reduction (in ms)                            */
+  uint32_t    last_reduction_ts;
+
+  /* How many times the estimate was update in the beginning          */
+  int32_t    count_tot_updates_rec;
+
+  /* The estimated bottle neck rate from there to here (in bits/s)    */
+  int32_t  rec_bw;
+  float   rec_bw_inv;
+  float   rec_bw_avg;
+  float   rec_bw_avg_Q;
+
+  /* The estimated mean absolute jitter value,
+     as seen on this side (in ms)                                     */
+  float   rec_jitter;
+  float   rec_jitter_short_term;
+  float   rec_jitter_short_term_abs;
+  float   rec_max_delay;
+  float   rec_max_delay_avg_Q;
+
+  /* (assumed) bitrate for headers (bps)                              */
+  float   rec_header_rate;
+
+  /* The estimated bottle neck rate from here to there (in bits/s)    */
+  float    send_bw_avg;
+
+  /* The estimated mean absolute jitter value, as seen on
+     the other siee (in ms)                                           */
+  float   send_max_delay_avg;
+
+  // number of packets received since last update
+  int num_pkts_rec;
+
+  int num_consec_rec_pkts_over_30k;
+
+  // flag for marking that a high speed network has been
+  // detected downstream
+  int hsn_detect_rec;
+
+  int num_consec_snt_pkts_over_30k;
+
+  // flag for marking that a high speed network has
+  // been detected upstream
+  int hsn_detect_snd;
+
+  uint32_t start_wait_period;
+
+  int in_wait_period;
+
+  int change_to_WB;
+
+  uint32_t                 senderTimestamp;
+  uint32_t                 receiverTimestamp;
+  //enum IsacSamplingRate incomingStreamSampFreq;
+  uint16_t                 numConsecLatePkts;
+  float                        consecLatency;
+  int16_t                  inWaitLatePkts;
+
+  IsacBandwidthInfo external_bw_info;
+} BwEstimatorstr;
+
+
+typedef struct {
+
+  /* boolean, flags if previous packet exceeded B.N. */
+  int    PrevExceed;
+  /* ms */
+  int    ExceedAgo;
+  /* packets left to send in current burst */
+  int    BurstCounter;
+  /* packets */
+  int    InitCounter;
+  /* ms remaining in buffer when next packet will be sent */
+  double StillBuffered;
+
+} RateModel;
+
+
+typedef struct {
+
+  unsigned int SpaceAlloced;
+  unsigned int MaxPermAlloced;
+  double Tmp0[MAXFFTSIZE];
+  double Tmp1[MAXFFTSIZE];
+  double Tmp2[MAXFFTSIZE];
+  double Tmp3[MAXFFTSIZE];
+  int Perm[MAXFFTSIZE];
+  int factor [NFACTOR];
+
+} FFTstr;
+
+
+/* The following strutc is used to store data from encoding, to make it
+   fast and easy to construct a new bitstream with a different Bandwidth
+   estimate. All values (except framelength and minBytes) is double size to
+   handle 60 ms of data.
+*/
+typedef struct {
+
+  /* Used to keep track of if it is first or second part of 60 msec packet */
+  int         startIdx;
+
+  /* Frame length in samples */
+  int16_t framelength;
+
+  /* Pitch Gain */
+  int         pitchGain_index[2];
+
+  /* Pitch Lag */
+  double      meanGain[2];
+  int         pitchIndex[PITCH_SUBFRAMES*2];
+
+  /* LPC */
+  int         LPCindex_s[108*2]; /* KLT_ORDER_SHAPE = 108 */
+  int         LPCindex_g[12*2];  /* KLT_ORDER_GAIN = 12 */
+  double      LPCcoeffs_lo[(ORDERLO+1)*SUBFRAMES*2];
+  double      LPCcoeffs_hi[(ORDERHI+1)*SUBFRAMES*2];
+
+  /* Encode Spec */
+  int16_t fre[FRAMESAMPLES];
+  int16_t fim[FRAMESAMPLES];
+  int16_t AvgPitchGain[2];
+
+  /* Used in adaptive mode only */
+  int         minBytes;
+
+} IsacSaveEncoderData;
+
+
+typedef struct {
+
+  int         indexLPCShape[UB_LPC_ORDER * UB16_LPC_VEC_PER_FRAME];
+  double      lpcGain[SUBFRAMES<<1];
+  int         lpcGainIndex[SUBFRAMES<<1];
+
+  Bitstr      bitStreamObj;
+
+  int16_t realFFT[FRAMESAMPLES_HALF];
+  int16_t imagFFT[FRAMESAMPLES_HALF];
+} ISACUBSaveEncDataStruct;
+
+
+
+typedef struct {
+
+  Bitstr              bitstr_obj;
+  MaskFiltstr         maskfiltstr_obj;
+  PreFiltBankstr      prefiltbankstr_obj;
+  PitchFiltstr        pitchfiltstr_obj;
+  PitchAnalysisStruct pitchanalysisstr_obj;
+  FFTstr              fftstr_obj;
+  IsacSaveEncoderData SaveEnc_obj;
+
+  int                 buffer_index;
+  int16_t         current_framesamples;
+
+  float               data_buffer_float[FRAMESAMPLES_30ms];
+
+  int                 frame_nb;
+  double              bottleneck;
+  int16_t         new_framelength;
+  double              s2nr;
+
+  /* Maximum allowed number of bits for a 30 msec packet */
+  int16_t         payloadLimitBytes30;
+  /* Maximum allowed number of bits for a 30 msec packet */
+  int16_t         payloadLimitBytes60;
+  /* Maximum allowed number of bits for both 30 and 60 msec packet */
+  int16_t         maxPayloadBytes;
+  /* Maximum allowed rate in bytes per 30 msec packet */
+  int16_t         maxRateInBytes;
+
+  /*---
+    If set to 1 iSAC will not addapt the frame-size, if used in
+    channel-adaptive mode. The initial value will be used for all rates.
+    ---*/
+  int16_t         enforceFrameSize;
+
+  /*-----
+    This records the BWE index the encoder injected into the bit-stream.
+    It will be used in RCU. The same BWE index of main payload will be in
+    the redundant payload. We can not retrive it from BWE because it is
+    a recursive procedure (WebRtcIsac_GetDownlinkBwJitIndexImpl) and has to be
+    called only once per each encode.
+    -----*/
+  int16_t         lastBWIdx;
+} ISACLBEncStruct;
+
+typedef struct {
+
+  Bitstr                  bitstr_obj;
+  MaskFiltstr             maskfiltstr_obj;
+  PreFiltBankstr          prefiltbankstr_obj;
+  FFTstr                  fftstr_obj;
+  ISACUBSaveEncDataStruct SaveEnc_obj;
+
+  int                     buffer_index;
+  float                   data_buffer_float[MAX_FRAMESAMPLES +
+                                            LB_TOTAL_DELAY_SAMPLES];
+  double                  bottleneck;
+  /* Maximum allowed number of bits for a 30 msec packet */
+  //int16_t        payloadLimitBytes30;
+  /* Maximum allowed number of bits for both 30 and 60 msec packet */
+  //int16_t        maxPayloadBytes;
+  int16_t             maxPayloadSizeBytes;
+
+  double                  lastLPCVec[UB_LPC_ORDER];
+  int16_t             numBytesUsed;
+  int16_t             lastJitterInfo;
+} ISACUBEncStruct;
+
+
+
+typedef struct {
+
+  Bitstr          bitstr_obj;
+  MaskFiltstr     maskfiltstr_obj;
+  PostFiltBankstr postfiltbankstr_obj;
+  PitchFiltstr    pitchfiltstr_obj;
+  FFTstr          fftstr_obj;
+
+} ISACLBDecStruct;
+
+typedef struct {
+
+  Bitstr          bitstr_obj;
+  MaskFiltstr     maskfiltstr_obj;
+  PostFiltBankstr postfiltbankstr_obj;
+  FFTstr          fftstr_obj;
+
+} ISACUBDecStruct;
+
+
+
+typedef struct {
+
+  ISACLBEncStruct ISACencLB_obj;
+  ISACLBDecStruct ISACdecLB_obj;
+} ISACLBStruct;
+
+
+typedef struct {
+
+  ISACUBEncStruct ISACencUB_obj;
+  ISACUBDecStruct ISACdecUB_obj;
+} ISACUBStruct;
+
+/*
+  This struct is used to take a snapshot of the entropy coder and LPC gains
+  right before encoding LPC gains. This allows us to go back to that state
+  if we like to limit the payload size.
+*/
+typedef struct {
+  /* 6 lower-band & 6 upper-band */
+  double       loFiltGain[SUBFRAMES];
+  double       hiFiltGain[SUBFRAMES];
+  /* Upper boundary of interval W */
+  uint32_t W_upper;
+  uint32_t streamval;
+  /* Index to the current position in bytestream */
+  uint32_t stream_index;
+  uint8_t  stream[3];
+} transcode_obj;
+
+typedef struct {
+  // TODO(kwiberg): The size of these tables could be reduced by storing floats
+  // instead of doubles, and by making use of the identity cos(x) =
+  // sin(x+pi/2). They could also be made global constants that we fill in at
+  // compile time.
+  double costab1[FRAMESAMPLES_HALF];
+  double sintab1[FRAMESAMPLES_HALF];
+  double costab2[FRAMESAMPLES_QUARTER];
+  double sintab2[FRAMESAMPLES_QUARTER];
+} TransformTables;
+
+typedef struct {
+  // lower-band codec instance
+  ISACLBStruct              instLB;
+  // upper-band codec instance
+  ISACUBStruct              instUB;
+
+  // Bandwidth Estimator and model for the rate.
+  BwEstimatorstr            bwestimator_obj;
+  RateModel                 rate_data_obj;
+  double                    MaxDelay;
+
+  /* 0 = adaptive; 1 = instantaneous */
+  int16_t               codingMode;
+
+  // overall bottleneck of the codec
+  int32_t               bottleneck;
+
+  // QMF Filter state
+  int32_t               analysisFBState1[FB_STATE_SIZE_WORD32];
+  int32_t               analysisFBState2[FB_STATE_SIZE_WORD32];
+  int32_t               synthesisFBState1[FB_STATE_SIZE_WORD32];
+  int32_t               synthesisFBState2[FB_STATE_SIZE_WORD32];
+
+  // Error Code
+  int16_t               errorCode;
+
+  // bandwidth of the encoded audio 8, 12 or 16 kHz
+  enum ISACBandwidth        bandwidthKHz;
+  // Sampling rate of audio, encoder and decode,  8 or 16 kHz
+  enum IsacSamplingRate encoderSamplingRateKHz;
+  enum IsacSamplingRate decoderSamplingRateKHz;
+  // Flag to keep track of initializations, lower & upper-band
+  // encoder and decoder.
+  int16_t               initFlag;
+
+  // Flag to to indicate signal bandwidth switch
+  int16_t               resetFlag_8kHz;
+
+  // Maximum allowed rate, measured in Bytes per 30 ms.
+  int16_t               maxRateBytesPer30Ms;
+  // Maximum allowed payload-size, measured in Bytes.
+  int16_t               maxPayloadSizeBytes;
+  /* The expected sampling rate of the input signal. Valid values are 16000
+   * and 32000. This is not the operation sampling rate of the codec. */
+  uint16_t in_sample_rate_hz;
+
+  // Trig tables for WebRtcIsac_Time2Spec and WebRtcIsac_Spec2time.
+  TransformTables transform_tables;
+} ISACMainStruct;
+
+#endif /* MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_STRUCTS_H_ */
diff --git a/modules/audio_coding/codecs/isac/main/source/transform.c b/modules/audio_coding/codecs/isac/main/source/transform.c
new file mode 100644
index 0000000..bfbfecb
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/source/transform.c
@@ -0,0 +1,126 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+
+#include "modules/audio_coding/codecs/isac/main/source/settings.h"
+#include "modules/audio_coding/codecs/isac/main/source/fft.h"
+#include "modules/audio_coding/codecs/isac/main/source/codec.h"
+#include "modules/audio_coding/codecs/isac/main/source/os_specific_inline.h"
+
+void WebRtcIsac_InitTransform(TransformTables* tables) {
+  int k;
+  double fact, phase;
+
+  fact = PI / (FRAMESAMPLES_HALF);
+  phase = 0.0;
+  for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+    tables->costab1[k] = cos(phase);
+    tables->sintab1[k] = sin(phase);
+    phase += fact;
+  }
+
+  fact = PI * ((double) (FRAMESAMPLES_HALF - 1)) / ((double) FRAMESAMPLES_HALF);
+  phase = 0.5 * fact;
+  for (k = 0; k < FRAMESAMPLES_QUARTER; k++) {
+    tables->costab2[k] = cos(phase);
+    tables->sintab2[k] = sin(phase);
+    phase += fact;
+  }
+}
+
+void WebRtcIsac_Time2Spec(const TransformTables* tables,
+                          double* inre1,
+                          double* inre2,
+                          int16_t* outreQ7,
+                          int16_t* outimQ7,
+                          FFTstr* fftstr_obj) {
+  int k;
+  int dims[1];
+  double tmp1r, tmp1i, xr, xi, yr, yi, fact;
+  double tmpre[FRAMESAMPLES_HALF], tmpim[FRAMESAMPLES_HALF];
+
+
+  dims[0] = FRAMESAMPLES_HALF;
+
+
+  /* Multiply with complex exponentials and combine into one complex vector */
+  fact = 0.5 / sqrt(FRAMESAMPLES_HALF);
+  for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+    tmp1r = tables->costab1[k];
+    tmp1i = tables->sintab1[k];
+    tmpre[k] = (inre1[k] * tmp1r + inre2[k] * tmp1i) * fact;
+    tmpim[k] = (inre2[k] * tmp1r - inre1[k] * tmp1i) * fact;
+  }
+
+
+  /* Get DFT */
+  WebRtcIsac_Fftns(1, dims, tmpre, tmpim, -1, 1.0, fftstr_obj);
+
+  /* Use symmetry to separate into two complex vectors and center frames in time around zero */
+  for (k = 0; k < FRAMESAMPLES_QUARTER; k++) {
+    xr = tmpre[k] + tmpre[FRAMESAMPLES_HALF - 1 - k];
+    yi = -tmpre[k] + tmpre[FRAMESAMPLES_HALF - 1 - k];
+    xi = tmpim[k] - tmpim[FRAMESAMPLES_HALF - 1 - k];
+    yr = tmpim[k] + tmpim[FRAMESAMPLES_HALF - 1 - k];
+
+    tmp1r = tables->costab2[k];
+    tmp1i = tables->sintab2[k];
+    outreQ7[k] = (int16_t)WebRtcIsac_lrint((xr * tmp1r - xi * tmp1i) * 128.0);
+    outimQ7[k] = (int16_t)WebRtcIsac_lrint((xr * tmp1i + xi * tmp1r) * 128.0);
+    outreQ7[FRAMESAMPLES_HALF - 1 - k] = (int16_t)WebRtcIsac_lrint((-yr * tmp1i - yi * tmp1r) * 128.0);
+    outimQ7[FRAMESAMPLES_HALF - 1 - k] = (int16_t)WebRtcIsac_lrint((-yr * tmp1r + yi * tmp1i) * 128.0);
+  }
+}
+
+void WebRtcIsac_Spec2time(const TransformTables* tables,
+                          double* inre,
+                          double* inim,
+                          double* outre1,
+                          double* outre2,
+                          FFTstr* fftstr_obj) {
+  int k;
+  double tmp1r, tmp1i, xr, xi, yr, yi, fact;
+
+  int dims;
+
+  dims = FRAMESAMPLES_HALF;
+
+  for (k = 0; k < FRAMESAMPLES_QUARTER; k++) {
+    /* Move zero in time to beginning of frames */
+    tmp1r = tables->costab2[k];
+    tmp1i = tables->sintab2[k];
+    xr = inre[k] * tmp1r + inim[k] * tmp1i;
+    xi = inim[k] * tmp1r - inre[k] * tmp1i;
+    yr = -inim[FRAMESAMPLES_HALF - 1 - k] * tmp1r - inre[FRAMESAMPLES_HALF - 1 - k] * tmp1i;
+    yi = -inre[FRAMESAMPLES_HALF - 1 - k] * tmp1r + inim[FRAMESAMPLES_HALF - 1 - k] * tmp1i;
+
+    /* Combine into one vector,  z = x + j * y */
+    outre1[k] = xr - yi;
+    outre1[FRAMESAMPLES_HALF - 1 - k] = xr + yi;
+    outre2[k] = xi + yr;
+    outre2[FRAMESAMPLES_HALF - 1 - k] = -xi + yr;
+  }
+
+
+  /* Get IDFT */
+  WebRtcIsac_Fftns(1, &dims, outre1, outre2, 1, FRAMESAMPLES_HALF, fftstr_obj);
+
+
+  /* Demodulate and separate */
+  fact = sqrt(FRAMESAMPLES_HALF);
+  for (k = 0; k < FRAMESAMPLES_HALF; k++) {
+    tmp1r = tables->costab1[k];
+    tmp1i = tables->sintab1[k];
+    xr = (outre1[k] * tmp1r - outre2[k] * tmp1i) * fact;
+    outre2[k] = (outre2[k] * tmp1r + outre1[k] * tmp1i) * fact;
+    outre1[k] = xr;
+  }
+}
diff --git a/modules/audio_coding/codecs/isac/main/util/utility.c b/modules/audio_coding/codecs/isac/main/util/utility.c
new file mode 100644
index 0000000..56547b1
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/util/utility.c
@@ -0,0 +1,179 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+
+#include "modules/audio_coding/codecs/isac/main/util/utility.h"
+
+/* function for reading audio data from PCM file */
+int
+readframe(
+    short* data,
+    FILE*  inp,
+    int    length)
+{
+    short k, rlen, status = 0;
+	unsigned char* ptrUChar;
+	ptrUChar = (unsigned char*)data;
+
+    rlen = (short)fread(data, sizeof(short), length, inp);
+    if (rlen < length) {
+        for (k = rlen; k < length; k++)
+            data[k] = 0;
+        status = 1;
+    }
+
+	// Assuming that our PCM files are written in Intel machines
+	for(k = 0; k < length; k++)
+	{
+		data[k] = (short)ptrUChar[k<<1] | ((((short)ptrUChar[(k<<1) + 1]) << 8) & 0xFF00);
+	}
+
+    return status;
+}
+
+short
+readSwitch(
+    int   argc,
+    char* argv[],
+    char* strID)
+{
+    short n;
+    for(n = 0; n < argc; n++)
+    {
+        if(strcmp(argv[n], strID) == 0)
+        {
+            return 1;
+        }
+    }
+    return 0;
+}
+
+double
+readParamDouble(
+    int    argc,
+    char*  argv[],
+    char*  strID,
+    double defaultVal)
+{
+    double returnVal = defaultVal;
+    short n;
+    for(n = 0; n < argc; n++)
+    {
+        if(strcmp(argv[n], strID) == 0)
+        {
+            n++;
+            if(n < argc)
+            {
+                returnVal = atof(argv[n]);
+            }
+            break;
+        }
+    }
+    return returnVal;
+}
+
+int
+readParamInt(
+    int   argc,
+    char* argv[],
+    char* strID,
+    int   defaultVal)
+{
+    int returnVal = defaultVal;
+    short n;
+    for(n = 0; n < argc; n++)
+    {
+        if(strcmp(argv[n], strID) == 0)
+        {
+            n++;
+            if(n < argc)
+            {
+                returnVal = atoi(argv[n]);
+            }
+            break;
+        }
+    }
+    return returnVal;
+}
+
+int
+readParamString(
+    int   argc,
+    char* argv[],
+    char* strID,
+    char* stringParam,
+    int   maxSize)
+{
+    int paramLenght = 0;
+    short n;
+    for(n = 0; n < argc; n++)
+    {
+        if(strcmp(argv[n], strID) == 0)
+        {
+            n++;
+            if(n < argc)
+            {
+                strncpy(stringParam, argv[n], maxSize);
+                paramLenght = (int)strlen(argv[n]);
+            }
+            break;
+        }
+    }
+    return paramLenght;
+}
+
+void
+get_arrival_time(
+    int              current_framesamples,   /* samples */
+    size_t           packet_size,            /* bytes */
+    int              bottleneck,             /* excluding headers; bits/s */
+    BottleNeckModel* BN_data,
+    short            senderSampFreqHz,
+    short            receiverSampFreqHz)
+{
+    unsigned int travelTimeMs;
+	const int headerSizeByte = 35;
+
+	int headerRate;
+
+    BN_data->whenPackGeneratedMs += (current_framesamples / (senderSampFreqHz / 1000));
+
+	headerRate = headerSizeByte * 8 * senderSampFreqHz / current_framesamples;     /* bits/s */
+
+	/* everything in samples */
+	BN_data->sample_count = BN_data->sample_count + current_framesamples;
+
+    //travelTimeMs = ((packet_size + HeaderSize) * 8 * sampFreqHz) /
+    //    (bottleneck + HeaderRate)
+    travelTimeMs = (unsigned int)floor((double)((packet_size + headerSizeByte) * 8 * 1000)
+        / (double)(bottleneck + headerRate) + 0.5);
+
+    if(BN_data->whenPrevPackLeftMs > BN_data->whenPackGeneratedMs)
+    {
+        BN_data->whenPrevPackLeftMs += travelTimeMs;
+    }
+    else
+    {
+        BN_data->whenPrevPackLeftMs = BN_data->whenPackGeneratedMs +
+            travelTimeMs;
+    }
+
+    BN_data->arrival_time = (BN_data->whenPrevPackLeftMs *
+        (receiverSampFreqHz / 1000));
+
+//	if (BN_data->arrival_time < BN_data->sample_count)
+//		BN_data->arrival_time = BN_data->sample_count;
+
+	BN_data->rtp_number++;
+}
diff --git a/modules/audio_coding/codecs/isac/main/util/utility.h b/modules/audio_coding/codecs/isac/main/util/utility.h
new file mode 100644
index 0000000..b5882a5
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/main/util/utility.h
@@ -0,0 +1,144 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_UTIL_UTILITY_H_
+#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_UTIL_UTILITY_H_
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define OPEN_FILE_WB(filePtr, fullPath)                         \
+  do                                                            \
+  {                                                             \
+    if(fullPath != NULL)                                        \
+    {                                                           \
+      filePtr = fopen(fullPath, "wb");                          \
+      if(filePtr == NULL)                                       \
+      {                                                         \
+        printf("could not open %s to write to.", fullPath);     \
+        return -1;                                              \
+      }                                                         \
+    }                                                           \
+    else                                                        \
+    {                                                           \
+      filePtr = NULL;                                           \
+    }                                                           \
+  }while(0)
+
+#define OPEN_FILE_AB(filePtr, fullPath)                         \
+  do                                                            \
+  {                                                             \
+    if(fullPath != NULL)                                        \
+    {                                                           \
+      filePtr = fopen(fullPath, "ab");                          \
+      if(filePtr == NULL)                                       \
+      {                                                         \
+        printf("could not open %s to write to.", fullPath);     \
+        return -1;                                              \
+      }                                                         \
+    }                                                           \
+    else                                                        \
+    {                                                           \
+      filePtr = NULL;                                           \
+    }                                                           \
+  }while(0)
+
+#define OPEN_FILE_RB(filePtr, fullPath)                         \
+  do                                                            \
+  {                                                             \
+    if(fullPath != NULL)                                        \
+    {                                                           \
+      filePtr = fopen(fullPath, "rb");                          \
+      if(filePtr == NULL)                                       \
+      {                                                         \
+        printf("could not open %s to read from.", fullPath);    \
+        return -1;                                              \
+      }                                                         \
+    }                                                           \
+    else                                                        \
+    {                                                           \
+      filePtr = NULL;                                           \
+    }                                                           \
+  }while(0)
+
+#define WRITE_FILE_D(bufferPtr, len, filePtr)           \
+  do                                                    \
+  {                                                     \
+    if(filePtr != NULL)                                 \
+    {                                                   \
+      double dummy[1000];                               \
+      int cntr;                                         \
+      for(cntr = 0; cntr < (len); cntr++)               \
+      {                                                 \
+        dummy[cntr] = (double)bufferPtr[cntr];          \
+      }                                                 \
+      fwrite(dummy, sizeof(double), len, filePtr);      \
+      fflush(filePtr);                                  \
+    }                                                   \
+  } while(0)
+
+  typedef struct {
+    unsigned int whenPackGeneratedMs;
+    unsigned int whenPrevPackLeftMs;
+    unsigned int sendTimeMs ;          /* milisecond */
+    unsigned int arrival_time;         /* samples */
+    unsigned int sample_count;         /* samples, also used as "send time stamp" */
+    unsigned int rtp_number;
+  } BottleNeckModel;
+
+  void get_arrival_time(
+      int              current_framesamples,   /* samples */
+      size_t           packet_size,            /* bytes */
+      int              bottleneck,             /* excluding headers; bits/s */
+      BottleNeckModel* BN_data,
+      short            senderSampFreqHz,
+      short            receiverSampFreqHz);
+
+  /* function for reading audio data from PCM file */
+  int readframe(
+      short* data,
+      FILE*  inp,
+      int    length);
+
+  short readSwitch(
+      int   argc,
+      char* argv[],
+      char* strID);
+
+  double readParamDouble(
+      int    argc,
+      char*  argv[],
+      char*  strID,
+      double defaultVal);
+
+  int readParamInt(
+      int   argc,
+      char* argv[],
+      char* strID,
+      int   defaultVal);
+
+  int readParamString(
+      int   argc,
+      char* argv[],
+      char* strID,
+      char* stringParam,
+      int   maxSize);
+
+#if defined(__cplusplus)
+}
+#endif
+
+
+
+#endif
diff --git a/modules/audio_coding/codecs/isac/unittest.cc b/modules/audio_coding/codecs/isac/unittest.cc
new file mode 100644
index 0000000..4e76e0d
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/unittest.cc
@@ -0,0 +1,257 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+#include <numeric>
+#include <sstream>
+#include <vector>
+
+#include "modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h"
+#include "modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+namespace {
+
+const int kIsacNumberOfSamples = 32 * 60;  // 60 ms at 32 kHz
+
+std::vector<int16_t> LoadSpeechData() {
+  webrtc::test::InputAudioFile input_file(
+      webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"));
+  std::vector<int16_t> speech_data(kIsacNumberOfSamples);
+  input_file.Read(kIsacNumberOfSamples, speech_data.data());
+  return speech_data;
+}
+
+template <typename T>
+IsacBandwidthInfo GetBwInfo(typename T::instance_type* inst) {
+  IsacBandwidthInfo bi;
+  T::GetBandwidthInfo(inst, &bi);
+  EXPECT_TRUE(bi.in_use);
+  return bi;
+}
+
+// Encodes one packet. Returns the packet duration in milliseconds.
+template <typename T>
+int EncodePacket(typename T::instance_type* inst,
+                 const IsacBandwidthInfo* bi,
+                 const int16_t* speech_data,
+                 rtc::Buffer* output) {
+  output->SetSize(1000);
+  for (int duration_ms = 10;; duration_ms += 10) {
+    if (bi)
+      T::SetBandwidthInfo(inst, bi);
+    int encoded_bytes = T::Encode(inst, speech_data, output->data());
+    if (encoded_bytes > 0 || duration_ms >= 60) {
+      EXPECT_GT(encoded_bytes, 0);
+      EXPECT_LE(static_cast<size_t>(encoded_bytes), output->size());
+      output->SetSize(encoded_bytes);
+      return duration_ms;
+    }
+  }
+}
+
+template <typename T>
+std::vector<int16_t> DecodePacket(typename T::instance_type* inst,
+                                  const rtc::Buffer& encoded) {
+  std::vector<int16_t> decoded(kIsacNumberOfSamples);
+  int16_t speech_type;
+  int nsamples = T::DecodeInternal(inst, encoded.data(), encoded.size(),
+                                   &decoded.front(), &speech_type);
+  EXPECT_GT(nsamples, 0);
+  EXPECT_LE(static_cast<size_t>(nsamples), decoded.size());
+  decoded.resize(nsamples);
+  return decoded;
+}
+
+class BoundedCapacityChannel final {
+ public:
+  BoundedCapacityChannel(int sample_rate_hz, int rate_bits_per_second)
+      : current_time_rtp_(0),
+        channel_rate_bytes_per_sample_(rate_bits_per_second /
+                                       (8.0 * sample_rate_hz)) {}
+
+  // Simulate sending the given number of bytes at the given RTP time. Returns
+  // the new current RTP time after the sending is done.
+  int Send(int send_time_rtp, int nbytes) {
+    current_time_rtp_ = std::max(current_time_rtp_, send_time_rtp) +
+                        nbytes / channel_rate_bytes_per_sample_;
+    return current_time_rtp_;
+  }
+
+ private:
+  int current_time_rtp_;
+  // The somewhat strange unit for channel rate, bytes per sample, is because
+  // RTP time is measured in samples:
+  const double channel_rate_bytes_per_sample_;
+};
+
+// Test that the iSAC encoder produces identical output whether or not we use a
+// conjoined encoder+decoder pair or a separate encoder and decoder that
+// communicate BW estimation info explicitly.
+template <typename T, bool adaptive>
+void TestGetSetBandwidthInfo(const int16_t* speech_data,
+                             int rate_bits_per_second,
+                             int sample_rate_hz,
+                             int frame_size_ms) {
+  const int bit_rate = 32000;
+
+  // Conjoined encoder/decoder pair:
+  typename T::instance_type* encdec;
+  ASSERT_EQ(0, T::Create(&encdec));
+  ASSERT_EQ(0, T::EncoderInit(encdec, adaptive ? 0 : 1));
+  T::DecoderInit(encdec);
+  ASSERT_EQ(0, T::SetEncSampRate(encdec, sample_rate_hz));
+  if (adaptive)
+    ASSERT_EQ(0, T::ControlBwe(encdec, bit_rate, frame_size_ms, false));
+  else
+    ASSERT_EQ(0, T::Control(encdec, bit_rate, frame_size_ms));
+
+  // Disjoint encoder/decoder pair:
+  typename T::instance_type* enc;
+  ASSERT_EQ(0, T::Create(&enc));
+  ASSERT_EQ(0, T::EncoderInit(enc, adaptive ? 0 : 1));
+  ASSERT_EQ(0, T::SetEncSampRate(enc, sample_rate_hz));
+  if (adaptive)
+    ASSERT_EQ(0, T::ControlBwe(enc, bit_rate, frame_size_ms, false));
+  else
+    ASSERT_EQ(0, T::Control(enc, bit_rate, frame_size_ms));
+  typename T::instance_type* dec;
+  ASSERT_EQ(0, T::Create(&dec));
+  T::DecoderInit(dec);
+  T::SetInitialBweBottleneck(dec, bit_rate);
+  T::SetEncSampRateInDecoder(dec, sample_rate_hz);
+
+  // 0. Get initial BW info from decoder.
+  auto bi = GetBwInfo<T>(dec);
+
+  BoundedCapacityChannel channel1(sample_rate_hz, rate_bits_per_second),
+      channel2(sample_rate_hz, rate_bits_per_second);
+
+  int elapsed_time_ms = 0;
+  for (int i = 0; elapsed_time_ms < 10000; ++i) {
+    std::ostringstream ss;
+    ss << " i = " << i;
+    SCOPED_TRACE(ss.str());
+
+    // 1. Encode 3 * 10 ms or 6 * 10 ms. The separate encoder is given the BW
+    // info before each encode call.
+    rtc::Buffer bitstream1, bitstream2;
+    int duration1_ms =
+        EncodePacket<T>(encdec, nullptr, speech_data, &bitstream1);
+    int duration2_ms = EncodePacket<T>(enc, &bi, speech_data, &bitstream2);
+    EXPECT_EQ(duration1_ms, duration2_ms);
+    if (adaptive)
+      EXPECT_TRUE(duration1_ms == 30 || duration1_ms == 60);
+    else
+      EXPECT_EQ(frame_size_ms, duration1_ms);
+    ASSERT_EQ(bitstream1.size(), bitstream2.size());
+    EXPECT_EQ(bitstream1, bitstream2);
+
+    // 2. Deliver the encoded data to the decoders.
+    const int send_time = elapsed_time_ms * (sample_rate_hz / 1000);
+    EXPECT_EQ(0, T::UpdateBwEstimate(
+                     encdec, bitstream1.data(), bitstream1.size(), i, send_time,
+                     channel1.Send(send_time,
+                                   rtc::checked_cast<int>(bitstream1.size()))));
+    EXPECT_EQ(0, T::UpdateBwEstimate(
+                     dec, bitstream2.data(), bitstream2.size(), i, send_time,
+                     channel2.Send(send_time,
+                                   rtc::checked_cast<int>(bitstream2.size()))));
+
+    // 3. Decode, and get new BW info from the separate decoder.
+    ASSERT_EQ(0, T::SetDecSampRate(encdec, sample_rate_hz));
+    ASSERT_EQ(0, T::SetDecSampRate(dec, sample_rate_hz));
+    auto decoded1 = DecodePacket<T>(encdec, bitstream1);
+    auto decoded2 = DecodePacket<T>(dec, bitstream2);
+    EXPECT_EQ(decoded1, decoded2);
+    bi = GetBwInfo<T>(dec);
+
+    elapsed_time_ms += duration1_ms;
+  }
+
+  EXPECT_EQ(0, T::Free(encdec));
+  EXPECT_EQ(0, T::Free(enc));
+  EXPECT_EQ(0, T::Free(dec));
+}
+
+enum class IsacType { Fix, Float };
+
+std::ostream& operator<<(std::ostream& os, IsacType t) {
+  os << (t == IsacType::Fix ? "fix" : "float");
+  return os;
+}
+
+struct IsacTestParam {
+  IsacType isac_type;
+  bool adaptive;
+  int channel_rate_bits_per_second;
+  int sample_rate_hz;
+  int frame_size_ms;
+
+  friend std::ostream& operator<<(std::ostream& os, const IsacTestParam& itp) {
+    os << '{' << itp.isac_type << ','
+       << (itp.adaptive ? "adaptive" : "nonadaptive") << ','
+       << itp.channel_rate_bits_per_second << ',' << itp.sample_rate_hz << ','
+       << itp.frame_size_ms << '}';
+    return os;
+  }
+};
+
+class IsacCommonTest : public testing::TestWithParam<IsacTestParam> {};
+
+}  // namespace
+
+TEST_P(IsacCommonTest, GetSetBandwidthInfo) {
+  auto p = GetParam();
+  auto test_fun = [p] {
+    if (p.isac_type == IsacType::Fix) {
+      if (p.adaptive)
+        return TestGetSetBandwidthInfo<IsacFix, true>;
+      else
+        return TestGetSetBandwidthInfo<IsacFix, false>;
+    } else {
+      if (p.adaptive)
+        return TestGetSetBandwidthInfo<IsacFloat, true>;
+      else
+        return TestGetSetBandwidthInfo<IsacFloat, false>;
+    }
+  }();
+  test_fun(LoadSpeechData().data(), p.channel_rate_bits_per_second,
+           p.sample_rate_hz, p.frame_size_ms);
+}
+
+std::vector<IsacTestParam> TestCases() {
+  static const IsacType types[] = {IsacType::Fix, IsacType::Float};
+  static const bool adaptives[] = {true, false};
+  static const int channel_rates[] = {12000, 15000, 19000, 22000};
+  static const int sample_rates[] = {16000, 32000};
+  static const int frame_sizes[] = {30, 60};
+  std::vector<IsacTestParam> cases;
+  for (IsacType type : types)
+    for (bool adaptive : adaptives)
+      for (int channel_rate : channel_rates)
+        for (int sample_rate : sample_rates)
+          if (!(type == IsacType::Fix && sample_rate == 32000))
+            for (int frame_size : frame_sizes)
+              if (!(sample_rate == 32000 && frame_size == 60))
+                cases.push_back(
+                    {type, adaptive, channel_rate, sample_rate, frame_size});
+  return cases;
+}
+
+INSTANTIATE_TEST_CASE_P(, IsacCommonTest, testing::ValuesIn(TestCases()));
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/legacy_encoded_audio_frame.cc b/modules/audio_coding/codecs/legacy_encoded_audio_frame.cc
new file mode 100644
index 0000000..341336e
--- /dev/null
+++ b/modules/audio_coding/codecs/legacy_encoded_audio_frame.cc
@@ -0,0 +1,87 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/legacy_encoded_audio_frame.h"
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+
+namespace webrtc {
+
+LegacyEncodedAudioFrame::LegacyEncodedAudioFrame(AudioDecoder* decoder,
+                                                 rtc::Buffer&& payload)
+    : decoder_(decoder), payload_(std::move(payload)) {}
+
+LegacyEncodedAudioFrame::~LegacyEncodedAudioFrame() = default;
+
+size_t LegacyEncodedAudioFrame::Duration() const {
+  const int ret = decoder_->PacketDuration(payload_.data(), payload_.size());
+  return (ret < 0) ? 0 : static_cast<size_t>(ret);
+}
+
+rtc::Optional<AudioDecoder::EncodedAudioFrame::DecodeResult>
+LegacyEncodedAudioFrame::Decode(rtc::ArrayView<int16_t> decoded) const {
+  AudioDecoder::SpeechType speech_type = AudioDecoder::kSpeech;
+  const int ret = decoder_->Decode(
+      payload_.data(), payload_.size(), decoder_->SampleRateHz(),
+      decoded.size() * sizeof(int16_t), decoded.data(), &speech_type);
+
+  if (ret < 0)
+    return rtc::nullopt;
+
+  return DecodeResult{static_cast<size_t>(ret), speech_type};
+}
+
+std::vector<AudioDecoder::ParseResult> LegacyEncodedAudioFrame::SplitBySamples(
+    AudioDecoder* decoder,
+    rtc::Buffer&& payload,
+    uint32_t timestamp,
+    size_t bytes_per_ms,
+    uint32_t timestamps_per_ms) {
+  RTC_DCHECK(payload.data());
+  std::vector<AudioDecoder::ParseResult> results;
+  size_t split_size_bytes = payload.size();
+
+  // Find a "chunk size" >= 20 ms and < 40 ms.
+  const size_t min_chunk_size = bytes_per_ms * 20;
+  if (min_chunk_size >= payload.size()) {
+    std::unique_ptr<LegacyEncodedAudioFrame> frame(
+        new LegacyEncodedAudioFrame(decoder, std::move(payload)));
+    results.emplace_back(timestamp, 0, std::move(frame));
+  } else {
+    // Reduce the split size by half as long as |split_size_bytes| is at least
+    // twice the minimum chunk size (so that the resulting size is at least as
+    // large as the minimum chunk size).
+    while (split_size_bytes >= 2 * min_chunk_size) {
+      split_size_bytes /= 2;
+    }
+
+    const uint32_t timestamps_per_chunk = static_cast<uint32_t>(
+        split_size_bytes * timestamps_per_ms / bytes_per_ms);
+    size_t byte_offset;
+    uint32_t timestamp_offset;
+    for (byte_offset = 0, timestamp_offset = 0;
+         byte_offset < payload.size();
+         byte_offset += split_size_bytes,
+             timestamp_offset += timestamps_per_chunk) {
+      split_size_bytes =
+          std::min(split_size_bytes, payload.size() - byte_offset);
+      rtc::Buffer new_payload(payload.data() + byte_offset, split_size_bytes);
+      std::unique_ptr<LegacyEncodedAudioFrame> frame(
+          new LegacyEncodedAudioFrame(decoder, std::move(new_payload)));
+      results.emplace_back(timestamp + timestamp_offset, 0, std::move(frame));
+    }
+  }
+
+  return results;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/legacy_encoded_audio_frame.h b/modules/audio_coding/codecs/legacy_encoded_audio_frame.h
new file mode 100644
index 0000000..275576e
--- /dev/null
+++ b/modules/audio_coding/codecs/legacy_encoded_audio_frame.h
@@ -0,0 +1,48 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_LEGACY_ENCODED_AUDIO_FRAME_H_
+#define MODULES_AUDIO_CODING_CODECS_LEGACY_ENCODED_AUDIO_FRAME_H_
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/audio_codecs/audio_decoder.h"
+
+namespace webrtc {
+
+class LegacyEncodedAudioFrame final : public AudioDecoder::EncodedAudioFrame {
+ public:
+  LegacyEncodedAudioFrame(AudioDecoder* decoder, rtc::Buffer&& payload);
+  ~LegacyEncodedAudioFrame() override;
+
+  static std::vector<AudioDecoder::ParseResult> SplitBySamples(
+      AudioDecoder* decoder,
+      rtc::Buffer&& payload,
+      uint32_t timestamp,
+      size_t bytes_per_ms,
+      uint32_t timestamps_per_ms);
+
+  size_t Duration() const override;
+
+  rtc::Optional<DecodeResult> Decode(
+      rtc::ArrayView<int16_t> decoded) const override;
+
+  // For testing:
+  const rtc::Buffer& payload() const { return payload_; }
+
+ private:
+  AudioDecoder* const decoder_;
+  const rtc::Buffer payload_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_CODECS_LEGACY_ENCODED_AUDIO_FRAME_H_
diff --git a/modules/audio_coding/codecs/legacy_encoded_audio_frame_unittest.cc b/modules/audio_coding/codecs/legacy_encoded_audio_frame_unittest.cc
new file mode 100644
index 0000000..e2dd445
--- /dev/null
+++ b/modules/audio_coding/codecs/legacy_encoded_audio_frame_unittest.cc
@@ -0,0 +1,170 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/legacy_encoded_audio_frame.h"
+
+#include "modules/audio_coding/acm2/rent_a_codec.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class SplitBySamplesTest : public ::testing::TestWithParam<NetEqDecoder> {
+ protected:
+  virtual void SetUp() {
+    decoder_type_ = GetParam();
+    switch (decoder_type_) {
+      case NetEqDecoder::kDecoderPCMu:
+      case NetEqDecoder::kDecoderPCMa:
+        bytes_per_ms_ = 8;
+        samples_per_ms_ = 8;
+        break;
+      case NetEqDecoder::kDecoderPCMu_2ch:
+      case NetEqDecoder::kDecoderPCMa_2ch:
+        bytes_per_ms_ = 2 * 8;
+        samples_per_ms_ = 8;
+        break;
+      case NetEqDecoder::kDecoderG722:
+        bytes_per_ms_ = 8;
+        samples_per_ms_ = 16;
+        break;
+      case NetEqDecoder::kDecoderPCM16B:
+        bytes_per_ms_ = 16;
+        samples_per_ms_ = 8;
+        break;
+      case NetEqDecoder::kDecoderPCM16Bwb:
+        bytes_per_ms_ = 32;
+        samples_per_ms_ = 16;
+        break;
+      case NetEqDecoder::kDecoderPCM16Bswb32kHz:
+        bytes_per_ms_ = 64;
+        samples_per_ms_ = 32;
+        break;
+      case NetEqDecoder::kDecoderPCM16Bswb48kHz:
+        bytes_per_ms_ = 96;
+        samples_per_ms_ = 48;
+        break;
+      case NetEqDecoder::kDecoderPCM16B_2ch:
+        bytes_per_ms_ = 2 * 16;
+        samples_per_ms_ = 8;
+        break;
+      case NetEqDecoder::kDecoderPCM16Bwb_2ch:
+        bytes_per_ms_ = 2 * 32;
+        samples_per_ms_ = 16;
+        break;
+      case NetEqDecoder::kDecoderPCM16Bswb32kHz_2ch:
+        bytes_per_ms_ = 2 * 64;
+        samples_per_ms_ = 32;
+        break;
+      case NetEqDecoder::kDecoderPCM16Bswb48kHz_2ch:
+        bytes_per_ms_ = 2 * 96;
+        samples_per_ms_ = 48;
+        break;
+      case NetEqDecoder::kDecoderPCM16B_5ch:
+        bytes_per_ms_ = 5 * 16;
+        samples_per_ms_ = 8;
+        break;
+      default:
+        assert(false);
+        break;
+    }
+  }
+  size_t bytes_per_ms_;
+  int samples_per_ms_;
+  NetEqDecoder decoder_type_;
+};
+
+// Test splitting sample-based payloads.
+TEST_P(SplitBySamplesTest, PayloadSizes) {
+  constexpr uint32_t kBaseTimestamp = 0x12345678;
+  struct ExpectedSplit {
+    size_t payload_size_ms;
+    size_t num_frames;
+    // For simplicity. We only expect up to two packets per split.
+    size_t frame_sizes[2];
+  };
+  // The payloads are expected to be split as follows:
+  // 10 ms -> 10 ms
+  // 20 ms -> 20 ms
+  // 30 ms -> 30 ms
+  // 40 ms -> 20 + 20 ms
+  // 50 ms -> 25 + 25 ms
+  // 60 ms -> 30 + 30 ms
+  ExpectedSplit expected_splits[] = {
+    {10, 1, {10}},
+    {20, 1, {20}},
+    {30, 1, {30}},
+    {40, 2, {20, 20}},
+    {50, 2, {25, 25}},
+    {60, 2, {30, 30}}
+  };
+
+  for (const auto& expected_split : expected_splits) {
+    // The payload values are set to steadily increase (modulo 256), so that the
+    // resulting frames can be checked and we can be reasonably certain no
+    // sample was missed or repeated.
+    const auto generate_payload = [] (size_t num_bytes) {
+      rtc::Buffer payload(num_bytes);
+      uint8_t value = 0;
+      // Allow wrap-around of value in counter below.
+      for (size_t i = 0; i != payload.size(); ++i, ++value) {
+        payload[i] = value;
+      }
+      return payload;
+    };
+
+    const auto results = LegacyEncodedAudioFrame::SplitBySamples(
+        nullptr,
+        generate_payload(expected_split.payload_size_ms * bytes_per_ms_),
+        kBaseTimestamp, bytes_per_ms_, samples_per_ms_);
+
+    EXPECT_EQ(expected_split.num_frames, results.size());
+    uint32_t expected_timestamp = kBaseTimestamp;
+    uint32_t expected_byte_offset = 0;
+    uint8_t value = 0;
+    for (size_t i = 0; i != expected_split.num_frames; ++i) {
+      const auto& result = results[i];
+      const LegacyEncodedAudioFrame* frame =
+          static_cast<const LegacyEncodedAudioFrame*>(result.frame.get());
+      const size_t length_bytes = expected_split.frame_sizes[i] * bytes_per_ms_;
+      EXPECT_EQ(length_bytes, frame->payload().size());
+      EXPECT_EQ(expected_timestamp, result.timestamp);
+      const rtc::Buffer& payload = frame->payload();
+      // Allow wrap-around of value in counter below.
+      for (size_t i = 0; i != payload.size(); ++i, ++value) {
+        ASSERT_EQ(value, payload[i]);
+      }
+
+      expected_timestamp += rtc::checked_cast<uint32_t>(
+          expected_split.frame_sizes[i] * samples_per_ms_);
+      expected_byte_offset += rtc::checked_cast<uint32_t>(length_bytes);
+    }
+  }
+}
+
+INSTANTIATE_TEST_CASE_P(
+    LegacyEncodedAudioFrame,
+    SplitBySamplesTest,
+    ::testing::Values(NetEqDecoder::kDecoderPCMu,
+                      NetEqDecoder::kDecoderPCMa,
+                      NetEqDecoder::kDecoderPCMu_2ch,
+                      NetEqDecoder::kDecoderPCMa_2ch,
+                      NetEqDecoder::kDecoderG722,
+                      NetEqDecoder::kDecoderPCM16B,
+                      NetEqDecoder::kDecoderPCM16Bwb,
+                      NetEqDecoder::kDecoderPCM16Bswb32kHz,
+                      NetEqDecoder::kDecoderPCM16Bswb48kHz,
+                      NetEqDecoder::kDecoderPCM16B_2ch,
+                      NetEqDecoder::kDecoderPCM16Bwb_2ch,
+                      NetEqDecoder::kDecoderPCM16Bswb32kHz_2ch,
+                      NetEqDecoder::kDecoderPCM16Bswb48kHz_2ch,
+                      NetEqDecoder::kDecoderPCM16B_5ch));
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/opus/audio_decoder_opus.cc b/modules/audio_coding/codecs/opus/audio_decoder_opus.cc
new file mode 100644
index 0000000..3d10b6f
--- /dev/null
+++ b/modules/audio_coding/codecs/opus/audio_decoder_opus.cc
@@ -0,0 +1,168 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/opus/audio_decoder_opus.h"
+
+#include <utility>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+class OpusFrame : public AudioDecoder::EncodedAudioFrame {
+ public:
+  OpusFrame(AudioDecoderOpusImpl* decoder,
+            rtc::Buffer&& payload,
+            bool is_primary_payload)
+      : decoder_(decoder),
+        payload_(std::move(payload)),
+        is_primary_payload_(is_primary_payload) {}
+
+  size_t Duration() const override {
+    int ret;
+    if (is_primary_payload_) {
+      ret = decoder_->PacketDuration(payload_.data(), payload_.size());
+    } else {
+      ret = decoder_->PacketDurationRedundant(payload_.data(), payload_.size());
+    }
+    return (ret < 0) ? 0 : static_cast<size_t>(ret);
+  }
+
+  rtc::Optional<DecodeResult> Decode(
+      rtc::ArrayView<int16_t> decoded) const override {
+    AudioDecoder::SpeechType speech_type = AudioDecoder::kSpeech;
+    int ret;
+    if (is_primary_payload_) {
+      ret = decoder_->Decode(
+          payload_.data(), payload_.size(), decoder_->SampleRateHz(),
+          decoded.size() * sizeof(int16_t), decoded.data(), &speech_type);
+    } else {
+      ret = decoder_->DecodeRedundant(
+          payload_.data(), payload_.size(), decoder_->SampleRateHz(),
+          decoded.size() * sizeof(int16_t), decoded.data(), &speech_type);
+    }
+
+    if (ret < 0)
+      return rtc::nullopt;
+
+    return DecodeResult{static_cast<size_t>(ret), speech_type};
+  }
+
+ private:
+  AudioDecoderOpusImpl* const decoder_;
+  const rtc::Buffer payload_;
+  const bool is_primary_payload_;
+};
+
+}  // namespace
+
+AudioDecoderOpusImpl::AudioDecoderOpusImpl(size_t num_channels)
+    : channels_(num_channels) {
+  RTC_DCHECK(num_channels == 1 || num_channels == 2);
+  WebRtcOpus_DecoderCreate(&dec_state_, channels_);
+  WebRtcOpus_DecoderInit(dec_state_);
+}
+
+AudioDecoderOpusImpl::~AudioDecoderOpusImpl() {
+  WebRtcOpus_DecoderFree(dec_state_);
+}
+
+std::vector<AudioDecoder::ParseResult> AudioDecoderOpusImpl::ParsePayload(
+    rtc::Buffer&& payload,
+    uint32_t timestamp) {
+  std::vector<ParseResult> results;
+
+  if (PacketHasFec(payload.data(), payload.size())) {
+    const int duration =
+        PacketDurationRedundant(payload.data(), payload.size());
+    RTC_DCHECK_GE(duration, 0);
+    rtc::Buffer payload_copy(payload.data(), payload.size());
+    std::unique_ptr<EncodedAudioFrame> fec_frame(
+        new OpusFrame(this, std::move(payload_copy), false));
+    results.emplace_back(timestamp - duration, 1, std::move(fec_frame));
+  }
+  std::unique_ptr<EncodedAudioFrame> frame(
+      new OpusFrame(this, std::move(payload), true));
+  results.emplace_back(timestamp, 0, std::move(frame));
+  return results;
+}
+
+int AudioDecoderOpusImpl::DecodeInternal(const uint8_t* encoded,
+                                         size_t encoded_len,
+                                         int sample_rate_hz,
+                                         int16_t* decoded,
+                                         SpeechType* speech_type) {
+  RTC_DCHECK_EQ(sample_rate_hz, 48000);
+  int16_t temp_type = 1;  // Default is speech.
+  int ret =
+      WebRtcOpus_Decode(dec_state_, encoded, encoded_len, decoded, &temp_type);
+  if (ret > 0)
+    ret *= static_cast<int>(channels_);  // Return total number of samples.
+  *speech_type = ConvertSpeechType(temp_type);
+  return ret;
+}
+
+int AudioDecoderOpusImpl::DecodeRedundantInternal(const uint8_t* encoded,
+                                                  size_t encoded_len,
+                                                  int sample_rate_hz,
+                                                  int16_t* decoded,
+                                                  SpeechType* speech_type) {
+  if (!PacketHasFec(encoded, encoded_len)) {
+    // This packet is a RED packet.
+    return DecodeInternal(encoded, encoded_len, sample_rate_hz, decoded,
+                          speech_type);
+  }
+
+  RTC_DCHECK_EQ(sample_rate_hz, 48000);
+  int16_t temp_type = 1;  // Default is speech.
+  int ret = WebRtcOpus_DecodeFec(dec_state_, encoded, encoded_len, decoded,
+                                 &temp_type);
+  if (ret > 0)
+    ret *= static_cast<int>(channels_);  // Return total number of samples.
+  *speech_type = ConvertSpeechType(temp_type);
+  return ret;
+}
+
+void AudioDecoderOpusImpl::Reset() {
+  WebRtcOpus_DecoderInit(dec_state_);
+}
+
+int AudioDecoderOpusImpl::PacketDuration(const uint8_t* encoded,
+                                         size_t encoded_len) const {
+  return WebRtcOpus_DurationEst(dec_state_, encoded, encoded_len);
+}
+
+int AudioDecoderOpusImpl::PacketDurationRedundant(const uint8_t* encoded,
+                                                  size_t encoded_len) const {
+  if (!PacketHasFec(encoded, encoded_len)) {
+    // This packet is a RED packet.
+    return PacketDuration(encoded, encoded_len);
+  }
+
+  return WebRtcOpus_FecDurationEst(encoded, encoded_len);
+}
+
+bool AudioDecoderOpusImpl::PacketHasFec(const uint8_t* encoded,
+                                        size_t encoded_len) const {
+  int fec;
+  fec = WebRtcOpus_PacketHasFec(encoded, encoded_len);
+  return (fec == 1);
+}
+
+int AudioDecoderOpusImpl::SampleRateHz() const {
+  return 48000;
+}
+
+size_t AudioDecoderOpusImpl::Channels() const {
+  return channels_;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/opus/audio_decoder_opus.h b/modules/audio_coding/codecs/opus/audio_decoder_opus.h
new file mode 100644
index 0000000..70aa40b
--- /dev/null
+++ b/modules/audio_coding/codecs/opus/audio_decoder_opus.h
@@ -0,0 +1,55 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_DECODER_OPUS_H_
+#define MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_DECODER_OPUS_H_
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class AudioDecoderOpusImpl final : public AudioDecoder {
+ public:
+  explicit AudioDecoderOpusImpl(size_t num_channels);
+  ~AudioDecoderOpusImpl() override;
+
+  std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
+                                        uint32_t timestamp) override;
+  void Reset() override;
+  int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+  int PacketDurationRedundant(const uint8_t* encoded,
+                              size_t encoded_len) const override;
+  bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const override;
+  int SampleRateHz() const override;
+  size_t Channels() const override;
+
+ protected:
+  int DecodeInternal(const uint8_t* encoded,
+                     size_t encoded_len,
+                     int sample_rate_hz,
+                     int16_t* decoded,
+                     SpeechType* speech_type) override;
+  int DecodeRedundantInternal(const uint8_t* encoded,
+                              size_t encoded_len,
+                              int sample_rate_hz,
+                              int16_t* decoded,
+                              SpeechType* speech_type) override;
+
+ private:
+  OpusDecInst* dec_state_;
+  const size_t channels_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderOpusImpl);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_DECODER_OPUS_H_
diff --git a/modules/audio_coding/codecs/opus/audio_encoder_opus.cc b/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
new file mode 100644
index 0000000..caac4ae
--- /dev/null
+++ b/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
@@ -0,0 +1,819 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/opus/audio_encoder_opus.h"
+
+#include <algorithm>
+#include <iterator>
+#include <utility>
+
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h"
+#include "modules/audio_coding/audio_network_adaptor/controller_manager.h"
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/exp_filter.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "rtc_base/protobuf_utils.h"
+#include "rtc_base/ptr_util.h"
+#include "rtc_base/string_to_number.h"
+#include "rtc_base/timeutils.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+namespace {
+
+// Codec parameters for Opus.
+// draft-spittka-payload-rtp-opus-03
+
+// Recommended bitrates:
+// 8-12 kb/s for NB speech,
+// 16-20 kb/s for WB speech,
+// 28-40 kb/s for FB speech,
+// 48-64 kb/s for FB mono music, and
+// 64-128 kb/s for FB stereo music.
+// The current implementation applies the following values to mono signals,
+// and multiplies them by 2 for stereo.
+constexpr int kOpusBitrateNbBps = 12000;
+constexpr int kOpusBitrateWbBps = 20000;
+constexpr int kOpusBitrateFbBps = 32000;
+
+constexpr int kSampleRateHz = 48000;
+constexpr int kDefaultMaxPlaybackRate = 48000;
+
+// These two lists must be sorted from low to high
+#if WEBRTC_OPUS_SUPPORT_120MS_PTIME
+constexpr int kANASupportedFrameLengths[] = {20, 60, 120};
+constexpr int kOpusSupportedFrameLengths[] = {10, 20, 40, 60, 120};
+#else
+constexpr int kANASupportedFrameLengths[] = {20, 60};
+constexpr int kOpusSupportedFrameLengths[] = {10, 20, 40, 60};
+#endif
+
+// PacketLossFractionSmoother uses an exponential filter with a time constant
+// of -1.0 / ln(0.9999) = 10000 ms.
+constexpr float kAlphaForPacketLossFractionSmoother = 0.9999f;
+
+// Optimize the loss rate to configure Opus. Basically, optimized loss rate is
+// the input loss rate rounded down to various levels, because a robustly good
+// audio quality is achieved by lowering the packet loss down.
+// Additionally, to prevent toggling, margins are used, i.e., when jumping to
+// a loss rate from below, a higher threshold is used than jumping to the same
+// level from above.
+float OptimizePacketLossRate(float new_loss_rate, float old_loss_rate) {
+  RTC_DCHECK_GE(new_loss_rate, 0.0f);
+  RTC_DCHECK_LE(new_loss_rate, 1.0f);
+  RTC_DCHECK_GE(old_loss_rate, 0.0f);
+  RTC_DCHECK_LE(old_loss_rate, 1.0f);
+  constexpr float kPacketLossRate20 = 0.20f;
+  constexpr float kPacketLossRate10 = 0.10f;
+  constexpr float kPacketLossRate5 = 0.05f;
+  constexpr float kPacketLossRate1 = 0.01f;
+  constexpr float kLossRate20Margin = 0.02f;
+  constexpr float kLossRate10Margin = 0.01f;
+  constexpr float kLossRate5Margin = 0.01f;
+  if (new_loss_rate >=
+      kPacketLossRate20 +
+          kLossRate20Margin *
+              (kPacketLossRate20 - old_loss_rate > 0 ? 1 : -1)) {
+    return kPacketLossRate20;
+  } else if (new_loss_rate >=
+             kPacketLossRate10 +
+                 kLossRate10Margin *
+                     (kPacketLossRate10 - old_loss_rate > 0 ? 1 : -1)) {
+    return kPacketLossRate10;
+  } else if (new_loss_rate >=
+             kPacketLossRate5 +
+                 kLossRate5Margin *
+                     (kPacketLossRate5 - old_loss_rate > 0 ? 1 : -1)) {
+    return kPacketLossRate5;
+  } else if (new_loss_rate >= kPacketLossRate1) {
+    return kPacketLossRate1;
+  } else {
+    return 0.0f;
+  }
+}
+
+rtc::Optional<std::string> GetFormatParameter(const SdpAudioFormat& format,
+                                              const std::string& param) {
+  auto it = format.parameters.find(param);
+  if (it == format.parameters.end())
+    return rtc::nullopt;
+
+  return it->second;
+}
+
+template <typename T>
+rtc::Optional<T> GetFormatParameter(const SdpAudioFormat& format,
+                                    const std::string& param) {
+  return rtc::StringToNumber<T>(GetFormatParameter(format, param).value_or(""));
+}
+
+int CalculateDefaultBitrate(int max_playback_rate, size_t num_channels) {
+  const int bitrate = [&] {
+    if (max_playback_rate <= 8000) {
+      return kOpusBitrateNbBps * rtc::dchecked_cast<int>(num_channels);
+    } else if (max_playback_rate <= 16000) {
+      return kOpusBitrateWbBps * rtc::dchecked_cast<int>(num_channels);
+    } else {
+      return kOpusBitrateFbBps * rtc::dchecked_cast<int>(num_channels);
+    }
+  }();
+  RTC_DCHECK_GE(bitrate, AudioEncoderOpusConfig::kMinBitrateBps);
+  RTC_DCHECK_LE(bitrate, AudioEncoderOpusConfig::kMaxBitrateBps);
+  return bitrate;
+}
+
+// Get the maxaveragebitrate parameter in string-form, so we can properly figure
+// out how invalid it is and accurately log invalid values.
+int CalculateBitrate(int max_playback_rate_hz,
+                     size_t num_channels,
+                     rtc::Optional<std::string> bitrate_param) {
+  const int default_bitrate =
+      CalculateDefaultBitrate(max_playback_rate_hz, num_channels);
+
+  if (bitrate_param) {
+    const auto bitrate = rtc::StringToNumber<int>(*bitrate_param);
+    if (bitrate) {
+      const int chosen_bitrate =
+          std::max(AudioEncoderOpusConfig::kMinBitrateBps,
+                   std::min(*bitrate, AudioEncoderOpusConfig::kMaxBitrateBps));
+      if (bitrate != chosen_bitrate) {
+        RTC_LOG(LS_WARNING) << "Invalid maxaveragebitrate " << *bitrate
+                            << " clamped to " << chosen_bitrate;
+      }
+      return chosen_bitrate;
+    }
+    RTC_LOG(LS_WARNING) << "Invalid maxaveragebitrate \"" << *bitrate_param
+                        << "\" replaced by default bitrate " << default_bitrate;
+  }
+
+  return default_bitrate;
+}
+
+int GetChannelCount(const SdpAudioFormat& format) {
+  const auto param = GetFormatParameter(format, "stereo");
+  if (param == "1") {
+    return 2;
+  } else {
+    return 1;
+  }
+}
+
+int GetMaxPlaybackRate(const SdpAudioFormat& format) {
+  const auto param = GetFormatParameter<int>(format, "maxplaybackrate");
+  if (param && *param >= 8000) {
+    return std::min(*param, kDefaultMaxPlaybackRate);
+  }
+  return kDefaultMaxPlaybackRate;
+}
+
+int GetFrameSizeMs(const SdpAudioFormat& format) {
+  const auto ptime = GetFormatParameter<int>(format, "ptime");
+  if (ptime) {
+    // Pick the next highest supported frame length from
+    // kOpusSupportedFrameLengths.
+    for (const int supported_frame_length : kOpusSupportedFrameLengths) {
+      if (supported_frame_length >= *ptime) {
+        return supported_frame_length;
+      }
+    }
+    // If none was found, return the largest supported frame length.
+    return *(std::end(kOpusSupportedFrameLengths) - 1);
+  }
+
+  return AudioEncoderOpusConfig::kDefaultFrameSizeMs;
+}
+
+void FindSupportedFrameLengths(int min_frame_length_ms,
+                               int max_frame_length_ms,
+                               std::vector<int>* out) {
+  out->clear();
+  std::copy_if(std::begin(kANASupportedFrameLengths),
+               std::end(kANASupportedFrameLengths), std::back_inserter(*out),
+               [&](int frame_length_ms) {
+                 return frame_length_ms >= min_frame_length_ms &&
+                        frame_length_ms <= max_frame_length_ms;
+               });
+  RTC_DCHECK(std::is_sorted(out->begin(), out->end()));
+}
+
+int GetBitrateBps(const AudioEncoderOpusConfig& config) {
+  RTC_DCHECK(config.IsOk());
+  return *config.bitrate_bps;
+}
+
+}  // namespace
+
+void AudioEncoderOpusImpl::AppendSupportedEncoders(
+    std::vector<AudioCodecSpec>* specs) {
+  const SdpAudioFormat fmt = {
+      "opus", 48000, 2, {{"minptime", "10"}, {"useinbandfec", "1"}}};
+  const AudioCodecInfo info = QueryAudioEncoder(*SdpToConfig(fmt));
+  specs->push_back({fmt, info});
+}
+
+AudioCodecInfo AudioEncoderOpusImpl::QueryAudioEncoder(
+    const AudioEncoderOpusConfig& config) {
+  RTC_DCHECK(config.IsOk());
+  AudioCodecInfo info(48000, config.num_channels, *config.bitrate_bps,
+                      AudioEncoderOpusConfig::kMinBitrateBps,
+                      AudioEncoderOpusConfig::kMaxBitrateBps);
+  info.allow_comfort_noise = false;
+  info.supports_network_adaption = true;
+  return info;
+}
+
+std::unique_ptr<AudioEncoder> AudioEncoderOpusImpl::MakeAudioEncoder(
+    const AudioEncoderOpusConfig& config,
+    int payload_type) {
+  RTC_DCHECK(config.IsOk());
+  return rtc::MakeUnique<AudioEncoderOpusImpl>(config, payload_type);
+}
+
+rtc::Optional<AudioCodecInfo> AudioEncoderOpusImpl::QueryAudioEncoder(
+    const SdpAudioFormat& format) {
+  if (STR_CASE_CMP(format.name.c_str(), GetPayloadName()) == 0 &&
+      format.clockrate_hz == 48000 && format.num_channels == 2) {
+    const size_t num_channels = GetChannelCount(format);
+    const int bitrate =
+        CalculateBitrate(GetMaxPlaybackRate(format), num_channels,
+                         GetFormatParameter(format, "maxaveragebitrate"));
+    AudioCodecInfo info(48000, num_channels, bitrate,
+                        AudioEncoderOpusConfig::kMinBitrateBps,
+                        AudioEncoderOpusConfig::kMaxBitrateBps);
+    info.allow_comfort_noise = false;
+    info.supports_network_adaption = true;
+
+    return info;
+  }
+  return rtc::nullopt;
+}
+
+AudioEncoderOpusConfig AudioEncoderOpusImpl::CreateConfig(
+    const CodecInst& codec_inst) {
+  AudioEncoderOpusConfig config;
+  config.frame_size_ms = rtc::CheckedDivExact(codec_inst.pacsize, 48);
+  config.num_channels = codec_inst.channels;
+  config.bitrate_bps = codec_inst.rate;
+  config.application = config.num_channels == 1
+                           ? AudioEncoderOpusConfig::ApplicationMode::kVoip
+                           : AudioEncoderOpusConfig::ApplicationMode::kAudio;
+  config.supported_frame_lengths_ms.push_back(config.frame_size_ms);
+  return config;
+}
+
+rtc::Optional<AudioEncoderOpusConfig> AudioEncoderOpusImpl::SdpToConfig(
+    const SdpAudioFormat& format) {
+  if (STR_CASE_CMP(format.name.c_str(), "opus") != 0 ||
+      format.clockrate_hz != 48000 || format.num_channels != 2) {
+    return rtc::nullopt;
+  }
+
+  AudioEncoderOpusConfig config;
+  config.num_channels = GetChannelCount(format);
+  config.frame_size_ms = GetFrameSizeMs(format);
+  config.max_playback_rate_hz = GetMaxPlaybackRate(format);
+  config.fec_enabled = (GetFormatParameter(format, "useinbandfec") == "1");
+  config.dtx_enabled = (GetFormatParameter(format, "usedtx") == "1");
+  config.cbr_enabled = (GetFormatParameter(format, "cbr") == "1");
+  config.bitrate_bps =
+      CalculateBitrate(config.max_playback_rate_hz, config.num_channels,
+                       GetFormatParameter(format, "maxaveragebitrate"));
+  config.application = config.num_channels == 1
+                           ? AudioEncoderOpusConfig::ApplicationMode::kVoip
+                           : AudioEncoderOpusConfig::ApplicationMode::kAudio;
+
+  constexpr int kMinANAFrameLength = kANASupportedFrameLengths[0];
+  constexpr int kMaxANAFrameLength =
+      kANASupportedFrameLengths[arraysize(kANASupportedFrameLengths) - 1];
+
+  // For now, minptime and maxptime are only used with ANA. If ptime is outside
+  // of this range, it will get adjusted once ANA takes hold. Ideally, we'd know
+  // if ANA was to be used when setting up the config, and adjust accordingly.
+  const int min_frame_length_ms =
+      GetFormatParameter<int>(format, "minptime").value_or(kMinANAFrameLength);
+  const int max_frame_length_ms =
+      GetFormatParameter<int>(format, "maxptime").value_or(kMaxANAFrameLength);
+
+  FindSupportedFrameLengths(min_frame_length_ms, max_frame_length_ms,
+                            &config.supported_frame_lengths_ms);
+  RTC_DCHECK(config.IsOk());
+  return config;
+}
+
+rtc::Optional<int> AudioEncoderOpusImpl::GetNewComplexity(
+    const AudioEncoderOpusConfig& config) {
+  RTC_DCHECK(config.IsOk());
+  const int bitrate_bps = GetBitrateBps(config);
+  if (bitrate_bps >= config.complexity_threshold_bps -
+                         config.complexity_threshold_window_bps &&
+      bitrate_bps <= config.complexity_threshold_bps +
+                         config.complexity_threshold_window_bps) {
+    // Within the hysteresis window; make no change.
+    return rtc::nullopt;
+  } else {
+    return bitrate_bps <= config.complexity_threshold_bps
+               ? config.low_rate_complexity
+               : config.complexity;
+  }
+}
+
+rtc::Optional<int> AudioEncoderOpusImpl::GetNewBandwidth(
+    const AudioEncoderOpusConfig& config,
+    OpusEncInst* inst) {
+  constexpr int kMinWidebandBitrate = 8000;
+  constexpr int kMaxNarrowbandBitrate = 9000;
+  constexpr int kAutomaticThreshold = 11000;
+  RTC_DCHECK(config.IsOk());
+  const int bitrate = GetBitrateBps(config);
+  if (bitrate > kAutomaticThreshold) {
+    return rtc::Optional<int>(OPUS_AUTO);
+  }
+  const int bandwidth = WebRtcOpus_GetBandwidth(inst);
+  RTC_DCHECK_GE(bandwidth, 0);
+  if (bitrate > kMaxNarrowbandBitrate && bandwidth < OPUS_BANDWIDTH_WIDEBAND) {
+    return rtc::Optional<int>(OPUS_BANDWIDTH_WIDEBAND);
+  } else if (bitrate < kMinWidebandBitrate &&
+             bandwidth > OPUS_BANDWIDTH_NARROWBAND) {
+    return rtc::Optional<int>(OPUS_BANDWIDTH_NARROWBAND);
+  }
+  return rtc::Optional<int>();
+}
+
+class AudioEncoderOpusImpl::PacketLossFractionSmoother {
+ public:
+  explicit PacketLossFractionSmoother()
+      : last_sample_time_ms_(rtc::TimeMillis()),
+        smoother_(kAlphaForPacketLossFractionSmoother) {}
+
+  // Gets the smoothed packet loss fraction.
+  float GetAverage() const {
+    float value = smoother_.filtered();
+    return (value == rtc::ExpFilter::kValueUndefined) ? 0.0f : value;
+  }
+
+  // Add new observation to the packet loss fraction smoother.
+  void AddSample(float packet_loss_fraction) {
+    int64_t now_ms = rtc::TimeMillis();
+    smoother_.Apply(static_cast<float>(now_ms - last_sample_time_ms_),
+                    packet_loss_fraction);
+    last_sample_time_ms_ = now_ms;
+  }
+
+ private:
+  int64_t last_sample_time_ms_;
+
+  // An exponential filter is used to smooth the packet loss fraction.
+  rtc::ExpFilter smoother_;
+};
+
+AudioEncoderOpusImpl::AudioEncoderOpusImpl(const AudioEncoderOpusConfig& config,
+                                           int payload_type)
+    : AudioEncoderOpusImpl(
+          config,
+          payload_type,
+          [this](const ProtoString& config_string, RtcEventLog* event_log) {
+            return DefaultAudioNetworkAdaptorCreator(config_string, event_log);
+          },
+          // We choose 5sec as initial time constant due to empirical data.
+          rtc::MakeUnique<SmoothingFilterImpl>(5000)) {}
+
+AudioEncoderOpusImpl::AudioEncoderOpusImpl(
+    const AudioEncoderOpusConfig& config,
+    int payload_type,
+    const AudioNetworkAdaptorCreator& audio_network_adaptor_creator,
+    std::unique_ptr<SmoothingFilter> bitrate_smoother)
+    : payload_type_(payload_type),
+      send_side_bwe_with_overhead_(
+          webrtc::field_trial::IsEnabled("WebRTC-SendSideBwe-WithOverhead")),
+      adjust_bandwidth_(
+          webrtc::field_trial::IsEnabled("WebRTC-AdjustOpusBandwidth")),
+      bitrate_changed_(true),
+      packet_loss_rate_(0.0),
+      inst_(nullptr),
+      packet_loss_fraction_smoother_(new PacketLossFractionSmoother()),
+      audio_network_adaptor_creator_(audio_network_adaptor_creator),
+      bitrate_smoother_(std::move(bitrate_smoother)),
+      consecutive_dtx_frames_(0) {
+  RTC_DCHECK(0 <= payload_type && payload_type <= 127);
+
+  // Sanity check of the redundant payload type field that we want to get rid
+  // of. See https://bugs.chromium.org/p/webrtc/issues/detail?id=7847
+  RTC_CHECK(config.payload_type == -1 || config.payload_type == payload_type);
+
+  RTC_CHECK(RecreateEncoderInstance(config));
+}
+
+AudioEncoderOpusImpl::AudioEncoderOpusImpl(const CodecInst& codec_inst)
+    : AudioEncoderOpusImpl(CreateConfig(codec_inst), codec_inst.pltype) {}
+
+AudioEncoderOpusImpl::AudioEncoderOpusImpl(int payload_type,
+                                           const SdpAudioFormat& format)
+    : AudioEncoderOpusImpl(*SdpToConfig(format), payload_type) {}
+
+AudioEncoderOpusImpl::~AudioEncoderOpusImpl() {
+  RTC_CHECK_EQ(0, WebRtcOpus_EncoderFree(inst_));
+}
+
+int AudioEncoderOpusImpl::SampleRateHz() const {
+  return kSampleRateHz;
+}
+
+size_t AudioEncoderOpusImpl::NumChannels() const {
+  return config_.num_channels;
+}
+
+size_t AudioEncoderOpusImpl::Num10MsFramesInNextPacket() const {
+  return Num10msFramesPerPacket();
+}
+
+size_t AudioEncoderOpusImpl::Max10MsFramesInAPacket() const {
+  return Num10msFramesPerPacket();
+}
+
+int AudioEncoderOpusImpl::GetTargetBitrate() const {
+  return GetBitrateBps(config_);
+}
+
+void AudioEncoderOpusImpl::Reset() {
+  RTC_CHECK(RecreateEncoderInstance(config_));
+}
+
+bool AudioEncoderOpusImpl::SetFec(bool enable) {
+  if (enable) {
+    RTC_CHECK_EQ(0, WebRtcOpus_EnableFec(inst_));
+  } else {
+    RTC_CHECK_EQ(0, WebRtcOpus_DisableFec(inst_));
+  }
+  config_.fec_enabled = enable;
+  return true;
+}
+
+bool AudioEncoderOpusImpl::SetDtx(bool enable) {
+  if (enable) {
+    RTC_CHECK_EQ(0, WebRtcOpus_EnableDtx(inst_));
+  } else {
+    RTC_CHECK_EQ(0, WebRtcOpus_DisableDtx(inst_));
+  }
+  config_.dtx_enabled = enable;
+  return true;
+}
+
+bool AudioEncoderOpusImpl::GetDtx() const {
+  return config_.dtx_enabled;
+}
+
+bool AudioEncoderOpusImpl::SetApplication(Application application) {
+  auto conf = config_;
+  switch (application) {
+    case Application::kSpeech:
+      conf.application = AudioEncoderOpusConfig::ApplicationMode::kVoip;
+      break;
+    case Application::kAudio:
+      conf.application = AudioEncoderOpusConfig::ApplicationMode::kAudio;
+      break;
+  }
+  return RecreateEncoderInstance(conf);
+}
+
+void AudioEncoderOpusImpl::SetMaxPlaybackRate(int frequency_hz) {
+  auto conf = config_;
+  conf.max_playback_rate_hz = frequency_hz;
+  RTC_CHECK(RecreateEncoderInstance(conf));
+}
+
+bool AudioEncoderOpusImpl::EnableAudioNetworkAdaptor(
+    const std::string& config_string,
+    RtcEventLog* event_log) {
+  audio_network_adaptor_ =
+      audio_network_adaptor_creator_(config_string, event_log);
+  return audio_network_adaptor_.get() != nullptr;
+}
+
+void AudioEncoderOpusImpl::DisableAudioNetworkAdaptor() {
+  audio_network_adaptor_.reset(nullptr);
+}
+
+void AudioEncoderOpusImpl::OnReceivedUplinkPacketLossFraction(
+    float uplink_packet_loss_fraction) {
+  if (!audio_network_adaptor_) {
+    packet_loss_fraction_smoother_->AddSample(uplink_packet_loss_fraction);
+    float average_fraction_loss = packet_loss_fraction_smoother_->GetAverage();
+    return SetProjectedPacketLossRate(average_fraction_loss);
+  }
+  audio_network_adaptor_->SetUplinkPacketLossFraction(
+      uplink_packet_loss_fraction);
+  ApplyAudioNetworkAdaptor();
+}
+
+void AudioEncoderOpusImpl::OnReceivedUplinkRecoverablePacketLossFraction(
+    float uplink_recoverable_packet_loss_fraction) {
+  if (!audio_network_adaptor_)
+    return;
+  audio_network_adaptor_->SetUplinkRecoverablePacketLossFraction(
+      uplink_recoverable_packet_loss_fraction);
+  ApplyAudioNetworkAdaptor();
+}
+
+void AudioEncoderOpusImpl::OnReceivedUplinkBandwidth(
+    int target_audio_bitrate_bps,
+    rtc::Optional<int64_t> bwe_period_ms) {
+  if (audio_network_adaptor_) {
+    audio_network_adaptor_->SetTargetAudioBitrate(target_audio_bitrate_bps);
+    // We give smoothed bitrate allocation to audio network adaptor as
+    // the uplink bandwidth.
+    // The BWE spikes should not affect the bitrate smoother more than 25%.
+    // To simplify the calculations we use a step response as input signal.
+    // The step response of an exponential filter is
+    // u(t) = 1 - e^(-t / time_constant).
+    // In order to limit the affect of a BWE spike within 25% of its value
+    // before
+    // the next BWE update, we would choose a time constant that fulfills
+    // 1 - e^(-bwe_period_ms / time_constant) < 0.25
+    // Then 4 * bwe_period_ms is a good choice.
+    if (bwe_period_ms)
+      bitrate_smoother_->SetTimeConstantMs(*bwe_period_ms * 4);
+    bitrate_smoother_->AddSample(target_audio_bitrate_bps);
+
+    ApplyAudioNetworkAdaptor();
+  } else if (send_side_bwe_with_overhead_) {
+    if (!overhead_bytes_per_packet_) {
+      RTC_LOG(LS_INFO)
+          << "AudioEncoderOpusImpl: Overhead unknown, target audio bitrate "
+          << target_audio_bitrate_bps << " bps is ignored.";
+      return;
+    }
+    const int overhead_bps = static_cast<int>(
+        *overhead_bytes_per_packet_ * 8 * 100 / Num10MsFramesInNextPacket());
+    SetTargetBitrate(
+        std::min(AudioEncoderOpusConfig::kMaxBitrateBps,
+                 std::max(AudioEncoderOpusConfig::kMinBitrateBps,
+                          target_audio_bitrate_bps - overhead_bps)));
+  } else {
+    SetTargetBitrate(target_audio_bitrate_bps);
+  }
+}
+
+void AudioEncoderOpusImpl::OnReceivedRtt(int rtt_ms) {
+  if (!audio_network_adaptor_)
+    return;
+  audio_network_adaptor_->SetRtt(rtt_ms);
+  ApplyAudioNetworkAdaptor();
+}
+
+void AudioEncoderOpusImpl::OnReceivedOverhead(
+    size_t overhead_bytes_per_packet) {
+  if (audio_network_adaptor_) {
+    audio_network_adaptor_->SetOverhead(overhead_bytes_per_packet);
+    ApplyAudioNetworkAdaptor();
+  } else {
+    overhead_bytes_per_packet_ = overhead_bytes_per_packet;
+  }
+}
+
+void AudioEncoderOpusImpl::SetReceiverFrameLengthRange(
+    int min_frame_length_ms,
+    int max_frame_length_ms) {
+  // Ensure that |SetReceiverFrameLengthRange| is called before
+  // |EnableAudioNetworkAdaptor|, otherwise we need to recreate
+  // |audio_network_adaptor_|, which is not a needed use case.
+  RTC_DCHECK(!audio_network_adaptor_);
+  FindSupportedFrameLengths(min_frame_length_ms, max_frame_length_ms,
+                            &config_.supported_frame_lengths_ms);
+}
+
+AudioEncoder::EncodedInfo AudioEncoderOpusImpl::EncodeImpl(
+    uint32_t rtp_timestamp,
+    rtc::ArrayView<const int16_t> audio,
+    rtc::Buffer* encoded) {
+  MaybeUpdateUplinkBandwidth();
+
+  if (input_buffer_.empty())
+    first_timestamp_in_buffer_ = rtp_timestamp;
+
+  input_buffer_.insert(input_buffer_.end(), audio.cbegin(), audio.cend());
+  if (input_buffer_.size() <
+      (Num10msFramesPerPacket() * SamplesPer10msFrame())) {
+    return EncodedInfo();
+  }
+  RTC_CHECK_EQ(input_buffer_.size(),
+               Num10msFramesPerPacket() * SamplesPer10msFrame());
+
+  const size_t max_encoded_bytes = SufficientOutputBufferSize();
+  EncodedInfo info;
+  info.encoded_bytes =
+      encoded->AppendData(
+          max_encoded_bytes, [&] (rtc::ArrayView<uint8_t> encoded) {
+            int status = WebRtcOpus_Encode(
+                inst_, &input_buffer_[0],
+                rtc::CheckedDivExact(input_buffer_.size(),
+                                     config_.num_channels),
+                rtc::saturated_cast<int16_t>(max_encoded_bytes),
+                encoded.data());
+
+            RTC_CHECK_GE(status, 0);  // Fails only if fed invalid data.
+
+            return static_cast<size_t>(status);
+          });
+  input_buffer_.clear();
+
+  bool dtx_frame = (info.encoded_bytes <= 2);
+
+  // Will use new packet size for next encoding.
+  config_.frame_size_ms = next_frame_length_ms_;
+
+  if (adjust_bandwidth_ && bitrate_changed_) {
+    const auto bandwidth = GetNewBandwidth(config_, inst_);
+    if (bandwidth) {
+      RTC_CHECK_EQ(0, WebRtcOpus_SetBandwidth(inst_, *bandwidth));
+    }
+    bitrate_changed_ = false;
+  }
+
+  info.encoded_timestamp = first_timestamp_in_buffer_;
+  info.payload_type = payload_type_;
+  info.send_even_if_empty = true;  // Allows Opus to send empty packets.
+  // After 20 DTX frames (MAX_CONSECUTIVE_DTX) Opus will send a frame
+  // coding the background noise. Avoid flagging this frame as speech
+  // (even though there is a probability of the frame being speech).
+  info.speech = !dtx_frame && (consecutive_dtx_frames_ != 20);
+  info.encoder_type = CodecType::kOpus;
+
+  // Increase or reset DTX counter.
+  consecutive_dtx_frames_ = (dtx_frame) ? (consecutive_dtx_frames_ + 1) : (0);
+
+  return info;
+}
+
+size_t AudioEncoderOpusImpl::Num10msFramesPerPacket() const {
+  return static_cast<size_t>(rtc::CheckedDivExact(config_.frame_size_ms, 10));
+}
+
+size_t AudioEncoderOpusImpl::SamplesPer10msFrame() const {
+  return rtc::CheckedDivExact(kSampleRateHz, 100) * config_.num_channels;
+}
+
+size_t AudioEncoderOpusImpl::SufficientOutputBufferSize() const {
+  // Calculate the number of bytes we expect the encoder to produce,
+  // then multiply by two to give a wide margin for error.
+  const size_t bytes_per_millisecond =
+      static_cast<size_t>(GetBitrateBps(config_) / (1000 * 8) + 1);
+  const size_t approx_encoded_bytes =
+      Num10msFramesPerPacket() * 10 * bytes_per_millisecond;
+  return 2 * approx_encoded_bytes;
+}
+
+// If the given config is OK, recreate the Opus encoder instance with those
+// settings, save the config, and return true. Otherwise, do nothing and return
+// false.
+bool AudioEncoderOpusImpl::RecreateEncoderInstance(
+    const AudioEncoderOpusConfig& config) {
+  if (!config.IsOk())
+    return false;
+  config_ = config;
+  if (inst_)
+    RTC_CHECK_EQ(0, WebRtcOpus_EncoderFree(inst_));
+  input_buffer_.clear();
+  input_buffer_.reserve(Num10msFramesPerPacket() * SamplesPer10msFrame());
+  RTC_CHECK_EQ(0, WebRtcOpus_EncoderCreate(
+                      &inst_, config.num_channels,
+                      config.application ==
+                              AudioEncoderOpusConfig::ApplicationMode::kVoip
+                          ? 0
+                          : 1));
+  RTC_CHECK_EQ(0, WebRtcOpus_SetBitRate(inst_, GetBitrateBps(config)));
+  if (config.fec_enabled) {
+    RTC_CHECK_EQ(0, WebRtcOpus_EnableFec(inst_));
+  } else {
+    RTC_CHECK_EQ(0, WebRtcOpus_DisableFec(inst_));
+  }
+  RTC_CHECK_EQ(
+      0, WebRtcOpus_SetMaxPlaybackRate(inst_, config.max_playback_rate_hz));
+  // Use the default complexity if the start bitrate is within the hysteresis
+  // window.
+  complexity_ = GetNewComplexity(config).value_or(config.complexity);
+  RTC_CHECK_EQ(0, WebRtcOpus_SetComplexity(inst_, complexity_));
+  bitrate_changed_ = true;
+  if (config.dtx_enabled) {
+    RTC_CHECK_EQ(0, WebRtcOpus_EnableDtx(inst_));
+  } else {
+    RTC_CHECK_EQ(0, WebRtcOpus_DisableDtx(inst_));
+  }
+  RTC_CHECK_EQ(0,
+               WebRtcOpus_SetPacketLossRate(
+                   inst_, static_cast<int32_t>(packet_loss_rate_ * 100 + .5)));
+  if (config.cbr_enabled) {
+    RTC_CHECK_EQ(0, WebRtcOpus_EnableCbr(inst_));
+  } else {
+    RTC_CHECK_EQ(0, WebRtcOpus_DisableCbr(inst_));
+  }
+  num_channels_to_encode_ = NumChannels();
+  next_frame_length_ms_ = config_.frame_size_ms;
+  return true;
+}
+
+void AudioEncoderOpusImpl::SetFrameLength(int frame_length_ms) {
+  next_frame_length_ms_ = frame_length_ms;
+}
+
+void AudioEncoderOpusImpl::SetNumChannelsToEncode(
+    size_t num_channels_to_encode) {
+  RTC_DCHECK_GT(num_channels_to_encode, 0);
+  RTC_DCHECK_LE(num_channels_to_encode, config_.num_channels);
+
+  if (num_channels_to_encode_ == num_channels_to_encode)
+    return;
+
+  RTC_CHECK_EQ(0, WebRtcOpus_SetForceChannels(inst_, num_channels_to_encode));
+  num_channels_to_encode_ = num_channels_to_encode;
+}
+
+void AudioEncoderOpusImpl::SetProjectedPacketLossRate(float fraction) {
+  float opt_loss_rate = OptimizePacketLossRate(fraction, packet_loss_rate_);
+  if (packet_loss_rate_ != opt_loss_rate) {
+    packet_loss_rate_ = opt_loss_rate;
+    RTC_CHECK_EQ(
+        0, WebRtcOpus_SetPacketLossRate(
+               inst_, static_cast<int32_t>(packet_loss_rate_ * 100 + .5)));
+  }
+}
+
+void AudioEncoderOpusImpl::SetTargetBitrate(int bits_per_second) {
+  config_.bitrate_bps = rtc::SafeClamp<int>(
+      bits_per_second, AudioEncoderOpusConfig::kMinBitrateBps,
+      AudioEncoderOpusConfig::kMaxBitrateBps);
+  RTC_DCHECK(config_.IsOk());
+  RTC_CHECK_EQ(0, WebRtcOpus_SetBitRate(inst_, GetBitrateBps(config_)));
+  const auto new_complexity = GetNewComplexity(config_);
+  if (new_complexity && complexity_ != *new_complexity) {
+    complexity_ = *new_complexity;
+    RTC_CHECK_EQ(0, WebRtcOpus_SetComplexity(inst_, complexity_));
+  }
+  bitrate_changed_ = true;
+}
+
+void AudioEncoderOpusImpl::ApplyAudioNetworkAdaptor() {
+  auto config = audio_network_adaptor_->GetEncoderRuntimeConfig();
+
+  if (config.bitrate_bps)
+    SetTargetBitrate(*config.bitrate_bps);
+  if (config.frame_length_ms)
+    SetFrameLength(*config.frame_length_ms);
+  if (config.enable_fec)
+    SetFec(*config.enable_fec);
+  if (config.uplink_packet_loss_fraction)
+    SetProjectedPacketLossRate(*config.uplink_packet_loss_fraction);
+  if (config.enable_dtx)
+    SetDtx(*config.enable_dtx);
+  if (config.num_channels)
+    SetNumChannelsToEncode(*config.num_channels);
+}
+
+std::unique_ptr<AudioNetworkAdaptor>
+AudioEncoderOpusImpl::DefaultAudioNetworkAdaptorCreator(
+    const ProtoString& config_string,
+    RtcEventLog* event_log) const {
+  AudioNetworkAdaptorImpl::Config config;
+  config.event_log = event_log;
+  return std::unique_ptr<AudioNetworkAdaptor>(new AudioNetworkAdaptorImpl(
+      config, ControllerManagerImpl::Create(
+                  config_string, NumChannels(), supported_frame_lengths_ms(),
+                  AudioEncoderOpusConfig::kMinBitrateBps,
+                  num_channels_to_encode_, next_frame_length_ms_,
+                  GetTargetBitrate(), config_.fec_enabled, GetDtx())));
+}
+
+void AudioEncoderOpusImpl::MaybeUpdateUplinkBandwidth() {
+  if (audio_network_adaptor_) {
+    int64_t now_ms = rtc::TimeMillis();
+    if (!bitrate_smoother_last_update_time_ ||
+        now_ms - *bitrate_smoother_last_update_time_ >=
+            config_.uplink_bandwidth_update_interval_ms) {
+      rtc::Optional<float> smoothed_bitrate = bitrate_smoother_->GetAverage();
+      if (smoothed_bitrate)
+        audio_network_adaptor_->SetUplinkBandwidth(*smoothed_bitrate);
+      bitrate_smoother_last_update_time_ = now_ms;
+    }
+  }
+}
+
+ANAStats AudioEncoderOpusImpl::GetANAStats() const {
+  if (audio_network_adaptor_) {
+    return audio_network_adaptor_->GetStats();
+  }
+  return ANAStats();
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/opus/audio_encoder_opus.h b/modules/audio_coding/codecs/opus/audio_encoder_opus.h
new file mode 100644
index 0000000..49c5207
--- /dev/null
+++ b/modules/audio_coding/codecs/opus/audio_encoder_opus.h
@@ -0,0 +1,181 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_ENCODER_OPUS_H_
+#define MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_ENCODER_OPUS_H_
+
+#include <functional>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/audio_codecs/audio_format.h"
+#include "api/audio_codecs/opus/audio_encoder_opus_config.h"
+#include "api/optional.h"
+#include "common_audio/smoothing_filter.h"
+#include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h"
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/protobuf_utils.h"
+
+namespace webrtc {
+
+class RtcEventLog;
+
+struct CodecInst;
+
+class AudioEncoderOpusImpl final : public AudioEncoder {
+ public:
+  static AudioEncoderOpusConfig CreateConfig(const CodecInst& codec_inst);
+
+  // Returns empty if the current bitrate falls within the hysteresis window,
+  // defined by complexity_threshold_bps +/- complexity_threshold_window_bps.
+  // Otherwise, returns the current complexity depending on whether the
+  // current bitrate is above or below complexity_threshold_bps.
+  static rtc::Optional<int> GetNewComplexity(
+      const AudioEncoderOpusConfig& config);
+
+  // Returns OPUS_AUTO if the the current bitrate is above wideband threshold.
+  // Returns empty if it is below, but bandwidth coincides with the desired one.
+  // Otherwise returns the desired bandwidth.
+  static rtc::Optional<int> GetNewBandwidth(
+      const AudioEncoderOpusConfig& config,
+      OpusEncInst* inst);
+
+  using AudioNetworkAdaptorCreator =
+      std::function<std::unique_ptr<AudioNetworkAdaptor>(const std::string&,
+                                                         RtcEventLog*)>;
+
+  AudioEncoderOpusImpl(const AudioEncoderOpusConfig& config, int payload_type);
+
+  // Dependency injection for testing.
+  AudioEncoderOpusImpl(
+      const AudioEncoderOpusConfig& config,
+      int payload_type,
+      const AudioNetworkAdaptorCreator& audio_network_adaptor_creator,
+      std::unique_ptr<SmoothingFilter> bitrate_smoother);
+
+  explicit AudioEncoderOpusImpl(const CodecInst& codec_inst);
+  AudioEncoderOpusImpl(int payload_type, const SdpAudioFormat& format);
+  ~AudioEncoderOpusImpl() override;
+
+  // Static interface for use by BuiltinAudioEncoderFactory.
+  static constexpr const char* GetPayloadName() { return "opus"; }
+  static rtc::Optional<AudioCodecInfo> QueryAudioEncoder(
+      const SdpAudioFormat& format);
+
+  int SampleRateHz() const override;
+  size_t NumChannels() const override;
+  size_t Num10MsFramesInNextPacket() const override;
+  size_t Max10MsFramesInAPacket() const override;
+  int GetTargetBitrate() const override;
+
+  void Reset() override;
+  bool SetFec(bool enable) override;
+
+  // Set Opus DTX. Once enabled, Opus stops transmission, when it detects
+  // voice being inactive. During that, it still sends 2 packets (one for
+  // content, one for signaling) about every 400 ms.
+  bool SetDtx(bool enable) override;
+  bool GetDtx() const override;
+
+  bool SetApplication(Application application) override;
+  void SetMaxPlaybackRate(int frequency_hz) override;
+  bool EnableAudioNetworkAdaptor(const std::string& config_string,
+                                 RtcEventLog* event_log) override;
+  void DisableAudioNetworkAdaptor() override;
+  void OnReceivedUplinkPacketLossFraction(
+      float uplink_packet_loss_fraction) override;
+  void OnReceivedUplinkRecoverablePacketLossFraction(
+      float uplink_recoverable_packet_loss_fraction) override;
+  void OnReceivedUplinkBandwidth(
+      int target_audio_bitrate_bps,
+      rtc::Optional<int64_t> bwe_period_ms) override;
+  void OnReceivedRtt(int rtt_ms) override;
+  void OnReceivedOverhead(size_t overhead_bytes_per_packet) override;
+  void SetReceiverFrameLengthRange(int min_frame_length_ms,
+                                   int max_frame_length_ms) override;
+  ANAStats GetANAStats() const override;
+  rtc::ArrayView<const int> supported_frame_lengths_ms() const {
+    return config_.supported_frame_lengths_ms;
+  }
+
+  // Getters for testing.
+  float packet_loss_rate() const { return packet_loss_rate_; }
+  AudioEncoderOpusConfig::ApplicationMode application() const {
+    return config_.application;
+  }
+  bool fec_enabled() const { return config_.fec_enabled; }
+  size_t num_channels_to_encode() const { return num_channels_to_encode_; }
+  int next_frame_length_ms() const { return next_frame_length_ms_; }
+
+ protected:
+  EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
+                         rtc::ArrayView<const int16_t> audio,
+                         rtc::Buffer* encoded) override;
+
+ private:
+  class PacketLossFractionSmoother;
+
+  static rtc::Optional<AudioEncoderOpusConfig> SdpToConfig(
+      const SdpAudioFormat& format);
+  static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
+  static AudioCodecInfo QueryAudioEncoder(const AudioEncoderOpusConfig& config);
+  static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
+      const AudioEncoderOpusConfig&,
+      int payload_type);
+
+  size_t Num10msFramesPerPacket() const;
+  size_t SamplesPer10msFrame() const;
+  size_t SufficientOutputBufferSize() const;
+  bool RecreateEncoderInstance(const AudioEncoderOpusConfig& config);
+  void SetFrameLength(int frame_length_ms);
+  void SetNumChannelsToEncode(size_t num_channels_to_encode);
+  void SetProjectedPacketLossRate(float fraction);
+
+  // TODO(minyue): remove "override" when we can deprecate
+  // |AudioEncoder::SetTargetBitrate|.
+  void SetTargetBitrate(int target_bps) override;
+
+  void ApplyAudioNetworkAdaptor();
+  std::unique_ptr<AudioNetworkAdaptor> DefaultAudioNetworkAdaptorCreator(
+      const ProtoString& config_string,
+      RtcEventLog* event_log) const;
+
+  void MaybeUpdateUplinkBandwidth();
+
+  AudioEncoderOpusConfig config_;
+  const int payload_type_;
+  const bool send_side_bwe_with_overhead_;
+  const bool adjust_bandwidth_;
+  bool bitrate_changed_;
+  float packet_loss_rate_;
+  std::vector<int16_t> input_buffer_;
+  OpusEncInst* inst_;
+  uint32_t first_timestamp_in_buffer_;
+  size_t num_channels_to_encode_;
+  int next_frame_length_ms_;
+  int complexity_;
+  std::unique_ptr<PacketLossFractionSmoother> packet_loss_fraction_smoother_;
+  const AudioNetworkAdaptorCreator audio_network_adaptor_creator_;
+  std::unique_ptr<AudioNetworkAdaptor> audio_network_adaptor_;
+  rtc::Optional<size_t> overhead_bytes_per_packet_;
+  const std::unique_ptr<SmoothingFilter> bitrate_smoother_;
+  rtc::Optional<int64_t> bitrate_smoother_last_update_time_;
+  int consecutive_dtx_frames_;
+
+  friend struct AudioEncoderOpus;
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderOpusImpl);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_ENCODER_OPUS_H_
diff --git a/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc b/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc
new file mode 100644
index 0000000..dfef682
--- /dev/null
+++ b/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc
@@ -0,0 +1,877 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <array>
+#include <memory>
+#include <utility>
+
+#include "api/audio_codecs/opus/audio_encoder_opus.h"
+#include "common_audio/mocks/mock_smoothing_filter.h"
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/audio_network_adaptor/mock/mock_audio_network_adaptor.h"
+#include "modules/audio_coding/codecs/opus/audio_encoder_opus.h"
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+#include "modules/audio_coding/neteq/tools/audio_loop.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/fakeclock.h"
+#include "test/field_trial.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+using ::testing::NiceMock;
+using ::testing::Return;
+
+namespace {
+
+const CodecInst kDefaultOpusSettings = {105, "opus", 48000, 960, 1, 32000};
+constexpr int64_t kInitialTimeUs = 12345678;
+
+AudioEncoderOpusConfig CreateConfig(const CodecInst& codec_inst) {
+  AudioEncoderOpusConfig config;
+  config.frame_size_ms = rtc::CheckedDivExact(codec_inst.pacsize, 48);
+  config.num_channels = codec_inst.channels;
+  config.bitrate_bps = codec_inst.rate;
+  config.application = config.num_channels == 1
+                           ? AudioEncoderOpusConfig::ApplicationMode::kVoip
+                           : AudioEncoderOpusConfig::ApplicationMode::kAudio;
+  config.supported_frame_lengths_ms.push_back(config.frame_size_ms);
+  return config;
+}
+
+AudioEncoderOpusConfig CreateConfigWithParameters(
+    const SdpAudioFormat::Parameters& params) {
+  const SdpAudioFormat format("opus", 48000, 2, params);
+  return *AudioEncoderOpus::SdpToConfig(format);
+}
+
+struct AudioEncoderOpusStates {
+  std::shared_ptr<MockAudioNetworkAdaptor*> mock_audio_network_adaptor;
+  MockSmoothingFilter* mock_bitrate_smoother;
+  std::unique_ptr<AudioEncoderOpusImpl> encoder;
+  std::unique_ptr<rtc::ScopedFakeClock> fake_clock;
+  AudioEncoderOpusConfig config;
+};
+
+AudioEncoderOpusStates CreateCodec(size_t num_channels) {
+  AudioEncoderOpusStates states;
+  states.mock_audio_network_adaptor =
+      std::make_shared<MockAudioNetworkAdaptor*>(nullptr);
+  states.fake_clock.reset(new rtc::ScopedFakeClock());
+  states.fake_clock->SetTimeMicros(kInitialTimeUs);
+  std::weak_ptr<MockAudioNetworkAdaptor*> mock_ptr(
+      states.mock_audio_network_adaptor);
+  AudioEncoderOpusImpl::AudioNetworkAdaptorCreator creator =
+      [mock_ptr](const std::string&, RtcEventLog* event_log) {
+        std::unique_ptr<MockAudioNetworkAdaptor> adaptor(
+            new NiceMock<MockAudioNetworkAdaptor>());
+        EXPECT_CALL(*adaptor, Die());
+        if (auto sp = mock_ptr.lock()) {
+          *sp = adaptor.get();
+        } else {
+          RTC_NOTREACHED();
+        }
+        return adaptor;
+      };
+
+  CodecInst codec_inst = kDefaultOpusSettings;
+  codec_inst.channels = num_channels;
+  states.config = CreateConfig(codec_inst);
+  std::unique_ptr<MockSmoothingFilter> bitrate_smoother(
+      new MockSmoothingFilter());
+  states.mock_bitrate_smoother = bitrate_smoother.get();
+
+  states.encoder.reset(new AudioEncoderOpusImpl(
+      states.config, codec_inst.pltype, std::move(creator),
+      std::move(bitrate_smoother)));
+  return states;
+}
+
+AudioEncoderRuntimeConfig CreateEncoderRuntimeConfig() {
+  constexpr int kBitrate = 40000;
+  constexpr int kFrameLength = 60;
+  constexpr bool kEnableFec = true;
+  constexpr bool kEnableDtx = false;
+  constexpr size_t kNumChannels = 1;
+  constexpr float kPacketLossFraction = 0.1f;
+  AudioEncoderRuntimeConfig config;
+  config.bitrate_bps = kBitrate;
+  config.frame_length_ms = kFrameLength;
+  config.enable_fec = kEnableFec;
+  config.enable_dtx = kEnableDtx;
+  config.num_channels = kNumChannels;
+  config.uplink_packet_loss_fraction = kPacketLossFraction;
+  return config;
+}
+
+void CheckEncoderRuntimeConfig(const AudioEncoderOpusImpl* encoder,
+                               const AudioEncoderRuntimeConfig& config) {
+  EXPECT_EQ(*config.bitrate_bps, encoder->GetTargetBitrate());
+  EXPECT_EQ(*config.frame_length_ms, encoder->next_frame_length_ms());
+  EXPECT_EQ(*config.enable_fec, encoder->fec_enabled());
+  EXPECT_EQ(*config.enable_dtx, encoder->GetDtx());
+  EXPECT_EQ(*config.num_channels, encoder->num_channels_to_encode());
+}
+
+// Create 10ms audio data blocks for a total packet size of "packet_size_ms".
+std::unique_ptr<test::AudioLoop> Create10msAudioBlocks(
+    const std::unique_ptr<AudioEncoderOpusImpl>& encoder,
+    int packet_size_ms) {
+  const std::string file_name =
+      test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+
+  std::unique_ptr<test::AudioLoop> speech_data(new test::AudioLoop());
+  int audio_samples_per_ms =
+      rtc::CheckedDivExact(encoder->SampleRateHz(), 1000);
+  if (!speech_data->Init(
+          file_name,
+          packet_size_ms * audio_samples_per_ms *
+              encoder->num_channels_to_encode(),
+          10 * audio_samples_per_ms * encoder->num_channels_to_encode()))
+    return nullptr;
+  return speech_data;
+}
+
+}  // namespace
+
+TEST(AudioEncoderOpusTest, DefaultApplicationModeMono) {
+  auto states = CreateCodec(1);
+  EXPECT_EQ(AudioEncoderOpusConfig::ApplicationMode::kVoip,
+            states.encoder->application());
+}
+
+TEST(AudioEncoderOpusTest, DefaultApplicationModeStereo) {
+  auto states = CreateCodec(2);
+  EXPECT_EQ(AudioEncoderOpusConfig::ApplicationMode::kAudio,
+            states.encoder->application());
+}
+
+TEST(AudioEncoderOpusTest, ChangeApplicationMode) {
+  auto states = CreateCodec(2);
+  EXPECT_TRUE(
+      states.encoder->SetApplication(AudioEncoder::Application::kSpeech));
+  EXPECT_EQ(AudioEncoderOpusConfig::ApplicationMode::kVoip,
+            states.encoder->application());
+}
+
+TEST(AudioEncoderOpusTest, ResetWontChangeApplicationMode) {
+  auto states = CreateCodec(2);
+
+  // Trigger a reset.
+  states.encoder->Reset();
+  // Verify that the mode is still kAudio.
+  EXPECT_EQ(AudioEncoderOpusConfig::ApplicationMode::kAudio,
+            states.encoder->application());
+
+  // Now change to kVoip.
+  EXPECT_TRUE(
+      states.encoder->SetApplication(AudioEncoder::Application::kSpeech));
+  EXPECT_EQ(AudioEncoderOpusConfig::ApplicationMode::kVoip,
+            states.encoder->application());
+
+  // Trigger a reset again.
+  states.encoder->Reset();
+  // Verify that the mode is still kVoip.
+  EXPECT_EQ(AudioEncoderOpusConfig::ApplicationMode::kVoip,
+            states.encoder->application());
+}
+
+TEST(AudioEncoderOpusTest, ToggleDtx) {
+  auto states = CreateCodec(2);
+  // Enable DTX
+  EXPECT_TRUE(states.encoder->SetDtx(true));
+  EXPECT_TRUE(states.encoder->GetDtx());
+  // Turn off DTX.
+  EXPECT_TRUE(states.encoder->SetDtx(false));
+  EXPECT_FALSE(states.encoder->GetDtx());
+}
+
+TEST(AudioEncoderOpusTest,
+     OnReceivedUplinkBandwidthWithoutAudioNetworkAdaptor) {
+  auto states = CreateCodec(1);
+  // Constants are replicated from audio_states.encoderopus.cc.
+  const int kMinBitrateBps = 6000;
+  const int kMaxBitrateBps = 510000;
+  // Set a too low bitrate.
+  states.encoder->OnReceivedUplinkBandwidth(kMinBitrateBps - 1, rtc::nullopt);
+  EXPECT_EQ(kMinBitrateBps, states.encoder->GetTargetBitrate());
+  // Set a too high bitrate.
+  states.encoder->OnReceivedUplinkBandwidth(kMaxBitrateBps + 1, rtc::nullopt);
+  EXPECT_EQ(kMaxBitrateBps, states.encoder->GetTargetBitrate());
+  // Set the minimum rate.
+  states.encoder->OnReceivedUplinkBandwidth(kMinBitrateBps, rtc::nullopt);
+  EXPECT_EQ(kMinBitrateBps, states.encoder->GetTargetBitrate());
+  // Set the maximum rate.
+  states.encoder->OnReceivedUplinkBandwidth(kMaxBitrateBps, rtc::nullopt);
+  EXPECT_EQ(kMaxBitrateBps, states.encoder->GetTargetBitrate());
+  // Set rates from kMaxBitrateBps up to 32000 bps.
+  for (int rate = kMinBitrateBps; rate <= 32000; rate += 1000) {
+    states.encoder->OnReceivedUplinkBandwidth(rate, rtc::nullopt);
+    EXPECT_EQ(rate, states.encoder->GetTargetBitrate());
+  }
+}
+
+namespace {
+
+// Returns a vector with the n evenly-spaced numbers a, a + (b - a)/(n - 1),
+// ..., b.
+std::vector<float> IntervalSteps(float a, float b, size_t n) {
+  RTC_DCHECK_GT(n, 1u);
+  const float step = (b - a) / (n - 1);
+  std::vector<float> points;
+  points.push_back(a);
+  for (size_t i = 1; i < n - 1; ++i)
+    points.push_back(a + i * step);
+  points.push_back(b);
+  return points;
+}
+
+// Sets the packet loss rate to each number in the vector in turn, and verifies
+// that the loss rate as reported by the encoder is |expected_return| for all
+// of them.
+void TestSetPacketLossRate(AudioEncoderOpusStates* states,
+                           const std::vector<float>& losses,
+                           float expected_return) {
+  // |kSampleIntervalMs| is chosen to ease the calculation since
+  // 0.9999 ^ 184198 = 1e-8. Which minimizes the effect of
+  // PacketLossFractionSmoother used in AudioEncoderOpus.
+  constexpr int64_t kSampleIntervalMs = 184198;
+  for (float loss : losses) {
+    states->encoder->OnReceivedUplinkPacketLossFraction(loss);
+    states->fake_clock->AdvanceTime(
+        rtc::TimeDelta::FromMilliseconds(kSampleIntervalMs));
+    EXPECT_FLOAT_EQ(expected_return, states->encoder->packet_loss_rate());
+  }
+}
+
+}  // namespace
+
+TEST(AudioEncoderOpusTest, PacketLossRateOptimized) {
+  auto states = CreateCodec(1);
+  auto I = [](float a, float b) { return IntervalSteps(a, b, 10); };
+  constexpr float eps = 1e-8f;
+
+  // Note that the order of the following calls is critical.
+
+  // clang-format off
+  TestSetPacketLossRate(&states, I(0.00f      , 0.01f - eps), 0.00f);
+  TestSetPacketLossRate(&states, I(0.01f + eps, 0.06f - eps), 0.01f);
+  TestSetPacketLossRate(&states, I(0.06f + eps, 0.11f - eps), 0.05f);
+  TestSetPacketLossRate(&states, I(0.11f + eps, 0.22f - eps), 0.10f);
+  TestSetPacketLossRate(&states, I(0.22f + eps, 1.00f      ), 0.20f);
+
+  TestSetPacketLossRate(&states, I(1.00f      , 0.18f + eps), 0.20f);
+  TestSetPacketLossRate(&states, I(0.18f - eps, 0.09f + eps), 0.10f);
+  TestSetPacketLossRate(&states, I(0.09f - eps, 0.04f + eps), 0.05f);
+  TestSetPacketLossRate(&states, I(0.04f - eps, 0.01f + eps), 0.01f);
+  TestSetPacketLossRate(&states, I(0.01f - eps, 0.00f      ), 0.00f);
+  // clang-format on
+}
+
+TEST(AudioEncoderOpusTest, SetReceiverFrameLengthRange) {
+  auto states = CreateCodec(2);
+  // Before calling to |SetReceiverFrameLengthRange|,
+  // |supported_frame_lengths_ms| should contain only the frame length being
+  // used.
+  using ::testing::ElementsAre;
+  EXPECT_THAT(states.encoder->supported_frame_lengths_ms(),
+              ElementsAre(states.encoder->next_frame_length_ms()));
+  states.encoder->SetReceiverFrameLengthRange(0, 12345);
+  states.encoder->SetReceiverFrameLengthRange(21, 60);
+  EXPECT_THAT(states.encoder->supported_frame_lengths_ms(), ElementsAre(60));
+  states.encoder->SetReceiverFrameLengthRange(20, 59);
+  EXPECT_THAT(states.encoder->supported_frame_lengths_ms(), ElementsAre(20));
+}
+
+TEST(AudioEncoderOpusTest,
+     InvokeAudioNetworkAdaptorOnReceivedUplinkPacketLossFraction) {
+  auto states = CreateCodec(2);
+  states.encoder->EnableAudioNetworkAdaptor("", nullptr);
+
+  auto config = CreateEncoderRuntimeConfig();
+  EXPECT_CALL(**states.mock_audio_network_adaptor, GetEncoderRuntimeConfig())
+      .WillOnce(Return(config));
+
+  // Since using mock audio network adaptor, any packet loss fraction is fine.
+  constexpr float kUplinkPacketLoss = 0.1f;
+  EXPECT_CALL(**states.mock_audio_network_adaptor,
+              SetUplinkPacketLossFraction(kUplinkPacketLoss));
+  states.encoder->OnReceivedUplinkPacketLossFraction(kUplinkPacketLoss);
+
+  CheckEncoderRuntimeConfig(states.encoder.get(), config);
+}
+
+TEST(AudioEncoderOpusTest, InvokeAudioNetworkAdaptorOnReceivedUplinkBandwidth) {
+  auto states = CreateCodec(2);
+  states.encoder->EnableAudioNetworkAdaptor("", nullptr);
+
+  auto config = CreateEncoderRuntimeConfig();
+  EXPECT_CALL(**states.mock_audio_network_adaptor, GetEncoderRuntimeConfig())
+      .WillOnce(Return(config));
+
+  // Since using mock audio network adaptor, any target audio bitrate is fine.
+  constexpr int kTargetAudioBitrate = 30000;
+  constexpr int64_t kProbingIntervalMs = 3000;
+  EXPECT_CALL(**states.mock_audio_network_adaptor,
+              SetTargetAudioBitrate(kTargetAudioBitrate));
+  EXPECT_CALL(*states.mock_bitrate_smoother,
+              SetTimeConstantMs(kProbingIntervalMs * 4));
+  EXPECT_CALL(*states.mock_bitrate_smoother, AddSample(kTargetAudioBitrate));
+  states.encoder->OnReceivedUplinkBandwidth(kTargetAudioBitrate,
+                                            kProbingIntervalMs);
+
+  CheckEncoderRuntimeConfig(states.encoder.get(), config);
+}
+
+TEST(AudioEncoderOpusTest, InvokeAudioNetworkAdaptorOnReceivedRtt) {
+  auto states = CreateCodec(2);
+  states.encoder->EnableAudioNetworkAdaptor("", nullptr);
+
+  auto config = CreateEncoderRuntimeConfig();
+  EXPECT_CALL(**states.mock_audio_network_adaptor, GetEncoderRuntimeConfig())
+      .WillOnce(Return(config));
+
+  // Since using mock audio network adaptor, any rtt is fine.
+  constexpr int kRtt = 30;
+  EXPECT_CALL(**states.mock_audio_network_adaptor, SetRtt(kRtt));
+  states.encoder->OnReceivedRtt(kRtt);
+
+  CheckEncoderRuntimeConfig(states.encoder.get(), config);
+}
+
+TEST(AudioEncoderOpusTest, InvokeAudioNetworkAdaptorOnReceivedOverhead) {
+  auto states = CreateCodec(2);
+  states.encoder->EnableAudioNetworkAdaptor("", nullptr);
+
+  auto config = CreateEncoderRuntimeConfig();
+  EXPECT_CALL(**states.mock_audio_network_adaptor, GetEncoderRuntimeConfig())
+      .WillOnce(Return(config));
+
+  // Since using mock audio network adaptor, any overhead is fine.
+  constexpr size_t kOverhead = 64;
+  EXPECT_CALL(**states.mock_audio_network_adaptor, SetOverhead(kOverhead));
+  states.encoder->OnReceivedOverhead(kOverhead);
+
+  CheckEncoderRuntimeConfig(states.encoder.get(), config);
+}
+
+TEST(AudioEncoderOpusTest,
+     PacketLossFractionSmoothedOnSetUplinkPacketLossFraction) {
+  auto states = CreateCodec(2);
+
+  // The values are carefully chosen so that if no smoothing is made, the test
+  // will fail.
+  constexpr float kPacketLossFraction_1 = 0.02f;
+  constexpr float kPacketLossFraction_2 = 0.198f;
+  // |kSecondSampleTimeMs| is chosen to ease the calculation since
+  // 0.9999 ^ 6931 = 0.5.
+  constexpr int64_t kSecondSampleTimeMs = 6931;
+
+  // First time, no filtering.
+  states.encoder->OnReceivedUplinkPacketLossFraction(kPacketLossFraction_1);
+  EXPECT_FLOAT_EQ(0.01f, states.encoder->packet_loss_rate());
+
+  states.fake_clock->AdvanceTime(
+      rtc::TimeDelta::FromMilliseconds(kSecondSampleTimeMs));
+  states.encoder->OnReceivedUplinkPacketLossFraction(kPacketLossFraction_2);
+
+  // Now the output of packet loss fraction smoother should be
+  // (0.02 + 0.198) / 2 = 0.109, which reach the threshold for the optimized
+  // packet loss rate to increase to 0.05. If no smoothing has been made, the
+  // optimized packet loss rate should have been increase to 0.1.
+  EXPECT_FLOAT_EQ(0.05f, states.encoder->packet_loss_rate());
+}
+
+TEST(AudioEncoderOpusTest, DoNotInvokeSetTargetBitrateIfOverheadUnknown) {
+  test::ScopedFieldTrials override_field_trials(
+      "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+
+  auto states = CreateCodec(2);
+
+  states.encoder->OnReceivedUplinkBandwidth(kDefaultOpusSettings.rate * 2,
+                                            rtc::nullopt);
+
+  // Since |OnReceivedOverhead| has not been called, the codec bitrate should
+  // not change.
+  EXPECT_EQ(kDefaultOpusSettings.rate, states.encoder->GetTargetBitrate());
+}
+
+TEST(AudioEncoderOpusTest, OverheadRemovedFromTargetAudioBitrate) {
+  test::ScopedFieldTrials override_field_trials(
+      "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+
+  auto states = CreateCodec(2);
+
+  constexpr size_t kOverheadBytesPerPacket = 64;
+  states.encoder->OnReceivedOverhead(kOverheadBytesPerPacket);
+
+  constexpr int kTargetBitrateBps = 40000;
+  states.encoder->OnReceivedUplinkBandwidth(kTargetBitrateBps, rtc::nullopt);
+
+  int packet_rate = rtc::CheckedDivExact(48000, kDefaultOpusSettings.pacsize);
+  EXPECT_EQ(kTargetBitrateBps -
+                8 * static_cast<int>(kOverheadBytesPerPacket) * packet_rate,
+            states.encoder->GetTargetBitrate());
+}
+
+TEST(AudioEncoderOpusTest, BitrateBounded) {
+  test::ScopedFieldTrials override_field_trials(
+      "WebRTC-SendSideBwe-WithOverhead/Enabled/");
+
+  constexpr int kMinBitrateBps = 6000;
+  constexpr int kMaxBitrateBps = 510000;
+
+  auto states = CreateCodec(2);
+
+  constexpr size_t kOverheadBytesPerPacket = 64;
+  states.encoder->OnReceivedOverhead(kOverheadBytesPerPacket);
+
+  int packet_rate = rtc::CheckedDivExact(48000, kDefaultOpusSettings.pacsize);
+
+  // Set a target rate that is smaller than |kMinBitrateBps| when overhead is
+  // subtracted. The eventual codec rate should be bounded by |kMinBitrateBps|.
+  int target_bitrate =
+      kOverheadBytesPerPacket * 8 * packet_rate + kMinBitrateBps - 1;
+  states.encoder->OnReceivedUplinkBandwidth(target_bitrate, rtc::nullopt);
+  EXPECT_EQ(kMinBitrateBps, states.encoder->GetTargetBitrate());
+
+  // Set a target rate that is greater than |kMaxBitrateBps| when overhead is
+  // subtracted. The eventual codec rate should be bounded by |kMaxBitrateBps|.
+  target_bitrate =
+      kOverheadBytesPerPacket * 8 * packet_rate + kMaxBitrateBps + 1;
+  states.encoder->OnReceivedUplinkBandwidth(target_bitrate, rtc::nullopt);
+  EXPECT_EQ(kMaxBitrateBps, states.encoder->GetTargetBitrate());
+}
+
+// Verifies that the complexity adaptation in the config works as intended.
+TEST(AudioEncoderOpusTest, ConfigComplexityAdaptation) {
+  AudioEncoderOpusConfig config;
+  config.low_rate_complexity = 8;
+  config.complexity = 6;
+
+  // Bitrate within hysteresis window. Expect empty output.
+  config.bitrate_bps = 12500;
+  EXPECT_EQ(rtc::nullopt, AudioEncoderOpusImpl::GetNewComplexity(config));
+
+  // Bitrate below hysteresis window. Expect higher complexity.
+  config.bitrate_bps = 10999;
+  EXPECT_EQ(8, AudioEncoderOpusImpl::GetNewComplexity(config));
+
+  // Bitrate within hysteresis window. Expect empty output.
+  config.bitrate_bps = 12500;
+  EXPECT_EQ(rtc::nullopt, AudioEncoderOpusImpl::GetNewComplexity(config));
+
+  // Bitrate above hysteresis window. Expect lower complexity.
+  config.bitrate_bps = 14001;
+  EXPECT_EQ(6, AudioEncoderOpusImpl::GetNewComplexity(config));
+}
+
+// Verifies that the bandwidth adaptation in the config works as intended.
+TEST(AudioEncoderOpusTest, ConfigBandwidthAdaptation) {
+  AudioEncoderOpusConfig config;
+  // Sample rate of Opus.
+  constexpr size_t kOpusRateKhz = 48;
+  std::vector<int16_t> silence(
+      kOpusRateKhz * config.frame_size_ms * config.num_channels, 0);
+  constexpr size_t kMaxBytes = 1000;
+  uint8_t bitstream[kMaxBytes];
+
+  OpusEncInst* inst;
+  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(
+                   &inst, config.num_channels,
+                   config.application ==
+                           AudioEncoderOpusConfig::ApplicationMode::kVoip
+                       ? 0
+                       : 1));
+
+  // Bitrate below minmum wideband. Expect narrowband.
+  config.bitrate_bps = rtc::Optional<int>(7999);
+  auto bandwidth = AudioEncoderOpusImpl::GetNewBandwidth(config, inst);
+  EXPECT_EQ(rtc::Optional<int>(OPUS_BANDWIDTH_NARROWBAND), bandwidth);
+  WebRtcOpus_SetBandwidth(inst, *bandwidth);
+  // It is necessary to encode here because Opus has some logic in the encoder
+  // that goes from the user-set bandwidth to the used and returned one.
+  WebRtcOpus_Encode(inst, silence.data(),
+                    rtc::CheckedDivExact(silence.size(), config.num_channels),
+                    kMaxBytes, bitstream);
+
+  // Bitrate not yet above maximum narrowband. Expect empty.
+  config.bitrate_bps = rtc::Optional<int>(9000);
+  bandwidth = AudioEncoderOpusImpl::GetNewBandwidth(config, inst);
+  EXPECT_EQ(rtc::Optional<int>(), bandwidth);
+
+  // Bitrate above maximum narrowband. Expect wideband.
+  config.bitrate_bps = rtc::Optional<int>(9001);
+  bandwidth = AudioEncoderOpusImpl::GetNewBandwidth(config, inst);
+  EXPECT_EQ(rtc::Optional<int>(OPUS_BANDWIDTH_WIDEBAND), bandwidth);
+  WebRtcOpus_SetBandwidth(inst, *bandwidth);
+  // It is necessary to encode here because Opus has some logic in the encoder
+  // that goes from the user-set bandwidth to the used and returned one.
+  WebRtcOpus_Encode(inst, silence.data(),
+                    rtc::CheckedDivExact(silence.size(), config.num_channels),
+                    kMaxBytes, bitstream);
+
+  // Bitrate not yet below minimum wideband. Expect empty.
+  config.bitrate_bps = rtc::Optional<int>(8000);
+  bandwidth = AudioEncoderOpusImpl::GetNewBandwidth(config, inst);
+  EXPECT_EQ(rtc::Optional<int>(), bandwidth);
+
+  // Bitrate above automatic threshold. Expect automatic.
+  config.bitrate_bps = rtc::Optional<int>(12001);
+  bandwidth = AudioEncoderOpusImpl::GetNewBandwidth(config, inst);
+  EXPECT_EQ(rtc::Optional<int>(OPUS_AUTO), bandwidth);
+
+  EXPECT_EQ(0, WebRtcOpus_EncoderFree(inst));
+}
+
+TEST(AudioEncoderOpusTest, EmptyConfigDoesNotAffectEncoderSettings) {
+  auto states = CreateCodec(2);
+  states.encoder->EnableAudioNetworkAdaptor("", nullptr);
+
+  auto config = CreateEncoderRuntimeConfig();
+  AudioEncoderRuntimeConfig empty_config;
+
+  EXPECT_CALL(**states.mock_audio_network_adaptor, GetEncoderRuntimeConfig())
+      .WillOnce(Return(config))
+      .WillOnce(Return(empty_config));
+
+  constexpr size_t kOverhead = 64;
+  EXPECT_CALL(**states.mock_audio_network_adaptor, SetOverhead(kOverhead))
+      .Times(2);
+  states.encoder->OnReceivedOverhead(kOverhead);
+  states.encoder->OnReceivedOverhead(kOverhead);
+
+  CheckEncoderRuntimeConfig(states.encoder.get(), config);
+}
+
+TEST(AudioEncoderOpusTest, UpdateUplinkBandwidthInAudioNetworkAdaptor) {
+  auto states = CreateCodec(2);
+  states.encoder->EnableAudioNetworkAdaptor("", nullptr);
+  std::array<int16_t, 480 * 2> audio;
+  audio.fill(0);
+  rtc::Buffer encoded;
+  EXPECT_CALL(*states.mock_bitrate_smoother, GetAverage())
+      .WillOnce(Return(50000));
+  EXPECT_CALL(**states.mock_audio_network_adaptor, SetUplinkBandwidth(50000));
+  states.encoder->Encode(
+      0, rtc::ArrayView<const int16_t>(audio.data(), audio.size()), &encoded);
+
+  // Repeat update uplink bandwidth tests.
+  for (int i = 0; i < 5; i++) {
+    // Don't update till it is time to update again.
+    states.fake_clock->AdvanceTime(rtc::TimeDelta::FromMilliseconds(
+        states.config.uplink_bandwidth_update_interval_ms - 1));
+    states.encoder->Encode(
+        0, rtc::ArrayView<const int16_t>(audio.data(), audio.size()), &encoded);
+
+    // Update when it is time to update.
+    EXPECT_CALL(*states.mock_bitrate_smoother, GetAverage())
+        .WillOnce(Return(40000));
+    EXPECT_CALL(**states.mock_audio_network_adaptor, SetUplinkBandwidth(40000));
+    states.fake_clock->AdvanceTime(rtc::TimeDelta::FromMilliseconds(1));
+    states.encoder->Encode(
+        0, rtc::ArrayView<const int16_t>(audio.data(), audio.size()), &encoded);
+  }
+}
+
+TEST(AudioEncoderOpusTest, EncodeAtMinBitrate) {
+  auto states = CreateCodec(1);
+  constexpr int kNumPacketsToEncode = 2;
+  auto audio_frames =
+      Create10msAudioBlocks(states.encoder, kNumPacketsToEncode * 20);
+  ASSERT_TRUE(audio_frames) << "Create10msAudioBlocks failed";
+  rtc::Buffer encoded;
+  uint32_t rtp_timestamp = 12345;  // Just a number not important to this test.
+
+  states.encoder->OnReceivedUplinkBandwidth(0, rtc::nullopt);
+  for (int packet_index = 0; packet_index < kNumPacketsToEncode;
+       packet_index++) {
+    // Make sure we are not encoding before we have enough data for
+    // a 20ms packet.
+    for (int index = 0; index < 1; index++) {
+      states.encoder->Encode(rtp_timestamp, audio_frames->GetNextBlock(),
+                             &encoded);
+      EXPECT_EQ(0u, encoded.size());
+    }
+
+    // Should encode now.
+    states.encoder->Encode(rtp_timestamp, audio_frames->GetNextBlock(),
+                           &encoded);
+    EXPECT_GT(encoded.size(), 0u);
+    encoded.Clear();
+  }
+}
+
+TEST(AudioEncoderOpusTest, TestConfigDefaults) {
+  const auto config_opt = AudioEncoderOpus::SdpToConfig({"opus", 48000, 2});
+  ASSERT_TRUE(config_opt);
+  EXPECT_EQ(48000, config_opt->max_playback_rate_hz);
+  EXPECT_EQ(1u, config_opt->num_channels);
+  EXPECT_FALSE(config_opt->fec_enabled);
+  EXPECT_FALSE(config_opt->dtx_enabled);
+  EXPECT_EQ(20, config_opt->frame_size_ms);
+}
+
+TEST(AudioEncoderOpusTest, TestConfigFromParams) {
+  const auto config1 = CreateConfigWithParameters({{"stereo", "0"}});
+  EXPECT_EQ(1U, config1.num_channels);
+
+  const auto config2 = CreateConfigWithParameters({{"stereo", "1"}});
+  EXPECT_EQ(2U, config2.num_channels);
+
+  const auto config3 = CreateConfigWithParameters({{"useinbandfec", "0"}});
+  EXPECT_FALSE(config3.fec_enabled);
+
+  const auto config4 = CreateConfigWithParameters({{"useinbandfec", "1"}});
+  EXPECT_TRUE(config4.fec_enabled);
+
+  const auto config5 = CreateConfigWithParameters({{"usedtx", "0"}});
+  EXPECT_FALSE(config5.dtx_enabled);
+
+  const auto config6 = CreateConfigWithParameters({{"usedtx", "1"}});
+  EXPECT_TRUE(config6.dtx_enabled);
+
+  const auto config7 = CreateConfigWithParameters({{"cbr", "0"}});
+  EXPECT_FALSE(config7.cbr_enabled);
+
+  const auto config8 = CreateConfigWithParameters({{"cbr", "1"}});
+  EXPECT_TRUE(config8.cbr_enabled);
+
+  const auto config9 =
+      CreateConfigWithParameters({{"maxplaybackrate", "12345"}});
+  EXPECT_EQ(12345, config9.max_playback_rate_hz);
+
+  const auto config10 =
+      CreateConfigWithParameters({{"maxaveragebitrate", "96000"}});
+  EXPECT_EQ(96000, config10.bitrate_bps);
+
+  const auto config11 = CreateConfigWithParameters({{"maxptime", "40"}});
+  for (int frame_length : config11.supported_frame_lengths_ms) {
+    EXPECT_LE(frame_length, 40);
+  }
+
+  const auto config12 = CreateConfigWithParameters({{"minptime", "40"}});
+  for (int frame_length : config12.supported_frame_lengths_ms) {
+    EXPECT_GE(frame_length, 40);
+  }
+
+  const auto config13 = CreateConfigWithParameters({{"ptime", "40"}});
+  EXPECT_EQ(40, config13.frame_size_ms);
+
+  constexpr int kMinSupportedFrameLength = 10;
+  constexpr int kMaxSupportedFrameLength =
+      WEBRTC_OPUS_SUPPORT_120MS_PTIME ? 120 : 60;
+
+  const auto config14 = CreateConfigWithParameters({{"ptime", "1"}});
+  EXPECT_EQ(kMinSupportedFrameLength, config14.frame_size_ms);
+
+  const auto config15 = CreateConfigWithParameters({{"ptime", "2000"}});
+  EXPECT_EQ(kMaxSupportedFrameLength, config15.frame_size_ms);
+}
+
+TEST(AudioEncoderOpusTest, TestConfigFromInvalidParams) {
+  const webrtc::SdpAudioFormat format("opus", 48000, 2);
+  const auto default_config = *AudioEncoderOpus::SdpToConfig(format);
+#if WEBRTC_OPUS_SUPPORT_120MS_PTIME
+  const std::vector<int> default_supported_frame_lengths_ms({20, 60, 120});
+#else
+  const std::vector<int> default_supported_frame_lengths_ms({20, 60});
+#endif
+
+  AudioEncoderOpusConfig config;
+  config = CreateConfigWithParameters({{"stereo", "invalid"}});
+  EXPECT_EQ(default_config.num_channels, config.num_channels);
+
+  config = CreateConfigWithParameters({{"useinbandfec", "invalid"}});
+  EXPECT_EQ(default_config.fec_enabled, config.fec_enabled);
+
+  config = CreateConfigWithParameters({{"usedtx", "invalid"}});
+  EXPECT_EQ(default_config.dtx_enabled, config.dtx_enabled);
+
+  config = CreateConfigWithParameters({{"cbr", "invalid"}});
+  EXPECT_EQ(default_config.dtx_enabled, config.dtx_enabled);
+
+  config = CreateConfigWithParameters({{"maxplaybackrate", "0"}});
+  EXPECT_EQ(default_config.max_playback_rate_hz, config.max_playback_rate_hz);
+
+  config = CreateConfigWithParameters({{"maxplaybackrate", "-23"}});
+  EXPECT_EQ(default_config.max_playback_rate_hz, config.max_playback_rate_hz);
+
+  config = CreateConfigWithParameters({{"maxplaybackrate", "not a number!"}});
+  EXPECT_EQ(default_config.max_playback_rate_hz, config.max_playback_rate_hz);
+
+  config = CreateConfigWithParameters({{"maxaveragebitrate", "0"}});
+  EXPECT_EQ(6000, config.bitrate_bps);
+
+  config = CreateConfigWithParameters({{"maxaveragebitrate", "-1000"}});
+  EXPECT_EQ(6000, config.bitrate_bps);
+
+  config = CreateConfigWithParameters({{"maxaveragebitrate", "1024000"}});
+  EXPECT_EQ(510000, config.bitrate_bps);
+
+  config = CreateConfigWithParameters({{"maxaveragebitrate", "not a number!"}});
+  EXPECT_EQ(default_config.bitrate_bps, config.bitrate_bps);
+
+  config = CreateConfigWithParameters({{"maxptime", "invalid"}});
+  EXPECT_EQ(default_supported_frame_lengths_ms,
+            config.supported_frame_lengths_ms);
+
+  config = CreateConfigWithParameters({{"minptime", "invalid"}});
+  EXPECT_EQ(default_supported_frame_lengths_ms,
+            config.supported_frame_lengths_ms);
+
+  config = CreateConfigWithParameters({{"ptime", "invalid"}});
+  EXPECT_EQ(default_supported_frame_lengths_ms,
+            config.supported_frame_lengths_ms);
+}
+
+// Test that bitrate will be overridden by the "maxaveragebitrate" parameter.
+// Also test that the "maxaveragebitrate" can't be set to values outside the
+// range of 6000 and 510000
+TEST(AudioEncoderOpusTest, SetSendCodecOpusMaxAverageBitrate) {
+  // Ignore if less than 6000.
+  const auto config1 = AudioEncoderOpus::SdpToConfig(
+      {"opus", 48000, 2, {{"maxaveragebitrate", "5999"}}});
+  EXPECT_EQ(6000, config1->bitrate_bps);
+
+  // Ignore if larger than 510000.
+  const auto config2 = AudioEncoderOpus::SdpToConfig(
+      {"opus", 48000, 2, {{"maxaveragebitrate", "510001"}}});
+  EXPECT_EQ(510000, config2->bitrate_bps);
+
+  const auto config3 = AudioEncoderOpus::SdpToConfig(
+      {"opus", 48000, 2, {{"maxaveragebitrate", "200000"}}});
+  EXPECT_EQ(200000, config3->bitrate_bps);
+}
+
+// Test maxplaybackrate <= 8000 triggers Opus narrow band mode.
+TEST(AudioEncoderOpusTest, SetMaxPlaybackRateNb) {
+  auto config = CreateConfigWithParameters({{"maxplaybackrate", "8000"}});
+  EXPECT_EQ(8000, config.max_playback_rate_hz);
+  EXPECT_EQ(12000, config.bitrate_bps);
+
+  config = CreateConfigWithParameters({{"maxplaybackrate", "8000"},
+                                       {"stereo", "1"}});
+  EXPECT_EQ(8000, config.max_playback_rate_hz);
+  EXPECT_EQ(24000, config.bitrate_bps);
+}
+
+// Test 8000 < maxplaybackrate <= 12000 triggers Opus medium band mode.
+TEST(AudioEncoderOpusTest, SetMaxPlaybackRateMb) {
+  auto config = CreateConfigWithParameters({{"maxplaybackrate", "8001"}});
+  EXPECT_EQ(8001, config.max_playback_rate_hz);
+  EXPECT_EQ(20000, config.bitrate_bps);
+
+  config = CreateConfigWithParameters({{"maxplaybackrate", "8001"},
+                                       {"stereo", "1"}});
+  EXPECT_EQ(8001, config.max_playback_rate_hz);
+  EXPECT_EQ(40000, config.bitrate_bps);
+}
+
+// Test 12000 < maxplaybackrate <= 16000 triggers Opus wide band mode.
+TEST(AudioEncoderOpusTest, SetMaxPlaybackRateWb) {
+  auto config = CreateConfigWithParameters({{"maxplaybackrate", "12001"}});
+  EXPECT_EQ(12001, config.max_playback_rate_hz);
+  EXPECT_EQ(20000, config.bitrate_bps);
+
+  config = CreateConfigWithParameters({{"maxplaybackrate", "12001"},
+                                       {"stereo", "1"}});
+  EXPECT_EQ(12001, config.max_playback_rate_hz);
+  EXPECT_EQ(40000, config.bitrate_bps);
+}
+
+// Test 16000 < maxplaybackrate <= 24000 triggers Opus super wide band mode.
+TEST(AudioEncoderOpusTest, SetMaxPlaybackRateSwb) {
+  auto config = CreateConfigWithParameters({{"maxplaybackrate", "16001"}});
+  EXPECT_EQ(16001, config.max_playback_rate_hz);
+  EXPECT_EQ(32000, config.bitrate_bps);
+
+  config = CreateConfigWithParameters({{"maxplaybackrate", "16001"},
+                                       {"stereo", "1"}});
+  EXPECT_EQ(16001, config.max_playback_rate_hz);
+  EXPECT_EQ(64000, config.bitrate_bps);
+}
+
+// Test 24000 < maxplaybackrate triggers Opus full band mode.
+TEST(AudioEncoderOpusTest, SetMaxPlaybackRateFb) {
+  auto config = CreateConfigWithParameters({{"maxplaybackrate", "24001"}});
+  EXPECT_EQ(24001, config.max_playback_rate_hz);
+  EXPECT_EQ(32000, config.bitrate_bps);
+
+  config = CreateConfigWithParameters({{"maxplaybackrate", "24001"},
+                                       {"stereo", "1"}});
+  EXPECT_EQ(24001, config.max_playback_rate_hz);
+  EXPECT_EQ(64000, config.bitrate_bps);
+}
+
+TEST(AudioEncoderOpusTest, OpusFlagDtxAsNonSpeech) {
+  // Create encoder with DTX enabled.
+  AudioEncoderOpusConfig config;
+  config.dtx_enabled = true;
+  constexpr int payload_type = 17;
+  const auto encoder = AudioEncoderOpus::MakeAudioEncoder(config, payload_type);
+
+  // Open file containing speech and silence.
+  const std::string kInputFileName =
+      webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+  test::AudioLoop audio_loop;
+  // Use the file as if it were sampled at 48 kHz.
+  constexpr int kSampleRateHz = 48000;
+  EXPECT_EQ(kSampleRateHz, encoder->SampleRateHz());
+  constexpr size_t kMaxLoopLengthSamples =
+      kSampleRateHz * 10;  // Max 10 second loop.
+  constexpr size_t kInputBlockSizeSamples =
+      10 * kSampleRateHz / 1000;  // 10 ms.
+  EXPECT_TRUE(audio_loop.Init(kInputFileName, kMaxLoopLengthSamples,
+                              kInputBlockSizeSamples));
+
+  // Encode.
+  AudioEncoder::EncodedInfo info;
+  rtc::Buffer encoded(500);
+  int nonspeech_frames = 0;
+  int max_nonspeech_frames = 0;
+  int dtx_frames = 0;
+  int max_dtx_frames = 0;
+  uint32_t rtp_timestamp = 0u;
+  for (size_t i = 0; i < 500; ++i) {
+    encoded.Clear();
+
+    // Every second call to the encoder will generate an Opus packet.
+    for (int j = 0; j < 2; j++) {
+      info =
+          encoder->Encode(rtp_timestamp, audio_loop.GetNextBlock(), &encoded);
+      rtp_timestamp += kInputBlockSizeSamples;
+    }
+
+    // Bookkeeping of number of DTX frames.
+    if (info.encoded_bytes <= 2) {
+      ++dtx_frames;
+    } else {
+      if (dtx_frames > max_dtx_frames)
+        max_dtx_frames = dtx_frames;
+      dtx_frames = 0;
+    }
+
+    // Bookkeeping of number of non-speech frames.
+    if (info.speech == 0) {
+      ++nonspeech_frames;
+    } else {
+      if (nonspeech_frames > max_nonspeech_frames)
+        max_nonspeech_frames = nonspeech_frames;
+      nonspeech_frames = 0;
+    }
+  }
+
+  // Maximum number of consecutive non-speech packets should exceed 20.
+  EXPECT_GT(max_nonspeech_frames, 20);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/opus/opus_bandwidth_unittest.cc b/modules/audio_coding/codecs/opus/opus_bandwidth_unittest.cc
new file mode 100644
index 0000000..4394949
--- /dev/null
+++ b/modules/audio_coding/codecs/opus/opus_bandwidth_unittest.cc
@@ -0,0 +1,151 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/audio_codecs/opus/audio_decoder_opus.h"
+#include "api/audio_codecs/opus/audio_encoder_opus.h"
+#include "common_audio/include/audio_util.h"
+#include "common_audio/lapped_transform.h"
+#include "common_audio/window_generator.h"
+#include "modules/audio_coding/neteq/tools/audio_loop.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+namespace {
+
+constexpr size_t kNumChannels = 1u;
+constexpr int kSampleRateHz = 48000;
+constexpr size_t kMaxLoopLengthSamples = kSampleRateHz * 50;  // 50 seconds.
+constexpr size_t kInputBlockSizeSamples = 10 * kSampleRateHz / 1000;   // 10 ms
+constexpr size_t kOutputBlockSizeSamples = 20 * kSampleRateHz / 1000;  // 20 ms
+constexpr size_t kFftSize = 1024;
+constexpr size_t kNarrowbandSize = 4000 * kFftSize / kSampleRateHz;
+constexpr float kKbdAlpha = 1.5f;
+
+class PowerRatioEstimator : public LappedTransform::Callback {
+ public:
+  PowerRatioEstimator() : low_pow_(0.f), high_pow_(0.f) {
+    WindowGenerator::KaiserBesselDerived(kKbdAlpha, kFftSize, window_);
+    transform_.reset(new LappedTransform(kNumChannels, 0u,
+                                         kInputBlockSizeSamples, window_,
+                                         kFftSize, kFftSize / 2, this));
+  }
+
+  void ProcessBlock(float* data) { transform_->ProcessChunk(&data, nullptr); }
+
+  float PowerRatio() { return high_pow_ / low_pow_; }
+
+ protected:
+  void ProcessAudioBlock(const std::complex<float>* const* input,
+                         size_t num_input_channels,
+                         size_t num_freq_bins,
+                         size_t num_output_channels,
+                         std::complex<float>* const* output) override {
+    float low_pow = 0.f;
+    float high_pow = 0.f;
+    for (size_t i = 0u; i < num_input_channels; ++i) {
+      for (size_t j = 0u; j < kNarrowbandSize; ++j) {
+        float low_mag = std::abs(input[i][j]);
+        low_pow += low_mag * low_mag;
+        float high_mag = std::abs(input[i][j + kNarrowbandSize]);
+        high_pow += high_mag * high_mag;
+      }
+    }
+    low_pow_ += low_pow / (num_input_channels * kFftSize);
+    high_pow_ += high_pow / (num_input_channels * kFftSize);
+  }
+
+ private:
+  std::unique_ptr<LappedTransform> transform_;
+  float window_[kFftSize];
+  float low_pow_;
+  float high_pow_;
+};
+
+float EncodedPowerRatio(AudioEncoder* encoder,
+                        AudioDecoder* decoder,
+                        test::AudioLoop* audio_loop) {
+  // Encode and decode.
+  uint32_t rtp_timestamp = 0u;
+  constexpr size_t kBufferSize = 500;
+  rtc::Buffer encoded(kBufferSize);
+  std::vector<int16_t> decoded(kOutputBlockSizeSamples);
+  std::vector<float> decoded_float(kOutputBlockSizeSamples);
+  AudioDecoder::SpeechType speech_type = AudioDecoder::kSpeech;
+  PowerRatioEstimator power_ratio_estimator;
+  for (size_t i = 0; i < 1000; ++i) {
+    encoded.Clear();
+    AudioEncoder::EncodedInfo encoder_info =
+        encoder->Encode(rtp_timestamp, audio_loop->GetNextBlock(), &encoded);
+    rtp_timestamp += kInputBlockSizeSamples;
+    if (encoded.size() > 0) {
+      int decoder_info = decoder->Decode(
+          encoded.data(), encoded.size(), kSampleRateHz,
+          decoded.size() * sizeof(decoded[0]), decoded.data(), &speech_type);
+      if (decoder_info > 0) {
+        S16ToFloat(decoded.data(), decoded.size(), decoded_float.data());
+        power_ratio_estimator.ProcessBlock(decoded_float.data());
+      }
+    }
+  }
+  return power_ratio_estimator.PowerRatio();
+}
+
+}  // namespace
+
+TEST(BandwidthAdaptationTest, BandwidthAdaptationTest) {
+  test::ScopedFieldTrials override_field_trials(
+      "WebRTC-AdjustOpusBandwidth/Enabled/");
+
+  constexpr float kMaxNarrowbandRatio = 0.003f;
+  constexpr float kMinWidebandRatio = 0.03f;
+
+  // Create encoder.
+  AudioEncoderOpusConfig enc_config;
+  enc_config.bitrate_bps = rtc::Optional<int>(7999);
+  enc_config.num_channels = kNumChannels;
+  constexpr int payload_type = 17;
+  auto encoder = AudioEncoderOpus::MakeAudioEncoder(enc_config, payload_type);
+
+  // Create decoder.
+  AudioDecoderOpus::Config dec_config;
+  dec_config.num_channels = kNumChannels;
+  auto decoder = AudioDecoderOpus::MakeAudioDecoder(dec_config);
+
+  // Open speech file.
+  const std::string kInputFileName =
+      webrtc::test::ResourcePath("audio_coding/speech_mono_32_48kHz", "pcm");
+  test::AudioLoop audio_loop;
+  EXPECT_EQ(kSampleRateHz, encoder->SampleRateHz());
+  ASSERT_TRUE(audio_loop.Init(kInputFileName, kMaxLoopLengthSamples,
+                              kInputBlockSizeSamples));
+
+  EXPECT_LT(EncodedPowerRatio(encoder.get(), decoder.get(), &audio_loop),
+            kMaxNarrowbandRatio);
+
+  encoder->OnReceivedTargetAudioBitrate(9000);
+  EXPECT_LT(EncodedPowerRatio(encoder.get(), decoder.get(), &audio_loop),
+            kMaxNarrowbandRatio);
+
+  encoder->OnReceivedTargetAudioBitrate(9001);
+  EXPECT_GT(EncodedPowerRatio(encoder.get(), decoder.get(), &audio_loop),
+            kMinWidebandRatio);
+
+  encoder->OnReceivedTargetAudioBitrate(8000);
+  EXPECT_GT(EncodedPowerRatio(encoder.get(), decoder.get(), &audio_loop),
+            kMinWidebandRatio);
+
+  encoder->OnReceivedTargetAudioBitrate(12001);
+  EXPECT_GT(EncodedPowerRatio(encoder.get(), decoder.get(), &audio_loop),
+            kMinWidebandRatio);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/opus/opus_complexity_unittest.cc b/modules/audio_coding/codecs/opus/opus_complexity_unittest.cc
new file mode 100644
index 0000000..4c9174d
--- /dev/null
+++ b/modules/audio_coding/codecs/opus/opus_complexity_unittest.cc
@@ -0,0 +1,98 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/audio_codecs/opus/audio_encoder_opus.h"
+#include "modules/audio_coding/neteq/tools/audio_loop.h"
+#include "rtc_base/format_macros.h"
+#include "rtc_base/timeutils.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+#include "test/testsupport/perf_test.h"
+
+namespace webrtc {
+
+namespace {
+int64_t RunComplexityTest(const AudioEncoderOpusConfig& config) {
+  // Create encoder.
+  constexpr int payload_type = 17;
+  const auto encoder = AudioEncoderOpus::MakeAudioEncoder(config, payload_type);
+  // Open speech file.
+  const std::string kInputFileName =
+      webrtc::test::ResourcePath("audio_coding/speech_mono_32_48kHz", "pcm");
+  test::AudioLoop audio_loop;
+  constexpr int kSampleRateHz = 48000;
+  EXPECT_EQ(kSampleRateHz, encoder->SampleRateHz());
+  constexpr size_t kMaxLoopLengthSamples =
+      kSampleRateHz * 10;  // 10 second loop.
+  constexpr size_t kInputBlockSizeSamples =
+      10 * kSampleRateHz / 1000;  // 60 ms.
+  EXPECT_TRUE(audio_loop.Init(kInputFileName, kMaxLoopLengthSamples,
+                              kInputBlockSizeSamples));
+  // Encode.
+  const int64_t start_time_ms = rtc::TimeMillis();
+  AudioEncoder::EncodedInfo info;
+  rtc::Buffer encoded(500);
+  uint32_t rtp_timestamp = 0u;
+  for (size_t i = 0; i < 10000; ++i) {
+    encoded.Clear();
+    info = encoder->Encode(rtp_timestamp, audio_loop.GetNextBlock(), &encoded);
+    rtp_timestamp += kInputBlockSizeSamples;
+  }
+  return rtc::TimeMillis() - start_time_ms;
+}
+}  // namespace
+
+// This test encodes an audio file using Opus twice with different bitrates
+// (~11 kbps and 15.5 kbps). The runtime for each is measured, and the ratio
+// between the two is calculated and tracked. This test explicitly sets the
+// low_rate_complexity to 9. When running on desktop platforms, this is the same
+// as the regular complexity, and the expectation is that the resulting ratio
+// should be less than 100% (since the encoder runs faster at lower bitrates,
+// given a fixed complexity setting). On the other hand, when running on
+// mobiles, the regular complexity is 5, and we expect the resulting ratio to
+// be higher, since we have explicitly asked for a higher complexity setting at
+// the lower rate.
+TEST(AudioEncoderOpusComplexityAdaptationTest, AdaptationOn) {
+  // Create config.
+  AudioEncoderOpusConfig config;
+  // The limit -- including the hysteresis window -- at which the complexity
+  // shuold be increased.
+  config.bitrate_bps = 11000 - 1;
+  config.low_rate_complexity = 9;
+  int64_t runtime_10999bps = RunComplexityTest(config);
+
+  config.bitrate_bps = 15500;
+  int64_t runtime_15500bps = RunComplexityTest(config);
+
+  test::PrintResult("opus_encoding_complexity_ratio", "", "adaptation_on",
+                    100.0 * runtime_10999bps / runtime_15500bps, "percent",
+                    true);
+}
+
+// This test is identical to the one above, but without the complexity
+// adaptation enabled (neither on desktop, nor on mobile). The expectation is
+// that the resulting ratio is less than 100% at all times.
+TEST(AudioEncoderOpusComplexityAdaptationTest, AdaptationOff) {
+  // Create config.
+  AudioEncoderOpusConfig config;
+  // The limit -- including the hysteresis window -- at which the complexity
+  // shuold be increased (but not in this test since complexity adaptation is
+  // disabled).
+  config.bitrate_bps = 11000 - 1;
+  int64_t runtime_10999bps = RunComplexityTest(config);
+
+  config.bitrate_bps = 15500;
+  int64_t runtime_15500bps = RunComplexityTest(config);
+
+  test::PrintResult("opus_encoding_complexity_ratio", "", "adaptation_off",
+                    100.0 * runtime_10999bps / runtime_15500bps, "percent",
+                    true);
+}
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/opus/opus_fec_test.cc b/modules/audio_coding/codecs/opus/opus_fec_test.cc
new file mode 100644
index 0000000..4e0a17e
--- /dev/null
+++ b/modules/audio_coding/codecs/opus/opus_fec_test.cc
@@ -0,0 +1,248 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+#include "rtc_base/format_macros.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+using std::string;
+using std::tuple;
+using std::get;
+using ::testing::TestWithParam;
+
+namespace webrtc {
+
+// Define coding parameter as <channels, bit_rate, filename, extension>.
+typedef tuple<size_t, int, string, string> coding_param;
+typedef struct mode mode;
+
+struct mode {
+  bool fec;
+  uint8_t target_packet_loss_rate;
+};
+
+const int kOpusBlockDurationMs = 20;
+const int kOpusSamplingKhz = 48;
+
+class OpusFecTest : public TestWithParam<coding_param> {
+ protected:
+  OpusFecTest();
+
+  virtual void SetUp();
+  virtual void TearDown();
+
+  virtual void EncodeABlock();
+
+  virtual void DecodeABlock(bool lost_previous, bool lost_current);
+
+  int block_duration_ms_;
+  int sampling_khz_;
+  size_t block_length_sample_;
+
+  size_t channels_;
+  int bit_rate_;
+
+  size_t data_pointer_;
+  size_t loop_length_samples_;
+  size_t max_bytes_;
+  size_t encoded_bytes_;
+
+  WebRtcOpusEncInst* opus_encoder_;
+  WebRtcOpusDecInst* opus_decoder_;
+
+  string in_filename_;
+
+  std::unique_ptr<int16_t[]> in_data_;
+  std::unique_ptr<int16_t[]> out_data_;
+  std::unique_ptr<uint8_t[]> bit_stream_;
+};
+
+void OpusFecTest::SetUp() {
+  channels_ = get<0>(GetParam());
+  bit_rate_ = get<1>(GetParam());
+  printf("Coding %" PRIuS " channel signal at %d bps.\n", channels_, bit_rate_);
+
+  in_filename_ = test::ResourcePath(get<2>(GetParam()), get<3>(GetParam()));
+
+  FILE* fp = fopen(in_filename_.c_str(), "rb");
+  ASSERT_FALSE(fp == NULL);
+
+  // Obtain file size.
+  fseek(fp, 0, SEEK_END);
+  loop_length_samples_ = ftell(fp) / sizeof(int16_t);
+  rewind(fp);
+
+  // Allocate memory to contain the whole file.
+  in_data_.reset(new int16_t[loop_length_samples_ +
+      block_length_sample_ * channels_]);
+
+  // Copy the file into the buffer.
+  ASSERT_EQ(fread(&in_data_[0], sizeof(int16_t), loop_length_samples_, fp),
+            loop_length_samples_);
+  fclose(fp);
+
+  // The audio will be used in a looped manner. To ease the acquisition of an
+  // audio frame that crosses the end of the excerpt, we add an extra block
+  // length of samples to the end of the array, starting over again from the
+  // beginning of the array. Audio frames cross the end of the excerpt always
+  // appear as a continuum of memory.
+  memcpy(&in_data_[loop_length_samples_], &in_data_[0],
+         block_length_sample_ * channels_ * sizeof(int16_t));
+
+  // Maximum number of bytes in output bitstream.
+  max_bytes_ = block_length_sample_ * channels_ * sizeof(int16_t);
+
+  out_data_.reset(new int16_t[2 * block_length_sample_ * channels_]);
+  bit_stream_.reset(new uint8_t[max_bytes_]);
+
+  // If channels_ == 1, use Opus VOIP mode, otherwise, audio mode.
+  int app = channels_ == 1 ? 0 : 1;
+
+  // Create encoder memory.
+  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_encoder_, channels_, app));
+  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_, channels_));
+  // Set bitrate.
+  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_encoder_, bit_rate_));
+}
+
+void OpusFecTest::TearDown() {
+  // Free memory.
+  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+OpusFecTest::OpusFecTest()
+    : block_duration_ms_(kOpusBlockDurationMs),
+      sampling_khz_(kOpusSamplingKhz),
+      block_length_sample_(
+          static_cast<size_t>(block_duration_ms_ * sampling_khz_)),
+      data_pointer_(0),
+      max_bytes_(0),
+      encoded_bytes_(0),
+      opus_encoder_(NULL),
+      opus_decoder_(NULL) {
+}
+
+void OpusFecTest::EncodeABlock() {
+  int value = WebRtcOpus_Encode(opus_encoder_,
+                                &in_data_[data_pointer_],
+                                block_length_sample_,
+                                max_bytes_, &bit_stream_[0]);
+  EXPECT_GT(value, 0);
+
+  encoded_bytes_ = static_cast<size_t>(value);
+}
+
+void OpusFecTest::DecodeABlock(bool lost_previous, bool lost_current) {
+  int16_t audio_type;
+  int value_1 = 0, value_2 = 0;
+
+  if (lost_previous) {
+    // Decode previous frame.
+    if (!lost_current &&
+        WebRtcOpus_PacketHasFec(&bit_stream_[0], encoded_bytes_) == 1) {
+      value_1 = WebRtcOpus_DecodeFec(opus_decoder_, &bit_stream_[0],
+                                     encoded_bytes_, &out_data_[0],
+                                     &audio_type);
+    } else {
+      value_1 = WebRtcOpus_DecodePlc(opus_decoder_, &out_data_[0], 1);
+    }
+    EXPECT_EQ(static_cast<int>(block_length_sample_), value_1);
+  }
+
+  if (!lost_current) {
+    // Decode current frame.
+    value_2 = WebRtcOpus_Decode(opus_decoder_, &bit_stream_[0], encoded_bytes_,
+                                &out_data_[value_1 * channels_], &audio_type);
+    EXPECT_EQ(static_cast<int>(block_length_sample_), value_2);
+  }
+}
+
+TEST_P(OpusFecTest, RandomPacketLossTest) {
+  const int kDurationMs = 200000;
+  int time_now_ms, fec_frames;
+  int actual_packet_loss_rate;
+  bool lost_current, lost_previous;
+  mode mode_set[3] = {{true, 0},
+                      {false, 0},
+                      {true, 50}};
+
+  lost_current = false;
+  for (int i = 0; i < 3; i++) {
+    if (mode_set[i].fec) {
+      EXPECT_EQ(0, WebRtcOpus_EnableFec(opus_encoder_));
+      EXPECT_EQ(0, WebRtcOpus_SetPacketLossRate(opus_encoder_,
+          mode_set[i].target_packet_loss_rate));
+      printf("FEC is ON, target at packet loss rate %d percent.\n",
+             mode_set[i].target_packet_loss_rate);
+    } else {
+      EXPECT_EQ(0, WebRtcOpus_DisableFec(opus_encoder_));
+      printf("FEC is OFF.\n");
+    }
+    // In this test, we let the target packet loss rate match the actual rate.
+    actual_packet_loss_rate = mode_set[i].target_packet_loss_rate;
+    // Run every mode a certain time.
+    time_now_ms = 0;
+    fec_frames = 0;
+    while (time_now_ms < kDurationMs) {
+      // Encode & decode.
+      EncodeABlock();
+
+      // Check if payload has FEC.
+      int fec = WebRtcOpus_PacketHasFec(&bit_stream_[0], encoded_bytes_);
+
+      // If FEC is disabled or the target packet loss rate is set to 0, there
+      // should be no FEC in the bit stream.
+      if (!mode_set[i].fec || mode_set[i].target_packet_loss_rate == 0) {
+        EXPECT_EQ(fec, 0);
+      } else if (fec == 1) {
+        fec_frames++;
+      }
+
+      lost_previous = lost_current;
+      lost_current = rand() < actual_packet_loss_rate * (RAND_MAX / 100);
+      DecodeABlock(lost_previous, lost_current);
+
+      time_now_ms += block_duration_ms_;
+
+      // |data_pointer_| is incremented and wrapped across
+      // |loop_length_samples_|.
+      data_pointer_ = (data_pointer_ + block_length_sample_ * channels_) %
+        loop_length_samples_;
+    }
+    if (mode_set[i].fec) {
+      printf("%.2f percent frames has FEC.\n",
+             static_cast<float>(fec_frames) * block_duration_ms_ / 2000);
+    }
+  }
+}
+
+const coding_param param_set[] = {
+    std::make_tuple(1,
+                    64000,
+                    string("audio_coding/testfile32kHz"),
+                    string("pcm")),
+    std::make_tuple(1,
+                    32000,
+                    string("audio_coding/testfile32kHz"),
+                    string("pcm")),
+    std::make_tuple(2,
+                    64000,
+                    string("audio_coding/teststereo32kHz"),
+                    string("pcm"))};
+
+// 64 kbps, stereo
+INSTANTIATE_TEST_CASE_P(AllTest, OpusFecTest,
+                        ::testing::ValuesIn(param_set));
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/opus/opus_inst.h b/modules/audio_coding/codecs/opus/opus_inst.h
new file mode 100644
index 0000000..066fa22
--- /dev/null
+++ b/modules/audio_coding/codecs/opus/opus_inst.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_OPUS_OPUS_INST_H_
+#define MODULES_AUDIO_CODING_CODECS_OPUS_OPUS_INST_H_
+
+#include <stddef.h>
+
+#include "rtc_base/ignore_wundef.h"
+
+RTC_PUSH_IGNORING_WUNDEF()
+#include "opus.h"
+RTC_POP_IGNORING_WUNDEF()
+
+struct WebRtcOpusEncInst {
+  OpusEncoder* encoder;
+  size_t channels;
+  int in_dtx_mode;
+};
+
+struct WebRtcOpusDecInst {
+  OpusDecoder* decoder;
+  int prev_decoded_samples;
+  size_t channels;
+  int in_dtx_mode;
+};
+
+
+#endif  // MODULES_AUDIO_CODING_CODECS_OPUS_OPUS_INST_H_
diff --git a/modules/audio_coding/codecs/opus/opus_interface.c b/modules/audio_coding/codecs/opus/opus_interface.c
new file mode 100644
index 0000000..d219098
--- /dev/null
+++ b/modules/audio_coding/codecs/opus/opus_interface.c
@@ -0,0 +1,523 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+
+#include "rtc_base/checks.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+enum {
+#if WEBRTC_OPUS_SUPPORT_120MS_PTIME
+  /* Maximum supported frame size in WebRTC is 120 ms. */
+  kWebRtcOpusMaxEncodeFrameSizeMs = 120,
+#else
+  /* Maximum supported frame size in WebRTC is 60 ms. */
+  kWebRtcOpusMaxEncodeFrameSizeMs = 60,
+#endif
+
+  /* The format allows up to 120 ms frames. Since we don't control the other
+   * side, we must allow for packets of that size. NetEq is currently limited
+   * to 60 ms on the receive side. */
+  kWebRtcOpusMaxDecodeFrameSizeMs = 120,
+
+  /* Maximum sample count per channel is 48 kHz * maximum frame size in
+   * milliseconds. */
+  kWebRtcOpusMaxFrameSizePerChannel = 48 * kWebRtcOpusMaxDecodeFrameSizeMs,
+
+  /* Default frame size, 20 ms @ 48 kHz, in samples (for one channel). */
+  kWebRtcOpusDefaultFrameSize = 960,
+};
+
+int16_t WebRtcOpus_EncoderCreate(OpusEncInst** inst,
+                                 size_t channels,
+                                 int32_t application) {
+  int opus_app;
+  if (!inst)
+    return -1;
+
+  switch (application) {
+    case 0:
+      opus_app = OPUS_APPLICATION_VOIP;
+      break;
+    case 1:
+      opus_app = OPUS_APPLICATION_AUDIO;
+      break;
+    default:
+      return -1;
+  }
+
+  OpusEncInst* state = calloc(1, sizeof(OpusEncInst));
+  RTC_DCHECK(state);
+
+  int error;
+  state->encoder = opus_encoder_create(48000, (int)channels, opus_app,
+                                       &error);
+  if (error != OPUS_OK || !state->encoder) {
+    WebRtcOpus_EncoderFree(state);
+    return -1;
+  }
+
+  state->in_dtx_mode = 0;
+  state->channels = channels;
+
+  *inst = state;
+  return 0;
+}
+
+int16_t WebRtcOpus_EncoderFree(OpusEncInst* inst) {
+  if (inst) {
+    opus_encoder_destroy(inst->encoder);
+    free(inst);
+    return 0;
+  } else {
+    return -1;
+  }
+}
+
+int WebRtcOpus_Encode(OpusEncInst* inst,
+                      const int16_t* audio_in,
+                      size_t samples,
+                      size_t length_encoded_buffer,
+                      uint8_t* encoded) {
+  int res;
+
+  if (samples > 48 * kWebRtcOpusMaxEncodeFrameSizeMs) {
+    return -1;
+  }
+
+  res = opus_encode(inst->encoder,
+                    (const opus_int16*)audio_in,
+                    (int)samples,
+                    encoded,
+                    (opus_int32)length_encoded_buffer);
+
+  if (res <= 0) {
+    return -1;
+  }
+
+  if (res <= 2) {
+    // Indicates DTX since the packet has nothing but a header. In principle,
+    // there is no need to send this packet. However, we do transmit the first
+    // occurrence to let the decoder know that the encoder enters DTX mode.
+    if (inst->in_dtx_mode) {
+      return 0;
+    } else {
+      inst->in_dtx_mode = 1;
+      return res;
+    }
+  }
+
+  inst->in_dtx_mode = 0;
+  return res;
+}
+
+int16_t WebRtcOpus_SetBitRate(OpusEncInst* inst, int32_t rate) {
+  if (inst) {
+    return opus_encoder_ctl(inst->encoder, OPUS_SET_BITRATE(rate));
+  } else {
+    return -1;
+  }
+}
+
+int16_t WebRtcOpus_SetPacketLossRate(OpusEncInst* inst, int32_t loss_rate) {
+  if (inst) {
+    return opus_encoder_ctl(inst->encoder,
+                            OPUS_SET_PACKET_LOSS_PERC(loss_rate));
+  } else {
+    return -1;
+  }
+}
+
+int16_t WebRtcOpus_SetMaxPlaybackRate(OpusEncInst* inst, int32_t frequency_hz) {
+  opus_int32 set_bandwidth;
+
+  if (!inst)
+    return -1;
+
+  if (frequency_hz <= 8000) {
+    set_bandwidth = OPUS_BANDWIDTH_NARROWBAND;
+  } else if (frequency_hz <= 12000) {
+    set_bandwidth = OPUS_BANDWIDTH_MEDIUMBAND;
+  } else if (frequency_hz <= 16000) {
+    set_bandwidth = OPUS_BANDWIDTH_WIDEBAND;
+  } else if (frequency_hz <= 24000) {
+    set_bandwidth = OPUS_BANDWIDTH_SUPERWIDEBAND;
+  } else {
+    set_bandwidth = OPUS_BANDWIDTH_FULLBAND;
+  }
+  return opus_encoder_ctl(inst->encoder,
+                          OPUS_SET_MAX_BANDWIDTH(set_bandwidth));
+}
+
+int16_t WebRtcOpus_EnableFec(OpusEncInst* inst) {
+  if (inst) {
+    return opus_encoder_ctl(inst->encoder, OPUS_SET_INBAND_FEC(1));
+  } else {
+    return -1;
+  }
+}
+
+int16_t WebRtcOpus_DisableFec(OpusEncInst* inst) {
+  if (inst) {
+    return opus_encoder_ctl(inst->encoder, OPUS_SET_INBAND_FEC(0));
+  } else {
+    return -1;
+  }
+}
+
+int16_t WebRtcOpus_EnableDtx(OpusEncInst* inst) {
+  if (!inst) {
+    return -1;
+  }
+
+  // To prevent Opus from entering CELT-only mode by forcing signal type to
+  // voice to make sure that DTX behaves correctly. Currently, DTX does not
+  // last long during a pure silence, if the signal type is not forced.
+  // TODO(minyue): Remove the signal type forcing when Opus DTX works properly
+  // without it.
+  int ret = opus_encoder_ctl(inst->encoder,
+                             OPUS_SET_SIGNAL(OPUS_SIGNAL_VOICE));
+  if (ret != OPUS_OK)
+    return ret;
+
+  return opus_encoder_ctl(inst->encoder, OPUS_SET_DTX(1));
+}
+
+int16_t WebRtcOpus_DisableDtx(OpusEncInst* inst) {
+  if (inst) {
+    int ret = opus_encoder_ctl(inst->encoder,
+                               OPUS_SET_SIGNAL(OPUS_AUTO));
+    if (ret != OPUS_OK)
+      return ret;
+    return opus_encoder_ctl(inst->encoder, OPUS_SET_DTX(0));
+  } else {
+    return -1;
+  }
+}
+
+int16_t WebRtcOpus_EnableCbr(OpusEncInst* inst) {
+  if (inst) {
+    return opus_encoder_ctl(inst->encoder, OPUS_SET_VBR(0));
+  } else {
+    return -1;
+  }
+}
+
+int16_t WebRtcOpus_DisableCbr(OpusEncInst* inst) {
+  if (inst) {
+    return opus_encoder_ctl(inst->encoder, OPUS_SET_VBR(1));
+  } else {
+    return -1;
+  }
+}
+
+int16_t WebRtcOpus_SetComplexity(OpusEncInst* inst, int32_t complexity) {
+  if (inst) {
+    return opus_encoder_ctl(inst->encoder, OPUS_SET_COMPLEXITY(complexity));
+  } else {
+    return -1;
+  }
+}
+
+int32_t WebRtcOpus_GetBandwidth(OpusEncInst* inst) {
+  if (!inst) {
+    return -1;
+  }
+  int32_t bandwidth;
+  if (opus_encoder_ctl(inst->encoder, OPUS_GET_BANDWIDTH(&bandwidth)) == 0) {
+    return bandwidth;
+  } else {
+    return -1;
+  }
+
+}
+
+int16_t WebRtcOpus_SetBandwidth(OpusEncInst* inst, int32_t bandwidth) {
+  if (inst) {
+    return opus_encoder_ctl(inst->encoder, OPUS_SET_BANDWIDTH(bandwidth));
+  } else {
+    return -1;
+  }
+}
+
+int16_t WebRtcOpus_SetForceChannels(OpusEncInst* inst, size_t num_channels) {
+  if (!inst)
+    return -1;
+  if (num_channels == 0) {
+    return opus_encoder_ctl(inst->encoder,
+                            OPUS_SET_FORCE_CHANNELS(OPUS_AUTO));
+  } else if (num_channels == 1 || num_channels == 2) {
+    return opus_encoder_ctl(inst->encoder,
+                            OPUS_SET_FORCE_CHANNELS(num_channels));
+  } else {
+    return -1;
+  }
+}
+
+int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst, size_t channels) {
+  int error;
+  OpusDecInst* state;
+
+  if (inst != NULL) {
+    /* Create Opus decoder state. */
+    state = (OpusDecInst*) calloc(1, sizeof(OpusDecInst));
+    if (state == NULL) {
+      return -1;
+    }
+
+    /* Create new memory, always at 48000 Hz. */
+    state->decoder = opus_decoder_create(48000, (int)channels, &error);
+    if (error == OPUS_OK && state->decoder != NULL) {
+      /* Creation of memory all ok. */
+      state->channels = channels;
+      state->prev_decoded_samples = kWebRtcOpusDefaultFrameSize;
+      state->in_dtx_mode = 0;
+      *inst = state;
+      return 0;
+    }
+
+    /* If memory allocation was unsuccessful, free the entire state. */
+    if (state->decoder) {
+      opus_decoder_destroy(state->decoder);
+    }
+    free(state);
+  }
+  return -1;
+}
+
+int16_t WebRtcOpus_DecoderFree(OpusDecInst* inst) {
+  if (inst) {
+    opus_decoder_destroy(inst->decoder);
+    free(inst);
+    return 0;
+  } else {
+    return -1;
+  }
+}
+
+size_t WebRtcOpus_DecoderChannels(OpusDecInst* inst) {
+  return inst->channels;
+}
+
+void WebRtcOpus_DecoderInit(OpusDecInst* inst) {
+  opus_decoder_ctl(inst->decoder, OPUS_RESET_STATE);
+  inst->in_dtx_mode = 0;
+}
+
+/* For decoder to determine if it is to output speech or comfort noise. */
+static int16_t DetermineAudioType(OpusDecInst* inst, size_t encoded_bytes) {
+  // Audio type becomes comfort noise if |encoded_byte| is 1 and keeps
+  // to be so if the following |encoded_byte| are 0 or 1.
+  if (encoded_bytes == 0 && inst->in_dtx_mode) {
+    return 2;  // Comfort noise.
+  } else if (encoded_bytes == 1 || encoded_bytes == 2) {
+    // TODO(henrik.lundin): There is a slight risk that a 2-byte payload is in
+    // fact a 1-byte TOC with a 1-byte payload. That will be erroneously
+    // interpreted as comfort noise output, but such a payload is probably
+    // faulty anyway.
+    inst->in_dtx_mode = 1;
+    return 2;  // Comfort noise.
+  } else {
+    inst->in_dtx_mode = 0;
+    return 0;  // Speech.
+  }
+}
+
+/* |frame_size| is set to maximum Opus frame size in the normal case, and
+ * is set to the number of samples needed for PLC in case of losses.
+ * It is up to the caller to make sure the value is correct. */
+static int DecodeNative(OpusDecInst* inst, const uint8_t* encoded,
+                        size_t encoded_bytes, int frame_size,
+                        int16_t* decoded, int16_t* audio_type, int decode_fec) {
+  int res = opus_decode(inst->decoder, encoded, (opus_int32)encoded_bytes,
+                        (opus_int16*)decoded, frame_size, decode_fec);
+
+  if (res <= 0)
+    return -1;
+
+  *audio_type = DetermineAudioType(inst, encoded_bytes);
+
+  return res;
+}
+
+int WebRtcOpus_Decode(OpusDecInst* inst, const uint8_t* encoded,
+                      size_t encoded_bytes, int16_t* decoded,
+                      int16_t* audio_type) {
+  int decoded_samples;
+
+  if (encoded_bytes == 0) {
+    *audio_type = DetermineAudioType(inst, encoded_bytes);
+    decoded_samples = WebRtcOpus_DecodePlc(inst, decoded, 1);
+  } else {
+    decoded_samples = DecodeNative(inst,
+                                   encoded,
+                                   encoded_bytes,
+                                   kWebRtcOpusMaxFrameSizePerChannel,
+                                   decoded,
+                                   audio_type,
+                                   0);
+  }
+  if (decoded_samples < 0) {
+    return -1;
+  }
+
+  /* Update decoded sample memory, to be used by the PLC in case of losses. */
+  inst->prev_decoded_samples = decoded_samples;
+
+  return decoded_samples;
+}
+
+int WebRtcOpus_DecodePlc(OpusDecInst* inst, int16_t* decoded,
+                         int number_of_lost_frames) {
+  int16_t audio_type = 0;
+  int decoded_samples;
+  int plc_samples;
+
+  /* The number of samples we ask for is |number_of_lost_frames| times
+   * |prev_decoded_samples_|. Limit the number of samples to maximum
+   * |kWebRtcOpusMaxFrameSizePerChannel|. */
+  plc_samples = number_of_lost_frames * inst->prev_decoded_samples;
+  plc_samples = (plc_samples <= kWebRtcOpusMaxFrameSizePerChannel) ?
+      plc_samples : kWebRtcOpusMaxFrameSizePerChannel;
+  decoded_samples = DecodeNative(inst, NULL, 0, plc_samples,
+                                 decoded, &audio_type, 0);
+  if (decoded_samples < 0) {
+    return -1;
+  }
+
+  return decoded_samples;
+}
+
+int WebRtcOpus_DecodeFec(OpusDecInst* inst, const uint8_t* encoded,
+                         size_t encoded_bytes, int16_t* decoded,
+                         int16_t* audio_type) {
+  int decoded_samples;
+  int fec_samples;
+
+  if (WebRtcOpus_PacketHasFec(encoded, encoded_bytes) != 1) {
+    return 0;
+  }
+
+  fec_samples = opus_packet_get_samples_per_frame(encoded, 48000);
+
+  decoded_samples = DecodeNative(inst, encoded, encoded_bytes,
+                                 fec_samples, decoded, audio_type, 1);
+  if (decoded_samples < 0) {
+    return -1;
+  }
+
+  return decoded_samples;
+}
+
+int WebRtcOpus_DurationEst(OpusDecInst* inst,
+                           const uint8_t* payload,
+                           size_t payload_length_bytes) {
+  if (payload_length_bytes == 0) {
+    // WebRtcOpus_Decode calls PLC when payload length is zero. So we return
+    // PLC duration correspondingly.
+    return WebRtcOpus_PlcDuration(inst);
+  }
+
+  int frames, samples;
+  frames = opus_packet_get_nb_frames(payload, (opus_int32)payload_length_bytes);
+  if (frames < 0) {
+    /* Invalid payload data. */
+    return 0;
+  }
+  samples = frames * opus_packet_get_samples_per_frame(payload, 48000);
+  if (samples < 120 || samples > 5760) {
+    /* Invalid payload duration. */
+    return 0;
+  }
+  return samples;
+}
+
+int WebRtcOpus_PlcDuration(OpusDecInst* inst) {
+  /* The number of samples we ask for is |number_of_lost_frames| times
+   * |prev_decoded_samples_|. Limit the number of samples to maximum
+   * |kWebRtcOpusMaxFrameSizePerChannel|. */
+  const int plc_samples = inst->prev_decoded_samples;
+  return (plc_samples <= kWebRtcOpusMaxFrameSizePerChannel) ?
+      plc_samples : kWebRtcOpusMaxFrameSizePerChannel;
+}
+
+int WebRtcOpus_FecDurationEst(const uint8_t* payload,
+                              size_t payload_length_bytes) {
+  int samples;
+  if (WebRtcOpus_PacketHasFec(payload, payload_length_bytes) != 1) {
+    return 0;
+  }
+
+  samples = opus_packet_get_samples_per_frame(payload, 48000);
+  if (samples < 480 || samples > 5760) {
+    /* Invalid payload duration. */
+    return 0;
+  }
+  return samples;
+}
+
+int WebRtcOpus_PacketHasFec(const uint8_t* payload,
+                            size_t payload_length_bytes) {
+  int frames, channels, payload_length_ms;
+  int n;
+  opus_int16 frame_sizes[48];
+  const unsigned char *frame_data[48];
+
+  if (payload == NULL || payload_length_bytes == 0)
+    return 0;
+
+  /* In CELT_ONLY mode, packets should not have FEC. */
+  if (payload[0] & 0x80)
+    return 0;
+
+  payload_length_ms = opus_packet_get_samples_per_frame(payload, 48000) / 48;
+  if (10 > payload_length_ms)
+    payload_length_ms = 10;
+
+  channels = opus_packet_get_nb_channels(payload);
+
+  switch (payload_length_ms) {
+    case 10:
+    case 20: {
+      frames = 1;
+      break;
+    }
+    case 40: {
+      frames = 2;
+      break;
+    }
+    case 60: {
+      frames = 3;
+      break;
+    }
+    default: {
+      return 0; // It is actually even an invalid packet.
+    }
+  }
+
+  /* The following is to parse the LBRR flags. */
+  if (opus_packet_parse(payload, (opus_int32)payload_length_bytes, NULL,
+                        frame_data, frame_sizes, NULL) < 0) {
+    return 0;
+  }
+
+  if (frame_sizes[0] <= 1) {
+    return 0;
+  }
+
+  for (n = 0; n < channels; n++) {
+    if (frame_data[0][0] & (0x80 >> ((n + 1) * (frames + 1) - 1)))
+      return 1;
+  }
+
+  return 0;
+}
diff --git a/modules/audio_coding/codecs/opus/opus_interface.h b/modules/audio_coding/codecs/opus/opus_interface.h
new file mode 100644
index 0000000..4b8e892
--- /dev/null
+++ b/modules/audio_coding/codecs/opus/opus_interface.h
@@ -0,0 +1,432 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_OPUS_OPUS_INTERFACE_H_
+#define MODULES_AUDIO_CODING_CODECS_OPUS_OPUS_INTERFACE_H_
+
+#include <stddef.h>
+
+#include "modules/audio_coding/codecs/opus/opus_inst.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Opaque wrapper types for the codec state.
+typedef struct WebRtcOpusEncInst OpusEncInst;
+typedef struct WebRtcOpusDecInst OpusDecInst;
+
+/****************************************************************************
+ * WebRtcOpus_EncoderCreate(...)
+ *
+ * This function create an Opus encoder.
+ *
+ * Input:
+ *      - channels           : number of channels.
+ *      - application        : 0 - VOIP applications.
+ *                                 Favor speech intelligibility.
+ *                             1 - Audio applications.
+ *                                 Favor faithfulness to the original input.
+ *
+ * Output:
+ *      - inst               : a pointer to Encoder context that is created
+ *                             if success.
+ *
+ * Return value              : 0 - Success
+ *                            -1 - Error
+ */
+int16_t WebRtcOpus_EncoderCreate(OpusEncInst** inst,
+                                 size_t channels,
+                                 int32_t application);
+
+int16_t WebRtcOpus_EncoderFree(OpusEncInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_Encode(...)
+ *
+ * This function encodes audio as a series of Opus frames and inserts
+ * it into a packet. Input buffer can be any length.
+ *
+ * Input:
+ *      - inst                  : Encoder context
+ *      - audio_in              : Input speech data buffer
+ *      - samples               : Samples per channel in audio_in
+ *      - length_encoded_buffer : Output buffer size
+ *
+ * Output:
+ *      - encoded               : Output compressed data buffer
+ *
+ * Return value                 : >=0 - Length (in bytes) of coded data
+ *                                -1 - Error
+ */
+int WebRtcOpus_Encode(OpusEncInst* inst,
+                      const int16_t* audio_in,
+                      size_t samples,
+                      size_t length_encoded_buffer,
+                      uint8_t* encoded);
+
+/****************************************************************************
+ * WebRtcOpus_SetBitRate(...)
+ *
+ * This function adjusts the target bitrate of the encoder.
+ *
+ * Input:
+ *      - inst               : Encoder context
+ *      - rate               : New target bitrate
+ *
+ * Return value              :  0 - Success
+ *                             -1 - Error
+ */
+int16_t WebRtcOpus_SetBitRate(OpusEncInst* inst, int32_t rate);
+
+/****************************************************************************
+ * WebRtcOpus_SetPacketLossRate(...)
+ *
+ * This function configures the encoder's expected packet loss percentage.
+ *
+ * Input:
+ *      - inst               : Encoder context
+ *      - loss_rate          : loss percentage in the range 0-100, inclusive.
+ * Return value              :  0 - Success
+ *                             -1 - Error
+ */
+int16_t WebRtcOpus_SetPacketLossRate(OpusEncInst* inst, int32_t loss_rate);
+
+/****************************************************************************
+ * WebRtcOpus_SetMaxPlaybackRate(...)
+ *
+ * Configures the maximum playback rate for encoding. Due to hardware
+ * limitations, the receiver may render audio up to a playback rate. Opus
+ * encoder can use this information to optimize for network usage and encoding
+ * complexity. This will affect the audio bandwidth in the coded audio. However,
+ * the input/output sample rate is not affected.
+ *
+ * Input:
+ *      - inst               : Encoder context
+ *      - frequency_hz       : Maximum playback rate in Hz.
+ *                             This parameter can take any value. The relation
+ *                             between the value and the Opus internal mode is
+ *                             as following:
+ *                             frequency_hz <= 8000           narrow band
+ *                             8000 < frequency_hz <= 12000   medium band
+ *                             12000 < frequency_hz <= 16000  wide band
+ *                             16000 < frequency_hz <= 24000  super wide band
+ *                             frequency_hz > 24000           full band
+ * Return value              :  0 - Success
+ *                             -1 - Error
+ */
+int16_t WebRtcOpus_SetMaxPlaybackRate(OpusEncInst* inst, int32_t frequency_hz);
+
+/* TODO(minyue): Check whether an API to check the FEC and the packet loss rate
+ * is needed. It might not be very useful since there are not many use cases and
+ * the caller can always maintain the states. */
+
+/****************************************************************************
+ * WebRtcOpus_EnableFec()
+ *
+ * This function enables FEC for encoding.
+ *
+ * Input:
+ *      - inst               : Encoder context
+ *
+ * Return value              :  0 - Success
+ *                             -1 - Error
+ */
+int16_t WebRtcOpus_EnableFec(OpusEncInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_DisableFec()
+ *
+ * This function disables FEC for encoding.
+ *
+ * Input:
+ *      - inst               : Encoder context
+ *
+ * Return value              :  0 - Success
+ *                             -1 - Error
+ */
+int16_t WebRtcOpus_DisableFec(OpusEncInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_EnableDtx()
+ *
+ * This function enables Opus internal DTX for encoding.
+ *
+ * Input:
+ *      - inst               : Encoder context
+ *
+ * Return value              :  0 - Success
+ *                             -1 - Error
+ */
+int16_t WebRtcOpus_EnableDtx(OpusEncInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_DisableDtx()
+ *
+ * This function disables Opus internal DTX for encoding.
+ *
+ * Input:
+ *      - inst               : Encoder context
+ *
+ * Return value              :  0 - Success
+ *                             -1 - Error
+ */
+int16_t WebRtcOpus_DisableDtx(OpusEncInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_EnableCbr()
+ *
+ * This function enables CBR for encoding.
+ *
+ * Input:
+ *      - inst               : Encoder context
+ *
+ * Return value              :  0 - Success
+ *                             -1 - Error
+ */
+int16_t WebRtcOpus_EnableCbr(OpusEncInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_DisableCbr()
+ *
+ * This function disables CBR for encoding.
+ *
+ * Input:
+ *      - inst               : Encoder context
+ *
+ * Return value              :  0 - Success
+ *                             -1 - Error
+ */
+int16_t WebRtcOpus_DisableCbr(OpusEncInst* inst);
+
+/*
+ * WebRtcOpus_SetComplexity(...)
+ *
+ * This function adjusts the computational complexity. The effect is the same as
+ * calling the complexity setting of Opus as an Opus encoder related CTL.
+ *
+ * Input:
+ *      - inst               : Encoder context
+ *      - complexity         : New target complexity (0-10, inclusive)
+ *
+ * Return value              :  0 - Success
+ *                             -1 - Error
+ */
+int16_t WebRtcOpus_SetComplexity(OpusEncInst* inst, int32_t complexity);
+
+/*
+ * WebRtcOpus_GetBandwidth(...)
+ *
+ * This function returns the current bandwidth.
+ *
+ * Input:
+ *      - inst               : Encoder context
+ *
+ * Return value              :  Bandwidth - Success
+ *                             -1 - Error
+ */
+int32_t WebRtcOpus_GetBandwidth(OpusEncInst* inst);
+
+/*
+ * WebRtcOpus_SetBandwidth(...)
+ *
+ * By default Opus decides which bandwidth to encode the signal in depending on
+ * the the bitrate. This function overrules the previous setting and forces the
+ * encoder to encode in narrowband/wideband/fullband/etc.
+ *
+ * Input:
+ *      - inst               : Encoder context
+ *      - bandwidth          : New target bandwidth. Valid values are:
+ *                             OPUS_BANDWIDTH_NARROWBAND
+ *                             OPUS_BANDWIDTH_MEDIUMBAND
+ *                             OPUS_BANDWIDTH_WIDEBAND
+ *                             OPUS_BANDWIDTH_SUPERWIDEBAND
+ *                             OPUS_BANDWIDTH_FULLBAND
+ *
+ * Return value              :  0 - Success
+ *                             -1 - Error
+ */
+int16_t WebRtcOpus_SetBandwidth(OpusEncInst* inst, int32_t bandwidth);
+
+/*
+ * WebRtcOpus_SetForceChannels(...)
+ *
+ * If the encoder is initialized as a stereo encoder, Opus will by default
+ * decide whether to encode in mono or stereo based on the bitrate. This
+ * function overrules the previous setting, and forces the encoder to encode
+ * in auto/mono/stereo.
+ *
+ * If the Encoder is initialized as a mono encoder, and one tries to force
+ * stereo, the function will return an error.
+ *
+ * Input:
+ *      - inst               : Encoder context
+ *      - num_channels       : 0 - Not forced
+ *                             1 - Mono
+ *                             2 - Stereo
+ *
+ * Return value              :  0 - Success
+ *                             -1 - Error
+ */
+int16_t WebRtcOpus_SetForceChannels(OpusEncInst* inst, size_t num_channels);
+
+int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst, size_t channels);
+int16_t WebRtcOpus_DecoderFree(OpusDecInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_DecoderChannels(...)
+ *
+ * This function returns the number of channels created for Opus decoder.
+ */
+size_t WebRtcOpus_DecoderChannels(OpusDecInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_DecoderInit(...)
+ *
+ * This function resets state of the decoder.
+ *
+ * Input:
+ *      - inst               : Decoder context
+ */
+void WebRtcOpus_DecoderInit(OpusDecInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_Decode(...)
+ *
+ * This function decodes an Opus packet into one or more audio frames at the
+ * ACM interface's sampling rate (32 kHz).
+ *
+ * Input:
+ *      - inst               : Decoder context
+ *      - encoded            : Encoded data
+ *      - encoded_bytes      : Bytes in encoded vector
+ *
+ * Output:
+ *      - decoded            : The decoded vector
+ *      - audio_type         : 1 normal, 2 CNG (for Opus it should
+ *                             always return 1 since we're not using Opus's
+ *                             built-in DTX/CNG scheme)
+ *
+ * Return value              : >0 - Samples per channel in decoded vector
+ *                             -1 - Error
+ */
+int WebRtcOpus_Decode(OpusDecInst* inst, const uint8_t* encoded,
+                      size_t encoded_bytes, int16_t* decoded,
+                      int16_t* audio_type);
+
+/****************************************************************************
+ * WebRtcOpus_DecodePlc(...)
+ *
+ * This function processes PLC for opus frame(s).
+ * Input:
+ *        - inst                  : Decoder context
+ *        - number_of_lost_frames : Number of PLC frames to produce
+ *
+ * Output:
+ *        - decoded               : The decoded vector
+ *
+ * Return value                   : >0 - number of samples in decoded PLC vector
+ *                                  -1 - Error
+ */
+int WebRtcOpus_DecodePlc(OpusDecInst* inst, int16_t* decoded,
+                         int number_of_lost_frames);
+
+/****************************************************************************
+ * WebRtcOpus_DecodeFec(...)
+ *
+ * This function decodes the FEC data from an Opus packet into one or more audio
+ * frames at the ACM interface's sampling rate (32 kHz).
+ *
+ * Input:
+ *      - inst               : Decoder context
+ *      - encoded            : Encoded data
+ *      - encoded_bytes      : Bytes in encoded vector
+ *
+ * Output:
+ *      - decoded            : The decoded vector (previous frame)
+ *
+ * Return value              : >0 - Samples per channel in decoded vector
+ *                              0 - No FEC data in the packet
+ *                             -1 - Error
+ */
+int WebRtcOpus_DecodeFec(OpusDecInst* inst, const uint8_t* encoded,
+                         size_t encoded_bytes, int16_t* decoded,
+                         int16_t* audio_type);
+
+/****************************************************************************
+ * WebRtcOpus_DurationEst(...)
+ *
+ * This function calculates the duration of an opus packet.
+ * Input:
+ *        - inst                 : Decoder context
+ *        - payload              : Encoded data pointer
+ *        - payload_length_bytes : Bytes of encoded data
+ *
+ * Return value                  : The duration of the packet, in samples per
+ *                                 channel.
+ */
+int WebRtcOpus_DurationEst(OpusDecInst* inst,
+                           const uint8_t* payload,
+                           size_t payload_length_bytes);
+
+/****************************************************************************
+ * WebRtcOpus_PlcDuration(...)
+ *
+ * This function calculates the duration of a frame returned by packet loss
+ * concealment (PLC).
+ *
+ * Input:
+ *        - inst                 : Decoder context
+ *
+ * Return value                  : The duration of a frame returned by PLC, in
+ *                                 samples per channel.
+ */
+int WebRtcOpus_PlcDuration(OpusDecInst* inst);
+
+/* TODO(minyue): Check whether it is needed to add a decoder context to the
+ * arguments, like WebRtcOpus_DurationEst(...). In fact, the packet itself tells
+ * the duration. The decoder context in WebRtcOpus_DurationEst(...) is not used.
+ * So it may be advisable to remove it from WebRtcOpus_DurationEst(...). */
+
+/****************************************************************************
+ * WebRtcOpus_FecDurationEst(...)
+ *
+ * This function calculates the duration of the FEC data within an opus packet.
+ * Input:
+ *        - payload              : Encoded data pointer
+ *        - payload_length_bytes : Bytes of encoded data
+ *
+ * Return value                  : >0 - The duration of the FEC data in the
+ *                                 packet in samples per channel.
+ *                                  0 - No FEC data in the packet.
+ */
+int WebRtcOpus_FecDurationEst(const uint8_t* payload,
+                              size_t payload_length_bytes);
+
+/****************************************************************************
+ * WebRtcOpus_PacketHasFec(...)
+ *
+ * This function detects if an opus packet has FEC.
+ * Input:
+ *        - payload              : Encoded data pointer
+ *        - payload_length_bytes : Bytes of encoded data
+ *
+ * Return value                  : 0 - the packet does NOT contain FEC.
+ *                                 1 - the packet contains FEC.
+ */
+int WebRtcOpus_PacketHasFec(const uint8_t* payload,
+                            size_t payload_length_bytes);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // MODULES_AUDIO_CODING_CODECS_OPUS_OPUS_INTERFACE_H_
diff --git a/modules/audio_coding/codecs/opus/opus_speed_test.cc b/modules/audio_coding/codecs/opus/opus_speed_test.cc
new file mode 100644
index 0000000..ca46aa1
--- /dev/null
+++ b/modules/audio_coding/codecs/opus/opus_speed_test.cc
@@ -0,0 +1,142 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+#include "modules/audio_coding/codecs/tools/audio_codec_speed_test.h"
+
+using ::std::string;
+
+namespace webrtc {
+
+static const int kOpusBlockDurationMs = 20;
+static const int kOpusSamplingKhz = 48;
+
+class OpusSpeedTest : public AudioCodecSpeedTest {
+ protected:
+  OpusSpeedTest();
+  void SetUp() override;
+  void TearDown() override;
+  float EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
+                     size_t max_bytes, size_t* encoded_bytes) override;
+  float DecodeABlock(const uint8_t* bit_stream, size_t encoded_bytes,
+                     int16_t* out_data) override;
+  WebRtcOpusEncInst* opus_encoder_;
+  WebRtcOpusDecInst* opus_decoder_;
+};
+
+OpusSpeedTest::OpusSpeedTest()
+    : AudioCodecSpeedTest(kOpusBlockDurationMs,
+                          kOpusSamplingKhz,
+                          kOpusSamplingKhz),
+      opus_encoder_(NULL),
+      opus_decoder_(NULL) {
+}
+
+void OpusSpeedTest::SetUp() {
+  AudioCodecSpeedTest::SetUp();
+  // If channels_ == 1, use Opus VOIP mode, otherwise, audio mode.
+  int app = channels_ == 1 ? 0 : 1;
+  /* Create encoder memory. */
+  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_encoder_, channels_, app));
+  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_, channels_));
+  /* Set bitrate. */
+  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_encoder_, bit_rate_));
+}
+
+void OpusSpeedTest::TearDown() {
+  AudioCodecSpeedTest::TearDown();
+  /* Free memory. */
+  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+float OpusSpeedTest::EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
+                                  size_t max_bytes, size_t* encoded_bytes) {
+  clock_t clocks = clock();
+  int value = WebRtcOpus_Encode(opus_encoder_, in_data,
+                                input_length_sample_, max_bytes,
+                                bit_stream);
+  clocks = clock() - clocks;
+  EXPECT_GT(value, 0);
+  *encoded_bytes = static_cast<size_t>(value);
+  return 1000.0 * clocks / CLOCKS_PER_SEC;
+}
+
+float OpusSpeedTest::DecodeABlock(const uint8_t* bit_stream,
+                                  size_t encoded_bytes, int16_t* out_data) {
+  int value;
+  int16_t audio_type;
+  clock_t clocks = clock();
+  value = WebRtcOpus_Decode(opus_decoder_, bit_stream, encoded_bytes, out_data,
+                            &audio_type);
+  clocks = clock() - clocks;
+  EXPECT_EQ(output_length_sample_, static_cast<size_t>(value));
+  return 1000.0 * clocks / CLOCKS_PER_SEC;
+}
+
+/* Test audio length in second. */
+constexpr size_t kDurationSec = 400;
+
+#define ADD_TEST(complexity) \
+TEST_P(OpusSpeedTest, OpusSetComplexityTest##complexity) { \
+  /* Set complexity. */ \
+  printf("Setting complexity to %d ...\n", complexity); \
+  EXPECT_EQ(0, WebRtcOpus_SetComplexity(opus_encoder_, complexity)); \
+  EncodeDecode(kDurationSec); \
+}
+
+ADD_TEST(10);
+ADD_TEST(9);
+ADD_TEST(8);
+ADD_TEST(7);
+ADD_TEST(6);
+ADD_TEST(5);
+ADD_TEST(4);
+ADD_TEST(3);
+ADD_TEST(2);
+ADD_TEST(1);
+ADD_TEST(0);
+
+#define ADD_BANDWIDTH_TEST(bandwidth)                                \
+  TEST_P(OpusSpeedTest, OpusSetBandwidthTest##bandwidth) {           \
+    /* Set bandwidth. */                                             \
+    printf("Setting bandwidth to %d ...\n", bandwidth);              \
+    EXPECT_EQ(0, WebRtcOpus_SetBandwidth(opus_encoder_, bandwidth)); \
+    EncodeDecode(kDurationSec);                                      \
+  }
+
+ADD_BANDWIDTH_TEST(OPUS_BANDWIDTH_NARROWBAND);
+ADD_BANDWIDTH_TEST(OPUS_BANDWIDTH_MEDIUMBAND);
+ADD_BANDWIDTH_TEST(OPUS_BANDWIDTH_WIDEBAND);
+ADD_BANDWIDTH_TEST(OPUS_BANDWIDTH_SUPERWIDEBAND);
+ADD_BANDWIDTH_TEST(OPUS_BANDWIDTH_FULLBAND);
+
+// List all test cases: (channel, bit rat, filename, extension).
+const coding_param param_set[] = {
+    std::make_tuple(1,
+                    64000,
+                    string("audio_coding/speech_mono_32_48kHz"),
+                    string("pcm"),
+                    true),
+    std::make_tuple(1,
+                    32000,
+                    string("audio_coding/speech_mono_32_48kHz"),
+                    string("pcm"),
+                    true),
+    std::make_tuple(2,
+                    64000,
+                    string("audio_coding/music_stereo_48kHz"),
+                    string("pcm"),
+                    true)};
+
+INSTANTIATE_TEST_CASE_P(AllTest, OpusSpeedTest,
+                        ::testing::ValuesIn(param_set));
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/opus/opus_unittest.cc b/modules/audio_coding/codecs/opus/opus_unittest.cc
new file mode 100644
index 0000000..12a1585
--- /dev/null
+++ b/modules/audio_coding/codecs/opus/opus_unittest.cc
@@ -0,0 +1,816 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <string>
+
+#include "modules/audio_coding/codecs/opus/opus_inst.h"
+#include "modules/audio_coding/codecs/opus/opus_interface.h"
+#include "modules/audio_coding/neteq/tools/audio_loop.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+using test::AudioLoop;
+using ::testing::TestWithParam;
+using ::testing::Values;
+using ::testing::Combine;
+
+// Maximum number of bytes in output bitstream.
+const size_t kMaxBytes = 1000;
+// Sample rate of Opus.
+const size_t kOpusRateKhz = 48;
+// Number of samples-per-channel in a 20 ms frame, sampled at 48 kHz.
+const size_t kOpus20msFrameSamples = kOpusRateKhz * 20;
+// Number of samples-per-channel in a 10 ms frame, sampled at 48 kHz.
+const size_t kOpus10msFrameSamples = kOpusRateKhz * 10;
+
+class OpusTest : public TestWithParam<::testing::tuple<int, int>> {
+ protected:
+  OpusTest();
+
+  void TestDtxEffect(bool dtx, int block_length_ms);
+
+  void TestCbrEffect(bool dtx, int block_length_ms);
+
+  // Prepare |speech_data_| for encoding, read from a hard-coded file.
+  // After preparation, |speech_data_.GetNextBlock()| returns a pointer to a
+  // block of |block_length_ms| milliseconds. The data is looped every
+  // |loop_length_ms| milliseconds.
+  void PrepareSpeechData(size_t channel,
+                         int block_length_ms,
+                         int loop_length_ms);
+
+  int EncodeDecode(WebRtcOpusEncInst* encoder,
+                   rtc::ArrayView<const int16_t> input_audio,
+                   WebRtcOpusDecInst* decoder,
+                   int16_t* output_audio,
+                   int16_t* audio_type);
+
+  void SetMaxPlaybackRate(WebRtcOpusEncInst* encoder,
+                          opus_int32 expect, int32_t set);
+
+  void CheckAudioBounded(const int16_t* audio, size_t samples, size_t channels,
+                         uint16_t bound) const;
+
+  WebRtcOpusEncInst* opus_encoder_;
+  WebRtcOpusDecInst* opus_decoder_;
+
+  AudioLoop speech_data_;
+  uint8_t bitstream_[kMaxBytes];
+  size_t encoded_bytes_;
+  size_t channels_;
+  int application_;
+};
+
+OpusTest::OpusTest()
+    : opus_encoder_(NULL),
+      opus_decoder_(NULL),
+      encoded_bytes_(0),
+      channels_(static_cast<size_t>(::testing::get<0>(GetParam()))),
+      application_(::testing::get<1>(GetParam())) {
+}
+
+void OpusTest::PrepareSpeechData(size_t channel, int block_length_ms,
+                                 int loop_length_ms) {
+  const std::string file_name =
+        webrtc::test::ResourcePath((channel == 1) ?
+            "audio_coding/testfile32kHz" :
+            "audio_coding/teststereo32kHz", "pcm");
+  if (loop_length_ms < block_length_ms) {
+    loop_length_ms = block_length_ms;
+  }
+  EXPECT_TRUE(speech_data_.Init(file_name,
+                                loop_length_ms * kOpusRateKhz * channel,
+                                block_length_ms * kOpusRateKhz * channel));
+}
+
+void OpusTest::SetMaxPlaybackRate(WebRtcOpusEncInst* encoder,
+                                  opus_int32 expect,
+                                  int32_t set) {
+  opus_int32 bandwidth;
+  EXPECT_EQ(0, WebRtcOpus_SetMaxPlaybackRate(opus_encoder_, set));
+  opus_encoder_ctl(opus_encoder_->encoder,
+                   OPUS_GET_MAX_BANDWIDTH(&bandwidth));
+  EXPECT_EQ(expect, bandwidth);
+}
+
+void OpusTest::CheckAudioBounded(const int16_t* audio, size_t samples,
+                                 size_t channels, uint16_t bound) const {
+  for (size_t i = 0; i < samples; ++i) {
+    for (size_t c = 0; c < channels; ++c) {
+      ASSERT_GE(audio[i * channels + c], -bound);
+      ASSERT_LE(audio[i * channels + c], bound);
+    }
+  }
+}
+
+int OpusTest::EncodeDecode(WebRtcOpusEncInst* encoder,
+                           rtc::ArrayView<const int16_t> input_audio,
+                           WebRtcOpusDecInst* decoder,
+                           int16_t* output_audio,
+                           int16_t* audio_type) {
+  int encoded_bytes_int = WebRtcOpus_Encode(
+      encoder, input_audio.data(),
+      rtc::CheckedDivExact(input_audio.size(), channels_),
+      kMaxBytes, bitstream_);
+  EXPECT_GE(encoded_bytes_int, 0);
+  encoded_bytes_ = static_cast<size_t>(encoded_bytes_int);
+  int est_len = WebRtcOpus_DurationEst(decoder, bitstream_, encoded_bytes_);
+  int act_len = WebRtcOpus_Decode(decoder, bitstream_,
+                                  encoded_bytes_, output_audio,
+                                  audio_type);
+  EXPECT_EQ(est_len, act_len);
+  return act_len;
+}
+
+// Test if encoder/decoder can enter DTX mode properly and do not enter DTX when
+// they should not. This test is signal dependent.
+void OpusTest::TestDtxEffect(bool dtx, int block_length_ms) {
+  PrepareSpeechData(channels_, block_length_ms, 2000);
+  const size_t samples = kOpusRateKhz * block_length_ms;
+
+  // Create encoder memory.
+  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_encoder_,
+                                        channels_,
+                                        application_));
+  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_, channels_));
+
+  // Set bitrate.
+  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_encoder_,
+                                     channels_ == 1 ? 32000 : 64000));
+
+  // Set input audio as silence.
+  std::vector<int16_t> silence(samples * channels_, 0);
+
+  // Setting DTX.
+  EXPECT_EQ(0, dtx ? WebRtcOpus_EnableDtx(opus_encoder_) :
+      WebRtcOpus_DisableDtx(opus_encoder_));
+
+  int16_t audio_type;
+  int16_t* output_data_decode = new int16_t[samples * channels_];
+
+  for (int i = 0; i < 100; ++i) {
+    EXPECT_EQ(samples,
+              static_cast<size_t>(EncodeDecode(
+                  opus_encoder_, speech_data_.GetNextBlock(), opus_decoder_,
+                  output_data_decode, &audio_type)));
+    // If not DTX, it should never enter DTX mode. If DTX, we do not care since
+    // whether it enters DTX depends on the signal type.
+    if (!dtx) {
+      EXPECT_GT(encoded_bytes_, 1U);
+      EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
+      EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
+      EXPECT_EQ(0, audio_type);  // Speech.
+    }
+  }
+
+  // We input some silent segments. In DTX mode, the encoder will stop sending.
+  // However, DTX may happen after a while.
+  for (int i = 0; i < 30; ++i) {
+    EXPECT_EQ(samples,
+              static_cast<size_t>(EncodeDecode(
+                  opus_encoder_, silence, opus_decoder_, output_data_decode,
+                  &audio_type)));
+    if (!dtx) {
+      EXPECT_GT(encoded_bytes_, 1U);
+      EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
+      EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
+      EXPECT_EQ(0, audio_type);  // Speech.
+    } else if (encoded_bytes_ == 1) {
+      EXPECT_EQ(1, opus_encoder_->in_dtx_mode);
+      EXPECT_EQ(1, opus_decoder_->in_dtx_mode);
+      EXPECT_EQ(2, audio_type);  // Comfort noise.
+      break;
+    }
+  }
+
+  // When Opus is in DTX, it wakes up in a regular basis. It sends two packets,
+  // one with an arbitrary size and the other of 1-byte, then stops sending for
+  // a certain number of frames.
+
+  // |max_dtx_frames| is the maximum number of frames Opus can stay in DTX.
+  const int max_dtx_frames = 400 / block_length_ms + 1;
+
+  // We run |kRunTimeMs| milliseconds of pure silence.
+  const int kRunTimeMs = 4500;
+
+  // We check that, after a |kCheckTimeMs| milliseconds (given that the CNG in
+  // Opus needs time to adapt), the absolute values of DTX decoded signal are
+  // bounded by |kOutputValueBound|.
+  const int kCheckTimeMs = 4000;
+
+#if defined(OPUS_FIXED_POINT)
+  // Fixed-point Opus generates a random (comfort) noise, which has a less
+  // predictable value bound than its floating-point Opus. This value depends on
+  // input signal, and the time window for checking the output values (between
+  // |kCheckTimeMs| and |kRunTimeMs|).
+  const uint16_t kOutputValueBound = 30;
+
+#else
+  const uint16_t kOutputValueBound = 2;
+#endif
+
+  int time = 0;
+  while (time < kRunTimeMs) {
+    // DTX mode is maintained for maximum |max_dtx_frames| frames.
+    int i = 0;
+    for (; i < max_dtx_frames; ++i) {
+      time += block_length_ms;
+      EXPECT_EQ(samples,
+                static_cast<size_t>(EncodeDecode(
+                    opus_encoder_, silence, opus_decoder_, output_data_decode,
+                    &audio_type)));
+      if (dtx) {
+        if (encoded_bytes_ > 1)
+          break;
+        EXPECT_EQ(0U, encoded_bytes_)  // Send 0 byte.
+            << "Opus should have entered DTX mode.";
+        EXPECT_EQ(1, opus_encoder_->in_dtx_mode);
+        EXPECT_EQ(1, opus_decoder_->in_dtx_mode);
+        EXPECT_EQ(2, audio_type);  // Comfort noise.
+        if (time >= kCheckTimeMs) {
+          CheckAudioBounded(output_data_decode, samples, channels_,
+                            kOutputValueBound);
+        }
+      } else {
+        EXPECT_GT(encoded_bytes_, 1U);
+        EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
+        EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
+        EXPECT_EQ(0, audio_type);  // Speech.
+      }
+    }
+
+    if (dtx) {
+      // With DTX, Opus must stop transmission for some time.
+      EXPECT_GT(i, 1);
+    }
+
+    // We expect a normal payload.
+    EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
+    EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
+    EXPECT_EQ(0, audio_type);  // Speech.
+
+    // Enters DTX again immediately.
+    time += block_length_ms;
+    EXPECT_EQ(samples,
+              static_cast<size_t>(EncodeDecode(
+                  opus_encoder_, silence, opus_decoder_, output_data_decode,
+                  &audio_type)));
+    if (dtx) {
+      EXPECT_EQ(1U, encoded_bytes_);  // Send 1 byte.
+      EXPECT_EQ(1, opus_encoder_->in_dtx_mode);
+      EXPECT_EQ(1, opus_decoder_->in_dtx_mode);
+      EXPECT_EQ(2, audio_type);  // Comfort noise.
+      if (time >= kCheckTimeMs) {
+        CheckAudioBounded(output_data_decode, samples, channels_,
+                          kOutputValueBound);
+      }
+    } else {
+      EXPECT_GT(encoded_bytes_, 1U);
+      EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
+      EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
+      EXPECT_EQ(0, audio_type);  // Speech.
+    }
+  }
+
+  silence[0] = 10000;
+  if (dtx) {
+    // Verify that encoder/decoder can jump out from DTX mode.
+    EXPECT_EQ(samples,
+              static_cast<size_t>(EncodeDecode(
+                  opus_encoder_, silence, opus_decoder_, output_data_decode,
+                  &audio_type)));
+    EXPECT_GT(encoded_bytes_, 1U);
+    EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
+    EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
+    EXPECT_EQ(0, audio_type);  // Speech.
+  }
+
+  // Free memory.
+  delete[] output_data_decode;
+  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+// Test if CBR does what we expect.
+void OpusTest::TestCbrEffect(bool cbr, int block_length_ms) {
+  PrepareSpeechData(channels_, block_length_ms, 2000);
+  const size_t samples = kOpusRateKhz * block_length_ms;
+
+  int32_t max_pkt_size_diff = 0;
+  int32_t prev_pkt_size = 0;
+
+  // Create encoder memory.
+  EXPECT_EQ(0,
+            WebRtcOpus_EncoderCreate(&opus_encoder_, channels_, application_));
+  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_, channels_));
+
+  // Set bitrate.
+  EXPECT_EQ(
+      0, WebRtcOpus_SetBitRate(opus_encoder_, channels_ == 1 ? 32000 : 64000));
+
+  // Setting CBR.
+  EXPECT_EQ(0, cbr ? WebRtcOpus_EnableCbr(opus_encoder_)
+                   : WebRtcOpus_DisableCbr(opus_encoder_));
+
+  int16_t audio_type;
+  std::vector<int16_t> audio_out(samples * channels_);
+  for (int i = 0; i < 100; ++i) {
+    EXPECT_EQ(samples, static_cast<size_t>(EncodeDecode(
+                           opus_encoder_, speech_data_.GetNextBlock(),
+                           opus_decoder_, audio_out.data(), &audio_type)));
+
+    if (prev_pkt_size > 0) {
+      int32_t diff = std::abs((int32_t)encoded_bytes_ - prev_pkt_size);
+      max_pkt_size_diff = std::max(max_pkt_size_diff, diff);
+    }
+    prev_pkt_size = rtc::checked_cast<int32_t>(encoded_bytes_);
+  }
+
+  if (cbr) {
+    EXPECT_EQ(max_pkt_size_diff, 0);
+  } else {
+    EXPECT_GT(max_pkt_size_diff, 0);
+  }
+
+  // Free memory.
+  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+// Test failing Create.
+TEST(OpusTest, OpusCreateFail) {
+  WebRtcOpusEncInst* opus_encoder;
+  WebRtcOpusDecInst* opus_decoder;
+
+  // Test to see that an invalid pointer is caught.
+  EXPECT_EQ(-1, WebRtcOpus_EncoderCreate(NULL, 1, 0));
+  // Invalid channel number.
+  EXPECT_EQ(-1, WebRtcOpus_EncoderCreate(&opus_encoder, 3, 0));
+  // Invalid applciation mode.
+  EXPECT_EQ(-1, WebRtcOpus_EncoderCreate(&opus_encoder, 1, 2));
+
+  EXPECT_EQ(-1, WebRtcOpus_DecoderCreate(NULL, 1));
+  // Invalid channel number.
+  EXPECT_EQ(-1, WebRtcOpus_DecoderCreate(&opus_decoder, 3));
+}
+
+// Test failing Free.
+TEST(OpusTest, OpusFreeFail) {
+  // Test to see that an invalid pointer is caught.
+  EXPECT_EQ(-1, WebRtcOpus_EncoderFree(NULL));
+  EXPECT_EQ(-1, WebRtcOpus_DecoderFree(NULL));
+}
+
+// Test normal Create and Free.
+TEST_P(OpusTest, OpusCreateFree) {
+  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_encoder_,
+                                        channels_,
+                                        application_));
+  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_, channels_));
+  EXPECT_TRUE(opus_encoder_ != NULL);
+  EXPECT_TRUE(opus_decoder_ != NULL);
+  // Free encoder and decoder memory.
+  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+TEST_P(OpusTest, OpusEncodeDecode) {
+  PrepareSpeechData(channels_, 20, 20);
+
+  // Create encoder memory.
+  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_encoder_,
+                                        channels_,
+                                        application_));
+  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_,
+                                        channels_));
+
+  // Set bitrate.
+  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_encoder_,
+                                     channels_ == 1 ? 32000 : 64000));
+
+  // Check number of channels for decoder.
+  EXPECT_EQ(channels_, WebRtcOpus_DecoderChannels(opus_decoder_));
+
+  // Check application mode.
+  opus_int32 app;
+  opus_encoder_ctl(opus_encoder_->encoder,
+                   OPUS_GET_APPLICATION(&app));
+  EXPECT_EQ(application_ == 0 ? OPUS_APPLICATION_VOIP : OPUS_APPLICATION_AUDIO,
+            app);
+
+  // Encode & decode.
+  int16_t audio_type;
+  int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
+  EXPECT_EQ(kOpus20msFrameSamples,
+            static_cast<size_t>(
+                EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
+                             opus_decoder_, output_data_decode, &audio_type)));
+
+  // Free memory.
+  delete[] output_data_decode;
+  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+TEST_P(OpusTest, OpusSetBitRate) {
+  // Test without creating encoder memory.
+  EXPECT_EQ(-1, WebRtcOpus_SetBitRate(opus_encoder_, 60000));
+
+  // Create encoder memory, try with different bitrates.
+  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_encoder_,
+                                        channels_,
+                                        application_));
+  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_encoder_, 30000));
+  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_encoder_, 60000));
+  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_encoder_, 300000));
+  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_encoder_, 600000));
+
+  // Free memory.
+  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+}
+
+TEST_P(OpusTest, OpusSetComplexity) {
+  // Test without creating encoder memory.
+  EXPECT_EQ(-1, WebRtcOpus_SetComplexity(opus_encoder_, 9));
+
+  // Create encoder memory, try with different complexities.
+  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_encoder_,
+                                        channels_,
+                                        application_));
+
+  EXPECT_EQ(0, WebRtcOpus_SetComplexity(opus_encoder_, 0));
+  EXPECT_EQ(0, WebRtcOpus_SetComplexity(opus_encoder_, 10));
+  EXPECT_EQ(-1, WebRtcOpus_SetComplexity(opus_encoder_, 11));
+
+  // Free memory.
+  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+}
+
+TEST_P(OpusTest, OpusSetBandwidth) {
+  PrepareSpeechData(channels_, 20, 20);
+
+  int16_t audio_type;
+  std::unique_ptr<int16_t[]> output_data_decode(
+      new int16_t[kOpus20msFrameSamples * channels_]());
+
+  // Test without creating encoder memory.
+  EXPECT_EQ(-1,
+            WebRtcOpus_SetBandwidth(opus_encoder_, OPUS_BANDWIDTH_NARROWBAND));
+  EXPECT_EQ(-1, WebRtcOpus_GetBandwidth(opus_encoder_));
+
+  // Create encoder memory, try with different bandwidths.
+  EXPECT_EQ(0,
+            WebRtcOpus_EncoderCreate(&opus_encoder_, channels_, application_));
+  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_, channels_));
+
+  EXPECT_EQ(-1, WebRtcOpus_SetBandwidth(opus_encoder_,
+                                        OPUS_BANDWIDTH_NARROWBAND - 1));
+  EXPECT_EQ(0,
+            WebRtcOpus_SetBandwidth(opus_encoder_, OPUS_BANDWIDTH_NARROWBAND));
+  EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(), opus_decoder_,
+               output_data_decode.get(), &audio_type);
+  EXPECT_EQ(OPUS_BANDWIDTH_NARROWBAND, WebRtcOpus_GetBandwidth(opus_encoder_));
+  EXPECT_EQ(0, WebRtcOpus_SetBandwidth(opus_encoder_, OPUS_BANDWIDTH_FULLBAND));
+  EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(), opus_decoder_,
+               output_data_decode.get(), &audio_type);
+  EXPECT_EQ(OPUS_BANDWIDTH_FULLBAND, WebRtcOpus_GetBandwidth(opus_encoder_));
+  EXPECT_EQ(
+      -1, WebRtcOpus_SetBandwidth(opus_encoder_, OPUS_BANDWIDTH_FULLBAND + 1));
+  EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(), opus_decoder_,
+               output_data_decode.get(), &audio_type);
+  EXPECT_EQ(OPUS_BANDWIDTH_FULLBAND, WebRtcOpus_GetBandwidth(opus_encoder_));
+
+  // Free memory.
+  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+TEST_P(OpusTest, OpusForceChannels) {
+  // Test without creating encoder memory.
+  EXPECT_EQ(-1, WebRtcOpus_SetForceChannels(opus_encoder_, 1));
+
+  ASSERT_EQ(0,
+            WebRtcOpus_EncoderCreate(&opus_encoder_, channels_, application_));
+
+  if (channels_ == 2) {
+    EXPECT_EQ(-1, WebRtcOpus_SetForceChannels(opus_encoder_, 3));
+    EXPECT_EQ(0, WebRtcOpus_SetForceChannels(opus_encoder_, 2));
+    EXPECT_EQ(0, WebRtcOpus_SetForceChannels(opus_encoder_, 1));
+    EXPECT_EQ(0, WebRtcOpus_SetForceChannels(opus_encoder_, 0));
+  } else {
+    EXPECT_EQ(-1, WebRtcOpus_SetForceChannels(opus_encoder_, 2));
+    EXPECT_EQ(0, WebRtcOpus_SetForceChannels(opus_encoder_, 1));
+    EXPECT_EQ(0, WebRtcOpus_SetForceChannels(opus_encoder_, 0));
+  }
+
+  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+}
+
+// Encode and decode one frame, initialize the decoder and
+// decode once more.
+TEST_P(OpusTest, OpusDecodeInit) {
+  PrepareSpeechData(channels_, 20, 20);
+
+  // Create encoder memory.
+  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_encoder_,
+                                        channels_,
+                                        application_));
+  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_, channels_));
+
+  // Encode & decode.
+  int16_t audio_type;
+  int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
+  EXPECT_EQ(kOpus20msFrameSamples,
+            static_cast<size_t>(
+                EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
+                             opus_decoder_, output_data_decode, &audio_type)));
+
+  WebRtcOpus_DecoderInit(opus_decoder_);
+
+  EXPECT_EQ(kOpus20msFrameSamples,
+            static_cast<size_t>(WebRtcOpus_Decode(
+                opus_decoder_, bitstream_, encoded_bytes_, output_data_decode,
+                &audio_type)));
+
+  // Free memory.
+  delete[] output_data_decode;
+  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+TEST_P(OpusTest, OpusEnableDisableFec) {
+  // Test without creating encoder memory.
+  EXPECT_EQ(-1, WebRtcOpus_EnableFec(opus_encoder_));
+  EXPECT_EQ(-1, WebRtcOpus_DisableFec(opus_encoder_));
+
+  // Create encoder memory.
+  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_encoder_,
+                                        channels_,
+                                        application_));
+
+  EXPECT_EQ(0, WebRtcOpus_EnableFec(opus_encoder_));
+  EXPECT_EQ(0, WebRtcOpus_DisableFec(opus_encoder_));
+
+  // Free memory.
+  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+}
+
+TEST_P(OpusTest, OpusEnableDisableDtx) {
+  // Test without creating encoder memory.
+  EXPECT_EQ(-1, WebRtcOpus_EnableDtx(opus_encoder_));
+  EXPECT_EQ(-1, WebRtcOpus_DisableDtx(opus_encoder_));
+
+  // Create encoder memory.
+  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_encoder_,
+                                        channels_,
+                                        application_));
+
+  opus_int32 dtx;
+
+  // DTX is off by default.
+  opus_encoder_ctl(opus_encoder_->encoder,
+                   OPUS_GET_DTX(&dtx));
+  EXPECT_EQ(0, dtx);
+
+  // Test to enable DTX.
+  EXPECT_EQ(0, WebRtcOpus_EnableDtx(opus_encoder_));
+  opus_encoder_ctl(opus_encoder_->encoder,
+                   OPUS_GET_DTX(&dtx));
+  EXPECT_EQ(1, dtx);
+
+  // Test to disable DTX.
+  EXPECT_EQ(0, WebRtcOpus_DisableDtx(opus_encoder_));
+  opus_encoder_ctl(opus_encoder_->encoder,
+                   OPUS_GET_DTX(&dtx));
+  EXPECT_EQ(0, dtx);
+
+
+  // Free memory.
+  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+}
+
+TEST_P(OpusTest, OpusDtxOff) {
+  TestDtxEffect(false, 10);
+  TestDtxEffect(false, 20);
+  TestDtxEffect(false, 40);
+}
+
+TEST_P(OpusTest, OpusDtxOn) {
+  TestDtxEffect(true, 10);
+  TestDtxEffect(true, 20);
+  TestDtxEffect(true, 40);
+}
+
+TEST_P(OpusTest, OpusCbrOff) {
+  TestCbrEffect(false, 10);
+  TestCbrEffect(false, 20);
+  TestCbrEffect(false, 40);
+}
+
+TEST_P(OpusTest, OpusCbrOn) {
+  TestCbrEffect(true, 10);
+  TestCbrEffect(true, 20);
+  TestCbrEffect(true, 40);
+}
+
+TEST_P(OpusTest, OpusSetPacketLossRate) {
+  // Test without creating encoder memory.
+  EXPECT_EQ(-1, WebRtcOpus_SetPacketLossRate(opus_encoder_, 50));
+
+  // Create encoder memory.
+  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_encoder_,
+                                        channels_,
+                                        application_));
+
+  EXPECT_EQ(0, WebRtcOpus_SetPacketLossRate(opus_encoder_, 50));
+  EXPECT_EQ(-1, WebRtcOpus_SetPacketLossRate(opus_encoder_, -1));
+  EXPECT_EQ(-1, WebRtcOpus_SetPacketLossRate(opus_encoder_, 101));
+
+  // Free memory.
+  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+}
+
+TEST_P(OpusTest, OpusSetMaxPlaybackRate) {
+  // Test without creating encoder memory.
+  EXPECT_EQ(-1, WebRtcOpus_SetMaxPlaybackRate(opus_encoder_, 20000));
+
+  // Create encoder memory.
+  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_encoder_,
+                                        channels_,
+                                        application_));
+
+  SetMaxPlaybackRate(opus_encoder_, OPUS_BANDWIDTH_FULLBAND, 48000);
+  SetMaxPlaybackRate(opus_encoder_, OPUS_BANDWIDTH_FULLBAND, 24001);
+  SetMaxPlaybackRate(opus_encoder_, OPUS_BANDWIDTH_SUPERWIDEBAND, 24000);
+  SetMaxPlaybackRate(opus_encoder_, OPUS_BANDWIDTH_SUPERWIDEBAND, 16001);
+  SetMaxPlaybackRate(opus_encoder_, OPUS_BANDWIDTH_WIDEBAND, 16000);
+  SetMaxPlaybackRate(opus_encoder_, OPUS_BANDWIDTH_WIDEBAND, 12001);
+  SetMaxPlaybackRate(opus_encoder_, OPUS_BANDWIDTH_MEDIUMBAND, 12000);
+  SetMaxPlaybackRate(opus_encoder_, OPUS_BANDWIDTH_MEDIUMBAND, 8001);
+  SetMaxPlaybackRate(opus_encoder_, OPUS_BANDWIDTH_NARROWBAND, 8000);
+  SetMaxPlaybackRate(opus_encoder_, OPUS_BANDWIDTH_NARROWBAND, 4000);
+
+  // Free memory.
+  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+}
+
+// Test PLC.
+TEST_P(OpusTest, OpusDecodePlc) {
+  PrepareSpeechData(channels_, 20, 20);
+
+  // Create encoder memory.
+  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_encoder_,
+                                        channels_,
+                                        application_));
+  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_, channels_));
+
+  // Set bitrate.
+  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_encoder_,
+                                     channels_== 1 ? 32000 : 64000));
+
+  // Check number of channels for decoder.
+  EXPECT_EQ(channels_, WebRtcOpus_DecoderChannels(opus_decoder_));
+
+  // Encode & decode.
+  int16_t audio_type;
+  int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
+  EXPECT_EQ(kOpus20msFrameSamples,
+            static_cast<size_t>(
+                EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
+                             opus_decoder_, output_data_decode, &audio_type)));
+
+  // Call decoder PLC.
+  int16_t* plc_buffer = new int16_t[kOpus20msFrameSamples * channels_];
+  EXPECT_EQ(kOpus20msFrameSamples,
+            static_cast<size_t>(WebRtcOpus_DecodePlc(
+                opus_decoder_, plc_buffer, 1)));
+
+  // Free memory.
+  delete[] plc_buffer;
+  delete[] output_data_decode;
+  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+// Duration estimation.
+TEST_P(OpusTest, OpusDurationEstimation) {
+  PrepareSpeechData(channels_, 20, 20);
+
+  // Create.
+  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_encoder_,
+                                        channels_,
+                                        application_));
+  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_, channels_));
+
+  // 10 ms. We use only first 10 ms of a 20 ms block.
+  auto speech_block = speech_data_.GetNextBlock();
+  int encoded_bytes_int = WebRtcOpus_Encode(
+      opus_encoder_, speech_block.data(),
+      rtc::CheckedDivExact(speech_block.size(), 2 * channels_),
+      kMaxBytes, bitstream_);
+  EXPECT_GE(encoded_bytes_int, 0);
+  EXPECT_EQ(kOpus10msFrameSamples,
+            static_cast<size_t>(WebRtcOpus_DurationEst(
+                opus_decoder_, bitstream_,
+                static_cast<size_t>(encoded_bytes_int))));
+
+  // 20 ms
+  speech_block = speech_data_.GetNextBlock();
+  encoded_bytes_int = WebRtcOpus_Encode(
+      opus_encoder_, speech_block.data(),
+      rtc::CheckedDivExact(speech_block.size(), channels_),
+      kMaxBytes, bitstream_);
+  EXPECT_GE(encoded_bytes_int, 0);
+  EXPECT_EQ(kOpus20msFrameSamples,
+            static_cast<size_t>(WebRtcOpus_DurationEst(
+                opus_decoder_, bitstream_,
+                static_cast<size_t>(encoded_bytes_int))));
+
+  // Free memory.
+  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+TEST_P(OpusTest, OpusDecodeRepacketized) {
+  constexpr size_t kPackets = 6;
+
+  PrepareSpeechData(channels_, 20, 20 * kPackets);
+
+  // Create encoder memory.
+  ASSERT_EQ(0, WebRtcOpus_EncoderCreate(&opus_encoder_,
+                                        channels_,
+                                        application_));
+  ASSERT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_,
+                                        channels_));
+
+  // Set bitrate.
+  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_encoder_,
+                                     channels_ == 1 ? 32000 : 64000));
+
+  // Check number of channels for decoder.
+  EXPECT_EQ(channels_, WebRtcOpus_DecoderChannels(opus_decoder_));
+
+  // Encode & decode.
+  int16_t audio_type;
+  std::unique_ptr<int16_t[]> output_data_decode(
+      new int16_t[kPackets * kOpus20msFrameSamples * channels_]);
+  OpusRepacketizer* rp = opus_repacketizer_create();
+
+  size_t num_packets = 0;
+  constexpr size_t kMaxCycles = 100;
+  for (size_t idx = 0; idx < kMaxCycles; ++idx) {
+    auto speech_block = speech_data_.GetNextBlock();
+    encoded_bytes_ =
+        WebRtcOpus_Encode(opus_encoder_, speech_block.data(),
+                          rtc::CheckedDivExact(speech_block.size(), channels_),
+                          kMaxBytes, bitstream_);
+    if (opus_repacketizer_cat(
+            rp, bitstream_,
+            rtc::checked_cast<opus_int32>(encoded_bytes_)) == OPUS_OK) {
+      ++num_packets;
+      if (num_packets == kPackets) {
+        break;
+      }
+    } else {
+      // Opus repacketizer cannot guarantee a success. We try again if it fails.
+      opus_repacketizer_init(rp);
+      num_packets = 0;
+    }
+  }
+  EXPECT_EQ(kPackets, num_packets);
+
+  encoded_bytes_ = opus_repacketizer_out(rp, bitstream_, kMaxBytes);
+
+  EXPECT_EQ(kOpus20msFrameSamples * kPackets,
+            static_cast<size_t>(WebRtcOpus_DurationEst(
+                opus_decoder_, bitstream_, encoded_bytes_)));
+
+  EXPECT_EQ(kOpus20msFrameSamples * kPackets,
+            static_cast<size_t>(WebRtcOpus_Decode(
+                opus_decoder_, bitstream_, encoded_bytes_,
+                output_data_decode.get(), &audio_type)));
+
+  // Free memory.
+  opus_repacketizer_destroy(rp);
+  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
+  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
+}
+
+INSTANTIATE_TEST_CASE_P(VariousMode,
+                        OpusTest,
+                        Combine(Values(1, 2), Values(0, 1)));
+
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc b/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc
new file mode 100644
index 0000000..b07624d
--- /dev/null
+++ b/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc
@@ -0,0 +1,63 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h"
+
+#include "modules/audio_coding/codecs/legacy_encoded_audio_frame.h"
+#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+AudioDecoderPcm16B::AudioDecoderPcm16B(int sample_rate_hz, size_t num_channels)
+    : sample_rate_hz_(sample_rate_hz), num_channels_(num_channels) {
+  RTC_DCHECK(sample_rate_hz == 8000 || sample_rate_hz == 16000 ||
+             sample_rate_hz == 32000 || sample_rate_hz == 48000)
+      << "Unsupported sample rate " << sample_rate_hz;
+  RTC_DCHECK_GE(num_channels, 1);
+}
+
+void AudioDecoderPcm16B::Reset() {}
+
+int AudioDecoderPcm16B::SampleRateHz() const {
+  return sample_rate_hz_;
+}
+
+size_t AudioDecoderPcm16B::Channels() const {
+  return num_channels_;
+}
+
+int AudioDecoderPcm16B::DecodeInternal(const uint8_t* encoded,
+                                       size_t encoded_len,
+                                       int sample_rate_hz,
+                                       int16_t* decoded,
+                                       SpeechType* speech_type) {
+  RTC_DCHECK_EQ(sample_rate_hz_, sample_rate_hz);
+  size_t ret = WebRtcPcm16b_Decode(encoded, encoded_len, decoded);
+  *speech_type = ConvertSpeechType(1);
+  return static_cast<int>(ret);
+}
+
+std::vector<AudioDecoder::ParseResult> AudioDecoderPcm16B::ParsePayload(
+    rtc::Buffer&& payload,
+    uint32_t timestamp) {
+  const int samples_per_ms = rtc::CheckedDivExact(sample_rate_hz_, 1000);
+  return LegacyEncodedAudioFrame::SplitBySamples(
+      this, std::move(payload), timestamp, samples_per_ms * 2 * num_channels_,
+      samples_per_ms);
+}
+
+int AudioDecoderPcm16B::PacketDuration(const uint8_t* encoded,
+                                       size_t encoded_len) const {
+  // Two encoded byte per sample per channel.
+  return static_cast<int>(encoded_len / (2 * Channels()));
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h b/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h
new file mode 100644
index 0000000..7d23422
--- /dev/null
+++ b/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h
@@ -0,0 +1,44 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_PCM16B_AUDIO_DECODER_PCM16B_H_
+#define MODULES_AUDIO_CODING_CODECS_PCM16B_AUDIO_DECODER_PCM16B_H_
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class AudioDecoderPcm16B final : public AudioDecoder {
+ public:
+  AudioDecoderPcm16B(int sample_rate_hz, size_t num_channels);
+  void Reset() override;
+  std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
+                                        uint32_t timestamp) override;
+  int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+  int SampleRateHz() const override;
+  size_t Channels() const override;
+
+ protected:
+  int DecodeInternal(const uint8_t* encoded,
+                     size_t encoded_len,
+                     int sample_rate_hz,
+                     int16_t* decoded,
+                     SpeechType* speech_type) override;
+
+ private:
+  const int sample_rate_hz_;
+  const size_t num_channels_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderPcm16B);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_CODECS_PCM16B_AUDIO_DECODER_PCM16B_H_
diff --git a/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc b/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc
new file mode 100644
index 0000000..831daed
--- /dev/null
+++ b/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc
@@ -0,0 +1,60 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h"
+
+#include <algorithm>
+
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+size_t AudioEncoderPcm16B::EncodeCall(const int16_t* audio,
+                                      size_t input_len,
+                                      uint8_t* encoded) {
+  return WebRtcPcm16b_Encode(audio, input_len, encoded);
+}
+
+size_t AudioEncoderPcm16B::BytesPerSample() const {
+  return 2;
+}
+
+AudioEncoder::CodecType AudioEncoderPcm16B::GetCodecType() const {
+  return CodecType::kOther;
+}
+
+namespace {
+
+AudioEncoderPcm16B::Config CreateConfig(const CodecInst& codec_inst) {
+  AudioEncoderPcm16B::Config config;
+  config.num_channels = codec_inst.channels;
+  config.sample_rate_hz = codec_inst.plfreq;
+  config.frame_size_ms = rtc::CheckedDivExact(
+      codec_inst.pacsize, rtc::CheckedDivExact(config.sample_rate_hz, 1000));
+  config.payload_type = codec_inst.pltype;
+  return config;
+}
+
+}  // namespace
+
+bool AudioEncoderPcm16B::Config::IsOk() const {
+  if ((sample_rate_hz != 8000) && (sample_rate_hz != 16000) &&
+      (sample_rate_hz != 32000) && (sample_rate_hz != 48000))
+    return false;
+  return AudioEncoderPcm::Config::IsOk();
+}
+
+AudioEncoderPcm16B::AudioEncoderPcm16B(const CodecInst& codec_inst)
+    : AudioEncoderPcm16B(CreateConfig(codec_inst)) {}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h b/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h
new file mode 100644
index 0000000..d6fd6e1
--- /dev/null
+++ b/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h
@@ -0,0 +1,50 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_PCM16B_AUDIO_ENCODER_PCM16B_H_
+#define MODULES_AUDIO_CODING_CODECS_PCM16B_AUDIO_ENCODER_PCM16B_H_
+
+#include "modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+struct CodecInst;
+
+class AudioEncoderPcm16B final : public AudioEncoderPcm {
+ public:
+  struct Config : public AudioEncoderPcm::Config {
+   public:
+    Config() : AudioEncoderPcm::Config(107), sample_rate_hz(8000) {}
+    bool IsOk() const;
+
+    int sample_rate_hz;
+  };
+
+  explicit AudioEncoderPcm16B(const Config& config)
+      : AudioEncoderPcm(config, config.sample_rate_hz) {}
+  explicit AudioEncoderPcm16B(const CodecInst& codec_inst);
+
+ protected:
+  size_t EncodeCall(const int16_t* audio,
+                    size_t input_len,
+                    uint8_t* encoded) override;
+
+  size_t BytesPerSample() const override;
+
+  AudioEncoder::CodecType GetCodecType() const override;
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderPcm16B);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_CODECS_PCM16B_AUDIO_ENCODER_PCM16B_H_
diff --git a/modules/audio_coding/codecs/pcm16b/pcm16b.c b/modules/audio_coding/codecs/pcm16b/pcm16b.c
new file mode 100644
index 0000000..abcff4f
--- /dev/null
+++ b/modules/audio_coding/codecs/pcm16b/pcm16b.c
@@ -0,0 +1,34 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+size_t WebRtcPcm16b_Encode(const int16_t* speech,
+                           size_t len,
+                           uint8_t* encoded) {
+  size_t i;
+  for (i = 0; i < len; ++i) {
+    uint16_t s = speech[i];
+    encoded[2 * i] = s >> 8;
+    encoded[2 * i + 1] = s;
+  }
+  return 2 * len;
+}
+
+size_t WebRtcPcm16b_Decode(const uint8_t* encoded,
+                           size_t len,
+                           int16_t* speech) {
+  size_t i;
+  for (i = 0; i < len / 2; ++i)
+    speech[i] = encoded[2 * i] << 8 | encoded[2 * i + 1];
+  return len / 2;
+}
diff --git a/modules/audio_coding/codecs/pcm16b/pcm16b.h b/modules/audio_coding/codecs/pcm16b/pcm16b.h
new file mode 100644
index 0000000..041701a
--- /dev/null
+++ b/modules/audio_coding/codecs/pcm16b/pcm16b.h
@@ -0,0 +1,68 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_PCM16B_PCM16B_H_
+#define MODULES_AUDIO_CODING_CODECS_PCM16B_PCM16B_H_
+/*
+ * Define the fixpoint numeric formats
+ */
+
+#include <stddef.h>
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/****************************************************************************
+ * WebRtcPcm16b_Encode(...)
+ *
+ * "Encode" a sample vector to 16 bit linear (Encoded standard is big endian)
+ *
+ * Input:
+ *              - speech        : Input speech vector
+ *              - len           : Number of samples in speech vector
+ *
+ * Output:
+ *              - encoded       : Encoded data vector (big endian 16 bit)
+ *
+ * Returned value               : Length (in bytes) of coded data.
+ *                                Always equal to twice the len input parameter.
+ */
+
+size_t WebRtcPcm16b_Encode(const int16_t* speech,
+                           size_t len,
+                           uint8_t* encoded);
+
+/****************************************************************************
+ * WebRtcPcm16b_Decode(...)
+ *
+ * "Decode" a vector to 16 bit linear (Encoded standard is big endian)
+ *
+ * Input:
+ *              - encoded       : Encoded data vector (big endian 16 bit)
+ *              - len           : Number of bytes in encoded
+ *
+ * Output:
+ *              - speech        : Decoded speech vector
+ *
+ * Returned value               : Samples in speech
+ */
+
+size_t WebRtcPcm16b_Decode(const uint8_t* encoded,
+                           size_t len,
+                           int16_t* speech);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* MODULES_AUDIO_CODING_CODECS_PCM16B_PCM16B_H_ */
diff --git a/modules/audio_coding/codecs/pcm16b/pcm16b_common.cc b/modules/audio_coding/codecs/pcm16b/pcm16b_common.cc
new file mode 100644
index 0000000..6d0fc2d
--- /dev/null
+++ b/modules/audio_coding/codecs/pcm16b/pcm16b_common.cc
@@ -0,0 +1,25 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/pcm16b/pcm16b_common.h"
+
+namespace webrtc {
+
+void Pcm16BAppendSupportedCodecSpecs(std::vector<AudioCodecSpec>* specs) {
+  for (uint8_t num_channels : {1, 2}) {
+    for (int sample_rate_hz : {8000, 16000, 32000}) {
+      specs->push_back(
+          {{"L16", sample_rate_hz, num_channels},
+           {sample_rate_hz, num_channels, sample_rate_hz * num_channels * 16}});
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/pcm16b/pcm16b_common.h b/modules/audio_coding/codecs/pcm16b/pcm16b_common.h
new file mode 100644
index 0000000..980a996
--- /dev/null
+++ b/modules/audio_coding/codecs/pcm16b/pcm16b_common.h
@@ -0,0 +1,22 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_PCM16B_PCM16B_COMMON_H_
+#define MODULES_AUDIO_CODING_CODECS_PCM16B_PCM16B_COMMON_H_
+
+#include <vector>
+
+#include "api/audio_codecs/audio_decoder_factory.h"
+
+namespace webrtc {
+void Pcm16BAppendSupportedCodecSpecs(std::vector<AudioCodecSpec>* specs);
+}
+
+#endif  // MODULES_AUDIO_CODING_CODECS_PCM16B_PCM16B_COMMON_H_
diff --git a/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc b/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
new file mode 100644
index 0000000..4b9df6e
--- /dev/null
+++ b/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
@@ -0,0 +1,142 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/red/audio_encoder_copy_red.h"
+
+#include <string.h>
+
+#include <utility>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+AudioEncoderCopyRed::Config::Config() = default;
+AudioEncoderCopyRed::Config::Config(Config&&) = default;
+AudioEncoderCopyRed::Config::~Config() = default;
+
+AudioEncoderCopyRed::AudioEncoderCopyRed(Config&& config)
+    : speech_encoder_(std::move(config.speech_encoder)),
+      red_payload_type_(config.payload_type) {
+  RTC_CHECK(speech_encoder_) << "Speech encoder not provided.";
+}
+
+AudioEncoderCopyRed::~AudioEncoderCopyRed() = default;
+
+int AudioEncoderCopyRed::SampleRateHz() const {
+  return speech_encoder_->SampleRateHz();
+}
+
+size_t AudioEncoderCopyRed::NumChannels() const {
+  return speech_encoder_->NumChannels();
+}
+
+int AudioEncoderCopyRed::RtpTimestampRateHz() const {
+  return speech_encoder_->RtpTimestampRateHz();
+}
+
+size_t AudioEncoderCopyRed::Num10MsFramesInNextPacket() const {
+  return speech_encoder_->Num10MsFramesInNextPacket();
+}
+
+size_t AudioEncoderCopyRed::Max10MsFramesInAPacket() const {
+  return speech_encoder_->Max10MsFramesInAPacket();
+}
+
+int AudioEncoderCopyRed::GetTargetBitrate() const {
+  return speech_encoder_->GetTargetBitrate();
+}
+
+AudioEncoder::EncodedInfo AudioEncoderCopyRed::EncodeImpl(
+    uint32_t rtp_timestamp,
+    rtc::ArrayView<const int16_t> audio,
+    rtc::Buffer* encoded) {
+
+  const size_t primary_offset = encoded->size();
+  EncodedInfo info =
+      speech_encoder_->Encode(rtp_timestamp, audio, encoded);
+
+  RTC_CHECK(info.redundant.empty()) << "Cannot use nested redundant encoders.";
+  RTC_DCHECK_EQ(encoded->size() - primary_offset, info.encoded_bytes);
+
+  if (info.encoded_bytes > 0) {
+    // |info| will be implicitly cast to an EncodedInfoLeaf struct, effectively
+    // discarding the (empty) vector of redundant information. This is
+    // intentional.
+    info.redundant.push_back(info);
+    RTC_DCHECK_EQ(info.redundant.size(), 1);
+    if (secondary_info_.encoded_bytes > 0) {
+      encoded->AppendData(secondary_encoded_);
+      info.redundant.push_back(secondary_info_);
+      RTC_DCHECK_EQ(info.redundant.size(), 2);
+    }
+    // Save primary to secondary.
+    secondary_encoded_.SetData(encoded->data() + primary_offset,
+                               info.encoded_bytes);
+    secondary_info_ = info;
+    RTC_DCHECK_EQ(info.speech, info.redundant[0].speech);
+  }
+  // Update main EncodedInfo.
+  info.payload_type = red_payload_type_;
+  info.encoded_bytes = 0;
+  for (std::vector<EncodedInfoLeaf>::const_iterator it = info.redundant.begin();
+       it != info.redundant.end(); ++it) {
+    info.encoded_bytes += it->encoded_bytes;
+  }
+  return info;
+}
+
+void AudioEncoderCopyRed::Reset() {
+  speech_encoder_->Reset();
+  secondary_encoded_.Clear();
+  secondary_info_.encoded_bytes = 0;
+}
+
+bool AudioEncoderCopyRed::SetFec(bool enable) {
+  return speech_encoder_->SetFec(enable);
+}
+
+bool AudioEncoderCopyRed::SetDtx(bool enable) {
+  return speech_encoder_->SetDtx(enable);
+}
+
+bool AudioEncoderCopyRed::SetApplication(Application application) {
+  return speech_encoder_->SetApplication(application);
+}
+
+void AudioEncoderCopyRed::SetMaxPlaybackRate(int frequency_hz) {
+  speech_encoder_->SetMaxPlaybackRate(frequency_hz);
+}
+
+rtc::ArrayView<std::unique_ptr<AudioEncoder>>
+AudioEncoderCopyRed::ReclaimContainedEncoders() {
+  return rtc::ArrayView<std::unique_ptr<AudioEncoder>>(&speech_encoder_, 1);
+}
+
+void AudioEncoderCopyRed::OnReceivedUplinkPacketLossFraction(
+    float uplink_packet_loss_fraction) {
+  speech_encoder_->OnReceivedUplinkPacketLossFraction(
+      uplink_packet_loss_fraction);
+}
+
+void AudioEncoderCopyRed::OnReceivedUplinkRecoverablePacketLossFraction(
+    float uplink_recoverable_packet_loss_fraction) {
+  speech_encoder_->OnReceivedUplinkRecoverablePacketLossFraction(
+      uplink_recoverable_packet_loss_fraction);
+}
+
+void AudioEncoderCopyRed::OnReceivedUplinkBandwidth(
+    int target_audio_bitrate_bps,
+    rtc::Optional<int64_t> bwe_period_ms) {
+  speech_encoder_->OnReceivedUplinkBandwidth(target_audio_bitrate_bps,
+                                             bwe_period_ms);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/red/audio_encoder_copy_red.h b/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
new file mode 100644
index 0000000..e625c50
--- /dev/null
+++ b/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
@@ -0,0 +1,77 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_RED_AUDIO_ENCODER_COPY_RED_H_
+#define MODULES_AUDIO_CODING_CODECS_RED_AUDIO_ENCODER_COPY_RED_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/audio_codecs/audio_encoder.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// This class implements redundant audio coding. The class object will have an
+// underlying AudioEncoder object that performs the actual encodings. The
+// current class will gather the two latest encodings from the underlying codec
+// into one packet.
+class AudioEncoderCopyRed final : public AudioEncoder {
+ public:
+  struct Config {
+    Config();
+    Config(Config&&);
+    ~Config();
+    int payload_type;
+    std::unique_ptr<AudioEncoder> speech_encoder;
+  };
+
+  explicit AudioEncoderCopyRed(Config&& config);
+
+  ~AudioEncoderCopyRed() override;
+
+  int SampleRateHz() const override;
+  size_t NumChannels() const override;
+  int RtpTimestampRateHz() const override;
+  size_t Num10MsFramesInNextPacket() const override;
+  size_t Max10MsFramesInAPacket() const override;
+  int GetTargetBitrate() const override;
+  void Reset() override;
+  bool SetFec(bool enable) override;
+  bool SetDtx(bool enable) override;
+  bool SetApplication(Application application) override;
+  void SetMaxPlaybackRate(int frequency_hz) override;
+  rtc::ArrayView<std::unique_ptr<AudioEncoder>> ReclaimContainedEncoders()
+      override;
+  void OnReceivedUplinkPacketLossFraction(
+      float uplink_packet_loss_fraction) override;
+  void OnReceivedUplinkRecoverablePacketLossFraction(
+      float uplink_recoverable_packet_loss_fraction) override;
+  void OnReceivedUplinkBandwidth(
+      int target_audio_bitrate_bps,
+      rtc::Optional<int64_t> bwe_period_ms) override;
+
+ protected:
+  EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
+                         rtc::ArrayView<const int16_t> audio,
+                         rtc::Buffer* encoded) override;
+
+ private:
+  std::unique_ptr<AudioEncoder> speech_encoder_;
+  int red_payload_type_;
+  rtc::Buffer secondary_encoded_;
+  EncodedInfoLeaf secondary_info_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderCopyRed);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_CODECS_RED_AUDIO_ENCODER_COPY_RED_H_
diff --git a/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc b/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
new file mode 100644
index 0000000..64bafd2
--- /dev/null
+++ b/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
@@ -0,0 +1,307 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <vector>
+
+#include "modules/audio_coding/codecs/red/audio_encoder_copy_red.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+#include "test/mock_audio_encoder.h"
+
+using ::testing::Return;
+using ::testing::_;
+using ::testing::SetArgPointee;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::MockFunction;
+
+namespace webrtc {
+
+namespace {
+static const size_t kMaxNumSamples = 48 * 10 * 2;  // 10 ms @ 48 kHz stereo.
+}
+
+class AudioEncoderCopyRedTest : public ::testing::Test {
+ protected:
+  AudioEncoderCopyRedTest()
+      : mock_encoder_(new MockAudioEncoder),
+        timestamp_(4711),
+        sample_rate_hz_(16000),
+        num_audio_samples_10ms(sample_rate_hz_ / 100),
+        red_payload_type_(200) {
+    AudioEncoderCopyRed::Config config;
+    config.payload_type = red_payload_type_;
+    config.speech_encoder = std::unique_ptr<AudioEncoder>(mock_encoder_);
+    red_.reset(new AudioEncoderCopyRed(std::move(config)));
+    memset(audio_, 0, sizeof(audio_));
+    EXPECT_CALL(*mock_encoder_, NumChannels()).WillRepeatedly(Return(1U));
+    EXPECT_CALL(*mock_encoder_, SampleRateHz())
+        .WillRepeatedly(Return(sample_rate_hz_));
+  }
+
+  void TearDown() override {
+    red_.reset();
+  }
+
+  void Encode() {
+    ASSERT_TRUE(red_.get() != NULL);
+    encoded_.Clear();
+    encoded_info_ = red_->Encode(
+        timestamp_,
+        rtc::ArrayView<const int16_t>(audio_, num_audio_samples_10ms),
+        &encoded_);
+    timestamp_ += rtc::checked_cast<uint32_t>(num_audio_samples_10ms);
+  }
+
+  MockAudioEncoder* mock_encoder_;
+  std::unique_ptr<AudioEncoderCopyRed> red_;
+  uint32_t timestamp_;
+  int16_t audio_[kMaxNumSamples];
+  const int sample_rate_hz_;
+  size_t num_audio_samples_10ms;
+  rtc::Buffer encoded_;
+  AudioEncoder::EncodedInfo encoded_info_;
+  const int red_payload_type_;
+};
+
+TEST_F(AudioEncoderCopyRedTest, CreateAndDestroy) {
+}
+
+TEST_F(AudioEncoderCopyRedTest, CheckSampleRatePropagation) {
+  EXPECT_CALL(*mock_encoder_, SampleRateHz()).WillOnce(Return(17));
+  EXPECT_EQ(17, red_->SampleRateHz());
+}
+
+TEST_F(AudioEncoderCopyRedTest, CheckNumChannelsPropagation) {
+  EXPECT_CALL(*mock_encoder_, NumChannels()).WillOnce(Return(17U));
+  EXPECT_EQ(17U, red_->NumChannels());
+}
+
+TEST_F(AudioEncoderCopyRedTest, CheckFrameSizePropagation) {
+  EXPECT_CALL(*mock_encoder_, Num10MsFramesInNextPacket())
+      .WillOnce(Return(17U));
+  EXPECT_EQ(17U, red_->Num10MsFramesInNextPacket());
+}
+
+TEST_F(AudioEncoderCopyRedTest, CheckMaxFrameSizePropagation) {
+  EXPECT_CALL(*mock_encoder_, Max10MsFramesInAPacket()).WillOnce(Return(17U));
+  EXPECT_EQ(17U, red_->Max10MsFramesInAPacket());
+}
+
+TEST_F(AudioEncoderCopyRedTest, CheckTargetAudioBitratePropagation) {
+  EXPECT_CALL(*mock_encoder_,
+              OnReceivedUplinkBandwidth(4711, rtc::Optional<int64_t>()));
+  red_->OnReceivedUplinkBandwidth(4711, rtc::nullopt);
+}
+
+TEST_F(AudioEncoderCopyRedTest, CheckPacketLossFractionPropagation) {
+  EXPECT_CALL(*mock_encoder_, OnReceivedUplinkPacketLossFraction(0.5));
+  red_->OnReceivedUplinkPacketLossFraction(0.5);
+}
+
+// Checks that the an Encode() call is immediately propagated to the speech
+// encoder.
+TEST_F(AudioEncoderCopyRedTest, CheckImmediateEncode) {
+  // Interleaving the EXPECT_CALL sequence with expectations on the MockFunction
+  // check ensures that exactly one call to EncodeImpl happens in each
+  // Encode call.
+  InSequence s;
+  MockFunction<void(int check_point_id)> check;
+  for (int i = 1; i <= 6; ++i) {
+    EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+        .WillRepeatedly(Return(AudioEncoder::EncodedInfo()));
+    EXPECT_CALL(check, Call(i));
+    Encode();
+    check.Call(i);
+  }
+}
+
+// Checks that no output is produced if the underlying codec doesn't emit any
+// new data, even if the RED codec is loaded with a secondary encoding.
+TEST_F(AudioEncoderCopyRedTest, CheckNoOutput) {
+  static const size_t kEncodedSize = 17;
+  {
+    InSequence s;
+    EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+        .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(kEncodedSize)))
+        .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(0)))
+        .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(kEncodedSize)));
+  }
+
+  // Start with one Encode() call that will produce output.
+  Encode();
+  // First call is a special case, since it does not include a secondary
+  // payload.
+  EXPECT_EQ(1u, encoded_info_.redundant.size());
+  EXPECT_EQ(kEncodedSize, encoded_info_.encoded_bytes);
+
+  // Next call to the speech encoder will not produce any output.
+  Encode();
+  EXPECT_EQ(0u, encoded_info_.encoded_bytes);
+
+  // Final call to the speech encoder will produce output.
+  Encode();
+  EXPECT_EQ(2 * kEncodedSize, encoded_info_.encoded_bytes);
+  ASSERT_EQ(2u, encoded_info_.redundant.size());
+}
+
+// Checks that the correct payload sizes are populated into the redundancy
+// information.
+TEST_F(AudioEncoderCopyRedTest, CheckPayloadSizes) {
+  // Let the mock encoder return payload sizes 1, 2, 3, ..., 10 for the sequence
+  // of calls.
+  static const int kNumPackets = 10;
+  InSequence s;
+  for (int encode_size = 1; encode_size <= kNumPackets; ++encode_size) {
+    EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+        .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(encode_size)));
+  }
+
+  // First call is a special case, since it does not include a secondary
+  // payload.
+  Encode();
+  EXPECT_EQ(1u, encoded_info_.redundant.size());
+  EXPECT_EQ(1u, encoded_info_.encoded_bytes);
+
+  for (size_t i = 2; i <= kNumPackets; ++i) {
+    Encode();
+    ASSERT_EQ(2u, encoded_info_.redundant.size());
+    EXPECT_EQ(i, encoded_info_.redundant[0].encoded_bytes);
+    EXPECT_EQ(i - 1, encoded_info_.redundant[1].encoded_bytes);
+    EXPECT_EQ(i + i - 1, encoded_info_.encoded_bytes);
+  }
+}
+
+// Checks that the correct timestamps are returned.
+TEST_F(AudioEncoderCopyRedTest, CheckTimestamps) {
+  uint32_t primary_timestamp = timestamp_;
+  AudioEncoder::EncodedInfo info;
+  info.encoded_bytes = 17;
+  info.encoded_timestamp = timestamp_;
+
+  EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+      .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+
+  // First call is a special case, since it does not include a secondary
+  // payload.
+  Encode();
+  EXPECT_EQ(primary_timestamp, encoded_info_.encoded_timestamp);
+
+  uint32_t secondary_timestamp = primary_timestamp;
+  primary_timestamp = timestamp_;
+  info.encoded_timestamp = timestamp_;
+  EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+      .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+
+  Encode();
+  ASSERT_EQ(2u, encoded_info_.redundant.size());
+  EXPECT_EQ(primary_timestamp, encoded_info_.redundant[0].encoded_timestamp);
+  EXPECT_EQ(secondary_timestamp, encoded_info_.redundant[1].encoded_timestamp);
+  EXPECT_EQ(primary_timestamp, encoded_info_.encoded_timestamp);
+}
+
+// Checks that the primary and secondary payloads are written correctly.
+TEST_F(AudioEncoderCopyRedTest, CheckPayloads) {
+  // Let the mock encoder write payloads with increasing values. The first
+  // payload will have values 0, 1, 2, ..., kPayloadLenBytes - 1.
+  static const size_t kPayloadLenBytes = 5;
+  uint8_t payload[kPayloadLenBytes];
+  for (uint8_t i = 0; i < kPayloadLenBytes; ++i) {
+    payload[i] = i;
+  }
+  EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+      .WillRepeatedly(Invoke(MockAudioEncoder::CopyEncoding(payload)));
+
+  // First call is a special case, since it does not include a secondary
+  // payload.
+  Encode();
+  EXPECT_EQ(kPayloadLenBytes, encoded_info_.encoded_bytes);
+  for (size_t i = 0; i < kPayloadLenBytes; ++i) {
+    EXPECT_EQ(i, encoded_.data()[i]);
+  }
+
+  for (int j = 0; j < 5; ++j) {
+    // Increment all values of the payload by 10.
+    for (size_t i = 0; i < kPayloadLenBytes; ++i)
+      payload[i] += 10;
+
+    Encode();
+    ASSERT_EQ(2u, encoded_info_.redundant.size());
+    EXPECT_EQ(kPayloadLenBytes, encoded_info_.redundant[0].encoded_bytes);
+    EXPECT_EQ(kPayloadLenBytes, encoded_info_.redundant[1].encoded_bytes);
+    for (size_t i = 0; i < kPayloadLenBytes; ++i) {
+      // Check primary payload.
+      EXPECT_EQ((j + 1) * 10 + i, encoded_.data()[i]);
+      // Check secondary payload.
+      EXPECT_EQ(j * 10 + i, encoded_.data()[i + kPayloadLenBytes]);
+    }
+  }
+}
+
+// Checks correct propagation of payload type.
+// Checks that the correct timestamps are returned.
+TEST_F(AudioEncoderCopyRedTest, CheckPayloadType) {
+  const int primary_payload_type = red_payload_type_ + 1;
+  AudioEncoder::EncodedInfo info;
+  info.encoded_bytes = 17;
+  info.payload_type = primary_payload_type;
+  EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+      .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+
+  // First call is a special case, since it does not include a secondary
+  // payload.
+  Encode();
+  ASSERT_EQ(1u, encoded_info_.redundant.size());
+  EXPECT_EQ(primary_payload_type, encoded_info_.redundant[0].payload_type);
+  EXPECT_EQ(red_payload_type_, encoded_info_.payload_type);
+
+  const int secondary_payload_type = red_payload_type_ + 2;
+  info.payload_type = secondary_payload_type;
+  EXPECT_CALL(*mock_encoder_, EncodeImpl(_, _, _))
+      .WillOnce(Invoke(MockAudioEncoder::FakeEncoding(info)));
+
+  Encode();
+  ASSERT_EQ(2u, encoded_info_.redundant.size());
+  EXPECT_EQ(secondary_payload_type, encoded_info_.redundant[0].payload_type);
+  EXPECT_EQ(primary_payload_type, encoded_info_.redundant[1].payload_type);
+  EXPECT_EQ(red_payload_type_, encoded_info_.payload_type);
+}
+
+#if GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// This test fixture tests various error conditions that makes the
+// AudioEncoderCng die via CHECKs.
+class AudioEncoderCopyRedDeathTest : public AudioEncoderCopyRedTest {
+ protected:
+  AudioEncoderCopyRedDeathTest() : AudioEncoderCopyRedTest() {}
+};
+
+TEST_F(AudioEncoderCopyRedDeathTest, WrongFrameSize) {
+  num_audio_samples_10ms *= 2;  // 20 ms frame.
+  EXPECT_DEATH(Encode(), "");
+  num_audio_samples_10ms = 0;  // Zero samples.
+  EXPECT_DEATH(Encode(), "");
+}
+
+TEST_F(AudioEncoderCopyRedDeathTest, NullSpeechEncoder) {
+  AudioEncoderCopyRed* red = NULL;
+  AudioEncoderCopyRed::Config config;
+  config.speech_encoder = NULL;
+  EXPECT_DEATH(red = new AudioEncoderCopyRed(std::move(config)),
+               "Speech encoder not provided.");
+  // The delete operation is needed to avoid leak reports from memcheck.
+  delete red;
+}
+
+#endif  // GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc b/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
new file mode 100644
index 0000000..d3749c1
--- /dev/null
+++ b/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
@@ -0,0 +1,127 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/tools/audio_codec_speed_test.h"
+
+#include "rtc_base/format_macros.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+using ::std::get;
+
+namespace webrtc {
+
+AudioCodecSpeedTest::AudioCodecSpeedTest(int block_duration_ms,
+                                         int input_sampling_khz,
+                                         int output_sampling_khz)
+    : block_duration_ms_(block_duration_ms),
+      input_sampling_khz_(input_sampling_khz),
+      output_sampling_khz_(output_sampling_khz),
+      input_length_sample_(
+          static_cast<size_t>(block_duration_ms_ * input_sampling_khz_)),
+      output_length_sample_(
+          static_cast<size_t>(block_duration_ms_ * output_sampling_khz_)),
+      data_pointer_(0),
+      loop_length_samples_(0),
+      max_bytes_(0),
+      encoded_bytes_(0),
+      encoding_time_ms_(0.0),
+      decoding_time_ms_(0.0),
+      out_file_(NULL) {
+}
+
+void AudioCodecSpeedTest::SetUp() {
+  channels_ = get<0>(GetParam());
+  bit_rate_ = get<1>(GetParam());
+  in_filename_ = test::ResourcePath(get<2>(GetParam()), get<3>(GetParam()));
+  save_out_data_ = get<4>(GetParam());
+
+  FILE* fp = fopen(in_filename_.c_str(), "rb");
+  assert(fp != NULL);
+
+  // Obtain file size.
+  fseek(fp, 0, SEEK_END);
+  loop_length_samples_ = ftell(fp) / sizeof(int16_t);
+  rewind(fp);
+
+  // Allocate memory to contain the whole file.
+  in_data_.reset(new int16_t[loop_length_samples_ +
+      input_length_sample_ * channels_]);
+
+  data_pointer_ = 0;
+
+  // Copy the file into the buffer.
+  ASSERT_EQ(fread(&in_data_[0], sizeof(int16_t), loop_length_samples_, fp),
+            loop_length_samples_);
+  fclose(fp);
+
+  // Add an extra block length of samples to the end of the array, starting
+  // over again from the beginning of the array. This is done to simplify
+  // the reading process when reading over the end of the loop.
+  memcpy(&in_data_[loop_length_samples_], &in_data_[0],
+         input_length_sample_ * channels_ * sizeof(int16_t));
+
+  max_bytes_ = input_length_sample_ * channels_ * sizeof(int16_t);
+  out_data_.reset(new int16_t[output_length_sample_ * channels_]);
+  bit_stream_.reset(new uint8_t[max_bytes_]);
+
+  if (save_out_data_) {
+    std::string out_filename =
+        ::testing::UnitTest::GetInstance()->current_test_info()->name();
+
+    // Erase '/'
+    size_t found;
+    while ((found = out_filename.find('/')) != std::string::npos)
+      out_filename.replace(found, 1, "_");
+
+    out_filename = test::OutputPath() + out_filename + ".pcm";
+
+    out_file_ = fopen(out_filename.c_str(), "wb");
+    assert(out_file_ != NULL);
+
+    printf("Output to be saved in %s.\n", out_filename.c_str());
+  }
+}
+
+void AudioCodecSpeedTest::TearDown() {
+  if (save_out_data_) {
+    fclose(out_file_);
+  }
+}
+
+void AudioCodecSpeedTest::EncodeDecode(size_t audio_duration_sec) {
+  size_t time_now_ms = 0;
+  float time_ms;
+
+  printf("Coding %d kHz-sampled %" PRIuS "-channel audio at %d bps ...\n",
+         input_sampling_khz_, channels_, bit_rate_);
+
+  while (time_now_ms < audio_duration_sec * 1000) {
+    // Encode & decode.
+    time_ms = EncodeABlock(&in_data_[data_pointer_], &bit_stream_[0],
+                           max_bytes_, &encoded_bytes_);
+    encoding_time_ms_ += time_ms;
+    time_ms = DecodeABlock(&bit_stream_[0], encoded_bytes_, &out_data_[0]);
+    decoding_time_ms_ += time_ms;
+    if (save_out_data_) {
+      fwrite(&out_data_[0], sizeof(int16_t),
+             output_length_sample_ * channels_, out_file_);
+    }
+    data_pointer_ = (data_pointer_ + input_length_sample_ * channels_) %
+        loop_length_samples_;
+    time_now_ms += block_duration_ms_;
+  }
+
+  printf("Encoding: %.2f%% real time,\nDecoding: %.2f%% real time.\n",
+         (encoding_time_ms_ / audio_duration_sec) / 10.0,
+         (decoding_time_ms_ / audio_duration_sec) / 10.0);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/codecs/tools/audio_codec_speed_test.h b/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
new file mode 100644
index 0000000..9e616e7
--- /dev/null
+++ b/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
@@ -0,0 +1,91 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_CODECS_TOOLS_AUDIO_CODEC_SPEED_TEST_H_
+#define MODULES_AUDIO_CODING_CODECS_TOOLS_AUDIO_CODEC_SPEED_TEST_H_
+
+#include <memory>
+#include <string>
+
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Define coding parameter as
+// <channels, bit_rate, file_name, extension, if_save_output>.
+typedef std::tuple<size_t, int, std::string, std::string, bool> coding_param;
+
+class AudioCodecSpeedTest : public testing::TestWithParam<coding_param> {
+ protected:
+  AudioCodecSpeedTest(int block_duration_ms,
+                      int input_sampling_khz,
+                      int output_sampling_khz);
+  virtual void SetUp();
+  virtual void TearDown();
+
+  // EncodeABlock(...) does the following:
+  // 1. encodes a block of audio, saved in |in_data|,
+  // 2. save the bit stream to |bit_stream| of |max_bytes| bytes in size,
+  // 3. assign |encoded_bytes| with the length of the bit stream (in bytes),
+  // 4. return the cost of time (in millisecond) spent on actual encoding.
+  virtual float EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
+                             size_t max_bytes, size_t* encoded_bytes) = 0;
+
+  // DecodeABlock(...) does the following:
+  // 1. decodes the bit stream in |bit_stream| with a length of |encoded_bytes|
+  // (in bytes),
+  // 2. save the decoded audio in |out_data|,
+  // 3. return the cost of time (in millisecond) spent on actual decoding.
+  virtual float DecodeABlock(const uint8_t* bit_stream, size_t encoded_bytes,
+                             int16_t* out_data) = 0;
+
+  // Encoding and decode an audio of |audio_duration| (in seconds) and
+  // record the runtime for encoding and decoding separately.
+  void EncodeDecode(size_t audio_duration);
+
+  int block_duration_ms_;
+  int input_sampling_khz_;
+  int output_sampling_khz_;
+
+  // Number of samples-per-channel in a frame.
+  size_t input_length_sample_;
+
+  // Expected output number of samples-per-channel in a frame.
+  size_t output_length_sample_;
+
+  std::unique_ptr<int16_t[]> in_data_;
+  std::unique_ptr<int16_t[]> out_data_;
+  size_t data_pointer_;
+  size_t loop_length_samples_;
+  std::unique_ptr<uint8_t[]> bit_stream_;
+
+  // Maximum number of bytes in output bitstream for a frame of audio.
+  size_t max_bytes_;
+
+  size_t encoded_bytes_;
+  float encoding_time_ms_;
+  float decoding_time_ms_;
+  FILE* out_file_;
+
+  size_t channels_;
+
+  // Bit rate is in bit-per-second.
+  int bit_rate_;
+
+  std::string in_filename_;
+
+  // Determines whether to save the output to file.
+  bool save_out_data_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_CODECS_TOOLS_AUDIO_CODEC_SPEED_TEST_H_
diff --git a/modules/audio_coding/include/audio_coding_module.h b/modules/audio_coding/include/audio_coding_module.h
new file mode 100644
index 0000000..12c98ee
--- /dev/null
+++ b/modules/audio_coding/include/audio_coding_module.h
@@ -0,0 +1,810 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_INCLUDE_AUDIO_CODING_MODULE_H_
+#define MODULES_AUDIO_CODING_INCLUDE_AUDIO_CODING_MODULE_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "api/audio_codecs/audio_encoder.h"
+#include "api/optional.h"
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "modules/audio_coding/neteq/include/neteq.h"
+#include "modules/include/module.h"
+#include "rtc_base/deprecation.h"
+#include "rtc_base/function_view.h"
+#include "system_wrappers/include/clock.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// forward declarations
+struct CodecInst;
+struct WebRtcRTPHeader;
+class AudioDecoder;
+class AudioEncoder;
+class AudioFrame;
+class RTPFragmentationHeader;
+
+#define WEBRTC_10MS_PCM_AUDIO 960  // 16 bits super wideband 48 kHz
+
+// Callback class used for sending data ready to be packetized
+class AudioPacketizationCallback {
+ public:
+  virtual ~AudioPacketizationCallback() {}
+
+  virtual int32_t SendData(FrameType frame_type,
+                           uint8_t payload_type,
+                           uint32_t timestamp,
+                           const uint8_t* payload_data,
+                           size_t payload_len_bytes,
+                           const RTPFragmentationHeader* fragmentation) = 0;
+};
+
+// Callback class used for reporting VAD decision
+class ACMVADCallback {
+ public:
+  virtual ~ACMVADCallback() {}
+
+  virtual int32_t InFrameType(FrameType frame_type) = 0;
+};
+
+class AudioCodingModule {
+ protected:
+  AudioCodingModule() {}
+
+ public:
+  struct Config {
+    Config();
+    Config(const Config&);
+    ~Config();
+
+    NetEq::Config neteq_config;
+    Clock* clock;
+    rtc::scoped_refptr<AudioDecoderFactory> decoder_factory;
+  };
+
+  ///////////////////////////////////////////////////////////////////////////
+  // Creation and destruction of a ACM.
+  //
+  // The second method is used for testing where a simulated clock can be
+  // injected into ACM. ACM will take the ownership of the object clock and
+  // delete it when destroyed.
+  //
+  // TODO(solenberg): Remove once downstream projects are updated.
+  RTC_DEPRECATED static AudioCodingModule* Create(int id);
+  static AudioCodingModule* Create();
+  static AudioCodingModule* Create(Clock* clock);
+  static AudioCodingModule* Create(const Config& config);
+  virtual ~AudioCodingModule() = default;
+
+  ///////////////////////////////////////////////////////////////////////////
+  //   Utility functions
+  //
+
+  ///////////////////////////////////////////////////////////////////////////
+  // uint8_t NumberOfCodecs()
+  // Returns number of supported codecs.
+  //
+  // Return value:
+  //   number of supported codecs.
+  ///
+  static int NumberOfCodecs();
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t Codec()
+  // Get supported codec with list number.
+  //
+  // Input:
+  //   -list_id             : list number.
+  //
+  // Output:
+  //   -codec              : a structure where the parameters of the codec,
+  //                         given by list number is written to.
+  //
+  // Return value:
+  //   -1 if the list number (list_id) is invalid.
+  //    0 if succeeded.
+  //
+  static int Codec(int list_id, CodecInst* codec);
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t Codec()
+  // Get supported codec with the given codec name, sampling frequency, and
+  // a given number of channels.
+  //
+  // Input:
+  //   -payload_name       : name of the codec.
+  //   -sampling_freq_hz   : sampling frequency of the codec. Note! for RED
+  //                         a sampling frequency of -1 is a valid input.
+  //   -channels           : number of channels ( 1 - mono, 2 - stereo).
+  //
+  // Output:
+  //   -codec              : a structure where the function returns the
+  //                         default parameters of the codec.
+  //
+  // Return value:
+  //   -1 if no codec matches the given parameters.
+  //    0 if succeeded.
+  //
+  static int Codec(const char* payload_name, CodecInst* codec,
+                   int sampling_freq_hz, size_t channels);
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t Codec()
+  //
+  // Returns the list number of the given codec name, sampling frequency, and
+  // a given number of channels.
+  //
+  // Input:
+  //   -payload_name        : name of the codec.
+  //   -sampling_freq_hz    : sampling frequency of the codec. Note! for RED
+  //                          a sampling frequency of -1 is a valid input.
+  //   -channels            : number of channels ( 1 - mono, 2 - stereo).
+  //
+  // Return value:
+  //   if the codec is found, the index of the codec in the list,
+  //   -1 if the codec is not found.
+  //
+  static int Codec(const char* payload_name, int sampling_freq_hz,
+                   size_t channels);
+
+  ///////////////////////////////////////////////////////////////////////////
+  // bool IsCodecValid()
+  // Checks the validity of the parameters of the given codec.
+  //
+  // Input:
+  //   -codec              : the structure which keeps the parameters of the
+  //                         codec.
+  //
+  // Return value:
+  //   true if the parameters are valid,
+  //   false if any parameter is not valid.
+  //
+  static bool IsCodecValid(const CodecInst& codec);
+
+  ///////////////////////////////////////////////////////////////////////////
+  //   Sender
+  //
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t RegisterSendCodec()
+  // Registers a codec, specified by |send_codec|, as sending codec.
+  // This API can be called multiple of times to register Codec. The last codec
+  // registered overwrites the previous ones.
+  // The API can also be used to change payload type for CNG and RED, which are
+  // registered by default to default payload types.
+  // Note that registering CNG and RED won't overwrite speech codecs.
+  // This API can be called to set/change the send payload-type, frame-size
+  // or encoding rate (if applicable for the codec).
+  //
+  // Note: If a stereo codec is registered as send codec, VAD/DTX will
+  // automatically be turned off, since it is not supported for stereo sending.
+  //
+  // Note: If a secondary encoder is already registered, and the new send-codec
+  // has a sampling rate that does not match the secondary encoder, the
+  // secondary encoder will be unregistered.
+  //
+  // Input:
+  //   -send_codec         : Parameters of the codec to be registered, c.f.
+  //                         common_types.h for the definition of
+  //                         CodecInst.
+  //
+  // Return value:
+  //   -1 if failed to initialize,
+  //    0 if succeeded.
+  //
+  virtual int32_t RegisterSendCodec(const CodecInst& send_codec) = 0;
+
+  // Registers |external_speech_encoder| as encoder. The new encoder will
+  // replace any previously registered speech encoder (internal or external).
+  virtual void RegisterExternalSendCodec(
+      AudioEncoder* external_speech_encoder) = 0;
+
+  // |modifier| is called exactly once with one argument: a pointer to the
+  // unique_ptr that holds the current encoder (which is null if there is no
+  // current encoder). For the duration of the call, |modifier| has exclusive
+  // access to the unique_ptr; it may call the encoder, steal the encoder and
+  // replace it with another encoder or with nullptr, etc.
+  virtual void ModifyEncoder(
+      rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier) = 0;
+
+  // |modifier| is called exactly once with one argument: a const pointer to the
+  // current encoder (which is null if there is no current encoder).
+  virtual void QueryEncoder(
+      rtc::FunctionView<void(AudioEncoder const*)> query) = 0;
+
+  // Utility method for simply replacing the existing encoder with a new one.
+  void SetEncoder(std::unique_ptr<AudioEncoder> new_encoder) {
+    ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
+      *encoder = std::move(new_encoder);
+    });
+  }
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t SendCodec()
+  // Get parameters for the codec currently registered as send codec.
+  //
+  // Return value:
+  //   The send codec, or nothing if we don't have one
+  //
+  virtual rtc::Optional<CodecInst> SendCodec() const = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t SendFrequency()
+  // Get the sampling frequency of the current encoder in Hertz.
+  //
+  // Return value:
+  //   positive; sampling frequency [Hz] of the current encoder.
+  //   -1 if an error has happened.
+  //
+  virtual int32_t SendFrequency() const = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // Sets the bitrate to the specified value in bits/sec. If the value is not
+  // supported by the codec, it will choose another appropriate value.
+  //
+  // This is only used in test code that rely on old ACM APIs.
+  // TODO(minyue): Remove it when possible.
+  virtual void SetBitRate(int bitrate_bps) = 0;
+
+  // int32_t RegisterTransportCallback()
+  // Register a transport callback which will be called to deliver
+  // the encoded buffers whenever Process() is called and a
+  // bit-stream is ready.
+  //
+  // Input:
+  //   -transport          : pointer to the callback class
+  //                         transport->SendData() is called whenever
+  //                         Process() is called and bit-stream is ready
+  //                         to deliver.
+  //
+  // Return value:
+  //   -1 if the transport callback could not be registered
+  //    0 if registration is successful.
+  //
+  virtual int32_t RegisterTransportCallback(
+      AudioPacketizationCallback* transport) = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t Add10MsData()
+  // Add 10MS of raw (PCM) audio data and encode it. If the sampling
+  // frequency of the audio does not match the sampling frequency of the
+  // current encoder ACM will resample the audio. If an encoded packet was
+  // produced, it will be delivered via the callback object registered using
+  // RegisterTransportCallback, and the return value from this function will
+  // be the number of bytes encoded.
+  //
+  // Input:
+  //   -audio_frame        : the input audio frame, containing raw audio
+  //                         sampling frequency etc.,
+  //                         c.f. module_common_types.h for definition of
+  //                         AudioFrame.
+  //
+  // Return value:
+  //   >= 0   number of bytes encoded.
+  //     -1   some error occurred.
+  //
+  virtual int32_t Add10MsData(const AudioFrame& audio_frame) = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // (RED) Redundant Coding
+  //
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t SetREDStatus()
+  // configure RED status i.e. on/off.
+  //
+  // RFC 2198 describes a solution which has a single payload type which
+  // signifies a packet with redundancy. That packet then becomes a container,
+  // encapsulating multiple payloads into a single RTP packet.
+  // Such a scheme is flexible, since any amount of redundancy may be
+  // encapsulated within a single packet.  There is, however, a small overhead
+  // since each encapsulated payload must be preceded by a header indicating
+  // the type of data enclosed.
+  //
+  // Input:
+  //   -enable_red         : if true RED is enabled, otherwise RED is
+  //                         disabled.
+  //
+  // Return value:
+  //   -1 if failed to set RED status,
+  //    0 if succeeded.
+  //
+  virtual int32_t SetREDStatus(bool enable_red) = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // bool REDStatus()
+  // Get RED status
+  //
+  // Return value:
+  //   true if RED is enabled,
+  //   false if RED is disabled.
+  //
+  virtual bool REDStatus() const = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // (FEC) Forward Error Correction (codec internal)
+  //
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t SetCodecFEC()
+  // Configures codec internal FEC status i.e. on/off. No effects on codecs that
+  // do not provide internal FEC.
+  //
+  // Input:
+  //   -enable_fec         : if true FEC will be enabled otherwise the FEC is
+  //                         disabled.
+  //
+  // Return value:
+  //   -1 if failed, or the codec does not support FEC
+  //    0 if succeeded.
+  //
+  virtual int SetCodecFEC(bool enable_codec_fec) = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // bool CodecFEC()
+  // Gets status of codec internal FEC.
+  //
+  // Return value:
+  //   true if FEC is enabled,
+  //   false if FEC is disabled.
+  //
+  virtual bool CodecFEC() const = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int SetPacketLossRate()
+  // Sets expected packet loss rate for encoding. Some encoders provide packet
+  // loss gnostic encoding to make stream less sensitive to packet losses,
+  // through e.g., FEC. No effects on codecs that do not provide such encoding.
+  //
+  // Input:
+  //   -packet_loss_rate   : expected packet loss rate (0 -- 100 inclusive).
+  //
+  // Return value
+  //   -1 if failed to set packet loss rate,
+  //   0 if succeeded.
+  //
+  // This is only used in test code that rely on old ACM APIs.
+  // TODO(minyue): Remove it when possible.
+  virtual int SetPacketLossRate(int packet_loss_rate) = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  //   (VAD) Voice Activity Detection
+  //
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t SetVAD()
+  // If DTX is enabled & the codec does not have internal DTX/VAD
+  // WebRtc VAD will be automatically enabled and |enable_vad| is ignored.
+  //
+  // If DTX is disabled but VAD is enabled no DTX packets are send,
+  // regardless of whether the codec has internal DTX/VAD or not. In this
+  // case, WebRtc VAD is running to label frames as active/in-active.
+  //
+  // NOTE! VAD/DTX is not supported when sending stereo.
+  //
+  // Inputs:
+  //   -enable_dtx         : if true DTX is enabled,
+  //                         otherwise DTX is disabled.
+  //   -enable_vad         : if true VAD is enabled,
+  //                         otherwise VAD is disabled.
+  //   -vad_mode           : determines the aggressiveness of VAD. A more
+  //                         aggressive mode results in more frames labeled
+  //                         as in-active, c.f. definition of
+  //                         ACMVADMode in audio_coding_module_typedefs.h
+  //                         for valid values.
+  //
+  // Return value:
+  //   -1 if failed to set up VAD/DTX,
+  //    0 if succeeded.
+  //
+  virtual int32_t SetVAD(const bool enable_dtx = true,
+                               const bool enable_vad = false,
+                               const ACMVADMode vad_mode = VADNormal) = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t VAD()
+  // Get VAD status.
+  //
+  // Outputs:
+  //   -dtx_enabled        : is set to true if DTX is enabled, otherwise
+  //                         is set to false.
+  //   -vad_enabled        : is set to true if VAD is enabled, otherwise
+  //                         is set to false.
+  //   -vad_mode            : is set to the current aggressiveness of VAD.
+  //
+  // Return value:
+  //   -1 if fails to retrieve the setting of DTX/VAD,
+  //    0 if succeeded.
+  //
+  virtual int32_t VAD(bool* dtx_enabled, bool* vad_enabled,
+                            ACMVADMode* vad_mode) const = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t RegisterVADCallback()
+  // Call this method to register a callback function which is called
+  // any time that ACM encounters an empty frame. That is a frame which is
+  // recognized inactive. Depending on the codec WebRtc VAD or internal codec
+  // VAD is employed to identify a frame as active/inactive.
+  //
+  // Input:
+  //   -vad_callback        : pointer to a callback function.
+  //
+  // Return value:
+  //   -1 if failed to register the callback function.
+  //    0 if the callback function is registered successfully.
+  //
+  virtual int32_t RegisterVADCallback(ACMVADCallback* vad_callback) = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  //   Receiver
+  //
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t InitializeReceiver()
+  // Any decoder-related state of ACM will be initialized to the
+  // same state when ACM is created. This will not interrupt or
+  // effect encoding functionality of ACM. ACM would lose all the
+  // decoding-related settings by calling this function.
+  // For instance, all registered codecs are deleted and have to be
+  // registered again.
+  //
+  // Return value:
+  //   -1 if failed to initialize,
+  //    0 if succeeded.
+  //
+  virtual int32_t InitializeReceiver() = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t ReceiveFrequency()
+  // Get sampling frequency of the last received payload.
+  //
+  // Return value:
+  //   non-negative the sampling frequency in Hertz.
+  //   -1 if an error has occurred.
+  //
+  virtual int32_t ReceiveFrequency() const = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t PlayoutFrequency()
+  // Get sampling frequency of audio played out.
+  //
+  // Return value:
+  //   the sampling frequency in Hertz.
+  //
+  virtual int32_t PlayoutFrequency() const = 0;
+
+  // Replace any existing decoders with the given payload type -> decoder map.
+  virtual void SetReceiveCodecs(
+      const std::map<int, SdpAudioFormat>& codecs) = 0;
+
+  // Registers a decoder for the given payload type. Returns true iff
+  // successful.
+  virtual bool RegisterReceiveCodec(int rtp_payload_type,
+                                    const SdpAudioFormat& audio_format) = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t RegisterReceiveCodec()
+  // Register possible decoders, can be called multiple times for
+  // codecs, CNG-NB, CNG-WB, CNG-SWB, AVT and RED.
+  //
+  // Input:
+  //   -receive_codec      : parameters of the codec to be registered, c.f.
+  //                         common_types.h for the definition of
+  //                         CodecInst.
+  //
+  // Return value:
+  //   -1 if failed to register the codec
+  //    0 if the codec registered successfully.
+  //
+  virtual int RegisterReceiveCodec(const CodecInst& receive_codec) = 0;
+
+  // Register a decoder; call repeatedly to register multiple decoders. |df| is
+  // a decoder factory that returns an iSAC decoder; it will be called once if
+  // the decoder being registered is iSAC.
+  virtual int RegisterReceiveCodec(
+      const CodecInst& receive_codec,
+      rtc::FunctionView<std::unique_ptr<AudioDecoder>()> isac_factory) = 0;
+
+  // Registers an external decoder. The name is only used to provide information
+  // back to the caller about the decoder. Hence, the name is arbitrary, and may
+  // be empty.
+  virtual int RegisterExternalReceiveCodec(int rtp_payload_type,
+                                           AudioDecoder* external_decoder,
+                                           int sample_rate_hz,
+                                           int num_channels,
+                                           const std::string& name) = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t UnregisterReceiveCodec()
+  // Unregister the codec currently registered with a specific payload type
+  // from the list of possible receive codecs.
+  //
+  // Input:
+  //   -payload_type        : The number representing the payload type to
+  //                         unregister.
+  //
+  // Output:
+  //   -1 if fails to unregister.
+  //    0 if the given codec is successfully unregistered.
+  //
+  virtual int UnregisterReceiveCodec(
+      uint8_t payload_type) = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t ReceiveCodec()
+  // Get the codec associated with last received payload.
+  //
+  // Output:
+  //   -curr_receive_codec : parameters of the codec associated with the last
+  //                         received payload, c.f. common_types.h for
+  //                         the definition of CodecInst.
+  //
+  // Return value:
+  //   -1 if failed to retrieve the codec,
+  //    0 if the codec is successfully retrieved.
+  //
+  virtual int32_t ReceiveCodec(CodecInst* curr_receive_codec) const = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // rtc::Optional<SdpAudioFormat> ReceiveFormat()
+  // Get the format associated with last received payload.
+  //
+  // Return value:
+  //    An SdpAudioFormat describing the format associated with the last
+  //    received payload.
+  //    An empty Optional if no payload has yet been received.
+  //
+  virtual rtc::Optional<SdpAudioFormat> ReceiveFormat() const = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t IncomingPacket()
+  // Call this function to insert a parsed RTP packet into ACM.
+  //
+  // Inputs:
+  //   -incoming_payload   : received payload.
+  //   -payload_len_bytes  : the length of payload in bytes.
+  //   -rtp_info           : the relevant information retrieved from RTP
+  //                         header.
+  //
+  // Return value:
+  //   -1 if failed to push in the payload
+  //    0 if payload is successfully pushed in.
+  //
+  virtual int32_t IncomingPacket(const uint8_t* incoming_payload,
+                                 const size_t payload_len_bytes,
+                                 const WebRtcRTPHeader& rtp_info) = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int SetMinimumPlayoutDelay()
+  // Set a minimum for the playout delay, used for lip-sync. NetEq maintains
+  // such a delay unless channel condition yields to a higher delay.
+  //
+  // Input:
+  //   -time_ms            : minimum delay in milliseconds.
+  //
+  // Return value:
+  //   -1 if failed to set the delay,
+  //    0 if the minimum delay is set.
+  //
+  virtual int SetMinimumPlayoutDelay(int time_ms) = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int SetMaximumPlayoutDelay()
+  // Set a maximum for the playout delay
+  //
+  // Input:
+  //   -time_ms            : maximum delay in milliseconds.
+  //
+  // Return value:
+  //   -1 if failed to set the delay,
+  //    0 if the maximum delay is set.
+  //
+  virtual int SetMaximumPlayoutDelay(int time_ms) = 0;
+
+  // TODO(kwiberg): Consider if this is needed anymore, now that voe::Channel
+  //                doesn't use it.
+  // The shortest latency, in milliseconds, required by jitter buffer. This
+  // is computed based on inter-arrival times and playout mode of NetEq. The
+  // actual delay is the maximum of least-required-delay and the minimum-delay
+  // specified by SetMinumumPlayoutDelay() API.
+  //
+  virtual int LeastRequiredDelayMs() const = 0;
+
+  // int32_t PlayoutTimestamp()
+  // The send timestamp of an RTP packet is associated with the decoded
+  // audio of the packet in question. This function returns the timestamp of
+  // the latest audio obtained by calling PlayoutData10ms().
+  //
+  // Input:
+  //   -timestamp          : a reference to a uint32_t to receive the
+  //                         timestamp.
+  // Return value:
+  //    0 if the output is a correct timestamp.
+  //   -1 if failed to output the correct timestamp.
+  //
+  RTC_DEPRECATED virtual int32_t PlayoutTimestamp(uint32_t* timestamp) = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t PlayoutTimestamp()
+  // The send timestamp of an RTP packet is associated with the decoded
+  // audio of the packet in question. This function returns the timestamp of
+  // the latest audio obtained by calling PlayoutData10ms(), or empty if no
+  // valid timestamp is available.
+  //
+  virtual rtc::Optional<uint32_t> PlayoutTimestamp() = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int FilteredCurrentDelayMs()
+  // Returns the current total delay from NetEq (packet buffer and sync buffer)
+  // in ms, with smoothing applied to even out short-time fluctuations due to
+  // jitter. The packet buffer part of the delay is not updated during DTX/CNG
+  // periods.
+  //
+  virtual int FilteredCurrentDelayMs() const = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int FilteredCurrentDelayMs()
+  // Returns the current target delay for NetEq in ms.
+  //
+  virtual int TargetDelayMs() const = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t PlayoutData10Ms(
+  // Get 10 milliseconds of raw audio data for playout, at the given sampling
+  // frequency. ACM will perform a resampling if required.
+  //
+  // Input:
+  //   -desired_freq_hz    : the desired sampling frequency, in Hertz, of the
+  //                         output audio. If set to -1, the function returns
+  //                         the audio at the current sampling frequency.
+  //
+  // Output:
+  //   -audio_frame        : output audio frame which contains raw audio data
+  //                         and other relevant parameters, c.f.
+  //                         module_common_types.h for the definition of
+  //                         AudioFrame.
+  //   -muted              : if true, the sample data in audio_frame is not
+  //                         populated, and must be interpreted as all zero.
+  //
+  // Return value:
+  //   -1 if the function fails,
+  //    0 if the function succeeds.
+  //
+  virtual int32_t PlayoutData10Ms(int32_t desired_freq_hz,
+                                  AudioFrame* audio_frame,
+                                  bool* muted) = 0;
+
+  /////////////////////////////////////////////////////////////////////////////
+  // Same as above, but without the muted parameter. This methods should not be
+  // used if enable_fast_accelerate was set to true in NetEq::Config.
+  // TODO(henrik.lundin) Remove this method when downstream dependencies are
+  // ready.
+  virtual int32_t PlayoutData10Ms(int32_t desired_freq_hz,
+                                  AudioFrame* audio_frame) = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  //   Codec specific
+  //
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int SetOpusApplication()
+  // Sets the intended application if current send codec is Opus. Opus uses this
+  // to optimize the encoding for applications like VOIP and music. Currently,
+  // two modes are supported: kVoip and kAudio.
+  //
+  // Input:
+  //   - application            : intended application.
+  //
+  // Return value:
+  //   -1 if current send codec is not Opus or error occurred in setting the
+  //      Opus application mode.
+  //    0 if the Opus application mode is successfully set.
+  //
+  virtual int SetOpusApplication(OpusApplicationMode application) = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int SetOpusMaxPlaybackRate()
+  // If current send codec is Opus, informs it about maximum playback rate the
+  // receiver will render. Opus can use this information to optimize the bit
+  // rate and increase the computation efficiency.
+  //
+  // Input:
+  //   -frequency_hz            : maximum playback rate in Hz.
+  //
+  // Return value:
+  //   -1 if current send codec is not Opus or
+  //      error occurred in setting the maximum playback rate,
+  //    0 if maximum bandwidth is set successfully.
+  //
+  virtual int SetOpusMaxPlaybackRate(int frequency_hz) = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // EnableOpusDtx()
+  // Enable the DTX, if current send codec is Opus.
+  //
+  // Return value:
+  //   -1 if current send codec is not Opus or error occurred in enabling the
+  //      Opus DTX.
+  //    0 if Opus DTX is enabled successfully.
+  //
+  virtual int EnableOpusDtx() = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int DisableOpusDtx()
+  // If current send codec is Opus, disables its internal DTX.
+  //
+  // Return value:
+  //   -1 if current send codec is not Opus or error occurred in disabling DTX.
+  //    0 if Opus DTX is disabled successfully.
+  //
+  virtual int DisableOpusDtx() = 0;
+
+  ///////////////////////////////////////////////////////////////////////////
+  //   statistics
+  //
+
+  ///////////////////////////////////////////////////////////////////////////
+  // int32_t  GetNetworkStatistics()
+  // Get network statistics. Note that the internal statistics of NetEq are
+  // reset by this call.
+  //
+  // Input:
+  //   -network_statistics : a structure that contains network statistics.
+  //
+  // Return value:
+  //   -1 if failed to set the network statistics,
+  //    0 if statistics are set successfully.
+  //
+  virtual int32_t GetNetworkStatistics(
+      NetworkStatistics* network_statistics) = 0;
+
+  //
+  // Enable NACK and set the maximum size of the NACK list. If NACK is already
+  // enable then the maximum NACK list size is modified accordingly.
+  //
+  // If the sequence number of last received packet is N, the sequence numbers
+  // of NACK list are in the range of [N - |max_nack_list_size|, N).
+  //
+  // |max_nack_list_size| should be positive (none zero) and less than or
+  // equal to |Nack::kNackListSizeLimit|. Otherwise, No change is applied and -1
+  // is returned. 0 is returned at success.
+  //
+  virtual int EnableNack(size_t max_nack_list_size) = 0;
+
+  // Disable NACK.
+  virtual void DisableNack() = 0;
+
+  //
+  // Get a list of packets to be retransmitted. |round_trip_time_ms| is an
+  // estimate of the round-trip-time (in milliseconds). Missing packets which
+  // will be playout in a shorter time than the round-trip-time (with respect
+  // to the time this API is called) will not be included in the list.
+  //
+  // Negative |round_trip_time_ms| results is an error message and empty list
+  // is returned.
+  //
+  virtual std::vector<uint16_t> GetNackList(
+      int64_t round_trip_time_ms) const = 0;
+
+  virtual void GetDecodingCallStatistics(
+      AudioDecodingCallStats* call_stats) const = 0;
+
+  virtual ANAStats GetANAStats() const = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_INCLUDE_AUDIO_CODING_MODULE_H_
diff --git a/modules/audio_coding/include/audio_coding_module_typedefs.h b/modules/audio_coding/include/audio_coding_module_typedefs.h
new file mode 100644
index 0000000..ad71ef1
--- /dev/null
+++ b/modules/audio_coding/include/audio_coding_module_typedefs.h
@@ -0,0 +1,51 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_INCLUDE_AUDIO_CODING_MODULE_TYPEDEFS_H_
+#define MODULES_AUDIO_CODING_INCLUDE_AUDIO_CODING_MODULE_TYPEDEFS_H_
+
+#include <map>
+
+#include "modules/include/module_common_types.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+///////////////////////////////////////////////////////////////////////////
+// enum ACMVADMode
+// An enumerator for aggressiveness of VAD
+// -VADNormal                : least aggressive mode.
+// -VADLowBitrate            : more aggressive than "VADNormal" to save on
+//                             bit-rate.
+// -VADAggr                  : an aggressive mode.
+// -VADVeryAggr              : the most agressive mode.
+//
+enum ACMVADMode {
+  VADNormal = 0,
+  VADLowBitrate = 1,
+  VADAggr = 2,
+  VADVeryAggr = 3
+};
+
+///////////////////////////////////////////////////////////////////////////
+//
+// Enumeration of Opus mode for intended application.
+//
+// kVoip              : optimized for voice signals.
+// kAudio             : optimized for non-voice signals like music.
+//
+enum OpusApplicationMode {
+ kVoip = 0,
+ kAudio = 1,
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_INCLUDE_AUDIO_CODING_MODULE_TYPEDEFS_H_
diff --git a/modules/audio_coding/neteq/accelerate.cc b/modules/audio_coding/neteq/accelerate.cc
new file mode 100644
index 0000000..183ad7b
--- /dev/null
+++ b/modules/audio_coding/neteq/accelerate.cc
@@ -0,0 +1,101 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/accelerate.h"
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+namespace webrtc {
+
+Accelerate::ReturnCodes Accelerate::Process(const int16_t* input,
+                                            size_t input_length,
+                                            bool fast_accelerate,
+                                            AudioMultiVector* output,
+                                            size_t* length_change_samples) {
+  // Input length must be (almost) 30 ms.
+  static const size_t k15ms = 120;  // 15 ms = 120 samples at 8 kHz sample rate.
+  if (num_channels_ == 0 ||
+      input_length / num_channels_ < (2 * k15ms - 1) * fs_mult_) {
+    // Length of input data too short to do accelerate. Simply move all data
+    // from input to output.
+    output->PushBackInterleaved(input, input_length);
+    return kError;
+  }
+  return TimeStretch::Process(input, input_length, fast_accelerate, output,
+                              length_change_samples);
+}
+
+void Accelerate::SetParametersForPassiveSpeech(size_t /*len*/,
+                                               int16_t* best_correlation,
+                                               size_t* /*peak_index*/) const {
+  // When the signal does not contain any active speech, the correlation does
+  // not matter. Simply set it to zero.
+  *best_correlation = 0;
+}
+
+Accelerate::ReturnCodes Accelerate::CheckCriteriaAndStretch(
+    const int16_t* input,
+    size_t input_length,
+    size_t peak_index,
+    int16_t best_correlation,
+    bool active_speech,
+    bool fast_mode,
+    AudioMultiVector* output) const {
+  // Check for strong correlation or passive speech.
+  // Use 8192 (0.5 in Q14) in fast mode.
+  const int correlation_threshold = fast_mode ? 8192 : kCorrelationThreshold;
+  if ((best_correlation > correlation_threshold) || !active_speech) {
+    // Do accelerate operation by overlap add.
+
+    // Pre-calculate common multiplication with |fs_mult_|.
+    // 120 corresponds to 15 ms.
+    size_t fs_mult_120 = fs_mult_ * 120;
+
+    if (fast_mode) {
+      // Fit as many multiples of |peak_index| as possible in fs_mult_120.
+      // TODO(henrik.lundin) Consider finding multiple correlation peaks and
+      // pick the one with the longest correlation lag in this case.
+      peak_index = (fs_mult_120 / peak_index) * peak_index;
+    }
+
+    assert(fs_mult_120 >= peak_index);  // Should be handled in Process().
+    // Copy first part; 0 to 15 ms.
+    output->PushBackInterleaved(input, fs_mult_120 * num_channels_);
+    // Copy the |peak_index| starting at 15 ms to |temp_vector|.
+    AudioMultiVector temp_vector(num_channels_);
+    temp_vector.PushBackInterleaved(&input[fs_mult_120 * num_channels_],
+                                    peak_index * num_channels_);
+    // Cross-fade |temp_vector| onto the end of |output|.
+    output->CrossFade(temp_vector, peak_index);
+    // Copy the last unmodified part, 15 ms + pitch period until the end.
+    output->PushBackInterleaved(
+        &input[(fs_mult_120 + peak_index) * num_channels_],
+        input_length - (fs_mult_120 + peak_index) * num_channels_);
+
+    if (active_speech) {
+      return kSuccess;
+    } else {
+      return kSuccessLowEnergy;
+    }
+  } else {
+    // Accelerate not allowed. Simply move all data from decoded to outData.
+    output->PushBackInterleaved(input, input_length);
+    return kNoStretch;
+  }
+}
+
+Accelerate* AccelerateFactory::Create(
+    int sample_rate_hz,
+    size_t num_channels,
+    const BackgroundNoise& background_noise) const {
+  return new Accelerate(sample_rate_hz, num_channels, background_noise);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/accelerate.h b/modules/audio_coding/neteq/accelerate.h
new file mode 100644
index 0000000..bf4f0f7
--- /dev/null
+++ b/modules/audio_coding/neteq/accelerate.h
@@ -0,0 +1,81 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_ACCELERATE_H_
+#define MODULES_AUDIO_CODING_NETEQ_ACCELERATE_H_
+
+#include <assert.h>
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "modules/audio_coding/neteq/time_stretch.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Forward declarations.
+class BackgroundNoise;
+
+// This class implements the Accelerate operation. Most of the work is done
+// in the base class TimeStretch, which is shared with the PreemptiveExpand
+// operation. In the Accelerate class, the operations that are specific to
+// Accelerate are implemented.
+class Accelerate : public TimeStretch {
+ public:
+  Accelerate(int sample_rate_hz, size_t num_channels,
+             const BackgroundNoise& background_noise)
+      : TimeStretch(sample_rate_hz, num_channels, background_noise) {
+  }
+
+  // This method performs the actual Accelerate operation. The samples are
+  // read from |input|, of length |input_length| elements, and are written to
+  // |output|. The number of samples removed through time-stretching is
+  // is provided in the output |length_change_samples|. The method returns
+  // the outcome of the operation as an enumerator value. If |fast_accelerate|
+  // is true, the algorithm will relax the requirements on finding strong
+  // correlations, and may remove multiple pitch periods if possible.
+  ReturnCodes Process(const int16_t* input,
+                      size_t input_length,
+                      bool fast_accelerate,
+                      AudioMultiVector* output,
+                      size_t* length_change_samples);
+
+ protected:
+  // Sets the parameters |best_correlation| and |peak_index| to suitable
+  // values when the signal contains no active speech.
+  void SetParametersForPassiveSpeech(size_t len,
+                                     int16_t* best_correlation,
+                                     size_t* peak_index) const override;
+
+  // Checks the criteria for performing the time-stretching operation and,
+  // if possible, performs the time-stretching.
+  ReturnCodes CheckCriteriaAndStretch(const int16_t* input,
+                                      size_t input_length,
+                                      size_t peak_index,
+                                      int16_t best_correlation,
+                                      bool active_speech,
+                                      bool fast_mode,
+                                      AudioMultiVector* output) const override;
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(Accelerate);
+};
+
+struct AccelerateFactory {
+  AccelerateFactory() {}
+  virtual ~AccelerateFactory() {}
+
+  virtual Accelerate* Create(int sample_rate_hz,
+                             size_t num_channels,
+                             const BackgroundNoise& background_noise) const;
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_ACCELERATE_H_
diff --git a/modules/audio_coding/neteq/audio_decoder_unittest.cc b/modules/audio_coding/neteq/audio_decoder_unittest.cc
new file mode 100644
index 0000000..3181d6f
--- /dev/null
+++ b/modules/audio_coding/neteq/audio_decoder_unittest.cc
@@ -0,0 +1,630 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/audio_codecs/opus/audio_encoder_opus.h"
+#include "modules/audio_coding/codecs/g711/audio_decoder_pcm.h"
+#include "modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
+#include "modules/audio_coding/codecs/g722/audio_decoder_g722.h"
+#include "modules/audio_coding/codecs/g722/audio_encoder_g722.h"
+#include "modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h"
+#include "modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h"
+#include "modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h"
+#include "modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h"
+#include "modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h"
+#include "modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
+#include "modules/audio_coding/codecs/opus/audio_decoder_opus.h"
+#include "modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h"
+#include "modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h"
+#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+namespace {
+// The absolute difference between the input and output (the first channel) is
+// compared vs |tolerance|. The parameter |delay| is used to correct for codec
+// delays.
+void CompareInputOutput(const std::vector<int16_t>& input,
+                        const std::vector<int16_t>& output,
+                        size_t num_samples,
+                        size_t channels,
+                        int tolerance,
+                        int delay) {
+  ASSERT_LE(num_samples, input.size());
+  ASSERT_LE(num_samples * channels, output.size());
+  for (unsigned int n = 0; n < num_samples - delay; ++n) {
+    ASSERT_NEAR(input[n], output[channels * n + delay], tolerance)
+        << "Exit test on first diff; n = " << n;
+  }
+}
+
+// The absolute difference between the first two channels in |output| is
+// compared vs |tolerance|.
+void CompareTwoChannels(const std::vector<int16_t>& output,
+                        size_t samples_per_channel,
+                        size_t channels,
+                        int tolerance) {
+  ASSERT_GE(channels, 2u);
+  ASSERT_LE(samples_per_channel * channels, output.size());
+  for (unsigned int n = 0; n < samples_per_channel; ++n)
+    ASSERT_NEAR(output[channels * n], output[channels * n + 1], tolerance)
+        << "Stereo samples differ.";
+}
+
+// Calculates mean-squared error between input and output (the first channel).
+// The parameter |delay| is used to correct for codec delays.
+double MseInputOutput(const std::vector<int16_t>& input,
+                      const std::vector<int16_t>& output,
+                      size_t num_samples,
+                      size_t channels,
+                      int delay) {
+  assert(delay < static_cast<int>(num_samples));
+  assert(num_samples <= input.size());
+  assert(num_samples * channels <= output.size());
+  if (num_samples == 0)
+    return 0.0;
+  double squared_sum = 0.0;
+  for (unsigned int n = 0; n < num_samples - delay; ++n) {
+    squared_sum += (input[n] - output[channels * n + delay]) *
+                   (input[n] - output[channels * n + delay]);
+  }
+  return squared_sum / (num_samples - delay);
+}
+}  // namespace
+
+class AudioDecoderTest : public ::testing::Test {
+ protected:
+  AudioDecoderTest()
+      : input_audio_(
+            webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
+            32000),
+        codec_input_rate_hz_(32000),  // Legacy default value.
+        frame_size_(0),
+        data_length_(0),
+        channels_(1),
+        payload_type_(17),
+        decoder_(NULL) {}
+
+  virtual ~AudioDecoderTest() {}
+
+  virtual void SetUp() {
+    if (audio_encoder_)
+      codec_input_rate_hz_ = audio_encoder_->SampleRateHz();
+    // Create arrays.
+    ASSERT_GT(data_length_, 0u) << "The test must set data_length_ > 0";
+  }
+
+  virtual void TearDown() {
+    delete decoder_;
+    decoder_ = NULL;
+  }
+
+  virtual void InitEncoder() { }
+
+  // TODO(henrik.lundin) Change return type to size_t once most/all overriding
+  // implementations are gone.
+  virtual int EncodeFrame(const int16_t* input,
+                          size_t input_len_samples,
+                          rtc::Buffer* output) {
+    AudioEncoder::EncodedInfo encoded_info;
+    const size_t samples_per_10ms = audio_encoder_->SampleRateHz() / 100;
+    RTC_CHECK_EQ(samples_per_10ms * audio_encoder_->Num10MsFramesInNextPacket(),
+                 input_len_samples);
+    std::unique_ptr<int16_t[]> interleaved_input(
+        new int16_t[channels_ * samples_per_10ms]);
+    for (size_t i = 0; i < audio_encoder_->Num10MsFramesInNextPacket(); ++i) {
+      EXPECT_EQ(0u, encoded_info.encoded_bytes);
+
+      // Duplicate the mono input signal to however many channels the test
+      // wants.
+      test::InputAudioFile::DuplicateInterleaved(input + i * samples_per_10ms,
+                                                 samples_per_10ms, channels_,
+                                                 interleaved_input.get());
+
+      encoded_info = audio_encoder_->Encode(
+          0, rtc::ArrayView<const int16_t>(interleaved_input.get(),
+                                           audio_encoder_->NumChannels() *
+                                               audio_encoder_->SampleRateHz() /
+                                               100),
+          output);
+    }
+    EXPECT_EQ(payload_type_, encoded_info.payload_type);
+    return static_cast<int>(encoded_info.encoded_bytes);
+  }
+
+  // Encodes and decodes audio. The absolute difference between the input and
+  // output is compared vs |tolerance|, and the mean-squared error is compared
+  // with |mse|. The encoded stream should contain |expected_bytes|. For stereo
+  // audio, the absolute difference between the two channels is compared vs
+  // |channel_diff_tolerance|.
+  void EncodeDecodeTest(size_t expected_bytes, int tolerance, double mse,
+                        int delay = 0, int channel_diff_tolerance = 0) {
+    ASSERT_GE(tolerance, 0) << "Test must define a tolerance >= 0";
+    ASSERT_GE(channel_diff_tolerance, 0) <<
+        "Test must define a channel_diff_tolerance >= 0";
+    size_t processed_samples = 0u;
+    rtc::Buffer encoded;
+    size_t encoded_bytes = 0u;
+    InitEncoder();
+    std::vector<int16_t> input;
+    std::vector<int16_t> decoded;
+    while (processed_samples + frame_size_ <= data_length_) {
+      // Extend input vector with |frame_size_|.
+      input.resize(input.size() + frame_size_, 0);
+      // Read from input file.
+      ASSERT_GE(input.size() - processed_samples, frame_size_);
+      ASSERT_TRUE(input_audio_.Read(
+          frame_size_, codec_input_rate_hz_, &input[processed_samples]));
+      size_t enc_len = EncodeFrame(
+          &input[processed_samples], frame_size_, &encoded);
+      // Make sure that frame_size_ * channels_ samples are allocated and free.
+      decoded.resize((processed_samples + frame_size_) * channels_, 0);
+      AudioDecoder::SpeechType speech_type;
+      size_t dec_len = decoder_->Decode(
+          &encoded.data()[encoded_bytes], enc_len, codec_input_rate_hz_,
+          frame_size_ * channels_ * sizeof(int16_t),
+          &decoded[processed_samples * channels_], &speech_type);
+      EXPECT_EQ(frame_size_ * channels_, dec_len);
+      encoded_bytes += enc_len;
+      processed_samples += frame_size_;
+    }
+    // For some codecs it doesn't make sense to check expected number of bytes,
+    // since the number can vary for different platforms. Opus and iSAC are
+    // such codecs. In this case expected_bytes is set to 0.
+    if (expected_bytes) {
+      EXPECT_EQ(expected_bytes, encoded_bytes);
+    }
+    CompareInputOutput(
+        input, decoded, processed_samples, channels_, tolerance, delay);
+    if (channels_ == 2)
+      CompareTwoChannels(
+          decoded, processed_samples, channels_, channel_diff_tolerance);
+    EXPECT_LE(
+        MseInputOutput(input, decoded, processed_samples, channels_, delay),
+        mse);
+  }
+
+  // Encodes a payload and decodes it twice with decoder re-init before each
+  // decode. Verifies that the decoded result is the same.
+  void ReInitTest() {
+    InitEncoder();
+    std::unique_ptr<int16_t[]> input(new int16_t[frame_size_]);
+    ASSERT_TRUE(
+        input_audio_.Read(frame_size_, codec_input_rate_hz_, input.get()));
+    rtc::Buffer encoded;
+    size_t enc_len = EncodeFrame(input.get(), frame_size_, &encoded);
+    size_t dec_len;
+    AudioDecoder::SpeechType speech_type1, speech_type2;
+    decoder_->Reset();
+    std::unique_ptr<int16_t[]> output1(new int16_t[frame_size_ * channels_]);
+    dec_len = decoder_->Decode(encoded.data(), enc_len, codec_input_rate_hz_,
+                               frame_size_ * channels_ * sizeof(int16_t),
+                               output1.get(), &speech_type1);
+    ASSERT_LE(dec_len, frame_size_ * channels_);
+    EXPECT_EQ(frame_size_ * channels_, dec_len);
+    // Re-init decoder and decode again.
+    decoder_->Reset();
+    std::unique_ptr<int16_t[]> output2(new int16_t[frame_size_ * channels_]);
+    dec_len = decoder_->Decode(encoded.data(), enc_len, codec_input_rate_hz_,
+                               frame_size_ * channels_ * sizeof(int16_t),
+                               output2.get(), &speech_type2);
+    ASSERT_LE(dec_len, frame_size_ * channels_);
+    EXPECT_EQ(frame_size_ * channels_, dec_len);
+    for (unsigned int n = 0; n < frame_size_; ++n) {
+      ASSERT_EQ(output1[n], output2[n]) << "Exit test on first diff; n = " << n;
+    }
+    EXPECT_EQ(speech_type1, speech_type2);
+  }
+
+  // Call DecodePlc and verify that the correct number of samples is produced.
+  void DecodePlcTest() {
+    InitEncoder();
+    std::unique_ptr<int16_t[]> input(new int16_t[frame_size_]);
+    ASSERT_TRUE(
+        input_audio_.Read(frame_size_, codec_input_rate_hz_, input.get()));
+    rtc::Buffer encoded;
+    size_t enc_len = EncodeFrame(input.get(), frame_size_, &encoded);
+    AudioDecoder::SpeechType speech_type;
+    decoder_->Reset();
+    std::unique_ptr<int16_t[]> output(new int16_t[frame_size_ * channels_]);
+    size_t dec_len = decoder_->Decode(encoded.data(), enc_len,
+                                      codec_input_rate_hz_,
+                                      frame_size_ * channels_ * sizeof(int16_t),
+                                      output.get(), &speech_type);
+    EXPECT_EQ(frame_size_ * channels_, dec_len);
+    // Call DecodePlc and verify that we get one frame of data.
+    // (Overwrite the output from the above Decode call, but that does not
+    // matter.)
+    dec_len = decoder_->DecodePlc(1, output.get());
+    EXPECT_EQ(frame_size_ * channels_, dec_len);
+  }
+
+  test::ResampleInputAudioFile input_audio_;
+  int codec_input_rate_hz_;
+  size_t frame_size_;
+  size_t data_length_;
+  size_t channels_;
+  const int payload_type_;
+  AudioDecoder* decoder_;
+  std::unique_ptr<AudioEncoder> audio_encoder_;
+};
+
+class AudioDecoderPcmUTest : public AudioDecoderTest {
+ protected:
+  AudioDecoderPcmUTest() : AudioDecoderTest() {
+    frame_size_ = 160;
+    data_length_ = 10 * frame_size_;
+    decoder_ = new AudioDecoderPcmU(1);
+    AudioEncoderPcmU::Config config;
+    config.frame_size_ms = static_cast<int>(frame_size_ / 8);
+    config.payload_type = payload_type_;
+    audio_encoder_.reset(new AudioEncoderPcmU(config));
+  }
+};
+
+class AudioDecoderPcmATest : public AudioDecoderTest {
+ protected:
+  AudioDecoderPcmATest() : AudioDecoderTest() {
+    frame_size_ = 160;
+    data_length_ = 10 * frame_size_;
+    decoder_ = new AudioDecoderPcmA(1);
+    AudioEncoderPcmA::Config config;
+    config.frame_size_ms = static_cast<int>(frame_size_ / 8);
+    config.payload_type = payload_type_;
+    audio_encoder_.reset(new AudioEncoderPcmA(config));
+  }
+};
+
+class AudioDecoderPcm16BTest : public AudioDecoderTest {
+ protected:
+  AudioDecoderPcm16BTest() : AudioDecoderTest() {
+    codec_input_rate_hz_ = 16000;
+    frame_size_ = 20 * codec_input_rate_hz_ / 1000;
+    data_length_ = 10 * frame_size_;
+    decoder_ = new AudioDecoderPcm16B(codec_input_rate_hz_, 1);
+    assert(decoder_);
+    AudioEncoderPcm16B::Config config;
+    config.sample_rate_hz = codec_input_rate_hz_;
+    config.frame_size_ms =
+        static_cast<int>(frame_size_ / (config.sample_rate_hz / 1000));
+    config.payload_type = payload_type_;
+    audio_encoder_.reset(new AudioEncoderPcm16B(config));
+  }
+};
+
+class AudioDecoderIlbcTest : public AudioDecoderTest {
+ protected:
+  AudioDecoderIlbcTest() : AudioDecoderTest() {
+    codec_input_rate_hz_ = 8000;
+    frame_size_ = 240;
+    data_length_ = 10 * frame_size_;
+    decoder_ = new AudioDecoderIlbcImpl;
+    assert(decoder_);
+    AudioEncoderIlbcConfig config;
+    config.frame_size_ms = 30;
+    audio_encoder_.reset(new AudioEncoderIlbcImpl(config, payload_type_));
+  }
+
+  // Overload the default test since iLBC's function WebRtcIlbcfix_NetEqPlc does
+  // not return any data. It simply resets a few states and returns 0.
+  void DecodePlcTest() {
+    InitEncoder();
+    std::unique_ptr<int16_t[]> input(new int16_t[frame_size_]);
+    ASSERT_TRUE(
+        input_audio_.Read(frame_size_, codec_input_rate_hz_, input.get()));
+    rtc::Buffer encoded;
+    size_t enc_len = EncodeFrame(input.get(), frame_size_, &encoded);
+    AudioDecoder::SpeechType speech_type;
+    decoder_->Reset();
+    std::unique_ptr<int16_t[]> output(new int16_t[frame_size_ * channels_]);
+    size_t dec_len = decoder_->Decode(encoded.data(), enc_len,
+                                      codec_input_rate_hz_,
+                                      frame_size_ * channels_ * sizeof(int16_t),
+                                      output.get(), &speech_type);
+    EXPECT_EQ(frame_size_, dec_len);
+    // Simply call DecodePlc and verify that we get 0 as return value.
+    EXPECT_EQ(0U, decoder_->DecodePlc(1, output.get()));
+  }
+};
+
+class AudioDecoderIsacFloatTest : public AudioDecoderTest {
+ protected:
+  AudioDecoderIsacFloatTest() : AudioDecoderTest() {
+    codec_input_rate_hz_ = 16000;
+    frame_size_ = 480;
+    data_length_ = 10 * frame_size_;
+    AudioEncoderIsacFloatImpl::Config config;
+    config.payload_type = payload_type_;
+    config.sample_rate_hz = codec_input_rate_hz_;
+    config.adaptive_mode = false;
+    config.frame_size_ms =
+        1000 * static_cast<int>(frame_size_) / codec_input_rate_hz_;
+    audio_encoder_.reset(new AudioEncoderIsacFloatImpl(config));
+    decoder_ = new AudioDecoderIsacFloatImpl(codec_input_rate_hz_);
+  }
+};
+
+class AudioDecoderIsacSwbTest : public AudioDecoderTest {
+ protected:
+  AudioDecoderIsacSwbTest() : AudioDecoderTest() {
+    codec_input_rate_hz_ = 32000;
+    frame_size_ = 960;
+    data_length_ = 10 * frame_size_;
+    AudioEncoderIsacFloatImpl::Config config;
+    config.payload_type = payload_type_;
+    config.sample_rate_hz = codec_input_rate_hz_;
+    config.adaptive_mode = false;
+    config.frame_size_ms =
+        1000 * static_cast<int>(frame_size_) / codec_input_rate_hz_;
+    audio_encoder_.reset(new AudioEncoderIsacFloatImpl(config));
+    decoder_ = new AudioDecoderIsacFloatImpl(codec_input_rate_hz_);
+  }
+};
+
+class AudioDecoderIsacFixTest : public AudioDecoderTest {
+ protected:
+  AudioDecoderIsacFixTest() : AudioDecoderTest() {
+    codec_input_rate_hz_ = 16000;
+    frame_size_ = 480;
+    data_length_ = 10 * frame_size_;
+    AudioEncoderIsacFixImpl::Config config;
+    config.payload_type = payload_type_;
+    config.sample_rate_hz = codec_input_rate_hz_;
+    config.adaptive_mode = false;
+    config.frame_size_ms =
+        1000 * static_cast<int>(frame_size_) / codec_input_rate_hz_;
+    audio_encoder_.reset(new AudioEncoderIsacFixImpl(config));
+    decoder_ = new AudioDecoderIsacFixImpl(codec_input_rate_hz_);
+  }
+};
+
+class AudioDecoderG722Test : public AudioDecoderTest {
+ protected:
+  AudioDecoderG722Test() : AudioDecoderTest() {
+    codec_input_rate_hz_ = 16000;
+    frame_size_ = 160;
+    data_length_ = 10 * frame_size_;
+    decoder_ = new AudioDecoderG722Impl;
+    assert(decoder_);
+    AudioEncoderG722Config config;
+    config.frame_size_ms = 10;
+    config.num_channels = 1;
+    audio_encoder_.reset(new AudioEncoderG722Impl(config, payload_type_));
+  }
+};
+
+class AudioDecoderG722StereoTest : public AudioDecoderTest {
+ protected:
+  AudioDecoderG722StereoTest() : AudioDecoderTest() {
+    channels_ = 2;
+    codec_input_rate_hz_ = 16000;
+    frame_size_ = 160;
+    data_length_ = 10 * frame_size_;
+    decoder_ = new AudioDecoderG722StereoImpl;
+    assert(decoder_);
+    AudioEncoderG722Config config;
+    config.frame_size_ms = 10;
+    config.num_channels = 2;
+    audio_encoder_.reset(new AudioEncoderG722Impl(config, payload_type_));
+  }
+};
+
+class AudioDecoderOpusTest : public AudioDecoderTest {
+ protected:
+  AudioDecoderOpusTest() : AudioDecoderTest() {
+    codec_input_rate_hz_ = 48000;
+    frame_size_ = 480;
+    data_length_ = 10 * frame_size_;
+    decoder_ = new AudioDecoderOpusImpl(1);
+    AudioEncoderOpusConfig config;
+    config.frame_size_ms = static_cast<int>(frame_size_) / 48;
+    config.application = AudioEncoderOpusConfig::ApplicationMode::kVoip;
+    audio_encoder_ = AudioEncoderOpus::MakeAudioEncoder(config, payload_type_);
+  }
+};
+
+class AudioDecoderOpusStereoTest : public AudioDecoderOpusTest {
+ protected:
+  AudioDecoderOpusStereoTest() : AudioDecoderOpusTest() {
+    channels_ = 2;
+    delete decoder_;
+    decoder_ = new AudioDecoderOpusImpl(2);
+    AudioEncoderOpusConfig config;
+    config.frame_size_ms = static_cast<int>(frame_size_) / 48;
+    config.num_channels = 2;
+    config.application = AudioEncoderOpusConfig::ApplicationMode::kAudio;
+    audio_encoder_ = AudioEncoderOpus::MakeAudioEncoder(config, payload_type_);
+  }
+};
+
+TEST_F(AudioDecoderPcmUTest, EncodeDecode) {
+  int tolerance = 251;
+  double mse = 1734.0;
+  EncodeDecodeTest(data_length_, tolerance, mse);
+  ReInitTest();
+  EXPECT_FALSE(decoder_->HasDecodePlc());
+}
+
+namespace {
+int SetAndGetTargetBitrate(AudioEncoder* audio_encoder, int rate) {
+  audio_encoder->OnReceivedUplinkBandwidth(rate, rtc::nullopt);
+  return audio_encoder->GetTargetBitrate();
+}
+void TestSetAndGetTargetBitratesWithFixedCodec(AudioEncoder* audio_encoder,
+                                               int fixed_rate) {
+  EXPECT_EQ(fixed_rate, SetAndGetTargetBitrate(audio_encoder, 32000));
+  EXPECT_EQ(fixed_rate, SetAndGetTargetBitrate(audio_encoder, fixed_rate - 1));
+  EXPECT_EQ(fixed_rate, SetAndGetTargetBitrate(audio_encoder, fixed_rate));
+  EXPECT_EQ(fixed_rate, SetAndGetTargetBitrate(audio_encoder, fixed_rate + 1));
+}
+}  // namespace
+
+TEST_F(AudioDecoderPcmUTest, SetTargetBitrate) {
+  TestSetAndGetTargetBitratesWithFixedCodec(audio_encoder_.get(), 64000);
+}
+
+TEST_F(AudioDecoderPcmATest, EncodeDecode) {
+  int tolerance = 308;
+  double mse = 1931.0;
+  EncodeDecodeTest(data_length_, tolerance, mse);
+  ReInitTest();
+  EXPECT_FALSE(decoder_->HasDecodePlc());
+}
+
+TEST_F(AudioDecoderPcmATest, SetTargetBitrate) {
+  TestSetAndGetTargetBitratesWithFixedCodec(audio_encoder_.get(), 64000);
+}
+
+TEST_F(AudioDecoderPcm16BTest, EncodeDecode) {
+  int tolerance = 0;
+  double mse = 0.0;
+  EncodeDecodeTest(2 * data_length_, tolerance, mse);
+  ReInitTest();
+  EXPECT_FALSE(decoder_->HasDecodePlc());
+}
+
+TEST_F(AudioDecoderPcm16BTest, SetTargetBitrate) {
+  TestSetAndGetTargetBitratesWithFixedCodec(audio_encoder_.get(),
+                                            codec_input_rate_hz_ * 16);
+}
+
+TEST_F(AudioDecoderIlbcTest, EncodeDecode) {
+  int tolerance = 6808;
+  double mse = 2.13e6;
+  int delay = 80;  // Delay from input to output.
+  EncodeDecodeTest(500, tolerance, mse, delay);
+  ReInitTest();
+  EXPECT_TRUE(decoder_->HasDecodePlc());
+  DecodePlcTest();
+}
+
+TEST_F(AudioDecoderIlbcTest, SetTargetBitrate) {
+  TestSetAndGetTargetBitratesWithFixedCodec(audio_encoder_.get(), 13333);
+}
+
+TEST_F(AudioDecoderIsacFloatTest, EncodeDecode) {
+  int tolerance = 3399;
+  double mse = 434951.0;
+  int delay = 48;  // Delay from input to output.
+  EncodeDecodeTest(0, tolerance, mse, delay);
+  ReInitTest();
+  EXPECT_FALSE(decoder_->HasDecodePlc());
+}
+
+TEST_F(AudioDecoderIsacFloatTest, SetTargetBitrate) {
+  TestSetAndGetTargetBitratesWithFixedCodec(audio_encoder_.get(), 32000);
+}
+
+TEST_F(AudioDecoderIsacSwbTest, EncodeDecode) {
+  int tolerance = 19757;
+  double mse = 8.18e6;
+  int delay = 160;  // Delay from input to output.
+  EncodeDecodeTest(0, tolerance, mse, delay);
+  ReInitTest();
+  EXPECT_FALSE(decoder_->HasDecodePlc());
+}
+
+TEST_F(AudioDecoderIsacSwbTest, SetTargetBitrate) {
+  TestSetAndGetTargetBitratesWithFixedCodec(audio_encoder_.get(), 32000);
+}
+
+TEST_F(AudioDecoderIsacFixTest, EncodeDecode) {
+  int tolerance = 11034;
+  double mse = 3.46e6;
+  int delay = 54;  // Delay from input to output.
+#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM)
+  static const int kEncodedBytes = 685;
+#elif defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
+  static const int kEncodedBytes = 673;
+#else
+  static const int kEncodedBytes = 671;
+#endif
+  EncodeDecodeTest(kEncodedBytes, tolerance, mse, delay);
+  ReInitTest();
+  EXPECT_FALSE(decoder_->HasDecodePlc());
+}
+
+TEST_F(AudioDecoderIsacFixTest, SetTargetBitrate) {
+  TestSetAndGetTargetBitratesWithFixedCodec(audio_encoder_.get(), 32000);
+}
+
+TEST_F(AudioDecoderG722Test, EncodeDecode) {
+  int tolerance = 6176;
+  double mse = 238630.0;
+  int delay = 22;  // Delay from input to output.
+  EncodeDecodeTest(data_length_ / 2, tolerance, mse, delay);
+  ReInitTest();
+  EXPECT_FALSE(decoder_->HasDecodePlc());
+}
+
+TEST_F(AudioDecoderG722Test, SetTargetBitrate) {
+  TestSetAndGetTargetBitratesWithFixedCodec(audio_encoder_.get(), 64000);
+}
+
+TEST_F(AudioDecoderG722StereoTest, EncodeDecode) {
+  int tolerance = 6176;
+  int channel_diff_tolerance = 0;
+  double mse = 238630.0;
+  int delay = 22;  // Delay from input to output.
+  EncodeDecodeTest(data_length_, tolerance, mse, delay, channel_diff_tolerance);
+  ReInitTest();
+  EXPECT_FALSE(decoder_->HasDecodePlc());
+}
+
+TEST_F(AudioDecoderG722StereoTest, SetTargetBitrate) {
+  TestSetAndGetTargetBitratesWithFixedCodec(audio_encoder_.get(), 128000);
+}
+
+TEST_F(AudioDecoderOpusTest, EncodeDecode) {
+  int tolerance = 6176;
+  double mse = 238630.0;
+  int delay = 22;  // Delay from input to output.
+  EncodeDecodeTest(0, tolerance, mse, delay);
+  ReInitTest();
+  EXPECT_FALSE(decoder_->HasDecodePlc());
+}
+
+namespace {
+void TestOpusSetTargetBitrates(AudioEncoder* audio_encoder) {
+  EXPECT_EQ(6000, SetAndGetTargetBitrate(audio_encoder, 5999));
+  EXPECT_EQ(6000, SetAndGetTargetBitrate(audio_encoder, 6000));
+  EXPECT_EQ(32000, SetAndGetTargetBitrate(audio_encoder, 32000));
+  EXPECT_EQ(510000, SetAndGetTargetBitrate(audio_encoder, 510000));
+  EXPECT_EQ(510000, SetAndGetTargetBitrate(audio_encoder, 511000));
+}
+}  // namespace
+
+TEST_F(AudioDecoderOpusTest, SetTargetBitrate) {
+  TestOpusSetTargetBitrates(audio_encoder_.get());
+}
+
+TEST_F(AudioDecoderOpusStereoTest, EncodeDecode) {
+  int tolerance = 6176;
+  int channel_diff_tolerance = 0;
+  double mse = 238630.0;
+  int delay = 22;  // Delay from input to output.
+  EncodeDecodeTest(0, tolerance, mse, delay, channel_diff_tolerance);
+  ReInitTest();
+  EXPECT_FALSE(decoder_->HasDecodePlc());
+}
+
+TEST_F(AudioDecoderOpusStereoTest, SetTargetBitrate) {
+  TestOpusSetTargetBitrates(audio_encoder_.get());
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/audio_multi_vector.cc b/modules/audio_coding/neteq/audio_multi_vector.cc
new file mode 100644
index 0000000..c3e623f
--- /dev/null
+++ b/modules/audio_coding/neteq/audio_multi_vector.cc
@@ -0,0 +1,222 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+
+#include <assert.h>
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+AudioMultiVector::AudioMultiVector(size_t N) {
+  assert(N > 0);
+  if (N < 1) N = 1;
+  for (size_t n = 0; n < N; ++n) {
+    channels_.push_back(new AudioVector);
+  }
+  num_channels_ = N;
+}
+
+AudioMultiVector::AudioMultiVector(size_t N, size_t initial_size) {
+  assert(N > 0);
+  if (N < 1) N = 1;
+  for (size_t n = 0; n < N; ++n) {
+    channels_.push_back(new AudioVector(initial_size));
+  }
+  num_channels_ = N;
+}
+
+AudioMultiVector::~AudioMultiVector() {
+  std::vector<AudioVector*>::iterator it = channels_.begin();
+  while (it != channels_.end()) {
+    delete (*it);
+    ++it;
+  }
+}
+
+void AudioMultiVector::Clear() {
+  for (size_t i = 0; i < num_channels_; ++i) {
+    channels_[i]->Clear();
+  }
+}
+
+void AudioMultiVector::Zeros(size_t length) {
+  for (size_t i = 0; i < num_channels_; ++i) {
+    channels_[i]->Clear();
+    channels_[i]->Extend(length);
+  }
+}
+
+void AudioMultiVector::CopyTo(AudioMultiVector* copy_to) const {
+  if (copy_to) {
+    for (size_t i = 0; i < num_channels_; ++i) {
+      channels_[i]->CopyTo(&(*copy_to)[i]);
+    }
+  }
+}
+
+void AudioMultiVector::PushBackInterleaved(const int16_t* append_this,
+                                           size_t length) {
+  assert(length % num_channels_ == 0);
+  if (num_channels_ == 1) {
+    // Special case to avoid extra allocation and data shuffling.
+    channels_[0]->PushBack(append_this, length);
+    return;
+  }
+  size_t length_per_channel = length / num_channels_;
+  int16_t* temp_array = new int16_t[length_per_channel];  // Temporary storage.
+  for (size_t channel = 0; channel < num_channels_; ++channel) {
+    // Copy elements to |temp_array|.
+    // Set |source_ptr| to first element of this channel.
+    const int16_t* source_ptr = &append_this[channel];
+    for (size_t i = 0; i < length_per_channel; ++i) {
+      temp_array[i] = *source_ptr;
+      source_ptr += num_channels_;  // Jump to next element of this channel.
+    }
+    channels_[channel]->PushBack(temp_array, length_per_channel);
+  }
+  delete [] temp_array;
+}
+
+void AudioMultiVector::PushBack(const AudioMultiVector& append_this) {
+  assert(num_channels_ == append_this.num_channels_);
+  if (num_channels_ == append_this.num_channels_) {
+    for (size_t i = 0; i < num_channels_; ++i) {
+      channels_[i]->PushBack(append_this[i]);
+    }
+  }
+}
+
+void AudioMultiVector::PushBackFromIndex(const AudioMultiVector& append_this,
+                                         size_t index) {
+  assert(index < append_this.Size());
+  index = std::min(index, append_this.Size() - 1);
+  size_t length = append_this.Size() - index;
+  assert(num_channels_ == append_this.num_channels_);
+  if (num_channels_ == append_this.num_channels_) {
+    for (size_t i = 0; i < num_channels_; ++i) {
+      channels_[i]->PushBack(append_this[i], length, index);
+    }
+  }
+}
+
+void AudioMultiVector::PopFront(size_t length) {
+  for (size_t i = 0; i < num_channels_; ++i) {
+    channels_[i]->PopFront(length);
+  }
+}
+
+void AudioMultiVector::PopBack(size_t length) {
+  for (size_t i = 0; i < num_channels_; ++i) {
+    channels_[i]->PopBack(length);
+  }
+}
+
+size_t AudioMultiVector::ReadInterleaved(size_t length,
+                                         int16_t* destination) const {
+  return ReadInterleavedFromIndex(0, length, destination);
+}
+
+size_t AudioMultiVector::ReadInterleavedFromIndex(size_t start_index,
+                                                  size_t length,
+                                                  int16_t* destination) const {
+  RTC_DCHECK(destination);
+  size_t index = 0;  // Number of elements written to |destination| so far.
+  RTC_DCHECK_LE(start_index, Size());
+  start_index = std::min(start_index, Size());
+  if (length + start_index > Size()) {
+    length = Size() - start_index;
+  }
+  if (num_channels_ == 1) {
+    // Special case to avoid the nested for loop below.
+    (*this)[0].CopyTo(length, start_index, destination);
+    return length;
+  }
+  for (size_t i = 0; i < length; ++i) {
+    for (size_t channel = 0; channel < num_channels_; ++channel) {
+      destination[index] = (*this)[channel][i + start_index];
+      ++index;
+    }
+  }
+  return index;
+}
+
+size_t AudioMultiVector::ReadInterleavedFromEnd(size_t length,
+                                                int16_t* destination) const {
+  length = std::min(length, Size());  // Cannot read more than Size() elements.
+  return ReadInterleavedFromIndex(Size() - length, length, destination);
+}
+
+void AudioMultiVector::OverwriteAt(const AudioMultiVector& insert_this,
+                                   size_t length,
+                                   size_t position) {
+  assert(num_channels_ == insert_this.num_channels_);
+  // Cap |length| at the length of |insert_this|.
+  assert(length <= insert_this.Size());
+  length = std::min(length, insert_this.Size());
+  if (num_channels_ == insert_this.num_channels_) {
+    for (size_t i = 0; i < num_channels_; ++i) {
+      channels_[i]->OverwriteAt(insert_this[i], length, position);
+    }
+  }
+}
+
+void AudioMultiVector::CrossFade(const AudioMultiVector& append_this,
+                                 size_t fade_length) {
+  assert(num_channels_ == append_this.num_channels_);
+  if (num_channels_ == append_this.num_channels_) {
+    for (size_t i = 0; i < num_channels_; ++i) {
+      channels_[i]->CrossFade(append_this[i], fade_length);
+    }
+  }
+}
+
+size_t AudioMultiVector::Channels() const {
+  return num_channels_;
+}
+
+size_t AudioMultiVector::Size() const {
+  assert(channels_[0]);
+  return channels_[0]->Size();
+}
+
+void AudioMultiVector::AssertSize(size_t required_size) {
+  if (Size() < required_size) {
+    size_t extend_length = required_size - Size();
+    for (size_t channel = 0; channel < num_channels_; ++channel) {
+      channels_[channel]->Extend(extend_length);
+    }
+  }
+}
+
+bool AudioMultiVector::Empty() const {
+  assert(channels_[0]);
+  return channels_[0]->Empty();
+}
+
+void AudioMultiVector::CopyChannel(size_t from_channel, size_t to_channel) {
+  assert(from_channel < num_channels_);
+  assert(to_channel < num_channels_);
+  channels_[from_channel]->CopyTo(channels_[to_channel]);
+}
+
+const AudioVector& AudioMultiVector::operator[](size_t index) const {
+  return *(channels_[index]);
+}
+
+AudioVector& AudioMultiVector::operator[](size_t index) {
+  return *(channels_[index]);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/audio_multi_vector.h b/modules/audio_coding/neteq/audio_multi_vector.h
new file mode 100644
index 0000000..f54c98b
--- /dev/null
+++ b/modules/audio_coding/neteq/audio_multi_vector.h
@@ -0,0 +1,139 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_AUDIO_MULTI_VECTOR_H_
+#define MODULES_AUDIO_CODING_NETEQ_AUDIO_MULTI_VECTOR_H_
+
+#include <string.h>  // Access to size_t.
+
+#include <vector>
+
+#include "modules/audio_coding/neteq/audio_vector.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class AudioMultiVector {
+ public:
+  // Creates an empty AudioMultiVector with |N| audio channels. |N| must be
+  // larger than 0.
+  explicit AudioMultiVector(size_t N);
+
+  // Creates an AudioMultiVector with |N| audio channels, each channel having
+  // an initial size. |N| must be larger than 0.
+  AudioMultiVector(size_t N, size_t initial_size);
+
+  virtual ~AudioMultiVector();
+
+  // Deletes all values and make the vector empty.
+  virtual void Clear();
+
+  // Clears the vector and inserts |length| zeros into each channel.
+  virtual void Zeros(size_t length);
+
+  // Copies all values from this vector to |copy_to|. Any contents in |copy_to|
+  // are deleted. After the operation is done, |copy_to| will be an exact
+  // replica of this object. The source and the destination must have the same
+  // number of channels.
+  virtual void CopyTo(AudioMultiVector* copy_to) const;
+
+  // Appends the contents of array |append_this| to the end of this
+  // object. The array is assumed to be channel-interleaved. |length| must be
+  // an even multiple of this object's number of channels.
+  // The length of this object is increased with the |length| divided by the
+  // number of channels.
+  virtual void PushBackInterleaved(const int16_t* append_this, size_t length);
+
+  // Appends the contents of AudioMultiVector |append_this| to this object. The
+  // length of this object is increased with the length of |append_this|.
+  virtual void PushBack(const AudioMultiVector& append_this);
+
+  // Appends the contents of AudioMultiVector |append_this| to this object,
+  // taken from |index| up until the end of |append_this|. The length of this
+  // object is increased.
+  virtual void PushBackFromIndex(const AudioMultiVector& append_this,
+                                 size_t index);
+
+  // Removes |length| elements from the beginning of this object, from each
+  // channel.
+  virtual void PopFront(size_t length);
+
+  // Removes |length| elements from the end of this object, from each
+  // channel.
+  virtual void PopBack(size_t length);
+
+  // Reads |length| samples from each channel and writes them interleaved to
+  // |destination|. The total number of elements written to |destination| is
+  // returned, i.e., |length| * number of channels. If the AudioMultiVector
+  // contains less than |length| samples per channel, this is reflected in the
+  // return value.
+  virtual size_t ReadInterleaved(size_t length, int16_t* destination) const;
+
+  // Like ReadInterleaved() above, but reads from |start_index| instead of from
+  // the beginning.
+  virtual size_t ReadInterleavedFromIndex(size_t start_index,
+                                          size_t length,
+                                          int16_t* destination) const;
+
+  // Like ReadInterleaved() above, but reads from the end instead of from
+  // the beginning.
+  virtual size_t ReadInterleavedFromEnd(size_t length,
+                                        int16_t* destination) const;
+
+  // Overwrites each channel in this AudioMultiVector with values taken from
+  // |insert_this|. The values are taken from the beginning of |insert_this| and
+  // are inserted starting at |position|. |length| values are written into each
+  // channel. If |length| and |position| are selected such that the new data
+  // extends beyond the end of the current AudioVector, the vector is extended
+  // to accommodate the new data. |length| is limited to the length of
+  // |insert_this|.
+  virtual void OverwriteAt(const AudioMultiVector& insert_this,
+                           size_t length,
+                           size_t position);
+
+  // Appends |append_this| to the end of the current vector. Lets the two
+  // vectors overlap by |fade_length| samples (per channel), and cross-fade
+  // linearly in this region.
+  virtual void CrossFade(const AudioMultiVector& append_this,
+                         size_t fade_length);
+
+  // Returns the number of channels.
+  virtual size_t Channels() const;
+
+  // Returns the number of elements per channel in this AudioMultiVector.
+  virtual size_t Size() const;
+
+  // Verify that each channel can hold at least |required_size| elements. If
+  // not, extend accordingly.
+  virtual void AssertSize(size_t required_size);
+
+  virtual bool Empty() const;
+
+  // Copies the data between two channels in the AudioMultiVector. The method
+  // does not add any new channel. Thus, |from_channel| and |to_channel| must
+  // both be valid channel numbers.
+  virtual void CopyChannel(size_t from_channel, size_t to_channel);
+
+  // Accesses and modifies a channel (i.e., an AudioVector object) of this
+  // AudioMultiVector.
+  const AudioVector& operator[](size_t index) const;
+  AudioVector& operator[](size_t index);
+
+ protected:
+  std::vector<AudioVector*> channels_;
+  size_t num_channels_;
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioMultiVector);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_AUDIO_MULTI_VECTOR_H_
diff --git a/modules/audio_coding/neteq/audio_multi_vector_unittest.cc b/modules/audio_coding/neteq/audio_multi_vector_unittest.cc
new file mode 100644
index 0000000..f05aee0
--- /dev/null
+++ b/modules/audio_coding/neteq/audio_multi_vector_unittest.cc
@@ -0,0 +1,324 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include <string>
+
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// This is a value-parameterized test. The test cases are instantiated with
+// different values for the test parameter, which is used to determine the
+// number of channels in the AudioMultiBuffer. Note that it is not possible
+// to combine typed testing with value-parameterized testing, and since the
+// tests for AudioVector already covers a number of different type parameters,
+// this test focuses on testing different number of channels, and keeping the
+// value type constant.
+
+class AudioMultiVectorTest : public ::testing::TestWithParam<size_t> {
+ protected:
+  AudioMultiVectorTest()
+      : num_channels_(GetParam()),  // Get the test parameter.
+        interleaved_length_(num_channels_ * array_length()) {
+    array_interleaved_ = new int16_t[num_channels_ * array_length()];
+  }
+
+  ~AudioMultiVectorTest() {
+    delete [] array_interleaved_;
+  }
+
+  virtual void SetUp() {
+    // Populate test arrays.
+    for (size_t i = 0; i < array_length(); ++i) {
+      array_[i] = static_cast<int16_t>(i);
+    }
+    int16_t* ptr = array_interleaved_;
+    // Write 100, 101, 102, ... for first channel.
+    // Write 200, 201, 202, ... for second channel.
+    // And so on.
+    for (size_t i = 0; i < array_length(); ++i) {
+      for (size_t j = 1; j <= num_channels_; ++j) {
+        *ptr = rtc::checked_cast<int16_t>(j * 100 + i);
+        ++ptr;
+      }
+    }
+  }
+
+  size_t array_length() const {
+    return sizeof(array_) / sizeof(array_[0]);
+  }
+
+  const size_t num_channels_;
+  size_t interleaved_length_;
+  int16_t array_[10];
+  int16_t* array_interleaved_;
+};
+
+// Create and destroy AudioMultiVector objects, both empty and with a predefined
+// length.
+TEST_P(AudioMultiVectorTest, CreateAndDestroy) {
+  AudioMultiVector vec1(num_channels_);
+  EXPECT_TRUE(vec1.Empty());
+  EXPECT_EQ(num_channels_, vec1.Channels());
+  EXPECT_EQ(0u, vec1.Size());
+
+  size_t initial_size = 17;
+  AudioMultiVector vec2(num_channels_, initial_size);
+  EXPECT_FALSE(vec2.Empty());
+  EXPECT_EQ(num_channels_, vec2.Channels());
+  EXPECT_EQ(initial_size, vec2.Size());
+}
+
+// Test the subscript operator [] for getting and setting.
+TEST_P(AudioMultiVectorTest, SubscriptOperator) {
+  AudioMultiVector vec(num_channels_, array_length());
+  for (size_t channel = 0; channel < num_channels_; ++channel) {
+    for (size_t i = 0; i < array_length(); ++i) {
+      vec[channel][i] = static_cast<int16_t>(i);
+      // Make sure to use the const version.
+      const AudioVector& audio_vec = vec[channel];
+      EXPECT_EQ(static_cast<int16_t>(i), audio_vec[i]);
+    }
+  }
+}
+
+// Test the PushBackInterleaved method and the CopyFrom method. The Clear
+// method is also invoked.
+TEST_P(AudioMultiVectorTest, PushBackInterleavedAndCopy) {
+  AudioMultiVector vec(num_channels_);
+  vec.PushBackInterleaved(array_interleaved_, interleaved_length_);
+  AudioMultiVector vec_copy(num_channels_);
+  vec.CopyTo(&vec_copy);  // Copy from |vec| to |vec_copy|.
+  ASSERT_EQ(num_channels_, vec.Channels());
+  ASSERT_EQ(array_length(), vec.Size());
+  ASSERT_EQ(num_channels_, vec_copy.Channels());
+  ASSERT_EQ(array_length(), vec_copy.Size());
+  for (size_t channel = 0; channel < vec.Channels(); ++channel) {
+    for (size_t i = 0; i < array_length(); ++i) {
+      EXPECT_EQ(static_cast<int16_t>((channel + 1) * 100 + i), vec[channel][i]);
+      EXPECT_EQ(vec[channel][i], vec_copy[channel][i]);
+    }
+  }
+
+  // Clear |vec| and verify that it is empty.
+  vec.Clear();
+  EXPECT_TRUE(vec.Empty());
+
+  // Now copy the empty vector and verify that the copy becomes empty too.
+  vec.CopyTo(&vec_copy);
+  EXPECT_TRUE(vec_copy.Empty());
+}
+
+// Try to copy to a NULL pointer. Nothing should happen.
+TEST_P(AudioMultiVectorTest, CopyToNull) {
+  AudioMultiVector vec(num_channels_);
+  AudioMultiVector* vec_copy = NULL;
+  vec.PushBackInterleaved(array_interleaved_, interleaved_length_);
+  vec.CopyTo(vec_copy);
+}
+
+// Test the PushBack method with another AudioMultiVector as input argument.
+TEST_P(AudioMultiVectorTest, PushBackVector) {
+  AudioMultiVector vec1(num_channels_, array_length());
+  AudioMultiVector vec2(num_channels_, array_length());
+  // Set the first vector to [0, 1, ..., array_length() - 1] +
+  //   100 * channel_number.
+  // Set the second vector to [array_length(), array_length() + 1, ...,
+  //   2 * array_length() - 1] + 100 * channel_number.
+  for (size_t channel = 0; channel < num_channels_; ++channel) {
+    for (size_t i = 0; i < array_length(); ++i) {
+      vec1[channel][i] = static_cast<int16_t>(i + 100 * channel);
+      vec2[channel][i] =
+          static_cast<int16_t>(i + 100 * channel + array_length());
+    }
+  }
+  // Append vec2 to the back of vec1.
+  vec1.PushBack(vec2);
+  ASSERT_EQ(2u * array_length(), vec1.Size());
+  for (size_t channel = 0; channel < num_channels_; ++channel) {
+    for (size_t i = 0; i < 2 * array_length(); ++i) {
+      EXPECT_EQ(static_cast<int16_t>(i + 100 * channel), vec1[channel][i]);
+    }
+  }
+}
+
+// Test the PushBackFromIndex method.
+TEST_P(AudioMultiVectorTest, PushBackFromIndex) {
+  AudioMultiVector vec1(num_channels_);
+  vec1.PushBackInterleaved(array_interleaved_, interleaved_length_);
+  AudioMultiVector vec2(num_channels_);
+
+  // Append vec1 to the back of vec2 (which is empty). Read vec1 from the second
+  // last element.
+  vec2.PushBackFromIndex(vec1, array_length() - 2);
+  ASSERT_EQ(2u, vec2.Size());
+  for (size_t channel = 0; channel < num_channels_; ++channel) {
+    for (size_t i = 0; i < 2; ++i) {
+      EXPECT_EQ(array_interleaved_[channel + num_channels_ *
+                  (array_length() - 2 + i)], vec2[channel][i]);
+    }
+  }
+}
+
+// Starts with pushing some values to the vector, then test the Zeros method.
+TEST_P(AudioMultiVectorTest, Zeros) {
+  AudioMultiVector vec(num_channels_);
+  vec.PushBackInterleaved(array_interleaved_, interleaved_length_);
+  vec.Zeros(2 * array_length());
+  ASSERT_EQ(num_channels_, vec.Channels());
+  ASSERT_EQ(2u * array_length(), vec.Size());
+  for (size_t channel = 0; channel < num_channels_; ++channel) {
+    for (size_t i = 0; i < 2 * array_length(); ++i) {
+      EXPECT_EQ(0, vec[channel][i]);
+    }
+  }
+}
+
+// Test the ReadInterleaved method
+TEST_P(AudioMultiVectorTest, ReadInterleaved) {
+  AudioMultiVector vec(num_channels_);
+  vec.PushBackInterleaved(array_interleaved_, interleaved_length_);
+  int16_t* output = new int16_t[interleaved_length_];
+  // Read 5 samples.
+  size_t read_samples = 5;
+  EXPECT_EQ(num_channels_ * read_samples,
+            vec.ReadInterleaved(read_samples, output));
+  EXPECT_EQ(0,
+            memcmp(array_interleaved_, output, read_samples * sizeof(int16_t)));
+
+  // Read too many samples. Expect to get all samples from the vector.
+  EXPECT_EQ(interleaved_length_,
+            vec.ReadInterleaved(array_length() + 1, output));
+  EXPECT_EQ(0,
+            memcmp(array_interleaved_, output, read_samples * sizeof(int16_t)));
+
+  delete [] output;
+}
+
+// Test the PopFront method.
+TEST_P(AudioMultiVectorTest, PopFront) {
+  AudioMultiVector vec(num_channels_);
+  vec.PushBackInterleaved(array_interleaved_, interleaved_length_);
+  vec.PopFront(1);  // Remove one element from each channel.
+  ASSERT_EQ(array_length() - 1u, vec.Size());
+  // Let |ptr| point to the second element of the first channel in the
+  // interleaved array.
+  int16_t* ptr = &array_interleaved_[num_channels_];
+  for (size_t i = 0; i < array_length() - 1; ++i) {
+    for (size_t channel = 0; channel < num_channels_; ++channel) {
+      EXPECT_EQ(*ptr, vec[channel][i]);
+      ++ptr;
+    }
+  }
+  vec.PopFront(array_length());  // Remove more elements than vector size.
+  EXPECT_EQ(0u, vec.Size());
+}
+
+// Test the PopBack method.
+TEST_P(AudioMultiVectorTest, PopBack) {
+  AudioMultiVector vec(num_channels_);
+  vec.PushBackInterleaved(array_interleaved_, interleaved_length_);
+  vec.PopBack(1);  // Remove one element from each channel.
+  ASSERT_EQ(array_length() - 1u, vec.Size());
+  // Let |ptr| point to the first element of the first channel in the
+  // interleaved array.
+  int16_t* ptr = array_interleaved_;
+  for (size_t i = 0; i < array_length() - 1; ++i) {
+    for (size_t channel = 0; channel < num_channels_; ++channel) {
+      EXPECT_EQ(*ptr, vec[channel][i]);
+      ++ptr;
+    }
+  }
+  vec.PopBack(array_length());  // Remove more elements than vector size.
+  EXPECT_EQ(0u, vec.Size());
+}
+
+// Test the AssertSize method.
+TEST_P(AudioMultiVectorTest, AssertSize) {
+  AudioMultiVector vec(num_channels_, array_length());
+  EXPECT_EQ(array_length(), vec.Size());
+  // Start with asserting with smaller sizes than already allocated.
+  vec.AssertSize(0);
+  vec.AssertSize(array_length() - 1);
+  // Nothing should have changed.
+  EXPECT_EQ(array_length(), vec.Size());
+  // Assert with one element longer than already allocated.
+  vec.AssertSize(array_length() + 1);
+  // Expect vector to have grown.
+  EXPECT_EQ(array_length() + 1, vec.Size());
+  // Also check the individual AudioVectors.
+  for (size_t channel = 0; channel < vec.Channels(); ++channel) {
+    EXPECT_EQ(array_length() + 1u, vec[channel].Size());
+  }
+}
+
+// Test the PushBack method with another AudioMultiVector as input argument.
+TEST_P(AudioMultiVectorTest, OverwriteAt) {
+  AudioMultiVector vec1(num_channels_);
+  vec1.PushBackInterleaved(array_interleaved_, interleaved_length_);
+  AudioMultiVector vec2(num_channels_);
+  vec2.Zeros(3);  // 3 zeros in each channel.
+  // Overwrite vec2 at position 5.
+  vec1.OverwriteAt(vec2, 3, 5);
+  // Verify result.
+  // Length remains the same.
+  ASSERT_EQ(array_length(), vec1.Size());
+  int16_t* ptr = array_interleaved_;
+  for (size_t i = 0; i < array_length() - 1; ++i) {
+    for (size_t channel = 0; channel < num_channels_; ++channel) {
+      if (i >= 5 && i <= 7) {
+        // Elements 5, 6, 7 should have been replaced with zeros.
+        EXPECT_EQ(0, vec1[channel][i]);
+      } else {
+        EXPECT_EQ(*ptr, vec1[channel][i]);
+      }
+      ++ptr;
+    }
+  }
+}
+
+// Test the CopyChannel method, when the test is instantiated with at least two
+// channels.
+TEST_P(AudioMultiVectorTest, CopyChannel) {
+  if (num_channels_ < 2)
+    return;
+
+  AudioMultiVector vec(num_channels_);
+  vec.PushBackInterleaved(array_interleaved_, interleaved_length_);
+  // Create a reference copy.
+  AudioMultiVector ref(num_channels_);
+  ref.PushBack(vec);
+  // Copy from first to last channel.
+  vec.CopyChannel(0, num_channels_ - 1);
+  // Verify that the first and last channels are identical; the others should
+  // be left untouched.
+  for (size_t i = 0; i < array_length(); ++i) {
+    // Verify that all but the last channel are untouched.
+    for (size_t channel = 0; channel < num_channels_ - 1; ++channel) {
+      EXPECT_EQ(ref[channel][i], vec[channel][i]);
+    }
+    // Verify that the last and the first channels are identical.
+    EXPECT_EQ(vec[0][i], vec[num_channels_ - 1][i]);
+  }
+}
+
+INSTANTIATE_TEST_CASE_P(TestNumChannels,
+                        AudioMultiVectorTest,
+                        ::testing::Values(static_cast<size_t>(1),
+                                          static_cast<size_t>(2),
+                                          static_cast<size_t>(5)));
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/audio_vector.cc b/modules/audio_coding/neteq/audio_vector.cc
new file mode 100644
index 0000000..93cd1fb
--- /dev/null
+++ b/modules/audio_coding/neteq/audio_vector.cc
@@ -0,0 +1,386 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/audio_vector.h"
+
+#include <assert.h>
+
+#include <algorithm>
+#include <memory>
+
+#include "rtc_base/checks.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+AudioVector::AudioVector()
+    : AudioVector(kDefaultInitialSize) {
+  Clear();
+}
+
+AudioVector::AudioVector(size_t initial_size)
+    : array_(new int16_t[initial_size + 1]),
+      capacity_(initial_size + 1),
+      begin_index_(0),
+      end_index_(capacity_ - 1) {
+  memset(array_.get(), 0, capacity_ * sizeof(int16_t));
+}
+
+AudioVector::~AudioVector() = default;
+
+void AudioVector::Clear() {
+  end_index_ = begin_index_ = 0;
+}
+
+void AudioVector::CopyTo(AudioVector* copy_to) const {
+  RTC_DCHECK(copy_to);
+  copy_to->Reserve(Size());
+  CopyTo(Size(), 0, copy_to->array_.get());
+  copy_to->begin_index_ = 0;
+  copy_to->end_index_ = Size();
+}
+
+void AudioVector::CopyTo(
+    size_t length, size_t position, int16_t* copy_to) const {
+  if (length == 0)
+    return;
+  length = std::min(length, Size() - position);
+  const size_t copy_index = (begin_index_ + position) % capacity_;
+  const size_t first_chunk_length =
+      std::min(length, capacity_ - copy_index);
+  memcpy(copy_to, &array_[copy_index],
+         first_chunk_length * sizeof(int16_t));
+  const size_t remaining_length = length - first_chunk_length;
+  if (remaining_length > 0) {
+    memcpy(&copy_to[first_chunk_length], array_.get(),
+           remaining_length * sizeof(int16_t));
+  }
+}
+
+void AudioVector::PushFront(const AudioVector& prepend_this) {
+  const size_t length = prepend_this.Size();
+  if (length == 0)
+    return;
+
+  // Although the subsequent calling to PushFront does Reserve in it, it is
+  // always more efficient to do a big Reserve first.
+  Reserve(Size() + length);
+
+  const size_t first_chunk_length =
+      std::min(length, prepend_this.capacity_ - prepend_this.begin_index_);
+  const size_t remaining_length = length - first_chunk_length;
+  if (remaining_length > 0)
+    PushFront(prepend_this.array_.get(), remaining_length);
+  PushFront(&prepend_this.array_[prepend_this.begin_index_],
+            first_chunk_length);
+}
+
+void AudioVector::PushFront(const int16_t* prepend_this, size_t length) {
+  if (length == 0)
+    return;
+  Reserve(Size() + length);
+  const size_t first_chunk_length = std::min(length, begin_index_);
+  memcpy(&array_[begin_index_ - first_chunk_length],
+         &prepend_this[length - first_chunk_length],
+         first_chunk_length * sizeof(int16_t));
+  const size_t remaining_length = length - first_chunk_length;
+  if (remaining_length > 0) {
+    memcpy(&array_[capacity_ - remaining_length], prepend_this,
+           remaining_length * sizeof(int16_t));
+  }
+  begin_index_ = (begin_index_ + capacity_ - length) % capacity_;
+}
+
+void AudioVector::PushBack(const AudioVector& append_this) {
+  PushBack(append_this, append_this.Size(), 0);
+}
+
+void AudioVector::PushBack(
+    const AudioVector& append_this, size_t length, size_t position) {
+  RTC_DCHECK_LE(position, append_this.Size());
+  RTC_DCHECK_LE(length, append_this.Size() - position);
+
+  if (length == 0)
+    return;
+
+  // Although the subsequent calling to PushBack does Reserve in it, it is
+  // always more efficient to do a big Reserve first.
+  Reserve(Size() + length);
+
+  const size_t start_index =
+      (append_this.begin_index_ + position) % append_this.capacity_;
+  const size_t first_chunk_length = std::min(
+      length, append_this.capacity_ - start_index);
+  PushBack(&append_this.array_[start_index], first_chunk_length);
+
+  const size_t remaining_length = length - first_chunk_length;
+  if (remaining_length > 0)
+    PushBack(append_this.array_.get(), remaining_length);
+}
+
+void AudioVector::PushBack(const int16_t* append_this, size_t length) {
+  if (length == 0)
+    return;
+  Reserve(Size() + length);
+  const size_t first_chunk_length = std::min(length, capacity_ - end_index_);
+  memcpy(&array_[end_index_], append_this,
+         first_chunk_length * sizeof(int16_t));
+  const size_t remaining_length = length - first_chunk_length;
+  if (remaining_length > 0) {
+    memcpy(array_.get(), &append_this[first_chunk_length],
+           remaining_length * sizeof(int16_t));
+  }
+  end_index_ = (end_index_ + length) % capacity_;
+}
+
+void AudioVector::PopFront(size_t length) {
+  if (length == 0)
+    return;
+  length = std::min(length, Size());
+  begin_index_ = (begin_index_ + length) % capacity_;
+}
+
+void AudioVector::PopBack(size_t length) {
+  if (length == 0)
+    return;
+  // Never remove more than what is in the array.
+  length = std::min(length, Size());
+  end_index_ = (end_index_ + capacity_ - length) % capacity_;
+}
+
+void AudioVector::Extend(size_t extra_length) {
+  if (extra_length == 0)
+    return;
+  InsertZerosByPushBack(extra_length, Size());
+}
+
+void AudioVector::InsertAt(const int16_t* insert_this,
+                           size_t length,
+                           size_t position) {
+  if (length == 0)
+    return;
+  // Cap the insert position at the current array length.
+  position = std::min(Size(), position);
+
+  // When inserting to a position closer to the beginning, it is more efficient
+  // to insert by pushing front than to insert by pushing back, since less data
+  // will be moved, vice versa.
+  if (position <= Size() - position) {
+    InsertByPushFront(insert_this, length, position);
+  } else {
+    InsertByPushBack(insert_this, length, position);
+  }
+}
+
+void AudioVector::InsertZerosAt(size_t length,
+                                size_t position) {
+  if (length == 0)
+    return;
+  // Cap the insert position at the current array length.
+  position = std::min(Size(), position);
+
+  // When inserting to a position closer to the beginning, it is more efficient
+  // to insert by pushing front than to insert by pushing back, since less data
+  // will be moved, vice versa.
+  if (position <= Size() - position) {
+    InsertZerosByPushFront(length, position);
+  } else {
+    InsertZerosByPushBack(length, position);
+  }
+}
+
+void AudioVector::OverwriteAt(const AudioVector& insert_this,
+                              size_t length,
+                              size_t position) {
+  RTC_DCHECK_LE(length, insert_this.Size());
+  if (length == 0)
+    return;
+
+  // Cap the insert position at the current array length.
+  position = std::min(Size(), position);
+
+  // Although the subsequent calling to OverwriteAt does Reserve in it, it is
+  // always more efficient to do a big Reserve first.
+  size_t new_size = std::max(Size(), position + length);
+  Reserve(new_size);
+
+  const size_t first_chunk_length =
+      std::min(length, insert_this.capacity_ - insert_this.begin_index_);
+  OverwriteAt(&insert_this.array_[insert_this.begin_index_], first_chunk_length,
+              position);
+  const size_t remaining_length = length - first_chunk_length;
+  if (remaining_length > 0) {
+    OverwriteAt(insert_this.array_.get(), remaining_length,
+                position + first_chunk_length);
+  }
+}
+
+void AudioVector::OverwriteAt(const int16_t* insert_this,
+                              size_t length,
+                              size_t position) {
+  if (length == 0)
+    return;
+  // Cap the insert position at the current array length.
+  position = std::min(Size(), position);
+
+  size_t new_size = std::max(Size(), position + length);
+  Reserve(new_size);
+
+  const size_t overwrite_index = (begin_index_ + position) % capacity_;
+  const size_t first_chunk_length =
+      std::min(length, capacity_ - overwrite_index);
+  memcpy(&array_[overwrite_index], insert_this,
+         first_chunk_length * sizeof(int16_t));
+  const size_t remaining_length = length - first_chunk_length;
+  if (remaining_length > 0) {
+    memcpy(array_.get(), &insert_this[first_chunk_length],
+           remaining_length * sizeof(int16_t));
+  }
+
+  end_index_ = (begin_index_ + new_size) % capacity_;
+}
+
+void AudioVector::CrossFade(const AudioVector& append_this,
+                            size_t fade_length) {
+  // Fade length cannot be longer than the current vector or |append_this|.
+  assert(fade_length <= Size());
+  assert(fade_length <= append_this.Size());
+  fade_length = std::min(fade_length, Size());
+  fade_length = std::min(fade_length, append_this.Size());
+  size_t position = Size() - fade_length + begin_index_;
+  // Cross fade the overlapping regions.
+  // |alpha| is the mixing factor in Q14.
+  // TODO(hlundin): Consider skipping +1 in the denominator to produce a
+  // smoother cross-fade, in particular at the end of the fade.
+  int alpha_step = 16384 / (static_cast<int>(fade_length) + 1);
+  int alpha = 16384;
+  for (size_t i = 0; i < fade_length; ++i) {
+    alpha -= alpha_step;
+    array_[(position + i) % capacity_] =
+        (alpha * array_[(position + i) % capacity_] +
+            (16384 - alpha) * append_this[i] + 8192) >> 14;
+  }
+  assert(alpha >= 0);  // Verify that the slope was correct.
+  // Append what is left of |append_this|.
+  size_t samples_to_push_back = append_this.Size() - fade_length;
+  if (samples_to_push_back > 0)
+    PushBack(append_this, samples_to_push_back, fade_length);
+}
+
+// Returns the number of elements in this AudioVector.
+size_t AudioVector::Size() const {
+  return (end_index_ + capacity_ - begin_index_) % capacity_;
+}
+
+// Returns true if this AudioVector is empty.
+bool AudioVector::Empty() const {
+  return begin_index_ == end_index_;
+}
+
+void AudioVector::Reserve(size_t n) {
+  if (capacity_ > n)
+    return;
+  const size_t length = Size();
+  // Reserve one more sample to remove the ambiguity between empty vector and
+  // full vector. Therefore |begin_index_| == |end_index_| indicates empty
+  // vector, and |begin_index_| == (|end_index_| + 1) % capacity indicates
+  // full vector.
+  std::unique_ptr<int16_t[]> temp_array(new int16_t[n + 1]);
+  CopyTo(length, 0, temp_array.get());
+  array_.swap(temp_array);
+  begin_index_ = 0;
+  end_index_ = length;
+  capacity_ = n + 1;
+}
+
+void AudioVector::InsertByPushBack(const int16_t* insert_this,
+                                   size_t length,
+                                   size_t position) {
+  const size_t move_chunk_length = Size() - position;
+  std::unique_ptr<int16_t[]> temp_array(nullptr);
+  if (move_chunk_length > 0) {
+    // TODO(minyue): see if it is possible to avoid copying to a buffer.
+    temp_array.reset(new int16_t[move_chunk_length]);
+    CopyTo(move_chunk_length, position, temp_array.get());
+    PopBack(move_chunk_length);
+  }
+
+  Reserve(Size() + length + move_chunk_length);
+  PushBack(insert_this, length);
+  if (move_chunk_length > 0)
+    PushBack(temp_array.get(), move_chunk_length);
+}
+
+void AudioVector::InsertByPushFront(const int16_t* insert_this,
+                                   size_t length,
+                                   size_t position) {
+  std::unique_ptr<int16_t[]> temp_array(nullptr);
+  if (position > 0) {
+    // TODO(minyue): see if it is possible to avoid copying to a buffer.
+    temp_array.reset(new int16_t[position]);
+    CopyTo(position, 0, temp_array.get());
+    PopFront(position);
+  }
+
+  Reserve(Size() + length + position);
+  PushFront(insert_this, length);
+  if (position > 0)
+    PushFront(temp_array.get(), position);
+}
+
+void AudioVector::InsertZerosByPushBack(size_t length,
+                                        size_t position) {
+  const size_t move_chunk_length = Size() - position;
+  std::unique_ptr<int16_t[]> temp_array(nullptr);
+  if (move_chunk_length > 0) {
+    temp_array.reset(new int16_t[move_chunk_length]);
+    CopyTo(move_chunk_length, position, temp_array.get());
+    PopBack(move_chunk_length);
+  }
+
+  Reserve(Size() + length + move_chunk_length);
+
+  const size_t first_zero_chunk_length =
+      std::min(length, capacity_ - end_index_);
+  memset(&array_[end_index_], 0, first_zero_chunk_length * sizeof(int16_t));
+  const size_t remaining_zero_length = length - first_zero_chunk_length;
+  if (remaining_zero_length > 0)
+    memset(array_.get(), 0, remaining_zero_length * sizeof(int16_t));
+  end_index_ = (end_index_ + length) % capacity_;
+
+  if (move_chunk_length > 0)
+    PushBack(temp_array.get(), move_chunk_length);
+}
+
+void AudioVector::InsertZerosByPushFront(size_t length,
+                                         size_t position) {
+  std::unique_ptr<int16_t[]> temp_array(nullptr);
+  if (position > 0) {
+    temp_array.reset(new int16_t[position]);
+    CopyTo(position, 0, temp_array.get());
+    PopFront(position);
+  }
+
+  Reserve(Size() + length + position);
+
+  const size_t first_zero_chunk_length = std::min(length, begin_index_);
+  memset(&array_[begin_index_ - first_zero_chunk_length], 0,
+         first_zero_chunk_length * sizeof(int16_t));
+  const size_t remaining_zero_length = length - first_zero_chunk_length;
+  if (remaining_zero_length > 0)
+    memset(&array_[capacity_ - remaining_zero_length], 0,
+           remaining_zero_length * sizeof(int16_t));
+  begin_index_ = (begin_index_ + capacity_ - length) % capacity_;
+
+  if (position > 0)
+    PushFront(temp_array.get(), position);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/audio_vector.h b/modules/audio_coding/neteq/audio_vector.h
new file mode 100644
index 0000000..754a9fd
--- /dev/null
+++ b/modules/audio_coding/neteq/audio_vector.h
@@ -0,0 +1,168 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_AUDIO_VECTOR_H_
+#define MODULES_AUDIO_CODING_NETEQ_AUDIO_VECTOR_H_
+
+#include <string.h>  // Access to size_t.
+#include <memory>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class AudioVector {
+ public:
+  // Creates an empty AudioVector.
+  AudioVector();
+
+  // Creates an AudioVector with an initial size.
+  explicit AudioVector(size_t initial_size);
+
+  virtual ~AudioVector();
+
+  // Deletes all values and make the vector empty.
+  virtual void Clear();
+
+  // Copies all values from this vector to |copy_to|. Any contents in |copy_to|
+  // are deleted before the copy operation. After the operation is done,
+  // |copy_to| will be an exact replica of this object.
+  virtual void CopyTo(AudioVector* copy_to) const;
+
+  // Copies |length| values from |position| in this vector to |copy_to|.
+  virtual void CopyTo(size_t length, size_t position, int16_t* copy_to) const;
+
+  // Prepends the contents of AudioVector |prepend_this| to this object. The
+  // length of this object is increased with the length of |prepend_this|.
+  virtual void PushFront(const AudioVector& prepend_this);
+
+  // Same as above, but with an array |prepend_this| with |length| elements as
+  // source.
+  virtual void PushFront(const int16_t* prepend_this, size_t length);
+
+  // Same as PushFront but will append to the end of this object.
+  virtual void PushBack(const AudioVector& append_this);
+
+  // Appends a segment of |append_this| to the end of this object. The segment
+  // starts from |position| and has |length| samples.
+  virtual void PushBack(const AudioVector& append_this,
+                        size_t length,
+                        size_t position);
+
+  // Same as PushFront but will append to the end of this object.
+  virtual void PushBack(const int16_t* append_this, size_t length);
+
+  // Removes |length| elements from the beginning of this object.
+  virtual void PopFront(size_t length);
+
+  // Removes |length| elements from the end of this object.
+  virtual void PopBack(size_t length);
+
+  // Extends this object with |extra_length| elements at the end. The new
+  // elements are initialized to zero.
+  virtual void Extend(size_t extra_length);
+
+  // Inserts |length| elements taken from the array |insert_this| and insert
+  // them at |position|. The length of the AudioVector is increased by |length|.
+  // |position| = 0 means that the new values are prepended to the vector.
+  // |position| = Size() means that the new values are appended to the vector.
+  virtual void InsertAt(const int16_t* insert_this, size_t length,
+                        size_t position);
+
+  // Like InsertAt, but inserts |length| zero elements at |position|.
+  virtual void InsertZerosAt(size_t length, size_t position);
+
+  // Overwrites |length| elements of this AudioVector starting from |position|
+  // with first values in |AudioVector|. The definition of |position|
+  // is the same as for InsertAt(). If |length| and |position| are selected
+  // such that the new data extends beyond the end of the current AudioVector,
+  // the vector is extended to accommodate the new data.
+  virtual void OverwriteAt(const AudioVector& insert_this,
+                           size_t length,
+                           size_t position);
+
+  // Overwrites |length| elements of this AudioVector with values taken from the
+  // array |insert_this|, starting at |position|. The definition of |position|
+  // is the same as for InsertAt(). If |length| and |position| are selected
+  // such that the new data extends beyond the end of the current AudioVector,
+  // the vector is extended to accommodate the new data.
+  virtual void OverwriteAt(const int16_t* insert_this,
+                           size_t length,
+                           size_t position);
+
+  // Appends |append_this| to the end of the current vector. Lets the two
+  // vectors overlap by |fade_length| samples, and cross-fade linearly in this
+  // region.
+  virtual void CrossFade(const AudioVector& append_this, size_t fade_length);
+
+  // Returns the number of elements in this AudioVector.
+  virtual size_t Size() const;
+
+  // Returns true if this AudioVector is empty.
+  virtual bool Empty() const;
+
+  // Accesses and modifies an element of AudioVector.
+  inline const int16_t& operator[](size_t index) const {
+    return array_[WrapIndex(index, begin_index_, capacity_)];
+  }
+
+  inline int16_t& operator[](size_t index) {
+    return array_[WrapIndex(index, begin_index_, capacity_)];
+  }
+
+ private:
+  static const size_t kDefaultInitialSize = 10;
+
+  // This method is used by the [] operators to calculate an index within the
+  // capacity of the array, but without using the modulo operation (%).
+  static inline size_t WrapIndex(size_t index,
+                                 size_t begin_index,
+                                 size_t capacity) {
+    RTC_DCHECK_LT(index, capacity);
+    RTC_DCHECK_LT(begin_index, capacity);
+    size_t ix = begin_index + index;
+    RTC_DCHECK_GE(ix, index);  // Check for overflow.
+    if (ix >= capacity) {
+      ix -= capacity;
+    }
+    RTC_DCHECK_LT(ix, capacity);
+    return ix;
+  }
+
+  void Reserve(size_t n);
+
+  void InsertByPushBack(const int16_t* insert_this, size_t length,
+                        size_t position);
+
+  void InsertByPushFront(const int16_t* insert_this, size_t length,
+                         size_t position);
+
+  void InsertZerosByPushBack(size_t length, size_t position);
+
+  void InsertZerosByPushFront(size_t length, size_t position);
+
+  std::unique_ptr<int16_t[]> array_;
+
+  size_t capacity_;  // Allocated number of samples in the array.
+
+  // The index of the first sample in |array_|, except when
+  // |begin_index_ == end_index_|, which indicates an empty buffer.
+  size_t begin_index_;
+
+  // The index of the sample after the last sample in |array_|.
+  size_t end_index_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioVector);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_AUDIO_VECTOR_H_
diff --git a/modules/audio_coding/neteq/audio_vector_unittest.cc b/modules/audio_coding/neteq/audio_vector_unittest.cc
new file mode 100644
index 0000000..1b54abc
--- /dev/null
+++ b/modules/audio_coding/neteq/audio_vector_unittest.cc
@@ -0,0 +1,388 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/audio_vector.h"
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include <string>
+
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class AudioVectorTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    // Populate test array.
+    for (size_t i = 0; i < array_length(); ++i) {
+      array_[i] = rtc::checked_cast<int16_t>(i);
+    }
+  }
+
+  size_t array_length() const {
+    return sizeof(array_) / sizeof(array_[0]);
+  }
+
+  int16_t array_[10];
+};
+
+// Create and destroy AudioVector objects, both empty and with a predefined
+// length.
+TEST_F(AudioVectorTest, CreateAndDestroy) {
+  AudioVector vec1;
+  EXPECT_TRUE(vec1.Empty());
+  EXPECT_EQ(0u, vec1.Size());
+
+  size_t initial_size = 17;
+  AudioVector vec2(initial_size);
+  EXPECT_FALSE(vec2.Empty());
+  EXPECT_EQ(initial_size, vec2.Size());
+}
+
+// Test the subscript operator [] for getting and setting.
+TEST_F(AudioVectorTest, SubscriptOperator) {
+  AudioVector vec(array_length());
+  for (size_t i = 0; i < array_length(); ++i) {
+    vec[i] = static_cast<int16_t>(i);
+    const int16_t& value = vec[i];  // Make sure to use the const version.
+    EXPECT_EQ(static_cast<int16_t>(i), value);
+  }
+}
+
+// Test the PushBack method and the CopyFrom method. The Clear method is also
+// invoked.
+TEST_F(AudioVectorTest, PushBackAndCopy) {
+  AudioVector vec;
+  AudioVector vec_copy;
+  vec.PushBack(array_, array_length());
+  vec.CopyTo(&vec_copy);  // Copy from |vec| to |vec_copy|.
+  ASSERT_EQ(array_length(), vec.Size());
+  ASSERT_EQ(array_length(), vec_copy.Size());
+  for (size_t i = 0; i < array_length(); ++i) {
+    EXPECT_EQ(array_[i], vec[i]);
+    EXPECT_EQ(array_[i], vec_copy[i]);
+  }
+
+  // Clear |vec| and verify that it is empty.
+  vec.Clear();
+  EXPECT_TRUE(vec.Empty());
+
+  // Now copy the empty vector and verify that the copy becomes empty too.
+  vec.CopyTo(&vec_copy);
+  EXPECT_TRUE(vec_copy.Empty());
+}
+
+// Test the PushBack method with another AudioVector as input argument.
+TEST_F(AudioVectorTest, PushBackVector) {
+  static const size_t kLength = 10;
+  AudioVector vec1(kLength);
+  AudioVector vec2(kLength);
+  // Set the first vector to [0, 1, ..., kLength - 1].
+  // Set the second vector to [kLength, kLength + 1, ..., 2 * kLength - 1].
+  for (size_t i = 0; i < kLength; ++i) {
+    vec1[i] = static_cast<int16_t>(i);
+    vec2[i] = static_cast<int16_t>(i + kLength);
+  }
+  // Append vec2 to the back of vec1.
+  vec1.PushBack(vec2);
+  ASSERT_EQ(2 * kLength, vec1.Size());
+  for (size_t i = 0; i < 2 * kLength; ++i) {
+    EXPECT_EQ(static_cast<int16_t>(i), vec1[i]);
+  }
+}
+
+// Test the PushFront method.
+TEST_F(AudioVectorTest, PushFront) {
+  AudioVector vec;
+  vec.PushFront(array_, array_length());
+  ASSERT_EQ(array_length(), vec.Size());
+  for (size_t i = 0; i < array_length(); ++i) {
+    EXPECT_EQ(array_[i], vec[i]);
+  }
+}
+
+// Test the PushFront method with another AudioVector as input argument.
+TEST_F(AudioVectorTest, PushFrontVector) {
+  static const size_t kLength = 10;
+  AudioVector vec1(kLength);
+  AudioVector vec2(kLength);
+  // Set the first vector to [0, 1, ..., kLength - 1].
+  // Set the second vector to [kLength, kLength + 1, ..., 2 * kLength - 1].
+  for (size_t i = 0; i < kLength; ++i) {
+    vec1[i] = static_cast<int16_t>(i);
+    vec2[i] = static_cast<int16_t>(i + kLength);
+  }
+  // Prepend vec1 to the front of vec2.
+  vec2.PushFront(vec1);
+  ASSERT_EQ(2 * kLength, vec2.Size());
+  for (size_t i = 0; i < 2 * kLength; ++i) {
+    EXPECT_EQ(static_cast<int16_t>(i), vec2[i]);
+  }
+}
+
+// Test the PopFront method.
+TEST_F(AudioVectorTest, PopFront) {
+  AudioVector vec;
+  vec.PushBack(array_, array_length());
+  vec.PopFront(1);  // Remove one element.
+  EXPECT_EQ(array_length() - 1u, vec.Size());
+  for (size_t i = 0; i < array_length() - 1; ++i) {
+    EXPECT_EQ(static_cast<int16_t>(i + 1), vec[i]);
+  }
+  vec.PopFront(array_length());  // Remove more elements than vector size.
+  EXPECT_EQ(0u, vec.Size());
+}
+
+// Test the PopBack method.
+TEST_F(AudioVectorTest, PopBack) {
+  AudioVector vec;
+  vec.PushBack(array_, array_length());
+  vec.PopBack(1);  // Remove one element.
+  EXPECT_EQ(array_length() - 1u, vec.Size());
+  for (size_t i = 0; i < array_length() - 1; ++i) {
+    EXPECT_EQ(static_cast<int16_t>(i), vec[i]);
+  }
+  vec.PopBack(array_length());  // Remove more elements than vector size.
+  EXPECT_EQ(0u, vec.Size());
+}
+
+// Test the Extend method.
+TEST_F(AudioVectorTest, Extend) {
+  AudioVector vec;
+  vec.PushBack(array_, array_length());
+  vec.Extend(5);  // Extend with 5 elements, which should all be zeros.
+  ASSERT_EQ(array_length() + 5u, vec.Size());
+  // Verify that all are zero.
+  for (size_t i = array_length(); i < array_length() + 5; ++i) {
+    EXPECT_EQ(0, vec[i]);
+  }
+}
+
+// Test the InsertAt method with an insert position in the middle of the vector.
+TEST_F(AudioVectorTest, InsertAt) {
+  AudioVector vec;
+  vec.PushBack(array_, array_length());
+  static const int kNewLength = 5;
+  int16_t new_array[kNewLength];
+  // Set array elements to {100, 101, 102, ... }.
+  for (int i = 0; i < kNewLength; ++i) {
+    new_array[i] = 100 + i;
+  }
+  int insert_position = 5;
+  vec.InsertAt(new_array, kNewLength, insert_position);
+  // Verify that the vector looks as follows:
+  // {0, 1, ..., |insert_position| - 1, 100, 101, ..., 100 + kNewLength - 1,
+  //  |insert_position|, |insert_position| + 1, ..., kLength - 1}.
+  size_t pos = 0;
+  for (int i = 0; i < insert_position; ++i) {
+    EXPECT_EQ(array_[i], vec[pos]);
+    ++pos;
+  }
+  for (int i = 0; i < kNewLength; ++i) {
+    EXPECT_EQ(new_array[i], vec[pos]);
+    ++pos;
+  }
+  for (size_t i = insert_position; i < array_length(); ++i) {
+    EXPECT_EQ(array_[i], vec[pos]);
+    ++pos;
+  }
+}
+
+// Test the InsertZerosAt method with an insert position in the middle of the
+// vector. Use the InsertAt method as reference.
+TEST_F(AudioVectorTest, InsertZerosAt) {
+  AudioVector vec;
+  AudioVector vec_ref;
+  vec.PushBack(array_, array_length());
+  vec_ref.PushBack(array_, array_length());
+  static const int kNewLength = 5;
+  int insert_position = 5;
+  vec.InsertZerosAt(kNewLength, insert_position);
+  int16_t new_array[kNewLength] = {0};  // All zero elements.
+  vec_ref.InsertAt(new_array, kNewLength, insert_position);
+  // Verify that the vectors are identical.
+  ASSERT_EQ(vec_ref.Size(), vec.Size());
+  for (size_t i = 0; i < vec.Size(); ++i) {
+    EXPECT_EQ(vec_ref[i], vec[i]);
+  }
+}
+
+// Test the InsertAt method with an insert position at the start of the vector.
+TEST_F(AudioVectorTest, InsertAtBeginning) {
+  AudioVector vec;
+  vec.PushBack(array_, array_length());
+  static const int kNewLength = 5;
+  int16_t new_array[kNewLength];
+  // Set array elements to {100, 101, 102, ... }.
+  for (int i = 0; i < kNewLength; ++i) {
+    new_array[i] = 100 + i;
+  }
+  int insert_position = 0;
+  vec.InsertAt(new_array, kNewLength, insert_position);
+  // Verify that the vector looks as follows:
+  // {100, 101, ..., 100 + kNewLength - 1,
+  //  0, 1, ..., kLength - 1}.
+  size_t pos = 0;
+  for (int i = 0; i < kNewLength; ++i) {
+    EXPECT_EQ(new_array[i], vec[pos]);
+    ++pos;
+  }
+  for (size_t i = insert_position; i < array_length(); ++i) {
+    EXPECT_EQ(array_[i], vec[pos]);
+    ++pos;
+  }
+}
+
+// Test the InsertAt method with an insert position at the end of the vector.
+TEST_F(AudioVectorTest, InsertAtEnd) {
+  AudioVector vec;
+  vec.PushBack(array_, array_length());
+  static const int kNewLength = 5;
+  int16_t new_array[kNewLength];
+  // Set array elements to {100, 101, 102, ... }.
+  for (int i = 0; i < kNewLength; ++i) {
+    new_array[i] = 100 + i;
+  }
+  int insert_position = rtc::checked_cast<int>(array_length());
+  vec.InsertAt(new_array, kNewLength, insert_position);
+  // Verify that the vector looks as follows:
+  // {0, 1, ..., kLength - 1, 100, 101, ..., 100 + kNewLength - 1 }.
+  size_t pos = 0;
+  for (size_t i = 0; i < array_length(); ++i) {
+    EXPECT_EQ(array_[i], vec[pos]);
+    ++pos;
+  }
+  for (int i = 0; i < kNewLength; ++i) {
+    EXPECT_EQ(new_array[i], vec[pos]);
+    ++pos;
+  }
+}
+
+// Test the InsertAt method with an insert position beyond the end of the
+// vector. Verify that a position beyond the end of the vector does not lead to
+// an error. The expected outcome is the same as if the vector end was used as
+// input position. That is, the input position should be capped at the maximum
+// allowed value.
+TEST_F(AudioVectorTest, InsertBeyondEnd) {
+  AudioVector vec;
+  vec.PushBack(array_, array_length());
+  static const int kNewLength = 5;
+  int16_t new_array[kNewLength];
+  // Set array elements to {100, 101, 102, ... }.
+  for (int i = 0; i < kNewLength; ++i) {
+    new_array[i] = 100 + i;
+  }
+  int insert_position = rtc::checked_cast<int>(
+      array_length() + 10); // Too large.
+  vec.InsertAt(new_array, kNewLength, insert_position);
+  // Verify that the vector looks as follows:
+  // {0, 1, ..., kLength - 1, 100, 101, ..., 100 + kNewLength - 1 }.
+  size_t pos = 0;
+  for (size_t i = 0; i < array_length(); ++i) {
+    EXPECT_EQ(array_[i], vec[pos]);
+    ++pos;
+  }
+  for (int i = 0; i < kNewLength; ++i) {
+    EXPECT_EQ(new_array[i], vec[pos]);
+    ++pos;
+  }
+}
+
+// Test the OverwriteAt method with a position such that all of the new values
+// fit within the old vector.
+TEST_F(AudioVectorTest, OverwriteAt) {
+  AudioVector vec;
+  vec.PushBack(array_, array_length());
+  static const int kNewLength = 5;
+  int16_t new_array[kNewLength];
+  // Set array elements to {100, 101, 102, ... }.
+  for (int i = 0; i < kNewLength; ++i) {
+    new_array[i] = 100 + i;
+  }
+  size_t insert_position = 2;
+  vec.OverwriteAt(new_array, kNewLength, insert_position);
+  // Verify that the vector looks as follows:
+  // {0, ..., |insert_position| - 1, 100, 101, ..., 100 + kNewLength - 1,
+  //  |insert_position|, |insert_position| + 1, ..., kLength - 1}.
+  size_t pos = 0;
+  for (pos = 0; pos < insert_position; ++pos) {
+    EXPECT_EQ(array_[pos], vec[pos]);
+  }
+  for (int i = 0; i < kNewLength; ++i) {
+    EXPECT_EQ(new_array[i], vec[pos]);
+    ++pos;
+  }
+  for (; pos < array_length(); ++pos) {
+    EXPECT_EQ(array_[pos], vec[pos]);
+  }
+}
+
+// Test the OverwriteAt method with a position such that some of the new values
+// extend beyond the end of the current vector. This is valid, and the vector is
+// expected to expand to accommodate the new values.
+TEST_F(AudioVectorTest, OverwriteBeyondEnd) {
+  AudioVector vec;
+  vec.PushBack(array_, array_length());
+  static const int kNewLength = 5;
+  int16_t new_array[kNewLength];
+  // Set array elements to {100, 101, 102, ... }.
+  for (int i = 0; i < kNewLength; ++i) {
+    new_array[i] = 100 + i;
+  }
+  int insert_position = rtc::checked_cast<int>(array_length() - 2);
+  vec.OverwriteAt(new_array, kNewLength, insert_position);
+  ASSERT_EQ(array_length() - 2u + kNewLength, vec.Size());
+  // Verify that the vector looks as follows:
+  // {0, ..., |insert_position| - 1, 100, 101, ..., 100 + kNewLength - 1,
+  //  |insert_position|, |insert_position| + 1, ..., kLength - 1}.
+  int pos = 0;
+  for (pos = 0; pos < insert_position; ++pos) {
+    EXPECT_EQ(array_[pos], vec[pos]);
+  }
+  for (int i = 0; i < kNewLength; ++i) {
+    EXPECT_EQ(new_array[i], vec[pos]);
+    ++pos;
+  }
+  // Verify that we checked to the end of |vec|.
+  EXPECT_EQ(vec.Size(), static_cast<size_t>(pos));
+}
+
+TEST_F(AudioVectorTest, CrossFade) {
+  static const size_t kLength = 100;
+  static const size_t kFadeLength = 10;
+  AudioVector vec1(kLength);
+  AudioVector vec2(kLength);
+  // Set all vector elements to 0 in |vec1| and 100 in |vec2|.
+  for (size_t i = 0; i < kLength; ++i) {
+    vec1[i] = 0;
+    vec2[i] = 100;
+  }
+  vec1.CrossFade(vec2, kFadeLength);
+  ASSERT_EQ(2 * kLength - kFadeLength, vec1.Size());
+  // First part untouched.
+  for (size_t i = 0; i < kLength - kFadeLength; ++i) {
+    EXPECT_EQ(0, vec1[i]);
+  }
+  // Check mixing zone.
+  for (size_t i = 0 ; i < kFadeLength; ++i) {
+    EXPECT_NEAR((i + 1) * 100 / (kFadeLength + 1),
+                vec1[kLength - kFadeLength + i], 1);
+  }
+  // Second part untouched.
+  for (size_t i = kLength; i < vec1.Size(); ++i) {
+    EXPECT_EQ(100, vec1[i]);
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/background_noise.cc b/modules/audio_coding/neteq/background_noise.cc
new file mode 100644
index 0000000..eda5c75
--- /dev/null
+++ b/modules/audio_coding/neteq/background_noise.cc
@@ -0,0 +1,255 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/background_noise.h"
+
+#include <assert.h>
+#include <string.h>  // memcpy
+
+#include <algorithm>  // min, max
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "modules/audio_coding/neteq/cross_correlation.h"
+#include "modules/audio_coding/neteq/post_decode_vad.h"
+
+namespace webrtc {
+
+// static
+const size_t BackgroundNoise::kMaxLpcOrder;
+
+BackgroundNoise::BackgroundNoise(size_t num_channels)
+    : num_channels_(num_channels),
+      channel_parameters_(new ChannelParameters[num_channels_]),
+      mode_(NetEq::kBgnOn) {
+  Reset();
+}
+
+BackgroundNoise::~BackgroundNoise() {}
+
+void BackgroundNoise::Reset() {
+  initialized_ = false;
+  for (size_t channel = 0; channel < num_channels_; ++channel) {
+    channel_parameters_[channel].Reset();
+  }
+  // Keep _bgnMode as it is.
+}
+
+void BackgroundNoise::Update(const AudioMultiVector& input,
+                             const PostDecodeVad& vad) {
+  if (vad.running() && vad.active_speech()) {
+    // Do not update the background noise parameters if we know that the signal
+    // is active speech.
+    return;
+  }
+
+  int32_t auto_correlation[kMaxLpcOrder + 1];
+  int16_t fiter_output[kMaxLpcOrder + kResidualLength];
+  int16_t reflection_coefficients[kMaxLpcOrder];
+  int16_t lpc_coefficients[kMaxLpcOrder + 1];
+
+  for (size_t channel_ix = 0; channel_ix < num_channels_; ++channel_ix) {
+    ChannelParameters& parameters = channel_parameters_[channel_ix];
+    int16_t temp_signal_array[kVecLen + kMaxLpcOrder] = {0};
+    int16_t* temp_signal = &temp_signal_array[kMaxLpcOrder];
+    input[channel_ix].CopyTo(kVecLen, input.Size() - kVecLen, temp_signal);
+    int32_t sample_energy = CalculateAutoCorrelation(temp_signal, kVecLen,
+                                                     auto_correlation);
+
+    if ((!vad.running() &&
+        sample_energy < parameters.energy_update_threshold) ||
+        (vad.running() && !vad.active_speech())) {
+      // Generate LPC coefficients.
+      if (auto_correlation[0] > 0) {
+        // Regardless of whether the filter is actually updated or not,
+        // update energy threshold levels, since we have in fact observed
+        // a low energy signal.
+        if (sample_energy < parameters.energy_update_threshold) {
+          // Never go under 1.0 in average sample energy.
+          parameters.energy_update_threshold = std::max(sample_energy, 1);
+          parameters.low_energy_update_threshold = 0;
+        }
+
+        // Only update BGN if filter is stable, i.e., if return value from
+        // Levinson-Durbin function is 1.
+        if (WebRtcSpl_LevinsonDurbin(auto_correlation, lpc_coefficients,
+                                     reflection_coefficients,
+                                     kMaxLpcOrder) != 1) {
+          return;
+        }
+      } else {
+        // Center value in auto-correlation is not positive. Do not update.
+        return;
+      }
+
+      // Generate the CNG gain factor by looking at the energy of the residual.
+      WebRtcSpl_FilterMAFastQ12(temp_signal + kVecLen - kResidualLength,
+                                fiter_output, lpc_coefficients,
+                                kMaxLpcOrder + 1, kResidualLength);
+      int32_t residual_energy = WebRtcSpl_DotProductWithScale(fiter_output,
+                                                              fiter_output,
+                                                              kResidualLength,
+                                                              0);
+
+      // Check spectral flatness.
+      // Comparing the residual variance with the input signal variance tells
+      // if the spectrum is flat or not.
+      // If 5 * residual_energy >= 16 * sample_energy, the spectrum is flat
+      // enough.  Also ensure that the energy is non-zero.
+      if ((sample_energy > 0) &&
+          (int64_t{5} * residual_energy >= int64_t{16} * sample_energy)) {
+        // Spectrum is flat enough; save filter parameters.
+        // |temp_signal| + |kVecLen| - |kMaxLpcOrder| points at the first of the
+        // |kMaxLpcOrder| samples in the residual signal, which will form the
+        // filter state for the next noise generation.
+        SaveParameters(channel_ix, lpc_coefficients,
+                       temp_signal + kVecLen - kMaxLpcOrder, sample_energy,
+                       residual_energy);
+      }
+    } else {
+      // Will only happen if post-decode VAD is disabled and |sample_energy| is
+      // not low enough. Increase the threshold for update so that it increases
+      // by a factor 4 in 4 seconds.
+      IncrementEnergyThreshold(channel_ix, sample_energy);
+    }
+  }
+  return;
+}
+
+int32_t BackgroundNoise::Energy(size_t channel) const {
+  assert(channel < num_channels_);
+  return channel_parameters_[channel].energy;
+}
+
+void BackgroundNoise::SetMuteFactor(size_t channel, int16_t value) {
+  assert(channel < num_channels_);
+  channel_parameters_[channel].mute_factor = value;
+}
+
+int16_t BackgroundNoise::MuteFactor(size_t channel) const {
+  assert(channel < num_channels_);
+  return channel_parameters_[channel].mute_factor;
+}
+
+const int16_t* BackgroundNoise::Filter(size_t channel) const {
+  assert(channel < num_channels_);
+  return channel_parameters_[channel].filter;
+}
+
+const int16_t* BackgroundNoise::FilterState(size_t channel) const {
+  assert(channel < num_channels_);
+  return channel_parameters_[channel].filter_state;
+}
+
+void BackgroundNoise::SetFilterState(size_t channel, const int16_t* input,
+                                     size_t length) {
+  assert(channel < num_channels_);
+  length = std::min(length, kMaxLpcOrder);
+  memcpy(channel_parameters_[channel].filter_state, input,
+         length * sizeof(int16_t));
+}
+
+int16_t BackgroundNoise::Scale(size_t channel) const {
+  assert(channel < num_channels_);
+  return channel_parameters_[channel].scale;
+}
+int16_t BackgroundNoise::ScaleShift(size_t channel) const {
+  assert(channel < num_channels_);
+  return channel_parameters_[channel].scale_shift;
+}
+
+int32_t BackgroundNoise::CalculateAutoCorrelation(
+    const int16_t* signal, size_t length, int32_t* auto_correlation) const {
+  static const int kCorrelationStep = -1;
+  const int correlation_scale =
+      CrossCorrelationWithAutoShift(signal, signal, length, kMaxLpcOrder + 1,
+                                    kCorrelationStep, auto_correlation);
+
+  // Number of shifts to normalize energy to energy/sample.
+  int energy_sample_shift = kLogVecLen - correlation_scale;
+  return auto_correlation[0] >> energy_sample_shift;
+}
+
+void BackgroundNoise::IncrementEnergyThreshold(size_t channel,
+                                               int32_t sample_energy) {
+  // TODO(hlundin): Simplify the below threshold update. What this code
+  // does is simply "threshold += (increment * threshold) >> 16", but due
+  // to the limited-width operations, it is not exactly the same. The
+  // difference should be inaudible, but bit-exactness would not be
+  // maintained.
+  assert(channel < num_channels_);
+  ChannelParameters& parameters = channel_parameters_[channel];
+  int32_t temp_energy =
+    (kThresholdIncrement * parameters.low_energy_update_threshold) >> 16;
+  temp_energy += kThresholdIncrement *
+      (parameters.energy_update_threshold & 0xFF);
+  temp_energy += (kThresholdIncrement *
+      ((parameters.energy_update_threshold>>8) & 0xFF)) << 8;
+  parameters.low_energy_update_threshold += temp_energy;
+
+  parameters.energy_update_threshold += kThresholdIncrement *
+      (parameters.energy_update_threshold>>16);
+  parameters.energy_update_threshold +=
+      parameters.low_energy_update_threshold >> 16;
+  parameters.low_energy_update_threshold =
+      parameters.low_energy_update_threshold & 0x0FFFF;
+
+  // Update maximum energy.
+  // Decrease by a factor 1/1024 each time.
+  parameters.max_energy = parameters.max_energy -
+      (parameters.max_energy >> 10);
+  if (sample_energy > parameters.max_energy) {
+    parameters.max_energy = sample_energy;
+  }
+
+  // Set |energy_update_threshold| to no less than 60 dB lower than
+  // |max_energy_|. Adding 524288 assures proper rounding.
+  int32_t energy_update_threshold = (parameters.max_energy + 524288) >> 20;
+  if (energy_update_threshold > parameters.energy_update_threshold) {
+    parameters.energy_update_threshold = energy_update_threshold;
+  }
+}
+
+void BackgroundNoise::SaveParameters(size_t channel,
+                                     const int16_t* lpc_coefficients,
+                                     const int16_t* filter_state,
+                                     int32_t sample_energy,
+                                     int32_t residual_energy) {
+  assert(channel < num_channels_);
+  ChannelParameters& parameters = channel_parameters_[channel];
+  memcpy(parameters.filter, lpc_coefficients,
+         (kMaxLpcOrder+1) * sizeof(int16_t));
+  memcpy(parameters.filter_state, filter_state,
+         kMaxLpcOrder * sizeof(int16_t));
+  // Save energy level and update energy threshold levels.
+  // Never get under 1.0 in average sample energy.
+  parameters.energy = std::max(sample_energy, 1);
+  parameters.energy_update_threshold = parameters.energy;
+  parameters.low_energy_update_threshold = 0;
+
+  // Normalize residual_energy to 29 or 30 bits before sqrt.
+  int16_t norm_shift = WebRtcSpl_NormW32(residual_energy) - 1;
+  if (norm_shift & 0x1) {
+    norm_shift -= 1;  // Even number of shifts required.
+  }
+  residual_energy = WEBRTC_SPL_SHIFT_W32(residual_energy, norm_shift);
+
+  // Calculate scale and shift factor.
+  parameters.scale = static_cast<int16_t>(WebRtcSpl_SqrtFloor(residual_energy));
+  // Add 13 to the |scale_shift_|, since the random numbers table is in
+  // Q13.
+  // TODO(hlundin): Move the "13" to where the |scale_shift_| is used?
+  parameters.scale_shift =
+      static_cast<int16_t>(13 + ((kLogResidualLength + norm_shift) / 2));
+
+  initialized_ = true;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/background_noise.h b/modules/audio_coding/neteq/background_noise.h
new file mode 100644
index 0000000..718f41d
--- /dev/null
+++ b/modules/audio_coding/neteq/background_noise.h
@@ -0,0 +1,137 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_BACKGROUND_NOISE_H_
+#define MODULES_AUDIO_CODING_NETEQ_BACKGROUND_NOISE_H_
+
+#include <string.h>  // size_t
+#include <memory>
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "modules/audio_coding/neteq/include/neteq.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Forward declarations.
+class PostDecodeVad;
+
+// This class handles estimation of background noise parameters.
+class BackgroundNoise {
+ public:
+  // TODO(hlundin): For 48 kHz support, increase kMaxLpcOrder to 10.
+  // Will work anyway, but probably sound a little worse.
+  static const size_t kMaxLpcOrder = 8;  // 32000 / 8000 + 4.
+
+  explicit BackgroundNoise(size_t num_channels);
+  virtual ~BackgroundNoise();
+
+  void Reset();
+
+  // Updates the parameter estimates based on the signal currently in the
+  // |sync_buffer|, and on the latest decision in |vad| if it is running.
+  void Update(const AudioMultiVector& sync_buffer,
+              const PostDecodeVad& vad);
+
+  // Returns |energy_| for |channel|.
+  int32_t Energy(size_t channel) const;
+
+  // Sets the value of |mute_factor_| for |channel| to |value|.
+  void SetMuteFactor(size_t channel, int16_t value);
+
+  // Returns |mute_factor_| for |channel|.
+  int16_t MuteFactor(size_t channel) const;
+
+  // Returns a pointer to |filter_| for |channel|.
+  const int16_t* Filter(size_t channel) const;
+
+  // Returns a pointer to |filter_state_| for |channel|.
+  const int16_t* FilterState(size_t channel) const;
+
+  // Copies |length| elements from |input| to the filter state. Will not copy
+  // more than |kMaxLpcOrder| elements.
+  void SetFilterState(size_t channel, const int16_t* input, size_t length);
+
+  // Returns |scale_| for |channel|.
+  int16_t Scale(size_t channel) const;
+
+  // Returns |scale_shift_| for |channel|.
+  int16_t ScaleShift(size_t channel) const;
+
+  // Accessors.
+  bool initialized() const { return initialized_; }
+  NetEq::BackgroundNoiseMode mode() const { return mode_; }
+
+  // Sets the mode of the background noise playout for cases when there is long
+  // duration of packet loss.
+  void set_mode(NetEq::BackgroundNoiseMode mode) { mode_ = mode; }
+
+ private:
+  static const int kThresholdIncrement = 229;  // 0.0035 in Q16.
+  static const size_t kVecLen = 256;
+  static const int kLogVecLen = 8;  // log2(kVecLen).
+  static const size_t kResidualLength = 64;
+  static const int16_t kLogResidualLength = 6;  // log2(kResidualLength)
+
+  struct ChannelParameters {
+    // Constructor.
+    ChannelParameters() {
+      Reset();
+    }
+
+    void Reset() {
+      energy = 2500;
+      max_energy = 0;
+      energy_update_threshold = 500000;
+      low_energy_update_threshold = 0;
+      memset(filter_state, 0, sizeof(filter_state));
+      memset(filter, 0, sizeof(filter));
+      filter[0] = 4096;
+      mute_factor = 0;
+      scale = 20000;
+      scale_shift = 24;
+    }
+
+    int32_t energy;
+    int32_t max_energy;
+    int32_t energy_update_threshold;
+    int32_t low_energy_update_threshold;
+    int16_t filter_state[kMaxLpcOrder];
+    int16_t filter[kMaxLpcOrder + 1];
+    int16_t mute_factor;
+    int16_t scale;
+    int16_t scale_shift;
+  };
+
+  int32_t CalculateAutoCorrelation(const int16_t* signal,
+                                   size_t length,
+                                   int32_t* auto_correlation) const;
+
+  // Increments the energy threshold by a factor 1 + |kThresholdIncrement|.
+  void IncrementEnergyThreshold(size_t channel, int32_t sample_energy);
+
+  // Updates the filter parameters.
+  void SaveParameters(size_t channel,
+                      const int16_t* lpc_coefficients,
+                      const int16_t* filter_state,
+                      int32_t sample_energy,
+                      int32_t residual_energy);
+
+  size_t num_channels_;
+  std::unique_ptr<ChannelParameters[]> channel_parameters_;
+  bool initialized_;
+  NetEq::BackgroundNoiseMode mode_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(BackgroundNoise);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_BACKGROUND_NOISE_H_
diff --git a/modules/audio_coding/neteq/background_noise_unittest.cc b/modules/audio_coding/neteq/background_noise_unittest.cc
new file mode 100644
index 0000000..e32492f
--- /dev/null
+++ b/modules/audio_coding/neteq/background_noise_unittest.cc
@@ -0,0 +1,26 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for BackgroundNoise class.
+
+#include "modules/audio_coding/neteq/background_noise.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(BackgroundNoise, CreateAndDestroy) {
+  size_t channels = 1;
+  BackgroundNoise bgn(channels);
+}
+
+// TODO(hlundin): Write more tests.
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/buffer_level_filter.cc b/modules/audio_coding/neteq/buffer_level_filter.cc
new file mode 100644
index 0000000..6005de6
--- /dev/null
+++ b/modules/audio_coding/neteq/buffer_level_filter.cc
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/buffer_level_filter.h"
+
+#include <algorithm>  // Provide access to std::max.
+
+namespace webrtc {
+
+BufferLevelFilter::BufferLevelFilter() {
+  Reset();
+}
+
+void BufferLevelFilter::Reset() {
+  filtered_current_level_ = 0;
+  level_factor_ = 253;
+}
+
+void BufferLevelFilter::Update(size_t buffer_size_packets,
+                               int time_stretched_samples,
+                               size_t packet_len_samples) {
+  // Filter:
+  // |filtered_current_level_| = |level_factor_| * |filtered_current_level_| +
+  //                            (1 - |level_factor_|) * |buffer_size_packets|
+  // |level_factor_| and |filtered_current_level_| are in Q8.
+  // |buffer_size_packets| is in Q0.
+  filtered_current_level_ = ((level_factor_ * filtered_current_level_) >> 8) +
+      ((256 - level_factor_) * static_cast<int>(buffer_size_packets));
+
+  // Account for time-scale operations (accelerate and pre-emptive expand).
+  if (time_stretched_samples && packet_len_samples > 0) {
+    // Time-scaling has been performed since last filter update. Subtract the
+    // value of |time_stretched_samples| from |filtered_current_level_| after
+    // converting |time_stretched_samples| from samples to packets in Q8.
+    // Make sure that the filtered value remains non-negative.
+    filtered_current_level_ = std::max(
+        0, filtered_current_level_ - (time_stretched_samples * (1 << 8)) /
+                                         static_cast<int>(packet_len_samples));
+  }
+}
+
+void BufferLevelFilter::SetTargetBufferLevel(int target_buffer_level) {
+  if (target_buffer_level <= 1) {
+    level_factor_ = 251;
+  } else if (target_buffer_level <= 3) {
+    level_factor_ = 252;
+  } else if (target_buffer_level <= 7) {
+    level_factor_ = 253;
+  } else {
+    level_factor_ = 254;
+  }
+}
+
+int BufferLevelFilter::filtered_current_level() const {
+  return filtered_current_level_;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/buffer_level_filter.h b/modules/audio_coding/neteq/buffer_level_filter.h
new file mode 100644
index 0000000..7a48c72
--- /dev/null
+++ b/modules/audio_coding/neteq/buffer_level_filter.h
@@ -0,0 +1,49 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_BUFFER_LEVEL_FILTER_H_
+#define MODULES_AUDIO_CODING_NETEQ_BUFFER_LEVEL_FILTER_H_
+
+#include <stddef.h>
+
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class BufferLevelFilter {
+ public:
+  BufferLevelFilter();
+  virtual ~BufferLevelFilter() {}
+  virtual void Reset();
+
+  // Updates the filter. Current buffer size is |buffer_size_packets| (Q0).
+  // If |time_stretched_samples| is non-zero, the value is converted to the
+  // corresponding number of packets, and is subtracted from the filtered
+  // value (thus bypassing the filter operation). |packet_len_samples| is the
+  // number of audio samples carried in each incoming packet.
+  virtual void Update(size_t buffer_size_packets, int time_stretched_samples,
+                      size_t packet_len_samples);
+
+  // Set the current target buffer level (obtained from
+  // DelayManager::base_target_level()). Used to select the appropriate
+  // filter coefficient.
+  virtual void SetTargetBufferLevel(int target_buffer_level);
+
+  virtual int filtered_current_level() const;
+
+ private:
+  int level_factor_;  // Filter factor for the buffer level filter in Q8.
+  int filtered_current_level_;  // Filtered current buffer level in Q8.
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(BufferLevelFilter);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_BUFFER_LEVEL_FILTER_H_
diff --git a/modules/audio_coding/neteq/buffer_level_filter_unittest.cc b/modules/audio_coding/neteq/buffer_level_filter_unittest.cc
new file mode 100644
index 0000000..72c8727
--- /dev/null
+++ b/modules/audio_coding/neteq/buffer_level_filter_unittest.cc
@@ -0,0 +1,162 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for BufferLevelFilter class.
+
+#include "modules/audio_coding/neteq/buffer_level_filter.h"
+
+#include <math.h>  // Access to pow function.
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(BufferLevelFilter, CreateAndDestroy) {
+  BufferLevelFilter* filter = new BufferLevelFilter();
+  EXPECT_EQ(0, filter->filtered_current_level());
+  delete filter;
+}
+
+TEST(BufferLevelFilter, ConvergenceTest) {
+  BufferLevelFilter filter;
+  for (int times = 10; times <= 50; times += 10) {
+    for (int value = 100; value <= 200; value += 10) {
+      filter.Reset();
+      filter.SetTargetBufferLevel(1);  // Makes filter coefficient 251/256.
+      std::ostringstream ss;
+      ss << "times = " << times << ", value = " << value;
+      SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
+      for (int i = 0; i < times; ++i) {
+        filter.Update(value, 0 /* time_stretched_samples */,
+                      160 /* packet_len_samples */);
+      }
+      // Expect the filtered value to be (theoretically)
+      // (1 - (251/256) ^ |times|) * |value|.
+      double expected_value_double =
+          (1 - pow(251.0 / 256.0, times)) * value;
+      int expected_value = static_cast<int>(expected_value_double);
+      // filtered_current_level() returns the value in Q8.
+      // The actual value may differ slightly from the expected value due to
+      // intermediate-stage rounding errors in the filter implementation.
+      // This is why we have to use EXPECT_NEAR with a tolerance of +/-1.
+      EXPECT_NEAR(expected_value, filter.filtered_current_level() >> 8, 1);
+    }
+  }
+}
+
+// Verify that target buffer level impacts on the filter convergence.
+TEST(BufferLevelFilter, FilterFactor) {
+  BufferLevelFilter filter;
+  // Update 10 times with value 100.
+  const int kTimes = 10;
+  const int kValue = 100;
+
+  filter.SetTargetBufferLevel(3);  // Makes filter coefficient 252/256.
+  for (int i = 0; i < kTimes; ++i) {
+    filter.Update(kValue, 0 /* time_stretched_samples */,
+                  160 /* packet_len_samples */);
+  }
+  // Expect the filtered value to be
+  // (1 - (252/256) ^ |kTimes|) * |kValue|.
+  int expected_value = 14;
+  // filtered_current_level() returns the value in Q8.
+  EXPECT_EQ(expected_value, filter.filtered_current_level() >> 8);
+
+  filter.Reset();
+  filter.SetTargetBufferLevel(7);  // Makes filter coefficient 253/256.
+  for (int i = 0; i < kTimes; ++i) {
+    filter.Update(kValue, 0 /* time_stretched_samples */,
+                  160 /* packet_len_samples */);
+  }
+  // Expect the filtered value to be
+  // (1 - (253/256) ^ |kTimes|) * |kValue|.
+  expected_value = 11;
+  // filtered_current_level() returns the value in Q8.
+  EXPECT_EQ(expected_value, filter.filtered_current_level() >> 8);
+
+  filter.Reset();
+  filter.SetTargetBufferLevel(8);  // Makes filter coefficient 254/256.
+  for (int i = 0; i < kTimes; ++i) {
+    filter.Update(kValue, 0 /* time_stretched_samples */,
+                  160 /* packet_len_samples */);
+  }
+  // Expect the filtered value to be
+  // (1 - (254/256) ^ |kTimes|) * |kValue|.
+  expected_value = 7;
+  // filtered_current_level() returns the value in Q8.
+  EXPECT_EQ(expected_value, filter.filtered_current_level() >> 8);
+}
+
+
+TEST(BufferLevelFilter, TimeStretchedSamples) {
+  BufferLevelFilter filter;
+  filter.SetTargetBufferLevel(1);  // Makes filter coefficient 251/256.
+  // Update 10 times with value 100.
+  const int kTimes = 10;
+  const int kValue = 100;
+  const int kPacketSizeSamples = 160;
+  const int kNumPacketsStretched = 2;
+  const int kTimeStretchedSamples = kNumPacketsStretched * kPacketSizeSamples;
+  for (int i = 0; i < kTimes; ++i) {
+    // Packet size set to 0. Do not expect the parameter
+    // |kTimeStretchedSamples| to have any effect.
+    filter.Update(kValue, kTimeStretchedSamples, 0 /* packet_len_samples */);
+  }
+  // Expect the filtered value to be
+  // (1 - (251/256) ^ |kTimes|) * |kValue|.
+  const int kExpectedValue = 17;
+  // filtered_current_level() returns the value in Q8.
+  EXPECT_EQ(kExpectedValue, filter.filtered_current_level() >> 8);
+
+  // Update filter again, now with non-zero value for packet length.
+  // Set the current filtered value to be the input, in order to isolate the
+  // impact of |kTimeStretchedSamples|.
+  filter.Update(filter.filtered_current_level() >> 8, kTimeStretchedSamples,
+                kPacketSizeSamples);
+  EXPECT_EQ(kExpectedValue - kNumPacketsStretched,
+            filter.filtered_current_level() >> 8);
+  // Try negative value and verify that we come back to the previous result.
+  filter.Update(filter.filtered_current_level() >> 8, -kTimeStretchedSamples,
+                kPacketSizeSamples);
+  EXPECT_EQ(kExpectedValue, filter.filtered_current_level() >> 8);
+}
+
+TEST(BufferLevelFilter, TimeStretchedSamplesNegativeUnevenFrames) {
+  BufferLevelFilter filter;
+  filter.SetTargetBufferLevel(1);  // Makes filter coefficient 251/256.
+  // Update 10 times with value 100.
+  const int kTimes = 10;
+  const int kValue = 100;
+  const int kPacketSizeSamples = 160;
+  const int kTimeStretchedSamples = -3.1415 * kPacketSizeSamples;
+  for (int i = 0; i < kTimes; ++i) {
+    // Packet size set to 0. Do not expect the parameter
+    // |kTimeStretchedSamples| to have any effect.
+    filter.Update(kValue, kTimeStretchedSamples, 0 /* packet_len_samples */);
+  }
+  // Expect the filtered value to be
+  // (1 - (251/256) ^ |kTimes|) * |kValue|.
+  const int kExpectedValue = 17;
+  // filtered_current_level() returns the value in Q8.
+  EXPECT_EQ(kExpectedValue, filter.filtered_current_level() >> 8);
+
+  // Update filter again, now with non-zero value for packet length.
+  // Set the current filtered value to be the input, in order to isolate the
+  // impact of |kTimeStretchedSamples|.
+  filter.Update(filter.filtered_current_level() >> 8, kTimeStretchedSamples,
+                kPacketSizeSamples);
+  EXPECT_EQ(21, filter.filtered_current_level() >> 8);
+  // Try negative value and verify that we come back to the previous result.
+  filter.Update(filter.filtered_current_level() >> 8, -kTimeStretchedSamples,
+                kPacketSizeSamples);
+  EXPECT_EQ(kExpectedValue, filter.filtered_current_level() >> 8);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/comfort_noise.cc b/modules/audio_coding/neteq/comfort_noise.cc
new file mode 100644
index 0000000..5e0a875
--- /dev/null
+++ b/modules/audio_coding/neteq/comfort_noise.cc
@@ -0,0 +1,124 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/comfort_noise.h"
+
+#include <assert.h>
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "modules/audio_coding/neteq/decoder_database.h"
+#include "modules/audio_coding/neteq/dsp_helper.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+void ComfortNoise::Reset() {
+  first_call_ = true;
+}
+
+int ComfortNoise::UpdateParameters(const Packet& packet) {
+  // Get comfort noise decoder.
+  if (decoder_database_->SetActiveCngDecoder(packet.payload_type) != kOK) {
+    return kUnknownPayloadType;
+  }
+  ComfortNoiseDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
+  RTC_DCHECK(cng_decoder);
+  cng_decoder->UpdateSid(packet.payload);
+  return kOK;
+}
+
+int ComfortNoise::Generate(size_t requested_length,
+                           AudioMultiVector* output) {
+  // TODO(hlundin): Change to an enumerator and skip assert.
+  assert(fs_hz_ == 8000 || fs_hz_ == 16000 || fs_hz_ ==  32000 ||
+         fs_hz_ == 48000);
+  // Not adapted for multi-channel yet.
+  if (output->Channels() != 1) {
+    RTC_LOG(LS_ERROR) << "No multi-channel support";
+    return kMultiChannelNotSupported;
+  }
+
+  size_t number_of_samples = requested_length;
+  bool new_period = false;
+  if (first_call_) {
+    // Generate noise and overlap slightly with old data.
+    number_of_samples = requested_length + overlap_length_;
+    new_period = true;
+  }
+  output->AssertSize(number_of_samples);
+  // Get the decoder from the database.
+  ComfortNoiseDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
+  if (!cng_decoder) {
+    RTC_LOG(LS_ERROR) << "Unknwown payload type";
+    return kUnknownPayloadType;
+  }
+
+  std::unique_ptr<int16_t[]> temp(new int16_t[number_of_samples]);
+  if (!cng_decoder->Generate(
+          rtc::ArrayView<int16_t>(temp.get(), number_of_samples),
+          new_period)) {
+    // Error returned.
+    output->Zeros(requested_length);
+    RTC_LOG(LS_ERROR)
+        << "ComfortNoiseDecoder::Genererate failed to generate comfort noise";
+    return kInternalError;
+  }
+  (*output)[0].OverwriteAt(temp.get(), number_of_samples, 0);
+
+  if (first_call_) {
+    // Set tapering window parameters. Values are in Q15.
+    int16_t muting_window;  // Mixing factor for overlap data.
+    int16_t muting_window_increment;  // Mixing factor increment (negative).
+    int16_t unmuting_window;  // Mixing factor for comfort noise.
+    int16_t unmuting_window_increment;  // Mixing factor increment.
+    if (fs_hz_ == 8000) {
+      muting_window = DspHelper::kMuteFactorStart8kHz;
+      muting_window_increment = DspHelper::kMuteFactorIncrement8kHz;
+      unmuting_window = DspHelper::kUnmuteFactorStart8kHz;
+      unmuting_window_increment = DspHelper::kUnmuteFactorIncrement8kHz;
+    } else if (fs_hz_ == 16000) {
+      muting_window = DspHelper::kMuteFactorStart16kHz;
+      muting_window_increment = DspHelper::kMuteFactorIncrement16kHz;
+      unmuting_window = DspHelper::kUnmuteFactorStart16kHz;
+      unmuting_window_increment = DspHelper::kUnmuteFactorIncrement16kHz;
+    } else if (fs_hz_ == 32000) {
+      muting_window = DspHelper::kMuteFactorStart32kHz;
+      muting_window_increment = DspHelper::kMuteFactorIncrement32kHz;
+      unmuting_window = DspHelper::kUnmuteFactorStart32kHz;
+      unmuting_window_increment = DspHelper::kUnmuteFactorIncrement32kHz;
+    } else {  // fs_hz_ == 48000
+      muting_window = DspHelper::kMuteFactorStart48kHz;
+      muting_window_increment = DspHelper::kMuteFactorIncrement48kHz;
+      unmuting_window = DspHelper::kUnmuteFactorStart48kHz;
+      unmuting_window_increment = DspHelper::kUnmuteFactorIncrement48kHz;
+    }
+
+    // Do overlap-add between new vector and overlap.
+    size_t start_ix = sync_buffer_->Size() - overlap_length_;
+    for (size_t i = 0; i < overlap_length_; i++) {
+      /* overlapVec[i] = WinMute * overlapVec[i] + WinUnMute * outData[i] */
+      // The expression (*output)[0][i] is the i-th element in the first
+      // channel.
+      (*sync_buffer_)[0][start_ix + i] =
+          (((*sync_buffer_)[0][start_ix + i] * muting_window) +
+              ((*output)[0][i] * unmuting_window) + 16384) >> 15;
+      muting_window += muting_window_increment;
+      unmuting_window += unmuting_window_increment;
+    }
+    // Remove |overlap_length_| samples from the front of |output| since they
+    // were mixed into |sync_buffer_| above.
+    output->PopFront(overlap_length_);
+  }
+  first_call_ = false;
+  return kOK;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/comfort_noise.h b/modules/audio_coding/neteq/comfort_noise.h
new file mode 100644
index 0000000..18800ad
--- /dev/null
+++ b/modules/audio_coding/neteq/comfort_noise.h
@@ -0,0 +1,71 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_COMFORT_NOISE_H_
+#define MODULES_AUDIO_CODING_NETEQ_COMFORT_NOISE_H_
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Forward declarations.
+class DecoderDatabase;
+class SyncBuffer;
+struct Packet;
+
+// This class acts as an interface to the CNG generator.
+class ComfortNoise {
+ public:
+  enum ReturnCodes {
+    kOK = 0,
+    kUnknownPayloadType,
+    kInternalError,
+    kMultiChannelNotSupported
+  };
+
+  ComfortNoise(int fs_hz, DecoderDatabase* decoder_database,
+               SyncBuffer* sync_buffer)
+      : fs_hz_(fs_hz),
+        first_call_(true),
+        overlap_length_(5 * fs_hz_ / 8000),
+        decoder_database_(decoder_database),
+        sync_buffer_(sync_buffer) {
+  }
+
+  // Resets the state. Should be called before each new comfort noise period.
+  void Reset();
+
+  // Update the comfort noise generator with the parameters in |packet|.
+  int UpdateParameters(const Packet& packet);
+
+  // Generates |requested_length| samples of comfort noise and writes to
+  // |output|. If this is the first in call after Reset (or first after creating
+  // the object), it will also mix in comfort noise at the end of the
+  // SyncBuffer object provided in the constructor.
+  int Generate(size_t requested_length, AudioMultiVector* output);
+
+  // Returns the last error code that was produced by the comfort noise
+  // decoder. Returns 0 if no error has been encountered since the last reset.
+  int internal_error_code() { return internal_error_code_; }
+
+ private:
+  int fs_hz_;
+  bool first_call_;
+  size_t overlap_length_;
+  DecoderDatabase* decoder_database_;
+  SyncBuffer* sync_buffer_;
+  int internal_error_code_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(ComfortNoise);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_COMFORT_NOISE_H_
diff --git a/modules/audio_coding/neteq/comfort_noise_unittest.cc b/modules/audio_coding/neteq/comfort_noise_unittest.cc
new file mode 100644
index 0000000..b3fbb4e
--- /dev/null
+++ b/modules/audio_coding/neteq/comfort_noise_unittest.cc
@@ -0,0 +1,31 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for ComfortNoise class.
+
+#include "modules/audio_coding/neteq/comfort_noise.h"
+
+#include "modules/audio_coding/neteq/mock/mock_decoder_database.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(ComfortNoise, CreateAndDestroy) {
+  int fs = 8000;
+  MockDecoderDatabase db;
+  SyncBuffer sync_buffer(1, 1000);
+  ComfortNoise cn(fs, &db, &sync_buffer);
+  EXPECT_CALL(db, Die());  // Called when |db| goes out of scope.
+}
+
+// TODO(hlundin): Write more tests.
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/cross_correlation.cc b/modules/audio_coding/neteq/cross_correlation.cc
new file mode 100644
index 0000000..da9c913
--- /dev/null
+++ b/modules/audio_coding/neteq/cross_correlation.cc
@@ -0,0 +1,62 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/cross_correlation.h"
+
+#include <cstdlib>
+#include <limits>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+namespace webrtc {
+
+// This function decides the overflow-protecting scaling and calls
+// WebRtcSpl_CrossCorrelation.
+int CrossCorrelationWithAutoShift(const int16_t* sequence_1,
+                                  const int16_t* sequence_2,
+                                  size_t sequence_1_length,
+                                  size_t cross_correlation_length,
+                                  int cross_correlation_step,
+                                  int32_t* cross_correlation) {
+  // Find the maximum absolute value of sequence_1 and 2.
+  const int16_t max_1 = WebRtcSpl_MaxAbsValueW16(sequence_1, sequence_1_length);
+  const int sequence_2_shift =
+      cross_correlation_step * (static_cast<int>(cross_correlation_length) - 1);
+  const int16_t* sequence_2_start =
+      sequence_2_shift >= 0 ? sequence_2 : sequence_2 + sequence_2_shift;
+  const size_t sequence_2_length =
+      sequence_1_length + std::abs(sequence_2_shift);
+  const int16_t max_2 =
+      WebRtcSpl_MaxAbsValueW16(sequence_2_start, sequence_2_length);
+
+  // In order to avoid overflow when computing the sum we should scale the
+  // samples so that (in_vector_length * max_1 * max_2) will not overflow.
+  // Expected scaling fulfills
+  // 1) sufficient:
+  //    sequence_1_length * (max_1 * max_2 >> scaling) <= 0x7fffffff;
+  // 2) necessary:
+  //    if (scaling > 0)
+  //      sequence_1_length * (max_1 * max_2 >> (scaling - 1)) > 0x7fffffff;
+  // The following calculation fulfills 1) and almost fulfills 2).
+  // There are some corner cases that 2) is not satisfied, e.g.,
+  // max_1 = 17, max_2 = 30848, sequence_1_length = 4095, in such case,
+  // optimal scaling is 0, while the following calculation results in 1.
+  const int32_t factor = (max_1 * max_2) / (std::numeric_limits<int32_t>::max()
+      / static_cast<int32_t>(sequence_1_length));
+  const int scaling = factor == 0 ? 0 : 31 - WebRtcSpl_NormW32(factor);
+
+  WebRtcSpl_CrossCorrelation(cross_correlation, sequence_1, sequence_2,
+                             sequence_1_length, cross_correlation_length,
+                             scaling, cross_correlation_step);
+
+  return scaling;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/cross_correlation.h b/modules/audio_coding/neteq/cross_correlation.h
new file mode 100644
index 0000000..a747772
--- /dev/null
+++ b/modules/audio_coding/neteq/cross_correlation.h
@@ -0,0 +1,50 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_CROSS_CORRELATION_H_
+#define MODULES_AUDIO_CODING_NETEQ_CROSS_CORRELATION_H_
+
+#include "common_types.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// The function calculates the cross-correlation between two sequences
+// |sequence_1| and |sequence_2|. |sequence_1| is taken as reference, with
+// |sequence_1_length| as its length. |sequence_2| slides for the calculation of
+// cross-correlation. The result will be saved in |cross_correlation|.
+// |cross_correlation_length| correlation points are calculated.
+// The corresponding lag starts from 0, and increases with a step of
+// |cross_correlation_step|. The result is without normalization. To avoid
+// overflow, the result will be right shifted. The amount of shifts will be
+// returned.
+//
+// Input:
+//     - sequence_1     : First sequence (reference).
+//     - sequence_2     : Second sequence (sliding during calculation).
+//     - sequence_1_length : Length of |sequence_1|.
+//     - cross_correlation_length : Number of cross-correlations to calculate.
+//     - cross_correlation_step : Step in the lag for the cross-correlation.
+//
+// Output:
+//      - cross_correlation : The cross-correlation in Q(-right_shifts)
+//
+// Return:
+//      Number of right shifts in cross_correlation.
+
+int CrossCorrelationWithAutoShift(const int16_t* sequence_1,
+                                  const int16_t* sequence_2,
+                                  size_t sequence_1_length,
+                                  size_t cross_correlation_length,
+                                  int cross_correlation_step,
+                                  int32_t* cross_correlation);
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_NETEQ_CROSS_CORRELATION_H_
diff --git a/modules/audio_coding/neteq/decision_logic.cc b/modules/audio_coding/neteq/decision_logic.cc
new file mode 100644
index 0000000..966d5c3
--- /dev/null
+++ b/modules/audio_coding/neteq/decision_logic.cc
@@ -0,0 +1,170 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/decision_logic.h"
+
+#include <algorithm>
+
+#include "modules/audio_coding/neteq/buffer_level_filter.h"
+#include "modules/audio_coding/neteq/decision_logic_fax.h"
+#include "modules/audio_coding/neteq/decision_logic_normal.h"
+#include "modules/audio_coding/neteq/delay_manager.h"
+#include "modules/audio_coding/neteq/expand.h"
+#include "modules/audio_coding/neteq/packet_buffer.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+
+namespace webrtc {
+
+DecisionLogic* DecisionLogic::Create(int fs_hz,
+                                     size_t output_size_samples,
+                                     NetEqPlayoutMode playout_mode,
+                                     DecoderDatabase* decoder_database,
+                                     const PacketBuffer& packet_buffer,
+                                     DelayManager* delay_manager,
+                                     BufferLevelFilter* buffer_level_filter,
+                                     const TickTimer* tick_timer) {
+  switch (playout_mode) {
+    case kPlayoutOn:
+    case kPlayoutStreaming:
+      return new DecisionLogicNormal(
+          fs_hz, output_size_samples, playout_mode, decoder_database,
+          packet_buffer, delay_manager, buffer_level_filter, tick_timer);
+    case kPlayoutFax:
+    case kPlayoutOff:
+      return new DecisionLogicFax(
+          fs_hz, output_size_samples, playout_mode, decoder_database,
+          packet_buffer, delay_manager, buffer_level_filter, tick_timer);
+  }
+  // This line cannot be reached, but must be here to avoid compiler errors.
+  assert(false);
+  return NULL;
+}
+
+DecisionLogic::DecisionLogic(int fs_hz,
+                             size_t output_size_samples,
+                             NetEqPlayoutMode playout_mode,
+                             DecoderDatabase* decoder_database,
+                             const PacketBuffer& packet_buffer,
+                             DelayManager* delay_manager,
+                             BufferLevelFilter* buffer_level_filter,
+                             const TickTimer* tick_timer)
+    : decoder_database_(decoder_database),
+      packet_buffer_(packet_buffer),
+      delay_manager_(delay_manager),
+      buffer_level_filter_(buffer_level_filter),
+      tick_timer_(tick_timer),
+      cng_state_(kCngOff),
+      packet_length_samples_(0),
+      sample_memory_(0),
+      prev_time_scale_(false),
+      timescale_countdown_(
+          tick_timer_->GetNewCountdown(kMinTimescaleInterval + 1)),
+      num_consecutive_expands_(0),
+      playout_mode_(playout_mode) {
+  delay_manager_->set_streaming_mode(playout_mode_ == kPlayoutStreaming);
+  SetSampleRate(fs_hz, output_size_samples);
+}
+
+DecisionLogic::~DecisionLogic() = default;
+
+void DecisionLogic::Reset() {
+  cng_state_ = kCngOff;
+  noise_fast_forward_ = 0;
+  packet_length_samples_ = 0;
+  sample_memory_ = 0;
+  prev_time_scale_ = false;
+  timescale_countdown_.reset();
+  num_consecutive_expands_ = 0;
+}
+
+void DecisionLogic::SoftReset() {
+  packet_length_samples_ = 0;
+  sample_memory_ = 0;
+  prev_time_scale_ = false;
+  timescale_countdown_ =
+      tick_timer_->GetNewCountdown(kMinTimescaleInterval + 1);
+}
+
+void DecisionLogic::SetSampleRate(int fs_hz, size_t output_size_samples) {
+  // TODO(hlundin): Change to an enumerator and skip assert.
+  assert(fs_hz == 8000 || fs_hz == 16000 || fs_hz ==  32000 || fs_hz == 48000);
+  fs_mult_ = fs_hz / 8000;
+  output_size_samples_ = output_size_samples;
+}
+
+Operations DecisionLogic::GetDecision(const SyncBuffer& sync_buffer,
+                                      const Expand& expand,
+                                      size_t decoder_frame_length,
+                                      const Packet* next_packet,
+                                      Modes prev_mode,
+                                      bool play_dtmf,
+                                      size_t generated_noise_samples,
+                                      bool* reset_decoder) {
+  // If last mode was CNG (or Expand, since this could be covering up for
+  // a lost CNG packet), remember that CNG is on. This is needed if comfort
+  // noise is interrupted by DTMF.
+  if (prev_mode == kModeRfc3389Cng) {
+    cng_state_ = kCngRfc3389On;
+  } else if (prev_mode == kModeCodecInternalCng) {
+    cng_state_ = kCngInternalOn;
+  }
+
+  const size_t samples_left =
+      sync_buffer.FutureLength() - expand.overlap_length();
+  const size_t cur_size_samples =
+      samples_left + packet_buffer_.NumSamplesInBuffer(decoder_frame_length);
+
+  prev_time_scale_ = prev_time_scale_ &&
+      (prev_mode == kModeAccelerateSuccess ||
+          prev_mode == kModeAccelerateLowEnergy ||
+          prev_mode == kModePreemptiveExpandSuccess ||
+          prev_mode == kModePreemptiveExpandLowEnergy);
+
+  FilterBufferLevel(cur_size_samples, prev_mode);
+
+  return GetDecisionSpecialized(sync_buffer, expand, decoder_frame_length,
+                                next_packet, prev_mode, play_dtmf,
+                                reset_decoder, generated_noise_samples);
+}
+
+void DecisionLogic::ExpandDecision(Operations operation) {
+  if (operation == kExpand) {
+    num_consecutive_expands_++;
+  } else {
+    num_consecutive_expands_ = 0;
+  }
+}
+
+void DecisionLogic::FilterBufferLevel(size_t buffer_size_samples,
+                                      Modes prev_mode) {
+  // Do not update buffer history if currently playing CNG since it will bias
+  // the filtered buffer level.
+  if ((prev_mode != kModeRfc3389Cng) && (prev_mode != kModeCodecInternalCng)) {
+    buffer_level_filter_->SetTargetBufferLevel(
+        delay_manager_->base_target_level());
+
+    size_t buffer_size_packets = 0;
+    if (packet_length_samples_ > 0) {
+      // Calculate size in packets.
+      buffer_size_packets = buffer_size_samples / packet_length_samples_;
+    }
+    int sample_memory_local = 0;
+    if (prev_time_scale_) {
+      sample_memory_local = sample_memory_;
+      timescale_countdown_ =
+          tick_timer_->GetNewCountdown(kMinTimescaleInterval);
+    }
+    buffer_level_filter_->Update(buffer_size_packets, sample_memory_local,
+                                 packet_length_samples_);
+    prev_time_scale_ = false;
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/decision_logic.h b/modules/audio_coding/neteq/decision_logic.h
new file mode 100644
index 0000000..5b67196
--- /dev/null
+++ b/modules/audio_coding/neteq/decision_logic.h
@@ -0,0 +1,168 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_DECISION_LOGIC_H_
+#define MODULES_AUDIO_CODING_NETEQ_DECISION_LOGIC_H_
+
+#include "modules/audio_coding/neteq/defines.h"
+#include "modules/audio_coding/neteq/include/neteq.h"
+#include "modules/audio_coding/neteq/tick_timer.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Forward declarations.
+class BufferLevelFilter;
+class DecoderDatabase;
+class DelayManager;
+class Expand;
+class PacketBuffer;
+class SyncBuffer;
+struct Packet;
+
+// This is the base class for the decision tree implementations. Derived classes
+// must implement the method GetDecisionSpecialized().
+class DecisionLogic {
+ public:
+  // Static factory function which creates different types of objects depending
+  // on the |playout_mode|.
+  static DecisionLogic* Create(int fs_hz,
+                               size_t output_size_samples,
+                               NetEqPlayoutMode playout_mode,
+                               DecoderDatabase* decoder_database,
+                               const PacketBuffer& packet_buffer,
+                               DelayManager* delay_manager,
+                               BufferLevelFilter* buffer_level_filter,
+                               const TickTimer* tick_timer);
+
+  // Constructor.
+  DecisionLogic(int fs_hz,
+                size_t output_size_samples,
+                NetEqPlayoutMode playout_mode,
+                DecoderDatabase* decoder_database,
+                const PacketBuffer& packet_buffer,
+                DelayManager* delay_manager,
+                BufferLevelFilter* buffer_level_filter,
+                const TickTimer* tick_timer);
+
+  virtual ~DecisionLogic();
+
+  // Resets object to a clean state.
+  void Reset();
+
+  // Resets parts of the state. Typically done when switching codecs.
+  void SoftReset();
+
+  // Sets the sample rate and the output block size.
+  void SetSampleRate(int fs_hz, size_t output_size_samples);
+
+  // Returns the operation that should be done next. |sync_buffer| and |expand|
+  // are provided for reference. |decoder_frame_length| is the number of samples
+  // obtained from the last decoded frame. If there is a packet available, it
+  // should be supplied in |next_packet|; otherwise it should be NULL. The mode
+  // resulting from the last call to NetEqImpl::GetAudio is supplied in
+  // |prev_mode|. If there is a DTMF event to play, |play_dtmf| should be set to
+  // true. The output variable |reset_decoder| will be set to true if a reset is
+  // required; otherwise it is left unchanged (i.e., it can remain true if it
+  // was true before the call).  This method end with calling
+  // GetDecisionSpecialized to get the actual return value.
+  Operations GetDecision(const SyncBuffer& sync_buffer,
+                         const Expand& expand,
+                         size_t decoder_frame_length,
+                         const Packet* next_packet,
+                         Modes prev_mode,
+                         bool play_dtmf,
+                         size_t generated_noise_samples,
+                         bool* reset_decoder);
+
+  // These methods test the |cng_state_| for different conditions.
+  bool CngRfc3389On() const { return cng_state_ == kCngRfc3389On; }
+  bool CngOff() const { return cng_state_ == kCngOff; }
+
+  // Resets the |cng_state_| to kCngOff.
+  void SetCngOff() { cng_state_ = kCngOff; }
+
+  // Reports back to DecisionLogic whether the decision to do expand remains or
+  // not. Note that this is necessary, since an expand decision can be changed
+  // to kNormal in NetEqImpl::GetDecision if there is still enough data in the
+  // sync buffer.
+  virtual void ExpandDecision(Operations operation);
+
+  // Adds |value| to |sample_memory_|.
+  void AddSampleMemory(int32_t value) {
+    sample_memory_ += value;
+  }
+
+  // Accessors and mutators.
+  void set_sample_memory(int32_t value) { sample_memory_ = value; }
+  size_t noise_fast_forward() const { return noise_fast_forward_; }
+  size_t packet_length_samples() const { return packet_length_samples_; }
+  void set_packet_length_samples(size_t value) {
+    packet_length_samples_ = value;
+  }
+  void set_prev_time_scale(bool value) { prev_time_scale_ = value; }
+  NetEqPlayoutMode playout_mode() const { return playout_mode_; }
+
+ protected:
+  // The value 5 sets maximum time-stretch rate to about 100 ms/s.
+  static const int kMinTimescaleInterval = 5;
+
+  enum CngState {
+    kCngOff,
+    kCngRfc3389On,
+    kCngInternalOn
+  };
+
+  // Returns the operation that should be done next. |sync_buffer| and |expand|
+  // are provided for reference. |decoder_frame_length| is the number of samples
+  // obtained from the last decoded frame. If there is a packet available, it
+  // should be supplied in |next_packet|; otherwise it should be NULL. The mode
+  // resulting from the last call to NetEqImpl::GetAudio is supplied in
+  // |prev_mode|. If there is a DTMF event to play, |play_dtmf| should be set to
+  // true. The output variable |reset_decoder| will be set to true if a reset is
+  // required; otherwise it is left unchanged (i.e., it can remain true if it
+  // was true before the call).  Should be implemented by derived classes.
+  virtual Operations GetDecisionSpecialized(const SyncBuffer& sync_buffer,
+                                            const Expand& expand,
+                                            size_t decoder_frame_length,
+                                            const Packet* next_packet,
+                                            Modes prev_mode,
+                                            bool play_dtmf,
+                                            bool* reset_decoder,
+                                            size_t generated_noise_samples) = 0;
+
+  // Updates the |buffer_level_filter_| with the current buffer level
+  // |buffer_size_packets|.
+  void FilterBufferLevel(size_t buffer_size_packets, Modes prev_mode);
+
+  DecoderDatabase* decoder_database_;
+  const PacketBuffer& packet_buffer_;
+  DelayManager* delay_manager_;
+  BufferLevelFilter* buffer_level_filter_;
+  const TickTimer* tick_timer_;
+  int fs_mult_;
+  size_t output_size_samples_;
+  CngState cng_state_;  // Remember if comfort noise is interrupted by other
+                        // event (e.g., DTMF).
+  size_t noise_fast_forward_ = 0;
+  size_t packet_length_samples_;
+  int sample_memory_;
+  bool prev_time_scale_;
+  std::unique_ptr<TickTimer::Countdown> timescale_countdown_;
+  int num_consecutive_expands_;
+  const NetEqPlayoutMode playout_mode_;
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(DecisionLogic);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_DECISION_LOGIC_H_
diff --git a/modules/audio_coding/neteq/decision_logic_fax.cc b/modules/audio_coding/neteq/decision_logic_fax.cc
new file mode 100644
index 0000000..cc21ee9
--- /dev/null
+++ b/modules/audio_coding/neteq/decision_logic_fax.cc
@@ -0,0 +1,103 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/decision_logic_fax.h"
+
+#include <assert.h>
+
+#include <algorithm>
+
+#include "modules/audio_coding/neteq/decoder_database.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+
+namespace webrtc {
+
+Operations DecisionLogicFax::GetDecisionSpecialized(
+    const SyncBuffer& sync_buffer,
+    const Expand& expand,
+    size_t decoder_frame_length,
+    const Packet* next_packet,
+    Modes prev_mode,
+    bool play_dtmf,
+    bool* reset_decoder,
+    size_t generated_noise_samples) {
+  assert(playout_mode_ == kPlayoutFax || playout_mode_ == kPlayoutOff);
+  uint32_t target_timestamp = sync_buffer.end_timestamp();
+  uint32_t available_timestamp = 0;
+  int is_cng_packet = 0;
+  if (next_packet) {
+    available_timestamp = next_packet->timestamp;
+    is_cng_packet =
+        decoder_database_->IsComfortNoise(next_packet->payload_type);
+  }
+  if (is_cng_packet) {
+    if (static_cast<int32_t>((generated_noise_samples + target_timestamp)
+        - available_timestamp) >= 0) {
+      // Time to play this packet now.
+      return kRfc3389Cng;
+    } else {
+      // Wait before playing this packet.
+      return kRfc3389CngNoPacket;
+    }
+  }
+  if (!next_packet) {
+    // No packet. If in CNG mode, play as usual. Otherwise, use other method to
+    // generate data.
+    if (cng_state_ == kCngRfc3389On) {
+      // Continue playing comfort noise.
+      return kRfc3389CngNoPacket;
+    } else if (cng_state_ == kCngInternalOn) {
+      // Continue playing codec-internal comfort noise.
+      return kCodecInternalCng;
+    } else {
+      // Nothing to play. Generate some data to play out.
+      switch (playout_mode_) {
+        case kPlayoutOff:
+          return kAlternativePlc;
+        case kPlayoutFax:
+          return kAudioRepetition;
+        default:
+          assert(false);
+          return kUndefined;
+      }
+    }
+  } else if (target_timestamp == available_timestamp) {
+    return kNormal;
+  } else {
+    if (static_cast<int32_t>((generated_noise_samples + target_timestamp)
+        - available_timestamp) >= 0) {
+      return kNormal;
+    } else {
+      // If currently playing comfort noise, continue with that. Do not
+      // increase the timestamp counter since generated_noise_stopwatch_ in
+      // NetEqImpl will take care of the time-keeping.
+      if (cng_state_ == kCngRfc3389On) {
+        return kRfc3389CngNoPacket;
+      } else if (cng_state_ == kCngInternalOn) {
+        return kCodecInternalCng;
+      } else {
+        // Otherwise, do packet-loss concealment and increase the
+        // timestamp while waiting for the time to play this packet.
+        switch (playout_mode_) {
+          case kPlayoutOff:
+            return kAlternativePlcIncreaseTimestamp;
+          case kPlayoutFax:
+            return kAudioRepetitionIncreaseTimestamp;
+          default:
+            assert(0);
+            return kUndefined;
+        }
+      }
+    }
+  }
+}
+
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/decision_logic_fax.h b/modules/audio_coding/neteq/decision_logic_fax.h
new file mode 100644
index 0000000..cefd8e4
--- /dev/null
+++ b/modules/audio_coding/neteq/decision_logic_fax.h
@@ -0,0 +1,57 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_DECISION_LOGIC_FAX_H_
+#define MODULES_AUDIO_CODING_NETEQ_DECISION_LOGIC_FAX_H_
+
+#include "modules/audio_coding/neteq/decision_logic.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Implementation of the DecisionLogic class for playout modes kPlayoutFax and
+// kPlayoutOff.
+class DecisionLogicFax : public DecisionLogic {
+ public:
+  // Constructor.
+  DecisionLogicFax(int fs_hz,
+                   size_t output_size_samples,
+                   NetEqPlayoutMode playout_mode,
+                   DecoderDatabase* decoder_database,
+                   const PacketBuffer& packet_buffer,
+                   DelayManager* delay_manager,
+                   BufferLevelFilter* buffer_level_filter,
+                   const TickTimer* tick_timer)
+      : DecisionLogic(fs_hz,
+                      output_size_samples,
+                      playout_mode,
+                      decoder_database,
+                      packet_buffer,
+                      delay_manager,
+                      buffer_level_filter,
+                      tick_timer) {}
+
+ protected:
+  Operations GetDecisionSpecialized(const SyncBuffer& sync_buffer,
+                                    const Expand& expand,
+                                    size_t decoder_frame_length,
+                                    const Packet* next_packet,
+                                    Modes prev_mode,
+                                    bool play_dtmf,
+                                    bool* reset_decoder,
+                                    size_t generated_noise_samples) override;
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(DecisionLogicFax);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_DECISION_LOGIC_FAX_H_
diff --git a/modules/audio_coding/neteq/decision_logic_normal.cc b/modules/audio_coding/neteq/decision_logic_normal.cc
new file mode 100644
index 0000000..10f501a
--- /dev/null
+++ b/modules/audio_coding/neteq/decision_logic_normal.cc
@@ -0,0 +1,241 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/decision_logic_normal.h"
+
+#include <assert.h>
+
+#include <algorithm>
+
+#include "modules/audio_coding/neteq/buffer_level_filter.h"
+#include "modules/audio_coding/neteq/decoder_database.h"
+#include "modules/audio_coding/neteq/delay_manager.h"
+#include "modules/audio_coding/neteq/expand.h"
+#include "modules/audio_coding/neteq/packet_buffer.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "modules/include/module_common_types.h"
+
+namespace webrtc {
+
+Operations DecisionLogicNormal::GetDecisionSpecialized(
+    const SyncBuffer& sync_buffer,
+    const Expand& expand,
+    size_t decoder_frame_length,
+    const Packet* next_packet,
+    Modes prev_mode,
+    bool play_dtmf,
+    bool* reset_decoder,
+    size_t generated_noise_samples) {
+  assert(playout_mode_ == kPlayoutOn || playout_mode_ == kPlayoutStreaming);
+  // Guard for errors, to avoid getting stuck in error mode.
+  if (prev_mode == kModeError) {
+    if (!next_packet) {
+      return kExpand;
+    } else {
+      return kUndefined;  // Use kUndefined to flag for a reset.
+    }
+  }
+
+  uint32_t target_timestamp = sync_buffer.end_timestamp();
+  uint32_t available_timestamp = 0;
+  bool is_cng_packet = false;
+  if (next_packet) {
+    available_timestamp = next_packet->timestamp;
+    is_cng_packet =
+        decoder_database_->IsComfortNoise(next_packet->payload_type);
+  }
+
+  if (is_cng_packet) {
+    return CngOperation(prev_mode, target_timestamp, available_timestamp,
+                        generated_noise_samples);
+  }
+
+  // Handle the case with no packet at all available (except maybe DTMF).
+  if (!next_packet) {
+    return NoPacket(play_dtmf);
+  }
+
+  // If the expand period was very long, reset NetEQ since it is likely that the
+  // sender was restarted.
+  if (num_consecutive_expands_ > kReinitAfterExpands) {
+    *reset_decoder = true;
+    return kNormal;
+  }
+
+  const uint32_t five_seconds_samples =
+      static_cast<uint32_t>(5 * 8000 * fs_mult_);
+  // Check if the required packet is available.
+  if (target_timestamp == available_timestamp) {
+    return ExpectedPacketAvailable(prev_mode, play_dtmf);
+  } else if (!PacketBuffer::IsObsoleteTimestamp(
+                 available_timestamp, target_timestamp, five_seconds_samples)) {
+    return FuturePacketAvailable(sync_buffer, expand, decoder_frame_length,
+                                 prev_mode, target_timestamp,
+                                 available_timestamp, play_dtmf,
+                                 generated_noise_samples);
+  } else {
+    // This implies that available_timestamp < target_timestamp, which can
+    // happen when a new stream or codec is received. Signal for a reset.
+    return kUndefined;
+  }
+}
+
+Operations DecisionLogicNormal::CngOperation(Modes prev_mode,
+                                             uint32_t target_timestamp,
+                                             uint32_t available_timestamp,
+                                             size_t generated_noise_samples) {
+  // Signed difference between target and available timestamp.
+  int32_t timestamp_diff = static_cast<int32_t>(
+      static_cast<uint32_t>(generated_noise_samples + target_timestamp) -
+      available_timestamp);
+  int32_t optimal_level_samp = static_cast<int32_t>(
+      (delay_manager_->TargetLevel() * packet_length_samples_) >> 8);
+  const int64_t excess_waiting_time_samp =
+      -static_cast<int64_t>(timestamp_diff) - optimal_level_samp;
+
+  if (excess_waiting_time_samp > optimal_level_samp / 2) {
+    // The waiting time for this packet will be longer than 1.5
+    // times the wanted buffer delay. Apply fast-forward to cut the
+    // waiting time down to the optimal.
+    noise_fast_forward_ = rtc::dchecked_cast<size_t>(noise_fast_forward_ +
+                                                     excess_waiting_time_samp);
+    timestamp_diff =
+        rtc::saturated_cast<int32_t>(timestamp_diff + excess_waiting_time_samp);
+  }
+
+  if (timestamp_diff < 0 && prev_mode == kModeRfc3389Cng) {
+    // Not time to play this packet yet. Wait another round before using this
+    // packet. Keep on playing CNG from previous CNG parameters.
+    return kRfc3389CngNoPacket;
+  } else {
+    // Otherwise, go for the CNG packet now.
+    noise_fast_forward_ = 0;
+    return kRfc3389Cng;
+  }
+}
+
+Operations DecisionLogicNormal::NoPacket(bool play_dtmf) {
+  if (cng_state_ == kCngRfc3389On) {
+    // Keep on playing comfort noise.
+    return kRfc3389CngNoPacket;
+  } else if (cng_state_ == kCngInternalOn) {
+    // Keep on playing codec internal comfort noise.
+    return kCodecInternalCng;
+  } else if (play_dtmf) {
+    return kDtmf;
+  } else {
+    // Nothing to play, do expand.
+    return kExpand;
+  }
+}
+
+Operations DecisionLogicNormal::ExpectedPacketAvailable(Modes prev_mode,
+                                                        bool play_dtmf) {
+  if (prev_mode != kModeExpand && !play_dtmf) {
+    // Check criterion for time-stretching.
+    int low_limit, high_limit;
+    delay_manager_->BufferLimits(&low_limit, &high_limit);
+    if (buffer_level_filter_->filtered_current_level() >= high_limit << 2)
+      return kFastAccelerate;
+    if (TimescaleAllowed()) {
+      if (buffer_level_filter_->filtered_current_level() >= high_limit)
+        return kAccelerate;
+      if (buffer_level_filter_->filtered_current_level() < low_limit)
+        return kPreemptiveExpand;
+    }
+  }
+  return kNormal;
+}
+
+Operations DecisionLogicNormal::FuturePacketAvailable(
+    const SyncBuffer& sync_buffer,
+    const Expand& expand,
+    size_t decoder_frame_length,
+    Modes prev_mode,
+    uint32_t target_timestamp,
+    uint32_t available_timestamp,
+    bool play_dtmf,
+    size_t generated_noise_samples) {
+  // Required packet is not available, but a future packet is.
+  // Check if we should continue with an ongoing expand because the new packet
+  // is too far into the future.
+  uint32_t timestamp_leap = available_timestamp - target_timestamp;
+  if ((prev_mode == kModeExpand) &&
+      !ReinitAfterExpands(timestamp_leap) &&
+      !MaxWaitForPacket() &&
+      PacketTooEarly(timestamp_leap) &&
+      UnderTargetLevel()) {
+    if (play_dtmf) {
+      // Still have DTMF to play, so do not do expand.
+      return kDtmf;
+    } else {
+      // Nothing to play.
+      return kExpand;
+    }
+  }
+
+  const size_t samples_left =
+      sync_buffer.FutureLength() - expand.overlap_length();
+  const size_t cur_size_samples = samples_left +
+      packet_buffer_.NumPacketsInBuffer() * decoder_frame_length;
+
+  // If previous was comfort noise, then no merge is needed.
+  if (prev_mode == kModeRfc3389Cng ||
+      prev_mode == kModeCodecInternalCng) {
+    // Keep the same delay as before the CNG, but make sure that the number of
+    // samples in buffer is no higher than 4 times the optimal level. (Note that
+    // TargetLevel() is in Q8.)
+    if (static_cast<uint32_t>(generated_noise_samples + target_timestamp) >=
+            available_timestamp ||
+        cur_size_samples >
+            ((delay_manager_->TargetLevel() * packet_length_samples_) >> 8) *
+            4) {
+      // Time to play this new packet.
+      return kNormal;
+    } else {
+      // Too early to play this new packet; keep on playing comfort noise.
+      if (prev_mode == kModeRfc3389Cng) {
+        return kRfc3389CngNoPacket;
+      } else {  // prevPlayMode == kModeCodecInternalCng.
+        return kCodecInternalCng;
+      }
+    }
+  }
+  // Do not merge unless we have done an expand before.
+  if (prev_mode == kModeExpand) {
+    return kMerge;
+  } else if (play_dtmf) {
+    // Play DTMF instead of expand.
+    return kDtmf;
+  } else {
+    return kExpand;
+  }
+}
+
+bool DecisionLogicNormal::UnderTargetLevel() const {
+  return buffer_level_filter_->filtered_current_level() <=
+      delay_manager_->TargetLevel();
+}
+
+bool DecisionLogicNormal::ReinitAfterExpands(uint32_t timestamp_leap) const {
+  return timestamp_leap >=
+      static_cast<uint32_t>(output_size_samples_ * kReinitAfterExpands);
+}
+
+bool DecisionLogicNormal::PacketTooEarly(uint32_t timestamp_leap) const {
+  return timestamp_leap >
+      static_cast<uint32_t>(output_size_samples_ * num_consecutive_expands_);
+}
+
+bool DecisionLogicNormal::MaxWaitForPacket() const {
+  return num_consecutive_expands_ >= kMaxWaitForPacket;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/decision_logic_normal.h b/modules/audio_coding/neteq/decision_logic_normal.h
new file mode 100644
index 0000000..366d103
--- /dev/null
+++ b/modules/audio_coding/neteq/decision_logic_normal.h
@@ -0,0 +1,107 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_DECISION_LOGIC_NORMAL_H_
+#define MODULES_AUDIO_CODING_NETEQ_DECISION_LOGIC_NORMAL_H_
+
+#include "modules/audio_coding/neteq/decision_logic.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Implementation of the DecisionLogic class for playout modes kPlayoutOn and
+// kPlayoutStreaming.
+class DecisionLogicNormal : public DecisionLogic {
+ public:
+  // Constructor.
+  DecisionLogicNormal(int fs_hz,
+                      size_t output_size_samples,
+                      NetEqPlayoutMode playout_mode,
+                      DecoderDatabase* decoder_database,
+                      const PacketBuffer& packet_buffer,
+                      DelayManager* delay_manager,
+                      BufferLevelFilter* buffer_level_filter,
+                      const TickTimer* tick_timer)
+      : DecisionLogic(fs_hz,
+                      output_size_samples,
+                      playout_mode,
+                      decoder_database,
+                      packet_buffer,
+                      delay_manager,
+                      buffer_level_filter,
+                      tick_timer) {}
+
+ protected:
+  static const int kReinitAfterExpands = 100;
+  static const int kMaxWaitForPacket = 10;
+
+  Operations GetDecisionSpecialized(const SyncBuffer& sync_buffer,
+                                    const Expand& expand,
+                                    size_t decoder_frame_length,
+                                    const Packet* next_packet,
+                                    Modes prev_mode,
+                                    bool play_dtmf,
+                                    bool* reset_decoder,
+                                    size_t generated_noise_samples) override;
+
+  // Returns the operation to do given that the expected packet is not
+  // available, but a packet further into the future is at hand.
+  virtual Operations FuturePacketAvailable(
+      const SyncBuffer& sync_buffer,
+      const Expand& expand,
+      size_t decoder_frame_length,
+      Modes prev_mode,
+      uint32_t target_timestamp,
+      uint32_t available_timestamp,
+      bool play_dtmf,
+      size_t generated_noise_samples);
+
+  // Returns the operation to do given that the expected packet is available.
+  virtual Operations ExpectedPacketAvailable(Modes prev_mode, bool play_dtmf);
+
+  // Returns the operation given that no packets are available (except maybe
+  // a DTMF event, flagged by setting |play_dtmf| true).
+  virtual Operations NoPacket(bool play_dtmf);
+
+ private:
+  // Returns the operation given that the next available packet is a comfort
+  // noise payload (RFC 3389 only, not codec-internal).
+  Operations CngOperation(Modes prev_mode,
+                          uint32_t target_timestamp,
+                          uint32_t available_timestamp,
+                          size_t generated_noise_samples);
+
+  // Checks if enough time has elapsed since the last successful timescale
+  // operation was done (i.e., accelerate or preemptive expand).
+  bool TimescaleAllowed() const {
+    return !timescale_countdown_ || timescale_countdown_->Finished();
+  }
+
+  // Checks if the current (filtered) buffer level is under the target level.
+  bool UnderTargetLevel() const;
+
+  // Checks if |timestamp_leap| is so long into the future that a reset due
+  // to exceeding kReinitAfterExpands will be done.
+  bool ReinitAfterExpands(uint32_t timestamp_leap) const;
+
+  // Checks if we still have not done enough expands to cover the distance from
+  // the last decoded packet to the next available packet, the distance beeing
+  // conveyed in |timestamp_leap|.
+  bool PacketTooEarly(uint32_t timestamp_leap) const;
+
+  // Checks if num_consecutive_expands_ >= kMaxWaitForPacket.
+  bool MaxWaitForPacket() const;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(DecisionLogicNormal);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_DECISION_LOGIC_NORMAL_H_
diff --git a/modules/audio_coding/neteq/decision_logic_unittest.cc b/modules/audio_coding/neteq/decision_logic_unittest.cc
new file mode 100644
index 0000000..1a7bab9
--- /dev/null
+++ b/modules/audio_coding/neteq/decision_logic_unittest.cc
@@ -0,0 +1,55 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for DecisionLogic class and derived classes.
+
+#include "modules/audio_coding/neteq/decision_logic.h"
+#include "modules/audio_coding/neteq/buffer_level_filter.h"
+#include "modules/audio_coding/neteq/decoder_database.h"
+#include "modules/audio_coding/neteq/delay_manager.h"
+#include "modules/audio_coding/neteq/delay_peak_detector.h"
+#include "modules/audio_coding/neteq/packet_buffer.h"
+#include "modules/audio_coding/neteq/tick_timer.h"
+#include "test/gtest.h"
+#include "test/mock_audio_decoder_factory.h"
+
+namespace webrtc {
+
+TEST(DecisionLogic, CreateAndDestroy) {
+  int fs_hz = 8000;
+  int output_size_samples = fs_hz / 100;  // Samples per 10 ms.
+  DecoderDatabase decoder_database(
+      new rtc::RefCountedObject<MockAudioDecoderFactory>);
+  TickTimer tick_timer;
+  PacketBuffer packet_buffer(10, &tick_timer);
+  DelayPeakDetector delay_peak_detector(&tick_timer);
+  DelayManager delay_manager(240, &delay_peak_detector, &tick_timer);
+  BufferLevelFilter buffer_level_filter;
+  DecisionLogic* logic = DecisionLogic::Create(
+      fs_hz, output_size_samples, kPlayoutOn, &decoder_database, packet_buffer,
+      &delay_manager, &buffer_level_filter, &tick_timer);
+  delete logic;
+  logic = DecisionLogic::Create(
+      fs_hz, output_size_samples, kPlayoutStreaming, &decoder_database,
+      packet_buffer, &delay_manager, &buffer_level_filter, &tick_timer);
+  delete logic;
+  logic = DecisionLogic::Create(
+      fs_hz, output_size_samples, kPlayoutFax, &decoder_database, packet_buffer,
+      &delay_manager, &buffer_level_filter, &tick_timer);
+  delete logic;
+  logic = DecisionLogic::Create(
+      fs_hz, output_size_samples, kPlayoutOff, &decoder_database, packet_buffer,
+      &delay_manager, &buffer_level_filter, &tick_timer);
+  delete logic;
+}
+
+// TODO(hlundin): Write more tests.
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/decoder_database.cc b/modules/audio_coding/neteq/decoder_database.cc
new file mode 100644
index 0000000..5ddaf04
--- /dev/null
+++ b/modules/audio_coding/neteq/decoder_database.cc
@@ -0,0 +1,366 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/decoder_database.h"
+
+#include <utility>  // pair
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+DecoderDatabase::DecoderDatabase(
+    const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory)
+    : active_decoder_type_(-1),
+      active_cng_decoder_type_(-1),
+      decoder_factory_(decoder_factory) {}
+
+DecoderDatabase::~DecoderDatabase() = default;
+
+DecoderDatabase::DecoderInfo::DecoderInfo(const SdpAudioFormat& audio_format,
+                                          AudioDecoderFactory* factory,
+                                          const std::string& codec_name)
+    : name_(codec_name),
+      audio_format_(audio_format),
+      factory_(factory),
+      external_decoder_(nullptr),
+      cng_decoder_(CngDecoder::Create(audio_format)),
+      subtype_(SubtypeFromFormat(audio_format)) {}
+
+DecoderDatabase::DecoderInfo::DecoderInfo(const SdpAudioFormat& audio_format,
+                                          AudioDecoderFactory* factory)
+    : DecoderInfo(audio_format, factory, audio_format.name) {}
+
+DecoderDatabase::DecoderInfo::DecoderInfo(NetEqDecoder ct,
+                                          AudioDecoderFactory* factory)
+    : DecoderInfo(*NetEqDecoderToSdpAudioFormat(ct), factory) {}
+
+DecoderDatabase::DecoderInfo::DecoderInfo(const SdpAudioFormat& audio_format,
+                                          AudioDecoder* ext_dec,
+                                          const std::string& codec_name)
+    : name_(codec_name),
+      audio_format_(audio_format),
+      factory_(nullptr),
+      external_decoder_(ext_dec),
+      subtype_(Subtype::kNormal) {
+  RTC_CHECK(ext_dec);
+}
+
+DecoderDatabase::DecoderInfo::DecoderInfo(DecoderInfo&&) = default;
+DecoderDatabase::DecoderInfo::~DecoderInfo() = default;
+
+bool DecoderDatabase::DecoderInfo::CanGetDecoder() const {
+  if (subtype_ == Subtype::kNormal && !external_decoder_ && !decoder_) {
+    // TODO(ossu): Keep a check here for now, since a number of tests create
+    // DecoderInfos without factories.
+    RTC_DCHECK(factory_);
+    return factory_->IsSupportedDecoder(audio_format_);
+  } else {
+    return true;
+  }
+}
+
+AudioDecoder* DecoderDatabase::DecoderInfo::GetDecoder() const {
+  if (subtype_ != Subtype::kNormal) {
+    // These are handled internally, so they have no AudioDecoder objects.
+    return nullptr;
+  }
+  if (external_decoder_) {
+    RTC_DCHECK(!decoder_);
+    RTC_DCHECK(!cng_decoder_);
+    return external_decoder_;
+  }
+  if (!decoder_) {
+    // TODO(ossu): Keep a check here for now, since a number of tests create
+    // DecoderInfos without factories.
+    RTC_DCHECK(factory_);
+    decoder_ = factory_->MakeAudioDecoder(audio_format_, rtc::nullopt);
+  }
+  RTC_DCHECK(decoder_) << "Failed to create: " << audio_format_;
+  return decoder_.get();
+}
+
+bool DecoderDatabase::DecoderInfo::IsType(const char* name) const {
+  return STR_CASE_CMP(audio_format_.name.c_str(), name) == 0;
+}
+
+bool DecoderDatabase::DecoderInfo::IsType(const std::string& name) const {
+  return IsType(name.c_str());
+}
+
+rtc::Optional<DecoderDatabase::DecoderInfo::CngDecoder>
+DecoderDatabase::DecoderInfo::CngDecoder::Create(const SdpAudioFormat& format) {
+  if (STR_CASE_CMP(format.name.c_str(), "CN") == 0) {
+    // CN has a 1:1 RTP clock rate to sample rate ratio.
+    const int sample_rate_hz = format.clockrate_hz;
+    RTC_DCHECK(sample_rate_hz == 8000 || sample_rate_hz == 16000 ||
+               sample_rate_hz == 32000 || sample_rate_hz == 48000);
+    return DecoderDatabase::DecoderInfo::CngDecoder{sample_rate_hz};
+  } else {
+    return rtc::nullopt;
+  }
+}
+
+DecoderDatabase::DecoderInfo::Subtype
+DecoderDatabase::DecoderInfo::SubtypeFromFormat(const SdpAudioFormat& format) {
+  if (STR_CASE_CMP(format.name.c_str(), "CN") == 0) {
+    return Subtype::kComfortNoise;
+  } else if (STR_CASE_CMP(format.name.c_str(), "telephone-event") == 0) {
+    return Subtype::kDtmf;
+  } else if (STR_CASE_CMP(format.name.c_str(), "red") == 0) {
+    return Subtype::kRed;
+  }
+
+  return Subtype::kNormal;
+}
+
+bool DecoderDatabase::Empty() const { return decoders_.empty(); }
+
+int DecoderDatabase::Size() const { return static_cast<int>(decoders_.size()); }
+
+void DecoderDatabase::Reset() {
+  decoders_.clear();
+  active_decoder_type_ = -1;
+  active_cng_decoder_type_ = -1;
+}
+
+std::vector<int> DecoderDatabase::SetCodecs(
+    const std::map<int, SdpAudioFormat>& codecs) {
+  // First collect all payload types that we'll remove or reassign, then remove
+  // them from the database.
+  std::vector<int> changed_payload_types;
+  for (const std::pair<uint8_t, const DecoderInfo&> kv : decoders_) {
+    auto i = codecs.find(kv.first);
+    if (i == codecs.end() || i->second != kv.second.GetFormat()) {
+      changed_payload_types.push_back(kv.first);
+    }
+  }
+  for (int pl_type : changed_payload_types) {
+    Remove(pl_type);
+  }
+
+  // Enter the new and changed payload type mappings into the database.
+  for (const auto& kv : codecs) {
+    const int& rtp_payload_type = kv.first;
+    const SdpAudioFormat& audio_format = kv.second;
+    RTC_DCHECK_GE(rtp_payload_type, 0);
+    RTC_DCHECK_LE(rtp_payload_type, 0x7f);
+    if (decoders_.count(rtp_payload_type) == 0) {
+      decoders_.insert(std::make_pair(
+          rtp_payload_type, DecoderInfo(audio_format, decoder_factory_.get())));
+    } else {
+      // The mapping for this payload type hasn't changed.
+    }
+  }
+
+  return changed_payload_types;
+}
+
+int DecoderDatabase::RegisterPayload(uint8_t rtp_payload_type,
+                                     NetEqDecoder codec_type,
+                                     const std::string& name) {
+  if (rtp_payload_type > 0x7F) {
+    return kInvalidRtpPayloadType;
+  }
+  if (codec_type == NetEqDecoder::kDecoderArbitrary) {
+    return kCodecNotSupported;  // Only supported through InsertExternal.
+  }
+  const auto opt_format = NetEqDecoderToSdpAudioFormat(codec_type);
+  if (!opt_format) {
+    return kCodecNotSupported;
+  }
+  DecoderInfo info(*opt_format, decoder_factory_, name);
+  if (!info.CanGetDecoder()) {
+    return kCodecNotSupported;
+  }
+  auto ret =
+      decoders_.insert(std::make_pair(rtp_payload_type, std::move(info)));
+  if (ret.second == false) {
+    // Database already contains a decoder with type |rtp_payload_type|.
+    return kDecoderExists;
+  }
+  return kOK;
+}
+
+int DecoderDatabase::RegisterPayload(int rtp_payload_type,
+                                     const SdpAudioFormat& audio_format) {
+  if (rtp_payload_type < 0 || rtp_payload_type > 0x7f) {
+    return kInvalidRtpPayloadType;
+  }
+  const auto ret = decoders_.insert(std::make_pair(
+      rtp_payload_type, DecoderInfo(audio_format, decoder_factory_.get())));
+  if (ret.second == false) {
+    // Database already contains a decoder with type |rtp_payload_type|.
+    return kDecoderExists;
+  }
+  return kOK;
+}
+
+int DecoderDatabase::InsertExternal(uint8_t rtp_payload_type,
+                                    NetEqDecoder codec_type,
+                                    const std::string& codec_name,
+                                    AudioDecoder* decoder) {
+  if (rtp_payload_type > 0x7F) {
+    return kInvalidRtpPayloadType;
+  }
+  if (!decoder) {
+    return kInvalidPointer;
+  }
+
+  const auto opt_db_format = NetEqDecoderToSdpAudioFormat(codec_type);
+  const SdpAudioFormat format = opt_db_format.value_or({"arbitrary", 0, 0});
+
+  std::pair<DecoderMap::iterator, bool> ret;
+  DecoderInfo info(format, decoder, codec_name);
+  ret = decoders_.insert(std::make_pair(rtp_payload_type, std::move(info)));
+  if (ret.second == false) {
+    // Database already contains a decoder with type |rtp_payload_type|.
+    return kDecoderExists;
+  }
+  return kOK;
+}
+
+int DecoderDatabase::Remove(uint8_t rtp_payload_type) {
+  if (decoders_.erase(rtp_payload_type) == 0) {
+    // No decoder with that |rtp_payload_type|.
+    return kDecoderNotFound;
+  }
+  if (active_decoder_type_ == rtp_payload_type) {
+    active_decoder_type_ = -1;  // No active decoder.
+  }
+  if (active_cng_decoder_type_ == rtp_payload_type) {
+    active_cng_decoder_type_ = -1;  // No active CNG decoder.
+  }
+  return kOK;
+}
+
+void DecoderDatabase::RemoveAll() {
+  decoders_.clear();
+  active_decoder_type_ = -1;      // No active decoder.
+  active_cng_decoder_type_ = -1;  // No active CNG decoder.
+}
+
+const DecoderDatabase::DecoderInfo* DecoderDatabase::GetDecoderInfo(
+    uint8_t rtp_payload_type) const {
+  DecoderMap::const_iterator it = decoders_.find(rtp_payload_type);
+  if (it == decoders_.end()) {
+    // Decoder not found.
+    return NULL;
+  }
+  return &it->second;
+}
+
+int DecoderDatabase::SetActiveDecoder(uint8_t rtp_payload_type,
+                                      bool* new_decoder) {
+  // Check that |rtp_payload_type| exists in the database.
+  const DecoderInfo *info = GetDecoderInfo(rtp_payload_type);
+  if (!info) {
+    // Decoder not found.
+    return kDecoderNotFound;
+  }
+  RTC_CHECK(!info->IsComfortNoise());
+  RTC_DCHECK(new_decoder);
+  *new_decoder = false;
+  if (active_decoder_type_ < 0) {
+    // This is the first active decoder.
+    *new_decoder = true;
+  } else if (active_decoder_type_ != rtp_payload_type) {
+    // Moving from one active decoder to another. Delete the first one.
+    const DecoderInfo *old_info = GetDecoderInfo(active_decoder_type_);
+    RTC_DCHECK(old_info);
+    old_info->DropDecoder();
+    *new_decoder = true;
+  }
+  active_decoder_type_ = rtp_payload_type;
+  return kOK;
+}
+
+AudioDecoder* DecoderDatabase::GetActiveDecoder() const {
+  if (active_decoder_type_ < 0) {
+    // No active decoder.
+    return NULL;
+  }
+  return GetDecoder(active_decoder_type_);
+}
+
+int DecoderDatabase::SetActiveCngDecoder(uint8_t rtp_payload_type) {
+  // Check that |rtp_payload_type| exists in the database.
+  const DecoderInfo *info = GetDecoderInfo(rtp_payload_type);
+  if (!info) {
+    // Decoder not found.
+    return kDecoderNotFound;
+  }
+  if (active_cng_decoder_type_ >= 0 &&
+      active_cng_decoder_type_ != rtp_payload_type) {
+    // Moving from one active CNG decoder to another. Delete the first one.
+    RTC_DCHECK(active_cng_decoder_);
+    active_cng_decoder_.reset();
+  }
+  active_cng_decoder_type_ = rtp_payload_type;
+  return kOK;
+}
+
+ComfortNoiseDecoder* DecoderDatabase::GetActiveCngDecoder() const {
+  if (active_cng_decoder_type_ < 0) {
+    // No active CNG decoder.
+    return NULL;
+  }
+  if (!active_cng_decoder_) {
+    active_cng_decoder_.reset(new ComfortNoiseDecoder);
+  }
+  return active_cng_decoder_.get();
+}
+
+AudioDecoder* DecoderDatabase::GetDecoder(uint8_t rtp_payload_type) const {
+  const DecoderInfo *info = GetDecoderInfo(rtp_payload_type);
+  return info ? info->GetDecoder() : nullptr;
+}
+
+bool DecoderDatabase::IsType(uint8_t rtp_payload_type, const char* name) const {
+  const DecoderInfo* info = GetDecoderInfo(rtp_payload_type);
+  return info && info->IsType(name);
+}
+
+bool DecoderDatabase::IsType(uint8_t rtp_payload_type,
+                             const std::string& name) const {
+  return IsType(rtp_payload_type, name.c_str());
+}
+
+bool DecoderDatabase::IsComfortNoise(uint8_t rtp_payload_type) const {
+  const DecoderInfo *info = GetDecoderInfo(rtp_payload_type);
+  return info && info->IsComfortNoise();
+}
+
+bool DecoderDatabase::IsDtmf(uint8_t rtp_payload_type) const {
+  const DecoderInfo *info = GetDecoderInfo(rtp_payload_type);
+  return info && info->IsDtmf();
+}
+
+bool DecoderDatabase::IsRed(uint8_t rtp_payload_type) const {
+  const DecoderInfo *info = GetDecoderInfo(rtp_payload_type);
+  return info && info->IsRed();
+}
+
+int DecoderDatabase::CheckPayloadTypes(const PacketList& packet_list) const {
+  PacketList::const_iterator it;
+  for (it = packet_list.begin(); it != packet_list.end(); ++it) {
+    if (!GetDecoderInfo(it->payload_type)) {
+      // Payload type is not found.
+      RTC_LOG(LS_WARNING) << "CheckPayloadTypes: unknown RTP payload type "
+                          << static_cast<int>(it->payload_type);
+      return kDecoderNotFound;
+    }
+  }
+  return kOK;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/decoder_database.h b/modules/audio_coding/neteq/decoder_database.h
new file mode 100644
index 0000000..5f0d173
--- /dev/null
+++ b/modules/audio_coding/neteq/decoder_database.h
@@ -0,0 +1,250 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_DECODER_DATABASE_H_
+#define MODULES_AUDIO_CODING_NETEQ_DECODER_DATABASE_H_
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "api/audio_codecs/audio_format.h"
+#include "common_types.h"  // NOLINT(build/include)  // NULL
+#include "modules/audio_coding/codecs/cng/webrtc_cng.h"
+#include "modules/audio_coding/neteq/neteq_decoder_enum.h"
+#include "modules/audio_coding/neteq/packet.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class DecoderDatabase {
+ public:
+  enum DatabaseReturnCodes {
+    kOK = 0,
+    kInvalidRtpPayloadType = -1,
+    kCodecNotSupported = -2,
+    kInvalidSampleRate = -3,
+    kDecoderExists = -4,
+    kDecoderNotFound = -5,
+    kInvalidPointer = -6
+  };
+
+  // Class that stores decoder info in the database.
+  class DecoderInfo {
+   public:
+    DecoderInfo(const SdpAudioFormat& audio_format,
+                AudioDecoderFactory* factory,
+                const std::string& codec_name);
+    explicit DecoderInfo(const SdpAudioFormat& audio_format,
+                         AudioDecoderFactory* factory = nullptr);
+    explicit DecoderInfo(NetEqDecoder ct,
+                         AudioDecoderFactory* factory = nullptr);
+    DecoderInfo(const SdpAudioFormat& audio_format,
+                AudioDecoder* ext_dec,
+                const std::string& codec_name);
+    DecoderInfo(DecoderInfo&&);
+    ~DecoderInfo();
+
+    // Was this info object created with a specification that allows us to
+    // actually produce a decoder?
+    bool CanGetDecoder() const;
+
+    // Get the AudioDecoder object, creating it first if necessary.
+    AudioDecoder* GetDecoder() const;
+
+    // Delete the AudioDecoder object, unless it's external. (This means we can
+    // always recreate it later if we need it.)
+    void DropDecoder() const { decoder_.reset(); }
+
+    int SampleRateHz() const {
+      if (IsDtmf()) {
+        // DTMF has a 1:1 mapping between clock rate and sample rate.
+        return audio_format_.clockrate_hz;
+      }
+      const AudioDecoder* decoder = GetDecoder();
+      RTC_DCHECK_EQ(1, !!decoder + !!cng_decoder_);
+      return decoder ? decoder->SampleRateHz() : cng_decoder_->sample_rate_hz;
+    }
+
+    const SdpAudioFormat& GetFormat() const { return audio_format_; }
+
+    // Returns true if the decoder's format is comfort noise.
+    bool IsComfortNoise() const {
+      RTC_DCHECK_EQ(!!cng_decoder_, subtype_ == Subtype::kComfortNoise);
+      return subtype_ == Subtype::kComfortNoise;
+    }
+
+    // Returns true if the decoder's format is DTMF.
+    bool IsDtmf() const {
+      return subtype_ == Subtype::kDtmf;
+    }
+
+    // Returns true if the decoder's format is RED.
+    bool IsRed() const {
+      return subtype_ == Subtype::kRed;
+    }
+
+    // Returns true if the decoder's format is named |name|.
+    bool IsType(const char* name) const;
+    // Returns true if the decoder's format is named |name|.
+    bool IsType(const std::string& name) const;
+
+    const std::string& get_name() const { return name_; }
+
+   private:
+    // TODO(ossu): |name_| is kept here while we retain the old external
+    //             decoder interface. Remove this once using an
+    //             AudioDecoderFactory has supplanted the old functionality.
+    const std::string name_;
+
+    const SdpAudioFormat audio_format_;
+    AudioDecoderFactory* const factory_;
+    mutable std::unique_ptr<AudioDecoder> decoder_;
+
+    // Set iff this is an external decoder.
+    AudioDecoder* const external_decoder_;
+
+    // Set iff this is a comfort noise decoder.
+    struct CngDecoder {
+      static rtc::Optional<CngDecoder> Create(const SdpAudioFormat& format);
+      int sample_rate_hz;
+    };
+    const rtc::Optional<CngDecoder> cng_decoder_;
+
+    enum class Subtype : int8_t {
+      kNormal,
+      kComfortNoise,
+      kDtmf,
+      kRed
+    };
+
+    static Subtype SubtypeFromFormat(const SdpAudioFormat& format);
+
+    const Subtype subtype_;
+  };
+
+  // Maximum value for 8 bits, and an invalid RTP payload type (since it is
+  // only 7 bits).
+  static const uint8_t kRtpPayloadTypeError = 0xFF;
+
+  DecoderDatabase(
+      const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory);
+
+  virtual ~DecoderDatabase();
+
+  // Returns true if the database is empty.
+  virtual bool Empty() const;
+
+  // Returns the number of decoders registered in the database.
+  virtual int Size() const;
+
+  // Resets the database, erasing all registered payload types, and deleting
+  // any AudioDecoder objects that were not externally created and inserted
+  // using InsertExternal().
+  virtual void Reset();
+
+  // Replaces the existing set of decoders with the given set. Returns the
+  // payload types that were reassigned or removed while doing so.
+  virtual std::vector<int> SetCodecs(
+      const std::map<int, SdpAudioFormat>& codecs);
+
+  // Registers |rtp_payload_type| as a decoder of type |codec_type|. The |name|
+  // is only used to populate the name field in the DecoderInfo struct in the
+  // database, and can be arbitrary (including empty). Returns kOK on success;
+  // otherwise an error code.
+  virtual int RegisterPayload(uint8_t rtp_payload_type,
+                              NetEqDecoder codec_type,
+                              const std::string& name);
+
+  // Registers a decoder for the given payload type. Returns kOK on success;
+  // otherwise an error code.
+  virtual int RegisterPayload(int rtp_payload_type,
+                              const SdpAudioFormat& audio_format);
+
+  // Registers an externally created AudioDecoder object, and associates it
+  // as a decoder of type |codec_type| with |rtp_payload_type|.
+  virtual int InsertExternal(uint8_t rtp_payload_type,
+                             NetEqDecoder codec_type,
+                             const std::string& codec_name,
+                             AudioDecoder* decoder);
+
+  // Removes the entry for |rtp_payload_type| from the database.
+  // Returns kDecoderNotFound or kOK depending on the outcome of the operation.
+  virtual int Remove(uint8_t rtp_payload_type);
+
+  // Remove all entries.
+  virtual void RemoveAll();
+
+  // Returns a pointer to the DecoderInfo struct for |rtp_payload_type|. If
+  // no decoder is registered with that |rtp_payload_type|, NULL is returned.
+  virtual const DecoderInfo* GetDecoderInfo(uint8_t rtp_payload_type) const;
+
+  // Sets the active decoder to be |rtp_payload_type|. If this call results in a
+  // change of active decoder, |new_decoder| is set to true. The previous active
+  // decoder's AudioDecoder object is deleted.
+  virtual int SetActiveDecoder(uint8_t rtp_payload_type, bool* new_decoder);
+
+  // Returns the current active decoder, or NULL if no active decoder exists.
+  virtual AudioDecoder* GetActiveDecoder() const;
+
+  // Sets the active comfort noise decoder to be |rtp_payload_type|. If this
+  // call results in a change of active comfort noise decoder, the previous
+  // active decoder's AudioDecoder object is deleted.
+  virtual int SetActiveCngDecoder(uint8_t rtp_payload_type);
+
+  // Returns the current active comfort noise decoder, or NULL if no active
+  // comfort noise decoder exists.
+  virtual ComfortNoiseDecoder* GetActiveCngDecoder() const;
+
+  // The following are utility methods: they will look up DecoderInfo through
+  // GetDecoderInfo and call the respective method on that info object, if it
+  // exists.
+
+  // Returns a pointer to the AudioDecoder object associated with
+  // |rtp_payload_type|, or NULL if none is registered. If the AudioDecoder
+  // object does not exist for that decoder, the object is created.
+  AudioDecoder* GetDecoder(uint8_t rtp_payload_type) const;
+
+  // Returns if |rtp_payload_type| is registered with a format named |name|.
+  bool IsType(uint8_t rtp_payload_type, const char* name) const;
+
+  // Returns if |rtp_payload_type| is registered with a format named |name|.
+  bool IsType(uint8_t rtp_payload_type, const std::string& name) const;
+
+  // Returns true if |rtp_payload_type| is registered as comfort noise.
+  bool IsComfortNoise(uint8_t rtp_payload_type) const;
+
+  // Returns true if |rtp_payload_type| is registered as DTMF.
+  bool IsDtmf(uint8_t rtp_payload_type) const;
+
+  // Returns true if |rtp_payload_type| is registered as RED.
+  bool IsRed(uint8_t rtp_payload_type) const;
+
+  // Returns kOK if all packets in |packet_list| carry payload types that are
+  // registered in the database. Otherwise, returns kDecoderNotFound.
+  int CheckPayloadTypes(const PacketList& packet_list) const;
+
+ private:
+  typedef std::map<uint8_t, DecoderInfo> DecoderMap;
+
+  DecoderMap decoders_;
+  int active_decoder_type_;
+  int active_cng_decoder_type_;
+  mutable std::unique_ptr<ComfortNoiseDecoder> active_cng_decoder_;
+  rtc::scoped_refptr<AudioDecoderFactory> decoder_factory_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(DecoderDatabase);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_DECODER_DATABASE_H_
diff --git a/modules/audio_coding/neteq/decoder_database_unittest.cc b/modules/audio_coding/neteq/decoder_database_unittest.cc
new file mode 100644
index 0000000..7f9b38e
--- /dev/null
+++ b/modules/audio_coding/neteq/decoder_database_unittest.cc
@@ -0,0 +1,296 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/decoder_database.h"
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include <string>
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "rtc_base/refcountedobject.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_audio_decoder.h"
+#include "test/mock_audio_decoder_factory.h"
+
+using testing::_;
+using testing::Invoke;
+
+namespace webrtc {
+
+TEST(DecoderDatabase, CreateAndDestroy) {
+  DecoderDatabase db(new rtc::RefCountedObject<MockAudioDecoderFactory>);
+  EXPECT_EQ(0, db.Size());
+  EXPECT_TRUE(db.Empty());
+}
+
+TEST(DecoderDatabase, InsertAndRemove) {
+  rtc::scoped_refptr<MockAudioDecoderFactory> factory(
+      new rtc::RefCountedObject<MockAudioDecoderFactory>);
+  EXPECT_CALL(*factory, IsSupportedDecoder(_))
+      .WillOnce(Invoke([](const SdpAudioFormat& format) {
+        EXPECT_EQ("pcmu", format.name);
+        return true;
+      }));
+  DecoderDatabase db(factory);
+  const uint8_t kPayloadType = 0;
+  const std::string kCodecName = "Robert\'); DROP TABLE Students;";
+  EXPECT_EQ(
+      DecoderDatabase::kOK,
+      db.RegisterPayload(kPayloadType, NetEqDecoder::kDecoderPCMu, kCodecName));
+  EXPECT_EQ(1, db.Size());
+  EXPECT_FALSE(db.Empty());
+  EXPECT_EQ(DecoderDatabase::kOK, db.Remove(kPayloadType));
+  EXPECT_EQ(0, db.Size());
+  EXPECT_TRUE(db.Empty());
+}
+
+TEST(DecoderDatabase, InsertAndRemoveAll) {
+  rtc::scoped_refptr<MockAudioDecoderFactory> factory(
+      new rtc::RefCountedObject<MockAudioDecoderFactory>);
+  EXPECT_CALL(*factory, IsSupportedDecoder(_))
+      .WillOnce(Invoke([](const SdpAudioFormat& format) {
+        EXPECT_EQ("pcmu", format.name);
+        return true;
+      }))
+      .WillOnce(Invoke([](const SdpAudioFormat& format) {
+        EXPECT_EQ("pcma", format.name);
+        return true;
+      }));
+  DecoderDatabase db(factory);
+  const std::string kCodecName1 = "Robert\'); DROP TABLE Students;";
+  const std::string kCodecName2 = "https://xkcd.com/327/";
+  EXPECT_EQ(DecoderDatabase::kOK,
+            db.RegisterPayload(0, NetEqDecoder::kDecoderPCMu, kCodecName1));
+  EXPECT_EQ(DecoderDatabase::kOK,
+            db.RegisterPayload(1, NetEqDecoder::kDecoderPCMa, kCodecName2));
+  EXPECT_EQ(2, db.Size());
+  EXPECT_FALSE(db.Empty());
+  db.RemoveAll();
+  EXPECT_EQ(0, db.Size());
+  EXPECT_TRUE(db.Empty());
+}
+
+TEST(DecoderDatabase, GetDecoderInfo) {
+  rtc::scoped_refptr<MockAudioDecoderFactory> factory(
+      new rtc::RefCountedObject<MockAudioDecoderFactory>);
+  EXPECT_CALL(*factory, IsSupportedDecoder(_))
+      .WillOnce(Invoke([](const SdpAudioFormat& format) {
+        EXPECT_EQ("pcmu", format.name);
+        return true;
+      }));
+  auto* decoder = new MockAudioDecoder;
+  EXPECT_CALL(*factory, MakeAudioDecoderMock(_, _, _))
+      .WillOnce(Invoke([decoder](const SdpAudioFormat& format,
+                                 rtc::Optional<AudioCodecPairId> codec_pair_id,
+                                 std::unique_ptr<AudioDecoder>* dec) {
+        EXPECT_EQ("pcmu", format.name);
+        dec->reset(decoder);
+      }));
+  DecoderDatabase db(factory);
+  const uint8_t kPayloadType = 0;
+  const std::string kCodecName = "Robert\'); DROP TABLE Students;";
+  EXPECT_EQ(
+      DecoderDatabase::kOK,
+      db.RegisterPayload(kPayloadType, NetEqDecoder::kDecoderPCMu, kCodecName));
+  const DecoderDatabase::DecoderInfo* info;
+  info = db.GetDecoderInfo(kPayloadType);
+  ASSERT_TRUE(info != NULL);
+  EXPECT_TRUE(info->IsType("pcmu"));
+  EXPECT_EQ(kCodecName, info->get_name());
+  EXPECT_EQ(decoder, db.GetDecoder(kPayloadType));
+  info = db.GetDecoderInfo(kPayloadType + 1);  // Other payload type.
+  EXPECT_TRUE(info == NULL);  // Should not be found.
+}
+
+TEST(DecoderDatabase, GetDecoder) {
+  DecoderDatabase db(CreateBuiltinAudioDecoderFactory());
+  const uint8_t kPayloadType = 0;
+  const std::string kCodecName = "Robert\'); DROP TABLE Students;";
+  EXPECT_EQ(DecoderDatabase::kOK,
+            db.RegisterPayload(kPayloadType, NetEqDecoder::kDecoderPCM16B,
+                               kCodecName));
+  AudioDecoder* dec = db.GetDecoder(kPayloadType);
+  ASSERT_TRUE(dec != NULL);
+}
+
+TEST(DecoderDatabase, TypeTests) {
+  rtc::scoped_refptr<MockAudioDecoderFactory> factory(
+      new rtc::RefCountedObject<MockAudioDecoderFactory>);
+  EXPECT_CALL(*factory, IsSupportedDecoder(_))
+      .WillOnce(Invoke([](const SdpAudioFormat& format) {
+        EXPECT_EQ("pcmu", format.name);
+        return true;
+      }));
+  DecoderDatabase db(factory);
+  const uint8_t kPayloadTypePcmU = 0;
+  const uint8_t kPayloadTypeCng = 13;
+  const uint8_t kPayloadTypeDtmf = 100;
+  const uint8_t kPayloadTypeRed = 101;
+  const uint8_t kPayloadNotUsed = 102;
+  // Load into database.
+  EXPECT_EQ(
+      DecoderDatabase::kOK,
+      db.RegisterPayload(kPayloadTypePcmU, NetEqDecoder::kDecoderPCMu, "pcmu"));
+  EXPECT_EQ(DecoderDatabase::kOK,
+            db.RegisterPayload(kPayloadTypeCng, NetEqDecoder::kDecoderCNGnb,
+                               "cng-nb"));
+  EXPECT_EQ(
+      DecoderDatabase::kOK,
+      db.RegisterPayload(kPayloadTypeDtmf, NetEqDecoder::kDecoderAVT, "avt"));
+  EXPECT_EQ(
+      DecoderDatabase::kOK,
+      db.RegisterPayload(kPayloadTypeRed, NetEqDecoder::kDecoderRED, "red"));
+  EXPECT_EQ(4, db.Size());
+  // Test.
+  EXPECT_FALSE(db.IsComfortNoise(kPayloadNotUsed));
+  EXPECT_FALSE(db.IsDtmf(kPayloadNotUsed));
+  EXPECT_FALSE(db.IsRed(kPayloadNotUsed));
+  EXPECT_FALSE(db.IsComfortNoise(kPayloadTypePcmU));
+  EXPECT_FALSE(db.IsDtmf(kPayloadTypePcmU));
+  EXPECT_FALSE(db.IsRed(kPayloadTypePcmU));
+  EXPECT_FALSE(db.IsType(kPayloadTypePcmU, "isac"));
+  EXPECT_TRUE(db.IsType(kPayloadTypePcmU, "pcmu"));
+  EXPECT_TRUE(db.IsComfortNoise(kPayloadTypeCng));
+  EXPECT_TRUE(db.IsDtmf(kPayloadTypeDtmf));
+  EXPECT_TRUE(db.IsRed(kPayloadTypeRed));
+}
+
+TEST(DecoderDatabase, ExternalDecoder) {
+  DecoderDatabase db(new rtc::RefCountedObject<MockAudioDecoderFactory>);
+  const uint8_t kPayloadType = 0;
+  const std::string kCodecName = "Robert\'); DROP TABLE Students;";
+  MockAudioDecoder decoder;
+  // Load into database.
+  EXPECT_EQ(DecoderDatabase::kOK,
+            db.InsertExternal(kPayloadType, NetEqDecoder::kDecoderPCMu,
+                              kCodecName, &decoder));
+  EXPECT_EQ(1, db.Size());
+  // Get decoder and make sure we get the external one.
+  EXPECT_EQ(&decoder, db.GetDecoder(kPayloadType));
+  // Get the decoder info struct and check it too.
+  const DecoderDatabase::DecoderInfo* info;
+  info = db.GetDecoderInfo(kPayloadType);
+  ASSERT_TRUE(info != NULL);
+  EXPECT_TRUE(info->IsType("pcmu"));
+  EXPECT_EQ(info->get_name(), kCodecName);
+  EXPECT_EQ(kCodecName, info->get_name());
+  // Expect not to delete the decoder when removing it from the database, since
+  // it was declared externally.
+  EXPECT_CALL(decoder, Die()).Times(0);
+  EXPECT_EQ(DecoderDatabase::kOK, db.Remove(kPayloadType));
+  EXPECT_TRUE(db.Empty());
+
+  EXPECT_CALL(decoder, Die()).Times(1);  // Will be called when |db| is deleted.
+}
+
+TEST(DecoderDatabase, CheckPayloadTypes) {
+  constexpr int kNumPayloads = 10;
+  rtc::scoped_refptr<MockAudioDecoderFactory> factory(
+      new rtc::RefCountedObject<MockAudioDecoderFactory>);
+  EXPECT_CALL(*factory, IsSupportedDecoder(_))
+      .Times(kNumPayloads)
+      .WillRepeatedly(Invoke([](const SdpAudioFormat& format) {
+        EXPECT_EQ("pcmu", format.name);
+        return true;
+      }));
+  DecoderDatabase db(factory);
+  // Load a number of payloads into the database. Payload types are 0, 1, ...,
+  // while the decoder type is the same for all payload types (this does not
+  // matter for the test).
+  for (uint8_t payload_type = 0; payload_type < kNumPayloads; ++payload_type) {
+    EXPECT_EQ(DecoderDatabase::kOK,
+              db.RegisterPayload(payload_type, NetEqDecoder::kDecoderPCMu, ""));
+  }
+  PacketList packet_list;
+  for (int i = 0; i < kNumPayloads + 1; ++i) {
+    // Create packet with payload type |i|. The last packet will have a payload
+    // type that is not registered in the decoder database.
+    Packet packet;
+    packet.payload_type = i;
+    packet_list.push_back(std::move(packet));
+  }
+
+  // Expect to return false, since the last packet is of an unknown type.
+  EXPECT_EQ(DecoderDatabase::kDecoderNotFound,
+            db.CheckPayloadTypes(packet_list));
+
+  packet_list.pop_back();  // Remove the unknown one.
+
+  EXPECT_EQ(DecoderDatabase::kOK, db.CheckPayloadTypes(packet_list));
+
+  // Delete all packets.
+  PacketList::iterator it = packet_list.begin();
+  while (it != packet_list.end()) {
+    it = packet_list.erase(it);
+  }
+}
+
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+#define IF_ISAC(x) x
+#else
+#define IF_ISAC(x) DISABLED_##x
+#endif
+
+// Test the methods for setting and getting active speech and CNG decoders.
+TEST(DecoderDatabase, IF_ISAC(ActiveDecoders)) {
+  DecoderDatabase db(CreateBuiltinAudioDecoderFactory());
+  // Load payload types.
+  ASSERT_EQ(DecoderDatabase::kOK,
+            db.RegisterPayload(0, NetEqDecoder::kDecoderPCMu, "pcmu"));
+  ASSERT_EQ(DecoderDatabase::kOK,
+            db.RegisterPayload(103, NetEqDecoder::kDecoderISAC, "isac"));
+  ASSERT_EQ(DecoderDatabase::kOK,
+            db.RegisterPayload(13, NetEqDecoder::kDecoderCNGnb, "cng-nb"));
+  // Verify that no decoders are active from the start.
+  EXPECT_EQ(NULL, db.GetActiveDecoder());
+  EXPECT_EQ(NULL, db.GetActiveCngDecoder());
+
+  // Set active speech codec.
+  bool changed;  // Should be true when the active decoder changed.
+  EXPECT_EQ(DecoderDatabase::kOK, db.SetActiveDecoder(0, &changed));
+  EXPECT_TRUE(changed);
+  AudioDecoder* decoder = db.GetActiveDecoder();
+  ASSERT_FALSE(decoder == NULL);  // Should get a decoder here.
+
+  // Set the same again. Expect no change.
+  EXPECT_EQ(DecoderDatabase::kOK, db.SetActiveDecoder(0, &changed));
+  EXPECT_FALSE(changed);
+  decoder = db.GetActiveDecoder();
+  ASSERT_FALSE(decoder == NULL);  // Should get a decoder here.
+
+  // Change active decoder.
+  EXPECT_EQ(DecoderDatabase::kOK, db.SetActiveDecoder(103, &changed));
+  EXPECT_TRUE(changed);
+  decoder = db.GetActiveDecoder();
+  ASSERT_FALSE(decoder == NULL);  // Should get a decoder here.
+
+  // Remove the active decoder, and verify that the active becomes NULL.
+  EXPECT_EQ(DecoderDatabase::kOK, db.Remove(103));
+  EXPECT_EQ(NULL, db.GetActiveDecoder());
+
+  // Set active CNG codec.
+  EXPECT_EQ(DecoderDatabase::kOK, db.SetActiveCngDecoder(13));
+  ComfortNoiseDecoder* cng = db.GetActiveCngDecoder();
+  ASSERT_FALSE(cng == NULL);  // Should get a decoder here.
+
+  // Remove the active CNG decoder, and verify that the active becomes NULL.
+  EXPECT_EQ(DecoderDatabase::kOK, db.Remove(13));
+  EXPECT_EQ(NULL, db.GetActiveCngDecoder());
+
+  // Try to set non-existing codecs as active.
+  EXPECT_EQ(DecoderDatabase::kDecoderNotFound,
+            db.SetActiveDecoder(17, &changed));
+  EXPECT_EQ(DecoderDatabase::kDecoderNotFound,
+            db.SetActiveCngDecoder(17));
+}
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/defines.h b/modules/audio_coding/neteq/defines.h
new file mode 100644
index 0000000..496a36d
--- /dev/null
+++ b/modules/audio_coding/neteq/defines.h
@@ -0,0 +1,52 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_DEFINES_H_
+#define MODULES_AUDIO_CODING_NETEQ_DEFINES_H_
+
+namespace webrtc {
+
+enum Operations {
+  kNormal = 0,
+  kMerge,
+  kExpand,
+  kAccelerate,
+  kFastAccelerate,
+  kPreemptiveExpand,
+  kRfc3389Cng,
+  kRfc3389CngNoPacket,
+  kCodecInternalCng,
+  kDtmf,
+  kAlternativePlc,
+  kAlternativePlcIncreaseTimestamp,
+  kAudioRepetition,
+  kAudioRepetitionIncreaseTimestamp,
+  kUndefined = -1
+};
+
+enum Modes {
+  kModeNormal = 0,
+  kModeExpand,
+  kModeMerge,
+  kModeAccelerateSuccess,
+  kModeAccelerateLowEnergy,
+  kModeAccelerateFail,
+  kModePreemptiveExpandSuccess,
+  kModePreemptiveExpandLowEnergy,
+  kModePreemptiveExpandFail,
+  kModeRfc3389Cng,
+  kModeCodecInternalCng,
+  kModeDtmf,
+  kModeError,
+  kModeUndefined = -1
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_DEFINES_H_
diff --git a/modules/audio_coding/neteq/delay_manager.cc b/modules/audio_coding/neteq/delay_manager.cc
new file mode 100644
index 0000000..b70131d
--- /dev/null
+++ b/modules/audio_coding/neteq/delay_manager.cc
@@ -0,0 +1,483 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/delay_manager.h"
+
+#include <assert.h>
+#include <math.h>
+
+#include <algorithm>  // max, min
+#include <numeric>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/neteq/delay_peak_detector.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+DelayManager::DelayManager(size_t max_packets_in_buffer,
+                           DelayPeakDetector* peak_detector,
+                           const TickTimer* tick_timer)
+    : first_packet_received_(false),
+      max_packets_in_buffer_(max_packets_in_buffer),
+      iat_vector_(kMaxIat + 1, 0),
+      iat_factor_(0),
+      tick_timer_(tick_timer),
+      base_target_level_(4),                   // In Q0 domain.
+      target_level_(base_target_level_ << 8),  // In Q8 domain.
+      packet_len_ms_(0),
+      streaming_mode_(false),
+      last_seq_no_(0),
+      last_timestamp_(0),
+      minimum_delay_ms_(0),
+      least_required_delay_ms_(target_level_),
+      maximum_delay_ms_(target_level_),
+      iat_cumulative_sum_(0),
+      max_iat_cumulative_sum_(0),
+      peak_detector_(*peak_detector),
+      last_pack_cng_or_dtmf_(1),
+      frame_length_change_experiment_(
+          field_trial::IsEnabled("WebRTC-Audio-NetEqFramelengthExperiment")) {
+  assert(peak_detector);  // Should never be NULL.
+  Reset();
+}
+
+DelayManager::~DelayManager() {}
+
+const DelayManager::IATVector& DelayManager::iat_vector() const {
+  return iat_vector_;
+}
+
+// Set the histogram vector to an exponentially decaying distribution
+// iat_vector_[i] = 0.5^(i+1), i = 0, 1, 2, ...
+// iat_vector_ is in Q30.
+void DelayManager::ResetHistogram() {
+  // Set temp_prob to (slightly more than) 1 in Q14. This ensures that the sum
+  // of iat_vector_ is 1.
+  uint16_t temp_prob = 0x4002;  // 16384 + 2 = 100000000000010 binary.
+  IATVector::iterator it = iat_vector_.begin();
+  for (; it < iat_vector_.end(); it++) {
+    temp_prob >>= 1;
+    (*it) = temp_prob << 16;
+  }
+  base_target_level_ = 4;
+  target_level_ = base_target_level_ << 8;
+}
+
+int DelayManager::Update(uint16_t sequence_number,
+                         uint32_t timestamp,
+                         int sample_rate_hz) {
+  if (sample_rate_hz <= 0) {
+    return -1;
+  }
+
+  if (!first_packet_received_) {
+    // Prepare for next packet arrival.
+    packet_iat_stopwatch_ = tick_timer_->GetNewStopwatch();
+    last_seq_no_ = sequence_number;
+    last_timestamp_ = timestamp;
+    first_packet_received_ = true;
+    return 0;
+  }
+
+  // Try calculating packet length from current and previous timestamps.
+  int packet_len_ms;
+  if (!IsNewerTimestamp(timestamp, last_timestamp_) ||
+      !IsNewerSequenceNumber(sequence_number, last_seq_no_)) {
+    // Wrong timestamp or sequence order; use stored value.
+    packet_len_ms = packet_len_ms_;
+  } else {
+    // Calculate timestamps per packet and derive packet length in ms.
+    int64_t packet_len_samp =
+        static_cast<uint32_t>(timestamp - last_timestamp_) /
+        static_cast<uint16_t>(sequence_number - last_seq_no_);
+    packet_len_ms =
+        rtc::saturated_cast<int>(1000 * packet_len_samp / sample_rate_hz);
+  }
+
+  if (packet_len_ms > 0) {
+    // Cannot update statistics unless |packet_len_ms| is valid.
+    // Calculate inter-arrival time (IAT) in integer "packet times"
+    // (rounding down). This is the value used as index to the histogram
+    // vector |iat_vector_|.
+    int iat_packets = packet_iat_stopwatch_->ElapsedMs() / packet_len_ms;
+
+    if (streaming_mode_) {
+      UpdateCumulativeSums(packet_len_ms, sequence_number);
+    }
+
+    // Check for discontinuous packet sequence and re-ordering.
+    if (IsNewerSequenceNumber(sequence_number, last_seq_no_ + 1)) {
+      // Compensate for gap in the sequence numbers. Reduce IAT with the
+      // expected extra time due to lost packets, but ensure that the IAT is
+      // not negative.
+      iat_packets -= static_cast<uint16_t>(sequence_number - last_seq_no_ - 1);
+      iat_packets = std::max(iat_packets, 0);
+    } else if (!IsNewerSequenceNumber(sequence_number, last_seq_no_)) {
+      iat_packets += static_cast<uint16_t>(last_seq_no_ + 1 - sequence_number);
+    }
+
+    // Saturate IAT at maximum value.
+    const int max_iat = kMaxIat;
+    iat_packets = std::min(iat_packets, max_iat);
+    UpdateHistogram(iat_packets);
+    // Calculate new |target_level_| based on updated statistics.
+    target_level_ = CalculateTargetLevel(iat_packets);
+    if (streaming_mode_) {
+      target_level_ = std::max(target_level_, max_iat_cumulative_sum_);
+    }
+
+    LimitTargetLevel();
+  }  // End if (packet_len_ms > 0).
+
+  // Prepare for next packet arrival.
+  packet_iat_stopwatch_ = tick_timer_->GetNewStopwatch();
+  last_seq_no_ = sequence_number;
+  last_timestamp_ = timestamp;
+  return 0;
+}
+
+void DelayManager::UpdateCumulativeSums(int packet_len_ms,
+                                        uint16_t sequence_number) {
+  // Calculate IAT in Q8, including fractions of a packet (i.e., more
+  // accurate than |iat_packets|.
+  int iat_packets_q8 =
+      (packet_iat_stopwatch_->ElapsedMs() << 8) / packet_len_ms;
+  // Calculate cumulative sum IAT with sequence number compensation. The sum
+  // is zero if there is no clock-drift.
+  iat_cumulative_sum_ += (iat_packets_q8 -
+      (static_cast<int>(sequence_number - last_seq_no_) << 8));
+  // Subtract drift term.
+  iat_cumulative_sum_ -= kCumulativeSumDrift;
+  // Ensure not negative.
+  iat_cumulative_sum_ = std::max(iat_cumulative_sum_, 0);
+  if (iat_cumulative_sum_ > max_iat_cumulative_sum_) {
+    // Found a new maximum.
+    max_iat_cumulative_sum_ = iat_cumulative_sum_;
+    max_iat_stopwatch_ = tick_timer_->GetNewStopwatch();
+  }
+  if (max_iat_stopwatch_->ElapsedMs() > kMaxStreamingPeakPeriodMs) {
+    // Too long since the last maximum was observed; decrease max value.
+    max_iat_cumulative_sum_ -= kCumulativeSumDrift;
+  }
+}
+
+// Each element in the vector is first multiplied by the forgetting factor
+// |iat_factor_|. Then the vector element indicated by |iat_packets| is then
+// increased (additive) by 1 - |iat_factor_|. This way, the probability of
+// |iat_packets| is slightly increased, while the sum of the histogram remains
+// constant (=1).
+// Due to inaccuracies in the fixed-point arithmetic, the histogram may no
+// longer sum up to 1 (in Q30) after the update. To correct this, a correction
+// term is added or subtracted from the first element (or elements) of the
+// vector.
+// The forgetting factor |iat_factor_| is also updated. When the DelayManager
+// is reset, the factor is set to 0 to facilitate rapid convergence in the
+// beginning. With each update of the histogram, the factor is increased towards
+// the steady-state value |kIatFactor_|.
+void DelayManager::UpdateHistogram(size_t iat_packets) {
+  assert(iat_packets < iat_vector_.size());
+  int vector_sum = 0;  // Sum up the vector elements as they are processed.
+  // Multiply each element in |iat_vector_| with |iat_factor_|.
+  for (IATVector::iterator it = iat_vector_.begin();
+      it != iat_vector_.end(); ++it) {
+    *it = (static_cast<int64_t>(*it) * iat_factor_) >> 15;
+    vector_sum += *it;
+  }
+
+  // Increase the probability for the currently observed inter-arrival time
+  // by 1 - |iat_factor_|. The factor is in Q15, |iat_vector_| in Q30.
+  // Thus, left-shift 15 steps to obtain result in Q30.
+  iat_vector_[iat_packets] += (32768 - iat_factor_) << 15;
+  vector_sum += (32768 - iat_factor_) << 15;  // Add to vector sum.
+
+  // |iat_vector_| should sum up to 1 (in Q30), but it may not due to
+  // fixed-point rounding errors.
+  vector_sum -= 1 << 30;  // Should be zero. Compensate if not.
+  if (vector_sum != 0) {
+    // Modify a few values early in |iat_vector_|.
+    int flip_sign = vector_sum > 0 ? -1 : 1;
+    IATVector::iterator it = iat_vector_.begin();
+    while (it != iat_vector_.end() && abs(vector_sum) > 0) {
+      // Add/subtract 1/16 of the element, but not more than |vector_sum|.
+      int correction = flip_sign * std::min(abs(vector_sum), (*it) >> 4);
+      *it += correction;
+      vector_sum += correction;
+      ++it;
+    }
+  }
+  assert(vector_sum == 0);  // Verify that the above is correct.
+
+  // Update |iat_factor_| (changes only during the first seconds after a reset).
+  // The factor converges to |kIatFactor_|.
+  iat_factor_ += (kIatFactor_ - iat_factor_ + 3) >> 2;
+}
+
+// Enforces upper and lower limits for |target_level_|. The upper limit is
+// chosen to be minimum of i) 75% of |max_packets_in_buffer_|, to leave some
+// headroom for natural fluctuations around the target, and ii) equivalent of
+// |maximum_delay_ms_| in packets. Note that in practice, if no
+// |maximum_delay_ms_| is specified, this does not have any impact, since the
+// target level is far below the buffer capacity in all reasonable cases.
+// The lower limit is equivalent of |minimum_delay_ms_| in packets. We update
+// |least_required_level_| while the above limits are applied.
+// TODO(hlundin): Move this check to the buffer logistics class.
+void DelayManager::LimitTargetLevel() {
+  least_required_delay_ms_ = (target_level_ * packet_len_ms_) >> 8;
+
+  if (packet_len_ms_ > 0 && minimum_delay_ms_ > 0) {
+    int minimum_delay_packet_q8 =  (minimum_delay_ms_ << 8) / packet_len_ms_;
+    target_level_ = std::max(target_level_, minimum_delay_packet_q8);
+  }
+
+  if (maximum_delay_ms_ > 0 && packet_len_ms_ > 0) {
+    int maximum_delay_packet_q8 = (maximum_delay_ms_ << 8) / packet_len_ms_;
+    target_level_ = std::min(target_level_, maximum_delay_packet_q8);
+  }
+
+  // Shift to Q8, then 75%.;
+  int max_buffer_packets_q8 =
+      static_cast<int>((3 * (max_packets_in_buffer_ << 8)) / 4);
+  target_level_ = std::min(target_level_, max_buffer_packets_q8);
+
+  // Sanity check, at least 1 packet (in Q8).
+  target_level_ = std::max(target_level_, 1 << 8);
+}
+
+int DelayManager::CalculateTargetLevel(int iat_packets) {
+  int limit_probability = kLimitProbability;
+  if (streaming_mode_) {
+    limit_probability = kLimitProbabilityStreaming;
+  }
+
+  // Calculate target buffer level from inter-arrival time histogram.
+  // Find the |iat_index| for which the probability of observing an
+  // inter-arrival time larger than or equal to |iat_index| is less than or
+  // equal to |limit_probability|. The sought probability is estimated using
+  // the histogram as the reverse cumulant PDF, i.e., the sum of elements from
+  // the end up until |iat_index|. Now, since the sum of all elements is 1
+  // (in Q30) by definition, and since the solution is often a low value for
+  // |iat_index|, it is more efficient to start with |sum| = 1 and subtract
+  // elements from the start of the histogram.
+  size_t index = 0;  // Start from the beginning of |iat_vector_|.
+  int sum = 1 << 30;  // Assign to 1 in Q30.
+  sum -= iat_vector_[index];  // Ensure that target level is >= 1.
+
+  do {
+    // Subtract the probabilities one by one until the sum is no longer greater
+    // than limit_probability.
+    ++index;
+    sum -= iat_vector_[index];
+  } while ((sum > limit_probability) && (index < iat_vector_.size() - 1));
+
+  // This is the base value for the target buffer level.
+  int target_level = static_cast<int>(index);
+  base_target_level_ = static_cast<int>(index);
+
+  // Update detector for delay peaks.
+  bool delay_peak_found = peak_detector_.Update(iat_packets, target_level);
+  if (delay_peak_found) {
+    target_level = std::max(target_level, peak_detector_.MaxPeakHeight());
+  }
+
+  // Sanity check. |target_level| must be strictly positive.
+  target_level = std::max(target_level, 1);
+  // Scale to Q8 and assign to member variable.
+  target_level_ = target_level << 8;
+  return target_level_;
+}
+
+int DelayManager::SetPacketAudioLength(int length_ms) {
+  if (length_ms <= 0) {
+    RTC_LOG_F(LS_ERROR) << "length_ms = " << length_ms;
+    return -1;
+  }
+  if (frame_length_change_experiment_ && packet_len_ms_ != length_ms) {
+    iat_vector_ = ScaleHistogram(iat_vector_, packet_len_ms_, length_ms);
+  }
+
+  packet_len_ms_ = length_ms;
+  peak_detector_.SetPacketAudioLength(packet_len_ms_);
+  packet_iat_stopwatch_ = tick_timer_->GetNewStopwatch();
+  last_pack_cng_or_dtmf_ = 1;  // TODO(hlundin): Legacy. Remove?
+  return 0;
+}
+
+
+void DelayManager::Reset() {
+  packet_len_ms_ = 0;  // Packet size unknown.
+  streaming_mode_ = false;
+  peak_detector_.Reset();
+  ResetHistogram();  // Resets target levels too.
+  iat_factor_ = 0;  // Adapt the histogram faster for the first few packets.
+  packet_iat_stopwatch_ = tick_timer_->GetNewStopwatch();
+  max_iat_stopwatch_ = tick_timer_->GetNewStopwatch();
+  iat_cumulative_sum_ = 0;
+  max_iat_cumulative_sum_ = 0;
+  last_pack_cng_or_dtmf_ = 1;
+}
+
+double DelayManager::EstimatedClockDriftPpm() const {
+  double sum = 0.0;
+  // Calculate the expected value based on the probabilities in |iat_vector_|.
+  for (size_t i = 0; i < iat_vector_.size(); ++i) {
+    sum += static_cast<double>(iat_vector_[i]) * i;
+  }
+  // The probabilities in |iat_vector_| are in Q30. Divide by 1 << 30 to convert
+  // to Q0; subtract the nominal inter-arrival time (1) to make a zero
+  // clockdrift represent as 0; mulitply by 1000000 to produce parts-per-million
+  // (ppm).
+  return (sum / (1 << 30) - 1) * 1e6;
+}
+
+bool DelayManager::PeakFound() const {
+  return peak_detector_.peak_found();
+}
+
+void DelayManager::ResetPacketIatCount() {
+  packet_iat_stopwatch_ = tick_timer_->GetNewStopwatch();
+}
+
+// Note that |low_limit| and |higher_limit| are not assigned to
+// |minimum_delay_ms_| and |maximum_delay_ms_| defined by the client of this
+// class. They are computed from |target_level_| and used for decision making.
+void DelayManager::BufferLimits(int* lower_limit, int* higher_limit) const {
+  if (!lower_limit || !higher_limit) {
+    RTC_LOG_F(LS_ERROR) << "NULL pointers supplied as input";
+    assert(false);
+    return;
+  }
+
+  int window_20ms = 0x7FFF;  // Default large value for legacy bit-exactness.
+  if (packet_len_ms_ > 0) {
+    window_20ms = (20 << 8) / packet_len_ms_;
+  }
+
+  // |target_level_| is in Q8 already.
+  *lower_limit = (target_level_ * 3) / 4;
+  // |higher_limit| is equal to |target_level_|, but should at
+  // least be 20 ms higher than |lower_limit_|.
+  *higher_limit = std::max(target_level_, *lower_limit + window_20ms);
+}
+
+int DelayManager::TargetLevel() const {
+  return target_level_;
+}
+
+void DelayManager::LastDecodedWasCngOrDtmf(bool it_was) {
+  if (it_was) {
+    last_pack_cng_or_dtmf_ = 1;
+  } else if (last_pack_cng_or_dtmf_ != 0) {
+    last_pack_cng_or_dtmf_ = -1;
+  }
+}
+
+void DelayManager::RegisterEmptyPacket() {
+  ++last_seq_no_;
+}
+
+DelayManager::IATVector DelayManager::ScaleHistogram(const IATVector& histogram,
+                                                     int old_packet_length,
+                                                     int new_packet_length) {
+  if (old_packet_length == 0) {
+    // If we don't know the previous frame length, don't make any changes to the
+    // histogram.
+    return histogram;
+  }
+  RTC_DCHECK_GT(new_packet_length, 0);
+  RTC_DCHECK_EQ(old_packet_length % 10, 0);
+  RTC_DCHECK_EQ(new_packet_length % 10, 0);
+  IATVector new_histogram(histogram.size(), 0);
+  int64_t acc = 0;
+  int time_counter = 0;
+  size_t new_histogram_idx = 0;
+  for (size_t i = 0; i < histogram.size(); i++) {
+    acc += histogram[i];
+    time_counter += old_packet_length;
+    // The bins should be scaled, to ensure the histogram still sums to one.
+    const int64_t scaled_acc = acc * new_packet_length / time_counter;
+    int64_t actually_used_acc = 0;
+    while (time_counter >= new_packet_length) {
+      const int64_t old_histogram_val = new_histogram[new_histogram_idx];
+      new_histogram[new_histogram_idx] =
+          rtc::saturated_cast<int>(old_histogram_val + scaled_acc);
+      actually_used_acc += new_histogram[new_histogram_idx] - old_histogram_val;
+      new_histogram_idx =
+          std::min(new_histogram_idx + 1, new_histogram.size() - 1);
+      time_counter -= new_packet_length;
+    }
+    // Only subtract the part that was succesfully written to the new histogram.
+    acc -= actually_used_acc;
+  }
+  // If there is anything left in acc (due to rounding errors), add it to the
+  // last bin. If we cannot add everything to the last bin we need to add as
+  // much as possible to the bins after the last bin (this is only possible
+  // when compressing a histogram).
+  while (acc > 0 && new_histogram_idx < new_histogram.size()) {
+    const int64_t old_histogram_val = new_histogram[new_histogram_idx];
+    new_histogram[new_histogram_idx] =
+        rtc::saturated_cast<int>(old_histogram_val + acc);
+    acc -= new_histogram[new_histogram_idx] - old_histogram_val;
+    new_histogram_idx++;
+  }
+  RTC_DCHECK_EQ(histogram.size(), new_histogram.size());
+  if (acc == 0) {
+    // If acc is non-zero, we were not able to add everything to the new
+    // histogram, so this check will not hold.
+    RTC_DCHECK_EQ(accumulate(histogram.begin(), histogram.end(), 0ll),
+                  accumulate(new_histogram.begin(), new_histogram.end(), 0ll));
+  }
+  return new_histogram;
+}
+
+bool DelayManager::SetMinimumDelay(int delay_ms) {
+  // Minimum delay shouldn't be more than maximum delay, if any maximum is set.
+  // Also, if possible check |delay| to less than 75% of
+  // |max_packets_in_buffer_|.
+  if ((maximum_delay_ms_ > 0 && delay_ms > maximum_delay_ms_) ||
+      (packet_len_ms_ > 0 &&
+       delay_ms >
+           static_cast<int>(3 * max_packets_in_buffer_ * packet_len_ms_ / 4))) {
+    return false;
+  }
+  minimum_delay_ms_ = delay_ms;
+  return true;
+}
+
+bool DelayManager::SetMaximumDelay(int delay_ms) {
+  if (delay_ms == 0) {
+    // Zero input unsets the maximum delay.
+    maximum_delay_ms_ = 0;
+    return true;
+  } else if (delay_ms < minimum_delay_ms_ || delay_ms < packet_len_ms_) {
+    // Maximum delay shouldn't be less than minimum delay or less than a packet.
+    return false;
+  }
+  maximum_delay_ms_ = delay_ms;
+  return true;
+}
+
+int DelayManager::least_required_delay_ms() const {
+  return least_required_delay_ms_;
+}
+
+int DelayManager::base_target_level() const { return base_target_level_; }
+void DelayManager::set_streaming_mode(bool value) { streaming_mode_ = value; }
+int DelayManager::last_pack_cng_or_dtmf() const {
+  return last_pack_cng_or_dtmf_;
+}
+
+void DelayManager::set_last_pack_cng_or_dtmf(int value) {
+  last_pack_cng_or_dtmf_ = value;
+}
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/delay_manager.h b/modules/audio_coding/neteq/delay_manager.h
new file mode 100644
index 0000000..0d082c8
--- /dev/null
+++ b/modules/audio_coding/neteq/delay_manager.h
@@ -0,0 +1,181 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_DELAY_MANAGER_H_
+#define MODULES_AUDIO_CODING_NETEQ_DELAY_MANAGER_H_
+
+#include <string.h>  // Provide access to size_t.
+
+#include <memory>
+#include <vector>
+
+#include "modules/audio_coding/neteq/tick_timer.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Forward declaration.
+class DelayPeakDetector;
+
+class DelayManager {
+ public:
+  typedef std::vector<int> IATVector;
+
+  // Create a DelayManager object. Notify the delay manager that the packet
+  // buffer can hold no more than |max_packets_in_buffer| packets (i.e., this
+  // is the number of packet slots in the buffer). Supply a PeakDetector
+  // object to the DelayManager.
+  DelayManager(size_t max_packets_in_buffer,
+               DelayPeakDetector* peak_detector,
+               const TickTimer* tick_timer);
+
+  virtual ~DelayManager();
+
+  // Read the inter-arrival time histogram. Mainly for testing purposes.
+  virtual const IATVector& iat_vector() const;
+
+  // Updates the delay manager with a new incoming packet, with
+  // |sequence_number| and |timestamp| from the RTP header. This updates the
+  // inter-arrival time histogram and other statistics, as well as the
+  // associated DelayPeakDetector. A new target buffer level is calculated.
+  // Returns 0 on success, -1 on failure (invalid sample rate).
+  virtual int Update(uint16_t sequence_number,
+                     uint32_t timestamp,
+                     int sample_rate_hz);
+
+  // Calculates a new target buffer level. Called from the Update() method.
+  // Sets target_level_ (in Q8) and returns the same value. Also calculates
+  // and updates base_target_level_, which is the target buffer level before
+  // taking delay peaks into account.
+  virtual int CalculateTargetLevel(int iat_packets);
+
+  // Notifies the DelayManager of how much audio data is carried in each packet.
+  // The method updates the DelayPeakDetector too, and resets the inter-arrival
+  // time counter. Returns 0 on success, -1 on failure.
+  virtual int SetPacketAudioLength(int length_ms);
+
+  // Resets the DelayManager and the associated DelayPeakDetector.
+  virtual void Reset();
+
+  // Calculates the average inter-arrival time deviation from the histogram.
+  // The result is returned as parts-per-million deviation from the nominal
+  // inter-arrival time. That is, if the average inter-arrival time is equal to
+  // the nominal frame time, the return value is zero. A positive value
+  // corresponds to packet spacing being too large, while a negative value means
+  // that the packets arrive with less spacing than expected.
+  virtual double EstimatedClockDriftPpm() const;
+
+  // Returns true if peak-mode is active. That is, delay peaks were observed
+  // recently. This method simply asks for the same information from the
+  // DelayPeakDetector object.
+  virtual bool PeakFound() const;
+
+  // Reset the inter-arrival time counter to 0.
+  virtual void ResetPacketIatCount();
+
+  // Writes the lower and higher limits which the buffer level should stay
+  // within to the corresponding pointers. The values are in (fractions of)
+  // packets in Q8.
+  virtual void BufferLimits(int* lower_limit, int* higher_limit) const;
+
+  // Gets the target buffer level, in (fractions of) packets in Q8. This value
+  // includes any extra delay set through the set_extra_delay_ms() method.
+  virtual int TargetLevel() const;
+
+  // Informs the delay manager whether or not the last decoded packet contained
+  // speech.
+  virtual void LastDecodedWasCngOrDtmf(bool it_was);
+
+  // Notify the delay manager that empty packets have been received. These are
+  // packets that are part of the sequence number series, so that an empty
+  // packet will shift the sequence numbers for the following packets.
+  virtual void RegisterEmptyPacket();
+
+  // Apply compression or stretching to the IAT histogram, for a change in frame
+  // size. This returns an updated histogram. This function is public for
+  // testability.
+  static IATVector ScaleHistogram(const IATVector& histogram,
+                                  int old_packet_length,
+                                  int new_packet_length);
+
+  // Accessors and mutators.
+  // Assuming |delay| is in valid range.
+  virtual bool SetMinimumDelay(int delay_ms);
+  virtual bool SetMaximumDelay(int delay_ms);
+  virtual int least_required_delay_ms() const;
+  virtual int base_target_level() const;
+  virtual void set_streaming_mode(bool value);
+  virtual int last_pack_cng_or_dtmf() const;
+  virtual void set_last_pack_cng_or_dtmf(int value);
+
+ private:
+  static const int kLimitProbability = 53687091;  // 1/20 in Q30.
+  static const int kLimitProbabilityStreaming = 536871;  // 1/2000 in Q30.
+  static const int kMaxStreamingPeakPeriodMs = 600000;  // 10 minutes in ms.
+  static const int kCumulativeSumDrift = 2;  // Drift term for cumulative sum
+                                             // |iat_cumulative_sum_|.
+  // Steady-state forgetting factor for |iat_vector_|, 0.9993 in Q15.
+  static const int kIatFactor_ = 32745;
+  static const int kMaxIat = 64;  // Max inter-arrival time to register.
+
+  // Sets |iat_vector_| to the default start distribution and sets the
+  // |base_target_level_| and |target_level_| to the corresponding values.
+  void ResetHistogram();
+
+  // Updates |iat_cumulative_sum_| and |max_iat_cumulative_sum_|. (These are
+  // used by the streaming mode.) This method is called by Update().
+  void UpdateCumulativeSums(int packet_len_ms, uint16_t sequence_number);
+
+  // Updates the histogram |iat_vector_|. The probability for inter-arrival time
+  // equal to |iat_packets| (in integer packets) is increased slightly, while
+  // all other entries are decreased. This method is called by Update().
+  void UpdateHistogram(size_t iat_packets);
+
+  // Makes sure that |target_level_| is not too large, taking
+  // |max_packets_in_buffer_| and |extra_delay_ms_| into account. This method is
+  // called by Update().
+  void LimitTargetLevel();
+
+  bool first_packet_received_;
+  const size_t max_packets_in_buffer_;  // Capacity of the packet buffer.
+  IATVector iat_vector_;  // Histogram of inter-arrival times.
+  int iat_factor_;  // Forgetting factor for updating the IAT histogram (Q15).
+  const TickTimer* tick_timer_;
+  // Time elapsed since last packet.
+  std::unique_ptr<TickTimer::Stopwatch> packet_iat_stopwatch_;
+  int base_target_level_;   // Currently preferred buffer level before peak
+                            // detection and streaming mode (Q0).
+  // TODO(turajs) change the comment according to the implementation of
+  // minimum-delay.
+  int target_level_;  // Currently preferred buffer level in (fractions)
+                      // of packets (Q8), before adding any extra delay.
+  int packet_len_ms_;  // Length of audio in each incoming packet [ms].
+  bool streaming_mode_;
+  uint16_t last_seq_no_;  // Sequence number for last received packet.
+  uint32_t last_timestamp_;  // Timestamp for the last received packet.
+  int minimum_delay_ms_;  // Externally set minimum delay.
+  int least_required_delay_ms_;  // Smallest preferred buffer level (same unit
+                              // as |target_level_|), before applying
+                              // |minimum_delay_ms_| and/or |maximum_delay_ms_|.
+  int maximum_delay_ms_;  // Externally set maximum allowed delay.
+  int iat_cumulative_sum_;  // Cumulative sum of delta inter-arrival times.
+  int max_iat_cumulative_sum_;  // Max of |iat_cumulative_sum_|.
+  // Time elapsed since maximum was observed.
+  std::unique_ptr<TickTimer::Stopwatch> max_iat_stopwatch_;
+  DelayPeakDetector& peak_detector_;
+  int last_pack_cng_or_dtmf_;
+  const bool frame_length_change_experiment_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(DelayManager);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_DELAY_MANAGER_H_
diff --git a/modules/audio_coding/neteq/delay_manager_unittest.cc b/modules/audio_coding/neteq/delay_manager_unittest.cc
new file mode 100644
index 0000000..953bc6b
--- /dev/null
+++ b/modules/audio_coding/neteq/delay_manager_unittest.cc
@@ -0,0 +1,449 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for DelayManager class.
+
+#include "modules/audio_coding/neteq/delay_manager.h"
+
+#include <math.h>
+
+#include "modules/audio_coding/neteq/mock/mock_delay_peak_detector.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+using ::testing::Return;
+using ::testing::_;
+
+class DelayManagerTest : public ::testing::Test {
+ protected:
+  static const int kMaxNumberOfPackets = 240;
+  static const int kTimeStepMs = 10;
+  static const int kFs = 8000;
+  static const int kFrameSizeMs = 20;
+  static const int kTsIncrement = kFrameSizeMs * kFs / 1000;
+
+  DelayManagerTest();
+  virtual void SetUp();
+  virtual void TearDown();
+  void SetPacketAudioLength(int lengt_ms);
+  void InsertNextPacket();
+  void IncreaseTime(int inc_ms);
+
+  DelayManager* dm_;
+  TickTimer tick_timer_;
+  MockDelayPeakDetector detector_;
+  uint16_t seq_no_;
+  uint32_t ts_;
+};
+
+DelayManagerTest::DelayManagerTest()
+    : dm_(NULL), detector_(&tick_timer_), seq_no_(0x1234), ts_(0x12345678) {}
+
+void DelayManagerTest::SetUp() {
+  EXPECT_CALL(detector_, Reset())
+            .Times(1);
+  dm_ = new DelayManager(kMaxNumberOfPackets, &detector_, &tick_timer_);
+}
+
+void DelayManagerTest::SetPacketAudioLength(int lengt_ms) {
+  EXPECT_CALL(detector_, SetPacketAudioLength(lengt_ms));
+  dm_->SetPacketAudioLength(lengt_ms);
+}
+
+void DelayManagerTest::InsertNextPacket() {
+  EXPECT_EQ(0, dm_->Update(seq_no_, ts_, kFs));
+  seq_no_ += 1;
+  ts_ += kTsIncrement;
+}
+
+void DelayManagerTest::IncreaseTime(int inc_ms) {
+  for (int t = 0; t < inc_ms; t += kTimeStepMs) {
+    tick_timer_.Increment();
+  }
+}
+void DelayManagerTest::TearDown() {
+  EXPECT_CALL(detector_, Die());
+  delete dm_;
+}
+
+TEST_F(DelayManagerTest, CreateAndDestroy) {
+  // Nothing to do here. The test fixture creates and destroys the DelayManager
+  // object.
+}
+
+TEST_F(DelayManagerTest, VectorInitialization) {
+  const DelayManager::IATVector& vec = dm_->iat_vector();
+  double sum = 0.0;
+  for (size_t i = 0; i < vec.size(); i++) {
+    EXPECT_NEAR(ldexp(pow(0.5, static_cast<int>(i + 1)), 30), vec[i], 65537);
+    // Tolerance 65537 in Q30 corresponds to a delta of approximately 0.00006.
+    sum += vec[i];
+  }
+  EXPECT_EQ(1 << 30, static_cast<int>(sum));  // Should be 1 in Q30.
+}
+
+TEST_F(DelayManagerTest, SetPacketAudioLength) {
+  const int kLengthMs = 30;
+  // Expect DelayManager to pass on the new length to the detector object.
+  EXPECT_CALL(detector_, SetPacketAudioLength(kLengthMs))
+      .Times(1);
+  EXPECT_EQ(0, dm_->SetPacketAudioLength(kLengthMs));
+  EXPECT_EQ(-1, dm_->SetPacketAudioLength(-1));  // Illegal parameter value.
+}
+
+TEST_F(DelayManagerTest, PeakFound) {
+  // Expect DelayManager to pass on the question to the detector.
+  // Call twice, and let the detector return true the first time and false the
+  // second time.
+  EXPECT_CALL(detector_, peak_found())
+      .WillOnce(Return(true))
+      .WillOnce(Return(false));
+  EXPECT_TRUE(dm_->PeakFound());
+  EXPECT_FALSE(dm_->PeakFound());
+}
+
+TEST_F(DelayManagerTest, UpdateNormal) {
+  SetPacketAudioLength(kFrameSizeMs);
+  // First packet arrival.
+  InsertNextPacket();
+  // Advance time by one frame size.
+  IncreaseTime(kFrameSizeMs);
+  // Second packet arrival.
+  // Expect detector update method to be called once with inter-arrival time
+  // equal to 1 packet, and (base) target level equal to 1 as well.
+  // Return false to indicate no peaks found.
+  EXPECT_CALL(detector_, Update(1, 1))
+      .WillOnce(Return(false));
+  InsertNextPacket();
+  EXPECT_EQ(1 << 8, dm_->TargetLevel());  // In Q8.
+  EXPECT_EQ(1, dm_->base_target_level());
+  int lower, higher;
+  dm_->BufferLimits(&lower, &higher);
+  // Expect |lower| to be 75% of target level, and |higher| to be target level,
+  // but also at least 20 ms higher than |lower|, which is the limiting case
+  // here.
+  EXPECT_EQ((1 << 8) * 3 / 4, lower);
+  EXPECT_EQ(lower + (20 << 8) / kFrameSizeMs, higher);
+}
+
+TEST_F(DelayManagerTest, UpdateLongInterArrivalTime) {
+  SetPacketAudioLength(kFrameSizeMs);
+  // First packet arrival.
+  InsertNextPacket();
+  // Advance time by two frame size.
+  IncreaseTime(2 * kFrameSizeMs);
+  // Second packet arrival.
+  // Expect detector update method to be called once with inter-arrival time
+  // equal to 1 packet, and (base) target level equal to 1 as well.
+  // Return false to indicate no peaks found.
+  EXPECT_CALL(detector_, Update(2, 2))
+      .WillOnce(Return(false));
+  InsertNextPacket();
+  EXPECT_EQ(2 << 8, dm_->TargetLevel());  // In Q8.
+  EXPECT_EQ(2, dm_->base_target_level());
+  int lower, higher;
+  dm_->BufferLimits(&lower, &higher);
+  // Expect |lower| to be 75% of target level, and |higher| to be target level,
+  // but also at least 20 ms higher than |lower|, which is the limiting case
+  // here.
+  EXPECT_EQ((2 << 8) * 3 / 4, lower);
+  EXPECT_EQ(lower + (20 << 8) / kFrameSizeMs, higher);
+}
+
+TEST_F(DelayManagerTest, UpdatePeakFound) {
+  SetPacketAudioLength(kFrameSizeMs);
+  // First packet arrival.
+  InsertNextPacket();
+  // Advance time by one frame size.
+  IncreaseTime(kFrameSizeMs);
+  // Second packet arrival.
+  // Expect detector update method to be called once with inter-arrival time
+  // equal to 1 packet, and (base) target level equal to 1 as well.
+  // Return true to indicate that peaks are found. Let the peak height be 5.
+  EXPECT_CALL(detector_, Update(1, 1))
+      .WillOnce(Return(true));
+  EXPECT_CALL(detector_, MaxPeakHeight())
+      .WillOnce(Return(5));
+  InsertNextPacket();
+  EXPECT_EQ(5 << 8, dm_->TargetLevel());
+  EXPECT_EQ(1, dm_->base_target_level());  // Base target level is w/o peaks.
+  int lower, higher;
+  dm_->BufferLimits(&lower, &higher);
+  // Expect |lower| to be 75% of target level, and |higher| to be target level.
+  EXPECT_EQ((5 << 8) * 3 / 4, lower);
+  EXPECT_EQ(5 << 8, higher);
+}
+
+TEST_F(DelayManagerTest, TargetDelay) {
+  SetPacketAudioLength(kFrameSizeMs);
+  // First packet arrival.
+  InsertNextPacket();
+  // Advance time by one frame size.
+  IncreaseTime(kFrameSizeMs);
+  // Second packet arrival.
+  // Expect detector update method to be called once with inter-arrival time
+  // equal to 1 packet, and (base) target level equal to 1 as well.
+  // Return false to indicate no peaks found.
+  EXPECT_CALL(detector_, Update(1, 1))
+      .WillOnce(Return(false));
+  InsertNextPacket();
+  const int kExpectedTarget = 1;
+  EXPECT_EQ(kExpectedTarget << 8, dm_->TargetLevel());  // In Q8.
+  EXPECT_EQ(1, dm_->base_target_level());
+  int lower, higher;
+  dm_->BufferLimits(&lower, &higher);
+  // Expect |lower| to be 75% of base target level, and |higher| to be
+  // lower + 20 ms headroom.
+  EXPECT_EQ((1 << 8) * 3 / 4, lower);
+  EXPECT_EQ(lower + (20 << 8) / kFrameSizeMs, higher);
+}
+
+TEST_F(DelayManagerTest, MaxAndRequiredDelay) {
+  const int kExpectedTarget = 5;
+  const int kTimeIncrement = kExpectedTarget * kFrameSizeMs;
+  SetPacketAudioLength(kFrameSizeMs);
+  // First packet arrival.
+  InsertNextPacket();
+  // Second packet arrival.
+  // Expect detector update method to be called once with inter-arrival time
+  // equal to |kExpectedTarget| packet. Return true to indicate peaks found.
+  EXPECT_CALL(detector_, Update(kExpectedTarget, _))
+      .WillRepeatedly(Return(true));
+  EXPECT_CALL(detector_, MaxPeakHeight())
+      .WillRepeatedly(Return(kExpectedTarget));
+  IncreaseTime(kTimeIncrement);
+  InsertNextPacket();
+
+  // No limit is set.
+  EXPECT_EQ(kExpectedTarget << 8, dm_->TargetLevel());
+
+  int kMaxDelayPackets = kExpectedTarget - 2;
+  int kMaxDelayMs = kMaxDelayPackets * kFrameSizeMs;
+  EXPECT_TRUE(dm_->SetMaximumDelay(kMaxDelayMs));
+  IncreaseTime(kTimeIncrement);
+  InsertNextPacket();
+  EXPECT_EQ(kExpectedTarget * kFrameSizeMs, dm_->least_required_delay_ms());
+  EXPECT_EQ(kMaxDelayPackets << 8, dm_->TargetLevel());
+
+  // Target level at least should be one packet.
+  EXPECT_FALSE(dm_->SetMaximumDelay(kFrameSizeMs - 1));
+}
+
+TEST_F(DelayManagerTest, MinAndRequiredDelay) {
+  const int kExpectedTarget = 5;
+  const int kTimeIncrement = kExpectedTarget * kFrameSizeMs;
+  SetPacketAudioLength(kFrameSizeMs);
+  // First packet arrival.
+  InsertNextPacket();
+  // Second packet arrival.
+  // Expect detector update method to be called once with inter-arrival time
+  // equal to |kExpectedTarget| packet. Return true to indicate peaks found.
+  EXPECT_CALL(detector_, Update(kExpectedTarget, _))
+      .WillRepeatedly(Return(true));
+  EXPECT_CALL(detector_, MaxPeakHeight())
+      .WillRepeatedly(Return(kExpectedTarget));
+  IncreaseTime(kTimeIncrement);
+  InsertNextPacket();
+
+  // No limit is applied.
+  EXPECT_EQ(kExpectedTarget << 8, dm_->TargetLevel());
+
+  int kMinDelayPackets = kExpectedTarget + 2;
+  int kMinDelayMs = kMinDelayPackets * kFrameSizeMs;
+  dm_->SetMinimumDelay(kMinDelayMs);
+  IncreaseTime(kTimeIncrement);
+  InsertNextPacket();
+  EXPECT_EQ(kExpectedTarget * kFrameSizeMs, dm_->least_required_delay_ms());
+  EXPECT_EQ(kMinDelayPackets << 8, dm_->TargetLevel());
+}
+
+// Tests that skipped sequence numbers (simulating empty packets) are handled
+// correctly.
+TEST_F(DelayManagerTest, EmptyPacketsReported) {
+  SetPacketAudioLength(kFrameSizeMs);
+  // First packet arrival.
+  InsertNextPacket();
+
+  // Advance time by one frame size.
+  IncreaseTime(kFrameSizeMs);
+
+  // Advance the sequence number by 5, simulating that 5 empty packets were
+  // received, but never inserted.
+  seq_no_ += 10;
+  for (int j = 0; j < 10; ++j) {
+    dm_->RegisterEmptyPacket();
+  }
+
+  // Second packet arrival.
+  // Expect detector update method to be called once with inter-arrival time
+  // equal to 1 packet, and (base) target level equal to 1 as well.
+  // Return false to indicate no peaks found.
+  EXPECT_CALL(detector_, Update(1, 1)).WillOnce(Return(false));
+  InsertNextPacket();
+
+  EXPECT_EQ(1 << 8, dm_->TargetLevel());  // In Q8.
+}
+
+// Same as above, but do not call RegisterEmptyPacket. Observe the target level
+// increase dramatically.
+TEST_F(DelayManagerTest, EmptyPacketsNotReported) {
+  SetPacketAudioLength(kFrameSizeMs);
+  // First packet arrival.
+  InsertNextPacket();
+
+  // Advance time by one frame size.
+  IncreaseTime(kFrameSizeMs);
+
+  // Advance the sequence number by 5, simulating that 5 empty packets were
+  // received, but never inserted.
+  seq_no_ += 10;
+
+  // Second packet arrival.
+  // Expect detector update method to be called once with inter-arrival time
+  // equal to 1 packet, and (base) target level equal to 1 as well.
+  // Return false to indicate no peaks found.
+  EXPECT_CALL(detector_, Update(10, 10)).WillOnce(Return(false));
+  InsertNextPacket();
+
+  // Note 10 times higher target value.
+  EXPECT_EQ(10 * 1 << 8, dm_->TargetLevel());  // In Q8.
+}
+
+TEST_F(DelayManagerTest, Failures) {
+  // Wrong sample rate.
+  EXPECT_EQ(-1, dm_->Update(0, 0, -1));
+  // Wrong packet size.
+  EXPECT_EQ(-1, dm_->SetPacketAudioLength(0));
+  EXPECT_EQ(-1, dm_->SetPacketAudioLength(-1));
+
+  // Minimum delay higher than a maximum delay is not accepted.
+  EXPECT_TRUE(dm_->SetMaximumDelay(10));
+  EXPECT_FALSE(dm_->SetMinimumDelay(20));
+
+  // Maximum delay less than minimum delay is not accepted.
+  EXPECT_TRUE(dm_->SetMaximumDelay(100));
+  EXPECT_TRUE(dm_->SetMinimumDelay(80));
+  EXPECT_FALSE(dm_->SetMaximumDelay(60));
+}
+
+// Test if the histogram is stretched correctly if the packet size is decreased.
+TEST(DelayManagerIATScalingTest, StretchTest) {
+  using IATVector = DelayManager::IATVector;
+  // Test a straightforward 60ms to 20ms change.
+  IATVector iat = {12, 0, 0, 0, 0, 0};
+  IATVector expected_result = {4, 4, 4, 0, 0, 0};
+  IATVector stretched_iat = DelayManager::ScaleHistogram(iat, 60, 20);
+  EXPECT_EQ(stretched_iat, expected_result);
+
+  // Test an example where the last bin in the stretched histogram should
+  // contain the sum of the elements that don't fit into the new histogram.
+  iat = {18, 15, 12, 9, 6, 3, 0};
+  expected_result = {6, 6, 6, 5, 5, 5, 30};
+  stretched_iat = DelayManager::ScaleHistogram(iat, 60, 20);
+  EXPECT_EQ(stretched_iat, expected_result);
+
+  // Test a 120ms to 60ms change.
+  iat = {18, 16, 14, 4, 0};
+  expected_result = {9, 9, 8, 8, 18};
+  stretched_iat = DelayManager::ScaleHistogram(iat, 120, 60);
+  EXPECT_EQ(stretched_iat, expected_result);
+
+  // Test a 120ms to 20ms change.
+  iat = {19, 12, 0, 0, 0, 0, 0, 0};
+  expected_result = {3, 3, 3, 3, 3, 3, 2, 11};
+  stretched_iat = DelayManager::ScaleHistogram(iat, 120, 20);
+  EXPECT_EQ(stretched_iat, expected_result);
+
+  // Test a 70ms to 40ms change.
+  iat = {13, 7, 5, 3, 1, 5, 12, 11, 3, 0, 0, 0};
+  expected_result = {7, 5, 5, 3, 3, 2, 2, 1, 2, 2, 6, 22};
+  stretched_iat = DelayManager::ScaleHistogram(iat, 70, 40);
+  EXPECT_EQ(stretched_iat, expected_result);
+
+  // Test a 30ms to 20ms change.
+  iat = {13, 7, 5, 3, 1, 5, 12, 11, 3, 0, 0, 0};
+  expected_result = {8, 6, 6, 3, 2, 2, 1, 3, 3, 8, 7, 11};
+  stretched_iat = DelayManager::ScaleHistogram(iat, 30, 20);
+  EXPECT_EQ(stretched_iat, expected_result);
+}
+
+// Test if the histogram is compressed correctly if the packet size is
+// increased.
+TEST(DelayManagerIATScalingTest, CompressionTest) {
+  using IATVector = DelayManager::IATVector;
+  // Test a 20 to 60 ms change.
+  IATVector iat = {12, 11, 10, 3, 2, 1};
+  IATVector expected_result = {33, 6, 0, 0, 0, 0};
+  IATVector compressed_iat = DelayManager::ScaleHistogram(iat, 20, 60);
+  EXPECT_EQ(compressed_iat, expected_result);
+
+  // Test a 60ms to 120ms change.
+  iat = {18, 16, 14, 4, 1};
+  expected_result = {34, 18, 1, 0, 0};
+  compressed_iat = DelayManager::ScaleHistogram(iat, 60, 120);
+  EXPECT_EQ(compressed_iat, expected_result);
+
+  // Test a 20ms to 120ms change.
+  iat = {18, 12, 5, 4, 4, 3, 5, 1};
+  expected_result = {46, 6, 0, 0, 0, 0, 0, 0};
+  compressed_iat = DelayManager::ScaleHistogram(iat, 20, 120);
+  EXPECT_EQ(compressed_iat, expected_result);
+
+  // Test a 70ms to 80ms change.
+  iat = {13, 7, 5, 3, 1, 5, 12, 11, 3};
+  expected_result = {11, 8, 6, 2, 5, 12, 13, 3, 0};
+  compressed_iat = DelayManager::ScaleHistogram(iat, 70, 80);
+  EXPECT_EQ(compressed_iat, expected_result);
+
+  // Test a 50ms to 110ms change.
+  iat = {13, 7, 5, 3, 1, 5, 12, 11, 3};
+  expected_result = {18, 8, 16, 16, 2, 0, 0, 0, 0};
+  compressed_iat = DelayManager::ScaleHistogram(iat, 50, 110);
+  EXPECT_EQ(compressed_iat, expected_result);
+}
+
+// Test if the histogram scaling function handles overflows correctly.
+TEST(DelayManagerIATScalingTest, OverflowTest) {
+  using IATVector = DelayManager::IATVector;
+  // Test a compression operation that can cause overflow.
+  IATVector iat = {733544448, 0, 0, 0, 0, 0, 0, 340197376, 0, 0, 0, 0, 0, 0};
+  IATVector expected_result = {733544448, 340197376, 0, 0, 0, 0, 0,
+                               0,         0,         0, 0, 0, 0, 0};
+  IATVector scaled_iat = DelayManager::ScaleHistogram(iat, 10, 60);
+  EXPECT_EQ(scaled_iat, expected_result);
+
+  iat = {655591163, 39962288, 360736736, 1930514, 4003853, 1782764,
+         114119,    2072996,  0,         2149354, 0};
+  expected_result = {1056290187, 7717131, 2187115, 2149354, 0, 0,
+                     0,          0,       0,       0,       0};
+  scaled_iat = DelayManager::ScaleHistogram(iat, 20, 60);
+  EXPECT_EQ(scaled_iat, expected_result);
+
+  // In this test case we will not be able to add everything to the final bin in
+  // the scaled histogram. Check that the last bin doesn't overflow.
+  iat = {2000000000, 2000000000, 2000000000,
+         2000000000, 2000000000, 2000000000};
+  expected_result = {666666666, 666666666, 666666666,
+                     666666667, 666666667, 2147483647};
+  scaled_iat = DelayManager::ScaleHistogram(iat, 60, 20);
+  EXPECT_EQ(scaled_iat, expected_result);
+
+  // In this test case we will not be able to add enough to each of the bins,
+  // so the values should be smeared out past the end of the normal range.
+  iat = {2000000000, 2000000000, 2000000000,
+         2000000000, 2000000000, 2000000000};
+  expected_result = {2147483647, 2147483647, 2147483647,
+                     2147483647, 2147483647, 1262581765};
+  scaled_iat = DelayManager::ScaleHistogram(iat, 20, 60);
+  EXPECT_EQ(scaled_iat, expected_result);
+}
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/delay_peak_detector.cc b/modules/audio_coding/neteq/delay_peak_detector.cc
new file mode 100644
index 0000000..eb9f6d5
--- /dev/null
+++ b/modules/audio_coding/neteq/delay_peak_detector.cc
@@ -0,0 +1,128 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/delay_peak_detector.h"
+
+#include <algorithm>  // max
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+// The DelayPeakDetector keeps track of severe inter-arrival times, called
+// delay peaks. When a peak is observed, the "height" (the time elapsed since
+// the previous packet arrival) and the peak "period" (the time since the last
+// observed peak) is recorded in a vector. When enough peaks have been observed,
+// peak-mode is engaged and the DelayManager asks the DelayPeakDetector for
+// the worst peak height.
+
+DelayPeakDetector::~DelayPeakDetector() = default;
+
+DelayPeakDetector::DelayPeakDetector(const TickTimer* tick_timer)
+    : peak_found_(false),
+      peak_detection_threshold_(0),
+      tick_timer_(tick_timer),
+      frame_length_change_experiment_(
+          field_trial::IsEnabled("WebRTC-Audio-NetEqFramelengthExperiment")) {
+  RTC_DCHECK(!peak_period_stopwatch_);
+}
+
+void DelayPeakDetector::Reset() {
+  peak_period_stopwatch_.reset();
+  peak_found_ = false;
+  peak_history_.clear();
+}
+
+// Calculates the threshold in number of packets.
+void DelayPeakDetector::SetPacketAudioLength(int length_ms) {
+  if (length_ms > 0) {
+    if (frame_length_change_experiment_) {
+      peak_detection_threshold_ = std::max(2, kPeakHeightMs / length_ms);
+    } else {
+      peak_detection_threshold_ = kPeakHeightMs / length_ms;
+    }
+  }
+  if (frame_length_change_experiment_) {
+    peak_history_.clear();
+  }
+}
+
+bool DelayPeakDetector::peak_found() {
+  return peak_found_;
+}
+
+int DelayPeakDetector::MaxPeakHeight() const {
+  int max_height = -1;  // Returns -1 for an empty history.
+  std::list<Peak>::const_iterator it;
+  for (it = peak_history_.begin(); it != peak_history_.end(); ++it) {
+    max_height = std::max(max_height, it->peak_height_packets);
+  }
+  return max_height;
+}
+
+uint64_t DelayPeakDetector::MaxPeakPeriod() const {
+  auto max_period_element = std::max_element(
+      peak_history_.begin(), peak_history_.end(),
+      [](Peak a, Peak b) { return a.period_ms < b.period_ms; });
+  if (max_period_element == peak_history_.end()) {
+    return 0;  // |peak_history_| is empty.
+  }
+  RTC_DCHECK_GT(max_period_element->period_ms, 0);
+  return max_period_element->period_ms;
+}
+
+bool DelayPeakDetector::Update(int inter_arrival_time, int target_level) {
+  if (inter_arrival_time > target_level + peak_detection_threshold_ ||
+      inter_arrival_time > 2 * target_level) {
+    // A delay peak is observed.
+    if (!peak_period_stopwatch_) {
+      // This is the first peak. Reset the period counter.
+      peak_period_stopwatch_ = tick_timer_->GetNewStopwatch();
+    } else if (peak_period_stopwatch_->ElapsedMs() > 0) {
+      if (peak_period_stopwatch_->ElapsedMs() <= kMaxPeakPeriodMs) {
+        // This is not the first peak, and the period is valid.
+        // Store peak data in the vector.
+        Peak peak_data;
+        peak_data.period_ms = peak_period_stopwatch_->ElapsedMs();
+        peak_data.peak_height_packets = inter_arrival_time;
+        peak_history_.push_back(peak_data);
+        while (peak_history_.size() > kMaxNumPeaks) {
+          // Delete the oldest data point.
+          peak_history_.pop_front();
+        }
+        peak_period_stopwatch_ = tick_timer_->GetNewStopwatch();
+      } else if (peak_period_stopwatch_->ElapsedMs() <= 2 * kMaxPeakPeriodMs) {
+        // Invalid peak due to too long period. Reset period counter and start
+        // looking for next peak.
+        peak_period_stopwatch_ = tick_timer_->GetNewStopwatch();
+      } else {
+        // More than 2 times the maximum period has elapsed since the last peak
+        // was registered. It seams that the network conditions have changed.
+        // Reset the peak statistics.
+        Reset();
+      }
+    }
+  }
+  return CheckPeakConditions();
+}
+
+bool DelayPeakDetector::CheckPeakConditions() {
+  size_t s = peak_history_.size();
+  if (s >= kMinPeaksToTrigger &&
+      peak_period_stopwatch_->ElapsedMs() <= 2 * MaxPeakPeriod()) {
+    peak_found_ = true;
+  } else {
+    peak_found_ = false;
+  }
+  return peak_found_;
+}
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/delay_peak_detector.h b/modules/audio_coding/neteq/delay_peak_detector.h
new file mode 100644
index 0000000..9defca5
--- /dev/null
+++ b/modules/audio_coding/neteq/delay_peak_detector.h
@@ -0,0 +1,75 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_DELAY_PEAK_DETECTOR_H_
+#define MODULES_AUDIO_CODING_NETEQ_DELAY_PEAK_DETECTOR_H_
+
+#include <string.h>  // size_t
+
+#include <list>
+#include <memory>
+
+#include "modules/audio_coding/neteq/tick_timer.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class DelayPeakDetector {
+ public:
+  DelayPeakDetector(const TickTimer* tick_timer);
+  virtual ~DelayPeakDetector();
+  virtual void Reset();
+
+  // Notifies the DelayPeakDetector of how much audio data is carried in each
+  // packet.
+  virtual void SetPacketAudioLength(int length_ms);
+
+  // Returns true if peak-mode is active. That is, delay peaks were observed
+  // recently.
+  virtual bool peak_found();
+
+  // Calculates and returns the maximum delay peak height. Returns -1 if no
+  // delay peaks have been observed recently. The unit is number of packets.
+  virtual int MaxPeakHeight() const;
+
+  // Calculates and returns the maximum delay peak distance in ms (strictly
+  // larger than 0), or 0 if no delay peaks have been observed recently.
+  virtual uint64_t MaxPeakPeriod() const;
+
+  // Updates the DelayPeakDetector with a new inter-arrival time (in packets)
+  // and the current target buffer level (needed to decide if a peak is observed
+  // or not). Returns true if peak-mode is active, false if not.
+  virtual bool Update(int inter_arrival_time, int target_level);
+
+ private:
+  static const size_t kMaxNumPeaks = 8;
+  static const size_t kMinPeaksToTrigger = 2;
+  static const int kPeakHeightMs = 78;
+  static const int kMaxPeakPeriodMs = 10000;
+
+  typedef struct {
+    uint64_t period_ms;
+    int peak_height_packets;
+  } Peak;
+
+  bool CheckPeakConditions();
+
+  std::list<Peak> peak_history_;
+  bool peak_found_;
+  int peak_detection_threshold_;
+  const TickTimer* tick_timer_;
+  std::unique_ptr<TickTimer::Stopwatch> peak_period_stopwatch_;
+  const bool frame_length_change_experiment_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(DelayPeakDetector);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_DELAY_PEAK_DETECTOR_H_
diff --git a/modules/audio_coding/neteq/delay_peak_detector_unittest.cc b/modules/audio_coding/neteq/delay_peak_detector_unittest.cc
new file mode 100644
index 0000000..058ba66
--- /dev/null
+++ b/modules/audio_coding/neteq/delay_peak_detector_unittest.cc
@@ -0,0 +1,143 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for DelayPeakDetector class.
+
+#include "modules/audio_coding/neteq/delay_peak_detector.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(DelayPeakDetector, CreateAndDestroy) {
+  TickTimer tick_timer;
+  DelayPeakDetector* detector = new DelayPeakDetector(&tick_timer);
+  EXPECT_FALSE(detector->peak_found());
+  delete detector;
+}
+
+TEST(DelayPeakDetector, EmptyHistory) {
+  TickTimer tick_timer;
+  DelayPeakDetector detector(&tick_timer);
+  EXPECT_EQ(-1, detector.MaxPeakHeight());
+  EXPECT_EQ(0u, detector.MaxPeakPeriod());
+}
+
+// Inject a series of packet arrivals into the detector. Three of the packets
+// have suffered delays. After the third delay peak, peak-mode is expected to
+// start. This should then continue until it is disengaged due to lack of peaks.
+TEST(DelayPeakDetector, TriggerPeakMode) {
+  TickTimer tick_timer;
+  DelayPeakDetector detector(&tick_timer);
+  const int kPacketSizeMs = 30;
+  detector.SetPacketAudioLength(kPacketSizeMs);
+
+  // Load up normal arrival times; 0 ms, 30 ms, 60 ms, 90 ms, ...
+  const int kNumPackets = 1000;
+  int arrival_times_ms[kNumPackets];
+  for (int i = 0; i < kNumPackets; ++i) {
+    arrival_times_ms[i] = i * kPacketSizeMs;
+  }
+
+  // Delay three packets.
+  const int kPeakDelayMs = 100;
+  // First delay peak.
+  arrival_times_ms[100] += kPeakDelayMs;
+  // Second delay peak.
+  arrival_times_ms[200] += kPeakDelayMs;
+  // Third delay peak. Trigger peak-mode after this packet.
+  arrival_times_ms[400] += kPeakDelayMs;
+  // The second peak period is the longest, 200 packets.
+  const uint64_t kWorstPeakPeriod = 200 * kPacketSizeMs;
+  int peak_mode_start_ms = arrival_times_ms[400];
+  // Expect to disengage after no peaks are observed for two period times.
+  int peak_mode_end_ms = peak_mode_start_ms + 2 * kWorstPeakPeriod;
+
+  // Load into detector.
+  int time = 0;
+  int next = 1;  // Start with the second packet to get a proper IAT.
+  while (next < kNumPackets) {
+    while (next < kNumPackets && arrival_times_ms[next] <= time) {
+      int iat_packets = (arrival_times_ms[next] - arrival_times_ms[next - 1]) /
+          kPacketSizeMs;
+      const int kTargetBufferLevel = 1;  // Define peaks to be iat > 2.
+      if (time < peak_mode_start_ms || time > peak_mode_end_ms) {
+        EXPECT_FALSE(detector.Update(iat_packets, kTargetBufferLevel));
+      } else {
+        EXPECT_TRUE(detector.Update(iat_packets, kTargetBufferLevel));
+        EXPECT_EQ(kWorstPeakPeriod, detector.MaxPeakPeriod());
+        EXPECT_EQ(kPeakDelayMs / kPacketSizeMs + 1, detector.MaxPeakHeight());
+      }
+      ++next;
+    }
+    tick_timer.Increment();
+    time += 10;  // Increase time 10 ms.
+  }
+}
+
+// Same test as TriggerPeakMode, but with base target buffer level increased to
+// 2, in order to raise the bar for delay peaks to inter-arrival times > 4.
+// The delay pattern has peaks with delay = 3, thus should not trigger.
+TEST(DelayPeakDetector, DoNotTriggerPeakMode) {
+  TickTimer tick_timer;
+  DelayPeakDetector detector(&tick_timer);
+  const int kPacketSizeMs = 30;
+  detector.SetPacketAudioLength(kPacketSizeMs);
+
+  // Load up normal arrival times; 0 ms, 30 ms, 60 ms, 90 ms, ...
+  const int kNumPackets = 1000;
+  int arrival_times_ms[kNumPackets];
+  for (int i = 0; i < kNumPackets; ++i) {
+    arrival_times_ms[i] = i * kPacketSizeMs;
+  }
+
+  // Delay three packets.
+  const int kPeakDelayMs = 100;
+  // First delay peak.
+  arrival_times_ms[100] += kPeakDelayMs;
+  // Second delay peak.
+  arrival_times_ms[200] += kPeakDelayMs;
+  // Third delay peak.
+  arrival_times_ms[400] += kPeakDelayMs;
+
+  // Load into detector.
+  int time = 0;
+  int next = 1;  // Start with the second packet to get a proper IAT.
+  while (next < kNumPackets) {
+    while (next < kNumPackets && arrival_times_ms[next] <= time) {
+      int iat_packets = (arrival_times_ms[next] - arrival_times_ms[next - 1]) /
+          kPacketSizeMs;
+      const int kTargetBufferLevel = 2;  // Define peaks to be iat > 4.
+      EXPECT_FALSE(detector.Update(iat_packets, kTargetBufferLevel));
+      ++next;
+    }
+    tick_timer.Increment();
+    time += 10;  // Increase time 10 ms.
+  }
+}
+
+// In situations with reordered packets, the DelayPeakDetector may be updated
+// back-to-back (i.e., without the tick_timer moving) but still with non-zero
+// inter-arrival time. This test is to make sure that this does not cause
+// problems.
+TEST(DelayPeakDetector, ZeroDistancePeaks) {
+  TickTimer tick_timer;
+  DelayPeakDetector detector(&tick_timer);
+  const int kPacketSizeMs = 30;
+  detector.SetPacketAudioLength(kPacketSizeMs);
+
+  const int kTargetBufferLevel = 2;  // Define peaks to be iat > 4.
+  const int kInterArrivalTime = 3 * kTargetBufferLevel;  // Will trigger a peak.
+  EXPECT_FALSE(detector.Update(kInterArrivalTime, kTargetBufferLevel));
+  EXPECT_FALSE(detector.Update(kInterArrivalTime, kTargetBufferLevel));
+  EXPECT_FALSE(detector.Update(kInterArrivalTime, kTargetBufferLevel));
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/dsp_helper.cc b/modules/audio_coding/neteq/dsp_helper.cc
new file mode 100644
index 0000000..2a1d81b
--- /dev/null
+++ b/modules/audio_coding/neteq/dsp_helper.cc
@@ -0,0 +1,368 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/dsp_helper.h"
+
+#include <assert.h>
+#include <string.h>  // Access to memset.
+
+#include <algorithm>  // Access to min, max.
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+namespace webrtc {
+
+// Table of constants used in method DspHelper::ParabolicFit().
+const int16_t DspHelper::kParabolaCoefficients[17][3] = {
+    { 120, 32, 64 },
+    { 140, 44, 75 },
+    { 150, 50, 80 },
+    { 160, 57, 85 },
+    { 180, 72, 96 },
+    { 200, 89, 107 },
+    { 210, 98, 112 },
+    { 220, 108, 117 },
+    { 240, 128, 128 },
+    { 260, 150, 139 },
+    { 270, 162, 144 },
+    { 280, 174, 149 },
+    { 300, 200, 160 },
+    { 320, 228, 171 },
+    { 330, 242, 176 },
+    { 340, 257, 181 },
+    { 360, 288, 192 } };
+
+// Filter coefficients used when downsampling from the indicated sample rates
+// (8, 16, 32, 48 kHz) to 4 kHz. Coefficients are in Q12. The corresponding Q0
+// values are provided in the comments before each array.
+
+// Q0 values: {0.3, 0.4, 0.3}.
+const int16_t DspHelper::kDownsample8kHzTbl[3] = { 1229, 1638, 1229 };
+
+// Q0 values: {0.15, 0.2, 0.3, 0.2, 0.15}.
+const int16_t DspHelper::kDownsample16kHzTbl[5] = { 614, 819, 1229, 819, 614 };
+
+// Q0 values: {0.1425, 0.1251, 0.1525, 0.1628, 0.1525, 0.1251, 0.1425}.
+const int16_t DspHelper::kDownsample32kHzTbl[7] = {
+    584, 512, 625, 667, 625, 512, 584 };
+
+// Q0 values: {0.2487, 0.0952, 0.1042, 0.1074, 0.1042, 0.0952, 0.2487}.
+const int16_t DspHelper::kDownsample48kHzTbl[7] = {
+    1019, 390, 427, 440, 427, 390, 1019 };
+
+int DspHelper::RampSignal(const int16_t* input,
+                          size_t length,
+                          int factor,
+                          int increment,
+                          int16_t* output) {
+  int factor_q20 = (factor << 6) + 32;
+  // TODO(hlundin): Add 32 to factor_q20 when converting back to Q14?
+  for (size_t i = 0; i < length; ++i) {
+    output[i] = (factor * input[i] + 8192) >> 14;
+    factor_q20 += increment;
+    factor_q20 = std::max(factor_q20, 0);  // Never go negative.
+    factor = std::min(factor_q20 >> 6, 16384);
+  }
+  return factor;
+}
+
+int DspHelper::RampSignal(int16_t* signal,
+                          size_t length,
+                          int factor,
+                          int increment) {
+  return RampSignal(signal, length, factor, increment, signal);
+}
+
+int DspHelper::RampSignal(AudioVector* signal,
+                          size_t start_index,
+                          size_t length,
+                          int factor,
+                          int increment) {
+  int factor_q20 = (factor << 6) + 32;
+  // TODO(hlundin): Add 32 to factor_q20 when converting back to Q14?
+  for (size_t i = start_index; i < start_index + length; ++i) {
+    (*signal)[i] = (factor * (*signal)[i] + 8192) >> 14;
+    factor_q20 += increment;
+    factor_q20 = std::max(factor_q20, 0);  // Never go negative.
+    factor = std::min(factor_q20 >> 6, 16384);
+  }
+  return factor;
+}
+
+int DspHelper::RampSignal(AudioMultiVector* signal,
+                          size_t start_index,
+                          size_t length,
+                          int factor,
+                          int increment) {
+  assert(start_index + length <= signal->Size());
+  if (start_index + length > signal->Size()) {
+    // Wrong parameters. Do nothing and return the scale factor unaltered.
+    return factor;
+  }
+  int end_factor = 0;
+  // Loop over the channels, starting at the same |factor| each time.
+  for (size_t channel = 0; channel < signal->Channels(); ++channel) {
+    end_factor =
+        RampSignal(&(*signal)[channel], start_index, length, factor, increment);
+  }
+  return end_factor;
+}
+
+void DspHelper::PeakDetection(int16_t* data, size_t data_length,
+                              size_t num_peaks, int fs_mult,
+                              size_t* peak_index, int16_t* peak_value) {
+  size_t min_index = 0;
+  size_t max_index = 0;
+
+  for (size_t i = 0; i <= num_peaks - 1; i++) {
+    if (num_peaks == 1) {
+      // Single peak.  The parabola fit assumes that an extra point is
+      // available; worst case it gets a zero on the high end of the signal.
+      // TODO(hlundin): This can potentially get much worse. It breaks the
+      // API contract, that the length of |data| is |data_length|.
+      data_length++;
+    }
+
+    peak_index[i] = WebRtcSpl_MaxIndexW16(data, data_length - 1);
+
+    if (i != num_peaks - 1) {
+      min_index = (peak_index[i] > 2) ? (peak_index[i] - 2) : 0;
+      max_index = std::min(data_length - 1, peak_index[i] + 2);
+    }
+
+    if ((peak_index[i] != 0) && (peak_index[i] != (data_length - 2))) {
+      ParabolicFit(&data[peak_index[i] - 1], fs_mult, &peak_index[i],
+                   &peak_value[i]);
+    } else {
+      if (peak_index[i] == data_length - 2) {
+        if (data[peak_index[i]] > data[peak_index[i] + 1]) {
+          ParabolicFit(&data[peak_index[i] - 1], fs_mult, &peak_index[i],
+                       &peak_value[i]);
+        } else if (data[peak_index[i]] <= data[peak_index[i] + 1]) {
+          // Linear approximation.
+          peak_value[i] = (data[peak_index[i]] + data[peak_index[i] + 1]) >> 1;
+          peak_index[i] = (peak_index[i] * 2 + 1) * fs_mult;
+        }
+      } else {
+        peak_value[i] = data[peak_index[i]];
+        peak_index[i] = peak_index[i] * 2 * fs_mult;
+      }
+    }
+
+    if (i != num_peaks - 1) {
+      memset(&data[min_index], 0,
+             sizeof(data[0]) * (max_index - min_index + 1));
+    }
+  }
+}
+
+void DspHelper::ParabolicFit(int16_t* signal_points, int fs_mult,
+                             size_t* peak_index, int16_t* peak_value) {
+  uint16_t fit_index[13];
+  if (fs_mult == 1) {
+    fit_index[0] = 0;
+    fit_index[1] = 8;
+    fit_index[2] = 16;
+  } else if (fs_mult == 2) {
+    fit_index[0] = 0;
+    fit_index[1] = 4;
+    fit_index[2] = 8;
+    fit_index[3] = 12;
+    fit_index[4] = 16;
+  } else if (fs_mult == 4) {
+    fit_index[0] = 0;
+    fit_index[1] = 2;
+    fit_index[2] = 4;
+    fit_index[3] = 6;
+    fit_index[4] = 8;
+    fit_index[5] = 10;
+    fit_index[6] = 12;
+    fit_index[7] = 14;
+    fit_index[8] = 16;
+  } else {
+    fit_index[0] = 0;
+    fit_index[1] = 1;
+    fit_index[2] = 3;
+    fit_index[3] = 4;
+    fit_index[4] = 5;
+    fit_index[5] = 7;
+    fit_index[6] = 8;
+    fit_index[7] = 9;
+    fit_index[8] = 11;
+    fit_index[9] = 12;
+    fit_index[10] = 13;
+    fit_index[11] = 15;
+    fit_index[12] = 16;
+  }
+
+  //  num = -3 * signal_points[0] + 4 * signal_points[1] - signal_points[2];
+  //  den =      signal_points[0] - 2 * signal_points[1] + signal_points[2];
+  int32_t num = (signal_points[0] * -3) + (signal_points[1] * 4)
+      - signal_points[2];
+  int32_t den = signal_points[0] + (signal_points[1] * -2) + signal_points[2];
+  int32_t temp = num * 120;
+  int flag = 1;
+  int16_t stp = kParabolaCoefficients[fit_index[fs_mult]][0]
+      - kParabolaCoefficients[fit_index[fs_mult - 1]][0];
+  int16_t strt = (kParabolaCoefficients[fit_index[fs_mult]][0]
+      + kParabolaCoefficients[fit_index[fs_mult - 1]][0]) / 2;
+  int16_t lmt;
+  if (temp < -den * strt) {
+    lmt = strt - stp;
+    while (flag) {
+      if ((flag == fs_mult) || (temp > -den * lmt)) {
+        *peak_value = (den * kParabolaCoefficients[fit_index[fs_mult - flag]][1]
+            + num * kParabolaCoefficients[fit_index[fs_mult - flag]][2]
+            + signal_points[0] * 256) / 256;
+        *peak_index = *peak_index * 2 * fs_mult - flag;
+        flag = 0;
+      } else {
+        flag++;
+        lmt -= stp;
+      }
+    }
+  } else if (temp > -den * (strt + stp)) {
+    lmt = strt + 2 * stp;
+    while (flag) {
+      if ((flag == fs_mult) || (temp < -den * lmt)) {
+        int32_t temp_term_1 =
+            den * kParabolaCoefficients[fit_index[fs_mult+flag]][1];
+        int32_t temp_term_2 =
+            num * kParabolaCoefficients[fit_index[fs_mult+flag]][2];
+        int32_t temp_term_3 = signal_points[0] * 256;
+        *peak_value = (temp_term_1 + temp_term_2 + temp_term_3) / 256;
+        *peak_index = *peak_index * 2 * fs_mult + flag;
+        flag = 0;
+      } else {
+        flag++;
+        lmt += stp;
+      }
+    }
+  } else {
+    *peak_value = signal_points[1];
+    *peak_index = *peak_index * 2 * fs_mult;
+  }
+}
+
+size_t DspHelper::MinDistortion(const int16_t* signal, size_t min_lag,
+                                size_t max_lag, size_t length,
+                                int32_t* distortion_value) {
+  size_t best_index = 0;
+  int32_t min_distortion = WEBRTC_SPL_WORD32_MAX;
+  for (size_t i = min_lag; i <= max_lag; i++) {
+    int32_t sum_diff = 0;
+    const int16_t* data1 = signal;
+    const int16_t* data2 = signal - i;
+    for (size_t j = 0; j < length; j++) {
+      sum_diff += WEBRTC_SPL_ABS_W32(data1[j] - data2[j]);
+    }
+    // Compare with previous minimum.
+    if (sum_diff < min_distortion) {
+      min_distortion = sum_diff;
+      best_index = i;
+    }
+  }
+  *distortion_value = min_distortion;
+  return best_index;
+}
+
+void DspHelper::CrossFade(const int16_t* input1, const int16_t* input2,
+                          size_t length, int16_t* mix_factor,
+                          int16_t factor_decrement, int16_t* output) {
+  int16_t factor = *mix_factor;
+  int16_t complement_factor = 16384 - factor;
+  for (size_t i = 0; i < length; i++) {
+    output[i] =
+        (factor * input1[i] + complement_factor * input2[i] + 8192) >> 14;
+    factor -= factor_decrement;
+    complement_factor += factor_decrement;
+  }
+  *mix_factor = factor;
+}
+
+void DspHelper::UnmuteSignal(const int16_t* input, size_t length,
+                             int16_t* factor, int increment,
+                             int16_t* output) {
+  uint16_t factor_16b = *factor;
+  int32_t factor_32b = (static_cast<int32_t>(factor_16b) << 6) + 32;
+  for (size_t i = 0; i < length; i++) {
+    output[i] = (factor_16b * input[i] + 8192) >> 14;
+    factor_32b = std::max(factor_32b + increment, 0);
+    factor_16b = std::min(16384, factor_32b >> 6);
+  }
+  *factor = factor_16b;
+}
+
+void DspHelper::MuteSignal(int16_t* signal, int mute_slope, size_t length) {
+  int32_t factor = (16384 << 6) + 32;
+  for (size_t i = 0; i < length; i++) {
+    signal[i] = ((factor >> 6) * signal[i] + 8192) >> 14;
+    factor -= mute_slope;
+  }
+}
+
+int DspHelper::DownsampleTo4kHz(const int16_t* input, size_t input_length,
+                                size_t output_length, int input_rate_hz,
+                                bool compensate_delay, int16_t* output) {
+  // Set filter parameters depending on input frequency.
+  // NOTE: The phase delay values are wrong compared to the true phase delay
+  // of the filters. However, the error is preserved (through the +1 term) for
+  // consistency.
+  const int16_t* filter_coefficients;  // Filter coefficients.
+  size_t filter_length;  // Number of coefficients.
+  size_t filter_delay;  // Phase delay in samples.
+  int16_t factor;  // Conversion rate (inFsHz / 8000).
+  switch (input_rate_hz) {
+    case 8000: {
+      filter_length = 3;
+      factor = 2;
+      filter_coefficients = kDownsample8kHzTbl;
+      filter_delay = 1 + 1;
+      break;
+    }
+    case 16000: {
+      filter_length = 5;
+      factor = 4;
+      filter_coefficients = kDownsample16kHzTbl;
+      filter_delay = 2 + 1;
+      break;
+    }
+    case 32000: {
+      filter_length = 7;
+      factor = 8;
+      filter_coefficients = kDownsample32kHzTbl;
+      filter_delay = 3 + 1;
+      break;
+    }
+    case 48000: {
+      filter_length = 7;
+      factor = 12;
+      filter_coefficients = kDownsample48kHzTbl;
+      filter_delay = 3 + 1;
+      break;
+    }
+    default: {
+      assert(false);
+      return -1;
+    }
+  }
+
+  if (!compensate_delay) {
+    // Disregard delay compensation.
+    filter_delay = 0;
+  }
+
+  // Returns -1 if input signal is too short; 0 otherwise.
+  return WebRtcSpl_DownsampleFast(
+      &input[filter_length - 1], input_length - filter_length + 1, output,
+      output_length, filter_coefficients, filter_length, factor, filter_delay);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/dsp_helper.h b/modules/audio_coding/neteq/dsp_helper.h
new file mode 100644
index 0000000..7ceb66f
--- /dev/null
+++ b/modules/audio_coding/neteq/dsp_helper.h
@@ -0,0 +1,144 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_DSP_HELPER_H_
+#define MODULES_AUDIO_CODING_NETEQ_DSP_HELPER_H_
+
+#include <string.h>  // Access to size_t.
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// This class contains various signal processing functions, all implemented as
+// static methods.
+class DspHelper {
+ public:
+  // Filter coefficients used when downsampling from the indicated sample rates
+  // (8, 16, 32, 48 kHz) to 4 kHz. Coefficients are in Q12.
+  static const int16_t kDownsample8kHzTbl[3];
+  static const int16_t kDownsample16kHzTbl[5];
+  static const int16_t kDownsample32kHzTbl[7];
+  static const int16_t kDownsample48kHzTbl[7];
+
+  // Constants used to mute and unmute over 5 samples. The coefficients are
+  // in Q15.
+  static const int kMuteFactorStart8kHz = 27307;
+  static const int kMuteFactorIncrement8kHz = -5461;
+  static const int kUnmuteFactorStart8kHz = 5461;
+  static const int kUnmuteFactorIncrement8kHz = 5461;
+  static const int kMuteFactorStart16kHz = 29789;
+  static const int kMuteFactorIncrement16kHz = -2979;
+  static const int kUnmuteFactorStart16kHz = 2979;
+  static const int kUnmuteFactorIncrement16kHz = 2979;
+  static const int kMuteFactorStart32kHz = 31208;
+  static const int kMuteFactorIncrement32kHz = -1560;
+  static const int kUnmuteFactorStart32kHz = 1560;
+  static const int kUnmuteFactorIncrement32kHz = 1560;
+  static const int kMuteFactorStart48kHz = 31711;
+  static const int kMuteFactorIncrement48kHz = -1057;
+  static const int kUnmuteFactorStart48kHz = 1057;
+  static const int kUnmuteFactorIncrement48kHz = 1057;
+
+  // Multiplies the signal with a gradually changing factor.
+  // The first sample is multiplied with |factor| (in Q14). For each sample,
+  // |factor| is increased (additive) by the |increment| (in Q20), which can
+  // be negative. Returns the scale factor after the last increment.
+  static int RampSignal(const int16_t* input,
+                        size_t length,
+                        int factor,
+                        int increment,
+                        int16_t* output);
+
+  // Same as above, but with the samples of |signal| being modified in-place.
+  static int RampSignal(int16_t* signal,
+                        size_t length,
+                        int factor,
+                        int increment);
+
+  // Same as above, but processes |length| samples from |signal|, starting at
+  // |start_index|.
+  static int RampSignal(AudioVector* signal,
+                        size_t start_index,
+                        size_t length,
+                        int factor,
+                        int increment);
+
+  // Same as above, but for an AudioMultiVector.
+  static int RampSignal(AudioMultiVector* signal,
+                        size_t start_index,
+                        size_t length,
+                        int factor,
+                        int increment);
+
+  // Peak detection with parabolic fit. Looks for |num_peaks| maxima in |data|,
+  // having length |data_length| and sample rate multiplier |fs_mult|. The peak
+  // locations and values are written to the arrays |peak_index| and
+  // |peak_value|, respectively. Both arrays must hold at least |num_peaks|
+  // elements.
+  static void PeakDetection(int16_t* data, size_t data_length,
+                            size_t num_peaks, int fs_mult,
+                            size_t* peak_index, int16_t* peak_value);
+
+  // Estimates the height and location of a maximum. The three values in the
+  // array |signal_points| are used as basis for a parabolic fit, which is then
+  // used to find the maximum in an interpolated signal. The |signal_points| are
+  // assumed to be from a 4 kHz signal, while the maximum, written to
+  // |peak_index| and |peak_value| is given in the full sample rate, as
+  // indicated by the sample rate multiplier |fs_mult|.
+  static void ParabolicFit(int16_t* signal_points, int fs_mult,
+                           size_t* peak_index, int16_t* peak_value);
+
+  // Calculates the sum-abs-diff for |signal| when compared to a displaced
+  // version of itself. Returns the displacement lag that results in the minimum
+  // distortion. The resulting distortion is written to |distortion_value|.
+  // The values of |min_lag| and |max_lag| are boundaries for the search.
+  static size_t MinDistortion(const int16_t* signal, size_t min_lag,
+                           size_t max_lag, size_t length,
+                           int32_t* distortion_value);
+
+  // Mixes |length| samples from |input1| and |input2| together and writes the
+  // result to |output|. The gain for |input1| starts at |mix_factor| (Q14) and
+  // is decreased by |factor_decrement| (Q14) for each sample. The gain for
+  // |input2| is the complement 16384 - mix_factor.
+  static void CrossFade(const int16_t* input1, const int16_t* input2,
+                        size_t length, int16_t* mix_factor,
+                        int16_t factor_decrement, int16_t* output);
+
+  // Scales |input| with an increasing gain. Applies |factor| (Q14) to the first
+  // sample and increases the gain by |increment| (Q20) for each sample. The
+  // result is written to |output|. |length| samples are processed.
+  static void UnmuteSignal(const int16_t* input, size_t length, int16_t* factor,
+                           int increment, int16_t* output);
+
+  // Starts at unity gain and gradually fades out |signal|. For each sample,
+  // the gain is reduced by |mute_slope| (Q14). |length| samples are processed.
+  static void MuteSignal(int16_t* signal, int mute_slope, size_t length);
+
+  // Downsamples |input| from |sample_rate_hz| to 4 kHz sample rate. The input
+  // has |input_length| samples, and the method will write |output_length|
+  // samples to |output|. Compensates for the phase delay of the downsampling
+  // filters if |compensate_delay| is true. Returns -1 if the input is too short
+  // to produce |output_length| samples, otherwise 0.
+  static int DownsampleTo4kHz(const int16_t* input, size_t input_length,
+                              size_t output_length, int input_rate_hz,
+                              bool compensate_delay, int16_t* output);
+
+ private:
+  // Table of constants used in method DspHelper::ParabolicFit().
+  static const int16_t kParabolaCoefficients[17][3];
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(DspHelper);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_DSP_HELPER_H_
diff --git a/modules/audio_coding/neteq/dsp_helper_unittest.cc b/modules/audio_coding/neteq/dsp_helper_unittest.cc
new file mode 100644
index 0000000..98ae2a2
--- /dev/null
+++ b/modules/audio_coding/neteq/dsp_helper_unittest.cc
@@ -0,0 +1,89 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/dsp_helper.h"
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+TEST(DspHelper, RampSignalArray) {
+  static const int kLen = 100;
+  int16_t input[kLen];
+  int16_t output[kLen];
+  // Fill input with 1000.
+  for (int i = 0; i < kLen; ++i) {
+    input[i] = 1000;
+  }
+  int start_factor = 0;
+  // Ramp from 0 to 1 (in Q14) over the array. Note that |increment| is in Q20,
+  // while the factor is in Q14, hence the shift by 6.
+  int increment = (16384 << 6) / kLen;
+
+  // Test first method.
+  int stop_factor = DspHelper::RampSignal(input, kLen, start_factor, increment,
+                                          output);
+  EXPECT_EQ(16383, stop_factor);  // Almost reach 1 in Q14.
+  for (int i = 0; i < kLen; ++i) {
+    EXPECT_EQ(1000 * i / kLen, output[i]);
+  }
+
+  // Test second method. (Note that this modifies |input|.)
+  stop_factor = DspHelper::RampSignal(input, kLen, start_factor, increment);
+  EXPECT_EQ(16383, stop_factor);  // Almost reach 1 in Q14.
+  for (int i = 0; i < kLen; ++i) {
+    EXPECT_EQ(1000 * i / kLen, input[i]);
+  }
+}
+
+TEST(DspHelper, RampSignalAudioMultiVector) {
+  static const int kLen = 100;
+  static const int kChannels = 5;
+  AudioMultiVector input(kChannels, kLen * 3);
+  // Fill input with 1000.
+  for (int i = 0; i < kLen * 3; ++i) {
+    for (int channel = 0; channel < kChannels; ++channel) {
+      input[channel][i] = 1000;
+    }
+  }
+  // We want to start ramping at |start_index| and keep ramping for |kLen|
+  // samples.
+  int start_index = kLen;
+  int start_factor = 0;
+  // Ramp from 0 to 1 (in Q14) in |kLen| samples. Note that |increment| is in
+  // Q20, while the factor is in Q14, hence the shift by 6.
+  int increment = (16384 << 6) / kLen;
+
+  int stop_factor = DspHelper::RampSignal(&input, start_index, kLen,
+                                          start_factor, increment);
+  EXPECT_EQ(16383, stop_factor);  // Almost reach 1 in Q14.
+  // Verify that the first |kLen| samples are left untouched.
+  int i;
+  for (i = 0; i < kLen; ++i) {
+    for (int channel = 0; channel < kChannels; ++channel) {
+      EXPECT_EQ(1000, input[channel][i]);
+    }
+  }
+  // Verify that the next block of |kLen| samples are ramped.
+  for (; i < 2 * kLen; ++i) {
+    for (int channel = 0; channel < kChannels; ++channel) {
+      EXPECT_EQ(1000 * (i - kLen) / kLen, input[channel][i]);
+    }
+  }
+  // Verify the last |kLen| samples are left untouched.
+  for (; i < 3 * kLen; ++i) {
+    for (int channel = 0; channel < kChannels; ++channel) {
+      EXPECT_EQ(1000, input[channel][i]);
+    }
+  }
+}
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/dtmf_buffer.cc b/modules/audio_coding/neteq/dtmf_buffer.cc
new file mode 100644
index 0000000..370de42
--- /dev/null
+++ b/modules/audio_coding/neteq/dtmf_buffer.cc
@@ -0,0 +1,248 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/dtmf_buffer.h"
+
+#include <assert.h>
+#include <algorithm>  // max
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+// Modify the code to obtain backwards bit-exactness. Once bit-exactness is no
+// longer required, this #define should be removed (and the code that it
+// enables).
+#define LEGACY_BITEXACT
+
+namespace webrtc {
+
+DtmfBuffer::DtmfBuffer(int fs_hz) {
+  SetSampleRate(fs_hz);
+}
+
+DtmfBuffer::~DtmfBuffer() = default;
+
+void DtmfBuffer::Flush() {
+  buffer_.clear();
+}
+
+// The ParseEvent method parses 4 bytes from |payload| according to this format
+// from RFC 4733:
+//
+//  0                   1                   2                   3
+//  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |     event     |E|R| volume    |          duration             |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// Legend (adapted from RFC 4733)
+// - event:    The event field is a number between 0 and 255 identifying a
+//             specific telephony event. The buffer will not accept any event
+//             numbers larger than 15.
+// - E:        If set to a value of one, the "end" bit indicates that this
+//             packet contains the end of the event.  For long-lasting events
+//             that have to be split into segments, only the final packet for
+//             the final segment will have the E bit set.
+// - R:        Reserved.
+// - volume:   For DTMF digits and other events representable as tones, this
+//             field describes the power level of the tone, expressed in dBm0
+//             after dropping the sign.  Power levels range from 0 to -63 dBm0.
+//             Thus, larger values denote lower volume. The buffer discards
+//             values larger than 36 (i.e., lower than -36 dBm0).
+// - duration: The duration field indicates the duration of the event or segment
+//             being reported, in timestamp units, expressed as an unsigned
+//             integer in network byte order.  For a non-zero value, the event
+//             or segment began at the instant identified by the RTP timestamp
+//             and has so far lasted as long as indicated by this parameter.
+//             The event may or may not have ended.  If the event duration
+//             exceeds the maximum representable by the duration field, the
+//             event is split into several contiguous segments. The buffer will
+//             discard zero-duration events.
+//
+int DtmfBuffer::ParseEvent(uint32_t rtp_timestamp,
+                           const uint8_t* payload,
+                           size_t payload_length_bytes,
+                           DtmfEvent* event) {
+  RTC_CHECK(payload);
+  RTC_CHECK(event);
+  if (payload_length_bytes < 4) {
+    RTC_LOG(LS_WARNING) << "ParseEvent payload too short";
+    return kPayloadTooShort;
+  }
+
+  event->event_no = payload[0];
+  event->end_bit = ((payload[1] & 0x80) != 0);
+  event->volume = (payload[1] & 0x3F);
+  event->duration = payload[2] << 8 | payload[3];
+  event->timestamp = rtp_timestamp;
+  return kOK;
+}
+
+// Inserts a DTMF event into the buffer. The event should be parsed from the
+// bit stream using the ParseEvent method above before inserting it in the
+// buffer.
+// DTMF events can be quite long, and in most cases the duration of the event
+// is not known when the first packet describing it is sent. To deal with that,
+// the RFC 4733 specifies that multiple packets are sent for one and the same
+// event as it is being created (typically, as the user is pressing the key).
+// These packets will all share the same start timestamp and event number,
+// while the duration will be the cumulative duration from the start. When
+// inserting a new event, the InsertEvent method tries to find a matching event
+// already in the buffer. If so, the new event is simply merged with the
+// existing one.
+int DtmfBuffer::InsertEvent(const DtmfEvent& event) {
+  if (event.event_no < 0 || event.event_no > 15 ||
+      event.volume < 0 || event.volume > 63 ||
+      event.duration <= 0 || event.duration > 65535) {
+    RTC_LOG(LS_WARNING) << "InsertEvent invalid parameters";
+    return kInvalidEventParameters;
+  }
+  DtmfList::iterator it = buffer_.begin();
+  while (it != buffer_.end()) {
+    if (MergeEvents(it, event)) {
+      // A matching event was found and the new event was merged.
+      return kOK;
+    }
+    ++it;
+  }
+  buffer_.push_back(event);
+  // Sort the buffer using CompareEvents to rank the events.
+  buffer_.sort(CompareEvents);
+  return kOK;
+}
+
+bool DtmfBuffer::GetEvent(uint32_t current_timestamp, DtmfEvent* event) {
+  DtmfList::iterator it = buffer_.begin();
+  while (it != buffer_.end()) {
+    // |event_end| is an estimate of where the current event ends. If the end
+    // bit is set, we know that the event ends at |timestamp| + |duration|.
+    uint32_t event_end = it->timestamp + it->duration;
+#ifdef LEGACY_BITEXACT
+    bool next_available = false;
+#endif
+    if (!it->end_bit) {
+      // If the end bit is not set, we allow extrapolation of the event for
+      // some time.
+      event_end += max_extrapolation_samples_;
+      DtmfList::iterator next = it;
+      ++next;
+      if (next != buffer_.end()) {
+        // If there is a next event in the buffer, we will not extrapolate over
+        // the start of that new event.
+        event_end = std::min(event_end, next->timestamp);
+#ifdef LEGACY_BITEXACT
+        next_available = true;
+#endif
+      }
+    }
+    if (current_timestamp >= it->timestamp
+        && current_timestamp <= event_end) {  // TODO(hlundin): Change to <.
+      // Found a matching event.
+      if (event) {
+        event->event_no = it->event_no;
+        event->end_bit = it->end_bit;
+        event->volume = it->volume;
+        event->duration = it->duration;
+        event->timestamp = it->timestamp;
+      }
+#ifdef LEGACY_BITEXACT
+      if (it->end_bit &&
+          current_timestamp + frame_len_samples_ >= event_end) {
+        // We are done playing this. Erase the event.
+        buffer_.erase(it);
+      }
+#endif
+      return true;
+    } else if (current_timestamp > event_end) {  // TODO(hlundin): Change to >=.
+      // Erase old event. Operation returns a valid pointer to the next element
+      // in the list.
+#ifdef LEGACY_BITEXACT
+      if (!next_available) {
+        if (event) {
+          event->event_no = it->event_no;
+          event->end_bit = it->end_bit;
+          event->volume = it->volume;
+          event->duration = it->duration;
+          event->timestamp = it->timestamp;
+        }
+        it = buffer_.erase(it);
+        return true;
+      } else {
+        it = buffer_.erase(it);
+      }
+#else
+      it = buffer_.erase(it);
+#endif
+    } else {
+      ++it;
+    }
+  }
+  return false;
+}
+
+size_t DtmfBuffer::Length() const {
+  return buffer_.size();
+}
+
+bool DtmfBuffer::Empty() const {
+  return buffer_.empty();
+}
+
+int DtmfBuffer::SetSampleRate(int fs_hz) {
+  if (fs_hz != 8000 &&
+      fs_hz != 16000 &&
+      fs_hz != 32000 &&
+      fs_hz != 48000) {
+    return kInvalidSampleRate;
+  }
+  max_extrapolation_samples_ = 7 * fs_hz / 100;
+  frame_len_samples_ = fs_hz / 100;
+  return kOK;
+}
+
+// The method returns true if the two events are considered to be the same.
+// The are defined as equal if they share the same timestamp and event number.
+// The special case with long-lasting events that have to be split into segments
+// is not handled in this method. These will be treated as separate events in
+// the buffer.
+bool DtmfBuffer::SameEvent(const DtmfEvent& a, const DtmfEvent& b) {
+  return (a.event_no == b.event_no) && (a.timestamp == b.timestamp);
+}
+
+bool DtmfBuffer::MergeEvents(DtmfList::iterator it, const DtmfEvent& event) {
+  if (SameEvent(*it, event)) {
+    if (!it->end_bit) {
+      // Do not extend the duration of an event for which the end bit was
+      // already received.
+      it->duration = std::max(event.duration, it->duration);
+    }
+    if (event.end_bit) {
+      it->end_bit = true;
+    }
+    return true;
+  } else {
+    return false;
+  }
+}
+
+// Returns true if |a| goes before |b| in the sorting order ("|a| < |b|").
+// The events are ranked using their start timestamp (taking wrap-around into
+// account). In the unlikely situation that two events share the same start
+// timestamp, the event number is used to rank the two. Note that packets
+// that belong to the same events, and therefore sharing the same start
+// timestamp, have already been merged before the sort method is called.
+bool DtmfBuffer::CompareEvents(const DtmfEvent& a, const DtmfEvent& b) {
+  if (a.timestamp == b.timestamp) {
+    return a.event_no < b.event_no;
+  }
+  // Take wrap-around into account.
+  return (static_cast<uint32_t>(b.timestamp - a.timestamp) < 0xFFFFFFFF / 2);
+}
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/dtmf_buffer.h b/modules/audio_coding/neteq/dtmf_buffer.h
new file mode 100644
index 0000000..87a5655
--- /dev/null
+++ b/modules/audio_coding/neteq/dtmf_buffer.h
@@ -0,0 +1,114 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_DTMF_BUFFER_H_
+#define MODULES_AUDIO_CODING_NETEQ_DTMF_BUFFER_H_
+
+#include <list>
+#include <string>  // size_t
+
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+struct DtmfEvent {
+  uint32_t timestamp;
+  int event_no;
+  int volume;
+  int duration;
+  bool end_bit;
+
+  // Constructors
+  DtmfEvent()
+      : timestamp(0),
+        event_no(0),
+        volume(0),
+        duration(0),
+        end_bit(false) {
+  }
+  DtmfEvent(uint32_t ts, int ev, int vol, int dur, bool end)
+      : timestamp(ts),
+        event_no(ev),
+        volume(vol),
+        duration(dur),
+        end_bit(end) {
+  }
+};
+
+// This is the buffer holding DTMF events while waiting for them to be played.
+class DtmfBuffer {
+ public:
+  enum BufferReturnCodes {
+    kOK = 0,
+    kInvalidPointer,
+    kPayloadTooShort,
+    kInvalidEventParameters,
+    kInvalidSampleRate
+  };
+
+  // Set up the buffer for use at sample rate |fs_hz|.
+  explicit DtmfBuffer(int fs_hz);
+
+  virtual ~DtmfBuffer();
+
+  // Flushes the buffer.
+  virtual void Flush();
+
+  // Static method to parse 4 bytes from |payload| as a DTMF event (RFC 4733)
+  // and write the parsed information into the struct |event|. Input variable
+  // |rtp_timestamp| is simply copied into the struct.
+  static int ParseEvent(uint32_t rtp_timestamp,
+                        const uint8_t* payload,
+                        size_t payload_length_bytes,
+                        DtmfEvent* event);
+
+  // Inserts |event| into the buffer. The method looks for a matching event and
+  // merges the two if a match is found.
+  virtual int InsertEvent(const DtmfEvent& event);
+
+  // Checks if a DTMF event should be played at time |current_timestamp|. If so,
+  // the method returns true; otherwise false. The parameters of the event to
+  // play will be written to |event|.
+  virtual bool GetEvent(uint32_t current_timestamp, DtmfEvent* event);
+
+  // Number of events in the buffer.
+  virtual size_t Length() const;
+
+  virtual bool Empty() const;
+
+  // Set a new sample rate.
+  virtual int SetSampleRate(int fs_hz);
+
+ private:
+  typedef std::list<DtmfEvent> DtmfList;
+
+  int max_extrapolation_samples_;
+  int frame_len_samples_;  // TODO(hlundin): Remove this later.
+
+  // Compares two events and returns true if they are the same.
+  static bool SameEvent(const DtmfEvent& a, const DtmfEvent& b);
+
+  // Merges |event| to the event pointed out by |it|. The method checks that
+  // the two events are the same (using the SameEvent method), and merges them
+  // if that was the case, returning true. If the events are not the same, false
+  // is returned.
+  bool MergeEvents(DtmfList::iterator it, const DtmfEvent& event);
+
+  // Method used by the sort algorithm to rank events in the buffer.
+  static bool CompareEvents(const DtmfEvent& a, const DtmfEvent& b);
+
+  DtmfList buffer_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(DtmfBuffer);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_DTMF_BUFFER_H_
diff --git a/modules/audio_coding/neteq/dtmf_buffer_unittest.cc b/modules/audio_coding/neteq/dtmf_buffer_unittest.cc
new file mode 100644
index 0000000..7bcf1e0
--- /dev/null
+++ b/modules/audio_coding/neteq/dtmf_buffer_unittest.cc
@@ -0,0 +1,301 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/dtmf_buffer.h"
+
+#ifdef WIN32
+#include <winsock2.h>  // ntohl()
+#else
+#include <arpa/inet.h>  // ntohl()
+#endif
+
+#include <iostream>
+
+#include "test/gtest.h"
+
+// Modify the tests so that they pass with the modifications done to DtmfBuffer
+// for backwards bit-exactness. Once bit-exactness is no longer required, this
+// #define should be removed (and the code that it enables).
+#define LEGACY_BITEXACT
+
+namespace webrtc {
+
+static int sample_rate_hz = 8000;
+
+static uint32_t MakeDtmfPayload(int event, bool end, int volume, int duration) {
+  uint32_t payload = 0;
+//  0                   1                   2                   3
+//  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |     event     |E|R| volume    |          duration             |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  payload |= (event & 0x00FF) << 24;
+  payload |= (end ? 0x00800000 : 0x00000000);
+  payload |= (volume & 0x003F) << 16;
+  payload |= (duration & 0xFFFF);
+  payload = ntohl(payload);
+  return payload;
+}
+
+static bool EqualEvents(const DtmfEvent& a,
+                        const DtmfEvent& b) {
+  return (a.duration == b.duration
+      && a.end_bit == b.end_bit
+      && a.event_no == b.event_no
+      && a.timestamp == b.timestamp
+      && a.volume == b.volume);
+}
+
+TEST(DtmfBuffer, CreateAndDestroy) {
+  DtmfBuffer* buffer = new DtmfBuffer(sample_rate_hz);
+  delete buffer;
+}
+
+// Test the event parser.
+TEST(DtmfBuffer, ParseEvent) {
+  int event_no = 7;
+  bool end_bit = true;
+  int volume = 17;
+  int duration = 4711;
+  uint32_t timestamp = 0x12345678;
+  uint32_t payload = MakeDtmfPayload(event_no, end_bit, volume, duration);
+  uint8_t* payload_ptr = reinterpret_cast<uint8_t*>(&payload);
+  DtmfEvent event;
+  EXPECT_EQ(DtmfBuffer::kOK,
+            DtmfBuffer::ParseEvent(timestamp, payload_ptr, sizeof(payload),
+                                   &event));
+  EXPECT_EQ(duration, event.duration);
+  EXPECT_EQ(end_bit, event.end_bit);
+  EXPECT_EQ(event_no, event.event_no);
+  EXPECT_EQ(timestamp, event.timestamp);
+  EXPECT_EQ(volume, event.volume);
+
+  EXPECT_EQ(DtmfBuffer::kPayloadTooShort,
+            DtmfBuffer::ParseEvent(timestamp, payload_ptr, 3, &event));
+}
+
+TEST(DtmfBuffer, SimpleInsertAndGet) {
+  int event_no = 7;
+  bool end_bit = true;
+  int volume = 17;
+  int duration = 4711;
+  uint32_t timestamp = 0x12345678;
+  DtmfEvent event(timestamp, event_no, volume, duration, end_bit);
+  DtmfBuffer buffer(sample_rate_hz);
+  EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event));
+  EXPECT_EQ(1u, buffer.Length());
+  EXPECT_FALSE(buffer.Empty());
+  DtmfEvent out_event;
+  // Too early to get event.
+  EXPECT_FALSE(buffer.GetEvent(timestamp - 10, &out_event));
+  EXPECT_EQ(1u, buffer.Length());
+  EXPECT_FALSE(buffer.Empty());
+  // Get the event at its starting timestamp.
+  EXPECT_TRUE(buffer.GetEvent(timestamp, &out_event));
+  EXPECT_TRUE(EqualEvents(event, out_event));
+  EXPECT_EQ(1u, buffer.Length());
+  EXPECT_FALSE(buffer.Empty());
+  // Get the event some time into the event.
+  EXPECT_TRUE(buffer.GetEvent(timestamp + duration / 2, &out_event));
+  EXPECT_TRUE(EqualEvents(event, out_event));
+  EXPECT_EQ(1u, buffer.Length());
+  EXPECT_FALSE(buffer.Empty());
+  // Give a "current" timestamp after the event has ended.
+#ifdef LEGACY_BITEXACT
+  EXPECT_TRUE(buffer.GetEvent(timestamp + duration + 10, &out_event));
+#endif
+  EXPECT_FALSE(buffer.GetEvent(timestamp + duration + 10, &out_event));
+  EXPECT_EQ(0u, buffer.Length());
+  EXPECT_TRUE(buffer.Empty());
+}
+
+TEST(DtmfBuffer, MergingPackets) {
+  int event_no = 0;
+  bool end_bit = false;
+  int volume = 17;
+  int duration = 80;
+  uint32_t timestamp = 0x12345678;
+  DtmfEvent event(timestamp, event_no, volume, duration, end_bit);
+  DtmfBuffer buffer(sample_rate_hz);
+  EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event));
+
+  event.duration += 80;
+  EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event));
+
+  event.duration += 80;
+  event.end_bit = true;
+  EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event));
+
+  EXPECT_EQ(1u, buffer.Length());
+
+  DtmfEvent out_event;
+  EXPECT_TRUE(buffer.GetEvent(timestamp, &out_event));
+  EXPECT_TRUE(EqualEvents(event, out_event));
+}
+
+// This test case inserts one shorter event completely overlapped by one longer
+// event. The expected outcome is that only the longer event is played.
+TEST(DtmfBuffer, OverlappingEvents) {
+  int event_no = 0;
+  bool end_bit = true;
+  int volume = 1;
+  int duration = 80;
+  uint32_t timestamp = 0x12345678 + 80;
+  DtmfEvent short_event(timestamp, event_no, volume, duration, end_bit);
+  DtmfBuffer buffer(sample_rate_hz);
+  EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(short_event));
+
+  event_no = 10;
+  end_bit = false;
+  timestamp = 0x12345678;
+  DtmfEvent long_event(timestamp, event_no, volume, duration, end_bit);
+  EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(long_event));
+
+  long_event.duration += 80;
+  EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(long_event));
+
+  long_event.duration += 80;
+  long_event.end_bit = true;
+  EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(long_event));
+
+  EXPECT_EQ(2u, buffer.Length());
+
+  DtmfEvent out_event;
+  // Expect to get the long event.
+  EXPECT_TRUE(buffer.GetEvent(timestamp, &out_event));
+  EXPECT_TRUE(EqualEvents(long_event, out_event));
+  // Expect no more events.
+#ifdef LEGACY_BITEXACT
+  EXPECT_TRUE(buffer.GetEvent(timestamp + long_event.duration + 10,
+                              &out_event));
+  EXPECT_TRUE(EqualEvents(long_event, out_event));
+  EXPECT_TRUE(buffer.GetEvent(timestamp + long_event.duration + 10,
+                              &out_event));
+  EXPECT_TRUE(EqualEvents(short_event, out_event));
+#else
+  EXPECT_FALSE(buffer.GetEvent(timestamp + long_event.duration + 10,
+                               &out_event));
+#endif
+  EXPECT_TRUE(buffer.Empty());
+}
+
+TEST(DtmfBuffer, ExtrapolationTime) {
+  int event_no = 0;
+  bool end_bit = false;
+  int volume = 1;
+  int duration = 80;
+  uint32_t timestamp = 0x12345678;
+  DtmfEvent event1(timestamp, event_no, volume, duration, end_bit);
+  DtmfBuffer buffer(sample_rate_hz);
+  EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event1));
+  EXPECT_EQ(1u, buffer.Length());
+
+  DtmfEvent out_event;
+  // Get the event at the start.
+  EXPECT_TRUE(buffer.GetEvent(timestamp, &out_event));
+  EXPECT_TRUE(EqualEvents(event1, out_event));
+  // Also get the event 100 samples after the end of the event (since we're
+  // missing the end bit).
+  uint32_t timestamp_now = timestamp + duration + 100;
+  EXPECT_TRUE(buffer.GetEvent(timestamp_now, &out_event));
+  EXPECT_TRUE(EqualEvents(event1, out_event));
+  // Insert another event starting back-to-back with the previous event.
+  timestamp += duration;
+  event_no = 1;
+  DtmfEvent event2(timestamp, event_no, volume, duration, end_bit);
+  EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event2));
+  EXPECT_EQ(2u, buffer.Length());
+  // Now we expect to get the new event when supplying |timestamp_now|.
+  EXPECT_TRUE(buffer.GetEvent(timestamp_now, &out_event));
+  EXPECT_TRUE(EqualEvents(event2, out_event));
+  // Expect the the first event to be erased now.
+  EXPECT_EQ(1u, buffer.Length());
+  // Move |timestamp_now| to more than 560 samples after the end of the second
+  // event. Expect that event to be erased.
+  timestamp_now = timestamp + duration + 600;
+#ifdef LEGACY_BITEXACT
+  EXPECT_TRUE(buffer.GetEvent(timestamp_now, &out_event));
+#endif
+  EXPECT_FALSE(buffer.GetEvent(timestamp_now, &out_event));
+  EXPECT_TRUE(buffer.Empty());
+}
+
+TEST(DtmfBuffer, TimestampWraparound) {
+  int event_no = 0;
+  bool end_bit = true;
+  int volume = 1;
+  int duration = 80;
+  uint32_t timestamp1 = 0xFFFFFFFF - duration;
+  DtmfEvent event1(timestamp1, event_no, volume, duration, end_bit);
+  uint32_t timestamp2 = 0;
+  DtmfEvent event2(timestamp2, event_no, volume, duration, end_bit);
+  DtmfBuffer buffer(sample_rate_hz);
+  EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event1));
+  EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event2));
+  EXPECT_EQ(2u, buffer.Length());
+  DtmfEvent out_event;
+  EXPECT_TRUE(buffer.GetEvent(timestamp1, &out_event));
+  EXPECT_TRUE(EqualEvents(event1, out_event));
+#ifdef LEGACY_BITEXACT
+  EXPECT_EQ(1u, buffer.Length());
+#else
+  EXPECT_EQ(2u, buffer.Length());
+#endif
+
+  buffer.Flush();
+  // Reverse the insert order. Expect same results.
+  EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event2));
+  EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event1));
+  EXPECT_EQ(2u, buffer.Length());
+  EXPECT_TRUE(buffer.GetEvent(timestamp1, &out_event));
+  EXPECT_TRUE(EqualEvents(event1, out_event));
+#ifdef LEGACY_BITEXACT
+  EXPECT_EQ(1u, buffer.Length());
+#else
+  EXPECT_EQ(2u, buffer.Length());
+#endif
+}
+
+TEST(DtmfBuffer, InvalidEvents) {
+  int event_no = 0;
+  bool end_bit = true;
+  int volume = 1;
+  int duration = 80;
+  uint32_t timestamp = 0x12345678;
+  DtmfEvent event(timestamp, event_no, volume, duration, end_bit);
+  DtmfBuffer buffer(sample_rate_hz);
+
+  // Invalid event number.
+  event.event_no = -1;
+  EXPECT_EQ(DtmfBuffer::kInvalidEventParameters, buffer.InsertEvent(event));
+  event.event_no = 16;
+  EXPECT_EQ(DtmfBuffer::kInvalidEventParameters, buffer.InsertEvent(event));
+  event.event_no = 0;  // Valid value;
+
+  // Invalid volume.
+  event.volume = -1;
+  EXPECT_EQ(DtmfBuffer::kInvalidEventParameters, buffer.InsertEvent(event));
+  event.volume = 64;
+  EXPECT_EQ(DtmfBuffer::kInvalidEventParameters, buffer.InsertEvent(event));
+  event.volume = 0;  // Valid value;
+
+  // Invalid duration.
+  event.duration = -1;
+  EXPECT_EQ(DtmfBuffer::kInvalidEventParameters, buffer.InsertEvent(event));
+  event.duration = 0;
+  EXPECT_EQ(DtmfBuffer::kInvalidEventParameters, buffer.InsertEvent(event));
+  event.duration = 0xFFFF + 1;
+  EXPECT_EQ(DtmfBuffer::kInvalidEventParameters, buffer.InsertEvent(event));
+  event.duration = 1;  // Valid value;
+
+  // Finish with a valid event, just to verify that all is ok.
+  EXPECT_EQ(DtmfBuffer::kOK, buffer.InsertEvent(event));
+}
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/dtmf_tone_generator.cc b/modules/audio_coding/neteq/dtmf_tone_generator.cc
new file mode 100644
index 0000000..b848c60
--- /dev/null
+++ b/modules/audio_coding/neteq/dtmf_tone_generator.cc
@@ -0,0 +1,218 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This class provides a generator for DTMF tones. The tone generation is based
+// on a sinusoid recursion. Each sinusoid is generated using a recursion
+// formula; x[n] = a * x[n-1] - x[n-2], where the coefficient
+// a = 2*cos(2*pi*f/fs). The recursion is started with x[-1] = 0 and
+// x[-2] = sin(2*pi*f/fs). (Note that with this initialization, the resulting
+// sinusoid gets a "negative" rotation; x[n] = sin(-2*pi*f/fs * n + phi), but
+// kept this way due to historical reasons.)
+// TODO(hlundin): Change to positive rotation?
+//
+// Each key on the telephone keypad corresponds to an "event", 0-15. Each event
+// is mapped to a tone pair, with a low and a high frequency. There are four
+// low and four high frequencies, each corresponding to a row and column,
+// respectively, on the keypad as illustrated below.
+//
+//          1209 Hz  1336 Hz  1477 Hz  1633 Hz
+// 697 Hz      1        2        3       12
+// 770 Hz      4        5        6       13
+// 852 Hz      7        8        9       14
+// 941 Hz     10        0       11       15
+
+#include "modules/audio_coding/neteq/dtmf_tone_generator.h"
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// The filter coefficient a = 2*cos(2*pi*f/fs) for the low frequency tone, for
+// sample rates fs = {8000, 16000, 32000, 48000} Hz, and events 0 through 15.
+// Values are in Q14.
+const int DtmfToneGenerator::kCoeff1[4][16] = {
+    { 24219, 27980, 27980, 27980, 26956, 26956, 26956, 25701, 25701, 25701,
+      24219, 24219, 27980, 26956, 25701, 24219 },
+    { 30556, 31548, 31548, 31548, 31281, 31281, 31281, 30951, 30951, 30951,
+      30556, 30556, 31548, 31281, 30951, 30556 },
+    { 32210, 32462, 32462, 32462, 32394, 32394, 32394, 32311, 32311, 32311,
+      32210, 32210, 32462, 32394, 32311, 32210 },
+    { 32520, 32632, 32632, 32632, 32602, 32602, 32602, 32564, 32564, 32564,
+      32520, 32520, 32632, 32602, 32564, 32520 } };
+
+// The filter coefficient a = 2*cos(2*pi*f/fs) for the high frequency tone, for
+// sample rates fs = {8000, 16000, 32000, 48000} Hz, and events 0 through 15.
+// Values are in Q14.
+const int DtmfToneGenerator::kCoeff2[4][16] = {
+    { 16325, 19073, 16325, 13085, 19073, 16325, 13085, 19073, 16325, 13085,
+      19073, 13085, 9315, 9315, 9315, 9315},
+    { 28361, 29144, 28361, 27409, 29144, 28361, 27409, 29144, 28361, 27409,
+      29144, 27409, 26258, 26258, 26258, 26258},
+    { 31647, 31849, 31647, 31400, 31849, 31647, 31400, 31849, 31647, 31400,
+      31849, 31400, 31098, 31098, 31098, 31098},
+    { 32268, 32359, 32268, 32157, 32359, 32268, 32157, 32359, 32268, 32157,
+      32359, 32157, 32022, 32022, 32022, 32022} };
+
+// The initialization value x[-2] = sin(2*pi*f/fs) for the low frequency tone,
+// for sample rates fs = {8000, 16000, 32000, 48000} Hz, and events 0-15.
+// Values are in Q14.
+const int DtmfToneGenerator::kInitValue1[4][16] = {
+    { 11036, 8528, 8528, 8528, 9315, 9315, 9315, 10163, 10163, 10163, 11036,
+      11036, 8528, 9315, 10163, 11036},
+    { 5918, 4429, 4429, 4429, 4879, 4879, 4879, 5380, 5380, 5380, 5918, 5918,
+      4429, 4879, 5380, 5918},
+    { 3010, 2235, 2235, 2235, 2468, 2468, 2468, 2728, 2728, 2728, 3010, 3010,
+      2235, 2468, 2728, 3010},
+    { 2013, 1493, 1493, 1493, 1649, 1649, 1649, 1823, 1823, 1823, 2013, 2013,
+      1493, 1649, 1823, 2013 } };
+
+// The initialization value x[-2] = sin(2*pi*f/fs) for the high frequency tone,
+// for sample rates fs = {8000, 16000, 32000, 48000} Hz, and events 0-15.
+// Values are in Q14.
+const int DtmfToneGenerator::kInitValue2[4][16] = {
+    { 14206, 13323, 14206, 15021, 13323, 14206, 15021, 13323, 14206, 15021,
+      13323, 15021, 15708, 15708, 15708, 15708},
+    { 8207, 7490, 8207, 8979, 7490, 8207, 8979, 7490, 8207, 8979, 7490, 8979,
+      9801, 9801, 9801, 9801},
+    { 4249, 3853, 4249, 4685, 3853, 4249, 4685, 3853, 4249, 4685, 3853, 4685,
+      5164, 5164, 5164, 5164},
+    { 2851, 2582, 2851, 3148, 2582, 2851, 3148, 2582, 2851, 3148, 2582, 3148,
+      3476, 3476, 3476, 3476} };
+
+// Amplitude multipliers for volume values 0 through 63, corresponding to
+// 0 dBm0 through -63 dBm0. Values are in Q14.
+// for a in range(0, 64):
+//   print round(16141.0 * 10**(-float(a)/20))
+const int DtmfToneGenerator::kAmplitude[64] = {
+    16141, 14386, 12821, 11427, 10184, 9077, 8090, 7210, 6426, 5727, 5104, 4549,
+    4054, 3614, 3221, 2870, 2558, 2280, 2032, 1811, 1614, 1439, 1282, 1143,
+    1018, 908, 809, 721, 643, 573, 510, 455, 405, 361, 322, 287, 256, 228, 203,
+    181, 161, 144, 128, 114, 102, 91, 81, 72, 64, 57, 51, 45, 41, 36, 32, 29,
+    26, 23, 20, 18, 16, 14, 13, 11 };
+
+// Constructor.
+DtmfToneGenerator::DtmfToneGenerator()
+    : initialized_(false),
+      coeff1_(0),
+      coeff2_(0),
+      amplitude_(0) {
+}
+
+// Initialize the DTMF generator with sample rate fs Hz (8000, 16000, 32000,
+// 48000), event (0-15) and attenuation (0-36 dB).
+// Returns 0 on success, otherwise an error code.
+int DtmfToneGenerator::Init(int fs, int event, int attenuation) {
+  initialized_ = false;
+  size_t fs_index;
+  if (fs == 8000) {
+    fs_index = 0;
+  } else if (fs == 16000) {
+    fs_index = 1;
+  } else if (fs == 32000) {
+    fs_index = 2;
+  } else if (fs == 48000) {
+    fs_index = 3;
+  } else {
+    RTC_NOTREACHED();
+    fs_index = 1;  // Default to 8000 Hz.
+  }
+
+  if (event < 0 || event > 15) {
+    return kParameterError;  // Invalid event number.
+  }
+
+  if (attenuation < 0 || attenuation > 63) {
+    return kParameterError;  // Invalid attenuation.
+  }
+
+  // Look up oscillator coefficient for low and high frequencies.
+  RTC_DCHECK_LE(0, fs_index);
+  RTC_DCHECK_GT(arraysize(kCoeff1), fs_index);
+  RTC_DCHECK_GT(arraysize(kCoeff2), fs_index);
+  RTC_DCHECK_LE(0, event);
+  RTC_DCHECK_GT(arraysize(kCoeff1[fs_index]), event);
+  RTC_DCHECK_GT(arraysize(kCoeff2[fs_index]), event);
+  coeff1_ = kCoeff1[fs_index][event];
+  coeff2_ = kCoeff2[fs_index][event];
+
+  // Look up amplitude multiplier.
+  RTC_DCHECK_LE(0, attenuation);
+  RTC_DCHECK_GT(arraysize(kAmplitude), attenuation);
+  amplitude_ = kAmplitude[attenuation];
+
+  // Initialize sample history.
+  RTC_DCHECK_LE(0, fs_index);
+  RTC_DCHECK_GT(arraysize(kInitValue1), fs_index);
+  RTC_DCHECK_GT(arraysize(kInitValue2), fs_index);
+  RTC_DCHECK_LE(0, event);
+  RTC_DCHECK_GT(arraysize(kInitValue1[fs_index]), event);
+  RTC_DCHECK_GT(arraysize(kInitValue2[fs_index]), event);
+  sample_history1_[0] = kInitValue1[fs_index][event];
+  sample_history1_[1] = 0;
+  sample_history2_[0] = kInitValue2[fs_index][event];
+  sample_history2_[1] = 0;
+
+  initialized_ = true;
+  return 0;
+}
+
+// Reset tone generator to uninitialized state.
+void DtmfToneGenerator::Reset() {
+  initialized_ = false;
+}
+
+// Generate num_samples of DTMF signal and write to |output|.
+int DtmfToneGenerator::Generate(size_t num_samples,
+                                AudioMultiVector* output) {
+  if (!initialized_) {
+    return kNotInitialized;
+  }
+
+  if (!output) {
+    return kParameterError;
+  }
+
+  output->AssertSize(num_samples);
+  for (size_t i = 0; i < num_samples; ++i) {
+    // Use recursion formula y[n] = a * y[n - 1] - y[n - 2].
+    int16_t temp_val_low = ((coeff1_ * sample_history1_[1] + 8192) >> 14)
+        - sample_history1_[0];
+    int16_t temp_val_high = ((coeff2_ * sample_history2_[1] + 8192) >> 14)
+        - sample_history2_[0];
+
+    // Update recursion memory.
+    sample_history1_[0] = sample_history1_[1];
+    sample_history1_[1] = temp_val_low;
+    sample_history2_[0] = sample_history2_[1];
+    sample_history2_[1] = temp_val_high;
+
+    // Attenuate the low frequency tone 3 dB.
+    int32_t temp_val =
+        kAmpMultiplier * temp_val_low + temp_val_high * (1 << 15);
+    // Normalize the signal to Q14 with proper rounding.
+    temp_val = (temp_val + 16384) >> 15;
+    // Scale the signal to correct volume.
+    (*output)[0][i] =
+        static_cast<int16_t>((temp_val * amplitude_ + 8192) >> 14);
+  }
+  // Copy first channel to all other channels.
+  for (size_t channel = 1; channel < output->Channels(); ++channel) {
+    output->CopyChannel(0, channel);
+  }
+
+  return static_cast<int>(num_samples);
+}
+
+bool DtmfToneGenerator::initialized() const {
+  return initialized_;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/dtmf_tone_generator.h b/modules/audio_coding/neteq/dtmf_tone_generator.h
new file mode 100644
index 0000000..faad6a2
--- /dev/null
+++ b/modules/audio_coding/neteq/dtmf_tone_generator.h
@@ -0,0 +1,54 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_DTMF_TONE_GENERATOR_H_
+#define MODULES_AUDIO_CODING_NETEQ_DTMF_TONE_GENERATOR_H_
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// This class provides a generator for DTMF tones.
+class DtmfToneGenerator {
+ public:
+  enum ReturnCodes {
+    kNotInitialized = -1,
+    kParameterError = -2,
+  };
+
+  DtmfToneGenerator();
+  virtual ~DtmfToneGenerator() {}
+  virtual int Init(int fs, int event, int attenuation);
+  virtual void Reset();
+  virtual int Generate(size_t num_samples, AudioMultiVector* output);
+  virtual bool initialized() const;
+
+ private:
+  static const int kCoeff1[4][16];  // 1st oscillator model coefficient table.
+  static const int kCoeff2[4][16];  // 2nd oscillator model coefficient table.
+  static const int kInitValue1[4][16];  // Initialization for 1st oscillator.
+  static const int kInitValue2[4][16];  // Initialization for 2nd oscillator.
+  static const int kAmplitude[64];  // Amplitude for 0 through -63 dBm0.
+  static const int16_t kAmpMultiplier = 23171;  // 3 dB attenuation (in Q15).
+
+  bool initialized_;            // True if generator is initialized properly.
+  int coeff1_;                  // 1st oscillator coefficient for this event.
+  int coeff2_;                  // 2nd oscillator coefficient for this event.
+  int amplitude_;               // Amplitude for this event.
+  int16_t sample_history1_[2];  // Last 2 samples for the 1st oscillator.
+  int16_t sample_history2_[2];  // Last 2 samples for the 2nd oscillator.
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(DtmfToneGenerator);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_DTMF_TONE_GENERATOR_H_
diff --git a/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc b/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc
new file mode 100644
index 0000000..8c22fe5
--- /dev/null
+++ b/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc
@@ -0,0 +1,180 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for DtmfToneGenerator class.
+
+#include "modules/audio_coding/neteq/dtmf_tone_generator.h"
+
+#include <math.h>
+
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class DtmfToneGeneratorTest : public ::testing::Test {
+ protected:
+  static const double kLowFreqHz[16];
+  static const double kHighFreqHz[16];
+  // This is the attenuation applied to all cases.
+  const double kBaseAttenuation = 16141.0 / 16384.0;
+  const double k3dbAttenuation = 23171.0 / 32768;
+  const int kNumSamples = 10;
+
+  void TestAllTones(int fs_hz, int channels) {
+    AudioMultiVector signal(channels);
+
+    for (int event = 0; event <= 15; ++event) {
+      std::ostringstream ss;
+      ss << "Checking event " << event << " at sample rate " << fs_hz;
+      SCOPED_TRACE(ss.str());
+      const int kAttenuation = 0;
+      ASSERT_EQ(0, tone_gen_.Init(fs_hz, event, kAttenuation));
+      EXPECT_TRUE(tone_gen_.initialized());
+      EXPECT_EQ(kNumSamples, tone_gen_.Generate(kNumSamples, &signal));
+
+      double f1 = kLowFreqHz[event];
+      double f2 = kHighFreqHz[event];
+      const double pi = 3.14159265358979323846;
+
+      for (int n = 0; n < kNumSamples; ++n) {
+        double x = k3dbAttenuation * sin(2.0 * pi * f1 / fs_hz * (-n - 1)) +
+                   sin(2.0 * pi * f2 / fs_hz * (-n - 1));
+        x *= kBaseAttenuation;
+        x = ldexp(x, 14);  // Scale to Q14.
+        for (int channel = 0; channel < channels; ++channel) {
+          EXPECT_NEAR(x, static_cast<double>(signal[channel][n]), 25);
+        }
+      }
+
+      tone_gen_.Reset();
+      EXPECT_FALSE(tone_gen_.initialized());
+    }
+  }
+
+  void TestAmplitudes(int fs_hz, int channels) {
+    AudioMultiVector signal(channels);
+    AudioMultiVector ref_signal(channels);
+
+    const int event_vec[] = {0, 4, 9, 13};  // Test a few events.
+    for (int e = 0; e < 4; ++e) {
+      int event = event_vec[e];
+      // Create full-scale reference.
+      ASSERT_EQ(0, tone_gen_.Init(fs_hz, event, 0));  // 0 attenuation.
+      EXPECT_EQ(kNumSamples, tone_gen_.Generate(kNumSamples, &ref_signal));
+      // Test every 5 steps (to save time).
+      for (int attenuation = 1; attenuation <= 63; attenuation += 5) {
+        std::ostringstream ss;
+        ss << "Checking event " << event << " at sample rate " << fs_hz;
+        ss << "; attenuation " << attenuation;
+        SCOPED_TRACE(ss.str());
+        ASSERT_EQ(0, tone_gen_.Init(fs_hz, event, attenuation));
+        EXPECT_EQ(kNumSamples, tone_gen_.Generate(kNumSamples, &signal));
+        for (int n = 0; n < kNumSamples; ++n) {
+          double attenuation_factor =
+              DbToRatio(-static_cast<float>(attenuation));
+          // Verify that the attenuation is correct.
+          for (int channel = 0; channel < channels; ++channel) {
+            EXPECT_NEAR(attenuation_factor * ref_signal[channel][n],
+                        signal[channel][n],
+                        2);
+          }
+        }
+
+        tone_gen_.Reset();
+      }
+    }
+  }
+
+  DtmfToneGenerator tone_gen_;
+};
+
+// Low and high frequencies for events 0 through 15.
+const double DtmfToneGeneratorTest::kLowFreqHz[16] = {
+    941.0, 697.0, 697.0, 697.0, 770.0, 770.0, 770.0, 852.0,
+    852.0, 852.0, 941.0, 941.0, 697.0, 770.0, 852.0, 941.0};
+const double DtmfToneGeneratorTest::kHighFreqHz[16] = {
+    1336.0, 1209.0, 1336.0, 1477.0, 1209.0, 1336.0, 1477.0, 1209.0,
+    1336.0, 1477.0, 1209.0, 1477.0, 1633.0, 1633.0, 1633.0, 1633.0};
+
+TEST_F(DtmfToneGeneratorTest, Test8000Mono) {
+  TestAllTones(8000, 1);
+  TestAmplitudes(8000, 1);
+}
+
+TEST_F(DtmfToneGeneratorTest, Test16000Mono) {
+  TestAllTones(16000, 1);
+  TestAmplitudes(16000, 1);
+}
+
+TEST_F(DtmfToneGeneratorTest, Test32000Mono) {
+  TestAllTones(32000, 1);
+  TestAmplitudes(32000, 1);
+}
+
+TEST_F(DtmfToneGeneratorTest, Test48000Mono) {
+  TestAllTones(48000, 1);
+  TestAmplitudes(48000, 1);
+}
+
+TEST_F(DtmfToneGeneratorTest, Test8000Stereo) {
+  TestAllTones(8000, 2);
+  TestAmplitudes(8000, 2);
+}
+
+TEST_F(DtmfToneGeneratorTest, Test16000Stereo) {
+  TestAllTones(16000, 2);
+  TestAmplitudes(16000, 2);
+}
+
+TEST_F(DtmfToneGeneratorTest, Test32000Stereo) {
+  TestAllTones(32000, 2);
+  TestAmplitudes(32000, 2);
+}
+
+TEST_F(DtmfToneGeneratorTest, Test48000Stereo) {
+  TestAllTones(48000, 2);
+  TestAmplitudes(48000, 2);
+}
+
+TEST(DtmfToneGenerator, TestErrors) {
+  DtmfToneGenerator tone_gen;
+  const int kNumSamples = 10;
+  AudioMultiVector signal(1);  // One channel.
+
+  // Try to generate tones without initializing.
+  EXPECT_EQ(DtmfToneGenerator::kNotInitialized,
+            tone_gen.Generate(kNumSamples, &signal));
+
+  const int fs = 16000;       // Valid sample rate.
+  const int event = 7;        // Valid event.
+  const int attenuation = 0;  // Valid attenuation.
+  // Initialize with invalid event -1.
+  EXPECT_EQ(DtmfToneGenerator::kParameterError,
+            tone_gen.Init(fs, -1, attenuation));
+  // Initialize with invalid event 16.
+  EXPECT_EQ(DtmfToneGenerator::kParameterError,
+            tone_gen.Init(fs, 16, attenuation));
+  // Initialize with invalid attenuation -1.
+  EXPECT_EQ(DtmfToneGenerator::kParameterError, tone_gen.Init(fs, event, -1));
+  // Initialize with invalid attenuation 64.
+  EXPECT_EQ(DtmfToneGenerator::kParameterError, tone_gen.Init(fs, event, 64));
+  EXPECT_FALSE(tone_gen.initialized());  // Should still be uninitialized.
+
+  // Initialize with valid parameters.
+  ASSERT_EQ(0, tone_gen.Init(fs, event, attenuation));
+  EXPECT_TRUE(tone_gen.initialized());
+  // NULL pointer to destination.
+  EXPECT_EQ(DtmfToneGenerator::kParameterError,
+            tone_gen.Generate(kNumSamples, NULL));
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/expand.cc b/modules/audio_coding/neteq/expand.cc
new file mode 100644
index 0000000..3fb09ea
--- /dev/null
+++ b/modules/audio_coding/neteq/expand.cc
@@ -0,0 +1,978 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/expand.h"
+
+#include <assert.h>
+#include <string.h>  // memset
+
+#include <algorithm>  // min, max
+#include <limits>  // numeric_limits<T>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/neteq/background_noise.h"
+#include "modules/audio_coding/neteq/cross_correlation.h"
+#include "modules/audio_coding/neteq/dsp_helper.h"
+#include "modules/audio_coding/neteq/random_vector.h"
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+Expand::Expand(BackgroundNoise* background_noise,
+               SyncBuffer* sync_buffer,
+               RandomVector* random_vector,
+               StatisticsCalculator* statistics,
+               int fs,
+               size_t num_channels)
+    : random_vector_(random_vector),
+      sync_buffer_(sync_buffer),
+      first_expand_(true),
+      fs_hz_(fs),
+      num_channels_(num_channels),
+      consecutive_expands_(0),
+      background_noise_(background_noise),
+      statistics_(statistics),
+      overlap_length_(5 * fs / 8000),
+      lag_index_direction_(0),
+      current_lag_index_(0),
+      stop_muting_(false),
+      expand_duration_samples_(0),
+      channel_parameters_(new ChannelParameters[num_channels_]) {
+  assert(fs == 8000 || fs == 16000 || fs == 32000 || fs == 48000);
+  assert(fs <= static_cast<int>(kMaxSampleRate));  // Should not be possible.
+  assert(num_channels_ > 0);
+  memset(expand_lags_, 0, sizeof(expand_lags_));
+  Reset();
+}
+
+Expand::~Expand() = default;
+
+void Expand::Reset() {
+  first_expand_ = true;
+  consecutive_expands_ = 0;
+  max_lag_ = 0;
+  for (size_t ix = 0; ix < num_channels_; ++ix) {
+    channel_parameters_[ix].expand_vector0.Clear();
+    channel_parameters_[ix].expand_vector1.Clear();
+  }
+}
+
+int Expand::Process(AudioMultiVector* output) {
+  int16_t random_vector[kMaxSampleRate / 8000 * 120 + 30];
+  int16_t scaled_random_vector[kMaxSampleRate / 8000 * 125];
+  static const int kTempDataSize = 3600;
+  int16_t temp_data[kTempDataSize];  // TODO(hlundin) Remove this.
+  int16_t* voiced_vector_storage = temp_data;
+  int16_t* voiced_vector = &voiced_vector_storage[overlap_length_];
+  static const size_t kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
+  int16_t unvoiced_array_memory[kNoiseLpcOrder + kMaxSampleRate / 8000 * 125];
+  int16_t* unvoiced_vector = unvoiced_array_memory + kUnvoicedLpcOrder;
+  int16_t* noise_vector = unvoiced_array_memory + kNoiseLpcOrder;
+
+  int fs_mult = fs_hz_ / 8000;
+
+  if (first_expand_) {
+    // Perform initial setup if this is the first expansion since last reset.
+    AnalyzeSignal(random_vector);
+    first_expand_ = false;
+    expand_duration_samples_ = 0;
+  } else {
+    // This is not the first expansion, parameters are already estimated.
+    // Extract a noise segment.
+    size_t rand_length = max_lag_;
+    // This only applies to SWB where length could be larger than 256.
+    assert(rand_length <= kMaxSampleRate / 8000 * 120 + 30);
+    GenerateRandomVector(2, rand_length, random_vector);
+  }
+
+
+  // Generate signal.
+  UpdateLagIndex();
+
+  // Voiced part.
+  // Generate a weighted vector with the current lag.
+  size_t expansion_vector_length = max_lag_ + overlap_length_;
+  size_t current_lag = expand_lags_[current_lag_index_];
+  // Copy lag+overlap data.
+  size_t expansion_vector_position = expansion_vector_length - current_lag -
+      overlap_length_;
+  size_t temp_length = current_lag + overlap_length_;
+  for (size_t channel_ix = 0; channel_ix < num_channels_; ++channel_ix) {
+    ChannelParameters& parameters = channel_parameters_[channel_ix];
+    if (current_lag_index_ == 0) {
+      // Use only expand_vector0.
+      assert(expansion_vector_position + temp_length <=
+             parameters.expand_vector0.Size());
+      parameters.expand_vector0.CopyTo(temp_length, expansion_vector_position,
+                                       voiced_vector_storage);
+    } else if (current_lag_index_ == 1) {
+      std::unique_ptr<int16_t[]> temp_0(new int16_t[temp_length]);
+      parameters.expand_vector0.CopyTo(temp_length, expansion_vector_position,
+                                       temp_0.get());
+      std::unique_ptr<int16_t[]> temp_1(new int16_t[temp_length]);
+      parameters.expand_vector1.CopyTo(temp_length, expansion_vector_position,
+                                       temp_1.get());
+      // Mix 3/4 of expand_vector0 with 1/4 of expand_vector1.
+      WebRtcSpl_ScaleAndAddVectorsWithRound(temp_0.get(), 3, temp_1.get(), 1, 2,
+                                            voiced_vector_storage, temp_length);
+    } else if (current_lag_index_ == 2) {
+      // Mix 1/2 of expand_vector0 with 1/2 of expand_vector1.
+      assert(expansion_vector_position + temp_length <=
+             parameters.expand_vector0.Size());
+      assert(expansion_vector_position + temp_length <=
+             parameters.expand_vector1.Size());
+
+      std::unique_ptr<int16_t[]> temp_0(new int16_t[temp_length]);
+      parameters.expand_vector0.CopyTo(temp_length, expansion_vector_position,
+                                       temp_0.get());
+      std::unique_ptr<int16_t[]> temp_1(new int16_t[temp_length]);
+      parameters.expand_vector1.CopyTo(temp_length, expansion_vector_position,
+                                       temp_1.get());
+      WebRtcSpl_ScaleAndAddVectorsWithRound(temp_0.get(), 1, temp_1.get(), 1, 1,
+                                            voiced_vector_storage, temp_length);
+    }
+
+    // Get tapering window parameters. Values are in Q15.
+    int16_t muting_window, muting_window_increment;
+    int16_t unmuting_window, unmuting_window_increment;
+    if (fs_hz_ == 8000) {
+      muting_window = DspHelper::kMuteFactorStart8kHz;
+      muting_window_increment = DspHelper::kMuteFactorIncrement8kHz;
+      unmuting_window = DspHelper::kUnmuteFactorStart8kHz;
+      unmuting_window_increment = DspHelper::kUnmuteFactorIncrement8kHz;
+    } else if (fs_hz_ == 16000) {
+      muting_window = DspHelper::kMuteFactorStart16kHz;
+      muting_window_increment = DspHelper::kMuteFactorIncrement16kHz;
+      unmuting_window = DspHelper::kUnmuteFactorStart16kHz;
+      unmuting_window_increment = DspHelper::kUnmuteFactorIncrement16kHz;
+    } else if (fs_hz_ == 32000) {
+      muting_window = DspHelper::kMuteFactorStart32kHz;
+      muting_window_increment = DspHelper::kMuteFactorIncrement32kHz;
+      unmuting_window = DspHelper::kUnmuteFactorStart32kHz;
+      unmuting_window_increment = DspHelper::kUnmuteFactorIncrement32kHz;
+    } else {  // fs_ == 48000
+      muting_window = DspHelper::kMuteFactorStart48kHz;
+      muting_window_increment = DspHelper::kMuteFactorIncrement48kHz;
+      unmuting_window = DspHelper::kUnmuteFactorStart48kHz;
+      unmuting_window_increment = DspHelper::kUnmuteFactorIncrement48kHz;
+    }
+
+    // Smooth the expanded if it has not been muted to a low amplitude and
+    // |current_voice_mix_factor| is larger than 0.5.
+    if ((parameters.mute_factor > 819) &&
+        (parameters.current_voice_mix_factor > 8192)) {
+      size_t start_ix = sync_buffer_->Size() - overlap_length_;
+      for (size_t i = 0; i < overlap_length_; i++) {
+        // Do overlap add between new vector and overlap.
+        (*sync_buffer_)[channel_ix][start_ix + i] =
+            (((*sync_buffer_)[channel_ix][start_ix + i] * muting_window) +
+                (((parameters.mute_factor * voiced_vector_storage[i]) >> 14) *
+                    unmuting_window) + 16384) >> 15;
+        muting_window += muting_window_increment;
+        unmuting_window += unmuting_window_increment;
+      }
+    } else if (parameters.mute_factor == 0) {
+      // The expanded signal will consist of only comfort noise if
+      // mute_factor = 0. Set the output length to 15 ms for best noise
+      // production.
+      // TODO(hlundin): This has been disabled since the length of
+      // parameters.expand_vector0 and parameters.expand_vector1 no longer
+      // match with expand_lags_, causing invalid reads and writes. Is it a good
+      // idea to enable this again, and solve the vector size problem?
+//      max_lag_ = fs_mult * 120;
+//      expand_lags_[0] = fs_mult * 120;
+//      expand_lags_[1] = fs_mult * 120;
+//      expand_lags_[2] = fs_mult * 120;
+    }
+
+    // Unvoiced part.
+    // Filter |scaled_random_vector| through |ar_filter_|.
+    memcpy(unvoiced_vector - kUnvoicedLpcOrder, parameters.ar_filter_state,
+           sizeof(int16_t) * kUnvoicedLpcOrder);
+    int32_t add_constant = 0;
+    if (parameters.ar_gain_scale > 0) {
+      add_constant = 1 << (parameters.ar_gain_scale - 1);
+    }
+    WebRtcSpl_AffineTransformVector(scaled_random_vector, random_vector,
+                                    parameters.ar_gain, add_constant,
+                                    parameters.ar_gain_scale,
+                                    current_lag);
+    WebRtcSpl_FilterARFastQ12(scaled_random_vector, unvoiced_vector,
+                              parameters.ar_filter, kUnvoicedLpcOrder + 1,
+                              current_lag);
+    memcpy(parameters.ar_filter_state,
+           &(unvoiced_vector[current_lag - kUnvoicedLpcOrder]),
+           sizeof(int16_t) * kUnvoicedLpcOrder);
+
+    // Combine voiced and unvoiced contributions.
+
+    // Set a suitable cross-fading slope.
+    // For lag =
+    //   <= 31 * fs_mult            => go from 1 to 0 in about 8 ms;
+    //  (>= 31 .. <= 63) * fs_mult  => go from 1 to 0 in about 16 ms;
+    //   >= 64 * fs_mult            => go from 1 to 0 in about 32 ms.
+    // temp_shift = getbits(max_lag_) - 5.
+    int temp_shift =
+        (31 - WebRtcSpl_NormW32(rtc::dchecked_cast<int32_t>(max_lag_))) - 5;
+    int16_t mix_factor_increment = 256 >> temp_shift;
+    if (stop_muting_) {
+      mix_factor_increment = 0;
+    }
+
+    // Create combined signal by shifting in more and more of unvoiced part.
+    temp_shift = 8 - temp_shift;  // = getbits(mix_factor_increment).
+    size_t temp_length = (parameters.current_voice_mix_factor -
+        parameters.voice_mix_factor) >> temp_shift;
+    temp_length = std::min(temp_length, current_lag);
+    DspHelper::CrossFade(voiced_vector, unvoiced_vector, temp_length,
+                         &parameters.current_voice_mix_factor,
+                         mix_factor_increment, temp_data);
+
+    // End of cross-fading period was reached before end of expanded signal
+    // path. Mix the rest with a fixed mixing factor.
+    if (temp_length < current_lag) {
+      if (mix_factor_increment != 0) {
+        parameters.current_voice_mix_factor = parameters.voice_mix_factor;
+      }
+      int16_t temp_scale = 16384 - parameters.current_voice_mix_factor;
+      WebRtcSpl_ScaleAndAddVectorsWithRound(
+          voiced_vector + temp_length, parameters.current_voice_mix_factor,
+          unvoiced_vector + temp_length, temp_scale, 14,
+          temp_data + temp_length, current_lag - temp_length);
+    }
+
+    // Select muting slope depending on how many consecutive expands we have
+    // done.
+    if (consecutive_expands_ == 3) {
+      // Let the mute factor decrease from 1.0 to 0.95 in 6.25 ms.
+      // mute_slope = 0.0010 / fs_mult in Q20.
+      parameters.mute_slope = std::max(parameters.mute_slope, 1049 / fs_mult);
+    }
+    if (consecutive_expands_ == 7) {
+      // Let the mute factor decrease from 1.0 to 0.90 in 6.25 ms.
+      // mute_slope = 0.0020 / fs_mult in Q20.
+      parameters.mute_slope = std::max(parameters.mute_slope, 2097 / fs_mult);
+    }
+
+    // Mute segment according to slope value.
+    if ((consecutive_expands_ != 0) || !parameters.onset) {
+      // Mute to the previous level, then continue with the muting.
+      WebRtcSpl_AffineTransformVector(temp_data, temp_data,
+                                      parameters.mute_factor, 8192,
+                                      14, current_lag);
+
+      if (!stop_muting_) {
+        DspHelper::MuteSignal(temp_data, parameters.mute_slope, current_lag);
+
+        // Shift by 6 to go from Q20 to Q14.
+        // TODO(hlundin): Adding 8192 before shifting 6 steps seems wrong.
+        // Legacy.
+        int16_t gain = static_cast<int16_t>(16384 -
+            (((current_lag * parameters.mute_slope) + 8192) >> 6));
+        gain = ((gain * parameters.mute_factor) + 8192) >> 14;
+
+        // Guard against getting stuck with very small (but sometimes audible)
+        // gain.
+        if ((consecutive_expands_ > 3) && (gain >= parameters.mute_factor)) {
+          parameters.mute_factor = 0;
+        } else {
+          parameters.mute_factor = gain;
+        }
+      }
+    }
+
+    // Background noise part.
+    GenerateBackgroundNoise(random_vector,
+                            channel_ix,
+                            channel_parameters_[channel_ix].mute_slope,
+                            TooManyExpands(),
+                            current_lag,
+                            unvoiced_array_memory);
+
+    // Add background noise to the combined voiced-unvoiced signal.
+    for (size_t i = 0; i < current_lag; i++) {
+      temp_data[i] = temp_data[i] + noise_vector[i];
+    }
+    if (channel_ix == 0) {
+      output->AssertSize(current_lag);
+    } else {
+      assert(output->Size() == current_lag);
+    }
+    (*output)[channel_ix].OverwriteAt(temp_data, current_lag, 0);
+  }
+
+  // Increase call number and cap it.
+  consecutive_expands_ = consecutive_expands_ >= kMaxConsecutiveExpands ?
+      kMaxConsecutiveExpands : consecutive_expands_ + 1;
+  expand_duration_samples_ += output->Size();
+  // Clamp the duration counter at 2 seconds.
+  expand_duration_samples_ = std::min(expand_duration_samples_,
+                                      rtc::dchecked_cast<size_t>(fs_hz_ * 2));
+  return 0;
+}
+
+void Expand::SetParametersForNormalAfterExpand() {
+  current_lag_index_ = 0;
+  lag_index_direction_ = 0;
+  stop_muting_ = true;  // Do not mute signal any more.
+  statistics_->LogDelayedPacketOutageEvent(
+      rtc::dchecked_cast<int>(expand_duration_samples_) / (fs_hz_ / 1000));
+}
+
+void Expand::SetParametersForMergeAfterExpand() {
+  current_lag_index_ = -1; /* out of the 3 possible ones */
+  lag_index_direction_ = 1; /* make sure we get the "optimal" lag */
+  stop_muting_ = true;
+}
+
+bool Expand::Muted() const {
+  if (first_expand_ || stop_muting_)
+    return false;
+  RTC_DCHECK(channel_parameters_);
+  for (size_t ch = 0; ch < num_channels_; ++ch) {
+    if (channel_parameters_[ch].mute_factor != 0)
+      return false;
+  }
+  return true;
+}
+
+size_t Expand::overlap_length() const {
+  return overlap_length_;
+}
+
+void Expand::InitializeForAnExpandPeriod() {
+  lag_index_direction_ = 1;
+  current_lag_index_ = -1;
+  stop_muting_ = false;
+  random_vector_->set_seed_increment(1);
+  consecutive_expands_ = 0;
+  for (size_t ix = 0; ix < num_channels_; ++ix) {
+    channel_parameters_[ix].current_voice_mix_factor = 16384;  // 1.0 in Q14.
+    channel_parameters_[ix].mute_factor = 16384;  // 1.0 in Q14.
+    // Start with 0 gain for background noise.
+    background_noise_->SetMuteFactor(ix, 0);
+  }
+}
+
+bool Expand::TooManyExpands() {
+  return consecutive_expands_ >= kMaxConsecutiveExpands;
+}
+
+void Expand::AnalyzeSignal(int16_t* random_vector) {
+  int32_t auto_correlation[kUnvoicedLpcOrder + 1];
+  int16_t reflection_coeff[kUnvoicedLpcOrder];
+  int16_t correlation_vector[kMaxSampleRate / 8000 * 102];
+  size_t best_correlation_index[kNumCorrelationCandidates];
+  int16_t best_correlation[kNumCorrelationCandidates];
+  size_t best_distortion_index[kNumCorrelationCandidates];
+  int16_t best_distortion[kNumCorrelationCandidates];
+  int32_t correlation_vector2[(99 * kMaxSampleRate / 8000) + 1];
+  int32_t best_distortion_w32[kNumCorrelationCandidates];
+  static const size_t kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
+  int16_t unvoiced_array_memory[kNoiseLpcOrder + kMaxSampleRate / 8000 * 125];
+  int16_t* unvoiced_vector = unvoiced_array_memory + kUnvoicedLpcOrder;
+
+  int fs_mult = fs_hz_ / 8000;
+
+  // Pre-calculate common multiplications with fs_mult.
+  size_t fs_mult_4 = static_cast<size_t>(fs_mult * 4);
+  size_t fs_mult_20 = static_cast<size_t>(fs_mult * 20);
+  size_t fs_mult_120 = static_cast<size_t>(fs_mult * 120);
+  size_t fs_mult_dist_len = fs_mult * kDistortionLength;
+  size_t fs_mult_lpc_analysis_len = fs_mult * kLpcAnalysisLength;
+
+  const size_t signal_length = static_cast<size_t>(256 * fs_mult);
+
+  const size_t audio_history_position = sync_buffer_->Size() - signal_length;
+  std::unique_ptr<int16_t[]> audio_history(new int16_t[signal_length]);
+  (*sync_buffer_)[0].CopyTo(signal_length, audio_history_position,
+                            audio_history.get());
+
+  // Initialize.
+  InitializeForAnExpandPeriod();
+
+  // Calculate correlation in downsampled domain (4 kHz sample rate).
+  size_t correlation_length = 51;  // TODO(hlundin): Legacy bit-exactness.
+  // If it is decided to break bit-exactness |correlation_length| should be
+  // initialized to the return value of Correlation().
+  Correlation(audio_history.get(), signal_length, correlation_vector);
+
+  // Find peaks in correlation vector.
+  DspHelper::PeakDetection(correlation_vector, correlation_length,
+                           kNumCorrelationCandidates, fs_mult,
+                           best_correlation_index, best_correlation);
+
+  // Adjust peak locations; cross-correlation lags start at 2.5 ms
+  // (20 * fs_mult samples).
+  best_correlation_index[0] += fs_mult_20;
+  best_correlation_index[1] += fs_mult_20;
+  best_correlation_index[2] += fs_mult_20;
+
+  // Calculate distortion around the |kNumCorrelationCandidates| best lags.
+  int distortion_scale = 0;
+  for (size_t i = 0; i < kNumCorrelationCandidates; i++) {
+    size_t min_index = std::max(fs_mult_20,
+                                best_correlation_index[i] - fs_mult_4);
+    size_t max_index = std::min(fs_mult_120 - 1,
+                                best_correlation_index[i] + fs_mult_4);
+    best_distortion_index[i] = DspHelper::MinDistortion(
+        &(audio_history[signal_length - fs_mult_dist_len]), min_index,
+        max_index, fs_mult_dist_len, &best_distortion_w32[i]);
+    distortion_scale = std::max(16 - WebRtcSpl_NormW32(best_distortion_w32[i]),
+                                distortion_scale);
+  }
+  // Shift the distortion values to fit in 16 bits.
+  WebRtcSpl_VectorBitShiftW32ToW16(best_distortion, kNumCorrelationCandidates,
+                                   best_distortion_w32, distortion_scale);
+
+  // Find the maximizing index |i| of the cost function
+  // f[i] = best_correlation[i] / best_distortion[i].
+  int32_t best_ratio = std::numeric_limits<int32_t>::min();
+  size_t best_index = std::numeric_limits<size_t>::max();
+  for (size_t i = 0; i < kNumCorrelationCandidates; ++i) {
+    int32_t ratio;
+    if (best_distortion[i] > 0) {
+      ratio = (best_correlation[i] * (1 << 16)) / best_distortion[i];
+    } else if (best_correlation[i] == 0) {
+      ratio = 0;  // No correlation set result to zero.
+    } else {
+      ratio = std::numeric_limits<int32_t>::max();  // Denominator is zero.
+    }
+    if (ratio > best_ratio) {
+      best_index = i;
+      best_ratio = ratio;
+    }
+  }
+
+  size_t distortion_lag = best_distortion_index[best_index];
+  size_t correlation_lag = best_correlation_index[best_index];
+  max_lag_ = std::max(distortion_lag, correlation_lag);
+
+  // Calculate the exact best correlation in the range between
+  // |correlation_lag| and |distortion_lag|.
+  correlation_length =
+      std::max(std::min(distortion_lag + 10, fs_mult_120),
+               static_cast<size_t>(60 * fs_mult));
+
+  size_t start_index = std::min(distortion_lag, correlation_lag);
+  size_t correlation_lags = static_cast<size_t>(
+      WEBRTC_SPL_ABS_W16((distortion_lag-correlation_lag)) + 1);
+  assert(correlation_lags <= static_cast<size_t>(99 * fs_mult + 1));
+
+  for (size_t channel_ix = 0; channel_ix < num_channels_; ++channel_ix) {
+    ChannelParameters& parameters = channel_parameters_[channel_ix];
+    // Calculate suitable scaling.
+    int16_t signal_max = WebRtcSpl_MaxAbsValueW16(
+        &audio_history[signal_length - correlation_length - start_index
+                       - correlation_lags],
+                       correlation_length + start_index + correlation_lags - 1);
+    int correlation_scale = (31 - WebRtcSpl_NormW32(signal_max * signal_max)) +
+        (31 - WebRtcSpl_NormW32(static_cast<int32_t>(correlation_length))) - 31;
+    correlation_scale = std::max(0, correlation_scale);
+
+    // Calculate the correlation, store in |correlation_vector2|.
+    WebRtcSpl_CrossCorrelation(
+        correlation_vector2,
+        &(audio_history[signal_length - correlation_length]),
+        &(audio_history[signal_length - correlation_length - start_index]),
+        correlation_length, correlation_lags, correlation_scale, -1);
+
+    // Find maximizing index.
+    best_index = WebRtcSpl_MaxIndexW32(correlation_vector2, correlation_lags);
+    int32_t max_correlation = correlation_vector2[best_index];
+    // Compensate index with start offset.
+    best_index = best_index + start_index;
+
+    // Calculate energies.
+    int32_t energy1 = WebRtcSpl_DotProductWithScale(
+        &(audio_history[signal_length - correlation_length]),
+        &(audio_history[signal_length - correlation_length]),
+        correlation_length, correlation_scale);
+    int32_t energy2 = WebRtcSpl_DotProductWithScale(
+        &(audio_history[signal_length - correlation_length - best_index]),
+        &(audio_history[signal_length - correlation_length - best_index]),
+        correlation_length, correlation_scale);
+
+    // Calculate the correlation coefficient between the two portions of the
+    // signal.
+    int32_t corr_coefficient;
+    if ((energy1 > 0) && (energy2 > 0)) {
+      int energy1_scale = std::max(16 - WebRtcSpl_NormW32(energy1), 0);
+      int energy2_scale = std::max(16 - WebRtcSpl_NormW32(energy2), 0);
+      // Make sure total scaling is even (to simplify scale factor after sqrt).
+      if ((energy1_scale + energy2_scale) & 1) {
+        // If sum is odd, add 1 to make it even.
+        energy1_scale += 1;
+      }
+      int32_t scaled_energy1 = energy1 >> energy1_scale;
+      int32_t scaled_energy2 = energy2 >> energy2_scale;
+      int16_t sqrt_energy_product = static_cast<int16_t>(
+          WebRtcSpl_SqrtFloor(scaled_energy1 * scaled_energy2));
+      // Calculate max_correlation / sqrt(energy1 * energy2) in Q14.
+      int cc_shift = 14 - (energy1_scale + energy2_scale) / 2;
+      max_correlation = WEBRTC_SPL_SHIFT_W32(max_correlation, cc_shift);
+      corr_coefficient = WebRtcSpl_DivW32W16(max_correlation,
+                                             sqrt_energy_product);
+      // Cap at 1.0 in Q14.
+      corr_coefficient = std::min(16384, corr_coefficient);
+    } else {
+      corr_coefficient = 0;
+    }
+
+    // Extract the two vectors expand_vector0 and expand_vector1 from
+    // |audio_history|.
+    size_t expansion_length = max_lag_ + overlap_length_;
+    const int16_t* vector1 = &(audio_history[signal_length - expansion_length]);
+    const int16_t* vector2 = vector1 - distortion_lag;
+    // Normalize the second vector to the same energy as the first.
+    energy1 = WebRtcSpl_DotProductWithScale(vector1, vector1, expansion_length,
+                                            correlation_scale);
+    energy2 = WebRtcSpl_DotProductWithScale(vector2, vector2, expansion_length,
+                                            correlation_scale);
+    // Confirm that amplitude ratio sqrt(energy1 / energy2) is within 0.5 - 2.0,
+    // i.e., energy1 / energy2 is within 0.25 - 4.
+    int16_t amplitude_ratio;
+    if ((energy1 / 4 < energy2) && (energy1 > energy2 / 4)) {
+      // Energy constraint fulfilled. Use both vectors and scale them
+      // accordingly.
+      int32_t scaled_energy2 = std::max(16 - WebRtcSpl_NormW32(energy2), 0);
+      int32_t scaled_energy1 = scaled_energy2 - 13;
+      // Calculate scaled_energy1 / scaled_energy2 in Q13.
+      int32_t energy_ratio = WebRtcSpl_DivW32W16(
+          WEBRTC_SPL_SHIFT_W32(energy1, -scaled_energy1),
+          static_cast<int16_t>(energy2 >> scaled_energy2));
+      // Calculate sqrt ratio in Q13 (sqrt of en1/en2 in Q26).
+      amplitude_ratio =
+          static_cast<int16_t>(WebRtcSpl_SqrtFloor(energy_ratio << 13));
+      // Copy the two vectors and give them the same energy.
+      parameters.expand_vector0.Clear();
+      parameters.expand_vector0.PushBack(vector1, expansion_length);
+      parameters.expand_vector1.Clear();
+      if (parameters.expand_vector1.Size() < expansion_length) {
+        parameters.expand_vector1.Extend(
+            expansion_length - parameters.expand_vector1.Size());
+      }
+      std::unique_ptr<int16_t[]> temp_1(new int16_t[expansion_length]);
+      WebRtcSpl_AffineTransformVector(temp_1.get(),
+                                      const_cast<int16_t*>(vector2),
+                                      amplitude_ratio,
+                                      4096,
+                                      13,
+                                      expansion_length);
+      parameters.expand_vector1.OverwriteAt(temp_1.get(), expansion_length, 0);
+    } else {
+      // Energy change constraint not fulfilled. Only use last vector.
+      parameters.expand_vector0.Clear();
+      parameters.expand_vector0.PushBack(vector1, expansion_length);
+      // Copy from expand_vector0 to expand_vector1.
+      parameters.expand_vector0.CopyTo(&parameters.expand_vector1);
+      // Set the energy_ratio since it is used by muting slope.
+      if ((energy1 / 4 < energy2) || (energy2 == 0)) {
+        amplitude_ratio = 4096;  // 0.5 in Q13.
+      } else {
+        amplitude_ratio = 16384;  // 2.0 in Q13.
+      }
+    }
+
+    // Set the 3 lag values.
+    if (distortion_lag == correlation_lag) {
+      expand_lags_[0] = distortion_lag;
+      expand_lags_[1] = distortion_lag;
+      expand_lags_[2] = distortion_lag;
+    } else {
+      // |distortion_lag| and |correlation_lag| are not equal; use different
+      // combinations of the two.
+      // First lag is |distortion_lag| only.
+      expand_lags_[0] = distortion_lag;
+      // Second lag is the average of the two.
+      expand_lags_[1] = (distortion_lag + correlation_lag) / 2;
+      // Third lag is the average again, but rounding towards |correlation_lag|.
+      if (distortion_lag > correlation_lag) {
+        expand_lags_[2] = (distortion_lag + correlation_lag - 1) / 2;
+      } else {
+        expand_lags_[2] = (distortion_lag + correlation_lag + 1) / 2;
+      }
+    }
+
+    // Calculate the LPC and the gain of the filters.
+
+    // Calculate kUnvoicedLpcOrder + 1 lags of the auto-correlation function.
+    size_t temp_index = signal_length - fs_mult_lpc_analysis_len -
+        kUnvoicedLpcOrder;
+    // Copy signal to temporary vector to be able to pad with leading zeros.
+    int16_t* temp_signal = new int16_t[fs_mult_lpc_analysis_len
+                                       + kUnvoicedLpcOrder];
+    memset(temp_signal, 0,
+           sizeof(int16_t) * (fs_mult_lpc_analysis_len + kUnvoicedLpcOrder));
+    memcpy(&temp_signal[kUnvoicedLpcOrder],
+           &audio_history[temp_index + kUnvoicedLpcOrder],
+           sizeof(int16_t) * fs_mult_lpc_analysis_len);
+    CrossCorrelationWithAutoShift(
+        &temp_signal[kUnvoicedLpcOrder], &temp_signal[kUnvoicedLpcOrder],
+        fs_mult_lpc_analysis_len, kUnvoicedLpcOrder + 1, -1, auto_correlation);
+    delete [] temp_signal;
+
+    // Verify that variance is positive.
+    if (auto_correlation[0] > 0) {
+      // Estimate AR filter parameters using Levinson-Durbin algorithm;
+      // kUnvoicedLpcOrder + 1 filter coefficients.
+      int16_t stability = WebRtcSpl_LevinsonDurbin(auto_correlation,
+                                                   parameters.ar_filter,
+                                                   reflection_coeff,
+                                                   kUnvoicedLpcOrder);
+
+      // Keep filter parameters only if filter is stable.
+      if (stability != 1) {
+        // Set first coefficient to 4096 (1.0 in Q12).
+        parameters.ar_filter[0] = 4096;
+        // Set remaining |kUnvoicedLpcOrder| coefficients to zero.
+        WebRtcSpl_MemSetW16(parameters.ar_filter + 1, 0, kUnvoicedLpcOrder);
+      }
+    }
+
+    if (channel_ix == 0) {
+      // Extract a noise segment.
+      size_t noise_length;
+      if (distortion_lag < 40) {
+        noise_length = 2 * distortion_lag + 30;
+      } else {
+        noise_length = distortion_lag + 30;
+      }
+      if (noise_length <= RandomVector::kRandomTableSize) {
+        memcpy(random_vector, RandomVector::kRandomTable,
+               sizeof(int16_t) * noise_length);
+      } else {
+        // Only applies to SWB where length could be larger than
+        // |kRandomTableSize|.
+        memcpy(random_vector, RandomVector::kRandomTable,
+               sizeof(int16_t) * RandomVector::kRandomTableSize);
+        assert(noise_length <= kMaxSampleRate / 8000 * 120 + 30);
+        random_vector_->IncreaseSeedIncrement(2);
+        random_vector_->Generate(
+            noise_length - RandomVector::kRandomTableSize,
+            &random_vector[RandomVector::kRandomTableSize]);
+      }
+    }
+
+    // Set up state vector and calculate scale factor for unvoiced filtering.
+    memcpy(parameters.ar_filter_state,
+           &(audio_history[signal_length - kUnvoicedLpcOrder]),
+           sizeof(int16_t) * kUnvoicedLpcOrder);
+    memcpy(unvoiced_vector - kUnvoicedLpcOrder,
+           &(audio_history[signal_length - 128 - kUnvoicedLpcOrder]),
+           sizeof(int16_t) * kUnvoicedLpcOrder);
+    WebRtcSpl_FilterMAFastQ12(&audio_history[signal_length - 128],
+                              unvoiced_vector,
+                              parameters.ar_filter,
+                              kUnvoicedLpcOrder + 1,
+                              128);
+    const int unvoiced_max_abs = [&] {
+      const int16_t max_abs = WebRtcSpl_MaxAbsValueW16(unvoiced_vector, 128);
+      // Since WebRtcSpl_MaxAbsValueW16 returns 2^15 - 1 when the input contains
+      // -2^15, we have to conservatively bump the return value by 1
+      // if it is 2^15 - 1.
+      return max_abs == WEBRTC_SPL_WORD16_MAX ? max_abs + 1 : max_abs;
+    }();
+    // Pick the smallest n such that 2^n > unvoiced_max_abs; then the maximum
+    // value of the dot product is less than 2^7 * 2^(2*n) = 2^(2*n + 7), so to
+    // prevent overflows we want 2n + 7 <= 31, which means we should shift by
+    // 2n + 7 - 31 bits, if this value is greater than zero.
+    int unvoiced_prescale =
+        std::max(0, 2 * WebRtcSpl_GetSizeInBits(unvoiced_max_abs) - 24);
+
+    int32_t unvoiced_energy = WebRtcSpl_DotProductWithScale(unvoiced_vector,
+                                                            unvoiced_vector,
+                                                            128,
+                                                            unvoiced_prescale);
+
+    // Normalize |unvoiced_energy| to 28 or 29 bits to preserve sqrt() accuracy.
+    int16_t unvoiced_scale = WebRtcSpl_NormW32(unvoiced_energy) - 3;
+    // Make sure we do an odd number of shifts since we already have 7 shifts
+    // from dividing with 128 earlier. This will make the total scale factor
+    // even, which is suitable for the sqrt.
+    unvoiced_scale += ((unvoiced_scale & 0x1) ^ 0x1);
+    unvoiced_energy = WEBRTC_SPL_SHIFT_W32(unvoiced_energy, unvoiced_scale);
+    int16_t unvoiced_gain =
+        static_cast<int16_t>(WebRtcSpl_SqrtFloor(unvoiced_energy));
+    parameters.ar_gain_scale = 13
+        + (unvoiced_scale + 7 - unvoiced_prescale) / 2;
+    parameters.ar_gain = unvoiced_gain;
+
+    // Calculate voice_mix_factor from corr_coefficient.
+    // Let x = corr_coefficient. Then, we compute:
+    // if (x > 0.48)
+    //   voice_mix_factor = (-5179 + 19931x - 16422x^2 + 5776x^3) / 4096;
+    // else
+    //   voice_mix_factor = 0;
+    if (corr_coefficient > 7875) {
+      int16_t x1, x2, x3;
+      // |corr_coefficient| is in Q14.
+      x1 = static_cast<int16_t>(corr_coefficient);
+      x2 = (x1 * x1) >> 14;   // Shift 14 to keep result in Q14.
+      x3 = (x1 * x2) >> 14;
+      static const int kCoefficients[4] = { -5179, 19931, -16422, 5776 };
+      int32_t temp_sum = kCoefficients[0] * 16384;
+      temp_sum += kCoefficients[1] * x1;
+      temp_sum += kCoefficients[2] * x2;
+      temp_sum += kCoefficients[3] * x3;
+      parameters.voice_mix_factor =
+          static_cast<int16_t>(std::min(temp_sum / 4096, 16384));
+      parameters.voice_mix_factor = std::max(parameters.voice_mix_factor,
+                                             static_cast<int16_t>(0));
+    } else {
+      parameters.voice_mix_factor = 0;
+    }
+
+    // Calculate muting slope. Reuse value from earlier scaling of
+    // |expand_vector0| and |expand_vector1|.
+    int16_t slope = amplitude_ratio;
+    if (slope > 12288) {
+      // slope > 1.5.
+      // Calculate (1 - (1 / slope)) / distortion_lag =
+      // (slope - 1) / (distortion_lag * slope).
+      // |slope| is in Q13, so 1 corresponds to 8192. Shift up to Q25 before
+      // the division.
+      // Shift the denominator from Q13 to Q5 before the division. The result of
+      // the division will then be in Q20.
+      int temp_ratio = WebRtcSpl_DivW32W16(
+          (slope - 8192) << 12,
+          static_cast<int16_t>((distortion_lag * slope) >> 8));
+      if (slope > 14746) {
+        // slope > 1.8.
+        // Divide by 2, with proper rounding.
+        parameters.mute_slope = (temp_ratio + 1) / 2;
+      } else {
+        // Divide by 8, with proper rounding.
+        parameters.mute_slope = (temp_ratio + 4) / 8;
+      }
+      parameters.onset = true;
+    } else {
+      // Calculate (1 - slope) / distortion_lag.
+      // Shift |slope| by 7 to Q20 before the division. The result is in Q20.
+      parameters.mute_slope = WebRtcSpl_DivW32W16(
+          (8192 - slope) * 128, static_cast<int16_t>(distortion_lag));
+      if (parameters.voice_mix_factor <= 13107) {
+        // Make sure the mute factor decreases from 1.0 to 0.9 in no more than
+        // 6.25 ms.
+        // mute_slope >= 0.005 / fs_mult in Q20.
+        parameters.mute_slope = std::max(5243 / fs_mult, parameters.mute_slope);
+      } else if (slope > 8028) {
+        parameters.mute_slope = 0;
+      }
+      parameters.onset = false;
+    }
+  }
+}
+
+Expand::ChannelParameters::ChannelParameters()
+    : mute_factor(16384),
+      ar_gain(0),
+      ar_gain_scale(0),
+      voice_mix_factor(0),
+      current_voice_mix_factor(0),
+      onset(false),
+      mute_slope(0) {
+  memset(ar_filter, 0, sizeof(ar_filter));
+  memset(ar_filter_state, 0, sizeof(ar_filter_state));
+}
+
+void Expand::Correlation(const int16_t* input,
+                         size_t input_length,
+                         int16_t* output) const {
+  // Set parameters depending on sample rate.
+  const int16_t* filter_coefficients;
+  size_t num_coefficients;
+  int16_t downsampling_factor;
+  if (fs_hz_ == 8000) {
+    num_coefficients = 3;
+    downsampling_factor = 2;
+    filter_coefficients = DspHelper::kDownsample8kHzTbl;
+  } else if (fs_hz_ == 16000) {
+    num_coefficients = 5;
+    downsampling_factor = 4;
+    filter_coefficients = DspHelper::kDownsample16kHzTbl;
+  } else if (fs_hz_ == 32000) {
+    num_coefficients = 7;
+    downsampling_factor = 8;
+    filter_coefficients = DspHelper::kDownsample32kHzTbl;
+  } else {  // fs_hz_ == 48000.
+    num_coefficients = 7;
+    downsampling_factor = 12;
+    filter_coefficients = DspHelper::kDownsample48kHzTbl;
+  }
+
+  // Correlate from lag 10 to lag 60 in downsampled domain.
+  // (Corresponds to 20-120 for narrow-band, 40-240 for wide-band, and so on.)
+  static const size_t kCorrelationStartLag = 10;
+  static const size_t kNumCorrelationLags = 54;
+  static const size_t kCorrelationLength = 60;
+  // Downsample to 4 kHz sample rate.
+  static const size_t kDownsampledLength = kCorrelationStartLag
+      + kNumCorrelationLags + kCorrelationLength;
+  int16_t downsampled_input[kDownsampledLength];
+  static const size_t kFilterDelay = 0;
+  WebRtcSpl_DownsampleFast(
+      input + input_length - kDownsampledLength * downsampling_factor,
+      kDownsampledLength * downsampling_factor, downsampled_input,
+      kDownsampledLength, filter_coefficients, num_coefficients,
+      downsampling_factor, kFilterDelay);
+
+  // Normalize |downsampled_input| to using all 16 bits.
+  int16_t max_value = WebRtcSpl_MaxAbsValueW16(downsampled_input,
+                                               kDownsampledLength);
+  int16_t norm_shift = 16 - WebRtcSpl_NormW32(max_value);
+  WebRtcSpl_VectorBitShiftW16(downsampled_input, kDownsampledLength,
+                              downsampled_input, norm_shift);
+
+  int32_t correlation[kNumCorrelationLags];
+  CrossCorrelationWithAutoShift(
+      &downsampled_input[kDownsampledLength - kCorrelationLength],
+      &downsampled_input[kDownsampledLength - kCorrelationLength
+          - kCorrelationStartLag],
+      kCorrelationLength, kNumCorrelationLags, -1, correlation);
+
+  // Normalize and move data from 32-bit to 16-bit vector.
+  int32_t max_correlation = WebRtcSpl_MaxAbsValueW32(correlation,
+                                                     kNumCorrelationLags);
+  int16_t norm_shift2 = static_cast<int16_t>(
+      std::max(18 - WebRtcSpl_NormW32(max_correlation), 0));
+  WebRtcSpl_VectorBitShiftW32ToW16(output, kNumCorrelationLags, correlation,
+                                   norm_shift2);
+}
+
+void Expand::UpdateLagIndex() {
+  current_lag_index_ = current_lag_index_ + lag_index_direction_;
+  // Change direction if needed.
+  if (current_lag_index_ <= 0) {
+    lag_index_direction_ = 1;
+  }
+  if (current_lag_index_ >= kNumLags - 1) {
+    lag_index_direction_ = -1;
+  }
+}
+
+Expand* ExpandFactory::Create(BackgroundNoise* background_noise,
+                              SyncBuffer* sync_buffer,
+                              RandomVector* random_vector,
+                              StatisticsCalculator* statistics,
+                              int fs,
+                              size_t num_channels) const {
+  return new Expand(background_noise, sync_buffer, random_vector, statistics,
+                    fs, num_channels);
+}
+
+// TODO(turajs): This can be moved to BackgroundNoise class.
+void Expand::GenerateBackgroundNoise(int16_t* random_vector,
+                                     size_t channel,
+                                     int mute_slope,
+                                     bool too_many_expands,
+                                     size_t num_noise_samples,
+                                     int16_t* buffer) {
+  static const size_t kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
+  int16_t scaled_random_vector[kMaxSampleRate / 8000 * 125];
+  assert(num_noise_samples <= (kMaxSampleRate / 8000 * 125));
+  int16_t* noise_samples = &buffer[kNoiseLpcOrder];
+  if (background_noise_->initialized()) {
+    // Use background noise parameters.
+    memcpy(noise_samples - kNoiseLpcOrder,
+           background_noise_->FilterState(channel),
+           sizeof(int16_t) * kNoiseLpcOrder);
+
+    int dc_offset = 0;
+    if (background_noise_->ScaleShift(channel) > 1) {
+      dc_offset = 1 << (background_noise_->ScaleShift(channel) - 1);
+    }
+
+    // Scale random vector to correct energy level.
+    WebRtcSpl_AffineTransformVector(
+        scaled_random_vector, random_vector,
+        background_noise_->Scale(channel), dc_offset,
+        background_noise_->ScaleShift(channel),
+        num_noise_samples);
+
+    WebRtcSpl_FilterARFastQ12(scaled_random_vector, noise_samples,
+                              background_noise_->Filter(channel),
+                              kNoiseLpcOrder + 1,
+                              num_noise_samples);
+
+    background_noise_->SetFilterState(
+        channel,
+        &(noise_samples[num_noise_samples - kNoiseLpcOrder]),
+        kNoiseLpcOrder);
+
+    // Unmute the background noise.
+    int16_t bgn_mute_factor = background_noise_->MuteFactor(channel);
+    NetEq::BackgroundNoiseMode bgn_mode = background_noise_->mode();
+    if (bgn_mode == NetEq::kBgnFade && too_many_expands &&
+        bgn_mute_factor > 0) {
+      // Fade BGN to zero.
+      // Calculate muting slope, approximately -2^18 / fs_hz.
+      int mute_slope;
+      if (fs_hz_ == 8000) {
+        mute_slope = -32;
+      } else if (fs_hz_ == 16000) {
+        mute_slope = -16;
+      } else if (fs_hz_ == 32000) {
+        mute_slope = -8;
+      } else {
+        mute_slope = -5;
+      }
+      // Use UnmuteSignal function with negative slope.
+      // |bgn_mute_factor| is in Q14. |mute_slope| is in Q20.
+      DspHelper::UnmuteSignal(noise_samples,
+                              num_noise_samples,
+                              &bgn_mute_factor,
+                              mute_slope,
+                              noise_samples);
+    } else if (bgn_mute_factor < 16384) {
+      // If mode is kBgnOn, or if kBgnFade has started fading,
+      // use regular |mute_slope|.
+      if (!stop_muting_ && bgn_mode != NetEq::kBgnOff &&
+          !(bgn_mode == NetEq::kBgnFade && too_many_expands)) {
+        DspHelper::UnmuteSignal(noise_samples,
+                                static_cast<int>(num_noise_samples),
+                                &bgn_mute_factor,
+                                mute_slope,
+                                noise_samples);
+      } else {
+        // kBgnOn and stop muting, or
+        // kBgnOff (mute factor is always 0), or
+        // kBgnFade has reached 0.
+        WebRtcSpl_AffineTransformVector(noise_samples, noise_samples,
+                                        bgn_mute_factor, 8192, 14,
+                                        num_noise_samples);
+      }
+    }
+    // Update mute_factor in BackgroundNoise class.
+    background_noise_->SetMuteFactor(channel, bgn_mute_factor);
+  } else {
+    // BGN parameters have not been initialized; use zero noise.
+    memset(noise_samples, 0, sizeof(int16_t) * num_noise_samples);
+  }
+}
+
+void Expand::GenerateRandomVector(int16_t seed_increment,
+                                  size_t length,
+                                  int16_t* random_vector) {
+  // TODO(turajs): According to hlundin The loop should not be needed. Should be
+  // just as good to generate all of the vector in one call.
+  size_t samples_generated = 0;
+  const size_t kMaxRandSamples = RandomVector::kRandomTableSize;
+  while (samples_generated < length) {
+    size_t rand_length = std::min(length - samples_generated, kMaxRandSamples);
+    random_vector_->IncreaseSeedIncrement(seed_increment);
+    random_vector_->Generate(rand_length, &random_vector[samples_generated]);
+    samples_generated += rand_length;
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/expand.h b/modules/audio_coding/neteq/expand.h
new file mode 100644
index 0000000..39249f1
--- /dev/null
+++ b/modules/audio_coding/neteq/expand.h
@@ -0,0 +1,161 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_EXPAND_H_
+#define MODULES_AUDIO_CODING_NETEQ_EXPAND_H_
+
+#include <assert.h>
+#include <memory>
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Forward declarations.
+class BackgroundNoise;
+class RandomVector;
+class StatisticsCalculator;
+class SyncBuffer;
+
+// This class handles extrapolation of audio data from the sync_buffer to
+// produce packet-loss concealment.
+// TODO(hlundin): Refactor this class to divide the long methods into shorter
+// ones.
+class Expand {
+ public:
+  Expand(BackgroundNoise* background_noise,
+         SyncBuffer* sync_buffer,
+         RandomVector* random_vector,
+         StatisticsCalculator* statistics,
+         int fs,
+         size_t num_channels);
+
+  virtual ~Expand();
+
+  // Resets the object.
+  virtual void Reset();
+
+  // The main method to produce concealment data. The data is appended to the
+  // end of |output|.
+  virtual int Process(AudioMultiVector* output);
+
+  // Prepare the object to do extra expansion during normal operation following
+  // a period of expands.
+  virtual void SetParametersForNormalAfterExpand();
+
+  // Prepare the object to do extra expansion during merge operation following
+  // a period of expands.
+  virtual void SetParametersForMergeAfterExpand();
+
+  // Returns the mute factor for |channel|.
+  int16_t MuteFactor(size_t channel) {
+    assert(channel < num_channels_);
+    return channel_parameters_[channel].mute_factor;
+  }
+
+  // Returns true if expansion has been faded down to zero amplitude (for all
+  // channels); false otherwise.
+  bool Muted() const;
+
+  // Accessors and mutators.
+  virtual size_t overlap_length() const;
+  size_t max_lag() const { return max_lag_; }
+
+ protected:
+  static const int kMaxConsecutiveExpands = 200;
+  void GenerateRandomVector(int16_t seed_increment,
+                            size_t length,
+                            int16_t* random_vector);
+
+  void GenerateBackgroundNoise(int16_t* random_vector,
+                               size_t channel,
+                               int mute_slope,
+                               bool too_many_expands,
+                               size_t num_noise_samples,
+                               int16_t* buffer);
+
+  // Initializes member variables at the beginning of an expand period.
+  void InitializeForAnExpandPeriod();
+
+  bool TooManyExpands();
+
+  // Analyzes the signal history in |sync_buffer_|, and set up all parameters
+  // necessary to produce concealment data.
+  void AnalyzeSignal(int16_t* random_vector);
+
+  RandomVector* const random_vector_;
+  SyncBuffer* const sync_buffer_;
+  bool first_expand_;
+  const int fs_hz_;
+  const size_t num_channels_;
+  int consecutive_expands_;
+
+ private:
+  static const size_t kUnvoicedLpcOrder = 6;
+  static const size_t kNumCorrelationCandidates = 3;
+  static const size_t kDistortionLength = 20;
+  static const size_t kLpcAnalysisLength = 160;
+  static const size_t kMaxSampleRate = 48000;
+  static const int kNumLags = 3;
+
+  struct ChannelParameters {
+    ChannelParameters();
+    int16_t mute_factor;
+    int16_t ar_filter[kUnvoicedLpcOrder + 1];
+    int16_t ar_filter_state[kUnvoicedLpcOrder];
+    int16_t ar_gain;
+    int16_t ar_gain_scale;
+    int16_t voice_mix_factor; /* Q14 */
+    int16_t current_voice_mix_factor; /* Q14 */
+    AudioVector expand_vector0;
+    AudioVector expand_vector1;
+    bool onset;
+    int mute_slope; /* Q20 */
+  };
+
+  // Calculate the auto-correlation of |input|, with length |input_length|
+  // samples. The correlation is calculated from a downsampled version of
+  // |input|, and is written to |output|.
+  void Correlation(const int16_t* input,
+                   size_t input_length,
+                   int16_t* output) const;
+
+  void UpdateLagIndex();
+
+  BackgroundNoise* const background_noise_;
+  StatisticsCalculator* const statistics_;
+  const size_t overlap_length_;
+  size_t max_lag_;
+  size_t expand_lags_[kNumLags];
+  int lag_index_direction_;
+  int current_lag_index_;
+  bool stop_muting_;
+  size_t expand_duration_samples_;
+  std::unique_ptr<ChannelParameters[]> channel_parameters_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(Expand);
+};
+
+struct ExpandFactory {
+  ExpandFactory() {}
+  virtual ~ExpandFactory() {}
+
+  virtual Expand* Create(BackgroundNoise* background_noise,
+                         SyncBuffer* sync_buffer,
+                         RandomVector* random_vector,
+                         StatisticsCalculator* statistics,
+                         int fs,
+                         size_t num_channels) const;
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_EXPAND_H_
diff --git a/modules/audio_coding/neteq/expand_unittest.cc b/modules/audio_coding/neteq/expand_unittest.cc
new file mode 100644
index 0000000..b4e6466
--- /dev/null
+++ b/modules/audio_coding/neteq/expand_unittest.cc
@@ -0,0 +1,206 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for Expand class.
+
+#include "modules/audio_coding/neteq/expand.h"
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/neteq/background_noise.h"
+#include "modules/audio_coding/neteq/random_vector.h"
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+TEST(Expand, CreateAndDestroy) {
+  int fs = 8000;
+  size_t channels = 1;
+  BackgroundNoise bgn(channels);
+  SyncBuffer sync_buffer(1, 1000);
+  RandomVector random_vector;
+  StatisticsCalculator statistics;
+  Expand expand(&bgn, &sync_buffer, &random_vector, &statistics, fs, channels);
+}
+
+TEST(Expand, CreateUsingFactory) {
+  int fs = 8000;
+  size_t channels = 1;
+  BackgroundNoise bgn(channels);
+  SyncBuffer sync_buffer(1, 1000);
+  RandomVector random_vector;
+  StatisticsCalculator statistics;
+  ExpandFactory expand_factory;
+  Expand* expand = expand_factory.Create(&bgn, &sync_buffer, &random_vector,
+                                         &statistics, fs, channels);
+  EXPECT_TRUE(expand != NULL);
+  delete expand;
+}
+
+namespace {
+class FakeStatisticsCalculator : public StatisticsCalculator {
+ public:
+  void LogDelayedPacketOutageEvent(int outage_duration_ms) override {
+    last_outage_duration_ms_ = outage_duration_ms;
+  }
+
+  int last_outage_duration_ms() const { return last_outage_duration_ms_; }
+
+ private:
+  int last_outage_duration_ms_ = 0;
+};
+
+// This is the same size that is given to the SyncBuffer object in NetEq.
+const size_t kNetEqSyncBufferLengthMs = 720;
+}  // namespace
+
+class ExpandTest : public ::testing::Test {
+ protected:
+  ExpandTest()
+      : input_file_(test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
+                    32000),
+        test_sample_rate_hz_(32000),
+        num_channels_(1),
+        background_noise_(num_channels_),
+        sync_buffer_(num_channels_,
+                     kNetEqSyncBufferLengthMs * test_sample_rate_hz_ / 1000),
+        expand_(&background_noise_,
+                &sync_buffer_,
+                &random_vector_,
+                &statistics_,
+                test_sample_rate_hz_,
+                num_channels_) {
+    WebRtcSpl_Init();
+    input_file_.set_output_rate_hz(test_sample_rate_hz_);
+  }
+
+  void SetUp() override {
+    // Fast-forward the input file until there is speech (about 1.1 second into
+    // the file).
+    const int speech_start_samples =
+        static_cast<int>(test_sample_rate_hz_ * 1.1f);
+    ASSERT_TRUE(input_file_.Seek(speech_start_samples));
+
+    // Pre-load the sync buffer with speech data.
+    std::unique_ptr<int16_t[]> temp(new int16_t[sync_buffer_.Size()]);
+    ASSERT_TRUE(input_file_.Read(sync_buffer_.Size(), temp.get()));
+    sync_buffer_.Channel(0).OverwriteAt(temp.get(), sync_buffer_.Size(), 0);
+    ASSERT_EQ(1u, num_channels_) << "Fix: Must populate all channels.";
+  }
+
+  test::ResampleInputAudioFile input_file_;
+  int test_sample_rate_hz_;
+  size_t num_channels_;
+  BackgroundNoise background_noise_;
+  SyncBuffer sync_buffer_;
+  RandomVector random_vector_;
+  FakeStatisticsCalculator statistics_;
+  Expand expand_;
+};
+
+// This test calls the expand object to produce concealment data a few times,
+// and then ends by calling SetParametersForNormalAfterExpand. This simulates
+// the situation where the packet next up for decoding was just delayed, not
+// lost.
+TEST_F(ExpandTest, DelayedPacketOutage) {
+  AudioMultiVector output(num_channels_);
+  size_t sum_output_len_samples = 0;
+  for (int i = 0; i < 10; ++i) {
+    EXPECT_EQ(0, expand_.Process(&output));
+    EXPECT_GT(output.Size(), 0u);
+    sum_output_len_samples += output.Size();
+    EXPECT_EQ(0, statistics_.last_outage_duration_ms());
+  }
+  expand_.SetParametersForNormalAfterExpand();
+  // Convert |sum_output_len_samples| to milliseconds.
+  EXPECT_EQ(rtc::checked_cast<int>(sum_output_len_samples /
+                                   (test_sample_rate_hz_ / 1000)),
+            statistics_.last_outage_duration_ms());
+}
+
+// This test is similar to DelayedPacketOutage, but ends by calling
+// SetParametersForMergeAfterExpand. This simulates the situation where the
+// packet next up for decoding was actually lost (or at least a later packet
+// arrived before it).
+TEST_F(ExpandTest, LostPacketOutage) {
+  AudioMultiVector output(num_channels_);
+  size_t sum_output_len_samples = 0;
+  for (int i = 0; i < 10; ++i) {
+    EXPECT_EQ(0, expand_.Process(&output));
+    EXPECT_GT(output.Size(), 0u);
+    sum_output_len_samples += output.Size();
+    EXPECT_EQ(0, statistics_.last_outage_duration_ms());
+  }
+  expand_.SetParametersForMergeAfterExpand();
+  EXPECT_EQ(0, statistics_.last_outage_duration_ms());
+}
+
+// This test is similar to the DelayedPacketOutage test above, but with the
+// difference that Expand::Reset() is called after 5 calls to Expand::Process().
+// This should reset the statistics, and will in the end lead to an outage of
+// 5 periods instead of 10.
+TEST_F(ExpandTest, CheckOutageStatsAfterReset) {
+  AudioMultiVector output(num_channels_);
+  size_t sum_output_len_samples = 0;
+  for (int i = 0; i < 10; ++i) {
+    EXPECT_EQ(0, expand_.Process(&output));
+    EXPECT_GT(output.Size(), 0u);
+    sum_output_len_samples += output.Size();
+    if (i == 5) {
+      expand_.Reset();
+      sum_output_len_samples = 0;
+    }
+    EXPECT_EQ(0, statistics_.last_outage_duration_ms());
+  }
+  expand_.SetParametersForNormalAfterExpand();
+  // Convert |sum_output_len_samples| to milliseconds.
+  EXPECT_EQ(rtc::checked_cast<int>(sum_output_len_samples /
+                                   (test_sample_rate_hz_ / 1000)),
+            statistics_.last_outage_duration_ms());
+}
+
+namespace {
+// Runs expand until Muted() returns true. Times out after 1000 calls.
+void ExpandUntilMuted(size_t num_channels, Expand* expand) {
+  EXPECT_FALSE(expand->Muted()) << "Instance is muted from the start";
+  AudioMultiVector output(num_channels);
+  int num_calls = 0;
+  while (!expand->Muted()) {
+    ASSERT_LT(num_calls++, 1000) << "Test timed out";
+    EXPECT_EQ(0, expand->Process(&output));
+  }
+}
+}  // namespace
+
+// Verifies that Muted() returns true after a long expand period. Also verifies
+// that Muted() is reset to false after calling Reset(),
+// SetParametersForMergeAfterExpand() and SetParametersForNormalAfterExpand().
+TEST_F(ExpandTest, Muted) {
+  ExpandUntilMuted(num_channels_, &expand_);
+  expand_.Reset();
+  EXPECT_FALSE(expand_.Muted());  // Should be back to unmuted.
+
+  ExpandUntilMuted(num_channels_, &expand_);
+  expand_.SetParametersForMergeAfterExpand();
+  EXPECT_FALSE(expand_.Muted());  // Should be back to unmuted.
+
+  expand_.Reset();  // Must reset in order to start a new expand period.
+  ExpandUntilMuted(num_channels_, &expand_);
+  expand_.SetParametersForNormalAfterExpand();
+  EXPECT_FALSE(expand_.Muted());  // Should be back to unmuted.
+}
+
+// TODO(hlundin): Write more tests.
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/include/neteq.h b/modules/audio_coding/neteq/include/neteq.h
new file mode 100644
index 0000000..f5bd8cd
--- /dev/null
+++ b/modules/audio_coding/neteq/include/neteq.h
@@ -0,0 +1,317 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_INCLUDE_NETEQ_H_
+#define MODULES_AUDIO_CODING_NETEQ_INCLUDE_NETEQ_H_
+
+#include <string.h>  // Provide access to size_t.
+
+#include <string>
+#include <vector>
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "api/optional.h"
+#include "api/rtp_headers.h"
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/neteq/neteq_decoder_enum.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Forward declarations.
+class AudioFrame;
+class AudioDecoderFactory;
+
+struct NetEqNetworkStatistics {
+  uint16_t current_buffer_size_ms;  // Current jitter buffer size in ms.
+  uint16_t preferred_buffer_size_ms;  // Target buffer size in ms.
+  uint16_t jitter_peaks_found;  // 1 if adding extra delay due to peaky
+                                // jitter; 0 otherwise.
+  uint16_t packet_loss_rate;  // Loss rate (network + late) in Q14.
+  uint16_t expand_rate;  // Fraction (of original stream) of synthesized
+                         // audio inserted through expansion (in Q14).
+  uint16_t speech_expand_rate;  // Fraction (of original stream) of synthesized
+                                // speech inserted through expansion (in Q14).
+  uint16_t preemptive_rate;  // Fraction of data inserted through pre-emptive
+                             // expansion (in Q14).
+  uint16_t accelerate_rate;  // Fraction of data removed through acceleration
+                             // (in Q14).
+  uint16_t secondary_decoded_rate;  // Fraction of data coming from FEC/RED
+                                    // decoding (in Q14).
+  uint16_t secondary_discarded_rate;  // Fraction of discarded FEC/RED data (in
+                                      // Q14).
+  int32_t clockdrift_ppm;  // Average clock-drift in parts-per-million
+                           // (positive or negative).
+  size_t added_zero_samples;  // Number of zero samples added in "off" mode.
+  // Statistics for packet waiting times, i.e., the time between a packet
+  // arrives until it is decoded.
+  int mean_waiting_time_ms;
+  int median_waiting_time_ms;
+  int min_waiting_time_ms;
+  int max_waiting_time_ms;
+};
+
+// NetEq statistics that persist over the lifetime of the class.
+// These metrics are never reset.
+struct NetEqLifetimeStatistics {
+  // Stats below correspond to similarly-named fields in the WebRTC stats spec.
+  // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats
+  uint64_t total_samples_received = 0;
+  uint64_t concealed_samples = 0;
+  uint64_t concealment_events = 0;
+  uint64_t jitter_buffer_delay_ms = 0;
+  // Below stat is not part of the spec.
+  uint64_t voice_concealed_samples = 0;
+};
+
+enum NetEqPlayoutMode {
+  kPlayoutOn,
+  kPlayoutOff,
+  kPlayoutFax,
+  kPlayoutStreaming
+};
+
+// This is the interface class for NetEq.
+class NetEq {
+ public:
+  enum BackgroundNoiseMode {
+    kBgnOn,    // Default behavior with eternal noise.
+    kBgnFade,  // Noise fades to zero after some time.
+    kBgnOff    // Background noise is always zero.
+  };
+
+  struct Config {
+    Config()
+        : sample_rate_hz(16000),
+          enable_post_decode_vad(false),
+          max_packets_in_buffer(50),
+          // |max_delay_ms| has the same effect as calling SetMaximumDelay().
+          max_delay_ms(2000),
+          background_noise_mode(kBgnOff),
+          playout_mode(kPlayoutOn),
+          enable_fast_accelerate(false) {}
+
+    std::string ToString() const;
+
+    int sample_rate_hz;  // Initial value. Will change with input data.
+    bool enable_post_decode_vad;
+    size_t max_packets_in_buffer;
+    int max_delay_ms;
+    BackgroundNoiseMode background_noise_mode;
+    NetEqPlayoutMode playout_mode;
+    bool enable_fast_accelerate;
+    bool enable_muted_state = false;
+  };
+
+  enum ReturnCodes {
+    kOK = 0,
+    kFail = -1,
+    kNotImplemented = -2
+  };
+
+  // Creates a new NetEq object, with parameters set in |config|. The |config|
+  // object will only have to be valid for the duration of the call to this
+  // method.
+  static NetEq* Create(
+      const NetEq::Config& config,
+      const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory);
+
+  virtual ~NetEq() {}
+
+  // Inserts a new packet into NetEq. The |receive_timestamp| is an indication
+  // of the time when the packet was received, and should be measured with
+  // the same tick rate as the RTP timestamp of the current payload.
+  // Returns 0 on success, -1 on failure.
+  virtual int InsertPacket(const RTPHeader& rtp_header,
+                           rtc::ArrayView<const uint8_t> payload,
+                           uint32_t receive_timestamp) = 0;
+
+  // Lets NetEq know that a packet arrived with an empty payload. This typically
+  // happens when empty packets are used for probing the network channel, and
+  // these packets use RTP sequence numbers from the same series as the actual
+  // audio packets.
+  virtual void InsertEmptyPacket(const RTPHeader& rtp_header) = 0;
+
+  // Instructs NetEq to deliver 10 ms of audio data. The data is written to
+  // |audio_frame|. All data in |audio_frame| is wiped; |data_|, |speech_type_|,
+  // |num_channels_|, |sample_rate_hz_|, |samples_per_channel_|, and
+  // |vad_activity_| are updated upon success. If an error is returned, some
+  // fields may not have been updated, or may contain inconsistent values.
+  // If muted state is enabled (through Config::enable_muted_state), |muted|
+  // may be set to true after a prolonged expand period. When this happens, the
+  // |data_| in |audio_frame| is not written, but should be interpreted as being
+  // all zeros.
+  // Returns kOK on success, or kFail in case of an error.
+  virtual int GetAudio(AudioFrame* audio_frame, bool* muted) = 0;
+
+  // Replaces the current set of decoders with the given one.
+  virtual void SetCodecs(const std::map<int, SdpAudioFormat>& codecs) = 0;
+
+  // Associates |rtp_payload_type| with |codec| and |codec_name|, and stores the
+  // information in the codec database. Returns 0 on success, -1 on failure.
+  // The name is only used to provide information back to the caller about the
+  // decoders. Hence, the name is arbitrary, and may be empty.
+  virtual int RegisterPayloadType(NetEqDecoder codec,
+                                  const std::string& codec_name,
+                                  uint8_t rtp_payload_type) = 0;
+
+  // Provides an externally created decoder object |decoder| to insert in the
+  // decoder database. The decoder implements a decoder of type |codec| and
+  // associates it with |rtp_payload_type| and |codec_name|. Returns kOK on
+  // success, kFail on failure. The name is only used to provide information
+  // back to the caller about the decoders. Hence, the name is arbitrary, and
+  // may be empty.
+  virtual int RegisterExternalDecoder(AudioDecoder* decoder,
+                                      NetEqDecoder codec,
+                                      const std::string& codec_name,
+                                      uint8_t rtp_payload_type) = 0;
+
+  // Associates |rtp_payload_type| with the given codec, which NetEq will
+  // instantiate when it needs it. Returns true iff successful.
+  virtual bool RegisterPayloadType(int rtp_payload_type,
+                                   const SdpAudioFormat& audio_format) = 0;
+
+  // Removes |rtp_payload_type| from the codec database. Returns 0 on success,
+  // -1 on failure. Removing a payload type that is not registered is ok and
+  // will not result in an error.
+  virtual int RemovePayloadType(uint8_t rtp_payload_type) = 0;
+
+  // Removes all payload types from the codec database.
+  virtual void RemoveAllPayloadTypes() = 0;
+
+  // Sets a minimum delay in millisecond for packet buffer. The minimum is
+  // maintained unless a higher latency is dictated by channel condition.
+  // Returns true if the minimum is successfully applied, otherwise false is
+  // returned.
+  virtual bool SetMinimumDelay(int delay_ms) = 0;
+
+  // Sets a maximum delay in milliseconds for packet buffer. The latency will
+  // not exceed the given value, even required delay (given the channel
+  // conditions) is higher. Calling this method has the same effect as setting
+  // the |max_delay_ms| value in the NetEq::Config struct.
+  virtual bool SetMaximumDelay(int delay_ms) = 0;
+
+  // The smallest latency required. This is computed bases on inter-arrival
+  // time and internal NetEq logic. Note that in computing this latency none of
+  // the user defined limits (applied by calling setMinimumDelay() and/or
+  // SetMaximumDelay()) are applied.
+  virtual int LeastRequiredDelayMs() const = 0;
+
+  // Not implemented.
+  virtual int SetTargetDelay() = 0;
+
+  // Returns the current target delay in ms. This includes any extra delay
+  // requested through SetMinimumDelay.
+  virtual int TargetDelayMs() const = 0;
+
+  // Returns the current total delay (packet buffer and sync buffer) in ms.
+  virtual int CurrentDelayMs() const = 0;
+
+  // Returns the current total delay (packet buffer and sync buffer) in ms,
+  // with smoothing applied to even out short-time fluctuations due to jitter.
+  // The packet buffer part of the delay is not updated during DTX/CNG periods.
+  virtual int FilteredCurrentDelayMs() const = 0;
+
+  // Sets the playout mode to |mode|.
+  // Deprecated. Set the mode in the Config struct passed to the constructor.
+  // TODO(henrik.lundin) Delete.
+  virtual void SetPlayoutMode(NetEqPlayoutMode mode) = 0;
+
+  // Returns the current playout mode.
+  // Deprecated.
+  // TODO(henrik.lundin) Delete.
+  virtual NetEqPlayoutMode PlayoutMode() const = 0;
+
+  // Writes the current network statistics to |stats|. The statistics are reset
+  // after the call.
+  virtual int NetworkStatistics(NetEqNetworkStatistics* stats) = 0;
+
+  // Returns a copy of this class's lifetime statistics. These statistics are
+  // never reset.
+  virtual NetEqLifetimeStatistics GetLifetimeStatistics() const = 0;
+
+  // Writes the current RTCP statistics to |stats|. The statistics are reset
+  // and a new report period is started with the call.
+  virtual void GetRtcpStatistics(RtcpStatistics* stats) = 0;
+
+  // Same as RtcpStatistics(), but does not reset anything.
+  virtual void GetRtcpStatisticsNoReset(RtcpStatistics* stats) = 0;
+
+  // Enables post-decode VAD. When enabled, GetAudio() will return
+  // kOutputVADPassive when the signal contains no speech.
+  virtual void EnableVad() = 0;
+
+  // Disables post-decode VAD.
+  virtual void DisableVad() = 0;
+
+  // Returns the RTP timestamp for the last sample delivered by GetAudio().
+  // The return value will be empty if no valid timestamp is available.
+  virtual rtc::Optional<uint32_t> GetPlayoutTimestamp() const = 0;
+
+  // Returns the sample rate in Hz of the audio produced in the last GetAudio
+  // call. If GetAudio has not been called yet, the configured sample rate
+  // (Config::sample_rate_hz) is returned.
+  virtual int last_output_sample_rate_hz() const = 0;
+
+  // Returns info about the decoder for the given payload type, or an empty
+  // value if we have no decoder for that payload type.
+  virtual rtc::Optional<CodecInst> GetDecoder(int payload_type) const = 0;
+
+  // Returns the decoder format for the given payload type. Returns empty if no
+  // such payload type was registered.
+  virtual rtc::Optional<SdpAudioFormat> GetDecoderFormat(
+      int payload_type) const = 0;
+
+  // Not implemented.
+  virtual int SetTargetNumberOfChannels() = 0;
+
+  // Not implemented.
+  virtual int SetTargetSampleRate() = 0;
+
+  // Flushes both the packet buffer and the sync buffer.
+  virtual void FlushBuffers() = 0;
+
+  // Current usage of packet-buffer and it's limits.
+  virtual void PacketBufferStatistics(int* current_num_packets,
+                                      int* max_num_packets) const = 0;
+
+  // Enables NACK and sets the maximum size of the NACK list, which should be
+  // positive and no larger than Nack::kNackListSizeLimit. If NACK is already
+  // enabled then the maximum NACK list size is modified accordingly.
+  virtual void EnableNack(size_t max_nack_list_size) = 0;
+
+  virtual void DisableNack() = 0;
+
+  // Returns a list of RTP sequence numbers corresponding to packets to be
+  // retransmitted, given an estimate of the round-trip time in milliseconds.
+  virtual std::vector<uint16_t> GetNackList(
+      int64_t round_trip_time_ms) const = 0;
+
+  // Returns a vector containing the timestamps of the packets that were decoded
+  // in the last GetAudio call. If no packets were decoded in the last call, the
+  // vector is empty.
+  // Mainly intended for testing.
+  virtual std::vector<uint32_t> LastDecodedTimestamps() const = 0;
+
+  // Returns the length of the audio yet to play in the sync buffer.
+  // Mainly intended for testing.
+  virtual int SyncBufferSizeMs() const = 0;
+
+ protected:
+  NetEq() {}
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(NetEq);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_INCLUDE_NETEQ_H_
diff --git a/modules/audio_coding/neteq/merge.cc b/modules/audio_coding/neteq/merge.cc
new file mode 100644
index 0000000..b568ff0
--- /dev/null
+++ b/modules/audio_coding/neteq/merge.cc
@@ -0,0 +1,386 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/merge.h"
+
+#include <assert.h>
+#include <string.h>  // memmove, memcpy, memset, size_t
+
+#include <algorithm>  // min, max
+#include <memory>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "modules/audio_coding/neteq/cross_correlation.h"
+#include "modules/audio_coding/neteq/dsp_helper.h"
+#include "modules/audio_coding/neteq/expand.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+
+Merge::Merge(int fs_hz,
+             size_t num_channels,
+             Expand* expand,
+             SyncBuffer* sync_buffer)
+    : fs_hz_(fs_hz),
+      num_channels_(num_channels),
+      fs_mult_(fs_hz_ / 8000),
+      timestamps_per_call_(static_cast<size_t>(fs_hz_ / 100)),
+      expand_(expand),
+      sync_buffer_(sync_buffer),
+      expanded_(num_channels_) {
+  assert(num_channels_ > 0);
+}
+
+Merge::~Merge() = default;
+
+size_t Merge::Process(int16_t* input, size_t input_length,
+                      int16_t* external_mute_factor_array,
+                      AudioMultiVector* output) {
+  // TODO(hlundin): Change to an enumerator and skip assert.
+  assert(fs_hz_ == 8000 || fs_hz_ == 16000 || fs_hz_ ==  32000 ||
+         fs_hz_ == 48000);
+  assert(fs_hz_ <= kMaxSampleRate);  // Should not be possible.
+
+  size_t old_length;
+  size_t expand_period;
+  // Get expansion data to overlap and mix with.
+  size_t expanded_length = GetExpandedSignal(&old_length, &expand_period);
+
+  // Transfer input signal to an AudioMultiVector.
+  AudioMultiVector input_vector(num_channels_);
+  input_vector.PushBackInterleaved(input, input_length);
+  size_t input_length_per_channel = input_vector.Size();
+  assert(input_length_per_channel == input_length / num_channels_);
+
+  size_t best_correlation_index = 0;
+  size_t output_length = 0;
+
+  std::unique_ptr<int16_t[]> input_channel(
+      new int16_t[input_length_per_channel]);
+  std::unique_ptr<int16_t[]> expanded_channel(new int16_t[expanded_length]);
+  for (size_t channel = 0; channel < num_channels_; ++channel) {
+    input_vector[channel].CopyTo(
+        input_length_per_channel, 0, input_channel.get());
+    expanded_[channel].CopyTo(expanded_length, 0, expanded_channel.get());
+
+    int16_t new_mute_factor = SignalScaling(
+        input_channel.get(), input_length_per_channel, expanded_channel.get());
+
+    // Adjust muting factor (product of "main" muting factor and expand muting
+    // factor).
+    int16_t* external_mute_factor = &external_mute_factor_array[channel];
+    *external_mute_factor =
+        (*external_mute_factor * expand_->MuteFactor(channel)) >> 14;
+
+    // Update |external_mute_factor| if it is lower than |new_mute_factor|.
+    if (new_mute_factor > *external_mute_factor) {
+      *external_mute_factor = std::min(new_mute_factor,
+                                       static_cast<int16_t>(16384));
+    }
+
+    if (channel == 0) {
+      // Downsample, correlate, and find strongest correlation period for the
+      // master (i.e., first) channel only.
+      // Downsample to 4kHz sample rate.
+      Downsample(input_channel.get(), input_length_per_channel,
+                 expanded_channel.get(), expanded_length);
+
+      // Calculate the lag of the strongest correlation period.
+      best_correlation_index = CorrelateAndPeakSearch(
+          old_length, input_length_per_channel, expand_period);
+    }
+
+    temp_data_.resize(input_length_per_channel + best_correlation_index);
+    int16_t* decoded_output = temp_data_.data() + best_correlation_index;
+
+    // Mute the new decoded data if needed (and unmute it linearly).
+    // This is the overlapping part of expanded_signal.
+    size_t interpolation_length = std::min(
+        kMaxCorrelationLength * fs_mult_,
+        expanded_length - best_correlation_index);
+    interpolation_length = std::min(interpolation_length,
+                                    input_length_per_channel);
+    if (*external_mute_factor < 16384) {
+      // Set a suitable muting slope (Q20). 0.004 for NB, 0.002 for WB,
+      // and so on.
+      int increment = 4194 / fs_mult_;
+      *external_mute_factor =
+          static_cast<int16_t>(DspHelper::RampSignal(input_channel.get(),
+                                                     interpolation_length,
+                                                     *external_mute_factor,
+                                                     increment));
+      DspHelper::UnmuteSignal(&input_channel[interpolation_length],
+                              input_length_per_channel - interpolation_length,
+                              external_mute_factor, increment,
+                              &decoded_output[interpolation_length]);
+    } else {
+      // No muting needed.
+      memmove(
+          &decoded_output[interpolation_length],
+          &input_channel[interpolation_length],
+          sizeof(int16_t) * (input_length_per_channel - interpolation_length));
+    }
+
+    // Do overlap and mix linearly.
+    int16_t increment =
+        static_cast<int16_t>(16384 / (interpolation_length + 1));  // In Q14.
+    int16_t mute_factor = 16384 - increment;
+    memmove(temp_data_.data(), expanded_channel.get(),
+            sizeof(int16_t) * best_correlation_index);
+    DspHelper::CrossFade(&expanded_channel[best_correlation_index],
+                         input_channel.get(), interpolation_length,
+                         &mute_factor, increment, decoded_output);
+
+    output_length = best_correlation_index + input_length_per_channel;
+    if (channel == 0) {
+      assert(output->Empty());  // Output should be empty at this point.
+      output->AssertSize(output_length);
+    } else {
+      assert(output->Size() == output_length);
+    }
+    (*output)[channel].OverwriteAt(temp_data_.data(), output_length, 0);
+  }
+
+  // Copy back the first part of the data to |sync_buffer_| and remove it from
+  // |output|.
+  sync_buffer_->ReplaceAtIndex(*output, old_length, sync_buffer_->next_index());
+  output->PopFront(old_length);
+
+  // Return new added length. |old_length| samples were borrowed from
+  // |sync_buffer_|.
+  RTC_DCHECK_GE(output_length, old_length);
+  return output_length - old_length;
+}
+
+size_t Merge::GetExpandedSignal(size_t* old_length, size_t* expand_period) {
+  // Check how much data that is left since earlier.
+  *old_length = sync_buffer_->FutureLength();
+  // Should never be less than overlap_length.
+  assert(*old_length >= expand_->overlap_length());
+  // Generate data to merge the overlap with using expand.
+  expand_->SetParametersForMergeAfterExpand();
+
+  if (*old_length >= 210 * kMaxSampleRate / 8000) {
+    // TODO(hlundin): Write test case for this.
+    // The number of samples available in the sync buffer is more than what fits
+    // in expanded_signal. Keep the first 210 * kMaxSampleRate / 8000 samples,
+    // but shift them towards the end of the buffer. This is ok, since all of
+    // the buffer will be expand data anyway, so as long as the beginning is
+    // left untouched, we're fine.
+    size_t length_diff = *old_length - 210 * kMaxSampleRate / 8000;
+    sync_buffer_->InsertZerosAtIndex(length_diff, sync_buffer_->next_index());
+    *old_length = 210 * kMaxSampleRate / 8000;
+    // This is the truncated length.
+  }
+  // This assert should always be true thanks to the if statement above.
+  assert(210 * kMaxSampleRate / 8000 >= *old_length);
+
+  AudioMultiVector expanded_temp(num_channels_);
+  expand_->Process(&expanded_temp);
+  *expand_period = expanded_temp.Size();  // Samples per channel.
+
+  expanded_.Clear();
+  // Copy what is left since earlier into the expanded vector.
+  expanded_.PushBackFromIndex(*sync_buffer_, sync_buffer_->next_index());
+  assert(expanded_.Size() == *old_length);
+  assert(expanded_temp.Size() > 0);
+  // Do "ugly" copy and paste from the expanded in order to generate more data
+  // to correlate (but not interpolate) with.
+  const size_t required_length = static_cast<size_t>((120 + 80 + 2) * fs_mult_);
+  if (expanded_.Size() < required_length) {
+    while (expanded_.Size() < required_length) {
+      // Append one more pitch period each time.
+      expanded_.PushBack(expanded_temp);
+    }
+    // Trim the length to exactly |required_length|.
+    expanded_.PopBack(expanded_.Size() - required_length);
+  }
+  assert(expanded_.Size() >= required_length);
+  return required_length;
+}
+
+int16_t Merge::SignalScaling(const int16_t* input, size_t input_length,
+                             const int16_t* expanded_signal) const {
+  // Adjust muting factor if new vector is more or less of the BGN energy.
+  const auto mod_input_length = rtc::SafeMin<size_t>(
+      64 * rtc::dchecked_cast<size_t>(fs_mult_), input_length);
+  const int16_t expanded_max =
+      WebRtcSpl_MaxAbsValueW16(expanded_signal, mod_input_length);
+  int32_t factor = (expanded_max * expanded_max) /
+      (std::numeric_limits<int32_t>::max() /
+          static_cast<int32_t>(mod_input_length));
+  const int expanded_shift = factor == 0 ? 0 : 31 - WebRtcSpl_NormW32(factor);
+  int32_t energy_expanded = WebRtcSpl_DotProductWithScale(expanded_signal,
+                                                          expanded_signal,
+                                                          mod_input_length,
+                                                          expanded_shift);
+
+  // Calculate energy of input signal.
+  const int16_t input_max = WebRtcSpl_MaxAbsValueW16(input, mod_input_length);
+  factor = (input_max * input_max) / (std::numeric_limits<int32_t>::max() /
+      static_cast<int32_t>(mod_input_length));
+  const int input_shift = factor == 0 ? 0 : 31 - WebRtcSpl_NormW32(factor);
+  int32_t energy_input = WebRtcSpl_DotProductWithScale(input, input,
+                                                       mod_input_length,
+                                                       input_shift);
+
+  // Align to the same Q-domain.
+  if (input_shift > expanded_shift) {
+    energy_expanded = energy_expanded >> (input_shift - expanded_shift);
+  } else {
+    energy_input = energy_input >> (expanded_shift - input_shift);
+  }
+
+  // Calculate muting factor to use for new frame.
+  int16_t mute_factor;
+  if (energy_input > energy_expanded) {
+    // Normalize |energy_input| to 14 bits.
+    int16_t temp_shift = WebRtcSpl_NormW32(energy_input) - 17;
+    energy_input = WEBRTC_SPL_SHIFT_W32(energy_input, temp_shift);
+    // Put |energy_expanded| in a domain 14 higher, so that
+    // energy_expanded / energy_input is in Q14.
+    energy_expanded = WEBRTC_SPL_SHIFT_W32(energy_expanded, temp_shift + 14);
+    // Calculate sqrt(energy_expanded / energy_input) in Q14.
+    mute_factor = static_cast<int16_t>(
+        WebRtcSpl_SqrtFloor((energy_expanded / energy_input) << 14));
+  } else {
+    // Set to 1 (in Q14) when |expanded| has higher energy than |input|.
+    mute_factor = 16384;
+  }
+
+  return mute_factor;
+}
+
+// TODO(hlundin): There are some parameter values in this method that seem
+// strange. Compare with Expand::Correlation.
+void Merge::Downsample(const int16_t* input, size_t input_length,
+                       const int16_t* expanded_signal, size_t expanded_length) {
+  const int16_t* filter_coefficients;
+  size_t num_coefficients;
+  int decimation_factor = fs_hz_ / 4000;
+  static const size_t kCompensateDelay = 0;
+  size_t length_limit = static_cast<size_t>(fs_hz_ / 100);  // 10 ms in samples.
+  if (fs_hz_ == 8000) {
+    filter_coefficients = DspHelper::kDownsample8kHzTbl;
+    num_coefficients = 3;
+  } else if (fs_hz_ == 16000) {
+    filter_coefficients = DspHelper::kDownsample16kHzTbl;
+    num_coefficients = 5;
+  } else if (fs_hz_ == 32000) {
+    filter_coefficients = DspHelper::kDownsample32kHzTbl;
+    num_coefficients = 7;
+  } else {  // fs_hz_ == 48000
+    filter_coefficients = DspHelper::kDownsample48kHzTbl;
+    num_coefficients = 7;
+  }
+  size_t signal_offset = num_coefficients - 1;
+  WebRtcSpl_DownsampleFast(&expanded_signal[signal_offset],
+                           expanded_length - signal_offset,
+                           expanded_downsampled_, kExpandDownsampLength,
+                           filter_coefficients, num_coefficients,
+                           decimation_factor, kCompensateDelay);
+  if (input_length <= length_limit) {
+    // Not quite long enough, so we have to cheat a bit.
+    // If the input is really short, we'll just use the input length as is, and
+    // won't bother with correcting for the offset. This is clearly a
+    // pathological case, and the signal quality will suffer.
+    const size_t temp_len = input_length > signal_offset
+                                ? input_length - signal_offset
+                                : input_length;
+    // TODO(hlundin): Should |downsamp_temp_len| be corrected for round-off
+    // errors? I.e., (temp_len + decimation_factor - 1) / decimation_factor?
+    size_t downsamp_temp_len = temp_len / decimation_factor;
+    WebRtcSpl_DownsampleFast(&input[signal_offset], temp_len,
+                             input_downsampled_, downsamp_temp_len,
+                             filter_coefficients, num_coefficients,
+                             decimation_factor, kCompensateDelay);
+    memset(&input_downsampled_[downsamp_temp_len], 0,
+           sizeof(int16_t) * (kInputDownsampLength - downsamp_temp_len));
+  } else {
+    WebRtcSpl_DownsampleFast(&input[signal_offset],
+                             input_length - signal_offset, input_downsampled_,
+                             kInputDownsampLength, filter_coefficients,
+                             num_coefficients, decimation_factor,
+                             kCompensateDelay);
+  }
+}
+
+size_t Merge::CorrelateAndPeakSearch(size_t start_position, size_t input_length,
+                                     size_t expand_period) const {
+  // Calculate correlation without any normalization.
+  const size_t max_corr_length = kMaxCorrelationLength;
+  size_t stop_position_downsamp =
+      std::min(max_corr_length, expand_->max_lag() / (fs_mult_ * 2) + 1);
+
+  int32_t correlation[kMaxCorrelationLength];
+  CrossCorrelationWithAutoShift(input_downsampled_, expanded_downsampled_,
+                                kInputDownsampLength, stop_position_downsamp, 1,
+                                correlation);
+
+  // Normalize correlation to 14 bits and copy to a 16-bit array.
+  const size_t pad_length = expand_->overlap_length() - 1;
+  const size_t correlation_buffer_size = 2 * pad_length + kMaxCorrelationLength;
+  std::unique_ptr<int16_t[]> correlation16(
+      new int16_t[correlation_buffer_size]);
+  memset(correlation16.get(), 0, correlation_buffer_size * sizeof(int16_t));
+  int16_t* correlation_ptr = &correlation16[pad_length];
+  int32_t max_correlation = WebRtcSpl_MaxAbsValueW32(correlation,
+                                                     stop_position_downsamp);
+  int norm_shift = std::max(0, 17 - WebRtcSpl_NormW32(max_correlation));
+  WebRtcSpl_VectorBitShiftW32ToW16(correlation_ptr, stop_position_downsamp,
+                                   correlation, norm_shift);
+
+  // Calculate allowed starting point for peak finding.
+  // The peak location bestIndex must fulfill two criteria:
+  // (1) w16_bestIndex + input_length <
+  //     timestamps_per_call_ + expand_->overlap_length();
+  // (2) w16_bestIndex + input_length < start_position.
+  size_t start_index = timestamps_per_call_ + expand_->overlap_length();
+  start_index = std::max(start_position, start_index);
+  start_index = (input_length > start_index) ? 0 : (start_index - input_length);
+  // Downscale starting index to 4kHz domain. (fs_mult_ * 2 = fs_hz_ / 4000.)
+  size_t start_index_downsamp = start_index / (fs_mult_ * 2);
+
+  // Calculate a modified |stop_position_downsamp| to account for the increased
+  // start index |start_index_downsamp| and the effective array length.
+  size_t modified_stop_pos =
+      std::min(stop_position_downsamp,
+               kMaxCorrelationLength + pad_length - start_index_downsamp);
+  size_t best_correlation_index;
+  int16_t best_correlation;
+  static const size_t kNumCorrelationCandidates = 1;
+  DspHelper::PeakDetection(&correlation_ptr[start_index_downsamp],
+                           modified_stop_pos, kNumCorrelationCandidates,
+                           fs_mult_, &best_correlation_index,
+                           &best_correlation);
+  // Compensate for modified start index.
+  best_correlation_index += start_index;
+
+  // Ensure that underrun does not occur for 10ms case => we have to get at
+  // least 10ms + overlap . (This should never happen thanks to the above
+  // modification of peak-finding starting point.)
+  while (((best_correlation_index + input_length) <
+          (timestamps_per_call_ + expand_->overlap_length())) ||
+         ((best_correlation_index + input_length) < start_position)) {
+    assert(false);  // Should never happen.
+    best_correlation_index += expand_period;  // Jump one lag ahead.
+  }
+  return best_correlation_index;
+}
+
+size_t Merge::RequiredFutureSamples() {
+  return fs_hz_ / 100 * num_channels_;  // 10 ms.
+}
+
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/merge.h b/modules/audio_coding/neteq/merge.h
new file mode 100644
index 0000000..e027cd7
--- /dev/null
+++ b/modules/audio_coding/neteq/merge.h
@@ -0,0 +1,102 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MERGE_H_
+#define MODULES_AUDIO_CODING_NETEQ_MERGE_H_
+
+#include <assert.h>
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Forward declarations.
+class Expand;
+class SyncBuffer;
+
+// This class handles the transition from expansion to normal operation.
+// When a packet is not available for decoding when needed, the expand operation
+// is called to generate extrapolation data. If the missing packet arrives,
+// i.e., it was just delayed, it can be decoded and appended directly to the
+// end of the expanded data (thanks to how the Expand class operates). However,
+// if a later packet arrives instead, the loss is a fact, and the new data must
+// be stitched together with the end of the expanded data. This stitching is
+// what the Merge class does.
+class Merge {
+ public:
+  Merge(int fs_hz,
+        size_t num_channels,
+        Expand* expand,
+        SyncBuffer* sync_buffer);
+  virtual ~Merge();
+
+  // The main method to produce the audio data. The decoded data is supplied in
+  // |input|, having |input_length| samples in total for all channels
+  // (interleaved). The result is written to |output|. The number of channels
+  // allocated in |output| defines the number of channels that will be used when
+  // de-interleaving |input|. The values in |external_mute_factor_array| (Q14)
+  // will be used to scale the audio, and is updated in the process. The array
+  // must have |num_channels_| elements.
+  virtual size_t Process(int16_t* input, size_t input_length,
+                         int16_t* external_mute_factor_array,
+                         AudioMultiVector* output);
+
+  virtual size_t RequiredFutureSamples();
+
+ protected:
+  const int fs_hz_;
+  const size_t num_channels_;
+
+ private:
+  static const int kMaxSampleRate = 48000;
+  static const size_t kExpandDownsampLength = 100;
+  static const size_t kInputDownsampLength = 40;
+  static const size_t kMaxCorrelationLength = 60;
+
+  // Calls |expand_| to get more expansion data to merge with. The data is
+  // written to |expanded_signal_|. Returns the length of the expanded data,
+  // while |expand_period| will be the number of samples in one expansion period
+  // (typically one pitch period). The value of |old_length| will be the number
+  // of samples that were taken from the |sync_buffer_|.
+  size_t GetExpandedSignal(size_t* old_length, size_t* expand_period);
+
+  // Analyzes |input| and |expanded_signal| and returns muting factor (Q14) to
+  // be used on the new data.
+  int16_t SignalScaling(const int16_t* input, size_t input_length,
+                        const int16_t* expanded_signal) const;
+
+  // Downsamples |input| (|input_length| samples) and |expanded_signal| to
+  // 4 kHz sample rate. The downsampled signals are written to
+  // |input_downsampled_| and |expanded_downsampled_|, respectively.
+  void Downsample(const int16_t* input, size_t input_length,
+                  const int16_t* expanded_signal, size_t expanded_length);
+
+  // Calculates cross-correlation between |input_downsampled_| and
+  // |expanded_downsampled_|, and finds the correlation maximum. The maximizing
+  // lag is returned.
+  size_t CorrelateAndPeakSearch(size_t start_position, size_t input_length,
+                                size_t expand_period) const;
+
+  const int fs_mult_;  // fs_hz_ / 8000.
+  const size_t timestamps_per_call_;
+  Expand* expand_;
+  SyncBuffer* sync_buffer_;
+  int16_t expanded_downsampled_[kExpandDownsampLength];
+  int16_t input_downsampled_[kInputDownsampLength];
+  AudioMultiVector expanded_;
+  std::vector<int16_t> temp_data_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(Merge);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_MERGE_H_
diff --git a/modules/audio_coding/neteq/merge_unittest.cc b/modules/audio_coding/neteq/merge_unittest.cc
new file mode 100644
index 0000000..7ff3b8c
--- /dev/null
+++ b/modules/audio_coding/neteq/merge_unittest.cc
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for Merge class.
+
+#include "modules/audio_coding/neteq/merge.h"
+
+#include <vector>
+
+#include "modules/audio_coding/neteq/background_noise.h"
+#include "modules/audio_coding/neteq/expand.h"
+#include "modules/audio_coding/neteq/random_vector.h"
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(Merge, CreateAndDestroy) {
+  int fs = 8000;
+  size_t channels = 1;
+  BackgroundNoise bgn(channels);
+  SyncBuffer sync_buffer(1, 1000);
+  RandomVector random_vector;
+  StatisticsCalculator statistics;
+  Expand expand(&bgn, &sync_buffer, &random_vector, &statistics, fs, channels);
+  Merge merge(fs, channels, &expand, &sync_buffer);
+}
+
+// TODO(hlundin): Write more tests.
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h b/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h
new file mode 100644
index 0000000..f662fb6
--- /dev/null
+++ b/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h
@@ -0,0 +1,37 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_BUFFER_LEVEL_FILTER_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_BUFFER_LEVEL_FILTER_H_
+
+#include "modules/audio_coding/neteq/buffer_level_filter.h"
+
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockBufferLevelFilter : public BufferLevelFilter {
+ public:
+  virtual ~MockBufferLevelFilter() { Die(); }
+  MOCK_METHOD0(Die,
+      void());
+  MOCK_METHOD0(Reset,
+      void());
+  MOCK_METHOD3(Update,
+      void(size_t buffer_size_packets, int time_stretched_samples,
+           size_t packet_len_samples));
+  MOCK_METHOD1(SetTargetBufferLevel,
+      void(int target_buffer_level));
+  MOCK_CONST_METHOD0(filtered_current_level,
+      int());
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_BUFFER_LEVEL_FILTER_H_
diff --git a/modules/audio_coding/neteq/mock/mock_decoder_database.h b/modules/audio_coding/neteq/mock/mock_decoder_database.h
new file mode 100644
index 0000000..049b693
--- /dev/null
+++ b/modules/audio_coding/neteq/mock/mock_decoder_database.h
@@ -0,0 +1,61 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DECODER_DATABASE_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DECODER_DATABASE_H_
+
+#include <string>
+
+#include "modules/audio_coding/neteq/decoder_database.h"
+
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockDecoderDatabase : public DecoderDatabase {
+ public:
+  explicit MockDecoderDatabase(
+      rtc::scoped_refptr<AudioDecoderFactory> factory = nullptr)
+      : DecoderDatabase(factory) {}
+  virtual ~MockDecoderDatabase() { Die(); }
+  MOCK_METHOD0(Die, void());
+  MOCK_CONST_METHOD0(Empty,
+      bool());
+  MOCK_CONST_METHOD0(Size,
+      int());
+  MOCK_METHOD0(Reset,
+      void());
+  MOCK_METHOD3(RegisterPayload,
+      int(uint8_t rtp_payload_type, NetEqDecoder codec_type,
+          const std::string& name));
+  MOCK_METHOD2(RegisterPayload,
+               int(int rtp_payload_type, const SdpAudioFormat& audio_format));
+  MOCK_METHOD4(InsertExternal,
+               int(uint8_t rtp_payload_type,
+                   NetEqDecoder codec_type,
+                   const std::string& codec_name,
+                   AudioDecoder* decoder));
+  MOCK_METHOD1(Remove,
+      int(uint8_t rtp_payload_type));
+  MOCK_METHOD0(RemoveAll, void());
+  MOCK_CONST_METHOD1(GetDecoderInfo,
+      const DecoderInfo*(uint8_t rtp_payload_type));
+  MOCK_METHOD2(SetActiveDecoder,
+      int(uint8_t rtp_payload_type, bool* new_decoder));
+  MOCK_CONST_METHOD0(GetActiveDecoder,
+      AudioDecoder*());
+  MOCK_METHOD1(SetActiveCngDecoder,
+      int(uint8_t rtp_payload_type));
+  MOCK_CONST_METHOD0(GetActiveCngDecoder,
+      ComfortNoiseDecoder*());
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DECODER_DATABASE_H_
diff --git a/modules/audio_coding/neteq/mock/mock_delay_manager.h b/modules/audio_coding/neteq/mock/mock_delay_manager.h
new file mode 100644
index 0000000..61f209d
--- /dev/null
+++ b/modules/audio_coding/neteq/mock/mock_delay_manager.h
@@ -0,0 +1,62 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DELAY_MANAGER_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DELAY_MANAGER_H_
+
+#include "modules/audio_coding/neteq/delay_manager.h"
+
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockDelayManager : public DelayManager {
+ public:
+  MockDelayManager(size_t max_packets_in_buffer,
+                   DelayPeakDetector* peak_detector,
+                   const TickTimer* tick_timer)
+      : DelayManager(max_packets_in_buffer, peak_detector, tick_timer) {}
+  virtual ~MockDelayManager() { Die(); }
+  MOCK_METHOD0(Die, void());
+  MOCK_CONST_METHOD0(iat_vector,
+      const IATVector&());
+  MOCK_METHOD3(Update,
+      int(uint16_t sequence_number, uint32_t timestamp, int sample_rate_hz));
+  MOCK_METHOD1(CalculateTargetLevel,
+      int(int iat_packets));
+  MOCK_METHOD1(SetPacketAudioLength,
+      int(int length_ms));
+  MOCK_METHOD0(Reset,
+      void());
+  MOCK_CONST_METHOD0(PeakFound,
+      bool());
+  MOCK_METHOD1(UpdateCounters,
+      void(int elapsed_time_ms));
+  MOCK_METHOD0(ResetPacketIatCount,
+      void());
+  MOCK_CONST_METHOD2(BufferLimits,
+      void(int* lower_limit, int* higher_limit));
+  MOCK_CONST_METHOD0(TargetLevel,
+      int());
+  MOCK_METHOD0(RegisterEmptyPacket, void());
+  MOCK_METHOD1(set_extra_delay_ms,
+      void(int16_t delay));
+  MOCK_CONST_METHOD0(base_target_level,
+      int());
+  MOCK_METHOD1(set_streaming_mode,
+      void(bool value));
+  MOCK_CONST_METHOD0(last_pack_cng_or_dtmf,
+      int());
+  MOCK_METHOD1(set_last_pack_cng_or_dtmf,
+      void(int value));
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DELAY_MANAGER_H_
diff --git a/modules/audio_coding/neteq/mock/mock_delay_peak_detector.h b/modules/audio_coding/neteq/mock/mock_delay_peak_detector.h
new file mode 100644
index 0000000..f6cdea0
--- /dev/null
+++ b/modules/audio_coding/neteq/mock/mock_delay_peak_detector.h
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DELAY_PEAK_DETECTOR_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DELAY_PEAK_DETECTOR_H_
+
+#include "modules/audio_coding/neteq/delay_peak_detector.h"
+
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockDelayPeakDetector : public DelayPeakDetector {
+ public:
+  MockDelayPeakDetector(const TickTimer* tick_timer)
+      : DelayPeakDetector(tick_timer) {}
+  virtual ~MockDelayPeakDetector() { Die(); }
+  MOCK_METHOD0(Die, void());
+  MOCK_METHOD0(Reset, void());
+  MOCK_METHOD1(SetPacketAudioLength, void(int length_ms));
+  MOCK_METHOD0(peak_found, bool());
+  MOCK_CONST_METHOD0(MaxPeakHeight, int());
+  MOCK_CONST_METHOD0(MaxPeakPeriod, uint64_t());
+  MOCK_METHOD2(Update, bool(int inter_arrival_time, int target_level));
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DELAY_PEAK_DETECTOR_H_
diff --git a/modules/audio_coding/neteq/mock/mock_dtmf_buffer.h b/modules/audio_coding/neteq/mock/mock_dtmf_buffer.h
new file mode 100644
index 0000000..153a4d7
--- /dev/null
+++ b/modules/audio_coding/neteq/mock/mock_dtmf_buffer.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DTMF_BUFFER_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DTMF_BUFFER_H_
+
+#include "modules/audio_coding/neteq/dtmf_buffer.h"
+
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockDtmfBuffer : public DtmfBuffer {
+ public:
+  MockDtmfBuffer(int fs) : DtmfBuffer(fs) {}
+  virtual ~MockDtmfBuffer() { Die(); }
+  MOCK_METHOD0(Die, void());
+  MOCK_METHOD0(Flush,
+      void());
+  MOCK_METHOD1(InsertEvent,
+      int(const DtmfEvent& event));
+  MOCK_METHOD2(GetEvent,
+      bool(uint32_t current_timestamp, DtmfEvent* event));
+  MOCK_CONST_METHOD0(Length,
+      size_t());
+  MOCK_CONST_METHOD0(Empty,
+      bool());
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DTMF_BUFFER_H_
diff --git a/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h b/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h
new file mode 100644
index 0000000..2cb5980
--- /dev/null
+++ b/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DTMF_TONE_GENERATOR_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DTMF_TONE_GENERATOR_H_
+
+#include "modules/audio_coding/neteq/dtmf_tone_generator.h"
+
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockDtmfToneGenerator : public DtmfToneGenerator {
+ public:
+  virtual ~MockDtmfToneGenerator() { Die(); }
+  MOCK_METHOD0(Die, void());
+  MOCK_METHOD3(Init,
+      int(int fs, int event, int attenuation));
+  MOCK_METHOD0(Reset,
+      void());
+  MOCK_METHOD2(Generate,
+      int(size_t num_samples, AudioMultiVector* output));
+  MOCK_CONST_METHOD0(initialized,
+      bool());
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DTMF_TONE_GENERATOR_H_
diff --git a/modules/audio_coding/neteq/mock/mock_expand.h b/modules/audio_coding/neteq/mock/mock_expand.h
new file mode 100644
index 0000000..05fdaec
--- /dev/null
+++ b/modules/audio_coding/neteq/mock/mock_expand.h
@@ -0,0 +1,64 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_EXPAND_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_EXPAND_H_
+
+#include "modules/audio_coding/neteq/expand.h"
+
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockExpand : public Expand {
+ public:
+  MockExpand(BackgroundNoise* background_noise,
+             SyncBuffer* sync_buffer,
+             RandomVector* random_vector,
+             StatisticsCalculator* statistics,
+             int fs,
+             size_t num_channels)
+      : Expand(background_noise,
+               sync_buffer,
+               random_vector,
+               statistics,
+               fs,
+               num_channels) {}
+  virtual ~MockExpand() { Die(); }
+  MOCK_METHOD0(Die, void());
+  MOCK_METHOD0(Reset,
+      void());
+  MOCK_METHOD1(Process,
+      int(AudioMultiVector* output));
+  MOCK_METHOD0(SetParametersForNormalAfterExpand,
+      void());
+  MOCK_METHOD0(SetParametersForMergeAfterExpand,
+      void());
+  MOCK_CONST_METHOD0(overlap_length,
+      size_t());
+};
+
+}  // namespace webrtc
+
+namespace webrtc {
+
+class MockExpandFactory : public ExpandFactory {
+ public:
+  MOCK_CONST_METHOD6(Create,
+                     Expand*(BackgroundNoise* background_noise,
+                             SyncBuffer* sync_buffer,
+                             RandomVector* random_vector,
+                             StatisticsCalculator* statistics,
+                             int fs,
+                             size_t num_channels));
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_EXPAND_H_
diff --git a/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h b/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h
new file mode 100644
index 0000000..b315240
--- /dev/null
+++ b/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h
@@ -0,0 +1,98 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_EXTERNAL_DECODER_PCM16B_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_EXTERNAL_DECODER_PCM16B_H_
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
+#include "rtc_base/constructormagic.h"
+#include "test/gmock.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+using ::testing::_;
+using ::testing::Invoke;
+
+// Implement an external version of the PCM16b decoder.
+class ExternalPcm16B : public AudioDecoder {
+ public:
+  explicit ExternalPcm16B(int sample_rate_hz)
+      : sample_rate_hz_(sample_rate_hz) {}
+  void Reset() override {}
+
+  int DecodeInternal(const uint8_t* encoded,
+                     size_t encoded_len,
+                     int sample_rate_hz,
+                     int16_t* decoded,
+                     SpeechType* speech_type) override {
+    EXPECT_EQ(sample_rate_hz_, sample_rate_hz);
+    size_t ret = WebRtcPcm16b_Decode(encoded, encoded_len, decoded);
+    *speech_type = ConvertSpeechType(1);
+    return static_cast<int>(ret);
+  }
+  int SampleRateHz() const override { return sample_rate_hz_; }
+  size_t Channels() const override { return 1; }
+
+ private:
+  const int sample_rate_hz_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(ExternalPcm16B);
+};
+
+// Create a mock of ExternalPcm16B which delegates all calls to the real object.
+// The reason is that we can then track that the correct calls are being made.
+class MockExternalPcm16B : public AudioDecoder {
+ public:
+  explicit MockExternalPcm16B(int sample_rate_hz) : real_(sample_rate_hz) {
+    // By default, all calls are delegated to the real object.
+    ON_CALL(*this, DecodeInternal(_, _, _, _, _))
+        .WillByDefault(Invoke(&real_, &ExternalPcm16B::DecodeInternal));
+    ON_CALL(*this, HasDecodePlc())
+        .WillByDefault(Invoke(&real_, &ExternalPcm16B::HasDecodePlc));
+    ON_CALL(*this, DecodePlc(_, _))
+        .WillByDefault(Invoke(&real_, &ExternalPcm16B::DecodePlc));
+    ON_CALL(*this, Reset())
+        .WillByDefault(Invoke(&real_, &ExternalPcm16B::Reset));
+    ON_CALL(*this, IncomingPacket(_, _, _, _, _))
+        .WillByDefault(Invoke(&real_, &ExternalPcm16B::IncomingPacket));
+    ON_CALL(*this, ErrorCode())
+        .WillByDefault(Invoke(&real_, &ExternalPcm16B::ErrorCode));
+  }
+  virtual ~MockExternalPcm16B() { Die(); }
+
+  MOCK_METHOD0(Die, void());
+  MOCK_METHOD5(DecodeInternal,
+               int(const uint8_t* encoded,
+                   size_t encoded_len,
+                   int sample_rate_hz,
+                   int16_t* decoded,
+                   SpeechType* speech_type));
+  MOCK_CONST_METHOD0(HasDecodePlc,
+      bool());
+  MOCK_METHOD2(DecodePlc,
+      size_t(size_t num_frames, int16_t* decoded));
+  MOCK_METHOD0(Reset, void());
+  MOCK_METHOD5(IncomingPacket,
+      int(const uint8_t* payload, size_t payload_len,
+          uint16_t rtp_sequence_number, uint32_t rtp_timestamp,
+          uint32_t arrival_timestamp));
+  MOCK_METHOD0(ErrorCode,
+      int());
+
+  int SampleRateHz() const /* override */ { return real_.SampleRateHz(); }
+  size_t Channels() const /* override */ { return real_.Channels(); }
+
+ private:
+  ExternalPcm16B real_;
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_EXTERNAL_DECODER_PCM16B_H_
diff --git a/modules/audio_coding/neteq/mock/mock_packet_buffer.h b/modules/audio_coding/neteq/mock/mock_packet_buffer.h
new file mode 100644
index 0000000..ac7d9b7
--- /dev/null
+++ b/modules/audio_coding/neteq/mock/mock_packet_buffer.h
@@ -0,0 +1,68 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_PACKET_BUFFER_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_PACKET_BUFFER_H_
+
+#include "modules/audio_coding/neteq/packet_buffer.h"
+
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockPacketBuffer : public PacketBuffer {
+ public:
+  MockPacketBuffer(size_t max_number_of_packets, const TickTimer* tick_timer)
+      : PacketBuffer(max_number_of_packets, tick_timer) {}
+  virtual ~MockPacketBuffer() { Die(); }
+  MOCK_METHOD0(Die, void());
+  MOCK_METHOD0(Flush,
+      void());
+  MOCK_CONST_METHOD0(Empty,
+      bool());
+  int InsertPacket(Packet&& packet, StatisticsCalculator* stats) {
+    return InsertPacketWrapped(&packet, stats);
+  }
+  // Since gtest does not properly support move-only types, InsertPacket is
+  // implemented as a wrapper. You'll have to implement InsertPacketWrapped
+  // instead and move from |*packet|.
+  MOCK_METHOD2(InsertPacketWrapped,
+               int(Packet* packet, StatisticsCalculator* stats));
+  MOCK_METHOD5(InsertPacketList,
+               int(PacketList* packet_list,
+                   const DecoderDatabase& decoder_database,
+                   rtc::Optional<uint8_t>* current_rtp_payload_type,
+                   rtc::Optional<uint8_t>* current_cng_rtp_payload_type,
+                   StatisticsCalculator* stats));
+  MOCK_CONST_METHOD1(NextTimestamp,
+      int(uint32_t* next_timestamp));
+  MOCK_CONST_METHOD2(NextHigherTimestamp,
+      int(uint32_t timestamp, uint32_t* next_timestamp));
+  MOCK_CONST_METHOD0(PeekNextPacket,
+      const Packet*());
+  MOCK_METHOD0(GetNextPacket,
+      rtc::Optional<Packet>());
+  MOCK_METHOD1(DiscardNextPacket, int(StatisticsCalculator* stats));
+  MOCK_METHOD3(DiscardOldPackets,
+               void(uint32_t timestamp_limit,
+                    uint32_t horizon_samples,
+                    StatisticsCalculator* stats));
+  MOCK_METHOD2(DiscardAllOldPackets,
+               void(uint32_t timestamp_limit, StatisticsCalculator* stats));
+  MOCK_CONST_METHOD0(NumPacketsInBuffer,
+      size_t());
+  MOCK_METHOD1(IncrementWaitingTimes,
+      void(int));
+  MOCK_CONST_METHOD0(current_memory_bytes,
+      int());
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_PACKET_BUFFER_H_
diff --git a/modules/audio_coding/neteq/mock/mock_red_payload_splitter.h b/modules/audio_coding/neteq/mock/mock_red_payload_splitter.h
new file mode 100644
index 0000000..27a2276
--- /dev/null
+++ b/modules/audio_coding/neteq/mock/mock_red_payload_splitter.h
@@ -0,0 +1,29 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_RED_PAYLOAD_SPLITTER_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_RED_PAYLOAD_SPLITTER_H_
+
+#include "modules/audio_coding/neteq/red_payload_splitter.h"
+
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockRedPayloadSplitter : public RedPayloadSplitter {
+ public:
+  MOCK_METHOD1(SplitRed, bool(PacketList* packet_list));
+  MOCK_METHOD2(CheckRedPayloads,
+               int(PacketList* packet_list,
+                   const DecoderDatabase& decoder_database));
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_RED_PAYLOAD_SPLITTER_H_
diff --git a/modules/audio_coding/neteq/mock/mock_statistics_calculator.h b/modules/audio_coding/neteq/mock/mock_statistics_calculator.h
new file mode 100644
index 0000000..85f2620
--- /dev/null
+++ b/modules/audio_coding/neteq/mock/mock_statistics_calculator.h
@@ -0,0 +1,27 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_STATISTICS_CALCULATOR_H_
+#define MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_STATISTICS_CALCULATOR_H_
+
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockStatisticsCalculator : public StatisticsCalculator {
+ public:
+  MOCK_METHOD1(PacketsDiscarded, void(size_t num_packets));
+  MOCK_METHOD1(SecondaryPacketsDiscarded, void(size_t num_packets));
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_STATISTICS_CALCULATOR_H_
diff --git a/modules/audio_coding/neteq/nack_tracker.cc b/modules/audio_coding/neteq/nack_tracker.cc
new file mode 100644
index 0000000..d187883
--- /dev/null
+++ b/modules/audio_coding/neteq/nack_tracker.cc
@@ -0,0 +1,232 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/nack_tracker.h"
+
+#include <assert.h>  // For assert.
+
+#include <algorithm>  // For std::max.
+
+#include "modules/include/module_common_types.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+const int kDefaultSampleRateKhz = 48;
+const int kDefaultPacketSizeMs = 20;
+
+}  // namespace
+
+NackTracker::NackTracker(int nack_threshold_packets)
+    : nack_threshold_packets_(nack_threshold_packets),
+      sequence_num_last_received_rtp_(0),
+      timestamp_last_received_rtp_(0),
+      any_rtp_received_(false),
+      sequence_num_last_decoded_rtp_(0),
+      timestamp_last_decoded_rtp_(0),
+      any_rtp_decoded_(false),
+      sample_rate_khz_(kDefaultSampleRateKhz),
+      samples_per_packet_(sample_rate_khz_ * kDefaultPacketSizeMs),
+      max_nack_list_size_(kNackListSizeLimit) {}
+
+NackTracker::~NackTracker() = default;
+
+NackTracker* NackTracker::Create(int nack_threshold_packets) {
+  return new NackTracker(nack_threshold_packets);
+}
+
+void NackTracker::UpdateSampleRate(int sample_rate_hz) {
+  assert(sample_rate_hz > 0);
+  sample_rate_khz_ = sample_rate_hz / 1000;
+}
+
+void NackTracker::UpdateLastReceivedPacket(uint16_t sequence_number,
+                                           uint32_t timestamp) {
+  // Just record the value of sequence number and timestamp if this is the
+  // first packet.
+  if (!any_rtp_received_) {
+    sequence_num_last_received_rtp_ = sequence_number;
+    timestamp_last_received_rtp_ = timestamp;
+    any_rtp_received_ = true;
+    // If no packet is decoded, to have a reasonable estimate of time-to-play
+    // use the given values.
+    if (!any_rtp_decoded_) {
+      sequence_num_last_decoded_rtp_ = sequence_number;
+      timestamp_last_decoded_rtp_ = timestamp;
+    }
+    return;
+  }
+
+  if (sequence_number == sequence_num_last_received_rtp_)
+    return;
+
+  // Received RTP should not be in the list.
+  nack_list_.erase(sequence_number);
+
+  // If this is an old sequence number, no more action is required, return.
+  if (IsNewerSequenceNumber(sequence_num_last_received_rtp_, sequence_number))
+    return;
+
+  UpdateSamplesPerPacket(sequence_number, timestamp);
+
+  UpdateList(sequence_number);
+
+  sequence_num_last_received_rtp_ = sequence_number;
+  timestamp_last_received_rtp_ = timestamp;
+  LimitNackListSize();
+}
+
+void NackTracker::UpdateSamplesPerPacket(
+    uint16_t sequence_number_current_received_rtp,
+    uint32_t timestamp_current_received_rtp) {
+  uint32_t timestamp_increase =
+      timestamp_current_received_rtp - timestamp_last_received_rtp_;
+  uint16_t sequence_num_increase =
+      sequence_number_current_received_rtp - sequence_num_last_received_rtp_;
+
+  samples_per_packet_ = timestamp_increase / sequence_num_increase;
+}
+
+void NackTracker::UpdateList(uint16_t sequence_number_current_received_rtp) {
+  // Some of the packets which were considered late, now are considered missing.
+  ChangeFromLateToMissing(sequence_number_current_received_rtp);
+
+  if (IsNewerSequenceNumber(sequence_number_current_received_rtp,
+                            sequence_num_last_received_rtp_ + 1))
+    AddToList(sequence_number_current_received_rtp);
+}
+
+void NackTracker::ChangeFromLateToMissing(
+    uint16_t sequence_number_current_received_rtp) {
+  NackList::const_iterator lower_bound =
+      nack_list_.lower_bound(static_cast<uint16_t>(
+          sequence_number_current_received_rtp - nack_threshold_packets_));
+
+  for (NackList::iterator it = nack_list_.begin(); it != lower_bound; ++it)
+    it->second.is_missing = true;
+}
+
+uint32_t NackTracker::EstimateTimestamp(uint16_t sequence_num) {
+  uint16_t sequence_num_diff = sequence_num - sequence_num_last_received_rtp_;
+  return sequence_num_diff * samples_per_packet_ + timestamp_last_received_rtp_;
+}
+
+void NackTracker::AddToList(uint16_t sequence_number_current_received_rtp) {
+  assert(!any_rtp_decoded_ ||
+         IsNewerSequenceNumber(sequence_number_current_received_rtp,
+                               sequence_num_last_decoded_rtp_));
+
+  // Packets with sequence numbers older than |upper_bound_missing| are
+  // considered missing, and the rest are considered late.
+  uint16_t upper_bound_missing =
+      sequence_number_current_received_rtp - nack_threshold_packets_;
+
+  for (uint16_t n = sequence_num_last_received_rtp_ + 1;
+       IsNewerSequenceNumber(sequence_number_current_received_rtp, n); ++n) {
+    bool is_missing = IsNewerSequenceNumber(upper_bound_missing, n);
+    uint32_t timestamp = EstimateTimestamp(n);
+    NackElement nack_element(TimeToPlay(timestamp), timestamp, is_missing);
+    nack_list_.insert(nack_list_.end(), std::make_pair(n, nack_element));
+  }
+}
+
+void NackTracker::UpdateEstimatedPlayoutTimeBy10ms() {
+  while (!nack_list_.empty() &&
+         nack_list_.begin()->second.time_to_play_ms <= 10)
+    nack_list_.erase(nack_list_.begin());
+
+  for (NackList::iterator it = nack_list_.begin(); it != nack_list_.end(); ++it)
+    it->second.time_to_play_ms -= 10;
+}
+
+void NackTracker::UpdateLastDecodedPacket(uint16_t sequence_number,
+                                          uint32_t timestamp) {
+  if (IsNewerSequenceNumber(sequence_number, sequence_num_last_decoded_rtp_) ||
+      !any_rtp_decoded_) {
+    sequence_num_last_decoded_rtp_ = sequence_number;
+    timestamp_last_decoded_rtp_ = timestamp;
+    // Packets in the list with sequence numbers less than the
+    // sequence number of the decoded RTP should be removed from the lists.
+    // They will be discarded by the jitter buffer if they arrive.
+    nack_list_.erase(nack_list_.begin(),
+                     nack_list_.upper_bound(sequence_num_last_decoded_rtp_));
+
+    // Update estimated time-to-play.
+    for (NackList::iterator it = nack_list_.begin(); it != nack_list_.end();
+         ++it)
+      it->second.time_to_play_ms = TimeToPlay(it->second.estimated_timestamp);
+  } else {
+    assert(sequence_number == sequence_num_last_decoded_rtp_);
+
+    // Same sequence number as before. 10 ms is elapsed, update estimations for
+    // time-to-play.
+    UpdateEstimatedPlayoutTimeBy10ms();
+
+    // Update timestamp for better estimate of time-to-play, for packets which
+    // are added to NACK list later on.
+    timestamp_last_decoded_rtp_ += sample_rate_khz_ * 10;
+  }
+  any_rtp_decoded_ = true;
+}
+
+NackTracker::NackList NackTracker::GetNackList() const {
+  return nack_list_;
+}
+
+void NackTracker::Reset() {
+  nack_list_.clear();
+
+  sequence_num_last_received_rtp_ = 0;
+  timestamp_last_received_rtp_ = 0;
+  any_rtp_received_ = false;
+  sequence_num_last_decoded_rtp_ = 0;
+  timestamp_last_decoded_rtp_ = 0;
+  any_rtp_decoded_ = false;
+  sample_rate_khz_ = kDefaultSampleRateKhz;
+  samples_per_packet_ = sample_rate_khz_ * kDefaultPacketSizeMs;
+}
+
+void NackTracker::SetMaxNackListSize(size_t max_nack_list_size) {
+  RTC_CHECK_GT(max_nack_list_size, 0);
+  // Ugly hack to get around the problem of passing static consts by reference.
+  const size_t kNackListSizeLimitLocal = NackTracker::kNackListSizeLimit;
+  RTC_CHECK_LE(max_nack_list_size, kNackListSizeLimitLocal);
+
+  max_nack_list_size_ = max_nack_list_size;
+  LimitNackListSize();
+}
+
+void NackTracker::LimitNackListSize() {
+  uint16_t limit = sequence_num_last_received_rtp_ -
+                   static_cast<uint16_t>(max_nack_list_size_) - 1;
+  nack_list_.erase(nack_list_.begin(), nack_list_.upper_bound(limit));
+}
+
+int64_t NackTracker::TimeToPlay(uint32_t timestamp) const {
+  uint32_t timestamp_increase = timestamp - timestamp_last_decoded_rtp_;
+  return timestamp_increase / sample_rate_khz_;
+}
+
+// We don't erase elements with time-to-play shorter than round-trip-time.
+std::vector<uint16_t> NackTracker::GetNackList(
+    int64_t round_trip_time_ms) const {
+  RTC_DCHECK_GE(round_trip_time_ms, 0);
+  std::vector<uint16_t> sequence_numbers;
+  for (NackList::const_iterator it = nack_list_.begin(); it != nack_list_.end();
+       ++it) {
+    if (it->second.is_missing &&
+        it->second.time_to_play_ms > round_trip_time_ms)
+      sequence_numbers.push_back(it->first);
+  }
+  return sequence_numbers;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/nack_tracker.h b/modules/audio_coding/neteq/nack_tracker.h
new file mode 100644
index 0000000..4f88d91
--- /dev/null
+++ b/modules/audio_coding/neteq/nack_tracker.h
@@ -0,0 +1,208 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_NACK_TRACKER_H_
+#define MODULES_AUDIO_CODING_NETEQ_NACK_TRACKER_H_
+
+#include <vector>
+#include <map>
+
+#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "rtc_base/gtest_prod_util.h"
+
+//
+// The NackTracker class keeps track of the lost packets, an estimate of
+// time-to-play for each packet is also given.
+//
+// Every time a packet is pushed into NetEq, LastReceivedPacket() has to be
+// called to update the NACK list.
+//
+// Every time 10ms audio is pulled from NetEq LastDecodedPacket() should be
+// called, and time-to-play is updated at that moment.
+//
+// If packet N is received, any packet prior to |N - NackThreshold| which is not
+// arrived is considered lost, and should be labeled as "missing" (the size of
+// the list might be limited and older packet eliminated from the list). Packets
+// |N - NackThreshold|, |N - NackThreshold + 1|, ..., |N - 1| are considered
+// "late." A "late" packet with sequence number K is changed to "missing" any
+// time a packet with sequence number newer than |K + NackList| is arrived.
+//
+// The NackTracker class has to know about the sample rate of the packets to
+// compute time-to-play. So sample rate should be set as soon as the first
+// packet is received. If there is a change in the receive codec (sender changes
+// codec) then NackTracker should be reset. This is because NetEQ would flush
+// its buffer and re-transmission is meaning less for old packet. Therefore, in
+// that case, after reset the sampling rate has to be updated.
+//
+// Thread Safety
+// =============
+// Please note that this class in not thread safe. The class must be protected
+// if different APIs are called from different threads.
+//
+namespace webrtc {
+
+class NackTracker {
+ public:
+  // A limit for the size of the NACK list.
+  static const size_t kNackListSizeLimit = 500;  // 10 seconds for 20 ms frame
+                                                 // packets.
+  // Factory method.
+  static NackTracker* Create(int nack_threshold_packets);
+
+  ~NackTracker();
+
+  // Set a maximum for the size of the NACK list. If the last received packet
+  // has sequence number of N, then NACK list will not contain any element
+  // with sequence number earlier than N - |max_nack_list_size|.
+  //
+  // The largest maximum size is defined by |kNackListSizeLimit|
+  void SetMaxNackListSize(size_t max_nack_list_size);
+
+  // Set the sampling rate.
+  //
+  // If associated sampling rate of the received packets is changed, call this
+  // function to update sampling rate. Note that if there is any change in
+  // received codec then NetEq will flush its buffer and NACK has to be reset.
+  // After Reset() is called sampling rate has to be set.
+  void UpdateSampleRate(int sample_rate_hz);
+
+  // Update the sequence number and the timestamp of the last decoded RTP. This
+  // API should be called every time 10 ms audio is pulled from NetEq.
+  void UpdateLastDecodedPacket(uint16_t sequence_number, uint32_t timestamp);
+
+  // Update the sequence number and the timestamp of the last received RTP. This
+  // API should be called every time a packet pushed into ACM.
+  void UpdateLastReceivedPacket(uint16_t sequence_number, uint32_t timestamp);
+
+  // Get a list of "missing" packets which have expected time-to-play larger
+  // than the given round-trip-time (in milliseconds).
+  // Note: Late packets are not included.
+  std::vector<uint16_t> GetNackList(int64_t round_trip_time_ms) const;
+
+  // Reset to default values. The NACK list is cleared.
+  // |nack_threshold_packets_| & |max_nack_list_size_| preserve their values.
+  void Reset();
+
+ private:
+  // This test need to access the private method GetNackList().
+  FRIEND_TEST_ALL_PREFIXES(NackTrackerTest, EstimateTimestampAndTimeToPlay);
+
+  struct NackElement {
+    NackElement(int64_t initial_time_to_play_ms,
+                uint32_t initial_timestamp,
+                bool missing)
+        : time_to_play_ms(initial_time_to_play_ms),
+          estimated_timestamp(initial_timestamp),
+          is_missing(missing) {}
+
+    // Estimated time (ms) left for this packet to be decoded. This estimate is
+    // updated every time jitter buffer decodes a packet.
+    int64_t time_to_play_ms;
+
+    // A guess about the timestamp of the missing packet, it is used for
+    // estimation of |time_to_play_ms|. The estimate might be slightly wrong if
+    // there has been frame-size change since the last received packet and the
+    // missing packet. However, the risk of this is low, and in case of such
+    // errors, there will be a minor misestimation in time-to-play of missing
+    // packets. This will have a very minor effect on NACK performance.
+    uint32_t estimated_timestamp;
+
+    // True if the packet is considered missing. Otherwise indicates packet is
+    // late.
+    bool is_missing;
+  };
+
+  class NackListCompare {
+   public:
+    bool operator()(uint16_t sequence_number_old,
+                    uint16_t sequence_number_new) const {
+      return IsNewerSequenceNumber(sequence_number_new, sequence_number_old);
+    }
+  };
+
+  typedef std::map<uint16_t, NackElement, NackListCompare> NackList;
+
+  // Constructor.
+  explicit NackTracker(int nack_threshold_packets);
+
+  // This API is used only for testing to assess whether time-to-play is
+  // computed correctly.
+  NackList GetNackList() const;
+
+  // Given the |sequence_number_current_received_rtp| of currently received RTP,
+  // recognize packets which are not arrive and add to the list.
+  void AddToList(uint16_t sequence_number_current_received_rtp);
+
+  // This function subtracts 10 ms of time-to-play for all packets in NACK list.
+  // This is called when 10 ms elapsed with no new RTP packet decoded.
+  void UpdateEstimatedPlayoutTimeBy10ms();
+
+  // Given the |sequence_number_current_received_rtp| and
+  // |timestamp_current_received_rtp| of currently received RTP update number
+  // of samples per packet.
+  void UpdateSamplesPerPacket(uint16_t sequence_number_current_received_rtp,
+                              uint32_t timestamp_current_received_rtp);
+
+  // Given the |sequence_number_current_received_rtp| of currently received RTP
+  // update the list. That is; some packets will change from late to missing,
+  // some packets are inserted as missing and some inserted as late.
+  void UpdateList(uint16_t sequence_number_current_received_rtp);
+
+  // Packets which are considered late for too long (according to
+  // |nack_threshold_packets_|) are flagged as missing.
+  void ChangeFromLateToMissing(uint16_t sequence_number_current_received_rtp);
+
+  // Packets which have sequence number older that
+  // |sequence_num_last_received_rtp_| - |max_nack_list_size_| are removed
+  // from the NACK list.
+  void LimitNackListSize();
+
+  // Estimate timestamp of a missing packet given its sequence number.
+  uint32_t EstimateTimestamp(uint16_t sequence_number);
+
+  // Compute time-to-play given a timestamp.
+  int64_t TimeToPlay(uint32_t timestamp) const;
+
+  // If packet N is arrived, any packet prior to N - |nack_threshold_packets_|
+  // which is not arrived is considered missing, and should be in NACK list.
+  // Also any packet in the range of N-1 and N - |nack_threshold_packets_|,
+  // exclusive, which is not arrived is considered late, and should should be
+  // in the list of late packets.
+  const int nack_threshold_packets_;
+
+  // Valid if a packet is received.
+  uint16_t sequence_num_last_received_rtp_;
+  uint32_t timestamp_last_received_rtp_;
+  bool any_rtp_received_;  // If any packet received.
+
+  // Valid if a packet is decoded.
+  uint16_t sequence_num_last_decoded_rtp_;
+  uint32_t timestamp_last_decoded_rtp_;
+  bool any_rtp_decoded_;  // If any packet decoded.
+
+  int sample_rate_khz_;  // Sample rate in kHz.
+
+  // Number of samples per packet. We update this every time we receive a
+  // packet, not only for consecutive packets.
+  int samples_per_packet_;
+
+  // A list of missing packets to be retransmitted. Components of the list
+  // contain the sequence number of missing packets and the estimated time that
+  // each pack is going to be played out.
+  NackList nack_list_;
+
+  // NACK list will not keep track of missing packets prior to
+  // |sequence_num_last_received_rtp_| - |max_nack_list_size_|.
+  size_t max_nack_list_size_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_NETEQ_NACK_TRACKER_H_
diff --git a/modules/audio_coding/neteq/nack_tracker_unittest.cc b/modules/audio_coding/neteq/nack_tracker_unittest.cc
new file mode 100644
index 0000000..19adf30
--- /dev/null
+++ b/modules/audio_coding/neteq/nack_tracker_unittest.cc
@@ -0,0 +1,483 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/nack_tracker.h"
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <memory>
+
+#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace {
+
+const int kNackThreshold = 3;
+const int kSampleRateHz = 16000;
+const int kPacketSizeMs = 30;
+const uint32_t kTimestampIncrement = 480;  // 30 ms.
+const int64_t kShortRoundTripTimeMs = 1;
+
+bool IsNackListCorrect(const std::vector<uint16_t>& nack_list,
+                       const uint16_t* lost_sequence_numbers,
+                       size_t num_lost_packets) {
+  if (nack_list.size() != num_lost_packets)
+    return false;
+
+  if (num_lost_packets == 0)
+    return true;
+
+  for (size_t k = 0; k < nack_list.size(); ++k) {
+    int seq_num = nack_list[k];
+    bool seq_num_matched = false;
+    for (size_t n = 0; n < num_lost_packets; ++n) {
+      if (seq_num == lost_sequence_numbers[n]) {
+        seq_num_matched = true;
+        break;
+      }
+    }
+    if (!seq_num_matched)
+      return false;
+  }
+  return true;
+}
+
+}  // namespace
+
+TEST(NackTrackerTest, EmptyListWhenNoPacketLoss) {
+  std::unique_ptr<NackTracker> nack(NackTracker::Create(kNackThreshold));
+  nack->UpdateSampleRate(kSampleRateHz);
+
+  int seq_num = 1;
+  uint32_t timestamp = 0;
+
+  std::vector<uint16_t> nack_list;
+  for (int n = 0; n < 100; n++) {
+    nack->UpdateLastReceivedPacket(seq_num, timestamp);
+    nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+    seq_num++;
+    timestamp += kTimestampIncrement;
+    nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+    EXPECT_TRUE(nack_list.empty());
+  }
+}
+
+TEST(NackTrackerTest, NoNackIfReorderWithinNackThreshold) {
+  std::unique_ptr<NackTracker> nack(NackTracker::Create(kNackThreshold));
+  nack->UpdateSampleRate(kSampleRateHz);
+
+  int seq_num = 1;
+  uint32_t timestamp = 0;
+  std::vector<uint16_t> nack_list;
+
+  nack->UpdateLastReceivedPacket(seq_num, timestamp);
+  nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+  EXPECT_TRUE(nack_list.empty());
+  int num_late_packets = kNackThreshold + 1;
+
+  // Push in reverse order
+  while (num_late_packets > 0) {
+    nack->UpdateLastReceivedPacket(
+        seq_num + num_late_packets,
+        timestamp + num_late_packets * kTimestampIncrement);
+    nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+    EXPECT_TRUE(nack_list.empty());
+    num_late_packets--;
+  }
+}
+
+TEST(NackTrackerTest, LatePacketsMovedToNackThenNackListDoesNotChange) {
+  const uint16_t kSequenceNumberLostPackets[] = {2, 3, 4, 5, 6, 7, 8, 9};
+  static const int kNumAllLostPackets = sizeof(kSequenceNumberLostPackets) /
+                                        sizeof(kSequenceNumberLostPackets[0]);
+
+  for (int k = 0; k < 2; k++) {  // Two iteration with/without wrap around.
+    std::unique_ptr<NackTracker> nack(NackTracker::Create(kNackThreshold));
+    nack->UpdateSampleRate(kSampleRateHz);
+
+    uint16_t sequence_num_lost_packets[kNumAllLostPackets];
+    for (int n = 0; n < kNumAllLostPackets; n++) {
+      sequence_num_lost_packets[n] =
+          kSequenceNumberLostPackets[n] +
+          k * 65531;  // Have wrap around in sequence numbers for |k == 1|.
+    }
+    uint16_t seq_num = sequence_num_lost_packets[0] - 1;
+
+    uint32_t timestamp = 0;
+    std::vector<uint16_t> nack_list;
+
+    nack->UpdateLastReceivedPacket(seq_num, timestamp);
+    nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+    EXPECT_TRUE(nack_list.empty());
+
+    seq_num = sequence_num_lost_packets[kNumAllLostPackets - 1] + 1;
+    timestamp += kTimestampIncrement * (kNumAllLostPackets + 1);
+    int num_lost_packets = std::max(0, kNumAllLostPackets - kNackThreshold);
+
+    for (int n = 0; n < kNackThreshold + 1; ++n) {
+      nack->UpdateLastReceivedPacket(seq_num, timestamp);
+      nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+      EXPECT_TRUE(IsNackListCorrect(nack_list, sequence_num_lost_packets,
+                                    num_lost_packets));
+      seq_num++;
+      timestamp += kTimestampIncrement;
+      num_lost_packets++;
+    }
+
+    for (int n = 0; n < 100; ++n) {
+      nack->UpdateLastReceivedPacket(seq_num, timestamp);
+      nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+      EXPECT_TRUE(IsNackListCorrect(nack_list, sequence_num_lost_packets,
+                                    kNumAllLostPackets));
+      seq_num++;
+      timestamp += kTimestampIncrement;
+    }
+  }
+}
+
+TEST(NackTrackerTest, ArrivedPacketsAreRemovedFromNackList) {
+  const uint16_t kSequenceNumberLostPackets[] = {2, 3, 4, 5, 6, 7, 8, 9};
+  static const int kNumAllLostPackets = sizeof(kSequenceNumberLostPackets) /
+                                        sizeof(kSequenceNumberLostPackets[0]);
+
+  for (int k = 0; k < 2; ++k) {  // Two iteration with/without wrap around.
+    std::unique_ptr<NackTracker> nack(NackTracker::Create(kNackThreshold));
+    nack->UpdateSampleRate(kSampleRateHz);
+
+    uint16_t sequence_num_lost_packets[kNumAllLostPackets];
+    for (int n = 0; n < kNumAllLostPackets; ++n) {
+      sequence_num_lost_packets[n] = kSequenceNumberLostPackets[n] +
+                                     k * 65531;  // Wrap around for |k == 1|.
+    }
+
+    uint16_t seq_num = sequence_num_lost_packets[0] - 1;
+    uint32_t timestamp = 0;
+
+    nack->UpdateLastReceivedPacket(seq_num, timestamp);
+    std::vector<uint16_t> nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+    EXPECT_TRUE(nack_list.empty());
+
+    size_t index_retransmitted_rtp = 0;
+    uint32_t timestamp_retransmitted_rtp = timestamp + kTimestampIncrement;
+
+    seq_num = sequence_num_lost_packets[kNumAllLostPackets - 1] + 1;
+    timestamp += kTimestampIncrement * (kNumAllLostPackets + 1);
+    size_t num_lost_packets = std::max(0, kNumAllLostPackets - kNackThreshold);
+    for (int n = 0; n < kNumAllLostPackets; ++n) {
+      // Number of lost packets does not change for the first
+      // |kNackThreshold + 1| packets, one is added to the list and one is
+      // removed. Thereafter, the list shrinks every iteration.
+      if (n >= kNackThreshold + 1)
+        num_lost_packets--;
+
+      nack->UpdateLastReceivedPacket(seq_num, timestamp);
+      nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+      EXPECT_TRUE(IsNackListCorrect(
+          nack_list, &sequence_num_lost_packets[index_retransmitted_rtp],
+          num_lost_packets));
+      seq_num++;
+      timestamp += kTimestampIncrement;
+
+      // Retransmission of a lost RTP.
+      nack->UpdateLastReceivedPacket(
+          sequence_num_lost_packets[index_retransmitted_rtp],
+          timestamp_retransmitted_rtp);
+      index_retransmitted_rtp++;
+      timestamp_retransmitted_rtp += kTimestampIncrement;
+
+      nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+      EXPECT_TRUE(IsNackListCorrect(
+          nack_list, &sequence_num_lost_packets[index_retransmitted_rtp],
+          num_lost_packets - 1));  // One less lost packet in the list.
+    }
+    ASSERT_TRUE(nack_list.empty());
+  }
+}
+
+// Assess if estimation of timestamps and time-to-play is correct. Introduce all
+// combinations that timestamps and sequence numbers might have wrap around.
+TEST(NackTrackerTest, EstimateTimestampAndTimeToPlay) {
+  const uint16_t kLostPackets[] = {2, 3,  4,  5,  6,  7,  8,
+                                   9, 10, 11, 12, 13, 14, 15};
+  static const int kNumAllLostPackets =
+      sizeof(kLostPackets) / sizeof(kLostPackets[0]);
+
+  for (int k = 0; k < 4; ++k) {
+    std::unique_ptr<NackTracker> nack(NackTracker::Create(kNackThreshold));
+    nack->UpdateSampleRate(kSampleRateHz);
+
+    // Sequence number wrap around if |k| is 2 or 3;
+    int seq_num_offset = (k < 2) ? 0 : 65531;
+
+    // Timestamp wrap around if |k| is 1 or 3.
+    uint32_t timestamp_offset =
+        (k & 0x1) ? static_cast<uint32_t>(0xffffffff) - 6 : 0;
+
+    uint32_t timestamp_lost_packets[kNumAllLostPackets];
+    uint16_t seq_num_lost_packets[kNumAllLostPackets];
+    for (int n = 0; n < kNumAllLostPackets; ++n) {
+      timestamp_lost_packets[n] =
+          timestamp_offset + kLostPackets[n] * kTimestampIncrement;
+      seq_num_lost_packets[n] = seq_num_offset + kLostPackets[n];
+    }
+
+    // We and to push two packets before lost burst starts.
+    uint16_t seq_num = seq_num_lost_packets[0] - 2;
+    uint32_t timestamp = timestamp_lost_packets[0] - 2 * kTimestampIncrement;
+
+    const uint16_t first_seq_num = seq_num;
+    const uint32_t first_timestamp = timestamp;
+
+    // Two consecutive packets to have a correct estimate of timestamp increase.
+    nack->UpdateLastReceivedPacket(seq_num, timestamp);
+    seq_num++;
+    timestamp += kTimestampIncrement;
+    nack->UpdateLastReceivedPacket(seq_num, timestamp);
+
+    // A packet after the last one which is supposed to be lost.
+    seq_num = seq_num_lost_packets[kNumAllLostPackets - 1] + 1;
+    timestamp =
+        timestamp_lost_packets[kNumAllLostPackets - 1] + kTimestampIncrement;
+    nack->UpdateLastReceivedPacket(seq_num, timestamp);
+
+    NackTracker::NackList nack_list = nack->GetNackList();
+    EXPECT_EQ(static_cast<size_t>(kNumAllLostPackets), nack_list.size());
+
+    // Pretend the first packet is decoded.
+    nack->UpdateLastDecodedPacket(first_seq_num, first_timestamp);
+    nack_list = nack->GetNackList();
+
+    NackTracker::NackList::iterator it = nack_list.begin();
+    while (it != nack_list.end()) {
+      seq_num = it->first - seq_num_offset;
+      int index = seq_num - kLostPackets[0];
+      EXPECT_EQ(timestamp_lost_packets[index], it->second.estimated_timestamp);
+      EXPECT_EQ((index + 2) * kPacketSizeMs, it->second.time_to_play_ms);
+      ++it;
+    }
+
+    // Pretend 10 ms is passed, and we had pulled audio from NetEq, it still
+    // reports the same sequence number as decoded, time-to-play should be
+    // updated by 10 ms.
+    nack->UpdateLastDecodedPacket(first_seq_num, first_timestamp);
+    nack_list = nack->GetNackList();
+    it = nack_list.begin();
+    while (it != nack_list.end()) {
+      seq_num = it->first - seq_num_offset;
+      int index = seq_num - kLostPackets[0];
+      EXPECT_EQ((index + 2) * kPacketSizeMs - 10, it->second.time_to_play_ms);
+      ++it;
+    }
+  }
+}
+
+TEST(NackTrackerTest,
+     MissingPacketsPriorToLastDecodedRtpShouldNotBeInNackList) {
+  for (int m = 0; m < 2; ++m) {
+    uint16_t seq_num_offset = (m == 0) ? 0 : 65531;  // Wrap around if |m| is 1.
+    std::unique_ptr<NackTracker> nack(NackTracker::Create(kNackThreshold));
+    nack->UpdateSampleRate(kSampleRateHz);
+
+    // Two consecutive packets to have a correct estimate of timestamp increase.
+    uint16_t seq_num = 0;
+    nack->UpdateLastReceivedPacket(seq_num_offset + seq_num,
+                                   seq_num * kTimestampIncrement);
+    seq_num++;
+    nack->UpdateLastReceivedPacket(seq_num_offset + seq_num,
+                                   seq_num * kTimestampIncrement);
+
+    // Skip 10 packets (larger than NACK threshold).
+    const int kNumLostPackets = 10;
+    seq_num += kNumLostPackets + 1;
+    nack->UpdateLastReceivedPacket(seq_num_offset + seq_num,
+                                   seq_num * kTimestampIncrement);
+
+    const size_t kExpectedListSize = kNumLostPackets - kNackThreshold;
+    std::vector<uint16_t> nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+    EXPECT_EQ(kExpectedListSize, nack_list.size());
+
+    for (int k = 0; k < 2; ++k) {
+      // Decoding of the first and the second arrived packets.
+      for (int n = 0; n < kPacketSizeMs / 10; ++n) {
+        nack->UpdateLastDecodedPacket(seq_num_offset + k,
+                                      k * kTimestampIncrement);
+        nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+        EXPECT_EQ(kExpectedListSize, nack_list.size());
+      }
+    }
+
+    // Decoding of the last received packet.
+    nack->UpdateLastDecodedPacket(seq_num + seq_num_offset,
+                                  seq_num * kTimestampIncrement);
+    nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+    EXPECT_TRUE(nack_list.empty());
+
+    // Make sure list of late packets is also empty. To check that, push few
+    // packets, if the late list is not empty its content will pop up in NACK
+    // list.
+    for (int n = 0; n < kNackThreshold + 10; ++n) {
+      seq_num++;
+      nack->UpdateLastReceivedPacket(seq_num_offset + seq_num,
+                                     seq_num * kTimestampIncrement);
+      nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+      EXPECT_TRUE(nack_list.empty());
+    }
+  }
+}
+
+TEST(NackTrackerTest, Reset) {
+  std::unique_ptr<NackTracker> nack(NackTracker::Create(kNackThreshold));
+  nack->UpdateSampleRate(kSampleRateHz);
+
+  // Two consecutive packets to have a correct estimate of timestamp increase.
+  uint16_t seq_num = 0;
+  nack->UpdateLastReceivedPacket(seq_num, seq_num * kTimestampIncrement);
+  seq_num++;
+  nack->UpdateLastReceivedPacket(seq_num, seq_num * kTimestampIncrement);
+
+  // Skip 10 packets (larger than NACK threshold).
+  const int kNumLostPackets = 10;
+  seq_num += kNumLostPackets + 1;
+  nack->UpdateLastReceivedPacket(seq_num, seq_num * kTimestampIncrement);
+
+  const size_t kExpectedListSize = kNumLostPackets - kNackThreshold;
+  std::vector<uint16_t> nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+  EXPECT_EQ(kExpectedListSize, nack_list.size());
+
+  nack->Reset();
+  nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+  EXPECT_TRUE(nack_list.empty());
+}
+
+TEST(NackTrackerTest, ListSizeAppliedFromBeginning) {
+  const size_t kNackListSize = 10;
+  for (int m = 0; m < 2; ++m) {
+    uint16_t seq_num_offset = (m == 0) ? 0 : 65525;  // Wrap around if |m| is 1.
+    std::unique_ptr<NackTracker> nack(NackTracker::Create(kNackThreshold));
+    nack->UpdateSampleRate(kSampleRateHz);
+    nack->SetMaxNackListSize(kNackListSize);
+
+    uint16_t seq_num = seq_num_offset;
+    uint32_t timestamp = 0x12345678;
+    nack->UpdateLastReceivedPacket(seq_num, timestamp);
+
+    // Packet lost more than NACK-list size limit.
+    uint16_t num_lost_packets = kNackThreshold + kNackListSize + 5;
+
+    seq_num += num_lost_packets + 1;
+    timestamp += (num_lost_packets + 1) * kTimestampIncrement;
+    nack->UpdateLastReceivedPacket(seq_num, timestamp);
+
+    std::vector<uint16_t> nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+    EXPECT_EQ(kNackListSize - kNackThreshold, nack_list.size());
+  }
+}
+
+TEST(NackTrackerTest, ChangeOfListSizeAppliedAndOldElementsRemoved) {
+  const size_t kNackListSize = 10;
+  for (int m = 0; m < 2; ++m) {
+    uint16_t seq_num_offset = (m == 0) ? 0 : 65525;  // Wrap around if |m| is 1.
+    std::unique_ptr<NackTracker> nack(NackTracker::Create(kNackThreshold));
+    nack->UpdateSampleRate(kSampleRateHz);
+
+    uint16_t seq_num = seq_num_offset;
+    uint32_t timestamp = 0x87654321;
+    nack->UpdateLastReceivedPacket(seq_num, timestamp);
+
+    // Packet lost more than NACK-list size limit.
+    uint16_t num_lost_packets = kNackThreshold + kNackListSize + 5;
+
+    std::unique_ptr<uint16_t[]> seq_num_lost(new uint16_t[num_lost_packets]);
+    for (int n = 0; n < num_lost_packets; ++n) {
+      seq_num_lost[n] = ++seq_num;
+    }
+
+    ++seq_num;
+    timestamp += (num_lost_packets + 1) * kTimestampIncrement;
+    nack->UpdateLastReceivedPacket(seq_num, timestamp);
+    size_t expected_size = num_lost_packets - kNackThreshold;
+
+    std::vector<uint16_t> nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+    EXPECT_EQ(expected_size, nack_list.size());
+
+    nack->SetMaxNackListSize(kNackListSize);
+    expected_size = kNackListSize - kNackThreshold;
+    nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+    EXPECT_TRUE(IsNackListCorrect(
+        nack_list, &seq_num_lost[num_lost_packets - kNackListSize],
+        expected_size));
+
+    // NACK list does not change size but the content is changing. The oldest
+    // element is removed and one from late list is inserted.
+    size_t n;
+    for (n = 1; n <= static_cast<size_t>(kNackThreshold); ++n) {
+      ++seq_num;
+      timestamp += kTimestampIncrement;
+      nack->UpdateLastReceivedPacket(seq_num, timestamp);
+      nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+      EXPECT_TRUE(IsNackListCorrect(
+          nack_list, &seq_num_lost[num_lost_packets - kNackListSize + n],
+          expected_size));
+    }
+
+    // NACK list should shrink.
+    for (; n < kNackListSize; ++n) {
+      ++seq_num;
+      timestamp += kTimestampIncrement;
+      nack->UpdateLastReceivedPacket(seq_num, timestamp);
+      --expected_size;
+      nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+      EXPECT_TRUE(IsNackListCorrect(
+          nack_list, &seq_num_lost[num_lost_packets - kNackListSize + n],
+          expected_size));
+    }
+
+    // After this packet, NACK list should be empty.
+    ++seq_num;
+    timestamp += kTimestampIncrement;
+    nack->UpdateLastReceivedPacket(seq_num, timestamp);
+    nack_list = nack->GetNackList(kShortRoundTripTimeMs);
+    EXPECT_TRUE(nack_list.empty());
+  }
+}
+
+TEST(NackTrackerTest, RoudTripTimeIsApplied) {
+  const int kNackListSize = 200;
+  std::unique_ptr<NackTracker> nack(NackTracker::Create(kNackThreshold));
+  nack->UpdateSampleRate(kSampleRateHz);
+  nack->SetMaxNackListSize(kNackListSize);
+
+  uint16_t seq_num = 0;
+  uint32_t timestamp = 0x87654321;
+  nack->UpdateLastReceivedPacket(seq_num, timestamp);
+
+  // Packet lost more than NACK-list size limit.
+  uint16_t kNumLostPackets = kNackThreshold + 5;
+
+  seq_num += (1 + kNumLostPackets);
+  timestamp += (1 + kNumLostPackets) * kTimestampIncrement;
+  nack->UpdateLastReceivedPacket(seq_num, timestamp);
+
+  // Expected time-to-play are:
+  // kPacketSizeMs - 10, 2*kPacketSizeMs - 10, 3*kPacketSizeMs - 10, ...
+  //
+  // sequence number:  1,  2,  3,   4,   5
+  // time-to-play:    20, 50, 80, 110, 140
+  //
+  std::vector<uint16_t> nack_list = nack->GetNackList(100);
+  ASSERT_EQ(2u, nack_list.size());
+  EXPECT_EQ(4, nack_list[0]);
+  EXPECT_EQ(5, nack_list[1]);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/neteq.cc b/modules/audio_coding/neteq/neteq.cc
new file mode 100644
index 0000000..8b74973
--- /dev/null
+++ b/modules/audio_coding/neteq/neteq.cc
@@ -0,0 +1,43 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/include/neteq.h"
+
+#include <memory>
+#include <sstream>
+
+#include "modules/audio_coding/neteq/neteq_impl.h"
+
+namespace webrtc {
+
+std::string NetEq::Config::ToString() const {
+  std::stringstream ss;
+  ss << "sample_rate_hz=" << sample_rate_hz
+     << ", enable_post_decode_vad="
+     << (enable_post_decode_vad ? "true" : "false")
+     << ", max_packets_in_buffer=" << max_packets_in_buffer
+     << ", background_noise_mode=" << background_noise_mode
+     << ", playout_mode=" << playout_mode
+     << ", enable_fast_accelerate="
+     << (enable_fast_accelerate ? " true": "false")
+     << ", enable_muted_state=" << (enable_muted_state ? " true": "false");
+  return ss.str();
+}
+
+// Creates all classes needed and inject them into a new NetEqImpl object.
+// Return the new object.
+NetEq* NetEq::Create(
+    const NetEq::Config& config,
+    const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory) {
+  return new NetEqImpl(config,
+                       NetEqImpl::Dependencies(config, decoder_factory));
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/neteq_decoder_enum.cc b/modules/audio_coding/neteq/neteq_decoder_enum.cc
new file mode 100644
index 0000000..8d66c2a
--- /dev/null
+++ b/modules/audio_coding/neteq/neteq_decoder_enum.cc
@@ -0,0 +1,85 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <map>
+#include <string>
+
+#include "modules/audio_coding/neteq/neteq_decoder_enum.h"
+
+namespace webrtc {
+
+rtc::Optional<SdpAudioFormat> NetEqDecoderToSdpAudioFormat(NetEqDecoder nd) {
+  switch (nd) {
+    case NetEqDecoder::kDecoderPCMu:
+      return SdpAudioFormat("pcmu", 8000, 1);
+    case NetEqDecoder::kDecoderPCMa:
+      return SdpAudioFormat("pcma", 8000, 1);
+    case NetEqDecoder::kDecoderPCMu_2ch:
+      return SdpAudioFormat("pcmu", 8000, 2);
+    case NetEqDecoder::kDecoderPCMa_2ch:
+      return SdpAudioFormat("pcma", 8000, 2);
+    case NetEqDecoder::kDecoderILBC:
+      return SdpAudioFormat("ilbc", 8000, 1);
+    case NetEqDecoder::kDecoderISAC:
+      return SdpAudioFormat("isac", 16000, 1);
+    case NetEqDecoder::kDecoderISACswb:
+      return SdpAudioFormat("isac", 32000, 1);
+    case NetEqDecoder::kDecoderPCM16B:
+      return SdpAudioFormat("l16", 8000, 1);
+    case NetEqDecoder::kDecoderPCM16Bwb:
+      return SdpAudioFormat("l16", 16000, 1);
+    case NetEqDecoder::kDecoderPCM16Bswb32kHz:
+      return SdpAudioFormat("l16", 32000, 1);
+    case NetEqDecoder::kDecoderPCM16Bswb48kHz:
+      return SdpAudioFormat("l16", 48000, 1);
+    case NetEqDecoder::kDecoderPCM16B_2ch:
+      return SdpAudioFormat("l16", 8000, 2);
+    case NetEqDecoder::kDecoderPCM16Bwb_2ch:
+      return SdpAudioFormat("l16", 16000, 2);
+    case NetEqDecoder::kDecoderPCM16Bswb32kHz_2ch:
+      return SdpAudioFormat("l16", 32000, 2);
+    case NetEqDecoder::kDecoderPCM16Bswb48kHz_2ch:
+      return SdpAudioFormat("l16", 48000, 2);
+    case NetEqDecoder::kDecoderPCM16B_5ch:
+      return SdpAudioFormat("l16", 8000, 5);
+    case NetEqDecoder::kDecoderG722:
+      return SdpAudioFormat("g722", 8000, 1);
+    case NetEqDecoder::kDecoderG722_2ch:
+      return SdpAudioFormat("g722", 8000, 2);
+    case NetEqDecoder::kDecoderOpus:
+      return SdpAudioFormat("opus", 48000, 2);
+    case NetEqDecoder::kDecoderOpus_2ch:
+      return SdpAudioFormat(
+          "opus", 48000, 2,
+          std::map<std::string, std::string>{{"stereo", "1"}});
+    case NetEqDecoder::kDecoderRED:
+      return SdpAudioFormat("red", 8000, 1);
+    case NetEqDecoder::kDecoderAVT:
+      return SdpAudioFormat("telephone-event", 8000, 1);
+    case NetEqDecoder::kDecoderAVT16kHz:
+      return SdpAudioFormat("telephone-event", 16000, 1);
+    case NetEqDecoder::kDecoderAVT32kHz:
+      return SdpAudioFormat("telephone-event", 32000, 1);
+    case NetEqDecoder::kDecoderAVT48kHz:
+      return SdpAudioFormat("telephone-event", 48000, 1);
+    case NetEqDecoder::kDecoderCNGnb:
+      return SdpAudioFormat("cn", 8000, 1);
+    case NetEqDecoder::kDecoderCNGwb:
+      return SdpAudioFormat("cn", 16000, 1);
+    case NetEqDecoder::kDecoderCNGswb32kHz:
+      return SdpAudioFormat("cn", 32000, 1);
+    case NetEqDecoder::kDecoderCNGswb48kHz:
+      return SdpAudioFormat("cn", 48000, 1);
+    default:
+      return rtc::nullopt;
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/neteq_decoder_enum.h b/modules/audio_coding/neteq/neteq_decoder_enum.h
new file mode 100644
index 0000000..024f03c
--- /dev/null
+++ b/modules/audio_coding/neteq/neteq_decoder_enum.h
@@ -0,0 +1,56 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_NETEQ_DECODER_ENUM_H_
+#define MODULES_AUDIO_CODING_NETEQ_NETEQ_DECODER_ENUM_H_
+
+#include "api/audio_codecs/audio_format.h"
+#include "api/optional.h"
+
+namespace webrtc {
+
+enum class NetEqDecoder {
+  kDecoderPCMu,
+  kDecoderPCMa,
+  kDecoderPCMu_2ch,
+  kDecoderPCMa_2ch,
+  kDecoderILBC,
+  kDecoderISAC,
+  kDecoderISACswb,
+  kDecoderPCM16B,
+  kDecoderPCM16Bwb,
+  kDecoderPCM16Bswb32kHz,
+  kDecoderPCM16Bswb48kHz,
+  kDecoderPCM16B_2ch,
+  kDecoderPCM16Bwb_2ch,
+  kDecoderPCM16Bswb32kHz_2ch,
+  kDecoderPCM16Bswb48kHz_2ch,
+  kDecoderPCM16B_5ch,
+  kDecoderG722,
+  kDecoderG722_2ch,
+  kDecoderRED,
+  kDecoderAVT,
+  kDecoderAVT16kHz,
+  kDecoderAVT32kHz,
+  kDecoderAVT48kHz,
+  kDecoderCNGnb,
+  kDecoderCNGwb,
+  kDecoderCNGswb32kHz,
+  kDecoderCNGswb48kHz,
+  kDecoderArbitrary,
+  kDecoderOpus,
+  kDecoderOpus_2ch,
+};
+
+rtc::Optional<SdpAudioFormat> NetEqDecoderToSdpAudioFormat(NetEqDecoder nd);
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_NETEQ_NETEQ_DECODER_ENUM_H_
diff --git a/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc b/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
new file mode 100644
index 0000000..ec16627
--- /dev/null
+++ b/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
@@ -0,0 +1,457 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Test to verify correct operation for externally created decoders.
+
+#include <memory>
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "modules/audio_coding/neteq/tools/neteq_external_decoder_test.h"
+#include "modules/audio_coding/neteq/tools/rtp_generator.h"
+#include "modules/include/module_common_types.h"
+#include "test/gmock.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+using ::testing::_;
+using ::testing::Return;
+
+class NetEqExternalDecoderUnitTest : public test::NetEqExternalDecoderTest {
+ protected:
+  static const int kFrameSizeMs = 10;  // Frame size of Pcm16B.
+
+  NetEqExternalDecoderUnitTest(NetEqDecoder codec,
+                               int sample_rate_hz,
+                               MockExternalPcm16B* decoder)
+      : NetEqExternalDecoderTest(codec, sample_rate_hz, decoder),
+        external_decoder_(decoder),
+        samples_per_ms_(sample_rate_hz / 1000),
+        frame_size_samples_(kFrameSizeMs * samples_per_ms_),
+        rtp_generator_(new test::RtpGenerator(samples_per_ms_)),
+        input_(new int16_t[frame_size_samples_]),
+        // Payload should be no larger than input.
+        encoded_(new uint8_t[2 * frame_size_samples_]),
+        payload_size_bytes_(0),
+        last_send_time_(0),
+        last_arrival_time_(0) {
+    // NetEq is not allowed to delete the external decoder (hence Times(0)).
+    EXPECT_CALL(*external_decoder_, Die()).Times(0);
+    Init();
+
+    const std::string file_name =
+        webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+    input_file_.reset(new test::InputAudioFile(file_name));
+  }
+
+  virtual ~NetEqExternalDecoderUnitTest() {
+    delete [] input_;
+    delete [] encoded_;
+    // ~NetEqExternalDecoderTest() will delete |external_decoder_|, so expecting
+    // Die() to be called.
+    EXPECT_CALL(*external_decoder_, Die()).Times(1);
+  }
+
+  // Method to draw kFrameSizeMs audio and verify the output.
+  // Use gTest methods. e.g. ASSERT_EQ() inside to trigger errors.
+  virtual void GetAndVerifyOutput() = 0;
+
+  // Method to get the number of calls to the Decode() method of the external
+  // decoder.
+  virtual int NumExpectedDecodeCalls(int num_loops) = 0;
+
+  // Method to generate packets and return the send time of the packet.
+  int GetNewPacket() {
+    if (!input_file_->Read(frame_size_samples_, input_)) {
+      return -1;
+    }
+    payload_size_bytes_ = WebRtcPcm16b_Encode(input_, frame_size_samples_,
+                                              encoded_);
+
+    int next_send_time = rtp_generator_->GetRtpHeader(
+        kPayloadType, frame_size_samples_, &rtp_header_);
+    return next_send_time;
+  }
+
+  // Method to decide packet losses.
+  virtual bool Lost() { return false; }
+
+  // Method to calculate packet arrival time.
+  int GetArrivalTime(int send_time) {
+    int arrival_time = last_arrival_time_ + (send_time - last_send_time_);
+    last_send_time_ = send_time;
+    last_arrival_time_ = arrival_time;
+    return arrival_time;
+  }
+
+  void RunTest(int num_loops) {
+    // Get next input packets (mono and multi-channel).
+    uint32_t next_send_time;
+    uint32_t next_arrival_time;
+    do {
+      next_send_time = GetNewPacket();
+      next_arrival_time = GetArrivalTime(next_send_time);
+    } while (Lost());  // If lost, immediately read the next packet.
+
+    EXPECT_CALL(
+        *external_decoder_,
+        DecodeInternal(_, payload_size_bytes_, 1000 * samples_per_ms_, _, _))
+        .Times(NumExpectedDecodeCalls(num_loops));
+
+    uint32_t time_now = 0;
+    for (int k = 0; k < num_loops; ++k) {
+      while (time_now >= next_arrival_time) {
+        InsertPacket(rtp_header_, rtc::ArrayView<const uint8_t>(
+                                      encoded_, payload_size_bytes_),
+                     next_arrival_time);
+        // Get next input packet.
+        do {
+          next_send_time = GetNewPacket();
+          next_arrival_time = GetArrivalTime(next_send_time);
+        } while (Lost());  // If lost, immediately read the next packet.
+      }
+
+      std::ostringstream ss;
+      ss << "Lap number " << k << ".";
+      SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
+      // Compare mono and multi-channel.
+      ASSERT_NO_FATAL_FAILURE(GetAndVerifyOutput());
+
+      time_now += kOutputLengthMs;
+    }
+  }
+
+  void InsertPacket(RTPHeader rtp_header,
+                    rtc::ArrayView<const uint8_t> payload,
+                    uint32_t receive_timestamp) override {
+    EXPECT_CALL(*external_decoder_,
+                IncomingPacket(_, payload.size(), rtp_header.sequenceNumber,
+                               rtp_header.timestamp, receive_timestamp));
+    NetEqExternalDecoderTest::InsertPacket(rtp_header, payload,
+                                           receive_timestamp);
+  }
+
+  MockExternalPcm16B* external_decoder() { return external_decoder_.get(); }
+
+  void ResetRtpGenerator(test::RtpGenerator* rtp_generator) {
+    rtp_generator_.reset(rtp_generator);
+  }
+
+  int samples_per_ms() const { return samples_per_ms_; }
+ private:
+  std::unique_ptr<MockExternalPcm16B> external_decoder_;
+  int samples_per_ms_;
+  size_t frame_size_samples_;
+  std::unique_ptr<test::RtpGenerator> rtp_generator_;
+  int16_t* input_;
+  uint8_t* encoded_;
+  size_t payload_size_bytes_;
+  uint32_t last_send_time_;
+  uint32_t last_arrival_time_;
+  std::unique_ptr<test::InputAudioFile> input_file_;
+  RTPHeader rtp_header_;
+};
+
+// This test encodes a few packets of PCM16b 32 kHz data and inserts it into two
+// different NetEq instances. The first instance uses the internal version of
+// the decoder object, while the second one uses an externally created decoder
+// object (ExternalPcm16B wrapped in MockExternalPcm16B, both defined above).
+// The test verifies that the output from both instances match.
+class NetEqExternalVsInternalDecoderTest : public NetEqExternalDecoderUnitTest,
+                                           public ::testing::Test {
+ protected:
+  static const size_t kMaxBlockSize = 480;  // 10 ms @ 48 kHz.
+
+  NetEqExternalVsInternalDecoderTest()
+      : NetEqExternalDecoderUnitTest(NetEqDecoder::kDecoderPCM16Bswb32kHz,
+                                     32000,
+                                     new MockExternalPcm16B(32000)),
+        sample_rate_hz_(32000) {
+    NetEq::Config config;
+    config.sample_rate_hz = sample_rate_hz_;
+    neteq_internal_.reset(
+        NetEq::Create(config, CreateBuiltinAudioDecoderFactory()));
+  }
+
+  void SetUp() override {
+    ASSERT_EQ(true, neteq_internal_->RegisterPayloadType(
+                        kPayloadType, SdpAudioFormat("L16", 32000, 1)));
+  }
+
+  void GetAndVerifyOutput() override {
+    // Get audio from internal decoder instance.
+    bool muted;
+    EXPECT_EQ(NetEq::kOK, neteq_internal_->GetAudio(&output_internal_, &muted));
+    ASSERT_FALSE(muted);
+    EXPECT_EQ(1u, output_internal_.num_channels_);
+    EXPECT_EQ(static_cast<size_t>(kOutputLengthMs * sample_rate_hz_ / 1000),
+              output_internal_.samples_per_channel_);
+
+    // Get audio from external decoder instance.
+    GetOutputAudio(&output_);
+
+    const int16_t* output_data = output_.data();
+    const int16_t* output_internal_data = output_internal_.data();
+    for (size_t i = 0; i < output_.samples_per_channel_; ++i) {
+      ASSERT_EQ(output_data[i], output_internal_data[i])
+          << "Diff in sample " << i << ".";
+    }
+  }
+
+  void InsertPacket(RTPHeader rtp_header,
+                    rtc::ArrayView<const uint8_t> payload,
+                    uint32_t receive_timestamp) override {
+    // Insert packet in internal decoder.
+    ASSERT_EQ(NetEq::kOK, neteq_internal_->InsertPacket(rtp_header, payload,
+                                                        receive_timestamp));
+
+    // Insert packet in external decoder instance.
+    NetEqExternalDecoderUnitTest::InsertPacket(rtp_header, payload,
+                                               receive_timestamp);
+  }
+
+  int NumExpectedDecodeCalls(int num_loops) override { return num_loops; }
+
+ private:
+  int sample_rate_hz_;
+  std::unique_ptr<NetEq> neteq_internal_;
+  AudioFrame output_internal_;
+  AudioFrame output_;
+};
+
+TEST_F(NetEqExternalVsInternalDecoderTest, RunTest) {
+  RunTest(100);  // Run 100 laps @ 10 ms each in the test loop.
+}
+
+class LargeTimestampJumpTest : public NetEqExternalDecoderUnitTest,
+                               public ::testing::Test {
+ protected:
+  static const size_t kMaxBlockSize = 480;  // 10 ms @ 48 kHz.
+
+  enum TestStates {
+    kInitialPhase,
+    kNormalPhase,
+    kExpandPhase,
+    kFadedExpandPhase,
+    kRecovered
+  };
+
+  LargeTimestampJumpTest()
+      : NetEqExternalDecoderUnitTest(NetEqDecoder::kDecoderPCM16B,
+                                     8000,
+                                     new MockExternalPcm16B(8000)),
+        test_state_(kInitialPhase) {
+    EXPECT_CALL(*external_decoder(), HasDecodePlc())
+        .WillRepeatedly(Return(false));
+  }
+
+  virtual void UpdateState(AudioFrame::SpeechType output_type) {
+    switch (test_state_) {
+      case kInitialPhase: {
+        if (output_type == AudioFrame::kNormalSpeech) {
+          test_state_ = kNormalPhase;
+        }
+        break;
+      }
+      case kNormalPhase: {
+        if (output_type == AudioFrame::kPLC) {
+          test_state_ = kExpandPhase;
+        }
+        break;
+      }
+      case kExpandPhase: {
+        if (output_type == AudioFrame::kPLCCNG) {
+          test_state_ = kFadedExpandPhase;
+        } else if (output_type == AudioFrame::kNormalSpeech) {
+          test_state_ = kRecovered;
+        }
+        break;
+      }
+      case kFadedExpandPhase: {
+        if (output_type == AudioFrame::kNormalSpeech) {
+          test_state_ = kRecovered;
+        }
+        break;
+      }
+      case kRecovered: {
+        break;
+      }
+    }
+  }
+
+  void GetAndVerifyOutput() override {
+    AudioFrame output;
+    GetOutputAudio(&output);
+    UpdateState(output.speech_type_);
+
+    if (test_state_ == kExpandPhase || test_state_ == kFadedExpandPhase) {
+      // Don't verify the output in this phase of the test.
+      return;
+    }
+
+    ASSERT_EQ(1u, output.num_channels_);
+    const int16_t* output_data = output.data();
+    for (size_t i = 0; i < output.samples_per_channel_; ++i) {
+      if (output_data[i] != 0)
+        return;
+    }
+    EXPECT_TRUE(false)
+        << "Expected at least one non-zero sample in each output block.";
+  }
+
+  int NumExpectedDecodeCalls(int num_loops) override {
+    // Some packets at the end of the stream won't be decoded. When the jump in
+    // timestamp happens, NetEq will do Expand during one GetAudio call. In the
+    // next call it will decode the packet after the jump, but the net result is
+    // that the delay increased by 1 packet. In another call, a Pre-emptive
+    // Expand operation is performed, leading to delay increase by 1 packet. In
+    // total, the test will end with a 2-packet delay, which results in the 2
+    // last packets not being decoded.
+    return num_loops - 2;
+  }
+
+  TestStates test_state_;
+};
+
+TEST_F(LargeTimestampJumpTest, JumpLongerThanHalfRange) {
+  // Set the timestamp series to start at 2880, increase to 7200, then jump to
+  // 2869342376. The sequence numbers start at 42076 and increase by 1 for each
+  // packet, also when the timestamp jumps.
+  static const uint16_t kStartSeqeunceNumber = 42076;
+  static const uint32_t kStartTimestamp = 2880;
+  static const uint32_t kJumpFromTimestamp = 7200;
+  static const uint32_t kJumpToTimestamp = 2869342376;
+  static_assert(kJumpFromTimestamp < kJumpToTimestamp,
+                "timestamp jump should not result in wrap");
+  static_assert(
+      static_cast<uint32_t>(kJumpToTimestamp - kJumpFromTimestamp) > 0x7FFFFFFF,
+      "jump should be larger than half range");
+  // Replace the default RTP generator with one that jumps in timestamp.
+  ResetRtpGenerator(new test::TimestampJumpRtpGenerator(samples_per_ms(),
+                                                        kStartSeqeunceNumber,
+                                                        kStartTimestamp,
+                                                        kJumpFromTimestamp,
+                                                        kJumpToTimestamp));
+
+  RunTest(130);  // Run 130 laps @ 10 ms each in the test loop.
+  EXPECT_EQ(kRecovered, test_state_);
+}
+
+TEST_F(LargeTimestampJumpTest, JumpLongerThanHalfRangeAndWrap) {
+  // Make a jump larger than half the 32-bit timestamp range. Set the start
+  // timestamp such that the jump will result in a wrap around.
+  static const uint16_t kStartSeqeunceNumber = 42076;
+  // Set the jump length slightly larger than 2^31.
+  static const uint32_t kStartTimestamp = 3221223116;
+  static const uint32_t kJumpFromTimestamp = 3221223216;
+  static const uint32_t kJumpToTimestamp = 1073744278;
+  static_assert(kJumpToTimestamp < kJumpFromTimestamp,
+                "timestamp jump should result in wrap");
+  static_assert(
+      static_cast<uint32_t>(kJumpToTimestamp - kJumpFromTimestamp) > 0x7FFFFFFF,
+      "jump should be larger than half range");
+  // Replace the default RTP generator with one that jumps in timestamp.
+  ResetRtpGenerator(new test::TimestampJumpRtpGenerator(samples_per_ms(),
+                                                        kStartSeqeunceNumber,
+                                                        kStartTimestamp,
+                                                        kJumpFromTimestamp,
+                                                        kJumpToTimestamp));
+
+  RunTest(130);  // Run 130 laps @ 10 ms each in the test loop.
+  EXPECT_EQ(kRecovered, test_state_);
+}
+
+class ShortTimestampJumpTest : public LargeTimestampJumpTest {
+ protected:
+  void UpdateState(AudioFrame::SpeechType output_type) override {
+    switch (test_state_) {
+      case kInitialPhase: {
+        if (output_type == AudioFrame::kNormalSpeech) {
+          test_state_ = kNormalPhase;
+        }
+        break;
+      }
+      case kNormalPhase: {
+        if (output_type == AudioFrame::kPLC) {
+          test_state_ = kExpandPhase;
+        }
+        break;
+      }
+      case kExpandPhase: {
+        if (output_type == AudioFrame::kNormalSpeech) {
+          test_state_ = kRecovered;
+        }
+        break;
+      }
+      case kRecovered: {
+        break;
+      }
+      default: { FAIL(); }
+    }
+  }
+
+  int NumExpectedDecodeCalls(int num_loops) override {
+    // Some packets won't be decoded because of the timestamp jump.
+    return num_loops - 2;
+  }
+};
+
+TEST_F(ShortTimestampJumpTest, JumpShorterThanHalfRange) {
+  // Make a jump shorter than half the 32-bit timestamp range. Set the start
+  // timestamp such that the jump will not result in a wrap around.
+  static const uint16_t kStartSeqeunceNumber = 42076;
+  // Set the jump length slightly smaller than 2^31.
+  static const uint32_t kStartTimestamp = 4711;
+  static const uint32_t kJumpFromTimestamp = 4811;
+  static const uint32_t kJumpToTimestamp = 2147483747;
+  static_assert(kJumpFromTimestamp < kJumpToTimestamp,
+                "timestamp jump should not result in wrap");
+  static_assert(
+      static_cast<uint32_t>(kJumpToTimestamp - kJumpFromTimestamp) < 0x7FFFFFFF,
+      "jump should be smaller than half range");
+  // Replace the default RTP generator with one that jumps in timestamp.
+  ResetRtpGenerator(new test::TimestampJumpRtpGenerator(samples_per_ms(),
+                                                        kStartSeqeunceNumber,
+                                                        kStartTimestamp,
+                                                        kJumpFromTimestamp,
+                                                        kJumpToTimestamp));
+
+  RunTest(130);  // Run 130 laps @ 10 ms each in the test loop.
+  EXPECT_EQ(kRecovered, test_state_);
+}
+
+TEST_F(ShortTimestampJumpTest, JumpShorterThanHalfRangeAndWrap) {
+  // Make a jump shorter than half the 32-bit timestamp range. Set the start
+  // timestamp such that the jump will result in a wrap around.
+  static const uint16_t kStartSeqeunceNumber = 42076;
+  // Set the jump length slightly smaller than 2^31.
+  static const uint32_t kStartTimestamp = 3221227827;
+  static const uint32_t kJumpFromTimestamp = 3221227927;
+  static const uint32_t kJumpToTimestamp = 1073739567;
+  static_assert(kJumpToTimestamp < kJumpFromTimestamp,
+                "timestamp jump should result in wrap");
+  static_assert(
+      static_cast<uint32_t>(kJumpToTimestamp - kJumpFromTimestamp) < 0x7FFFFFFF,
+      "jump should be smaller than half range");
+  // Replace the default RTP generator with one that jumps in timestamp.
+  ResetRtpGenerator(new test::TimestampJumpRtpGenerator(samples_per_ms(),
+                                                        kStartSeqeunceNumber,
+                                                        kStartTimestamp,
+                                                        kJumpFromTimestamp,
+                                                        kJumpToTimestamp));
+
+  RunTest(130);  // Run 130 laps @ 10 ms each in the test loop.
+  EXPECT_EQ(kRecovered, test_state_);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/neteq_impl.cc b/modules/audio_coding/neteq/neteq_impl.cc
new file mode 100644
index 0000000..b107626
--- /dev/null
+++ b/modules/audio_coding/neteq/neteq_impl.cc
@@ -0,0 +1,2138 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/neteq_impl.h"
+
+#include <assert.h>
+
+#include <algorithm>
+#include <utility>
+#include <vector>
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/neteq/accelerate.h"
+#include "modules/audio_coding/neteq/background_noise.h"
+#include "modules/audio_coding/neteq/buffer_level_filter.h"
+#include "modules/audio_coding/neteq/comfort_noise.h"
+#include "modules/audio_coding/neteq/decision_logic.h"
+#include "modules/audio_coding/neteq/decoder_database.h"
+#include "modules/audio_coding/neteq/defines.h"
+#include "modules/audio_coding/neteq/delay_manager.h"
+#include "modules/audio_coding/neteq/delay_peak_detector.h"
+#include "modules/audio_coding/neteq/dtmf_buffer.h"
+#include "modules/audio_coding/neteq/dtmf_tone_generator.h"
+#include "modules/audio_coding/neteq/expand.h"
+#include "modules/audio_coding/neteq/merge.h"
+#include "modules/audio_coding/neteq/nack_tracker.h"
+#include "modules/audio_coding/neteq/normal.h"
+#include "modules/audio_coding/neteq/packet.h"
+#include "modules/audio_coding/neteq/packet_buffer.h"
+#include "modules/audio_coding/neteq/post_decode_vad.h"
+#include "modules/audio_coding/neteq/preemptive_expand.h"
+#include "modules/audio_coding/neteq/red_payload_splitter.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "modules/audio_coding/neteq/tick_timer.h"
+#include "modules/audio_coding/neteq/timestamp_scaler.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/sanitizer.h"
+#include "rtc_base/system/fallthrough.h"
+#include "rtc_base/trace_event.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+NetEqImpl::Dependencies::Dependencies(
+    const NetEq::Config& config,
+    const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory)
+    : tick_timer(new TickTimer),
+      buffer_level_filter(new BufferLevelFilter),
+      decoder_database(new DecoderDatabase(decoder_factory)),
+      delay_peak_detector(new DelayPeakDetector(tick_timer.get())),
+      delay_manager(new DelayManager(config.max_packets_in_buffer,
+                                     delay_peak_detector.get(),
+                                     tick_timer.get())),
+      dtmf_buffer(new DtmfBuffer(config.sample_rate_hz)),
+      dtmf_tone_generator(new DtmfToneGenerator),
+      packet_buffer(
+          new PacketBuffer(config.max_packets_in_buffer, tick_timer.get())),
+      red_payload_splitter(new RedPayloadSplitter),
+      timestamp_scaler(new TimestampScaler(*decoder_database)),
+      accelerate_factory(new AccelerateFactory),
+      expand_factory(new ExpandFactory),
+      preemptive_expand_factory(new PreemptiveExpandFactory) {}
+
+NetEqImpl::Dependencies::~Dependencies() = default;
+
+NetEqImpl::NetEqImpl(const NetEq::Config& config,
+                     Dependencies&& deps,
+                     bool create_components)
+    : tick_timer_(std::move(deps.tick_timer)),
+      buffer_level_filter_(std::move(deps.buffer_level_filter)),
+      decoder_database_(std::move(deps.decoder_database)),
+      delay_manager_(std::move(deps.delay_manager)),
+      delay_peak_detector_(std::move(deps.delay_peak_detector)),
+      dtmf_buffer_(std::move(deps.dtmf_buffer)),
+      dtmf_tone_generator_(std::move(deps.dtmf_tone_generator)),
+      packet_buffer_(std::move(deps.packet_buffer)),
+      red_payload_splitter_(std::move(deps.red_payload_splitter)),
+      timestamp_scaler_(std::move(deps.timestamp_scaler)),
+      vad_(new PostDecodeVad()),
+      expand_factory_(std::move(deps.expand_factory)),
+      accelerate_factory_(std::move(deps.accelerate_factory)),
+      preemptive_expand_factory_(std::move(deps.preemptive_expand_factory)),
+      last_mode_(kModeNormal),
+      decoded_buffer_length_(kMaxFrameSize),
+      decoded_buffer_(new int16_t[decoded_buffer_length_]),
+      playout_timestamp_(0),
+      new_codec_(false),
+      timestamp_(0),
+      reset_decoder_(false),
+      ssrc_(0),
+      first_packet_(true),
+      background_noise_mode_(config.background_noise_mode),
+      playout_mode_(config.playout_mode),
+      enable_fast_accelerate_(config.enable_fast_accelerate),
+      nack_enabled_(false),
+      enable_muted_state_(config.enable_muted_state) {
+  RTC_LOG(LS_INFO) << "NetEq config: " << config.ToString();
+  int fs = config.sample_rate_hz;
+  if (fs != 8000 && fs != 16000 && fs != 32000 && fs != 48000) {
+    RTC_LOG(LS_ERROR) << "Sample rate " << fs << " Hz not supported. "
+                      << "Changing to 8000 Hz.";
+    fs = 8000;
+  }
+  delay_manager_->SetMaximumDelay(config.max_delay_ms);
+  fs_hz_ = fs;
+  fs_mult_ = fs / 8000;
+  last_output_sample_rate_hz_ = fs;
+  output_size_samples_ = static_cast<size_t>(kOutputSizeMs * 8 * fs_mult_);
+  decoder_frame_length_ = 3 * output_size_samples_;
+  WebRtcSpl_Init();
+  if (create_components) {
+    SetSampleRateAndChannels(fs, 1);  // Default is 1 channel.
+  }
+  RTC_DCHECK(!vad_->enabled());
+  if (config.enable_post_decode_vad) {
+    vad_->Enable();
+  }
+}
+
+NetEqImpl::~NetEqImpl() = default;
+
+int NetEqImpl::InsertPacket(const RTPHeader& rtp_header,
+                            rtc::ArrayView<const uint8_t> payload,
+                            uint32_t receive_timestamp) {
+  rtc::MsanCheckInitialized(payload);
+  TRACE_EVENT0("webrtc", "NetEqImpl::InsertPacket");
+  rtc::CritScope lock(&crit_sect_);
+  if (InsertPacketInternal(rtp_header, payload, receive_timestamp) != 0) {
+    return kFail;
+  }
+  return kOK;
+}
+
+void NetEqImpl::InsertEmptyPacket(const RTPHeader& /*rtp_header*/) {
+  // TODO(henrik.lundin) Handle NACK as well. This will make use of the
+  // rtp_header parameter.
+  // https://bugs.chromium.org/p/webrtc/issues/detail?id=7611
+  rtc::CritScope lock(&crit_sect_);
+  delay_manager_->RegisterEmptyPacket();
+}
+
+namespace {
+void SetAudioFrameActivityAndType(bool vad_enabled,
+                                  NetEqImpl::OutputType type,
+                                  AudioFrame::VADActivity last_vad_activity,
+                                  AudioFrame* audio_frame) {
+  switch (type) {
+    case NetEqImpl::OutputType::kNormalSpeech: {
+      audio_frame->speech_type_ = AudioFrame::kNormalSpeech;
+      audio_frame->vad_activity_ = AudioFrame::kVadActive;
+      break;
+    }
+    case NetEqImpl::OutputType::kVadPassive: {
+      // This should only be reached if the VAD is enabled.
+      RTC_DCHECK(vad_enabled);
+      audio_frame->speech_type_ = AudioFrame::kNormalSpeech;
+      audio_frame->vad_activity_ = AudioFrame::kVadPassive;
+      break;
+    }
+    case NetEqImpl::OutputType::kCNG: {
+      audio_frame->speech_type_ = AudioFrame::kCNG;
+      audio_frame->vad_activity_ = AudioFrame::kVadPassive;
+      break;
+    }
+    case NetEqImpl::OutputType::kPLC: {
+      audio_frame->speech_type_ = AudioFrame::kPLC;
+      audio_frame->vad_activity_ = last_vad_activity;
+      break;
+    }
+    case NetEqImpl::OutputType::kPLCCNG: {
+      audio_frame->speech_type_ = AudioFrame::kPLCCNG;
+      audio_frame->vad_activity_ = AudioFrame::kVadPassive;
+      break;
+    }
+    default:
+      RTC_NOTREACHED();
+  }
+  if (!vad_enabled) {
+    // Always set kVadUnknown when receive VAD is inactive.
+    audio_frame->vad_activity_ = AudioFrame::kVadUnknown;
+  }
+}
+}  // namespace
+
+int NetEqImpl::GetAudio(AudioFrame* audio_frame, bool* muted) {
+  TRACE_EVENT0("webrtc", "NetEqImpl::GetAudio");
+  rtc::CritScope lock(&crit_sect_);
+  if (GetAudioInternal(audio_frame, muted) != 0) {
+    return kFail;
+  }
+  RTC_DCHECK_EQ(
+      audio_frame->sample_rate_hz_,
+      rtc::dchecked_cast<int>(audio_frame->samples_per_channel_ * 100));
+  RTC_DCHECK_EQ(*muted, audio_frame->muted());
+  SetAudioFrameActivityAndType(vad_->enabled(), LastOutputType(),
+                               last_vad_activity_, audio_frame);
+  last_vad_activity_ = audio_frame->vad_activity_;
+  last_output_sample_rate_hz_ = audio_frame->sample_rate_hz_;
+  RTC_DCHECK(last_output_sample_rate_hz_ == 8000 ||
+             last_output_sample_rate_hz_ == 16000 ||
+             last_output_sample_rate_hz_ == 32000 ||
+             last_output_sample_rate_hz_ == 48000)
+      << "Unexpected sample rate " << last_output_sample_rate_hz_;
+  return kOK;
+}
+
+void NetEqImpl::SetCodecs(const std::map<int, SdpAudioFormat>& codecs) {
+  rtc::CritScope lock(&crit_sect_);
+  const std::vector<int> changed_payload_types =
+      decoder_database_->SetCodecs(codecs);
+  for (const int pt : changed_payload_types) {
+    packet_buffer_->DiscardPacketsWithPayloadType(pt, &stats_);
+  }
+}
+
+int NetEqImpl::RegisterPayloadType(NetEqDecoder codec,
+                                   const std::string& name,
+                                   uint8_t rtp_payload_type) {
+  rtc::CritScope lock(&crit_sect_);
+  RTC_LOG(LS_VERBOSE) << "RegisterPayloadType "
+                      << static_cast<int>(rtp_payload_type) << " "
+                      << static_cast<int>(codec);
+  if (decoder_database_->RegisterPayload(rtp_payload_type, codec, name) !=
+      DecoderDatabase::kOK) {
+    return kFail;
+  }
+  return kOK;
+}
+
+int NetEqImpl::RegisterExternalDecoder(AudioDecoder* decoder,
+                                       NetEqDecoder codec,
+                                       const std::string& codec_name,
+                                       uint8_t rtp_payload_type) {
+  rtc::CritScope lock(&crit_sect_);
+  RTC_LOG(LS_VERBOSE) << "RegisterExternalDecoder "
+                      << static_cast<int>(rtp_payload_type) << " "
+                      << static_cast<int>(codec);
+  if (!decoder) {
+    RTC_LOG(LS_ERROR) << "Cannot register external decoder with NULL pointer";
+    assert(false);
+    return kFail;
+  }
+  if (decoder_database_->InsertExternal(rtp_payload_type, codec, codec_name,
+                                        decoder) != DecoderDatabase::kOK) {
+    return kFail;
+  }
+  return kOK;
+}
+
+bool NetEqImpl::RegisterPayloadType(int rtp_payload_type,
+                                    const SdpAudioFormat& audio_format) {
+  RTC_LOG(LS_VERBOSE) << "NetEqImpl::RegisterPayloadType: payload type "
+                      << rtp_payload_type << ", codec " << audio_format;
+  rtc::CritScope lock(&crit_sect_);
+  return decoder_database_->RegisterPayload(rtp_payload_type, audio_format) ==
+         DecoderDatabase::kOK;
+}
+
+int NetEqImpl::RemovePayloadType(uint8_t rtp_payload_type) {
+  rtc::CritScope lock(&crit_sect_);
+  int ret = decoder_database_->Remove(rtp_payload_type);
+  if (ret == DecoderDatabase::kOK || ret == DecoderDatabase::kDecoderNotFound) {
+    packet_buffer_->DiscardPacketsWithPayloadType(rtp_payload_type, &stats_);
+    return kOK;
+  }
+  return kFail;
+}
+
+void NetEqImpl::RemoveAllPayloadTypes() {
+  rtc::CritScope lock(&crit_sect_);
+  decoder_database_->RemoveAll();
+}
+
+bool NetEqImpl::SetMinimumDelay(int delay_ms) {
+  rtc::CritScope lock(&crit_sect_);
+  if (delay_ms >= 0 && delay_ms <= 10000) {
+    assert(delay_manager_.get());
+    return delay_manager_->SetMinimumDelay(delay_ms);
+  }
+  return false;
+}
+
+bool NetEqImpl::SetMaximumDelay(int delay_ms) {
+  rtc::CritScope lock(&crit_sect_);
+  if (delay_ms >= 0 && delay_ms <= 10000) {
+    assert(delay_manager_.get());
+    return delay_manager_->SetMaximumDelay(delay_ms);
+  }
+  return false;
+}
+
+int NetEqImpl::LeastRequiredDelayMs() const {
+  rtc::CritScope lock(&crit_sect_);
+  assert(delay_manager_.get());
+  return delay_manager_->least_required_delay_ms();
+}
+
+int NetEqImpl::SetTargetDelay() {
+  return kNotImplemented;
+}
+
+int NetEqImpl::TargetDelayMs() const {
+  rtc::CritScope lock(&crit_sect_);
+  RTC_DCHECK(delay_manager_.get());
+  // The value from TargetLevel() is in number of packets, represented in Q8.
+  const size_t target_delay_samples =
+      (delay_manager_->TargetLevel() * decoder_frame_length_) >> 8;
+  return static_cast<int>(target_delay_samples) /
+         rtc::CheckedDivExact(fs_hz_, 1000);
+}
+
+int NetEqImpl::CurrentDelayMs() const {
+  rtc::CritScope lock(&crit_sect_);
+  if (fs_hz_ == 0)
+    return 0;
+  // Sum up the samples in the packet buffer with the future length of the sync
+  // buffer, and divide the sum by the sample rate.
+  const size_t delay_samples =
+      packet_buffer_->NumSamplesInBuffer(decoder_frame_length_) +
+      sync_buffer_->FutureLength();
+  // The division below will truncate.
+  const int delay_ms =
+      static_cast<int>(delay_samples) / rtc::CheckedDivExact(fs_hz_, 1000);
+  return delay_ms;
+}
+
+int NetEqImpl::FilteredCurrentDelayMs() const {
+  rtc::CritScope lock(&crit_sect_);
+  // Calculate the filtered packet buffer level in samples. The value from
+  // |buffer_level_filter_| is in number of packets, represented in Q8.
+  const size_t packet_buffer_samples =
+      (buffer_level_filter_->filtered_current_level() *
+       decoder_frame_length_) >>
+      8;
+  // Sum up the filtered packet buffer level with the future length of the sync
+  // buffer, and divide the sum by the sample rate.
+  const size_t delay_samples =
+      packet_buffer_samples + sync_buffer_->FutureLength();
+  // The division below will truncate. The return value is in ms.
+  return static_cast<int>(delay_samples) / rtc::CheckedDivExact(fs_hz_, 1000);
+}
+
+// Deprecated.
+// TODO(henrik.lundin) Delete.
+void NetEqImpl::SetPlayoutMode(NetEqPlayoutMode mode) {
+  rtc::CritScope lock(&crit_sect_);
+  if (mode != playout_mode_) {
+    playout_mode_ = mode;
+    CreateDecisionLogic();
+  }
+}
+
+// Deprecated.
+// TODO(henrik.lundin) Delete.
+NetEqPlayoutMode NetEqImpl::PlayoutMode() const {
+  rtc::CritScope lock(&crit_sect_);
+  return playout_mode_;
+}
+
+int NetEqImpl::NetworkStatistics(NetEqNetworkStatistics* stats) {
+  rtc::CritScope lock(&crit_sect_);
+  assert(decoder_database_.get());
+  const size_t total_samples_in_buffers =
+      packet_buffer_->NumSamplesInBuffer(decoder_frame_length_) +
+      sync_buffer_->FutureLength();
+  assert(delay_manager_.get());
+  assert(decision_logic_.get());
+  const int ms_per_packet = rtc::dchecked_cast<int>(
+      decision_logic_->packet_length_samples() / (fs_hz_ / 1000));
+  stats_.PopulateDelayManagerStats(ms_per_packet, *delay_manager_.get(), stats);
+  stats_.GetNetworkStatistics(fs_hz_, total_samples_in_buffers,
+                              decoder_frame_length_, stats);
+  return 0;
+}
+
+NetEqLifetimeStatistics NetEqImpl::GetLifetimeStatistics() const {
+  rtc::CritScope lock(&crit_sect_);
+  return stats_.GetLifetimeStatistics();
+}
+
+void NetEqImpl::GetRtcpStatistics(RtcpStatistics* stats) {
+  rtc::CritScope lock(&crit_sect_);
+  if (stats) {
+    rtcp_.GetStatistics(false, stats);
+  }
+}
+
+void NetEqImpl::GetRtcpStatisticsNoReset(RtcpStatistics* stats) {
+  rtc::CritScope lock(&crit_sect_);
+  if (stats) {
+    rtcp_.GetStatistics(true, stats);
+  }
+}
+
+void NetEqImpl::EnableVad() {
+  rtc::CritScope lock(&crit_sect_);
+  assert(vad_.get());
+  vad_->Enable();
+}
+
+void NetEqImpl::DisableVad() {
+  rtc::CritScope lock(&crit_sect_);
+  assert(vad_.get());
+  vad_->Disable();
+}
+
+rtc::Optional<uint32_t> NetEqImpl::GetPlayoutTimestamp() const {
+  rtc::CritScope lock(&crit_sect_);
+  if (first_packet_ || last_mode_ == kModeRfc3389Cng ||
+      last_mode_ == kModeCodecInternalCng) {
+    // We don't have a valid RTP timestamp until we have decoded our first
+    // RTP packet. Also, the RTP timestamp is not accurate while playing CNG,
+    // which is indicated by returning an empty value.
+    return rtc::nullopt;
+  }
+  return timestamp_scaler_->ToExternal(playout_timestamp_);
+}
+
+int NetEqImpl::last_output_sample_rate_hz() const {
+  rtc::CritScope lock(&crit_sect_);
+  return last_output_sample_rate_hz_;
+}
+
+rtc::Optional<CodecInst> NetEqImpl::GetDecoder(int payload_type) const {
+  rtc::CritScope lock(&crit_sect_);
+  const DecoderDatabase::DecoderInfo* di =
+      decoder_database_->GetDecoderInfo(payload_type);
+  if (!di) {
+    return rtc::nullopt;
+  }
+
+  // Create a CodecInst with some fields set. The remaining fields are zeroed,
+  // but we tell MSan to consider them uninitialized.
+  CodecInst ci = {0};
+  rtc::MsanMarkUninitialized(rtc::MakeArrayView(&ci, 1));
+  ci.pltype = payload_type;
+  std::strncpy(ci.plname, di->get_name().c_str(), sizeof(ci.plname));
+  ci.plname[sizeof(ci.plname) - 1] = '\0';
+  ci.plfreq = di->IsRed() ? 8000 : di->SampleRateHz();
+  AudioDecoder* const decoder = di->GetDecoder();
+  ci.channels = decoder ? decoder->Channels() : 1;
+  return ci;
+}
+
+rtc::Optional<SdpAudioFormat> NetEqImpl::GetDecoderFormat(
+    int payload_type) const {
+  rtc::CritScope lock(&crit_sect_);
+  const DecoderDatabase::DecoderInfo* const di =
+      decoder_database_->GetDecoderInfo(payload_type);
+  if (!di) {
+    return rtc::nullopt;  // Payload type not registered.
+  }
+  return di->GetFormat();
+}
+
+int NetEqImpl::SetTargetNumberOfChannels() {
+  return kNotImplemented;
+}
+
+int NetEqImpl::SetTargetSampleRate() {
+  return kNotImplemented;
+}
+
+void NetEqImpl::FlushBuffers() {
+  rtc::CritScope lock(&crit_sect_);
+  RTC_LOG(LS_VERBOSE) << "FlushBuffers";
+  packet_buffer_->Flush();
+  assert(sync_buffer_.get());
+  assert(expand_.get());
+  sync_buffer_->Flush();
+  sync_buffer_->set_next_index(sync_buffer_->next_index() -
+                               expand_->overlap_length());
+  // Set to wait for new codec.
+  first_packet_ = true;
+}
+
+void NetEqImpl::PacketBufferStatistics(int* current_num_packets,
+                                       int* max_num_packets) const {
+  rtc::CritScope lock(&crit_sect_);
+  packet_buffer_->BufferStat(current_num_packets, max_num_packets);
+}
+
+void NetEqImpl::EnableNack(size_t max_nack_list_size) {
+  rtc::CritScope lock(&crit_sect_);
+  if (!nack_enabled_) {
+    const int kNackThresholdPackets = 2;
+    nack_.reset(NackTracker::Create(kNackThresholdPackets));
+    nack_enabled_ = true;
+    nack_->UpdateSampleRate(fs_hz_);
+  }
+  nack_->SetMaxNackListSize(max_nack_list_size);
+}
+
+void NetEqImpl::DisableNack() {
+  rtc::CritScope lock(&crit_sect_);
+  nack_.reset();
+  nack_enabled_ = false;
+}
+
+std::vector<uint16_t> NetEqImpl::GetNackList(int64_t round_trip_time_ms) const {
+  rtc::CritScope lock(&crit_sect_);
+  if (!nack_enabled_) {
+    return std::vector<uint16_t>();
+  }
+  RTC_DCHECK(nack_.get());
+  return nack_->GetNackList(round_trip_time_ms);
+}
+
+std::vector<uint32_t> NetEqImpl::LastDecodedTimestamps() const {
+  rtc::CritScope lock(&crit_sect_);
+  return last_decoded_timestamps_;
+}
+
+int NetEqImpl::SyncBufferSizeMs() const {
+  rtc::CritScope lock(&crit_sect_);
+  return rtc::dchecked_cast<int>(sync_buffer_->FutureLength() /
+                                 rtc::CheckedDivExact(fs_hz_, 1000));
+}
+
+const SyncBuffer* NetEqImpl::sync_buffer_for_test() const {
+  rtc::CritScope lock(&crit_sect_);
+  return sync_buffer_.get();
+}
+
+Operations NetEqImpl::last_operation_for_test() const {
+  rtc::CritScope lock(&crit_sect_);
+  return last_operation_;
+}
+
+// Methods below this line are private.
+
+int NetEqImpl::InsertPacketInternal(const RTPHeader& rtp_header,
+                                    rtc::ArrayView<const uint8_t> payload,
+                                    uint32_t receive_timestamp) {
+  if (payload.empty()) {
+    RTC_LOG_F(LS_ERROR) << "payload is empty";
+    return kInvalidPointer;
+  }
+
+  PacketList packet_list;
+  // Insert packet in a packet list.
+  packet_list.push_back([&rtp_header, &payload] {
+    // Convert to Packet.
+    Packet packet;
+    packet.payload_type = rtp_header.payloadType;
+    packet.sequence_number = rtp_header.sequenceNumber;
+    packet.timestamp = rtp_header.timestamp;
+    packet.payload.SetData(payload.data(), payload.size());
+    // Waiting time will be set upon inserting the packet in the buffer.
+    RTC_DCHECK(!packet.waiting_time);
+    return packet;
+  }());
+
+  bool update_sample_rate_and_channels =
+      first_packet_ || (rtp_header.ssrc != ssrc_);
+
+  if (update_sample_rate_and_channels) {
+    // Reset timestamp scaling.
+    timestamp_scaler_->Reset();
+  }
+
+  if (!decoder_database_->IsRed(rtp_header.payloadType)) {
+    // Scale timestamp to internal domain (only for some codecs).
+    timestamp_scaler_->ToInternal(&packet_list);
+  }
+
+  // Store these for later use, since the first packet may very well disappear
+  // before we need these values.
+  uint32_t main_timestamp = packet_list.front().timestamp;
+  uint8_t main_payload_type = packet_list.front().payload_type;
+  uint16_t main_sequence_number = packet_list.front().sequence_number;
+
+  // Reinitialize NetEq if it's needed (changed SSRC or first call).
+  if (update_sample_rate_and_channels) {
+    // Note: |first_packet_| will be cleared further down in this method, once
+    // the packet has been successfully inserted into the packet buffer.
+
+    rtcp_.Init(rtp_header.sequenceNumber);
+
+    // Flush the packet buffer and DTMF buffer.
+    packet_buffer_->Flush();
+    dtmf_buffer_->Flush();
+
+    // Store new SSRC.
+    ssrc_ = rtp_header.ssrc;
+
+    // Update audio buffer timestamp.
+    sync_buffer_->IncreaseEndTimestamp(main_timestamp - timestamp_);
+
+    // Update codecs.
+    timestamp_ = main_timestamp;
+  }
+
+  // Update RTCP statistics, only for regular packets.
+  rtcp_.Update(rtp_header, receive_timestamp);
+
+  if (nack_enabled_) {
+    RTC_DCHECK(nack_);
+    if (update_sample_rate_and_channels) {
+      nack_->Reset();
+    }
+    nack_->UpdateLastReceivedPacket(rtp_header.sequenceNumber,
+                                    rtp_header.timestamp);
+  }
+
+  // Check for RED payload type, and separate payloads into several packets.
+  if (decoder_database_->IsRed(rtp_header.payloadType)) {
+    if (!red_payload_splitter_->SplitRed(&packet_list)) {
+      return kRedundancySplitError;
+    }
+    // Only accept a few RED payloads of the same type as the main data,
+    // DTMF events and CNG.
+    red_payload_splitter_->CheckRedPayloads(&packet_list, *decoder_database_);
+  }
+
+  // Check payload types.
+  if (decoder_database_->CheckPayloadTypes(packet_list) ==
+      DecoderDatabase::kDecoderNotFound) {
+    return kUnknownRtpPayloadType;
+  }
+
+  RTC_DCHECK(!packet_list.empty());
+
+  // Update main_timestamp, if new packets appear in the list
+  // after RED splitting.
+  if (decoder_database_->IsRed(rtp_header.payloadType)) {
+    timestamp_scaler_->ToInternal(&packet_list);
+    main_timestamp = packet_list.front().timestamp;
+    main_payload_type = packet_list.front().payload_type;
+    main_sequence_number = packet_list.front().sequence_number;
+  }
+
+  // Process DTMF payloads. Cycle through the list of packets, and pick out any
+  // DTMF payloads found.
+  PacketList::iterator it = packet_list.begin();
+  while (it != packet_list.end()) {
+    const Packet& current_packet = (*it);
+    RTC_DCHECK(!current_packet.payload.empty());
+    if (decoder_database_->IsDtmf(current_packet.payload_type)) {
+      DtmfEvent event;
+      int ret = DtmfBuffer::ParseEvent(current_packet.timestamp,
+                                       current_packet.payload.data(),
+                                       current_packet.payload.size(), &event);
+      if (ret != DtmfBuffer::kOK) {
+        return kDtmfParsingError;
+      }
+      if (dtmf_buffer_->InsertEvent(event) != DtmfBuffer::kOK) {
+        return kDtmfInsertError;
+      }
+      it = packet_list.erase(it);
+    } else {
+      ++it;
+    }
+  }
+
+  // Update bandwidth estimate, if the packet is not comfort noise.
+  if (!packet_list.empty() &&
+      !decoder_database_->IsComfortNoise(main_payload_type)) {
+    // The list can be empty here if we got nothing but DTMF payloads.
+    AudioDecoder* decoder = decoder_database_->GetDecoder(main_payload_type);
+    RTC_DCHECK(decoder);  // Should always get a valid object, since we have
+                          // already checked that the payload types are known.
+    decoder->IncomingPacket(packet_list.front().payload.data(),
+                            packet_list.front().payload.size(),
+                            packet_list.front().sequence_number,
+                            packet_list.front().timestamp,
+                            receive_timestamp);
+  }
+
+  PacketList parsed_packet_list;
+  while (!packet_list.empty()) {
+    Packet& packet = packet_list.front();
+    const DecoderDatabase::DecoderInfo* info =
+        decoder_database_->GetDecoderInfo(packet.payload_type);
+    if (!info) {
+      RTC_LOG(LS_WARNING) << "SplitAudio unknown payload type";
+      return kUnknownRtpPayloadType;
+    }
+
+    if (info->IsComfortNoise()) {
+      // Carry comfort noise packets along.
+      parsed_packet_list.splice(parsed_packet_list.end(), packet_list,
+                                packet_list.begin());
+    } else {
+      const auto sequence_number = packet.sequence_number;
+      const auto payload_type = packet.payload_type;
+      const Packet::Priority original_priority = packet.priority;
+      auto packet_from_result = [&] (AudioDecoder::ParseResult& result) {
+        Packet new_packet;
+        new_packet.sequence_number = sequence_number;
+        new_packet.payload_type = payload_type;
+        new_packet.timestamp = result.timestamp;
+        new_packet.priority.codec_level = result.priority;
+        new_packet.priority.red_level = original_priority.red_level;
+        new_packet.frame = std::move(result.frame);
+        return new_packet;
+      };
+
+      std::vector<AudioDecoder::ParseResult> results =
+          info->GetDecoder()->ParsePayload(std::move(packet.payload),
+                                           packet.timestamp);
+      if (results.empty()) {
+        packet_list.pop_front();
+      } else {
+        bool first = true;
+        for (auto& result : results) {
+          RTC_DCHECK(result.frame);
+          RTC_DCHECK_GE(result.priority, 0);
+          if (first) {
+            // Re-use the node and move it to parsed_packet_list.
+            packet_list.front() = packet_from_result(result);
+            parsed_packet_list.splice(parsed_packet_list.end(), packet_list,
+                                      packet_list.begin());
+            first = false;
+          } else {
+            parsed_packet_list.push_back(packet_from_result(result));
+          }
+        }
+      }
+    }
+  }
+
+  // Calculate the number of primary (non-FEC/RED) packets.
+  const int number_of_primary_packets = std::count_if(
+      parsed_packet_list.begin(), parsed_packet_list.end(),
+      [](const Packet& in) { return in.priority.codec_level == 0; });
+
+  // Insert packets in buffer.
+  const int ret = packet_buffer_->InsertPacketList(
+      &parsed_packet_list, *decoder_database_, &current_rtp_payload_type_,
+      &current_cng_rtp_payload_type_, &stats_);
+  if (ret == PacketBuffer::kFlushed) {
+    // Reset DSP timestamp etc. if packet buffer flushed.
+    new_codec_ = true;
+    update_sample_rate_and_channels = true;
+  } else if (ret != PacketBuffer::kOK) {
+    return kOtherError;
+  }
+
+  if (first_packet_) {
+    first_packet_ = false;
+    // Update the codec on the next GetAudio call.
+    new_codec_ = true;
+  }
+
+  if (current_rtp_payload_type_) {
+    RTC_DCHECK(decoder_database_->GetDecoderInfo(*current_rtp_payload_type_))
+        << "Payload type " << static_cast<int>(*current_rtp_payload_type_)
+        << " is unknown where it shouldn't be";
+  }
+
+  if (update_sample_rate_and_channels && !packet_buffer_->Empty()) {
+    // We do not use |current_rtp_payload_type_| to |set payload_type|, but
+    // get the next RTP header from |packet_buffer_| to obtain the payload type.
+    // The reason for it is the following corner case. If NetEq receives a
+    // CNG packet with a sample rate different than the current CNG then it
+    // flushes its buffer, assuming send codec must have been changed. However,
+    // payload type of the hypothetically new send codec is not known.
+    const Packet* next_packet = packet_buffer_->PeekNextPacket();
+    RTC_DCHECK(next_packet);
+    const int payload_type = next_packet->payload_type;
+    size_t channels = 1;
+    if (!decoder_database_->IsComfortNoise(payload_type)) {
+      AudioDecoder* decoder = decoder_database_->GetDecoder(payload_type);
+      assert(decoder);  // Payloads are already checked to be valid.
+      channels = decoder->Channels();
+    }
+    const DecoderDatabase::DecoderInfo* decoder_info =
+        decoder_database_->GetDecoderInfo(payload_type);
+    assert(decoder_info);
+    if (decoder_info->SampleRateHz() != fs_hz_ ||
+        channels != algorithm_buffer_->Channels()) {
+      SetSampleRateAndChannels(decoder_info->SampleRateHz(),
+                               channels);
+    }
+    if (nack_enabled_) {
+      RTC_DCHECK(nack_);
+      // Update the sample rate even if the rate is not new, because of Reset().
+      nack_->UpdateSampleRate(fs_hz_);
+    }
+  }
+
+  // TODO(hlundin): Move this code to DelayManager class.
+  const DecoderDatabase::DecoderInfo* dec_info =
+      decoder_database_->GetDecoderInfo(main_payload_type);
+  assert(dec_info);  // Already checked that the payload type is known.
+  delay_manager_->LastDecodedWasCngOrDtmf(dec_info->IsComfortNoise() ||
+                                          dec_info->IsDtmf());
+  if (delay_manager_->last_pack_cng_or_dtmf() == 0) {
+    // Calculate the total speech length carried in each packet.
+    if (number_of_primary_packets > 0) {
+      const size_t packet_length_samples =
+          number_of_primary_packets * decoder_frame_length_;
+      if (packet_length_samples != decision_logic_->packet_length_samples()) {
+        decision_logic_->set_packet_length_samples(packet_length_samples);
+        delay_manager_->SetPacketAudioLength(
+            rtc::dchecked_cast<int>((1000 * packet_length_samples) / fs_hz_));
+      }
+    }
+
+    // Update statistics.
+    if ((int32_t)(main_timestamp - timestamp_) >= 0 && !new_codec_) {
+      // Only update statistics if incoming packet is not older than last played
+      // out packet, and if new codec flag is not set.
+      delay_manager_->Update(main_sequence_number, main_timestamp, fs_hz_);
+    }
+  } else if (delay_manager_->last_pack_cng_or_dtmf() == -1) {
+    // This is first "normal" packet after CNG or DTMF.
+    // Reset packet time counter and measure time until next packet,
+    // but don't update statistics.
+    delay_manager_->set_last_pack_cng_or_dtmf(0);
+    delay_manager_->ResetPacketIatCount();
+  }
+  return 0;
+}
+
+int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame, bool* muted) {
+  PacketList packet_list;
+  DtmfEvent dtmf_event;
+  Operations operation;
+  bool play_dtmf;
+  *muted = false;
+  last_decoded_timestamps_.clear();
+  tick_timer_->Increment();
+  stats_.IncreaseCounter(output_size_samples_, fs_hz_);
+
+  // Check for muted state.
+  if (enable_muted_state_ && expand_->Muted() && packet_buffer_->Empty()) {
+    RTC_DCHECK_EQ(last_mode_, kModeExpand);
+    audio_frame->Reset();
+    RTC_DCHECK(audio_frame->muted());  // Reset() should mute the frame.
+    playout_timestamp_ += static_cast<uint32_t>(output_size_samples_);
+    audio_frame->sample_rate_hz_ = fs_hz_;
+    audio_frame->samples_per_channel_ = output_size_samples_;
+    audio_frame->timestamp_ =
+        first_packet_
+            ? 0
+            : timestamp_scaler_->ToExternal(playout_timestamp_) -
+                  static_cast<uint32_t>(audio_frame->samples_per_channel_);
+    audio_frame->num_channels_ = sync_buffer_->Channels();
+    stats_.ExpandedNoiseSamples(output_size_samples_, false);
+    *muted = true;
+    return 0;
+  }
+
+  int return_value = GetDecision(&operation, &packet_list, &dtmf_event,
+                                 &play_dtmf);
+  if (return_value != 0) {
+    last_mode_ = kModeError;
+    return return_value;
+  }
+
+  AudioDecoder::SpeechType speech_type;
+  int length = 0;
+  const size_t start_num_packets = packet_list.size();
+  int decode_return_value = Decode(&packet_list, &operation,
+                                   &length, &speech_type);
+
+  assert(vad_.get());
+  bool sid_frame_available =
+      (operation == kRfc3389Cng && !packet_list.empty());
+  vad_->Update(decoded_buffer_.get(), static_cast<size_t>(length), speech_type,
+               sid_frame_available, fs_hz_);
+
+  // This is the criterion that we did decode some data through the speech
+  // decoder, and the operation resulted in comfort noise.
+  const bool codec_internal_sid_frame =
+      (speech_type == AudioDecoder::kComfortNoise &&
+       start_num_packets > packet_list.size());
+
+  if (sid_frame_available || codec_internal_sid_frame) {
+    // Start a new stopwatch since we are decoding a new CNG packet.
+    generated_noise_stopwatch_ = tick_timer_->GetNewStopwatch();
+  }
+
+  algorithm_buffer_->Clear();
+  switch (operation) {
+    case kNormal: {
+      DoNormal(decoded_buffer_.get(), length, speech_type, play_dtmf);
+      break;
+    }
+    case kMerge: {
+      DoMerge(decoded_buffer_.get(), length, speech_type, play_dtmf);
+      break;
+    }
+    case kExpand: {
+      return_value = DoExpand(play_dtmf);
+      break;
+    }
+    case kAccelerate:
+    case kFastAccelerate: {
+      const bool fast_accelerate =
+          enable_fast_accelerate_ && (operation == kFastAccelerate);
+      return_value = DoAccelerate(decoded_buffer_.get(), length, speech_type,
+                                  play_dtmf, fast_accelerate);
+      break;
+    }
+    case kPreemptiveExpand: {
+      return_value = DoPreemptiveExpand(decoded_buffer_.get(), length,
+                                        speech_type, play_dtmf);
+      break;
+    }
+    case kRfc3389Cng:
+    case kRfc3389CngNoPacket: {
+      return_value = DoRfc3389Cng(&packet_list, play_dtmf);
+      break;
+    }
+    case kCodecInternalCng: {
+      // This handles the case when there is no transmission and the decoder
+      // should produce internal comfort noise.
+      // TODO(hlundin): Write test for codec-internal CNG.
+      DoCodecInternalCng(decoded_buffer_.get(), length);
+      break;
+    }
+    case kDtmf: {
+      // TODO(hlundin): Write test for this.
+      return_value = DoDtmf(dtmf_event, &play_dtmf);
+      break;
+    }
+    case kAlternativePlc: {
+      // TODO(hlundin): Write test for this.
+      DoAlternativePlc(false);
+      break;
+    }
+    case kAlternativePlcIncreaseTimestamp: {
+      // TODO(hlundin): Write test for this.
+      DoAlternativePlc(true);
+      break;
+    }
+    case kAudioRepetitionIncreaseTimestamp: {
+      // TODO(hlundin): Write test for this.
+      sync_buffer_->IncreaseEndTimestamp(
+          static_cast<uint32_t>(output_size_samples_));
+      // Skipping break on purpose. Execution should move on into the
+      // next case.
+      RTC_FALLTHROUGH();
+    }
+    case kAudioRepetition: {
+      // TODO(hlundin): Write test for this.
+      // Copy last |output_size_samples_| from |sync_buffer_| to
+      // |algorithm_buffer|.
+      algorithm_buffer_->PushBackFromIndex(
+          *sync_buffer_, sync_buffer_->Size() - output_size_samples_);
+      expand_->Reset();
+      break;
+    }
+    case kUndefined: {
+      RTC_LOG(LS_ERROR) << "Invalid operation kUndefined.";
+      assert(false);  // This should not happen.
+      last_mode_ = kModeError;
+      return kInvalidOperation;
+    }
+  }  // End of switch.
+  last_operation_ = operation;
+  if (return_value < 0) {
+    return return_value;
+  }
+
+  if (last_mode_ != kModeRfc3389Cng) {
+    comfort_noise_->Reset();
+  }
+
+  // Copy from |algorithm_buffer| to |sync_buffer_|.
+  sync_buffer_->PushBack(*algorithm_buffer_);
+
+  // Extract data from |sync_buffer_| to |output|.
+  size_t num_output_samples_per_channel = output_size_samples_;
+  size_t num_output_samples = output_size_samples_ * sync_buffer_->Channels();
+  if (num_output_samples > AudioFrame::kMaxDataSizeSamples) {
+    RTC_LOG(LS_WARNING) << "Output array is too short. "
+                        << AudioFrame::kMaxDataSizeSamples << " < "
+                        << output_size_samples_ << " * "
+                        << sync_buffer_->Channels();
+    num_output_samples = AudioFrame::kMaxDataSizeSamples;
+    num_output_samples_per_channel =
+        AudioFrame::kMaxDataSizeSamples / sync_buffer_->Channels();
+  }
+  sync_buffer_->GetNextAudioInterleaved(num_output_samples_per_channel,
+                                        audio_frame);
+  audio_frame->sample_rate_hz_ = fs_hz_;
+  if (sync_buffer_->FutureLength() < expand_->overlap_length()) {
+    // The sync buffer should always contain |overlap_length| samples, but now
+    // too many samples have been extracted. Reinstall the |overlap_length|
+    // lookahead by moving the index.
+    const size_t missing_lookahead_samples =
+        expand_->overlap_length() - sync_buffer_->FutureLength();
+    RTC_DCHECK_GE(sync_buffer_->next_index(), missing_lookahead_samples);
+    sync_buffer_->set_next_index(sync_buffer_->next_index() -
+                                 missing_lookahead_samples);
+  }
+  if (audio_frame->samples_per_channel_ != output_size_samples_) {
+    RTC_LOG(LS_ERROR) << "audio_frame->samples_per_channel_ ("
+                      << audio_frame->samples_per_channel_
+                      << ") != output_size_samples_ (" << output_size_samples_
+                      << ")";
+    // TODO(minyue): treatment of under-run, filling zeros
+    audio_frame->Mute();
+    return kSampleUnderrun;
+  }
+
+  // Should always have overlap samples left in the |sync_buffer_|.
+  RTC_DCHECK_GE(sync_buffer_->FutureLength(), expand_->overlap_length());
+
+  // TODO(yujo): For muted frames, this can be a copy rather than an addition.
+  if (play_dtmf) {
+    return_value = DtmfOverdub(dtmf_event, sync_buffer_->Channels(),
+                               audio_frame->mutable_data());
+  }
+
+  // Update the background noise parameters if last operation wrote data
+  // straight from the decoder to the |sync_buffer_|. That is, none of the
+  // operations that modify the signal can be followed by a parameter update.
+  if ((last_mode_ == kModeNormal) ||
+      (last_mode_ == kModeAccelerateFail) ||
+      (last_mode_ == kModePreemptiveExpandFail) ||
+      (last_mode_ == kModeRfc3389Cng) ||
+      (last_mode_ == kModeCodecInternalCng)) {
+    background_noise_->Update(*sync_buffer_, *vad_.get());
+  }
+
+  if (operation == kDtmf) {
+    // DTMF data was written the end of |sync_buffer_|.
+    // Update index to end of DTMF data in |sync_buffer_|.
+    sync_buffer_->set_dtmf_index(sync_buffer_->Size());
+  }
+
+  if (last_mode_ != kModeExpand) {
+    // If last operation was not expand, calculate the |playout_timestamp_| from
+    // the |sync_buffer_|. However, do not update the |playout_timestamp_| if it
+    // would be moved "backwards".
+    uint32_t temp_timestamp = sync_buffer_->end_timestamp() -
+        static_cast<uint32_t>(sync_buffer_->FutureLength());
+    if (static_cast<int32_t>(temp_timestamp - playout_timestamp_) > 0) {
+      playout_timestamp_ = temp_timestamp;
+    }
+  } else {
+    // Use dead reckoning to estimate the |playout_timestamp_|.
+    playout_timestamp_ += static_cast<uint32_t>(output_size_samples_);
+  }
+  // Set the timestamp in the audio frame to zero before the first packet has
+  // been inserted. Otherwise, subtract the frame size in samples to get the
+  // timestamp of the first sample in the frame (playout_timestamp_ is the
+  // last + 1).
+  audio_frame->timestamp_ =
+      first_packet_
+          ? 0
+          : timestamp_scaler_->ToExternal(playout_timestamp_) -
+                static_cast<uint32_t>(audio_frame->samples_per_channel_);
+
+  if (!(last_mode_ == kModeRfc3389Cng ||
+      last_mode_ == kModeCodecInternalCng ||
+      last_mode_ == kModeExpand)) {
+    generated_noise_stopwatch_.reset();
+  }
+
+  if (decode_return_value) return decode_return_value;
+  return return_value;
+}
+
+int NetEqImpl::GetDecision(Operations* operation,
+                           PacketList* packet_list,
+                           DtmfEvent* dtmf_event,
+                           bool* play_dtmf) {
+  // Initialize output variables.
+  *play_dtmf = false;
+  *operation = kUndefined;
+
+  assert(sync_buffer_.get());
+  uint32_t end_timestamp = sync_buffer_->end_timestamp();
+  if (!new_codec_) {
+    const uint32_t five_seconds_samples = 5 * fs_hz_;
+    packet_buffer_->DiscardOldPackets(end_timestamp, five_seconds_samples,
+                                      &stats_);
+  }
+  const Packet* packet = packet_buffer_->PeekNextPacket();
+
+  RTC_DCHECK(!generated_noise_stopwatch_ ||
+             generated_noise_stopwatch_->ElapsedTicks() >= 1);
+  uint64_t generated_noise_samples =
+      generated_noise_stopwatch_
+          ? (generated_noise_stopwatch_->ElapsedTicks() - 1) *
+                    output_size_samples_ +
+                decision_logic_->noise_fast_forward()
+          : 0;
+
+  if (decision_logic_->CngRfc3389On() || last_mode_ == kModeRfc3389Cng) {
+    // Because of timestamp peculiarities, we have to "manually" disallow using
+    // a CNG packet with the same timestamp as the one that was last played.
+    // This can happen when using redundancy and will cause the timing to shift.
+    while (packet && decoder_database_->IsComfortNoise(packet->payload_type) &&
+           (end_timestamp >= packet->timestamp ||
+            end_timestamp + generated_noise_samples > packet->timestamp)) {
+      // Don't use this packet, discard it.
+      if (packet_buffer_->DiscardNextPacket(&stats_) != PacketBuffer::kOK) {
+        assert(false);  // Must be ok by design.
+      }
+      // Check buffer again.
+      if (!new_codec_) {
+        packet_buffer_->DiscardOldPackets(end_timestamp, 5 * fs_hz_, &stats_);
+      }
+      packet = packet_buffer_->PeekNextPacket();
+    }
+  }
+
+  assert(expand_.get());
+  const int samples_left = static_cast<int>(sync_buffer_->FutureLength() -
+      expand_->overlap_length());
+  if (last_mode_ == kModeAccelerateSuccess ||
+      last_mode_ == kModeAccelerateLowEnergy ||
+      last_mode_ == kModePreemptiveExpandSuccess ||
+      last_mode_ == kModePreemptiveExpandLowEnergy) {
+    // Subtract (samples_left + output_size_samples_) from sampleMemory.
+    decision_logic_->AddSampleMemory(
+        -(samples_left + rtc::dchecked_cast<int>(output_size_samples_)));
+  }
+
+  // Check if it is time to play a DTMF event.
+  if (dtmf_buffer_->GetEvent(
+      static_cast<uint32_t>(
+          end_timestamp + generated_noise_samples),
+      dtmf_event)) {
+    *play_dtmf = true;
+  }
+
+  // Get instruction.
+  assert(sync_buffer_.get());
+  assert(expand_.get());
+  generated_noise_samples =
+      generated_noise_stopwatch_
+          ? generated_noise_stopwatch_->ElapsedTicks() * output_size_samples_ +
+                decision_logic_->noise_fast_forward()
+          : 0;
+  *operation = decision_logic_->GetDecision(
+      *sync_buffer_, *expand_, decoder_frame_length_, packet, last_mode_,
+      *play_dtmf, generated_noise_samples, &reset_decoder_);
+
+  // Check if we already have enough samples in the |sync_buffer_|. If so,
+  // change decision to normal, unless the decision was merge, accelerate, or
+  // preemptive expand.
+  if (samples_left >= rtc::dchecked_cast<int>(output_size_samples_) &&
+      *operation != kMerge && *operation != kAccelerate &&
+      *operation != kFastAccelerate && *operation != kPreemptiveExpand) {
+    *operation = kNormal;
+    return 0;
+  }
+
+  decision_logic_->ExpandDecision(*operation);
+
+  // Check conditions for reset.
+  if (new_codec_ || *operation == kUndefined) {
+    // The only valid reason to get kUndefined is that new_codec_ is set.
+    assert(new_codec_);
+    if (*play_dtmf && !packet) {
+      timestamp_ = dtmf_event->timestamp;
+    } else {
+      if (!packet) {
+        RTC_LOG(LS_ERROR) << "Packet missing where it shouldn't.";
+        return -1;
+      }
+      timestamp_ = packet->timestamp;
+      if (*operation == kRfc3389CngNoPacket &&
+          decoder_database_->IsComfortNoise(packet->payload_type)) {
+        // Change decision to CNG packet, since we do have a CNG packet, but it
+        // was considered too early to use. Now, use it anyway.
+        *operation = kRfc3389Cng;
+      } else if (*operation != kRfc3389Cng) {
+        *operation = kNormal;
+      }
+    }
+    // Adjust |sync_buffer_| timestamp before setting |end_timestamp| to the
+    // new value.
+    sync_buffer_->IncreaseEndTimestamp(timestamp_ - end_timestamp);
+    end_timestamp = timestamp_;
+    new_codec_ = false;
+    decision_logic_->SoftReset();
+    buffer_level_filter_->Reset();
+    delay_manager_->Reset();
+    stats_.ResetMcu();
+  }
+
+  size_t required_samples = output_size_samples_;
+  const size_t samples_10_ms = static_cast<size_t>(80 * fs_mult_);
+  const size_t samples_20_ms = 2 * samples_10_ms;
+  const size_t samples_30_ms = 3 * samples_10_ms;
+
+  switch (*operation) {
+    case kExpand: {
+      timestamp_ = end_timestamp;
+      return 0;
+    }
+    case kRfc3389CngNoPacket:
+    case kCodecInternalCng: {
+      return 0;
+    }
+    case kDtmf: {
+      // TODO(hlundin): Write test for this.
+      // Update timestamp.
+      timestamp_ = end_timestamp;
+      const uint64_t generated_noise_samples =
+          generated_noise_stopwatch_
+              ? generated_noise_stopwatch_->ElapsedTicks() *
+                        output_size_samples_ +
+                    decision_logic_->noise_fast_forward()
+              : 0;
+      if (generated_noise_samples > 0 && last_mode_ != kModeDtmf) {
+        // Make a jump in timestamp due to the recently played comfort noise.
+        uint32_t timestamp_jump =
+            static_cast<uint32_t>(generated_noise_samples);
+        sync_buffer_->IncreaseEndTimestamp(timestamp_jump);
+        timestamp_ += timestamp_jump;
+      }
+      return 0;
+    }
+    case kAccelerate:
+    case kFastAccelerate: {
+      // In order to do an accelerate we need at least 30 ms of audio data.
+      if (samples_left >= static_cast<int>(samples_30_ms)) {
+        // Already have enough data, so we do not need to extract any more.
+        decision_logic_->set_sample_memory(samples_left);
+        decision_logic_->set_prev_time_scale(true);
+        return 0;
+      } else if (samples_left >= static_cast<int>(samples_10_ms) &&
+          decoder_frame_length_ >= samples_30_ms) {
+        // Avoid decoding more data as it might overflow the playout buffer.
+        *operation = kNormal;
+        return 0;
+      } else if (samples_left < static_cast<int>(samples_20_ms) &&
+          decoder_frame_length_ < samples_30_ms) {
+        // Build up decoded data by decoding at least 20 ms of audio data. Do
+        // not perform accelerate yet, but wait until we only need to do one
+        // decoding.
+        required_samples = 2 * output_size_samples_;
+        *operation = kNormal;
+      }
+      // If none of the above is true, we have one of two possible situations:
+      // (1) 20 ms <= samples_left < 30 ms and decoder_frame_length_ < 30 ms; or
+      // (2) samples_left < 10 ms and decoder_frame_length_ >= 30 ms.
+      // In either case, we move on with the accelerate decision, and decode one
+      // frame now.
+      break;
+    }
+    case kPreemptiveExpand: {
+      // In order to do a preemptive expand we need at least 30 ms of decoded
+      // audio data.
+      if ((samples_left >= static_cast<int>(samples_30_ms)) ||
+          (samples_left >= static_cast<int>(samples_10_ms) &&
+              decoder_frame_length_ >= samples_30_ms)) {
+        // Already have enough data, so we do not need to extract any more.
+        // Or, avoid decoding more data as it might overflow the playout buffer.
+        // Still try preemptive expand, though.
+        decision_logic_->set_sample_memory(samples_left);
+        decision_logic_->set_prev_time_scale(true);
+        return 0;
+      }
+      if (samples_left < static_cast<int>(samples_20_ms) &&
+          decoder_frame_length_ < samples_30_ms) {
+        // Build up decoded data by decoding at least 20 ms of audio data.
+        // Still try to perform preemptive expand.
+        required_samples = 2 * output_size_samples_;
+      }
+      // Move on with the preemptive expand decision.
+      break;
+    }
+    case kMerge: {
+      required_samples =
+          std::max(merge_->RequiredFutureSamples(), required_samples);
+      break;
+    }
+    default: {
+      // Do nothing.
+    }
+  }
+
+  // Get packets from buffer.
+  int extracted_samples = 0;
+  if (packet && *operation != kAlternativePlc &&
+      *operation != kAlternativePlcIncreaseTimestamp &&
+      *operation != kAudioRepetition &&
+      *operation != kAudioRepetitionIncreaseTimestamp) {
+    sync_buffer_->IncreaseEndTimestamp(packet->timestamp - end_timestamp);
+    if (decision_logic_->CngOff()) {
+      // Adjustment of timestamp only corresponds to an actual packet loss
+      // if comfort noise is not played. If comfort noise was just played,
+      // this adjustment of timestamp is only done to get back in sync with the
+      // stream timestamp; no loss to report.
+      stats_.LostSamples(packet->timestamp - end_timestamp);
+    }
+
+    if (*operation != kRfc3389Cng) {
+      // We are about to decode and use a non-CNG packet.
+      decision_logic_->SetCngOff();
+    }
+
+    extracted_samples = ExtractPackets(required_samples, packet_list);
+    if (extracted_samples < 0) {
+      return kPacketBufferCorruption;
+    }
+  }
+
+  if (*operation == kAccelerate || *operation == kFastAccelerate ||
+      *operation == kPreemptiveExpand) {
+    decision_logic_->set_sample_memory(samples_left + extracted_samples);
+    decision_logic_->set_prev_time_scale(true);
+  }
+
+  if (*operation == kAccelerate || *operation == kFastAccelerate) {
+    // Check that we have enough data (30ms) to do accelerate.
+    if (extracted_samples + samples_left < static_cast<int>(samples_30_ms)) {
+      // TODO(hlundin): Write test for this.
+      // Not enough, do normal operation instead.
+      *operation = kNormal;
+    }
+  }
+
+  timestamp_ = end_timestamp;
+  return 0;
+}
+
+int NetEqImpl::Decode(PacketList* packet_list, Operations* operation,
+                      int* decoded_length,
+                      AudioDecoder::SpeechType* speech_type) {
+  *speech_type = AudioDecoder::kSpeech;
+
+  // When packet_list is empty, we may be in kCodecInternalCng mode, and for
+  // that we use current active decoder.
+  AudioDecoder* decoder = decoder_database_->GetActiveDecoder();
+
+  if (!packet_list->empty()) {
+    const Packet& packet = packet_list->front();
+    uint8_t payload_type = packet.payload_type;
+    if (!decoder_database_->IsComfortNoise(payload_type)) {
+      decoder = decoder_database_->GetDecoder(payload_type);
+      assert(decoder);
+      if (!decoder) {
+        RTC_LOG(LS_WARNING)
+            << "Unknown payload type " << static_cast<int>(payload_type);
+        packet_list->clear();
+        return kDecoderNotFound;
+      }
+      bool decoder_changed;
+      decoder_database_->SetActiveDecoder(payload_type, &decoder_changed);
+      if (decoder_changed) {
+        // We have a new decoder. Re-init some values.
+        const DecoderDatabase::DecoderInfo* decoder_info = decoder_database_
+            ->GetDecoderInfo(payload_type);
+        assert(decoder_info);
+        if (!decoder_info) {
+          RTC_LOG(LS_WARNING)
+              << "Unknown payload type " << static_cast<int>(payload_type);
+          packet_list->clear();
+          return kDecoderNotFound;
+        }
+        // If sampling rate or number of channels has changed, we need to make
+        // a reset.
+        if (decoder_info->SampleRateHz() != fs_hz_ ||
+            decoder->Channels() != algorithm_buffer_->Channels()) {
+          // TODO(tlegrand): Add unittest to cover this event.
+          SetSampleRateAndChannels(decoder_info->SampleRateHz(),
+                                   decoder->Channels());
+        }
+        sync_buffer_->set_end_timestamp(timestamp_);
+        playout_timestamp_ = timestamp_;
+      }
+    }
+  }
+
+  if (reset_decoder_) {
+    // TODO(hlundin): Write test for this.
+    if (decoder)
+      decoder->Reset();
+
+    // Reset comfort noise decoder.
+    ComfortNoiseDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
+    if (cng_decoder)
+      cng_decoder->Reset();
+
+    reset_decoder_ = false;
+  }
+
+  *decoded_length = 0;
+  // Update codec-internal PLC state.
+  if ((*operation == kMerge) && decoder && decoder->HasDecodePlc()) {
+    decoder->DecodePlc(1, &decoded_buffer_[*decoded_length]);
+  }
+
+  int return_value;
+  if (*operation == kCodecInternalCng) {
+    RTC_DCHECK(packet_list->empty());
+    return_value = DecodeCng(decoder, decoded_length, speech_type);
+  } else {
+    return_value = DecodeLoop(packet_list, *operation, decoder,
+                              decoded_length, speech_type);
+  }
+
+  if (*decoded_length < 0) {
+    // Error returned from the decoder.
+    *decoded_length = 0;
+    sync_buffer_->IncreaseEndTimestamp(
+        static_cast<uint32_t>(decoder_frame_length_));
+    int error_code = 0;
+    if (decoder)
+      error_code = decoder->ErrorCode();
+    if (error_code != 0) {
+      // Got some error code from the decoder.
+      return_value = kDecoderErrorCode;
+      RTC_LOG(LS_WARNING) << "Decoder returned error code: " << error_code;
+    } else {
+      // Decoder does not implement error codes. Return generic error.
+      return_value = kOtherDecoderError;
+      RTC_LOG(LS_WARNING) << "Decoder error (no error code)";
+    }
+    *operation = kExpand;  // Do expansion to get data instead.
+  }
+  if (*speech_type != AudioDecoder::kComfortNoise) {
+    // Don't increment timestamp if codec returned CNG speech type
+    // since in this case, the we will increment the CNGplayedTS counter.
+    // Increase with number of samples per channel.
+    assert(*decoded_length == 0 ||
+           (decoder && decoder->Channels() == sync_buffer_->Channels()));
+    sync_buffer_->IncreaseEndTimestamp(
+        *decoded_length / static_cast<int>(sync_buffer_->Channels()));
+  }
+  return return_value;
+}
+
+int NetEqImpl::DecodeCng(AudioDecoder* decoder, int* decoded_length,
+                         AudioDecoder::SpeechType* speech_type) {
+  if (!decoder) {
+    // This happens when active decoder is not defined.
+    *decoded_length = -1;
+    return 0;
+  }
+
+  while (*decoded_length < rtc::dchecked_cast<int>(output_size_samples_)) {
+    const int length = decoder->Decode(
+            nullptr, 0, fs_hz_,
+            (decoded_buffer_length_ - *decoded_length) * sizeof(int16_t),
+            &decoded_buffer_[*decoded_length], speech_type);
+    if (length > 0) {
+      *decoded_length += length;
+    } else {
+      // Error.
+      RTC_LOG(LS_WARNING) << "Failed to decode CNG";
+      *decoded_length = -1;
+      break;
+    }
+    if (*decoded_length > static_cast<int>(decoded_buffer_length_)) {
+      // Guard against overflow.
+      RTC_LOG(LS_WARNING) << "Decoded too much CNG.";
+      return kDecodedTooMuch;
+    }
+  }
+  return 0;
+}
+
+int NetEqImpl::DecodeLoop(PacketList* packet_list, const Operations& operation,
+                          AudioDecoder* decoder, int* decoded_length,
+                          AudioDecoder::SpeechType* speech_type) {
+  RTC_DCHECK(last_decoded_timestamps_.empty());
+
+  // Do decoding.
+  while (
+      !packet_list->empty() &&
+      !decoder_database_->IsComfortNoise(packet_list->front().payload_type)) {
+    assert(decoder);  // At this point, we must have a decoder object.
+    // The number of channels in the |sync_buffer_| should be the same as the
+    // number decoder channels.
+    assert(sync_buffer_->Channels() == decoder->Channels());
+    assert(decoded_buffer_length_ >= kMaxFrameSize * decoder->Channels());
+    assert(operation == kNormal || operation == kAccelerate ||
+           operation == kFastAccelerate || operation == kMerge ||
+           operation == kPreemptiveExpand);
+
+    auto opt_result = packet_list->front().frame->Decode(
+        rtc::ArrayView<int16_t>(&decoded_buffer_[*decoded_length],
+                                decoded_buffer_length_ - *decoded_length));
+    last_decoded_timestamps_.push_back(packet_list->front().timestamp);
+    packet_list->pop_front();
+    if (opt_result) {
+      const auto& result = *opt_result;
+      *speech_type = result.speech_type;
+      if (result.num_decoded_samples > 0) {
+        *decoded_length += rtc::dchecked_cast<int>(result.num_decoded_samples);
+        // Update |decoder_frame_length_| with number of samples per channel.
+        decoder_frame_length_ =
+            result.num_decoded_samples / decoder->Channels();
+      }
+    } else {
+      // Error.
+      // TODO(ossu): What to put here?
+      RTC_LOG(LS_WARNING) << "Decode error";
+      *decoded_length = -1;
+      packet_list->clear();
+      break;
+    }
+    if (*decoded_length > rtc::dchecked_cast<int>(decoded_buffer_length_)) {
+      // Guard against overflow.
+      RTC_LOG(LS_WARNING) << "Decoded too much.";
+      packet_list->clear();
+      return kDecodedTooMuch;
+    }
+  }  // End of decode loop.
+
+  // If the list is not empty at this point, either a decoding error terminated
+  // the while-loop, or list must hold exactly one CNG packet.
+  assert(
+      packet_list->empty() || *decoded_length < 0 ||
+      (packet_list->size() == 1 &&
+       decoder_database_->IsComfortNoise(packet_list->front().payload_type)));
+  return 0;
+}
+
+void NetEqImpl::DoNormal(const int16_t* decoded_buffer, size_t decoded_length,
+                         AudioDecoder::SpeechType speech_type, bool play_dtmf) {
+  assert(normal_.get());
+  assert(mute_factor_array_.get());
+  normal_->Process(decoded_buffer, decoded_length, last_mode_,
+                   mute_factor_array_.get(), algorithm_buffer_.get());
+  if (decoded_length != 0) {
+    last_mode_ = kModeNormal;
+  }
+
+  // If last packet was decoded as an inband CNG, set mode to CNG instead.
+  if ((speech_type == AudioDecoder::kComfortNoise)
+      || ((last_mode_ == kModeCodecInternalCng)
+          && (decoded_length == 0))) {
+    // TODO(hlundin): Remove second part of || statement above.
+    last_mode_ = kModeCodecInternalCng;
+  }
+
+  if (!play_dtmf) {
+    dtmf_tone_generator_->Reset();
+  }
+}
+
+void NetEqImpl::DoMerge(int16_t* decoded_buffer, size_t decoded_length,
+                        AudioDecoder::SpeechType speech_type, bool play_dtmf) {
+  assert(mute_factor_array_.get());
+  assert(merge_.get());
+  size_t new_length = merge_->Process(decoded_buffer, decoded_length,
+                                      mute_factor_array_.get(),
+                                      algorithm_buffer_.get());
+  // Correction can be negative.
+  int expand_length_correction =
+      rtc::dchecked_cast<int>(new_length) -
+      rtc::dchecked_cast<int>(decoded_length / algorithm_buffer_->Channels());
+
+  // Update in-call and post-call statistics.
+  if (expand_->MuteFactor(0) == 0) {
+    // Expand generates only noise.
+    stats_.ExpandedNoiseSamplesCorrection(expand_length_correction);
+  } else {
+    // Expansion generates more than only noise.
+    stats_.ExpandedVoiceSamplesCorrection(expand_length_correction);
+  }
+
+  last_mode_ = kModeMerge;
+  // If last packet was decoded as an inband CNG, set mode to CNG instead.
+  if (speech_type == AudioDecoder::kComfortNoise) {
+    last_mode_ = kModeCodecInternalCng;
+  }
+  expand_->Reset();
+  if (!play_dtmf) {
+    dtmf_tone_generator_->Reset();
+  }
+}
+
+int NetEqImpl::DoExpand(bool play_dtmf) {
+  while ((sync_buffer_->FutureLength() - expand_->overlap_length()) <
+      output_size_samples_) {
+    algorithm_buffer_->Clear();
+    int return_value = expand_->Process(algorithm_buffer_.get());
+    size_t length = algorithm_buffer_->Size();
+    bool is_new_concealment_event = (last_mode_ != kModeExpand);
+
+    // Update in-call and post-call statistics.
+    if (expand_->MuteFactor(0) == 0) {
+      // Expand operation generates only noise.
+      stats_.ExpandedNoiseSamples(length, is_new_concealment_event);
+    } else {
+      // Expand operation generates more than only noise.
+      stats_.ExpandedVoiceSamples(length, is_new_concealment_event);
+    }
+
+    last_mode_ = kModeExpand;
+
+    if (return_value < 0) {
+      return return_value;
+    }
+
+    sync_buffer_->PushBack(*algorithm_buffer_);
+    algorithm_buffer_->Clear();
+  }
+  if (!play_dtmf) {
+    dtmf_tone_generator_->Reset();
+  }
+
+  if (!generated_noise_stopwatch_) {
+    // Start a new stopwatch since we may be covering for a lost CNG packet.
+    generated_noise_stopwatch_ = tick_timer_->GetNewStopwatch();
+  }
+
+  return 0;
+}
+
+int NetEqImpl::DoAccelerate(int16_t* decoded_buffer,
+                            size_t decoded_length,
+                            AudioDecoder::SpeechType speech_type,
+                            bool play_dtmf,
+                            bool fast_accelerate) {
+  const size_t required_samples =
+      static_cast<size_t>(240 * fs_mult_);  // Must have 30 ms.
+  size_t borrowed_samples_per_channel = 0;
+  size_t num_channels = algorithm_buffer_->Channels();
+  size_t decoded_length_per_channel = decoded_length / num_channels;
+  if (decoded_length_per_channel < required_samples) {
+    // Must move data from the |sync_buffer_| in order to get 30 ms.
+    borrowed_samples_per_channel = static_cast<int>(required_samples -
+        decoded_length_per_channel);
+    memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels],
+            decoded_buffer,
+            sizeof(int16_t) * decoded_length);
+    sync_buffer_->ReadInterleavedFromEnd(borrowed_samples_per_channel,
+                                         decoded_buffer);
+    decoded_length = required_samples * num_channels;
+  }
+
+  size_t samples_removed;
+  Accelerate::ReturnCodes return_code =
+      accelerate_->Process(decoded_buffer, decoded_length, fast_accelerate,
+                           algorithm_buffer_.get(), &samples_removed);
+  stats_.AcceleratedSamples(samples_removed);
+  switch (return_code) {
+    case Accelerate::kSuccess:
+      last_mode_ = kModeAccelerateSuccess;
+      break;
+    case Accelerate::kSuccessLowEnergy:
+      last_mode_ = kModeAccelerateLowEnergy;
+      break;
+    case Accelerate::kNoStretch:
+      last_mode_ = kModeAccelerateFail;
+      break;
+    case Accelerate::kError:
+      // TODO(hlundin): Map to kModeError instead?
+      last_mode_ = kModeAccelerateFail;
+      return kAccelerateError;
+  }
+
+  if (borrowed_samples_per_channel > 0) {
+    // Copy borrowed samples back to the |sync_buffer_|.
+    size_t length = algorithm_buffer_->Size();
+    if (length < borrowed_samples_per_channel) {
+      // This destroys the beginning of the buffer, but will not cause any
+      // problems.
+      sync_buffer_->ReplaceAtIndex(*algorithm_buffer_,
+                                   sync_buffer_->Size() -
+                                   borrowed_samples_per_channel);
+      sync_buffer_->PushFrontZeros(borrowed_samples_per_channel - length);
+      algorithm_buffer_->PopFront(length);
+      assert(algorithm_buffer_->Empty());
+    } else {
+      sync_buffer_->ReplaceAtIndex(*algorithm_buffer_,
+                                   borrowed_samples_per_channel,
+                                   sync_buffer_->Size() -
+                                   borrowed_samples_per_channel);
+      algorithm_buffer_->PopFront(borrowed_samples_per_channel);
+    }
+  }
+
+  // If last packet was decoded as an inband CNG, set mode to CNG instead.
+  if (speech_type == AudioDecoder::kComfortNoise) {
+    last_mode_ = kModeCodecInternalCng;
+  }
+  if (!play_dtmf) {
+    dtmf_tone_generator_->Reset();
+  }
+  expand_->Reset();
+  return 0;
+}
+
+int NetEqImpl::DoPreemptiveExpand(int16_t* decoded_buffer,
+                                  size_t decoded_length,
+                                  AudioDecoder::SpeechType speech_type,
+                                  bool play_dtmf) {
+  const size_t required_samples =
+      static_cast<size_t>(240 * fs_mult_);  // Must have 30 ms.
+  size_t num_channels = algorithm_buffer_->Channels();
+  size_t borrowed_samples_per_channel = 0;
+  size_t old_borrowed_samples_per_channel = 0;
+  size_t decoded_length_per_channel = decoded_length / num_channels;
+  if (decoded_length_per_channel < required_samples) {
+    // Must move data from the |sync_buffer_| in order to get 30 ms.
+    borrowed_samples_per_channel =
+        required_samples - decoded_length_per_channel;
+    // Calculate how many of these were already played out.
+    old_borrowed_samples_per_channel =
+        (borrowed_samples_per_channel > sync_buffer_->FutureLength()) ?
+        (borrowed_samples_per_channel - sync_buffer_->FutureLength()) : 0;
+    memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels],
+            decoded_buffer,
+            sizeof(int16_t) * decoded_length);
+    sync_buffer_->ReadInterleavedFromEnd(borrowed_samples_per_channel,
+                                         decoded_buffer);
+    decoded_length = required_samples * num_channels;
+  }
+
+  size_t samples_added;
+  PreemptiveExpand::ReturnCodes return_code = preemptive_expand_->Process(
+      decoded_buffer, decoded_length,
+      old_borrowed_samples_per_channel,
+      algorithm_buffer_.get(), &samples_added);
+  stats_.PreemptiveExpandedSamples(samples_added);
+  switch (return_code) {
+    case PreemptiveExpand::kSuccess:
+      last_mode_ = kModePreemptiveExpandSuccess;
+      break;
+    case PreemptiveExpand::kSuccessLowEnergy:
+      last_mode_ = kModePreemptiveExpandLowEnergy;
+      break;
+    case PreemptiveExpand::kNoStretch:
+      last_mode_ = kModePreemptiveExpandFail;
+      break;
+    case PreemptiveExpand::kError:
+      // TODO(hlundin): Map to kModeError instead?
+      last_mode_ = kModePreemptiveExpandFail;
+      return kPreemptiveExpandError;
+  }
+
+  if (borrowed_samples_per_channel > 0) {
+    // Copy borrowed samples back to the |sync_buffer_|.
+    sync_buffer_->ReplaceAtIndex(
+        *algorithm_buffer_, borrowed_samples_per_channel,
+        sync_buffer_->Size() - borrowed_samples_per_channel);
+    algorithm_buffer_->PopFront(borrowed_samples_per_channel);
+  }
+
+  // If last packet was decoded as an inband CNG, set mode to CNG instead.
+  if (speech_type == AudioDecoder::kComfortNoise) {
+    last_mode_ = kModeCodecInternalCng;
+  }
+  if (!play_dtmf) {
+    dtmf_tone_generator_->Reset();
+  }
+  expand_->Reset();
+  return 0;
+}
+
+int NetEqImpl::DoRfc3389Cng(PacketList* packet_list, bool play_dtmf) {
+  if (!packet_list->empty()) {
+    // Must have exactly one SID frame at this point.
+    assert(packet_list->size() == 1);
+    const Packet& packet = packet_list->front();
+    if (!decoder_database_->IsComfortNoise(packet.payload_type)) {
+      RTC_LOG(LS_ERROR) << "Trying to decode non-CNG payload as CNG.";
+      return kOtherError;
+    }
+    if (comfort_noise_->UpdateParameters(packet) ==
+        ComfortNoise::kInternalError) {
+      algorithm_buffer_->Zeros(output_size_samples_);
+      return -comfort_noise_->internal_error_code();
+    }
+  }
+  int cn_return = comfort_noise_->Generate(output_size_samples_,
+                                           algorithm_buffer_.get());
+  expand_->Reset();
+  last_mode_ = kModeRfc3389Cng;
+  if (!play_dtmf) {
+    dtmf_tone_generator_->Reset();
+  }
+  if (cn_return == ComfortNoise::kInternalError) {
+    RTC_LOG(LS_WARNING) << "Comfort noise generator returned error code: "
+                        << comfort_noise_->internal_error_code();
+    return kComfortNoiseErrorCode;
+  } else if (cn_return == ComfortNoise::kUnknownPayloadType) {
+    return kUnknownRtpPayloadType;
+  }
+  return 0;
+}
+
+void NetEqImpl::DoCodecInternalCng(const int16_t* decoded_buffer,
+                                   size_t decoded_length) {
+  RTC_DCHECK(normal_.get());
+  RTC_DCHECK(mute_factor_array_.get());
+  normal_->Process(decoded_buffer, decoded_length, last_mode_,
+                   mute_factor_array_.get(), algorithm_buffer_.get());
+  last_mode_ = kModeCodecInternalCng;
+  expand_->Reset();
+}
+
+int NetEqImpl::DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf) {
+  // This block of the code and the block further down, handling |dtmf_switch|
+  // are commented out. Otherwise playing out-of-band DTMF would fail in VoE
+  // test, DtmfTest.ManualSuccessfullySendsOutOfBandTelephoneEvents. This is
+  // equivalent to |dtmf_switch| always be false.
+  //
+  // See http://webrtc-codereview.appspot.com/1195004/ for discussion
+  // On this issue. This change might cause some glitches at the point of
+  // switch from audio to DTMF. Issue 1545 is filed to track this.
+  //
+  //  bool dtmf_switch = false;
+  //  if ((last_mode_ != kModeDtmf) && dtmf_tone_generator_->initialized()) {
+  //    // Special case; see below.
+  //    // We must catch this before calling Generate, since |initialized| is
+  //    // modified in that call.
+  //    dtmf_switch = true;
+  //  }
+
+  int dtmf_return_value = 0;
+  if (!dtmf_tone_generator_->initialized()) {
+    // Initialize if not already done.
+    dtmf_return_value = dtmf_tone_generator_->Init(fs_hz_, dtmf_event.event_no,
+                                                   dtmf_event.volume);
+  }
+
+  if (dtmf_return_value == 0) {
+    // Generate DTMF signal.
+    dtmf_return_value = dtmf_tone_generator_->Generate(output_size_samples_,
+                                                       algorithm_buffer_.get());
+  }
+
+  if (dtmf_return_value < 0) {
+    algorithm_buffer_->Zeros(output_size_samples_);
+    return dtmf_return_value;
+  }
+
+  //  if (dtmf_switch) {
+  //    // This is the special case where the previous operation was DTMF
+  //    // overdub, but the current instruction is "regular" DTMF. We must make
+  //    // sure that the DTMF does not have any discontinuities. The first DTMF
+  //    // sample that we generate now must be played out immediately, therefore
+  //    // it must be copied to the speech buffer.
+  //    // TODO(hlundin): This code seems incorrect. (Legacy.) Write test and
+  //    // verify correct operation.
+  //    assert(false);
+  //    // Must generate enough data to replace all of the |sync_buffer_|
+  //    // "future".
+  //    int required_length = sync_buffer_->FutureLength();
+  //    assert(dtmf_tone_generator_->initialized());
+  //    dtmf_return_value = dtmf_tone_generator_->Generate(required_length,
+  //                                                       algorithm_buffer_);
+  //    assert((size_t) required_length == algorithm_buffer_->Size());
+  //    if (dtmf_return_value < 0) {
+  //      algorithm_buffer_->Zeros(output_size_samples_);
+  //      return dtmf_return_value;
+  //    }
+  //
+  //    // Overwrite the "future" part of the speech buffer with the new DTMF
+  //    // data.
+  //    // TODO(hlundin): It seems that this overwriting has gone lost.
+  //    // Not adapted for multi-channel yet.
+  //    assert(algorithm_buffer_->Channels() == 1);
+  //    if (algorithm_buffer_->Channels() != 1) {
+  //      RTC_LOG(LS_WARNING) << "DTMF not supported for more than one channel";
+  //      return kStereoNotSupported;
+  //    }
+  //    // Shuffle the remaining data to the beginning of algorithm buffer.
+  //    algorithm_buffer_->PopFront(sync_buffer_->FutureLength());
+  //  }
+
+  sync_buffer_->IncreaseEndTimestamp(
+      static_cast<uint32_t>(output_size_samples_));
+  expand_->Reset();
+  last_mode_ = kModeDtmf;
+
+  // Set to false because the DTMF is already in the algorithm buffer.
+  *play_dtmf = false;
+  return 0;
+}
+
+void NetEqImpl::DoAlternativePlc(bool increase_timestamp) {
+  AudioDecoder* decoder = decoder_database_->GetActiveDecoder();
+  size_t length;
+  if (decoder && decoder->HasDecodePlc()) {
+    // Use the decoder's packet-loss concealment.
+    // TODO(hlundin): Will probably need a longer buffer for multi-channel.
+    int16_t decoded_buffer[kMaxFrameSize];
+    length = decoder->DecodePlc(1, decoded_buffer);
+    if (length > 0)
+      algorithm_buffer_->PushBackInterleaved(decoded_buffer, length);
+  } else {
+    // Do simple zero-stuffing.
+    length = output_size_samples_;
+    algorithm_buffer_->Zeros(length);
+    // By not advancing the timestamp, NetEq inserts samples.
+    stats_.AddZeros(length);
+  }
+  if (increase_timestamp) {
+    sync_buffer_->IncreaseEndTimestamp(static_cast<uint32_t>(length));
+  }
+  expand_->Reset();
+}
+
+int NetEqImpl::DtmfOverdub(const DtmfEvent& dtmf_event, size_t num_channels,
+                           int16_t* output) const {
+  size_t out_index = 0;
+  size_t overdub_length = output_size_samples_;  // Default value.
+
+  if (sync_buffer_->dtmf_index() > sync_buffer_->next_index()) {
+    // Special operation for transition from "DTMF only" to "DTMF overdub".
+    out_index = std::min(
+        sync_buffer_->dtmf_index() - sync_buffer_->next_index(),
+        output_size_samples_);
+    overdub_length = output_size_samples_ - out_index;
+  }
+
+  AudioMultiVector dtmf_output(num_channels);
+  int dtmf_return_value = 0;
+  if (!dtmf_tone_generator_->initialized()) {
+    dtmf_return_value = dtmf_tone_generator_->Init(fs_hz_, dtmf_event.event_no,
+                                                   dtmf_event.volume);
+  }
+  if (dtmf_return_value == 0) {
+    dtmf_return_value = dtmf_tone_generator_->Generate(overdub_length,
+                                                       &dtmf_output);
+    assert(overdub_length == dtmf_output.Size());
+  }
+  dtmf_output.ReadInterleaved(overdub_length, &output[out_index]);
+  return dtmf_return_value < 0 ? dtmf_return_value : 0;
+}
+
+int NetEqImpl::ExtractPackets(size_t required_samples,
+                              PacketList* packet_list) {
+  bool first_packet = true;
+  uint8_t prev_payload_type = 0;
+  uint32_t prev_timestamp = 0;
+  uint16_t prev_sequence_number = 0;
+  bool next_packet_available = false;
+
+  const Packet* next_packet = packet_buffer_->PeekNextPacket();
+  RTC_DCHECK(next_packet);
+  if (!next_packet) {
+    RTC_LOG(LS_ERROR) << "Packet buffer unexpectedly empty.";
+    return -1;
+  }
+  uint32_t first_timestamp = next_packet->timestamp;
+  size_t extracted_samples = 0;
+
+  // Packet extraction loop.
+  do {
+    timestamp_ = next_packet->timestamp;
+    rtc::Optional<Packet> packet = packet_buffer_->GetNextPacket();
+    // |next_packet| may be invalid after the |packet_buffer_| operation.
+    next_packet = nullptr;
+    if (!packet) {
+      RTC_LOG(LS_ERROR) << "Should always be able to extract a packet here";
+      assert(false);  // Should always be able to extract a packet here.
+      return -1;
+    }
+    const uint64_t waiting_time_ms = packet->waiting_time->ElapsedMs();
+    stats_.StoreWaitingTime(waiting_time_ms);
+    RTC_DCHECK(!packet->empty());
+
+    if (first_packet) {
+      first_packet = false;
+      if (nack_enabled_) {
+        RTC_DCHECK(nack_);
+        // TODO(henrik.lundin): Should we update this for all decoded packets?
+        nack_->UpdateLastDecodedPacket(packet->sequence_number,
+                                       packet->timestamp);
+      }
+      prev_sequence_number = packet->sequence_number;
+      prev_timestamp = packet->timestamp;
+      prev_payload_type = packet->payload_type;
+    }
+
+    const bool has_cng_packet =
+        decoder_database_->IsComfortNoise(packet->payload_type);
+    // Store number of extracted samples.
+    size_t packet_duration = 0;
+    if (packet->frame) {
+      packet_duration = packet->frame->Duration();
+      // TODO(ossu): Is this the correct way to track Opus FEC packets?
+      if (packet->priority.codec_level > 0) {
+        stats_.SecondaryDecodedSamples(
+            rtc::dchecked_cast<int>(packet_duration));
+      }
+    } else if (!has_cng_packet) {
+      RTC_LOG(LS_WARNING) << "Unknown payload type "
+                          << static_cast<int>(packet->payload_type);
+      RTC_NOTREACHED();
+    }
+
+    if (packet_duration == 0) {
+      // Decoder did not return a packet duration. Assume that the packet
+      // contains the same number of samples as the previous one.
+      packet_duration = decoder_frame_length_;
+    }
+    extracted_samples = packet->timestamp - first_timestamp + packet_duration;
+
+    stats_.JitterBufferDelay(extracted_samples, waiting_time_ms);
+
+    packet_list->push_back(std::move(*packet));  // Store packet in list.
+    packet = rtc::nullopt;  // Ensure it's never used after the move.
+
+    // Check what packet is available next.
+    next_packet = packet_buffer_->PeekNextPacket();
+    next_packet_available = false;
+    if (next_packet && prev_payload_type == next_packet->payload_type &&
+        !has_cng_packet) {
+      int16_t seq_no_diff = next_packet->sequence_number - prev_sequence_number;
+      size_t ts_diff = next_packet->timestamp - prev_timestamp;
+      if (seq_no_diff == 1 ||
+          (seq_no_diff == 0 && ts_diff == decoder_frame_length_)) {
+        // The next sequence number is available, or the next part of a packet
+        // that was split into pieces upon insertion.
+        next_packet_available = true;
+      }
+      prev_sequence_number = next_packet->sequence_number;
+    }
+  } while (extracted_samples < required_samples && next_packet_available);
+
+  if (extracted_samples > 0) {
+    // Delete old packets only when we are going to decode something. Otherwise,
+    // we could end up in the situation where we never decode anything, since
+    // all incoming packets are considered too old but the buffer will also
+    // never be flooded and flushed.
+    packet_buffer_->DiscardAllOldPackets(timestamp_, &stats_);
+  }
+
+  return rtc::dchecked_cast<int>(extracted_samples);
+}
+
+void NetEqImpl::UpdatePlcComponents(int fs_hz, size_t channels) {
+  // Delete objects and create new ones.
+  expand_.reset(expand_factory_->Create(background_noise_.get(),
+                                        sync_buffer_.get(), &random_vector_,
+                                        &stats_, fs_hz, channels));
+  merge_.reset(new Merge(fs_hz, channels, expand_.get(), sync_buffer_.get()));
+}
+
+void NetEqImpl::SetSampleRateAndChannels(int fs_hz, size_t channels) {
+  RTC_LOG(LS_VERBOSE) << "SetSampleRateAndChannels " << fs_hz << " "
+                      << channels;
+  // TODO(hlundin): Change to an enumerator and skip assert.
+  assert(fs_hz == 8000 || fs_hz == 16000 || fs_hz ==  32000 || fs_hz == 48000);
+  assert(channels > 0);
+
+  fs_hz_ = fs_hz;
+  fs_mult_ = fs_hz / 8000;
+  output_size_samples_ = static_cast<size_t>(kOutputSizeMs * 8 * fs_mult_);
+  decoder_frame_length_ = 3 * output_size_samples_;  // Initialize to 30ms.
+
+  last_mode_ = kModeNormal;
+
+  // Create a new array of mute factors and set all to 1.
+  mute_factor_array_.reset(new int16_t[channels]);
+  for (size_t i = 0; i < channels; ++i) {
+    mute_factor_array_[i] = 16384;  // 1.0 in Q14.
+  }
+
+  ComfortNoiseDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
+  if (cng_decoder)
+    cng_decoder->Reset();
+
+  // Reinit post-decode VAD with new sample rate.
+  assert(vad_.get());  // Cannot be NULL here.
+  vad_->Init();
+
+  // Delete algorithm buffer and create a new one.
+  algorithm_buffer_.reset(new AudioMultiVector(channels));
+
+  // Delete sync buffer and create a new one.
+  sync_buffer_.reset(new SyncBuffer(channels, kSyncBufferSize * fs_mult_));
+
+  // Delete BackgroundNoise object and create a new one.
+  background_noise_.reset(new BackgroundNoise(channels));
+  background_noise_->set_mode(background_noise_mode_);
+
+  // Reset random vector.
+  random_vector_.Reset();
+
+  UpdatePlcComponents(fs_hz, channels);
+
+  // Move index so that we create a small set of future samples (all 0).
+  sync_buffer_->set_next_index(sync_buffer_->next_index() -
+      expand_->overlap_length());
+
+  normal_.reset(new Normal(fs_hz, decoder_database_.get(), *background_noise_,
+                           expand_.get()));
+  accelerate_.reset(
+      accelerate_factory_->Create(fs_hz, channels, *background_noise_));
+  preemptive_expand_.reset(preemptive_expand_factory_->Create(
+      fs_hz, channels, *background_noise_, expand_->overlap_length()));
+
+  // Delete ComfortNoise object and create a new one.
+  comfort_noise_.reset(new ComfortNoise(fs_hz, decoder_database_.get(),
+                                        sync_buffer_.get()));
+
+  // Verify that |decoded_buffer_| is long enough.
+  if (decoded_buffer_length_ < kMaxFrameSize * channels) {
+    // Reallocate to larger size.
+    decoded_buffer_length_ = kMaxFrameSize * channels;
+    decoded_buffer_.reset(new int16_t[decoded_buffer_length_]);
+  }
+
+  // Create DecisionLogic if it is not created yet, then communicate new sample
+  // rate and output size to DecisionLogic object.
+  if (!decision_logic_.get()) {
+    CreateDecisionLogic();
+  }
+  decision_logic_->SetSampleRate(fs_hz_, output_size_samples_);
+}
+
+NetEqImpl::OutputType NetEqImpl::LastOutputType() {
+  assert(vad_.get());
+  assert(expand_.get());
+  if (last_mode_ == kModeCodecInternalCng || last_mode_ == kModeRfc3389Cng) {
+    return OutputType::kCNG;
+  } else if (last_mode_ == kModeExpand && expand_->MuteFactor(0) == 0) {
+    // Expand mode has faded down to background noise only (very long expand).
+    return OutputType::kPLCCNG;
+  } else if (last_mode_ == kModeExpand) {
+    return OutputType::kPLC;
+  } else if (vad_->running() && !vad_->active_speech()) {
+    return OutputType::kVadPassive;
+  } else {
+    return OutputType::kNormalSpeech;
+  }
+}
+
+void NetEqImpl::CreateDecisionLogic() {
+  decision_logic_.reset(DecisionLogic::Create(
+      fs_hz_, output_size_samples_, playout_mode_, decoder_database_.get(),
+      *packet_buffer_.get(), delay_manager_.get(), buffer_level_filter_.get(),
+      tick_timer_.get()));
+}
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/neteq_impl.h b/modules/audio_coding/neteq/neteq_impl.h
new file mode 100644
index 0000000..bdeb020
--- /dev/null
+++ b/modules/audio_coding/neteq/neteq_impl.h
@@ -0,0 +1,449 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_NETEQ_IMPL_H_
+#define MODULES_AUDIO_CODING_NETEQ_NETEQ_IMPL_H_
+
+#include <memory>
+#include <string>
+
+#include "api/optional.h"
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "modules/audio_coding/neteq/defines.h"
+#include "modules/audio_coding/neteq/include/neteq.h"
+#include "modules/audio_coding/neteq/packet.h"  // Declare PacketList.
+#include "modules/audio_coding/neteq/random_vector.h"
+#include "modules/audio_coding/neteq/rtcp.h"
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+#include "modules/audio_coding/neteq/tick_timer.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/thread_annotations.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Forward declarations.
+class Accelerate;
+class BackgroundNoise;
+class BufferLevelFilter;
+class ComfortNoise;
+class DecisionLogic;
+class DecoderDatabase;
+class DelayManager;
+class DelayPeakDetector;
+class DtmfBuffer;
+class DtmfToneGenerator;
+class Expand;
+class Merge;
+class NackTracker;
+class Normal;
+class PacketBuffer;
+class RedPayloadSplitter;
+class PostDecodeVad;
+class PreemptiveExpand;
+class RandomVector;
+class SyncBuffer;
+class TimestampScaler;
+struct AccelerateFactory;
+struct DtmfEvent;
+struct ExpandFactory;
+struct PreemptiveExpandFactory;
+
+class NetEqImpl : public webrtc::NetEq {
+ public:
+  enum class OutputType {
+    kNormalSpeech,
+    kPLC,
+    kCNG,
+    kPLCCNG,
+    kVadPassive
+  };
+
+  enum ErrorCodes {
+    kNoError = 0,
+    kOtherError,
+    kUnknownRtpPayloadType,
+    kDecoderNotFound,
+    kInvalidPointer,
+    kAccelerateError,
+    kPreemptiveExpandError,
+    kComfortNoiseErrorCode,
+    kDecoderErrorCode,
+    kOtherDecoderError,
+    kInvalidOperation,
+    kDtmfParsingError,
+    kDtmfInsertError,
+    kSampleUnderrun,
+    kDecodedTooMuch,
+    kRedundancySplitError,
+    kPacketBufferCorruption
+  };
+
+  struct Dependencies {
+    // The constructor populates the Dependencies struct with the default
+    // implementations of the objects. They can all be replaced by the user
+    // before sending the struct to the NetEqImpl constructor. However, there
+    // are dependencies between some of the classes inside the struct, so
+    // swapping out one may make it necessary to re-create another one.
+    explicit Dependencies(
+        const NetEq::Config& config,
+        const rtc::scoped_refptr<AudioDecoderFactory>& decoder_factory);
+    ~Dependencies();
+
+    std::unique_ptr<TickTimer> tick_timer;
+    std::unique_ptr<BufferLevelFilter> buffer_level_filter;
+    std::unique_ptr<DecoderDatabase> decoder_database;
+    std::unique_ptr<DelayPeakDetector> delay_peak_detector;
+    std::unique_ptr<DelayManager> delay_manager;
+    std::unique_ptr<DtmfBuffer> dtmf_buffer;
+    std::unique_ptr<DtmfToneGenerator> dtmf_tone_generator;
+    std::unique_ptr<PacketBuffer> packet_buffer;
+    std::unique_ptr<RedPayloadSplitter> red_payload_splitter;
+    std::unique_ptr<TimestampScaler> timestamp_scaler;
+    std::unique_ptr<AccelerateFactory> accelerate_factory;
+    std::unique_ptr<ExpandFactory> expand_factory;
+    std::unique_ptr<PreemptiveExpandFactory> preemptive_expand_factory;
+  };
+
+  // Creates a new NetEqImpl object.
+  NetEqImpl(const NetEq::Config& config,
+            Dependencies&& deps,
+            bool create_components = true);
+
+  ~NetEqImpl() override;
+
+  // Inserts a new packet into NetEq. The |receive_timestamp| is an indication
+  // of the time when the packet was received, and should be measured with
+  // the same tick rate as the RTP timestamp of the current payload.
+  // Returns 0 on success, -1 on failure.
+  int InsertPacket(const RTPHeader& rtp_header,
+                   rtc::ArrayView<const uint8_t> payload,
+                   uint32_t receive_timestamp) override;
+
+  void InsertEmptyPacket(const RTPHeader& rtp_header) override;
+
+  int GetAudio(AudioFrame* audio_frame, bool* muted) override;
+
+  void SetCodecs(const std::map<int, SdpAudioFormat>& codecs) override;
+
+  int RegisterPayloadType(NetEqDecoder codec,
+                          const std::string& codec_name,
+                          uint8_t rtp_payload_type) override;
+
+  int RegisterExternalDecoder(AudioDecoder* decoder,
+                              NetEqDecoder codec,
+                              const std::string& codec_name,
+                              uint8_t rtp_payload_type) override;
+
+  bool RegisterPayloadType(int rtp_payload_type,
+                           const SdpAudioFormat& audio_format) override;
+
+  // Removes |rtp_payload_type| from the codec database. Returns 0 on success,
+  // -1 on failure.
+  int RemovePayloadType(uint8_t rtp_payload_type) override;
+
+  void RemoveAllPayloadTypes() override;
+
+  bool SetMinimumDelay(int delay_ms) override;
+
+  bool SetMaximumDelay(int delay_ms) override;
+
+  int LeastRequiredDelayMs() const override;
+
+  int SetTargetDelay() override;
+
+  int TargetDelayMs() const override;
+
+  int CurrentDelayMs() const override;
+
+  int FilteredCurrentDelayMs() const override;
+
+  // Sets the playout mode to |mode|.
+  // Deprecated.
+  // TODO(henrik.lundin) Delete.
+  void SetPlayoutMode(NetEqPlayoutMode mode) override;
+
+  // Returns the current playout mode.
+  // Deprecated.
+  // TODO(henrik.lundin) Delete.
+  NetEqPlayoutMode PlayoutMode() const override;
+
+  // Writes the current network statistics to |stats|. The statistics are reset
+  // after the call.
+  int NetworkStatistics(NetEqNetworkStatistics* stats) override;
+
+  // Writes the current RTCP statistics to |stats|. The statistics are reset
+  // and a new report period is started with the call.
+  void GetRtcpStatistics(RtcpStatistics* stats) override;
+
+  NetEqLifetimeStatistics GetLifetimeStatistics() const override;
+
+  // Same as RtcpStatistics(), but does not reset anything.
+  void GetRtcpStatisticsNoReset(RtcpStatistics* stats) override;
+
+  // Enables post-decode VAD. When enabled, GetAudio() will return
+  // kOutputVADPassive when the signal contains no speech.
+  void EnableVad() override;
+
+  // Disables post-decode VAD.
+  void DisableVad() override;
+
+  rtc::Optional<uint32_t> GetPlayoutTimestamp() const override;
+
+  int last_output_sample_rate_hz() const override;
+
+  rtc::Optional<CodecInst> GetDecoder(int payload_type) const override;
+
+  rtc::Optional<SdpAudioFormat> GetDecoderFormat(
+      int payload_type) const override;
+
+  int SetTargetNumberOfChannels() override;
+
+  int SetTargetSampleRate() override;
+
+  // Flushes both the packet buffer and the sync buffer.
+  void FlushBuffers() override;
+
+  void PacketBufferStatistics(int* current_num_packets,
+                              int* max_num_packets) const override;
+
+  void EnableNack(size_t max_nack_list_size) override;
+
+  void DisableNack() override;
+
+  std::vector<uint16_t> GetNackList(int64_t round_trip_time_ms) const override;
+
+  std::vector<uint32_t> LastDecodedTimestamps() const override;
+
+  int SyncBufferSizeMs() const override;
+
+  // This accessor method is only intended for testing purposes.
+  const SyncBuffer* sync_buffer_for_test() const;
+  Operations last_operation_for_test() const;
+
+ protected:
+  static const int kOutputSizeMs = 10;
+  static const size_t kMaxFrameSize = 5760;  // 120 ms @ 48 kHz.
+  // TODO(hlundin): Provide a better value for kSyncBufferSize.
+  // Current value is kMaxFrameSize + 60 ms * 48 kHz, which is enough for
+  // calculating correlations of current frame against history.
+  static const size_t kSyncBufferSize = kMaxFrameSize + 60 * 48;
+
+  // Inserts a new packet into NetEq. This is used by the InsertPacket method
+  // above. Returns 0 on success, otherwise an error code.
+  // TODO(hlundin): Merge this with InsertPacket above?
+  int InsertPacketInternal(const RTPHeader& rtp_header,
+                           rtc::ArrayView<const uint8_t> payload,
+                           uint32_t receive_timestamp)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Delivers 10 ms of audio data. The data is written to |audio_frame|.
+  // Returns 0 on success, otherwise an error code.
+  int GetAudioInternal(AudioFrame* audio_frame, bool* muted)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Provides a decision to the GetAudioInternal method. The decision what to
+  // do is written to |operation|. Packets to decode are written to
+  // |packet_list|, and a DTMF event to play is written to |dtmf_event|. When
+  // DTMF should be played, |play_dtmf| is set to true by the method.
+  // Returns 0 on success, otherwise an error code.
+  int GetDecision(Operations* operation,
+                  PacketList* packet_list,
+                  DtmfEvent* dtmf_event,
+                  bool* play_dtmf) RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Decodes the speech packets in |packet_list|, and writes the results to
+  // |decoded_buffer|, which is allocated to hold |decoded_buffer_length|
+  // elements. The length of the decoded data is written to |decoded_length|.
+  // The speech type -- speech or (codec-internal) comfort noise -- is written
+  // to |speech_type|. If |packet_list| contains any SID frames for RFC 3389
+  // comfort noise, those are not decoded.
+  int Decode(PacketList* packet_list,
+             Operations* operation,
+             int* decoded_length,
+             AudioDecoder::SpeechType* speech_type)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Sub-method to Decode(). Performs codec internal CNG.
+  int DecodeCng(AudioDecoder* decoder,
+                int* decoded_length,
+                AudioDecoder::SpeechType* speech_type)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Sub-method to Decode(). Performs the actual decoding.
+  int DecodeLoop(PacketList* packet_list,
+                 const Operations& operation,
+                 AudioDecoder* decoder,
+                 int* decoded_length,
+                 AudioDecoder::SpeechType* speech_type)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Sub-method which calls the Normal class to perform the normal operation.
+  void DoNormal(const int16_t* decoded_buffer,
+                size_t decoded_length,
+                AudioDecoder::SpeechType speech_type,
+                bool play_dtmf) RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Sub-method which calls the Merge class to perform the merge operation.
+  void DoMerge(int16_t* decoded_buffer,
+               size_t decoded_length,
+               AudioDecoder::SpeechType speech_type,
+               bool play_dtmf) RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Sub-method which calls the Expand class to perform the expand operation.
+  int DoExpand(bool play_dtmf) RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Sub-method which calls the Accelerate class to perform the accelerate
+  // operation.
+  int DoAccelerate(int16_t* decoded_buffer,
+                   size_t decoded_length,
+                   AudioDecoder::SpeechType speech_type,
+                   bool play_dtmf,
+                   bool fast_accelerate)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Sub-method which calls the PreemptiveExpand class to perform the
+  // preemtive expand operation.
+  int DoPreemptiveExpand(int16_t* decoded_buffer,
+                         size_t decoded_length,
+                         AudioDecoder::SpeechType speech_type,
+                         bool play_dtmf)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Sub-method which calls the ComfortNoise class to generate RFC 3389 comfort
+  // noise. |packet_list| can either contain one SID frame to update the
+  // noise parameters, or no payload at all, in which case the previously
+  // received parameters are used.
+  int DoRfc3389Cng(PacketList* packet_list, bool play_dtmf)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Calls the audio decoder to generate codec-internal comfort noise when
+  // no packet was received.
+  void DoCodecInternalCng(const int16_t* decoded_buffer, size_t decoded_length)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Calls the DtmfToneGenerator class to generate DTMF tones.
+  int DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Produces packet-loss concealment using alternative methods. If the codec
+  // has an internal PLC, it is called to generate samples. Otherwise, the
+  // method performs zero-stuffing.
+  void DoAlternativePlc(bool increase_timestamp)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Overdub DTMF on top of |output|.
+  int DtmfOverdub(const DtmfEvent& dtmf_event,
+                  size_t num_channels,
+                  int16_t* output) const
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Extracts packets from |packet_buffer_| to produce at least
+  // |required_samples| samples. The packets are inserted into |packet_list|.
+  // Returns the number of samples that the packets in the list will produce, or
+  // -1 in case of an error.
+  int ExtractPackets(size_t required_samples, PacketList* packet_list)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Resets various variables and objects to new values based on the sample rate
+  // |fs_hz| and |channels| number audio channels.
+  void SetSampleRateAndChannels(int fs_hz, size_t channels)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Returns the output type for the audio produced by the latest call to
+  // GetAudio().
+  OutputType LastOutputType() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Updates Expand and Merge.
+  virtual void UpdatePlcComponents(int fs_hz, size_t channels)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Creates DecisionLogic object with the mode given by |playout_mode_|.
+  virtual void CreateDecisionLogic() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  rtc::CriticalSection crit_sect_;
+  const std::unique_ptr<TickTimer> tick_timer_ RTC_GUARDED_BY(crit_sect_);
+  const std::unique_ptr<BufferLevelFilter> buffer_level_filter_
+      RTC_GUARDED_BY(crit_sect_);
+  const std::unique_ptr<DecoderDatabase> decoder_database_
+      RTC_GUARDED_BY(crit_sect_);
+  const std::unique_ptr<DelayManager> delay_manager_ RTC_GUARDED_BY(crit_sect_);
+  const std::unique_ptr<DelayPeakDetector> delay_peak_detector_
+      RTC_GUARDED_BY(crit_sect_);
+  const std::unique_ptr<DtmfBuffer> dtmf_buffer_ RTC_GUARDED_BY(crit_sect_);
+  const std::unique_ptr<DtmfToneGenerator> dtmf_tone_generator_
+      RTC_GUARDED_BY(crit_sect_);
+  const std::unique_ptr<PacketBuffer> packet_buffer_ RTC_GUARDED_BY(crit_sect_);
+  const std::unique_ptr<RedPayloadSplitter> red_payload_splitter_
+      RTC_GUARDED_BY(crit_sect_);
+  const std::unique_ptr<TimestampScaler> timestamp_scaler_
+      RTC_GUARDED_BY(crit_sect_);
+  const std::unique_ptr<PostDecodeVad> vad_ RTC_GUARDED_BY(crit_sect_);
+  const std::unique_ptr<ExpandFactory> expand_factory_
+      RTC_GUARDED_BY(crit_sect_);
+  const std::unique_ptr<AccelerateFactory> accelerate_factory_
+      RTC_GUARDED_BY(crit_sect_);
+  const std::unique_ptr<PreemptiveExpandFactory> preemptive_expand_factory_
+      RTC_GUARDED_BY(crit_sect_);
+
+  std::unique_ptr<BackgroundNoise> background_noise_ RTC_GUARDED_BY(crit_sect_);
+  std::unique_ptr<DecisionLogic> decision_logic_ RTC_GUARDED_BY(crit_sect_);
+  std::unique_ptr<AudioMultiVector> algorithm_buffer_
+      RTC_GUARDED_BY(crit_sect_);
+  std::unique_ptr<SyncBuffer> sync_buffer_ RTC_GUARDED_BY(crit_sect_);
+  std::unique_ptr<Expand> expand_ RTC_GUARDED_BY(crit_sect_);
+  std::unique_ptr<Normal> normal_ RTC_GUARDED_BY(crit_sect_);
+  std::unique_ptr<Merge> merge_ RTC_GUARDED_BY(crit_sect_);
+  std::unique_ptr<Accelerate> accelerate_ RTC_GUARDED_BY(crit_sect_);
+  std::unique_ptr<PreemptiveExpand> preemptive_expand_
+      RTC_GUARDED_BY(crit_sect_);
+  RandomVector random_vector_ RTC_GUARDED_BY(crit_sect_);
+  std::unique_ptr<ComfortNoise> comfort_noise_ RTC_GUARDED_BY(crit_sect_);
+  Rtcp rtcp_ RTC_GUARDED_BY(crit_sect_);
+  StatisticsCalculator stats_ RTC_GUARDED_BY(crit_sect_);
+  int fs_hz_ RTC_GUARDED_BY(crit_sect_);
+  int fs_mult_ RTC_GUARDED_BY(crit_sect_);
+  int last_output_sample_rate_hz_ RTC_GUARDED_BY(crit_sect_);
+  size_t output_size_samples_ RTC_GUARDED_BY(crit_sect_);
+  size_t decoder_frame_length_ RTC_GUARDED_BY(crit_sect_);
+  Modes last_mode_ RTC_GUARDED_BY(crit_sect_);
+  Operations last_operation_ RTC_GUARDED_BY(crit_sect_);
+  std::unique_ptr<int16_t[]> mute_factor_array_ RTC_GUARDED_BY(crit_sect_);
+  size_t decoded_buffer_length_ RTC_GUARDED_BY(crit_sect_);
+  std::unique_ptr<int16_t[]> decoded_buffer_ RTC_GUARDED_BY(crit_sect_);
+  uint32_t playout_timestamp_ RTC_GUARDED_BY(crit_sect_);
+  bool new_codec_ RTC_GUARDED_BY(crit_sect_);
+  uint32_t timestamp_ RTC_GUARDED_BY(crit_sect_);
+  bool reset_decoder_ RTC_GUARDED_BY(crit_sect_);
+  rtc::Optional<uint8_t> current_rtp_payload_type_ RTC_GUARDED_BY(crit_sect_);
+  rtc::Optional<uint8_t> current_cng_rtp_payload_type_
+      RTC_GUARDED_BY(crit_sect_);
+  uint32_t ssrc_ RTC_GUARDED_BY(crit_sect_);
+  bool first_packet_ RTC_GUARDED_BY(crit_sect_);
+  const BackgroundNoiseMode background_noise_mode_ RTC_GUARDED_BY(crit_sect_);
+  NetEqPlayoutMode playout_mode_ RTC_GUARDED_BY(crit_sect_);
+  bool enable_fast_accelerate_ RTC_GUARDED_BY(crit_sect_);
+  std::unique_ptr<NackTracker> nack_ RTC_GUARDED_BY(crit_sect_);
+  bool nack_enabled_ RTC_GUARDED_BY(crit_sect_);
+  const bool enable_muted_state_ RTC_GUARDED_BY(crit_sect_);
+  AudioFrame::VADActivity last_vad_activity_ RTC_GUARDED_BY(crit_sect_) =
+      AudioFrame::kVadPassive;
+  std::unique_ptr<TickTimer::Stopwatch> generated_noise_stopwatch_
+      RTC_GUARDED_BY(crit_sect_);
+  std::vector<uint32_t> last_decoded_timestamps_ RTC_GUARDED_BY(crit_sect_);
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(NetEqImpl);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_NETEQ_IMPL_H_
diff --git a/modules/audio_coding/neteq/neteq_impl_unittest.cc b/modules/audio_coding/neteq/neteq_impl_unittest.cc
new file mode 100644
index 0000000..12eabfa
--- /dev/null
+++ b/modules/audio_coding/neteq/neteq_impl_unittest.cc
@@ -0,0 +1,1516 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/neteq/accelerate.h"
+#include "modules/audio_coding/neteq/expand.h"
+#include "modules/audio_coding/neteq/include/neteq.h"
+#include "modules/audio_coding/neteq/mock/mock_buffer_level_filter.h"
+#include "modules/audio_coding/neteq/mock/mock_decoder_database.h"
+#include "modules/audio_coding/neteq/mock/mock_delay_manager.h"
+#include "modules/audio_coding/neteq/mock/mock_delay_peak_detector.h"
+#include "modules/audio_coding/neteq/mock/mock_dtmf_buffer.h"
+#include "modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h"
+#include "modules/audio_coding/neteq/mock/mock_packet_buffer.h"
+#include "modules/audio_coding/neteq/mock/mock_red_payload_splitter.h"
+#include "modules/audio_coding/neteq/neteq_impl.h"
+#include "modules/audio_coding/neteq/preemptive_expand.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "modules/audio_coding/neteq/timestamp_scaler.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/mock_audio_decoder.h"
+#include "test/mock_audio_decoder_factory.h"
+
+using ::testing::AtLeast;
+using ::testing::Return;
+using ::testing::ReturnNull;
+using ::testing::_;
+using ::testing::SetArgPointee;
+using ::testing::SetArrayArgument;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::WithArg;
+using ::testing::Pointee;
+using ::testing::IsNull;
+
+namespace webrtc {
+
+// This function is called when inserting a packet list into the mock packet
+// buffer. The purpose is to delete all inserted packets properly, to avoid
+// memory leaks in the test.
+int DeletePacketsAndReturnOk(PacketList* packet_list) {
+  packet_list->clear();
+  return PacketBuffer::kOK;
+}
+
+class NetEqImplTest : public ::testing::Test {
+ protected:
+  NetEqImplTest() { config_.sample_rate_hz = 8000; }
+
+  void CreateInstance() {
+    NetEqImpl::Dependencies deps(config_, CreateBuiltinAudioDecoderFactory());
+
+    // Get a local pointer to NetEq's TickTimer object.
+    tick_timer_ = deps.tick_timer.get();
+
+    if (use_mock_buffer_level_filter_) {
+      std::unique_ptr<MockBufferLevelFilter> mock(new MockBufferLevelFilter);
+      mock_buffer_level_filter_ = mock.get();
+      deps.buffer_level_filter = std::move(mock);
+    }
+    buffer_level_filter_ = deps.buffer_level_filter.get();
+
+    if (use_mock_decoder_database_) {
+      std::unique_ptr<MockDecoderDatabase> mock(new MockDecoderDatabase);
+      mock_decoder_database_ = mock.get();
+      EXPECT_CALL(*mock_decoder_database_, GetActiveCngDecoder())
+          .WillOnce(ReturnNull());
+      deps.decoder_database = std::move(mock);
+    }
+    decoder_database_ = deps.decoder_database.get();
+
+    if (use_mock_delay_peak_detector_) {
+      std::unique_ptr<MockDelayPeakDetector> mock(
+          new MockDelayPeakDetector(tick_timer_));
+      mock_delay_peak_detector_ = mock.get();
+      EXPECT_CALL(*mock_delay_peak_detector_, Reset()).Times(1);
+      deps.delay_peak_detector = std::move(mock);
+    }
+    delay_peak_detector_ = deps.delay_peak_detector.get();
+
+    if (use_mock_delay_manager_) {
+      std::unique_ptr<MockDelayManager> mock(new MockDelayManager(
+          config_.max_packets_in_buffer, delay_peak_detector_, tick_timer_));
+      mock_delay_manager_ = mock.get();
+      EXPECT_CALL(*mock_delay_manager_, set_streaming_mode(false)).Times(1);
+      deps.delay_manager = std::move(mock);
+    }
+    delay_manager_ = deps.delay_manager.get();
+
+    if (use_mock_dtmf_buffer_) {
+      std::unique_ptr<MockDtmfBuffer> mock(
+          new MockDtmfBuffer(config_.sample_rate_hz));
+      mock_dtmf_buffer_ = mock.get();
+      deps.dtmf_buffer = std::move(mock);
+    }
+    dtmf_buffer_ = deps.dtmf_buffer.get();
+
+    if (use_mock_dtmf_tone_generator_) {
+      std::unique_ptr<MockDtmfToneGenerator> mock(new MockDtmfToneGenerator);
+      mock_dtmf_tone_generator_ = mock.get();
+      deps.dtmf_tone_generator = std::move(mock);
+    }
+    dtmf_tone_generator_ = deps.dtmf_tone_generator.get();
+
+    if (use_mock_packet_buffer_) {
+      std::unique_ptr<MockPacketBuffer> mock(
+          new MockPacketBuffer(config_.max_packets_in_buffer, tick_timer_));
+      mock_packet_buffer_ = mock.get();
+      deps.packet_buffer = std::move(mock);
+    }
+    packet_buffer_ = deps.packet_buffer.get();
+
+    if (use_mock_payload_splitter_) {
+      std::unique_ptr<MockRedPayloadSplitter> mock(new MockRedPayloadSplitter);
+      mock_payload_splitter_ = mock.get();
+      deps.red_payload_splitter = std::move(mock);
+    }
+    red_payload_splitter_ = deps.red_payload_splitter.get();
+
+    deps.timestamp_scaler = std::unique_ptr<TimestampScaler>(
+        new TimestampScaler(*deps.decoder_database.get()));
+
+    neteq_.reset(new NetEqImpl(config_, std::move(deps)));
+    ASSERT_TRUE(neteq_ != NULL);
+  }
+
+  void UseNoMocks() {
+    ASSERT_TRUE(neteq_ == NULL) << "Must call UseNoMocks before CreateInstance";
+    use_mock_buffer_level_filter_ = false;
+    use_mock_decoder_database_ = false;
+    use_mock_delay_peak_detector_ = false;
+    use_mock_delay_manager_ = false;
+    use_mock_dtmf_buffer_ = false;
+    use_mock_dtmf_tone_generator_ = false;
+    use_mock_packet_buffer_ = false;
+    use_mock_payload_splitter_ = false;
+  }
+
+  virtual ~NetEqImplTest() {
+    if (use_mock_buffer_level_filter_) {
+      EXPECT_CALL(*mock_buffer_level_filter_, Die()).Times(1);
+    }
+    if (use_mock_decoder_database_) {
+      EXPECT_CALL(*mock_decoder_database_, Die()).Times(1);
+    }
+    if (use_mock_delay_manager_) {
+      EXPECT_CALL(*mock_delay_manager_, Die()).Times(1);
+    }
+    if (use_mock_delay_peak_detector_) {
+      EXPECT_CALL(*mock_delay_peak_detector_, Die()).Times(1);
+    }
+    if (use_mock_dtmf_buffer_) {
+      EXPECT_CALL(*mock_dtmf_buffer_, Die()).Times(1);
+    }
+    if (use_mock_dtmf_tone_generator_) {
+      EXPECT_CALL(*mock_dtmf_tone_generator_, Die()).Times(1);
+    }
+    if (use_mock_packet_buffer_) {
+      EXPECT_CALL(*mock_packet_buffer_, Die()).Times(1);
+    }
+  }
+
+  void TestDtmfPacket(NetEqDecoder decoder_type) {
+    const size_t kPayloadLength = 4;
+    const uint8_t kPayloadType = 110;
+    const uint32_t kReceiveTime = 17;
+    const int kSampleRateHz = 16000;
+    config_.sample_rate_hz = kSampleRateHz;
+    UseNoMocks();
+    CreateInstance();
+    // Event: 2, E bit, Volume: 17, Length: 4336.
+    uint8_t payload[kPayloadLength] = { 0x02, 0x80 + 0x11, 0x10, 0xF0 };
+    RTPHeader rtp_header;
+    rtp_header.payloadType = kPayloadType;
+    rtp_header.sequenceNumber = 0x1234;
+    rtp_header.timestamp = 0x12345678;
+    rtp_header.ssrc = 0x87654321;
+
+    EXPECT_EQ(NetEq::kOK, neteq_->RegisterPayloadType(
+        decoder_type, "telephone-event", kPayloadType));
+
+    // Insert first packet.
+    EXPECT_EQ(NetEq::kOK,
+              neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
+
+    // Pull audio once.
+    const size_t kMaxOutputSize =
+        static_cast<size_t>(10 * kSampleRateHz / 1000);
+    AudioFrame output;
+    bool muted;
+    EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+    ASSERT_FALSE(muted);
+    ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
+    EXPECT_EQ(1u, output.num_channels_);
+    EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
+
+    // Verify first 64 samples of actual output.
+    const std::vector<int16_t> kOutput({
+        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1578, -2816, -3460, -3403, -2709, -1594,
+        -363, 671, 1269, 1328, 908, 202, -513, -964, -955, -431, 504, 1617,
+        2602, 3164, 3101, 2364, 1073, -511, -2047, -3198, -3721, -3525, -2688,
+        -1440, -99, 1015, 1663, 1744, 1319, 588, -171, -680, -747, -315, 515,
+        1512, 2378, 2828, 2674, 1877, 568, -986, -2446, -3482, -3864, -3516,
+        -2534, -1163 });
+    ASSERT_GE(kMaxOutputSize, kOutput.size());
+    EXPECT_TRUE(std::equal(kOutput.begin(), kOutput.end(), output.data()));
+  }
+
+  std::unique_ptr<NetEqImpl> neteq_;
+  NetEq::Config config_;
+  TickTimer* tick_timer_ = nullptr;
+  MockBufferLevelFilter* mock_buffer_level_filter_ = nullptr;
+  BufferLevelFilter* buffer_level_filter_ = nullptr;
+  bool use_mock_buffer_level_filter_ = true;
+  MockDecoderDatabase* mock_decoder_database_ = nullptr;
+  DecoderDatabase* decoder_database_ = nullptr;
+  bool use_mock_decoder_database_ = true;
+  MockDelayPeakDetector* mock_delay_peak_detector_ = nullptr;
+  DelayPeakDetector* delay_peak_detector_ = nullptr;
+  bool use_mock_delay_peak_detector_ = true;
+  MockDelayManager* mock_delay_manager_ = nullptr;
+  DelayManager* delay_manager_ = nullptr;
+  bool use_mock_delay_manager_ = true;
+  MockDtmfBuffer* mock_dtmf_buffer_ = nullptr;
+  DtmfBuffer* dtmf_buffer_ = nullptr;
+  bool use_mock_dtmf_buffer_ = true;
+  MockDtmfToneGenerator* mock_dtmf_tone_generator_ = nullptr;
+  DtmfToneGenerator* dtmf_tone_generator_ = nullptr;
+  bool use_mock_dtmf_tone_generator_ = true;
+  MockPacketBuffer* mock_packet_buffer_ = nullptr;
+  PacketBuffer* packet_buffer_ = nullptr;
+  bool use_mock_packet_buffer_ = true;
+  MockRedPayloadSplitter* mock_payload_splitter_ = nullptr;
+  RedPayloadSplitter* red_payload_splitter_ = nullptr;
+  bool use_mock_payload_splitter_ = true;
+};
+
+
+// This tests the interface class NetEq.
+// TODO(hlundin): Move to separate file?
+TEST(NetEq, CreateAndDestroy) {
+  NetEq::Config config;
+  NetEq* neteq = NetEq::Create(config, CreateBuiltinAudioDecoderFactory());
+  delete neteq;
+}
+
+TEST_F(NetEqImplTest, RegisterPayloadTypeNetEqDecoder) {
+  CreateInstance();
+  uint8_t rtp_payload_type = 0;
+  NetEqDecoder codec_type = NetEqDecoder::kDecoderPCMu;
+  const std::string kCodecName = "Robert\'); DROP TABLE Students;";
+  EXPECT_CALL(*mock_decoder_database_,
+              RegisterPayload(rtp_payload_type, codec_type, kCodecName));
+  neteq_->RegisterPayloadType(codec_type, kCodecName, rtp_payload_type);
+}
+
+TEST_F(NetEqImplTest, RegisterPayloadType) {
+  CreateInstance();
+  constexpr int rtp_payload_type = 0;
+  const SdpAudioFormat format("pcmu", 8000, 1);
+  EXPECT_CALL(*mock_decoder_database_,
+              RegisterPayload(rtp_payload_type, format));
+  neteq_->RegisterPayloadType(rtp_payload_type, format);
+}
+
+TEST_F(NetEqImplTest, RemovePayloadType) {
+  CreateInstance();
+  uint8_t rtp_payload_type = 0;
+  EXPECT_CALL(*mock_decoder_database_, Remove(rtp_payload_type))
+      .WillOnce(Return(DecoderDatabase::kDecoderNotFound));
+  // Check that kOK is returned when database returns kDecoderNotFound, because
+  // removing a payload type that was never registered is not an error.
+  EXPECT_EQ(NetEq::kOK, neteq_->RemovePayloadType(rtp_payload_type));
+}
+
+TEST_F(NetEqImplTest, RemoveAllPayloadTypes) {
+  CreateInstance();
+  EXPECT_CALL(*mock_decoder_database_, RemoveAll()).WillOnce(Return());
+  neteq_->RemoveAllPayloadTypes();
+}
+
+TEST_F(NetEqImplTest, InsertPacket) {
+  CreateInstance();
+  const size_t kPayloadLength = 100;
+  const uint8_t kPayloadType = 0;
+  const uint16_t kFirstSequenceNumber = 0x1234;
+  const uint32_t kFirstTimestamp = 0x12345678;
+  const uint32_t kSsrc = 0x87654321;
+  const uint32_t kFirstReceiveTime = 17;
+  uint8_t payload[kPayloadLength] = {0};
+  RTPHeader rtp_header;
+  rtp_header.payloadType = kPayloadType;
+  rtp_header.sequenceNumber = kFirstSequenceNumber;
+  rtp_header.timestamp = kFirstTimestamp;
+  rtp_header.ssrc = kSsrc;
+  Packet fake_packet;
+  fake_packet.payload_type = kPayloadType;
+  fake_packet.sequence_number = kFirstSequenceNumber;
+  fake_packet.timestamp = kFirstTimestamp;
+
+  rtc::scoped_refptr<MockAudioDecoderFactory> mock_decoder_factory(
+      new rtc::RefCountedObject<MockAudioDecoderFactory>);
+  EXPECT_CALL(*mock_decoder_factory, MakeAudioDecoderMock(_, _, _))
+      .WillOnce(Invoke([&](const SdpAudioFormat& format,
+                           rtc::Optional<AudioCodecPairId> codec_pair_id,
+                           std::unique_ptr<AudioDecoder>* dec) {
+        EXPECT_EQ("pcmu", format.name);
+
+        std::unique_ptr<MockAudioDecoder> mock_decoder(new MockAudioDecoder);
+        EXPECT_CALL(*mock_decoder, Channels()).WillRepeatedly(Return(1));
+        EXPECT_CALL(*mock_decoder, SampleRateHz()).WillRepeatedly(Return(8000));
+        // BWE update function called with first packet.
+        EXPECT_CALL(*mock_decoder,
+                    IncomingPacket(_, kPayloadLength, kFirstSequenceNumber,
+                                   kFirstTimestamp, kFirstReceiveTime));
+        // BWE update function called with second packet.
+        EXPECT_CALL(
+            *mock_decoder,
+            IncomingPacket(_, kPayloadLength, kFirstSequenceNumber + 1,
+                           kFirstTimestamp + 160, kFirstReceiveTime + 155));
+        EXPECT_CALL(*mock_decoder, Die()).Times(1);  // Called when deleted.
+
+        *dec = std::move(mock_decoder);
+      }));
+  DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderPCMu,
+                                    mock_decoder_factory);
+
+  // Expectations for decoder database.
+  EXPECT_CALL(*mock_decoder_database_, GetDecoderInfo(kPayloadType))
+      .WillRepeatedly(Return(&info));
+
+  // Expectations for packet buffer.
+  EXPECT_CALL(*mock_packet_buffer_, Empty())
+      .WillOnce(Return(false));  // Called once after first packet is inserted.
+  EXPECT_CALL(*mock_packet_buffer_, Flush())
+      .Times(1);
+  EXPECT_CALL(*mock_packet_buffer_, InsertPacketList(_, _, _, _, _))
+      .Times(2)
+      .WillRepeatedly(DoAll(SetArgPointee<2>(kPayloadType),
+                            WithArg<0>(Invoke(DeletePacketsAndReturnOk))));
+  // SetArgPointee<2>(kPayloadType) means that the third argument (zero-based
+  // index) is a pointer, and the variable pointed to is set to kPayloadType.
+  // Also invoke the function DeletePacketsAndReturnOk to properly delete all
+  // packets in the list (to avoid memory leaks in the test).
+  EXPECT_CALL(*mock_packet_buffer_, PeekNextPacket())
+      .Times(1)
+      .WillOnce(Return(&fake_packet));
+
+  // Expectations for DTMF buffer.
+  EXPECT_CALL(*mock_dtmf_buffer_, Flush())
+      .Times(1);
+
+  // Expectations for delay manager.
+  {
+    // All expectations within this block must be called in this specific order.
+    InSequence sequence;  // Dummy variable.
+    // Expectations when the first packet is inserted.
+    EXPECT_CALL(*mock_delay_manager_, last_pack_cng_or_dtmf())
+        .Times(2)
+        .WillRepeatedly(Return(-1));
+    EXPECT_CALL(*mock_delay_manager_, set_last_pack_cng_or_dtmf(0))
+        .Times(1);
+    EXPECT_CALL(*mock_delay_manager_, ResetPacketIatCount()).Times(1);
+    // Expectations when the second packet is inserted. Slightly different.
+    EXPECT_CALL(*mock_delay_manager_, last_pack_cng_or_dtmf())
+        .WillOnce(Return(0));
+    EXPECT_CALL(*mock_delay_manager_, SetPacketAudioLength(30))
+        .WillOnce(Return(0));
+  }
+
+  // Insert first packet.
+  neteq_->InsertPacket(rtp_header, payload, kFirstReceiveTime);
+
+  // Insert second packet.
+  rtp_header.timestamp += 160;
+  rtp_header.sequenceNumber += 1;
+  neteq_->InsertPacket(rtp_header, payload, kFirstReceiveTime + 155);
+}
+
+TEST_F(NetEqImplTest, InsertPacketsUntilBufferIsFull) {
+  UseNoMocks();
+  CreateInstance();
+
+  const int kPayloadLengthSamples = 80;
+  const size_t kPayloadLengthBytes = 2 * kPayloadLengthSamples;  // PCM 16-bit.
+  const uint8_t kPayloadType = 17;  // Just an arbitrary number.
+  const uint32_t kReceiveTime = 17;  // Value doesn't matter for this test.
+  uint8_t payload[kPayloadLengthBytes] = {0};
+  RTPHeader rtp_header;
+  rtp_header.payloadType = kPayloadType;
+  rtp_header.sequenceNumber = 0x1234;
+  rtp_header.timestamp = 0x12345678;
+  rtp_header.ssrc = 0x87654321;
+
+  EXPECT_EQ(NetEq::kOK, neteq_->RegisterPayloadType(
+                            NetEqDecoder::kDecoderPCM16B, "", kPayloadType));
+
+  // Insert packets. The buffer should not flush.
+  for (size_t i = 1; i <= config_.max_packets_in_buffer; ++i) {
+    EXPECT_EQ(NetEq::kOK,
+              neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
+    rtp_header.timestamp += kPayloadLengthSamples;
+    rtp_header.sequenceNumber += 1;
+    EXPECT_EQ(i, packet_buffer_->NumPacketsInBuffer());
+  }
+
+  // Insert one more packet and make sure the buffer got flushed. That is, it
+  // should only hold one single packet.
+  EXPECT_EQ(NetEq::kOK,
+            neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
+  EXPECT_EQ(1u, packet_buffer_->NumPacketsInBuffer());
+  const Packet* test_packet = packet_buffer_->PeekNextPacket();
+  EXPECT_EQ(rtp_header.timestamp, test_packet->timestamp);
+  EXPECT_EQ(rtp_header.sequenceNumber, test_packet->sequence_number);
+}
+
+TEST_F(NetEqImplTest, TestDtmfPacketAVT) {
+  TestDtmfPacket(NetEqDecoder::kDecoderAVT);
+}
+
+TEST_F(NetEqImplTest, TestDtmfPacketAVT16kHz) {
+  TestDtmfPacket(NetEqDecoder::kDecoderAVT16kHz);
+}
+
+TEST_F(NetEqImplTest, TestDtmfPacketAVT32kHz) {
+  TestDtmfPacket(NetEqDecoder::kDecoderAVT32kHz);
+}
+
+TEST_F(NetEqImplTest, TestDtmfPacketAVT48kHz) {
+  TestDtmfPacket(NetEqDecoder::kDecoderAVT48kHz);
+}
+
+// This test verifies that timestamps propagate from the incoming packets
+// through to the sync buffer and to the playout timestamp.
+TEST_F(NetEqImplTest, VerifyTimestampPropagation) {
+  UseNoMocks();
+  CreateInstance();
+
+  const uint8_t kPayloadType = 17;   // Just an arbitrary number.
+  const uint32_t kReceiveTime = 17;  // Value doesn't matter for this test.
+  const int kSampleRateHz = 8000;
+  const size_t kPayloadLengthSamples =
+      static_cast<size_t>(10 * kSampleRateHz / 1000);  // 10 ms.
+  const size_t kPayloadLengthBytes = kPayloadLengthSamples;
+  uint8_t payload[kPayloadLengthBytes] = {0};
+  RTPHeader rtp_header;
+  rtp_header.payloadType = kPayloadType;
+  rtp_header.sequenceNumber = 0x1234;
+  rtp_header.timestamp = 0x12345678;
+  rtp_header.ssrc = 0x87654321;
+
+  // This is a dummy decoder that produces as many output samples as the input
+  // has bytes. The output is an increasing series, starting at 1 for the first
+  // sample, and then increasing by 1 for each sample.
+  class CountingSamplesDecoder : public AudioDecoder {
+   public:
+    CountingSamplesDecoder() : next_value_(1) {}
+
+    // Produce as many samples as input bytes (|encoded_len|).
+    int DecodeInternal(const uint8_t* encoded,
+                       size_t encoded_len,
+                       int /* sample_rate_hz */,
+                       int16_t* decoded,
+                       SpeechType* speech_type) override {
+      for (size_t i = 0; i < encoded_len; ++i) {
+        decoded[i] = next_value_++;
+      }
+      *speech_type = kSpeech;
+      return rtc::checked_cast<int>(encoded_len);
+    }
+
+    void Reset() override { next_value_ = 1; }
+
+    int SampleRateHz() const override { return kSampleRateHz; }
+
+    size_t Channels() const override { return 1; }
+
+    uint16_t next_value() const { return next_value_; }
+
+   private:
+    int16_t next_value_;
+  } decoder_;
+
+  EXPECT_EQ(NetEq::kOK, neteq_->RegisterExternalDecoder(
+                            &decoder_, NetEqDecoder::kDecoderPCM16B,
+                            "dummy name", kPayloadType));
+
+  // Insert one packet.
+  EXPECT_EQ(NetEq::kOK,
+            neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
+
+  // Pull audio once.
+  const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
+  AudioFrame output;
+  bool muted;
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+  ASSERT_FALSE(muted);
+  ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
+  EXPECT_EQ(1u, output.num_channels_);
+  EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
+
+  // Start with a simple check that the fake decoder is behaving as expected.
+  EXPECT_EQ(kPayloadLengthSamples,
+            static_cast<size_t>(decoder_.next_value() - 1));
+
+  // The value of the last of the output samples is the same as the number of
+  // samples played from the decoded packet. Thus, this number + the RTP
+  // timestamp should match the playout timestamp.
+  // Wrap the expected value in an rtc::Optional to compare them as such.
+  EXPECT_EQ(
+      rtc::Optional<uint32_t>(rtp_header.timestamp +
+                              output.data()[output.samples_per_channel_ - 1]),
+      neteq_->GetPlayoutTimestamp());
+
+  // Check the timestamp for the last value in the sync buffer. This should
+  // be one full frame length ahead of the RTP timestamp.
+  const SyncBuffer* sync_buffer = neteq_->sync_buffer_for_test();
+  ASSERT_TRUE(sync_buffer != NULL);
+  EXPECT_EQ(rtp_header.timestamp + kPayloadLengthSamples,
+            sync_buffer->end_timestamp());
+
+  // Check that the number of samples still to play from the sync buffer add
+  // up with what was already played out.
+  EXPECT_EQ(
+      kPayloadLengthSamples - output.data()[output.samples_per_channel_ - 1],
+      sync_buffer->FutureLength());
+}
+
+TEST_F(NetEqImplTest, ReorderedPacket) {
+  UseNoMocks();
+  CreateInstance();
+
+  const uint8_t kPayloadType = 17;   // Just an arbitrary number.
+  const uint32_t kReceiveTime = 17;  // Value doesn't matter for this test.
+  const int kSampleRateHz = 8000;
+  const size_t kPayloadLengthSamples =
+      static_cast<size_t>(10 * kSampleRateHz / 1000);  // 10 ms.
+  const size_t kPayloadLengthBytes = kPayloadLengthSamples;
+  uint8_t payload[kPayloadLengthBytes] = {0};
+  RTPHeader rtp_header;
+  rtp_header.payloadType = kPayloadType;
+  rtp_header.sequenceNumber = 0x1234;
+  rtp_header.timestamp = 0x12345678;
+  rtp_header.ssrc = 0x87654321;
+
+  // Create a mock decoder object.
+  MockAudioDecoder mock_decoder;
+  EXPECT_CALL(mock_decoder, Reset()).WillRepeatedly(Return());
+  EXPECT_CALL(mock_decoder, SampleRateHz())
+      .WillRepeatedly(Return(kSampleRateHz));
+  EXPECT_CALL(mock_decoder, Channels()).WillRepeatedly(Return(1));
+  EXPECT_CALL(mock_decoder, IncomingPacket(_, kPayloadLengthBytes, _, _, _))
+      .WillRepeatedly(Return(0));
+  EXPECT_CALL(mock_decoder, PacketDuration(_, kPayloadLengthBytes))
+      .WillRepeatedly(Return(rtc::checked_cast<int>(kPayloadLengthSamples)));
+  int16_t dummy_output[kPayloadLengthSamples] = {0};
+  // The below expectation will make the mock decoder write
+  // |kPayloadLengthSamples| zeros to the output array, and mark it as speech.
+  EXPECT_CALL(mock_decoder, DecodeInternal(Pointee(0), kPayloadLengthBytes,
+                                           kSampleRateHz, _, _))
+      .WillOnce(DoAll(SetArrayArgument<3>(dummy_output,
+                                          dummy_output + kPayloadLengthSamples),
+                      SetArgPointee<4>(AudioDecoder::kSpeech),
+                      Return(rtc::checked_cast<int>(kPayloadLengthSamples))));
+  EXPECT_EQ(NetEq::kOK, neteq_->RegisterExternalDecoder(
+                            &mock_decoder, NetEqDecoder::kDecoderPCM16B,
+                            "dummy name", kPayloadType));
+
+  // Insert one packet.
+  EXPECT_EQ(NetEq::kOK,
+            neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
+
+  // Pull audio once.
+  const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
+  AudioFrame output;
+  bool muted;
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+  ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
+  EXPECT_EQ(1u, output.num_channels_);
+  EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
+
+  // Insert two more packets. The first one is out of order, and is already too
+  // old, the second one is the expected next packet.
+  rtp_header.sequenceNumber -= 1;
+  rtp_header.timestamp -= kPayloadLengthSamples;
+  payload[0] = 1;
+  EXPECT_EQ(NetEq::kOK,
+            neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
+  rtp_header.sequenceNumber += 2;
+  rtp_header.timestamp += 2 * kPayloadLengthSamples;
+  payload[0] = 2;
+  EXPECT_EQ(NetEq::kOK,
+            neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
+
+  // Expect only the second packet to be decoded (the one with "2" as the first
+  // payload byte).
+  EXPECT_CALL(mock_decoder, DecodeInternal(Pointee(2), kPayloadLengthBytes,
+                                           kSampleRateHz, _, _))
+      .WillOnce(DoAll(SetArrayArgument<3>(dummy_output,
+                                          dummy_output + kPayloadLengthSamples),
+                      SetArgPointee<4>(AudioDecoder::kSpeech),
+                      Return(rtc::checked_cast<int>(kPayloadLengthSamples))));
+
+  // Pull audio once.
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+  ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
+  EXPECT_EQ(1u, output.num_channels_);
+  EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
+
+  // Now check the packet buffer, and make sure it is empty, since the
+  // out-of-order packet should have been discarded.
+  EXPECT_TRUE(packet_buffer_->Empty());
+
+  EXPECT_CALL(mock_decoder, Die());
+}
+
+// This test verifies that NetEq can handle the situation where the first
+// incoming packet is rejected.
+TEST_F(NetEqImplTest, FirstPacketUnknown) {
+  UseNoMocks();
+  CreateInstance();
+
+  const uint8_t kPayloadType = 17;   // Just an arbitrary number.
+  const uint32_t kReceiveTime = 17;  // Value doesn't matter for this test.
+  const int kSampleRateHz = 8000;
+  const size_t kPayloadLengthSamples =
+      static_cast<size_t>(10 * kSampleRateHz / 1000);  // 10 ms.
+  const size_t kPayloadLengthBytes = kPayloadLengthSamples * 2;
+  uint8_t payload[kPayloadLengthBytes] = {0};
+  RTPHeader rtp_header;
+  rtp_header.payloadType = kPayloadType;
+  rtp_header.sequenceNumber = 0x1234;
+  rtp_header.timestamp = 0x12345678;
+  rtp_header.ssrc = 0x87654321;
+
+  // Insert one packet. Note that we have not registered any payload type, so
+  // this packet will be rejected.
+  EXPECT_EQ(NetEq::kFail,
+            neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
+
+  // Pull audio once.
+  const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
+  AudioFrame output;
+  bool muted;
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+  ASSERT_LE(output.samples_per_channel_, kMaxOutputSize);
+  EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
+  EXPECT_EQ(1u, output.num_channels_);
+  EXPECT_EQ(AudioFrame::kPLC, output.speech_type_);
+
+  // Register the payload type.
+  EXPECT_EQ(NetEq::kOK, neteq_->RegisterPayloadType(
+                            NetEqDecoder::kDecoderPCM16B, "", kPayloadType));
+
+  // Insert 10 packets.
+  for (size_t i = 0; i < 10; ++i) {
+    rtp_header.sequenceNumber++;
+    rtp_header.timestamp += kPayloadLengthSamples;
+    EXPECT_EQ(NetEq::kOK,
+              neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
+    EXPECT_EQ(i + 1, packet_buffer_->NumPacketsInBuffer());
+  }
+
+  // Pull audio repeatedly and make sure we get normal output, that is not PLC.
+  for (size_t i = 0; i < 3; ++i) {
+    EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+    ASSERT_LE(output.samples_per_channel_, kMaxOutputSize);
+    EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
+    EXPECT_EQ(1u, output.num_channels_);
+    EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_)
+        << "NetEq did not decode the packets as expected.";
+  }
+}
+
+// This test verifies that NetEq can handle comfort noise and enters/quits codec
+// internal CNG mode properly.
+TEST_F(NetEqImplTest, CodecInternalCng) {
+  UseNoMocks();
+  CreateInstance();
+
+  const uint8_t kPayloadType = 17;   // Just an arbitrary number.
+  const uint32_t kReceiveTime = 17;  // Value doesn't matter for this test.
+  const int kSampleRateKhz = 48;
+  const size_t kPayloadLengthSamples =
+      static_cast<size_t>(20 * kSampleRateKhz);  // 20 ms.
+  const size_t kPayloadLengthBytes = 10;
+  uint8_t payload[kPayloadLengthBytes] = {0};
+  int16_t dummy_output[kPayloadLengthSamples] = {0};
+
+  RTPHeader rtp_header;
+  rtp_header.payloadType = kPayloadType;
+  rtp_header.sequenceNumber = 0x1234;
+  rtp_header.timestamp = 0x12345678;
+  rtp_header.ssrc = 0x87654321;
+
+  // Create a mock decoder object.
+  MockAudioDecoder mock_decoder;
+  EXPECT_CALL(mock_decoder, Reset()).WillRepeatedly(Return());
+  EXPECT_CALL(mock_decoder, SampleRateHz())
+      .WillRepeatedly(Return(kSampleRateKhz * 1000));
+  EXPECT_CALL(mock_decoder, Channels()).WillRepeatedly(Return(1));
+  EXPECT_CALL(mock_decoder, IncomingPacket(_, kPayloadLengthBytes, _, _, _))
+      .WillRepeatedly(Return(0));
+  EXPECT_CALL(mock_decoder, PacketDuration(_, kPayloadLengthBytes))
+      .WillRepeatedly(Return(rtc::checked_cast<int>(kPayloadLengthSamples)));
+  // Packed duration when asking the decoder for more CNG data (without a new
+  // packet).
+  EXPECT_CALL(mock_decoder, PacketDuration(nullptr, 0))
+      .WillRepeatedly(Return(rtc::checked_cast<int>(kPayloadLengthSamples)));
+
+  // Pointee(x) verifies that first byte of the payload equals x, this makes it
+  // possible to verify that the correct payload is fed to Decode().
+  EXPECT_CALL(mock_decoder, DecodeInternal(Pointee(0), kPayloadLengthBytes,
+                                           kSampleRateKhz * 1000, _, _))
+      .WillOnce(DoAll(SetArrayArgument<3>(dummy_output,
+                                          dummy_output + kPayloadLengthSamples),
+                      SetArgPointee<4>(AudioDecoder::kSpeech),
+                      Return(rtc::checked_cast<int>(kPayloadLengthSamples))));
+
+  EXPECT_CALL(mock_decoder, DecodeInternal(Pointee(1), kPayloadLengthBytes,
+                                           kSampleRateKhz * 1000, _, _))
+      .WillOnce(DoAll(SetArrayArgument<3>(dummy_output,
+                                          dummy_output + kPayloadLengthSamples),
+                      SetArgPointee<4>(AudioDecoder::kComfortNoise),
+                      Return(rtc::checked_cast<int>(kPayloadLengthSamples))));
+
+  EXPECT_CALL(mock_decoder,
+              DecodeInternal(IsNull(), 0, kSampleRateKhz * 1000, _, _))
+      .WillOnce(DoAll(SetArrayArgument<3>(dummy_output,
+                                          dummy_output + kPayloadLengthSamples),
+                      SetArgPointee<4>(AudioDecoder::kComfortNoise),
+                      Return(rtc::checked_cast<int>(kPayloadLengthSamples))));
+
+  EXPECT_CALL(mock_decoder, DecodeInternal(Pointee(2), kPayloadLengthBytes,
+                                           kSampleRateKhz * 1000, _, _))
+      .WillOnce(DoAll(SetArrayArgument<3>(dummy_output,
+                                          dummy_output + kPayloadLengthSamples),
+                      SetArgPointee<4>(AudioDecoder::kSpeech),
+                      Return(rtc::checked_cast<int>(kPayloadLengthSamples))));
+
+  EXPECT_EQ(NetEq::kOK, neteq_->RegisterExternalDecoder(
+                            &mock_decoder, NetEqDecoder::kDecoderOpus,
+                            "dummy name", kPayloadType));
+
+  // Insert one packet (decoder will return speech).
+  EXPECT_EQ(NetEq::kOK,
+            neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
+
+  // Insert second packet (decoder will return CNG).
+  payload[0] = 1;
+  rtp_header.sequenceNumber++;
+  rtp_header.timestamp += kPayloadLengthSamples;
+  EXPECT_EQ(NetEq::kOK,
+            neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
+
+  const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateKhz);
+  AudioFrame output;
+  AudioFrame::SpeechType expected_type[8] = {
+      AudioFrame::kNormalSpeech, AudioFrame::kNormalSpeech,
+      AudioFrame::kCNG, AudioFrame::kCNG,
+      AudioFrame::kCNG, AudioFrame::kCNG,
+      AudioFrame::kNormalSpeech, AudioFrame::kNormalSpeech
+  };
+  int expected_timestamp_increment[8] = {
+      -1,  // will not be used.
+      10 * kSampleRateKhz,
+      -1, -1,  // timestamp will be empty during CNG mode; indicated by -1 here.
+      -1, -1,
+      50 * kSampleRateKhz, 10 * kSampleRateKhz
+  };
+
+  bool muted;
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+  rtc::Optional<uint32_t> last_timestamp = neteq_->GetPlayoutTimestamp();
+  ASSERT_TRUE(last_timestamp);
+
+  // Lambda for verifying the timestamps.
+  auto verify_timestamp = [&last_timestamp, &expected_timestamp_increment](
+      rtc::Optional<uint32_t> ts, size_t i) {
+    if (expected_timestamp_increment[i] == -1) {
+      // Expect to get an empty timestamp value during CNG and PLC.
+      EXPECT_FALSE(ts) << "i = " << i;
+    } else {
+      ASSERT_TRUE(ts) << "i = " << i;
+      EXPECT_EQ(*ts, *last_timestamp + expected_timestamp_increment[i])
+          << "i = " << i;
+      last_timestamp = ts;
+    }
+  };
+
+  for (size_t i = 1; i < 6; ++i) {
+    ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
+    EXPECT_EQ(1u, output.num_channels_);
+    EXPECT_EQ(expected_type[i - 1], output.speech_type_);
+    EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+    SCOPED_TRACE("");
+    verify_timestamp(neteq_->GetPlayoutTimestamp(), i);
+  }
+
+  // Insert third packet, which leaves a gap from last packet.
+  payload[0] = 2;
+  rtp_header.sequenceNumber += 2;
+  rtp_header.timestamp += 2 * kPayloadLengthSamples;
+  EXPECT_EQ(NetEq::kOK,
+            neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
+
+  for (size_t i = 6; i < 8; ++i) {
+    ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
+    EXPECT_EQ(1u, output.num_channels_);
+    EXPECT_EQ(expected_type[i - 1], output.speech_type_);
+    EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+    SCOPED_TRACE("");
+    verify_timestamp(neteq_->GetPlayoutTimestamp(), i);
+  }
+
+  // Now check the packet buffer, and make sure it is empty.
+  EXPECT_TRUE(packet_buffer_->Empty());
+
+  EXPECT_CALL(mock_decoder, Die());
+}
+
+TEST_F(NetEqImplTest, UnsupportedDecoder) {
+  UseNoMocks();
+  CreateInstance();
+  static const size_t kNetEqMaxFrameSize = 5760;  // 120 ms @ 48 kHz.
+  static const size_t kChannels = 2;
+
+  const uint8_t kPayloadType = 17;   // Just an arbitrary number.
+  const uint32_t kReceiveTime = 17;  // Value doesn't matter for this test.
+  const int kSampleRateHz = 8000;
+
+  const size_t kPayloadLengthSamples =
+      static_cast<size_t>(10 * kSampleRateHz / 1000);  // 10 ms.
+  const size_t kPayloadLengthBytes = 1;
+  uint8_t payload[kPayloadLengthBytes] = {0};
+  int16_t dummy_output[kPayloadLengthSamples * kChannels] = {0};
+  RTPHeader rtp_header;
+  rtp_header.payloadType = kPayloadType;
+  rtp_header.sequenceNumber = 0x1234;
+  rtp_header.timestamp = 0x12345678;
+  rtp_header.ssrc = 0x87654321;
+
+  ::testing::NiceMock<MockAudioDecoder> decoder;
+
+  const uint8_t kFirstPayloadValue = 1;
+  const uint8_t kSecondPayloadValue = 2;
+
+  EXPECT_CALL(decoder,
+              PacketDuration(Pointee(kFirstPayloadValue), kPayloadLengthBytes))
+      .Times(AtLeast(1))
+      .WillRepeatedly(Return(rtc::checked_cast<int>(kNetEqMaxFrameSize + 1)));
+
+  EXPECT_CALL(decoder, DecodeInternal(Pointee(kFirstPayloadValue), _, _, _, _))
+      .Times(0);
+
+  EXPECT_CALL(decoder, DecodeInternal(Pointee(kSecondPayloadValue),
+                                      kPayloadLengthBytes, kSampleRateHz, _, _))
+      .Times(1)
+      .WillOnce(DoAll(
+          SetArrayArgument<3>(dummy_output,
+                              dummy_output + kPayloadLengthSamples * kChannels),
+          SetArgPointee<4>(AudioDecoder::kSpeech),
+          Return(static_cast<int>(kPayloadLengthSamples * kChannels))));
+
+  EXPECT_CALL(decoder,
+              PacketDuration(Pointee(kSecondPayloadValue), kPayloadLengthBytes))
+      .Times(AtLeast(1))
+      .WillRepeatedly(Return(rtc::checked_cast<int>(kNetEqMaxFrameSize)));
+
+  EXPECT_CALL(decoder, SampleRateHz())
+      .WillRepeatedly(Return(kSampleRateHz));
+
+  EXPECT_CALL(decoder, Channels())
+      .WillRepeatedly(Return(kChannels));
+
+  EXPECT_EQ(NetEq::kOK, neteq_->RegisterExternalDecoder(
+                            &decoder, NetEqDecoder::kDecoderPCM16B,
+                            "dummy name", kPayloadType));
+
+  // Insert one packet.
+  payload[0] = kFirstPayloadValue;  // This will make Decode() fail.
+  EXPECT_EQ(NetEq::kOK,
+            neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
+
+  // Insert another packet.
+  payload[0] = kSecondPayloadValue;  // This will make Decode() successful.
+  rtp_header.sequenceNumber++;
+  // The second timestamp needs to be at least 30 ms after the first to make
+  // the second packet get decoded.
+  rtp_header.timestamp += 3 * kPayloadLengthSamples;
+  EXPECT_EQ(NetEq::kOK,
+            neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
+
+  AudioFrame output;
+  bool muted;
+  // First call to GetAudio will try to decode the "faulty" packet.
+  // Expect kFail return value.
+  EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&output, &muted));
+  // Output size and number of channels should be correct.
+  const size_t kExpectedOutputSize = 10 * (kSampleRateHz / 1000) * kChannels;
+  EXPECT_EQ(kExpectedOutputSize, output.samples_per_channel_ * kChannels);
+  EXPECT_EQ(kChannels, output.num_channels_);
+
+  // Second call to GetAudio will decode the packet that is ok. No errors are
+  // expected.
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+  EXPECT_EQ(kExpectedOutputSize, output.samples_per_channel_ * kChannels);
+  EXPECT_EQ(kChannels, output.num_channels_);
+
+  // Die isn't called through NiceMock (since it's called by the
+  // MockAudioDecoder constructor), so it needs to be mocked explicitly.
+  EXPECT_CALL(decoder, Die());
+}
+
+// This test inserts packets until the buffer is flushed. After that, it asks
+// NetEq for the network statistics. The purpose of the test is to make sure
+// that even though the buffer size increment is negative (which it becomes when
+// the packet causing a flush is inserted), the packet length stored in the
+// decision logic remains valid.
+TEST_F(NetEqImplTest, FloodBufferAndGetNetworkStats) {
+  UseNoMocks();
+  CreateInstance();
+
+  const size_t kPayloadLengthSamples = 80;
+  const size_t kPayloadLengthBytes = 2 * kPayloadLengthSamples;  // PCM 16-bit.
+  const uint8_t kPayloadType = 17;   // Just an arbitrary number.
+  const uint32_t kReceiveTime = 17;  // Value doesn't matter for this test.
+  uint8_t payload[kPayloadLengthBytes] = {0};
+  RTPHeader rtp_header;
+  rtp_header.payloadType = kPayloadType;
+  rtp_header.sequenceNumber = 0x1234;
+  rtp_header.timestamp = 0x12345678;
+  rtp_header.ssrc = 0x87654321;
+
+  EXPECT_EQ(NetEq::kOK, neteq_->RegisterPayloadType(
+                            NetEqDecoder::kDecoderPCM16B, "", kPayloadType));
+
+  // Insert packets until the buffer flushes.
+  for (size_t i = 0; i <= config_.max_packets_in_buffer; ++i) {
+    EXPECT_EQ(i, packet_buffer_->NumPacketsInBuffer());
+    EXPECT_EQ(NetEq::kOK,
+              neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
+    rtp_header.timestamp += rtc::checked_cast<uint32_t>(kPayloadLengthSamples);
+    ++rtp_header.sequenceNumber;
+  }
+  EXPECT_EQ(1u, packet_buffer_->NumPacketsInBuffer());
+
+  // Ask for network statistics. This should not crash.
+  NetEqNetworkStatistics stats;
+  EXPECT_EQ(NetEq::kOK, neteq_->NetworkStatistics(&stats));
+}
+
+TEST_F(NetEqImplTest, DecodedPayloadTooShort) {
+  UseNoMocks();
+  CreateInstance();
+
+  const uint8_t kPayloadType = 17;   // Just an arbitrary number.
+  const uint32_t kReceiveTime = 17;  // Value doesn't matter for this test.
+  const int kSampleRateHz = 8000;
+  const size_t kPayloadLengthSamples =
+      static_cast<size_t>(10 * kSampleRateHz / 1000);  // 10 ms.
+  const size_t kPayloadLengthBytes = 2 * kPayloadLengthSamples;
+  uint8_t payload[kPayloadLengthBytes] = {0};
+  RTPHeader rtp_header;
+  rtp_header.payloadType = kPayloadType;
+  rtp_header.sequenceNumber = 0x1234;
+  rtp_header.timestamp = 0x12345678;
+  rtp_header.ssrc = 0x87654321;
+
+  // Create a mock decoder object.
+  MockAudioDecoder mock_decoder;
+  EXPECT_CALL(mock_decoder, Reset()).WillRepeatedly(Return());
+  EXPECT_CALL(mock_decoder, SampleRateHz())
+      .WillRepeatedly(Return(kSampleRateHz));
+  EXPECT_CALL(mock_decoder, Channels()).WillRepeatedly(Return(1));
+  EXPECT_CALL(mock_decoder, IncomingPacket(_, kPayloadLengthBytes, _, _, _))
+      .WillRepeatedly(Return(0));
+  EXPECT_CALL(mock_decoder, PacketDuration(_, _))
+      .WillRepeatedly(Return(rtc::checked_cast<int>(kPayloadLengthSamples)));
+  int16_t dummy_output[kPayloadLengthSamples] = {0};
+  // The below expectation will make the mock decoder write
+  // |kPayloadLengthSamples| - 5 zeros to the output array, and mark it as
+  // speech. That is, the decoded length is 5 samples shorter than the expected.
+  EXPECT_CALL(mock_decoder,
+              DecodeInternal(_, kPayloadLengthBytes, kSampleRateHz, _, _))
+      .WillOnce(
+          DoAll(SetArrayArgument<3>(dummy_output,
+                                    dummy_output + kPayloadLengthSamples - 5),
+                SetArgPointee<4>(AudioDecoder::kSpeech),
+                Return(rtc::checked_cast<int>(kPayloadLengthSamples - 5))));
+  EXPECT_EQ(NetEq::kOK, neteq_->RegisterExternalDecoder(
+                            &mock_decoder, NetEqDecoder::kDecoderPCM16B,
+                            "dummy name", kPayloadType));
+
+  // Insert one packet.
+  EXPECT_EQ(NetEq::kOK,
+            neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
+
+  EXPECT_EQ(5u, neteq_->sync_buffer_for_test()->FutureLength());
+
+  // Pull audio once.
+  const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
+  AudioFrame output;
+  bool muted;
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+  ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
+  EXPECT_EQ(1u, output.num_channels_);
+  EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
+
+  EXPECT_CALL(mock_decoder, Die());
+}
+
+// This test checks the behavior of NetEq when audio decoder fails.
+TEST_F(NetEqImplTest, DecodingError) {
+  UseNoMocks();
+  CreateInstance();
+
+  const uint8_t kPayloadType = 17;   // Just an arbitrary number.
+  const uint32_t kReceiveTime = 17;  // Value doesn't matter for this test.
+  const int kSampleRateHz = 8000;
+  const int kDecoderErrorCode = -97;  // Any negative number.
+
+  // We let decoder return 5 ms each time, and therefore, 2 packets make 10 ms.
+  const size_t kFrameLengthSamples =
+      static_cast<size_t>(5 * kSampleRateHz / 1000);
+
+  const size_t kPayloadLengthBytes = 1;  // This can be arbitrary.
+
+  uint8_t payload[kPayloadLengthBytes] = {0};
+
+  RTPHeader rtp_header;
+  rtp_header.payloadType = kPayloadType;
+  rtp_header.sequenceNumber = 0x1234;
+  rtp_header.timestamp = 0x12345678;
+  rtp_header.ssrc = 0x87654321;
+
+  // Create a mock decoder object.
+  MockAudioDecoder mock_decoder;
+  EXPECT_CALL(mock_decoder, Reset()).WillRepeatedly(Return());
+  EXPECT_CALL(mock_decoder, SampleRateHz())
+      .WillRepeatedly(Return(kSampleRateHz));
+  EXPECT_CALL(mock_decoder, Channels()).WillRepeatedly(Return(1));
+  EXPECT_CALL(mock_decoder, IncomingPacket(_, kPayloadLengthBytes, _, _, _))
+      .WillRepeatedly(Return(0));
+  EXPECT_CALL(mock_decoder, PacketDuration(_, _))
+      .WillRepeatedly(Return(rtc::checked_cast<int>(kFrameLengthSamples)));
+  EXPECT_CALL(mock_decoder, ErrorCode())
+      .WillOnce(Return(kDecoderErrorCode));
+  EXPECT_CALL(mock_decoder, HasDecodePlc())
+      .WillOnce(Return(false));
+  int16_t dummy_output[kFrameLengthSamples] = {0};
+
+  {
+    InSequence sequence;  // Dummy variable.
+    // Mock decoder works normally the first time.
+    EXPECT_CALL(mock_decoder,
+                DecodeInternal(_, kPayloadLengthBytes, kSampleRateHz, _, _))
+        .Times(3)
+        .WillRepeatedly(
+            DoAll(SetArrayArgument<3>(dummy_output,
+                                      dummy_output + kFrameLengthSamples),
+                  SetArgPointee<4>(AudioDecoder::kSpeech),
+                  Return(rtc::checked_cast<int>(kFrameLengthSamples))))
+        .RetiresOnSaturation();
+
+    // Then mock decoder fails. A common reason for failure can be buffer being
+    // too short
+    EXPECT_CALL(mock_decoder,
+                DecodeInternal(_, kPayloadLengthBytes, kSampleRateHz, _, _))
+        .WillOnce(Return(-1))
+        .RetiresOnSaturation();
+
+    // Mock decoder finally returns to normal.
+    EXPECT_CALL(mock_decoder,
+                DecodeInternal(_, kPayloadLengthBytes, kSampleRateHz, _, _))
+        .Times(2)
+        .WillRepeatedly(
+            DoAll(SetArrayArgument<3>(dummy_output,
+                                      dummy_output + kFrameLengthSamples),
+                  SetArgPointee<4>(AudioDecoder::kSpeech),
+                  Return(rtc::checked_cast<int>(kFrameLengthSamples))));
+  }
+
+  EXPECT_EQ(NetEq::kOK, neteq_->RegisterExternalDecoder(
+                            &mock_decoder, NetEqDecoder::kDecoderPCM16B,
+                            "dummy name", kPayloadType));
+
+  // Insert packets.
+  for (int i = 0; i < 6; ++i) {
+    rtp_header.sequenceNumber += 1;
+    rtp_header.timestamp += kFrameLengthSamples;
+    EXPECT_EQ(NetEq::kOK,
+              neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
+  }
+
+  // Pull audio.
+  const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
+  AudioFrame output;
+  bool muted;
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+  EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
+  EXPECT_EQ(1u, output.num_channels_);
+  EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
+
+  // Pull audio again. Decoder fails.
+  EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&output, &muted));
+  EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
+  EXPECT_EQ(1u, output.num_channels_);
+  // We are not expecting anything for output.speech_type_, since an error was
+  // returned.
+
+  // Pull audio again, should continue an expansion.
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+  EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
+  EXPECT_EQ(1u, output.num_channels_);
+  EXPECT_EQ(AudioFrame::kPLC, output.speech_type_);
+
+  // Pull audio again, should behave normal.
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+  EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
+  EXPECT_EQ(1u, output.num_channels_);
+  EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
+
+  EXPECT_CALL(mock_decoder, Die());
+}
+
+// This test checks the behavior of NetEq when audio decoder fails during CNG.
+TEST_F(NetEqImplTest, DecodingErrorDuringInternalCng) {
+  UseNoMocks();
+  CreateInstance();
+
+  const uint8_t kPayloadType = 17;   // Just an arbitrary number.
+  const uint32_t kReceiveTime = 17;  // Value doesn't matter for this test.
+  const int kSampleRateHz = 8000;
+  const int kDecoderErrorCode = -97;  // Any negative number.
+
+  // We let decoder return 5 ms each time, and therefore, 2 packets make 10 ms.
+  const size_t kFrameLengthSamples =
+      static_cast<size_t>(5 * kSampleRateHz / 1000);
+
+  const size_t kPayloadLengthBytes = 1;  // This can be arbitrary.
+
+  uint8_t payload[kPayloadLengthBytes] = {0};
+
+  RTPHeader rtp_header;
+  rtp_header.payloadType = kPayloadType;
+  rtp_header.sequenceNumber = 0x1234;
+  rtp_header.timestamp = 0x12345678;
+  rtp_header.ssrc = 0x87654321;
+
+  // Create a mock decoder object.
+  MockAudioDecoder mock_decoder;
+  EXPECT_CALL(mock_decoder, Reset()).WillRepeatedly(Return());
+  EXPECT_CALL(mock_decoder, SampleRateHz())
+      .WillRepeatedly(Return(kSampleRateHz));
+  EXPECT_CALL(mock_decoder, Channels()).WillRepeatedly(Return(1));
+  EXPECT_CALL(mock_decoder, IncomingPacket(_, kPayloadLengthBytes, _, _, _))
+      .WillRepeatedly(Return(0));
+  EXPECT_CALL(mock_decoder, PacketDuration(_, _))
+      .WillRepeatedly(Return(rtc::checked_cast<int>(kFrameLengthSamples)));
+  EXPECT_CALL(mock_decoder, ErrorCode())
+      .WillOnce(Return(kDecoderErrorCode));
+  int16_t dummy_output[kFrameLengthSamples] = {0};
+
+  {
+    InSequence sequence;  // Dummy variable.
+    // Mock decoder works normally the first 2 times.
+    EXPECT_CALL(mock_decoder,
+                DecodeInternal(_, kPayloadLengthBytes, kSampleRateHz, _, _))
+        .Times(2)
+        .WillRepeatedly(
+            DoAll(SetArrayArgument<3>(dummy_output,
+                                      dummy_output + kFrameLengthSamples),
+                  SetArgPointee<4>(AudioDecoder::kComfortNoise),
+                  Return(rtc::checked_cast<int>(kFrameLengthSamples))))
+        .RetiresOnSaturation();
+
+    // Then mock decoder fails. A common reason for failure can be buffer being
+    // too short
+    EXPECT_CALL(mock_decoder, DecodeInternal(nullptr, 0, kSampleRateHz, _, _))
+        .WillOnce(Return(-1))
+        .RetiresOnSaturation();
+
+    // Mock decoder finally returns to normal.
+    EXPECT_CALL(mock_decoder, DecodeInternal(nullptr, 0, kSampleRateHz, _, _))
+        .Times(2)
+        .WillRepeatedly(
+            DoAll(SetArrayArgument<3>(dummy_output,
+                                      dummy_output + kFrameLengthSamples),
+                  SetArgPointee<4>(AudioDecoder::kComfortNoise),
+                  Return(rtc::checked_cast<int>(kFrameLengthSamples))));
+  }
+
+  EXPECT_EQ(NetEq::kOK, neteq_->RegisterExternalDecoder(
+                            &mock_decoder, NetEqDecoder::kDecoderPCM16B,
+                            "dummy name", kPayloadType));
+
+  // Insert 2 packets. This will make netEq into codec internal CNG mode.
+  for (int i = 0; i < 2; ++i) {
+    rtp_header.sequenceNumber += 1;
+    rtp_header.timestamp += kFrameLengthSamples;
+    EXPECT_EQ(NetEq::kOK,
+              neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
+  }
+
+  // Pull audio.
+  const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
+  AudioFrame output;
+  bool muted;
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+  EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
+  EXPECT_EQ(1u, output.num_channels_);
+  EXPECT_EQ(AudioFrame::kCNG, output.speech_type_);
+
+  // Pull audio again. Decoder fails.
+  EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&output, &muted));
+  EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
+  EXPECT_EQ(1u, output.num_channels_);
+  // We are not expecting anything for output.speech_type_, since an error was
+  // returned.
+
+  // Pull audio again, should resume codec CNG.
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+  EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
+  EXPECT_EQ(1u, output.num_channels_);
+  EXPECT_EQ(AudioFrame::kCNG, output.speech_type_);
+
+  EXPECT_CALL(mock_decoder, Die());
+}
+
+// Tests that the return value from last_output_sample_rate_hz() is equal to the
+// configured inital sample rate.
+TEST_F(NetEqImplTest, InitialLastOutputSampleRate) {
+  UseNoMocks();
+  config_.sample_rate_hz = 48000;
+  CreateInstance();
+  EXPECT_EQ(48000, neteq_->last_output_sample_rate_hz());
+}
+
+TEST_F(NetEqImplTest, TickTimerIncrement) {
+  UseNoMocks();
+  CreateInstance();
+  ASSERT_TRUE(tick_timer_);
+  EXPECT_EQ(0u, tick_timer_->ticks());
+  AudioFrame output;
+  bool muted;
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+  EXPECT_EQ(1u, tick_timer_->ticks());
+}
+
+TEST_F(NetEqImplTest, TargetDelayMs) {
+  UseNoMocks();
+  use_mock_delay_manager_ = true;
+  CreateInstance();
+  // Let the dummy target delay be 17 packets.
+  constexpr int kTargetLevelPacketsQ8 = 17 << 8;
+  EXPECT_CALL(*mock_delay_manager_, TargetLevel())
+      .WillOnce(Return(kTargetLevelPacketsQ8));
+  // Default packet size before any packet has been decoded is 30 ms, so we are
+  // expecting 17 * 30 = 510 ms target delay.
+  EXPECT_EQ(17 * 30, neteq_->TargetDelayMs());
+}
+
+TEST_F(NetEqImplTest, InsertEmptyPacket) {
+  UseNoMocks();
+  use_mock_delay_manager_ = true;
+  CreateInstance();
+
+  RTPHeader rtp_header;
+  rtp_header.payloadType = 17;
+  rtp_header.sequenceNumber = 0x1234;
+  rtp_header.timestamp = 0x12345678;
+  rtp_header.ssrc = 0x87654321;
+
+  EXPECT_CALL(*mock_delay_manager_, RegisterEmptyPacket());
+  neteq_->InsertEmptyPacket(rtp_header);
+}
+
+class Decoder120ms : public AudioDecoder {
+ public:
+  Decoder120ms(int sample_rate_hz, SpeechType speech_type)
+      : sample_rate_hz_(sample_rate_hz),
+        next_value_(1),
+        speech_type_(speech_type) {}
+
+  int DecodeInternal(const uint8_t* encoded,
+                     size_t encoded_len,
+                     int sample_rate_hz,
+                     int16_t* decoded,
+                     SpeechType* speech_type) override {
+    EXPECT_EQ(sample_rate_hz_, sample_rate_hz);
+    size_t decoded_len =
+        rtc::CheckedDivExact(sample_rate_hz, 1000) * 120 * Channels();
+    for (size_t i = 0; i < decoded_len; ++i) {
+      decoded[i] = next_value_++;
+    }
+    *speech_type = speech_type_;
+    return rtc::checked_cast<int>(decoded_len);
+  }
+
+  void Reset() override { next_value_ = 1; }
+  int SampleRateHz() const override { return sample_rate_hz_; }
+  size_t Channels() const override { return 2; }
+
+ private:
+  int sample_rate_hz_;
+  int16_t next_value_;
+  SpeechType speech_type_;
+};
+
+class NetEqImplTest120ms : public NetEqImplTest {
+ protected:
+  NetEqImplTest120ms() : NetEqImplTest() {}
+  virtual ~NetEqImplTest120ms() {}
+
+  void CreateInstanceNoMocks() {
+    UseNoMocks();
+    CreateInstance();
+  }
+
+  void CreateInstanceWithDelayManagerMock() {
+    UseNoMocks();
+    use_mock_delay_manager_ = true;
+    CreateInstance();
+  }
+
+  uint32_t timestamp_diff_between_packets() const {
+    return rtc::CheckedDivExact(kSamplingFreq_, 1000u) * 120;
+  }
+
+  uint32_t first_timestamp() const { return 10u; }
+
+  void GetFirstPacket() {
+    bool muted;
+    for (int i = 0; i < 12; i++) {
+      EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+      EXPECT_FALSE(muted);
+    }
+  }
+
+  void InsertPacket(uint32_t timestamp) {
+    RTPHeader rtp_header;
+    rtp_header.payloadType = kPayloadType;
+    rtp_header.sequenceNumber = sequence_number_;
+    rtp_header.timestamp = timestamp;
+    rtp_header.ssrc = 15;
+    const size_t kPayloadLengthBytes = 1;  // This can be arbitrary.
+    uint8_t payload[kPayloadLengthBytes] = {0};
+    EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload, 10));
+    sequence_number_++;
+  }
+
+  void Register120msCodec(AudioDecoder::SpeechType speech_type) {
+    decoder_.reset(new Decoder120ms(kSamplingFreq_, speech_type));
+    ASSERT_EQ(2u, decoder_->Channels());
+    EXPECT_EQ(NetEq::kOK, neteq_->RegisterExternalDecoder(
+                              decoder_.get(), NetEqDecoder::kDecoderOpus_2ch,
+                              "120ms codec", kPayloadType));
+  }
+
+  std::unique_ptr<Decoder120ms> decoder_;
+  AudioFrame output_;
+  const uint32_t kPayloadType = 17;
+  const uint32_t kSamplingFreq_ = 48000;
+  uint16_t sequence_number_ = 1;
+};
+
+TEST_F(NetEqImplTest120ms, AudioRepetition) {
+  config_.playout_mode = kPlayoutFax;
+  CreateInstanceNoMocks();
+  Register120msCodec(AudioDecoder::kSpeech);
+
+  InsertPacket(first_timestamp());
+  GetFirstPacket();
+
+  bool muted;
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+  EXPECT_EQ(kAudioRepetition, neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, AlternativePlc) {
+  config_.playout_mode = kPlayoutOff;
+  CreateInstanceNoMocks();
+  Register120msCodec(AudioDecoder::kSpeech);
+
+  InsertPacket(first_timestamp());
+  GetFirstPacket();
+
+  bool muted;
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+  EXPECT_EQ(kAlternativePlc, neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, CodecInternalCng) {
+  CreateInstanceNoMocks();
+  Register120msCodec(AudioDecoder::kComfortNoise);
+
+  InsertPacket(first_timestamp());
+  GetFirstPacket();
+
+  bool muted;
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+  EXPECT_EQ(kCodecInternalCng, neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, Normal) {
+  CreateInstanceNoMocks();
+  Register120msCodec(AudioDecoder::kSpeech);
+
+  InsertPacket(first_timestamp());
+  GetFirstPacket();
+
+  EXPECT_EQ(kNormal, neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, Merge) {
+  CreateInstanceWithDelayManagerMock();
+
+  Register120msCodec(AudioDecoder::kSpeech);
+  InsertPacket(first_timestamp());
+
+  GetFirstPacket();
+  bool muted;
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+
+  InsertPacket(first_timestamp() + 2 * timestamp_diff_between_packets());
+
+  // Delay manager reports a target level which should cause a Merge.
+  EXPECT_CALL(*mock_delay_manager_, TargetLevel()).WillOnce(Return(-10));
+
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+  EXPECT_EQ(kMerge, neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, Expand) {
+  CreateInstanceNoMocks();
+  Register120msCodec(AudioDecoder::kSpeech);
+
+  InsertPacket(first_timestamp());
+  GetFirstPacket();
+
+  bool muted;
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+  EXPECT_EQ(kExpand, neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, FastAccelerate) {
+  CreateInstanceWithDelayManagerMock();
+  Register120msCodec(AudioDecoder::kSpeech);
+
+  InsertPacket(first_timestamp());
+  GetFirstPacket();
+  InsertPacket(first_timestamp() + timestamp_diff_between_packets());
+
+  // Delay manager report buffer limit which should cause a FastAccelerate.
+  EXPECT_CALL(*mock_delay_manager_, BufferLimits(_, _))
+      .Times(1)
+      .WillOnce(DoAll(SetArgPointee<0>(0), SetArgPointee<1>(0)));
+
+  bool muted;
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+  EXPECT_EQ(kFastAccelerate, neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, PreemptiveExpand) {
+  CreateInstanceWithDelayManagerMock();
+  Register120msCodec(AudioDecoder::kSpeech);
+
+  InsertPacket(first_timestamp());
+  GetFirstPacket();
+
+  InsertPacket(first_timestamp() + timestamp_diff_between_packets());
+
+  // Delay manager report buffer limit which should cause a PreemptiveExpand.
+  EXPECT_CALL(*mock_delay_manager_, BufferLimits(_, _))
+      .Times(1)
+      .WillOnce(DoAll(SetArgPointee<0>(100), SetArgPointee<1>(100)));
+
+  bool muted;
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+  EXPECT_EQ(kPreemptiveExpand, neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, Accelerate) {
+  CreateInstanceWithDelayManagerMock();
+  Register120msCodec(AudioDecoder::kSpeech);
+
+  InsertPacket(first_timestamp());
+  GetFirstPacket();
+
+  InsertPacket(first_timestamp() + timestamp_diff_between_packets());
+
+  // Delay manager report buffer limit which should cause a Accelerate.
+  EXPECT_CALL(*mock_delay_manager_, BufferLimits(_, _))
+      .Times(1)
+      .WillOnce(DoAll(SetArgPointee<0>(1), SetArgPointee<1>(2)));
+
+  bool muted;
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+  EXPECT_EQ(kAccelerate, neteq_->last_operation_for_test());
+}
+
+}// namespace webrtc
diff --git a/modules/audio_coding/neteq/neteq_network_stats_unittest.cc b/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
new file mode 100644
index 0000000..334715f
--- /dev/null
+++ b/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
@@ -0,0 +1,336 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/neteq/tools/neteq_external_decoder_test.h"
+#include "modules/audio_coding/neteq/tools/rtp_generator.h"
+#include "modules/include/module_common_types.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace test {
+
+using ::testing::_;
+using ::testing::SetArgPointee;
+using ::testing::Return;
+
+class MockAudioDecoder final : public AudioDecoder {
+ public:
+  // TODO(nisse): Valid overrides commented out, because the gmock
+  // methods don't use any override declarations, and we want to avoid
+  // warnings from -Winconsistent-missing-override. See
+  // http://crbug.com/428099.
+  static const int kPacketDuration = 960;  // 48 kHz * 20 ms
+
+  MockAudioDecoder(int sample_rate_hz, size_t num_channels)
+      : sample_rate_hz_(sample_rate_hz),
+        num_channels_(num_channels),
+        fec_enabled_(false) {}
+  ~MockAudioDecoder() /* override */ { Die(); }
+  MOCK_METHOD0(Die, void());
+
+  MOCK_METHOD0(Reset, void());
+
+  class MockFrame : public AudioDecoder::EncodedAudioFrame {
+   public:
+    MockFrame(size_t num_channels) : num_channels_(num_channels) {}
+
+    size_t Duration() const override { return kPacketDuration; }
+
+    rtc::Optional<DecodeResult> Decode(
+        rtc::ArrayView<int16_t> decoded) const override {
+      const size_t output_size =
+          sizeof(int16_t) * kPacketDuration * num_channels_;
+      if (decoded.size() >= output_size) {
+        memset(decoded.data(), 0,
+               sizeof(int16_t) * kPacketDuration * num_channels_);
+        return DecodeResult{kPacketDuration * num_channels_, kSpeech};
+      } else {
+        ADD_FAILURE() << "Expected decoded.size() to be >= output_size ("
+                      << decoded.size() << " vs. " << output_size << ")";
+        return rtc::nullopt;
+      }
+    }
+
+   private:
+    const size_t num_channels_;
+  };
+
+  std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
+                                        uint32_t timestamp) /* override */ {
+    std::vector<ParseResult> results;
+    if (fec_enabled_) {
+      std::unique_ptr<MockFrame> fec_frame(new MockFrame(num_channels_));
+      results.emplace_back(timestamp - kPacketDuration, 1,
+                           std::move(fec_frame));
+    }
+
+    std::unique_ptr<MockFrame> frame(new MockFrame(num_channels_));
+    results.emplace_back(timestamp, 0, std::move(frame));
+    return results;
+  }
+
+  int PacketDuration(const uint8_t* encoded, size_t encoded_len) const
+  /* override */ {
+    ADD_FAILURE() << "Since going through ParsePayload, PacketDuration should "
+                     "never get called.";
+    return kPacketDuration;
+  }
+
+  bool PacketHasFec(
+      const uint8_t* encoded, size_t encoded_len) const /* override */ {
+    ADD_FAILURE() << "Since going through ParsePayload, PacketHasFec should "
+                     "never get called.";
+    return fec_enabled_;
+  }
+
+  int SampleRateHz() const /* override */ { return sample_rate_hz_; }
+
+  size_t Channels() const /* override */ { return num_channels_; }
+
+  void set_fec_enabled(bool enable_fec) { fec_enabled_ = enable_fec; }
+
+  bool fec_enabled() const { return fec_enabled_; }
+
+ protected:
+  int DecodeInternal(const uint8_t* encoded,
+                     size_t encoded_len,
+                     int sample_rate_hz,
+                     int16_t* decoded,
+                     SpeechType* speech_type) /* override */ {
+    ADD_FAILURE() << "Since going through ParsePayload, DecodeInternal should "
+                     "never get called.";
+    return -1;
+  }
+
+ private:
+  const int sample_rate_hz_;
+  const size_t num_channels_;
+  bool fec_enabled_;
+};
+
+class NetEqNetworkStatsTest : public NetEqExternalDecoderTest {
+ public:
+  static const int kPayloadSizeByte = 30;
+  static const int kFrameSizeMs = 20;
+
+enum logic {
+  kIgnore,
+  kEqual,
+  kSmallerThan,
+  kLargerThan,
+};
+
+struct NetEqNetworkStatsCheck {
+  logic current_buffer_size_ms;
+  logic preferred_buffer_size_ms;
+  logic jitter_peaks_found;
+  logic packet_loss_rate;
+  logic expand_rate;
+  logic speech_expand_rate;
+  logic preemptive_rate;
+  logic accelerate_rate;
+  logic secondary_decoded_rate;
+  logic secondary_discarded_rate;
+  logic clockdrift_ppm;
+  logic added_zero_samples;
+  NetEqNetworkStatistics stats_ref;
+};
+
+NetEqNetworkStatsTest(NetEqDecoder codec,
+                      int sample_rate_hz,
+                      MockAudioDecoder* decoder)
+    : NetEqExternalDecoderTest(codec, sample_rate_hz, decoder),
+      external_decoder_(decoder),
+      samples_per_ms_(sample_rate_hz / 1000),
+      frame_size_samples_(kFrameSizeMs * samples_per_ms_),
+      rtp_generator_(new test::RtpGenerator(samples_per_ms_)),
+      last_lost_time_(0),
+      packet_loss_interval_(0xffffffff) {
+  Init();
+  }
+
+  bool Lost(uint32_t send_time) {
+    if (send_time - last_lost_time_ >= packet_loss_interval_) {
+      last_lost_time_ = send_time;
+      return true;
+    }
+    return false;
+  }
+
+  void SetPacketLossRate(double loss_rate) {
+      packet_loss_interval_ = (loss_rate >= 1e-3 ?
+          static_cast<double>(kFrameSizeMs) / loss_rate : 0xffffffff);
+  }
+
+  // |stats_ref|
+  // expects.x = -1, do not care
+  // expects.x = 0, 'x' in current stats should equal 'x' in |stats_ref|
+  // expects.x = 1, 'x' in current stats should < 'x' in |stats_ref|
+  // expects.x = 2, 'x' in current stats should > 'x' in |stats_ref|
+  void CheckNetworkStatistics(NetEqNetworkStatsCheck expects) {
+    NetEqNetworkStatistics stats;
+    neteq()->NetworkStatistics(&stats);
+
+#define CHECK_NETEQ_NETWORK_STATS(x)\
+  switch (expects.x) {\
+    case kEqual:\
+      EXPECT_EQ(stats.x, expects.stats_ref.x);\
+      break;\
+    case kSmallerThan:\
+      EXPECT_LT(stats.x, expects.stats_ref.x);\
+      break;\
+    case kLargerThan:\
+      EXPECT_GT(stats.x, expects.stats_ref.x);\
+      break;\
+    default:\
+      break;\
+  }
+
+    CHECK_NETEQ_NETWORK_STATS(current_buffer_size_ms);
+    CHECK_NETEQ_NETWORK_STATS(preferred_buffer_size_ms);
+    CHECK_NETEQ_NETWORK_STATS(jitter_peaks_found);
+    CHECK_NETEQ_NETWORK_STATS(packet_loss_rate);
+    CHECK_NETEQ_NETWORK_STATS(expand_rate);
+    CHECK_NETEQ_NETWORK_STATS(speech_expand_rate);
+    CHECK_NETEQ_NETWORK_STATS(preemptive_rate);
+    CHECK_NETEQ_NETWORK_STATS(accelerate_rate);
+    CHECK_NETEQ_NETWORK_STATS(secondary_decoded_rate);
+    CHECK_NETEQ_NETWORK_STATS(secondary_discarded_rate);
+    CHECK_NETEQ_NETWORK_STATS(clockdrift_ppm);
+    CHECK_NETEQ_NETWORK_STATS(added_zero_samples);
+
+#undef CHECK_NETEQ_NETWORK_STATS
+
+    // Compare with CurrentDelay, which should be identical.
+    EXPECT_EQ(stats.current_buffer_size_ms, neteq()->CurrentDelayMs());
+  }
+
+  void RunTest(int num_loops, NetEqNetworkStatsCheck expects) {
+    uint32_t time_now;
+    uint32_t next_send_time;
+
+    // Initiate |last_lost_time_|.
+    time_now = next_send_time = last_lost_time_ =
+        rtp_generator_->GetRtpHeader(kPayloadType, frame_size_samples_,
+                                     &rtp_header_);
+    for (int k = 0; k < num_loops; ++k) {
+      // Delay by one frame such that the FEC can come in.
+      while (time_now + kFrameSizeMs >= next_send_time) {
+        next_send_time = rtp_generator_->GetRtpHeader(kPayloadType,
+                                                      frame_size_samples_,
+                                                      &rtp_header_);
+        if (!Lost(next_send_time)) {
+          static const uint8_t payload[kPayloadSizeByte] = {0};
+          InsertPacket(rtp_header_, payload, next_send_time);
+        }
+      }
+      GetOutputAudio(&output_frame_);
+      time_now += kOutputLengthMs;
+    }
+    CheckNetworkStatistics(expects);
+    neteq()->FlushBuffers();
+  }
+
+  void DecodeFecTest() {
+    external_decoder_->set_fec_enabled(false);
+    NetEqNetworkStatsCheck expects = {
+      kIgnore,  // current_buffer_size_ms
+      kIgnore,  // preferred_buffer_size_ms
+      kIgnore,  // jitter_peaks_found
+      kEqual,  // packet_loss_rate
+      kEqual,  // expand_rate
+      kEqual,  // voice_expand_rate
+      kIgnore,  // preemptive_rate
+      kEqual,  // accelerate_rate
+      kEqual,  // decoded_fec_rate
+      kEqual,  // discarded_fec_rate
+      kIgnore,  // clockdrift_ppm
+      kEqual,  // added_zero_samples
+      {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+    };
+    RunTest(50, expects);
+
+    // Next we introduce packet losses.
+    SetPacketLossRate(0.1);
+    expects.stats_ref.packet_loss_rate = 1337;
+    expects.stats_ref.expand_rate = expects.stats_ref.speech_expand_rate = 1065;
+    RunTest(50, expects);
+
+    // Next we enable FEC.
+    external_decoder_->set_fec_enabled(true);
+    // If FEC fills in the lost packets, no packet loss will be counted.
+    expects.stats_ref.packet_loss_rate = 0;
+    expects.stats_ref.expand_rate = expects.stats_ref.speech_expand_rate = 0;
+    expects.stats_ref.secondary_decoded_rate = 2006;
+    expects.stats_ref.secondary_discarded_rate = 14336;
+    RunTest(50, expects);
+  }
+
+  void NoiseExpansionTest() {
+    NetEqNetworkStatsCheck expects = {
+      kIgnore,  // current_buffer_size_ms
+      kIgnore,  // preferred_buffer_size_ms
+      kIgnore,  // jitter_peaks_found
+      kEqual,  // packet_loss_rate
+      kEqual,  // expand_rate
+      kEqual,  // speech_expand_rate
+      kIgnore,  // preemptive_rate
+      kEqual,  // accelerate_rate
+      kEqual,  // decoded_fec_rate
+      kEqual,  // discard_fec_rate
+      kIgnore,  // clockdrift_ppm
+      kEqual,  // added_zero_samples
+      {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+    };
+    RunTest(50, expects);
+
+    SetPacketLossRate(1);
+    expects.stats_ref.expand_rate = 16384;
+    expects.stats_ref.speech_expand_rate = 5324;
+    RunTest(10, expects);  // Lost 10 * 20ms in a row.
+  }
+
+ private:
+  MockAudioDecoder* external_decoder_;
+  const int samples_per_ms_;
+  const size_t frame_size_samples_;
+  std::unique_ptr<test::RtpGenerator> rtp_generator_;
+  RTPHeader rtp_header_;
+  uint32_t last_lost_time_;
+  uint32_t packet_loss_interval_;
+  AudioFrame output_frame_;
+};
+
+TEST(NetEqNetworkStatsTest, DecodeFec) {
+  MockAudioDecoder decoder(48000, 1);
+  NetEqNetworkStatsTest test(NetEqDecoder::kDecoderOpus, 48000, &decoder);
+  test.DecodeFecTest();
+  EXPECT_CALL(decoder, Die()).Times(1);
+}
+
+TEST(NetEqNetworkStatsTest, StereoDecodeFec) {
+  MockAudioDecoder decoder(48000, 2);
+  NetEqNetworkStatsTest test(NetEqDecoder::kDecoderOpus, 48000, &decoder);
+  test.DecodeFecTest();
+  EXPECT_CALL(decoder, Die()).Times(1);
+}
+
+TEST(NetEqNetworkStatsTest, NoiseExpansionTest) {
+  MockAudioDecoder decoder(48000, 1);
+  NetEqNetworkStatsTest test(NetEqDecoder::kDecoderOpus, 48000, &decoder);
+  test.NoiseExpansionTest();
+  EXPECT_CALL(decoder, Die()).Times(1);
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/neteq_stereo_unittest.cc b/modules/audio_coding/neteq/neteq_stereo_unittest.cc
new file mode 100644
index 0000000..1bef9c8
--- /dev/null
+++ b/modules/audio_coding/neteq/neteq_stereo_unittest.cc
@@ -0,0 +1,442 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Test to verify correct stereo and multi-channel operation.
+
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <list>
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
+#include "modules/audio_coding/neteq/include/neteq.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "modules/audio_coding/neteq/tools/rtp_generator.h"
+#include "modules/include/module_common_types.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+struct TestParameters {
+  int frame_size;
+  int sample_rate;
+  size_t num_channels;
+};
+
+// This is a parameterized test. The test parameters are supplied through a
+// TestParameters struct, which is obtained through the GetParam() method.
+//
+// The objective of the test is to create a mono input signal and a
+// multi-channel input signal, where each channel is identical to the mono
+// input channel. The two input signals are processed through their respective
+// NetEq instances. After that, the output signals are compared. The expected
+// result is that each channel in the multi-channel output is identical to the
+// mono output.
+class NetEqStereoTest : public ::testing::TestWithParam<TestParameters> {
+ protected:
+  static const int kTimeStepMs = 10;
+  static const size_t kMaxBlockSize = 480;  // 10 ms @ 48 kHz.
+  static const uint8_t kPayloadTypeMono = 95;
+  static const uint8_t kPayloadTypeMulti = 96;
+
+  NetEqStereoTest()
+      : num_channels_(GetParam().num_channels),
+        sample_rate_hz_(GetParam().sample_rate),
+        samples_per_ms_(sample_rate_hz_ / 1000),
+        frame_size_ms_(GetParam().frame_size),
+        frame_size_samples_(
+            static_cast<size_t>(frame_size_ms_ * samples_per_ms_)),
+        output_size_samples_(10 * samples_per_ms_),
+        rtp_generator_mono_(samples_per_ms_),
+        rtp_generator_(samples_per_ms_),
+        payload_size_bytes_(0),
+        multi_payload_size_bytes_(0),
+        last_send_time_(0),
+        last_arrival_time_(0) {
+    NetEq::Config config;
+    config.sample_rate_hz = sample_rate_hz_;
+    rtc::scoped_refptr<AudioDecoderFactory> factory =
+        CreateBuiltinAudioDecoderFactory();
+    neteq_mono_ = NetEq::Create(config, factory);
+    neteq_ = NetEq::Create(config, factory);
+    input_ = new int16_t[frame_size_samples_];
+    encoded_ = new uint8_t[2 * frame_size_samples_];
+    input_multi_channel_ = new int16_t[frame_size_samples_ * num_channels_];
+    encoded_multi_channel_ = new uint8_t[frame_size_samples_ * 2 *
+                                         num_channels_];
+  }
+
+  ~NetEqStereoTest() {
+    delete neteq_mono_;
+    delete neteq_;
+    delete [] input_;
+    delete [] encoded_;
+    delete [] input_multi_channel_;
+    delete [] encoded_multi_channel_;
+  }
+
+  virtual void SetUp() {
+    const std::string file_name =
+        webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+    input_file_.reset(new test::InputAudioFile(file_name));
+    NetEqDecoder mono_decoder;
+    NetEqDecoder multi_decoder;
+    switch (sample_rate_hz_) {
+      case 8000:
+        mono_decoder = NetEqDecoder::kDecoderPCM16B;
+        if (num_channels_ == 2) {
+          multi_decoder = NetEqDecoder::kDecoderPCM16B_2ch;
+        } else if (num_channels_ == 5) {
+          multi_decoder = NetEqDecoder::kDecoderPCM16B_5ch;
+        } else {
+          FAIL() << "Only 2 and 5 channels supported for 8000 Hz.";
+        }
+        break;
+      case 16000:
+        mono_decoder = NetEqDecoder::kDecoderPCM16Bwb;
+        if (num_channels_ == 2) {
+          multi_decoder = NetEqDecoder::kDecoderPCM16Bwb_2ch;
+        } else {
+          FAIL() << "More than 2 channels is not supported for 16000 Hz.";
+        }
+        break;
+      case 32000:
+        mono_decoder = NetEqDecoder::kDecoderPCM16Bswb32kHz;
+        if (num_channels_ == 2) {
+          multi_decoder = NetEqDecoder::kDecoderPCM16Bswb32kHz_2ch;
+        } else {
+          FAIL() << "More than 2 channels is not supported for 32000 Hz.";
+        }
+        break;
+      case 48000:
+        mono_decoder = NetEqDecoder::kDecoderPCM16Bswb48kHz;
+        if (num_channels_ == 2) {
+          multi_decoder = NetEqDecoder::kDecoderPCM16Bswb48kHz_2ch;
+        } else {
+          FAIL() << "More than 2 channels is not supported for 48000 Hz.";
+        }
+        break;
+      default:
+        FAIL() << "We shouldn't get here.";
+    }
+    ASSERT_EQ(NetEq::kOK, neteq_mono_->RegisterPayloadType(mono_decoder, "mono",
+                                                           kPayloadTypeMono));
+    ASSERT_EQ(NetEq::kOK,
+              neteq_->RegisterPayloadType(multi_decoder, "multi-channel",
+                                          kPayloadTypeMulti));
+  }
+
+  virtual void TearDown() {}
+
+  int GetNewPackets() {
+    if (!input_file_->Read(frame_size_samples_, input_)) {
+      return -1;
+    }
+    payload_size_bytes_ = WebRtcPcm16b_Encode(input_, frame_size_samples_,
+                                             encoded_);
+    if (frame_size_samples_ * 2 != payload_size_bytes_) {
+      return -1;
+    }
+    int next_send_time = rtp_generator_mono_.GetRtpHeader(kPayloadTypeMono,
+                                                          frame_size_samples_,
+                                                          &rtp_header_mono_);
+    test::InputAudioFile::DuplicateInterleaved(input_, frame_size_samples_,
+                                               num_channels_,
+                                               input_multi_channel_);
+    multi_payload_size_bytes_ = WebRtcPcm16b_Encode(
+        input_multi_channel_, frame_size_samples_ * num_channels_,
+        encoded_multi_channel_);
+    if (frame_size_samples_ * 2 * num_channels_ != multi_payload_size_bytes_) {
+      return -1;
+    }
+    rtp_generator_.GetRtpHeader(kPayloadTypeMulti, frame_size_samples_,
+                                &rtp_header_);
+    return next_send_time;
+  }
+
+  virtual void VerifyOutput(size_t num_samples) {
+    const int16_t* output_data = output_.data();
+    const int16_t* output_multi_channel_data = output_multi_channel_.data();
+    for (size_t i = 0; i < num_samples; ++i) {
+      for (size_t j = 0; j < num_channels_; ++j) {
+        ASSERT_EQ(output_data[i],
+                  output_multi_channel_data[i * num_channels_ + j])
+            << "Diff in sample " << i << ", channel " << j << ".";
+      }
+    }
+  }
+
+  virtual int GetArrivalTime(int send_time) {
+    int arrival_time = last_arrival_time_ + (send_time - last_send_time_);
+    last_send_time_ = send_time;
+    last_arrival_time_ = arrival_time;
+    return arrival_time;
+  }
+
+  virtual bool Lost() { return false; }
+
+  void RunTest(int num_loops) {
+    // Get next input packets (mono and multi-channel).
+    int next_send_time;
+    int next_arrival_time;
+    do {
+      next_send_time = GetNewPackets();
+      ASSERT_NE(-1, next_send_time);
+      next_arrival_time = GetArrivalTime(next_send_time);
+    } while (Lost());  // If lost, immediately read the next packet.
+
+    int time_now = 0;
+    for (int k = 0; k < num_loops; ++k) {
+      while (time_now >= next_arrival_time) {
+        // Insert packet in mono instance.
+        ASSERT_EQ(NetEq::kOK,
+                  neteq_mono_->InsertPacket(rtp_header_mono_,
+                                            rtc::ArrayView<const uint8_t>(
+                                                encoded_, payload_size_bytes_),
+                                            next_arrival_time));
+        // Insert packet in multi-channel instance.
+        ASSERT_EQ(NetEq::kOK,
+                  neteq_->InsertPacket(
+                      rtp_header_,
+                      rtc::ArrayView<const uint8_t>(encoded_multi_channel_,
+                                                    multi_payload_size_bytes_),
+                      next_arrival_time));
+        // Get next input packets (mono and multi-channel).
+        do {
+          next_send_time = GetNewPackets();
+          ASSERT_NE(-1, next_send_time);
+          next_arrival_time = GetArrivalTime(next_send_time);
+        } while (Lost());  // If lost, immediately read the next packet.
+      }
+      // Get audio from mono instance.
+      bool muted;
+      EXPECT_EQ(NetEq::kOK, neteq_mono_->GetAudio(&output_, &muted));
+      ASSERT_FALSE(muted);
+      EXPECT_EQ(1u, output_.num_channels_);
+      EXPECT_EQ(output_size_samples_, output_.samples_per_channel_);
+      // Get audio from multi-channel instance.
+      ASSERT_EQ(NetEq::kOK, neteq_->GetAudio(&output_multi_channel_, &muted));
+      ASSERT_FALSE(muted);
+      EXPECT_EQ(num_channels_, output_multi_channel_.num_channels_);
+      EXPECT_EQ(output_size_samples_,
+                output_multi_channel_.samples_per_channel_);
+      std::ostringstream ss;
+      ss << "Lap number " << k << ".";
+      SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
+      // Compare mono and multi-channel.
+      ASSERT_NO_FATAL_FAILURE(VerifyOutput(output_size_samples_));
+
+      time_now += kTimeStepMs;
+    }
+  }
+
+  const size_t num_channels_;
+  const int sample_rate_hz_;
+  const int samples_per_ms_;
+  const int frame_size_ms_;
+  const size_t frame_size_samples_;
+  const size_t output_size_samples_;
+  NetEq* neteq_mono_;
+  NetEq* neteq_;
+  test::RtpGenerator rtp_generator_mono_;
+  test::RtpGenerator rtp_generator_;
+  int16_t* input_;
+  int16_t* input_multi_channel_;
+  uint8_t* encoded_;
+  uint8_t* encoded_multi_channel_;
+  AudioFrame output_;
+  AudioFrame output_multi_channel_;
+  RTPHeader rtp_header_mono_;
+  RTPHeader rtp_header_;
+  size_t payload_size_bytes_;
+  size_t multi_payload_size_bytes_;
+  int last_send_time_;
+  int last_arrival_time_;
+  std::unique_ptr<test::InputAudioFile> input_file_;
+};
+
+class NetEqStereoTestNoJitter : public NetEqStereoTest {
+ protected:
+  NetEqStereoTestNoJitter()
+      : NetEqStereoTest() {
+    // Start the sender 100 ms before the receiver to pre-fill the buffer.
+    // This is to avoid doing preemptive expand early in the test.
+    // TODO(hlundin): Mock the decision making instead to control the modes.
+    last_arrival_time_ = -100;
+  }
+};
+
+TEST_P(NetEqStereoTestNoJitter, RunTest) {
+  RunTest(8);
+}
+
+class NetEqStereoTestPositiveDrift : public NetEqStereoTest {
+ protected:
+  NetEqStereoTestPositiveDrift()
+      : NetEqStereoTest(),
+        drift_factor(0.9) {
+    // Start the sender 100 ms before the receiver to pre-fill the buffer.
+    // This is to avoid doing preemptive expand early in the test.
+    // TODO(hlundin): Mock the decision making instead to control the modes.
+    last_arrival_time_ = -100;
+  }
+  virtual int GetArrivalTime(int send_time) {
+    int arrival_time = last_arrival_time_ +
+        drift_factor * (send_time - last_send_time_);
+    last_send_time_ = send_time;
+    last_arrival_time_ = arrival_time;
+    return arrival_time;
+  }
+
+  double drift_factor;
+};
+
+TEST_P(NetEqStereoTestPositiveDrift, RunTest) {
+  RunTest(100);
+}
+
+class NetEqStereoTestNegativeDrift : public NetEqStereoTestPositiveDrift {
+ protected:
+  NetEqStereoTestNegativeDrift()
+      : NetEqStereoTestPositiveDrift() {
+    drift_factor = 1.1;
+    last_arrival_time_ = 0;
+  }
+};
+
+TEST_P(NetEqStereoTestNegativeDrift, RunTest) {
+  RunTest(100);
+}
+
+class NetEqStereoTestDelays : public NetEqStereoTest {
+ protected:
+  static const int kDelayInterval = 10;
+  static const int kDelay = 1000;
+  NetEqStereoTestDelays()
+      : NetEqStereoTest(),
+        frame_index_(0) {
+  }
+
+  virtual int GetArrivalTime(int send_time) {
+    // Deliver immediately, unless we have a back-log.
+    int arrival_time = std::min(last_arrival_time_, send_time);
+    if (++frame_index_ % kDelayInterval == 0) {
+      // Delay this packet.
+      arrival_time += kDelay;
+    }
+    last_send_time_ = send_time;
+    last_arrival_time_ = arrival_time;
+    return arrival_time;
+  }
+
+  int frame_index_;
+};
+
+TEST_P(NetEqStereoTestDelays, RunTest) {
+  RunTest(1000);
+}
+
+class NetEqStereoTestLosses : public NetEqStereoTest {
+ protected:
+  static const int kLossInterval = 10;
+  NetEqStereoTestLosses()
+      : NetEqStereoTest(),
+        frame_index_(0) {
+  }
+
+  virtual bool Lost() {
+    return (++frame_index_) % kLossInterval == 0;
+  }
+
+  // TODO(hlundin): NetEq is not giving bitexact results for these cases.
+  virtual void VerifyOutput(size_t num_samples) {
+    for (size_t i = 0; i < num_samples; ++i) {
+      const int16_t* output_data = output_.data();
+      const int16_t* output_multi_channel_data = output_multi_channel_.data();
+      auto first_channel_sample =
+          output_multi_channel_data[i * num_channels_];
+      for (size_t j = 0; j < num_channels_; ++j) {
+        const int kErrorMargin = 200;
+        EXPECT_NEAR(output_data[i],
+                    output_multi_channel_data[i * num_channels_ + j],
+                    kErrorMargin)
+            << "Diff in sample " << i << ", channel " << j << ".";
+        EXPECT_EQ(first_channel_sample,
+                  output_multi_channel_data[i * num_channels_ + j]);
+      }
+    }
+  }
+
+  int frame_index_;
+};
+
+TEST_P(NetEqStereoTestLosses, RunTest) {
+  RunTest(100);
+}
+
+
+// Creates a list of parameter sets.
+std::list<TestParameters> GetTestParameters() {
+  std::list<TestParameters> l;
+  const int sample_rates[] = {8000, 16000, 32000};
+  const int num_rates = sizeof(sample_rates) / sizeof(sample_rates[0]);
+  // Loop through sample rates.
+  for (int rate_index = 0; rate_index < num_rates; ++rate_index) {
+    int sample_rate = sample_rates[rate_index];
+    // Loop through all frame sizes between 10 and 60 ms.
+    for (int frame_size = 10; frame_size <= 60; frame_size += 10) {
+      TestParameters p;
+      p.frame_size = frame_size;
+      p.sample_rate = sample_rate;
+      p.num_channels = 2;
+      l.push_back(p);
+      if (sample_rate == 8000) {
+        // Add a five-channel test for 8000 Hz.
+        p.num_channels = 5;
+        l.push_back(p);
+      }
+    }
+  }
+  return l;
+}
+
+// Pretty-printing the test parameters in case of an error.
+void PrintTo(const TestParameters& p, ::std::ostream* os) {
+  *os << "{frame_size = " << p.frame_size <<
+      ", num_channels = " << p.num_channels <<
+      ", sample_rate = " << p.sample_rate << "}";
+}
+
+// Instantiate the tests. Each test is instantiated using the function above,
+// so that all different parameter combinations are tested.
+INSTANTIATE_TEST_CASE_P(MultiChannel,
+                        NetEqStereoTestNoJitter,
+                        ::testing::ValuesIn(GetTestParameters()));
+
+INSTANTIATE_TEST_CASE_P(MultiChannel,
+                        NetEqStereoTestPositiveDrift,
+                        ::testing::ValuesIn(GetTestParameters()));
+
+INSTANTIATE_TEST_CASE_P(MultiChannel,
+                        NetEqStereoTestNegativeDrift,
+                        ::testing::ValuesIn(GetTestParameters()));
+
+INSTANTIATE_TEST_CASE_P(MultiChannel,
+                        NetEqStereoTestDelays,
+                        ::testing::ValuesIn(GetTestParameters()));
+
+INSTANTIATE_TEST_CASE_P(MultiChannel,
+                        NetEqStereoTestLosses,
+                        ::testing::ValuesIn(GetTestParameters()));
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/neteq_unittest.cc b/modules/audio_coding/neteq/neteq_unittest.cc
new file mode 100644
index 0000000..ca93cf5
--- /dev/null
+++ b/modules/audio_coding/neteq/neteq_unittest.cc
@@ -0,0 +1,1792 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/include/neteq.h"
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>  // memset
+
+#include <algorithm>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
+#include "modules/audio_coding/neteq/tools/audio_loop.h"
+#include "modules/audio_coding/neteq/tools/rtp_file_source.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/ignore_wundef.h"
+#include "rtc_base/messagedigest.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/protobuf_utils.h"
+#include "rtc_base/stringencode.h"
+#include "test/field_trial.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+// This must come after test/gtest.h
+#include "rtc_base/flags.h"  // NOLINT(build/include)
+
+#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
+RTC_PUSH_IGNORING_WUNDEF()
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_coding/neteq/neteq_unittest.pb.h"
+#else
+#include "modules/audio_coding/neteq/neteq_unittest.pb.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+#endif
+
+DEFINE_bool(gen_ref, false, "Generate reference files.");
+
+namespace webrtc {
+
+namespace {
+
+const std::string& PlatformChecksum(const std::string& checksum_general,
+                                    const std::string& checksum_android_32,
+                                    const std::string& checksum_android_64,
+                                    const std::string& checksum_win_32,
+                                    const std::string& checksum_win_64) {
+#if defined(WEBRTC_ANDROID)
+  #ifdef WEBRTC_ARCH_64_BITS
+    return checksum_android_64;
+  #else
+    return checksum_android_32;
+  #endif  // WEBRTC_ARCH_64_BITS
+#elif defined(WEBRTC_WIN)
+  #ifdef WEBRTC_ARCH_64_BITS
+    return checksum_win_64;
+  #else
+    return checksum_win_32;
+  #endif  // WEBRTC_ARCH_64_BITS
+#else
+  return checksum_general;
+#endif  // WEBRTC_WIN
+}
+
+#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
+void Convert(const webrtc::NetEqNetworkStatistics& stats_raw,
+             webrtc::neteq_unittest::NetEqNetworkStatistics* stats) {
+  stats->set_current_buffer_size_ms(stats_raw.current_buffer_size_ms);
+  stats->set_preferred_buffer_size_ms(stats_raw.preferred_buffer_size_ms);
+  stats->set_jitter_peaks_found(stats_raw.jitter_peaks_found);
+  stats->set_packet_loss_rate(stats_raw.packet_loss_rate);
+  stats->set_expand_rate(stats_raw.expand_rate);
+  stats->set_speech_expand_rate(stats_raw.speech_expand_rate);
+  stats->set_preemptive_rate(stats_raw.preemptive_rate);
+  stats->set_accelerate_rate(stats_raw.accelerate_rate);
+  stats->set_secondary_decoded_rate(stats_raw.secondary_decoded_rate);
+  stats->set_secondary_discarded_rate(stats_raw.secondary_discarded_rate);
+  stats->set_clockdrift_ppm(stats_raw.clockdrift_ppm);
+  stats->set_added_zero_samples(stats_raw.added_zero_samples);
+  stats->set_mean_waiting_time_ms(stats_raw.mean_waiting_time_ms);
+  stats->set_median_waiting_time_ms(stats_raw.median_waiting_time_ms);
+  stats->set_min_waiting_time_ms(stats_raw.min_waiting_time_ms);
+  stats->set_max_waiting_time_ms(stats_raw.max_waiting_time_ms);
+}
+
+void Convert(const webrtc::RtcpStatistics& stats_raw,
+             webrtc::neteq_unittest::RtcpStatistics* stats) {
+  stats->set_fraction_lost(stats_raw.fraction_lost);
+  stats->set_cumulative_lost(stats_raw.packets_lost);
+  stats->set_extended_max_sequence_number(
+      stats_raw.extended_highest_sequence_number);
+  stats->set_jitter(stats_raw.jitter);
+}
+
+void AddMessage(FILE* file, rtc::MessageDigest* digest,
+                const std::string& message) {
+  int32_t size = message.length();
+  if (file)
+    ASSERT_EQ(1u, fwrite(&size, sizeof(size), 1, file));
+  digest->Update(&size, sizeof(size));
+
+  if (file)
+    ASSERT_EQ(static_cast<size_t>(size),
+              fwrite(message.data(), sizeof(char), size, file));
+  digest->Update(message.data(), sizeof(char) * size);
+}
+
+#endif  // WEBRTC_NETEQ_UNITTEST_BITEXACT
+
+void LoadDecoders(webrtc::NetEq* neteq) {
+  ASSERT_EQ(true,
+            neteq->RegisterPayloadType(0, SdpAudioFormat("pcmu", 8000, 1)));
+  // Use non-SdpAudioFormat argument when registering PCMa, so that we get test
+  // coverage for that as well.
+  ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderPCMa,
+                                          "pcma", 8));
+#ifdef WEBRTC_CODEC_ILBC
+  ASSERT_EQ(true,
+            neteq->RegisterPayloadType(102, SdpAudioFormat("ilbc", 8000, 1)));
+#endif
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+  ASSERT_EQ(true,
+            neteq->RegisterPayloadType(103, SdpAudioFormat("isac", 16000, 1)));
+#endif
+#ifdef WEBRTC_CODEC_ISAC
+  ASSERT_EQ(true,
+            neteq->RegisterPayloadType(104, SdpAudioFormat("isac", 32000, 1)));
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+  ASSERT_EQ(true,
+            neteq->RegisterPayloadType(
+                111, SdpAudioFormat("opus", 48000, 2, {{"stereo", "0"}})));
+#endif
+  ASSERT_EQ(true,
+            neteq->RegisterPayloadType(93, SdpAudioFormat("L16", 8000, 1)));
+  ASSERT_EQ(true,
+            neteq->RegisterPayloadType(94, SdpAudioFormat("L16", 16000, 1)));
+  ASSERT_EQ(true,
+            neteq->RegisterPayloadType(95, SdpAudioFormat("L16", 32000, 1)));
+  ASSERT_EQ(true,
+            neteq->RegisterPayloadType(13, SdpAudioFormat("cn", 8000, 1)));
+  ASSERT_EQ(true,
+            neteq->RegisterPayloadType(98, SdpAudioFormat("cn", 16000, 1)));
+}
+}  // namespace
+
+class ResultSink {
+ public:
+  explicit ResultSink(const std::string& output_file);
+  ~ResultSink();
+
+  template<typename T> void AddResult(const T* test_results, size_t length);
+
+  void AddResult(const NetEqNetworkStatistics& stats);
+  void AddResult(const RtcpStatistics& stats);
+
+  void VerifyChecksum(const std::string& ref_check_sum);
+
+ private:
+  FILE* output_fp_;
+  std::unique_ptr<rtc::MessageDigest> digest_;
+};
+
+ResultSink::ResultSink(const std::string& output_file)
+    : output_fp_(nullptr),
+      digest_(rtc::MessageDigestFactory::Create(rtc::DIGEST_SHA_1)) {
+  if (!output_file.empty()) {
+    output_fp_ = fopen(output_file.c_str(), "wb");
+    EXPECT_TRUE(output_fp_ != NULL);
+  }
+}
+
+ResultSink::~ResultSink() {
+  if (output_fp_)
+    fclose(output_fp_);
+}
+
+template<typename T>
+void ResultSink::AddResult(const T* test_results, size_t length) {
+  if (output_fp_) {
+    ASSERT_EQ(length, fwrite(test_results, sizeof(T), length, output_fp_));
+  }
+  digest_->Update(test_results, sizeof(T) * length);
+}
+
+void ResultSink::AddResult(const NetEqNetworkStatistics& stats_raw) {
+#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
+  neteq_unittest::NetEqNetworkStatistics stats;
+  Convert(stats_raw, &stats);
+
+  ProtoString stats_string;
+  ASSERT_TRUE(stats.SerializeToString(&stats_string));
+  AddMessage(output_fp_, digest_.get(), stats_string);
+#else
+  FAIL() << "Writing to reference file requires Proto Buffer.";
+#endif  // WEBRTC_NETEQ_UNITTEST_BITEXACT
+}
+
+void ResultSink::AddResult(const RtcpStatistics& stats_raw) {
+#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
+  neteq_unittest::RtcpStatistics stats;
+  Convert(stats_raw, &stats);
+
+  ProtoString stats_string;
+  ASSERT_TRUE(stats.SerializeToString(&stats_string));
+  AddMessage(output_fp_, digest_.get(), stats_string);
+#else
+  FAIL() << "Writing to reference file requires Proto Buffer.";
+#endif  // WEBRTC_NETEQ_UNITTEST_BITEXACT
+}
+
+void ResultSink::VerifyChecksum(const std::string& checksum) {
+  std::vector<char> buffer;
+  buffer.resize(digest_->Size());
+  digest_->Finish(&buffer[0], buffer.size());
+  const std::string result = rtc::hex_encode(&buffer[0], digest_->Size());
+  EXPECT_EQ(checksum, result);
+}
+
+class NetEqDecodingTest : public ::testing::Test {
+ protected:
+  // NetEQ must be polled for data once every 10 ms. Thus, neither of the
+  // constants below can be changed.
+  static const int kTimeStepMs = 10;
+  static const size_t kBlockSize8kHz = kTimeStepMs * 8;
+  static const size_t kBlockSize16kHz = kTimeStepMs * 16;
+  static const size_t kBlockSize32kHz = kTimeStepMs * 32;
+  static const size_t kBlockSize48kHz = kTimeStepMs * 48;
+  static const int kInitSampleRateHz = 8000;
+
+  NetEqDecodingTest();
+  virtual void SetUp();
+  virtual void TearDown();
+  void SelectDecoders(NetEqDecoder* used_codec);
+  void OpenInputFile(const std::string &rtp_file);
+  void Process();
+
+  void DecodeAndCompare(const std::string& rtp_file,
+                        const std::string& output_checksum,
+                        const std::string& network_stats_checksum,
+                        const std::string& rtcp_stats_checksum,
+                        bool gen_ref);
+
+  static void PopulateRtpInfo(int frame_index,
+                              int timestamp,
+                              RTPHeader* rtp_info);
+  static void PopulateCng(int frame_index,
+                          int timestamp,
+                          RTPHeader* rtp_info,
+                          uint8_t* payload,
+                          size_t* payload_len);
+
+  void WrapTest(uint16_t start_seq_no, uint32_t start_timestamp,
+                const std::set<uint16_t>& drop_seq_numbers,
+                bool expect_seq_no_wrap, bool expect_timestamp_wrap);
+
+  void LongCngWithClockDrift(double drift_factor,
+                             double network_freeze_ms,
+                             bool pull_audio_during_freeze,
+                             int delay_tolerance_ms,
+                             int max_time_to_speech_ms);
+
+  void DuplicateCng();
+
+  NetEq* neteq_;
+  NetEq::Config config_;
+  std::unique_ptr<test::RtpFileSource> rtp_source_;
+  std::unique_ptr<test::Packet> packet_;
+  unsigned int sim_clock_;
+  AudioFrame out_frame_;
+  int output_sample_rate_;
+  int algorithmic_delay_ms_;
+};
+
+// Allocating the static const so that it can be passed by reference.
+const int NetEqDecodingTest::kTimeStepMs;
+const size_t NetEqDecodingTest::kBlockSize8kHz;
+const size_t NetEqDecodingTest::kBlockSize16kHz;
+const size_t NetEqDecodingTest::kBlockSize32kHz;
+const int NetEqDecodingTest::kInitSampleRateHz;
+
+NetEqDecodingTest::NetEqDecodingTest()
+    : neteq_(NULL),
+      config_(),
+      sim_clock_(0),
+      output_sample_rate_(kInitSampleRateHz),
+      algorithmic_delay_ms_(0) {
+  config_.sample_rate_hz = kInitSampleRateHz;
+}
+
+void NetEqDecodingTest::SetUp() {
+  neteq_ = NetEq::Create(config_, CreateBuiltinAudioDecoderFactory());
+  NetEqNetworkStatistics stat;
+  ASSERT_EQ(0, neteq_->NetworkStatistics(&stat));
+  algorithmic_delay_ms_ = stat.current_buffer_size_ms;
+  ASSERT_TRUE(neteq_);
+  LoadDecoders(neteq_);
+}
+
+void NetEqDecodingTest::TearDown() {
+  delete neteq_;
+}
+
+void NetEqDecodingTest::OpenInputFile(const std::string &rtp_file) {
+  rtp_source_.reset(test::RtpFileSource::Create(rtp_file));
+}
+
+void NetEqDecodingTest::Process() {
+  // Check if time to receive.
+  while (packet_ && sim_clock_ >= packet_->time_ms()) {
+    if (packet_->payload_length_bytes() > 0) {
+#ifndef WEBRTC_CODEC_ISAC
+      // Ignore payload type 104 (iSAC-swb) if ISAC is not supported.
+      if (packet_->header().payloadType != 104)
+#endif
+        ASSERT_EQ(0,
+                  neteq_->InsertPacket(
+                      packet_->header(),
+                      rtc::ArrayView<const uint8_t>(
+                          packet_->payload(), packet_->payload_length_bytes()),
+                      static_cast<uint32_t>(packet_->time_ms() *
+                                            (output_sample_rate_ / 1000))));
+    }
+    // Get next packet.
+    packet_ = rtp_source_->NextPacket();
+  }
+
+  // Get audio from NetEq.
+  bool muted;
+  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+  ASSERT_FALSE(muted);
+  ASSERT_TRUE((out_frame_.samples_per_channel_ == kBlockSize8kHz) ||
+              (out_frame_.samples_per_channel_ == kBlockSize16kHz) ||
+              (out_frame_.samples_per_channel_ == kBlockSize32kHz) ||
+              (out_frame_.samples_per_channel_ == kBlockSize48kHz));
+  output_sample_rate_ = out_frame_.sample_rate_hz_;
+  EXPECT_EQ(output_sample_rate_, neteq_->last_output_sample_rate_hz());
+
+  // Increase time.
+  sim_clock_ += kTimeStepMs;
+}
+
+void NetEqDecodingTest::DecodeAndCompare(
+    const std::string& rtp_file,
+    const std::string& output_checksum,
+    const std::string& network_stats_checksum,
+    const std::string& rtcp_stats_checksum,
+    bool gen_ref) {
+  OpenInputFile(rtp_file);
+
+  std::string ref_out_file =
+      gen_ref ? webrtc::test::OutputPath() + "neteq_universal_ref.pcm" : "";
+  ResultSink output(ref_out_file);
+
+  std::string stat_out_file =
+      gen_ref ? webrtc::test::OutputPath() + "neteq_network_stats.dat" : "";
+  ResultSink network_stats(stat_out_file);
+
+  std::string rtcp_out_file =
+      gen_ref ? webrtc::test::OutputPath() + "neteq_rtcp_stats.dat" : "";
+  ResultSink rtcp_stats(rtcp_out_file);
+
+  packet_ = rtp_source_->NextPacket();
+  int i = 0;
+  uint64_t last_concealed_samples = 0;
+  uint64_t last_total_samples_received = 0;
+  while (packet_) {
+    std::ostringstream ss;
+    ss << "Lap number " << i++ << " in DecodeAndCompare while loop";
+    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
+    ASSERT_NO_FATAL_FAILURE(Process());
+    ASSERT_NO_FATAL_FAILURE(output.AddResult(
+        out_frame_.data(), out_frame_.samples_per_channel_));
+
+    // Query the network statistics API once per second
+    if (sim_clock_ % 1000 == 0) {
+      // Process NetworkStatistics.
+      NetEqNetworkStatistics current_network_stats;
+      ASSERT_EQ(0, neteq_->NetworkStatistics(&current_network_stats));
+      ASSERT_NO_FATAL_FAILURE(network_stats.AddResult(current_network_stats));
+
+      // Compare with CurrentDelay, which should be identical.
+      EXPECT_EQ(current_network_stats.current_buffer_size_ms,
+                neteq_->CurrentDelayMs());
+
+      // Verify that liftime stats and network stats report similar loss
+      // concealment rates.
+      auto lifetime_stats = neteq_->GetLifetimeStatistics();
+      const uint64_t delta_concealed_samples =
+          lifetime_stats.concealed_samples - last_concealed_samples;
+      last_concealed_samples = lifetime_stats.concealed_samples;
+      const uint64_t delta_total_samples_received =
+          lifetime_stats.total_samples_received - last_total_samples_received;
+      last_total_samples_received = lifetime_stats.total_samples_received;
+      // The tolerance is 1% but expressed in Q14.
+      EXPECT_NEAR(
+          (delta_concealed_samples << 14) / delta_total_samples_received,
+          current_network_stats.expand_rate, (2 << 14) / 100.0);
+
+      // Process RTCPstat.
+      RtcpStatistics current_rtcp_stats;
+      neteq_->GetRtcpStatistics(&current_rtcp_stats);
+      ASSERT_NO_FATAL_FAILURE(rtcp_stats.AddResult(current_rtcp_stats));
+    }
+  }
+
+  SCOPED_TRACE("Check output audio.");
+  output.VerifyChecksum(output_checksum);
+  SCOPED_TRACE("Check network stats.");
+  network_stats.VerifyChecksum(network_stats_checksum);
+  SCOPED_TRACE("Check rtcp stats.");
+  rtcp_stats.VerifyChecksum(rtcp_stats_checksum);
+}
+
+void NetEqDecodingTest::PopulateRtpInfo(int frame_index,
+                                        int timestamp,
+                                        RTPHeader* rtp_info) {
+  rtp_info->sequenceNumber = frame_index;
+  rtp_info->timestamp = timestamp;
+  rtp_info->ssrc = 0x1234;     // Just an arbitrary SSRC.
+  rtp_info->payloadType = 94;  // PCM16b WB codec.
+  rtp_info->markerBit = 0;
+}
+
+void NetEqDecodingTest::PopulateCng(int frame_index,
+                                    int timestamp,
+                                    RTPHeader* rtp_info,
+                                    uint8_t* payload,
+                                    size_t* payload_len) {
+  rtp_info->sequenceNumber = frame_index;
+  rtp_info->timestamp = timestamp;
+  rtp_info->ssrc = 0x1234;     // Just an arbitrary SSRC.
+  rtp_info->payloadType = 98;  // WB CNG.
+  rtp_info->markerBit = 0;
+  payload[0] = 64;  // Noise level -64 dBov, quite arbitrarily chosen.
+  *payload_len = 1;  // Only noise level, no spectral parameters.
+}
+
+#if !defined(WEBRTC_IOS) && defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
+    (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) &&    \
+    defined(WEBRTC_CODEC_ILBC) && !defined(WEBRTC_ARCH_ARM64)
+#define MAYBE_TestBitExactness TestBitExactness
+#else
+#define MAYBE_TestBitExactness DISABLED_TestBitExactness
+#endif
+TEST_F(NetEqDecodingTest, MAYBE_TestBitExactness) {
+  const std::string input_rtp_file =
+      webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp");
+
+  const std::string output_checksum = PlatformChecksum(
+      "09fa7646e2ad032a0b156177b95f09012430f81f",
+      "1c64eb8b55ce8878676c6a1e6ddd78f48de0668b",
+      "not used",
+      "09fa7646e2ad032a0b156177b95f09012430f81f",
+      "759fef89a5de52bd17e733dc255c671ce86be909");
+
+  const std::string network_stats_checksum =
+      PlatformChecksum("5b4262ca328e5f066af5d34f3380521583dd20de",
+                       "80235b6d727281203acb63b98f9a9e85d95f7ec0",
+                       "not used",
+                       "5b4262ca328e5f066af5d34f3380521583dd20de",
+                       "5b4262ca328e5f066af5d34f3380521583dd20de");
+
+  const std::string rtcp_stats_checksum = PlatformChecksum(
+      "b8880bf9fed2487efbddcb8d94b9937a29ae521d",
+      "f3f7b3d3e71d7e635240b5373b57df6a7e4ce9d4",
+      "not used",
+      "b8880bf9fed2487efbddcb8d94b9937a29ae521d",
+      "b8880bf9fed2487efbddcb8d94b9937a29ae521d");
+
+  DecodeAndCompare(input_rtp_file,
+                   output_checksum,
+                   network_stats_checksum,
+                   rtcp_stats_checksum,
+                   FLAG_gen_ref);
+}
+
+#if !defined(WEBRTC_IOS) &&                                         \
+    defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) &&                      \
+    defined(WEBRTC_CODEC_OPUS)
+#define MAYBE_TestOpusBitExactness TestOpusBitExactness
+#else
+#define MAYBE_TestOpusBitExactness DISABLED_TestOpusBitExactness
+#endif
+TEST_F(NetEqDecodingTest, MAYBE_TestOpusBitExactness) {
+  const std::string input_rtp_file =
+      webrtc::test::ResourcePath("audio_coding/neteq_opus", "rtp");
+
+  const std::string output_checksum = PlatformChecksum(
+      "7ea28d7edf9395f4ac8e8d8dd3a9e5c620b1bf48",
+      "5b1e691ab1c4465c742d6d944bc71e3b1c0e4c0e",
+      "b096114dd8c233eaf2b0ce9802ac95af13933772",
+      "7ea28d7edf9395f4ac8e8d8dd3a9e5c620b1bf48",
+      "7ea28d7edf9395f4ac8e8d8dd3a9e5c620b1bf48");
+
+  const std::string network_stats_checksum =
+      PlatformChecksum("9e72233c78baf685e500dd6c94212b30a4c5f27d",
+                       "9a37270e4242fbd31e80bb47dc5e7ab82cf2d557",
+                       "4f1e9734bc80a290faaf9d611efcb8d7802dbc4f",
+                       "9e72233c78baf685e500dd6c94212b30a4c5f27d",
+                       "9e72233c78baf685e500dd6c94212b30a4c5f27d");
+
+  const std::string rtcp_stats_checksum = PlatformChecksum(
+      "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
+      "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
+      "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
+      "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
+      "e37c797e3de6a64dda88c9ade7a013d022a2e1e0");
+
+  DecodeAndCompare(input_rtp_file,
+                   output_checksum,
+                   network_stats_checksum,
+                   rtcp_stats_checksum,
+                   FLAG_gen_ref);
+}
+
+#if !defined(WEBRTC_IOS) &&                                         \
+    defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) &&                      \
+    defined(WEBRTC_CODEC_OPUS)
+#define MAYBE_TestOpusDtxBitExactness TestOpusDtxBitExactness
+#else
+#define MAYBE_TestOpusDtxBitExactness DISABLED_TestOpusDtxBitExactness
+#endif
+TEST_F(NetEqDecodingTest, MAYBE_TestOpusDtxBitExactness) {
+  const std::string input_rtp_file =
+      webrtc::test::ResourcePath("audio_coding/neteq_opus_dtx", "rtp");
+
+  const std::string output_checksum =
+      PlatformChecksum("713af6c92881f5aab1285765ee6680da9d1c06ce",
+                       "3ec991b96872123f1554c03c543ca5d518431e46",
+                       "da9f9a2d94e0c2d67342fad4965d7b91cda50b25",
+                       "713af6c92881f5aab1285765ee6680da9d1c06ce",
+                       "713af6c92881f5aab1285765ee6680da9d1c06ce");
+
+  const std::string network_stats_checksum =
+      "bab58dc587d956f326056d7340c96eb9d2d3cc21";
+
+  const std::string rtcp_stats_checksum =
+      "ac27a7f305efb58b39bf123dccee25dee5758e63";
+
+  DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
+                   rtcp_stats_checksum, FLAG_gen_ref);
+}
+
+// Use fax mode to avoid time-scaling. This is to simplify the testing of
+// packet waiting times in the packet buffer.
+class NetEqDecodingTestFaxMode : public NetEqDecodingTest {
+ protected:
+  NetEqDecodingTestFaxMode() : NetEqDecodingTest() {
+    config_.playout_mode = kPlayoutFax;
+  }
+  void TestJitterBufferDelay(bool apply_packet_loss);
+};
+
+TEST_F(NetEqDecodingTestFaxMode, TestFrameWaitingTimeStatistics) {
+  // Insert 30 dummy packets at once. Each packet contains 10 ms 16 kHz audio.
+  size_t num_frames = 30;
+  const size_t kSamples = 10 * 16;
+  const size_t kPayloadBytes = kSamples * 2;
+  for (size_t i = 0; i < num_frames; ++i) {
+    const uint8_t payload[kPayloadBytes] = {0};
+    RTPHeader rtp_info;
+    rtp_info.sequenceNumber = rtc::checked_cast<uint16_t>(i);
+    rtp_info.timestamp = rtc::checked_cast<uint32_t>(i * kSamples);
+    rtp_info.ssrc = 0x1234;     // Just an arbitrary SSRC.
+    rtp_info.payloadType = 94;  // PCM16b WB codec.
+    rtp_info.markerBit = 0;
+    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
+  }
+  // Pull out all data.
+  for (size_t i = 0; i < num_frames; ++i) {
+    bool muted;
+    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+  }
+
+  NetEqNetworkStatistics stats;
+  EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
+  // Since all frames are dumped into NetEQ at once, but pulled out with 10 ms
+  // spacing (per definition), we expect the delay to increase with 10 ms for
+  // each packet. Thus, we are calculating the statistics for a series from 10
+  // to 300, in steps of 10 ms.
+  EXPECT_EQ(155, stats.mean_waiting_time_ms);
+  EXPECT_EQ(155, stats.median_waiting_time_ms);
+  EXPECT_EQ(10, stats.min_waiting_time_ms);
+  EXPECT_EQ(300, stats.max_waiting_time_ms);
+
+  // Check statistics again and make sure it's been reset.
+  EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
+  EXPECT_EQ(-1, stats.mean_waiting_time_ms);
+  EXPECT_EQ(-1, stats.median_waiting_time_ms);
+  EXPECT_EQ(-1, stats.min_waiting_time_ms);
+  EXPECT_EQ(-1, stats.max_waiting_time_ms);
+}
+
+TEST_F(NetEqDecodingTest, TestAverageInterArrivalTimeNegative) {
+  const int kNumFrames = 3000;  // Needed for convergence.
+  int frame_index = 0;
+  const size_t kSamples = 10 * 16;
+  const size_t kPayloadBytes = kSamples * 2;
+  while (frame_index < kNumFrames) {
+    // Insert one packet each time, except every 10th time where we insert two
+    // packets at once. This will create a negative clock-drift of approx. 10%.
+    int num_packets = (frame_index % 10 == 0 ? 2 : 1);
+    for (int n = 0; n < num_packets; ++n) {
+      uint8_t payload[kPayloadBytes] = {0};
+      RTPHeader rtp_info;
+      PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info);
+      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
+      ++frame_index;
+    }
+
+    // Pull out data once.
+    bool muted;
+    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+  }
+
+  NetEqNetworkStatistics network_stats;
+  ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
+  EXPECT_EQ(-103192, network_stats.clockdrift_ppm);
+}
+
+TEST_F(NetEqDecodingTest, TestAverageInterArrivalTimePositive) {
+  const int kNumFrames = 5000;  // Needed for convergence.
+  int frame_index = 0;
+  const size_t kSamples = 10 * 16;
+  const size_t kPayloadBytes = kSamples * 2;
+  for (int i = 0; i < kNumFrames; ++i) {
+    // Insert one packet each time, except every 10th time where we don't insert
+    // any packet. This will create a positive clock-drift of approx. 11%.
+    int num_packets = (i % 10 == 9 ? 0 : 1);
+    for (int n = 0; n < num_packets; ++n) {
+      uint8_t payload[kPayloadBytes] = {0};
+      RTPHeader rtp_info;
+      PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info);
+      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
+      ++frame_index;
+    }
+
+    // Pull out data once.
+    bool muted;
+    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+  }
+
+  NetEqNetworkStatistics network_stats;
+  ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
+  EXPECT_EQ(110953, network_stats.clockdrift_ppm);
+}
+
+void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
+                                              double network_freeze_ms,
+                                              bool pull_audio_during_freeze,
+                                              int delay_tolerance_ms,
+                                              int max_time_to_speech_ms) {
+  uint16_t seq_no = 0;
+  uint32_t timestamp = 0;
+  const int kFrameSizeMs = 30;
+  const size_t kSamples = kFrameSizeMs * 16;
+  const size_t kPayloadBytes = kSamples * 2;
+  double next_input_time_ms = 0.0;
+  double t_ms;
+  bool muted;
+
+  // Insert speech for 5 seconds.
+  const int kSpeechDurationMs = 5000;
+  for (t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) {
+    // Each turn in this for loop is 10 ms.
+    while (next_input_time_ms <= t_ms) {
+      // Insert one 30 ms speech frame.
+      uint8_t payload[kPayloadBytes] = {0};
+      RTPHeader rtp_info;
+      PopulateRtpInfo(seq_no, timestamp, &rtp_info);
+      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
+      ++seq_no;
+      timestamp += kSamples;
+      next_input_time_ms += static_cast<double>(kFrameSizeMs) * drift_factor;
+    }
+    // Pull out data once.
+    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+  }
+
+  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
+  rtc::Optional<uint32_t> playout_timestamp = neteq_->GetPlayoutTimestamp();
+  ASSERT_TRUE(playout_timestamp);
+  int32_t delay_before = timestamp - *playout_timestamp;
+
+  // Insert CNG for 1 minute (= 60000 ms).
+  const int kCngPeriodMs = 100;
+  const int kCngPeriodSamples = kCngPeriodMs * 16;  // Period in 16 kHz samples.
+  const int kCngDurationMs = 60000;
+  for (; t_ms < kSpeechDurationMs + kCngDurationMs; t_ms += 10) {
+    // Each turn in this for loop is 10 ms.
+    while (next_input_time_ms <= t_ms) {
+      // Insert one CNG frame each 100 ms.
+      uint8_t payload[kPayloadBytes];
+      size_t payload_len;
+      RTPHeader rtp_info;
+      PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
+      ASSERT_EQ(0, neteq_->InsertPacket(
+                       rtp_info,
+                       rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
+      ++seq_no;
+      timestamp += kCngPeriodSamples;
+      next_input_time_ms += static_cast<double>(kCngPeriodMs) * drift_factor;
+    }
+    // Pull out data once.
+    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+  }
+
+  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
+
+  if (network_freeze_ms > 0) {
+    // First keep pulling audio for |network_freeze_ms| without inserting
+    // any data, then insert CNG data corresponding to |network_freeze_ms|
+    // without pulling any output audio.
+    const double loop_end_time = t_ms + network_freeze_ms;
+    for (; t_ms < loop_end_time; t_ms += 10) {
+      // Pull out data once.
+      ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+      ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+      EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
+    }
+    bool pull_once = pull_audio_during_freeze;
+    // If |pull_once| is true, GetAudio will be called once half-way through
+    // the network recovery period.
+    double pull_time_ms = (t_ms + next_input_time_ms) / 2;
+    while (next_input_time_ms <= t_ms) {
+      if (pull_once && next_input_time_ms >= pull_time_ms) {
+        pull_once = false;
+        // Pull out data once.
+        ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+        ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+        EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
+        t_ms += 10;
+      }
+      // Insert one CNG frame each 100 ms.
+      uint8_t payload[kPayloadBytes];
+      size_t payload_len;
+      RTPHeader rtp_info;
+      PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
+      ASSERT_EQ(0, neteq_->InsertPacket(
+                       rtp_info,
+                       rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
+      ++seq_no;
+      timestamp += kCngPeriodSamples;
+      next_input_time_ms += kCngPeriodMs * drift_factor;
+    }
+  }
+
+  // Insert speech again until output type is speech.
+  double speech_restart_time_ms = t_ms;
+  while (out_frame_.speech_type_ != AudioFrame::kNormalSpeech) {
+    // Each turn in this for loop is 10 ms.
+    while (next_input_time_ms <= t_ms) {
+      // Insert one 30 ms speech frame.
+      uint8_t payload[kPayloadBytes] = {0};
+      RTPHeader rtp_info;
+      PopulateRtpInfo(seq_no, timestamp, &rtp_info);
+      ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
+      ++seq_no;
+      timestamp += kSamples;
+      next_input_time_ms += kFrameSizeMs * drift_factor;
+    }
+    // Pull out data once.
+    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+    // Increase clock.
+    t_ms += 10;
+  }
+
+  // Check that the speech starts again within reasonable time.
+  double time_until_speech_returns_ms = t_ms - speech_restart_time_ms;
+  EXPECT_LT(time_until_speech_returns_ms, max_time_to_speech_ms);
+  playout_timestamp = neteq_->GetPlayoutTimestamp();
+  ASSERT_TRUE(playout_timestamp);
+  int32_t delay_after = timestamp - *playout_timestamp;
+  // Compare delay before and after, and make sure it differs less than 20 ms.
+  EXPECT_LE(delay_after, delay_before + delay_tolerance_ms * 16);
+  EXPECT_GE(delay_after, delay_before - delay_tolerance_ms * 16);
+}
+
+TEST_F(NetEqDecodingTest, LongCngWithNegativeClockDrift) {
+  // Apply a clock drift of -25 ms / s (sender faster than receiver).
+  const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
+  const double kNetworkFreezeTimeMs = 0.0;
+  const bool kGetAudioDuringFreezeRecovery = false;
+  const int kDelayToleranceMs = 20;
+  const int kMaxTimeToSpeechMs = 100;
+  LongCngWithClockDrift(kDriftFactor,
+                        kNetworkFreezeTimeMs,
+                        kGetAudioDuringFreezeRecovery,
+                        kDelayToleranceMs,
+                        kMaxTimeToSpeechMs);
+}
+
+TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDrift) {
+  // Apply a clock drift of +25 ms / s (sender slower than receiver).
+  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
+  const double kNetworkFreezeTimeMs = 0.0;
+  const bool kGetAudioDuringFreezeRecovery = false;
+  const int kDelayToleranceMs = 20;
+  const int kMaxTimeToSpeechMs = 100;
+  LongCngWithClockDrift(kDriftFactor,
+                        kNetworkFreezeTimeMs,
+                        kGetAudioDuringFreezeRecovery,
+                        kDelayToleranceMs,
+                        kMaxTimeToSpeechMs);
+}
+
+TEST_F(NetEqDecodingTest, LongCngWithNegativeClockDriftNetworkFreeze) {
+  // Apply a clock drift of -25 ms / s (sender faster than receiver).
+  const double kDriftFactor = 1000.0 / (1000.0 + 25.0);
+  const double kNetworkFreezeTimeMs = 5000.0;
+  const bool kGetAudioDuringFreezeRecovery = false;
+  const int kDelayToleranceMs = 50;
+  const int kMaxTimeToSpeechMs = 200;
+  LongCngWithClockDrift(kDriftFactor,
+                        kNetworkFreezeTimeMs,
+                        kGetAudioDuringFreezeRecovery,
+                        kDelayToleranceMs,
+                        kMaxTimeToSpeechMs);
+}
+
+TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDriftNetworkFreeze) {
+  // Apply a clock drift of +25 ms / s (sender slower than receiver).
+  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
+  const double kNetworkFreezeTimeMs = 5000.0;
+  const bool kGetAudioDuringFreezeRecovery = false;
+  const int kDelayToleranceMs = 20;
+  const int kMaxTimeToSpeechMs = 100;
+  LongCngWithClockDrift(kDriftFactor,
+                        kNetworkFreezeTimeMs,
+                        kGetAudioDuringFreezeRecovery,
+                        kDelayToleranceMs,
+                        kMaxTimeToSpeechMs);
+}
+
+TEST_F(NetEqDecodingTest, LongCngWithPositiveClockDriftNetworkFreezeExtraPull) {
+  // Apply a clock drift of +25 ms / s (sender slower than receiver).
+  const double kDriftFactor = 1000.0 / (1000.0 - 25.0);
+  const double kNetworkFreezeTimeMs = 5000.0;
+  const bool kGetAudioDuringFreezeRecovery = true;
+  const int kDelayToleranceMs = 20;
+  const int kMaxTimeToSpeechMs = 100;
+  LongCngWithClockDrift(kDriftFactor,
+                        kNetworkFreezeTimeMs,
+                        kGetAudioDuringFreezeRecovery,
+                        kDelayToleranceMs,
+                        kMaxTimeToSpeechMs);
+}
+
+TEST_F(NetEqDecodingTest, LongCngWithoutClockDrift) {
+  const double kDriftFactor = 1.0;  // No drift.
+  const double kNetworkFreezeTimeMs = 0.0;
+  const bool kGetAudioDuringFreezeRecovery = false;
+  const int kDelayToleranceMs = 10;
+  const int kMaxTimeToSpeechMs = 50;
+  LongCngWithClockDrift(kDriftFactor,
+                        kNetworkFreezeTimeMs,
+                        kGetAudioDuringFreezeRecovery,
+                        kDelayToleranceMs,
+                        kMaxTimeToSpeechMs);
+}
+
+TEST_F(NetEqDecodingTest, UnknownPayloadType) {
+  const size_t kPayloadBytes = 100;
+  uint8_t payload[kPayloadBytes] = {0};
+  RTPHeader rtp_info;
+  PopulateRtpInfo(0, 0, &rtp_info);
+  rtp_info.payloadType = 1;  // Not registered as a decoder.
+  EXPECT_EQ(NetEq::kFail, neteq_->InsertPacket(rtp_info, payload, 0));
+}
+
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+#define MAYBE_DecoderError DecoderError
+#else
+#define MAYBE_DecoderError DISABLED_DecoderError
+#endif
+
+TEST_F(NetEqDecodingTest, MAYBE_DecoderError) {
+  const size_t kPayloadBytes = 100;
+  uint8_t payload[kPayloadBytes] = {0};
+  RTPHeader rtp_info;
+  PopulateRtpInfo(0, 0, &rtp_info);
+  rtp_info.payloadType = 103;  // iSAC, but the payload is invalid.
+  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
+  // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
+  // to GetAudio.
+  int16_t* out_frame_data = out_frame_.mutable_data();
+  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
+    out_frame_data[i] = 1;
+  }
+  bool muted;
+  EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&out_frame_, &muted));
+  ASSERT_FALSE(muted);
+
+  // Verify that the first 160 samples are set to 0.
+  static const int kExpectedOutputLength = 160;  // 10 ms at 16 kHz sample rate.
+  const int16_t* const_out_frame_data = out_frame_.data();
+  for (int i = 0; i < kExpectedOutputLength; ++i) {
+    std::ostringstream ss;
+    ss << "i = " << i;
+    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
+    EXPECT_EQ(0, const_out_frame_data[i]);
+  }
+}
+
+TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
+  // Set all of |out_data_| to 1, and verify that it was set to 0 by the call
+  // to GetAudio.
+  int16_t* out_frame_data = out_frame_.mutable_data();
+  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
+    out_frame_data[i] = 1;
+  }
+  bool muted;
+  EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+  ASSERT_FALSE(muted);
+  // Verify that the first block of samples is set to 0.
+  static const int kExpectedOutputLength =
+      kInitSampleRateHz / 100;  // 10 ms at initial sample rate.
+  const int16_t* const_out_frame_data = out_frame_.data();
+  for (int i = 0; i < kExpectedOutputLength; ++i) {
+    std::ostringstream ss;
+    ss << "i = " << i;
+    SCOPED_TRACE(ss.str());  // Print out the parameter values on failure.
+    EXPECT_EQ(0, const_out_frame_data[i]);
+  }
+  // Verify that the sample rate did not change from the initial configuration.
+  EXPECT_EQ(config_.sample_rate_hz, neteq_->last_output_sample_rate_hz());
+}
+
+class NetEqBgnTest : public NetEqDecodingTest {
+ protected:
+  virtual void TestCondition(double sum_squared_noise,
+                             bool should_be_faded) = 0;
+
+  void CheckBgn(int sampling_rate_hz) {
+    size_t expected_samples_per_channel = 0;
+    uint8_t payload_type = 0xFF;  // Invalid.
+    if (sampling_rate_hz == 8000) {
+      expected_samples_per_channel = kBlockSize8kHz;
+      payload_type = 93;  // PCM 16, 8 kHz.
+    } else if (sampling_rate_hz == 16000) {
+      expected_samples_per_channel = kBlockSize16kHz;
+      payload_type = 94;  // PCM 16, 16 kHZ.
+    } else if (sampling_rate_hz == 32000) {
+      expected_samples_per_channel = kBlockSize32kHz;
+      payload_type = 95;  // PCM 16, 32 kHz.
+    } else {
+      ASSERT_TRUE(false);  // Unsupported test case.
+    }
+
+    AudioFrame output;
+    test::AudioLoop input;
+    // We are using the same 32 kHz input file for all tests, regardless of
+    // |sampling_rate_hz|. The output may sound weird, but the test is still
+    // valid.
+    ASSERT_TRUE(input.Init(
+        webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
+        10 * sampling_rate_hz,  // Max 10 seconds loop length.
+        expected_samples_per_channel));
+
+    // Payload of 10 ms of PCM16 32 kHz.
+    uint8_t payload[kBlockSize32kHz * sizeof(int16_t)];
+    RTPHeader rtp_info;
+    PopulateRtpInfo(0, 0, &rtp_info);
+    rtp_info.payloadType = payload_type;
+
+    uint32_t receive_timestamp = 0;
+    bool muted;
+    for (int n = 0; n < 10; ++n) {  // Insert few packets and get audio.
+      auto block = input.GetNextBlock();
+      ASSERT_EQ(expected_samples_per_channel, block.size());
+      size_t enc_len_bytes =
+          WebRtcPcm16b_Encode(block.data(), block.size(), payload);
+      ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2);
+
+      ASSERT_EQ(0, neteq_->InsertPacket(
+                       rtp_info,
+                       rtc::ArrayView<const uint8_t>(payload, enc_len_bytes),
+                       receive_timestamp));
+      output.Reset();
+      ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
+      ASSERT_EQ(1u, output.num_channels_);
+      ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
+      ASSERT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
+
+      // Next packet.
+      rtp_info.timestamp += rtc::checked_cast<uint32_t>(
+          expected_samples_per_channel);
+      rtp_info.sequenceNumber++;
+      receive_timestamp += rtc::checked_cast<uint32_t>(
+          expected_samples_per_channel);
+    }
+
+    output.Reset();
+
+    // Get audio without inserting packets, expecting PLC and PLC-to-CNG. Pull
+    // one frame without checking speech-type. This is the first frame pulled
+    // without inserting any packet, and might not be labeled as PLC.
+    ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
+    ASSERT_EQ(1u, output.num_channels_);
+    ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
+
+    // To be able to test the fading of background noise we need at lease to
+    // pull 611 frames.
+    const int kFadingThreshold = 611;
+
+    // Test several CNG-to-PLC packet for the expected behavior. The number 20
+    // is arbitrary, but sufficiently large to test enough number of frames.
+    const int kNumPlcToCngTestFrames = 20;
+    bool plc_to_cng = false;
+    for (int n = 0; n < kFadingThreshold + kNumPlcToCngTestFrames; ++n) {
+      output.Reset();
+      // Set to non-zero.
+      memset(output.mutable_data(), 1, AudioFrame::kMaxDataSizeBytes);
+      ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
+      ASSERT_FALSE(muted);
+      ASSERT_EQ(1u, output.num_channels_);
+      ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
+      if (output.speech_type_ == AudioFrame::kPLCCNG) {
+        plc_to_cng = true;
+        double sum_squared = 0;
+        const int16_t* output_data = output.data();
+        for (size_t k = 0;
+             k < output.num_channels_ * output.samples_per_channel_; ++k)
+          sum_squared += output_data[k] * output_data[k];
+        TestCondition(sum_squared, n > kFadingThreshold);
+      } else {
+        EXPECT_EQ(AudioFrame::kPLC, output.speech_type_);
+      }
+    }
+    EXPECT_TRUE(plc_to_cng);  // Just to be sure that PLC-to-CNG has occurred.
+  }
+};
+
+class NetEqBgnTestOn : public NetEqBgnTest {
+ protected:
+  NetEqBgnTestOn() : NetEqBgnTest() {
+    config_.background_noise_mode = NetEq::kBgnOn;
+  }
+
+  void TestCondition(double sum_squared_noise, bool /*should_be_faded*/) {
+    EXPECT_NE(0, sum_squared_noise);
+  }
+};
+
+class NetEqBgnTestOff : public NetEqBgnTest {
+ protected:
+  NetEqBgnTestOff() : NetEqBgnTest() {
+    config_.background_noise_mode = NetEq::kBgnOff;
+  }
+
+  void TestCondition(double sum_squared_noise, bool /*should_be_faded*/) {
+    EXPECT_EQ(0, sum_squared_noise);
+  }
+};
+
+class NetEqBgnTestFade : public NetEqBgnTest {
+ protected:
+  NetEqBgnTestFade() : NetEqBgnTest() {
+    config_.background_noise_mode = NetEq::kBgnFade;
+  }
+
+  void TestCondition(double sum_squared_noise, bool should_be_faded) {
+    if (should_be_faded)
+      EXPECT_EQ(0, sum_squared_noise);
+  }
+};
+
+TEST_F(NetEqBgnTestOn, RunTest) {
+  CheckBgn(8000);
+  CheckBgn(16000);
+  CheckBgn(32000);
+}
+
+TEST_F(NetEqBgnTestOff, RunTest) {
+  CheckBgn(8000);
+  CheckBgn(16000);
+  CheckBgn(32000);
+}
+
+TEST_F(NetEqBgnTestFade, RunTest) {
+  CheckBgn(8000);
+  CheckBgn(16000);
+  CheckBgn(32000);
+}
+
+void NetEqDecodingTest::WrapTest(uint16_t start_seq_no,
+                                 uint32_t start_timestamp,
+                                 const std::set<uint16_t>& drop_seq_numbers,
+                                 bool expect_seq_no_wrap,
+                                 bool expect_timestamp_wrap) {
+  uint16_t seq_no = start_seq_no;
+  uint32_t timestamp = start_timestamp;
+  const int kBlocksPerFrame = 3;  // Number of 10 ms blocks per frame.
+  const int kFrameSizeMs = kBlocksPerFrame * kTimeStepMs;
+  const int kSamples = kBlockSize16kHz * kBlocksPerFrame;
+  const size_t kPayloadBytes = kSamples * sizeof(int16_t);
+  double next_input_time_ms = 0.0;
+  uint32_t receive_timestamp = 0;
+
+  // Insert speech for 2 seconds.
+  const int kSpeechDurationMs = 2000;
+  int packets_inserted = 0;
+  uint16_t last_seq_no;
+  uint32_t last_timestamp;
+  bool timestamp_wrapped = false;
+  bool seq_no_wrapped = false;
+  for (double t_ms = 0; t_ms < kSpeechDurationMs; t_ms += 10) {
+    // Each turn in this for loop is 10 ms.
+    while (next_input_time_ms <= t_ms) {
+      // Insert one 30 ms speech frame.
+      uint8_t payload[kPayloadBytes] = {0};
+      RTPHeader rtp_info;
+      PopulateRtpInfo(seq_no, timestamp, &rtp_info);
+      if (drop_seq_numbers.find(seq_no) == drop_seq_numbers.end()) {
+        // This sequence number was not in the set to drop. Insert it.
+        ASSERT_EQ(0,
+                  neteq_->InsertPacket(rtp_info, payload, receive_timestamp));
+        ++packets_inserted;
+      }
+      NetEqNetworkStatistics network_stats;
+      ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
+
+      // Due to internal NetEq logic, preferred buffer-size is about 4 times the
+      // packet size for first few packets. Therefore we refrain from checking
+      // the criteria.
+      if (packets_inserted > 4) {
+        // Expect preferred and actual buffer size to be no more than 2 frames.
+        EXPECT_LE(network_stats.preferred_buffer_size_ms, kFrameSizeMs * 2);
+        EXPECT_LE(network_stats.current_buffer_size_ms, kFrameSizeMs * 2 +
+                  algorithmic_delay_ms_);
+      }
+      last_seq_no = seq_no;
+      last_timestamp = timestamp;
+
+      ++seq_no;
+      timestamp += kSamples;
+      receive_timestamp += kSamples;
+      next_input_time_ms += static_cast<double>(kFrameSizeMs);
+
+      seq_no_wrapped |= seq_no < last_seq_no;
+      timestamp_wrapped |= timestamp < last_timestamp;
+    }
+    // Pull out data once.
+    AudioFrame output;
+    bool muted;
+    ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
+    ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_);
+    ASSERT_EQ(1u, output.num_channels_);
+
+    // Expect delay (in samples) to be less than 2 packets.
+    rtc::Optional<uint32_t> playout_timestamp = neteq_->GetPlayoutTimestamp();
+    ASSERT_TRUE(playout_timestamp);
+    EXPECT_LE(timestamp - *playout_timestamp,
+              static_cast<uint32_t>(kSamples * 2));
+  }
+  // Make sure we have actually tested wrap-around.
+  ASSERT_EQ(expect_seq_no_wrap, seq_no_wrapped);
+  ASSERT_EQ(expect_timestamp_wrap, timestamp_wrapped);
+}
+
+TEST_F(NetEqDecodingTest, SequenceNumberWrap) {
+  // Start with a sequence number that will soon wrap.
+  std::set<uint16_t> drop_seq_numbers;  // Don't drop any packets.
+  WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
+}
+
+TEST_F(NetEqDecodingTest, SequenceNumberWrapAndDrop) {
+  // Start with a sequence number that will soon wrap.
+  std::set<uint16_t> drop_seq_numbers;
+  drop_seq_numbers.insert(0xFFFF);
+  drop_seq_numbers.insert(0x0);
+  WrapTest(0xFFFF - 10, 0, drop_seq_numbers, true, false);
+}
+
+TEST_F(NetEqDecodingTest, TimestampWrap) {
+  // Start with a timestamp that will soon wrap.
+  std::set<uint16_t> drop_seq_numbers;
+  WrapTest(0, 0xFFFFFFFF - 3000, drop_seq_numbers, false, true);
+}
+
+TEST_F(NetEqDecodingTest, TimestampAndSequenceNumberWrap) {
+  // Start with a timestamp and a sequence number that will wrap at the same
+  // time.
+  std::set<uint16_t> drop_seq_numbers;
+  WrapTest(0xFFFF - 10, 0xFFFFFFFF - 5000, drop_seq_numbers, true, true);
+}
+
+void NetEqDecodingTest::DuplicateCng() {
+  uint16_t seq_no = 0;
+  uint32_t timestamp = 0;
+  const int kFrameSizeMs = 10;
+  const int kSampleRateKhz = 16;
+  const int kSamples = kFrameSizeMs * kSampleRateKhz;
+  const size_t kPayloadBytes = kSamples * 2;
+
+  const int algorithmic_delay_samples = std::max(
+      algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8);
+  // Insert three speech packets. Three are needed to get the frame length
+  // correct.
+  uint8_t payload[kPayloadBytes] = {0};
+  RTPHeader rtp_info;
+  bool muted;
+  for (int i = 0; i < 3; ++i) {
+    PopulateRtpInfo(seq_no, timestamp, &rtp_info);
+    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
+    ++seq_no;
+    timestamp += kSamples;
+
+    // Pull audio once.
+    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+  }
+  // Verify speech output.
+  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
+
+  // Insert same CNG packet twice.
+  const int kCngPeriodMs = 100;
+  const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
+  size_t payload_len;
+  PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
+  // This is the first time this CNG packet is inserted.
+  ASSERT_EQ(
+      0, neteq_->InsertPacket(
+             rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
+
+  // Pull audio once and make sure CNG is played.
+  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+  ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
+  EXPECT_FALSE(
+      neteq_->GetPlayoutTimestamp());  // Returns empty value during CNG.
+  EXPECT_EQ(timestamp - algorithmic_delay_samples,
+            out_frame_.timestamp_ + out_frame_.samples_per_channel_);
+
+  // Insert the same CNG packet again. Note that at this point it is old, since
+  // we have already decoded the first copy of it.
+  ASSERT_EQ(
+      0, neteq_->InsertPacket(
+             rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
+
+  // Pull audio until we have played |kCngPeriodMs| of CNG. Start at 10 ms since
+  // we have already pulled out CNG once.
+  for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) {
+    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+    EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
+    EXPECT_FALSE(
+        neteq_->GetPlayoutTimestamp());  // Returns empty value during CNG.
+    EXPECT_EQ(timestamp - algorithmic_delay_samples,
+              out_frame_.timestamp_ + out_frame_.samples_per_channel_);
+  }
+
+  // Insert speech again.
+  ++seq_no;
+  timestamp += kCngPeriodSamples;
+  PopulateRtpInfo(seq_no, timestamp, &rtp_info);
+  ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
+
+  // Pull audio once and verify that the output is speech again.
+  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+  ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
+  rtc::Optional<uint32_t> playout_timestamp = neteq_->GetPlayoutTimestamp();
+  ASSERT_TRUE(playout_timestamp);
+  EXPECT_EQ(timestamp + kSamples - algorithmic_delay_samples,
+            *playout_timestamp);
+}
+
+TEST_F(NetEqDecodingTest, DiscardDuplicateCng) { DuplicateCng(); }
+
+TEST_F(NetEqDecodingTest, CngFirst) {
+  uint16_t seq_no = 0;
+  uint32_t timestamp = 0;
+  const int kFrameSizeMs = 10;
+  const int kSampleRateKhz = 16;
+  const int kSamples = kFrameSizeMs * kSampleRateKhz;
+  const int kPayloadBytes = kSamples * 2;
+  const int kCngPeriodMs = 100;
+  const int kCngPeriodSamples = kCngPeriodMs * kSampleRateKhz;
+  size_t payload_len;
+
+  uint8_t payload[kPayloadBytes] = {0};
+  RTPHeader rtp_info;
+
+  PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
+  ASSERT_EQ(
+      NetEq::kOK,
+      neteq_->InsertPacket(
+          rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
+  ++seq_no;
+  timestamp += kCngPeriodSamples;
+
+  // Pull audio once and make sure CNG is played.
+  bool muted;
+  ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+  ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
+
+  // Insert some speech packets.
+  const uint32_t first_speech_timestamp = timestamp;
+  int timeout_counter = 0;
+  do {
+    ASSERT_LT(timeout_counter++, 20) << "Test timed out";
+    PopulateRtpInfo(seq_no, timestamp, &rtp_info);
+    ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
+    ++seq_no;
+    timestamp += kSamples;
+
+    // Pull audio once.
+    ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+    ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
+  } while (!IsNewerTimestamp(out_frame_.timestamp_, first_speech_timestamp));
+  // Verify speech output.
+  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
+}
+
+class NetEqDecodingTestWithMutedState : public NetEqDecodingTest {
+ public:
+  NetEqDecodingTestWithMutedState() : NetEqDecodingTest() {
+    config_.enable_muted_state = true;
+  }
+
+ protected:
+  static constexpr size_t kSamples = 10 * 16;
+  static constexpr size_t kPayloadBytes = kSamples * 2;
+
+  void InsertPacket(uint32_t rtp_timestamp) {
+    uint8_t payload[kPayloadBytes] = {0};
+    RTPHeader rtp_info;
+    PopulateRtpInfo(0, rtp_timestamp, &rtp_info);
+    EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
+  }
+
+  void InsertCngPacket(uint32_t rtp_timestamp) {
+    uint8_t payload[kPayloadBytes] = {0};
+    RTPHeader rtp_info;
+    size_t payload_len;
+    PopulateCng(0, rtp_timestamp, &rtp_info, payload, &payload_len);
+    EXPECT_EQ(
+        NetEq::kOK,
+        neteq_->InsertPacket(
+            rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
+  }
+
+  bool GetAudioReturnMuted() {
+    bool muted;
+    EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+    return muted;
+  }
+
+  void GetAudioUntilMuted() {
+    while (!GetAudioReturnMuted()) {
+      ASSERT_LT(counter_++, 1000) << "Test timed out";
+    }
+  }
+
+  void GetAudioUntilNormal() {
+    bool muted = false;
+    while (out_frame_.speech_type_ != AudioFrame::kNormalSpeech) {
+      EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+      ASSERT_LT(counter_++, 1000) << "Test timed out";
+    }
+    EXPECT_FALSE(muted);
+  }
+
+  int counter_ = 0;
+};
+
+// Verifies that NetEq goes in and out of muted state as expected.
+TEST_F(NetEqDecodingTestWithMutedState, MutedState) {
+  // Insert one speech packet.
+  InsertPacket(0);
+  // Pull out audio once and expect it not to be muted.
+  EXPECT_FALSE(GetAudioReturnMuted());
+  // Pull data until faded out.
+  GetAudioUntilMuted();
+  EXPECT_TRUE(out_frame_.muted());
+
+  // Verify that output audio is not written during muted mode. Other parameters
+  // should be correct, though.
+  AudioFrame new_frame;
+  int16_t* frame_data = new_frame.mutable_data();
+  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; i++) {
+    frame_data[i] = 17;
+  }
+  bool muted;
+  EXPECT_EQ(0, neteq_->GetAudio(&new_frame, &muted));
+  EXPECT_TRUE(muted);
+  EXPECT_TRUE(out_frame_.muted());
+  for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; i++) {
+    EXPECT_EQ(17, frame_data[i]);
+  }
+  EXPECT_EQ(out_frame_.timestamp_ + out_frame_.samples_per_channel_,
+            new_frame.timestamp_);
+  EXPECT_EQ(out_frame_.samples_per_channel_, new_frame.samples_per_channel_);
+  EXPECT_EQ(out_frame_.sample_rate_hz_, new_frame.sample_rate_hz_);
+  EXPECT_EQ(out_frame_.num_channels_, new_frame.num_channels_);
+  EXPECT_EQ(out_frame_.speech_type_, new_frame.speech_type_);
+  EXPECT_EQ(out_frame_.vad_activity_, new_frame.vad_activity_);
+
+  // Insert new data. Timestamp is corrected for the time elapsed since the last
+  // packet. Verify that normal operation resumes.
+  InsertPacket(kSamples * counter_);
+  GetAudioUntilNormal();
+  EXPECT_FALSE(out_frame_.muted());
+
+  NetEqNetworkStatistics stats;
+  EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
+  // NetEqNetworkStatistics::expand_rate tells the fraction of samples that were
+  // concealment samples, in Q14 (16384 = 100%) .The vast majority should be
+  // concealment samples in this test.
+  EXPECT_GT(stats.expand_rate, 14000);
+  // And, it should be greater than the speech_expand_rate.
+  EXPECT_GT(stats.expand_rate, stats.speech_expand_rate);
+}
+
+// Verifies that NetEq goes out of muted state when given a delayed packet.
+TEST_F(NetEqDecodingTestWithMutedState, MutedStateDelayedPacket) {
+  // Insert one speech packet.
+  InsertPacket(0);
+  // Pull out audio once and expect it not to be muted.
+  EXPECT_FALSE(GetAudioReturnMuted());
+  // Pull data until faded out.
+  GetAudioUntilMuted();
+  // Insert new data. Timestamp is only corrected for the half of the time
+  // elapsed since the last packet. That is, the new packet is delayed. Verify
+  // that normal operation resumes.
+  InsertPacket(kSamples * counter_ / 2);
+  GetAudioUntilNormal();
+}
+
+// Verifies that NetEq goes out of muted state when given a future packet.
+TEST_F(NetEqDecodingTestWithMutedState, MutedStateFuturePacket) {
+  // Insert one speech packet.
+  InsertPacket(0);
+  // Pull out audio once and expect it not to be muted.
+  EXPECT_FALSE(GetAudioReturnMuted());
+  // Pull data until faded out.
+  GetAudioUntilMuted();
+  // Insert new data. Timestamp is over-corrected for the time elapsed since the
+  // last packet. That is, the new packet is too early. Verify that normal
+  // operation resumes.
+  InsertPacket(kSamples * counter_ * 2);
+  GetAudioUntilNormal();
+}
+
+// Verifies that NetEq goes out of muted state when given an old packet.
+TEST_F(NetEqDecodingTestWithMutedState, MutedStateOldPacket) {
+  // Insert one speech packet.
+  InsertPacket(0);
+  // Pull out audio once and expect it not to be muted.
+  EXPECT_FALSE(GetAudioReturnMuted());
+  // Pull data until faded out.
+  GetAudioUntilMuted();
+
+  EXPECT_NE(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
+  // Insert packet which is older than the first packet.
+  InsertPacket(kSamples * (counter_ - 1000));
+  EXPECT_FALSE(GetAudioReturnMuted());
+  EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
+}
+
+// Verifies that NetEq doesn't enter muted state when CNG mode is active and the
+// packet stream is suspended for a long time.
+TEST_F(NetEqDecodingTestWithMutedState, DoNotMuteExtendedCngWithoutPackets) {
+  // Insert one CNG packet.
+  InsertCngPacket(0);
+
+  // Pull 10 seconds of audio (10 ms audio generated per lap).
+  for (int i = 0; i < 1000; ++i) {
+    bool muted;
+    EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+    ASSERT_FALSE(muted);
+  }
+  EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
+}
+
+// Verifies that NetEq goes back to normal after a long CNG period with the
+// packet stream suspended.
+TEST_F(NetEqDecodingTestWithMutedState, RecoverAfterExtendedCngWithoutPackets) {
+  // Insert one CNG packet.
+  InsertCngPacket(0);
+
+  // Pull 10 seconds of audio (10 ms audio generated per lap).
+  for (int i = 0; i < 1000; ++i) {
+    bool muted;
+    EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+  }
+
+  // Insert new data. Timestamp is corrected for the time elapsed since the last
+  // packet. Verify that normal operation resumes.
+  InsertPacket(kSamples * counter_);
+  GetAudioUntilNormal();
+}
+
+class NetEqDecodingTestTwoInstances : public NetEqDecodingTest {
+ public:
+  NetEqDecodingTestTwoInstances() : NetEqDecodingTest() {}
+
+  void SetUp() override {
+    NetEqDecodingTest::SetUp();
+    config2_ = config_;
+  }
+
+  void CreateSecondInstance() {
+    neteq2_.reset(NetEq::Create(config2_, CreateBuiltinAudioDecoderFactory()));
+    ASSERT_TRUE(neteq2_);
+    LoadDecoders(neteq2_.get());
+  }
+
+ protected:
+  std::unique_ptr<NetEq> neteq2_;
+  NetEq::Config config2_;
+};
+
+namespace {
+::testing::AssertionResult AudioFramesEqualExceptData(const AudioFrame& a,
+                                                      const AudioFrame& b) {
+  if (a.timestamp_ != b.timestamp_)
+    return ::testing::AssertionFailure() << "timestamp_ diff (" << a.timestamp_
+                                         << " != " << b.timestamp_ << ")";
+  if (a.sample_rate_hz_ != b.sample_rate_hz_)
+    return ::testing::AssertionFailure() << "sample_rate_hz_ diff ("
+                                         << a.sample_rate_hz_
+                                         << " != " << b.sample_rate_hz_ << ")";
+  if (a.samples_per_channel_ != b.samples_per_channel_)
+    return ::testing::AssertionFailure()
+           << "samples_per_channel_ diff (" << a.samples_per_channel_
+           << " != " << b.samples_per_channel_ << ")";
+  if (a.num_channels_ != b.num_channels_)
+    return ::testing::AssertionFailure() << "num_channels_ diff ("
+                                         << a.num_channels_
+                                         << " != " << b.num_channels_ << ")";
+  if (a.speech_type_ != b.speech_type_)
+    return ::testing::AssertionFailure() << "speech_type_ diff ("
+                                         << a.speech_type_
+                                         << " != " << b.speech_type_ << ")";
+  if (a.vad_activity_ != b.vad_activity_)
+    return ::testing::AssertionFailure() << "vad_activity_ diff ("
+                                         << a.vad_activity_
+                                         << " != " << b.vad_activity_ << ")";
+  return ::testing::AssertionSuccess();
+}
+
+::testing::AssertionResult AudioFramesEqual(const AudioFrame& a,
+                                            const AudioFrame& b) {
+  ::testing::AssertionResult res = AudioFramesEqualExceptData(a, b);
+  if (!res)
+    return res;
+  if (memcmp(
+      a.data(), b.data(),
+      a.samples_per_channel_ * a.num_channels_ * sizeof(*a.data())) != 0) {
+    return ::testing::AssertionFailure() << "data_ diff";
+  }
+  return ::testing::AssertionSuccess();
+}
+
+}  // namespace
+
+TEST_F(NetEqDecodingTestTwoInstances, CompareMutedStateOnOff) {
+  ASSERT_FALSE(config_.enable_muted_state);
+  config2_.enable_muted_state = true;
+  CreateSecondInstance();
+
+  // Insert one speech packet into both NetEqs.
+  const size_t kSamples = 10 * 16;
+  const size_t kPayloadBytes = kSamples * 2;
+  uint8_t payload[kPayloadBytes] = {0};
+  RTPHeader rtp_info;
+  PopulateRtpInfo(0, 0, &rtp_info);
+  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
+  EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload, 0));
+
+  AudioFrame out_frame1, out_frame2;
+  bool muted;
+  for (int i = 0; i < 1000; ++i) {
+    std::ostringstream ss;
+    ss << "i = " << i;
+    SCOPED_TRACE(ss.str());  // Print out the loop iterator on failure.
+    EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted));
+    EXPECT_FALSE(muted);
+    EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted));
+    if (muted) {
+      EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
+    } else {
+      EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
+    }
+  }
+  EXPECT_TRUE(muted);
+
+  // Insert new data. Timestamp is corrected for the time elapsed since the last
+  // packet.
+  PopulateRtpInfo(0, kSamples * 1000, &rtp_info);
+  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
+  EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload, 0));
+
+  int counter = 0;
+  while (out_frame1.speech_type_ != AudioFrame::kNormalSpeech) {
+    ASSERT_LT(counter++, 1000) << "Test timed out";
+    std::ostringstream ss;
+    ss << "counter = " << counter;
+    SCOPED_TRACE(ss.str());  // Print out the loop iterator on failure.
+    EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted));
+    EXPECT_FALSE(muted);
+    EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted));
+    if (muted) {
+      EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
+    } else {
+      EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
+    }
+  }
+  EXPECT_FALSE(muted);
+}
+
+TEST_F(NetEqDecodingTest, LastDecodedTimestampsEmpty) {
+  EXPECT_TRUE(neteq_->LastDecodedTimestamps().empty());
+
+  // Pull out data once.
+  AudioFrame output;
+  bool muted;
+  ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
+
+  EXPECT_TRUE(neteq_->LastDecodedTimestamps().empty());
+}
+
+TEST_F(NetEqDecodingTest, LastDecodedTimestampsOneDecoded) {
+  // Insert one packet with PCM16b WB data (this is what PopulateRtpInfo does by
+  // default). Make the length 10 ms.
+  constexpr size_t kPayloadSamples = 16 * 10;
+  constexpr size_t kPayloadBytes = 2 * kPayloadSamples;
+  uint8_t payload[kPayloadBytes] = {0};
+
+  RTPHeader rtp_info;
+  constexpr uint32_t kRtpTimestamp = 0x1234;
+  PopulateRtpInfo(0, kRtpTimestamp, &rtp_info);
+  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
+
+  // Pull out data once.
+  AudioFrame output;
+  bool muted;
+  ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
+
+  EXPECT_EQ(std::vector<uint32_t>({kRtpTimestamp}),
+            neteq_->LastDecodedTimestamps());
+
+  // Nothing decoded on the second call.
+  ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
+  EXPECT_TRUE(neteq_->LastDecodedTimestamps().empty());
+}
+
+TEST_F(NetEqDecodingTest, LastDecodedTimestampsTwoDecoded) {
+  // Insert two packets with PCM16b WB data (this is what PopulateRtpInfo does
+  // by default). Make the length 5 ms so that NetEq must decode them both in
+  // the same GetAudio call.
+  constexpr size_t kPayloadSamples = 16 * 5;
+  constexpr size_t kPayloadBytes = 2 * kPayloadSamples;
+  uint8_t payload[kPayloadBytes] = {0};
+
+  RTPHeader rtp_info;
+  constexpr uint32_t kRtpTimestamp1 = 0x1234;
+  PopulateRtpInfo(0, kRtpTimestamp1, &rtp_info);
+  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
+  constexpr uint32_t kRtpTimestamp2 = kRtpTimestamp1 + kPayloadSamples;
+  PopulateRtpInfo(1, kRtpTimestamp2, &rtp_info);
+  EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
+
+  // Pull out data once.
+  AudioFrame output;
+  bool muted;
+  ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
+
+  EXPECT_EQ(std::vector<uint32_t>({kRtpTimestamp1, kRtpTimestamp2}),
+            neteq_->LastDecodedTimestamps());
+}
+
+TEST_F(NetEqDecodingTest, TestConcealmentEvents) {
+  const int kNumConcealmentEvents = 19;
+  const size_t kSamples = 10 * 16;
+  const size_t kPayloadBytes = kSamples * 2;
+  int seq_no = 0;
+  RTPHeader rtp_info;
+  rtp_info.ssrc = 0x1234;     // Just an arbitrary SSRC.
+  rtp_info.payloadType = 94;  // PCM16b WB codec.
+  rtp_info.markerBit = 0;
+  const uint8_t payload[kPayloadBytes] = {0};
+  bool muted;
+
+  for (int i = 0; i < kNumConcealmentEvents; i++) {
+    // Insert some packets of 10 ms size.
+    for (int j = 0; j < 10; j++) {
+      rtp_info.sequenceNumber = seq_no++;
+      rtp_info.timestamp = rtp_info.sequenceNumber * kSamples;
+      neteq_->InsertPacket(rtp_info, payload, 0);
+      neteq_->GetAudio(&out_frame_, &muted);
+    }
+
+    // Lose a number of packets.
+    int num_lost = 1 + i;
+    for (int j = 0; j < num_lost; j++) {
+      seq_no++;
+      neteq_->GetAudio(&out_frame_, &muted);
+    }
+  }
+
+  // Check number of concealment events.
+  NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();
+  EXPECT_EQ(kNumConcealmentEvents, static_cast<int>(stats.concealment_events));
+}
+
+// Test that the jitter buffer delay stat is computed correctly.
+void NetEqDecodingTestFaxMode::TestJitterBufferDelay(bool apply_packet_loss) {
+  const int kNumPackets = 10;
+  const int kDelayInNumPackets = 2;
+  const int kPacketLenMs = 10;  // All packets are of 10 ms size.
+  const size_t kSamples = kPacketLenMs * 16;
+  const size_t kPayloadBytes = kSamples * 2;
+  RTPHeader rtp_info;
+  rtp_info.ssrc = 0x1234;     // Just an arbitrary SSRC.
+  rtp_info.payloadType = 94;  // PCM16b WB codec.
+  rtp_info.markerBit = 0;
+  const uint8_t payload[kPayloadBytes] = {0};
+  bool muted;
+  int packets_sent = 0;
+  int packets_received = 0;
+  int expected_delay = 0;
+  while (packets_received < kNumPackets) {
+    // Insert packet.
+    if (packets_sent < kNumPackets) {
+      rtp_info.sequenceNumber = packets_sent++;
+      rtp_info.timestamp = rtp_info.sequenceNumber * kSamples;
+      neteq_->InsertPacket(rtp_info, payload, 0);
+    }
+
+    // Get packet.
+    if (packets_sent > kDelayInNumPackets) {
+      neteq_->GetAudio(&out_frame_, &muted);
+      packets_received++;
+
+      // The delay reported by the jitter buffer never exceeds
+      // the number of samples previously fetched with GetAudio
+      // (hence the min()).
+      int packets_delay = std::min(packets_received, kDelayInNumPackets + 1);
+
+      // The increase of the expected delay is the product of
+      // the current delay of the jitter buffer in ms * the
+      // number of samples that are sent for play out.
+      int current_delay_ms = packets_delay * kPacketLenMs;
+      expected_delay += current_delay_ms * kSamples;
+    }
+  }
+
+  if (apply_packet_loss) {
+    // Extra call to GetAudio to cause concealment.
+    neteq_->GetAudio(&out_frame_, &muted);
+  }
+
+  // Check jitter buffer delay.
+  NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();
+  EXPECT_EQ(expected_delay, static_cast<int>(stats.jitter_buffer_delay_ms));
+}
+
+TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithoutLoss) {
+  TestJitterBufferDelay(false);
+}
+
+TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithLoss) {
+  TestJitterBufferDelay(true);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/neteq_unittest.proto b/modules/audio_coding/neteq/neteq_unittest.proto
new file mode 100644
index 0000000..344dcf6
--- /dev/null
+++ b/modules/audio_coding/neteq/neteq_unittest.proto
@@ -0,0 +1,32 @@
+syntax = "proto2";
+option optimize_for = LITE_RUNTIME;
+package webrtc.neteq_unittest;
+
+message NetEqNetworkStatistics {
+  // Next field number 18.
+  optional uint32 current_buffer_size_ms = 1;
+  optional uint32 preferred_buffer_size_ms = 2;
+  optional uint32 jitter_peaks_found = 3;
+  optional uint32 packet_loss_rate = 4;
+  optional uint32 packet_discard_rate = 5 [deprecated = true];
+  optional uint32 expand_rate = 6;
+  optional uint32 speech_expand_rate = 7;
+  optional uint32 preemptive_rate = 8;
+  optional uint32 accelerate_rate = 9;
+  optional uint32 secondary_decoded_rate = 10;
+  optional uint32 secondary_discarded_rate = 17;
+  optional int32 clockdrift_ppm = 11;
+  optional uint64 added_zero_samples = 12;
+  optional int32 mean_waiting_time_ms = 13;
+  optional int32 median_waiting_time_ms = 14;
+  optional int32 min_waiting_time_ms = 15;
+  optional int32 max_waiting_time_ms = 16;
+}
+
+message RtcpStatistics {
+  optional uint32 fraction_lost = 1;
+  optional uint32 cumulative_lost = 2;
+  optional uint32 extended_max_sequence_number = 3;
+  optional uint32 jitter = 4;
+}
+
diff --git a/modules/audio_coding/neteq/normal.cc b/modules/audio_coding/neteq/normal.cc
new file mode 100644
index 0000000..48d723a
--- /dev/null
+++ b/modules/audio_coding/neteq/normal.cc
@@ -0,0 +1,216 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/normal.h"
+
+#include <string.h>  // memset, memcpy
+
+#include <algorithm>  // min
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "modules/audio_coding/neteq/background_noise.h"
+#include "modules/audio_coding/neteq/decoder_database.h"
+#include "modules/audio_coding/neteq/expand.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+int Normal::Process(const int16_t* input,
+                    size_t length,
+                    Modes last_mode,
+                    int16_t* external_mute_factor_array,
+                    AudioMultiVector* output) {
+  if (length == 0) {
+    // Nothing to process.
+    output->Clear();
+    return static_cast<int>(length);
+  }
+
+  RTC_DCHECK(output->Empty());
+  // Output should be empty at this point.
+  if (length % output->Channels() != 0) {
+    // The length does not match the number of channels.
+    output->Clear();
+    return 0;
+  }
+  output->PushBackInterleaved(input, length);
+
+  const int fs_mult = fs_hz_ / 8000;
+  RTC_DCHECK_GT(fs_mult, 0);
+  // fs_shift = log2(fs_mult), rounded down.
+  // Note that |fs_shift| is not "exact" for 48 kHz.
+  // TODO(hlundin): Investigate this further.
+  const int fs_shift = 30 - WebRtcSpl_NormW32(fs_mult);
+
+  // Check if last RecOut call resulted in an Expand. If so, we have to take
+  // care of some cross-fading and unmuting.
+  if (last_mode == kModeExpand) {
+    // Generate interpolation data using Expand.
+    // First, set Expand parameters to appropriate values.
+    expand_->SetParametersForNormalAfterExpand();
+
+    // Call Expand.
+    AudioMultiVector expanded(output->Channels());
+    expand_->Process(&expanded);
+    expand_->Reset();
+
+    size_t length_per_channel = length / output->Channels();
+    std::unique_ptr<int16_t[]> signal(new int16_t[length_per_channel]);
+    for (size_t channel_ix = 0; channel_ix < output->Channels(); ++channel_ix) {
+      // Adjust muting factor (main muting factor times expand muting factor).
+      external_mute_factor_array[channel_ix] = static_cast<int16_t>(
+          (external_mute_factor_array[channel_ix] *
+          expand_->MuteFactor(channel_ix)) >> 14);
+
+      (*output)[channel_ix].CopyTo(length_per_channel, 0, signal.get());
+
+      // Find largest absolute value in new data.
+      int16_t decoded_max =
+          WebRtcSpl_MaxAbsValueW16(signal.get(), length_per_channel);
+      // Adjust muting factor if needed (to BGN level).
+      size_t energy_length =
+          std::min(static_cast<size_t>(fs_mult * 64), length_per_channel);
+      int scaling = 6 + fs_shift
+          - WebRtcSpl_NormW32(decoded_max * decoded_max);
+      scaling = std::max(scaling, 0);  // |scaling| should always be >= 0.
+      int32_t energy = WebRtcSpl_DotProductWithScale(signal.get(), signal.get(),
+                                                     energy_length, scaling);
+      int32_t scaled_energy_length =
+          static_cast<int32_t>(energy_length >> scaling);
+      if (scaled_energy_length > 0) {
+        energy = energy / scaled_energy_length;
+      } else {
+        energy = 0;
+      }
+
+      int mute_factor;
+      if ((energy != 0) &&
+          (energy > background_noise_.Energy(channel_ix))) {
+        // Normalize new frame energy to 15 bits.
+        scaling = WebRtcSpl_NormW32(energy) - 16;
+        // We want background_noise_.energy() / energy in Q14.
+        int32_t bgn_energy = WEBRTC_SPL_SHIFT_W32(
+            background_noise_.Energy(channel_ix), scaling + 14);
+        int16_t energy_scaled =
+            static_cast<int16_t>(WEBRTC_SPL_SHIFT_W32(energy, scaling));
+        int32_t ratio = WebRtcSpl_DivW32W16(bgn_energy, energy_scaled);
+        mute_factor = WebRtcSpl_SqrtFloor(ratio << 14);
+      } else {
+        mute_factor = 16384;  // 1.0 in Q14.
+      }
+      if (mute_factor > external_mute_factor_array[channel_ix]) {
+        external_mute_factor_array[channel_ix] =
+            static_cast<int16_t>(std::min(mute_factor, 16384));
+      }
+
+      // If muted increase by 0.64 for every 20 ms (NB/WB 0.0040/0.0020 in Q14).
+      int increment = 64 / fs_mult;
+      for (size_t i = 0; i < length_per_channel; i++) {
+        // Scale with mute factor.
+        RTC_DCHECK_LT(channel_ix, output->Channels());
+        RTC_DCHECK_LT(i, output->Size());
+        int32_t scaled_signal = (*output)[channel_ix][i] *
+            external_mute_factor_array[channel_ix];
+        // Shift 14 with proper rounding.
+        (*output)[channel_ix][i] =
+            static_cast<int16_t>((scaled_signal + 8192) >> 14);
+        // Increase mute_factor towards 16384.
+        external_mute_factor_array[channel_ix] = static_cast<int16_t>(std::min(
+            external_mute_factor_array[channel_ix] + increment, 16384));
+      }
+
+      // Interpolate the expanded data into the new vector.
+      // (NB/WB/SWB32/SWB48 8/16/32/48 samples.)
+      size_t win_length = samples_per_ms_;
+      int16_t win_slope_Q14 = default_win_slope_Q14_;
+      RTC_DCHECK_LT(channel_ix, output->Channels());
+      if (win_length > output->Size()) {
+        win_length = output->Size();
+        win_slope_Q14 = (1 << 14) / static_cast<int16_t>(win_length);
+      }
+      int16_t win_up_Q14 = 0;
+      for (size_t i = 0; i < win_length; i++) {
+        win_up_Q14 += win_slope_Q14;
+        (*output)[channel_ix][i] =
+            (win_up_Q14 * (*output)[channel_ix][i] +
+             ((1 << 14) - win_up_Q14) * expanded[channel_ix][i] + (1 << 13)) >>
+            14;
+      }
+      RTC_DCHECK_GT(win_up_Q14,
+                    (1 << 14) - 32);  // Worst case rouding is a length of 34
+    }
+  } else if (last_mode == kModeRfc3389Cng) {
+    RTC_DCHECK_EQ(output->Channels(), 1);  // Not adapted for multi-channel yet.
+    static const size_t kCngLength = 48;
+    RTC_DCHECK_LE(8 * fs_mult, kCngLength);
+    int16_t cng_output[kCngLength];
+    // Reset mute factor and start up fresh.
+    external_mute_factor_array[0] = 16384;
+    ComfortNoiseDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
+
+    if (cng_decoder) {
+      // Generate long enough for 48kHz.
+      if (!cng_decoder->Generate(cng_output, 0)) {
+        // Error returned; set return vector to all zeros.
+        memset(cng_output, 0, sizeof(cng_output));
+      }
+    } else {
+      // If no CNG instance is defined, just copy from the decoded data.
+      // (This will result in interpolating the decoded with itself.)
+      (*output)[0].CopyTo(fs_mult * 8, 0, cng_output);
+    }
+    // Interpolate the CNG into the new vector.
+    // (NB/WB/SWB32/SWB48 8/16/32/48 samples.)
+    size_t win_length = samples_per_ms_;
+    int16_t win_slope_Q14 = default_win_slope_Q14_;
+    if (win_length > kCngLength) {
+      win_length = kCngLength;
+      win_slope_Q14 = (1 << 14) / static_cast<int16_t>(win_length);
+    }
+    int16_t win_up_Q14 = 0;
+    for (size_t i = 0; i < win_length; i++) {
+      win_up_Q14 += win_slope_Q14;
+      (*output)[0][i] =
+          (win_up_Q14 * (*output)[0][i] +
+           ((1 << 14) - win_up_Q14) * cng_output[i] + (1 << 13)) >>
+          14;
+    }
+    RTC_DCHECK_GT(win_up_Q14,
+                  (1 << 14) - 32);  // Worst case rouding is a length of 34
+  } else if (external_mute_factor_array[0] < 16384) {
+    // Previous was neither of Expand, FadeToBGN or RFC3389_CNG, but we are
+    // still ramping up from previous muting.
+    // If muted increase by 0.64 for every 20 ms (NB/WB 0.0040/0.0020 in Q14).
+    int increment = 64 / fs_mult;
+    size_t length_per_channel = length / output->Channels();
+    for (size_t i = 0; i < length_per_channel; i++) {
+      for (size_t channel_ix = 0; channel_ix < output->Channels();
+          ++channel_ix) {
+        // Scale with mute factor.
+        RTC_DCHECK_LT(channel_ix, output->Channels());
+        RTC_DCHECK_LT(i, output->Size());
+        int32_t scaled_signal = (*output)[channel_ix][i] *
+            external_mute_factor_array[channel_ix];
+        // Shift 14 with proper rounding.
+        (*output)[channel_ix][i] =
+            static_cast<int16_t>((scaled_signal + 8192) >> 14);
+        // Increase mute_factor towards 16384.
+        external_mute_factor_array[channel_ix] = static_cast<int16_t>(std::min(
+            16384, external_mute_factor_array[channel_ix] + increment));
+      }
+    }
+  }
+
+  return static_cast<int>(length);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/normal.h b/modules/audio_coding/neteq/normal.h
new file mode 100644
index 0000000..ab02217
--- /dev/null
+++ b/modules/audio_coding/neteq/normal.h
@@ -0,0 +1,75 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_NORMAL_H_
+#define MODULES_AUDIO_CODING_NETEQ_NORMAL_H_
+
+#include <string.h>  // Access to size_t.
+
+#include <vector>
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "modules/audio_coding/neteq/defines.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Forward declarations.
+class BackgroundNoise;
+class DecoderDatabase;
+class Expand;
+
+// This class provides the "Normal" DSP operation, that is performed when
+// there is no data loss, no need to stretch the timing of the signal, and
+// no other "special circumstances" are at hand.
+class Normal {
+ public:
+  Normal(int fs_hz,
+         DecoderDatabase* decoder_database,
+         const BackgroundNoise& background_noise,
+         Expand* expand)
+      : fs_hz_(fs_hz),
+        decoder_database_(decoder_database),
+        background_noise_(background_noise),
+        expand_(expand),
+        samples_per_ms_(rtc::CheckedDivExact(fs_hz_, 1000)),
+        default_win_slope_Q14_(
+            rtc::dchecked_cast<uint16_t>((1 << 14) / samples_per_ms_)) {}
+
+  virtual ~Normal() {}
+
+  // Performs the "Normal" operation. The decoder data is supplied in |input|,
+  // having |length| samples in total for all channels (interleaved). The
+  // result is written to |output|. The number of channels allocated in
+  // |output| defines the number of channels that will be used when
+  // de-interleaving |input|. |last_mode| contains the mode used in the previous
+  // GetAudio call (i.e., not the current one), and |external_mute_factor| is
+  // a pointer to the mute factor in the NetEqImpl class.
+  int Process(const int16_t* input, size_t length,
+              Modes last_mode,
+              int16_t* external_mute_factor_array,
+              AudioMultiVector* output);
+
+ private:
+  int fs_hz_;
+  DecoderDatabase* decoder_database_;
+  const BackgroundNoise& background_noise_;
+  Expand* expand_;
+  const size_t samples_per_ms_;
+  const int16_t default_win_slope_Q14_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(Normal);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_NORMAL_H_
diff --git a/modules/audio_coding/neteq/normal_unittest.cc b/modules/audio_coding/neteq/normal_unittest.cc
new file mode 100644
index 0000000..b0655d9
--- /dev/null
+++ b/modules/audio_coding/neteq/normal_unittest.cc
@@ -0,0 +1,176 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for Normal class.
+
+#include "modules/audio_coding/neteq/normal.h"
+
+#include <memory>
+#include <vector>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "modules/audio_coding/neteq/background_noise.h"
+#include "modules/audio_coding/neteq/expand.h"
+#include "modules/audio_coding/neteq/mock/mock_decoder_database.h"
+#include "modules/audio_coding/neteq/mock/mock_expand.h"
+#include "modules/audio_coding/neteq/random_vector.h"
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+using ::testing::Invoke;
+
+namespace webrtc {
+
+namespace {
+
+int ExpandProcess120ms(AudioMultiVector* output) {
+  AudioMultiVector dummy_audio(1, 11520u);
+  dummy_audio.CopyTo(output);
+  return 0;
+}
+
+} // namespace
+
+TEST(Normal, CreateAndDestroy) {
+  MockDecoderDatabase db;
+  int fs = 8000;
+  size_t channels = 1;
+  BackgroundNoise bgn(channels);
+  SyncBuffer sync_buffer(1, 1000);
+  RandomVector random_vector;
+  StatisticsCalculator statistics;
+  Expand expand(&bgn, &sync_buffer, &random_vector, &statistics, fs, channels);
+  Normal normal(fs, &db, bgn, &expand);
+  EXPECT_CALL(db, Die());  // Called when |db| goes out of scope.
+}
+
+TEST(Normal, AvoidDivideByZero) {
+  WebRtcSpl_Init();
+  MockDecoderDatabase db;
+  int fs = 8000;
+  size_t channels = 1;
+  BackgroundNoise bgn(channels);
+  SyncBuffer sync_buffer(1, 1000);
+  RandomVector random_vector;
+  StatisticsCalculator statistics;
+  MockExpand expand(&bgn, &sync_buffer, &random_vector, &statistics, fs,
+                    channels);
+  Normal normal(fs, &db, bgn, &expand);
+
+  int16_t input[1000] = {0};
+  std::unique_ptr<int16_t[]> mute_factor_array(new int16_t[channels]);
+  for (size_t i = 0; i < channels; ++i) {
+    mute_factor_array[i] = 16384;
+  }
+  AudioMultiVector output(channels);
+
+  // Zero input length.
+  EXPECT_EQ(
+      0,
+      normal.Process(input, 0, kModeExpand, mute_factor_array.get(), &output));
+  EXPECT_EQ(0u, output.Size());
+
+  // Try to make energy_length >> scaling = 0;
+  EXPECT_CALL(expand, SetParametersForNormalAfterExpand());
+  EXPECT_CALL(expand, Process(_));
+  EXPECT_CALL(expand, Reset());
+  // If input_size_samples < 64, then energy_length in Normal::Process() will
+  // be equal to input_size_samples. Since the input is all zeros, decoded_max
+  // will be zero, and scaling will be >= 6. Thus, energy_length >> scaling = 0,
+  // and using this as a denominator would lead to problems.
+  int input_size_samples = 63;
+  EXPECT_EQ(input_size_samples,
+            normal.Process(input,
+                           input_size_samples,
+                           kModeExpand,
+                           mute_factor_array.get(),
+                           &output));
+
+  EXPECT_CALL(db, Die());      // Called when |db| goes out of scope.
+  EXPECT_CALL(expand, Die());  // Called when |expand| goes out of scope.
+}
+
+TEST(Normal, InputLengthAndChannelsDoNotMatch) {
+  WebRtcSpl_Init();
+  MockDecoderDatabase db;
+  int fs = 8000;
+  size_t channels = 2;
+  BackgroundNoise bgn(channels);
+  SyncBuffer sync_buffer(channels, 1000);
+  RandomVector random_vector;
+  StatisticsCalculator statistics;
+  MockExpand expand(&bgn, &sync_buffer, &random_vector, &statistics, fs,
+                    channels);
+  Normal normal(fs, &db, bgn, &expand);
+
+  int16_t input[1000] = {0};
+  std::unique_ptr<int16_t[]> mute_factor_array(new int16_t[channels]);
+  for (size_t i = 0; i < channels; ++i) {
+    mute_factor_array[i] = 16384;
+  }
+  AudioMultiVector output(channels);
+
+  // Let the number of samples be one sample less than 80 samples per channel.
+  size_t input_len = 80 * channels - 1;
+  EXPECT_EQ(
+      0,
+      normal.Process(
+          input, input_len, kModeExpand, mute_factor_array.get(), &output));
+  EXPECT_EQ(0u, output.Size());
+
+  EXPECT_CALL(db, Die());      // Called when |db| goes out of scope.
+  EXPECT_CALL(expand, Die());  // Called when |expand| goes out of scope.
+}
+
+TEST(Normal, LastModeExpand120msPacket) {
+  WebRtcSpl_Init();
+  MockDecoderDatabase db;
+  const int kFs = 48000;
+  const size_t kPacketsizeBytes = 11520u;
+  const size_t kChannels = 1;
+  BackgroundNoise bgn(kChannels);
+  SyncBuffer sync_buffer(kChannels, 1000);
+  RandomVector random_vector;
+  StatisticsCalculator statistics;
+  MockExpand expand(&bgn, &sync_buffer, &random_vector, &statistics, kFs,
+                    kChannels);
+  Normal normal(kFs, &db, bgn, &expand);
+
+  int16_t input[kPacketsizeBytes] = {0};
+
+  std::unique_ptr<int16_t[]> mute_factor_array(new int16_t[kChannels]);
+  for (size_t i = 0; i < kChannels; ++i) {
+    mute_factor_array[i] = 16384;
+  }
+
+  AudioMultiVector output(kChannels);
+
+  EXPECT_CALL(expand, SetParametersForNormalAfterExpand());
+  EXPECT_CALL(expand, Process(_)).WillOnce(Invoke(ExpandProcess120ms));
+  EXPECT_CALL(expand, Reset());
+  EXPECT_EQ(static_cast<int>(kPacketsizeBytes),
+            normal.Process(input,
+                           kPacketsizeBytes,
+                           kModeExpand,
+                           mute_factor_array.get(),
+                           &output));
+
+  EXPECT_EQ(kPacketsizeBytes, output.Size());
+
+  EXPECT_CALL(db, Die());      // Called when |db| goes out of scope.
+  EXPECT_CALL(expand, Die());  // Called when |expand| goes out of scope.
+}
+
+// TODO(hlundin): Write more tests.
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/packet.cc b/modules/audio_coding/neteq/packet.cc
new file mode 100644
index 0000000..3cec310
--- /dev/null
+++ b/modules/audio_coding/neteq/packet.cc
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/packet.h"
+
+namespace webrtc {
+
+Packet::Packet() = default;
+Packet::Packet(Packet&& b) = default;
+
+Packet::~Packet() = default;
+
+Packet& Packet::operator=(Packet&& b) = default;
+
+Packet Packet::Clone() const {
+  RTC_CHECK(!frame);
+
+  Packet clone;
+  clone.timestamp = timestamp;
+  clone.sequence_number = sequence_number;
+  clone.payload_type = payload_type;
+  clone.payload.SetData(payload.data(), payload.size());
+  clone.priority = priority;
+
+  return clone;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/packet.h b/modules/audio_coding/neteq/packet.h
new file mode 100644
index 0000000..61b0144
--- /dev/null
+++ b/modules/audio_coding/neteq/packet.h
@@ -0,0 +1,124 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_PACKET_H_
+#define MODULES_AUDIO_CODING_NETEQ_PACKET_H_
+
+#include <list>
+#include <memory>
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "modules/audio_coding/neteq/tick_timer.h"
+#include "rtc_base/buffer.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Struct for holding RTP packets.
+struct Packet {
+  struct Priority {
+    Priority() : codec_level(0), red_level(0) {}
+    Priority(int codec_level, int red_level)
+        : codec_level(codec_level), red_level(red_level) {
+      CheckInvariant();
+    }
+
+    int codec_level;
+    int red_level;
+
+    // Priorities are sorted low-to-high, first on the level the codec
+    // prioritizes it, then on the level of RED packet it is; i.e. if it is a
+    // primary or secondary payload of a RED packet. For example: with Opus, an
+    // Fec packet (which the decoder prioritizes lower than a regular packet)
+    // will not be used if there is _any_ RED payload for the same
+    // timeframe. The highest priority packet will have levels {0, 0}. Negative
+    // priorities are not allowed.
+    bool operator<(const Priority& b) const {
+      CheckInvariant();
+      b.CheckInvariant();
+      if (codec_level == b.codec_level)
+        return red_level < b.red_level;
+
+      return codec_level < b.codec_level;
+    }
+    bool operator==(const Priority& b) const {
+      CheckInvariant();
+      b.CheckInvariant();
+      return codec_level == b.codec_level && red_level == b.red_level;
+    }
+    bool operator!=(const Priority& b) const { return !(*this == b); }
+    bool operator>(const Priority& b) const { return b < *this; }
+    bool operator<=(const Priority& b) const { return !(b > *this); }
+    bool operator>=(const Priority& b) const { return !(b < *this); }
+
+   private:
+    void CheckInvariant() const {
+      RTC_DCHECK_GE(codec_level, 0);
+      RTC_DCHECK_GE(red_level, 0);
+    }
+  };
+
+  uint32_t timestamp;
+  uint16_t sequence_number;
+  uint8_t payload_type;
+  // Datagram excluding RTP header and header extension.
+  rtc::Buffer payload;
+  Priority priority;
+  std::unique_ptr<TickTimer::Stopwatch> waiting_time;
+  std::unique_ptr<AudioDecoder::EncodedAudioFrame> frame;
+
+  Packet();
+  Packet(Packet&& b);
+  ~Packet();
+
+  // Packets should generally be moved around but sometimes it's useful to make
+  // a copy, for example for testing purposes. NOTE: Will only work for
+  // un-parsed packets, i.e. |frame| must be unset. The payload will, however,
+  // be copied. |waiting_time| will also not be copied.
+  Packet Clone() const;
+
+  Packet& operator=(Packet&& b);
+
+  // Comparison operators. Establish a packet ordering based on (1) timestamp,
+  // (2) sequence number and (3) redundancy.
+  // Timestamp and sequence numbers are compared taking wrap-around into
+  // account. For two packets with the same sequence number and timestamp a
+  // primary payload is considered "smaller" than a secondary.
+  bool operator==(const Packet& rhs) const {
+    return (this->timestamp == rhs.timestamp &&
+            this->sequence_number == rhs.sequence_number &&
+            this->priority == rhs.priority);
+  }
+  bool operator!=(const Packet& rhs) const { return !operator==(rhs); }
+  bool operator<(const Packet& rhs) const {
+    if (this->timestamp == rhs.timestamp) {
+      if (this->sequence_number == rhs.sequence_number) {
+        // Timestamp and sequence numbers are identical - deem the left hand
+        // side to be "smaller" (i.e., "earlier") if it has higher priority.
+        return this->priority < rhs.priority;
+      }
+      return (static_cast<uint16_t>(rhs.sequence_number -
+                                    this->sequence_number) < 0xFFFF / 2);
+    }
+    return (static_cast<uint32_t>(rhs.timestamp - this->timestamp) <
+            0xFFFFFFFF / 2);
+  }
+  bool operator>(const Packet& rhs) const { return rhs.operator<(*this); }
+  bool operator<=(const Packet& rhs) const { return !operator>(rhs); }
+  bool operator>=(const Packet& rhs) const { return !operator<(rhs); }
+
+  bool empty() const { return !frame && payload.empty(); }
+};
+
+// A list of packets.
+typedef std::list<Packet> PacketList;
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_PACKET_H_
diff --git a/modules/audio_coding/neteq/packet_buffer.cc b/modules/audio_coding/neteq/packet_buffer.cc
new file mode 100644
index 0000000..dfffebd
--- /dev/null
+++ b/modules/audio_coding/neteq/packet_buffer.cc
@@ -0,0 +1,293 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This is the implementation of the PacketBuffer class. It is mostly based on
+// an STL list. The list is kept sorted at all times so that the next packet to
+// decode is at the beginning of the list.
+
+#include "modules/audio_coding/neteq/packet_buffer.h"
+
+#include <algorithm>  // find_if()
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "modules/audio_coding/neteq/decoder_database.h"
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+#include "modules/audio_coding/neteq/tick_timer.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+// Predicate used when inserting packets in the buffer list.
+// Operator() returns true when |packet| goes before |new_packet|.
+class NewTimestampIsLarger {
+ public:
+  explicit NewTimestampIsLarger(const Packet& new_packet)
+      : new_packet_(new_packet) {
+  }
+  bool operator()(const Packet& packet) {
+    return (new_packet_ >= packet);
+  }
+
+ private:
+  const Packet& new_packet_;
+};
+
+// Returns true if both payload types are known to the decoder database, and
+// have the same sample rate.
+bool EqualSampleRates(uint8_t pt1,
+                      uint8_t pt2,
+                      const DecoderDatabase& decoder_database) {
+  auto* di1 = decoder_database.GetDecoderInfo(pt1);
+  auto* di2 = decoder_database.GetDecoderInfo(pt2);
+  return di1 && di2 && di1->SampleRateHz() == di2->SampleRateHz();
+}
+
+void LogPacketDiscarded(int codec_level, StatisticsCalculator* stats) {
+  RTC_CHECK(stats);
+  if (codec_level > 0) {
+    stats->SecondaryPacketsDiscarded(1);
+  } else {
+    stats->PacketsDiscarded(1);
+  }
+}
+
+}  // namespace
+
+PacketBuffer::PacketBuffer(size_t max_number_of_packets,
+                           const TickTimer* tick_timer)
+    : max_number_of_packets_(max_number_of_packets), tick_timer_(tick_timer) {}
+
+// Destructor. All packets in the buffer will be destroyed.
+PacketBuffer::~PacketBuffer() {
+  Flush();
+}
+
+// Flush the buffer. All packets in the buffer will be destroyed.
+void PacketBuffer::Flush() {
+  buffer_.clear();
+}
+
+bool PacketBuffer::Empty() const {
+  return buffer_.empty();
+}
+
+int PacketBuffer::InsertPacket(Packet&& packet, StatisticsCalculator* stats) {
+  if (packet.empty()) {
+    RTC_LOG(LS_WARNING) << "InsertPacket invalid packet";
+    return kInvalidPacket;
+  }
+
+  RTC_DCHECK_GE(packet.priority.codec_level, 0);
+  RTC_DCHECK_GE(packet.priority.red_level, 0);
+
+  int return_val = kOK;
+
+  packet.waiting_time = tick_timer_->GetNewStopwatch();
+
+  if (buffer_.size() >= max_number_of_packets_) {
+    // Buffer is full. Flush it.
+    Flush();
+    RTC_LOG(LS_WARNING) << "Packet buffer flushed";
+    return_val = kFlushed;
+  }
+
+  // Get an iterator pointing to the place in the buffer where the new packet
+  // should be inserted. The list is searched from the back, since the most
+  // likely case is that the new packet should be near the end of the list.
+  PacketList::reverse_iterator rit = std::find_if(
+      buffer_.rbegin(), buffer_.rend(),
+      NewTimestampIsLarger(packet));
+
+  // The new packet is to be inserted to the right of |rit|. If it has the same
+  // timestamp as |rit|, which has a higher priority, do not insert the new
+  // packet to list.
+  if (rit != buffer_.rend() && packet.timestamp == rit->timestamp) {
+    LogPacketDiscarded(packet.priority.codec_level, stats);
+    return return_val;
+  }
+
+  // The new packet is to be inserted to the left of |it|. If it has the same
+  // timestamp as |it|, which has a lower priority, replace |it| with the new
+  // packet.
+  PacketList::iterator it = rit.base();
+  if (it != buffer_.end() && packet.timestamp == it->timestamp) {
+    LogPacketDiscarded(packet.priority.codec_level, stats);
+    it = buffer_.erase(it);
+  }
+  buffer_.insert(it, std::move(packet));  // Insert the packet at that position.
+
+  return return_val;
+}
+
+int PacketBuffer::InsertPacketList(
+    PacketList* packet_list,
+    const DecoderDatabase& decoder_database,
+    rtc::Optional<uint8_t>* current_rtp_payload_type,
+    rtc::Optional<uint8_t>* current_cng_rtp_payload_type,
+    StatisticsCalculator* stats) {
+  RTC_DCHECK(stats);
+  bool flushed = false;
+  for (auto& packet : *packet_list) {
+    if (decoder_database.IsComfortNoise(packet.payload_type)) {
+      if (*current_cng_rtp_payload_type &&
+          **current_cng_rtp_payload_type != packet.payload_type) {
+        // New CNG payload type implies new codec type.
+        *current_rtp_payload_type = rtc::nullopt;
+        Flush();
+        flushed = true;
+      }
+      *current_cng_rtp_payload_type = packet.payload_type;
+    } else if (!decoder_database.IsDtmf(packet.payload_type)) {
+      // This must be speech.
+      if ((*current_rtp_payload_type &&
+           **current_rtp_payload_type != packet.payload_type) ||
+          (*current_cng_rtp_payload_type &&
+           !EqualSampleRates(packet.payload_type,
+                             **current_cng_rtp_payload_type,
+                             decoder_database))) {
+        *current_cng_rtp_payload_type = rtc::nullopt;
+        Flush();
+        flushed = true;
+      }
+      *current_rtp_payload_type = packet.payload_type;
+    }
+    int return_val = InsertPacket(std::move(packet), stats);
+    if (return_val == kFlushed) {
+      // The buffer flushed, but this is not an error. We can still continue.
+      flushed = true;
+    } else if (return_val != kOK) {
+      // An error occurred. Delete remaining packets in list and return.
+      packet_list->clear();
+      return return_val;
+    }
+  }
+  packet_list->clear();
+  return flushed ? kFlushed : kOK;
+}
+
+int PacketBuffer::NextTimestamp(uint32_t* next_timestamp) const {
+  if (Empty()) {
+    return kBufferEmpty;
+  }
+  if (!next_timestamp) {
+    return kInvalidPointer;
+  }
+  *next_timestamp = buffer_.front().timestamp;
+  return kOK;
+}
+
+int PacketBuffer::NextHigherTimestamp(uint32_t timestamp,
+                                      uint32_t* next_timestamp) const {
+  if (Empty()) {
+    return kBufferEmpty;
+  }
+  if (!next_timestamp) {
+    return kInvalidPointer;
+  }
+  PacketList::const_iterator it;
+  for (it = buffer_.begin(); it != buffer_.end(); ++it) {
+    if (it->timestamp >= timestamp) {
+      // Found a packet matching the search.
+      *next_timestamp = it->timestamp;
+      return kOK;
+    }
+  }
+  return kNotFound;
+}
+
+const Packet* PacketBuffer::PeekNextPacket() const {
+  return buffer_.empty() ? nullptr : &buffer_.front();
+}
+
+rtc::Optional<Packet> PacketBuffer::GetNextPacket() {
+  if (Empty()) {
+    // Buffer is empty.
+    return rtc::nullopt;
+  }
+
+  rtc::Optional<Packet> packet(std::move(buffer_.front()));
+  // Assert that the packet sanity checks in InsertPacket method works.
+  RTC_DCHECK(!packet->empty());
+  buffer_.pop_front();
+
+  return packet;
+}
+
+int PacketBuffer::DiscardNextPacket(StatisticsCalculator* stats) {
+  if (Empty()) {
+    return kBufferEmpty;
+  }
+  // Assert that the packet sanity checks in InsertPacket method works.
+  const Packet& packet = buffer_.front();
+  RTC_DCHECK(!packet.empty());
+  LogPacketDiscarded(packet.priority.codec_level, stats);
+  buffer_.pop_front();
+  return kOK;
+}
+
+void PacketBuffer::DiscardOldPackets(uint32_t timestamp_limit,
+                                     uint32_t horizon_samples,
+                                     StatisticsCalculator* stats) {
+  buffer_.remove_if([timestamp_limit, horizon_samples, stats](const Packet& p) {
+    if (timestamp_limit == p.timestamp ||
+        !IsObsoleteTimestamp(p.timestamp, timestamp_limit, horizon_samples)) {
+      return false;
+    }
+    LogPacketDiscarded(p.priority.codec_level, stats);
+    return true;
+  });
+}
+
+void PacketBuffer::DiscardAllOldPackets(uint32_t timestamp_limit,
+                                        StatisticsCalculator* stats) {
+  DiscardOldPackets(timestamp_limit, 0, stats);
+}
+
+void PacketBuffer::DiscardPacketsWithPayloadType(uint8_t payload_type,
+                                                 StatisticsCalculator* stats) {
+  buffer_.remove_if([payload_type, stats](const Packet& p) {
+    if (p.payload_type != payload_type) {
+      return false;
+    }
+    LogPacketDiscarded(p.priority.codec_level, stats);
+    return true;
+  });
+}
+
+size_t PacketBuffer::NumPacketsInBuffer() const {
+  return buffer_.size();
+}
+
+size_t PacketBuffer::NumSamplesInBuffer(size_t last_decoded_length) const {
+  size_t num_samples = 0;
+  size_t last_duration = last_decoded_length;
+  for (const Packet& packet : buffer_) {
+    if (packet.frame) {
+      // TODO(hlundin): Verify that it's fine to count all packets and remove
+      // this check.
+      if (packet.priority != Packet::Priority(0, 0)) {
+        continue;
+      }
+      size_t duration = packet.frame->Duration();
+      if (duration > 0) {
+        last_duration = duration;  // Save the most up-to-date (valid) duration.
+      }
+    }
+    num_samples += last_duration;
+  }
+  return num_samples;
+}
+
+void PacketBuffer::BufferStat(int* num_packets, int* max_num_packets) const {
+  *num_packets = static_cast<int>(buffer_.size());
+  *max_num_packets = static_cast<int>(max_number_of_packets_);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/packet_buffer.h b/modules/audio_coding/neteq/packet_buffer.h
new file mode 100644
index 0000000..c83bf89
--- /dev/null
+++ b/modules/audio_coding/neteq/packet_buffer.h
@@ -0,0 +1,148 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_PACKET_BUFFER_H_
+#define MODULES_AUDIO_CODING_NETEQ_PACKET_BUFFER_H_
+
+#include "api/optional.h"
+#include "modules/audio_coding/neteq/packet.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class DecoderDatabase;
+class StatisticsCalculator;
+class TickTimer;
+
+// This is the actual buffer holding the packets before decoding.
+class PacketBuffer {
+ public:
+  enum BufferReturnCodes {
+    kOK = 0,
+    kFlushed,
+    kNotFound,
+    kBufferEmpty,
+    kInvalidPacket,
+    kInvalidPointer
+  };
+
+  // Constructor creates a buffer which can hold a maximum of
+  // |max_number_of_packets| packets.
+  PacketBuffer(size_t max_number_of_packets, const TickTimer* tick_timer);
+
+  // Deletes all packets in the buffer before destroying the buffer.
+  virtual ~PacketBuffer();
+
+  // Flushes the buffer and deletes all packets in it.
+  virtual void Flush();
+
+  // Returns true for an empty buffer.
+  virtual bool Empty() const;
+
+  // Inserts |packet| into the buffer. The buffer will take over ownership of
+  // the packet object.
+  // Returns PacketBuffer::kOK on success, PacketBuffer::kFlushed if the buffer
+  // was flushed due to overfilling.
+  virtual int InsertPacket(Packet&& packet, StatisticsCalculator* stats);
+
+  // Inserts a list of packets into the buffer. The buffer will take over
+  // ownership of the packet objects.
+  // Returns PacketBuffer::kOK if all packets were inserted successfully.
+  // If the buffer was flushed due to overfilling, only a subset of the list is
+  // inserted, and PacketBuffer::kFlushed is returned.
+  // The last three parameters are included for legacy compatibility.
+  // TODO(hlundin): Redesign to not use current_*_payload_type and
+  // decoder_database.
+  virtual int InsertPacketList(
+      PacketList* packet_list,
+      const DecoderDatabase& decoder_database,
+      rtc::Optional<uint8_t>* current_rtp_payload_type,
+      rtc::Optional<uint8_t>* current_cng_rtp_payload_type,
+      StatisticsCalculator* stats);
+
+  // Gets the timestamp for the first packet in the buffer and writes it to the
+  // output variable |next_timestamp|.
+  // Returns PacketBuffer::kBufferEmpty if the buffer is empty,
+  // PacketBuffer::kOK otherwise.
+  virtual int NextTimestamp(uint32_t* next_timestamp) const;
+
+  // Gets the timestamp for the first packet in the buffer with a timestamp no
+  // lower than the input limit |timestamp|. The result is written to the output
+  // variable |next_timestamp|.
+  // Returns PacketBuffer::kBufferEmpty if the buffer is empty,
+  // PacketBuffer::kOK otherwise.
+  virtual int NextHigherTimestamp(uint32_t timestamp,
+                                  uint32_t* next_timestamp) const;
+
+  // Returns a (constant) pointer to the first packet in the buffer. Returns
+  // NULL if the buffer is empty.
+  virtual const Packet* PeekNextPacket() const;
+
+  // Extracts the first packet in the buffer and returns it.
+  // Returns an empty optional if the buffer is empty.
+  virtual rtc::Optional<Packet> GetNextPacket();
+
+  // Discards the first packet in the buffer. The packet is deleted.
+  // Returns PacketBuffer::kBufferEmpty if the buffer is empty,
+  // PacketBuffer::kOK otherwise.
+  virtual int DiscardNextPacket(StatisticsCalculator* stats);
+
+  // Discards all packets that are (strictly) older than timestamp_limit,
+  // but newer than timestamp_limit - horizon_samples. Setting horizon_samples
+  // to zero implies that the horizon is set to half the timestamp range. That
+  // is, if a packet is more than 2^31 timestamps into the future compared with
+  // timestamp_limit (including wrap-around), it is considered old.
+  virtual void DiscardOldPackets(uint32_t timestamp_limit,
+                                 uint32_t horizon_samples,
+                                 StatisticsCalculator* stats);
+
+  // Discards all packets that are (strictly) older than timestamp_limit.
+  virtual void DiscardAllOldPackets(uint32_t timestamp_limit,
+                                    StatisticsCalculator* stats);
+
+  // Removes all packets with a specific payload type from the buffer.
+  virtual void DiscardPacketsWithPayloadType(uint8_t payload_type,
+                                             StatisticsCalculator* stats);
+
+  // Returns the number of packets in the buffer, including duplicates and
+  // redundant packets.
+  virtual size_t NumPacketsInBuffer() const;
+
+  // Returns the number of samples in the buffer, including samples carried in
+  // duplicate and redundant packets.
+  virtual size_t NumSamplesInBuffer(size_t last_decoded_length) const;
+
+  virtual void BufferStat(int* num_packets, int* max_num_packets) const;
+
+  // Static method returning true if |timestamp| is older than |timestamp_limit|
+  // but less than |horizon_samples| behind |timestamp_limit|. For instance,
+  // with timestamp_limit = 100 and horizon_samples = 10, a timestamp in the
+  // range (90, 100) is considered obsolete, and will yield true.
+  // Setting |horizon_samples| to 0 is the same as setting it to 2^31, i.e.,
+  // half the 32-bit timestamp range.
+  static bool IsObsoleteTimestamp(uint32_t timestamp,
+                                  uint32_t timestamp_limit,
+                                  uint32_t horizon_samples) {
+    return IsNewerTimestamp(timestamp_limit, timestamp) &&
+           (horizon_samples == 0 ||
+            IsNewerTimestamp(timestamp, timestamp_limit - horizon_samples));
+  }
+
+ private:
+  size_t max_number_of_packets_;
+  PacketList buffer_;
+  const TickTimer* tick_timer_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(PacketBuffer);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_PACKET_BUFFER_H_
diff --git a/modules/audio_coding/neteq/packet_buffer_unittest.cc b/modules/audio_coding/neteq/packet_buffer_unittest.cc
new file mode 100644
index 0000000..0ddeb8a
--- /dev/null
+++ b/modules/audio_coding/neteq/packet_buffer_unittest.cc
@@ -0,0 +1,733 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for PacketBuffer class.
+
+#include "modules/audio_coding/neteq/packet_buffer.h"
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "modules/audio_coding/neteq/mock/mock_decoder_database.h"
+#include "modules/audio_coding/neteq/mock/mock_statistics_calculator.h"
+#include "modules/audio_coding/neteq/packet.h"
+#include "modules/audio_coding/neteq/tick_timer.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::Return;
+using ::testing::StrictMock;
+using ::testing::_;
+using ::testing::InSequence;
+using ::testing::MockFunction;
+
+namespace webrtc {
+
+// Helper class to generate packets. Packets must be deleted by the user.
+class PacketGenerator {
+ public:
+  PacketGenerator(uint16_t seq_no, uint32_t ts, uint8_t pt, int frame_size);
+  virtual ~PacketGenerator() {}
+  void Reset(uint16_t seq_no, uint32_t ts, uint8_t pt, int frame_size);
+  Packet NextPacket(int payload_size_bytes);
+
+  uint16_t seq_no_;
+  uint32_t ts_;
+  uint8_t pt_;
+  int frame_size_;
+};
+
+PacketGenerator::PacketGenerator(uint16_t seq_no, uint32_t ts, uint8_t pt,
+                                 int frame_size) {
+  Reset(seq_no, ts, pt, frame_size);
+}
+
+void PacketGenerator::Reset(uint16_t seq_no, uint32_t ts, uint8_t pt,
+                            int frame_size) {
+  seq_no_ = seq_no;
+  ts_ = ts;
+  pt_ = pt;
+  frame_size_ = frame_size;
+}
+
+Packet PacketGenerator::NextPacket(int payload_size_bytes) {
+  Packet packet;
+  packet.sequence_number = seq_no_;
+  packet.timestamp = ts_;
+  packet.payload_type = pt_;
+  packet.payload.SetSize(payload_size_bytes);
+  ++seq_no_;
+  ts_ += frame_size_;
+  return packet;
+}
+
+struct PacketsToInsert {
+  uint16_t sequence_number;
+  uint32_t timestamp;
+  uint8_t payload_type;
+  bool primary;
+  // Order of this packet to appear upon extraction, after inserting a series
+  // of packets. A negative number means that it should have been discarded
+  // before extraction.
+  int extract_order;
+};
+
+// Start of test definitions.
+
+TEST(PacketBuffer, CreateAndDestroy) {
+  TickTimer tick_timer;
+  PacketBuffer* buffer = new PacketBuffer(10, &tick_timer);  // 10 packets.
+  EXPECT_TRUE(buffer->Empty());
+  delete buffer;
+}
+
+TEST(PacketBuffer, InsertPacket) {
+  TickTimer tick_timer;
+  PacketBuffer buffer(10, &tick_timer);  // 10 packets.
+  PacketGenerator gen(17u, 4711u, 0, 10);
+  StrictMock<MockStatisticsCalculator> mock_stats;
+
+  const int payload_len = 100;
+  const Packet packet = gen.NextPacket(payload_len);
+  EXPECT_EQ(0, buffer.InsertPacket(packet.Clone(), &mock_stats));
+  uint32_t next_ts;
+  EXPECT_EQ(PacketBuffer::kOK, buffer.NextTimestamp(&next_ts));
+  EXPECT_EQ(4711u, next_ts);
+  EXPECT_FALSE(buffer.Empty());
+  EXPECT_EQ(1u, buffer.NumPacketsInBuffer());
+  const Packet* next_packet = buffer.PeekNextPacket();
+  EXPECT_EQ(packet, *next_packet);  // Compare contents.
+
+  // Do not explicitly flush buffer or delete packet to test that it is deleted
+  // with the buffer. (Tested with Valgrind or similar tool.)
+}
+
+// Test to flush buffer.
+TEST(PacketBuffer, FlushBuffer) {
+  TickTimer tick_timer;
+  PacketBuffer buffer(10, &tick_timer);  // 10 packets.
+  PacketGenerator gen(0, 0, 0, 10);
+  const int payload_len = 10;
+  StrictMock<MockStatisticsCalculator> mock_stats;
+
+  // Insert 10 small packets; should be ok.
+  for (int i = 0; i < 10; ++i) {
+    EXPECT_EQ(PacketBuffer::kOK,
+              buffer.InsertPacket(gen.NextPacket(payload_len), &mock_stats));
+  }
+  EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
+  EXPECT_FALSE(buffer.Empty());
+
+  buffer.Flush();
+  // Buffer should delete the payloads itself.
+  EXPECT_EQ(0u, buffer.NumPacketsInBuffer());
+  EXPECT_TRUE(buffer.Empty());
+}
+
+// Test to fill the buffer over the limits, and verify that it flushes.
+TEST(PacketBuffer, OverfillBuffer) {
+  TickTimer tick_timer;
+  PacketBuffer buffer(10, &tick_timer);  // 10 packets.
+  PacketGenerator gen(0, 0, 0, 10);
+  StrictMock<MockStatisticsCalculator> mock_stats;
+
+  // Insert 10 small packets; should be ok.
+  const int payload_len = 10;
+  int i;
+  for (i = 0; i < 10; ++i) {
+    EXPECT_EQ(PacketBuffer::kOK,
+              buffer.InsertPacket(gen.NextPacket(payload_len), &mock_stats));
+  }
+  EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
+  uint32_t next_ts;
+  EXPECT_EQ(PacketBuffer::kOK, buffer.NextTimestamp(&next_ts));
+  EXPECT_EQ(0u, next_ts);  // Expect first inserted packet to be first in line.
+
+  const Packet packet = gen.NextPacket(payload_len);
+  // Insert 11th packet; should flush the buffer and insert it after flushing.
+  EXPECT_EQ(PacketBuffer::kFlushed,
+            buffer.InsertPacket(packet.Clone(), &mock_stats));
+  EXPECT_EQ(1u, buffer.NumPacketsInBuffer());
+  EXPECT_EQ(PacketBuffer::kOK, buffer.NextTimestamp(&next_ts));
+  // Expect last inserted packet to be first in line.
+  EXPECT_EQ(packet.timestamp, next_ts);
+
+  // Flush buffer to delete all packets.
+  buffer.Flush();
+}
+
+// Test inserting a list of packets.
+TEST(PacketBuffer, InsertPacketList) {
+  TickTimer tick_timer;
+  PacketBuffer buffer(10, &tick_timer);  // 10 packets.
+  PacketGenerator gen(0, 0, 0, 10);
+  PacketList list;
+  const int payload_len = 10;
+
+  // Insert 10 small packets.
+  for (int i = 0; i < 10; ++i) {
+    list.push_back(gen.NextPacket(payload_len));
+  }
+
+  MockDecoderDatabase decoder_database;
+  auto factory = CreateBuiltinAudioDecoderFactory();
+  const DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderPCMu, factory);
+  EXPECT_CALL(decoder_database, GetDecoderInfo(0))
+      .WillRepeatedly(Return(&info));
+
+  StrictMock<MockStatisticsCalculator> mock_stats;
+
+  rtc::Optional<uint8_t> current_pt;
+  rtc::Optional<uint8_t> current_cng_pt;
+  EXPECT_EQ(PacketBuffer::kOK,
+            buffer.InsertPacketList(&list, decoder_database, &current_pt,
+                                    &current_cng_pt, &mock_stats));
+  EXPECT_TRUE(list.empty());  // The PacketBuffer should have depleted the list.
+  EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
+  EXPECT_EQ(0, current_pt);      // Current payload type changed to 0.
+  EXPECT_EQ(rtc::nullopt, current_cng_pt);  // CNG payload type not changed.
+
+  buffer.Flush();  // Clean up.
+
+  EXPECT_CALL(decoder_database, Die());  // Called when object is deleted.
+}
+
+// Test inserting a list of packets. Last packet is of a different payload type.
+// Expecting the buffer to flush.
+// TODO(hlundin): Remove this test when legacy operation is no longer needed.
+TEST(PacketBuffer, InsertPacketListChangePayloadType) {
+  TickTimer tick_timer;
+  PacketBuffer buffer(10, &tick_timer);  // 10 packets.
+  PacketGenerator gen(0, 0, 0, 10);
+  PacketList list;
+  const int payload_len = 10;
+
+  // Insert 10 small packets.
+  for (int i = 0; i < 10; ++i) {
+    list.push_back(gen.NextPacket(payload_len));
+  }
+  // Insert 11th packet of another payload type (not CNG).
+  {
+    Packet packet = gen.NextPacket(payload_len);
+    packet.payload_type = 1;
+    list.push_back(std::move(packet));
+  }
+
+  MockDecoderDatabase decoder_database;
+  auto factory = CreateBuiltinAudioDecoderFactory();
+  const DecoderDatabase::DecoderInfo info0(NetEqDecoder::kDecoderPCMu, factory);
+  EXPECT_CALL(decoder_database, GetDecoderInfo(0))
+      .WillRepeatedly(Return(&info0));
+  const DecoderDatabase::DecoderInfo info1(NetEqDecoder::kDecoderPCMa, factory);
+  EXPECT_CALL(decoder_database, GetDecoderInfo(1))
+      .WillRepeatedly(Return(&info1));
+
+  StrictMock<MockStatisticsCalculator> mock_stats;
+
+  rtc::Optional<uint8_t> current_pt;
+  rtc::Optional<uint8_t> current_cng_pt;
+  EXPECT_EQ(PacketBuffer::kFlushed,
+            buffer.InsertPacketList(&list, decoder_database, &current_pt,
+                                    &current_cng_pt, &mock_stats));
+  EXPECT_TRUE(list.empty());  // The PacketBuffer should have depleted the list.
+  EXPECT_EQ(1u, buffer.NumPacketsInBuffer());  // Only the last packet.
+  EXPECT_EQ(1, current_pt);      // Current payload type changed to 1.
+  EXPECT_EQ(rtc::nullopt, current_cng_pt);  // CNG payload type not changed.
+
+  buffer.Flush();  // Clean up.
+
+  EXPECT_CALL(decoder_database, Die());  // Called when object is deleted.
+}
+
+TEST(PacketBuffer, ExtractOrderRedundancy) {
+  TickTimer tick_timer;
+  PacketBuffer buffer(100, &tick_timer);  // 100 packets.
+  const int kPackets = 18;
+  const int kFrameSize = 10;
+  const int kPayloadLength = 10;
+
+  PacketsToInsert packet_facts[kPackets] = {
+    {0xFFFD, 0xFFFFFFD7, 0, true, 0},
+    {0xFFFE, 0xFFFFFFE1, 0, true, 1},
+    {0xFFFE, 0xFFFFFFD7, 1, false, -1},
+    {0xFFFF, 0xFFFFFFEB, 0, true, 2},
+    {0xFFFF, 0xFFFFFFE1, 1, false, -1},
+    {0x0000, 0xFFFFFFF5, 0, true, 3},
+    {0x0000, 0xFFFFFFEB, 1, false, -1},
+    {0x0001, 0xFFFFFFFF, 0, true, 4},
+    {0x0001, 0xFFFFFFF5, 1, false, -1},
+    {0x0002, 0x0000000A, 0, true, 5},
+    {0x0002, 0xFFFFFFFF, 1, false, -1},
+    {0x0003, 0x0000000A, 1, false, -1},
+    {0x0004, 0x0000001E, 0, true, 7},
+    {0x0004, 0x00000014, 1, false, 6},
+    {0x0005, 0x0000001E, 0, true, -1},
+    {0x0005, 0x00000014, 1, false, -1},
+    {0x0006, 0x00000028, 0, true, 8},
+    {0x0006, 0x0000001E, 1, false, -1},
+  };
+
+  const size_t kExpectPacketsInBuffer = 9;
+
+  std::vector<Packet> expect_order(kExpectPacketsInBuffer);
+
+  PacketGenerator gen(0, 0, 0, kFrameSize);
+
+  StrictMock<MockStatisticsCalculator> mock_stats;
+
+  // Interleaving the EXPECT_CALL sequence with expectations on the MockFunction
+  // check ensures that exactly one call to PacketsDiscarded happens in each
+  // DiscardNextPacket call.
+  InSequence s;
+  MockFunction<void(int check_point_id)> check;
+  for (int i = 0; i < kPackets; ++i) {
+    gen.Reset(packet_facts[i].sequence_number,
+              packet_facts[i].timestamp,
+              packet_facts[i].payload_type,
+              kFrameSize);
+    Packet packet = gen.NextPacket(kPayloadLength);
+    packet.priority.codec_level = packet_facts[i].primary ? 0 : 1;
+    if (packet_facts[i].extract_order < 0) {
+      if (packet.priority.codec_level > 0) {
+        EXPECT_CALL(mock_stats, SecondaryPacketsDiscarded(1));
+      } else {
+        EXPECT_CALL(mock_stats, PacketsDiscarded(1));
+      }
+    }
+    EXPECT_CALL(check, Call(i));
+    EXPECT_EQ(PacketBuffer::kOK,
+              buffer.InsertPacket(packet.Clone(), &mock_stats));
+    if (packet_facts[i].extract_order >= 0) {
+      expect_order[packet_facts[i].extract_order] = std::move(packet);
+    }
+    check.Call(i);
+  }
+
+  EXPECT_EQ(kExpectPacketsInBuffer, buffer.NumPacketsInBuffer());
+
+  for (size_t i = 0; i < kExpectPacketsInBuffer; ++i) {
+    const rtc::Optional<Packet> packet = buffer.GetNextPacket();
+    EXPECT_EQ(packet, expect_order[i]);  // Compare contents.
+  }
+  EXPECT_TRUE(buffer.Empty());
+}
+
+TEST(PacketBuffer, DiscardPackets) {
+  TickTimer tick_timer;
+  PacketBuffer buffer(100, &tick_timer);  // 100 packets.
+  const uint16_t start_seq_no = 17;
+  const uint32_t start_ts = 4711;
+  const uint32_t ts_increment = 10;
+  PacketGenerator gen(start_seq_no, start_ts, 0, ts_increment);
+  PacketList list;
+  const int payload_len = 10;
+  StrictMock<MockStatisticsCalculator> mock_stats;
+
+  constexpr int kTotalPackets = 10;
+  // Insert 10 small packets.
+  for (int i = 0; i < kTotalPackets; ++i) {
+    buffer.InsertPacket(gen.NextPacket(payload_len), &mock_stats);
+  }
+  EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
+
+  uint32_t current_ts = start_ts;
+
+  // Discard them one by one and make sure that the right packets are at the
+  // front of the buffer.
+  constexpr int kDiscardPackets = 5;
+
+  // Interleaving the EXPECT_CALL sequence with expectations on the MockFunction
+  // check ensures that exactly one call to PacketsDiscarded happens in each
+  // DiscardNextPacket call.
+  InSequence s;
+  MockFunction<void(int check_point_id)> check;
+  for (int i = 0; i < kDiscardPackets; ++i) {
+    uint32_t ts;
+    EXPECT_EQ(PacketBuffer::kOK, buffer.NextTimestamp(&ts));
+    EXPECT_EQ(current_ts, ts);
+    EXPECT_CALL(mock_stats, PacketsDiscarded(1));
+    EXPECT_CALL(check, Call(i));
+    EXPECT_EQ(PacketBuffer::kOK, buffer.DiscardNextPacket(&mock_stats));
+    current_ts += ts_increment;
+    check.Call(i);
+  }
+
+  constexpr int kRemainingPackets = kTotalPackets - kDiscardPackets;
+  // This will discard all remaining packets but one. The oldest packet is older
+  // than the indicated horizon_samples, and will thus be left in the buffer.
+  constexpr size_t kSkipPackets = 1;
+  EXPECT_CALL(mock_stats, PacketsDiscarded(1))
+      .Times(kRemainingPackets - kSkipPackets);
+  EXPECT_CALL(check, Call(17));  // Arbitrary id number.
+  buffer.DiscardOldPackets(start_ts + kTotalPackets * ts_increment,
+                           kRemainingPackets * ts_increment, &mock_stats);
+  check.Call(17);  // Same arbitrary id number.
+
+  EXPECT_EQ(kSkipPackets, buffer.NumPacketsInBuffer());
+  uint32_t ts;
+  EXPECT_EQ(PacketBuffer::kOK, buffer.NextTimestamp(&ts));
+  EXPECT_EQ(current_ts, ts);
+
+  // Discard all remaining packets.
+  EXPECT_CALL(mock_stats, PacketsDiscarded(kSkipPackets));
+  buffer.DiscardAllOldPackets(start_ts + kTotalPackets * ts_increment,
+                              &mock_stats);
+
+  EXPECT_TRUE(buffer.Empty());
+}
+
+TEST(PacketBuffer, Reordering) {
+  TickTimer tick_timer;
+  PacketBuffer buffer(100, &tick_timer);  // 100 packets.
+  const uint16_t start_seq_no = 17;
+  const uint32_t start_ts = 4711;
+  const uint32_t ts_increment = 10;
+  PacketGenerator gen(start_seq_no, start_ts, 0, ts_increment);
+  const int payload_len = 10;
+
+  // Generate 10 small packets and insert them into a PacketList. Insert every
+  // odd packet to the front, and every even packet to the back, thus creating
+  // a (rather strange) reordering.
+  PacketList list;
+  for (int i = 0; i < 10; ++i) {
+    Packet packet = gen.NextPacket(payload_len);
+    if (i % 2) {
+      list.push_front(std::move(packet));
+    } else {
+      list.push_back(std::move(packet));
+    }
+  }
+
+  MockDecoderDatabase decoder_database;
+  auto factory = CreateBuiltinAudioDecoderFactory();
+  const DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderPCMu, factory);
+  EXPECT_CALL(decoder_database, GetDecoderInfo(0))
+      .WillRepeatedly(Return(&info));
+  rtc::Optional<uint8_t> current_pt;
+  rtc::Optional<uint8_t> current_cng_pt;
+
+  StrictMock<MockStatisticsCalculator> mock_stats;
+
+  EXPECT_EQ(PacketBuffer::kOK,
+            buffer.InsertPacketList(&list, decoder_database, &current_pt,
+                                    &current_cng_pt, &mock_stats));
+  EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
+
+  // Extract them and make sure that come out in the right order.
+  uint32_t current_ts = start_ts;
+  for (int i = 0; i < 10; ++i) {
+    const rtc::Optional<Packet> packet = buffer.GetNextPacket();
+    ASSERT_TRUE(packet);
+    EXPECT_EQ(current_ts, packet->timestamp);
+    current_ts += ts_increment;
+  }
+  EXPECT_TRUE(buffer.Empty());
+
+  EXPECT_CALL(decoder_database, Die());  // Called when object is deleted.
+}
+
+// The test first inserts a packet with narrow-band CNG, then a packet with
+// wide-band speech. The expected behavior of the packet buffer is to detect a
+// change in sample rate, even though no speech packet has been inserted before,
+// and flush out the CNG packet.
+TEST(PacketBuffer, CngFirstThenSpeechWithNewSampleRate) {
+  TickTimer tick_timer;
+  PacketBuffer buffer(10, &tick_timer);  // 10 packets.
+  const uint8_t kCngPt = 13;
+  const int kPayloadLen = 10;
+  const uint8_t kSpeechPt = 100;
+
+  MockDecoderDatabase decoder_database;
+  auto factory = CreateBuiltinAudioDecoderFactory();
+  const DecoderDatabase::DecoderInfo info_cng(NetEqDecoder::kDecoderCNGnb,
+                                              factory);
+  EXPECT_CALL(decoder_database, GetDecoderInfo(kCngPt))
+      .WillRepeatedly(Return(&info_cng));
+  const DecoderDatabase::DecoderInfo info_speech(NetEqDecoder::kDecoderPCM16Bwb,
+                                                 factory);
+  EXPECT_CALL(decoder_database, GetDecoderInfo(kSpeechPt))
+      .WillRepeatedly(Return(&info_speech));
+
+  // Insert first packet, which is narrow-band CNG.
+  PacketGenerator gen(0, 0, kCngPt, 10);
+  PacketList list;
+  list.push_back(gen.NextPacket(kPayloadLen));
+  rtc::Optional<uint8_t> current_pt;
+  rtc::Optional<uint8_t> current_cng_pt;
+
+  StrictMock<MockStatisticsCalculator> mock_stats;
+
+  EXPECT_EQ(PacketBuffer::kOK,
+            buffer.InsertPacketList(&list, decoder_database, &current_pt,
+                                    &current_cng_pt, &mock_stats));
+  EXPECT_TRUE(list.empty());
+  EXPECT_EQ(1u, buffer.NumPacketsInBuffer());
+  ASSERT_TRUE(buffer.PeekNextPacket());
+  EXPECT_EQ(kCngPt, buffer.PeekNextPacket()->payload_type);
+  EXPECT_EQ(current_pt, rtc::nullopt);  // Current payload type not set.
+  EXPECT_EQ(kCngPt, current_cng_pt);  // CNG payload type set.
+
+  // Insert second packet, which is wide-band speech.
+  {
+    Packet packet = gen.NextPacket(kPayloadLen);
+    packet.payload_type = kSpeechPt;
+    list.push_back(std::move(packet));
+  }
+  // Expect the buffer to flush out the CNG packet, since it does not match the
+  // new speech sample rate.
+  EXPECT_EQ(PacketBuffer::kFlushed,
+            buffer.InsertPacketList(&list, decoder_database, &current_pt,
+                                    &current_cng_pt, &mock_stats));
+  EXPECT_TRUE(list.empty());
+  EXPECT_EQ(1u, buffer.NumPacketsInBuffer());
+  ASSERT_TRUE(buffer.PeekNextPacket());
+  EXPECT_EQ(kSpeechPt, buffer.PeekNextPacket()->payload_type);
+
+  EXPECT_EQ(kSpeechPt, current_pt);  // Current payload type set.
+  EXPECT_EQ(rtc::nullopt, current_cng_pt);  // CNG payload type reset.
+
+  buffer.Flush();                        // Clean up.
+  EXPECT_CALL(decoder_database, Die());  // Called when object is deleted.
+}
+
+TEST(PacketBuffer, Failures) {
+  const uint16_t start_seq_no = 17;
+  const uint32_t start_ts = 4711;
+  const uint32_t ts_increment = 10;
+  int payload_len = 100;
+  PacketGenerator gen(start_seq_no, start_ts, 0, ts_increment);
+  TickTimer tick_timer;
+  StrictMock<MockStatisticsCalculator> mock_stats;
+
+  PacketBuffer* buffer = new PacketBuffer(100, &tick_timer);  // 100 packets.
+  {
+    Packet packet = gen.NextPacket(payload_len);
+    packet.payload.Clear();
+    EXPECT_EQ(PacketBuffer::kInvalidPacket,
+              buffer->InsertPacket(std::move(packet), &mock_stats));
+  }
+  // Buffer should still be empty. Test all empty-checks.
+  uint32_t temp_ts;
+  EXPECT_EQ(PacketBuffer::kBufferEmpty, buffer->NextTimestamp(&temp_ts));
+  EXPECT_EQ(PacketBuffer::kBufferEmpty,
+            buffer->NextHigherTimestamp(0, &temp_ts));
+  EXPECT_EQ(NULL, buffer->PeekNextPacket());
+  EXPECT_FALSE(buffer->GetNextPacket());
+
+  // Discarding packets will not invoke mock_stats.PacketDiscarded() because the
+  // packet buffer is empty.
+  EXPECT_EQ(PacketBuffer::kBufferEmpty, buffer->DiscardNextPacket(&mock_stats));
+  buffer->DiscardAllOldPackets(0, &mock_stats);
+
+  // Insert one packet to make the buffer non-empty.
+  EXPECT_EQ(PacketBuffer::kOK,
+            buffer->InsertPacket(gen.NextPacket(payload_len), &mock_stats));
+  EXPECT_EQ(PacketBuffer::kInvalidPointer, buffer->NextTimestamp(NULL));
+  EXPECT_EQ(PacketBuffer::kInvalidPointer,
+            buffer->NextHigherTimestamp(0, NULL));
+  delete buffer;
+
+  // Insert packet list of three packets, where the second packet has an invalid
+  // payload.  Expect first packet to be inserted, and the remaining two to be
+  // discarded.
+  buffer = new PacketBuffer(100, &tick_timer);  // 100 packets.
+  PacketList list;
+  list.push_back(gen.NextPacket(payload_len));  // Valid packet.
+  {
+    Packet packet = gen.NextPacket(payload_len);
+    packet.payload.Clear();  // Invalid.
+    list.push_back(std::move(packet));
+  }
+  list.push_back(gen.NextPacket(payload_len));  // Valid packet.
+  MockDecoderDatabase decoder_database;
+  auto factory = CreateBuiltinAudioDecoderFactory();
+  const DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderPCMu, factory);
+  EXPECT_CALL(decoder_database, GetDecoderInfo(0))
+      .WillRepeatedly(Return(&info));
+  rtc::Optional<uint8_t> current_pt;
+  rtc::Optional<uint8_t> current_cng_pt;
+  EXPECT_EQ(PacketBuffer::kInvalidPacket,
+            buffer->InsertPacketList(&list, decoder_database, &current_pt,
+                                     &current_cng_pt, &mock_stats));
+  EXPECT_TRUE(list.empty());  // The PacketBuffer should have depleted the list.
+  EXPECT_EQ(1u, buffer->NumPacketsInBuffer());
+  delete buffer;
+  EXPECT_CALL(decoder_database, Die());  // Called when object is deleted.
+}
+
+// Test packet comparison function.
+// The function should return true if the first packet "goes before" the second.
+TEST(PacketBuffer, ComparePackets) {
+  PacketGenerator gen(0, 0, 0, 10);
+  Packet a(gen.NextPacket(10));  // SN = 0, TS = 0.
+  Packet b(gen.NextPacket(10));  // SN = 1, TS = 10.
+  EXPECT_FALSE(a == b);
+  EXPECT_TRUE(a != b);
+  EXPECT_TRUE(a < b);
+  EXPECT_FALSE(a > b);
+  EXPECT_TRUE(a <= b);
+  EXPECT_FALSE(a >= b);
+
+  // Testing wrap-around case; 'a' is earlier but has a larger timestamp value.
+  a.timestamp = 0xFFFFFFFF - 10;
+  EXPECT_FALSE(a == b);
+  EXPECT_TRUE(a != b);
+  EXPECT_TRUE(a < b);
+  EXPECT_FALSE(a > b);
+  EXPECT_TRUE(a <= b);
+  EXPECT_FALSE(a >= b);
+
+  // Test equal packets.
+  EXPECT_TRUE(a == a);
+  EXPECT_FALSE(a != a);
+  EXPECT_FALSE(a < a);
+  EXPECT_FALSE(a > a);
+  EXPECT_TRUE(a <= a);
+  EXPECT_TRUE(a >= a);
+
+  // Test equal timestamps but different sequence numbers (0 and 1).
+  a.timestamp = b.timestamp;
+  EXPECT_FALSE(a == b);
+  EXPECT_TRUE(a != b);
+  EXPECT_TRUE(a < b);
+  EXPECT_FALSE(a > b);
+  EXPECT_TRUE(a <= b);
+  EXPECT_FALSE(a >= b);
+
+  // Test equal timestamps but different sequence numbers (32767 and 1).
+  a.sequence_number = 0xFFFF;
+  EXPECT_FALSE(a == b);
+  EXPECT_TRUE(a != b);
+  EXPECT_TRUE(a < b);
+  EXPECT_FALSE(a > b);
+  EXPECT_TRUE(a <= b);
+  EXPECT_FALSE(a >= b);
+
+  // Test equal timestamps and sequence numbers, but differing priorities.
+  a.sequence_number = b.sequence_number;
+  a.priority = {1, 0};
+  b.priority = {0, 0};
+  // a after b
+  EXPECT_FALSE(a == b);
+  EXPECT_TRUE(a != b);
+  EXPECT_FALSE(a < b);
+  EXPECT_TRUE(a > b);
+  EXPECT_FALSE(a <= b);
+  EXPECT_TRUE(a >= b);
+
+  Packet c(gen.NextPacket(0));  // SN = 2, TS = 20.
+  Packet d(gen.NextPacket(0));  // SN = 3, TS = 20.
+  c.timestamp = b.timestamp;
+  d.timestamp = b.timestamp;
+  c.sequence_number = b.sequence_number;
+  d.sequence_number = b.sequence_number;
+  c.priority = {1, 1};
+  d.priority = {0, 1};
+  // c after d
+  EXPECT_FALSE(c == d);
+  EXPECT_TRUE(c != d);
+  EXPECT_FALSE(c < d);
+  EXPECT_TRUE(c > d);
+  EXPECT_FALSE(c <= d);
+  EXPECT_TRUE(c >= d);
+
+  // c after a
+  EXPECT_FALSE(c == a);
+  EXPECT_TRUE(c != a);
+  EXPECT_FALSE(c < a);
+  EXPECT_TRUE(c > a);
+  EXPECT_FALSE(c <= a);
+  EXPECT_TRUE(c >= a);
+
+  // c after b
+  EXPECT_FALSE(c == b);
+  EXPECT_TRUE(c != b);
+  EXPECT_FALSE(c < b);
+  EXPECT_TRUE(c > b);
+  EXPECT_FALSE(c <= b);
+  EXPECT_TRUE(c >= b);
+
+  // a after d
+  EXPECT_FALSE(a == d);
+  EXPECT_TRUE(a != d);
+  EXPECT_FALSE(a < d);
+  EXPECT_TRUE(a > d);
+  EXPECT_FALSE(a <= d);
+  EXPECT_TRUE(a >= d);
+
+  // d after b
+  EXPECT_FALSE(d == b);
+  EXPECT_TRUE(d != b);
+  EXPECT_FALSE(d < b);
+  EXPECT_TRUE(d > b);
+  EXPECT_FALSE(d <= b);
+  EXPECT_TRUE(d >= b);
+}
+
+namespace {
+void TestIsObsoleteTimestamp(uint32_t limit_timestamp) {
+  // Check with zero horizon, which implies that the horizon is at 2^31, i.e.,
+  // half the timestamp range.
+  static const uint32_t kZeroHorizon = 0;
+  static const uint32_t k2Pow31Minus1 = 0x7FFFFFFF;
+  // Timestamp on the limit is not old.
+  EXPECT_FALSE(PacketBuffer::IsObsoleteTimestamp(
+      limit_timestamp, limit_timestamp, kZeroHorizon));
+  // 1 sample behind is old.
+  EXPECT_TRUE(PacketBuffer::IsObsoleteTimestamp(
+      limit_timestamp - 1, limit_timestamp, kZeroHorizon));
+  // 2^31 - 1 samples behind is old.
+  EXPECT_TRUE(PacketBuffer::IsObsoleteTimestamp(
+      limit_timestamp - k2Pow31Minus1, limit_timestamp, kZeroHorizon));
+  // 1 sample ahead is not old.
+  EXPECT_FALSE(PacketBuffer::IsObsoleteTimestamp(
+      limit_timestamp + 1, limit_timestamp, kZeroHorizon));
+  // If |t1-t2|=2^31 and t1>t2, t2 is older than t1 but not the opposite.
+  uint32_t other_timestamp = limit_timestamp + (1 << 31);
+  uint32_t lowest_timestamp = std::min(limit_timestamp, other_timestamp);
+  uint32_t highest_timestamp = std::max(limit_timestamp, other_timestamp);
+  EXPECT_TRUE(PacketBuffer::IsObsoleteTimestamp(
+      lowest_timestamp, highest_timestamp, kZeroHorizon));
+  EXPECT_FALSE(PacketBuffer::IsObsoleteTimestamp(
+      highest_timestamp, lowest_timestamp, kZeroHorizon));
+
+  // Fixed horizon at 10 samples.
+  static const uint32_t kHorizon = 10;
+  // Timestamp on the limit is not old.
+  EXPECT_FALSE(PacketBuffer::IsObsoleteTimestamp(
+      limit_timestamp, limit_timestamp, kHorizon));
+  // 1 sample behind is old.
+  EXPECT_TRUE(PacketBuffer::IsObsoleteTimestamp(
+      limit_timestamp - 1, limit_timestamp, kHorizon));
+  // 9 samples behind is old.
+  EXPECT_TRUE(PacketBuffer::IsObsoleteTimestamp(
+      limit_timestamp - 9, limit_timestamp, kHorizon));
+  // 10 samples behind is not old.
+  EXPECT_FALSE(PacketBuffer::IsObsoleteTimestamp(
+      limit_timestamp - 10, limit_timestamp, kHorizon));
+  // 2^31 - 1 samples behind is not old.
+  EXPECT_FALSE(PacketBuffer::IsObsoleteTimestamp(
+      limit_timestamp - k2Pow31Minus1, limit_timestamp, kHorizon));
+  // 1 sample ahead is not old.
+  EXPECT_FALSE(PacketBuffer::IsObsoleteTimestamp(
+      limit_timestamp + 1, limit_timestamp, kHorizon));
+  // 2^31 samples ahead is not old.
+  EXPECT_FALSE(PacketBuffer::IsObsoleteTimestamp(
+      limit_timestamp + (1 << 31), limit_timestamp, kHorizon));
+}
+}  // namespace
+
+// Test the IsObsoleteTimestamp method with different limit timestamps.
+TEST(PacketBuffer, IsObsoleteTimestamp) {
+  TestIsObsoleteTimestamp(0);
+  TestIsObsoleteTimestamp(1);
+  TestIsObsoleteTimestamp(0xFFFFFFFF);  // -1 in uint32_t.
+  TestIsObsoleteTimestamp(0x80000000);  // 2^31.
+  TestIsObsoleteTimestamp(0x80000001);  // 2^31 + 1.
+  TestIsObsoleteTimestamp(0x7FFFFFFF);  // 2^31 - 1.
+}
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/post_decode_vad.cc b/modules/audio_coding/neteq/post_decode_vad.cc
new file mode 100644
index 0000000..a09d18f
--- /dev/null
+++ b/modules/audio_coding/neteq/post_decode_vad.cc
@@ -0,0 +1,89 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/post_decode_vad.h"
+
+namespace webrtc {
+
+PostDecodeVad::~PostDecodeVad() {
+  if (vad_instance_)
+    WebRtcVad_Free(vad_instance_);
+}
+
+void PostDecodeVad::Enable() {
+  if (!vad_instance_) {
+    // Create the instance.
+    vad_instance_ = WebRtcVad_Create();
+    if (vad_instance_ == nullptr) {
+      // Failed to create instance.
+      Disable();
+      return;
+    }
+  }
+  Init();
+  enabled_ = true;
+}
+
+void PostDecodeVad::Disable() {
+  enabled_ = false;
+  running_ = false;
+}
+
+void PostDecodeVad::Init() {
+  running_ = false;
+  if (vad_instance_) {
+    WebRtcVad_Init(vad_instance_);
+    WebRtcVad_set_mode(vad_instance_, kVadMode);
+    running_ = true;
+  }
+}
+
+void PostDecodeVad::Update(int16_t* signal, size_t length,
+                           AudioDecoder::SpeechType speech_type,
+                           bool sid_frame,
+                           int fs_hz) {
+  if (!vad_instance_ || !enabled_) {
+    return;
+  }
+
+  if (speech_type == AudioDecoder::kComfortNoise || sid_frame ||
+      fs_hz > 16000) {
+    // TODO(hlundin): Remove restriction on fs_hz.
+    running_ = false;
+    active_speech_ = true;
+    sid_interval_counter_ = 0;
+  } else if (!running_) {
+    ++sid_interval_counter_;
+  }
+
+  if (sid_interval_counter_ >= kVadAutoEnable) {
+    Init();
+  }
+
+  if (length > 0 && running_) {
+    size_t vad_sample_index = 0;
+    active_speech_ = false;
+    // Loop through frame sizes 30, 20, and 10 ms.
+    for (int vad_frame_size_ms = 30; vad_frame_size_ms >= 10;
+        vad_frame_size_ms -= 10) {
+      size_t vad_frame_size_samples =
+          static_cast<size_t>(vad_frame_size_ms * fs_hz / 1000);
+      while (length - vad_sample_index >= vad_frame_size_samples) {
+        int vad_return = WebRtcVad_Process(
+            vad_instance_, fs_hz, &signal[vad_sample_index],
+            vad_frame_size_samples);
+        active_speech_ |= (vad_return == 1);
+        vad_sample_index += vad_frame_size_samples;
+      }
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/post_decode_vad.h b/modules/audio_coding/neteq/post_decode_vad.h
new file mode 100644
index 0000000..7b67bbe
--- /dev/null
+++ b/modules/audio_coding/neteq/post_decode_vad.h
@@ -0,0 +1,72 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_POST_DECODE_VAD_H_
+#define MODULES_AUDIO_CODING_NETEQ_POST_DECODE_VAD_H_
+
+#include <string>  // size_t
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "common_audio/vad/include/webrtc_vad.h"
+#include "common_types.h"  // NOLINT(build/include)  // NULL
+#include "modules/audio_coding/neteq/defines.h"
+#include "modules/audio_coding/neteq/packet.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class PostDecodeVad {
+ public:
+  PostDecodeVad()
+      : enabled_(false),
+        running_(false),
+        active_speech_(true),
+        sid_interval_counter_(0),
+        vad_instance_(NULL) {
+  }
+
+  virtual ~PostDecodeVad();
+
+  // Enables post-decode VAD.
+  void Enable();
+
+  // Disables post-decode VAD.
+  void Disable();
+
+  // Initializes post-decode VAD.
+  void Init();
+
+  // Updates post-decode VAD with the audio data in |signal| having |length|
+  // samples. The data is of type |speech_type|, at the sample rate |fs_hz|.
+  void Update(int16_t* signal, size_t length,
+              AudioDecoder::SpeechType speech_type, bool sid_frame, int fs_hz);
+
+  // Accessors.
+  bool enabled() const { return enabled_; }
+  bool running() const { return running_; }
+  bool active_speech() const { return active_speech_; }
+
+ private:
+  static const int kVadMode = 0;  // Sets aggressiveness to "Normal".
+  // Number of Update() calls without CNG/SID before re-enabling VAD.
+  static const int kVadAutoEnable = 3000;
+
+  bool enabled_;
+  bool running_;
+  bool active_speech_;
+  int sid_interval_counter_;
+  ::VadInst* vad_instance_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(PostDecodeVad);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_POST_DECODE_VAD_H_
diff --git a/modules/audio_coding/neteq/post_decode_vad_unittest.cc b/modules/audio_coding/neteq/post_decode_vad_unittest.cc
new file mode 100644
index 0000000..da3e4e8
--- /dev/null
+++ b/modules/audio_coding/neteq/post_decode_vad_unittest.cc
@@ -0,0 +1,25 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for PostDecodeVad class.
+
+#include "modules/audio_coding/neteq/post_decode_vad.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(PostDecodeVad, CreateAndDestroy) {
+  PostDecodeVad vad;
+}
+
+// TODO(hlundin): Write more tests.
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/preemptive_expand.cc b/modules/audio_coding/neteq/preemptive_expand.cc
new file mode 100644
index 0000000..bc75389
--- /dev/null
+++ b/modules/audio_coding/neteq/preemptive_expand.cc
@@ -0,0 +1,114 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/preemptive_expand.h"
+
+#include <algorithm>  // min, max
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+namespace webrtc {
+
+PreemptiveExpand::ReturnCodes PreemptiveExpand::Process(
+    const int16_t* input,
+    size_t input_length,
+    size_t old_data_length,
+    AudioMultiVector* output,
+    size_t* length_change_samples) {
+  old_data_length_per_channel_ = old_data_length;
+  // Input length must be (almost) 30 ms.
+  // Also, the new part must be at least |overlap_samples_| elements.
+  static const size_t k15ms = 120;  // 15 ms = 120 samples at 8 kHz sample rate.
+  if (num_channels_ == 0 ||
+      input_length / num_channels_ < (2 * k15ms - 1) * fs_mult_ ||
+      old_data_length >= input_length / num_channels_ - overlap_samples_) {
+    // Length of input data too short to do preemptive expand. Simply move all
+    // data from input to output.
+    output->PushBackInterleaved(input, input_length);
+    return kError;
+  }
+  const bool kFastMode = false;  // Fast mode is not available for PE Expand.
+  return TimeStretch::Process(input, input_length, kFastMode, output,
+                              length_change_samples);
+}
+
+void PreemptiveExpand::SetParametersForPassiveSpeech(size_t len,
+                                                     int16_t* best_correlation,
+                                                     size_t* peak_index) const {
+  // When the signal does not contain any active speech, the correlation does
+  // not matter. Simply set it to zero.
+  *best_correlation = 0;
+
+  // For low energy expansion, the new data can be less than 15 ms,
+  // but we must ensure that best_correlation is not larger than the length of
+  // the new data.
+  // but we must ensure that best_correlation is not larger than the new data.
+  *peak_index = std::min(*peak_index,
+                         len - old_data_length_per_channel_);
+}
+
+PreemptiveExpand::ReturnCodes PreemptiveExpand::CheckCriteriaAndStretch(
+    const int16_t* input,
+    size_t input_length,
+    size_t peak_index,
+    int16_t best_correlation,
+    bool active_speech,
+    bool /*fast_mode*/,
+    AudioMultiVector* output) const {
+  // Pre-calculate common multiplication with |fs_mult_|.
+  // 120 corresponds to 15 ms.
+  size_t fs_mult_120 = static_cast<size_t>(fs_mult_ * 120);
+  // Check for strong correlation (>0.9 in Q14) and at least 15 ms new data,
+  // or passive speech.
+  if (((best_correlation > kCorrelationThreshold) &&
+      (old_data_length_per_channel_ <= fs_mult_120)) ||
+      !active_speech) {
+    // Do accelerate operation by overlap add.
+
+    // Set length of the first part, not to be modified.
+    size_t unmodified_length = std::max(old_data_length_per_channel_,
+                                        fs_mult_120);
+    // Copy first part, including cross-fade region.
+    output->PushBackInterleaved(
+        input, (unmodified_length + peak_index) * num_channels_);
+    // Copy the last |peak_index| samples up to 15 ms to |temp_vector|.
+    AudioMultiVector temp_vector(num_channels_);
+    temp_vector.PushBackInterleaved(
+        &input[(unmodified_length - peak_index) * num_channels_],
+        peak_index * num_channels_);
+    // Cross-fade |temp_vector| onto the end of |output|.
+    output->CrossFade(temp_vector, peak_index);
+    // Copy the last unmodified part, 15 ms + pitch period until the end.
+    output->PushBackInterleaved(
+        &input[unmodified_length * num_channels_],
+        input_length - unmodified_length * num_channels_);
+
+    if (active_speech) {
+      return kSuccess;
+    } else {
+      return kSuccessLowEnergy;
+    }
+  } else {
+    // Accelerate not allowed. Simply move all data from decoded to outData.
+    output->PushBackInterleaved(input, input_length);
+    return kNoStretch;
+  }
+}
+
+PreemptiveExpand* PreemptiveExpandFactory::Create(
+    int sample_rate_hz,
+    size_t num_channels,
+    const BackgroundNoise& background_noise,
+    size_t overlap_samples) const {
+  return new PreemptiveExpand(
+      sample_rate_hz, num_channels, background_noise, overlap_samples);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/preemptive_expand.h b/modules/audio_coding/neteq/preemptive_expand.h
new file mode 100644
index 0000000..303501d
--- /dev/null
+++ b/modules/audio_coding/neteq/preemptive_expand.h
@@ -0,0 +1,88 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_PREEMPTIVE_EXPAND_H_
+#define MODULES_AUDIO_CODING_NETEQ_PREEMPTIVE_EXPAND_H_
+
+#include <assert.h>
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "modules/audio_coding/neteq/time_stretch.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Forward declarations.
+class BackgroundNoise;
+
+// This class implements the PreemptiveExpand operation. Most of the work is
+// done in the base class TimeStretch, which is shared with the Accelerate
+// operation. In the PreemptiveExpand class, the operations that are specific to
+// PreemptiveExpand are implemented.
+class PreemptiveExpand : public TimeStretch {
+ public:
+  PreemptiveExpand(int sample_rate_hz,
+                   size_t num_channels,
+                   const BackgroundNoise& background_noise,
+                   size_t overlap_samples)
+      : TimeStretch(sample_rate_hz, num_channels, background_noise),
+        old_data_length_per_channel_(0),
+        overlap_samples_(overlap_samples) {
+  }
+
+  // This method performs the actual PreemptiveExpand operation. The samples are
+  // read from |input|, of length |input_length| elements, and are written to
+  // |output|. The number of samples added through time-stretching is
+  // is provided in the output |length_change_samples|. The method returns
+  // the outcome of the operation as an enumerator value.
+  ReturnCodes Process(const int16_t *pw16_decoded,
+                      size_t len,
+                      size_t old_data_len,
+                      AudioMultiVector* output,
+                      size_t* length_change_samples);
+
+ protected:
+  // Sets the parameters |best_correlation| and |peak_index| to suitable
+  // values when the signal contains no active speech.
+  void SetParametersForPassiveSpeech(size_t input_length,
+                                     int16_t* best_correlation,
+                                     size_t* peak_index) const override;
+
+  // Checks the criteria for performing the time-stretching operation and,
+  // if possible, performs the time-stretching.
+  ReturnCodes CheckCriteriaAndStretch(const int16_t* input,
+                                      size_t input_length,
+                                      size_t peak_index,
+                                      int16_t best_correlation,
+                                      bool active_speech,
+                                      bool /*fast_mode*/,
+                                      AudioMultiVector* output) const override;
+
+ private:
+  size_t old_data_length_per_channel_;
+  size_t overlap_samples_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(PreemptiveExpand);
+};
+
+struct PreemptiveExpandFactory {
+  PreemptiveExpandFactory() {}
+  virtual ~PreemptiveExpandFactory() {}
+
+  virtual PreemptiveExpand* Create(
+      int sample_rate_hz,
+      size_t num_channels,
+      const BackgroundNoise& background_noise,
+      size_t overlap_samples) const;
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_PREEMPTIVE_EXPAND_H_
diff --git a/modules/audio_coding/neteq/random_vector.cc b/modules/audio_coding/neteq/random_vector.cc
new file mode 100644
index 0000000..c2df8cf
--- /dev/null
+++ b/modules/audio_coding/neteq/random_vector.cc
@@ -0,0 +1,57 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/random_vector.h"
+
+namespace webrtc {
+
+const int16_t RandomVector::kRandomTable[RandomVector::kRandomTableSize] = {
+    2680, 5532, 441, 5520, 16170, -5146, -1024, -8733, 3115, 9598, -10380,
+    -4959, -1280, -21716, 7133, -1522, 13458, -3902, 2789, -675, 3441, 5016,
+    -13599, -4003, -2739, 3922, -7209, 13352, -11617, -7241, 12905, -2314, 5426,
+    10121, -9702, 11207, -13542, 1373, 816, -5934, -12504, 4798, 1811, 4112,
+    -613, 201, -10367, -2960, -2419, 3442, 4299, -6116, -6092, 1552, -1650,
+    -480, -1237, 18720, -11858, -8303, -8212, 865, -2890, -16968, 12052, -5845,
+    -5912, 9777, -5665, -6294, 5426, -4737, -6335, 1652, 761, 3832, 641, -8552,
+    -9084, -5753, 8146, 12156, -4915, 15086, -1231, -1869, 11749, -9319, -6403,
+    11407, 6232, -1683, 24340, -11166, 4017, -10448, 3153, -2936, 6212, 2891,
+    -866, -404, -4807, -2324, -1917, -2388, -6470, -3895, -10300, 5323, -5403,
+    2205, 4640, 7022, -21186, -6244, -882, -10031, -3395, -12885, 7155, -5339,
+    5079, -2645, -9515, 6622, 14651, 15852, 359, 122, 8246, -3502, -6696, -3679,
+    -13535, -1409, -704, -7403, -4007, 1798, 279, -420, -12796, -14219, 1141,
+    3359, 11434, 7049, -6684, -7473, 14283, -4115, -9123, -8969, 4152, 4117,
+    13792, 5742, 16168, 8661, -1609, -6095, 1881, 14380, -5588, 6758, -6425,
+    -22969, -7269, 7031, 1119, -1611, -5850, -11281, 3559, -8952, -10146, -4667,
+    -16251, -1538, 2062, -1012, -13073, 227, -3142, -5265, 20, 5770, -7559,
+    4740, -4819, 992, -8208, -7130, -4652, 6725, 7369, -1036, 13144, -1588,
+    -5304, -2344, -449, -5705, -8894, 5205, -17904, -11188, -1022, 4852, 10101,
+    -5255, -4200, -752, 7941, -1543, 5959, 14719, 13346, 17045, -15605, -1678,
+    -1600, -9230, 68, 23348, 1172, 7750, 11212, -18227, 9956, 4161, 883, 3947,
+    4341, 1014, -4889, -2603, 1246, -5630, -3596, -870, -1298, 2784, -3317,
+    -6612, -20541, 4166, 4181, -8625, 3562, 12890, 4761, 3205, -12259, -8579 };
+
+void RandomVector::Reset() {
+  seed_ = 777;
+  seed_increment_ = 1;
+}
+
+void RandomVector::Generate(size_t length, int16_t* output) {
+  for (size_t i = 0; i < length; i++) {
+    seed_ += seed_increment_;
+    size_t position = seed_ & (kRandomTableSize - 1);
+    output[i] = kRandomTable[position];
+  }
+}
+
+void RandomVector::IncreaseSeedIncrement(int16_t increase_by) {
+  seed_increment_+= increase_by;
+  seed_increment_ &= kRandomTableSize - 1;
+}
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/random_vector.h b/modules/audio_coding/neteq/random_vector.h
new file mode 100644
index 0000000..18adbe0
--- /dev/null
+++ b/modules/audio_coding/neteq/random_vector.h
@@ -0,0 +1,50 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_RANDOM_VECTOR_H_
+#define MODULES_AUDIO_CODING_NETEQ_RANDOM_VECTOR_H_
+
+#include <string.h>  // size_t
+
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// This class generates pseudo-random samples.
+class RandomVector {
+ public:
+  static const size_t kRandomTableSize = 256;
+  static const int16_t kRandomTable[kRandomTableSize];
+
+  RandomVector()
+      : seed_(777),
+        seed_increment_(1) {
+  }
+
+  void Reset();
+
+  void Generate(size_t length, int16_t* output);
+
+  void IncreaseSeedIncrement(int16_t increase_by);
+
+  // Accessors and mutators.
+  int16_t seed_increment() { return seed_increment_; }
+  void set_seed_increment(int16_t value) { seed_increment_ = value; }
+
+ private:
+  uint32_t seed_;
+  int16_t seed_increment_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(RandomVector);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_RANDOM_VECTOR_H_
diff --git a/modules/audio_coding/neteq/random_vector_unittest.cc b/modules/audio_coding/neteq/random_vector_unittest.cc
new file mode 100644
index 0000000..44479a6
--- /dev/null
+++ b/modules/audio_coding/neteq/random_vector_unittest.cc
@@ -0,0 +1,25 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for RandomVector class.
+
+#include "modules/audio_coding/neteq/random_vector.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(RandomVector, CreateAndDestroy) {
+  RandomVector random_vector;
+}
+
+// TODO(hlundin): Write more tests.
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/red_payload_splitter.cc b/modules/audio_coding/neteq/red_payload_splitter.cc
new file mode 100644
index 0000000..85e399c
--- /dev/null
+++ b/modules/audio_coding/neteq/red_payload_splitter.cc
@@ -0,0 +1,162 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/red_payload_splitter.h"
+
+#include <assert.h>
+#include <vector>
+
+#include "modules/audio_coding/neteq/decoder_database.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+// The method loops through a list of packets {A, B, C, ...}. Each packet is
+// split into its corresponding RED payloads, {A1, A2, ...}, which is
+// temporarily held in the list |new_packets|.
+// When the first packet in |packet_list| has been processed, the orignal packet
+// is replaced by the new ones in |new_packets|, so that |packet_list| becomes:
+// {A1, A2, ..., B, C, ...}. The method then continues with B, and C, until all
+// the original packets have been replaced by their split payloads.
+bool RedPayloadSplitter::SplitRed(PacketList* packet_list) {
+  // Too many RED blocks indicates that something is wrong. Clamp it at some
+  // reasonable value.
+  const size_t kMaxRedBlocks = 32;
+  bool ret = true;
+  PacketList::iterator it = packet_list->begin();
+  while (it != packet_list->end()) {
+    const Packet& red_packet = *it;
+    assert(!red_packet.payload.empty());
+    const uint8_t* payload_ptr = red_packet.payload.data();
+
+    // Read RED headers (according to RFC 2198):
+    //
+    //    0                   1                   2                   3
+    //    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+    //   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    //   |F|   block PT  |  timestamp offset         |   block length    |
+    //   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    // Last RED header:
+    //    0 1 2 3 4 5 6 7
+    //   +-+-+-+-+-+-+-+-+
+    //   |0|   Block PT  |
+    //   +-+-+-+-+-+-+-+-+
+
+    struct RedHeader {
+      uint8_t payload_type;
+      uint32_t timestamp;
+      size_t payload_length;
+    };
+
+    std::vector<RedHeader> new_headers;
+    bool last_block = false;
+    size_t sum_length = 0;
+    while (!last_block) {
+      RedHeader new_header;
+      // Check the F bit. If F == 0, this was the last block.
+      last_block = ((*payload_ptr & 0x80) == 0);
+      // Bits 1 through 7 are payload type.
+      new_header.payload_type = payload_ptr[0] & 0x7F;
+      if (last_block) {
+        // No more header data to read.
+        ++sum_length;  // Account for RED header size of 1 byte.
+        new_header.timestamp = red_packet.timestamp;
+        new_header.payload_length = red_packet.payload.size() - sum_length;
+        payload_ptr += 1;  // Advance to first payload byte.
+      } else {
+        // Bits 8 through 21 are timestamp offset.
+        int timestamp_offset =
+            (payload_ptr[1] << 6) + ((payload_ptr[2] & 0xFC) >> 2);
+        new_header.timestamp = red_packet.timestamp - timestamp_offset;
+        // Bits 22 through 31 are payload length.
+        new_header.payload_length =
+            ((payload_ptr[2] & 0x03) << 8) + payload_ptr[3];
+        payload_ptr += 4;  // Advance to next RED header.
+      }
+      sum_length += new_header.payload_length;
+      sum_length += 4;  // Account for RED header size of 4 bytes.
+      // Store in new list of packets.
+      new_headers.push_back(new_header);
+    }
+
+    if (new_headers.size() <= kMaxRedBlocks) {
+      // Populate the new packets with payload data.
+      // |payload_ptr| now points at the first payload byte.
+      PacketList new_packets;  // An empty list to store the split packets in.
+      for (size_t i = 0; i != new_headers.size(); ++i) {
+        const auto& new_header = new_headers[i];
+        size_t payload_length = new_header.payload_length;
+        if (payload_ptr + payload_length >
+            red_packet.payload.data() + red_packet.payload.size()) {
+          // The block lengths in the RED headers do not match the overall
+          // packet length. Something is corrupt. Discard this and the remaining
+          // payloads from this packet.
+          RTC_LOG(LS_WARNING) << "SplitRed length mismatch";
+          ret = false;
+          break;
+        }
+
+        Packet new_packet;
+        new_packet.timestamp = new_header.timestamp;
+        new_packet.payload_type = new_header.payload_type;
+        new_packet.sequence_number = red_packet.sequence_number;
+        new_packet.priority.red_level =
+            rtc::dchecked_cast<int>((new_headers.size() - 1) - i);
+        new_packet.payload.SetData(payload_ptr, payload_length);
+        new_packets.push_front(std::move(new_packet));
+        payload_ptr += payload_length;
+      }
+      // Insert new packets into original list, before the element pointed to by
+      // iterator |it|.
+      packet_list->splice(it, std::move(new_packets));
+    } else {
+      RTC_LOG(LS_WARNING) << "SplitRed too many blocks: " << new_headers.size();
+      ret = false;
+    }
+    // Remove |it| from the packet list. This operation effectively moves the
+    // iterator |it| to the next packet in the list. Thus, we do not have to
+    // increment it manually.
+    it = packet_list->erase(it);
+  }
+  return ret;
+}
+
+int RedPayloadSplitter::CheckRedPayloads(
+    PacketList* packet_list,
+    const DecoderDatabase& decoder_database) {
+  int main_payload_type = -1;
+  int num_deleted_packets = 0;
+  for (auto it = packet_list->begin(); it != packet_list->end(); /* */) {
+    uint8_t this_payload_type = it->payload_type;
+    if (!decoder_database.IsDtmf(this_payload_type) &&
+        !decoder_database.IsComfortNoise(this_payload_type)) {
+      if (main_payload_type == -1) {
+        // This is the first packet in the list which is non-DTMF non-CNG.
+        main_payload_type = this_payload_type;
+      } else {
+        if (this_payload_type != main_payload_type) {
+          // We do not allow redundant payloads of a different type.
+          // Remove |it| from the packet list. This operation effectively
+          // moves the iterator |it| to the next packet in the list. Thus, we
+          // do not have to increment it manually.
+          it = packet_list->erase(it);
+          ++num_deleted_packets;
+          continue;
+        }
+      }
+    }
+    ++it;
+  }
+  return num_deleted_packets;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/red_payload_splitter.h b/modules/audio_coding/neteq/red_payload_splitter.h
new file mode 100644
index 0000000..1475b1b
--- /dev/null
+++ b/modules/audio_coding/neteq/red_payload_splitter.h
@@ -0,0 +1,51 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_RED_PAYLOAD_SPLITTER_H_
+#define MODULES_AUDIO_CODING_NETEQ_RED_PAYLOAD_SPLITTER_H_
+
+#include "modules/audio_coding/neteq/packet.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Forward declarations.
+class DecoderDatabase;
+
+// This class handles splitting of RED payloads into smaller parts.
+// Codec-specific packet splitting can be performed by
+// AudioDecoder::ParsePayload.
+class RedPayloadSplitter {
+ public:
+  RedPayloadSplitter() {}
+
+  virtual ~RedPayloadSplitter() {}
+
+  // Splits each packet in |packet_list| into its separate RED payloads. Each
+  // RED payload is packetized into a Packet. The original elements in
+  // |packet_list| are properly deleted, and replaced by the new packets.
+  // Note that all packets in |packet_list| must be RED payloads, i.e., have
+  // RED headers according to RFC 2198 at the very beginning of the payload.
+  // Returns kOK or an error.
+  virtual bool SplitRed(PacketList* packet_list);
+
+  // Checks all packets in |packet_list|. Packets that are DTMF events or
+  // comfort noise payloads are kept. Except that, only one single payload type
+  // is accepted. Any packet with another payload type is discarded.  Returns
+  // the number of discarded packets.
+  virtual int CheckRedPayloads(PacketList* packet_list,
+                               const DecoderDatabase& decoder_database);
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(RedPayloadSplitter);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_RED_PAYLOAD_SPLITTER_H_
diff --git a/modules/audio_coding/neteq/red_payload_splitter_unittest.cc b/modules/audio_coding/neteq/red_payload_splitter_unittest.cc
new file mode 100644
index 0000000..4f511ad
--- /dev/null
+++ b/modules/audio_coding/neteq/red_payload_splitter_unittest.cc
@@ -0,0 +1,346 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for RedPayloadSplitter class.
+
+#include "modules/audio_coding/neteq/red_payload_splitter.h"
+
+#include <assert.h>
+
+#include <memory>
+#include <utility>  // pair
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "modules/audio_coding/neteq/mock/mock_decoder_database.h"
+#include "modules/audio_coding/neteq/packet.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+#include "test/mock_audio_decoder_factory.h"
+
+using ::testing::Return;
+using ::testing::ReturnNull;
+
+namespace webrtc {
+
+static const int kRedPayloadType = 100;
+static const size_t kPayloadLength = 10;
+static const size_t kRedHeaderLength = 4;  // 4 bytes RED header.
+static const uint16_t kSequenceNumber = 0;
+static const uint32_t kBaseTimestamp = 0x12345678;
+
+// A possible Opus packet that contains FEC is the following.
+// The frame is 20 ms in duration.
+//
+// 0                   1                   2                   3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |0|0|0|0|1|0|0|0|x|1|x|x|x|x|x|x|x|                             |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+                             |
+// |                    Compressed frame 1 (N-2 bytes)...          :
+// :                                                               |
+// |                                                               |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+void CreateOpusFecPayload(uint8_t* payload,
+                          size_t payload_length,
+                          uint8_t payload_value) {
+  if (payload_length < 2) {
+    return;
+  }
+  payload[0] = 0x08;
+  payload[1] = 0x40;
+  memset(&payload[2], payload_value, payload_length - 2);
+}
+
+// RED headers (according to RFC 2198):
+//
+//    0                   1                   2                   3
+//    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//   |F|   block PT  |  timestamp offset         |   block length    |
+//   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// Last RED header:
+//    0 1 2 3 4 5 6 7
+//   +-+-+-+-+-+-+-+-+
+//   |0|   Block PT  |
+//   +-+-+-+-+-+-+-+-+
+
+// Creates a RED packet, with |num_payloads| payloads, with payload types given
+// by the values in array |payload_types| (which must be of length
+// |num_payloads|). Each redundant payload is |timestamp_offset| samples
+// "behind" the the previous payload.
+Packet CreateRedPayload(size_t num_payloads,
+                        uint8_t* payload_types,
+                        int timestamp_offset,
+                        bool embed_opus_fec = false) {
+  Packet packet;
+  packet.payload_type = kRedPayloadType;
+  packet.timestamp = kBaseTimestamp;
+  packet.sequence_number = kSequenceNumber;
+  packet.payload.SetSize((kPayloadLength + 1) +
+                         (num_payloads - 1) *
+                             (kPayloadLength + kRedHeaderLength));
+  uint8_t* payload_ptr = packet.payload.data();
+  for (size_t i = 0; i < num_payloads; ++i) {
+    // Write the RED headers.
+    if (i == num_payloads - 1) {
+      // Special case for last payload.
+      *payload_ptr = payload_types[i] & 0x7F;  // F = 0;
+      ++payload_ptr;
+      break;
+    }
+    *payload_ptr = payload_types[i] & 0x7F;
+    // Not the last block; set F = 1.
+    *payload_ptr |= 0x80;
+    ++payload_ptr;
+    int this_offset = rtc::checked_cast<int>(
+        (num_payloads - i - 1) * timestamp_offset);
+    *payload_ptr = this_offset >> 6;
+    ++payload_ptr;
+    assert(kPayloadLength <= 1023);  // Max length described by 10 bits.
+    *payload_ptr = ((this_offset & 0x3F) << 2) | (kPayloadLength >> 8);
+    ++payload_ptr;
+    *payload_ptr = kPayloadLength & 0xFF;
+    ++payload_ptr;
+  }
+  for (size_t i = 0; i < num_payloads; ++i) {
+    // Write |i| to all bytes in each payload.
+    if (embed_opus_fec) {
+      CreateOpusFecPayload(payload_ptr, kPayloadLength,
+                           static_cast<uint8_t>(i));
+    } else {
+      memset(payload_ptr, static_cast<int>(i), kPayloadLength);
+    }
+    payload_ptr += kPayloadLength;
+  }
+  return packet;
+}
+
+// Create a packet with all payload bytes set to |payload_value|.
+Packet CreatePacket(uint8_t payload_type,
+                    size_t payload_length,
+                    uint8_t payload_value,
+                    bool opus_fec = false) {
+  Packet packet;
+  packet.payload_type = payload_type;
+  packet.timestamp = kBaseTimestamp;
+  packet.sequence_number = kSequenceNumber;
+  packet.payload.SetSize(payload_length);
+  if (opus_fec) {
+    CreateOpusFecPayload(packet.payload.data(), packet.payload.size(),
+                         payload_value);
+  } else {
+    memset(packet.payload.data(), payload_value, packet.payload.size());
+  }
+  return packet;
+}
+
+// Checks that |packet| has the attributes given in the remaining parameters.
+void VerifyPacket(const Packet& packet,
+                  size_t payload_length,
+                  uint8_t payload_type,
+                  uint16_t sequence_number,
+                  uint32_t timestamp,
+                  uint8_t payload_value,
+                  Packet::Priority priority) {
+  EXPECT_EQ(payload_length, packet.payload.size());
+  EXPECT_EQ(payload_type, packet.payload_type);
+  EXPECT_EQ(sequence_number, packet.sequence_number);
+  EXPECT_EQ(timestamp, packet.timestamp);
+  EXPECT_EQ(priority, packet.priority);
+  ASSERT_FALSE(packet.payload.empty());
+  for (size_t i = 0; i < packet.payload.size(); ++i) {
+    ASSERT_EQ(payload_value, packet.payload.data()[i]);
+  }
+}
+
+void VerifyPacket(const Packet& packet,
+                  size_t payload_length,
+                  uint8_t payload_type,
+                  uint16_t sequence_number,
+                  uint32_t timestamp,
+                  uint8_t payload_value,
+                  bool primary) {
+  return VerifyPacket(packet, payload_length, payload_type, sequence_number,
+                      timestamp, payload_value,
+                      Packet::Priority{0, primary ? 0 : 1});
+}
+
+// Start of test definitions.
+
+TEST(RedPayloadSplitter, CreateAndDestroy) {
+  RedPayloadSplitter* splitter = new RedPayloadSplitter;
+  delete splitter;
+}
+
+// Packet A is split into A1 and A2.
+TEST(RedPayloadSplitter, OnePacketTwoPayloads) {
+  uint8_t payload_types[] = {0, 0};
+  const int kTimestampOffset = 160;
+  PacketList packet_list;
+  packet_list.push_back(CreateRedPayload(2, payload_types, kTimestampOffset));
+  RedPayloadSplitter splitter;
+  EXPECT_TRUE(splitter.SplitRed(&packet_list));
+  ASSERT_EQ(2u, packet_list.size());
+  // Check first packet. The first in list should always be the primary payload.
+  VerifyPacket(packet_list.front(), kPayloadLength, payload_types[1],
+               kSequenceNumber, kBaseTimestamp, 1, true);
+  packet_list.pop_front();
+  // Check second packet.
+  VerifyPacket(packet_list.front(), kPayloadLength, payload_types[0],
+               kSequenceNumber, kBaseTimestamp - kTimestampOffset, 0, false);
+}
+
+// Packets A and B are not split at all. Only the RED header in each packet is
+// removed.
+TEST(RedPayloadSplitter, TwoPacketsOnePayload) {
+  uint8_t payload_types[] = {0};
+  const int kTimestampOffset = 160;
+  // Create first packet, with a single RED payload.
+  PacketList packet_list;
+  packet_list.push_back(CreateRedPayload(1, payload_types, kTimestampOffset));
+  // Create second packet, with a single RED payload.
+  {
+    Packet packet = CreateRedPayload(1, payload_types, kTimestampOffset);
+    // Manually change timestamp and sequence number of second packet.
+    packet.timestamp += kTimestampOffset;
+    packet.sequence_number++;
+    packet_list.push_back(std::move(packet));
+  }
+  RedPayloadSplitter splitter;
+  EXPECT_TRUE(splitter.SplitRed(&packet_list));
+  ASSERT_EQ(2u, packet_list.size());
+  // Check first packet.
+  VerifyPacket(packet_list.front(), kPayloadLength, payload_types[0],
+               kSequenceNumber, kBaseTimestamp, 0, true);
+  packet_list.pop_front();
+  // Check second packet.
+  VerifyPacket(packet_list.front(), kPayloadLength, payload_types[0],
+               kSequenceNumber + 1, kBaseTimestamp + kTimestampOffset, 0, true);
+}
+
+// Packets A and B are split into packets A1, A2, A3, B1, B2, B3, with
+// attributes as follows:
+//
+//                  A1*   A2    A3    B1*   B2    B3
+// Payload type     0     1     2     0     1     2
+// Timestamp        b     b-o   b-2o  b+o   b     b-o
+// Sequence number  0     0     0     1     1     1
+//
+// b = kBaseTimestamp, o = kTimestampOffset, * = primary.
+TEST(RedPayloadSplitter, TwoPacketsThreePayloads) {
+  uint8_t payload_types[] = {2, 1, 0};  // Primary is the last one.
+  const int kTimestampOffset = 160;
+  // Create first packet, with 3 RED payloads.
+  PacketList packet_list;
+  packet_list.push_back(CreateRedPayload(3, payload_types, kTimestampOffset));
+  // Create first packet, with 3 RED payloads.
+  {
+    Packet packet = CreateRedPayload(3, payload_types, kTimestampOffset);
+    // Manually change timestamp and sequence number of second packet.
+    packet.timestamp += kTimestampOffset;
+    packet.sequence_number++;
+    packet_list.push_back(std::move(packet));
+  }
+  RedPayloadSplitter splitter;
+  EXPECT_TRUE(splitter.SplitRed(&packet_list));
+  ASSERT_EQ(6u, packet_list.size());
+  // Check first packet, A1.
+  VerifyPacket(packet_list.front(), kPayloadLength, payload_types[2],
+               kSequenceNumber, kBaseTimestamp, 2, {0, 0});
+  packet_list.pop_front();
+  // Check second packet, A2.
+  VerifyPacket(packet_list.front(), kPayloadLength, payload_types[1],
+               kSequenceNumber, kBaseTimestamp - kTimestampOffset, 1, {0, 1});
+  packet_list.pop_front();
+  // Check third packet, A3.
+  VerifyPacket(packet_list.front(), kPayloadLength, payload_types[0],
+               kSequenceNumber, kBaseTimestamp - 2 * kTimestampOffset, 0,
+               {0, 2});
+  packet_list.pop_front();
+  // Check fourth packet, B1.
+  VerifyPacket(packet_list.front(), kPayloadLength, payload_types[2],
+               kSequenceNumber + 1, kBaseTimestamp + kTimestampOffset, 2,
+               {0, 0});
+  packet_list.pop_front();
+  // Check fifth packet, B2.
+  VerifyPacket(packet_list.front(), kPayloadLength, payload_types[1],
+               kSequenceNumber + 1, kBaseTimestamp, 1, {0, 1});
+  packet_list.pop_front();
+  // Check sixth packet, B3.
+  VerifyPacket(packet_list.front(), kPayloadLength, payload_types[0],
+               kSequenceNumber + 1, kBaseTimestamp - kTimestampOffset, 0,
+               {0, 2});
+}
+
+// Creates a list with 4 packets with these payload types:
+// 0 = CNGnb
+// 1 = PCMu
+// 2 = DTMF (AVT)
+// 3 = iLBC
+// We expect the method CheckRedPayloads to discard the iLBC packet, since it
+// is a non-CNG, non-DTMF payload of another type than the first speech payload
+// found in the list (which is PCMu).
+TEST(RedPayloadSplitter, CheckRedPayloads) {
+  PacketList packet_list;
+  for (uint8_t i = 0; i <= 3; ++i) {
+    // Create packet with payload type |i|, payload length 10 bytes, all 0.
+    packet_list.push_back(CreatePacket(i, 10, 0));
+  }
+
+  // Use a real DecoderDatabase object here instead of a mock, since it is
+  // easier to just register the payload types and let the actual implementation
+  // do its job.
+  DecoderDatabase decoder_database(
+      new rtc::RefCountedObject<MockAudioDecoderFactory>);
+  decoder_database.RegisterPayload(0, NetEqDecoder::kDecoderCNGnb, "cng-nb");
+  decoder_database.RegisterPayload(1, NetEqDecoder::kDecoderPCMu, "pcmu");
+  decoder_database.RegisterPayload(2, NetEqDecoder::kDecoderAVT, "avt");
+  decoder_database.RegisterPayload(3, NetEqDecoder::kDecoderILBC, "ilbc");
+
+  RedPayloadSplitter splitter;
+  splitter.CheckRedPayloads(&packet_list, decoder_database);
+
+  ASSERT_EQ(3u, packet_list.size());  // Should have dropped the last packet.
+  // Verify packets. The loop verifies that payload types 0, 1, and 2 are in the
+  // list.
+  for (int i = 0; i <= 2; ++i) {
+    VerifyPacket(packet_list.front(), 10, i, kSequenceNumber, kBaseTimestamp, 0,
+                 true);
+    packet_list.pop_front();
+  }
+  EXPECT_TRUE(packet_list.empty());
+}
+
+// Packet A is split into A1, A2 and A3. But the length parameter is off, so
+// the last payloads should be discarded.
+TEST(RedPayloadSplitter, WrongPayloadLength) {
+  uint8_t payload_types[] = {0, 0, 0};
+  const int kTimestampOffset = 160;
+  PacketList packet_list;
+  {
+    Packet packet = CreateRedPayload(3, payload_types, kTimestampOffset);
+    // Manually tamper with the payload length of the packet.
+    // This is one byte too short for the second payload (out of three).
+    // We expect only the first payload to be returned.
+    packet.payload.SetSize(packet.payload.size() - (kPayloadLength + 1));
+    packet_list.push_back(std::move(packet));
+  }
+  RedPayloadSplitter splitter;
+  EXPECT_FALSE(splitter.SplitRed(&packet_list));
+  ASSERT_EQ(1u, packet_list.size());
+  // Check first packet.
+  VerifyPacket(packet_list.front(), kPayloadLength, payload_types[0],
+               kSequenceNumber, kBaseTimestamp - 2 * kTimestampOffset, 0,
+               {0, 2});
+  packet_list.pop_front();
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/rtcp.cc b/modules/audio_coding/neteq/rtcp.cc
new file mode 100644
index 0000000..2885398
--- /dev/null
+++ b/modules/audio_coding/neteq/rtcp.cc
@@ -0,0 +1,96 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/rtcp.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <algorithm>
+
+#include "modules/include/module_common_types.h"
+
+namespace webrtc {
+
+void Rtcp::Init(uint16_t start_sequence_number) {
+  cycles_ = 0;
+  max_seq_no_ = start_sequence_number;
+  base_seq_no_ = start_sequence_number;
+  received_packets_ = 0;
+  received_packets_prior_ = 0;
+  expected_prior_ = 0;
+  jitter_ = 0;
+  transit_ = 0;
+}
+
+void Rtcp::Update(const RTPHeader& rtp_header, uint32_t receive_timestamp) {
+  // Update number of received packets, and largest packet number received.
+  received_packets_++;
+  int16_t sn_diff = rtp_header.sequenceNumber - max_seq_no_;
+  if (sn_diff >= 0) {
+    if (rtp_header.sequenceNumber < max_seq_no_) {
+      // Wrap-around detected.
+      cycles_++;
+    }
+    max_seq_no_ = rtp_header.sequenceNumber;
+  }
+
+  // Calculate jitter according to RFC 3550, and update previous timestamps.
+  // Note that the value in |jitter_| is in Q4.
+  if (received_packets_ > 1) {
+    int32_t ts_diff = receive_timestamp - (rtp_header.timestamp - transit_);
+    int64_t jitter_diff = (std::abs(int64_t{ts_diff}) << 4) - jitter_;
+    // Calculate 15 * jitter_ / 16 + jitter_diff / 16 (with proper rounding).
+    jitter_ = jitter_ + ((jitter_diff + 8) >> 4);
+    RTC_DCHECK_GE(jitter_, 0);
+  }
+  transit_ = rtp_header.timestamp - receive_timestamp;
+}
+
+void Rtcp::GetStatistics(bool no_reset, RtcpStatistics* stats) {
+  // Extended highest sequence number received.
+  stats->extended_highest_sequence_number =
+      (static_cast<int>(cycles_) << 16) + max_seq_no_;
+
+  // Calculate expected number of packets and compare it with the number of
+  // packets that were actually received. The cumulative number of lost packets
+  // can be extracted.
+  uint32_t expected_packets =
+      stats->extended_highest_sequence_number - base_seq_no_ + 1;
+  if (received_packets_ == 0) {
+    // No packets received, assume none lost.
+    stats->packets_lost = 0;
+  } else if (expected_packets > received_packets_) {
+    stats->packets_lost = expected_packets - received_packets_;
+    if (stats->packets_lost > 0xFFFFFF) {
+      stats->packets_lost = 0xFFFFFF;
+    }
+  } else {
+    stats->packets_lost = 0;
+  }
+
+  // Fraction lost since last report.
+  uint32_t expected_since_last = expected_packets - expected_prior_;
+  uint32_t received_since_last = received_packets_ - received_packets_prior_;
+  if (!no_reset) {
+    expected_prior_ = expected_packets;
+    received_packets_prior_ = received_packets_;
+  }
+  int32_t lost = expected_since_last - received_since_last;
+  if (expected_since_last == 0 || lost <= 0 || received_packets_ == 0) {
+    stats->fraction_lost = 0;
+  } else {
+    stats->fraction_lost = std::min(0xFFU, (lost << 8) / expected_since_last);
+  }
+
+  stats->jitter = jitter_ >> 4;  // Scaling from Q4.
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/rtcp.h b/modules/audio_coding/neteq/rtcp.h
new file mode 100644
index 0000000..ce2035b
--- /dev/null
+++ b/modules/audio_coding/neteq/rtcp.h
@@ -0,0 +1,58 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_RTCP_H_
+#define MODULES_AUDIO_CODING_NETEQ_RTCP_H_
+
+#include "modules/audio_coding/neteq/include/neteq.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Forward declaration.
+struct RTPHeader;
+
+class Rtcp {
+ public:
+  Rtcp() {
+    Init(0);
+  }
+
+  ~Rtcp() {}
+
+  // Resets the RTCP statistics, and sets the first received sequence number.
+  void Init(uint16_t start_sequence_number);
+
+  // Updates the RTCP statistics with a new received packet.
+  void Update(const RTPHeader& rtp_header, uint32_t receive_timestamp);
+
+  // Returns the current RTCP statistics. If |no_reset| is true, the statistics
+  // are not reset, otherwise they are.
+  void GetStatistics(bool no_reset, RtcpStatistics* stats);
+
+ private:
+  uint16_t cycles_;  // The number of wrap-arounds for the sequence number.
+  uint16_t max_seq_no_;  // The maximum sequence number received. Starts over
+                         // from 0 after wrap-around.
+  uint16_t base_seq_no_;  // The sequence number of the first received packet.
+  uint32_t received_packets_;  // The number of packets that have been received.
+  uint32_t received_packets_prior_;  // Number of packets received when last
+                                     // report was generated.
+  uint32_t expected_prior_;  // Expected number of packets, at the time of the
+                             // last report.
+  int64_t jitter_;  // Current jitter value in Q4.
+  int32_t transit_;  // Clock difference for previous packet.
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(Rtcp);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_RTCP_H_
diff --git a/modules/audio_coding/neteq/statistics_calculator.cc b/modules/audio_coding/neteq/statistics_calculator.cc
new file mode 100644
index 0000000..c698790
--- /dev/null
+++ b/modules/audio_coding/neteq/statistics_calculator.cc
@@ -0,0 +1,368 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+
+#include <assert.h>
+#include <string.h>  // memset
+#include <algorithm>
+
+#include "modules/audio_coding/neteq/delay_manager.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+size_t AddIntToSizeTWithLowerCap(int a, size_t b) {
+  const size_t ret = b + a;
+  // If a + b is negative, resulting in a negative wrap, cap it to zero instead.
+  static_assert(sizeof(size_t) >= sizeof(int),
+                "int must not be wider than size_t for this to work");
+  return (a < 0 && ret > b) ? 0 : ret;
+}
+}  // namespace
+
+// Allocating the static const so that it can be passed by reference to
+// RTC_DCHECK.
+const size_t StatisticsCalculator::kLenWaitingTimes;
+
+StatisticsCalculator::PeriodicUmaLogger::PeriodicUmaLogger(
+    const std::string& uma_name,
+    int report_interval_ms,
+    int max_value)
+    : uma_name_(uma_name),
+      report_interval_ms_(report_interval_ms),
+      max_value_(max_value),
+      timer_(0) {
+}
+
+StatisticsCalculator::PeriodicUmaLogger::~PeriodicUmaLogger() = default;
+
+void StatisticsCalculator::PeriodicUmaLogger::AdvanceClock(int step_ms) {
+  timer_ += step_ms;
+  if (timer_ < report_interval_ms_) {
+    return;
+  }
+  LogToUma(Metric());
+  Reset();
+  timer_ -= report_interval_ms_;
+  RTC_DCHECK_GE(timer_, 0);
+}
+
+void StatisticsCalculator::PeriodicUmaLogger::LogToUma(int value) const {
+  RTC_HISTOGRAM_COUNTS_SPARSE(uma_name_, value, 1, max_value_, 50);
+}
+
+StatisticsCalculator::PeriodicUmaCount::PeriodicUmaCount(
+    const std::string& uma_name,
+    int report_interval_ms,
+    int max_value)
+    : PeriodicUmaLogger(uma_name, report_interval_ms, max_value) {
+}
+
+StatisticsCalculator::PeriodicUmaCount::~PeriodicUmaCount() {
+  // Log the count for the current (incomplete) interval.
+  LogToUma(Metric());
+}
+
+void StatisticsCalculator::PeriodicUmaCount::RegisterSample() {
+  ++counter_;
+}
+
+int StatisticsCalculator::PeriodicUmaCount::Metric() const {
+  return counter_;
+}
+
+void StatisticsCalculator::PeriodicUmaCount::Reset() {
+  counter_ = 0;
+}
+
+StatisticsCalculator::PeriodicUmaAverage::PeriodicUmaAverage(
+    const std::string& uma_name,
+    int report_interval_ms,
+    int max_value)
+    : PeriodicUmaLogger(uma_name, report_interval_ms, max_value) {
+}
+
+StatisticsCalculator::PeriodicUmaAverage::~PeriodicUmaAverage() {
+  // Log the average for the current (incomplete) interval.
+  LogToUma(Metric());
+}
+
+void StatisticsCalculator::PeriodicUmaAverage::RegisterSample(int value) {
+  sum_ += value;
+  ++counter_;
+}
+
+int StatisticsCalculator::PeriodicUmaAverage::Metric() const {
+  return counter_ == 0 ? 0 : static_cast<int>(sum_ / counter_);
+}
+
+void StatisticsCalculator::PeriodicUmaAverage::Reset() {
+  sum_ = 0.0;
+  counter_ = 0;
+}
+
+StatisticsCalculator::StatisticsCalculator()
+    : preemptive_samples_(0),
+      accelerate_samples_(0),
+      added_zero_samples_(0),
+      expanded_speech_samples_(0),
+      expanded_noise_samples_(0),
+      discarded_packets_(0),
+      lost_timestamps_(0),
+      timestamps_since_last_report_(0),
+      secondary_decoded_samples_(0),
+      discarded_secondary_packets_(0),
+      delayed_packet_outage_counter_(
+          "WebRTC.Audio.DelayedPacketOutageEventsPerMinute",
+          60000,  // 60 seconds report interval.
+          100),
+      excess_buffer_delay_("WebRTC.Audio.AverageExcessBufferDelayMs",
+                           60000,  // 60 seconds report interval.
+                           1000) {}
+
+StatisticsCalculator::~StatisticsCalculator() = default;
+
+void StatisticsCalculator::Reset() {
+  preemptive_samples_ = 0;
+  accelerate_samples_ = 0;
+  added_zero_samples_ = 0;
+  expanded_speech_samples_ = 0;
+  expanded_noise_samples_ = 0;
+  secondary_decoded_samples_ = 0;
+  discarded_secondary_packets_ = 0;
+  waiting_times_.clear();
+}
+
+void StatisticsCalculator::ResetMcu() {
+  discarded_packets_ = 0;
+  lost_timestamps_ = 0;
+  timestamps_since_last_report_ = 0;
+}
+
+void StatisticsCalculator::ExpandedVoiceSamples(size_t num_samples,
+                                                bool is_new_concealment_event) {
+  expanded_speech_samples_ += num_samples;
+  ConcealedSamplesCorrection(rtc::dchecked_cast<int>(num_samples), true);
+  lifetime_stats_.concealment_events += is_new_concealment_event;
+}
+
+void StatisticsCalculator::ExpandedNoiseSamples(size_t num_samples,
+                                                bool is_new_concealment_event) {
+  expanded_noise_samples_ += num_samples;
+  ConcealedSamplesCorrection(rtc::dchecked_cast<int>(num_samples), false);
+  lifetime_stats_.concealment_events += is_new_concealment_event;
+}
+
+void StatisticsCalculator::ExpandedVoiceSamplesCorrection(int num_samples) {
+  expanded_speech_samples_ =
+      AddIntToSizeTWithLowerCap(num_samples, expanded_speech_samples_);
+  ConcealedSamplesCorrection(num_samples, true);
+}
+
+void StatisticsCalculator::ExpandedNoiseSamplesCorrection(int num_samples) {
+  expanded_noise_samples_ =
+      AddIntToSizeTWithLowerCap(num_samples, expanded_noise_samples_);
+  ConcealedSamplesCorrection(num_samples, false);
+}
+
+void StatisticsCalculator::ConcealedSamplesCorrection(int num_samples,
+                                                      bool is_voice) {
+  if (num_samples < 0) {
+    // Store negative correction to subtract from future positive additions.
+    // See also the function comment in the header file.
+    concealed_samples_correction_ -= num_samples;
+    if (is_voice) {
+      voice_concealed_samples_correction_ -= num_samples;
+    }
+    return;
+  }
+
+  const size_t canceled_out =
+      std::min(static_cast<size_t>(num_samples), concealed_samples_correction_);
+  concealed_samples_correction_ -= canceled_out;
+  lifetime_stats_.concealed_samples += num_samples - canceled_out;
+
+  if (is_voice) {
+    const size_t voice_canceled_out = std::min(
+        static_cast<size_t>(num_samples), voice_concealed_samples_correction_);
+    voice_concealed_samples_correction_ -= voice_canceled_out;
+    lifetime_stats_.voice_concealed_samples += num_samples - voice_canceled_out;
+  }
+}
+
+void StatisticsCalculator::PreemptiveExpandedSamples(size_t num_samples) {
+  preemptive_samples_ += num_samples;
+}
+
+void StatisticsCalculator::AcceleratedSamples(size_t num_samples) {
+  accelerate_samples_ += num_samples;
+}
+
+void StatisticsCalculator::AddZeros(size_t num_samples) {
+  added_zero_samples_ += num_samples;
+}
+
+void StatisticsCalculator::PacketsDiscarded(size_t num_packets) {
+  discarded_packets_ += num_packets;
+}
+
+void StatisticsCalculator::SecondaryPacketsDiscarded(size_t num_packets) {
+  discarded_secondary_packets_ += num_packets;
+}
+
+void StatisticsCalculator::LostSamples(size_t num_samples) {
+  lost_timestamps_ += num_samples;
+}
+
+void StatisticsCalculator::IncreaseCounter(size_t num_samples, int fs_hz) {
+  const int time_step_ms =
+      rtc::CheckedDivExact(static_cast<int>(1000 * num_samples), fs_hz);
+  delayed_packet_outage_counter_.AdvanceClock(time_step_ms);
+  excess_buffer_delay_.AdvanceClock(time_step_ms);
+  timestamps_since_last_report_ += static_cast<uint32_t>(num_samples);
+  if (timestamps_since_last_report_ >
+      static_cast<uint32_t>(fs_hz * kMaxReportPeriod)) {
+    lost_timestamps_ = 0;
+    timestamps_since_last_report_ = 0;
+    discarded_packets_ = 0;
+  }
+  lifetime_stats_.total_samples_received += num_samples;
+}
+
+void StatisticsCalculator::JitterBufferDelay(size_t num_samples,
+                                             uint64_t waiting_time_ms) {
+  lifetime_stats_.jitter_buffer_delay_ms += waiting_time_ms * num_samples;
+}
+
+void StatisticsCalculator::SecondaryDecodedSamples(int num_samples) {
+  secondary_decoded_samples_ += num_samples;
+}
+
+void StatisticsCalculator::LogDelayedPacketOutageEvent(int outage_duration_ms) {
+  RTC_HISTOGRAM_COUNTS("WebRTC.Audio.DelayedPacketOutageEventMs",
+                       outage_duration_ms, 1 /* min */, 2000 /* max */,
+                       100 /* bucket count */);
+  delayed_packet_outage_counter_.RegisterSample();
+}
+
+void StatisticsCalculator::StoreWaitingTime(int waiting_time_ms) {
+  excess_buffer_delay_.RegisterSample(waiting_time_ms);
+  RTC_DCHECK_LE(waiting_times_.size(), kLenWaitingTimes);
+  if (waiting_times_.size() == kLenWaitingTimes) {
+    // Erase first value.
+    waiting_times_.pop_front();
+  }
+  waiting_times_.push_back(waiting_time_ms);
+}
+
+void StatisticsCalculator::GetNetworkStatistics(
+    int fs_hz,
+    size_t num_samples_in_buffers,
+    size_t samples_per_packet,
+    NetEqNetworkStatistics *stats) {
+  RTC_DCHECK_GT(fs_hz, 0);
+  RTC_DCHECK(stats);
+
+  stats->added_zero_samples = added_zero_samples_;
+  stats->current_buffer_size_ms =
+      static_cast<uint16_t>(num_samples_in_buffers * 1000 / fs_hz);
+
+  stats->packet_loss_rate =
+      CalculateQ14Ratio(lost_timestamps_, timestamps_since_last_report_);
+
+  stats->accelerate_rate =
+      CalculateQ14Ratio(accelerate_samples_, timestamps_since_last_report_);
+
+  stats->preemptive_rate =
+      CalculateQ14Ratio(preemptive_samples_, timestamps_since_last_report_);
+
+  stats->expand_rate =
+      CalculateQ14Ratio(expanded_speech_samples_ + expanded_noise_samples_,
+                        timestamps_since_last_report_);
+
+  stats->speech_expand_rate =
+      CalculateQ14Ratio(expanded_speech_samples_,
+                        timestamps_since_last_report_);
+
+  stats->secondary_decoded_rate =
+      CalculateQ14Ratio(secondary_decoded_samples_,
+                        timestamps_since_last_report_);
+
+  const size_t discarded_secondary_samples =
+      discarded_secondary_packets_ * samples_per_packet;
+  stats->secondary_discarded_rate = CalculateQ14Ratio(
+      discarded_secondary_samples,
+      static_cast<uint32_t>(discarded_secondary_samples +
+        secondary_decoded_samples_));
+
+  if (waiting_times_.size() == 0) {
+    stats->mean_waiting_time_ms = -1;
+    stats->median_waiting_time_ms = -1;
+    stats->min_waiting_time_ms = -1;
+    stats->max_waiting_time_ms = -1;
+  } else {
+    std::sort(waiting_times_.begin(), waiting_times_.end());
+    // Find mid-point elements. If the size is odd, the two values
+    // |middle_left| and |middle_right| will both be the one middle element; if
+    // the size is even, they will be the the two neighboring elements at the
+    // middle of the list.
+    const int middle_left = waiting_times_[(waiting_times_.size() - 1) / 2];
+    const int middle_right = waiting_times_[waiting_times_.size() / 2];
+    // Calculate the average of the two. (Works also for odd sizes.)
+    stats->median_waiting_time_ms = (middle_left + middle_right) / 2;
+    stats->min_waiting_time_ms = waiting_times_.front();
+    stats->max_waiting_time_ms = waiting_times_.back();
+    double sum = 0;
+    for (auto time : waiting_times_) {
+      sum += time;
+    }
+    stats->mean_waiting_time_ms = static_cast<int>(sum / waiting_times_.size());
+  }
+
+  // Reset counters.
+  ResetMcu();
+  Reset();
+}
+
+void StatisticsCalculator::PopulateDelayManagerStats(
+    int ms_per_packet,
+    const DelayManager& delay_manager,
+    NetEqNetworkStatistics* stats) {
+  RTC_DCHECK(stats);
+  stats->preferred_buffer_size_ms =
+      (delay_manager.TargetLevel() >> 8) * ms_per_packet;
+  stats->jitter_peaks_found = delay_manager.PeakFound();
+  stats->clockdrift_ppm =
+      rtc::saturated_cast<int32_t>(delay_manager.EstimatedClockDriftPpm());
+}
+
+NetEqLifetimeStatistics StatisticsCalculator::GetLifetimeStatistics() const {
+  return lifetime_stats_;
+}
+
+uint16_t StatisticsCalculator::CalculateQ14Ratio(size_t numerator,
+                                                 uint32_t denominator) {
+  if (numerator == 0) {
+    return 0;
+  } else if (numerator < denominator) {
+    // Ratio must be smaller than 1 in Q14.
+    assert((numerator << 14) / denominator < (1 << 14));
+    return static_cast<uint16_t>((numerator << 14) / denominator);
+  } else {
+    // Will not produce a ratio larger than 1, since this is probably an error.
+    return 1 << 14;
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/statistics_calculator.h b/modules/audio_coding/neteq/statistics_calculator.h
new file mode 100644
index 0000000..a06ddfb
--- /dev/null
+++ b/modules/audio_coding/neteq/statistics_calculator.h
@@ -0,0 +1,202 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_STATISTICS_CALCULATOR_H_
+#define MODULES_AUDIO_CODING_NETEQ_STATISTICS_CALCULATOR_H_
+
+#include <deque>
+#include <string>
+
+#include "modules/audio_coding/neteq/include/neteq.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class DelayManager;
+
+// This class handles various network statistics in NetEq.
+class StatisticsCalculator {
+ public:
+  StatisticsCalculator();
+
+  virtual ~StatisticsCalculator();
+
+  // Resets most of the counters.
+  void Reset();
+
+  // Resets the counters that are not handled by Reset().
+  void ResetMcu();
+
+  // Reports that |num_samples| samples were produced through expansion, and
+  // that the expansion produced other than just noise samples.
+  void ExpandedVoiceSamples(size_t num_samples, bool is_new_concealment_event);
+
+  // Reports that |num_samples| samples were produced through expansion, and
+  // that the expansion produced only noise samples.
+  void ExpandedNoiseSamples(size_t num_samples, bool is_new_concealment_event);
+
+  // Corrects the statistics for number of samples produced through non-noise
+  // expansion by adding |num_samples| (negative or positive) to the current
+  // value. The result is capped to zero to avoid negative values.
+  void ExpandedVoiceSamplesCorrection(int num_samples);
+
+  // Same as ExpandedVoiceSamplesCorrection but for noise samples.
+  void ExpandedNoiseSamplesCorrection(int num_samples);
+
+  // Reports that |num_samples| samples were produced through preemptive
+  // expansion.
+  void PreemptiveExpandedSamples(size_t num_samples);
+
+  // Reports that |num_samples| samples were removed through accelerate.
+  void AcceleratedSamples(size_t num_samples);
+
+  // Reports that |num_samples| zeros were inserted into the output.
+  void AddZeros(size_t num_samples);
+
+  // Reports that |num_packets| packets were discarded.
+  virtual void PacketsDiscarded(size_t num_packets);
+
+  // Reports that |num_packets| packets samples were discarded.
+  virtual void SecondaryPacketsDiscarded(size_t num_samples);
+
+  // Reports that |num_samples| were lost.
+  void LostSamples(size_t num_samples);
+
+  // Increases the report interval counter with |num_samples| at a sample rate
+  // of |fs_hz|. This is how the StatisticsCalculator gets notified that current
+  // time is increasing.
+  void IncreaseCounter(size_t num_samples, int fs_hz);
+
+  // Update jitter buffer delay counter.
+  void JitterBufferDelay(size_t num_samples, uint64_t waiting_time_ms);
+
+  // Stores new packet waiting time in waiting time statistics.
+  void StoreWaitingTime(int waiting_time_ms);
+
+  // Reports that |num_samples| samples were decoded from secondary packets.
+  void SecondaryDecodedSamples(int num_samples);
+
+  // Logs a delayed packet outage event of |outage_duration_ms|. A delayed
+  // packet outage event is defined as an expand period caused not by an actual
+  // packet loss, but by a delayed packet.
+  virtual void LogDelayedPacketOutageEvent(int outage_duration_ms);
+
+  // Returns the current network statistics in |stats|. The current sample rate
+  // is |fs_hz|, the total number of samples in packet buffer and sync buffer
+  // yet to play out is |num_samples_in_buffers|, and the number of samples per
+  // packet is |samples_per_packet|. The method does not populate
+  // |preferred_buffer_size_ms|, |jitter_peaks_found| or |clockdrift_ppm|; use
+  // the PopulateDelayManagerStats method for those.
+  void GetNetworkStatistics(int fs_hz,
+                            size_t num_samples_in_buffers,
+                            size_t samples_per_packet,
+                            NetEqNetworkStatistics *stats);
+
+  // Populates |preferred_buffer_size_ms|, |jitter_peaks_found| and
+  // |clockdrift_ppm| in |stats|. This is a convenience method, and does not
+  // strictly have to be in the StatisticsCalculator class, but it makes sense
+  // since all other stats fields are populated by that class.
+  static void PopulateDelayManagerStats(int ms_per_packet,
+                                        const DelayManager& delay_manager,
+                                        NetEqNetworkStatistics* stats);
+
+  // Returns a copy of this class's lifetime statistics. These statistics are
+  // never reset.
+  NetEqLifetimeStatistics GetLifetimeStatistics() const;
+
+ private:
+  static const int kMaxReportPeriod = 60;  // Seconds before auto-reset.
+  static const size_t kLenWaitingTimes = 100;
+
+  class PeriodicUmaLogger {
+   public:
+    PeriodicUmaLogger(const std::string& uma_name,
+                      int report_interval_ms,
+                      int max_value);
+    virtual ~PeriodicUmaLogger();
+    void AdvanceClock(int step_ms);
+
+   protected:
+    void LogToUma(int value) const;
+    virtual int Metric() const = 0;
+    virtual void Reset() = 0;
+
+    const std::string uma_name_;
+    const int report_interval_ms_;
+    const int max_value_;
+    int timer_ = 0;
+  };
+
+  class PeriodicUmaCount final : public PeriodicUmaLogger {
+   public:
+    PeriodicUmaCount(const std::string& uma_name,
+                     int report_interval_ms,
+                     int max_value);
+    ~PeriodicUmaCount() override;
+    void RegisterSample();
+
+   protected:
+    int Metric() const override;
+    void Reset() override;
+
+   private:
+    int counter_ = 0;
+  };
+
+  class PeriodicUmaAverage final : public PeriodicUmaLogger {
+   public:
+    PeriodicUmaAverage(const std::string& uma_name,
+                       int report_interval_ms,
+                       int max_value);
+    ~PeriodicUmaAverage() override;
+    void RegisterSample(int value);
+
+   protected:
+    int Metric() const override;
+    void Reset() override;
+
+   private:
+    double sum_ = 0.0;
+    int counter_ = 0;
+  };
+
+  // Corrects the concealed samples counter in lifetime_stats_. The value of
+  // num_samples_ is added directly to the stat if the correction is positive.
+  // If the correction is negative, it is cached and will be subtracted against
+  // future additions to the counter. This is meant to be called from
+  // Expanded{Voice,Noise}Samples{Correction}.
+  void ConcealedSamplesCorrection(int num_samples, bool is_voice);
+
+  // Calculates numerator / denominator, and returns the value in Q14.
+  static uint16_t CalculateQ14Ratio(size_t numerator, uint32_t denominator);
+
+  NetEqLifetimeStatistics lifetime_stats_;
+  size_t concealed_samples_correction_ = 0;
+  size_t voice_concealed_samples_correction_ = 0;
+  size_t preemptive_samples_;
+  size_t accelerate_samples_;
+  size_t added_zero_samples_;
+  size_t expanded_speech_samples_;
+  size_t expanded_noise_samples_;
+  size_t discarded_packets_;
+  size_t lost_timestamps_;
+  uint32_t timestamps_since_last_report_;
+  std::deque<int> waiting_times_;
+  uint32_t secondary_decoded_samples_;
+  size_t discarded_secondary_packets_;
+  PeriodicUmaCount delayed_packet_outage_counter_;
+  PeriodicUmaAverage excess_buffer_delay_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(StatisticsCalculator);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_STATISTICS_CALCULATOR_H_
diff --git a/modules/audio_coding/neteq/statistics_calculator_unittest.cc b/modules/audio_coding/neteq/statistics_calculator_unittest.cc
new file mode 100644
index 0000000..0a4901d
--- /dev/null
+++ b/modules/audio_coding/neteq/statistics_calculator_unittest.cc
@@ -0,0 +1,107 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/statistics_calculator.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(LifetimeStatistics, TotalSamplesReceived) {
+  StatisticsCalculator stats;
+  for (int i = 0; i < 10; ++i) {
+    stats.IncreaseCounter(480, 48000);  // 10 ms at 48 kHz.
+  }
+  EXPECT_EQ(10 * 480u, stats.GetLifetimeStatistics().total_samples_received);
+}
+
+TEST(LifetimeStatistics, SamplesConcealed) {
+  StatisticsCalculator stats;
+  stats.ExpandedVoiceSamples(100, false);
+  stats.ExpandedNoiseSamples(17, false);
+  EXPECT_EQ(100u + 17u, stats.GetLifetimeStatistics().concealed_samples);
+}
+
+// This test verifies that a negative correction of concealed_samples does not
+// result in a decrease in the stats value (because stats-consuming applications
+// would not expect the value to decrease). Instead, the correction should be
+// made to future increments to the stat.
+TEST(LifetimeStatistics, SamplesConcealedCorrection) {
+  StatisticsCalculator stats;
+  stats.ExpandedVoiceSamples(100, false);
+  EXPECT_EQ(100u, stats.GetLifetimeStatistics().concealed_samples);
+  stats.ExpandedVoiceSamplesCorrection(-10);
+  // Do not subtract directly, but keep the correction for later.
+  EXPECT_EQ(100u, stats.GetLifetimeStatistics().concealed_samples);
+  stats.ExpandedVoiceSamplesCorrection(20);
+  // The total correction is 20 - 10.
+  EXPECT_EQ(110u, stats.GetLifetimeStatistics().concealed_samples);
+
+  // Also test correction done to the next ExpandedVoiceSamples call.
+  stats.ExpandedVoiceSamplesCorrection(-17);
+  EXPECT_EQ(110u, stats.GetLifetimeStatistics().concealed_samples);
+  stats.ExpandedVoiceSamples(100, false);
+  EXPECT_EQ(110u + 100u - 17u, stats.GetLifetimeStatistics().concealed_samples);
+}
+
+// This test verifies that neither "accelerate" nor "pre-emptive expand" reults
+// in a modification to concealed_samples stats. Only PLC operations (i.e.,
+// "expand" and "merge") should affect the stat.
+TEST(LifetimeStatistics, NoUpdateOnTimeStretch) {
+  StatisticsCalculator stats;
+  stats.ExpandedVoiceSamples(100, false);
+  stats.AcceleratedSamples(4711);
+  stats.PreemptiveExpandedSamples(17);
+  stats.ExpandedVoiceSamples(100, false);
+  EXPECT_EQ(200u, stats.GetLifetimeStatistics().concealed_samples);
+}
+
+TEST(StatisticsCalculator, ExpandedSamplesCorrection) {
+  StatisticsCalculator stats;
+  NetEqNetworkStatistics stats_output;
+  constexpr int kSampleRateHz = 48000;
+  constexpr int k10MsSamples = kSampleRateHz / 100;
+  constexpr int kPacketSizeMs = 20;
+  constexpr size_t kSamplesPerPacket = kPacketSizeMs * kSampleRateHz / 1000;
+  // Assume 2 packets in the buffer.
+  constexpr size_t kNumSamplesInBuffer = 2 * kSamplesPerPacket;
+
+  // Advance time by 10 ms.
+  stats.IncreaseCounter(k10MsSamples, kSampleRateHz);
+
+  stats.GetNetworkStatistics(kSampleRateHz, kNumSamplesInBuffer,
+                             kSamplesPerPacket, &stats_output);
+
+  EXPECT_EQ(0u, stats_output.expand_rate);
+  EXPECT_EQ(0u, stats_output.speech_expand_rate);
+
+  // Correct with a negative value.
+  stats.ExpandedVoiceSamplesCorrection(-100);
+  stats.ExpandedNoiseSamplesCorrection(-100);
+  stats.IncreaseCounter(k10MsSamples, kSampleRateHz);
+  stats.GetNetworkStatistics(kSampleRateHz, kNumSamplesInBuffer,
+                             kSamplesPerPacket, &stats_output);
+  // Expect no change, since negative values are disallowed.
+  EXPECT_EQ(0u, stats_output.expand_rate);
+  EXPECT_EQ(0u, stats_output.speech_expand_rate);
+
+  // Correct with a positive value.
+  stats.ExpandedVoiceSamplesCorrection(50);
+  stats.ExpandedNoiseSamplesCorrection(200);
+  stats.IncreaseCounter(k10MsSamples, kSampleRateHz);
+  stats.GetNetworkStatistics(kSampleRateHz, kNumSamplesInBuffer,
+                             kSamplesPerPacket, &stats_output);
+  // Calculate expected rates in Q14. Expand rate is noise + voice, while
+  // speech expand rate is only voice.
+  EXPECT_EQ(((50u + 200u) << 14) / k10MsSamples, stats_output.expand_rate);
+  EXPECT_EQ((50u << 14) / k10MsSamples, stats_output.speech_expand_rate);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/sync_buffer.cc b/modules/audio_coding/neteq/sync_buffer.cc
new file mode 100644
index 0000000..28d7649
--- /dev/null
+++ b/modules/audio_coding/neteq/sync_buffer.cc
@@ -0,0 +1,108 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>  // Access to min.
+
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+size_t SyncBuffer::FutureLength() const {
+  return Size() - next_index_;
+}
+
+void SyncBuffer::PushBack(const AudioMultiVector& append_this) {
+  size_t samples_added = append_this.Size();
+  AudioMultiVector::PushBack(append_this);
+  AudioMultiVector::PopFront(samples_added);
+  if (samples_added <= next_index_) {
+    next_index_ -= samples_added;
+  } else {
+    // This means that we are pushing out future data that was never used.
+//    assert(false);
+    // TODO(hlundin): This assert must be disabled to support 60 ms frames.
+    // This should not happen even for 60 ms frames, but it does. Investigate
+    // why.
+    next_index_ = 0;
+  }
+  dtmf_index_ -= std::min(dtmf_index_, samples_added);
+}
+
+void SyncBuffer::PushFrontZeros(size_t length) {
+  InsertZerosAtIndex(length, 0);
+}
+
+void SyncBuffer::InsertZerosAtIndex(size_t length, size_t position) {
+  position = std::min(position, Size());
+  length = std::min(length, Size() - position);
+  AudioMultiVector::PopBack(length);
+  for (size_t channel = 0; channel < Channels(); ++channel) {
+    channels_[channel]->InsertZerosAt(length, position);
+  }
+  if (next_index_ >= position) {
+    // We are moving the |next_index_| sample.
+    set_next_index(next_index_ + length);  // Overflow handled by subfunction.
+  }
+  if (dtmf_index_ > 0 && dtmf_index_ >= position) {
+    // We are moving the |dtmf_index_| sample.
+    set_dtmf_index(dtmf_index_ + length);  // Overflow handled by subfunction.
+  }
+}
+
+void SyncBuffer::ReplaceAtIndex(const AudioMultiVector& insert_this,
+                                size_t length,
+                                size_t position) {
+  position = std::min(position, Size());  // Cap |position| in the valid range.
+  length = std::min(length, Size() - position);
+  AudioMultiVector::OverwriteAt(insert_this, length, position);
+}
+
+void SyncBuffer::ReplaceAtIndex(const AudioMultiVector& insert_this,
+                                size_t position) {
+  ReplaceAtIndex(insert_this, insert_this.Size(), position);
+}
+
+void SyncBuffer::GetNextAudioInterleaved(size_t requested_len,
+                                         AudioFrame* output) {
+  RTC_DCHECK(output);
+  const size_t samples_to_read = std::min(FutureLength(), requested_len);
+  output->ResetWithoutMuting();
+  const size_t tot_samples_read =
+      ReadInterleavedFromIndex(next_index_, samples_to_read,
+                               output->mutable_data());
+  const size_t samples_read_per_channel = tot_samples_read / Channels();
+  next_index_ += samples_read_per_channel;
+  output->num_channels_ = Channels();
+  output->samples_per_channel_ = samples_read_per_channel;
+}
+
+void SyncBuffer::IncreaseEndTimestamp(uint32_t increment) {
+  end_timestamp_ += increment;
+}
+
+void SyncBuffer::Flush() {
+  Zeros(Size());
+  next_index_ = Size();
+  end_timestamp_ = 0;
+  dtmf_index_ = 0;
+}
+
+void SyncBuffer::set_next_index(size_t value) {
+  // Cannot set |next_index_| larger than the size of the buffer.
+  next_index_ = std::min(value, Size());
+}
+
+void SyncBuffer::set_dtmf_index(size_t value) {
+  // Cannot set |dtmf_index_| larger than the size of the buffer.
+  dtmf_index_ = std::min(value, Size());
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/sync_buffer.h b/modules/audio_coding/neteq/sync_buffer.h
new file mode 100644
index 0000000..ab9ff52
--- /dev/null
+++ b/modules/audio_coding/neteq/sync_buffer.h
@@ -0,0 +1,101 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_SYNC_BUFFER_H_
+#define MODULES_AUDIO_CODING_NETEQ_SYNC_BUFFER_H_
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class SyncBuffer : public AudioMultiVector {
+ public:
+  SyncBuffer(size_t channels, size_t length)
+      : AudioMultiVector(channels, length),
+        next_index_(length),
+        end_timestamp_(0),
+        dtmf_index_(0) {}
+
+  // Returns the number of samples yet to play out from the buffer.
+  size_t FutureLength() const;
+
+  // Adds the contents of |append_this| to the back of the SyncBuffer. Removes
+  // the same number of samples from the beginning of the SyncBuffer, to
+  // maintain a constant buffer size. The |next_index_| is updated to reflect
+  // the move of the beginning of "future" data.
+  void PushBack(const AudioMultiVector& append_this) override;
+
+  // Adds |length| zeros to the beginning of each channel. Removes
+  // the same number of samples from the end of the SyncBuffer, to
+  // maintain a constant buffer size. The |next_index_| is updated to reflect
+  // the move of the beginning of "future" data.
+  // Note that this operation may delete future samples that are waiting to
+  // be played.
+  void PushFrontZeros(size_t length);
+
+  // Inserts |length| zeros into each channel at index |position|. The size of
+  // the SyncBuffer is kept constant, which means that the last |length|
+  // elements in each channel will be purged.
+  virtual void InsertZerosAtIndex(size_t length, size_t position);
+
+  // Overwrites each channel in this SyncBuffer with values taken from
+  // |insert_this|. The values are taken from the beginning of |insert_this| and
+  // are inserted starting at |position|. |length| values are written into each
+  // channel. The size of the SyncBuffer is kept constant. That is, if |length|
+  // and |position| are selected such that the new data would extend beyond the
+  // end of the current SyncBuffer, the buffer is not extended.
+  // The |next_index_| is not updated.
+  virtual void ReplaceAtIndex(const AudioMultiVector& insert_this,
+                              size_t length,
+                              size_t position);
+
+  // Same as the above method, but where all of |insert_this| is written (with
+  // the same constraints as above, that the SyncBuffer is not extended).
+  virtual void ReplaceAtIndex(const AudioMultiVector& insert_this,
+                              size_t position);
+
+  // Reads |requested_len| samples from each channel and writes them interleaved
+  // into |output|. The |next_index_| is updated to point to the sample to read
+  // next time. The AudioFrame |output| is first reset, and the |data_|,
+  // |num_channels_|, and |samples_per_channel_| fields are updated.
+  void GetNextAudioInterleaved(size_t requested_len, AudioFrame* output);
+
+  // Adds |increment| to |end_timestamp_|.
+  void IncreaseEndTimestamp(uint32_t increment);
+
+  // Flushes the buffer. The buffer will contain only zeros after the flush, and
+  // |next_index_| will point to the end, like when the buffer was first
+  // created.
+  void Flush();
+
+  const AudioVector& Channel(size_t n) const { return *channels_[n]; }
+  AudioVector& Channel(size_t n) { return *channels_[n]; }
+
+  // Accessors and mutators.
+  size_t next_index() const { return next_index_; }
+  void set_next_index(size_t value);
+  uint32_t end_timestamp() const { return end_timestamp_; }
+  void set_end_timestamp(uint32_t value) { end_timestamp_ = value; }
+  size_t dtmf_index() const { return dtmf_index_; }
+  void set_dtmf_index(size_t value);
+
+ private:
+  size_t next_index_;
+  uint32_t end_timestamp_;  // The timestamp of the last sample in the buffer.
+  size_t dtmf_index_;  // Index to the first non-DTMF sample in the buffer.
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(SyncBuffer);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_SYNC_BUFFER_H_
diff --git a/modules/audio_coding/neteq/sync_buffer_unittest.cc b/modules/audio_coding/neteq/sync_buffer_unittest.cc
new file mode 100644
index 0000000..29c3bca
--- /dev/null
+++ b/modules/audio_coding/neteq/sync_buffer_unittest.cc
@@ -0,0 +1,174 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/sync_buffer.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(SyncBuffer, CreateAndDestroy) {
+  // Create a SyncBuffer with two channels and 10 samples each.
+  static const size_t kLen = 10;
+  static const size_t kChannels = 2;
+  SyncBuffer sync_buffer(kChannels, kLen);
+  EXPECT_EQ(kChannels, sync_buffer.Channels());
+  EXPECT_EQ(kLen, sync_buffer.Size());
+  // When the buffer is empty, the next index to play out is at the end.
+  EXPECT_EQ(kLen, sync_buffer.next_index());
+  // Verify that all elements are zero.
+  for (size_t channel = 0; channel < kChannels; ++channel) {
+    for (size_t i = 0; i < kLen; ++i) {
+      EXPECT_EQ(0, sync_buffer[channel][i]);
+    }
+  }
+}
+
+TEST(SyncBuffer, SetNextIndex) {
+  // Create a SyncBuffer with two channels and 100 samples each.
+  static const size_t kLen = 100;
+  static const size_t kChannels = 2;
+  SyncBuffer sync_buffer(kChannels, kLen);
+  sync_buffer.set_next_index(0);
+  EXPECT_EQ(0u, sync_buffer.next_index());
+  sync_buffer.set_next_index(kLen / 2);
+  EXPECT_EQ(kLen / 2, sync_buffer.next_index());
+  sync_buffer.set_next_index(kLen);
+  EXPECT_EQ(kLen, sync_buffer.next_index());
+  // Try to set larger than the buffer size; should cap at buffer size.
+  sync_buffer.set_next_index(kLen + 1);
+  EXPECT_EQ(kLen, sync_buffer.next_index());
+}
+
+TEST(SyncBuffer, PushBackAndFlush) {
+  // Create a SyncBuffer with two channels and 100 samples each.
+  static const size_t kLen = 100;
+  static const size_t kChannels = 2;
+  SyncBuffer sync_buffer(kChannels, kLen);
+  static const size_t kNewLen = 10;
+  AudioMultiVector new_data(kChannels, kNewLen);
+  // Populate |new_data|.
+  for (size_t channel = 0; channel < kChannels; ++channel) {
+    for (size_t i = 0; i < kNewLen; ++i) {
+      new_data[channel][i] = rtc::checked_cast<int16_t>(i);
+    }
+  }
+  // Push back |new_data| into |sync_buffer|. This operation should pop out
+  // data from the front of |sync_buffer|, so that the size of the buffer
+  // remains the same. The |next_index_| should also move with the same length.
+  sync_buffer.PushBack(new_data);
+  ASSERT_EQ(kLen, sync_buffer.Size());
+  // Verify that |next_index_| moved accordingly.
+  EXPECT_EQ(kLen - kNewLen, sync_buffer.next_index());
+  // Verify the new contents.
+  for (size_t channel = 0; channel < kChannels; ++channel) {
+    for (size_t i = 0; i < kNewLen; ++i) {
+      EXPECT_EQ(new_data[channel][i],
+                sync_buffer[channel][sync_buffer.next_index() + i]);
+    }
+  }
+
+  // Now flush the buffer, and verify that it is all zeros, and that next_index
+  // points to the end.
+  sync_buffer.Flush();
+  ASSERT_EQ(kLen, sync_buffer.Size());
+  EXPECT_EQ(kLen, sync_buffer.next_index());
+  for (size_t channel = 0; channel < kChannels; ++channel) {
+    for (size_t i = 0; i < kLen; ++i) {
+      EXPECT_EQ(0, sync_buffer[channel][i]);
+    }
+  }
+}
+
+TEST(SyncBuffer, PushFrontZeros) {
+  // Create a SyncBuffer with two channels and 100 samples each.
+  static const size_t kLen = 100;
+  static const size_t kChannels = 2;
+  SyncBuffer sync_buffer(kChannels, kLen);
+  static const size_t kNewLen = 10;
+  AudioMultiVector new_data(kChannels, kNewLen);
+  // Populate |new_data|.
+  for (size_t channel = 0; channel < kChannels; ++channel) {
+    for (size_t i = 0; i < kNewLen; ++i) {
+      new_data[channel][i] = rtc::checked_cast<int16_t>(1000 + i);
+    }
+  }
+  sync_buffer.PushBack(new_data);
+  EXPECT_EQ(kLen, sync_buffer.Size());
+
+  // Push |kNewLen| - 1 zeros into each channel in the front of the SyncBuffer.
+  sync_buffer.PushFrontZeros(kNewLen - 1);
+  EXPECT_EQ(kLen, sync_buffer.Size());  // Size should remain the same.
+  // Verify that |next_index_| moved accordingly. Should be at the end - 1.
+  EXPECT_EQ(kLen - 1, sync_buffer.next_index());
+  // Verify the zeros.
+  for (size_t channel = 0; channel < kChannels; ++channel) {
+    for (size_t i = 0; i < kNewLen - 1; ++i) {
+      EXPECT_EQ(0, sync_buffer[channel][i]);
+    }
+  }
+  // Verify that the correct data is at the end of the SyncBuffer.
+  for (size_t channel = 0; channel < kChannels; ++channel) {
+    EXPECT_EQ(1000, sync_buffer[channel][sync_buffer.next_index()]);
+  }
+}
+
+TEST(SyncBuffer, GetNextAudioInterleaved) {
+  // Create a SyncBuffer with two channels and 100 samples each.
+  static const size_t kLen = 100;
+  static const size_t kChannels = 2;
+  SyncBuffer sync_buffer(kChannels, kLen);
+  static const size_t kNewLen = 10;
+  AudioMultiVector new_data(kChannels, kNewLen);
+  // Populate |new_data|.
+  for (size_t channel = 0; channel < kChannels; ++channel) {
+    for (size_t i = 0; i < kNewLen; ++i) {
+      new_data[channel][i] = rtc::checked_cast<int16_t>(i);
+    }
+  }
+  // Push back |new_data| into |sync_buffer|. This operation should pop out
+  // data from the front of |sync_buffer|, so that the size of the buffer
+  // remains the same. The |next_index_| should also move with the same length.
+  sync_buffer.PushBack(new_data);
+
+  // Read to interleaved output. Read in two batches, where each read operation
+  // should automatically update the |net_index_| in the SyncBuffer.
+  // Note that |samples_read| is the number of samples read from each channel.
+  // That is, the number of samples written to |output| is
+  // |samples_read| * |kChannels|.
+  AudioFrame output1;
+  sync_buffer.GetNextAudioInterleaved(kNewLen / 2, &output1);
+  EXPECT_EQ(kChannels, output1.num_channels_);
+  EXPECT_EQ(kNewLen / 2, output1.samples_per_channel_);
+
+  AudioFrame output2;
+  sync_buffer.GetNextAudioInterleaved(kNewLen / 2, &output2);
+  EXPECT_EQ(kChannels, output2.num_channels_);
+  EXPECT_EQ(kNewLen / 2, output2.samples_per_channel_);
+
+  // Verify the data.
+  const int16_t* output_ptr = output1.data();
+  for (size_t i = 0; i < kNewLen / 2; ++i) {
+    for (size_t channel = 0; channel < kChannels; ++channel) {
+      EXPECT_EQ(new_data[channel][i], *output_ptr);
+      ++output_ptr;
+    }
+  }
+  output_ptr = output2.data();
+  for (size_t i = kNewLen / 2; i < kNewLen; ++i) {
+    for (size_t channel = 0; channel < kChannels; ++channel) {
+      EXPECT_EQ(new_data[channel][i], *output_ptr);
+      ++output_ptr;
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tick_timer.cc b/modules/audio_coding/neteq/tick_timer.cc
new file mode 100644
index 0000000..17f83b1
--- /dev/null
+++ b/modules/audio_coding/neteq/tick_timer.cc
@@ -0,0 +1,25 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tick_timer.h"
+
+namespace webrtc {
+
+TickTimer::Stopwatch::Stopwatch(const TickTimer& ticktimer)
+    : ticktimer_(ticktimer), starttick_(ticktimer.ticks()) {}
+
+TickTimer::Countdown::Countdown(const TickTimer& ticktimer,
+                                uint64_t ticks_to_count)
+    : stopwatch_(ticktimer.GetNewStopwatch()),
+      ticks_to_count_(ticks_to_count) {}
+
+TickTimer::Countdown::~Countdown() = default;
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tick_timer.h b/modules/audio_coding/neteq/tick_timer.h
new file mode 100644
index 0000000..4a9ade5
--- /dev/null
+++ b/modules/audio_coding/neteq/tick_timer.h
@@ -0,0 +1,110 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TICK_TIMER_H_
+#define MODULES_AUDIO_CODING_NETEQ_TICK_TIMER_H_
+
+#include <memory>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Implements a time counter. The counter is advanced with the Increment()
+// methods, and is queried with the ticks() accessor. It is assumed that one
+// "tick" och the counter corresponds to 10 ms.
+// A TickTimer object can provide two types of associated time-measuring
+// objects: Stopwatch and Countdown.
+class TickTimer {
+ public:
+  // Stopwatch measures time elapsed since it was started, by querying the
+  // associated TickTimer for the current time. The intended use is to request a
+  // new Stopwatch object from a TickTimer object with the GetNewStopwatch()
+  // method. Note: since the Stopwatch object contains a reference to the
+  // TickTimer it is associated with, it cannot outlive the TickTimer.
+  class Stopwatch {
+   public:
+    explicit Stopwatch(const TickTimer& ticktimer);
+
+    uint64_t ElapsedTicks() const { return ticktimer_.ticks() - starttick_; }
+
+    uint64_t ElapsedMs() const {
+      const uint64_t elapsed_ticks = ticktimer_.ticks() - starttick_;
+      const int ms_per_tick = ticktimer_.ms_per_tick();
+      return elapsed_ticks < UINT64_MAX / ms_per_tick
+                 ? elapsed_ticks * ms_per_tick
+                 : UINT64_MAX;
+    }
+
+   private:
+    const TickTimer& ticktimer_;
+    const uint64_t starttick_;
+  };
+
+  // Countdown counts down from a given start value with each tick of the
+  // associated TickTimer, until zero is reached. The Finished() method will
+  // return true if zero has been reached, false otherwise. The intended use is
+  // to request a new Countdown object from a TickTimer object with the
+  // GetNewCountdown() method. Note: since the Countdown object contains a
+  // reference to the TickTimer it is associated with, it cannot outlive the
+  // TickTimer.
+  class Countdown {
+   public:
+    Countdown(const TickTimer& ticktimer, uint64_t ticks_to_count);
+
+    ~Countdown();
+
+    bool Finished() const {
+      return stopwatch_->ElapsedTicks() >= ticks_to_count_;
+    }
+
+   private:
+    const std::unique_ptr<Stopwatch> stopwatch_;
+    const uint64_t ticks_to_count_;
+  };
+
+  TickTimer() : TickTimer(10) {}
+  explicit TickTimer(int ms_per_tick) : ms_per_tick_(ms_per_tick) {
+    RTC_DCHECK_GT(ms_per_tick_, 0);
+  }
+
+  void Increment() { ++ticks_; }
+
+  // Mainly intended for testing.
+  void Increment(uint64_t x) { ticks_ += x; }
+
+  uint64_t ticks() const { return ticks_; }
+
+  int ms_per_tick() const { return ms_per_tick_; }
+
+  // Returns a new Stopwatch object, based on the current TickTimer. Note that
+  // the new Stopwatch object contains a reference to the current TickTimer,
+  // and must therefore not outlive the TickTimer.
+  std::unique_ptr<Stopwatch> GetNewStopwatch() const {
+    return std::unique_ptr<Stopwatch>(new Stopwatch(*this));
+  }
+
+  // Returns a new Countdown object, based on the current TickTimer. Note that
+  // the new Countdown object contains a reference to the current TickTimer,
+  // and must therefore not outlive the TickTimer.
+  std::unique_ptr<Countdown> GetNewCountdown(uint64_t ticks_to_count) const {
+    return std::unique_ptr<Countdown>(new Countdown(*this, ticks_to_count));
+  }
+
+ private:
+  uint64_t ticks_ = 0;
+  const int ms_per_tick_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(TickTimer);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TICK_TIMER_H_
diff --git a/modules/audio_coding/neteq/tick_timer_unittest.cc b/modules/audio_coding/neteq/tick_timer_unittest.cc
new file mode 100644
index 0000000..875f04d
--- /dev/null
+++ b/modules/audio_coding/neteq/tick_timer_unittest.cc
@@ -0,0 +1,135 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "modules/audio_coding/neteq/tick_timer.h"
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+// Verify that the default value for ms_per_tick is 10.
+TEST(TickTimer, DefaultMsPerTick) {
+  TickTimer tt;
+  EXPECT_EQ(10, tt.ms_per_tick());
+}
+
+TEST(TickTimer, CustomMsPerTick) {
+  TickTimer tt(17);
+  EXPECT_EQ(17, tt.ms_per_tick());
+}
+
+TEST(TickTimer, Increment) {
+  TickTimer tt;
+  EXPECT_EQ(0u, tt.ticks());
+  tt.Increment();
+  EXPECT_EQ(1u, tt.ticks());
+
+  for (int i = 0; i < 17; ++i) {
+    tt.Increment();
+  }
+  EXPECT_EQ(18u, tt.ticks());
+
+  tt.Increment(17);
+  EXPECT_EQ(35u, tt.ticks());
+}
+
+TEST(TickTimer, WrapAround) {
+  TickTimer tt;
+  tt.Increment(UINT64_MAX);
+  EXPECT_EQ(UINT64_MAX, tt.ticks());
+  tt.Increment();
+  EXPECT_EQ(0u, tt.ticks());
+}
+
+TEST(TickTimer, Stopwatch) {
+  TickTimer tt;
+  // Increment it a "random" number of steps.
+  tt.Increment(17);
+
+  std::unique_ptr<TickTimer::Stopwatch> sw = tt.GetNewStopwatch();
+  ASSERT_TRUE(sw);
+
+  EXPECT_EQ(0u, sw->ElapsedTicks());  // Starts at zero.
+  EXPECT_EQ(0u, sw->ElapsedMs());
+  tt.Increment();
+  EXPECT_EQ(1u, sw->ElapsedTicks());  // Increases with the TickTimer.
+  EXPECT_EQ(10u, sw->ElapsedMs());
+}
+
+TEST(TickTimer, StopwatchWrapAround) {
+  TickTimer tt;
+  tt.Increment(UINT64_MAX);
+
+  std::unique_ptr<TickTimer::Stopwatch> sw = tt.GetNewStopwatch();
+  ASSERT_TRUE(sw);
+
+  tt.Increment();
+  EXPECT_EQ(0u, tt.ticks());
+  EXPECT_EQ(1u, sw->ElapsedTicks());
+  EXPECT_EQ(10u, sw->ElapsedMs());
+
+  tt.Increment();
+  EXPECT_EQ(1u, tt.ticks());
+  EXPECT_EQ(2u, sw->ElapsedTicks());
+  EXPECT_EQ(20u, sw->ElapsedMs());
+}
+
+TEST(TickTimer, StopwatchMsOverflow) {
+  TickTimer tt;
+  std::unique_ptr<TickTimer::Stopwatch> sw = tt.GetNewStopwatch();
+  ASSERT_TRUE(sw);
+
+  tt.Increment(UINT64_MAX / 10);
+  EXPECT_EQ(UINT64_MAX, sw->ElapsedMs());
+
+  tt.Increment();
+  EXPECT_EQ(UINT64_MAX, sw->ElapsedMs());
+
+  tt.Increment(UINT64_MAX - tt.ticks());
+  EXPECT_EQ(UINT64_MAX, tt.ticks());
+  EXPECT_EQ(UINT64_MAX, sw->ElapsedMs());
+}
+
+TEST(TickTimer, StopwatchWithCustomTicktime) {
+  const int kMsPerTick = 17;
+  TickTimer tt(kMsPerTick);
+  std::unique_ptr<TickTimer::Stopwatch> sw = tt.GetNewStopwatch();
+  ASSERT_TRUE(sw);
+
+  EXPECT_EQ(0u, sw->ElapsedMs());
+  tt.Increment();
+  EXPECT_EQ(static_cast<uint64_t>(kMsPerTick), sw->ElapsedMs());
+}
+
+TEST(TickTimer, Countdown) {
+  TickTimer tt;
+  // Increment it a "random" number of steps.
+  tt.Increment(4711);
+
+  std::unique_ptr<TickTimer::Countdown> cd = tt.GetNewCountdown(17);
+  ASSERT_TRUE(cd);
+
+  EXPECT_FALSE(cd->Finished());
+  tt.Increment();
+  EXPECT_FALSE(cd->Finished());
+
+  tt.Increment(16);  // Total increment is now 17.
+  EXPECT_TRUE(cd->Finished());
+
+  // Further increments do not change the state.
+  tt.Increment();
+  EXPECT_TRUE(cd->Finished());
+  tt.Increment(1234);
+  EXPECT_TRUE(cd->Finished());
+}
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/time_stretch.cc b/modules/audio_coding/neteq/time_stretch.cc
new file mode 100644
index 0000000..8a1bfa2
--- /dev/null
+++ b/modules/audio_coding/neteq/time_stretch.cc
@@ -0,0 +1,214 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/time_stretch.h"
+
+#include <algorithm>  // min, max
+#include <memory>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/neteq/background_noise.h"
+#include "modules/audio_coding/neteq/cross_correlation.h"
+#include "modules/audio_coding/neteq/dsp_helper.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+TimeStretch::ReturnCodes TimeStretch::Process(const int16_t* input,
+                                              size_t input_len,
+                                              bool fast_mode,
+                                              AudioMultiVector* output,
+                                              size_t* length_change_samples) {
+  // Pre-calculate common multiplication with |fs_mult_|.
+  size_t fs_mult_120 =
+      static_cast<size_t>(fs_mult_ * 120);  // Corresponds to 15 ms.
+
+  const int16_t* signal;
+  std::unique_ptr<int16_t[]> signal_array;
+  size_t signal_len;
+  if (num_channels_ == 1) {
+    signal = input;
+    signal_len = input_len;
+  } else {
+    // We want |signal| to be only the first channel of |input|, which is
+    // interleaved. Thus, we take the first sample, skip forward |num_channels|
+    // samples, and continue like that.
+    signal_len = input_len / num_channels_;
+    signal_array.reset(new int16_t[signal_len]);
+    signal = signal_array.get();
+    size_t j = master_channel_;
+    for (size_t i = 0; i < signal_len; ++i) {
+      signal_array[i] = input[j];
+      j += num_channels_;
+    }
+  }
+
+  // Find maximum absolute value of input signal.
+  max_input_value_ = WebRtcSpl_MaxAbsValueW16(signal, signal_len);
+
+  // Downsample to 4 kHz sample rate and calculate auto-correlation.
+  DspHelper::DownsampleTo4kHz(signal, signal_len, kDownsampledLen,
+                              sample_rate_hz_, true /* compensate delay*/,
+                              downsampled_input_);
+  AutoCorrelation();
+
+  // Find the strongest correlation peak.
+  static const size_t kNumPeaks = 1;
+  size_t peak_index;
+  int16_t peak_value;
+  DspHelper::PeakDetection(auto_correlation_, kCorrelationLen, kNumPeaks,
+                           fs_mult_, &peak_index, &peak_value);
+  // Assert that |peak_index| stays within boundaries.
+  assert(peak_index <= (2 * kCorrelationLen - 1) * fs_mult_);
+
+  // Compensate peak_index for displaced starting position. The displacement
+  // happens in AutoCorrelation(). Here, |kMinLag| is in the down-sampled 4 kHz
+  // domain, while the |peak_index| is in the original sample rate; hence, the
+  // multiplication by fs_mult_ * 2.
+  peak_index += kMinLag * fs_mult_ * 2;
+  // Assert that |peak_index| stays within boundaries.
+  assert(peak_index >= static_cast<size_t>(20 * fs_mult_));
+  assert(peak_index <= 20 * fs_mult_ + (2 * kCorrelationLen - 1) * fs_mult_);
+
+  // Calculate scaling to ensure that |peak_index| samples can be square-summed
+  // without overflowing.
+  int scaling = 31 - WebRtcSpl_NormW32(max_input_value_ * max_input_value_) -
+      WebRtcSpl_NormW32(static_cast<int32_t>(peak_index));
+  scaling = std::max(0, scaling);
+
+  // |vec1| starts at 15 ms minus one pitch period.
+  const int16_t* vec1 = &signal[fs_mult_120 - peak_index];
+  // |vec2| start at 15 ms.
+  const int16_t* vec2 = &signal[fs_mult_120];
+  // Calculate energies for |vec1| and |vec2|, assuming they both contain
+  // |peak_index| samples.
+  int32_t vec1_energy =
+      WebRtcSpl_DotProductWithScale(vec1, vec1, peak_index, scaling);
+  int32_t vec2_energy =
+      WebRtcSpl_DotProductWithScale(vec2, vec2, peak_index, scaling);
+
+  // Calculate cross-correlation between |vec1| and |vec2|.
+  int32_t cross_corr =
+      WebRtcSpl_DotProductWithScale(vec1, vec2, peak_index, scaling);
+
+  // Check if the signal seems to be active speech or not (simple VAD).
+  bool active_speech = SpeechDetection(vec1_energy, vec2_energy, peak_index,
+                                       scaling);
+
+  int16_t best_correlation;
+  if (!active_speech) {
+    SetParametersForPassiveSpeech(signal_len, &best_correlation, &peak_index);
+  } else {
+    // Calculate correlation:
+    // cross_corr / sqrt(vec1_energy * vec2_energy).
+
+    // Start with calculating scale values.
+    int energy1_scale = std::max(0, 16 - WebRtcSpl_NormW32(vec1_energy));
+    int energy2_scale = std::max(0, 16 - WebRtcSpl_NormW32(vec2_energy));
+
+    // Make sure total scaling is even (to simplify scale factor after sqrt).
+    if ((energy1_scale + energy2_scale) & 1) {
+      // The sum is odd.
+      energy1_scale += 1;
+    }
+
+    // Scale energies to int16_t.
+    int16_t vec1_energy_int16 =
+        static_cast<int16_t>(vec1_energy >> energy1_scale);
+    int16_t vec2_energy_int16 =
+        static_cast<int16_t>(vec2_energy >> energy2_scale);
+
+    // Calculate square-root of energy product.
+    int16_t sqrt_energy_prod = WebRtcSpl_SqrtFloor(vec1_energy_int16 *
+                                                   vec2_energy_int16);
+
+    // Calculate cross_corr / sqrt(en1*en2) in Q14.
+    int temp_scale = 14 - (energy1_scale + energy2_scale) / 2;
+    cross_corr = WEBRTC_SPL_SHIFT_W32(cross_corr, temp_scale);
+    cross_corr = std::max(0, cross_corr);  // Don't use if negative.
+    best_correlation = WebRtcSpl_DivW32W16(cross_corr, sqrt_energy_prod);
+    // Make sure |best_correlation| is no larger than 1 in Q14.
+    best_correlation = std::min(static_cast<int16_t>(16384), best_correlation);
+  }
+
+
+  // Check accelerate criteria and stretch the signal.
+  ReturnCodes return_value =
+      CheckCriteriaAndStretch(input, input_len, peak_index, best_correlation,
+                              active_speech, fast_mode, output);
+  switch (return_value) {
+    case kSuccess:
+      *length_change_samples = peak_index;
+      break;
+    case kSuccessLowEnergy:
+      *length_change_samples = peak_index;
+      break;
+    case kNoStretch:
+    case kError:
+      *length_change_samples = 0;
+      break;
+  }
+  return return_value;
+}
+
+void TimeStretch::AutoCorrelation() {
+  // Calculate correlation from lag kMinLag to lag kMaxLag in 4 kHz domain.
+  int32_t auto_corr[kCorrelationLen];
+  CrossCorrelationWithAutoShift(
+      &downsampled_input_[kMaxLag], &downsampled_input_[kMaxLag - kMinLag],
+      kCorrelationLen, kMaxLag - kMinLag, -1, auto_corr);
+
+  // Normalize correlation to 14 bits and write to |auto_correlation_|.
+  int32_t max_corr = WebRtcSpl_MaxAbsValueW32(auto_corr, kCorrelationLen);
+  int scaling = std::max(0, 17 - WebRtcSpl_NormW32(max_corr));
+  WebRtcSpl_VectorBitShiftW32ToW16(auto_correlation_, kCorrelationLen,
+                                   auto_corr, scaling);
+}
+
+bool TimeStretch::SpeechDetection(int32_t vec1_energy, int32_t vec2_energy,
+                                  size_t peak_index, int scaling) const {
+  // Check if the signal seems to be active speech or not (simple VAD).
+  // If (vec1_energy + vec2_energy) / (2 * peak_index) <=
+  // 8 * background_noise_energy, then we say that the signal contains no
+  // active speech.
+  // Rewrite the inequality as:
+  // (vec1_energy + vec2_energy) / 16 <= peak_index * background_noise_energy.
+  // The two sides of the inequality will be denoted |left_side| and
+  // |right_side|.
+  int32_t left_side = rtc::saturated_cast<int32_t>(
+      (static_cast<int64_t>(vec1_energy) + vec2_energy) / 16);
+  int32_t right_side;
+  if (background_noise_.initialized()) {
+    right_side = background_noise_.Energy(master_channel_);
+  } else {
+    // If noise parameters have not been estimated, use a fixed threshold.
+    right_side = 75000;
+  }
+  int right_scale = 16 - WebRtcSpl_NormW32(right_side);
+  right_scale = std::max(0, right_scale);
+  left_side = left_side >> right_scale;
+  right_side =
+      rtc::dchecked_cast<int32_t>(peak_index) * (right_side >> right_scale);
+
+  // Scale |left_side| properly before comparing with |right_side|.
+  // (|scaling| is the scale factor before energy calculation, thus the scale
+  // factor for the energy is 2 * scaling.)
+  if (WebRtcSpl_NormW32(left_side) < 2 * scaling) {
+    // Cannot scale only |left_side|, must scale |right_side| too.
+    int temp_scale = WebRtcSpl_NormW32(left_side);
+    left_side = left_side << temp_scale;
+    right_side = right_side >> (2 * scaling - temp_scale);
+  } else {
+    left_side = left_side << 2 * scaling;
+  }
+  return left_side > right_side;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/time_stretch.h b/modules/audio_coding/neteq/time_stretch.h
new file mode 100644
index 0000000..ace10cd
--- /dev/null
+++ b/modules/audio_coding/neteq/time_stretch.h
@@ -0,0 +1,116 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TIME_STRETCH_H_
+#define MODULES_AUDIO_CODING_NETEQ_TIME_STRETCH_H_
+
+#include <assert.h>
+#include <string.h>  // memset, size_t
+
+#include "modules/audio_coding/neteq/audio_multi_vector.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Forward declarations.
+class BackgroundNoise;
+
+// This is the base class for Accelerate and PreemptiveExpand. This class
+// cannot be instantiated, but must be used through either of the derived
+// classes.
+class TimeStretch {
+ public:
+  enum ReturnCodes {
+    kSuccess = 0,
+    kSuccessLowEnergy = 1,
+    kNoStretch = 2,
+    kError = -1
+  };
+
+  TimeStretch(int sample_rate_hz, size_t num_channels,
+              const BackgroundNoise& background_noise)
+      : sample_rate_hz_(sample_rate_hz),
+        fs_mult_(sample_rate_hz / 8000),
+        num_channels_(num_channels),
+        master_channel_(0),  // First channel is master.
+        background_noise_(background_noise),
+        max_input_value_(0) {
+    assert(sample_rate_hz_ == 8000 ||
+           sample_rate_hz_ == 16000 ||
+           sample_rate_hz_ == 32000 ||
+           sample_rate_hz_ == 48000);
+    assert(num_channels_ > 0);
+    assert(master_channel_ < num_channels_);
+    memset(auto_correlation_, 0, sizeof(auto_correlation_));
+  }
+
+  virtual ~TimeStretch() {}
+
+  // This method performs the processing common to both Accelerate and
+  // PreemptiveExpand.
+  ReturnCodes Process(const int16_t* input,
+                      size_t input_len,
+                      bool fast_mode,
+                      AudioMultiVector* output,
+                      size_t* length_change_samples);
+
+ protected:
+  // Sets the parameters |best_correlation| and |peak_index| to suitable
+  // values when the signal contains no active speech. This method must be
+  // implemented by the sub-classes.
+  virtual void SetParametersForPassiveSpeech(size_t input_length,
+                                             int16_t* best_correlation,
+                                             size_t* peak_index) const = 0;
+
+  // Checks the criteria for performing the time-stretching operation and,
+  // if possible, performs the time-stretching. This method must be implemented
+  // by the sub-classes.
+  virtual ReturnCodes CheckCriteriaAndStretch(
+      const int16_t* input,
+      size_t input_length,
+      size_t peak_index,
+      int16_t best_correlation,
+      bool active_speech,
+      bool fast_mode,
+      AudioMultiVector* output) const = 0;
+
+  static const size_t kCorrelationLen = 50;
+  static const size_t kLogCorrelationLen = 6;  // >= log2(kCorrelationLen).
+  static const size_t kMinLag = 10;
+  static const size_t kMaxLag = 60;
+  static const size_t kDownsampledLen = kCorrelationLen + kMaxLag;
+  static const int kCorrelationThreshold = 14746;  // 0.9 in Q14.
+
+  const int sample_rate_hz_;
+  const int fs_mult_;  // Sample rate multiplier = sample_rate_hz_ / 8000.
+  const size_t num_channels_;
+  const size_t master_channel_;
+  const BackgroundNoise& background_noise_;
+  int16_t max_input_value_;
+  int16_t downsampled_input_[kDownsampledLen];
+  // Adding 1 to the size of |auto_correlation_| because of how it is used
+  // by the peak-detection algorithm.
+  int16_t auto_correlation_[kCorrelationLen + 1];
+
+ private:
+  // Calculates the auto-correlation of |downsampled_input_| and writes the
+  // result to |auto_correlation_|.
+  void AutoCorrelation();
+
+  // Performs a simple voice-activity detection based on the input parameters.
+  bool SpeechDetection(int32_t vec1_energy, int32_t vec2_energy,
+                       size_t peak_index, int scaling) const;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(TimeStretch);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TIME_STRETCH_H_
diff --git a/modules/audio_coding/neteq/time_stretch_unittest.cc b/modules/audio_coding/neteq/time_stretch_unittest.cc
new file mode 100644
index 0000000..8d0f4d4
--- /dev/null
+++ b/modules/audio_coding/neteq/time_stretch_unittest.cc
@@ -0,0 +1,127 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for Accelerate and PreemptiveExpand classes.
+
+#include "modules/audio_coding/neteq/accelerate.h"
+#include "modules/audio_coding/neteq/preemptive_expand.h"
+
+#include <map>
+#include <memory>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_coding/neteq/background_noise.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+namespace {
+const size_t kNumChannels = 1;
+}
+
+TEST(TimeStretch, CreateAndDestroy) {
+  const int kSampleRate = 8000;
+  const int kOverlapSamples = 5 * kSampleRate / 8000;
+  BackgroundNoise bgn(kNumChannels);
+  Accelerate accelerate(kSampleRate, kNumChannels, bgn);
+  PreemptiveExpand preemptive_expand(
+      kSampleRate, kNumChannels, bgn, kOverlapSamples);
+}
+
+TEST(TimeStretch, CreateUsingFactory) {
+  const int kSampleRate = 8000;
+  const int kOverlapSamples = 5 * kSampleRate / 8000;
+  BackgroundNoise bgn(kNumChannels);
+
+  AccelerateFactory accelerate_factory;
+  Accelerate* accelerate =
+      accelerate_factory.Create(kSampleRate, kNumChannels, bgn);
+  EXPECT_TRUE(accelerate != NULL);
+  delete accelerate;
+
+  PreemptiveExpandFactory preemptive_expand_factory;
+  PreemptiveExpand* preemptive_expand = preemptive_expand_factory.Create(
+      kSampleRate, kNumChannels, bgn, kOverlapSamples);
+  EXPECT_TRUE(preemptive_expand != NULL);
+  delete preemptive_expand;
+}
+
+class TimeStretchTest : public ::testing::Test {
+ protected:
+  TimeStretchTest()
+      : input_file_(new test::InputAudioFile(
+            test::ResourcePath("audio_coding/testfile32kHz", "pcm"))),
+        sample_rate_hz_(32000),
+        block_size_(30 * sample_rate_hz_ / 1000),  // 30 ms
+        audio_(new int16_t[block_size_]),
+        background_noise_(kNumChannels) {
+    WebRtcSpl_Init();
+  }
+
+  const int16_t* Next30Ms() {
+    RTC_CHECK(input_file_->Read(block_size_, audio_.get()));
+    return audio_.get();
+  }
+
+  // Returns the total length change (in samples) that the accelerate operation
+  // resulted in during the run.
+  size_t TestAccelerate(size_t loops, bool fast_mode) {
+    Accelerate accelerate(sample_rate_hz_, kNumChannels, background_noise_);
+    size_t total_length_change = 0;
+    for (size_t i = 0; i < loops; ++i) {
+      AudioMultiVector output(kNumChannels);
+      size_t length_change;
+      UpdateReturnStats(accelerate.Process(Next30Ms(), block_size_, fast_mode,
+                                           &output, &length_change));
+      total_length_change += length_change;
+    }
+    return total_length_change;
+  }
+
+  void UpdateReturnStats(TimeStretch::ReturnCodes ret) {
+    switch (ret) {
+      case TimeStretch::kSuccess:
+      case TimeStretch::kSuccessLowEnergy:
+      case TimeStretch::kNoStretch:
+        ++return_stats_[ret];
+        break;
+      case TimeStretch::kError:
+        FAIL() << "Process returned an error";
+    }
+  }
+
+  std::unique_ptr<test::InputAudioFile> input_file_;
+  const int sample_rate_hz_;
+  const size_t block_size_;
+  std::unique_ptr<int16_t[]> audio_;
+  std::map<TimeStretch::ReturnCodes, int> return_stats_;
+  BackgroundNoise background_noise_;
+};
+
+TEST_F(TimeStretchTest, Accelerate) {
+  // TestAccelerate returns the total length change in samples.
+  EXPECT_EQ(15268U, TestAccelerate(100, false));
+  EXPECT_EQ(9, return_stats_[TimeStretch::kSuccess]);
+  EXPECT_EQ(58, return_stats_[TimeStretch::kSuccessLowEnergy]);
+  EXPECT_EQ(33, return_stats_[TimeStretch::kNoStretch]);
+}
+
+TEST_F(TimeStretchTest, AccelerateFastMode) {
+  // TestAccelerate returns the total length change in samples.
+  EXPECT_EQ(21400U, TestAccelerate(100, true));
+  EXPECT_EQ(31, return_stats_[TimeStretch::kSuccess]);
+  EXPECT_EQ(58, return_stats_[TimeStretch::kSuccessLowEnergy]);
+  EXPECT_EQ(11, return_stats_[TimeStretch::kNoStretch]);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/timestamp_scaler.cc b/modules/audio_coding/neteq/timestamp_scaler.cc
new file mode 100644
index 0000000..d7aa9fe
--- /dev/null
+++ b/modules/audio_coding/neteq/timestamp_scaler.cc
@@ -0,0 +1,87 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/timestamp_scaler.h"
+
+#include "modules/audio_coding/neteq/decoder_database.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+void TimestampScaler::Reset() {
+  first_packet_received_ = false;
+}
+
+void TimestampScaler::ToInternal(Packet* packet) {
+  if (!packet) {
+    return;
+  }
+  packet->timestamp = ToInternal(packet->timestamp, packet->payload_type);
+}
+
+void TimestampScaler::ToInternal(PacketList* packet_list) {
+  PacketList::iterator it;
+  for (it = packet_list->begin(); it != packet_list->end(); ++it) {
+    ToInternal(&(*it));
+  }
+}
+
+uint32_t TimestampScaler::ToInternal(uint32_t external_timestamp,
+                                     uint8_t rtp_payload_type) {
+  const DecoderDatabase::DecoderInfo* info =
+      decoder_database_.GetDecoderInfo(rtp_payload_type);
+  if (!info) {
+    // Payload type is unknown. Do not scale.
+    return external_timestamp;
+  }
+  if (!(info->IsComfortNoise() || info->IsDtmf())) {
+    // Do not change the timestamp scaling settings for DTMF or CNG.
+    numerator_ = info->SampleRateHz();
+    if (info->GetFormat().clockrate_hz == 0) {
+      // If the clockrate is invalid (i.e. with an old-style external codec)
+      // we cannot do any timestamp scaling.
+      denominator_ = numerator_;
+    } else {
+      denominator_ = info->GetFormat().clockrate_hz;
+    }
+  }
+  if (numerator_ != denominator_) {
+    // We have a scale factor != 1.
+    if (!first_packet_received_) {
+      external_ref_ = external_timestamp;
+      internal_ref_ = external_timestamp;
+      first_packet_received_ = true;
+    }
+    const int64_t external_diff = int64_t{external_timestamp} - external_ref_;
+    RTC_DCHECK_GT(denominator_, 0);
+    external_ref_ = external_timestamp;
+    internal_ref_ += (external_diff * numerator_) / denominator_;
+    return internal_ref_;
+  } else {
+    // No scaling.
+    return external_timestamp;
+  }
+}
+
+
+uint32_t TimestampScaler::ToExternal(uint32_t internal_timestamp) const {
+  if (!first_packet_received_ || (numerator_ == denominator_)) {
+    // Not initialized, or scale factor is 1.
+    return internal_timestamp;
+  } else {
+    const int64_t internal_diff = int64_t{internal_timestamp} - internal_ref_;
+    RTC_DCHECK_GT(numerator_, 0);
+    // Do not update references in this method.
+    // Switch |denominator_| and |numerator_| to convert the other way.
+    return external_ref_ + (internal_diff * denominator_) / numerator_;
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/timestamp_scaler.h b/modules/audio_coding/neteq/timestamp_scaler.h
new file mode 100644
index 0000000..f0b05d6
--- /dev/null
+++ b/modules/audio_coding/neteq/timestamp_scaler.h
@@ -0,0 +1,68 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TIMESTAMP_SCALER_H_
+#define MODULES_AUDIO_CODING_NETEQ_TIMESTAMP_SCALER_H_
+
+#include "modules/audio_coding/neteq/packet.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Forward declaration.
+class DecoderDatabase;
+
+// This class scales timestamps for codecs that need timestamp scaling.
+// This is done for codecs where one RTP timestamp does not correspond to
+// one sample.
+class TimestampScaler {
+ public:
+  explicit TimestampScaler(const DecoderDatabase& decoder_database)
+      : first_packet_received_(false),
+        numerator_(1),
+        denominator_(1),
+        external_ref_(0),
+        internal_ref_(0),
+        decoder_database_(decoder_database) {}
+
+  virtual ~TimestampScaler() {}
+
+  // Start over.
+  virtual void Reset();
+
+  // Scale the timestamp in |packet| from external to internal.
+  virtual void ToInternal(Packet* packet);
+
+  // Scale the timestamp for all packets in |packet_list| from external to
+  // internal.
+  virtual void ToInternal(PacketList* packet_list);
+
+  // Returns the internal equivalent of |external_timestamp|, given the
+  // RTP payload type |rtp_payload_type|.
+  virtual uint32_t ToInternal(uint32_t external_timestamp,
+                              uint8_t rtp_payload_type);
+
+  // Scales back to external timestamp. This is the inverse of ToInternal().
+  virtual uint32_t ToExternal(uint32_t internal_timestamp) const;
+
+ private:
+  bool first_packet_received_;
+  int numerator_;
+  int denominator_;
+  uint32_t external_ref_;
+  uint32_t internal_ref_;
+  const DecoderDatabase& decoder_database_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(TimestampScaler);
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TIMESTAMP_SCALER_H_
diff --git a/modules/audio_coding/neteq/timestamp_scaler_unittest.cc b/modules/audio_coding/neteq/timestamp_scaler_unittest.cc
new file mode 100644
index 0000000..b3c1bb0
--- /dev/null
+++ b/modules/audio_coding/neteq/timestamp_scaler_unittest.cc
@@ -0,0 +1,315 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "modules/audio_coding/neteq/mock/mock_decoder_database.h"
+#include "modules/audio_coding/neteq/packet.h"
+#include "modules/audio_coding/neteq/timestamp_scaler.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::Return;
+using ::testing::ReturnNull;
+using ::testing::_;
+
+namespace webrtc {
+
+TEST(TimestampScaler, TestNoScaling) {
+  MockDecoderDatabase db;
+  auto factory = CreateBuiltinAudioDecoderFactory();
+  // Use PCMu, because it doesn't use scaled timestamps.
+  const DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderPCMu, factory);
+  static const uint8_t kRtpPayloadType = 0;
+  EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
+      .WillRepeatedly(Return(&info));
+
+  TimestampScaler scaler(db);
+  // Test both sides of the timestamp wrap-around.
+  for (uint32_t timestamp = 0xFFFFFFFF - 5; timestamp != 5; ++timestamp) {
+    // Scale to internal timestamp.
+    EXPECT_EQ(timestamp, scaler.ToInternal(timestamp, kRtpPayloadType));
+    // Scale back.
+    EXPECT_EQ(timestamp, scaler.ToExternal(timestamp));
+  }
+
+  EXPECT_CALL(db, Die());  // Called when database object is deleted.
+}
+
+TEST(TimestampScaler, TestNoScalingLargeStep) {
+  MockDecoderDatabase db;
+  auto factory = CreateBuiltinAudioDecoderFactory();
+  // Use PCMu, because it doesn't use scaled timestamps.
+  const DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderPCMu, factory);
+  static const uint8_t kRtpPayloadType = 0;
+  EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
+      .WillRepeatedly(Return(&info));
+
+  TimestampScaler scaler(db);
+  // Test both sides of the timestamp wrap-around.
+  static const uint32_t kStep = 160;
+  uint32_t start_timestamp = 0;
+  // |external_timestamp| will be a large positive value.
+  start_timestamp = start_timestamp - 5 * kStep;
+  for (uint32_t timestamp = start_timestamp; timestamp != 5 * kStep;
+      timestamp += kStep) {
+    // Scale to internal timestamp.
+    EXPECT_EQ(timestamp, scaler.ToInternal(timestamp, kRtpPayloadType));
+    // Scale back.
+    EXPECT_EQ(timestamp, scaler.ToExternal(timestamp));
+  }
+
+  EXPECT_CALL(db, Die());  // Called when database object is deleted.
+}
+
+TEST(TimestampScaler, TestG722) {
+  MockDecoderDatabase db;
+  auto factory = CreateBuiltinAudioDecoderFactory();
+  // Use G722, which has a factor 2 scaling.
+  const DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderG722, factory);
+  static const uint8_t kRtpPayloadType = 17;
+  EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
+      .WillRepeatedly(Return(&info));
+
+  TimestampScaler scaler(db);
+  // Test both sides of the timestamp wrap-around.
+  uint32_t external_timestamp = 0xFFFFFFFF - 5;
+  uint32_t internal_timestamp = external_timestamp;
+  for (; external_timestamp != 5; ++external_timestamp) {
+    // Scale to internal timestamp.
+    EXPECT_EQ(internal_timestamp,
+              scaler.ToInternal(external_timestamp, kRtpPayloadType));
+    // Scale back.
+    EXPECT_EQ(external_timestamp, scaler.ToExternal(internal_timestamp));
+    internal_timestamp += 2;
+  }
+
+  EXPECT_CALL(db, Die());  // Called when database object is deleted.
+}
+
+TEST(TimestampScaler, TestG722LargeStep) {
+  MockDecoderDatabase db;
+  auto factory = CreateBuiltinAudioDecoderFactory();
+  // Use G722, which has a factor 2 scaling.
+  const DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderG722, factory);
+  static const uint8_t kRtpPayloadType = 17;
+  EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
+      .WillRepeatedly(Return(&info));
+
+  TimestampScaler scaler(db);
+  // Test both sides of the timestamp wrap-around.
+  static const uint32_t kStep = 320;
+  uint32_t external_timestamp = 0;
+  // |external_timestamp| will be a large positive value.
+  external_timestamp = external_timestamp - 5 * kStep;
+  uint32_t internal_timestamp = external_timestamp;
+  for (; external_timestamp != 5 * kStep; external_timestamp += kStep) {
+    // Scale to internal timestamp.
+    EXPECT_EQ(internal_timestamp,
+              scaler.ToInternal(external_timestamp, kRtpPayloadType));
+    // Scale back.
+    EXPECT_EQ(external_timestamp, scaler.ToExternal(internal_timestamp));
+    // Internal timestamp should be incremented with twice the step.
+    internal_timestamp += 2 * kStep;
+  }
+
+  EXPECT_CALL(db, Die());  // Called when database object is deleted.
+}
+
+TEST(TimestampScaler, TestG722WithCng) {
+  MockDecoderDatabase db;
+  auto factory = CreateBuiltinAudioDecoderFactory();
+  // Use G722, which has a factor 2 scaling.
+  const DecoderDatabase::DecoderInfo info_g722(NetEqDecoder::kDecoderG722,
+                                               factory);
+  const DecoderDatabase::DecoderInfo info_cng(NetEqDecoder::kDecoderCNGwb,
+                                              factory);
+  static const uint8_t kRtpPayloadTypeG722 = 17;
+  static const uint8_t kRtpPayloadTypeCng = 13;
+  EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadTypeG722))
+      .WillRepeatedly(Return(&info_g722));
+  EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadTypeCng))
+      .WillRepeatedly(Return(&info_cng));
+
+  TimestampScaler scaler(db);
+  // Test both sides of the timestamp wrap-around.
+  uint32_t external_timestamp = 0xFFFFFFFF - 5;
+  uint32_t internal_timestamp = external_timestamp;
+  bool next_is_cng = false;
+  for (; external_timestamp != 5; ++external_timestamp) {
+    // Alternate between G.722 and CNG every other packet.
+    if (next_is_cng) {
+      // Scale to internal timestamp.
+      EXPECT_EQ(internal_timestamp,
+                scaler.ToInternal(external_timestamp, kRtpPayloadTypeCng));
+      next_is_cng = false;
+    } else {
+      // Scale to internal timestamp.
+      EXPECT_EQ(internal_timestamp,
+                scaler.ToInternal(external_timestamp, kRtpPayloadTypeG722));
+      next_is_cng = true;
+    }
+    // Scale back.
+    EXPECT_EQ(external_timestamp, scaler.ToExternal(internal_timestamp));
+    internal_timestamp += 2;
+  }
+
+  EXPECT_CALL(db, Die());  // Called when database object is deleted.
+}
+
+// Make sure that the method ToInternal(Packet* packet) is wired up correctly.
+// Since it is simply calling the other ToInternal method, we are not doing
+// as many tests here.
+TEST(TimestampScaler, TestG722Packet) {
+  MockDecoderDatabase db;
+  auto factory = CreateBuiltinAudioDecoderFactory();
+  // Use G722, which has a factor 2 scaling.
+  const DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderG722, factory);
+  static const uint8_t kRtpPayloadType = 17;
+  EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
+      .WillRepeatedly(Return(&info));
+
+  TimestampScaler scaler(db);
+  // Test both sides of the timestamp wrap-around.
+  uint32_t external_timestamp = 0xFFFFFFFF - 5;
+  uint32_t internal_timestamp = external_timestamp;
+  Packet packet;
+  packet.payload_type = kRtpPayloadType;
+  for (; external_timestamp != 5; ++external_timestamp) {
+    packet.timestamp = external_timestamp;
+    // Scale to internal timestamp.
+    scaler.ToInternal(&packet);
+    EXPECT_EQ(internal_timestamp, packet.timestamp);
+    internal_timestamp += 2;
+  }
+
+  EXPECT_CALL(db, Die());  // Called when database object is deleted.
+}
+
+// Make sure that the method ToInternal(PacketList* packet_list) is wired up
+// correctly. Since it is simply calling the ToInternal(Packet* packet) method,
+// we are not doing as many tests here.
+TEST(TimestampScaler, TestG722PacketList) {
+  MockDecoderDatabase db;
+  auto factory = CreateBuiltinAudioDecoderFactory();
+  // Use G722, which has a factor 2 scaling.
+  const DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderG722, factory);
+  static const uint8_t kRtpPayloadType = 17;
+  EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
+      .WillRepeatedly(Return(&info));
+
+  TimestampScaler scaler(db);
+  // Test both sides of the timestamp wrap-around.
+  uint32_t external_timestamp = 0xFFFFFFFF - 5;
+  uint32_t internal_timestamp = external_timestamp;
+  PacketList packet_list;
+  {
+    Packet packet1;
+    packet1.payload_type = kRtpPayloadType;
+    packet1.timestamp = external_timestamp;
+    Packet packet2;
+    packet2.payload_type = kRtpPayloadType;
+    packet2.timestamp = external_timestamp + 10;
+    packet_list.push_back(std::move(packet1));
+    packet_list.push_back(std::move(packet2));
+  }
+
+  scaler.ToInternal(&packet_list);
+  EXPECT_EQ(internal_timestamp, packet_list.front().timestamp);
+  packet_list.pop_front();
+  EXPECT_EQ(internal_timestamp + 20, packet_list.front().timestamp);
+
+  EXPECT_CALL(db, Die());  // Called when database object is deleted.
+}
+
+TEST(TimestampScaler, TestG722Reset) {
+  MockDecoderDatabase db;
+  auto factory = CreateBuiltinAudioDecoderFactory();
+  // Use G722, which has a factor 2 scaling.
+  const DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderG722, factory);
+  static const uint8_t kRtpPayloadType = 17;
+  EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
+      .WillRepeatedly(Return(&info));
+
+  TimestampScaler scaler(db);
+  // Test both sides of the timestamp wrap-around.
+  uint32_t external_timestamp = 0xFFFFFFFF - 5;
+  uint32_t internal_timestamp = external_timestamp;
+  for (; external_timestamp != 5; ++external_timestamp) {
+    // Scale to internal timestamp.
+    EXPECT_EQ(internal_timestamp,
+              scaler.ToInternal(external_timestamp, kRtpPayloadType));
+    // Scale back.
+    EXPECT_EQ(external_timestamp, scaler.ToExternal(internal_timestamp));
+    internal_timestamp += 2;
+  }
+  // Reset the scaler. After this, we expect the internal and external to start
+  // over at the same value again.
+  scaler.Reset();
+  internal_timestamp = external_timestamp;
+  for (; external_timestamp != 15; ++external_timestamp) {
+    // Scale to internal timestamp.
+    EXPECT_EQ(internal_timestamp,
+              scaler.ToInternal(external_timestamp, kRtpPayloadType));
+    // Scale back.
+    EXPECT_EQ(external_timestamp, scaler.ToExternal(internal_timestamp));
+    internal_timestamp += 2;
+  }
+
+  EXPECT_CALL(db, Die());  // Called when database object is deleted.
+}
+
+// TODO(minyue): This test becomes trivial since Opus does not need a timestamp
+// scaler. Therefore, this test may be removed in future. There is no harm to
+// keep it, since it can be taken as a test case for the situation of a trivial
+// timestamp scaler.
+TEST(TimestampScaler, TestOpusLargeStep) {
+  MockDecoderDatabase db;
+  auto factory = CreateBuiltinAudioDecoderFactory();
+  const DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderOpus, factory);
+  static const uint8_t kRtpPayloadType = 17;
+  EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
+      .WillRepeatedly(Return(&info));
+
+  TimestampScaler scaler(db);
+  // Test both sides of the timestamp wrap-around.
+  static const uint32_t kStep = 960;
+  uint32_t external_timestamp = 0;
+  // |external_timestamp| will be a large positive value.
+  external_timestamp = external_timestamp - 5 * kStep;
+  uint32_t internal_timestamp = external_timestamp;
+  for (; external_timestamp != 5 * kStep; external_timestamp += kStep) {
+    // Scale to internal timestamp.
+    EXPECT_EQ(internal_timestamp,
+              scaler.ToInternal(external_timestamp, kRtpPayloadType));
+    // Scale back.
+    EXPECT_EQ(external_timestamp, scaler.ToExternal(internal_timestamp));
+    internal_timestamp += kStep;
+  }
+
+  EXPECT_CALL(db, Die());  // Called when database object is deleted.
+}
+
+TEST(TimestampScaler, Failures) {
+  static const uint8_t kRtpPayloadType = 17;
+  MockDecoderDatabase db;
+  EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
+      .WillOnce(ReturnNull());  // Return NULL to indicate unknown payload type.
+
+  TimestampScaler scaler(db);
+  uint32_t timestamp = 4711;  // Some number.
+  EXPECT_EQ(timestamp, scaler.ToInternal(timestamp, kRtpPayloadType));
+
+  Packet* packet = NULL;
+  scaler.ToInternal(packet);  // Should not crash. That's all we can test.
+
+  EXPECT_CALL(db, Die());  // Called when database object is deleted.
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/DEPS b/modules/audio_coding/neteq/tools/DEPS
new file mode 100644
index 0000000..4db1e1d
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+  "+logging/rtc_event_log",
+]
diff --git a/modules/audio_coding/neteq/tools/audio_checksum.h b/modules/audio_coding/neteq/tools/audio_checksum.h
new file mode 100644
index 0000000..db67edf
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/audio_checksum.h
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_CHECKSUM_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_CHECKSUM_H_
+
+#include <memory>
+#include <string>
+
+#include "modules/audio_coding/neteq/tools/audio_sink.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/messagedigest.h"
+#include "rtc_base/stringencode.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace test {
+
+class AudioChecksum : public AudioSink {
+ public:
+  AudioChecksum()
+      : checksum_(rtc::MessageDigestFactory::Create(rtc::DIGEST_MD5)),
+        checksum_result_(checksum_->Size()),
+        finished_(false) {}
+
+  bool WriteArray(const int16_t* audio, size_t num_samples) override {
+    if (finished_)
+      return false;
+
+#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
+#error "Big-endian gives a different checksum"
+#endif
+    checksum_->Update(audio, num_samples * sizeof(*audio));
+    return true;
+  }
+
+  // Finalizes the computations, and returns the checksum.
+  std::string Finish() {
+    if (!finished_) {
+      finished_ = true;
+      checksum_->Finish(checksum_result_.data(), checksum_result_.size());
+    }
+    return rtc::hex_encode(checksum_result_.data<char>(),
+                           checksum_result_.size());
+  }
+
+ private:
+  std::unique_ptr<rtc::MessageDigest> checksum_;
+  rtc::Buffer checksum_result_;
+  bool finished_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioChecksum);
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_CHECKSUM_H_
diff --git a/modules/audio_coding/neteq/tools/audio_loop.cc b/modules/audio_coding/neteq/tools/audio_loop.cc
new file mode 100644
index 0000000..b5ad881
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/audio_loop.cc
@@ -0,0 +1,59 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/audio_loop.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+
+namespace webrtc {
+namespace test {
+
+bool AudioLoop::Init(const std::string file_name,
+                     size_t max_loop_length_samples,
+                     size_t block_length_samples) {
+  FILE* fp = fopen(file_name.c_str(), "rb");
+  if (!fp) return false;
+
+  audio_array_.reset(new int16_t[max_loop_length_samples +
+                                 block_length_samples]);
+  size_t samples_read = fread(audio_array_.get(), sizeof(int16_t),
+                              max_loop_length_samples, fp);
+  fclose(fp);
+
+  // Block length must be shorter than the loop length.
+  if (block_length_samples > samples_read) return false;
+
+  // Add an extra block length of samples to the end of the array, starting
+  // over again from the beginning of the array. This is done to simplify
+  // the reading process when reading over the end of the loop.
+  memcpy(&audio_array_[samples_read], audio_array_.get(),
+         block_length_samples * sizeof(int16_t));
+
+  loop_length_samples_ = samples_read;
+  block_length_samples_ = block_length_samples;
+  next_index_ = 0;
+  return true;
+}
+
+rtc::ArrayView<const int16_t> AudioLoop::GetNextBlock() {
+  // Check that the AudioLoop is initialized.
+  if (block_length_samples_ == 0)
+    return rtc::ArrayView<const int16_t>();
+
+  const int16_t* output_ptr = &audio_array_[next_index_];
+  next_index_ = (next_index_ + block_length_samples_) % loop_length_samples_;
+  return rtc::ArrayView<const int16_t>(output_ptr, block_length_samples_);
+}
+
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/audio_loop.h b/modules/audio_coding/neteq/tools/audio_loop.h
new file mode 100644
index 0000000..abb1a36
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/audio_loop.h
@@ -0,0 +1,59 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_LOOP_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_LOOP_H_
+
+#include <memory>
+#include <string>
+
+#include "api/array_view.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace test {
+
+// Class serving as an infinite source of audio, realized by looping an audio
+// clip.
+class AudioLoop {
+ public:
+  AudioLoop()
+      : next_index_(0),
+        loop_length_samples_(0),
+        block_length_samples_(0) {
+  }
+
+  virtual ~AudioLoop() {}
+
+  // Initializes the AudioLoop by reading from |file_name|. The loop will be no
+  // longer than |max_loop_length_samples|, if the length of the file is
+  // greater. Otherwise, the loop length is the same as the file length.
+  // The audio will be delivered in blocks of |block_length_samples|.
+  // Returns false if the initialization failed, otherwise true.
+  bool Init(const std::string file_name, size_t max_loop_length_samples,
+            size_t block_length_samples);
+
+  // Returns a (pointer,size) pair for the next block of audio. The size is
+  // equal to the |block_length_samples| Init() argument.
+  rtc::ArrayView<const int16_t> GetNextBlock();
+
+ private:
+  size_t next_index_;
+  size_t loop_length_samples_;
+  size_t block_length_samples_;
+  std::unique_ptr<int16_t[]> audio_array_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioLoop);
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_LOOP_H_
diff --git a/modules/audio_coding/neteq/tools/audio_sink.cc b/modules/audio_coding/neteq/tools/audio_sink.cc
new file mode 100644
index 0000000..7d7af7e
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/audio_sink.cc
@@ -0,0 +1,26 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/audio_sink.h"
+
+namespace webrtc {
+namespace test {
+
+bool AudioSinkFork::WriteArray(const int16_t* audio, size_t num_samples) {
+  return left_sink_->WriteArray(audio, num_samples) &&
+         right_sink_->WriteArray(audio, num_samples);
+}
+
+bool VoidAudioSink::WriteArray(const int16_t* audio, size_t num_samples) {
+  return true;
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/audio_sink.h b/modules/audio_coding/neteq/tools/audio_sink.h
new file mode 100644
index 0000000..ecec51b
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/audio_sink.h
@@ -0,0 +1,71 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_SINK_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_SINK_H_
+
+#include "modules/include/module_common_types.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace test {
+
+// Interface class for an object receiving raw output audio from test
+// applications.
+class AudioSink {
+ public:
+  AudioSink() {}
+  virtual ~AudioSink() {}
+
+  // Writes |num_samples| from |audio| to the AudioSink. Returns true if
+  // successful, otherwise false.
+  virtual bool WriteArray(const int16_t* audio, size_t num_samples) = 0;
+
+  // Writes |audio_frame| to the AudioSink. Returns true if successful,
+  // otherwise false.
+  bool WriteAudioFrame(const AudioFrame& audio_frame) {
+    return WriteArray(
+        audio_frame.data(),
+        audio_frame.samples_per_channel_ * audio_frame.num_channels_);
+  }
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioSink);
+};
+
+// Forks the output audio to two AudioSink objects.
+class AudioSinkFork : public AudioSink {
+ public:
+  AudioSinkFork(AudioSink* left, AudioSink* right)
+      : left_sink_(left), right_sink_(right) {}
+
+  bool WriteArray(const int16_t* audio, size_t num_samples) override;
+
+ private:
+  AudioSink* left_sink_;
+  AudioSink* right_sink_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioSinkFork);
+};
+
+// An AudioSink implementation that does nothing.
+class VoidAudioSink : public AudioSink {
+ public:
+  VoidAudioSink() = default;
+  bool WriteArray(const int16_t* audio, size_t num_samples) override;
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(VoidAudioSink);
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_SINK_H_
diff --git a/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc b/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
new file mode 100644
index 0000000..6b325b6
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
@@ -0,0 +1,70 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/constant_pcm_packet_source.h"
+
+#include <algorithm>
+
+#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
+#include "modules/audio_coding/neteq/tools/packet.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+
+ConstantPcmPacketSource::ConstantPcmPacketSource(size_t payload_len_samples,
+                                                 int16_t sample_value,
+                                                 int sample_rate_hz,
+                                                 int payload_type)
+    : payload_len_samples_(payload_len_samples),
+      packet_len_bytes_(2 * payload_len_samples_ + kHeaderLenBytes),
+      samples_per_ms_(sample_rate_hz / 1000),
+      next_arrival_time_ms_(0.0),
+      payload_type_(payload_type),
+      seq_number_(0),
+      timestamp_(0),
+      payload_ssrc_(0xABCD1234) {
+  size_t encoded_len = WebRtcPcm16b_Encode(&sample_value, 1, encoded_sample_);
+  RTC_CHECK_EQ(2U, encoded_len);
+}
+
+std::unique_ptr<Packet> ConstantPcmPacketSource::NextPacket() {
+  RTC_CHECK_GT(packet_len_bytes_, kHeaderLenBytes);
+  uint8_t* packet_memory = new uint8_t[packet_len_bytes_];
+  // Fill the payload part of the packet memory with the pre-encoded value.
+  for (unsigned i = 0; i < 2 * payload_len_samples_; ++i)
+    packet_memory[kHeaderLenBytes + i] = encoded_sample_[i % 2];
+  WriteHeader(packet_memory);
+  // |packet| assumes ownership of |packet_memory|.
+  std::unique_ptr<Packet> packet(
+      new Packet(packet_memory, packet_len_bytes_, next_arrival_time_ms_));
+  next_arrival_time_ms_ += payload_len_samples_ / samples_per_ms_;
+  return packet;
+}
+
+void ConstantPcmPacketSource::WriteHeader(uint8_t* packet_memory) {
+  packet_memory[0] = 0x80;
+  packet_memory[1] = static_cast<uint8_t>(payload_type_);
+  packet_memory[2] = seq_number_ >> 8;
+  packet_memory[3] = seq_number_ & 0xFF;
+  packet_memory[4] = timestamp_ >> 24;
+  packet_memory[5] = (timestamp_ >> 16) & 0xFF;
+  packet_memory[6] = (timestamp_ >> 8) & 0xFF;
+  packet_memory[7] = timestamp_ & 0xFF;
+  packet_memory[8] = payload_ssrc_ >> 24;
+  packet_memory[9] = (payload_ssrc_ >> 16) & 0xFF;
+  packet_memory[10] = (payload_ssrc_ >> 8) & 0xFF;
+  packet_memory[11] = payload_ssrc_ & 0xFF;
+  ++seq_number_;
+  timestamp_ += static_cast<uint32_t>(payload_len_samples_);
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/constant_pcm_packet_source.h b/modules/audio_coding/neteq/tools/constant_pcm_packet_source.h
new file mode 100644
index 0000000..4e216e4
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/constant_pcm_packet_source.h
@@ -0,0 +1,55 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_CONSTANT_PCM_PACKET_SOURCE_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_CONSTANT_PCM_PACKET_SOURCE_H_
+
+#include <stdio.h>
+#include <string>
+
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/neteq/tools/packet_source.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+namespace test {
+
+// This class implements a packet source that delivers PCM16b encoded packets
+// with a constant sample value. The payload length, constant sample value,
+// sample rate, and payload type are all set in the constructor.
+class ConstantPcmPacketSource : public PacketSource {
+ public:
+  ConstantPcmPacketSource(size_t payload_len_samples,
+                          int16_t sample_value,
+                          int sample_rate_hz,
+                          int payload_type);
+
+  std::unique_ptr<Packet> NextPacket() override;
+
+ private:
+  void WriteHeader(uint8_t* packet_memory);
+
+  const size_t kHeaderLenBytes = 12;
+  const size_t payload_len_samples_;
+  const size_t packet_len_bytes_;
+  uint8_t encoded_sample_[2];
+  const int samples_per_ms_;
+  double next_arrival_time_ms_;
+  const int payload_type_;
+  uint16_t seq_number_;
+  uint32_t timestamp_;
+  const uint32_t payload_ssrc_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(ConstantPcmPacketSource);
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_CONSTANT_PCM_PACKET_SOURCE_H_
diff --git a/modules/audio_coding/neteq/tools/encode_neteq_input.cc b/modules/audio_coding/neteq/tools/encode_neteq_input.cc
new file mode 100644
index 0000000..a8d1bdf
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/encode_neteq_input.cc
@@ -0,0 +1,88 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/encode_neteq_input.h"
+
+#include <utility>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+namespace test {
+
+EncodeNetEqInput::EncodeNetEqInput(std::unique_ptr<Generator> generator,
+                                   std::unique_ptr<AudioEncoder> encoder,
+                                   int64_t input_duration_ms)
+    : generator_(std::move(generator)),
+      encoder_(std::move(encoder)),
+      input_duration_ms_(input_duration_ms) {
+  CreatePacket();
+}
+
+rtc::Optional<int64_t> EncodeNetEqInput::NextPacketTime() const {
+  RTC_DCHECK(packet_data_);
+  return static_cast<int64_t>(packet_data_->time_ms);
+}
+
+rtc::Optional<int64_t> EncodeNetEqInput::NextOutputEventTime() const {
+  return next_output_event_ms_;
+}
+
+std::unique_ptr<NetEqInput::PacketData> EncodeNetEqInput::PopPacket() {
+  RTC_DCHECK(packet_data_);
+  // Grab the packet to return...
+  std::unique_ptr<PacketData> packet_to_return = std::move(packet_data_);
+  // ... and line up the next packet for future use.
+  CreatePacket();
+
+  return packet_to_return;
+}
+
+void EncodeNetEqInput::AdvanceOutputEvent() {
+  next_output_event_ms_ += kOutputPeriodMs;
+}
+
+rtc::Optional<RTPHeader> EncodeNetEqInput::NextHeader() const {
+  RTC_DCHECK(packet_data_);
+  return packet_data_->header;
+}
+
+void EncodeNetEqInput::CreatePacket() {
+  // Create a new PacketData object.
+  RTC_DCHECK(!packet_data_);
+  packet_data_.reset(new NetEqInput::PacketData);
+  RTC_DCHECK_EQ(packet_data_->payload.size(), 0);
+
+  // Loop until we get a packet.
+  AudioEncoder::EncodedInfo info;
+  RTC_DCHECK(!info.send_even_if_empty);
+  int num_blocks = 0;
+  while (packet_data_->payload.size() == 0 && !info.send_even_if_empty) {
+    const size_t num_samples = rtc::CheckedDivExact(
+        static_cast<int>(encoder_->SampleRateHz() * kOutputPeriodMs), 1000);
+
+    info = encoder_->Encode(rtp_timestamp_, generator_->Generate(num_samples),
+                            &packet_data_->payload);
+
+    rtp_timestamp_ += rtc::dchecked_cast<uint32_t>(
+        num_samples * encoder_->RtpTimestampRateHz() /
+        encoder_->SampleRateHz());
+    ++num_blocks;
+  }
+  packet_data_->header.timestamp = info.encoded_timestamp;
+  packet_data_->header.payloadType = info.payload_type;
+  packet_data_->header.sequenceNumber = sequence_number_++;
+  packet_data_->time_ms = next_packet_time_ms_;
+  next_packet_time_ms_ += num_blocks * kOutputPeriodMs;
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/encode_neteq_input.h b/modules/audio_coding/neteq/tools/encode_neteq_input.h
new file mode 100644
index 0000000..b44d4ac
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/encode_neteq_input.h
@@ -0,0 +1,71 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_ENCODE_NETEQ_INPUT_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_ENCODE_NETEQ_INPUT_H_
+
+#include <memory>
+
+#include "api/audio_codecs/audio_encoder.h"
+#include "modules/audio_coding/neteq/tools/neteq_input.h"
+#include "modules/include/module_common_types.h"
+
+namespace webrtc {
+namespace test {
+
+// This class provides a NetEqInput that takes audio from a generator object and
+// encodes it using a given audio encoder.
+class EncodeNetEqInput : public NetEqInput {
+ public:
+  // Generator class, to be provided to the EncodeNetEqInput constructor.
+  class Generator {
+   public:
+    virtual ~Generator() = default;
+    // Returns the next num_samples values from the signal generator.
+    virtual rtc::ArrayView<const int16_t> Generate(size_t num_samples) = 0;
+  };
+
+  // The source will end after the given input duration.
+  EncodeNetEqInput(std::unique_ptr<Generator> generator,
+                   std::unique_ptr<AudioEncoder> encoder,
+                   int64_t input_duration_ms);
+
+  rtc::Optional<int64_t> NextPacketTime() const override;
+
+  rtc::Optional<int64_t> NextOutputEventTime() const override;
+
+  std::unique_ptr<PacketData> PopPacket() override;
+
+  void AdvanceOutputEvent() override;
+
+  bool ended() const override {
+    return next_output_event_ms_ <= input_duration_ms_;
+  }
+
+  rtc::Optional<RTPHeader> NextHeader() const override;
+
+ private:
+  static constexpr int64_t kOutputPeriodMs = 10;
+
+  void CreatePacket();
+
+  std::unique_ptr<Generator> generator_;
+  std::unique_ptr<AudioEncoder> encoder_;
+  std::unique_ptr<PacketData> packet_data_;
+  uint32_t rtp_timestamp_ = 0;
+  int16_t sequence_number_ = 0;
+  int64_t next_packet_time_ms_ = 0;
+  int64_t next_output_event_ms_ = 0;
+  const int64_t input_duration_ms_;
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_ENCODE_NETEQ_INPUT_H_
diff --git a/modules/audio_coding/neteq/tools/fake_decode_from_file.cc b/modules/audio_coding/neteq/tools/fake_decode_from_file.cc
new file mode 100644
index 0000000..f437608
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/fake_decode_from_file.cc
@@ -0,0 +1,102 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/fake_decode_from_file.h"
+
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+namespace test {
+
+int FakeDecodeFromFile::DecodeInternal(const uint8_t* encoded,
+                                       size_t encoded_len,
+                                       int sample_rate_hz,
+                                       int16_t* decoded,
+                                       SpeechType* speech_type) {
+  if (encoded_len == 0) {
+    // Decoder is asked to produce codec-internal comfort noise.
+    RTC_DCHECK(!encoded);  // NetEq always sends nullptr in this case.
+    RTC_DCHECK(cng_mode_);
+    RTC_DCHECK_GT(last_decoded_length_, 0);
+    std::fill_n(decoded, last_decoded_length_, 0);
+    *speech_type = kComfortNoise;
+    return rtc::dchecked_cast<int>(last_decoded_length_);
+  }
+
+  RTC_CHECK_GE(encoded_len, 12);
+  uint32_t timestamp_to_decode =
+      ByteReader<uint32_t>::ReadLittleEndian(encoded);
+  uint32_t samples_to_decode =
+      ByteReader<uint32_t>::ReadLittleEndian(&encoded[4]);
+  if (samples_to_decode == 0) {
+    // Number of samples in packet is unknown.
+    if (last_decoded_length_ > 0) {
+      // Use length of last decoded packet, but since this is the total for all
+      // channels, we have to divide by 2 in the stereo case.
+      samples_to_decode = rtc::dchecked_cast<int>(rtc::CheckedDivExact(
+          last_decoded_length_, static_cast<size_t>(stereo_ ? 2uL : 1uL)));
+    } else {
+      // This is the first packet to decode, and we do not know the length of
+      // it. Set it to 10 ms.
+      samples_to_decode = rtc::CheckedDivExact(sample_rate_hz, 100);
+    }
+  }
+
+  if (next_timestamp_from_input_ &&
+      timestamp_to_decode != *next_timestamp_from_input_) {
+    // A gap in the timestamp sequence is detected. Skip the same number of
+    // samples from the file.
+    uint32_t jump = timestamp_to_decode - *next_timestamp_from_input_;
+    RTC_CHECK(input_->Seek(jump));
+  }
+
+  next_timestamp_from_input_ = timestamp_to_decode + samples_to_decode;
+
+  uint32_t original_payload_size_bytes =
+      ByteReader<uint32_t>::ReadLittleEndian(&encoded[8]);
+  if (original_payload_size_bytes == 1) {
+    // This is a comfort noise payload.
+    RTC_DCHECK_GT(last_decoded_length_, 0);
+    std::fill_n(decoded, last_decoded_length_, 0);
+    *speech_type = kComfortNoise;
+    cng_mode_ = true;
+    return rtc::dchecked_cast<int>(last_decoded_length_);
+  }
+
+  cng_mode_ = false;
+  RTC_CHECK(input_->Read(static_cast<size_t>(samples_to_decode), decoded));
+
+  if (stereo_) {
+    InputAudioFile::DuplicateInterleaved(decoded, samples_to_decode, 2,
+                                         decoded);
+    samples_to_decode *= 2;
+  }
+
+  *speech_type = kSpeech;
+  last_decoded_length_ = samples_to_decode;
+  return rtc::dchecked_cast<int>(last_decoded_length_);
+}
+
+void FakeDecodeFromFile::PrepareEncoded(uint32_t timestamp,
+                                        size_t samples,
+                                        size_t original_payload_size_bytes,
+                                        rtc::ArrayView<uint8_t> encoded) {
+  RTC_CHECK_GE(encoded.size(), 12);
+  ByteWriter<uint32_t>::WriteLittleEndian(&encoded[0], timestamp);
+  ByteWriter<uint32_t>::WriteLittleEndian(&encoded[4],
+                                          rtc::checked_cast<uint32_t>(samples));
+  ByteWriter<uint32_t>::WriteLittleEndian(
+      &encoded[8], rtc::checked_cast<uint32_t>(original_payload_size_bytes));
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/fake_decode_from_file.h b/modules/audio_coding/neteq/tools/fake_decode_from_file.h
new file mode 100644
index 0000000..7aa8e6e
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/fake_decode_from_file.h
@@ -0,0 +1,73 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_FAKE_DECODE_FROM_FILE_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_FAKE_DECODE_FROM_FILE_H_
+
+#include <memory>
+
+#include "api/array_view.h"
+#include "api/audio_codecs/audio_decoder.h"
+#include "api/optional.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+
+namespace webrtc {
+namespace test {
+
+// Provides an AudioDecoder implementation that delivers audio data from a file.
+// The "encoded" input should contain information about what RTP timestamp the
+// encoding represents, and how many samples the decoder should produce for that
+// encoding. A helper method PrepareEncoded is provided to prepare such
+// encodings. If packets are missing, as determined from the timestamps, the
+// file reading will skip forward to match the loss.
+class FakeDecodeFromFile : public AudioDecoder {
+ public:
+  FakeDecodeFromFile(std::unique_ptr<InputAudioFile> input,
+                     int sample_rate_hz,
+                     bool stereo)
+      : input_(std::move(input)),
+        sample_rate_hz_(sample_rate_hz),
+        stereo_(stereo) {}
+
+  ~FakeDecodeFromFile() = default;
+
+  void Reset() override {}
+
+  int SampleRateHz() const override { return sample_rate_hz_; }
+
+  size_t Channels() const override { return stereo_ ? 2 : 1; }
+
+  int DecodeInternal(const uint8_t* encoded,
+                     size_t encoded_len,
+                     int sample_rate_hz,
+                     int16_t* decoded,
+                     SpeechType* speech_type) override;
+
+  // Helper method. Writes |timestamp|, |samples| and
+  // |original_payload_size_bytes| to |encoded| in a format that the
+  // FakeDecodeFromFile decoder will understand. |encoded| must be at least 12
+  // bytes long.
+  static void PrepareEncoded(uint32_t timestamp,
+                             size_t samples,
+                             size_t original_payload_size_bytes,
+                             rtc::ArrayView<uint8_t> encoded);
+
+ private:
+  std::unique_ptr<InputAudioFile> input_;
+  rtc::Optional<uint32_t> next_timestamp_from_input_;
+  const int sample_rate_hz_;
+  const bool stereo_;
+  size_t last_decoded_length_ = 0;
+  bool cng_mode_ = false;
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_FAKE_DECODE_FROM_FILE_H_
diff --git a/modules/audio_coding/neteq/tools/input_audio_file.cc b/modules/audio_coding/neteq/tools/input_audio_file.cc
new file mode 100644
index 0000000..31ebf98
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/input_audio_file.cc
@@ -0,0 +1,85 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+
+InputAudioFile::InputAudioFile(const std::string file_name, bool loop_at_end)
+    : loop_at_end_(loop_at_end) {
+  fp_ = fopen(file_name.c_str(), "rb");
+}
+
+InputAudioFile::~InputAudioFile() { fclose(fp_); }
+
+bool InputAudioFile::Read(size_t samples, int16_t* destination) {
+  if (!fp_) {
+    return false;
+  }
+  size_t samples_read = fread(destination, sizeof(int16_t), samples, fp_);
+  if (samples_read < samples) {
+    if (!loop_at_end_) {
+      return false;
+    }
+    // Rewind and read the missing samples.
+    rewind(fp_);
+    size_t missing_samples = samples - samples_read;
+    if (fread(destination + samples_read, sizeof(int16_t), missing_samples,
+              fp_) < missing_samples) {
+      // Could not read enough even after rewinding the file.
+      return false;
+    }
+  }
+  return true;
+}
+
+bool InputAudioFile::Seek(int samples) {
+  if (!fp_) {
+    return false;
+  }
+  // Find file boundaries.
+  const long current_pos = ftell(fp_);
+  RTC_CHECK_NE(EOF, current_pos)
+      << "Error returned when getting file position.";
+  RTC_CHECK_EQ(0, fseek(fp_, 0, SEEK_END));  // Move to end of file.
+  const long file_size = ftell(fp_);
+  RTC_CHECK_NE(EOF, file_size) << "Error returned when getting file position.";
+  // Find new position.
+  long new_pos = current_pos + sizeof(int16_t) * samples;  // Samples to bytes.
+  RTC_CHECK_GE(new_pos, 0)
+      << "Trying to move to before the beginning of the file";
+  if (loop_at_end_) {
+    new_pos = new_pos % file_size;  // Wrap around the end of the file.
+  } else {
+    new_pos = new_pos > file_size ? file_size : new_pos;  // Don't loop.
+  }
+  // Move to new position relative to the beginning of the file.
+  RTC_CHECK_EQ(0, fseek(fp_, new_pos, SEEK_SET));
+  return true;
+}
+
+void InputAudioFile::DuplicateInterleaved(const int16_t* source, size_t samples,
+                                          size_t channels,
+                                          int16_t* destination) {
+  // Start from the end of |source| and |destination|, and work towards the
+  // beginning. This is to allow in-place interleaving of the same array (i.e.,
+  // |source| and |destination| are the same array).
+  for (int i = static_cast<int>(samples - 1); i >= 0; --i) {
+    for (int j = static_cast<int>(channels - 1); j >= 0; --j) {
+      destination[i * channels + j] = source[i];
+    }
+  }
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/input_audio_file.h b/modules/audio_coding/neteq/tools/input_audio_file.h
new file mode 100644
index 0000000..6bfa369
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/input_audio_file.h
@@ -0,0 +1,59 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_INPUT_AUDIO_FILE_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_INPUT_AUDIO_FILE_H_
+
+#include <stdio.h>
+
+#include <string>
+
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace test {
+
+// Class for handling a looping input audio file.
+class InputAudioFile {
+ public:
+  explicit InputAudioFile(const std::string file_name, bool loop_at_end = true);
+
+  virtual ~InputAudioFile();
+
+  // Reads |samples| elements from source file to |destination|. Returns true
+  // if the read was successful, otherwise false. If the file end is reached,
+  // the file is rewound and reading continues from the beginning.
+  // The output |destination| must have the capacity to hold |samples| elements.
+  virtual bool Read(size_t samples, int16_t* destination);
+
+  // Fast-forwards (|samples| > 0) or -backwards (|samples| < 0) the file by the
+  // indicated number of samples. Just like Read(), Seek() starts over at the
+  // beginning of the file if the end is reached. However, seeking backwards
+  // past the beginning of the file is not possible.
+  virtual bool Seek(int samples);
+
+  // Creates a multi-channel signal from a mono signal. Each sample is repeated
+  // |channels| times to create an interleaved multi-channel signal where all
+  // channels are identical. The output |destination| must have the capacity to
+  // hold samples * channels elements. Note that |source| and |destination| can
+  // be the same array (i.e., point to the same address).
+  static void DuplicateInterleaved(const int16_t* source, size_t samples,
+                                   size_t channels, int16_t* destination);
+
+ private:
+  FILE* fp_;
+  const bool loop_at_end_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(InputAudioFile);
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_INPUT_AUDIO_FILE_H_
diff --git a/modules/audio_coding/neteq/tools/input_audio_file_unittest.cc b/modules/audio_coding/neteq/tools/input_audio_file_unittest.cc
new file mode 100644
index 0000000..bf016a1
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/input_audio_file_unittest.cc
@@ -0,0 +1,59 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for test InputAudioFile class.
+
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+TEST(TestInputAudioFile, DuplicateInterleaveSeparateSrcDst) {
+  static const size_t kSamples = 10;
+  static const size_t kChannels = 2;
+  int16_t input[kSamples];
+  for (size_t i = 0; i < kSamples; ++i) {
+    input[i] = rtc::checked_cast<int16_t>(i);
+  }
+  int16_t output[kSamples * kChannels];
+  InputAudioFile::DuplicateInterleaved(input, kSamples, kChannels, output);
+
+  // Verify output
+  int16_t* output_ptr = output;
+  for (size_t i = 0; i < kSamples; ++i) {
+    for (size_t j = 0; j < kChannels; ++j) {
+      EXPECT_EQ(static_cast<int16_t>(i), *output_ptr++);
+    }
+  }
+}
+
+TEST(TestInputAudioFile, DuplicateInterleaveSameSrcDst) {
+  static const size_t kSamples = 10;
+  static const size_t kChannels = 5;
+  int16_t input[kSamples * kChannels];
+  for (size_t i = 0; i < kSamples; ++i) {
+    input[i] = rtc::checked_cast<int16_t>(i);
+  }
+  InputAudioFile::DuplicateInterleaved(input, kSamples, kChannels, input);
+
+  // Verify output
+  int16_t* output_ptr = input;
+  for (size_t i = 0; i < kSamples; ++i) {
+    for (size_t j = 0; j < kChannels; ++j) {
+      EXPECT_EQ(static_cast<int16_t>(i), *output_ptr++);
+    }
+  }
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/neteq_delay_analyzer.cc b/modules/audio_coding/neteq/tools/neteq_delay_analyzer.cc
new file mode 100644
index 0000000..882f823
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/neteq_delay_analyzer.cc
@@ -0,0 +1,336 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"
+
+#include <algorithm>
+#include <fstream>
+#include <ios>
+#include <iterator>
+#include <limits>
+#include <utility>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+namespace {
+// Helper function for NetEqDelayAnalyzer::CreateGraphs. Returns the
+// interpolated value of a function at the point x. Vector x_vec contains the
+// sample points, and y_vec contains the function values at these points. The
+// return value is a linear interpolation between y_vec values.
+double LinearInterpolate(double x,
+                         const std::vector<int64_t>& x_vec,
+                         const std::vector<int64_t>& y_vec) {
+  // Find first element which is larger than x.
+  auto it = std::upper_bound(x_vec.begin(), x_vec.end(), x);
+  if (it == x_vec.end()) {
+    --it;
+  }
+  const size_t upper_ix = it - x_vec.begin();
+
+  size_t lower_ix;
+  if (upper_ix == 0 || x_vec[upper_ix] <= x) {
+    lower_ix = upper_ix;
+  } else {
+    lower_ix = upper_ix - 1;
+  }
+  double y;
+  if (lower_ix == upper_ix) {
+    y = y_vec[lower_ix];
+  } else {
+    RTC_DCHECK_NE(x_vec[lower_ix], x_vec[upper_ix]);
+    y = (x - x_vec[lower_ix]) * (y_vec[upper_ix] - y_vec[lower_ix]) /
+            (x_vec[upper_ix] - x_vec[lower_ix]) +
+        y_vec[lower_ix];
+  }
+  return y;
+}
+}  // namespace
+
+void NetEqDelayAnalyzer::AfterInsertPacket(
+    const test::NetEqInput::PacketData& packet,
+    NetEq* neteq) {
+  data_.insert(
+      std::make_pair(packet.header.timestamp, TimingData(packet.time_ms)));
+  ssrcs_.insert(packet.header.ssrc);
+  payload_types_.insert(packet.header.payloadType);
+}
+
+void NetEqDelayAnalyzer::BeforeGetAudio(NetEq* neteq) {
+  last_sync_buffer_ms_ = neteq->SyncBufferSizeMs();
+}
+
+void NetEqDelayAnalyzer::AfterGetAudio(int64_t time_now_ms,
+                                       const AudioFrame& audio_frame,
+                                       bool /*muted*/,
+                                       NetEq* neteq) {
+  get_audio_time_ms_.push_back(time_now_ms);
+  // Check what timestamps were decoded in the last GetAudio call.
+  std::vector<uint32_t> dec_ts = neteq->LastDecodedTimestamps();
+  // Find those timestamps in data_, insert their decoding time and sync
+  // delay.
+  for (uint32_t ts : dec_ts) {
+    auto it = data_.find(ts);
+    if (it == data_.end()) {
+      // This is a packet that was split out from another packet. Skip it.
+      continue;
+    }
+    auto& it_timing = it->second;
+    RTC_CHECK(!it_timing.decode_get_audio_count)
+        << "Decode time already written";
+    it_timing.decode_get_audio_count = get_audio_count_;
+    RTC_CHECK(!it_timing.sync_delay_ms) << "Decode time already written";
+    it_timing.sync_delay_ms = last_sync_buffer_ms_;
+    it_timing.target_delay_ms = neteq->TargetDelayMs();
+    it_timing.current_delay_ms = neteq->FilteredCurrentDelayMs();
+  }
+  last_sample_rate_hz_ = audio_frame.sample_rate_hz_;
+  ++get_audio_count_;
+}
+
+void NetEqDelayAnalyzer::CreateGraphs(
+    std::vector<float>* send_time_s,
+    std::vector<float>* arrival_delay_ms,
+    std::vector<float>* corrected_arrival_delay_ms,
+    std::vector<rtc::Optional<float>>* playout_delay_ms,
+    std::vector<rtc::Optional<float>>* target_delay_ms) const {
+  if (get_audio_time_ms_.empty()) {
+    return;
+  }
+  // Create nominal_get_audio_time_ms, a vector starting at
+  // get_audio_time_ms_[0] and increasing by 10 for each element.
+  std::vector<int64_t> nominal_get_audio_time_ms(get_audio_time_ms_.size());
+  nominal_get_audio_time_ms[0] = get_audio_time_ms_[0];
+  std::transform(
+      nominal_get_audio_time_ms.begin(), nominal_get_audio_time_ms.end() - 1,
+      nominal_get_audio_time_ms.begin() + 1, [](int64_t& x) { return x + 10; });
+  RTC_DCHECK(
+      std::is_sorted(get_audio_time_ms_.begin(), get_audio_time_ms_.end()));
+
+  std::vector<double> rtp_timestamps_ms;
+  double offset = std::numeric_limits<double>::max();
+  TimestampUnwrapper unwrapper;
+  // This loop traverses data_ and populates rtp_timestamps_ms as well as
+  // calculates the base offset.
+  for (auto& d : data_) {
+    rtp_timestamps_ms.push_back(
+        unwrapper.Unwrap(d.first) /
+        rtc::CheckedDivExact(last_sample_rate_hz_, 1000));
+    offset =
+        std::min(offset, d.second.arrival_time_ms - rtp_timestamps_ms.back());
+  }
+
+  // Calculate send times in seconds for each packet. This is the (unwrapped)
+  // RTP timestamp in ms divided by 1000.
+  send_time_s->resize(rtp_timestamps_ms.size());
+  std::transform(rtp_timestamps_ms.begin(), rtp_timestamps_ms.end(),
+                 send_time_s->begin(), [rtp_timestamps_ms](double x) {
+                   return (x - rtp_timestamps_ms[0]) / 1000.f;
+                 });
+  RTC_DCHECK_EQ(send_time_s->size(), rtp_timestamps_ms.size());
+
+  // This loop traverses the data again and populates the graph vectors. The
+  // reason to have two loops and traverse twice is that the offset cannot be
+  // known until the first traversal is done. Meanwhile, the final offset must
+  // be known already at the start of this second loop.
+  auto data_it = data_.cbegin();
+  for (size_t i = 0; i < send_time_s->size(); ++i, ++data_it) {
+    RTC_DCHECK(data_it != data_.end());
+    const double offset_send_time_ms = rtp_timestamps_ms[i] + offset;
+    const auto& timing = data_it->second;
+    corrected_arrival_delay_ms->push_back(
+        LinearInterpolate(timing.arrival_time_ms, get_audio_time_ms_,
+                          nominal_get_audio_time_ms) -
+        offset_send_time_ms);
+    arrival_delay_ms->push_back(timing.arrival_time_ms - offset_send_time_ms);
+
+    if (timing.decode_get_audio_count) {
+      // This packet was decoded.
+      RTC_DCHECK(timing.sync_delay_ms);
+      const float playout_ms = *timing.decode_get_audio_count * 10 +
+                               get_audio_time_ms_[0] + *timing.sync_delay_ms -
+                               offset_send_time_ms;
+      playout_delay_ms->push_back(playout_ms);
+      RTC_DCHECK(timing.target_delay_ms);
+      RTC_DCHECK(timing.current_delay_ms);
+      const float target =
+          playout_ms - *timing.current_delay_ms + *timing.target_delay_ms;
+      target_delay_ms->push_back(target);
+    } else {
+      // This packet was never decoded. Mark target and playout delays as empty.
+      playout_delay_ms->push_back(rtc::nullopt);
+      target_delay_ms->push_back(rtc::nullopt);
+    }
+  }
+  RTC_DCHECK(data_it == data_.end());
+  RTC_DCHECK_EQ(send_time_s->size(), corrected_arrival_delay_ms->size());
+  RTC_DCHECK_EQ(send_time_s->size(), playout_delay_ms->size());
+  RTC_DCHECK_EQ(send_time_s->size(), target_delay_ms->size());
+}
+
+void NetEqDelayAnalyzer::CreateMatlabScript(
+    const std::string& script_name) const {
+  std::vector<float> send_time_s;
+  std::vector<float> arrival_delay_ms;
+  std::vector<float> corrected_arrival_delay_ms;
+  std::vector<rtc::Optional<float>> playout_delay_ms;
+  std::vector<rtc::Optional<float>> target_delay_ms;
+  CreateGraphs(&send_time_s, &arrival_delay_ms, &corrected_arrival_delay_ms,
+               &playout_delay_ms, &target_delay_ms);
+
+  // Create an output file stream to Matlab script file.
+  std::ofstream output(script_name);
+  // The iterator is used to batch-output comma-separated values from vectors.
+  std::ostream_iterator<float> output_iterator(output, ",");
+
+  output << "send_time_s = [ ";
+  std::copy(send_time_s.begin(), send_time_s.end(), output_iterator);
+  output << "];" << std::endl;
+
+  output << "arrival_delay_ms = [ ";
+  std::copy(arrival_delay_ms.begin(), arrival_delay_ms.end(), output_iterator);
+  output << "];" << std::endl;
+
+  output << "corrected_arrival_delay_ms = [ ";
+  std::copy(corrected_arrival_delay_ms.begin(),
+            corrected_arrival_delay_ms.end(), output_iterator);
+  output << "];" << std::endl;
+
+  output << "playout_delay_ms = [ ";
+  for (const auto& v : playout_delay_ms) {
+    if (!v) {
+      output << "nan, ";
+    } else {
+      output << *v << ", ";
+    }
+  }
+  output << "];" << std::endl;
+
+  output << "target_delay_ms = [ ";
+  for (const auto& v : target_delay_ms) {
+    if (!v) {
+      output << "nan, ";
+    } else {
+      output << *v << ", ";
+    }
+  }
+  output << "];" << std::endl;
+
+  output << "h=plot(send_time_s, arrival_delay_ms, "
+         << "send_time_s, target_delay_ms, 'g.', "
+         << "send_time_s, playout_delay_ms);" << std::endl;
+  output << "set(h(1),'color',0.75*[1 1 1]);" << std::endl;
+  output << "set(h(2),'markersize',6);" << std::endl;
+  output << "set(h(3),'linew',1.5);" << std::endl;
+  output << "ax1=axis;" << std::endl;
+  output << "axis tight" << std::endl;
+  output << "ax2=axis;" << std::endl;
+  output << "axis([ax2(1:3) ax1(4)])" << std::endl;
+  output << "xlabel('send time [s]');" << std::endl;
+  output << "ylabel('relative delay [ms]');" << std::endl;
+  if (!ssrcs_.empty()) {
+    auto ssrc_it = ssrcs_.cbegin();
+    output << "title('SSRC: 0x" << std::hex << static_cast<int64_t>(*ssrc_it++);
+    while (ssrc_it != ssrcs_.end()) {
+      output << ", 0x" << std::hex << static_cast<int64_t>(*ssrc_it++);
+    }
+    output << std::dec;
+    auto pt_it = payload_types_.cbegin();
+    output << "; Payload Types: " << *pt_it++;
+    while (pt_it != payload_types_.end()) {
+      output << ", " << *pt_it++;
+    }
+    output << "');" << std::endl;
+  }
+}
+
+void NetEqDelayAnalyzer::CreatePythonScript(
+    const std::string& script_name) const {
+  std::vector<float> send_time_s;
+  std::vector<float> arrival_delay_ms;
+  std::vector<float> corrected_arrival_delay_ms;
+  std::vector<rtc::Optional<float>> playout_delay_ms;
+  std::vector<rtc::Optional<float>> target_delay_ms;
+  CreateGraphs(&send_time_s, &arrival_delay_ms, &corrected_arrival_delay_ms,
+               &playout_delay_ms, &target_delay_ms);
+
+  // Create an output file stream to the python script file.
+  std::ofstream output(script_name);
+  // The iterator is used to batch-output comma-separated values from vectors.
+  std::ostream_iterator<float> output_iterator(output, ",");
+
+  // Necessary includes
+  output << "import numpy as np" << std::endl;
+  output << "import matplotlib.pyplot as plt" << std::endl;
+
+  output << "send_time_s = [";
+  std::copy(send_time_s.begin(), send_time_s.end(), output_iterator);
+  output << "]" << std::endl;
+
+  output << "arrival_delay_ms = [";
+  std::copy(arrival_delay_ms.begin(), arrival_delay_ms.end(), output_iterator);
+  output << "]" << std::endl;
+
+  output << "corrected_arrival_delay_ms = [";
+  std::copy(corrected_arrival_delay_ms.begin(),
+            corrected_arrival_delay_ms.end(), output_iterator);
+  output << "]" << std::endl;
+
+  output << "playout_delay_ms = [";
+  for (const auto& v : playout_delay_ms) {
+    if (!v) {
+      output << "float('nan'), ";
+    } else {
+      output << *v << ", ";
+    }
+  }
+  output << "]" << std::endl;
+
+  output << "target_delay_ms = [";
+  for (const auto& v : target_delay_ms) {
+    if (!v) {
+      output << "float('nan'), ";
+    } else {
+      output << *v << ", ";
+    }
+  }
+  output << "]" << std::endl;
+
+  output << "if __name__ == '__main__':" << std::endl;
+  output << "  h=plt.plot(send_time_s, arrival_delay_ms, "
+         << "send_time_s, target_delay_ms, 'g.', "
+         << "send_time_s, playout_delay_ms)" << std::endl;
+  output << "  plt.setp(h[0],'color',[.75, .75, .75])" << std::endl;
+  output << "  plt.setp(h[1],'markersize',6)" << std::endl;
+  output << "  plt.setp(h[2],'linewidth',1.5)" << std::endl;
+  output << "  plt.axis('tight')" << std::endl;
+  output << "  plt.xlabel('send time [s]')" << std::endl;
+  output << "  plt.ylabel('relative delay [ms]')" << std::endl;
+  if (!ssrcs_.empty()) {
+    auto ssrc_it = ssrcs_.cbegin();
+    output << "  plt.title('SSRC: 0x" << std::hex
+           << static_cast<int64_t>(*ssrc_it++);
+    while (ssrc_it != ssrcs_.end()) {
+      output << ", 0x" << std::hex << static_cast<int64_t>(*ssrc_it++);
+    }
+    output << std::dec;
+    auto pt_it = payload_types_.cbegin();
+    output << "; Payload Types: " << *pt_it++;
+    while (pt_it != payload_types_.end()) {
+      output << ", " << *pt_it++;
+    }
+    output << "')" << std::endl;
+  }
+  output << "  plt.show()" << std::endl;
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h b/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h
new file mode 100644
index 0000000..e6d6913
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h
@@ -0,0 +1,76 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_DELAY_ANALYZER_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_DELAY_ANALYZER_H_
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "api/optional.h"
+#include "modules/audio_coding/neteq/tools/neteq_input.h"
+#include "modules/audio_coding/neteq/tools/neteq_test.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace test {
+
+class NetEqDelayAnalyzer : public test::NetEqPostInsertPacket,
+                           public test::NetEqGetAudioCallback {
+ public:
+  void AfterInsertPacket(const test::NetEqInput::PacketData& packet,
+                         NetEq* neteq) override;
+
+  void BeforeGetAudio(NetEq* neteq) override;
+
+  void AfterGetAudio(int64_t time_now_ms,
+                     const AudioFrame& audio_frame,
+                     bool muted,
+                     NetEq* neteq) override;
+
+  void CreateGraphs(std::vector<float>* send_times_s,
+                    std::vector<float>* arrival_delay_ms,
+                    std::vector<float>* corrected_arrival_delay_ms,
+                    std::vector<rtc::Optional<float>>* playout_delay_ms,
+                    std::vector<rtc::Optional<float>>* target_delay_ms) const;
+
+  // Creates a matlab script with file name script_name. When executed in
+  // Matlab, the script will generate graphs with the same timing information
+  // as provided by CreateGraphs.
+  void CreateMatlabScript(const std::string& script_name) const;
+
+  // Creates a python script with file name |script_name|. When executed in
+  // Python, the script will generate graphs with the same timing information
+  // as provided by CreateGraphs.
+  void CreatePythonScript(const std::string& script_name) const;
+
+ private:
+  struct TimingData {
+    explicit TimingData(double at) : arrival_time_ms(at) {}
+    double arrival_time_ms;
+    rtc::Optional<int64_t> decode_get_audio_count;
+    rtc::Optional<int64_t> sync_delay_ms;
+    rtc::Optional<int> target_delay_ms;
+    rtc::Optional<int> current_delay_ms;
+  };
+  std::map<uint32_t, TimingData> data_;
+  std::vector<int64_t> get_audio_time_ms_;
+  size_t get_audio_count_ = 0;
+  size_t last_sync_buffer_ms_ = 0;
+  int last_sample_rate_hz_ = 0;
+  std::set<uint32_t> ssrcs_;
+  std::set<int> payload_types_;
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_DELAY_ANALYZER_H_
diff --git a/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc b/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
new file mode 100644
index 0000000..68dde52
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
@@ -0,0 +1,59 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "modules/audio_coding/neteq/tools/neteq_external_decoder_test.h"
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "rtc_base/format_macros.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+NetEqExternalDecoderTest::NetEqExternalDecoderTest(NetEqDecoder codec,
+                                                   int sample_rate_hz,
+                                                   AudioDecoder* decoder)
+    : codec_(codec),
+      decoder_(decoder),
+      sample_rate_hz_(sample_rate_hz),
+      channels_(decoder_->Channels()) {
+  NetEq::Config config;
+  config.sample_rate_hz = sample_rate_hz_;
+  neteq_.reset(NetEq::Create(config, CreateBuiltinAudioDecoderFactory()));
+}
+
+void NetEqExternalDecoderTest::Init() {
+  ASSERT_EQ(NetEq::kOK,
+            neteq_->RegisterExternalDecoder(decoder_, codec_, name_,
+                                            kPayloadType));
+}
+
+void NetEqExternalDecoderTest::InsertPacket(
+    RTPHeader rtp_header,
+    rtc::ArrayView<const uint8_t> payload,
+    uint32_t receive_timestamp) {
+  ASSERT_EQ(NetEq::kOK,
+            neteq_->InsertPacket(rtp_header, payload, receive_timestamp));
+}
+
+void NetEqExternalDecoderTest::GetOutputAudio(AudioFrame* output) {
+  // Get audio from regular instance.
+  bool muted;
+  EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(output, &muted));
+  ASSERT_FALSE(muted);
+  EXPECT_EQ(channels_, output->num_channels_);
+  EXPECT_EQ(static_cast<size_t>(kOutputLengthMs * sample_rate_hz_ / 1000),
+            output->samples_per_channel_);
+  EXPECT_EQ(sample_rate_hz_, neteq_->last_output_sample_rate_hz());
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h b/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
new file mode 100644
index 0000000..aefa62e
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_EXTERNAL_DECODER_TEST_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_EXTERNAL_DECODER_TEST_H_
+
+#include <memory>
+#include <string>
+
+#include "api/audio_codecs/audio_decoder.h"
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/neteq/include/neteq.h"
+#include "modules/include/module_common_types.h"
+
+namespace webrtc {
+namespace test {
+// This test class provides a way run NetEQ with an external decoder.
+class NetEqExternalDecoderTest {
+ protected:
+  static const uint8_t kPayloadType = 95;
+  static const int kOutputLengthMs = 10;
+
+  // The external decoder |decoder| is suppose to be of type |codec|.
+  NetEqExternalDecoderTest(NetEqDecoder codec,
+                           int sample_rate_hz,
+                           AudioDecoder* decoder);
+
+  virtual ~NetEqExternalDecoderTest() { }
+
+  // In Init(), we register the external decoder.
+  void Init();
+
+  // Inserts a new packet with |rtp_header| and |payload| of
+  // |payload_size_bytes| bytes. The |receive_timestamp| is an indication
+  // of the time when the packet was received, and should be measured with
+  // the same tick rate as the RTP timestamp of the current payload.
+  virtual void InsertPacket(RTPHeader rtp_header,
+                            rtc::ArrayView<const uint8_t> payload,
+                            uint32_t receive_timestamp);
+
+  // Get 10 ms of audio data.
+  void GetOutputAudio(AudioFrame* output);
+
+  NetEq* neteq() { return neteq_.get(); }
+
+ private:
+  NetEqDecoder codec_;
+  std::string name_ = "dummy name";
+  AudioDecoder* decoder_;
+  int sample_rate_hz_;
+  size_t channels_;
+  std::unique_ptr<NetEq> neteq_;
+};
+
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_EXTERNAL_DECODER_TEST_H_
diff --git a/modules/audio_coding/neteq/tools/neteq_input.cc b/modules/audio_coding/neteq/tools/neteq_input.cc
new file mode 100644
index 0000000..44513ab
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/neteq_input.cc
@@ -0,0 +1,32 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/neteq_input.h"
+
+#include <sstream>
+
+namespace webrtc {
+namespace test {
+
+std::string NetEqInput::PacketData::ToString() const {
+  std::stringstream ss;
+  ss << "{"
+     << "time_ms: " << static_cast<int64_t>(time_ms) << ", "
+     << "header: {"
+     << "pt: " << static_cast<int>(header.payloadType) << ", "
+     << "sn: " << header.sequenceNumber << ", "
+     << "ts: " << header.timestamp << ", "
+     << "ssrc: " << header.ssrc << "}, "
+     << "payload bytes: " << payload.size() << "}";
+  return ss.str();
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/neteq_input.h b/modules/audio_coding/neteq/tools/neteq_input.h
new file mode 100644
index 0000000..88d9eb9
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/neteq_input.h
@@ -0,0 +1,83 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_INPUT_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_INPUT_H_
+
+#include <algorithm>
+#include <memory>
+#include <string>
+
+#include "api/optional.h"
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/neteq/tools/packet.h"
+#include "modules/audio_coding/neteq/tools/packet_source.h"
+#include "rtc_base/buffer.h"
+
+namespace webrtc {
+namespace test {
+
+// Interface class for input to the NetEqTest class.
+class NetEqInput {
+ public:
+  struct PacketData {
+    std::string ToString() const;
+
+    RTPHeader header;
+    rtc::Buffer payload;
+    double time_ms;
+  };
+
+  virtual ~NetEqInput() = default;
+
+  // Returns at what time (in ms) NetEq::InsertPacket should be called next, or
+  // empty if the source is out of packets.
+  virtual rtc::Optional<int64_t> NextPacketTime() const = 0;
+
+  // Returns at what time (in ms) NetEq::GetAudio should be called next, or
+  // empty if no more output events are available.
+  virtual rtc::Optional<int64_t> NextOutputEventTime() const = 0;
+
+  // Returns the time (in ms) for the next event from either NextPacketTime()
+  // or NextOutputEventTime(), or empty if both are out of events.
+  rtc::Optional<int64_t> NextEventTime() const {
+    const auto a = NextPacketTime();
+    const auto b = NextOutputEventTime();
+    // Return the minimum of non-empty |a| and |b|, or empty if both are empty.
+    if (a) {
+      return b ? std::min(*a, *b) : a;
+    }
+    return b ? b : rtc::nullopt;
+  }
+
+  // Returns the next packet to be inserted into NetEq. The packet following the
+  // returned one is pre-fetched in the NetEqInput object, such that future
+  // calls to NextPacketTime() or NextHeader() will return information from that
+  // packet.
+  virtual std::unique_ptr<PacketData> PopPacket() = 0;
+
+  // Move to the next output event. This will make NextOutputEventTime() return
+  // a new value (potentially the same if several output events share the same
+  // time).
+  virtual void AdvanceOutputEvent() = 0;
+
+  // Returns true if the source has come to an end. An implementation must
+  // eventually return true from this method, or the test will end up in an
+  // infinite loop.
+  virtual bool ended() const = 0;
+
+  // Returns the RTP header for the next packet, i.e., the packet that will be
+  // delivered next by PopPacket().
+  virtual rtc::Optional<RTPHeader> NextHeader() const = 0;
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_INPUT_H_
diff --git a/modules/audio_coding/neteq/tools/neteq_packet_source_input.cc b/modules/audio_coding/neteq/tools/neteq_packet_source_input.cc
new file mode 100644
index 0000000..0741d7c
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/neteq_packet_source_input.cc
@@ -0,0 +1,115 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/neteq_packet_source_input.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "modules/audio_coding/neteq/tools/rtc_event_log_source.h"
+#include "modules/audio_coding/neteq/tools/rtp_file_source.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+
+NetEqPacketSourceInput::NetEqPacketSourceInput() : next_output_event_ms_(0) {}
+
+rtc::Optional<int64_t> NetEqPacketSourceInput::NextPacketTime() const {
+  return packet_
+             ? rtc::Optional<int64_t>(static_cast<int64_t>(packet_->time_ms()))
+             : rtc::nullopt;
+}
+
+rtc::Optional<RTPHeader> NetEqPacketSourceInput::NextHeader() const {
+  return packet_ ? rtc::Optional<RTPHeader>(packet_->header())
+                 : rtc::nullopt;
+}
+
+void NetEqPacketSourceInput::LoadNextPacket() {
+  packet_ = source()->NextPacket();
+}
+
+std::unique_ptr<NetEqInput::PacketData> NetEqPacketSourceInput::PopPacket() {
+  if (!packet_) {
+    return std::unique_ptr<PacketData>();
+  }
+  std::unique_ptr<PacketData> packet_data(new PacketData);
+  packet_data->header = packet_->header();
+  if (packet_->payload_length_bytes() == 0 &&
+      packet_->virtual_payload_length_bytes() > 0) {
+    // This is a header-only "dummy" packet. Set the payload to all zeros, with
+    // length according to the virtual length.
+    packet_data->payload.SetSize(packet_->virtual_payload_length_bytes());
+    std::fill_n(packet_data->payload.data(), packet_data->payload.size(), 0);
+  } else {
+    packet_data->payload.SetData(packet_->payload(),
+                                 packet_->payload_length_bytes());
+  }
+  packet_data->time_ms = packet_->time_ms();
+
+  LoadNextPacket();
+
+  return packet_data;
+}
+
+NetEqRtpDumpInput::NetEqRtpDumpInput(const std::string& file_name,
+                                     const RtpHeaderExtensionMap& hdr_ext_map)
+    : source_(RtpFileSource::Create(file_name)) {
+  for (const auto& ext_pair : hdr_ext_map) {
+    source_->RegisterRtpHeaderExtension(ext_pair.second, ext_pair.first);
+  }
+  LoadNextPacket();
+}
+
+rtc::Optional<int64_t> NetEqRtpDumpInput::NextOutputEventTime() const {
+  return next_output_event_ms_;
+}
+
+void NetEqRtpDumpInput::AdvanceOutputEvent() {
+  if (next_output_event_ms_) {
+    *next_output_event_ms_ += kOutputPeriodMs;
+  }
+  if (!NextPacketTime()) {
+    next_output_event_ms_ = rtc::nullopt;
+  }
+}
+
+PacketSource* NetEqRtpDumpInput::source() {
+  return source_.get();
+}
+
+NetEqEventLogInput::NetEqEventLogInput(const std::string& file_name,
+                                       const RtpHeaderExtensionMap& hdr_ext_map)
+    : source_(RtcEventLogSource::Create(file_name)) {
+  for (const auto& ext_pair : hdr_ext_map) {
+    source_->RegisterRtpHeaderExtension(ext_pair.second, ext_pair.first);
+  }
+  LoadNextPacket();
+  AdvanceOutputEvent();
+}
+
+rtc::Optional<int64_t> NetEqEventLogInput::NextOutputEventTime() const {
+  return next_output_event_ms_;
+}
+
+void NetEqEventLogInput::AdvanceOutputEvent() {
+  next_output_event_ms_ = source_->NextAudioOutputEventMs();
+  if (*next_output_event_ms_ == std::numeric_limits<int64_t>::max()) {
+    next_output_event_ms_ = rtc::nullopt;
+  }
+}
+
+PacketSource* NetEqEventLogInput::source() {
+  return source_.get();
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/neteq_packet_source_input.h b/modules/audio_coding/neteq/tools/neteq_packet_source_input.h
new file mode 100644
index 0000000..b482556
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/neteq_packet_source_input.h
@@ -0,0 +1,84 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PACKET_SOURCE_INPUT_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PACKET_SOURCE_INPUT_H_
+
+#include <map>
+#include <string>
+
+#include "modules/audio_coding/neteq/tools/neteq_input.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+
+namespace webrtc {
+namespace test {
+
+class RtpFileSource;
+class RtcEventLogSource;
+
+// An adapter class to dress up a PacketSource object as a NetEqInput.
+class NetEqPacketSourceInput : public NetEqInput {
+ public:
+  using RtpHeaderExtensionMap = std::map<int, webrtc::RTPExtensionType>;
+
+  NetEqPacketSourceInput();
+  rtc::Optional<int64_t> NextPacketTime() const override;
+  std::unique_ptr<PacketData> PopPacket() override;
+  rtc::Optional<RTPHeader> NextHeader() const override;
+  bool ended() const override { return !next_output_event_ms_; }
+
+ protected:
+  virtual PacketSource* source() = 0;
+  void LoadNextPacket();
+
+  rtc::Optional<int64_t> next_output_event_ms_;
+
+ private:
+  std::unique_ptr<Packet> packet_;
+};
+
+// Implementation of NetEqPacketSourceInput to be used with an RtpFileSource.
+class NetEqRtpDumpInput final : public NetEqPacketSourceInput {
+ public:
+  NetEqRtpDumpInput(const std::string& file_name,
+                    const RtpHeaderExtensionMap& hdr_ext_map);
+
+  rtc::Optional<int64_t> NextOutputEventTime() const override;
+  void AdvanceOutputEvent() override;
+
+ protected:
+  PacketSource* source() override;
+
+ private:
+  static constexpr int64_t kOutputPeriodMs = 10;
+
+  std::unique_ptr<RtpFileSource> source_;
+};
+
+// Implementation of NetEqPacketSourceInput to be used with an
+// RtcEventLogSource.
+class NetEqEventLogInput final : public NetEqPacketSourceInput {
+ public:
+  NetEqEventLogInput(const std::string& file_name,
+                     const RtpHeaderExtensionMap& hdr_ext_map);
+
+  rtc::Optional<int64_t> NextOutputEventTime() const override;
+  void AdvanceOutputEvent() override;
+
+ protected:
+  PacketSource* source() override;
+
+ private:
+  std::unique_ptr<RtcEventLogSource> source_;
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PACKET_SOURCE_INPUT_H_
diff --git a/modules/audio_coding/neteq/tools/neteq_performance_test.cc b/modules/audio_coding/neteq/tools/neteq_performance_test.cc
new file mode 100644
index 0000000..27ecdf4
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/neteq_performance_test.cc
@@ -0,0 +1,133 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/neteq_performance_test.h"
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
+#include "modules/audio_coding/neteq/include/neteq.h"
+#include "modules/audio_coding/neteq/tools/audio_loop.h"
+#include "modules/audio_coding/neteq/tools/rtp_generator.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/clock.h"
+#include "test/testsupport/fileutils.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+using webrtc::NetEq;
+using webrtc::test::AudioLoop;
+using webrtc::test::RtpGenerator;
+
+namespace webrtc {
+namespace test {
+
+int64_t NetEqPerformanceTest::Run(int runtime_ms,
+                                  int lossrate,
+                                  double drift_factor) {
+  const std::string kInputFileName =
+      webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+  const int kSampRateHz = 32000;
+  const webrtc::NetEqDecoder kDecoderType =
+      webrtc::NetEqDecoder::kDecoderPCM16Bswb32kHz;
+  const std::string kDecoderName = "pcm16-swb32";
+  const int kPayloadType = 95;
+
+  // Initialize NetEq instance.
+  NetEq::Config config;
+  config.sample_rate_hz = kSampRateHz;
+  NetEq* neteq = NetEq::Create(config, CreateBuiltinAudioDecoderFactory());
+  // Register decoder in |neteq|.
+  if (neteq->RegisterPayloadType(kDecoderType, kDecoderName, kPayloadType) != 0)
+    return -1;
+
+  // Set up AudioLoop object.
+  AudioLoop audio_loop;
+  const size_t kMaxLoopLengthSamples = kSampRateHz * 10;  // 10 second loop.
+  const size_t kInputBlockSizeSamples = 60 * kSampRateHz / 1000;  // 60 ms.
+  if (!audio_loop.Init(kInputFileName, kMaxLoopLengthSamples,
+                       kInputBlockSizeSamples))
+    return -1;
+
+  int32_t time_now_ms = 0;
+
+  // Get first input packet.
+  RTPHeader rtp_header;
+  RtpGenerator rtp_gen(kSampRateHz / 1000);
+  // Start with positive drift first half of simulation.
+  rtp_gen.set_drift_factor(drift_factor);
+  bool drift_flipped = false;
+  int32_t packet_input_time_ms =
+      rtp_gen.GetRtpHeader(kPayloadType, kInputBlockSizeSamples, &rtp_header);
+  auto input_samples = audio_loop.GetNextBlock();
+  if (input_samples.empty())
+    exit(1);
+  uint8_t input_payload[kInputBlockSizeSamples * sizeof(int16_t)];
+  size_t payload_len = WebRtcPcm16b_Encode(input_samples.data(),
+                                           input_samples.size(), input_payload);
+  RTC_CHECK_EQ(sizeof(input_payload), payload_len);
+
+  // Main loop.
+  webrtc::Clock* clock = webrtc::Clock::GetRealTimeClock();
+  int64_t start_time_ms = clock->TimeInMilliseconds();
+  AudioFrame out_frame;
+  while (time_now_ms < runtime_ms) {
+    while (packet_input_time_ms <= time_now_ms) {
+      // Drop every N packets, where N = FLAG_lossrate.
+      bool lost = false;
+      if (lossrate > 0) {
+        lost = ((rtp_header.sequenceNumber - 1) % lossrate) == 0;
+      }
+      if (!lost) {
+        // Insert packet.
+        int error =
+            neteq->InsertPacket(rtp_header, input_payload,
+                                packet_input_time_ms * kSampRateHz / 1000);
+        if (error != NetEq::kOK)
+          return -1;
+      }
+
+      // Get next packet.
+      packet_input_time_ms = rtp_gen.GetRtpHeader(kPayloadType,
+                                                  kInputBlockSizeSamples,
+                                                  &rtp_header);
+      input_samples = audio_loop.GetNextBlock();
+      if (input_samples.empty())
+        return -1;
+      payload_len = WebRtcPcm16b_Encode(input_samples.data(),
+                                        input_samples.size(), input_payload);
+      assert(payload_len == kInputBlockSizeSamples * sizeof(int16_t));
+    }
+
+    // Get output audio, but don't do anything with it.
+    bool muted;
+    int error = neteq->GetAudio(&out_frame, &muted);
+    RTC_CHECK(!muted);
+    if (error != NetEq::kOK)
+      return -1;
+
+    assert(out_frame.samples_per_channel_ ==
+           static_cast<size_t>(kSampRateHz * 10 / 1000));
+
+    static const int kOutputBlockSizeMs = 10;
+    time_now_ms += kOutputBlockSizeMs;
+    if (time_now_ms >= runtime_ms / 2 && !drift_flipped) {
+      // Apply negative drift second half of simulation.
+      rtp_gen.set_drift_factor(-drift_factor);
+      drift_flipped = true;
+    }
+  }
+  int64_t end_time_ms = clock->TimeInMilliseconds();
+  delete neteq;
+  return end_time_ms - start_time_ms;
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/neteq_performance_test.h b/modules/audio_coding/neteq/tools/neteq_performance_test.h
new file mode 100644
index 0000000..dcf0314
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/neteq_performance_test.h
@@ -0,0 +1,32 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PERFORMANCE_TEST_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PERFORMANCE_TEST_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace test {
+
+class NetEqPerformanceTest {
+ public:
+  // Runs a performance test with parameters as follows:
+  //   |runtime_ms|: the simulation time, i.e., the duration of the audio data.
+  //   |lossrate|: drop one out of |lossrate| packets, e.g., one out of 10.
+  //   |drift_factor|: clock drift in [0, 1].
+  // Returns the runtime in ms.
+  static int64_t Run(int runtime_ms, int lossrate, double drift_factor);
+};
+
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PERFORMANCE_TEST_H_
diff --git a/modules/audio_coding/neteq/tools/neteq_quality_test.cc b/modules/audio_coding/neteq/tools/neteq_quality_test.cc
new file mode 100644
index 0000000..82fa90e
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/neteq_quality_test.cc
@@ -0,0 +1,432 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <stdio.h>
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+#include "modules/audio_coding/neteq/tools/neteq_quality_test.h"
+#include "modules/audio_coding/neteq/tools/output_audio_file.h"
+#include "modules/audio_coding/neteq/tools/output_wav_file.h"
+#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h"
+#include "rtc_base/checks.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+namespace test {
+
+const uint8_t kPayloadType = 95;
+const int kOutputSizeMs = 10;
+const int kInitSeed = 0x12345678;
+const int kPacketLossTimeUnitMs = 10;
+
+const std::string& DefaultInFilename() {
+  static const std::string path =
+      ResourcePath("audio_coding/speech_mono_16kHz", "pcm");
+  return path;
+}
+
+const std::string& DefaultOutFilename() {
+  static const std::string path = OutputPath() + "neteq_quality_test_out.pcm";
+  return path;
+}
+
+// Common validator for file names.
+static bool ValidateFilename(const std::string& value, bool write) {
+  FILE* fid = write ? fopen(value.c_str(), "wb") : fopen(value.c_str(), "rb");
+  if (fid == nullptr)
+    return false;
+  fclose(fid);
+  return true;
+}
+
+DEFINE_string(in_filename, DefaultInFilename().c_str(),
+    "Filename for input audio (specify sample rate with --input_sample_rate, "
+    "and channels with --channels).");
+
+DEFINE_int(input_sample_rate, 16000, "Sample rate of input file in Hz.");
+
+DEFINE_int(channels, 1, "Number of channels in input audio.");
+
+DEFINE_string(out_filename, DefaultOutFilename().c_str(),
+    "Name of output audio file.");
+
+DEFINE_int(runtime_ms, 10000, "Simulated runtime (milliseconds).");
+
+DEFINE_int(packet_loss_rate, 10, "Percentile of packet loss.");
+
+DEFINE_int(random_loss_mode,
+           kUniformLoss,
+           "Random loss mode: 0--no loss, 1--uniform loss, 2--Gilbert Elliot "
+           "loss, 3--fixed loss.");
+
+DEFINE_int(burst_length, 30,
+    "Burst length in milliseconds, only valid for Gilbert Elliot loss.");
+
+DEFINE_float(drift_factor, 0.0, "Time drift factor.");
+
+DEFINE_int(preload_packets, 0, "Preload the buffer with this many packets.");
+
+DEFINE_string(loss_events,
+              "",
+              "List of loss events time and duration separated by comma: "
+              "<first_event_time> <first_event_duration>, <second_event_time> "
+              "<second_event_duration>, ...");
+
+// ProbTrans00Solver() is to calculate the transition probability from no-loss
+// state to itself in a modified Gilbert Elliot packet loss model. The result is
+// to achieve the target packet loss rate |loss_rate|, when a packet is not
+// lost only if all |units| drawings within the duration of the packet result in
+// no-loss.
+static double ProbTrans00Solver(int units, double loss_rate,
+                                double prob_trans_10) {
+  if (units == 1)
+    return prob_trans_10 / (1.0f - loss_rate) - prob_trans_10;
+// 0 == prob_trans_00 ^ (units - 1) + (1 - loss_rate) / prob_trans_10 *
+//     prob_trans_00 - (1 - loss_rate) * (1 + 1 / prob_trans_10).
+// There is a unique solution between 0.0 and 1.0, due to the monotonicity and
+// an opposite sign at 0.0 and 1.0.
+// For simplicity, we reformulate the equation as
+//     f(x) = x ^ (units - 1) + a x + b.
+// Its derivative is
+//     f'(x) = (units - 1) x ^ (units - 2) + a.
+// The derivative is strictly greater than 0 when x is between 0 and 1.
+// We use Newton's method to solve the equation, iteration is
+//     x(k+1) = x(k) - f(x) / f'(x);
+  const double kPrecision = 0.001f;
+  const int kIterations = 100;
+  const double a = (1.0f - loss_rate) / prob_trans_10;
+  const double b = (loss_rate - 1.0f) * (1.0f + 1.0f / prob_trans_10);
+  double x = 0.0f;  // Starting point;
+  double f = b;
+  double f_p;
+  int iter = 0;
+  while ((f >= kPrecision || f <= -kPrecision) && iter < kIterations) {
+    f_p = (units - 1.0f) * pow(x, units - 2) + a;
+    x -= f / f_p;
+    if (x > 1.0f) {
+      x = 1.0f;
+    } else if (x < 0.0f) {
+      x = 0.0f;
+    }
+    f = pow(x, units - 1) + a * x + b;
+    iter ++;
+  }
+  return x;
+}
+
+NetEqQualityTest::NetEqQualityTest(int block_duration_ms,
+                                   int in_sampling_khz,
+                                   int out_sampling_khz,
+                                   NetEqDecoder decoder_type)
+    : decoder_type_(decoder_type),
+      channels_(static_cast<size_t>(FLAG_channels)),
+      decoded_time_ms_(0),
+      decodable_time_ms_(0),
+      drift_factor_(FLAG_drift_factor),
+      packet_loss_rate_(FLAG_packet_loss_rate),
+      block_duration_ms_(block_duration_ms),
+      in_sampling_khz_(in_sampling_khz),
+      out_sampling_khz_(out_sampling_khz),
+      in_size_samples_(
+          static_cast<size_t>(in_sampling_khz_ * block_duration_ms_)),
+      payload_size_bytes_(0),
+      max_payload_bytes_(0),
+      in_file_(new ResampleInputAudioFile(FLAG_in_filename,
+                                          FLAG_input_sample_rate,
+                                          in_sampling_khz * 1000)),
+      rtp_generator_(
+          new RtpGenerator(in_sampling_khz_, 0, 0, decodable_time_ms_)),
+      total_payload_size_bytes_(0) {
+  // Flag validation
+  RTC_CHECK(ValidateFilename(FLAG_in_filename, false))
+      << "Invalid input filename.";
+
+  RTC_CHECK(FLAG_input_sample_rate == 8000 || FLAG_input_sample_rate == 16000 ||
+            FLAG_input_sample_rate == 32000 || FLAG_input_sample_rate == 48000)
+      << "Invalid sample rate should be 8000, 16000, 32000 or 48000 Hz.";
+
+  RTC_CHECK_EQ(FLAG_channels, 1)
+      << "Invalid number of channels, current support only 1.";
+
+  RTC_CHECK(ValidateFilename(FLAG_out_filename, true))
+      << "Invalid output filename.";
+
+  RTC_CHECK_GT(FLAG_runtime_ms, 0)
+      << "Invalid runtime, should be greater than 0.";
+
+  RTC_CHECK(FLAG_packet_loss_rate >= 0 && FLAG_packet_loss_rate <= 100)
+      << "Invalid packet loss percentile, should be between 0 and 100.";
+
+  RTC_CHECK(FLAG_random_loss_mode >= 0 && FLAG_random_loss_mode < kLastLossMode)
+      << "Invalid random packet loss mode, should be between 0 and "
+      << kLastLossMode - 1 << ".";
+
+  RTC_CHECK_GE(FLAG_burst_length, kPacketLossTimeUnitMs)
+      << "Invalid burst length, should be greater than or equal to "
+      << kPacketLossTimeUnitMs << " ms.";
+
+  RTC_CHECK_GT(FLAG_drift_factor, -0.1)
+      << "Invalid drift factor, should be greater than -0.1.";
+
+  RTC_CHECK_GE(FLAG_preload_packets, 0)
+      << "Invalid number of packets to preload; must be non-negative.";
+
+  const std::string out_filename = FLAG_out_filename;
+  const std::string log_filename = out_filename + ".log";
+  log_file_.open(log_filename.c_str(), std::ofstream::out);
+  RTC_CHECK(log_file_.is_open());
+
+  if (out_filename.size() >= 4 &&
+      out_filename.substr(out_filename.size() - 4) == ".wav") {
+    // Open a wav file.
+    output_.reset(
+        new webrtc::test::OutputWavFile(out_filename, 1000 * out_sampling_khz));
+  } else {
+    // Open a pcm file.
+    output_.reset(new webrtc::test::OutputAudioFile(out_filename));
+  }
+
+  NetEq::Config config;
+  config.sample_rate_hz = out_sampling_khz_ * 1000;
+  neteq_.reset(
+      NetEq::Create(config, webrtc::CreateBuiltinAudioDecoderFactory()));
+  max_payload_bytes_ = in_size_samples_ * channels_ * sizeof(int16_t);
+  in_data_.reset(new int16_t[in_size_samples_ * channels_]);
+}
+
+NetEqQualityTest::~NetEqQualityTest() {
+  log_file_.close();
+}
+
+bool NoLoss::Lost(int now_ms) {
+  return false;
+}
+
+UniformLoss::UniformLoss(double loss_rate)
+    : loss_rate_(loss_rate) {
+}
+
+bool UniformLoss::Lost(int now_ms) {
+  int drop_this = rand();
+  return (drop_this < loss_rate_ * RAND_MAX);
+}
+
+GilbertElliotLoss::GilbertElliotLoss(double prob_trans_11, double prob_trans_01)
+    : prob_trans_11_(prob_trans_11),
+      prob_trans_01_(prob_trans_01),
+      lost_last_(false),
+      uniform_loss_model_(new UniformLoss(0)) {
+}
+
+GilbertElliotLoss::~GilbertElliotLoss() {}
+
+bool GilbertElliotLoss::Lost(int now_ms) {
+  // Simulate bursty channel (Gilbert model).
+  // (1st order) Markov chain model with memory of the previous/last
+  // packet state (lost or received).
+  if (lost_last_) {
+    // Previous packet was not received.
+    uniform_loss_model_->set_loss_rate(prob_trans_11_);
+    return lost_last_ = uniform_loss_model_->Lost(now_ms);
+  } else {
+    uniform_loss_model_->set_loss_rate(prob_trans_01_);
+    return lost_last_ = uniform_loss_model_->Lost(now_ms);
+  }
+}
+
+FixedLossModel::FixedLossModel(
+    std::set<FixedLossEvent, FixedLossEventCmp> loss_events)
+    : loss_events_(loss_events) {
+  loss_events_it_ = loss_events_.begin();
+}
+
+FixedLossModel::~FixedLossModel() {}
+
+bool FixedLossModel::Lost(int now_ms) {
+  if (loss_events_it_ != loss_events_.end() &&
+      now_ms > loss_events_it_->start_ms) {
+    if (now_ms <= loss_events_it_->start_ms + loss_events_it_->duration_ms) {
+      return true;
+    } else {
+      ++loss_events_it_;
+      return false;
+    }
+  }
+  return false;
+}
+
+void NetEqQualityTest::SetUp() {
+  ASSERT_EQ(0,
+            neteq_->RegisterPayloadType(decoder_type_, "noname", kPayloadType));
+  rtp_generator_->set_drift_factor(drift_factor_);
+
+  int units = block_duration_ms_ / kPacketLossTimeUnitMs;
+  switch (FLAG_random_loss_mode) {
+    case kUniformLoss: {
+      // |unit_loss_rate| is the packet loss rate for each unit time interval
+      // (kPacketLossTimeUnitMs). Since a packet loss event is generated if any
+      // of |block_duration_ms_ / kPacketLossTimeUnitMs| unit time intervals of
+      // a full packet duration is drawn with a loss, |unit_loss_rate| fulfills
+      // (1 - unit_loss_rate) ^ (block_duration_ms_ / kPacketLossTimeUnitMs) ==
+      // 1 - packet_loss_rate.
+      double unit_loss_rate = (1.0f - pow(1.0f - 0.01f * packet_loss_rate_,
+          1.0f / units));
+      loss_model_.reset(new UniformLoss(unit_loss_rate));
+      break;
+    }
+    case kGilbertElliotLoss: {
+      // |FLAG_burst_length| should be integer times of kPacketLossTimeUnitMs.
+      ASSERT_EQ(0, FLAG_burst_length % kPacketLossTimeUnitMs);
+
+      // We do not allow 100 percent packet loss in Gilbert Elliot model, which
+      // makes no sense.
+      ASSERT_GT(100, packet_loss_rate_);
+
+      // To guarantee the overall packet loss rate, transition probabilities
+      // need to satisfy:
+      // pi_0 * (1 - prob_trans_01_) ^ units +
+      //     pi_1 * prob_trans_10_ ^ (units - 1) == 1 - loss_rate
+      // pi_0 = prob_trans_10 / (prob_trans_10 + prob_trans_01_)
+      //     is the stationary state probability of no-loss
+      // pi_1 = prob_trans_01_ / (prob_trans_10 + prob_trans_01_)
+      //     is the stationary state probability of loss
+      // After a derivation prob_trans_00 should satisfy:
+      // prob_trans_00 ^ (units - 1) = (loss_rate - 1) / prob_trans_10 *
+      //     prob_trans_00 + (1 - loss_rate) * (1 + 1 / prob_trans_10).
+      double loss_rate = 0.01f * packet_loss_rate_;
+      double prob_trans_10 = 1.0f * kPacketLossTimeUnitMs / FLAG_burst_length;
+      double prob_trans_00 = ProbTrans00Solver(units, loss_rate, prob_trans_10);
+      loss_model_.reset(new GilbertElliotLoss(1.0f - prob_trans_10,
+                                              1.0f - prob_trans_00));
+      break;
+    }
+    case kFixedLoss: {
+      std::istringstream loss_events_stream(FLAG_loss_events);
+      std::string loss_event_string;
+      std::set<FixedLossEvent, FixedLossEventCmp> loss_events;
+      while (std::getline(loss_events_stream, loss_event_string, ',')) {
+        std::vector<int> loss_event_params;
+        std::istringstream loss_event_params_stream(loss_event_string);
+        std::copy(std::istream_iterator<int>(loss_event_params_stream),
+                  std::istream_iterator<int>(),
+                  std::back_inserter(loss_event_params));
+        RTC_CHECK_EQ(loss_event_params.size(), 2);
+        auto result = loss_events.insert(
+            FixedLossEvent(loss_event_params[0], loss_event_params[1]));
+        RTC_CHECK(result.second);
+      }
+      RTC_CHECK_GT(loss_events.size(), 0);
+      loss_model_.reset(new FixedLossModel(loss_events));
+      break;
+    }
+    default: {
+      loss_model_.reset(new NoLoss);
+      break;
+    }
+  }
+
+  // Make sure that the packet loss profile is same for all derived tests.
+  srand(kInitSeed);
+}
+
+std::ofstream& NetEqQualityTest::Log() {
+  return log_file_;
+}
+
+bool NetEqQualityTest::PacketLost() {
+  int cycles = block_duration_ms_ / kPacketLossTimeUnitMs;
+
+  // The loop is to make sure that codecs with different block lengths share the
+  // same packet loss profile.
+  bool lost = false;
+  for (int idx = 0; idx < cycles; idx ++) {
+    if (loss_model_->Lost(decoded_time_ms_)) {
+      // The packet will be lost if any of the drawings indicates a loss, but
+      // the loop has to go on to make sure that codecs with different block
+      // lengths keep the same pace.
+      lost = true;
+    }
+  }
+  return lost;
+}
+
+int NetEqQualityTest::Transmit() {
+  int packet_input_time_ms =
+      rtp_generator_->GetRtpHeader(kPayloadType, in_size_samples_,
+                                   &rtp_header_);
+  Log() << "Packet of size "
+        << payload_size_bytes_
+        << " bytes, for frame at "
+        << packet_input_time_ms
+        << " ms ";
+  if (payload_size_bytes_ > 0) {
+    if (!PacketLost()) {
+      int ret = neteq_->InsertPacket(
+          rtp_header_,
+          rtc::ArrayView<const uint8_t>(payload_.data(), payload_size_bytes_),
+          packet_input_time_ms * in_sampling_khz_);
+      if (ret != NetEq::kOK)
+        return -1;
+      Log() << "was sent.";
+    } else {
+      Log() << "was lost.";
+    }
+  }
+  Log() << std::endl;
+  return packet_input_time_ms;
+}
+
+int NetEqQualityTest::DecodeBlock() {
+  bool muted;
+  int ret = neteq_->GetAudio(&out_frame_, &muted);
+  RTC_CHECK(!muted);
+
+  if (ret != NetEq::kOK) {
+    return -1;
+  } else {
+    RTC_DCHECK_EQ(out_frame_.num_channels_, channels_);
+    RTC_DCHECK_EQ(out_frame_.samples_per_channel_,
+                  static_cast<size_t>(kOutputSizeMs * out_sampling_khz_));
+    RTC_CHECK(output_->WriteArray(
+        out_frame_.data(),
+        out_frame_.samples_per_channel_ * out_frame_.num_channels_));
+    return static_cast<int>(out_frame_.samples_per_channel_);
+  }
+}
+
+void NetEqQualityTest::Simulate() {
+  int audio_size_samples;
+
+  while (decoded_time_ms_ < FLAG_runtime_ms) {
+    // Preload the buffer if needed.
+    while (decodable_time_ms_ - FLAG_preload_packets * block_duration_ms_ <
+           decoded_time_ms_) {
+      ASSERT_TRUE(in_file_->Read(in_size_samples_ * channels_, &in_data_[0]));
+      payload_.Clear();
+      payload_size_bytes_ = EncodeBlock(&in_data_[0],
+                                        in_size_samples_, &payload_,
+                                        max_payload_bytes_);
+      total_payload_size_bytes_ += payload_size_bytes_;
+      decodable_time_ms_ = Transmit() + block_duration_ms_;
+    }
+    audio_size_samples = DecodeBlock();
+    if (audio_size_samples > 0) {
+      decoded_time_ms_ += audio_size_samples / out_sampling_khz_;
+    }
+  }
+  Log() << "Average bit rate was "
+        << 8.0f * total_payload_size_bytes_ / FLAG_runtime_ms
+        << " kbps"
+        << std::endl;
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/neteq_quality_test.h b/modules/audio_coding/neteq/tools/neteq_quality_test.h
new file mode 100644
index 0000000..531a080
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/neteq_quality_test.h
@@ -0,0 +1,173 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_QUALITY_TEST_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_QUALITY_TEST_H_
+
+#include <fstream>
+#include <memory>
+
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/neteq/include/neteq.h"
+#include "modules/audio_coding/neteq/tools/audio_sink.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "modules/audio_coding/neteq/tools/rtp_generator.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/flags.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace test {
+
+enum LossModes {
+  kNoLoss,
+  kUniformLoss,
+  kGilbertElliotLoss,
+  kFixedLoss,
+  kLastLossMode
+};
+
+class LossModel {
+ public:
+  virtual ~LossModel() {};
+  virtual bool Lost(int now_ms) = 0;
+};
+
+class NoLoss : public LossModel {
+ public:
+  bool Lost(int now_ms) override;
+};
+
+class UniformLoss : public LossModel {
+ public:
+  UniformLoss(double loss_rate);
+  bool Lost(int now_ms) override;
+  void set_loss_rate(double loss_rate) { loss_rate_ = loss_rate; }
+
+ private:
+  double loss_rate_;
+};
+
+class GilbertElliotLoss : public LossModel {
+ public:
+  GilbertElliotLoss(double prob_trans_11, double prob_trans_01);
+  ~GilbertElliotLoss() override;
+  bool Lost(int now_ms) override;
+
+ private:
+  // Prob. of losing current packet, when previous packet is lost.
+  double prob_trans_11_;
+  // Prob. of losing current packet, when previous packet is not lost.
+  double prob_trans_01_;
+  bool lost_last_;
+  std::unique_ptr<UniformLoss> uniform_loss_model_;
+};
+
+struct FixedLossEvent {
+  int start_ms;
+  int duration_ms;
+  FixedLossEvent(int start_ms, int duration_ms)
+      : start_ms(start_ms), duration_ms(duration_ms) {}
+};
+
+struct FixedLossEventCmp {
+  bool operator()(const FixedLossEvent& l_event,
+                  const FixedLossEvent& r_event) const {
+    return l_event.start_ms < r_event.start_ms;
+  }
+};
+
+class FixedLossModel : public LossModel {
+ public:
+  FixedLossModel(std::set<FixedLossEvent, FixedLossEventCmp> loss_events);
+  ~FixedLossModel() override;
+  bool Lost(int now_ms) override;
+
+ private:
+  std::set<FixedLossEvent, FixedLossEventCmp> loss_events_;
+  std::set<FixedLossEvent, FixedLossEventCmp>::iterator loss_events_it_;
+};
+
+class NetEqQualityTest : public ::testing::Test {
+ protected:
+  NetEqQualityTest(int block_duration_ms,
+                   int in_sampling_khz,
+                   int out_sampling_khz,
+                   NetEqDecoder decoder_type);
+  ~NetEqQualityTest() override;
+
+  void SetUp() override;
+
+  // EncodeBlock(...) does the following:
+  // 1. encodes a block of audio, saved in |in_data| and has a length of
+  // |block_size_samples| (samples per channel),
+  // 2. save the bit stream to |payload| of |max_bytes| bytes in size,
+  // 3. returns the length of the payload (in bytes),
+  virtual int EncodeBlock(int16_t* in_data, size_t block_size_samples,
+                          rtc::Buffer* payload, size_t max_bytes) = 0;
+
+  // PacketLost(...) determines weather a packet sent at an indicated time gets
+  // lost or not.
+  bool PacketLost();
+
+  // DecodeBlock() decodes a block of audio using the payload stored in
+  // |payload_| with the length of |payload_size_bytes_| (bytes). The decoded
+  // audio is to be stored in |out_data_|.
+  int DecodeBlock();
+
+  // Transmit() uses |rtp_generator_| to generate a packet and passes it to
+  // |neteq_|.
+  int Transmit();
+
+  // Runs encoding / transmitting / decoding.
+  void Simulate();
+
+  // Write to log file. Usage Log() << ...
+  std::ofstream& Log();
+
+  NetEqDecoder decoder_type_;
+  const size_t channels_;
+
+ private:
+  int decoded_time_ms_;
+  int decodable_time_ms_;
+  double drift_factor_;
+  int packet_loss_rate_;
+  const int block_duration_ms_;
+  const int in_sampling_khz_;
+  const int out_sampling_khz_;
+
+  // Number of samples per channel in a frame.
+  const size_t in_size_samples_;
+
+  size_t payload_size_bytes_;
+  size_t max_payload_bytes_;
+
+  std::unique_ptr<InputAudioFile> in_file_;
+  std::unique_ptr<AudioSink> output_;
+  std::ofstream log_file_;
+
+  std::unique_ptr<RtpGenerator> rtp_generator_;
+  std::unique_ptr<NetEq> neteq_;
+  std::unique_ptr<LossModel> loss_model_;
+
+  std::unique_ptr<int16_t[]> in_data_;
+  rtc::Buffer payload_;
+  AudioFrame out_frame_;
+  RTPHeader rtp_header_;
+
+  size_t total_payload_size_bytes_;
+};
+
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_QUALITY_TEST_H_
diff --git a/modules/audio_coding/neteq/tools/neteq_replacement_input.cc b/modules/audio_coding/neteq/tools/neteq_replacement_input.cc
new file mode 100644
index 0000000..6c846c0
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/neteq_replacement_input.cc
@@ -0,0 +1,109 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/neteq_replacement_input.h"
+
+#include "modules/audio_coding/neteq/tools/fake_decode_from_file.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+
+NetEqReplacementInput::NetEqReplacementInput(
+    std::unique_ptr<NetEqInput> source,
+    uint8_t replacement_payload_type,
+    const std::set<uint8_t>& comfort_noise_types,
+    const std::set<uint8_t>& forbidden_types)
+    : source_(std::move(source)),
+      replacement_payload_type_(replacement_payload_type),
+      comfort_noise_types_(comfort_noise_types),
+      forbidden_types_(forbidden_types) {
+  RTC_CHECK(source_);
+  packet_ = source_->PopPacket();
+  ReplacePacket();
+  RTC_CHECK(packet_);
+}
+
+rtc::Optional<int64_t> NetEqReplacementInput::NextPacketTime() const {
+  return packet_
+             ? rtc::Optional<int64_t>(static_cast<int64_t>(packet_->time_ms))
+             : rtc::nullopt;
+}
+
+rtc::Optional<int64_t> NetEqReplacementInput::NextOutputEventTime() const {
+  return source_->NextOutputEventTime();
+}
+
+std::unique_ptr<NetEqInput::PacketData> NetEqReplacementInput::PopPacket() {
+  std::unique_ptr<PacketData> to_return = std::move(packet_);
+  packet_ = source_->PopPacket();
+  ReplacePacket();
+  return to_return;
+}
+
+void NetEqReplacementInput::AdvanceOutputEvent() {
+  source_->AdvanceOutputEvent();
+}
+
+bool NetEqReplacementInput::ended() const {
+  return source_->ended();
+}
+
+rtc::Optional<RTPHeader> NetEqReplacementInput::NextHeader() const {
+  return source_->NextHeader();
+}
+
+void NetEqReplacementInput::ReplacePacket() {
+  if (!source_->NextPacketTime()) {
+    // End of input. Cannot do proper replacement on the very last packet, so we
+    // delete it instead.
+    packet_.reset();
+    return;
+  }
+
+  RTC_DCHECK(packet_);
+
+  RTC_CHECK_EQ(forbidden_types_.count(packet_->header.payloadType), 0)
+      << "Payload type " << static_cast<int>(packet_->header.payloadType)
+      << " is forbidden.";
+
+  // Check if this packet is comfort noise.
+  if (comfort_noise_types_.count(packet_->header.payloadType) != 0) {
+    // If CNG, simply insert a zero-energy one-byte payload.
+    uint8_t cng_payload[1] = {127};  // Max attenuation of CNG.
+    packet_->payload.SetData(cng_payload);
+    return;
+  }
+
+  rtc::Optional<RTPHeader> next_hdr = source_->NextHeader();
+  RTC_DCHECK(next_hdr);
+  uint8_t payload[12];
+  RTC_DCHECK_LE(last_frame_size_timestamps_, 120 * 48);
+  uint32_t input_frame_size_timestamps = last_frame_size_timestamps_;
+  const uint32_t timestamp_diff =
+      next_hdr->timestamp - packet_->header.timestamp;
+  if (next_hdr->sequenceNumber == packet_->header.sequenceNumber + 1 &&
+      timestamp_diff <= 120 * 48) {
+    // Packets are in order and the timestamp diff is less than 5760 samples.
+    // Accept the timestamp diff as a valid frame size.
+    input_frame_size_timestamps = timestamp_diff;
+    last_frame_size_timestamps_ = input_frame_size_timestamps;
+  }
+  RTC_DCHECK_LE(input_frame_size_timestamps, 120 * 48);
+  FakeDecodeFromFile::PrepareEncoded(packet_->header.timestamp,
+                                     input_frame_size_timestamps,
+                                     packet_->payload.size(), payload);
+  packet_->payload.SetData(payload);
+  packet_->header.payloadType = replacement_payload_type_;
+  return;
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/neteq_replacement_input.h b/modules/audio_coding/neteq/tools/neteq_replacement_input.h
new file mode 100644
index 0000000..3a89399
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/neteq_replacement_input.h
@@ -0,0 +1,51 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_REPLACEMENT_INPUT_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_REPLACEMENT_INPUT_H_
+
+#include <memory>
+#include <set>
+
+#include "modules/audio_coding/neteq/tools/neteq_input.h"
+
+namespace webrtc {
+namespace test {
+
+// This class converts the packets from a NetEqInput to fake encodings to be
+// decoded by a FakeDecodeFromFile decoder.
+class NetEqReplacementInput : public NetEqInput {
+ public:
+  NetEqReplacementInput(std::unique_ptr<NetEqInput> source,
+                        uint8_t replacement_payload_type,
+                        const std::set<uint8_t>& comfort_noise_types,
+                        const std::set<uint8_t>& forbidden_types);
+
+  rtc::Optional<int64_t> NextPacketTime() const override;
+  rtc::Optional<int64_t> NextOutputEventTime() const override;
+  std::unique_ptr<PacketData> PopPacket() override;
+  void AdvanceOutputEvent() override;
+  bool ended() const override;
+  rtc::Optional<RTPHeader> NextHeader() const override;
+
+ private:
+  void ReplacePacket();
+
+  std::unique_ptr<NetEqInput> source_;
+  const uint8_t replacement_payload_type_;
+  const std::set<uint8_t> comfort_noise_types_;
+  const std::set<uint8_t> forbidden_types_;
+  std::unique_ptr<PacketData> packet_;  // The next packet to deliver.
+  uint32_t last_frame_size_timestamps_ = 960;  // Initial guess: 20 ms @ 48 kHz.
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_REPLACEMENT_INPUT_H_
diff --git a/modules/audio_coding/neteq/tools/neteq_rtpplay.cc b/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
new file mode 100644
index 0000000..8c1fa38
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
@@ -0,0 +1,738 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <limits.h>  // For ULONG_MAX returned by strtoul.
+#include <stdio.h>
+#include <stdlib.h>  // For strtoul.
+#include <string.h>
+
+#include <algorithm>
+#include <ios>
+#include <iostream>
+#include <memory>
+#include <numeric>
+#include <string>
+
+#include "modules/audio_coding/neteq/include/neteq.h"
+#include "modules/audio_coding/neteq/tools/fake_decode_from_file.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"
+#include "modules/audio_coding/neteq/tools/neteq_packet_source_input.h"
+#include "modules/audio_coding/neteq/tools/neteq_replacement_input.h"
+#include "modules/audio_coding/neteq/tools/neteq_test.h"
+#include "modules/audio_coding/neteq/tools/output_audio_file.h"
+#include "modules/audio_coding/neteq/tools/output_wav_file.h"
+#include "modules/audio_coding/neteq/tools/rtp_file_source.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/flags.h"
+#include "test/testsupport/fileutils.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace test {
+namespace {
+
+// Parses the input string for a valid SSRC (at the start of the string). If a
+// valid SSRC is found, it is written to the output variable |ssrc|, and true is
+// returned. Otherwise, false is returned.
+bool ParseSsrc(const std::string& str, uint32_t* ssrc) {
+  if (str.empty())
+    return true;
+  int base = 10;
+  // Look for "0x" or "0X" at the start and change base to 16 if found.
+  if ((str.compare(0, 2, "0x") == 0) || (str.compare(0, 2, "0X") == 0))
+    base = 16;
+  errno = 0;
+  char* end_ptr;
+  unsigned long value = strtoul(str.c_str(), &end_ptr, base);
+  if (value == ULONG_MAX && errno == ERANGE)
+    return false;  // Value out of range for unsigned long.
+  if (sizeof(unsigned long) > sizeof(uint32_t) && value > 0xFFFFFFFF)
+    return false;  // Value out of range for uint32_t.
+  if (end_ptr - str.c_str() < static_cast<ptrdiff_t>(str.length()))
+    return false;  // Part of the string was not parsed.
+  *ssrc = static_cast<uint32_t>(value);
+  return true;
+}
+
+// Flag validators.
+bool ValidatePayloadType(int value) {
+  if (value >= 0 && value <= 127)  // Value is ok.
+    return true;
+  printf("Payload type must be between 0 and 127, not %d\n",
+         static_cast<int>(value));
+  return false;
+}
+
+bool ValidateSsrcValue(const std::string& str) {
+  uint32_t dummy_ssrc;
+  if (ParseSsrc(str, &dummy_ssrc)) // Value is ok.
+    return true;
+  printf("Invalid SSRC: %s\n", str.c_str());
+  return false;
+}
+
+static bool ValidateExtensionId(int value) {
+  if (value > 0 && value <= 255)  // Value is ok.
+    return true;
+  printf("Extension ID must be between 1 and 255, not %d\n",
+         static_cast<int>(value));
+  return false;
+}
+
+// Define command line flags.
+DEFINE_int(pcmu, 0, "RTP payload type for PCM-u");
+DEFINE_int(pcma, 8, "RTP payload type for PCM-a");
+DEFINE_int(ilbc, 102, "RTP payload type for iLBC");
+DEFINE_int(isac, 103, "RTP payload type for iSAC");
+DEFINE_int(isac_swb, 104, "RTP payload type for iSAC-swb (32 kHz)");
+DEFINE_int(opus, 111, "RTP payload type for Opus");
+DEFINE_int(pcm16b, 93, "RTP payload type for PCM16b-nb (8 kHz)");
+DEFINE_int(pcm16b_wb, 94, "RTP payload type for PCM16b-wb (16 kHz)");
+DEFINE_int(pcm16b_swb32, 95, "RTP payload type for PCM16b-swb32 (32 kHz)");
+DEFINE_int(pcm16b_swb48, 96, "RTP payload type for PCM16b-swb48 (48 kHz)");
+DEFINE_int(g722, 9, "RTP payload type for G.722");
+DEFINE_int(avt, 106, "RTP payload type for AVT/DTMF (8 kHz)");
+DEFINE_int(avt_16, 114, "RTP payload type for AVT/DTMF (16 kHz)");
+DEFINE_int(avt_32, 115, "RTP payload type for AVT/DTMF (32 kHz)");
+DEFINE_int(avt_48, 116, "RTP payload type for AVT/DTMF (48 kHz)");
+DEFINE_int(red, 117, "RTP payload type for redundant audio (RED)");
+DEFINE_int(cn_nb, 13, "RTP payload type for comfort noise (8 kHz)");
+DEFINE_int(cn_wb, 98, "RTP payload type for comfort noise (16 kHz)");
+DEFINE_int(cn_swb32, 99, "RTP payload type for comfort noise (32 kHz)");
+DEFINE_int(cn_swb48, 100, "RTP payload type for comfort noise (48 kHz)");
+DEFINE_bool(codec_map, false, "Prints the mapping between RTP payload type and "
+    "codec");
+DEFINE_string(replacement_audio_file, "",
+              "A PCM file that will be used to populate ""dummy"" RTP packets");
+DEFINE_string(ssrc,
+              "",
+              "Only use packets with this SSRC (decimal or hex, the latter "
+              "starting with 0x)");
+DEFINE_int(audio_level, 1, "Extension ID for audio level (RFC 6464)");
+DEFINE_int(abs_send_time, 3, "Extension ID for absolute sender time");
+DEFINE_int(transport_seq_no, 5, "Extension ID for transport sequence number");
+DEFINE_bool(matlabplot,
+            false,
+            "Generates a matlab script for plotting the delay profile");
+DEFINE_bool(pythonplot,
+            false,
+            "Generates a python script for plotting the delay profile");
+DEFINE_bool(help, false, "Prints this message");
+DEFINE_bool(concealment_events, false, "Prints concealment events");
+
+// Maps a codec type to a printable name string.
+std::string CodecName(NetEqDecoder codec) {
+  switch (codec) {
+    case NetEqDecoder::kDecoderPCMu:
+      return "PCM-u";
+    case NetEqDecoder::kDecoderPCMa:
+      return "PCM-a";
+    case NetEqDecoder::kDecoderILBC:
+      return "iLBC";
+    case NetEqDecoder::kDecoderISAC:
+      return "iSAC";
+    case NetEqDecoder::kDecoderISACswb:
+      return "iSAC-swb (32 kHz)";
+    case NetEqDecoder::kDecoderOpus:
+      return "Opus";
+    case NetEqDecoder::kDecoderPCM16B:
+      return "PCM16b-nb (8 kHz)";
+    case NetEqDecoder::kDecoderPCM16Bwb:
+      return "PCM16b-wb (16 kHz)";
+    case NetEqDecoder::kDecoderPCM16Bswb32kHz:
+      return "PCM16b-swb32 (32 kHz)";
+    case NetEqDecoder::kDecoderPCM16Bswb48kHz:
+      return "PCM16b-swb48 (48 kHz)";
+    case NetEqDecoder::kDecoderG722:
+      return "G.722";
+    case NetEqDecoder::kDecoderRED:
+      return "redundant audio (RED)";
+    case NetEqDecoder::kDecoderAVT:
+      return "AVT/DTMF (8 kHz)";
+    case NetEqDecoder::kDecoderAVT16kHz:
+      return "AVT/DTMF (16 kHz)";
+    case NetEqDecoder::kDecoderAVT32kHz:
+      return "AVT/DTMF (32 kHz)";
+    case NetEqDecoder::kDecoderAVT48kHz:
+      return "AVT/DTMF (48 kHz)";
+    case NetEqDecoder::kDecoderCNGnb:
+      return "comfort noise (8 kHz)";
+    case NetEqDecoder::kDecoderCNGwb:
+      return "comfort noise (16 kHz)";
+    case NetEqDecoder::kDecoderCNGswb32kHz:
+      return "comfort noise (32 kHz)";
+    case NetEqDecoder::kDecoderCNGswb48kHz:
+      return "comfort noise (48 kHz)";
+    default:
+      FATAL();
+      return "undefined";
+  }
+}
+
+void PrintCodecMappingEntry(NetEqDecoder codec, int flag) {
+  std::cout << CodecName(codec) << ": " << flag << std::endl;
+}
+
+void PrintCodecMapping() {
+  PrintCodecMappingEntry(NetEqDecoder::kDecoderPCMu, FLAG_pcmu);
+  PrintCodecMappingEntry(NetEqDecoder::kDecoderPCMa, FLAG_pcma);
+  PrintCodecMappingEntry(NetEqDecoder::kDecoderILBC, FLAG_ilbc);
+  PrintCodecMappingEntry(NetEqDecoder::kDecoderISAC, FLAG_isac);
+  PrintCodecMappingEntry(NetEqDecoder::kDecoderISACswb, FLAG_isac_swb);
+  PrintCodecMappingEntry(NetEqDecoder::kDecoderOpus, FLAG_opus);
+  PrintCodecMappingEntry(NetEqDecoder::kDecoderPCM16B, FLAG_pcm16b);
+  PrintCodecMappingEntry(NetEqDecoder::kDecoderPCM16Bwb, FLAG_pcm16b_wb);
+  PrintCodecMappingEntry(NetEqDecoder::kDecoderPCM16Bswb32kHz,
+                         FLAG_pcm16b_swb32);
+  PrintCodecMappingEntry(NetEqDecoder::kDecoderPCM16Bswb48kHz,
+                         FLAG_pcm16b_swb48);
+  PrintCodecMappingEntry(NetEqDecoder::kDecoderG722, FLAG_g722);
+  PrintCodecMappingEntry(NetEqDecoder::kDecoderAVT, FLAG_avt);
+  PrintCodecMappingEntry(NetEqDecoder::kDecoderAVT16kHz, FLAG_avt_16);
+  PrintCodecMappingEntry(NetEqDecoder::kDecoderAVT32kHz, FLAG_avt_32);
+  PrintCodecMappingEntry(NetEqDecoder::kDecoderAVT48kHz, FLAG_avt_48);
+  PrintCodecMappingEntry(NetEqDecoder::kDecoderRED, FLAG_red);
+  PrintCodecMappingEntry(NetEqDecoder::kDecoderCNGnb, FLAG_cn_nb);
+  PrintCodecMappingEntry(NetEqDecoder::kDecoderCNGwb, FLAG_cn_wb);
+  PrintCodecMappingEntry(NetEqDecoder::kDecoderCNGswb32kHz, FLAG_cn_swb32);
+  PrintCodecMappingEntry(NetEqDecoder::kDecoderCNGswb48kHz, FLAG_cn_swb48);
+}
+
+rtc::Optional<int> CodecSampleRate(uint8_t payload_type) {
+  if (payload_type == FLAG_pcmu || payload_type == FLAG_pcma ||
+      payload_type == FLAG_ilbc || payload_type == FLAG_pcm16b ||
+      payload_type == FLAG_cn_nb || payload_type == FLAG_avt)
+    return 8000;
+  if (payload_type == FLAG_isac || payload_type == FLAG_pcm16b_wb ||
+      payload_type == FLAG_g722 || payload_type == FLAG_cn_wb ||
+      payload_type == FLAG_avt_16)
+    return 16000;
+  if (payload_type == FLAG_isac_swb || payload_type == FLAG_pcm16b_swb32 ||
+      payload_type == FLAG_cn_swb32 || payload_type == FLAG_avt_32)
+    return 32000;
+  if (payload_type == FLAG_opus || payload_type == FLAG_pcm16b_swb48 ||
+      payload_type == FLAG_cn_swb48 || payload_type == FLAG_avt_48)
+    return 48000;
+  if (payload_type == FLAG_red)
+    return 0;
+  return rtc::nullopt;
+}
+
+// Class to let through only the packets with a given SSRC. Should be used as an
+// outer layer on another NetEqInput object.
+class FilterSsrcInput : public NetEqInput {
+ public:
+  FilterSsrcInput(std::unique_ptr<NetEqInput> source, uint32_t ssrc)
+      : source_(std::move(source)), ssrc_(ssrc) {
+    FindNextWithCorrectSsrc();
+    RTC_CHECK(source_->NextHeader()) << "Found no packet with SSRC = 0x"
+                                     << std::hex << ssrc_;
+  }
+
+  // All methods but PopPacket() simply relay to the |source_| object.
+  rtc::Optional<int64_t> NextPacketTime() const override {
+    return source_->NextPacketTime();
+  }
+  rtc::Optional<int64_t> NextOutputEventTime() const override {
+    return source_->NextOutputEventTime();
+  }
+
+  // Returns the next packet, and throws away upcoming packets that do not match
+  // the desired SSRC.
+  std::unique_ptr<PacketData> PopPacket() override {
+    std::unique_ptr<PacketData> packet_to_return = source_->PopPacket();
+    RTC_DCHECK(!packet_to_return || packet_to_return->header.ssrc == ssrc_);
+    // Pre-fetch the next packet with correct SSRC. Hence, |source_| will always
+    // be have a valid packet (or empty if no more packets are available) when
+    // this method returns.
+    FindNextWithCorrectSsrc();
+    return packet_to_return;
+  }
+
+  void AdvanceOutputEvent() override { source_->AdvanceOutputEvent(); }
+
+  bool ended() const override { return source_->ended(); }
+
+  rtc::Optional<RTPHeader> NextHeader() const override {
+    return source_->NextHeader();
+  }
+
+ private:
+  void FindNextWithCorrectSsrc() {
+    while (source_->NextHeader() && source_->NextHeader()->ssrc != ssrc_) {
+      source_->PopPacket();
+    }
+  }
+
+  std::unique_ptr<NetEqInput> source_;
+  uint32_t ssrc_;
+};
+
+// A callback class which prints whenver the inserted packet stream changes
+// the SSRC.
+class SsrcSwitchDetector : public NetEqPostInsertPacket {
+ public:
+  // Takes a pointer to another callback object, which will be invoked after
+  // this object finishes. This does not transfer ownership, and null is a
+  // valid value.
+  explicit SsrcSwitchDetector(NetEqPostInsertPacket* other_callback)
+      : other_callback_(other_callback) {}
+
+  void AfterInsertPacket(const NetEqInput::PacketData& packet,
+                         NetEq* neteq) override {
+    if (last_ssrc_ && packet.header.ssrc != *last_ssrc_) {
+      std::cout << "Changing streams from 0x" << std::hex << *last_ssrc_
+                << " to 0x" << std::hex << packet.header.ssrc
+                << std::dec << " (payload type "
+                << static_cast<int>(packet.header.payloadType) << ")"
+                << std::endl;
+    }
+    last_ssrc_ = packet.header.ssrc;
+    if (other_callback_) {
+      other_callback_->AfterInsertPacket(packet, neteq);
+    }
+  }
+
+ private:
+  NetEqPostInsertPacket* other_callback_;
+  rtc::Optional<uint32_t> last_ssrc_;
+};
+
+class StatsGetter : public NetEqGetAudioCallback {
+ public:
+  // This struct is a replica of webrtc::NetEqNetworkStatistics, but with all
+  // values stored in double precision.
+  struct Stats {
+    double current_buffer_size_ms = 0.0;
+    double preferred_buffer_size_ms = 0.0;
+    double jitter_peaks_found = 0.0;
+    double packet_loss_rate = 0.0;
+    double expand_rate = 0.0;
+    double speech_expand_rate = 0.0;
+    double preemptive_rate = 0.0;
+    double accelerate_rate = 0.0;
+    double secondary_decoded_rate = 0.0;
+    double secondary_discarded_rate = 0.0;
+    double clockdrift_ppm = 0.0;
+    double added_zero_samples = 0.0;
+    double mean_waiting_time_ms = 0.0;
+    double median_waiting_time_ms = 0.0;
+    double min_waiting_time_ms = 0.0;
+    double max_waiting_time_ms = 0.0;
+  };
+
+  struct ConcealmentEvent {
+    uint64_t duration_ms;
+    size_t concealment_event_number;
+    int64_t time_from_previous_event_end_ms;
+
+    friend std::ostream& operator<<(std::ostream& stream,
+                                    const ConcealmentEvent& concealment_event) {
+      stream << "ConcealmentEvent duration_ms:" << concealment_event.duration_ms
+             << " event_number:" << concealment_event.concealment_event_number
+             << " time_from_previous_event_end_ms:"
+             << concealment_event.time_from_previous_event_end_ms << "\n";
+      return stream;
+    }
+  };
+
+  // Takes a pointer to another callback object, which will be invoked after
+  // this object finishes. This does not transfer ownership, and null is a
+  // valid value.
+  explicit StatsGetter(NetEqGetAudioCallback* other_callback)
+      : other_callback_(other_callback) {}
+
+  void BeforeGetAudio(NetEq* neteq) override {
+    if (other_callback_) {
+      other_callback_->BeforeGetAudio(neteq);
+    }
+  }
+
+  void AfterGetAudio(int64_t time_now_ms,
+                     const AudioFrame& audio_frame,
+                     bool muted,
+                     NetEq* neteq) override {
+    if (++counter_ >= 100) {
+      counter_ = 0;
+      NetEqNetworkStatistics stats;
+      RTC_CHECK_EQ(neteq->NetworkStatistics(&stats), 0);
+      stats_.push_back(stats);
+    }
+    const auto lifetime_stat = neteq->GetLifetimeStatistics();
+    if (current_concealment_event_ != lifetime_stat.concealment_events) {
+      if (last_event_end_time_ms_ > 0) {
+        // Do not account for the first event to avoid start of the call
+        // skewing.
+        ConcealmentEvent concealment_event;
+        uint64_t last_event_voice_concealed_samples =
+            lifetime_stat.voice_concealed_samples -
+            voice_concealed_samples_until_last_event_;
+        RTC_CHECK_GT(last_event_voice_concealed_samples, 0);
+        concealment_event.duration_ms = last_event_voice_concealed_samples /
+                                        (audio_frame.sample_rate_hz_ / 1000);
+        concealment_event.concealment_event_number = current_concealment_event_;
+        concealment_event.time_from_previous_event_end_ms =
+            time_now_ms - last_event_end_time_ms_;
+        concealment_events_.emplace_back(concealment_event);
+        voice_concealed_samples_until_last_event_ =
+            lifetime_stat.voice_concealed_samples;
+      }
+      last_event_end_time_ms_ = time_now_ms;
+      voice_concealed_samples_until_last_event_ =
+          lifetime_stat.voice_concealed_samples;
+      current_concealment_event_ = lifetime_stat.concealment_events;
+    }
+
+    if (other_callback_) {
+      other_callback_->AfterGetAudio(time_now_ms, audio_frame, muted, neteq);
+    }
+  }
+
+  double AverageSpeechExpandRate() const {
+    double sum_speech_expand =
+        std::accumulate(stats_.begin(), stats_.end(), double{0.0},
+                        [](double a, NetEqNetworkStatistics b) {
+                          return a + static_cast<double>(b.speech_expand_rate);
+                        });
+    return sum_speech_expand / 16384.0 / stats_.size();
+  }
+
+  const std::vector<ConcealmentEvent>& concealment_events() {
+    // Do not account for the last concealment event to avoid potential end
+    // call skewing.
+    return concealment_events_;
+  }
+
+  Stats AverageStats() const {
+    Stats sum_stats = std::accumulate(
+        stats_.begin(), stats_.end(), Stats(),
+        [](Stats a, NetEqNetworkStatistics b) {
+          a.current_buffer_size_ms += b.current_buffer_size_ms;
+          a.preferred_buffer_size_ms += b.preferred_buffer_size_ms;
+          a.jitter_peaks_found += b.jitter_peaks_found;
+          a.packet_loss_rate += b.packet_loss_rate / 16384.0;
+          a.expand_rate += b.expand_rate / 16384.0;
+          a.speech_expand_rate += b.speech_expand_rate / 16384.0;
+          a.preemptive_rate += b.preemptive_rate / 16384.0;
+          a.accelerate_rate += b.accelerate_rate / 16384.0;
+          a.secondary_decoded_rate += b.secondary_decoded_rate / 16384.0;
+          a.secondary_discarded_rate += b.secondary_discarded_rate / 16384.0;
+          a.clockdrift_ppm += b.clockdrift_ppm;
+          a.added_zero_samples += b.added_zero_samples;
+          a.mean_waiting_time_ms += b.mean_waiting_time_ms;
+          a.median_waiting_time_ms += b.median_waiting_time_ms;
+          a.min_waiting_time_ms =
+              std::min(a.min_waiting_time_ms,
+                       static_cast<double>(b.min_waiting_time_ms));
+          a.max_waiting_time_ms =
+              std::max(a.max_waiting_time_ms,
+                       static_cast<double>(b.max_waiting_time_ms));
+          return a;
+        });
+
+    sum_stats.current_buffer_size_ms /= stats_.size();
+    sum_stats.preferred_buffer_size_ms /= stats_.size();
+    sum_stats.jitter_peaks_found /= stats_.size();
+    sum_stats.packet_loss_rate /= stats_.size();
+    sum_stats.expand_rate /= stats_.size();
+    sum_stats.speech_expand_rate /= stats_.size();
+    sum_stats.preemptive_rate /= stats_.size();
+    sum_stats.accelerate_rate /= stats_.size();
+    sum_stats.secondary_decoded_rate /= stats_.size();
+    sum_stats.secondary_discarded_rate /= stats_.size();
+    sum_stats.clockdrift_ppm /= stats_.size();
+    sum_stats.added_zero_samples /= stats_.size();
+    sum_stats.mean_waiting_time_ms /= stats_.size();
+    sum_stats.median_waiting_time_ms /= stats_.size();
+
+    return sum_stats;
+  }
+
+ private:
+  NetEqGetAudioCallback* other_callback_;
+  size_t counter_ = 0;
+  std::vector<NetEqNetworkStatistics> stats_;
+  size_t current_concealment_event_ = 1;
+  uint64_t voice_concealed_samples_until_last_event_ = 0;
+  std::vector<ConcealmentEvent> concealment_events_;
+  int64_t last_event_end_time_ms_ = 0;
+};
+
+int RunTest(int argc, char* argv[]) {
+  std::string program_name = argv[0];
+  std::string usage = "Tool for decoding an RTP dump file using NetEq.\n"
+      "Run " + program_name + " --help for usage.\n"
+      "Example usage:\n" + program_name +
+      " input.rtp output.{pcm, wav}\n";
+  if (rtc::FlagList::SetFlagsFromCommandLine(&argc, argv, true)) {
+    return 1;
+  }
+  if (FLAG_help) {
+    std::cout << usage;
+    rtc::FlagList::Print(nullptr, false);
+    return 0;
+  }
+
+  if (FLAG_codec_map) {
+    PrintCodecMapping();
+  }
+
+  if (argc != 3) {
+    if (FLAG_codec_map) {
+      // We have already printed the codec map. Just end the program.
+      return 0;
+    }
+    // Print usage information.
+    std::cout << usage;
+    return 0;
+  }
+  RTC_CHECK(ValidatePayloadType(FLAG_pcmu));
+  RTC_CHECK(ValidatePayloadType(FLAG_pcma));
+  RTC_CHECK(ValidatePayloadType(FLAG_ilbc));
+  RTC_CHECK(ValidatePayloadType(FLAG_isac));
+  RTC_CHECK(ValidatePayloadType(FLAG_isac_swb));
+  RTC_CHECK(ValidatePayloadType(FLAG_opus));
+  RTC_CHECK(ValidatePayloadType(FLAG_pcm16b));
+  RTC_CHECK(ValidatePayloadType(FLAG_pcm16b_wb));
+  RTC_CHECK(ValidatePayloadType(FLAG_pcm16b_swb32));
+  RTC_CHECK(ValidatePayloadType(FLAG_pcm16b_swb48));
+  RTC_CHECK(ValidatePayloadType(FLAG_g722));
+  RTC_CHECK(ValidatePayloadType(FLAG_avt));
+  RTC_CHECK(ValidatePayloadType(FLAG_avt_16));
+  RTC_CHECK(ValidatePayloadType(FLAG_avt_32));
+  RTC_CHECK(ValidatePayloadType(FLAG_avt_48));
+  RTC_CHECK(ValidatePayloadType(FLAG_red));
+  RTC_CHECK(ValidatePayloadType(FLAG_cn_nb));
+  RTC_CHECK(ValidatePayloadType(FLAG_cn_wb));
+  RTC_CHECK(ValidatePayloadType(FLAG_cn_swb32));
+  RTC_CHECK(ValidatePayloadType(FLAG_cn_swb48));
+  RTC_CHECK(ValidateSsrcValue(FLAG_ssrc));
+  RTC_CHECK(ValidateExtensionId(FLAG_audio_level));
+  RTC_CHECK(ValidateExtensionId(FLAG_abs_send_time));
+  RTC_CHECK(ValidateExtensionId(FLAG_transport_seq_no));
+
+  // Gather RTP header extensions in a map.
+  NetEqPacketSourceInput::RtpHeaderExtensionMap rtp_ext_map = {
+      {FLAG_audio_level, kRtpExtensionAudioLevel},
+      {FLAG_abs_send_time, kRtpExtensionAbsoluteSendTime},
+      {FLAG_transport_seq_no, kRtpExtensionTransportSequenceNumber}};
+
+  const std::string input_file_name = argv[1];
+  std::unique_ptr<NetEqInput> input;
+  if (RtpFileSource::ValidRtpDump(input_file_name) ||
+      RtpFileSource::ValidPcap(input_file_name)) {
+    input.reset(new NetEqRtpDumpInput(input_file_name, rtp_ext_map));
+  } else {
+    input.reset(new NetEqEventLogInput(input_file_name, rtp_ext_map));
+  }
+
+  std::cout << "Input file: " << input_file_name << std::endl;
+  RTC_CHECK(input) << "Cannot open input file";
+  RTC_CHECK(!input->ended()) << "Input file is empty";
+
+  // Check if an SSRC value was provided.
+  if (strlen(FLAG_ssrc) > 0) {
+    uint32_t ssrc;
+    RTC_CHECK(ParseSsrc(FLAG_ssrc, &ssrc)) << "Flag verification has failed.";
+    input.reset(new FilterSsrcInput(std::move(input), ssrc));
+  }
+
+  // Check the sample rate.
+  rtc::Optional<int> sample_rate_hz;
+  std::set<std::pair<int, uint32_t>> discarded_pt_and_ssrc;
+  while (input->NextHeader()) {
+    rtc::Optional<RTPHeader> first_rtp_header = input->NextHeader();
+    RTC_DCHECK(first_rtp_header);
+    sample_rate_hz = CodecSampleRate(first_rtp_header->payloadType);
+    if (sample_rate_hz) {
+      std::cout << "Found valid packet with payload type "
+                << static_cast<int>(first_rtp_header->payloadType)
+                << " and SSRC 0x" << std::hex << first_rtp_header->ssrc
+                << std::dec << std::endl;
+      break;
+    }
+    // Discard this packet and move to the next. Keep track of discarded payload
+    // types and SSRCs.
+    discarded_pt_and_ssrc.emplace(first_rtp_header->payloadType,
+                                  first_rtp_header->ssrc);
+    input->PopPacket();
+  }
+  if (!discarded_pt_and_ssrc.empty()) {
+    std::cout << "Discarded initial packets with the following payload types "
+                 "and SSRCs:"
+              << std::endl;
+    for (const auto& d : discarded_pt_and_ssrc) {
+      std::cout << "PT " << d.first << "; SSRC 0x" << std::hex
+                << static_cast<int>(d.second) << std::dec << std::endl;
+    }
+  }
+  if (!sample_rate_hz) {
+    std::cout << "Cannot find any packets with known payload types"
+              << std::endl;
+    RTC_NOTREACHED();
+  }
+
+  // Open the output file now that we know the sample rate. (Rate is only needed
+  // for wav files.)
+  const std::string output_file_name = argv[2];
+  std::unique_ptr<AudioSink> output;
+  if (output_file_name.size() >= 4 &&
+      output_file_name.substr(output_file_name.size() - 4) == ".wav") {
+    // Open a wav file.
+    output.reset(new OutputWavFile(output_file_name, *sample_rate_hz));
+  } else {
+    // Open a pcm file.
+    output.reset(new OutputAudioFile(output_file_name));
+  }
+
+  std::cout << "Output file: " << output_file_name << std::endl;
+
+  NetEqTest::DecoderMap codecs = {
+      {FLAG_pcmu, std::make_pair(NetEqDecoder::kDecoderPCMu, "pcmu")},
+      {FLAG_pcma, std::make_pair(NetEqDecoder::kDecoderPCMa, "pcma")},
+      {FLAG_ilbc, std::make_pair(NetEqDecoder::kDecoderILBC, "ilbc")},
+      {FLAG_isac, std::make_pair(NetEqDecoder::kDecoderISAC, "isac")},
+      {FLAG_isac_swb,
+       std::make_pair(NetEqDecoder::kDecoderISACswb, "isac-swb")},
+      {FLAG_opus, std::make_pair(NetEqDecoder::kDecoderOpus, "opus")},
+      {FLAG_pcm16b, std::make_pair(NetEqDecoder::kDecoderPCM16B, "pcm16-nb")},
+      {FLAG_pcm16b_wb,
+       std::make_pair(NetEqDecoder::kDecoderPCM16Bwb, "pcm16-wb")},
+      {FLAG_pcm16b_swb32,
+       std::make_pair(NetEqDecoder::kDecoderPCM16Bswb32kHz, "pcm16-swb32")},
+      {FLAG_pcm16b_swb48,
+       std::make_pair(NetEqDecoder::kDecoderPCM16Bswb48kHz, "pcm16-swb48")},
+      {FLAG_g722, std::make_pair(NetEqDecoder::kDecoderG722, "g722")},
+      {FLAG_avt, std::make_pair(NetEqDecoder::kDecoderAVT, "avt")},
+      {FLAG_avt_16, std::make_pair(NetEqDecoder::kDecoderAVT16kHz, "avt-16")},
+      {FLAG_avt_32,
+       std::make_pair(NetEqDecoder::kDecoderAVT32kHz, "avt-32")},
+      {FLAG_avt_48,
+       std::make_pair(NetEqDecoder::kDecoderAVT48kHz, "avt-48")},
+      {FLAG_red, std::make_pair(NetEqDecoder::kDecoderRED, "red")},
+      {FLAG_cn_nb, std::make_pair(NetEqDecoder::kDecoderCNGnb, "cng-nb")},
+      {FLAG_cn_wb, std::make_pair(NetEqDecoder::kDecoderCNGwb, "cng-wb")},
+      {FLAG_cn_swb32,
+       std::make_pair(NetEqDecoder::kDecoderCNGswb32kHz, "cng-swb32")},
+      {FLAG_cn_swb48,
+       std::make_pair(NetEqDecoder::kDecoderCNGswb48kHz, "cng-swb48")}};
+
+  // Check if a replacement audio file was provided.
+  std::unique_ptr<AudioDecoder> replacement_decoder;
+  NetEqTest::ExtDecoderMap ext_codecs;
+  if (strlen(FLAG_replacement_audio_file) > 0) {
+    // Find largest unused payload type.
+    int replacement_pt = 127;
+    while (!(codecs.find(replacement_pt) == codecs.end() &&
+             ext_codecs.find(replacement_pt) == ext_codecs.end())) {
+      --replacement_pt;
+      RTC_CHECK_GE(replacement_pt, 0);
+    }
+
+    auto std_set_int32_to_uint8 = [](const std::set<int32_t>& a) {
+      std::set<uint8_t> b;
+      for (auto& x : a) {
+        b.insert(static_cast<uint8_t>(x));
+      }
+      return b;
+    };
+
+    std::set<uint8_t> cn_types = std_set_int32_to_uint8(
+        {FLAG_cn_nb, FLAG_cn_wb, FLAG_cn_swb32, FLAG_cn_swb48});
+    std::set<uint8_t> forbidden_types =
+        std_set_int32_to_uint8({FLAG_g722, FLAG_red, FLAG_avt,
+                                FLAG_avt_16, FLAG_avt_32, FLAG_avt_48});
+    input.reset(new NetEqReplacementInput(std::move(input), replacement_pt,
+                                          cn_types, forbidden_types));
+
+    replacement_decoder.reset(new FakeDecodeFromFile(
+        std::unique_ptr<InputAudioFile>(
+            new InputAudioFile(FLAG_replacement_audio_file)),
+        48000, false));
+    NetEqTest::ExternalDecoderInfo ext_dec_info = {
+        replacement_decoder.get(), NetEqDecoder::kDecoderArbitrary,
+        "replacement codec"};
+    ext_codecs[replacement_pt] = ext_dec_info;
+  }
+
+  NetEqTest::Callbacks callbacks;
+  std::unique_ptr<NetEqDelayAnalyzer> delay_analyzer;
+  if (FLAG_matlabplot || FLAG_pythonplot) {
+    delay_analyzer.reset(new NetEqDelayAnalyzer);
+  }
+
+  SsrcSwitchDetector ssrc_switch_detector(delay_analyzer.get());
+  callbacks.post_insert_packet = &ssrc_switch_detector;
+  StatsGetter stats_getter(delay_analyzer.get());
+  callbacks.get_audio_callback = &stats_getter;
+  NetEq::Config config;
+  config.sample_rate_hz = *sample_rate_hz;
+  NetEqTest test(config, codecs, ext_codecs, std::move(input),
+                 std::move(output), callbacks);
+
+  int64_t test_duration_ms = test.Run();
+
+  if (FLAG_matlabplot) {
+    auto matlab_script_name = output_file_name;
+    std::replace(matlab_script_name.begin(), matlab_script_name.end(), '.',
+                 '_');
+    std::cout << "Creating Matlab plot script " << matlab_script_name + ".m"
+              << std::endl;
+    delay_analyzer->CreateMatlabScript(matlab_script_name + ".m");
+  }
+  if (FLAG_pythonplot) {
+    auto python_script_name = output_file_name;
+    std::replace(python_script_name.begin(), python_script_name.end(), '.',
+                 '_');
+    std::cout << "Creating Python plot script " << python_script_name + ".py"
+              << std::endl;
+    delay_analyzer->CreatePythonScript(python_script_name + ".py");
+  }
+
+  printf("Simulation statistics:\n");
+  printf("  output duration: %" PRId64 " ms\n", test_duration_ms);
+  auto stats = stats_getter.AverageStats();
+  printf("  packet_loss_rate: %f %%\n", 100.0 * stats.packet_loss_rate);
+  printf("  expand_rate: %f %%\n", 100.0 * stats.expand_rate);
+  printf("  speech_expand_rate: %f %%\n", 100.0 * stats.speech_expand_rate);
+  printf("  preemptive_rate: %f %%\n", 100.0 * stats.preemptive_rate);
+  printf("  accelerate_rate: %f %%\n", 100.0 * stats.accelerate_rate);
+  printf("  secondary_decoded_rate: %f %%\n",
+         100.0 * stats.secondary_decoded_rate);
+  printf("  secondary_discarded_rate: %f %%\n",
+         100.0 * stats.secondary_discarded_rate);
+  printf("  clockdrift_ppm: %f ppm\n", stats.clockdrift_ppm);
+  printf("  mean_waiting_time_ms: %f ms\n", stats.mean_waiting_time_ms);
+  printf("  median_waiting_time_ms: %f ms\n", stats.median_waiting_time_ms);
+  printf("  min_waiting_time_ms: %f ms\n", stats.min_waiting_time_ms);
+  printf("  max_waiting_time_ms: %f ms\n", stats.max_waiting_time_ms);
+  printf("  current_buffer_size_ms: %f ms\n", stats.current_buffer_size_ms);
+  printf("  preferred_buffer_size_ms: %f ms\n", stats.preferred_buffer_size_ms);
+  if (FLAG_concealment_events) {
+    std::cout << " concealment_events_ms:"
+              << "\n";
+    for (auto concealment_event : stats_getter.concealment_events())
+      std::cout << concealment_event;
+    std::cout << " end of concealment_events_ms\n";
+  }
+  return 0;
+}
+
+}  // namespace
+}  // namespace test
+}  // namespace webrtc
+
+int main(int argc, char* argv[]) {
+  return webrtc::test::RunTest(argc, argv);
+}
diff --git a/modules/audio_coding/neteq/tools/neteq_test.cc b/modules/audio_coding/neteq/tools/neteq_test.cc
new file mode 100644
index 0000000..e6dd114
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/neteq_test.cc
@@ -0,0 +1,140 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/neteq_test.h"
+
+#include <iostream>
+
+#include "api/audio_codecs/builtin_audio_decoder_factory.h"
+
+namespace webrtc {
+namespace test {
+
+void DefaultNetEqTestErrorCallback::OnInsertPacketError(
+    const NetEqInput::PacketData& packet) {
+  std::cerr << "InsertPacket returned an error." << std::endl;
+  std::cerr << "Packet data: " << packet.ToString() << std::endl;
+  FATAL();
+}
+
+void DefaultNetEqTestErrorCallback::OnGetAudioError() {
+  std::cerr << "GetAudio returned an error." << std::endl;
+  FATAL();
+}
+
+NetEqTest::NetEqTest(const NetEq::Config& config,
+                     const DecoderMap& codecs,
+                     const ExtDecoderMap& ext_codecs,
+                     std::unique_ptr<NetEqInput> input,
+                     std::unique_ptr<AudioSink> output,
+                     Callbacks callbacks)
+    : neteq_(NetEq::Create(config, CreateBuiltinAudioDecoderFactory())),
+      input_(std::move(input)),
+      output_(std::move(output)),
+      callbacks_(callbacks),
+      sample_rate_hz_(config.sample_rate_hz) {
+  RTC_CHECK(!config.enable_muted_state)
+      << "The code does not handle enable_muted_state";
+  RegisterDecoders(codecs);
+  RegisterExternalDecoders(ext_codecs);
+}
+
+int64_t NetEqTest::Run() {
+  const int64_t start_time_ms = *input_->NextEventTime();
+  int64_t time_now_ms = start_time_ms;
+
+  while (!input_->ended()) {
+    // Advance time to next event.
+    RTC_DCHECK(input_->NextEventTime());
+    time_now_ms = *input_->NextEventTime();
+    // Check if it is time to insert packet.
+    if (input_->NextPacketTime() && time_now_ms >= *input_->NextPacketTime()) {
+      std::unique_ptr<NetEqInput::PacketData> packet_data = input_->PopPacket();
+      RTC_CHECK(packet_data);
+      int error = neteq_->InsertPacket(
+          packet_data->header,
+          rtc::ArrayView<const uint8_t>(packet_data->payload),
+          static_cast<uint32_t>(packet_data->time_ms * sample_rate_hz_ / 1000));
+      if (error != NetEq::kOK && callbacks_.error_callback) {
+        callbacks_.error_callback->OnInsertPacketError(*packet_data);
+      }
+      if (callbacks_.post_insert_packet) {
+        callbacks_.post_insert_packet->AfterInsertPacket(*packet_data,
+                                                         neteq_.get());
+      }
+    }
+
+    // Check if it is time to get output audio.
+    if (input_->NextOutputEventTime() &&
+        time_now_ms >= *input_->NextOutputEventTime()) {
+      if (callbacks_.get_audio_callback) {
+        callbacks_.get_audio_callback->BeforeGetAudio(neteq_.get());
+      }
+      AudioFrame out_frame;
+      bool muted;
+      int error = neteq_->GetAudio(&out_frame, &muted);
+      RTC_CHECK(!muted) << "The code does not handle enable_muted_state";
+      if (error != NetEq::kOK) {
+        if (callbacks_.error_callback) {
+          callbacks_.error_callback->OnGetAudioError();
+        }
+      } else {
+        sample_rate_hz_ = out_frame.sample_rate_hz_;
+      }
+      if (callbacks_.get_audio_callback) {
+        callbacks_.get_audio_callback->AfterGetAudio(time_now_ms, out_frame,
+                                                     muted, neteq_.get());
+      }
+
+      if (output_) {
+        RTC_CHECK(output_->WriteArray(
+            out_frame.data(),
+            out_frame.samples_per_channel_ * out_frame.num_channels_));
+      }
+
+      input_->AdvanceOutputEvent();
+    }
+  }
+  return time_now_ms - start_time_ms;
+}
+
+NetEqNetworkStatistics NetEqTest::SimulationStats() {
+  NetEqNetworkStatistics stats;
+  RTC_CHECK_EQ(neteq_->NetworkStatistics(&stats), 0);
+  return stats;
+}
+
+NetEqLifetimeStatistics NetEqTest::LifetimeStats() const {
+  return neteq_->GetLifetimeStatistics();
+}
+
+void NetEqTest::RegisterDecoders(const DecoderMap& codecs) {
+  for (const auto& c : codecs) {
+    RTC_CHECK_EQ(
+        neteq_->RegisterPayloadType(c.second.first, c.second.second, c.first),
+        NetEq::kOK)
+        << "Cannot register " << c.second.second << " to payload type "
+        << c.first;
+  }
+}
+
+void NetEqTest::RegisterExternalDecoders(const ExtDecoderMap& codecs) {
+  for (const auto& c : codecs) {
+    RTC_CHECK_EQ(
+        neteq_->RegisterExternalDecoder(c.second.decoder, c.second.codec,
+                                        c.second.codec_name, c.first),
+        NetEq::kOK)
+        << "Cannot register " << c.second.codec_name << " to payload type "
+        << c.first;
+  }
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/neteq_test.h b/modules/audio_coding/neteq/tools/neteq_test.h
new file mode 100644
index 0000000..e645e42
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/neteq_test.h
@@ -0,0 +1,107 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_TEST_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_TEST_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "modules/audio_coding/neteq/include/neteq.h"
+#include "modules/audio_coding/neteq/tools/audio_sink.h"
+#include "modules/audio_coding/neteq/tools/neteq_input.h"
+
+namespace webrtc {
+namespace test {
+
+class NetEqTestErrorCallback {
+ public:
+  virtual ~NetEqTestErrorCallback() = default;
+  virtual void OnInsertPacketError(const NetEqInput::PacketData& packet) {}
+  virtual void OnGetAudioError() {}
+};
+
+class DefaultNetEqTestErrorCallback : public NetEqTestErrorCallback {
+  void OnInsertPacketError(const NetEqInput::PacketData& packet) override;
+  void OnGetAudioError() override;
+};
+
+class NetEqPostInsertPacket {
+ public:
+  virtual ~NetEqPostInsertPacket() = default;
+  virtual void AfterInsertPacket(const NetEqInput::PacketData& packet,
+                                 NetEq* neteq) = 0;
+};
+
+class NetEqGetAudioCallback {
+ public:
+  virtual ~NetEqGetAudioCallback() = default;
+  virtual void BeforeGetAudio(NetEq* neteq) = 0;
+  virtual void AfterGetAudio(int64_t time_now_ms,
+                             const AudioFrame& audio_frame,
+                             bool muted,
+                             NetEq* neteq) = 0;
+};
+
+// Class that provides an input--output test for NetEq. The input (both packets
+// and output events) is provided by a NetEqInput object, while the output is
+// directed to an AudioSink object.
+class NetEqTest {
+ public:
+  using DecoderMap = std::map<int, std::pair<NetEqDecoder, std::string> >;
+
+  struct ExternalDecoderInfo {
+    AudioDecoder* decoder;
+    NetEqDecoder codec;
+    std::string codec_name;
+  };
+
+  using ExtDecoderMap = std::map<int, ExternalDecoderInfo>;
+
+  struct Callbacks {
+    NetEqTestErrorCallback* error_callback = nullptr;
+    NetEqPostInsertPacket* post_insert_packet = nullptr;
+    NetEqGetAudioCallback* get_audio_callback = nullptr;
+  };
+
+  // Sets up the test with given configuration, codec mappings, input, ouput,
+  // and callback objects for error reporting.
+  NetEqTest(const NetEq::Config& config,
+            const DecoderMap& codecs,
+            const ExtDecoderMap& ext_codecs,
+            std::unique_ptr<NetEqInput> input,
+            std::unique_ptr<AudioSink> output,
+            Callbacks callbacks);
+
+  ~NetEqTest() = default;
+
+  // Runs the test. Returns the duration of the produced audio in ms.
+  int64_t Run();
+
+  // Returns the statistics from NetEq.
+  NetEqNetworkStatistics SimulationStats();
+  NetEqLifetimeStatistics LifetimeStats() const;
+
+ private:
+  void RegisterDecoders(const DecoderMap& codecs);
+  void RegisterExternalDecoders(const ExtDecoderMap& codecs);
+
+  std::unique_ptr<NetEq> neteq_;
+  std::unique_ptr<NetEqInput> input_;
+  std::unique_ptr<AudioSink> output_;
+  Callbacks callbacks_;
+  int sample_rate_hz_;
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_TEST_H_
diff --git a/modules/audio_coding/neteq/tools/output_audio_file.h b/modules/audio_coding/neteq/tools/output_audio_file.h
new file mode 100644
index 0000000..7e65bc2
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/output_audio_file.h
@@ -0,0 +1,50 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_AUDIO_FILE_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_AUDIO_FILE_H_
+
+#include <assert.h>
+#include <stdio.h>
+#include <string>
+
+#include "modules/audio_coding/neteq/tools/audio_sink.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+namespace test {
+
+class OutputAudioFile : public AudioSink {
+ public:
+  // Creates an OutputAudioFile, opening a file named |file_name| for writing.
+  // The file format is 16-bit signed host-endian PCM.
+  explicit OutputAudioFile(const std::string& file_name) {
+    out_file_ = fopen(file_name.c_str(), "wb");
+  }
+
+  virtual ~OutputAudioFile() {
+    if (out_file_)
+      fclose(out_file_);
+  }
+
+  bool WriteArray(const int16_t* audio, size_t num_samples) override {
+    assert(out_file_);
+    return fwrite(audio, sizeof(*audio), num_samples, out_file_) == num_samples;
+  }
+
+ private:
+  FILE* out_file_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(OutputAudioFile);
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_AUDIO_FILE_H_
diff --git a/modules/audio_coding/neteq/tools/output_wav_file.h b/modules/audio_coding/neteq/tools/output_wav_file.h
new file mode 100644
index 0000000..031a8cb
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/output_wav_file.h
@@ -0,0 +1,43 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_WAV_FILE_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_WAV_FILE_H_
+
+#include <string>
+
+#include "common_audio/wav_file.h"
+#include "modules/audio_coding/neteq/tools/audio_sink.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+namespace test {
+
+class OutputWavFile : public AudioSink {
+ public:
+  // Creates an OutputWavFile, opening a file named |file_name| for writing.
+  // The output file is a PCM encoded wav file.
+  OutputWavFile(const std::string& file_name, int sample_rate_hz)
+      : wav_writer_(file_name, sample_rate_hz, 1) {}
+
+  bool WriteArray(const int16_t* audio, size_t num_samples) override {
+    wav_writer_.WriteSamples(audio, num_samples);
+    return true;
+  }
+
+ private:
+  WavWriter wav_writer_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(OutputWavFile);
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_WAV_FILE_H_
diff --git a/modules/audio_coding/neteq/tools/packet.cc b/modules/audio_coding/neteq/tools/packet.cc
new file mode 100644
index 0000000..71337b6
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/packet.cc
@@ -0,0 +1,170 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/packet.h"
+
+#include <string.h>
+
+#include <memory>
+
+#include "modules/include/module_common_types.h"
+#include "modules/rtp_rtcp/include/rtp_header_parser.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+
+Packet::Packet(uint8_t* packet_memory,
+               size_t allocated_bytes,
+               double time_ms,
+               const RtpHeaderParser& parser)
+    : payload_memory_(packet_memory),
+      payload_(NULL),
+      packet_length_bytes_(allocated_bytes),
+      payload_length_bytes_(0),
+      virtual_packet_length_bytes_(allocated_bytes),
+      virtual_payload_length_bytes_(0),
+      time_ms_(time_ms) {
+  valid_header_ = ParseHeader(parser);
+}
+
+Packet::Packet(uint8_t* packet_memory,
+               size_t allocated_bytes,
+               size_t virtual_packet_length_bytes,
+               double time_ms,
+               const RtpHeaderParser& parser)
+    : payload_memory_(packet_memory),
+      payload_(NULL),
+      packet_length_bytes_(allocated_bytes),
+      payload_length_bytes_(0),
+      virtual_packet_length_bytes_(virtual_packet_length_bytes),
+      virtual_payload_length_bytes_(0),
+      time_ms_(time_ms) {
+  valid_header_ = ParseHeader(parser);
+}
+
+Packet::Packet(uint8_t* packet_memory, size_t allocated_bytes, double time_ms)
+    : payload_memory_(packet_memory),
+      payload_(NULL),
+      packet_length_bytes_(allocated_bytes),
+      payload_length_bytes_(0),
+      virtual_packet_length_bytes_(allocated_bytes),
+      virtual_payload_length_bytes_(0),
+      time_ms_(time_ms) {
+  std::unique_ptr<RtpHeaderParser> parser(RtpHeaderParser::Create());
+  valid_header_ = ParseHeader(*parser);
+}
+
+Packet::Packet(uint8_t* packet_memory,
+               size_t allocated_bytes,
+               size_t virtual_packet_length_bytes,
+               double time_ms)
+    : payload_memory_(packet_memory),
+      payload_(NULL),
+      packet_length_bytes_(allocated_bytes),
+      payload_length_bytes_(0),
+      virtual_packet_length_bytes_(virtual_packet_length_bytes),
+      virtual_payload_length_bytes_(0),
+      time_ms_(time_ms) {
+  std::unique_ptr<RtpHeaderParser> parser(RtpHeaderParser::Create());
+  valid_header_ = ParseHeader(*parser);
+}
+
+Packet::~Packet() = default;
+
+bool Packet::ExtractRedHeaders(std::list<RTPHeader*>* headers) const {
+  //
+  //  0                   1                    2                   3
+  //  0 1 2 3 4 5 6 7 8 9 0 1 2 3  4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+  // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  // |1|   block PT  |  timestamp offset         |   block length    |
+  // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  // |1|    ...                                                      |
+  // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  // |0|   block PT  |
+  // +-+-+-+-+-+-+-+-+
+  //
+
+  assert(payload_);
+  const uint8_t* payload_ptr = payload_;
+  const uint8_t* payload_end_ptr = payload_ptr + payload_length_bytes_;
+
+  // Find all RED headers with the extension bit set to 1. That is, all headers
+  // but the last one.
+  while ((payload_ptr < payload_end_ptr) && (*payload_ptr & 0x80)) {
+    RTPHeader* header = new RTPHeader;
+    CopyToHeader(header);
+    header->payloadType = payload_ptr[0] & 0x7F;
+    uint32_t offset = (payload_ptr[1] << 6) + ((payload_ptr[2] & 0xFC) >> 2);
+    header->timestamp -= offset;
+    headers->push_front(header);
+    payload_ptr += 4;
+  }
+  // Last header.
+  assert(payload_ptr < payload_end_ptr);
+  if (payload_ptr >= payload_end_ptr) {
+    return false;  // Payload too short.
+  }
+  RTPHeader* header = new RTPHeader;
+  CopyToHeader(header);
+  header->payloadType = payload_ptr[0] & 0x7F;
+  headers->push_front(header);
+  return true;
+}
+
+void Packet::DeleteRedHeaders(std::list<RTPHeader*>* headers) {
+  while (!headers->empty()) {
+    delete headers->front();
+    headers->pop_front();
+  }
+}
+
+bool Packet::ParseHeader(const RtpHeaderParser& parser) {
+  bool valid_header = parser.Parse(
+      payload_memory_.get(), static_cast<int>(packet_length_bytes_), &header_);
+  // Special case for dummy packets that have padding marked in the RTP header.
+  // This causes the RTP header parser to report failure, but is fine in this
+  // context.
+  const bool header_only_with_padding =
+      (header_.headerLength == packet_length_bytes_ &&
+       header_.paddingLength > 0);
+  if (!valid_header && !header_only_with_padding) {
+    return false;
+  }
+  assert(header_.headerLength <= packet_length_bytes_);
+  payload_ = &payload_memory_[header_.headerLength];
+  assert(packet_length_bytes_ >= header_.headerLength);
+  payload_length_bytes_ = packet_length_bytes_ - header_.headerLength;
+  RTC_CHECK_GE(virtual_packet_length_bytes_, packet_length_bytes_);
+  assert(virtual_packet_length_bytes_ >= header_.headerLength);
+  virtual_payload_length_bytes_ =
+      virtual_packet_length_bytes_ - header_.headerLength;
+  return true;
+}
+
+void Packet::CopyToHeader(RTPHeader* destination) const {
+  destination->markerBit = header_.markerBit;
+  destination->payloadType = header_.payloadType;
+  destination->sequenceNumber = header_.sequenceNumber;
+  destination->timestamp = header_.timestamp;
+  destination->ssrc = header_.ssrc;
+  destination->numCSRCs = header_.numCSRCs;
+  destination->paddingLength = header_.paddingLength;
+  destination->headerLength = header_.headerLength;
+  destination->payload_type_frequency = header_.payload_type_frequency;
+  memcpy(&destination->arrOfCSRCs,
+         &header_.arrOfCSRCs,
+         sizeof(header_.arrOfCSRCs));
+  memcpy(
+      &destination->extension, &header_.extension, sizeof(header_.extension));
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/packet.h b/modules/audio_coding/neteq/tools/packet.h
new file mode 100644
index 0000000..94d45c5
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/packet.h
@@ -0,0 +1,118 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_H_
+
+#include <list>
+#include <memory>
+
+#include "api/rtp_headers.h"  // NOLINT(build/include)
+#include "common_types.h"  // NOLINT(build/include)
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class RtpHeaderParser;
+
+namespace test {
+
+// Class for handling RTP packets in test applications.
+class Packet {
+ public:
+  // Creates a packet, with the packet payload (including header bytes) in
+  // |packet_memory|. The length of |packet_memory| is |allocated_bytes|.
+  // The new object assumes ownership of |packet_memory| and will delete it
+  // when the Packet object is deleted. The |time_ms| is an extra time
+  // associated with this packet, typically used to denote arrival time.
+  // The first bytes in |packet_memory| will be parsed using |parser|.
+  Packet(uint8_t* packet_memory,
+         size_t allocated_bytes,
+         double time_ms,
+         const RtpHeaderParser& parser);
+
+  // Same as above, but with the extra argument |virtual_packet_length_bytes|.
+  // This is typically used when reading RTP dump files that only contain the
+  // RTP headers, and no payload (a.k.a RTP dummy files or RTP light). The
+  // |virtual_packet_length_bytes| tells what size the packet had on wire,
+  // including the now discarded payload, whereas |allocated_bytes| is the
+  // length of the remaining payload (typically only the RTP header).
+  Packet(uint8_t* packet_memory,
+         size_t allocated_bytes,
+         size_t virtual_packet_length_bytes,
+         double time_ms,
+         const RtpHeaderParser& parser);
+
+  // The following two constructors are the same as above, but without a
+  // parser. Note that when the object is constructed using any of these
+  // methods, the header will be parsed using a default RtpHeaderParser object.
+  // In particular, RTP header extensions won't be parsed.
+  Packet(uint8_t* packet_memory, size_t allocated_bytes, double time_ms);
+
+  Packet(uint8_t* packet_memory,
+         size_t allocated_bytes,
+         size_t virtual_packet_length_bytes,
+         double time_ms);
+
+  virtual ~Packet();
+
+  // Parses the first bytes of the RTP payload, interpreting them as RED headers
+  // according to RFC 2198. The headers will be inserted into |headers|. The
+  // caller of the method assumes ownership of the objects in the list, and
+  // must delete them properly.
+  bool ExtractRedHeaders(std::list<RTPHeader*>* headers) const;
+
+  // Deletes all RTPHeader objects in |headers|, but does not delete |headers|
+  // itself.
+  static void DeleteRedHeaders(std::list<RTPHeader*>* headers);
+
+  const uint8_t* payload() const { return payload_; }
+
+  size_t packet_length_bytes() const { return packet_length_bytes_; }
+
+  size_t payload_length_bytes() const { return payload_length_bytes_; }
+
+  size_t virtual_packet_length_bytes() const {
+    return virtual_packet_length_bytes_;
+  }
+
+  size_t virtual_payload_length_bytes() const {
+    return virtual_payload_length_bytes_;
+  }
+
+  const RTPHeader& header() const { return header_; }
+
+  void set_time_ms(double time) { time_ms_ = time; }
+  double time_ms() const { return time_ms_; }
+  bool valid_header() const { return valid_header_; }
+
+ private:
+  bool ParseHeader(const RtpHeaderParser& parser);
+  void CopyToHeader(RTPHeader* destination) const;
+
+  RTPHeader header_;
+  std::unique_ptr<uint8_t[]> payload_memory_;
+  const uint8_t* payload_;            // First byte after header.
+  const size_t packet_length_bytes_;  // Total length of packet.
+  size_t payload_length_bytes_;  // Length of the payload, after RTP header.
+                                 // Zero for dummy RTP packets.
+  // Virtual lengths are used when parsing RTP header files (dummy RTP files).
+  const size_t virtual_packet_length_bytes_;
+  size_t virtual_payload_length_bytes_;
+  double time_ms_;     // Used to denote a packet's arrival time.
+  bool valid_header_;  // Set by the RtpHeaderParser.
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(Packet);
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_H_
diff --git a/modules/audio_coding/neteq/tools/packet_source.cc b/modules/audio_coding/neteq/tools/packet_source.cc
new file mode 100644
index 0000000..30bf431
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/packet_source.cc
@@ -0,0 +1,30 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/packet_source.h"
+
+namespace webrtc {
+namespace test {
+
+PacketSource::PacketSource() : use_ssrc_filter_(false), ssrc_(0) {}
+
+PacketSource::~PacketSource() = default;
+
+void PacketSource::FilterOutPayloadType(uint8_t payload_type) {
+  filter_.set(payload_type, true);
+}
+
+void PacketSource::SelectSsrc(uint32_t ssrc) {
+  use_ssrc_filter_ = true;
+  ssrc_ = ssrc;
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/packet_source.h b/modules/audio_coding/neteq/tools/packet_source.h
new file mode 100644
index 0000000..3f98ba1
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/packet_source.h
@@ -0,0 +1,50 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_SOURCE_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_SOURCE_H_
+
+#include <bitset>
+#include <memory>
+
+#include "modules/audio_coding/neteq/tools/packet.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace test {
+
+// Interface class for an object delivering RTP packets to test applications.
+class PacketSource {
+ public:
+  PacketSource();
+  virtual ~PacketSource();
+
+  // Returns next packet. Returns nullptr if the source is depleted, or if an
+  // error occurred.
+  virtual std::unique_ptr<Packet> NextPacket() = 0;
+
+  virtual void FilterOutPayloadType(uint8_t payload_type);
+
+  virtual void SelectSsrc(uint32_t ssrc);
+
+ protected:
+  std::bitset<128> filter_;  // Payload type is 7 bits in the RFC.
+  // If SSRC filtering discards all packet that do not match the SSRC.
+  bool use_ssrc_filter_;  // True when SSRC filtering is active.
+  uint32_t ssrc_;  // The selected SSRC. All other SSRCs will be discarded.
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(PacketSource);
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_SOURCE_H_
diff --git a/modules/audio_coding/neteq/tools/packet_unittest.cc b/modules/audio_coding/neteq/tools/packet_unittest.cc
new file mode 100644
index 0000000..ce6a3b9
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/packet_unittest.cc
@@ -0,0 +1,202 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Unit tests for test Packet class.
+
+#include "modules/audio_coding/neteq/tools/packet.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+const int kHeaderLengthBytes = 12;
+
+void MakeRtpHeader(int payload_type,
+                   int seq_number,
+                   uint32_t timestamp,
+                   uint32_t ssrc,
+                   uint8_t* rtp_data) {
+  rtp_data[0] = 0x80;
+  rtp_data[1] = static_cast<uint8_t>(payload_type);
+  rtp_data[2] = (seq_number >> 8) & 0xFF;
+  rtp_data[3] = (seq_number) & 0xFF;
+  rtp_data[4] = timestamp >> 24;
+  rtp_data[5] = (timestamp >> 16) & 0xFF;
+  rtp_data[6] = (timestamp >> 8) & 0xFF;
+  rtp_data[7] = timestamp & 0xFF;
+  rtp_data[8] = ssrc >> 24;
+  rtp_data[9] = (ssrc >> 16) & 0xFF;
+  rtp_data[10] = (ssrc >> 8) & 0xFF;
+  rtp_data[11] = ssrc & 0xFF;
+}
+}  // namespace
+
+TEST(TestPacket, RegularPacket) {
+  const size_t kPacketLengthBytes = 100;
+  uint8_t* packet_memory = new uint8_t[kPacketLengthBytes];
+  const uint8_t kPayloadType = 17;
+  const uint16_t kSequenceNumber = 4711;
+  const uint32_t kTimestamp = 47114711;
+  const uint32_t kSsrc = 0x12345678;
+  MakeRtpHeader(
+      kPayloadType, kSequenceNumber, kTimestamp, kSsrc, packet_memory);
+  const double kPacketTime = 1.0;
+  // Hand over ownership of |packet_memory| to |packet|.
+  Packet packet(packet_memory, kPacketLengthBytes, kPacketTime);
+  ASSERT_TRUE(packet.valid_header());
+  EXPECT_EQ(kPayloadType, packet.header().payloadType);
+  EXPECT_EQ(kSequenceNumber, packet.header().sequenceNumber);
+  EXPECT_EQ(kTimestamp, packet.header().timestamp);
+  EXPECT_EQ(kSsrc, packet.header().ssrc);
+  EXPECT_EQ(0, packet.header().numCSRCs);
+  EXPECT_EQ(kPacketLengthBytes, packet.packet_length_bytes());
+  EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
+            packet.payload_length_bytes());
+  EXPECT_EQ(kPacketLengthBytes, packet.virtual_packet_length_bytes());
+  EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
+            packet.virtual_payload_length_bytes());
+  EXPECT_EQ(kPacketTime, packet.time_ms());
+}
+
+TEST(TestPacket, DummyPacket) {
+  const size_t kPacketLengthBytes = kHeaderLengthBytes;  // Only RTP header.
+  const size_t kVirtualPacketLengthBytes = 100;
+  uint8_t* packet_memory = new uint8_t[kPacketLengthBytes];
+  const uint8_t kPayloadType = 17;
+  const uint16_t kSequenceNumber = 4711;
+  const uint32_t kTimestamp = 47114711;
+  const uint32_t kSsrc = 0x12345678;
+  MakeRtpHeader(
+      kPayloadType, kSequenceNumber, kTimestamp, kSsrc, packet_memory);
+  const double kPacketTime = 1.0;
+  // Hand over ownership of |packet_memory| to |packet|.
+  Packet packet(packet_memory,
+                kPacketLengthBytes,
+                kVirtualPacketLengthBytes,
+                kPacketTime);
+  ASSERT_TRUE(packet.valid_header());
+  EXPECT_EQ(kPayloadType, packet.header().payloadType);
+  EXPECT_EQ(kSequenceNumber, packet.header().sequenceNumber);
+  EXPECT_EQ(kTimestamp, packet.header().timestamp);
+  EXPECT_EQ(kSsrc, packet.header().ssrc);
+  EXPECT_EQ(0, packet.header().numCSRCs);
+  EXPECT_EQ(kPacketLengthBytes, packet.packet_length_bytes());
+  EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
+            packet.payload_length_bytes());
+  EXPECT_EQ(kVirtualPacketLengthBytes, packet.virtual_packet_length_bytes());
+  EXPECT_EQ(kVirtualPacketLengthBytes - kHeaderLengthBytes,
+            packet.virtual_payload_length_bytes());
+  EXPECT_EQ(kPacketTime, packet.time_ms());
+}
+
+namespace {
+// Writes one RED block header starting at |rtp_data|, according to RFC 2198.
+// returns the number of bytes written (1 or 4).
+//
+// Format if |last_payoad| is false:
+// 0                   1                    2                   3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3  4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |1|   block PT  |  timestamp offset         |   block length    |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// Format if |last_payoad| is true:
+// 0 1 2 3 4 5 6 7
+// +-+-+-+-+-+-+-+-+
+// |0|   Block PT  |
+// +-+-+-+-+-+-+-+-+
+
+int MakeRedHeader(int payload_type,
+                  uint32_t timestamp_offset,
+                  int block_length,
+                  bool last_payload,
+                  uint8_t* rtp_data) {
+  rtp_data[0] = 0x80 | (payload_type & 0x7F);  // Set the first bit to 1.
+  if (last_payload) {
+    rtp_data[0] &= 0x7F;  // Reset the first but to 0 to indicate last block.
+    return 1;
+  }
+  rtp_data[1] = timestamp_offset >> 6;
+  rtp_data[2] = (timestamp_offset & 0x3F) << 2;
+  rtp_data[2] |= block_length >> 8;
+  rtp_data[3] = block_length & 0xFF;
+  return 4;
+}
+}  // namespace
+
+TEST(TestPacket, RED) {
+  const size_t kPacketLengthBytes = 100;
+  uint8_t* packet_memory = new uint8_t[kPacketLengthBytes];
+  const uint8_t kRedPayloadType = 17;
+  const uint16_t kSequenceNumber = 4711;
+  const uint32_t kTimestamp = 47114711;
+  const uint32_t kSsrc = 0x12345678;
+  MakeRtpHeader(
+      kRedPayloadType, kSequenceNumber, kTimestamp, kSsrc, packet_memory);
+  // Create four RED headers.
+  // Payload types are just the same as the block index the offset is 100 times
+  // the block index.
+  const int kRedBlocks = 4;
+  uint8_t* payload_ptr =
+      &packet_memory[kHeaderLengthBytes];  // First byte after header.
+  for (int i = 0; i < kRedBlocks; ++i) {
+    int payload_type = i;
+    // Offset value is not used for the last block.
+    uint32_t timestamp_offset = 100 * i;
+    int block_length = 10 * i;
+    bool last_block = (i == kRedBlocks - 1) ? true : false;
+    payload_ptr += MakeRedHeader(
+        payload_type, timestamp_offset, block_length, last_block, payload_ptr);
+  }
+  const double kPacketTime = 1.0;
+  // Hand over ownership of |packet_memory| to |packet|.
+  Packet packet(packet_memory, kPacketLengthBytes, kPacketTime);
+  ASSERT_TRUE(packet.valid_header());
+  EXPECT_EQ(kRedPayloadType, packet.header().payloadType);
+  EXPECT_EQ(kSequenceNumber, packet.header().sequenceNumber);
+  EXPECT_EQ(kTimestamp, packet.header().timestamp);
+  EXPECT_EQ(kSsrc, packet.header().ssrc);
+  EXPECT_EQ(0, packet.header().numCSRCs);
+  EXPECT_EQ(kPacketLengthBytes, packet.packet_length_bytes());
+  EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
+            packet.payload_length_bytes());
+  EXPECT_EQ(kPacketLengthBytes, packet.virtual_packet_length_bytes());
+  EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
+            packet.virtual_payload_length_bytes());
+  EXPECT_EQ(kPacketTime, packet.time_ms());
+  std::list<RTPHeader*> red_headers;
+  EXPECT_TRUE(packet.ExtractRedHeaders(&red_headers));
+  EXPECT_EQ(kRedBlocks, static_cast<int>(red_headers.size()));
+  int block_index = 0;
+  for (std::list<RTPHeader*>::reverse_iterator it = red_headers.rbegin();
+       it != red_headers.rend();
+       ++it) {
+    // Reading list from the back, since the extraction puts the main payload
+    // (which is the last one on wire) first.
+    RTPHeader* red_block = *it;
+    EXPECT_EQ(block_index, red_block->payloadType);
+    EXPECT_EQ(kSequenceNumber, red_block->sequenceNumber);
+    if (block_index == kRedBlocks - 1) {
+      // Last block has zero offset per definition.
+      EXPECT_EQ(kTimestamp, red_block->timestamp);
+    } else {
+      EXPECT_EQ(kTimestamp - 100 * block_index, red_block->timestamp);
+    }
+    EXPECT_EQ(kSsrc, red_block->ssrc);
+    EXPECT_EQ(0, red_block->numCSRCs);
+    ++block_index;
+  }
+  Packet::DeleteRedHeaders(&red_headers);
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/resample_input_audio_file.cc b/modules/audio_coding/neteq/tools/resample_input_audio_file.cc
new file mode 100644
index 0000000..5050e1f
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/resample_input_audio_file.cc
@@ -0,0 +1,48 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h"
+
+#include <memory>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+
+bool ResampleInputAudioFile::Read(size_t samples,
+                                  int output_rate_hz,
+                                  int16_t* destination) {
+  const size_t samples_to_read = samples * file_rate_hz_ / output_rate_hz;
+  RTC_CHECK_EQ(samples_to_read * output_rate_hz, samples * file_rate_hz_)
+      << "Frame size and sample rates don't add up to an integer.";
+  std::unique_ptr<int16_t[]> temp_destination(new int16_t[samples_to_read]);
+  if (!InputAudioFile::Read(samples_to_read, temp_destination.get()))
+    return false;
+  resampler_.ResetIfNeeded(file_rate_hz_, output_rate_hz, 1);
+  size_t output_length = 0;
+  RTC_CHECK_EQ(resampler_.Push(temp_destination.get(), samples_to_read,
+                               destination, samples, output_length),
+               0);
+  RTC_CHECK_EQ(samples, output_length);
+  return true;
+}
+
+bool ResampleInputAudioFile::Read(size_t samples, int16_t* destination) {
+  RTC_CHECK_GT(output_rate_hz_, 0) << "Output rate not set.";
+  return Read(samples, output_rate_hz_, destination);
+}
+
+void ResampleInputAudioFile::set_output_rate_hz(int rate_hz) {
+  output_rate_hz_ = rate_hz;
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/resample_input_audio_file.h b/modules/audio_coding/neteq/tools/resample_input_audio_file.h
new file mode 100644
index 0000000..13c419d
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/resample_input_audio_file.h
@@ -0,0 +1,51 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_RESAMPLE_INPUT_AUDIO_FILE_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_RESAMPLE_INPUT_AUDIO_FILE_H_
+
+#include <string>
+
+#include "common_audio/resampler/include/resampler.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace test {
+
+// Class for handling a looping input audio file with resampling.
+class ResampleInputAudioFile : public InputAudioFile {
+ public:
+  ResampleInputAudioFile(const std::string file_name, int file_rate_hz)
+      : InputAudioFile(file_name),
+        file_rate_hz_(file_rate_hz),
+        output_rate_hz_(-1) {}
+  ResampleInputAudioFile(const std::string file_name,
+                         int file_rate_hz,
+                         int output_rate_hz)
+      : InputAudioFile(file_name),
+        file_rate_hz_(file_rate_hz),
+        output_rate_hz_(output_rate_hz) {}
+
+  bool Read(size_t samples, int output_rate_hz, int16_t* destination);
+  bool Read(size_t samples, int16_t* destination) override;
+  void set_output_rate_hz(int rate_hz);
+
+ private:
+  const int file_rate_hz_;
+  int output_rate_hz_;
+  Resampler resampler_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(ResampleInputAudioFile);
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_RESAMPLE_INPUT_AUDIO_FILE_H_
diff --git a/modules/audio_coding/neteq/tools/rtc_event_log_source.cc b/modules/audio_coding/neteq/tools/rtc_event_log_source.cc
new file mode 100644
index 0000000..d6224ff
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/rtc_event_log_source.cc
@@ -0,0 +1,108 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/rtc_event_log_source.h"
+
+#include <assert.h>
+#include <string.h>
+#include <iostream>
+#include <limits>
+
+#include "modules/audio_coding/neteq/tools/packet.h"
+#include "modules/rtp_rtcp/include/rtp_header_parser.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace test {
+
+RtcEventLogSource* RtcEventLogSource::Create(const std::string& file_name) {
+  RtcEventLogSource* source = new RtcEventLogSource();
+  RTC_CHECK(source->OpenFile(file_name));
+  return source;
+}
+
+RtcEventLogSource::~RtcEventLogSource() {}
+
+bool RtcEventLogSource::RegisterRtpHeaderExtension(RTPExtensionType type,
+                                                   uint8_t id) {
+  RTC_CHECK(parser_.get());
+  return parser_->RegisterRtpHeaderExtension(type, id);
+}
+
+std::unique_ptr<Packet> RtcEventLogSource::NextPacket() {
+  for (; rtp_packet_index_ < parsed_stream_.GetNumberOfEvents();
+       rtp_packet_index_++) {
+    if (parsed_stream_.GetEventType(rtp_packet_index_) ==
+        ParsedRtcEventLog::RTP_EVENT) {
+      PacketDirection direction;
+      size_t header_length;
+      size_t packet_length;
+      uint64_t timestamp_us = parsed_stream_.GetTimestamp(rtp_packet_index_);
+      parsed_stream_.GetRtpHeader(rtp_packet_index_, &direction, nullptr,
+                                  &header_length, &packet_length, nullptr);
+
+      if (direction != kIncomingPacket) {
+        continue;
+      }
+
+      uint8_t* packet_header = new uint8_t[header_length];
+      parsed_stream_.GetRtpHeader(rtp_packet_index_, nullptr, packet_header,
+                                  nullptr, nullptr, nullptr);
+      std::unique_ptr<Packet> packet(
+          new Packet(packet_header, header_length, packet_length,
+                     static_cast<double>(timestamp_us) / 1000, *parser_.get()));
+
+      if (!packet->valid_header()) {
+        std::cout << "Warning: Packet with index " << rtp_packet_index_
+                  << " has an invalid header and will be ignored." << std::endl;
+        continue;
+      }
+
+      if (parsed_stream_.GetMediaType(packet->header().ssrc, direction) !=
+          webrtc::ParsedRtcEventLog::MediaType::AUDIO) {
+        continue;
+      }
+
+      // Check if the packet should not be filtered out.
+      if (!filter_.test(packet->header().payloadType) &&
+          !(use_ssrc_filter_ && packet->header().ssrc != ssrc_)) {
+        ++rtp_packet_index_;
+        return packet;
+      }
+    }
+  }
+  return nullptr;
+}
+
+int64_t RtcEventLogSource::NextAudioOutputEventMs() {
+  while (audio_output_index_ < parsed_stream_.GetNumberOfEvents()) {
+    if (parsed_stream_.GetEventType(audio_output_index_) ==
+        ParsedRtcEventLog::AUDIO_PLAYOUT_EVENT) {
+      uint64_t timestamp_us = parsed_stream_.GetTimestamp(audio_output_index_);
+      // We call GetAudioPlayout only to check that the protobuf event is
+      // well-formed.
+      parsed_stream_.GetAudioPlayout(audio_output_index_, nullptr);
+      audio_output_index_++;
+      return timestamp_us / 1000;
+    }
+    audio_output_index_++;
+  }
+  return std::numeric_limits<int64_t>::max();
+}
+
+RtcEventLogSource::RtcEventLogSource()
+    : PacketSource(), parser_(RtpHeaderParser::Create()) {}
+
+bool RtcEventLogSource::OpenFile(const std::string& file_name) {
+  return parsed_stream_.ParseFile(file_name);
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/rtc_event_log_source.h b/modules/audio_coding/neteq/tools/rtc_event_log_source.h
new file mode 100644
index 0000000..df01e06
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/rtc_event_log_source.h
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_RTC_EVENT_LOG_SOURCE_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_RTC_EVENT_LOG_SOURCE_H_
+
+#include <memory>
+#include <string>
+
+#include "logging/rtc_event_log/rtc_event_log_parser.h"
+#include "modules/audio_coding/neteq/tools/packet_source.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class RtpHeaderParser;
+
+namespace test {
+
+class Packet;
+
+class RtcEventLogSource : public PacketSource {
+ public:
+  // Creates an RtcEventLogSource reading from |file_name|. If the file cannot
+  // be opened, or has the wrong format, NULL will be returned.
+  static RtcEventLogSource* Create(const std::string& file_name);
+
+  virtual ~RtcEventLogSource();
+
+  // Registers an RTP header extension and binds it to |id|.
+  virtual bool RegisterRtpHeaderExtension(RTPExtensionType type, uint8_t id);
+
+  std::unique_ptr<Packet> NextPacket() override;
+
+  // Returns the timestamp of the next audio output event, in milliseconds. The
+  // maximum value of int64_t is returned if there are no more audio output
+  // events available.
+  int64_t NextAudioOutputEventMs();
+
+ private:
+  RtcEventLogSource();
+
+  bool OpenFile(const std::string& file_name);
+
+  size_t rtp_packet_index_ = 0;
+  size_t audio_output_index_ = 0;
+
+  ParsedRtcEventLog parsed_stream_;
+  std::unique_ptr<RtpHeaderParser> parser_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(RtcEventLogSource);
+};
+
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_RTC_EVENT_LOG_SOURCE_H_
diff --git a/modules/audio_coding/neteq/tools/rtp_analyze.cc b/modules/audio_coding/neteq/tools/rtp_analyze.cc
new file mode 100644
index 0000000..12721cc
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/rtp_analyze.cc
@@ -0,0 +1,174 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include <memory>
+#include <vector>
+
+#include "modules/audio_coding/neteq/tools/packet.h"
+#include "modules/audio_coding/neteq/tools/rtp_file_source.h"
+#include "rtc_base/flags.h"
+
+// Define command line flags.
+DEFINE_int(red, 117, "RTP payload type for RED");
+DEFINE_int(audio_level, -1, "Extension ID for audio level (RFC 6464); "
+                            "-1 not to print audio level");
+DEFINE_int(abs_send_time, -1, "Extension ID for absolute sender time; "
+                             "-1 not to print absolute send time");
+DEFINE_bool(help, false, "Print this message");
+
+int main(int argc, char* argv[]) {
+  std::string program_name = argv[0];
+  std::string usage =
+      "Tool for parsing an RTP dump file to text output.\n"
+      "Run " +
+      program_name +
+      " --help for usage.\n"
+      "Example usage:\n" +
+      program_name + " input.rtp output.txt\n\n" +
+      "Output is sent to stdout if no output file is given. " +
+      "Note that this tool can read files with or without payloads.\n";
+  if (rtc::FlagList::SetFlagsFromCommandLine(&argc, argv, true) ||
+      FLAG_help || (argc != 2 && argc != 3)) {
+    printf("%s", usage.c_str());
+    if (FLAG_help) {
+      rtc::FlagList::Print(nullptr, false);
+      return 0;
+    }
+    return 1;
+  }
+
+  RTC_CHECK(FLAG_red >= 0 && FLAG_red <= 127);  // Payload type
+  RTC_CHECK(FLAG_audio_level == -1 ||  // Default
+      (FLAG_audio_level > 0 && FLAG_audio_level <= 255));  // Extension ID
+  RTC_CHECK(FLAG_abs_send_time == -1 ||  // Default
+      (FLAG_abs_send_time > 0 && FLAG_abs_send_time <= 255));  // Extension ID
+
+  printf("Input file: %s\n", argv[1]);
+  std::unique_ptr<webrtc::test::RtpFileSource> file_source(
+      webrtc::test::RtpFileSource::Create(argv[1]));
+  assert(file_source.get());
+  // Set RTP extension IDs.
+  bool print_audio_level = false;
+  if (FLAG_audio_level != -1) {
+    print_audio_level = true;
+    file_source->RegisterRtpHeaderExtension(webrtc::kRtpExtensionAudioLevel,
+                                            FLAG_audio_level);
+  }
+  bool print_abs_send_time = false;
+  if (FLAG_abs_send_time != -1) {
+    print_abs_send_time = true;
+    file_source->RegisterRtpHeaderExtension(
+        webrtc::kRtpExtensionAbsoluteSendTime, FLAG_abs_send_time);
+  }
+
+  FILE* out_file;
+  if (argc == 3) {
+    out_file = fopen(argv[2], "wt");
+    if (!out_file) {
+      printf("Cannot open output file %s\n", argv[2]);
+      return -1;
+    }
+    printf("Output file: %s\n\n", argv[2]);
+  } else {
+    out_file = stdout;
+  }
+
+  // Print file header.
+  fprintf(out_file, "SeqNo  TimeStamp   SendTime  Size    PT  M       SSRC");
+  if (print_audio_level) {
+    fprintf(out_file, " AuLvl (V)");
+  }
+  if (print_abs_send_time) {
+    fprintf(out_file, " AbsSendTime");
+  }
+  fprintf(out_file, "\n");
+
+  uint32_t max_abs_send_time = 0;
+  int cycles = -1;
+  std::unique_ptr<webrtc::test::Packet> packet;
+  while (true) {
+    packet = file_source->NextPacket();
+    if (!packet.get()) {
+      // End of file reached.
+      break;
+    }
+    // Write packet data to file. Use virtual_packet_length_bytes so that the
+    // correct packet sizes are printed also for RTP header-only dumps.
+    fprintf(out_file,
+            "%5u %10u %10u %5i %5i %2i %#08X",
+            packet->header().sequenceNumber,
+            packet->header().timestamp,
+            static_cast<unsigned int>(packet->time_ms()),
+            static_cast<int>(packet->virtual_packet_length_bytes()),
+            packet->header().payloadType,
+            packet->header().markerBit,
+            packet->header().ssrc);
+    if (print_audio_level && packet->header().extension.hasAudioLevel) {
+      fprintf(out_file,
+              " %5u (%1i)",
+              packet->header().extension.audioLevel,
+              packet->header().extension.voiceActivity);
+    }
+    if (print_abs_send_time && packet->header().extension.hasAbsoluteSendTime) {
+      if (cycles == -1) {
+        // Initialize.
+        max_abs_send_time = packet->header().extension.absoluteSendTime;
+        cycles = 0;
+      }
+      // Abs sender time is 24 bit 6.18 fixed point. Shift by 8 to normalize to
+      // 32 bits (unsigned). Calculate the difference between this packet's
+      // send time and the maximum observed. Cast to signed 32-bit to get the
+      // desired wrap-around behavior.
+      if (static_cast<int32_t>(
+              (packet->header().extension.absoluteSendTime << 8) -
+              (max_abs_send_time << 8)) >= 0) {
+        // The difference is non-negative, meaning that this packet is newer
+        // than the previously observed maximum absolute send time.
+        if (packet->header().extension.absoluteSendTime < max_abs_send_time) {
+          // Wrap detected.
+          cycles++;
+        }
+        max_abs_send_time = packet->header().extension.absoluteSendTime;
+      }
+      // Abs sender time is 24 bit 6.18 fixed point. Divide by 2^18 to convert
+      // to floating point representation.
+      double send_time_seconds =
+          static_cast<double>(packet->header().extension.absoluteSendTime) /
+              262144 +
+          64.0 * cycles;
+      fprintf(out_file, " %11f", send_time_seconds);
+    }
+    fprintf(out_file, "\n");
+
+    if (packet->header().payloadType == FLAG_red) {
+      std::list<webrtc::RTPHeader*> red_headers;
+      packet->ExtractRedHeaders(&red_headers);
+      while (!red_headers.empty()) {
+        webrtc::RTPHeader* red = red_headers.front();
+        assert(red);
+        fprintf(out_file,
+                "* %5u %10u %10u %5i\n",
+                red->sequenceNumber,
+                red->timestamp,
+                static_cast<unsigned int>(packet->time_ms()),
+                red->payloadType);
+        red_headers.pop_front();
+        delete red;
+      }
+    }
+  }
+
+  fclose(out_file);
+
+  return 0;
+}
diff --git a/modules/audio_coding/neteq/tools/rtp_encode.cc b/modules/audio_coding/neteq/tools/rtp_encode.cc
new file mode 100644
index 0000000..ce07199
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/rtp_encode.cc
@@ -0,0 +1,356 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#ifdef WIN32
+#include <winsock2.h>
+#endif
+#ifdef WEBRTC_LINUX
+#include <netinet/in.h>
+#endif
+
+#include <iostream>
+#include <map>
+#include <string>
+
+#include "api/audio_codecs/L16/audio_encoder_L16.h"
+#include "api/audio_codecs/g711/audio_encoder_g711.h"
+#include "api/audio_codecs/g722/audio_encoder_g722.h"
+#include "api/audio_codecs/ilbc/audio_encoder_ilbc.h"
+#include "api/audio_codecs/isac/audio_encoder_isac.h"
+#include "api/audio_codecs/opus/audio_encoder_opus.h"
+#include "modules/audio_coding/codecs/cng/audio_encoder_cng.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "rtc_base/flags.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/ptr_util.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace test {
+namespace {
+
+// Define command line flags.
+DEFINE_bool(list_codecs, false, "Enumerate all codecs");
+DEFINE_string(codec, "opus", "Codec to use");
+DEFINE_int(frame_len, 0, "Frame length in ms; 0 indicates codec default value");
+DEFINE_int(bitrate, 0, "Bitrate in kbps; 0 indicates codec default value");
+DEFINE_int(payload_type,
+           -1,
+           "RTP payload type; -1 indicates codec default value");
+DEFINE_int(cng_payload_type,
+           -1,
+           "RTP payload type for CNG; -1 indicates default value");
+DEFINE_int(ssrc, 0, "SSRC to write to the RTP header");
+DEFINE_bool(dtx, false, "Use DTX/CNG");
+DEFINE_int(sample_rate, 48000, "Sample rate of the input file");
+DEFINE_bool(help, false, "Print this message");
+
+// Add new codecs here, and to the map below.
+enum class CodecType {
+  kOpus,
+  kPcmU,
+  kPcmA,
+  kG722,
+  kPcm16b8,
+  kPcm16b16,
+  kPcm16b32,
+  kPcm16b48,
+  kIlbc,
+  kIsac
+};
+
+struct CodecTypeAndInfo {
+  CodecType type;
+  int default_payload_type;
+  bool internal_dtx;
+};
+
+// List all supported codecs here. This map defines the command-line parameter
+// value (the key string) for selecting each codec, together with information
+// whether it is using internal or external DTX/CNG.
+const std::map<std::string, CodecTypeAndInfo>& CodecList() {
+  static const auto* const codec_list =
+      new std::map<std::string, CodecTypeAndInfo>{
+          {"opus", {CodecType::kOpus, 111, true}},
+          {"pcmu", {CodecType::kPcmU, 0, false}},
+          {"pcma", {CodecType::kPcmA, 8, false}},
+          {"g722", {CodecType::kG722, 9, false}},
+          {"pcm16b_8", {CodecType::kPcm16b8, 93, false}},
+          {"pcm16b_16", {CodecType::kPcm16b16, 94, false}},
+          {"pcm16b_32", {CodecType::kPcm16b32, 95, false}},
+          {"pcm16b_48", {CodecType::kPcm16b48, 96, false}},
+          {"ilbc", {CodecType::kIlbc, 102, false}},
+          {"isac", {CodecType::kIsac, 103, false}}};
+  return *codec_list;
+}
+
+// This class will receive callbacks from ACM when a packet is ready, and write
+// it to the output file.
+class Packetizer : public AudioPacketizationCallback {
+ public:
+  Packetizer(FILE* out_file, uint32_t ssrc, int timestamp_rate_hz)
+      : out_file_(out_file),
+        ssrc_(ssrc),
+        timestamp_rate_hz_(timestamp_rate_hz) {}
+
+  int32_t SendData(FrameType frame_type,
+                   uint8_t payload_type,
+                   uint32_t timestamp,
+                   const uint8_t* payload_data,
+                   size_t payload_len_bytes,
+                   const RTPFragmentationHeader* fragmentation) override {
+    RTC_CHECK(!fragmentation);
+    if (payload_len_bytes == 0) {
+      return 0;
+    }
+
+    constexpr size_t kRtpHeaderLength = 12;
+    constexpr size_t kRtpDumpHeaderLength = 8;
+    const uint16_t length = htons(rtc::checked_cast<uint16_t>(
+        kRtpHeaderLength + kRtpDumpHeaderLength + payload_len_bytes));
+    const uint16_t plen = htons(
+        rtc::checked_cast<uint16_t>(kRtpHeaderLength + payload_len_bytes));
+    const uint32_t offset = htonl(timestamp / (timestamp_rate_hz_ / 1000));
+    RTC_CHECK_EQ(fwrite(&length, sizeof(uint16_t), 1, out_file_), 1);
+    RTC_CHECK_EQ(fwrite(&plen, sizeof(uint16_t), 1, out_file_), 1);
+    RTC_CHECK_EQ(fwrite(&offset, sizeof(uint32_t), 1, out_file_), 1);
+
+    const uint8_t rtp_header[] = {0x80,
+                                  static_cast<uint8_t>(payload_type & 0x7F),
+                                  static_cast<uint8_t>(sequence_number_ >> 8),
+                                  static_cast<uint8_t>(sequence_number_),
+                                  static_cast<uint8_t>(timestamp >> 24),
+                                  static_cast<uint8_t>(timestamp >> 16),
+                                  static_cast<uint8_t>(timestamp >> 8),
+                                  static_cast<uint8_t>(timestamp),
+                                  static_cast<uint8_t>(ssrc_ >> 24),
+                                  static_cast<uint8_t>(ssrc_ >> 16),
+                                  static_cast<uint8_t>(ssrc_ >> 8),
+                                  static_cast<uint8_t>(ssrc_)};
+    static_assert(sizeof(rtp_header) == kRtpHeaderLength, "");
+    RTC_CHECK_EQ(
+        fwrite(rtp_header, sizeof(uint8_t), kRtpHeaderLength, out_file_),
+        kRtpHeaderLength);
+    ++sequence_number_;  // Intended to wrap on overflow.
+
+    RTC_CHECK_EQ(
+        fwrite(payload_data, sizeof(uint8_t), payload_len_bytes, out_file_),
+        payload_len_bytes);
+
+    return 0;
+  }
+
+ private:
+  FILE* const out_file_;
+  const uint32_t ssrc_;
+  const int timestamp_rate_hz_;
+  uint16_t sequence_number_ = 0;
+};
+
+void SetFrameLenIfFlagIsPositive(int* config_frame_len) {
+  if (FLAG_frame_len > 0) {
+    *config_frame_len = FLAG_frame_len;
+  }
+}
+
+template <typename T>
+typename T::Config GetCodecConfig() {
+  typename T::Config config;
+  SetFrameLenIfFlagIsPositive(&config.frame_size_ms);
+  RTC_CHECK(config.IsOk());
+  return config;
+}
+
+AudioEncoderL16::Config Pcm16bConfig(CodecType codec_type) {
+  auto config = GetCodecConfig<AudioEncoderL16>();
+  switch (codec_type) {
+    case CodecType::kPcm16b8:
+      config.sample_rate_hz = 8000;
+      return config;
+    case CodecType::kPcm16b16:
+      config.sample_rate_hz = 16000;
+      return config;
+    case CodecType::kPcm16b32:
+      config.sample_rate_hz = 32000;
+      return config;
+    case CodecType::kPcm16b48:
+      config.sample_rate_hz = 48000;
+      return config;
+    default:
+      RTC_NOTREACHED();
+      return config;
+  }
+}
+
+std::unique_ptr<AudioEncoder> CreateEncoder(CodecType codec_type,
+                                            int payload_type) {
+  switch (codec_type) {
+    case CodecType::kOpus: {
+      AudioEncoderOpus::Config config = GetCodecConfig<AudioEncoderOpus>();
+      if (FLAG_bitrate > 0) {
+        config.bitrate_bps = FLAG_bitrate;
+      }
+      config.dtx_enabled = FLAG_dtx;
+      RTC_CHECK(config.IsOk());
+      return AudioEncoderOpus::MakeAudioEncoder(config, payload_type);
+    }
+
+    case CodecType::kPcmU:
+    case CodecType::kPcmA: {
+      AudioEncoderG711::Config config = GetCodecConfig<AudioEncoderG711>();
+      config.type = codec_type == CodecType::kPcmU
+                        ? AudioEncoderG711::Config::Type::kPcmU
+                        : AudioEncoderG711::Config::Type::kPcmA;
+      RTC_CHECK(config.IsOk());
+      return AudioEncoderG711::MakeAudioEncoder(config, payload_type);
+    }
+
+    case CodecType::kG722: {
+      return AudioEncoderG722::MakeAudioEncoder(
+          GetCodecConfig<AudioEncoderG722>(), payload_type);
+    }
+
+    case CodecType::kPcm16b8:
+    case CodecType::kPcm16b16:
+    case CodecType::kPcm16b32:
+    case CodecType::kPcm16b48: {
+      return AudioEncoderL16::MakeAudioEncoder(Pcm16bConfig(codec_type),
+                                               payload_type);
+    }
+
+    case CodecType::kIlbc: {
+      return AudioEncoderIlbc::MakeAudioEncoder(
+          GetCodecConfig<AudioEncoderIlbc>(), payload_type);
+    }
+
+    case CodecType::kIsac: {
+      return AudioEncoderIsac::MakeAudioEncoder(
+          GetCodecConfig<AudioEncoderIsac>(), payload_type);
+    }
+  }
+  RTC_NOTREACHED();
+  return nullptr;
+}
+
+AudioEncoderCng::Config GetCngConfig(int sample_rate_hz) {
+  AudioEncoderCng::Config cng_config;
+  const auto default_payload_type = [&] {
+    switch (sample_rate_hz) {
+      case 8000: return 13;
+      case 16000: return 98;
+      case 32000: return 99;
+      case 48000: return 100;
+      default: RTC_NOTREACHED();
+    }
+    return 0;
+  };
+  cng_config.payload_type = FLAG_cng_payload_type != -1
+                                ? FLAG_cng_payload_type
+                                : default_payload_type();
+  return cng_config;
+}
+
+int RunRtpEncode(int argc, char* argv[]) {
+  const std::string program_name = argv[0];
+  const std::string usage =
+      "Tool for generating an RTP dump file from audio input.\n"
+      "Run " +
+      program_name +
+      " --help for usage.\n"
+      "Example usage:\n" +
+      program_name + " input.pcm output.rtp --codec=[codec] " +
+      "--frame_len=[frame_len] --bitrate=[bitrate]\n\n";
+  if (rtc::FlagList::SetFlagsFromCommandLine(&argc, argv, true) || FLAG_help ||
+      (!FLAG_list_codecs && argc != 3)) {
+    printf("%s", usage.c_str());
+    if (FLAG_help) {
+      rtc::FlagList::Print(nullptr, false);
+      return 0;
+    }
+    return 1;
+  }
+
+  if (FLAG_list_codecs) {
+    printf("The following arguments are valid --codec parameters:\n");
+    for (const auto& c : CodecList()) {
+      printf("  %s\n", c.first.c_str());
+    }
+    return 0;
+  }
+
+  const auto codec_it = CodecList().find(FLAG_codec);
+  if (codec_it == CodecList().end()) {
+    printf("%s is not a valid codec name.\n", FLAG_codec);
+    printf("Use argument --list_codecs to see all valid codec names.\n");
+    return 1;
+  }
+
+  // Create the codec.
+  const int payload_type = FLAG_payload_type == -1
+                               ? codec_it->second.default_payload_type
+                               : FLAG_payload_type;
+  std::unique_ptr<AudioEncoder> codec =
+      CreateEncoder(codec_it->second.type, payload_type);
+
+  // Create an external VAD/CNG encoder if needed.
+  if (FLAG_dtx && !codec_it->second.internal_dtx) {
+    AudioEncoderCng::Config cng_config = GetCngConfig(codec->SampleRateHz());
+    RTC_DCHECK(codec);
+    cng_config.speech_encoder = std::move(codec);
+    codec = rtc::MakeUnique<AudioEncoderCng>(std::move(cng_config));
+  }
+  RTC_DCHECK(codec);
+
+  // Set up ACM.
+  const int timestamp_rate_hz = codec->RtpTimestampRateHz();
+  AudioCodingModule::Config config;
+  std::unique_ptr<AudioCodingModule> acm(AudioCodingModule::Create(config));
+  acm->SetEncoder(std::move(codec));
+
+  // Open files.
+  printf("Input file: %s\n", argv[1]);
+  InputAudioFile input_file(argv[1], false);  // Open input in non-looping mode.
+  FILE* out_file = fopen(argv[2], "wb");
+  RTC_CHECK(out_file) << "Could not open file " << argv[2] << " for writing";
+  printf("Output file: %s\n", argv[2]);
+  fprintf(out_file, "#!rtpplay1.0 \n");  //,
+  // Write 3 32-bit values followed by 2 16-bit values, all set to 0. This means
+  // a total of 16 bytes.
+  const uint8_t file_header[16] = {0};
+  RTC_CHECK_EQ(fwrite(file_header, sizeof(file_header), 1, out_file), 1);
+
+  // Create and register the packetizer, which will write the packets to file.
+  Packetizer packetizer(out_file, FLAG_ssrc, timestamp_rate_hz);
+  RTC_DCHECK_EQ(acm->RegisterTransportCallback(&packetizer), 0);
+
+  AudioFrame audio_frame;
+  audio_frame.samples_per_channel_ = FLAG_sample_rate / 100;  // 10 ms
+  audio_frame.sample_rate_hz_ = FLAG_sample_rate;
+  audio_frame.num_channels_ = 1;
+
+  while (input_file.Read(audio_frame.samples_per_channel_,
+                         audio_frame.mutable_data())) {
+    RTC_CHECK_GE(acm->Add10MsData(audio_frame), 0);
+    audio_frame.timestamp_ += audio_frame.samples_per_channel_;
+  }
+
+  return 0;
+}
+
+}  // namespace
+}  // namespace test
+}  // namespace webrtc
+
+int main(int argc, char* argv[]) {
+  return webrtc::test::RunRtpEncode(argc, argv);
+}
diff --git a/modules/audio_coding/neteq/tools/rtp_file_source.cc b/modules/audio_coding/neteq/tools/rtp_file_source.cc
new file mode 100644
index 0000000..c9ae5f2
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/rtp_file_source.cc
@@ -0,0 +1,103 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/neteq/tools/rtp_file_source.h"
+
+#include <assert.h>
+#include <string.h>
+#ifdef WIN32
+#include <winsock2.h>
+#else
+#include <netinet/in.h>
+#endif
+
+#include <memory>
+
+#include "modules/audio_coding/neteq/tools/packet.h"
+#include "modules/rtp_rtcp/include/rtp_header_parser.h"
+#include "rtc_base/checks.h"
+#include "test/rtp_file_reader.h"
+
+namespace webrtc {
+namespace test {
+
+RtpFileSource* RtpFileSource::Create(const std::string& file_name) {
+  RtpFileSource* source = new RtpFileSource();
+  RTC_CHECK(source->OpenFile(file_name));
+  return source;
+}
+
+bool RtpFileSource::ValidRtpDump(const std::string& file_name) {
+  std::unique_ptr<RtpFileReader> temp_file(
+      RtpFileReader::Create(RtpFileReader::kRtpDump, file_name));
+  return !!temp_file;
+}
+
+bool RtpFileSource::ValidPcap(const std::string& file_name) {
+  std::unique_ptr<RtpFileReader> temp_file(
+      RtpFileReader::Create(RtpFileReader::kPcap, file_name));
+  return !!temp_file;
+}
+
+RtpFileSource::~RtpFileSource() {
+}
+
+bool RtpFileSource::RegisterRtpHeaderExtension(RTPExtensionType type,
+                                               uint8_t id) {
+  assert(parser_.get());
+  return parser_->RegisterRtpHeaderExtension(type, id);
+}
+
+std::unique_ptr<Packet> RtpFileSource::NextPacket() {
+  while (true) {
+    RtpPacket temp_packet;
+    if (!rtp_reader_->NextPacket(&temp_packet)) {
+      return NULL;
+    }
+    if (temp_packet.original_length == 0) {
+      // May be an RTCP packet.
+      // Read the next one.
+      continue;
+    }
+    std::unique_ptr<uint8_t[]> packet_memory(new uint8_t[temp_packet.length]);
+    memcpy(packet_memory.get(), temp_packet.data, temp_packet.length);
+    std::unique_ptr<Packet> packet(new Packet(
+        packet_memory.release(), temp_packet.length,
+        temp_packet.original_length, temp_packet.time_ms, *parser_.get()));
+    if (!packet->valid_header()) {
+      continue;
+    }
+    if (filter_.test(packet->header().payloadType) ||
+        (use_ssrc_filter_ && packet->header().ssrc != ssrc_)) {
+      // This payload type should be filtered out. Continue to the next packet.
+      continue;
+    }
+    return packet;
+  }
+}
+
+RtpFileSource::RtpFileSource()
+    : PacketSource(),
+      parser_(RtpHeaderParser::Create()) {}
+
+bool RtpFileSource::OpenFile(const std::string& file_name) {
+  rtp_reader_.reset(RtpFileReader::Create(RtpFileReader::kRtpDump, file_name));
+  if (rtp_reader_)
+    return true;
+  rtp_reader_.reset(RtpFileReader::Create(RtpFileReader::kPcap, file_name));
+  if (!rtp_reader_) {
+    FATAL() << "Couldn't open input file as either a rtpdump or .pcap. Note "
+               "that .pcapng is not supported.";
+  }
+  return true;
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/rtp_file_source.h b/modules/audio_coding/neteq/tools/rtp_file_source.h
new file mode 100644
index 0000000..b44bc64
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/rtp_file_source.h
@@ -0,0 +1,66 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_FILE_SOURCE_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_FILE_SOURCE_H_
+
+#include <stdio.h>
+
+#include <memory>
+#include <string>
+
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/neteq/tools/packet_source.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class RtpHeaderParser;
+
+namespace test {
+
+class RtpFileReader;
+
+class RtpFileSource : public PacketSource {
+ public:
+  // Creates an RtpFileSource reading from |file_name|. If the file cannot be
+  // opened, or has the wrong format, NULL will be returned.
+  static RtpFileSource* Create(const std::string& file_name);
+
+  // Checks whether a files is a valid RTP dump or PCAP (Wireshark) file.
+  static bool ValidRtpDump(const std::string& file_name);
+  static bool ValidPcap(const std::string& file_name);
+
+  virtual ~RtpFileSource();
+
+  // Registers an RTP header extension and binds it to |id|.
+  virtual bool RegisterRtpHeaderExtension(RTPExtensionType type, uint8_t id);
+
+  std::unique_ptr<Packet> NextPacket() override;
+
+ private:
+  static const int kFirstLineLength = 40;
+  static const int kRtpFileHeaderSize = 4 + 4 + 4 + 2 + 2;
+  static const size_t kPacketHeaderSize = 8;
+
+  RtpFileSource();
+
+  bool OpenFile(const std::string& file_name);
+
+  std::unique_ptr<RtpFileReader> rtp_reader_;
+  std::unique_ptr<RtpHeaderParser> parser_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(RtpFileSource);
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_FILE_SOURCE_H_
diff --git a/modules/audio_coding/neteq/tools/rtp_generator.cc b/modules/audio_coding/neteq/tools/rtp_generator.cc
new file mode 100644
index 0000000..cedd7ae
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/rtp_generator.cc
@@ -0,0 +1,61 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+
+#include "modules/audio_coding/neteq/tools/rtp_generator.h"
+
+namespace webrtc {
+namespace test {
+
+uint32_t RtpGenerator::GetRtpHeader(uint8_t payload_type,
+                                    size_t payload_length_samples,
+                                    RTPHeader* rtp_header) {
+  assert(rtp_header);
+  if (!rtp_header) {
+    return 0;
+  }
+  rtp_header->sequenceNumber = seq_number_++;
+  rtp_header->timestamp = timestamp_;
+  timestamp_ += static_cast<uint32_t>(payload_length_samples);
+  rtp_header->payloadType = payload_type;
+  rtp_header->markerBit = false;
+  rtp_header->ssrc = ssrc_;
+  rtp_header->numCSRCs = 0;
+
+  uint32_t this_send_time = next_send_time_ms_;
+  assert(samples_per_ms_ > 0);
+  next_send_time_ms_ += ((1.0 + drift_factor_) * payload_length_samples) /
+      samples_per_ms_;
+  return this_send_time;
+}
+
+void RtpGenerator::set_drift_factor(double factor) {
+  if (factor > -1.0) {
+    drift_factor_ = factor;
+  }
+}
+
+uint32_t TimestampJumpRtpGenerator::GetRtpHeader(uint8_t payload_type,
+                                                 size_t payload_length_samples,
+                                                 RTPHeader* rtp_header) {
+  uint32_t ret = RtpGenerator::GetRtpHeader(
+      payload_type, payload_length_samples, rtp_header);
+  if (timestamp_ - static_cast<uint32_t>(payload_length_samples) <=
+          jump_from_timestamp_ &&
+      timestamp_ > jump_from_timestamp_) {
+    // We just moved across the |jump_from_timestamp_| timestamp. Do the jump.
+    timestamp_ = jump_to_timestamp_;
+  }
+  return ret;
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/rtp_generator.h b/modules/audio_coding/neteq/tools/rtp_generator.h
new file mode 100644
index 0000000..3b3cca9
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/rtp_generator.h
@@ -0,0 +1,84 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_GENERATOR_H_
+#define MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_GENERATOR_H_
+
+#include "api/rtp_headers.h"
+#include "common_types.h"  // NOLINT(build/include)
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace test {
+
+// Class for generating RTP headers.
+class RtpGenerator {
+ public:
+  RtpGenerator(int samples_per_ms,
+               uint16_t start_seq_number = 0,
+               uint32_t start_timestamp = 0,
+               uint32_t start_send_time_ms = 0,
+               uint32_t ssrc = 0x12345678)
+      : seq_number_(start_seq_number),
+        timestamp_(start_timestamp),
+        next_send_time_ms_(start_send_time_ms),
+        ssrc_(ssrc),
+        samples_per_ms_(samples_per_ms),
+        drift_factor_(0.0) {
+  }
+
+  virtual ~RtpGenerator() {}
+
+  // Writes the next RTP header to |rtp_header|, which will be of type
+  // |payload_type|. Returns the send time for this packet (in ms). The value of
+  // |payload_length_samples| determines the send time for the next packet.
+  virtual uint32_t GetRtpHeader(uint8_t payload_type,
+                                size_t payload_length_samples,
+                                RTPHeader* rtp_header);
+
+  void set_drift_factor(double factor);
+
+ protected:
+  uint16_t seq_number_;
+  uint32_t timestamp_;
+  uint32_t next_send_time_ms_;
+  const uint32_t ssrc_;
+  const int samples_per_ms_;
+  double drift_factor_;
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(RtpGenerator);
+};
+
+class TimestampJumpRtpGenerator : public RtpGenerator {
+ public:
+  TimestampJumpRtpGenerator(int samples_per_ms,
+                            uint16_t start_seq_number,
+                            uint32_t start_timestamp,
+                            uint32_t jump_from_timestamp,
+                            uint32_t jump_to_timestamp)
+      : RtpGenerator(samples_per_ms, start_seq_number, start_timestamp),
+        jump_from_timestamp_(jump_from_timestamp),
+        jump_to_timestamp_(jump_to_timestamp) {}
+
+  uint32_t GetRtpHeader(uint8_t payload_type,
+                        size_t payload_length_samples,
+                        RTPHeader* rtp_header) override;
+
+ private:
+  uint32_t jump_from_timestamp_;
+  uint32_t jump_to_timestamp_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(TimestampJumpRtpGenerator);
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_GENERATOR_H_
diff --git a/modules/audio_coding/neteq/tools/rtp_jitter.cc b/modules/audio_coding/neteq/tools/rtp_jitter.cc
new file mode 100644
index 0000000..d92fed0
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/rtp_jitter.cc
@@ -0,0 +1,152 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <algorithm>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/rtp_rtcp/source/byte_io.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/flags.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace test {
+namespace {
+
+DEFINE_bool(help, false, "Print help message");
+
+constexpr size_t kRtpDumpHeaderLength = 8;
+
+// Returns the next packet or an empty buffer if end of file was encountered.
+rtc::Buffer ReadNextPacket(FILE* file) {
+  // Read the rtpdump header for the next packet.
+  rtc::Buffer buffer;
+  buffer.SetData(kRtpDumpHeaderLength, [&](rtc::ArrayView<uint8_t> x) {
+    return fread(x.data(), 1, x.size(), file);
+  });
+  if (buffer.size() != kRtpDumpHeaderLength) {
+    return rtc::Buffer();
+  }
+
+  // Get length field. This is the total length for this packet written to file,
+  // including the kRtpDumpHeaderLength bytes already read.
+  const uint16_t len = ByteReader<uint16_t>::ReadBigEndian(buffer.data());
+  RTC_CHECK_GE(len, kRtpDumpHeaderLength);
+
+  // Read remaining data from file directly into buffer.
+  buffer.AppendData(len - kRtpDumpHeaderLength, [&](rtc::ArrayView<uint8_t> x) {
+    return fread(x.data(), 1, x.size(), file);
+  });
+  if (buffer.size() != len) {
+    buffer.Clear();
+  }
+  return buffer;
+}
+
+struct PacketAndTime {
+  rtc::Buffer packet;
+  int time;
+};
+
+void WritePacket(const PacketAndTime& packet, FILE* file) {
+  // Write the first 4 bytes from the original packet.
+  const auto* payload_ptr = packet.packet.data();
+  RTC_CHECK_EQ(fwrite(payload_ptr, 4, 1, file), 1);
+  payload_ptr += 4;
+
+  // Convert the new time offset to network endian, and write to file.
+  uint8_t time[sizeof(uint32_t)];
+  ByteWriter<uint32_t, sizeof(uint32_t)>::WriteBigEndian(time, packet.time);
+  RTC_CHECK_EQ(fwrite(time, sizeof(uint32_t), 1, file), 1);
+  payload_ptr += 4;  // Skip the old time in the original payload.
+
+  // Write the remaining part of the payload.
+  RTC_DCHECK_EQ(payload_ptr - packet.packet.data(), kRtpDumpHeaderLength);
+  RTC_CHECK_EQ(
+      fwrite(payload_ptr, packet.packet.size() - kRtpDumpHeaderLength, 1, file),
+      1);
+}
+
+int RunRtpJitter(int argc, char* argv[]) {
+  const std::string program_name = argv[0];
+  const std::string usage =
+      "Tool for alternating the arrival times in an RTP dump file.\n"
+      "Example usage:\n" +
+      program_name + " input.rtp arrival_times_ms.txt output.rtp\n\n";
+  if (rtc::FlagList::SetFlagsFromCommandLine(&argc, argv, true) || FLAG_help ||
+      argc != 4) {
+    printf("%s", usage.c_str());
+    return FLAG_help ? 0 : 1;
+  }
+
+  printf("Input RTP file: %s\n", argv[1]);
+  FILE* in_file = fopen(argv[1], "rb");
+  RTC_CHECK(in_file) << "Could not open file " << argv[1] << " for reading";
+  printf("Timing file: %s\n", argv[2]);
+  std::ifstream timing_file(argv[2]);
+  printf("Output file: %s\n", argv[3]);
+  FILE* out_file = fopen(argv[3], "wb");
+  RTC_CHECK(out_file) << "Could not open file " << argv[2] << " for writing";
+
+  // Copy the RTP file header to the output file.
+  char header_string[30];
+  RTC_CHECK(fgets(header_string, 30, in_file));
+  fprintf(out_file, "%s", header_string);
+  uint8_t file_header[16];
+  RTC_CHECK_EQ(fread(file_header, sizeof(file_header), 1, in_file), 1);
+  RTC_CHECK_EQ(fwrite(file_header, sizeof(file_header), 1, out_file), 1);
+
+  // Read all time values from the timing file. Store in a vector.
+  std::vector<int> new_arrival_times;
+  int new_time;
+  while (timing_file >> new_time) {
+    new_arrival_times.push_back(new_time);
+  }
+
+  // Read all packets from the input RTP file, but no more than the number of
+  // new time values. Store RTP packets together with new time values.
+  auto time_it = new_arrival_times.begin();
+  std::vector<PacketAndTime> packets;
+  while (1) {
+    auto packet = ReadNextPacket(in_file);
+    if (packet.empty() || time_it == new_arrival_times.end()) {
+      break;
+    }
+    packets.push_back({std::move(packet), *time_it});
+    ++time_it;
+  }
+
+  // Sort on new time values.
+  std::sort(packets.begin(), packets.end(),
+            [](const PacketAndTime& a, const PacketAndTime& b) {
+              return a.time < b.time;
+            });
+
+  // Write packets to output file.
+  for (const auto& p : packets) {
+    WritePacket(p, out_file);
+  }
+
+  fclose(in_file);
+  fclose(out_file);
+  return 0;
+}
+
+}  // namespace
+}  // namespace test
+}  // namespace webrtc
+
+int main(int argc, char* argv[]) {
+  return webrtc::test::RunRtpJitter(argc, argv);
+}
diff --git a/modules/audio_coding/neteq/tools/rtpcat.cc b/modules/audio_coding/neteq/tools/rtpcat.cc
new file mode 100644
index 0000000..431de55
--- /dev/null
+++ b/modules/audio_coding/neteq/tools/rtpcat.cc
@@ -0,0 +1,45 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#include <memory>
+
+#include "rtc_base/checks.h"
+#include "test/rtp_file_reader.h"
+#include "test/rtp_file_writer.h"
+
+using webrtc::test::RtpFileReader;
+using webrtc::test::RtpFileWriter;
+
+int main(int argc, char* argv[]) {
+  if (argc < 3) {
+    printf("Concatenates multiple rtpdump files into one.\n");
+    printf("Usage: rtpcat in1.rtp int2.rtp [...] out.rtp\n");
+    exit(1);
+  }
+
+  std::unique_ptr<RtpFileWriter> output(
+      RtpFileWriter::Create(RtpFileWriter::kRtpDump, argv[argc - 1]));
+  RTC_CHECK(output.get() != NULL) << "Cannot open output file.";
+  printf("Output RTP file: %s\n", argv[argc - 1]);
+
+  for (int i = 1; i < argc - 1; i++) {
+    std::unique_ptr<RtpFileReader> input(
+        RtpFileReader::Create(RtpFileReader::kRtpDump, argv[i]));
+    RTC_CHECK(input.get() != NULL) << "Cannot open input file " << argv[i];
+    printf("Input RTP file: %s\n", argv[i]);
+
+    webrtc::test::RtpPacket packet;
+    while (input->NextPacket(&packet))
+      RTC_CHECK(output->WritePacket(&packet));
+  }
+  return 0;
+}
diff --git a/modules/audio_processing/BUILD.gn b/modules/audio_processing/BUILD.gn
new file mode 100644
index 0000000..09b0bd4
--- /dev/null
+++ b/modules/audio_processing/BUILD.gn
@@ -0,0 +1,849 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("//build/config/arm.gni")
+import("../../webrtc.gni")
+if (!build_with_mozilla) {
+  import("//third_party/protobuf/proto_library.gni")
+}
+
+declare_args() {
+  # Disables the usual mode where we trust the reported system delay
+  # values the AEC receives. The corresponding define is set appropriately
+  # in the code, but it can be force-enabled here for testing.
+  aec_untrusted_delay_for_testing = false
+}
+
+config("apm_debug_dump") {
+  if (apm_debug_dump) {
+    defines = [ "WEBRTC_APM_DEBUG_DUMP=1" ]
+  } else {
+    defines = [ "WEBRTC_APM_DEBUG_DUMP=0" ]
+  }
+}
+
+rtc_static_library("audio_processing") {
+  visibility = [ "*" ]
+  configs += [ ":apm_debug_dump" ]
+  sources = [
+    "aec/aec_resampler.cc",
+    "aec/aec_resampler.h",
+    "aec/echo_cancellation.cc",
+    "aec/echo_cancellation.h",
+    "agc/agc.cc",
+    "agc/agc.h",
+    "agc/agc_manager_direct.cc",
+    "agc/agc_manager_direct.h",
+    "agc/gain_map_internal.h",
+    "agc/loudness_histogram.cc",
+    "agc/loudness_histogram.h",
+    "agc/utility.cc",
+    "agc/utility.h",
+    "audio_buffer.cc",
+    "audio_buffer.h",
+    "audio_processing_impl.cc",
+    "audio_processing_impl.h",
+    "beamformer/array_util.cc",
+    "beamformer/array_util.h",
+    "beamformer/complex_matrix.h",
+    "beamformer/covariance_matrix_generator.cc",
+    "beamformer/covariance_matrix_generator.h",
+    "beamformer/matrix.h",
+    "beamformer/nonlinear_beamformer.cc",
+    "beamformer/nonlinear_beamformer.h",
+    "common.h",
+    "echo_cancellation_impl.cc",
+    "echo_cancellation_impl.h",
+    "echo_control_mobile_impl.cc",
+    "echo_control_mobile_impl.h",
+    "echo_detector/circular_buffer.cc",
+    "echo_detector/circular_buffer.h",
+    "echo_detector/mean_variance_estimator.cc",
+    "echo_detector/mean_variance_estimator.h",
+    "echo_detector/moving_max.cc",
+    "echo_detector/moving_max.h",
+    "echo_detector/normalized_covariance_estimator.cc",
+    "echo_detector/normalized_covariance_estimator.h",
+    "gain_control_for_experimental_agc.cc",
+    "gain_control_for_experimental_agc.h",
+    "gain_control_impl.cc",
+    "gain_control_impl.h",
+    "gain_controller2.cc",
+    "gain_controller2.h",
+    "include/audio_processing.cc",
+    "include/audio_processing.h",
+    "include/config.cc",
+    "include/config.h",
+    "level_estimator_impl.cc",
+    "level_estimator_impl.h",
+    "low_cut_filter.cc",
+    "low_cut_filter.h",
+    "noise_suppression_impl.cc",
+    "noise_suppression_impl.h",
+    "render_queue_item_verifier.h",
+    "residual_echo_detector.cc",
+    "residual_echo_detector.h",
+    "rms_level.cc",
+    "rms_level.h",
+    "splitting_filter.cc",
+    "splitting_filter.h",
+    "three_band_filter_bank.cc",
+    "three_band_filter_bank.h",
+    "transient/common.h",
+    "transient/daubechies_8_wavelet_coeffs.h",
+    "transient/dyadic_decimator.h",
+    "transient/moving_moments.cc",
+    "transient/moving_moments.h",
+    "transient/transient_detector.cc",
+    "transient/transient_detector.h",
+    "transient/transient_suppressor.cc",
+    "transient/transient_suppressor.h",
+    "transient/wpd_node.cc",
+    "transient/wpd_node.h",
+    "transient/wpd_tree.cc",
+    "transient/wpd_tree.h",
+    "typing_detection.cc",
+    "typing_detection.h",
+    "voice_detection_impl.cc",
+    "voice_detection_impl.h",
+  ]
+
+  defines = []
+  deps = [
+    ":aec_core",
+    ":aec_dump_interface",
+    ":apm_logging",
+    ":audio_frame_view",
+    ":audio_generator_interface",
+    ":audio_processing_c",
+    ":audio_processing_statistics",
+    "..:module_api",
+    "../..:typedefs",
+    "../..:webrtc_common",
+    "../../api:array_view",
+    "../../api:optional",
+    "../../api/audio:aec3_config",
+    "../../api/audio:echo_control",
+    "../../audio/utility:audio_frame_operations",
+    "../../common_audio:common_audio_c",
+    "../../rtc_base:checks",
+    "../../rtc_base:deprecation",
+    "../../rtc_base:gtest_prod",
+    "../../rtc_base:protobuf_utils",
+    "../../rtc_base:safe_minmax",
+    "../../rtc_base:sanitizer",
+    "../../system_wrappers:cpu_features_api",
+    "../../system_wrappers:field_trial_api",
+    "../../system_wrappers:metrics_api",
+    "agc2",
+    "vad",
+  ]
+
+  if (aec_untrusted_delay_for_testing) {
+    defines += [ "WEBRTC_UNTRUSTED_DELAY" ]
+  }
+
+  if (rtc_enable_protobuf) {
+    defines += [ "WEBRTC_AUDIOPROC_DEBUG_DUMP" ]
+    deps += [ ":audioproc_debug_proto" ]
+  }
+
+  if (rtc_enable_intelligibility_enhancer) {
+    defines += [ "WEBRTC_INTELLIGIBILITY_ENHANCER=1" ]
+    sources += [
+      "intelligibility/intelligibility_enhancer.cc",
+      "intelligibility/intelligibility_enhancer.h",
+      "intelligibility/intelligibility_utils.cc",
+      "intelligibility/intelligibility_utils.h",
+    ]
+  } else {
+    defines += [ "WEBRTC_INTELLIGIBILITY_ENHANCER=0" ]
+  }
+
+  if (rtc_prefer_fixed_point) {
+    defines += [ "WEBRTC_NS_FIXED" ]
+  } else {
+    defines += [ "WEBRTC_NS_FLOAT" ]
+  }
+
+  # TODO(jschuh): Bug 1348: fix this warning.
+  configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+
+  deps += [
+    "../../common_audio",
+    "../../common_audio:fir_filter",
+    "../../common_audio:fir_filter_factory",
+    "../../rtc_base:rtc_base_approved",
+    "../../system_wrappers",
+  ]
+}
+
+rtc_source_set("audio_processing_statistics") {
+  visibility = [ "*" ]
+  sources = [
+    "include/audio_processing_statistics.cc",
+    "include/audio_processing_statistics.h",
+  ]
+  deps = [
+    "../../api:optional",
+  ]
+}
+
+rtc_source_set("audio_frame_view") {
+  sources = [
+    "include/audio_frame_view.h",
+  ]
+  deps = [
+    "../../api:array_view",
+  ]
+}
+
+rtc_source_set("aec_dump_interface") {
+  sources = [
+    "include/aec_dump.cc",
+    "include/aec_dump.h",
+  ]
+
+  deps = [
+    ":audio_frame_view",
+    "../../api:array_view",
+    "../../rtc_base:rtc_base_approved",
+  ]
+}
+
+rtc_source_set("audio_generator_interface") {
+  visibility = [ "*" ]
+  sources = [
+    "include/audio_generator.h",
+  ]
+  deps = [
+    ":audio_frame_view",
+    "../../rtc_base:rtc_base_approved",
+    "../../system_wrappers:system_wrappers",
+  ]
+}
+
+rtc_source_set("audio_generator_factory") {
+  visibility = [ "*" ]
+  sources = [
+    "include/audio_generator_factory.cc",
+    "include/audio_generator_factory.h",
+  ]
+  deps = [
+    ":audio_generator_interface",
+    ":file_audio_generator",
+    "../../common_audio:common_audio",
+    "../../rtc_base:rtc_base_approved",
+    "../../system_wrappers:system_wrappers",
+  ]
+}
+
+rtc_source_set("file_audio_generator") {
+  sources = [
+    "audio_generator/file_audio_generator.cc",
+    "audio_generator/file_audio_generator.h",
+  ]
+  deps = [
+    ":audio_generator_interface",
+    "../../common_audio:common_audio",
+    "../../rtc_base:rtc_base_approved",
+    "../../system_wrappers:system_wrappers",
+  ]
+}
+
+rtc_source_set("audio_processing_c") {
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
+  sources = [
+    "agc/legacy/analog_agc.c",
+    "agc/legacy/analog_agc.h",
+    "agc/legacy/digital_agc.c",
+    "agc/legacy/digital_agc.h",
+    "agc/legacy/gain_control.h",
+  ]
+
+  if (rtc_prefer_fixed_point) {
+    sources += [
+      "ns/noise_suppression_x.c",
+      "ns/noise_suppression_x.h",
+      "ns/nsx_core.c",
+      "ns/nsx_core.h",
+      "ns/nsx_defines.h",
+    ]
+    if (current_cpu == "mipsel") {
+      sources += [ "ns/nsx_core_mips.c" ]
+    } else {
+      sources += [ "ns/nsx_core_c.c" ]
+    }
+  } else {
+    sources += [
+      "ns/defines.h",
+      "ns/noise_suppression.c",
+      "ns/noise_suppression.h",
+      "ns/ns_core.c",
+      "ns/ns_core.h",
+      "ns/windows_private.h",
+    ]
+  }
+
+  deps = [
+    "../..:typedefs",
+    "../..:webrtc_common",
+    "../../common_audio",
+    "../../common_audio:common_audio_c",
+    "../../rtc_base:checks",
+    "../../rtc_base:rtc_base_approved",
+    "../../system_wrappers:cpu_features_api",
+  ]
+
+  if (rtc_build_with_neon) {
+    sources += [ "ns/nsx_core_neon.c" ]
+
+    if (current_cpu != "arm64") {
+      # Enable compilation for the NEON instruction set. This is needed
+      # since //build/config/arm.gni only enables NEON for iOS, not Android.
+      # This provides the same functionality as webrtc/build/arm_neon.gypi.
+      suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ]
+      cflags = [ "-mfpu=neon" ]
+    }
+
+    # Disable LTO on NEON targets due to compiler bug.
+    # TODO(fdegans): Enable this. See crbug.com/408997.
+    if (rtc_use_lto) {
+      cflags -= [
+        "-flto",
+        "-ffat-lto-objects",
+      ]
+    }
+  }
+}
+
+if (rtc_enable_protobuf) {
+  proto_library("audioproc_debug_proto") {
+    sources = [
+      "debug.proto",
+    ]
+
+    proto_out_dir = "modules/audio_processing"
+  }
+}
+
+rtc_source_set("apm_logging") {
+  configs += [ ":apm_debug_dump" ]
+  sources = [
+    "logging/apm_data_dumper.cc",
+    "logging/apm_data_dumper.h",
+  ]
+  deps = [
+    "../../api:array_view",
+    "../../common_audio:common_audio",
+    "../../rtc_base:rtc_base_approved",
+    "../../rtc_base:stringutils",
+  ]
+  defines = []
+}
+
+rtc_source_set("aec_core") {
+  configs += [ ":apm_debug_dump" ]
+  sources = [
+    "aec/aec_common.h",
+    "aec/aec_core.cc",
+    "aec/aec_core.h",
+    "aec/aec_core_optimized_methods.h",
+    "aecm/aecm_core.cc",
+    "aecm/aecm_core.h",
+    "aecm/aecm_defines.h",
+    "aecm/echo_control_mobile.cc",
+    "aecm/echo_control_mobile.h",
+    "utility/block_mean_calculator.cc",
+    "utility/block_mean_calculator.h",
+    "utility/delay_estimator.cc",
+    "utility/delay_estimator.h",
+    "utility/delay_estimator_internal.h",
+    "utility/delay_estimator_wrapper.cc",
+    "utility/delay_estimator_wrapper.h",
+    "utility/ooura_fft.cc",
+    "utility/ooura_fft.h",
+    "utility/ooura_fft_tables_common.h",
+  ]
+  deps = [
+    ":apm_logging",
+    ":audio_processing_statistics",
+    "../..:typedefs",
+    "../..:webrtc_common",
+    "../../common_audio:common_audio",
+    "../../common_audio:common_audio_c",
+    "../../rtc_base:checks",
+    "../../rtc_base:rtc_base_approved",
+    "../../rtc_base:sanitizer",
+    "../../system_wrappers:cpu_features_api",
+    "../../system_wrappers:metrics_api",
+  ]
+  cflags = []
+  defines = []
+
+  if (current_cpu == "x86" || current_cpu == "x64") {
+    sources += [
+      "aec/aec_core_sse2.cc",
+      "utility/ooura_fft_sse2.cc",
+      "utility/ooura_fft_tables_neon_sse2.h",
+    ]
+    if (is_posix) {
+      cflags += [ "-msse2" ]
+    }
+  }
+
+  if (rtc_build_with_neon) {
+    sources += [
+      "aec/aec_core_neon.cc",
+      "aecm/aecm_core_neon.cc",
+      "utility/ooura_fft_neon.cc",
+      "utility/ooura_fft_tables_neon_sse2.h",
+    ]
+
+    if (current_cpu != "arm64") {
+      # Enable compilation for the NEON instruction set. This is needed
+      # since //build/config/arm.gni only enables NEON for iOS, not Android.
+      # This provides the same functionality as webrtc/build/arm_neon.gypi.
+      suppressed_configs += [ "//build/config/compiler:compiler_arm_fpu" ]
+      cflags += [ "-mfpu=neon" ]
+    }
+
+    # Disable LTO on NEON targets due to compiler bug.
+    # TODO(fdegans): Enable this. See crbug.com/408997.
+    if (rtc_use_lto) {
+      cflags -= [
+        "-flto",
+        "-ffat-lto-objects",
+      ]
+    }
+
+    deps += [ "../../common_audio" ]
+  }
+
+  if (current_cpu == "mipsel") {
+    sources += [ "aecm/aecm_core_mips.cc" ]
+    if (mips_float_abi == "hard") {
+      sources += [
+        "aec/aec_core_mips.cc",
+        "utility/ooura_fft_mips.cc",
+      ]
+    }
+  } else {
+    sources += [ "aecm/aecm_core_c.cc" ]
+  }
+
+  # TODO(jschuh): Bug 1348: fix this warning.
+  configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+}
+
+if (rtc_include_tests) {
+  rtc_source_set("mocks") {
+    testonly = true
+    sources = [
+      "include/mock_audio_processing.h",
+    ]
+    deps = [
+      ":aec_dump_interface",
+      ":audio_processing",
+      ":audio_processing_statistics",
+      "../../test:test_support",
+    ]
+  }
+
+  group("audio_processing_tests") {
+    testonly = true
+    deps = [
+      ":audioproc_test_utils",
+      ":click_annotate",
+      ":nonlinear_beamformer_test",
+      ":transient_suppression_test",
+    ]
+
+    if (rtc_enable_intelligibility_enhancer) {
+      deps += [ ":intelligibility_proc" ]
+    }
+
+    if (rtc_enable_protobuf) {
+      deps += [
+        ":audioproc_f",
+        ":audioproc_unittest_proto",
+        "aec_dump:aec_dump_unittests",
+        "test/conversational_speech",
+        "test/py_quality_assessment",
+      ]
+    }
+  }
+
+  rtc_source_set("audio_processing_unittests") {
+    testonly = true
+
+    configs += [ ":apm_debug_dump" ]
+    sources = [
+      "aec/echo_cancellation_unittest.cc",
+      "aec/system_delay_unittest.cc",
+      "agc/agc_manager_direct_unittest.cc",
+      "agc/loudness_histogram_unittest.cc",
+      "agc/mock_agc.h",
+      "audio_buffer_unittest.cc",
+      "audio_frame_view_unittest.cc",
+      "beamformer/array_util_unittest.cc",
+      "beamformer/complex_matrix_unittest.cc",
+      "beamformer/covariance_matrix_generator_unittest.cc",
+      "beamformer/matrix_test_helpers.h",
+      "beamformer/matrix_unittest.cc",
+      "beamformer/mock_nonlinear_beamformer.h",
+      "config_unittest.cc",
+      "echo_cancellation_impl_unittest.cc",
+      "gain_controller2_unittest.cc",
+      "splitting_filter_unittest.cc",
+      "test/fake_recording_device_unittest.cc",
+      "transient/dyadic_decimator_unittest.cc",
+      "transient/file_utils.cc",
+      "transient/file_utils.h",
+      "transient/file_utils_unittest.cc",
+      "transient/moving_moments_unittest.cc",
+      "transient/transient_detector_unittest.cc",
+      "transient/transient_suppressor_unittest.cc",
+      "transient/wpd_node_unittest.cc",
+      "transient/wpd_tree_unittest.cc",
+      "utility/block_mean_calculator_unittest.cc",
+      "utility/delay_estimator_unittest.cc",
+    ]
+
+    deps = [
+      ":aec_core",
+      ":analog_mic_simulation",
+      ":apm_logging",
+      ":audio_frame_view",
+      ":audio_processing",
+      ":audioproc_test_utils",
+      ":file_audio_generator_unittests",
+      ":mocks",
+      "..:module_api",
+      "../..:typedefs",
+      "../..:webrtc_common",
+      "../../api:array_view",
+      "../../api:optional",
+      "../../api/audio:aec3_config",
+      "../../api/audio:aec3_factory",
+      "../../common_audio:common_audio",
+      "../../common_audio:common_audio_c",
+      "../../rtc_base:checks",
+      "../../rtc_base:gtest_prod",
+      "../../rtc_base:protobuf_utils",
+      "../../rtc_base:rtc_base",
+      "../../rtc_base:rtc_base_approved",
+      "../../rtc_base:safe_minmax",
+      "../../system_wrappers",
+      "../../system_wrappers:cpu_features_api",
+      "../../test:fileutils",
+      "../../test:test_support",
+      "../audio_coding:neteq_input_audio_tools",
+      "aec_dump:mock_aec_dump_unittests",
+      "agc2:fixed_digital_unittests",
+      "test/conversational_speech:unittest",
+      "vad:vad_unittests",
+      "//testing/gtest",
+    ]
+
+    defines = []
+
+    if (rtc_enable_intelligibility_enhancer) {
+      defines += [ "WEBRTC_INTELLIGIBILITY_ENHANCER=1" ]
+      sources += [
+        "intelligibility/intelligibility_enhancer_unittest.cc",
+        "intelligibility/intelligibility_utils_unittest.cc",
+      ]
+    } else {
+      defines += [ "WEBRTC_INTELLIGIBILITY_ENHANCER=0" ]
+    }
+
+    if (rtc_prefer_fixed_point) {
+      defines += [ "WEBRTC_AUDIOPROC_FIXED_PROFILE" ]
+    } else {
+      defines += [ "WEBRTC_AUDIOPROC_FLOAT_PROFILE" ]
+    }
+
+    if (rtc_enable_protobuf) {
+      defines += [ "WEBRTC_AUDIOPROC_DEBUG_DUMP" ]
+      deps += [
+        ":audioproc_debug_proto",
+        ":audioproc_protobuf_utils",
+        ":audioproc_test_utils",
+        ":audioproc_unittest_proto",
+        "../../rtc_base:rtc_task_queue",
+        "aec_dump",
+        "aec_dump:aec_dump_unittests",
+      ]
+      sources += [
+        "audio_processing_impl_locking_unittest.cc",
+        "audio_processing_impl_unittest.cc",
+        "audio_processing_unittest.cc",
+        "beamformer/nonlinear_beamformer_unittest.cc",
+        "echo_cancellation_bit_exact_unittest.cc",
+        "echo_control_mobile_unittest.cc",
+        "echo_detector/circular_buffer_unittest.cc",
+        "echo_detector/mean_variance_estimator_unittest.cc",
+        "echo_detector/moving_max_unittest.cc",
+        "echo_detector/normalized_covariance_estimator_unittest.cc",
+        "gain_control_unittest.cc",
+        "level_estimator_unittest.cc",
+        "low_cut_filter_unittest.cc",
+        "noise_suppression_unittest.cc",
+        "residual_echo_detector_unittest.cc",
+        "rms_level_unittest.cc",
+        "test/debug_dump_replayer.cc",
+        "test/debug_dump_replayer.h",
+        "test/debug_dump_test.cc",
+        "test/echo_canceller_test_tools.cc",
+        "test/echo_canceller_test_tools.h",
+        "test/echo_canceller_test_tools_unittest.cc",
+        "test/test_utils.h",
+        "voice_detection_unittest.cc",
+      ]
+    }
+
+    if ((!build_with_chromium || is_win) && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+  }
+
+  rtc_source_set("audio_processing_perf_tests") {
+    testonly = true
+
+    sources = [
+      "audio_processing_performance_unittest.cc",
+    ]
+    deps = [
+      ":audio_processing",
+      ":audioproc_test_utils",
+      "../../api:array_view",
+      "../../modules:module_api",
+      "../../rtc_base:protobuf_utils",
+      "../../rtc_base:rtc_base_approved",
+      "../../system_wrappers",
+      "../../test:perf_test",
+      "../../test:test_support",
+    ]
+
+    if (rtc_enable_intelligibility_enhancer) {
+      defines = [ "WEBRTC_INTELLIGIBILITY_ENHANCER=1" ]
+    } else {
+      defines = [ "WEBRTC_INTELLIGIBILITY_ENHANCER=0" ]
+    }
+  }
+
+  rtc_source_set("file_audio_generator_unittests") {
+    testonly = true
+
+    sources = [
+      "audio_generator/file_audio_generator_unittest.cc",
+    ]
+
+    deps = [
+      ":audio_generator_factory",
+      ":audio_processing",
+      ":file_audio_generator",
+      "../../rtc_base:rtc_base_approved",
+      "../../test:fileutils",
+      "../../test:test_support",
+    ]
+  }
+
+  rtc_source_set("analog_mic_simulation") {
+    sources = [
+      "test/fake_recording_device.cc",
+      "test/fake_recording_device.h",
+    ]
+    deps = [
+      "../../api:array_view",
+      "../../common_audio:common_audio",
+      "../../modules:module_api",
+      "../../rtc_base:checks",
+      "../../rtc_base:rtc_base_approved",
+    ]
+  }
+
+  if (rtc_enable_protobuf) {
+    rtc_source_set("audioproc_f_impl") {
+      testonly = true
+      sources = [
+        "test/aec_dump_based_simulator.cc",
+        "test/aec_dump_based_simulator.h",
+        "test/audio_processing_simulator.cc",
+        "test/audio_processing_simulator.h",
+        "test/audioproc_float_impl.cc",
+        "test/audioproc_float_impl.h",
+        "test/wav_based_simulator.cc",
+        "test/wav_based_simulator.h",
+      ]
+
+      deps = [
+        ":analog_mic_simulation",
+        ":audio_processing",
+        ":audioproc_debug_proto",
+        ":audioproc_protobuf_utils",
+        ":audioproc_test_utils",
+        "../../api:optional",
+        "../../api/audio:aec3_factory",
+        "../../common_audio:common_audio",
+        "../../rtc_base:checks",
+        "../../rtc_base:protobuf_utils",
+        "../../rtc_base:rtc_base_approved",
+        "../../rtc_base:rtc_json",
+        "../../rtc_base:rtc_task_queue",
+        "../../rtc_base:stringutils",
+        "../../system_wrappers",
+        "../../system_wrappers:system_wrappers_default",
+        "../../test:test_support",
+        "aec_dump",
+        "aec_dump:aec_dump_impl",
+        "//testing/gtest",
+      ]
+    }  # audioproc_f_impl
+    rtc_executable("audioproc_f") {
+      testonly = true
+      sources = [
+        "test/audioproc_float_main.cc",
+      ]
+      deps = [
+        ":audio_processing",
+        "../../api:audioproc_f_api",
+        "../../rtc_base:rtc_base_approved",
+      ]
+    }  # audioproc_f
+  }
+
+  rtc_source_set("audioproc_test_utils") {
+    visibility = [ "*" ]
+    testonly = true
+    sources = [
+      "test/audio_buffer_tools.cc",
+      "test/audio_buffer_tools.h",
+      "test/bitexactness_tools.cc",
+      "test/bitexactness_tools.h",
+      "test/performance_timer.cc",
+      "test/performance_timer.h",
+      "test/simulator_buffers.cc",
+      "test/simulator_buffers.h",
+      "test/test_utils.cc",
+      "test/test_utils.h",
+    ]
+
+    deps = [
+      ":audio_processing",
+      "..:module_api",
+      "../../api:array_view",
+      "../../api:optional",
+      "../../common_audio",
+      "../../rtc_base:checks",
+      "../../rtc_base:rtc_base_approved",
+      "../../system_wrappers",
+      "../../test:fileutils",
+      "../../test:test_support",
+      "../audio_coding:neteq_input_audio_tools",
+      "//testing/gtest",
+    ]
+  }
+
+  rtc_executable("transient_suppression_test") {
+    testonly = true
+    sources = [
+      "transient/file_utils.cc",
+      "transient/file_utils.h",
+      "transient/transient_suppression_test.cc",
+    ]
+    deps = [
+      ":audio_processing",
+      "..:module_api",
+      "../..:typedefs",
+      "../..:webrtc_common",
+      "../../common_audio:common_audio",
+      "../../rtc_base:rtc_base_approved",
+      "../../system_wrappers",
+      "../../system_wrappers:metrics_default",
+      "../../test:fileutils",
+      "../../test:test_support",
+      "//testing/gtest",
+    ]
+  }
+
+  rtc_executable("click_annotate") {
+    testonly = true
+    sources = [
+      "transient/click_annotate.cc",
+      "transient/file_utils.cc",
+      "transient/file_utils.h",
+    ]
+    deps = [
+      ":audio_processing",
+      "../..:typedefs",
+      "../..:webrtc_common",
+      "../../system_wrappers",
+      "../../system_wrappers:metrics_default",
+    ]
+  }
+
+  rtc_executable("nonlinear_beamformer_test") {
+    testonly = true
+    sources = [
+      "beamformer/nonlinear_beamformer_test.cc",
+    ]
+    deps = [
+      ":audio_processing",
+      ":audioproc_test_utils",
+      "../../common_audio:common_audio",
+      "../../rtc_base:checks",
+      "../../rtc_base:rtc_base_approved",
+      "../../system_wrappers:metrics_default",
+    ]
+  }
+
+  if (rtc_enable_intelligibility_enhancer) {
+    rtc_executable("intelligibility_proc") {
+      testonly = true
+      sources = [
+        "intelligibility/test/intelligibility_proc.cc",
+      ]
+      deps = [
+        ":audio_processing",
+        ":audioproc_test_utils",
+        "../../common_audio",
+        "../../rtc_base:rtc_base_approved",
+        "../../system_wrappers:metrics_default",
+        "../../test:test_support",
+        "//testing/gtest",
+      ]
+    }
+  }
+
+  if (rtc_enable_protobuf) {
+    proto_library("audioproc_unittest_proto") {
+      sources = [
+        "test/unittest.proto",
+      ]
+      proto_out_dir = "modules/audio_processing/test"
+    }
+
+    rtc_static_library("audioproc_protobuf_utils") {
+      sources = [
+        "test/protobuf_utils.cc",
+        "test/protobuf_utils.h",
+      ]
+
+      deps = [
+        ":audioproc_debug_proto",
+        "../..:typedefs",
+        "../..:webrtc_common",
+        "../../rtc_base:protobuf_utils",
+        "../../rtc_base:rtc_base_approved",
+      ]
+    }
+  }
+}
diff --git a/modules/audio_processing/DEPS b/modules/audio_processing/DEPS
new file mode 100644
index 0000000..79fd071
--- /dev/null
+++ b/modules/audio_processing/DEPS
@@ -0,0 +1,14 @@
+include_rules = [
+  "+audio/utility/audio_frame_operations.h",
+  "+common_audio",
+  "+system_wrappers",
+]
+
+specific_include_rules = {
+  ".*test\.cc": [
+    "+rtc_tools",
+    # Android platform build has different paths.
+    "+gtest",
+    "+external/webrtc",
+  ],
+}
diff --git a/modules/audio_processing/OWNERS b/modules/audio_processing/OWNERS
new file mode 100644
index 0000000..910d566
--- /dev/null
+++ b/modules/audio_processing/OWNERS
@@ -0,0 +1,10 @@
+aleloi@webrtc.org
+aluebs@webrtc.org
+gustaf@webrtc.org
+henrik.lundin@webrtc.org
+peah@webrtc.org
+
+# These are for the common case of adding or renaming files. If you're doing
+# structural changes, please get a review from a reviewer in this file.
+per-file *.gn=*
+per-file *.gni=*
diff --git a/modules/audio_processing/aec/aec_common.h b/modules/audio_processing/aec/aec_common.h
new file mode 100644
index 0000000..80c5c14
--- /dev/null
+++ b/modules/audio_processing/aec/aec_common.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC_AEC_COMMON_H_
+#define MODULES_AUDIO_PROCESSING_AEC_AEC_COMMON_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+#ifdef _MSC_VER /* visual c++ */
+#define ALIGN16_BEG __declspec(align(16))
+#define ALIGN16_END
+#else /* gcc or icc */
+#define ALIGN16_BEG
+#define ALIGN16_END __attribute__((aligned(16)))
+#endif
+
+#ifdef __cplusplus
+namespace webrtc {
+#endif
+
+extern ALIGN16_BEG const float ALIGN16_END WebRtcAec_sqrtHanning[65];
+extern ALIGN16_BEG const float ALIGN16_END WebRtcAec_weightCurve[65];
+extern ALIGN16_BEG const float ALIGN16_END WebRtcAec_overDriveCurve[65];
+extern const float WebRtcAec_kExtendedSmoothingCoefficients[2][2];
+extern const float WebRtcAec_kNormalSmoothingCoefficients[2][2];
+extern const float WebRtcAec_kMinFarendPSD;
+
+#ifdef __cplusplus
+}  // namespace webrtc
+#endif
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC_AEC_COMMON_H_
diff --git a/modules/audio_processing/aec/aec_core.cc b/modules/audio_processing/aec/aec_core.cc
new file mode 100644
index 0000000..4394929
--- /dev/null
+++ b/modules/audio_processing/aec/aec_core.cc
@@ -0,0 +1,2052 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * The core AEC algorithm, which is presented with time-aligned signals.
+ */
+
+#include "modules/audio_processing/aec/aec_core.h"
+
+#include <algorithm>
+#include <math.h>
+#include <stddef.h>  // size_t
+#include <stdlib.h>
+#include <string.h>
+
+#include "rtc_base/checks.h"
+extern "C" {
+#include "common_audio/ring_buffer.h"
+}
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_processing/aec/aec_common.h"
+#include "modules/audio_processing/aec/aec_core_optimized_methods.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "modules/audio_processing/utility/delay_estimator_wrapper.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "system_wrappers/include/metrics.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace {
+enum class DelaySource {
+  kSystemDelay,    // The delay values come from the OS.
+  kDelayAgnostic,  // The delay values come from the DA-AEC.
+};
+
+constexpr int kMinDelayLogValue = -200;
+constexpr int kMaxDelayLogValue = 200;
+constexpr int kNumDelayLogBuckets = 100;
+
+void MaybeLogDelayAdjustment(int moved_ms, DelaySource source) {
+  if (moved_ms == 0)
+    return;
+  switch (source) {
+    case DelaySource::kSystemDelay:
+      RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AecDelayAdjustmentMsSystemValue",
+                           moved_ms, kMinDelayLogValue, kMaxDelayLogValue,
+                           kNumDelayLogBuckets);
+      return;
+    case DelaySource::kDelayAgnostic:
+      RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AecDelayAdjustmentMsAgnosticValue",
+                           moved_ms, kMinDelayLogValue, kMaxDelayLogValue,
+                           kNumDelayLogBuckets);
+      return;
+  }
+}
+}  // namespace
+
+// Buffer size (samples)
+static const size_t kBufferSizeBlocks = 250;  // 1 second of audio in 16 kHz.
+
+// Metrics
+static const size_t kSubCountLen = 4;
+static const size_t kCountLen = 50;
+static const int kDelayMetricsAggregationWindow = 1250;  // 5 seconds at 16 kHz.
+
+// Divergence metric is based on audio level, which gets updated every
+// |kSubCountLen + 1| * PART_LEN samples. Divergence metric takes the statistics
+// of |kDivergentFilterFractionAggregationWindowSize| audio levels. The
+// following value corresponds to 1 second at 16 kHz.
+static const int kDivergentFilterFractionAggregationWindowSize = 50;
+
+// Quantities to control H band scaling for SWB input
+static const float cnScaleHband = 0.4f;  // scale for comfort noise in H band.
+// Initial bin for averaging nlp gain in low band
+static const int freqAvgIc = PART_LEN / 2;
+
+// Matlab code to produce table:
+// win = sqrt(hanning(63)); win = [0 ; win(1:32)];
+// fprintf(1, '\t%.14f, %.14f, %.14f,\n', win);
+ALIGN16_BEG const float ALIGN16_END WebRtcAec_sqrtHanning[65] = {
+    0.00000000000000f, 0.02454122852291f, 0.04906767432742f, 0.07356456359967f,
+    0.09801714032956f, 0.12241067519922f, 0.14673047445536f, 0.17096188876030f,
+    0.19509032201613f, 0.21910124015687f, 0.24298017990326f, 0.26671275747490f,
+    0.29028467725446f, 0.31368174039889f, 0.33688985339222f, 0.35989503653499f,
+    0.38268343236509f, 0.40524131400499f, 0.42755509343028f, 0.44961132965461f,
+    0.47139673682600f, 0.49289819222978f, 0.51410274419322f, 0.53499761988710f,
+    0.55557023301960f, 0.57580819141785f, 0.59569930449243f, 0.61523159058063f,
+    0.63439328416365f, 0.65317284295378f, 0.67155895484702f, 0.68954054473707f,
+    0.70710678118655f, 0.72424708295147f, 0.74095112535496f, 0.75720884650648f,
+    0.77301045336274f, 0.78834642762661f, 0.80320753148064f, 0.81758481315158f,
+    0.83146961230255f, 0.84485356524971f, 0.85772861000027f, 0.87008699110871f,
+    0.88192126434835f, 0.89322430119552f, 0.90398929312344f, 0.91420975570353f,
+    0.92387953251129f, 0.93299279883474f, 0.94154406518302f, 0.94952818059304f,
+    0.95694033573221f, 0.96377606579544f, 0.97003125319454f, 0.97570213003853f,
+    0.98078528040323f, 0.98527764238894f, 0.98917650996478f, 0.99247953459871f,
+    0.99518472667220f, 0.99729045667869f, 0.99879545620517f, 0.99969881869620f,
+    1.00000000000000f};
+
+// Matlab code to produce table:
+// weightCurve = [0 ; 0.3 * sqrt(linspace(0,1,64))' + 0.1];
+// fprintf(1, '\t%.4f, %.4f, %.4f, %.4f, %.4f, %.4f,\n', weightCurve);
+ALIGN16_BEG const float ALIGN16_END WebRtcAec_weightCurve[65] = {
+    0.0000f, 0.1000f, 0.1378f, 0.1535f, 0.1655f, 0.1756f, 0.1845f, 0.1926f,
+    0.2000f, 0.2069f, 0.2134f, 0.2195f, 0.2254f, 0.2309f, 0.2363f, 0.2414f,
+    0.2464f, 0.2512f, 0.2558f, 0.2604f, 0.2648f, 0.2690f, 0.2732f, 0.2773f,
+    0.2813f, 0.2852f, 0.2890f, 0.2927f, 0.2964f, 0.3000f, 0.3035f, 0.3070f,
+    0.3104f, 0.3138f, 0.3171f, 0.3204f, 0.3236f, 0.3268f, 0.3299f, 0.3330f,
+    0.3360f, 0.3390f, 0.3420f, 0.3449f, 0.3478f, 0.3507f, 0.3535f, 0.3563f,
+    0.3591f, 0.3619f, 0.3646f, 0.3673f, 0.3699f, 0.3726f, 0.3752f, 0.3777f,
+    0.3803f, 0.3828f, 0.3854f, 0.3878f, 0.3903f, 0.3928f, 0.3952f, 0.3976f,
+    0.4000f};
+
+// Matlab code to produce table:
+// overDriveCurve = [sqrt(linspace(0,1,65))' + 1];
+// fprintf(1, '\t%.4f, %.4f, %.4f, %.4f, %.4f, %.4f,\n', overDriveCurve);
+ALIGN16_BEG const float ALIGN16_END WebRtcAec_overDriveCurve[65] = {
+    1.0000f, 1.1250f, 1.1768f, 1.2165f, 1.2500f, 1.2795f, 1.3062f, 1.3307f,
+    1.3536f, 1.3750f, 1.3953f, 1.4146f, 1.4330f, 1.4507f, 1.4677f, 1.4841f,
+    1.5000f, 1.5154f, 1.5303f, 1.5449f, 1.5590f, 1.5728f, 1.5863f, 1.5995f,
+    1.6124f, 1.6250f, 1.6374f, 1.6495f, 1.6614f, 1.6731f, 1.6847f, 1.6960f,
+    1.7071f, 1.7181f, 1.7289f, 1.7395f, 1.7500f, 1.7603f, 1.7706f, 1.7806f,
+    1.7906f, 1.8004f, 1.8101f, 1.8197f, 1.8292f, 1.8385f, 1.8478f, 1.8570f,
+    1.8660f, 1.8750f, 1.8839f, 1.8927f, 1.9014f, 1.9100f, 1.9186f, 1.9270f,
+    1.9354f, 1.9437f, 1.9520f, 1.9601f, 1.9682f, 1.9763f, 1.9843f, 1.9922f,
+    2.0000f};
+
+// Delay Agnostic AEC parameters, still under development and may change.
+static const float kDelayQualityThresholdMax = 0.07f;
+static const float kDelayQualityThresholdMin = 0.01f;
+static const int kInitialShiftOffset = 5;
+#if !defined(WEBRTC_ANDROID)
+static const int kDelayCorrectionStart = 1500;  // 10 ms chunks
+#endif
+
+// Target suppression levels for nlp modes.
+// log{0.001, 0.00001, 0.00000001}
+static const float kTargetSupp[3] = {-6.9f, -11.5f, -18.4f};
+
+// Two sets of parameters, one for the extended filter mode.
+static const float kExtendedMinOverDrive[3] = {3.0f, 6.0f, 15.0f};
+static const float kNormalMinOverDrive[3] = {1.0f, 2.0f, 5.0f};
+const float WebRtcAec_kExtendedSmoothingCoefficients[2][2] = {{0.9f, 0.1f},
+                                                              {0.92f, 0.08f}};
+const float WebRtcAec_kNormalSmoothingCoefficients[2][2] = {{0.9f, 0.1f},
+                                                            {0.93f, 0.07f}};
+
+// Number of partitions forming the NLP's "preferred" bands.
+enum { kPrefBandSize = 24 };
+
+WebRtcAecFilterFar WebRtcAec_FilterFar;
+WebRtcAecScaleErrorSignal WebRtcAec_ScaleErrorSignal;
+WebRtcAecFilterAdaptation WebRtcAec_FilterAdaptation;
+WebRtcAecOverdrive WebRtcAec_Overdrive;
+WebRtcAecSuppress WebRtcAec_Suppress;
+WebRtcAecComputeCoherence WebRtcAec_ComputeCoherence;
+WebRtcAecUpdateCoherenceSpectra WebRtcAec_UpdateCoherenceSpectra;
+WebRtcAecStoreAsComplex WebRtcAec_StoreAsComplex;
+WebRtcAecPartitionDelay WebRtcAec_PartitionDelay;
+WebRtcAecWindowData WebRtcAec_WindowData;
+
+__inline static float MulRe(float aRe, float aIm, float bRe, float bIm) {
+  return aRe * bRe - aIm * bIm;
+}
+
+__inline static float MulIm(float aRe, float aIm, float bRe, float bIm) {
+  return aRe * bIm + aIm * bRe;
+}
+
+// TODO(minyue): Due to a legacy bug, |framelevel| and |averagelevel| use a
+// window, of which the length is 1 unit longer than indicated. Remove "+1" when
+// the code is refactored.
+PowerLevel::PowerLevel()
+    : framelevel(kSubCountLen + 1),
+      averagelevel(kCountLen + 1) {
+}
+
+BlockBuffer::BlockBuffer() {
+  buffer_ = WebRtc_CreateBuffer(kBufferSizeBlocks, sizeof(float) * PART_LEN);
+  RTC_CHECK(buffer_);
+  ReInit();
+}
+
+BlockBuffer::~BlockBuffer() {
+  WebRtc_FreeBuffer(buffer_);
+}
+
+void BlockBuffer::ReInit() {
+  WebRtc_InitBuffer(buffer_);
+}
+
+void BlockBuffer::Insert(const float block[PART_LEN]) {
+  WebRtc_WriteBuffer(buffer_, block, 1);
+}
+
+void BlockBuffer::ExtractExtendedBlock(float extended_block[PART_LEN2]) {
+  float* block_ptr = NULL;
+  RTC_DCHECK_LT(0, AvaliableSpace());
+
+  // Extract the previous block.
+  WebRtc_MoveReadPtr(buffer_, -1);
+  size_t read_elements = WebRtc_ReadBuffer(
+      buffer_, reinterpret_cast<void**>(&block_ptr), &extended_block[0], 1);
+  if (read_elements == 0u) {
+    std::fill_n(&extended_block[0], PART_LEN, 0.0f);
+  } else if (block_ptr != &extended_block[0]) {
+    memcpy(&extended_block[0], block_ptr, PART_LEN * sizeof(float));
+  }
+
+  // Extract the current block.
+  read_elements =
+      WebRtc_ReadBuffer(buffer_, reinterpret_cast<void**>(&block_ptr),
+                        &extended_block[PART_LEN], 1);
+  if (read_elements == 0u) {
+    std::fill_n(&extended_block[PART_LEN], PART_LEN, 0.0f);
+  } else if (block_ptr != &extended_block[PART_LEN]) {
+    memcpy(&extended_block[PART_LEN], block_ptr, PART_LEN * sizeof(float));
+  }
+}
+
+int BlockBuffer::AdjustSize(int buffer_size_decrease) {
+  return WebRtc_MoveReadPtr(buffer_, buffer_size_decrease);
+}
+
+size_t BlockBuffer::Size() {
+  return static_cast<int>(WebRtc_available_read(buffer_));
+}
+
+size_t BlockBuffer::AvaliableSpace() {
+  return WebRtc_available_write(buffer_);
+}
+
+DivergentFilterFraction::DivergentFilterFraction()
+    : count_(0),
+      occurrence_(0),
+      fraction_(-1.0) {
+}
+
+void DivergentFilterFraction::Reset() {
+  Clear();
+  fraction_ = -1.0;
+}
+
+void DivergentFilterFraction::AddObservation(const PowerLevel& nearlevel,
+                                             const PowerLevel& linoutlevel,
+                                             const PowerLevel& nlpoutlevel) {
+  const float near_level = nearlevel.framelevel.GetLatestMean();
+  const float level_increase =
+      linoutlevel.framelevel.GetLatestMean() - near_level;
+  const bool output_signal_active = nlpoutlevel.framelevel.GetLatestMean() >
+          40.0 * nlpoutlevel.minlevel;
+  // Level increase should be, in principle, negative, when the filter
+  // does not diverge. Here we allow some margin (0.01 * near end level) and
+  // numerical error (1.0). We count divergence only when the AEC output
+  // signal is active.
+  if (output_signal_active &&
+      level_increase > std::max(0.01 * near_level, 1.0))
+    occurrence_++;
+  ++count_;
+  if (count_ == kDivergentFilterFractionAggregationWindowSize) {
+    fraction_ = static_cast<float>(occurrence_) /
+        kDivergentFilterFractionAggregationWindowSize;
+    Clear();
+  }
+}
+
+float DivergentFilterFraction::GetLatestFraction() const {
+  return fraction_;
+}
+
+void DivergentFilterFraction::Clear() {
+  count_ = 0;
+  occurrence_ = 0;
+}
+
+// TODO(minyue): Moving some initialization from WebRtcAec_CreateAec() to ctor.
+AecCore::AecCore(int instance_index)
+    : data_dumper(new ApmDataDumper(instance_index)) {}
+
+AecCore::~AecCore() {}
+
+static int CmpFloat(const void* a, const void* b) {
+  const float* da = (const float*)a;
+  const float* db = (const float*)b;
+
+  return (*da > *db) - (*da < *db);
+}
+
+static void FilterFar(int num_partitions,
+                      int x_fft_buf_block_pos,
+                      float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+                      float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+                      float y_fft[2][PART_LEN1]) {
+  int i;
+  for (i = 0; i < num_partitions; i++) {
+    int j;
+    int xPos = (i + x_fft_buf_block_pos) * PART_LEN1;
+    int pos = i * PART_LEN1;
+    // Check for wrap
+    if (i + x_fft_buf_block_pos >= num_partitions) {
+      xPos -= num_partitions * (PART_LEN1);
+    }
+
+    for (j = 0; j < PART_LEN1; j++) {
+      y_fft[0][j] += MulRe(x_fft_buf[0][xPos + j], x_fft_buf[1][xPos + j],
+                           h_fft_buf[0][pos + j], h_fft_buf[1][pos + j]);
+      y_fft[1][j] += MulIm(x_fft_buf[0][xPos + j], x_fft_buf[1][xPos + j],
+                           h_fft_buf[0][pos + j], h_fft_buf[1][pos + j]);
+    }
+  }
+}
+
+static void ScaleErrorSignal(float mu,
+                             float error_threshold,
+                             float x_pow[PART_LEN1],
+                             float ef[2][PART_LEN1]) {
+  int i;
+  float abs_ef;
+  for (i = 0; i < (PART_LEN1); i++) {
+    ef[0][i] /= (x_pow[i] + 1e-10f);
+    ef[1][i] /= (x_pow[i] + 1e-10f);
+    abs_ef = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]);
+
+    if (abs_ef > error_threshold) {
+      abs_ef = error_threshold / (abs_ef + 1e-10f);
+      ef[0][i] *= abs_ef;
+      ef[1][i] *= abs_ef;
+    }
+
+    // Stepsize factor
+    ef[0][i] *= mu;
+    ef[1][i] *= mu;
+  }
+}
+
+static void FilterAdaptation(
+    const OouraFft& ooura_fft,
+    int num_partitions,
+    int x_fft_buf_block_pos,
+    float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+    float e_fft[2][PART_LEN1],
+    float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]) {
+  int i, j;
+  float fft[PART_LEN2];
+  for (i = 0; i < num_partitions; i++) {
+    int xPos = (i + x_fft_buf_block_pos) * (PART_LEN1);
+    int pos;
+    // Check for wrap
+    if (i + x_fft_buf_block_pos >= num_partitions) {
+      xPos -= num_partitions * PART_LEN1;
+    }
+
+    pos = i * PART_LEN1;
+
+    for (j = 0; j < PART_LEN; j++) {
+      fft[2 * j] = MulRe(x_fft_buf[0][xPos + j], -x_fft_buf[1][xPos + j],
+                         e_fft[0][j], e_fft[1][j]);
+      fft[2 * j + 1] = MulIm(x_fft_buf[0][xPos + j], -x_fft_buf[1][xPos + j],
+                             e_fft[0][j], e_fft[1][j]);
+    }
+    fft[1] =
+        MulRe(x_fft_buf[0][xPos + PART_LEN], -x_fft_buf[1][xPos + PART_LEN],
+              e_fft[0][PART_LEN], e_fft[1][PART_LEN]);
+
+    ooura_fft.InverseFft(fft);
+    memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN);
+
+    // fft scaling
+    {
+      float scale = 2.0f / PART_LEN2;
+      for (j = 0; j < PART_LEN; j++) {
+        fft[j] *= scale;
+      }
+    }
+    ooura_fft.Fft(fft);
+
+    h_fft_buf[0][pos] += fft[0];
+    h_fft_buf[0][pos + PART_LEN] += fft[1];
+
+    for (j = 1; j < PART_LEN; j++) {
+      h_fft_buf[0][pos + j] += fft[2 * j];
+      h_fft_buf[1][pos + j] += fft[2 * j + 1];
+    }
+  }
+}
+
+static void Overdrive(float overdrive_scaling,
+                      const float hNlFb,
+                      float hNl[PART_LEN1]) {
+  for (int i = 0; i < PART_LEN1; ++i) {
+    // Weight subbands
+    if (hNl[i] > hNlFb) {
+      hNl[i] = WebRtcAec_weightCurve[i] * hNlFb +
+               (1 - WebRtcAec_weightCurve[i]) * hNl[i];
+    }
+    hNl[i] = powf(hNl[i], overdrive_scaling * WebRtcAec_overDriveCurve[i]);
+  }
+}
+
+static void Suppress(const float hNl[PART_LEN1], float efw[2][PART_LEN1]) {
+  for (int i = 0; i < PART_LEN1; ++i) {
+    // Suppress error signal
+    efw[0][i] *= hNl[i];
+    efw[1][i] *= hNl[i];
+
+    // Ooura fft returns incorrect sign on imaginary component. It matters here
+    // because we are making an additive change with comfort noise.
+    efw[1][i] *= -1;
+  }
+}
+
+static int PartitionDelay(int num_partitions,
+                          float h_fft_buf[2]
+                                         [kExtendedNumPartitions * PART_LEN1]) {
+  // Measures the energy in each filter partition and returns the partition with
+  // highest energy.
+  // TODO(bjornv): Spread computational cost by computing one partition per
+  // block?
+  float wfEnMax = 0;
+  int i;
+  int delay = 0;
+
+  for (i = 0; i < num_partitions; i++) {
+    int j;
+    int pos = i * PART_LEN1;
+    float wfEn = 0;
+    for (j = 0; j < PART_LEN1; j++) {
+      wfEn += h_fft_buf[0][pos + j] * h_fft_buf[0][pos + j] +
+              h_fft_buf[1][pos + j] * h_fft_buf[1][pos + j];
+    }
+
+    if (wfEn > wfEnMax) {
+      wfEnMax = wfEn;
+      delay = i;
+    }
+  }
+  return delay;
+}
+
+// Update metric with 10 * log10(numerator / denominator).
+static void UpdateLogRatioMetric(Stats* metric, float numerator,
+                                 float denominator) {
+  RTC_DCHECK(metric);
+  RTC_CHECK(numerator >= 0);
+  RTC_CHECK(denominator >= 0);
+
+  const float log_numerator = log10(numerator + 1e-10f);
+  const float log_denominator = log10(denominator + 1e-10f);
+  metric->instant = 10.0f * (log_numerator - log_denominator);
+
+  // Max.
+  if (metric->instant > metric->max)
+    metric->max = metric->instant;
+
+  // Min.
+  if (metric->instant < metric->min)
+    metric->min = metric->instant;
+
+  // Average.
+  metric->counter++;
+  // This is to protect overflow, which should almost never happen.
+  RTC_CHECK_NE(0, metric->counter);
+  metric->sum += metric->instant;
+  metric->average = metric->sum / metric->counter;
+
+  // Upper mean.
+  if (metric->instant > metric->average) {
+    metric->hicounter++;
+    // This is to protect overflow, which should almost never happen.
+    RTC_CHECK_NE(0, metric->hicounter);
+    metric->hisum += metric->instant;
+    metric->himean = metric->hisum / metric->hicounter;
+  }
+}
+
+// Threshold to protect against the ill-effects of a zero far-end.
+const float WebRtcAec_kMinFarendPSD = 15;
+
+// Updates the following smoothed Power Spectral Densities (PSD):
+//  - sd  : near-end
+//  - se  : residual echo
+//  - sx  : far-end
+//  - sde : cross-PSD of near-end and residual echo
+//  - sxd : cross-PSD of near-end and far-end
+//
+// In addition to updating the PSDs, also the filter diverge state is
+// determined.
+static void UpdateCoherenceSpectra(int mult,
+                                   bool extended_filter_enabled,
+                                   float efw[2][PART_LEN1],
+                                   float dfw[2][PART_LEN1],
+                                   float xfw[2][PART_LEN1],
+                                   CoherenceState* coherence_state,
+                                   short* filter_divergence_state,
+                                   int* extreme_filter_divergence) {
+  // Power estimate smoothing coefficients.
+  const float* ptrGCoh =
+      extended_filter_enabled
+          ? WebRtcAec_kExtendedSmoothingCoefficients[mult - 1]
+          : WebRtcAec_kNormalSmoothingCoefficients[mult - 1];
+  int i;
+  float sdSum = 0, seSum = 0;
+
+  for (i = 0; i < PART_LEN1; i++) {
+    coherence_state->sd[i] =
+        ptrGCoh[0] * coherence_state->sd[i] +
+        ptrGCoh[1] * (dfw[0][i] * dfw[0][i] + dfw[1][i] * dfw[1][i]);
+    coherence_state->se[i] =
+        ptrGCoh[0] * coherence_state->se[i] +
+        ptrGCoh[1] * (efw[0][i] * efw[0][i] + efw[1][i] * efw[1][i]);
+    // We threshold here to protect against the ill-effects of a zero farend.
+    // The threshold is not arbitrarily chosen, but balances protection and
+    // adverse interaction with the algorithm's tuning.
+    // TODO(bjornv): investigate further why this is so sensitive.
+    coherence_state->sx[i] =
+        ptrGCoh[0] * coherence_state->sx[i] +
+        ptrGCoh[1] *
+            WEBRTC_SPL_MAX(xfw[0][i] * xfw[0][i] + xfw[1][i] * xfw[1][i],
+                           WebRtcAec_kMinFarendPSD);
+
+    coherence_state->sde[i][0] =
+        ptrGCoh[0] * coherence_state->sde[i][0] +
+        ptrGCoh[1] * (dfw[0][i] * efw[0][i] + dfw[1][i] * efw[1][i]);
+    coherence_state->sde[i][1] =
+        ptrGCoh[0] * coherence_state->sde[i][1] +
+        ptrGCoh[1] * (dfw[0][i] * efw[1][i] - dfw[1][i] * efw[0][i]);
+
+    coherence_state->sxd[i][0] =
+        ptrGCoh[0] * coherence_state->sxd[i][0] +
+        ptrGCoh[1] * (dfw[0][i] * xfw[0][i] + dfw[1][i] * xfw[1][i]);
+    coherence_state->sxd[i][1] =
+        ptrGCoh[0] * coherence_state->sxd[i][1] +
+        ptrGCoh[1] * (dfw[0][i] * xfw[1][i] - dfw[1][i] * xfw[0][i]);
+
+    sdSum += coherence_state->sd[i];
+    seSum += coherence_state->se[i];
+  }
+
+  // Divergent filter safeguard update.
+  *filter_divergence_state =
+      (*filter_divergence_state ? 1.05f : 1.0f) * seSum > sdSum;
+
+  // Signal extreme filter divergence if the error is significantly larger
+  // than the nearend (13 dB).
+  *extreme_filter_divergence = (seSum > (19.95f * sdSum));
+}
+
+// Window time domain data to be used by the fft.
+__inline static void WindowData(float* x_windowed, const float* x) {
+  int i;
+  for (i = 0; i < PART_LEN; i++) {
+    x_windowed[i] = x[i] * WebRtcAec_sqrtHanning[i];
+    x_windowed[PART_LEN + i] =
+        x[PART_LEN + i] * WebRtcAec_sqrtHanning[PART_LEN - i];
+  }
+}
+
+// Puts fft output data into a complex valued array.
+__inline static void StoreAsComplex(const float* data,
+                                    float data_complex[2][PART_LEN1]) {
+  int i;
+  data_complex[0][0] = data[0];
+  data_complex[1][0] = 0;
+  for (i = 1; i < PART_LEN; i++) {
+    data_complex[0][i] = data[2 * i];
+    data_complex[1][i] = data[2 * i + 1];
+  }
+  data_complex[0][PART_LEN] = data[1];
+  data_complex[1][PART_LEN] = 0;
+}
+
+static void ComputeCoherence(const CoherenceState* coherence_state,
+                             float* cohde,
+                             float* cohxd) {
+  // Subband coherence
+  for (int i = 0; i < PART_LEN1; i++) {
+    cohde[i] = (coherence_state->sde[i][0] * coherence_state->sde[i][0] +
+                coherence_state->sde[i][1] * coherence_state->sde[i][1]) /
+               (coherence_state->sd[i] * coherence_state->se[i] + 1e-10f);
+    cohxd[i] = (coherence_state->sxd[i][0] * coherence_state->sxd[i][0] +
+                coherence_state->sxd[i][1] * coherence_state->sxd[i][1]) /
+               (coherence_state->sx[i] * coherence_state->sd[i] + 1e-10f);
+  }
+}
+
+static void GetHighbandGain(const float* lambda, float* nlpGainHband) {
+  int i;
+
+  *nlpGainHband = 0.0f;
+  for (i = freqAvgIc; i < PART_LEN1 - 1; i++) {
+    *nlpGainHband += lambda[i];
+  }
+  *nlpGainHband /= static_cast<float>(PART_LEN1 - 1 - freqAvgIc);
+}
+
+static void GenerateComplexNoise(uint32_t* seed, float noise[2][PART_LEN1]) {
+  const float kPi2 = 6.28318530717959f;
+  int16_t randW16[PART_LEN];
+  WebRtcSpl_RandUArray(randW16, PART_LEN, seed);
+
+  noise[0][0] = 0;
+  noise[1][0] = 0;
+  for (size_t i = 1; i < PART_LEN1; i++) {
+    float tmp = kPi2 * randW16[i - 1] / 32768.f;
+    noise[0][i] = cosf(tmp);
+    noise[1][i] = -sinf(tmp);
+  }
+  noise[1][PART_LEN] = 0;
+}
+
+static void ComfortNoise(bool generate_high_frequency_noise,
+                         uint32_t* seed,
+                         float e_fft[2][PART_LEN1],
+                         float high_frequency_comfort_noise[2][PART_LEN1],
+                         const float* noise_spectrum,
+                         const float* suppressor_gain) {
+  float complex_noise[2][PART_LEN1];
+
+  GenerateComplexNoise(seed, complex_noise);
+
+  // Shape, scale and add comfort noise.
+  for (int i = 1; i < PART_LEN1; ++i) {
+    float noise_scaling =
+        sqrtf(WEBRTC_SPL_MAX(1 - suppressor_gain[i] * suppressor_gain[i], 0)) *
+        sqrtf(noise_spectrum[i]);
+    e_fft[0][i] += noise_scaling * complex_noise[0][i];
+    e_fft[1][i] += noise_scaling * complex_noise[1][i];
+  }
+
+  // Form comfort noise for higher frequencies.
+  if (generate_high_frequency_noise) {
+    // Compute average noise power and nlp gain over the second half of freq
+    // spectrum (i.e., 4->8khz).
+    int start_avg_band = PART_LEN1 / 2;
+    float upper_bands_noise_power = 0.f;
+    float upper_bands_suppressor_gain = 0.f;
+    for (int i = start_avg_band; i < PART_LEN1; ++i) {
+      upper_bands_noise_power += sqrtf(noise_spectrum[i]);
+      upper_bands_suppressor_gain +=
+          sqrtf(WEBRTC_SPL_MAX(1 - suppressor_gain[i] * suppressor_gain[i], 0));
+    }
+    upper_bands_noise_power /= (PART_LEN1 - start_avg_band);
+    upper_bands_suppressor_gain /= (PART_LEN1 - start_avg_band);
+
+    // Shape, scale and add comfort noise.
+    float noise_scaling = upper_bands_suppressor_gain * upper_bands_noise_power;
+    high_frequency_comfort_noise[0][0] = 0;
+    high_frequency_comfort_noise[1][0] = 0;
+    for (int i = 1; i < PART_LEN1; ++i) {
+      high_frequency_comfort_noise[0][i] = noise_scaling * complex_noise[0][i];
+      high_frequency_comfort_noise[1][i] = noise_scaling * complex_noise[1][i];
+    }
+    high_frequency_comfort_noise[1][PART_LEN] = 0;
+  } else {
+    memset(high_frequency_comfort_noise, 0,
+           2 * PART_LEN1 * sizeof(high_frequency_comfort_noise[0][0]));
+  }
+}
+
+static void InitLevel(PowerLevel* level) {
+  const float kBigFloat = 1E17f;
+  level->averagelevel.Reset();
+  level->framelevel.Reset();
+  level->minlevel = kBigFloat;
+}
+
+static void InitStats(Stats* stats) {
+  stats->instant = kOffsetLevel;
+  stats->average = kOffsetLevel;
+  stats->max = kOffsetLevel;
+  stats->min = kOffsetLevel * (-1);
+  stats->sum = 0;
+  stats->hisum = 0;
+  stats->himean = kOffsetLevel;
+  stats->counter = 0;
+  stats->hicounter = 0;
+}
+
+static void InitMetrics(AecCore* self) {
+  self->stateCounter = 0;
+  InitLevel(&self->farlevel);
+  InitLevel(&self->nearlevel);
+  InitLevel(&self->linoutlevel);
+  InitLevel(&self->nlpoutlevel);
+
+  InitStats(&self->erl);
+  InitStats(&self->erle);
+  InitStats(&self->aNlp);
+  InitStats(&self->rerl);
+
+  self->divergent_filter_fraction.Reset();
+}
+
+static float CalculatePower(const float* in, size_t num_samples) {
+  size_t k;
+  float energy = 0.0f;
+
+  for (k = 0; k < num_samples; ++k) {
+    energy += in[k] * in[k];
+  }
+  return energy / num_samples;
+}
+
+static void UpdateLevel(PowerLevel* level, float power) {
+  level->framelevel.AddValue(power);
+  if (level->framelevel.EndOfBlock()) {
+    const float new_frame_level = level->framelevel.GetLatestMean();
+    if (new_frame_level > 0) {
+      if (new_frame_level < level->minlevel) {
+        level->minlevel = new_frame_level;  // New minimum.
+      } else {
+        level->minlevel *= (1 + 0.001f);  // Small increase.
+      }
+    }
+    level->averagelevel.AddValue(new_frame_level);
+  }
+}
+
+static void UpdateMetrics(AecCore* aec) {
+  const float actThresholdNoisy = 8.0f;
+  const float actThresholdClean = 40.0f;
+
+  const float noisyPower = 300000.0f;
+
+  float actThreshold;
+
+  if (aec->echoState) {  // Check if echo is likely present
+    aec->stateCounter++;
+  }
+
+  if (aec->linoutlevel.framelevel.EndOfBlock()) {
+    aec->divergent_filter_fraction.AddObservation(aec->nearlevel,
+                                                  aec->linoutlevel,
+                                                  aec->nlpoutlevel);
+  }
+
+  if (aec->farlevel.averagelevel.EndOfBlock()) {
+    if (aec->farlevel.minlevel < noisyPower) {
+      actThreshold = actThresholdClean;
+    } else {
+      actThreshold = actThresholdNoisy;
+    }
+
+    const float far_average_level = aec->farlevel.averagelevel.GetLatestMean();
+
+    // The last condition is to let estimation be made in active far-end
+    // segments only.
+    if ((aec->stateCounter > (0.5f * kCountLen * kSubCountLen)) &&
+        (aec->farlevel.framelevel.EndOfBlock()) &&
+        (far_average_level > (actThreshold * aec->farlevel.minlevel))) {
+
+      // ERL: error return loss.
+      const float near_average_level =
+          aec->nearlevel.averagelevel.GetLatestMean();
+      UpdateLogRatioMetric(&aec->erl, far_average_level, near_average_level);
+
+      // A_NLP: error return loss enhanced before the nonlinear suppression.
+      const float linout_average_level =
+          aec->linoutlevel.averagelevel.GetLatestMean();
+      UpdateLogRatioMetric(&aec->aNlp, near_average_level,
+                           linout_average_level);
+
+      // ERLE: error return loss enhanced.
+      const float nlpout_average_level =
+          aec->nlpoutlevel.averagelevel.GetLatestMean();
+      UpdateLogRatioMetric(&aec->erle, near_average_level,
+                           nlpout_average_level);
+    }
+
+    aec->stateCounter = 0;
+  }
+}
+
+static void UpdateDelayMetrics(AecCore* self) {
+  int i = 0;
+  int delay_values = 0;
+  int median = 0;
+  int lookahead = WebRtc_lookahead(self->delay_estimator);
+  const int kMsPerBlock = PART_LEN / (self->mult * 8);
+  int64_t l1_norm = 0;
+
+  if (self->num_delay_values == 0) {
+    // We have no new delay value data. Even though -1 is a valid |median| in
+    // the sense that we allow negative values, it will practically never be
+    // used since multiples of |kMsPerBlock| will always be returned.
+    // We therefore use -1 to indicate in the logs that the delay estimator was
+    // not able to estimate the delay.
+    self->delay_median = -1;
+    self->delay_std = -1;
+    self->fraction_poor_delays = -1;
+    return;
+  }
+
+  // Start value for median count down.
+  delay_values = self->num_delay_values >> 1;
+  // Get median of delay values since last update.
+  for (i = 0; i < kHistorySizeBlocks; i++) {
+    delay_values -= self->delay_histogram[i];
+    if (delay_values < 0) {
+      median = i;
+      break;
+    }
+  }
+  // Account for lookahead.
+  self->delay_median = (median - lookahead) * kMsPerBlock;
+
+  // Calculate the L1 norm, with median value as central moment.
+  for (i = 0; i < kHistorySizeBlocks; i++) {
+    l1_norm += abs(i - median) * self->delay_histogram[i];
+  }
+  self->delay_std =
+      static_cast<int>((l1_norm + self->num_delay_values / 2) /
+                       self->num_delay_values) * kMsPerBlock;
+
+  // Determine fraction of delays that are out of bounds, that is, either
+  // negative (anti-causal system) or larger than the AEC filter length.
+  {
+    int num_delays_out_of_bounds = self->num_delay_values;
+    const int histogram_length =
+        sizeof(self->delay_histogram) / sizeof(self->delay_histogram[0]);
+    for (i = lookahead; i < lookahead + self->num_partitions; ++i) {
+      if (i < histogram_length)
+        num_delays_out_of_bounds -= self->delay_histogram[i];
+    }
+    self->fraction_poor_delays =
+        static_cast<float>(num_delays_out_of_bounds) / self->num_delay_values;
+  }
+
+  // Reset histogram.
+  memset(self->delay_histogram, 0, sizeof(self->delay_histogram));
+  self->num_delay_values = 0;
+}
+
+static void ScaledInverseFft(const OouraFft& ooura_fft,
+                             float freq_data[2][PART_LEN1],
+                             float time_data[PART_LEN2],
+                             float scale,
+                             int conjugate) {
+  int i;
+  const float normalization = scale / static_cast<float>(PART_LEN2);
+  const float sign = (conjugate ? -1 : 1);
+  time_data[0] = freq_data[0][0] * normalization;
+  time_data[1] = freq_data[0][PART_LEN] * normalization;
+  for (i = 1; i < PART_LEN; i++) {
+    time_data[2 * i] = freq_data[0][i] * normalization;
+    time_data[2 * i + 1] = sign * freq_data[1][i] * normalization;
+  }
+  ooura_fft.InverseFft(time_data);
+}
+
+static void Fft(const OouraFft& ooura_fft,
+                float time_data[PART_LEN2],
+                float freq_data[2][PART_LEN1]) {
+  int i;
+  ooura_fft.Fft(time_data);
+
+  // Reorder fft output data.
+  freq_data[1][0] = 0;
+  freq_data[1][PART_LEN] = 0;
+  freq_data[0][0] = time_data[0];
+  freq_data[0][PART_LEN] = time_data[1];
+  for (i = 1; i < PART_LEN; i++) {
+    freq_data[0][i] = time_data[2 * i];
+    freq_data[1][i] = time_data[2 * i + 1];
+  }
+}
+
+static int SignalBasedDelayCorrection(AecCore* self) {
+  int delay_correction = 0;
+  int last_delay = -2;
+  RTC_DCHECK(self);
+#if !defined(WEBRTC_ANDROID)
+  // On desktops, turn on correction after |kDelayCorrectionStart| frames.  This
+  // is to let the delay estimation get a chance to converge.  Also, if the
+  // playout audio volume is low (or even muted) the delay estimation can return
+  // a very large delay, which will break the AEC if it is applied.
+  if (self->frame_count < kDelayCorrectionStart) {
+    self->data_dumper->DumpRaw("aec_da_reported_delay", 1, &last_delay);
+    return 0;
+  }
+#endif
+
+  // 1. Check for non-negative delay estimate.  Note that the estimates we get
+  //    from the delay estimation are not compensated for lookahead.  Hence, a
+  //    negative |last_delay| is an invalid one.
+  // 2. Verify that there is a delay change.  In addition, only allow a change
+  //    if the delay is outside a certain region taking the AEC filter length
+  //    into account.
+  // TODO(bjornv): Investigate if we can remove the non-zero delay change check.
+  // 3. Only allow delay correction if the delay estimation quality exceeds
+  //    |delay_quality_threshold|.
+  // 4. Finally, verify that the proposed |delay_correction| is feasible by
+  //    comparing with the size of the far-end buffer.
+  last_delay = WebRtc_last_delay(self->delay_estimator);
+  self->data_dumper->DumpRaw("aec_da_reported_delay", 1, &last_delay);
+  if ((last_delay >= 0) && (last_delay != self->previous_delay) &&
+      (WebRtc_last_delay_quality(self->delay_estimator) >
+       self->delay_quality_threshold)) {
+    int delay = last_delay - WebRtc_lookahead(self->delay_estimator);
+    // Allow for a slack in the actual delay, defined by a |lower_bound| and an
+    // |upper_bound|.  The adaptive echo cancellation filter is currently
+    // |num_partitions| (of 64 samples) long.  If the delay estimate is negative
+    // or at least 3/4 of the filter length we open up for correction.
+    const int lower_bound = 0;
+    const int upper_bound = self->num_partitions * 3 / 4;
+    const int do_correction = delay <= lower_bound || delay > upper_bound;
+    if (do_correction == 1) {
+      int available_read = self->farend_block_buffer_.Size();
+      // With |shift_offset| we gradually rely on the delay estimates.  For
+      // positive delays we reduce the correction by |shift_offset| to lower the
+      // risk of pushing the AEC into a non causal state.  For negative delays
+      // we rely on the values up to a rounding error, hence compensate by 1
+      // element to make sure to push the delay into the causal region.
+      delay_correction = -delay;
+      delay_correction += delay > self->shift_offset ? self->shift_offset : 1;
+      self->shift_offset--;
+      self->shift_offset = (self->shift_offset <= 1 ? 1 : self->shift_offset);
+      if (delay_correction > available_read - self->mult - 1) {
+        // There is not enough data in the buffer to perform this shift.  Hence,
+        // we do not rely on the delay estimate and do nothing.
+        delay_correction = 0;
+      } else {
+        self->previous_delay = last_delay;
+        ++self->delay_correction_count;
+      }
+    }
+  }
+  // Update the |delay_quality_threshold| once we have our first delay
+  // correction.
+  if (self->delay_correction_count > 0) {
+    float delay_quality = WebRtc_last_delay_quality(self->delay_estimator);
+    delay_quality =
+        (delay_quality > kDelayQualityThresholdMax ? kDelayQualityThresholdMax
+                                                   : delay_quality);
+    self->delay_quality_threshold =
+        (delay_quality > self->delay_quality_threshold
+             ? delay_quality
+             : self->delay_quality_threshold);
+  }
+  self->data_dumper->DumpRaw("aec_da_delay_correction", 1, &delay_correction);
+
+  return delay_correction;
+}
+
+static void RegressorPower(int num_partitions,
+                           int latest_added_partition,
+                           float x_fft_buf[2]
+                                          [kExtendedNumPartitions * PART_LEN1],
+                           float x_pow[PART_LEN1]) {
+  RTC_DCHECK_LT(latest_added_partition, num_partitions);
+  memset(x_pow, 0, PART_LEN1 * sizeof(x_pow[0]));
+
+  int partition = latest_added_partition;
+  int x_fft_buf_position = partition * PART_LEN1;
+  for (int i = 0; i < num_partitions; ++i) {
+    for (int bin = 0; bin < PART_LEN1; ++bin) {
+      float re = x_fft_buf[0][x_fft_buf_position];
+      float im = x_fft_buf[1][x_fft_buf_position];
+      x_pow[bin] += re * re + im * im;
+      ++x_fft_buf_position;
+    }
+
+    ++partition;
+    if (partition == num_partitions) {
+      partition = 0;
+      RTC_DCHECK_EQ(num_partitions * PART_LEN1, x_fft_buf_position);
+      x_fft_buf_position = 0;
+    }
+  }
+}
+
+static void EchoSubtraction(const OouraFft& ooura_fft,
+                            int num_partitions,
+                            int extended_filter_enabled,
+                            int* extreme_filter_divergence,
+                            float filter_step_size,
+                            float error_threshold,
+                            float* x_fft,
+                            int* x_fft_buf_block_pos,
+                            float x_fft_buf[2]
+                                           [kExtendedNumPartitions * PART_LEN1],
+                            float* const y,
+                            float x_pow[PART_LEN1],
+                            float h_fft_buf[2]
+                                           [kExtendedNumPartitions * PART_LEN1],
+                            float echo_subtractor_output[PART_LEN]) {
+  float s_fft[2][PART_LEN1];
+  float e_extended[PART_LEN2];
+  float s_extended[PART_LEN2];
+  float* s;
+  float e[PART_LEN];
+  float e_fft[2][PART_LEN1];
+  int i;
+
+  // Update the x_fft_buf block position.
+  (*x_fft_buf_block_pos)--;
+  if ((*x_fft_buf_block_pos) == -1) {
+    *x_fft_buf_block_pos = num_partitions - 1;
+  }
+
+  // Buffer x_fft.
+  memcpy(x_fft_buf[0] + (*x_fft_buf_block_pos) * PART_LEN1, x_fft,
+         sizeof(float) * PART_LEN1);
+  memcpy(x_fft_buf[1] + (*x_fft_buf_block_pos) * PART_LEN1, &x_fft[PART_LEN1],
+         sizeof(float) * PART_LEN1);
+
+  memset(s_fft, 0, sizeof(s_fft));
+
+  // Conditionally reset the echo subtraction filter if the filter has diverged
+  // significantly.
+  if (!extended_filter_enabled && *extreme_filter_divergence) {
+    memset(h_fft_buf, 0,
+           2 * kExtendedNumPartitions * PART_LEN1 * sizeof(h_fft_buf[0][0]));
+    *extreme_filter_divergence = 0;
+  }
+
+  // Produce echo estimate s_fft.
+  WebRtcAec_FilterFar(num_partitions, *x_fft_buf_block_pos, x_fft_buf,
+                      h_fft_buf, s_fft);
+
+  // Compute the time-domain echo estimate s.
+  ScaledInverseFft(ooura_fft, s_fft, s_extended, 2.0f, 0);
+  s = &s_extended[PART_LEN];
+
+  // Compute the time-domain echo prediction error.
+  for (i = 0; i < PART_LEN; ++i) {
+    e[i] = y[i] - s[i];
+  }
+
+  // Compute the frequency domain echo prediction error.
+  memset(e_extended, 0, sizeof(float) * PART_LEN);
+  memcpy(e_extended + PART_LEN, e, sizeof(float) * PART_LEN);
+  Fft(ooura_fft, e_extended, e_fft);
+
+  // Scale error signal inversely with far power.
+  WebRtcAec_ScaleErrorSignal(filter_step_size, error_threshold, x_pow, e_fft);
+  WebRtcAec_FilterAdaptation(ooura_fft, num_partitions, *x_fft_buf_block_pos,
+                             x_fft_buf, e_fft, h_fft_buf);
+  memcpy(echo_subtractor_output, e, sizeof(float) * PART_LEN);
+}
+
+static void FormSuppressionGain(AecCore* aec,
+                                float cohde[PART_LEN1],
+                                float cohxd[PART_LEN1],
+                                float hNl[PART_LEN1]) {
+  float hNlDeAvg, hNlXdAvg;
+  float hNlPref[kPrefBandSize];
+  float hNlFb = 0, hNlFbLow = 0;
+  const int prefBandSize = kPrefBandSize / aec->mult;
+  const float prefBandQuant = 0.75f, prefBandQuantLow = 0.5f;
+  const int minPrefBand = 4 / aec->mult;
+  // Power estimate smoothing coefficients.
+  const float* min_overdrive = aec->extended_filter_enabled
+                                   ? kExtendedMinOverDrive
+                                   : kNormalMinOverDrive;
+
+  hNlXdAvg = 0;
+  for (int i = minPrefBand; i < prefBandSize + minPrefBand; ++i) {
+    hNlXdAvg += cohxd[i];
+  }
+  hNlXdAvg /= prefBandSize;
+  hNlXdAvg = 1 - hNlXdAvg;
+
+  hNlDeAvg = 0;
+  for (int i = minPrefBand; i < prefBandSize + minPrefBand; ++i) {
+    hNlDeAvg += cohde[i];
+  }
+  hNlDeAvg /= prefBandSize;
+
+  if (hNlXdAvg < 0.75f && hNlXdAvg < aec->hNlXdAvgMin) {
+    aec->hNlXdAvgMin = hNlXdAvg;
+  }
+
+  if (hNlDeAvg > 0.98f && hNlXdAvg > 0.9f) {
+    aec->stNearState = 1;
+  } else if (hNlDeAvg < 0.95f || hNlXdAvg < 0.8f) {
+    aec->stNearState = 0;
+  }
+
+  if (aec->hNlXdAvgMin == 1) {
+    aec->echoState = 0;
+    aec->overDrive = min_overdrive[aec->nlp_mode];
+
+    if (aec->stNearState == 1) {
+      memcpy(hNl, cohde, sizeof(hNl[0]) * PART_LEN1);
+      hNlFb = hNlDeAvg;
+      hNlFbLow = hNlDeAvg;
+    } else {
+      for (int i = 0; i < PART_LEN1; ++i) {
+        hNl[i] = 1 - cohxd[i];
+        hNl[i] = std::max(hNl[i], 0.f);
+      }
+      hNlFb = hNlXdAvg;
+      hNlFbLow = hNlXdAvg;
+    }
+  } else {
+    if (aec->stNearState == 1) {
+      aec->echoState = 0;
+      memcpy(hNl, cohde, sizeof(hNl[0]) * PART_LEN1);
+      hNlFb = hNlDeAvg;
+      hNlFbLow = hNlDeAvg;
+    } else {
+      aec->echoState = 1;
+      for (int i = 0; i < PART_LEN1; ++i) {
+        hNl[i] = WEBRTC_SPL_MIN(cohde[i], 1 - cohxd[i]);
+        hNl[i] = std::max(hNl[i], 0.f);
+      }
+
+      // Select an order statistic from the preferred bands.
+      // TODO(peah): Using quicksort now, but a selection algorithm may be
+      // preferred.
+      memcpy(hNlPref, &hNl[minPrefBand], sizeof(float) * prefBandSize);
+      qsort(hNlPref, prefBandSize, sizeof(float), CmpFloat);
+      hNlFb = hNlPref[static_cast<int>(floor(prefBandQuant *
+                                             (prefBandSize - 1)))];
+      hNlFbLow = hNlPref[static_cast<int>(floor(prefBandQuantLow *
+                                                (prefBandSize - 1)))];
+    }
+  }
+
+  // Track the local filter minimum to determine suppression overdrive.
+  if (hNlFbLow < 0.6f && hNlFbLow < aec->hNlFbLocalMin) {
+    aec->hNlFbLocalMin = hNlFbLow;
+    aec->hNlFbMin = hNlFbLow;
+    aec->hNlNewMin = 1;
+    aec->hNlMinCtr = 0;
+  }
+  aec->hNlFbLocalMin =
+      WEBRTC_SPL_MIN(aec->hNlFbLocalMin + 0.0008f / aec->mult, 1);
+  aec->hNlXdAvgMin = WEBRTC_SPL_MIN(aec->hNlXdAvgMin + 0.0006f / aec->mult, 1);
+
+  if (aec->hNlNewMin == 1) {
+    aec->hNlMinCtr++;
+  }
+  if (aec->hNlMinCtr == 2) {
+    aec->hNlNewMin = 0;
+    aec->hNlMinCtr = 0;
+    aec->overDrive =
+        WEBRTC_SPL_MAX(kTargetSupp[aec->nlp_mode] /
+                       static_cast<float>(log(aec->hNlFbMin + 1e-10f) + 1e-10f),
+                       min_overdrive[aec->nlp_mode]);
+  }
+
+  // Smooth the overdrive.
+  if (aec->overDrive < aec->overdrive_scaling) {
+    aec->overdrive_scaling =
+        0.99f * aec->overdrive_scaling + 0.01f * aec->overDrive;
+  } else {
+    aec->overdrive_scaling =
+        0.9f * aec->overdrive_scaling + 0.1f * aec->overDrive;
+  }
+
+  // Apply the overdrive.
+  WebRtcAec_Overdrive(aec->overdrive_scaling, hNlFb, hNl);
+}
+
+static void EchoSuppression(const OouraFft& ooura_fft,
+                            AecCore* aec,
+                            float* nearend_extended_block_lowest_band,
+                            float farend_extended_block[PART_LEN2],
+                            float* echo_subtractor_output,
+                            float output[NUM_HIGH_BANDS_MAX + 1][PART_LEN]) {
+  float efw[2][PART_LEN1];
+  float xfw[2][PART_LEN1];
+  float dfw[2][PART_LEN1];
+  float comfortNoiseHband[2][PART_LEN1];
+  float fft[PART_LEN2];
+  float nlpGainHband;
+  int i;
+  size_t j;
+
+  // Coherence and non-linear filter
+  float cohde[PART_LEN1], cohxd[PART_LEN1];
+  float hNl[PART_LEN1];
+
+  // Filter energy
+  const int delayEstInterval = 10 * aec->mult;
+
+  float* xfw_ptr = NULL;
+
+  // Update eBuf with echo subtractor output.
+  memcpy(aec->eBuf + PART_LEN, echo_subtractor_output,
+         sizeof(float) * PART_LEN);
+
+  // Analysis filter banks for the echo suppressor.
+  // Windowed near-end ffts.
+  WindowData(fft, nearend_extended_block_lowest_band);
+  ooura_fft.Fft(fft);
+  StoreAsComplex(fft, dfw);
+
+  // Windowed echo suppressor output ffts.
+  WindowData(fft, aec->eBuf);
+  ooura_fft.Fft(fft);
+  StoreAsComplex(fft, efw);
+
+  // NLP
+
+  // Convert far-end partition to the frequency domain with windowing.
+  WindowData(fft, farend_extended_block);
+  Fft(ooura_fft, fft, xfw);
+  xfw_ptr = &xfw[0][0];
+
+  // Buffer far.
+  memcpy(aec->xfwBuf, xfw_ptr, sizeof(float) * 2 * PART_LEN1);
+
+  aec->delayEstCtr++;
+  if (aec->delayEstCtr == delayEstInterval) {
+    aec->delayEstCtr = 0;
+    aec->delayIdx = WebRtcAec_PartitionDelay(aec->num_partitions, aec->wfBuf);
+  }
+
+  aec->data_dumper->DumpRaw("aec_nlp_delay", 1, &aec->delayIdx);
+
+  // Use delayed far.
+  memcpy(xfw, aec->xfwBuf + aec->delayIdx * PART_LEN1,
+         sizeof(xfw[0][0]) * 2 * PART_LEN1);
+
+  WebRtcAec_UpdateCoherenceSpectra(aec->mult, aec->extended_filter_enabled == 1,
+                                   efw, dfw, xfw, &aec->coherence_state,
+                                   &aec->divergeState,
+                                   &aec->extreme_filter_divergence);
+
+  WebRtcAec_ComputeCoherence(&aec->coherence_state, cohde, cohxd);
+
+  // Select the microphone signal as output if the filter is deemed to have
+  // diverged.
+  if (aec->divergeState) {
+    memcpy(efw, dfw, sizeof(efw[0][0]) * 2 * PART_LEN1);
+  }
+
+  FormSuppressionGain(aec, cohde, cohxd, hNl);
+
+  aec->data_dumper->DumpRaw("aec_nlp_gain", PART_LEN1, hNl);
+
+  WebRtcAec_Suppress(hNl, efw);
+
+  // Add comfort noise.
+  ComfortNoise(aec->num_bands > 1, &aec->seed, efw, comfortNoiseHband,
+               aec->noisePow, hNl);
+
+  // Inverse error fft.
+  ScaledInverseFft(ooura_fft, efw, fft, 2.0f, 1);
+
+  // Overlap and add to obtain output.
+  for (i = 0; i < PART_LEN; i++) {
+    output[0][i] = (fft[i] * WebRtcAec_sqrtHanning[i] +
+                    aec->outBuf[i] * WebRtcAec_sqrtHanning[PART_LEN - i]);
+
+    // Saturate output to keep it in the allowed range.
+    output[0][i] = WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, output[0][i],
+                                  WEBRTC_SPL_WORD16_MIN);
+  }
+  memcpy(aec->outBuf, &fft[PART_LEN], PART_LEN * sizeof(aec->outBuf[0]));
+
+  // For H band
+  if (aec->num_bands > 1) {
+    // H band gain
+    // average nlp over low band: average over second half of freq spectrum
+    // (4->8khz)
+    GetHighbandGain(hNl, &nlpGainHband);
+
+    // Inverse comfort_noise
+    ScaledInverseFft(ooura_fft, comfortNoiseHband, fft, 2.0f, 0);
+
+    // compute gain factor
+    for (j = 1; j < aec->num_bands; ++j) {
+      for (i = 0; i < PART_LEN; i++) {
+        output[j][i] = aec->previous_nearend_block[j][i] * nlpGainHband;
+      }
+    }
+
+    // Add some comfort noise where Hband is attenuated.
+    for (i = 0; i < PART_LEN; i++) {
+      output[1][i] += cnScaleHband * fft[i];
+    }
+
+    // Saturate output to keep it in the allowed range.
+    for (j = 1; j < aec->num_bands; ++j) {
+      for (i = 0; i < PART_LEN; i++) {
+        output[j][i] = WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, output[j][i],
+                                      WEBRTC_SPL_WORD16_MIN);
+      }
+    }
+  }
+
+  // Copy the current block to the old position.
+  memcpy(aec->eBuf, aec->eBuf + PART_LEN, sizeof(float) * PART_LEN);
+
+  memmove(aec->xfwBuf + PART_LEN1, aec->xfwBuf,
+          sizeof(aec->xfwBuf) - sizeof(complex_t) * PART_LEN1);
+}
+
+static void ProcessNearendBlock(
+    AecCore* aec,
+    float farend_extended_block_lowest_band[PART_LEN2],
+    float nearend_block[NUM_HIGH_BANDS_MAX + 1][PART_LEN],
+    float output_block[NUM_HIGH_BANDS_MAX + 1][PART_LEN]) {
+  size_t i;
+
+  float fft[PART_LEN2];
+  float nearend_extended_block_lowest_band[PART_LEN2];
+  float farend_fft[2][PART_LEN1];
+  float nearend_fft[2][PART_LEN1];
+  float far_spectrum = 0.0f;
+  float near_spectrum = 0.0f;
+  float abs_far_spectrum[PART_LEN1];
+  float abs_near_spectrum[PART_LEN1];
+
+  const float gPow[2] = {0.9f, 0.1f};
+
+  // Noise estimate constants.
+  const int noiseInitBlocks = 500 * aec->mult;
+  const float step = 0.1f;
+  const float ramp = 1.0002f;
+  const float gInitNoise[2] = {0.999f, 0.001f};
+
+  float echo_subtractor_output[PART_LEN];
+
+  aec->data_dumper->DumpWav("aec_far", PART_LEN,
+                            &farend_extended_block_lowest_band[PART_LEN],
+                            std::min(aec->sampFreq, 16000), 1);
+  aec->data_dumper->DumpWav("aec_near", PART_LEN, &nearend_block[0][0],
+                            std::min(aec->sampFreq, 16000), 1);
+
+  if (aec->metricsMode == 1) {
+    // Update power levels
+    UpdateLevel(
+        &aec->farlevel,
+        CalculatePower(&farend_extended_block_lowest_band[PART_LEN], PART_LEN));
+    UpdateLevel(&aec->nearlevel,
+                CalculatePower(&nearend_block[0][0], PART_LEN));
+  }
+
+  // Convert far-end signal to the frequency domain.
+  memcpy(fft, farend_extended_block_lowest_band, sizeof(float) * PART_LEN2);
+  Fft(aec->ooura_fft, fft, farend_fft);
+
+  // Form extended nearend frame.
+  memcpy(&nearend_extended_block_lowest_band[0],
+         &aec->previous_nearend_block[0][0], sizeof(float) * PART_LEN);
+  memcpy(&nearend_extended_block_lowest_band[PART_LEN], &nearend_block[0][0],
+         sizeof(float) * PART_LEN);
+
+  // Convert near-end signal to the frequency domain.
+  memcpy(fft, nearend_extended_block_lowest_band, sizeof(float) * PART_LEN2);
+  Fft(aec->ooura_fft, fft, nearend_fft);
+
+  // Power smoothing.
+  if (aec->refined_adaptive_filter_enabled) {
+    for (i = 0; i < PART_LEN1; ++i) {
+      far_spectrum = farend_fft[0][i] * farend_fft[0][i] +
+                     farend_fft[1][i] * farend_fft[1][i];
+      // Calculate the magnitude spectrum.
+      abs_far_spectrum[i] = sqrtf(far_spectrum);
+    }
+    RegressorPower(aec->num_partitions, aec->xfBufBlockPos, aec->xfBuf,
+                   aec->xPow);
+  } else {
+    for (i = 0; i < PART_LEN1; ++i) {
+      far_spectrum = farend_fft[0][i] * farend_fft[0][i] +
+                     farend_fft[1][i] * farend_fft[1][i];
+      aec->xPow[i] =
+          gPow[0] * aec->xPow[i] + gPow[1] * aec->num_partitions * far_spectrum;
+      // Calculate the magnitude spectrum.
+      abs_far_spectrum[i] = sqrtf(far_spectrum);
+    }
+  }
+
+  for (i = 0; i < PART_LEN1; ++i) {
+    near_spectrum = nearend_fft[0][i] * nearend_fft[0][i] +
+                    nearend_fft[1][i] * nearend_fft[1][i];
+    aec->dPow[i] = gPow[0] * aec->dPow[i] + gPow[1] * near_spectrum;
+    // Calculate the magnitude spectrum.
+    abs_near_spectrum[i] = sqrtf(near_spectrum);
+  }
+
+  // Estimate noise power. Wait until dPow is more stable.
+  if (aec->noiseEstCtr > 50) {
+    for (i = 0; i < PART_LEN1; i++) {
+      if (aec->dPow[i] < aec->dMinPow[i]) {
+        aec->dMinPow[i] =
+            (aec->dPow[i] + step * (aec->dMinPow[i] - aec->dPow[i])) * ramp;
+      } else {
+        aec->dMinPow[i] *= ramp;
+      }
+    }
+  }
+
+  // Smooth increasing noise power from zero at the start,
+  // to avoid a sudden burst of comfort noise.
+  if (aec->noiseEstCtr < noiseInitBlocks) {
+    aec->noiseEstCtr++;
+    for (i = 0; i < PART_LEN1; i++) {
+      if (aec->dMinPow[i] > aec->dInitMinPow[i]) {
+        aec->dInitMinPow[i] = gInitNoise[0] * aec->dInitMinPow[i] +
+                              gInitNoise[1] * aec->dMinPow[i];
+      } else {
+        aec->dInitMinPow[i] = aec->dMinPow[i];
+      }
+    }
+    aec->noisePow = aec->dInitMinPow;
+  } else {
+    aec->noisePow = aec->dMinPow;
+  }
+
+  // Block wise delay estimation used for logging
+  if (aec->delay_logging_enabled) {
+    if (WebRtc_AddFarSpectrumFloat(aec->delay_estimator_farend,
+                                   abs_far_spectrum, PART_LEN1) == 0) {
+      int delay_estimate = WebRtc_DelayEstimatorProcessFloat(
+          aec->delay_estimator, abs_near_spectrum, PART_LEN1);
+      if (delay_estimate >= 0) {
+        // Update delay estimate buffer.
+        aec->delay_histogram[delay_estimate]++;
+        aec->num_delay_values++;
+      }
+      if (aec->delay_metrics_delivered == 1 &&
+          aec->num_delay_values >= kDelayMetricsAggregationWindow) {
+        UpdateDelayMetrics(aec);
+      }
+    }
+  }
+
+  // Perform echo subtraction.
+  EchoSubtraction(
+      aec->ooura_fft, aec->num_partitions, aec->extended_filter_enabled,
+      &aec->extreme_filter_divergence, aec->filter_step_size,
+      aec->error_threshold, &farend_fft[0][0], &aec->xfBufBlockPos, aec->xfBuf,
+      &nearend_block[0][0], aec->xPow, aec->wfBuf, echo_subtractor_output);
+  aec->data_dumper->DumpRaw("aec_h_fft", PART_LEN1 * aec->num_partitions,
+                            &aec->wfBuf[0][0]);
+  aec->data_dumper->DumpRaw("aec_h_fft", PART_LEN1 * aec->num_partitions,
+                            &aec->wfBuf[1][0]);
+
+  aec->data_dumper->DumpWav("aec_out_linear", PART_LEN, echo_subtractor_output,
+                            std::min(aec->sampFreq, 16000), 1);
+
+  if (aec->metricsMode == 1) {
+    UpdateLevel(&aec->linoutlevel,
+                CalculatePower(echo_subtractor_output, PART_LEN));
+  }
+
+  // Perform echo suppression.
+  EchoSuppression(aec->ooura_fft, aec, nearend_extended_block_lowest_band,
+                  farend_extended_block_lowest_band, echo_subtractor_output,
+                  output_block);
+
+  if (aec->metricsMode == 1) {
+    UpdateLevel(&aec->nlpoutlevel,
+                CalculatePower(&output_block[0][0], PART_LEN));
+    UpdateMetrics(aec);
+  }
+
+  // Store the nearend signal until the next frame.
+  for (i = 0; i < aec->num_bands; ++i) {
+    memcpy(&aec->previous_nearend_block[i][0], &nearend_block[i][0],
+           sizeof(float) * PART_LEN);
+  }
+
+  aec->data_dumper->DumpWav("aec_out", PART_LEN, &output_block[0][0],
+                            std::min(aec->sampFreq, 16000), 1);
+}
+
+AecCore* WebRtcAec_CreateAec(int instance_count) {
+  AecCore* aec = new AecCore(instance_count);
+
+  if (!aec) {
+    return NULL;
+  }
+  aec->nearend_buffer_size = 0;
+  memset(&aec->nearend_buffer[0], 0, sizeof(aec->nearend_buffer));
+  // Start the output buffer with zeros to be able to produce
+  // a full output frame in the first frame.
+  aec->output_buffer_size = PART_LEN - (FRAME_LEN - PART_LEN);
+  memset(&aec->output_buffer[0], 0, sizeof(aec->output_buffer));
+
+  aec->delay_estimator_farend =
+      WebRtc_CreateDelayEstimatorFarend(PART_LEN1, kHistorySizeBlocks);
+  if (aec->delay_estimator_farend == NULL) {
+    WebRtcAec_FreeAec(aec);
+    return NULL;
+  }
+  // We create the delay_estimator with the same amount of maximum lookahead as
+  // the delay history size (kHistorySizeBlocks) for symmetry reasons.
+  aec->delay_estimator = WebRtc_CreateDelayEstimator(
+      aec->delay_estimator_farend, kHistorySizeBlocks);
+  if (aec->delay_estimator == NULL) {
+    WebRtcAec_FreeAec(aec);
+    return NULL;
+  }
+#ifdef WEBRTC_ANDROID
+  aec->delay_agnostic_enabled = 1;  // DA-AEC enabled by default.
+  // DA-AEC assumes the system is causal from the beginning and will self adjust
+  // the lookahead when shifting is required.
+  WebRtc_set_lookahead(aec->delay_estimator, 0);
+#else
+  aec->delay_agnostic_enabled = 0;
+  WebRtc_set_lookahead(aec->delay_estimator, kLookaheadBlocks);
+#endif
+  aec->extended_filter_enabled = 0;
+  aec->refined_adaptive_filter_enabled = false;
+
+  // Assembly optimization
+  WebRtcAec_FilterFar = FilterFar;
+  WebRtcAec_ScaleErrorSignal = ScaleErrorSignal;
+  WebRtcAec_FilterAdaptation = FilterAdaptation;
+  WebRtcAec_Overdrive = Overdrive;
+  WebRtcAec_Suppress = Suppress;
+  WebRtcAec_ComputeCoherence = ComputeCoherence;
+  WebRtcAec_UpdateCoherenceSpectra = UpdateCoherenceSpectra;
+  WebRtcAec_StoreAsComplex = StoreAsComplex;
+  WebRtcAec_PartitionDelay = PartitionDelay;
+  WebRtcAec_WindowData = WindowData;
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+  if (WebRtc_GetCPUInfo(kSSE2)) {
+    WebRtcAec_InitAec_SSE2();
+  }
+#endif
+
+#if defined(MIPS_FPU_LE)
+  WebRtcAec_InitAec_mips();
+#endif
+
+#if defined(WEBRTC_HAS_NEON)
+  WebRtcAec_InitAec_neon();
+#endif
+
+  return aec;
+}
+
+void WebRtcAec_FreeAec(AecCore* aec) {
+  if (aec == NULL) {
+    return;
+  }
+
+  WebRtc_FreeDelayEstimator(aec->delay_estimator);
+  WebRtc_FreeDelayEstimatorFarend(aec->delay_estimator_farend);
+
+  delete aec;
+}
+
+static void SetAdaptiveFilterStepSize(AecCore* aec) {
+  // Extended filter adaptation parameter.
+  // TODO(ajm): No narrowband tuning yet.
+  const float kExtendedMu = 0.4f;
+
+  if (aec->refined_adaptive_filter_enabled) {
+    aec->filter_step_size = 0.05f;
+  } else {
+    if (aec->extended_filter_enabled) {
+      aec->filter_step_size = kExtendedMu;
+    } else {
+      if (aec->sampFreq == 8000) {
+        aec->filter_step_size = 0.6f;
+      } else {
+        aec->filter_step_size = 0.5f;
+      }
+    }
+  }
+}
+
+static void SetErrorThreshold(AecCore* aec) {
+  // Extended filter adaptation parameter.
+  // TODO(ajm): No narrowband tuning yet.
+  static const float kExtendedErrorThreshold = 1.0e-6f;
+
+  if (aec->extended_filter_enabled) {
+    aec->error_threshold = kExtendedErrorThreshold;
+  } else {
+    if (aec->sampFreq == 8000) {
+      aec->error_threshold = 2e-6f;
+    } else {
+      aec->error_threshold = 1.5e-6f;
+    }
+  }
+}
+
+int WebRtcAec_InitAec(AecCore* aec, int sampFreq) {
+  int i;
+  aec->data_dumper->InitiateNewSetOfRecordings();
+
+  aec->sampFreq = sampFreq;
+
+  SetAdaptiveFilterStepSize(aec);
+  SetErrorThreshold(aec);
+
+  if (sampFreq == 8000) {
+    aec->num_bands = 1;
+  } else {
+    aec->num_bands = (size_t)(sampFreq / 16000);
+  }
+
+  // Start the output buffer with zeros to be able to produce
+  // a full output frame in the first frame.
+  aec->output_buffer_size = PART_LEN - (FRAME_LEN - PART_LEN);
+  memset(&aec->output_buffer[0], 0, sizeof(aec->output_buffer));
+  aec->nearend_buffer_size = 0;
+  memset(&aec->nearend_buffer[0], 0, sizeof(aec->nearend_buffer));
+
+  // Initialize far-end buffer.
+  aec->farend_block_buffer_.ReInit();
+
+  aec->system_delay = 0;
+
+  if (WebRtc_InitDelayEstimatorFarend(aec->delay_estimator_farend) != 0) {
+    return -1;
+  }
+  if (WebRtc_InitDelayEstimator(aec->delay_estimator) != 0) {
+    return -1;
+  }
+  aec->delay_logging_enabled = 0;
+  aec->delay_metrics_delivered = 0;
+  memset(aec->delay_histogram, 0, sizeof(aec->delay_histogram));
+  aec->num_delay_values = 0;
+  aec->delay_median = -1;
+  aec->delay_std = -1;
+  aec->fraction_poor_delays = -1.0f;
+
+  aec->previous_delay = -2;  // (-2): Uninitialized.
+  aec->delay_correction_count = 0;
+  aec->shift_offset = kInitialShiftOffset;
+  aec->delay_quality_threshold = kDelayQualityThresholdMin;
+
+  aec->num_partitions = kNormalNumPartitions;
+
+  // Update the delay estimator with filter length.  We use half the
+  // |num_partitions| to take the echo path into account.  In practice we say
+  // that the echo has a duration of maximum half |num_partitions|, which is not
+  // true, but serves as a crude measure.
+  WebRtc_set_allowed_offset(aec->delay_estimator, aec->num_partitions / 2);
+  // TODO(bjornv): I currently hard coded the enable.  Once we've established
+  // that AECM has no performance regression, robust_validation will be enabled
+  // all the time and the APIs to turn it on/off will be removed.  Hence, remove
+  // this line then.
+  WebRtc_enable_robust_validation(aec->delay_estimator, 1);
+  aec->frame_count = 0;
+
+  // Default target suppression mode.
+  aec->nlp_mode = 1;
+
+  // Sampling frequency multiplier w.r.t. 8 kHz.
+  // In case of multiple bands we process the lower band in 16 kHz, hence the
+  // multiplier is always 2.
+  if (aec->num_bands > 1) {
+    aec->mult = 2;
+  } else {
+    aec->mult = static_cast<int16_t>(aec->sampFreq) / 8000;
+  }
+
+  aec->farBufWritePos = 0;
+  aec->farBufReadPos = 0;
+
+  aec->inSamples = 0;
+  aec->outSamples = 0;
+  aec->knownDelay = 0;
+
+  // Initialize buffers
+  memset(aec->previous_nearend_block, 0, sizeof(aec->previous_nearend_block));
+  memset(aec->eBuf, 0, sizeof(aec->eBuf));
+
+  memset(aec->xPow, 0, sizeof(aec->xPow));
+  memset(aec->dPow, 0, sizeof(aec->dPow));
+  memset(aec->dInitMinPow, 0, sizeof(aec->dInitMinPow));
+  aec->noisePow = aec->dInitMinPow;
+  aec->noiseEstCtr = 0;
+
+  // Initial comfort noise power
+  for (i = 0; i < PART_LEN1; i++) {
+    aec->dMinPow[i] = 1.0e6f;
+  }
+
+  // Holds the last block written to
+  aec->xfBufBlockPos = 0;
+  // TODO(peah): Investigate need for these initializations. Deleting them
+  // doesn't change the output at all and yields 0.4% overall speedup.
+  memset(aec->xfBuf, 0, sizeof(complex_t) * kExtendedNumPartitions * PART_LEN1);
+  memset(aec->wfBuf, 0, sizeof(complex_t) * kExtendedNumPartitions * PART_LEN1);
+  memset(aec->coherence_state.sde, 0, sizeof(complex_t) * PART_LEN1);
+  memset(aec->coherence_state.sxd, 0, sizeof(complex_t) * PART_LEN1);
+  memset(aec->xfwBuf, 0,
+         sizeof(complex_t) * kExtendedNumPartitions * PART_LEN1);
+  memset(aec->coherence_state.se, 0, sizeof(float) * PART_LEN1);
+
+  // To prevent numerical instability in the first block.
+  for (i = 0; i < PART_LEN1; i++) {
+    aec->coherence_state.sd[i] = 1;
+  }
+  for (i = 0; i < PART_LEN1; i++) {
+    aec->coherence_state.sx[i] = 1;
+  }
+
+  memset(aec->hNs, 0, sizeof(aec->hNs));
+  memset(aec->outBuf, 0, sizeof(float) * PART_LEN);
+
+  aec->hNlFbMin = 1;
+  aec->hNlFbLocalMin = 1;
+  aec->hNlXdAvgMin = 1;
+  aec->hNlNewMin = 0;
+  aec->hNlMinCtr = 0;
+  aec->overDrive = 2;
+  aec->overdrive_scaling = 2;
+  aec->delayIdx = 0;
+  aec->stNearState = 0;
+  aec->echoState = 0;
+  aec->divergeState = 0;
+
+  aec->seed = 777;
+  aec->delayEstCtr = 0;
+
+  aec->extreme_filter_divergence = 0;
+
+  // Metrics disabled by default
+  aec->metricsMode = 0;
+  InitMetrics(aec);
+
+  return 0;
+}
+
+void WebRtcAec_BufferFarendBlock(AecCore* aec, const float* farend) {
+  // Check if the buffer is full, and in that case flush the oldest data.
+  if (aec->farend_block_buffer_.AvaliableSpace() < 1) {
+    aec->farend_block_buffer_.AdjustSize(1);
+  }
+  aec->farend_block_buffer_.Insert(farend);
+}
+
+int WebRtcAec_AdjustFarendBufferSizeAndSystemDelay(AecCore* aec,
+                                                   int buffer_size_decrease) {
+  int achieved_buffer_size_decrease =
+      aec->farend_block_buffer_.AdjustSize(buffer_size_decrease);
+  aec->system_delay -= achieved_buffer_size_decrease * PART_LEN;
+  return achieved_buffer_size_decrease;
+}
+
+void FormNearendBlock(
+    size_t nearend_start_index,
+    size_t num_bands,
+    const float* const* nearend_frame,
+    size_t num_samples_from_nearend_frame,
+    const float nearend_buffer[NUM_HIGH_BANDS_MAX + 1]
+                              [PART_LEN - (FRAME_LEN - PART_LEN)],
+    float nearend_block[NUM_HIGH_BANDS_MAX + 1][PART_LEN]) {
+  RTC_DCHECK_LE(num_samples_from_nearend_frame, PART_LEN);
+  const int num_samples_from_buffer = PART_LEN - num_samples_from_nearend_frame;
+
+  if (num_samples_from_buffer > 0) {
+    for (size_t i = 0; i < num_bands; ++i) {
+      memcpy(&nearend_block[i][0], &nearend_buffer[i][0],
+             num_samples_from_buffer * sizeof(float));
+    }
+  }
+
+  for (size_t i = 0; i < num_bands; ++i) {
+    memcpy(&nearend_block[i][num_samples_from_buffer],
+           &nearend_frame[i][nearend_start_index],
+           num_samples_from_nearend_frame * sizeof(float));
+  }
+}
+
+void BufferNearendFrame(
+    size_t nearend_start_index,
+    size_t num_bands,
+    const float* const* nearend_frame,
+    size_t num_samples_to_buffer,
+    float nearend_buffer[NUM_HIGH_BANDS_MAX + 1]
+                        [PART_LEN - (FRAME_LEN - PART_LEN)]) {
+  for (size_t i = 0; i < num_bands; ++i) {
+    memcpy(
+        &nearend_buffer[i][0],
+        &nearend_frame[i]
+                      [nearend_start_index + FRAME_LEN - num_samples_to_buffer],
+        num_samples_to_buffer * sizeof(float));
+  }
+}
+
+void BufferOutputBlock(size_t num_bands,
+                       const float output_block[NUM_HIGH_BANDS_MAX + 1]
+                                               [PART_LEN],
+                       size_t* output_buffer_size,
+                       float output_buffer[NUM_HIGH_BANDS_MAX + 1]
+                                          [2 * PART_LEN]) {
+  for (size_t i = 0; i < num_bands; ++i) {
+    memcpy(&output_buffer[i][*output_buffer_size], &output_block[i][0],
+           PART_LEN * sizeof(float));
+  }
+  (*output_buffer_size) += PART_LEN;
+}
+
+void FormOutputFrame(size_t output_start_index,
+                     size_t num_bands,
+                     size_t* output_buffer_size,
+                     float output_buffer[NUM_HIGH_BANDS_MAX + 1][2 * PART_LEN],
+                     float* const* output_frame) {
+  RTC_DCHECK_LE(FRAME_LEN, *output_buffer_size);
+  for (size_t i = 0; i < num_bands; ++i) {
+    memcpy(&output_frame[i][output_start_index], &output_buffer[i][0],
+           FRAME_LEN * sizeof(float));
+  }
+  (*output_buffer_size) -= FRAME_LEN;
+  if (*output_buffer_size > 0) {
+    RTC_DCHECK_GE(2 * PART_LEN - FRAME_LEN, (*output_buffer_size));
+    for (size_t i = 0; i < num_bands; ++i) {
+      memcpy(&output_buffer[i][0], &output_buffer[i][FRAME_LEN],
+             (*output_buffer_size) * sizeof(float));
+    }
+  }
+}
+
+void WebRtcAec_ProcessFrames(AecCore* aec,
+                             const float* const* nearend,
+                             size_t num_bands,
+                             size_t num_samples,
+                             int knownDelay,
+                             float* const* out) {
+  RTC_DCHECK(num_samples == 80 || num_samples == 160);
+
+  aec->frame_count++;
+  // For each frame the process is as follows:
+  // 1) If the system_delay indicates on being too small for processing a
+  //    frame we stuff the buffer with enough data for 10 ms.
+  // 2 a) Adjust the buffer to the system delay, by moving the read pointer.
+  //   b) Apply signal based delay correction, if we have detected poor AEC
+  //    performance.
+  // 3) TODO(bjornv): Investigate if we need to add this:
+  //    If we can't move read pointer due to buffer size limitations we
+  //    flush/stuff the buffer.
+  // 4) Process as many partitions as possible.
+  // 5) Update the |system_delay| with respect to a full frame of FRAME_LEN
+  //    samples. Even though we will have data left to process (we work with
+  //    partitions) we consider updating a whole frame, since that's the
+  //    amount of data we input and output in audio_processing.
+  // 6) Update the outputs.
+
+  // The AEC has two different delay estimation algorithms built in.  The
+  // first relies on delay input values from the user and the amount of
+  // shifted buffer elements is controlled by |knownDelay|.  This delay will
+  // give a guess on how much we need to shift far-end buffers to align with
+  // the near-end signal.  The other delay estimation algorithm uses the
+  // far- and near-end signals to find the offset between them.  This one
+  // (called "signal delay") is then used to fine tune the alignment, or
+  // simply compensate for errors in the system based one.
+  // Note that the two algorithms operate independently.  Currently, we only
+  // allow one algorithm to be turned on.
+
+  RTC_DCHECK_EQ(aec->num_bands, num_bands);
+
+  for (size_t j = 0; j < num_samples; j += FRAME_LEN) {
+    // 1) At most we process |aec->mult|+1 partitions in 10 ms. Make sure we
+    // have enough far-end data for that by stuffing the buffer if the
+    // |system_delay| indicates others.
+    if (aec->system_delay < FRAME_LEN) {
+      // We don't have enough data so we rewind 10 ms.
+      WebRtcAec_AdjustFarendBufferSizeAndSystemDelay(aec, -(aec->mult + 1));
+    }
+
+    if (!aec->delay_agnostic_enabled) {
+      // 2 a) Compensate for a possible change in the system delay.
+
+      // TODO(bjornv): Investigate how we should round the delay difference;
+      // right now we know that incoming |knownDelay| is underestimated when
+      // it's less than |aec->knownDelay|. We therefore, round (-32) in that
+      // direction. In the other direction, we don't have this situation, but
+      // might flush one partition too little. This can cause non-causality,
+      // which should be investigated. Maybe, allow for a non-symmetric
+      // rounding, like -16.
+      int move_elements = (aec->knownDelay - knownDelay - 32) / PART_LEN;
+      int moved_elements = aec->farend_block_buffer_.AdjustSize(move_elements);
+      MaybeLogDelayAdjustment(moved_elements * (aec->sampFreq == 8000 ? 8 : 4),
+                              DelaySource::kSystemDelay);
+      aec->knownDelay -= moved_elements * PART_LEN;
+    } else {
+      // 2 b) Apply signal based delay correction.
+      int move_elements = SignalBasedDelayCorrection(aec);
+      int moved_elements = aec->farend_block_buffer_.AdjustSize(move_elements);
+      MaybeLogDelayAdjustment(moved_elements * (aec->sampFreq == 8000 ? 8 : 4),
+                              DelaySource::kDelayAgnostic);
+      int far_near_buffer_diff =
+          aec->farend_block_buffer_.Size() -
+          (aec->nearend_buffer_size + FRAME_LEN) / PART_LEN;
+      WebRtc_SoftResetDelayEstimator(aec->delay_estimator, moved_elements);
+      WebRtc_SoftResetDelayEstimatorFarend(aec->delay_estimator_farend,
+                                           moved_elements);
+      // If we rely on reported system delay values only, a buffer underrun here
+      // can never occur since we've taken care of that in 1) above.  Here, we
+      // apply signal based delay correction and can therefore end up with
+      // buffer underruns since the delay estimation can be wrong.  We therefore
+      // stuff the buffer with enough elements if needed.
+      if (far_near_buffer_diff < 0) {
+        WebRtcAec_AdjustFarendBufferSizeAndSystemDelay(aec,
+                                                       far_near_buffer_diff);
+      }
+    }
+
+    static_assert(
+        16 == (FRAME_LEN - PART_LEN),
+        "These constants need to be properly related for this code to work");
+    float output_block[NUM_HIGH_BANDS_MAX + 1][PART_LEN];
+    float nearend_block[NUM_HIGH_BANDS_MAX + 1][PART_LEN];
+    float farend_extended_block_lowest_band[PART_LEN2];
+
+    // Form and process a block of nearend samples, buffer the output block of
+    // samples.
+    aec->farend_block_buffer_.ExtractExtendedBlock(
+        farend_extended_block_lowest_band);
+    FormNearendBlock(j, num_bands, nearend, PART_LEN - aec->nearend_buffer_size,
+                     aec->nearend_buffer, nearend_block);
+    ProcessNearendBlock(aec, farend_extended_block_lowest_band, nearend_block,
+                        output_block);
+    BufferOutputBlock(num_bands, output_block, &aec->output_buffer_size,
+                      aec->output_buffer);
+
+    if ((FRAME_LEN - PART_LEN + aec->nearend_buffer_size) == PART_LEN) {
+      // When possible (every fourth frame) form and process a second block of
+      // nearend samples, buffer the output block of samples.
+      aec->farend_block_buffer_.ExtractExtendedBlock(
+          farend_extended_block_lowest_band);
+      FormNearendBlock(j + FRAME_LEN - PART_LEN, num_bands, nearend, PART_LEN,
+                       aec->nearend_buffer, nearend_block);
+      ProcessNearendBlock(aec, farend_extended_block_lowest_band, nearend_block,
+                          output_block);
+      BufferOutputBlock(num_bands, output_block, &aec->output_buffer_size,
+                        aec->output_buffer);
+
+      // Reset the buffer size as there are no samples left in the nearend input
+      // to buffer.
+      aec->nearend_buffer_size = 0;
+    } else {
+      // Buffer the remaining samples in the nearend input.
+      aec->nearend_buffer_size += FRAME_LEN - PART_LEN;
+      BufferNearendFrame(j, num_bands, nearend, aec->nearend_buffer_size,
+                         aec->nearend_buffer);
+    }
+
+    // 5) Update system delay with respect to the entire frame.
+    aec->system_delay -= FRAME_LEN;
+
+    // 6) Form the output frame.
+    FormOutputFrame(j, num_bands, &aec->output_buffer_size, aec->output_buffer,
+                    out);
+  }
+}
+
+int WebRtcAec_GetDelayMetricsCore(AecCore* self,
+                                  int* median,
+                                  int* std,
+                                  float* fraction_poor_delays) {
+  RTC_DCHECK(self);
+  RTC_DCHECK(median);
+  RTC_DCHECK(std);
+
+  if (self->delay_logging_enabled == 0) {
+    // Logging disabled.
+    return -1;
+  }
+
+  if (self->delay_metrics_delivered == 0) {
+    UpdateDelayMetrics(self);
+    self->delay_metrics_delivered = 1;
+  }
+  *median = self->delay_median;
+  *std = self->delay_std;
+  *fraction_poor_delays = self->fraction_poor_delays;
+
+  return 0;
+}
+
+int WebRtcAec_echo_state(AecCore* self) {
+  return self->echoState;
+}
+
+void WebRtcAec_GetEchoStats(AecCore* self,
+                            Stats* erl,
+                            Stats* erle,
+                            Stats* a_nlp,
+                            float* divergent_filter_fraction) {
+  RTC_DCHECK(erl);
+  RTC_DCHECK(erle);
+  RTC_DCHECK(a_nlp);
+  *erl = self->erl;
+  *erle = self->erle;
+  *a_nlp = self->aNlp;
+  *divergent_filter_fraction =
+      self->divergent_filter_fraction.GetLatestFraction();
+}
+
+void WebRtcAec_SetConfigCore(AecCore* self,
+                             int nlp_mode,
+                             int metrics_mode,
+                             int delay_logging) {
+  RTC_DCHECK_GE(nlp_mode, 0);
+  RTC_DCHECK_LT(nlp_mode, 3);
+  self->nlp_mode = nlp_mode;
+  self->metricsMode = metrics_mode;
+  if (self->metricsMode) {
+    InitMetrics(self);
+  }
+  // Turn on delay logging if it is either set explicitly or if delay agnostic
+  // AEC is enabled (which requires delay estimates).
+  self->delay_logging_enabled = delay_logging || self->delay_agnostic_enabled;
+  if (self->delay_logging_enabled) {
+    memset(self->delay_histogram, 0, sizeof(self->delay_histogram));
+  }
+}
+
+void WebRtcAec_enable_delay_agnostic(AecCore* self, int enable) {
+  self->delay_agnostic_enabled = enable;
+}
+
+int WebRtcAec_delay_agnostic_enabled(AecCore* self) {
+  return self->delay_agnostic_enabled;
+}
+
+void WebRtcAec_enable_refined_adaptive_filter(AecCore* self, bool enable) {
+  self->refined_adaptive_filter_enabled = enable;
+  SetAdaptiveFilterStepSize(self);
+  SetErrorThreshold(self);
+}
+
+bool WebRtcAec_refined_adaptive_filter_enabled(const AecCore* self) {
+  return self->refined_adaptive_filter_enabled;
+}
+
+void WebRtcAec_enable_extended_filter(AecCore* self, int enable) {
+  self->extended_filter_enabled = enable;
+  SetAdaptiveFilterStepSize(self);
+  SetErrorThreshold(self);
+  self->num_partitions = enable ? kExtendedNumPartitions : kNormalNumPartitions;
+  // Update the delay estimator with filter length.  See InitAEC() for details.
+  WebRtc_set_allowed_offset(self->delay_estimator, self->num_partitions / 2);
+}
+
+int WebRtcAec_extended_filter_enabled(AecCore* self) {
+  return self->extended_filter_enabled;
+}
+
+int WebRtcAec_system_delay(AecCore* self) {
+  return self->system_delay;
+}
+
+void WebRtcAec_SetSystemDelay(AecCore* self, int delay) {
+  RTC_DCHECK_GE(delay, 0);
+  self->system_delay = delay;
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec/aec_core.h b/modules/audio_processing/aec/aec_core.h
new file mode 100644
index 0000000..78596ec
--- /dev/null
+++ b/modules/audio_processing/aec/aec_core.h
@@ -0,0 +1,335 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * Specifies the interface for the AEC core.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_H_
+#define MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_H_
+
+#include <stddef.h>
+
+#include <memory>
+
+extern "C" {
+#include "common_audio/ring_buffer.h"
+}
+#include "common_audio/wav_file.h"
+#include "modules/audio_processing/aec/aec_common.h"
+#include "modules/audio_processing/utility/block_mean_calculator.h"
+#include "modules/audio_processing/utility/ooura_fft.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+#define FRAME_LEN 80
+#define PART_LEN 64               // Length of partition
+#define PART_LEN1 (PART_LEN + 1)  // Unique fft coefficients
+#define PART_LEN2 (PART_LEN * 2)  // Length of partition * 2
+#define NUM_HIGH_BANDS_MAX 2      // Max number of high bands
+
+class ApmDataDumper;
+
+typedef float complex_t[2];
+// For performance reasons, some arrays of complex numbers are replaced by twice
+// as long arrays of float, all the real parts followed by all the imaginary
+// ones (complex_t[SIZE] -> float[2][SIZE]). This allows SIMD optimizations and
+// is better than two arrays (one for the real parts and one for the imaginary
+// parts) as this other way would require two pointers instead of one and cause
+// extra register spilling. This also allows the offsets to be calculated at
+// compile time.
+
+// Metrics
+enum { kOffsetLevel = -100 };
+
+typedef struct Stats {
+  float instant;
+  float average;
+  float min;
+  float max;
+  float sum;
+  float hisum;
+  float himean;
+  size_t counter;
+  size_t hicounter;
+} Stats;
+
+// Number of partitions for the extended filter mode. The first one is an enum
+// to be used in array declarations, as it represents the maximum filter length.
+enum { kExtendedNumPartitions = 32 };
+static const int kNormalNumPartitions = 12;
+
+// Delay estimator constants, used for logging and delay compensation if
+// if reported delays are disabled.
+enum { kLookaheadBlocks = 15 };
+enum {
+  // 500 ms for 16 kHz which is equivalent with the limit of reported delays.
+  kHistorySizeBlocks = 125
+};
+
+typedef struct PowerLevel {
+  PowerLevel();
+
+  BlockMeanCalculator framelevel;
+  BlockMeanCalculator averagelevel;
+  float minlevel;
+} PowerLevel;
+
+class BlockBuffer {
+ public:
+  BlockBuffer();
+  ~BlockBuffer();
+  void ReInit();
+  void Insert(const float block[PART_LEN]);
+  void ExtractExtendedBlock(float extended_block[PART_LEN]);
+  int AdjustSize(int buffer_size_decrease);
+  size_t Size();
+  size_t AvaliableSpace();
+
+ private:
+  RingBuffer* buffer_;
+};
+
+class DivergentFilterFraction {
+ public:
+  DivergentFilterFraction();
+
+  // Reset.
+  void Reset();
+
+  void AddObservation(const PowerLevel& nearlevel,
+                      const PowerLevel& linoutlevel,
+                      const PowerLevel& nlpoutlevel);
+
+  // Return the latest fraction.
+  float GetLatestFraction() const;
+
+ private:
+  // Clear all values added.
+  void Clear();
+
+  size_t count_;
+  size_t occurrence_;
+  float fraction_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(DivergentFilterFraction);
+};
+
+typedef struct CoherenceState {
+  complex_t sde[PART_LEN1];  // cross-psd of nearend and error
+  complex_t sxd[PART_LEN1];  // cross-psd of farend and nearend
+  float sx[PART_LEN1], sd[PART_LEN1], se[PART_LEN1];  // far, near, error psd
+} CoherenceState;
+
+struct AecCore {
+  explicit AecCore(int instance_index);
+  ~AecCore();
+
+  std::unique_ptr<ApmDataDumper> data_dumper;
+  const OouraFft ooura_fft;
+
+  CoherenceState coherence_state;
+
+  int farBufWritePos, farBufReadPos;
+
+  int knownDelay;
+  int inSamples, outSamples;
+  int delayEstCtr;
+
+  // Nearend buffer used for changing from FRAME_LEN to PART_LEN sample block
+  // sizes. The buffer stores all the incoming bands and for each band a maximum
+  // of PART_LEN - (FRAME_LEN - PART_LEN) values need to be buffered in order to
+  // change the block size from FRAME_LEN to PART_LEN.
+  float nearend_buffer[NUM_HIGH_BANDS_MAX + 1]
+                      [PART_LEN - (FRAME_LEN - PART_LEN)];
+  size_t nearend_buffer_size;
+  float output_buffer[NUM_HIGH_BANDS_MAX + 1][2 * PART_LEN];
+  size_t output_buffer_size;
+
+  float eBuf[PART_LEN2];  // error
+
+  float previous_nearend_block[NUM_HIGH_BANDS_MAX + 1][PART_LEN];
+
+  float xPow[PART_LEN1];
+  float dPow[PART_LEN1];
+  float dMinPow[PART_LEN1];
+  float dInitMinPow[PART_LEN1];
+  float* noisePow;
+
+  float xfBuf[2][kExtendedNumPartitions * PART_LEN1];  // farend fft buffer
+  float wfBuf[2][kExtendedNumPartitions * PART_LEN1];  // filter fft
+  // Farend windowed fft buffer.
+  complex_t xfwBuf[kExtendedNumPartitions * PART_LEN1];
+
+  float hNs[PART_LEN1];
+  float hNlFbMin, hNlFbLocalMin;
+  float hNlXdAvgMin;
+  int hNlNewMin, hNlMinCtr;
+  float overDrive;
+  float overdrive_scaling;
+  int nlp_mode;
+  float outBuf[PART_LEN];
+  int delayIdx;
+
+  short stNearState, echoState;
+  short divergeState;
+
+  int xfBufBlockPos;
+
+  BlockBuffer farend_block_buffer_;
+
+  int system_delay;  // Current system delay buffered in AEC.
+
+  int mult;  // sampling frequency multiple
+  int sampFreq = 16000;
+  size_t num_bands;
+  uint32_t seed;
+
+  float filter_step_size;  // stepsize
+  float error_threshold;   // error threshold
+
+  int noiseEstCtr;
+
+  PowerLevel farlevel;
+  PowerLevel nearlevel;
+  PowerLevel linoutlevel;
+  PowerLevel nlpoutlevel;
+
+  int metricsMode;
+  int stateCounter;
+  Stats erl;
+  Stats erle;
+  Stats aNlp;
+  Stats rerl;
+  DivergentFilterFraction divergent_filter_fraction;
+
+  // Quantities to control H band scaling for SWB input
+  int freq_avg_ic;       // initial bin for averaging nlp gain
+  int flag_Hband_cn;     // for comfort noise
+  float cn_scale_Hband;  // scale for comfort noise in H band
+
+  int delay_metrics_delivered;
+  int delay_histogram[kHistorySizeBlocks];
+  int num_delay_values;
+  int delay_median;
+  int delay_std;
+  float fraction_poor_delays;
+  int delay_logging_enabled;
+  void* delay_estimator_farend;
+  void* delay_estimator;
+  // Variables associated with delay correction through signal based delay
+  // estimation feedback.
+  int previous_delay;
+  int delay_correction_count;
+  int shift_offset;
+  float delay_quality_threshold;
+  int frame_count;
+
+  // 0 = delay agnostic mode (signal based delay correction) disabled.
+  // Otherwise enabled.
+  int delay_agnostic_enabled;
+  // 1 = extended filter mode enabled, 0 = disabled.
+  int extended_filter_enabled;
+  // 1 = refined filter adaptation aec mode enabled, 0 = disabled.
+  bool refined_adaptive_filter_enabled;
+
+  // Runtime selection of number of filter partitions.
+  int num_partitions;
+
+  // Flag that extreme filter divergence has been detected by the Echo
+  // Suppressor.
+  int extreme_filter_divergence;
+};
+
+AecCore* WebRtcAec_CreateAec(int instance_count);  // Returns NULL on error.
+void WebRtcAec_FreeAec(AecCore* aec);
+int WebRtcAec_InitAec(AecCore* aec, int sampFreq);
+void WebRtcAec_InitAec_SSE2(void);
+#if defined(MIPS_FPU_LE)
+void WebRtcAec_InitAec_mips(void);
+#endif
+#if defined(WEBRTC_HAS_NEON)
+void WebRtcAec_InitAec_neon(void);
+#endif
+
+void WebRtcAec_BufferFarendBlock(AecCore* aec, const float* farend);
+void WebRtcAec_ProcessFrames(AecCore* aec,
+                             const float* const* nearend,
+                             size_t num_bands,
+                             size_t num_samples,
+                             int knownDelay,
+                             float* const* out);
+
+// A helper function to call adjust the farend buffer size.
+// Returns the number of elements the size was decreased with, and adjusts
+// |system_delay| by the corresponding amount in ms.
+int WebRtcAec_AdjustFarendBufferSizeAndSystemDelay(AecCore* aec,
+                                                   int size_decrease);
+
+// Calculates the median, standard deviation and amount of poor values among the
+// delay estimates aggregated up to the first call to the function. After that
+// first call the metrics are aggregated and updated every second. With poor
+// values we mean values that most likely will cause the AEC to perform poorly.
+// TODO(bjornv): Consider changing tests and tools to handle constant
+// constant aggregation window throughout the session instead.
+int WebRtcAec_GetDelayMetricsCore(AecCore* self,
+                                  int* median,
+                                  int* std,
+                                  float* fraction_poor_delays);
+
+// Returns the echo state (1: echo, 0: no echo).
+int WebRtcAec_echo_state(AecCore* self);
+
+// Gets statistics of the echo metrics ERL, ERLE, A_NLP.
+void WebRtcAec_GetEchoStats(AecCore* self,
+                            Stats* erl,
+                            Stats* erle,
+                            Stats* a_nlp,
+                            float* divergent_filter_fraction);
+
+// Sets local configuration modes.
+void WebRtcAec_SetConfigCore(AecCore* self,
+                             int nlp_mode,
+                             int metrics_mode,
+                             int delay_logging);
+
+// Non-zero enables, zero disables.
+void WebRtcAec_enable_delay_agnostic(AecCore* self, int enable);
+
+// Returns non-zero if delay agnostic (i.e., signal based delay estimation) is
+// enabled and zero if disabled.
+int WebRtcAec_delay_agnostic_enabled(AecCore* self);
+
+// Turns on/off the refined adaptive filter feature.
+void WebRtcAec_enable_refined_adaptive_filter(AecCore* self, bool enable);
+
+// Returns whether the refined adaptive filter is enabled.
+bool WebRtcAec_refined_adaptive_filter(const AecCore* self);
+
+// Enables or disables extended filter mode. Non-zero enables, zero disables.
+void WebRtcAec_enable_extended_filter(AecCore* self, int enable);
+
+// Returns non-zero if extended filter mode is enabled and zero if disabled.
+int WebRtcAec_extended_filter_enabled(AecCore* self);
+
+// Returns the current |system_delay|, i.e., the buffered difference between
+// far-end and near-end.
+int WebRtcAec_system_delay(AecCore* self);
+
+// Sets the |system_delay| to |value|.  Note that if the value is changed
+// improperly, there can be a performance regression.  So it should be used with
+// care.
+void WebRtcAec_SetSystemDelay(AecCore* self, int delay);
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_H_
diff --git a/modules/audio_processing/aec/aec_core_mips.cc b/modules/audio_processing/aec/aec_core_mips.cc
new file mode 100644
index 0000000..ebe6349
--- /dev/null
+++ b/modules/audio_processing/aec/aec_core_mips.cc
@@ -0,0 +1,490 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * The core AEC algorithm, which is presented with time-aligned signals.
+ */
+
+#include "modules/audio_processing/aec/aec_core.h"
+
+#include <math.h>
+
+extern "C" {
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+}
+#include "modules/audio_processing/aec/aec_core_optimized_methods.h"
+#include "modules/audio_processing/utility/ooura_fft.h"
+
+namespace webrtc {
+
+extern const float WebRtcAec_weightCurve[65];
+extern const float WebRtcAec_overDriveCurve[65];
+
+void WebRtcAec_FilterFar_mips(
+    int num_partitions,
+    int x_fft_buf_block_pos,
+    float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+    float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+    float y_fft[2][PART_LEN1]) {
+  int i;
+  for (i = 0; i < num_partitions; i++) {
+    int xPos = (i + x_fft_buf_block_pos) * PART_LEN1;
+    int pos = i * PART_LEN1;
+    // Check for wrap
+    if (i + x_fft_buf_block_pos >= num_partitions) {
+      xPos -= num_partitions * (PART_LEN1);
+    }
+    float* yf0 = y_fft[0];
+    float* yf1 = y_fft[1];
+    float* aRe = x_fft_buf[0] + xPos;
+    float* aIm = x_fft_buf[1] + xPos;
+    float* bRe = h_fft_buf[0] + pos;
+    float* bIm = h_fft_buf[1] + pos;
+    float f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13;
+    int len = PART_LEN1 >> 1;
+
+    __asm __volatile(
+      ".set       push                                                \n\t"
+      ".set       noreorder                                           \n\t"
+      "1:                                                             \n\t"
+      "lwc1       %[f0],      0(%[aRe])                               \n\t"
+      "lwc1       %[f1],      0(%[bRe])                               \n\t"
+      "lwc1       %[f2],      0(%[bIm])                               \n\t"
+      "lwc1       %[f3],      0(%[aIm])                               \n\t"
+      "lwc1       %[f4],      4(%[aRe])                               \n\t"
+      "lwc1       %[f5],      4(%[bRe])                               \n\t"
+      "lwc1       %[f6],      4(%[bIm])                               \n\t"
+      "mul.s      %[f8],      %[f0],          %[f1]                   \n\t"
+      "mul.s      %[f0],      %[f0],          %[f2]                   \n\t"
+      "mul.s      %[f9],      %[f4],          %[f5]                   \n\t"
+      "mul.s      %[f4],      %[f4],          %[f6]                   \n\t"
+      "lwc1       %[f7],      4(%[aIm])                               \n\t"
+#if !defined(MIPS32_R2_LE)
+      "mul.s      %[f12],     %[f2],          %[f3]                   \n\t"
+      "mul.s      %[f1],      %[f3],          %[f1]                   \n\t"
+      "mul.s      %[f11],     %[f6],          %[f7]                   \n\t"
+      "addiu      %[aRe],     %[aRe],         8                       \n\t"
+      "addiu      %[aIm],     %[aIm],         8                       \n\t"
+      "addiu      %[len],     %[len],         -1                      \n\t"
+      "sub.s      %[f8],      %[f8],          %[f12]                  \n\t"
+      "mul.s      %[f12],     %[f7],          %[f5]                   \n\t"
+      "lwc1       %[f2],      0(%[yf0])                               \n\t"
+      "add.s      %[f1],      %[f0],          %[f1]                   \n\t"
+      "lwc1       %[f3],      0(%[yf1])                               \n\t"
+      "sub.s      %[f9],      %[f9],          %[f11]                  \n\t"
+      "lwc1       %[f6],      4(%[yf0])                               \n\t"
+      "add.s      %[f4],      %[f4],          %[f12]                  \n\t"
+#else  // #if !defined(MIPS32_R2_LE)
+      "addiu      %[aRe],     %[aRe],         8                       \n\t"
+      "addiu      %[aIm],     %[aIm],         8                       \n\t"
+      "addiu      %[len],     %[len],         -1                      \n\t"
+      "nmsub.s    %[f8],      %[f8],          %[f2],      %[f3]       \n\t"
+      "lwc1       %[f2],      0(%[yf0])                               \n\t"
+      "madd.s     %[f1],      %[f0],          %[f3],      %[f1]       \n\t"
+      "lwc1       %[f3],      0(%[yf1])                               \n\t"
+      "nmsub.s    %[f9],      %[f9],          %[f6],      %[f7]       \n\t"
+      "lwc1       %[f6],      4(%[yf0])                               \n\t"
+      "madd.s     %[f4],      %[f4],          %[f7],      %[f5]       \n\t"
+#endif  // #if !defined(MIPS32_R2_LE)
+      "lwc1       %[f5],      4(%[yf1])                               \n\t"
+      "add.s      %[f2],      %[f2],          %[f8]                   \n\t"
+      "addiu      %[bRe],     %[bRe],         8                       \n\t"
+      "addiu      %[bIm],     %[bIm],         8                       \n\t"
+      "add.s      %[f3],      %[f3],          %[f1]                   \n\t"
+      "add.s      %[f6],      %[f6],          %[f9]                   \n\t"
+      "add.s      %[f5],      %[f5],          %[f4]                   \n\t"
+      "swc1       %[f2],      0(%[yf0])                               \n\t"
+      "swc1       %[f3],      0(%[yf1])                               \n\t"
+      "swc1       %[f6],      4(%[yf0])                               \n\t"
+      "swc1       %[f5],      4(%[yf1])                               \n\t"
+      "addiu      %[yf0],     %[yf0],         8                       \n\t"
+      "bgtz       %[len],     1b                                      \n\t"
+      " addiu     %[yf1],     %[yf1],         8                       \n\t"
+      "lwc1       %[f0],      0(%[aRe])                               \n\t"
+      "lwc1       %[f1],      0(%[bRe])                               \n\t"
+      "lwc1       %[f2],      0(%[bIm])                               \n\t"
+      "lwc1       %[f3],      0(%[aIm])                               \n\t"
+      "mul.s      %[f8],      %[f0],          %[f1]                   \n\t"
+      "mul.s      %[f0],      %[f0],          %[f2]                   \n\t"
+#if !defined(MIPS32_R2_LE)
+      "mul.s      %[f12],     %[f2],          %[f3]                   \n\t"
+      "mul.s      %[f1],      %[f3],          %[f1]                   \n\t"
+      "sub.s      %[f8],      %[f8],          %[f12]                  \n\t"
+      "lwc1       %[f2],      0(%[yf0])                               \n\t"
+      "add.s      %[f1],      %[f0],          %[f1]                   \n\t"
+      "lwc1       %[f3],      0(%[yf1])                               \n\t"
+#else  // #if !defined(MIPS32_R2_LE)
+      "nmsub.s    %[f8],      %[f8],          %[f2],      %[f3]       \n\t"
+      "lwc1       %[f2],      0(%[yf0])                               \n\t"
+      "madd.s     %[f1],      %[f0],          %[f3],      %[f1]       \n\t"
+      "lwc1       %[f3],      0(%[yf1])                               \n\t"
+#endif  // #if !defined(MIPS32_R2_LE)
+      "add.s      %[f2],      %[f2],          %[f8]                   \n\t"
+      "add.s      %[f3],      %[f3],          %[f1]                   \n\t"
+      "swc1       %[f2],      0(%[yf0])                               \n\t"
+      "swc1       %[f3],      0(%[yf1])                               \n\t"
+      ".set       pop                                                 \n\t"
+      : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2),
+        [f3] "=&f" (f3), [f4] "=&f" (f4), [f5] "=&f" (f5),
+        [f6] "=&f" (f6), [f7] "=&f" (f7), [f8] "=&f" (f8),
+        [f9] "=&f" (f9), [f10] "=&f" (f10), [f11] "=&f" (f11),
+        [f12] "=&f" (f12), [f13] "=&f" (f13), [aRe] "+r" (aRe),
+        [aIm] "+r" (aIm), [bRe] "+r" (bRe), [bIm] "+r" (bIm),
+        [yf0] "+r" (yf0), [yf1] "+r" (yf1), [len] "+r" (len)
+      :
+      : "memory");
+  }
+}
+
+void WebRtcAec_FilterAdaptation_mips(
+    const OouraFft& ooura_fft,
+    int num_partitions,
+    int x_fft_buf_block_pos,
+    float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+    float e_fft[2][PART_LEN1],
+    float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]) {
+  float fft[PART_LEN2];
+  int i;
+  for (i = 0; i < num_partitions; i++) {
+    int xPos = (i + x_fft_buf_block_pos) * (PART_LEN1);
+    int pos;
+    // Check for wrap
+    if (i + x_fft_buf_block_pos >= num_partitions) {
+      xPos -= num_partitions * PART_LEN1;
+    }
+
+    pos = i * PART_LEN1;
+    float* aRe = x_fft_buf[0] + xPos;
+    float* aIm = x_fft_buf[1] + xPos;
+    float* bRe = e_fft[0];
+    float* bIm = e_fft[1];
+    float* fft_tmp;
+
+    float f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12;
+    int len = PART_LEN >> 1;
+
+    __asm __volatile(
+      ".set       push                                                \n\t"
+      ".set       noreorder                                           \n\t"
+      "addiu      %[fft_tmp], %[fft],         0                       \n\t"
+      "1:                                                             \n\t"
+      "lwc1       %[f0],      0(%[aRe])                               \n\t"
+      "lwc1       %[f1],      0(%[bRe])                               \n\t"
+      "lwc1       %[f2],      0(%[bIm])                               \n\t"
+      "lwc1       %[f4],      4(%[aRe])                               \n\t"
+      "lwc1       %[f5],      4(%[bRe])                               \n\t"
+      "lwc1       %[f6],      4(%[bIm])                               \n\t"
+      "addiu      %[aRe],     %[aRe],         8                       \n\t"
+      "addiu      %[bRe],     %[bRe],         8                       \n\t"
+      "mul.s      %[f8],      %[f0],          %[f1]                   \n\t"
+      "mul.s      %[f0],      %[f0],          %[f2]                   \n\t"
+      "lwc1       %[f3],      0(%[aIm])                               \n\t"
+      "mul.s      %[f9],      %[f4],          %[f5]                   \n\t"
+      "lwc1       %[f7],      4(%[aIm])                               \n\t"
+      "mul.s      %[f4],      %[f4],          %[f6]                   \n\t"
+#if !defined(MIPS32_R2_LE)
+      "mul.s      %[f10],     %[f3],          %[f2]                   \n\t"
+      "mul.s      %[f1],      %[f3],          %[f1]                   \n\t"
+      "mul.s      %[f11],     %[f7],          %[f6]                   \n\t"
+      "mul.s      %[f5],      %[f7],          %[f5]                   \n\t"
+      "addiu      %[aIm],     %[aIm],         8                       \n\t"
+      "addiu      %[bIm],     %[bIm],         8                       \n\t"
+      "addiu      %[len],     %[len],         -1                      \n\t"
+      "add.s      %[f8],      %[f8],          %[f10]                  \n\t"
+      "sub.s      %[f1],      %[f0],          %[f1]                   \n\t"
+      "add.s      %[f9],      %[f9],          %[f11]                  \n\t"
+      "sub.s      %[f5],      %[f4],          %[f5]                   \n\t"
+#else  // #if !defined(MIPS32_R2_LE)
+      "addiu      %[aIm],     %[aIm],         8                       \n\t"
+      "addiu      %[bIm],     %[bIm],         8                       \n\t"
+      "addiu      %[len],     %[len],         -1                      \n\t"
+      "madd.s     %[f8],      %[f8],          %[f3],      %[f2]       \n\t"
+      "nmsub.s    %[f1],      %[f0],          %[f3],      %[f1]       \n\t"
+      "madd.s     %[f9],      %[f9],          %[f7],      %[f6]       \n\t"
+      "nmsub.s    %[f5],      %[f4],          %[f7],      %[f5]       \n\t"
+#endif  // #if !defined(MIPS32_R2_LE)
+      "swc1       %[f8],      0(%[fft_tmp])                           \n\t"
+      "swc1       %[f1],      4(%[fft_tmp])                           \n\t"
+      "swc1       %[f9],      8(%[fft_tmp])                           \n\t"
+      "swc1       %[f5],      12(%[fft_tmp])                          \n\t"
+      "bgtz       %[len],     1b                                      \n\t"
+      " addiu     %[fft_tmp], %[fft_tmp],     16                      \n\t"
+      "lwc1       %[f0],      0(%[aRe])                               \n\t"
+      "lwc1       %[f1],      0(%[bRe])                               \n\t"
+      "lwc1       %[f2],      0(%[bIm])                               \n\t"
+      "lwc1       %[f3],      0(%[aIm])                               \n\t"
+      "mul.s      %[f8],      %[f0],          %[f1]                   \n\t"
+#if !defined(MIPS32_R2_LE)
+      "mul.s      %[f10],     %[f3],          %[f2]                   \n\t"
+      "add.s      %[f8],      %[f8],          %[f10]                  \n\t"
+#else  // #if !defined(MIPS32_R2_LE)
+      "madd.s     %[f8],      %[f8],          %[f3],      %[f2]       \n\t"
+#endif  // #if !defined(MIPS32_R2_LE)
+      "swc1       %[f8],      4(%[fft])                               \n\t"
+      ".set       pop                                                 \n\t"
+      : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2),
+        [f3] "=&f" (f3), [f4] "=&f" (f4), [f5] "=&f" (f5),
+        [f6] "=&f" (f6), [f7] "=&f" (f7), [f8] "=&f" (f8),
+        [f9] "=&f" (f9), [f10] "=&f" (f10), [f11] "=&f" (f11),
+        [f12] "=&f" (f12), [aRe] "+r" (aRe), [aIm] "+r" (aIm),
+        [bRe] "+r" (bRe), [bIm] "+r" (bIm), [fft_tmp] "=&r" (fft_tmp),
+        [len] "+r" (len)
+      : [fft] "r" (fft)
+      : "memory");
+
+    ooura_fft.InverseFft(fft);
+    memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN);
+
+    // fft scaling
+    {
+      float scale = 2.0f / PART_LEN2;
+      __asm __volatile(
+        ".set     push                                    \n\t"
+        ".set     noreorder                               \n\t"
+        "addiu    %[fft_tmp], %[fft],        0            \n\t"
+        "addiu    %[len],     $zero,         8            \n\t"
+        "1:                                               \n\t"
+        "addiu    %[len],     %[len],        -1           \n\t"
+        "lwc1     %[f0],      0(%[fft_tmp])               \n\t"
+        "lwc1     %[f1],      4(%[fft_tmp])               \n\t"
+        "lwc1     %[f2],      8(%[fft_tmp])               \n\t"
+        "lwc1     %[f3],      12(%[fft_tmp])              \n\t"
+        "mul.s    %[f0],      %[f0],         %[scale]     \n\t"
+        "mul.s    %[f1],      %[f1],         %[scale]     \n\t"
+        "mul.s    %[f2],      %[f2],         %[scale]     \n\t"
+        "mul.s    %[f3],      %[f3],         %[scale]     \n\t"
+        "lwc1     %[f4],      16(%[fft_tmp])              \n\t"
+        "lwc1     %[f5],      20(%[fft_tmp])              \n\t"
+        "lwc1     %[f6],      24(%[fft_tmp])              \n\t"
+        "lwc1     %[f7],      28(%[fft_tmp])              \n\t"
+        "mul.s    %[f4],      %[f4],         %[scale]     \n\t"
+        "mul.s    %[f5],      %[f5],         %[scale]     \n\t"
+        "mul.s    %[f6],      %[f6],         %[scale]     \n\t"
+        "mul.s    %[f7],      %[f7],         %[scale]     \n\t"
+        "swc1     %[f0],      0(%[fft_tmp])               \n\t"
+        "swc1     %[f1],      4(%[fft_tmp])               \n\t"
+        "swc1     %[f2],      8(%[fft_tmp])               \n\t"
+        "swc1     %[f3],      12(%[fft_tmp])              \n\t"
+        "swc1     %[f4],      16(%[fft_tmp])              \n\t"
+        "swc1     %[f5],      20(%[fft_tmp])              \n\t"
+        "swc1     %[f6],      24(%[fft_tmp])              \n\t"
+        "swc1     %[f7],      28(%[fft_tmp])              \n\t"
+        "bgtz     %[len],     1b                          \n\t"
+        " addiu   %[fft_tmp], %[fft_tmp],    32           \n\t"
+        ".set     pop                                     \n\t"
+        : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2),
+          [f3] "=&f" (f3), [f4] "=&f" (f4), [f5] "=&f" (f5),
+          [f6] "=&f" (f6), [f7] "=&f" (f7), [len] "=&r" (len),
+          [fft_tmp] "=&r" (fft_tmp)
+        : [scale] "f" (scale), [fft] "r" (fft)
+        : "memory");
+    }
+    ooura_fft.Fft(fft);
+    aRe = h_fft_buf[0] + pos;
+    aIm = h_fft_buf[1] + pos;
+    __asm __volatile(
+      ".set     push                                    \n\t"
+      ".set     noreorder                               \n\t"
+      "addiu    %[fft_tmp], %[fft],        0            \n\t"
+      "addiu    %[len],     $zero,         31           \n\t"
+      "lwc1     %[f0],      0(%[aRe])                   \n\t"
+      "lwc1     %[f1],      0(%[fft_tmp])               \n\t"
+      "lwc1     %[f2],      256(%[aRe])                 \n\t"
+      "lwc1     %[f3],      4(%[fft_tmp])               \n\t"
+      "lwc1     %[f4],      4(%[aRe])                   \n\t"
+      "lwc1     %[f5],      8(%[fft_tmp])               \n\t"
+      "lwc1     %[f6],      4(%[aIm])                   \n\t"
+      "lwc1     %[f7],      12(%[fft_tmp])              \n\t"
+      "add.s    %[f0],      %[f0],         %[f1]        \n\t"
+      "add.s    %[f2],      %[f2],         %[f3]        \n\t"
+      "add.s    %[f4],      %[f4],         %[f5]        \n\t"
+      "add.s    %[f6],      %[f6],         %[f7]        \n\t"
+      "addiu    %[fft_tmp], %[fft_tmp],    16           \n\t"
+      "swc1     %[f0],      0(%[aRe])                   \n\t"
+      "swc1     %[f2],      256(%[aRe])                 \n\t"
+      "swc1     %[f4],      4(%[aRe])                   \n\t"
+      "addiu    %[aRe],     %[aRe],        8            \n\t"
+      "swc1     %[f6],      4(%[aIm])                   \n\t"
+      "addiu    %[aIm],     %[aIm],        8            \n\t"
+      "1:                                               \n\t"
+      "lwc1     %[f0],      0(%[aRe])                   \n\t"
+      "lwc1     %[f1],      0(%[fft_tmp])               \n\t"
+      "lwc1     %[f2],      0(%[aIm])                   \n\t"
+      "lwc1     %[f3],      4(%[fft_tmp])               \n\t"
+      "lwc1     %[f4],      4(%[aRe])                   \n\t"
+      "lwc1     %[f5],      8(%[fft_tmp])               \n\t"
+      "lwc1     %[f6],      4(%[aIm])                   \n\t"
+      "lwc1     %[f7],      12(%[fft_tmp])              \n\t"
+      "add.s    %[f0],      %[f0],         %[f1]        \n\t"
+      "add.s    %[f2],      %[f2],         %[f3]        \n\t"
+      "add.s    %[f4],      %[f4],         %[f5]        \n\t"
+      "add.s    %[f6],      %[f6],         %[f7]        \n\t"
+      "addiu    %[len],     %[len],        -1           \n\t"
+      "addiu    %[fft_tmp], %[fft_tmp],    16           \n\t"
+      "swc1     %[f0],      0(%[aRe])                   \n\t"
+      "swc1     %[f2],      0(%[aIm])                   \n\t"
+      "swc1     %[f4],      4(%[aRe])                   \n\t"
+      "addiu    %[aRe],     %[aRe],        8            \n\t"
+      "swc1     %[f6],      4(%[aIm])                   \n\t"
+      "bgtz     %[len],     1b                          \n\t"
+      " addiu   %[aIm],     %[aIm],        8            \n\t"
+      ".set     pop                                     \n\t"
+      : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2),
+        [f3] "=&f" (f3), [f4] "=&f" (f4), [f5] "=&f" (f5),
+        [f6] "=&f" (f6), [f7] "=&f" (f7), [len] "=&r" (len),
+        [fft_tmp] "=&r" (fft_tmp), [aRe] "+r" (aRe), [aIm] "+r" (aIm)
+      : [fft] "r" (fft)
+      : "memory");
+  }
+}
+
+void WebRtcAec_Overdrive_mips(float overdrive_scaling,
+                              float hNlFb,
+                              float hNl[PART_LEN1]) {
+  const float one = 1.0;
+  float* p_hNl;
+  const float* p_WebRtcAec_wC;
+  float temp1, temp2, temp3, temp4;
+
+  p_hNl = &hNl[0];
+  p_WebRtcAec_wC = &WebRtcAec_weightCurve[0];
+
+  for (int i = 0; i < PART_LEN1; ++i) {
+    // Weight subbands
+    __asm __volatile(
+      ".set      push                                              \n\t"
+      ".set      noreorder                                         \n\t"
+      "lwc1      %[temp1],    0(%[p_hNl])                          \n\t"
+      "lwc1      %[temp2],    0(%[p_wC])                           \n\t"
+      "c.lt.s    %[hNlFb],    %[temp1]                             \n\t"
+      "bc1f      1f                                                \n\t"
+      " mul.s    %[temp3],    %[temp2],     %[hNlFb]               \n\t"
+      "sub.s     %[temp4],    %[one],       %[temp2]               \n\t"
+#if !defined(MIPS32_R2_LE)
+      "mul.s     %[temp1],    %[temp1],     %[temp4]               \n\t"
+      "add.s     %[temp1],    %[temp3],     %[temp1]               \n\t"
+#else  // #if !defined(MIPS32_R2_LE)
+      "madd.s    %[temp1],    %[temp3],     %[temp1],   %[temp4]   \n\t"
+#endif  // #if !defined(MIPS32_R2_LE)
+      "swc1      %[temp1],    0(%[p_hNl])                          \n\t"
+     "1:                                                           \n\t"
+      "addiu     %[p_wC],     %[p_wC],      4                      \n\t"
+      ".set      pop                                               \n\t"
+      : [temp1] "=&f" (temp1), [temp2] "=&f" (temp2), [temp3] "=&f" (temp3),
+        [temp4] "=&f" (temp4), [p_wC] "+r" (p_WebRtcAec_wC)
+      : [hNlFb] "f" (hNlFb), [one] "f" (one), [p_hNl] "r" (p_hNl)
+      : "memory");
+
+    hNl[i] = powf(hNl[i], overdrive_scaling * WebRtcAec_overDriveCurve[i]);
+  }
+}
+
+void WebRtcAec_Suppress_mips(const float hNl[PART_LEN1],
+                             float efw[2][PART_LEN1]) {
+  const float* p_hNl;
+  float* p_efw0;
+  float* p_efw1;
+  float temp1, temp2, temp3, temp4;
+
+  p_hNl = &hNl[0];
+  p_efw0 = &efw[0][0];
+  p_efw1 = &efw[1][0];
+
+  for (int i = 0; i < PART_LEN1; ++i) {
+    __asm __volatile(
+      "lwc1      %[temp1],    0(%[p_hNl])              \n\t"
+      "lwc1      %[temp3],    0(%[p_efw1])             \n\t"
+      "lwc1      %[temp2],    0(%[p_efw0])             \n\t"
+      "addiu     %[p_hNl],    %[p_hNl],     4          \n\t"
+      "mul.s     %[temp3],    %[temp3],     %[temp1]   \n\t"
+      "mul.s     %[temp2],    %[temp2],     %[temp1]   \n\t"
+      "addiu     %[p_efw0],   %[p_efw0],    4          \n\t"
+      "addiu     %[p_efw1],   %[p_efw1],    4          \n\t"
+      "neg.s     %[temp4],    %[temp3]                 \n\t"
+      "swc1      %[temp2],    -4(%[p_efw0])            \n\t"
+      "swc1      %[temp4],    -4(%[p_efw1])            \n\t"
+      : [temp1] "=&f" (temp1), [temp2] "=&f" (temp2), [temp3] "=&f" (temp3),
+        [temp4] "=&f" (temp4), [p_efw0] "+r" (p_efw0), [p_efw1] "+r" (p_efw1),
+        [p_hNl] "+r" (p_hNl)
+      :
+      : "memory");
+  }
+}
+
+void WebRtcAec_ScaleErrorSignal_mips(float mu,
+                                     float error_threshold,
+                                     float x_pow[PART_LEN1],
+                                     float ef[2][PART_LEN1]) {
+  int len = (PART_LEN1);
+  float* ef0 = ef[0];
+  float* ef1 = ef[1];
+  float fac1 = 1e-10f;
+  float err_th2 = error_threshold * error_threshold;
+  float f0, f1, f2;
+#if !defined(MIPS32_R2_LE)
+  float f3;
+#endif
+
+  __asm __volatile(
+    ".set       push                                   \n\t"
+    ".set       noreorder                              \n\t"
+    "1:                                                \n\t"
+    "lwc1       %[f0],     0(%[x_pow])                 \n\t"
+    "lwc1       %[f1],     0(%[ef0])                   \n\t"
+    "lwc1       %[f2],     0(%[ef1])                   \n\t"
+    "add.s      %[f0],     %[f0],       %[fac1]        \n\t"
+    "div.s      %[f1],     %[f1],       %[f0]          \n\t"
+    "div.s      %[f2],     %[f2],       %[f0]          \n\t"
+    "mul.s      %[f0],     %[f1],       %[f1]          \n\t"
+#if defined(MIPS32_R2_LE)
+    "madd.s     %[f0],     %[f0],       %[f2],   %[f2] \n\t"
+#else
+    "mul.s      %[f3],     %[f2],       %[f2]          \n\t"
+    "add.s      %[f0],     %[f0],       %[f3]          \n\t"
+#endif
+    "c.le.s     %[f0],     %[err_th2]                  \n\t"
+    "nop                                               \n\t"
+    "bc1t       2f                                     \n\t"
+    " nop                                              \n\t"
+    "sqrt.s     %[f0],     %[f0]                       \n\t"
+    "add.s      %[f0],     %[f0],       %[fac1]        \n\t"
+    "div.s      %[f0],     %[err_th],   %[f0]          \n\t"
+    "mul.s      %[f1],     %[f1],       %[f0]          \n\t"
+    "mul.s      %[f2],     %[f2],       %[f0]          \n\t"
+    "2:                                                \n\t"
+    "mul.s      %[f1],     %[f1],       %[mu]          \n\t"
+    "mul.s      %[f2],     %[f2],       %[mu]          \n\t"
+    "swc1       %[f1],     0(%[ef0])                   \n\t"
+    "swc1       %[f2],     0(%[ef1])                   \n\t"
+    "addiu      %[len],    %[len],      -1             \n\t"
+    "addiu      %[x_pow],  %[x_pow],    4              \n\t"
+    "addiu      %[ef0],    %[ef0],      4              \n\t"
+    "bgtz       %[len],    1b                          \n\t"
+    " addiu     %[ef1],    %[ef1],      4              \n\t"
+    ".set       pop                                    \n\t"
+    : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2),
+#if !defined(MIPS32_R2_LE)
+      [f3] "=&f" (f3),
+#endif
+      [x_pow] "+r" (x_pow), [ef0] "+r" (ef0), [ef1] "+r" (ef1),
+      [len] "+r" (len)
+    : [fac1] "f" (fac1), [err_th2] "f" (err_th2), [mu] "f" (mu),
+      [err_th] "f" (error_threshold)
+    : "memory");
+}
+
+void WebRtcAec_InitAec_mips(void) {
+  WebRtcAec_FilterFar = WebRtcAec_FilterFar_mips;
+  WebRtcAec_FilterAdaptation = WebRtcAec_FilterAdaptation_mips;
+  WebRtcAec_ScaleErrorSignal = WebRtcAec_ScaleErrorSignal_mips;
+  WebRtcAec_Overdrive = WebRtcAec_Overdrive_mips;
+  WebRtcAec_Suppress = WebRtcAec_Suppress_mips;
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec/aec_core_neon.cc b/modules/audio_processing/aec/aec_core_neon.cc
new file mode 100644
index 0000000..1fbf56b
--- /dev/null
+++ b/modules/audio_processing/aec/aec_core_neon.cc
@@ -0,0 +1,737 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * The core AEC algorithm, neon version of speed-critical functions.
+ *
+ * Based on aec_core_sse2.c.
+ */
+
+#include <arm_neon.h>
+#include <math.h>
+#include <string.h>  // memset
+
+extern "C" {
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+}
+#include "modules/audio_processing/aec/aec_common.h"
+#include "modules/audio_processing/aec/aec_core_optimized_methods.h"
+#include "modules/audio_processing/utility/ooura_fft.h"
+
+namespace webrtc {
+
+enum { kShiftExponentIntoTopMantissa = 8 };
+enum { kFloatExponentShift = 23 };
+
+__inline static float MulRe(float aRe, float aIm, float bRe, float bIm) {
+  return aRe * bRe - aIm * bIm;
+}
+
+__inline static float MulIm(float aRe, float aIm, float bRe, float bIm) {
+  return aRe * bIm + aIm * bRe;
+}
+
+static void FilterFarNEON(int num_partitions,
+                          int x_fft_buf_block_pos,
+                          float x_fft_buf[2]
+                                         [kExtendedNumPartitions * PART_LEN1],
+                          float h_fft_buf[2]
+                                         [kExtendedNumPartitions * PART_LEN1],
+                          float y_fft[2][PART_LEN1]) {
+  int i;
+  for (i = 0; i < num_partitions; i++) {
+    int j;
+    int xPos = (i + x_fft_buf_block_pos) * PART_LEN1;
+    int pos = i * PART_LEN1;
+    // Check for wrap
+    if (i + x_fft_buf_block_pos >= num_partitions) {
+      xPos -= num_partitions * PART_LEN1;
+    }
+
+    // vectorized code (four at once)
+    for (j = 0; j + 3 < PART_LEN1; j += 4) {
+      const float32x4_t x_fft_buf_re = vld1q_f32(&x_fft_buf[0][xPos + j]);
+      const float32x4_t x_fft_buf_im = vld1q_f32(&x_fft_buf[1][xPos + j]);
+      const float32x4_t h_fft_buf_re = vld1q_f32(&h_fft_buf[0][pos + j]);
+      const float32x4_t h_fft_buf_im = vld1q_f32(&h_fft_buf[1][pos + j]);
+      const float32x4_t y_fft_re = vld1q_f32(&y_fft[0][j]);
+      const float32x4_t y_fft_im = vld1q_f32(&y_fft[1][j]);
+      const float32x4_t a = vmulq_f32(x_fft_buf_re, h_fft_buf_re);
+      const float32x4_t e = vmlsq_f32(a, x_fft_buf_im, h_fft_buf_im);
+      const float32x4_t c = vmulq_f32(x_fft_buf_re, h_fft_buf_im);
+      const float32x4_t f = vmlaq_f32(c, x_fft_buf_im, h_fft_buf_re);
+      const float32x4_t g = vaddq_f32(y_fft_re, e);
+      const float32x4_t h = vaddq_f32(y_fft_im, f);
+      vst1q_f32(&y_fft[0][j], g);
+      vst1q_f32(&y_fft[1][j], h);
+    }
+    // scalar code for the remaining items.
+    for (; j < PART_LEN1; j++) {
+      y_fft[0][j] += MulRe(x_fft_buf[0][xPos + j], x_fft_buf[1][xPos + j],
+                           h_fft_buf[0][pos + j], h_fft_buf[1][pos + j]);
+      y_fft[1][j] += MulIm(x_fft_buf[0][xPos + j], x_fft_buf[1][xPos + j],
+                           h_fft_buf[0][pos + j], h_fft_buf[1][pos + j]);
+    }
+  }
+}
+
+// ARM64's arm_neon.h has already defined vdivq_f32 vsqrtq_f32.
+#if !defined(WEBRTC_ARCH_ARM64)
+static float32x4_t vdivq_f32(float32x4_t a, float32x4_t b) {
+  int i;
+  float32x4_t x = vrecpeq_f32(b);
+  // from arm documentation
+  // The Newton-Raphson iteration:
+  //     x[n+1] = x[n] * (2 - d * x[n])
+  // converges to (1/d) if x0 is the result of VRECPE applied to d.
+  //
+  // Note: The precision did not improve after 2 iterations.
+  for (i = 0; i < 2; i++) {
+    x = vmulq_f32(vrecpsq_f32(b, x), x);
+  }
+  // a/b = a*(1/b)
+  return vmulq_f32(a, x);
+}
+
+static float32x4_t vsqrtq_f32(float32x4_t s) {
+  int i;
+  float32x4_t x = vrsqrteq_f32(s);
+
+  // Code to handle sqrt(0).
+  // If the input to sqrtf() is zero, a zero will be returned.
+  // If the input to vrsqrteq_f32() is zero, positive infinity is returned.
+  const uint32x4_t vec_p_inf = vdupq_n_u32(0x7F800000);
+  // check for divide by zero
+  const uint32x4_t div_by_zero = vceqq_u32(vec_p_inf, vreinterpretq_u32_f32(x));
+  // zero out the positive infinity results
+  x = vreinterpretq_f32_u32(
+      vandq_u32(vmvnq_u32(div_by_zero), vreinterpretq_u32_f32(x)));
+  // from arm documentation
+  // The Newton-Raphson iteration:
+  //     x[n+1] = x[n] * (3 - d * (x[n] * x[n])) / 2)
+  // converges to (1/√d) if x0 is the result of VRSQRTE applied to d.
+  //
+  // Note: The precision did not improve after 2 iterations.
+  for (i = 0; i < 2; i++) {
+    x = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, x), s), x);
+  }
+  // sqrt(s) = s * 1/sqrt(s)
+  return vmulq_f32(s, x);
+}
+#endif  // WEBRTC_ARCH_ARM64
+
+static void ScaleErrorSignalNEON(float mu,
+                                 float error_threshold,
+                                 float x_pow[PART_LEN1],
+                                 float ef[2][PART_LEN1]) {
+  const float32x4_t k1e_10f = vdupq_n_f32(1e-10f);
+  const float32x4_t kMu = vmovq_n_f32(mu);
+  const float32x4_t kThresh = vmovq_n_f32(error_threshold);
+  int i;
+  // vectorized code (four at once)
+  for (i = 0; i + 3 < PART_LEN1; i += 4) {
+    const float32x4_t x_pow_local = vld1q_f32(&x_pow[i]);
+    const float32x4_t ef_re_base = vld1q_f32(&ef[0][i]);
+    const float32x4_t ef_im_base = vld1q_f32(&ef[1][i]);
+    const float32x4_t xPowPlus = vaddq_f32(x_pow_local, k1e_10f);
+    float32x4_t ef_re = vdivq_f32(ef_re_base, xPowPlus);
+    float32x4_t ef_im = vdivq_f32(ef_im_base, xPowPlus);
+    const float32x4_t ef_re2 = vmulq_f32(ef_re, ef_re);
+    const float32x4_t ef_sum2 = vmlaq_f32(ef_re2, ef_im, ef_im);
+    const float32x4_t absEf = vsqrtq_f32(ef_sum2);
+    const uint32x4_t bigger = vcgtq_f32(absEf, kThresh);
+    const float32x4_t absEfPlus = vaddq_f32(absEf, k1e_10f);
+    const float32x4_t absEfInv = vdivq_f32(kThresh, absEfPlus);
+    uint32x4_t ef_re_if = vreinterpretq_u32_f32(vmulq_f32(ef_re, absEfInv));
+    uint32x4_t ef_im_if = vreinterpretq_u32_f32(vmulq_f32(ef_im, absEfInv));
+    uint32x4_t ef_re_u32 =
+        vandq_u32(vmvnq_u32(bigger), vreinterpretq_u32_f32(ef_re));
+    uint32x4_t ef_im_u32 =
+        vandq_u32(vmvnq_u32(bigger), vreinterpretq_u32_f32(ef_im));
+    ef_re_if = vandq_u32(bigger, ef_re_if);
+    ef_im_if = vandq_u32(bigger, ef_im_if);
+    ef_re_u32 = vorrq_u32(ef_re_u32, ef_re_if);
+    ef_im_u32 = vorrq_u32(ef_im_u32, ef_im_if);
+    ef_re = vmulq_f32(vreinterpretq_f32_u32(ef_re_u32), kMu);
+    ef_im = vmulq_f32(vreinterpretq_f32_u32(ef_im_u32), kMu);
+    vst1q_f32(&ef[0][i], ef_re);
+    vst1q_f32(&ef[1][i], ef_im);
+  }
+  // scalar code for the remaining items.
+  for (; i < PART_LEN1; i++) {
+    float abs_ef;
+    ef[0][i] /= (x_pow[i] + 1e-10f);
+    ef[1][i] /= (x_pow[i] + 1e-10f);
+    abs_ef = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]);
+
+    if (abs_ef > error_threshold) {
+      abs_ef = error_threshold / (abs_ef + 1e-10f);
+      ef[0][i] *= abs_ef;
+      ef[1][i] *= abs_ef;
+    }
+
+    // Stepsize factor
+    ef[0][i] *= mu;
+    ef[1][i] *= mu;
+  }
+}
+
+static void FilterAdaptationNEON(
+    const OouraFft& ooura_fft,
+    int num_partitions,
+    int x_fft_buf_block_pos,
+    float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+    float e_fft[2][PART_LEN1],
+    float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]) {
+  float fft[PART_LEN2];
+  int i;
+  for (i = 0; i < num_partitions; i++) {
+    int xPos = (i + x_fft_buf_block_pos) * PART_LEN1;
+    int pos = i * PART_LEN1;
+    int j;
+    // Check for wrap
+    if (i + x_fft_buf_block_pos >= num_partitions) {
+      xPos -= num_partitions * PART_LEN1;
+    }
+
+    // Process the whole array...
+    for (j = 0; j < PART_LEN; j += 4) {
+      // Load x_fft_buf and e_fft.
+      const float32x4_t x_fft_buf_re = vld1q_f32(&x_fft_buf[0][xPos + j]);
+      const float32x4_t x_fft_buf_im = vld1q_f32(&x_fft_buf[1][xPos + j]);
+      const float32x4_t e_fft_re = vld1q_f32(&e_fft[0][j]);
+      const float32x4_t e_fft_im = vld1q_f32(&e_fft[1][j]);
+      // Calculate the product of conjugate(x_fft_buf) by e_fft.
+      //   re(conjugate(a) * b) = aRe * bRe + aIm * bIm
+      //   im(conjugate(a) * b)=  aRe * bIm - aIm * bRe
+      const float32x4_t a = vmulq_f32(x_fft_buf_re, e_fft_re);
+      const float32x4_t e = vmlaq_f32(a, x_fft_buf_im, e_fft_im);
+      const float32x4_t c = vmulq_f32(x_fft_buf_re, e_fft_im);
+      const float32x4_t f = vmlsq_f32(c, x_fft_buf_im, e_fft_re);
+      // Interleave real and imaginary parts.
+      const float32x4x2_t g_n_h = vzipq_f32(e, f);
+      // Store
+      vst1q_f32(&fft[2 * j + 0], g_n_h.val[0]);
+      vst1q_f32(&fft[2 * j + 4], g_n_h.val[1]);
+    }
+    // ... and fixup the first imaginary entry.
+    fft[1] =
+        MulRe(x_fft_buf[0][xPos + PART_LEN], -x_fft_buf[1][xPos + PART_LEN],
+              e_fft[0][PART_LEN], e_fft[1][PART_LEN]);
+
+    ooura_fft.InverseFft(fft);
+    memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN);
+
+    // fft scaling
+    {
+      const float scale = 2.0f / PART_LEN2;
+      const float32x4_t scale_ps = vmovq_n_f32(scale);
+      for (j = 0; j < PART_LEN; j += 4) {
+        const float32x4_t fft_ps = vld1q_f32(&fft[j]);
+        const float32x4_t fft_scale = vmulq_f32(fft_ps, scale_ps);
+        vst1q_f32(&fft[j], fft_scale);
+      }
+    }
+    ooura_fft.Fft(fft);
+
+    {
+      const float wt1 = h_fft_buf[1][pos];
+      h_fft_buf[0][pos + PART_LEN] += fft[1];
+      for (j = 0; j < PART_LEN; j += 4) {
+        float32x4_t wtBuf_re = vld1q_f32(&h_fft_buf[0][pos + j]);
+        float32x4_t wtBuf_im = vld1q_f32(&h_fft_buf[1][pos + j]);
+        const float32x4_t fft0 = vld1q_f32(&fft[2 * j + 0]);
+        const float32x4_t fft4 = vld1q_f32(&fft[2 * j + 4]);
+        const float32x4x2_t fft_re_im = vuzpq_f32(fft0, fft4);
+        wtBuf_re = vaddq_f32(wtBuf_re, fft_re_im.val[0]);
+        wtBuf_im = vaddq_f32(wtBuf_im, fft_re_im.val[1]);
+
+        vst1q_f32(&h_fft_buf[0][pos + j], wtBuf_re);
+        vst1q_f32(&h_fft_buf[1][pos + j], wtBuf_im);
+      }
+      h_fft_buf[1][pos] = wt1;
+    }
+  }
+}
+
+static float32x4_t vpowq_f32(float32x4_t a, float32x4_t b) {
+  // a^b = exp2(b * log2(a))
+  //   exp2(x) and log2(x) are calculated using polynomial approximations.
+  float32x4_t log2_a, b_log2_a, a_exp_b;
+
+  // Calculate log2(x), x = a.
+  {
+    // To calculate log2(x), we decompose x like this:
+    //   x = y * 2^n
+    //     n is an integer
+    //     y is in the [1.0, 2.0) range
+    //
+    //   log2(x) = log2(y) + n
+    //     n       can be evaluated by playing with float representation.
+    //     log2(y) in a small range can be approximated, this code uses an order
+    //             five polynomial approximation. The coefficients have been
+    //             estimated with the Remez algorithm and the resulting
+    //             polynomial has a maximum relative error of 0.00086%.
+
+    // Compute n.
+    //    This is done by masking the exponent, shifting it into the top bit of
+    //    the mantissa, putting eight into the biased exponent (to shift/
+    //    compensate the fact that the exponent has been shifted in the top/
+    //    fractional part and finally getting rid of the implicit leading one
+    //    from the mantissa by substracting it out.
+    const uint32x4_t vec_float_exponent_mask = vdupq_n_u32(0x7F800000);
+    const uint32x4_t vec_eight_biased_exponent = vdupq_n_u32(0x43800000);
+    const uint32x4_t vec_implicit_leading_one = vdupq_n_u32(0x43BF8000);
+    const uint32x4_t two_n =
+        vandq_u32(vreinterpretq_u32_f32(a), vec_float_exponent_mask);
+    const uint32x4_t n_1 = vshrq_n_u32(two_n, kShiftExponentIntoTopMantissa);
+    const uint32x4_t n_0 = vorrq_u32(n_1, vec_eight_biased_exponent);
+    const float32x4_t n =
+        vsubq_f32(vreinterpretq_f32_u32(n_0),
+                  vreinterpretq_f32_u32(vec_implicit_leading_one));
+    // Compute y.
+    const uint32x4_t vec_mantissa_mask = vdupq_n_u32(0x007FFFFF);
+    const uint32x4_t vec_zero_biased_exponent_is_one = vdupq_n_u32(0x3F800000);
+    const uint32x4_t mantissa =
+        vandq_u32(vreinterpretq_u32_f32(a), vec_mantissa_mask);
+    const float32x4_t y = vreinterpretq_f32_u32(
+        vorrq_u32(mantissa, vec_zero_biased_exponent_is_one));
+    // Approximate log2(y) ~= (y - 1) * pol5(y).
+    //    pol5(y) = C5 * y^5 + C4 * y^4 + C3 * y^3 + C2 * y^2 + C1 * y + C0
+    const float32x4_t C5 = vdupq_n_f32(-3.4436006e-2f);
+    const float32x4_t C4 = vdupq_n_f32(3.1821337e-1f);
+    const float32x4_t C3 = vdupq_n_f32(-1.2315303f);
+    const float32x4_t C2 = vdupq_n_f32(2.5988452f);
+    const float32x4_t C1 = vdupq_n_f32(-3.3241990f);
+    const float32x4_t C0 = vdupq_n_f32(3.1157899f);
+    float32x4_t pol5_y = C5;
+    pol5_y = vmlaq_f32(C4, y, pol5_y);
+    pol5_y = vmlaq_f32(C3, y, pol5_y);
+    pol5_y = vmlaq_f32(C2, y, pol5_y);
+    pol5_y = vmlaq_f32(C1, y, pol5_y);
+    pol5_y = vmlaq_f32(C0, y, pol5_y);
+    const float32x4_t y_minus_one =
+        vsubq_f32(y, vreinterpretq_f32_u32(vec_zero_biased_exponent_is_one));
+    const float32x4_t log2_y = vmulq_f32(y_minus_one, pol5_y);
+
+    // Combine parts.
+    log2_a = vaddq_f32(n, log2_y);
+  }
+
+  // b * log2(a)
+  b_log2_a = vmulq_f32(b, log2_a);
+
+  // Calculate exp2(x), x = b * log2(a).
+  {
+    // To calculate 2^x, we decompose x like this:
+    //   x = n + y
+    //     n is an integer, the value of x - 0.5 rounded down, therefore
+    //     y is in the [0.5, 1.5) range
+    //
+    //   2^x = 2^n * 2^y
+    //     2^n can be evaluated by playing with float representation.
+    //     2^y in a small range can be approximated, this code uses an order two
+    //         polynomial approximation. The coefficients have been estimated
+    //         with the Remez algorithm and the resulting polynomial has a
+    //         maximum relative error of 0.17%.
+    // To avoid over/underflow, we reduce the range of input to ]-127, 129].
+    const float32x4_t max_input = vdupq_n_f32(129.f);
+    const float32x4_t min_input = vdupq_n_f32(-126.99999f);
+    const float32x4_t x_min = vminq_f32(b_log2_a, max_input);
+    const float32x4_t x_max = vmaxq_f32(x_min, min_input);
+    // Compute n.
+    const float32x4_t half = vdupq_n_f32(0.5f);
+    const float32x4_t x_minus_half = vsubq_f32(x_max, half);
+    const int32x4_t x_minus_half_floor = vcvtq_s32_f32(x_minus_half);
+
+    // Compute 2^n.
+    const int32x4_t float_exponent_bias = vdupq_n_s32(127);
+    const int32x4_t two_n_exponent =
+        vaddq_s32(x_minus_half_floor, float_exponent_bias);
+    const float32x4_t two_n =
+        vreinterpretq_f32_s32(vshlq_n_s32(two_n_exponent, kFloatExponentShift));
+    // Compute y.
+    const float32x4_t y = vsubq_f32(x_max, vcvtq_f32_s32(x_minus_half_floor));
+
+    // Approximate 2^y ~= C2 * y^2 + C1 * y + C0.
+    const float32x4_t C2 = vdupq_n_f32(3.3718944e-1f);
+    const float32x4_t C1 = vdupq_n_f32(6.5763628e-1f);
+    const float32x4_t C0 = vdupq_n_f32(1.0017247f);
+    float32x4_t exp2_y = C2;
+    exp2_y = vmlaq_f32(C1, y, exp2_y);
+    exp2_y = vmlaq_f32(C0, y, exp2_y);
+
+    // Combine parts.
+    a_exp_b = vmulq_f32(exp2_y, two_n);
+  }
+
+  return a_exp_b;
+}
+
+static void OverdriveNEON(float overdrive_scaling,
+                          float hNlFb,
+                          float hNl[PART_LEN1]) {
+  int i;
+  const float32x4_t vec_hNlFb = vmovq_n_f32(hNlFb);
+  const float32x4_t vec_one = vdupq_n_f32(1.0f);
+  const float32x4_t vec_overdrive_scaling = vmovq_n_f32(overdrive_scaling);
+
+  // vectorized code (four at once)
+  for (i = 0; i + 3 < PART_LEN1; i += 4) {
+    // Weight subbands
+    float32x4_t vec_hNl = vld1q_f32(&hNl[i]);
+    const float32x4_t vec_weightCurve = vld1q_f32(&WebRtcAec_weightCurve[i]);
+    const uint32x4_t bigger = vcgtq_f32(vec_hNl, vec_hNlFb);
+    const float32x4_t vec_weightCurve_hNlFb =
+        vmulq_f32(vec_weightCurve, vec_hNlFb);
+    const float32x4_t vec_one_weightCurve = vsubq_f32(vec_one, vec_weightCurve);
+    const float32x4_t vec_one_weightCurve_hNl =
+        vmulq_f32(vec_one_weightCurve, vec_hNl);
+    const uint32x4_t vec_if0 =
+        vandq_u32(vmvnq_u32(bigger), vreinterpretq_u32_f32(vec_hNl));
+    const float32x4_t vec_one_weightCurve_add =
+        vaddq_f32(vec_weightCurve_hNlFb, vec_one_weightCurve_hNl);
+    const uint32x4_t vec_if1 =
+        vandq_u32(bigger, vreinterpretq_u32_f32(vec_one_weightCurve_add));
+
+    vec_hNl = vreinterpretq_f32_u32(vorrq_u32(vec_if0, vec_if1));
+
+    const float32x4_t vec_overDriveCurve =
+        vld1q_f32(&WebRtcAec_overDriveCurve[i]);
+    const float32x4_t vec_overDriveSm_overDriveCurve =
+        vmulq_f32(vec_overdrive_scaling, vec_overDriveCurve);
+    vec_hNl = vpowq_f32(vec_hNl, vec_overDriveSm_overDriveCurve);
+    vst1q_f32(&hNl[i], vec_hNl);
+  }
+
+  // scalar code for the remaining items.
+  for (; i < PART_LEN1; i++) {
+    // Weight subbands
+    if (hNl[i] > hNlFb) {
+      hNl[i] = WebRtcAec_weightCurve[i] * hNlFb +
+               (1 - WebRtcAec_weightCurve[i]) * hNl[i];
+    }
+
+    hNl[i] = powf(hNl[i], overdrive_scaling * WebRtcAec_overDriveCurve[i]);
+  }
+}
+
+static void SuppressNEON(const float hNl[PART_LEN1], float efw[2][PART_LEN1]) {
+  int i;
+  const float32x4_t vec_minus_one = vdupq_n_f32(-1.0f);
+  // vectorized code (four at once)
+  for (i = 0; i + 3 < PART_LEN1; i += 4) {
+    float32x4_t vec_hNl = vld1q_f32(&hNl[i]);
+    float32x4_t vec_efw_re = vld1q_f32(&efw[0][i]);
+    float32x4_t vec_efw_im = vld1q_f32(&efw[1][i]);
+    vec_efw_re = vmulq_f32(vec_efw_re, vec_hNl);
+    vec_efw_im = vmulq_f32(vec_efw_im, vec_hNl);
+
+    // Ooura fft returns incorrect sign on imaginary component. It matters
+    // here because we are making an additive change with comfort noise.
+    vec_efw_im = vmulq_f32(vec_efw_im, vec_minus_one);
+    vst1q_f32(&efw[0][i], vec_efw_re);
+    vst1q_f32(&efw[1][i], vec_efw_im);
+  }
+
+  // scalar code for the remaining items.
+  for (; i < PART_LEN1; i++) {
+    efw[0][i] *= hNl[i];
+    efw[1][i] *= hNl[i];
+
+    // Ooura fft returns incorrect sign on imaginary component. It matters
+    // here because we are making an additive change with comfort noise.
+    efw[1][i] *= -1;
+  }
+}
+
+static int PartitionDelayNEON(
+    int num_partitions,
+    float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]) {
+  // Measures the energy in each filter partition and returns the partition with
+  // highest energy.
+  // TODO(bjornv): Spread computational cost by computing one partition per
+  // block?
+  float wfEnMax = 0;
+  int i;
+  int delay = 0;
+
+  for (i = 0; i < num_partitions; i++) {
+    int j;
+    int pos = i * PART_LEN1;
+    float wfEn = 0;
+    float32x4_t vec_wfEn = vdupq_n_f32(0.0f);
+    // vectorized code (four at once)
+    for (j = 0; j + 3 < PART_LEN1; j += 4) {
+      const float32x4_t vec_wfBuf0 = vld1q_f32(&h_fft_buf[0][pos + j]);
+      const float32x4_t vec_wfBuf1 = vld1q_f32(&h_fft_buf[1][pos + j]);
+      vec_wfEn = vmlaq_f32(vec_wfEn, vec_wfBuf0, vec_wfBuf0);
+      vec_wfEn = vmlaq_f32(vec_wfEn, vec_wfBuf1, vec_wfBuf1);
+    }
+    {
+      float32x2_t vec_total;
+      // A B C D
+      vec_total = vpadd_f32(vget_low_f32(vec_wfEn), vget_high_f32(vec_wfEn));
+      // A+B C+D
+      vec_total = vpadd_f32(vec_total, vec_total);
+      // A+B+C+D A+B+C+D
+      wfEn = vget_lane_f32(vec_total, 0);
+    }
+
+    // scalar code for the remaining items.
+    for (; j < PART_LEN1; j++) {
+      wfEn += h_fft_buf[0][pos + j] * h_fft_buf[0][pos + j] +
+              h_fft_buf[1][pos + j] * h_fft_buf[1][pos + j];
+    }
+
+    if (wfEn > wfEnMax) {
+      wfEnMax = wfEn;
+      delay = i;
+    }
+  }
+  return delay;
+}
+
+// Updates the following smoothed  Power Spectral Densities (PSD):
+//  - sd  : near-end
+//  - se  : residual echo
+//  - sx  : far-end
+//  - sde : cross-PSD of near-end and residual echo
+//  - sxd : cross-PSD of near-end and far-end
+//
+// In addition to updating the PSDs, also the filter diverge state is determined
+// upon actions are taken.
+static void UpdateCoherenceSpectraNEON(int mult,
+                                       bool extended_filter_enabled,
+                                       float efw[2][PART_LEN1],
+                                       float dfw[2][PART_LEN1],
+                                       float xfw[2][PART_LEN1],
+                                       CoherenceState* coherence_state,
+                                       short* filter_divergence_state,
+                                       int* extreme_filter_divergence) {
+  // Power estimate smoothing coefficients.
+  const float* ptrGCoh =
+      extended_filter_enabled
+          ? WebRtcAec_kExtendedSmoothingCoefficients[mult - 1]
+          : WebRtcAec_kNormalSmoothingCoefficients[mult - 1];
+  int i;
+  float sdSum = 0, seSum = 0;
+  const float32x4_t vec_15 = vdupq_n_f32(WebRtcAec_kMinFarendPSD);
+  float32x4_t vec_sdSum = vdupq_n_f32(0.0f);
+  float32x4_t vec_seSum = vdupq_n_f32(0.0f);
+
+  for (i = 0; i + 3 < PART_LEN1; i += 4) {
+    const float32x4_t vec_dfw0 = vld1q_f32(&dfw[0][i]);
+    const float32x4_t vec_dfw1 = vld1q_f32(&dfw[1][i]);
+    const float32x4_t vec_efw0 = vld1q_f32(&efw[0][i]);
+    const float32x4_t vec_efw1 = vld1q_f32(&efw[1][i]);
+    const float32x4_t vec_xfw0 = vld1q_f32(&xfw[0][i]);
+    const float32x4_t vec_xfw1 = vld1q_f32(&xfw[1][i]);
+    float32x4_t vec_sd =
+        vmulq_n_f32(vld1q_f32(&coherence_state->sd[i]), ptrGCoh[0]);
+    float32x4_t vec_se =
+        vmulq_n_f32(vld1q_f32(&coherence_state->se[i]), ptrGCoh[0]);
+    float32x4_t vec_sx =
+        vmulq_n_f32(vld1q_f32(&coherence_state->sx[i]), ptrGCoh[0]);
+    float32x4_t vec_dfw_sumsq = vmulq_f32(vec_dfw0, vec_dfw0);
+    float32x4_t vec_efw_sumsq = vmulq_f32(vec_efw0, vec_efw0);
+    float32x4_t vec_xfw_sumsq = vmulq_f32(vec_xfw0, vec_xfw0);
+
+    vec_dfw_sumsq = vmlaq_f32(vec_dfw_sumsq, vec_dfw1, vec_dfw1);
+    vec_efw_sumsq = vmlaq_f32(vec_efw_sumsq, vec_efw1, vec_efw1);
+    vec_xfw_sumsq = vmlaq_f32(vec_xfw_sumsq, vec_xfw1, vec_xfw1);
+    vec_xfw_sumsq = vmaxq_f32(vec_xfw_sumsq, vec_15);
+    vec_sd = vmlaq_n_f32(vec_sd, vec_dfw_sumsq, ptrGCoh[1]);
+    vec_se = vmlaq_n_f32(vec_se, vec_efw_sumsq, ptrGCoh[1]);
+    vec_sx = vmlaq_n_f32(vec_sx, vec_xfw_sumsq, ptrGCoh[1]);
+
+    vst1q_f32(&coherence_state->sd[i], vec_sd);
+    vst1q_f32(&coherence_state->se[i], vec_se);
+    vst1q_f32(&coherence_state->sx[i], vec_sx);
+
+    {
+      float32x4x2_t vec_sde = vld2q_f32(&coherence_state->sde[i][0]);
+      float32x4_t vec_dfwefw0011 = vmulq_f32(vec_dfw0, vec_efw0);
+      float32x4_t vec_dfwefw0110 = vmulq_f32(vec_dfw0, vec_efw1);
+      vec_sde.val[0] = vmulq_n_f32(vec_sde.val[0], ptrGCoh[0]);
+      vec_sde.val[1] = vmulq_n_f32(vec_sde.val[1], ptrGCoh[0]);
+      vec_dfwefw0011 = vmlaq_f32(vec_dfwefw0011, vec_dfw1, vec_efw1);
+      vec_dfwefw0110 = vmlsq_f32(vec_dfwefw0110, vec_dfw1, vec_efw0);
+      vec_sde.val[0] = vmlaq_n_f32(vec_sde.val[0], vec_dfwefw0011, ptrGCoh[1]);
+      vec_sde.val[1] = vmlaq_n_f32(vec_sde.val[1], vec_dfwefw0110, ptrGCoh[1]);
+      vst2q_f32(&coherence_state->sde[i][0], vec_sde);
+    }
+
+    {
+      float32x4x2_t vec_sxd = vld2q_f32(&coherence_state->sxd[i][0]);
+      float32x4_t vec_dfwxfw0011 = vmulq_f32(vec_dfw0, vec_xfw0);
+      float32x4_t vec_dfwxfw0110 = vmulq_f32(vec_dfw0, vec_xfw1);
+      vec_sxd.val[0] = vmulq_n_f32(vec_sxd.val[0], ptrGCoh[0]);
+      vec_sxd.val[1] = vmulq_n_f32(vec_sxd.val[1], ptrGCoh[0]);
+      vec_dfwxfw0011 = vmlaq_f32(vec_dfwxfw0011, vec_dfw1, vec_xfw1);
+      vec_dfwxfw0110 = vmlsq_f32(vec_dfwxfw0110, vec_dfw1, vec_xfw0);
+      vec_sxd.val[0] = vmlaq_n_f32(vec_sxd.val[0], vec_dfwxfw0011, ptrGCoh[1]);
+      vec_sxd.val[1] = vmlaq_n_f32(vec_sxd.val[1], vec_dfwxfw0110, ptrGCoh[1]);
+      vst2q_f32(&coherence_state->sxd[i][0], vec_sxd);
+    }
+
+    vec_sdSum = vaddq_f32(vec_sdSum, vec_sd);
+    vec_seSum = vaddq_f32(vec_seSum, vec_se);
+  }
+  {
+    float32x2_t vec_sdSum_total;
+    float32x2_t vec_seSum_total;
+    // A B C D
+    vec_sdSum_total =
+        vpadd_f32(vget_low_f32(vec_sdSum), vget_high_f32(vec_sdSum));
+    vec_seSum_total =
+        vpadd_f32(vget_low_f32(vec_seSum), vget_high_f32(vec_seSum));
+    // A+B C+D
+    vec_sdSum_total = vpadd_f32(vec_sdSum_total, vec_sdSum_total);
+    vec_seSum_total = vpadd_f32(vec_seSum_total, vec_seSum_total);
+    // A+B+C+D A+B+C+D
+    sdSum = vget_lane_f32(vec_sdSum_total, 0);
+    seSum = vget_lane_f32(vec_seSum_total, 0);
+  }
+
+  // scalar code for the remaining items.
+  for (; i < PART_LEN1; i++) {
+    coherence_state->sd[i] =
+        ptrGCoh[0] * coherence_state->sd[i] +
+        ptrGCoh[1] * (dfw[0][i] * dfw[0][i] + dfw[1][i] * dfw[1][i]);
+    coherence_state->se[i] =
+        ptrGCoh[0] * coherence_state->se[i] +
+        ptrGCoh[1] * (efw[0][i] * efw[0][i] + efw[1][i] * efw[1][i]);
+    // We threshold here to protect against the ill-effects of a zero farend.
+    // The threshold is not arbitrarily chosen, but balances protection and
+    // adverse interaction with the algorithm's tuning.
+    // TODO(bjornv): investigate further why this is so sensitive.
+    coherence_state->sx[i] =
+        ptrGCoh[0] * coherence_state->sx[i] +
+        ptrGCoh[1] *
+            WEBRTC_SPL_MAX(xfw[0][i] * xfw[0][i] + xfw[1][i] * xfw[1][i],
+                           WebRtcAec_kMinFarendPSD);
+
+    coherence_state->sde[i][0] =
+        ptrGCoh[0] * coherence_state->sde[i][0] +
+        ptrGCoh[1] * (dfw[0][i] * efw[0][i] + dfw[1][i] * efw[1][i]);
+    coherence_state->sde[i][1] =
+        ptrGCoh[0] * coherence_state->sde[i][1] +
+        ptrGCoh[1] * (dfw[0][i] * efw[1][i] - dfw[1][i] * efw[0][i]);
+
+    coherence_state->sxd[i][0] =
+        ptrGCoh[0] * coherence_state->sxd[i][0] +
+        ptrGCoh[1] * (dfw[0][i] * xfw[0][i] + dfw[1][i] * xfw[1][i]);
+    coherence_state->sxd[i][1] =
+        ptrGCoh[0] * coherence_state->sxd[i][1] +
+        ptrGCoh[1] * (dfw[0][i] * xfw[1][i] - dfw[1][i] * xfw[0][i]);
+
+    sdSum += coherence_state->sd[i];
+    seSum += coherence_state->se[i];
+  }
+
+  // Divergent filter safeguard update.
+  *filter_divergence_state =
+      (*filter_divergence_state ? 1.05f : 1.0f) * seSum > sdSum;
+
+  // Signal extreme filter divergence if the error is significantly larger
+  // than the nearend (13 dB).
+  *extreme_filter_divergence = (seSum > (19.95f * sdSum));
+}
+
+// Window time domain data to be used by the fft.
+static void WindowDataNEON(float* x_windowed, const float* x) {
+  int i;
+  for (i = 0; i < PART_LEN; i += 4) {
+    const float32x4_t vec_Buf1 = vld1q_f32(&x[i]);
+    const float32x4_t vec_Buf2 = vld1q_f32(&x[PART_LEN + i]);
+    const float32x4_t vec_sqrtHanning = vld1q_f32(&WebRtcAec_sqrtHanning[i]);
+    // A B C D
+    float32x4_t vec_sqrtHanning_rev =
+        vld1q_f32(&WebRtcAec_sqrtHanning[PART_LEN - i - 3]);
+    // B A D C
+    vec_sqrtHanning_rev = vrev64q_f32(vec_sqrtHanning_rev);
+    // D C B A
+    vec_sqrtHanning_rev = vcombine_f32(vget_high_f32(vec_sqrtHanning_rev),
+                                       vget_low_f32(vec_sqrtHanning_rev));
+    vst1q_f32(&x_windowed[i], vmulq_f32(vec_Buf1, vec_sqrtHanning));
+    vst1q_f32(&x_windowed[PART_LEN + i],
+              vmulq_f32(vec_Buf2, vec_sqrtHanning_rev));
+  }
+}
+
+// Puts fft output data into a complex valued array.
+static void StoreAsComplexNEON(const float* data,
+                               float data_complex[2][PART_LEN1]) {
+  int i;
+  for (i = 0; i < PART_LEN; i += 4) {
+    const float32x4x2_t vec_data = vld2q_f32(&data[2 * i]);
+    vst1q_f32(&data_complex[0][i], vec_data.val[0]);
+    vst1q_f32(&data_complex[1][i], vec_data.val[1]);
+  }
+  // fix beginning/end values
+  data_complex[1][0] = 0;
+  data_complex[1][PART_LEN] = 0;
+  data_complex[0][0] = data[0];
+  data_complex[0][PART_LEN] = data[1];
+}
+
+static void ComputeCoherenceNEON(const CoherenceState* coherence_state,
+                                 float* cohde,
+                                 float* cohxd) {
+  int i;
+
+  {
+    const float32x4_t vec_1eminus10 = vdupq_n_f32(1e-10f);
+
+    // Subband coherence
+    for (i = 0; i + 3 < PART_LEN1; i += 4) {
+      const float32x4_t vec_sd = vld1q_f32(&coherence_state->sd[i]);
+      const float32x4_t vec_se = vld1q_f32(&coherence_state->se[i]);
+      const float32x4_t vec_sx = vld1q_f32(&coherence_state->sx[i]);
+      const float32x4_t vec_sdse = vmlaq_f32(vec_1eminus10, vec_sd, vec_se);
+      const float32x4_t vec_sdsx = vmlaq_f32(vec_1eminus10, vec_sd, vec_sx);
+      float32x4x2_t vec_sde = vld2q_f32(&coherence_state->sde[i][0]);
+      float32x4x2_t vec_sxd = vld2q_f32(&coherence_state->sxd[i][0]);
+      float32x4_t vec_cohde = vmulq_f32(vec_sde.val[0], vec_sde.val[0]);
+      float32x4_t vec_cohxd = vmulq_f32(vec_sxd.val[0], vec_sxd.val[0]);
+      vec_cohde = vmlaq_f32(vec_cohde, vec_sde.val[1], vec_sde.val[1]);
+      vec_cohde = vdivq_f32(vec_cohde, vec_sdse);
+      vec_cohxd = vmlaq_f32(vec_cohxd, vec_sxd.val[1], vec_sxd.val[1]);
+      vec_cohxd = vdivq_f32(vec_cohxd, vec_sdsx);
+
+      vst1q_f32(&cohde[i], vec_cohde);
+      vst1q_f32(&cohxd[i], vec_cohxd);
+    }
+  }
+  // scalar code for the remaining items.
+  for (; i < PART_LEN1; i++) {
+    cohde[i] = (coherence_state->sde[i][0] * coherence_state->sde[i][0] +
+                coherence_state->sde[i][1] * coherence_state->sde[i][1]) /
+               (coherence_state->sd[i] * coherence_state->se[i] + 1e-10f);
+    cohxd[i] = (coherence_state->sxd[i][0] * coherence_state->sxd[i][0] +
+                coherence_state->sxd[i][1] * coherence_state->sxd[i][1]) /
+               (coherence_state->sx[i] * coherence_state->sd[i] + 1e-10f);
+  }
+}
+
+void WebRtcAec_InitAec_neon(void) {
+  WebRtcAec_FilterFar = FilterFarNEON;
+  WebRtcAec_ScaleErrorSignal = ScaleErrorSignalNEON;
+  WebRtcAec_FilterAdaptation = FilterAdaptationNEON;
+  WebRtcAec_Overdrive = OverdriveNEON;
+  WebRtcAec_Suppress = SuppressNEON;
+  WebRtcAec_ComputeCoherence = ComputeCoherenceNEON;
+  WebRtcAec_UpdateCoherenceSpectra = UpdateCoherenceSpectraNEON;
+  WebRtcAec_StoreAsComplex = StoreAsComplexNEON;
+  WebRtcAec_PartitionDelay = PartitionDelayNEON;
+  WebRtcAec_WindowData = WindowDataNEON;
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec/aec_core_optimized_methods.h b/modules/audio_processing/aec/aec_core_optimized_methods.h
new file mode 100644
index 0000000..a8a20e9
--- /dev/null
+++ b/modules/audio_processing/aec/aec_core_optimized_methods.h
@@ -0,0 +1,80 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_OPTIMIZED_METHODS_H_
+#define MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_OPTIMIZED_METHODS_H_
+
+#include <memory>
+
+#include "modules/audio_processing/aec/aec_core.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+typedef void (*WebRtcAecFilterFar)(
+    int num_partitions,
+    int x_fft_buf_block_pos,
+    float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+    float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+    float y_fft[2][PART_LEN1]);
+extern WebRtcAecFilterFar WebRtcAec_FilterFar;
+typedef void (*WebRtcAecScaleErrorSignal)(float mu,
+                                          float error_threshold,
+                                          float x_pow[PART_LEN1],
+                                          float ef[2][PART_LEN1]);
+extern WebRtcAecScaleErrorSignal WebRtcAec_ScaleErrorSignal;
+typedef void (*WebRtcAecFilterAdaptation)(
+    const OouraFft& ooura_fft,
+    int num_partitions,
+    int x_fft_buf_block_pos,
+    float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+    float e_fft[2][PART_LEN1],
+    float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]);
+extern WebRtcAecFilterAdaptation WebRtcAec_FilterAdaptation;
+
+typedef void (*WebRtcAecOverdrive)(float overdrive_scaling,
+                                   const float hNlFb,
+                                   float hNl[PART_LEN1]);
+extern WebRtcAecOverdrive WebRtcAec_Overdrive;
+
+typedef void (*WebRtcAecSuppress)(const float hNl[PART_LEN1],
+                                  float efw[2][PART_LEN1]);
+extern WebRtcAecSuppress WebRtcAec_Suppress;
+
+typedef void (*WebRtcAecComputeCoherence)(const CoherenceState* coherence_state,
+                                          float* cohde,
+                                          float* cohxd);
+extern WebRtcAecComputeCoherence WebRtcAec_ComputeCoherence;
+
+typedef void (*WebRtcAecUpdateCoherenceSpectra)(int mult,
+                                                bool extended_filter_enabled,
+                                                float efw[2][PART_LEN1],
+                                                float dfw[2][PART_LEN1],
+                                                float xfw[2][PART_LEN1],
+                                                CoherenceState* coherence_state,
+                                                short* filter_divergence_state,
+                                                int* extreme_filter_divergence);
+extern WebRtcAecUpdateCoherenceSpectra WebRtcAec_UpdateCoherenceSpectra;
+
+typedef int (*WebRtcAecPartitionDelay)(
+    int num_partitions,
+    float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]);
+extern WebRtcAecPartitionDelay WebRtcAec_PartitionDelay;
+
+typedef void (*WebRtcAecStoreAsComplex)(const float* data,
+                                        float data_complex[2][PART_LEN1]);
+extern WebRtcAecStoreAsComplex WebRtcAec_StoreAsComplex;
+
+typedef void (*WebRtcAecWindowData)(float* x_windowed, const float* x);
+extern WebRtcAecWindowData WebRtcAec_WindowData;
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_OPTIMIZED_METHODS_H_
diff --git a/modules/audio_processing/aec/aec_core_sse2.cc b/modules/audio_processing/aec/aec_core_sse2.cc
new file mode 100644
index 0000000..0532662
--- /dev/null
+++ b/modules/audio_processing/aec/aec_core_sse2.cc
@@ -0,0 +1,751 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * The core AEC algorithm, SSE2 version of speed-critical functions.
+ */
+
+#include <emmintrin.h>
+#include <math.h>
+#include <string.h>  // memset
+
+extern "C" {
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+}
+#include "modules/audio_processing/aec/aec_common.h"
+#include "modules/audio_processing/aec/aec_core_optimized_methods.h"
+#include "modules/audio_processing/utility/ooura_fft.h"
+
+namespace webrtc {
+
+__inline static float MulRe(float aRe, float aIm, float bRe, float bIm) {
+  return aRe * bRe - aIm * bIm;
+}
+
+__inline static float MulIm(float aRe, float aIm, float bRe, float bIm) {
+  return aRe * bIm + aIm * bRe;
+}
+
+static void FilterFarSSE2(int num_partitions,
+                          int x_fft_buf_block_pos,
+                          float x_fft_buf[2]
+                                         [kExtendedNumPartitions * PART_LEN1],
+                          float h_fft_buf[2]
+                                         [kExtendedNumPartitions * PART_LEN1],
+                          float y_fft[2][PART_LEN1]) {
+  int i;
+  for (i = 0; i < num_partitions; i++) {
+    int j;
+    int xPos = (i + x_fft_buf_block_pos) * PART_LEN1;
+    int pos = i * PART_LEN1;
+    // Check for wrap
+    if (i + x_fft_buf_block_pos >= num_partitions) {
+      xPos -= num_partitions * (PART_LEN1);
+    }
+
+    // vectorized code (four at once)
+    for (j = 0; j + 3 < PART_LEN1; j += 4) {
+      const __m128 x_fft_buf_re = _mm_loadu_ps(&x_fft_buf[0][xPos + j]);
+      const __m128 x_fft_buf_im = _mm_loadu_ps(&x_fft_buf[1][xPos + j]);
+      const __m128 h_fft_buf_re = _mm_loadu_ps(&h_fft_buf[0][pos + j]);
+      const __m128 h_fft_buf_im = _mm_loadu_ps(&h_fft_buf[1][pos + j]);
+      const __m128 y_fft_re = _mm_loadu_ps(&y_fft[0][j]);
+      const __m128 y_fft_im = _mm_loadu_ps(&y_fft[1][j]);
+      const __m128 a = _mm_mul_ps(x_fft_buf_re, h_fft_buf_re);
+      const __m128 b = _mm_mul_ps(x_fft_buf_im, h_fft_buf_im);
+      const __m128 c = _mm_mul_ps(x_fft_buf_re, h_fft_buf_im);
+      const __m128 d = _mm_mul_ps(x_fft_buf_im, h_fft_buf_re);
+      const __m128 e = _mm_sub_ps(a, b);
+      const __m128 f = _mm_add_ps(c, d);
+      const __m128 g = _mm_add_ps(y_fft_re, e);
+      const __m128 h = _mm_add_ps(y_fft_im, f);
+      _mm_storeu_ps(&y_fft[0][j], g);
+      _mm_storeu_ps(&y_fft[1][j], h);
+    }
+    // scalar code for the remaining items.
+    for (; j < PART_LEN1; j++) {
+      y_fft[0][j] += MulRe(x_fft_buf[0][xPos + j], x_fft_buf[1][xPos + j],
+                           h_fft_buf[0][pos + j], h_fft_buf[1][pos + j]);
+      y_fft[1][j] += MulIm(x_fft_buf[0][xPos + j], x_fft_buf[1][xPos + j],
+                           h_fft_buf[0][pos + j], h_fft_buf[1][pos + j]);
+    }
+  }
+}
+
+static void ScaleErrorSignalSSE2(float mu,
+                                 float error_threshold,
+                                 float x_pow[PART_LEN1],
+                                 float ef[2][PART_LEN1]) {
+  const __m128 k1e_10f = _mm_set1_ps(1e-10f);
+  const __m128 kMu = _mm_set1_ps(mu);
+  const __m128 kThresh = _mm_set1_ps(error_threshold);
+
+  int i;
+  // vectorized code (four at once)
+  for (i = 0; i + 3 < PART_LEN1; i += 4) {
+    const __m128 x_pow_local = _mm_loadu_ps(&x_pow[i]);
+    const __m128 ef_re_base = _mm_loadu_ps(&ef[0][i]);
+    const __m128 ef_im_base = _mm_loadu_ps(&ef[1][i]);
+
+    const __m128 xPowPlus = _mm_add_ps(x_pow_local, k1e_10f);
+    __m128 ef_re = _mm_div_ps(ef_re_base, xPowPlus);
+    __m128 ef_im = _mm_div_ps(ef_im_base, xPowPlus);
+    const __m128 ef_re2 = _mm_mul_ps(ef_re, ef_re);
+    const __m128 ef_im2 = _mm_mul_ps(ef_im, ef_im);
+    const __m128 ef_sum2 = _mm_add_ps(ef_re2, ef_im2);
+    const __m128 absEf = _mm_sqrt_ps(ef_sum2);
+    const __m128 bigger = _mm_cmpgt_ps(absEf, kThresh);
+    __m128 absEfPlus = _mm_add_ps(absEf, k1e_10f);
+    const __m128 absEfInv = _mm_div_ps(kThresh, absEfPlus);
+    __m128 ef_re_if = _mm_mul_ps(ef_re, absEfInv);
+    __m128 ef_im_if = _mm_mul_ps(ef_im, absEfInv);
+    ef_re_if = _mm_and_ps(bigger, ef_re_if);
+    ef_im_if = _mm_and_ps(bigger, ef_im_if);
+    ef_re = _mm_andnot_ps(bigger, ef_re);
+    ef_im = _mm_andnot_ps(bigger, ef_im);
+    ef_re = _mm_or_ps(ef_re, ef_re_if);
+    ef_im = _mm_or_ps(ef_im, ef_im_if);
+    ef_re = _mm_mul_ps(ef_re, kMu);
+    ef_im = _mm_mul_ps(ef_im, kMu);
+
+    _mm_storeu_ps(&ef[0][i], ef_re);
+    _mm_storeu_ps(&ef[1][i], ef_im);
+  }
+  // scalar code for the remaining items.
+  {
+    for (; i < (PART_LEN1); i++) {
+      float abs_ef;
+      ef[0][i] /= (x_pow[i] + 1e-10f);
+      ef[1][i] /= (x_pow[i] + 1e-10f);
+      abs_ef = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]);
+
+      if (abs_ef > error_threshold) {
+        abs_ef = error_threshold / (abs_ef + 1e-10f);
+        ef[0][i] *= abs_ef;
+        ef[1][i] *= abs_ef;
+      }
+
+      // Stepsize factor
+      ef[0][i] *= mu;
+      ef[1][i] *= mu;
+    }
+  }
+}
+
+static void FilterAdaptationSSE2(
+    const OouraFft& ooura_fft,
+    int num_partitions,
+    int x_fft_buf_block_pos,
+    float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+    float e_fft[2][PART_LEN1],
+    float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]) {
+  float fft[PART_LEN2];
+  int i, j;
+  for (i = 0; i < num_partitions; i++) {
+    int xPos = (i + x_fft_buf_block_pos) * (PART_LEN1);
+    int pos = i * PART_LEN1;
+    // Check for wrap
+    if (i + x_fft_buf_block_pos >= num_partitions) {
+      xPos -= num_partitions * PART_LEN1;
+    }
+
+    // Process the whole array...
+    for (j = 0; j < PART_LEN; j += 4) {
+      // Load x_fft_buf and e_fft.
+      const __m128 x_fft_buf_re = _mm_loadu_ps(&x_fft_buf[0][xPos + j]);
+      const __m128 x_fft_buf_im = _mm_loadu_ps(&x_fft_buf[1][xPos + j]);
+      const __m128 e_fft_re = _mm_loadu_ps(&e_fft[0][j]);
+      const __m128 e_fft_im = _mm_loadu_ps(&e_fft[1][j]);
+      // Calculate the product of conjugate(x_fft_buf) by e_fft.
+      //   re(conjugate(a) * b) = aRe * bRe + aIm * bIm
+      //   im(conjugate(a) * b)=  aRe * bIm - aIm * bRe
+      const __m128 a = _mm_mul_ps(x_fft_buf_re, e_fft_re);
+      const __m128 b = _mm_mul_ps(x_fft_buf_im, e_fft_im);
+      const __m128 c = _mm_mul_ps(x_fft_buf_re, e_fft_im);
+      const __m128 d = _mm_mul_ps(x_fft_buf_im, e_fft_re);
+      const __m128 e = _mm_add_ps(a, b);
+      const __m128 f = _mm_sub_ps(c, d);
+      // Interleave real and imaginary parts.
+      const __m128 g = _mm_unpacklo_ps(e, f);
+      const __m128 h = _mm_unpackhi_ps(e, f);
+      // Store
+      _mm_storeu_ps(&fft[2 * j + 0], g);
+      _mm_storeu_ps(&fft[2 * j + 4], h);
+    }
+    // ... and fixup the first imaginary entry.
+    fft[1] =
+        MulRe(x_fft_buf[0][xPos + PART_LEN], -x_fft_buf[1][xPos + PART_LEN],
+              e_fft[0][PART_LEN], e_fft[1][PART_LEN]);
+
+    ooura_fft.InverseFft(fft);
+    memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN);
+
+    // fft scaling
+    {
+      float scale = 2.0f / PART_LEN2;
+      const __m128 scale_ps = _mm_load_ps1(&scale);
+      for (j = 0; j < PART_LEN; j += 4) {
+        const __m128 fft_ps = _mm_loadu_ps(&fft[j]);
+        const __m128 fft_scale = _mm_mul_ps(fft_ps, scale_ps);
+        _mm_storeu_ps(&fft[j], fft_scale);
+      }
+    }
+    ooura_fft.Fft(fft);
+
+    {
+      float wt1 = h_fft_buf[1][pos];
+      h_fft_buf[0][pos + PART_LEN] += fft[1];
+      for (j = 0; j < PART_LEN; j += 4) {
+        __m128 wtBuf_re = _mm_loadu_ps(&h_fft_buf[0][pos + j]);
+        __m128 wtBuf_im = _mm_loadu_ps(&h_fft_buf[1][pos + j]);
+        const __m128 fft0 = _mm_loadu_ps(&fft[2 * j + 0]);
+        const __m128 fft4 = _mm_loadu_ps(&fft[2 * j + 4]);
+        const __m128 fft_re =
+            _mm_shuffle_ps(fft0, fft4, _MM_SHUFFLE(2, 0, 2, 0));
+        const __m128 fft_im =
+            _mm_shuffle_ps(fft0, fft4, _MM_SHUFFLE(3, 1, 3, 1));
+        wtBuf_re = _mm_add_ps(wtBuf_re, fft_re);
+        wtBuf_im = _mm_add_ps(wtBuf_im, fft_im);
+        _mm_storeu_ps(&h_fft_buf[0][pos + j], wtBuf_re);
+        _mm_storeu_ps(&h_fft_buf[1][pos + j], wtBuf_im);
+      }
+      h_fft_buf[1][pos] = wt1;
+    }
+  }
+}
+
+static __m128 mm_pow_ps(__m128 a, __m128 b) {
+  // a^b = exp2(b * log2(a))
+  //   exp2(x) and log2(x) are calculated using polynomial approximations.
+  __m128 log2_a, b_log2_a, a_exp_b;
+
+  // Calculate log2(x), x = a.
+  {
+    // To calculate log2(x), we decompose x like this:
+    //   x = y * 2^n
+    //     n is an integer
+    //     y is in the [1.0, 2.0) range
+    //
+    //   log2(x) = log2(y) + n
+    //     n       can be evaluated by playing with float representation.
+    //     log2(y) in a small range can be approximated, this code uses an order
+    //             five polynomial approximation. The coefficients have been
+    //             estimated with the Remez algorithm and the resulting
+    //             polynomial has a maximum relative error of 0.00086%.
+
+    // Compute n.
+    //    This is done by masking the exponent, shifting it into the top bit of
+    //    the mantissa, putting eight into the biased exponent (to shift/
+    //    compensate the fact that the exponent has been shifted in the top/
+    //    fractional part and finally getting rid of the implicit leading one
+    //    from the mantissa by substracting it out.
+    static const ALIGN16_BEG int float_exponent_mask[4] ALIGN16_END = {
+        0x7F800000, 0x7F800000, 0x7F800000, 0x7F800000};
+    static const ALIGN16_BEG int eight_biased_exponent[4] ALIGN16_END = {
+        0x43800000, 0x43800000, 0x43800000, 0x43800000};
+    static const ALIGN16_BEG int implicit_leading_one[4] ALIGN16_END = {
+        0x43BF8000, 0x43BF8000, 0x43BF8000, 0x43BF8000};
+    static const int shift_exponent_into_top_mantissa = 8;
+    const __m128 two_n =
+        _mm_and_ps(a, *(reinterpret_cast<const __m128*>(float_exponent_mask)));
+    const __m128 n_1 = _mm_castsi128_ps(_mm_srli_epi32(
+        _mm_castps_si128(two_n), shift_exponent_into_top_mantissa));
+    const __m128 n_0 =
+      _mm_or_ps(n_1, *(reinterpret_cast<const __m128*>(eight_biased_exponent)));
+    const __m128 n =
+      _mm_sub_ps(n_0, *(reinterpret_cast<const __m128*>(implicit_leading_one)));
+
+    // Compute y.
+    static const ALIGN16_BEG int mantissa_mask[4] ALIGN16_END = {
+        0x007FFFFF, 0x007FFFFF, 0x007FFFFF, 0x007FFFFF};
+    static const ALIGN16_BEG int zero_biased_exponent_is_one[4] ALIGN16_END = {
+        0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000};
+    const __m128 mantissa =
+        _mm_and_ps(a, *(reinterpret_cast<const __m128*>(mantissa_mask)));
+    const __m128 y =
+        _mm_or_ps(mantissa,
+               *(reinterpret_cast<const __m128*>(zero_biased_exponent_is_one)));
+
+    // Approximate log2(y) ~= (y - 1) * pol5(y).
+    //    pol5(y) = C5 * y^5 + C4 * y^4 + C3 * y^3 + C2 * y^2 + C1 * y + C0
+    static const ALIGN16_BEG float ALIGN16_END C5[4] = {
+        -3.4436006e-2f, -3.4436006e-2f, -3.4436006e-2f, -3.4436006e-2f};
+    static const ALIGN16_BEG float ALIGN16_END C4[4] = {
+        3.1821337e-1f, 3.1821337e-1f, 3.1821337e-1f, 3.1821337e-1f};
+    static const ALIGN16_BEG float ALIGN16_END C3[4] = {
+        -1.2315303f, -1.2315303f, -1.2315303f, -1.2315303f};
+    static const ALIGN16_BEG float ALIGN16_END C2[4] = {2.5988452f, 2.5988452f,
+                                                        2.5988452f, 2.5988452f};
+    static const ALIGN16_BEG float ALIGN16_END C1[4] = {
+        -3.3241990f, -3.3241990f, -3.3241990f, -3.3241990f};
+    static const ALIGN16_BEG float ALIGN16_END C0[4] = {3.1157899f, 3.1157899f,
+                                                        3.1157899f, 3.1157899f};
+    const __m128 pol5_y_0 =
+        _mm_mul_ps(y, *(reinterpret_cast<const __m128*>(C5)));
+    const __m128 pol5_y_1 =
+        _mm_add_ps(pol5_y_0, *(reinterpret_cast<const __m128*>(C4)));
+    const __m128 pol5_y_2 = _mm_mul_ps(pol5_y_1, y);
+    const __m128 pol5_y_3 =
+        _mm_add_ps(pol5_y_2, *(reinterpret_cast<const __m128*>(C3)));
+    const __m128 pol5_y_4 = _mm_mul_ps(pol5_y_3, y);
+    const __m128 pol5_y_5 =
+        _mm_add_ps(pol5_y_4, *(reinterpret_cast<const __m128*>(C2)));
+    const __m128 pol5_y_6 = _mm_mul_ps(pol5_y_5, y);
+    const __m128 pol5_y_7 =
+        _mm_add_ps(pol5_y_6, *(reinterpret_cast<const __m128*>(C1)));
+    const __m128 pol5_y_8 = _mm_mul_ps(pol5_y_7, y);
+    const __m128 pol5_y =
+        _mm_add_ps(pol5_y_8, *(reinterpret_cast<const __m128*>(C0)));
+    const __m128 y_minus_one =
+        _mm_sub_ps(y,
+               *(reinterpret_cast<const __m128*>(zero_biased_exponent_is_one)));
+    const __m128 log2_y = _mm_mul_ps(y_minus_one, pol5_y);
+
+    // Combine parts.
+    log2_a = _mm_add_ps(n, log2_y);
+  }
+
+  // b * log2(a)
+  b_log2_a = _mm_mul_ps(b, log2_a);
+
+  // Calculate exp2(x), x = b * log2(a).
+  {
+    // To calculate 2^x, we decompose x like this:
+    //   x = n + y
+    //     n is an integer, the value of x - 0.5 rounded down, therefore
+    //     y is in the [0.5, 1.5) range
+    //
+    //   2^x = 2^n * 2^y
+    //     2^n can be evaluated by playing with float representation.
+    //     2^y in a small range can be approximated, this code uses an order two
+    //         polynomial approximation. The coefficients have been estimated
+    //         with the Remez algorithm and the resulting polynomial has a
+    //         maximum relative error of 0.17%.
+
+    // To avoid over/underflow, we reduce the range of input to ]-127, 129].
+    static const ALIGN16_BEG float max_input[4] ALIGN16_END = {129.f, 129.f,
+                                                               129.f, 129.f};
+    static const ALIGN16_BEG float min_input[4] ALIGN16_END = {
+        -126.99999f, -126.99999f, -126.99999f, -126.99999f};
+    const __m128 x_min =
+        _mm_min_ps(b_log2_a, *(reinterpret_cast<const __m128*>(max_input)));
+    const __m128 x_max =
+        _mm_max_ps(x_min, *(reinterpret_cast<const __m128*>(min_input)));
+    // Compute n.
+    static const ALIGN16_BEG float half[4] ALIGN16_END = {0.5f, 0.5f, 0.5f,
+                                                          0.5f};
+    const __m128 x_minus_half =
+        _mm_sub_ps(x_max, *(reinterpret_cast<const __m128*>(half)));
+    const __m128i x_minus_half_floor = _mm_cvtps_epi32(x_minus_half);
+    // Compute 2^n.
+    static const ALIGN16_BEG int float_exponent_bias[4] ALIGN16_END = {
+        127, 127, 127, 127};
+    static const int float_exponent_shift = 23;
+    const __m128i two_n_exponent =
+        _mm_add_epi32(x_minus_half_floor,
+                      *(reinterpret_cast<const __m128i*>(float_exponent_bias)));
+    const __m128 two_n =
+        _mm_castsi128_ps(_mm_slli_epi32(two_n_exponent, float_exponent_shift));
+    // Compute y.
+    const __m128 y = _mm_sub_ps(x_max, _mm_cvtepi32_ps(x_minus_half_floor));
+    // Approximate 2^y ~= C2 * y^2 + C1 * y + C0.
+    static const ALIGN16_BEG float C2[4] ALIGN16_END = {
+        3.3718944e-1f, 3.3718944e-1f, 3.3718944e-1f, 3.3718944e-1f};
+    static const ALIGN16_BEG float C1[4] ALIGN16_END = {
+        6.5763628e-1f, 6.5763628e-1f, 6.5763628e-1f, 6.5763628e-1f};
+    static const ALIGN16_BEG float C0[4] ALIGN16_END = {1.0017247f, 1.0017247f,
+                                                        1.0017247f, 1.0017247f};
+    const __m128 exp2_y_0 =
+        _mm_mul_ps(y, *(reinterpret_cast<const __m128*>(C2)));
+    const __m128 exp2_y_1 =
+        _mm_add_ps(exp2_y_0, *(reinterpret_cast<const __m128*>(C1)));
+    const __m128 exp2_y_2 = _mm_mul_ps(exp2_y_1, y);
+    const __m128 exp2_y =
+        _mm_add_ps(exp2_y_2, *(reinterpret_cast<const __m128*>(C0)));
+
+    // Combine parts.
+    a_exp_b = _mm_mul_ps(exp2_y, two_n);
+  }
+  return a_exp_b;
+}
+
+static void OverdriveSSE2(float overdrive_scaling,
+                          float hNlFb,
+                          float hNl[PART_LEN1]) {
+  int i;
+  const __m128 vec_hNlFb = _mm_set1_ps(hNlFb);
+  const __m128 vec_one = _mm_set1_ps(1.0f);
+  const __m128 vec_overdrive_scaling = _mm_set1_ps(overdrive_scaling);
+  // vectorized code (four at once)
+  for (i = 0; i + 3 < PART_LEN1; i += 4) {
+    // Weight subbands
+    __m128 vec_hNl = _mm_loadu_ps(&hNl[i]);
+    const __m128 vec_weightCurve = _mm_loadu_ps(&WebRtcAec_weightCurve[i]);
+    const __m128 bigger = _mm_cmpgt_ps(vec_hNl, vec_hNlFb);
+    const __m128 vec_weightCurve_hNlFb = _mm_mul_ps(vec_weightCurve, vec_hNlFb);
+    const __m128 vec_one_weightCurve = _mm_sub_ps(vec_one, vec_weightCurve);
+    const __m128 vec_one_weightCurve_hNl =
+        _mm_mul_ps(vec_one_weightCurve, vec_hNl);
+    const __m128 vec_if0 = _mm_andnot_ps(bigger, vec_hNl);
+    const __m128 vec_if1 = _mm_and_ps(
+        bigger, _mm_add_ps(vec_weightCurve_hNlFb, vec_one_weightCurve_hNl));
+    vec_hNl = _mm_or_ps(vec_if0, vec_if1);
+
+    const __m128 vec_overDriveCurve =
+        _mm_loadu_ps(&WebRtcAec_overDriveCurve[i]);
+    const __m128 vec_overDriveSm_overDriveCurve =
+        _mm_mul_ps(vec_overdrive_scaling, vec_overDriveCurve);
+    vec_hNl = mm_pow_ps(vec_hNl, vec_overDriveSm_overDriveCurve);
+    _mm_storeu_ps(&hNl[i], vec_hNl);
+  }
+  // scalar code for the remaining items.
+  for (; i < PART_LEN1; i++) {
+    // Weight subbands
+    if (hNl[i] > hNlFb) {
+      hNl[i] = WebRtcAec_weightCurve[i] * hNlFb +
+               (1 - WebRtcAec_weightCurve[i]) * hNl[i];
+    }
+    hNl[i] = powf(hNl[i], overdrive_scaling * WebRtcAec_overDriveCurve[i]);
+  }
+}
+
+static void SuppressSSE2(const float hNl[PART_LEN1], float efw[2][PART_LEN1]) {
+  int i;
+  const __m128 vec_minus_one = _mm_set1_ps(-1.0f);
+  // vectorized code (four at once)
+  for (i = 0; i + 3 < PART_LEN1; i += 4) {
+    // Suppress error signal
+    __m128 vec_hNl = _mm_loadu_ps(&hNl[i]);
+    __m128 vec_efw_re = _mm_loadu_ps(&efw[0][i]);
+    __m128 vec_efw_im = _mm_loadu_ps(&efw[1][i]);
+    vec_efw_re = _mm_mul_ps(vec_efw_re, vec_hNl);
+    vec_efw_im = _mm_mul_ps(vec_efw_im, vec_hNl);
+
+    // Ooura fft returns incorrect sign on imaginary component. It matters
+    // here because we are making an additive change with comfort noise.
+    vec_efw_im = _mm_mul_ps(vec_efw_im, vec_minus_one);
+    _mm_storeu_ps(&efw[0][i], vec_efw_re);
+    _mm_storeu_ps(&efw[1][i], vec_efw_im);
+  }
+  // scalar code for the remaining items.
+  for (; i < PART_LEN1; i++) {
+    // Suppress error signal
+    efw[0][i] *= hNl[i];
+    efw[1][i] *= hNl[i];
+
+    // Ooura fft returns incorrect sign on imaginary component. It matters
+    // here because we are making an additive change with comfort noise.
+    efw[1][i] *= -1;
+  }
+}
+
+__inline static void _mm_add_ps_4x1(__m128 sum, float* dst) {
+  // A+B C+D
+  sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(0, 0, 3, 2)));
+  // A+B+C+D A+B+C+D
+  sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 1, 1, 1)));
+  _mm_store_ss(dst, sum);
+}
+
+static int PartitionDelaySSE2(
+    int num_partitions,
+    float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]) {
+  // Measures the energy in each filter partition and returns the partition with
+  // highest energy.
+  // TODO(bjornv): Spread computational cost by computing one partition per
+  // block?
+  float wfEnMax = 0;
+  int i;
+  int delay = 0;
+
+  for (i = 0; i < num_partitions; i++) {
+    int j;
+    int pos = i * PART_LEN1;
+    float wfEn = 0;
+    __m128 vec_wfEn = _mm_set1_ps(0.0f);
+    // vectorized code (four at once)
+    for (j = 0; j + 3 < PART_LEN1; j += 4) {
+      const __m128 vec_wfBuf0 = _mm_loadu_ps(&h_fft_buf[0][pos + j]);
+      const __m128 vec_wfBuf1 = _mm_loadu_ps(&h_fft_buf[1][pos + j]);
+      vec_wfEn = _mm_add_ps(vec_wfEn, _mm_mul_ps(vec_wfBuf0, vec_wfBuf0));
+      vec_wfEn = _mm_add_ps(vec_wfEn, _mm_mul_ps(vec_wfBuf1, vec_wfBuf1));
+    }
+    _mm_add_ps_4x1(vec_wfEn, &wfEn);
+
+    // scalar code for the remaining items.
+    for (; j < PART_LEN1; j++) {
+      wfEn += h_fft_buf[0][pos + j] * h_fft_buf[0][pos + j] +
+              h_fft_buf[1][pos + j] * h_fft_buf[1][pos + j];
+    }
+
+    if (wfEn > wfEnMax) {
+      wfEnMax = wfEn;
+      delay = i;
+    }
+  }
+  return delay;
+}
+
+// Updates the following smoothed  Power Spectral Densities (PSD):
+//  - sd  : near-end
+//  - se  : residual echo
+//  - sx  : far-end
+//  - sde : cross-PSD of near-end and residual echo
+//  - sxd : cross-PSD of near-end and far-end
+//
+// In addition to updating the PSDs, also the filter diverge state is determined
+// upon actions are taken.
+static void UpdateCoherenceSpectraSSE2(int mult,
+                                       bool extended_filter_enabled,
+                                       float efw[2][PART_LEN1],
+                                       float dfw[2][PART_LEN1],
+                                       float xfw[2][PART_LEN1],
+                                       CoherenceState* coherence_state,
+                                       short* filter_divergence_state,
+                                       int* extreme_filter_divergence) {
+  // Power estimate smoothing coefficients.
+  const float* ptrGCoh =
+      extended_filter_enabled
+          ? WebRtcAec_kExtendedSmoothingCoefficients[mult - 1]
+          : WebRtcAec_kNormalSmoothingCoefficients[mult - 1];
+  int i;
+  float sdSum = 0, seSum = 0;
+  const __m128 vec_15 = _mm_set1_ps(WebRtcAec_kMinFarendPSD);
+  const __m128 vec_GCoh0 = _mm_set1_ps(ptrGCoh[0]);
+  const __m128 vec_GCoh1 = _mm_set1_ps(ptrGCoh[1]);
+  __m128 vec_sdSum = _mm_set1_ps(0.0f);
+  __m128 vec_seSum = _mm_set1_ps(0.0f);
+
+  for (i = 0; i + 3 < PART_LEN1; i += 4) {
+    const __m128 vec_dfw0 = _mm_loadu_ps(&dfw[0][i]);
+    const __m128 vec_dfw1 = _mm_loadu_ps(&dfw[1][i]);
+    const __m128 vec_efw0 = _mm_loadu_ps(&efw[0][i]);
+    const __m128 vec_efw1 = _mm_loadu_ps(&efw[1][i]);
+    const __m128 vec_xfw0 = _mm_loadu_ps(&xfw[0][i]);
+    const __m128 vec_xfw1 = _mm_loadu_ps(&xfw[1][i]);
+    __m128 vec_sd =
+        _mm_mul_ps(_mm_loadu_ps(&coherence_state->sd[i]), vec_GCoh0);
+    __m128 vec_se =
+        _mm_mul_ps(_mm_loadu_ps(&coherence_state->se[i]), vec_GCoh0);
+    __m128 vec_sx =
+        _mm_mul_ps(_mm_loadu_ps(&coherence_state->sx[i]), vec_GCoh0);
+    __m128 vec_dfw_sumsq = _mm_mul_ps(vec_dfw0, vec_dfw0);
+    __m128 vec_efw_sumsq = _mm_mul_ps(vec_efw0, vec_efw0);
+    __m128 vec_xfw_sumsq = _mm_mul_ps(vec_xfw0, vec_xfw0);
+    vec_dfw_sumsq = _mm_add_ps(vec_dfw_sumsq, _mm_mul_ps(vec_dfw1, vec_dfw1));
+    vec_efw_sumsq = _mm_add_ps(vec_efw_sumsq, _mm_mul_ps(vec_efw1, vec_efw1));
+    vec_xfw_sumsq = _mm_add_ps(vec_xfw_sumsq, _mm_mul_ps(vec_xfw1, vec_xfw1));
+    vec_xfw_sumsq = _mm_max_ps(vec_xfw_sumsq, vec_15);
+    vec_sd = _mm_add_ps(vec_sd, _mm_mul_ps(vec_dfw_sumsq, vec_GCoh1));
+    vec_se = _mm_add_ps(vec_se, _mm_mul_ps(vec_efw_sumsq, vec_GCoh1));
+    vec_sx = _mm_add_ps(vec_sx, _mm_mul_ps(vec_xfw_sumsq, vec_GCoh1));
+    _mm_storeu_ps(&coherence_state->sd[i], vec_sd);
+    _mm_storeu_ps(&coherence_state->se[i], vec_se);
+    _mm_storeu_ps(&coherence_state->sx[i], vec_sx);
+
+    {
+      const __m128 vec_3210 = _mm_loadu_ps(&coherence_state->sde[i][0]);
+      const __m128 vec_7654 = _mm_loadu_ps(&coherence_state->sde[i + 2][0]);
+      __m128 vec_a =
+          _mm_shuffle_ps(vec_3210, vec_7654, _MM_SHUFFLE(2, 0, 2, 0));
+      __m128 vec_b =
+          _mm_shuffle_ps(vec_3210, vec_7654, _MM_SHUFFLE(3, 1, 3, 1));
+      __m128 vec_dfwefw0011 = _mm_mul_ps(vec_dfw0, vec_efw0);
+      __m128 vec_dfwefw0110 = _mm_mul_ps(vec_dfw0, vec_efw1);
+      vec_a = _mm_mul_ps(vec_a, vec_GCoh0);
+      vec_b = _mm_mul_ps(vec_b, vec_GCoh0);
+      vec_dfwefw0011 =
+          _mm_add_ps(vec_dfwefw0011, _mm_mul_ps(vec_dfw1, vec_efw1));
+      vec_dfwefw0110 =
+          _mm_sub_ps(vec_dfwefw0110, _mm_mul_ps(vec_dfw1, vec_efw0));
+      vec_a = _mm_add_ps(vec_a, _mm_mul_ps(vec_dfwefw0011, vec_GCoh1));
+      vec_b = _mm_add_ps(vec_b, _mm_mul_ps(vec_dfwefw0110, vec_GCoh1));
+      _mm_storeu_ps(&coherence_state->sde[i][0], _mm_unpacklo_ps(vec_a, vec_b));
+      _mm_storeu_ps(&coherence_state->sde[i + 2][0],
+                    _mm_unpackhi_ps(vec_a, vec_b));
+    }
+
+    {
+      const __m128 vec_3210 = _mm_loadu_ps(&coherence_state->sxd[i][0]);
+      const __m128 vec_7654 = _mm_loadu_ps(&coherence_state->sxd[i + 2][0]);
+      __m128 vec_a =
+          _mm_shuffle_ps(vec_3210, vec_7654, _MM_SHUFFLE(2, 0, 2, 0));
+      __m128 vec_b =
+          _mm_shuffle_ps(vec_3210, vec_7654, _MM_SHUFFLE(3, 1, 3, 1));
+      __m128 vec_dfwxfw0011 = _mm_mul_ps(vec_dfw0, vec_xfw0);
+      __m128 vec_dfwxfw0110 = _mm_mul_ps(vec_dfw0, vec_xfw1);
+      vec_a = _mm_mul_ps(vec_a, vec_GCoh0);
+      vec_b = _mm_mul_ps(vec_b, vec_GCoh0);
+      vec_dfwxfw0011 =
+          _mm_add_ps(vec_dfwxfw0011, _mm_mul_ps(vec_dfw1, vec_xfw1));
+      vec_dfwxfw0110 =
+          _mm_sub_ps(vec_dfwxfw0110, _mm_mul_ps(vec_dfw1, vec_xfw0));
+      vec_a = _mm_add_ps(vec_a, _mm_mul_ps(vec_dfwxfw0011, vec_GCoh1));
+      vec_b = _mm_add_ps(vec_b, _mm_mul_ps(vec_dfwxfw0110, vec_GCoh1));
+      _mm_storeu_ps(&coherence_state->sxd[i][0], _mm_unpacklo_ps(vec_a, vec_b));
+      _mm_storeu_ps(&coherence_state->sxd[i + 2][0],
+                    _mm_unpackhi_ps(vec_a, vec_b));
+    }
+
+    vec_sdSum = _mm_add_ps(vec_sdSum, vec_sd);
+    vec_seSum = _mm_add_ps(vec_seSum, vec_se);
+  }
+
+  _mm_add_ps_4x1(vec_sdSum, &sdSum);
+  _mm_add_ps_4x1(vec_seSum, &seSum);
+
+  for (; i < PART_LEN1; i++) {
+    coherence_state->sd[i] =
+        ptrGCoh[0] * coherence_state->sd[i] +
+        ptrGCoh[1] * (dfw[0][i] * dfw[0][i] + dfw[1][i] * dfw[1][i]);
+    coherence_state->se[i] =
+        ptrGCoh[0] * coherence_state->se[i] +
+        ptrGCoh[1] * (efw[0][i] * efw[0][i] + efw[1][i] * efw[1][i]);
+    // We threshold here to protect against the ill-effects of a zero farend.
+    // The threshold is not arbitrarily chosen, but balances protection and
+    // adverse interaction with the algorithm's tuning.
+    // TODO(bjornv): investigate further why this is so sensitive.
+    coherence_state->sx[i] =
+        ptrGCoh[0] * coherence_state->sx[i] +
+        ptrGCoh[1] *
+            WEBRTC_SPL_MAX(xfw[0][i] * xfw[0][i] + xfw[1][i] * xfw[1][i],
+                           WebRtcAec_kMinFarendPSD);
+
+    coherence_state->sde[i][0] =
+        ptrGCoh[0] * coherence_state->sde[i][0] +
+        ptrGCoh[1] * (dfw[0][i] * efw[0][i] + dfw[1][i] * efw[1][i]);
+    coherence_state->sde[i][1] =
+        ptrGCoh[0] * coherence_state->sde[i][1] +
+        ptrGCoh[1] * (dfw[0][i] * efw[1][i] - dfw[1][i] * efw[0][i]);
+
+    coherence_state->sxd[i][0] =
+        ptrGCoh[0] * coherence_state->sxd[i][0] +
+        ptrGCoh[1] * (dfw[0][i] * xfw[0][i] + dfw[1][i] * xfw[1][i]);
+    coherence_state->sxd[i][1] =
+        ptrGCoh[0] * coherence_state->sxd[i][1] +
+        ptrGCoh[1] * (dfw[0][i] * xfw[1][i] - dfw[1][i] * xfw[0][i]);
+
+    sdSum += coherence_state->sd[i];
+    seSum += coherence_state->se[i];
+  }
+
+  // Divergent filter safeguard update.
+  *filter_divergence_state =
+      (*filter_divergence_state ? 1.05f : 1.0f) * seSum > sdSum;
+
+  // Signal extreme filter divergence if the error is significantly larger
+  // than the nearend (13 dB).
+  *extreme_filter_divergence = (seSum > (19.95f * sdSum));
+}
+
+// Window time domain data to be used by the fft.
+static void WindowDataSSE2(float* x_windowed, const float* x) {
+  int i;
+  for (i = 0; i < PART_LEN; i += 4) {
+    const __m128 vec_Buf1 = _mm_loadu_ps(&x[i]);
+    const __m128 vec_Buf2 = _mm_loadu_ps(&x[PART_LEN + i]);
+    const __m128 vec_sqrtHanning = _mm_load_ps(&WebRtcAec_sqrtHanning[i]);
+    // A B C D
+    __m128 vec_sqrtHanning_rev =
+        _mm_loadu_ps(&WebRtcAec_sqrtHanning[PART_LEN - i - 3]);
+    // D C B A
+    vec_sqrtHanning_rev = _mm_shuffle_ps(
+        vec_sqrtHanning_rev, vec_sqrtHanning_rev, _MM_SHUFFLE(0, 1, 2, 3));
+    _mm_storeu_ps(&x_windowed[i], _mm_mul_ps(vec_Buf1, vec_sqrtHanning));
+    _mm_storeu_ps(&x_windowed[PART_LEN + i],
+                  _mm_mul_ps(vec_Buf2, vec_sqrtHanning_rev));
+  }
+}
+
+// Puts fft output data into a complex valued array.
+static void StoreAsComplexSSE2(const float* data,
+                               float data_complex[2][PART_LEN1]) {
+  int i;
+  for (i = 0; i < PART_LEN; i += 4) {
+    const __m128 vec_fft0 = _mm_loadu_ps(&data[2 * i]);
+    const __m128 vec_fft4 = _mm_loadu_ps(&data[2 * i + 4]);
+    const __m128 vec_a =
+        _mm_shuffle_ps(vec_fft0, vec_fft4, _MM_SHUFFLE(2, 0, 2, 0));
+    const __m128 vec_b =
+        _mm_shuffle_ps(vec_fft0, vec_fft4, _MM_SHUFFLE(3, 1, 3, 1));
+    _mm_storeu_ps(&data_complex[0][i], vec_a);
+    _mm_storeu_ps(&data_complex[1][i], vec_b);
+  }
+  // fix beginning/end values
+  data_complex[1][0] = 0;
+  data_complex[1][PART_LEN] = 0;
+  data_complex[0][0] = data[0];
+  data_complex[0][PART_LEN] = data[1];
+}
+
+static void ComputeCoherenceSSE2(const CoherenceState* coherence_state,
+                                 float* cohde,
+                                 float* cohxd) {
+  int i;
+
+  {
+    const __m128 vec_1eminus10 = _mm_set1_ps(1e-10f);
+
+    // Subband coherence
+    for (i = 0; i + 3 < PART_LEN1; i += 4) {
+      const __m128 vec_sd = _mm_loadu_ps(&coherence_state->sd[i]);
+      const __m128 vec_se = _mm_loadu_ps(&coherence_state->se[i]);
+      const __m128 vec_sx = _mm_loadu_ps(&coherence_state->sx[i]);
+      const __m128 vec_sdse =
+          _mm_add_ps(vec_1eminus10, _mm_mul_ps(vec_sd, vec_se));
+      const __m128 vec_sdsx =
+          _mm_add_ps(vec_1eminus10, _mm_mul_ps(vec_sd, vec_sx));
+      const __m128 vec_sde_3210 = _mm_loadu_ps(&coherence_state->sde[i][0]);
+      const __m128 vec_sde_7654 = _mm_loadu_ps(&coherence_state->sde[i + 2][0]);
+      const __m128 vec_sxd_3210 = _mm_loadu_ps(&coherence_state->sxd[i][0]);
+      const __m128 vec_sxd_7654 = _mm_loadu_ps(&coherence_state->sxd[i + 2][0]);
+      const __m128 vec_sde_0 =
+          _mm_shuffle_ps(vec_sde_3210, vec_sde_7654, _MM_SHUFFLE(2, 0, 2, 0));
+      const __m128 vec_sde_1 =
+          _mm_shuffle_ps(vec_sde_3210, vec_sde_7654, _MM_SHUFFLE(3, 1, 3, 1));
+      const __m128 vec_sxd_0 =
+          _mm_shuffle_ps(vec_sxd_3210, vec_sxd_7654, _MM_SHUFFLE(2, 0, 2, 0));
+      const __m128 vec_sxd_1 =
+          _mm_shuffle_ps(vec_sxd_3210, vec_sxd_7654, _MM_SHUFFLE(3, 1, 3, 1));
+      __m128 vec_cohde = _mm_mul_ps(vec_sde_0, vec_sde_0);
+      __m128 vec_cohxd = _mm_mul_ps(vec_sxd_0, vec_sxd_0);
+      vec_cohde = _mm_add_ps(vec_cohde, _mm_mul_ps(vec_sde_1, vec_sde_1));
+      vec_cohde = _mm_div_ps(vec_cohde, vec_sdse);
+      vec_cohxd = _mm_add_ps(vec_cohxd, _mm_mul_ps(vec_sxd_1, vec_sxd_1));
+      vec_cohxd = _mm_div_ps(vec_cohxd, vec_sdsx);
+      _mm_storeu_ps(&cohde[i], vec_cohde);
+      _mm_storeu_ps(&cohxd[i], vec_cohxd);
+    }
+
+    // scalar code for the remaining items.
+    for (; i < PART_LEN1; i++) {
+      cohde[i] = (coherence_state->sde[i][0] * coherence_state->sde[i][0] +
+                  coherence_state->sde[i][1] * coherence_state->sde[i][1]) /
+                 (coherence_state->sd[i] * coherence_state->se[i] + 1e-10f);
+      cohxd[i] = (coherence_state->sxd[i][0] * coherence_state->sxd[i][0] +
+                  coherence_state->sxd[i][1] * coherence_state->sxd[i][1]) /
+                 (coherence_state->sx[i] * coherence_state->sd[i] + 1e-10f);
+    }
+  }
+}
+
+void WebRtcAec_InitAec_SSE2(void) {
+  WebRtcAec_FilterFar = FilterFarSSE2;
+  WebRtcAec_ScaleErrorSignal = ScaleErrorSignalSSE2;
+  WebRtcAec_FilterAdaptation = FilterAdaptationSSE2;
+  WebRtcAec_Overdrive = OverdriveSSE2;
+  WebRtcAec_Suppress = SuppressSSE2;
+  WebRtcAec_ComputeCoherence = ComputeCoherenceSSE2;
+  WebRtcAec_UpdateCoherenceSpectra = UpdateCoherenceSpectraSSE2;
+  WebRtcAec_StoreAsComplex = StoreAsComplexSSE2;
+  WebRtcAec_PartitionDelay = PartitionDelaySSE2;
+  WebRtcAec_WindowData = WindowDataSSE2;
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec/aec_resampler.cc b/modules/audio_processing/aec/aec_resampler.cc
new file mode 100644
index 0000000..2851c0b
--- /dev/null
+++ b/modules/audio_processing/aec/aec_resampler.cc
@@ -0,0 +1,207 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/* Resamples a signal to an arbitrary rate. Used by the AEC to compensate for
+ * clock skew by resampling the farend signal.
+ */
+
+#include "modules/audio_processing/aec/aec_resampler.h"
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "modules/audio_processing/aec/aec_core.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+enum { kEstimateLengthFrames = 400 };
+
+typedef struct {
+  float buffer[kResamplerBufferSize];
+  float position;
+
+  int deviceSampleRateHz;
+  int skewData[kEstimateLengthFrames];
+  int skewDataIndex;
+  float skewEstimate;
+} AecResampler;
+
+static int EstimateSkew(const int* rawSkew,
+                        int size,
+                        int deviceSampleRateHz,
+                        float* skewEst);
+
+void* WebRtcAec_CreateResampler() {
+  return malloc(sizeof(AecResampler));
+}
+
+int WebRtcAec_InitResampler(void* resampInst, int deviceSampleRateHz) {
+  AecResampler* obj = static_cast<AecResampler*>(resampInst);
+  memset(obj->buffer, 0, sizeof(obj->buffer));
+  obj->position = 0.0;
+
+  obj->deviceSampleRateHz = deviceSampleRateHz;
+  memset(obj->skewData, 0, sizeof(obj->skewData));
+  obj->skewDataIndex = 0;
+  obj->skewEstimate = 0.0;
+
+  return 0;
+}
+
+void WebRtcAec_FreeResampler(void* resampInst) {
+  AecResampler* obj = static_cast<AecResampler*>(resampInst);
+  free(obj);
+}
+
+void WebRtcAec_ResampleLinear(void* resampInst,
+                              const float* inspeech,
+                              size_t size,
+                              float skew,
+                              float* outspeech,
+                              size_t* size_out) {
+  AecResampler* obj = static_cast<AecResampler*>(resampInst);
+
+  float* y;
+  float be, tnew;
+  size_t tn, mm;
+
+  RTC_DCHECK_LE(size, 2 * FRAME_LEN);
+  RTC_DCHECK(resampInst);
+  RTC_DCHECK(inspeech);
+  RTC_DCHECK(outspeech);
+  RTC_DCHECK(size_out);
+
+  // Add new frame data in lookahead
+  memcpy(&obj->buffer[FRAME_LEN + kResamplingDelay], inspeech,
+         size * sizeof(inspeech[0]));
+
+  // Sample rate ratio
+  be = 1 + skew;
+
+  // Loop over input frame
+  mm = 0;
+  y = &obj->buffer[FRAME_LEN];  // Point at current frame
+
+  tnew = be * mm + obj->position;
+  tn = (size_t)tnew;
+
+  while (tn < size) {
+    // Interpolation
+    outspeech[mm] = y[tn] + (tnew - tn) * (y[tn + 1] - y[tn]);
+    mm++;
+
+    tnew = be * mm + obj->position;
+    tn = static_cast<int>(tnew);
+  }
+
+  *size_out = mm;
+  obj->position += (*size_out) * be - size;
+
+  // Shift buffer
+  memmove(obj->buffer, &obj->buffer[size],
+          (kResamplerBufferSize - size) * sizeof(obj->buffer[0]));
+}
+
+int WebRtcAec_GetSkew(void* resampInst, int rawSkew, float* skewEst) {
+  AecResampler* obj = static_cast<AecResampler*>(resampInst);
+  int err = 0;
+
+  if (obj->skewDataIndex < kEstimateLengthFrames) {
+    obj->skewData[obj->skewDataIndex] = rawSkew;
+    obj->skewDataIndex++;
+  } else if (obj->skewDataIndex == kEstimateLengthFrames) {
+    err = EstimateSkew(obj->skewData, kEstimateLengthFrames,
+                       obj->deviceSampleRateHz, skewEst);
+    obj->skewEstimate = *skewEst;
+    obj->skewDataIndex++;
+  } else {
+    *skewEst = obj->skewEstimate;
+  }
+
+  return err;
+}
+
+int EstimateSkew(const int* rawSkew,
+                 int size,
+                 int deviceSampleRateHz,
+                 float* skewEst) {
+  const int absLimitOuter = static_cast<int>(0.04f * deviceSampleRateHz);
+  const int absLimitInner = static_cast<int>(0.0025f * deviceSampleRateHz);
+  int i = 0;
+  int n = 0;
+  float rawAvg = 0;
+  float err = 0;
+  float rawAbsDev = 0;
+  int upperLimit = 0;
+  int lowerLimit = 0;
+  float cumSum = 0;
+  float x = 0;
+  float x2 = 0;
+  float y = 0;
+  float xy = 0;
+  float xAvg = 0;
+  float denom = 0;
+  float skew = 0;
+
+  *skewEst = 0;  // Set in case of error below.
+  for (i = 0; i < size; i++) {
+    if ((rawSkew[i] < absLimitOuter && rawSkew[i] > -absLimitOuter)) {
+      n++;
+      rawAvg += rawSkew[i];
+    }
+  }
+
+  if (n == 0) {
+    return -1;
+  }
+  RTC_DCHECK_GT(n, 0);
+  rawAvg /= n;
+
+  for (i = 0; i < size; i++) {
+    if ((rawSkew[i] < absLimitOuter && rawSkew[i] > -absLimitOuter)) {
+      err = rawSkew[i] - rawAvg;
+      rawAbsDev += err >= 0 ? err : -err;
+    }
+  }
+  RTC_DCHECK_GT(n, 0);
+  rawAbsDev /= n;
+  upperLimit = static_cast<int>(rawAvg + 5 * rawAbsDev + 1);  // +1 for ceiling.
+  lowerLimit = static_cast<int>(rawAvg - 5 * rawAbsDev - 1);  // -1 for floor.
+
+  n = 0;
+  for (i = 0; i < size; i++) {
+    if ((rawSkew[i] < absLimitInner && rawSkew[i] > -absLimitInner) ||
+        (rawSkew[i] < upperLimit && rawSkew[i] > lowerLimit)) {
+      n++;
+      cumSum += rawSkew[i];
+      x += n;
+      x2 += n * n;
+      y += cumSum;
+      xy += n * cumSum;
+    }
+  }
+
+  if (n == 0) {
+    return -1;
+  }
+  RTC_DCHECK_GT(n, 0);
+  xAvg = x / n;
+  denom = x2 - xAvg * x;
+
+  if (denom != 0) {
+    skew = (xy - xAvg * y) / denom;
+  }
+
+  *skewEst = skew;
+  return 0;
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec/aec_resampler.h b/modules/audio_processing/aec/aec_resampler.h
new file mode 100644
index 0000000..130f7ec
--- /dev/null
+++ b/modules/audio_processing/aec/aec_resampler.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC_AEC_RESAMPLER_H_
+#define MODULES_AUDIO_PROCESSING_AEC_AEC_RESAMPLER_H_
+
+#include "modules/audio_processing/aec/aec_core.h"
+
+namespace webrtc {
+
+enum { kResamplingDelay = 1 };
+enum { kResamplerBufferSize = FRAME_LEN * 4 };
+
+// Unless otherwise specified, functions return 0 on success and -1 on error.
+void* WebRtcAec_CreateResampler();  // Returns NULL on error.
+int WebRtcAec_InitResampler(void* resampInst, int deviceSampleRateHz);
+void WebRtcAec_FreeResampler(void* resampInst);
+
+// Estimates skew from raw measurement.
+int WebRtcAec_GetSkew(void* resampInst, int rawSkew, float* skewEst);
+
+// Resamples input using linear interpolation.
+void WebRtcAec_ResampleLinear(void* resampInst,
+                              const float* inspeech,
+                              size_t size,
+                              float skew,
+                              float* outspeech,
+                              size_t* size_out);
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC_AEC_RESAMPLER_H_
diff --git a/modules/audio_processing/aec/echo_cancellation.cc b/modules/audio_processing/aec/echo_cancellation.cc
new file mode 100644
index 0000000..864db53
--- /dev/null
+++ b/modules/audio_processing/aec/echo_cancellation.cc
@@ -0,0 +1,868 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * Contains the API functions for the AEC.
+ */
+#include "modules/audio_processing/aec/echo_cancellation.h"
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+extern "C" {
+#include "common_audio/ring_buffer.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+}
+#include "modules/audio_processing/aec/aec_core.h"
+#include "modules/audio_processing/aec/aec_resampler.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+Aec::Aec() = default;
+Aec::~Aec() = default;
+
+// Measured delays [ms]
+// Device                Chrome  GTP
+// MacBook Air           10
+// MacBook Retina        10      100
+// MacPro                30?
+//
+// Win7 Desktop          70      80?
+// Win7 T430s            110
+// Win8 T420s            70
+//
+// Daisy                 50
+// Pixel (w/ preproc?)           240
+// Pixel (w/o preproc?)  110     110
+
+// The extended filter mode gives us the flexibility to ignore the system's
+// reported delays. We do this for platforms which we believe provide results
+// which are incompatible with the AEC's expectations. Based on measurements
+// (some provided above) we set a conservative (i.e. lower than measured)
+// fixed delay.
+//
+// WEBRTC_UNTRUSTED_DELAY will only have an impact when |extended_filter_mode|
+// is enabled. See the note along with |DelayCorrection| in
+// echo_cancellation_impl.h for more details on the mode.
+//
+// Justification:
+// Chromium/Mac: Here, the true latency is so low (~10-20 ms), that it plays
+// havoc with the AEC's buffering. To avoid this, we set a fixed delay of 20 ms
+// and then compensate by rewinding by 10 ms (in wideband) through
+// kDelayDiffOffsetSamples. This trick does not seem to work for larger rewind
+// values, but fortunately this is sufficient.
+//
+// Chromium/Linux(ChromeOS): The values we get on this platform don't correspond
+// well to reality. The variance doesn't match the AEC's buffer changes, and the
+// bulk values tend to be too low. However, the range across different hardware
+// appears to be too large to choose a single value.
+//
+// GTP/Linux(ChromeOS): TBD, but for the moment we will trust the values.
+#if defined(WEBRTC_CHROMIUM_BUILD) && defined(WEBRTC_MAC)
+#define WEBRTC_UNTRUSTED_DELAY
+#endif
+
+#if defined(WEBRTC_UNTRUSTED_DELAY) && defined(WEBRTC_MAC)
+static const int kDelayDiffOffsetSamples = -160;
+#else
+// Not enabled for now.
+static const int kDelayDiffOffsetSamples = 0;
+#endif
+
+#if defined(WEBRTC_MAC)
+static const int kFixedDelayMs = 20;
+#else
+static const int kFixedDelayMs = 50;
+#endif
+#if !defined(WEBRTC_UNTRUSTED_DELAY)
+static const int kMinTrustedDelayMs = 20;
+#endif
+static const int kMaxTrustedDelayMs = 500;
+
+// Maximum length of resampled signal. Must be an integer multiple of frames
+// (ceil(1/(1 + MIN_SKEW)*2) + 1)*FRAME_LEN
+// The factor of 2 handles wb, and the + 1 is as a safety margin
+// TODO(bjornv): Replace with kResamplerBufferSize
+#define MAX_RESAMP_LEN (5 * FRAME_LEN)
+
+static const int kMaxBufSizeStart = 62;  // In partitions
+static const int sampMsNb = 8;           // samples per ms in nb
+static const int initCheck = 42;
+
+int Aec::instance_count = 0;
+
+// Estimates delay to set the position of the far-end buffer read pointer
+// (controlled by knownDelay)
+static void EstBufDelayNormal(Aec* aecInst);
+static void EstBufDelayExtended(Aec* aecInst);
+static int ProcessNormal(Aec* aecInst,
+                         const float* const* nearend,
+                         size_t num_bands,
+                         float* const* out,
+                         size_t num_samples,
+                         int16_t reported_delay_ms,
+                         int32_t skew);
+static void ProcessExtended(Aec* aecInst,
+                            const float* const* nearend,
+                            size_t num_bands,
+                            float* const* out,
+                            size_t num_samples,
+                            int16_t reported_delay_ms,
+                            int32_t skew);
+
+void* WebRtcAec_Create() {
+  Aec* aecpc = new Aec();
+
+  if (!aecpc) {
+    return NULL;
+  }
+  aecpc->data_dumper.reset(new ApmDataDumper(aecpc->instance_count));
+
+  aecpc->aec = WebRtcAec_CreateAec(aecpc->instance_count);
+  if (!aecpc->aec) {
+    WebRtcAec_Free(aecpc);
+    return NULL;
+  }
+  aecpc->resampler = WebRtcAec_CreateResampler();
+  if (!aecpc->resampler) {
+    WebRtcAec_Free(aecpc);
+    return NULL;
+  }
+  // Create far-end pre-buffer. The buffer size has to be large enough for
+  // largest possible drift compensation (kResamplerBufferSize) + "almost" an
+  // FFT buffer (PART_LEN2 - 1).
+  aecpc->far_pre_buf =
+      WebRtc_CreateBuffer(PART_LEN2 + kResamplerBufferSize, sizeof(float));
+  if (!aecpc->far_pre_buf) {
+    WebRtcAec_Free(aecpc);
+    return NULL;
+  }
+
+  aecpc->initFlag = 0;
+
+  aecpc->instance_count++;
+  return aecpc;
+}
+
+void WebRtcAec_Free(void* aecInst) {
+  Aec* aecpc = reinterpret_cast<Aec*>(aecInst);
+
+  if (aecpc == NULL) {
+    return;
+  }
+
+  WebRtc_FreeBuffer(aecpc->far_pre_buf);
+
+  WebRtcAec_FreeAec(aecpc->aec);
+  WebRtcAec_FreeResampler(aecpc->resampler);
+  delete aecpc;
+}
+
+int32_t WebRtcAec_Init(void* aecInst, int32_t sampFreq, int32_t scSampFreq) {
+  Aec* aecpc = reinterpret_cast<Aec*>(aecInst);
+  aecpc->data_dumper->InitiateNewSetOfRecordings();
+  AecConfig aecConfig;
+
+  if (sampFreq != 8000 && sampFreq != 16000 && sampFreq != 32000 &&
+      sampFreq != 48000) {
+    return AEC_BAD_PARAMETER_ERROR;
+  }
+  aecpc->sampFreq = sampFreq;
+
+  if (scSampFreq < 1 || scSampFreq > 96000) {
+    return AEC_BAD_PARAMETER_ERROR;
+  }
+  aecpc->scSampFreq = scSampFreq;
+
+  // Initialize echo canceller core
+  if (WebRtcAec_InitAec(aecpc->aec, aecpc->sampFreq) == -1) {
+    return AEC_UNSPECIFIED_ERROR;
+  }
+
+  if (WebRtcAec_InitResampler(aecpc->resampler, aecpc->scSampFreq) == -1) {
+    return AEC_UNSPECIFIED_ERROR;
+  }
+
+  WebRtc_InitBuffer(aecpc->far_pre_buf);
+  WebRtc_MoveReadPtr(aecpc->far_pre_buf, -PART_LEN);  // Start overlap.
+
+  aecpc->initFlag = initCheck;  // indicates that initialization has been done
+
+  if (aecpc->sampFreq == 32000 || aecpc->sampFreq == 48000) {
+    aecpc->splitSampFreq = 16000;
+  } else {
+    aecpc->splitSampFreq = sampFreq;
+  }
+
+  aecpc->delayCtr = 0;
+  aecpc->sampFactor = (aecpc->scSampFreq * 1.0f) / aecpc->splitSampFreq;
+  // Sampling frequency multiplier (SWB is processed as 160 frame size).
+  aecpc->rate_factor = aecpc->splitSampFreq / 8000;
+
+  aecpc->sum = 0;
+  aecpc->counter = 0;
+  aecpc->checkBuffSize = 1;
+  aecpc->firstVal = 0;
+
+  // We skip the startup_phase completely (setting to 0) if DA-AEC is enabled,
+  // but not extended_filter mode.
+  aecpc->startup_phase = WebRtcAec_extended_filter_enabled(aecpc->aec) ||
+                         !WebRtcAec_delay_agnostic_enabled(aecpc->aec);
+  aecpc->bufSizeStart = 0;
+  aecpc->checkBufSizeCtr = 0;
+  aecpc->msInSndCardBuf = 0;
+  aecpc->filtDelay = -1;  // -1 indicates an initialized state.
+  aecpc->timeForDelayChange = 0;
+  aecpc->knownDelay = 0;
+  aecpc->lastDelayDiff = 0;
+
+  aecpc->skewFrCtr = 0;
+  aecpc->resample = kAecFalse;
+  aecpc->highSkewCtr = 0;
+  aecpc->skew = 0;
+
+  aecpc->farend_started = 0;
+
+  // Default settings.
+  aecConfig.nlpMode = kAecNlpModerate;
+  aecConfig.skewMode = kAecFalse;
+  aecConfig.metricsMode = kAecFalse;
+  aecConfig.delay_logging = kAecFalse;
+
+  if (WebRtcAec_set_config(aecpc, aecConfig) == -1) {
+    return AEC_UNSPECIFIED_ERROR;
+  }
+
+  return 0;
+}
+
+// Returns any error that is caused when buffering the
+// far-end signal.
+int32_t WebRtcAec_GetBufferFarendError(void* aecInst,
+                                       const float* farend,
+                                       size_t nrOfSamples) {
+  Aec* aecpc = reinterpret_cast<Aec*>(aecInst);
+
+  if (!farend)
+    return AEC_NULL_POINTER_ERROR;
+
+  if (aecpc->initFlag != initCheck)
+    return AEC_UNINITIALIZED_ERROR;
+
+  // number of samples == 160 for SWB input
+  if (nrOfSamples != 80 && nrOfSamples != 160)
+    return AEC_BAD_PARAMETER_ERROR;
+
+  return 0;
+}
+
+// only buffer L band for farend
+int32_t WebRtcAec_BufferFarend(void* aecInst,
+                               const float* farend,
+                               size_t nrOfSamples) {
+  Aec* aecpc = reinterpret_cast<Aec*>(aecInst);
+  size_t newNrOfSamples = nrOfSamples;
+  float new_farend[MAX_RESAMP_LEN];
+  const float* farend_ptr = farend;
+
+  // Get any error caused by buffering the farend signal.
+  int32_t error_code =
+      WebRtcAec_GetBufferFarendError(aecInst, farend, nrOfSamples);
+
+  if (error_code != 0)
+    return error_code;
+
+  if (aecpc->skewMode == kAecTrue && aecpc->resample == kAecTrue) {
+    // Resample and get a new number of samples
+    WebRtcAec_ResampleLinear(aecpc->resampler, farend, nrOfSamples, aecpc->skew,
+                             new_farend, &newNrOfSamples);
+    farend_ptr = new_farend;
+  }
+
+  aecpc->farend_started = 1;
+  WebRtcAec_SetSystemDelay(aecpc->aec, WebRtcAec_system_delay(aecpc->aec) +
+                           static_cast<int>(newNrOfSamples));
+
+  // Write the time-domain data to |far_pre_buf|.
+  WebRtc_WriteBuffer(aecpc->far_pre_buf, farend_ptr, newNrOfSamples);
+
+  // TODO(minyue): reduce to |PART_LEN| samples for each buffering.
+  while (WebRtc_available_read(aecpc->far_pre_buf) >= PART_LEN2) {
+    // We have enough data to pass to the FFT, hence read PART_LEN2 samples.
+    {
+      float* ptmp = NULL;
+      float tmp[PART_LEN2];
+      WebRtc_ReadBuffer(aecpc->far_pre_buf,
+                        reinterpret_cast<void**>(&ptmp), tmp, PART_LEN2);
+      WebRtcAec_BufferFarendBlock(aecpc->aec, &ptmp[PART_LEN]);
+    }
+
+    // Rewind |far_pre_buf| PART_LEN samples for overlap before continuing.
+    WebRtc_MoveReadPtr(aecpc->far_pre_buf, -PART_LEN);
+  }
+
+  return 0;
+}
+
+int32_t WebRtcAec_Process(void* aecInst,
+                          const float* const* nearend,
+                          size_t num_bands,
+                          float* const* out,
+                          size_t nrOfSamples,
+                          int16_t msInSndCardBuf,
+                          int32_t skew) {
+  Aec* aecpc = reinterpret_cast<Aec*>(aecInst);
+  int32_t retVal = 0;
+
+  if (out == NULL) {
+    return AEC_NULL_POINTER_ERROR;
+  }
+
+  if (aecpc->initFlag != initCheck) {
+    return AEC_UNINITIALIZED_ERROR;
+  }
+
+  // number of samples == 160 for SWB input
+  if (nrOfSamples != 80 && nrOfSamples != 160) {
+    return AEC_BAD_PARAMETER_ERROR;
+  }
+
+  if (msInSndCardBuf < 0) {
+    msInSndCardBuf = 0;
+    retVal = AEC_BAD_PARAMETER_WARNING;
+  } else if (msInSndCardBuf > kMaxTrustedDelayMs) {
+    // The clamping is now done in ProcessExtended/Normal().
+    retVal = AEC_BAD_PARAMETER_WARNING;
+  }
+
+  // This returns the value of aec->extended_filter_enabled.
+  if (WebRtcAec_extended_filter_enabled(aecpc->aec)) {
+    ProcessExtended(aecpc, nearend, num_bands, out, nrOfSamples, msInSndCardBuf,
+                    skew);
+  } else {
+    retVal = ProcessNormal(aecpc, nearend, num_bands, out, nrOfSamples,
+                           msInSndCardBuf, skew);
+  }
+
+  int far_buf_size_samples = WebRtcAec_system_delay(aecpc->aec);
+  aecpc->data_dumper->DumpRaw("aec_system_delay", 1, &far_buf_size_samples);
+  aecpc->data_dumper->DumpRaw("aec_known_delay", 1, &aecpc->knownDelay);
+
+  return retVal;
+}
+
+int WebRtcAec_set_config(void* handle, AecConfig config) {
+  Aec* self = reinterpret_cast<Aec*>(handle);
+  if (self->initFlag != initCheck) {
+    return AEC_UNINITIALIZED_ERROR;
+  }
+
+  if (config.skewMode != kAecFalse && config.skewMode != kAecTrue) {
+    return AEC_BAD_PARAMETER_ERROR;
+  }
+  self->skewMode = config.skewMode;
+
+  if (config.nlpMode != kAecNlpConservative &&
+      config.nlpMode != kAecNlpModerate &&
+      config.nlpMode != kAecNlpAggressive) {
+    return AEC_BAD_PARAMETER_ERROR;
+  }
+
+  if (config.metricsMode != kAecFalse && config.metricsMode != kAecTrue) {
+    return AEC_BAD_PARAMETER_ERROR;
+  }
+
+  if (config.delay_logging != kAecFalse && config.delay_logging != kAecTrue) {
+    return AEC_BAD_PARAMETER_ERROR;
+  }
+
+  WebRtcAec_SetConfigCore(self->aec, config.nlpMode, config.metricsMode,
+                          config.delay_logging);
+  return 0;
+}
+
+int WebRtcAec_get_echo_status(void* handle, int* status) {
+  Aec* self = reinterpret_cast<Aec*>(handle);
+  if (status == NULL) {
+    return AEC_NULL_POINTER_ERROR;
+  }
+  if (self->initFlag != initCheck) {
+    return AEC_UNINITIALIZED_ERROR;
+  }
+
+  *status = WebRtcAec_echo_state(self->aec);
+
+  return 0;
+}
+
+int WebRtcAec_GetMetrics(void* handle, AecMetrics* metrics) {
+  const float kUpWeight = 0.7f;
+  float dtmp;
+  int stmp;
+  Aec* self = reinterpret_cast<Aec*>(handle);
+  Stats erl;
+  Stats erle;
+  Stats a_nlp;
+
+  if (handle == NULL) {
+    return -1;
+  }
+  if (metrics == NULL) {
+    return AEC_NULL_POINTER_ERROR;
+  }
+  if (self->initFlag != initCheck) {
+    return AEC_UNINITIALIZED_ERROR;
+  }
+
+  WebRtcAec_GetEchoStats(self->aec, &erl, &erle, &a_nlp,
+                         &metrics->divergent_filter_fraction);
+
+  // ERL
+  metrics->erl.instant = static_cast<int>(erl.instant);
+
+  if ((erl.himean > kOffsetLevel) && (erl.average > kOffsetLevel)) {
+    // Use a mix between regular average and upper part average.
+    dtmp = kUpWeight * erl.himean + (1 - kUpWeight) * erl.average;
+    metrics->erl.average = static_cast<int>(dtmp);
+  } else {
+    metrics->erl.average = kOffsetLevel;
+  }
+
+  metrics->erl.max = static_cast<int>(erl.max);
+
+  if (erl.min < (kOffsetLevel * (-1))) {
+    metrics->erl.min = static_cast<int>(erl.min);
+  } else {
+    metrics->erl.min = kOffsetLevel;
+  }
+
+  // ERLE
+  metrics->erle.instant = static_cast<int>(erle.instant);
+
+  if ((erle.himean > kOffsetLevel) && (erle.average > kOffsetLevel)) {
+    // Use a mix between regular average and upper part average.
+    dtmp = kUpWeight * erle.himean + (1 - kUpWeight) * erle.average;
+    metrics->erle.average = static_cast<int>(dtmp);
+  } else {
+    metrics->erle.average = kOffsetLevel;
+  }
+
+  metrics->erle.max = static_cast<int>(erle.max);
+
+  if (erle.min < (kOffsetLevel * (-1))) {
+    metrics->erle.min = static_cast<int>(erle.min);
+  } else {
+    metrics->erle.min = kOffsetLevel;
+  }
+
+  // RERL
+  if ((metrics->erl.average > kOffsetLevel) &&
+      (metrics->erle.average > kOffsetLevel)) {
+    stmp = metrics->erl.average + metrics->erle.average;
+  } else {
+    stmp = kOffsetLevel;
+  }
+  metrics->rerl.average = stmp;
+
+  // No other statistics needed, but returned for completeness.
+  metrics->rerl.instant = stmp;
+  metrics->rerl.max = stmp;
+  metrics->rerl.min = stmp;
+
+  // A_NLP
+  metrics->aNlp.instant = static_cast<int>(a_nlp.instant);
+
+  if ((a_nlp.himean > kOffsetLevel) && (a_nlp.average > kOffsetLevel)) {
+    // Use a mix between regular average and upper part average.
+    dtmp = kUpWeight * a_nlp.himean + (1 - kUpWeight) * a_nlp.average;
+    metrics->aNlp.average = static_cast<int>(dtmp);
+  } else {
+    metrics->aNlp.average = kOffsetLevel;
+  }
+
+  metrics->aNlp.max = static_cast<int>(a_nlp.max);
+
+  if (a_nlp.min < (kOffsetLevel * (-1))) {
+    metrics->aNlp.min = static_cast<int>(a_nlp.min);
+  } else {
+    metrics->aNlp.min = kOffsetLevel;
+  }
+
+  return 0;
+}
+
+int WebRtcAec_GetDelayMetrics(void* handle,
+                              int* median,
+                              int* std,
+                              float* fraction_poor_delays) {
+  Aec* self = reinterpret_cast<Aec*>(handle);
+  if (median == NULL) {
+    return AEC_NULL_POINTER_ERROR;
+  }
+  if (std == NULL) {
+    return AEC_NULL_POINTER_ERROR;
+  }
+  if (self->initFlag != initCheck) {
+    return AEC_UNINITIALIZED_ERROR;
+  }
+  if (WebRtcAec_GetDelayMetricsCore(self->aec, median, std,
+                                    fraction_poor_delays) == -1) {
+    // Logging disabled.
+    return AEC_UNSUPPORTED_FUNCTION_ERROR;
+  }
+
+  return 0;
+}
+
+AecCore* WebRtcAec_aec_core(void* handle) {
+  if (!handle) {
+    return NULL;
+  }
+  return reinterpret_cast<Aec*>(handle)->aec;
+}
+
+static int ProcessNormal(Aec* aecInst,
+                         const float* const* nearend,
+                         size_t num_bands,
+                         float* const* out,
+                         size_t num_samples,
+                         int16_t reported_delay_ms,
+                         int32_t skew) {
+  int retVal = 0;
+  size_t i;
+  size_t nBlocks10ms;
+  // Limit resampling to doubling/halving of signal
+  const float minSkewEst = -0.5f;
+  const float maxSkewEst = 1.0f;
+
+  reported_delay_ms =
+      reported_delay_ms > kMaxTrustedDelayMs ? kMaxTrustedDelayMs :
+      reported_delay_ms;
+  // TODO(andrew): we need to investigate if this +10 is really wanted.
+  reported_delay_ms += 10;
+  aecInst->msInSndCardBuf = reported_delay_ms;
+
+  if (aecInst->skewMode == kAecTrue) {
+    if (aecInst->skewFrCtr < 25) {
+      aecInst->skewFrCtr++;
+    } else {
+      retVal = WebRtcAec_GetSkew(aecInst->resampler, skew, &aecInst->skew);
+      if (retVal == -1) {
+        aecInst->skew = 0;
+        retVal = AEC_BAD_PARAMETER_WARNING;
+      }
+
+      aecInst->skew /= aecInst->sampFactor * num_samples;
+
+      if (aecInst->skew < 1.0e-3 && aecInst->skew > -1.0e-3) {
+        aecInst->resample = kAecFalse;
+      } else {
+        aecInst->resample = kAecTrue;
+      }
+
+      if (aecInst->skew < minSkewEst) {
+        aecInst->skew = minSkewEst;
+      } else if (aecInst->skew > maxSkewEst) {
+        aecInst->skew = maxSkewEst;
+      }
+
+      aecInst->data_dumper->DumpRaw("aec_skew", 1, &aecInst->skew);
+    }
+  }
+
+  nBlocks10ms = num_samples / (FRAME_LEN * aecInst->rate_factor);
+
+  if (aecInst->startup_phase) {
+    for (i = 0; i < num_bands; ++i) {
+      // Only needed if they don't already point to the same place.
+      if (nearend[i] != out[i]) {
+        memcpy(out[i], nearend[i], sizeof(nearend[i][0]) * num_samples);
+      }
+    }
+
+    // The AEC is in the start up mode
+    // AEC is disabled until the system delay is OK
+
+    // Mechanism to ensure that the system delay is reasonably stable.
+    if (aecInst->checkBuffSize) {
+      aecInst->checkBufSizeCtr++;
+      // Before we fill up the far-end buffer we require the system delay
+      // to be stable (+/-8 ms) compared to the first value. This
+      // comparison is made during the following 6 consecutive 10 ms
+      // blocks. If it seems to be stable then we start to fill up the
+      // far-end buffer.
+      if (aecInst->counter == 0) {
+        aecInst->firstVal = aecInst->msInSndCardBuf;
+        aecInst->sum = 0;
+      }
+
+      if (abs(aecInst->firstVal - aecInst->msInSndCardBuf) <
+          WEBRTC_SPL_MAX(0.2 * aecInst->msInSndCardBuf, sampMsNb)) {
+        aecInst->sum += aecInst->msInSndCardBuf;
+        aecInst->counter++;
+      } else {
+        aecInst->counter = 0;
+      }
+
+      if (aecInst->counter * nBlocks10ms >= 6) {
+        // The far-end buffer size is determined in partitions of
+        // PART_LEN samples. Use 75% of the average value of the system
+        // delay as buffer size to start with.
+        aecInst->bufSizeStart =
+            WEBRTC_SPL_MIN((3 * aecInst->sum * aecInst->rate_factor * 8) /
+                               (4 * aecInst->counter * PART_LEN),
+                           kMaxBufSizeStart);
+        // Buffer size has now been determined.
+        aecInst->checkBuffSize = 0;
+      }
+
+      if (aecInst->checkBufSizeCtr * nBlocks10ms > 50) {
+        // For really bad systems, don't disable the echo canceller for
+        // more than 0.5 sec.
+        aecInst->bufSizeStart = WEBRTC_SPL_MIN(
+            (aecInst->msInSndCardBuf * aecInst->rate_factor * 3) / 40,
+            kMaxBufSizeStart);
+        aecInst->checkBuffSize = 0;
+      }
+    }
+
+    // If |checkBuffSize| changed in the if-statement above.
+    if (!aecInst->checkBuffSize) {
+      // The system delay is now reasonably stable (or has been unstable
+      // for too long). When the far-end buffer is filled with
+      // approximately the same amount of data as reported by the system
+      // we end the startup phase.
+      int overhead_elements =
+          WebRtcAec_system_delay(aecInst->aec) / PART_LEN -
+          aecInst->bufSizeStart;
+      if (overhead_elements == 0) {
+        // Enable the AEC
+        aecInst->startup_phase = 0;
+      } else if (overhead_elements > 0) {
+        // TODO(bjornv): Do we need a check on how much we actually
+        // moved the read pointer? It should always be possible to move
+        // the pointer |overhead_elements| since we have only added data
+        // to the buffer and no delay compensation nor AEC processing
+        // has been done.
+        WebRtcAec_AdjustFarendBufferSizeAndSystemDelay(aecInst->aec,
+                                                       overhead_elements);
+
+        // Enable the AEC
+        aecInst->startup_phase = 0;
+      }
+    }
+  } else {
+    // AEC is enabled.
+    EstBufDelayNormal(aecInst);
+
+    // Call the AEC.
+    // TODO(bjornv): Re-structure such that we don't have to pass
+    // |aecInst->knownDelay| as input. Change name to something like
+    // |system_buffer_diff|.
+    WebRtcAec_ProcessFrames(aecInst->aec, nearend, num_bands, num_samples,
+                            aecInst->knownDelay, out);
+  }
+
+  return retVal;
+}
+
+static void ProcessExtended(Aec* self,
+                            const float* const* near,
+                            size_t num_bands,
+                            float* const* out,
+                            size_t num_samples,
+                            int16_t reported_delay_ms,
+                            int32_t skew) {
+  size_t i;
+  const int delay_diff_offset = kDelayDiffOffsetSamples;
+  RTC_DCHECK(num_samples == 80 || num_samples == 160);
+#if defined(WEBRTC_UNTRUSTED_DELAY)
+  reported_delay_ms = kFixedDelayMs;
+#else
+  // This is the usual mode where we trust the reported system delay values.
+  // Due to the longer filter, we no longer add 10 ms to the reported delay
+  // to reduce chance of non-causality. Instead we apply a minimum here to avoid
+  // issues with the read pointer jumping around needlessly.
+  reported_delay_ms = reported_delay_ms < kMinTrustedDelayMs
+                          ? kMinTrustedDelayMs
+                          : reported_delay_ms;
+  // If the reported delay appears to be bogus, we attempt to recover by using
+  // the measured fixed delay values. We use >= here because higher layers
+  // may already clamp to this maximum value, and we would otherwise not
+  // detect it here.
+  reported_delay_ms = reported_delay_ms >= kMaxTrustedDelayMs
+                          ? kFixedDelayMs
+                          : reported_delay_ms;
+#endif
+  self->msInSndCardBuf = reported_delay_ms;
+
+  if (!self->farend_started) {
+    for (i = 0; i < num_bands; ++i) {
+      // Only needed if they don't already point to the same place.
+      if (near[i] != out[i]) {
+        memcpy(out[i], near[i], sizeof(near[i][0]) * num_samples);
+      }
+    }
+    return;
+  }
+  if (self->startup_phase) {
+    // In the extended mode, there isn't a startup "phase", just a special
+    // action on the first frame. In the trusted delay case, we'll take the
+    // current reported delay, unless it's less then our conservative
+    // measurement.
+    int startup_size_ms =
+        reported_delay_ms < kFixedDelayMs ? kFixedDelayMs : reported_delay_ms;
+#if defined(WEBRTC_ANDROID)
+    int target_delay = startup_size_ms * self->rate_factor * 8;
+#else
+    // To avoid putting the AEC in a non-causal state we're being slightly
+    // conservative and scale by 2. On Android we use a fixed delay and
+    // therefore there is no need to scale the target_delay.
+    int target_delay = startup_size_ms * self->rate_factor * 8 / 2;
+#endif
+    int overhead_elements =
+        (WebRtcAec_system_delay(self->aec) - target_delay) / PART_LEN;
+    WebRtcAec_AdjustFarendBufferSizeAndSystemDelay(self->aec,
+                                                   overhead_elements);
+    self->startup_phase = 0;
+  }
+
+  EstBufDelayExtended(self);
+
+  {
+    // |delay_diff_offset| gives us the option to manually rewind the delay on
+    // very low delay platforms which can't be expressed purely through
+    // |reported_delay_ms|.
+    const int adjusted_known_delay =
+        WEBRTC_SPL_MAX(0, self->knownDelay + delay_diff_offset);
+
+    WebRtcAec_ProcessFrames(self->aec, near, num_bands, num_samples,
+                            adjusted_known_delay, out);
+  }
+}
+
+static void EstBufDelayNormal(Aec* aecInst) {
+  int nSampSndCard = aecInst->msInSndCardBuf * sampMsNb * aecInst->rate_factor;
+  int current_delay = nSampSndCard - WebRtcAec_system_delay(aecInst->aec);
+  int delay_difference = 0;
+
+  // Before we proceed with the delay estimate filtering we:
+  // 1) Compensate for the frame that will be read.
+  // 2) Compensate for drift resampling.
+  // 3) Compensate for non-causality if needed, since the estimated delay can't
+  //    be negative.
+
+  // 1) Compensating for the frame(s) that will be read/processed.
+  current_delay += FRAME_LEN * aecInst->rate_factor;
+
+  // 2) Account for resampling frame delay.
+  if (aecInst->skewMode == kAecTrue && aecInst->resample == kAecTrue) {
+    current_delay -= kResamplingDelay;
+  }
+
+  // 3) Compensate for non-causality, if needed, by flushing one block.
+  if (current_delay < PART_LEN) {
+    current_delay +=
+        WebRtcAec_AdjustFarendBufferSizeAndSystemDelay(aecInst->aec, 1) *
+        PART_LEN;
+  }
+
+  // We use -1 to signal an initialized state in the "extended" implementation;
+  // compensate for that.
+  aecInst->filtDelay = aecInst->filtDelay < 0 ? 0 : aecInst->filtDelay;
+  aecInst->filtDelay =
+      WEBRTC_SPL_MAX(0, static_cast<int16_t>(0.8 *
+                                             aecInst->filtDelay +
+                                             0.2 * current_delay));
+
+  delay_difference = aecInst->filtDelay - aecInst->knownDelay;
+  if (delay_difference > 224) {
+    if (aecInst->lastDelayDiff < 96) {
+      aecInst->timeForDelayChange = 0;
+    } else {
+      aecInst->timeForDelayChange++;
+    }
+  } else if (delay_difference < 96 && aecInst->knownDelay > 0) {
+    if (aecInst->lastDelayDiff > 224) {
+      aecInst->timeForDelayChange = 0;
+    } else {
+      aecInst->timeForDelayChange++;
+    }
+  } else {
+    aecInst->timeForDelayChange = 0;
+  }
+  aecInst->lastDelayDiff = delay_difference;
+
+  if (aecInst->timeForDelayChange > 25) {
+    aecInst->knownDelay = WEBRTC_SPL_MAX((int)aecInst->filtDelay - 160, 0);
+  }
+}
+
+static void EstBufDelayExtended(Aec* aecInst) {
+  int reported_delay = aecInst->msInSndCardBuf * sampMsNb *
+      aecInst->rate_factor;
+  int current_delay = reported_delay - WebRtcAec_system_delay(aecInst->aec);
+  int delay_difference = 0;
+
+  // Before we proceed with the delay estimate filtering we:
+  // 1) Compensate for the frame that will be read.
+  // 2) Compensate for drift resampling.
+  // 3) Compensate for non-causality if needed, since the estimated delay can't
+  //    be negative.
+
+  // 1) Compensating for the frame(s) that will be read/processed.
+  current_delay += FRAME_LEN * aecInst->rate_factor;
+
+  // 2) Account for resampling frame delay.
+  if (aecInst->skewMode == kAecTrue && aecInst->resample == kAecTrue) {
+    current_delay -= kResamplingDelay;
+  }
+
+  // 3) Compensate for non-causality, if needed, by flushing two blocks.
+  if (current_delay < PART_LEN) {
+    current_delay +=
+        WebRtcAec_AdjustFarendBufferSizeAndSystemDelay(aecInst->aec, 2) *
+        PART_LEN;
+  }
+
+  if (aecInst->filtDelay == -1) {
+    aecInst->filtDelay = WEBRTC_SPL_MAX(0, 0.5 * current_delay);
+  } else {
+    aecInst->filtDelay = WEBRTC_SPL_MAX(
+        0, static_cast<int16_t>(0.95 * aecInst->filtDelay + 0.05 *
+                                current_delay));
+  }
+
+  delay_difference = aecInst->filtDelay - aecInst->knownDelay;
+  if (delay_difference > 384) {
+    if (aecInst->lastDelayDiff < 128) {
+      aecInst->timeForDelayChange = 0;
+    } else {
+      aecInst->timeForDelayChange++;
+    }
+  } else if (delay_difference < 128 && aecInst->knownDelay > 0) {
+    if (aecInst->lastDelayDiff > 384) {
+      aecInst->timeForDelayChange = 0;
+    } else {
+      aecInst->timeForDelayChange++;
+    }
+  } else {
+    aecInst->timeForDelayChange = 0;
+  }
+  aecInst->lastDelayDiff = delay_difference;
+
+  if (aecInst->timeForDelayChange > 25) {
+    aecInst->knownDelay = WEBRTC_SPL_MAX((int)aecInst->filtDelay - 256, 0);
+  }
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec/echo_cancellation.h b/modules/audio_processing/aec/echo_cancellation.h
new file mode 100644
index 0000000..d5c703e
--- /dev/null
+++ b/modules/audio_processing/aec/echo_cancellation.h
@@ -0,0 +1,299 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC_ECHO_CANCELLATION_H_
+#define MODULES_AUDIO_PROCESSING_AEC_ECHO_CANCELLATION_H_
+
+#include <memory>
+
+#include <stddef.h>
+
+extern "C" {
+#include "common_audio/ring_buffer.h"
+}
+#include "modules/audio_processing/aec/aec_core.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Errors
+#define AEC_UNSPECIFIED_ERROR 12000
+#define AEC_UNSUPPORTED_FUNCTION_ERROR 12001
+#define AEC_UNINITIALIZED_ERROR 12002
+#define AEC_NULL_POINTER_ERROR 12003
+#define AEC_BAD_PARAMETER_ERROR 12004
+
+// Warnings
+#define AEC_BAD_PARAMETER_WARNING 12050
+
+enum { kAecNlpConservative = 0, kAecNlpModerate, kAecNlpAggressive };
+
+enum { kAecFalse = 0, kAecTrue };
+
+typedef struct {
+  int16_t nlpMode;      // default kAecNlpModerate
+  int16_t skewMode;     // default kAecFalse
+  int16_t metricsMode;  // default kAecFalse
+  int delay_logging;    // default kAecFalse
+  // float realSkew;
+} AecConfig;
+
+typedef struct {
+  int instant;
+  int average;
+  int max;
+  int min;
+} AecLevel;
+
+typedef struct {
+  AecLevel rerl;
+  AecLevel erl;
+  AecLevel erle;
+  AecLevel aNlp;
+  float divergent_filter_fraction;
+} AecMetrics;
+
+struct AecCore;
+
+class ApmDataDumper;
+
+typedef struct Aec {
+  Aec();
+  ~Aec();
+
+  std::unique_ptr<ApmDataDumper> data_dumper;
+
+  int delayCtr;
+  int sampFreq;
+  int splitSampFreq;
+  int scSampFreq;
+  float sampFactor;  // scSampRate / sampFreq
+  short skewMode;
+  int bufSizeStart;
+  int knownDelay;
+  int rate_factor;
+
+  short initFlag;  // indicates if AEC has been initialized
+
+  // Variables used for averaging far end buffer size
+  short counter;
+  int sum;
+  short firstVal;
+  short checkBufSizeCtr;
+
+  // Variables used for delay shifts
+  short msInSndCardBuf;
+  short filtDelay;  // Filtered delay estimate.
+  int timeForDelayChange;
+  int startup_phase;
+  int checkBuffSize;
+  short lastDelayDiff;
+
+  // Structures
+  void* resampler;
+
+  int skewFrCtr;
+  int resample;  // if the skew is small enough we don't resample
+  int highSkewCtr;
+  float skew;
+
+  RingBuffer* far_pre_buf;  // Time domain far-end pre-buffer.
+
+  int farend_started;
+
+  // Aec instance counter.
+  static int instance_count;
+  AecCore* aec;
+} Aec;
+
+/*
+ * Allocates the memory needed by the AEC. The memory needs to be initialized
+ * separately using the WebRtcAec_Init() function. Returns a pointer to the
+ * object or NULL on error.
+ */
+void* WebRtcAec_Create();
+
+/*
+ * This function releases the memory allocated by WebRtcAec_Create().
+ *
+ * Inputs                       Description
+ * -------------------------------------------------------------------
+ * void*        aecInst         Pointer to the AEC instance
+ */
+void WebRtcAec_Free(void* aecInst);
+
+/*
+ * Initializes an AEC instance.
+ *
+ * Inputs                       Description
+ * -------------------------------------------------------------------
+ * void*          aecInst       Pointer to the AEC instance
+ * int32_t        sampFreq      Sampling frequency of data
+ * int32_t        scSampFreq    Soundcard sampling frequency
+ *
+ * Outputs                      Description
+ * -------------------------------------------------------------------
+ * int32_t        return        0: OK
+ *                             -1: error
+ */
+int32_t WebRtcAec_Init(void* aecInst, int32_t sampFreq, int32_t scSampFreq);
+
+/*
+ * Inserts an 80 or 160 sample block of data into the farend buffer.
+ *
+ * Inputs                       Description
+ * -------------------------------------------------------------------
+ * void*          aecInst       Pointer to the AEC instance
+ * const float*   farend        In buffer containing one frame of
+ *                              farend signal for L band
+ * int16_t        nrOfSamples   Number of samples in farend buffer
+ *
+ * Outputs                      Description
+ * -------------------------------------------------------------------
+ * int32_t        return        0: OK
+ *                              12000-12050: error code
+ */
+int32_t WebRtcAec_BufferFarend(void* aecInst,
+                               const float* farend,
+                               size_t nrOfSamples);
+
+/*
+ * Reports any errors that would arise if buffering a farend buffer
+ *
+ * Inputs                       Description
+ * -------------------------------------------------------------------
+ * void*          aecInst       Pointer to the AEC instance
+ * const float*   farend        In buffer containing one frame of
+ *                              farend signal for L band
+ * int16_t        nrOfSamples   Number of samples in farend buffer
+ *
+ * Outputs                      Description
+ * -------------------------------------------------------------------
+ * int32_t        return        0: OK
+ *                              12000-12050: error code
+ */
+int32_t WebRtcAec_GetBufferFarendError(void* aecInst,
+                                       const float* farend,
+                                       size_t nrOfSamples);
+
+/*
+ * Runs the echo canceller on an 80 or 160 sample blocks of data.
+ *
+ * Inputs                       Description
+ * -------------------------------------------------------------------
+ * void*         aecInst        Pointer to the AEC instance
+ * float* const* nearend        In buffer containing one frame of
+ *                              nearend+echo signal for each band
+ * int           num_bands      Number of bands in nearend buffer
+ * int16_t       nrOfSamples    Number of samples in nearend buffer
+ * int16_t       msInSndCardBuf Delay estimate for sound card and
+ *                              system buffers
+ * int16_t       skew           Difference between number of samples played
+ *                              and recorded at the soundcard (for clock skew
+ *                              compensation)
+ *
+ * Outputs                      Description
+ * -------------------------------------------------------------------
+ * float* const* out            Out buffer, one frame of processed nearend
+ *                              for each band
+ * int32_t       return         0: OK
+ *                              12000-12050: error code
+ */
+int32_t WebRtcAec_Process(void* aecInst,
+                          const float* const* nearend,
+                          size_t num_bands,
+                          float* const* out,
+                          size_t nrOfSamples,
+                          int16_t msInSndCardBuf,
+                          int32_t skew);
+
+/*
+ * This function enables the user to set certain parameters on-the-fly.
+ *
+ * Inputs                       Description
+ * -------------------------------------------------------------------
+ * void*          handle        Pointer to the AEC instance
+ * AecConfig      config        Config instance that contains all
+ *                              properties to be set
+ *
+ * Outputs                      Description
+ * -------------------------------------------------------------------
+ * int            return        0: OK
+ *                              12000-12050: error code
+ */
+int WebRtcAec_set_config(void* handle, AecConfig config);
+
+/*
+ * Gets the current echo status of the nearend signal.
+ *
+ * Inputs                       Description
+ * -------------------------------------------------------------------
+ * void*          handle        Pointer to the AEC instance
+ *
+ * Outputs                      Description
+ * -------------------------------------------------------------------
+ * int*           status        0: Almost certainly nearend single-talk
+ *                              1: Might not be neared single-talk
+ * int            return        0: OK
+ *                              12000-12050: error code
+ */
+int WebRtcAec_get_echo_status(void* handle, int* status);
+
+/*
+ * Gets the current echo metrics for the session.
+ *
+ * Inputs                       Description
+ * -------------------------------------------------------------------
+ * void*          handle        Pointer to the AEC instance
+ *
+ * Outputs                      Description
+ * -------------------------------------------------------------------
+ * AecMetrics*    metrics       Struct which will be filled out with the
+ *                              current echo metrics.
+ * int            return        0: OK
+ *                              12000-12050: error code
+ */
+int WebRtcAec_GetMetrics(void* handle, AecMetrics* metrics);
+
+/*
+ * Gets the current delay metrics for the session.
+ *
+ * Inputs                       Description
+ * -------------------------------------------------------------------
+ * void*   handle               Pointer to the AEC instance
+ *
+ * Outputs                      Description
+ * -------------------------------------------------------------------
+ * int*    median               Delay median value.
+ * int*    std                  Delay standard deviation.
+ * float*  fraction_poor_delays Fraction of the delay estimates that may
+ *                              cause the AEC to perform poorly.
+ *
+ * int            return        0: OK
+ *                              12000-12050: error code
+ */
+int WebRtcAec_GetDelayMetrics(void* handle,
+                              int* median,
+                              int* std,
+                              float* fraction_poor_delays);
+
+// Returns a pointer to the low level AEC handle.
+//
+// Input:
+//  - handle                    : Pointer to the AEC instance.
+//
+// Return value:
+//  - AecCore pointer           : NULL for error.
+//
+struct AecCore* WebRtcAec_aec_core(void* handle);
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC_ECHO_CANCELLATION_H_
diff --git a/modules/audio_processing/aec/echo_cancellation_unittest.cc b/modules/audio_processing/aec/echo_cancellation_unittest.cc
new file mode 100644
index 0000000..b9c89fd
--- /dev/null
+++ b/modules/audio_processing/aec/echo_cancellation_unittest.cc
@@ -0,0 +1,45 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// TODO(bjornv): Make this a comprehensive test.
+
+#include "modules/audio_processing/aec/echo_cancellation.h"
+
+#include <stdlib.h>
+#include <time.h>
+
+#include "modules/audio_processing/aec/aec_core.h"
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(EchoCancellationTest, CreateAndFreeHasExpectedBehavior) {
+  void* handle = WebRtcAec_Create();
+  ASSERT_TRUE(handle);
+  WebRtcAec_Free(nullptr);
+  WebRtcAec_Free(handle);
+}
+
+TEST(EchoCancellationTest, ApplyAecCoreHandle) {
+  void* handle = WebRtcAec_Create();
+  ASSERT_TRUE(handle);
+  EXPECT_TRUE(WebRtcAec_aec_core(NULL) == NULL);
+  AecCore* aec_core = WebRtcAec_aec_core(handle);
+  EXPECT_TRUE(aec_core != NULL);
+  // A simple test to verify that we can set and get a value from the lower
+  // level |aec_core| handle.
+  int delay = 111;
+  WebRtcAec_SetSystemDelay(aec_core, delay);
+  EXPECT_EQ(delay, WebRtcAec_system_delay(aec_core));
+  WebRtcAec_Free(handle);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec/system_delay_unittest.cc b/modules/audio_processing/aec/system_delay_unittest.cc
new file mode 100644
index 0000000..fc57af8
--- /dev/null
+++ b/modules/audio_processing/aec/system_delay_unittest.cc
@@ -0,0 +1,601 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec/aec_core.h"
+#include "modules/audio_processing/aec/echo_cancellation.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+namespace webrtc {
+namespace {
+
+class SystemDelayTest : public ::testing::Test {
+ protected:
+  SystemDelayTest();
+  virtual void SetUp();
+  virtual void TearDown();
+
+  // Initialization of AEC handle with respect to |sample_rate_hz|. Since the
+  // device sample rate is unimportant we set that value to 48000 Hz.
+  void Init(int sample_rate_hz);
+
+  // Makes one render call and one capture call in that specific order.
+  void RenderAndCapture(int device_buffer_ms);
+
+  // Fills up the far-end buffer with respect to the default device buffer size.
+  size_t BufferFillUp();
+
+  // Runs and verifies the behavior in a stable startup procedure.
+  void RunStableStartup();
+
+  // Maps buffer size in ms into samples, taking the unprocessed frame into
+  // account.
+  int MapBufferSizeToSamples(int size_in_ms, bool extended_filter);
+
+  void* handle_;
+  Aec* self_;
+  size_t samples_per_frame_;
+  // Dummy input/output speech data.
+  static const int kSamplesPerChunk = 160;
+  float far_[kSamplesPerChunk];
+  float near_[kSamplesPerChunk];
+  float out_[kSamplesPerChunk];
+  const float* near_ptr_;
+  float* out_ptr_;
+};
+
+SystemDelayTest::SystemDelayTest()
+    : handle_(NULL), self_(NULL), samples_per_frame_(0) {
+  // Dummy input data are set with more or less arbitrary non-zero values.
+  for (int i = 0; i < kSamplesPerChunk; i++) {
+    far_[i] = 257.0;
+    near_[i] = 514.0;
+  }
+  memset(out_, 0, sizeof(out_));
+  near_ptr_ = near_;
+  out_ptr_ = out_;
+}
+
+void SystemDelayTest::SetUp() {
+  handle_ = WebRtcAec_Create();
+  ASSERT_TRUE(handle_);
+  self_ = reinterpret_cast<Aec*>(handle_);
+}
+
+void SystemDelayTest::TearDown() {
+  // Free AEC
+  WebRtcAec_Free(handle_);
+  handle_ = NULL;
+}
+
+// In SWB mode nothing is added to the buffer handling with respect to
+// functionality compared to WB. We therefore only verify behavior in NB and WB.
+static const int kSampleRateHz[] = {8000, 16000};
+static const size_t kNumSampleRates =
+    sizeof(kSampleRateHz) / sizeof(*kSampleRateHz);
+
+// Default audio device buffer size used.
+static const int kDeviceBufMs = 100;
+
+// Requirement for a stable device convergence time in ms. Should converge in
+// less than |kStableConvergenceMs|.
+static const int kStableConvergenceMs = 100;
+
+// Maximum convergence time in ms. This means that we should leave the startup
+// phase after |kMaxConvergenceMs| independent of device buffer stability
+// conditions.
+static const int kMaxConvergenceMs = 500;
+
+void SystemDelayTest::Init(int sample_rate_hz) {
+  // Initialize AEC
+  EXPECT_EQ(0, WebRtcAec_Init(handle_, sample_rate_hz, 48000));
+  EXPECT_EQ(0, WebRtcAec_system_delay(self_->aec));
+
+  // One frame equals 10 ms of data.
+  samples_per_frame_ = static_cast<size_t>(sample_rate_hz / 100);
+}
+
+void SystemDelayTest::RenderAndCapture(int device_buffer_ms) {
+  EXPECT_EQ(0, WebRtcAec_BufferFarend(handle_, far_, samples_per_frame_));
+  EXPECT_EQ(0,
+            WebRtcAec_Process(handle_,
+                              &near_ptr_,
+                              1,
+                              &out_ptr_,
+                              samples_per_frame_,
+                              device_buffer_ms,
+                              0));
+}
+
+size_t SystemDelayTest::BufferFillUp() {
+  // To make sure we have a full buffer when we verify stability we first fill
+  // up the far-end buffer with the same amount as we will report in through
+  // Process().
+  size_t buffer_size = 0;
+  for (int i = 0; i < kDeviceBufMs / 10; i++) {
+    EXPECT_EQ(0, WebRtcAec_BufferFarend(handle_, far_, samples_per_frame_));
+    buffer_size += samples_per_frame_;
+    EXPECT_EQ(static_cast<int>(buffer_size),
+              WebRtcAec_system_delay(self_->aec));
+  }
+  return buffer_size;
+}
+
+void SystemDelayTest::RunStableStartup() {
+  // To make sure we have a full buffer when we verify stability we first fill
+  // up the far-end buffer with the same amount as we will report in through
+  // Process().
+  size_t buffer_size = BufferFillUp();
+
+  if (WebRtcAec_delay_agnostic_enabled(self_->aec) == 1) {
+    // In extended_filter mode we set the buffer size after the first processed
+    // 10 ms chunk. Hence, we don't need to wait for the reported system delay
+    // values to become stable.
+    RenderAndCapture(kDeviceBufMs);
+    buffer_size += samples_per_frame_;
+    EXPECT_EQ(0, self_->startup_phase);
+  } else {
+    // A stable device should be accepted and put in a regular process mode
+    // within |kStableConvergenceMs|.
+    int process_time_ms = 0;
+    for (; process_time_ms < kStableConvergenceMs; process_time_ms += 10) {
+      RenderAndCapture(kDeviceBufMs);
+      buffer_size += samples_per_frame_;
+      if (self_->startup_phase == 0) {
+        // We have left the startup phase.
+        break;
+      }
+    }
+    // Verify convergence time.
+    EXPECT_GT(kStableConvergenceMs, process_time_ms);
+  }
+  // Verify that the buffer has been flushed.
+  EXPECT_GE(static_cast<int>(buffer_size),
+            WebRtcAec_system_delay(self_->aec));
+}
+
+  int SystemDelayTest::MapBufferSizeToSamples(int size_in_ms,
+                                              bool extended_filter) {
+  // If extended_filter is disabled we add an extra 10 ms for the unprocessed
+  // frame. That is simply how the algorithm is constructed.
+  return static_cast<int>(
+      (size_in_ms + (extended_filter ? 0 : 10)) * samples_per_frame_ / 10);
+}
+
+// The tests should meet basic requirements and not be adjusted to what is
+// actually implemented. If we don't get good code coverage this way we either
+// lack in tests or have unnecessary code.
+// General requirements:
+// 1) If we add far-end data the system delay should be increased with the same
+//    amount we add.
+// 2) If the far-end buffer is full we should flush the oldest data to make room
+//    for the new. In this case the system delay is unaffected.
+// 3) There should exist a startup phase in which the buffer size is to be
+//    determined. In this phase no cancellation should be performed.
+// 4) Under stable conditions (small variations in device buffer sizes) the AEC
+//    should determine an appropriate local buffer size within
+//    |kStableConvergenceMs| ms.
+// 5) Under unstable conditions the AEC should make a decision within
+//    |kMaxConvergenceMs| ms.
+// 6) If the local buffer runs out of data we should stuff the buffer with older
+//    frames.
+// 7) The system delay should within |kMaxConvergenceMs| ms heal from
+//    disturbances like drift, data glitches, toggling events and outliers.
+// 8) The system delay should never become negative.
+
+TEST_F(SystemDelayTest, CorrectIncreaseWhenBufferFarend) {
+  // When we add data to the AEC buffer the internal system delay should be
+  // incremented with the same amount as the size of data.
+  // This process should be independent of DA-AEC and extended_filter mode.
+  for (int extended_filter = 0; extended_filter <= 1; ++extended_filter) {
+    WebRtcAec_enable_extended_filter(self_->aec, extended_filter);
+    EXPECT_EQ(extended_filter, WebRtcAec_extended_filter_enabled(self_->aec));
+    for (int da_aec = 0; da_aec <= 1; ++da_aec) {
+      WebRtcAec_enable_delay_agnostic(self_->aec, da_aec);
+      EXPECT_EQ(da_aec, WebRtcAec_delay_agnostic_enabled(self_->aec));
+      for (size_t i = 0; i < kNumSampleRates; i++) {
+        Init(kSampleRateHz[i]);
+        // Loop through a couple of calls to make sure the system delay
+        // increments correctly.
+        for (int j = 1; j <= 5; j++) {
+          EXPECT_EQ(0,
+                    WebRtcAec_BufferFarend(handle_, far_, samples_per_frame_));
+          EXPECT_EQ(static_cast<int>(j * samples_per_frame_),
+                    WebRtcAec_system_delay(self_->aec));
+        }
+      }
+    }
+  }
+}
+
+// TODO(bjornv): Add a test to verify behavior if the far-end buffer is full
+// when adding new data.
+
+TEST_F(SystemDelayTest, CorrectDelayAfterStableStartup) {
+  // We run the system in a stable startup. After that we verify that the system
+  // delay meets the requirements.
+  // This process should be independent of DA-AEC and extended_filter mode.
+  for (int extended_filter = 0; extended_filter <= 1; ++extended_filter) {
+    WebRtcAec_enable_extended_filter(self_->aec, extended_filter);
+    EXPECT_EQ(extended_filter, WebRtcAec_extended_filter_enabled(self_->aec));
+    for (int da_aec = 0; da_aec <= 1; ++da_aec) {
+      WebRtcAec_enable_delay_agnostic(self_->aec, da_aec);
+      EXPECT_EQ(da_aec, WebRtcAec_delay_agnostic_enabled(self_->aec));
+      for (size_t i = 0; i < kNumSampleRates; i++) {
+        Init(kSampleRateHz[i]);
+        RunStableStartup();
+
+        // Verify system delay with respect to requirements, i.e., the
+        // |system_delay| is in the interval [75%, 100%] of what's reported on
+        // the average.
+        // In extended_filter mode we target 50% and measure after one processed
+        // 10 ms chunk.
+        int average_reported_delay =
+            static_cast<int>(kDeviceBufMs * samples_per_frame_ / 10);
+        EXPECT_GE(average_reported_delay, WebRtcAec_system_delay(self_->aec));
+        int lower_bound = WebRtcAec_extended_filter_enabled(self_->aec)
+            ? (average_reported_delay / 2 -
+               rtc::checked_cast<int>(samples_per_frame_))
+            : average_reported_delay * 3 / 4;
+        EXPECT_LE(lower_bound, WebRtcAec_system_delay(self_->aec));
+      }
+    }
+  }
+}
+
+TEST_F(SystemDelayTest, CorrectDelayAfterUnstableStartup) {
+  // This test does not apply in extended_filter mode, since we only use the
+  // the first 10 ms chunk to determine a reasonable buffer size. Neither does
+  // it apply if DA-AEC is on because that overrides the startup procedure.
+  WebRtcAec_enable_extended_filter(self_->aec, 0);
+  EXPECT_EQ(0, WebRtcAec_extended_filter_enabled(self_->aec));
+  WebRtcAec_enable_delay_agnostic(self_->aec, 0);
+  EXPECT_EQ(0, WebRtcAec_delay_agnostic_enabled(self_->aec));
+
+  // In an unstable system we would start processing after |kMaxConvergenceMs|.
+  // On the last frame the AEC buffer is adjusted to 60% of the last reported
+  // device buffer size.
+  // We construct an unstable system by altering the device buffer size between
+  // two values |kDeviceBufMs| +- 25 ms.
+  for (size_t i = 0; i < kNumSampleRates; i++) {
+    Init(kSampleRateHz[i]);
+
+    // To make sure we have a full buffer when we verify stability we first fill
+    // up the far-end buffer with the same amount as we will report in on the
+    // average through Process().
+    size_t buffer_size = BufferFillUp();
+
+    int buffer_offset_ms = 25;
+    int reported_delay_ms = 0;
+    int process_time_ms = 0;
+    for (; process_time_ms <= kMaxConvergenceMs; process_time_ms += 10) {
+      reported_delay_ms = kDeviceBufMs + buffer_offset_ms;
+      RenderAndCapture(reported_delay_ms);
+      buffer_size += samples_per_frame_;
+      buffer_offset_ms = -buffer_offset_ms;
+      if (self_->startup_phase == 0) {
+        // We have left the startup phase.
+        break;
+      }
+    }
+    // Verify convergence time.
+    EXPECT_GE(kMaxConvergenceMs, process_time_ms);
+    // Verify that the buffer has been flushed.
+    EXPECT_GE(static_cast<int>(buffer_size),
+              WebRtcAec_system_delay(self_->aec));
+
+    // Verify system delay with respect to requirements, i.e., the
+    // |system_delay| is in the interval [60%, 100%] of what's last reported.
+    EXPECT_GE(static_cast<int>(reported_delay_ms * samples_per_frame_ / 10),
+              WebRtcAec_system_delay(self_->aec));
+    EXPECT_LE(
+        static_cast<int>(reported_delay_ms * samples_per_frame_ / 10 * 3 / 5),
+        WebRtcAec_system_delay(self_->aec));
+  }
+}
+
+TEST_F(SystemDelayTest, CorrectDelayAfterStableBufferBuildUp) {
+  // This test does not apply in extended_filter mode, since we only use the
+  // the first 10 ms chunk to determine a reasonable buffer size. Neither does
+  // it apply if DA-AEC is on because that overrides the startup procedure.
+  WebRtcAec_enable_extended_filter(self_->aec, 0);
+  EXPECT_EQ(0, WebRtcAec_extended_filter_enabled(self_->aec));
+  WebRtcAec_enable_delay_agnostic(self_->aec, 0);
+  EXPECT_EQ(0, WebRtcAec_delay_agnostic_enabled(self_->aec));
+
+  // In this test we start by establishing the device buffer size during stable
+  // conditions, but with an empty internal far-end buffer. Once that is done we
+  // verify that the system delay is increased correctly until we have reach an
+  // internal buffer size of 75% of what's been reported.
+  for (size_t i = 0; i < kNumSampleRates; i++) {
+    Init(kSampleRateHz[i]);
+
+    // We assume that running |kStableConvergenceMs| calls will put the
+    // algorithm in a state where the device buffer size has been determined. We
+    // can make that assumption since we have a separate stability test.
+    int process_time_ms = 0;
+    for (; process_time_ms < kStableConvergenceMs; process_time_ms += 10) {
+      EXPECT_EQ(0,
+                WebRtcAec_Process(handle_,
+                                  &near_ptr_,
+                                  1,
+                                  &out_ptr_,
+                                  samples_per_frame_,
+                                  kDeviceBufMs,
+                                  0));
+    }
+    // Verify that a buffer size has been established.
+    EXPECT_EQ(0, self_->checkBuffSize);
+
+    // We now have established the required buffer size. Let us verify that we
+    // fill up before leaving the startup phase for normal processing.
+    size_t buffer_size = 0;
+    size_t target_buffer_size = kDeviceBufMs * samples_per_frame_ / 10 * 3 / 4;
+    process_time_ms = 0;
+    for (; process_time_ms <= kMaxConvergenceMs; process_time_ms += 10) {
+      RenderAndCapture(kDeviceBufMs);
+      buffer_size += samples_per_frame_;
+      if (self_->startup_phase == 0) {
+        // We have left the startup phase.
+        break;
+      }
+    }
+    // Verify convergence time.
+    EXPECT_GT(kMaxConvergenceMs, process_time_ms);
+    // Verify that the buffer has reached the desired size.
+    EXPECT_LE(static_cast<int>(target_buffer_size),
+              WebRtcAec_system_delay(self_->aec));
+
+    // Verify normal behavior (system delay is kept constant) after startup by
+    // running a couple of calls to BufferFarend() and Process().
+    for (int j = 0; j < 6; j++) {
+      int system_delay_before_calls = WebRtcAec_system_delay(self_->aec);
+      RenderAndCapture(kDeviceBufMs);
+      EXPECT_EQ(system_delay_before_calls, WebRtcAec_system_delay(self_->aec));
+    }
+  }
+}
+
+TEST_F(SystemDelayTest, CorrectDelayWhenBufferUnderrun) {
+  // Here we test a buffer under run scenario. If we keep on calling
+  // WebRtcAec_Process() we will finally run out of data, but should
+  // automatically stuff the buffer. We verify this behavior by checking if the
+  // system delay goes negative.
+  // This process should be independent of DA-AEC and extended_filter mode.
+  for (int extended_filter = 0; extended_filter <= 1; ++extended_filter) {
+    WebRtcAec_enable_extended_filter(self_->aec, extended_filter);
+    EXPECT_EQ(extended_filter, WebRtcAec_extended_filter_enabled(self_->aec));
+    for (int da_aec = 0; da_aec <= 1; ++da_aec) {
+      WebRtcAec_enable_delay_agnostic(self_->aec, da_aec);
+      EXPECT_EQ(da_aec, WebRtcAec_delay_agnostic_enabled(self_->aec));
+      for (size_t i = 0; i < kNumSampleRates; i++) {
+        Init(kSampleRateHz[i]);
+        RunStableStartup();
+
+        // The AEC has now left the Startup phase. We now have at most
+        // |kStableConvergenceMs| in the buffer. Keep on calling Process() until
+        // we run out of data and verify that the system delay is non-negative.
+        for (int j = 0; j <= kStableConvergenceMs; j += 10) {
+          EXPECT_EQ(0, WebRtcAec_Process(handle_, &near_ptr_, 1, &out_ptr_,
+                                         samples_per_frame_, kDeviceBufMs, 0));
+          EXPECT_LE(0, WebRtcAec_system_delay(self_->aec));
+        }
+      }
+    }
+  }
+}
+
+TEST_F(SystemDelayTest, CorrectDelayDuringDrift) {
+  // This drift test should verify that the system delay is never exceeding the
+  // device buffer. The drift is simulated by decreasing the reported device
+  // buffer size by 1 ms every 100 ms. If the device buffer size goes below 30
+  // ms we jump (add) 10 ms to give a repeated pattern.
+
+  // This process should be independent of DA-AEC and extended_filter mode.
+  for (int extended_filter = 0; extended_filter <= 1; ++extended_filter) {
+    WebRtcAec_enable_extended_filter(self_->aec, extended_filter);
+    EXPECT_EQ(extended_filter, WebRtcAec_extended_filter_enabled(self_->aec));
+    for (int da_aec = 0; da_aec <= 1; ++da_aec) {
+      WebRtcAec_enable_delay_agnostic(self_->aec, da_aec);
+      EXPECT_EQ(da_aec, WebRtcAec_delay_agnostic_enabled(self_->aec));
+      for (size_t i = 0; i < kNumSampleRates; i++) {
+        Init(kSampleRateHz[i]);
+        RunStableStartup();
+
+        // We have left the startup phase and proceed with normal processing.
+        int jump = 0;
+        for (int j = 0; j < 1000; j++) {
+          // Drift = -1 ms per 100 ms of data.
+          int device_buf_ms = kDeviceBufMs - (j / 10) + jump;
+          int device_buf = MapBufferSizeToSamples(device_buf_ms,
+                                                  extended_filter == 1);
+
+          if (device_buf_ms < 30) {
+            // Add 10 ms data, taking affect next frame.
+            jump += 10;
+          }
+          RenderAndCapture(device_buf_ms);
+
+          // Verify that the system delay does not exceed the device buffer.
+          EXPECT_GE(device_buf, WebRtcAec_system_delay(self_->aec));
+
+          // Verify that the system delay is non-negative.
+          EXPECT_LE(0, WebRtcAec_system_delay(self_->aec));
+        }
+      }
+    }
+  }
+}
+
+TEST_F(SystemDelayTest, ShouldRecoverAfterGlitch) {
+  // This glitch test should verify that the system delay recovers if there is
+  // a glitch in data. The data glitch is constructed as 200 ms of buffering
+  // after which the stable procedure continues. The glitch is never reported by
+  // the device.
+  // The system is said to be in a non-causal state if the difference between
+  // the device buffer and system delay is less than a block (64 samples).
+
+  // This process should be independent of DA-AEC and extended_filter mode.
+  for (int extended_filter = 0; extended_filter <= 1; ++extended_filter) {
+    WebRtcAec_enable_extended_filter(self_->aec, extended_filter);
+    EXPECT_EQ(extended_filter, WebRtcAec_extended_filter_enabled(self_->aec));
+    for (int da_aec = 0; da_aec <= 1; ++da_aec) {
+      WebRtcAec_enable_delay_agnostic(self_->aec, da_aec);
+      EXPECT_EQ(da_aec, WebRtcAec_delay_agnostic_enabled(self_->aec));
+      for (size_t i = 0; i < kNumSampleRates; i++) {
+        Init(kSampleRateHz[i]);
+        RunStableStartup();
+        int device_buf = MapBufferSizeToSamples(kDeviceBufMs,
+                                                extended_filter == 1);
+        // Glitch state.
+        for (int j = 0; j < 20; j++) {
+          EXPECT_EQ(0,
+                    WebRtcAec_BufferFarend(handle_, far_, samples_per_frame_));
+          // No need to verify system delay, since that is done in a separate
+          // test.
+        }
+        // Verify that we are in a non-causal state, i.e.,
+        // |system_delay| > |device_buf|.
+        EXPECT_LT(device_buf, WebRtcAec_system_delay(self_->aec));
+
+        // Recover state. Should recover at least 4 ms of data per 10 ms, hence
+        // a glitch of 200 ms will take at most 200 * 10 / 4 = 500 ms to recover
+        // from.
+        bool non_causal = true;  // We are currently in a non-causal state.
+        for (int j = 0; j < 50; j++) {
+          int system_delay_before = WebRtcAec_system_delay(self_->aec);
+          RenderAndCapture(kDeviceBufMs);
+          int system_delay_after = WebRtcAec_system_delay(self_->aec);
+          // We have recovered if
+          // |device_buf| - |system_delay_after| >= PART_LEN (1 block).
+          // During recovery, |system_delay_after| < |system_delay_before|,
+          // otherwise they are equal.
+          if (non_causal) {
+            EXPECT_LT(system_delay_after, system_delay_before);
+            if (device_buf - system_delay_after >= PART_LEN) {
+              non_causal = false;
+            }
+          } else {
+            EXPECT_EQ(system_delay_before, system_delay_after);
+          }
+          // Verify that the system delay is non-negative.
+          EXPECT_LE(0, WebRtcAec_system_delay(self_->aec));
+        }
+        // Check that we have recovered.
+        EXPECT_FALSE(non_causal);
+      }
+    }
+  }
+}
+
+TEST_F(SystemDelayTest, UnaffectedWhenSpuriousDeviceBufferValues) {
+  // This test does not apply in extended_filter mode, since we only use the
+  // the first 10 ms chunk to determine a reasonable buffer size.
+  const int extended_filter = 0;
+  WebRtcAec_enable_extended_filter(self_->aec, extended_filter);
+  EXPECT_EQ(extended_filter, WebRtcAec_extended_filter_enabled(self_->aec));
+
+  // Should be DA-AEC independent.
+  for (int da_aec = 0; da_aec <= 1; ++da_aec) {
+    WebRtcAec_enable_delay_agnostic(self_->aec, da_aec);
+    EXPECT_EQ(da_aec, WebRtcAec_delay_agnostic_enabled(self_->aec));
+    // This spurious device buffer data test aims at verifying that the system
+    // delay is unaffected by large outliers.
+    // The system is said to be in a non-causal state if the difference between
+    // the device buffer and system delay is less than a block (64 samples).
+    for (size_t i = 0; i < kNumSampleRates; i++) {
+      Init(kSampleRateHz[i]);
+      RunStableStartup();
+      int device_buf = MapBufferSizeToSamples(kDeviceBufMs,
+                                              extended_filter == 1);
+
+      // Normal state. We are currently not in a non-causal state.
+      bool non_causal = false;
+
+      // Run 1 s and replace device buffer size with 500 ms every 100 ms.
+      for (int j = 0; j < 100; j++) {
+        int system_delay_before_calls = WebRtcAec_system_delay(self_->aec);
+        int device_buf_ms = j % 10 == 0 ? 500 : kDeviceBufMs;
+        RenderAndCapture(device_buf_ms);
+
+        // Check for non-causality.
+        if (device_buf - WebRtcAec_system_delay(self_->aec) < PART_LEN) {
+          non_causal = true;
+        }
+        EXPECT_FALSE(non_causal);
+        EXPECT_EQ(system_delay_before_calls,
+                  WebRtcAec_system_delay(self_->aec));
+
+        // Verify that the system delay is non-negative.
+        EXPECT_LE(0, WebRtcAec_system_delay(self_->aec));
+      }
+    }
+  }
+}
+
+TEST_F(SystemDelayTest, CorrectImpactWhenTogglingDeviceBufferValues) {
+  // This test aims at verifying that the system delay is "unaffected" by
+  // toggling values reported by the device.
+  // The test is constructed such that every other device buffer value is zero
+  // and then 2 * |kDeviceBufMs|, hence the size is constant on the average. The
+  // zero values will force us into a non-causal state and thereby lowering the
+  // system delay until we basically run out of data. Once that happens the
+  // buffer will be stuffed.
+  // TODO(bjornv): This test will have a better impact if we verified that the
+  // delay estimate goes up when the system delay goes down to meet the average
+  // device buffer size.
+
+  // This test does not apply if DA-AEC is enabled and extended_filter mode
+  // disabled.
+  for (int extended_filter = 0; extended_filter <= 1; ++extended_filter) {
+    WebRtcAec_enable_extended_filter(self_->aec, extended_filter);
+    EXPECT_EQ(extended_filter, WebRtcAec_extended_filter_enabled(self_->aec));
+    for (int da_aec = 0; da_aec <= 1; ++da_aec) {
+      WebRtcAec_enable_delay_agnostic(self_->aec, da_aec);
+      EXPECT_EQ(da_aec, WebRtcAec_delay_agnostic_enabled(self_->aec));
+      if (extended_filter == 0 && da_aec == 1) {
+        continue;
+      }
+      for (size_t i = 0; i < kNumSampleRates; i++) {
+        Init(kSampleRateHz[i]);
+        RunStableStartup();
+        const int device_buf = MapBufferSizeToSamples(kDeviceBufMs,
+                                                      extended_filter == 1);
+
+        // Normal state. We are currently not in a non-causal state.
+        bool non_causal = false;
+
+        // Loop through 100 frames (both render and capture), which equals 1 s
+        // of data. Every odd frame we set the device buffer size to
+        // 2 * |kDeviceBufMs| and even frames we set the device buffer size to
+        // zero.
+        for (int j = 0; j < 100; j++) {
+          int system_delay_before_calls = WebRtcAec_system_delay(self_->aec);
+          int device_buf_ms = 2 * (j % 2) * kDeviceBufMs;
+          RenderAndCapture(device_buf_ms);
+
+          // Check for non-causality, compared with the average device buffer
+          // size.
+          non_causal |= (device_buf - WebRtcAec_system_delay(self_->aec) < 64);
+          EXPECT_GE(system_delay_before_calls,
+                    WebRtcAec_system_delay(self_->aec));
+
+          // Verify that the system delay is non-negative.
+          EXPECT_LE(0, WebRtcAec_system_delay(self_->aec));
+        }
+        // Verify we are not in a non-causal state.
+        EXPECT_FALSE(non_causal);
+      }
+    }
+  }
+}
+
+}  // namespace
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/BUILD.gn b/modules/audio_processing/aec3/BUILD.gn
new file mode 100644
index 0000000..372b30f
--- /dev/null
+++ b/modules/audio_processing/aec3/BUILD.gn
@@ -0,0 +1,195 @@
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("//build/config/arm.gni")
+import("../../../webrtc.gni")
+
+rtc_static_library("aec3") {
+  visibility = [ "*" ]
+  configs += [ "..:apm_debug_dump" ]
+  sources = [
+    "adaptive_fir_filter.cc",
+    "adaptive_fir_filter.h",
+    "aec3_common.cc",
+    "aec3_common.h",
+    "aec3_fft.cc",
+    "aec3_fft.h",
+    "aec_state.cc",
+    "aec_state.h",
+    "block_framer.cc",
+    "block_framer.h",
+    "block_processor.cc",
+    "block_processor.h",
+    "block_processor_metrics.cc",
+    "block_processor_metrics.h",
+    "cascaded_biquad_filter.cc",
+    "cascaded_biquad_filter.h",
+    "comfort_noise_generator.cc",
+    "comfort_noise_generator.h",
+    "decimator.cc",
+    "decimator.h",
+    "delay_estimate.h",
+    "downsampled_render_buffer.cc",
+    "downsampled_render_buffer.h",
+    "echo_canceller3.cc",
+    "echo_canceller3.h",
+    "echo_path_delay_estimator.cc",
+    "echo_path_delay_estimator.h",
+    "echo_path_variability.cc",
+    "echo_path_variability.h",
+    "echo_remover.cc",
+    "echo_remover.h",
+    "echo_remover_metrics.cc",
+    "echo_remover_metrics.h",
+    "erl_estimator.cc",
+    "erl_estimator.h",
+    "erle_estimator.cc",
+    "erle_estimator.h",
+    "fft_buffer.cc",
+    "fft_buffer.h",
+    "fft_data.h",
+    "frame_blocker.cc",
+    "frame_blocker.h",
+    "main_filter_update_gain.cc",
+    "main_filter_update_gain.h",
+    "matched_filter.cc",
+    "matched_filter.h",
+    "matched_filter_lag_aggregator.cc",
+    "matched_filter_lag_aggregator.h",
+    "matrix_buffer.cc",
+    "matrix_buffer.h",
+    "output_selector.cc",
+    "output_selector.h",
+    "render_buffer.cc",
+    "render_buffer.h",
+    "render_delay_buffer.cc",
+    "render_delay_buffer.h",
+    "render_delay_controller.cc",
+    "render_delay_controller.h",
+    "render_delay_controller_metrics.cc",
+    "render_delay_controller_metrics.h",
+    "render_signal_analyzer.cc",
+    "render_signal_analyzer.h",
+    "residual_echo_estimator.cc",
+    "residual_echo_estimator.h",
+    "shadow_filter_update_gain.cc",
+    "shadow_filter_update_gain.h",
+    "skew_estimator.cc",
+    "skew_estimator.h",
+    "subtractor.cc",
+    "subtractor.h",
+    "subtractor_output.h",
+    "suppression_filter.cc",
+    "suppression_filter.h",
+    "suppression_gain.cc",
+    "suppression_gain.h",
+    "suppression_gain_limiter.cc",
+    "suppression_gain_limiter.h",
+    "vector_buffer.cc",
+    "vector_buffer.h",
+    "vector_math.h",
+  ]
+
+  defines = []
+  deps = [
+    "..:aec_core",
+    "..:apm_logging",
+    "..:audio_processing",
+    "../../..:typedefs",
+    "../../../api:array_view",
+    "../../../api:optional",
+    "../../../api/audio:aec3_config",
+    "../../../api/audio:echo_control",
+    "../../../common_audio:common_audio_c",
+    "../../../rtc_base:checks",
+    "../../../rtc_base:rtc_base_approved",
+    "../../../rtc_base:safe_minmax",
+    "../../../system_wrappers:cpu_features_api",
+    "../../../system_wrappers:metrics_api",
+  ]
+
+  configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+}
+
+if (rtc_include_tests) {
+  rtc_source_set("aec3_unittests") {
+    testonly = true
+
+    configs += [ "..:apm_debug_dump" ]
+    sources = [
+      "mock/mock_block_processor.h",
+      "mock/mock_echo_remover.h",
+      "mock/mock_render_delay_buffer.h",
+      "mock/mock_render_delay_controller.h",
+    ]
+
+    deps = [
+      ":aec3",
+      "..:apm_logging",
+      "..:audio_processing",
+      "..:audio_processing_unittests",
+      "../../..:typedefs",
+      "../../../api:array_view",
+      "../../../api:optional",
+      "../../../api/audio:aec3_config",
+      "../../../rtc_base:checks",
+      "../../../rtc_base:rtc_base_approved",
+      "../../../rtc_base:safe_minmax",
+      "../../../system_wrappers:cpu_features_api",
+      "../../../test:test_support",
+    ]
+
+    defines = []
+
+    if (rtc_enable_protobuf) {
+      sources += [
+        "adaptive_fir_filter_unittest.cc",
+        "aec3_fft_unittest.cc",
+        "aec_state_unittest.cc",
+        "block_framer_unittest.cc",
+        "block_processor_metrics_unittest.cc",
+        "block_processor_unittest.cc",
+        "cascaded_biquad_filter_unittest.cc",
+        "comfort_noise_generator_unittest.cc",
+        "decimator_unittest.cc",
+        "echo_canceller3_unittest.cc",
+        "echo_path_delay_estimator_unittest.cc",
+        "echo_path_variability_unittest.cc",
+        "echo_remover_metrics_unittest.cc",
+        "echo_remover_unittest.cc",
+        "erl_estimator_unittest.cc",
+        "erle_estimator_unittest.cc",
+        "fft_data_unittest.cc",
+        "frame_blocker_unittest.cc",
+        "main_filter_update_gain_unittest.cc",
+        "matched_filter_lag_aggregator_unittest.cc",
+        "matched_filter_unittest.cc",
+        "output_selector_unittest.cc",
+        "render_buffer_unittest.cc",
+        "render_delay_buffer_unittest.cc",
+        "render_delay_controller_metrics_unittest.cc",
+        "render_delay_controller_unittest.cc",
+        "render_signal_analyzer_unittest.cc",
+        "residual_echo_estimator_unittest.cc",
+        "shadow_filter_update_gain_unittest.cc",
+        "skew_estimator_unittest.cc",
+        "subtractor_unittest.cc",
+        "suppression_filter_unittest.cc",
+        "suppression_gain_unittest.cc",
+        "vector_math_unittest.cc",
+      ]
+    }
+
+    if ((!build_with_chromium || is_win) && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+
+    configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+  }
+}
diff --git a/modules/audio_processing/aec3/adaptive_fir_filter.cc b/modules/audio_processing/aec3/adaptive_fir_filter.cc
new file mode 100644
index 0000000..9bea40b
--- /dev/null
+++ b/modules/audio_processing/aec3/adaptive_fir_filter.cc
@@ -0,0 +1,616 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/adaptive_fir_filter.h"
+
+#if defined(WEBRTC_HAS_NEON)
+#include <arm_neon.h>
+#endif
+#include "typedefs.h"  // NOLINT(build/include)
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+#include <algorithm>
+#include <functional>
+
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace aec3 {
+
+// Computes and stores the frequency response of the filter.
+void UpdateFrequencyResponse(
+    rtc::ArrayView<const FftData> H,
+    std::vector<std::array<float, kFftLengthBy2Plus1>>* H2) {
+  RTC_DCHECK_EQ(H.size(), H2->size());
+  for (size_t k = 0; k < H.size(); ++k) {
+    std::transform(H[k].re.begin(), H[k].re.end(), H[k].im.begin(),
+                   (*H2)[k].begin(),
+                   [](float a, float b) { return a * a + b * b; });
+  }
+}
+
+#if defined(WEBRTC_HAS_NEON)
+// Computes and stores the frequency response of the filter.
+void UpdateFrequencyResponse_NEON(
+    rtc::ArrayView<const FftData> H,
+    std::vector<std::array<float, kFftLengthBy2Plus1>>* H2) {
+  RTC_DCHECK_EQ(H.size(), H2->size());
+  for (size_t k = 0; k < H.size(); ++k) {
+    for (size_t j = 0; j < kFftLengthBy2; j += 4) {
+      const float32x4_t re = vld1q_f32(&H[k].re[j]);
+      const float32x4_t im = vld1q_f32(&H[k].im[j]);
+      float32x4_t H2_k_j = vmulq_f32(re, re);
+      H2_k_j = vmlaq_f32(H2_k_j, im, im);
+      vst1q_f32(&(*H2)[k][j], H2_k_j);
+    }
+    (*H2)[k][kFftLengthBy2] = H[k].re[kFftLengthBy2] * H[k].re[kFftLengthBy2] +
+                              H[k].im[kFftLengthBy2] * H[k].im[kFftLengthBy2];
+  }
+}
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+// Computes and stores the frequency response of the filter.
+void UpdateFrequencyResponse_SSE2(
+    rtc::ArrayView<const FftData> H,
+    std::vector<std::array<float, kFftLengthBy2Plus1>>* H2) {
+  RTC_DCHECK_EQ(H.size(), H2->size());
+  for (size_t k = 0; k < H.size(); ++k) {
+    for (size_t j = 0; j < kFftLengthBy2; j += 4) {
+      const __m128 re = _mm_loadu_ps(&H[k].re[j]);
+      const __m128 re2 = _mm_mul_ps(re, re);
+      const __m128 im = _mm_loadu_ps(&H[k].im[j]);
+      const __m128 im2 = _mm_mul_ps(im, im);
+      const __m128 H2_k_j = _mm_add_ps(re2, im2);
+      _mm_storeu_ps(&(*H2)[k][j], H2_k_j);
+    }
+    (*H2)[k][kFftLengthBy2] = H[k].re[kFftLengthBy2] * H[k].re[kFftLengthBy2] +
+                              H[k].im[kFftLengthBy2] * H[k].im[kFftLengthBy2];
+  }
+}
+#endif
+
+// Computes and stores the echo return loss estimate of the filter, which is the
+// sum of the partition frequency responses.
+void UpdateErlEstimator(
+    const std::vector<std::array<float, kFftLengthBy2Plus1>>& H2,
+    std::array<float, kFftLengthBy2Plus1>* erl) {
+  erl->fill(0.f);
+  for (auto& H2_j : H2) {
+    std::transform(H2_j.begin(), H2_j.end(), erl->begin(), erl->begin(),
+                   std::plus<float>());
+  }
+}
+
+#if defined(WEBRTC_HAS_NEON)
+// Computes and stores the echo return loss estimate of the filter, which is the
+// sum of the partition frequency responses.
+void UpdateErlEstimator_NEON(
+    const std::vector<std::array<float, kFftLengthBy2Plus1>>& H2,
+    std::array<float, kFftLengthBy2Plus1>* erl) {
+  erl->fill(0.f);
+  for (auto& H2_j : H2) {
+    for (size_t k = 0; k < kFftLengthBy2; k += 4) {
+      const float32x4_t H2_j_k = vld1q_f32(&H2_j[k]);
+      float32x4_t erl_k = vld1q_f32(&(*erl)[k]);
+      erl_k = vaddq_f32(erl_k, H2_j_k);
+      vst1q_f32(&(*erl)[k], erl_k);
+    }
+    (*erl)[kFftLengthBy2] += H2_j[kFftLengthBy2];
+  }
+}
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+// Computes and stores the echo return loss estimate of the filter, which is the
+// sum of the partition frequency responses.
+void UpdateErlEstimator_SSE2(
+    const std::vector<std::array<float, kFftLengthBy2Plus1>>& H2,
+    std::array<float, kFftLengthBy2Plus1>* erl) {
+  erl->fill(0.f);
+  for (auto& H2_j : H2) {
+    for (size_t k = 0; k < kFftLengthBy2; k += 4) {
+      const __m128 H2_j_k = _mm_loadu_ps(&H2_j[k]);
+      __m128 erl_k = _mm_loadu_ps(&(*erl)[k]);
+      erl_k = _mm_add_ps(erl_k, H2_j_k);
+      _mm_storeu_ps(&(*erl)[k], erl_k);
+    }
+    (*erl)[kFftLengthBy2] += H2_j[kFftLengthBy2];
+  }
+}
+#endif
+
+// Adapts the filter partitions as H(t+1)=H(t)+G(t)*conj(X(t)).
+void AdaptPartitions(const RenderBuffer& render_buffer,
+                     const FftData& G,
+                     rtc::ArrayView<FftData> H) {
+  rtc::ArrayView<const FftData> render_buffer_data =
+      render_buffer.GetFftBuffer();
+  size_t index = render_buffer.Position();
+  for (auto& H_j : H) {
+    const FftData& X = render_buffer_data[index];
+    for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+      H_j.re[k] += X.re[k] * G.re[k] + X.im[k] * G.im[k];
+      H_j.im[k] += X.re[k] * G.im[k] - X.im[k] * G.re[k];
+    }
+
+    index = index < (render_buffer_data.size() - 1) ? index + 1 : 0;
+  }
+}
+
+#if defined(WEBRTC_HAS_NEON)
+// Adapts the filter partitions. (NEON variant)
+void AdaptPartitions_NEON(const RenderBuffer& render_buffer,
+                          const FftData& G,
+                          rtc::ArrayView<FftData> H) {
+  rtc::ArrayView<const FftData> render_buffer_data =
+      render_buffer.GetFftBuffer();
+  const int lim1 =
+      std::min(render_buffer_data.size() - render_buffer.Position(), H.size());
+  const int lim2 = H.size();
+  constexpr int kNumFourBinBands = kFftLengthBy2 / 4;
+  FftData* H_j = &H[0];
+  const FftData* X = &render_buffer_data[render_buffer.Position()];
+  int limit = lim1;
+  int j = 0;
+  do {
+    for (; j < limit; ++j, ++H_j, ++X) {
+      for (int k = 0, n = 0; n < kNumFourBinBands; ++n, k += 4) {
+        const float32x4_t G_re = vld1q_f32(&G.re[k]);
+        const float32x4_t G_im = vld1q_f32(&G.im[k]);
+        const float32x4_t X_re = vld1q_f32(&X->re[k]);
+        const float32x4_t X_im = vld1q_f32(&X->im[k]);
+        const float32x4_t H_re = vld1q_f32(&H_j->re[k]);
+        const float32x4_t H_im = vld1q_f32(&H_j->im[k]);
+        const float32x4_t a = vmulq_f32(X_re, G_re);
+        const float32x4_t e = vmlaq_f32(a, X_im, G_im);
+        const float32x4_t c = vmulq_f32(X_re, G_im);
+        const float32x4_t f = vmlsq_f32(c, X_im, G_re);
+        const float32x4_t g = vaddq_f32(H_re, e);
+        const float32x4_t h = vaddq_f32(H_im, f);
+
+        vst1q_f32(&H_j->re[k], g);
+        vst1q_f32(&H_j->im[k], h);
+      }
+    }
+
+    X = &render_buffer_data[0];
+    limit = lim2;
+  } while (j < lim2);
+
+  H_j = &H[0];
+  X = &render_buffer_data[render_buffer.Position()];
+  limit = lim1;
+  j = 0;
+  do {
+    for (; j < limit; ++j, ++H_j, ++X) {
+      H_j->re[kFftLengthBy2] += X->re[kFftLengthBy2] * G.re[kFftLengthBy2] +
+                                X->im[kFftLengthBy2] * G.im[kFftLengthBy2];
+      H_j->im[kFftLengthBy2] += X->re[kFftLengthBy2] * G.im[kFftLengthBy2] -
+                                X->im[kFftLengthBy2] * G.re[kFftLengthBy2];
+    }
+
+    X = &render_buffer_data[0];
+    limit = lim2;
+  } while (j < lim2);
+}
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+// Adapts the filter partitions. (SSE2 variant)
+void AdaptPartitions_SSE2(const RenderBuffer& render_buffer,
+                          const FftData& G,
+                          rtc::ArrayView<FftData> H) {
+  rtc::ArrayView<const FftData> render_buffer_data =
+      render_buffer.GetFftBuffer();
+  const int lim1 =
+      std::min(render_buffer_data.size() - render_buffer.Position(), H.size());
+  const int lim2 = H.size();
+  constexpr int kNumFourBinBands = kFftLengthBy2 / 4;
+  FftData* H_j;
+  const FftData* X;
+  int limit;
+  int j;
+  for (int k = 0, n = 0; n < kNumFourBinBands; ++n, k += 4) {
+    const __m128 G_re = _mm_loadu_ps(&G.re[k]);
+    const __m128 G_im = _mm_loadu_ps(&G.im[k]);
+
+    H_j = &H[0];
+    X = &render_buffer_data[render_buffer.Position()];
+    limit = lim1;
+    j = 0;
+    do {
+      for (; j < limit; ++j, ++H_j, ++X) {
+        const __m128 X_re = _mm_loadu_ps(&X->re[k]);
+        const __m128 X_im = _mm_loadu_ps(&X->im[k]);
+        const __m128 H_re = _mm_loadu_ps(&H_j->re[k]);
+        const __m128 H_im = _mm_loadu_ps(&H_j->im[k]);
+        const __m128 a = _mm_mul_ps(X_re, G_re);
+        const __m128 b = _mm_mul_ps(X_im, G_im);
+        const __m128 c = _mm_mul_ps(X_re, G_im);
+        const __m128 d = _mm_mul_ps(X_im, G_re);
+        const __m128 e = _mm_add_ps(a, b);
+        const __m128 f = _mm_sub_ps(c, d);
+        const __m128 g = _mm_add_ps(H_re, e);
+        const __m128 h = _mm_add_ps(H_im, f);
+        _mm_storeu_ps(&H_j->re[k], g);
+        _mm_storeu_ps(&H_j->im[k], h);
+      }
+
+      X = &render_buffer_data[0];
+      limit = lim2;
+    } while (j < lim2);
+  }
+
+  H_j = &H[0];
+  X = &render_buffer_data[render_buffer.Position()];
+  limit = lim1;
+  j = 0;
+  do {
+    for (; j < limit; ++j, ++H_j, ++X) {
+      H_j->re[kFftLengthBy2] += X->re[kFftLengthBy2] * G.re[kFftLengthBy2] +
+                                X->im[kFftLengthBy2] * G.im[kFftLengthBy2];
+      H_j->im[kFftLengthBy2] += X->re[kFftLengthBy2] * G.im[kFftLengthBy2] -
+                                X->im[kFftLengthBy2] * G.re[kFftLengthBy2];
+    }
+
+    X = &render_buffer_data[0];
+    limit = lim2;
+  } while (j < lim2);
+}
+#endif
+
+// Produces the filter output.
+void ApplyFilter(const RenderBuffer& render_buffer,
+                 rtc::ArrayView<const FftData> H,
+                 FftData* S) {
+  S->re.fill(0.f);
+  S->im.fill(0.f);
+
+  rtc::ArrayView<const FftData> render_buffer_data =
+      render_buffer.GetFftBuffer();
+  size_t index = render_buffer.Position();
+  for (auto& H_j : H) {
+    const FftData& X = render_buffer_data[index];
+    for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+      S->re[k] += X.re[k] * H_j.re[k] - X.im[k] * H_j.im[k];
+      S->im[k] += X.re[k] * H_j.im[k] + X.im[k] * H_j.re[k];
+    }
+    index = index < (render_buffer_data.size() - 1) ? index + 1 : 0;
+  }
+}
+
+#if defined(WEBRTC_HAS_NEON)
+// Produces the filter output (NEON variant).
+void ApplyFilter_NEON(const RenderBuffer& render_buffer,
+                      rtc::ArrayView<const FftData> H,
+                      FftData* S) {
+  RTC_DCHECK_GE(H.size(), H.size() - 1);
+  S->re.fill(0.f);
+  S->im.fill(0.f);
+
+  rtc::ArrayView<const FftData> render_buffer_data =
+      render_buffer.GetFftBuffer();
+  const int lim1 =
+      std::min(render_buffer_data.size() - render_buffer.Position(), H.size());
+  const int lim2 = H.size();
+  constexpr int kNumFourBinBands = kFftLengthBy2 / 4;
+  const FftData* H_j = &H[0];
+  const FftData* X = &render_buffer_data[render_buffer.Position()];
+
+  int j = 0;
+  int limit = lim1;
+  do {
+    for (; j < limit; ++j, ++H_j, ++X) {
+      for (int k = 0, n = 0; n < kNumFourBinBands; ++n, k += 4) {
+        const float32x4_t X_re = vld1q_f32(&X->re[k]);
+        const float32x4_t X_im = vld1q_f32(&X->im[k]);
+        const float32x4_t H_re = vld1q_f32(&H_j->re[k]);
+        const float32x4_t H_im = vld1q_f32(&H_j->im[k]);
+        const float32x4_t S_re = vld1q_f32(&S->re[k]);
+        const float32x4_t S_im = vld1q_f32(&S->im[k]);
+        const float32x4_t a = vmulq_f32(X_re, H_re);
+        const float32x4_t e = vmlsq_f32(a, X_im, H_im);
+        const float32x4_t c = vmulq_f32(X_re, H_im);
+        const float32x4_t f = vmlaq_f32(c, X_im, H_re);
+        const float32x4_t g = vaddq_f32(S_re, e);
+        const float32x4_t h = vaddq_f32(S_im, f);
+        vst1q_f32(&S->re[k], g);
+        vst1q_f32(&S->im[k], h);
+      }
+    }
+    limit = lim2;
+    X = &render_buffer_data[0];
+  } while (j < lim2);
+
+  H_j = &H[0];
+  X = &render_buffer_data[render_buffer.Position()];
+  j = 0;
+  limit = lim1;
+  do {
+    for (; j < limit; ++j, ++H_j, ++X) {
+      S->re[kFftLengthBy2] += X->re[kFftLengthBy2] * H_j->re[kFftLengthBy2] -
+                              X->im[kFftLengthBy2] * H_j->im[kFftLengthBy2];
+      S->im[kFftLengthBy2] += X->re[kFftLengthBy2] * H_j->im[kFftLengthBy2] +
+                              X->im[kFftLengthBy2] * H_j->re[kFftLengthBy2];
+    }
+    limit = lim2;
+    X = &render_buffer_data[0];
+  } while (j < lim2);
+}
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+// Produces the filter output (SSE2 variant).
+void ApplyFilter_SSE2(const RenderBuffer& render_buffer,
+                      rtc::ArrayView<const FftData> H,
+                      FftData* S) {
+  RTC_DCHECK_GE(H.size(), H.size() - 1);
+  S->re.fill(0.f);
+  S->im.fill(0.f);
+
+  rtc::ArrayView<const FftData> render_buffer_data =
+      render_buffer.GetFftBuffer();
+  const int lim1 =
+      std::min(render_buffer_data.size() - render_buffer.Position(), H.size());
+  const int lim2 = H.size();
+  constexpr int kNumFourBinBands = kFftLengthBy2 / 4;
+  const FftData* H_j = &H[0];
+  const FftData* X = &render_buffer_data[render_buffer.Position()];
+
+  int j = 0;
+  int limit = lim1;
+  do {
+    for (; j < limit; ++j, ++H_j, ++X) {
+      for (int k = 0, n = 0; n < kNumFourBinBands; ++n, k += 4) {
+        const __m128 X_re = _mm_loadu_ps(&X->re[k]);
+        const __m128 X_im = _mm_loadu_ps(&X->im[k]);
+        const __m128 H_re = _mm_loadu_ps(&H_j->re[k]);
+        const __m128 H_im = _mm_loadu_ps(&H_j->im[k]);
+        const __m128 S_re = _mm_loadu_ps(&S->re[k]);
+        const __m128 S_im = _mm_loadu_ps(&S->im[k]);
+        const __m128 a = _mm_mul_ps(X_re, H_re);
+        const __m128 b = _mm_mul_ps(X_im, H_im);
+        const __m128 c = _mm_mul_ps(X_re, H_im);
+        const __m128 d = _mm_mul_ps(X_im, H_re);
+        const __m128 e = _mm_sub_ps(a, b);
+        const __m128 f = _mm_add_ps(c, d);
+        const __m128 g = _mm_add_ps(S_re, e);
+        const __m128 h = _mm_add_ps(S_im, f);
+        _mm_storeu_ps(&S->re[k], g);
+        _mm_storeu_ps(&S->im[k], h);
+      }
+    }
+    limit = lim2;
+    X = &render_buffer_data[0];
+  } while (j < lim2);
+
+  H_j = &H[0];
+  X = &render_buffer_data[render_buffer.Position()];
+  j = 0;
+  limit = lim1;
+  do {
+    for (; j < limit; ++j, ++H_j, ++X) {
+      S->re[kFftLengthBy2] += X->re[kFftLengthBy2] * H_j->re[kFftLengthBy2] -
+                              X->im[kFftLengthBy2] * H_j->im[kFftLengthBy2];
+      S->im[kFftLengthBy2] += X->re[kFftLengthBy2] * H_j->im[kFftLengthBy2] +
+                              X->im[kFftLengthBy2] * H_j->re[kFftLengthBy2];
+    }
+    limit = lim2;
+    X = &render_buffer_data[0];
+  } while (j < lim2);
+}
+#endif
+
+}  // namespace aec3
+
+AdaptiveFirFilter::AdaptiveFirFilter(size_t max_size_partitions,
+                                     size_t initial_size_partitions,
+                                     size_t size_change_duration_blocks,
+                                     Aec3Optimization optimization,
+                                     ApmDataDumper* data_dumper)
+    : data_dumper_(data_dumper),
+      fft_(),
+      optimization_(optimization),
+      max_size_partitions_(max_size_partitions),
+      size_change_duration_blocks_(
+          static_cast<int>(size_change_duration_blocks)),
+      current_size_partitions_(initial_size_partitions),
+      target_size_partitions_(initial_size_partitions),
+      old_target_size_partitions_(initial_size_partitions),
+      H_(max_size_partitions_),
+      H2_(max_size_partitions_, std::array<float, kFftLengthBy2Plus1>()),
+      h_(GetTimeDomainLength(max_size_partitions_), 0.f) {
+  RTC_DCHECK(data_dumper_);
+  RTC_DCHECK_GE(max_size_partitions, initial_size_partitions);
+
+  RTC_DCHECK_LT(0, size_change_duration_blocks_);
+  one_by_size_change_duration_blocks_ = 1.f / size_change_duration_blocks_;
+
+  for (auto& H_j : H_) {
+    H_j.Clear();
+  }
+  for (auto& H2_k : H2_) {
+    H2_k.fill(0.f);
+  }
+  erl_.fill(0.f);
+  SetSizePartitions(current_size_partitions_, true);
+}
+
+AdaptiveFirFilter::~AdaptiveFirFilter() = default;
+
+void AdaptiveFirFilter::HandleEchoPathChange() {
+  size_t current_h_size = h_.size();
+  h_.resize(GetTimeDomainLength(max_size_partitions_));
+  std::fill(h_.begin(), h_.end(), 0.f);
+  h_.resize(current_h_size);
+
+  size_t current_size_partitions = H_.size();
+  H_.resize(max_size_partitions_);
+  for (auto& H_j : H_) {
+    H_j.Clear();
+  }
+  H_.resize(current_size_partitions);
+
+  H2_.resize(max_size_partitions_);
+  for (auto& H2_k : H2_) {
+    H2_k.fill(0.f);
+  }
+  H2_.resize(current_size_partitions);
+
+  erl_.fill(0.f);
+}
+
+void AdaptiveFirFilter::SetSizePartitions(size_t size, bool immediate_effect) {
+  RTC_DCHECK_EQ(max_size_partitions_, H_.capacity());
+  RTC_DCHECK_EQ(max_size_partitions_, H2_.capacity());
+  RTC_DCHECK_EQ(GetTimeDomainLength(max_size_partitions_), h_.capacity());
+  RTC_DCHECK_EQ(H_.size(), H2_.size());
+  RTC_DCHECK_EQ(h_.size(), GetTimeDomainLength(H_.size()));
+  RTC_DCHECK_LE(size, max_size_partitions_);
+
+  target_size_partitions_ = std::min(max_size_partitions_, size);
+  if (immediate_effect) {
+    current_size_partitions_ = old_target_size_partitions_ =
+        target_size_partitions_;
+    ResetFilterBuffersToCurrentSize();
+    size_change_counter_ = 0;
+  } else {
+    size_change_counter_ = size_change_duration_blocks_;
+  }
+}
+
+void AdaptiveFirFilter::ResetFilterBuffersToCurrentSize() {
+  if (current_size_partitions_ < H_.size()) {
+    for (size_t k = current_size_partitions_; k < H_.size(); ++k) {
+      H_[k].Clear();
+      H2_[k].fill(0.f);
+    }
+    std::fill(h_.begin() + GetTimeDomainLength(current_size_partitions_),
+              h_.end(), 0.f);
+  }
+
+  H_.resize(current_size_partitions_);
+  H2_.resize(current_size_partitions_);
+  h_.resize(GetTimeDomainLength(current_size_partitions_));
+}
+
+void AdaptiveFirFilter::UpdateSize() {
+  RTC_DCHECK_GE(size_change_duration_blocks_, size_change_counter_);
+  if (size_change_counter_ > 0) {
+    --size_change_counter_;
+
+    auto average = [](float from, float to, float from_weight) {
+      return from * from_weight + to * (1.f - from_weight);
+    };
+
+    float change_factor =
+        size_change_counter_ * one_by_size_change_duration_blocks_;
+
+    current_size_partitions_ = average(old_target_size_partitions_,
+                                       target_size_partitions_, change_factor);
+
+    ResetFilterBuffersToCurrentSize();
+  } else {
+    current_size_partitions_ = old_target_size_partitions_ =
+        target_size_partitions_;
+  }
+  RTC_DCHECK_LE(0, size_change_counter_);
+}
+
+void AdaptiveFirFilter::Filter(const RenderBuffer& render_buffer,
+                               FftData* S) const {
+  RTC_DCHECK(S);
+  switch (optimization_) {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+    case Aec3Optimization::kSse2:
+      aec3::ApplyFilter_SSE2(render_buffer, H_, S);
+      break;
+#endif
+#if defined(WEBRTC_HAS_NEON)
+    case Aec3Optimization::kNeon:
+      aec3::ApplyFilter_NEON(render_buffer, H_, S);
+      break;
+#endif
+    default:
+      aec3::ApplyFilter(render_buffer, H_, S);
+  }
+}
+
+void AdaptiveFirFilter::Adapt(const RenderBuffer& render_buffer,
+                              const FftData& G) {
+  // Update the filter size if needed.
+  UpdateSize();
+
+  // Adapt the filter.
+  switch (optimization_) {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+    case Aec3Optimization::kSse2:
+      aec3::AdaptPartitions_SSE2(render_buffer, G, H_);
+      break;
+#endif
+#if defined(WEBRTC_HAS_NEON)
+    case Aec3Optimization::kNeon:
+      aec3::AdaptPartitions_NEON(render_buffer, G, H_);
+      break;
+#endif
+    default:
+      aec3::AdaptPartitions(render_buffer, G, H_);
+  }
+
+  // Constrain the filter partitions in a cyclic manner.
+  Constrain();
+
+  // Update the frequency response and echo return loss for the filter.
+  switch (optimization_) {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+    case Aec3Optimization::kSse2:
+      aec3::UpdateFrequencyResponse_SSE2(H_, &H2_);
+      aec3::UpdateErlEstimator_SSE2(H2_, &erl_);
+      break;
+#endif
+#if defined(WEBRTC_HAS_NEON)
+    case Aec3Optimization::kNeon:
+      aec3::UpdateFrequencyResponse_NEON(H_, &H2_);
+      aec3::UpdateErlEstimator_NEON(H2_, &erl_);
+      break;
+#endif
+    default:
+      aec3::UpdateFrequencyResponse(H_, &H2_);
+      aec3::UpdateErlEstimator(H2_, &erl_);
+  }
+}
+
+// Constrains the a partiton of the frequency domain filter to be limited in
+// time via setting the relevant time-domain coefficients to zero.
+void AdaptiveFirFilter::Constrain() {
+  std::array<float, kFftLength> h;
+  fft_.Ifft(H_[partition_to_constrain_], &h);
+
+  static constexpr float kScale = 1.0f / kFftLengthBy2;
+  std::for_each(h.begin(), h.begin() + kFftLengthBy2,
+                [](float& a) { a *= kScale; });
+  std::fill(h.begin() + kFftLengthBy2, h.end(), 0.f);
+
+  std::copy(h.begin(), h.begin() + kFftLengthBy2,
+            h_.begin() + partition_to_constrain_ * kFftLengthBy2);
+
+  fft_.Fft(&h, &H_[partition_to_constrain_]);
+
+  partition_to_constrain_ = partition_to_constrain_ < (H_.size() - 1)
+                                ? partition_to_constrain_ + 1
+                                : 0;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/adaptive_fir_filter.h b/modules/audio_processing/aec3/adaptive_fir_filter.h
new file mode 100644
index 0000000..1e128b5
--- /dev/null
+++ b/modules/audio_processing/aec3/adaptive_fir_filter.h
@@ -0,0 +1,177 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ADAPTIVE_FIR_FILTER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ADAPTIVE_FIR_FILTER_H_
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+namespace aec3 {
+// Computes and stores the frequency response of the filter.
+void UpdateFrequencyResponse(
+    rtc::ArrayView<const FftData> H,
+    std::vector<std::array<float, kFftLengthBy2Plus1>>* H2);
+#if defined(WEBRTC_HAS_NEON)
+void UpdateFrequencyResponse_NEON(
+    rtc::ArrayView<const FftData> H,
+    std::vector<std::array<float, kFftLengthBy2Plus1>>* H2);
+#endif
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+void UpdateFrequencyResponse_SSE2(
+    rtc::ArrayView<const FftData> H,
+    std::vector<std::array<float, kFftLengthBy2Plus1>>* H2);
+#endif
+
+// Computes and stores the echo return loss estimate of the filter, which is the
+// sum of the partition frequency responses.
+void UpdateErlEstimator(
+    const std::vector<std::array<float, kFftLengthBy2Plus1>>& H2,
+    std::array<float, kFftLengthBy2Plus1>* erl);
+#if defined(WEBRTC_HAS_NEON)
+void UpdateErlEstimator_NEON(
+    const std::vector<std::array<float, kFftLengthBy2Plus1>>& H2,
+    std::array<float, kFftLengthBy2Plus1>* erl);
+#endif
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+void UpdateErlEstimator_SSE2(
+    const std::vector<std::array<float, kFftLengthBy2Plus1>>& H2,
+    std::array<float, kFftLengthBy2Plus1>* erl);
+#endif
+
+// Adapts the filter partitions.
+void AdaptPartitions(const RenderBuffer& render_buffer,
+                     const FftData& G,
+                     rtc::ArrayView<FftData> H);
+#if defined(WEBRTC_HAS_NEON)
+void AdaptPartitions_NEON(const RenderBuffer& render_buffer,
+                          const FftData& G,
+                          rtc::ArrayView<FftData> H);
+#endif
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+void AdaptPartitions_SSE2(const RenderBuffer& render_buffer,
+                          const FftData& G,
+                          rtc::ArrayView<FftData> H);
+#endif
+
+// Produces the filter output.
+void ApplyFilter(const RenderBuffer& render_buffer,
+                 rtc::ArrayView<const FftData> H,
+                 FftData* S);
+#if defined(WEBRTC_HAS_NEON)
+void ApplyFilter_NEON(const RenderBuffer& render_buffer,
+                      rtc::ArrayView<const FftData> H,
+                      FftData* S);
+#endif
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+void ApplyFilter_SSE2(const RenderBuffer& render_buffer,
+                      rtc::ArrayView<const FftData> H,
+                      FftData* S);
+#endif
+
+}  // namespace aec3
+
+// Provides a frequency domain adaptive filter functionality.
+class AdaptiveFirFilter {
+ public:
+  AdaptiveFirFilter(size_t max_size_partitions,
+                    size_t initial_size_partitions,
+                    size_t size_change_duration_blocks,
+                    Aec3Optimization optimization,
+                    ApmDataDumper* data_dumper);
+
+  ~AdaptiveFirFilter();
+
+  // Produces the output of the filter.
+  void Filter(const RenderBuffer& render_buffer, FftData* S) const;
+
+  // Adapts the filter.
+  void Adapt(const RenderBuffer& render_buffer, const FftData& G);
+
+  // Receives reports that known echo path changes have occured and adjusts
+  // the filter adaptation accordingly.
+  void HandleEchoPathChange();
+
+  // Returns the filter size.
+  size_t SizePartitions() const { return H_.size(); }
+
+  // Sets the filter size.
+  void SetSizePartitions(size_t size, bool immediate_effect);
+
+  // Returns the filter based echo return loss.
+  const std::array<float, kFftLengthBy2Plus1>& Erl() const { return erl_; }
+
+  // Returns the frequency responses for the filter partitions.
+  const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+  FilterFrequencyResponse() const {
+    return H2_;
+  }
+
+  // Returns the estimate of the impulse response.
+  const std::vector<float>& FilterImpulseResponse() const { return h_; }
+
+  void DumpFilter(const char* name_frequency_domain,
+                  const char* name_time_domain) {
+    size_t current_size = H_.size();
+    H_.resize(max_size_partitions_);
+    for (auto& H : H_) {
+      data_dumper_->DumpRaw(name_frequency_domain, H.re);
+      data_dumper_->DumpRaw(name_frequency_domain, H.im);
+    }
+    H_.resize(current_size);
+
+    current_size = h_.size();
+    h_.resize(GetTimeDomainLength(max_size_partitions_));
+    data_dumper_->DumpRaw(name_time_domain, h_);
+    h_.resize(current_size);
+  }
+
+ private:
+  // Constrain the filter partitions in a cyclic manner.
+  void Constrain();
+
+  // Resets the filter buffers to use the current size.
+  void ResetFilterBuffersToCurrentSize();
+
+  // Gradually Updates the current filter size towards the target size.
+  void UpdateSize();
+
+  ApmDataDumper* const data_dumper_;
+  const Aec3Fft fft_;
+  const Aec3Optimization optimization_;
+  const size_t max_size_partitions_;
+  const int size_change_duration_blocks_;
+  float one_by_size_change_duration_blocks_;
+  size_t current_size_partitions_;
+  size_t target_size_partitions_;
+  size_t old_target_size_partitions_;
+  int size_change_counter_ = 0;
+  std::vector<FftData> H_;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> H2_;
+  std::vector<float> h_;
+  std::array<float, kFftLengthBy2Plus1> erl_;
+  size_t partition_to_constrain_ = 0;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AdaptiveFirFilter);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_ADAPTIVE_FIR_FILTER_H_
diff --git a/modules/audio_processing/aec3/adaptive_fir_filter_unittest.cc b/modules/audio_processing/aec3/adaptive_fir_filter_unittest.cc
new file mode 100644
index 0000000..9fb11cd
--- /dev/null
+++ b/modules/audio_processing/aec3/adaptive_fir_filter_unittest.cc
@@ -0,0 +1,407 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/adaptive_fir_filter.h"
+
+#include <math.h>
+#include <algorithm>
+#include <numeric>
+#include <string>
+#include "typedefs.h"  // NOLINT(build/include)
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/cascaded_biquad_filter.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/aec3/render_signal_analyzer.h"
+#include "modules/audio_processing/aec3/shadow_filter_update_gain.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "rtc_base/random.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace aec3 {
+namespace {
+
+std::string ProduceDebugText(size_t delay) {
+  std::ostringstream ss;
+  ss << ", Delay: " << delay;
+  return ss.str();
+}
+
+}  // namespace
+
+#if defined(WEBRTC_HAS_NEON)
+// Verifies that the optimized methods for filter adaptation are similar to
+// their reference counterparts.
+TEST(AdaptiveFirFilter, FilterAdaptationNeonOptimizations) {
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(EchoCanceller3Config(), 3));
+  Random random_generator(42U);
+  std::vector<std::vector<float>> x(3, std::vector<float>(kBlockSize, 0.f));
+  FftData S_C;
+  FftData S_NEON;
+  FftData G;
+  Aec3Fft fft;
+  std::vector<FftData> H_C(10);
+  std::vector<FftData> H_NEON(10);
+  for (auto& H_j : H_C) {
+    H_j.Clear();
+  }
+  for (auto& H_j : H_NEON) {
+    H_j.Clear();
+  }
+
+  for (size_t k = 0; k < 30; ++k) {
+    RandomizeSampleVector(&random_generator, x[0]);
+    render_delay_buffer->Insert(x);
+    if (k == 0) {
+      render_delay_buffer->Reset();
+    }
+    render_delay_buffer->PrepareCaptureProcessing();
+  }
+  const auto& render_buffer = render_delay_buffer->GetRenderBuffer();
+
+  for (size_t j = 0; j < G.re.size(); ++j) {
+    G.re[j] = j / 10001.f;
+  }
+  for (size_t j = 1; j < G.im.size() - 1; ++j) {
+    G.im[j] = j / 20001.f;
+  }
+  G.im[0] = 0.f;
+  G.im[G.im.size() - 1] = 0.f;
+
+  AdaptPartitions_NEON(*render_buffer, G, H_NEON);
+  AdaptPartitions(*render_buffer, G, H_C);
+  AdaptPartitions_NEON(*render_buffer, G, H_NEON);
+  AdaptPartitions(*render_buffer, G, H_C);
+
+  for (size_t l = 0; l < H_C.size(); ++l) {
+    for (size_t j = 0; j < H_C[l].im.size(); ++j) {
+      EXPECT_NEAR(H_C[l].re[j], H_NEON[l].re[j], fabs(H_C[l].re[j] * 0.00001f));
+      EXPECT_NEAR(H_C[l].im[j], H_NEON[l].im[j], fabs(H_C[l].im[j] * 0.00001f));
+    }
+  }
+
+  ApplyFilter_NEON(*render_buffer, H_NEON, &S_NEON);
+  ApplyFilter(*render_buffer, H_C, &S_C);
+  for (size_t j = 0; j < S_C.re.size(); ++j) {
+    EXPECT_NEAR(S_C.re[j], S_NEON.re[j], fabs(S_C.re[j] * 0.00001f));
+    EXPECT_NEAR(S_C.im[j], S_NEON.im[j], fabs(S_C.re[j] * 0.00001f));
+  }
+}
+
+// Verifies that the optimized method for frequency response computation is
+// bitexact to the reference counterpart.
+TEST(AdaptiveFirFilter, UpdateFrequencyResponseNeonOptimization) {
+  const size_t kNumPartitions = 12;
+  std::vector<FftData> H(kNumPartitions);
+  std::vector<std::array<float, kFftLengthBy2Plus1>> H2(kNumPartitions);
+  std::vector<std::array<float, kFftLengthBy2Plus1>> H2_NEON(kNumPartitions);
+
+  for (size_t j = 0; j < H.size(); ++j) {
+    for (size_t k = 0; k < H[j].re.size(); ++k) {
+      H[j].re[k] = k + j / 3.f;
+      H[j].im[k] = j + k / 7.f;
+    }
+  }
+
+  UpdateFrequencyResponse(H, &H2);
+  UpdateFrequencyResponse_NEON(H, &H2_NEON);
+
+  for (size_t j = 0; j < H2.size(); ++j) {
+    for (size_t k = 0; k < H[j].re.size(); ++k) {
+      EXPECT_FLOAT_EQ(H2[j][k], H2_NEON[j][k]);
+    }
+  }
+}
+
+// Verifies that the optimized method for echo return loss computation is
+// bitexact to the reference counterpart.
+TEST(AdaptiveFirFilter, UpdateErlNeonOptimization) {
+  const size_t kNumPartitions = 12;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> H2(kNumPartitions);
+  std::array<float, kFftLengthBy2Plus1> erl;
+  std::array<float, kFftLengthBy2Plus1> erl_NEON;
+
+  for (size_t j = 0; j < H2.size(); ++j) {
+    for (size_t k = 0; k < H2[j].size(); ++k) {
+      H2[j][k] = k + j / 3.f;
+    }
+  }
+
+  UpdateErlEstimator(H2, &erl);
+  UpdateErlEstimator_NEON(H2, &erl_NEON);
+
+  for (size_t j = 0; j < erl.size(); ++j) {
+    EXPECT_FLOAT_EQ(erl[j], erl_NEON[j]);
+  }
+}
+
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+// Verifies that the optimized methods for filter adaptation are bitexact to
+// their reference counterparts.
+TEST(AdaptiveFirFilter, FilterAdaptationSse2Optimizations) {
+  bool use_sse2 = (WebRtc_GetCPUInfo(kSSE2) != 0);
+  if (use_sse2) {
+    std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+        RenderDelayBuffer::Create(EchoCanceller3Config(), 3));
+    Random random_generator(42U);
+    std::vector<std::vector<float>> x(3, std::vector<float>(kBlockSize, 0.f));
+    FftData S_C;
+    FftData S_SSE2;
+    FftData G;
+    Aec3Fft fft;
+    std::vector<FftData> H_C(10);
+    std::vector<FftData> H_SSE2(10);
+    for (auto& H_j : H_C) {
+      H_j.Clear();
+    }
+    for (auto& H_j : H_SSE2) {
+      H_j.Clear();
+    }
+
+    for (size_t k = 0; k < 500; ++k) {
+      RandomizeSampleVector(&random_generator, x[0]);
+      render_delay_buffer->Insert(x);
+      if (k == 0) {
+        render_delay_buffer->Reset();
+      }
+      render_delay_buffer->PrepareCaptureProcessing();
+      const auto& render_buffer = render_delay_buffer->GetRenderBuffer();
+
+      ApplyFilter_SSE2(*render_buffer, H_SSE2, &S_SSE2);
+      ApplyFilter(*render_buffer, H_C, &S_C);
+      for (size_t j = 0; j < S_C.re.size(); ++j) {
+        EXPECT_FLOAT_EQ(S_C.re[j], S_SSE2.re[j]);
+        EXPECT_FLOAT_EQ(S_C.im[j], S_SSE2.im[j]);
+      }
+
+      std::for_each(G.re.begin(), G.re.end(),
+                    [&](float& a) { a = random_generator.Rand<float>(); });
+      std::for_each(G.im.begin(), G.im.end(),
+                    [&](float& a) { a = random_generator.Rand<float>(); });
+
+      AdaptPartitions_SSE2(*render_buffer, G, H_SSE2);
+      AdaptPartitions(*render_buffer, G, H_C);
+
+      for (size_t k = 0; k < H_C.size(); ++k) {
+        for (size_t j = 0; j < H_C[k].re.size(); ++j) {
+          EXPECT_FLOAT_EQ(H_C[k].re[j], H_SSE2[k].re[j]);
+          EXPECT_FLOAT_EQ(H_C[k].im[j], H_SSE2[k].im[j]);
+        }
+      }
+    }
+  }
+}
+
+// Verifies that the optimized method for frequency response computation is
+// bitexact to the reference counterpart.
+TEST(AdaptiveFirFilter, UpdateFrequencyResponseSse2Optimization) {
+  bool use_sse2 = (WebRtc_GetCPUInfo(kSSE2) != 0);
+  if (use_sse2) {
+    const size_t kNumPartitions = 12;
+    std::vector<FftData> H(kNumPartitions);
+    std::vector<std::array<float, kFftLengthBy2Plus1>> H2(kNumPartitions);
+    std::vector<std::array<float, kFftLengthBy2Plus1>> H2_SSE2(kNumPartitions);
+
+    for (size_t j = 0; j < H.size(); ++j) {
+      for (size_t k = 0; k < H[j].re.size(); ++k) {
+        H[j].re[k] = k + j / 3.f;
+        H[j].im[k] = j + k / 7.f;
+      }
+    }
+
+    UpdateFrequencyResponse(H, &H2);
+    UpdateFrequencyResponse_SSE2(H, &H2_SSE2);
+
+    for (size_t j = 0; j < H2.size(); ++j) {
+      for (size_t k = 0; k < H[j].re.size(); ++k) {
+        EXPECT_FLOAT_EQ(H2[j][k], H2_SSE2[j][k]);
+      }
+    }
+  }
+}
+
+// Verifies that the optimized method for echo return loss computation is
+// bitexact to the reference counterpart.
+TEST(AdaptiveFirFilter, UpdateErlSse2Optimization) {
+  bool use_sse2 = (WebRtc_GetCPUInfo(kSSE2) != 0);
+  if (use_sse2) {
+    const size_t kNumPartitions = 12;
+    std::vector<std::array<float, kFftLengthBy2Plus1>> H2(kNumPartitions);
+    std::array<float, kFftLengthBy2Plus1> erl;
+    std::array<float, kFftLengthBy2Plus1> erl_SSE2;
+
+    for (size_t j = 0; j < H2.size(); ++j) {
+      for (size_t k = 0; k < H2[j].size(); ++k) {
+        H2[j][k] = k + j / 3.f;
+      }
+    }
+
+    UpdateErlEstimator(H2, &erl);
+    UpdateErlEstimator_SSE2(H2, &erl_SSE2);
+
+    for (size_t j = 0; j < erl.size(); ++j) {
+      EXPECT_FLOAT_EQ(erl[j], erl_SSE2[j]);
+    }
+  }
+}
+
+#endif
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// Verifies that the check for non-null data dumper works.
+TEST(AdaptiveFirFilter, NullDataDumper) {
+  EXPECT_DEATH(AdaptiveFirFilter(9, 9, 250, DetectOptimization(), nullptr), "");
+}
+
+// Verifies that the check for non-null filter output works.
+TEST(AdaptiveFirFilter, NullFilterOutput) {
+  ApmDataDumper data_dumper(42);
+  AdaptiveFirFilter filter(9, 9, 250, DetectOptimization(), &data_dumper);
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(EchoCanceller3Config(), 3));
+  EXPECT_DEATH(filter.Filter(*render_delay_buffer->GetRenderBuffer(), nullptr),
+               "");
+}
+
+#endif
+
+// Verifies that the filter statistics can be accessed when filter statistics
+// are turned on.
+TEST(AdaptiveFirFilter, FilterStatisticsAccess) {
+  ApmDataDumper data_dumper(42);
+  AdaptiveFirFilter filter(9, 9, 250, DetectOptimization(), &data_dumper);
+  filter.Erl();
+  filter.FilterFrequencyResponse();
+}
+
+// Verifies that the filter size if correctly repported.
+TEST(AdaptiveFirFilter, FilterSize) {
+  ApmDataDumper data_dumper(42);
+  for (size_t filter_size = 1; filter_size < 5; ++filter_size) {
+    AdaptiveFirFilter filter(filter_size, filter_size, 250,
+                             DetectOptimization(), &data_dumper);
+    EXPECT_EQ(filter_size, filter.SizePartitions());
+  }
+}
+
+// Verifies that the filter is being able to properly filter a signal and to
+// adapt its coefficients.
+TEST(AdaptiveFirFilter, FilterAndAdapt) {
+  constexpr size_t kNumBlocksToProcess = 1000;
+  ApmDataDumper data_dumper(42);
+  EchoCanceller3Config config;
+  AdaptiveFirFilter filter(config.filter.main.length_blocks,
+                           config.filter.main.length_blocks,
+                           config.filter.config_change_duration_blocks,
+                           DetectOptimization(), &data_dumper);
+  Aec3Fft fft;
+  config.delay.min_echo_path_delay_blocks = 0;
+  config.delay.default_delay = 1;
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(config, 3));
+  ShadowFilterUpdateGain gain(config.filter.shadow,
+                              config.filter.config_change_duration_blocks);
+  Random random_generator(42U);
+  std::vector<std::vector<float>> x(3, std::vector<float>(kBlockSize, 0.f));
+  std::vector<float> n(kBlockSize, 0.f);
+  std::vector<float> y(kBlockSize, 0.f);
+  AecState aec_state(EchoCanceller3Config{});
+  RenderSignalAnalyzer render_signal_analyzer(config);
+  rtc::Optional<DelayEstimate> delay_estimate;
+  std::vector<float> e(kBlockSize, 0.f);
+  std::array<float, kFftLength> s_scratch;
+  std::array<float, kBlockSize> s;
+  FftData S;
+  FftData G;
+  FftData E;
+  std::array<float, kFftLengthBy2Plus1> Y2;
+  std::array<float, kFftLengthBy2Plus1> E2_main;
+  std::array<float, kFftLengthBy2Plus1> E2_shadow;
+  // [B,A] = butter(2,100/8000,'high')
+  constexpr CascadedBiQuadFilter::BiQuadCoefficients
+      kHighPassFilterCoefficients = {{0.97261f, -1.94523f, 0.97261f},
+                                     {-1.94448f, 0.94598f}};
+  Y2.fill(0.f);
+  E2_main.fill(0.f);
+  E2_shadow.fill(0.f);
+
+  constexpr float kScale = 1.0f / kFftLengthBy2;
+
+  for (size_t delay_samples : {0, 64, 150, 200, 301}) {
+    DelayBuffer<float> delay_buffer(delay_samples);
+    CascadedBiQuadFilter x_hp_filter(kHighPassFilterCoefficients, 1);
+    CascadedBiQuadFilter y_hp_filter(kHighPassFilterCoefficients, 1);
+
+    SCOPED_TRACE(ProduceDebugText(delay_samples));
+    for (size_t k = 0; k < kNumBlocksToProcess; ++k) {
+      RandomizeSampleVector(&random_generator, x[0]);
+      delay_buffer.Delay(x[0], y);
+
+      RandomizeSampleVector(&random_generator, n);
+      static constexpr float kNoiseScaling = 1.f / 100.f;
+      std::transform(
+          y.begin(), y.end(), n.begin(), y.begin(),
+          [](float a, float b) { return a + b * kNoiseScaling; });
+
+      x_hp_filter.Process(x[0]);
+      y_hp_filter.Process(y);
+
+      render_delay_buffer->Insert(x);
+      if (k == 0) {
+        render_delay_buffer->Reset();
+      }
+      render_delay_buffer->PrepareCaptureProcessing();
+      const auto& render_buffer = render_delay_buffer->GetRenderBuffer();
+
+      render_signal_analyzer.Update(*render_buffer, aec_state.FilterDelay());
+
+      filter.Filter(*render_buffer, &S);
+      fft.Ifft(S, &s_scratch);
+      std::transform(y.begin(), y.end(), s_scratch.begin() + kFftLengthBy2,
+                     e.begin(),
+                     [&](float a, float b) { return a - b * kScale; });
+      std::for_each(e.begin(), e.end(),
+                    [](float& a) { a = rtc::SafeClamp(a, -32768.f, 32767.f); });
+      fft.ZeroPaddedFft(e, Aec3Fft::Window::kRectangular, &E);
+      for (size_t k = 0; k < kBlockSize; ++k) {
+        s[k] = kScale * s_scratch[k + kFftLengthBy2];
+      }
+
+      std::array<float, kFftLengthBy2Plus1> render_power;
+      render_buffer->SpectralSum(filter.SizePartitions(), &render_power);
+      gain.Compute(render_power, render_signal_analyzer, E,
+                   filter.SizePartitions(), false, &G);
+      filter.Adapt(*render_buffer, G);
+      aec_state.HandleEchoPathChange(EchoPathVariability(
+          false, EchoPathVariability::DelayAdjustment::kNone, false));
+      aec_state.Update(delay_estimate, filter.FilterFrequencyResponse(),
+                       filter.FilterImpulseResponse(), true, *render_buffer,
+                       E2_main, Y2, s, false);
+    }
+    // Verify that the filter is able to perform well.
+    EXPECT_LT(1000 * std::inner_product(e.begin(), e.end(), e.begin(), 0.f),
+              std::inner_product(y.begin(), y.end(), y.begin(), 0.f));
+    EXPECT_EQ(delay_samples / kBlockSize,
+              static_cast<size_t>(aec_state.FilterDelay()));
+  }
+}
+}  // namespace aec3
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/aec3_common.cc b/modules/audio_processing/aec3/aec3_common.cc
new file mode 100644
index 0000000..7becce4
--- /dev/null
+++ b/modules/audio_processing/aec3/aec3_common.cc
@@ -0,0 +1,32 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+
+#include "typedefs.h"  // NOLINT(build/include)
+#include "system_wrappers/include/cpu_features_wrapper.h"
+
+namespace webrtc {
+
+Aec3Optimization DetectOptimization() {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+  if (WebRtc_GetCPUInfo(kSSE2) != 0) {
+    return Aec3Optimization::kSse2;
+  }
+#endif
+
+#if defined(WEBRTC_HAS_NEON)
+  return Aec3Optimization::kNeon;
+#endif
+
+  return Aec3Optimization::kNone;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/aec3_common.h b/modules/audio_processing/aec3/aec3_common.h
new file mode 100644
index 0000000..47f0784
--- /dev/null
+++ b/modules/audio_processing/aec3/aec3_common.h
@@ -0,0 +1,120 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_AEC3_COMMON_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_AEC3_COMMON_H_
+
+#include <stddef.h>
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+#ifdef _MSC_VER /* visual c++ */
+#define ALIGN16_BEG __declspec(align(16))
+#define ALIGN16_END
+#else /* gcc or icc */
+#define ALIGN16_BEG
+#define ALIGN16_END __attribute__((aligned(16)))
+#endif
+
+enum class Aec3Optimization { kNone, kSse2, kNeon };
+
+constexpr int kNumBlocksPerSecond = 250;
+
+constexpr int kMetricsReportingIntervalBlocks = 10 * kNumBlocksPerSecond;
+constexpr int kMetricsComputationBlocks = 11;
+constexpr int kMetricsCollectionBlocks =
+    kMetricsReportingIntervalBlocks - kMetricsComputationBlocks;
+
+constexpr size_t kFftLengthBy2 = 64;
+constexpr size_t kFftLengthBy2Plus1 = kFftLengthBy2 + 1;
+constexpr size_t kFftLengthBy2Minus1 = kFftLengthBy2 - 1;
+constexpr size_t kFftLength = 2 * kFftLengthBy2;
+
+constexpr int kMaxAdaptiveFilterLength = 50;
+constexpr int kRenderTransferQueueSizeFrames = 100;
+
+constexpr size_t kMaxNumBands = 3;
+constexpr size_t kSubFrameLength = 80;
+
+constexpr size_t kBlockSize = kFftLengthBy2;
+constexpr size_t kBlockSizeLog2 = 6;
+
+constexpr size_t kExtendedBlockSize = 2 * kFftLengthBy2;
+constexpr size_t kMatchedFilterWindowSizeSubBlocks = 32;
+constexpr size_t kMatchedFilterAlignmentShiftSizeSubBlocks =
+    kMatchedFilterWindowSizeSubBlocks * 3 / 4;
+
+// TODO(peah): Integrate this with how it is done inside audio_processing_impl.
+constexpr size_t NumBandsForRate(int sample_rate_hz) {
+  return static_cast<size_t>(sample_rate_hz == 8000 ? 1
+                                                    : sample_rate_hz / 16000);
+}
+constexpr int LowestBandRate(int sample_rate_hz) {
+  return sample_rate_hz == 8000 ? sample_rate_hz : 16000;
+}
+
+constexpr bool ValidFullBandRate(int sample_rate_hz) {
+  return sample_rate_hz == 8000 || sample_rate_hz == 16000 ||
+         sample_rate_hz == 32000 || sample_rate_hz == 48000;
+}
+
+constexpr int GetTimeDomainLength(int filter_length_blocks) {
+  return filter_length_blocks * kFftLengthBy2;
+}
+
+constexpr size_t GetDownSampledBufferSize(size_t down_sampling_factor,
+                                          size_t num_matched_filters) {
+  return kBlockSize / down_sampling_factor *
+         (kMatchedFilterAlignmentShiftSizeSubBlocks * num_matched_filters +
+          kMatchedFilterWindowSizeSubBlocks + 1);
+}
+
+constexpr size_t GetRenderDelayBufferSize(size_t down_sampling_factor,
+                                          size_t num_matched_filters,
+                                          size_t filter_length_blocks) {
+  return GetDownSampledBufferSize(down_sampling_factor, num_matched_filters) /
+             (kBlockSize / down_sampling_factor) +
+         filter_length_blocks + 1;
+}
+
+// Detects what kind of optimizations to use for the code.
+Aec3Optimization DetectOptimization();
+
+static_assert(1 << kBlockSizeLog2 == kBlockSize,
+              "Proper number of shifts for blocksize");
+
+static_assert(1 == NumBandsForRate(8000), "Number of bands for 8 kHz");
+static_assert(1 == NumBandsForRate(16000), "Number of bands for 16 kHz");
+static_assert(2 == NumBandsForRate(32000), "Number of bands for 32 kHz");
+static_assert(3 == NumBandsForRate(48000), "Number of bands for 48 kHz");
+
+static_assert(8000 == LowestBandRate(8000), "Sample rate of band 0 for 8 kHz");
+static_assert(16000 == LowestBandRate(16000),
+              "Sample rate of band 0 for 16 kHz");
+static_assert(16000 == LowestBandRate(32000),
+              "Sample rate of band 0 for 32 kHz");
+static_assert(16000 == LowestBandRate(48000),
+              "Sample rate of band 0 for 48 kHz");
+
+static_assert(ValidFullBandRate(8000),
+              "Test that 8 kHz is a valid sample rate");
+static_assert(ValidFullBandRate(16000),
+              "Test that 16 kHz is a valid sample rate");
+static_assert(ValidFullBandRate(32000),
+              "Test that 32 kHz is a valid sample rate");
+static_assert(ValidFullBandRate(48000),
+              "Test that 48 kHz is a valid sample rate");
+static_assert(!ValidFullBandRate(8001),
+              "Test that 8001 Hz is not a valid sample rate");
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_AEC3_COMMON_H_
diff --git a/modules/audio_processing/aec3/aec3_fft.cc b/modules/audio_processing/aec3/aec3_fft.cc
new file mode 100644
index 0000000..d669036
--- /dev/null
+++ b/modules/audio_processing/aec3/aec3_fft.cc
@@ -0,0 +1,75 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/aec3_fft.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+
+const float kHanning64[kFftLengthBy2] = {
+    0.f,         0.00248461f, 0.00991376f, 0.0222136f,  0.03926189f,
+    0.06088921f, 0.08688061f, 0.11697778f, 0.15088159f, 0.1882551f,
+    0.22872687f, 0.27189467f, 0.31732949f, 0.36457977f, 0.41317591f,
+    0.46263495f, 0.51246535f, 0.56217185f, 0.61126047f, 0.65924333f,
+    0.70564355f, 0.75f,       0.79187184f, 0.83084292f, 0.86652594f,
+    0.89856625f, 0.92664544f, 0.95048443f, 0.96984631f, 0.98453864f,
+    0.99441541f, 0.99937846f, 0.99937846f, 0.99441541f, 0.98453864f,
+    0.96984631f, 0.95048443f, 0.92664544f, 0.89856625f, 0.86652594f,
+    0.83084292f, 0.79187184f, 0.75f,       0.70564355f, 0.65924333f,
+    0.61126047f, 0.56217185f, 0.51246535f, 0.46263495f, 0.41317591f,
+    0.36457977f, 0.31732949f, 0.27189467f, 0.22872687f, 0.1882551f,
+    0.15088159f, 0.11697778f, 0.08688061f, 0.06088921f, 0.03926189f,
+    0.0222136f,  0.00991376f, 0.00248461f, 0.f};
+
+}  // namespace
+
+// TODO(peah): Change x to be std::array once the rest of the code allows this.
+void Aec3Fft::ZeroPaddedFft(rtc::ArrayView<const float> x,
+                            Window window,
+                            FftData* X) const {
+  RTC_DCHECK(X);
+  RTC_DCHECK_EQ(kFftLengthBy2, x.size());
+  std::array<float, kFftLength> fft;
+  std::fill(fft.begin(), fft.begin() + kFftLengthBy2, 0.f);
+  switch (window) {
+    case Window::kRectangular:
+      std::copy(x.begin(), x.end(), fft.begin() + kFftLengthBy2);
+      break;
+    case Window::kHanning:
+      std::transform(x.begin(), x.end(), std::begin(kHanning64),
+                     fft.begin() + kFftLengthBy2,
+                     [](float a, float b) { return a * b; });
+      break;
+    default:
+      RTC_NOTREACHED();
+  }
+
+  Fft(&fft, X);
+}
+
+void Aec3Fft::PaddedFft(rtc::ArrayView<const float> x,
+                        rtc::ArrayView<float> x_old,
+                        FftData* X) const {
+  RTC_DCHECK(X);
+  RTC_DCHECK_EQ(kFftLengthBy2, x.size());
+  RTC_DCHECK_EQ(kFftLengthBy2, x_old.size());
+  std::array<float, kFftLength> fft;
+  std::copy(x_old.begin(), x_old.end(), fft.begin());
+  std::copy(x.begin(), x.end(), fft.begin() + x_old.size());
+  std::copy(x.begin(), x.end(), x_old.begin());
+  Fft(&fft, X);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/aec3_fft.h b/modules/audio_processing/aec3/aec3_fft.h
new file mode 100644
index 0000000..f3dddb3
--- /dev/null
+++ b/modules/audio_processing/aec3/aec3_fft.h
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_AEC3_FFT_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_AEC3_FFT_H_
+
+#include <array>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "modules/audio_processing/utility/ooura_fft.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Wrapper class that provides 128 point real valued FFT functionality with the
+// FftData type.
+class Aec3Fft {
+ public:
+  enum class Window { kRectangular, kHanning };
+
+  Aec3Fft() = default;
+  // Computes the FFT. Note that both the input and output are modified.
+  void Fft(std::array<float, kFftLength>* x, FftData* X) const {
+    RTC_DCHECK(x);
+    RTC_DCHECK(X);
+    ooura_fft_.Fft(x->data());
+    X->CopyFromPackedArray(*x);
+  }
+  // Computes the inverse Fft.
+  void Ifft(const FftData& X, std::array<float, kFftLength>* x) const {
+    RTC_DCHECK(x);
+    X.CopyToPackedArray(x);
+    ooura_fft_.InverseFft(x->data());
+  }
+
+  // Windows the input using a Hanning window, and then adds padding of
+  // kFftLengthBy2 initial zeros before computing the Fft.
+  void ZeroPaddedFft(rtc::ArrayView<const float> x,
+                     Window window,
+                     FftData* X) const;
+
+  // Concatenates the kFftLengthBy2 values long x and x_old before computing the
+  // Fft. After that, x is copied to x_old.
+  void PaddedFft(rtc::ArrayView<const float> x,
+                 rtc::ArrayView<float> x_old,
+                 FftData* X) const;
+
+ private:
+  const OouraFft ooura_fft_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(Aec3Fft);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_AEC3_FFT_H_
diff --git a/modules/audio_processing/aec3/aec3_fft_unittest.cc b/modules/audio_processing/aec3/aec3_fft_unittest.cc
new file mode 100644
index 0000000..87fe7a8
--- /dev/null
+++ b/modules/audio_processing/aec3/aec3_fft_unittest.cc
@@ -0,0 +1,212 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/aec3_fft.h"
+
+#include <algorithm>
+
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies that the check for non-null input in Fft works.
+TEST(Aec3Fft, NullFftInput) {
+  Aec3Fft fft;
+  FftData X;
+  EXPECT_DEATH(fft.Fft(nullptr, &X), "");
+}
+
+// Verifies that the check for non-null input in Fft works.
+TEST(Aec3Fft, NullFftOutput) {
+  Aec3Fft fft;
+  std::array<float, kFftLength> x;
+  EXPECT_DEATH(fft.Fft(&x, nullptr), "");
+}
+
+// Verifies that the check for non-null output in Ifft works.
+TEST(Aec3Fft, NullIfftOutput) {
+  Aec3Fft fft;
+  FftData X;
+  EXPECT_DEATH(fft.Ifft(X, nullptr), "");
+}
+
+// Verifies that the check for non-null output in ZeroPaddedFft works.
+TEST(Aec3Fft, NullZeroPaddedFftOutput) {
+  Aec3Fft fft;
+  std::array<float, kFftLengthBy2> x;
+  EXPECT_DEATH(fft.ZeroPaddedFft(x, Aec3Fft::Window::kRectangular, nullptr),
+               "");
+}
+
+// Verifies that the check for input length in ZeroPaddedFft works.
+TEST(Aec3Fft, ZeroPaddedFftWrongInputLength) {
+  Aec3Fft fft;
+  FftData X;
+  std::array<float, kFftLengthBy2 - 1> x;
+  EXPECT_DEATH(fft.ZeroPaddedFft(x, Aec3Fft::Window::kRectangular, &X), "");
+}
+
+// Verifies that the check for non-null output in PaddedFft works.
+TEST(Aec3Fft, NullPaddedFftOutput) {
+  Aec3Fft fft;
+  std::array<float, kFftLengthBy2> x;
+  std::array<float, kFftLengthBy2> x_old;
+  EXPECT_DEATH(fft.PaddedFft(x, x_old, nullptr), "");
+}
+
+// Verifies that the check for input length in PaddedFft works.
+TEST(Aec3Fft, PaddedFftWrongInputLength) {
+  Aec3Fft fft;
+  FftData X;
+  std::array<float, kFftLengthBy2 - 1> x;
+  std::array<float, kFftLengthBy2> x_old;
+  EXPECT_DEATH(fft.PaddedFft(x, x_old, &X), "");
+}
+
+// Verifies that the check for length in the old value in PaddedFft works.
+TEST(Aec3Fft, PaddedFftWrongOldValuesLength) {
+  Aec3Fft fft;
+  FftData X;
+  std::array<float, kFftLengthBy2> x;
+  std::array<float, kFftLengthBy2 - 1> x_old;
+  EXPECT_DEATH(fft.PaddedFft(x, x_old, &X), "");
+}
+
+#endif
+
+// Verifies that Fft works as intended.
+TEST(Aec3Fft, Fft) {
+  Aec3Fft fft;
+  FftData X;
+  std::array<float, kFftLength> x;
+  x.fill(0.f);
+  fft.Fft(&x, &X);
+  EXPECT_THAT(X.re, ::testing::Each(0.f));
+  EXPECT_THAT(X.im, ::testing::Each(0.f));
+
+  x.fill(0.f);
+  x[0] = 1.f;
+  fft.Fft(&x, &X);
+  EXPECT_THAT(X.re, ::testing::Each(1.f));
+  EXPECT_THAT(X.im, ::testing::Each(0.f));
+
+  x.fill(1.f);
+  fft.Fft(&x, &X);
+  EXPECT_EQ(128.f, X.re[0]);
+  std::for_each(X.re.begin() + 1, X.re.end(),
+                [](float a) { EXPECT_EQ(0.f, a); });
+  EXPECT_THAT(X.im, ::testing::Each(0.f));
+}
+
+// Verifies that InverseFft works as intended.
+TEST(Aec3Fft, Ifft) {
+  Aec3Fft fft;
+  FftData X;
+  std::array<float, kFftLength> x;
+
+  X.re.fill(0.f);
+  X.im.fill(0.f);
+  fft.Ifft(X, &x);
+  EXPECT_THAT(x, ::testing::Each(0.f));
+
+  X.re.fill(1.f);
+  X.im.fill(0.f);
+  fft.Ifft(X, &x);
+  EXPECT_EQ(64.f, x[0]);
+  std::for_each(x.begin() + 1, x.end(), [](float a) { EXPECT_EQ(0.f, a); });
+
+  X.re.fill(0.f);
+  X.re[0] = 128;
+  X.im.fill(0.f);
+  fft.Ifft(X, &x);
+  EXPECT_THAT(x, ::testing::Each(64.f));
+}
+
+// Verifies that InverseFft and Fft work as intended.
+TEST(Aec3Fft, FftAndIfft) {
+  Aec3Fft fft;
+  FftData X;
+  std::array<float, kFftLength> x;
+  std::array<float, kFftLength> x_ref;
+
+  int v = 0;
+  for (int k = 0; k < 20; ++k) {
+    for (size_t j = 0; j < x.size(); ++j) {
+      x[j] = v++;
+      x_ref[j] = x[j] * 64.f;
+    }
+    fft.Fft(&x, &X);
+    fft.Ifft(X, &x);
+    for (size_t j = 0; j < x.size(); ++j) {
+      EXPECT_NEAR(x_ref[j], x[j], 0.001f);
+    }
+  }
+}
+
+// Verifies that ZeroPaddedFft work as intended.
+TEST(Aec3Fft, ZeroPaddedFft) {
+  Aec3Fft fft;
+  FftData X;
+  std::array<float, kFftLengthBy2> x_in;
+  std::array<float, kFftLength> x_ref;
+  std::array<float, kFftLength> x_out;
+
+  int v = 0;
+  x_ref.fill(0.f);
+  for (int k = 0; k < 20; ++k) {
+    for (size_t j = 0; j < x_in.size(); ++j) {
+      x_in[j] = v++;
+      x_ref[j + kFftLengthBy2] = x_in[j] * 64.f;
+    }
+    fft.ZeroPaddedFft(x_in, Aec3Fft::Window::kRectangular, &X);
+    fft.Ifft(X, &x_out);
+    for (size_t j = 0; j < x_out.size(); ++j) {
+      EXPECT_NEAR(x_ref[j], x_out[j], 0.1f);
+    }
+  }
+}
+
+// Verifies that ZeroPaddedFft work as intended.
+TEST(Aec3Fft, PaddedFft) {
+  Aec3Fft fft;
+  FftData X;
+  std::array<float, kFftLengthBy2> x_in;
+  std::array<float, kFftLength> x_out;
+  std::array<float, kFftLengthBy2> x_old;
+  std::array<float, kFftLengthBy2> x_old_ref;
+  std::array<float, kFftLength> x_ref;
+
+  int v = 0;
+  x_old.fill(0.f);
+  for (int k = 0; k < 20; ++k) {
+    for (size_t j = 0; j < x_in.size(); ++j) {
+      x_in[j] = v++;
+    }
+
+    std::copy(x_old.begin(), x_old.end(), x_ref.begin());
+    std::copy(x_in.begin(), x_in.end(), x_ref.begin() + kFftLengthBy2);
+    std::copy(x_in.begin(), x_in.end(), x_old_ref.begin());
+    std::for_each(x_ref.begin(), x_ref.end(), [](float& a) { a *= 64.f; });
+
+    fft.PaddedFft(x_in, x_old, &X);
+    fft.Ifft(X, &x_out);
+
+    for (size_t j = 0; j < x_out.size(); ++j) {
+      EXPECT_NEAR(x_ref[j], x_out[j], 0.1f);
+    }
+
+    EXPECT_EQ(x_old_ref, x_old);
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/aec_state.cc b/modules/audio_processing/aec3/aec_state.cc
new file mode 100644
index 0000000..533290c
--- /dev/null
+++ b/modules/audio_processing/aec3/aec_state.cc
@@ -0,0 +1,437 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/aec_state.h"
+
+#include <math.h>
+
+#include <numeric>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/atomicops.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+// Computes delay of the adaptive filter.
+int EstimateFilterDelay(
+    const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+        adaptive_filter_frequency_response) {
+  const auto& H2 = adaptive_filter_frequency_response;
+  constexpr size_t kUpperBin = kFftLengthBy2 - 5;
+  RTC_DCHECK_GE(kMaxAdaptiveFilterLength, H2.size());
+  std::array<int, kMaxAdaptiveFilterLength> delays;
+  delays.fill(0);
+  for (size_t k = 1; k < kUpperBin; ++k) {
+    // Find the maximum of H2[j].
+    size_t peak = 0;
+    for (size_t j = 0; j < H2.size(); ++j) {
+      if (H2[j][k] > H2[peak][k]) {
+        peak = j;
+      }
+    }
+    ++delays[peak];
+  }
+
+  return std::distance(delays.begin(),
+                       std::max_element(delays.begin(), delays.end()));
+}
+
+float ComputeGainRampupIncrease(const EchoCanceller3Config& config) {
+  const auto& c = config.echo_removal_control.gain_rampup;
+  return powf(1.f / c.first_non_zero_gain, 1.f / c.non_zero_gain_blocks);
+}
+
+}  // namespace
+
+int AecState::instance_count_ = 0;
+
+AecState::AecState(const EchoCanceller3Config& config)
+    : data_dumper_(
+          new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))),
+      erle_estimator_(config.erle.min, config.erle.max_l, config.erle.max_h),
+      config_(config),
+      max_render_(config_.filter.main.length_blocks, 0.f),
+      reverb_decay_(fabsf(config_.ep_strength.default_len)),
+      gain_rampup_increase_(ComputeGainRampupIncrease(config_)),
+      suppression_gain_limiter_(config_) {}
+
+AecState::~AecState() = default;
+
+void AecState::HandleEchoPathChange(
+    const EchoPathVariability& echo_path_variability) {
+  const auto full_reset = [&]() {
+    blocks_since_last_saturation_ = 0;
+    usable_linear_estimate_ = false;
+    echo_leakage_detected_ = false;
+    capture_signal_saturation_ = false;
+    echo_saturation_ = false;
+    previous_max_sample_ = 0.f;
+    std::fill(max_render_.begin(), max_render_.end(), 0.f);
+    blocks_with_proper_filter_adaptation_ = 0;
+    capture_block_counter_ = 0;
+    filter_has_had_time_to_converge_ = false;
+    render_received_ = false;
+    blocks_with_active_render_ = 0;
+    initial_state_ = true;
+    suppression_gain_limiter_.Reset();
+  };
+
+  // TODO(peah): Refine the reset scheme according to the type of gain and
+  // delay adjustment.
+  if (echo_path_variability.gain_change) {
+    full_reset();
+  }
+
+  if (echo_path_variability.delay_change !=
+      EchoPathVariability::DelayAdjustment::kBufferReadjustment) {
+    full_reset();
+  } else if (echo_path_variability.delay_change !=
+             EchoPathVariability::DelayAdjustment::kBufferFlush) {
+    full_reset();
+  } else if (echo_path_variability.delay_change !=
+             EchoPathVariability::DelayAdjustment::kDelayReset) {
+    full_reset();
+  } else if (echo_path_variability.delay_change !=
+             EchoPathVariability::DelayAdjustment::kNewDetectedDelay) {
+    full_reset();
+  } else if (echo_path_variability.gain_change) {
+    capture_block_counter_ = kNumBlocksPerSecond;
+  }
+}
+
+void AecState::Update(
+    const rtc::Optional<DelayEstimate>& delay_estimate,
+    const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+        adaptive_filter_frequency_response,
+    const std::vector<float>& adaptive_filter_impulse_response,
+    bool converged_filter,
+    const RenderBuffer& render_buffer,
+    const std::array<float, kFftLengthBy2Plus1>& E2_main,
+    const std::array<float, kFftLengthBy2Plus1>& Y2,
+    const std::array<float, kBlockSize>& s,
+    bool echo_leakage_detected) {
+  // Store input parameters.
+  echo_leakage_detected_ = echo_leakage_detected;
+
+  // Estimate the filter delay.
+  filter_delay_ = EstimateFilterDelay(adaptive_filter_frequency_response);
+  const std::vector<float>& x = render_buffer.Block(-filter_delay_)[0];
+
+  // Update counters.
+  ++capture_block_counter_;
+  const bool active_render_block = DetectActiveRender(x);
+  blocks_with_active_render_ += active_render_block ? 1 : 0;
+  blocks_with_proper_filter_adaptation_ +=
+      active_render_block && !SaturatedCapture() ? 1 : 0;
+
+  // Update the limit on the echo suppression after an echo path change to avoid
+  // an initial echo burst.
+  suppression_gain_limiter_.Update(render_buffer.GetRenderActivity());
+
+  // Update the ERL and ERLE measures.
+  if (converged_filter && capture_block_counter_ >= 2 * kNumBlocksPerSecond) {
+    const auto& X2 = render_buffer.Spectrum(filter_delay_);
+    erle_estimator_.Update(X2, Y2, E2_main);
+    erl_estimator_.Update(X2, Y2);
+  }
+
+  // Update the echo audibility evaluator.
+  echo_audibility_.Update(x, s, converged_filter);
+
+  // Detect and flag echo saturation.
+  // TODO(peah): Add the delay in this computation to ensure that the render and
+  // capture signals are properly aligned.
+  if (config_.ep_strength.echo_can_saturate) {
+    echo_saturation_ = DetectEchoSaturation(x);
+  }
+
+  // TODO(peah): Move?
+  filter_has_had_time_to_converge_ =
+      blocks_with_proper_filter_adaptation_ >= 1.5f * kNumBlocksPerSecond;
+
+  initial_state_ =
+      blocks_with_proper_filter_adaptation_ < 5 * kNumBlocksPerSecond;
+
+  // Flag whether the linear filter estimate is usable.
+  usable_linear_estimate_ =
+      !echo_saturation_ &&
+      (converged_filter && filter_has_had_time_to_converge_) &&
+      capture_block_counter_ >= 1.f * kNumBlocksPerSecond && !TransparentMode();
+
+  // After an amount of active render samples for which an echo should have been
+  // detected in the capture signal if the ERL was not infinite, flag that a
+  // transparent mode should be entered.
+  transparent_mode_ =
+      !converged_filter &&
+      (blocks_with_active_render_ == 0 ||
+       blocks_with_proper_filter_adaptation_ >= 5 * kNumBlocksPerSecond);
+}
+
+void AecState::UpdateReverb(const std::vector<float>& impulse_response) {
+  // Echo tail estimation enabled if the below variable is set as negative.
+  if (config_.ep_strength.default_len > 0.f) {
+    return;
+  }
+
+  if ((!(filter_delay_ && usable_linear_estimate_)) ||
+      (filter_delay_ >
+       static_cast<int>(config_.filter.main.length_blocks) - 4)) {
+    return;
+  }
+
+  constexpr float kOneByFftLengthBy2 = 1.f / kFftLengthBy2;
+
+  // Form the data to match against by squaring the impulse response
+  // coefficients.
+  std::array<float, GetTimeDomainLength(kMaxAdaptiveFilterLength)>
+      matching_data_data;
+  RTC_DCHECK_LE(GetTimeDomainLength(config_.filter.main.length_blocks),
+                matching_data_data.size());
+  rtc::ArrayView<float> matching_data(
+      matching_data_data.data(),
+      GetTimeDomainLength(config_.filter.main.length_blocks));
+  std::transform(impulse_response.begin(), impulse_response.end(),
+                 matching_data.begin(), [](float a) { return a * a; });
+
+  if (current_reverb_decay_section_ < config_.filter.main.length_blocks) {
+    // Update accumulated variables for the current filter section.
+
+    const size_t start_index = current_reverb_decay_section_ * kFftLengthBy2;
+
+    RTC_DCHECK_GT(matching_data.size(), start_index);
+    RTC_DCHECK_GE(matching_data.size(), start_index + kFftLengthBy2);
+    float section_energy =
+        std::accumulate(matching_data.begin() + start_index,
+                        matching_data.begin() + start_index + kFftLengthBy2,
+                        0.f) *
+        kOneByFftLengthBy2;
+
+    section_energy = std::max(
+        section_energy, 1e-32f);  // Regularization to avoid division by 0.
+
+    RTC_DCHECK_LT(current_reverb_decay_section_, block_energies_.size());
+    const float energy_ratio =
+        block_energies_[current_reverb_decay_section_] / section_energy;
+
+    main_filter_is_adapting_ = main_filter_is_adapting_ ||
+                               (energy_ratio > 1.1f || energy_ratio < 0.9f);
+
+    // Count consecutive number of "good" filter sections, where "good" means:
+    // 1) energy is above noise floor.
+    // 2) energy of current section has not changed too much from last check.
+    if (!found_end_of_reverb_decay_ && section_energy > tail_energy_ &&
+        !main_filter_is_adapting_) {
+      ++num_reverb_decay_sections_next_;
+    } else {
+      found_end_of_reverb_decay_ = true;
+    }
+
+    block_energies_[current_reverb_decay_section_] = section_energy;
+
+    if (num_reverb_decay_sections_ > 0) {
+      // Linear regression of log squared magnitude of impulse response.
+      for (size_t i = 0; i < kFftLengthBy2; i++) {
+        auto fast_approx_log2f = [](const float in) {
+          RTC_DCHECK_GT(in, .0f);
+          // Read and interpret float as uint32_t and then cast to float.
+          // This is done to extract the exponent (bits 30 - 23).
+          // "Right shift" of the exponent is then performed by multiplying
+          // with the constant (1/2^23). Finally, we subtract a constant to
+          // remove the bias (https://en.wikipedia.org/wiki/Exponent_bias).
+          union {
+            float dummy;
+            uint32_t a;
+          } x = {in};
+          float out = x.a;
+          out *= 1.1920929e-7f;  // 1/2^23
+          out -= 126.942695f;  // Remove bias.
+          return out;
+        };
+        RTC_DCHECK_GT(matching_data.size(), start_index + i);
+        float z = fast_approx_log2f(matching_data[start_index + i]);
+        accumulated_nz_ += accumulated_count_ * z;
+        ++accumulated_count_;
+      }
+    }
+
+    num_reverb_decay_sections_ =
+        num_reverb_decay_sections_ > 0 ? num_reverb_decay_sections_ - 1 : 0;
+    ++current_reverb_decay_section_;
+
+  } else {
+    constexpr float kMaxDecay = 0.95f;  // ~1 sec min RT60.
+    constexpr float kMinDecay = 0.02f;  // ~15 ms max RT60.
+
+    // Accumulated variables throughout whole filter.
+
+    // Solve for decay rate.
+
+    float decay = reverb_decay_;
+
+    if (accumulated_nn_ != 0.f) {
+      const float exp_candidate = -accumulated_nz_ / accumulated_nn_;
+      decay = powf(2.0f, -exp_candidate * kFftLengthBy2);
+      decay = std::min(decay, kMaxDecay);
+      decay = std::max(decay, kMinDecay);
+    }
+
+    // Filter tail energy (assumed to be noise).
+
+    constexpr size_t kTailLength = kFftLength;
+    constexpr float k1ByTailLength = 1.f / kTailLength;
+    const size_t tail_index =
+        GetTimeDomainLength(config_.filter.main.length_blocks) - kTailLength;
+
+    RTC_DCHECK_GT(matching_data.size(), tail_index);
+    tail_energy_ = std::accumulate(matching_data.begin() + tail_index,
+                                   matching_data.end(), 0.f) *
+                   k1ByTailLength;
+
+    // Update length of decay.
+    num_reverb_decay_sections_ = num_reverb_decay_sections_next_;
+    num_reverb_decay_sections_next_ = 0;
+    // Must have enough data (number of sections) in order
+    // to estimate decay rate.
+    if (num_reverb_decay_sections_ < 5) {
+      num_reverb_decay_sections_ = 0;
+    }
+
+    const float N = num_reverb_decay_sections_ * kFftLengthBy2;
+    accumulated_nz_ = 0.f;
+    const float k1By12 = 1.f / 12.f;
+    // Arithmetic sum $2 \sum_{i=0}^{(N-1)/2}i^2$ calculated directly.
+    accumulated_nn_ = N * (N * N - 1.0f) * k1By12;
+    accumulated_count_ = -N * 0.5f;
+    // Linear regression approach assumes symmetric index around 0.
+    accumulated_count_ += 0.5f;
+
+    // Identify the peak index of the impulse response.
+    const size_t peak_index = std::distance(
+        matching_data.begin(),
+        std::max_element(matching_data.begin(), matching_data.end()));
+
+    current_reverb_decay_section_ = peak_index * kOneByFftLengthBy2 + 3;
+    // Make sure we're not out of bounds.
+    if (current_reverb_decay_section_ + 1 >=
+        config_.filter.main.length_blocks) {
+      current_reverb_decay_section_ = config_.filter.main.length_blocks;
+    }
+    size_t start_index = current_reverb_decay_section_ * kFftLengthBy2;
+    float first_section_energy =
+        std::accumulate(matching_data.begin() + start_index,
+                        matching_data.begin() + start_index + kFftLengthBy2,
+                        0.f) *
+        kOneByFftLengthBy2;
+
+    // To estimate the reverb decay, the energy of the first filter section
+    // must be substantially larger than the last.
+    // Also, the first filter section energy must not deviate too much
+    // from the max peak.
+    bool main_filter_has_reverb = first_section_energy > 4.f * tail_energy_;
+    bool main_filter_is_sane = first_section_energy > 2.f * tail_energy_ &&
+                               matching_data[peak_index] < 100.f;
+
+    // Not detecting any decay, but tail is over noise - assume max decay.
+    if (num_reverb_decay_sections_ == 0 && main_filter_is_sane &&
+        main_filter_has_reverb) {
+      decay = kMaxDecay;
+    }
+
+    if (!main_filter_is_adapting_ && main_filter_is_sane &&
+        num_reverb_decay_sections_ > 0) {
+      decay = std::max(.97f * reverb_decay_, decay);
+      reverb_decay_ -= .1f * (reverb_decay_ - decay);
+    }
+
+    found_end_of_reverb_decay_ =
+        !(main_filter_is_sane && main_filter_has_reverb);
+    main_filter_is_adapting_ = false;
+  }
+
+  data_dumper_->DumpRaw("aec3_reverb_decay", reverb_decay_);
+  data_dumper_->DumpRaw("aec3_reverb_tail_energy", tail_energy_);
+  data_dumper_->DumpRaw("aec3_suppression_gain_limit", SuppressionGainLimit());
+}
+
+bool AecState::DetectActiveRender(rtc::ArrayView<const float> x) const {
+  const float x_energy = std::inner_product(x.begin(), x.end(), x.begin(), 0.f);
+  return x_energy > (config_.render_levels.active_render_limit *
+                     config_.render_levels.active_render_limit) *
+                        kFftLengthBy2;
+}
+
+bool AecState::DetectEchoSaturation(rtc::ArrayView<const float> x) {
+  RTC_DCHECK_LT(0, x.size());
+  const float max_sample = fabs(*std::max_element(
+      x.begin(), x.end(), [](float a, float b) { return a * a < b * b; }));
+  previous_max_sample_ = max_sample;
+
+  // Set flag for potential presence of saturated echo
+  blocks_since_last_saturation_ =
+      previous_max_sample_ > 200.f && SaturatedCapture()
+          ? 0
+          : blocks_since_last_saturation_ + 1;
+
+  return blocks_since_last_saturation_ < 20;
+}
+
+void AecState::EchoAudibility::Update(rtc::ArrayView<const float> x,
+                                      const std::array<float, kBlockSize>& s,
+                                      bool converged_filter) {
+  auto result_x = std::minmax_element(x.begin(), x.end());
+  auto result_s = std::minmax_element(s.begin(), s.end());
+  const float x_abs = std::max(fabsf(*result_x.first), fabsf(*result_x.second));
+  const float s_abs = std::max(fabsf(*result_s.first), fabsf(*result_s.second));
+
+  if (converged_filter) {
+    if (x_abs < 20.f) {
+      ++low_farend_counter_;
+    } else {
+      low_farend_counter_ = 0;
+    }
+  } else {
+    if (x_abs < 100.f) {
+      ++low_farend_counter_;
+    } else {
+      low_farend_counter_ = 0;
+    }
+  }
+
+  // The echo is deemed as not audible if the echo estimate is on the level of
+  // the quantization noise in the FFTs and the nearend level is sufficiently
+  // strong to mask that by ensuring that the playout and AGC gains do not boost
+  // any residual echo that is below the quantization noise level. Furthermore,
+  // cases where the render signal is very close to zero are also identified as
+  // not producing audible echo.
+  inaudible_echo_ = (max_nearend_ > 500 && s_abs < 30.f) ||
+                    (!converged_filter && x_abs < 500);
+  inaudible_echo_ = inaudible_echo_ || low_farend_counter_ > 20;
+}
+
+void AecState::EchoAudibility::UpdateWithOutput(rtc::ArrayView<const float> e) {
+  const float e_max = *std::max_element(e.begin(), e.end());
+  const float e_min = *std::min_element(e.begin(), e.end());
+  const float e_abs = std::max(fabsf(e_max), fabsf(e_min));
+
+  if (max_nearend_ < e_abs) {
+    max_nearend_ = e_abs;
+    max_nearend_counter_ = 0;
+  } else {
+    if (++max_nearend_counter_ > 5 * kNumBlocksPerSecond) {
+      max_nearend_ *= 0.995f;
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/aec_state.h b/modules/audio_processing/aec3/aec_state.h
new file mode 100644
index 0000000..6dcd43d
--- /dev/null
+++ b/modules/audio_processing/aec3/aec_state.h
@@ -0,0 +1,189 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_AEC_STATE_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_AEC_STATE_H_
+
+#include <math.h>
+
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "api/optional.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/delay_estimate.h"
+#include "modules/audio_processing/aec3/echo_path_variability.h"
+#include "modules/audio_processing/aec3/erl_estimator.h"
+#include "modules/audio_processing/aec3/erle_estimator.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/aec3/suppression_gain_limiter.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+
+// Handles the state and the conditions for the echo removal functionality.
+class AecState {
+ public:
+  explicit AecState(const EchoCanceller3Config& config);
+  ~AecState();
+
+  // Returns whether the echo subtractor can be used to determine the residual
+  // echo.
+  bool UsableLinearEstimate() const { return usable_linear_estimate_; }
+
+  // Returns whether there has been echo leakage detected.
+  bool EchoLeakageDetected() const { return echo_leakage_detected_; }
+
+  // Returns whether the render signal is currently active.
+  bool ActiveRender() const { return blocks_with_active_render_ > 200; }
+
+  // Returns the ERLE.
+  const std::array<float, kFftLengthBy2Plus1>& Erle() const {
+    return erle_estimator_.Erle();
+  }
+
+  // Returns the time-domain ERLE.
+  float ErleTimeDomain() const { return erle_estimator_.ErleTimeDomain(); }
+
+  // Returns the ERL.
+  const std::array<float, kFftLengthBy2Plus1>& Erl() const {
+    return erl_estimator_.Erl();
+  }
+
+  // Returns the time-domain ERL.
+  float ErlTimeDomain() const { return erl_estimator_.ErlTimeDomain(); }
+
+  // Returns the delay estimate based on the linear filter.
+  int FilterDelay() const { return filter_delay_; }
+
+  // Returns whether the capture signal is saturated.
+  bool SaturatedCapture() const { return capture_signal_saturation_; }
+
+  // Returns whether the echo signal is saturated.
+  bool SaturatedEcho() const { return echo_saturation_; }
+
+  // Returns whether the echo path can saturate.
+  bool SaturatingEchoPath() const { return saturating_echo_path_; }
+
+  // Updates the capture signal saturation.
+  void UpdateCaptureSaturation(bool capture_signal_saturation) {
+    capture_signal_saturation_ = capture_signal_saturation;
+  }
+
+  // Returns whether the transparent mode is active
+  bool TransparentMode() const { return transparent_mode_; }
+
+  // Takes appropriate action at an echo path change.
+  void HandleEchoPathChange(const EchoPathVariability& echo_path_variability);
+
+  // Returns the decay factor for the echo reverberation.
+  float ReverbDecay() const { return reverb_decay_; }
+
+  // Returns the upper limit for the echo suppression gain.
+  float SuppressionGainLimit() const {
+    return suppression_gain_limiter_.Limit();
+  }
+
+  // Returns whether the echo in the capture signal is audible.
+  bool InaudibleEcho() const { return echo_audibility_.InaudibleEcho(); }
+
+  // Updates the aec state with the AEC output signal.
+  void UpdateWithOutput(rtc::ArrayView<const float> e) {
+    echo_audibility_.UpdateWithOutput(e);
+  }
+
+  // Returns whether the linear filter should have been able to properly adapt.
+  bool FilterHasHadTimeToConverge() const {
+    return filter_has_had_time_to_converge_;
+  }
+
+  // Returns whether the filter adaptation is still in the initial state.
+  bool InitialState() const { return initial_state_; }
+
+  // Updates the aec state.
+  void Update(const rtc::Optional<DelayEstimate>& delay_estimate,
+              const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+                  adaptive_filter_frequency_response,
+              const std::vector<float>& adaptive_filter_impulse_response,
+              bool converged_filter,
+              const RenderBuffer& render_buffer,
+              const std::array<float, kFftLengthBy2Plus1>& E2_main,
+              const std::array<float, kFftLengthBy2Plus1>& Y2,
+              const std::array<float, kBlockSize>& s_main,
+              bool echo_leakage_detected);
+
+ private:
+  class EchoAudibility {
+   public:
+    void Update(rtc::ArrayView<const float> x,
+                const std::array<float, kBlockSize>& s,
+                bool converged_filter);
+    void UpdateWithOutput(rtc::ArrayView<const float> e);
+    bool InaudibleEcho() const { return inaudible_echo_; }
+
+   private:
+    float max_nearend_ = 0.f;
+    size_t max_nearend_counter_ = 0;
+    size_t low_farend_counter_ = 0;
+    bool inaudible_echo_ = false;
+  };
+
+  void UpdateReverb(const std::vector<float>& impulse_response);
+  bool DetectActiveRender(rtc::ArrayView<const float> x) const;
+  void UpdateSuppressorGainLimit(bool render_activity);
+  bool DetectEchoSaturation(rtc::ArrayView<const float> x);
+
+  static int instance_count_;
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  ErlEstimator erl_estimator_;
+  ErleEstimator erle_estimator_;
+  size_t capture_block_counter_ = 0;
+  size_t blocks_with_proper_filter_adaptation_ = 0;
+  size_t blocks_with_active_render_ = 0;
+  bool usable_linear_estimate_ = false;
+  bool echo_leakage_detected_ = false;
+  bool capture_signal_saturation_ = false;
+  bool echo_saturation_ = false;
+  bool transparent_mode_ = false;
+  float previous_max_sample_ = 0.f;
+  bool render_received_ = false;
+  int filter_delay_ = 0;
+  size_t blocks_since_last_saturation_ = 1000;
+  float tail_energy_ = 0.f;
+  float accumulated_nz_ = 0.f;
+  float accumulated_nn_ = 0.f;
+  float accumulated_count_ = 0.f;
+  size_t current_reverb_decay_section_ = 0;
+  size_t num_reverb_decay_sections_ = 0;
+  size_t num_reverb_decay_sections_next_ = 0;
+  bool found_end_of_reverb_decay_ = false;
+  bool main_filter_is_adapting_ = true;
+  std::array<float, kMaxAdaptiveFilterLength> block_energies_;
+  EchoAudibility echo_audibility_;
+  const EchoCanceller3Config config_;
+  std::vector<float> max_render_;
+  float reverb_decay_ = fabsf(config_.ep_strength.default_len);
+  bool saturating_echo_path_ = false;
+  bool filter_has_had_time_to_converge_ = false;
+  bool initial_state_ = true;
+  const float gain_rampup_increase_;
+  SuppressionGainUpperLimiter suppression_gain_limiter_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(AecState);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_AEC_STATE_H_
diff --git a/modules/audio_processing/aec3/aec_state_unittest.cc b/modules/audio_processing/aec3/aec_state_unittest.cc
new file mode 100644
index 0000000..9008232
--- /dev/null
+++ b/modules/audio_processing/aec3/aec_state_unittest.cc
@@ -0,0 +1,219 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/aec_state.h"
+
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+// Verify the general functionality of AecState
+TEST(AecState, NormalUsage) {
+  ApmDataDumper data_dumper(42);
+  EchoCanceller3Config config;
+  AecState state(config);
+  rtc::Optional<DelayEstimate> delay_estimate;
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(config, 3));
+  std::array<float, kFftLengthBy2Plus1> E2_main = {};
+  std::array<float, kFftLengthBy2Plus1> Y2 = {};
+  std::vector<std::vector<float>> x(3, std::vector<float>(kBlockSize, 0.f));
+  EchoPathVariability echo_path_variability(
+      false, EchoPathVariability::DelayAdjustment::kNone, false);
+  std::array<float, kBlockSize> s;
+  Aec3Fft fft;
+  s.fill(100.f);
+
+  std::vector<std::array<float, kFftLengthBy2Plus1>>
+      converged_filter_frequency_response(10);
+  for (auto& v : converged_filter_frequency_response) {
+    v.fill(0.01f);
+  }
+  std::vector<std::array<float, kFftLengthBy2Plus1>>
+      diverged_filter_frequency_response = converged_filter_frequency_response;
+  converged_filter_frequency_response[2].fill(100.f);
+  converged_filter_frequency_response[2][0] = 1.f;
+
+  std::vector<float> impulse_response(
+      GetTimeDomainLength(config.filter.main.length_blocks), 0.f);
+
+  // Verify that linear AEC usability is false when the filter is diverged.
+  state.Update(delay_estimate, diverged_filter_frequency_response,
+               impulse_response, true, *render_delay_buffer->GetRenderBuffer(),
+               E2_main, Y2, s, false);
+  EXPECT_FALSE(state.UsableLinearEstimate());
+
+  // Verify that linear AEC usability is true when the filter is converged
+  std::fill(x[0].begin(), x[0].end(), 101.f);
+  for (int k = 0; k < 3000; ++k) {
+    render_delay_buffer->Insert(x);
+    state.Update(
+        delay_estimate, converged_filter_frequency_response, impulse_response,
+        true, *render_delay_buffer->GetRenderBuffer(), E2_main, Y2, s, false);
+  }
+  EXPECT_TRUE(state.UsableLinearEstimate());
+
+  // Verify that linear AEC usability becomes false after an echo path change is
+  // reported
+  state.HandleEchoPathChange(EchoPathVariability(
+      true, EchoPathVariability::DelayAdjustment::kNone, false));
+  state.Update(delay_estimate, converged_filter_frequency_response,
+               impulse_response, true, *render_delay_buffer->GetRenderBuffer(),
+               E2_main, Y2, s, false);
+  EXPECT_FALSE(state.UsableLinearEstimate());
+
+  // Verify that the active render detection works as intended.
+  std::fill(x[0].begin(), x[0].end(), 101.f);
+  render_delay_buffer->Insert(x);
+  state.HandleEchoPathChange(EchoPathVariability(
+      true, EchoPathVariability::DelayAdjustment::kNewDetectedDelay, false));
+  state.Update(delay_estimate, converged_filter_frequency_response,
+               impulse_response, true, *render_delay_buffer->GetRenderBuffer(),
+               E2_main, Y2, s, false);
+  EXPECT_FALSE(state.ActiveRender());
+
+  for (int k = 0; k < 1000; ++k) {
+    render_delay_buffer->Insert(x);
+    state.Update(
+        delay_estimate, converged_filter_frequency_response, impulse_response,
+        true, *render_delay_buffer->GetRenderBuffer(), E2_main, Y2, s, false);
+  }
+  EXPECT_TRUE(state.ActiveRender());
+
+  // Verify that echo leakage is properly reported.
+  state.Update(delay_estimate, converged_filter_frequency_response,
+               impulse_response, true, *render_delay_buffer->GetRenderBuffer(),
+               E2_main, Y2, s, false);
+  EXPECT_FALSE(state.EchoLeakageDetected());
+
+  state.Update(delay_estimate, converged_filter_frequency_response,
+               impulse_response, true, *render_delay_buffer->GetRenderBuffer(),
+               E2_main, Y2, s, true);
+  EXPECT_TRUE(state.EchoLeakageDetected());
+
+  // Verify that the ERL is properly estimated
+  for (auto& x_k : x) {
+    x_k = std::vector<float>(kBlockSize, 0.f);
+  }
+
+  x[0][0] = 5000.f;
+  for (size_t k = 0;
+       k < render_delay_buffer->GetRenderBuffer()->GetFftBuffer().size(); ++k) {
+    render_delay_buffer->Insert(x);
+    if (k == 0) {
+      render_delay_buffer->Reset();
+    }
+    render_delay_buffer->PrepareCaptureProcessing();
+  }
+
+  Y2.fill(10.f * 10000.f * 10000.f);
+  for (size_t k = 0; k < 1000; ++k) {
+    state.Update(
+        delay_estimate, converged_filter_frequency_response, impulse_response,
+        true, *render_delay_buffer->GetRenderBuffer(), E2_main, Y2, s, false);
+  }
+
+  ASSERT_TRUE(state.UsableLinearEstimate());
+  const std::array<float, kFftLengthBy2Plus1>& erl = state.Erl();
+  EXPECT_EQ(erl[0], erl[1]);
+  for (size_t k = 1; k < erl.size() - 1; ++k) {
+    EXPECT_NEAR(k % 2 == 0 ? 10.f : 1000.f, erl[k], 0.1);
+  }
+  EXPECT_EQ(erl[erl.size() - 2], erl[erl.size() - 1]);
+
+  // Verify that the ERLE is properly estimated
+  E2_main.fill(1.f * 10000.f * 10000.f);
+  Y2.fill(10.f * E2_main[0]);
+  for (size_t k = 0; k < 1000; ++k) {
+    state.Update(
+        delay_estimate, converged_filter_frequency_response, impulse_response,
+        true, *render_delay_buffer->GetRenderBuffer(), E2_main, Y2, s, false);
+  }
+  ASSERT_TRUE(state.UsableLinearEstimate());
+  {
+    const auto& erle = state.Erle();
+    EXPECT_EQ(erle[0], erle[1]);
+    constexpr size_t kLowFrequencyLimit = 32;
+    for (size_t k = 1; k < kLowFrequencyLimit; ++k) {
+      EXPECT_NEAR(k % 2 == 0 ? 8.f : 1.f, erle[k], 0.1);
+    }
+    for (size_t k = kLowFrequencyLimit; k < erle.size() - 1; ++k) {
+      EXPECT_NEAR(k % 2 == 0 ? 1.5f : 1.f, erle[k], 0.1);
+    }
+    EXPECT_EQ(erle[erle.size() - 2], erle[erle.size() - 1]);
+  }
+
+  E2_main.fill(1.f * 10000.f * 10000.f);
+  Y2.fill(5.f * E2_main[0]);
+  for (size_t k = 0; k < 1000; ++k) {
+    state.Update(
+        delay_estimate, converged_filter_frequency_response, impulse_response,
+        true, *render_delay_buffer->GetRenderBuffer(), E2_main, Y2, s, false);
+  }
+
+  ASSERT_TRUE(state.UsableLinearEstimate());
+  {
+    const auto& erle = state.Erle();
+    EXPECT_EQ(erle[0], erle[1]);
+    constexpr size_t kLowFrequencyLimit = 32;
+    for (size_t k = 1; k < kLowFrequencyLimit; ++k) {
+      EXPECT_NEAR(k % 2 == 0 ? 5.f : 1.f, erle[k], 0.1);
+    }
+    for (size_t k = kLowFrequencyLimit; k < erle.size() - 1; ++k) {
+      EXPECT_NEAR(k % 2 == 0 ? 1.5f : 1.f, erle[k], 0.1);
+    }
+    EXPECT_EQ(erle[erle.size() - 2], erle[erle.size() - 1]);
+  }
+}
+
+// Verifies the delay for a converged filter is correctly identified.
+TEST(AecState, ConvergedFilterDelay) {
+  constexpr int kFilterLength = 10;
+  EchoCanceller3Config config;
+  AecState state(config);
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(config, 3));
+  rtc::Optional<DelayEstimate> delay_estimate;
+  std::array<float, kFftLengthBy2Plus1> E2_main;
+  std::array<float, kFftLengthBy2Plus1> Y2;
+  std::array<float, kBlockSize> x;
+  EchoPathVariability echo_path_variability(
+      false, EchoPathVariability::DelayAdjustment::kNone, false);
+  std::array<float, kBlockSize> s;
+  s.fill(100.f);
+  x.fill(0.f);
+
+  std::vector<std::array<float, kFftLengthBy2Plus1>> frequency_response(
+      kFilterLength);
+
+  std::vector<float> impulse_response(
+      GetTimeDomainLength(config.filter.main.length_blocks), 0.f);
+
+  // Verify that the filter delay for a converged filter is properly identified.
+  for (int k = 0; k < kFilterLength; ++k) {
+    for (auto& v : frequency_response) {
+      v.fill(0.01f);
+    }
+    frequency_response[k].fill(100.f);
+    frequency_response[k][0] = 0.f;
+    state.HandleEchoPathChange(echo_path_variability);
+    state.Update(delay_estimate, frequency_response, impulse_response, true,
+                 *render_delay_buffer->GetRenderBuffer(), E2_main, Y2, s,
+                 false);
+    if (k != (kFilterLength - 1)) {
+      EXPECT_EQ(k, state.FilterDelay());
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/block_framer.cc b/modules/audio_processing/aec3/block_framer.cc
new file mode 100644
index 0000000..3160624
--- /dev/null
+++ b/modules/audio_processing/aec3/block_framer.cc
@@ -0,0 +1,59 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/block_framer.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+BlockFramer::BlockFramer(size_t num_bands)
+    : num_bands_(num_bands),
+      buffer_(num_bands_, std::vector<float>(kBlockSize, 0.f)) {}
+
+BlockFramer::~BlockFramer() = default;
+
+// All the constants are chosen so that the buffer is either empty or has enough
+// samples for InsertBlockAndExtractSubFrame to produce a frame. In order to
+// achieve this, the InsertBlockAndExtractSubFrame and InsertBlock methods need
+// to be called in the correct order.
+void BlockFramer::InsertBlock(const std::vector<std::vector<float>>& block) {
+  RTC_DCHECK_EQ(num_bands_, block.size());
+  for (size_t i = 0; i < num_bands_; ++i) {
+    RTC_DCHECK_EQ(kBlockSize, block[i].size());
+    RTC_DCHECK_EQ(0, buffer_[i].size());
+    buffer_[i].insert(buffer_[i].begin(), block[i].begin(), block[i].end());
+  }
+}
+
+void BlockFramer::InsertBlockAndExtractSubFrame(
+    const std::vector<std::vector<float>>& block,
+    std::vector<rtc::ArrayView<float>>* sub_frame) {
+  RTC_DCHECK(sub_frame);
+  RTC_DCHECK_EQ(num_bands_, block.size());
+  RTC_DCHECK_EQ(num_bands_, sub_frame->size());
+  for (size_t i = 0; i < num_bands_; ++i) {
+    RTC_DCHECK_LE(kSubFrameLength, buffer_[i].size() + kBlockSize);
+    RTC_DCHECK_EQ(kBlockSize, block[i].size());
+    RTC_DCHECK_GE(kBlockSize, buffer_[i].size());
+    RTC_DCHECK_EQ(kSubFrameLength, (*sub_frame)[i].size());
+    const int samples_to_frame = kSubFrameLength - buffer_[i].size();
+    std::copy(buffer_[i].begin(), buffer_[i].end(), (*sub_frame)[i].begin());
+    std::copy(block[i].begin(), block[i].begin() + samples_to_frame,
+              (*sub_frame)[i].begin() + buffer_[i].size());
+    buffer_[i].clear();
+    buffer_[i].insert(buffer_[i].begin(), block[i].begin() + samples_to_frame,
+                      block[i].end());
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/block_framer.h b/modules/audio_processing/aec3/block_framer.h
new file mode 100644
index 0000000..923e4cf
--- /dev/null
+++ b/modules/audio_processing/aec3/block_framer.h
@@ -0,0 +1,47 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_BLOCK_FRAMER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_BLOCK_FRAMER_H_
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Class for producing frames consisting of 1 or 2 subframes of 80 samples each
+// from 64 sample blocks. The class is designed to work together with the
+// FrameBlocker class which performs the reverse conversion. Used together with
+// that, this class produces output frames are the same rate as frames are
+// received by the FrameBlocker class. Note that the internal buffers will
+// overrun if any other rate of packets insertion is used.
+class BlockFramer {
+ public:
+  explicit BlockFramer(size_t num_bands);
+  ~BlockFramer();
+  // Adds a 64 sample block into the data that will form the next output frame.
+  void InsertBlock(const std::vector<std::vector<float>>& block);
+  // Adds a 64 sample block and extracts an 80 sample subframe.
+  void InsertBlockAndExtractSubFrame(
+      const std::vector<std::vector<float>>& block,
+      std::vector<rtc::ArrayView<float>>* sub_frame);
+
+ private:
+  const size_t num_bands_;
+  std::vector<std::vector<float>> buffer_;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(BlockFramer);
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_BLOCK_FRAMER_H_
diff --git a/modules/audio_processing/aec3/block_framer_unittest.cc b/modules/audio_processing/aec3/block_framer_unittest.cc
new file mode 100644
index 0000000..16d3944
--- /dev/null
+++ b/modules/audio_processing/aec3/block_framer_unittest.cc
@@ -0,0 +1,261 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/block_framer.h"
+
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+void SetupSubFrameView(std::vector<std::vector<float>>* sub_frame,
+                       std::vector<rtc::ArrayView<float>>* sub_frame_view) {
+  for (size_t k = 0; k < sub_frame_view->size(); ++k) {
+    (*sub_frame_view)[k] =
+        rtc::ArrayView<float>((*sub_frame)[k].data(), (*sub_frame)[k].size());
+  }
+}
+
+float ComputeSampleValue(size_t chunk_counter,
+                         size_t chunk_size,
+                         size_t band,
+                         size_t sample_index,
+                         int offset) {
+  float value =
+      static_cast<int>(chunk_counter * chunk_size + sample_index) + offset;
+  return value > 0 ? 5000 * band + value : 0;
+}
+
+bool VerifySubFrame(size_t sub_frame_counter,
+                    int offset,
+                    const std::vector<rtc::ArrayView<float>>& sub_frame_view) {
+  for (size_t k = 0; k < sub_frame_view.size(); ++k) {
+    for (size_t i = 0; i < sub_frame_view[k].size(); ++i) {
+      const float reference_value =
+          ComputeSampleValue(sub_frame_counter, kSubFrameLength, k, i, offset);
+      if (reference_value != sub_frame_view[k][i]) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+void FillBlock(size_t block_counter, std::vector<std::vector<float>>* block) {
+  for (size_t k = 0; k < block->size(); ++k) {
+    for (size_t i = 0; i < (*block)[0].size(); ++i) {
+      (*block)[k][i] = ComputeSampleValue(block_counter, kBlockSize, k, i, 0);
+    }
+  }
+}
+
+// Verifies that the BlockFramer is able to produce the expected frame content.
+void RunFramerTest(int sample_rate_hz) {
+  constexpr size_t kNumSubFramesToProcess = 2;
+  const size_t num_bands = NumBandsForRate(sample_rate_hz);
+
+  std::vector<std::vector<float>> block(num_bands,
+                                        std::vector<float>(kBlockSize, 0.f));
+  std::vector<std::vector<float>> output_sub_frame(
+      num_bands, std::vector<float>(kSubFrameLength, 0.f));
+  std::vector<rtc::ArrayView<float>> output_sub_frame_view(num_bands);
+  SetupSubFrameView(&output_sub_frame, &output_sub_frame_view);
+  BlockFramer framer(num_bands);
+
+  size_t block_index = 0;
+  for (size_t sub_frame_index = 0; sub_frame_index < kNumSubFramesToProcess;
+       ++sub_frame_index) {
+    FillBlock(block_index++, &block);
+    framer.InsertBlockAndExtractSubFrame(block, &output_sub_frame_view);
+    EXPECT_TRUE(VerifySubFrame(sub_frame_index, -64, output_sub_frame_view));
+
+    if ((sub_frame_index + 1) % 4 == 0) {
+      FillBlock(block_index++, &block);
+      framer.InsertBlock(block);
+    }
+  }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// Verifies that the BlockFramer crashes if the InsertBlockAndExtractSubFrame
+// method is called for inputs with the wrong number of bands or band lengths.
+void RunWronglySizedInsertAndExtractParametersTest(int sample_rate_hz,
+                                                   size_t num_block_bands,
+                                                   size_t block_length,
+                                                   size_t num_sub_frame_bands,
+                                                   size_t sub_frame_length) {
+  const size_t correct_num_bands = NumBandsForRate(sample_rate_hz);
+
+  std::vector<std::vector<float>> block(num_block_bands,
+                                        std::vector<float>(block_length, 0.f));
+  std::vector<std::vector<float>> output_sub_frame(
+      num_sub_frame_bands, std::vector<float>(sub_frame_length, 0.f));
+  std::vector<rtc::ArrayView<float>> output_sub_frame_view(
+      output_sub_frame.size());
+  SetupSubFrameView(&output_sub_frame, &output_sub_frame_view);
+  BlockFramer framer(correct_num_bands);
+  EXPECT_DEATH(
+      framer.InsertBlockAndExtractSubFrame(block, &output_sub_frame_view), "");
+}
+
+// Verifies that the BlockFramer crashes if the InsertBlock method is called for
+// inputs with the wrong number of bands or band lengths.
+void RunWronglySizedInsertParameterTest(int sample_rate_hz,
+                                        size_t num_block_bands,
+                                        size_t block_length) {
+  const size_t correct_num_bands = NumBandsForRate(sample_rate_hz);
+
+  std::vector<std::vector<float>> correct_block(
+      correct_num_bands, std::vector<float>(kBlockSize, 0.f));
+  std::vector<std::vector<float>> wrong_block(
+      num_block_bands, std::vector<float>(block_length, 0.f));
+  std::vector<std::vector<float>> output_sub_frame(
+      correct_num_bands, std::vector<float>(kSubFrameLength, 0.f));
+  std::vector<rtc::ArrayView<float>> output_sub_frame_view(
+      output_sub_frame.size());
+  SetupSubFrameView(&output_sub_frame, &output_sub_frame_view);
+  BlockFramer framer(correct_num_bands);
+  framer.InsertBlockAndExtractSubFrame(correct_block, &output_sub_frame_view);
+  framer.InsertBlockAndExtractSubFrame(correct_block, &output_sub_frame_view);
+  framer.InsertBlockAndExtractSubFrame(correct_block, &output_sub_frame_view);
+  framer.InsertBlockAndExtractSubFrame(correct_block, &output_sub_frame_view);
+
+  EXPECT_DEATH(framer.InsertBlock(wrong_block), "");
+}
+
+// Verifies that the BlockFramer crashes if the InsertBlock method is called
+// after a wrong number of previous InsertBlockAndExtractSubFrame method calls
+// have been made.
+void RunWronglyInsertOrderTest(int sample_rate_hz,
+                               size_t num_preceeding_api_calls) {
+  const size_t correct_num_bands = NumBandsForRate(sample_rate_hz);
+
+  std::vector<std::vector<float>> block(correct_num_bands,
+                                        std::vector<float>(kBlockSize, 0.f));
+  std::vector<std::vector<float>> output_sub_frame(
+      correct_num_bands, std::vector<float>(kSubFrameLength, 0.f));
+  std::vector<rtc::ArrayView<float>> output_sub_frame_view(
+      output_sub_frame.size());
+  SetupSubFrameView(&output_sub_frame, &output_sub_frame_view);
+  BlockFramer framer(correct_num_bands);
+  for (size_t k = 0; k < num_preceeding_api_calls; ++k) {
+    framer.InsertBlockAndExtractSubFrame(block, &output_sub_frame_view);
+  }
+
+  EXPECT_DEATH(framer.InsertBlock(block), "");
+}
+#endif
+
+std::string ProduceDebugText(int sample_rate_hz) {
+  std::ostringstream ss;
+  ss << "Sample rate: " << sample_rate_hz;
+  return ss.str();
+}
+
+}  // namespace
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST(BlockFramer, WrongNumberOfBandsInBlockForInsertBlockAndExtractSubFrame) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    const size_t correct_num_bands = NumBandsForRate(rate);
+    const size_t wrong_num_bands = (correct_num_bands % 3) + 1;
+    RunWronglySizedInsertAndExtractParametersTest(
+        rate, wrong_num_bands, kBlockSize, correct_num_bands, kSubFrameLength);
+  }
+}
+
+TEST(BlockFramer,
+     WrongNumberOfBandsInSubFrameForInsertBlockAndExtractSubFrame) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    const size_t correct_num_bands = NumBandsForRate(rate);
+    const size_t wrong_num_bands = (correct_num_bands % 3) + 1;
+    RunWronglySizedInsertAndExtractParametersTest(
+        rate, correct_num_bands, kBlockSize, wrong_num_bands, kSubFrameLength);
+  }
+}
+
+TEST(BlockFramer, WrongNumberOfSamplesInBlockForInsertBlockAndExtractSubFrame) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    const size_t correct_num_bands = NumBandsForRate(rate);
+    RunWronglySizedInsertAndExtractParametersTest(
+        rate, correct_num_bands, kBlockSize - 1, correct_num_bands,
+        kSubFrameLength);
+  }
+}
+
+TEST(BlockFramer,
+     WrongNumberOfSamplesInSubFrameForInsertBlockAndExtractSubFrame) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    const size_t correct_num_bands = NumBandsForRate(rate);
+    RunWronglySizedInsertAndExtractParametersTest(rate, correct_num_bands,
+                                                  kBlockSize, correct_num_bands,
+                                                  kSubFrameLength - 1);
+  }
+}
+
+TEST(BlockFramer, WrongNumberOfBandsInBlockForInsertBlock) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    const size_t correct_num_bands = NumBandsForRate(rate);
+    const size_t wrong_num_bands = (correct_num_bands % 3) + 1;
+    RunWronglySizedInsertParameterTest(rate, wrong_num_bands, kBlockSize);
+  }
+}
+
+TEST(BlockFramer, WrongNumberOfSamplesInBlockForInsertBlock) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    const size_t correct_num_bands = NumBandsForRate(rate);
+    RunWronglySizedInsertParameterTest(rate, correct_num_bands, kBlockSize - 1);
+  }
+}
+
+TEST(BlockFramer, WrongNumberOfPreceedingApiCallsForInsertBlock) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    for (size_t num_calls = 0; num_calls < 4; ++num_calls) {
+      std::ostringstream ss;
+      ss << "Sample rate: " << rate;
+      ss << ", Num preceeding InsertBlockAndExtractSubFrame calls: "
+         << num_calls;
+
+      SCOPED_TRACE(ss.str());
+      RunWronglyInsertOrderTest(rate, num_calls);
+    }
+  }
+}
+
+// Verifiers that the verification for null sub_frame pointer works.
+TEST(BlockFramer, NullSubFrameParameter) {
+  EXPECT_DEATH(BlockFramer(1).InsertBlockAndExtractSubFrame(
+                   std::vector<std::vector<float>>(
+                       1, std::vector<float>(kBlockSize, 0.f)),
+                   nullptr),
+               "");
+}
+
+#endif
+
+TEST(BlockFramer, FrameBitexactness) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    RunFramerTest(rate);
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/block_processor.cc b/modules/audio_processing/aec3/block_processor.cc
new file mode 100644
index 0000000..7f702ff
--- /dev/null
+++ b/modules/audio_processing/aec3/block_processor.cc
@@ -0,0 +1,272 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/block_processor.h"
+
+#include "api/optional.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/block_processor_metrics.h"
+#include "modules/audio_processing/aec3/echo_path_variability.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/atomicops.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
+enum class BlockProcessorApiCall { kCapture, kRender };
+
+class BlockProcessorImpl final : public BlockProcessor {
+ public:
+  BlockProcessorImpl(const EchoCanceller3Config& config,
+                     int sample_rate_hz,
+                     std::unique_ptr<RenderDelayBuffer> render_buffer,
+                     std::unique_ptr<RenderDelayController> delay_controller,
+                     std::unique_ptr<EchoRemover> echo_remover);
+
+  ~BlockProcessorImpl() override;
+
+  void ProcessCapture(bool echo_path_gain_change,
+                      bool capture_signal_saturation,
+                      std::vector<std::vector<float>>* capture_block) override;
+
+  void BufferRender(const std::vector<std::vector<float>>& block) override;
+
+  void UpdateEchoLeakageStatus(bool leakage_detected) override;
+
+  void GetMetrics(EchoControl::Metrics* metrics) const override;
+
+ private:
+  static int instance_count_;
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  const EchoCanceller3Config config_;
+  bool capture_properly_started_ = false;
+  bool render_properly_started_ = false;
+  const size_t sample_rate_hz_;
+  std::unique_ptr<RenderDelayBuffer> render_buffer_;
+  std::unique_ptr<RenderDelayController> delay_controller_;
+  std::unique_ptr<EchoRemover> echo_remover_;
+  BlockProcessorMetrics metrics_;
+  RenderDelayBuffer::BufferingEvent render_event_;
+  size_t capture_call_counter_ = 0;
+  rtc::Optional<DelayEstimate> estimated_delay_;
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(BlockProcessorImpl);
+};
+
+int BlockProcessorImpl::instance_count_ = 0;
+
+BlockProcessorImpl::BlockProcessorImpl(
+    const EchoCanceller3Config& config,
+    int sample_rate_hz,
+    std::unique_ptr<RenderDelayBuffer> render_buffer,
+    std::unique_ptr<RenderDelayController> delay_controller,
+    std::unique_ptr<EchoRemover> echo_remover)
+    : data_dumper_(
+          new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))),
+      config_(config),
+      sample_rate_hz_(sample_rate_hz),
+      render_buffer_(std::move(render_buffer)),
+      delay_controller_(std::move(delay_controller)),
+      echo_remover_(std::move(echo_remover)),
+      render_event_(RenderDelayBuffer::BufferingEvent::kNone) {
+  RTC_DCHECK(ValidFullBandRate(sample_rate_hz_));
+}
+
+BlockProcessorImpl::~BlockProcessorImpl() = default;
+
+void BlockProcessorImpl::ProcessCapture(
+    bool echo_path_gain_change,
+    bool capture_signal_saturation,
+    std::vector<std::vector<float>>* capture_block) {
+  RTC_DCHECK(capture_block);
+  RTC_DCHECK_EQ(NumBandsForRate(sample_rate_hz_), capture_block->size());
+  RTC_DCHECK_EQ(kBlockSize, (*capture_block)[0].size());
+
+  capture_call_counter_++;
+
+  data_dumper_->DumpRaw("aec3_processblock_call_order",
+                        static_cast<int>(BlockProcessorApiCall::kCapture));
+  data_dumper_->DumpWav("aec3_processblock_capture_input", kBlockSize,
+                        &(*capture_block)[0][0],
+                        LowestBandRate(sample_rate_hz_), 1);
+
+  if (render_properly_started_) {
+    if (!capture_properly_started_) {
+      capture_properly_started_ = true;
+      render_buffer_->Reset();
+      delay_controller_->Reset();
+    }
+  } else {
+    // If no render data has yet arrived, do not process the capture signal.
+    return;
+  }
+
+  EchoPathVariability echo_path_variability(
+      echo_path_gain_change, EchoPathVariability::DelayAdjustment::kNone,
+      false);
+
+  if (render_event_ == RenderDelayBuffer::BufferingEvent::kRenderOverrun &&
+      render_properly_started_) {
+    echo_path_variability.delay_change =
+        EchoPathVariability::DelayAdjustment::kBufferFlush;
+    delay_controller_->Reset();
+    RTC_LOG(LS_WARNING) << "Reset due to render buffer overrun at block  "
+                        << capture_call_counter_;
+  }
+
+  // Update the render buffers with any newly arrived render blocks and prepare
+  // the render buffers for reading the render data corresponding to the current
+  // capture block.
+  render_event_ = render_buffer_->PrepareCaptureProcessing();
+  RTC_DCHECK(RenderDelayBuffer::BufferingEvent::kRenderOverrun !=
+             render_event_);
+  if (render_event_ == RenderDelayBuffer::BufferingEvent::kRenderUnderrun) {
+    if (estimated_delay_ &&
+        estimated_delay_->quality == DelayEstimate::Quality::kRefined) {
+      echo_path_variability.delay_change =
+          EchoPathVariability::DelayAdjustment::kDelayReset;
+      delay_controller_->Reset();
+      capture_properly_started_ = false;
+      render_properly_started_ = false;
+
+      RTC_LOG(LS_WARNING) << "Reset due to render buffer underrrun at block "
+                          << capture_call_counter_;
+    }
+  } else if (render_event_ == RenderDelayBuffer::BufferingEvent::kApiCallSkew) {
+    // There have been too many render calls in a row. Reset to avoid noncausal
+    // echo.
+    echo_path_variability.delay_change =
+        EchoPathVariability::DelayAdjustment::kDelayReset;
+    delay_controller_->Reset();
+    capture_properly_started_ = false;
+    render_properly_started_ = false;
+    RTC_LOG(LS_WARNING) << "Reset due to render buffer api skew at block "
+                        << capture_call_counter_;
+  }
+
+  data_dumper_->DumpWav("aec3_processblock_capture_input2", kBlockSize,
+                        &(*capture_block)[0][0],
+                        LowestBandRate(sample_rate_hz_), 1);
+
+  // Compute and and apply the render delay required to achieve proper signal
+  // alignment.
+  estimated_delay_ = delay_controller_->GetDelay(
+      render_buffer_->GetDownsampledRenderBuffer(), (*capture_block)[0]);
+
+  if (estimated_delay_) {
+    if (render_buffer_->CausalDelay(estimated_delay_->delay)) {
+      bool delay_change = render_buffer_->SetDelay(estimated_delay_->delay);
+      if (delay_change) {
+        RTC_LOG(LS_WARNING) << "Delay changed to " << estimated_delay_->delay
+                            << " at block " << capture_call_counter_;
+        echo_path_variability.delay_change =
+            EchoPathVariability::DelayAdjustment::kNewDetectedDelay;
+      }
+    } else {
+      // A noncausal delay has been detected. This can only happen if there is
+      // clockdrift, an audio pipeline issue has occurred, an unreliable delay
+      // estimate is used or the specified minimum delay is too short.
+      if (estimated_delay_->quality == DelayEstimate::Quality::kRefined) {
+        echo_path_variability.delay_change =
+            EchoPathVariability::DelayAdjustment::kDelayReset;
+        delay_controller_->Reset();
+        render_buffer_->Reset();
+        capture_properly_started_ = false;
+        render_properly_started_ = false;
+        RTC_LOG(LS_WARNING) << "Reset due to noncausal delay at block "
+                            << capture_call_counter_;
+      }
+    }
+  }
+
+  // Remove the echo from the capture signal.
+  echo_remover_->ProcessCapture(
+      echo_path_variability, capture_signal_saturation, estimated_delay_,
+      render_buffer_->GetRenderBuffer(), capture_block);
+
+  // Update the metrics.
+  metrics_.UpdateCapture(false);
+
+  render_event_ = RenderDelayBuffer::BufferingEvent::kNone;
+}
+
+void BlockProcessorImpl::BufferRender(
+    const std::vector<std::vector<float>>& block) {
+  RTC_DCHECK_EQ(NumBandsForRate(sample_rate_hz_), block.size());
+  RTC_DCHECK_EQ(kBlockSize, block[0].size());
+  data_dumper_->DumpRaw("aec3_processblock_call_order",
+                        static_cast<int>(BlockProcessorApiCall::kRender));
+  data_dumper_->DumpWav("aec3_processblock_render_input", kBlockSize,
+                        &block[0][0], LowestBandRate(sample_rate_hz_), 1);
+  data_dumper_->DumpWav("aec3_processblock_render_input2", kBlockSize,
+                        &block[0][0], LowestBandRate(sample_rate_hz_), 1);
+
+  render_event_ = render_buffer_->Insert(block);
+
+  metrics_.UpdateRender(render_event_ !=
+                        RenderDelayBuffer::BufferingEvent::kNone);
+
+  render_properly_started_ = true;
+  delay_controller_->LogRenderCall();
+}
+
+void BlockProcessorImpl::UpdateEchoLeakageStatus(bool leakage_detected) {
+  echo_remover_->UpdateEchoLeakageStatus(leakage_detected);
+}
+
+void BlockProcessorImpl::GetMetrics(EchoControl::Metrics* metrics) const {
+  echo_remover_->GetMetrics(metrics);
+  const int block_size_ms = sample_rate_hz_ == 8000 ? 8 : 4;
+  rtc::Optional<size_t> delay = render_buffer_->Delay();
+  metrics->delay_ms = delay ? static_cast<int>(*delay) * block_size_ms : 0;
+}
+
+}  // namespace
+
+BlockProcessor* BlockProcessor::Create(const EchoCanceller3Config& config,
+                                       int sample_rate_hz) {
+  std::unique_ptr<RenderDelayBuffer> render_buffer(
+      RenderDelayBuffer::Create(config, NumBandsForRate(sample_rate_hz)));
+  std::unique_ptr<RenderDelayController> delay_controller(
+      RenderDelayController::Create(
+          config, RenderDelayBuffer::DelayEstimatorOffset(config),
+          sample_rate_hz));
+  std::unique_ptr<EchoRemover> echo_remover(
+      EchoRemover::Create(config, sample_rate_hz));
+  return Create(config, sample_rate_hz, std::move(render_buffer),
+                std::move(delay_controller), std::move(echo_remover));
+}
+
+BlockProcessor* BlockProcessor::Create(
+    const EchoCanceller3Config& config,
+    int sample_rate_hz,
+    std::unique_ptr<RenderDelayBuffer> render_buffer) {
+  std::unique_ptr<RenderDelayController> delay_controller(
+      RenderDelayController::Create(
+          config, RenderDelayBuffer::DelayEstimatorOffset(config),
+          sample_rate_hz));
+  std::unique_ptr<EchoRemover> echo_remover(
+      EchoRemover::Create(config, sample_rate_hz));
+  return Create(config, sample_rate_hz, std::move(render_buffer),
+                std::move(delay_controller), std::move(echo_remover));
+}
+
+BlockProcessor* BlockProcessor::Create(
+    const EchoCanceller3Config& config,
+    int sample_rate_hz,
+    std::unique_ptr<RenderDelayBuffer> render_buffer,
+    std::unique_ptr<RenderDelayController> delay_controller,
+    std::unique_ptr<EchoRemover> echo_remover) {
+  return new BlockProcessorImpl(
+      config, sample_rate_hz, std::move(render_buffer),
+      std::move(delay_controller), std::move(echo_remover));
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/block_processor.h b/modules/audio_processing/aec3/block_processor.h
new file mode 100644
index 0000000..8687bc2
--- /dev/null
+++ b/modules/audio_processing/aec3/block_processor.h
@@ -0,0 +1,62 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_BLOCK_PROCESSOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_BLOCK_PROCESSOR_H_
+
+#include <memory>
+#include <vector>
+
+#include "modules/audio_processing/aec3/echo_remover.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/aec3/render_delay_controller.h"
+
+namespace webrtc {
+
+// Class for performing echo cancellation on 64 sample blocks of audio data.
+class BlockProcessor {
+ public:
+  static BlockProcessor* Create(const EchoCanceller3Config& config,
+                                int sample_rate_hz);
+  // Only used for testing purposes.
+  static BlockProcessor* Create(
+      const EchoCanceller3Config& config,
+      int sample_rate_hz,
+      std::unique_ptr<RenderDelayBuffer> render_buffer);
+  static BlockProcessor* Create(
+      const EchoCanceller3Config& config,
+      int sample_rate_hz,
+      std::unique_ptr<RenderDelayBuffer> render_buffer,
+      std::unique_ptr<RenderDelayController> delay_controller,
+      std::unique_ptr<EchoRemover> echo_remover);
+
+  virtual ~BlockProcessor() = default;
+
+  // Get current metrics.
+  virtual void GetMetrics(EchoControl::Metrics* metrics) const = 0;
+
+  // Processes a block of capture data.
+  virtual void ProcessCapture(
+      bool echo_path_gain_change,
+      bool capture_signal_saturation,
+      std::vector<std::vector<float>>* capture_block) = 0;
+
+  // Buffers a block of render data supplied by a FrameBlocker object.
+  virtual void BufferRender(
+      const std::vector<std::vector<float>>& render_block) = 0;
+
+  // Reports whether echo leakage has been detected in the echo canceller
+  // output.
+  virtual void UpdateEchoLeakageStatus(bool leakage_detected) = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_BLOCK_PROCESSOR_H_
diff --git a/modules/audio_processing/aec3/block_processor_metrics.cc b/modules/audio_processing/aec3/block_processor_metrics.cc
new file mode 100644
index 0000000..c8bdda7
--- /dev/null
+++ b/modules/audio_processing/aec3/block_processor_metrics.cc
@@ -0,0 +1,103 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/block_processor_metrics.h"
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+
+enum class RenderUnderrunCategory {
+  kNone,
+  kFew,
+  kSeveral,
+  kMany,
+  kConstant,
+  kNumCategories
+};
+
+enum class RenderOverrunCategory {
+  kNone,
+  kFew,
+  kSeveral,
+  kMany,
+  kConstant,
+  kNumCategories
+};
+
+}  // namespace
+
+void BlockProcessorMetrics::UpdateCapture(bool underrun) {
+  ++capture_block_counter_;
+  if (underrun) {
+    ++render_buffer_underruns_;
+  }
+
+  if (capture_block_counter_ == kMetricsReportingIntervalBlocks) {
+    metrics_reported_ = true;
+
+    RenderUnderrunCategory underrun_category;
+    if (render_buffer_underruns_ == 0) {
+      underrun_category = RenderUnderrunCategory::kNone;
+    } else if (render_buffer_underruns_ > (capture_block_counter_ >> 1)) {
+      underrun_category = RenderUnderrunCategory::kConstant;
+    } else if (render_buffer_underruns_ > 100) {
+      underrun_category = RenderUnderrunCategory::kMany;
+    } else if (render_buffer_underruns_ > 10) {
+      underrun_category = RenderUnderrunCategory::kSeveral;
+    } else {
+      underrun_category = RenderUnderrunCategory::kFew;
+    }
+    RTC_HISTOGRAM_ENUMERATION(
+        "WebRTC.Audio.EchoCanceller.RenderUnderruns",
+        static_cast<int>(underrun_category),
+        static_cast<int>(RenderUnderrunCategory::kNumCategories));
+
+    RenderOverrunCategory overrun_category;
+    if (render_buffer_overruns_ == 0) {
+      overrun_category = RenderOverrunCategory::kNone;
+    } else if (render_buffer_overruns_ > (buffer_render_calls_ >> 1)) {
+      overrun_category = RenderOverrunCategory::kConstant;
+    } else if (render_buffer_overruns_ > 100) {
+      overrun_category = RenderOverrunCategory::kMany;
+    } else if (render_buffer_overruns_ > 10) {
+      overrun_category = RenderOverrunCategory::kSeveral;
+    } else {
+      overrun_category = RenderOverrunCategory::kFew;
+    }
+    RTC_HISTOGRAM_ENUMERATION(
+        "WebRTC.Audio.EchoCanceller.RenderOverruns",
+        static_cast<int>(overrun_category),
+        static_cast<int>(RenderOverrunCategory::kNumCategories));
+
+    ResetMetrics();
+    capture_block_counter_ = 0;
+  } else {
+    metrics_reported_ = false;
+  }
+}
+
+void BlockProcessorMetrics::UpdateRender(bool overrun) {
+  ++buffer_render_calls_;
+  if (overrun) {
+    ++render_buffer_overruns_;
+  }
+}
+
+void BlockProcessorMetrics::ResetMetrics() {
+  render_buffer_underruns_ = 0;
+  render_buffer_overruns_ = 0;
+  buffer_render_calls_ = 0;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/block_processor_metrics.h b/modules/audio_processing/aec3/block_processor_metrics.h
new file mode 100644
index 0000000..9b437c0
--- /dev/null
+++ b/modules/audio_processing/aec3/block_processor_metrics.h
@@ -0,0 +1,47 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_BLOCK_PROCESSOR_METRICS_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_BLOCK_PROCESSOR_METRICS_H_
+
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Handles the reporting of metrics for the block_processor.
+class BlockProcessorMetrics {
+ public:
+  BlockProcessorMetrics() = default;
+
+  // Updates the metric with new capture data.
+  void UpdateCapture(bool underrun);
+
+  // Updates the metric with new render data.
+  void UpdateRender(bool overrun);
+
+  // Returns true if the metrics have just been reported, otherwise false.
+  bool MetricsReported() { return metrics_reported_; }
+
+ private:
+  // Resets the metrics.
+  void ResetMetrics();
+
+  int capture_block_counter_ = 0;
+  bool metrics_reported_ = false;
+  int render_buffer_underruns_ = 0;
+  int render_buffer_overruns_ = 0;
+  int buffer_render_calls_ = 0;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(BlockProcessorMetrics);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_BLOCK_PROCESSOR_METRICS_H_
diff --git a/modules/audio_processing/aec3/block_processor_metrics_unittest.cc b/modules/audio_processing/aec3/block_processor_metrics_unittest.cc
new file mode 100644
index 0000000..7ce8573
--- /dev/null
+++ b/modules/audio_processing/aec3/block_processor_metrics_unittest.cc
@@ -0,0 +1,34 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/block_processor_metrics.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+// Verify the general functionality of BlockProcessorMetrics.
+TEST(BlockProcessorMetrics, NormalUsage) {
+  BlockProcessorMetrics metrics;
+
+  for (int j = 0; j < 3; ++j) {
+    for (int k = 0; k < kMetricsReportingIntervalBlocks - 1; ++k) {
+      metrics.UpdateRender(false);
+      metrics.UpdateRender(false);
+      metrics.UpdateCapture(false);
+      EXPECT_FALSE(metrics.MetricsReported());
+    }
+    metrics.UpdateCapture(false);
+    EXPECT_TRUE(metrics.MetricsReported());
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/block_processor_unittest.cc b/modules/audio_processing/aec3/block_processor_unittest.cc
new file mode 100644
index 0000000..87b5da9
--- /dev/null
+++ b/modules/audio_processing/aec3/block_processor_unittest.cc
@@ -0,0 +1,258 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/block_processor.h"
+
+#include <memory>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/mock/mock_echo_remover.h"
+#include "modules/audio_processing/aec3/mock/mock_render_delay_buffer.h"
+#include "modules/audio_processing/aec3/mock/mock_render_delay_controller.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/random.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using testing::AtLeast;
+using testing::Return;
+using testing::StrictMock;
+using testing::_;
+
+// Verifies that the basic BlockProcessor functionality works and that the API
+// methods are callable.
+void RunBasicSetupAndApiCallTest(int sample_rate_hz, int num_iterations) {
+  std::unique_ptr<BlockProcessor> block_processor(
+      BlockProcessor::Create(EchoCanceller3Config(), sample_rate_hz));
+  std::vector<std::vector<float>> block(NumBandsForRate(sample_rate_hz),
+                                        std::vector<float>(kBlockSize, 1000.f));
+
+  for (int k = 0; k < num_iterations; ++k) {
+    block_processor->BufferRender(block);
+    block_processor->ProcessCapture(false, false, &block);
+    block_processor->UpdateEchoLeakageStatus(false);
+  }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+void RunRenderBlockSizeVerificationTest(int sample_rate_hz) {
+  std::unique_ptr<BlockProcessor> block_processor(
+      BlockProcessor::Create(EchoCanceller3Config(), sample_rate_hz));
+  std::vector<std::vector<float>> block(
+      NumBandsForRate(sample_rate_hz), std::vector<float>(kBlockSize - 1, 0.f));
+
+  EXPECT_DEATH(block_processor->BufferRender(block), "");
+}
+
+void RunCaptureBlockSizeVerificationTest(int sample_rate_hz) {
+  std::unique_ptr<BlockProcessor> block_processor(
+      BlockProcessor::Create(EchoCanceller3Config(), sample_rate_hz));
+  std::vector<std::vector<float>> block(
+      NumBandsForRate(sample_rate_hz), std::vector<float>(kBlockSize - 1, 0.f));
+
+  EXPECT_DEATH(block_processor->ProcessCapture(false, false, &block), "");
+}
+
+void RunRenderNumBandsVerificationTest(int sample_rate_hz) {
+  const size_t wrong_num_bands = NumBandsForRate(sample_rate_hz) < 3
+                                     ? NumBandsForRate(sample_rate_hz) + 1
+                                     : 1;
+  std::unique_ptr<BlockProcessor> block_processor(
+      BlockProcessor::Create(EchoCanceller3Config(), sample_rate_hz));
+  std::vector<std::vector<float>> block(wrong_num_bands,
+                                        std::vector<float>(kBlockSize, 0.f));
+
+  EXPECT_DEATH(block_processor->BufferRender(block), "");
+}
+
+void RunCaptureNumBandsVerificationTest(int sample_rate_hz) {
+  const size_t wrong_num_bands = NumBandsForRate(sample_rate_hz) < 3
+                                     ? NumBandsForRate(sample_rate_hz) + 1
+                                     : 1;
+  std::unique_ptr<BlockProcessor> block_processor(
+      BlockProcessor::Create(EchoCanceller3Config(), sample_rate_hz));
+  std::vector<std::vector<float>> block(wrong_num_bands,
+                                        std::vector<float>(kBlockSize, 0.f));
+
+  EXPECT_DEATH(block_processor->ProcessCapture(false, false, &block), "");
+}
+#endif
+
+std::string ProduceDebugText(int sample_rate_hz) {
+  std::ostringstream ss;
+  ss << "Sample rate: " << sample_rate_hz;
+  return ss.str();
+}
+
+}  // namespace
+
+// Verifies that the delay controller functionality is properly integrated with
+// the render delay buffer inside block processor.
+// TODO(peah): Activate the unittest once the required code has been landed.
+TEST(BlockProcessor, DISABLED_DelayControllerIntegration) {
+  constexpr size_t kNumBlocks = 310;
+  constexpr size_t kDelayInSamples = 640;
+  constexpr size_t kDelayHeadroom = 1;
+  constexpr size_t kDelayInBlocks =
+      kDelayInSamples / kBlockSize - kDelayHeadroom;
+  Random random_generator(42U);
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    std::unique_ptr<testing::StrictMock<webrtc::test::MockRenderDelayBuffer>>
+        render_delay_buffer_mock(
+            new StrictMock<webrtc::test::MockRenderDelayBuffer>(rate));
+    EXPECT_CALL(*render_delay_buffer_mock, Insert(_))
+        .Times(kNumBlocks)
+        .WillRepeatedly(Return(RenderDelayBuffer::BufferingEvent::kNone));
+    EXPECT_CALL(*render_delay_buffer_mock, SetDelay(kDelayInBlocks))
+        .Times(AtLeast(1));
+    EXPECT_CALL(*render_delay_buffer_mock, MaxDelay()).WillOnce(Return(30));
+    EXPECT_CALL(*render_delay_buffer_mock, Delay())
+        .Times(kNumBlocks + 1)
+        .WillRepeatedly(Return(0));
+    std::unique_ptr<BlockProcessor> block_processor(BlockProcessor::Create(
+        EchoCanceller3Config(), rate, std::move(render_delay_buffer_mock)));
+
+    std::vector<std::vector<float>> render_block(
+        NumBandsForRate(rate), std::vector<float>(kBlockSize, 0.f));
+    std::vector<std::vector<float>> capture_block(
+        NumBandsForRate(rate), std::vector<float>(kBlockSize, 0.f));
+    DelayBuffer<float> signal_delay_buffer(kDelayInSamples);
+    for (size_t k = 0; k < kNumBlocks; ++k) {
+      RandomizeSampleVector(&random_generator, render_block[0]);
+      signal_delay_buffer.Delay(render_block[0], capture_block[0]);
+      block_processor->BufferRender(render_block);
+      block_processor->ProcessCapture(false, false, &capture_block);
+    }
+  }
+}
+
+// Verifies that BlockProcessor submodules are called in a proper manner.
+TEST(BlockProcessor, DISABLED_SubmoduleIntegration) {
+  constexpr size_t kNumBlocks = 310;
+  Random random_generator(42U);
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    std::unique_ptr<testing::StrictMock<webrtc::test::MockRenderDelayBuffer>>
+        render_delay_buffer_mock(
+            new StrictMock<webrtc::test::MockRenderDelayBuffer>(rate));
+    std::unique_ptr<
+        testing::StrictMock<webrtc::test::MockRenderDelayController>>
+        render_delay_controller_mock(
+            new StrictMock<webrtc::test::MockRenderDelayController>());
+    std::unique_ptr<testing::StrictMock<webrtc::test::MockEchoRemover>>
+        echo_remover_mock(new StrictMock<webrtc::test::MockEchoRemover>());
+
+    EXPECT_CALL(*render_delay_buffer_mock, Insert(_))
+        .Times(kNumBlocks - 1)
+        .WillRepeatedly(Return(RenderDelayBuffer::BufferingEvent::kNone));
+    EXPECT_CALL(*render_delay_buffer_mock, PrepareCaptureProcessing())
+        .Times(kNumBlocks);
+    EXPECT_CALL(*render_delay_buffer_mock, SetDelay(9)).Times(AtLeast(1));
+    EXPECT_CALL(*render_delay_buffer_mock, Delay())
+        .Times(kNumBlocks)
+        .WillRepeatedly(Return(0));
+    EXPECT_CALL(*render_delay_controller_mock, GetDelay(_, _))
+        .Times(kNumBlocks);
+    EXPECT_CALL(*echo_remover_mock, ProcessCapture(_, _, _, _, _))
+        .Times(kNumBlocks);
+    EXPECT_CALL(*echo_remover_mock, UpdateEchoLeakageStatus(_))
+        .Times(kNumBlocks);
+
+    std::unique_ptr<BlockProcessor> block_processor(BlockProcessor::Create(
+        EchoCanceller3Config(), rate, std::move(render_delay_buffer_mock),
+        std::move(render_delay_controller_mock), std::move(echo_remover_mock)));
+
+    std::vector<std::vector<float>> render_block(
+        NumBandsForRate(rate), std::vector<float>(kBlockSize, 0.f));
+    std::vector<std::vector<float>> capture_block(
+        NumBandsForRate(rate), std::vector<float>(kBlockSize, 0.f));
+    DelayBuffer<float> signal_delay_buffer(640);
+    for (size_t k = 0; k < kNumBlocks; ++k) {
+      RandomizeSampleVector(&random_generator, render_block[0]);
+      signal_delay_buffer.Delay(render_block[0], capture_block[0]);
+      block_processor->BufferRender(render_block);
+      block_processor->ProcessCapture(false, false, &capture_block);
+      block_processor->UpdateEchoLeakageStatus(false);
+    }
+  }
+}
+
+TEST(BlockProcessor, BasicSetupAndApiCalls) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    RunBasicSetupAndApiCallTest(rate, 1);
+  }
+}
+
+TEST(BlockProcessor, TestLongerCall) {
+  RunBasicSetupAndApiCallTest(16000, 20 * kNumBlocksPerSecond);
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// TODO(gustaf): Re-enable the test once the issue with memory leaks during
+// DEATH tests on test bots has been fixed.
+TEST(BlockProcessor, DISABLED_VerifyRenderBlockSizeCheck) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    RunRenderBlockSizeVerificationTest(rate);
+  }
+}
+
+TEST(BlockProcessor, VerifyCaptureBlockSizeCheck) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    RunCaptureBlockSizeVerificationTest(rate);
+  }
+}
+
+TEST(BlockProcessor, VerifyRenderNumBandsCheck) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    RunRenderNumBandsVerificationTest(rate);
+  }
+}
+
+// TODO(peah): Verify the check for correct number of bands in the capture
+// signal.
+TEST(BlockProcessor, VerifyCaptureNumBandsCheck) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    RunCaptureNumBandsVerificationTest(rate);
+  }
+}
+
+// Verifiers that the verification for null ProcessCapture input works.
+TEST(BlockProcessor, NullProcessCaptureParameter) {
+  EXPECT_DEATH(std::unique_ptr<BlockProcessor>(
+                   BlockProcessor::Create(EchoCanceller3Config(), 8000))
+                   ->ProcessCapture(false, false, nullptr),
+               "");
+}
+
+// Verifies the check for correct sample rate.
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.
+TEST(BlockProcessor, DISABLED_WrongSampleRate) {
+  EXPECT_DEATH(std::unique_ptr<BlockProcessor>(
+                   BlockProcessor::Create(EchoCanceller3Config(), 8001)),
+               "");
+}
+
+#endif
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/cascaded_biquad_filter.cc b/modules/audio_processing/aec3/cascaded_biquad_filter.cc
new file mode 100644
index 0000000..9a472f5
--- /dev/null
+++ b/modules/audio_processing/aec3/cascaded_biquad_filter.cc
@@ -0,0 +1,58 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/cascaded_biquad_filter.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+CascadedBiQuadFilter::CascadedBiQuadFilter(
+    const CascadedBiQuadFilter::BiQuadCoefficients& coefficients,
+    size_t num_biquads)
+    : biquad_states_(num_biquads), coefficients_(coefficients) {}
+
+CascadedBiQuadFilter::~CascadedBiQuadFilter() = default;
+
+void CascadedBiQuadFilter::Process(rtc::ArrayView<const float> x,
+                                   rtc::ArrayView<float> y) {
+  ApplyBiQuad(x, y, &biquad_states_[0]);
+  for (size_t k = 1; k < biquad_states_.size(); ++k) {
+    ApplyBiQuad(y, y, &biquad_states_[k]);
+  }
+}
+
+void CascadedBiQuadFilter::Process(rtc::ArrayView<float> y) {
+  for (auto& biquad : biquad_states_) {
+    ApplyBiQuad(y, y, &biquad);
+  }
+}
+
+void CascadedBiQuadFilter::ApplyBiQuad(
+    rtc::ArrayView<const float> x,
+    rtc::ArrayView<float> y,
+    CascadedBiQuadFilter::BiQuadState* biquad_state) {
+  RTC_DCHECK_EQ(x.size(), y.size());
+  RTC_DCHECK(biquad_state);
+  const auto* c_b = coefficients_.b;
+  const auto* c_a = coefficients_.a;
+  auto* m_x = biquad_state->x;
+  auto* m_y = biquad_state->y;
+  for (size_t k = 0; k < x.size(); ++k) {
+    const float tmp = x[k];
+    y[k] = c_b[0] * tmp + c_b[1] * m_x[0] + c_b[2] * m_x[1] - c_a[0] * m_y[0] -
+           c_a[1] * m_y[1];
+    m_x[1] = m_x[0];
+    m_x[0] = tmp;
+    m_y[1] = m_y[0];
+    m_y[0] = y[k];
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/cascaded_biquad_filter.h b/modules/audio_processing/aec3/cascaded_biquad_filter.h
new file mode 100644
index 0000000..aea889a
--- /dev/null
+++ b/modules/audio_processing/aec3/cascaded_biquad_filter.h
@@ -0,0 +1,58 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_CASCADED_BIQUAD_FILTER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_CASCADED_BIQUAD_FILTER_H_
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Applies a number of identical biquads in a cascaded manner. The filter
+// implementation is direct form 1.
+class CascadedBiQuadFilter {
+ public:
+  struct BiQuadState {
+    BiQuadState() : x(), y() {}
+    float x[2];
+    float y[2];
+  };
+
+  struct BiQuadCoefficients {
+    float b[3];
+    float a[2];
+  };
+
+  CascadedBiQuadFilter(
+      const CascadedBiQuadFilter::BiQuadCoefficients& coefficients,
+      size_t num_biquads);
+  ~CascadedBiQuadFilter();
+  // Applies the biquads on the values in x in order to form the output in y.
+  void Process(rtc::ArrayView<const float> x, rtc::ArrayView<float> y);
+  // Applies the biquads on the values in y in an in-place manner.
+  void Process(rtc::ArrayView<float> y);
+
+ private:
+  void ApplyBiQuad(rtc::ArrayView<const float> x,
+                   rtc::ArrayView<float> y,
+                   CascadedBiQuadFilter::BiQuadState* biquad_state);
+
+  std::vector<BiQuadState> biquad_states_;
+  const BiQuadCoefficients coefficients_;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(CascadedBiQuadFilter);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_CASCADED_BIQUAD_FILTER_H_
diff --git a/modules/audio_processing/aec3/cascaded_biquad_filter_unittest.cc b/modules/audio_processing/aec3/cascaded_biquad_filter_unittest.cc
new file mode 100644
index 0000000..fcb77e1
--- /dev/null
+++ b/modules/audio_processing/aec3/cascaded_biquad_filter_unittest.cc
@@ -0,0 +1,98 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/cascaded_biquad_filter.h"
+
+#include <vector>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+// Coefficients for a second order Butterworth high-pass filter with cutoff
+// frequency 100 Hz.
+const CascadedBiQuadFilter::BiQuadCoefficients kHighPassFilterCoefficients = {
+    {0.97261f, -1.94523f, 0.97261f},
+    {-1.94448f, 0.94598f}};
+
+const CascadedBiQuadFilter::BiQuadCoefficients kTransparentCoefficients = {
+    {1.f, 0.f, 0.f},
+    {0.f, 0.f}};
+
+const CascadedBiQuadFilter::BiQuadCoefficients kBlockingCoefficients = {
+    {0.f, 0.f, 0.f},
+    {0.f, 0.f}};
+
+std::vector<float> CreateInputWithIncreasingValues(size_t vector_length) {
+  std::vector<float> v(vector_length);
+  for (size_t k = 0; k < v.size(); ++k) {
+    v[k] = k;
+  }
+  return v;
+}
+
+}  // namespace
+
+// Verifies that the filter applies an effect which removes the input signal.
+// The test also verifies that the in-place Process API call works as intended.
+TEST(CascadedBiquadFilter, BlockingConfiguration) {
+  std::vector<float> values = CreateInputWithIncreasingValues(1000);
+
+  CascadedBiQuadFilter filter(kBlockingCoefficients, 1);
+  filter.Process(values);
+
+  EXPECT_EQ(std::vector<float>(1000, 0.f), values);
+}
+
+// Verifies that the filter is able to form a zero-mean output from a
+// non-zeromean input signal when coefficients for a high-pass filter are
+// applied. The test also verifies that the filter works with multiple biquads.
+TEST(CascadedBiquadFilter, HighPassConfiguration) {
+  std::vector<float> values(1000);
+  for (size_t k = 0; k < values.size(); ++k) {
+    values[k] = 1.f;
+  }
+
+  CascadedBiQuadFilter filter(kHighPassFilterCoefficients, 2);
+  filter.Process(values);
+
+  for (size_t k = values.size() / 2; k < values.size(); ++k) {
+    EXPECT_NEAR(0.f, values[k], 1e-4);
+  }
+}
+
+// Verifies that the filter is able to produce a transparent effect with no
+// impact on the data when the proper coefficients are applied. The test also
+// verifies that the non-in-place Process API call works as intended.
+TEST(CascadedBiquadFilter, TransparentConfiguration) {
+  const std::vector<float> input = CreateInputWithIncreasingValues(1000);
+  std::vector<float> output(input.size());
+
+  CascadedBiQuadFilter filter(kTransparentCoefficients, 1);
+  filter.Process(input, output);
+
+  EXPECT_EQ(input, output);
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// Verifies that the check of the lengths for the input and output works for the
+// non-in-place call.
+TEST(CascadedBiquadFilter, InputSizeCheckVerification) {
+  const std::vector<float> input = CreateInputWithIncreasingValues(10);
+  std::vector<float> output(input.size() - 1);
+
+  CascadedBiQuadFilter filter(kTransparentCoefficients, 1);
+  EXPECT_DEATH(filter.Process(input, output), "");
+}
+#endif
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/comfort_noise_generator.cc b/modules/audio_processing/aec3/comfort_noise_generator.cc
new file mode 100644
index 0000000..dab40a9
--- /dev/null
+++ b/modules/audio_processing/aec3/comfort_noise_generator.cc
@@ -0,0 +1,219 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/comfort_noise_generator.h"
+
+#include "typedefs.h"  // NOLINT(build/include)
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+#include <math.h>
+#include <algorithm>
+#include <array>
+#include <functional>
+#include <numeric>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+namespace webrtc {
+
+namespace {
+
+// Creates an array of uniformly distributed variables.
+void TableRandomValue(int16_t* vector, int16_t vector_length, uint32_t* seed) {
+  for (int i = 0; i < vector_length; i++) {
+    seed[0] = (seed[0] * ((int32_t)69069) + 1) & (0x80000000 - 1);
+    vector[i] = (int16_t)(seed[0] >> 16);
+  }
+}
+
+}  // namespace
+
+namespace aec3 {
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+
+void EstimateComfortNoise_SSE2(const std::array<float, kFftLengthBy2Plus1>& N2,
+                               uint32_t* seed,
+                               FftData* lower_band_noise,
+                               FftData* upper_band_noise) {
+  FftData* N_low = lower_band_noise;
+  FftData* N_high = upper_band_noise;
+
+  // Compute square root spectrum.
+  std::array<float, kFftLengthBy2Plus1> N;
+  for (size_t k = 0; k < kFftLengthBy2; k += 4) {
+    __m128 v = _mm_loadu_ps(&N2[k]);
+    v = _mm_sqrt_ps(v);
+    _mm_storeu_ps(&N[k], v);
+  }
+
+  N[kFftLengthBy2] = sqrtf(N2[kFftLengthBy2]);
+
+  // Compute the noise level for the upper bands.
+  constexpr float kOneByNumBands = 1.f / (kFftLengthBy2Plus1 / 2 + 1);
+  constexpr int kFftLengthBy2Plus1By2 = kFftLengthBy2Plus1 / 2;
+  const float high_band_noise_level =
+      std::accumulate(N.begin() + kFftLengthBy2Plus1By2, N.end(), 0.f) *
+      kOneByNumBands;
+
+  // Generate complex noise.
+  std::array<int16_t, kFftLengthBy2 - 1> random_values_int;
+  TableRandomValue(random_values_int.data(), random_values_int.size(), seed);
+
+  std::array<float, kFftLengthBy2 - 1> sin;
+  std::array<float, kFftLengthBy2 - 1> cos;
+  constexpr float kScale = 6.28318530717959f / 32768.0f;
+  std::transform(random_values_int.begin(), random_values_int.end(),
+                 sin.begin(), [&](int16_t a) { return -sinf(kScale * a); });
+  std::transform(random_values_int.begin(), random_values_int.end(),
+                 cos.begin(), [&](int16_t a) { return cosf(kScale * a); });
+
+  // Form low-frequency noise via spectral shaping.
+  N_low->re[0] = N_low->re[kFftLengthBy2] = N_high->re[0] =
+      N_high->re[kFftLengthBy2] = 0.f;
+  std::transform(cos.begin(), cos.end(), N.begin() + 1, N_low->re.begin() + 1,
+                 std::multiplies<float>());
+  std::transform(sin.begin(), sin.end(), N.begin() + 1, N_low->im.begin() + 1,
+                 std::multiplies<float>());
+
+  // Form the high-frequency noise via simple levelling.
+  std::transform(cos.begin(), cos.end(), N_high->re.begin() + 1,
+                 [&](float a) { return high_band_noise_level * a; });
+  std::transform(sin.begin(), sin.end(), N_high->im.begin() + 1,
+                 [&](float a) { return high_band_noise_level * a; });
+}
+
+#endif
+
+void EstimateComfortNoise(const std::array<float, kFftLengthBy2Plus1>& N2,
+                          uint32_t* seed,
+                          FftData* lower_band_noise,
+                          FftData* upper_band_noise) {
+  FftData* N_low = lower_band_noise;
+  FftData* N_high = upper_band_noise;
+
+  // Compute square root spectrum.
+  std::array<float, kFftLengthBy2Plus1> N;
+  std::transform(N2.begin(), N2.end(), N.begin(),
+                 [](float a) { return sqrtf(a); });
+
+  // Compute the noise level for the upper bands.
+  constexpr float kOneByNumBands = 1.f / (kFftLengthBy2Plus1 / 2 + 1);
+  constexpr int kFftLengthBy2Plus1By2 = kFftLengthBy2Plus1 / 2;
+  const float high_band_noise_level =
+      std::accumulate(N.begin() + kFftLengthBy2Plus1By2, N.end(), 0.f) *
+      kOneByNumBands;
+
+  // Generate complex noise.
+  std::array<int16_t, kFftLengthBy2 - 1> random_values_int;
+  TableRandomValue(random_values_int.data(), random_values_int.size(), seed);
+
+  std::array<float, kFftLengthBy2 - 1> sin;
+  std::array<float, kFftLengthBy2 - 1> cos;
+  constexpr float kScale = 6.28318530717959f / 32768.0f;
+  std::transform(random_values_int.begin(), random_values_int.end(),
+                 sin.begin(), [&](int16_t a) { return -sinf(kScale * a); });
+  std::transform(random_values_int.begin(), random_values_int.end(),
+                 cos.begin(), [&](int16_t a) { return cosf(kScale * a); });
+
+  // Form low-frequency noise via spectral shaping.
+  N_low->re[0] = N_low->re[kFftLengthBy2] = N_high->re[0] =
+      N_high->re[kFftLengthBy2] = 0.f;
+  std::transform(cos.begin(), cos.end(), N.begin() + 1, N_low->re.begin() + 1,
+                 std::multiplies<float>());
+  std::transform(sin.begin(), sin.end(), N.begin() + 1, N_low->im.begin() + 1,
+                 std::multiplies<float>());
+
+  // Form the high-frequency noise via simple levelling.
+  std::transform(cos.begin(), cos.end(), N_high->re.begin() + 1,
+                 [&](float a) { return high_band_noise_level * a; });
+  std::transform(sin.begin(), sin.end(), N_high->im.begin() + 1,
+                 [&](float a) { return high_band_noise_level * a; });
+}
+
+}  // namespace aec3
+
+ComfortNoiseGenerator::ComfortNoiseGenerator(Aec3Optimization optimization)
+    : optimization_(optimization),
+      seed_(42),
+      N2_initial_(new std::array<float, kFftLengthBy2Plus1>()) {
+  N2_initial_->fill(0.f);
+  Y2_smoothed_.fill(0.f);
+  N2_.fill(1.0e6f);
+}
+
+ComfortNoiseGenerator::~ComfortNoiseGenerator() = default;
+
+void ComfortNoiseGenerator::Compute(
+    const AecState& aec_state,
+    const std::array<float, kFftLengthBy2Plus1>& capture_spectrum,
+    FftData* lower_band_noise,
+    FftData* upper_band_noise) {
+  RTC_DCHECK(lower_band_noise);
+  RTC_DCHECK(upper_band_noise);
+  const auto& Y2 = capture_spectrum;
+
+  if (!aec_state.SaturatedCapture()) {
+    // Smooth Y2.
+    std::transform(Y2_smoothed_.begin(), Y2_smoothed_.end(), Y2.begin(),
+                   Y2_smoothed_.begin(),
+                   [](float a, float b) { return a + 0.1f * (b - a); });
+
+    if (N2_counter_ > 50) {
+      // Update N2 from Y2_smoothed.
+      std::transform(N2_.begin(), N2_.end(), Y2_smoothed_.begin(), N2_.begin(),
+                     [](float a, float b) {
+                       return b < a ? (0.9f * b + 0.1f * a) * 1.0002f
+                                    : a * 1.0002f;
+                     });
+    }
+
+    if (N2_initial_) {
+      if (++N2_counter_ == 1000) {
+        N2_initial_.reset();
+      } else {
+        // Compute the N2_initial from N2.
+        std::transform(
+            N2_.begin(), N2_.end(), N2_initial_->begin(), N2_initial_->begin(),
+            [](float a, float b) { return a > b ? b + 0.001f * (a - b) : a; });
+      }
+    }
+  }
+
+  // Limit the noise to a floor of -96 dBFS.
+  constexpr float kNoiseFloor = 440.f;
+  for (auto& n : N2_) {
+    n = std::max(n, kNoiseFloor);
+  }
+  if (N2_initial_) {
+    for (auto& n : *N2_initial_) {
+      n = std::max(n, kNoiseFloor);
+    }
+  }
+
+  // Choose N2 estimate to use.
+  const std::array<float, kFftLengthBy2Plus1>& N2 =
+      N2_initial_ ? *N2_initial_ : N2_;
+
+  switch (optimization_) {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+    case Aec3Optimization::kSse2:
+      aec3::EstimateComfortNoise_SSE2(N2, &seed_, lower_band_noise,
+                                      upper_band_noise);
+      break;
+#endif
+    default:
+      aec3::EstimateComfortNoise(N2, &seed_, lower_band_noise,
+                                 upper_band_noise);
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/comfort_noise_generator.h b/modules/audio_processing/aec3/comfort_noise_generator.h
new file mode 100644
index 0000000..2d998be
--- /dev/null
+++ b/modules/audio_processing/aec3/comfort_noise_generator.h
@@ -0,0 +1,68 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_COMFORT_NOISE_GENERATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_COMFORT_NOISE_GENERATOR_H_
+
+#include <array>
+#include <memory>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+namespace aec3 {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+
+void EstimateComfortNoise_SSE2(const std::array<float, kFftLengthBy2Plus1>& N2,
+                               uint32_t* seed,
+                               FftData* lower_band_noise,
+                               FftData* upper_band_noise);
+#endif
+void EstimateComfortNoise(const std::array<float, kFftLengthBy2Plus1>& N2,
+                          uint32_t* seed,
+                          FftData* lower_band_noise,
+                          FftData* upper_band_noise);
+
+}  // namespace aec3
+
+// Generates the comfort noise.
+class ComfortNoiseGenerator {
+ public:
+  explicit ComfortNoiseGenerator(Aec3Optimization optimization);
+  ~ComfortNoiseGenerator();
+
+  // Computes the comfort noise.
+  void Compute(const AecState& aec_state,
+               const std::array<float, kFftLengthBy2Plus1>& capture_spectrum,
+               FftData* lower_band_noise,
+               FftData* upper_band_noise);
+
+  // Returns the estimate of the background noise spectrum.
+  const std::array<float, kFftLengthBy2Plus1>& NoiseSpectrum() const {
+    return N2_;
+  }
+
+ private:
+  const Aec3Optimization optimization_;
+  uint32_t seed_;
+  std::unique_ptr<std::array<float, kFftLengthBy2Plus1>> N2_initial_;
+  std::array<float, kFftLengthBy2Plus1> Y2_smoothed_;
+  std::array<float, kFftLengthBy2Plus1> N2_;
+  int N2_counter_ = 0;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(ComfortNoiseGenerator);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_COMFORT_NOISE_GENERATOR_H_
diff --git a/modules/audio_processing/aec3/comfort_noise_generator_unittest.cc b/modules/audio_processing/aec3/comfort_noise_generator_unittest.cc
new file mode 100644
index 0000000..d7e9407
--- /dev/null
+++ b/modules/audio_processing/aec3/comfort_noise_generator_unittest.cc
@@ -0,0 +1,121 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/comfort_noise_generator.h"
+
+#include <algorithm>
+#include <numeric>
+
+#include "rtc_base/random.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace aec3 {
+namespace {
+
+float Power(const FftData& N) {
+  std::array<float, kFftLengthBy2Plus1> N2;
+  N.Spectrum(Aec3Optimization::kNone, N2);
+  return std::accumulate(N2.begin(), N2.end(), 0.f) / N2.size();
+}
+
+}  // namespace
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+TEST(ComfortNoiseGenerator, NullLowerBandNoise) {
+  std::array<float, kFftLengthBy2Plus1> N2;
+  FftData noise;
+  EXPECT_DEATH(
+      ComfortNoiseGenerator(DetectOptimization())
+          .Compute(AecState(EchoCanceller3Config{}), N2, nullptr, &noise),
+      "");
+}
+
+TEST(ComfortNoiseGenerator, NullUpperBandNoise) {
+  std::array<float, kFftLengthBy2Plus1> N2;
+  FftData noise;
+  EXPECT_DEATH(
+      ComfortNoiseGenerator(DetectOptimization())
+          .Compute(AecState(EchoCanceller3Config{}), N2, &noise, nullptr),
+      "");
+}
+
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+// Verifies that the optimized methods are bitexact to their reference
+// counterparts.
+TEST(ComfortNoiseGenerator, TestOptimizations) {
+  if (WebRtc_GetCPUInfo(kSSE2) != 0) {
+    Random random_generator(42U);
+    uint32_t seed = 42;
+    uint32_t seed_SSE2 = 42;
+    std::array<float, kFftLengthBy2Plus1> N2;
+    FftData lower_band_noise;
+    FftData upper_band_noise;
+    FftData lower_band_noise_SSE2;
+    FftData upper_band_noise_SSE2;
+    for (int k = 0; k < 10; ++k) {
+      for (size_t j = 0; j < N2.size(); ++j) {
+        N2[j] = random_generator.Rand<float>() * 1000.f;
+      }
+
+      EstimateComfortNoise(N2, &seed, &lower_band_noise, &upper_band_noise);
+      EstimateComfortNoise_SSE2(N2, &seed_SSE2, &lower_band_noise_SSE2,
+                                &upper_band_noise_SSE2);
+      for (size_t j = 0; j < lower_band_noise.re.size(); ++j) {
+        EXPECT_NEAR(lower_band_noise.re[j], lower_band_noise_SSE2.re[j],
+                    0.00001f);
+        EXPECT_NEAR(upper_band_noise.re[j], upper_band_noise_SSE2.re[j],
+                    0.00001f);
+      }
+      for (size_t j = 1; j < lower_band_noise.re.size() - 1; ++j) {
+        EXPECT_NEAR(lower_band_noise.im[j], lower_band_noise_SSE2.im[j],
+                    0.00001f);
+        EXPECT_NEAR(upper_band_noise.im[j], upper_band_noise_SSE2.im[j],
+                    0.00001f);
+      }
+    }
+  }
+}
+
+#endif
+
+TEST(ComfortNoiseGenerator, CorrectLevel) {
+  ComfortNoiseGenerator cng(DetectOptimization());
+  AecState aec_state(EchoCanceller3Config{});
+
+  std::array<float, kFftLengthBy2Plus1> N2;
+  N2.fill(1000.f * 1000.f);
+
+  FftData n_lower;
+  FftData n_upper;
+  n_lower.re.fill(0.f);
+  n_lower.im.fill(0.f);
+  n_upper.re.fill(0.f);
+  n_upper.im.fill(0.f);
+
+  // Ensure instantaneous updata to nonzero noise.
+  cng.Compute(aec_state, N2, &n_lower, &n_upper);
+  EXPECT_LT(0.f, Power(n_lower));
+  EXPECT_LT(0.f, Power(n_upper));
+
+  for (int k = 0; k < 10000; ++k) {
+    cng.Compute(aec_state, N2, &n_lower, &n_upper);
+  }
+  EXPECT_NEAR(N2[0], Power(n_lower), N2[0] / 10.f);
+  EXPECT_NEAR(N2[0], Power(n_upper), N2[0] / 10.f);
+}
+
+}  // namespace aec3
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/decimator.cc b/modules/audio_processing/aec3/decimator.cc
new file mode 100644
index 0000000..135a771
--- /dev/null
+++ b/modules/audio_processing/aec3/decimator.cc
@@ -0,0 +1,70 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/decimator.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+// b, a = signal.butter(2, 3400/8000.0, 'lowpass', analog=False) which are the
+// same as b, a = signal.butter(2, 1700/4000.0, 'lowpass', analog=False).
+const CascadedBiQuadFilter::BiQuadCoefficients kLowPassFilterCoefficients2 = {
+    {0.22711796f, 0.45423593f, 0.22711796f},
+    {-0.27666461f, 0.18513647f}};
+constexpr int kNumFilters2 = 3;
+
+// b, a = signal.butter(2, 1500/8000.0, 'lowpass', analog=False) which are the
+// same as b, a = signal.butter(2, 75/4000.0, 'lowpass', analog=False).
+const CascadedBiQuadFilter::BiQuadCoefficients kLowPassFilterCoefficients4 = {
+    {0.0179f, 0.0357f, 0.0179f},
+    {-1.5879f, 0.6594f}};
+constexpr int kNumFilters4 = 3;
+
+// b, a = signal.butter(2, 800/8000.0, 'lowpass', analog=False) which are the
+// same as b, a = signal.butter(2, 400/4000.0, 'lowpass', analog=False).
+const CascadedBiQuadFilter::BiQuadCoefficients kLowPassFilterCoefficients8 = {
+    {0.02008337f, 0.04016673f, 0.02008337f},
+    {-1.56101808f, 0.64135154f}};
+constexpr int kNumFilters8 = 4;
+
+}  // namespace
+
+Decimator::Decimator(size_t down_sampling_factor)
+    : down_sampling_factor_(down_sampling_factor),
+      low_pass_filter_(
+          down_sampling_factor_ == 4
+              ? kLowPassFilterCoefficients4
+              : (down_sampling_factor_ == 8 ? kLowPassFilterCoefficients8
+                                            : kLowPassFilterCoefficients2),
+          down_sampling_factor_ == 4
+              ? kNumFilters4
+              : (down_sampling_factor_ == 8 ? kNumFilters8 : kNumFilters2)) {
+  RTC_DCHECK(down_sampling_factor_ == 2 || down_sampling_factor_ == 4 ||
+             down_sampling_factor_ == 8);
+}
+
+void Decimator::Decimate(rtc::ArrayView<const float> in,
+                         rtc::ArrayView<float> out) {
+  RTC_DCHECK_EQ(kBlockSize, in.size());
+  RTC_DCHECK_EQ(kBlockSize / down_sampling_factor_, out.size());
+  std::array<float, kBlockSize> x;
+
+  // Limit the frequency content of the signal to avoid aliasing.
+  low_pass_filter_.Process(in, x);
+
+  // Downsample the signal.
+  for (size_t j = 0, k = 0; j < out.size(); ++j, k += down_sampling_factor_) {
+    RTC_DCHECK_GT(kBlockSize, k);
+    out[j] = x[k];
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/decimator.h b/modules/audio_processing/aec3/decimator.h
new file mode 100644
index 0000000..7418a26
--- /dev/null
+++ b/modules/audio_processing/aec3/decimator.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_DECIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_DECIMATOR_H_
+
+#include <array>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/cascaded_biquad_filter.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Provides functionality for decimating a signal.
+class Decimator {
+ public:
+  explicit Decimator(size_t down_sampling_factor);
+
+  // Downsamples the signal.
+  void Decimate(rtc::ArrayView<const float> in, rtc::ArrayView<float> out);
+
+ private:
+  const size_t down_sampling_factor_;
+  CascadedBiQuadFilter low_pass_filter_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(Decimator);
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_DECIMATOR_H_
diff --git a/modules/audio_processing/aec3/decimator_unittest.cc b/modules/audio_processing/aec3/decimator_unittest.cc
new file mode 100644
index 0000000..e77a990
--- /dev/null
+++ b/modules/audio_processing/aec3/decimator_unittest.cc
@@ -0,0 +1,148 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/decimator.h"
+
+#include <math.h>
+#include <algorithm>
+#include <array>
+#include <numeric>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+std::string ProduceDebugText(int sample_rate_hz) {
+  std::ostringstream ss;
+  ss << "Sample rate: " << sample_rate_hz;
+  return ss.str();
+}
+
+constexpr size_t kDownSamplingFactors[] = {2, 4, 8};
+constexpr float kPi = 3.141592f;
+constexpr size_t kNumStartupBlocks = 50;
+constexpr size_t kNumBlocks = 1000;
+
+void ProduceDecimatedSinusoidalOutputPower(int sample_rate_hz,
+                                           size_t down_sampling_factor,
+                                           float sinusoidal_frequency_hz,
+                                           float* input_power,
+                                           float* output_power) {
+  float input[kBlockSize * kNumBlocks];
+  const size_t sub_block_size = kBlockSize / down_sampling_factor;
+
+  // Produce a sinusoid of the specified frequency.
+  for (size_t k = 0; k < kBlockSize * kNumBlocks; ++k) {
+    input[k] =
+        32767.f * sin(2.f * kPi * sinusoidal_frequency_hz * k / sample_rate_hz);
+  }
+
+  Decimator decimator(down_sampling_factor);
+  std::vector<float> output(sub_block_size * kNumBlocks);
+
+  for (size_t k = 0; k < kNumBlocks; ++k) {
+    std::vector<float> sub_block(sub_block_size);
+
+    decimator.Decimate(
+        rtc::ArrayView<const float>(&input[k * kBlockSize], kBlockSize),
+        sub_block);
+
+    std::copy(sub_block.begin(), sub_block.end(),
+              output.begin() + k * sub_block_size);
+  }
+
+  ASSERT_GT(kNumBlocks, kNumStartupBlocks);
+  rtc::ArrayView<const float> input_to_evaluate(
+      &input[kNumStartupBlocks * kBlockSize],
+      (kNumBlocks - kNumStartupBlocks) * kBlockSize);
+  rtc::ArrayView<const float> output_to_evaluate(
+      &output[kNumStartupBlocks * sub_block_size],
+      (kNumBlocks - kNumStartupBlocks) * sub_block_size);
+  *input_power =
+      std::inner_product(input_to_evaluate.begin(), input_to_evaluate.end(),
+                         input_to_evaluate.begin(), 0.f) /
+      input_to_evaluate.size();
+  *output_power =
+      std::inner_product(output_to_evaluate.begin(), output_to_evaluate.end(),
+                         output_to_evaluate.begin(), 0.f) /
+      output_to_evaluate.size();
+}
+
+}  // namespace
+
+// Verifies that there is little aliasing from upper frequencies in the
+// downsampling.
+TEST(Decimator, NoLeakageFromUpperFrequencies) {
+  float input_power;
+  float output_power;
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    for (auto down_sampling_factor : kDownSamplingFactors) {
+      ProduceDebugText(rate);
+      ProduceDecimatedSinusoidalOutputPower(rate, down_sampling_factor,
+                                            3.f / 8.f * rate, &input_power,
+                                            &output_power);
+      EXPECT_GT(0.0001f * input_power, output_power);
+    }
+  }
+}
+
+// Verifies that the impact of low-frequency content is small during the
+// downsampling.
+TEST(Decimator, NoImpactOnLowerFrequencies) {
+  float input_power;
+  float output_power;
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    for (auto down_sampling_factor : kDownSamplingFactors) {
+      ProduceDebugText(rate);
+      ProduceDecimatedSinusoidalOutputPower(rate, down_sampling_factor, 200.f,
+                                            &input_power, &output_power);
+      EXPECT_LT(0.7f * input_power, output_power);
+    }
+  }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// Verifies the check for the input size.
+TEST(Decimator, WrongInputSize) {
+  Decimator decimator(4);
+  std::vector<float> x(std::vector<float>(kBlockSize - 1, 0.f));
+  std::array<float, kBlockSize / 4> x_downsampled;
+  EXPECT_DEATH(decimator.Decimate(x, x_downsampled), "");
+}
+
+// Verifies the check for non-null output parameter.
+TEST(Decimator, NullOutput) {
+  Decimator decimator(4);
+  std::vector<float> x(std::vector<float>(kBlockSize, 0.f));
+  EXPECT_DEATH(decimator.Decimate(x, nullptr), "");
+}
+
+// Verifies the check for the output size.
+TEST(Decimator, WrongOutputSize) {
+  Decimator decimator(4);
+  std::vector<float> x(std::vector<float>(kBlockSize, 0.f));
+  std::array<float, kBlockSize / 4 - 1> x_downsampled;
+  EXPECT_DEATH(decimator.Decimate(x, x_downsampled), "");
+}
+
+// Verifies the check for the correct downsampling factor.
+TEST(Decimator, CorrectDownSamplingFactor) {
+  EXPECT_DEATH(Decimator(3), "");
+}
+
+#endif
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/delay_estimate.h b/modules/audio_processing/aec3/delay_estimate.h
new file mode 100644
index 0000000..ea5dd27
--- /dev/null
+++ b/modules/audio_processing/aec3/delay_estimate.h
@@ -0,0 +1,31 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_DELAY_ESTIMATE_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_DELAY_ESTIMATE_H_
+
+namespace webrtc {
+
+// Stores delay_estimates.
+struct DelayEstimate {
+  enum class Quality { kCoarse, kRefined };
+
+  DelayEstimate(Quality quality, size_t delay)
+      : quality(quality), delay(delay) {}
+
+  Quality quality;
+  size_t delay;
+  size_t blocks_since_last_change = 0;
+  size_t blocks_since_last_update = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_DELAY_ESTIMATE_H_
diff --git a/modules/audio_processing/aec3/downsampled_render_buffer.cc b/modules/audio_processing/aec3/downsampled_render_buffer.cc
new file mode 100644
index 0000000..df0af6e
--- /dev/null
+++ b/modules/audio_processing/aec3/downsampled_render_buffer.cc
@@ -0,0 +1,23 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/downsampled_render_buffer.h"
+
+namespace webrtc {
+
+DownsampledRenderBuffer::DownsampledRenderBuffer(size_t downsampled_buffer_size)
+    : size(static_cast<int>(downsampled_buffer_size)),
+      buffer(downsampled_buffer_size, 0.f) {
+  std::fill(buffer.begin(), buffer.end(), 0.f);
+}
+
+DownsampledRenderBuffer::~DownsampledRenderBuffer() = default;
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/downsampled_render_buffer.h b/modules/audio_processing/aec3/downsampled_render_buffer.h
new file mode 100644
index 0000000..9439496
--- /dev/null
+++ b/modules/audio_processing/aec3/downsampled_render_buffer.h
@@ -0,0 +1,57 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_DOWNSAMPLED_RENDER_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_DOWNSAMPLED_RENDER_BUFFER_H_
+
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Holds the circular buffer of the downsampled render data.
+struct DownsampledRenderBuffer {
+  explicit DownsampledRenderBuffer(size_t downsampled_buffer_size);
+  ~DownsampledRenderBuffer();
+
+  int IncIndex(int index) const {
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    return index < size - 1 ? index + 1 : 0;
+  }
+
+  int DecIndex(int index) const {
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    return index > 0 ? index - 1 : size - 1;
+  }
+
+  int OffsetIndex(int index, int offset) const {
+    RTC_DCHECK_GE(buffer.size(), offset);
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    return (size + index + offset) % size;
+  }
+
+  void UpdateWriteIndex(int offset) { write = OffsetIndex(write, offset); }
+  void IncWriteIndex() { write = IncIndex(write); }
+  void DecWriteIndex() { write = DecIndex(write); }
+  void UpdateReadIndex(int offset) { read = OffsetIndex(read, offset); }
+  void IncReadIndex() { read = IncIndex(read); }
+  void DecReadIndex() { read = DecIndex(read); }
+
+  const int size;
+  std::vector<float> buffer;
+  int write = 0;
+  int read = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_DOWNSAMPLED_RENDER_BUFFER_H_
diff --git a/modules/audio_processing/aec3/echo_canceller3.cc b/modules/audio_processing/aec3/echo_canceller3.cc
new file mode 100644
index 0000000..f0cbbc8
--- /dev/null
+++ b/modules/audio_processing/aec3/echo_canceller3.cc
@@ -0,0 +1,362 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/echo_canceller3.h"
+
+#include <sstream>
+
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/atomicops.h"
+
+namespace webrtc {
+
+namespace {
+
+enum class EchoCanceller3ApiCall { kCapture, kRender };
+
+bool DetectSaturation(rtc::ArrayView<const float> y) {
+  for (auto y_k : y) {
+    if (y_k >= 32700.0f || y_k <= -32700.0f) {
+      return true;
+    }
+  }
+  return false;
+}
+
+void FillSubFrameView(AudioBuffer* frame,
+                      size_t sub_frame_index,
+                      std::vector<rtc::ArrayView<float>>* sub_frame_view) {
+  RTC_DCHECK_GE(1, sub_frame_index);
+  RTC_DCHECK_LE(0, sub_frame_index);
+  RTC_DCHECK_EQ(frame->num_bands(), sub_frame_view->size());
+  for (size_t k = 0; k < sub_frame_view->size(); ++k) {
+    (*sub_frame_view)[k] = rtc::ArrayView<float>(
+        &frame->split_bands_f(0)[k][sub_frame_index * kSubFrameLength],
+        kSubFrameLength);
+  }
+}
+
+void FillSubFrameView(std::vector<std::vector<float>>* frame,
+                      size_t sub_frame_index,
+                      std::vector<rtc::ArrayView<float>>* sub_frame_view) {
+  RTC_DCHECK_GE(1, sub_frame_index);
+  RTC_DCHECK_EQ(frame->size(), sub_frame_view->size());
+  for (size_t k = 0; k < frame->size(); ++k) {
+    (*sub_frame_view)[k] = rtc::ArrayView<float>(
+        &(*frame)[k][sub_frame_index * kSubFrameLength], kSubFrameLength);
+  }
+}
+
+void ProcessCaptureFrameContent(
+    AudioBuffer* capture,
+    bool level_change,
+    bool saturated_microphone_signal,
+    size_t sub_frame_index,
+    FrameBlocker* capture_blocker,
+    BlockFramer* output_framer,
+    BlockProcessor* block_processor,
+    std::vector<std::vector<float>>* block,
+    std::vector<rtc::ArrayView<float>>* sub_frame_view) {
+  FillSubFrameView(capture, sub_frame_index, sub_frame_view);
+  capture_blocker->InsertSubFrameAndExtractBlock(*sub_frame_view, block);
+  block_processor->ProcessCapture(level_change, saturated_microphone_signal,
+                                  block);
+  output_framer->InsertBlockAndExtractSubFrame(*block, sub_frame_view);
+}
+
+void ProcessRemainingCaptureFrameContent(
+    bool level_change,
+    bool saturated_microphone_signal,
+    FrameBlocker* capture_blocker,
+    BlockFramer* output_framer,
+    BlockProcessor* block_processor,
+    std::vector<std::vector<float>>* block) {
+  if (!capture_blocker->IsBlockAvailable()) {
+    return;
+  }
+
+  capture_blocker->ExtractBlock(block);
+  block_processor->ProcessCapture(level_change, saturated_microphone_signal,
+                                  block);
+  output_framer->InsertBlock(*block);
+}
+
+void BufferRenderFrameContent(
+    std::vector<std::vector<float>>* render_frame,
+    size_t sub_frame_index,
+    FrameBlocker* render_blocker,
+    BlockProcessor* block_processor,
+    std::vector<std::vector<float>>* block,
+    std::vector<rtc::ArrayView<float>>* sub_frame_view) {
+  FillSubFrameView(render_frame, sub_frame_index, sub_frame_view);
+  render_blocker->InsertSubFrameAndExtractBlock(*sub_frame_view, block);
+  block_processor->BufferRender(*block);
+}
+
+void BufferRemainingRenderFrameContent(FrameBlocker* render_blocker,
+                                       BlockProcessor* block_processor,
+                                       std::vector<std::vector<float>>* block) {
+  if (!render_blocker->IsBlockAvailable()) {
+    return;
+  }
+  render_blocker->ExtractBlock(block);
+  block_processor->BufferRender(*block);
+}
+
+void CopyBufferIntoFrame(AudioBuffer* buffer,
+                         size_t num_bands,
+                         size_t frame_length,
+                         std::vector<std::vector<float>>* frame) {
+  RTC_DCHECK_EQ(num_bands, frame->size());
+  RTC_DCHECK_EQ(frame_length, (*frame)[0].size());
+  for (size_t k = 0; k < num_bands; ++k) {
+    rtc::ArrayView<float> buffer_view(&buffer->split_bands_f(0)[k][0],
+                                      frame_length);
+    std::copy(buffer_view.begin(), buffer_view.end(), (*frame)[k].begin());
+  }
+}
+
+// [B,A] = butter(2,100/4000,'high')
+const CascadedBiQuadFilter::BiQuadCoefficients
+    kHighPassFilterCoefficients_8kHz = {{0.94598f, -1.89195f, 0.94598f},
+                                        {-1.88903f, 0.89487f}};
+const int kNumberOfHighPassBiQuads_8kHz = 1;
+
+// [B,A] = butter(2,100/8000,'high')
+const CascadedBiQuadFilter::BiQuadCoefficients
+    kHighPassFilterCoefficients_16kHz = {{0.97261f, -1.94523f, 0.97261f},
+                                         {-1.94448f, 0.94598f}};
+const int kNumberOfHighPassBiQuads_16kHz = 1;
+
+}  // namespace
+
+class EchoCanceller3::RenderWriter {
+ public:
+  RenderWriter(ApmDataDumper* data_dumper,
+               SwapQueue<std::vector<std::vector<float>>,
+                         Aec3RenderQueueItemVerifier>* render_transfer_queue,
+               std::unique_ptr<CascadedBiQuadFilter> render_highpass_filter,
+               int sample_rate_hz,
+               int frame_length,
+               int num_bands);
+  ~RenderWriter();
+  void Insert(AudioBuffer* input);
+
+ private:
+  ApmDataDumper* data_dumper_;
+  const int sample_rate_hz_;
+  const size_t frame_length_;
+  const int num_bands_;
+  std::unique_ptr<CascadedBiQuadFilter> render_highpass_filter_;
+  std::vector<std::vector<float>> render_queue_input_frame_;
+  SwapQueue<std::vector<std::vector<float>>, Aec3RenderQueueItemVerifier>*
+      render_transfer_queue_;
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RenderWriter);
+};
+
+EchoCanceller3::RenderWriter::RenderWriter(
+    ApmDataDumper* data_dumper,
+    SwapQueue<std::vector<std::vector<float>>, Aec3RenderQueueItemVerifier>*
+        render_transfer_queue,
+    std::unique_ptr<CascadedBiQuadFilter> render_highpass_filter,
+    int sample_rate_hz,
+    int frame_length,
+    int num_bands)
+    : data_dumper_(data_dumper),
+      sample_rate_hz_(sample_rate_hz),
+      frame_length_(frame_length),
+      num_bands_(num_bands),
+      render_highpass_filter_(std::move(render_highpass_filter)),
+      render_queue_input_frame_(num_bands_,
+                                std::vector<float>(frame_length_, 0.f)),
+      render_transfer_queue_(render_transfer_queue) {
+  RTC_DCHECK(data_dumper);
+}
+
+EchoCanceller3::RenderWriter::~RenderWriter() = default;
+
+void EchoCanceller3::RenderWriter::Insert(AudioBuffer* input) {
+  RTC_DCHECK_EQ(1, input->num_channels());
+  RTC_DCHECK_EQ(frame_length_, input->num_frames_per_band());
+  RTC_DCHECK_EQ(num_bands_, input->num_bands());
+
+  // TODO(bugs.webrtc.org/8759) Temporary work-around.
+  if (num_bands_ != static_cast<int>(input->num_bands()))
+    return;
+
+  data_dumper_->DumpWav("aec3_render_input", frame_length_,
+                        &input->split_bands_f(0)[0][0],
+                        LowestBandRate(sample_rate_hz_), 1);
+
+  CopyBufferIntoFrame(input, num_bands_, frame_length_,
+                      &render_queue_input_frame_);
+
+  if (render_highpass_filter_) {
+    render_highpass_filter_->Process(render_queue_input_frame_[0]);
+  }
+
+  static_cast<void>(render_transfer_queue_->Insert(&render_queue_input_frame_));
+}
+
+int EchoCanceller3::instance_count_ = 0;
+
+EchoCanceller3::EchoCanceller3(const EchoCanceller3Config& config,
+                               int sample_rate_hz,
+                               bool use_highpass_filter)
+    : EchoCanceller3(config,
+                     sample_rate_hz,
+                     use_highpass_filter,
+                     std::unique_ptr<BlockProcessor>(
+                         BlockProcessor::Create(config, sample_rate_hz))) {}
+EchoCanceller3::EchoCanceller3(const EchoCanceller3Config& config,
+                               int sample_rate_hz,
+                               bool use_highpass_filter,
+                               std::unique_ptr<BlockProcessor> block_processor)
+    : data_dumper_(
+          new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))),
+      sample_rate_hz_(sample_rate_hz),
+      num_bands_(NumBandsForRate(sample_rate_hz_)),
+      frame_length_(rtc::CheckedDivExact(LowestBandRate(sample_rate_hz_), 100)),
+      output_framer_(num_bands_),
+      capture_blocker_(num_bands_),
+      render_blocker_(num_bands_),
+      render_transfer_queue_(
+          kRenderTransferQueueSizeFrames,
+          std::vector<std::vector<float>>(
+              num_bands_,
+              std::vector<float>(frame_length_, 0.f)),
+          Aec3RenderQueueItemVerifier(num_bands_, frame_length_)),
+      block_processor_(std::move(block_processor)),
+      render_queue_output_frame_(num_bands_,
+                                 std::vector<float>(frame_length_, 0.f)),
+      block_(num_bands_, std::vector<float>(kBlockSize, 0.f)),
+      sub_frame_view_(num_bands_) {
+  RTC_DCHECK(ValidFullBandRate(sample_rate_hz_));
+
+  std::unique_ptr<CascadedBiQuadFilter> render_highpass_filter;
+  if (use_highpass_filter) {
+    render_highpass_filter.reset(new CascadedBiQuadFilter(
+        sample_rate_hz_ == 8000 ? kHighPassFilterCoefficients_8kHz
+                                : kHighPassFilterCoefficients_16kHz,
+        sample_rate_hz_ == 8000 ? kNumberOfHighPassBiQuads_8kHz
+                                : kNumberOfHighPassBiQuads_16kHz));
+    capture_highpass_filter_.reset(new CascadedBiQuadFilter(
+        sample_rate_hz_ == 8000 ? kHighPassFilterCoefficients_8kHz
+                                : kHighPassFilterCoefficients_16kHz,
+        sample_rate_hz_ == 8000 ? kNumberOfHighPassBiQuads_8kHz
+                                : kNumberOfHighPassBiQuads_16kHz));
+  }
+
+  render_writer_.reset(
+      new RenderWriter(data_dumper_.get(), &render_transfer_queue_,
+                       std::move(render_highpass_filter), sample_rate_hz_,
+                       frame_length_, num_bands_));
+
+  RTC_DCHECK_EQ(num_bands_, std::max(sample_rate_hz_, 16000) / 16000);
+  RTC_DCHECK_GE(kMaxNumBands, num_bands_);
+}
+
+EchoCanceller3::~EchoCanceller3() = default;
+
+void EchoCanceller3::AnalyzeRender(AudioBuffer* render) {
+  RTC_DCHECK_RUNS_SERIALIZED(&render_race_checker_);
+  RTC_DCHECK(render);
+  data_dumper_->DumpRaw("aec3_call_order",
+                        static_cast<int>(EchoCanceller3ApiCall::kRender));
+
+  return render_writer_->Insert(render);
+}
+
+void EchoCanceller3::AnalyzeCapture(AudioBuffer* capture) {
+  RTC_DCHECK_RUNS_SERIALIZED(&capture_race_checker_);
+  RTC_DCHECK(capture);
+  data_dumper_->DumpWav("aec3_capture_analyze_input", capture->num_frames(),
+                        capture->channels_f()[0], sample_rate_hz_, 1);
+
+  saturated_microphone_signal_ = false;
+  for (size_t k = 0; k < capture->num_channels(); ++k) {
+    saturated_microphone_signal_ |=
+        DetectSaturation(rtc::ArrayView<const float>(capture->channels_f()[k],
+                                                     capture->num_frames()));
+    if (saturated_microphone_signal_) {
+      break;
+    }
+  }
+}
+
+void EchoCanceller3::ProcessCapture(AudioBuffer* capture, bool level_change) {
+  RTC_DCHECK_RUNS_SERIALIZED(&capture_race_checker_);
+  RTC_DCHECK(capture);
+  RTC_DCHECK_EQ(1u, capture->num_channels());
+  RTC_DCHECK_EQ(num_bands_, capture->num_bands());
+  RTC_DCHECK_EQ(frame_length_, capture->num_frames_per_band());
+  data_dumper_->DumpRaw("aec3_call_order",
+                        static_cast<int>(EchoCanceller3ApiCall::kCapture));
+
+  rtc::ArrayView<float> capture_lower_band =
+      rtc::ArrayView<float>(&capture->split_bands_f(0)[0][0], frame_length_);
+
+  data_dumper_->DumpWav("aec3_capture_input", capture_lower_band,
+                        LowestBandRate(sample_rate_hz_), 1);
+
+  EmptyRenderQueue();
+
+  if (capture_highpass_filter_) {
+    capture_highpass_filter_->Process(capture_lower_band);
+  }
+
+  ProcessCaptureFrameContent(
+      capture, level_change, saturated_microphone_signal_, 0, &capture_blocker_,
+      &output_framer_, block_processor_.get(), &block_, &sub_frame_view_);
+
+  if (sample_rate_hz_ != 8000) {
+    ProcessCaptureFrameContent(
+        capture, level_change, saturated_microphone_signal_, 1,
+        &capture_blocker_, &output_framer_, block_processor_.get(), &block_,
+        &sub_frame_view_);
+  }
+
+  ProcessRemainingCaptureFrameContent(
+      level_change, saturated_microphone_signal_, &capture_blocker_,
+      &output_framer_, block_processor_.get(), &block_);
+
+  data_dumper_->DumpWav("aec3_capture_output", frame_length_,
+                        &capture->split_bands_f(0)[0][0],
+                        LowestBandRate(sample_rate_hz_), 1);
+}
+
+EchoControl::Metrics EchoCanceller3::GetMetrics() const {
+  RTC_DCHECK_RUNS_SERIALIZED(&capture_race_checker_);
+  Metrics metrics;
+  block_processor_->GetMetrics(&metrics);
+  return metrics;
+}
+
+void EchoCanceller3::EmptyRenderQueue() {
+  RTC_DCHECK_RUNS_SERIALIZED(&capture_race_checker_);
+  bool frame_to_buffer =
+      render_transfer_queue_.Remove(&render_queue_output_frame_);
+  while (frame_to_buffer) {
+    BufferRenderFrameContent(&render_queue_output_frame_, 0, &render_blocker_,
+                             block_processor_.get(), &block_, &sub_frame_view_);
+
+    if (sample_rate_hz_ != 8000) {
+      BufferRenderFrameContent(&render_queue_output_frame_, 1, &render_blocker_,
+                               block_processor_.get(), &block_,
+                               &sub_frame_view_);
+    }
+
+    BufferRemainingRenderFrameContent(&render_blocker_, block_processor_.get(),
+                                      &block_);
+
+    frame_to_buffer =
+        render_transfer_queue_.Remove(&render_queue_output_frame_);
+  }
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/echo_canceller3.h b/modules/audio_processing/aec3/echo_canceller3.h
new file mode 100644
index 0000000..8658814
--- /dev/null
+++ b/modules/audio_processing/aec3/echo_canceller3.h
@@ -0,0 +1,135 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ECHO_CANCELLER3_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ECHO_CANCELLER3_H_
+
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/block_framer.h"
+#include "modules/audio_processing/aec3/block_processor.h"
+#include "modules/audio_processing/aec3/cascaded_biquad_filter.h"
+#include "modules/audio_processing/aec3/frame_blocker.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/swap_queue.h"
+
+namespace webrtc {
+
+// Functor for verifying the invariance of the frames being put into the render
+// queue.
+class Aec3RenderQueueItemVerifier {
+ public:
+  explicit Aec3RenderQueueItemVerifier(size_t num_bands, size_t frame_length)
+      : num_bands_(num_bands), frame_length_(frame_length) {}
+
+  bool operator()(const std::vector<std::vector<float>>& v) const {
+    if (v.size() != num_bands_) {
+      return false;
+    }
+    for (const auto& v_k : v) {
+      if (v_k.size() != frame_length_) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+ private:
+  const size_t num_bands_;
+  const size_t frame_length_;
+};
+
+// Main class for the echo canceller3.
+// It does 4 things:
+// -Receives 10 ms frames of band-split audio.
+// -Optionally applies an anti-hum (high-pass) filter on the
+// received signals.
+// -Provides the lower level echo canceller functionality with
+// blocks of 64 samples of audio data.
+// -Partially handles the jitter in the render and capture API
+// call sequence.
+//
+// The class is supposed to be used in a non-concurrent manner apart from the
+// AnalyzeRender call which can be called concurrently with the other methods.
+class EchoCanceller3 : public EchoControl {
+ public:
+  // Normal c-tor to use.
+  EchoCanceller3(const EchoCanceller3Config& config,
+                 int sample_rate_hz,
+                 bool use_highpass_filter);
+  // Testing c-tor that is used only for testing purposes.
+  EchoCanceller3(const EchoCanceller3Config& config,
+                 int sample_rate_hz,
+                 bool use_highpass_filter,
+                 std::unique_ptr<BlockProcessor> block_processor);
+  ~EchoCanceller3() override;
+  // Analyzes and stores an internal copy of the split-band domain render
+  // signal.
+  void AnalyzeRender(AudioBuffer* farend) override;
+  // Analyzes the full-band domain capture signal to detect signal saturation.
+  void AnalyzeCapture(AudioBuffer* capture) override;
+  // Processes the split-band domain capture signal in order to remove any echo
+  // present in the signal.
+  void ProcessCapture(AudioBuffer* capture, bool level_change) override;
+  // Collect current metrics from the echo canceller.
+  Metrics GetMetrics() const override;
+
+  // Signals whether an external detector has detected echo leakage from the
+  // echo canceller.
+  // Note that in the case echo leakage has been flagged, it should be unflagged
+  // once it is no longer occurring.
+  void UpdateEchoLeakageStatus(bool leakage_detected) {
+    RTC_DCHECK_RUNS_SERIALIZED(&capture_race_checker_);
+    block_processor_->UpdateEchoLeakageStatus(leakage_detected);
+  }
+
+ private:
+  class RenderWriter;
+
+  // Empties the render SwapQueue.
+  void EmptyRenderQueue();
+
+  rtc::RaceChecker capture_race_checker_;
+  rtc::RaceChecker render_race_checker_;
+
+  // State that is accessed by the AnalyzeRender call.
+  std::unique_ptr<RenderWriter> render_writer_
+      RTC_GUARDED_BY(render_race_checker_);
+
+  // State that may be accessed by the capture thread.
+  static int instance_count_;
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  const int sample_rate_hz_;
+  const int num_bands_;
+  const size_t frame_length_;
+  BlockFramer output_framer_ RTC_GUARDED_BY(capture_race_checker_);
+  FrameBlocker capture_blocker_ RTC_GUARDED_BY(capture_race_checker_);
+  FrameBlocker render_blocker_ RTC_GUARDED_BY(capture_race_checker_);
+  SwapQueue<std::vector<std::vector<float>>, Aec3RenderQueueItemVerifier>
+      render_transfer_queue_;
+  std::unique_ptr<BlockProcessor> block_processor_
+      RTC_GUARDED_BY(capture_race_checker_);
+  std::vector<std::vector<float>> render_queue_output_frame_
+      RTC_GUARDED_BY(capture_race_checker_);
+  std::unique_ptr<CascadedBiQuadFilter> capture_highpass_filter_
+      RTC_GUARDED_BY(capture_race_checker_);
+  bool saturated_microphone_signal_ RTC_GUARDED_BY(capture_race_checker_) =
+      false;
+  std::vector<std::vector<float>> block_ RTC_GUARDED_BY(capture_race_checker_);
+  std::vector<rtc::ArrayView<float>> sub_frame_view_
+      RTC_GUARDED_BY(capture_race_checker_);
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(EchoCanceller3);
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_ECHO_CANCELLER3_H_
diff --git a/modules/audio_processing/aec3/echo_canceller3_unittest.cc b/modules/audio_processing/aec3/echo_canceller3_unittest.cc
new file mode 100644
index 0000000..d4ad4f6
--- /dev/null
+++ b/modules/audio_processing/aec3/echo_canceller3_unittest.cc
@@ -0,0 +1,744 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/echo_canceller3.h"
+
+#include <deque>
+#include <memory>
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/block_processor.h"
+#include "modules/audio_processing/aec3/frame_blocker.h"
+#include "modules/audio_processing/aec3/mock/mock_block_processor.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+using testing::StrictMock;
+using testing::_;
+
+// Populates the frame with linearly increasing sample values for each band,
+// with a band-specific offset, in order to allow simple bitexactness
+// verification for each band.
+void PopulateInputFrame(size_t frame_length,
+                        size_t num_bands,
+                        size_t frame_index,
+                        float* const* frame,
+                        int offset) {
+  for (size_t k = 0; k < num_bands; ++k) {
+    for (size_t i = 0; i < frame_length; ++i) {
+      float value = static_cast<int>(frame_index * frame_length + i) + offset;
+      frame[k][i] = (value > 0 ? 5000 * k + value : 0);
+    }
+  }
+}
+
+// Populates the frame with linearly increasing sample values.
+void PopulateInputFrame(size_t frame_length,
+                        size_t frame_index,
+                        float* frame,
+                        int offset) {
+  for (size_t i = 0; i < frame_length; ++i) {
+    float value = static_cast<int>(frame_index * frame_length + i) + offset;
+    frame[i] = std::max(value, 0.f);
+  }
+}
+
+// Verifies the that samples in the output frame are identical to the samples
+// that were produced for the input frame, with an offset in order to compensate
+// for buffering delays.
+bool VerifyOutputFrameBitexactness(size_t frame_length,
+                                   size_t num_bands,
+                                   size_t frame_index,
+                                   const float* const* frame,
+                                   int offset) {
+  float reference_frame_data[kMaxNumBands][2 * kSubFrameLength];
+  float* reference_frame[kMaxNumBands];
+  for (size_t k = 0; k < num_bands; ++k) {
+    reference_frame[k] = &reference_frame_data[k][0];
+  }
+
+  PopulateInputFrame(frame_length, num_bands, frame_index, reference_frame,
+                     offset);
+  for (size_t k = 0; k < num_bands; ++k) {
+    for (size_t i = 0; i < frame_length; ++i) {
+      if (reference_frame[k][i] != frame[k][i]) {
+        return false;
+      }
+    }
+  }
+
+  return true;
+}
+
+// Class for testing that the capture data is properly received by the block
+// processor and that the processor data is properly passed to the
+// EchoCanceller3 output.
+class CaptureTransportVerificationProcessor : public BlockProcessor {
+ public:
+  explicit CaptureTransportVerificationProcessor(size_t num_bands) {}
+  ~CaptureTransportVerificationProcessor() override = default;
+
+  void ProcessCapture(bool level_change,
+                      bool saturated_microphone_signal,
+                      std::vector<std::vector<float>>* capture_block) override {
+  }
+
+  void BufferRender(const std::vector<std::vector<float>>& block) override {}
+
+  void UpdateEchoLeakageStatus(bool leakage_detected) override {}
+
+  void GetMetrics(EchoControl::Metrics* metrics) const override {}
+
+ private:
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(CaptureTransportVerificationProcessor);
+};
+
+// Class for testing that the render data is properly received by the block
+// processor.
+class RenderTransportVerificationProcessor : public BlockProcessor {
+ public:
+  explicit RenderTransportVerificationProcessor(size_t num_bands) {}
+  ~RenderTransportVerificationProcessor() override = default;
+
+  void ProcessCapture(bool level_change,
+                      bool saturated_microphone_signal,
+                      std::vector<std::vector<float>>* capture_block) override {
+    std::vector<std::vector<float>> render_block =
+        received_render_blocks_.front();
+    received_render_blocks_.pop_front();
+    capture_block->swap(render_block);
+  }
+
+  void BufferRender(const std::vector<std::vector<float>>& block) override {
+    received_render_blocks_.push_back(block);
+  }
+
+  void UpdateEchoLeakageStatus(bool leakage_detected) override {}
+
+  void GetMetrics(EchoControl::Metrics* metrics) const override {}
+
+ private:
+  std::deque<std::vector<std::vector<float>>> received_render_blocks_;
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RenderTransportVerificationProcessor);
+};
+
+class EchoCanceller3Tester {
+ public:
+  explicit EchoCanceller3Tester(int sample_rate_hz)
+      : sample_rate_hz_(sample_rate_hz),
+        num_bands_(NumBandsForRate(sample_rate_hz_)),
+        frame_length_(sample_rate_hz_ == 8000 ? 80 : 160),
+        fullband_frame_length_(rtc::CheckedDivExact(sample_rate_hz_, 100)),
+        capture_buffer_(fullband_frame_length_,
+                        1,
+                        fullband_frame_length_,
+                        1,
+                        fullband_frame_length_),
+        render_buffer_(fullband_frame_length_,
+                       1,
+                       fullband_frame_length_,
+                       1,
+                       fullband_frame_length_) {}
+
+  // Verifies that the capture data is properly received by the block processor
+  // and that the processor data is properly passed to the EchoCanceller3
+  // output.
+  void RunCaptureTransportVerificationTest() {
+    EchoCanceller3 aec3(
+        EchoCanceller3Config(), sample_rate_hz_, false,
+        std::unique_ptr<BlockProcessor>(
+            new CaptureTransportVerificationProcessor(num_bands_)));
+
+    for (size_t frame_index = 0; frame_index < kNumFramesToProcess;
+         ++frame_index) {
+      aec3.AnalyzeCapture(&capture_buffer_);
+      OptionalBandSplit();
+      PopulateInputFrame(frame_length_, num_bands_, frame_index,
+                         &capture_buffer_.split_bands_f(0)[0], 0);
+      PopulateInputFrame(frame_length_, frame_index,
+                         &render_buffer_.channels_f()[0][0], 0);
+
+      aec3.AnalyzeRender(&render_buffer_);
+      aec3.ProcessCapture(&capture_buffer_, false);
+      EXPECT_TRUE(VerifyOutputFrameBitexactness(
+          frame_length_, num_bands_, frame_index,
+          &capture_buffer_.split_bands_f(0)[0], -64));
+    }
+  }
+
+  // Test method for testing that the render data is properly received by the
+  // block processor.
+  void RunRenderTransportVerificationTest() {
+    EchoCanceller3 aec3(
+        EchoCanceller3Config(), sample_rate_hz_, false,
+        std::unique_ptr<BlockProcessor>(
+            new RenderTransportVerificationProcessor(num_bands_)));
+
+    for (size_t frame_index = 0; frame_index < kNumFramesToProcess;
+         ++frame_index) {
+      aec3.AnalyzeCapture(&capture_buffer_);
+      OptionalBandSplit();
+      PopulateInputFrame(frame_length_, num_bands_, frame_index,
+                         &capture_buffer_.split_bands_f(0)[0], 100);
+      PopulateInputFrame(frame_length_, num_bands_, frame_index,
+                         &render_buffer_.split_bands_f(0)[0], 0);
+
+      aec3.AnalyzeRender(&render_buffer_);
+      aec3.ProcessCapture(&capture_buffer_, false);
+      EXPECT_TRUE(VerifyOutputFrameBitexactness(
+          frame_length_, num_bands_, frame_index,
+          &capture_buffer_.split_bands_f(0)[0], -64));
+    }
+  }
+
+  // Verifies that information about echo path changes are properly propagated
+  // to the block processor.
+  // The cases tested are:
+  // -That no set echo path change flags are received when there is no echo path
+  // change.
+  // -That set echo path change flags are received and continues to be received
+  // as long as echo path changes are flagged.
+  // -That set echo path change flags are no longer received when echo path
+  // change events stop being flagged.
+  enum class EchoPathChangeTestVariant { kNone, kOneSticky, kOneNonSticky };
+
+  void RunEchoPathChangeVerificationTest(
+      EchoPathChangeTestVariant echo_path_change_test_variant) {
+    const size_t num_full_blocks_per_frame =
+        rtc::CheckedDivExact(LowestBandRate(sample_rate_hz_), 100) / kBlockSize;
+    const size_t expected_num_block_to_process =
+        (kNumFramesToProcess *
+         rtc::CheckedDivExact(LowestBandRate(sample_rate_hz_), 100)) /
+        kBlockSize;
+    std::unique_ptr<testing::StrictMock<webrtc::test::MockBlockProcessor>>
+        block_processor_mock(
+            new StrictMock<webrtc::test::MockBlockProcessor>());
+    EXPECT_CALL(*block_processor_mock, BufferRender(_))
+        .Times(expected_num_block_to_process);
+    EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(_)).Times(0);
+
+    switch (echo_path_change_test_variant) {
+      case EchoPathChangeTestVariant::kNone:
+        EXPECT_CALL(*block_processor_mock, ProcessCapture(false, _, _))
+            .Times(expected_num_block_to_process);
+        break;
+      case EchoPathChangeTestVariant::kOneSticky:
+        EXPECT_CALL(*block_processor_mock, ProcessCapture(true, _, _))
+            .Times(expected_num_block_to_process);
+        break;
+      case EchoPathChangeTestVariant::kOneNonSticky:
+        EXPECT_CALL(*block_processor_mock, ProcessCapture(true, _, _))
+            .Times(num_full_blocks_per_frame);
+        EXPECT_CALL(*block_processor_mock, ProcessCapture(false, _, _))
+            .Times(expected_num_block_to_process - num_full_blocks_per_frame);
+        break;
+    }
+
+    EchoCanceller3 aec3(EchoCanceller3Config(), sample_rate_hz_, false,
+                        std::move(block_processor_mock));
+
+    for (size_t frame_index = 0; frame_index < kNumFramesToProcess;
+         ++frame_index) {
+      bool echo_path_change = false;
+      switch (echo_path_change_test_variant) {
+        case EchoPathChangeTestVariant::kNone:
+          break;
+        case EchoPathChangeTestVariant::kOneSticky:
+          echo_path_change = true;
+          break;
+        case EchoPathChangeTestVariant::kOneNonSticky:
+          if (frame_index == 0) {
+            echo_path_change = true;
+          }
+          break;
+      }
+
+      aec3.AnalyzeCapture(&capture_buffer_);
+      OptionalBandSplit();
+
+      PopulateInputFrame(frame_length_, num_bands_, frame_index,
+                         &capture_buffer_.split_bands_f(0)[0], 0);
+      PopulateInputFrame(frame_length_, frame_index,
+                         &render_buffer_.channels_f()[0][0], 0);
+
+      aec3.AnalyzeRender(&render_buffer_);
+      aec3.ProcessCapture(&capture_buffer_, echo_path_change);
+    }
+  }
+
+  // Test for verifying that echo leakage information is being properly passed
+  // to the processor.
+  // The cases tested are:
+  // -That no method calls are received when they should not.
+  // -That false values are received each time they are flagged.
+  // -That true values are received each time they are flagged.
+  // -That a false value is received when flagged after a true value has been
+  // flagged.
+  enum class EchoLeakageTestVariant {
+    kNone,
+    kFalseSticky,
+    kTrueSticky,
+    kTrueNonSticky
+  };
+
+  void RunEchoLeakageVerificationTest(
+      EchoLeakageTestVariant leakage_report_variant) {
+    const size_t expected_num_block_to_process =
+        (kNumFramesToProcess *
+         rtc::CheckedDivExact(LowestBandRate(sample_rate_hz_), 100)) /
+        kBlockSize;
+    std::unique_ptr<testing::StrictMock<webrtc::test::MockBlockProcessor>>
+        block_processor_mock(
+            new StrictMock<webrtc::test::MockBlockProcessor>());
+    EXPECT_CALL(*block_processor_mock, BufferRender(_))
+        .Times(expected_num_block_to_process);
+    EXPECT_CALL(*block_processor_mock, ProcessCapture(_, _, _))
+        .Times(expected_num_block_to_process);
+
+    switch (leakage_report_variant) {
+      case EchoLeakageTestVariant::kNone:
+        EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(_)).Times(0);
+        break;
+      case EchoLeakageTestVariant::kFalseSticky:
+        EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(false))
+            .Times(1);
+        break;
+      case EchoLeakageTestVariant::kTrueSticky:
+        EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(true))
+            .Times(1);
+        break;
+      case EchoLeakageTestVariant::kTrueNonSticky: {
+        testing::InSequence s;
+        EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(true))
+            .Times(1);
+        EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(false))
+            .Times(kNumFramesToProcess - 1);
+      } break;
+    }
+
+    EchoCanceller3 aec3(EchoCanceller3Config(), sample_rate_hz_, false,
+                        std::move(block_processor_mock));
+
+    for (size_t frame_index = 0; frame_index < kNumFramesToProcess;
+         ++frame_index) {
+      switch (leakage_report_variant) {
+        case EchoLeakageTestVariant::kNone:
+          break;
+        case EchoLeakageTestVariant::kFalseSticky:
+          if (frame_index == 0) {
+            aec3.UpdateEchoLeakageStatus(false);
+          }
+          break;
+        case EchoLeakageTestVariant::kTrueSticky:
+          if (frame_index == 0) {
+            aec3.UpdateEchoLeakageStatus(true);
+          }
+          break;
+        case EchoLeakageTestVariant::kTrueNonSticky:
+          if (frame_index == 0) {
+            aec3.UpdateEchoLeakageStatus(true);
+          } else {
+            aec3.UpdateEchoLeakageStatus(false);
+          }
+          break;
+      }
+
+      aec3.AnalyzeCapture(&capture_buffer_);
+      OptionalBandSplit();
+
+      PopulateInputFrame(frame_length_, num_bands_, frame_index,
+                         &capture_buffer_.split_bands_f(0)[0], 0);
+      PopulateInputFrame(frame_length_, frame_index,
+                         &render_buffer_.channels_f()[0][0], 0);
+
+      aec3.AnalyzeRender(&render_buffer_);
+      aec3.ProcessCapture(&capture_buffer_, false);
+    }
+  }
+
+  // This verifies that saturation information is properly passed to the
+  // BlockProcessor.
+  // The cases tested are:
+  // -That no saturation event is passed to the processor if there is no
+  // saturation.
+  // -That one frame with one negative saturated sample value is reported to be
+  // saturated and that following non-saturated frames are properly reported as
+  // not being saturated.
+  // -That one frame with one positive saturated sample value is reported to be
+  // saturated and that following non-saturated frames are properly reported as
+  // not being saturated.
+  enum class SaturationTestVariant { kNone, kOneNegative, kOnePositive };
+
+  void RunCaptureSaturationVerificationTest(
+      SaturationTestVariant saturation_variant) {
+    const size_t num_full_blocks_per_frame =
+        rtc::CheckedDivExact(LowestBandRate(sample_rate_hz_), 100) / kBlockSize;
+    const size_t expected_num_block_to_process =
+        (kNumFramesToProcess *
+         rtc::CheckedDivExact(LowestBandRate(sample_rate_hz_), 100)) /
+        kBlockSize;
+    std::unique_ptr<testing::StrictMock<webrtc::test::MockBlockProcessor>>
+        block_processor_mock(
+            new StrictMock<webrtc::test::MockBlockProcessor>());
+    EXPECT_CALL(*block_processor_mock, BufferRender(_))
+        .Times(expected_num_block_to_process);
+    EXPECT_CALL(*block_processor_mock, UpdateEchoLeakageStatus(_)).Times(0);
+
+    switch (saturation_variant) {
+      case SaturationTestVariant::kNone:
+        EXPECT_CALL(*block_processor_mock, ProcessCapture(_, false, _))
+            .Times(expected_num_block_to_process);
+        break;
+      case SaturationTestVariant::kOneNegative: {
+        testing::InSequence s;
+        EXPECT_CALL(*block_processor_mock, ProcessCapture(_, true, _))
+            .Times(num_full_blocks_per_frame);
+        EXPECT_CALL(*block_processor_mock, ProcessCapture(_, false, _))
+            .Times(expected_num_block_to_process - num_full_blocks_per_frame);
+      } break;
+      case SaturationTestVariant::kOnePositive: {
+        testing::InSequence s;
+        EXPECT_CALL(*block_processor_mock, ProcessCapture(_, true, _))
+            .Times(num_full_blocks_per_frame);
+        EXPECT_CALL(*block_processor_mock, ProcessCapture(_, false, _))
+            .Times(expected_num_block_to_process - num_full_blocks_per_frame);
+      } break;
+    }
+
+    EchoCanceller3 aec3(EchoCanceller3Config(), sample_rate_hz_, false,
+                        std::move(block_processor_mock));
+    for (size_t frame_index = 0; frame_index < kNumFramesToProcess;
+         ++frame_index) {
+      for (int k = 0; k < fullband_frame_length_; ++k) {
+        capture_buffer_.channels_f()[0][k] = 0.f;
+      }
+      switch (saturation_variant) {
+        case SaturationTestVariant::kNone:
+          break;
+        case SaturationTestVariant::kOneNegative:
+          if (frame_index == 0) {
+            capture_buffer_.channels_f()[0][10] = -32768.f;
+          }
+          break;
+        case SaturationTestVariant::kOnePositive:
+          if (frame_index == 0) {
+            capture_buffer_.channels_f()[0][10] = 32767.f;
+          }
+          break;
+      }
+
+      aec3.AnalyzeCapture(&capture_buffer_);
+      OptionalBandSplit();
+
+      PopulateInputFrame(frame_length_, num_bands_, frame_index,
+                         &capture_buffer_.split_bands_f(0)[0], 0);
+      PopulateInputFrame(frame_length_, num_bands_, frame_index,
+                         &render_buffer_.split_bands_f(0)[0], 0);
+
+      aec3.AnalyzeRender(&render_buffer_);
+      aec3.ProcessCapture(&capture_buffer_, false);
+    }
+  }
+
+  // This test verifies that the swapqueue is able to handle jitter in the
+  // capture and render API calls.
+  void RunRenderSwapQueueVerificationTest() {
+    const EchoCanceller3Config config;
+    EchoCanceller3 aec3(
+        config, sample_rate_hz_, false,
+        std::unique_ptr<BlockProcessor>(
+            new RenderTransportVerificationProcessor(num_bands_)));
+
+    for (size_t frame_index = 0; frame_index < kRenderTransferQueueSizeFrames;
+         ++frame_index) {
+      if (sample_rate_hz_ > 16000) {
+        render_buffer_.SplitIntoFrequencyBands();
+      }
+      PopulateInputFrame(frame_length_, num_bands_, frame_index,
+                         &render_buffer_.split_bands_f(0)[0], 0);
+
+      if (sample_rate_hz_ > 16000) {
+        render_buffer_.SplitIntoFrequencyBands();
+      }
+
+      aec3.AnalyzeRender(&render_buffer_);
+    }
+
+    for (size_t frame_index = 0; frame_index < kRenderTransferQueueSizeFrames;
+         ++frame_index) {
+      aec3.AnalyzeCapture(&capture_buffer_);
+      if (sample_rate_hz_ > 16000) {
+        capture_buffer_.SplitIntoFrequencyBands();
+      }
+
+      PopulateInputFrame(frame_length_, num_bands_, frame_index,
+                         &capture_buffer_.split_bands_f(0)[0], 0);
+
+      aec3.ProcessCapture(&capture_buffer_, false);
+      EXPECT_TRUE(VerifyOutputFrameBitexactness(
+          frame_length_, num_bands_, frame_index,
+          &capture_buffer_.split_bands_f(0)[0], -64));
+    }
+  }
+
+  // This test verifies that a buffer overrun in the render swapqueue is
+  // properly reported.
+  void RunRenderPipelineSwapQueueOverrunReturnValueTest() {
+    EchoCanceller3 aec3(EchoCanceller3Config(), sample_rate_hz_, false);
+
+    constexpr size_t kRenderTransferQueueSize = 30;
+    for (size_t k = 0; k < 2; ++k) {
+      for (size_t frame_index = 0; frame_index < kRenderTransferQueueSize;
+           ++frame_index) {
+        if (sample_rate_hz_ > 16000) {
+          render_buffer_.SplitIntoFrequencyBands();
+        }
+        PopulateInputFrame(frame_length_, frame_index,
+                           &render_buffer_.channels_f()[0][0], 0);
+
+        if (k == 0) {
+          aec3.AnalyzeRender(&render_buffer_);
+        } else {
+          aec3.AnalyzeRender(&render_buffer_);
+        }
+      }
+    }
+  }
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+  // Verifies the that the check for the number of bands in the AnalyzeRender
+  // input is correct by adjusting the sample rates of EchoCanceller3 and the
+  // input AudioBuffer to have a different number of bands.
+  void RunAnalyzeRenderNumBandsCheckVerification() {
+    // Set aec3_sample_rate_hz to be different from sample_rate_hz_ in such a
+    // way that the number of bands for the rates are different.
+    const int aec3_sample_rate_hz = sample_rate_hz_ == 48000 ? 32000 : 48000;
+    EchoCanceller3 aec3(EchoCanceller3Config(), aec3_sample_rate_hz, false);
+    PopulateInputFrame(frame_length_, 0, &render_buffer_.channels_f()[0][0], 0);
+
+    EXPECT_DEATH(aec3.AnalyzeRender(&render_buffer_), "");
+  }
+
+  // Verifies the that the check for the number of bands in the ProcessCapture
+  // input is correct by adjusting the sample rates of EchoCanceller3 and the
+  // input AudioBuffer to have a different number of bands.
+  void RunProcessCaptureNumBandsCheckVerification() {
+    // Set aec3_sample_rate_hz to be different from sample_rate_hz_ in such a
+    // way that the number of bands for the rates are different.
+    const int aec3_sample_rate_hz = sample_rate_hz_ == 48000 ? 32000 : 48000;
+    EchoCanceller3 aec3(EchoCanceller3Config(), aec3_sample_rate_hz, false);
+    PopulateInputFrame(frame_length_, num_bands_, 0,
+                       &capture_buffer_.split_bands_f(0)[0], 100);
+    EXPECT_DEATH(aec3.ProcessCapture(&capture_buffer_, false), "");
+  }
+
+  // Verifies the that the check for the frame length in the AnalyzeRender input
+  // is correct by adjusting the sample rates of EchoCanceller3 and the input
+  // AudioBuffer to have a different frame lengths.
+  void RunAnalyzeRenderFrameLengthCheckVerification() {
+    // Set aec3_sample_rate_hz to be different from sample_rate_hz_ in such a
+    // way that the band frame lengths are different.
+    const int aec3_sample_rate_hz = sample_rate_hz_ == 8000 ? 16000 : 8000;
+    EchoCanceller3 aec3(EchoCanceller3Config(), aec3_sample_rate_hz, false);
+
+    OptionalBandSplit();
+    PopulateInputFrame(frame_length_, 0, &render_buffer_.channels_f()[0][0], 0);
+
+    EXPECT_DEATH(aec3.AnalyzeRender(&render_buffer_), "");
+  }
+
+  // Verifies the that the check for the frame length in the AnalyzeRender input
+  // is correct by adjusting the sample rates of EchoCanceller3 and the input
+  // AudioBuffer to have a different frame lengths.
+  void RunProcessCaptureFrameLengthCheckVerification() {
+    // Set aec3_sample_rate_hz to be different from sample_rate_hz_ in such a
+    // way that the band frame lengths are different.
+    const int aec3_sample_rate_hz = sample_rate_hz_ == 8000 ? 16000 : 8000;
+    EchoCanceller3 aec3(EchoCanceller3Config(), aec3_sample_rate_hz, false);
+
+    OptionalBandSplit();
+    PopulateInputFrame(frame_length_, num_bands_, 0,
+                       &capture_buffer_.split_bands_f(0)[0], 100);
+
+    EXPECT_DEATH(aec3.ProcessCapture(&capture_buffer_, false), "");
+  }
+
+#endif
+
+ private:
+  void OptionalBandSplit() {
+    if (sample_rate_hz_ > 16000) {
+      capture_buffer_.SplitIntoFrequencyBands();
+      render_buffer_.SplitIntoFrequencyBands();
+    }
+  }
+
+  static constexpr size_t kNumFramesToProcess = 20;
+  const int sample_rate_hz_;
+  const size_t num_bands_;
+  const size_t frame_length_;
+  const int fullband_frame_length_;
+  AudioBuffer capture_buffer_;
+  AudioBuffer render_buffer_;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(EchoCanceller3Tester);
+};
+
+std::string ProduceDebugText(int sample_rate_hz) {
+  std::ostringstream ss;
+  ss << "Sample rate: " << sample_rate_hz;
+  return ss.str();
+}
+
+std::string ProduceDebugText(int sample_rate_hz, int variant) {
+  std::ostringstream ss;
+  ss << "Sample rate: " << sample_rate_hz << ", variant: " << variant;
+  return ss.str();
+}
+
+}  // namespace
+
+TEST(EchoCanceller3Buffering, CaptureBitexactness) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    EchoCanceller3Tester(rate).RunCaptureTransportVerificationTest();
+  }
+}
+
+TEST(EchoCanceller3Buffering, RenderBitexactness) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    EchoCanceller3Tester(rate).RunRenderTransportVerificationTest();
+  }
+}
+
+TEST(EchoCanceller3Buffering, RenderSwapQueue) {
+  for (auto rate : {8000, 16000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    EchoCanceller3Tester(rate).RunRenderSwapQueueVerificationTest();
+  }
+}
+
+TEST(EchoCanceller3Buffering, RenderSwapQueueOverrunReturnValue) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    EchoCanceller3Tester(rate)
+        .RunRenderPipelineSwapQueueOverrunReturnValueTest();
+  }
+}
+
+TEST(EchoCanceller3Messaging, CaptureSaturation) {
+  auto variants = {EchoCanceller3Tester::SaturationTestVariant::kNone,
+                   EchoCanceller3Tester::SaturationTestVariant::kOneNegative,
+                   EchoCanceller3Tester::SaturationTestVariant::kOnePositive};
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    for (auto variant : variants) {
+      SCOPED_TRACE(ProduceDebugText(rate, static_cast<int>(variant)));
+      EchoCanceller3Tester(rate).RunCaptureSaturationVerificationTest(variant);
+    }
+  }
+}
+
+TEST(EchoCanceller3Messaging, EchoPathChange) {
+  auto variants = {
+      EchoCanceller3Tester::EchoPathChangeTestVariant::kNone,
+      EchoCanceller3Tester::EchoPathChangeTestVariant::kOneSticky,
+      EchoCanceller3Tester::EchoPathChangeTestVariant::kOneNonSticky};
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    for (auto variant : variants) {
+      SCOPED_TRACE(ProduceDebugText(rate, static_cast<int>(variant)));
+      EchoCanceller3Tester(rate).RunEchoPathChangeVerificationTest(variant);
+    }
+  }
+}
+
+TEST(EchoCanceller3Messaging, EchoLeakage) {
+  auto variants = {
+      EchoCanceller3Tester::EchoLeakageTestVariant::kNone,
+      EchoCanceller3Tester::EchoLeakageTestVariant::kFalseSticky,
+      EchoCanceller3Tester::EchoLeakageTestVariant::kTrueSticky,
+      EchoCanceller3Tester::EchoLeakageTestVariant::kTrueNonSticky};
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    for (auto variant : variants) {
+      SCOPED_TRACE(ProduceDebugText(rate, static_cast<int>(variant)));
+      EchoCanceller3Tester(rate).RunEchoLeakageVerificationTest(variant);
+    }
+  }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+TEST(EchoCanceller3InputCheck, WrongCaptureNumBandsCheckVerification) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    EchoCanceller3Tester(rate).RunProcessCaptureNumBandsCheckVerification();
+  }
+}
+
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.
+TEST(EchoCanceller3InputCheck,
+     DISABLED_WrongRenderFrameLengthCheckVerification) {
+  for (auto rate : {8000, 16000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    EchoCanceller3Tester(rate).RunAnalyzeRenderFrameLengthCheckVerification();
+  }
+}
+
+TEST(EchoCanceller3InputCheck, WrongCaptureFrameLengthCheckVerification) {
+  for (auto rate : {8000, 16000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    EchoCanceller3Tester(rate).RunProcessCaptureFrameLengthCheckVerification();
+  }
+}
+
+// Verifiers that the verification for null input to the render analysis api
+// call works.
+TEST(EchoCanceller3InputCheck, NullRenderAnalysisParameter) {
+  EXPECT_DEATH(EchoCanceller3(EchoCanceller3Config(), 8000, false)
+                   .AnalyzeRender(nullptr),
+               "");
+}
+
+// Verifiers that the verification for null input to the capture analysis api
+// call works.
+TEST(EchoCanceller3InputCheck, NullCaptureAnalysisParameter) {
+  EXPECT_DEATH(EchoCanceller3(EchoCanceller3Config(), 8000, false)
+                   .AnalyzeCapture(nullptr),
+               "");
+}
+
+// Verifiers that the verification for null input to the capture processing api
+// call works.
+TEST(EchoCanceller3InputCheck, NullCaptureProcessingParameter) {
+  EXPECT_DEATH(EchoCanceller3(EchoCanceller3Config(), 8000, false)
+                   .ProcessCapture(nullptr, false),
+               "");
+}
+
+// Verifies the check for correct sample rate.
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.
+TEST(EchoCanceller3InputCheck, DISABLED_WrongSampleRate) {
+  ApmDataDumper data_dumper(0);
+  EXPECT_DEATH(EchoCanceller3(EchoCanceller3Config(), 8001, false), "");
+}
+
+#endif
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/echo_path_delay_estimator.cc b/modules/audio_processing/aec3/echo_path_delay_estimator.cc
new file mode 100644
index 0000000..0026522
--- /dev/null
+++ b/modules/audio_processing/aec3/echo_path_delay_estimator.cc
@@ -0,0 +1,105 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/echo_path_delay_estimator.h"
+
+#include <algorithm>
+#include <array>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+EchoPathDelayEstimator::EchoPathDelayEstimator(
+    ApmDataDumper* data_dumper,
+    const EchoCanceller3Config& config)
+    : data_dumper_(data_dumper),
+      down_sampling_factor_(config.delay.down_sampling_factor),
+      sub_block_size_(down_sampling_factor_ != 0
+                          ? kBlockSize / down_sampling_factor_
+                          : kBlockSize),
+      capture_decimator_(down_sampling_factor_),
+      matched_filter_(data_dumper_,
+                      DetectOptimization(),
+                      sub_block_size_,
+                      kMatchedFilterWindowSizeSubBlocks,
+                      config.delay.num_filters,
+                      kMatchedFilterAlignmentShiftSizeSubBlocks,
+                      config.render_levels.poor_excitation_render_limit),
+      matched_filter_lag_aggregator_(data_dumper_,
+                                     matched_filter_.GetMaxFilterLag()) {
+  RTC_DCHECK(data_dumper);
+  RTC_DCHECK(down_sampling_factor_ > 0);
+}
+
+EchoPathDelayEstimator::~EchoPathDelayEstimator() = default;
+
+void EchoPathDelayEstimator::Reset(bool soft_reset) {
+  if (!soft_reset) {
+    matched_filter_lag_aggregator_.Reset();
+  }
+  matched_filter_.Reset();
+  old_aggregated_lag_ = rtc::nullopt;
+  consistent_estimate_counter_ = 0;
+}
+
+rtc::Optional<DelayEstimate> EchoPathDelayEstimator::EstimateDelay(
+    const DownsampledRenderBuffer& render_buffer,
+    rtc::ArrayView<const float> capture) {
+  RTC_DCHECK_EQ(kBlockSize, capture.size());
+
+  std::array<float, kBlockSize> downsampled_capture_data;
+  rtc::ArrayView<float> downsampled_capture(downsampled_capture_data.data(),
+                                            sub_block_size_);
+  data_dumper_->DumpWav("aec3_capture_decimator_input", capture.size(),
+                        capture.data(), 16000, 1);
+  capture_decimator_.Decimate(capture, downsampled_capture);
+  data_dumper_->DumpWav("aec3_capture_decimator_output",
+                        downsampled_capture.size(), downsampled_capture.data(),
+                        16000 / down_sampling_factor_, 1);
+  matched_filter_.Update(render_buffer, downsampled_capture);
+
+  rtc::Optional<DelayEstimate> aggregated_matched_filter_lag =
+      matched_filter_lag_aggregator_.Aggregate(
+          matched_filter_.GetLagEstimates());
+
+  // TODO(peah): Move this logging outside of this class once EchoCanceller3
+  // development is done.
+  data_dumper_->DumpRaw(
+      "aec3_echo_path_delay_estimator_delay",
+      aggregated_matched_filter_lag
+          ? static_cast<int>(aggregated_matched_filter_lag->delay *
+                             down_sampling_factor_)
+          : -1);
+
+  // Return the detected delay in samples as the aggregated matched filter lag
+  // compensated by the down sampling factor for the signal being correlated.
+  if (aggregated_matched_filter_lag) {
+    aggregated_matched_filter_lag->delay *= down_sampling_factor_;
+  }
+
+  if (old_aggregated_lag_ && aggregated_matched_filter_lag &&
+      old_aggregated_lag_->delay == aggregated_matched_filter_lag->delay) {
+    ++consistent_estimate_counter_;
+  } else {
+    consistent_estimate_counter_ = 0;
+  }
+  old_aggregated_lag_ = aggregated_matched_filter_lag;
+  constexpr size_t kNumBlocksPerSecondBy2 = kNumBlocksPerSecond / 2;
+  if (consistent_estimate_counter_ > kNumBlocksPerSecondBy2) {
+    Reset(true);
+  }
+
+  return aggregated_matched_filter_lag;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/echo_path_delay_estimator.h b/modules/audio_processing/aec3/echo_path_delay_estimator.h
new file mode 100644
index 0000000..6389098
--- /dev/null
+++ b/modules/audio_processing/aec3/echo_path_delay_estimator.h
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ECHO_PATH_DELAY_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ECHO_PATH_DELAY_ESTIMATOR_H_
+
+#include <vector>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "api/optional.h"
+#include "modules/audio_processing/aec3/decimator.h"
+#include "modules/audio_processing/aec3/delay_estimate.h"
+#include "modules/audio_processing/aec3/downsampled_render_buffer.h"
+#include "modules/audio_processing/aec3/matched_filter.h"
+#include "modules/audio_processing/aec3/matched_filter_lag_aggregator.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+
+// Estimates the delay of the echo path.
+class EchoPathDelayEstimator {
+ public:
+  EchoPathDelayEstimator(ApmDataDumper* data_dumper,
+                         const EchoCanceller3Config& config);
+  ~EchoPathDelayEstimator();
+
+  // Resets the estimation. If the soft-reset is specified, only  the matched
+  // filters are reset.
+  void Reset(bool soft_reset);
+
+  // Produce a delay estimate if such is avaliable.
+  rtc::Optional<DelayEstimate> EstimateDelay(
+      const DownsampledRenderBuffer& render_buffer,
+      rtc::ArrayView<const float> capture);
+
+  // Log delay estimator properties.
+  void LogDelayEstimationProperties(int sample_rate_hz, size_t shift) const {
+    matched_filter_.LogFilterProperties(sample_rate_hz, shift,
+                                        down_sampling_factor_);
+  }
+
+ private:
+  ApmDataDumper* const data_dumper_;
+  const size_t down_sampling_factor_;
+  const size_t sub_block_size_;
+  Decimator capture_decimator_;
+  MatchedFilter matched_filter_;
+  MatchedFilterLagAggregator matched_filter_lag_aggregator_;
+  rtc::Optional<DelayEstimate> old_aggregated_lag_;
+  size_t consistent_estimate_counter_ = 0;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(EchoPathDelayEstimator);
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_ECHO_PATH_DELAY_ESTIMATOR_H_
diff --git a/modules/audio_processing/aec3/echo_path_delay_estimator_unittest.cc b/modules/audio_processing/aec3/echo_path_delay_estimator_unittest.cc
new file mode 100644
index 0000000..38f31c9
--- /dev/null
+++ b/modules/audio_processing/aec3/echo_path_delay_estimator_unittest.cc
@@ -0,0 +1,172 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/echo_path_delay_estimator.h"
+
+#include <algorithm>
+#include <sstream>
+#include <string>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/random.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+std::string ProduceDebugText(size_t delay, size_t down_sampling_factor) {
+  std::ostringstream ss;
+  ss << "Delay: " << delay;
+  ss << ", Down sampling factor: " << down_sampling_factor;
+  return ss.str();
+}
+
+}  // namespace
+
+// Verifies that the basic API calls work.
+TEST(EchoPathDelayEstimator, BasicApiCalls) {
+  ApmDataDumper data_dumper(0);
+  EchoCanceller3Config config;
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(config, 3));
+  EchoPathDelayEstimator estimator(&data_dumper, config);
+  std::vector<std::vector<float>> render(3, std::vector<float>(kBlockSize));
+  std::vector<float> capture(kBlockSize);
+  for (size_t k = 0; k < 100; ++k) {
+    render_delay_buffer->Insert(render);
+    estimator.EstimateDelay(render_delay_buffer->GetDownsampledRenderBuffer(),
+                            capture);
+  }
+}
+
+// Verifies that the delay estimator produces correct delay for artificially
+// delayed signals.
+TEST(EchoPathDelayEstimator, DelayEstimation) {
+  Random random_generator(42U);
+  std::vector<std::vector<float>> render(3, std::vector<float>(kBlockSize));
+  std::vector<float> capture(kBlockSize);
+  ApmDataDumper data_dumper(0);
+  constexpr size_t kDownSamplingFactors[] = {2, 4, 8};
+  for (auto down_sampling_factor : kDownSamplingFactors) {
+    EchoCanceller3Config config;
+    config.delay.down_sampling_factor = down_sampling_factor;
+    config.delay.num_filters = 10;
+    for (size_t delay_samples : {30, 64, 150, 200, 800, 4000}) {
+      SCOPED_TRACE(ProduceDebugText(delay_samples, down_sampling_factor));
+
+      config.delay.api_call_jitter_blocks = 5;
+      std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+          RenderDelayBuffer::Create(config, 3));
+      DelayBuffer<float> signal_delay_buffer(
+          delay_samples + 2 * config.delay.api_call_jitter_blocks * 64);
+      EchoPathDelayEstimator estimator(&data_dumper, config);
+
+      rtc::Optional<DelayEstimate> estimated_delay_samples;
+      for (size_t k = 0; k < (500 + (delay_samples) / kBlockSize); ++k) {
+        RandomizeSampleVector(&random_generator, render[0]);
+        signal_delay_buffer.Delay(render[0], capture);
+        render_delay_buffer->Insert(render);
+
+        if (k == 0) {
+          render_delay_buffer->Reset();
+        }
+
+        render_delay_buffer->PrepareCaptureProcessing();
+
+        auto estimate = estimator.EstimateDelay(
+            render_delay_buffer->GetDownsampledRenderBuffer(), capture);
+
+        if (estimate) {
+          estimated_delay_samples = estimate;
+        }
+      }
+
+      if (estimated_delay_samples) {
+        // Due to the internal down-sampling done inside the delay estimator
+        // the estimated delay cannot be expected to be exact to the true delay.
+        EXPECT_NEAR(delay_samples,
+                    estimated_delay_samples->delay -
+                        (config.delay.api_call_jitter_blocks + 1) * 64,
+                    config.delay.down_sampling_factor);
+      } else {
+        ADD_FAILURE();
+      }
+  }
+}
+}
+
+// Verifies that the delay estimator does not produce delay estimates for render
+// signals of low level.
+TEST(EchoPathDelayEstimator, NoDelayEstimatesForLowLevelRenderSignals) {
+  Random random_generator(42U);
+  EchoCanceller3Config config;
+  std::vector<std::vector<float>> render(3, std::vector<float>(kBlockSize));
+  std::vector<float> capture(kBlockSize);
+  ApmDataDumper data_dumper(0);
+  EchoPathDelayEstimator estimator(&data_dumper, config);
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(EchoCanceller3Config(), 3));
+  for (size_t k = 0; k < 100; ++k) {
+    RandomizeSampleVector(&random_generator, render[0]);
+    for (auto& render_k : render[0]) {
+      render_k *= 100.f / 32767.f;
+    }
+    std::copy(render[0].begin(), render[0].end(), capture.begin());
+    render_delay_buffer->Insert(render);
+    render_delay_buffer->PrepareCaptureProcessing();
+    EXPECT_FALSE(estimator.EstimateDelay(
+        render_delay_buffer->GetDownsampledRenderBuffer(), capture));
+  }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for the render blocksize.
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.
+TEST(EchoPathDelayEstimator, DISABLED_WrongRenderBlockSize) {
+  ApmDataDumper data_dumper(0);
+  EchoCanceller3Config config;
+  EchoPathDelayEstimator estimator(&data_dumper, config);
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(config, 3));
+  std::vector<float> capture(kBlockSize);
+  EXPECT_DEATH(estimator.EstimateDelay(
+                   render_delay_buffer->GetDownsampledRenderBuffer(), capture),
+               "");
+}
+
+// Verifies the check for the capture blocksize.
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.
+TEST(EchoPathDelayEstimator, WrongCaptureBlockSize) {
+  ApmDataDumper data_dumper(0);
+  EchoCanceller3Config config;
+  EchoPathDelayEstimator estimator(&data_dumper, config);
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(config, 3));
+  std::vector<float> capture(std::vector<float>(kBlockSize - 1));
+  EXPECT_DEATH(estimator.EstimateDelay(
+                   render_delay_buffer->GetDownsampledRenderBuffer(), capture),
+               "");
+}
+
+// Verifies the check for non-null data dumper.
+TEST(EchoPathDelayEstimator, NullDataDumper) {
+  EXPECT_DEATH(EchoPathDelayEstimator(nullptr, EchoCanceller3Config()), "");
+}
+
+#endif
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/echo_path_variability.cc b/modules/audio_processing/aec3/echo_path_variability.cc
new file mode 100644
index 0000000..0ae9cff
--- /dev/null
+++ b/modules/audio_processing/aec3/echo_path_variability.cc
@@ -0,0 +1,22 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/echo_path_variability.h"
+
+namespace webrtc {
+
+EchoPathVariability::EchoPathVariability(bool gain_change,
+                                         DelayAdjustment delay_change,
+                                         bool clock_drift)
+    : gain_change(gain_change),
+      delay_change(delay_change),
+      clock_drift(clock_drift) {}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/echo_path_variability.h b/modules/audio_processing/aec3/echo_path_variability.h
new file mode 100644
index 0000000..adf0d7a
--- /dev/null
+++ b/modules/audio_processing/aec3/echo_path_variability.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ECHO_PATH_VARIABILITY_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ECHO_PATH_VARIABILITY_H_
+
+namespace webrtc {
+
+struct EchoPathVariability {
+  enum class DelayAdjustment {
+    kNone,
+    kBufferReadjustment,
+    kBufferFlush,
+    kDelayReset,
+    kNewDetectedDelay
+  };
+
+  EchoPathVariability(bool gain_change,
+                      DelayAdjustment delay_change,
+                      bool clock_drift);
+
+  bool AudioPathChanged() const {
+    return gain_change || delay_change != DelayAdjustment::kNone;
+  }
+  bool gain_change;
+  DelayAdjustment delay_change;
+  bool clock_drift;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_ECHO_PATH_VARIABILITY_H_
diff --git a/modules/audio_processing/aec3/echo_path_variability_unittest.cc b/modules/audio_processing/aec3/echo_path_variability_unittest.cc
new file mode 100644
index 0000000..b1795ed
--- /dev/null
+++ b/modules/audio_processing/aec3/echo_path_variability_unittest.cc
@@ -0,0 +1,49 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/echo_path_variability.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(EchoPathVariability, CorrectBehavior) {
+  // Test correct passing and reporting of the gain change information.
+  EchoPathVariability v(
+      true, EchoPathVariability::DelayAdjustment::kNewDetectedDelay, false);
+  EXPECT_TRUE(v.gain_change);
+  EXPECT_TRUE(v.delay_change ==
+              EchoPathVariability::DelayAdjustment::kNewDetectedDelay);
+  EXPECT_TRUE(v.AudioPathChanged());
+  EXPECT_FALSE(v.clock_drift);
+
+  v = EchoPathVariability(true, EchoPathVariability::DelayAdjustment::kNone,
+                          false);
+  EXPECT_TRUE(v.gain_change);
+  EXPECT_TRUE(v.delay_change == EchoPathVariability::DelayAdjustment::kNone);
+  EXPECT_TRUE(v.AudioPathChanged());
+  EXPECT_FALSE(v.clock_drift);
+
+  v = EchoPathVariability(
+      false, EchoPathVariability::DelayAdjustment::kNewDetectedDelay, false);
+  EXPECT_FALSE(v.gain_change);
+  EXPECT_TRUE(v.delay_change ==
+              EchoPathVariability::DelayAdjustment::kNewDetectedDelay);
+  EXPECT_TRUE(v.AudioPathChanged());
+  EXPECT_FALSE(v.clock_drift);
+
+  v = EchoPathVariability(false, EchoPathVariability::DelayAdjustment::kNone,
+                          false);
+  EXPECT_FALSE(v.gain_change);
+  EXPECT_TRUE(v.delay_change == EchoPathVariability::DelayAdjustment::kNone);
+  EXPECT_FALSE(v.AudioPathChanged());
+  EXPECT_FALSE(v.clock_drift);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/echo_remover.cc b/modules/audio_processing/aec3/echo_remover.cc
new file mode 100644
index 0000000..da1fa4b
--- /dev/null
+++ b/modules/audio_processing/aec3/echo_remover.cc
@@ -0,0 +1,259 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/echo_remover.h"
+
+#include <math.h>
+#include <algorithm>
+#include <memory>
+#include <numeric>
+#include <string>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/comfort_noise_generator.h"
+#include "modules/audio_processing/aec3/echo_path_variability.h"
+#include "modules/audio_processing/aec3/echo_remover_metrics.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "modules/audio_processing/aec3/output_selector.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/aec3/residual_echo_estimator.h"
+#include "modules/audio_processing/aec3/subtractor.h"
+#include "modules/audio_processing/aec3/suppression_filter.h"
+#include "modules/audio_processing/aec3/suppression_gain.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/atomicops.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+namespace {
+
+void LinearEchoPower(const FftData& E,
+                     const FftData& Y,
+                     std::array<float, kFftLengthBy2Plus1>* S2) {
+  for (size_t k = 0; k < E.re.size(); ++k) {
+    (*S2)[k] = (Y.re[k] - E.re[k]) * (Y.re[k] - E.re[k]) +
+               (Y.im[k] - E.im[k]) * (Y.im[k] - E.im[k]);
+  }
+}
+
+// Class for removing the echo from the capture signal.
+class EchoRemoverImpl final : public EchoRemover {
+ public:
+  explicit EchoRemoverImpl(const EchoCanceller3Config& config,
+                           int sample_rate_hz);
+  ~EchoRemoverImpl() override;
+
+  void GetMetrics(EchoControl::Metrics* metrics) const override;
+
+  // Removes the echo from a block of samples from the capture signal. The
+  // supplied render signal is assumed to be pre-aligned with the capture
+  // signal.
+  void ProcessCapture(const EchoPathVariability& echo_path_variability,
+                      bool capture_signal_saturation,
+                      const rtc::Optional<DelayEstimate>& delay_estimate,
+                      RenderBuffer* render_buffer,
+                      std::vector<std::vector<float>>* capture) override;
+
+  // Updates the status on whether echo leakage is detected in the output of the
+  // echo remover.
+  void UpdateEchoLeakageStatus(bool leakage_detected) override {
+    echo_leakage_detected_ = leakage_detected;
+  }
+
+ private:
+  static int instance_count_;
+  const EchoCanceller3Config config_;
+  const Aec3Fft fft_;
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  const Aec3Optimization optimization_;
+  const int sample_rate_hz_;
+  Subtractor subtractor_;
+  SuppressionGain suppression_gain_;
+  ComfortNoiseGenerator cng_;
+  SuppressionFilter suppression_filter_;
+  RenderSignalAnalyzer render_signal_analyzer_;
+  OutputSelector output_selector_;
+  ResidualEchoEstimator residual_echo_estimator_;
+  bool echo_leakage_detected_ = false;
+  AecState aec_state_;
+  EchoRemoverMetrics metrics_;
+  bool initial_state_ = true;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(EchoRemoverImpl);
+};
+
+int EchoRemoverImpl::instance_count_ = 0;
+
+EchoRemoverImpl::EchoRemoverImpl(const EchoCanceller3Config& config,
+                                 int sample_rate_hz)
+    : config_(config),
+      fft_(),
+      data_dumper_(
+          new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))),
+      optimization_(DetectOptimization()),
+      sample_rate_hz_(sample_rate_hz),
+      subtractor_(config, data_dumper_.get(), optimization_),
+      suppression_gain_(config_, optimization_),
+      cng_(optimization_),
+      suppression_filter_(sample_rate_hz_),
+      render_signal_analyzer_(config_),
+      residual_echo_estimator_(config_),
+      aec_state_(config_) {
+  RTC_DCHECK(ValidFullBandRate(sample_rate_hz));
+}
+
+EchoRemoverImpl::~EchoRemoverImpl() = default;
+
+void EchoRemoverImpl::GetMetrics(EchoControl::Metrics* metrics) const {
+  // Echo return loss (ERL) is inverted to go from gain to attenuation.
+  metrics->echo_return_loss = -10.0 * log10(aec_state_.ErlTimeDomain());
+  metrics->echo_return_loss_enhancement =
+      10.0 * log10(aec_state_.ErleTimeDomain());
+}
+
+void EchoRemoverImpl::ProcessCapture(
+    const EchoPathVariability& echo_path_variability,
+    bool capture_signal_saturation,
+    const rtc::Optional<DelayEstimate>& delay_estimate,
+    RenderBuffer* render_buffer,
+    std::vector<std::vector<float>>* capture) {
+  const std::vector<std::vector<float>>& x = render_buffer->Block(0);
+  std::vector<std::vector<float>>* y = capture;
+  RTC_DCHECK(render_buffer);
+  RTC_DCHECK(y);
+  RTC_DCHECK_EQ(x.size(), NumBandsForRate(sample_rate_hz_));
+  RTC_DCHECK_EQ(y->size(), NumBandsForRate(sample_rate_hz_));
+  RTC_DCHECK_EQ(x[0].size(), kBlockSize);
+  RTC_DCHECK_EQ((*y)[0].size(), kBlockSize);
+  const std::vector<float>& x0 = x[0];
+  std::vector<float>& y0 = (*y)[0];
+
+  data_dumper_->DumpWav("aec3_echo_remover_capture_input", kBlockSize, &y0[0],
+                        LowestBandRate(sample_rate_hz_), 1);
+  data_dumper_->DumpWav("aec3_echo_remover_render_input", kBlockSize, &x0[0],
+                        LowestBandRate(sample_rate_hz_), 1);
+  data_dumper_->DumpRaw("aec3_echo_remover_capture_input", y0);
+  data_dumper_->DumpRaw("aec3_echo_remover_render_input", x0);
+
+  aec_state_.UpdateCaptureSaturation(capture_signal_saturation);
+
+  if (echo_path_variability.AudioPathChanged()) {
+    subtractor_.HandleEchoPathChange(echo_path_variability);
+    aec_state_.HandleEchoPathChange(echo_path_variability);
+    suppression_gain_.SetInitialState(true);
+    initial_state_ = true;
+  }
+
+  std::array<float, kFftLengthBy2Plus1> Y2;
+  std::array<float, kFftLengthBy2Plus1> R2;
+  std::array<float, kFftLengthBy2Plus1> S2_linear;
+  std::array<float, kFftLengthBy2Plus1> G;
+  float high_bands_gain;
+  FftData Y;
+  FftData comfort_noise;
+  FftData high_band_comfort_noise;
+  SubtractorOutput subtractor_output;
+  FftData& E_main_nonwindowed = subtractor_output.E_main_nonwindowed;
+  auto& E2_main = subtractor_output.E2_main_nonwindowed;
+  auto& E2_shadow = subtractor_output.E2_shadow;
+  auto& e_main = subtractor_output.e_main;
+
+  // Analyze the render signal.
+  render_signal_analyzer_.Update(*render_buffer, aec_state_.FilterDelay());
+
+  // Perform linear echo cancellation.
+  if (initial_state_ && !aec_state_.InitialState()) {
+    subtractor_.ExitInitialState();
+    suppression_gain_.SetInitialState(false);
+    initial_state_ = false;
+  }
+  subtractor_.Process(*render_buffer, y0, render_signal_analyzer_, aec_state_,
+                      &subtractor_output);
+
+  // Compute spectra.
+  // fft_.ZeroPaddedFft(y0, Aec3Fft::Window::kHanning, &Y);
+  fft_.ZeroPaddedFft(y0, Aec3Fft::Window::kRectangular, &Y);
+  LinearEchoPower(E_main_nonwindowed, Y, &S2_linear);
+  Y.Spectrum(optimization_, Y2);
+
+  // Update the AEC state information.
+  aec_state_.Update(delay_estimate, subtractor_.FilterFrequencyResponse(),
+                    subtractor_.FilterImpulseResponse(),
+                    subtractor_.ConvergedFilter(), *render_buffer, E2_main, Y2,
+                    subtractor_output.s_main, echo_leakage_detected_);
+
+  // Choose the linear output.
+  output_selector_.FormLinearOutput(!aec_state_.TransparentMode(), e_main, y0);
+  data_dumper_->DumpWav("aec3_output_linear", kBlockSize, &y0[0],
+                        LowestBandRate(sample_rate_hz_), 1);
+  data_dumper_->DumpRaw("aec3_output_linear", y0);
+  const auto& E2 = output_selector_.UseSubtractorOutput() ? E2_main : Y2;
+
+  // Estimate the residual echo power.
+  residual_echo_estimator_.Estimate(aec_state_, *render_buffer, S2_linear, Y2,
+                                    &R2);
+
+  // Estimate the comfort noise.
+  cng_.Compute(aec_state_, Y2, &comfort_noise, &high_band_comfort_noise);
+
+  // A choose and apply echo suppression gain.
+  suppression_gain_.GetGain(E2, R2, cng_.NoiseSpectrum(),
+                            render_signal_analyzer_, aec_state_, x,
+                            &high_bands_gain, &G);
+  suppression_filter_.ApplyGain(comfort_noise, high_band_comfort_noise, G,
+                                high_bands_gain, y);
+
+  // Update the metrics.
+  metrics_.Update(aec_state_, cng_.NoiseSpectrum(), G);
+
+  // Update the aec state with the aec output characteristics.
+  aec_state_.UpdateWithOutput(y0);
+
+  // Debug outputs for the purpose of development and analysis.
+  data_dumper_->DumpWav("aec3_echo_estimate", kBlockSize,
+                        &subtractor_output.s_main[0],
+                        LowestBandRate(sample_rate_hz_), 1);
+  data_dumper_->DumpRaw("aec3_output", y0);
+  data_dumper_->DumpRaw("aec3_narrow_render",
+                        render_signal_analyzer_.NarrowPeakBand() ? 1 : 0);
+  data_dumper_->DumpRaw("aec3_N2", cng_.NoiseSpectrum());
+  data_dumper_->DumpRaw("aec3_suppressor_gain", G);
+  data_dumper_->DumpWav("aec3_output",
+                        rtc::ArrayView<const float>(&y0[0], kBlockSize),
+                        LowestBandRate(sample_rate_hz_), 1);
+  data_dumper_->DumpRaw("aec3_using_subtractor_output",
+                        output_selector_.UseSubtractorOutput() ? 1 : 0);
+  data_dumper_->DumpRaw("aec3_E2", E2);
+  data_dumper_->DumpRaw("aec3_E2_main", E2_main);
+  data_dumper_->DumpRaw("aec3_E2_shadow", E2_shadow);
+  data_dumper_->DumpRaw("aec3_S2_linear", S2_linear);
+  data_dumper_->DumpRaw("aec3_Y2", Y2);
+  data_dumper_->DumpRaw("aec3_X2", render_buffer->Spectrum(0));
+  data_dumper_->DumpRaw("aec3_R2", R2);
+  data_dumper_->DumpRaw("aec3_erle", aec_state_.Erle());
+  data_dumper_->DumpRaw("aec3_erl", aec_state_.Erl());
+  data_dumper_->DumpRaw("aec3_usable_linear_estimate",
+                        aec_state_.UsableLinearEstimate());
+  data_dumper_->DumpRaw("aec3_filter_delay", aec_state_.FilterDelay());
+  data_dumper_->DumpRaw("aec3_capture_saturation",
+                        aec_state_.SaturatedCapture() ? 1 : 0);
+}
+
+}  // namespace
+
+EchoRemover* EchoRemover::Create(const EchoCanceller3Config& config,
+                                 int sample_rate_hz) {
+  return new EchoRemoverImpl(config, sample_rate_hz);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/echo_remover.h b/modules/audio_processing/aec3/echo_remover.h
new file mode 100644
index 0000000..08fc3db
--- /dev/null
+++ b/modules/audio_processing/aec3/echo_remover.h
@@ -0,0 +1,52 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ECHO_REMOVER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ECHO_REMOVER_H_
+
+#include <vector>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "api/audio/echo_control.h"
+#include "api/optional.h"
+#include "modules/audio_processing/aec3/delay_estimate.h"
+#include "modules/audio_processing/aec3/echo_path_variability.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+
+namespace webrtc {
+
+// Class for removing the echo from the capture signal.
+class EchoRemover {
+ public:
+  static EchoRemover* Create(const EchoCanceller3Config& config,
+                             int sample_rate_hz);
+  virtual ~EchoRemover() = default;
+
+  // Get current metrics.
+  virtual void GetMetrics(EchoControl::Metrics* metrics) const = 0;
+
+  // Removes the echo from a block of samples from the capture signal. The
+  // supplied render signal is assumed to be pre-aligned with the capture
+  // signal.
+  virtual void ProcessCapture(
+      const EchoPathVariability& echo_path_variability,
+      bool capture_signal_saturation,
+      const rtc::Optional<DelayEstimate>& delay_estimate,
+      RenderBuffer* render_buffer,
+      std::vector<std::vector<float>>* capture) = 0;
+
+  // Updates the status on whether echo leakage is detected in the output of the
+  // echo remover.
+  virtual void UpdateEchoLeakageStatus(bool leakage_detected) = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_ECHO_REMOVER_H_
diff --git a/modules/audio_processing/aec3/echo_remover_metrics.cc b/modules/audio_processing/aec3/echo_remover_metrics.cc
new file mode 100644
index 0000000..bc815eb
--- /dev/null
+++ b/modules/audio_processing/aec3/echo_remover_metrics.cc
@@ -0,0 +1,324 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/echo_remover_metrics.h"
+
+#include <math.h>
+#include <algorithm>
+#include <numeric>
+
+#include "rtc_base/numerics/safe_minmax.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr float kOneByMetricsCollectionBlocks = 1.f / kMetricsCollectionBlocks;
+
+}  // namespace
+
+EchoRemoverMetrics::DbMetric::DbMetric() : DbMetric(0.f, 0.f, 0.f) {}
+EchoRemoverMetrics::DbMetric::DbMetric(float sum_value,
+                                       float floor_value,
+                                       float ceil_value)
+    : sum_value(sum_value), floor_value(floor_value), ceil_value(ceil_value) {}
+
+void EchoRemoverMetrics::DbMetric::Update(float value) {
+  sum_value += value;
+  floor_value = std::min(floor_value, value);
+  ceil_value = std::max(ceil_value, value);
+}
+
+void EchoRemoverMetrics::DbMetric::UpdateInstant(float value) {
+  sum_value = value;
+  floor_value = std::min(floor_value, value);
+  ceil_value = std::max(ceil_value, value);
+}
+
+EchoRemoverMetrics::EchoRemoverMetrics() {
+  ResetMetrics();
+}
+
+void EchoRemoverMetrics::ResetMetrics() {
+  erl_.fill(DbMetric(0.f, 10000.f, 0.000f));
+  erl_time_domain_ = DbMetric(0.f, 10000.f, 0.000f);
+  erle_.fill(DbMetric(0.f, 0.f, 1000.f));
+  erle_time_domain_ = DbMetric(0.f, 0.f, 1000.f);
+  comfort_noise_.fill(DbMetric(0.f, 100000000.f, 0.f));
+  suppressor_gain_.fill(DbMetric(0.f, 1.f, 0.f));
+  active_render_count_ = 0;
+  saturated_capture_ = false;
+}
+
+void EchoRemoverMetrics::Update(
+    const AecState& aec_state,
+    const std::array<float, kFftLengthBy2Plus1>& comfort_noise_spectrum,
+    const std::array<float, kFftLengthBy2Plus1>& suppressor_gain) {
+  metrics_reported_ = false;
+  if (++block_counter_ <= kMetricsCollectionBlocks) {
+    aec3::UpdateDbMetric(aec_state.Erl(), &erl_);
+    erl_time_domain_.UpdateInstant(aec_state.ErlTimeDomain());
+    aec3::UpdateDbMetric(aec_state.Erle(), &erle_);
+    erle_time_domain_.UpdateInstant(aec_state.ErleTimeDomain());
+    aec3::UpdateDbMetric(comfort_noise_spectrum, &comfort_noise_);
+    aec3::UpdateDbMetric(suppressor_gain, &suppressor_gain_);
+    active_render_count_ += (aec_state.ActiveRender() ? 1 : 0);
+    saturated_capture_ = saturated_capture_ || aec_state.SaturatedCapture();
+  } else {
+    // Report the metrics over several frames in order to lower the impact of
+    // the logarithms involved on the computational complexity.
+    constexpr int kMetricsCollectionBlocksBy2 = kMetricsCollectionBlocks / 2;
+    constexpr float kComfortNoiseScaling = 1.f / (kBlockSize * kBlockSize);
+    switch (block_counter_) {
+      case kMetricsCollectionBlocks + 1:
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.ErleBand0.Average",
+            aec3::TransformDbMetricForReporting(true, 0.f, 19.f, 0.f,
+                                                kOneByMetricsCollectionBlocks,
+                                                erle_[0].sum_value),
+            0, 19, 20);
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.ErleBand0.Max",
+            aec3::TransformDbMetricForReporting(true, 0.f, 19.f, 0.f, 1.f,
+                                                erle_[0].ceil_value),
+            0, 19, 20);
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.ErleBand0.Min",
+            aec3::TransformDbMetricForReporting(true, 0.f, 19.f, 0.f, 1.f,
+                                                erle_[0].floor_value),
+            0, 19, 20);
+        break;
+      case kMetricsCollectionBlocks + 2:
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.ErleBand1.Average",
+            aec3::TransformDbMetricForReporting(true, 0.f, 19.f, 0.f,
+                                                kOneByMetricsCollectionBlocks,
+                                                erle_[1].sum_value),
+            0, 19, 20);
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.ErleBand1.Max",
+            aec3::TransformDbMetricForReporting(true, 0.f, 19.f, 0.f, 1.f,
+                                                erle_[1].ceil_value),
+            0, 19, 20);
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.ErleBand1.Min",
+            aec3::TransformDbMetricForReporting(true, 0.f, 19.f, 0.f, 1.f,
+                                                erle_[1].floor_value),
+            0, 19, 20);
+        break;
+      case kMetricsCollectionBlocks + 3:
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.ErlBand0.Average",
+            aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f,
+                                                kOneByMetricsCollectionBlocks,
+                                                erl_[0].sum_value),
+            0, 59, 30);
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.ErlBand0.Max",
+            aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f, 1.f,
+                                                erl_[0].ceil_value),
+            0, 59, 30);
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.ErlBand0.Min",
+            aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f, 1.f,
+                                                erl_[0].floor_value),
+            0, 59, 30);
+        break;
+      case kMetricsCollectionBlocks + 4:
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.ErlBand1.Average",
+            aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f,
+                                                kOneByMetricsCollectionBlocks,
+                                                erl_[1].sum_value),
+            0, 59, 30);
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.ErlBand1.Max",
+            aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f, 1.f,
+                                                erl_[1].ceil_value),
+            0, 59, 30);
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.ErlBand1.Min",
+            aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f, 1.f,
+                                                erl_[1].floor_value),
+            0, 59, 30);
+        break;
+      case kMetricsCollectionBlocks + 5:
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.ComfortNoiseBand0.Average",
+            aec3::TransformDbMetricForReporting(
+                true, 0.f, 89.f, -90.3f,
+                kComfortNoiseScaling * kOneByMetricsCollectionBlocks,
+                comfort_noise_[0].sum_value),
+            0, 89, 45);
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.ComfortNoiseBand0.Max",
+            aec3::TransformDbMetricForReporting(true, 0.f, 89.f, -90.3f,
+                                                kComfortNoiseScaling,
+                                                comfort_noise_[0].ceil_value),
+            0, 89, 45);
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.ComfortNoiseBand0.Min",
+            aec3::TransformDbMetricForReporting(true, 0.f, 89.f, -90.3f,
+                                                kComfortNoiseScaling,
+                                                comfort_noise_[0].floor_value),
+            0, 89, 45);
+        break;
+      case kMetricsCollectionBlocks + 6:
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.ComfortNoiseBand1.Average",
+            aec3::TransformDbMetricForReporting(
+                true, 0.f, 89.f, -90.3f,
+                kComfortNoiseScaling * kOneByMetricsCollectionBlocks,
+                comfort_noise_[1].sum_value),
+            0, 89, 45);
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.ComfortNoiseBand1.Max",
+            aec3::TransformDbMetricForReporting(true, 0.f, 89.f, -90.3f,
+                                                kComfortNoiseScaling,
+                                                comfort_noise_[1].ceil_value),
+            0, 89, 45);
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.ComfortNoiseBand1.Min",
+            aec3::TransformDbMetricForReporting(true, 0.f, 89.f, -90.3f,
+                                                kComfortNoiseScaling,
+                                                comfort_noise_[1].floor_value),
+            0, 89, 45);
+        break;
+      case kMetricsCollectionBlocks + 7:
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.SuppressorGainBand0.Average",
+            aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 0.f,
+                                                kOneByMetricsCollectionBlocks,
+                                                suppressor_gain_[0].sum_value),
+            0, 59, 30);
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.SuppressorGainBand0.Max",
+            aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 0.f, 1.f,
+                                                suppressor_gain_[0].ceil_value),
+            0, 59, 30);
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.SuppressorGainBand0.Min",
+            aec3::TransformDbMetricForReporting(
+                true, 0.f, 59.f, 0.f, 1.f, suppressor_gain_[0].floor_value),
+            0, 59, 30);
+        break;
+      case kMetricsCollectionBlocks + 8:
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.SuppressorGainBand1.Average",
+            aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 0.f,
+                                                kOneByMetricsCollectionBlocks,
+                                                suppressor_gain_[1].sum_value),
+            0, 59, 30);
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.SuppressorGainBand1.Max",
+            aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 0.f, 1.f,
+                                                suppressor_gain_[1].ceil_value),
+            0, 59, 30);
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.SuppressorGainBand1.Min",
+            aec3::TransformDbMetricForReporting(
+                true, 0.f, 59.f, 0.f, 1.f, suppressor_gain_[1].floor_value),
+            0, 59, 30);
+        break;
+      case kMetricsCollectionBlocks + 9:
+        RTC_HISTOGRAM_BOOLEAN(
+            "WebRTC.Audio.EchoCanceller.UsableLinearEstimate",
+            static_cast<int>(aec_state.UsableLinearEstimate() ? 1 : 0));
+        RTC_HISTOGRAM_BOOLEAN(
+            "WebRTC.Audio.EchoCanceller.ActiveRender",
+            static_cast<int>(
+                active_render_count_ > kMetricsCollectionBlocksBy2 ? 1 : 0));
+        RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.EchoCanceller.FilterDelay",
+                                    aec_state.FilterDelay(), 0, 30, 31);
+        RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.EchoCanceller.CaptureSaturation",
+                              static_cast<int>(saturated_capture_ ? 1 : 0));
+        break;
+      case kMetricsCollectionBlocks + 10:
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.Erl.Value",
+            aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f, 1.f,
+                                                erl_time_domain_.sum_value),
+            0, 59, 30);
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.Erl.Max",
+            aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f, 1.f,
+                                                erl_time_domain_.ceil_value),
+            0, 59, 30);
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.Erl.Min",
+            aec3::TransformDbMetricForReporting(true, 0.f, 59.f, 30.f, 1.f,
+                                                erl_time_domain_.floor_value),
+            0, 59, 30);
+        break;
+      case kMetricsCollectionBlocks + 11:
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.Erle.Value",
+            aec3::TransformDbMetricForReporting(false, 0.f, 19.f, 0.f, 1.f,
+                                                erle_time_domain_.sum_value),
+            0, 19, 20);
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.Erle.Max",
+            aec3::TransformDbMetricForReporting(false, 0.f, 19.f, 0.f, 1.f,
+                                                erle_time_domain_.ceil_value),
+            0, 19, 20);
+        RTC_HISTOGRAM_COUNTS_LINEAR(
+            "WebRTC.Audio.EchoCanceller.Erle.Min",
+            aec3::TransformDbMetricForReporting(false, 0.f, 19.f, 0.f, 1.f,
+                                                erle_time_domain_.floor_value),
+            0, 19, 20);
+        metrics_reported_ = true;
+        RTC_DCHECK_EQ(kMetricsReportingIntervalBlocks, block_counter_);
+        block_counter_ = 0;
+        ResetMetrics();
+        break;
+      default:
+        RTC_NOTREACHED();
+        break;
+    }
+  }
+}
+
+namespace aec3 {
+
+void UpdateDbMetric(const std::array<float, kFftLengthBy2Plus1>& value,
+                    std::array<EchoRemoverMetrics::DbMetric, 2>* statistic) {
+  RTC_DCHECK(statistic);
+  // Truncation is intended in the band width computation.
+  constexpr int kNumBands = 2;
+  constexpr int kBandWidth = 65 / kNumBands;
+  constexpr float kOneByBandWidth = 1.f / kBandWidth;
+  RTC_DCHECK_EQ(kNumBands, statistic->size());
+  RTC_DCHECK_EQ(65, value.size());
+  for (size_t k = 0; k < statistic->size(); ++k) {
+    float average_band =
+        std::accumulate(value.begin() + kBandWidth * k,
+                        value.begin() + kBandWidth * (k + 1), 0.f) *
+        kOneByBandWidth;
+    (*statistic)[k].Update(average_band);
+  }
+}
+
+int TransformDbMetricForReporting(bool negate,
+                                  float min_value,
+                                  float max_value,
+                                  float offset,
+                                  float scaling,
+                                  float value) {
+  float new_value = 10.f * log10(value * scaling + 1e-10f) + offset;
+  if (negate) {
+    new_value = -new_value;
+  }
+  return static_cast<int>(rtc::SafeClamp(new_value, min_value, max_value));
+}
+
+}  // namespace aec3
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/echo_remover_metrics.h b/modules/audio_processing/aec3/echo_remover_metrics.h
new file mode 100644
index 0000000..17b803a
--- /dev/null
+++ b/modules/audio_processing/aec3/echo_remover_metrics.h
@@ -0,0 +1,80 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ECHO_REMOVER_METRICS_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ECHO_REMOVER_METRICS_H_
+
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Handles the reporting of metrics for the echo remover.
+class EchoRemoverMetrics {
+ public:
+  struct DbMetric {
+    DbMetric();
+    DbMetric(float sum_value, float floor_value, float ceil_value);
+    void Update(float value);
+    void UpdateInstant(float value);
+    float sum_value;
+    float floor_value;
+    float ceil_value;
+  };
+
+  EchoRemoverMetrics();
+
+  // Updates the metric with new data.
+  void Update(
+      const AecState& aec_state,
+      const std::array<float, kFftLengthBy2Plus1>& comfort_noise_spectrum,
+      const std::array<float, kFftLengthBy2Plus1>& suppressor_gain);
+
+  // Returns true if the metrics have just been reported, otherwise false.
+  bool MetricsReported() { return metrics_reported_; }
+
+ private:
+  // Resets the metrics.
+  void ResetMetrics();
+
+  int block_counter_ = 0;
+  std::array<DbMetric, 2> erl_;
+  DbMetric erl_time_domain_;
+  std::array<DbMetric, 2> erle_;
+  DbMetric erle_time_domain_;
+  std::array<DbMetric, 2> comfort_noise_;
+  std::array<DbMetric, 2> suppressor_gain_;
+  int active_render_count_ = 0;
+  bool saturated_capture_ = false;
+  bool metrics_reported_ = false;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(EchoRemoverMetrics);
+};
+
+namespace aec3 {
+
+// Updates a banded metric of type DbMetric with the values in the supplied
+// array.
+void UpdateDbMetric(const std::array<float, kFftLengthBy2Plus1>& value,
+                    std::array<EchoRemoverMetrics::DbMetric, 2>* statistic);
+
+// Transforms a DbMetric from the linear domain into the logarithmic domain.
+int TransformDbMetricForReporting(bool negate,
+                                  float min_value,
+                                  float max_value,
+                                  float offset,
+                                  float scaling,
+                                  float value);
+
+}  // namespace aec3
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_ECHO_REMOVER_METRICS_H_
diff --git a/modules/audio_processing/aec3/echo_remover_metrics_unittest.cc b/modules/audio_processing/aec3/echo_remover_metrics_unittest.cc
new file mode 100644
index 0000000..fbd30d1
--- /dev/null
+++ b/modules/audio_processing/aec3/echo_remover_metrics_unittest.cc
@@ -0,0 +1,158 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/echo_remover_metrics.h"
+
+#include <math.h>
+
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for non-null input.
+TEST(UpdateDbMetric, NullValue) {
+  std::array<float, kFftLengthBy2Plus1> value;
+  value.fill(0.f);
+  EXPECT_DEATH(aec3::UpdateDbMetric(value, nullptr), "");
+}
+
+#endif
+
+// Verifies the updating functionality of UpdateDbMetric.
+TEST(UpdateDbMetric, Updating) {
+  std::array<float, kFftLengthBy2Plus1> value;
+  std::array<EchoRemoverMetrics::DbMetric, 2> statistic;
+  statistic.fill(EchoRemoverMetrics::DbMetric(0.f, 100.f, -100.f));
+  constexpr float kValue0 = 10.f;
+  constexpr float kValue1 = 20.f;
+  std::fill(value.begin(), value.begin() + 32, kValue0);
+  std::fill(value.begin() + 32, value.begin() + 64, kValue1);
+
+  aec3::UpdateDbMetric(value, &statistic);
+  EXPECT_FLOAT_EQ(kValue0, statistic[0].sum_value);
+  EXPECT_FLOAT_EQ(kValue0, statistic[0].ceil_value);
+  EXPECT_FLOAT_EQ(kValue0, statistic[0].floor_value);
+  EXPECT_FLOAT_EQ(kValue1, statistic[1].sum_value);
+  EXPECT_FLOAT_EQ(kValue1, statistic[1].ceil_value);
+  EXPECT_FLOAT_EQ(kValue1, statistic[1].floor_value);
+
+  aec3::UpdateDbMetric(value, &statistic);
+  EXPECT_FLOAT_EQ(2.f * kValue0, statistic[0].sum_value);
+  EXPECT_FLOAT_EQ(kValue0, statistic[0].ceil_value);
+  EXPECT_FLOAT_EQ(kValue0, statistic[0].floor_value);
+  EXPECT_FLOAT_EQ(2.f * kValue1, statistic[1].sum_value);
+  EXPECT_FLOAT_EQ(kValue1, statistic[1].ceil_value);
+  EXPECT_FLOAT_EQ(kValue1, statistic[1].floor_value);
+}
+
+// Verifies that the TransformDbMetricForReporting method produces the desired
+// output for values for dBFS.
+TEST(TransformDbMetricForReporting, DbFsScaling) {
+  std::array<float, kBlockSize> x;
+  FftData X;
+  std::array<float, kFftLengthBy2Plus1> X2;
+  Aec3Fft fft;
+  x.fill(1000.f);
+  fft.ZeroPaddedFft(x, Aec3Fft::Window::kRectangular, &X);
+  X.Spectrum(Aec3Optimization::kNone, X2);
+
+  float offset = -10.f * log10(32768.f * 32768.f);
+  EXPECT_NEAR(offset, -90.3f, 0.1f);
+  EXPECT_EQ(
+      static_cast<int>(30.3f),
+      aec3::TransformDbMetricForReporting(
+          true, 0.f, 90.f, offset, 1.f / (kBlockSize * kBlockSize), X2[0]));
+}
+
+// Verifies that the TransformDbMetricForReporting method is able to properly
+// limit the output.
+TEST(TransformDbMetricForReporting, Limits) {
+  EXPECT_EQ(
+      0,
+      aec3::TransformDbMetricForReporting(false, 0.f, 10.f, 0.f, 1.f, 0.001f));
+  EXPECT_EQ(
+      10,
+      aec3::TransformDbMetricForReporting(false, 0.f, 10.f, 0.f, 1.f, 100.f));
+}
+
+// Verifies that the TransformDbMetricForReporting method is able to properly
+// negate output.
+TEST(TransformDbMetricForReporting, Negate) {
+  EXPECT_EQ(
+      10,
+      aec3::TransformDbMetricForReporting(true, -20.f, 20.f, 0.f, 1.f, 0.1f));
+  EXPECT_EQ(
+      -10,
+      aec3::TransformDbMetricForReporting(true, -20.f, 20.f, 0.f, 1.f, 10.f));
+}
+
+// Verify the Update functionality of DbMetric.
+TEST(DbMetric, Update) {
+  EchoRemoverMetrics::DbMetric metric(0.f, 20.f, -20.f);
+  constexpr int kNumValues = 100;
+  constexpr float kValue = 10.f;
+  for (int k = 0; k < kNumValues; ++k) {
+    metric.Update(kValue);
+  }
+  EXPECT_FLOAT_EQ(kValue * kNumValues, metric.sum_value);
+  EXPECT_FLOAT_EQ(kValue, metric.ceil_value);
+  EXPECT_FLOAT_EQ(kValue, metric.floor_value);
+}
+
+// Verify the Update functionality of DbMetric.
+TEST(DbMetric, UpdateInstant) {
+  EchoRemoverMetrics::DbMetric metric(0.f, 20.f, -20.f);
+  constexpr float kMinValue = -77.f;
+  constexpr float kMaxValue = 33.f;
+  constexpr float kLastValue = (kMinValue + kMaxValue) / 2.0f;
+  for (float value = kMinValue; value <= kMaxValue; value++)
+    metric.UpdateInstant(value);
+  metric.UpdateInstant(kLastValue);
+  EXPECT_FLOAT_EQ(kLastValue, metric.sum_value);
+  EXPECT_FLOAT_EQ(kMaxValue, metric.ceil_value);
+  EXPECT_FLOAT_EQ(kMinValue, metric.floor_value);
+}
+
+// Verify the constructor functionality of DbMetric.
+TEST(DbMetric, Constructor) {
+  EchoRemoverMetrics::DbMetric metric;
+  EXPECT_FLOAT_EQ(0.f, metric.sum_value);
+  EXPECT_FLOAT_EQ(0.f, metric.ceil_value);
+  EXPECT_FLOAT_EQ(0.f, metric.floor_value);
+
+  metric = EchoRemoverMetrics::DbMetric(1.f, 2.f, 3.f);
+  EXPECT_FLOAT_EQ(1.f, metric.sum_value);
+  EXPECT_FLOAT_EQ(2.f, metric.floor_value);
+  EXPECT_FLOAT_EQ(3.f, metric.ceil_value);
+}
+
+// Verify the general functionality of EchoRemoverMetrics.
+TEST(EchoRemoverMetrics, NormalUsage) {
+  EchoRemoverMetrics metrics;
+  AecState aec_state(EchoCanceller3Config{});
+  std::array<float, kFftLengthBy2Plus1> comfort_noise_spectrum;
+  std::array<float, kFftLengthBy2Plus1> suppressor_gain;
+  comfort_noise_spectrum.fill(10.f);
+  suppressor_gain.fill(1.f);
+  for (int j = 0; j < 3; ++j) {
+    for (int k = 0; k < kMetricsReportingIntervalBlocks - 1; ++k) {
+      metrics.Update(aec_state, comfort_noise_spectrum, suppressor_gain);
+      EXPECT_FALSE(metrics.MetricsReported());
+    }
+    metrics.Update(aec_state, comfort_noise_spectrum, suppressor_gain);
+    EXPECT_TRUE(metrics.MetricsReported());
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/echo_remover_unittest.cc b/modules/audio_processing/aec3/echo_remover_unittest.cc
new file mode 100644
index 0000000..8e131fe
--- /dev/null
+++ b/modules/audio_processing/aec3/echo_remover_unittest.cc
@@ -0,0 +1,211 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/echo_remover.h"
+
+#include <algorithm>
+#include <memory>
+#include <numeric>
+#include <sstream>
+#include <string>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/random.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+std::string ProduceDebugText(int sample_rate_hz) {
+  std::ostringstream ss;
+  ss << "Sample rate: " << sample_rate_hz;
+  return ss.str();
+}
+
+std::string ProduceDebugText(int sample_rate_hz, int delay) {
+  std::ostringstream ss(ProduceDebugText(sample_rate_hz));
+  ss << ", Delay: " << delay;
+  return ss.str();
+}
+
+}  // namespace
+
+// Verifies the basic API call sequence
+TEST(EchoRemover, BasicApiCalls) {
+  rtc::Optional<DelayEstimate> delay_estimate;
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    std::unique_ptr<EchoRemover> remover(
+        EchoRemover::Create(EchoCanceller3Config(), rate));
+    std::unique_ptr<RenderDelayBuffer> render_buffer(RenderDelayBuffer::Create(
+        EchoCanceller3Config(), NumBandsForRate(rate)));
+
+    std::vector<std::vector<float>> render(NumBandsForRate(rate),
+                                           std::vector<float>(kBlockSize, 0.f));
+    std::vector<std::vector<float>> capture(
+        NumBandsForRate(rate), std::vector<float>(kBlockSize, 0.f));
+    for (size_t k = 0; k < 100; ++k) {
+      EchoPathVariability echo_path_variability(
+          k % 3 == 0 ? true : false,
+          k % 5 == 0 ? EchoPathVariability::DelayAdjustment::kNewDetectedDelay
+                     : EchoPathVariability::DelayAdjustment::kNone,
+          false);
+      render_buffer->Insert(render);
+      render_buffer->PrepareCaptureProcessing();
+
+      remover->ProcessCapture(echo_path_variability, k % 2 == 0 ? true : false,
+                              delay_estimate, render_buffer->GetRenderBuffer(),
+                              &capture);
+    }
+  }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for the samplerate.
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.
+TEST(EchoRemover, DISABLED_WrongSampleRate) {
+  EXPECT_DEATH(std::unique_ptr<EchoRemover>(
+                   EchoRemover::Create(EchoCanceller3Config(), 8001)),
+               "");
+}
+
+// Verifies the check for the capture block size.
+TEST(EchoRemover, WrongCaptureBlockSize) {
+  rtc::Optional<DelayEstimate> delay_estimate;
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    std::unique_ptr<EchoRemover> remover(
+        EchoRemover::Create(EchoCanceller3Config(), rate));
+    std::unique_ptr<RenderDelayBuffer> render_buffer(RenderDelayBuffer::Create(
+        EchoCanceller3Config(), NumBandsForRate(rate)));
+    std::vector<std::vector<float>> capture(
+        NumBandsForRate(rate), std::vector<float>(kBlockSize - 1, 0.f));
+    EchoPathVariability echo_path_variability(
+        false, EchoPathVariability::DelayAdjustment::kNone, false);
+    EXPECT_DEATH(
+        remover->ProcessCapture(echo_path_variability, false, delay_estimate,
+                                render_buffer->GetRenderBuffer(), &capture),
+        "");
+  }
+}
+
+// Verifies the check for the number of capture bands.
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.c
+TEST(EchoRemover, DISABLED_WrongCaptureNumBands) {
+  rtc::Optional<DelayEstimate> delay_estimate;
+  for (auto rate : {16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    std::unique_ptr<EchoRemover> remover(
+        EchoRemover::Create(EchoCanceller3Config(), rate));
+    std::unique_ptr<RenderDelayBuffer> render_buffer(RenderDelayBuffer::Create(
+        EchoCanceller3Config(), NumBandsForRate(rate)));
+    std::vector<std::vector<float>> capture(
+        NumBandsForRate(rate == 48000 ? 16000 : rate + 16000),
+        std::vector<float>(kBlockSize, 0.f));
+    EchoPathVariability echo_path_variability(
+        false, EchoPathVariability::DelayAdjustment::kNone, false);
+    EXPECT_DEATH(
+        remover->ProcessCapture(echo_path_variability, false, delay_estimate,
+                                render_buffer->GetRenderBuffer(), &capture),
+        "");
+  }
+}
+
+// Verifies the check for non-null capture block.
+TEST(EchoRemover, NullCapture) {
+  rtc::Optional<DelayEstimate> delay_estimate;
+  std::unique_ptr<EchoRemover> remover(
+      EchoRemover::Create(EchoCanceller3Config(), 8000));
+  std::unique_ptr<RenderDelayBuffer> render_buffer(
+      RenderDelayBuffer::Create(EchoCanceller3Config(), 3));
+  EchoPathVariability echo_path_variability(
+      false, EchoPathVariability::DelayAdjustment::kNone, false);
+  EXPECT_DEATH(
+      remover->ProcessCapture(echo_path_variability, false, delay_estimate,
+                              render_buffer->GetRenderBuffer(), nullptr),
+      "");
+}
+
+#endif
+
+// Performs a sanity check that the echo_remover is able to properly
+// remove echoes.
+TEST(EchoRemover, BasicEchoRemoval) {
+  constexpr int kNumBlocksToProcess = 500;
+  Random random_generator(42U);
+  rtc::Optional<DelayEstimate> delay_estimate;
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    std::vector<std::vector<float>> x(NumBandsForRate(rate),
+                                      std::vector<float>(kBlockSize, 0.f));
+    std::vector<std::vector<float>> y(NumBandsForRate(rate),
+                                      std::vector<float>(kBlockSize, 0.f));
+    EchoPathVariability echo_path_variability(
+        false, EchoPathVariability::DelayAdjustment::kNone, false);
+    for (size_t delay_samples : {0, 64, 150, 200, 301}) {
+      SCOPED_TRACE(ProduceDebugText(rate, delay_samples));
+      EchoCanceller3Config config;
+      config.delay.min_echo_path_delay_blocks = 0;
+      std::unique_ptr<EchoRemover> remover(EchoRemover::Create(config, rate));
+      std::unique_ptr<RenderDelayBuffer> render_buffer(
+          RenderDelayBuffer::Create(config, NumBandsForRate(rate)));
+      render_buffer->SetDelay(delay_samples / kBlockSize);
+
+      std::vector<std::unique_ptr<DelayBuffer<float>>> delay_buffers(x.size());
+      for (size_t j = 0; j < x.size(); ++j) {
+        delay_buffers[j].reset(new DelayBuffer<float>(delay_samples));
+      }
+
+      float input_energy = 0.f;
+      float output_energy = 0.f;
+      for (int k = 0; k < kNumBlocksToProcess; ++k) {
+        const bool silence = k < 100 || (k % 100 >= 10);
+
+        for (size_t j = 0; j < x.size(); ++j) {
+          if (silence) {
+            std::fill(x[j].begin(), x[j].end(), 0.f);
+          } else {
+            RandomizeSampleVector(&random_generator, x[j]);
+          }
+          delay_buffers[j]->Delay(x[j], y[j]);
+        }
+
+        if (k > kNumBlocksToProcess / 2) {
+          for (size_t j = 0; j < x.size(); ++j) {
+            input_energy = std::inner_product(y[j].begin(), y[j].end(),
+                                              y[j].begin(), input_energy);
+          }
+        }
+
+        render_buffer->Insert(x);
+        render_buffer->PrepareCaptureProcessing();
+
+        remover->ProcessCapture(echo_path_variability, false, delay_estimate,
+                                render_buffer->GetRenderBuffer(), &y);
+
+        if (k > kNumBlocksToProcess / 2) {
+          for (size_t j = 0; j < x.size(); ++j) {
+            output_energy = std::inner_product(y[j].begin(), y[j].end(),
+                                               y[j].begin(), output_energy);
+          }
+        }
+      }
+      EXPECT_GT(input_energy, 10.f * output_energy);
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/erl_estimator.cc b/modules/audio_processing/aec3/erl_estimator.cc
new file mode 100644
index 0000000..b2849db
--- /dev/null
+++ b/modules/audio_processing/aec3/erl_estimator.cc
@@ -0,0 +1,85 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/erl_estimator.h"
+
+#include <algorithm>
+#include <numeric>
+
+namespace webrtc {
+
+namespace {
+
+constexpr float kMinErl = 0.01f;
+constexpr float kMaxErl = 1000.f;
+
+}  // namespace
+
+ErlEstimator::ErlEstimator() {
+  erl_.fill(kMaxErl);
+  hold_counters_.fill(0);
+  erl_time_domain_ = kMaxErl;
+  hold_counter_time_domain_ = 0;
+}
+
+ErlEstimator::~ErlEstimator() = default;
+
+void ErlEstimator::Update(rtc::ArrayView<const float> render_spectrum,
+                          rtc::ArrayView<const float> capture_spectrum) {
+  RTC_DCHECK_EQ(kFftLengthBy2Plus1, render_spectrum.size());
+  RTC_DCHECK_EQ(kFftLengthBy2Plus1, capture_spectrum.size());
+  const auto& X2 = render_spectrum;
+  const auto& Y2 = capture_spectrum;
+
+  // Corresponds to WGN of power -46 dBFS.
+  constexpr float kX2Min = 44015068.0f;
+
+  // Update the estimates in a maximum statistics manner.
+  for (size_t k = 1; k < kFftLengthBy2; ++k) {
+    if (X2[k] > kX2Min) {
+      const float new_erl = Y2[k] / X2[k];
+      if (new_erl < erl_[k]) {
+        hold_counters_[k - 1] = 1000;
+        erl_[k] += 0.1f * (new_erl - erl_[k]);
+        erl_[k] = std::max(erl_[k], kMinErl);
+      }
+    }
+  }
+
+  std::for_each(hold_counters_.begin(), hold_counters_.end(),
+                [](int& a) { --a; });
+  std::transform(hold_counters_.begin(), hold_counters_.end(), erl_.begin() + 1,
+                 erl_.begin() + 1, [](int a, float b) {
+                   return a > 0 ? b : std::min(kMaxErl, 2.f * b);
+                 });
+
+  erl_[0] = erl_[1];
+  erl_[kFftLengthBy2] = erl_[kFftLengthBy2 - 1];
+
+  // Compute ERL over all frequency bins.
+  const float X2_sum = std::accumulate(X2.begin(), X2.end(), 0.0f);
+
+  if (X2_sum > kX2Min * X2.size()) {
+    const float Y2_sum = std::accumulate(Y2.begin(), Y2.end(), 0.0f);
+    const float new_erl = Y2_sum / X2_sum;
+    if (new_erl < erl_time_domain_) {
+      hold_counter_time_domain_ = 1000;
+      erl_time_domain_ += 0.1f * (new_erl - erl_time_domain_);
+      erl_time_domain_ = std::max(erl_time_domain_, kMinErl);
+    }
+  }
+
+  --hold_counter_time_domain_;
+  erl_time_domain_ = (hold_counter_time_domain_ > 0)
+                         ? erl_time_domain_
+                         : std::min(kMaxErl, 2.f * erl_time_domain_);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/erl_estimator.h b/modules/audio_processing/aec3/erl_estimator.h
new file mode 100644
index 0000000..215c22e
--- /dev/null
+++ b/modules/audio_processing/aec3/erl_estimator.h
@@ -0,0 +1,47 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ERL_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ERL_ESTIMATOR_H_
+
+#include <array>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Estimates the echo return loss based on the signal spectra.
+class ErlEstimator {
+ public:
+  ErlEstimator();
+  ~ErlEstimator();
+
+  // Updates the ERL estimate.
+  void Update(rtc::ArrayView<const float> render_spectrum,
+              rtc::ArrayView<const float> capture_spectrum);
+
+  // Returns the most recent ERL estimate.
+  const std::array<float, kFftLengthBy2Plus1>& Erl() const { return erl_; }
+  float ErlTimeDomain() const { return erl_time_domain_; }
+
+ private:
+  std::array<float, kFftLengthBy2Plus1> erl_;
+  std::array<int, kFftLengthBy2Minus1> hold_counters_;
+  float erl_time_domain_;
+  int hold_counter_time_domain_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(ErlEstimator);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_ERL_ESTIMATOR_H_
diff --git a/modules/audio_processing/aec3/erl_estimator_unittest.cc b/modules/audio_processing/aec3/erl_estimator_unittest.cc
new file mode 100644
index 0000000..a406581
--- /dev/null
+++ b/modules/audio_processing/aec3/erl_estimator_unittest.cc
@@ -0,0 +1,72 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/erl_estimator.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+void VerifyErl(const std::array<float, kFftLengthBy2Plus1>& erl,
+               float erl_time_domain,
+               float reference) {
+  std::for_each(erl.begin(), erl.end(),
+                [reference](float a) { EXPECT_NEAR(reference, a, 0.001); });
+  EXPECT_NEAR(reference, erl_time_domain, 0.001);
+}
+
+}  // namespace
+
+// Verifies that the correct ERL estimates are achieved.
+TEST(ErlEstimator, Estimates) {
+  std::array<float, kFftLengthBy2Plus1> X2;
+  std::array<float, kFftLengthBy2Plus1> Y2;
+
+  ErlEstimator estimator;
+
+  // Verifies that the ERL estimate is properly reduced to lower values.
+  X2.fill(500 * 1000.f * 1000.f);
+  Y2.fill(10 * X2[0]);
+  for (size_t k = 0; k < 200; ++k) {
+    estimator.Update(X2, Y2);
+  }
+  VerifyErl(estimator.Erl(), estimator.ErlTimeDomain(), 10.f);
+
+  // Verifies that the ERL is not immediately increased when the ERL in the data
+  // increases.
+  Y2.fill(10000 * X2[0]);
+  for (size_t k = 0; k < 998; ++k) {
+    estimator.Update(X2, Y2);
+  }
+  VerifyErl(estimator.Erl(), estimator.ErlTimeDomain(), 10.f);
+
+  // Verifies that the rate of increase is 3 dB.
+  estimator.Update(X2, Y2);
+  VerifyErl(estimator.Erl(), estimator.ErlTimeDomain(), 20.f);
+
+  // Verifies that the maximum ERL is achieved when there are no low RLE
+  // estimates.
+  for (size_t k = 0; k < 1000; ++k) {
+    estimator.Update(X2, Y2);
+  }
+  VerifyErl(estimator.Erl(), estimator.ErlTimeDomain(), 1000.f);
+
+  // Verifies that the ERL estimate is is not updated for low-level signals
+  X2.fill(1000.f * 1000.f);
+  Y2.fill(10 * X2[0]);
+  for (size_t k = 0; k < 200; ++k) {
+    estimator.Update(X2, Y2);
+  }
+  VerifyErl(estimator.Erl(), estimator.ErlTimeDomain(), 1000.f);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/erle_estimator.cc b/modules/audio_processing/aec3/erle_estimator.cc
new file mode 100644
index 0000000..0e4cbe1
--- /dev/null
+++ b/modules/audio_processing/aec3/erle_estimator.cc
@@ -0,0 +1,92 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/erle_estimator.h"
+
+#include <algorithm>
+#include <numeric>
+
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+
+ErleEstimator::ErleEstimator(float min_erle,
+                             float max_erle_lf,
+                             float max_erle_hf)
+    : min_erle_(min_erle),
+      max_erle_lf_(max_erle_lf),
+      max_erle_hf_(max_erle_hf) {
+  erle_.fill(min_erle_);
+  hold_counters_.fill(0);
+  erle_time_domain_ = min_erle_;
+  hold_counter_time_domain_ = 0;
+}
+
+ErleEstimator::~ErleEstimator() = default;
+
+void ErleEstimator::Update(rtc::ArrayView<const float> render_spectrum,
+                           rtc::ArrayView<const float> capture_spectrum,
+                           rtc::ArrayView<const float> subtractor_spectrum) {
+  RTC_DCHECK_EQ(kFftLengthBy2Plus1, render_spectrum.size());
+  RTC_DCHECK_EQ(kFftLengthBy2Plus1, capture_spectrum.size());
+  RTC_DCHECK_EQ(kFftLengthBy2Plus1, subtractor_spectrum.size());
+  const auto& X2 = render_spectrum;
+  const auto& Y2 = capture_spectrum;
+  const auto& E2 = subtractor_spectrum;
+
+  // Corresponds of WGN of power -46 dBFS.
+  constexpr float kX2Min = 44015068.0f;
+
+  // Update the estimates in a clamped minimum statistics manner.
+  auto erle_update = [&](size_t start, size_t stop, float max_erle) {
+    for (size_t k = start; k < stop; ++k) {
+      if (X2[k] > kX2Min && E2[k] > 0.f) {
+        const float new_erle = Y2[k] / E2[k];
+        if (new_erle > erle_[k]) {
+          hold_counters_[k - 1] = 100;
+          erle_[k] += 0.1f * (new_erle - erle_[k]);
+          erle_[k] = rtc::SafeClamp(erle_[k], min_erle_, max_erle);
+        }
+      }
+    }
+  };
+  erle_update(1, kFftLengthBy2 / 2, max_erle_lf_);
+  erle_update(kFftLengthBy2 / 2, kFftLengthBy2, max_erle_hf_);
+
+  std::for_each(hold_counters_.begin(), hold_counters_.end(),
+                [](int& a) { --a; });
+  std::transform(hold_counters_.begin(), hold_counters_.end(),
+                 erle_.begin() + 1, erle_.begin() + 1, [&](int a, float b) {
+                   return a > 0 ? b : std::max(min_erle_, 0.97f * b);
+                 });
+
+  erle_[0] = erle_[1];
+  erle_[kFftLengthBy2] = erle_[kFftLengthBy2 - 1];
+
+  // Compute ERLE over all frequency bins.
+  const float X2_sum = std::accumulate(X2.begin(), X2.end(), 0.0f);
+  const float E2_sum = std::accumulate(E2.begin(), E2.end(), 0.0f);
+  if (X2_sum > kX2Min * X2.size() && E2_sum > 0.f) {
+    const float Y2_sum = std::accumulate(Y2.begin(), Y2.end(), 0.0f);
+    const float new_erle = Y2_sum / E2_sum;
+    if (new_erle > erle_time_domain_) {
+      hold_counter_time_domain_ = 100;
+      erle_time_domain_ += 0.1f * (new_erle - erle_time_domain_);
+      erle_time_domain_ =
+          rtc::SafeClamp(erle_time_domain_, min_erle_, max_erle_lf_);
+    }
+  }
+  --hold_counter_time_domain_;
+  erle_time_domain_ = (hold_counter_time_domain_ > 0)
+                        ? erle_time_domain_
+                        : std::max(min_erle_, 0.97f * erle_time_domain_);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/erle_estimator.h b/modules/audio_processing/aec3/erle_estimator.h
new file mode 100644
index 0000000..cb9fce6
--- /dev/null
+++ b/modules/audio_processing/aec3/erle_estimator.h
@@ -0,0 +1,51 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_ERLE_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_ERLE_ESTIMATOR_H_
+
+#include <array>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Estimates the echo return loss enhancement based on the signal spectra.
+class ErleEstimator {
+ public:
+  ErleEstimator(float min_erle, float max_erle_lf, float max_erle_hf);
+  ~ErleEstimator();
+
+  // Updates the ERLE estimate.
+  void Update(rtc::ArrayView<const float> render_spectrum,
+              rtc::ArrayView<const float> capture_spectrum,
+              rtc::ArrayView<const float> subtractor_spectrum);
+
+  // Returns the most recent ERLE estimate.
+  const std::array<float, kFftLengthBy2Plus1>& Erle() const { return erle_; }
+  float ErleTimeDomain() const { return erle_time_domain_; }
+
+ private:
+  std::array<float, kFftLengthBy2Plus1> erle_;
+  std::array<int, kFftLengthBy2Minus1> hold_counters_;
+  float erle_time_domain_;
+  int hold_counter_time_domain_;
+  const float min_erle_;
+  const float max_erle_lf_;
+  const float max_erle_hf_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(ErleEstimator);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_ERLE_ESTIMATOR_H_
diff --git a/modules/audio_processing/aec3/erle_estimator_unittest.cc b/modules/audio_processing/aec3/erle_estimator_unittest.cc
new file mode 100644
index 0000000..f3dd7d9
--- /dev/null
+++ b/modules/audio_processing/aec3/erle_estimator_unittest.cc
@@ -0,0 +1,75 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/erle_estimator.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kLowFrequencyLimit = kFftLengthBy2 / 2;
+
+void VerifyErle(const std::array<float, kFftLengthBy2Plus1>& erle,
+                float erle_time_domain,
+                float reference_lf,
+                float reference_hf) {
+  std::for_each(
+      erle.begin(), erle.begin() + kLowFrequencyLimit,
+      [reference_lf](float a) { EXPECT_NEAR(reference_lf, a, 0.001); });
+  std::for_each(
+      erle.begin() + kLowFrequencyLimit, erle.end(),
+      [reference_hf](float a) { EXPECT_NEAR(reference_hf, a, 0.001); });
+  EXPECT_NEAR(reference_lf, erle_time_domain, 0.001);
+}
+
+}  // namespace
+
+// Verifies that the correct ERLE estimates are achieved.
+TEST(ErleEstimator, Estimates) {
+  std::array<float, kFftLengthBy2Plus1> X2;
+  std::array<float, kFftLengthBy2Plus1> E2;
+  std::array<float, kFftLengthBy2Plus1> Y2;
+
+  ErleEstimator estimator(1.f, 8.f, 1.5f);
+
+  // Verifies that the ERLE estimate is properley increased to higher values.
+  X2.fill(500 * 1000.f * 1000.f);
+  E2.fill(1000.f * 1000.f);
+  Y2.fill(10 * E2[0]);
+  for (size_t k = 0; k < 200; ++k) {
+    estimator.Update(X2, Y2, E2);
+  }
+  VerifyErle(estimator.Erle(), estimator.ErleTimeDomain(), 8.f, 1.5f);
+
+  // Verifies that the ERLE is not immediately decreased when the ERLE in the
+  // data decreases.
+  Y2.fill(0.1f * E2[0]);
+  for (size_t k = 0; k < 98; ++k) {
+    estimator.Update(X2, Y2, E2);
+  }
+  VerifyErle(estimator.Erle(), estimator.ErleTimeDomain(), 8.f, 1.5f);
+
+  // Verifies that the minimum ERLE is eventually achieved.
+  for (size_t k = 0; k < 1000; ++k) {
+    estimator.Update(X2, Y2, E2);
+  }
+  VerifyErle(estimator.Erle(), estimator.ErleTimeDomain(), 1.f, 1.f);
+
+  // Verifies that the ERLE estimate is is not updated for low-level render
+  // signals.
+  X2.fill(1000.f * 1000.f);
+  Y2.fill(10 * E2[0]);
+  for (size_t k = 0; k < 200; ++k) {
+    estimator.Update(X2, Y2, E2);
+  }
+  VerifyErle(estimator.Erle(), estimator.ErleTimeDomain(), 1.f, 1.f);
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/fft_buffer.cc b/modules/audio_processing/aec3/fft_buffer.cc
new file mode 100644
index 0000000..379ef7c
--- /dev/null
+++ b/modules/audio_processing/aec3/fft_buffer.cc
@@ -0,0 +1,23 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/fft_buffer.h"
+
+namespace webrtc {
+
+FftBuffer::FftBuffer(size_t size) : size(static_cast<int>(size)), buffer(size) {
+  for (auto& b : buffer) {
+    b.Clear();
+  }
+}
+
+FftBuffer::~FftBuffer() = default;
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/fft_buffer.h b/modules/audio_processing/aec3/fft_buffer.h
new file mode 100644
index 0000000..47ede41
--- /dev/null
+++ b/modules/audio_processing/aec3/fft_buffer.h
@@ -0,0 +1,58 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_FFT_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_FFT_BUFFER_H_
+
+#include <vector>
+
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Struct for bundling a circular buffer of FftData objects together with the
+// read and write indices.
+struct FftBuffer {
+  explicit FftBuffer(size_t size);
+  ~FftBuffer();
+
+  int IncIndex(int index) const {
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    return index < size - 1 ? index + 1 : 0;
+  }
+
+  int DecIndex(int index) const {
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    return index > 0 ? index - 1 : size - 1;
+  }
+
+  int OffsetIndex(int index, int offset) const {
+    RTC_DCHECK_GE(buffer.size(), offset);
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    return (size + index + offset) % size;
+  }
+
+  void UpdateWriteIndex(int offset) { write = OffsetIndex(write, offset); }
+  void IncWriteIndex() { write = IncIndex(write); }
+  void DecWriteIndex() { write = DecIndex(write); }
+  void UpdateReadIndex(int offset) { read = OffsetIndex(read, offset); }
+  void IncReadIndex() { read = IncIndex(read); }
+  void DecReadIndex() { read = DecIndex(read); }
+
+  const int size;
+  std::vector<FftData> buffer;
+  int write = 0;
+  int read = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_FFT_BUFFER_H_
diff --git a/modules/audio_processing/aec3/fft_data.h b/modules/audio_processing/aec3/fft_data.h
new file mode 100644
index 0000000..59511b5
--- /dev/null
+++ b/modules/audio_processing/aec3/fft_data.h
@@ -0,0 +1,96 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_FFT_DATA_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_FFT_DATA_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+#include <algorithm>
+#include <array>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+
+namespace webrtc {
+
+// Struct that holds imaginary data produced from 128 point real-valued FFTs.
+struct FftData {
+  // Copies the data in src.
+  void Assign(const FftData& src) {
+    std::copy(src.re.begin(), src.re.end(), re.begin());
+    std::copy(src.im.begin(), src.im.end(), im.begin());
+    im[0] = im[kFftLengthBy2] = 0;
+  }
+
+  // Clears all the imaginary.
+  void Clear() {
+    re.fill(0.f);
+    im.fill(0.f);
+  }
+
+  // Computes the power spectrum of the data.
+  void Spectrum(Aec3Optimization optimization,
+                rtc::ArrayView<float> power_spectrum) const {
+    RTC_DCHECK_EQ(kFftLengthBy2Plus1, power_spectrum.size());
+    switch (optimization) {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+      case Aec3Optimization::kSse2: {
+        constexpr int kNumFourBinBands = kFftLengthBy2 / 4;
+        constexpr int kLimit = kNumFourBinBands * 4;
+        for (size_t k = 0; k < kLimit; k += 4) {
+          const __m128 r = _mm_loadu_ps(&re[k]);
+          const __m128 i = _mm_loadu_ps(&im[k]);
+          const __m128 ii = _mm_mul_ps(i, i);
+          const __m128 rr = _mm_mul_ps(r, r);
+          const __m128 rrii = _mm_add_ps(rr, ii);
+          _mm_storeu_ps(&power_spectrum[k], rrii);
+        }
+        power_spectrum[kFftLengthBy2] = re[kFftLengthBy2] * re[kFftLengthBy2] +
+                                        im[kFftLengthBy2] * im[kFftLengthBy2];
+      } break;
+#endif
+      default:
+        std::transform(re.begin(), re.end(), im.begin(), power_spectrum.begin(),
+                       [](float a, float b) { return a * a + b * b; });
+    }
+  }
+
+  // Copy the data from an interleaved array.
+  void CopyFromPackedArray(const std::array<float, kFftLength>& v) {
+    re[0] = v[0];
+    re[kFftLengthBy2] = v[1];
+    im[0] = im[kFftLengthBy2] = 0;
+    for (size_t k = 1, j = 2; k < kFftLengthBy2; ++k) {
+      re[k] = v[j++];
+      im[k] = v[j++];
+    }
+  }
+
+  // Copies the data into an interleaved array.
+  void CopyToPackedArray(std::array<float, kFftLength>* v) const {
+    RTC_DCHECK(v);
+    (*v)[0] = re[0];
+    (*v)[1] = re[kFftLengthBy2];
+    for (size_t k = 1, j = 2; k < kFftLengthBy2; ++k) {
+      (*v)[j++] = re[k];
+      (*v)[j++] = im[k];
+    }
+  }
+
+  std::array<float, kFftLengthBy2Plus1> re;
+  std::array<float, kFftLengthBy2Plus1> im;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_FFT_DATA_H_
diff --git a/modules/audio_processing/aec3/fft_data_unittest.cc b/modules/audio_processing/aec3/fft_data_unittest.cc
new file mode 100644
index 0000000..8fc5ca7
--- /dev/null
+++ b/modules/audio_processing/aec3/fft_data_unittest.cc
@@ -0,0 +1,163 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/fft_data.h"
+
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+// Verifies that the optimized methods are bitexact to their reference
+// counterparts.
+TEST(FftData, TestOptimizations) {
+  if (WebRtc_GetCPUInfo(kSSE2) != 0) {
+    FftData x;
+
+    for (size_t k = 0; k < x.re.size(); ++k) {
+      x.re[k] = k + 1;
+    }
+
+    x.im[0] = x.im[x.im.size() - 1] = 0.f;
+    for (size_t k = 1; k < x.im.size() - 1; ++k) {
+      x.im[k] = 2.f * (k + 1);
+    }
+
+    std::array<float, kFftLengthBy2Plus1> spectrum;
+    std::array<float, kFftLengthBy2Plus1> spectrum_sse2;
+    x.Spectrum(Aec3Optimization::kNone, spectrum);
+    x.Spectrum(Aec3Optimization::kSse2, spectrum_sse2);
+    EXPECT_EQ(spectrum, spectrum_sse2);
+  }
+}
+#endif
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for null output in CopyToPackedArray.
+TEST(FftData, NonNullCopyToPackedArrayOutput) {
+  EXPECT_DEATH(FftData().CopyToPackedArray(nullptr), "");
+}
+
+// Verifies the check for null output in Spectrum.
+TEST(FftData, NonNullSpectrumOutput) {
+  EXPECT_DEATH(FftData().Spectrum(Aec3Optimization::kNone, nullptr), "");
+}
+
+#endif
+
+// Verifies that the Assign method properly copies the data from the source and
+// ensures that the imaginary components for the DC and Nyquist bins are 0.
+TEST(FftData, Assign) {
+  FftData x;
+  FftData y;
+
+  x.re.fill(1.f);
+  x.im.fill(2.f);
+  y.Assign(x);
+  EXPECT_EQ(x.re, y.re);
+  EXPECT_EQ(0.f, y.im[0]);
+  EXPECT_EQ(0.f, y.im[x.im.size() - 1]);
+  for (size_t k = 1; k < x.im.size() - 1; ++k) {
+    EXPECT_EQ(x.im[k], y.im[k]);
+  }
+}
+
+// Verifies that the Clear method properly clears all the data.
+TEST(FftData, Clear) {
+  FftData x_ref;
+  FftData x;
+
+  x_ref.re.fill(0.f);
+  x_ref.im.fill(0.f);
+
+  x.re.fill(1.f);
+  x.im.fill(2.f);
+  x.Clear();
+
+  EXPECT_EQ(x_ref.re, x.re);
+  EXPECT_EQ(x_ref.im, x.im);
+}
+
+// Verifies that the spectrum is correctly computed.
+TEST(FftData, Spectrum) {
+  FftData x;
+
+  for (size_t k = 0; k < x.re.size(); ++k) {
+    x.re[k] = k + 1;
+  }
+
+  x.im[0] = x.im[x.im.size() - 1] = 0.f;
+  for (size_t k = 1; k < x.im.size() - 1; ++k) {
+    x.im[k] = 2.f * (k + 1);
+  }
+
+  std::array<float, kFftLengthBy2Plus1> spectrum;
+  x.Spectrum(Aec3Optimization::kNone, spectrum);
+
+  EXPECT_EQ(x.re[0] * x.re[0], spectrum[0]);
+  EXPECT_EQ(x.re[spectrum.size() - 1] * x.re[spectrum.size() - 1],
+            spectrum[spectrum.size() - 1]);
+  for (size_t k = 1; k < spectrum.size() - 1; ++k) {
+    EXPECT_EQ(x.re[k] * x.re[k] + x.im[k] * x.im[k], spectrum[k]);
+  }
+}
+
+// Verifies that the functionality in CopyToPackedArray works as intended.
+TEST(FftData, CopyToPackedArray) {
+  FftData x;
+  std::array<float, kFftLength> x_packed;
+
+  for (size_t k = 0; k < x.re.size(); ++k) {
+    x.re[k] = k + 1;
+  }
+
+  x.im[0] = x.im[x.im.size() - 1] = 0.f;
+  for (size_t k = 1; k < x.im.size() - 1; ++k) {
+    x.im[k] = 2.f * (k + 1);
+  }
+
+  x.CopyToPackedArray(&x_packed);
+
+  EXPECT_EQ(x.re[0], x_packed[0]);
+  EXPECT_EQ(x.re[x.re.size() - 1], x_packed[1]);
+  for (size_t k = 1; k < x_packed.size() / 2; ++k) {
+    EXPECT_EQ(x.re[k], x_packed[2 * k]);
+    EXPECT_EQ(x.im[k], x_packed[2 * k + 1]);
+  }
+}
+
+// Verifies that the functionality in CopyFromPackedArray works as intended
+// (relies on that the functionality in CopyToPackedArray has been verified in
+// the test above).
+TEST(FftData, CopyFromPackedArray) {
+  FftData x_ref;
+  FftData x;
+  std::array<float, kFftLength> x_packed;
+
+  for (size_t k = 0; k < x_ref.re.size(); ++k) {
+    x_ref.re[k] = k + 1;
+  }
+
+  x_ref.im[0] = x_ref.im[x_ref.im.size() - 1] = 0.f;
+  for (size_t k = 1; k < x_ref.im.size() - 1; ++k) {
+    x_ref.im[k] = 2.f * (k + 1);
+  }
+
+  x_ref.CopyToPackedArray(&x_packed);
+  x.CopyFromPackedArray(x_packed);
+
+  EXPECT_EQ(x_ref.re, x.re);
+  EXPECT_EQ(x_ref.im, x.im);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/frame_blocker.cc b/modules/audio_processing/aec3/frame_blocker.cc
new file mode 100644
index 0000000..0a0c0e2
--- /dev/null
+++ b/modules/audio_processing/aec3/frame_blocker.cc
@@ -0,0 +1,71 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/frame_blocker.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+FrameBlocker::FrameBlocker(size_t num_bands)
+    : num_bands_(num_bands), buffer_(num_bands_) {
+  for (auto& b : buffer_) {
+    b.reserve(kBlockSize);
+    RTC_DCHECK(b.empty());
+  }
+}
+
+FrameBlocker::~FrameBlocker() = default;
+
+void FrameBlocker::InsertSubFrameAndExtractBlock(
+    const std::vector<rtc::ArrayView<float>>& sub_frame,
+    std::vector<std::vector<float>>* block) {
+  RTC_DCHECK(block);
+  RTC_DCHECK_EQ(num_bands_, block->size());
+  RTC_DCHECK_EQ(num_bands_, sub_frame.size());
+  for (size_t i = 0; i < num_bands_; ++i) {
+    RTC_DCHECK_GE(kBlockSize - 16, buffer_[i].size());
+    RTC_DCHECK_EQ(kBlockSize, (*block)[i].size());
+    RTC_DCHECK_EQ(kSubFrameLength, sub_frame[i].size());
+    const int samples_to_block = kBlockSize - buffer_[i].size();
+    (*block)[i].clear();
+    (*block)[i].insert((*block)[i].begin(), buffer_[i].begin(),
+                       buffer_[i].end());
+    (*block)[i].insert((*block)[i].begin() + buffer_[i].size(),
+                       sub_frame[i].begin(),
+                       sub_frame[i].begin() + samples_to_block);
+    buffer_[i].clear();
+    buffer_[i].insert(buffer_[i].begin(),
+                      sub_frame[i].begin() + samples_to_block,
+                      sub_frame[i].end());
+  }
+}
+
+bool FrameBlocker::IsBlockAvailable() const {
+  return kBlockSize == buffer_[0].size();
+}
+
+void FrameBlocker::ExtractBlock(std::vector<std::vector<float>>* block) {
+  RTC_DCHECK(block);
+  RTC_DCHECK_EQ(num_bands_, block->size());
+  RTC_DCHECK(IsBlockAvailable());
+  for (size_t i = 0; i < num_bands_; ++i) {
+    RTC_DCHECK_EQ(kBlockSize, buffer_[i].size());
+    RTC_DCHECK_EQ(kBlockSize, (*block)[i].size());
+    (*block)[i].clear();
+    (*block)[i].insert((*block)[i].begin(), buffer_[i].begin(),
+                       buffer_[i].end());
+    buffer_[i].clear();
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/frame_blocker.h b/modules/audio_processing/aec3/frame_blocker.h
new file mode 100644
index 0000000..08e1e1d
--- /dev/null
+++ b/modules/audio_processing/aec3/frame_blocker.h
@@ -0,0 +1,48 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_FRAME_BLOCKER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_FRAME_BLOCKER_H_
+
+#include <stddef.h>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Class for producing 64 sample multiband blocks from frames consisting of 1 or
+// 2 subframes of 80 samples.
+class FrameBlocker {
+ public:
+  explicit FrameBlocker(size_t num_bands);
+  ~FrameBlocker();
+  // Inserts one 80 sample multiband subframe from the multiband frame and
+  // extracts one 64 sample multiband block.
+  void InsertSubFrameAndExtractBlock(
+      const std::vector<rtc::ArrayView<float>>& sub_frame,
+      std::vector<std::vector<float>>* block);
+  // Reports whether a multiband block of 64 samples is available for
+  // extraction.
+  bool IsBlockAvailable() const;
+  // Extracts a multiband block of 64 samples.
+  void ExtractBlock(std::vector<std::vector<float>>* block);
+
+ private:
+  const size_t num_bands_;
+  std::vector<std::vector<float>> buffer_;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(FrameBlocker);
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_FRAME_BLOCKER_H_
diff --git a/modules/audio_processing/aec3/frame_blocker_unittest.cc b/modules/audio_processing/aec3/frame_blocker_unittest.cc
new file mode 100644
index 0000000..6e73d4b
--- /dev/null
+++ b/modules/audio_processing/aec3/frame_blocker_unittest.cc
@@ -0,0 +1,341 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/frame_blocker.h"
+
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/block_framer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+float ComputeSampleValue(size_t chunk_counter,
+                         size_t chunk_size,
+                         size_t band,
+                         size_t sample_index,
+                         int offset) {
+  float value =
+      static_cast<int>(chunk_counter * chunk_size + sample_index) + offset;
+  return value > 0 ? 5000 * band + value : 0;
+}
+
+void FillSubFrame(size_t sub_frame_counter,
+                  int offset,
+                  std::vector<std::vector<float>>* sub_frame) {
+  for (size_t k = 0; k < sub_frame->size(); ++k) {
+    for (size_t i = 0; i < (*sub_frame)[0].size(); ++i) {
+      (*sub_frame)[k][i] =
+          ComputeSampleValue(sub_frame_counter, kSubFrameLength, k, i, offset);
+    }
+  }
+}
+
+void FillSubFrameView(size_t sub_frame_counter,
+                      int offset,
+                      std::vector<std::vector<float>>* sub_frame,
+                      std::vector<rtc::ArrayView<float>>* sub_frame_view) {
+  FillSubFrame(sub_frame_counter, offset, sub_frame);
+  for (size_t k = 0; k < sub_frame_view->size(); ++k) {
+    (*sub_frame_view)[k] =
+        rtc::ArrayView<float>(&(*sub_frame)[k][0], (*sub_frame)[k].size());
+  }
+}
+
+bool VerifySubFrame(size_t sub_frame_counter,
+                    int offset,
+                    const std::vector<rtc::ArrayView<float>>& sub_frame_view) {
+  std::vector<std::vector<float>> reference_sub_frame(
+      sub_frame_view.size(), std::vector<float>(sub_frame_view[0].size(), 0.f));
+  FillSubFrame(sub_frame_counter, offset, &reference_sub_frame);
+  for (size_t k = 0; k < sub_frame_view.size(); ++k) {
+    for (size_t i = 0; i < sub_frame_view[k].size(); ++i) {
+      if (reference_sub_frame[k][i] != sub_frame_view[k][i]) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+bool VerifyBlock(size_t block_counter,
+                 int offset,
+                 const std::vector<std::vector<float>>& block) {
+  for (size_t k = 0; k < block.size(); ++k) {
+    for (size_t i = 0; i < block[k].size(); ++i) {
+      const float reference_value =
+          ComputeSampleValue(block_counter, kBlockSize, k, i, offset);
+      if (reference_value != block[k][i]) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+// Verifies that the FrameBlocker properly forms blocks out of the frames.
+void RunBlockerTest(int sample_rate_hz) {
+  constexpr size_t kNumSubFramesToProcess = 20;
+  const size_t num_bands = NumBandsForRate(sample_rate_hz);
+
+  std::vector<std::vector<float>> block(num_bands,
+                                        std::vector<float>(kBlockSize, 0.f));
+  std::vector<std::vector<float>> input_sub_frame(
+      num_bands, std::vector<float>(kSubFrameLength, 0.f));
+  std::vector<rtc::ArrayView<float>> input_sub_frame_view(num_bands);
+  FrameBlocker blocker(num_bands);
+
+  size_t block_counter = 0;
+  for (size_t sub_frame_index = 0; sub_frame_index < kNumSubFramesToProcess;
+       ++sub_frame_index) {
+    FillSubFrameView(sub_frame_index, 0, &input_sub_frame,
+                     &input_sub_frame_view);
+
+    blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &block);
+    VerifyBlock(block_counter++, 0, block);
+
+    if ((sub_frame_index + 1) % 4 == 0) {
+      EXPECT_TRUE(blocker.IsBlockAvailable());
+    } else {
+      EXPECT_FALSE(blocker.IsBlockAvailable());
+    }
+    if (blocker.IsBlockAvailable()) {
+      blocker.ExtractBlock(&block);
+      VerifyBlock(block_counter++, 0, block);
+    }
+  }
+}
+
+// Verifies that the FrameBlocker and BlockFramer work well together and produce
+// the expected output.
+void RunBlockerAndFramerTest(int sample_rate_hz) {
+  const size_t kNumSubFramesToProcess = 20;
+  const size_t num_bands = NumBandsForRate(sample_rate_hz);
+
+  std::vector<std::vector<float>> block(num_bands,
+                                        std::vector<float>(kBlockSize, 0.f));
+  std::vector<std::vector<float>> input_sub_frame(
+      num_bands, std::vector<float>(kSubFrameLength, 0.f));
+  std::vector<std::vector<float>> output_sub_frame(
+      num_bands, std::vector<float>(kSubFrameLength, 0.f));
+  std::vector<rtc::ArrayView<float>> output_sub_frame_view(num_bands);
+  std::vector<rtc::ArrayView<float>> input_sub_frame_view(num_bands);
+  FrameBlocker blocker(num_bands);
+  BlockFramer framer(num_bands);
+
+  for (size_t sub_frame_index = 0; sub_frame_index < kNumSubFramesToProcess;
+       ++sub_frame_index) {
+    FillSubFrameView(sub_frame_index, 0, &input_sub_frame,
+                     &input_sub_frame_view);
+    FillSubFrameView(sub_frame_index, 0, &output_sub_frame,
+                     &output_sub_frame_view);
+
+    blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &block);
+    framer.InsertBlockAndExtractSubFrame(block, &output_sub_frame_view);
+
+    if ((sub_frame_index + 1) % 4 == 0) {
+      EXPECT_TRUE(blocker.IsBlockAvailable());
+    } else {
+      EXPECT_FALSE(blocker.IsBlockAvailable());
+    }
+    if (blocker.IsBlockAvailable()) {
+      blocker.ExtractBlock(&block);
+      framer.InsertBlock(block);
+    }
+    EXPECT_TRUE(VerifySubFrame(sub_frame_index, -64, output_sub_frame_view));
+  }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// Verifies that the FrameBlocker crashes if the InsertSubFrameAndExtractBlock
+// method is called for inputs with the wrong number of bands or band lengths.
+void RunWronglySizedInsertAndExtractParametersTest(int sample_rate_hz,
+                                                   size_t num_block_bands,
+                                                   size_t block_length,
+                                                   size_t num_sub_frame_bands,
+                                                   size_t sub_frame_length) {
+  const size_t correct_num_bands = NumBandsForRate(sample_rate_hz);
+
+  std::vector<std::vector<float>> block(num_block_bands,
+                                        std::vector<float>(block_length, 0.f));
+  std::vector<std::vector<float>> input_sub_frame(
+      num_sub_frame_bands, std::vector<float>(sub_frame_length, 0.f));
+  std::vector<rtc::ArrayView<float>> input_sub_frame_view(
+      input_sub_frame.size());
+  FillSubFrameView(0, 0, &input_sub_frame, &input_sub_frame_view);
+  FrameBlocker blocker(correct_num_bands);
+  EXPECT_DEATH(
+      blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &block), "");
+}
+
+// Verifies that the FrameBlocker crashes if the ExtractBlock method is called
+// for inputs with the wrong number of bands or band lengths.
+void RunWronglySizedExtractParameterTest(int sample_rate_hz,
+                                         size_t num_block_bands,
+                                         size_t block_length) {
+  const size_t correct_num_bands = NumBandsForRate(sample_rate_hz);
+
+  std::vector<std::vector<float>> correct_block(
+      correct_num_bands, std::vector<float>(kBlockSize, 0.f));
+  std::vector<std::vector<float>> wrong_block(
+      num_block_bands, std::vector<float>(block_length, 0.f));
+  std::vector<std::vector<float>> input_sub_frame(
+      correct_num_bands, std::vector<float>(kSubFrameLength, 0.f));
+  std::vector<rtc::ArrayView<float>> input_sub_frame_view(
+      input_sub_frame.size());
+  FillSubFrameView(0, 0, &input_sub_frame, &input_sub_frame_view);
+  FrameBlocker blocker(correct_num_bands);
+  blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &correct_block);
+  blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &correct_block);
+  blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &correct_block);
+  blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &correct_block);
+
+  EXPECT_DEATH(blocker.ExtractBlock(&wrong_block), "");
+}
+
+// Verifies that the FrameBlocker crashes if the ExtractBlock method is called
+// after a wrong number of previous InsertSubFrameAndExtractBlock method calls
+// have been made.
+void RunWrongExtractOrderTest(int sample_rate_hz,
+                              size_t num_preceeding_api_calls) {
+  const size_t correct_num_bands = NumBandsForRate(sample_rate_hz);
+
+  std::vector<std::vector<float>> block(correct_num_bands,
+                                        std::vector<float>(kBlockSize, 0.f));
+  std::vector<std::vector<float>> input_sub_frame(
+      correct_num_bands, std::vector<float>(kSubFrameLength, 0.f));
+  std::vector<rtc::ArrayView<float>> input_sub_frame_view(
+      input_sub_frame.size());
+  FillSubFrameView(0, 0, &input_sub_frame, &input_sub_frame_view);
+  FrameBlocker blocker(correct_num_bands);
+  for (size_t k = 0; k < num_preceeding_api_calls; ++k) {
+    blocker.InsertSubFrameAndExtractBlock(input_sub_frame_view, &block);
+  }
+
+  EXPECT_DEATH(blocker.ExtractBlock(&block), "");
+}
+#endif
+
+std::string ProduceDebugText(int sample_rate_hz) {
+  std::ostringstream ss;
+  ss << "Sample rate: " << sample_rate_hz;
+  return ss.str();
+}
+
+}  // namespace
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST(FrameBlocker, WrongNumberOfBandsInBlockForInsertSubFrameAndExtractBlock) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    const size_t correct_num_bands = NumBandsForRate(rate);
+    const size_t wrong_num_bands = (correct_num_bands % 3) + 1;
+    RunWronglySizedInsertAndExtractParametersTest(
+        rate, wrong_num_bands, kBlockSize, correct_num_bands, kSubFrameLength);
+  }
+}
+
+TEST(FrameBlocker,
+     WrongNumberOfBandsInSubFrameForInsertSubFrameAndExtractBlock) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    const size_t correct_num_bands = NumBandsForRate(rate);
+    const size_t wrong_num_bands = (correct_num_bands % 3) + 1;
+    RunWronglySizedInsertAndExtractParametersTest(
+        rate, correct_num_bands, kBlockSize, wrong_num_bands, kSubFrameLength);
+  }
+}
+
+TEST(FrameBlocker,
+     WrongNumberOfSamplesInBlockForInsertSubFrameAndExtractBlock) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    const size_t correct_num_bands = NumBandsForRate(rate);
+    RunWronglySizedInsertAndExtractParametersTest(
+        rate, correct_num_bands, kBlockSize - 1, correct_num_bands,
+        kSubFrameLength);
+  }
+}
+
+TEST(FrameBlocker,
+     WrongNumberOfSamplesInSubFrameForInsertSubFrameAndExtractBlock) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    const size_t correct_num_bands = NumBandsForRate(rate);
+    RunWronglySizedInsertAndExtractParametersTest(rate, correct_num_bands,
+                                                  kBlockSize, correct_num_bands,
+                                                  kSubFrameLength - 1);
+  }
+}
+
+TEST(FrameBlocker, WrongNumberOfBandsInBlockForExtractBlock) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    const size_t correct_num_bands = NumBandsForRate(rate);
+    const size_t wrong_num_bands = (correct_num_bands % 3) + 1;
+    RunWronglySizedExtractParameterTest(rate, wrong_num_bands, kBlockSize);
+  }
+}
+
+TEST(FrameBlocker, WrongNumberOfSamplesInBlockForExtractBlock) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    const size_t correct_num_bands = NumBandsForRate(rate);
+    RunWronglySizedExtractParameterTest(rate, correct_num_bands,
+                                        kBlockSize - 1);
+  }
+}
+
+TEST(FrameBlocker, WrongNumberOfPreceedingApiCallsForExtractBlock) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    for (size_t num_calls = 0; num_calls < 4; ++num_calls) {
+      std::ostringstream ss;
+      ss << "Sample rate: " << rate;
+      ss << ", Num preceeding InsertSubFrameAndExtractBlock calls: "
+         << num_calls;
+
+      SCOPED_TRACE(ss.str());
+      RunWrongExtractOrderTest(rate, num_calls);
+    }
+  }
+}
+
+// Verifiers that the verification for null sub_frame pointer works.
+TEST(FrameBlocker, NullBlockParameter) {
+  std::vector<std::vector<float>> sub_frame(
+      1, std::vector<float>(kSubFrameLength, 0.f));
+  std::vector<rtc::ArrayView<float>> sub_frame_view(sub_frame.size());
+  FillSubFrameView(0, 0, &sub_frame, &sub_frame_view);
+  EXPECT_DEATH(
+      FrameBlocker(1).InsertSubFrameAndExtractBlock(sub_frame_view, nullptr),
+      "");
+}
+
+#endif
+
+TEST(FrameBlocker, BlockBitexactness) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    RunBlockerTest(rate);
+  }
+}
+
+TEST(FrameBlocker, BlockerAndFramer) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    RunBlockerAndFramerTest(rate);
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/main_filter_update_gain.cc b/modules/audio_processing/aec3/main_filter_update_gain.cc
new file mode 100644
index 0000000..6aa5780
--- /dev/null
+++ b/modules/audio_processing/aec3/main_filter_update_gain.cc
@@ -0,0 +1,157 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/main_filter_update_gain.h"
+
+#include <algorithm>
+#include <functional>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/atomicops.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+constexpr float kHErrorInitial = 10000.f;
+constexpr int kPoorExcitationCounterInitial = 1000;
+
+}  // namespace
+
+int MainFilterUpdateGain::instance_count_ = 0;
+
+MainFilterUpdateGain::MainFilterUpdateGain(
+    const EchoCanceller3Config::Filter::MainConfiguration& config,
+    size_t config_change_duration_blocks)
+    : data_dumper_(
+          new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))),
+      config_change_duration_blocks_(
+          static_cast<int>(config_change_duration_blocks)),
+      poor_excitation_counter_(kPoorExcitationCounterInitial) {
+  SetConfig(config, true);
+  H_error_.fill(kHErrorInitial);
+  RTC_DCHECK_LT(0, config_change_duration_blocks_);
+  one_by_config_change_duration_blocks_ = 1.f / config_change_duration_blocks_;
+}
+
+MainFilterUpdateGain::~MainFilterUpdateGain() {}
+
+void MainFilterUpdateGain::HandleEchoPathChange(
+    const EchoPathVariability& echo_path_variability) {
+  // TODO(peah): Add even-specific behavior.
+  H_error_.fill(kHErrorInitial);
+  poor_excitation_counter_ = kPoorExcitationCounterInitial;
+  call_counter_ = 0;
+}
+
+void MainFilterUpdateGain::Compute(
+    const std::array<float, kFftLengthBy2Plus1>& render_power,
+    const RenderSignalAnalyzer& render_signal_analyzer,
+    const SubtractorOutput& subtractor_output,
+    const AdaptiveFirFilter& filter,
+    bool saturated_capture_signal,
+    FftData* gain_fft) {
+  RTC_DCHECK(gain_fft);
+  // Introducing shorter notation to improve readability.
+  const FftData& E_main = subtractor_output.E_main;
+  const auto& E2_main = subtractor_output.E2_main;
+  const auto& E2_shadow = subtractor_output.E2_shadow;
+  FftData* G = gain_fft;
+  const size_t size_partitions = filter.SizePartitions();
+  auto X2 = render_power;
+  const auto& erl = filter.Erl();
+  ++call_counter_;
+
+  UpdateCurrentConfig();
+
+  if (render_signal_analyzer.PoorSignalExcitation()) {
+    poor_excitation_counter_ = 0;
+  }
+
+  // Do not update the filter if the render is not sufficiently excited.
+  if (++poor_excitation_counter_ < size_partitions ||
+      saturated_capture_signal || call_counter_ <= size_partitions) {
+    G->re.fill(0.f);
+    G->im.fill(0.f);
+  } else {
+    // Corresponds to WGN of power -39 dBFS.
+    std::array<float, kFftLengthBy2Plus1> mu;
+    // mu = H_error / (0.5* H_error* X2 + n * E2).
+    for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+      mu[k] = X2[k] > current_config_.noise_gate
+                  ? H_error_[k] / (0.5f * H_error_[k] * X2[k] +
+                                   size_partitions * E2_main[k])
+                  : 0.f;
+    }
+
+    // Avoid updating the filter close to narrow bands in the render signals.
+    render_signal_analyzer.MaskRegionsAroundNarrowBands(&mu);
+
+    // H_error = H_error - 0.5 * mu * X2 * H_error.
+    for (size_t k = 0; k < H_error_.size(); ++k) {
+      H_error_[k] -= 0.5f * mu[k] * X2[k] * H_error_[k];
+    }
+
+    // G = mu * E.
+    std::transform(mu.begin(), mu.end(), E_main.re.begin(), G->re.begin(),
+                   std::multiplies<float>());
+    std::transform(mu.begin(), mu.end(), E_main.im.begin(), G->im.begin(),
+                   std::multiplies<float>());
+  }
+
+  // H_error = H_error + factor * erl.
+  std::array<float, kFftLengthBy2Plus1> H_error_increase;
+  std::transform(E2_shadow.begin(), E2_shadow.end(), E2_main.begin(),
+                 H_error_increase.begin(), [&](float a, float b) {
+                   return a >= b ? current_config_.leakage_converged
+                                 : current_config_.leakage_diverged;
+                 });
+  std::transform(erl.begin(), erl.end(), H_error_increase.begin(),
+                 H_error_increase.begin(), std::multiplies<float>());
+  std::transform(H_error_.begin(), H_error_.end(), H_error_increase.begin(),
+                 H_error_.begin(), [&](float a, float b) {
+                   return std::max(a + b, current_config_.error_floor);
+                 });
+
+  data_dumper_->DumpRaw("aec3_main_gain_H_error", H_error_);
+}
+
+void MainFilterUpdateGain::UpdateCurrentConfig() {
+  RTC_DCHECK_GE(config_change_duration_blocks_, config_change_counter_);
+  if (config_change_counter_ > 0) {
+    if (--config_change_counter_ > 0) {
+      auto average = [](float from, float to, float from_weight) {
+        return from * from_weight + to * (1.f - from_weight);
+      };
+
+      float change_factor =
+          config_change_counter_ * one_by_config_change_duration_blocks_;
+
+      current_config_.leakage_converged =
+          average(old_target_config_.leakage_converged,
+                  target_config_.leakage_converged, change_factor);
+      current_config_.leakage_diverged =
+          average(old_target_config_.leakage_diverged,
+                  target_config_.leakage_diverged, change_factor);
+      current_config_.error_floor =
+          average(old_target_config_.error_floor, target_config_.error_floor,
+                  change_factor);
+      current_config_.noise_gate =
+          average(old_target_config_.noise_gate, target_config_.noise_gate,
+                  change_factor);
+    } else {
+      current_config_ = old_target_config_ = target_config_;
+    }
+  }
+  RTC_DCHECK_LE(0, config_change_counter_);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/main_filter_update_gain.h b/modules/audio_processing/aec3/main_filter_update_gain.h
new file mode 100644
index 0000000..525b522
--- /dev/null
+++ b/modules/audio_processing/aec3/main_filter_update_gain.h
@@ -0,0 +1,82 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_MAIN_FILTER_UPDATE_GAIN_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_MAIN_FILTER_UPDATE_GAIN_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/adaptive_fir_filter.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/echo_path_variability.h"
+#include "modules/audio_processing/aec3/render_signal_analyzer.h"
+#include "modules/audio_processing/aec3/subtractor_output.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+
+// Provides functionality for  computing the adaptive gain for the main filter.
+class MainFilterUpdateGain {
+ public:
+  explicit MainFilterUpdateGain(
+      const EchoCanceller3Config::Filter::MainConfiguration& config,
+      size_t config_change_duration_blocks);
+  ~MainFilterUpdateGain();
+
+  // Takes action in the case of a known echo path change.
+  void HandleEchoPathChange(const EchoPathVariability& echo_path_variability);
+
+  // Computes the gain.
+  void Compute(const std::array<float, kFftLengthBy2Plus1>& render_power,
+               const RenderSignalAnalyzer& render_signal_analyzer,
+               const SubtractorOutput& subtractor_output,
+               const AdaptiveFirFilter& filter,
+               bool saturated_capture_signal,
+               FftData* gain_fft);
+
+  // Sets a new config.
+  void SetConfig(const EchoCanceller3Config::Filter::MainConfiguration& config,
+                 bool immediate_effect) {
+    if (immediate_effect) {
+      old_target_config_ = current_config_ = target_config_ = config;
+      config_change_counter_ = 0;
+    } else {
+      old_target_config_ = current_config_;
+      target_config_ = config;
+      config_change_counter_ = config_change_duration_blocks_;
+    }
+  }
+
+ private:
+  static int instance_count_;
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  const int config_change_duration_blocks_;
+  float one_by_config_change_duration_blocks_;
+  EchoCanceller3Config::Filter::MainConfiguration current_config_;
+  EchoCanceller3Config::Filter::MainConfiguration target_config_;
+  EchoCanceller3Config::Filter::MainConfiguration old_target_config_;
+  std::array<float, kFftLengthBy2Plus1> H_error_;
+  size_t poor_excitation_counter_;
+  size_t call_counter_ = 0;
+  int config_change_counter_ = 0;
+
+  // Updates the current config towards the target config.
+  void UpdateCurrentConfig();
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(MainFilterUpdateGain);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_MAIN_FILTER_UPDATE_GAIN_H_
diff --git a/modules/audio_processing/aec3/main_filter_update_gain_unittest.cc b/modules/audio_processing/aec3/main_filter_update_gain_unittest.cc
new file mode 100644
index 0000000..13747d4
--- /dev/null
+++ b/modules/audio_processing/aec3/main_filter_update_gain_unittest.cc
@@ -0,0 +1,350 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/main_filter_update_gain.h"
+
+#include <algorithm>
+#include <numeric>
+#include <string>
+
+#include "modules/audio_processing/aec3/adaptive_fir_filter.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/aec3/render_signal_analyzer.h"
+#include "modules/audio_processing/aec3/shadow_filter_update_gain.h"
+#include "modules/audio_processing/aec3/subtractor_output.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "rtc_base/random.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+// Method for performing the simulations needed to test the main filter update
+// gain functionality.
+void RunFilterUpdateTest(int num_blocks_to_process,
+                         size_t delay_samples,
+                         int filter_length_blocks,
+                         const std::vector<int>& blocks_with_echo_path_changes,
+                         const std::vector<int>& blocks_with_saturation,
+                         bool use_silent_render_in_second_half,
+                         std::array<float, kBlockSize>* e_last_block,
+                         std::array<float, kBlockSize>* y_last_block,
+                         FftData* G_last_block) {
+  ApmDataDumper data_dumper(42);
+  EchoCanceller3Config config;
+  config.filter.main.length_blocks = filter_length_blocks;
+  config.filter.shadow.length_blocks = filter_length_blocks;
+  AdaptiveFirFilter main_filter(config.filter.main.length_blocks,
+                                config.filter.main.length_blocks,
+                                config.filter.config_change_duration_blocks,
+                                DetectOptimization(), &data_dumper);
+  AdaptiveFirFilter shadow_filter(config.filter.shadow.length_blocks,
+                                  config.filter.shadow.length_blocks,
+                                  config.filter.config_change_duration_blocks,
+                                  DetectOptimization(), &data_dumper);
+  Aec3Fft fft;
+  std::array<float, kBlockSize> x_old;
+  x_old.fill(0.f);
+  ShadowFilterUpdateGain shadow_gain(
+      config.filter.shadow, config.filter.config_change_duration_blocks);
+  MainFilterUpdateGain main_gain(config.filter.main,
+                                 config.filter.config_change_duration_blocks);
+  Random random_generator(42U);
+  std::vector<std::vector<float>> x(3, std::vector<float>(kBlockSize, 0.f));
+  std::vector<float> y(kBlockSize, 0.f);
+  config.delay.min_echo_path_delay_blocks = 0;
+  config.delay.default_delay = 1;
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(config, 3));
+  AecState aec_state(config);
+  RenderSignalAnalyzer render_signal_analyzer(config);
+  rtc::Optional<DelayEstimate> delay_estimate;
+  std::array<float, kFftLength> s_scratch;
+  std::array<float, kBlockSize> s;
+  FftData S;
+  FftData G;
+  SubtractorOutput output;
+  output.Reset();
+  FftData& E_main = output.E_main;
+  FftData E_shadow;
+  std::array<float, kFftLengthBy2Plus1> Y2;
+  std::array<float, kFftLengthBy2Plus1>& E2_main = output.E2_main;
+  std::array<float, kBlockSize>& e_main = output.e_main;
+  std::array<float, kBlockSize>& e_shadow = output.e_shadow;
+  Y2.fill(0.f);
+
+  constexpr float kScale = 1.0f / kFftLengthBy2;
+
+  DelayBuffer<float> delay_buffer(delay_samples);
+  for (int k = 0; k < num_blocks_to_process; ++k) {
+    // Handle echo path changes.
+    if (std::find(blocks_with_echo_path_changes.begin(),
+                  blocks_with_echo_path_changes.end(),
+                  k) != blocks_with_echo_path_changes.end()) {
+      main_filter.HandleEchoPathChange();
+    }
+
+    // Handle saturation.
+    const bool saturation =
+        std::find(blocks_with_saturation.begin(), blocks_with_saturation.end(),
+                  k) != blocks_with_saturation.end();
+
+    // Create the render signal.
+    if (use_silent_render_in_second_half && k > num_blocks_to_process / 2) {
+      std::fill(x[0].begin(), x[0].end(), 0.f);
+    } else {
+      RandomizeSampleVector(&random_generator, x[0]);
+    }
+    delay_buffer.Delay(x[0], y);
+
+    render_delay_buffer->Insert(x);
+    if (k == 0) {
+      render_delay_buffer->Reset();
+    }
+    render_delay_buffer->PrepareCaptureProcessing();
+
+    render_signal_analyzer.Update(*render_delay_buffer->GetRenderBuffer(),
+                                  aec_state.FilterDelay());
+
+    // Apply the main filter.
+    main_filter.Filter(*render_delay_buffer->GetRenderBuffer(), &S);
+    fft.Ifft(S, &s_scratch);
+    std::transform(y.begin(), y.end(), s_scratch.begin() + kFftLengthBy2,
+                   e_main.begin(),
+                   [&](float a, float b) { return a - b * kScale; });
+    std::for_each(e_main.begin(), e_main.end(),
+                  [](float& a) { a = rtc::SafeClamp(a, -32768.f, 32767.f); });
+    fft.ZeroPaddedFft(e_main, Aec3Fft::Window::kRectangular, &E_main);
+    for (size_t k = 0; k < kBlockSize; ++k) {
+      s[k] = kScale * s_scratch[k + kFftLengthBy2];
+    }
+
+    // Apply the shadow filter.
+    shadow_filter.Filter(*render_delay_buffer->GetRenderBuffer(), &S);
+    fft.Ifft(S, &s_scratch);
+    std::transform(y.begin(), y.end(), s_scratch.begin() + kFftLengthBy2,
+                   e_shadow.begin(),
+                   [&](float a, float b) { return a - b * kScale; });
+    std::for_each(e_shadow.begin(), e_shadow.end(),
+                  [](float& a) { a = rtc::SafeClamp(a, -32768.f, 32767.f); });
+    fft.ZeroPaddedFft(e_shadow, Aec3Fft::Window::kRectangular, &E_shadow);
+
+    // Compute spectra for future use.
+    E_main.Spectrum(Aec3Optimization::kNone, output.E2_main);
+    E_shadow.Spectrum(Aec3Optimization::kNone, output.E2_shadow);
+
+    // Adapt the shadow filter.
+    std::array<float, kFftLengthBy2Plus1> render_power;
+    render_delay_buffer->GetRenderBuffer()->SpectralSum(
+        shadow_filter.SizePartitions(), &render_power);
+    shadow_gain.Compute(render_power, render_signal_analyzer, E_shadow,
+                        shadow_filter.SizePartitions(), saturation, &G);
+    shadow_filter.Adapt(*render_delay_buffer->GetRenderBuffer(), G);
+
+    // Adapt the main filter
+    render_delay_buffer->GetRenderBuffer()->SpectralSum(
+        main_filter.SizePartitions(), &render_power);
+    main_gain.Compute(render_power, render_signal_analyzer, output, main_filter,
+                      saturation, &G);
+    main_filter.Adapt(*render_delay_buffer->GetRenderBuffer(), G);
+
+    // Update the delay.
+    aec_state.HandleEchoPathChange(EchoPathVariability(
+        false, EchoPathVariability::DelayAdjustment::kNone, false));
+    aec_state.Update(delay_estimate, main_filter.FilterFrequencyResponse(),
+                     main_filter.FilterImpulseResponse(), true,
+                     *render_delay_buffer->GetRenderBuffer(), E2_main, Y2, s,
+                     false);
+  }
+
+  std::copy(e_main.begin(), e_main.end(), e_last_block->begin());
+  std::copy(y.begin(), y.end(), y_last_block->begin());
+  std::copy(G.re.begin(), G.re.end(), G_last_block->re.begin());
+  std::copy(G.im.begin(), G.im.end(), G_last_block->im.begin());
+}
+
+std::string ProduceDebugText(int filter_length_blocks) {
+  std::ostringstream ss;
+  ss << "Length: " << filter_length_blocks;
+  return ss.str();
+}
+
+std::string ProduceDebugText(size_t delay, int filter_length_blocks) {
+  std::ostringstream ss;
+  ss << "Delay: " << delay << ", ";
+  ss << ProduceDebugText(filter_length_blocks);
+  return ss.str();
+}
+
+}  // namespace
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies that the check for non-null output gain parameter works.
+TEST(MainFilterUpdateGain, NullDataOutputGain) {
+  ApmDataDumper data_dumper(42);
+  EchoCanceller3Config config;
+  AdaptiveFirFilter filter(config.filter.main.length_blocks,
+                           config.filter.main.length_blocks,
+                           config.filter.config_change_duration_blocks,
+                           DetectOptimization(), &data_dumper);
+  RenderSignalAnalyzer analyzer(EchoCanceller3Config{});
+  SubtractorOutput output;
+  MainFilterUpdateGain gain(config.filter.main,
+                            config.filter.config_change_duration_blocks);
+  std::array<float, kFftLengthBy2Plus1> render_power;
+  render_power.fill(0.f);
+  EXPECT_DEATH(
+      gain.Compute(render_power, analyzer, output, filter, false, nullptr), "");
+}
+
+#endif
+
+// Verifies that the gain formed causes the filter using it to converge.
+TEST(MainFilterUpdateGain, GainCausesFilterToConverge) {
+  std::vector<int> blocks_with_echo_path_changes;
+  std::vector<int> blocks_with_saturation;
+  for (size_t filter_length_blocks : {12, 20, 30}) {
+    for (size_t delay_samples : {0, 64, 150, 200, 301}) {
+      SCOPED_TRACE(ProduceDebugText(delay_samples, filter_length_blocks));
+
+      std::array<float, kBlockSize> e;
+      std::array<float, kBlockSize> y;
+      FftData G;
+
+      RunFilterUpdateTest(500, delay_samples, filter_length_blocks,
+                          blocks_with_echo_path_changes, blocks_with_saturation,
+                          false, &e, &y, &G);
+
+      // Verify that the main filter is able to perform well.
+      // Use different criteria to take overmodelling into account.
+      if (filter_length_blocks == 12) {
+        EXPECT_LT(1000 * std::inner_product(e.begin(), e.end(), e.begin(), 0.f),
+                  std::inner_product(y.begin(), y.end(), y.begin(), 0.f));
+      } else {
+        EXPECT_LT(std::inner_product(e.begin(), e.end(), e.begin(), 0.f),
+                  std::inner_product(y.begin(), y.end(), y.begin(), 0.f));
+      }
+    }
+  }
+}
+
+// Verifies that the magnitude of the gain on average decreases for a
+// persistently exciting signal.
+TEST(MainFilterUpdateGain, DecreasingGain) {
+    std::vector<int> blocks_with_echo_path_changes;
+    std::vector<int> blocks_with_saturation;
+
+    std::array<float, kBlockSize> e;
+    std::array<float, kBlockSize> y;
+    FftData G_a;
+    FftData G_b;
+    FftData G_c;
+    std::array<float, kFftLengthBy2Plus1> G_a_power;
+    std::array<float, kFftLengthBy2Plus1> G_b_power;
+    std::array<float, kFftLengthBy2Plus1> G_c_power;
+
+    RunFilterUpdateTest(100, 65, 12, blocks_with_echo_path_changes,
+                        blocks_with_saturation, false, &e, &y, &G_a);
+    RunFilterUpdateTest(300, 65, 12, blocks_with_echo_path_changes,
+                        blocks_with_saturation, false, &e, &y, &G_b);
+    RunFilterUpdateTest(600, 65, 12, blocks_with_echo_path_changes,
+                        blocks_with_saturation, false, &e, &y, &G_c);
+
+    G_a.Spectrum(Aec3Optimization::kNone, G_a_power);
+    G_b.Spectrum(Aec3Optimization::kNone, G_b_power);
+    G_c.Spectrum(Aec3Optimization::kNone, G_c_power);
+
+    EXPECT_GT(std::accumulate(G_a_power.begin(), G_a_power.end(), 0.),
+              std::accumulate(G_b_power.begin(), G_b_power.end(), 0.));
+
+    EXPECT_GT(std::accumulate(G_b_power.begin(), G_b_power.end(), 0.),
+              std::accumulate(G_c_power.begin(), G_c_power.end(), 0.));
+}
+
+// Verifies that the gain is zero when there is saturation and that the internal
+// error estimates cause the gain to increase after a period of saturation.
+TEST(MainFilterUpdateGain, SaturationBehavior) {
+  std::vector<int> blocks_with_echo_path_changes;
+  std::vector<int> blocks_with_saturation;
+  for (int k = 99; k < 200; ++k) {
+    blocks_with_saturation.push_back(k);
+  }
+
+  for (size_t filter_length_blocks : {12, 20, 30}) {
+    SCOPED_TRACE(ProduceDebugText(filter_length_blocks));
+    std::array<float, kBlockSize> e;
+    std::array<float, kBlockSize> y;
+    FftData G_a;
+    FftData G_b;
+    FftData G_a_ref;
+    G_a_ref.re.fill(0.f);
+    G_a_ref.im.fill(0.f);
+
+    std::array<float, kFftLengthBy2Plus1> G_a_power;
+    std::array<float, kFftLengthBy2Plus1> G_b_power;
+
+    RunFilterUpdateTest(100, 65, filter_length_blocks,
+                        blocks_with_echo_path_changes, blocks_with_saturation,
+                        false, &e, &y, &G_a);
+
+    EXPECT_EQ(G_a_ref.re, G_a.re);
+    EXPECT_EQ(G_a_ref.im, G_a.im);
+
+    RunFilterUpdateTest(99, 65, filter_length_blocks,
+                        blocks_with_echo_path_changes, blocks_with_saturation,
+                        false, &e, &y, &G_a);
+    RunFilterUpdateTest(201, 65, filter_length_blocks,
+                        blocks_with_echo_path_changes, blocks_with_saturation,
+                        false, &e, &y, &G_b);
+
+    G_a.Spectrum(Aec3Optimization::kNone, G_a_power);
+    G_b.Spectrum(Aec3Optimization::kNone, G_b_power);
+
+    EXPECT_LT(std::accumulate(G_a_power.begin(), G_a_power.end(), 0.),
+              std::accumulate(G_b_power.begin(), G_b_power.end(), 0.));
+  }
+}
+
+// Verifies that the gain increases after an echo path change.
+// TODO(peah): Correct and reactivate this test.
+TEST(MainFilterUpdateGain, DISABLED_EchoPathChangeBehavior) {
+  for (size_t filter_length_blocks : {12, 20, 30}) {
+    SCOPED_TRACE(ProduceDebugText(filter_length_blocks));
+    std::vector<int> blocks_with_echo_path_changes;
+    std::vector<int> blocks_with_saturation;
+    blocks_with_echo_path_changes.push_back(99);
+
+    std::array<float, kBlockSize> e;
+    std::array<float, kBlockSize> y;
+    FftData G_a;
+    FftData G_b;
+    std::array<float, kFftLengthBy2Plus1> G_a_power;
+    std::array<float, kFftLengthBy2Plus1> G_b_power;
+
+    RunFilterUpdateTest(100, 65, filter_length_blocks,
+                        blocks_with_echo_path_changes, blocks_with_saturation,
+                        false, &e, &y, &G_a);
+    RunFilterUpdateTest(101, 65, filter_length_blocks,
+                        blocks_with_echo_path_changes, blocks_with_saturation,
+                        false, &e, &y, &G_b);
+
+    G_a.Spectrum(Aec3Optimization::kNone, G_a_power);
+    G_b.Spectrum(Aec3Optimization::kNone, G_b_power);
+
+    EXPECT_LT(std::accumulate(G_a_power.begin(), G_a_power.end(), 0.),
+              std::accumulate(G_b_power.begin(), G_b_power.end(), 0.));
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/matched_filter.cc b/modules/audio_processing/aec3/matched_filter.cc
new file mode 100644
index 0000000..466acd4
--- /dev/null
+++ b/modules/audio_processing/aec3/matched_filter.cc
@@ -0,0 +1,458 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/matched_filter.h"
+
+#if defined(WEBRTC_HAS_NEON)
+#include <arm_neon.h>
+#endif
+#include "typedefs.h"  // NOLINT(build/include)
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+#include <algorithm>
+#include <numeric>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace aec3 {
+
+#if defined(WEBRTC_HAS_NEON)
+
+void MatchedFilterCore_NEON(size_t x_start_index,
+                            float x2_sum_threshold,
+                            rtc::ArrayView<const float> x,
+                            rtc::ArrayView<const float> y,
+                            rtc::ArrayView<float> h,
+                            bool* filters_updated,
+                            float* error_sum) {
+  const int h_size = static_cast<int>(h.size());
+  const int x_size = static_cast<int>(x.size());
+  RTC_DCHECK_EQ(0, h_size % 4);
+
+  // Process for all samples in the sub-block.
+  for (size_t i = 0; i < y.size(); ++i) {
+    // Apply the matched filter as filter * x, and compute x * x.
+
+    RTC_DCHECK_GT(x_size, x_start_index);
+    const float* x_p = &x[x_start_index];
+    const float* h_p = &h[0];
+
+    // Initialize values for the accumulation.
+    float32x4_t s_128 = vdupq_n_f32(0);
+    float32x4_t x2_sum_128 = vdupq_n_f32(0);
+    float x2_sum = 0.f;
+    float s = 0;
+
+    // Compute loop chunk sizes until, and after, the wraparound of the circular
+    // buffer for x.
+    const int chunk1 =
+        std::min(h_size, static_cast<int>(x_size - x_start_index));
+
+    // Perform the loop in two chunks.
+    const int chunk2 = h_size - chunk1;
+    for (int limit : {chunk1, chunk2}) {
+      // Perform 128 bit vector operations.
+      const int limit_by_4 = limit >> 2;
+      for (int k = limit_by_4; k > 0; --k, h_p += 4, x_p += 4) {
+        // Load the data into 128 bit vectors.
+        const float32x4_t x_k = vld1q_f32(x_p);
+        const float32x4_t h_k = vld1q_f32(h_p);
+        // Compute and accumulate x * x and h * x.
+        x2_sum_128 = vmlaq_f32(x2_sum_128, x_k, x_k);
+        s_128 = vmlaq_f32(s_128, h_k, x_k);
+      }
+
+      // Perform non-vector operations for any remaining items.
+      for (int k = limit - limit_by_4 * 4; k > 0; --k, ++h_p, ++x_p) {
+        const float x_k = *x_p;
+        x2_sum += x_k * x_k;
+        s += *h_p * x_k;
+      }
+
+      x_p = &x[0];
+    }
+
+    // Combine the accumulated vector and scalar values.
+    float* v = reinterpret_cast<float*>(&x2_sum_128);
+    x2_sum += v[0] + v[1] + v[2] + v[3];
+    v = reinterpret_cast<float*>(&s_128);
+    s += v[0] + v[1] + v[2] + v[3];
+
+    // Compute the matched filter error.
+    float e = y[i] - s;
+    const bool saturation = y[i] >= 32000.f || y[i] <= -32000.f ||
+                            s >= 32000.f || s <= -32000.f || e >= 32000.f ||
+                            e <= -32000.f;
+
+    e = std::min(32767.f, std::max(-32768.f, e));
+    (*error_sum) += e * e;
+
+    // Update the matched filter estimate in an NLMS manner.
+    if (x2_sum > x2_sum_threshold && !saturation) {
+      RTC_DCHECK_LT(0.f, x2_sum);
+      const float alpha = 0.7f * e / x2_sum;
+      const float32x4_t alpha_128 = vmovq_n_f32(alpha);
+
+      // filter = filter + 0.7 * (y - filter * x) / x * x.
+      float* h_p = &h[0];
+      x_p = &x[x_start_index];
+
+      // Perform the loop in two chunks.
+      for (int limit : {chunk1, chunk2}) {
+        // Perform 128 bit vector operations.
+        const int limit_by_4 = limit >> 2;
+        for (int k = limit_by_4; k > 0; --k, h_p += 4, x_p += 4) {
+          // Load the data into 128 bit vectors.
+          float32x4_t h_k = vld1q_f32(h_p);
+          const float32x4_t x_k = vld1q_f32(x_p);
+          // Compute h = h + alpha * x.
+          h_k = vmlaq_f32(h_k, alpha_128, x_k);
+
+          // Store the result.
+          vst1q_f32(h_p, h_k);
+        }
+
+        // Perform non-vector operations for any remaining items.
+        for (int k = limit - limit_by_4 * 4; k > 0; --k, ++h_p, ++x_p) {
+          *h_p += alpha * *x_p;
+        }
+
+        x_p = &x[0];
+      }
+
+      *filters_updated = true;
+    }
+
+    x_start_index = x_start_index > 0 ? x_start_index - 1 : x_size - 1;
+  }
+}
+
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+
+void MatchedFilterCore_SSE2(size_t x_start_index,
+                            float x2_sum_threshold,
+                            rtc::ArrayView<const float> x,
+                            rtc::ArrayView<const float> y,
+                            rtc::ArrayView<float> h,
+                            bool* filters_updated,
+                            float* error_sum) {
+  const int h_size = static_cast<int>(h.size());
+  const int x_size = static_cast<int>(x.size());
+  RTC_DCHECK_EQ(0, h_size % 4);
+
+  // Process for all samples in the sub-block.
+  for (size_t i = 0; i < y.size(); ++i) {
+    // Apply the matched filter as filter * x, and compute x * x.
+
+    RTC_DCHECK_GT(x_size, x_start_index);
+    const float* x_p = &x[x_start_index];
+    const float* h_p = &h[0];
+
+    // Initialize values for the accumulation.
+    __m128 s_128 = _mm_set1_ps(0);
+    __m128 x2_sum_128 = _mm_set1_ps(0);
+    float x2_sum = 0.f;
+    float s = 0;
+
+    // Compute loop chunk sizes until, and after, the wraparound of the circular
+    // buffer for x.
+    const int chunk1 =
+        std::min(h_size, static_cast<int>(x_size - x_start_index));
+
+    // Perform the loop in two chunks.
+    const int chunk2 = h_size - chunk1;
+    for (int limit : {chunk1, chunk2}) {
+      // Perform 128 bit vector operations.
+      const int limit_by_4 = limit >> 2;
+      for (int k = limit_by_4; k > 0; --k, h_p += 4, x_p += 4) {
+        // Load the data into 128 bit vectors.
+        const __m128 x_k = _mm_loadu_ps(x_p);
+        const __m128 h_k = _mm_loadu_ps(h_p);
+        const __m128 xx = _mm_mul_ps(x_k, x_k);
+        // Compute and accumulate x * x and h * x.
+        x2_sum_128 = _mm_add_ps(x2_sum_128, xx);
+        const __m128 hx = _mm_mul_ps(h_k, x_k);
+        s_128 = _mm_add_ps(s_128, hx);
+      }
+
+      // Perform non-vector operations for any remaining items.
+      for (int k = limit - limit_by_4 * 4; k > 0; --k, ++h_p, ++x_p) {
+        const float x_k = *x_p;
+        x2_sum += x_k * x_k;
+        s += *h_p * x_k;
+      }
+
+      x_p = &x[0];
+    }
+
+    // Combine the accumulated vector and scalar values.
+    float* v = reinterpret_cast<float*>(&x2_sum_128);
+    x2_sum += v[0] + v[1] + v[2] + v[3];
+    v = reinterpret_cast<float*>(&s_128);
+    s += v[0] + v[1] + v[2] + v[3];
+
+    // Compute the matched filter error.
+    float e = y[i] - s;
+    const bool saturation = y[i] >= 32000.f || y[i] <= -32000.f ||
+                            s >= 32000.f || s <= -32000.f || e >= 32000.f ||
+                            e <= -32000.f;
+
+    e = std::min(32767.f, std::max(-32768.f, e));
+    (*error_sum) += e * e;
+
+    // Update the matched filter estimate in an NLMS manner.
+    if (x2_sum > x2_sum_threshold && !saturation) {
+      RTC_DCHECK_LT(0.f, x2_sum);
+      const float alpha = 0.7f * e / x2_sum;
+      const __m128 alpha_128 = _mm_set1_ps(alpha);
+
+      // filter = filter + 0.7 * (y - filter * x) / x * x.
+      float* h_p = &h[0];
+      x_p = &x[x_start_index];
+
+      // Perform the loop in two chunks.
+      for (int limit : {chunk1, chunk2}) {
+        // Perform 128 bit vector operations.
+        const int limit_by_4 = limit >> 2;
+        for (int k = limit_by_4; k > 0; --k, h_p += 4, x_p += 4) {
+          // Load the data into 128 bit vectors.
+          __m128 h_k = _mm_loadu_ps(h_p);
+          const __m128 x_k = _mm_loadu_ps(x_p);
+
+          // Compute h = h + alpha * x.
+          const __m128 alpha_x = _mm_mul_ps(alpha_128, x_k);
+          h_k = _mm_add_ps(h_k, alpha_x);
+
+          // Store the result.
+          _mm_storeu_ps(h_p, h_k);
+        }
+
+        // Perform non-vector operations for any remaining items.
+        for (int k = limit - limit_by_4 * 4; k > 0; --k, ++h_p, ++x_p) {
+          *h_p += alpha * *x_p;
+        }
+
+        x_p = &x[0];
+      }
+
+      *filters_updated = true;
+    }
+
+    x_start_index = x_start_index > 0 ? x_start_index - 1 : x_size - 1;
+  }
+}
+#endif
+
+void MatchedFilterCore(size_t x_start_index,
+                       float x2_sum_threshold,
+                       rtc::ArrayView<const float> x,
+                       rtc::ArrayView<const float> y,
+                       rtc::ArrayView<float> h,
+                       bool* filters_updated,
+                       float* error_sum) {
+  // Process for all samples in the sub-block.
+  for (size_t i = 0; i < y.size(); ++i) {
+    // Apply the matched filter as filter * x, and compute x * x.
+    float x2_sum = 0.f;
+    float s = 0;
+    size_t x_index = x_start_index;
+    for (size_t k = 0; k < h.size(); ++k) {
+      x2_sum += x[x_index] * x[x_index];
+      s += h[k] * x[x_index];
+      x_index = x_index < (x.size() - 1) ? x_index + 1 : 0;
+    }
+
+    // Compute the matched filter error.
+    float e = y[i] - s;
+    const bool saturation = y[i] >= 32000.f || y[i] <= -32000.f ||
+                            s >= 32000.f || s <= -32000.f || e >= 32000.f ||
+                            e <= -32000.f;
+
+    e = std::min(32767.f, std::max(-32768.f, e));
+    (*error_sum) += e * e;
+
+    // Update the matched filter estimate in an NLMS manner.
+    if (x2_sum > x2_sum_threshold && !saturation) {
+      RTC_DCHECK_LT(0.f, x2_sum);
+      const float alpha = 0.7f * e / x2_sum;
+
+      // filter = filter + 0.7 * (y - filter * x) / x * x.
+      size_t x_index = x_start_index;
+      for (size_t k = 0; k < h.size(); ++k) {
+        h[k] += alpha * x[x_index];
+        x_index = x_index < (x.size() - 1) ? x_index + 1 : 0;
+      }
+      *filters_updated = true;
+    }
+
+    x_start_index = x_start_index > 0 ? x_start_index - 1 : x.size() - 1;
+  }
+}
+
+}  // namespace aec3
+
+MatchedFilter::MatchedFilter(ApmDataDumper* data_dumper,
+                             Aec3Optimization optimization,
+                             size_t sub_block_size,
+                             size_t window_size_sub_blocks,
+                             int num_matched_filters,
+                             size_t alignment_shift_sub_blocks,
+                             float excitation_limit)
+    : data_dumper_(data_dumper),
+      optimization_(optimization),
+      sub_block_size_(sub_block_size),
+      filter_intra_lag_shift_(alignment_shift_sub_blocks * sub_block_size_),
+      filters_(
+          num_matched_filters,
+          std::vector<float>(window_size_sub_blocks * sub_block_size_, 0.f)),
+      lag_estimates_(num_matched_filters),
+      filters_offsets_(num_matched_filters, 0),
+      excitation_limit_(excitation_limit) {
+  RTC_DCHECK(data_dumper);
+  RTC_DCHECK_LT(0, window_size_sub_blocks);
+  RTC_DCHECK((kBlockSize % sub_block_size) == 0);
+  RTC_DCHECK((sub_block_size % 4) == 0);
+}
+
+MatchedFilter::~MatchedFilter() = default;
+
+void MatchedFilter::Reset() {
+  for (auto& f : filters_) {
+    std::fill(f.begin(), f.end(), 0.f);
+  }
+
+  for (auto& l : lag_estimates_) {
+    l = MatchedFilter::LagEstimate();
+  }
+}
+
+void MatchedFilter::Update(const DownsampledRenderBuffer& render_buffer,
+                           rtc::ArrayView<const float> capture) {
+  RTC_DCHECK_EQ(sub_block_size_, capture.size());
+  auto& y = capture;
+
+  const float x2_sum_threshold =
+      filters_[0].size() * excitation_limit_ * excitation_limit_;
+
+  // Apply all matched filters.
+  size_t alignment_shift = 0;
+  for (size_t n = 0; n < filters_.size(); ++n) {
+    float error_sum = 0.f;
+    bool filters_updated = false;
+
+    size_t x_start_index =
+        (render_buffer.read + alignment_shift + sub_block_size_ - 1) %
+        render_buffer.buffer.size();
+
+    switch (optimization_) {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+      case Aec3Optimization::kSse2:
+        aec3::MatchedFilterCore_SSE2(x_start_index, x2_sum_threshold,
+                                     render_buffer.buffer, y, filters_[n],
+                                     &filters_updated, &error_sum);
+        break;
+#endif
+#if defined(WEBRTC_HAS_NEON)
+      case Aec3Optimization::kNeon:
+        aec3::MatchedFilterCore_NEON(x_start_index, x2_sum_threshold,
+                                     render_buffer.buffer, y, filters_[n],
+                                     &filters_updated, &error_sum);
+        break;
+#endif
+      default:
+        aec3::MatchedFilterCore(x_start_index, x2_sum_threshold,
+                                render_buffer.buffer, y, filters_[n],
+                                &filters_updated, &error_sum);
+    }
+
+    // Compute anchor for the matched filter error.
+    const float error_sum_anchor =
+        std::inner_product(y.begin(), y.end(), y.begin(), 0.f);
+
+    // Estimate the lag in the matched filter as the distance to the portion in
+    // the filter that contributes the most to the matched filter output. This
+    // is detected as the peak of the matched filter.
+    const size_t lag_estimate = std::distance(
+        filters_[n].begin(),
+        std::max_element(
+            filters_[n].begin(), filters_[n].end(),
+            [](float a, float b) -> bool { return a * a < b * b; }));
+
+    // Update the lag estimates for the matched filter.
+    const float kMatchingFilterThreshold = 0.2f;
+    lag_estimates_[n] = LagEstimate(
+        error_sum_anchor - error_sum,
+        (lag_estimate > 2 && lag_estimate < (filters_[n].size() - 10) &&
+         error_sum < kMatchingFilterThreshold * error_sum_anchor),
+        lag_estimate + alignment_shift, filters_updated);
+
+    RTC_DCHECK_GE(10, filters_.size());
+    switch (n) {
+      case 0:
+        data_dumper_->DumpRaw("aec3_correlator_0_h", filters_[0]);
+        break;
+      case 1:
+        data_dumper_->DumpRaw("aec3_correlator_1_h", filters_[1]);
+        break;
+      case 2:
+        data_dumper_->DumpRaw("aec3_correlator_2_h", filters_[2]);
+        break;
+      case 3:
+        data_dumper_->DumpRaw("aec3_correlator_3_h", filters_[3]);
+        break;
+      case 4:
+        data_dumper_->DumpRaw("aec3_correlator_4_h", filters_[4]);
+        break;
+      case 5:
+        data_dumper_->DumpRaw("aec3_correlator_5_h", filters_[5]);
+        break;
+      case 6:
+        data_dumper_->DumpRaw("aec3_correlator_6_h", filters_[6]);
+        break;
+      case 7:
+        data_dumper_->DumpRaw("aec3_correlator_7_h", filters_[7]);
+        break;
+      case 8:
+        data_dumper_->DumpRaw("aec3_correlator_8_h", filters_[8]);
+        break;
+      case 9:
+        data_dumper_->DumpRaw("aec3_correlator_9_h", filters_[9]);
+        break;
+      default:
+        RTC_NOTREACHED();
+    }
+
+    alignment_shift += filter_intra_lag_shift_;
+  }
+}
+
+void MatchedFilter::LogFilterProperties(int sample_rate_hz,
+                                        size_t shift,
+                                        size_t downsampling_factor) const {
+  size_t alignment_shift = 0;
+  const int fs_by_1000 = LowestBandRate(sample_rate_hz) / 1000;
+  for (size_t k = 0; k < filters_.size(); ++k) {
+    int start = static_cast<int>(alignment_shift * downsampling_factor);
+    int end = static_cast<int>((alignment_shift + filters_[k].size()) *
+                               downsampling_factor);
+    RTC_LOG(LS_INFO) << "Filter " << k << ": start: "
+                     << (start - static_cast<int>(shift)) / fs_by_1000
+                     << " ms, end: "
+                     << (end - static_cast<int>(shift)) / fs_by_1000 << " ms.";
+    alignment_shift += filter_intra_lag_shift_;
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/matched_filter.h b/modules/audio_processing/aec3/matched_filter.h
new file mode 100644
index 0000000..c9bdc46
--- /dev/null
+++ b/modules/audio_processing/aec3/matched_filter.h
@@ -0,0 +1,129 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_MATCHED_FILTER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_MATCHED_FILTER_H_
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "api/optional.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/downsampled_render_buffer.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+namespace aec3 {
+
+#if defined(WEBRTC_HAS_NEON)
+
+// Filter core for the matched filter that is optimized for NEON.
+void MatchedFilterCore_NEON(size_t x_start_index,
+                            float x2_sum_threshold,
+                            rtc::ArrayView<const float> x,
+                            rtc::ArrayView<const float> y,
+                            rtc::ArrayView<float> h,
+                            bool* filters_updated,
+                            float* error_sum);
+
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+
+// Filter core for the matched filter that is optimized for SSE2.
+void MatchedFilterCore_SSE2(size_t x_start_index,
+                            float x2_sum_threshold,
+                            rtc::ArrayView<const float> x,
+                            rtc::ArrayView<const float> y,
+                            rtc::ArrayView<float> h,
+                            bool* filters_updated,
+                            float* error_sum);
+
+#endif
+
+// Filter core for the matched filter.
+void MatchedFilterCore(size_t x_start_index,
+                       float x2_sum_threshold,
+                       rtc::ArrayView<const float> x,
+                       rtc::ArrayView<const float> y,
+                       rtc::ArrayView<float> h,
+                       bool* filters_updated,
+                       float* error_sum);
+
+}  // namespace aec3
+
+class ApmDataDumper;
+
+// Produces recursively updated cross-correlation estimates for several signal
+// shifts where the intra-shift spacing is uniform.
+class MatchedFilter {
+ public:
+  // Stores properties for the lag estimate corresponding to a particular signal
+  // shift.
+  struct LagEstimate {
+    LagEstimate() = default;
+    LagEstimate(float accuracy, bool reliable, size_t lag, bool updated)
+        : accuracy(accuracy), reliable(reliable), lag(lag), updated(updated) {}
+
+    float accuracy = 0.f;
+    bool reliable = false;
+    size_t lag = 0;
+    bool updated = false;
+  };
+
+  MatchedFilter(ApmDataDumper* data_dumper,
+                Aec3Optimization optimization,
+                size_t sub_block_size,
+                size_t window_size_sub_blocks,
+                int num_matched_filters,
+                size_t alignment_shift_sub_blocks,
+                float excitation_limit);
+
+  ~MatchedFilter();
+
+  // Updates the correlation with the values in the capture buffer.
+  void Update(const DownsampledRenderBuffer& render_buffer,
+              rtc::ArrayView<const float> capture);
+
+  // Resets the matched filter.
+  void Reset();
+
+  // Returns the current lag estimates.
+  rtc::ArrayView<const MatchedFilter::LagEstimate> GetLagEstimates() const {
+    return lag_estimates_;
+  }
+
+  // Returns the maximum filter lag.
+  size_t GetMaxFilterLag() const {
+    return filters_.size() * filter_intra_lag_shift_ + filters_[0].size();
+  }
+
+  // Log matched filter properties.
+  void LogFilterProperties(int sample_rate_hz,
+                           size_t shift,
+                           size_t downsampling_factor) const;
+
+ private:
+  ApmDataDumper* const data_dumper_;
+  const Aec3Optimization optimization_;
+  const size_t sub_block_size_;
+  const size_t filter_intra_lag_shift_;
+  std::vector<std::vector<float>> filters_;
+  std::vector<LagEstimate> lag_estimates_;
+  std::vector<size_t> filters_offsets_;
+  const float excitation_limit_;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(MatchedFilter);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_MATCHED_FILTER_H_
diff --git a/modules/audio_processing/aec3/matched_filter_lag_aggregator.cc b/modules/audio_processing/aec3/matched_filter_lag_aggregator.cc
new file mode 100644
index 0000000..9041924
--- /dev/null
+++ b/modules/audio_processing/aec3/matched_filter_lag_aggregator.cc
@@ -0,0 +1,80 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/matched_filter_lag_aggregator.h"
+
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+
+namespace webrtc {
+
+MatchedFilterLagAggregator::MatchedFilterLagAggregator(
+    ApmDataDumper* data_dumper,
+    size_t max_filter_lag)
+    : data_dumper_(data_dumper), histogram_(max_filter_lag + 1, 0) {
+  RTC_DCHECK(data_dumper);
+  histogram_data_.fill(0);
+}
+
+MatchedFilterLagAggregator::~MatchedFilterLagAggregator() = default;
+
+void MatchedFilterLagAggregator::Reset() {
+  std::fill(histogram_.begin(), histogram_.end(), 0);
+  histogram_data_.fill(0);
+  histogram_data_index_ = 0;
+  significant_candidate_found_ = false;
+}
+
+rtc::Optional<DelayEstimate> MatchedFilterLagAggregator::Aggregate(
+    rtc::ArrayView<const MatchedFilter::LagEstimate> lag_estimates) {
+  // Choose the strongest lag estimate as the best one.
+  float best_accuracy = 0.f;
+  int best_lag_estimate_index = -1;
+  for (size_t k = 0; k < lag_estimates.size(); ++k) {
+    if (lag_estimates[k].updated && lag_estimates[k].reliable) {
+      if (lag_estimates[k].accuracy > best_accuracy) {
+        best_accuracy = lag_estimates[k].accuracy;
+        best_lag_estimate_index = static_cast<int>(k);
+      }
+    }
+  }
+
+  // TODO(peah): Remove this logging once all development is done.
+  data_dumper_->DumpRaw("aec3_echo_path_delay_estimator_best_index",
+                        best_lag_estimate_index);
+
+  if (best_lag_estimate_index != -1) {
+    RTC_DCHECK_GT(histogram_.size(), histogram_data_[histogram_data_index_]);
+    RTC_DCHECK_LE(0, histogram_data_[histogram_data_index_]);
+    --histogram_[histogram_data_[histogram_data_index_]];
+
+    histogram_data_[histogram_data_index_] =
+        lag_estimates[best_lag_estimate_index].lag;
+
+    RTC_DCHECK_GT(histogram_.size(), histogram_data_[histogram_data_index_]);
+    RTC_DCHECK_LE(0, histogram_data_[histogram_data_index_]);
+    ++histogram_[histogram_data_[histogram_data_index_]];
+
+    histogram_data_index_ =
+        (histogram_data_index_ + 1) % histogram_data_.size();
+
+    const int candidate =
+        std::distance(histogram_.begin(),
+                      std::max_element(histogram_.begin(), histogram_.end()));
+
+    if (histogram_[candidate] > 25) {
+      significant_candidate_found_ = true;
+      return DelayEstimate(DelayEstimate::Quality::kRefined, candidate);
+    } else if (!significant_candidate_found_) {
+      return DelayEstimate(DelayEstimate::Quality::kCoarse, candidate);
+    }
+  }
+  return rtc::nullopt;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/matched_filter_lag_aggregator.h b/modules/audio_processing/aec3/matched_filter_lag_aggregator.h
new file mode 100644
index 0000000..86968bd
--- /dev/null
+++ b/modules/audio_processing/aec3/matched_filter_lag_aggregator.h
@@ -0,0 +1,50 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_MATCHED_FILTER_LAG_AGGREGATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_MATCHED_FILTER_LAG_AGGREGATOR_H_
+
+#include <vector>
+
+#include "api/optional.h"
+#include "modules/audio_processing/aec3/delay_estimate.h"
+#include "modules/audio_processing/aec3/matched_filter.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+
+// Aggregates lag estimates produced by the MatchedFilter class into a single
+// reliable combined lag estimate.
+class MatchedFilterLagAggregator {
+ public:
+  MatchedFilterLagAggregator(ApmDataDumper* data_dumper, size_t max_filter_lag);
+  ~MatchedFilterLagAggregator();
+
+  // Resets the aggregator.
+  void Reset();
+
+  // Aggregates the provided lag estimates.
+  rtc::Optional<DelayEstimate> Aggregate(
+      rtc::ArrayView<const MatchedFilter::LagEstimate> lag_estimates);
+
+ private:
+  ApmDataDumper* const data_dumper_;
+  std::vector<int> histogram_;
+  std::array<int, 250> histogram_data_;
+  int histogram_data_index_ = 0;
+  bool significant_candidate_found_ = false;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(MatchedFilterLagAggregator);
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_MATCHED_FILTER_LAG_AGGREGATOR_H_
diff --git a/modules/audio_processing/aec3/matched_filter_lag_aggregator_unittest.cc b/modules/audio_processing/aec3/matched_filter_lag_aggregator_unittest.cc
new file mode 100644
index 0000000..ce303d4
--- /dev/null
+++ b/modules/audio_processing/aec3/matched_filter_lag_aggregator_unittest.cc
@@ -0,0 +1,142 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+  */
+
+#include "modules/audio_processing/aec3/matched_filter_lag_aggregator.h"
+
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+constexpr size_t kNumLagsBeforeDetection = 25;
+
+}  // namespace
+
+// Verifies that the most accurate lag estimate is chosen.
+TEST(MatchedFilterLagAggregator, MostAccurateLagChosen) {
+  constexpr size_t kLag1 = 5;
+  constexpr size_t kLag2 = 10;
+  ApmDataDumper data_dumper(0);
+  std::vector<MatchedFilter::LagEstimate> lag_estimates(2);
+  MatchedFilterLagAggregator aggregator(&data_dumper, std::max(kLag1, kLag2));
+  lag_estimates[0] = MatchedFilter::LagEstimate(1.f, true, kLag1, true);
+  lag_estimates[1] = MatchedFilter::LagEstimate(0.5f, true, kLag2, true);
+
+  for (size_t k = 0; k < kNumLagsBeforeDetection; ++k) {
+    EXPECT_TRUE(aggregator.Aggregate(lag_estimates));
+  }
+
+  rtc::Optional<DelayEstimate> aggregated_lag =
+      aggregator.Aggregate(lag_estimates);
+  EXPECT_TRUE(aggregated_lag);
+  EXPECT_EQ(kLag1, aggregated_lag->delay);
+
+  lag_estimates[0] = MatchedFilter::LagEstimate(0.5f, true, kLag1, true);
+  lag_estimates[1] = MatchedFilter::LagEstimate(1.f, true, kLag2, true);
+
+  for (size_t k = 0; k < kNumLagsBeforeDetection; ++k) {
+    aggregated_lag = aggregator.Aggregate(lag_estimates);
+    EXPECT_TRUE(aggregated_lag);
+    EXPECT_EQ(kLag1, aggregated_lag->delay);
+  }
+
+  aggregated_lag = aggregator.Aggregate(lag_estimates);
+  aggregated_lag = aggregator.Aggregate(lag_estimates);
+  EXPECT_TRUE(aggregated_lag);
+  EXPECT_EQ(kLag2, aggregated_lag->delay);
+}
+
+// Verifies that varying lag estimates causes lag estimates to not be deemed
+// reliable.
+TEST(MatchedFilterLagAggregator,
+     LagEstimateInvarianceRequiredForAggregatedLag) {
+  ApmDataDumper data_dumper(0);
+  std::vector<MatchedFilter::LagEstimate> lag_estimates(1);
+  MatchedFilterLagAggregator aggregator(&data_dumper, 100);
+
+  rtc::Optional<DelayEstimate> aggregated_lag;
+  for (size_t k = 0; k < kNumLagsBeforeDetection; ++k) {
+    lag_estimates[0] = MatchedFilter::LagEstimate(1.f, true, 10, true);
+    aggregated_lag = aggregator.Aggregate(lag_estimates);
+  }
+  EXPECT_TRUE(aggregated_lag);
+
+  for (size_t k = 0; k < kNumLagsBeforeDetection * 100; ++k) {
+    lag_estimates[0] = MatchedFilter::LagEstimate(1.f, true, k % 100, true);
+    aggregated_lag = aggregator.Aggregate(lag_estimates);
+  }
+  EXPECT_FALSE(aggregated_lag);
+
+  for (size_t k = 0; k < kNumLagsBeforeDetection * 100; ++k) {
+    lag_estimates[0] = MatchedFilter::LagEstimate(1.f, true, k % 100, true);
+    aggregated_lag = aggregator.Aggregate(lag_estimates);
+    EXPECT_FALSE(aggregated_lag);
+  }
+}
+
+// Verifies that lag estimate updates are required to produce an updated lag
+// aggregate.
+TEST(MatchedFilterLagAggregator,
+     DISABLED_LagEstimateUpdatesRequiredForAggregatedLag) {
+  constexpr size_t kLag = 5;
+  ApmDataDumper data_dumper(0);
+  std::vector<MatchedFilter::LagEstimate> lag_estimates(1);
+  MatchedFilterLagAggregator aggregator(&data_dumper, kLag);
+  for (size_t k = 0; k < kNumLagsBeforeDetection * 10; ++k) {
+    lag_estimates[0] = MatchedFilter::LagEstimate(1.f, true, kLag, false);
+    rtc::Optional<DelayEstimate> aggregated_lag =
+        aggregator.Aggregate(lag_estimates);
+    EXPECT_FALSE(aggregated_lag);
+    EXPECT_EQ(kLag, aggregated_lag->delay);
+  }
+}
+
+// Verifies that an aggregated lag is persistent if the lag estimates do not
+// change and that an aggregated lag is not produced without gaining lag
+// estimate confidence.
+TEST(MatchedFilterLagAggregator, DISABLED_PersistentAggregatedLag) {
+  constexpr size_t kLag1 = 5;
+  constexpr size_t kLag2 = 10;
+  ApmDataDumper data_dumper(0);
+  std::vector<MatchedFilter::LagEstimate> lag_estimates(1);
+  MatchedFilterLagAggregator aggregator(&data_dumper, std::max(kLag1, kLag2));
+  rtc::Optional<DelayEstimate> aggregated_lag;
+  for (size_t k = 0; k < kNumLagsBeforeDetection; ++k) {
+    lag_estimates[0] = MatchedFilter::LagEstimate(1.f, true, kLag1, true);
+    aggregated_lag = aggregator.Aggregate(lag_estimates);
+  }
+  EXPECT_TRUE(aggregated_lag);
+  EXPECT_EQ(kLag1, aggregated_lag->delay);
+
+  for (size_t k = 0; k < kNumLagsBeforeDetection * 40; ++k) {
+    lag_estimates[0] = MatchedFilter::LagEstimate(1.f, false, kLag2, true);
+    aggregated_lag = aggregator.Aggregate(lag_estimates);
+    EXPECT_TRUE(aggregated_lag);
+    EXPECT_EQ(kLag1, aggregated_lag->delay);
+  }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for non-null data dumper.
+TEST(MatchedFilterLagAggregator, NullDataDumper) {
+  EXPECT_DEATH(MatchedFilterLagAggregator(nullptr, 10), "");
+}
+
+#endif
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/matched_filter_unittest.cc b/modules/audio_processing/aec3/matched_filter_unittest.cc
new file mode 100644
index 0000000..e30c78c
--- /dev/null
+++ b/modules/audio_processing/aec3/matched_filter_unittest.cc
@@ -0,0 +1,378 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+  */
+
+#include "modules/audio_processing/aec3/matched_filter.h"
+
+#include "typedefs.h"  // NOLINT(build/include)
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+#include <algorithm>
+#include <sstream>
+#include <string>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/decimator.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/random.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace aec3 {
+namespace {
+
+std::string ProduceDebugText(size_t delay, size_t down_sampling_factor) {
+  std::ostringstream ss;
+  ss << "Delay: " << delay;
+  ss << ", Down sampling factor: " << down_sampling_factor;
+  return ss.str();
+}
+
+constexpr size_t kNumMatchedFilters = 10;
+constexpr size_t kDownSamplingFactors[] = {2, 4, 8};
+constexpr size_t kWindowSizeSubBlocks = 32;
+constexpr size_t kAlignmentShiftSubBlocks = kWindowSizeSubBlocks * 3 / 4;
+
+}  // namespace
+
+#if defined(WEBRTC_HAS_NEON)
+// Verifies that the optimized methods for NEON are similar to their reference
+// counterparts.
+TEST(MatchedFilter, TestNeonOptimizations) {
+  Random random_generator(42U);
+  for (auto down_sampling_factor : kDownSamplingFactors) {
+    const size_t sub_block_size = kBlockSize / down_sampling_factor;
+
+    std::vector<float> x(2000);
+    RandomizeSampleVector(&random_generator, x);
+    std::vector<float> y(sub_block_size);
+    std::vector<float> h_NEON(512);
+    std::vector<float> h(512);
+    int x_index = 0;
+    for (int k = 0; k < 1000; ++k) {
+      RandomizeSampleVector(&random_generator, y);
+
+      bool filters_updated = false;
+      float error_sum = 0.f;
+      bool filters_updated_NEON = false;
+      float error_sum_NEON = 0.f;
+
+      MatchedFilterCore_NEON(x_index, h.size() * 150.f * 150.f, x, y, h_NEON,
+                             &filters_updated_NEON, &error_sum_NEON);
+
+      MatchedFilterCore(x_index, h.size() * 150.f * 150.f, x, y, h,
+                        &filters_updated, &error_sum);
+
+      EXPECT_EQ(filters_updated, filters_updated_NEON);
+      EXPECT_NEAR(error_sum, error_sum_NEON, error_sum / 100000.f);
+
+      for (size_t j = 0; j < h.size(); ++j) {
+        EXPECT_NEAR(h[j], h_NEON[j], 0.00001f);
+      }
+
+      x_index = (x_index + sub_block_size) % x.size();
+    }
+  }
+}
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+// Verifies that the optimized methods for SSE2 are bitexact to their reference
+// counterparts.
+TEST(MatchedFilter, TestSse2Optimizations) {
+  bool use_sse2 = (WebRtc_GetCPUInfo(kSSE2) != 0);
+  if (use_sse2) {
+    Random random_generator(42U);
+    for (auto down_sampling_factor : kDownSamplingFactors) {
+      const size_t sub_block_size = kBlockSize / down_sampling_factor;
+      std::vector<float> x(2000);
+      RandomizeSampleVector(&random_generator, x);
+      std::vector<float> y(sub_block_size);
+      std::vector<float> h_SSE2(512);
+      std::vector<float> h(512);
+      int x_index = 0;
+      for (int k = 0; k < 1000; ++k) {
+        RandomizeSampleVector(&random_generator, y);
+
+        bool filters_updated = false;
+        float error_sum = 0.f;
+        bool filters_updated_SSE2 = false;
+        float error_sum_SSE2 = 0.f;
+
+        MatchedFilterCore_SSE2(x_index, h.size() * 150.f * 150.f, x, y, h_SSE2,
+                               &filters_updated_SSE2, &error_sum_SSE2);
+
+        MatchedFilterCore(x_index, h.size() * 150.f * 150.f, x, y, h,
+                          &filters_updated, &error_sum);
+
+        EXPECT_EQ(filters_updated, filters_updated_SSE2);
+        EXPECT_NEAR(error_sum, error_sum_SSE2, error_sum / 100000.f);
+
+        for (size_t j = 0; j < h.size(); ++j) {
+          EXPECT_NEAR(h[j], h_SSE2[j], 0.00001f);
+        }
+
+        x_index = (x_index + sub_block_size) % x.size();
+      }
+    }
+  }
+}
+
+#endif
+
+// Verifies that the matched filter produces proper lag estimates for
+// artificially
+// delayed signals.
+TEST(MatchedFilter, LagEstimation) {
+  Random random_generator(42U);
+  for (auto down_sampling_factor : kDownSamplingFactors) {
+    const size_t sub_block_size = kBlockSize / down_sampling_factor;
+
+    std::vector<std::vector<float>> render(3,
+                                           std::vector<float>(kBlockSize, 0.f));
+    std::array<float, kBlockSize> capture;
+    capture.fill(0.f);
+    ApmDataDumper data_dumper(0);
+    for (size_t delay_samples : {5, 64, 150, 200, 800, 1000}) {
+      SCOPED_TRACE(ProduceDebugText(delay_samples, down_sampling_factor));
+      EchoCanceller3Config config;
+      config.delay.down_sampling_factor = down_sampling_factor;
+      config.delay.num_filters = kNumMatchedFilters;
+      config.delay.min_echo_path_delay_blocks = 0;
+      config.delay.api_call_jitter_blocks = 0;
+      Decimator capture_decimator(down_sampling_factor);
+      DelayBuffer<float> signal_delay_buffer(down_sampling_factor *
+                                             delay_samples);
+      MatchedFilter filter(&data_dumper, DetectOptimization(), sub_block_size,
+                           kWindowSizeSubBlocks, kNumMatchedFilters,
+                           kAlignmentShiftSubBlocks, 150);
+
+      std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+          RenderDelayBuffer::Create(config, 3));
+
+      // Analyze the correlation between render and capture.
+      for (size_t k = 0; k < (600 + delay_samples / sub_block_size); ++k) {
+        RandomizeSampleVector(&random_generator, render[0]);
+        signal_delay_buffer.Delay(render[0], capture);
+        render_delay_buffer->Insert(render);
+
+        if (k == 0) {
+          render_delay_buffer->Reset();
+        }
+
+        render_delay_buffer->PrepareCaptureProcessing();
+        std::array<float, kBlockSize> downsampled_capture_data;
+        rtc::ArrayView<float> downsampled_capture(
+            downsampled_capture_data.data(), sub_block_size);
+        capture_decimator.Decimate(capture, downsampled_capture);
+        filter.Update(render_delay_buffer->GetDownsampledRenderBuffer(),
+                      downsampled_capture);
+      }
+
+      // Obtain the lag estimates.
+      auto lag_estimates = filter.GetLagEstimates();
+
+      // Find which lag estimate should be the most accurate.
+      rtc::Optional<size_t> expected_most_accurate_lag_estimate;
+      size_t alignment_shift_sub_blocks = 0;
+      for (size_t k = 0; k < config.delay.num_filters; ++k) {
+        if ((alignment_shift_sub_blocks + 3 * kWindowSizeSubBlocks / 4) *
+                sub_block_size >
+            delay_samples) {
+          expected_most_accurate_lag_estimate = k > 0 ? k - 1 : 0;
+          break;
+        }
+        alignment_shift_sub_blocks += kAlignmentShiftSubBlocks;
+      }
+      ASSERT_TRUE(expected_most_accurate_lag_estimate);
+
+      // Verify that the expected most accurate lag estimate is the most
+      // accurate estimate.
+      for (size_t k = 0; k < kNumMatchedFilters; ++k) {
+        if (k != *expected_most_accurate_lag_estimate &&
+            k != (*expected_most_accurate_lag_estimate + 1)) {
+          EXPECT_TRUE(
+              lag_estimates[*expected_most_accurate_lag_estimate].accuracy >
+                  lag_estimates[k].accuracy ||
+              !lag_estimates[k].reliable ||
+              !lag_estimates[*expected_most_accurate_lag_estimate].reliable);
+        }
+      }
+
+      // Verify that all lag estimates are updated as expected for signals
+      // containing strong noise.
+      for (auto& le : lag_estimates) {
+        EXPECT_TRUE(le.updated);
+      }
+
+      // Verify that the expected most accurate lag estimate is reliable.
+      EXPECT_TRUE(
+          lag_estimates[*expected_most_accurate_lag_estimate].reliable ||
+          lag_estimates[std::min(*expected_most_accurate_lag_estimate + 1,
+                                 lag_estimates.size() - 1)]
+              .reliable);
+
+      // Verify that the expected most accurate lag estimate is correct.
+      if (lag_estimates[*expected_most_accurate_lag_estimate].reliable) {
+        EXPECT_TRUE(delay_samples ==
+                    lag_estimates[*expected_most_accurate_lag_estimate].lag);
+      } else {
+        EXPECT_TRUE(
+            delay_samples ==
+            lag_estimates[std::min(*expected_most_accurate_lag_estimate + 1,
+                                   lag_estimates.size() - 1)]
+                .lag);
+      }
+    }
+  }
+}
+
+// Verifies that the matched filter does not produce reliable and accurate
+// estimates for uncorrelated render and capture signals.
+TEST(MatchedFilter, LagNotReliableForUncorrelatedRenderAndCapture) {
+  Random random_generator(42U);
+  for (auto down_sampling_factor : kDownSamplingFactors) {
+    EchoCanceller3Config config;
+    config.delay.down_sampling_factor = down_sampling_factor;
+    config.delay.num_filters = kNumMatchedFilters;
+    const size_t sub_block_size = kBlockSize / down_sampling_factor;
+
+    std::vector<std::vector<float>> render(3,
+                                           std::vector<float>(kBlockSize, 0.f));
+    std::array<float, kBlockSize> capture_data;
+    rtc::ArrayView<float> capture(capture_data.data(), sub_block_size);
+    std::fill(capture.begin(), capture.end(), 0.f);
+    ApmDataDumper data_dumper(0);
+    std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+        RenderDelayBuffer::Create(config, 3));
+    MatchedFilter filter(&data_dumper, DetectOptimization(), sub_block_size,
+                         kWindowSizeSubBlocks, kNumMatchedFilters,
+                         kAlignmentShiftSubBlocks, 150);
+
+    // Analyze the correlation between render and capture.
+    for (size_t k = 0; k < 100; ++k) {
+      RandomizeSampleVector(&random_generator, render[0]);
+      RandomizeSampleVector(&random_generator, capture);
+      render_delay_buffer->Insert(render);
+      filter.Update(render_delay_buffer->GetDownsampledRenderBuffer(), capture);
+    }
+
+    // Obtain the lag estimates.
+    auto lag_estimates = filter.GetLagEstimates();
+    EXPECT_EQ(kNumMatchedFilters, lag_estimates.size());
+
+    // Verify that no lag estimates are reliable.
+    for (auto& le : lag_estimates) {
+      EXPECT_FALSE(le.reliable);
+    }
+  }
+}
+
+// Verifies that the matched filter does not produce updated lag estimates for
+// render signals of low level.
+TEST(MatchedFilter, LagNotUpdatedForLowLevelRender) {
+  Random random_generator(42U);
+  for (auto down_sampling_factor : kDownSamplingFactors) {
+    const size_t sub_block_size = kBlockSize / down_sampling_factor;
+
+    std::vector<std::vector<float>> render(3,
+                                           std::vector<float>(kBlockSize, 0.f));
+    std::array<float, kBlockSize> capture;
+    capture.fill(0.f);
+    ApmDataDumper data_dumper(0);
+    MatchedFilter filter(&data_dumper, DetectOptimization(), sub_block_size,
+                         kWindowSizeSubBlocks, kNumMatchedFilters,
+                         kAlignmentShiftSubBlocks, 150);
+    std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+        RenderDelayBuffer::Create(EchoCanceller3Config(), 3));
+    Decimator capture_decimator(down_sampling_factor);
+
+    // Analyze the correlation between render and capture.
+    for (size_t k = 0; k < 100; ++k) {
+      RandomizeSampleVector(&random_generator, render[0]);
+      for (auto& render_k : render[0]) {
+        render_k *= 149.f / 32767.f;
+      }
+      std::copy(render[0].begin(), render[0].end(), capture.begin());
+      std::array<float, kBlockSize> downsampled_capture_data;
+      rtc::ArrayView<float> downsampled_capture(downsampled_capture_data.data(),
+                                                sub_block_size);
+      capture_decimator.Decimate(capture, downsampled_capture);
+      filter.Update(render_delay_buffer->GetDownsampledRenderBuffer(),
+                    downsampled_capture);
+    }
+
+    // Obtain the lag estimates.
+    auto lag_estimates = filter.GetLagEstimates();
+    EXPECT_EQ(kNumMatchedFilters, lag_estimates.size());
+
+    // Verify that no lag estimates are updated and that no lag estimates are
+    // reliable.
+    for (auto& le : lag_estimates) {
+      EXPECT_FALSE(le.updated);
+      EXPECT_FALSE(le.reliable);
+    }
+  }
+}
+
+// Verifies that the correct number of lag estimates are produced for a certain
+// number of alignment shifts.
+TEST(MatchedFilter, NumberOfLagEstimates) {
+  ApmDataDumper data_dumper(0);
+  for (auto down_sampling_factor : kDownSamplingFactors) {
+    const size_t sub_block_size = kBlockSize / down_sampling_factor;
+    for (size_t num_matched_filters = 0; num_matched_filters < 10;
+         ++num_matched_filters) {
+      MatchedFilter filter(&data_dumper, DetectOptimization(), sub_block_size,
+                           32, num_matched_filters, 1, 150);
+      EXPECT_EQ(num_matched_filters, filter.GetLagEstimates().size());
+    }
+  }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for non-zero windows size.
+TEST(MatchedFilter, ZeroWindowSize) {
+  ApmDataDumper data_dumper(0);
+  EXPECT_DEATH(
+      MatchedFilter(&data_dumper, DetectOptimization(), 16, 0, 1, 1, 150), "");
+}
+
+// Verifies the check for non-null data dumper.
+TEST(MatchedFilter, NullDataDumper) {
+  EXPECT_DEATH(MatchedFilter(nullptr, DetectOptimization(), 16, 1, 1, 1, 150),
+               "");
+}
+
+// Verifies the check for that the sub block size is a multiple of 4.
+// TODO(peah): Activate the unittest once the required code has been landed.
+TEST(MatchedFilter, DISABLED_BlockSizeMultipleOf4) {
+  ApmDataDumper data_dumper(0);
+  EXPECT_DEATH(
+      MatchedFilter(&data_dumper, DetectOptimization(), 15, 1, 1, 1, 150), "");
+}
+
+// Verifies the check for that there is an integer number of sub blocks that add
+// up to a block size.
+// TODO(peah): Activate the unittest once the required code has been landed.
+TEST(MatchedFilter, DISABLED_SubBlockSizeAddsUpToBlockSize) {
+  ApmDataDumper data_dumper(0);
+  EXPECT_DEATH(
+      MatchedFilter(&data_dumper, DetectOptimization(), 12, 1, 1, 1, 150), "");
+}
+
+#endif
+
+}  // namespace aec3
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/matrix_buffer.cc b/modules/audio_processing/aec3/matrix_buffer.cc
new file mode 100644
index 0000000..f95e7f4
--- /dev/null
+++ b/modules/audio_processing/aec3/matrix_buffer.cc
@@ -0,0 +1,31 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/matrix_buffer.h"
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+
+namespace webrtc {
+
+MatrixBuffer::MatrixBuffer(size_t size, size_t height, size_t width)
+    : size(static_cast<int>(size)),
+      buffer(size,
+             std::vector<std::vector<float>>(height,
+                                             std::vector<float>(width, 0.f))) {
+  for (auto& c : buffer) {
+    for (auto& b : c) {
+      std::fill(b.begin(), b.end(), 0.f);
+    }
+  }
+}
+
+MatrixBuffer::~MatrixBuffer() = default;
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/matrix_buffer.h b/modules/audio_processing/aec3/matrix_buffer.h
new file mode 100644
index 0000000..64aac0a
--- /dev/null
+++ b/modules/audio_processing/aec3/matrix_buffer.h
@@ -0,0 +1,57 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_MATRIX_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_MATRIX_BUFFER_H_
+
+#include <vector>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Struct for bundling a circular buffer of two dimensional vector objects
+// together with the read and write indices.
+struct MatrixBuffer {
+  MatrixBuffer(size_t size, size_t height, size_t width);
+  ~MatrixBuffer();
+
+  int IncIndex(int index) const {
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    return index < size - 1 ? index + 1 : 0;
+  }
+
+  int DecIndex(int index) const {
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    return index > 0 ? index - 1 : size - 1;
+  }
+
+  int OffsetIndex(int index, int offset) const {
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    RTC_DCHECK_GE(size, offset);
+    return (size + index + offset) % size;
+  }
+
+  void UpdateWriteIndex(int offset) { write = OffsetIndex(write, offset); }
+  void IncWriteIndex() { write = IncIndex(write); }
+  void DecWriteIndex() { write = DecIndex(write); }
+  void UpdateReadIndex(int offset) { read = OffsetIndex(read, offset); }
+  void IncReadIndex() { read = IncIndex(read); }
+  void DecReadIndex() { read = DecIndex(read); }
+
+  const int size;
+  std::vector<std::vector<std::vector<float>>> buffer;
+  int write = 0;
+  int read = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_MATRIX_BUFFER_H_
diff --git a/modules/audio_processing/aec3/mock/mock_block_processor.h b/modules/audio_processing/aec3/mock/mock_block_processor.h
new file mode 100644
index 0000000..5fff456
--- /dev/null
+++ b/modules/audio_processing/aec3/mock/mock_block_processor.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_BLOCK_PROCESSOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_BLOCK_PROCESSOR_H_
+
+#include <vector>
+
+#include "modules/audio_processing/aec3/block_processor.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace test {
+
+class MockBlockProcessor : public BlockProcessor {
+ public:
+  virtual ~MockBlockProcessor() {}
+
+  MOCK_METHOD3(ProcessCapture,
+               void(bool level_change,
+                    bool saturated_microphone_signal,
+                    std::vector<std::vector<float>>* capture_block));
+  MOCK_METHOD1(BufferRender,
+               void(const std::vector<std::vector<float>>& block));
+  MOCK_METHOD1(UpdateEchoLeakageStatus, void(bool leakage_detected));
+  MOCK_CONST_METHOD1(GetMetrics, void(EchoControl::Metrics* metrics));
+};
+
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_BLOCK_PROCESSOR_H_
diff --git a/modules/audio_processing/aec3/mock/mock_echo_remover.h b/modules/audio_processing/aec3/mock/mock_echo_remover.h
new file mode 100644
index 0000000..638e3f0
--- /dev/null
+++ b/modules/audio_processing/aec3/mock/mock_echo_remover.h
@@ -0,0 +1,43 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_ECHO_REMOVER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_ECHO_REMOVER_H_
+
+#include <vector>
+
+#include "api/optional.h"
+#include "modules/audio_processing/aec3/echo_path_variability.h"
+#include "modules/audio_processing/aec3/echo_remover.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace test {
+
+class MockEchoRemover : public EchoRemover {
+ public:
+  virtual ~MockEchoRemover() = default;
+
+  MOCK_METHOD5(ProcessCapture,
+               void(const EchoPathVariability& echo_path_variability,
+                    bool capture_signal_saturation,
+                    const rtc::Optional<DelayEstimate>& delay_estimate,
+                    RenderBuffer* render_buffer,
+                    std::vector<std::vector<float>>* capture));
+
+  MOCK_METHOD1(UpdateEchoLeakageStatus, void(bool leakage_detected));
+  MOCK_CONST_METHOD1(GetMetrics, void(EchoControl::Metrics* metrics));
+};
+
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_ECHO_REMOVER_H_
diff --git a/modules/audio_processing/aec3/mock/mock_render_delay_buffer.h b/modules/audio_processing/aec3/mock/mock_render_delay_buffer.h
new file mode 100644
index 0000000..1ed2b40
--- /dev/null
+++ b/modules/audio_processing/aec3/mock/mock_render_delay_buffer.h
@@ -0,0 +1,72 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_RENDER_DELAY_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_RENDER_DELAY_BUFFER_H_
+
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/downsampled_render_buffer.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace test {
+
+class MockRenderDelayBuffer : public RenderDelayBuffer {
+ public:
+  explicit MockRenderDelayBuffer(int sample_rate_hz)
+      : block_buffer_(GetRenderDelayBufferSize(4, 4, 12),
+                      NumBandsForRate(sample_rate_hz),
+                      kBlockSize),
+        spectrum_buffer_(block_buffer_.buffer.size(), kFftLengthBy2Plus1),
+        fft_buffer_(block_buffer_.buffer.size()),
+        render_buffer_(&block_buffer_, &spectrum_buffer_, &fft_buffer_),
+        downsampled_render_buffer_(GetDownSampledBufferSize(4, 4)) {
+    ON_CALL(*this, GetRenderBuffer())
+        .WillByDefault(
+            testing::Invoke(this, &MockRenderDelayBuffer::FakeGetRenderBuffer));
+    ON_CALL(*this, GetDownsampledRenderBuffer())
+        .WillByDefault(testing::Invoke(
+            this, &MockRenderDelayBuffer::FakeGetDownsampledRenderBuffer));
+  }
+  virtual ~MockRenderDelayBuffer() = default;
+
+  MOCK_METHOD0(Reset, void());
+  MOCK_METHOD1(Insert,
+               RenderDelayBuffer::BufferingEvent(
+                   const std::vector<std::vector<float>>& block));
+  MOCK_METHOD0(PrepareCaptureProcessing, RenderDelayBuffer::BufferingEvent());
+  MOCK_METHOD1(SetDelay, bool(size_t delay));
+  MOCK_CONST_METHOD0(Delay, rtc::Optional<size_t>());
+  MOCK_CONST_METHOD0(MaxDelay, size_t());
+  MOCK_METHOD0(GetRenderBuffer, RenderBuffer*());
+  MOCK_CONST_METHOD0(GetDownsampledRenderBuffer,
+                     const DownsampledRenderBuffer&());
+  MOCK_CONST_METHOD1(CausalDelay, bool(size_t delay));
+
+ private:
+  RenderBuffer* FakeGetRenderBuffer() { return &render_buffer_; }
+  const DownsampledRenderBuffer& FakeGetDownsampledRenderBuffer() const {
+    return downsampled_render_buffer_;
+  }
+  MatrixBuffer block_buffer_;
+  VectorBuffer spectrum_buffer_;
+  FftBuffer fft_buffer_;
+  RenderBuffer render_buffer_;
+  DownsampledRenderBuffer downsampled_render_buffer_;
+};
+
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_RENDER_DELAY_BUFFER_H_
diff --git a/modules/audio_processing/aec3/mock/mock_render_delay_controller.h b/modules/audio_processing/aec3/mock/mock_render_delay_controller.h
new file mode 100644
index 0000000..8fb7a8e
--- /dev/null
+++ b/modules/audio_processing/aec3/mock/mock_render_delay_controller.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_RENDER_DELAY_CONTROLLER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_RENDER_DELAY_CONTROLLER_H_
+
+#include "api/array_view.h"
+#include "api/optional.h"
+#include "modules/audio_processing/aec3/downsampled_render_buffer.h"
+#include "modules/audio_processing/aec3/render_delay_controller.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+namespace test {
+
+class MockRenderDelayController : public RenderDelayController {
+ public:
+  virtual ~MockRenderDelayController() = default;
+
+  MOCK_METHOD0(Reset, void());
+  MOCK_METHOD0(LogRenderCall, void());
+  MOCK_METHOD2(
+      GetDelay,
+      rtc::Optional<DelayEstimate>(const DownsampledRenderBuffer& render_buffer,
+                                   rtc::ArrayView<const float> capture));
+};
+
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_MOCK_MOCK_RENDER_DELAY_CONTROLLER_H_
diff --git a/modules/audio_processing/aec3/output_selector.cc b/modules/audio_processing/aec3/output_selector.cc
new file mode 100644
index 0000000..4f547d9
--- /dev/null
+++ b/modules/audio_processing/aec3/output_selector.cc
@@ -0,0 +1,59 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/output_selector.h"
+
+#include <algorithm>
+#include <numeric>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+// Performs the transition between the signals in a smooth manner.
+void SmoothFrameTransition(bool from_y_to_e,
+                           rtc::ArrayView<const float> e,
+                           rtc::ArrayView<float> y) {
+  RTC_DCHECK_LT(0u, e.size());
+  RTC_DCHECK_EQ(y.size(), e.size());
+
+  const float change_factor = (from_y_to_e ? 1.f : -1.f) / e.size();
+  float averaging = from_y_to_e ? 0.f : 1.f;
+  for (size_t k = 0; k < e.size(); ++k) {
+    y[k] += averaging * (e[k] - y[k]);
+    averaging += change_factor;
+  }
+  RTC_DCHECK_EQ(from_y_to_e ? 1.f : 0.f, averaging);
+}
+
+}  // namespace
+
+OutputSelector::OutputSelector() = default;
+
+OutputSelector::~OutputSelector() = default;
+
+void OutputSelector::FormLinearOutput(
+    bool use_subtractor_output,
+    rtc::ArrayView<const float> subtractor_output,
+    rtc::ArrayView<float> capture) {
+  RTC_DCHECK_EQ(subtractor_output.size(), capture.size());
+  rtc::ArrayView<const float>& e_main = subtractor_output;
+  rtc::ArrayView<float> y = capture;
+
+  if (use_subtractor_output != use_subtractor_output_) {
+    use_subtractor_output_ = use_subtractor_output;
+    SmoothFrameTransition(use_subtractor_output_, e_main, y);
+  } else if (use_subtractor_output_) {
+    std::copy(e_main.begin(), e_main.end(), y.begin());
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/output_selector.h b/modules/audio_processing/aec3/output_selector.h
new file mode 100644
index 0000000..a406c61
--- /dev/null
+++ b/modules/audio_processing/aec3/output_selector.h
@@ -0,0 +1,41 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_OUTPUT_SELECTOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_OUTPUT_SELECTOR_H_
+
+#include "api/array_view.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Performs the selection between which of the linear aec output and the
+// microphone signal should be used as the echo suppressor output.
+class OutputSelector {
+ public:
+  OutputSelector();
+  ~OutputSelector();
+
+  // Forms the most appropriate output signal.
+  void FormLinearOutput(bool use_subtractor_output,
+                        rtc::ArrayView<const float> subtractor_output,
+                        rtc::ArrayView<float> capture);
+
+  // Returns true if the linear aec output is the one used.
+  bool UseSubtractorOutput() const { return use_subtractor_output_; }
+
+ private:
+  bool use_subtractor_output_ = false;
+  RTC_DISALLOW_COPY_AND_ASSIGN(OutputSelector);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_OUTPUT_SELECTOR_H_
diff --git a/modules/audio_processing/aec3/output_selector_unittest.cc b/modules/audio_processing/aec3/output_selector_unittest.cc
new file mode 100644
index 0000000..c7add1c
--- /dev/null
+++ b/modules/audio_processing/aec3/output_selector_unittest.cc
@@ -0,0 +1,69 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/output_selector.h"
+
+#include <algorithm>
+#include <array>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+// Verifies that the switching between the signals in the output works as
+// intended.
+TEST(OutputSelector, ProperSwitching) {
+  OutputSelector selector;
+
+  std::array<float, kBlockSize> y;
+  std::array<float, kBlockSize> e;
+  std::array<float, kBlockSize> e_ref;
+  std::array<float, kBlockSize> y_ref;
+  auto init_blocks = [](std::array<float, kBlockSize>* e,
+                        std::array<float, kBlockSize>* y) {
+    e->fill(10.f);
+    y->fill(20.f);
+  };
+
+  init_blocks(&e_ref, &y_ref);
+
+  init_blocks(&e, &y);
+  selector.FormLinearOutput(false, e, y);
+  EXPECT_EQ(y_ref, y);
+
+  init_blocks(&e, &y);
+  selector.FormLinearOutput(true, e, y);
+  EXPECT_NE(e_ref, y);
+  EXPECT_NE(y_ref, y);
+
+  init_blocks(&e, &y);
+  selector.FormLinearOutput(true, e, y);
+  EXPECT_EQ(e_ref, y);
+
+  init_blocks(&e, &y);
+  selector.FormLinearOutput(true, e, y);
+  EXPECT_EQ(e_ref, y);
+
+  init_blocks(&e, &y);
+  selector.FormLinearOutput(false, e, y);
+  EXPECT_NE(e_ref, y);
+  EXPECT_NE(y_ref, y);
+
+  init_blocks(&e, &y);
+  selector.FormLinearOutput(false, e, y);
+  EXPECT_EQ(y_ref, y);
+
+  init_blocks(&e, &y);
+  selector.FormLinearOutput(false, e, y);
+  EXPECT_EQ(y_ref, y);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/render_buffer.cc b/modules/audio_processing/aec3/render_buffer.cc
new file mode 100644
index 0000000..6e224be
--- /dev/null
+++ b/modules/audio_processing/aec3/render_buffer.cc
@@ -0,0 +1,50 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_buffer.h"
+
+#include <algorithm>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+RenderBuffer::RenderBuffer(MatrixBuffer* block_buffer,
+                           VectorBuffer* spectrum_buffer,
+                           FftBuffer* fft_buffer)
+    : block_buffer_(block_buffer),
+      spectrum_buffer_(spectrum_buffer),
+      fft_buffer_(fft_buffer) {
+  RTC_DCHECK(block_buffer_);
+  RTC_DCHECK(spectrum_buffer_);
+  RTC_DCHECK(fft_buffer_);
+  RTC_DCHECK_EQ(block_buffer_->buffer.size(), fft_buffer_->buffer.size());
+  RTC_DCHECK_EQ(spectrum_buffer_->buffer.size(), fft_buffer_->buffer.size());
+  RTC_DCHECK_EQ(spectrum_buffer_->read, fft_buffer_->read);
+  RTC_DCHECK_EQ(spectrum_buffer_->write, fft_buffer_->write);
+}
+
+RenderBuffer::~RenderBuffer() = default;
+
+void RenderBuffer::SpectralSum(
+    size_t num_spectra,
+    std::array<float, kFftLengthBy2Plus1>* X2) const {
+  X2->fill(0.f);
+  int position = spectrum_buffer_->read;
+  for (size_t j = 0; j < num_spectra; ++j) {
+    std::transform(X2->begin(), X2->end(),
+                   spectrum_buffer_->buffer[position].begin(), X2->begin(),
+                   std::plus<float>());
+    position = spectrum_buffer_->IncIndex(position);
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/render_buffer.h b/modules/audio_processing/aec3/render_buffer.h
new file mode 100644
index 0000000..7789ffd
--- /dev/null
+++ b/modules/audio_processing/aec3/render_buffer.h
@@ -0,0 +1,80 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_RENDER_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_RENDER_BUFFER_H_
+
+#include <array>
+#include <memory>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/fft_buffer.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "modules/audio_processing/aec3/matrix_buffer.h"
+#include "modules/audio_processing/aec3/vector_buffer.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Provides a buffer of the render data for the echo remover.
+class RenderBuffer {
+ public:
+  RenderBuffer(MatrixBuffer* block_buffer,
+               VectorBuffer* spectrum_buffer,
+               FftBuffer* fft_buffer);
+  ~RenderBuffer();
+
+  // Get a block.
+  const std::vector<std::vector<float>>& Block(int buffer_offset_blocks) const {
+    int position =
+        block_buffer_->OffsetIndex(block_buffer_->read, buffer_offset_blocks);
+    return block_buffer_->buffer[position];
+  }
+
+  // Get the spectrum from one of the FFTs in the buffer.
+  rtc::ArrayView<const float> Spectrum(int buffer_offset_ffts) const {
+    int position = spectrum_buffer_->OffsetIndex(spectrum_buffer_->read,
+                                                 buffer_offset_ffts);
+    return spectrum_buffer_->buffer[position];
+  }
+
+  // Returns the circular fft buffer.
+  rtc::ArrayView<const FftData> GetFftBuffer() const {
+    return fft_buffer_->buffer;
+  }
+
+  // Returns the current position in the circular buffer.
+  size_t Position() const {
+    RTC_DCHECK_EQ(spectrum_buffer_->read, fft_buffer_->read);
+    RTC_DCHECK_EQ(spectrum_buffer_->write, fft_buffer_->write);
+    return fft_buffer_->read;
+  }
+
+  // Returns the sum of the spectrums for a certain number of FFTs.
+  void SpectralSum(size_t num_spectra,
+                   std::array<float, kFftLengthBy2Plus1>* X2) const;
+
+  // Gets the recent activity seen in the render signal.
+  bool GetRenderActivity() const { return render_activity_; }
+
+  // Specifies the recent activity seen in the render signal.
+  void SetRenderActivity(bool activity) { render_activity_ = activity; }
+
+ private:
+  const MatrixBuffer* const block_buffer_;
+  const VectorBuffer* const spectrum_buffer_;
+  const FftBuffer* const fft_buffer_;
+  bool render_activity_ = false;
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RenderBuffer);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_RENDER_BUFFER_H_
diff --git a/modules/audio_processing/aec3/render_buffer_unittest.cc b/modules/audio_processing/aec3/render_buffer_unittest.cc
new file mode 100644
index 0000000..fadd600
--- /dev/null
+++ b/modules/audio_processing/aec3/render_buffer_unittest.cc
@@ -0,0 +1,46 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_buffer.h"
+
+#include <algorithm>
+#include <functional>
+#include <vector>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for non-null fft buffer.
+TEST(RenderBuffer, NullExternalFftBuffer) {
+  MatrixBuffer block_buffer(10, 3, kBlockSize);
+  VectorBuffer spectrum_buffer(10, kFftLengthBy2Plus1);
+  EXPECT_DEATH(RenderBuffer(&block_buffer, &spectrum_buffer, nullptr), "");
+}
+
+// Verifies the check for non-null spectrum buffer.
+TEST(RenderBuffer, NullExternalSpectrumBuffer) {
+  FftBuffer fft_buffer(10);
+  MatrixBuffer block_buffer(10, 3, kBlockSize);
+  EXPECT_DEATH(RenderBuffer(&block_buffer, nullptr, &fft_buffer), "");
+}
+
+// Verifies the check for non-null block buffer.
+TEST(RenderBuffer, NullExternalBlockBuffer) {
+  FftBuffer fft_buffer(10);
+  VectorBuffer spectrum_buffer(10, kFftLengthBy2Plus1);
+  EXPECT_DEATH(RenderBuffer(nullptr, &spectrum_buffer, &fft_buffer), "");
+}
+
+#endif
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/render_delay_buffer.cc b/modules/audio_processing/aec3/render_delay_buffer.cc
new file mode 100644
index 0000000..60606bf
--- /dev/null
+++ b/modules/audio_processing/aec3/render_delay_buffer.cc
@@ -0,0 +1,392 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+
+#include <string.h>
+#include <algorithm>
+#include <numeric>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/block_processor.h"
+#include "modules/audio_processing/aec3/decimator.h"
+#include "modules/audio_processing/aec3/fft_buffer.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "modules/audio_processing/aec3/matrix_buffer.h"
+#include "rtc_base/atomicops.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+namespace {
+
+class RenderDelayBufferImpl final : public RenderDelayBuffer {
+ public:
+  RenderDelayBufferImpl(const EchoCanceller3Config& config, size_t num_bands);
+  ~RenderDelayBufferImpl() override;
+
+  void Reset() override;
+  BufferingEvent Insert(const std::vector<std::vector<float>>& block) override;
+  BufferingEvent PrepareCaptureProcessing() override;
+  bool SetDelay(size_t delay) override;
+  rtc::Optional<size_t> Delay() const override { return delay_; }
+  size_t MaxDelay() const override {
+    return blocks_.buffer.size() - 1 - buffer_headroom_;
+  }
+  RenderBuffer* GetRenderBuffer() override { return &echo_remover_buffer_; }
+
+  const DownsampledRenderBuffer& GetDownsampledRenderBuffer() const override {
+    return low_rate_;
+  }
+
+  bool CausalDelay(size_t delay) const override;
+
+ private:
+  static int instance_count_;
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  const Aec3Optimization optimization_;
+  const EchoCanceller3Config config_;
+  const int sub_block_size_;
+  MatrixBuffer blocks_;
+  VectorBuffer spectra_;
+  FftBuffer ffts_;
+  rtc::Optional<size_t> delay_;
+  rtc::Optional<int> internal_delay_;
+  RenderBuffer echo_remover_buffer_;
+  DownsampledRenderBuffer low_rate_;
+  Decimator render_decimator_;
+  const std::vector<std::vector<float>> zero_block_;
+  const Aec3Fft fft_;
+  std::vector<float> render_ds_;
+  const int buffer_headroom_;
+  bool last_call_was_render_ = false;
+  int num_api_calls_in_a_row_ = 0;
+  int max_observed_jitter_ = 1;
+  size_t capture_call_counter_ = 0;
+  size_t render_call_counter_ = 0;
+  bool render_activity_ = false;
+  size_t render_activity_counter_ = 0;
+
+  int LowRateBufferOffset() const { return DelayEstimatorOffset(config_) >> 1; }
+  int MaxExternalDelayToInternalDelay(size_t delay) const;
+  void ApplyDelay(int delay);
+  void InsertBlock(const std::vector<std::vector<float>>& block,
+                   int previous_write);
+  bool DetectActiveRender(rtc::ArrayView<const float> x) const;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RenderDelayBufferImpl);
+};
+
+// Increases the write indices for the render buffers.
+void IncreaseWriteIndices(int sub_block_size,
+                          MatrixBuffer* blocks,
+                          VectorBuffer* spectra,
+                          FftBuffer* ffts,
+                          DownsampledRenderBuffer* low_rate) {
+  low_rate->UpdateWriteIndex(-sub_block_size);
+  blocks->IncWriteIndex();
+  spectra->DecWriteIndex();
+  ffts->DecWriteIndex();
+}
+
+// Increases the read indices for the render buffers.
+void IncreaseReadIndices(const rtc::Optional<int>& delay,
+                         int sub_block_size,
+                         MatrixBuffer* blocks,
+                         VectorBuffer* spectra,
+                         FftBuffer* ffts,
+                         DownsampledRenderBuffer* low_rate) {
+  RTC_DCHECK_NE(low_rate->read, low_rate->write);
+  low_rate->UpdateReadIndex(-sub_block_size);
+
+  if (blocks->read != blocks->write) {
+    blocks->IncReadIndex();
+    spectra->DecReadIndex();
+    ffts->DecReadIndex();
+  } else {
+    // Only allow underrun for blocks_ when the delay is not set.
+    RTC_DCHECK(!delay);
+  }
+}
+
+// Checks for a render buffer overrun.
+bool RenderOverrun(const MatrixBuffer& b, const DownsampledRenderBuffer& l) {
+  return l.read == l.write || b.read == b.write;
+}
+
+// Checks for a render buffer underrun. If the delay is not specified, only the
+// low rate buffer underrun is counted as the delay offset for the other buffers
+// is unknown.
+bool RenderUnderrun(const rtc::Optional<int>& delay,
+                    const MatrixBuffer& b,
+                    const DownsampledRenderBuffer& l) {
+  return l.read == l.write || (delay && b.read == b.write);
+}
+
+// Computes the latency in the buffer (the number of unread elements).
+int BufferLatency(const DownsampledRenderBuffer& l) {
+  return (l.buffer.size() + l.read - l.write) % l.buffer.size();
+}
+
+// Computes the mismatch between the number of render and capture calls based on
+// the known offset (achieved during reset) of the low rate buffer.
+bool ApiCallSkew(const DownsampledRenderBuffer& low_rate_buffer,
+                 int sub_block_size,
+                 int low_rate_buffer_offset_sub_blocks) {
+  int latency = BufferLatency(low_rate_buffer);
+  int skew = abs(low_rate_buffer_offset_sub_blocks * sub_block_size - latency);
+  int skew_limit = low_rate_buffer_offset_sub_blocks * sub_block_size;
+  return skew >= skew_limit;
+}
+
+int RenderDelayBufferImpl::instance_count_ = 0;
+
+RenderDelayBufferImpl::RenderDelayBufferImpl(const EchoCanceller3Config& config,
+                                             size_t num_bands)
+    : data_dumper_(
+          new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))),
+      optimization_(DetectOptimization()),
+      config_(config),
+      sub_block_size_(
+          static_cast<int>(config.delay.down_sampling_factor > 0
+                               ? kBlockSize / config.delay.down_sampling_factor
+                               : kBlockSize)),
+      blocks_(GetRenderDelayBufferSize(config.delay.down_sampling_factor,
+                                       config.delay.num_filters,
+                                       config.filter.main.length_blocks),
+              num_bands,
+              kBlockSize),
+      spectra_(blocks_.buffer.size(), kFftLengthBy2Plus1),
+      ffts_(blocks_.buffer.size()),
+      delay_(config_.delay.min_echo_path_delay_blocks),
+      echo_remover_buffer_(&blocks_, &spectra_, &ffts_),
+      low_rate_(GetDownSampledBufferSize(config.delay.down_sampling_factor,
+                                         config.delay.num_filters)),
+      render_decimator_(config.delay.down_sampling_factor),
+      zero_block_(num_bands, std::vector<float>(kBlockSize, 0.f)),
+      fft_(),
+      render_ds_(sub_block_size_, 0.f),
+      buffer_headroom_(config.filter.main.length_blocks) {
+  RTC_DCHECK_EQ(blocks_.buffer.size(), ffts_.buffer.size());
+  RTC_DCHECK_EQ(spectra_.buffer.size(), ffts_.buffer.size());
+
+  // Necessary condition to avoid unrecoverable echp due to noncausal alignment.
+  RTC_DCHECK_EQ(DelayEstimatorOffset(config_), LowRateBufferOffset() * 2);
+  Reset();
+}
+
+RenderDelayBufferImpl::~RenderDelayBufferImpl() = default;
+
+// Resets the buffer delays and clears the reported delays.
+void RenderDelayBufferImpl::Reset() {
+  last_call_was_render_ = false;
+  num_api_calls_in_a_row_ = 1;
+
+  // Pre-fill the low rate buffer (which is used for delay estimation) to add
+  // headroom for the allowed api call jitter.
+  low_rate_.read = low_rate_.OffsetIndex(
+      low_rate_.write, LowRateBufferOffset() * sub_block_size_);
+
+  // Set the render buffer delays to the default delay.
+  ApplyDelay(config_.delay.default_delay);
+
+  // Unset the delays which are set by ApplyConfig.
+  delay_ = rtc::nullopt;
+  internal_delay_ = rtc::nullopt;
+}
+
+// Inserts a new block into the render buffers.
+RenderDelayBuffer::BufferingEvent RenderDelayBufferImpl::Insert(
+    const std::vector<std::vector<float>>& block) {
+  ++render_call_counter_;
+  if (delay_) {
+    if (!last_call_was_render_) {
+      last_call_was_render_ = true;
+      num_api_calls_in_a_row_ = 1;
+    } else {
+      if (++num_api_calls_in_a_row_ > max_observed_jitter_) {
+        max_observed_jitter_ = num_api_calls_in_a_row_;
+        RTC_LOG(LS_INFO)
+            << "New max number api jitter observed at render block "
+            << render_call_counter_ << ":  " << num_api_calls_in_a_row_
+            << " blocks";
+      }
+    }
+  }
+
+  // Increase the write indices to where the new blocks should be written.
+  const int previous_write = blocks_.write;
+  IncreaseWriteIndices(sub_block_size_, &blocks_, &spectra_, &ffts_,
+                       &low_rate_);
+
+  // Allow overrun and do a reset when render overrun occurrs due to more render
+  // data being inserted than capture data is received.
+  BufferingEvent event = RenderOverrun(blocks_, low_rate_)
+                             ? BufferingEvent::kRenderOverrun
+                             : BufferingEvent::kNone;
+
+  // Detect and update render activity.
+  if (!render_activity_) {
+    render_activity_counter_ += DetectActiveRender(block[0]) ? 1 : 0;
+    render_activity_ = render_activity_counter_ >= 20;
+  }
+
+  // Insert the new render block into the specified position.
+  InsertBlock(block, previous_write);
+
+  if (event != BufferingEvent::kNone) {
+    Reset();
+  }
+
+  return event;
+}
+
+// Prepares the render buffers for processing another capture block.
+RenderDelayBuffer::BufferingEvent
+RenderDelayBufferImpl::PrepareCaptureProcessing() {
+  BufferingEvent event = BufferingEvent::kNone;
+  ++capture_call_counter_;
+
+  if (delay_) {
+    if (last_call_was_render_) {
+      last_call_was_render_ = false;
+      num_api_calls_in_a_row_ = 1;
+    } else {
+      if (++num_api_calls_in_a_row_ > max_observed_jitter_) {
+        max_observed_jitter_ = num_api_calls_in_a_row_;
+        RTC_LOG(LS_INFO)
+            << "New max number api jitter observed at capture block "
+            << capture_call_counter_ << ":  " << num_api_calls_in_a_row_
+            << " blocks";
+      }
+    }
+  }
+
+  if (RenderUnderrun(internal_delay_, blocks_, low_rate_)) {
+    // Don't increase the read indices if there is a render underrun.
+    event = BufferingEvent::kRenderUnderrun;
+  } else {
+    // Increase the read indices in the render buffers to point to the most
+    // recent block to use in the capture processing.
+    IncreaseReadIndices(internal_delay_, sub_block_size_, &blocks_, &spectra_,
+                        &ffts_, &low_rate_);
+
+    // Check for skew in the API calls which, if too large, causes the delay
+    // estimation to be noncausal. Doing this check after the render indice
+    // increase saves one unit of allowed skew. Note that the skew check only
+    // should need to be one-sided as one of the skew directions results in an
+    // underrun.
+    bool skew = ApiCallSkew(low_rate_, sub_block_size_, LowRateBufferOffset());
+    event = skew ? BufferingEvent::kApiCallSkew : BufferingEvent::kNone;
+  }
+
+  if (event != BufferingEvent::kNone) {
+    Reset();
+  }
+
+  echo_remover_buffer_.SetRenderActivity(render_activity_);
+  if (render_activity_) {
+    render_activity_counter_ = 0;
+    render_activity_ = false;
+  }
+
+  return event;
+}
+
+// Sets the delay and returns a bool indicating whether the delay was changed.
+bool RenderDelayBufferImpl::SetDelay(size_t delay) {
+  if (delay_ && *delay_ == delay) {
+    return false;
+  }
+  delay_ = delay;
+
+  // Compute the internal delay and limit the delay to the allowed range.
+  int internal_delay = MaxExternalDelayToInternalDelay(*delay_);
+  internal_delay_ =
+      std::min(MaxDelay(), static_cast<size_t>(std::max(internal_delay, 0)));
+
+  // Apply the delay to the buffers.
+  ApplyDelay(*internal_delay_);
+  return true;
+}
+
+// Returns whether the specified delay is causal.
+bool RenderDelayBufferImpl::CausalDelay(size_t delay) const {
+  // Compute the internal delay and limit the delay to the allowed range.
+  int internal_delay = MaxExternalDelayToInternalDelay(delay);
+  internal_delay =
+      std::min(MaxDelay(), static_cast<size_t>(std::max(internal_delay, 0)));
+
+  return internal_delay >=
+         static_cast<int>(config_.delay.min_echo_path_delay_blocks);
+}
+
+// Maps the externally computed delay to the delay used internally.
+int RenderDelayBufferImpl::MaxExternalDelayToInternalDelay(
+    size_t external_delay_blocks) const {
+  const int latency = BufferLatency(low_rate_);
+  RTC_DCHECK_LT(0, sub_block_size_);
+  RTC_DCHECK_EQ(0, latency % sub_block_size_);
+  int latency_blocks = latency / sub_block_size_;
+  return latency_blocks + static_cast<int>(external_delay_blocks) -
+         DelayEstimatorOffset(config_);
+}
+
+// Set the read indices according to the delay.
+void RenderDelayBufferImpl::ApplyDelay(int delay) {
+  blocks_.read = blocks_.OffsetIndex(blocks_.write, -delay);
+  spectra_.read = spectra_.OffsetIndex(spectra_.write, delay);
+  ffts_.read = ffts_.OffsetIndex(ffts_.write, delay);
+}
+
+// Inserts a block into the render buffers.
+void RenderDelayBufferImpl::InsertBlock(
+    const std::vector<std::vector<float>>& block,
+    int previous_write) {
+  auto& b = blocks_;
+  auto& lr = low_rate_;
+  auto& ds = render_ds_;
+  auto& f = ffts_;
+  auto& s = spectra_;
+  RTC_DCHECK_EQ(block.size(), b.buffer[b.write].size());
+  for (size_t k = 0; k < block.size(); ++k) {
+    RTC_DCHECK_EQ(block[k].size(), b.buffer[b.write][k].size());
+    std::copy(block[k].begin(), block[k].end(), b.buffer[b.write][k].begin());
+  }
+
+  render_decimator_.Decimate(block[0], ds);
+  std::copy(ds.rbegin(), ds.rend(), lr.buffer.begin() + lr.write);
+  fft_.PaddedFft(block[0], b.buffer[previous_write][0], &f.buffer[f.write]);
+  f.buffer[f.write].Spectrum(optimization_, s.buffer[s.write]);
+}
+
+bool RenderDelayBufferImpl::DetectActiveRender(
+    rtc::ArrayView<const float> x) const {
+  const float x_energy = std::inner_product(x.begin(), x.end(), x.begin(), 0.f);
+  return x_energy > (config_.render_levels.active_render_limit *
+                     config_.render_levels.active_render_limit) *
+                        kFftLengthBy2;
+}
+
+}  // namespace
+
+int RenderDelayBuffer::RenderDelayBuffer::DelayEstimatorOffset(
+    const EchoCanceller3Config& config) {
+  return config.delay.api_call_jitter_blocks * 2;
+}
+
+RenderDelayBuffer* RenderDelayBuffer::Create(const EchoCanceller3Config& config,
+                                             size_t num_bands) {
+  return new RenderDelayBufferImpl(config, num_bands);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/render_delay_buffer.h b/modules/audio_processing/aec3/render_delay_buffer.h
new file mode 100644
index 0000000..22b0c7f
--- /dev/null
+++ b/modules/audio_processing/aec3/render_delay_buffer.h
@@ -0,0 +1,80 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_BUFFER_H_
+
+#include <stddef.h>
+#include <array>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "api/optional.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/downsampled_render_buffer.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+
+namespace webrtc {
+
+// Class for buffering the incoming render blocks such that these may be
+// extracted with a specified delay.
+class RenderDelayBuffer {
+ public:
+  enum class BufferingEvent {
+    kNone,
+    kRenderUnderrun,
+    kRenderOverrun,
+    kApiCallSkew,
+    kRenderDataLost
+  };
+
+  static RenderDelayBuffer* Create(const EchoCanceller3Config& config,
+                                   size_t num_bands);
+  virtual ~RenderDelayBuffer() = default;
+
+  // Resets the buffer alignment.
+  virtual void Reset() = 0;
+
+  // Inserts a block into the buffer.
+  virtual BufferingEvent Insert(
+      const std::vector<std::vector<float>>& block) = 0;
+
+  // Updates the buffers one step based on the specified buffer delay. Returns
+  // an enum indicating whether there was a special event that occurred.
+  virtual BufferingEvent PrepareCaptureProcessing() = 0;
+
+  // Sets the buffer delay and returns a bool indicating whether the delay
+  // changed.
+  virtual bool SetDelay(size_t delay) = 0;
+
+  // Gets the buffer delay.
+  virtual rtc::Optional<size_t> Delay() const = 0;
+
+  // Gets the buffer delay.
+  virtual size_t MaxDelay() const = 0;
+
+  // Returns the render buffer for the echo remover.
+  virtual RenderBuffer* GetRenderBuffer() = 0;
+
+  // Returns the downsampled render buffer.
+  virtual const DownsampledRenderBuffer& GetDownsampledRenderBuffer() const = 0;
+
+  // Returns whether the current delay is noncausal.
+  virtual bool CausalDelay(size_t delay) const = 0;
+
+  // Returns the maximum non calusal offset that can occur in the delay buffer.
+  static int DelayEstimatorOffset(const EchoCanceller3Config& config);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_BUFFER_H_
diff --git a/modules/audio_processing/aec3/render_delay_buffer_unittest.cc b/modules/audio_processing/aec3/render_delay_buffer_unittest.cc
new file mode 100644
index 0000000..fb9c48d
--- /dev/null
+++ b/modules/audio_processing/aec3/render_delay_buffer_unittest.cc
@@ -0,0 +1,125 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+
+#include <memory>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/random.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+std::string ProduceDebugText(int sample_rate_hz) {
+  std::ostringstream ss;
+  ss << "Sample rate: " << sample_rate_hz;
+  return ss.str();
+}
+
+}  // namespace
+
+// Verifies that the buffer overflow is correctly reported.
+TEST(RenderDelayBuffer, BufferOverflow) {
+  const EchoCanceller3Config config;
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    std::unique_ptr<RenderDelayBuffer> delay_buffer(
+        RenderDelayBuffer::Create(config, NumBandsForRate(rate)));
+    std::vector<std::vector<float>> block_to_insert(
+        NumBandsForRate(rate), std::vector<float>(kBlockSize, 0.f));
+    for (size_t k = 0; k < 10; ++k) {
+      EXPECT_EQ(RenderDelayBuffer::BufferingEvent::kNone,
+                delay_buffer->Insert(block_to_insert));
+    }
+    bool overrun_occurred = false;
+    for (size_t k = 0; k < 1000; ++k) {
+      RenderDelayBuffer::BufferingEvent event =
+          delay_buffer->Insert(block_to_insert);
+      overrun_occurred =
+          overrun_occurred ||
+          RenderDelayBuffer::BufferingEvent::kRenderOverrun == event;
+    }
+
+    EXPECT_TRUE(overrun_occurred);
+  }
+}
+
+// Verifies that the check for available block works.
+TEST(RenderDelayBuffer, AvailableBlock) {
+  constexpr size_t kNumBands = 1;
+  std::unique_ptr<RenderDelayBuffer> delay_buffer(
+      RenderDelayBuffer::Create(EchoCanceller3Config(), kNumBands));
+  std::vector<std::vector<float>> input_block(
+      kNumBands, std::vector<float>(kBlockSize, 1.f));
+  EXPECT_EQ(RenderDelayBuffer::BufferingEvent::kNone,
+            delay_buffer->Insert(input_block));
+  delay_buffer->PrepareCaptureProcessing();
+}
+
+// Verifies the SetDelay method.
+TEST(RenderDelayBuffer, SetDelay) {
+  EchoCanceller3Config config;
+  std::unique_ptr<RenderDelayBuffer> delay_buffer(
+      RenderDelayBuffer::Create(config, 1));
+  ASSERT_FALSE(delay_buffer->Delay());
+  for (size_t delay = config.delay.min_echo_path_delay_blocks + 1; delay < 20;
+       ++delay) {
+    delay_buffer->SetDelay(delay);
+    ASSERT_TRUE(delay_buffer->Delay());
+    EXPECT_EQ(delay, *delay_buffer->Delay());
+  }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for feasible delay.
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.
+TEST(RenderDelayBuffer, DISABLED_WrongDelay) {
+  std::unique_ptr<RenderDelayBuffer> delay_buffer(
+      RenderDelayBuffer::Create(EchoCanceller3Config(), 3));
+  EXPECT_DEATH(delay_buffer->SetDelay(21), "");
+}
+
+// Verifies the check for the number of bands in the inserted blocks.
+TEST(RenderDelayBuffer, WrongNumberOfBands) {
+  for (auto rate : {16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    std::unique_ptr<RenderDelayBuffer> delay_buffer(RenderDelayBuffer::Create(
+        EchoCanceller3Config(), NumBandsForRate(rate)));
+    std::vector<std::vector<float>> block_to_insert(
+        NumBandsForRate(rate < 48000 ? rate + 16000 : 16000),
+        std::vector<float>(kBlockSize, 0.f));
+    EXPECT_DEATH(delay_buffer->Insert(block_to_insert), "");
+  }
+}
+
+// Verifies the check of the length of the inserted blocks.
+TEST(RenderDelayBuffer, WrongBlockLength) {
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    std::unique_ptr<RenderDelayBuffer> delay_buffer(
+        RenderDelayBuffer::Create(EchoCanceller3Config(), 3));
+    std::vector<std::vector<float>> block_to_insert(
+        NumBandsForRate(rate), std::vector<float>(kBlockSize - 1, 0.f));
+    EXPECT_DEATH(delay_buffer->Insert(block_to_insert), "");
+  }
+}
+
+#endif
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/render_delay_controller.cc b/modules/audio_processing/aec3/render_delay_controller.cc
new file mode 100644
index 0000000..db00b9b
--- /dev/null
+++ b/modules/audio_processing/aec3/render_delay_controller.cc
@@ -0,0 +1,266 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/render_delay_controller.h"
+
+#include <algorithm>
+#include <memory>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/echo_path_delay_estimator.h"
+#include "modules/audio_processing/aec3/render_delay_controller_metrics.h"
+#include "modules/audio_processing/aec3/skew_estimator.h"
+#include "rtc_base/atomicops.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr int kSkewHistorySizeLog2 = 8;
+
+class RenderDelayControllerImpl final : public RenderDelayController {
+ public:
+  RenderDelayControllerImpl(const EchoCanceller3Config& config,
+                            int non_causal_offset,
+                            int sample_rate_hz);
+  ~RenderDelayControllerImpl() override;
+  void Reset() override;
+  void LogRenderCall() override;
+  rtc::Optional<DelayEstimate> GetDelay(
+      const DownsampledRenderBuffer& render_buffer,
+      rtc::ArrayView<const float> capture) override;
+
+ private:
+  static int instance_count_;
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  const int delay_headroom_blocks_;
+  const int hysteresis_limit_1_blocks_;
+  const int hysteresis_limit_2_blocks_;
+  const int skew_hysteresis_blocks_;
+  rtc::Optional<DelayEstimate> delay_;
+  EchoPathDelayEstimator delay_estimator_;
+  std::vector<float> delay_buf_;
+  int delay_buf_index_ = 0;
+  RenderDelayControllerMetrics metrics_;
+  SkewEstimator skew_estimator_;
+  rtc::Optional<DelayEstimate> delay_samples_;
+  rtc::Optional<int> skew_;
+  int previous_offset_blocks_ = 0;
+  int skew_shift_reporting_counter_ = 0;
+  size_t capture_call_counter_ = 0;
+  int delay_change_counter_ = 0;
+  size_t soft_reset_counter_ = 0;
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RenderDelayControllerImpl);
+};
+
+DelayEstimate ComputeBufferDelay(
+    const rtc::Optional<DelayEstimate>& current_delay,
+    int delay_headroom_blocks,
+    int hysteresis_limit_1_blocks,
+    int hysteresis_limit_2_blocks,
+    int offset_blocks,
+    DelayEstimate estimated_delay) {
+  // The below division is not exact and the truncation is intended.
+  const int echo_path_delay_blocks = estimated_delay.delay >> kBlockSizeLog2;
+
+  // Compute the buffer delay increase required to achieve the desired latency.
+  size_t new_delay_blocks = std::max(
+      echo_path_delay_blocks + offset_blocks - delay_headroom_blocks, 0);
+
+  // Add hysteresis.
+  if (current_delay) {
+    size_t current_delay_blocks = current_delay->delay;
+    if (new_delay_blocks > current_delay_blocks) {
+      if (new_delay_blocks <=
+          current_delay_blocks + hysteresis_limit_1_blocks) {
+        new_delay_blocks = current_delay_blocks;
+      }
+    } else if (new_delay_blocks < current_delay_blocks) {
+      size_t hysteresis_limit = std::max(
+          static_cast<int>(current_delay_blocks) - hysteresis_limit_2_blocks,
+          0);
+      if (new_delay_blocks >= hysteresis_limit) {
+        new_delay_blocks = current_delay_blocks;
+      }
+    }
+  }
+
+  DelayEstimate new_delay = estimated_delay;
+  new_delay.delay = new_delay_blocks;
+  return new_delay;
+}
+
+int RenderDelayControllerImpl::instance_count_ = 0;
+
+RenderDelayControllerImpl::RenderDelayControllerImpl(
+    const EchoCanceller3Config& config,
+    int non_causal_offset,
+    int sample_rate_hz)
+    : data_dumper_(
+          new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))),
+      delay_headroom_blocks_(
+          static_cast<int>(config.delay.delay_headroom_blocks)),
+      hysteresis_limit_1_blocks_(
+          static_cast<int>(config.delay.hysteresis_limit_1_blocks)),
+      hysteresis_limit_2_blocks_(
+          static_cast<int>(config.delay.hysteresis_limit_2_blocks)),
+      skew_hysteresis_blocks_(
+          static_cast<int>(config.delay.skew_hysteresis_blocks)),
+      delay_estimator_(data_dumper_.get(), config),
+      delay_buf_(kBlockSize * non_causal_offset, 0.f),
+      skew_estimator_(kSkewHistorySizeLog2) {
+  RTC_DCHECK(ValidFullBandRate(sample_rate_hz));
+  delay_estimator_.LogDelayEstimationProperties(sample_rate_hz,
+                                                delay_buf_.size());
+}
+
+RenderDelayControllerImpl::~RenderDelayControllerImpl() = default;
+
+void RenderDelayControllerImpl::Reset() {
+  delay_ = rtc::nullopt;
+  delay_samples_ = rtc::nullopt;
+  skew_ = rtc::nullopt;
+  previous_offset_blocks_ = 0;
+  std::fill(delay_buf_.begin(), delay_buf_.end(), 0.f);
+  delay_estimator_.Reset(false);
+  skew_estimator_.Reset();
+  delay_change_counter_ = 0;
+  soft_reset_counter_ = 0;
+}
+
+void RenderDelayControllerImpl::LogRenderCall() {
+  skew_estimator_.LogRenderCall();
+}
+
+rtc::Optional<DelayEstimate> RenderDelayControllerImpl::GetDelay(
+    const DownsampledRenderBuffer& render_buffer,
+    rtc::ArrayView<const float> capture) {
+  RTC_DCHECK_EQ(kBlockSize, capture.size());
+  ++capture_call_counter_;
+
+  // Estimate the delay with a delayed capture.
+  RTC_DCHECK_LT(delay_buf_index_ + kBlockSize - 1, delay_buf_.size());
+  rtc::ArrayView<const float> capture_delayed(&delay_buf_[delay_buf_index_],
+                                              kBlockSize);
+  auto delay_samples =
+      delay_estimator_.EstimateDelay(render_buffer, capture_delayed);
+
+  std::copy(capture.begin(), capture.end(),
+            delay_buf_.begin() + delay_buf_index_);
+  delay_buf_index_ = (delay_buf_index_ + kBlockSize) % delay_buf_.size();
+
+  // Compute the latest skew update.
+  rtc::Optional<int> skew = skew_estimator_.GetSkewFromCapture();
+
+  if (delay_samples) {
+    if (!delay_samples_ || delay_samples->delay != delay_samples_->delay) {
+      delay_change_counter_ = 0;
+    }
+    if (delay_samples_) {
+      delay_samples_->blocks_since_last_change =
+          delay_samples_->delay == delay_samples->delay
+              ? delay_samples_->blocks_since_last_change + 1
+              : 0;
+      delay_samples_->blocks_since_last_update = 0;
+      delay_samples_->delay = delay_samples->delay;
+      delay_samples_->quality = delay_samples->quality;
+    } else {
+      delay_samples_ = delay_samples;
+    }
+  } else {
+    if (delay_samples_) {
+      ++delay_samples_->blocks_since_last_change;
+      ++delay_samples_->blocks_since_last_update;
+    }
+  }
+
+  if (delay_change_counter_ < 2 * kNumBlocksPerSecond) {
+    ++delay_change_counter_;
+    // If a new delay estimate is recently obtained, store the skew for that.
+    skew_ = skew;
+  } else {
+    // A reliable skew should have been obtained after 2 seconds.
+    RTC_DCHECK(skew_);
+    RTC_DCHECK(skew);
+  }
+
+  ++soft_reset_counter_;
+  int offset_blocks = 0;
+  if (skew_ && skew && delay_samples_ &&
+      delay_samples_->quality == DelayEstimate::Quality::kRefined) {
+    // Compute the skew offset and add a margin.
+    offset_blocks = *skew_ - *skew;
+    if (abs(offset_blocks) <= skew_hysteresis_blocks_) {
+      offset_blocks = 0;
+    } else if (soft_reset_counter_ > 10 * kNumBlocksPerSecond) {
+      // Soft reset the delay estimator if there is a significant offset
+      // detected.
+      delay_estimator_.Reset(true);
+      soft_reset_counter_ = 0;
+    }
+  }
+
+  // Log any changes in the skew.
+  skew_shift_reporting_counter_ =
+      std::max(0, skew_shift_reporting_counter_ - 1);
+  rtc::Optional<int> skew_shift =
+      skew_shift_reporting_counter_ == 0 &&
+              previous_offset_blocks_ != offset_blocks
+          ? rtc::Optional<int>(offset_blocks - previous_offset_blocks_)
+          : rtc::nullopt;
+  previous_offset_blocks_ = offset_blocks;
+  if (skew_shift) {
+    RTC_LOG(LS_WARNING) << "API call skew shift of " << *skew_shift
+                        << " blocks detected at capture block "
+                        << capture_call_counter_;
+    skew_shift_reporting_counter_ = 3 * kNumBlocksPerSecond;
+  }
+
+  if (delay_samples_) {
+    // Compute the render delay buffer delay.
+    delay_ = ComputeBufferDelay(
+        delay_, delay_headroom_blocks_, hysteresis_limit_1_blocks_,
+        hysteresis_limit_2_blocks_, offset_blocks, *delay_samples_);
+  }
+
+  metrics_.Update(delay_samples_ ? rtc::Optional<size_t>(delay_samples_->delay)
+                                 : rtc::nullopt,
+                  delay_ ? delay_->delay : 0, skew_shift);
+
+  data_dumper_->DumpRaw("aec3_render_delay_controller_delay",
+                        delay_samples ? delay_samples->delay : 0);
+  data_dumper_->DumpRaw("aec3_render_delay_controller_buffer_delay",
+                        delay_ ? delay_->delay : 0);
+
+  data_dumper_->DumpRaw("aec3_render_delay_controller_new_skew",
+                        skew ? *skew : 0);
+  data_dumper_->DumpRaw("aec3_render_delay_controller_old_skew",
+                        skew_ ? *skew_ : 0);
+  data_dumper_->DumpRaw("aec3_render_delay_controller_offset", offset_blocks);
+
+  return delay_;
+}
+
+}  // namespace
+
+RenderDelayController* RenderDelayController::Create(
+    const EchoCanceller3Config& config,
+    int non_causal_offset,
+    int sample_rate_hz) {
+  return new RenderDelayControllerImpl(config, non_causal_offset,
+                                       sample_rate_hz);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/render_delay_controller.h b/modules/audio_processing/aec3/render_delay_controller.h
new file mode 100644
index 0000000..24d7590
--- /dev/null
+++ b/modules/audio_processing/aec3/render_delay_controller.h
@@ -0,0 +1,45 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_CONTROLLER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_CONTROLLER_H_
+
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "api/optional.h"
+#include "modules/audio_processing/aec3/delay_estimate.h"
+#include "modules/audio_processing/aec3/downsampled_render_buffer.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+
+namespace webrtc {
+
+// Class for aligning the render and capture signal using a RenderDelayBuffer.
+class RenderDelayController {
+ public:
+  static RenderDelayController* Create(const EchoCanceller3Config& config,
+                                       int non_causal_offset,
+                                       int sample_rate_hz);
+  virtual ~RenderDelayController() = default;
+
+  // Resets the delay controller.
+  virtual void Reset() = 0;
+
+  // Logs a render call.
+  virtual void LogRenderCall() = 0;
+
+  // Aligns the render buffer content with the capture signal.
+  virtual rtc::Optional<DelayEstimate> GetDelay(
+      const DownsampledRenderBuffer& render_buffer,
+      rtc::ArrayView<const float> capture) = 0;
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_CONTROLLER_H_
diff --git a/modules/audio_processing/aec3/render_delay_controller_metrics.cc b/modules/audio_processing/aec3/render_delay_controller_metrics.cc
new file mode 100644
index 0000000..e5668da
--- /dev/null
+++ b/modules/audio_processing/aec3/render_delay_controller_metrics.cc
@@ -0,0 +1,136 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_delay_controller_metrics.h"
+
+#include <algorithm>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+
+enum class DelayReliabilityCategory {
+  kNone,
+  kPoor,
+  kMedium,
+  kGood,
+  kExcellent,
+  kNumCategories
+};
+enum class DelayChangesCategory {
+  kNone,
+  kFew,
+  kSeveral,
+  kMany,
+  kConstant,
+  kNumCategories
+};
+
+constexpr int kMaxSkewShiftCount = 20;
+
+}  // namespace
+
+RenderDelayControllerMetrics::RenderDelayControllerMetrics() = default;
+
+void RenderDelayControllerMetrics::Update(
+    rtc::Optional<size_t> delay_samples,
+    size_t buffer_delay_blocks,
+    rtc::Optional<int> skew_shift_blocks) {
+  ++call_counter_;
+
+  if (!initial_update) {
+    if (delay_samples) {
+      ++reliable_delay_estimate_counter_;
+      size_t delay_blocks = (*delay_samples) / kBlockSize;
+
+      if (delay_blocks != delay_blocks_) {
+        ++delay_change_counter_;
+        delay_blocks_ = delay_blocks;
+      }
+    }
+
+    if (skew_shift_blocks) {
+      skew_shift_count_ = std::min(kMaxSkewShiftCount, skew_shift_count_);
+    }
+  } else if (++initial_call_counter_ == 5 * kNumBlocksPerSecond) {
+    initial_update = false;
+  }
+
+  if (call_counter_ == kMetricsReportingIntervalBlocks) {
+    int value_to_report = static_cast<int>(delay_blocks_);
+    value_to_report = std::min(124, value_to_report);
+    RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.EchoCanceller.EchoPathDelay",
+                                value_to_report, 0, 124, 125);
+
+    value_to_report = static_cast<int>(buffer_delay_blocks);
+    value_to_report = std::min(124, value_to_report);
+    RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.EchoCanceller.BufferDelay",
+                                value_to_report, 0, 124, 125);
+
+    DelayReliabilityCategory delay_reliability;
+    if (reliable_delay_estimate_counter_ == 0) {
+      delay_reliability = DelayReliabilityCategory::kNone;
+    } else if (reliable_delay_estimate_counter_ > (call_counter_ >> 1)) {
+      delay_reliability = DelayReliabilityCategory::kExcellent;
+    } else if (reliable_delay_estimate_counter_ > 100) {
+      delay_reliability = DelayReliabilityCategory::kGood;
+    } else if (reliable_delay_estimate_counter_ > 10) {
+      delay_reliability = DelayReliabilityCategory::kMedium;
+    } else {
+      delay_reliability = DelayReliabilityCategory::kPoor;
+    }
+    RTC_HISTOGRAM_ENUMERATION(
+        "WebRTC.Audio.EchoCanceller.ReliableDelayEstimates",
+        static_cast<int>(delay_reliability),
+        static_cast<int>(DelayReliabilityCategory::kNumCategories));
+
+    DelayChangesCategory delay_changes;
+    if (delay_change_counter_ == 0) {
+      delay_changes = DelayChangesCategory::kNone;
+    } else if (delay_change_counter_ > 10) {
+      delay_changes = DelayChangesCategory::kConstant;
+    } else if (delay_change_counter_ > 5) {
+      delay_changes = DelayChangesCategory::kMany;
+    } else if (delay_change_counter_ > 2) {
+      delay_changes = DelayChangesCategory::kSeveral;
+    } else {
+      delay_changes = DelayChangesCategory::kFew;
+    }
+    RTC_HISTOGRAM_ENUMERATION(
+        "WebRTC.Audio.EchoCanceller.DelayChanges",
+        static_cast<int>(delay_changes),
+        static_cast<int>(DelayChangesCategory::kNumCategories));
+
+    metrics_reported_ = true;
+    call_counter_ = 0;
+    ResetMetrics();
+  } else {
+    metrics_reported_ = false;
+  }
+
+  if (!initial_update && ++skew_report_timer_ == 60 * kNumBlocksPerSecond) {
+    RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.EchoCanceller.MaxSkewShiftCount",
+                                skew_shift_count_, 0, kMaxSkewShiftCount,
+                                kMaxSkewShiftCount + 1);
+
+    skew_shift_count_ = 0;
+    skew_report_timer_ = 0;
+  }
+}
+
+void RenderDelayControllerMetrics::ResetMetrics() {
+  delay_change_counter_ = 0;
+  reliable_delay_estimate_counter_ = 0;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/render_delay_controller_metrics.h b/modules/audio_processing/aec3/render_delay_controller_metrics.h
new file mode 100644
index 0000000..8c8845e
--- /dev/null
+++ b/modules/audio_processing/aec3/render_delay_controller_metrics.h
@@ -0,0 +1,51 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_CONTROLLER_METRICS_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_CONTROLLER_METRICS_H_
+
+#include "api/optional.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Handles the reporting of metrics for the render delay controller.
+class RenderDelayControllerMetrics {
+ public:
+  RenderDelayControllerMetrics();
+
+  // Updates the metric with new data.
+  void Update(rtc::Optional<size_t> delay_samples,
+              size_t buffer_delay_blocks,
+              rtc::Optional<int> skew_shift_blocks);
+
+  // Returns true if the metrics have just been reported, otherwise false.
+  bool MetricsReported() { return metrics_reported_; }
+
+ private:
+  // Resets the metrics.
+  void ResetMetrics();
+
+  size_t delay_blocks_ = 0;
+  int reliable_delay_estimate_counter_ = 0;
+  int delay_change_counter_ = 0;
+  int call_counter_ = 0;
+  int skew_report_timer_ = 0;
+  int initial_call_counter_ = 0;
+  bool metrics_reported_ = false;
+  bool initial_update = true;
+  int skew_shift_count_ = 0;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(RenderDelayControllerMetrics);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_RENDER_DELAY_CONTROLLER_METRICS_H_
diff --git a/modules/audio_processing/aec3/render_delay_controller_metrics_unittest.cc b/modules/audio_processing/aec3/render_delay_controller_metrics_unittest.cc
new file mode 100644
index 0000000..1129f49
--- /dev/null
+++ b/modules/audio_processing/aec3/render_delay_controller_metrics_unittest.cc
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_delay_controller_metrics.h"
+#include "api/optional.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+// Verify the general functionality of RenderDelayControllerMetrics.
+TEST(RenderDelayControllerMetrics, NormalUsage) {
+  RenderDelayControllerMetrics metrics;
+
+  for (int j = 0; j < 3; ++j) {
+    for (int k = 0; k < kMetricsReportingIntervalBlocks - 1; ++k) {
+      metrics.Update(rtc::nullopt, 0, rtc::nullopt);
+      EXPECT_FALSE(metrics.MetricsReported());
+    }
+    metrics.Update(rtc::nullopt, 0, rtc::nullopt);
+    EXPECT_TRUE(metrics.MetricsReported());
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/render_delay_controller_unittest.cc b/modules/audio_processing/aec3/render_delay_controller_unittest.cc
new file mode 100644
index 0000000..656c5e8
--- /dev/null
+++ b/modules/audio_processing/aec3/render_delay_controller_unittest.cc
@@ -0,0 +1,323 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_delay_controller.h"
+
+#include <algorithm>
+#include <memory>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/block_processor.h"
+#include "modules/audio_processing/aec3/decimator.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/random.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+std::string ProduceDebugText(int sample_rate_hz) {
+  std::ostringstream ss;
+  ss << "Sample rate: " << sample_rate_hz;
+  return ss.str();
+}
+
+std::string ProduceDebugText(int sample_rate_hz, size_t delay) {
+  std::ostringstream ss;
+  ss << ProduceDebugText(sample_rate_hz) << ", Delay: " << delay;
+  return ss.str();
+}
+
+constexpr size_t kDownSamplingFactors[] = {2, 4, 8};
+
+}  // namespace
+
+// Verifies the output of GetDelay when there are no AnalyzeRender calls.
+TEST(RenderDelayController, NoRenderSignal) {
+  std::vector<float> block(kBlockSize, 0.f);
+  EchoCanceller3Config config;
+  for (size_t num_matched_filters = 4; num_matched_filters == 10;
+       num_matched_filters++) {
+    for (auto down_sampling_factor : kDownSamplingFactors) {
+      config.delay.down_sampling_factor = down_sampling_factor;
+      config.delay.num_filters = num_matched_filters;
+      for (auto rate : {8000, 16000, 32000, 48000}) {
+        SCOPED_TRACE(ProduceDebugText(rate));
+        std::unique_ptr<RenderDelayBuffer> delay_buffer(
+            RenderDelayBuffer::Create(config, NumBandsForRate(rate)));
+        std::unique_ptr<RenderDelayController> delay_controller(
+            RenderDelayController::Create(
+                config, RenderDelayBuffer::DelayEstimatorOffset(config), rate));
+        for (size_t k = 0; k < 100; ++k) {
+          auto delay = delay_controller->GetDelay(
+              delay_buffer->GetDownsampledRenderBuffer(), block);
+          EXPECT_EQ(config.delay.min_echo_path_delay_blocks, delay->delay);
+        }
+      }
+    }
+  }
+}
+
+// Verifies the basic API call sequence.
+TEST(RenderDelayController, BasicApiCalls) {
+  std::vector<float> capture_block(kBlockSize, 0.f);
+  rtc::Optional<DelayEstimate> delay_blocks;
+  for (size_t num_matched_filters = 4; num_matched_filters == 10;
+       num_matched_filters++) {
+    for (auto down_sampling_factor : kDownSamplingFactors) {
+      EchoCanceller3Config config;
+      config.delay.down_sampling_factor = down_sampling_factor;
+      config.delay.num_filters = num_matched_filters;
+      for (auto rate : {8000, 16000, 32000, 48000}) {
+        std::vector<std::vector<float>> render_block(
+            NumBandsForRate(rate), std::vector<float>(kBlockSize, 0.f));
+        std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+            RenderDelayBuffer::Create(config, NumBandsForRate(rate)));
+        std::unique_ptr<RenderDelayController> delay_controller(
+            RenderDelayController::Create(
+                EchoCanceller3Config(),
+                RenderDelayBuffer::DelayEstimatorOffset(config), rate));
+        for (size_t k = 0; k < 10; ++k) {
+          render_delay_buffer->Insert(render_block);
+          render_delay_buffer->PrepareCaptureProcessing();
+
+          delay_blocks = delay_controller->GetDelay(
+              render_delay_buffer->GetDownsampledRenderBuffer(), capture_block);
+        }
+        EXPECT_TRUE(delay_blocks);
+        EXPECT_EQ(config.delay.min_echo_path_delay_blocks, delay_blocks->delay);
+      }
+    }
+  }
+}
+
+// Verifies that the RenderDelayController is able to align the signals for
+// simple timeshifts between the signals.
+TEST(RenderDelayController, Alignment) {
+  Random random_generator(42U);
+  std::vector<float> capture_block(kBlockSize, 0.f);
+  for (size_t num_matched_filters = 4; num_matched_filters == 10;
+       num_matched_filters++) {
+    for (auto down_sampling_factor : kDownSamplingFactors) {
+      EchoCanceller3Config config;
+      config.delay.down_sampling_factor = down_sampling_factor;
+      config.delay.num_filters = num_matched_filters;
+
+      for (auto rate : {8000, 16000, 32000, 48000}) {
+        std::vector<std::vector<float>> render_block(
+            NumBandsForRate(rate), std::vector<float>(kBlockSize, 0.f));
+
+        for (size_t delay_samples : {15, 50, 150, 200, 800, 4000}) {
+          rtc::Optional<DelayEstimate> delay_blocks;
+          SCOPED_TRACE(ProduceDebugText(rate, delay_samples));
+          std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+              RenderDelayBuffer::Create(config, NumBandsForRate(rate)));
+          std::unique_ptr<RenderDelayController> delay_controller(
+              RenderDelayController::Create(
+                  config, RenderDelayBuffer::DelayEstimatorOffset(config),
+                  rate));
+          DelayBuffer<float> signal_delay_buffer(delay_samples);
+          for (size_t k = 0; k < (400 + delay_samples / kBlockSize); ++k) {
+            RandomizeSampleVector(&random_generator, render_block[0]);
+            signal_delay_buffer.Delay(render_block[0], capture_block);
+            render_delay_buffer->Insert(render_block);
+            render_delay_buffer->PrepareCaptureProcessing();
+            delay_blocks = delay_controller->GetDelay(
+                render_delay_buffer->GetDownsampledRenderBuffer(),
+                capture_block);
+          }
+          ASSERT_TRUE(!!delay_blocks);
+
+          constexpr int kDelayHeadroomBlocks = 1;
+          size_t expected_delay_blocks =
+              std::max(0, static_cast<int>(delay_samples / kBlockSize) -
+                              kDelayHeadroomBlocks);
+
+          EXPECT_EQ(expected_delay_blocks, delay_blocks->delay);
+        }
+      }
+    }
+  }
+}
+
+// Verifies that the RenderDelayController is able to properly handle noncausal
+// delays.
+TEST(RenderDelayController, NonCausalAlignment) {
+  Random random_generator(42U);
+  for (size_t num_matched_filters = 4; num_matched_filters == 10;
+       num_matched_filters++) {
+    for (auto down_sampling_factor : kDownSamplingFactors) {
+      EchoCanceller3Config config;
+      config.delay.down_sampling_factor = down_sampling_factor;
+      config.delay.num_filters = num_matched_filters;
+      for (auto rate : {8000, 16000, 32000, 48000}) {
+        std::vector<std::vector<float>> render_block(
+            NumBandsForRate(rate), std::vector<float>(kBlockSize, 0.f));
+        std::vector<std::vector<float>> capture_block(
+            NumBandsForRate(rate), std::vector<float>(kBlockSize, 0.f));
+
+        for (int delay_samples : {-15, -50, -150, -200}) {
+          rtc::Optional<DelayEstimate> delay_blocks;
+          SCOPED_TRACE(ProduceDebugText(rate, -delay_samples));
+          std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+              RenderDelayBuffer::Create(config, NumBandsForRate(rate)));
+          std::unique_ptr<RenderDelayController> delay_controller(
+              RenderDelayController::Create(
+                  EchoCanceller3Config(),
+                  RenderDelayBuffer::DelayEstimatorOffset(config), rate));
+          DelayBuffer<float> signal_delay_buffer(-delay_samples);
+          for (int k = 0;
+               k < (400 - delay_samples / static_cast<int>(kBlockSize)); ++k) {
+            RandomizeSampleVector(&random_generator, capture_block[0]);
+            signal_delay_buffer.Delay(capture_block[0], render_block[0]);
+            render_delay_buffer->Insert(render_block);
+            render_delay_buffer->PrepareCaptureProcessing();
+            delay_blocks = delay_controller->GetDelay(
+                render_delay_buffer->GetDownsampledRenderBuffer(),
+                capture_block[0]);
+          }
+
+          ASSERT_FALSE(delay_blocks);
+        }
+      }
+    }
+  }
+}
+
+// Verifies that the RenderDelayController is able to align the signals for
+// simple timeshifts between the signals when there is jitter in the API calls.
+TEST(RenderDelayController, AlignmentWithJitter) {
+  Random random_generator(42U);
+  std::vector<float> capture_block(kBlockSize, 0.f);
+  for (size_t num_matched_filters = 4; num_matched_filters == 10;
+       num_matched_filters++) {
+    for (auto down_sampling_factor : kDownSamplingFactors) {
+      EchoCanceller3Config config;
+      config.delay.down_sampling_factor = down_sampling_factor;
+      config.delay.num_filters = num_matched_filters;
+      for (auto rate : {8000, 16000, 32000, 48000}) {
+        std::vector<std::vector<float>> render_block(
+            NumBandsForRate(rate), std::vector<float>(kBlockSize, 0.f));
+        for (size_t delay_samples : {15, 50, 300, 800}) {
+          rtc::Optional<DelayEstimate> delay_blocks;
+          SCOPED_TRACE(ProduceDebugText(rate, delay_samples));
+          std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+              RenderDelayBuffer::Create(config, NumBandsForRate(rate)));
+          std::unique_ptr<RenderDelayController> delay_controller(
+              RenderDelayController::Create(
+                  config, RenderDelayBuffer::DelayEstimatorOffset(config),
+                  rate));
+          DelayBuffer<float> signal_delay_buffer(delay_samples);
+          for (size_t j = 0; j < (1000 + delay_samples / kBlockSize) /
+                                         config.delay.api_call_jitter_blocks +
+                                     1;
+               ++j) {
+            std::vector<std::vector<float>> capture_block_buffer;
+            for (size_t k = 0; k < (config.delay.api_call_jitter_blocks - 1);
+                 ++k) {
+              RandomizeSampleVector(&random_generator, render_block[0]);
+              signal_delay_buffer.Delay(render_block[0], capture_block);
+              capture_block_buffer.push_back(capture_block);
+              render_delay_buffer->Insert(render_block);
+            }
+            for (size_t k = 0; k < (config.delay.api_call_jitter_blocks - 1);
+                 ++k) {
+              render_delay_buffer->PrepareCaptureProcessing();
+              delay_blocks = delay_controller->GetDelay(
+                  render_delay_buffer->GetDownsampledRenderBuffer(),
+                  capture_block_buffer[k]);
+            }
+          }
+
+          constexpr int kDelayHeadroomBlocks = 1;
+          size_t expected_delay_blocks =
+              std::max(0, static_cast<int>(delay_samples / kBlockSize) -
+                              kDelayHeadroomBlocks);
+          if (expected_delay_blocks < 2) {
+            expected_delay_blocks = 0;
+          }
+
+          ASSERT_TRUE(delay_blocks);
+          EXPECT_EQ(expected_delay_blocks, delay_blocks->delay);
+        }
+      }
+    }
+  }
+}
+
+// Verifies the initial value for the AlignmentHeadroomSamples.
+TEST(RenderDelayController, InitialHeadroom) {
+  std::vector<float> render_block(kBlockSize, 0.f);
+  std::vector<float> capture_block(kBlockSize, 0.f);
+  for (size_t num_matched_filters = 4; num_matched_filters == 10;
+       num_matched_filters++) {
+    for (auto down_sampling_factor : kDownSamplingFactors) {
+      EchoCanceller3Config config;
+      config.delay.down_sampling_factor = down_sampling_factor;
+      config.delay.num_filters = num_matched_filters;
+      for (auto rate : {8000, 16000, 32000, 48000}) {
+        SCOPED_TRACE(ProduceDebugText(rate));
+        std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+            RenderDelayBuffer::Create(config, NumBandsForRate(rate)));
+
+        std::unique_ptr<RenderDelayController> delay_controller(
+            RenderDelayController::Create(
+                config, RenderDelayBuffer::DelayEstimatorOffset(config), rate));
+      }
+    }
+  }
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for the capture signal block size.
+TEST(RenderDelayController, WrongCaptureSize) {
+  std::vector<float> block(kBlockSize - 1, 0.f);
+  EchoCanceller3Config config;
+  for (auto rate : {8000, 16000, 32000, 48000}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+        RenderDelayBuffer::Create(config, NumBandsForRate(rate)));
+    EXPECT_DEATH(
+        std::unique_ptr<RenderDelayController>(
+            RenderDelayController::Create(
+                EchoCanceller3Config(),
+                RenderDelayBuffer::DelayEstimatorOffset(config), rate))
+            ->GetDelay(render_delay_buffer->GetDownsampledRenderBuffer(),
+                       block),
+        "");
+  }
+}
+
+// Verifies the check for correct sample rate.
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.
+TEST(RenderDelayController, DISABLED_WrongSampleRate) {
+  for (auto rate : {-1, 0, 8001, 16001}) {
+    SCOPED_TRACE(ProduceDebugText(rate));
+    EchoCanceller3Config config;
+    std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+        RenderDelayBuffer::Create(config, NumBandsForRate(rate)));
+    EXPECT_DEATH(
+        std::unique_ptr<RenderDelayController>(RenderDelayController::Create(
+            EchoCanceller3Config(),
+            RenderDelayBuffer::DelayEstimatorOffset(config), rate)),
+        "");
+  }
+}
+
+#endif
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/render_signal_analyzer.cc b/modules/audio_processing/aec3/render_signal_analyzer.cc
new file mode 100644
index 0000000..10b68b0
--- /dev/null
+++ b/modules/audio_processing/aec3/render_signal_analyzer.cc
@@ -0,0 +1,129 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_signal_analyzer.h"
+
+#include <math.h>
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace {
+constexpr size_t kCounterThreshold = 5;
+
+// Identifies local bands with narrow characteristics.
+void IdentifySmallNarrowBandRegions(
+    const RenderBuffer& render_buffer,
+    const rtc::Optional<size_t>& delay_partitions,
+    std::array<size_t, kFftLengthBy2 - 1>* narrow_band_counters) {
+  if (!delay_partitions) {
+    narrow_band_counters->fill(0);
+    return;
+  }
+
+  rtc::ArrayView<const float> X2 = render_buffer.Spectrum(*delay_partitions);
+  RTC_DCHECK_EQ(kFftLengthBy2Plus1, X2.size());
+
+  for (size_t k = 1; k < (X2.size() - 1); ++k) {
+    (*narrow_band_counters)[k - 1] = X2[k] > 3 * std::max(X2[k - 1], X2[k + 1])
+                                         ? (*narrow_band_counters)[k - 1] + 1
+                                         : 0;
+  }
+}
+
+// Identifies whether the signal has a single strong narrow-band component.
+void IdentifyStrongNarrowBandComponent(const RenderBuffer& render_buffer,
+                                       int strong_peak_freeze_duration,
+                                       rtc::Optional<int>* narrow_peak_band,
+                                       size_t* narrow_peak_counter) {
+  const auto X2_latest = render_buffer.Spectrum(0);
+
+  // Identify the spectral peak.
+  const int peak_bin = static_cast<int>(
+      std::max_element(X2_latest.begin(), X2_latest.end()) - X2_latest.begin());
+
+  // Compute the level around the peak.
+  float non_peak_power = 0.f;
+  for (int k = std::max(0, peak_bin - 14); k < peak_bin - 4; ++k) {
+    non_peak_power = std::max(X2_latest[k], non_peak_power);
+  }
+  for (int k = peak_bin + 5;
+       k < std::min(peak_bin + 15, static_cast<int>(kFftLengthBy2Plus1)); ++k) {
+    non_peak_power = std::max(X2_latest[k], non_peak_power);
+  }
+
+  // Assess the render signal strength.
+  const std::vector<std::vector<float>>& x_latest = render_buffer.Block(0);
+  auto result0 = std::minmax_element(x_latest[0].begin(), x_latest[0].end());
+  float max_abs = std::max(fabs(*result0.first), fabs(*result0.second));
+
+  if (x_latest.size() > 1) {
+    const auto result1 =
+        std::minmax_element(x_latest[1].begin(), x_latest[1].end());
+    max_abs =
+        std::max(max_abs, static_cast<float>(std::max(fabs(*result1.first),
+                                                      fabs(*result1.second))));
+  }
+
+  // Detect whether the spectal peak has as strong narrowband nature.
+  if (peak_bin > 0 && max_abs > 100 &&
+      X2_latest[peak_bin] > 100 * non_peak_power) {
+    *narrow_peak_band = peak_bin;
+    *narrow_peak_counter = 0;
+  } else {
+    if (*narrow_peak_band &&
+        ++(*narrow_peak_counter) >
+            static_cast<size_t>(strong_peak_freeze_duration)) {
+      *narrow_peak_band = rtc::nullopt;
+    }
+  }
+}
+
+}  // namespace
+
+RenderSignalAnalyzer::RenderSignalAnalyzer(const EchoCanceller3Config& config)
+    : strong_peak_freeze_duration_(config.filter.main.length_blocks) {
+  narrow_band_counters_.fill(0);
+}
+RenderSignalAnalyzer::~RenderSignalAnalyzer() = default;
+
+void RenderSignalAnalyzer::Update(
+    const RenderBuffer& render_buffer,
+    const rtc::Optional<size_t>& delay_partitions) {
+  // Identify bands of narrow nature.
+  IdentifySmallNarrowBandRegions(render_buffer, delay_partitions,
+                                 &narrow_band_counters_);
+
+  // Identify the presence of a strong narrow band.
+  IdentifyStrongNarrowBandComponent(render_buffer, strong_peak_freeze_duration_,
+                                    &narrow_peak_band_, &narrow_peak_counter_);
+}
+
+void RenderSignalAnalyzer::MaskRegionsAroundNarrowBands(
+    std::array<float, kFftLengthBy2Plus1>* v) const {
+  RTC_DCHECK(v);
+
+  // Set v to zero around narrow band signal regions.
+  if (narrow_band_counters_[0] > kCounterThreshold) {
+    (*v)[1] = (*v)[0] = 0.f;
+  }
+  for (size_t k = 2; k < kFftLengthBy2 - 1; ++k) {
+    if (narrow_band_counters_[k - 1] > kCounterThreshold) {
+      (*v)[k - 2] = (*v)[k - 1] = (*v)[k] = (*v)[k + 1] = (*v)[k + 2] = 0.f;
+    }
+  }
+  if (narrow_band_counters_[kFftLengthBy2 - 2] > kCounterThreshold) {
+    (*v)[kFftLengthBy2] = (*v)[kFftLengthBy2 - 1] = 0.f;
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/render_signal_analyzer.h b/modules/audio_processing/aec3/render_signal_analyzer.h
new file mode 100644
index 0000000..8cd2172
--- /dev/null
+++ b/modules/audio_processing/aec3/render_signal_analyzer.h
@@ -0,0 +1,60 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_RENDER_SIGNAL_ANALYZER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_RENDER_SIGNAL_ANALYZER_H_
+
+#include <array>
+#include <memory>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "api/optional.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Provides functionality for analyzing the properties of the render signal.
+class RenderSignalAnalyzer {
+ public:
+  explicit RenderSignalAnalyzer(const EchoCanceller3Config& config);
+  ~RenderSignalAnalyzer();
+
+  // Updates the render signal analysis with the most recent render signal.
+  void Update(const RenderBuffer& render_buffer,
+              const rtc::Optional<size_t>& delay_partitions);
+
+  // Returns true if the render signal is poorly exciting.
+  bool PoorSignalExcitation() const {
+    RTC_DCHECK_LT(2, narrow_band_counters_.size());
+    return std::any_of(narrow_band_counters_.begin(),
+                       narrow_band_counters_.end(),
+                       [](size_t a) { return a > 10; });
+  }
+
+  // Zeros the array around regions with narrow bands signal characteristics.
+  void MaskRegionsAroundNarrowBands(
+      std::array<float, kFftLengthBy2Plus1>* v) const;
+
+  rtc::Optional<int> NarrowPeakBand() const { return narrow_peak_band_; }
+
+ private:
+  const int strong_peak_freeze_duration_;
+  std::array<size_t, kFftLengthBy2 - 1> narrow_band_counters_;
+  rtc::Optional<int> narrow_peak_band_;
+  size_t narrow_peak_counter_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(RenderSignalAnalyzer);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_RENDER_SIGNAL_ANALYZER_H_
diff --git a/modules/audio_processing/aec3/render_signal_analyzer_unittest.cc b/modules/audio_processing/aec3/render_signal_analyzer_unittest.cc
new file mode 100644
index 0000000..5191874
--- /dev/null
+++ b/modules/audio_processing/aec3/render_signal_analyzer_unittest.cc
@@ -0,0 +1,135 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/render_signal_analyzer.h"
+
+#include <math.h>
+#include <array>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/random.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+constexpr float kPi = 3.141592f;
+
+void ProduceSinusoid(int sample_rate_hz,
+                     float sinusoidal_frequency_hz,
+                     size_t* sample_counter,
+                     rtc::ArrayView<float> x) {
+  // Produce a sinusoid of the specified frequency.
+  for (size_t k = *sample_counter, j = 0; k < (*sample_counter + kBlockSize);
+       ++k, ++j) {
+    x[j] =
+        32767.f * sin(2.f * kPi * sinusoidal_frequency_hz * k / sample_rate_hz);
+  }
+  *sample_counter = *sample_counter + kBlockSize;
+}
+
+}  // namespace
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+// Verifies that the check for non-null output parameter works.
+TEST(RenderSignalAnalyzer, NullMaskOutput) {
+  RenderSignalAnalyzer analyzer(EchoCanceller3Config{});
+  EXPECT_DEATH(analyzer.MaskRegionsAroundNarrowBands(nullptr), "");
+}
+
+#endif
+
+// Verify that no narrow bands are detected in a Gaussian noise signal.
+TEST(RenderSignalAnalyzer, NoFalseDetectionOfNarrowBands) {
+  RenderSignalAnalyzer analyzer(EchoCanceller3Config{});
+  Random random_generator(42U);
+  std::vector<std::vector<float>> x(3, std::vector<float>(kBlockSize, 0.f));
+  std::array<float, kBlockSize> x_old;
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(EchoCanceller3Config(), 3));
+  std::array<float, kFftLengthBy2Plus1> mask;
+  x_old.fill(0.f);
+
+  for (size_t k = 0; k < 100; ++k) {
+    RandomizeSampleVector(&random_generator, x[0]);
+
+    render_delay_buffer->Insert(x);
+    if (k == 0) {
+      render_delay_buffer->Reset();
+    }
+    render_delay_buffer->PrepareCaptureProcessing();
+
+    analyzer.Update(*render_delay_buffer->GetRenderBuffer(),
+                    rtc::Optional<size_t>(0));
+  }
+
+  mask.fill(1.f);
+  analyzer.MaskRegionsAroundNarrowBands(&mask);
+  EXPECT_TRUE(
+      std::all_of(mask.begin(), mask.end(), [](float a) { return a == 1.f; }));
+  EXPECT_FALSE(analyzer.PoorSignalExcitation());
+}
+
+// Verify that a sinusiod signal is detected as narrow bands.
+TEST(RenderSignalAnalyzer, NarrowBandDetection) {
+  RenderSignalAnalyzer analyzer(EchoCanceller3Config{});
+  Random random_generator(42U);
+  std::vector<std::vector<float>> x(3, std::vector<float>(kBlockSize, 0.f));
+  std::array<float, kBlockSize> x_old;
+  Aec3Fft fft;
+  EchoCanceller3Config config;
+  config.delay.min_echo_path_delay_blocks = 0;
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(config, 3));
+
+  std::array<float, kFftLengthBy2Plus1> mask;
+  x_old.fill(0.f);
+  constexpr int kSinusFrequencyBin = 32;
+
+  auto generate_sinusoid_test = [&](bool known_delay) {
+    size_t sample_counter = 0;
+    for (size_t k = 0; k < 100; ++k) {
+      ProduceSinusoid(16000, 16000 / 2 * kSinusFrequencyBin / kFftLengthBy2,
+                      &sample_counter, x[0]);
+
+      render_delay_buffer->Insert(x);
+      if (k == 0) {
+        render_delay_buffer->Reset();
+      }
+      render_delay_buffer->PrepareCaptureProcessing();
+
+      analyzer.Update(*render_delay_buffer->GetRenderBuffer(),
+                      known_delay ? rtc::Optional<size_t>(0) : rtc::nullopt);
+    }
+  };
+
+  generate_sinusoid_test(true);
+  mask.fill(1.f);
+  analyzer.MaskRegionsAroundNarrowBands(&mask);
+  for (int k = 0; k < static_cast<int>(mask.size()); ++k) {
+    EXPECT_EQ(abs(k - kSinusFrequencyBin) <= 2 ? 0.f : 1.f, mask[k]);
+  }
+  EXPECT_TRUE(analyzer.PoorSignalExcitation());
+
+  // Verify that no bands are detected as narrow when the delay is unknown.
+  generate_sinusoid_test(false);
+  mask.fill(1.f);
+  analyzer.MaskRegionsAroundNarrowBands(&mask);
+  std::for_each(mask.begin(), mask.end(), [](float a) { EXPECT_EQ(1.f, a); });
+  EXPECT_FALSE(analyzer.PoorSignalExcitation());
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/residual_echo_estimator.cc b/modules/audio_processing/aec3/residual_echo_estimator.cc
new file mode 100644
index 0000000..f0c971d
--- /dev/null
+++ b/modules/audio_processing/aec3/residual_echo_estimator.cc
@@ -0,0 +1,264 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/residual_echo_estimator.h"
+
+#include <numeric>
+#include <vector>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+// Estimates the echo generating signal power as gated maximal power over a time
+// window.
+void EchoGeneratingPower(const RenderBuffer& render_buffer,
+                         size_t min_delay,
+                         size_t max_delay,
+                         std::array<float, kFftLengthBy2Plus1>* X2) {
+  X2->fill(0.f);
+  for (size_t k = min_delay; k <= max_delay; ++k) {
+    std::transform(X2->begin(), X2->end(), render_buffer.Spectrum(k).begin(),
+                   X2->begin(),
+                   [](float a, float b) { return std::max(a, b); });
+  }
+
+  // Apply soft noise gate of -78 dBFS.
+  static constexpr float kNoiseGatePower = 27509.42f;
+  std::for_each(X2->begin(), X2->end(), [](float& a) {
+    if (kNoiseGatePower > a) {
+      a = std::max(0.f, a - 0.3f * (kNoiseGatePower - a));
+    }
+  });
+}
+
+constexpr int kNoiseFloorCounterMax = 50;
+constexpr float kNoiseFloorMin = 10.f * 10.f * 128.f * 128.f;
+
+// Updates estimate for the power of the stationary noise component in the
+// render signal.
+void RenderNoisePower(
+    const RenderBuffer& render_buffer,
+    std::array<float, kFftLengthBy2Plus1>* X2_noise_floor,
+    std::array<int, kFftLengthBy2Plus1>* X2_noise_floor_counter) {
+  RTC_DCHECK(X2_noise_floor);
+  RTC_DCHECK(X2_noise_floor_counter);
+
+  const auto render_power = render_buffer.Spectrum(0);
+  RTC_DCHECK_EQ(X2_noise_floor->size(), render_power.size());
+  RTC_DCHECK_EQ(X2_noise_floor_counter->size(), render_power.size());
+
+  // Estimate the stationary noise power in a minimum statistics manner.
+  for (size_t k = 0; k < render_power.size(); ++k) {
+    // Decrease rapidly.
+    if (render_power[k] < (*X2_noise_floor)[k]) {
+      (*X2_noise_floor)[k] = render_power[k];
+      (*X2_noise_floor_counter)[k] = 0;
+    } else {
+      // Increase in a delayed, leaky manner.
+      if ((*X2_noise_floor_counter)[k] >= kNoiseFloorCounterMax) {
+        (*X2_noise_floor)[k] =
+            std::max((*X2_noise_floor)[k] * 1.1f, kNoiseFloorMin);
+      } else {
+        ++(*X2_noise_floor_counter)[k];
+      }
+    }
+  }
+}
+
+}  // namespace
+
+ResidualEchoEstimator::ResidualEchoEstimator(const EchoCanceller3Config& config)
+    : config_(config), S2_old_(config_.filter.main.length_blocks) {
+  Reset();
+}
+
+ResidualEchoEstimator::~ResidualEchoEstimator() = default;
+
+void ResidualEchoEstimator::Estimate(
+    const AecState& aec_state,
+    const RenderBuffer& render_buffer,
+    const std::array<float, kFftLengthBy2Plus1>& S2_linear,
+    const std::array<float, kFftLengthBy2Plus1>& Y2,
+    std::array<float, kFftLengthBy2Plus1>* R2) {
+  RTC_DCHECK(R2);
+
+  // Estimate the power of the stationary noise in the render signal.
+  RenderNoisePower(render_buffer, &X2_noise_floor_, &X2_noise_floor_counter_);
+
+  // Estimate the residual echo power.
+  if (aec_state.UsableLinearEstimate()) {
+    LinearEstimate(S2_linear, aec_state.Erle(), aec_state.FilterDelay(), R2);
+    AddEchoReverb(S2_linear, aec_state.SaturatedEcho(), aec_state.FilterDelay(),
+                  aec_state.ReverbDecay(), R2);
+
+    // If the echo is saturated, estimate the echo power as the maximum echo
+    // power with a leakage factor.
+    if (aec_state.SaturatedEcho()) {
+      R2->fill((*std::max_element(R2->begin(), R2->end())) * 100.f);
+    }
+  } else {
+    // Estimate the echo generating signal power.
+    std::array<float, kFftLengthBy2Plus1> X2;
+
+    // Computes the spectral power over the blocks surrounding the delay.
+    EchoGeneratingPower(render_buffer, std::max(0, aec_state.FilterDelay() - 1),
+                        aec_state.FilterDelay() + 10, &X2);
+
+    // Subtract the stationary noise power to avoid stationary noise causing
+    // excessive echo suppression.
+    std::transform(
+        X2.begin(), X2.end(), X2_noise_floor_.begin(), X2.begin(),
+        [](float a, float b) { return std::max(0.f, a - 10.f * b); });
+
+    NonLinearEstimate(aec_state.FilterHasHadTimeToConverge(),
+                      aec_state.SaturatedEcho(),
+                      config_.ep_strength.bounded_erl,
+                      aec_state.TransparentMode(), X2, Y2, R2);
+
+    if (aec_state.SaturatedEcho()) {
+      // TODO(peah): Modify to make sense theoretically.
+      AddEchoReverb(*R2, aec_state.SaturatedEcho(),
+                    config_.filter.main.length_blocks, aec_state.ReverbDecay(),
+                    R2);
+    }
+  }
+
+  // If the echo is deemed inaudible, set the residual echo to zero.
+  if (aec_state.InaudibleEcho()) {
+    R2->fill(0.f);
+    R2_old_.fill(0.f);
+    R2_hold_counter_.fill(0.f);
+  }
+
+  std::copy(R2->begin(), R2->end(), R2_old_.begin());
+}
+
+void ResidualEchoEstimator::Reset() {
+  X2_noise_floor_counter_.fill(kNoiseFloorCounterMax);
+  X2_noise_floor_.fill(kNoiseFloorMin);
+  R2_reverb_.fill(0.f);
+  R2_old_.fill(0.f);
+  R2_hold_counter_.fill(0.f);
+  for (auto& S2_k : S2_old_) {
+    S2_k.fill(0.f);
+  }
+}
+
+void ResidualEchoEstimator::LinearEstimate(
+    const std::array<float, kFftLengthBy2Plus1>& S2_linear,
+    const std::array<float, kFftLengthBy2Plus1>& erle,
+    size_t delay,
+    std::array<float, kFftLengthBy2Plus1>* R2) {
+  std::fill(R2_hold_counter_.begin(), R2_hold_counter_.end(), 10.f);
+  std::transform(erle.begin(), erle.end(), S2_linear.begin(), R2->begin(),
+                 [](float a, float b) {
+                   RTC_DCHECK_LT(0.f, a);
+                   return b / a;
+                 });
+}
+
+void ResidualEchoEstimator::NonLinearEstimate(
+    bool sufficient_filter_updates,
+    bool saturated_echo,
+    bool bounded_erl,
+    bool transparent_mode,
+    const std::array<float, kFftLengthBy2Plus1>& X2,
+    const std::array<float, kFftLengthBy2Plus1>& Y2,
+    std::array<float, kFftLengthBy2Plus1>* R2) {
+  float echo_path_gain_lf;
+  float echo_path_gain_mf;
+  float echo_path_gain_hf;
+
+  // Set echo path gains.
+  if (saturated_echo) {
+    // If the echo could be saturated, use a very conservative gain.
+    echo_path_gain_lf = echo_path_gain_mf = echo_path_gain_hf = 10000.f;
+  } else if (sufficient_filter_updates && !bounded_erl) {
+    // If the filter should have been able to converge, and no assumption is
+    // possible on the ERL, use a low gain.
+    echo_path_gain_lf = echo_path_gain_mf = echo_path_gain_hf = 0.01f;
+  } else if ((sufficient_filter_updates && bounded_erl) || transparent_mode) {
+    // If the filter should have been able to converge, and and it is known that
+    // the ERL is bounded, use a very low gain.
+    echo_path_gain_lf = echo_path_gain_mf = echo_path_gain_hf = 0.001f;
+  } else {
+    // In the initial state, use conservative gains.
+    echo_path_gain_lf = config_.ep_strength.lf;
+    echo_path_gain_mf = config_.ep_strength.mf;
+    echo_path_gain_hf = config_.ep_strength.hf;
+  }
+
+  // Compute preliminary residual echo.
+  std::transform(
+      X2.begin(), X2.begin() + 12, R2->begin(),
+      [echo_path_gain_lf](float a) { return a * echo_path_gain_lf; });
+  std::transform(
+      X2.begin() + 12, X2.begin() + 25, R2->begin() + 12,
+      [echo_path_gain_mf](float a) { return a * echo_path_gain_mf; });
+  std::transform(
+      X2.begin() + 25, X2.end(), R2->begin() + 25,
+      [echo_path_gain_hf](float a) { return a * echo_path_gain_hf; });
+
+  for (size_t k = 0; k < R2->size(); ++k) {
+    // Update hold counter.
+    R2_hold_counter_[k] = R2_old_[k] < (*R2)[k] ? 0 : R2_hold_counter_[k] + 1;
+
+    // Compute the residual echo by holding a maximum echo powers and an echo
+    // fading corresponding to a room with an RT60 value of about 50 ms.
+    (*R2)[k] = R2_hold_counter_[k] < 2
+                   ? std::max((*R2)[k], R2_old_[k])
+                   : std::min((*R2)[k] + R2_old_[k] * 0.1f, Y2[k]);
+  }
+}
+
+void ResidualEchoEstimator::AddEchoReverb(
+    const std::array<float, kFftLengthBy2Plus1>& S2,
+    bool saturated_echo,
+    size_t delay,
+    float reverb_decay_factor,
+    std::array<float, kFftLengthBy2Plus1>* R2) {
+  // Compute the decay factor for how much the echo has decayed before leaving
+  // the region covered by the linear model.
+  auto integer_power = [](float base, int exp) {
+    float result = 1.f;
+    for (int k = 0; k < exp; ++k) {
+      result *= base;
+    }
+    return result;
+  };
+  RTC_DCHECK_LE(delay, S2_old_.size());
+  const float reverb_decay_for_delay =
+      integer_power(reverb_decay_factor, S2_old_.size() - delay);
+
+  // Update the estimate of the reverberant residual echo power.
+  S2_old_index_ = S2_old_index_ > 0 ? S2_old_index_ - 1 : S2_old_.size() - 1;
+  const auto& S2_end = S2_old_[S2_old_index_];
+  std::transform(
+      S2_end.begin(), S2_end.end(), R2_reverb_.begin(), R2_reverb_.begin(),
+      [reverb_decay_for_delay, reverb_decay_factor](float a, float b) {
+        return (b + a * reverb_decay_for_delay) * reverb_decay_factor;
+      });
+
+  // Update the buffer of old echo powers.
+  if (saturated_echo) {
+    S2_old_[S2_old_index_].fill((*std::max_element(S2.begin(), S2.end())) *
+                                100.f);
+  } else {
+    std::copy(S2.begin(), S2.end(), S2_old_[S2_old_index_].begin());
+  }
+
+  // Add the power of the echo reverb to the residual echo power.
+  std::transform(R2->begin(), R2->end(), R2_reverb_.begin(), R2->begin(),
+                 std::plus<float>());
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/residual_echo_estimator.h b/modules/audio_processing/aec3/residual_echo_estimator.h
new file mode 100644
index 0000000..f7e2d1d
--- /dev/null
+++ b/modules/audio_processing/aec3/residual_echo_estimator.h
@@ -0,0 +1,80 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_RESIDUAL_ECHO_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_RESIDUAL_ECHO_ESTIMATOR_H_
+
+#include <algorithm>
+#include <array>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class ResidualEchoEstimator {
+ public:
+  explicit ResidualEchoEstimator(const EchoCanceller3Config& config);
+  ~ResidualEchoEstimator();
+
+  void Estimate(const AecState& aec_state,
+                const RenderBuffer& render_buffer,
+                const std::array<float, kFftLengthBy2Plus1>& S2_linear,
+                const std::array<float, kFftLengthBy2Plus1>& Y2,
+                std::array<float, kFftLengthBy2Plus1>* R2);
+
+ private:
+  // Resets the state.
+  void Reset();
+
+  // Estimates the residual echo power based on the echo return loss enhancement
+  // (ERLE) and the linear power estimate.
+  void LinearEstimate(const std::array<float, kFftLengthBy2Plus1>& S2_linear,
+                      const std::array<float, kFftLengthBy2Plus1>& erle,
+                      size_t delay,
+                      std::array<float, kFftLengthBy2Plus1>* R2);
+
+  // Estimates the residual echo power based on the estimate of the echo path
+  // gain.
+  void NonLinearEstimate(bool sufficient_filter_updates,
+                         bool saturated_echo,
+                         bool bounded_erl,
+                         bool transparent_mode,
+                         const std::array<float, kFftLengthBy2Plus1>& X2,
+                         const std::array<float, kFftLengthBy2Plus1>& Y2,
+                         std::array<float, kFftLengthBy2Plus1>* R2);
+
+  // Adds the estimated unmodelled echo power to the residual echo power
+  // estimate.
+  void AddEchoReverb(const std::array<float, kFftLengthBy2Plus1>& S2,
+                     bool saturated_echo,
+                     size_t delay,
+                     float reverb_decay_factor,
+                     std::array<float, kFftLengthBy2Plus1>* R2);
+  const EchoCanceller3Config config_;
+  std::array<float, kFftLengthBy2Plus1> R2_old_;
+  std::array<int, kFftLengthBy2Plus1> R2_hold_counter_;
+  std::array<float, kFftLengthBy2Plus1> R2_reverb_;
+  int S2_old_index_ = 0;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> S2_old_;
+  std::array<float, kFftLengthBy2Plus1> X2_noise_floor_;
+  std::array<int, kFftLengthBy2Plus1> X2_noise_floor_counter_;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(ResidualEchoEstimator);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_RESIDUAL_ECHO_ESTIMATOR_H_
diff --git a/modules/audio_processing/aec3/residual_echo_estimator_unittest.cc b/modules/audio_processing/aec3/residual_echo_estimator_unittest.cc
new file mode 100644
index 0000000..d46d518
--- /dev/null
+++ b/modules/audio_processing/aec3/residual_echo_estimator_unittest.cc
@@ -0,0 +1,107 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/residual_echo_estimator.h"
+
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/random.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies that the check for non-null output residual echo power works.
+TEST(ResidualEchoEstimator, NullResidualEchoPowerOutput) {
+  EchoCanceller3Config config;
+  AecState aec_state(config);
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(config, 3));
+  std::vector<std::array<float, kFftLengthBy2Plus1>> H2;
+  std::array<float, kFftLengthBy2Plus1> S2_linear;
+  std::array<float, kFftLengthBy2Plus1> Y2;
+  EXPECT_DEATH(ResidualEchoEstimator(EchoCanceller3Config{})
+                   .Estimate(aec_state, *render_delay_buffer->GetRenderBuffer(),
+                             S2_linear, Y2, nullptr),
+               "");
+}
+
+#endif
+
+// TODO(peah): This test is broken in the sense that it not at all tests what it
+// seems to test. Enable the test once that is adressed.
+TEST(ResidualEchoEstimator, DISABLED_BasicTest) {
+  EchoCanceller3Config config;
+  config.ep_strength.default_len = 0.f;
+  config.delay.min_echo_path_delay_blocks = 0;
+  ResidualEchoEstimator estimator(config);
+  AecState aec_state(config);
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(config, 3));
+
+  std::array<float, kFftLengthBy2Plus1> E2_main;
+  std::array<float, kFftLengthBy2Plus1> E2_shadow;
+  std::array<float, kFftLengthBy2Plus1> S2_linear;
+  std::array<float, kFftLengthBy2Plus1> S2_fallback;
+  std::array<float, kFftLengthBy2Plus1> Y2;
+  std::array<float, kFftLengthBy2Plus1> R2;
+  EchoPathVariability echo_path_variability(
+      false, EchoPathVariability::DelayAdjustment::kNone, false);
+  std::vector<std::vector<float>> x(3, std::vector<float>(kBlockSize, 0.f));
+  std::vector<std::array<float, kFftLengthBy2Plus1>> H2(10);
+  Random random_generator(42U);
+  std::array<float, kBlockSize> s;
+  Aec3Fft fft;
+  rtc::Optional<DelayEstimate> delay_estimate;
+
+  for (auto& H2_k : H2) {
+    H2_k.fill(0.01f);
+  }
+  H2[2].fill(10.f);
+  H2[2][0] = 0.1f;
+
+  std::vector<float> h(GetTimeDomainLength(config.filter.main.length_blocks),
+                       0.f);
+
+  s.fill(100.f);
+
+  constexpr float kLevel = 10.f;
+  E2_shadow.fill(kLevel);
+  E2_main.fill(kLevel);
+  S2_linear.fill(kLevel);
+  S2_fallback.fill(kLevel);
+  Y2.fill(kLevel);
+
+  for (int k = 0; k < 1993; ++k) {
+    RandomizeSampleVector(&random_generator, x[0]);
+    std::for_each(x[0].begin(), x[0].end(), [](float& a) { a /= 30.f; });
+    render_delay_buffer->Insert(x);
+    if (k == 0) {
+      render_delay_buffer->Reset();
+    }
+    render_delay_buffer->PrepareCaptureProcessing();
+
+    aec_state.HandleEchoPathChange(echo_path_variability);
+    aec_state.Update(delay_estimate, H2, h, true,
+                     *render_delay_buffer->GetRenderBuffer(), E2_main, Y2, s,
+                     false);
+
+    estimator.Estimate(aec_state, *render_delay_buffer->GetRenderBuffer(),
+                       S2_linear, Y2, &R2);
+  }
+  std::for_each(R2.begin(), R2.end(),
+                [&](float a) { EXPECT_NEAR(kLevel, a, 0.1f); });
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/shadow_filter_update_gain.cc b/modules/audio_processing/aec3/shadow_filter_update_gain.cc
new file mode 100644
index 0000000..e27437a
--- /dev/null
+++ b/modules/audio_processing/aec3/shadow_filter_update_gain.cc
@@ -0,0 +1,101 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/shadow_filter_update_gain.h"
+
+#include <algorithm>
+#include <functional>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+ShadowFilterUpdateGain::ShadowFilterUpdateGain(
+    const EchoCanceller3Config::Filter::ShadowConfiguration& config,
+    size_t config_change_duration_blocks)
+    : config_change_duration_blocks_(
+          static_cast<int>(config_change_duration_blocks)) {
+  SetConfig(config, true);
+  RTC_DCHECK_LT(0, config_change_duration_blocks_);
+  one_by_config_change_duration_blocks_ = 1.f / config_change_duration_blocks_;
+}
+
+void ShadowFilterUpdateGain::HandleEchoPathChange() {
+  // TODO(peah): Check whether this counter should instead be initialized to a
+  // large value.
+  poor_signal_excitation_counter_ = 0;
+  call_counter_ = 0;
+}
+
+void ShadowFilterUpdateGain::Compute(
+    const std::array<float, kFftLengthBy2Plus1>& render_power,
+    const RenderSignalAnalyzer& render_signal_analyzer,
+    const FftData& E_shadow,
+    size_t size_partitions,
+    bool saturated_capture_signal,
+    FftData* G) {
+  RTC_DCHECK(G);
+  ++call_counter_;
+
+  UpdateCurrentConfig();
+
+  if (render_signal_analyzer.PoorSignalExcitation()) {
+    poor_signal_excitation_counter_ = 0;
+  }
+
+  // Do not update the filter if the render is not sufficiently excited.
+  if (++poor_signal_excitation_counter_ < size_partitions ||
+      saturated_capture_signal || call_counter_ <= size_partitions) {
+    G->re.fill(0.f);
+    G->im.fill(0.f);
+    return;
+  }
+
+  // Compute mu.
+  std::array<float, kFftLengthBy2Plus1> mu;
+  auto X2 = render_power;
+  std::transform(X2.begin(), X2.end(), mu.begin(), [&](float a) {
+    return a > current_config_.noise_gate ? current_config_.rate / a : 0.f;
+  });
+
+  // Avoid updating the filter close to narrow bands in the render signals.
+  render_signal_analyzer.MaskRegionsAroundNarrowBands(&mu);
+
+  // G = mu * E * X2.
+  std::transform(mu.begin(), mu.end(), E_shadow.re.begin(), G->re.begin(),
+                 std::multiplies<float>());
+  std::transform(mu.begin(), mu.end(), E_shadow.im.begin(), G->im.begin(),
+                 std::multiplies<float>());
+}
+
+void ShadowFilterUpdateGain::UpdateCurrentConfig() {
+  RTC_DCHECK_GE(config_change_duration_blocks_, config_change_counter_);
+  if (config_change_counter_ > 0) {
+    if (--config_change_counter_ > 0) {
+      auto average = [](float from, float to, float from_weight) {
+        return from * from_weight + to * (1.f - from_weight);
+      };
+
+      float change_factor =
+          config_change_counter_ * one_by_config_change_duration_blocks_;
+
+      current_config_.rate =
+          average(old_target_config_.rate, target_config_.rate, change_factor);
+      current_config_.noise_gate =
+          average(old_target_config_.noise_gate, target_config_.noise_gate,
+                  change_factor);
+    } else {
+      current_config_ = old_target_config_ = target_config_;
+    }
+  }
+  RTC_DCHECK_LE(0, config_change_counter_);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/shadow_filter_update_gain.h b/modules/audio_processing/aec3/shadow_filter_update_gain.h
new file mode 100644
index 0000000..a92bc3b
--- /dev/null
+++ b/modules/audio_processing/aec3/shadow_filter_update_gain.h
@@ -0,0 +1,71 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SHADOW_FILTER_UPDATE_GAIN_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SHADOW_FILTER_UPDATE_GAIN_H_
+
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/aec3/render_signal_analyzer.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Provides functionality for computing the fixed gain for the shadow filter.
+class ShadowFilterUpdateGain {
+ public:
+  explicit ShadowFilterUpdateGain(
+      const EchoCanceller3Config::Filter::ShadowConfiguration& config,
+      size_t config_change_duration_blocks);
+
+  // Takes action in the case of a known echo path change.
+  void HandleEchoPathChange();
+
+  // Computes the gain.
+  void Compute(const std::array<float, kFftLengthBy2Plus1>& render_power,
+               const RenderSignalAnalyzer& render_signal_analyzer,
+               const FftData& E_shadow,
+               size_t size_partitions,
+               bool saturated_capture_signal,
+               FftData* G);
+
+  // Sets a new config.
+  void SetConfig(
+      const EchoCanceller3Config::Filter::ShadowConfiguration& config,
+      bool immediate_effect) {
+    if (immediate_effect) {
+      old_target_config_ = current_config_ = target_config_ = config;
+      config_change_counter_ = 0;
+    } else {
+      old_target_config_ = current_config_;
+      target_config_ = config;
+      config_change_counter_ = config_change_duration_blocks_;
+    }
+  }
+
+ private:
+  EchoCanceller3Config::Filter::ShadowConfiguration current_config_;
+  EchoCanceller3Config::Filter::ShadowConfiguration target_config_;
+  EchoCanceller3Config::Filter::ShadowConfiguration old_target_config_;
+  const int config_change_duration_blocks_;
+  float one_by_config_change_duration_blocks_;
+  // TODO(peah): Check whether this counter should instead be initialized to a
+  // large value.
+  size_t poor_signal_excitation_counter_ = 0;
+  size_t call_counter_ = 0;
+  int config_change_counter_ = 0;
+
+  void UpdateCurrentConfig();
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_SHADOW_FILTER_UPDATE_GAIN_H_
diff --git a/modules/audio_processing/aec3/shadow_filter_update_gain_unittest.cc b/modules/audio_processing/aec3/shadow_filter_update_gain_unittest.cc
new file mode 100644
index 0000000..d77da33
--- /dev/null
+++ b/modules/audio_processing/aec3/shadow_filter_update_gain_unittest.cc
@@ -0,0 +1,239 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/shadow_filter_update_gain.h"
+
+#include <algorithm>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include "modules/audio_processing/aec3/adaptive_fir_filter.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "rtc_base/random.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+// Method for performing the simulations needed to test the main filter update
+// gain functionality.
+void RunFilterUpdateTest(int num_blocks_to_process,
+                         size_t delay_samples,
+                         int filter_length_blocks,
+                         const std::vector<int>& blocks_with_saturation,
+                         std::array<float, kBlockSize>* e_last_block,
+                         std::array<float, kBlockSize>* y_last_block,
+                         FftData* G_last_block) {
+  ApmDataDumper data_dumper(42);
+  EchoCanceller3Config config;
+  config.filter.main.length_blocks = filter_length_blocks;
+  AdaptiveFirFilter main_filter(config.filter.main.length_blocks,
+                                config.filter.main.length_blocks,
+                                config.filter.config_change_duration_blocks,
+                                DetectOptimization(), &data_dumper);
+  AdaptiveFirFilter shadow_filter(config.filter.shadow.length_blocks,
+                                  config.filter.shadow.length_blocks,
+                                  config.filter.config_change_duration_blocks,
+                                  DetectOptimization(), &data_dumper);
+  Aec3Fft fft;
+
+  config.delay.min_echo_path_delay_blocks = 0;
+  config.delay.default_delay = 1;
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(config, 3));
+
+  std::array<float, kBlockSize> x_old;
+  x_old.fill(0.f);
+  ShadowFilterUpdateGain shadow_gain(
+      config.filter.shadow, config.filter.config_change_duration_blocks);
+  Random random_generator(42U);
+  std::vector<std::vector<float>> x(3, std::vector<float>(kBlockSize, 0.f));
+  std::vector<float> y(kBlockSize, 0.f);
+  AecState aec_state(config);
+  RenderSignalAnalyzer render_signal_analyzer(config);
+  std::array<float, kFftLength> s;
+  FftData S;
+  FftData G;
+  FftData E_shadow;
+  std::array<float, kBlockSize> e_shadow;
+
+  constexpr float kScale = 1.0f / kFftLengthBy2;
+
+  DelayBuffer<float> delay_buffer(delay_samples);
+  for (int k = 0; k < num_blocks_to_process; ++k) {
+    // Handle saturation.
+    bool saturation =
+        std::find(blocks_with_saturation.begin(), blocks_with_saturation.end(),
+                  k) != blocks_with_saturation.end();
+
+    // Create the render signal.
+    RandomizeSampleVector(&random_generator, x[0]);
+    delay_buffer.Delay(x[0], y);
+
+    render_delay_buffer->Insert(x);
+    if (k == 0) {
+      render_delay_buffer->Reset();
+    }
+    render_delay_buffer->PrepareCaptureProcessing();
+
+    render_signal_analyzer.Update(*render_delay_buffer->GetRenderBuffer(),
+                                  delay_samples / kBlockSize);
+
+    shadow_filter.Filter(*render_delay_buffer->GetRenderBuffer(), &S);
+    fft.Ifft(S, &s);
+    std::transform(y.begin(), y.end(), s.begin() + kFftLengthBy2,
+                   e_shadow.begin(),
+                   [&](float a, float b) { return a - b * kScale; });
+    std::for_each(e_shadow.begin(), e_shadow.end(),
+                  [](float& a) { a = rtc::SafeClamp(a, -32768.f, 32767.f); });
+    fft.ZeroPaddedFft(e_shadow, Aec3Fft::Window::kRectangular, &E_shadow);
+
+    std::array<float, kFftLengthBy2Plus1> render_power;
+    render_delay_buffer->GetRenderBuffer()->SpectralSum(
+        shadow_filter.SizePartitions(), &render_power);
+    shadow_gain.Compute(render_power, render_signal_analyzer, E_shadow,
+                        shadow_filter.SizePartitions(), saturation, &G);
+    shadow_filter.Adapt(*render_delay_buffer->GetRenderBuffer(), G);
+  }
+
+  std::copy(e_shadow.begin(), e_shadow.end(), e_last_block->begin());
+  std::copy(y.begin(), y.end(), y_last_block->begin());
+  std::copy(G.re.begin(), G.re.end(), G_last_block->re.begin());
+  std::copy(G.im.begin(), G.im.end(), G_last_block->im.begin());
+}
+
+std::string ProduceDebugText(int filter_length_blocks) {
+  std::ostringstream ss;
+  ss << "Length: " << filter_length_blocks;
+  return ss.str();
+}
+
+std::string ProduceDebugText(size_t delay, int filter_length_blocks) {
+  std::ostringstream ss;
+  ss << "Delay: " << delay << ", ";
+  ss << ProduceDebugText(filter_length_blocks);
+  return ss.str();
+}
+
+}  // namespace
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies that the check for non-null output gain parameter works.
+TEST(ShadowFilterUpdateGain, NullDataOutputGain) {
+  ApmDataDumper data_dumper(42);
+  FftBuffer fft_buffer(1);
+  RenderSignalAnalyzer analyzer(EchoCanceller3Config{});
+  FftData E;
+  const EchoCanceller3Config::Filter::ShadowConfiguration& config = {
+      12, 0.5f, 220075344.f};
+  ShadowFilterUpdateGain gain(config, 250);
+  std::array<float, kFftLengthBy2Plus1> render_power;
+  render_power.fill(0.f);
+  EXPECT_DEATH(gain.Compute(render_power, analyzer, E, 1, false, nullptr), "");
+}
+
+#endif
+
+// Verifies that the gain formed causes the filter using it to converge.
+TEST(ShadowFilterUpdateGain, GainCausesFilterToConverge) {
+  std::vector<int> blocks_with_echo_path_changes;
+  std::vector<int> blocks_with_saturation;
+  for (size_t filter_length_blocks : {12, 20, 30}) {
+    for (size_t delay_samples : {0, 64, 150, 200, 301}) {
+      SCOPED_TRACE(ProduceDebugText(delay_samples, filter_length_blocks));
+
+      std::array<float, kBlockSize> e;
+      std::array<float, kBlockSize> y;
+      FftData G;
+
+      RunFilterUpdateTest(1000, delay_samples, filter_length_blocks,
+                          blocks_with_saturation, &e, &y, &G);
+
+      // Verify that the main filter is able to perform well.
+      // Use different criteria to take overmodelling into account.
+      if (filter_length_blocks == 12) {
+        EXPECT_LT(1000 * std::inner_product(e.begin(), e.end(), e.begin(), 0.f),
+                  std::inner_product(y.begin(), y.end(), y.begin(), 0.f));
+      } else {
+        EXPECT_LT(std::inner_product(e.begin(), e.end(), e.begin(), 0.f),
+                  std::inner_product(y.begin(), y.end(), y.begin(), 0.f));
+      }
+    }
+  }
+}
+
+// Verifies that the magnitude of the gain on average decreases for a
+// persistently exciting signal.
+TEST(ShadowFilterUpdateGain, DecreasingGain) {
+  for (size_t filter_length_blocks : {12, 20, 30}) {
+    SCOPED_TRACE(ProduceDebugText(filter_length_blocks));
+    std::vector<int> blocks_with_echo_path_changes;
+    std::vector<int> blocks_with_saturation;
+
+    std::array<float, kBlockSize> e;
+    std::array<float, kBlockSize> y;
+    FftData G_a;
+    FftData G_b;
+    FftData G_c;
+    std::array<float, kFftLengthBy2Plus1> G_a_power;
+    std::array<float, kFftLengthBy2Plus1> G_b_power;
+    std::array<float, kFftLengthBy2Plus1> G_c_power;
+
+    RunFilterUpdateTest(100, 65, filter_length_blocks, blocks_with_saturation,
+                        &e, &y, &G_a);
+    RunFilterUpdateTest(200, 65, filter_length_blocks, blocks_with_saturation,
+                        &e, &y, &G_b);
+    RunFilterUpdateTest(300, 65, filter_length_blocks, blocks_with_saturation,
+                        &e, &y, &G_c);
+
+    G_a.Spectrum(Aec3Optimization::kNone, G_a_power);
+    G_b.Spectrum(Aec3Optimization::kNone, G_b_power);
+    G_c.Spectrum(Aec3Optimization::kNone, G_c_power);
+
+    EXPECT_GT(std::accumulate(G_a_power.begin(), G_a_power.end(), 0.),
+              std::accumulate(G_b_power.begin(), G_b_power.end(), 0.));
+
+    EXPECT_GT(std::accumulate(G_b_power.begin(), G_b_power.end(), 0.),
+              std::accumulate(G_c_power.begin(), G_c_power.end(), 0.));
+  }
+}
+
+// Verifies that the gain is zero when there is saturation.
+TEST(ShadowFilterUpdateGain, SaturationBehavior) {
+  std::vector<int> blocks_with_echo_path_changes;
+  std::vector<int> blocks_with_saturation;
+  for (int k = 99; k < 200; ++k) {
+    blocks_with_saturation.push_back(k);
+  }
+  for (size_t filter_length_blocks : {12, 20, 30}) {
+    SCOPED_TRACE(ProduceDebugText(filter_length_blocks));
+
+    std::array<float, kBlockSize> e;
+    std::array<float, kBlockSize> y;
+    FftData G_a;
+    FftData G_a_ref;
+    G_a_ref.re.fill(0.f);
+    G_a_ref.im.fill(0.f);
+
+    RunFilterUpdateTest(100, 65, filter_length_blocks, blocks_with_saturation,
+                        &e, &y, &G_a);
+
+    EXPECT_EQ(G_a_ref.re, G_a.re);
+    EXPECT_EQ(G_a_ref.im, G_a.im);
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/skew_estimator.cc b/modules/audio_processing/aec3/skew_estimator.cc
new file mode 100644
index 0000000..608a707
--- /dev/null
+++ b/modules/audio_processing/aec3/skew_estimator.cc
@@ -0,0 +1,46 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/skew_estimator.h"
+
+#include <algorithm>
+#include <numeric>
+
+namespace webrtc {
+
+SkewEstimator::SkewEstimator(size_t skew_history_size_log2)
+    : skew_history_size_log2_(static_cast<int>(skew_history_size_log2)),
+      skew_history_(1ULL << skew_history_size_log2_, 0) {}
+
+SkewEstimator::~SkewEstimator() = default;
+
+void SkewEstimator::Reset() {
+  skew_ = 0;
+  skew_sum_ = 0;
+  next_index_ = 0;
+  sufficient_skew_stored_ = false;
+  std::fill(skew_history_.begin(), skew_history_.end(), 0);
+}
+
+rtc::Optional<int> SkewEstimator::GetSkewFromCapture() {
+  --skew_;
+
+  skew_sum_ += skew_ - skew_history_[next_index_];
+  skew_history_[next_index_] = skew_;
+  if (++next_index_ == skew_history_.size()) {
+    next_index_ = 0;
+    sufficient_skew_stored_ = true;
+  }
+
+  return sufficient_skew_stored_
+             ? rtc::Optional<int>(skew_sum_ >> skew_history_size_log2_)
+             : rtc::nullopt;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/skew_estimator.h b/modules/audio_processing/aec3/skew_estimator.h
new file mode 100644
index 0000000..2afcd76
--- /dev/null
+++ b/modules/audio_processing/aec3/skew_estimator.h
@@ -0,0 +1,50 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SKEW_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SKEW_ESTIMATOR_H_
+
+#include <vector>
+
+#include "api/optional.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Estimator of API call skew between render and capture.
+class SkewEstimator {
+ public:
+  explicit SkewEstimator(size_t skew_history_size_log2);
+  ~SkewEstimator();
+
+  // Resets the estimation.
+  void Reset();
+
+  // Updates the skew data for a render call.
+  void LogRenderCall() { ++skew_; }
+
+  // Updates and computes the skew at a capture call. Returns an optional which
+  // is non-null if a reliable skew has been found.
+  rtc::Optional<int> GetSkewFromCapture();
+
+ private:
+  const int skew_history_size_log2_;
+  std::vector<float> skew_history_;
+  int skew_ = 0;
+  int skew_sum_ = 0;
+  size_t next_index_ = 0;
+  bool sufficient_skew_stored_ = false;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(SkewEstimator);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_SKEW_ESTIMATOR_H_
diff --git a/modules/audio_processing/aec3/skew_estimator_unittest.cc b/modules/audio_processing/aec3/skew_estimator_unittest.cc
new file mode 100644
index 0000000..a1a679f
--- /dev/null
+++ b/modules/audio_processing/aec3/skew_estimator_unittest.cc
@@ -0,0 +1,124 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/skew_estimator.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace aec3 {
+
+// Tests that the skew ends up as it should after a skew change.
+TEST(SkewEstimator, SkewChangeAdaptation) {
+  constexpr int kNumSkewsLog2 = 7;
+  constexpr int kNumSkews = 1 << kNumSkewsLog2;
+
+  SkewEstimator estimator(kNumSkewsLog2);
+
+  for (int k = 0; k < kNumSkews - 1; ++k) {
+    estimator.LogRenderCall();
+    auto skew = estimator.GetSkewFromCapture();
+    EXPECT_FALSE(skew);
+  }
+
+  estimator.LogRenderCall();
+
+  rtc::Optional<int> skew;
+  for (int k = 0; k < kNumSkews; ++k) {
+    estimator.LogRenderCall();
+    skew = estimator.GetSkewFromCapture();
+    EXPECT_TRUE(skew);
+  }
+  EXPECT_EQ(1, *skew);
+
+  estimator.LogRenderCall();
+
+  for (int k = 0; k < kNumSkews; ++k) {
+    estimator.LogRenderCall();
+    skew = estimator.GetSkewFromCapture();
+    EXPECT_TRUE(skew);
+  }
+  EXPECT_EQ(2, *skew);
+}
+
+// Tests that the skew ends up as it should for a surplus of render calls.
+TEST(SkewEstimator, SkewForSurplusRender) {
+  constexpr int kNumSkewsLog2 = 7;
+  constexpr int kNumSkews = 1 << kNumSkewsLog2;
+
+  SkewEstimator estimator(kNumSkewsLog2);
+
+  for (int k = 0; k < kNumSkews - 1; ++k) {
+    estimator.LogRenderCall();
+    auto skew = estimator.GetSkewFromCapture();
+    EXPECT_FALSE(skew);
+  }
+
+  estimator.LogRenderCall();
+
+  rtc::Optional<int> skew;
+  for (int k = 0; k < kNumSkews; ++k) {
+    estimator.LogRenderCall();
+    skew = estimator.GetSkewFromCapture();
+    EXPECT_TRUE(skew);
+  }
+  EXPECT_EQ(1, *skew);
+}
+
+// Tests that the skew ends up as it should for a surplus of capture calls.
+TEST(SkewEstimator, SkewForSurplusCapture) {
+  constexpr int kNumSkewsLog2 = 7;
+  constexpr int kNumSkews = 1 << kNumSkewsLog2;
+
+  SkewEstimator estimator(kNumSkewsLog2);
+
+  for (int k = 0; k < kNumSkews - 1; ++k) {
+    estimator.LogRenderCall();
+    auto skew = estimator.GetSkewFromCapture();
+    EXPECT_FALSE(skew);
+  }
+
+  rtc::Optional<int> skew;
+  skew = estimator.GetSkewFromCapture();
+
+  for (int k = 0; k < kNumSkews; ++k) {
+    estimator.LogRenderCall();
+    skew = estimator.GetSkewFromCapture();
+    EXPECT_TRUE(skew);
+  }
+  EXPECT_EQ(-1, *skew);
+}
+
+// Tests that the skew estimator returns a null optional when it should.
+TEST(SkewEstimator, NullEstimate) {
+  constexpr int kNumSkewsLog2 = 4;
+  constexpr int kNumSkews = 1 << kNumSkewsLog2;
+
+  SkewEstimator estimator(kNumSkewsLog2);
+
+  for (int k = 0; k < kNumSkews - 1; ++k) {
+    estimator.LogRenderCall();
+    auto skew = estimator.GetSkewFromCapture();
+    EXPECT_FALSE(skew);
+  }
+
+  estimator.LogRenderCall();
+  auto skew = estimator.GetSkewFromCapture();
+  EXPECT_TRUE(skew);
+
+  estimator.Reset();
+  for (int k = 0; k < kNumSkews - 1; ++k) {
+    estimator.LogRenderCall();
+    auto skew = estimator.GetSkewFromCapture();
+    EXPECT_FALSE(skew);
+  }
+}
+}  // namespace aec3
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/subtractor.cc b/modules/audio_processing/aec3/subtractor.cc
new file mode 100644
index 0000000..b6a68af
--- /dev/null
+++ b/modules/audio_processing/aec3/subtractor.cc
@@ -0,0 +1,213 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/subtractor.h"
+
+#include <algorithm>
+#include <numeric>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+
+namespace {
+
+void PredictionError(const Aec3Fft& fft,
+                     const FftData& S,
+                     rtc::ArrayView<const float> y,
+                     std::array<float, kBlockSize>* e,
+                     std::array<float, kBlockSize>* s,
+                     bool* saturation) {
+  std::array<float, kFftLength> tmp;
+  fft.Ifft(S, &tmp);
+  constexpr float kScale = 1.0f / kFftLengthBy2;
+  std::transform(y.begin(), y.end(), tmp.begin() + kFftLengthBy2, e->begin(),
+                 [&](float a, float b) { return a - b * kScale; });
+
+  *saturation = false;
+
+  if (s) {
+    for (size_t k = 0; k < s->size(); ++k) {
+      (*s)[k] = kScale * tmp[k + kFftLengthBy2];
+    }
+    auto result = std::minmax_element(s->begin(), s->end());
+    *saturation = *result.first <= -32768 || *result.first >= 32767;
+  }
+  if (!(*saturation)) {
+    auto result = std::minmax_element(e->begin(), e->end());
+    *saturation = *result.first <= -32768 || *result.first >= 32767;
+  }
+
+  std::for_each(e->begin(), e->end(),
+                [](float& a) { a = rtc::SafeClamp(a, -32768.f, 32767.f); });
+}
+
+}  // namespace
+
+Subtractor::Subtractor(const EchoCanceller3Config& config,
+                       ApmDataDumper* data_dumper,
+                       Aec3Optimization optimization)
+    : fft_(),
+      data_dumper_(data_dumper),
+      optimization_(optimization),
+      config_(config),
+      main_filter_(config_.filter.main.length_blocks,
+                   config_.filter.main_initial.length_blocks,
+                   config.filter.config_change_duration_blocks,
+                   optimization,
+                   data_dumper_),
+      shadow_filter_(config_.filter.shadow.length_blocks,
+                     config_.filter.shadow_initial.length_blocks,
+                     config.filter.config_change_duration_blocks,
+                     optimization,
+                     data_dumper_),
+      G_main_(config_.filter.main_initial,
+              config_.filter.config_change_duration_blocks),
+      G_shadow_(config_.filter.shadow_initial,
+                config.filter.config_change_duration_blocks) {
+  RTC_DCHECK(data_dumper_);
+  // Currently, the rest of AEC3 requires the main and shadow filter lengths to
+  // be identical.
+  RTC_DCHECK_EQ(config_.filter.main.length_blocks,
+                config_.filter.shadow.length_blocks);
+  RTC_DCHECK_EQ(config_.filter.main_initial.length_blocks,
+                config_.filter.shadow_initial.length_blocks);
+}
+
+Subtractor::~Subtractor() = default;
+
+void Subtractor::HandleEchoPathChange(
+    const EchoPathVariability& echo_path_variability) {
+  const auto full_reset = [&]() {
+    main_filter_.HandleEchoPathChange();
+    shadow_filter_.HandleEchoPathChange();
+    G_main_.HandleEchoPathChange(echo_path_variability);
+    G_shadow_.HandleEchoPathChange();
+    G_main_.SetConfig(config_.filter.main_initial, true);
+    G_shadow_.SetConfig(config_.filter.shadow_initial, true);
+    main_filter_converged_ = false;
+    shadow_filter_converged_ = false;
+    main_filter_.SetSizePartitions(config_.filter.main_initial.length_blocks,
+                                   true);
+    shadow_filter_.SetSizePartitions(
+        config_.filter.shadow_initial.length_blocks, true);
+  };
+
+  // TODO(peah): Add delay-change specific reset behavior.
+  if ((echo_path_variability.delay_change ==
+       EchoPathVariability::DelayAdjustment::kBufferFlush) ||
+      (echo_path_variability.delay_change ==
+       EchoPathVariability::DelayAdjustment::kDelayReset)) {
+    full_reset();
+  } else if (echo_path_variability.delay_change ==
+             EchoPathVariability::DelayAdjustment::kNewDetectedDelay) {
+    full_reset();
+  } else if (echo_path_variability.delay_change ==
+             EchoPathVariability::DelayAdjustment::kBufferReadjustment) {
+    full_reset();
+  }
+}
+
+void Subtractor::ExitInitialState() {
+  G_main_.SetConfig(config_.filter.main, false);
+  G_shadow_.SetConfig(config_.filter.shadow, false);
+  main_filter_.SetSizePartitions(config_.filter.main.length_blocks, false);
+  shadow_filter_.SetSizePartitions(config_.filter.shadow.length_blocks, false);
+}
+
+void Subtractor::Process(const RenderBuffer& render_buffer,
+                         const rtc::ArrayView<const float> capture,
+                         const RenderSignalAnalyzer& render_signal_analyzer,
+                         const AecState& aec_state,
+                         SubtractorOutput* output) {
+  RTC_DCHECK_EQ(kBlockSize, capture.size());
+  rtc::ArrayView<const float> y = capture;
+  FftData& E_main = output->E_main;
+  FftData& E_main_nonwindowed = output->E_main_nonwindowed;
+  FftData E_shadow;
+  std::array<float, kBlockSize>& e_main = output->e_main;
+  std::array<float, kBlockSize>& e_shadow = output->e_shadow;
+
+  FftData S;
+  FftData& G = S;
+
+  // Form the output of the main filter.
+  main_filter_.Filter(render_buffer, &S);
+  bool main_saturation = false;
+  PredictionError(fft_, S, y, &e_main, &output->s_main, &main_saturation);
+  fft_.ZeroPaddedFft(e_main, Aec3Fft::Window::kHanning, &E_main);
+
+  // Form the output of the shadow filter.
+  shadow_filter_.Filter(render_buffer, &S);
+  bool shadow_saturation = false;
+  PredictionError(fft_, S, y, &e_shadow, nullptr, &shadow_saturation);
+  fft_.ZeroPaddedFft(e_shadow, Aec3Fft::Window::kHanning, &E_shadow);
+
+  if (!(main_filter_converged_ || shadow_filter_converged_)) {
+    const auto sum_of_squares = [](float a, float b) { return a + b * b; };
+    const float y2 = std::accumulate(y.begin(), y.end(), 0.f, sum_of_squares);
+
+    if (!main_filter_converged_) {
+      const float e2_main =
+          std::accumulate(e_main.begin(), e_main.end(), 0.f, sum_of_squares);
+      main_filter_converged_ = e2_main > 0.1 * y2;
+    }
+
+    if (!shadow_filter_converged_) {
+      const float e2_shadow = std::accumulate(e_shadow.begin(), e_shadow.end(),
+                                              0.f, sum_of_squares);
+      shadow_filter_converged_ = e2_shadow > 0.1 * y2;
+    }
+  }
+
+  // Compute spectra for future use.
+  E_shadow.Spectrum(optimization_, output->E2_shadow);
+  E_main.Spectrum(optimization_, output->E2_main);
+
+  if (main_filter_converged_ || !shadow_filter_converged_) {
+    fft_.ZeroPaddedFft(e_main, Aec3Fft::Window::kRectangular,
+                       &E_main_nonwindowed);
+    E_main_nonwindowed.Spectrum(optimization_, output->E2_main_nonwindowed);
+  } else {
+    fft_.ZeroPaddedFft(e_shadow, Aec3Fft::Window::kRectangular,
+                       &E_main_nonwindowed);
+    E_main_nonwindowed.Spectrum(optimization_, output->E2_main_nonwindowed);
+  }
+
+  // Update the main filter.
+  std::array<float, kFftLengthBy2Plus1> X2;
+  render_buffer.SpectralSum(main_filter_.SizePartitions(), &X2);
+  G_main_.Compute(X2, render_signal_analyzer, *output, main_filter_,
+                  aec_state.SaturatedCapture() || main_saturation, &G);
+  main_filter_.Adapt(render_buffer, G);
+  data_dumper_->DumpRaw("aec3_subtractor_G_main", G.re);
+  data_dumper_->DumpRaw("aec3_subtractor_G_main", G.im);
+
+  // Update the shadow filter.
+  if (shadow_filter_.SizePartitions() != main_filter_.SizePartitions()) {
+    render_buffer.SpectralSum(shadow_filter_.SizePartitions(), &X2);
+  }
+  G_shadow_.Compute(X2, render_signal_analyzer, E_shadow,
+                    shadow_filter_.SizePartitions(),
+                    aec_state.SaturatedCapture() || shadow_saturation, &G);
+  shadow_filter_.Adapt(render_buffer, G);
+
+  data_dumper_->DumpRaw("aec3_subtractor_G_shadow", G.re);
+  data_dumper_->DumpRaw("aec3_subtractor_G_shadow", G.im);
+
+  main_filter_.DumpFilter("aec3_subtractor_H_main", "aec3_subtractor_h_main");
+  shadow_filter_.DumpFilter("aec3_subtractor_H_shadow",
+                            "aec3_subtractor_h_shadow");
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/subtractor.h b/modules/audio_processing/aec3/subtractor.h
new file mode 100644
index 0000000..b3c8506
--- /dev/null
+++ b/modules/audio_processing/aec3/subtractor.h
@@ -0,0 +1,88 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_H_
+
+#include <array>
+#include <algorithm>
+#include <vector>
+
+#include "modules/audio_processing/aec3/adaptive_fir_filter.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/echo_path_variability.h"
+#include "modules/audio_processing/aec3/main_filter_update_gain.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/aec3/shadow_filter_update_gain.h"
+#include "modules/audio_processing/aec3/subtractor_output.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "modules/audio_processing/utility/ooura_fft.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Proves linear echo cancellation functionality
+class Subtractor {
+ public:
+  Subtractor(const EchoCanceller3Config& config,
+             ApmDataDumper* data_dumper,
+             Aec3Optimization optimization);
+  ~Subtractor();
+
+  // Performs the echo subtraction.
+  void Process(const RenderBuffer& render_buffer,
+               const rtc::ArrayView<const float> capture,
+               const RenderSignalAnalyzer& render_signal_analyzer,
+               const AecState& aec_state,
+               SubtractorOutput* output);
+
+  void HandleEchoPathChange(const EchoPathVariability& echo_path_variability);
+
+  // Exits the initial state.
+  void ExitInitialState();
+
+  // Returns the block-wise frequency response for the main adaptive filter.
+  const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+  FilterFrequencyResponse() const {
+    return main_filter_converged_ || (!shadow_filter_converged_)
+               ? main_filter_.FilterFrequencyResponse()
+               : shadow_filter_.FilterFrequencyResponse();
+  }
+
+  // Returns the estimate of the impulse response for the main adaptive filter.
+  const std::vector<float>& FilterImpulseResponse() const {
+    return main_filter_converged_ || (!shadow_filter_converged_)
+               ? main_filter_.FilterImpulseResponse()
+               : shadow_filter_.FilterImpulseResponse();
+  }
+
+  bool ConvergedFilter() const {
+    return main_filter_converged_ || shadow_filter_converged_;
+  }
+
+ private:
+  const Aec3Fft fft_;
+  ApmDataDumper* data_dumper_;
+  const Aec3Optimization optimization_;
+  const EchoCanceller3Config config_;
+  AdaptiveFirFilter main_filter_;
+  AdaptiveFirFilter shadow_filter_;
+  MainFilterUpdateGain G_main_;
+  ShadowFilterUpdateGain G_shadow_;
+  bool main_filter_converged_ = false;
+  bool shadow_filter_converged_ = false;
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(Subtractor);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_H_
diff --git a/modules/audio_processing/aec3/subtractor_output.h b/modules/audio_processing/aec3/subtractor_output.h
new file mode 100644
index 0000000..83f6cf5
--- /dev/null
+++ b/modules/audio_processing/aec3/subtractor_output.h
@@ -0,0 +1,45 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_OUTPUT_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_OUTPUT_H_
+
+#include <array>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/fft_data.h"
+
+namespace webrtc {
+
+// Stores the values being returned from the echo subtractor.
+struct SubtractorOutput {
+  std::array<float, kBlockSize> s_main;
+  std::array<float, kBlockSize> e_main;
+  std::array<float, kBlockSize> e_shadow;
+  FftData E_main;
+  FftData E_main_nonwindowed;
+  std::array<float, kFftLengthBy2Plus1> E2_main;
+  std::array<float, kFftLengthBy2Plus1> E2_main_nonwindowed;
+  std::array<float, kFftLengthBy2Plus1> E2_shadow;
+
+  void Reset() {
+    s_main.fill(0.f);
+    e_main.fill(0.f);
+    e_shadow.fill(0.f);
+    E_main.re.fill(0.f);
+    E_main.im.fill(0.f);
+    E2_main.fill(0.f);
+    E2_shadow.fill(0.f);
+  }
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_SUBTRACTOR_OUTPUT_H_
diff --git a/modules/audio_processing/aec3/subtractor_unittest.cc b/modules/audio_processing/aec3/subtractor_unittest.cc
new file mode 100644
index 0000000..5a8e070
--- /dev/null
+++ b/modules/audio_processing/aec3/subtractor_unittest.cc
@@ -0,0 +1,210 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/subtractor.h"
+
+#include <algorithm>
+#include <numeric>
+#include <string>
+
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/test/echo_canceller_test_tools.h"
+#include "rtc_base/random.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+float RunSubtractorTest(int num_blocks_to_process,
+                        int delay_samples,
+                        int filter_length_blocks,
+                        bool uncorrelated_inputs,
+                        const std::vector<int>& blocks_with_echo_path_changes) {
+  ApmDataDumper data_dumper(42);
+  EchoCanceller3Config config;
+  config.filter.main.length_blocks = config.filter.shadow.length_blocks =
+      filter_length_blocks;
+  Subtractor subtractor(config, &data_dumper, DetectOptimization());
+  rtc::Optional<DelayEstimate> delay_estimate;
+  std::vector<std::vector<float>> x(3, std::vector<float>(kBlockSize, 0.f));
+  std::vector<float> y(kBlockSize, 0.f);
+  std::array<float, kBlockSize> x_old;
+  SubtractorOutput output;
+  config.delay.min_echo_path_delay_blocks = 0;
+  config.delay.default_delay = 1;
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(config, 3));
+  RenderSignalAnalyzer render_signal_analyzer(config);
+  Random random_generator(42U);
+  Aec3Fft fft;
+  std::array<float, kFftLengthBy2Plus1> Y2;
+  std::array<float, kFftLengthBy2Plus1> E2_main;
+  std::array<float, kFftLengthBy2Plus1> E2_shadow;
+  AecState aec_state(config);
+  x_old.fill(0.f);
+  Y2.fill(0.f);
+  E2_main.fill(0.f);
+  E2_shadow.fill(0.f);
+
+  DelayBuffer<float> delay_buffer(delay_samples);
+  for (int k = 0; k < num_blocks_to_process; ++k) {
+    RandomizeSampleVector(&random_generator, x[0]);
+    if (uncorrelated_inputs) {
+      RandomizeSampleVector(&random_generator, y);
+    } else {
+      delay_buffer.Delay(x[0], y);
+    }
+    render_delay_buffer->Insert(x);
+    if (k == 0) {
+      render_delay_buffer->Reset();
+    }
+    render_delay_buffer->PrepareCaptureProcessing();
+    render_signal_analyzer.Update(*render_delay_buffer->GetRenderBuffer(),
+                                  aec_state.FilterDelay());
+
+    // Handle echo path changes.
+    if (std::find(blocks_with_echo_path_changes.begin(),
+                  blocks_with_echo_path_changes.end(),
+                  k) != blocks_with_echo_path_changes.end()) {
+      subtractor.HandleEchoPathChange(EchoPathVariability(
+          true, EchoPathVariability::DelayAdjustment::kNewDetectedDelay,
+          false));
+    }
+    subtractor.Process(*render_delay_buffer->GetRenderBuffer(), y,
+                       render_signal_analyzer, aec_state, &output);
+
+    aec_state.HandleEchoPathChange(EchoPathVariability(
+        false, EchoPathVariability::DelayAdjustment::kNone, false));
+    aec_state.Update(delay_estimate, subtractor.FilterFrequencyResponse(),
+                     subtractor.FilterImpulseResponse(),
+                     subtractor.ConvergedFilter(),
+                     *render_delay_buffer->GetRenderBuffer(), E2_main, Y2,
+                     output.s_main, false);
+  }
+
+  const float output_power = std::inner_product(
+      output.e_main.begin(), output.e_main.end(), output.e_main.begin(), 0.f);
+  const float y_power = std::inner_product(y.begin(), y.end(), y.begin(), 0.f);
+  if (y_power == 0.f) {
+    ADD_FAILURE();
+    return -1.0;
+  }
+  return output_power / y_power;
+}
+
+std::string ProduceDebugText(size_t delay, int filter_length_blocks) {
+  std::ostringstream ss;
+  ss << "Delay: " << delay << ", ";
+  ss << "Length: " << filter_length_blocks;
+  return ss.str();
+}
+
+}  // namespace
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies that the check for non data dumper works.
+TEST(Subtractor, NullDataDumper) {
+  EXPECT_DEATH(
+      Subtractor(EchoCanceller3Config(), nullptr, DetectOptimization()), "");
+}
+
+// Verifies the check for null subtractor output.
+// TODO(peah): Re-enable the test once the issue with memory leaks during DEATH
+// tests on test bots has been fixed.
+TEST(Subtractor, DISABLED_NullOutput) {
+  ApmDataDumper data_dumper(42);
+  EchoCanceller3Config config;
+  Subtractor subtractor(config, &data_dumper, DetectOptimization());
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(config, 3));
+  RenderSignalAnalyzer render_signal_analyzer(config);
+  std::vector<float> y(kBlockSize, 0.f);
+
+  EXPECT_DEATH(
+      subtractor.Process(*render_delay_buffer->GetRenderBuffer(), y,
+                         render_signal_analyzer, AecState(config), nullptr),
+      "");
+}
+
+// Verifies the check for the capture signal size.
+TEST(Subtractor, WrongCaptureSize) {
+  ApmDataDumper data_dumper(42);
+  EchoCanceller3Config config;
+  Subtractor subtractor(config, &data_dumper, DetectOptimization());
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(config, 3));
+  RenderSignalAnalyzer render_signal_analyzer(config);
+  std::vector<float> y(kBlockSize - 1, 0.f);
+  SubtractorOutput output;
+
+  EXPECT_DEATH(
+      subtractor.Process(*render_delay_buffer->GetRenderBuffer(), y,
+                         render_signal_analyzer, AecState(config), &output),
+      "");
+}
+
+#endif
+
+// Verifies that the subtractor is able to converge on correlated data.
+TEST(Subtractor, Convergence) {
+  std::vector<int> blocks_with_echo_path_changes;
+  for (size_t filter_length_blocks : {12, 20, 30}) {
+    for (size_t delay_samples : {0, 64, 150, 200, 301}) {
+      SCOPED_TRACE(ProduceDebugText(delay_samples, filter_length_blocks));
+
+      float echo_to_nearend_power =
+          RunSubtractorTest(400, delay_samples, filter_length_blocks, false,
+                            blocks_with_echo_path_changes);
+
+      // Use different criteria to take overmodelling into account.
+      if (filter_length_blocks == 12) {
+        EXPECT_GT(0.1f, echo_to_nearend_power);
+      } else {
+        EXPECT_GT(1.f, echo_to_nearend_power);
+      }
+    }
+  }
+}
+
+// Verifies that the subtractor does not converge on uncorrelated signals.
+TEST(Subtractor, NonConvergenceOnUncorrelatedSignals) {
+  std::vector<int> blocks_with_echo_path_changes;
+  for (size_t filter_length_blocks : {12, 20, 30}) {
+    for (size_t delay_samples : {0, 64, 150, 200, 301}) {
+      SCOPED_TRACE(ProduceDebugText(delay_samples, filter_length_blocks));
+
+      float echo_to_nearend_power =
+          RunSubtractorTest(300, delay_samples, filter_length_blocks, true,
+                            blocks_with_echo_path_changes);
+      EXPECT_NEAR(1.f, echo_to_nearend_power, 0.1);
+    }
+  }
+}
+
+// Verifies that the subtractor is properly reset when there is an echo path
+// change.
+TEST(Subtractor, EchoPathChangeReset) {
+  std::vector<int> blocks_with_echo_path_changes;
+  blocks_with_echo_path_changes.push_back(99);
+  for (size_t filter_length_blocks : {12, 20, 30}) {
+    for (size_t delay_samples : {0, 64, 150, 200, 301}) {
+      SCOPED_TRACE(ProduceDebugText(delay_samples, filter_length_blocks));
+
+      float echo_to_nearend_power =
+          RunSubtractorTest(100, delay_samples, filter_length_blocks, false,
+                            blocks_with_echo_path_changes);
+      EXPECT_NEAR(1.f, echo_to_nearend_power, 0.0000001f);
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/suppression_filter.cc b/modules/audio_processing/aec3/suppression_filter.cc
new file mode 100644
index 0000000..8c92bf5
--- /dev/null
+++ b/modules/audio_processing/aec3/suppression_filter.cc
@@ -0,0 +1,172 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/suppression_filter.h"
+
+#include <math.h>
+#include <algorithm>
+#include <cstring>
+#include <functional>
+#include <numeric>
+
+#include "modules/audio_processing/utility/ooura_fft.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+namespace {
+
+// Hanning window from Matlab command win = sqrt(hanning(128)).
+const float kSqrtHanning[kFftLength] = {
+    0.00000000000000f, 0.02454122852291f, 0.04906767432742f, 0.07356456359967f,
+    0.09801714032956f, 0.12241067519922f, 0.14673047445536f, 0.17096188876030f,
+    0.19509032201613f, 0.21910124015687f, 0.24298017990326f, 0.26671275747490f,
+    0.29028467725446f, 0.31368174039889f, 0.33688985339222f, 0.35989503653499f,
+    0.38268343236509f, 0.40524131400499f, 0.42755509343028f, 0.44961132965461f,
+    0.47139673682600f, 0.49289819222978f, 0.51410274419322f, 0.53499761988710f,
+    0.55557023301960f, 0.57580819141785f, 0.59569930449243f, 0.61523159058063f,
+    0.63439328416365f, 0.65317284295378f, 0.67155895484702f, 0.68954054473707f,
+    0.70710678118655f, 0.72424708295147f, 0.74095112535496f, 0.75720884650648f,
+    0.77301045336274f, 0.78834642762661f, 0.80320753148064f, 0.81758481315158f,
+    0.83146961230255f, 0.84485356524971f, 0.85772861000027f, 0.87008699110871f,
+    0.88192126434835f, 0.89322430119552f, 0.90398929312344f, 0.91420975570353f,
+    0.92387953251129f, 0.93299279883474f, 0.94154406518302f, 0.94952818059304f,
+    0.95694033573221f, 0.96377606579544f, 0.97003125319454f, 0.97570213003853f,
+    0.98078528040323f, 0.98527764238894f, 0.98917650996478f, 0.99247953459871f,
+    0.99518472667220f, 0.99729045667869f, 0.99879545620517f, 0.99969881869620f,
+    1.00000000000000f, 0.99969881869620f, 0.99879545620517f, 0.99729045667869f,
+    0.99518472667220f, 0.99247953459871f, 0.98917650996478f, 0.98527764238894f,
+    0.98078528040323f, 0.97570213003853f, 0.97003125319454f, 0.96377606579544f,
+    0.95694033573221f, 0.94952818059304f, 0.94154406518302f, 0.93299279883474f,
+    0.92387953251129f, 0.91420975570353f, 0.90398929312344f, 0.89322430119552f,
+    0.88192126434835f, 0.87008699110871f, 0.85772861000027f, 0.84485356524971f,
+    0.83146961230255f, 0.81758481315158f, 0.80320753148064f, 0.78834642762661f,
+    0.77301045336274f, 0.75720884650648f, 0.74095112535496f, 0.72424708295147f,
+    0.70710678118655f, 0.68954054473707f, 0.67155895484702f, 0.65317284295378f,
+    0.63439328416365f, 0.61523159058063f, 0.59569930449243f, 0.57580819141785f,
+    0.55557023301960f, 0.53499761988710f, 0.51410274419322f, 0.49289819222978f,
+    0.47139673682600f, 0.44961132965461f, 0.42755509343028f, 0.40524131400499f,
+    0.38268343236509f, 0.35989503653499f, 0.33688985339222f, 0.31368174039889f,
+    0.29028467725446f, 0.26671275747490f, 0.24298017990326f, 0.21910124015687f,
+    0.19509032201613f, 0.17096188876030f, 0.14673047445536f, 0.12241067519922f,
+    0.09801714032956f, 0.07356456359967f, 0.04906767432742f, 0.02454122852291f};
+
+}  // namespace
+
+SuppressionFilter::SuppressionFilter(int sample_rate_hz)
+    : sample_rate_hz_(sample_rate_hz),
+      fft_(),
+      e_output_old_(NumBandsForRate(sample_rate_hz_)) {
+  RTC_DCHECK(ValidFullBandRate(sample_rate_hz_));
+  e_input_old_.fill(0.f);
+  std::for_each(e_output_old_.begin(), e_output_old_.end(),
+                [](std::array<float, kFftLengthBy2>& a) { a.fill(0.f); });
+}
+
+SuppressionFilter::~SuppressionFilter() = default;
+
+void SuppressionFilter::ApplyGain(
+    const FftData& comfort_noise,
+    const FftData& comfort_noise_high_band,
+    const std::array<float, kFftLengthBy2Plus1>& suppression_gain,
+    float high_bands_gain,
+    std::vector<std::vector<float>>* e) {
+  RTC_DCHECK(e);
+  RTC_DCHECK_EQ(e->size(), NumBandsForRate(sample_rate_hz_));
+  FftData E;
+  std::array<float, kFftLength> e_extended;
+  constexpr float kIfftNormalization = 2.f / kFftLength;
+
+  // Analysis filterbank.
+  std::transform(e_input_old_.begin(), e_input_old_.end(),
+                 std::begin(kSqrtHanning), e_extended.begin(),
+                 std::multiplies<float>());
+  std::transform((*e)[0].begin(), (*e)[0].end(),
+                 std::begin(kSqrtHanning) + kFftLengthBy2,
+                 e_extended.begin() + kFftLengthBy2, std::multiplies<float>());
+  std::copy((*e)[0].begin(), (*e)[0].end(), e_input_old_.begin());
+  fft_.Fft(&e_extended, &E);
+
+  // Apply gain.
+  std::transform(suppression_gain.begin(), suppression_gain.end(), E.re.begin(),
+                 E.re.begin(), std::multiplies<float>());
+  std::transform(suppression_gain.begin(), suppression_gain.end(), E.im.begin(),
+                 E.im.begin(), std::multiplies<float>());
+
+  // Compute and add the comfort noise.
+  std::array<float, kFftLengthBy2Plus1> scaled_comfort_noise;
+  std::transform(suppression_gain.begin(), suppression_gain.end(),
+                 comfort_noise.re.begin(), scaled_comfort_noise.begin(),
+                 [](float a, float b) { return std::max(1.f - a, 0.f) * b; });
+  std::transform(scaled_comfort_noise.begin(), scaled_comfort_noise.end(),
+                 E.re.begin(), E.re.begin(), std::plus<float>());
+  std::transform(suppression_gain.begin(), suppression_gain.end(),
+                 comfort_noise.im.begin(), scaled_comfort_noise.begin(),
+                 [](float a, float b) { return std::max(1.f - a, 0.f) * b; });
+  std::transform(scaled_comfort_noise.begin(), scaled_comfort_noise.end(),
+                 E.im.begin(), E.im.begin(), std::plus<float>());
+
+  // Synthesis filterbank.
+  fft_.Ifft(E, &e_extended);
+  std::transform(e_output_old_[0].begin(), e_output_old_[0].end(),
+                 std::begin(kSqrtHanning) + kFftLengthBy2, (*e)[0].begin(),
+                 [&](float a, float b) { return kIfftNormalization * a * b; });
+  std::transform(e_extended.begin(), e_extended.begin() + kFftLengthBy2,
+                 std::begin(kSqrtHanning), e_extended.begin(),
+                 [&](float a, float b) { return kIfftNormalization * a * b; });
+  std::transform((*e)[0].begin(), (*e)[0].end(), e_extended.begin(),
+                 (*e)[0].begin(), std::plus<float>());
+  std::for_each((*e)[0].begin(), (*e)[0].end(), [](float& x_k) {
+    x_k = rtc::SafeClamp(x_k, -32768.f, 32767.f);
+  });
+  std::copy(e_extended.begin() + kFftLengthBy2, e_extended.begin() + kFftLength,
+            std::begin(e_output_old_[0]));
+
+  if (e->size() > 1) {
+    // Form time-domain high-band noise.
+    std::array<float, kFftLength> time_domain_high_band_noise;
+    std::transform(comfort_noise_high_band.re.begin(),
+                   comfort_noise_high_band.re.end(), E.re.begin(),
+                   [&](float a) { return kIfftNormalization * a; });
+    std::transform(comfort_noise_high_band.im.begin(),
+                   comfort_noise_high_band.im.end(), E.im.begin(),
+                   [&](float a) { return kIfftNormalization * a; });
+    fft_.Ifft(E, &time_domain_high_band_noise);
+
+    // Scale and apply the noise to the signals.
+    const float high_bands_noise_scaling =
+        0.4f * std::max(1.f - high_bands_gain, 0.f);
+
+    std::transform(
+        (*e)[1].begin(), (*e)[1].end(), time_domain_high_band_noise.begin(),
+        (*e)[1].begin(), [&](float a, float b) {
+          return std::max(
+              std::min(b * high_bands_noise_scaling + high_bands_gain * a,
+                       32767.0f),
+              -32768.0f);
+        });
+
+    if (e->size() > 2) {
+      RTC_DCHECK_EQ(3, e->size());
+      std::for_each((*e)[2].begin(), (*e)[2].end(), [&](float& a) {
+        a = rtc::SafeClamp(a * high_bands_gain, -32768.f, 32767.f);
+      });
+    }
+
+    std::array<float, kFftLengthBy2> tmp;
+    for (size_t k = 1; k < e->size(); ++k) {
+      std::copy((*e)[k].begin(), (*e)[k].end(), tmp.begin());
+      std::copy(e_output_old_[k].begin(), e_output_old_[k].end(),
+                (*e)[k].begin());
+      std::copy(tmp.begin(), tmp.end(), e_output_old_[k].begin());
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/suppression_filter.h b/modules/audio_processing/aec3/suppression_filter.h
new file mode 100644
index 0000000..5f91dea
--- /dev/null
+++ b/modules/audio_processing/aec3/suppression_filter.h
@@ -0,0 +1,44 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_FILTER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_FILTER_H_
+
+#include <array>
+#include <vector>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec3_fft.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class SuppressionFilter {
+ public:
+  explicit SuppressionFilter(int sample_rate_hz);
+  ~SuppressionFilter();
+  void ApplyGain(const FftData& comfort_noise,
+                 const FftData& comfort_noise_high_bands,
+                 const std::array<float, kFftLengthBy2Plus1>& suppression_gain,
+                 float high_bands_gain,
+                 std::vector<std::vector<float>>* e);
+
+ private:
+  const int sample_rate_hz_;
+  const OouraFft ooura_fft_;
+  const Aec3Fft fft_;
+  std::array<float, kFftLengthBy2> e_input_old_;
+  std::vector<std::array<float, kFftLengthBy2>> e_output_old_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(SuppressionFilter);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_FILTER_H_
diff --git a/modules/audio_processing/aec3/suppression_filter_unittest.cc b/modules/audio_processing/aec3/suppression_filter_unittest.cc
new file mode 100644
index 0000000..51b3f91
--- /dev/null
+++ b/modules/audio_processing/aec3/suppression_filter_unittest.cc
@@ -0,0 +1,181 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/suppression_filter.h"
+
+#include <math.h>
+#include <algorithm>
+#include <numeric>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+constexpr float kPi = 3.141592f;
+
+void ProduceSinusoid(int sample_rate_hz,
+                     float sinusoidal_frequency_hz,
+                     size_t* sample_counter,
+                     rtc::ArrayView<float> x) {
+  // Produce a sinusoid of the specified frequency.
+  for (size_t k = *sample_counter, j = 0; k < (*sample_counter + kBlockSize);
+       ++k, ++j) {
+    x[j] =
+        32767.f * sin(2.f * kPi * sinusoidal_frequency_hz * k / sample_rate_hz);
+  }
+  *sample_counter = *sample_counter + kBlockSize;
+}
+
+}  // namespace
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies the check for null suppressor output.
+TEST(SuppressionFilter, NullOutput) {
+  FftData cn;
+  FftData cn_high_bands;
+  std::array<float, kFftLengthBy2Plus1> gain;
+
+  EXPECT_DEATH(SuppressionFilter(16000).ApplyGain(cn, cn_high_bands, gain, 1.0f,
+                                                  nullptr),
+               "");
+}
+
+// Verifies the check for allowed sample rate.
+TEST(SuppressionFilter, ProperSampleRate) {
+  EXPECT_DEATH(SuppressionFilter(16001), "");
+}
+
+#endif
+
+// Verifies that no comfort noise is added when the gain is 1.
+TEST(SuppressionFilter, ComfortNoiseInUnityGain) {
+  SuppressionFilter filter(48000);
+  FftData cn;
+  FftData cn_high_bands;
+  std::array<float, kFftLengthBy2Plus1> gain;
+
+  gain.fill(1.f);
+  cn.re.fill(1.f);
+  cn.im.fill(1.f);
+  cn_high_bands.re.fill(1.f);
+  cn_high_bands.im.fill(1.f);
+
+  std::vector<std::vector<float>> e(3, std::vector<float>(kBlockSize, 0.f));
+  std::vector<std::vector<float>> e_ref = e;
+  filter.ApplyGain(cn, cn_high_bands, gain, 1.f, &e);
+
+  for (size_t k = 0; k < e.size(); ++k) {
+    EXPECT_EQ(e_ref[k], e[k]);
+  }
+}
+
+// Verifies that the suppressor is able to suppress a signal.
+TEST(SuppressionFilter, SignalSuppression) {
+  SuppressionFilter filter(48000);
+  FftData cn;
+  FftData cn_high_bands;
+  std::array<float, kFftLengthBy2Plus1> gain;
+  std::vector<std::vector<float>> e(3, std::vector<float>(kBlockSize, 0.f));
+
+  gain.fill(1.f);
+  std::for_each(gain.begin() + 10, gain.end(), [](float& a) { a = 0.f; });
+
+  cn.re.fill(0.f);
+  cn.im.fill(0.f);
+  cn_high_bands.re.fill(0.f);
+  cn_high_bands.im.fill(0.f);
+
+  size_t sample_counter = 0;
+
+  float e0_input = 0.f;
+  float e0_output = 0.f;
+  for (size_t k = 0; k < 100; ++k) {
+    ProduceSinusoid(16000, 16000 * 40 / kFftLengthBy2 / 2, &sample_counter,
+                    e[0]);
+    e0_input =
+        std::inner_product(e[0].begin(), e[0].end(), e[0].begin(), e0_input);
+    filter.ApplyGain(cn, cn_high_bands, gain, 1.f, &e);
+    e0_output =
+        std::inner_product(e[0].begin(), e[0].end(), e[0].begin(), e0_output);
+  }
+
+  EXPECT_LT(e0_output, e0_input / 1000.f);
+}
+
+// Verifies that the suppressor is able to pass through a desired signal while
+// applying suppressing for some frequencies.
+TEST(SuppressionFilter, SignalTransparency) {
+  SuppressionFilter filter(48000);
+  FftData cn;
+  FftData cn_high_bands;
+  std::array<float, kFftLengthBy2Plus1> gain;
+  std::vector<std::vector<float>> e(3, std::vector<float>(kBlockSize, 0.f));
+
+  gain.fill(1.f);
+  std::for_each(gain.begin() + 30, gain.end(), [](float& a) { a = 0.f; });
+
+  cn.re.fill(0.f);
+  cn.im.fill(0.f);
+  cn_high_bands.re.fill(0.f);
+  cn_high_bands.im.fill(0.f);
+
+  size_t sample_counter = 0;
+
+  float e0_input = 0.f;
+  float e0_output = 0.f;
+  for (size_t k = 0; k < 100; ++k) {
+    ProduceSinusoid(16000, 16000 * 10 / kFftLengthBy2 / 2, &sample_counter,
+                    e[0]);
+    e0_input =
+        std::inner_product(e[0].begin(), e[0].end(), e[0].begin(), e0_input);
+    filter.ApplyGain(cn, cn_high_bands, gain, 1.f, &e);
+    e0_output =
+        std::inner_product(e[0].begin(), e[0].end(), e[0].begin(), e0_output);
+  }
+
+  EXPECT_LT(0.9f * e0_input, e0_output);
+}
+
+// Verifies that the suppressor delay.
+TEST(SuppressionFilter, Delay) {
+  SuppressionFilter filter(48000);
+  FftData cn;
+  FftData cn_high_bands;
+  std::array<float, kFftLengthBy2Plus1> gain;
+  std::vector<std::vector<float>> e(3, std::vector<float>(kBlockSize, 0.f));
+
+  gain.fill(1.f);
+
+  cn.re.fill(0.f);
+  cn.im.fill(0.f);
+  cn_high_bands.re.fill(0.f);
+  cn_high_bands.im.fill(0.f);
+
+  for (size_t k = 0; k < 100; ++k) {
+    for (size_t j = 0; j < 3; ++j) {
+      for (size_t i = 0; i < kBlockSize; ++i) {
+        e[j][i] = k * kBlockSize + i;
+      }
+    }
+
+    filter.ApplyGain(cn, cn_high_bands, gain, 1.f, &e);
+    if (k > 2) {
+      for (size_t j = 0; j < 2; ++j) {
+        for (size_t i = 0; i < kBlockSize; ++i) {
+          EXPECT_NEAR(k * kBlockSize + i - kBlockSize, e[j][i], 0.01);
+        }
+      }
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/suppression_gain.cc b/modules/audio_processing/aec3/suppression_gain.cc
new file mode 100644
index 0000000..53fd575
--- /dev/null
+++ b/modules/audio_processing/aec3/suppression_gain.cc
@@ -0,0 +1,462 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/suppression_gain.h"
+
+#include "typedefs.h"  // NOLINT(build/include)
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+#include <math.h>
+#include <algorithm>
+#include <functional>
+#include <numeric>
+
+#include "modules/audio_processing/aec3/vector_math.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+// Reduce gain to avoid narrow band echo leakage.
+void NarrowBandAttenuation(int narrow_bin,
+                           std::array<float, kFftLengthBy2Plus1>* gain) {
+  const int upper_bin =
+      std::min(narrow_bin + 6, static_cast<int>(kFftLengthBy2Plus1 - 1));
+  for (int k = std::max(0, narrow_bin - 6); k <= upper_bin; ++k) {
+    (*gain)[k] = std::min((*gain)[k], 0.001f);
+  }
+}
+
+// Adjust the gains according to the presence of known external filters.
+void AdjustForExternalFilters(std::array<float, kFftLengthBy2Plus1>* gain) {
+  // Limit the low frequency gains to avoid the impact of the high-pass filter
+  // on the lower-frequency gain influencing the overall achieved gain.
+  (*gain)[0] = (*gain)[1] = std::min((*gain)[1], (*gain)[2]);
+
+  // Limit the high frequency gains to avoid the impact of the anti-aliasing
+  // filter on the upper-frequency gains influencing the overall achieved
+  // gain. TODO(peah): Update this when new anti-aliasing filters are
+  // implemented.
+  constexpr size_t kAntiAliasingImpactLimit = (64 * 2000) / 8000;
+  const float min_upper_gain = (*gain)[kAntiAliasingImpactLimit];
+  std::for_each(
+      gain->begin() + kAntiAliasingImpactLimit, gain->end() - 1,
+      [min_upper_gain](float& a) { a = std::min(a, min_upper_gain); });
+  (*gain)[kFftLengthBy2] = (*gain)[kFftLengthBy2Minus1];
+}
+
+// Computes the gain to apply for the bands beyond the first band.
+float UpperBandsGain(
+    const rtc::Optional<int>& narrow_peak_band,
+    bool saturated_echo,
+    const std::vector<std::vector<float>>& render,
+    const std::array<float, kFftLengthBy2Plus1>& low_band_gain) {
+  RTC_DCHECK_LT(0, render.size());
+  if (render.size() == 1) {
+    return 1.f;
+  }
+
+  if (narrow_peak_band &&
+      (*narrow_peak_band > static_cast<int>(kFftLengthBy2Plus1 - 10))) {
+    return 0.001f;
+  }
+
+  constexpr size_t kLowBandGainLimit = kFftLengthBy2 / 2;
+  const float gain_below_8_khz = *std::min_element(
+      low_band_gain.begin() + kLowBandGainLimit, low_band_gain.end());
+
+  // Always attenuate the upper bands when there is saturated echo.
+  if (saturated_echo) {
+    return std::min(0.001f, gain_below_8_khz);
+  }
+
+  // Compute the upper and lower band energies.
+  const auto sum_of_squares = [](float a, float b) { return a + b * b; };
+  const float low_band_energy =
+      std::accumulate(render[0].begin(), render[0].end(), 0.f, sum_of_squares);
+  float high_band_energy = 0.f;
+  for (size_t k = 1; k < render.size(); ++k) {
+    const float energy = std::accumulate(render[k].begin(), render[k].end(),
+                                         0.f, sum_of_squares);
+    high_band_energy = std::max(high_band_energy, energy);
+  }
+
+  // If there is more power in the lower frequencies than the upper frequencies,
+  // or if the power in upper frequencies is low, do not bound the gain in the
+  // upper bands.
+  float anti_howling_gain;
+  constexpr float kThreshold = kBlockSize * 10.f * 10.f / 4.f;
+  if (high_band_energy < std::max(low_band_energy, kThreshold)) {
+    anti_howling_gain = 1.f;
+  } else {
+    // In all other cases, bound the gain for upper frequencies.
+    RTC_DCHECK_LE(low_band_energy, high_band_energy);
+    RTC_DCHECK_NE(0.f, high_band_energy);
+    anti_howling_gain = 0.01f * sqrtf(low_band_energy / high_band_energy);
+  }
+
+  // Choose the gain as the minimum of the lower and upper gains.
+  return std::min(gain_below_8_khz, anti_howling_gain);
+}
+
+// Computes the gain to reduce the echo to a non audible level.
+void GainToNoAudibleEcho(
+    const EchoCanceller3Config& config,
+    bool low_noise_render,
+    bool saturated_echo,
+    bool saturating_echo_path,
+    bool linear_echo_estimate,
+    const std::array<float, kFftLengthBy2Plus1>& nearend,
+    const std::array<float, kFftLengthBy2Plus1>& echo,
+    const std::array<float, kFftLengthBy2Plus1>& masker,
+    const std::array<float, kFftLengthBy2Plus1>& min_gain,
+    const std::array<float, kFftLengthBy2Plus1>& max_gain,
+    const std::array<float, kFftLengthBy2Plus1>& one_by_echo,
+    std::array<float, kFftLengthBy2Plus1>* gain) {
+  float nearend_masking_margin = 0.f;
+  if (linear_echo_estimate) {
+    nearend_masking_margin =
+        low_noise_render
+            ? config.gain_mask.m9
+            : (saturated_echo ? config.gain_mask.m2 : config.gain_mask.m3);
+  } else {
+    nearend_masking_margin = config.gain_mask.m7;
+  }
+
+  RTC_DCHECK_LE(0.f, nearend_masking_margin);
+  RTC_DCHECK_GT(1.f, nearend_masking_margin);
+  const float one_by_one_minus_nearend_masking_margin =
+      1.f / (1.0f - nearend_masking_margin);
+
+  const float masker_margin =
+      linear_echo_estimate ? config.gain_mask.m1 : config.gain_mask.m8;
+
+  for (size_t k = 0; k < gain->size(); ++k) {
+    const float unity_gain_masker = std::max(nearend[k], masker[k]);
+    RTC_DCHECK_LE(0.f, nearend_masking_margin * unity_gain_masker);
+    if (echo[k] <= nearend_masking_margin * unity_gain_masker ||
+        unity_gain_masker <= 0.f) {
+      (*gain)[k] = 1.f;
+    } else {
+      RTC_DCHECK_LT(0.f, unity_gain_masker);
+      (*gain)[k] = std::max(0.f, (1.f - 5.f * echo[k] / unity_gain_masker) *
+                                     one_by_one_minus_nearend_masking_margin);
+      (*gain)[k] =
+          std::max(masker_margin * masker[k] * one_by_echo[k], (*gain)[k]);
+    }
+
+    (*gain)[k] = std::min(std::max((*gain)[k], min_gain[k]), max_gain[k]);
+  }
+}
+
+// TODO(peah): Make adaptive to take the actual filter error into account.
+constexpr size_t kUpperAccurateBandPlus1 = 29;
+
+// Computes the signal output power that masks the echo signal.
+void MaskingPower(const EchoCanceller3Config& config,
+                  const std::array<float, kFftLengthBy2Plus1>& nearend,
+                  const std::array<float, kFftLengthBy2Plus1>& comfort_noise,
+                  const std::array<float, kFftLengthBy2Plus1>& last_masker,
+                  const std::array<float, kFftLengthBy2Plus1>& gain,
+                  std::array<float, kFftLengthBy2Plus1>* masker) {
+  std::array<float, kFftLengthBy2Plus1> side_band_masker;
+  float max_nearend_after_gain = 0.f;
+  for (size_t k = 0; k < gain.size(); ++k) {
+    const float nearend_after_gain = nearend[k] * gain[k];
+    max_nearend_after_gain =
+        std::max(max_nearend_after_gain, nearend_after_gain);
+    side_band_masker[k] = nearend_after_gain + comfort_noise[k];
+    (*masker)[k] = comfort_noise[k] + config.gain_mask.m4 * last_masker[k];
+  }
+
+  // Apply masking only between lower frequency bands.
+  RTC_DCHECK_LT(kUpperAccurateBandPlus1, gain.size());
+  for (size_t k = 1; k < kUpperAccurateBandPlus1; ++k) {
+    (*masker)[k] += config.gain_mask.m5 *
+                    (side_band_masker[k - 1] + side_band_masker[k + 1]);
+  }
+
+  // Add full-band masking as a minimum value for the masker.
+  const float min_masker = max_nearend_after_gain * config.gain_mask.m6;
+  std::for_each(masker->begin(), masker->end(),
+                [min_masker](float& a) { a = std::max(a, min_masker); });
+}
+
+// Limits the gain in the frequencies for which the adaptive filter has not
+// converged. Currently, these frequencies are not hardcoded to the frequencies
+// which are typically not excited by speech.
+// TODO(peah): Make adaptive to take the actual filter error into account.
+void AdjustNonConvergedFrequencies(
+    std::array<float, kFftLengthBy2Plus1>* gain) {
+  constexpr float oneByBandsInSum =
+      1 / static_cast<float>(kUpperAccurateBandPlus1 - 20);
+  const float hf_gain_bound =
+      std::accumulate(gain->begin() + 20,
+                      gain->begin() + kUpperAccurateBandPlus1, 0.f) *
+      oneByBandsInSum;
+
+  std::for_each(gain->begin() + kUpperAccurateBandPlus1, gain->end(),
+                [hf_gain_bound](float& a) { a = std::min(a, hf_gain_bound); });
+}
+
+}  // namespace
+
+// TODO(peah): Add further optimizations, in particular for the divisions.
+void SuppressionGain::LowerBandGain(
+    bool low_noise_render,
+    const rtc::Optional<int>& narrow_peak_band,
+    const AecState& aec_state,
+    const std::array<float, kFftLengthBy2Plus1>& nearend,
+    const std::array<float, kFftLengthBy2Plus1>& echo,
+    const std::array<float, kFftLengthBy2Plus1>& comfort_noise,
+    std::array<float, kFftLengthBy2Plus1>* gain) {
+  const bool saturated_echo = aec_state.SaturatedEcho();
+  const bool saturating_echo_path = aec_state.SaturatingEchoPath();
+  const bool linear_echo_estimate = aec_state.UsableLinearEstimate();
+
+  // Count the number of blocks since saturation.
+  no_saturation_counter_ = saturated_echo ? 0 : no_saturation_counter_ + 1;
+
+  // Precompute 1/echo (note that when the echo is zero, the precomputed value
+  // is never used).
+  std::array<float, kFftLengthBy2Plus1> one_by_echo;
+  std::transform(echo.begin(), echo.end(), one_by_echo.begin(),
+                 [](float a) { return a > 0.f ? 1.f / a : 1.f; });
+
+  // Compute the minimum gain as the attenuating gain to put the signal just
+  // above the zero sample values.
+  std::array<float, kFftLengthBy2Plus1> min_gain;
+  const float min_echo_power =
+      low_noise_render ? config_.echo_audibility.low_render_limit
+                       : config_.echo_audibility.normal_render_limit;
+  if (no_saturation_counter_ > 10) {
+    for (size_t k = 0; k < nearend.size(); ++k) {
+      const float denom = std::min(nearend[k], echo[k]);
+      min_gain[k] = denom > 0.f ? min_echo_power / denom : 1.f;
+      min_gain[k] = std::min(min_gain[k], 1.f);
+    }
+  } else {
+    min_gain.fill(0.f);
+  }
+
+  // Compute the maximum gain by limiting the gain increase from the previous
+  // gain.
+  std::array<float, kFftLengthBy2Plus1> max_gain;
+  for (size_t k = 0; k < gain->size(); ++k) {
+    max_gain[k] = std::min(std::max(last_gain_[k] * gain_increase_[k],
+                                    config_.gain_updates.floor_first_increase),
+                           1.f);
+  }
+
+  // Iteratively compute the gain required to attenuate the echo to a non
+  // noticeable level.
+  gain->fill(0.f);
+  for (int k = 0; k < 2; ++k) {
+    std::array<float, kFftLengthBy2Plus1> masker;
+    MaskingPower(config_, nearend, comfort_noise, last_masker_, *gain, &masker);
+    GainToNoAudibleEcho(config_, low_noise_render, saturated_echo,
+                        saturating_echo_path, linear_echo_estimate, nearend,
+                        echo, masker, min_gain, max_gain, one_by_echo, gain);
+    AdjustForExternalFilters(gain);
+    if (narrow_peak_band) {
+      NarrowBandAttenuation(*narrow_peak_band, gain);
+    }
+  }
+
+  // Adjust the gain for frequencies which have not yet converged.
+  AdjustNonConvergedFrequencies(gain);
+
+  // Update the allowed maximum gain increase.
+  UpdateGainIncrease(low_noise_render, linear_echo_estimate, echo, *gain);
+
+  // Adjust gain dynamics.
+  const float gain_bound =
+      std::max(0.001f, *std::min_element(gain->begin(), gain->end()) * 10000.f);
+  std::for_each(gain->begin(), gain->end(),
+                [gain_bound](float& a) { a = std::min(a, gain_bound); });
+
+  // Store data required for the gain computation of the next block.
+  std::copy(echo.begin(), echo.end(), last_echo_.begin());
+  std::copy(gain->begin(), gain->end(), last_gain_.begin());
+  MaskingPower(config_, nearend, comfort_noise, last_masker_, *gain,
+               &last_masker_);
+  aec3::VectorMath(optimization_).Sqrt(*gain);
+}
+
+SuppressionGain::SuppressionGain(const EchoCanceller3Config& config,
+                                 Aec3Optimization optimization)
+    : optimization_(optimization),
+      config_(config),
+      state_change_duration_blocks_(
+          static_cast<int>(config_.filter.config_change_duration_blocks)) {
+  RTC_DCHECK_LT(0, state_change_duration_blocks_);
+  one_by_state_change_duration_blocks_ = 1.f / state_change_duration_blocks_;
+  last_gain_.fill(1.f);
+  last_masker_.fill(0.f);
+  gain_increase_.fill(1.f);
+  last_echo_.fill(0.f);
+}
+
+void SuppressionGain::GetGain(
+    const std::array<float, kFftLengthBy2Plus1>& nearend,
+    const std::array<float, kFftLengthBy2Plus1>& echo,
+    const std::array<float, kFftLengthBy2Plus1>& comfort_noise,
+    const RenderSignalAnalyzer& render_signal_analyzer,
+    const AecState& aec_state,
+    const std::vector<std::vector<float>>& render,
+    float* high_bands_gain,
+    std::array<float, kFftLengthBy2Plus1>* low_band_gain) {
+  RTC_DCHECK(high_bands_gain);
+  RTC_DCHECK(low_band_gain);
+
+  // Compute gain for the lower band.
+  bool low_noise_render = low_render_detector_.Detect(render);
+  const rtc::Optional<int> narrow_peak_band =
+      render_signal_analyzer.NarrowPeakBand();
+  LowerBandGain(low_noise_render, narrow_peak_band, aec_state, nearend, echo,
+                comfort_noise, low_band_gain);
+
+  const float gain_upper_bound = aec_state.SuppressionGainLimit();
+  if (gain_upper_bound < 1.f) {
+    for (size_t k = 0; k < low_band_gain->size(); ++k) {
+      (*low_band_gain)[k] = std::min((*low_band_gain)[k], gain_upper_bound);
+    }
+  }
+
+  // Compute the gain for the upper bands.
+  *high_bands_gain = UpperBandsGain(narrow_peak_band, aec_state.SaturatedEcho(),
+                                    render, *low_band_gain);
+}
+
+void SuppressionGain::SetInitialState(bool state) {
+  initial_state_ = state;
+  if (state) {
+    initial_state_change_counter_ = state_change_duration_blocks_;
+  } else {
+    initial_state_change_counter_ = 0;
+  }
+}
+
+void SuppressionGain::UpdateGainIncrease(
+    bool low_noise_render,
+    bool linear_echo_estimate,
+    const std::array<float, kFftLengthBy2Plus1>& echo,
+    const std::array<float, kFftLengthBy2Plus1>& new_gain) {
+  float max_inc;
+  float max_dec;
+  float rate_inc;
+  float rate_dec;
+  float min_inc;
+  float min_dec;
+
+  RTC_DCHECK_GE(state_change_duration_blocks_, initial_state_change_counter_);
+  if (initial_state_change_counter_ > 0) {
+    if (--initial_state_change_counter_ == 0) {
+      initial_state_ = false;
+    }
+  }
+  RTC_DCHECK_LE(0, initial_state_change_counter_);
+
+  // EchoCanceller3Config::GainUpdates
+  auto& p = config_.gain_updates;
+  if (!linear_echo_estimate) {
+    max_inc = p.nonlinear.max_inc;
+    max_dec = p.nonlinear.max_dec;
+    rate_inc = p.nonlinear.rate_inc;
+    rate_dec = p.nonlinear.rate_dec;
+    min_inc = p.nonlinear.min_inc;
+    min_dec = p.nonlinear.min_dec;
+  } else if (initial_state_ && no_saturation_counter_ > 10) {
+    if (initial_state_change_counter_ > 0) {
+      float change_factor =
+          initial_state_change_counter_ * one_by_state_change_duration_blocks_;
+
+      auto average = [](float from, float to, float from_weight) {
+        return from * from_weight + to * (1.f - from_weight);
+      };
+
+      max_inc = average(p.initial.max_inc, p.normal.max_inc, change_factor);
+      max_dec = average(p.initial.max_dec, p.normal.max_dec, change_factor);
+      rate_inc = average(p.initial.rate_inc, p.normal.rate_inc, change_factor);
+      rate_dec = average(p.initial.rate_dec, p.normal.rate_dec, change_factor);
+      min_inc = average(p.initial.min_inc, p.normal.min_inc, change_factor);
+      min_dec = average(p.initial.min_dec, p.normal.min_dec, change_factor);
+    } else {
+      max_inc = p.initial.max_inc;
+      max_dec = p.initial.max_dec;
+      rate_inc = p.initial.rate_inc;
+      rate_dec = p.initial.rate_dec;
+      min_inc = p.initial.min_inc;
+      min_dec = p.initial.min_dec;
+    }
+  } else if (low_noise_render) {
+    max_inc = p.low_noise.max_inc;
+    max_dec = p.low_noise.max_dec;
+    rate_inc = p.low_noise.rate_inc;
+    rate_dec = p.low_noise.rate_dec;
+    min_inc = p.low_noise.min_inc;
+    min_dec = p.low_noise.min_dec;
+  } else if (no_saturation_counter_ > 10) {
+    max_inc = p.normal.max_inc;
+    max_dec = p.normal.max_dec;
+    rate_inc = p.normal.rate_inc;
+    rate_dec = p.normal.rate_dec;
+    min_inc = p.normal.min_inc;
+    min_dec = p.normal.min_dec;
+  } else {
+    max_inc = p.saturation.max_inc;
+    max_dec = p.saturation.max_dec;
+    rate_inc = p.saturation.rate_inc;
+    rate_dec = p.saturation.rate_dec;
+    min_inc = p.saturation.min_inc;
+    min_dec = p.saturation.min_dec;
+  }
+
+  for (size_t k = 0; k < new_gain.size(); ++k) {
+    auto increase_update = [](float new_gain, float last_gain,
+                              float current_inc, float max_inc, float min_inc,
+                              float change_rate) {
+      return new_gain > last_gain ? std::min(max_inc, current_inc * change_rate)
+                                  : min_inc;
+    };
+
+    if (echo[k] > last_echo_[k]) {
+      gain_increase_[k] =
+          increase_update(new_gain[k], last_gain_[k], gain_increase_[k],
+                          max_inc, min_inc, rate_inc);
+    } else {
+      gain_increase_[k] =
+          increase_update(new_gain[k], last_gain_[k], gain_increase_[k],
+                          max_dec, min_dec, rate_dec);
+    }
+  }
+}
+
+// Detects when the render signal can be considered to have low power and
+// consist of stationary noise.
+bool SuppressionGain::LowNoiseRenderDetector::Detect(
+    const std::vector<std::vector<float>>& render) {
+  float x2_sum = 0.f;
+  float x2_max = 0.f;
+  for (auto x_k : render[0]) {
+    const float x2 = x_k * x_k;
+    x2_sum += x2;
+    x2_max = std::max(x2_max, x2);
+  }
+
+  constexpr float kThreshold = 50.f * 50.f * 64.f;
+  const bool low_noise_render =
+      average_power_ < kThreshold && x2_max < 3 * average_power_;
+  average_power_ = average_power_ * 0.9f + x2_sum * 0.1f;
+  return low_noise_render;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/suppression_gain.h b/modules/audio_processing/aec3/suppression_gain.h
new file mode 100644
index 0000000..6624c1c
--- /dev/null
+++ b/modules/audio_processing/aec3/suppression_gain.h
@@ -0,0 +1,83 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_GAIN_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_GAIN_H_
+
+#include <array>
+#include <vector>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/render_signal_analyzer.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class SuppressionGain {
+ public:
+  SuppressionGain(const EchoCanceller3Config& config,
+                  Aec3Optimization optimization);
+  void GetGain(const std::array<float, kFftLengthBy2Plus1>& nearend,
+               const std::array<float, kFftLengthBy2Plus1>& echo,
+               const std::array<float, kFftLengthBy2Plus1>& comfort_noise,
+               const RenderSignalAnalyzer& render_signal_analyzer,
+               const AecState& aec_state,
+               const std::vector<std::vector<float>>& render,
+               float* high_bands_gain,
+               std::array<float, kFftLengthBy2Plus1>* low_band_gain);
+
+  // Toggles the usage of the initial state.
+  void SetInitialState(bool state);
+
+ private:
+  void LowerBandGain(bool stationary_with_low_power,
+                     const rtc::Optional<int>& narrow_peak_band,
+                     const AecState& aec_state,
+                     const std::array<float, kFftLengthBy2Plus1>& nearend,
+                     const std::array<float, kFftLengthBy2Plus1>& echo,
+                     const std::array<float, kFftLengthBy2Plus1>& comfort_noise,
+                     std::array<float, kFftLengthBy2Plus1>* gain);
+
+  // Limits the gain increase.
+  void UpdateGainIncrease(
+      bool low_noise_render,
+      bool linear_echo_estimate,
+      const std::array<float, kFftLengthBy2Plus1>& echo,
+      const std::array<float, kFftLengthBy2Plus1>& new_gain);
+
+  class LowNoiseRenderDetector {
+   public:
+    bool Detect(const std::vector<std::vector<float>>& render);
+
+   private:
+    float average_power_ = 32768.f * 32768.f;
+  };
+
+  const Aec3Optimization optimization_;
+  const EchoCanceller3Config config_;
+  const int state_change_duration_blocks_;
+  float one_by_state_change_duration_blocks_;
+  std::array<float, kFftLengthBy2Plus1> last_gain_;
+  std::array<float, kFftLengthBy2Plus1> last_masker_;
+  std::array<float, kFftLengthBy2Plus1> gain_increase_;
+  std::array<float, kFftLengthBy2Plus1> last_echo_;
+
+  LowNoiseRenderDetector low_render_detector_;
+  size_t no_saturation_counter_ = 0;
+  bool initial_state_ = true;
+  int initial_state_change_counter_ = 0;
+  RTC_DISALLOW_COPY_AND_ASSIGN(SuppressionGain);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_GAIN_H_
diff --git a/modules/audio_processing/aec3/suppression_gain_limiter.cc b/modules/audio_processing/aec3/suppression_gain_limiter.cc
new file mode 100644
index 0000000..643bb58
--- /dev/null
+++ b/modules/audio_processing/aec3/suppression_gain_limiter.cc
@@ -0,0 +1,83 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/suppression_gain_limiter.h"
+
+#include <math.h>
+#include <algorithm>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+
+namespace webrtc {
+namespace {
+
+// Computes the gain rampup factor to use.
+float ComputeGainRampupIncrease(
+    const EchoCanceller3Config::EchoRemovalControl::GainRampup& rampup_config) {
+  return powf(1.f / rampup_config.first_non_zero_gain,
+              1.f / rampup_config.non_zero_gain_blocks);
+}
+
+}  // namespace
+
+SuppressionGainUpperLimiter::SuppressionGainUpperLimiter(
+    const EchoCanceller3Config& config)
+    : rampup_config_(config.echo_removal_control.gain_rampup),
+      gain_rampup_increase_(ComputeGainRampupIncrease(rampup_config_)) {
+  Reset();
+}
+
+void SuppressionGainUpperLimiter::Reset() {
+  recent_reset_ = true;
+}
+
+void SuppressionGainUpperLimiter::Update(bool render_activity) {
+  if (recent_reset_ && !call_startup_phase_) {
+    // Only enforce 250 ms full suppression after in-call resets,
+    constexpr int kMuteFramesAfterReset = kNumBlocksPerSecond / 4;
+    realignment_counter_ = kMuteFramesAfterReset;
+  } else if (!active_render_seen_ && render_activity) {
+    // Enforce a tailormade suppression limiting during call startup.
+    active_render_seen_ = true;
+    realignment_counter_ = rampup_config_.full_gain_blocks;
+  } else if (realignment_counter_ > 0) {
+    if (--realignment_counter_ == 0) {
+      call_startup_phase_ = false;
+    }
+  }
+  recent_reset_ = false;
+
+  // Do not enforce any gain limit on the suppressor.
+  if (realignment_counter_ <= 0) {
+    suppressor_gain_limit_ = 1.f;
+    return;
+  }
+
+  // Enforce full suppression.
+  if (realignment_counter_ > rampup_config_.non_zero_gain_blocks ||
+      (!call_startup_phase_ && realignment_counter_ > 0)) {
+    suppressor_gain_limit_ = 0.f;
+    return;
+  }
+
+  // Start increasing the gain limit.
+  if (realignment_counter_ == rampup_config_.non_zero_gain_blocks) {
+    suppressor_gain_limit_ = rampup_config_.first_non_zero_gain;
+    return;
+  }
+
+  // Increase the gain limit until it reaches 1.f.
+  RTC_DCHECK_LT(0.f, suppressor_gain_limit_);
+  suppressor_gain_limit_ =
+      std::min(1.f, suppressor_gain_limit_ * gain_rampup_increase_);
+  RTC_DCHECK_GE(1.f, suppressor_gain_limit_);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/suppression_gain_limiter.h b/modules/audio_processing/aec3/suppression_gain_limiter.h
new file mode 100644
index 0000000..7a3f228
--- /dev/null
+++ b/modules/audio_processing/aec3/suppression_gain_limiter.h
@@ -0,0 +1,49 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_GAIN_LIMITER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_GAIN_LIMITER_H_
+
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Class for applying a smoothly increasing limit for the suppression gain
+// during call startup and after in-call resets.
+class SuppressionGainUpperLimiter {
+ public:
+  explicit SuppressionGainUpperLimiter(const EchoCanceller3Config& config);
+
+  // Reset the limiting behavior.
+  void Reset();
+
+  // Updates the limiting behavior for the current capture bloc.
+  void Update(bool render_activity);
+
+  // Returns the current suppressor gain limit.
+  float Limit() const { return suppressor_gain_limit_; }
+
+ private:
+  const EchoCanceller3Config::EchoRemovalControl::GainRampup rampup_config_;
+  const float gain_rampup_increase_;
+  bool call_startup_phase_ = true;
+  int realignment_counter_ = 0;
+  bool active_render_seen_ = false;
+  float suppressor_gain_limit_ = 1.f;
+  bool recent_reset_ = false;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(SuppressionGainUpperLimiter);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_SUPPRESSION_GAIN_LIMITER_H_
diff --git a/modules/audio_processing/aec3/suppression_gain_unittest.cc b/modules/audio_processing/aec3/suppression_gain_unittest.cc
new file mode 100644
index 0000000..0e48102
--- /dev/null
+++ b/modules/audio_processing/aec3/suppression_gain_unittest.cc
@@ -0,0 +1,126 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/suppression_gain.h"
+
+#include "modules/audio_processing/aec3/aec_state.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/aec3/subtractor.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+namespace aec3 {
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+// Verifies that the check for non-null output gains works.
+TEST(SuppressionGain, NullOutputGains) {
+  std::array<float, kFftLengthBy2Plus1> E2;
+  std::array<float, kFftLengthBy2Plus1> R2;
+  std::array<float, kFftLengthBy2Plus1> N2;
+  E2.fill(0.f);
+  R2.fill(0.f);
+  N2.fill(0.f);
+  float high_bands_gain;
+  AecState aec_state(EchoCanceller3Config{});
+  EXPECT_DEATH(
+      SuppressionGain(EchoCanceller3Config{}, DetectOptimization())
+          .GetGain(E2, R2, N2, RenderSignalAnalyzer((EchoCanceller3Config{})),
+                   aec_state,
+                   std::vector<std::vector<float>>(
+                       3, std::vector<float>(kBlockSize, 0.f)),
+                   &high_bands_gain, nullptr),
+      "");
+}
+
+#endif
+
+// Does a sanity check that the gains are correctly computed.
+TEST(SuppressionGain, BasicGainComputation) {
+  SuppressionGain suppression_gain(EchoCanceller3Config(),
+                                   DetectOptimization());
+  RenderSignalAnalyzer analyzer(EchoCanceller3Config{});
+  float high_bands_gain;
+  std::array<float, kFftLengthBy2Plus1> E2;
+  std::array<float, kFftLengthBy2Plus1> Y2;
+  std::array<float, kFftLengthBy2Plus1> R2;
+  std::array<float, kFftLengthBy2Plus1> N2;
+  std::array<float, kFftLengthBy2Plus1> g;
+  std::array<float, kBlockSize> s;
+  std::vector<std::vector<float>> x(1, std::vector<float>(kBlockSize, 0.f));
+  EchoCanceller3Config config;
+  AecState aec_state(config);
+  ApmDataDumper data_dumper(42);
+  Subtractor subtractor(config, &data_dumper, DetectOptimization());
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create(config, 3));
+  rtc::Optional<DelayEstimate> delay_estimate;
+
+  // Ensure that a strong noise is detected to mask any echoes.
+  E2.fill(10.f);
+  Y2.fill(10.f);
+  R2.fill(0.1f);
+  N2.fill(100.f);
+  s.fill(10.f);
+
+  // Ensure that the gain is no longer forced to zero.
+  for (int k = 0; k <= kNumBlocksPerSecond / 5 + 1; ++k) {
+    aec_state.Update(delay_estimate, subtractor.FilterFrequencyResponse(),
+                     subtractor.FilterImpulseResponse(),
+                     subtractor.ConvergedFilter(),
+                     *render_delay_buffer->GetRenderBuffer(), E2, Y2, s, false);
+  }
+
+  for (int k = 0; k < 100; ++k) {
+    aec_state.Update(delay_estimate, subtractor.FilterFrequencyResponse(),
+                     subtractor.FilterImpulseResponse(),
+                     subtractor.ConvergedFilter(),
+                     *render_delay_buffer->GetRenderBuffer(), E2, Y2, s, false);
+    suppression_gain.GetGain(E2, R2, N2, analyzer, aec_state, x,
+                             &high_bands_gain, &g);
+  }
+  std::for_each(g.begin(), g.end(),
+                [](float a) { EXPECT_NEAR(1.f, a, 0.001); });
+
+  // Ensure that a strong nearend is detected to mask any echoes.
+  E2.fill(100.f);
+  Y2.fill(100.f);
+  R2.fill(0.1f);
+  N2.fill(0.f);
+  for (int k = 0; k < 100; ++k) {
+    aec_state.Update(delay_estimate, subtractor.FilterFrequencyResponse(),
+                     subtractor.FilterImpulseResponse(),
+                     subtractor.ConvergedFilter(),
+                     *render_delay_buffer->GetRenderBuffer(), E2, Y2, s, false);
+    suppression_gain.GetGain(E2, R2, N2, analyzer, aec_state, x,
+                             &high_bands_gain, &g);
+  }
+  std::for_each(g.begin(), g.end(),
+                [](float a) { EXPECT_NEAR(1.f, a, 0.001); });
+
+  // Ensure that a strong echo is suppressed.
+  E2.fill(1000000000.f);
+  R2.fill(10000000000000.f);
+  N2.fill(0.f);
+  for (int k = 0; k < 10; ++k) {
+    suppression_gain.GetGain(E2, R2, N2, analyzer, aec_state, x,
+                             &high_bands_gain, &g);
+  }
+  std::for_each(g.begin(), g.end(),
+                [](float a) { EXPECT_NEAR(0.f, a, 0.001); });
+
+}
+
+}  // namespace aec3
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/vector_buffer.cc b/modules/audio_processing/aec3/vector_buffer.cc
new file mode 100644
index 0000000..f491168
--- /dev/null
+++ b/modules/audio_processing/aec3/vector_buffer.cc
@@ -0,0 +1,27 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/vector_buffer.h"
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+
+namespace webrtc {
+
+VectorBuffer::VectorBuffer(size_t size, size_t height)
+    : size(static_cast<int>(size)),
+      buffer(size, std::vector<float>(height, 0.f)) {
+  for (auto& c : buffer) {
+    std::fill(c.begin(), c.end(), 0.f);
+  }
+}
+
+VectorBuffer::~VectorBuffer() = default;
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/vector_buffer.h b/modules/audio_processing/aec3/vector_buffer.h
new file mode 100644
index 0000000..a7f9932
--- /dev/null
+++ b/modules/audio_processing/aec3/vector_buffer.h
@@ -0,0 +1,57 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_VECTOR_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_VECTOR_BUFFER_H_
+
+#include <vector>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Struct for bundling a circular buffer of one dimensional vector objects
+// together with the read and write indices.
+struct VectorBuffer {
+  VectorBuffer(size_t size, size_t height);
+  ~VectorBuffer();
+
+  int IncIndex(int index) const {
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    return index < size - 1 ? index + 1 : 0;
+  }
+
+  int DecIndex(int index) const {
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    return index > 0 ? index - 1 : size - 1;
+  }
+
+  int OffsetIndex(int index, int offset) const {
+    RTC_DCHECK_GE(size, offset);
+    RTC_DCHECK_EQ(buffer.size(), static_cast<size_t>(size));
+    return (size + index + offset) % size;
+  }
+
+  void UpdateWriteIndex(int offset) { write = OffsetIndex(write, offset); }
+  void IncWriteIndex() { write = IncIndex(write); }
+  void DecWriteIndex() { write = DecIndex(write); }
+  void UpdateReadIndex(int offset) { read = OffsetIndex(read, offset); }
+  void IncReadIndex() { read = IncIndex(read); }
+  void DecReadIndex() { read = DecIndex(read); }
+
+  const int size;
+  std::vector<std::vector<float>> buffer;
+  int write = 0;
+  int read = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_VECTOR_BUFFER_H_
diff --git a/modules/audio_processing/aec3/vector_math.h b/modules/audio_processing/aec3/vector_math.h
new file mode 100644
index 0000000..0672b51
--- /dev/null
+++ b/modules/audio_processing/aec3/vector_math.h
@@ -0,0 +1,212 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_VECTOR_MATH_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_VECTOR_MATH_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+#if defined(WEBRTC_HAS_NEON)
+#include <arm_neon.h>
+#endif
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#include <emmintrin.h>
+#endif
+#include <math.h>
+#include <algorithm>
+#include <array>
+#include <functional>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace aec3 {
+
+// Provides optimizations for mathematical operations based on vectors.
+class VectorMath {
+ public:
+  explicit VectorMath(Aec3Optimization optimization)
+      : optimization_(optimization) {}
+
+  // Elementwise square root.
+  void Sqrt(rtc::ArrayView<float> x) {
+    switch (optimization_) {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+      case Aec3Optimization::kSse2: {
+        const int x_size = static_cast<int>(x.size());
+        const int vector_limit = x_size >> 2;
+
+        int j = 0;
+        for (; j < vector_limit * 4; j += 4) {
+          __m128 g = _mm_loadu_ps(&x[j]);
+          g = _mm_sqrt_ps(g);
+          _mm_storeu_ps(&x[j], g);
+        }
+
+        for (; j < x_size; ++j) {
+          x[j] = sqrtf(x[j]);
+        }
+      } break;
+#endif
+#if defined(WEBRTC_HAS_NEON)
+      case Aec3Optimization::kNeon: {
+        const int x_size = static_cast<int>(x.size());
+        const int vector_limit = x_size >> 2;
+
+        int j = 0;
+        for (; j < vector_limit * 4; j += 4) {
+          float32x4_t g = vld1q_f32(&x[j]);
+#if !defined(WEBRTC_ARCH_ARM64)
+          float32x4_t y = vrsqrteq_f32(g);
+
+          // Code to handle sqrt(0).
+          // If the input to sqrtf() is zero, a zero will be returned.
+          // If the input to vrsqrteq_f32() is zero, positive infinity is
+          // returned.
+          const uint32x4_t vec_p_inf = vdupq_n_u32(0x7F800000);
+          // check for divide by zero
+          const uint32x4_t div_by_zero =
+              vceqq_u32(vec_p_inf, vreinterpretq_u32_f32(y));
+          // zero out the positive infinity results
+          y = vreinterpretq_f32_u32(
+              vandq_u32(vmvnq_u32(div_by_zero), vreinterpretq_u32_f32(y)));
+          // from arm documentation
+          // The Newton-Raphson iteration:
+          //     y[n+1] = y[n] * (3 - d * (y[n] * y[n])) / 2)
+          // converges to (1/√d) if y0 is the result of VRSQRTE applied to d.
+          //
+          // Note: The precision did not improve after 2 iterations.
+          for (int i = 0; i < 2; i++) {
+            y = vmulq_f32(vrsqrtsq_f32(vmulq_f32(y, y), g), y);
+          }
+          // sqrt(g) = g * 1/sqrt(g)
+          g = vmulq_f32(g, y);
+#else
+          g = vsqrtq_f32(g);
+#endif
+          vst1q_f32(&x[j], g);
+        }
+
+        for (; j < x_size; ++j) {
+          x[j] = sqrtf(x[j]);
+        }
+      }
+#endif
+      break;
+      default:
+        std::for_each(x.begin(), x.end(), [](float& a) { a = sqrtf(a); });
+    }
+  }
+
+  // Elementwise vector multiplication z = x * y.
+  void Multiply(rtc::ArrayView<const float> x,
+                rtc::ArrayView<const float> y,
+                rtc::ArrayView<float> z) {
+    RTC_DCHECK_EQ(z.size(), x.size());
+    RTC_DCHECK_EQ(z.size(), y.size());
+    switch (optimization_) {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+      case Aec3Optimization::kSse2: {
+        const int x_size = static_cast<int>(x.size());
+        const int vector_limit = x_size >> 2;
+
+        int j = 0;
+        for (; j < vector_limit * 4; j += 4) {
+          const __m128 x_j = _mm_loadu_ps(&x[j]);
+          const __m128 y_j = _mm_loadu_ps(&y[j]);
+          const __m128 z_j = _mm_mul_ps(x_j, y_j);
+          _mm_storeu_ps(&z[j], z_j);
+        }
+
+        for (; j < x_size; ++j) {
+          z[j] = x[j] * y[j];
+        }
+      } break;
+#endif
+#if defined(WEBRTC_HAS_NEON)
+      case Aec3Optimization::kNeon: {
+        const int x_size = static_cast<int>(x.size());
+        const int vector_limit = x_size >> 2;
+
+        int j = 0;
+        for (; j < vector_limit * 4; j += 4) {
+          const float32x4_t x_j = vld1q_f32(&x[j]);
+          const float32x4_t y_j = vld1q_f32(&y[j]);
+          const float32x4_t z_j = vmulq_f32(x_j, y_j);
+          vst1q_f32(&z[j], z_j);
+        }
+
+        for (; j < x_size; ++j) {
+          z[j] = x[j] * y[j];
+        }
+      } break;
+#endif
+      default:
+        std::transform(x.begin(), x.end(), y.begin(), z.begin(),
+                       std::multiplies<float>());
+    }
+  }
+
+  // Elementwise vector accumulation z += x.
+  void Accumulate(rtc::ArrayView<const float> x, rtc::ArrayView<float> z) {
+    RTC_DCHECK_EQ(z.size(), x.size());
+    switch (optimization_) {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+      case Aec3Optimization::kSse2: {
+        const int x_size = static_cast<int>(x.size());
+        const int vector_limit = x_size >> 2;
+
+        int j = 0;
+        for (; j < vector_limit * 4; j += 4) {
+          const __m128 x_j = _mm_loadu_ps(&x[j]);
+          __m128 z_j = _mm_loadu_ps(&z[j]);
+          z_j = _mm_add_ps(x_j, z_j);
+          _mm_storeu_ps(&z[j], z_j);
+        }
+
+        for (; j < x_size; ++j) {
+          z[j] += x[j];
+        }
+      } break;
+#endif
+#if defined(WEBRTC_HAS_NEON)
+      case Aec3Optimization::kNeon: {
+        const int x_size = static_cast<int>(x.size());
+        const int vector_limit = x_size >> 2;
+
+        int j = 0;
+        for (; j < vector_limit * 4; j += 4) {
+          const float32x4_t x_j = vld1q_f32(&x[j]);
+          float32x4_t z_j = vld1q_f32(&z[j]);
+          z_j = vaddq_f32(z_j, x_j);
+          vst1q_f32(&z[j], z_j);
+        }
+
+        for (; j < x_size; ++j) {
+          z[j] += x[j];
+        }
+      } break;
+#endif
+      default:
+        std::transform(x.begin(), x.end(), z.begin(), z.begin(),
+                       std::plus<float>());
+    }
+  }
+
+ private:
+  Aec3Optimization optimization_;
+};
+
+}  // namespace aec3
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_VECTOR_MATH_H_
diff --git a/modules/audio_processing/aec3/vector_math_unittest.cc b/modules/audio_processing/aec3/vector_math_unittest.cc
new file mode 100644
index 0000000..6bf60ec
--- /dev/null
+++ b/modules/audio_processing/aec3/vector_math_unittest.cc
@@ -0,0 +1,146 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/vector_math.h"
+
+#include <math.h>
+
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+#if defined(WEBRTC_HAS_NEON)
+
+TEST(VectorMath, Sqrt) {
+  std::array<float, kFftLengthBy2Plus1> x;
+  std::array<float, kFftLengthBy2Plus1> z;
+  std::array<float, kFftLengthBy2Plus1> z_neon;
+
+  for (size_t k = 0; k < x.size(); ++k) {
+    x[k] = (2.f / 3.f) * k;
+  }
+
+  std::copy(x.begin(), x.end(), z.begin());
+  aec3::VectorMath(Aec3Optimization::kNone).Sqrt(z);
+  std::copy(x.begin(), x.end(), z_neon.begin());
+  aec3::VectorMath(Aec3Optimization::kNeon).Sqrt(z_neon);
+  for (size_t k = 0; k < z.size(); ++k) {
+    EXPECT_NEAR(z[k], z_neon[k], 0.0001f);
+    EXPECT_NEAR(sqrtf(x[k]), z_neon[k], 0.0001f);
+  }
+}
+
+TEST(VectorMath, Multiply) {
+  std::array<float, kFftLengthBy2Plus1> x;
+  std::array<float, kFftLengthBy2Plus1> y;
+  std::array<float, kFftLengthBy2Plus1> z;
+  std::array<float, kFftLengthBy2Plus1> z_neon;
+
+  for (size_t k = 0; k < x.size(); ++k) {
+    x[k] = k;
+    y[k] = (2.f / 3.f) * k;
+  }
+
+  aec3::VectorMath(Aec3Optimization::kNone).Multiply(x, y, z);
+  aec3::VectorMath(Aec3Optimization::kNeon).Multiply(x, y, z_neon);
+  for (size_t k = 0; k < z.size(); ++k) {
+    EXPECT_FLOAT_EQ(z[k], z_neon[k]);
+    EXPECT_FLOAT_EQ(x[k] * y[k], z_neon[k]);
+  }
+}
+
+TEST(VectorMath, Accumulate) {
+  std::array<float, kFftLengthBy2Plus1> x;
+  std::array<float, kFftLengthBy2Plus1> z;
+  std::array<float, kFftLengthBy2Plus1> z_neon;
+
+  for (size_t k = 0; k < x.size(); ++k) {
+    x[k] = k;
+    z[k] = z_neon[k] = 2.f * k;
+  }
+
+  aec3::VectorMath(Aec3Optimization::kNone).Accumulate(x, z);
+  aec3::VectorMath(Aec3Optimization::kNeon).Accumulate(x, z_neon);
+  for (size_t k = 0; k < z.size(); ++k) {
+    EXPECT_FLOAT_EQ(z[k], z_neon[k]);
+    EXPECT_FLOAT_EQ(x[k] + 2.f * x[k], z_neon[k]);
+  }
+}
+#endif
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+
+TEST(VectorMath, Sqrt) {
+  if (WebRtc_GetCPUInfo(kSSE2) != 0) {
+    std::array<float, kFftLengthBy2Plus1> x;
+    std::array<float, kFftLengthBy2Plus1> z;
+    std::array<float, kFftLengthBy2Plus1> z_sse2;
+
+    for (size_t k = 0; k < x.size(); ++k) {
+      x[k] = (2.f / 3.f) * k;
+    }
+
+    std::copy(x.begin(), x.end(), z.begin());
+    aec3::VectorMath(Aec3Optimization::kNone).Sqrt(z);
+    std::copy(x.begin(), x.end(), z_sse2.begin());
+    aec3::VectorMath(Aec3Optimization::kSse2).Sqrt(z_sse2);
+    EXPECT_EQ(z, z_sse2);
+    for (size_t k = 0; k < z.size(); ++k) {
+      EXPECT_FLOAT_EQ(z[k], z_sse2[k]);
+      EXPECT_FLOAT_EQ(sqrtf(x[k]), z_sse2[k]);
+    }
+  }
+}
+
+TEST(VectorMath, Multiply) {
+  if (WebRtc_GetCPUInfo(kSSE2) != 0) {
+    std::array<float, kFftLengthBy2Plus1> x;
+    std::array<float, kFftLengthBy2Plus1> y;
+    std::array<float, kFftLengthBy2Plus1> z;
+    std::array<float, kFftLengthBy2Plus1> z_sse2;
+
+    for (size_t k = 0; k < x.size(); ++k) {
+      x[k] = k;
+      y[k] = (2.f / 3.f) * k;
+    }
+
+    aec3::VectorMath(Aec3Optimization::kNone).Multiply(x, y, z);
+    aec3::VectorMath(Aec3Optimization::kSse2).Multiply(x, y, z_sse2);
+    for (size_t k = 0; k < z.size(); ++k) {
+      EXPECT_FLOAT_EQ(z[k], z_sse2[k]);
+      EXPECT_FLOAT_EQ(x[k] * y[k], z_sse2[k]);
+    }
+  }
+}
+
+TEST(VectorMath, Accumulate) {
+  if (WebRtc_GetCPUInfo(kSSE2) != 0) {
+    std::array<float, kFftLengthBy2Plus1> x;
+    std::array<float, kFftLengthBy2Plus1> z;
+    std::array<float, kFftLengthBy2Plus1> z_sse2;
+
+    for (size_t k = 0; k < x.size(); ++k) {
+      x[k] = k;
+      z[k] = z_sse2[k] = 2.f * k;
+    }
+
+    aec3::VectorMath(Aec3Optimization::kNone).Accumulate(x, z);
+    aec3::VectorMath(Aec3Optimization::kSse2).Accumulate(x, z_sse2);
+    for (size_t k = 0; k < z.size(); ++k) {
+      EXPECT_FLOAT_EQ(z[k], z_sse2[k]);
+      EXPECT_FLOAT_EQ(x[k] + 2.f * x[k], z_sse2[k]);
+    }
+  }
+}
+#endif
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec_dump/BUILD.gn b/modules/audio_processing/aec_dump/BUILD.gn
new file mode 100644
index 0000000..7afaaf4
--- /dev/null
+++ b/modules/audio_processing/aec_dump/BUILD.gn
@@ -0,0 +1,107 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")  # This contains def of 'rtc_enable_protobuf'
+
+rtc_source_set("aec_dump") {
+  visibility = [ "*" ]
+  sources = [
+    "aec_dump_factory.h",
+  ]
+
+  deps = [
+    "..:aec_dump_interface",
+    "../../../rtc_base:rtc_base_approved",
+  ]
+}
+
+rtc_source_set("mock_aec_dump") {
+  testonly = true
+  sources = [
+    "mock_aec_dump.cc",
+    "mock_aec_dump.h",
+  ]
+
+  deps = [
+    "..:aec_dump_interface",
+    "../..:module_api",
+    "../../../test:test_support",
+  ]
+}
+
+rtc_source_set("mock_aec_dump_unittests") {
+  testonly = true
+
+  sources = [
+    "aec_dump_integration_test.cc",
+  ]
+
+  deps = [
+    ":mock_aec_dump",
+    "..:audio_processing",
+    "../../../rtc_base:rtc_base_approved",
+    "//testing/gtest",
+  ]
+}
+
+if (rtc_enable_protobuf) {
+  rtc_source_set("aec_dump_impl") {
+    sources = [
+      "aec_dump_impl.cc",
+      "aec_dump_impl.h",
+      "capture_stream_info.cc",
+      "capture_stream_info.h",
+      "write_to_file_task.cc",
+      "write_to_file_task.h",
+    ]
+
+    deps = [
+      ":aec_dump",
+      "..:aec_dump_interface",
+      "../../../modules:module_api",
+      "../../../rtc_base:checks",
+      "../../../rtc_base:protobuf_utils",
+      "../../../rtc_base:rtc_base_approved",
+      "../../../rtc_base:rtc_task_queue",
+      "../../../system_wrappers",
+    ]
+
+    deps += [ "../:audioproc_debug_proto" ]
+  }
+
+  rtc_source_set("aec_dump_unittests") {
+    testonly = true
+    defines = []
+    deps = [
+      ":aec_dump",
+      ":aec_dump_impl",
+      "..:aec_dump_interface",
+      "..:audioproc_debug_proto",
+      "../../../modules:module_api",
+      "../../../rtc_base:rtc_task_queue",
+      "../../../test:fileutils",
+      "../../../test:test_support",
+      "//testing/gtest",
+    ]
+    sources = [
+      "aec_dump_unittest.cc",
+    ]
+  }
+}
+
+rtc_source_set("null_aec_dump_factory") {
+  assert_no_deps = [ ":aec_dump_impl" ]
+  sources = [
+    "null_aec_dump_factory.cc",
+  ]
+
+  deps = [
+    ":aec_dump",
+    "..:aec_dump_interface",
+  ]
+}
diff --git a/modules/audio_processing/aec_dump/aec_dump_factory.h b/modules/audio_processing/aec_dump/aec_dump_factory.h
new file mode 100644
index 0000000..e3f00f6
--- /dev/null
+++ b/modules/audio_processing/aec_dump/aec_dump_factory.h
@@ -0,0 +1,47 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC_DUMP_AEC_DUMP_FACTORY_H_
+#define MODULES_AUDIO_PROCESSING_AEC_DUMP_AEC_DUMP_FACTORY_H_
+
+#include <memory>
+#include <string>
+
+#include "modules/audio_processing/include/aec_dump.h"
+#include "rtc_base/platform_file.h"
+
+namespace rtc {
+class TaskQueue;
+}  // namespace rtc
+
+namespace webrtc {
+
+class AecDumpFactory {
+ public:
+  // The |worker_queue| may not be null and must outlive the created
+  // AecDump instance. |max_log_size_bytes == -1| means the log size
+  // will be unlimited. |handle| may not be null. The AecDump takes
+  // responsibility for |handle| and closes it in the destructor. A
+  // non-null return value indicates that the file has been
+  // sucessfully opened.
+  static std::unique_ptr<AecDump> Create(rtc::PlatformFile file,
+                                         int64_t max_log_size_bytes,
+                                         rtc::TaskQueue* worker_queue);
+  static std::unique_ptr<AecDump> Create(std::string file_name,
+                                         int64_t max_log_size_bytes,
+                                         rtc::TaskQueue* worker_queue);
+  static std::unique_ptr<AecDump> Create(FILE* handle,
+                                         int64_t max_log_size_bytes,
+                                         rtc::TaskQueue* worker_queue);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC_DUMP_AEC_DUMP_FACTORY_H_
diff --git a/modules/audio_processing/aec_dump/aec_dump_impl.cc b/modules/audio_processing/aec_dump/aec_dump_impl.cc
new file mode 100644
index 0000000..4deb192
--- /dev/null
+++ b/modules/audio_processing/aec_dump/aec_dump_impl.cc
@@ -0,0 +1,210 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <utility>
+
+#include "modules/audio_processing/aec_dump/aec_dump_impl.h"
+
+#include "modules/audio_processing/aec_dump/aec_dump_factory.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/event.h"
+#include "rtc_base/ptr_util.h"
+
+namespace webrtc {
+
+namespace {
+void CopyFromConfigToEvent(const webrtc::InternalAPMConfig& config,
+                           webrtc::audioproc::Config* pb_cfg) {
+  pb_cfg->set_aec_enabled(config.aec_enabled);
+  pb_cfg->set_aec_delay_agnostic_enabled(config.aec_delay_agnostic_enabled);
+  pb_cfg->set_aec_drift_compensation_enabled(
+      config.aec_drift_compensation_enabled);
+  pb_cfg->set_aec_extended_filter_enabled(config.aec_extended_filter_enabled);
+  pb_cfg->set_aec_suppression_level(config.aec_suppression_level);
+
+  pb_cfg->set_aecm_enabled(config.aecm_enabled);
+  pb_cfg->set_aecm_comfort_noise_enabled(config.aecm_comfort_noise_enabled);
+  pb_cfg->set_aecm_routing_mode(config.aecm_routing_mode);
+
+  pb_cfg->set_agc_enabled(config.agc_enabled);
+  pb_cfg->set_agc_mode(config.agc_mode);
+  pb_cfg->set_agc_limiter_enabled(config.agc_limiter_enabled);
+  pb_cfg->set_noise_robust_agc_enabled(config.noise_robust_agc_enabled);
+
+  pb_cfg->set_hpf_enabled(config.hpf_enabled);
+
+  pb_cfg->set_ns_enabled(config.ns_enabled);
+  pb_cfg->set_ns_level(config.ns_level);
+
+  pb_cfg->set_transient_suppression_enabled(
+      config.transient_suppression_enabled);
+  pb_cfg->set_intelligibility_enhancer_enabled(
+      config.intelligibility_enhancer_enabled);
+
+  pb_cfg->set_experiments_description(config.experiments_description);
+}
+
+}  // namespace
+
+AecDumpImpl::AecDumpImpl(std::unique_ptr<FileWrapper> debug_file,
+                         int64_t max_log_size_bytes,
+                         rtc::TaskQueue* worker_queue)
+    : debug_file_(std::move(debug_file)),
+      num_bytes_left_for_log_(max_log_size_bytes),
+      worker_queue_(worker_queue),
+      capture_stream_info_(CreateWriteToFileTask()) {}
+
+AecDumpImpl::~AecDumpImpl() {
+  // Block until all tasks have finished running.
+  rtc::Event thread_sync_event(false /* manual_reset */, false);
+  worker_queue_->PostTask([&thread_sync_event] { thread_sync_event.Set(); });
+  // Wait until the event has been signaled with .Set(). By then all
+  // pending tasks will have finished.
+  thread_sync_event.Wait(rtc::Event::kForever);
+}
+
+void AecDumpImpl::WriteInitMessage(
+    const InternalAPMStreamsConfig& streams_config) {
+  auto task = CreateWriteToFileTask();
+  auto* event = task->GetEvent();
+  event->set_type(audioproc::Event::INIT);
+  audioproc::Init* msg = event->mutable_init();
+
+  msg->set_sample_rate(streams_config.input_sample_rate);
+  msg->set_output_sample_rate(streams_config.output_sample_rate);
+  msg->set_reverse_sample_rate(streams_config.render_input_sample_rate);
+  msg->set_reverse_output_sample_rate(streams_config.render_output_sample_rate);
+
+  msg->set_num_input_channels(
+      static_cast<int32_t>(streams_config.input_num_channels));
+  msg->set_num_output_channels(
+      static_cast<int32_t>(streams_config.output_num_channels));
+  msg->set_num_reverse_channels(
+      static_cast<int32_t>(streams_config.render_input_num_channels));
+  msg->set_num_reverse_output_channels(
+      streams_config.render_output_num_channels);
+
+  worker_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(std::move(task)));
+}
+
+void AecDumpImpl::AddCaptureStreamInput(
+    const AudioFrameView<const float>& src) {
+  capture_stream_info_.AddInput(src);
+}
+
+void AecDumpImpl::AddCaptureStreamOutput(
+    const AudioFrameView<const float>& src) {
+  capture_stream_info_.AddOutput(src);
+}
+
+void AecDumpImpl::AddCaptureStreamInput(const AudioFrame& frame) {
+  capture_stream_info_.AddInput(frame);
+}
+
+void AecDumpImpl::AddCaptureStreamOutput(const AudioFrame& frame) {
+  capture_stream_info_.AddOutput(frame);
+}
+
+void AecDumpImpl::AddAudioProcessingState(const AudioProcessingState& state) {
+  capture_stream_info_.AddAudioProcessingState(state);
+}
+
+void AecDumpImpl::WriteCaptureStreamMessage() {
+  auto task = capture_stream_info_.GetTask();
+  RTC_DCHECK(task);
+  worker_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(std::move(task)));
+  capture_stream_info_.SetTask(CreateWriteToFileTask());
+}
+
+void AecDumpImpl::WriteRenderStreamMessage(const AudioFrame& frame) {
+  auto task = CreateWriteToFileTask();
+  auto* event = task->GetEvent();
+
+  event->set_type(audioproc::Event::REVERSE_STREAM);
+  audioproc::ReverseStream* msg = event->mutable_reverse_stream();
+  const size_t data_size =
+      sizeof(int16_t) * frame.samples_per_channel_ * frame.num_channels_;
+  msg->set_data(frame.data(), data_size);
+
+  worker_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(std::move(task)));
+}
+
+void AecDumpImpl::WriteRenderStreamMessage(
+    const AudioFrameView<const float>& src) {
+  auto task = CreateWriteToFileTask();
+  auto* event = task->GetEvent();
+
+  event->set_type(audioproc::Event::REVERSE_STREAM);
+
+  audioproc::ReverseStream* msg = event->mutable_reverse_stream();
+
+  for (size_t i = 0; i < src.num_channels(); ++i) {
+    const auto& channel_view = src.channel(i);
+    msg->add_channel(channel_view.begin(), sizeof(float) * channel_view.size());
+  }
+
+  worker_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(std::move(task)));
+}
+
+void AecDumpImpl::WriteConfig(const InternalAPMConfig& config) {
+  RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
+  auto task = CreateWriteToFileTask();
+  auto* event = task->GetEvent();
+  event->set_type(audioproc::Event::CONFIG);
+  CopyFromConfigToEvent(config, event->mutable_config());
+  worker_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(std::move(task)));
+}
+
+std::unique_ptr<WriteToFileTask> AecDumpImpl::CreateWriteToFileTask() {
+  return rtc::MakeUnique<WriteToFileTask>(debug_file_.get(),
+                                          &num_bytes_left_for_log_);
+}
+
+std::unique_ptr<AecDump> AecDumpFactory::Create(rtc::PlatformFile file,
+                                                int64_t max_log_size_bytes,
+                                                rtc::TaskQueue* worker_queue) {
+  RTC_DCHECK(worker_queue);
+  std::unique_ptr<FileWrapper> debug_file(FileWrapper::Create());
+  FILE* handle = rtc::FdopenPlatformFileForWriting(file);
+  if (!handle) {
+    return nullptr;
+  }
+  if (!debug_file->OpenFromFileHandle(handle)) {
+    return nullptr;
+  }
+  return rtc::MakeUnique<AecDumpImpl>(std::move(debug_file), max_log_size_bytes,
+                                      worker_queue);
+}
+
+std::unique_ptr<AecDump> AecDumpFactory::Create(std::string file_name,
+                                                int64_t max_log_size_bytes,
+                                                rtc::TaskQueue* worker_queue) {
+  RTC_DCHECK(worker_queue);
+  std::unique_ptr<FileWrapper> debug_file(FileWrapper::Create());
+  if (!debug_file->OpenFile(file_name.c_str(), false)) {
+    return nullptr;
+  }
+  return rtc::MakeUnique<AecDumpImpl>(std::move(debug_file), max_log_size_bytes,
+                                      worker_queue);
+}
+
+std::unique_ptr<AecDump> AecDumpFactory::Create(FILE* handle,
+                                                int64_t max_log_size_bytes,
+                                                rtc::TaskQueue* worker_queue) {
+  RTC_DCHECK(worker_queue);
+  RTC_DCHECK(handle);
+  std::unique_ptr<FileWrapper> debug_file(FileWrapper::Create());
+  if (!debug_file->OpenFromFileHandle(handle)) {
+    return nullptr;
+  }
+  return rtc::MakeUnique<AecDumpImpl>(std::move(debug_file), max_log_size_bytes,
+                                      worker_queue);
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec_dump/aec_dump_impl.h b/modules/audio_processing/aec_dump/aec_dump_impl.h
new file mode 100644
index 0000000..36d72e9
--- /dev/null
+++ b/modules/audio_processing/aec_dump/aec_dump_impl.h
@@ -0,0 +1,81 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC_DUMP_AEC_DUMP_IMPL_H_
+#define MODULES_AUDIO_PROCESSING_AEC_DUMP_AEC_DUMP_IMPL_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "modules/audio_processing/aec_dump/capture_stream_info.h"
+#include "modules/audio_processing/aec_dump/write_to_file_task.h"
+#include "modules/audio_processing/include/aec_dump.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/ignore_wundef.h"
+#include "rtc_base/platform_file.h"
+#include "rtc_base/race_checker.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/file_wrapper.h"
+
+// Files generated at build-time by the protobuf compiler.
+RTC_PUSH_IGNORING_WUNDEF()
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h"
+#else
+#include "modules/audio_processing/debug.pb.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+
+namespace rtc {
+class TaskQueue;
+}  // namespace rtc
+
+namespace webrtc {
+
+// Task-queue based implementation of AecDump. It is thread safe by
+// relying on locks in TaskQueue.
+class AecDumpImpl : public AecDump {
+ public:
+  // Does member variables initialization shared across all c-tors.
+  AecDumpImpl(std::unique_ptr<FileWrapper> debug_file,
+              int64_t max_log_size_bytes,
+              rtc::TaskQueue* worker_queue);
+
+  ~AecDumpImpl() override;
+
+  void WriteInitMessage(const InternalAPMStreamsConfig& api_format) override;
+
+  void AddCaptureStreamInput(const AudioFrameView<const float>& src) override;
+  void AddCaptureStreamOutput(const AudioFrameView<const float>& src) override;
+  void AddCaptureStreamInput(const AudioFrame& frame) override;
+  void AddCaptureStreamOutput(const AudioFrame& frame) override;
+  void AddAudioProcessingState(const AudioProcessingState& state) override;
+  void WriteCaptureStreamMessage() override;
+
+  void WriteRenderStreamMessage(const AudioFrame& frame) override;
+  void WriteRenderStreamMessage(
+      const AudioFrameView<const float>& src) override;
+
+  void WriteConfig(const InternalAPMConfig& config) override;
+
+ private:
+  std::unique_ptr<WriteToFileTask> CreateWriteToFileTask();
+
+  std::unique_ptr<FileWrapper> debug_file_;
+  int64_t num_bytes_left_for_log_ = 0;
+  rtc::RaceChecker race_checker_;
+  rtc::TaskQueue* worker_queue_;
+  CaptureStreamInfo capture_stream_info_;
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC_DUMP_AEC_DUMP_IMPL_H_
diff --git a/modules/audio_processing/aec_dump/aec_dump_integration_test.cc b/modules/audio_processing/aec_dump/aec_dump_integration_test.cc
new file mode 100644
index 0000000..a7d53b5
--- /dev/null
+++ b/modules/audio_processing/aec_dump/aec_dump_integration_test.cc
@@ -0,0 +1,91 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <utility>
+
+#include "modules/audio_processing/aec_dump/mock_aec_dump.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "rtc_base/ptr_util.h"
+
+using testing::_;
+using testing::AtLeast;
+using testing::Exactly;
+using testing::Matcher;
+using testing::StrictMock;
+
+namespace {
+std::unique_ptr<webrtc::AudioProcessing> CreateAudioProcessing() {
+  webrtc::Config config;
+  std::unique_ptr<webrtc::AudioProcessing> apm(
+      webrtc::AudioProcessingBuilder().Create(config));
+  RTC_DCHECK(apm);
+  return apm;
+}
+
+std::unique_ptr<webrtc::test::MockAecDump> CreateMockAecDump() {
+  auto mock_aec_dump =
+      rtc::MakeUnique<testing::StrictMock<webrtc::test::MockAecDump>>();
+  EXPECT_CALL(*mock_aec_dump.get(), WriteConfig(_)).Times(AtLeast(1));
+  EXPECT_CALL(*mock_aec_dump.get(), WriteInitMessage(_)).Times(AtLeast(1));
+  return std::unique_ptr<webrtc::test::MockAecDump>(std::move(mock_aec_dump));
+}
+
+std::unique_ptr<webrtc::AudioFrame> CreateFakeFrame() {
+  auto fake_frame = rtc::MakeUnique<webrtc::AudioFrame>();
+  fake_frame->num_channels_ = 1;
+  fake_frame->sample_rate_hz_ = 48000;
+  fake_frame->samples_per_channel_ = 480;
+  return fake_frame;
+}
+
+}  // namespace
+
+TEST(AecDumpIntegration, ConfigurationAndInitShouldBeLogged) {
+  auto apm = CreateAudioProcessing();
+
+  apm->AttachAecDump(CreateMockAecDump());
+}
+
+TEST(AecDumpIntegration,
+     RenderStreamShouldBeLoggedOnceEveryProcessReverseStream) {
+  auto apm = CreateAudioProcessing();
+  auto mock_aec_dump = CreateMockAecDump();
+  auto fake_frame = CreateFakeFrame();
+
+  EXPECT_CALL(*mock_aec_dump.get(),
+              WriteRenderStreamMessage(Matcher<const webrtc::AudioFrame&>(_)))
+      .Times(Exactly(1));
+
+  apm->AttachAecDump(std::move(mock_aec_dump));
+  apm->ProcessReverseStream(fake_frame.get());
+}
+
+TEST(AecDumpIntegration, CaptureStreamShouldBeLoggedOnceEveryProcessStream) {
+  auto apm = CreateAudioProcessing();
+  auto mock_aec_dump = CreateMockAecDump();
+  auto fake_frame = CreateFakeFrame();
+
+  EXPECT_CALL(*mock_aec_dump.get(),
+              AddCaptureStreamInput(Matcher<const webrtc::AudioFrame&>(_)))
+      .Times(AtLeast(1));
+
+  EXPECT_CALL(*mock_aec_dump.get(),
+              AddCaptureStreamOutput(Matcher<const webrtc::AudioFrame&>(_)))
+      .Times(Exactly(1));
+
+  EXPECT_CALL(*mock_aec_dump.get(), AddAudioProcessingState(_))
+      .Times(Exactly(1));
+
+  EXPECT_CALL(*mock_aec_dump.get(), WriteCaptureStreamMessage())
+      .Times(Exactly(1));
+
+  apm->AttachAecDump(std::move(mock_aec_dump));
+  apm->ProcessStream(fake_frame.get());
+}
diff --git a/modules/audio_processing/aec_dump/aec_dump_unittest.cc b/modules/audio_processing/aec_dump/aec_dump_unittest.cc
new file mode 100644
index 0000000..965ac03
--- /dev/null
+++ b/modules/audio_processing/aec_dump/aec_dump_unittest.cc
@@ -0,0 +1,71 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <utility>
+
+#include "modules/audio_processing/aec_dump/aec_dump_factory.h"
+
+#include "modules/include/module_common_types.h"
+#include "rtc_base/task_queue.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+TEST(AecDumper, APICallsDoNotCrash) {
+  // Note order of initialization: Task queue has to be initialized
+  // before AecDump.
+  rtc::TaskQueue file_writer_queue("file_writer_queue");
+
+  const std::string filename =
+      webrtc::test::TempFilename(webrtc::test::OutputPath(), "aec_dump");
+
+  {
+    std::unique_ptr<webrtc::AecDump> aec_dump =
+        webrtc::AecDumpFactory::Create(filename, -1, &file_writer_queue);
+
+    const webrtc::AudioFrame frame;
+    aec_dump->WriteRenderStreamMessage(frame);
+
+    aec_dump->AddCaptureStreamInput(frame);
+    aec_dump->AddCaptureStreamOutput(frame);
+
+    aec_dump->WriteCaptureStreamMessage();
+
+    webrtc::InternalAPMConfig apm_config;
+    aec_dump->WriteConfig(apm_config);
+
+    webrtc::InternalAPMStreamsConfig streams_config;
+    aec_dump->WriteInitMessage(streams_config);
+  }
+  // Remove file after the AecDump d-tor has finished.
+  ASSERT_EQ(0, remove(filename.c_str()));
+}
+
+TEST(AecDumper, WriteToFile) {
+  rtc::TaskQueue file_writer_queue("file_writer_queue");
+
+  const std::string filename =
+      webrtc::test::TempFilename(webrtc::test::OutputPath(), "aec_dump");
+
+  {
+    std::unique_ptr<webrtc::AecDump> aec_dump =
+        webrtc::AecDumpFactory::Create(filename, -1, &file_writer_queue);
+    const webrtc::AudioFrame frame;
+    aec_dump->WriteRenderStreamMessage(frame);
+  }
+
+  // Verify the file has been written after the AecDump d-tor has
+  // finished.
+  FILE* fid = fopen(filename.c_str(), "r");
+  ASSERT_TRUE(fid != NULL);
+
+  // Clean it up.
+  ASSERT_EQ(0, fclose(fid));
+  ASSERT_EQ(0, remove(filename.c_str()));
+}
diff --git a/modules/audio_processing/aec_dump/capture_stream_info.cc b/modules/audio_processing/aec_dump/capture_stream_info.cc
new file mode 100644
index 0000000..dd48fd4
--- /dev/null
+++ b/modules/audio_processing/aec_dump/capture_stream_info.cc
@@ -0,0 +1,69 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec_dump/capture_stream_info.h"
+
+namespace webrtc {
+CaptureStreamInfo::CaptureStreamInfo(std::unique_ptr<WriteToFileTask> task)
+    : task_(std::move(task)) {
+  RTC_DCHECK(task_);
+  task_->GetEvent()->set_type(audioproc::Event::STREAM);
+}
+
+CaptureStreamInfo::~CaptureStreamInfo() = default;
+
+void CaptureStreamInfo::AddInput(const AudioFrameView<const float>& src) {
+  RTC_DCHECK(task_);
+  auto* stream = task_->GetEvent()->mutable_stream();
+
+  for (size_t i = 0; i < src.num_channels(); ++i) {
+    const auto& channel_view = src.channel(i);
+    stream->add_input_channel(channel_view.begin(),
+                              sizeof(float) * channel_view.size());
+  }
+}
+
+void CaptureStreamInfo::AddOutput(const AudioFrameView<const float>& src) {
+  RTC_DCHECK(task_);
+  auto* stream = task_->GetEvent()->mutable_stream();
+
+  for (size_t i = 0; i < src.num_channels(); ++i) {
+    const auto& channel_view = src.channel(i);
+    stream->add_output_channel(channel_view.begin(),
+                               sizeof(float) * channel_view.size());
+  }
+}
+
+void CaptureStreamInfo::AddInput(const AudioFrame& frame) {
+  RTC_DCHECK(task_);
+  auto* stream = task_->GetEvent()->mutable_stream();
+  const size_t data_size =
+      sizeof(int16_t) * frame.samples_per_channel_ * frame.num_channels_;
+  stream->set_input_data(frame.data(), data_size);
+}
+
+void CaptureStreamInfo::AddOutput(const AudioFrame& frame) {
+  RTC_DCHECK(task_);
+  auto* stream = task_->GetEvent()->mutable_stream();
+  const size_t data_size =
+      sizeof(int16_t) * frame.samples_per_channel_ * frame.num_channels_;
+  stream->set_output_data(frame.data(), data_size);
+}
+
+void CaptureStreamInfo::AddAudioProcessingState(
+    const AecDump::AudioProcessingState& state) {
+  RTC_DCHECK(task_);
+  auto* stream = task_->GetEvent()->mutable_stream();
+  stream->set_delay(state.delay);
+  stream->set_drift(state.drift);
+  stream->set_level(state.level);
+  stream->set_keypress(state.keypress);
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec_dump/capture_stream_info.h b/modules/audio_processing/aec_dump/capture_stream_info.h
new file mode 100644
index 0000000..91bb1fa
--- /dev/null
+++ b/modules/audio_processing/aec_dump/capture_stream_info.h
@@ -0,0 +1,66 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC_DUMP_CAPTURE_STREAM_INFO_H_
+#define MODULES_AUDIO_PROCESSING_AEC_DUMP_CAPTURE_STREAM_INFO_H_
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "modules/audio_processing/aec_dump/write_to_file_task.h"
+#include "modules/audio_processing/include/aec_dump.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/ignore_wundef.h"
+#include "rtc_base/logging.h"
+
+// Files generated at build-time by the protobuf compiler.
+RTC_PUSH_IGNORING_WUNDEF()
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h"
+#else
+#include "modules/audio_processing/debug.pb.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+
+namespace webrtc {
+
+class CaptureStreamInfo {
+ public:
+  explicit CaptureStreamInfo(std::unique_ptr<WriteToFileTask> task);
+  ~CaptureStreamInfo();
+  void AddInput(const AudioFrameView<const float>& src);
+  void AddOutput(const AudioFrameView<const float>& src);
+
+  void AddInput(const AudioFrame& frame);
+  void AddOutput(const AudioFrame& frame);
+
+  void AddAudioProcessingState(const AecDump::AudioProcessingState& state);
+
+  std::unique_ptr<WriteToFileTask> GetTask() {
+    RTC_DCHECK(task_);
+    return std::move(task_);
+  }
+
+  void SetTask(std::unique_ptr<WriteToFileTask> task) {
+    RTC_DCHECK(!task_);
+    RTC_DCHECK(task);
+    task_ = std::move(task);
+    task_->GetEvent()->set_type(audioproc::Event::STREAM);
+  }
+
+ private:
+  std::unique_ptr<WriteToFileTask> task_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC_DUMP_CAPTURE_STREAM_INFO_H_
diff --git a/modules/audio_processing/aec_dump/mock_aec_dump.cc b/modules/audio_processing/aec_dump/mock_aec_dump.cc
new file mode 100644
index 0000000..aa89e45
--- /dev/null
+++ b/modules/audio_processing/aec_dump/mock_aec_dump.cc
@@ -0,0 +1,19 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec_dump/mock_aec_dump.h"
+
+namespace webrtc {
+
+namespace test {
+
+MockAecDump::MockAecDump() = default;
+MockAecDump::~MockAecDump() = default;
+}
+}
diff --git a/modules/audio_processing/aec_dump/mock_aec_dump.h b/modules/audio_processing/aec_dump/mock_aec_dump.h
new file mode 100644
index 0000000..8cfabdd
--- /dev/null
+++ b/modules/audio_processing/aec_dump/mock_aec_dump.h
@@ -0,0 +1,53 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC_DUMP_MOCK_AEC_DUMP_H_
+#define MODULES_AUDIO_PROCESSING_AEC_DUMP_MOCK_AEC_DUMP_H_
+
+#include <memory>
+
+#include "modules/audio_processing/include/aec_dump.h"
+#include "modules/include/module_common_types.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+namespace test {
+
+class MockAecDump : public AecDump {
+ public:
+  MockAecDump();
+  virtual ~MockAecDump();
+
+  MOCK_METHOD1(WriteInitMessage,
+               void(const InternalAPMStreamsConfig& streams_config));
+
+  MOCK_METHOD1(AddCaptureStreamInput,
+               void(const AudioFrameView<const float>& src));
+  MOCK_METHOD1(AddCaptureStreamOutput,
+               void(const AudioFrameView<const float>& src));
+  MOCK_METHOD1(AddCaptureStreamInput, void(const AudioFrame& frame));
+  MOCK_METHOD1(AddCaptureStreamOutput, void(const AudioFrame& frame));
+  MOCK_METHOD1(AddAudioProcessingState,
+               void(const AudioProcessingState& state));
+  MOCK_METHOD0(WriteCaptureStreamMessage, void());
+
+  MOCK_METHOD1(WriteRenderStreamMessage, void(const AudioFrame& frame));
+  MOCK_METHOD1(WriteRenderStreamMessage,
+               void(const AudioFrameView<const float>& src));
+
+  MOCK_METHOD1(WriteConfig, void(const InternalAPMConfig& config));
+};
+
+}  // namespace test
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC_DUMP_MOCK_AEC_DUMP_H_
diff --git a/modules/audio_processing/aec_dump/null_aec_dump_factory.cc b/modules/audio_processing/aec_dump/null_aec_dump_factory.cc
new file mode 100644
index 0000000..5623e24
--- /dev/null
+++ b/modules/audio_processing/aec_dump/null_aec_dump_factory.cc
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec_dump/aec_dump_factory.h"
+#include "modules/audio_processing/include/aec_dump.h"
+
+namespace webrtc {
+
+std::unique_ptr<AecDump> AecDumpFactory::Create(rtc::PlatformFile file,
+                                                int64_t max_log_size_bytes,
+                                                rtc::TaskQueue* worker_queue) {
+  return nullptr;
+}
+
+std::unique_ptr<AecDump> AecDumpFactory::Create(std::string file_name,
+                                                int64_t max_log_size_bytes,
+                                                rtc::TaskQueue* worker_queue) {
+  return nullptr;
+}
+
+std::unique_ptr<AecDump> AecDumpFactory::Create(FILE* handle,
+                                                int64_t max_log_size_bytes,
+                                                rtc::TaskQueue* worker_queue) {
+  return nullptr;
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec_dump/write_to_file_task.cc b/modules/audio_processing/aec_dump/write_to_file_task.cc
new file mode 100644
index 0000000..8dddd47
--- /dev/null
+++ b/modules/audio_processing/aec_dump/write_to_file_task.cc
@@ -0,0 +1,68 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec_dump/write_to_file_task.h"
+
+#include "rtc_base/protobuf_utils.h"
+
+namespace webrtc {
+
+WriteToFileTask::WriteToFileTask(webrtc::FileWrapper* debug_file,
+                                 int64_t* num_bytes_left_for_log)
+    : debug_file_(debug_file),
+      num_bytes_left_for_log_(num_bytes_left_for_log) {}
+
+WriteToFileTask::~WriteToFileTask() = default;
+
+audioproc::Event* WriteToFileTask::GetEvent() {
+  return &event_;
+}
+
+bool WriteToFileTask::IsRoomForNextEvent(size_t event_byte_size) const {
+  int64_t next_message_size = event_byte_size + sizeof(int32_t);
+  return (*num_bytes_left_for_log_ < 0) ||
+         (*num_bytes_left_for_log_ >= next_message_size);
+}
+
+void WriteToFileTask::UpdateBytesLeft(size_t event_byte_size) {
+  RTC_DCHECK(IsRoomForNextEvent(event_byte_size));
+  if (*num_bytes_left_for_log_ >= 0) {
+    *num_bytes_left_for_log_ -= (sizeof(int32_t) + event_byte_size);
+  }
+}
+
+bool WriteToFileTask::Run() {
+  if (!debug_file_->is_open()) {
+    return true;
+  }
+
+  ProtoString event_string;
+  event_.SerializeToString(&event_string);
+
+  const size_t event_byte_size = event_.ByteSizeLong();
+
+  if (!IsRoomForNextEvent(event_byte_size)) {
+    debug_file_->CloseFile();
+    return true;
+  }
+
+  UpdateBytesLeft(event_byte_size);
+
+  // Write message preceded by its size.
+  if (!debug_file_->Write(&event_byte_size, sizeof(int32_t))) {
+    RTC_NOTREACHED();
+  }
+  if (!debug_file_->Write(event_string.data(), event_string.length())) {
+    RTC_NOTREACHED();
+  }
+  return true;  // Delete task from queue at once.
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec_dump/write_to_file_task.h b/modules/audio_processing/aec_dump/write_to_file_task.h
new file mode 100644
index 0000000..7301473
--- /dev/null
+++ b/modules/audio_processing/aec_dump/write_to_file_task.h
@@ -0,0 +1,58 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC_DUMP_WRITE_TO_FILE_TASK_H_
+#define MODULES_AUDIO_PROCESSING_AEC_DUMP_WRITE_TO_FILE_TASK_H_
+
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/event.h"
+#include "rtc_base/ignore_wundef.h"
+#include "rtc_base/platform_file.h"
+#include "rtc_base/task_queue.h"
+#include "system_wrappers/include/file_wrapper.h"
+
+// Files generated at build-time by the protobuf compiler.
+RTC_PUSH_IGNORING_WUNDEF()
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h"
+#else
+#include "modules/audio_processing/debug.pb.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+
+namespace webrtc {
+
+class WriteToFileTask : public rtc::QueuedTask {
+ public:
+  WriteToFileTask(webrtc::FileWrapper* debug_file,
+                  int64_t* num_bytes_left_for_log);
+  ~WriteToFileTask() override;
+
+  audioproc::Event* GetEvent();
+
+ private:
+  bool IsRoomForNextEvent(size_t event_byte_size) const;
+
+  void UpdateBytesLeft(size_t event_byte_size);
+
+  bool Run() override;
+
+  webrtc::FileWrapper* debug_file_;
+  audioproc::Event event_;
+  int64_t* num_bytes_left_for_log_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC_DUMP_WRITE_TO_FILE_TASK_H_
diff --git a/modules/audio_processing/aecm/aecm_core.cc b/modules/audio_processing/aecm/aecm_core.cc
new file mode 100644
index 0000000..3d9faca
--- /dev/null
+++ b/modules/audio_processing/aecm/aecm_core.cc
@@ -0,0 +1,1238 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aecm/aecm_core.h"
+
+#include <stddef.h>
+#include <stdlib.h>
+
+extern "C" {
+#include "common_audio/ring_buffer.h"
+#include "common_audio/signal_processing/include/real_fft.h"
+}
+#include "modules/audio_processing/aecm/echo_control_mobile.h"
+#include "modules/audio_processing/utility/delay_estimator_wrapper.h"
+extern "C" {
+#include "system_wrappers/include/cpu_features_wrapper.h"
+}
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+#ifdef AEC_DEBUG
+FILE *dfile;
+FILE *testfile;
+#endif
+
+const int16_t WebRtcAecm_kCosTable[] = {
+    8192,  8190,  8187,  8180,  8172,  8160,  8147,  8130,  8112,
+    8091,  8067,  8041,  8012,  7982,  7948,  7912,  7874,  7834,
+    7791,  7745,  7697,  7647,  7595,  7540,  7483,  7424,  7362,
+    7299,  7233,  7164,  7094,  7021,  6947,  6870,  6791,  6710,
+    6627,  6542,  6455,  6366,  6275,  6182,  6087,  5991,  5892,
+    5792,  5690,  5586,  5481,  5374,  5265,  5155,  5043,  4930,
+    4815,  4698,  4580,  4461,  4341,  4219,  4096,  3971,  3845,
+    3719,  3591,  3462,  3331,  3200,  3068,  2935,  2801,  2667,
+    2531,  2395,  2258,  2120,  1981,  1842,  1703,  1563,  1422,
+    1281,  1140,   998,   856,   713,   571,   428,   285,   142,
+       0,  -142,  -285,  -428,  -571,  -713,  -856,  -998, -1140,
+   -1281, -1422, -1563, -1703, -1842, -1981, -2120, -2258, -2395,
+   -2531, -2667, -2801, -2935, -3068, -3200, -3331, -3462, -3591,
+   -3719, -3845, -3971, -4095, -4219, -4341, -4461, -4580, -4698,
+   -4815, -4930, -5043, -5155, -5265, -5374, -5481, -5586, -5690,
+   -5792, -5892, -5991, -6087, -6182, -6275, -6366, -6455, -6542,
+   -6627, -6710, -6791, -6870, -6947, -7021, -7094, -7164, -7233,
+   -7299, -7362, -7424, -7483, -7540, -7595, -7647, -7697, -7745,
+   -7791, -7834, -7874, -7912, -7948, -7982, -8012, -8041, -8067,
+   -8091, -8112, -8130, -8147, -8160, -8172, -8180, -8187, -8190,
+   -8191, -8190, -8187, -8180, -8172, -8160, -8147, -8130, -8112,
+   -8091, -8067, -8041, -8012, -7982, -7948, -7912, -7874, -7834,
+   -7791, -7745, -7697, -7647, -7595, -7540, -7483, -7424, -7362,
+   -7299, -7233, -7164, -7094, -7021, -6947, -6870, -6791, -6710,
+   -6627, -6542, -6455, -6366, -6275, -6182, -6087, -5991, -5892,
+   -5792, -5690, -5586, -5481, -5374, -5265, -5155, -5043, -4930,
+   -4815, -4698, -4580, -4461, -4341, -4219, -4096, -3971, -3845,
+   -3719, -3591, -3462, -3331, -3200, -3068, -2935, -2801, -2667,
+   -2531, -2395, -2258, -2120, -1981, -1842, -1703, -1563, -1422,
+   -1281, -1140,  -998,  -856,  -713,  -571,  -428,  -285,  -142,
+       0,   142,   285,   428,   571,   713,   856,   998,  1140,
+    1281,  1422,  1563,  1703,  1842,  1981,  2120,  2258,  2395,
+    2531,  2667,  2801,  2935,  3068,  3200,  3331,  3462,  3591,
+    3719,  3845,  3971,  4095,  4219,  4341,  4461,  4580,  4698,
+    4815,  4930,  5043,  5155,  5265,  5374,  5481,  5586,  5690,
+    5792,  5892,  5991,  6087,  6182,  6275,  6366,  6455,  6542,
+    6627,  6710,  6791,  6870,  6947,  7021,  7094,  7164,  7233,
+    7299,  7362,  7424,  7483,  7540,  7595,  7647,  7697,  7745,
+    7791,  7834,  7874,  7912,  7948,  7982,  8012,  8041,  8067,
+    8091,  8112,  8130,  8147,  8160,  8172,  8180,  8187,  8190
+};
+
+const int16_t WebRtcAecm_kSinTable[] = {
+       0,    142,    285,    428,    571,    713,    856,    998,
+    1140,   1281,   1422,   1563,   1703,   1842,   1981,   2120,
+    2258,   2395,   2531,   2667,   2801,   2935,   3068,   3200,
+    3331,   3462,   3591,   3719,   3845,   3971,   4095,   4219,
+    4341,   4461,   4580,   4698,   4815,   4930,   5043,   5155,
+    5265,   5374,   5481,   5586,   5690,   5792,   5892,   5991,
+    6087,   6182,   6275,   6366,   6455,   6542,   6627,   6710,
+    6791,   6870,   6947,   7021,   7094,   7164,   7233,   7299,
+    7362,   7424,   7483,   7540,   7595,   7647,   7697,   7745,
+    7791,   7834,   7874,   7912,   7948,   7982,   8012,   8041,
+    8067,   8091,   8112,   8130,   8147,   8160,   8172,   8180,
+    8187,   8190,   8191,   8190,   8187,   8180,   8172,   8160,
+    8147,   8130,   8112,   8091,   8067,   8041,   8012,   7982,
+    7948,   7912,   7874,   7834,   7791,   7745,   7697,   7647,
+    7595,   7540,   7483,   7424,   7362,   7299,   7233,   7164,
+    7094,   7021,   6947,   6870,   6791,   6710,   6627,   6542,
+    6455,   6366,   6275,   6182,   6087,   5991,   5892,   5792,
+    5690,   5586,   5481,   5374,   5265,   5155,   5043,   4930,
+    4815,   4698,   4580,   4461,   4341,   4219,   4096,   3971,
+    3845,   3719,   3591,   3462,   3331,   3200,   3068,   2935,
+    2801,   2667,   2531,   2395,   2258,   2120,   1981,   1842,
+    1703,   1563,   1422,   1281,   1140,    998,    856,    713,
+     571,    428,    285,    142,      0,   -142,   -285,   -428,
+    -571,   -713,   -856,   -998,  -1140,  -1281,  -1422,  -1563,
+   -1703,  -1842,  -1981,  -2120,  -2258,  -2395,  -2531,  -2667,
+   -2801,  -2935,  -3068,  -3200,  -3331,  -3462,  -3591,  -3719,
+   -3845,  -3971,  -4095,  -4219,  -4341,  -4461,  -4580,  -4698,
+   -4815,  -4930,  -5043,  -5155,  -5265,  -5374,  -5481,  -5586,
+   -5690,  -5792,  -5892,  -5991,  -6087,  -6182,  -6275,  -6366,
+   -6455,  -6542,  -6627,  -6710,  -6791,  -6870,  -6947,  -7021,
+   -7094,  -7164,  -7233,  -7299,  -7362,  -7424,  -7483,  -7540,
+   -7595,  -7647,  -7697,  -7745,  -7791,  -7834,  -7874,  -7912,
+   -7948,  -7982,  -8012,  -8041,  -8067,  -8091,  -8112,  -8130,
+   -8147,  -8160,  -8172,  -8180,  -8187,  -8190,  -8191,  -8190,
+   -8187,  -8180,  -8172,  -8160,  -8147,  -8130,  -8112,  -8091,
+   -8067,  -8041,  -8012,  -7982,  -7948,  -7912,  -7874,  -7834,
+   -7791,  -7745,  -7697,  -7647,  -7595,  -7540,  -7483,  -7424,
+   -7362,  -7299,  -7233,  -7164,  -7094,  -7021,  -6947,  -6870,
+   -6791,  -6710,  -6627,  -6542,  -6455,  -6366,  -6275,  -6182,
+   -6087,  -5991,  -5892,  -5792,  -5690,  -5586,  -5481,  -5374,
+   -5265,  -5155,  -5043,  -4930,  -4815,  -4698,  -4580,  -4461,
+   -4341,  -4219,  -4096,  -3971,  -3845,  -3719,  -3591,  -3462,
+   -3331,  -3200,  -3068,  -2935,  -2801,  -2667,  -2531,  -2395,
+   -2258,  -2120,  -1981,  -1842,  -1703,  -1563,  -1422,  -1281,
+   -1140,   -998,   -856,   -713,   -571,   -428,   -285,   -142
+};
+
+// Initialization table for echo channel in 8 kHz
+static const int16_t kChannelStored8kHz[PART_LEN1] = {
+    2040,   1815,   1590,   1498,   1405,   1395,   1385,   1418,
+    1451,   1506,   1562,   1644,   1726,   1804,   1882,   1918,
+    1953,   1982,   2010,   2025,   2040,   2034,   2027,   2021,
+    2014,   1997,   1980,   1925,   1869,   1800,   1732,   1683,
+    1635,   1604,   1572,   1545,   1517,   1481,   1444,   1405,
+    1367,   1331,   1294,   1270,   1245,   1239,   1233,   1247,
+    1260,   1282,   1303,   1338,   1373,   1407,   1441,   1470,
+    1499,   1524,   1549,   1565,   1582,   1601,   1621,   1649,
+    1676
+};
+
+// Initialization table for echo channel in 16 kHz
+static const int16_t kChannelStored16kHz[PART_LEN1] = {
+    2040,   1590,   1405,   1385,   1451,   1562,   1726,   1882,
+    1953,   2010,   2040,   2027,   2014,   1980,   1869,   1732,
+    1635,   1572,   1517,   1444,   1367,   1294,   1245,   1233,
+    1260,   1303,   1373,   1441,   1499,   1549,   1582,   1621,
+    1676,   1741,   1802,   1861,   1921,   1983,   2040,   2102,
+    2170,   2265,   2375,   2515,   2651,   2781,   2922,   3075,
+    3253,   3471,   3738,   3976,   4151,   4258,   4308,   4288,
+    4270,   4253,   4237,   4179,   4086,   3947,   3757,   3484,
+    3153
+};
+
+// Moves the pointer to the next entry and inserts |far_spectrum| and
+// corresponding Q-domain in its buffer.
+//
+// Inputs:
+//      - self          : Pointer to the delay estimation instance
+//      - far_spectrum  : Pointer to the far end spectrum
+//      - far_q         : Q-domain of far end spectrum
+//
+void WebRtcAecm_UpdateFarHistory(AecmCore* self,
+                                 uint16_t* far_spectrum,
+                                 int far_q) {
+  // Get new buffer position
+  self->far_history_pos++;
+  if (self->far_history_pos >= MAX_DELAY) {
+    self->far_history_pos = 0;
+  }
+  // Update Q-domain buffer
+  self->far_q_domains[self->far_history_pos] = far_q;
+  // Update far end spectrum buffer
+  memcpy(&(self->far_history[self->far_history_pos * PART_LEN1]),
+         far_spectrum,
+         sizeof(uint16_t) * PART_LEN1);
+}
+
+// Returns a pointer to the far end spectrum aligned to current near end
+// spectrum. The function WebRtc_DelayEstimatorProcessFix(...) should have been
+// called before AlignedFarend(...). Otherwise, you get the pointer to the
+// previous frame. The memory is only valid until the next call of
+// WebRtc_DelayEstimatorProcessFix(...).
+//
+// Inputs:
+//      - self              : Pointer to the AECM instance.
+//      - delay             : Current delay estimate.
+//
+// Output:
+//      - far_q             : The Q-domain of the aligned far end spectrum
+//
+// Return value:
+//      - far_spectrum      : Pointer to the aligned far end spectrum
+//                            NULL - Error
+//
+const uint16_t* WebRtcAecm_AlignedFarend(AecmCore* self,
+                                         int* far_q,
+                                         int delay) {
+  int buffer_position = 0;
+  RTC_DCHECK(self);
+  buffer_position = self->far_history_pos - delay;
+
+  // Check buffer position
+  if (buffer_position < 0) {
+    buffer_position += MAX_DELAY;
+  }
+  // Get Q-domain
+  *far_q = self->far_q_domains[buffer_position];
+  // Return far end spectrum
+  return &(self->far_history[buffer_position * PART_LEN1]);
+}
+
+// Declare function pointers.
+CalcLinearEnergies WebRtcAecm_CalcLinearEnergies;
+StoreAdaptiveChannel WebRtcAecm_StoreAdaptiveChannel;
+ResetAdaptiveChannel WebRtcAecm_ResetAdaptiveChannel;
+
+AecmCore* WebRtcAecm_CreateCore() {
+    AecmCore* aecm = static_cast<AecmCore*>(malloc(sizeof(AecmCore)));
+
+    aecm->farFrameBuf = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN,
+                                            sizeof(int16_t));
+    if (!aecm->farFrameBuf)
+    {
+        WebRtcAecm_FreeCore(aecm);
+        return NULL;
+    }
+
+    aecm->nearNoisyFrameBuf = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN,
+                                                  sizeof(int16_t));
+    if (!aecm->nearNoisyFrameBuf)
+    {
+        WebRtcAecm_FreeCore(aecm);
+        return NULL;
+    }
+
+    aecm->nearCleanFrameBuf = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN,
+                                                  sizeof(int16_t));
+    if (!aecm->nearCleanFrameBuf)
+    {
+        WebRtcAecm_FreeCore(aecm);
+        return NULL;
+    }
+
+    aecm->outFrameBuf = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN,
+                                            sizeof(int16_t));
+    if (!aecm->outFrameBuf)
+    {
+        WebRtcAecm_FreeCore(aecm);
+        return NULL;
+    }
+
+    aecm->delay_estimator_farend = WebRtc_CreateDelayEstimatorFarend(PART_LEN1,
+                                                                     MAX_DELAY);
+    if (aecm->delay_estimator_farend == NULL) {
+      WebRtcAecm_FreeCore(aecm);
+      return NULL;
+    }
+    aecm->delay_estimator =
+        WebRtc_CreateDelayEstimator(aecm->delay_estimator_farend, 0);
+    if (aecm->delay_estimator == NULL) {
+      WebRtcAecm_FreeCore(aecm);
+      return NULL;
+    }
+    // TODO(bjornv): Explicitly disable robust delay validation until no
+    // performance regression has been established.  Then remove the line.
+    WebRtc_enable_robust_validation(aecm->delay_estimator, 0);
+
+    aecm->real_fft = WebRtcSpl_CreateRealFFT(PART_LEN_SHIFT);
+    if (aecm->real_fft == NULL) {
+      WebRtcAecm_FreeCore(aecm);
+      return NULL;
+    }
+
+    // Init some aecm pointers. 16 and 32 byte alignment is only necessary
+    // for Neon code currently.
+    aecm->xBuf = (int16_t*) (((uintptr_t)aecm->xBuf_buf + 31) & ~ 31);
+    aecm->dBufClean = (int16_t*) (((uintptr_t)aecm->dBufClean_buf + 31) & ~ 31);
+    aecm->dBufNoisy = (int16_t*) (((uintptr_t)aecm->dBufNoisy_buf + 31) & ~ 31);
+    aecm->outBuf = (int16_t*) (((uintptr_t)aecm->outBuf_buf + 15) & ~ 15);
+    aecm->channelStored = (int16_t*) (((uintptr_t)
+                                             aecm->channelStored_buf + 15) & ~ 15);
+    aecm->channelAdapt16 = (int16_t*) (((uintptr_t)
+                                              aecm->channelAdapt16_buf + 15) & ~ 15);
+    aecm->channelAdapt32 = (int32_t*) (((uintptr_t)
+                                              aecm->channelAdapt32_buf + 31) & ~ 31);
+
+    return aecm;
+}
+
+void WebRtcAecm_InitEchoPathCore(AecmCore* aecm, const int16_t* echo_path) {
+    int i = 0;
+
+    // Reset the stored channel
+    memcpy(aecm->channelStored, echo_path, sizeof(int16_t) * PART_LEN1);
+    // Reset the adapted channels
+    memcpy(aecm->channelAdapt16, echo_path, sizeof(int16_t) * PART_LEN1);
+    for (i = 0; i < PART_LEN1; i++)
+    {
+        aecm->channelAdapt32[i] = (int32_t)aecm->channelAdapt16[i] << 16;
+    }
+
+    // Reset channel storing variables
+    aecm->mseAdaptOld = 1000;
+    aecm->mseStoredOld = 1000;
+    aecm->mseThreshold = WEBRTC_SPL_WORD32_MAX;
+    aecm->mseChannelCount = 0;
+}
+
+static void CalcLinearEnergiesC(AecmCore* aecm,
+                                const uint16_t* far_spectrum,
+                                int32_t* echo_est,
+                                uint32_t* far_energy,
+                                uint32_t* echo_energy_adapt,
+                                uint32_t* echo_energy_stored) {
+    int i;
+
+    // Get energy for the delayed far end signal and estimated
+    // echo using both stored and adapted channels.
+    for (i = 0; i < PART_LEN1; i++)
+    {
+        echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i],
+                                           far_spectrum[i]);
+        (*far_energy) += (uint32_t)(far_spectrum[i]);
+        *echo_energy_adapt += aecm->channelAdapt16[i] * far_spectrum[i];
+        (*echo_energy_stored) += (uint32_t)echo_est[i];
+    }
+}
+
+static void StoreAdaptiveChannelC(AecmCore* aecm,
+                                  const uint16_t* far_spectrum,
+                                  int32_t* echo_est) {
+    int i;
+
+    // During startup we store the channel every block.
+    memcpy(aecm->channelStored, aecm->channelAdapt16, sizeof(int16_t) * PART_LEN1);
+    // Recalculate echo estimate
+    for (i = 0; i < PART_LEN; i += 4)
+    {
+        echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i],
+                                           far_spectrum[i]);
+        echo_est[i + 1] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 1],
+                                           far_spectrum[i + 1]);
+        echo_est[i + 2] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 2],
+                                           far_spectrum[i + 2]);
+        echo_est[i + 3] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 3],
+                                           far_spectrum[i + 3]);
+    }
+    echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i],
+                                       far_spectrum[i]);
+}
+
+static void ResetAdaptiveChannelC(AecmCore* aecm) {
+    int i;
+
+    // The stored channel has a significantly lower MSE than the adaptive one for
+    // two consecutive calculations. Reset the adaptive channel.
+    memcpy(aecm->channelAdapt16, aecm->channelStored,
+           sizeof(int16_t) * PART_LEN1);
+    // Restore the W32 channel
+    for (i = 0; i < PART_LEN; i += 4)
+    {
+        aecm->channelAdapt32[i] = (int32_t)aecm->channelStored[i] << 16;
+        aecm->channelAdapt32[i + 1] = (int32_t)aecm->channelStored[i + 1] << 16;
+        aecm->channelAdapt32[i + 2] = (int32_t)aecm->channelStored[i + 2] << 16;
+        aecm->channelAdapt32[i + 3] = (int32_t)aecm->channelStored[i + 3] << 16;
+    }
+    aecm->channelAdapt32[i] = (int32_t)aecm->channelStored[i] << 16;
+}
+
+// Initialize function pointers for ARM Neon platform.
+#if defined(WEBRTC_HAS_NEON)
+static void WebRtcAecm_InitNeon(void)
+{
+  WebRtcAecm_StoreAdaptiveChannel = WebRtcAecm_StoreAdaptiveChannelNeon;
+  WebRtcAecm_ResetAdaptiveChannel = WebRtcAecm_ResetAdaptiveChannelNeon;
+  WebRtcAecm_CalcLinearEnergies = WebRtcAecm_CalcLinearEnergiesNeon;
+}
+#endif
+
+// Initialize function pointers for MIPS platform.
+#if defined(MIPS32_LE)
+static void WebRtcAecm_InitMips(void)
+{
+#if defined(MIPS_DSP_R1_LE)
+  WebRtcAecm_StoreAdaptiveChannel = WebRtcAecm_StoreAdaptiveChannel_mips;
+  WebRtcAecm_ResetAdaptiveChannel = WebRtcAecm_ResetAdaptiveChannel_mips;
+#endif
+  WebRtcAecm_CalcLinearEnergies = WebRtcAecm_CalcLinearEnergies_mips;
+}
+#endif
+
+// WebRtcAecm_InitCore(...)
+//
+// This function initializes the AECM instant created with WebRtcAecm_CreateCore(...)
+// Input:
+//      - aecm            : Pointer to the Echo Suppression instance
+//      - samplingFreq   : Sampling Frequency
+//
+// Output:
+//      - aecm            : Initialized instance
+//
+// Return value         :  0 - Ok
+//                        -1 - Error
+//
+int WebRtcAecm_InitCore(AecmCore* const aecm, int samplingFreq) {
+    int i = 0;
+    int32_t tmp32 = PART_LEN1 * PART_LEN1;
+    int16_t tmp16 = PART_LEN1;
+
+    if (samplingFreq != 8000 && samplingFreq != 16000)
+    {
+        samplingFreq = 8000;
+        return -1;
+    }
+    // sanity check of sampling frequency
+    aecm->mult = (int16_t)samplingFreq / 8000;
+
+    aecm->farBufWritePos = 0;
+    aecm->farBufReadPos = 0;
+    aecm->knownDelay = 0;
+    aecm->lastKnownDelay = 0;
+
+    WebRtc_InitBuffer(aecm->farFrameBuf);
+    WebRtc_InitBuffer(aecm->nearNoisyFrameBuf);
+    WebRtc_InitBuffer(aecm->nearCleanFrameBuf);
+    WebRtc_InitBuffer(aecm->outFrameBuf);
+
+    memset(aecm->xBuf_buf, 0, sizeof(aecm->xBuf_buf));
+    memset(aecm->dBufClean_buf, 0, sizeof(aecm->dBufClean_buf));
+    memset(aecm->dBufNoisy_buf, 0, sizeof(aecm->dBufNoisy_buf));
+    memset(aecm->outBuf_buf, 0, sizeof(aecm->outBuf_buf));
+
+    aecm->seed = 666;
+    aecm->totCount = 0;
+
+    if (WebRtc_InitDelayEstimatorFarend(aecm->delay_estimator_farend) != 0) {
+      return -1;
+    }
+    if (WebRtc_InitDelayEstimator(aecm->delay_estimator) != 0) {
+      return -1;
+    }
+    // Set far end histories to zero
+    memset(aecm->far_history, 0, sizeof(uint16_t) * PART_LEN1 * MAX_DELAY);
+    memset(aecm->far_q_domains, 0, sizeof(int) * MAX_DELAY);
+    aecm->far_history_pos = MAX_DELAY;
+
+    aecm->nlpFlag = 1;
+    aecm->fixedDelay = -1;
+
+    aecm->dfaCleanQDomain = 0;
+    aecm->dfaCleanQDomainOld = 0;
+    aecm->dfaNoisyQDomain = 0;
+    aecm->dfaNoisyQDomainOld = 0;
+
+    memset(aecm->nearLogEnergy, 0, sizeof(aecm->nearLogEnergy));
+    aecm->farLogEnergy = 0;
+    memset(aecm->echoAdaptLogEnergy, 0, sizeof(aecm->echoAdaptLogEnergy));
+    memset(aecm->echoStoredLogEnergy, 0, sizeof(aecm->echoStoredLogEnergy));
+
+    // Initialize the echo channels with a stored shape.
+    if (samplingFreq == 8000)
+    {
+        WebRtcAecm_InitEchoPathCore(aecm, kChannelStored8kHz);
+    }
+    else
+    {
+        WebRtcAecm_InitEchoPathCore(aecm, kChannelStored16kHz);
+    }
+
+    memset(aecm->echoFilt, 0, sizeof(aecm->echoFilt));
+    memset(aecm->nearFilt, 0, sizeof(aecm->nearFilt));
+    aecm->noiseEstCtr = 0;
+
+    aecm->cngMode = AecmTrue;
+
+    memset(aecm->noiseEstTooLowCtr, 0, sizeof(aecm->noiseEstTooLowCtr));
+    memset(aecm->noiseEstTooHighCtr, 0, sizeof(aecm->noiseEstTooHighCtr));
+    // Shape the initial noise level to an approximate pink noise.
+    for (i = 0; i < (PART_LEN1 >> 1) - 1; i++)
+    {
+        aecm->noiseEst[i] = (tmp32 << 8);
+        tmp16--;
+        tmp32 -= (int32_t)((tmp16 << 1) + 1);
+    }
+    for (; i < PART_LEN1; i++)
+    {
+        aecm->noiseEst[i] = (tmp32 << 8);
+    }
+
+    aecm->farEnergyMin = WEBRTC_SPL_WORD16_MAX;
+    aecm->farEnergyMax = WEBRTC_SPL_WORD16_MIN;
+    aecm->farEnergyMaxMin = 0;
+    aecm->farEnergyVAD = FAR_ENERGY_MIN; // This prevents false speech detection at the
+                                         // beginning.
+    aecm->farEnergyMSE = 0;
+    aecm->currentVADValue = 0;
+    aecm->vadUpdateCount = 0;
+    aecm->firstVAD = 1;
+
+    aecm->startupState = 0;
+    aecm->supGain = SUPGAIN_DEFAULT;
+    aecm->supGainOld = SUPGAIN_DEFAULT;
+
+    aecm->supGainErrParamA = SUPGAIN_ERROR_PARAM_A;
+    aecm->supGainErrParamD = SUPGAIN_ERROR_PARAM_D;
+    aecm->supGainErrParamDiffAB = SUPGAIN_ERROR_PARAM_A - SUPGAIN_ERROR_PARAM_B;
+    aecm->supGainErrParamDiffBD = SUPGAIN_ERROR_PARAM_B - SUPGAIN_ERROR_PARAM_D;
+
+    // Assert a preprocessor definition at compile-time. It's an assumption
+    // used in assembly code, so check the assembly files before any change.
+    static_assert(PART_LEN % 16 == 0, "PART_LEN is not a multiple of 16");
+
+    // Initialize function pointers.
+    WebRtcAecm_CalcLinearEnergies = CalcLinearEnergiesC;
+    WebRtcAecm_StoreAdaptiveChannel = StoreAdaptiveChannelC;
+    WebRtcAecm_ResetAdaptiveChannel = ResetAdaptiveChannelC;
+
+#if defined(WEBRTC_HAS_NEON)
+    WebRtcAecm_InitNeon();
+#endif
+
+#if defined(MIPS32_LE)
+    WebRtcAecm_InitMips();
+#endif
+    return 0;
+}
+
+// TODO(bjornv): This function is currently not used. Add support for these
+// parameters from a higher level
+int WebRtcAecm_Control(AecmCore* aecm, int delay, int nlpFlag) {
+    aecm->nlpFlag = nlpFlag;
+    aecm->fixedDelay = delay;
+
+    return 0;
+}
+
+void WebRtcAecm_FreeCore(AecmCore* aecm) {
+    if (aecm == NULL) {
+      return;
+    }
+
+    WebRtc_FreeBuffer(aecm->farFrameBuf);
+    WebRtc_FreeBuffer(aecm->nearNoisyFrameBuf);
+    WebRtc_FreeBuffer(aecm->nearCleanFrameBuf);
+    WebRtc_FreeBuffer(aecm->outFrameBuf);
+
+    WebRtc_FreeDelayEstimator(aecm->delay_estimator);
+    WebRtc_FreeDelayEstimatorFarend(aecm->delay_estimator_farend);
+    WebRtcSpl_FreeRealFFT(aecm->real_fft);
+
+    free(aecm);
+}
+
+int WebRtcAecm_ProcessFrame(AecmCore* aecm,
+                            const int16_t* farend,
+                            const int16_t* nearendNoisy,
+                            const int16_t* nearendClean,
+                            int16_t* out) {
+    int16_t outBlock_buf[PART_LEN + 8]; // Align buffer to 8-byte boundary.
+    int16_t* outBlock = (int16_t*) (((uintptr_t) outBlock_buf + 15) & ~ 15);
+
+    int16_t farFrame[FRAME_LEN];
+    const int16_t* out_ptr = NULL;
+    int size = 0;
+
+    // Buffer the current frame.
+    // Fetch an older one corresponding to the delay.
+    WebRtcAecm_BufferFarFrame(aecm, farend, FRAME_LEN);
+    WebRtcAecm_FetchFarFrame(aecm, farFrame, FRAME_LEN, aecm->knownDelay);
+
+    // Buffer the synchronized far and near frames,
+    // to pass the smaller blocks individually.
+    WebRtc_WriteBuffer(aecm->farFrameBuf, farFrame, FRAME_LEN);
+    WebRtc_WriteBuffer(aecm->nearNoisyFrameBuf, nearendNoisy, FRAME_LEN);
+    if (nearendClean != NULL)
+    {
+        WebRtc_WriteBuffer(aecm->nearCleanFrameBuf, nearendClean, FRAME_LEN);
+    }
+
+    // Process as many blocks as possible.
+    while (WebRtc_available_read(aecm->farFrameBuf) >= PART_LEN)
+    {
+        int16_t far_block[PART_LEN];
+        const int16_t* far_block_ptr = NULL;
+        int16_t near_noisy_block[PART_LEN];
+        const int16_t* near_noisy_block_ptr = NULL;
+
+        WebRtc_ReadBuffer(aecm->farFrameBuf, (void**) &far_block_ptr, far_block,
+                          PART_LEN);
+        WebRtc_ReadBuffer(aecm->nearNoisyFrameBuf,
+                          (void**) &near_noisy_block_ptr,
+                          near_noisy_block,
+                          PART_LEN);
+        if (nearendClean != NULL)
+        {
+            int16_t near_clean_block[PART_LEN];
+            const int16_t* near_clean_block_ptr = NULL;
+
+            WebRtc_ReadBuffer(aecm->nearCleanFrameBuf,
+                              (void**) &near_clean_block_ptr,
+                              near_clean_block,
+                              PART_LEN);
+            if (WebRtcAecm_ProcessBlock(aecm,
+                                        far_block_ptr,
+                                        near_noisy_block_ptr,
+                                        near_clean_block_ptr,
+                                        outBlock) == -1)
+            {
+                return -1;
+            }
+        } else
+        {
+            if (WebRtcAecm_ProcessBlock(aecm,
+                                        far_block_ptr,
+                                        near_noisy_block_ptr,
+                                        NULL,
+                                        outBlock) == -1)
+            {
+                return -1;
+            }
+        }
+
+        WebRtc_WriteBuffer(aecm->outFrameBuf, outBlock, PART_LEN);
+    }
+
+    // Stuff the out buffer if we have less than a frame to output.
+    // This should only happen for the first frame.
+    size = (int) WebRtc_available_read(aecm->outFrameBuf);
+    if (size < FRAME_LEN)
+    {
+        WebRtc_MoveReadPtr(aecm->outFrameBuf, size - FRAME_LEN);
+    }
+
+    // Obtain an output frame.
+    WebRtc_ReadBuffer(aecm->outFrameBuf, (void**) &out_ptr, out, FRAME_LEN);
+    if (out_ptr != out) {
+      // ReadBuffer() hasn't copied to |out| in this case.
+      memcpy(out, out_ptr, FRAME_LEN * sizeof(int16_t));
+    }
+
+    return 0;
+}
+
+// WebRtcAecm_AsymFilt(...)
+//
+// Performs asymmetric filtering.
+//
+// Inputs:
+//      - filtOld       : Previous filtered value.
+//      - inVal         : New input value.
+//      - stepSizePos   : Step size when we have a positive contribution.
+//      - stepSizeNeg   : Step size when we have a negative contribution.
+//
+// Output:
+//
+// Return: - Filtered value.
+//
+int16_t WebRtcAecm_AsymFilt(const int16_t filtOld, const int16_t inVal,
+                            const int16_t stepSizePos,
+                            const int16_t stepSizeNeg)
+{
+    int16_t retVal;
+
+    if ((filtOld == WEBRTC_SPL_WORD16_MAX) | (filtOld == WEBRTC_SPL_WORD16_MIN))
+    {
+        return inVal;
+    }
+    retVal = filtOld;
+    if (filtOld > inVal)
+    {
+        retVal -= (filtOld - inVal) >> stepSizeNeg;
+    } else
+    {
+        retVal += (inVal - filtOld) >> stepSizePos;
+    }
+
+    return retVal;
+}
+
+// ExtractFractionPart(a, zeros)
+//
+// returns the fraction part of |a|, with |zeros| number of leading zeros, as an
+// int16_t scaled to Q8. There is no sanity check of |a| in the sense that the
+// number of zeros match.
+static int16_t ExtractFractionPart(uint32_t a, int zeros) {
+  return (int16_t)(((a << zeros) & 0x7FFFFFFF) >> 23);
+}
+
+// Calculates and returns the log of |energy| in Q8. The input |energy| is
+// supposed to be in Q(|q_domain|).
+static int16_t LogOfEnergyInQ8(uint32_t energy, int q_domain) {
+  static const int16_t kLogLowValue = PART_LEN_SHIFT << 7;
+  int16_t log_energy_q8 = kLogLowValue;
+  if (energy > 0) {
+    int zeros = WebRtcSpl_NormU32(energy);
+    int16_t frac = ExtractFractionPart(energy, zeros);
+    // log2 of |energy| in Q8.
+    log_energy_q8 += ((31 - zeros) << 8) + frac - (q_domain << 8);
+  }
+  return log_energy_q8;
+}
+
+// WebRtcAecm_CalcEnergies(...)
+//
+// This function calculates the log of energies for nearend, farend and estimated
+// echoes. There is also an update of energy decision levels, i.e. internal VAD.
+//
+//
+// @param  aecm         [i/o]   Handle of the AECM instance.
+// @param  far_spectrum [in]    Pointer to farend spectrum.
+// @param  far_q        [in]    Q-domain of farend spectrum.
+// @param  nearEner     [in]    Near end energy for current block in
+//                              Q(aecm->dfaQDomain).
+// @param  echoEst      [out]   Estimated echo in Q(xfa_q+RESOLUTION_CHANNEL16).
+//
+void WebRtcAecm_CalcEnergies(AecmCore* aecm,
+                             const uint16_t* far_spectrum,
+                             const int16_t far_q,
+                             const uint32_t nearEner,
+                             int32_t* echoEst) {
+    // Local variables
+    uint32_t tmpAdapt = 0;
+    uint32_t tmpStored = 0;
+    uint32_t tmpFar = 0;
+
+    int i;
+
+    int16_t tmp16;
+    int16_t increase_max_shifts = 4;
+    int16_t decrease_max_shifts = 11;
+    int16_t increase_min_shifts = 11;
+    int16_t decrease_min_shifts = 3;
+
+    // Get log of near end energy and store in buffer
+
+    // Shift buffer
+    memmove(aecm->nearLogEnergy + 1, aecm->nearLogEnergy,
+            sizeof(int16_t) * (MAX_BUF_LEN - 1));
+
+    // Logarithm of integrated magnitude spectrum (nearEner)
+    aecm->nearLogEnergy[0] = LogOfEnergyInQ8(nearEner, aecm->dfaNoisyQDomain);
+
+    WebRtcAecm_CalcLinearEnergies(aecm, far_spectrum, echoEst, &tmpFar, &tmpAdapt, &tmpStored);
+
+    // Shift buffers
+    memmove(aecm->echoAdaptLogEnergy + 1, aecm->echoAdaptLogEnergy,
+            sizeof(int16_t) * (MAX_BUF_LEN - 1));
+    memmove(aecm->echoStoredLogEnergy + 1, aecm->echoStoredLogEnergy,
+            sizeof(int16_t) * (MAX_BUF_LEN - 1));
+
+    // Logarithm of delayed far end energy
+    aecm->farLogEnergy = LogOfEnergyInQ8(tmpFar, far_q);
+
+    // Logarithm of estimated echo energy through adapted channel
+    aecm->echoAdaptLogEnergy[0] = LogOfEnergyInQ8(tmpAdapt,
+                                                  RESOLUTION_CHANNEL16 + far_q);
+
+    // Logarithm of estimated echo energy through stored channel
+    aecm->echoStoredLogEnergy[0] =
+        LogOfEnergyInQ8(tmpStored, RESOLUTION_CHANNEL16 + far_q);
+
+    // Update farend energy levels (min, max, vad, mse)
+    if (aecm->farLogEnergy > FAR_ENERGY_MIN)
+    {
+        if (aecm->startupState == 0)
+        {
+            increase_max_shifts = 2;
+            decrease_min_shifts = 2;
+            increase_min_shifts = 8;
+        }
+
+        aecm->farEnergyMin = WebRtcAecm_AsymFilt(aecm->farEnergyMin, aecm->farLogEnergy,
+                                                 increase_min_shifts, decrease_min_shifts);
+        aecm->farEnergyMax = WebRtcAecm_AsymFilt(aecm->farEnergyMax, aecm->farLogEnergy,
+                                                 increase_max_shifts, decrease_max_shifts);
+        aecm->farEnergyMaxMin = (aecm->farEnergyMax - aecm->farEnergyMin);
+
+        // Dynamic VAD region size
+        tmp16 = 2560 - aecm->farEnergyMin;
+        if (tmp16 > 0)
+        {
+          tmp16 = (int16_t)((tmp16 * FAR_ENERGY_VAD_REGION) >> 9);
+        } else
+        {
+            tmp16 = 0;
+        }
+        tmp16 += FAR_ENERGY_VAD_REGION;
+
+        if ((aecm->startupState == 0) | (aecm->vadUpdateCount > 1024))
+        {
+            // In startup phase or VAD update halted
+            aecm->farEnergyVAD = aecm->farEnergyMin + tmp16;
+        } else
+        {
+            if (aecm->farEnergyVAD > aecm->farLogEnergy)
+            {
+                aecm->farEnergyVAD +=
+                    (aecm->farLogEnergy + tmp16 - aecm->farEnergyVAD) >> 6;
+                aecm->vadUpdateCount = 0;
+            } else
+            {
+                aecm->vadUpdateCount++;
+            }
+        }
+        // Put MSE threshold higher than VAD
+        aecm->farEnergyMSE = aecm->farEnergyVAD + (1 << 8);
+    }
+
+    // Update VAD variables
+    if (aecm->farLogEnergy > aecm->farEnergyVAD)
+    {
+        if ((aecm->startupState == 0) | (aecm->farEnergyMaxMin > FAR_ENERGY_DIFF))
+        {
+            // We are in startup or have significant dynamics in input speech level
+            aecm->currentVADValue = 1;
+        }
+    } else
+    {
+        aecm->currentVADValue = 0;
+    }
+    if ((aecm->currentVADValue) && (aecm->firstVAD))
+    {
+        aecm->firstVAD = 0;
+        if (aecm->echoAdaptLogEnergy[0] > aecm->nearLogEnergy[0])
+        {
+            // The estimated echo has higher energy than the near end signal.
+            // This means that the initialization was too aggressive. Scale
+            // down by a factor 8
+            for (i = 0; i < PART_LEN1; i++)
+            {
+                aecm->channelAdapt16[i] >>= 3;
+            }
+            // Compensate the adapted echo energy level accordingly.
+            aecm->echoAdaptLogEnergy[0] -= (3 << 8);
+            aecm->firstVAD = 1;
+        }
+    }
+}
+
+// WebRtcAecm_CalcStepSize(...)
+//
+// This function calculates the step size used in channel estimation
+//
+//
+// @param  aecm  [in]    Handle of the AECM instance.
+// @param  mu    [out]   (Return value) Stepsize in log2(), i.e. number of shifts.
+//
+//
+int16_t WebRtcAecm_CalcStepSize(AecmCore* const aecm) {
+    int32_t tmp32;
+    int16_t tmp16;
+    int16_t mu = MU_MAX;
+
+    // Here we calculate the step size mu used in the
+    // following NLMS based Channel estimation algorithm
+    if (!aecm->currentVADValue)
+    {
+        // Far end energy level too low, no channel update
+        mu = 0;
+    } else if (aecm->startupState > 0)
+    {
+        if (aecm->farEnergyMin >= aecm->farEnergyMax)
+        {
+            mu = MU_MIN;
+        } else
+        {
+            tmp16 = (aecm->farLogEnergy - aecm->farEnergyMin);
+            tmp32 = tmp16 * MU_DIFF;
+            tmp32 = WebRtcSpl_DivW32W16(tmp32, aecm->farEnergyMaxMin);
+            mu = MU_MIN - 1 - (int16_t)(tmp32);
+            // The -1 is an alternative to rounding. This way we get a larger
+            // stepsize, so we in some sense compensate for truncation in NLMS
+        }
+        if (mu < MU_MAX)
+        {
+            mu = MU_MAX; // Equivalent with maximum step size of 2^-MU_MAX
+        }
+    }
+
+    return mu;
+}
+
+// WebRtcAecm_UpdateChannel(...)
+//
+// This function performs channel estimation. NLMS and decision on channel storage.
+//
+//
+// @param  aecm         [i/o]   Handle of the AECM instance.
+// @param  far_spectrum [in]    Absolute value of the farend signal in Q(far_q)
+// @param  far_q        [in]    Q-domain of the farend signal
+// @param  dfa          [in]    Absolute value of the nearend signal (Q[aecm->dfaQDomain])
+// @param  mu           [in]    NLMS step size.
+// @param  echoEst      [i/o]   Estimated echo in Q(far_q+RESOLUTION_CHANNEL16).
+//
+void WebRtcAecm_UpdateChannel(AecmCore* aecm,
+                              const uint16_t* far_spectrum,
+                              const int16_t far_q,
+                              const uint16_t* const dfa,
+                              const int16_t mu,
+                              int32_t* echoEst) {
+    uint32_t tmpU32no1, tmpU32no2;
+    int32_t tmp32no1, tmp32no2;
+    int32_t mseStored;
+    int32_t mseAdapt;
+
+    int i;
+
+    int16_t zerosFar, zerosNum, zerosCh, zerosDfa;
+    int16_t shiftChFar, shiftNum, shift2ResChan;
+    int16_t tmp16no1;
+    int16_t xfaQ, dfaQ;
+
+    // This is the channel estimation algorithm. It is base on NLMS but has a variable step
+    // length, which was calculated above.
+    if (mu)
+    {
+        for (i = 0; i < PART_LEN1; i++)
+        {
+            // Determine norm of channel and farend to make sure we don't get overflow in
+            // multiplication
+            zerosCh = WebRtcSpl_NormU32(aecm->channelAdapt32[i]);
+            zerosFar = WebRtcSpl_NormU32((uint32_t)far_spectrum[i]);
+            if (zerosCh + zerosFar > 31)
+            {
+                // Multiplication is safe
+                tmpU32no1 = WEBRTC_SPL_UMUL_32_16(aecm->channelAdapt32[i],
+                        far_spectrum[i]);
+                shiftChFar = 0;
+            } else
+            {
+                // We need to shift down before multiplication
+                shiftChFar = 32 - zerosCh - zerosFar;
+                // If zerosCh == zerosFar == 0, shiftChFar is 32. A
+                // right shift of 32 is undefined. To avoid that, we
+                // do this check.
+                tmpU32no1 = rtc::dchecked_cast<uint32_t>(
+                                shiftChFar >= 32
+                                    ? 0
+                                    : aecm->channelAdapt32[i] >> shiftChFar) *
+                            far_spectrum[i];
+            }
+            // Determine Q-domain of numerator
+            zerosNum = WebRtcSpl_NormU32(tmpU32no1);
+            if (dfa[i])
+            {
+                zerosDfa = WebRtcSpl_NormU32((uint32_t)dfa[i]);
+            } else
+            {
+                zerosDfa = 32;
+            }
+            tmp16no1 = zerosDfa - 2 + aecm->dfaNoisyQDomain -
+                RESOLUTION_CHANNEL32 - far_q + shiftChFar;
+            if (zerosNum > tmp16no1 + 1)
+            {
+                xfaQ = tmp16no1;
+                dfaQ = zerosDfa - 2;
+            } else
+            {
+                xfaQ = zerosNum - 2;
+                dfaQ = RESOLUTION_CHANNEL32 + far_q - aecm->dfaNoisyQDomain -
+                    shiftChFar + xfaQ;
+            }
+            // Add in the same Q-domain
+            tmpU32no1 = WEBRTC_SPL_SHIFT_W32(tmpU32no1, xfaQ);
+            tmpU32no2 = WEBRTC_SPL_SHIFT_W32((uint32_t)dfa[i], dfaQ);
+            tmp32no1 = (int32_t)tmpU32no2 - (int32_t)tmpU32no1;
+            zerosNum = WebRtcSpl_NormW32(tmp32no1);
+            if ((tmp32no1) && (far_spectrum[i] > (CHANNEL_VAD << far_q)))
+            {
+                //
+                // Update is needed
+                //
+                // This is what we would like to compute
+                //
+                // tmp32no1 = dfa[i] - (aecm->channelAdapt[i] * far_spectrum[i])
+                // tmp32norm = (i + 1)
+                // aecm->channelAdapt[i] += (2^mu) * tmp32no1
+                //                        / (tmp32norm * far_spectrum[i])
+                //
+
+                // Make sure we don't get overflow in multiplication.
+                if (zerosNum + zerosFar > 31)
+                {
+                    if (tmp32no1 > 0)
+                    {
+                        tmp32no2 = (int32_t)WEBRTC_SPL_UMUL_32_16(tmp32no1,
+                                                                        far_spectrum[i]);
+                    } else
+                    {
+                        tmp32no2 = -(int32_t)WEBRTC_SPL_UMUL_32_16(-tmp32no1,
+                                                                         far_spectrum[i]);
+                    }
+                    shiftNum = 0;
+                } else
+                {
+                    shiftNum = 32 - (zerosNum + zerosFar);
+                    if (tmp32no1 > 0)
+                    {
+                        tmp32no2 = (tmp32no1 >> shiftNum) * far_spectrum[i];
+                    } else
+                    {
+                        tmp32no2 = -((-tmp32no1 >> shiftNum) * far_spectrum[i]);
+                    }
+                }
+                // Normalize with respect to frequency bin
+                tmp32no2 = WebRtcSpl_DivW32W16(tmp32no2, i + 1);
+                // Make sure we are in the right Q-domain
+                shift2ResChan = shiftNum + shiftChFar - xfaQ - mu - ((30 - zerosFar) << 1);
+                if (WebRtcSpl_NormW32(tmp32no2) < shift2ResChan)
+                {
+                    tmp32no2 = WEBRTC_SPL_WORD32_MAX;
+                } else
+                {
+                    tmp32no2 = WEBRTC_SPL_SHIFT_W32(tmp32no2, shift2ResChan);
+                }
+                aecm->channelAdapt32[i] =
+                    WebRtcSpl_AddSatW32(aecm->channelAdapt32[i], tmp32no2);
+                if (aecm->channelAdapt32[i] < 0)
+                {
+                    // We can never have negative channel gain
+                    aecm->channelAdapt32[i] = 0;
+                }
+                aecm->channelAdapt16[i] =
+                    (int16_t)(aecm->channelAdapt32[i] >> 16);
+            }
+        }
+    }
+    // END: Adaptive channel update
+
+    // Determine if we should store or restore the channel
+    if ((aecm->startupState == 0) & (aecm->currentVADValue))
+    {
+        // During startup we store the channel every block,
+        // and we recalculate echo estimate
+        WebRtcAecm_StoreAdaptiveChannel(aecm, far_spectrum, echoEst);
+    } else
+    {
+        if (aecm->farLogEnergy < aecm->farEnergyMSE)
+        {
+            aecm->mseChannelCount = 0;
+        } else
+        {
+            aecm->mseChannelCount++;
+        }
+        // Enough data for validation. Store channel if we can.
+        if (aecm->mseChannelCount >= (MIN_MSE_COUNT + 10))
+        {
+            // We have enough data.
+            // Calculate MSE of "Adapt" and "Stored" versions.
+            // It is actually not MSE, but average absolute error.
+            mseStored = 0;
+            mseAdapt = 0;
+            for (i = 0; i < MIN_MSE_COUNT; i++)
+            {
+                tmp32no1 = ((int32_t)aecm->echoStoredLogEnergy[i]
+                        - (int32_t)aecm->nearLogEnergy[i]);
+                tmp32no2 = WEBRTC_SPL_ABS_W32(tmp32no1);
+                mseStored += tmp32no2;
+
+                tmp32no1 = ((int32_t)aecm->echoAdaptLogEnergy[i]
+                        - (int32_t)aecm->nearLogEnergy[i]);
+                tmp32no2 = WEBRTC_SPL_ABS_W32(tmp32no1);
+                mseAdapt += tmp32no2;
+            }
+            if (((mseStored << MSE_RESOLUTION) < (MIN_MSE_DIFF * mseAdapt))
+                    & ((aecm->mseStoredOld << MSE_RESOLUTION) < (MIN_MSE_DIFF
+                            * aecm->mseAdaptOld)))
+            {
+                // The stored channel has a significantly lower MSE than the adaptive one for
+                // two consecutive calculations. Reset the adaptive channel.
+                WebRtcAecm_ResetAdaptiveChannel(aecm);
+            } else if (((MIN_MSE_DIFF * mseStored) > (mseAdapt << MSE_RESOLUTION)) & (mseAdapt
+                    < aecm->mseThreshold) & (aecm->mseAdaptOld < aecm->mseThreshold))
+            {
+                // The adaptive channel has a significantly lower MSE than the stored one.
+                // The MSE for the adaptive channel has also been low for two consecutive
+                // calculations. Store the adaptive channel.
+                WebRtcAecm_StoreAdaptiveChannel(aecm, far_spectrum, echoEst);
+
+                // Update threshold
+                if (aecm->mseThreshold == WEBRTC_SPL_WORD32_MAX)
+                {
+                    aecm->mseThreshold = (mseAdapt + aecm->mseAdaptOld);
+                } else
+                {
+                  int scaled_threshold = aecm->mseThreshold * 5 / 8;
+                  aecm->mseThreshold +=
+                      ((mseAdapt - scaled_threshold) * 205) >> 8;
+                }
+
+            }
+
+            // Reset counter
+            aecm->mseChannelCount = 0;
+
+            // Store the MSE values.
+            aecm->mseStoredOld = mseStored;
+            aecm->mseAdaptOld = mseAdapt;
+        }
+    }
+    // END: Determine if we should store or reset channel estimate.
+}
+
+// CalcSuppressionGain(...)
+//
+// This function calculates the suppression gain that is used in the Wiener filter.
+//
+//
+// @param  aecm     [i/n]   Handle of the AECM instance.
+// @param  supGain  [out]   (Return value) Suppression gain with which to scale the noise
+//                          level (Q14).
+//
+//
+int16_t WebRtcAecm_CalcSuppressionGain(AecmCore* const aecm) {
+    int32_t tmp32no1;
+
+    int16_t supGain = SUPGAIN_DEFAULT;
+    int16_t tmp16no1;
+    int16_t dE = 0;
+
+    // Determine suppression gain used in the Wiener filter. The gain is based on a mix of far
+    // end energy and echo estimation error.
+    // Adjust for the far end signal level. A low signal level indicates no far end signal,
+    // hence we set the suppression gain to 0
+    if (!aecm->currentVADValue)
+    {
+        supGain = 0;
+    } else
+    {
+        // Adjust for possible double talk. If we have large variations in estimation error we
+        // likely have double talk (or poor channel).
+        tmp16no1 = (aecm->nearLogEnergy[0] - aecm->echoStoredLogEnergy[0] - ENERGY_DEV_OFFSET);
+        dE = WEBRTC_SPL_ABS_W16(tmp16no1);
+
+        if (dE < ENERGY_DEV_TOL)
+        {
+            // Likely no double talk. The better estimation, the more we can suppress signal.
+            // Update counters
+            if (dE < SUPGAIN_EPC_DT)
+            {
+                tmp32no1 = aecm->supGainErrParamDiffAB * dE;
+                tmp32no1 += (SUPGAIN_EPC_DT >> 1);
+                tmp16no1 = (int16_t)WebRtcSpl_DivW32W16(tmp32no1, SUPGAIN_EPC_DT);
+                supGain = aecm->supGainErrParamA - tmp16no1;
+            } else
+            {
+                tmp32no1 = aecm->supGainErrParamDiffBD * (ENERGY_DEV_TOL - dE);
+                tmp32no1 += ((ENERGY_DEV_TOL - SUPGAIN_EPC_DT) >> 1);
+                tmp16no1 = (int16_t)WebRtcSpl_DivW32W16(tmp32no1, (ENERGY_DEV_TOL
+                        - SUPGAIN_EPC_DT));
+                supGain = aecm->supGainErrParamD + tmp16no1;
+            }
+        } else
+        {
+            // Likely in double talk. Use default value
+            supGain = aecm->supGainErrParamD;
+        }
+    }
+
+    if (supGain > aecm->supGainOld)
+    {
+        tmp16no1 = supGain;
+    } else
+    {
+        tmp16no1 = aecm->supGainOld;
+    }
+    aecm->supGainOld = supGain;
+    if (tmp16no1 < aecm->supGain)
+    {
+        aecm->supGain += (int16_t)((tmp16no1 - aecm->supGain) >> 4);
+    } else
+    {
+        aecm->supGain += (int16_t)((tmp16no1 - aecm->supGain) >> 4);
+    }
+
+    // END: Update suppression gain
+
+    return aecm->supGain;
+}
+
+void WebRtcAecm_BufferFarFrame(AecmCore* const aecm,
+                               const int16_t* const farend,
+                               const int farLen) {
+    int writeLen = farLen, writePos = 0;
+
+    // Check if the write position must be wrapped
+    while (aecm->farBufWritePos + writeLen > FAR_BUF_LEN)
+    {
+        // Write to remaining buffer space before wrapping
+        writeLen = FAR_BUF_LEN - aecm->farBufWritePos;
+        memcpy(aecm->farBuf + aecm->farBufWritePos, farend + writePos,
+               sizeof(int16_t) * writeLen);
+        aecm->farBufWritePos = 0;
+        writePos = writeLen;
+        writeLen = farLen - writeLen;
+    }
+
+    memcpy(aecm->farBuf + aecm->farBufWritePos, farend + writePos,
+           sizeof(int16_t) * writeLen);
+    aecm->farBufWritePos += writeLen;
+}
+
+void WebRtcAecm_FetchFarFrame(AecmCore* const aecm,
+                              int16_t* const farend,
+                              const int farLen,
+                              const int knownDelay) {
+    int readLen = farLen;
+    int readPos = 0;
+    int delayChange = knownDelay - aecm->lastKnownDelay;
+
+    aecm->farBufReadPos -= delayChange;
+
+    // Check if delay forces a read position wrap
+    while (aecm->farBufReadPos < 0)
+    {
+        aecm->farBufReadPos += FAR_BUF_LEN;
+    }
+    while (aecm->farBufReadPos > FAR_BUF_LEN - 1)
+    {
+        aecm->farBufReadPos -= FAR_BUF_LEN;
+    }
+
+    aecm->lastKnownDelay = knownDelay;
+
+    // Check if read position must be wrapped
+    while (aecm->farBufReadPos + readLen > FAR_BUF_LEN)
+    {
+
+        // Read from remaining buffer space before wrapping
+        readLen = FAR_BUF_LEN - aecm->farBufReadPos;
+        memcpy(farend + readPos, aecm->farBuf + aecm->farBufReadPos,
+               sizeof(int16_t) * readLen);
+        aecm->farBufReadPos = 0;
+        readPos = readLen;
+        readLen = farLen - readLen;
+    }
+    memcpy(farend + readPos, aecm->farBuf + aecm->farBufReadPos,
+           sizeof(int16_t) * readLen);
+    aecm->farBufReadPos += readLen;
+}
diff --git a/modules/audio_processing/aecm/aecm_core.h b/modules/audio_processing/aecm/aecm_core.h
new file mode 100644
index 0000000..feb997e
--- /dev/null
+++ b/modules/audio_processing/aecm/aecm_core.h
@@ -0,0 +1,436 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Performs echo control (suppression) with fft routines in fixed-point.
+
+#ifndef MODULES_AUDIO_PROCESSING_AECM_AECM_CORE_H_
+#define MODULES_AUDIO_PROCESSING_AECM_AECM_CORE_H_
+
+extern "C" {
+#include "common_audio/ring_buffer.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+}
+#include "modules/audio_processing/aecm/aecm_defines.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+#ifdef _MSC_VER  // visual c++
+#define ALIGN8_BEG __declspec(align(8))
+#define ALIGN8_END
+#else  // gcc or icc
+#define ALIGN8_BEG
+#define ALIGN8_END __attribute__((aligned(8)))
+#endif
+
+typedef struct {
+    int16_t real;
+    int16_t imag;
+} ComplexInt16;
+
+typedef struct {
+    int farBufWritePos;
+    int farBufReadPos;
+    int knownDelay;
+    int lastKnownDelay;
+    int firstVAD;  // Parameter to control poorly initialized channels
+
+    RingBuffer* farFrameBuf;
+    RingBuffer* nearNoisyFrameBuf;
+    RingBuffer* nearCleanFrameBuf;
+    RingBuffer* outFrameBuf;
+
+    int16_t farBuf[FAR_BUF_LEN];
+
+    int16_t mult;
+    uint32_t seed;
+
+    // Delay estimation variables
+    void* delay_estimator_farend;
+    void* delay_estimator;
+    uint16_t currentDelay;
+    // Far end history variables
+    // TODO(bjornv): Replace |far_history| with ring_buffer.
+    uint16_t far_history[PART_LEN1 * MAX_DELAY];
+    int far_history_pos;
+    int far_q_domains[MAX_DELAY];
+
+    int16_t nlpFlag;
+    int16_t fixedDelay;
+
+    uint32_t totCount;
+
+    int16_t dfaCleanQDomain;
+    int16_t dfaCleanQDomainOld;
+    int16_t dfaNoisyQDomain;
+    int16_t dfaNoisyQDomainOld;
+
+    int16_t nearLogEnergy[MAX_BUF_LEN];
+    int16_t farLogEnergy;
+    int16_t echoAdaptLogEnergy[MAX_BUF_LEN];
+    int16_t echoStoredLogEnergy[MAX_BUF_LEN];
+
+    // The extra 16 or 32 bytes in the following buffers are for alignment based
+    // Neon code.
+    // It's designed this way since the current GCC compiler can't align a
+    // buffer in 16 or 32 byte boundaries properly.
+    int16_t channelStored_buf[PART_LEN1 + 8];
+    int16_t channelAdapt16_buf[PART_LEN1 + 8];
+    int32_t channelAdapt32_buf[PART_LEN1 + 8];
+    int16_t xBuf_buf[PART_LEN2 + 16];  // farend
+    int16_t dBufClean_buf[PART_LEN2 + 16];  // nearend
+    int16_t dBufNoisy_buf[PART_LEN2 + 16];  // nearend
+    int16_t outBuf_buf[PART_LEN + 8];
+
+    // Pointers to the above buffers
+    int16_t *channelStored;
+    int16_t *channelAdapt16;
+    int32_t *channelAdapt32;
+    int16_t *xBuf;
+    int16_t *dBufClean;
+    int16_t *dBufNoisy;
+    int16_t *outBuf;
+
+    int32_t echoFilt[PART_LEN1];
+    int16_t nearFilt[PART_LEN1];
+    int32_t noiseEst[PART_LEN1];
+    int           noiseEstTooLowCtr[PART_LEN1];
+    int           noiseEstTooHighCtr[PART_LEN1];
+    int16_t noiseEstCtr;
+    int16_t cngMode;
+
+    int32_t mseAdaptOld;
+    int32_t mseStoredOld;
+    int32_t mseThreshold;
+
+    int16_t farEnergyMin;
+    int16_t farEnergyMax;
+    int16_t farEnergyMaxMin;
+    int16_t farEnergyVAD;
+    int16_t farEnergyMSE;
+    int currentVADValue;
+    int16_t vadUpdateCount;
+
+    int16_t startupState;
+    int16_t mseChannelCount;
+    int16_t supGain;
+    int16_t supGainOld;
+
+    int16_t supGainErrParamA;
+    int16_t supGainErrParamD;
+    int16_t supGainErrParamDiffAB;
+    int16_t supGainErrParamDiffBD;
+
+    struct RealFFT* real_fft;
+
+#ifdef AEC_DEBUG
+    FILE *farFile;
+    FILE *nearFile;
+    FILE *outFile;
+#endif
+} AecmCore;
+
+////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_CreateCore()
+//
+// Allocates the memory needed by the AECM. The memory needs to be
+// initialized separately using the WebRtcAecm_InitCore() function.
+// Returns a pointer to the instance and a nullptr at failure.
+AecmCore* WebRtcAecm_CreateCore();
+
+////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_InitCore(...)
+//
+// This function initializes the AECM instant created with
+// WebRtcAecm_CreateCore()
+// Input:
+//      - aecm          : Pointer to the AECM instance
+//      - samplingFreq  : Sampling Frequency
+//
+// Output:
+//      - aecm          : Initialized instance
+//
+// Return value         :  0 - Ok
+//                        -1 - Error
+//
+int WebRtcAecm_InitCore(AecmCore* const aecm, int samplingFreq);
+
+////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_FreeCore(...)
+//
+// This function releases the memory allocated by WebRtcAecm_CreateCore()
+// Input:
+//      - aecm          : Pointer to the AECM instance
+//
+void WebRtcAecm_FreeCore(AecmCore* aecm);
+
+int WebRtcAecm_Control(AecmCore* aecm, int delay, int nlpFlag);
+
+////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_InitEchoPathCore(...)
+//
+// This function resets the echo channel adaptation with the specified channel.
+// Input:
+//      - aecm          : Pointer to the AECM instance
+//      - echo_path     : Pointer to the data that should initialize the echo
+//                        path
+//
+// Output:
+//      - aecm          : Initialized instance
+//
+void WebRtcAecm_InitEchoPathCore(AecmCore* aecm, const int16_t* echo_path);
+
+////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_ProcessFrame(...)
+//
+// This function processes frames and sends blocks to
+// WebRtcAecm_ProcessBlock(...)
+//
+// Inputs:
+//      - aecm          : Pointer to the AECM instance
+//      - farend        : In buffer containing one frame of echo signal
+//      - nearendNoisy  : In buffer containing one frame of nearend+echo signal
+//                        without NS
+//      - nearendClean  : In buffer containing one frame of nearend+echo signal
+//                        with NS
+//
+// Output:
+//      - out           : Out buffer, one frame of nearend signal          :
+//
+//
+int WebRtcAecm_ProcessFrame(AecmCore* aecm,
+                            const int16_t* farend,
+                            const int16_t* nearendNoisy,
+                            const int16_t* nearendClean,
+                            int16_t* out);
+
+////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_ProcessBlock(...)
+//
+// This function is called for every block within one frame
+// This function is called by WebRtcAecm_ProcessFrame(...)
+//
+// Inputs:
+//      - aecm          : Pointer to the AECM instance
+//      - farend        : In buffer containing one block of echo signal
+//      - nearendNoisy  : In buffer containing one frame of nearend+echo signal
+//                        without NS
+//      - nearendClean  : In buffer containing one frame of nearend+echo signal
+//                        with NS
+//
+// Output:
+//      - out           : Out buffer, one block of nearend signal          :
+//
+//
+int WebRtcAecm_ProcessBlock(AecmCore* aecm,
+                            const int16_t* farend,
+                            const int16_t* nearendNoisy,
+                            const int16_t* noisyClean,
+                            int16_t* out);
+
+////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_BufferFarFrame()
+//
+// Inserts a frame of data into farend buffer.
+//
+// Inputs:
+//      - aecm          : Pointer to the AECM instance
+//      - farend        : In buffer containing one frame of farend signal
+//      - farLen        : Length of frame
+//
+void WebRtcAecm_BufferFarFrame(AecmCore* const aecm,
+                               const int16_t* const farend,
+                               const int farLen);
+
+////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_FetchFarFrame()
+//
+// Read the farend buffer to account for known delay
+//
+// Inputs:
+//      - aecm          : Pointer to the AECM instance
+//      - farend        : In buffer containing one frame of farend signal
+//      - farLen        : Length of frame
+//      - knownDelay    : known delay
+//
+void WebRtcAecm_FetchFarFrame(AecmCore* const aecm,
+                              int16_t* const farend,
+                              const int farLen,
+                              const int knownDelay);
+
+// All the functions below are intended to be private
+
+////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_UpdateFarHistory()
+//
+// Moves the pointer to the next entry and inserts |far_spectrum| and
+// corresponding Q-domain in its buffer.
+//
+// Inputs:
+//      - self          : Pointer to the delay estimation instance
+//      - far_spectrum  : Pointer to the far end spectrum
+//      - far_q         : Q-domain of far end spectrum
+//
+void WebRtcAecm_UpdateFarHistory(AecmCore* self,
+                                 uint16_t* far_spectrum,
+                                 int far_q);
+
+////////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_AlignedFarend()
+//
+// Returns a pointer to the far end spectrum aligned to current near end
+// spectrum. The function WebRtc_DelayEstimatorProcessFix(...) should have been
+// called before AlignedFarend(...). Otherwise, you get the pointer to the
+// previous frame. The memory is only valid until the next call of
+// WebRtc_DelayEstimatorProcessFix(...).
+//
+// Inputs:
+//      - self              : Pointer to the AECM instance.
+//      - delay             : Current delay estimate.
+//
+// Output:
+//      - far_q             : The Q-domain of the aligned far end spectrum
+//
+// Return value:
+//      - far_spectrum      : Pointer to the aligned far end spectrum
+//                            NULL - Error
+//
+const uint16_t* WebRtcAecm_AlignedFarend(AecmCore* self, int* far_q, int delay);
+
+///////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_CalcSuppressionGain()
+//
+// This function calculates the suppression gain that is used in the
+// Wiener filter.
+//
+// Inputs:
+//      - aecm              : Pointer to the AECM instance.
+//
+// Return value:
+//      - supGain           : Suppression gain with which to scale the noise
+//                            level (Q14).
+//
+int16_t WebRtcAecm_CalcSuppressionGain(AecmCore* const aecm);
+
+///////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_CalcEnergies()
+//
+// This function calculates the log of energies for nearend, farend and
+// estimated echoes. There is also an update of energy decision levels,
+// i.e. internal VAD.
+//
+// Inputs:
+//      - aecm              : Pointer to the AECM instance.
+//      - far_spectrum      : Pointer to farend spectrum.
+//      - far_q             : Q-domain of farend spectrum.
+//      - nearEner          : Near end energy for current block in
+//                            Q(aecm->dfaQDomain).
+//
+// Output:
+//     - echoEst            : Estimated echo in Q(xfa_q+RESOLUTION_CHANNEL16).
+//
+void WebRtcAecm_CalcEnergies(AecmCore* aecm,
+                             const uint16_t* far_spectrum,
+                             const int16_t far_q,
+                             const uint32_t nearEner,
+                             int32_t* echoEst);
+
+///////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_CalcStepSize()
+//
+// This function calculates the step size used in channel estimation
+//
+// Inputs:
+//      - aecm              : Pointer to the AECM instance.
+//
+// Return value:
+//      - mu                : Stepsize in log2(), i.e. number of shifts.
+//
+int16_t WebRtcAecm_CalcStepSize(AecmCore* const aecm);
+
+///////////////////////////////////////////////////////////////////////////////
+// WebRtcAecm_UpdateChannel(...)
+//
+// This function performs channel estimation.
+// NLMS and decision on channel storage.
+//
+// Inputs:
+//      - aecm              : Pointer to the AECM instance.
+//      - far_spectrum      : Absolute value of the farend signal in Q(far_q)
+//      - far_q             : Q-domain of the farend signal
+//      - dfa               : Absolute value of the nearend signal
+//                            (Q[aecm->dfaQDomain])
+//      - mu                : NLMS step size.
+// Input/Output:
+//      - echoEst           : Estimated echo in Q(far_q+RESOLUTION_CHANNEL16).
+//
+void WebRtcAecm_UpdateChannel(AecmCore* aecm,
+                              const uint16_t* far_spectrum,
+                              const int16_t far_q,
+                              const uint16_t* const dfa,
+                              const int16_t mu,
+                              int32_t* echoEst);
+
+extern const int16_t WebRtcAecm_kCosTable[];
+extern const int16_t WebRtcAecm_kSinTable[];
+
+///////////////////////////////////////////////////////////////////////////////
+// Some function pointers, for internal functions shared by ARM NEON and
+// generic C code.
+//
+typedef void (*CalcLinearEnergies)(AecmCore* aecm,
+                                   const uint16_t* far_spectrum,
+                                   int32_t* echoEst,
+                                   uint32_t* far_energy,
+                                   uint32_t* echo_energy_adapt,
+                                   uint32_t* echo_energy_stored);
+extern CalcLinearEnergies WebRtcAecm_CalcLinearEnergies;
+
+typedef void (*StoreAdaptiveChannel)(AecmCore* aecm,
+                                     const uint16_t* far_spectrum,
+                                     int32_t* echo_est);
+extern StoreAdaptiveChannel WebRtcAecm_StoreAdaptiveChannel;
+
+typedef void (*ResetAdaptiveChannel)(AecmCore* aecm);
+extern ResetAdaptiveChannel WebRtcAecm_ResetAdaptiveChannel;
+
+// For the above function pointers, functions for generic platforms are declared
+// and defined as static in file aecm_core.c, while those for ARM Neon platforms
+// are declared below and defined in file aecm_core_neon.c.
+#if defined(WEBRTC_HAS_NEON)
+void WebRtcAecm_CalcLinearEnergiesNeon(AecmCore* aecm,
+                                       const uint16_t* far_spectrum,
+                                       int32_t* echo_est,
+                                       uint32_t* far_energy,
+                                       uint32_t* echo_energy_adapt,
+                                       uint32_t* echo_energy_stored);
+
+void WebRtcAecm_StoreAdaptiveChannelNeon(AecmCore* aecm,
+                                         const uint16_t* far_spectrum,
+                                         int32_t* echo_est);
+
+void WebRtcAecm_ResetAdaptiveChannelNeon(AecmCore* aecm);
+#endif
+
+#if defined(MIPS32_LE)
+void WebRtcAecm_CalcLinearEnergies_mips(AecmCore* aecm,
+                                        const uint16_t* far_spectrum,
+                                        int32_t* echo_est,
+                                        uint32_t* far_energy,
+                                        uint32_t* echo_energy_adapt,
+                                        uint32_t* echo_energy_stored);
+#if defined(MIPS_DSP_R1_LE)
+void WebRtcAecm_StoreAdaptiveChannel_mips(AecmCore* aecm,
+                                          const uint16_t* far_spectrum,
+                                          int32_t* echo_est);
+
+void WebRtcAecm_ResetAdaptiveChannel_mips(AecmCore* aecm);
+#endif
+#endif
+
+#endif
diff --git a/modules/audio_processing/aecm/aecm_core_c.cc b/modules/audio_processing/aecm/aecm_core_c.cc
new file mode 100644
index 0000000..effe048
--- /dev/null
+++ b/modules/audio_processing/aecm/aecm_core_c.cc
@@ -0,0 +1,773 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aecm/aecm_core.h"
+
+#include <stddef.h>
+#include <stdlib.h>
+
+extern "C" {
+#include "common_audio/ring_buffer.h"
+#include "common_audio/signal_processing/include/real_fft.h"
+}
+#include "modules/audio_processing/aecm/echo_control_mobile.h"
+#include "modules/audio_processing/utility/delay_estimator_wrapper.h"
+extern "C" {
+#include "system_wrappers/include/cpu_features_wrapper.h"
+}
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/sanitizer.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+// Square root of Hanning window in Q14.
+static const ALIGN8_BEG int16_t WebRtcAecm_kSqrtHanning[] ALIGN8_END = {
+  0, 399, 798, 1196, 1594, 1990, 2386, 2780, 3172,
+  3562, 3951, 4337, 4720, 5101, 5478, 5853, 6224,
+  6591, 6954, 7313, 7668, 8019, 8364, 8705, 9040,
+  9370, 9695, 10013, 10326, 10633, 10933, 11227, 11514,
+  11795, 12068, 12335, 12594, 12845, 13089, 13325, 13553,
+  13773, 13985, 14189, 14384, 14571, 14749, 14918, 15079,
+  15231, 15373, 15506, 15631, 15746, 15851, 15947, 16034,
+  16111, 16179, 16237, 16286, 16325, 16354, 16373, 16384
+};
+
+#ifdef AECM_WITH_ABS_APPROX
+//Q15 alpha = 0.99439986968132  const Factor for magnitude approximation
+static const uint16_t kAlpha1 = 32584;
+//Q15 beta = 0.12967166976970   const Factor for magnitude approximation
+static const uint16_t kBeta1 = 4249;
+//Q15 alpha = 0.94234827210087  const Factor for magnitude approximation
+static const uint16_t kAlpha2 = 30879;
+//Q15 beta = 0.33787806009150   const Factor for magnitude approximation
+static const uint16_t kBeta2 = 11072;
+//Q15 alpha = 0.82247698684306  const Factor for magnitude approximation
+static const uint16_t kAlpha3 = 26951;
+//Q15 beta = 0.57762063060713   const Factor for magnitude approximation
+static const uint16_t kBeta3 = 18927;
+#endif
+
+static const int16_t kNoiseEstQDomain = 15;
+static const int16_t kNoiseEstIncCount = 5;
+
+static void ComfortNoise(AecmCore* aecm,
+                         const uint16_t* dfa,
+                         ComplexInt16* out,
+                         const int16_t* lambda);
+
+static void WindowAndFFT(AecmCore* aecm,
+                         int16_t* fft,
+                         const int16_t* time_signal,
+                         ComplexInt16* freq_signal,
+                         int time_signal_scaling) {
+  int i = 0;
+
+  // FFT of signal
+  for (i = 0; i < PART_LEN; i++) {
+    // Window time domain signal and insert into real part of
+    // transformation array |fft|
+    int16_t scaled_time_signal = time_signal[i] * (1 << time_signal_scaling);
+    fft[i] = (int16_t)((scaled_time_signal * WebRtcAecm_kSqrtHanning[i]) >> 14);
+    scaled_time_signal = time_signal[i + PART_LEN] * (1 << time_signal_scaling);
+    fft[PART_LEN + i] = (int16_t)((
+        scaled_time_signal * WebRtcAecm_kSqrtHanning[PART_LEN - i]) >> 14);
+  }
+
+  // Do forward FFT, then take only the first PART_LEN complex samples,
+  // and change signs of the imaginary parts.
+  WebRtcSpl_RealForwardFFT(aecm->real_fft, fft, (int16_t*)freq_signal);
+  for (i = 0; i < PART_LEN; i++) {
+    freq_signal[i].imag = -freq_signal[i].imag;
+  }
+}
+
+static void InverseFFTAndWindow(AecmCore* aecm,
+                                int16_t* fft,
+                                ComplexInt16* efw,
+                                int16_t* output,
+                                const int16_t* nearendClean) {
+  int i, j, outCFFT;
+  int32_t tmp32no1;
+  // Reuse |efw| for the inverse FFT output after transferring
+  // the contents to |fft|.
+  int16_t* ifft_out = (int16_t*)efw;
+
+  // Synthesis
+  for (i = 1, j = 2; i < PART_LEN; i += 1, j += 2) {
+    fft[j] = efw[i].real;
+    fft[j + 1] = -efw[i].imag;
+  }
+  fft[0] = efw[0].real;
+  fft[1] = -efw[0].imag;
+
+  fft[PART_LEN2] = efw[PART_LEN].real;
+  fft[PART_LEN2 + 1] = -efw[PART_LEN].imag;
+
+  // Inverse FFT. Keep outCFFT to scale the samples in the next block.
+  outCFFT = WebRtcSpl_RealInverseFFT(aecm->real_fft, fft, ifft_out);
+  for (i = 0; i < PART_LEN; i++) {
+    ifft_out[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+                    ifft_out[i], WebRtcAecm_kSqrtHanning[i], 14);
+    tmp32no1 = WEBRTC_SPL_SHIFT_W32((int32_t)ifft_out[i],
+                                     outCFFT - aecm->dfaCleanQDomain);
+    output[i] = (int16_t)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX,
+                                        tmp32no1 + aecm->outBuf[i],
+                                        WEBRTC_SPL_WORD16_MIN);
+
+    tmp32no1 = (ifft_out[PART_LEN + i] *
+        WebRtcAecm_kSqrtHanning[PART_LEN - i]) >> 14;
+    tmp32no1 = WEBRTC_SPL_SHIFT_W32(tmp32no1,
+                                    outCFFT - aecm->dfaCleanQDomain);
+    aecm->outBuf[i] = (int16_t)WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX,
+                                                tmp32no1,
+                                                WEBRTC_SPL_WORD16_MIN);
+  }
+
+  // Copy the current block to the old position
+  // (aecm->outBuf is shifted elsewhere)
+  memcpy(aecm->xBuf, aecm->xBuf + PART_LEN, sizeof(int16_t) * PART_LEN);
+  memcpy(aecm->dBufNoisy,
+         aecm->dBufNoisy + PART_LEN,
+         sizeof(int16_t) * PART_LEN);
+  if (nearendClean != NULL)
+  {
+    memcpy(aecm->dBufClean,
+           aecm->dBufClean + PART_LEN,
+           sizeof(int16_t) * PART_LEN);
+  }
+}
+
+// Transforms a time domain signal into the frequency domain, outputting the
+// complex valued signal, absolute value and sum of absolute values.
+//
+// time_signal          [in]    Pointer to time domain signal
+// freq_signal_real     [out]   Pointer to real part of frequency domain array
+// freq_signal_imag     [out]   Pointer to imaginary part of frequency domain
+//                              array
+// freq_signal_abs      [out]   Pointer to absolute value of frequency domain
+//                              array
+// freq_signal_sum_abs  [out]   Pointer to the sum of all absolute values in
+//                              the frequency domain array
+// return value                 The Q-domain of current frequency values
+//
+static int TimeToFrequencyDomain(AecmCore* aecm,
+                                 const int16_t* time_signal,
+                                 ComplexInt16* freq_signal,
+                                 uint16_t* freq_signal_abs,
+                                 uint32_t* freq_signal_sum_abs) {
+  int i = 0;
+  int time_signal_scaling = 0;
+
+  int32_t tmp32no1 = 0;
+  int32_t tmp32no2 = 0;
+
+  // In fft_buf, +16 for 32-byte alignment.
+  int16_t fft_buf[PART_LEN4 + 16];
+  int16_t *fft = (int16_t *) (((uintptr_t) fft_buf + 31) & ~31);
+
+  int16_t tmp16no1;
+#ifndef WEBRTC_ARCH_ARM_V7
+  int16_t tmp16no2;
+#endif
+#ifdef AECM_WITH_ABS_APPROX
+  int16_t max_value = 0;
+  int16_t min_value = 0;
+  uint16_t alpha = 0;
+  uint16_t beta = 0;
+#endif
+
+#ifdef AECM_DYNAMIC_Q
+  tmp16no1 = WebRtcSpl_MaxAbsValueW16(time_signal, PART_LEN2);
+  time_signal_scaling = WebRtcSpl_NormW16(tmp16no1);
+#endif
+
+  WindowAndFFT(aecm, fft, time_signal, freq_signal, time_signal_scaling);
+
+  // Extract imaginary and real part, calculate the magnitude for
+  // all frequency bins
+  freq_signal[0].imag = 0;
+  freq_signal[PART_LEN].imag = 0;
+  freq_signal_abs[0] = (uint16_t)WEBRTC_SPL_ABS_W16(freq_signal[0].real);
+  freq_signal_abs[PART_LEN] = (uint16_t)WEBRTC_SPL_ABS_W16(
+                                freq_signal[PART_LEN].real);
+  (*freq_signal_sum_abs) = (uint32_t)(freq_signal_abs[0]) +
+                           (uint32_t)(freq_signal_abs[PART_LEN]);
+
+  for (i = 1; i < PART_LEN; i++)
+  {
+    if (freq_signal[i].real == 0)
+    {
+      freq_signal_abs[i] = (uint16_t)WEBRTC_SPL_ABS_W16(freq_signal[i].imag);
+    }
+    else if (freq_signal[i].imag == 0)
+    {
+      freq_signal_abs[i] = (uint16_t)WEBRTC_SPL_ABS_W16(freq_signal[i].real);
+    }
+    else
+    {
+      // Approximation for magnitude of complex fft output
+      // magn = sqrt(real^2 + imag^2)
+      // magn ~= alpha * max(|imag|,|real|) + beta * min(|imag|,|real|)
+      //
+      // The parameters alpha and beta are stored in Q15
+
+#ifdef AECM_WITH_ABS_APPROX
+      tmp16no1 = WEBRTC_SPL_ABS_W16(freq_signal[i].real);
+      tmp16no2 = WEBRTC_SPL_ABS_W16(freq_signal[i].imag);
+
+      if(tmp16no1 > tmp16no2)
+      {
+        max_value = tmp16no1;
+        min_value = tmp16no2;
+      } else
+      {
+        max_value = tmp16no2;
+        min_value = tmp16no1;
+      }
+
+      // Magnitude in Q(-6)
+      if ((max_value >> 2) > min_value)
+      {
+        alpha = kAlpha1;
+        beta = kBeta1;
+      } else if ((max_value >> 1) > min_value)
+      {
+        alpha = kAlpha2;
+        beta = kBeta2;
+      } else
+      {
+        alpha = kAlpha3;
+        beta = kBeta3;
+      }
+      tmp16no1 = (int16_t)((max_value * alpha) >> 15);
+      tmp16no2 = (int16_t)((min_value * beta) >> 15);
+      freq_signal_abs[i] = (uint16_t)tmp16no1 + (uint16_t)tmp16no2;
+#else
+#ifdef WEBRTC_ARCH_ARM_V7
+      __asm __volatile(
+        "smulbb %[tmp32no1], %[real], %[real]\n\t"
+        "smlabb %[tmp32no2], %[imag], %[imag], %[tmp32no1]\n\t"
+        :[tmp32no1]"+&r"(tmp32no1),
+         [tmp32no2]"=r"(tmp32no2)
+        :[real]"r"(freq_signal[i].real),
+         [imag]"r"(freq_signal[i].imag)
+      );
+#else
+      tmp16no1 = WEBRTC_SPL_ABS_W16(freq_signal[i].real);
+      tmp16no2 = WEBRTC_SPL_ABS_W16(freq_signal[i].imag);
+      tmp32no1 = tmp16no1 * tmp16no1;
+      tmp32no2 = tmp16no2 * tmp16no2;
+      tmp32no2 = WebRtcSpl_AddSatW32(tmp32no1, tmp32no2);
+#endif // WEBRTC_ARCH_ARM_V7
+      tmp32no1 = WebRtcSpl_SqrtFloor(tmp32no2);
+
+      freq_signal_abs[i] = (uint16_t)tmp32no1;
+#endif // AECM_WITH_ABS_APPROX
+    }
+    (*freq_signal_sum_abs) += (uint32_t)freq_signal_abs[i];
+  }
+
+  return time_signal_scaling;
+}
+
+int RTC_NO_SANITIZE("signed-integer-overflow")  // bugs.webrtc.org/8200
+WebRtcAecm_ProcessBlock(AecmCore* aecm,
+                        const int16_t* farend,
+                        const int16_t* nearendNoisy,
+                        const int16_t* nearendClean,
+                        int16_t* output) {
+  int i;
+
+  uint32_t xfaSum;
+  uint32_t dfaNoisySum;
+  uint32_t dfaCleanSum;
+  uint32_t echoEst32Gained;
+  uint32_t tmpU32;
+
+  int32_t tmp32no1;
+
+  uint16_t xfa[PART_LEN1];
+  uint16_t dfaNoisy[PART_LEN1];
+  uint16_t dfaClean[PART_LEN1];
+  uint16_t* ptrDfaClean = dfaClean;
+  const uint16_t* far_spectrum_ptr = NULL;
+
+  // 32 byte aligned buffers (with +8 or +16).
+  // TODO(kma): define fft with ComplexInt16.
+  int16_t fft_buf[PART_LEN4 + 2 + 16]; // +2 to make a loop safe.
+  int32_t echoEst32_buf[PART_LEN1 + 8];
+  int32_t dfw_buf[PART_LEN2 + 8];
+  int32_t efw_buf[PART_LEN2 + 8];
+
+  int16_t* fft = (int16_t*) (((uintptr_t) fft_buf + 31) & ~ 31);
+  int32_t* echoEst32 = (int32_t*) (((uintptr_t) echoEst32_buf + 31) & ~ 31);
+  ComplexInt16* dfw = (ComplexInt16*)(((uintptr_t)dfw_buf + 31) & ~31);
+  ComplexInt16* efw = (ComplexInt16*)(((uintptr_t)efw_buf + 31) & ~31);
+
+  int16_t hnl[PART_LEN1];
+  int16_t numPosCoef = 0;
+  int16_t nlpGain = ONE_Q14;
+  int delay;
+  int16_t tmp16no1;
+  int16_t tmp16no2;
+  int16_t mu;
+  int16_t supGain;
+  int16_t zeros32, zeros16;
+  int16_t zerosDBufNoisy, zerosDBufClean, zerosXBuf;
+  int far_q;
+  int16_t resolutionDiff, qDomainDiff, dfa_clean_q_domain_diff;
+
+  const int kMinPrefBand = 4;
+  const int kMaxPrefBand = 24;
+  int32_t avgHnl32 = 0;
+
+  // Determine startup state. There are three states:
+  // (0) the first CONV_LEN blocks
+  // (1) another CONV_LEN blocks
+  // (2) the rest
+
+  if (aecm->startupState < 2)
+  {
+    aecm->startupState = (aecm->totCount >= CONV_LEN) +
+                         (aecm->totCount >= CONV_LEN2);
+  }
+  // END: Determine startup state
+
+  // Buffer near and far end signals
+  memcpy(aecm->xBuf + PART_LEN, farend, sizeof(int16_t) * PART_LEN);
+  memcpy(aecm->dBufNoisy + PART_LEN, nearendNoisy, sizeof(int16_t) * PART_LEN);
+  if (nearendClean != NULL)
+  {
+    memcpy(aecm->dBufClean + PART_LEN,
+           nearendClean,
+           sizeof(int16_t) * PART_LEN);
+  }
+
+  // Transform far end signal from time domain to frequency domain.
+  far_q = TimeToFrequencyDomain(aecm,
+                                aecm->xBuf,
+                                dfw,
+                                xfa,
+                                &xfaSum);
+
+  // Transform noisy near end signal from time domain to frequency domain.
+  zerosDBufNoisy = TimeToFrequencyDomain(aecm,
+                                         aecm->dBufNoisy,
+                                         dfw,
+                                         dfaNoisy,
+                                         &dfaNoisySum);
+  aecm->dfaNoisyQDomainOld = aecm->dfaNoisyQDomain;
+  aecm->dfaNoisyQDomain = (int16_t)zerosDBufNoisy;
+
+
+  if (nearendClean == NULL)
+  {
+    ptrDfaClean = dfaNoisy;
+    aecm->dfaCleanQDomainOld = aecm->dfaNoisyQDomainOld;
+    aecm->dfaCleanQDomain = aecm->dfaNoisyQDomain;
+    dfaCleanSum = dfaNoisySum;
+  } else
+  {
+    // Transform clean near end signal from time domain to frequency domain.
+    zerosDBufClean = TimeToFrequencyDomain(aecm,
+                                           aecm->dBufClean,
+                                           dfw,
+                                           dfaClean,
+                                           &dfaCleanSum);
+    aecm->dfaCleanQDomainOld = aecm->dfaCleanQDomain;
+    aecm->dfaCleanQDomain = (int16_t)zerosDBufClean;
+  }
+
+  // Get the delay
+  // Save far-end history and estimate delay
+  WebRtcAecm_UpdateFarHistory(aecm, xfa, far_q);
+  if (WebRtc_AddFarSpectrumFix(aecm->delay_estimator_farend,
+                               xfa,
+                               PART_LEN1,
+                               far_q) == -1) {
+    return -1;
+  }
+  delay = WebRtc_DelayEstimatorProcessFix(aecm->delay_estimator,
+                                          dfaNoisy,
+                                          PART_LEN1,
+                                          zerosDBufNoisy);
+  if (delay == -1)
+  {
+    return -1;
+  }
+  else if (delay == -2)
+  {
+    // If the delay is unknown, we assume zero.
+    // NOTE: this will have to be adjusted if we ever add lookahead.
+    delay = 0;
+  }
+
+  if (aecm->fixedDelay >= 0)
+  {
+    // Use fixed delay
+    delay = aecm->fixedDelay;
+  }
+
+  // Get aligned far end spectrum
+  far_spectrum_ptr = WebRtcAecm_AlignedFarend(aecm, &far_q, delay);
+  zerosXBuf = (int16_t) far_q;
+  if (far_spectrum_ptr == NULL)
+  {
+    return -1;
+  }
+
+  // Calculate log(energy) and update energy threshold levels
+  WebRtcAecm_CalcEnergies(aecm,
+                          far_spectrum_ptr,
+                          zerosXBuf,
+                          dfaNoisySum,
+                          echoEst32);
+
+  // Calculate stepsize
+  mu = WebRtcAecm_CalcStepSize(aecm);
+
+  // Update counters
+  aecm->totCount++;
+
+  // This is the channel estimation algorithm.
+  // It is base on NLMS but has a variable step length,
+  // which was calculated above.
+  WebRtcAecm_UpdateChannel(aecm,
+                           far_spectrum_ptr,
+                           zerosXBuf,
+                           dfaNoisy,
+                           mu,
+                           echoEst32);
+  supGain = WebRtcAecm_CalcSuppressionGain(aecm);
+
+
+  // Calculate Wiener filter hnl[]
+  for (i = 0; i < PART_LEN1; i++)
+  {
+    // Far end signal through channel estimate in Q8
+    // How much can we shift right to preserve resolution
+    tmp32no1 = echoEst32[i] - aecm->echoFilt[i];
+    aecm->echoFilt[i] +=
+        rtc::dchecked_cast<int32_t>((int64_t{tmp32no1} * 50) >> 8);
+
+    zeros32 = WebRtcSpl_NormW32(aecm->echoFilt[i]) + 1;
+    zeros16 = WebRtcSpl_NormW16(supGain) + 1;
+    if (zeros32 + zeros16 > 16)
+    {
+      // Multiplication is safe
+      // Result in
+      // Q(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN+
+      //   aecm->xfaQDomainBuf[diff])
+      echoEst32Gained = WEBRTC_SPL_UMUL_32_16((uint32_t)aecm->echoFilt[i],
+                                              (uint16_t)supGain);
+      resolutionDiff = 14 - RESOLUTION_CHANNEL16 - RESOLUTION_SUPGAIN;
+      resolutionDiff += (aecm->dfaCleanQDomain - zerosXBuf);
+    } else
+    {
+      tmp16no1 = 17 - zeros32 - zeros16;
+      resolutionDiff = 14 + tmp16no1 - RESOLUTION_CHANNEL16 -
+                       RESOLUTION_SUPGAIN;
+      resolutionDiff += (aecm->dfaCleanQDomain - zerosXBuf);
+      if (zeros32 > tmp16no1)
+      {
+        echoEst32Gained = WEBRTC_SPL_UMUL_32_16((uint32_t)aecm->echoFilt[i],
+                                                supGain >> tmp16no1);
+      } else
+      {
+        // Result in Q-(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN-16)
+        echoEst32Gained = (aecm->echoFilt[i] >> tmp16no1) * supGain;
+      }
+    }
+
+    zeros16 = WebRtcSpl_NormW16(aecm->nearFilt[i]);
+    RTC_DCHECK_GE(zeros16, 0);  // |zeros16| is a norm, hence non-negative.
+    dfa_clean_q_domain_diff = aecm->dfaCleanQDomain - aecm->dfaCleanQDomainOld;
+    if (zeros16 < dfa_clean_q_domain_diff && aecm->nearFilt[i]) {
+      tmp16no1 = aecm->nearFilt[i] * (1 << zeros16);
+      qDomainDiff = zeros16 - dfa_clean_q_domain_diff;
+      tmp16no2 = ptrDfaClean[i] >> -qDomainDiff;
+    } else {
+      tmp16no1 = dfa_clean_q_domain_diff < 0
+                     ? aecm->nearFilt[i] >> -dfa_clean_q_domain_diff
+                     : aecm->nearFilt[i] * (1 << dfa_clean_q_domain_diff);
+      qDomainDiff = 0;
+      tmp16no2 = ptrDfaClean[i];
+    }
+    tmp32no1 = (int32_t)(tmp16no2 - tmp16no1);
+    tmp16no2 = (int16_t)(tmp32no1 >> 4);
+    tmp16no2 += tmp16no1;
+    zeros16 = WebRtcSpl_NormW16(tmp16no2);
+    if ((tmp16no2) & (-qDomainDiff > zeros16)) {
+      aecm->nearFilt[i] = WEBRTC_SPL_WORD16_MAX;
+    } else {
+      aecm->nearFilt[i] = qDomainDiff < 0 ? tmp16no2 * (1 << -qDomainDiff)
+                                          : tmp16no2 >> qDomainDiff;
+    }
+
+    // Wiener filter coefficients, resulting hnl in Q14
+    if (echoEst32Gained == 0)
+    {
+      hnl[i] = ONE_Q14;
+    } else if (aecm->nearFilt[i] == 0)
+    {
+      hnl[i] = 0;
+    } else
+    {
+      // Multiply the suppression gain
+      // Rounding
+      echoEst32Gained += (uint32_t)(aecm->nearFilt[i] >> 1);
+      tmpU32 = WebRtcSpl_DivU32U16(echoEst32Gained,
+                                   (uint16_t)aecm->nearFilt[i]);
+
+      // Current resolution is
+      // Q-(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN- max(0,17-zeros16- zeros32))
+      // Make sure we are in Q14
+      tmp32no1 = (int32_t)WEBRTC_SPL_SHIFT_W32(tmpU32, resolutionDiff);
+      if (tmp32no1 > ONE_Q14)
+      {
+        hnl[i] = 0;
+      } else if (tmp32no1 < 0)
+      {
+        hnl[i] = ONE_Q14;
+      } else
+      {
+        // 1-echoEst/dfa
+        hnl[i] = ONE_Q14 - (int16_t)tmp32no1;
+        if (hnl[i] < 0)
+        {
+          hnl[i] = 0;
+        }
+      }
+    }
+    if (hnl[i])
+    {
+      numPosCoef++;
+    }
+  }
+  // Only in wideband. Prevent the gain in upper band from being larger than
+  // in lower band.
+  if (aecm->mult == 2)
+  {
+    // TODO(bjornv): Investigate if the scaling of hnl[i] below can cause
+    //               speech distortion in double-talk.
+    for (i = 0; i < PART_LEN1; i++)
+    {
+      hnl[i] = (int16_t)((hnl[i] * hnl[i]) >> 14);
+    }
+
+    for (i = kMinPrefBand; i <= kMaxPrefBand; i++)
+    {
+      avgHnl32 += (int32_t)hnl[i];
+    }
+    RTC_DCHECK_GT(kMaxPrefBand - kMinPrefBand + 1, 0);
+    avgHnl32 /= (kMaxPrefBand - kMinPrefBand + 1);
+
+    for (i = kMaxPrefBand; i < PART_LEN1; i++)
+    {
+      if (hnl[i] > (int16_t)avgHnl32)
+      {
+        hnl[i] = (int16_t)avgHnl32;
+      }
+    }
+  }
+
+  // Calculate NLP gain, result is in Q14
+  if (aecm->nlpFlag)
+  {
+    for (i = 0; i < PART_LEN1; i++)
+    {
+      // Truncate values close to zero and one.
+      if (hnl[i] > NLP_COMP_HIGH)
+      {
+        hnl[i] = ONE_Q14;
+      } else if (hnl[i] < NLP_COMP_LOW)
+      {
+        hnl[i] = 0;
+      }
+
+      // Remove outliers
+      if (numPosCoef < 3)
+      {
+        nlpGain = 0;
+      } else
+      {
+        nlpGain = ONE_Q14;
+      }
+
+      // NLP
+      if ((hnl[i] == ONE_Q14) && (nlpGain == ONE_Q14))
+      {
+        hnl[i] = ONE_Q14;
+      } else
+      {
+        hnl[i] = (int16_t)((hnl[i] * nlpGain) >> 14);
+      }
+
+      // multiply with Wiener coefficients
+      efw[i].real = (int16_t)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].real,
+                                                                   hnl[i], 14));
+      efw[i].imag = (int16_t)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].imag,
+                                                                   hnl[i], 14));
+    }
+  }
+  else
+  {
+    // multiply with Wiener coefficients
+    for (i = 0; i < PART_LEN1; i++)
+    {
+      efw[i].real = (int16_t)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].real,
+                                                                   hnl[i], 14));
+      efw[i].imag = (int16_t)(WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].imag,
+                                                                   hnl[i], 14));
+    }
+  }
+
+  if (aecm->cngMode == AecmTrue)
+  {
+    ComfortNoise(aecm, ptrDfaClean, efw, hnl);
+  }
+
+  InverseFFTAndWindow(aecm, fft, efw, output, nearendClean);
+
+  return 0;
+}
+
+static void ComfortNoise(AecmCore* aecm,
+                         const uint16_t* dfa,
+                         ComplexInt16* out,
+                         const int16_t* lambda) {
+  int16_t i;
+  int16_t tmp16;
+  int32_t tmp32;
+
+  int16_t randW16[PART_LEN];
+  int16_t uReal[PART_LEN1];
+  int16_t uImag[PART_LEN1];
+  int32_t outLShift32;
+  int16_t noiseRShift16[PART_LEN1];
+
+  int16_t shiftFromNearToNoise = kNoiseEstQDomain - aecm->dfaCleanQDomain;
+  int16_t minTrackShift;
+
+  RTC_DCHECK_GE(shiftFromNearToNoise, 0);
+  RTC_DCHECK_LT(shiftFromNearToNoise, 16);
+
+  if (aecm->noiseEstCtr < 100)
+  {
+    // Track the minimum more quickly initially.
+    aecm->noiseEstCtr++;
+    minTrackShift = 6;
+  } else
+  {
+    minTrackShift = 9;
+  }
+
+  // Estimate noise power.
+  for (i = 0; i < PART_LEN1; i++)
+  {
+    // Shift to the noise domain.
+    tmp32 = (int32_t)dfa[i];
+    outLShift32 = tmp32 << shiftFromNearToNoise;
+
+    if (outLShift32 < aecm->noiseEst[i])
+    {
+      // Reset "too low" counter
+      aecm->noiseEstTooLowCtr[i] = 0;
+      // Track the minimum.
+      if (aecm->noiseEst[i] < (1 << minTrackShift))
+      {
+        // For small values, decrease noiseEst[i] every
+        // |kNoiseEstIncCount| block. The regular approach below can not
+        // go further down due to truncation.
+        aecm->noiseEstTooHighCtr[i]++;
+        if (aecm->noiseEstTooHighCtr[i] >= kNoiseEstIncCount)
+        {
+          aecm->noiseEst[i]--;
+          aecm->noiseEstTooHighCtr[i] = 0; // Reset the counter
+        }
+      }
+      else
+      {
+        aecm->noiseEst[i] -= ((aecm->noiseEst[i] - outLShift32)
+                              >> minTrackShift);
+      }
+    } else
+    {
+      // Reset "too high" counter
+      aecm->noiseEstTooHighCtr[i] = 0;
+      // Ramp slowly upwards until we hit the minimum again.
+      if ((aecm->noiseEst[i] >> 19) > 0)
+      {
+        // Avoid overflow.
+        // Multiplication with 2049 will cause wrap around. Scale
+        // down first and then multiply
+        aecm->noiseEst[i] >>= 11;
+        aecm->noiseEst[i] *= 2049;
+      }
+      else if ((aecm->noiseEst[i] >> 11) > 0)
+      {
+        // Large enough for relative increase
+        aecm->noiseEst[i] *= 2049;
+        aecm->noiseEst[i] >>= 11;
+      }
+      else
+      {
+        // Make incremental increases based on size every
+        // |kNoiseEstIncCount| block
+        aecm->noiseEstTooLowCtr[i]++;
+        if (aecm->noiseEstTooLowCtr[i] >= kNoiseEstIncCount)
+        {
+          aecm->noiseEst[i] += (aecm->noiseEst[i] >> 9) + 1;
+          aecm->noiseEstTooLowCtr[i] = 0; // Reset counter
+        }
+      }
+    }
+  }
+
+  for (i = 0; i < PART_LEN1; i++)
+  {
+    tmp32 = aecm->noiseEst[i] >> shiftFromNearToNoise;
+    if (tmp32 > 32767)
+    {
+      tmp32 = 32767;
+      aecm->noiseEst[i] = tmp32 << shiftFromNearToNoise;
+    }
+    noiseRShift16[i] = (int16_t)tmp32;
+
+    tmp16 = ONE_Q14 - lambda[i];
+    noiseRShift16[i] = (int16_t)((tmp16 * noiseRShift16[i]) >> 14);
+  }
+
+  // Generate a uniform random array on [0 2^15-1].
+  WebRtcSpl_RandUArray(randW16, PART_LEN, &aecm->seed);
+
+  // Generate noise according to estimated energy.
+  uReal[0] = 0; // Reject LF noise.
+  uImag[0] = 0;
+  for (i = 1; i < PART_LEN1; i++)
+  {
+    // Get a random index for the cos and sin tables over [0 359].
+    tmp16 = (int16_t)((359 * randW16[i - 1]) >> 15);
+
+    // Tables are in Q13.
+    uReal[i] = (int16_t)((noiseRShift16[i] * WebRtcAecm_kCosTable[tmp16]) >>
+        13);
+    uImag[i] = (int16_t)((-noiseRShift16[i] * WebRtcAecm_kSinTable[tmp16]) >>
+        13);
+  }
+  uImag[PART_LEN] = 0;
+
+  for (i = 0; i < PART_LEN1; i++)
+  {
+    out[i].real = WebRtcSpl_AddSatW16(out[i].real, uReal[i]);
+    out[i].imag = WebRtcSpl_AddSatW16(out[i].imag, uImag[i]);
+  }
+}
diff --git a/modules/audio_processing/aecm/aecm_core_mips.cc b/modules/audio_processing/aecm/aecm_core_mips.cc
new file mode 100644
index 0000000..58e5ec5
--- /dev/null
+++ b/modules/audio_processing/aecm/aecm_core_mips.cc
@@ -0,0 +1,1566 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aecm/aecm_core.h"
+
+#include "modules/audio_processing/aecm/echo_control_mobile.h"
+#include "modules/audio_processing/utility/delay_estimator_wrapper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+static const ALIGN8_BEG int16_t WebRtcAecm_kSqrtHanning[] ALIGN8_END = {
+  0, 399, 798, 1196, 1594, 1990, 2386, 2780, 3172,
+  3562, 3951, 4337, 4720, 5101, 5478, 5853, 6224,
+  6591, 6954, 7313, 7668, 8019, 8364, 8705, 9040,
+  9370, 9695, 10013, 10326, 10633, 10933, 11227, 11514,
+  11795, 12068, 12335, 12594, 12845, 13089, 13325, 13553,
+  13773, 13985, 14189, 14384, 14571, 14749, 14918, 15079,
+  15231, 15373, 15506, 15631, 15746, 15851, 15947, 16034,
+  16111, 16179, 16237, 16286, 16325, 16354, 16373, 16384
+};
+
+static const int16_t kNoiseEstQDomain = 15;
+static const int16_t kNoiseEstIncCount = 5;
+
+static int16_t coefTable[] = {
+   0,   4, 256, 260, 128, 132, 384, 388,
+  64,  68, 320, 324, 192, 196, 448, 452,
+  32,  36, 288, 292, 160, 164, 416, 420,
+  96, 100, 352, 356, 224, 228, 480, 484,
+  16,  20, 272, 276, 144, 148, 400, 404,
+  80,  84, 336, 340, 208, 212, 464, 468,
+  48,  52, 304, 308, 176, 180, 432, 436,
+ 112, 116, 368, 372, 240, 244, 496, 500,
+   8,  12, 264, 268, 136, 140, 392, 396,
+  72,  76, 328, 332, 200, 204, 456, 460,
+  40,  44, 296, 300, 168, 172, 424, 428,
+ 104, 108, 360, 364, 232, 236, 488, 492,
+  24,  28, 280, 284, 152, 156, 408, 412,
+  88,  92, 344, 348, 216, 220, 472, 476,
+  56,  60, 312, 316, 184, 188, 440, 444,
+ 120, 124, 376, 380, 248, 252, 504, 508
+};
+
+static int16_t coefTable_ifft[] = {
+    0, 512, 256, 508, 128, 252, 384, 380,
+   64, 124, 320, 444, 192, 188, 448, 316,
+   32,  60, 288, 476, 160, 220, 416, 348,
+   96,  92, 352, 412, 224, 156, 480, 284,
+   16,  28, 272, 492, 144, 236, 400, 364,
+   80, 108, 336, 428, 208, 172, 464, 300,
+   48,  44, 304, 460, 176, 204, 432, 332,
+  112,  76, 368, 396, 240, 140, 496, 268,
+    8,  12, 264, 500, 136, 244, 392, 372,
+   72, 116, 328, 436, 200, 180, 456, 308,
+   40,  52, 296, 468, 168, 212, 424, 340,
+  104,  84, 360, 404, 232, 148, 488, 276,
+   24,  20, 280, 484, 152, 228, 408, 356,
+   88, 100, 344, 420, 216, 164, 472, 292,
+   56,  36, 312, 452, 184, 196, 440, 324,
+  120,  68, 376, 388, 248, 132, 504, 260
+};
+
+static void ComfortNoise(AecmCore* aecm,
+                         const uint16_t* dfa,
+                         ComplexInt16* out,
+                         const int16_t* lambda);
+
+static void WindowAndFFT(AecmCore* aecm,
+                         int16_t* fft,
+                         const int16_t* time_signal,
+                         ComplexInt16* freq_signal,
+                         int time_signal_scaling) {
+  int i, j;
+  int32_t tmp1, tmp2, tmp3, tmp4;
+  int16_t* pfrfi;
+  ComplexInt16* pfreq_signal;
+  int16_t  f_coef, s_coef;
+  int32_t load_ptr, store_ptr1, store_ptr2, shift, shift1;
+  int32_t hann, hann1, coefs;
+
+  memset(fft, 0, sizeof(int16_t) * PART_LEN4);
+
+  // FFT of signal
+  __asm __volatile (
+    ".set        push                                                    \n\t"
+    ".set        noreorder                                               \n\t"
+    "addiu       %[shift],          %[time_signal_scaling], -14          \n\t"
+    "addiu       %[i],              $zero,                  64           \n\t"
+    "addiu       %[load_ptr],       %[time_signal],         0            \n\t"
+    "addiu       %[hann],           %[hanning],             0            \n\t"
+    "addiu       %[hann1],          %[hanning],             128          \n\t"
+    "addiu       %[coefs],          %[coefTable],           0            \n\t"
+    "bltz        %[shift],          2f                                   \n\t"
+    " negu       %[shift1],         %[shift]                             \n\t"
+   "1:                                                                   \n\t"
+    "lh          %[tmp1],           0(%[load_ptr])                       \n\t"
+    "lh          %[tmp2],           0(%[hann])                           \n\t"
+    "lh          %[tmp3],           128(%[load_ptr])                     \n\t"
+    "lh          %[tmp4],           0(%[hann1])                          \n\t"
+    "addiu       %[i],              %[i],                   -1           \n\t"
+    "mul         %[tmp1],           %[tmp1],                %[tmp2]      \n\t"
+    "mul         %[tmp3],           %[tmp3],                %[tmp4]      \n\t"
+    "lh          %[f_coef],         0(%[coefs])                          \n\t"
+    "lh          %[s_coef],         2(%[coefs])                          \n\t"
+    "addiu       %[load_ptr],       %[load_ptr],            2            \n\t"
+    "addiu       %[hann],           %[hann],                2            \n\t"
+    "addiu       %[hann1],          %[hann1],               -2           \n\t"
+    "addu        %[store_ptr1],     %[fft],                 %[f_coef]    \n\t"
+    "addu        %[store_ptr2],     %[fft],                 %[s_coef]    \n\t"
+    "sllv        %[tmp1],           %[tmp1],                %[shift]     \n\t"
+    "sllv        %[tmp3],           %[tmp3],                %[shift]     \n\t"
+    "sh          %[tmp1],           0(%[store_ptr1])                     \n\t"
+    "sh          %[tmp3],           0(%[store_ptr2])                     \n\t"
+    "bgtz        %[i],              1b                                   \n\t"
+    " addiu      %[coefs],          %[coefs],               4            \n\t"
+    "b           3f                                                      \n\t"
+    " nop                                                                \n\t"
+   "2:                                                                   \n\t"
+    "lh          %[tmp1],           0(%[load_ptr])                       \n\t"
+    "lh          %[tmp2],           0(%[hann])                           \n\t"
+    "lh          %[tmp3],           128(%[load_ptr])                     \n\t"
+    "lh          %[tmp4],           0(%[hann1])                          \n\t"
+    "addiu       %[i],              %[i],                   -1           \n\t"
+    "mul         %[tmp1],           %[tmp1],                %[tmp2]      \n\t"
+    "mul         %[tmp3],           %[tmp3],                %[tmp4]      \n\t"
+    "lh          %[f_coef],         0(%[coefs])                          \n\t"
+    "lh          %[s_coef],         2(%[coefs])                          \n\t"
+    "addiu       %[load_ptr],       %[load_ptr],            2            \n\t"
+    "addiu       %[hann],           %[hann],                2            \n\t"
+    "addiu       %[hann1],          %[hann1],               -2           \n\t"
+    "addu        %[store_ptr1],     %[fft],                 %[f_coef]    \n\t"
+    "addu        %[store_ptr2],     %[fft],                 %[s_coef]    \n\t"
+    "srav        %[tmp1],           %[tmp1],                %[shift1]    \n\t"
+    "srav        %[tmp3],           %[tmp3],                %[shift1]    \n\t"
+    "sh          %[tmp1],           0(%[store_ptr1])                     \n\t"
+    "sh          %[tmp3],           0(%[store_ptr2])                     \n\t"
+    "bgtz        %[i],              2b                                   \n\t"
+    " addiu      %[coefs],          %[coefs],               4            \n\t"
+   "3:                                                                   \n\t"
+    ".set        pop                                                     \n\t"
+    : [load_ptr] "=&r" (load_ptr), [shift] "=&r" (shift), [hann] "=&r" (hann),
+      [hann1] "=&r" (hann1), [shift1] "=&r" (shift1), [coefs] "=&r" (coefs),
+      [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3),
+      [tmp4] "=&r" (tmp4), [i] "=&r" (i), [f_coef] "=&r" (f_coef),
+      [s_coef] "=&r" (s_coef), [store_ptr1] "=&r" (store_ptr1),
+      [store_ptr2] "=&r" (store_ptr2)
+    : [time_signal] "r" (time_signal), [coefTable] "r" (coefTable),
+      [time_signal_scaling] "r" (time_signal_scaling),
+      [hanning] "r" (WebRtcAecm_kSqrtHanning), [fft] "r" (fft)
+    : "memory", "hi", "lo"
+  );
+
+  WebRtcSpl_ComplexFFT(fft, PART_LEN_SHIFT, 1);
+  pfrfi = fft;
+  pfreq_signal = freq_signal;
+
+  __asm __volatile (
+    ".set        push                                                     \n\t"
+    ".set        noreorder                                                \n\t"
+    "addiu       %[j],              $zero,                 128            \n\t"
+   "1:                                                                    \n\t"
+    "lh          %[tmp1],           0(%[pfrfi])                           \n\t"
+    "lh          %[tmp2],           2(%[pfrfi])                           \n\t"
+    "lh          %[tmp3],           4(%[pfrfi])                           \n\t"
+    "lh          %[tmp4],           6(%[pfrfi])                           \n\t"
+    "subu        %[tmp2],           $zero,                 %[tmp2]        \n\t"
+    "sh          %[tmp1],           0(%[pfreq_signal])                    \n\t"
+    "sh          %[tmp2],           2(%[pfreq_signal])                    \n\t"
+    "subu        %[tmp4],           $zero,                 %[tmp4]        \n\t"
+    "sh          %[tmp3],           4(%[pfreq_signal])                    \n\t"
+    "sh          %[tmp4],           6(%[pfreq_signal])                    \n\t"
+    "lh          %[tmp1],           8(%[pfrfi])                           \n\t"
+    "lh          %[tmp2],           10(%[pfrfi])                          \n\t"
+    "lh          %[tmp3],           12(%[pfrfi])                          \n\t"
+    "lh          %[tmp4],           14(%[pfrfi])                          \n\t"
+    "addiu       %[j],              %[j],                  -8             \n\t"
+    "subu        %[tmp2],           $zero,                 %[tmp2]        \n\t"
+    "sh          %[tmp1],           8(%[pfreq_signal])                    \n\t"
+    "sh          %[tmp2],           10(%[pfreq_signal])                   \n\t"
+    "subu        %[tmp4],           $zero,                 %[tmp4]        \n\t"
+    "sh          %[tmp3],           12(%[pfreq_signal])                   \n\t"
+    "sh          %[tmp4],           14(%[pfreq_signal])                   \n\t"
+    "addiu       %[pfreq_signal],   %[pfreq_signal],       16             \n\t"
+    "bgtz        %[j],              1b                                    \n\t"
+    " addiu      %[pfrfi],          %[pfrfi],              16             \n\t"
+    ".set        pop                                                      \n\t"
+    : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3),
+      [j] "=&r" (j), [pfrfi] "+r" (pfrfi), [pfreq_signal] "+r" (pfreq_signal),
+      [tmp4] "=&r" (tmp4)
+    :
+    : "memory"
+  );
+}
+
+static void InverseFFTAndWindow(AecmCore* aecm,
+                                int16_t* fft,
+                                ComplexInt16* efw,
+                                int16_t* output,
+                                const int16_t* nearendClean) {
+  int i, outCFFT;
+  int32_t tmp1, tmp2, tmp3, tmp4, tmp_re, tmp_im;
+  int16_t* pcoefTable_ifft = coefTable_ifft;
+  int16_t* pfft = fft;
+  int16_t* ppfft = fft;
+  ComplexInt16* pefw = efw;
+  int32_t out_aecm;
+  int16_t* paecm_buf = aecm->outBuf;
+  const int16_t* p_kSqrtHanning = WebRtcAecm_kSqrtHanning;
+  const int16_t* pp_kSqrtHanning = &WebRtcAecm_kSqrtHanning[PART_LEN];
+  int16_t* output1 = output;
+
+  __asm __volatile (
+    ".set      push                                                        \n\t"
+    ".set      noreorder                                                   \n\t"
+    "addiu     %[i],                $zero,                   64            \n\t"
+   "1:                                                                     \n\t"
+    "lh        %[tmp1],             0(%[pcoefTable_ifft])                  \n\t"
+    "lh        %[tmp2],             2(%[pcoefTable_ifft])                  \n\t"
+    "lh        %[tmp_re],           0(%[pefw])                             \n\t"
+    "lh        %[tmp_im],           2(%[pefw])                             \n\t"
+    "addu      %[pfft],             %[fft],                  %[tmp2]       \n\t"
+    "sh        %[tmp_re],           0(%[pfft])                             \n\t"
+    "sh        %[tmp_im],           2(%[pfft])                             \n\t"
+    "addu      %[pfft],             %[fft],                  %[tmp1]       \n\t"
+    "sh        %[tmp_re],           0(%[pfft])                             \n\t"
+    "subu      %[tmp_im],           $zero,                   %[tmp_im]     \n\t"
+    "sh        %[tmp_im],           2(%[pfft])                             \n\t"
+    "lh        %[tmp1],             4(%[pcoefTable_ifft])                  \n\t"
+    "lh        %[tmp2],             6(%[pcoefTable_ifft])                  \n\t"
+    "lh        %[tmp_re],           4(%[pefw])                             \n\t"
+    "lh        %[tmp_im],           6(%[pefw])                             \n\t"
+    "addu      %[pfft],             %[fft],                  %[tmp2]       \n\t"
+    "sh        %[tmp_re],           0(%[pfft])                             \n\t"
+    "sh        %[tmp_im],           2(%[pfft])                             \n\t"
+    "addu      %[pfft],             %[fft],                  %[tmp1]       \n\t"
+    "sh        %[tmp_re],           0(%[pfft])                             \n\t"
+    "subu      %[tmp_im],           $zero,                   %[tmp_im]     \n\t"
+    "sh        %[tmp_im],           2(%[pfft])                             \n\t"
+    "lh        %[tmp1],             8(%[pcoefTable_ifft])                  \n\t"
+    "lh        %[tmp2],             10(%[pcoefTable_ifft])                 \n\t"
+    "lh        %[tmp_re],           8(%[pefw])                             \n\t"
+    "lh        %[tmp_im],           10(%[pefw])                            \n\t"
+    "addu      %[pfft],             %[fft],                  %[tmp2]       \n\t"
+    "sh        %[tmp_re],           0(%[pfft])                             \n\t"
+    "sh        %[tmp_im],           2(%[pfft])                             \n\t"
+    "addu      %[pfft],             %[fft],                  %[tmp1]       \n\t"
+    "sh        %[tmp_re],           0(%[pfft])                             \n\t"
+    "subu      %[tmp_im],           $zero,                   %[tmp_im]     \n\t"
+    "sh        %[tmp_im],           2(%[pfft])                             \n\t"
+    "lh        %[tmp1],             12(%[pcoefTable_ifft])                 \n\t"
+    "lh        %[tmp2],             14(%[pcoefTable_ifft])                 \n\t"
+    "lh        %[tmp_re],           12(%[pefw])                            \n\t"
+    "lh        %[tmp_im],           14(%[pefw])                            \n\t"
+    "addu      %[pfft],             %[fft],                  %[tmp2]       \n\t"
+    "sh        %[tmp_re],           0(%[pfft])                             \n\t"
+    "sh        %[tmp_im],           2(%[pfft])                             \n\t"
+    "addu      %[pfft],             %[fft],                  %[tmp1]       \n\t"
+    "sh        %[tmp_re],           0(%[pfft])                             \n\t"
+    "subu      %[tmp_im],           $zero,                   %[tmp_im]     \n\t"
+    "sh        %[tmp_im],           2(%[pfft])                             \n\t"
+    "addiu     %[pcoefTable_ifft],  %[pcoefTable_ifft],      16            \n\t"
+    "addiu     %[i],                %[i],                    -4            \n\t"
+    "bgtz      %[i],                1b                                     \n\t"
+    " addiu    %[pefw],             %[pefw],                 16            \n\t"
+    ".set      pop                                                         \n\t"
+    : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [pfft] "+r" (pfft),
+      [i] "=&r" (i), [tmp_re] "=&r" (tmp_re), [tmp_im] "=&r" (tmp_im),
+      [pefw] "+r" (pefw), [pcoefTable_ifft] "+r" (pcoefTable_ifft),
+      [fft] "+r" (fft)
+    :
+    : "memory"
+  );
+
+  fft[2] = efw[PART_LEN].real;
+  fft[3] = -efw[PART_LEN].imag;
+
+  outCFFT = WebRtcSpl_ComplexIFFT(fft, PART_LEN_SHIFT, 1);
+  pfft = fft;
+
+  __asm __volatile (
+    ".set       push                                               \n\t"
+    ".set       noreorder                                          \n\t"
+    "addiu      %[i],            $zero,               128          \n\t"
+   "1:                                                             \n\t"
+    "lh         %[tmp1],         0(%[ppfft])                       \n\t"
+    "lh         %[tmp2],         4(%[ppfft])                       \n\t"
+    "lh         %[tmp3],         8(%[ppfft])                       \n\t"
+    "lh         %[tmp4],         12(%[ppfft])                      \n\t"
+    "addiu      %[i],            %[i],                -4           \n\t"
+    "sh         %[tmp1],         0(%[pfft])                        \n\t"
+    "sh         %[tmp2],         2(%[pfft])                        \n\t"
+    "sh         %[tmp3],         4(%[pfft])                        \n\t"
+    "sh         %[tmp4],         6(%[pfft])                        \n\t"
+    "addiu      %[ppfft],        %[ppfft],            16           \n\t"
+    "bgtz       %[i],            1b                                \n\t"
+    " addiu     %[pfft],         %[pfft],             8            \n\t"
+    ".set       pop                                                \n\t"
+    : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [pfft] "+r" (pfft),
+      [i] "=&r" (i), [tmp3] "=&r" (tmp3), [tmp4] "=&r" (tmp4),
+      [ppfft] "+r" (ppfft)
+    :
+    : "memory"
+  );
+
+  pfft = fft;
+  out_aecm = (int32_t)(outCFFT - aecm->dfaCleanQDomain);
+
+  __asm __volatile (
+    ".set       push                                                       \n\t"
+    ".set       noreorder                                                  \n\t"
+    "addiu      %[i],                $zero,                  64            \n\t"
+   "11:                                                                    \n\t"
+    "lh         %[tmp1],             0(%[pfft])                            \n\t"
+    "lh         %[tmp2],             0(%[p_kSqrtHanning])                  \n\t"
+    "addiu      %[i],                %[i],                   -2            \n\t"
+    "mul        %[tmp1],             %[tmp1],                %[tmp2]       \n\t"
+    "lh         %[tmp3],             2(%[pfft])                            \n\t"
+    "lh         %[tmp4],             2(%[p_kSqrtHanning])                  \n\t"
+    "mul        %[tmp3],             %[tmp3],                %[tmp4]       \n\t"
+    "addiu      %[tmp1],             %[tmp1],                8192          \n\t"
+    "sra        %[tmp1],             %[tmp1],                14            \n\t"
+    "addiu      %[tmp3],             %[tmp3],                8192          \n\t"
+    "sra        %[tmp3],             %[tmp3],                14            \n\t"
+    "bgez       %[out_aecm],         1f                                    \n\t"
+    " negu      %[tmp2],             %[out_aecm]                           \n\t"
+    "srav       %[tmp1],             %[tmp1],                %[tmp2]       \n\t"
+    "b          2f                                                         \n\t"
+    " srav      %[tmp3],             %[tmp3],                %[tmp2]       \n\t"
+   "1:                                                                     \n\t"
+    "sllv       %[tmp1],             %[tmp1],                %[out_aecm]   \n\t"
+    "sllv       %[tmp3],             %[tmp3],                %[out_aecm]   \n\t"
+   "2:                                                                     \n\t"
+    "lh         %[tmp4],             0(%[paecm_buf])                       \n\t"
+    "lh         %[tmp2],             2(%[paecm_buf])                       \n\t"
+    "addu       %[tmp3],             %[tmp3],                %[tmp2]       \n\t"
+    "addu       %[tmp1],             %[tmp1],                %[tmp4]       \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shll_s.w   %[tmp1],             %[tmp1],                16            \n\t"
+    "sra        %[tmp1],             %[tmp1],                16            \n\t"
+    "shll_s.w   %[tmp3],             %[tmp3],                16            \n\t"
+    "sra        %[tmp3],             %[tmp3],                16            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "sra        %[tmp4],             %[tmp1],                31            \n\t"
+    "sra        %[tmp2],             %[tmp1],                15            \n\t"
+    "beq        %[tmp4],             %[tmp2],                3f            \n\t"
+    " ori       %[tmp2],             $zero,                  0x7fff        \n\t"
+    "xor        %[tmp1],             %[tmp2],                %[tmp4]       \n\t"
+   "3:                                                                     \n\t"
+    "sra        %[tmp2],             %[tmp3],                31            \n\t"
+    "sra        %[tmp4],             %[tmp3],                15            \n\t"
+    "beq        %[tmp2],             %[tmp4],                4f            \n\t"
+    " ori       %[tmp4],             $zero,                  0x7fff        \n\t"
+    "xor        %[tmp3],             %[tmp4],                %[tmp2]       \n\t"
+   "4:                                                                     \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sh         %[tmp1],             0(%[pfft])                            \n\t"
+    "sh         %[tmp1],             0(%[output1])                         \n\t"
+    "sh         %[tmp3],             2(%[pfft])                            \n\t"
+    "sh         %[tmp3],             2(%[output1])                         \n\t"
+    "lh         %[tmp1],             128(%[pfft])                          \n\t"
+    "lh         %[tmp2],             0(%[pp_kSqrtHanning])                 \n\t"
+    "mul        %[tmp1],             %[tmp1],                %[tmp2]       \n\t"
+    "lh         %[tmp3],             130(%[pfft])                          \n\t"
+    "lh         %[tmp4],             -2(%[pp_kSqrtHanning])                \n\t"
+    "mul        %[tmp3],             %[tmp3],                %[tmp4]       \n\t"
+    "sra        %[tmp1],             %[tmp1],                14            \n\t"
+    "sra        %[tmp3],             %[tmp3],                14            \n\t"
+    "bgez       %[out_aecm],         5f                                    \n\t"
+    " negu      %[tmp2],             %[out_aecm]                           \n\t"
+    "srav       %[tmp3],             %[tmp3],                %[tmp2]       \n\t"
+    "b          6f                                                         \n\t"
+    " srav      %[tmp1],             %[tmp1],                %[tmp2]       \n\t"
+   "5:                                                                     \n\t"
+    "sllv       %[tmp1],             %[tmp1],                %[out_aecm]   \n\t"
+    "sllv       %[tmp3],             %[tmp3],                %[out_aecm]   \n\t"
+   "6:                                                                     \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    "shll_s.w   %[tmp1],             %[tmp1],                16            \n\t"
+    "sra        %[tmp1],             %[tmp1],                16            \n\t"
+    "shll_s.w   %[tmp3],             %[tmp3],                16            \n\t"
+    "sra        %[tmp3],             %[tmp3],                16            \n\t"
+#else  // #if defined(MIPS_DSP_R1_LE)
+    "sra        %[tmp4],             %[tmp1],                31            \n\t"
+    "sra        %[tmp2],             %[tmp1],                15            \n\t"
+    "beq        %[tmp4],             %[tmp2],                7f            \n\t"
+    " ori       %[tmp2],             $zero,                  0x7fff        \n\t"
+    "xor        %[tmp1],             %[tmp2],                %[tmp4]       \n\t"
+   "7:                                                                     \n\t"
+    "sra        %[tmp2],             %[tmp3],                31            \n\t"
+    "sra        %[tmp4],             %[tmp3],                15            \n\t"
+    "beq        %[tmp2],             %[tmp4],                8f            \n\t"
+    " ori       %[tmp4],             $zero,                  0x7fff        \n\t"
+    "xor        %[tmp3],             %[tmp4],                %[tmp2]       \n\t"
+   "8:                                                                     \n\t"
+#endif  // #if defined(MIPS_DSP_R1_LE)
+    "sh         %[tmp1],             0(%[paecm_buf])                       \n\t"
+    "sh         %[tmp3],             2(%[paecm_buf])                       \n\t"
+    "addiu      %[output1],          %[output1],             4             \n\t"
+    "addiu      %[paecm_buf],        %[paecm_buf],           4             \n\t"
+    "addiu      %[pfft],             %[pfft],                4             \n\t"
+    "addiu      %[p_kSqrtHanning],   %[p_kSqrtHanning],      4             \n\t"
+    "bgtz       %[i],                11b                                   \n\t"
+    " addiu     %[pp_kSqrtHanning],  %[pp_kSqrtHanning],     -4            \n\t"
+    ".set       pop                                                        \n\t"
+    : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [pfft] "+r" (pfft),
+      [output1] "+r" (output1), [tmp3] "=&r" (tmp3), [tmp4] "=&r" (tmp4),
+      [paecm_buf] "+r" (paecm_buf), [i] "=&r" (i),
+      [pp_kSqrtHanning] "+r" (pp_kSqrtHanning),
+      [p_kSqrtHanning] "+r" (p_kSqrtHanning)
+    : [out_aecm] "r" (out_aecm),
+      [WebRtcAecm_kSqrtHanning] "r" (WebRtcAecm_kSqrtHanning)
+    : "hi", "lo","memory"
+  );
+
+  // Copy the current block to the old position
+  // (aecm->outBuf is shifted elsewhere)
+  memcpy(aecm->xBuf, aecm->xBuf + PART_LEN, sizeof(int16_t) * PART_LEN);
+  memcpy(aecm->dBufNoisy,
+         aecm->dBufNoisy + PART_LEN,
+         sizeof(int16_t) * PART_LEN);
+  if (nearendClean != NULL) {
+    memcpy(aecm->dBufClean,
+           aecm->dBufClean + PART_LEN,
+           sizeof(int16_t) * PART_LEN);
+  }
+}
+
+void WebRtcAecm_CalcLinearEnergies_mips(AecmCore* aecm,
+                                        const uint16_t* far_spectrum,
+                                        int32_t* echo_est,
+                                        uint32_t* far_energy,
+                                        uint32_t* echo_energy_adapt,
+                                        uint32_t* echo_energy_stored) {
+  int i;
+  uint32_t par1 = (*far_energy);
+  uint32_t par2 = (*echo_energy_adapt);
+  uint32_t par3 = (*echo_energy_stored);
+  int16_t* ch_stored_p = &(aecm->channelStored[0]);
+  int16_t* ch_adapt_p = &(aecm->channelAdapt16[0]);
+  uint16_t* spectrum_p = (uint16_t*)(&(far_spectrum[0]));
+  int32_t* echo_p = &(echo_est[0]);
+  int32_t temp0, stored0, echo0, adept0, spectrum0;
+  int32_t stored1, adept1, spectrum1, echo1, temp1;
+
+  // Get energy for the delayed far end signal and estimated
+  // echo using both stored and adapted channels.
+  for (i = 0; i < PART_LEN; i+= 4) {
+    __asm __volatile (
+      ".set           push                                            \n\t"
+      ".set           noreorder                                       \n\t"
+      "lh             %[stored0],     0(%[ch_stored_p])               \n\t"
+      "lhu            %[adept0],      0(%[ch_adapt_p])                \n\t"
+      "lhu            %[spectrum0],   0(%[spectrum_p])                \n\t"
+      "lh             %[stored1],     2(%[ch_stored_p])               \n\t"
+      "lhu            %[adept1],      2(%[ch_adapt_p])                \n\t"
+      "lhu            %[spectrum1],   2(%[spectrum_p])                \n\t"
+      "mul            %[echo0],       %[stored0],     %[spectrum0]    \n\t"
+      "mul            %[temp0],       %[adept0],      %[spectrum0]    \n\t"
+      "mul            %[echo1],       %[stored1],     %[spectrum1]    \n\t"
+      "mul            %[temp1],       %[adept1],      %[spectrum1]    \n\t"
+      "addu           %[par1],        %[par1],        %[spectrum0]    \n\t"
+      "addu           %[par1],        %[par1],        %[spectrum1]    \n\t"
+      "addiu          %[echo_p],      %[echo_p],      16              \n\t"
+      "addu           %[par3],        %[par3],        %[echo0]        \n\t"
+      "addu           %[par2],        %[par2],        %[temp0]        \n\t"
+      "addu           %[par3],        %[par3],        %[echo1]        \n\t"
+      "addu           %[par2],        %[par2],        %[temp1]        \n\t"
+      "usw            %[echo0],       -16(%[echo_p])                  \n\t"
+      "usw            %[echo1],       -12(%[echo_p])                  \n\t"
+      "lh             %[stored0],     4(%[ch_stored_p])               \n\t"
+      "lhu            %[adept0],      4(%[ch_adapt_p])                \n\t"
+      "lhu            %[spectrum0],   4(%[spectrum_p])                \n\t"
+      "lh             %[stored1],     6(%[ch_stored_p])               \n\t"
+      "lhu            %[adept1],      6(%[ch_adapt_p])                \n\t"
+      "lhu            %[spectrum1],   6(%[spectrum_p])                \n\t"
+      "mul            %[echo0],       %[stored0],     %[spectrum0]    \n\t"
+      "mul            %[temp0],       %[adept0],      %[spectrum0]    \n\t"
+      "mul            %[echo1],       %[stored1],     %[spectrum1]    \n\t"
+      "mul            %[temp1],       %[adept1],      %[spectrum1]    \n\t"
+      "addu           %[par1],        %[par1],        %[spectrum0]    \n\t"
+      "addu           %[par1],        %[par1],        %[spectrum1]    \n\t"
+      "addiu          %[ch_stored_p], %[ch_stored_p], 8               \n\t"
+      "addiu          %[ch_adapt_p],  %[ch_adapt_p],  8               \n\t"
+      "addiu          %[spectrum_p],  %[spectrum_p],  8               \n\t"
+      "addu           %[par3],        %[par3],        %[echo0]        \n\t"
+      "addu           %[par2],        %[par2],        %[temp0]        \n\t"
+      "addu           %[par3],        %[par3],        %[echo1]        \n\t"
+      "addu           %[par2],        %[par2],        %[temp1]        \n\t"
+      "usw            %[echo0],       -8(%[echo_p])                   \n\t"
+      "usw            %[echo1],       -4(%[echo_p])                   \n\t"
+      ".set           pop                                             \n\t"
+      : [temp0] "=&r" (temp0), [stored0] "=&r" (stored0),
+        [adept0] "=&r" (adept0), [spectrum0] "=&r" (spectrum0),
+        [echo0] "=&r" (echo0), [echo_p] "+r" (echo_p), [par3] "+r" (par3),
+        [par1] "+r" (par1), [par2] "+r" (par2), [stored1] "=&r" (stored1),
+        [adept1] "=&r" (adept1), [echo1] "=&r" (echo1),
+        [spectrum1] "=&r" (spectrum1), [temp1] "=&r" (temp1),
+        [ch_stored_p] "+r" (ch_stored_p), [ch_adapt_p] "+r" (ch_adapt_p),
+        [spectrum_p] "+r" (spectrum_p)
+      :
+      : "hi", "lo", "memory"
+    );
+  }
+
+  echo_est[PART_LEN] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[PART_LEN],
+                                             far_spectrum[PART_LEN]);
+  par1 += (uint32_t)(far_spectrum[PART_LEN]);
+  par2 += aecm->channelAdapt16[PART_LEN] * far_spectrum[PART_LEN];
+  par3 += (uint32_t)echo_est[PART_LEN];
+
+  (*far_energy) = par1;
+  (*echo_energy_adapt) = par2;
+  (*echo_energy_stored) = par3;
+}
+
+#if defined(MIPS_DSP_R1_LE)
+void WebRtcAecm_StoreAdaptiveChannel_mips(AecmCore* aecm,
+                                          const uint16_t* far_spectrum,
+                                          int32_t* echo_est) {
+  int i;
+  int16_t* temp1;
+  uint16_t* temp8;
+  int32_t temp0, temp2, temp3, temp4, temp5, temp6;
+  int32_t* temp7 = &(echo_est[0]);
+  temp1 = &(aecm->channelStored[0]);
+  temp8 = (uint16_t*)(&far_spectrum[0]);
+
+  // During startup we store the channel every block.
+  memcpy(aecm->channelStored, aecm->channelAdapt16,
+         sizeof(int16_t) * PART_LEN1);
+  // Recalculate echo estimate
+  for (i = 0; i < PART_LEN; i += 4) {
+    __asm __volatile (
+      "ulw            %[temp0],   0(%[temp8])               \n\t"
+      "ulw            %[temp2],   0(%[temp1])               \n\t"
+      "ulw            %[temp4],   4(%[temp8])               \n\t"
+      "ulw            %[temp5],   4(%[temp1])               \n\t"
+      "muleq_s.w.phl  %[temp3],   %[temp2],     %[temp0]    \n\t"
+      "muleq_s.w.phr  %[temp0],   %[temp2],     %[temp0]    \n\t"
+      "muleq_s.w.phl  %[temp6],   %[temp5],     %[temp4]    \n\t"
+      "muleq_s.w.phr  %[temp4],   %[temp5],     %[temp4]    \n\t"
+      "addiu          %[temp7],   %[temp7],     16          \n\t"
+      "addiu          %[temp1],   %[temp1],     8           \n\t"
+      "addiu          %[temp8],   %[temp8],     8           \n\t"
+      "sra            %[temp3],   %[temp3],     1           \n\t"
+      "sra            %[temp0],   %[temp0],     1           \n\t"
+      "sra            %[temp6],   %[temp6],     1           \n\t"
+      "sra            %[temp4],   %[temp4],     1           \n\t"
+      "usw            %[temp3],   -12(%[temp7])             \n\t"
+      "usw            %[temp0],   -16(%[temp7])             \n\t"
+      "usw            %[temp6],   -4(%[temp7])              \n\t"
+      "usw            %[temp4],   -8(%[temp7])              \n\t"
+      : [temp0] "=&r" (temp0), [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
+        [temp4] "=&r" (temp4), [temp5] "=&r" (temp5), [temp6] "=&r" (temp6),
+        [temp1] "+r" (temp1), [temp8] "+r" (temp8), [temp7] "+r" (temp7)
+      :
+      : "hi", "lo", "memory"
+    );
+  }
+  echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i],
+                                      far_spectrum[i]);
+}
+
+void WebRtcAecm_ResetAdaptiveChannel_mips(AecmCore* aecm) {
+  int i;
+  int32_t* temp3;
+  int16_t* temp0;
+  int32_t temp1, temp2, temp4, temp5;
+
+  temp0 = &(aecm->channelStored[0]);
+  temp3 = &(aecm->channelAdapt32[0]);
+
+  // The stored channel has a significantly lower MSE than the adaptive one for
+  // two consecutive calculations. Reset the adaptive channel.
+  memcpy(aecm->channelAdapt16,
+         aecm->channelStored,
+         sizeof(int16_t) * PART_LEN1);
+
+  // Restore the W32 channel
+  for (i = 0; i < PART_LEN; i += 4) {
+    __asm __volatile (
+      "ulw            %[temp1], 0(%[temp0])           \n\t"
+      "ulw            %[temp4], 4(%[temp0])           \n\t"
+      "preceq.w.phl   %[temp2], %[temp1]              \n\t"
+      "preceq.w.phr   %[temp1], %[temp1]              \n\t"
+      "preceq.w.phl   %[temp5], %[temp4]              \n\t"
+      "preceq.w.phr   %[temp4], %[temp4]              \n\t"
+      "addiu          %[temp0], %[temp0], 8           \n\t"
+      "usw            %[temp2], 4(%[temp3])           \n\t"
+      "usw            %[temp1], 0(%[temp3])           \n\t"
+      "usw            %[temp5], 12(%[temp3])          \n\t"
+      "usw            %[temp4], 8(%[temp3])           \n\t"
+      "addiu          %[temp3], %[temp3], 16          \n\t"
+      : [temp1] "=&r" (temp1), [temp2] "=&r" (temp2),
+        [temp4] "=&r" (temp4), [temp5] "=&r" (temp5),
+        [temp3] "+r" (temp3), [temp0] "+r" (temp0)
+      :
+      : "memory"
+    );
+  }
+
+  aecm->channelAdapt32[i] = (int32_t)aecm->channelStored[i] << 16;
+}
+#endif  // #if defined(MIPS_DSP_R1_LE)
+
+// Transforms a time domain signal into the frequency domain, outputting the
+// complex valued signal, absolute value and sum of absolute values.
+//
+// time_signal          [in]    Pointer to time domain signal
+// freq_signal_real     [out]   Pointer to real part of frequency domain array
+// freq_signal_imag     [out]   Pointer to imaginary part of frequency domain
+//                              array
+// freq_signal_abs      [out]   Pointer to absolute value of frequency domain
+//                              array
+// freq_signal_sum_abs  [out]   Pointer to the sum of all absolute values in
+//                              the frequency domain array
+// return value                 The Q-domain of current frequency values
+//
+static int TimeToFrequencyDomain(AecmCore* aecm,
+                                 const int16_t* time_signal,
+                                 ComplexInt16* freq_signal,
+                                 uint16_t* freq_signal_abs,
+                                 uint32_t* freq_signal_sum_abs) {
+  int i = 0;
+  int time_signal_scaling = 0;
+
+  // In fft_buf, +16 for 32-byte alignment.
+  int16_t fft_buf[PART_LEN4 + 16];
+  int16_t *fft = (int16_t *) (((uintptr_t) fft_buf + 31) & ~31);
+
+  int16_t tmp16no1;
+#if !defined(MIPS_DSP_R2_LE)
+  int32_t tmp32no1;
+  int32_t tmp32no2;
+  int16_t tmp16no2;
+#else
+  int32_t tmp32no10, tmp32no11, tmp32no12, tmp32no13;
+  int32_t tmp32no20, tmp32no21, tmp32no22, tmp32no23;
+  int16_t* freqp;
+  uint16_t* freqabsp;
+  uint32_t freqt0, freqt1, freqt2, freqt3;
+  uint32_t freqs;
+#endif
+
+#ifdef AECM_DYNAMIC_Q
+  tmp16no1 = WebRtcSpl_MaxAbsValueW16(time_signal, PART_LEN2);
+  time_signal_scaling = WebRtcSpl_NormW16(tmp16no1);
+#endif
+
+  WindowAndFFT(aecm, fft, time_signal, freq_signal, time_signal_scaling);
+
+  // Extract imaginary and real part,
+  // calculate the magnitude for all frequency bins
+  freq_signal[0].imag = 0;
+  freq_signal[PART_LEN].imag = 0;
+  freq_signal[PART_LEN].real = fft[PART_LEN2];
+  freq_signal_abs[0] = (uint16_t)WEBRTC_SPL_ABS_W16(freq_signal[0].real);
+  freq_signal_abs[PART_LEN] = (uint16_t)WEBRTC_SPL_ABS_W16(
+    freq_signal[PART_LEN].real);
+  (*freq_signal_sum_abs) = (uint32_t)(freq_signal_abs[0]) +
+    (uint32_t)(freq_signal_abs[PART_LEN]);
+
+#if !defined(MIPS_DSP_R2_LE)
+  for (i = 1; i < PART_LEN; i++) {
+    if (freq_signal[i].real == 0)
+    {
+      freq_signal_abs[i] = (uint16_t)WEBRTC_SPL_ABS_W16(
+        freq_signal[i].imag);
+    }
+    else if (freq_signal[i].imag == 0)
+    {
+      freq_signal_abs[i] = (uint16_t)WEBRTC_SPL_ABS_W16(
+        freq_signal[i].real);
+    }
+    else
+    {
+      // Approximation for magnitude of complex fft output
+      // magn = sqrt(real^2 + imag^2)
+      // magn ~= alpha * max(|imag|,|real|) + beta * min(|imag|,|real|)
+      //
+      // The parameters alpha and beta are stored in Q15
+      tmp16no1 = WEBRTC_SPL_ABS_W16(freq_signal[i].real);
+      tmp16no2 = WEBRTC_SPL_ABS_W16(freq_signal[i].imag);
+      tmp32no1 = tmp16no1 * tmp16no1;
+      tmp32no2 = tmp16no2 * tmp16no2;
+      tmp32no2 = WebRtcSpl_AddSatW32(tmp32no1, tmp32no2);
+      tmp32no1 = WebRtcSpl_SqrtFloor(tmp32no2);
+
+      freq_signal_abs[i] = (uint16_t)tmp32no1;
+    }
+    (*freq_signal_sum_abs) += (uint32_t)freq_signal_abs[i];
+  }
+#else // #if !defined(MIPS_DSP_R2_LE)
+  freqs = (uint32_t)(freq_signal_abs[0]) +
+          (uint32_t)(freq_signal_abs[PART_LEN]);
+  freqp = &(freq_signal[1].real);
+
+  __asm __volatile (
+    "lw             %[freqt0],      0(%[freqp])             \n\t"
+    "lw             %[freqt1],      4(%[freqp])             \n\t"
+    "lw             %[freqt2],      8(%[freqp])             \n\t"
+    "mult           $ac0,           $zero,      $zero       \n\t"
+    "mult           $ac1,           $zero,      $zero       \n\t"
+    "mult           $ac2,           $zero,      $zero       \n\t"
+    "dpaq_s.w.ph    $ac0,           %[freqt0],  %[freqt0]   \n\t"
+    "dpaq_s.w.ph    $ac1,           %[freqt1],  %[freqt1]   \n\t"
+    "dpaq_s.w.ph    $ac2,           %[freqt2],  %[freqt2]   \n\t"
+    "addiu          %[freqp],       %[freqp],   12          \n\t"
+    "extr.w         %[tmp32no20],   $ac0,       1           \n\t"
+    "extr.w         %[tmp32no21],   $ac1,       1           \n\t"
+    "extr.w         %[tmp32no22],   $ac2,       1           \n\t"
+    : [freqt0] "=&r" (freqt0), [freqt1] "=&r" (freqt1),
+      [freqt2] "=&r" (freqt2), [freqp] "+r" (freqp),
+      [tmp32no20] "=r" (tmp32no20), [tmp32no21] "=r" (tmp32no21),
+      [tmp32no22] "=r" (tmp32no22)
+    :
+    : "memory", "hi", "lo", "$ac1hi", "$ac1lo", "$ac2hi", "$ac2lo"
+  );
+
+  tmp32no10 = WebRtcSpl_SqrtFloor(tmp32no20);
+  tmp32no11 = WebRtcSpl_SqrtFloor(tmp32no21);
+  tmp32no12 = WebRtcSpl_SqrtFloor(tmp32no22);
+  freq_signal_abs[1] = (uint16_t)tmp32no10;
+  freq_signal_abs[2] = (uint16_t)tmp32no11;
+  freq_signal_abs[3] = (uint16_t)tmp32no12;
+  freqs += (uint32_t)tmp32no10;
+  freqs += (uint32_t)tmp32no11;
+  freqs += (uint32_t)tmp32no12;
+  freqabsp = &(freq_signal_abs[4]);
+  for (i = 4; i < PART_LEN; i+=4)
+  {
+    __asm __volatile (
+      "ulw            %[freqt0],      0(%[freqp])                 \n\t"
+      "ulw            %[freqt1],      4(%[freqp])                 \n\t"
+      "ulw            %[freqt2],      8(%[freqp])                 \n\t"
+      "ulw            %[freqt3],      12(%[freqp])                \n\t"
+      "mult           $ac0,           $zero,          $zero       \n\t"
+      "mult           $ac1,           $zero,          $zero       \n\t"
+      "mult           $ac2,           $zero,          $zero       \n\t"
+      "mult           $ac3,           $zero,          $zero       \n\t"
+      "dpaq_s.w.ph    $ac0,           %[freqt0],      %[freqt0]   \n\t"
+      "dpaq_s.w.ph    $ac1,           %[freqt1],      %[freqt1]   \n\t"
+      "dpaq_s.w.ph    $ac2,           %[freqt2],      %[freqt2]   \n\t"
+      "dpaq_s.w.ph    $ac3,           %[freqt3],      %[freqt3]   \n\t"
+      "addiu          %[freqp],       %[freqp],       16          \n\t"
+      "addiu          %[freqabsp],    %[freqabsp],    8           \n\t"
+      "extr.w         %[tmp32no20],   $ac0,           1           \n\t"
+      "extr.w         %[tmp32no21],   $ac1,           1           \n\t"
+      "extr.w         %[tmp32no22],   $ac2,           1           \n\t"
+      "extr.w         %[tmp32no23],   $ac3,           1           \n\t"
+      : [freqt0] "=&r" (freqt0), [freqt1] "=&r" (freqt1),
+        [freqt2] "=&r" (freqt2), [freqt3] "=&r" (freqt3),
+        [tmp32no20] "=r" (tmp32no20), [tmp32no21] "=r" (tmp32no21),
+        [tmp32no22] "=r" (tmp32no22), [tmp32no23] "=r" (tmp32no23),
+        [freqabsp] "+r" (freqabsp), [freqp] "+r" (freqp)
+      :
+      : "memory", "hi", "lo", "$ac1hi", "$ac1lo",
+        "$ac2hi", "$ac2lo", "$ac3hi", "$ac3lo"
+    );
+
+    tmp32no10 = WebRtcSpl_SqrtFloor(tmp32no20);
+    tmp32no11 = WebRtcSpl_SqrtFloor(tmp32no21);
+    tmp32no12 = WebRtcSpl_SqrtFloor(tmp32no22);
+    tmp32no13 = WebRtcSpl_SqrtFloor(tmp32no23);
+
+    __asm __volatile (
+      "sh             %[tmp32no10],   -8(%[freqabsp])                 \n\t"
+      "sh             %[tmp32no11],   -6(%[freqabsp])                 \n\t"
+      "sh             %[tmp32no12],   -4(%[freqabsp])                 \n\t"
+      "sh             %[tmp32no13],   -2(%[freqabsp])                 \n\t"
+      "addu           %[freqs],       %[freqs],       %[tmp32no10]    \n\t"
+      "addu           %[freqs],       %[freqs],       %[tmp32no11]    \n\t"
+      "addu           %[freqs],       %[freqs],       %[tmp32no12]    \n\t"
+      "addu           %[freqs],       %[freqs],       %[tmp32no13]    \n\t"
+      : [freqs] "+r" (freqs)
+      : [tmp32no10] "r" (tmp32no10), [tmp32no11] "r" (tmp32no11),
+        [tmp32no12] "r" (tmp32no12), [tmp32no13] "r" (tmp32no13),
+        [freqabsp] "r" (freqabsp)
+      : "memory"
+    );
+  }
+
+  (*freq_signal_sum_abs) = freqs;
+#endif
+
+  return time_signal_scaling;
+}
+
+int WebRtcAecm_ProcessBlock(AecmCore* aecm,
+                            const int16_t* farend,
+                            const int16_t* nearendNoisy,
+                            const int16_t* nearendClean,
+                            int16_t* output) {
+  int i;
+  uint32_t xfaSum;
+  uint32_t dfaNoisySum;
+  uint32_t dfaCleanSum;
+  uint32_t echoEst32Gained;
+  uint32_t tmpU32;
+  int32_t tmp32no1;
+
+  uint16_t xfa[PART_LEN1];
+  uint16_t dfaNoisy[PART_LEN1];
+  uint16_t dfaClean[PART_LEN1];
+  uint16_t* ptrDfaClean = dfaClean;
+  const uint16_t* far_spectrum_ptr = NULL;
+
+  // 32 byte aligned buffers (with +8 or +16).
+  int16_t fft_buf[PART_LEN4 + 2 + 16]; // +2 to make a loop safe.
+  int32_t echoEst32_buf[PART_LEN1 + 8];
+  int32_t dfw_buf[PART_LEN2 + 8];
+  int32_t efw_buf[PART_LEN2 + 8];
+
+  int16_t* fft = (int16_t*)(((uint32_t)fft_buf + 31) & ~ 31);
+  int32_t* echoEst32 = (int32_t*)(((uint32_t)echoEst32_buf + 31) & ~ 31);
+  ComplexInt16* dfw = (ComplexInt16*)(((uint32_t)dfw_buf + 31) & ~31);
+  ComplexInt16* efw = (ComplexInt16*)(((uint32_t)efw_buf + 31) & ~31);
+
+  int16_t hnl[PART_LEN1];
+  int16_t numPosCoef = 0;
+  int delay;
+  int16_t tmp16no1;
+  int16_t tmp16no2;
+  int16_t mu;
+  int16_t supGain;
+  int16_t zeros32, zeros16;
+  int16_t zerosDBufNoisy, zerosDBufClean, zerosXBuf;
+  int far_q;
+  int16_t resolutionDiff, qDomainDiff, dfa_clean_q_domain_diff;
+
+  const int kMinPrefBand = 4;
+  const int kMaxPrefBand = 24;
+  int32_t avgHnl32 = 0;
+
+  int32_t temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8;
+  int16_t* ptr;
+  int16_t* ptr1;
+  int16_t* er_ptr;
+  int16_t* dr_ptr;
+
+  ptr = &hnl[0];
+  ptr1 = &hnl[0];
+  er_ptr = &efw[0].real;
+  dr_ptr = &dfw[0].real;
+
+  // Determine startup state. There are three states:
+  // (0) the first CONV_LEN blocks
+  // (1) another CONV_LEN blocks
+  // (2) the rest
+
+  if (aecm->startupState < 2) {
+    aecm->startupState = (aecm->totCount >= CONV_LEN) +
+                         (aecm->totCount >= CONV_LEN2);
+  }
+  // END: Determine startup state
+
+  // Buffer near and far end signals
+  memcpy(aecm->xBuf + PART_LEN, farend, sizeof(int16_t) * PART_LEN);
+  memcpy(aecm->dBufNoisy + PART_LEN,
+         nearendNoisy,
+         sizeof(int16_t) * PART_LEN);
+  if (nearendClean != NULL) {
+    memcpy(aecm->dBufClean + PART_LEN,
+           nearendClean,
+           sizeof(int16_t) * PART_LEN);
+  }
+
+  // Transform far end signal from time domain to frequency domain.
+  far_q = TimeToFrequencyDomain(aecm,
+                                aecm->xBuf,
+                                dfw,
+                                xfa,
+                                &xfaSum);
+
+  // Transform noisy near end signal from time domain to frequency domain.
+  zerosDBufNoisy = TimeToFrequencyDomain(aecm,
+                                         aecm->dBufNoisy,
+                                         dfw,
+                                         dfaNoisy,
+                                         &dfaNoisySum);
+  aecm->dfaNoisyQDomainOld = aecm->dfaNoisyQDomain;
+  aecm->dfaNoisyQDomain = (int16_t)zerosDBufNoisy;
+
+  if (nearendClean == NULL) {
+    ptrDfaClean = dfaNoisy;
+    aecm->dfaCleanQDomainOld = aecm->dfaNoisyQDomainOld;
+    aecm->dfaCleanQDomain = aecm->dfaNoisyQDomain;
+    dfaCleanSum = dfaNoisySum;
+  } else {
+    // Transform clean near end signal from time domain to frequency domain.
+    zerosDBufClean = TimeToFrequencyDomain(aecm,
+                                           aecm->dBufClean,
+                                           dfw,
+                                           dfaClean,
+                                           &dfaCleanSum);
+    aecm->dfaCleanQDomainOld = aecm->dfaCleanQDomain;
+    aecm->dfaCleanQDomain = (int16_t)zerosDBufClean;
+  }
+
+  // Get the delay
+  // Save far-end history and estimate delay
+  WebRtcAecm_UpdateFarHistory(aecm, xfa, far_q);
+
+  if (WebRtc_AddFarSpectrumFix(aecm->delay_estimator_farend, xfa, PART_LEN1,
+                               far_q) == -1) {
+    return -1;
+  }
+  delay = WebRtc_DelayEstimatorProcessFix(aecm->delay_estimator,
+                                          dfaNoisy,
+                                          PART_LEN1,
+                                          zerosDBufNoisy);
+  if (delay == -1) {
+    return -1;
+  }
+  else if (delay == -2) {
+    // If the delay is unknown, we assume zero.
+    // NOTE: this will have to be adjusted if we ever add lookahead.
+    delay = 0;
+  }
+
+  if (aecm->fixedDelay >= 0) {
+    // Use fixed delay
+    delay = aecm->fixedDelay;
+  }
+
+  // Get aligned far end spectrum
+  far_spectrum_ptr = WebRtcAecm_AlignedFarend(aecm, &far_q, delay);
+  zerosXBuf = (int16_t) far_q;
+
+  if (far_spectrum_ptr == NULL) {
+    return -1;
+  }
+
+  // Calculate log(energy) and update energy threshold levels
+  WebRtcAecm_CalcEnergies(aecm,
+                          far_spectrum_ptr,
+                          zerosXBuf,
+                          dfaNoisySum,
+                          echoEst32);
+  // Calculate stepsize
+  mu = WebRtcAecm_CalcStepSize(aecm);
+
+  // Update counters
+  aecm->totCount++;
+
+  // This is the channel estimation algorithm.
+  // It is base on NLMS but has a variable step length,
+  // which was calculated above.
+  WebRtcAecm_UpdateChannel(aecm,
+                           far_spectrum_ptr,
+                           zerosXBuf,
+                           dfaNoisy,
+                           mu,
+                           echoEst32);
+
+  supGain = WebRtcAecm_CalcSuppressionGain(aecm);
+
+  // Calculate Wiener filter hnl[]
+  for (i = 0; i < PART_LEN1; i++) {
+    // Far end signal through channel estimate in Q8
+    // How much can we shift right to preserve resolution
+    tmp32no1 = echoEst32[i] - aecm->echoFilt[i];
+    aecm->echoFilt[i] +=
+        rtc::dchecked_cast<int32_t>((int64_t{tmp32no1} * 50) >> 8);
+
+    zeros32 = WebRtcSpl_NormW32(aecm->echoFilt[i]) + 1;
+    zeros16 = WebRtcSpl_NormW16(supGain) + 1;
+    if (zeros32 + zeros16 > 16) {
+      // Multiplication is safe
+      // Result in
+      // Q(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN+aecm->xfaQDomainBuf[diff])
+      echoEst32Gained = WEBRTC_SPL_UMUL_32_16((uint32_t)aecm->echoFilt[i],
+                                              (uint16_t)supGain);
+      resolutionDiff = 14 - RESOLUTION_CHANNEL16 - RESOLUTION_SUPGAIN;
+      resolutionDiff += (aecm->dfaCleanQDomain - zerosXBuf);
+    } else {
+      tmp16no1 = 17 - zeros32 - zeros16;
+      resolutionDiff = 14 + tmp16no1 - RESOLUTION_CHANNEL16 -
+                       RESOLUTION_SUPGAIN;
+      resolutionDiff += (aecm->dfaCleanQDomain - zerosXBuf);
+      if (zeros32 > tmp16no1) {
+        echoEst32Gained = WEBRTC_SPL_UMUL_32_16(
+                            (uint32_t)aecm->echoFilt[i],
+                            supGain >> tmp16no1);
+      } else {
+        // Result in Q-(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN-16)
+        echoEst32Gained = (aecm->echoFilt[i] >> tmp16no1) * supGain;
+      }
+    }
+
+    zeros16 = WebRtcSpl_NormW16(aecm->nearFilt[i]);
+    RTC_DCHECK_GE(zeros16, 0);  // |zeros16| is a norm, hence non-negative.
+    dfa_clean_q_domain_diff = aecm->dfaCleanQDomain - aecm->dfaCleanQDomainOld;
+    if (zeros16 < dfa_clean_q_domain_diff && aecm->nearFilt[i]) {
+      tmp16no1 = aecm->nearFilt[i] << zeros16;
+      qDomainDiff = zeros16 - dfa_clean_q_domain_diff;
+      tmp16no2 = ptrDfaClean[i] >> -qDomainDiff;
+    } else {
+      tmp16no1 = dfa_clean_q_domain_diff < 0
+          ? aecm->nearFilt[i] >> -dfa_clean_q_domain_diff
+          : aecm->nearFilt[i] << dfa_clean_q_domain_diff;
+      qDomainDiff = 0;
+      tmp16no2 = ptrDfaClean[i];
+    }
+
+    tmp32no1 = (int32_t)(tmp16no2 - tmp16no1);
+    tmp16no2 = (int16_t)(tmp32no1 >> 4);
+    tmp16no2 += tmp16no1;
+    zeros16 = WebRtcSpl_NormW16(tmp16no2);
+    if ((tmp16no2) & (-qDomainDiff > zeros16)) {
+      aecm->nearFilt[i] = WEBRTC_SPL_WORD16_MAX;
+    } else {
+      aecm->nearFilt[i] = qDomainDiff < 0 ? tmp16no2 << -qDomainDiff
+                                          : tmp16no2 >> qDomainDiff;
+    }
+
+    // Wiener filter coefficients, resulting hnl in Q14
+    if (echoEst32Gained == 0) {
+      hnl[i] = ONE_Q14;
+      numPosCoef++;
+    } else if (aecm->nearFilt[i] == 0) {
+      hnl[i] = 0;
+    } else {
+      // Multiply the suppression gain
+      // Rounding
+      echoEst32Gained += (uint32_t)(aecm->nearFilt[i] >> 1);
+      tmpU32 = WebRtcSpl_DivU32U16(echoEst32Gained,
+                                   (uint16_t)aecm->nearFilt[i]);
+
+      // Current resolution is
+      // Q-(RESOLUTION_CHANNEL + RESOLUTION_SUPGAIN
+      //    - max(0, 17 - zeros16 - zeros32))
+      // Make sure we are in Q14
+      tmp32no1 = (int32_t)WEBRTC_SPL_SHIFT_W32(tmpU32, resolutionDiff);
+      if (tmp32no1 > ONE_Q14) {
+        hnl[i] = 0;
+      } else if (tmp32no1 < 0) {
+        hnl[i] = ONE_Q14;
+        numPosCoef++;
+      } else {
+        // 1-echoEst/dfa
+        hnl[i] = ONE_Q14 - (int16_t)tmp32no1;
+        if (hnl[i] <= 0) {
+          hnl[i] = 0;
+        } else {
+          numPosCoef++;
+        }
+      }
+    }
+  }
+
+  // Only in wideband. Prevent the gain in upper band from being larger than
+  // in lower band.
+  if (aecm->mult == 2) {
+    // TODO(bjornv): Investigate if the scaling of hnl[i] below can cause
+    //               speech distortion in double-talk.
+    for (i = 0; i < (PART_LEN1 >> 3); i++) {
+      __asm __volatile (
+        "lh         %[temp1],       0(%[ptr1])                  \n\t"
+        "lh         %[temp2],       2(%[ptr1])                  \n\t"
+        "lh         %[temp3],       4(%[ptr1])                  \n\t"
+        "lh         %[temp4],       6(%[ptr1])                  \n\t"
+        "lh         %[temp5],       8(%[ptr1])                  \n\t"
+        "lh         %[temp6],       10(%[ptr1])                 \n\t"
+        "lh         %[temp7],       12(%[ptr1])                 \n\t"
+        "lh         %[temp8],       14(%[ptr1])                 \n\t"
+        "mul        %[temp1],       %[temp1],       %[temp1]    \n\t"
+        "mul        %[temp2],       %[temp2],       %[temp2]    \n\t"
+        "mul        %[temp3],       %[temp3],       %[temp3]    \n\t"
+        "mul        %[temp4],       %[temp4],       %[temp4]    \n\t"
+        "mul        %[temp5],       %[temp5],       %[temp5]    \n\t"
+        "mul        %[temp6],       %[temp6],       %[temp6]    \n\t"
+        "mul        %[temp7],       %[temp7],       %[temp7]    \n\t"
+        "mul        %[temp8],       %[temp8],       %[temp8]    \n\t"
+        "sra        %[temp1],       %[temp1],       14          \n\t"
+        "sra        %[temp2],       %[temp2],       14          \n\t"
+        "sra        %[temp3],       %[temp3],       14          \n\t"
+        "sra        %[temp4],       %[temp4],       14          \n\t"
+        "sra        %[temp5],       %[temp5],       14          \n\t"
+        "sra        %[temp6],       %[temp6],       14          \n\t"
+        "sra        %[temp7],       %[temp7],       14          \n\t"
+        "sra        %[temp8],       %[temp8],       14          \n\t"
+        "sh         %[temp1],       0(%[ptr1])                  \n\t"
+        "sh         %[temp2],       2(%[ptr1])                  \n\t"
+        "sh         %[temp3],       4(%[ptr1])                  \n\t"
+        "sh         %[temp4],       6(%[ptr1])                  \n\t"
+        "sh         %[temp5],       8(%[ptr1])                  \n\t"
+        "sh         %[temp6],       10(%[ptr1])                 \n\t"
+        "sh         %[temp7],       12(%[ptr1])                 \n\t"
+        "sh         %[temp8],       14(%[ptr1])                 \n\t"
+        "addiu      %[ptr1],        %[ptr1],        16          \n\t"
+        : [temp1] "=&r" (temp1), [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
+          [temp4] "=&r" (temp4), [temp5] "=&r" (temp5), [temp6] "=&r" (temp6),
+          [temp7] "=&r" (temp7), [temp8] "=&r" (temp8), [ptr1] "+r" (ptr1)
+        :
+        : "memory", "hi", "lo"
+      );
+    }
+    for(i = 0; i < (PART_LEN1 & 7); i++) {
+      __asm __volatile (
+        "lh         %[temp1],       0(%[ptr1])                  \n\t"
+        "mul        %[temp1],       %[temp1],       %[temp1]    \n\t"
+        "sra        %[temp1],       %[temp1],       14          \n\t"
+        "sh         %[temp1],       0(%[ptr1])                  \n\t"
+        "addiu      %[ptr1],        %[ptr1],        2           \n\t"
+        : [temp1] "=&r" (temp1), [ptr1] "+r" (ptr1)
+        :
+        : "memory", "hi", "lo"
+      );
+    }
+
+    for (i = kMinPrefBand; i <= kMaxPrefBand; i++) {
+      avgHnl32 += (int32_t)hnl[i];
+    }
+
+    RTC_DCHECK_GT(kMaxPrefBand - kMinPrefBand + 1, 0);
+    avgHnl32 /= (kMaxPrefBand - kMinPrefBand + 1);
+
+    for (i = kMaxPrefBand; i < PART_LEN1; i++) {
+      if (hnl[i] > (int16_t)avgHnl32) {
+        hnl[i] = (int16_t)avgHnl32;
+      }
+    }
+  }
+
+  // Calculate NLP gain, result is in Q14
+  if (aecm->nlpFlag) {
+    if (numPosCoef < 3) {
+      for (i = 0; i < PART_LEN1; i++) {
+        efw[i].real = 0;
+        efw[i].imag = 0;
+        hnl[i] = 0;
+      }
+    } else {
+      for (i = 0; i < PART_LEN1; i++) {
+#if defined(MIPS_DSP_R1_LE)
+        __asm __volatile (
+          ".set       push                                        \n\t"
+          ".set       noreorder                                   \n\t"
+          "lh         %[temp1],       0(%[ptr])                   \n\t"
+          "lh         %[temp2],       0(%[dr_ptr])                \n\t"
+          "slti       %[temp4],       %[temp1],       0x4001      \n\t"
+          "beqz       %[temp4],       3f                          \n\t"
+          " lh        %[temp3],       2(%[dr_ptr])                \n\t"
+          "slti       %[temp5],       %[temp1],       3277        \n\t"
+          "bnez       %[temp5],       2f                          \n\t"
+          " addiu     %[dr_ptr],      %[dr_ptr],      4           \n\t"
+          "mul        %[temp2],       %[temp2],       %[temp1]    \n\t"
+          "mul        %[temp3],       %[temp3],       %[temp1]    \n\t"
+          "shra_r.w   %[temp2],       %[temp2],       14          \n\t"
+          "shra_r.w   %[temp3],       %[temp3],       14          \n\t"
+          "b          4f                                          \n\t"
+          " nop                                                   \n\t"
+         "2:                                                      \n\t"
+          "addu       %[temp1],       $zero,          $zero       \n\t"
+          "addu       %[temp2],       $zero,          $zero       \n\t"
+          "addu       %[temp3],       $zero,          $zero       \n\t"
+          "b          1f                                          \n\t"
+          " nop                                                   \n\t"
+         "3:                                                      \n\t"
+          "addiu      %[temp1],       $0,             0x4000      \n\t"
+         "1:                                                      \n\t"
+          "sh         %[temp1],       0(%[ptr])                   \n\t"
+         "4:                                                      \n\t"
+          "sh         %[temp2],       0(%[er_ptr])                \n\t"
+          "sh         %[temp3],       2(%[er_ptr])                \n\t"
+          "addiu      %[ptr],         %[ptr],         2           \n\t"
+          "addiu      %[er_ptr],      %[er_ptr],      4           \n\t"
+          ".set       pop                                         \n\t"
+          : [temp1] "=&r" (temp1), [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
+            [temp4] "=&r" (temp4), [temp5] "=&r" (temp5), [ptr] "+r" (ptr),
+            [er_ptr] "+r" (er_ptr), [dr_ptr] "+r" (dr_ptr)
+          :
+          : "memory", "hi", "lo"
+        );
+#else
+        __asm __volatile (
+          ".set       push                                        \n\t"
+          ".set       noreorder                                   \n\t"
+          "lh         %[temp1],       0(%[ptr])                   \n\t"
+          "lh         %[temp2],       0(%[dr_ptr])                \n\t"
+          "slti       %[temp4],       %[temp1],       0x4001      \n\t"
+          "beqz       %[temp4],       3f                          \n\t"
+          " lh        %[temp3],       2(%[dr_ptr])                \n\t"
+          "slti       %[temp5],       %[temp1],       3277        \n\t"
+          "bnez       %[temp5],       2f                          \n\t"
+          " addiu     %[dr_ptr],      %[dr_ptr],      4           \n\t"
+          "mul        %[temp2],       %[temp2],       %[temp1]    \n\t"
+          "mul        %[temp3],       %[temp3],       %[temp1]    \n\t"
+          "addiu      %[temp2],       %[temp2],       0x2000      \n\t"
+          "addiu      %[temp3],       %[temp3],       0x2000      \n\t"
+          "sra        %[temp2],       %[temp2],       14          \n\t"
+          "sra        %[temp3],       %[temp3],       14          \n\t"
+          "b          4f                                          \n\t"
+          " nop                                                   \n\t"
+         "2:                                                      \n\t"
+          "addu       %[temp1],       $zero,          $zero       \n\t"
+          "addu       %[temp2],       $zero,          $zero       \n\t"
+          "addu       %[temp3],       $zero,          $zero       \n\t"
+          "b          1f                                          \n\t"
+          " nop                                                   \n\t"
+         "3:                                                      \n\t"
+          "addiu      %[temp1],       $0,             0x4000      \n\t"
+         "1:                                                      \n\t"
+          "sh         %[temp1],       0(%[ptr])                   \n\t"
+         "4:                                                      \n\t"
+          "sh         %[temp2],       0(%[er_ptr])                \n\t"
+          "sh         %[temp3],       2(%[er_ptr])                \n\t"
+          "addiu      %[ptr],         %[ptr],         2           \n\t"
+          "addiu      %[er_ptr],      %[er_ptr],      4           \n\t"
+          ".set       pop                                         \n\t"
+          : [temp1] "=&r" (temp1), [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
+            [temp4] "=&r" (temp4), [temp5] "=&r" (temp5), [ptr] "+r" (ptr),
+            [er_ptr] "+r" (er_ptr), [dr_ptr] "+r" (dr_ptr)
+          :
+          : "memory", "hi", "lo"
+        );
+#endif
+      }
+    }
+  }
+  else {
+    // multiply with Wiener coefficients
+    for (i = 0; i < PART_LEN1; i++) {
+      efw[i].real = (int16_t)
+                      (WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].real,
+                                                            hnl[i],
+                                                            14));
+      efw[i].imag = (int16_t)
+                      (WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(dfw[i].imag,
+                                                            hnl[i],
+                                                            14));
+    }
+  }
+
+  if (aecm->cngMode == AecmTrue) {
+    ComfortNoise(aecm, ptrDfaClean, efw, hnl);
+  }
+
+  InverseFFTAndWindow(aecm, fft, efw, output, nearendClean);
+
+  return 0;
+}
+
+// Generate comfort noise and add to output signal.
+static void ComfortNoise(AecmCore* aecm,
+                         const uint16_t* dfa,
+                         ComplexInt16* out,
+                         const int16_t* lambda) {
+  int16_t i;
+  int16_t tmp16, tmp161, tmp162, tmp163, nrsh1, nrsh2;
+  int32_t tmp32, tmp321, tnoise, tnoise1;
+  int32_t tmp322, tmp323, *tmp1;
+  int16_t* dfap;
+  int16_t* lambdap;
+  const int32_t c2049 = 2049;
+  const int32_t c359 = 359;
+  const int32_t c114 = ONE_Q14;
+
+  int16_t randW16[PART_LEN];
+  int16_t uReal[PART_LEN1];
+  int16_t uImag[PART_LEN1];
+  int32_t outLShift32;
+
+  int16_t shiftFromNearToNoise = kNoiseEstQDomain - aecm->dfaCleanQDomain;
+  int16_t minTrackShift = 9;
+
+  RTC_DCHECK_GE(shiftFromNearToNoise, 0);
+  RTC_DCHECK_LT(shiftFromNearToNoise, 16);
+
+  if (aecm->noiseEstCtr < 100) {
+    // Track the minimum more quickly initially.
+    aecm->noiseEstCtr++;
+    minTrackShift = 6;
+  }
+
+  // Generate a uniform random array on [0 2^15-1].
+  WebRtcSpl_RandUArray(randW16, PART_LEN, &aecm->seed);
+  int16_t* randW16p = (int16_t*)randW16;
+#if defined (MIPS_DSP_R1_LE)
+  int16_t* kCosTablep = (int16_t*)WebRtcAecm_kCosTable;
+  int16_t* kSinTablep = (int16_t*)WebRtcAecm_kSinTable;
+#endif   // #if defined(MIPS_DSP_R1_LE)
+  tmp1 = (int32_t*)aecm->noiseEst + 1;
+  dfap = (int16_t*)dfa + 1;
+  lambdap = (int16_t*)lambda + 1;
+  // Estimate noise power.
+  for (i = 1; i < PART_LEN1; i+=2) {
+  // Shift to the noise domain.
+    __asm __volatile (
+      "lh     %[tmp32],       0(%[dfap])                              \n\t"
+      "lw     %[tnoise],      0(%[tmp1])                              \n\t"
+      "sllv   %[outLShift32], %[tmp32],   %[shiftFromNearToNoise]     \n\t"
+      : [tmp32] "=&r" (tmp32), [outLShift32] "=r" (outLShift32),
+        [tnoise] "=&r" (tnoise)
+      : [tmp1] "r" (tmp1), [dfap] "r" (dfap),
+        [shiftFromNearToNoise] "r" (shiftFromNearToNoise)
+      : "memory"
+    );
+
+    if (outLShift32 < tnoise) {
+      // Reset "too low" counter
+      aecm->noiseEstTooLowCtr[i] = 0;
+      // Track the minimum.
+      if (tnoise < (1 << minTrackShift)) {
+        // For small values, decrease noiseEst[i] every
+        // |kNoiseEstIncCount| block. The regular approach below can not
+        // go further down due to truncation.
+        aecm->noiseEstTooHighCtr[i]++;
+        if (aecm->noiseEstTooHighCtr[i] >= kNoiseEstIncCount) {
+          tnoise--;
+          aecm->noiseEstTooHighCtr[i] = 0;  // Reset the counter
+        }
+      } else {
+        __asm __volatile (
+          "subu   %[tmp32],       %[tnoise],      %[outLShift32]      \n\t"
+          "srav   %[tmp32],       %[tmp32],       %[minTrackShift]    \n\t"
+          "subu   %[tnoise],      %[tnoise],      %[tmp32]            \n\t"
+          : [tmp32] "=&r" (tmp32), [tnoise] "+r" (tnoise)
+          : [outLShift32] "r" (outLShift32), [minTrackShift] "r" (minTrackShift)
+        );
+      }
+    } else {
+      // Reset "too high" counter
+      aecm->noiseEstTooHighCtr[i] = 0;
+      // Ramp slowly upwards until we hit the minimum again.
+      if ((tnoise >> 19) <= 0) {
+        if ((tnoise >> 11) > 0) {
+          // Large enough for relative increase
+          __asm __volatile (
+            "mul    %[tnoise],  %[tnoise],  %[c2049]    \n\t"
+            "sra    %[tnoise],  %[tnoise],  11          \n\t"
+            : [tnoise] "+r" (tnoise)
+            : [c2049] "r" (c2049)
+            : "hi", "lo"
+          );
+        } else {
+          // Make incremental increases based on size every
+          // |kNoiseEstIncCount| block
+          aecm->noiseEstTooLowCtr[i]++;
+          if (aecm->noiseEstTooLowCtr[i] >= kNoiseEstIncCount) {
+            __asm __volatile (
+              "sra    %[tmp32],   %[tnoise],  9           \n\t"
+              "addi   %[tnoise],  %[tnoise],  1           \n\t"
+              "addu   %[tnoise],  %[tnoise],  %[tmp32]    \n\t"
+              : [tnoise] "+r" (tnoise), [tmp32] "=&r" (tmp32)
+              :
+            );
+            aecm->noiseEstTooLowCtr[i] = 0; // Reset counter
+          }
+        }
+      } else {
+        // Avoid overflow.
+        // Multiplication with 2049 will cause wrap around. Scale
+        // down first and then multiply
+        __asm __volatile (
+          "sra    %[tnoise],  %[tnoise],  11          \n\t"
+          "mul    %[tnoise],  %[tnoise],  %[c2049]    \n\t"
+          : [tnoise] "+r" (tnoise)
+          : [c2049] "r" (c2049)
+          : "hi", "lo"
+        );
+      }
+    }
+
+    // Shift to the noise domain.
+    __asm __volatile (
+      "lh     %[tmp32],       2(%[dfap])                              \n\t"
+      "lw     %[tnoise1],     4(%[tmp1])                              \n\t"
+      "addiu  %[dfap],        %[dfap],    4                           \n\t"
+      "sllv   %[outLShift32], %[tmp32],   %[shiftFromNearToNoise]     \n\t"
+      : [tmp32] "=&r" (tmp32), [dfap] "+r" (dfap),
+        [outLShift32] "=r" (outLShift32), [tnoise1] "=&r" (tnoise1)
+      : [tmp1] "r" (tmp1), [shiftFromNearToNoise] "r" (shiftFromNearToNoise)
+      : "memory"
+    );
+
+    if (outLShift32 < tnoise1) {
+      // Reset "too low" counter
+      aecm->noiseEstTooLowCtr[i + 1] = 0;
+      // Track the minimum.
+      if (tnoise1 < (1 << minTrackShift)) {
+        // For small values, decrease noiseEst[i] every
+        // |kNoiseEstIncCount| block. The regular approach below can not
+        // go further down due to truncation.
+        aecm->noiseEstTooHighCtr[i + 1]++;
+        if (aecm->noiseEstTooHighCtr[i + 1] >= kNoiseEstIncCount) {
+          tnoise1--;
+          aecm->noiseEstTooHighCtr[i + 1] = 0; // Reset the counter
+        }
+      } else {
+        __asm __volatile (
+          "subu   %[tmp32],       %[tnoise1],     %[outLShift32]      \n\t"
+          "srav   %[tmp32],       %[tmp32],       %[minTrackShift]    \n\t"
+          "subu   %[tnoise1],     %[tnoise1],     %[tmp32]            \n\t"
+          : [tmp32] "=&r" (tmp32), [tnoise1] "+r" (tnoise1)
+          : [outLShift32] "r" (outLShift32), [minTrackShift] "r" (minTrackShift)
+        );
+      }
+    } else {
+      // Reset "too high" counter
+      aecm->noiseEstTooHighCtr[i + 1] = 0;
+      // Ramp slowly upwards until we hit the minimum again.
+      if ((tnoise1 >> 19) <= 0) {
+        if ((tnoise1 >> 11) > 0) {
+          // Large enough for relative increase
+          __asm __volatile (
+            "mul    %[tnoise1], %[tnoise1], %[c2049]   \n\t"
+            "sra    %[tnoise1], %[tnoise1], 11         \n\t"
+            : [tnoise1] "+r" (tnoise1)
+            : [c2049] "r" (c2049)
+            : "hi", "lo"
+          );
+        } else {
+          // Make incremental increases based on size every
+          // |kNoiseEstIncCount| block
+          aecm->noiseEstTooLowCtr[i + 1]++;
+          if (aecm->noiseEstTooLowCtr[i + 1] >= kNoiseEstIncCount) {
+            __asm __volatile (
+              "sra    %[tmp32],   %[tnoise1], 9           \n\t"
+              "addi   %[tnoise1], %[tnoise1], 1           \n\t"
+              "addu   %[tnoise1], %[tnoise1], %[tmp32]    \n\t"
+              : [tnoise1] "+r" (tnoise1), [tmp32] "=&r" (tmp32)
+              :
+            );
+            aecm->noiseEstTooLowCtr[i + 1] = 0; // Reset counter
+          }
+        }
+      } else {
+        // Avoid overflow.
+        // Multiplication with 2049 will cause wrap around. Scale
+        // down first and then multiply
+        __asm __volatile (
+          "sra    %[tnoise1], %[tnoise1], 11          \n\t"
+          "mul    %[tnoise1], %[tnoise1], %[c2049]    \n\t"
+          : [tnoise1] "+r" (tnoise1)
+          : [c2049] "r" (c2049)
+          : "hi", "lo"
+        );
+      }
+    }
+
+    __asm __volatile (
+      "lh     %[tmp16],   0(%[lambdap])                           \n\t"
+      "lh     %[tmp161],  2(%[lambdap])                           \n\t"
+      "sw     %[tnoise],  0(%[tmp1])                              \n\t"
+      "sw     %[tnoise1], 4(%[tmp1])                              \n\t"
+      "subu   %[tmp16],   %[c114],        %[tmp16]                \n\t"
+      "subu   %[tmp161],  %[c114],        %[tmp161]               \n\t"
+      "srav   %[tmp32],   %[tnoise],      %[shiftFromNearToNoise] \n\t"
+      "srav   %[tmp321],  %[tnoise1],     %[shiftFromNearToNoise] \n\t"
+      "addiu  %[lambdap], %[lambdap],     4                       \n\t"
+      "addiu  %[tmp1],    %[tmp1],        8                       \n\t"
+      : [tmp16] "=&r" (tmp16), [tmp161] "=&r" (tmp161), [tmp1] "+r" (tmp1),
+        [tmp32] "=&r" (tmp32), [tmp321] "=&r" (tmp321), [lambdap] "+r" (lambdap)
+      : [tnoise] "r" (tnoise), [tnoise1] "r" (tnoise1), [c114] "r" (c114),
+        [shiftFromNearToNoise] "r" (shiftFromNearToNoise)
+      : "memory"
+    );
+
+    if (tmp32 > 32767) {
+      tmp32 = 32767;
+      aecm->noiseEst[i] = tmp32 << shiftFromNearToNoise;
+    }
+    if (tmp321 > 32767) {
+      tmp321 = 32767;
+      aecm->noiseEst[i+1] = tmp321 << shiftFromNearToNoise;
+    }
+
+    __asm __volatile (
+      "mul    %[tmp32],   %[tmp32],       %[tmp16]                \n\t"
+      "mul    %[tmp321],  %[tmp321],      %[tmp161]               \n\t"
+      "sra    %[nrsh1],   %[tmp32],       14                      \n\t"
+      "sra    %[nrsh2],   %[tmp321],      14                      \n\t"
+      : [nrsh1] "=&r" (nrsh1), [nrsh2] "=r" (nrsh2)
+      : [tmp16] "r" (tmp16), [tmp161] "r" (tmp161), [tmp32] "r" (tmp32),
+        [tmp321] "r" (tmp321)
+      : "memory", "hi", "lo"
+    );
+
+    __asm __volatile (
+      "lh     %[tmp32],       0(%[randW16p])              \n\t"
+      "lh     %[tmp321],      2(%[randW16p])              \n\t"
+      "addiu  %[randW16p],    %[randW16p],    4           \n\t"
+      "mul    %[tmp32],       %[tmp32],       %[c359]     \n\t"
+      "mul    %[tmp321],      %[tmp321],      %[c359]     \n\t"
+      "sra    %[tmp16],       %[tmp32],       15          \n\t"
+      "sra    %[tmp161],      %[tmp321],      15          \n\t"
+      : [randW16p] "+r" (randW16p), [tmp32] "=&r" (tmp32),
+        [tmp16] "=r" (tmp16), [tmp161] "=r" (tmp161), [tmp321] "=&r" (tmp321)
+      : [c359] "r" (c359)
+      : "memory", "hi", "lo"
+    );
+
+#if !defined(MIPS_DSP_R1_LE)
+    tmp32 = WebRtcAecm_kCosTable[tmp16];
+    tmp321 = WebRtcAecm_kSinTable[tmp16];
+    tmp322 = WebRtcAecm_kCosTable[tmp161];
+    tmp323 = WebRtcAecm_kSinTable[tmp161];
+#else
+    __asm __volatile (
+      "sll    %[tmp16],       %[tmp16],                   1           \n\t"
+      "sll    %[tmp161],      %[tmp161],                  1           \n\t"
+      "lhx    %[tmp32],       %[tmp16](%[kCosTablep])                 \n\t"
+      "lhx    %[tmp321],      %[tmp16](%[kSinTablep])                 \n\t"
+      "lhx    %[tmp322],      %[tmp161](%[kCosTablep])                \n\t"
+      "lhx    %[tmp323],      %[tmp161](%[kSinTablep])                \n\t"
+      : [tmp32] "=&r" (tmp32), [tmp321] "=&r" (tmp321),
+        [tmp322] "=&r" (tmp322), [tmp323] "=&r" (tmp323)
+      : [kCosTablep] "r" (kCosTablep), [tmp16] "r" (tmp16),
+        [tmp161] "r" (tmp161), [kSinTablep] "r" (kSinTablep)
+      : "memory"
+    );
+#endif
+    __asm __volatile (
+      "mul    %[tmp32],       %[tmp32],                   %[nrsh1]    \n\t"
+      "negu   %[tmp162],      %[nrsh1]                                \n\t"
+      "mul    %[tmp322],      %[tmp322],                  %[nrsh2]    \n\t"
+      "negu   %[tmp163],      %[nrsh2]                                \n\t"
+      "sra    %[tmp32],       %[tmp32],                   13          \n\t"
+      "mul    %[tmp321],      %[tmp321],                  %[tmp162]   \n\t"
+      "sra    %[tmp322],      %[tmp322],                  13          \n\t"
+      "mul    %[tmp323],      %[tmp323],                  %[tmp163]   \n\t"
+      "sra    %[tmp321],      %[tmp321],                  13          \n\t"
+      "sra    %[tmp323],      %[tmp323],                  13          \n\t"
+      : [tmp32] "+r" (tmp32), [tmp321] "+r" (tmp321), [tmp162] "=&r" (tmp162),
+        [tmp322] "+r" (tmp322), [tmp323] "+r" (tmp323), [tmp163] "=&r" (tmp163)
+      : [nrsh1] "r" (nrsh1), [nrsh2] "r" (nrsh2)
+      : "hi", "lo"
+    );
+    // Tables are in Q13.
+    uReal[i] = (int16_t)tmp32;
+    uImag[i] = (int16_t)tmp321;
+    uReal[i + 1] = (int16_t)tmp322;
+    uImag[i + 1] = (int16_t)tmp323;
+  }
+
+  int32_t tt, sgn;
+  tt = out[0].real;
+  sgn = ((int)tt) >> 31;
+  out[0].real = sgn == (int16_t)(tt >> 15) ? (int16_t)tt : (16384 ^ sgn);
+  tt = out[0].imag;
+  sgn = ((int)tt) >> 31;
+  out[0].imag = sgn == (int16_t)(tt >> 15) ? (int16_t)tt : (16384 ^ sgn);
+  for (i = 1; i < PART_LEN; i++) {
+    tt = out[i].real + uReal[i];
+    sgn = ((int)tt) >> 31;
+    out[i].real = sgn == (int16_t)(tt >> 15) ? (int16_t)tt : (16384 ^ sgn);
+    tt = out[i].imag + uImag[i];
+    sgn = ((int)tt) >> 31;
+    out[i].imag = sgn == (int16_t)(tt >> 15) ? (int16_t)tt : (16384 ^ sgn);
+  }
+  tt = out[PART_LEN].real + uReal[PART_LEN];
+  sgn = ((int)tt) >> 31;
+  out[PART_LEN].real = sgn == (int16_t)(tt >> 15) ? (int16_t)tt : (16384 ^ sgn);
+  tt = out[PART_LEN].imag;
+  sgn = ((int)tt) >> 31;
+  out[PART_LEN].imag = sgn == (int16_t)(tt >> 15) ? (int16_t)tt : (16384 ^ sgn);
+}
diff --git a/modules/audio_processing/aecm/aecm_core_neon.cc b/modules/audio_processing/aecm/aecm_core_neon.cc
new file mode 100644
index 0000000..ca7211f
--- /dev/null
+++ b/modules/audio_processing/aecm/aecm_core_neon.cc
@@ -0,0 +1,199 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aecm/aecm_core.h"
+
+#include <arm_neon.h>
+
+#include "common_audio/signal_processing/include/real_fft.h"
+#include "rtc_base/checks.h"
+
+// TODO(kma): Re-write the corresponding assembly file, the offset
+// generating script and makefile, to replace these C functions.
+
+static inline void AddLanes(uint32_t* ptr, uint32x4_t v) {
+#if defined(WEBRTC_ARCH_ARM64)
+  *(ptr) = vaddvq_u32(v);
+#else
+  uint32x2_t tmp_v;
+  tmp_v = vadd_u32(vget_low_u32(v), vget_high_u32(v));
+  tmp_v = vpadd_u32(tmp_v, tmp_v);
+  *(ptr) = vget_lane_u32(tmp_v, 0);
+#endif
+}
+
+void WebRtcAecm_CalcLinearEnergiesNeon(AecmCore* aecm,
+                                       const uint16_t* far_spectrum,
+                                       int32_t* echo_est,
+                                       uint32_t* far_energy,
+                                       uint32_t* echo_energy_adapt,
+                                       uint32_t* echo_energy_stored) {
+  int16_t* start_stored_p = aecm->channelStored;
+  int16_t* start_adapt_p = aecm->channelAdapt16;
+  int32_t* echo_est_p = echo_est;
+  const int16_t* end_stored_p = aecm->channelStored + PART_LEN;
+  const uint16_t* far_spectrum_p = far_spectrum;
+  int16x8_t store_v, adapt_v;
+  uint16x8_t spectrum_v;
+  uint32x4_t echo_est_v_low, echo_est_v_high;
+  uint32x4_t far_energy_v, echo_stored_v, echo_adapt_v;
+
+  far_energy_v = vdupq_n_u32(0);
+  echo_adapt_v = vdupq_n_u32(0);
+  echo_stored_v = vdupq_n_u32(0);
+
+  // Get energy for the delayed far end signal and estimated
+  // echo using both stored and adapted channels.
+  // The C code:
+  //  for (i = 0; i < PART_LEN1; i++) {
+  //      echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i],
+  //                                         far_spectrum[i]);
+  //      (*far_energy) += (uint32_t)(far_spectrum[i]);
+  //      *echo_energy_adapt += aecm->channelAdapt16[i] * far_spectrum[i];
+  //      (*echo_energy_stored) += (uint32_t)echo_est[i];
+  //  }
+  while (start_stored_p < end_stored_p) {
+    spectrum_v = vld1q_u16(far_spectrum_p);
+    adapt_v = vld1q_s16(start_adapt_p);
+    store_v = vld1q_s16(start_stored_p);
+
+    far_energy_v = vaddw_u16(far_energy_v, vget_low_u16(spectrum_v));
+    far_energy_v = vaddw_u16(far_energy_v, vget_high_u16(spectrum_v));
+
+    echo_est_v_low = vmull_u16(vreinterpret_u16_s16(vget_low_s16(store_v)),
+                               vget_low_u16(spectrum_v));
+    echo_est_v_high = vmull_u16(vreinterpret_u16_s16(vget_high_s16(store_v)),
+                                vget_high_u16(spectrum_v));
+    vst1q_s32(echo_est_p, vreinterpretq_s32_u32(echo_est_v_low));
+    vst1q_s32(echo_est_p + 4, vreinterpretq_s32_u32(echo_est_v_high));
+
+    echo_stored_v = vaddq_u32(echo_est_v_low, echo_stored_v);
+    echo_stored_v = vaddq_u32(echo_est_v_high, echo_stored_v);
+
+    echo_adapt_v = vmlal_u16(echo_adapt_v,
+                             vreinterpret_u16_s16(vget_low_s16(adapt_v)),
+                             vget_low_u16(spectrum_v));
+    echo_adapt_v = vmlal_u16(echo_adapt_v,
+                             vreinterpret_u16_s16(vget_high_s16(adapt_v)),
+                             vget_high_u16(spectrum_v));
+
+    start_stored_p += 8;
+    start_adapt_p += 8;
+    far_spectrum_p += 8;
+    echo_est_p += 8;
+  }
+
+  AddLanes(far_energy, far_energy_v);
+  AddLanes(echo_energy_stored, echo_stored_v);
+  AddLanes(echo_energy_adapt, echo_adapt_v);
+
+  echo_est[PART_LEN] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[PART_LEN],
+                                             far_spectrum[PART_LEN]);
+  *echo_energy_stored += (uint32_t)echo_est[PART_LEN];
+  *far_energy += (uint32_t)far_spectrum[PART_LEN];
+  *echo_energy_adapt += aecm->channelAdapt16[PART_LEN] * far_spectrum[PART_LEN];
+}
+
+void WebRtcAecm_StoreAdaptiveChannelNeon(AecmCore* aecm,
+                                         const uint16_t* far_spectrum,
+                                         int32_t* echo_est) {
+  RTC_DCHECK_EQ(0, (uintptr_t)echo_est % 32);
+  RTC_DCHECK_EQ(0, (uintptr_t)aecm->channelStored % 16);
+  RTC_DCHECK_EQ(0, (uintptr_t)aecm->channelAdapt16 % 16);
+
+  // This is C code of following optimized code.
+  // During startup we store the channel every block.
+  //  memcpy(aecm->channelStored,
+  //         aecm->channelAdapt16,
+  //         sizeof(int16_t) * PART_LEN1);
+  // Recalculate echo estimate
+  //  for (i = 0; i < PART_LEN; i += 4) {
+  //    echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i],
+  //                                        far_spectrum[i]);
+  //    echo_est[i + 1] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 1],
+  //                                            far_spectrum[i + 1]);
+  //    echo_est[i + 2] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 2],
+  //                                            far_spectrum[i + 2]);
+  //    echo_est[i + 3] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i + 3],
+  //                                            far_spectrum[i + 3]);
+  //  }
+  //  echo_est[i] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[i],
+  //                                     far_spectrum[i]);
+  const uint16_t* far_spectrum_p = far_spectrum;
+  int16_t* start_adapt_p = aecm->channelAdapt16;
+  int16_t* start_stored_p = aecm->channelStored;
+  const int16_t* end_stored_p = aecm->channelStored + PART_LEN;
+  int32_t* echo_est_p = echo_est;
+
+  uint16x8_t far_spectrum_v;
+  int16x8_t adapt_v;
+  uint32x4_t echo_est_v_low, echo_est_v_high;
+
+  while (start_stored_p < end_stored_p) {
+    far_spectrum_v = vld1q_u16(far_spectrum_p);
+    adapt_v = vld1q_s16(start_adapt_p);
+
+    vst1q_s16(start_stored_p, adapt_v);
+
+    echo_est_v_low = vmull_u16(vget_low_u16(far_spectrum_v),
+                               vget_low_u16(vreinterpretq_u16_s16(adapt_v)));
+    echo_est_v_high = vmull_u16(vget_high_u16(far_spectrum_v),
+                                vget_high_u16(vreinterpretq_u16_s16(adapt_v)));
+
+    vst1q_s32(echo_est_p, vreinterpretq_s32_u32(echo_est_v_low));
+    vst1q_s32(echo_est_p + 4, vreinterpretq_s32_u32(echo_est_v_high));
+
+    far_spectrum_p += 8;
+    start_adapt_p += 8;
+    start_stored_p += 8;
+    echo_est_p += 8;
+  }
+  aecm->channelStored[PART_LEN] = aecm->channelAdapt16[PART_LEN];
+  echo_est[PART_LEN] = WEBRTC_SPL_MUL_16_U16(aecm->channelStored[PART_LEN],
+                                             far_spectrum[PART_LEN]);
+}
+
+void WebRtcAecm_ResetAdaptiveChannelNeon(AecmCore* aecm) {
+  RTC_DCHECK_EQ(0, (uintptr_t)aecm->channelStored % 16);
+  RTC_DCHECK_EQ(0, (uintptr_t)aecm->channelAdapt16 % 16);
+  RTC_DCHECK_EQ(0, (uintptr_t)aecm->channelAdapt32 % 32);
+
+  // The C code of following optimized code.
+  // for (i = 0; i < PART_LEN1; i++) {
+  //   aecm->channelAdapt16[i] = aecm->channelStored[i];
+  //   aecm->channelAdapt32[i] = WEBRTC_SPL_LSHIFT_W32(
+  //              (int32_t)aecm->channelStored[i], 16);
+  // }
+
+  int16_t* start_stored_p = aecm->channelStored;
+  int16_t* start_adapt16_p = aecm->channelAdapt16;
+  int32_t* start_adapt32_p = aecm->channelAdapt32;
+  const int16_t* end_stored_p = start_stored_p + PART_LEN;
+
+  int16x8_t stored_v;
+  int32x4_t adapt32_v_low, adapt32_v_high;
+
+  while (start_stored_p < end_stored_p) {
+    stored_v = vld1q_s16(start_stored_p);
+    vst1q_s16(start_adapt16_p, stored_v);
+
+    adapt32_v_low = vshll_n_s16(vget_low_s16(stored_v), 16);
+    adapt32_v_high = vshll_n_s16(vget_high_s16(stored_v), 16);
+
+    vst1q_s32(start_adapt32_p, adapt32_v_low);
+    vst1q_s32(start_adapt32_p + 4, adapt32_v_high);
+
+    start_stored_p += 8;
+    start_adapt16_p += 8;
+    start_adapt32_p += 8;
+  }
+  aecm->channelAdapt16[PART_LEN] = aecm->channelStored[PART_LEN];
+  aecm->channelAdapt32[PART_LEN] = (int32_t)aecm->channelStored[PART_LEN] << 16;
+}
diff --git a/modules/audio_processing/aecm/aecm_defines.h b/modules/audio_processing/aecm/aecm_defines.h
new file mode 100644
index 0000000..ae2d2bc
--- /dev/null
+++ b/modules/audio_processing/aecm/aecm_defines.h
@@ -0,0 +1,87 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AECM_AECM_DEFINES_H_
+#define MODULES_AUDIO_PROCESSING_AECM_AECM_DEFINES_H_
+
+#define AECM_DYNAMIC_Q                 /* Turn on/off dynamic Q-domain. */
+
+/* Algorithm parameters */
+#define FRAME_LEN       80             /* Total frame length, 10 ms. */
+
+#define PART_LEN        64             /* Length of partition. */
+#define PART_LEN_SHIFT  7              /* Length of (PART_LEN * 2) in base 2. */
+
+#define PART_LEN1       (PART_LEN + 1)  /* Unique fft coefficients. */
+#define PART_LEN2       (PART_LEN << 1) /* Length of partition * 2. */
+#define PART_LEN4       (PART_LEN << 2) /* Length of partition * 4. */
+#define FAR_BUF_LEN     PART_LEN4       /* Length of buffers. */
+#define MAX_DELAY       100
+
+/* Counter parameters */
+#define CONV_LEN        512          /* Convergence length used at startup. */
+#define CONV_LEN2       (CONV_LEN << 1) /* Used at startup. */
+
+/* Energy parameters */
+#define MAX_BUF_LEN     64           /* History length of energy signals. */
+#define FAR_ENERGY_MIN  1025         /* Lowest Far energy level: At least 2 */
+                                     /* in energy. */
+#define FAR_ENERGY_DIFF 929          /* Allowed difference between max */
+                                     /* and min. */
+#define ENERGY_DEV_OFFSET       0    /* The energy error offset in Q8. */
+#define ENERGY_DEV_TOL  400          /* The energy estimation tolerance (Q8). */
+#define FAR_ENERGY_VAD_REGION   230  /* Far VAD tolerance region. */
+
+/* Stepsize parameters */
+#define MU_MIN          10          /* Min stepsize 2^-MU_MIN (far end energy */
+                                    /* dependent). */
+#define MU_MAX          1           /* Max stepsize 2^-MU_MAX (far end energy */
+                                    /* dependent). */
+#define MU_DIFF         9           /* MU_MIN - MU_MAX */
+
+/* Channel parameters */
+#define MIN_MSE_COUNT   20 /* Min number of consecutive blocks with enough */
+                           /* far end energy to compare channel estimates. */
+#define MIN_MSE_DIFF    29 /* The ratio between adapted and stored channel to */
+                           /* accept a new storage (0.8 in Q-MSE_RESOLUTION). */
+#define MSE_RESOLUTION  5           /* MSE parameter resolution. */
+#define RESOLUTION_CHANNEL16    12  /* W16 Channel in Q-RESOLUTION_CHANNEL16. */
+#define RESOLUTION_CHANNEL32    28  /* W32 Channel in Q-RESOLUTION_CHANNEL. */
+#define CHANNEL_VAD     16          /* Minimum energy in frequency band */
+                                    /* to update channel. */
+
+/* Suppression gain parameters: SUPGAIN parameters in Q-(RESOLUTION_SUPGAIN). */
+#define RESOLUTION_SUPGAIN      8     /* Channel in Q-(RESOLUTION_SUPGAIN). */
+#define SUPGAIN_DEFAULT (1 << RESOLUTION_SUPGAIN)  /* Default. */
+#define SUPGAIN_ERROR_PARAM_A   3072  /* Estimation error parameter */
+                                      /* (Maximum gain) (8 in Q8). */
+#define SUPGAIN_ERROR_PARAM_B   1536  /* Estimation error parameter */
+                                      /* (Gain before going down). */
+#define SUPGAIN_ERROR_PARAM_D   SUPGAIN_DEFAULT /* Estimation error parameter */
+                                /* (Should be the same as Default) (1 in Q8). */
+#define SUPGAIN_EPC_DT  200     /* SUPGAIN_ERROR_PARAM_C * ENERGY_DEV_TOL */
+
+/* Defines for "check delay estimation" */
+#define CORR_WIDTH      31      /* Number of samples to correlate over. */
+#define CORR_MAX        16      /* Maximum correlation offset. */
+#define CORR_MAX_BUF    63
+#define CORR_DEV        4
+#define CORR_MAX_LEVEL  20
+#define CORR_MAX_LOW    4
+#define CORR_BUF_LEN    (CORR_MAX << 1) + 1
+/* Note that CORR_WIDTH + 2*CORR_MAX <= MAX_BUF_LEN. */
+
+#define ONE_Q14         (1 << 14)
+
+/* NLP defines */
+#define NLP_COMP_LOW    3277    /* 0.2 in Q14 */
+#define NLP_COMP_HIGH   ONE_Q14 /* 1 in Q14 */
+
+#endif
diff --git a/modules/audio_processing/aecm/echo_control_mobile.cc b/modules/audio_processing/aecm/echo_control_mobile.cc
new file mode 100644
index 0000000..36e2271
--- /dev/null
+++ b/modules/audio_processing/aecm/echo_control_mobile.cc
@@ -0,0 +1,648 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aecm/echo_control_mobile.h"
+
+#ifdef AEC_DEBUG
+#include <stdio.h>
+#endif
+#include <stdlib.h>
+
+extern "C" {
+#include "common_audio/ring_buffer.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+}
+#include "modules/audio_processing/aecm/aecm_core.h"
+
+#define BUF_SIZE_FRAMES 50 // buffer size (frames)
+// Maximum length of resampled signal. Must be an integer multiple of frames
+// (ceil(1/(1 + MIN_SKEW)*2) + 1)*FRAME_LEN
+// The factor of 2 handles wb, and the + 1 is as a safety margin
+#define MAX_RESAMP_LEN (5 * FRAME_LEN)
+
+static const size_t kBufSizeSamp = BUF_SIZE_FRAMES * FRAME_LEN; // buffer size (samples)
+static const int kSampMsNb = 8; // samples per ms in nb
+// Target suppression levels for nlp modes
+// log{0.001, 0.00001, 0.00000001}
+static const int kInitCheck = 42;
+
+typedef struct
+{
+    int sampFreq;
+    int scSampFreq;
+    short bufSizeStart;
+    int knownDelay;
+
+    // Stores the last frame added to the farend buffer
+    short farendOld[2][FRAME_LEN];
+    short initFlag; // indicates if AEC has been initialized
+
+    // Variables used for averaging far end buffer size
+    short counter;
+    short sum;
+    short firstVal;
+    short checkBufSizeCtr;
+
+    // Variables used for delay shifts
+    short msInSndCardBuf;
+    short filtDelay;
+    int timeForDelayChange;
+    int ECstartup;
+    int checkBuffSize;
+    int delayChange;
+    short lastDelayDiff;
+
+    int16_t echoMode;
+
+#ifdef AEC_DEBUG
+    FILE *bufFile;
+    FILE *delayFile;
+    FILE *preCompFile;
+    FILE *postCompFile;
+#endif // AEC_DEBUG
+    // Structures
+    RingBuffer *farendBuf;
+
+    AecmCore* aecmCore;
+} AecMobile;
+
+// Estimates delay to set the position of the farend buffer read pointer
+// (controlled by knownDelay)
+static int WebRtcAecm_EstBufDelay(AecMobile* aecm, short msInSndCardBuf);
+
+// Stuffs the farend buffer if the estimated delay is too large
+static int WebRtcAecm_DelayComp(AecMobile* aecm);
+
+void* WebRtcAecm_Create() {
+    AecMobile* aecm = static_cast<AecMobile*>(malloc(sizeof(AecMobile)));
+
+    WebRtcSpl_Init();
+
+    aecm->aecmCore = WebRtcAecm_CreateCore();
+    if (!aecm->aecmCore) {
+        WebRtcAecm_Free(aecm);
+        return NULL;
+    }
+
+    aecm->farendBuf = WebRtc_CreateBuffer(kBufSizeSamp,
+                                          sizeof(int16_t));
+    if (!aecm->farendBuf)
+    {
+        WebRtcAecm_Free(aecm);
+        return NULL;
+    }
+
+    aecm->initFlag = 0;
+
+#ifdef AEC_DEBUG
+    aecm->aecmCore->farFile = fopen("aecFar.pcm","wb");
+    aecm->aecmCore->nearFile = fopen("aecNear.pcm","wb");
+    aecm->aecmCore->outFile = fopen("aecOut.pcm","wb");
+    //aecm->aecmCore->outLpFile = fopen("aecOutLp.pcm","wb");
+
+    aecm->bufFile = fopen("aecBuf.dat", "wb");
+    aecm->delayFile = fopen("aecDelay.dat", "wb");
+    aecm->preCompFile = fopen("preComp.pcm", "wb");
+    aecm->postCompFile = fopen("postComp.pcm", "wb");
+#endif // AEC_DEBUG
+    return aecm;
+}
+
+void WebRtcAecm_Free(void* aecmInst) {
+  AecMobile* aecm = static_cast<AecMobile*>(aecmInst);
+
+    if (aecm == NULL) {
+      return;
+    }
+
+#ifdef AEC_DEBUG
+    fclose(aecm->aecmCore->farFile);
+    fclose(aecm->aecmCore->nearFile);
+    fclose(aecm->aecmCore->outFile);
+    //fclose(aecm->aecmCore->outLpFile);
+
+    fclose(aecm->bufFile);
+    fclose(aecm->delayFile);
+    fclose(aecm->preCompFile);
+    fclose(aecm->postCompFile);
+#endif // AEC_DEBUG
+    WebRtcAecm_FreeCore(aecm->aecmCore);
+    WebRtc_FreeBuffer(aecm->farendBuf);
+    free(aecm);
+}
+
+int32_t WebRtcAecm_Init(void *aecmInst, int32_t sampFreq)
+{
+    AecMobile* aecm = static_cast<AecMobile*>(aecmInst);
+    AecmConfig aecConfig;
+
+    if (aecm == NULL)
+    {
+        return -1;
+    }
+
+    if (sampFreq != 8000 && sampFreq != 16000)
+    {
+        return AECM_BAD_PARAMETER_ERROR;
+    }
+    aecm->sampFreq = sampFreq;
+
+    // Initialize AECM core
+    if (WebRtcAecm_InitCore(aecm->aecmCore, aecm->sampFreq) == -1)
+    {
+        return AECM_UNSPECIFIED_ERROR;
+    }
+
+    // Initialize farend buffer
+    WebRtc_InitBuffer(aecm->farendBuf);
+
+    aecm->initFlag = kInitCheck; // indicates that initialization has been done
+
+    aecm->delayChange = 1;
+
+    aecm->sum = 0;
+    aecm->counter = 0;
+    aecm->checkBuffSize = 1;
+    aecm->firstVal = 0;
+
+    aecm->ECstartup = 1;
+    aecm->bufSizeStart = 0;
+    aecm->checkBufSizeCtr = 0;
+    aecm->filtDelay = 0;
+    aecm->timeForDelayChange = 0;
+    aecm->knownDelay = 0;
+    aecm->lastDelayDiff = 0;
+
+    memset(&aecm->farendOld, 0, sizeof(aecm->farendOld));
+
+    // Default settings.
+    aecConfig.cngMode = AecmTrue;
+    aecConfig.echoMode = 3;
+
+    if (WebRtcAecm_set_config(aecm, aecConfig) == -1)
+    {
+        return AECM_UNSPECIFIED_ERROR;
+    }
+
+    return 0;
+}
+
+// Returns any error that is caused when buffering the
+// farend signal.
+int32_t WebRtcAecm_GetBufferFarendError(void *aecmInst, const int16_t *farend,
+                                size_t nrOfSamples) {
+  AecMobile* aecm = static_cast<AecMobile*>(aecmInst);
+
+  if (aecm == NULL)
+    return -1;
+
+  if (farend == NULL)
+    return AECM_NULL_POINTER_ERROR;
+
+  if (aecm->initFlag != kInitCheck)
+    return AECM_UNINITIALIZED_ERROR;
+
+  if (nrOfSamples != 80 && nrOfSamples != 160)
+    return AECM_BAD_PARAMETER_ERROR;
+
+  return 0;
+}
+
+
+int32_t WebRtcAecm_BufferFarend(void *aecmInst, const int16_t *farend,
+                                size_t nrOfSamples) {
+  AecMobile* aecm = static_cast<AecMobile*>(aecmInst);
+
+  const int32_t err =
+      WebRtcAecm_GetBufferFarendError(aecmInst, farend, nrOfSamples);
+
+  if (err != 0)
+    return err;
+
+  // TODO(unknown): Is this really a good idea?
+  if (!aecm->ECstartup)
+  {
+    WebRtcAecm_DelayComp(aecm);
+  }
+
+  WebRtc_WriteBuffer(aecm->farendBuf, farend, nrOfSamples);
+
+  return 0;
+}
+
+int32_t WebRtcAecm_Process(void *aecmInst, const int16_t *nearendNoisy,
+                           const int16_t *nearendClean, int16_t *out,
+                           size_t nrOfSamples, int16_t msInSndCardBuf)
+{
+    AecMobile* aecm = static_cast<AecMobile*>(aecmInst);
+    int32_t retVal = 0;
+    size_t i;
+    short nmbrOfFilledBuffers;
+    size_t nBlocks10ms;
+    size_t nFrames;
+#ifdef AEC_DEBUG
+    short msInAECBuf;
+#endif
+
+    if (aecm == NULL)
+    {
+        return -1;
+    }
+
+    if (nearendNoisy == NULL)
+    {
+        return AECM_NULL_POINTER_ERROR;
+    }
+
+    if (out == NULL)
+    {
+        return AECM_NULL_POINTER_ERROR;
+    }
+
+    if (aecm->initFlag != kInitCheck)
+    {
+        return AECM_UNINITIALIZED_ERROR;
+    }
+
+    if (nrOfSamples != 80 && nrOfSamples != 160)
+    {
+        return AECM_BAD_PARAMETER_ERROR;
+    }
+
+    if (msInSndCardBuf < 0)
+    {
+        msInSndCardBuf = 0;
+        retVal = AECM_BAD_PARAMETER_WARNING;
+    } else if (msInSndCardBuf > 500)
+    {
+        msInSndCardBuf = 500;
+        retVal = AECM_BAD_PARAMETER_WARNING;
+    }
+    msInSndCardBuf += 10;
+    aecm->msInSndCardBuf = msInSndCardBuf;
+
+    nFrames = nrOfSamples / FRAME_LEN;
+    nBlocks10ms = nFrames / aecm->aecmCore->mult;
+
+    if (aecm->ECstartup)
+    {
+        if (nearendClean == NULL)
+        {
+            if (out != nearendNoisy)
+            {
+                memcpy(out, nearendNoisy, sizeof(short) * nrOfSamples);
+            }
+        } else if (out != nearendClean)
+        {
+            memcpy(out, nearendClean, sizeof(short) * nrOfSamples);
+        }
+
+        nmbrOfFilledBuffers =
+            (short) WebRtc_available_read(aecm->farendBuf) / FRAME_LEN;
+        // The AECM is in the start up mode
+        // AECM is disabled until the soundcard buffer and farend buffers are OK
+
+        // Mechanism to ensure that the soundcard buffer is reasonably stable.
+        if (aecm->checkBuffSize)
+        {
+            aecm->checkBufSizeCtr++;
+            // Before we fill up the far end buffer we require the amount of data on the
+            // sound card to be stable (+/-8 ms) compared to the first value. This
+            // comparison is made during the following 4 consecutive frames. If it seems
+            // to be stable then we start to fill up the far end buffer.
+
+            if (aecm->counter == 0)
+            {
+                aecm->firstVal = aecm->msInSndCardBuf;
+                aecm->sum = 0;
+            }
+
+            if (abs(aecm->firstVal - aecm->msInSndCardBuf)
+                    < WEBRTC_SPL_MAX(0.2 * aecm->msInSndCardBuf, kSampMsNb))
+            {
+                aecm->sum += aecm->msInSndCardBuf;
+                aecm->counter++;
+            } else
+            {
+                aecm->counter = 0;
+            }
+
+            if (aecm->counter * nBlocks10ms >= 6)
+            {
+                // The farend buffer size is determined in blocks of 80 samples
+                // Use 75% of the average value of the soundcard buffer
+                aecm->bufSizeStart
+                        = WEBRTC_SPL_MIN((3 * aecm->sum
+                                        * aecm->aecmCore->mult) / (aecm->counter * 40), BUF_SIZE_FRAMES);
+                // buffersize has now been determined
+                aecm->checkBuffSize = 0;
+            }
+
+            if (aecm->checkBufSizeCtr * nBlocks10ms > 50)
+            {
+                // for really bad sound cards, don't disable echocanceller for more than 0.5 sec
+                aecm->bufSizeStart = WEBRTC_SPL_MIN((3 * aecm->msInSndCardBuf
+                                * aecm->aecmCore->mult) / 40, BUF_SIZE_FRAMES);
+                aecm->checkBuffSize = 0;
+            }
+        }
+
+        // if checkBuffSize changed in the if-statement above
+        if (!aecm->checkBuffSize)
+        {
+            // soundcard buffer is now reasonably stable
+            // When the far end buffer is filled with approximately the same amount of
+            // data as the amount on the sound card we end the start up phase and start
+            // to cancel echoes.
+
+            if (nmbrOfFilledBuffers == aecm->bufSizeStart)
+            {
+                aecm->ECstartup = 0; // Enable the AECM
+            } else if (nmbrOfFilledBuffers > aecm->bufSizeStart)
+            {
+                WebRtc_MoveReadPtr(aecm->farendBuf,
+                                   (int) WebRtc_available_read(aecm->farendBuf)
+                                   - (int) aecm->bufSizeStart * FRAME_LEN);
+                aecm->ECstartup = 0;
+            }
+        }
+
+    } else
+    {
+        // AECM is enabled
+
+        // Note only 1 block supported for nb and 2 blocks for wb
+        for (i = 0; i < nFrames; i++)
+        {
+            int16_t farend[FRAME_LEN];
+            const int16_t* farend_ptr = NULL;
+
+            nmbrOfFilledBuffers =
+                (short) WebRtc_available_read(aecm->farendBuf) / FRAME_LEN;
+
+            // Check that there is data in the far end buffer
+            if (nmbrOfFilledBuffers > 0)
+            {
+                // Get the next 80 samples from the farend buffer
+                WebRtc_ReadBuffer(aecm->farendBuf, (void**) &farend_ptr, farend,
+                                  FRAME_LEN);
+
+                // Always store the last frame for use when we run out of data
+                memcpy(&(aecm->farendOld[i][0]), farend_ptr,
+                       FRAME_LEN * sizeof(short));
+            } else
+            {
+                // We have no data so we use the last played frame
+                memcpy(farend, &(aecm->farendOld[i][0]), FRAME_LEN * sizeof(short));
+                farend_ptr = farend;
+            }
+
+            // Call buffer delay estimator when all data is extracted,
+            // i,e. i = 0 for NB and i = 1 for WB
+            if ((i == 0 && aecm->sampFreq == 8000) || (i == 1 && aecm->sampFreq == 16000))
+            {
+                WebRtcAecm_EstBufDelay(aecm, aecm->msInSndCardBuf);
+            }
+
+            // Call the AECM
+            /*WebRtcAecm_ProcessFrame(aecm->aecmCore, farend, &nearend[FRAME_LEN * i],
+             &out[FRAME_LEN * i], aecm->knownDelay);*/
+            if (WebRtcAecm_ProcessFrame(aecm->aecmCore,
+                                        farend_ptr,
+                                        &nearendNoisy[FRAME_LEN * i],
+                                        (nearendClean
+                                         ? &nearendClean[FRAME_LEN * i]
+                                         : NULL),
+                                        &out[FRAME_LEN * i]) == -1)
+                return -1;
+        }
+    }
+
+#ifdef AEC_DEBUG
+    msInAECBuf = (short) WebRtc_available_read(aecm->farendBuf) /
+        (kSampMsNb * aecm->aecmCore->mult);
+    fwrite(&msInAECBuf, 2, 1, aecm->bufFile);
+    fwrite(&(aecm->knownDelay), sizeof(aecm->knownDelay), 1, aecm->delayFile);
+#endif
+
+    return retVal;
+}
+
+int32_t WebRtcAecm_set_config(void *aecmInst, AecmConfig config)
+{
+    AecMobile* aecm = static_cast<AecMobile*>(aecmInst);
+
+    if (aecm == NULL)
+    {
+        return -1;
+    }
+
+    if (aecm->initFlag != kInitCheck)
+    {
+        return AECM_UNINITIALIZED_ERROR;
+    }
+
+    if (config.cngMode != AecmFalse && config.cngMode != AecmTrue)
+    {
+        return AECM_BAD_PARAMETER_ERROR;
+    }
+    aecm->aecmCore->cngMode = config.cngMode;
+
+    if (config.echoMode < 0 || config.echoMode > 4)
+    {
+        return AECM_BAD_PARAMETER_ERROR;
+    }
+    aecm->echoMode = config.echoMode;
+
+    if (aecm->echoMode == 0)
+    {
+        aecm->aecmCore->supGain = SUPGAIN_DEFAULT >> 3;
+        aecm->aecmCore->supGainOld = SUPGAIN_DEFAULT >> 3;
+        aecm->aecmCore->supGainErrParamA = SUPGAIN_ERROR_PARAM_A >> 3;
+        aecm->aecmCore->supGainErrParamD = SUPGAIN_ERROR_PARAM_D >> 3;
+        aecm->aecmCore->supGainErrParamDiffAB = (SUPGAIN_ERROR_PARAM_A >> 3)
+                - (SUPGAIN_ERROR_PARAM_B >> 3);
+        aecm->aecmCore->supGainErrParamDiffBD = (SUPGAIN_ERROR_PARAM_B >> 3)
+                - (SUPGAIN_ERROR_PARAM_D >> 3);
+    } else if (aecm->echoMode == 1)
+    {
+        aecm->aecmCore->supGain = SUPGAIN_DEFAULT >> 2;
+        aecm->aecmCore->supGainOld = SUPGAIN_DEFAULT >> 2;
+        aecm->aecmCore->supGainErrParamA = SUPGAIN_ERROR_PARAM_A >> 2;
+        aecm->aecmCore->supGainErrParamD = SUPGAIN_ERROR_PARAM_D >> 2;
+        aecm->aecmCore->supGainErrParamDiffAB = (SUPGAIN_ERROR_PARAM_A >> 2)
+                - (SUPGAIN_ERROR_PARAM_B >> 2);
+        aecm->aecmCore->supGainErrParamDiffBD = (SUPGAIN_ERROR_PARAM_B >> 2)
+                - (SUPGAIN_ERROR_PARAM_D >> 2);
+    } else if (aecm->echoMode == 2)
+    {
+        aecm->aecmCore->supGain = SUPGAIN_DEFAULT >> 1;
+        aecm->aecmCore->supGainOld = SUPGAIN_DEFAULT >> 1;
+        aecm->aecmCore->supGainErrParamA = SUPGAIN_ERROR_PARAM_A >> 1;
+        aecm->aecmCore->supGainErrParamD = SUPGAIN_ERROR_PARAM_D >> 1;
+        aecm->aecmCore->supGainErrParamDiffAB = (SUPGAIN_ERROR_PARAM_A >> 1)
+                - (SUPGAIN_ERROR_PARAM_B >> 1);
+        aecm->aecmCore->supGainErrParamDiffBD = (SUPGAIN_ERROR_PARAM_B >> 1)
+                - (SUPGAIN_ERROR_PARAM_D >> 1);
+    } else if (aecm->echoMode == 3)
+    {
+        aecm->aecmCore->supGain = SUPGAIN_DEFAULT;
+        aecm->aecmCore->supGainOld = SUPGAIN_DEFAULT;
+        aecm->aecmCore->supGainErrParamA = SUPGAIN_ERROR_PARAM_A;
+        aecm->aecmCore->supGainErrParamD = SUPGAIN_ERROR_PARAM_D;
+        aecm->aecmCore->supGainErrParamDiffAB = SUPGAIN_ERROR_PARAM_A - SUPGAIN_ERROR_PARAM_B;
+        aecm->aecmCore->supGainErrParamDiffBD = SUPGAIN_ERROR_PARAM_B - SUPGAIN_ERROR_PARAM_D;
+    } else if (aecm->echoMode == 4)
+    {
+        aecm->aecmCore->supGain = SUPGAIN_DEFAULT << 1;
+        aecm->aecmCore->supGainOld = SUPGAIN_DEFAULT << 1;
+        aecm->aecmCore->supGainErrParamA = SUPGAIN_ERROR_PARAM_A << 1;
+        aecm->aecmCore->supGainErrParamD = SUPGAIN_ERROR_PARAM_D << 1;
+        aecm->aecmCore->supGainErrParamDiffAB = (SUPGAIN_ERROR_PARAM_A << 1)
+                - (SUPGAIN_ERROR_PARAM_B << 1);
+        aecm->aecmCore->supGainErrParamDiffBD = (SUPGAIN_ERROR_PARAM_B << 1)
+                - (SUPGAIN_ERROR_PARAM_D << 1);
+    }
+
+    return 0;
+}
+
+int32_t WebRtcAecm_InitEchoPath(void* aecmInst,
+                                const void* echo_path,
+                                size_t size_bytes)
+{
+    AecMobile* aecm = static_cast<AecMobile*>(aecmInst);
+    const int16_t* echo_path_ptr = static_cast<const int16_t*>(echo_path);
+
+    if (aecmInst == NULL) {
+      return -1;
+    }
+    if (echo_path == NULL) {
+      return AECM_NULL_POINTER_ERROR;
+    }
+    if (size_bytes != WebRtcAecm_echo_path_size_bytes())
+    {
+        // Input channel size does not match the size of AECM
+        return AECM_BAD_PARAMETER_ERROR;
+    }
+    if (aecm->initFlag != kInitCheck)
+    {
+        return AECM_UNINITIALIZED_ERROR;
+    }
+
+    WebRtcAecm_InitEchoPathCore(aecm->aecmCore, echo_path_ptr);
+
+    return 0;
+}
+
+int32_t WebRtcAecm_GetEchoPath(void* aecmInst,
+                               void* echo_path,
+                               size_t size_bytes)
+{
+    AecMobile* aecm = static_cast<AecMobile*>(aecmInst);
+    int16_t* echo_path_ptr = static_cast<int16_t*>(echo_path);
+
+    if (aecmInst == NULL) {
+      return -1;
+    }
+    if (echo_path == NULL) {
+      return AECM_NULL_POINTER_ERROR;
+    }
+    if (size_bytes != WebRtcAecm_echo_path_size_bytes())
+    {
+        // Input channel size does not match the size of AECM
+        return AECM_BAD_PARAMETER_ERROR;
+    }
+    if (aecm->initFlag != kInitCheck)
+    {
+        return AECM_UNINITIALIZED_ERROR;
+    }
+
+    memcpy(echo_path_ptr, aecm->aecmCore->channelStored, size_bytes);
+    return 0;
+}
+
+size_t WebRtcAecm_echo_path_size_bytes()
+{
+    return (PART_LEN1 * sizeof(int16_t));
+}
+
+
+static int WebRtcAecm_EstBufDelay(AecMobile* aecm, short msInSndCardBuf) {
+    short delayNew, nSampSndCard;
+    short nSampFar = (short) WebRtc_available_read(aecm->farendBuf);
+    short diff;
+
+    nSampSndCard = msInSndCardBuf * kSampMsNb * aecm->aecmCore->mult;
+
+    delayNew = nSampSndCard - nSampFar;
+
+    if (delayNew < FRAME_LEN)
+    {
+        WebRtc_MoveReadPtr(aecm->farendBuf, FRAME_LEN);
+        delayNew += FRAME_LEN;
+    }
+
+    aecm->filtDelay = WEBRTC_SPL_MAX(0, (8 * aecm->filtDelay + 2 * delayNew) / 10);
+
+    diff = aecm->filtDelay - aecm->knownDelay;
+    if (diff > 224)
+    {
+        if (aecm->lastDelayDiff < 96)
+        {
+            aecm->timeForDelayChange = 0;
+        } else
+        {
+            aecm->timeForDelayChange++;
+        }
+    } else if (diff < 96 && aecm->knownDelay > 0)
+    {
+        if (aecm->lastDelayDiff > 224)
+        {
+            aecm->timeForDelayChange = 0;
+        } else
+        {
+            aecm->timeForDelayChange++;
+        }
+    } else
+    {
+        aecm->timeForDelayChange = 0;
+    }
+    aecm->lastDelayDiff = diff;
+
+    if (aecm->timeForDelayChange > 25)
+    {
+        aecm->knownDelay = WEBRTC_SPL_MAX((int)aecm->filtDelay - 160, 0);
+    }
+    return 0;
+}
+
+static int WebRtcAecm_DelayComp(AecMobile* aecm) {
+    int nSampFar = (int) WebRtc_available_read(aecm->farendBuf);
+    int nSampSndCard, delayNew, nSampAdd;
+    const int maxStuffSamp = 10 * FRAME_LEN;
+
+    nSampSndCard = aecm->msInSndCardBuf * kSampMsNb * aecm->aecmCore->mult;
+    delayNew = nSampSndCard - nSampFar;
+
+    if (delayNew > FAR_BUF_LEN - FRAME_LEN * aecm->aecmCore->mult)
+    {
+        // The difference of the buffer sizes is larger than the maximum
+        // allowed known delay. Compensate by stuffing the buffer.
+        nSampAdd = (int)(WEBRTC_SPL_MAX(((nSampSndCard >> 1) - nSampFar),
+                FRAME_LEN));
+        nSampAdd = WEBRTC_SPL_MIN(nSampAdd, maxStuffSamp);
+
+        WebRtc_MoveReadPtr(aecm->farendBuf, -nSampAdd);
+        aecm->delayChange = 1; // the delay needs to be updated
+    }
+
+    return 0;
+}
diff --git a/modules/audio_processing/aecm/echo_control_mobile.h b/modules/audio_processing/aecm/echo_control_mobile.h
new file mode 100644
index 0000000..e0091c3
--- /dev/null
+++ b/modules/audio_processing/aecm/echo_control_mobile.h
@@ -0,0 +1,209 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AECM_ECHO_CONTROL_MOBILE_H_
+#define MODULES_AUDIO_PROCESSING_AECM_ECHO_CONTROL_MOBILE_H_
+
+#include <stdlib.h>
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+enum {
+    AecmFalse = 0,
+    AecmTrue
+};
+
+// Errors
+#define AECM_UNSPECIFIED_ERROR           12000
+#define AECM_UNSUPPORTED_FUNCTION_ERROR  12001
+#define AECM_UNINITIALIZED_ERROR         12002
+#define AECM_NULL_POINTER_ERROR          12003
+#define AECM_BAD_PARAMETER_ERROR         12004
+
+// Warnings
+#define AECM_BAD_PARAMETER_WARNING       12100
+
+typedef struct {
+    int16_t cngMode;            // AECM_FALSE, AECM_TRUE (default)
+    int16_t echoMode;           // 0, 1, 2, 3 (default), 4
+} AecmConfig;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Allocates the memory needed by the AECM. The memory needs to be
+ * initialized separately using the WebRtcAecm_Init() function.
+ * Returns a pointer to the instance and a nullptr at failure.
+ */
+void* WebRtcAecm_Create();
+
+/*
+ * This function releases the memory allocated by WebRtcAecm_Create()
+ *
+ * Inputs                       Description
+ * -------------------------------------------------------------------
+ * void*    aecmInst            Pointer to the AECM instance
+ */
+void WebRtcAecm_Free(void* aecmInst);
+
+/*
+ * Initializes an AECM instance.
+ *
+ * Inputs                       Description
+ * -------------------------------------------------------------------
+ * void*          aecmInst      Pointer to the AECM instance
+ * int32_t        sampFreq      Sampling frequency of data
+ *
+ * Outputs                      Description
+ * -------------------------------------------------------------------
+ * int32_t        return        0: OK
+ *                              1200-12004,12100: error/warning
+ */
+int32_t WebRtcAecm_Init(void* aecmInst, int32_t sampFreq);
+
+/*
+ * Inserts an 80 or 160 sample block of data into the farend buffer.
+ *
+ * Inputs                       Description
+ * -------------------------------------------------------------------
+ * void*          aecmInst      Pointer to the AECM instance
+ * int16_t*       farend        In buffer containing one frame of
+ *                              farend signal
+ * int16_t        nrOfSamples   Number of samples in farend buffer
+ *
+ * Outputs                      Description
+ * -------------------------------------------------------------------
+ * int32_t        return        0: OK
+ *                              1200-12004,12100: error/warning
+ */
+int32_t WebRtcAecm_BufferFarend(void* aecmInst,
+                                const int16_t* farend,
+                                size_t nrOfSamples);
+
+/*
+ * Reports any errors that would arise when buffering a farend buffer.
+ *
+ * Inputs                       Description
+ * -------------------------------------------------------------------
+ * void*          aecmInst      Pointer to the AECM instance
+ * int16_t*       farend        In buffer containing one frame of
+ *                              farend signal
+ * int16_t        nrOfSamples   Number of samples in farend buffer
+ *
+ * Outputs                      Description
+ * -------------------------------------------------------------------
+ * int32_t        return        0: OK
+ *                              1200-12004,12100: error/warning
+ */
+int32_t WebRtcAecm_GetBufferFarendError(void* aecmInst,
+                                        const int16_t* farend,
+                                        size_t nrOfSamples);
+
+/*
+ * Runs the AECM on an 80 or 160 sample blocks of data.
+ *
+ * Inputs                        Description
+ * -------------------------------------------------------------------
+ * void*          aecmInst       Pointer to the AECM instance
+ * int16_t*       nearendNoisy   In buffer containing one frame of
+ *                               reference nearend+echo signal. If
+ *                               noise reduction is active, provide
+ *                               the noisy signal here.
+ * int16_t*       nearendClean   In buffer containing one frame of
+ *                               nearend+echo signal. If noise
+ *                               reduction is active, provide the
+ *                               clean signal here. Otherwise pass a
+ *                               NULL pointer.
+ * int16_t        nrOfSamples    Number of samples in nearend buffer
+ * int16_t        msInSndCardBuf Delay estimate for sound card and
+ *                               system buffers
+ *
+ * Outputs                       Description
+ * -------------------------------------------------------------------
+ * int16_t*       out            Out buffer, one frame of processed nearend
+ * int32_t        return         0: OK
+ *                               1200-12004,12100: error/warning
+ */
+int32_t WebRtcAecm_Process(void* aecmInst,
+                           const int16_t* nearendNoisy,
+                           const int16_t* nearendClean,
+                           int16_t* out,
+                           size_t nrOfSamples,
+                           int16_t msInSndCardBuf);
+
+/*
+ * This function enables the user to set certain parameters on-the-fly
+ *
+ * Inputs                       Description
+ * -------------------------------------------------------------------
+ * void*          aecmInst      Pointer to the AECM instance
+ * AecmConfig     config        Config instance that contains all
+ *                              properties to be set
+ *
+ * Outputs                      Description
+ * -------------------------------------------------------------------
+ * int32_t        return        0: OK
+ *                              1200-12004,12100: error/warning
+ */
+int32_t WebRtcAecm_set_config(void* aecmInst, AecmConfig config);
+
+/*
+ * This function enables the user to set the echo path on-the-fly.
+ *
+ * Inputs                       Description
+ * -------------------------------------------------------------------
+ * void*        aecmInst        Pointer to the AECM instance
+ * void*        echo_path       Pointer to the echo path to be set
+ * size_t       size_bytes      Size in bytes of the echo path
+ *
+ * Outputs                      Description
+ * -------------------------------------------------------------------
+ * int32_t      return          0: OK
+ *                              1200-12004,12100: error/warning
+ */
+int32_t WebRtcAecm_InitEchoPath(void* aecmInst,
+                                const void* echo_path,
+                                size_t size_bytes);
+
+/*
+ * This function enables the user to get the currently used echo path
+ * on-the-fly
+ *
+ * Inputs                       Description
+ * -------------------------------------------------------------------
+ * void*        aecmInst        Pointer to the AECM instance
+ * void*        echo_path       Pointer to echo path
+ * size_t       size_bytes      Size in bytes of the echo path
+ *
+ * Outputs                      Description
+ * -------------------------------------------------------------------
+ * int32_t      return          0: OK
+ *                              1200-12004,12100: error/warning
+ */
+int32_t WebRtcAecm_GetEchoPath(void* aecmInst,
+                               void* echo_path,
+                               size_t size_bytes);
+
+/*
+ * This function enables the user to get the echo path size in bytes
+ *
+ * Outputs                      Description
+ * -------------------------------------------------------------------
+ * size_t       return          Size in bytes
+ */
+size_t WebRtcAecm_echo_path_size_bytes();
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MODULES_AUDIO_PROCESSING_AECM_ECHO_CONTROL_MOBILE_H_
diff --git a/modules/audio_processing/agc/agc.cc b/modules/audio_processing/agc/agc.cc
new file mode 100644
index 0000000..e161676
--- /dev/null
+++ b/modules/audio_processing/agc/agc.cc
@@ -0,0 +1,107 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc/agc.h"
+
+#include <cmath>
+#include <cstdlib>
+
+#include <algorithm>
+#include <vector>
+
+#include "modules/audio_processing/agc/loudness_histogram.h"
+#include "modules/audio_processing/agc/utility.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+const int kDefaultLevelDbfs = -18;
+const int kNumAnalysisFrames = 100;
+const double kActivityThreshold = 0.3;
+
+}  // namespace
+
+Agc::Agc()
+    : target_level_loudness_(Dbfs2Loudness(kDefaultLevelDbfs)),
+      target_level_dbfs_(kDefaultLevelDbfs),
+      histogram_(LoudnessHistogram::Create(kNumAnalysisFrames)),
+      inactive_histogram_(LoudnessHistogram::Create()) {}
+
+Agc::~Agc() {}
+
+float Agc::AnalyzePreproc(const int16_t* audio, size_t length) {
+  RTC_DCHECK_GT(length, 0);
+  size_t num_clipped = 0;
+  for (size_t i = 0; i < length; ++i) {
+    if (audio[i] == 32767 || audio[i] == -32768)
+      ++num_clipped;
+  }
+  return 1.0f * num_clipped / length;
+}
+
+void Agc::Process(const int16_t* audio, size_t length, int sample_rate_hz) {
+  vad_.ProcessChunk(audio, length, sample_rate_hz);
+  const std::vector<double>& rms = vad_.chunkwise_rms();
+  const std::vector<double>& probabilities =
+      vad_.chunkwise_voice_probabilities();
+  RTC_DCHECK_EQ(rms.size(), probabilities.size());
+  for (size_t i = 0; i < rms.size(); ++i) {
+    histogram_->Update(rms[i], probabilities[i]);
+  }
+}
+
+bool Agc::GetRmsErrorDb(int* error) {
+  if (!error) {
+    RTC_NOTREACHED();
+    return false;
+  }
+
+  if (histogram_->num_updates() < kNumAnalysisFrames) {
+    // We haven't yet received enough frames.
+    return false;
+  }
+
+  if (histogram_->AudioContent() < kNumAnalysisFrames * kActivityThreshold) {
+    // We are likely in an inactive segment.
+    return false;
+  }
+
+  double loudness = Linear2Loudness(histogram_->CurrentRms());
+  *error = std::floor(Loudness2Db(target_level_loudness_ - loudness) + 0.5);
+  histogram_->Reset();
+  return true;
+}
+
+void Agc::Reset() {
+  histogram_->Reset();
+}
+
+int Agc::set_target_level_dbfs(int level) {
+  // TODO(turajs): just some arbitrary sanity check. We can come up with better
+  // limits. The upper limit should be chosen such that the risk of clipping is
+  // low. The lower limit should not result in a too quiet signal.
+  if (level >= 0 || level <= -100)
+    return -1;
+  target_level_dbfs_ = level;
+  target_level_loudness_ = Dbfs2Loudness(level);
+  return 0;
+}
+
+int Agc::target_level_dbfs() const {
+  return target_level_dbfs_;
+}
+
+float Agc::voice_probability() const {
+  return vad_.last_voice_probability();
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/agc/agc.h b/modules/audio_processing/agc/agc.h
new file mode 100644
index 0000000..98bbf1f
--- /dev/null
+++ b/modules/audio_processing/agc/agc.h
@@ -0,0 +1,56 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_AGC_H_
+#define MODULES_AUDIO_PROCESSING_AGC_AGC_H_
+
+#include <memory>
+
+#include "modules/audio_processing/vad/voice_activity_detector.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class AudioFrame;
+class LoudnessHistogram;
+
+class Agc {
+ public:
+  Agc();
+  virtual ~Agc();
+
+  // Returns the proportion of samples in the buffer which are at full-scale
+  // (and presumably clipped).
+  virtual float AnalyzePreproc(const int16_t* audio, size_t length);
+  // |audio| must be mono; in a multi-channel stream, provide the first (usually
+  // left) channel.
+  virtual void Process(const int16_t* audio, size_t length, int sample_rate_hz);
+
+  // Retrieves the difference between the target RMS level and the current
+  // signal RMS level in dB. Returns true if an update is available and false
+  // otherwise, in which case |error| should be ignored and no action taken.
+  virtual bool GetRmsErrorDb(int* error);
+  virtual void Reset();
+
+  virtual int set_target_level_dbfs(int level);
+  virtual int target_level_dbfs() const;
+  virtual float voice_probability() const;
+
+ private:
+  double target_level_loudness_;
+  int target_level_dbfs_;
+  std::unique_ptr<LoudnessHistogram> histogram_;
+  std::unique_ptr<LoudnessHistogram> inactive_histogram_;
+  VoiceActivityDetector vad_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AGC_AGC_H_
diff --git a/modules/audio_processing/agc/agc_manager_direct.cc b/modules/audio_processing/agc/agc_manager_direct.cc
new file mode 100644
index 0000000..5ba5f4f
--- /dev/null
+++ b/modules/audio_processing/agc/agc_manager_direct.cc
@@ -0,0 +1,453 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc/agc_manager_direct.h"
+
+#include <cmath>
+
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+#include <cstdio>
+#endif
+
+#include "modules/audio_processing/agc/gain_map_internal.h"
+#include "modules/audio_processing/gain_control_impl.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+namespace {
+
+// Amount the microphone level is lowered with every clipping event.
+const int kClippedLevelStep = 15;
+// Proportion of clipped samples required to declare a clipping event.
+const float kClippedRatioThreshold = 0.1f;
+// Time in frames to wait after a clipping event before checking again.
+const int kClippedWaitFrames = 300;
+
+// Amount of error we tolerate in the microphone level (presumably due to OS
+// quantization) before we assume the user has manually adjusted the microphone.
+const int kLevelQuantizationSlack = 25;
+
+const int kDefaultCompressionGain = 7;
+const int kMaxCompressionGain = 12;
+const int kMinCompressionGain = 2;
+// Controls the rate of compression changes towards the target.
+const float kCompressionGainStep = 0.05f;
+
+const int kMaxMicLevel = 255;
+static_assert(kGainMapSize > kMaxMicLevel, "gain map too small");
+const int kMinMicLevel = 12;
+
+// Prevent very large microphone level changes.
+const int kMaxResidualGainChange = 15;
+
+// Maximum additional gain allowed to compensate for microphone level
+// restrictions from clipping events.
+const int kSurplusCompressionGain = 6;
+
+int ClampLevel(int mic_level) {
+  return rtc::SafeClamp(mic_level, kMinMicLevel, kMaxMicLevel);
+}
+
+int LevelFromGainError(int gain_error, int level) {
+  RTC_DCHECK_GE(level, 0);
+  RTC_DCHECK_LE(level, kMaxMicLevel);
+  if (gain_error == 0) {
+    return level;
+  }
+  // TODO(ajm): Could be made more efficient with a binary search.
+  int new_level = level;
+  if (gain_error > 0) {
+    while (kGainMap[new_level] - kGainMap[level] < gain_error &&
+          new_level < kMaxMicLevel) {
+      ++new_level;
+    }
+  } else {
+    while (kGainMap[new_level] - kGainMap[level] > gain_error &&
+          new_level > kMinMicLevel) {
+      --new_level;
+    }
+  }
+  return new_level;
+}
+
+}  // namespace
+
+// Facility for dumping debug audio files. All methods are no-ops in the
+// default case where WEBRTC_AGC_DEBUG_DUMP is undefined.
+class DebugFile {
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+ public:
+  explicit DebugFile(const char* filename)
+      : file_(fopen(filename, "wb")) {
+    RTC_DCHECK(file_);
+  }
+  ~DebugFile() {
+    fclose(file_);
+  }
+  void Write(const int16_t* data, size_t length_samples) {
+    fwrite(data, 1, length_samples * sizeof(int16_t), file_);
+  }
+ private:
+  FILE* file_;
+#else
+ public:
+  explicit DebugFile(const char* filename) {
+  }
+  ~DebugFile() {
+  }
+  void Write(const int16_t* data, size_t length_samples) {
+  }
+#endif  // WEBRTC_AGC_DEBUG_DUMP
+};
+
+AgcManagerDirect::AgcManagerDirect(GainControl* gctrl,
+                                   VolumeCallbacks* volume_callbacks,
+                                   int startup_min_level,
+                                   int clipped_level_min)
+    : agc_(new Agc()),
+      gctrl_(gctrl),
+      volume_callbacks_(volume_callbacks),
+      frames_since_clipped_(kClippedWaitFrames),
+      level_(0),
+      max_level_(kMaxMicLevel),
+      max_compression_gain_(kMaxCompressionGain),
+      target_compression_(kDefaultCompressionGain),
+      compression_(target_compression_),
+      compression_accumulator_(compression_),
+      capture_muted_(false),
+      check_volume_on_next_process_(true),  // Check at startup.
+      startup_(true),
+      startup_min_level_(ClampLevel(startup_min_level)),
+      clipped_level_min_(clipped_level_min),
+      file_preproc_(new DebugFile("agc_preproc.pcm")),
+      file_postproc_(new DebugFile("agc_postproc.pcm")) {}
+
+AgcManagerDirect::AgcManagerDirect(Agc* agc,
+                                   GainControl* gctrl,
+                                   VolumeCallbacks* volume_callbacks,
+                                   int startup_min_level,
+                                   int clipped_level_min)
+    : agc_(agc),
+      gctrl_(gctrl),
+      volume_callbacks_(volume_callbacks),
+      frames_since_clipped_(kClippedWaitFrames),
+      level_(0),
+      max_level_(kMaxMicLevel),
+      max_compression_gain_(kMaxCompressionGain),
+      target_compression_(kDefaultCompressionGain),
+      compression_(target_compression_),
+      compression_accumulator_(compression_),
+      capture_muted_(false),
+      check_volume_on_next_process_(true),  // Check at startup.
+      startup_(true),
+      startup_min_level_(ClampLevel(startup_min_level)),
+      clipped_level_min_(clipped_level_min),
+      file_preproc_(new DebugFile("agc_preproc.pcm")),
+      file_postproc_(new DebugFile("agc_postproc.pcm")) {}
+
+AgcManagerDirect::~AgcManagerDirect() {}
+
+int AgcManagerDirect::Initialize() {
+  max_level_ = kMaxMicLevel;
+  max_compression_gain_ = kMaxCompressionGain;
+  target_compression_ = kDefaultCompressionGain;
+  compression_ = target_compression_;
+  compression_accumulator_ = compression_;
+  capture_muted_ = false;
+  check_volume_on_next_process_ = true;
+  // TODO(bjornv): Investigate if we need to reset |startup_| as well. For
+  // example, what happens when we change devices.
+
+  if (gctrl_->set_mode(GainControl::kFixedDigital) != 0) {
+    RTC_LOG(LS_ERROR) << "set_mode(GainControl::kFixedDigital) failed.";
+    return -1;
+  }
+  if (gctrl_->set_target_level_dbfs(2) != 0) {
+    RTC_LOG(LS_ERROR) << "set_target_level_dbfs(2) failed.";
+    return -1;
+  }
+  if (gctrl_->set_compression_gain_db(kDefaultCompressionGain) != 0) {
+    RTC_LOG(LS_ERROR)
+        << "set_compression_gain_db(kDefaultCompressionGain) failed.";
+    return -1;
+  }
+  if (gctrl_->enable_limiter(true) != 0) {
+    RTC_LOG(LS_ERROR) << "enable_limiter(true) failed.";
+    return -1;
+  }
+  return 0;
+}
+
+void AgcManagerDirect::AnalyzePreProcess(int16_t* audio,
+                                         int num_channels,
+                                         size_t samples_per_channel) {
+  size_t length = num_channels * samples_per_channel;
+  if (capture_muted_) {
+    return;
+  }
+
+  file_preproc_->Write(audio, length);
+
+  if (frames_since_clipped_ < kClippedWaitFrames) {
+    ++frames_since_clipped_;
+    return;
+  }
+
+  // Check for clipped samples, as the AGC has difficulty detecting pitch
+  // under clipping distortion. We do this in the preprocessing phase in order
+  // to catch clipped echo as well.
+  //
+  // If we find a sufficiently clipped frame, drop the current microphone level
+  // and enforce a new maximum level, dropped the same amount from the current
+  // maximum. This harsh treatment is an effort to avoid repeated clipped echo
+  // events. As compensation for this restriction, the maximum compression
+  // gain is increased, through SetMaxLevel().
+  float clipped_ratio = agc_->AnalyzePreproc(audio, length);
+  if (clipped_ratio > kClippedRatioThreshold) {
+    RTC_DLOG(LS_INFO) << "[agc] Clipping detected. clipped_ratio="
+                      << clipped_ratio;
+    // Always decrease the maximum level, even if the current level is below
+    // threshold.
+    SetMaxLevel(std::max(clipped_level_min_, max_level_ - kClippedLevelStep));
+    RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.AgcClippingAdjustmentAllowed",
+                          level_ - kClippedLevelStep >= clipped_level_min_);
+    if (level_ > clipped_level_min_) {
+      // Don't try to adjust the level if we're already below the limit. As
+      // a consequence, if the user has brought the level above the limit, we
+      // will still not react until the postproc updates the level.
+      SetLevel(std::max(clipped_level_min_, level_ - kClippedLevelStep));
+      // Reset the AGC since the level has changed.
+      agc_->Reset();
+    }
+    frames_since_clipped_ = 0;
+  }
+}
+
+void AgcManagerDirect::Process(const int16_t* audio,
+                               size_t length,
+                               int sample_rate_hz) {
+  if (capture_muted_) {
+    return;
+  }
+
+  if (check_volume_on_next_process_) {
+    check_volume_on_next_process_ = false;
+    // We have to wait until the first process call to check the volume,
+    // because Chromium doesn't guarantee it to be valid any earlier.
+    CheckVolumeAndReset();
+  }
+
+  agc_->Process(audio, length, sample_rate_hz);
+
+  UpdateGain();
+  UpdateCompressor();
+
+  file_postproc_->Write(audio, length);
+}
+
+void AgcManagerDirect::SetLevel(int new_level) {
+  int voe_level = volume_callbacks_->GetMicVolume();
+  if (voe_level == 0) {
+    RTC_DLOG(LS_INFO)
+        << "[agc] VolumeCallbacks returned level=0, taking no action.";
+    return;
+  }
+  if (voe_level < 0 || voe_level > kMaxMicLevel) {
+    RTC_LOG(LS_ERROR) << "VolumeCallbacks returned an invalid level="
+                      << voe_level;
+    return;
+  }
+
+  if (voe_level > level_ + kLevelQuantizationSlack ||
+      voe_level < level_ - kLevelQuantizationSlack) {
+    RTC_DLOG(LS_INFO) << "[agc] Mic volume was manually adjusted. Updating "
+                         "stored level from " << level_ << " to " << voe_level;
+    level_ = voe_level;
+    // Always allow the user to increase the volume.
+    if (level_ > max_level_) {
+      SetMaxLevel(level_);
+    }
+    // Take no action in this case, since we can't be sure when the volume
+    // was manually adjusted. The compressor will still provide some of the
+    // desired gain change.
+    agc_->Reset();
+    return;
+  }
+
+  new_level = std::min(new_level, max_level_);
+  if (new_level == level_) {
+    return;
+  }
+
+  volume_callbacks_->SetMicVolume(new_level);
+  RTC_DLOG(LS_INFO) << "[agc] voe_level=" << voe_level << ", "
+                    << "level_=" << level_ << ", "
+                    << "new_level=" << new_level;
+  level_ = new_level;
+}
+
+void AgcManagerDirect::SetMaxLevel(int level) {
+  RTC_DCHECK_GE(level, clipped_level_min_);
+  max_level_ = level;
+  // Scale the |kSurplusCompressionGain| linearly across the restricted
+  // level range.
+  max_compression_gain_ =
+      kMaxCompressionGain + std::floor((1.f * kMaxMicLevel - max_level_) /
+                                           (kMaxMicLevel - clipped_level_min_) *
+                                           kSurplusCompressionGain +
+                                       0.5f);
+  RTC_DLOG(LS_INFO) << "[agc] max_level_=" << max_level_
+                    << ", max_compression_gain_=" << max_compression_gain_;
+}
+
+void AgcManagerDirect::SetCaptureMuted(bool muted) {
+  if (capture_muted_ == muted) {
+    return;
+  }
+  capture_muted_ = muted;
+
+  if (!muted) {
+    // When we unmute, we should reset things to be safe.
+    check_volume_on_next_process_ = true;
+  }
+}
+
+float AgcManagerDirect::voice_probability() {
+  return agc_->voice_probability();
+}
+
+int AgcManagerDirect::CheckVolumeAndReset() {
+  int level = volume_callbacks_->GetMicVolume();
+  // Reasons for taking action at startup:
+  // 1) A person starting a call is expected to be heard.
+  // 2) Independent of interpretation of |level| == 0 we should raise it so the
+  // AGC can do its job properly.
+  if (level == 0 && !startup_) {
+    RTC_DLOG(LS_INFO)
+        << "[agc] VolumeCallbacks returned level=0, taking no action.";
+    return 0;
+  }
+  if (level < 0 || level > kMaxMicLevel) {
+    RTC_LOG(LS_ERROR) << "[agc] VolumeCallbacks returned an invalid level="
+                      << level;
+    return -1;
+  }
+  RTC_DLOG(LS_INFO) << "[agc] Initial GetMicVolume()=" << level;
+
+  int minLevel = startup_ ? startup_min_level_ : kMinMicLevel;
+  if (level < minLevel) {
+    level = minLevel;
+    RTC_DLOG(LS_INFO) << "[agc] Initial volume too low, raising to " << level;
+    volume_callbacks_->SetMicVolume(level);
+  }
+  agc_->Reset();
+  level_ = level;
+  startup_ = false;
+  return 0;
+}
+
+// Requests the RMS error from AGC and distributes the required gain change
+// between the digital compression stage and volume slider. We use the
+// compressor first, providing a slack region around the current slider
+// position to reduce movement.
+//
+// If the slider needs to be moved, we check first if the user has adjusted
+// it, in which case we take no action and cache the updated level.
+void AgcManagerDirect::UpdateGain() {
+  int rms_error = 0;
+  if (!agc_->GetRmsErrorDb(&rms_error)) {
+    // No error update ready.
+    return;
+  }
+  // The compressor will always add at least kMinCompressionGain. In effect,
+  // this adjusts our target gain upward by the same amount and rms_error
+  // needs to reflect that.
+  rms_error += kMinCompressionGain;
+
+  // Handle as much error as possible with the compressor first.
+  int raw_compression =
+      rtc::SafeClamp(rms_error, kMinCompressionGain, max_compression_gain_);
+
+  // Deemphasize the compression gain error. Move halfway between the current
+  // target and the newly received target. This serves to soften perceptible
+  // intra-talkspurt adjustments, at the cost of some adaptation speed.
+  if ((raw_compression == max_compression_gain_ &&
+      target_compression_ == max_compression_gain_ - 1) ||
+      (raw_compression == kMinCompressionGain &&
+      target_compression_ == kMinCompressionGain + 1)) {
+    // Special case to allow the target to reach the endpoints of the
+    // compression range. The deemphasis would otherwise halt it at 1 dB shy.
+    target_compression_ = raw_compression;
+  } else {
+    target_compression_ = (raw_compression - target_compression_) / 2
+        + target_compression_;
+  }
+
+  // Residual error will be handled by adjusting the volume slider. Use the
+  // raw rather than deemphasized compression here as we would otherwise
+  // shrink the amount of slack the compressor provides.
+  const int residual_gain =
+      rtc::SafeClamp(rms_error - raw_compression, -kMaxResidualGainChange,
+                     kMaxResidualGainChange);
+  RTC_DLOG(LS_INFO) << "[agc] rms_error=" << rms_error
+                    << ", target_compression=" << target_compression_
+                    << ", residual_gain=" << residual_gain;
+  if (residual_gain == 0)
+    return;
+
+  int old_level = level_;
+  SetLevel(LevelFromGainError(residual_gain, level_));
+  if (old_level != level_) {
+    // level_ was updated by SetLevel; log the new value.
+    RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.AgcSetLevel", level_, 1,
+                                kMaxMicLevel, 50);
+  }
+}
+
+void AgcManagerDirect::UpdateCompressor() {
+  if (compression_ == target_compression_) {
+    return;
+  }
+
+  // Adapt the compression gain slowly towards the target, in order to avoid
+  // highly perceptible changes.
+  if (target_compression_ > compression_) {
+    compression_accumulator_ += kCompressionGainStep;
+  } else {
+    compression_accumulator_ -= kCompressionGainStep;
+  }
+
+  // The compressor accepts integer gains in dB. Adjust the gain when
+  // we've come within half a stepsize of the nearest integer.  (We don't
+  // check for equality due to potential floating point imprecision).
+  int new_compression = compression_;
+  int nearest_neighbor = std::floor(compression_accumulator_ + 0.5);
+  if (std::fabs(compression_accumulator_ - nearest_neighbor) <
+      kCompressionGainStep / 2) {
+    new_compression = nearest_neighbor;
+  }
+
+  // Set the new compression gain.
+  if (new_compression != compression_) {
+    compression_ = new_compression;
+    compression_accumulator_ = new_compression;
+    if (gctrl_->set_compression_gain_db(compression_) != 0) {
+      RTC_LOG(LS_ERROR) << "set_compression_gain_db(" << compression_
+                        << ") failed.";
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/agc/agc_manager_direct.h b/modules/audio_processing/agc/agc_manager_direct.h
new file mode 100644
index 0000000..03d2607
--- /dev/null
+++ b/modules/audio_processing/agc/agc_manager_direct.h
@@ -0,0 +1,113 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_AGC_MANAGER_DIRECT_H_
+#define MODULES_AUDIO_PROCESSING_AGC_AGC_MANAGER_DIRECT_H_
+
+#include <memory>
+
+#include "modules/audio_processing/agc/agc.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class AudioFrame;
+class DebugFile;
+class GainControl;
+
+// Callbacks that need to be injected into AgcManagerDirect to read and control
+// the volume values. This is done to remove the VoiceEngine dependency in
+// AgcManagerDirect.
+// TODO(aluebs): Remove VolumeCallbacks.
+class VolumeCallbacks {
+ public:
+  virtual ~VolumeCallbacks() {}
+  virtual void SetMicVolume(int volume) = 0;
+  virtual int GetMicVolume() = 0;
+};
+
+// Direct interface to use AGC to set volume and compression values.
+// AudioProcessing uses this interface directly to integrate the callback-less
+// AGC.
+//
+// This class is not thread-safe.
+class AgcManagerDirect final {
+ public:
+  // AgcManagerDirect will configure GainControl internally. The user is
+  // responsible for processing the audio using it after the call to Process.
+  // The operating range of startup_min_level is [12, 255] and any input value
+  // outside that range will be clamped.
+  AgcManagerDirect(GainControl* gctrl,
+                   VolumeCallbacks* volume_callbacks,
+                   int startup_min_level,
+                   int clipped_level_min);
+  // Dependency injection for testing. Don't delete |agc| as the memory is owned
+  // by the manager.
+  AgcManagerDirect(Agc* agc,
+                   GainControl* gctrl,
+                   VolumeCallbacks* volume_callbacks,
+                   int startup_min_level,
+                   int clipped_level_min);
+  ~AgcManagerDirect();
+
+  int Initialize();
+  void AnalyzePreProcess(int16_t* audio,
+                         int num_channels,
+                         size_t samples_per_channel);
+  void Process(const int16_t* audio, size_t length, int sample_rate_hz);
+
+  // Call when the capture stream has been muted/unmuted. This causes the
+  // manager to disregard all incoming audio; chances are good it's background
+  // noise to which we'd like to avoid adapting.
+  void SetCaptureMuted(bool muted);
+  bool capture_muted() { return capture_muted_; }
+
+  float voice_probability();
+
+ private:
+  // Sets a new microphone level, after first checking that it hasn't been
+  // updated by the user, in which case no action is taken.
+  void SetLevel(int new_level);
+
+  // Set the maximum level the AGC is allowed to apply. Also updates the
+  // maximum compression gain to compensate. The level must be at least
+  // |kClippedLevelMin|.
+  void SetMaxLevel(int level);
+
+  int CheckVolumeAndReset();
+  void UpdateGain();
+  void UpdateCompressor();
+
+  std::unique_ptr<Agc> agc_;
+  GainControl* gctrl_;
+  VolumeCallbacks* volume_callbacks_;
+
+  int frames_since_clipped_;
+  int level_;
+  int max_level_;
+  int max_compression_gain_;
+  int target_compression_;
+  int compression_;
+  float compression_accumulator_;
+  bool capture_muted_;
+  bool check_volume_on_next_process_;
+  bool startup_;
+  int startup_min_level_;
+  const int clipped_level_min_;
+
+  std::unique_ptr<DebugFile> file_preproc_;
+  std::unique_ptr<DebugFile> file_postproc_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(AgcManagerDirect);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AGC_AGC_MANAGER_DIRECT_H_
diff --git a/modules/audio_processing/agc/agc_manager_direct_unittest.cc b/modules/audio_processing/agc/agc_manager_direct_unittest.cc
new file mode 100644
index 0000000..1a03402
--- /dev/null
+++ b/modules/audio_processing/agc/agc_manager_direct_unittest.cc
@@ -0,0 +1,682 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc/agc_manager_direct.h"
+
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_processing/agc/mock_agc.h"
+#include "modules/audio_processing/include/mock_audio_processing.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::Return;
+using ::testing::SetArgPointee;
+
+namespace webrtc {
+namespace {
+
+const int kSampleRateHz = 32000;
+const int kNumChannels = 1;
+const int kSamplesPerChannel = kSampleRateHz / 100;
+const int kInitialVolume = 128;
+constexpr int kClippedMin = 165;  // Arbitrary, but different from the default.
+const float kAboveClippedThreshold = 0.2f;
+
+class TestVolumeCallbacks : public VolumeCallbacks {
+ public:
+  TestVolumeCallbacks() : volume_(0) {}
+  void SetMicVolume(int volume) override { volume_ = volume; }
+  int GetMicVolume() override { return volume_; }
+
+ private:
+  int volume_;
+};
+
+}  // namespace
+
+class AgcManagerDirectTest : public ::testing::Test {
+ protected:
+  AgcManagerDirectTest()
+      : agc_(new MockAgc),
+        manager_(agc_, &gctrl_, &volume_, kInitialVolume, kClippedMin) {
+    ExpectInitialize();
+    manager_.Initialize();
+  }
+
+  void FirstProcess() {
+    EXPECT_CALL(*agc_, Reset());
+    EXPECT_CALL(*agc_, GetRmsErrorDb(_)).WillOnce(Return(false));
+    CallProcess(1);
+  }
+
+  void SetVolumeAndProcess(int volume) {
+    volume_.SetMicVolume(volume);
+    FirstProcess();
+  }
+
+  void ExpectCheckVolumeAndReset(int volume) {
+    volume_.SetMicVolume(volume);
+    EXPECT_CALL(*agc_, Reset());
+  }
+
+  void ExpectInitialize() {
+    EXPECT_CALL(gctrl_, set_mode(GainControl::kFixedDigital));
+    EXPECT_CALL(gctrl_, set_target_level_dbfs(2));
+    EXPECT_CALL(gctrl_, set_compression_gain_db(7));
+    EXPECT_CALL(gctrl_, enable_limiter(true));
+  }
+
+  void CallProcess(int num_calls) {
+    for (int i = 0; i < num_calls; ++i) {
+      EXPECT_CALL(*agc_, Process(_, _, _)).WillOnce(Return());
+      manager_.Process(nullptr, kSamplesPerChannel, kSampleRateHz);
+    }
+  }
+
+  void CallPreProc(int num_calls) {
+    for (int i = 0; i < num_calls; ++i) {
+      manager_.AnalyzePreProcess(nullptr, kNumChannels, kSamplesPerChannel);
+    }
+  }
+
+  MockAgc* agc_;
+  test::MockGainControl gctrl_;
+  TestVolumeCallbacks volume_;
+  AgcManagerDirect manager_;
+};
+
+TEST_F(AgcManagerDirectTest, StartupMinVolumeConfigurationIsRespected) {
+  FirstProcess();
+  EXPECT_EQ(kInitialVolume, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, MicVolumeResponseToRmsError) {
+  FirstProcess();
+
+  // Compressor default; no residual error.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(5), Return(true)));
+  CallProcess(1);
+
+  // Inside the compressor's window; no change of volume.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(10), Return(true)));
+  CallProcess(1);
+
+  // Above the compressor's window; volume should be increased.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(130, volume_.GetMicVolume());
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(20), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(168, volume_.GetMicVolume());
+
+  // Inside the compressor's window; no change of volume.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(5), Return(true)));
+  CallProcess(1);
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(0), Return(true)));
+  CallProcess(1);
+
+  // Below the compressor's window; volume should be decreased.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(167, volume_.GetMicVolume());
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(163, volume_.GetMicVolume());
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(-9), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(129, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, MicVolumeIsLimited) {
+  FirstProcess();
+
+  // Maximum upwards change is limited.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(30), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(183, volume_.GetMicVolume());
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(30), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(243, volume_.GetMicVolume());
+
+  // Won't go higher than the maximum.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(30), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(255, volume_.GetMicVolume());
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(254, volume_.GetMicVolume());
+
+  // Maximum downwards change is limited.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(194, volume_.GetMicVolume());
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(137, volume_.GetMicVolume());
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(88, volume_.GetMicVolume());
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(54, volume_.GetMicVolume());
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(33, volume_.GetMicVolume());
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(18, volume_.GetMicVolume());
+
+  // Won't go lower than the minimum.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(12, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, CompressorStepsTowardsTarget) {
+  FirstProcess();
+
+  // Compressor default; no call to set_compression_gain_db.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(5), Return(true)))
+      .WillRepeatedly(Return(false));
+  EXPECT_CALL(gctrl_, set_compression_gain_db(_)).Times(0);
+  CallProcess(20);
+
+  // Moves slowly upwards.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(9), Return(true)))
+      .WillRepeatedly(Return(false));
+  EXPECT_CALL(gctrl_, set_compression_gain_db(_)).Times(0);
+  CallProcess(19);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(8)).WillOnce(Return(0));
+  CallProcess(1);
+
+  EXPECT_CALL(gctrl_, set_compression_gain_db(_)).Times(0);
+  CallProcess(19);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(9)).WillOnce(Return(0));
+  CallProcess(1);
+
+  EXPECT_CALL(gctrl_, set_compression_gain_db(_)).Times(0);
+  CallProcess(20);
+
+  // Moves slowly downward, then reverses before reaching the original target.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(5), Return(true)))
+      .WillRepeatedly(Return(false));
+  EXPECT_CALL(gctrl_, set_compression_gain_db(_)).Times(0);
+  CallProcess(19);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(8)).WillOnce(Return(0));
+  CallProcess(1);
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(9), Return(true)))
+      .WillRepeatedly(Return(false));
+  EXPECT_CALL(gctrl_, set_compression_gain_db(_)).Times(0);
+  CallProcess(19);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(9)).WillOnce(Return(0));
+  CallProcess(1);
+
+  EXPECT_CALL(gctrl_, set_compression_gain_db(_)).Times(0);
+  CallProcess(20);
+}
+
+TEST_F(AgcManagerDirectTest, CompressorErrorIsDeemphasized) {
+  FirstProcess();
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(10), Return(true)))
+      .WillRepeatedly(Return(false));
+  CallProcess(19);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(8)).WillOnce(Return(0));
+  CallProcess(20);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(9)).WillOnce(Return(0));
+  CallProcess(1);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(_)).Times(0);
+  CallProcess(20);
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(0), Return(true)))
+      .WillRepeatedly(Return(false));
+  CallProcess(19);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(8)).WillOnce(Return(0));
+  CallProcess(20);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(7)).WillOnce(Return(0));
+  CallProcess(20);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(6)).WillOnce(Return(0));
+  CallProcess(1);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(_)).Times(0);
+  CallProcess(20);
+}
+
+TEST_F(AgcManagerDirectTest, CompressorReachesMaximum) {
+  FirstProcess();
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(10), Return(true)))
+      .WillOnce(DoAll(SetArgPointee<0>(10), Return(true)))
+      .WillOnce(DoAll(SetArgPointee<0>(10), Return(true)))
+      .WillOnce(DoAll(SetArgPointee<0>(10), Return(true)))
+      .WillRepeatedly(Return(false));
+  CallProcess(19);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(8)).WillOnce(Return(0));
+  CallProcess(20);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(9)).WillOnce(Return(0));
+  CallProcess(20);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(10)).WillOnce(Return(0));
+  CallProcess(20);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(11)).WillOnce(Return(0));
+  CallProcess(20);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(12)).WillOnce(Return(0));
+  CallProcess(1);
+}
+
+TEST_F(AgcManagerDirectTest, CompressorReachesMinimum) {
+  FirstProcess();
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(0), Return(true)))
+      .WillOnce(DoAll(SetArgPointee<0>(0), Return(true)))
+      .WillOnce(DoAll(SetArgPointee<0>(0), Return(true)))
+      .WillOnce(DoAll(SetArgPointee<0>(0), Return(true)))
+      .WillRepeatedly(Return(false));
+  CallProcess(19);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(6)).WillOnce(Return(0));
+  CallProcess(20);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(5)).WillOnce(Return(0));
+  CallProcess(20);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(4)).WillOnce(Return(0));
+  CallProcess(20);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(3)).WillOnce(Return(0));
+  CallProcess(20);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(2)).WillOnce(Return(0));
+  CallProcess(1);
+}
+
+TEST_F(AgcManagerDirectTest, NoActionWhileMuted) {
+  manager_.SetCaptureMuted(true);
+  manager_.Process(nullptr, kSamplesPerChannel, kSampleRateHz);
+}
+
+TEST_F(AgcManagerDirectTest, UnmutingChecksVolumeWithoutRaising) {
+  FirstProcess();
+
+  manager_.SetCaptureMuted(true);
+  manager_.SetCaptureMuted(false);
+  ExpectCheckVolumeAndReset(127);
+  // SetMicVolume should not be called.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_)).WillOnce(Return(false));
+  CallProcess(1);
+  EXPECT_EQ(127, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, UnmutingRaisesTooLowVolume) {
+  FirstProcess();
+
+  manager_.SetCaptureMuted(true);
+  manager_.SetCaptureMuted(false);
+  ExpectCheckVolumeAndReset(11);
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_)).WillOnce(Return(false));
+  CallProcess(1);
+  EXPECT_EQ(12, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, ManualLevelChangeResultsInNoSetMicCall) {
+  FirstProcess();
+
+  // Change outside of compressor's range, which would normally trigger a call
+  // to SetMicVolume.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)));
+  // GetMicVolume returns a value outside of the quantization slack, indicating
+  // a manual volume change.
+  volume_.SetMicVolume(154);
+  // SetMicVolume should not be called.
+  EXPECT_CALL(*agc_, Reset()).Times(1);
+  CallProcess(1);
+  EXPECT_EQ(154, volume_.GetMicVolume());
+
+  // Do the same thing, except downwards now.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+  volume_.SetMicVolume(100);
+  EXPECT_CALL(*agc_, Reset()).Times(1);
+  CallProcess(1);
+  EXPECT_EQ(100, volume_.GetMicVolume());
+
+  // And finally verify the AGC continues working without a manual change.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(99, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, RecoveryAfterManualLevelChangeFromMax) {
+  FirstProcess();
+
+  // Force the mic up to max volume. Takes a few steps due to the residual
+  // gain limitation.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(30), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(183, volume_.GetMicVolume());
+  CallProcess(1);
+  EXPECT_EQ(243, volume_.GetMicVolume());
+  CallProcess(1);
+  EXPECT_EQ(255, volume_.GetMicVolume());
+
+  // Manual change does not result in SetMicVolume call.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+  volume_.SetMicVolume(50);
+  EXPECT_CALL(*agc_, Reset()).Times(1);
+  CallProcess(1);
+  EXPECT_EQ(50, volume_.GetMicVolume());
+
+  // Continues working as usual afterwards.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(20), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(69, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, RecoveryAfterManualLevelChangeBelowMin) {
+  FirstProcess();
+
+  // Manual change below min.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+  // Don't set to zero, which will cause AGC to take no action.
+  volume_.SetMicVolume(1);
+  EXPECT_CALL(*agc_, Reset()).Times(1);
+  CallProcess(1);
+  EXPECT_EQ(1, volume_.GetMicVolume());
+
+  // Continues working as usual afterwards.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(2, volume_.GetMicVolume());
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(30), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(11, volume_.GetMicVolume());
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(20), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(18, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, NoClippingHasNoImpact) {
+  FirstProcess();
+
+  EXPECT_CALL(*agc_, AnalyzePreproc(_, _)).WillRepeatedly(Return(0));
+  CallPreProc(100);
+  EXPECT_EQ(128, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, ClippingUnderThresholdHasNoImpact) {
+  FirstProcess();
+
+  EXPECT_CALL(*agc_, AnalyzePreproc(_, _)).WillOnce(Return(0.099));
+  CallPreProc(1);
+  EXPECT_EQ(128, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, ClippingLowersVolume) {
+  SetVolumeAndProcess(255);
+
+  EXPECT_CALL(*agc_, AnalyzePreproc(_, _)).WillOnce(Return(0.101));
+  EXPECT_CALL(*agc_, Reset()).Times(1);
+  CallPreProc(1);
+  EXPECT_EQ(240, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, WaitingPeriodBetweenClippingChecks) {
+  SetVolumeAndProcess(255);
+
+  EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+      .WillOnce(Return(kAboveClippedThreshold));
+  EXPECT_CALL(*agc_, Reset()).Times(1);
+  CallPreProc(1);
+  EXPECT_EQ(240, volume_.GetMicVolume());
+
+  EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+      .WillRepeatedly(Return(kAboveClippedThreshold));
+  EXPECT_CALL(*agc_, Reset()).Times(0);
+  CallPreProc(300);
+  EXPECT_EQ(240, volume_.GetMicVolume());
+
+  EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+      .WillOnce(Return(kAboveClippedThreshold));
+  EXPECT_CALL(*agc_, Reset()).Times(1);
+  CallPreProc(1);
+  EXPECT_EQ(225, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, ClippingLoweringIsLimited) {
+  SetVolumeAndProcess(180);
+
+  EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+      .WillOnce(Return(kAboveClippedThreshold));
+  EXPECT_CALL(*agc_, Reset()).Times(1);
+  CallPreProc(1);
+  EXPECT_EQ(kClippedMin, volume_.GetMicVolume());
+
+  EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+      .WillRepeatedly(Return(kAboveClippedThreshold));
+  EXPECT_CALL(*agc_, Reset()).Times(0);
+  CallPreProc(1000);
+  EXPECT_EQ(kClippedMin, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, ClippingMaxIsRespectedWhenEqualToLevel) {
+  SetVolumeAndProcess(255);
+
+  EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+      .WillOnce(Return(kAboveClippedThreshold));
+  EXPECT_CALL(*agc_, Reset()).Times(1);
+  CallPreProc(1);
+  EXPECT_EQ(240, volume_.GetMicVolume());
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(30), Return(true)));
+  CallProcess(10);
+  EXPECT_EQ(240, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, ClippingMaxIsRespectedWhenHigherThanLevel) {
+  SetVolumeAndProcess(200);
+
+  EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+      .WillOnce(Return(kAboveClippedThreshold));
+  EXPECT_CALL(*agc_, Reset()).Times(1);
+  CallPreProc(1);
+  EXPECT_EQ(185, volume_.GetMicVolume());
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(40), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(240, volume_.GetMicVolume());
+  CallProcess(10);
+  EXPECT_EQ(240, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, MaxCompressionIsIncreasedAfterClipping) {
+  SetVolumeAndProcess(210);
+
+  EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+      .WillOnce(Return(kAboveClippedThreshold));
+  EXPECT_CALL(*agc_, Reset()).Times(1);
+  CallPreProc(1);
+  EXPECT_EQ(195, volume_.GetMicVolume());
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)))
+      .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)))
+      .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)))
+      .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)))
+      .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)))
+      .WillRepeatedly(Return(false));
+  CallProcess(19);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(8)).WillOnce(Return(0));
+  CallProcess(20);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(9)).WillOnce(Return(0));
+  CallProcess(20);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(10)).WillOnce(Return(0));
+  CallProcess(20);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(11)).WillOnce(Return(0));
+  CallProcess(20);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(12)).WillOnce(Return(0));
+  CallProcess(20);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(13)).WillOnce(Return(0));
+  CallProcess(1);
+
+  // Continue clipping until we hit the maximum surplus compression.
+  CallPreProc(300);
+  EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+      .WillOnce(Return(kAboveClippedThreshold));
+  EXPECT_CALL(*agc_, Reset()).Times(1);
+  CallPreProc(1);
+  EXPECT_EQ(180, volume_.GetMicVolume());
+
+  CallPreProc(300);
+  EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+      .WillOnce(Return(kAboveClippedThreshold));
+  EXPECT_CALL(*agc_, Reset()).Times(1);
+  CallPreProc(1);
+  EXPECT_EQ(kClippedMin, volume_.GetMicVolume());
+
+  // Current level is now at the minimum, but the maximum allowed level still
+  // has more to decrease.
+  CallPreProc(300);
+  EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+      .WillOnce(Return(kAboveClippedThreshold));
+  CallPreProc(1);
+
+  CallPreProc(300);
+  EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+      .WillOnce(Return(kAboveClippedThreshold));
+  CallPreProc(1);
+
+  CallPreProc(300);
+  EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+      .WillOnce(Return(kAboveClippedThreshold));
+  CallPreProc(1);
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(16), Return(true)))
+      .WillOnce(DoAll(SetArgPointee<0>(16), Return(true)))
+      .WillOnce(DoAll(SetArgPointee<0>(16), Return(true)))
+      .WillOnce(DoAll(SetArgPointee<0>(16), Return(true)))
+      .WillRepeatedly(Return(false));
+  CallProcess(19);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(14)).WillOnce(Return(0));
+  CallProcess(20);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(15)).WillOnce(Return(0));
+  CallProcess(20);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(16)).WillOnce(Return(0));
+  CallProcess(20);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(17)).WillOnce(Return(0));
+  CallProcess(20);
+  EXPECT_CALL(gctrl_, set_compression_gain_db(18)).WillOnce(Return(0));
+  CallProcess(1);
+}
+
+TEST_F(AgcManagerDirectTest, UserCanRaiseVolumeAfterClipping) {
+  SetVolumeAndProcess(225);
+
+  EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+      .WillOnce(Return(kAboveClippedThreshold));
+  EXPECT_CALL(*agc_, Reset()).Times(1);
+  CallPreProc(1);
+  EXPECT_EQ(210, volume_.GetMicVolume());
+
+  // High enough error to trigger a volume check.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(14), Return(true)));
+  // User changed the volume.
+  volume_.SetMicVolume(250);
+  EXPECT_CALL(*agc_, Reset()).Times(1);
+  CallProcess(1);
+  EXPECT_EQ(250, volume_.GetMicVolume());
+
+  // Move down...
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(-10), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(210, volume_.GetMicVolume());
+  // And back up to the new max established by the user.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(40), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(250, volume_.GetMicVolume());
+  // Will not move above new maximum.
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillOnce(DoAll(SetArgPointee<0>(30), Return(true)));
+  CallProcess(1);
+  EXPECT_EQ(250, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, ClippingDoesNotPullLowVolumeBackUp) {
+  SetVolumeAndProcess(80);
+
+  EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+      .WillOnce(Return(kAboveClippedThreshold));
+  EXPECT_CALL(*agc_, Reset()).Times(0);
+  int initial_volume = volume_.GetMicVolume();
+  CallPreProc(1);
+  EXPECT_EQ(initial_volume, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, TakesNoActionOnZeroMicVolume) {
+  FirstProcess();
+
+  EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+      .WillRepeatedly(DoAll(SetArgPointee<0>(30), Return(true)));
+  volume_.SetMicVolume(0);
+  CallProcess(10);
+  EXPECT_EQ(0, volume_.GetMicVolume());
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/agc/gain_map_internal.h b/modules/audio_processing/agc/gain_map_internal.h
new file mode 100644
index 0000000..f09c748
--- /dev/null
+++ b/modules/audio_processing/agc/gain_map_internal.h
@@ -0,0 +1,275 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_GAIN_MAP_INTERNAL_H_
+#define MODULES_AUDIO_PROCESSING_AGC_GAIN_MAP_INTERNAL_H_
+
+static const int kGainMapSize = 256;
+// Uses parameters: si = 2, sf = 0.25, D = 8/256
+static const int kGainMap[kGainMapSize] = {
+  -56,
+  -54,
+  -52,
+  -50,
+  -48,
+  -47,
+  -45,
+  -43,
+  -42,
+  -40,
+  -38,
+  -37,
+  -35,
+  -34,
+  -33,
+  -31,
+  -30,
+  -29,
+  -27,
+  -26,
+  -25,
+  -24,
+  -23,
+  -22,
+  -20,
+  -19,
+  -18,
+  -17,
+  -16,
+  -15,
+  -14,
+  -14,
+  -13,
+  -12,
+  -11,
+  -10,
+  -9,
+  -8,
+  -8,
+  -7,
+  -6,
+  -5,
+  -5,
+  -4,
+  -3,
+  -2,
+  -2,
+  -1,
+  0,
+  0,
+  1,
+  1,
+  2,
+  3,
+  3,
+  4,
+  4,
+  5,
+  5,
+  6,
+  6,
+  7,
+  7,
+  8,
+  8,
+  9,
+  9,
+  10,
+  10,
+  11,
+  11,
+  12,
+  12,
+  13,
+  13,
+  13,
+  14,
+  14,
+  15,
+  15,
+  15,
+  16,
+  16,
+  17,
+  17,
+  17,
+  18,
+  18,
+  18,
+  19,
+  19,
+  19,
+  20,
+  20,
+  21,
+  21,
+  21,
+  22,
+  22,
+  22,
+  23,
+  23,
+  23,
+  24,
+  24,
+  24,
+  24,
+  25,
+  25,
+  25,
+  26,
+  26,
+  26,
+  27,
+  27,
+  27,
+  28,
+  28,
+  28,
+  28,
+  29,
+  29,
+  29,
+  30,
+  30,
+  30,
+  30,
+  31,
+  31,
+  31,
+  32,
+  32,
+  32,
+  32,
+  33,
+  33,
+  33,
+  33,
+  34,
+  34,
+  34,
+  35,
+  35,
+  35,
+  35,
+  36,
+  36,
+  36,
+  36,
+  37,
+  37,
+  37,
+  38,
+  38,
+  38,
+  38,
+  39,
+  39,
+  39,
+  39,
+  40,
+  40,
+  40,
+  40,
+  41,
+  41,
+  41,
+  41,
+  42,
+  42,
+  42,
+  42,
+  43,
+  43,
+  43,
+  44,
+  44,
+  44,
+  44,
+  45,
+  45,
+  45,
+  45,
+  46,
+  46,
+  46,
+  46,
+  47,
+  47,
+  47,
+  47,
+  48,
+  48,
+  48,
+  48,
+  49,
+  49,
+  49,
+  49,
+  50,
+  50,
+  50,
+  50,
+  51,
+  51,
+  51,
+  51,
+  52,
+  52,
+  52,
+  52,
+  53,
+  53,
+  53,
+  53,
+  54,
+  54,
+  54,
+  54,
+  55,
+  55,
+  55,
+  55,
+  56,
+  56,
+  56,
+  56,
+  57,
+  57,
+  57,
+  57,
+  58,
+  58,
+  58,
+  58,
+  59,
+  59,
+  59,
+  59,
+  60,
+  60,
+  60,
+  60,
+  61,
+  61,
+  61,
+  61,
+  62,
+  62,
+  62,
+  62,
+  63,
+  63,
+  63,
+  63,
+  64
+};
+
+#endif  // MODULES_AUDIO_PROCESSING_AGC_GAIN_MAP_INTERNAL_H_
diff --git a/modules/audio_processing/agc/legacy/analog_agc.c b/modules/audio_processing/agc/legacy/analog_agc.c
new file mode 100644
index 0000000..662e88b
--- /dev/null
+++ b/modules/audio_processing/agc/legacy/analog_agc.c
@@ -0,0 +1,1390 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/* analog_agc.c
+ *
+ * Using a feedback system, determines an appropriate analog volume level
+ * given an input signal and current volume level. Targets a conservative
+ * signal level and is intended for use with a digital AGC to apply
+ * additional gain.
+ *
+ */
+
+#include "modules/audio_processing/agc/legacy/analog_agc.h"
+
+#include <stdlib.h>
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+#include <stdio.h>
+#endif
+
+#include "rtc_base/checks.h"
+
+/* The slope of in Q13*/
+static const int16_t kSlope1[8] = {21793, 12517, 7189, 4129,
+                                   2372,  1362,  472,  78};
+
+/* The offset in Q14 */
+static const int16_t kOffset1[8] = {25395, 23911, 22206, 20737,
+                                    19612, 18805, 17951, 17367};
+
+/* The slope of in Q13*/
+static const int16_t kSlope2[8] = {2063, 1731, 1452, 1218, 1021, 857, 597, 337};
+
+/* The offset in Q14 */
+static const int16_t kOffset2[8] = {18432, 18379, 18290, 18177,
+                                    18052, 17920, 17670, 17286};
+
+static const int16_t kMuteGuardTimeMs = 8000;
+static const int16_t kInitCheck = 42;
+static const size_t kNumSubframes = 10;
+
+/* Default settings if config is not used */
+#define AGC_DEFAULT_TARGET_LEVEL 3
+#define AGC_DEFAULT_COMP_GAIN 9
+/* This is the target level for the analog part in ENV scale. To convert to RMS
+ * scale you
+ * have to add OFFSET_ENV_TO_RMS.
+ */
+#define ANALOG_TARGET_LEVEL 11
+#define ANALOG_TARGET_LEVEL_2 5  // ANALOG_TARGET_LEVEL / 2
+/* Offset between RMS scale (analog part) and ENV scale (digital part). This
+ * value actually
+ * varies with the FIXED_ANALOG_TARGET_LEVEL, hence we should in the future
+ * replace it with
+ * a table.
+ */
+#define OFFSET_ENV_TO_RMS 9
+/* The reference input level at which the digital part gives an output of
+ * targetLevelDbfs
+ * (desired level) if we have no compression gain. This level should be set high
+ * enough not
+ * to compress the peaks due to the dynamics.
+ */
+#define DIGITAL_REF_AT_0_COMP_GAIN 4
+/* Speed of reference level decrease.
+ */
+#define DIFF_REF_TO_ANALOG 5
+
+#ifdef MIC_LEVEL_FEEDBACK
+#define NUM_BLOCKS_IN_SAT_BEFORE_CHANGE_TARGET 7
+#endif
+/* Size of analog gain table */
+#define GAIN_TBL_LEN 32
+/* Matlab code:
+ * fprintf(1, '\t%i, %i, %i, %i,\n', round(10.^(linspace(0,10,32)/20) * 2^12));
+ */
+/* Q12 */
+static const uint16_t kGainTableAnalog[GAIN_TBL_LEN] = {
+    4096, 4251, 4412, 4579,  4752,  4932,  5118,  5312,  5513,  5722, 5938,
+    6163, 6396, 6638, 6889,  7150,  7420,  7701,  7992,  8295,  8609, 8934,
+    9273, 9623, 9987, 10365, 10758, 11165, 11587, 12025, 12480, 12953};
+
+/* Gain/Suppression tables for virtual Mic (in Q10) */
+static const uint16_t kGainTableVirtualMic[128] = {
+    1052,  1081,  1110,  1141,  1172,  1204,  1237,  1271,  1305,  1341,  1378,
+    1416,  1454,  1494,  1535,  1577,  1620,  1664,  1710,  1757,  1805,  1854,
+    1905,  1957,  2010,  2065,  2122,  2180,  2239,  2301,  2364,  2428,  2495,
+    2563,  2633,  2705,  2779,  2855,  2933,  3013,  3096,  3180,  3267,  3357,
+    3449,  3543,  3640,  3739,  3842,  3947,  4055,  4166,  4280,  4397,  4517,
+    4640,  4767,  4898,  5032,  5169,  5311,  5456,  5605,  5758,  5916,  6078,
+    6244,  6415,  6590,  6770,  6956,  7146,  7341,  7542,  7748,  7960,  8178,
+    8402,  8631,  8867,  9110,  9359,  9615,  9878,  10148, 10426, 10711, 11004,
+    11305, 11614, 11932, 12258, 12593, 12938, 13292, 13655, 14029, 14412, 14807,
+    15212, 15628, 16055, 16494, 16945, 17409, 17885, 18374, 18877, 19393, 19923,
+    20468, 21028, 21603, 22194, 22801, 23425, 24065, 24724, 25400, 26095, 26808,
+    27541, 28295, 29069, 29864, 30681, 31520, 32382};
+static const uint16_t kSuppressionTableVirtualMic[128] = {
+    1024, 1006, 988, 970, 952, 935, 918, 902, 886, 870, 854, 839, 824, 809, 794,
+    780,  766,  752, 739, 726, 713, 700, 687, 675, 663, 651, 639, 628, 616, 605,
+    594,  584,  573, 563, 553, 543, 533, 524, 514, 505, 496, 487, 478, 470, 461,
+    453,  445,  437, 429, 421, 414, 406, 399, 392, 385, 378, 371, 364, 358, 351,
+    345,  339,  333, 327, 321, 315, 309, 304, 298, 293, 288, 283, 278, 273, 268,
+    263,  258,  254, 249, 244, 240, 236, 232, 227, 223, 219, 215, 211, 208, 204,
+    200,  197,  193, 190, 186, 183, 180, 176, 173, 170, 167, 164, 161, 158, 155,
+    153,  150,  147, 145, 142, 139, 137, 134, 132, 130, 127, 125, 123, 121, 118,
+    116,  114,  112, 110, 108, 106, 104, 102};
+
+/* Table for target energy levels. Values in Q(-7)
+ * Matlab code
+ * targetLevelTable = fprintf('%d,\t%d,\t%d,\t%d,\n',
+ * round((32767*10.^(-(0:63)'/20)).^2*16/2^7) */
+
+static const int32_t kTargetLevelTable[64] = {
+    134209536, 106606424, 84680493, 67264106, 53429779, 42440782, 33711911,
+    26778323,  21270778,  16895980, 13420954, 10660642, 8468049,  6726411,
+    5342978,   4244078,   3371191,  2677832,  2127078,  1689598,  1342095,
+    1066064,   846805,    672641,   534298,   424408,   337119,   267783,
+    212708,    168960,    134210,   106606,   84680,    67264,    53430,
+    42441,     33712,     26778,    21271,    16896,    13421,    10661,
+    8468,      6726,      5343,     4244,     3371,     2678,     2127,
+    1690,      1342,      1066,     847,      673,      534,      424,
+    337,       268,       213,      169,      134,      107,      85,
+    67};
+
+int WebRtcAgc_AddMic(void* state,
+                     int16_t* const* in_mic,
+                     size_t num_bands,
+                     size_t samples) {
+  int32_t nrg, max_nrg, sample, tmp32;
+  int32_t* ptr;
+  uint16_t targetGainIdx, gain;
+  size_t i;
+  int16_t n, L, tmp16, tmp_speech[16];
+  LegacyAgc* stt;
+  stt = (LegacyAgc*)state;
+
+  if (stt->fs == 8000) {
+    L = 8;
+    if (samples != 80) {
+      return -1;
+    }
+  } else {
+    L = 16;
+    if (samples != 160) {
+      return -1;
+    }
+  }
+
+  /* apply slowly varying digital gain */
+  if (stt->micVol > stt->maxAnalog) {
+    /* |maxLevel| is strictly >= |micVol|, so this condition should be
+     * satisfied here, ensuring there is no divide-by-zero. */
+    RTC_DCHECK_GT(stt->maxLevel, stt->maxAnalog);
+
+    /* Q1 */
+    tmp16 = (int16_t)(stt->micVol - stt->maxAnalog);
+    tmp32 = (GAIN_TBL_LEN - 1) * tmp16;
+    tmp16 = (int16_t)(stt->maxLevel - stt->maxAnalog);
+    targetGainIdx = tmp32 / tmp16;
+    RTC_DCHECK_LT(targetGainIdx, GAIN_TBL_LEN);
+
+    /* Increment through the table towards the target gain.
+     * If micVol drops below maxAnalog, we allow the gain
+     * to be dropped immediately. */
+    if (stt->gainTableIdx < targetGainIdx) {
+      stt->gainTableIdx++;
+    } else if (stt->gainTableIdx > targetGainIdx) {
+      stt->gainTableIdx--;
+    }
+
+    /* Q12 */
+    gain = kGainTableAnalog[stt->gainTableIdx];
+
+    for (i = 0; i < samples; i++) {
+      size_t j;
+      for (j = 0; j < num_bands; ++j) {
+        sample = (in_mic[j][i] * gain) >> 12;
+        if (sample > 32767) {
+          in_mic[j][i] = 32767;
+        } else if (sample < -32768) {
+          in_mic[j][i] = -32768;
+        } else {
+          in_mic[j][i] = (int16_t)sample;
+        }
+      }
+    }
+  } else {
+    stt->gainTableIdx = 0;
+  }
+
+  /* compute envelope */
+  if (stt->inQueue > 0) {
+    ptr = stt->env[1];
+  } else {
+    ptr = stt->env[0];
+  }
+
+  for (i = 0; i < kNumSubframes; i++) {
+    /* iterate over samples */
+    max_nrg = 0;
+    for (n = 0; n < L; n++) {
+      nrg = in_mic[0][i * L + n] * in_mic[0][i * L + n];
+      if (nrg > max_nrg) {
+        max_nrg = nrg;
+      }
+    }
+    ptr[i] = max_nrg;
+  }
+
+  /* compute energy */
+  if (stt->inQueue > 0) {
+    ptr = stt->Rxx16w32_array[1];
+  } else {
+    ptr = stt->Rxx16w32_array[0];
+  }
+
+  for (i = 0; i < kNumSubframes / 2; i++) {
+    if (stt->fs == 16000) {
+      WebRtcSpl_DownsampleBy2(&in_mic[0][i * 32], 32, tmp_speech,
+                              stt->filterState);
+    } else {
+      memcpy(tmp_speech, &in_mic[0][i * 16], 16 * sizeof(short));
+    }
+    /* Compute energy in blocks of 16 samples */
+    ptr[i] = WebRtcSpl_DotProductWithScale(tmp_speech, tmp_speech, 16, 4);
+  }
+
+  /* update queue information */
+  if (stt->inQueue == 0) {
+    stt->inQueue = 1;
+  } else {
+    stt->inQueue = 2;
+  }
+
+  /* call VAD (use low band only) */
+  WebRtcAgc_ProcessVad(&stt->vadMic, in_mic[0], samples);
+
+  return 0;
+}
+
+int WebRtcAgc_AddFarend(void* state, const int16_t* in_far, size_t samples) {
+  LegacyAgc* stt = (LegacyAgc*)state;
+
+  int err = WebRtcAgc_GetAddFarendError(state, samples);
+
+  if (err != 0)
+    return err;
+
+  return WebRtcAgc_AddFarendToDigital(&stt->digitalAgc, in_far, samples);
+}
+
+int WebRtcAgc_GetAddFarendError(void* state, size_t samples) {
+  LegacyAgc* stt;
+  stt = (LegacyAgc*)state;
+
+  if (stt == NULL)
+    return -1;
+
+  if (stt->fs == 8000) {
+    if (samples != 80)
+      return -1;
+  } else if (stt->fs == 16000 || stt->fs == 32000 || stt->fs == 48000) {
+    if (samples != 160)
+      return -1;
+  } else {
+    return -1;
+  }
+
+  return 0;
+}
+
+int WebRtcAgc_VirtualMic(void* agcInst,
+                         int16_t* const* in_near,
+                         size_t num_bands,
+                         size_t samples,
+                         int32_t micLevelIn,
+                         int32_t* micLevelOut) {
+  int32_t tmpFlt, micLevelTmp, gainIdx;
+  uint16_t gain;
+  size_t ii, j;
+  LegacyAgc* stt;
+
+  uint32_t nrg;
+  size_t sampleCntr;
+  uint32_t frameNrg = 0;
+  uint32_t frameNrgLimit = 5500;
+  int16_t numZeroCrossing = 0;
+  const int16_t kZeroCrossingLowLim = 15;
+  const int16_t kZeroCrossingHighLim = 20;
+
+  stt = (LegacyAgc*)agcInst;
+
+  /*
+   *  Before applying gain decide if this is a low-level signal.
+   *  The idea is that digital AGC will not adapt to low-level
+   *  signals.
+   */
+  if (stt->fs != 8000) {
+    frameNrgLimit = frameNrgLimit << 1;
+  }
+
+  frameNrg = (uint32_t)(in_near[0][0] * in_near[0][0]);
+  for (sampleCntr = 1; sampleCntr < samples; sampleCntr++) {
+    // increment frame energy if it is less than the limit
+    // the correct value of the energy is not important
+    if (frameNrg < frameNrgLimit) {
+      nrg = (uint32_t)(in_near[0][sampleCntr] * in_near[0][sampleCntr]);
+      frameNrg += nrg;
+    }
+
+    // Count the zero crossings
+    numZeroCrossing +=
+        ((in_near[0][sampleCntr] ^ in_near[0][sampleCntr - 1]) < 0);
+  }
+
+  if ((frameNrg < 500) || (numZeroCrossing <= 5)) {
+    stt->lowLevelSignal = 1;
+  } else if (numZeroCrossing <= kZeroCrossingLowLim) {
+    stt->lowLevelSignal = 0;
+  } else if (frameNrg <= frameNrgLimit) {
+    stt->lowLevelSignal = 1;
+  } else if (numZeroCrossing >= kZeroCrossingHighLim) {
+    stt->lowLevelSignal = 1;
+  } else {
+    stt->lowLevelSignal = 0;
+  }
+
+  micLevelTmp = micLevelIn << stt->scale;
+  /* Set desired level */
+  gainIdx = stt->micVol;
+  if (stt->micVol > stt->maxAnalog) {
+    gainIdx = stt->maxAnalog;
+  }
+  if (micLevelTmp != stt->micRef) {
+    /* Something has happened with the physical level, restart. */
+    stt->micRef = micLevelTmp;
+    stt->micVol = 127;
+    *micLevelOut = 127;
+    stt->micGainIdx = 127;
+    gainIdx = 127;
+  }
+  /* Pre-process the signal to emulate the microphone level. */
+  /* Take one step at a time in the gain table. */
+  if (gainIdx > 127) {
+    gain = kGainTableVirtualMic[gainIdx - 128];
+  } else {
+    gain = kSuppressionTableVirtualMic[127 - gainIdx];
+  }
+  for (ii = 0; ii < samples; ii++) {
+    tmpFlt = (in_near[0][ii] * gain) >> 10;
+    if (tmpFlt > 32767) {
+      tmpFlt = 32767;
+      gainIdx--;
+      if (gainIdx >= 127) {
+        gain = kGainTableVirtualMic[gainIdx - 127];
+      } else {
+        gain = kSuppressionTableVirtualMic[127 - gainIdx];
+      }
+    }
+    if (tmpFlt < -32768) {
+      tmpFlt = -32768;
+      gainIdx--;
+      if (gainIdx >= 127) {
+        gain = kGainTableVirtualMic[gainIdx - 127];
+      } else {
+        gain = kSuppressionTableVirtualMic[127 - gainIdx];
+      }
+    }
+    in_near[0][ii] = (int16_t)tmpFlt;
+    for (j = 1; j < num_bands; ++j) {
+      tmpFlt = (in_near[j][ii] * gain) >> 10;
+      if (tmpFlt > 32767) {
+        tmpFlt = 32767;
+      }
+      if (tmpFlt < -32768) {
+        tmpFlt = -32768;
+      }
+      in_near[j][ii] = (int16_t)tmpFlt;
+    }
+  }
+  /* Set the level we (finally) used */
+  stt->micGainIdx = gainIdx;
+  //    *micLevelOut = stt->micGainIdx;
+  *micLevelOut = stt->micGainIdx >> stt->scale;
+  /* Add to Mic as if it was the output from a true microphone */
+  if (WebRtcAgc_AddMic(agcInst, in_near, num_bands, samples) != 0) {
+    return -1;
+  }
+  return 0;
+}
+
+void WebRtcAgc_UpdateAgcThresholds(LegacyAgc* stt) {
+  int16_t tmp16;
+#ifdef MIC_LEVEL_FEEDBACK
+  int zeros;
+
+  if (stt->micLvlSat) {
+    /* Lower the analog target level since we have reached its maximum */
+    zeros = WebRtcSpl_NormW32(stt->Rxx160_LPw32);
+    stt->targetIdxOffset = (3 * zeros - stt->targetIdx - 2) / 4;
+  }
+#endif
+
+  /* Set analog target level in envelope dBOv scale */
+  tmp16 = (DIFF_REF_TO_ANALOG * stt->compressionGaindB) + ANALOG_TARGET_LEVEL_2;
+  tmp16 = WebRtcSpl_DivW32W16ResW16((int32_t)tmp16, ANALOG_TARGET_LEVEL);
+  stt->analogTarget = DIGITAL_REF_AT_0_COMP_GAIN + tmp16;
+  if (stt->analogTarget < DIGITAL_REF_AT_0_COMP_GAIN) {
+    stt->analogTarget = DIGITAL_REF_AT_0_COMP_GAIN;
+  }
+  if (stt->agcMode == kAgcModeFixedDigital) {
+    /* Adjust for different parameter interpretation in FixedDigital mode */
+    stt->analogTarget = stt->compressionGaindB;
+  }
+#ifdef MIC_LEVEL_FEEDBACK
+  stt->analogTarget += stt->targetIdxOffset;
+#endif
+  /* Since the offset between RMS and ENV is not constant, we should make this
+   * into a
+   * table, but for now, we'll stick with a constant, tuned for the chosen
+   * analog
+   * target level.
+   */
+  stt->targetIdx = ANALOG_TARGET_LEVEL + OFFSET_ENV_TO_RMS;
+#ifdef MIC_LEVEL_FEEDBACK
+  stt->targetIdx += stt->targetIdxOffset;
+#endif
+  /* Analog adaptation limits */
+  /* analogTargetLevel = round((32767*10^(-targetIdx/20))^2*16/2^7) */
+  stt->analogTargetLevel =
+      RXX_BUFFER_LEN * kTargetLevelTable[stt->targetIdx]; /* ex. -20 dBov */
+  stt->startUpperLimit =
+      RXX_BUFFER_LEN * kTargetLevelTable[stt->targetIdx - 1]; /* -19 dBov */
+  stt->startLowerLimit =
+      RXX_BUFFER_LEN * kTargetLevelTable[stt->targetIdx + 1]; /* -21 dBov */
+  stt->upperPrimaryLimit =
+      RXX_BUFFER_LEN * kTargetLevelTable[stt->targetIdx - 2]; /* -18 dBov */
+  stt->lowerPrimaryLimit =
+      RXX_BUFFER_LEN * kTargetLevelTable[stt->targetIdx + 2]; /* -22 dBov */
+  stt->upperSecondaryLimit =
+      RXX_BUFFER_LEN * kTargetLevelTable[stt->targetIdx - 5]; /* -15 dBov */
+  stt->lowerSecondaryLimit =
+      RXX_BUFFER_LEN * kTargetLevelTable[stt->targetIdx + 5]; /* -25 dBov */
+  stt->upperLimit = stt->startUpperLimit;
+  stt->lowerLimit = stt->startLowerLimit;
+}
+
+void WebRtcAgc_SaturationCtrl(LegacyAgc* stt,
+                              uint8_t* saturated,
+                              int32_t* env) {
+  int16_t i, tmpW16;
+
+  /* Check if the signal is saturated */
+  for (i = 0; i < 10; i++) {
+    tmpW16 = (int16_t)(env[i] >> 20);
+    if (tmpW16 > 875) {
+      stt->envSum += tmpW16;
+    }
+  }
+
+  if (stt->envSum > 25000) {
+    *saturated = 1;
+    stt->envSum = 0;
+  }
+
+  /* stt->envSum *= 0.99; */
+  stt->envSum = (int16_t)((stt->envSum * 32440) >> 15);
+}
+
+void WebRtcAgc_ZeroCtrl(LegacyAgc* stt, int32_t* inMicLevel, int32_t* env) {
+  int16_t i;
+  int64_t tmp = 0;
+  int32_t midVal;
+
+  /* Is the input signal zero? */
+  for (i = 0; i < 10; i++) {
+    tmp += env[i];
+  }
+
+  /* Each block is allowed to have a few non-zero
+   * samples.
+   */
+  if (tmp < 500) {
+    stt->msZero += 10;
+  } else {
+    stt->msZero = 0;
+  }
+
+  if (stt->muteGuardMs > 0) {
+    stt->muteGuardMs -= 10;
+  }
+
+  if (stt->msZero > 500) {
+    stt->msZero = 0;
+
+    /* Increase microphone level only if it's less than 50% */
+    midVal = (stt->maxAnalog + stt->minLevel + 1) / 2;
+    if (*inMicLevel < midVal) {
+      /* *inMicLevel *= 1.1; */
+      *inMicLevel = (1126 * *inMicLevel) >> 10;
+      /* Reduces risk of a muted mic repeatedly triggering excessive levels due
+       * to zero signal detection. */
+      *inMicLevel = WEBRTC_SPL_MIN(*inMicLevel, stt->zeroCtrlMax);
+      stt->micVol = *inMicLevel;
+    }
+
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+    fprintf(stt->fpt,
+            "\t\tAGC->zeroCntrl, frame %d: 500 ms under threshold,"
+            " micVol: %d\n",
+            stt->fcount, stt->micVol);
+#endif
+
+    stt->activeSpeech = 0;
+    stt->Rxx16_LPw32Max = 0;
+
+    /* The AGC has a tendency (due to problems with the VAD parameters), to
+     * vastly increase the volume after a muting event. This timer prevents
+     * upwards adaptation for a short period. */
+    stt->muteGuardMs = kMuteGuardTimeMs;
+  }
+}
+
+void WebRtcAgc_SpeakerInactiveCtrl(LegacyAgc* stt) {
+  /* Check if the near end speaker is inactive.
+   * If that is the case the VAD threshold is
+   * increased since the VAD speech model gets
+   * more sensitive to any sound after a long
+   * silence.
+   */
+
+  int32_t tmp32;
+  int16_t vadThresh;
+
+  if (stt->vadMic.stdLongTerm < 2500) {
+    stt->vadThreshold = 1500;
+  } else {
+    vadThresh = kNormalVadThreshold;
+    if (stt->vadMic.stdLongTerm < 4500) {
+      /* Scale between min and max threshold */
+      vadThresh += (4500 - stt->vadMic.stdLongTerm) / 2;
+    }
+
+    /* stt->vadThreshold = (31 * stt->vadThreshold + vadThresh) / 32; */
+    tmp32 = vadThresh + 31 * stt->vadThreshold;
+    stt->vadThreshold = (int16_t)(tmp32 >> 5);
+  }
+}
+
+void WebRtcAgc_ExpCurve(int16_t volume, int16_t* index) {
+  // volume in Q14
+  // index in [0-7]
+  /* 8 different curves */
+  if (volume > 5243) {
+    if (volume > 7864) {
+      if (volume > 12124) {
+        *index = 7;
+      } else {
+        *index = 6;
+      }
+    } else {
+      if (volume > 6554) {
+        *index = 5;
+      } else {
+        *index = 4;
+      }
+    }
+  } else {
+    if (volume > 2621) {
+      if (volume > 3932) {
+        *index = 3;
+      } else {
+        *index = 2;
+      }
+    } else {
+      if (volume > 1311) {
+        *index = 1;
+      } else {
+        *index = 0;
+      }
+    }
+  }
+}
+
+int32_t WebRtcAgc_ProcessAnalog(void* state,
+                                int32_t inMicLevel,
+                                int32_t* outMicLevel,
+                                int16_t vadLogRatio,
+                                int16_t echo,
+                                uint8_t* saturationWarning) {
+  uint32_t tmpU32;
+  int32_t Rxx16w32, tmp32;
+  int32_t inMicLevelTmp, lastMicVol;
+  int16_t i;
+  uint8_t saturated = 0;
+  LegacyAgc* stt;
+
+  stt = (LegacyAgc*)state;
+  inMicLevelTmp = inMicLevel << stt->scale;
+
+  if (inMicLevelTmp > stt->maxAnalog) {
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+    fprintf(stt->fpt, "\tAGC->ProcessAnalog, frame %d: micLvl > maxAnalog\n",
+            stt->fcount);
+#endif
+    return -1;
+  } else if (inMicLevelTmp < stt->minLevel) {
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+    fprintf(stt->fpt, "\tAGC->ProcessAnalog, frame %d: micLvl < minLevel\n",
+            stt->fcount);
+#endif
+    return -1;
+  }
+
+  if (stt->firstCall == 0) {
+    int32_t tmpVol;
+    stt->firstCall = 1;
+    tmp32 = ((stt->maxLevel - stt->minLevel) * 51) >> 9;
+    tmpVol = (stt->minLevel + tmp32);
+
+    /* If the mic level is very low at start, increase it! */
+    if ((inMicLevelTmp < tmpVol) && (stt->agcMode == kAgcModeAdaptiveAnalog)) {
+      inMicLevelTmp = tmpVol;
+    }
+    stt->micVol = inMicLevelTmp;
+  }
+
+  /* Set the mic level to the previous output value if there is digital input
+   * gain */
+  if ((inMicLevelTmp == stt->maxAnalog) && (stt->micVol > stt->maxAnalog)) {
+    inMicLevelTmp = stt->micVol;
+  }
+
+  /* If the mic level was manually changed to a very low value raise it! */
+  if ((inMicLevelTmp != stt->micVol) && (inMicLevelTmp < stt->minOutput)) {
+    tmp32 = ((stt->maxLevel - stt->minLevel) * 51) >> 9;
+    inMicLevelTmp = (stt->minLevel + tmp32);
+    stt->micVol = inMicLevelTmp;
+#ifdef MIC_LEVEL_FEEDBACK
+// stt->numBlocksMicLvlSat = 0;
+#endif
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+    fprintf(stt->fpt,
+            "\tAGC->ProcessAnalog, frame %d: micLvl < minLevel by manual"
+            " decrease, raise vol\n",
+            stt->fcount);
+#endif
+  }
+
+  if (inMicLevelTmp != stt->micVol) {
+    if (inMicLevel == stt->lastInMicLevel) {
+      // We requested a volume adjustment, but it didn't occur. This is
+      // probably due to a coarse quantization of the volume slider.
+      // Restore the requested value to prevent getting stuck.
+      inMicLevelTmp = stt->micVol;
+    } else {
+      // As long as the value changed, update to match.
+      stt->micVol = inMicLevelTmp;
+    }
+  }
+
+  if (inMicLevelTmp > stt->maxLevel) {
+    // Always allow the user to raise the volume above the maxLevel.
+    stt->maxLevel = inMicLevelTmp;
+  }
+
+  // Store last value here, after we've taken care of manual updates etc.
+  stt->lastInMicLevel = inMicLevel;
+  lastMicVol = stt->micVol;
+
+  /* Checks if the signal is saturated. Also a check if individual samples
+   * are larger than 12000 is done. If they are the counter for increasing
+   * the volume level is set to -100ms
+   */
+  WebRtcAgc_SaturationCtrl(stt, &saturated, stt->env[0]);
+
+  /* The AGC is always allowed to lower the level if the signal is saturated */
+  if (saturated == 1) {
+    /* Lower the recording level
+     * Rxx160_LP is adjusted down because it is so slow it could
+     * cause the AGC to make wrong decisions. */
+    /* stt->Rxx160_LPw32 *= 0.875; */
+    stt->Rxx160_LPw32 = (stt->Rxx160_LPw32 / 8) * 7;
+
+    stt->zeroCtrlMax = stt->micVol;
+
+    /* stt->micVol *= 0.903; */
+    tmp32 = inMicLevelTmp - stt->minLevel;
+    tmpU32 = WEBRTC_SPL_UMUL(29591, (uint32_t)(tmp32));
+    stt->micVol = (tmpU32 >> 15) + stt->minLevel;
+    if (stt->micVol > lastMicVol - 2) {
+      stt->micVol = lastMicVol - 2;
+    }
+    inMicLevelTmp = stt->micVol;
+
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+    fprintf(stt->fpt,
+            "\tAGC->ProcessAnalog, frame %d: saturated, micVol = %d\n",
+            stt->fcount, stt->micVol);
+#endif
+
+    if (stt->micVol < stt->minOutput) {
+      *saturationWarning = 1;
+    }
+
+    /* Reset counter for decrease of volume level to avoid
+     * decreasing too much. The saturation control can still
+     * lower the level if needed. */
+    stt->msTooHigh = -100;
+
+    /* Enable the control mechanism to ensure that our measure,
+     * Rxx160_LP, is in the correct range. This must be done since
+     * the measure is very slow. */
+    stt->activeSpeech = 0;
+    stt->Rxx16_LPw32Max = 0;
+
+    /* Reset to initial values */
+    stt->msecSpeechInnerChange = kMsecSpeechInner;
+    stt->msecSpeechOuterChange = kMsecSpeechOuter;
+    stt->changeToSlowMode = 0;
+
+    stt->muteGuardMs = 0;
+
+    stt->upperLimit = stt->startUpperLimit;
+    stt->lowerLimit = stt->startLowerLimit;
+#ifdef MIC_LEVEL_FEEDBACK
+// stt->numBlocksMicLvlSat = 0;
+#endif
+  }
+
+  /* Check if the input speech is zero. If so the mic volume
+   * is increased. On some computers the input is zero up as high
+   * level as 17% */
+  WebRtcAgc_ZeroCtrl(stt, &inMicLevelTmp, stt->env[0]);
+
+  /* Check if the near end speaker is inactive.
+   * If that is the case the VAD threshold is
+   * increased since the VAD speech model gets
+   * more sensitive to any sound after a long
+   * silence.
+   */
+  WebRtcAgc_SpeakerInactiveCtrl(stt);
+
+  for (i = 0; i < 5; i++) {
+    /* Computed on blocks of 16 samples */
+
+    Rxx16w32 = stt->Rxx16w32_array[0][i];
+
+    /* Rxx160w32 in Q(-7) */
+    tmp32 = (Rxx16w32 - stt->Rxx16_vectorw32[stt->Rxx16pos]) >> 3;
+    stt->Rxx160w32 = stt->Rxx160w32 + tmp32;
+    stt->Rxx16_vectorw32[stt->Rxx16pos] = Rxx16w32;
+
+    /* Circular buffer */
+    stt->Rxx16pos++;
+    if (stt->Rxx16pos == RXX_BUFFER_LEN) {
+      stt->Rxx16pos = 0;
+    }
+
+    /* Rxx16_LPw32 in Q(-4) */
+    tmp32 = (Rxx16w32 - stt->Rxx16_LPw32) >> kAlphaShortTerm;
+    stt->Rxx16_LPw32 = (stt->Rxx16_LPw32) + tmp32;
+
+    if (vadLogRatio > stt->vadThreshold) {
+      /* Speech detected! */
+
+      /* Check if Rxx160_LP is in the correct range. If
+       * it is too high/low then we set it to the maximum of
+       * Rxx16_LPw32 during the first 200ms of speech.
+       */
+      if (stt->activeSpeech < 250) {
+        stt->activeSpeech += 2;
+
+        if (stt->Rxx16_LPw32 > stt->Rxx16_LPw32Max) {
+          stt->Rxx16_LPw32Max = stt->Rxx16_LPw32;
+        }
+      } else if (stt->activeSpeech == 250) {
+        stt->activeSpeech += 2;
+        tmp32 = stt->Rxx16_LPw32Max >> 3;
+        stt->Rxx160_LPw32 = tmp32 * RXX_BUFFER_LEN;
+      }
+
+      tmp32 = (stt->Rxx160w32 - stt->Rxx160_LPw32) >> kAlphaLongTerm;
+      stt->Rxx160_LPw32 = stt->Rxx160_LPw32 + tmp32;
+
+      if (stt->Rxx160_LPw32 > stt->upperSecondaryLimit) {
+        stt->msTooHigh += 2;
+        stt->msTooLow = 0;
+        stt->changeToSlowMode = 0;
+
+        if (stt->msTooHigh > stt->msecSpeechOuterChange) {
+          stt->msTooHigh = 0;
+
+          /* Lower the recording level */
+          /* Multiply by 0.828125 which corresponds to decreasing ~0.8dB */
+          tmp32 = stt->Rxx160_LPw32 >> 6;
+          stt->Rxx160_LPw32 = tmp32 * 53;
+
+          /* Reduce the max gain to avoid excessive oscillation
+           * (but never drop below the maximum analog level).
+           */
+          stt->maxLevel = (15 * stt->maxLevel + stt->micVol) / 16;
+          stt->maxLevel = WEBRTC_SPL_MAX(stt->maxLevel, stt->maxAnalog);
+
+          stt->zeroCtrlMax = stt->micVol;
+
+          /* 0.95 in Q15 */
+          tmp32 = inMicLevelTmp - stt->minLevel;
+          tmpU32 = WEBRTC_SPL_UMUL(31130, (uint32_t)(tmp32));
+          stt->micVol = (tmpU32 >> 15) + stt->minLevel;
+          if (stt->micVol > lastMicVol - 1) {
+            stt->micVol = lastMicVol - 1;
+          }
+          inMicLevelTmp = stt->micVol;
+
+          /* Enable the control mechanism to ensure that our measure,
+           * Rxx160_LP, is in the correct range.
+           */
+          stt->activeSpeech = 0;
+          stt->Rxx16_LPw32Max = 0;
+#ifdef MIC_LEVEL_FEEDBACK
+// stt->numBlocksMicLvlSat = 0;
+#endif
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+          fprintf(stt->fpt,
+                  "\tAGC->ProcessAnalog, frame %d: measure >"
+                  " 2ndUpperLim, micVol = %d, maxLevel = %d\n",
+                  stt->fcount, stt->micVol, stt->maxLevel);
+#endif
+        }
+      } else if (stt->Rxx160_LPw32 > stt->upperLimit) {
+        stt->msTooHigh += 2;
+        stt->msTooLow = 0;
+        stt->changeToSlowMode = 0;
+
+        if (stt->msTooHigh > stt->msecSpeechInnerChange) {
+          /* Lower the recording level */
+          stt->msTooHigh = 0;
+          /* Multiply by 0.828125 which corresponds to decreasing ~0.8dB */
+          stt->Rxx160_LPw32 = (stt->Rxx160_LPw32 / 64) * 53;
+
+          /* Reduce the max gain to avoid excessive oscillation
+           * (but never drop below the maximum analog level).
+           */
+          stt->maxLevel = (15 * stt->maxLevel + stt->micVol) / 16;
+          stt->maxLevel = WEBRTC_SPL_MAX(stt->maxLevel, stt->maxAnalog);
+
+          stt->zeroCtrlMax = stt->micVol;
+
+          /* 0.965 in Q15 */
+          tmp32 = inMicLevelTmp - stt->minLevel;
+          tmpU32 =
+              WEBRTC_SPL_UMUL(31621, (uint32_t)(inMicLevelTmp - stt->minLevel));
+          stt->micVol = (tmpU32 >> 15) + stt->minLevel;
+          if (stt->micVol > lastMicVol - 1) {
+            stt->micVol = lastMicVol - 1;
+          }
+          inMicLevelTmp = stt->micVol;
+
+#ifdef MIC_LEVEL_FEEDBACK
+// stt->numBlocksMicLvlSat = 0;
+#endif
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+          fprintf(stt->fpt,
+                  "\tAGC->ProcessAnalog, frame %d: measure >"
+                  " UpperLim, micVol = %d, maxLevel = %d\n",
+                  stt->fcount, stt->micVol, stt->maxLevel);
+#endif
+        }
+      } else if (stt->Rxx160_LPw32 < stt->lowerSecondaryLimit) {
+        stt->msTooHigh = 0;
+        stt->changeToSlowMode = 0;
+        stt->msTooLow += 2;
+
+        if (stt->msTooLow > stt->msecSpeechOuterChange) {
+          /* Raise the recording level */
+          int16_t index, weightFIX;
+          int16_t volNormFIX = 16384;  // =1 in Q14.
+
+          stt->msTooLow = 0;
+
+          /* Normalize the volume level */
+          tmp32 = (inMicLevelTmp - stt->minLevel) << 14;
+          if (stt->maxInit != stt->minLevel) {
+            volNormFIX = tmp32 / (stt->maxInit - stt->minLevel);
+          }
+
+          /* Find correct curve */
+          WebRtcAgc_ExpCurve(volNormFIX, &index);
+
+          /* Compute weighting factor for the volume increase, 32^(-2*X)/2+1.05
+           */
+          weightFIX =
+              kOffset1[index] - (int16_t)((kSlope1[index] * volNormFIX) >> 13);
+
+          /* stt->Rxx160_LPw32 *= 1.047 [~0.2 dB]; */
+          stt->Rxx160_LPw32 = (stt->Rxx160_LPw32 / 64) * 67;
+
+          tmp32 = inMicLevelTmp - stt->minLevel;
+          tmpU32 =
+              ((uint32_t)weightFIX * (uint32_t)(inMicLevelTmp - stt->minLevel));
+          stt->micVol = (tmpU32 >> 14) + stt->minLevel;
+          if (stt->micVol < lastMicVol + 2) {
+            stt->micVol = lastMicVol + 2;
+          }
+
+          inMicLevelTmp = stt->micVol;
+
+#ifdef MIC_LEVEL_FEEDBACK
+          /* Count ms in level saturation */
+          // if (stt->micVol > stt->maxAnalog) {
+          if (stt->micVol > 150) {
+            /* mic level is saturated */
+            stt->numBlocksMicLvlSat++;
+            fprintf(stderr, "Sat mic Level: %d\n", stt->numBlocksMicLvlSat);
+          }
+#endif
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+          fprintf(stt->fpt,
+                  "\tAGC->ProcessAnalog, frame %d: measure <"
+                  " 2ndLowerLim, micVol = %d\n",
+                  stt->fcount, stt->micVol);
+#endif
+        }
+      } else if (stt->Rxx160_LPw32 < stt->lowerLimit) {
+        stt->msTooHigh = 0;
+        stt->changeToSlowMode = 0;
+        stt->msTooLow += 2;
+
+        if (stt->msTooLow > stt->msecSpeechInnerChange) {
+          /* Raise the recording level */
+          int16_t index, weightFIX;
+          int16_t volNormFIX = 16384;  // =1 in Q14.
+
+          stt->msTooLow = 0;
+
+          /* Normalize the volume level */
+          tmp32 = (inMicLevelTmp - stt->minLevel) << 14;
+          if (stt->maxInit != stt->minLevel) {
+            volNormFIX = tmp32 / (stt->maxInit - stt->minLevel);
+          }
+
+          /* Find correct curve */
+          WebRtcAgc_ExpCurve(volNormFIX, &index);
+
+          /* Compute weighting factor for the volume increase, (3.^(-2.*X))/8+1
+           */
+          weightFIX =
+              kOffset2[index] - (int16_t)((kSlope2[index] * volNormFIX) >> 13);
+
+          /* stt->Rxx160_LPw32 *= 1.047 [~0.2 dB]; */
+          stt->Rxx160_LPw32 = (stt->Rxx160_LPw32 / 64) * 67;
+
+          tmp32 = inMicLevelTmp - stt->minLevel;
+          tmpU32 =
+              ((uint32_t)weightFIX * (uint32_t)(inMicLevelTmp - stt->minLevel));
+          stt->micVol = (tmpU32 >> 14) + stt->minLevel;
+          if (stt->micVol < lastMicVol + 1) {
+            stt->micVol = lastMicVol + 1;
+          }
+
+          inMicLevelTmp = stt->micVol;
+
+#ifdef MIC_LEVEL_FEEDBACK
+          /* Count ms in level saturation */
+          // if (stt->micVol > stt->maxAnalog) {
+          if (stt->micVol > 150) {
+            /* mic level is saturated */
+            stt->numBlocksMicLvlSat++;
+            fprintf(stderr, "Sat mic Level: %d\n", stt->numBlocksMicLvlSat);
+          }
+#endif
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+          fprintf(stt->fpt,
+                  "\tAGC->ProcessAnalog, frame %d: measure < LowerLim, micVol "
+                  "= %d\n",
+                  stt->fcount, stt->micVol);
+#endif
+        }
+      } else {
+        /* The signal is inside the desired range which is:
+         * lowerLimit < Rxx160_LP/640 < upperLimit
+         */
+        if (stt->changeToSlowMode > 4000) {
+          stt->msecSpeechInnerChange = 1000;
+          stt->msecSpeechOuterChange = 500;
+          stt->upperLimit = stt->upperPrimaryLimit;
+          stt->lowerLimit = stt->lowerPrimaryLimit;
+        } else {
+          stt->changeToSlowMode += 2;  // in milliseconds
+        }
+        stt->msTooLow = 0;
+        stt->msTooHigh = 0;
+
+        stt->micVol = inMicLevelTmp;
+      }
+#ifdef MIC_LEVEL_FEEDBACK
+      if (stt->numBlocksMicLvlSat > NUM_BLOCKS_IN_SAT_BEFORE_CHANGE_TARGET) {
+        stt->micLvlSat = 1;
+        fprintf(stderr, "target before = %d (%d)\n", stt->analogTargetLevel,
+                stt->targetIdx);
+        WebRtcAgc_UpdateAgcThresholds(stt);
+        WebRtcAgc_CalculateGainTable(
+            &(stt->digitalAgc.gainTable[0]), stt->compressionGaindB,
+            stt->targetLevelDbfs, stt->limiterEnable, stt->analogTarget);
+        stt->numBlocksMicLvlSat = 0;
+        stt->micLvlSat = 0;
+        fprintf(stderr, "target offset = %d\n", stt->targetIdxOffset);
+        fprintf(stderr, "target after  = %d (%d)\n", stt->analogTargetLevel,
+                stt->targetIdx);
+      }
+#endif
+    }
+  }
+
+  /* Ensure gain is not increased in presence of echo or after a mute event
+   * (but allow the zeroCtrl() increase on the frame of a mute detection).
+   */
+  if (echo == 1 ||
+      (stt->muteGuardMs > 0 && stt->muteGuardMs < kMuteGuardTimeMs)) {
+    if (stt->micVol > lastMicVol) {
+      stt->micVol = lastMicVol;
+    }
+  }
+
+  /* limit the gain */
+  if (stt->micVol > stt->maxLevel) {
+    stt->micVol = stt->maxLevel;
+  } else if (stt->micVol < stt->minOutput) {
+    stt->micVol = stt->minOutput;
+  }
+
+  *outMicLevel = WEBRTC_SPL_MIN(stt->micVol, stt->maxAnalog) >> stt->scale;
+
+  return 0;
+}
+
+int WebRtcAgc_Process(void* agcInst,
+                      const int16_t* const* in_near,
+                      size_t num_bands,
+                      size_t samples,
+                      int16_t* const* out,
+                      int32_t inMicLevel,
+                      int32_t* outMicLevel,
+                      int16_t echo,
+                      uint8_t* saturationWarning) {
+  LegacyAgc* stt;
+
+  stt = (LegacyAgc*)agcInst;
+
+  //
+  if (stt == NULL) {
+    return -1;
+  }
+  //
+
+  if (stt->fs == 8000) {
+    if (samples != 80) {
+      return -1;
+    }
+  } else if (stt->fs == 16000 || stt->fs == 32000 || stt->fs == 48000) {
+    if (samples != 160) {
+      return -1;
+    }
+  } else {
+    return -1;
+  }
+
+  *saturationWarning = 0;
+  // TODO(minyue): PUT IN RANGE CHECKING FOR INPUT LEVELS
+  *outMicLevel = inMicLevel;
+
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+  stt->fcount++;
+#endif
+
+  if (WebRtcAgc_ProcessDigital(&stt->digitalAgc, in_near, num_bands, out,
+                               stt->fs, stt->lowLevelSignal) == -1) {
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+    fprintf(stt->fpt, "AGC->Process, frame %d: Error from DigAGC\n\n",
+            stt->fcount);
+#endif
+    return -1;
+  }
+  if (stt->agcMode < kAgcModeFixedDigital &&
+      (stt->lowLevelSignal == 0 || stt->agcMode != kAgcModeAdaptiveDigital)) {
+    if (WebRtcAgc_ProcessAnalog(agcInst, inMicLevel, outMicLevel,
+                                stt->vadMic.logRatio, echo,
+                                saturationWarning) == -1) {
+      return -1;
+    }
+  }
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+  fprintf(stt->agcLog, "%5d\t%d\t%d\t%d\t%d\n", stt->fcount, inMicLevel,
+          *outMicLevel, stt->maxLevel, stt->micVol);
+#endif
+
+  /* update queue */
+  if (stt->inQueue > 1) {
+    memcpy(stt->env[0], stt->env[1], 10 * sizeof(int32_t));
+    memcpy(stt->Rxx16w32_array[0], stt->Rxx16w32_array[1], 5 * sizeof(int32_t));
+  }
+
+  if (stt->inQueue > 0) {
+    stt->inQueue--;
+  }
+
+  return 0;
+}
+
+int WebRtcAgc_set_config(void* agcInst, WebRtcAgcConfig agcConfig) {
+  LegacyAgc* stt;
+  stt = (LegacyAgc*)agcInst;
+
+  if (stt == NULL) {
+    return -1;
+  }
+
+  if (stt->initFlag != kInitCheck) {
+    stt->lastError = AGC_UNINITIALIZED_ERROR;
+    return -1;
+  }
+
+  if (agcConfig.limiterEnable != kAgcFalse &&
+      agcConfig.limiterEnable != kAgcTrue) {
+    stt->lastError = AGC_BAD_PARAMETER_ERROR;
+    return -1;
+  }
+  stt->limiterEnable = agcConfig.limiterEnable;
+  stt->compressionGaindB = agcConfig.compressionGaindB;
+  if ((agcConfig.targetLevelDbfs < 0) || (agcConfig.targetLevelDbfs > 31)) {
+    stt->lastError = AGC_BAD_PARAMETER_ERROR;
+    return -1;
+  }
+  stt->targetLevelDbfs = agcConfig.targetLevelDbfs;
+
+  if (stt->agcMode == kAgcModeFixedDigital) {
+    /* Adjust for different parameter interpretation in FixedDigital mode */
+    stt->compressionGaindB += agcConfig.targetLevelDbfs;
+  }
+
+  /* Update threshold levels for analog adaptation */
+  WebRtcAgc_UpdateAgcThresholds(stt);
+
+  /* Recalculate gain table */
+  if (WebRtcAgc_CalculateGainTable(
+          &(stt->digitalAgc.gainTable[0]), stt->compressionGaindB,
+          stt->targetLevelDbfs, stt->limiterEnable, stt->analogTarget) == -1) {
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+    fprintf(stt->fpt, "AGC->set_config, frame %d: Error from calcGainTable\n\n",
+            stt->fcount);
+#endif
+    return -1;
+  }
+  /* Store the config in a WebRtcAgcConfig */
+  stt->usedConfig.compressionGaindB = agcConfig.compressionGaindB;
+  stt->usedConfig.limiterEnable = agcConfig.limiterEnable;
+  stt->usedConfig.targetLevelDbfs = agcConfig.targetLevelDbfs;
+
+  return 0;
+}
+
+int WebRtcAgc_get_config(void* agcInst, WebRtcAgcConfig* config) {
+  LegacyAgc* stt;
+  stt = (LegacyAgc*)agcInst;
+
+  if (stt == NULL) {
+    return -1;
+  }
+
+  if (config == NULL) {
+    stt->lastError = AGC_NULL_POINTER_ERROR;
+    return -1;
+  }
+
+  if (stt->initFlag != kInitCheck) {
+    stt->lastError = AGC_UNINITIALIZED_ERROR;
+    return -1;
+  }
+
+  config->limiterEnable = stt->usedConfig.limiterEnable;
+  config->targetLevelDbfs = stt->usedConfig.targetLevelDbfs;
+  config->compressionGaindB = stt->usedConfig.compressionGaindB;
+
+  return 0;
+}
+
+void* WebRtcAgc_Create() {
+  LegacyAgc* stt = malloc(sizeof(LegacyAgc));
+
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+  stt->fpt = fopen("./agc_test_log.txt", "wt");
+  stt->agcLog = fopen("./agc_debug_log.txt", "wt");
+  stt->digitalAgc.logFile = fopen("./agc_log.txt", "wt");
+#endif
+
+  stt->initFlag = 0;
+  stt->lastError = 0;
+
+  return stt;
+}
+
+void WebRtcAgc_Free(void* state) {
+  LegacyAgc* stt;
+
+  stt = (LegacyAgc*)state;
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+  fclose(stt->fpt);
+  fclose(stt->agcLog);
+  fclose(stt->digitalAgc.logFile);
+#endif
+  free(stt);
+}
+
+/* minLevel     - Minimum volume level
+ * maxLevel     - Maximum volume level
+ */
+int WebRtcAgc_Init(void* agcInst,
+                   int32_t minLevel,
+                   int32_t maxLevel,
+                   int16_t agcMode,
+                   uint32_t fs) {
+  int32_t max_add, tmp32;
+  int16_t i;
+  int tmpNorm;
+  LegacyAgc* stt;
+
+  /* typecast state pointer */
+  stt = (LegacyAgc*)agcInst;
+
+  if (WebRtcAgc_InitDigital(&stt->digitalAgc, agcMode) != 0) {
+    stt->lastError = AGC_UNINITIALIZED_ERROR;
+    return -1;
+  }
+
+  /* Analog AGC variables */
+  stt->envSum = 0;
+
+/* mode     = 0 - Only saturation protection
+ *            1 - Analog Automatic Gain Control [-targetLevelDbfs (default -3
+ * dBOv)]
+ *            2 - Digital Automatic Gain Control [-targetLevelDbfs (default -3
+ * dBOv)]
+ *            3 - Fixed Digital Gain [compressionGaindB (default 8 dB)]
+ */
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+  stt->fcount = 0;
+  fprintf(stt->fpt, "AGC->Init\n");
+#endif
+  if (agcMode < kAgcModeUnchanged || agcMode > kAgcModeFixedDigital) {
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+    fprintf(stt->fpt, "AGC->Init: error, incorrect mode\n\n");
+#endif
+    return -1;
+  }
+  stt->agcMode = agcMode;
+  stt->fs = fs;
+
+  /* initialize input VAD */
+  WebRtcAgc_InitVad(&stt->vadMic);
+
+  /* If the volume range is smaller than 0-256 then
+   * the levels are shifted up to Q8-domain */
+  tmpNorm = WebRtcSpl_NormU32((uint32_t)maxLevel);
+  stt->scale = tmpNorm - 23;
+  if (stt->scale < 0) {
+    stt->scale = 0;
+  }
+  // TODO(bjornv): Investigate if we really need to scale up a small range now
+  // when we have
+  // a guard against zero-increments. For now, we do not support scale up (scale
+  // = 0).
+  stt->scale = 0;
+  maxLevel <<= stt->scale;
+  minLevel <<= stt->scale;
+
+  /* Make minLevel and maxLevel static in AdaptiveDigital */
+  if (stt->agcMode == kAgcModeAdaptiveDigital) {
+    minLevel = 0;
+    maxLevel = 255;
+    stt->scale = 0;
+  }
+  /* The maximum supplemental volume range is based on a vague idea
+   * of how much lower the gain will be than the real analog gain. */
+  max_add = (maxLevel - minLevel) / 4;
+
+  /* Minimum/maximum volume level that can be set */
+  stt->minLevel = minLevel;
+  stt->maxAnalog = maxLevel;
+  stt->maxLevel = maxLevel + max_add;
+  stt->maxInit = stt->maxLevel;
+
+  stt->zeroCtrlMax = stt->maxAnalog;
+  stt->lastInMicLevel = 0;
+
+  /* Initialize micVol parameter */
+  stt->micVol = stt->maxAnalog;
+  if (stt->agcMode == kAgcModeAdaptiveDigital) {
+    stt->micVol = 127; /* Mid-point of mic level */
+  }
+  stt->micRef = stt->micVol;
+  stt->micGainIdx = 127;
+#ifdef MIC_LEVEL_FEEDBACK
+  stt->numBlocksMicLvlSat = 0;
+  stt->micLvlSat = 0;
+#endif
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+  fprintf(stt->fpt, "AGC->Init: minLevel = %d, maxAnalog = %d, maxLevel = %d\n",
+          stt->minLevel, stt->maxAnalog, stt->maxLevel);
+#endif
+
+  /* Minimum output volume is 4% higher than the available lowest volume level
+   */
+  tmp32 = ((stt->maxLevel - stt->minLevel) * 10) >> 8;
+  stt->minOutput = (stt->minLevel + tmp32);
+
+  stt->msTooLow = 0;
+  stt->msTooHigh = 0;
+  stt->changeToSlowMode = 0;
+  stt->firstCall = 0;
+  stt->msZero = 0;
+  stt->muteGuardMs = 0;
+  stt->gainTableIdx = 0;
+
+  stt->msecSpeechInnerChange = kMsecSpeechInner;
+  stt->msecSpeechOuterChange = kMsecSpeechOuter;
+
+  stt->activeSpeech = 0;
+  stt->Rxx16_LPw32Max = 0;
+
+  stt->vadThreshold = kNormalVadThreshold;
+  stt->inActive = 0;
+
+  for (i = 0; i < RXX_BUFFER_LEN; i++) {
+    stt->Rxx16_vectorw32[i] = (int32_t)1000; /* -54dBm0 */
+  }
+  stt->Rxx160w32 =
+      125 * RXX_BUFFER_LEN; /* (stt->Rxx16_vectorw32[0]>>3) = 125 */
+
+  stt->Rxx16pos = 0;
+  stt->Rxx16_LPw32 = (int32_t)16284; /* Q(-4) */
+
+  for (i = 0; i < 5; i++) {
+    stt->Rxx16w32_array[0][i] = 0;
+  }
+  for (i = 0; i < 10; i++) {
+    stt->env[0][i] = 0;
+    stt->env[1][i] = 0;
+  }
+  stt->inQueue = 0;
+
+#ifdef MIC_LEVEL_FEEDBACK
+  stt->targetIdxOffset = 0;
+#endif
+
+  WebRtcSpl_MemSetW32(stt->filterState, 0, 8);
+
+  stt->initFlag = kInitCheck;
+  // Default config settings.
+  stt->defaultConfig.limiterEnable = kAgcTrue;
+  stt->defaultConfig.targetLevelDbfs = AGC_DEFAULT_TARGET_LEVEL;
+  stt->defaultConfig.compressionGaindB = AGC_DEFAULT_COMP_GAIN;
+
+  if (WebRtcAgc_set_config(stt, stt->defaultConfig) == -1) {
+    stt->lastError = AGC_UNSPECIFIED_ERROR;
+    return -1;
+  }
+  stt->Rxx160_LPw32 = stt->analogTargetLevel;  // Initialize rms value
+
+  stt->lowLevelSignal = 0;
+
+  /* Only positive values are allowed that are not too large */
+  if ((minLevel >= maxLevel) || (maxLevel & 0xFC000000)) {
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+    fprintf(stt->fpt, "minLevel, maxLevel value(s) are invalid\n\n");
+#endif
+    return -1;
+  } else {
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+    fprintf(stt->fpt, "\n");
+#endif
+    return 0;
+  }
+}
diff --git a/modules/audio_processing/agc/legacy/analog_agc.h b/modules/audio_processing/agc/legacy/analog_agc.h
new file mode 100644
index 0000000..1fed377
--- /dev/null
+++ b/modules/audio_processing/agc/legacy/analog_agc.h
@@ -0,0 +1,132 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_LEGACY_ANALOG_AGC_H_
+#define MODULES_AUDIO_PROCESSING_AGC_LEGACY_ANALOG_AGC_H_
+
+//#define MIC_LEVEL_FEEDBACK
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+#include <stdio.h>
+#endif
+
+#include "modules/audio_processing/agc/legacy/digital_agc.h"
+#include "modules/audio_processing/agc/legacy/gain_control.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+/* Analog Automatic Gain Control variables:
+ * Constant declarations (inner limits inside which no changes are done)
+ * In the beginning the range is narrower to widen as soon as the measure
+ * 'Rxx160_LP' is inside it. Currently the starting limits are -22.2+/-1dBm0
+ * and the final limits -22.2+/-2.5dBm0. These levels makes the speech signal
+ * go towards -25.4dBm0 (-31.4dBov). Tuned with wbfile-31.4dBov.pcm
+ * The limits are created by running the AGC with a file having the desired
+ * signal level and thereafter plotting Rxx160_LP in the dBm0-domain defined
+ * by out=10*log10(in/260537279.7); Set the target level to the average level
+ * of our measure Rxx160_LP. Remember that the levels are in blocks of 16 in
+ * Q(-7). (Example matlab code: round(db2pow(-21.2)*16/2^7) )
+ */
+#define RXX_BUFFER_LEN 10
+
+static const int16_t kMsecSpeechInner = 520;
+static const int16_t kMsecSpeechOuter = 340;
+
+static const int16_t kNormalVadThreshold = 400;
+
+static const int16_t kAlphaShortTerm = 6;  // 1 >> 6 = 0.0156
+static const int16_t kAlphaLongTerm = 10;  // 1 >> 10 = 0.000977
+
+typedef struct {
+  // Configurable parameters/variables
+  uint32_t fs;                // Sampling frequency
+  int16_t compressionGaindB;  // Fixed gain level in dB
+  int16_t targetLevelDbfs;    // Target level in -dBfs of envelope (default -3)
+  int16_t agcMode;            // Hard coded mode (adaptAna/adaptDig/fixedDig)
+  uint8_t limiterEnable;      // Enabling limiter (on/off (default off))
+  WebRtcAgcConfig defaultConfig;
+  WebRtcAgcConfig usedConfig;
+
+  // General variables
+  int16_t initFlag;
+  int16_t lastError;
+
+  // Target level parameters
+  // Based on the above: analogTargetLevel = round((32767*10^(-22/20))^2*16/2^7)
+  int32_t analogTargetLevel;    // = RXX_BUFFER_LEN * 846805;       -22 dBfs
+  int32_t startUpperLimit;      // = RXX_BUFFER_LEN * 1066064;      -21 dBfs
+  int32_t startLowerLimit;      // = RXX_BUFFER_LEN * 672641;       -23 dBfs
+  int32_t upperPrimaryLimit;    // = RXX_BUFFER_LEN * 1342095;      -20 dBfs
+  int32_t lowerPrimaryLimit;    // = RXX_BUFFER_LEN * 534298;       -24 dBfs
+  int32_t upperSecondaryLimit;  // = RXX_BUFFER_LEN * 2677832;      -17 dBfs
+  int32_t lowerSecondaryLimit;  // = RXX_BUFFER_LEN * 267783;       -27 dBfs
+  uint16_t targetIdx;           // Table index for corresponding target level
+#ifdef MIC_LEVEL_FEEDBACK
+  uint16_t targetIdxOffset;  // Table index offset for level compensation
+#endif
+  int16_t analogTarget;  // Digital reference level in ENV scale
+
+  // Analog AGC specific variables
+  int32_t filterState[8];  // For downsampling wb to nb
+  int32_t upperLimit;      // Upper limit for mic energy
+  int32_t lowerLimit;      // Lower limit for mic energy
+  int32_t Rxx160w32;       // Average energy for one frame
+  int32_t Rxx16_LPw32;     // Low pass filtered subframe energies
+  int32_t Rxx160_LPw32;    // Low pass filtered frame energies
+  int32_t Rxx16_LPw32Max;  // Keeps track of largest energy subframe
+  int32_t Rxx16_vectorw32[RXX_BUFFER_LEN];  // Array with subframe energies
+  int32_t Rxx16w32_array[2][5];  // Energy values of microphone signal
+  int32_t env[2][10];            // Envelope values of subframes
+
+  int16_t Rxx16pos;          // Current position in the Rxx16_vectorw32
+  int16_t envSum;            // Filtered scaled envelope in subframes
+  int16_t vadThreshold;      // Threshold for VAD decision
+  int16_t inActive;          // Inactive time in milliseconds
+  int16_t msTooLow;          // Milliseconds of speech at a too low level
+  int16_t msTooHigh;         // Milliseconds of speech at a too high level
+  int16_t changeToSlowMode;  // Change to slow mode after some time at target
+  int16_t firstCall;         // First call to the process-function
+  int16_t msZero;            // Milliseconds of zero input
+  int16_t msecSpeechOuterChange;  // Min ms of speech between volume changes
+  int16_t msecSpeechInnerChange;  // Min ms of speech between volume changes
+  int16_t activeSpeech;           // Milliseconds of active speech
+  int16_t muteGuardMs;            // Counter to prevent mute action
+  int16_t inQueue;                // 10 ms batch indicator
+
+  // Microphone level variables
+  int32_t micRef;         // Remember ref. mic level for virtual mic
+  uint16_t gainTableIdx;  // Current position in virtual gain table
+  int32_t micGainIdx;     // Gain index of mic level to increase slowly
+  int32_t micVol;         // Remember volume between frames
+  int32_t maxLevel;       // Max possible vol level, incl dig gain
+  int32_t maxAnalog;      // Maximum possible analog volume level
+  int32_t maxInit;        // Initial value of "max"
+  int32_t minLevel;       // Minimum possible volume level
+  int32_t minOutput;      // Minimum output volume level
+  int32_t zeroCtrlMax;    // Remember max gain => don't amp low input
+  int32_t lastInMicLevel;
+
+  int16_t scale;  // Scale factor for internal volume levels
+#ifdef MIC_LEVEL_FEEDBACK
+  int16_t numBlocksMicLvlSat;
+  uint8_t micLvlSat;
+#endif
+  // Structs for VAD and digital_agc
+  AgcVad vadMic;
+  DigitalAgc digitalAgc;
+
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+  FILE* fpt;
+  FILE* agcLog;
+  int32_t fcount;
+#endif
+
+  int16_t lowLevelSignal;
+} LegacyAgc;
+
+#endif  // MODULES_AUDIO_PROCESSING_AGC_LEGACY_ANALOG_AGC_H_
diff --git a/modules/audio_processing/agc/legacy/digital_agc.c b/modules/audio_processing/agc/legacy/digital_agc.c
new file mode 100644
index 0000000..3269a17
--- /dev/null
+++ b/modules/audio_processing/agc/legacy/digital_agc.c
@@ -0,0 +1,703 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/* digital_agc.c
+ *
+ */
+
+#include "modules/audio_processing/agc/legacy/digital_agc.h"
+
+#include <string.h>
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+#include <stdio.h>
+#endif
+
+#include "rtc_base/checks.h"
+#include "modules/audio_processing/agc/legacy/gain_control.h"
+
+// To generate the gaintable, copy&paste the following lines to a Matlab window:
+// MaxGain = 6; MinGain = 0; CompRatio = 3; Knee = 1;
+// zeros = 0:31; lvl = 2.^(1-zeros);
+// A = -10*log10(lvl) * (CompRatio - 1) / CompRatio;
+// B = MaxGain - MinGain;
+// gains = round(2^16*10.^(0.05 * (MinGain + B * (
+// log(exp(-Knee*A)+exp(-Knee*B)) - log(1+exp(-Knee*B)) ) /
+// log(1/(1+exp(Knee*B))))));
+// fprintf(1, '\t%i, %i, %i, %i,\n', gains);
+// % Matlab code for plotting the gain and input/output level characteristic
+// (copy/paste the following 3 lines):
+// in = 10*log10(lvl); out = 20*log10(gains/65536);
+// subplot(121); plot(in, out); axis([-30, 0, -5, 20]); grid on; xlabel('Input
+// (dB)'); ylabel('Gain (dB)');
+// subplot(122); plot(in, in+out); axis([-30, 0, -30, 5]); grid on;
+// xlabel('Input (dB)'); ylabel('Output (dB)');
+// zoom on;
+
+// Generator table for y=log2(1+e^x) in Q8.
+enum { kGenFuncTableSize = 128 };
+static const uint16_t kGenFuncTable[kGenFuncTableSize] = {
+    256,   485,   786,   1126,  1484,  1849,  2217,  2586,  2955,  3324,  3693,
+    4063,  4432,  4801,  5171,  5540,  5909,  6279,  6648,  7017,  7387,  7756,
+    8125,  8495,  8864,  9233,  9603,  9972,  10341, 10711, 11080, 11449, 11819,
+    12188, 12557, 12927, 13296, 13665, 14035, 14404, 14773, 15143, 15512, 15881,
+    16251, 16620, 16989, 17359, 17728, 18097, 18466, 18836, 19205, 19574, 19944,
+    20313, 20682, 21052, 21421, 21790, 22160, 22529, 22898, 23268, 23637, 24006,
+    24376, 24745, 25114, 25484, 25853, 26222, 26592, 26961, 27330, 27700, 28069,
+    28438, 28808, 29177, 29546, 29916, 30285, 30654, 31024, 31393, 31762, 32132,
+    32501, 32870, 33240, 33609, 33978, 34348, 34717, 35086, 35456, 35825, 36194,
+    36564, 36933, 37302, 37672, 38041, 38410, 38780, 39149, 39518, 39888, 40257,
+    40626, 40996, 41365, 41734, 42104, 42473, 42842, 43212, 43581, 43950, 44320,
+    44689, 45058, 45428, 45797, 46166, 46536, 46905};
+
+static const int16_t kAvgDecayTime = 250;  // frames; < 3000
+
+int32_t WebRtcAgc_CalculateGainTable(int32_t* gainTable,       // Q16
+                                     int16_t digCompGaindB,    // Q0
+                                     int16_t targetLevelDbfs,  // Q0
+                                     uint8_t limiterEnable,
+                                     int16_t analogTarget)  // Q0
+{
+  // This function generates the compressor gain table used in the fixed digital
+  // part.
+  uint32_t tmpU32no1, tmpU32no2, absInLevel, logApprox;
+  int32_t inLevel, limiterLvl;
+  int32_t tmp32, tmp32no1, tmp32no2, numFIX, den, y32;
+  const uint16_t kLog10 = 54426;    // log2(10)     in Q14
+  const uint16_t kLog10_2 = 49321;  // 10*log10(2)  in Q14
+  const uint16_t kLogE_1 = 23637;   // log2(e)      in Q14
+  uint16_t constMaxGain;
+  uint16_t tmpU16, intPart, fracPart;
+  const int16_t kCompRatio = 3;
+  const int16_t kSoftLimiterLeft = 1;
+  int16_t limiterOffset = 0;  // Limiter offset
+  int16_t limiterIdx, limiterLvlX;
+  int16_t constLinApprox, zeroGainLvl, maxGain, diffGain;
+  int16_t i, tmp16, tmp16no1;
+  int zeros, zerosScale;
+
+  // Constants
+  //    kLogE_1 = 23637; // log2(e)      in Q14
+  //    kLog10 = 54426; // log2(10)     in Q14
+  //    kLog10_2 = 49321; // 10*log10(2)  in Q14
+
+  // Calculate maximum digital gain and zero gain level
+  tmp32no1 = (digCompGaindB - analogTarget) * (kCompRatio - 1);
+  tmp16no1 = analogTarget - targetLevelDbfs;
+  tmp16no1 +=
+      WebRtcSpl_DivW32W16ResW16(tmp32no1 + (kCompRatio >> 1), kCompRatio);
+  maxGain = WEBRTC_SPL_MAX(tmp16no1, (analogTarget - targetLevelDbfs));
+  tmp32no1 = maxGain * kCompRatio;
+  zeroGainLvl = digCompGaindB;
+  zeroGainLvl -= WebRtcSpl_DivW32W16ResW16(tmp32no1 + ((kCompRatio - 1) >> 1),
+                                           kCompRatio - 1);
+  if ((digCompGaindB <= analogTarget) && (limiterEnable)) {
+    zeroGainLvl += (analogTarget - digCompGaindB + kSoftLimiterLeft);
+    limiterOffset = 0;
+  }
+
+  // Calculate the difference between maximum gain and gain at 0dB0v:
+  //  diffGain = maxGain + (compRatio-1)*zeroGainLvl/compRatio
+  //           = (compRatio-1)*digCompGaindB/compRatio
+  tmp32no1 = digCompGaindB * (kCompRatio - 1);
+  diffGain =
+      WebRtcSpl_DivW32W16ResW16(tmp32no1 + (kCompRatio >> 1), kCompRatio);
+  if (diffGain < 0 || diffGain >= kGenFuncTableSize) {
+    RTC_DCHECK(0);
+    return -1;
+  }
+
+  // Calculate the limiter level and index:
+  //  limiterLvlX = analogTarget - limiterOffset
+  //  limiterLvl  = targetLevelDbfs + limiterOffset/compRatio
+  limiterLvlX = analogTarget - limiterOffset;
+  limiterIdx = 2 + WebRtcSpl_DivW32W16ResW16((int32_t)limiterLvlX * (1 << 13),
+                                             kLog10_2 / 2);
+  tmp16no1 =
+      WebRtcSpl_DivW32W16ResW16(limiterOffset + (kCompRatio >> 1), kCompRatio);
+  limiterLvl = targetLevelDbfs + tmp16no1;
+
+  // Calculate (through table lookup):
+  //  constMaxGain = log2(1+2^(log2(e)*diffGain)); (in Q8)
+  constMaxGain = kGenFuncTable[diffGain];  // in Q8
+
+  // Calculate a parameter used to approximate the fractional part of 2^x with a
+  // piecewise linear function in Q14:
+  //  constLinApprox = round(3/2*(4*(3-2*sqrt(2))/(log(2)^2)-0.5)*2^14);
+  constLinApprox = 22817;  // in Q14
+
+  // Calculate a denominator used in the exponential part to convert from dB to
+  // linear scale:
+  //  den = 20*constMaxGain (in Q8)
+  den = WEBRTC_SPL_MUL_16_U16(20, constMaxGain);  // in Q8
+
+  for (i = 0; i < 32; i++) {
+    // Calculate scaled input level (compressor):
+    //  inLevel =
+    //  fix((-constLog10_2*(compRatio-1)*(1-i)+fix(compRatio/2))/compRatio)
+    tmp16 = (int16_t)((kCompRatio - 1) * (i - 1));       // Q0
+    tmp32 = WEBRTC_SPL_MUL_16_U16(tmp16, kLog10_2) + 1;  // Q14
+    inLevel = WebRtcSpl_DivW32W16(tmp32, kCompRatio);    // Q14
+
+    // Calculate diffGain-inLevel, to map using the genFuncTable
+    inLevel = (int32_t)diffGain * (1 << 14) - inLevel;  // Q14
+
+    // Make calculations on abs(inLevel) and compensate for the sign afterwards.
+    absInLevel = (uint32_t)WEBRTC_SPL_ABS_W32(inLevel);  // Q14
+
+    // LUT with interpolation
+    intPart = (uint16_t)(absInLevel >> 14);
+    fracPart =
+        (uint16_t)(absInLevel & 0x00003FFF);  // extract the fractional part
+    tmpU16 = kGenFuncTable[intPart + 1] - kGenFuncTable[intPart];  // Q8
+    tmpU32no1 = tmpU16 * fracPart;                                 // Q22
+    tmpU32no1 += (uint32_t)kGenFuncTable[intPart] << 14;           // Q22
+    logApprox = tmpU32no1 >> 8;                                    // Q14
+    // Compensate for negative exponent using the relation:
+    //  log2(1 + 2^-x) = log2(1 + 2^x) - x
+    if (inLevel < 0) {
+      zeros = WebRtcSpl_NormU32(absInLevel);
+      zerosScale = 0;
+      if (zeros < 15) {
+        // Not enough space for multiplication
+        tmpU32no2 = absInLevel >> (15 - zeros);                 // Q(zeros-1)
+        tmpU32no2 = WEBRTC_SPL_UMUL_32_16(tmpU32no2, kLogE_1);  // Q(zeros+13)
+        if (zeros < 9) {
+          zerosScale = 9 - zeros;
+          tmpU32no1 >>= zerosScale;  // Q(zeros+13)
+        } else {
+          tmpU32no2 >>= zeros - 9;  // Q22
+        }
+      } else {
+        tmpU32no2 = WEBRTC_SPL_UMUL_32_16(absInLevel, kLogE_1);  // Q28
+        tmpU32no2 >>= 6;                                         // Q22
+      }
+      logApprox = 0;
+      if (tmpU32no2 < tmpU32no1) {
+        logApprox = (tmpU32no1 - tmpU32no2) >> (8 - zerosScale);  // Q14
+      }
+    }
+    numFIX = (maxGain * constMaxGain) * (1 << 6);  // Q14
+    numFIX -= (int32_t)logApprox * diffGain;       // Q14
+
+    // Calculate ratio
+    // Shift |numFIX| as much as possible.
+    // Ensure we avoid wrap-around in |den| as well.
+    if (numFIX > (den >> 8) || -numFIX > (den >> 8))  // |den| is Q8.
+    {
+      zeros = WebRtcSpl_NormW32(numFIX);
+    } else {
+      zeros = WebRtcSpl_NormW32(den) + 8;
+    }
+    numFIX *= 1 << zeros;  // Q(14+zeros)
+
+    // Shift den so we end up in Qy1
+    tmp32no1 = WEBRTC_SPL_SHIFT_W32(den, zeros - 9);  // Q(zeros - 1)
+    y32 = numFIX / tmp32no1;  // in Q15
+    // This is to do rounding in Q14.
+    y32 = y32 >= 0 ? (y32 + 1) >> 1 : -((-y32 + 1) >> 1);
+
+    if (limiterEnable && (i < limiterIdx)) {
+      tmp32 = WEBRTC_SPL_MUL_16_U16(i - 1, kLog10_2);  // Q14
+      tmp32 -= limiterLvl * (1 << 14);                 // Q14
+      y32 = WebRtcSpl_DivW32W16(tmp32 + 10, 20);
+    }
+    if (y32 > 39000) {
+      tmp32 = (y32 >> 1) * kLog10 + 4096;  // in Q27
+      tmp32 >>= 13;                        // In Q14.
+    } else {
+      tmp32 = y32 * kLog10 + 8192;  // in Q28
+      tmp32 >>= 14;                 // In Q14.
+    }
+    tmp32 += 16 << 14;  // in Q14 (Make sure final output is in Q16)
+
+    // Calculate power
+    if (tmp32 > 0) {
+      intPart = (int16_t)(tmp32 >> 14);
+      fracPart = (uint16_t)(tmp32 & 0x00003FFF);  // in Q14
+      if ((fracPart >> 13) != 0) {
+        tmp16 = (2 << 14) - constLinApprox;
+        tmp32no2 = (1 << 14) - fracPart;
+        tmp32no2 *= tmp16;
+        tmp32no2 >>= 13;
+        tmp32no2 = (1 << 14) - tmp32no2;
+      } else {
+        tmp16 = constLinApprox - (1 << 14);
+        tmp32no2 = (fracPart * tmp16) >> 13;
+      }
+      fracPart = (uint16_t)tmp32no2;
+      gainTable[i] =
+          (1 << intPart) + WEBRTC_SPL_SHIFT_W32(fracPart, intPart - 14);
+    } else {
+      gainTable[i] = 0;
+    }
+  }
+
+  return 0;
+}
+
+int32_t WebRtcAgc_InitDigital(DigitalAgc* stt, int16_t agcMode) {
+  if (agcMode == kAgcModeFixedDigital) {
+    // start at minimum to find correct gain faster
+    stt->capacitorSlow = 0;
+  } else {
+    // start out with 0 dB gain
+    stt->capacitorSlow = 134217728;  // (int32_t)(0.125f * 32768.0f * 32768.0f);
+  }
+  stt->capacitorFast = 0;
+  stt->gain = 65536;
+  stt->gatePrevious = 0;
+  stt->agcMode = agcMode;
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+  stt->frameCounter = 0;
+#endif
+
+  // initialize VADs
+  WebRtcAgc_InitVad(&stt->vadNearend);
+  WebRtcAgc_InitVad(&stt->vadFarend);
+
+  return 0;
+}
+
+int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc* stt,
+                                     const int16_t* in_far,
+                                     size_t nrSamples) {
+  RTC_DCHECK(stt);
+  // VAD for far end
+  WebRtcAgc_ProcessVad(&stt->vadFarend, in_far, nrSamples);
+
+  return 0;
+}
+
+int32_t WebRtcAgc_ProcessDigital(DigitalAgc* stt,
+                                 const int16_t* const* in_near,
+                                 size_t num_bands,
+                                 int16_t* const* out,
+                                 uint32_t FS,
+                                 int16_t lowlevelSignal) {
+  // array for gains (one value per ms, incl start & end)
+  int32_t gains[11];
+
+  int32_t out_tmp, tmp32;
+  int32_t env[10];
+  int32_t max_nrg;
+  int32_t cur_level;
+  int32_t gain32, delta;
+  int16_t logratio;
+  int16_t lower_thr, upper_thr;
+  int16_t zeros = 0, zeros_fast, frac = 0;
+  int16_t decay;
+  int16_t gate, gain_adj;
+  int16_t k;
+  size_t n, i, L;
+  int16_t L2;  // samples/subframe
+
+  // determine number of samples per ms
+  if (FS == 8000) {
+    L = 8;
+    L2 = 3;
+  } else if (FS == 16000 || FS == 32000 || FS == 48000) {
+    L = 16;
+    L2 = 4;
+  } else {
+    return -1;
+  }
+
+  for (i = 0; i < num_bands; ++i) {
+    if (in_near[i] != out[i]) {
+      // Only needed if they don't already point to the same place.
+      memcpy(out[i], in_near[i], 10 * L * sizeof(in_near[i][0]));
+    }
+  }
+  // VAD for near end
+  logratio = WebRtcAgc_ProcessVad(&stt->vadNearend, out[0], L * 10);
+
+  // Account for far end VAD
+  if (stt->vadFarend.counter > 10) {
+    tmp32 = 3 * logratio;
+    logratio = (int16_t)((tmp32 - stt->vadFarend.logRatio) >> 2);
+  }
+
+  // Determine decay factor depending on VAD
+  //  upper_thr = 1.0f;
+  //  lower_thr = 0.25f;
+  upper_thr = 1024;  // Q10
+  lower_thr = 0;     // Q10
+  if (logratio > upper_thr) {
+    // decay = -2^17 / DecayTime;  ->  -65
+    decay = -65;
+  } else if (logratio < lower_thr) {
+    decay = 0;
+  } else {
+    // decay = (int16_t)(((lower_thr - logratio)
+    //       * (2^27/(DecayTime*(upper_thr-lower_thr)))) >> 10);
+    // SUBSTITUTED: 2^27/(DecayTime*(upper_thr-lower_thr))  ->  65
+    tmp32 = (lower_thr - logratio) * 65;
+    decay = (int16_t)(tmp32 >> 10);
+  }
+
+  // adjust decay factor for long silence (detected as low standard deviation)
+  // This is only done in the adaptive modes
+  if (stt->agcMode != kAgcModeFixedDigital) {
+    if (stt->vadNearend.stdLongTerm < 4000) {
+      decay = 0;
+    } else if (stt->vadNearend.stdLongTerm < 8096) {
+      // decay = (int16_t)(((stt->vadNearend.stdLongTerm - 4000) * decay) >>
+      // 12);
+      tmp32 = (stt->vadNearend.stdLongTerm - 4000) * decay;
+      decay = (int16_t)(tmp32 >> 12);
+    }
+
+    if (lowlevelSignal != 0) {
+      decay = 0;
+    }
+  }
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+  stt->frameCounter++;
+  fprintf(stt->logFile, "%5.2f\t%d\t%d\t%d\t", (float)(stt->frameCounter) / 100,
+          logratio, decay, stt->vadNearend.stdLongTerm);
+#endif
+  // Find max amplitude per sub frame
+  // iterate over sub frames
+  for (k = 0; k < 10; k++) {
+    // iterate over samples
+    max_nrg = 0;
+    for (n = 0; n < L; n++) {
+      int32_t nrg = out[0][k * L + n] * out[0][k * L + n];
+      if (nrg > max_nrg) {
+        max_nrg = nrg;
+      }
+    }
+    env[k] = max_nrg;
+  }
+
+  // Calculate gain per sub frame
+  gains[0] = stt->gain;
+  for (k = 0; k < 10; k++) {
+    // Fast envelope follower
+    //  decay time = -131000 / -1000 = 131 (ms)
+    stt->capacitorFast =
+        AGC_SCALEDIFF32(-1000, stt->capacitorFast, stt->capacitorFast);
+    if (env[k] > stt->capacitorFast) {
+      stt->capacitorFast = env[k];
+    }
+    // Slow envelope follower
+    if (env[k] > stt->capacitorSlow) {
+      // increase capacitorSlow
+      stt->capacitorSlow = AGC_SCALEDIFF32(500, (env[k] - stt->capacitorSlow),
+                                           stt->capacitorSlow);
+    } else {
+      // decrease capacitorSlow
+      stt->capacitorSlow =
+          AGC_SCALEDIFF32(decay, stt->capacitorSlow, stt->capacitorSlow);
+    }
+
+    // use maximum of both capacitors as current level
+    if (stt->capacitorFast > stt->capacitorSlow) {
+      cur_level = stt->capacitorFast;
+    } else {
+      cur_level = stt->capacitorSlow;
+    }
+    // Translate signal level into gain, using a piecewise linear approximation
+    // find number of leading zeros
+    zeros = WebRtcSpl_NormU32((uint32_t)cur_level);
+    if (cur_level == 0) {
+      zeros = 31;
+    }
+    tmp32 = ((uint32_t)cur_level << zeros) & 0x7FFFFFFF;
+    frac = (int16_t)(tmp32 >> 19);  // Q12.
+    tmp32 = (stt->gainTable[zeros - 1] - stt->gainTable[zeros]) * frac;
+    gains[k + 1] = stt->gainTable[zeros] + (tmp32 >> 12);
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+    if (k == 0) {
+      fprintf(stt->logFile, "%d\t%d\t%d\t%d\t%d\n", env[0], cur_level,
+              stt->capacitorFast, stt->capacitorSlow, zeros);
+    }
+#endif
+  }
+
+  // Gate processing (lower gain during absence of speech)
+  zeros = (zeros << 9) - (frac >> 3);
+  // find number of leading zeros
+  zeros_fast = WebRtcSpl_NormU32((uint32_t)stt->capacitorFast);
+  if (stt->capacitorFast == 0) {
+    zeros_fast = 31;
+  }
+  tmp32 = ((uint32_t)stt->capacitorFast << zeros_fast) & 0x7FFFFFFF;
+  zeros_fast <<= 9;
+  zeros_fast -= (int16_t)(tmp32 >> 22);
+
+  gate = 1000 + zeros_fast - zeros - stt->vadNearend.stdShortTerm;
+
+  if (gate < 0) {
+    stt->gatePrevious = 0;
+  } else {
+    tmp32 = stt->gatePrevious * 7;
+    gate = (int16_t)((gate + tmp32) >> 3);
+    stt->gatePrevious = gate;
+  }
+  // gate < 0     -> no gate
+  // gate > 2500  -> max gate
+  if (gate > 0) {
+    if (gate < 2500) {
+      gain_adj = (2500 - gate) >> 5;
+    } else {
+      gain_adj = 0;
+    }
+    for (k = 0; k < 10; k++) {
+      if ((gains[k + 1] - stt->gainTable[0]) > 8388608) {
+        // To prevent wraparound
+        tmp32 = (gains[k + 1] - stt->gainTable[0]) >> 8;
+        tmp32 *= 178 + gain_adj;
+      } else {
+        tmp32 = (gains[k + 1] - stt->gainTable[0]) * (178 + gain_adj);
+        tmp32 >>= 8;
+      }
+      gains[k + 1] = stt->gainTable[0] + tmp32;
+    }
+  }
+
+  // Limit gain to avoid overload distortion
+  for (k = 0; k < 10; k++) {
+    // To prevent wrap around
+    zeros = 10;
+    if (gains[k + 1] > 47453132) {
+      zeros = 16 - WebRtcSpl_NormW32(gains[k + 1]);
+    }
+    gain32 = (gains[k + 1] >> zeros) + 1;
+    gain32 *= gain32;
+    // check for overflow
+    while (AGC_MUL32((env[k] >> 12) + 1, gain32) >
+           WEBRTC_SPL_SHIFT_W32((int32_t)32767, 2 * (1 - zeros + 10))) {
+      // multiply by 253/256 ==> -0.1 dB
+      if (gains[k + 1] > 8388607) {
+        // Prevent wrap around
+        gains[k + 1] = (gains[k + 1] / 256) * 253;
+      } else {
+        gains[k + 1] = (gains[k + 1] * 253) / 256;
+      }
+      gain32 = (gains[k + 1] >> zeros) + 1;
+      gain32 *= gain32;
+    }
+  }
+  // gain reductions should be done 1 ms earlier than gain increases
+  for (k = 1; k < 10; k++) {
+    if (gains[k] > gains[k + 1]) {
+      gains[k] = gains[k + 1];
+    }
+  }
+  // save start gain for next frame
+  stt->gain = gains[10];
+
+  // Apply gain
+  // handle first sub frame separately
+  delta = (gains[1] - gains[0]) * (1 << (4 - L2));
+  gain32 = gains[0] * (1 << 4);
+  // iterate over samples
+  for (n = 0; n < L; n++) {
+    for (i = 0; i < num_bands; ++i) {
+      tmp32 = out[i][n] * ((gain32 + 127) >> 7);
+      out_tmp = tmp32 >> 16;
+      if (out_tmp > 4095) {
+        out[i][n] = (int16_t)32767;
+      } else if (out_tmp < -4096) {
+        out[i][n] = (int16_t)-32768;
+      } else {
+        tmp32 = out[i][n] * (gain32 >> 4);
+        out[i][n] = (int16_t)(tmp32 >> 16);
+      }
+    }
+    //
+
+    gain32 += delta;
+  }
+  // iterate over subframes
+  for (k = 1; k < 10; k++) {
+    delta = (gains[k + 1] - gains[k]) * (1 << (4 - L2));
+    gain32 = gains[k] * (1 << 4);
+    // iterate over samples
+    for (n = 0; n < L; n++) {
+      for (i = 0; i < num_bands; ++i) {
+        int64_t tmp64 = ((int64_t)(out[i][k * L + n])) * (gain32 >> 4);
+        tmp64 = tmp64 >> 16;
+        if (tmp64 > 32767) {
+          out[i][k * L + n] = 32767;
+        }
+        else if (tmp64 < -32768) {
+          out[i][k * L + n] = -32768;
+        }
+        else {
+          out[i][k * L + n] = (int16_t)(tmp64);
+        }
+      }
+      gain32 += delta;
+    }
+  }
+
+  return 0;
+}
+
+void WebRtcAgc_InitVad(AgcVad* state) {
+  int16_t k;
+
+  state->HPstate = 0;   // state of high pass filter
+  state->logRatio = 0;  // log( P(active) / P(inactive) )
+  // average input level (Q10)
+  state->meanLongTerm = 15 << 10;
+
+  // variance of input level (Q8)
+  state->varianceLongTerm = 500 << 8;
+
+  state->stdLongTerm = 0;  // standard deviation of input level in dB
+  // short-term average input level (Q10)
+  state->meanShortTerm = 15 << 10;
+
+  // short-term variance of input level (Q8)
+  state->varianceShortTerm = 500 << 8;
+
+  state->stdShortTerm =
+      0;               // short-term standard deviation of input level in dB
+  state->counter = 3;  // counts updates
+  for (k = 0; k < 8; k++) {
+    // downsampling filter
+    state->downState[k] = 0;
+  }
+}
+
+int16_t WebRtcAgc_ProcessVad(AgcVad* state,      // (i) VAD state
+                             const int16_t* in,  // (i) Speech signal
+                             size_t nrSamples)   // (i) number of samples
+{
+  uint32_t nrg;
+  int32_t out, tmp32, tmp32b;
+  uint16_t tmpU16;
+  int16_t k, subfr, tmp16;
+  int16_t buf1[8];
+  int16_t buf2[4];
+  int16_t HPstate;
+  int16_t zeros, dB;
+
+  // process in 10 sub frames of 1 ms (to save on memory)
+  nrg = 0;
+  HPstate = state->HPstate;
+  for (subfr = 0; subfr < 10; subfr++) {
+    // downsample to 4 kHz
+    if (nrSamples == 160) {
+      for (k = 0; k < 8; k++) {
+        tmp32 = (int32_t)in[2 * k] + (int32_t)in[2 * k + 1];
+        tmp32 >>= 1;
+        buf1[k] = (int16_t)tmp32;
+      }
+      in += 16;
+
+      WebRtcSpl_DownsampleBy2(buf1, 8, buf2, state->downState);
+    } else {
+      WebRtcSpl_DownsampleBy2(in, 8, buf2, state->downState);
+      in += 8;
+    }
+
+    // high pass filter and compute energy
+    for (k = 0; k < 4; k++) {
+      out = buf2[k] + HPstate;
+      tmp32 = 600 * out;
+      HPstate = (int16_t)((tmp32 >> 10) - buf2[k]);
+
+      // Add 'out * out / 2**6' to 'nrg' in a non-overflowing
+      // way. Guaranteed to work as long as 'out * out / 2**6' fits in
+      // an int32_t.
+      nrg += out * (out / (1 << 6));
+      nrg += out * (out % (1 << 6)) / (1 << 6);
+    }
+  }
+  state->HPstate = HPstate;
+
+  // find number of leading zeros
+  if (!(0xFFFF0000 & nrg)) {
+    zeros = 16;
+  } else {
+    zeros = 0;
+  }
+  if (!(0xFF000000 & (nrg << zeros))) {
+    zeros += 8;
+  }
+  if (!(0xF0000000 & (nrg << zeros))) {
+    zeros += 4;
+  }
+  if (!(0xC0000000 & (nrg << zeros))) {
+    zeros += 2;
+  }
+  if (!(0x80000000 & (nrg << zeros))) {
+    zeros += 1;
+  }
+
+  // energy level (range {-32..30}) (Q10)
+  dB = (15 - zeros) * (1 << 11);
+
+  // Update statistics
+
+  if (state->counter < kAvgDecayTime) {
+    // decay time = AvgDecTime * 10 ms
+    state->counter++;
+  }
+
+  // update short-term estimate of mean energy level (Q10)
+  tmp32 = state->meanShortTerm * 15 + dB;
+  state->meanShortTerm = (int16_t)(tmp32 >> 4);
+
+  // update short-term estimate of variance in energy level (Q8)
+  tmp32 = (dB * dB) >> 12;
+  tmp32 += state->varianceShortTerm * 15;
+  state->varianceShortTerm = tmp32 / 16;
+
+  // update short-term estimate of standard deviation in energy level (Q10)
+  tmp32 = state->meanShortTerm * state->meanShortTerm;
+  tmp32 = (state->varianceShortTerm << 12) - tmp32;
+  state->stdShortTerm = (int16_t)WebRtcSpl_Sqrt(tmp32);
+
+  // update long-term estimate of mean energy level (Q10)
+  tmp32 = state->meanLongTerm * state->counter + dB;
+  state->meanLongTerm =
+      WebRtcSpl_DivW32W16ResW16(tmp32, WebRtcSpl_AddSatW16(state->counter, 1));
+
+  // update long-term estimate of variance in energy level (Q8)
+  tmp32 = (dB * dB) >> 12;
+  tmp32 += state->varianceLongTerm * state->counter;
+  state->varianceLongTerm =
+      WebRtcSpl_DivW32W16(tmp32, WebRtcSpl_AddSatW16(state->counter, 1));
+
+  // update long-term estimate of standard deviation in energy level (Q10)
+  tmp32 = state->meanLongTerm * state->meanLongTerm;
+  tmp32 = (state->varianceLongTerm << 12) - tmp32;
+  state->stdLongTerm = (int16_t)WebRtcSpl_Sqrt(tmp32);
+
+  // update voice activity measure (Q10)
+  tmp16 = 3 << 12;
+  // TODO(bjornv): (dB - state->meanLongTerm) can overflow, e.g., in
+  // ApmTest.Process unit test. Previously the macro WEBRTC_SPL_MUL_16_16()
+  // was used, which did an intermediate cast to (int16_t), hence losing
+  // significant bits. This cause logRatio to max out positive, rather than
+  // negative. This is a bug, but has very little significance.
+  tmp32 = tmp16 * (int16_t)(dB - state->meanLongTerm);
+  tmp32 = WebRtcSpl_DivW32W16(tmp32, state->stdLongTerm);
+  tmpU16 = (13 << 12);
+  tmp32b = WEBRTC_SPL_MUL_16_U16(state->logRatio, tmpU16);
+  tmp32 += tmp32b >> 10;
+
+  state->logRatio = (int16_t)(tmp32 >> 6);
+
+  // limit
+  if (state->logRatio > 2048) {
+    state->logRatio = 2048;
+  }
+  if (state->logRatio < -2048) {
+    state->logRatio = -2048;
+  }
+
+  return state->logRatio;  // Q10
+}
diff --git a/modules/audio_processing/agc/legacy/digital_agc.h b/modules/audio_processing/agc/legacy/digital_agc.h
new file mode 100644
index 0000000..af6cf48
--- /dev/null
+++ b/modules/audio_processing/agc/legacy/digital_agc.h
@@ -0,0 +1,79 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_LEGACY_DIGITAL_AGC_H_
+#define MODULES_AUDIO_PROCESSING_AGC_LEGACY_DIGITAL_AGC_H_
+
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+#include <stdio.h>
+#endif
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+// the 32 most significant bits of A(19) * B(26) >> 13
+#define AGC_MUL32(A, B) (((B) >> 13) * (A) + (((0x00001FFF & (B)) * (A)) >> 13))
+// C + the 32 most significant bits of A * B
+#define AGC_SCALEDIFF32(A, B, C) \
+  ((C) + ((B) >> 16) * (A) + (((0x0000FFFF & (B)) * (A)) >> 16))
+
+typedef struct {
+  int32_t downState[8];
+  int16_t HPstate;
+  int16_t counter;
+  int16_t logRatio;           // log( P(active) / P(inactive) ) (Q10)
+  int16_t meanLongTerm;       // Q10
+  int32_t varianceLongTerm;   // Q8
+  int16_t stdLongTerm;        // Q10
+  int16_t meanShortTerm;      // Q10
+  int32_t varianceShortTerm;  // Q8
+  int16_t stdShortTerm;       // Q10
+} AgcVad;                     // total = 54 bytes
+
+typedef struct {
+  int32_t capacitorSlow;
+  int32_t capacitorFast;
+  int32_t gain;
+  int32_t gainTable[32];
+  int16_t gatePrevious;
+  int16_t agcMode;
+  AgcVad vadNearend;
+  AgcVad vadFarend;
+#ifdef WEBRTC_AGC_DEBUG_DUMP
+  FILE* logFile;
+  int frameCounter;
+#endif
+} DigitalAgc;
+
+int32_t WebRtcAgc_InitDigital(DigitalAgc* digitalAgcInst, int16_t agcMode);
+
+int32_t WebRtcAgc_ProcessDigital(DigitalAgc* digitalAgcInst,
+                                 const int16_t* const* inNear,
+                                 size_t num_bands,
+                                 int16_t* const* out,
+                                 uint32_t FS,
+                                 int16_t lowLevelSignal);
+
+int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc* digitalAgcInst,
+                                     const int16_t* inFar,
+                                     size_t nrSamples);
+
+void WebRtcAgc_InitVad(AgcVad* vadInst);
+
+int16_t WebRtcAgc_ProcessVad(AgcVad* vadInst,    // (i) VAD state
+                             const int16_t* in,  // (i) Speech signal
+                             size_t nrSamples);  // (i) number of samples
+
+int32_t WebRtcAgc_CalculateGainTable(int32_t* gainTable,         // Q16
+                                     int16_t compressionGaindB,  // Q0 (in dB)
+                                     int16_t targetLevelDbfs,    // Q0 (in dB)
+                                     uint8_t limiterEnable,
+                                     int16_t analogTarget);
+
+#endif  // MODULES_AUDIO_PROCESSING_AGC_LEGACY_DIGITAL_AGC_H_
diff --git a/modules/audio_processing/agc/legacy/gain_control.h b/modules/audio_processing/agc/legacy/gain_control.h
new file mode 100644
index 0000000..0f121b1
--- /dev/null
+++ b/modules/audio_processing/agc/legacy/gain_control.h
@@ -0,0 +1,247 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_LEGACY_GAIN_CONTROL_H_
+#define MODULES_AUDIO_PROCESSING_AGC_LEGACY_GAIN_CONTROL_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+// Errors
+#define AGC_UNSPECIFIED_ERROR 18000
+#define AGC_UNSUPPORTED_FUNCTION_ERROR 18001
+#define AGC_UNINITIALIZED_ERROR 18002
+#define AGC_NULL_POINTER_ERROR 18003
+#define AGC_BAD_PARAMETER_ERROR 18004
+
+// Warnings
+#define AGC_BAD_PARAMETER_WARNING 18050
+
+enum {
+  kAgcModeUnchanged,
+  kAgcModeAdaptiveAnalog,
+  kAgcModeAdaptiveDigital,
+  kAgcModeFixedDigital
+};
+
+enum { kAgcFalse = 0, kAgcTrue };
+
+typedef struct {
+  int16_t targetLevelDbfs;    // default 3 (-3 dBOv)
+  int16_t compressionGaindB;  // default 9 dB
+  uint8_t limiterEnable;      // default kAgcTrue (on)
+} WebRtcAgcConfig;
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * This function analyses the number of samples passed to
+ * farend and produces any error code that could arise.
+ *
+ * Input:
+ *      - agcInst           : AGC instance.
+ *      - samples           : Number of samples in input vector.
+ *
+ * Return value:
+ *                          :  0 - Normal operation.
+ *                          : -1 - Error.
+ */
+int WebRtcAgc_GetAddFarendError(void* state, size_t samples);
+
+/*
+ * This function processes a 10 ms frame of far-end speech to determine
+ * if there is active speech. The length of the input speech vector must be
+ * given in samples (80 when FS=8000, and 160 when FS=16000, FS=32000 or
+ * FS=48000).
+ *
+ * Input:
+ *      - agcInst           : AGC instance.
+ *      - inFar             : Far-end input speech vector
+ *      - samples           : Number of samples in input vector
+ *
+ * Return value:
+ *                          :  0 - Normal operation.
+ *                          : -1 - Error
+ */
+int WebRtcAgc_AddFarend(void* agcInst, const int16_t* inFar, size_t samples);
+
+/*
+ * This function processes a 10 ms frame of microphone speech to determine
+ * if there is active speech. The length of the input speech vector must be
+ * given in samples (80 when FS=8000, and 160 when FS=16000, FS=32000 or
+ * FS=48000). For very low input levels, the input signal is increased in level
+ * by multiplying and overwriting the samples in inMic[].
+ *
+ * This function should be called before any further processing of the
+ * near-end microphone signal.
+ *
+ * Input:
+ *      - agcInst           : AGC instance.
+ *      - inMic             : Microphone input speech vector for each band
+ *      - num_bands         : Number of bands in input vector
+ *      - samples           : Number of samples in input vector
+ *
+ * Return value:
+ *                          :  0 - Normal operation.
+ *                          : -1 - Error
+ */
+int WebRtcAgc_AddMic(void* agcInst,
+                     int16_t* const* inMic,
+                     size_t num_bands,
+                     size_t samples);
+
+/*
+ * This function replaces the analog microphone with a virtual one.
+ * It is a digital gain applied to the input signal and is used in the
+ * agcAdaptiveDigital mode where no microphone level is adjustable. The length
+ * of the input speech vector must be given in samples (80 when FS=8000, and 160
+ * when FS=16000, FS=32000 or FS=48000).
+ *
+ * Input:
+ *      - agcInst           : AGC instance.
+ *      - inMic             : Microphone input speech vector for each band
+ *      - num_bands         : Number of bands in input vector
+ *      - samples           : Number of samples in input vector
+ *      - micLevelIn        : Input level of microphone (static)
+ *
+ * Output:
+ *      - inMic             : Microphone output after processing (L band)
+ *      - inMic_H           : Microphone output after processing (H band)
+ *      - micLevelOut       : Adjusted microphone level after processing
+ *
+ * Return value:
+ *                          :  0 - Normal operation.
+ *                          : -1 - Error
+ */
+int WebRtcAgc_VirtualMic(void* agcInst,
+                         int16_t* const* inMic,
+                         size_t num_bands,
+                         size_t samples,
+                         int32_t micLevelIn,
+                         int32_t* micLevelOut);
+
+/*
+ * This function processes a 10 ms frame and adjusts (normalizes) the gain both
+ * analog and digitally. The gain adjustments are done only during active
+ * periods of speech. The length of the speech vectors must be given in samples
+ * (80 when FS=8000, and 160 when FS=16000, FS=32000 or FS=48000). The echo
+ * parameter can be used to ensure the AGC will not adjust upward in the
+ * presence of echo.
+ *
+ * This function should be called after processing the near-end microphone
+ * signal, in any case after any echo cancellation.
+ *
+ * Input:
+ *      - agcInst           : AGC instance
+ *      - inNear            : Near-end input speech vector for each band
+ *      - num_bands         : Number of bands in input/output vector
+ *      - samples           : Number of samples in input/output vector
+ *      - inMicLevel        : Current microphone volume level
+ *      - echo              : Set to 0 if the signal passed to add_mic is
+ *                            almost certainly free of echo; otherwise set
+ *                            to 1. If you have no information regarding echo
+ *                            set to 0.
+ *
+ * Output:
+ *      - outMicLevel       : Adjusted microphone volume level
+ *      - out               : Gain-adjusted near-end speech vector
+ *                          : May be the same vector as the input.
+ *      - saturationWarning : A returned value of 1 indicates a saturation event
+ *                            has occurred and the volume cannot be further
+ *                            reduced. Otherwise will be set to 0.
+ *
+ * Return value:
+ *                          :  0 - Normal operation.
+ *                          : -1 - Error
+ */
+int WebRtcAgc_Process(void* agcInst,
+                      const int16_t* const* inNear,
+                      size_t num_bands,
+                      size_t samples,
+                      int16_t* const* out,
+                      int32_t inMicLevel,
+                      int32_t* outMicLevel,
+                      int16_t echo,
+                      uint8_t* saturationWarning);
+
+/*
+ * This function sets the config parameters (targetLevelDbfs,
+ * compressionGaindB and limiterEnable).
+ *
+ * Input:
+ *      - agcInst           : AGC instance
+ *      - config            : config struct
+ *
+ * Output:
+ *
+ * Return value:
+ *                          :  0 - Normal operation.
+ *                          : -1 - Error
+ */
+int WebRtcAgc_set_config(void* agcInst, WebRtcAgcConfig config);
+
+/*
+ * This function returns the config parameters (targetLevelDbfs,
+ * compressionGaindB and limiterEnable).
+ *
+ * Input:
+ *      - agcInst           : AGC instance
+ *
+ * Output:
+ *      - config            : config struct
+ *
+ * Return value:
+ *                          :  0 - Normal operation.
+ *                          : -1 - Error
+ */
+int WebRtcAgc_get_config(void* agcInst, WebRtcAgcConfig* config);
+
+/*
+ * This function creates and returns an AGC instance, which will contain the
+ * state information for one (duplex) channel.
+ */
+void* WebRtcAgc_Create();
+
+/*
+ * This function frees the AGC instance created at the beginning.
+ *
+ * Input:
+ *      - agcInst           : AGC instance.
+ */
+void WebRtcAgc_Free(void* agcInst);
+
+/*
+ * This function initializes an AGC instance.
+ *
+ * Input:
+ *      - agcInst           : AGC instance.
+ *      - minLevel          : Minimum possible mic level
+ *      - maxLevel          : Maximum possible mic level
+ *      - agcMode           : 0 - Unchanged
+ *                          : 1 - Adaptive Analog Automatic Gain Control -3dBOv
+ *                          : 2 - Adaptive Digital Automatic Gain Control -3dBOv
+ *                          : 3 - Fixed Digital Gain 0dB
+ *      - fs                : Sampling frequency
+ *
+ * Return value             :  0 - Ok
+ *                            -1 - Error
+ */
+int WebRtcAgc_Init(void* agcInst,
+                   int32_t minLevel,
+                   int32_t maxLevel,
+                   int16_t agcMode,
+                   uint32_t fs);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif  // MODULES_AUDIO_PROCESSING_AGC_LEGACY_GAIN_CONTROL_H_
diff --git a/modules/audio_processing/agc/loudness_histogram.cc b/modules/audio_processing/agc/loudness_histogram.cc
new file mode 100644
index 0000000..63d5f7c
--- /dev/null
+++ b/modules/audio_processing/agc/loudness_histogram.cc
@@ -0,0 +1,229 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc/loudness_histogram.h"
+
+#include <cmath>
+#include <cstring>
+
+#include "modules/include/module_common_types.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+static const double kHistBinCenters[] = {
+    7.59621091765857e-02, 9.02036021061016e-02, 1.07115112009343e-01,
+    1.27197217770508e-01, 1.51044347572047e-01, 1.79362373905283e-01,
+    2.12989507320644e-01, 2.52921107370304e-01, 3.00339145144454e-01,
+    3.56647189489147e-01, 4.23511952494003e-01, 5.02912623991786e-01,
+    5.97199455365749e-01, 7.09163326739184e-01, 8.42118356728544e-01,
+    1.00000000000000e+00, 1.18748153630660e+00, 1.41011239906908e+00,
+    1.67448243801153e+00, 1.98841697800836e+00, 2.36120844786349e+00,
+    2.80389143520905e+00, 3.32956930911896e+00, 3.95380207843188e+00,
+    4.69506696634852e+00, 5.57530533426190e+00, 6.62057214370769e+00,
+    7.86180718043869e+00, 9.33575086877358e+00, 1.10860317842269e+01,
+    1.31644580546776e+01, 1.56325508754123e+01, 1.85633655299256e+01,
+    2.20436538184971e+01, 2.61764319021997e+01, 3.10840295702492e+01,
+    3.69117111886792e+01, 4.38319755100383e+01, 5.20496616180135e+01,
+    6.18080121423973e+01, 7.33958732149108e+01, 8.71562442838066e+01,
+    1.03496430860848e+02, 1.22900100720889e+02, 1.45941600416277e+02,
+    1.73302955873365e+02, 2.05794060286978e+02, 2.44376646872353e+02,
+    2.90192756065437e+02, 3.44598539797631e+02, 4.09204403447902e+02,
+    4.85922673669740e+02, 5.77024203055553e+02, 6.85205587130498e+02,
+    8.13668983291589e+02, 9.66216894324125e+02, 1.14736472207740e+03,
+    1.36247442287647e+03, 1.61791322085579e+03, 1.92124207711260e+03,
+    2.28143949334655e+03, 2.70916727454970e+03, 3.21708611729384e+03,
+    3.82023036499473e+03, 4.53645302286906e+03, 5.38695420497926e+03,
+    6.39690865534207e+03, 7.59621091765857e+03, 9.02036021061016e+03,
+    1.07115112009343e+04, 1.27197217770508e+04, 1.51044347572047e+04,
+    1.79362373905283e+04, 2.12989507320644e+04, 2.52921107370304e+04,
+    3.00339145144454e+04, 3.56647189489147e+04};
+
+static const double kProbQDomain = 1024.0;
+// Loudness of -15 dB (smallest expected loudness) in log domain,
+// loudness_db = 13.5 * log10(rms);
+static const double kLogDomainMinBinCenter = -2.57752062648587;
+// Loudness step of 1 dB in log domain
+static const double kLogDomainStepSizeInverse = 5.81954605750359;
+
+static const int kTransientWidthThreshold = 7;
+static const double kLowProbabilityThreshold = 0.2;
+
+static const int kLowProbThresholdQ10 =
+    static_cast<int>(kLowProbabilityThreshold * kProbQDomain);
+
+LoudnessHistogram::LoudnessHistogram()
+    : num_updates_(0),
+      audio_content_q10_(0),
+      bin_count_q10_(),
+      activity_probability_(),
+      hist_bin_index_(),
+      buffer_index_(0),
+      buffer_is_full_(false),
+      len_circular_buffer_(0),
+      len_high_activity_(0) {
+  static_assert(
+      kHistSize == sizeof(kHistBinCenters) / sizeof(kHistBinCenters[0]),
+      "histogram bin centers incorrect size");
+}
+
+LoudnessHistogram::LoudnessHistogram(int window_size)
+    : num_updates_(0),
+      audio_content_q10_(0),
+      bin_count_q10_(),
+      activity_probability_(new int[window_size]),
+      hist_bin_index_(new int[window_size]),
+      buffer_index_(0),
+      buffer_is_full_(false),
+      len_circular_buffer_(window_size),
+      len_high_activity_(0) {}
+
+LoudnessHistogram::~LoudnessHistogram() {}
+
+void LoudnessHistogram::Update(double rms, double activity_probaility) {
+  // If circular histogram is activated then remove the oldest entry.
+  if (len_circular_buffer_ > 0)
+    RemoveOldestEntryAndUpdate();
+
+  // Find the corresponding bin.
+  int hist_index = GetBinIndex(rms);
+  // To Q10 domain.
+  int prob_q10 =
+      static_cast<int16_t>(floor(activity_probaility * kProbQDomain));
+  InsertNewestEntryAndUpdate(prob_q10, hist_index);
+}
+
+// Doing nothing if buffer is not full, yet.
+void LoudnessHistogram::RemoveOldestEntryAndUpdate() {
+  RTC_DCHECK_GT(len_circular_buffer_, 0);
+  // Do nothing if circular buffer is not full.
+  if (!buffer_is_full_)
+    return;
+
+  int oldest_prob = activity_probability_[buffer_index_];
+  int oldest_hist_index = hist_bin_index_[buffer_index_];
+  UpdateHist(-oldest_prob, oldest_hist_index);
+}
+
+void LoudnessHistogram::RemoveTransient() {
+  // Don't expect to be here if high-activity region is longer than
+  // |kTransientWidthThreshold| or there has not been any transient.
+  RTC_DCHECK_LE(len_high_activity_, kTransientWidthThreshold);
+  int index =
+      (buffer_index_ > 0) ? (buffer_index_ - 1) : len_circular_buffer_ - 1;
+  while (len_high_activity_ > 0) {
+    UpdateHist(-activity_probability_[index], hist_bin_index_[index]);
+    activity_probability_[index] = 0;
+    index = (index > 0) ? (index - 1) : (len_circular_buffer_ - 1);
+    len_high_activity_--;
+  }
+}
+
+void LoudnessHistogram::InsertNewestEntryAndUpdate(int activity_prob_q10,
+                                                   int hist_index) {
+  // Update the circular buffer if it is enabled.
+  if (len_circular_buffer_ > 0) {
+    // Removing transient.
+    if (activity_prob_q10 <= kLowProbThresholdQ10) {
+      // Lower than threshold probability, set it to zero.
+      activity_prob_q10 = 0;
+      // Check if this has been a transient.
+      if (len_high_activity_ <= kTransientWidthThreshold)
+        RemoveTransient();  // Remove this transient.
+      len_high_activity_ = 0;
+    } else if (len_high_activity_ <= kTransientWidthThreshold) {
+      len_high_activity_++;
+    }
+    // Updating the circular buffer.
+    activity_probability_[buffer_index_] = activity_prob_q10;
+    hist_bin_index_[buffer_index_] = hist_index;
+    // Increment the buffer index and check for wrap-around.
+    buffer_index_++;
+    if (buffer_index_ >= len_circular_buffer_) {
+      buffer_index_ = 0;
+      buffer_is_full_ = true;
+    }
+  }
+
+  num_updates_++;
+  if (num_updates_ < 0)
+    num_updates_--;
+
+  UpdateHist(activity_prob_q10, hist_index);
+}
+
+void LoudnessHistogram::UpdateHist(int activity_prob_q10, int hist_index) {
+  bin_count_q10_[hist_index] += activity_prob_q10;
+  audio_content_q10_ += activity_prob_q10;
+}
+
+double LoudnessHistogram::AudioContent() const {
+  return audio_content_q10_ / kProbQDomain;
+}
+
+LoudnessHistogram* LoudnessHistogram::Create() {
+  return new LoudnessHistogram;
+}
+
+LoudnessHistogram* LoudnessHistogram::Create(int window_size) {
+  if (window_size < 0)
+    return NULL;
+  return new LoudnessHistogram(window_size);
+}
+
+void LoudnessHistogram::Reset() {
+  // Reset the histogram, audio-content and number of updates.
+  memset(bin_count_q10_, 0, sizeof(bin_count_q10_));
+  audio_content_q10_ = 0;
+  num_updates_ = 0;
+  // Empty the circular buffer.
+  buffer_index_ = 0;
+  buffer_is_full_ = false;
+  len_high_activity_ = 0;
+}
+
+int LoudnessHistogram::GetBinIndex(double rms) {
+  // First exclude overload cases.
+  if (rms <= kHistBinCenters[0]) {
+    return 0;
+  } else if (rms >= kHistBinCenters[kHistSize - 1]) {
+    return kHistSize - 1;
+  } else {
+    // The quantizer is uniform in log domain. Alternatively we could do binary
+    // search in linear domain.
+    double rms_log = log(rms);
+
+    int index = static_cast<int>(
+        floor((rms_log - kLogDomainMinBinCenter) * kLogDomainStepSizeInverse));
+    // The final decision is in linear domain.
+    double b = 0.5 * (kHistBinCenters[index] + kHistBinCenters[index + 1]);
+    if (rms > b) {
+      return index + 1;
+    }
+    return index;
+  }
+}
+
+double LoudnessHistogram::CurrentRms() const {
+  double p;
+  double mean_val = 0;
+  if (audio_content_q10_ > 0) {
+    double p_total_inverse = 1. / static_cast<double>(audio_content_q10_);
+    for (int n = 0; n < kHistSize; n++) {
+      p = static_cast<double>(bin_count_q10_[n]) * p_total_inverse;
+      mean_val += p * kHistBinCenters[n];
+    }
+  } else {
+    mean_val = kHistBinCenters[0];
+  }
+  return mean_val;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/agc/loudness_histogram.h b/modules/audio_processing/agc/loudness_histogram.h
new file mode 100644
index 0000000..ab45276
--- /dev/null
+++ b/modules/audio_processing/agc/loudness_histogram.h
@@ -0,0 +1,92 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_LOUDNESS_HISTOGRAM_H_
+#define MODULES_AUDIO_PROCESSING_AGC_LOUDNESS_HISTOGRAM_H_
+
+#include <string.h>
+
+#include <memory>
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// This class implements the histogram of loudness with circular buffers so that
+// the histogram tracks the last T seconds of the loudness.
+class LoudnessHistogram {
+ public:
+  // Create a non-sliding LoudnessHistogram.
+  static LoudnessHistogram* Create();
+
+  // Create a sliding LoudnessHistogram, i.e. the histogram represents the last
+  // |window_size| samples.
+  static LoudnessHistogram* Create(int window_size);
+  ~LoudnessHistogram();
+
+  // Insert RMS and the corresponding activity probability.
+  void Update(double rms, double activity_probability);
+
+  // Reset the histogram, forget the past.
+  void Reset();
+
+  // Current loudness, which is actually the mean of histogram in loudness
+  // domain.
+  double CurrentRms() const;
+
+  // Sum of the histogram content.
+  double AudioContent() const;
+
+  // Number of times the histogram has been updated.
+  int num_updates() const { return num_updates_; }
+
+ private:
+  LoudnessHistogram();
+  explicit LoudnessHistogram(int window);
+
+  // Find the histogram bin associated with the given |rms|.
+  int GetBinIndex(double rms);
+
+  void RemoveOldestEntryAndUpdate();
+  void InsertNewestEntryAndUpdate(int activity_prob_q10, int hist_index);
+  void UpdateHist(int activity_prob_q10, int hist_index);
+  void RemoveTransient();
+
+  // Number of histogram bins.
+  static const int kHistSize = 77;
+
+  // Number of times the histogram is updated
+  int num_updates_;
+  // Audio content, this should be equal to the sum of the components of
+  // |bin_count_q10_|.
+  int64_t audio_content_q10_;
+
+  // LoudnessHistogram of input RMS in Q10 with |kHistSize_| bins. In each
+  // 'Update(),' we increment the associated histogram-bin with the given
+  // probability. The increment is implemented in Q10 to avoid rounding errors.
+  int64_t bin_count_q10_[kHistSize];
+
+  // Circular buffer for probabilities
+  std::unique_ptr<int[]> activity_probability_;
+  // Circular buffer for histogram-indices of probabilities.
+  std::unique_ptr<int[]> hist_bin_index_;
+  // Current index of circular buffer, where the newest data will be written to,
+  // therefore, pointing to the oldest data if buffer is full.
+  int buffer_index_;
+  // Indicating if buffer is full and we had a wrap around.
+  int buffer_is_full_;
+  // Size of circular buffer.
+  int len_circular_buffer_;
+  int len_high_activity_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AGC_LOUDNESS_HISTOGRAM_H_
diff --git a/modules/audio_processing/agc/loudness_histogram_unittest.cc b/modules/audio_processing/agc/loudness_histogram_unittest.cc
new file mode 100644
index 0000000..8c61710
--- /dev/null
+++ b/modules/audio_processing/agc/loudness_histogram_unittest.cc
@@ -0,0 +1,106 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Use CreateHistUnittestFile.m to generate the input file.
+
+#include "modules/audio_processing/agc/loudness_histogram.h"
+
+#include <stdio.h>
+#include <algorithm>
+#include <cmath>
+#include <memory>
+
+#include "modules/audio_processing/agc/utility.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+struct InputOutput {
+  double rms;
+  double activity_probability;
+  double audio_content;
+  double loudness;
+};
+
+const double kRelativeErrTol = 1e-10;
+
+class LoudnessHistogramTest : public ::testing::Test {
+ protected:
+  void RunTest(bool enable_circular_buff, const char* filename);
+
+ private:
+  void TestClean();
+  std::unique_ptr<LoudnessHistogram> hist_;
+};
+
+void LoudnessHistogramTest::TestClean() {
+  EXPECT_EQ(hist_->CurrentRms(), 7.59621091765857e-02);
+  EXPECT_EQ(hist_->AudioContent(), 0);
+  EXPECT_EQ(hist_->num_updates(), 0);
+}
+
+void LoudnessHistogramTest::RunTest(bool enable_circular_buff,
+                                    const char* filename) {
+  FILE* in_file = fopen(filename, "rb");
+  ASSERT_TRUE(in_file != NULL);
+  if (enable_circular_buff) {
+    int buffer_size;
+    EXPECT_EQ(fread(&buffer_size, sizeof(buffer_size), 1, in_file), 1u);
+    hist_.reset(LoudnessHistogram::Create(buffer_size));
+  } else {
+    hist_.reset(LoudnessHistogram::Create());
+  }
+  TestClean();
+
+  InputOutput io;
+  int num_updates = 0;
+  int num_reset = 0;
+  while (fread(&io, sizeof(InputOutput), 1, in_file) == 1) {
+    if (io.rms < 0) {
+      // We have to reset.
+      hist_->Reset();
+      TestClean();
+      num_updates = 0;
+      num_reset++;
+      // Read the next chunk of input.
+      if (fread(&io, sizeof(InputOutput), 1, in_file) != 1)
+        break;
+    }
+    hist_->Update(io.rms, io.activity_probability);
+    num_updates++;
+    EXPECT_EQ(hist_->num_updates(), num_updates);
+    double audio_content = hist_->AudioContent();
+
+    double abs_err =
+        std::min(audio_content, io.audio_content) * kRelativeErrTol;
+
+    ASSERT_NEAR(audio_content, io.audio_content, abs_err);
+    double current_loudness = Linear2Loudness(hist_->CurrentRms());
+    abs_err =
+        std::min(fabs(current_loudness), fabs(io.loudness)) * kRelativeErrTol;
+    ASSERT_NEAR(current_loudness, io.loudness, abs_err);
+  }
+  fclose(in_file);
+}
+
+TEST_F(LoudnessHistogramTest, ActiveCircularBuffer) {
+  RunTest(true, test::ResourcePath(
+                    "audio_processing/agc/agc_with_circular_buffer", "dat")
+                    .c_str());
+}
+
+TEST_F(LoudnessHistogramTest, InactiveCircularBuffer) {
+  RunTest(false, test::ResourcePath(
+                     "audio_processing/agc/agc_no_circular_buffer", "dat")
+                     .c_str());
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/agc/mock_agc.h b/modules/audio_processing/agc/mock_agc.h
new file mode 100644
index 0000000..b27d28c
--- /dev/null
+++ b/modules/audio_processing/agc/mock_agc.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_MOCK_AGC_H_
+#define MODULES_AUDIO_PROCESSING_AGC_MOCK_AGC_H_
+
+#include "modules/audio_processing/agc/agc.h"
+
+#include "modules/include/module_common_types.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockAgc : public Agc {
+ public:
+  MOCK_METHOD2(AnalyzePreproc, float(const int16_t* audio, size_t length));
+  MOCK_METHOD3(Process, void(const int16_t* audio, size_t length,
+                             int sample_rate_hz));
+  MOCK_METHOD1(GetRmsErrorDb, bool(int* error));
+  MOCK_METHOD0(Reset, void());
+  MOCK_METHOD1(set_target_level_dbfs, int(int level));
+  MOCK_CONST_METHOD0(target_level_dbfs, int());
+  MOCK_METHOD1(EnableStandaloneVad, void(bool enable));
+  MOCK_CONST_METHOD0(standalone_vad_enabled, bool());
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AGC_MOCK_AGC_H_
diff --git a/modules/audio_processing/agc/utility.cc b/modules/audio_processing/agc/utility.cc
new file mode 100644
index 0000000..554a837
--- /dev/null
+++ b/modules/audio_processing/agc/utility.cc
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc/utility.h"
+
+#include <math.h>
+
+static const double kLog10 = 2.30258509299;
+static const double kLinear2DbScale = 20.0 / kLog10;
+static const double kLinear2LoudnessScale = 13.4 / kLog10;
+
+double Loudness2Db(double loudness) {
+  return loudness * kLinear2DbScale / kLinear2LoudnessScale;
+}
+
+double Linear2Loudness(double rms) {
+  if (rms == 0)
+    return -15;
+  return kLinear2LoudnessScale * log(rms);
+}
+
+double Db2Loudness(double db) {
+  return db * kLinear2LoudnessScale / kLinear2DbScale;
+}
+
+double Dbfs2Loudness(double dbfs) {
+  return Db2Loudness(90 + dbfs);
+}
diff --git a/modules/audio_processing/agc/utility.h b/modules/audio_processing/agc/utility.h
new file mode 100644
index 0000000..8ba87b6
--- /dev/null
+++ b/modules/audio_processing/agc/utility.h
@@ -0,0 +1,23 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_UTILITY_H_
+#define MODULES_AUDIO_PROCESSING_AGC_UTILITY_H_
+
+// TODO(turajs): Add description of function.
+double Loudness2Db(double loudness);
+
+double Linear2Loudness(double rms);
+
+double Db2Loudness(double db);
+
+double Dbfs2Loudness(double dbfs);
+
+#endif  // MODULES_AUDIO_PROCESSING_AGC_UTILITY_H_
diff --git a/modules/audio_processing/agc2/BUILD.gn b/modules/audio_processing/agc2/BUILD.gn
new file mode 100644
index 0000000..aca80d4
--- /dev/null
+++ b/modules/audio_processing/agc2/BUILD.gn
@@ -0,0 +1,69 @@
+# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+
+rtc_source_set("agc2") {
+  sources = [
+    "agc2_common.h",
+    "fixed_digital_level_estimator.cc",
+    "fixed_digital_level_estimator.h",
+    "fixed_gain_controller.cc",
+    "fixed_gain_controller.h",
+    "gain_curve_applier.cc",
+    "gain_curve_applier.h",
+    "interpolated_gain_curve.cc",
+    "interpolated_gain_curve.h",
+  ]
+
+  configs += [ "..:apm_debug_dump" ]
+
+  deps = [
+    "..:apm_logging",
+    "..:audio_frame_view",
+    "../../../api:array_view",
+    "../../../common_audio",
+    "../../../rtc_base:checks",
+    "../../../rtc_base:gtest_prod",
+    "../../../rtc_base:rtc_base_approved",
+    "../../../rtc_base:safe_minmax",
+    "../../../system_wrappers:metrics_api",
+  ]
+}
+
+rtc_source_set("fixed_digital_unittests") {
+  testonly = true
+  configs += [ "..:apm_debug_dump" ]
+
+  sources = [
+    "agc2_testing_common.cc",
+    "agc2_testing_common.h",
+    "agc2_testing_common_unittest.cc",
+    "compute_interpolated_gain_curve.cc",
+    "compute_interpolated_gain_curve.h",
+    "fixed_digital_level_estimator_unittest.cc",
+    "fixed_gain_controller_unittest.cc",
+    "gain_curve_applier_unittest.cc",
+    "interpolated_gain_curve_unittest.cc",
+    "limiter.cc",
+    "limiter.h",
+    "limiter_unittest.cc",
+    "vector_float_frame.cc",
+    "vector_float_frame.h",
+  ]
+  deps = [
+    ":agc2",
+    "..:apm_logging",
+    "..:audio_frame_view",
+    "../../../api:array_view",
+    "../../../common_audio",
+    "../../../rtc_base:checks",
+    "../../../rtc_base:rtc_base_approved",
+    "../../../rtc_base:rtc_base_tests_utils",
+  ]
+}
diff --git a/modules/audio_processing/agc2/agc2_common.h b/modules/audio_processing/agc2/agc2_common.h
new file mode 100644
index 0000000..ad0ab4e
--- /dev/null
+++ b/modules/audio_processing/agc2/agc2_common.h
@@ -0,0 +1,46 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_AGC2_COMMON_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_AGC2_COMMON_H_
+
+#include <cmath>
+
+#include "rtc_base/basictypes.h"
+
+namespace webrtc {
+
+constexpr float kMinFloatS16Value = -32768.f;
+constexpr float kMaxFloatS16Value = 32767.f;
+constexpr double kMaxAbsFloatS16Value = 32768.0;
+
+constexpr size_t kFrameDurationMs = 10;
+constexpr size_t kSubFramesInFrame = 20;
+constexpr size_t kMaximalNumberOfSamplesPerChannel = 480;
+
+constexpr float kAttackFilterConstant = 0.f;
+
+// This is computed from kDecayMs by
+// 10 ** (-1/20 * subframe_duration / kDecayMs).
+// |subframe_duration| is |kFrameDurationMs / kSubFramesInFrame|.
+// kDecayMs is defined in agc2_testing_common.h
+constexpr float kDecayFilterConstant = 0.9998848773724686f;
+
+// Number of interpolation points for each region of the limiter.
+// These values have been tuned to limit the interpolated gain curve error given
+// the limiter parameters and allowing a maximum error of +/- 32768^-1.
+constexpr size_t kInterpolatedGainCurveKneePoints = 22;
+constexpr size_t kInterpolatedGainCurveBeyondKneePoints = 10;
+constexpr size_t kInterpolatedGainCurveTotalPoints =
+    kInterpolatedGainCurveKneePoints + kInterpolatedGainCurveBeyondKneePoints;
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AGC2_AGC2_COMMON_H_
diff --git a/modules/audio_processing/agc2/agc2_testing_common.cc b/modules/audio_processing/agc2/agc2_testing_common.cc
new file mode 100644
index 0000000..6c22492
--- /dev/null
+++ b/modules/audio_processing/agc2/agc2_testing_common.cc
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/agc2_testing_common.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+namespace test {
+
+std::vector<double> LinSpace(const double l,
+                             const double r,
+                             size_t num_points) {
+  RTC_CHECK(num_points >= 2);
+  std::vector<double> points(num_points);
+  const double step = (r - l) / (num_points - 1.0);
+  points[0] = l;
+  for (size_t i = 1; i < num_points - 1; i++) {
+    points[i] = static_cast<double>(l) + i * step;
+  }
+  points[num_points - 1] = r;
+  return points;
+}
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_processing/agc2/agc2_testing_common.h b/modules/audio_processing/agc2/agc2_testing_common.h
new file mode 100644
index 0000000..a176282
--- /dev/null
+++ b/modules/audio_processing/agc2/agc2_testing_common.h
@@ -0,0 +1,34 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_AGC2_TESTING_COMMON_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_AGC2_TESTING_COMMON_H_
+
+#include <vector>
+
+#include "rtc_base/basictypes.h"
+
+namespace webrtc {
+
+namespace test {
+
+// Level Estimator test parameters.
+constexpr float kDecayMs = 500.f;
+
+// Limiter parameters.
+constexpr float kLimiterMaxInputLevelDbFs = 1.f;
+constexpr float kLimiterKneeSmoothnessDb = 1.f;
+constexpr float kLimiterCompressionRatio = 5.f;
+
+std::vector<double> LinSpace(const double l, const double r, size_t num_points);
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AGC2_AGC2_TESTING_COMMON_H_
diff --git a/modules/audio_processing/agc2/agc2_testing_common_unittest.cc b/modules/audio_processing/agc2/agc2_testing_common_unittest.cc
new file mode 100644
index 0000000..b9f7126
--- /dev/null
+++ b/modules/audio_processing/agc2/agc2_testing_common_unittest.cc
@@ -0,0 +1,26 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/agc2_testing_common.h"
+#include "rtc_base/gunit.h"
+
+namespace webrtc {
+
+TEST(AutomaticGainController2Common, TestLinSpace) {
+  std::vector<double> points1 = test::LinSpace(-1.0, 2.0, 4);
+  const std::vector<double> expected_points1{{-1.0, 0.0, 1.0, 2.0}};
+  EXPECT_EQ(expected_points1, points1);
+
+  std::vector<double> points2 = test::LinSpace(0.0, 1.0, 4);
+  const std::vector<double> expected_points2{{0.0, 1.0 / 3.0, 2.0 / 3.0, 1.0}};
+  EXPECT_EQ(points2, expected_points2);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/agc2/compute_interpolated_gain_curve.cc b/modules/audio_processing/agc2/compute_interpolated_gain_curve.cc
new file mode 100644
index 0000000..f395bce
--- /dev/null
+++ b/modules/audio_processing/agc2/compute_interpolated_gain_curve.cc
@@ -0,0 +1,228 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/compute_interpolated_gain_curve.h"
+
+#include <algorithm>
+#include <cmath>
+#include <queue>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/agc2/agc2_testing_common.h"
+#include "modules/audio_processing/agc2/limiter.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+std::pair<double, double> ComputeLinearApproximationParams(
+    const Limiter* limiter,
+    const double x) {
+  const double m = limiter->GetGainFirstDerivativeLinear(x);
+  const double q = limiter->GetGainLinear(x) - m * x;
+  return {m, q};
+}
+
+double ComputeAreaUnderPiecewiseLinearApproximation(const Limiter* limiter,
+                                                    const double x0,
+                                                    const double x1) {
+  RTC_CHECK_LT(x0, x1);
+
+  // Linear approximation in x0 and x1.
+  double m0, q0, m1, q1;
+  std::tie(m0, q0) = ComputeLinearApproximationParams(limiter, x0);
+  std::tie(m1, q1) = ComputeLinearApproximationParams(limiter, x1);
+
+  // Intersection point between two adjacent linear pieces.
+  RTC_CHECK_NE(m1, m0);
+  const double x_split = (q0 - q1) / (m1 - m0);
+  RTC_CHECK_LT(x0, x_split);
+  RTC_CHECK_LT(x_split, x1);
+
+  auto area_under_linear_piece = [](double x_l, double x_r, double m,
+                                    double q) {
+    return x_r * (m * x_r / 2.0 + q) - x_l * (m * x_l / 2.0 + q);
+  };
+  return area_under_linear_piece(x0, x_split, m0, q0) +
+         area_under_linear_piece(x_split, x1, m1, q1);
+}
+
+// Computes the approximation error in the limiter region for a given interval.
+// The error is computed as the difference between the areas beneath the limiter
+// curve to approximate and its linear under-approximation.
+double LimiterUnderApproximationNegativeError(const Limiter* limiter,
+                                              const double x0,
+                                              const double x1) {
+  const double area_limiter = limiter->GetGainIntegralLinear(x0, x1);
+  const double area_interpolated_curve =
+      ComputeAreaUnderPiecewiseLinearApproximation(limiter, x0, x1);
+  RTC_CHECK_GE(area_limiter, area_interpolated_curve);
+  return area_limiter - area_interpolated_curve;
+}
+
+// Automatically finds where to sample the beyond-knee region of a limiter using
+// a greedy optimization algorithm that iteratively decreases the approximation
+// error.
+// The solution is sub-optimal because the algorithm is greedy and the points
+// are assigned by halving intervals (starting with the whole beyond-knee region
+// as a single interval). However, even if sub-optimal, this algorithm works
+// well in practice and it is efficiently implemented using priority queues.
+std::vector<double> SampleLimiterRegion(const Limiter* limiter) {
+  static_assert(kInterpolatedGainCurveBeyondKneePoints > 2, "");
+
+  struct Interval {
+    Interval() = default;  // Ctor required by std::priority_queue.
+    Interval(double l, double r, double e) : x0(l), x1(r), error(e) {
+      RTC_CHECK(x0 < x1);
+    }
+    bool operator<(const Interval& other) const { return error < other.error; }
+
+    double x0;
+    double x1;
+    double error;
+  };
+
+  std::priority_queue<Interval, std::vector<Interval>> q;
+  q.emplace(limiter->limiter_start_linear(), limiter->max_input_level_linear(),
+            LimiterUnderApproximationNegativeError(
+                limiter, limiter->limiter_start_linear(),
+                limiter->max_input_level_linear()));
+
+  // Iteratively find points by halving the interval with greatest error.
+  while (q.size() < kInterpolatedGainCurveBeyondKneePoints) {
+    // Get the interval with highest error.
+    const auto interval = q.top();
+    q.pop();
+
+    // Split |interval| and enqueue.
+    double x_split = (interval.x0 + interval.x1) / 2.0;
+    q.emplace(interval.x0, x_split,
+              LimiterUnderApproximationNegativeError(limiter, interval.x0,
+                                                     x_split));  // Left.
+    q.emplace(x_split, interval.x1,
+              LimiterUnderApproximationNegativeError(limiter, x_split,
+                                                     interval.x1));  // Right.
+  }
+
+  // Copy x1 values and sort them.
+  RTC_CHECK_EQ(q.size(), kInterpolatedGainCurveBeyondKneePoints);
+  std::vector<double> samples(kInterpolatedGainCurveBeyondKneePoints);
+  for (size_t i = 0; i < kInterpolatedGainCurveBeyondKneePoints; ++i) {
+    const auto interval = q.top();
+    q.pop();
+    samples[i] = interval.x1;
+  }
+  RTC_CHECK(q.empty());
+  std::sort(samples.begin(), samples.end());
+
+  return samples;
+}
+
+// Compute the parameters to over-approximate the knee region via linear
+// interpolation. Over-approximating is saturation-safe since the knee region is
+// convex.
+void PrecomputeKneeApproxParams(const Limiter* limiter,
+                                test::InterpolatedParameters* parameters) {
+  static_assert(kInterpolatedGainCurveKneePoints > 2, "");
+  // Get |kInterpolatedGainCurveKneePoints| - 1 equally spaced points.
+  const std::vector<double> points = test::LinSpace(
+      limiter->knee_start_linear(), limiter->limiter_start_linear(),
+      kInterpolatedGainCurveKneePoints - 1);
+
+  // Set the first two points. The second is computed to help with the beginning
+  // of the knee region, which has high curvature.
+  parameters->computed_approximation_params_x[0] = points[0];
+  parameters->computed_approximation_params_x[1] =
+      (points[0] + points[1]) / 2.0;
+  // Copy the remaining points.
+  std::copy(std::begin(points) + 1, std::end(points),
+            std::begin(parameters->computed_approximation_params_x) + 2);
+
+  // Compute (m, q) pairs for each linear piece y = mx + q.
+  for (size_t i = 0; i < kInterpolatedGainCurveKneePoints - 1; ++i) {
+    const double x0 = parameters->computed_approximation_params_x[i];
+    const double x1 = parameters->computed_approximation_params_x[i + 1];
+    const double y0 = limiter->GetGainLinear(x0);
+    const double y1 = limiter->GetGainLinear(x1);
+    RTC_CHECK_NE(x1, x0);
+    parameters->computed_approximation_params_m[i] = (y1 - y0) / (x1 - x0);
+    parameters->computed_approximation_params_q[i] =
+        y0 - parameters->computed_approximation_params_m[i] * x0;
+  }
+}
+
+// Compute the parameters to under-approximate the beyond-knee region via linear
+// interpolation and greedy sampling. Under-approximating is saturation-safe
+// since the beyond-knee region is concave.
+void PrecomputeBeyondKneeApproxParams(
+    const Limiter* limiter,
+    test::InterpolatedParameters* parameters) {
+  // Find points on which the linear pieces are tangent to the gain curve.
+  const auto samples = SampleLimiterRegion(limiter);
+
+  // Parametrize each linear piece.
+  double m, q;
+  std::tie(m, q) = ComputeLinearApproximationParams(
+      limiter,
+      parameters
+          ->computed_approximation_params_x[kInterpolatedGainCurveKneePoints -
+                                            1]);
+  parameters
+      ->computed_approximation_params_m[kInterpolatedGainCurveKneePoints - 1] =
+      m;
+  parameters
+      ->computed_approximation_params_q[kInterpolatedGainCurveKneePoints - 1] =
+      q;
+  for (size_t i = 0; i < samples.size(); ++i) {
+    std::tie(m, q) = ComputeLinearApproximationParams(limiter, samples[i]);
+    parameters
+        ->computed_approximation_params_m[i +
+                                          kInterpolatedGainCurveKneePoints] = m;
+    parameters
+        ->computed_approximation_params_q[i +
+                                          kInterpolatedGainCurveKneePoints] = q;
+  }
+
+  // Find the point of intersection between adjacent linear pieces. They will be
+  // used as boundaries between adjacent linear pieces.
+  for (size_t i = kInterpolatedGainCurveKneePoints;
+       i < kInterpolatedGainCurveKneePoints +
+               kInterpolatedGainCurveBeyondKneePoints;
+       ++i) {
+    RTC_CHECK_NE(parameters->computed_approximation_params_m[i],
+                 parameters->computed_approximation_params_m[i - 1]);
+    parameters->computed_approximation_params_x[i] =
+        (  // Formula: (q0 - q1) / (m1 - m0).
+            parameters->computed_approximation_params_q[i - 1] -
+            parameters->computed_approximation_params_q[i]) /
+        (parameters->computed_approximation_params_m[i] -
+         parameters->computed_approximation_params_m[i - 1]);
+  }
+}
+
+}  // namespace
+
+namespace test {
+
+InterpolatedParameters ComputeInterpolatedGainCurveApproximationParams() {
+  InterpolatedParameters parameters;
+  Limiter limiter;
+  parameters.computed_approximation_params_x.fill(0.0f);
+  parameters.computed_approximation_params_m.fill(0.0f);
+  parameters.computed_approximation_params_q.fill(0.0f);
+  PrecomputeKneeApproxParams(&limiter, &parameters);
+  PrecomputeBeyondKneeApproxParams(&limiter, &parameters);
+  return parameters;
+}
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_processing/agc2/compute_interpolated_gain_curve.h b/modules/audio_processing/agc2/compute_interpolated_gain_curve.h
new file mode 100644
index 0000000..5f52441
--- /dev/null
+++ b/modules/audio_processing/agc2/compute_interpolated_gain_curve.h
@@ -0,0 +1,48 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_COMPUTE_INTERPOLATED_GAIN_CURVE_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_COMPUTE_INTERPOLATED_GAIN_CURVE_H_
+
+#include <array>
+
+#include "modules/audio_processing/agc2/agc2_common.h"
+
+namespace webrtc {
+
+namespace test {
+
+// Parameters for interpolated gain curve using under-approximation to
+// avoid saturation.
+//
+// The saturation gain is defined in order to let hard-clipping occur for
+// those samples having a level that falls in the saturation region. It is an
+// upper bound of the actual gain to apply - i.e., that returned by the
+// limiter.
+
+// Knee and beyond-knee regions approximation parameters.
+// The gain curve is approximated as a piece-wise linear function.
+// |approx_params_x_| are the boundaries between adjacent linear pieces,
+// |approx_params_m_| and |approx_params_q_| are the slope and the y-intercept
+// values of each piece.
+struct InterpolatedParameters {
+  std::array<float, kInterpolatedGainCurveTotalPoints>
+      computed_approximation_params_x;
+  std::array<float, kInterpolatedGainCurveTotalPoints>
+      computed_approximation_params_m;
+  std::array<float, kInterpolatedGainCurveTotalPoints>
+      computed_approximation_params_q;
+};
+
+InterpolatedParameters ComputeInterpolatedGainCurveApproximationParams();
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AGC2_COMPUTE_INTERPOLATED_GAIN_CURVE_H_
diff --git a/modules/audio_processing/agc2/fixed_digital_level_estimator.cc b/modules/audio_processing/agc2/fixed_digital_level_estimator.cc
new file mode 100644
index 0000000..9a1fd28
--- /dev/null
+++ b/modules/audio_processing/agc2/fixed_digital_level_estimator.cc
@@ -0,0 +1,100 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/fixed_digital_level_estimator.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+FixedDigitalLevelEstimator::FixedDigitalLevelEstimator(
+    size_t sample_rate_hz,
+    ApmDataDumper* apm_data_dumper)
+    : apm_data_dumper_(apm_data_dumper) {
+  SetSampleRate(sample_rate_hz);
+  CheckParameterCombination();
+  RTC_DCHECK(apm_data_dumper_);
+  apm_data_dumper_->DumpRaw("agc2_level_estimator_samplerate", sample_rate_hz);
+}
+
+void FixedDigitalLevelEstimator::CheckParameterCombination() {
+  RTC_DCHECK_GT(samples_in_frame_, 0);
+  RTC_DCHECK_LE(kSubFramesInFrame, samples_in_frame_);
+  RTC_DCHECK_EQ(samples_in_frame_ % kSubFramesInFrame, 0);
+  RTC_DCHECK_GT(samples_in_sub_frame_, 1);
+}
+
+std::array<float, kSubFramesInFrame> FixedDigitalLevelEstimator::ComputeLevel(
+    const AudioFrameView<const float>& float_frame) {
+  RTC_DCHECK_GT(float_frame.num_channels(), 0);
+  RTC_DCHECK_EQ(float_frame.samples_per_channel(), samples_in_frame_);
+
+  // Compute max envelope without smoothing.
+  std::array<float, kSubFramesInFrame> envelope{};
+  for (size_t channel_idx = 0; channel_idx < float_frame.num_channels();
+       ++channel_idx) {
+    const auto channel = float_frame.channel(channel_idx);
+    for (size_t sub_frame = 0; sub_frame < kSubFramesInFrame; ++sub_frame) {
+      for (size_t sample_in_sub_frame = 0;
+           sample_in_sub_frame < samples_in_sub_frame_; ++sample_in_sub_frame) {
+        envelope[sub_frame] =
+            std::max(envelope[sub_frame],
+                     std::abs(channel[sub_frame * samples_in_sub_frame_ +
+                                      sample_in_sub_frame]));
+      }
+    }
+  }
+
+  // Make sure envelope increases happen one step earlier so that the
+  // corresponding *gain decrease* doesn't miss a sudden signal
+  // increase due to interpolation.
+  for (size_t sub_frame = 0; sub_frame < kSubFramesInFrame - 1; ++sub_frame) {
+    if (envelope[sub_frame] < envelope[sub_frame + 1]) {
+      envelope[sub_frame] = envelope[sub_frame + 1];
+    }
+  }
+
+  // Add attack / decay smoothing.
+  for (size_t sub_frame = 0; sub_frame < kSubFramesInFrame; ++sub_frame) {
+    const float envelope_value = envelope[sub_frame];
+    if (envelope_value > filter_state_level_) {
+      envelope[sub_frame] = envelope_value * (1 - kAttackFilterConstant) +
+                            filter_state_level_ * kAttackFilterConstant;
+    } else {
+      envelope[sub_frame] = envelope_value * (1 - kDecayFilterConstant) +
+                            filter_state_level_ * kDecayFilterConstant;
+    }
+    filter_state_level_ = envelope[sub_frame];
+
+    // Dump data for debug.
+    RTC_DCHECK(apm_data_dumper_);
+    const auto channel = float_frame.channel(0);
+    apm_data_dumper_->DumpRaw("agc2_level_estimator_samples",
+                              samples_in_sub_frame_,
+                              &channel[sub_frame * samples_in_sub_frame_]);
+    apm_data_dumper_->DumpRaw("agc2_level_estimator_level",
+                              envelope[sub_frame]);
+  }
+
+  return envelope;
+}
+
+void FixedDigitalLevelEstimator::SetSampleRate(size_t sample_rate_hz) {
+  samples_in_frame_ = rtc::CheckedDivExact(sample_rate_hz * kFrameDurationMs,
+                                           static_cast<size_t>(1000));
+  samples_in_sub_frame_ =
+      rtc::CheckedDivExact(samples_in_frame_, kSubFramesInFrame);
+  CheckParameterCombination();
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/agc2/fixed_digital_level_estimator.h b/modules/audio_processing/agc2/fixed_digital_level_estimator.h
new file mode 100644
index 0000000..b0e7a6d
--- /dev/null
+++ b/modules/audio_processing/agc2/fixed_digital_level_estimator.h
@@ -0,0 +1,60 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_FIXED_DIGITAL_LEVEL_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_FIXED_DIGITAL_LEVEL_ESTIMATOR_H_
+
+#include <array>
+#include <vector>
+
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/include/audio_frame_view.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+// Produces a smooth signal level estimate from an input audio
+// stream. The estimate smoothing is done through exponential
+// filtering.
+class FixedDigitalLevelEstimator {
+ public:
+  // Sample rates are allowed if the number of samples in a frame
+  // (sample_rate_hz * kFrameDurationMs / 1000) is divisible by
+  // kSubFramesInSample. For kFrameDurationMs=10 and
+  // kSubFramesInSample=20, this means that sample_rate_hz has to be
+  // divisible by 2000.
+  FixedDigitalLevelEstimator(size_t sample_rate_hz,
+                             ApmDataDumper* apm_data_dumper);
+
+  // The input is assumed to be in FloatS16 format. Scaled input will
+  // produce similarly scaled output. A frame of with kFrameDurationMs
+  // ms of audio produces a level estimates in the same scale. The
+  // level estimate contains kSubFramesInFrame values.
+  std::array<float, kSubFramesInFrame> ComputeLevel(
+      const AudioFrameView<const float>& float_frame);
+
+  // Rate may be changed at any time (but not concurrently) from the
+  // value passed to the constructor. The class is not thread safe.
+  void SetSampleRate(size_t sample_rate_hz);
+
+ private:
+  void CheckParameterCombination();
+
+  ApmDataDumper* const apm_data_dumper_ = nullptr;
+  float filter_state_level_ = 0.f;
+  size_t samples_in_frame_;
+  size_t samples_in_sub_frame_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(FixedDigitalLevelEstimator);
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AGC2_FIXED_DIGITAL_LEVEL_ESTIMATOR_H_
diff --git a/modules/audio_processing/agc2/fixed_digital_level_estimator_unittest.cc b/modules/audio_processing/agc2/fixed_digital_level_estimator_unittest.cc
new file mode 100644
index 0000000..7547f8e
--- /dev/null
+++ b/modules/audio_processing/agc2/fixed_digital_level_estimator_unittest.cc
@@ -0,0 +1,158 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/fixed_digital_level_estimator.h"
+
+#include <limits>
+
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/agc2/agc2_testing_common.h"
+#include "modules/audio_processing/agc2/vector_float_frame.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/gunit.h"
+
+namespace webrtc {
+namespace {
+
+constexpr float kInputLevel = 10000.f;
+
+// Run audio at specified settings through the level estimator, and
+// verify that the output level falls within the bounds.
+void TestLevelEstimator(int sample_rate_hz,
+                        int num_channels,
+                        float input_level_linear_scale,
+                        float expected_min,
+                        float expected_max) {
+  ApmDataDumper apm_data_dumper(0);
+  FixedDigitalLevelEstimator level_estimator(sample_rate_hz, &apm_data_dumper);
+
+  const VectorFloatFrame vectors_with_float_frame(
+      num_channels, rtc::CheckedDivExact(sample_rate_hz, 100),
+      input_level_linear_scale);
+
+  for (int i = 0; i < 500; ++i) {
+    const auto level = level_estimator.ComputeLevel(
+        vectors_with_float_frame.float_frame_view());
+
+    // Give the estimator some time to ramp up.
+    if (i < 50) {
+      continue;
+    }
+
+    for (const auto& x : level) {
+      EXPECT_LE(expected_min, x);
+      EXPECT_LE(x, expected_max);
+    }
+  }
+}
+
+// Returns time it takes for the level estimator to decrease its level
+// estimate by 'level_reduction_db'.
+float TimeMsToDecreaseLevel(int sample_rate_hz,
+                            int num_channels,
+                            float input_level_db,
+                            float level_reduction_db) {
+  const float input_level = DbfsToFloatS16(input_level_db);
+  RTC_DCHECK_GT(level_reduction_db, 0);
+
+  const VectorFloatFrame vectors_with_float_frame(
+      num_channels, rtc::CheckedDivExact(sample_rate_hz, 100), input_level);
+
+  ApmDataDumper apm_data_dumper(0);
+  FixedDigitalLevelEstimator level_estimator(sample_rate_hz, &apm_data_dumper);
+
+  // Give the LevelEstimator plenty of time to ramp up and stabilize
+  float last_level = 0.f;
+  for (int i = 0; i < 500; ++i) {
+    const auto level_envelope = level_estimator.ComputeLevel(
+        vectors_with_float_frame.float_frame_view());
+    last_level = *level_envelope.rbegin();
+  }
+
+  // Set input to 0.
+  VectorFloatFrame vectors_with_zero_float_frame(
+      num_channels, rtc::CheckedDivExact(sample_rate_hz, 100), 0);
+
+  const float reduced_level_linear =
+      DbfsToFloatS16(input_level_db - level_reduction_db);
+  int sub_frames_until_level_reduction = 0;
+  while (last_level > reduced_level_linear) {
+    const auto level_envelope = level_estimator.ComputeLevel(
+        vectors_with_zero_float_frame.float_frame_view());
+    for (const auto& v : level_envelope) {
+      EXPECT_LT(v, last_level);
+      sub_frames_until_level_reduction++;
+      last_level = v;
+      if (last_level <= reduced_level_linear) {
+        break;
+      }
+    }
+  }
+  return static_cast<float>(sub_frames_until_level_reduction) *
+         kFrameDurationMs / kSubFramesInFrame;
+}
+}  // namespace
+
+TEST(AutomaticGainController2LevelEstimator, EstimatorShouldNotCrash) {
+  TestLevelEstimator(8000, 1, 0, std::numeric_limits<float>::lowest(),
+                     std::numeric_limits<float>::max());
+}
+
+TEST(AutomaticGainController2LevelEstimator,
+     EstimatorShouldEstimateConstantLevel) {
+  TestLevelEstimator(10000, 1, kInputLevel, kInputLevel * 0.99,
+                     kInputLevel * 1.01);
+}
+
+TEST(AutomaticGainController2LevelEstimator,
+     EstimatorShouldEstimateConstantLevelForManyChannels) {
+  constexpr size_t num_channels = 10;
+  TestLevelEstimator(20000, num_channels, kInputLevel, kInputLevel * 0.99,
+                     kInputLevel * 1.01);
+}
+
+TEST(AutomaticGainController2LevelEstimator, TimeToDecreaseForLowLevel) {
+  constexpr float kLevelReductionDb = 25;
+  constexpr float kInitialLowLevel = -40;
+  constexpr float kExpectedTime = kLevelReductionDb * test::kDecayMs;
+
+  const float time_to_decrease =
+      TimeMsToDecreaseLevel(22000, 1, kInitialLowLevel, kLevelReductionDb);
+
+  EXPECT_LE(kExpectedTime * 0.9, time_to_decrease);
+  EXPECT_LE(time_to_decrease, kExpectedTime * 1.1);
+}
+
+TEST(AutomaticGainController2LevelEstimator, TimeToDecreaseForFullScaleLevel) {
+  constexpr float kLevelReductionDb = 25;
+  constexpr float kExpectedTime = kLevelReductionDb * test::kDecayMs;
+
+  const float time_to_decrease =
+      TimeMsToDecreaseLevel(26000, 1, 0, kLevelReductionDb);
+
+  EXPECT_LE(kExpectedTime * 0.9, time_to_decrease);
+  EXPECT_LE(time_to_decrease, kExpectedTime * 1.1);
+}
+
+TEST(AutomaticGainController2LevelEstimator,
+     TimeToDecreaseForMultipleChannels) {
+  constexpr float kLevelReductionDb = 25;
+  constexpr float kExpectedTime = kLevelReductionDb * test::kDecayMs;
+  constexpr size_t kNumChannels = 10;
+
+  const float time_to_decrease =
+      TimeMsToDecreaseLevel(28000, kNumChannels, 0, kLevelReductionDb);
+
+  EXPECT_LE(kExpectedTime * 0.9, time_to_decrease);
+  EXPECT_LE(time_to_decrease, kExpectedTime * 1.1);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/agc2/fixed_gain_controller.cc b/modules/audio_processing/agc2/fixed_gain_controller.cc
new file mode 100644
index 0000000..a565613
--- /dev/null
+++ b/modules/audio_processing/agc2/fixed_gain_controller.cc
@@ -0,0 +1,93 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/fixed_gain_controller.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "api/array_view.h"
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/agc2/interpolated_gain_curve.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+namespace {
+
+// Returns true when the gain factor is so close to 1 that it would
+// not affect int16 samples.
+bool CloseToOne(float gain_factor) {
+  return 1.f - 1.f / kMaxFloatS16Value <= gain_factor &&
+         gain_factor <= 1.f + 1.f / kMaxFloatS16Value;
+}
+}  // namespace
+
+FixedGainController::FixedGainController(ApmDataDumper* apm_data_dumper)
+    : apm_data_dumper_(apm_data_dumper),
+      gain_curve_applier_(48000, apm_data_dumper_) {}
+
+void FixedGainController::SetGain(float gain_to_apply_db) {
+  // Changes in gain_to_apply_ cause discontinuities. We assume
+  // gain_to_apply_ is set in the beginning of the call. If it is
+  // frequently changed, we should add interpolation between the
+  // values.
+  // The gain
+  RTC_DCHECK_LE(-50.f, gain_to_apply_db);
+  RTC_DCHECK_LE(gain_to_apply_db, 50.f);
+  gain_to_apply_ = DbToRatio(gain_to_apply_db);
+  RTC_DCHECK_LT(0.f, gain_to_apply_);
+  RTC_DLOG(LS_INFO) << "Gain to apply: " << gain_to_apply_db << " db.";
+}
+
+void FixedGainController::SetSampleRate(size_t sample_rate_hz) {
+  gain_curve_applier_.SetSampleRate(sample_rate_hz);
+}
+
+void FixedGainController::EnableLimiter(bool enable_limiter) {
+  enable_limiter_ = enable_limiter;
+}
+
+void FixedGainController::Process(AudioFrameView<float> signal) {
+  // Apply fixed digital gain; interpolate if necessary. One of the
+  // planned usages of the FGC is to only use the limiter. In that
+  // case, the gain would be 1.0. Not doing the multiplications speeds
+  // it up considerably. Hence the check.
+  if (!CloseToOne(gain_to_apply_)) {
+    for (size_t k = 0; k < signal.num_channels(); ++k) {
+      rtc::ArrayView<float> channel_view = signal.channel(k);
+      for (auto& sample : channel_view) {
+        sample *= gain_to_apply_;
+      }
+    }
+  }
+
+  // Use the limiter (if configured to).
+  if (enable_limiter_) {
+    gain_curve_applier_.Process(signal);
+
+    // Dump data for debug.
+    const auto channel_view = signal.channel(0);
+    apm_data_dumper_->DumpRaw("agc2_fixed_digital_gain_curve_applier",
+                              channel_view.size(), channel_view.data());
+  }
+
+  // Hard-clipping.
+  for (size_t k = 0; k < signal.num_channels(); ++k) {
+    rtc::ArrayView<float> channel_view = signal.channel(k);
+    for (auto& sample : channel_view) {
+      sample = rtc::SafeClamp(sample, kMinFloatS16Value, kMaxFloatS16Value);
+    }
+  }
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/agc2/fixed_gain_controller.h b/modules/audio_processing/agc2/fixed_gain_controller.h
new file mode 100644
index 0000000..fd80348
--- /dev/null
+++ b/modules/audio_processing/agc2/fixed_gain_controller.h
@@ -0,0 +1,41 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_FIXED_GAIN_CONTROLLER_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_FIXED_GAIN_CONTROLLER_H_
+
+#include "modules/audio_processing/agc2/gain_curve_applier.h"
+#include "modules/audio_processing/include/audio_frame_view.h"
+
+namespace webrtc {
+class ApmDataDumper;
+
+class FixedGainController {
+ public:
+  explicit FixedGainController(ApmDataDumper* apm_data_dumper);
+
+  void Process(AudioFrameView<float> signal);
+
+  // Rate and gain may be changed at any time (but not concurrently
+  // with any other method call).
+  void SetGain(float gain_to_apply_db);
+  void SetSampleRate(size_t sample_rate_hz);
+  void EnableLimiter(bool enable_limiter);
+
+ private:
+  float gain_to_apply_ = 1.f;
+  ApmDataDumper* apm_data_dumper_ = nullptr;
+  GainCurveApplier gain_curve_applier_;
+  bool enable_limiter_ = true;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AGC2_FIXED_GAIN_CONTROLLER_H_
diff --git a/modules/audio_processing/agc2/fixed_gain_controller_unittest.cc b/modules/audio_processing/agc2/fixed_gain_controller_unittest.cc
new file mode 100644
index 0000000..1d6c2ae
--- /dev/null
+++ b/modules/audio_processing/agc2/fixed_gain_controller_unittest.cc
@@ -0,0 +1,190 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/fixed_gain_controller.h"
+
+#include "api/array_view.h"
+#include "modules/audio_processing/agc2/agc2_testing_common.h"
+#include "modules/audio_processing/agc2/vector_float_frame.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/ptr_util.h"
+
+namespace webrtc {
+namespace {
+
+constexpr float kInputLevelLinear = 15000.f;
+
+constexpr float kGainToApplyDb = 15.f;
+
+float RunFixedGainControllerWithConstantInput(FixedGainController* fixed_gc,
+                                              const float input_level,
+                                              const size_t num_frames,
+                                              const int sample_rate) {
+  // Give time to the level etimator to converge.
+  for (size_t i = 0; i < num_frames; ++i) {
+    VectorFloatFrame vectors_with_float_frame(
+        1, rtc::CheckedDivExact(sample_rate, 100), input_level);
+    fixed_gc->Process(vectors_with_float_frame.float_frame_view());
+  }
+
+  // Process the last frame with constant input level.
+  VectorFloatFrame vectors_with_float_frame_last(
+      1, rtc::CheckedDivExact(sample_rate, 100), input_level);
+  fixed_gc->Process(vectors_with_float_frame_last.float_frame_view());
+
+  // Return the last sample from the last processed frame.
+  const auto channel =
+      vectors_with_float_frame_last.float_frame_view().channel(0);
+  return channel[channel.size() - 1];
+}
+ApmDataDumper test_data_dumper(0);
+
+std::unique_ptr<FixedGainController> CreateFixedGainController(
+    float gain_to_apply,
+    size_t rate,
+    bool enable_limiter) {
+  std::unique_ptr<FixedGainController> fgc =
+      rtc::MakeUnique<FixedGainController>(&test_data_dumper);
+  fgc->SetGain(gain_to_apply);
+  fgc->SetSampleRate(rate);
+  fgc->EnableLimiter(enable_limiter);
+  return fgc;
+}
+
+}  // namespace
+
+TEST(AutomaticGainController2FixedDigital, CreateUseWithoutLimiter) {
+  const int kSampleRate = 48000;
+  std::unique_ptr<FixedGainController> fixed_gc =
+      CreateFixedGainController(kGainToApplyDb, kSampleRate, false);
+  VectorFloatFrame vectors_with_float_frame(
+      1, rtc::CheckedDivExact(kSampleRate, 100), kInputLevelLinear);
+  auto float_frame = vectors_with_float_frame.float_frame_view();
+  fixed_gc->Process(float_frame);
+  const auto channel = float_frame.channel(0);
+  EXPECT_LT(kInputLevelLinear, channel[0]);
+}
+
+TEST(AutomaticGainController2FixedDigital, CreateUseWithLimiter) {
+  const int kSampleRate = 44000;
+  std::unique_ptr<FixedGainController> fixed_gc =
+      CreateFixedGainController(kGainToApplyDb, kSampleRate, true);
+  VectorFloatFrame vectors_with_float_frame(
+      1, rtc::CheckedDivExact(kSampleRate, 100), kInputLevelLinear);
+  auto float_frame = vectors_with_float_frame.float_frame_view();
+  fixed_gc->Process(float_frame);
+  const auto channel = float_frame.channel(0);
+  EXPECT_LT(kInputLevelLinear, channel[0]);
+}
+
+TEST(AutomaticGainController2FixedDigital, CheckSaturationBehaviorWithLimiter) {
+  const float kInputLevel = 32767.f;
+  const size_t kNumFrames = 5;
+  const size_t kSampleRate = 42000;
+
+  const auto gains_no_saturation =
+      test::LinSpace(0.1, test::kLimiterMaxInputLevelDbFs - 0.01, 10);
+  for (const auto gain_db : gains_no_saturation) {
+    // Since |test::kLimiterMaxInputLevelDbFs| > |gain_db|, the
+    // limiter will not saturate the signal.
+    std::unique_ptr<FixedGainController> fixed_gc_no_saturation =
+        CreateFixedGainController(gain_db, kSampleRate, true);
+
+    // Saturation not expected.
+    SCOPED_TRACE(std::to_string(gain_db));
+    EXPECT_LT(
+        RunFixedGainControllerWithConstantInput(
+            fixed_gc_no_saturation.get(), kInputLevel, kNumFrames, kSampleRate),
+        32767.f);
+  }
+
+  const auto gains_saturation =
+      test::LinSpace(test::kLimiterMaxInputLevelDbFs + 0.01, 10, 10);
+  for (const auto gain_db : gains_saturation) {
+    // Since |test::kLimiterMaxInputLevelDbFs| < |gain|, the limiter
+    // will saturate the signal.
+    std::unique_ptr<FixedGainController> fixed_gc_saturation =
+        CreateFixedGainController(gain_db, kSampleRate, true);
+
+    // Saturation expected.
+    SCOPED_TRACE(std::to_string(gain_db));
+    EXPECT_FLOAT_EQ(
+        RunFixedGainControllerWithConstantInput(
+            fixed_gc_saturation.get(), kInputLevel, kNumFrames, kSampleRate),
+        32767.f);
+  }
+}
+
+TEST(AutomaticGainController2FixedDigital,
+     CheckSaturationBehaviorWithLimiterSingleSample) {
+  const float kInputLevel = 32767.f;
+  const size_t kNumFrames = 5;
+  const size_t kSampleRate = 8000;
+
+  const auto gains_no_saturation =
+      test::LinSpace(0.1, test::kLimiterMaxInputLevelDbFs - 0.01, 10);
+  for (const auto gain_db : gains_no_saturation) {
+    // Since |gain| > |test::kLimiterMaxInputLevelDbFs|, the limiter will
+    // not saturate the signal.
+    std::unique_ptr<FixedGainController> fixed_gc_no_saturation =
+        CreateFixedGainController(gain_db, kSampleRate, true);
+
+    // Saturation not expected.
+    SCOPED_TRACE(std::to_string(gain_db));
+    EXPECT_LT(
+        RunFixedGainControllerWithConstantInput(
+            fixed_gc_no_saturation.get(), kInputLevel, kNumFrames, kSampleRate),
+        32767.f);
+  }
+
+  const auto gains_saturation =
+      test::LinSpace(test::kLimiterMaxInputLevelDbFs + 0.01, 10, 10);
+  for (const auto gain_db : gains_saturation) {
+    // Singe |gain| < |test::kLimiterMaxInputLevelDbFs|, the limiter will
+    // saturate the signal.
+    std::unique_ptr<FixedGainController> fixed_gc_saturation =
+        CreateFixedGainController(gain_db, kSampleRate, true);
+
+    // Saturation expected.
+    SCOPED_TRACE(std::to_string(gain_db));
+    EXPECT_FLOAT_EQ(
+        RunFixedGainControllerWithConstantInput(
+            fixed_gc_saturation.get(), kInputLevel, kNumFrames, kSampleRate),
+        32767.f);
+  }
+}
+
+TEST(AutomaticGainController2FixedDigital, GainShouldChangeOnSetGain) {
+  constexpr float kInputLevel = 1000.f;
+  constexpr size_t kNumFrames = 5;
+  constexpr size_t kSampleRate = 8000;
+  constexpr float kGainDbNoChange = 0.f;
+  constexpr float kGainDbFactor10 = 20.f;
+
+  std::unique_ptr<FixedGainController> fixed_gc_no_saturation =
+      CreateFixedGainController(kGainDbNoChange, kSampleRate, false);
+
+  // Signal level is unchanged with 0 db gain.
+  EXPECT_FLOAT_EQ(
+      RunFixedGainControllerWithConstantInput(
+          fixed_gc_no_saturation.get(), kInputLevel, kNumFrames, kSampleRate),
+      kInputLevel);
+
+  fixed_gc_no_saturation->SetGain(kGainDbFactor10);
+
+  // +20db should increase signal by a factor of 10.
+  EXPECT_FLOAT_EQ(
+      RunFixedGainControllerWithConstantInput(
+          fixed_gc_no_saturation.get(), kInputLevel, kNumFrames, kSampleRate),
+      kInputLevel * 10);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/agc2/gain_curve_applier.cc b/modules/audio_processing/agc2/gain_curve_applier.cc
new file mode 100644
index 0000000..122839a
--- /dev/null
+++ b/modules/audio_processing/agc2/gain_curve_applier.cc
@@ -0,0 +1,132 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/gain_curve_applier.h"
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+// This constant affects the way scaling factors are interpolated for the first
+// sub-frame of a frame. Only in the case in which the first sub-frame has an
+// estimated level which is greater than the that of the previous analyzed
+// sub-frame, linear interpolation is replaced with a power function which
+// reduces the chances of over-shooting (and hence saturation), however reducing
+// the fixed gain effectiveness.
+constexpr float kAttackFirstSubframeInterpolationPower = 8.f;
+
+void InterpolateFirstSubframe(float last_factor,
+                              float current_factor,
+                              rtc::ArrayView<float> subframe) {
+  const auto n = subframe.size();
+  constexpr auto p = kAttackFirstSubframeInterpolationPower;
+  for (size_t i = 0; i < n; ++i) {
+    subframe[i] = std::pow(1.f - i / n, p) * (last_factor - current_factor) +
+                  current_factor;
+  }
+}
+
+void ComputePerSampleSubframeFactors(
+    const std::array<float, kSubFramesInFrame + 1>& scaling_factors,
+    size_t samples_per_channel,
+    rtc::ArrayView<float> per_sample_scaling_factors) {
+  const size_t num_subframes = scaling_factors.size() - 1;
+  const size_t subframe_size =
+      rtc::CheckedDivExact(samples_per_channel, num_subframes);
+
+  // Handle first sub-frame differently in case of attack.
+  const bool is_attack = scaling_factors[0] > scaling_factors[1];
+  if (is_attack) {
+    InterpolateFirstSubframe(
+        scaling_factors[0], scaling_factors[1],
+        rtc::ArrayView<float>(
+            per_sample_scaling_factors.subview(0, subframe_size)));
+  }
+
+  for (size_t i = is_attack ? 1 : 0; i < num_subframes; ++i) {
+    const size_t subframe_start = i * subframe_size;
+    const float scaling_start = scaling_factors[i];
+    const float scaling_end = scaling_factors[i + 1];
+    const float scaling_diff = (scaling_end - scaling_start) / subframe_size;
+    for (size_t j = 0; j < subframe_size; ++j) {
+      per_sample_scaling_factors[subframe_start + j] =
+          scaling_start + scaling_diff * j;
+    }
+  }
+}
+
+void ScaleSamples(rtc::ArrayView<const float> per_sample_scaling_factors,
+                  AudioFrameView<float> signal) {
+  const size_t samples_per_channel = signal.samples_per_channel();
+  RTC_DCHECK_EQ(samples_per_channel, per_sample_scaling_factors.size());
+  for (size_t i = 0; i < signal.num_channels(); ++i) {
+    auto channel = signal.channel(i);
+    for (size_t j = 0; j < samples_per_channel; ++j) {
+      channel[j] *= per_sample_scaling_factors[j];
+    }
+  }
+}
+
+}  // namespace
+
+GainCurveApplier::GainCurveApplier(size_t sample_rate_hz,
+                                   ApmDataDumper* apm_data_dumper)
+    : interp_gain_curve_(apm_data_dumper),
+      level_estimator_(sample_rate_hz, apm_data_dumper),
+      apm_data_dumper_(apm_data_dumper) {}
+
+GainCurveApplier::~GainCurveApplier() = default;
+
+void GainCurveApplier::Process(AudioFrameView<float> signal) {
+  const auto level_estimate = level_estimator_.ComputeLevel(signal);
+
+  RTC_DCHECK_EQ(level_estimate.size() + 1, scaling_factors_.size());
+  scaling_factors_[0] = last_scaling_factor_;
+  std::transform(level_estimate.begin(), level_estimate.end(),
+                 scaling_factors_.begin() + 1, [this](float x) {
+                   return interp_gain_curve_.LookUpGainToApply(x);
+                 });
+
+  const size_t samples_per_channel = signal.samples_per_channel();
+  RTC_DCHECK_LE(samples_per_channel, kMaximalNumberOfSamplesPerChannel);
+
+  auto per_sample_scaling_factors = rtc::ArrayView<float>(
+      &per_sample_scaling_factors_[0], samples_per_channel);
+  ComputePerSampleSubframeFactors(scaling_factors_, samples_per_channel,
+                                  per_sample_scaling_factors);
+  ScaleSamples(per_sample_scaling_factors, signal);
+
+  last_scaling_factor_ = scaling_factors_.back();
+
+  // Dump data for debug.
+  apm_data_dumper_->DumpRaw("agc2_gain_curve_applier_scaling_factors",
+                            samples_per_channel,
+                            per_sample_scaling_factors_.data());
+}
+
+InterpolatedGainCurve::Stats GainCurveApplier::GetGainCurveStats() const {
+  return interp_gain_curve_.get_stats();
+}
+
+void GainCurveApplier::SetSampleRate(size_t sample_rate_hz) {
+  level_estimator_.SetSampleRate(sample_rate_hz);
+  // Check that per_sample_scaling_factors_ is large enough.
+  RTC_DCHECK_LE(sample_rate_hz,
+                kMaximalNumberOfSamplesPerChannel * 1000 / kFrameDurationMs);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/agc2/gain_curve_applier.h b/modules/audio_processing/agc2/gain_curve_applier.h
new file mode 100644
index 0000000..86ca251
--- /dev/null
+++ b/modules/audio_processing/agc2/gain_curve_applier.h
@@ -0,0 +1,56 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_GAIN_CURVE_APPLIER_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_GAIN_CURVE_APPLIER_H_
+
+#include <vector>
+
+#include "modules/audio_processing/agc2/fixed_digital_level_estimator.h"
+#include "modules/audio_processing/agc2/interpolated_gain_curve.h"
+#include "modules/audio_processing/include/audio_frame_view.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+class ApmDataDumper;
+
+class GainCurveApplier {
+ public:
+  GainCurveApplier(size_t sample_rate_hz, ApmDataDumper* apm_data_dumper);
+
+  ~GainCurveApplier();
+
+  void Process(AudioFrameView<float> signal);
+  InterpolatedGainCurve::Stats GetGainCurveStats() const;
+
+  // Supported rates must be
+  // * supported by FixedDigitalLevelEstimator
+  // * below kMaximalNumberOfSamplesPerChannel*1000/kFrameDurationMs
+  //   so that samples_per_channel fit in the
+  //   per_sample_scaling_factors_ array.
+  void SetSampleRate(size_t sample_rate_hz);
+
+ private:
+  const InterpolatedGainCurve interp_gain_curve_;
+  FixedDigitalLevelEstimator level_estimator_;
+  ApmDataDumper* const apm_data_dumper_ = nullptr;
+
+  // Work array containing the sub-frame scaling factors to be interpolated.
+  std::array<float, kSubFramesInFrame + 1> scaling_factors_ = {};
+  std::array<float, kMaximalNumberOfSamplesPerChannel>
+      per_sample_scaling_factors_ = {};
+  float last_scaling_factor_ = 1.f;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(GainCurveApplier);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AGC2_GAIN_CURVE_APPLIER_H_
diff --git a/modules/audio_processing/agc2/gain_curve_applier_unittest.cc b/modules/audio_processing/agc2/gain_curve_applier_unittest.cc
new file mode 100644
index 0000000..d9179a4
--- /dev/null
+++ b/modules/audio_processing/agc2/gain_curve_applier_unittest.cc
@@ -0,0 +1,60 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/gain_curve_applier.h"
+
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/agc2/agc2_testing_common.h"
+#include "modules/audio_processing/agc2/vector_float_frame.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/gunit.h"
+
+namespace webrtc {
+
+TEST(GainCurveApplier, GainCurveApplierShouldConstructAndRun) {
+  const int sample_rate_hz = 48000;
+  ApmDataDumper apm_data_dumper(0);
+
+  GainCurveApplier gain_curve_applier(sample_rate_hz, &apm_data_dumper);
+
+  VectorFloatFrame vectors_with_float_frame(1, sample_rate_hz / 100,
+                                            kMaxAbsFloatS16Value);
+  gain_curve_applier.Process(vectors_with_float_frame.float_frame_view());
+}
+
+TEST(GainCurveApplier, OutputVolumeAboveThreshold) {
+  const int sample_rate_hz = 48000;
+  const float input_level =
+      (kMaxAbsFloatS16Value + DbfsToFloatS16(test::kLimiterMaxInputLevelDbFs)) /
+      2.f;
+  ApmDataDumper apm_data_dumper(0);
+
+  GainCurveApplier gain_curve_applier(sample_rate_hz, &apm_data_dumper);
+
+  // Give the level estimator time to adapt.
+  for (int i = 0; i < 5; ++i) {
+    VectorFloatFrame vectors_with_float_frame(1, sample_rate_hz / 100,
+                                              input_level);
+    gain_curve_applier.Process(vectors_with_float_frame.float_frame_view());
+  }
+
+  VectorFloatFrame vectors_with_float_frame(1, sample_rate_hz / 100,
+                                            input_level);
+  gain_curve_applier.Process(vectors_with_float_frame.float_frame_view());
+  rtc::ArrayView<const float> channel =
+      vectors_with_float_frame.float_frame_view().channel(0);
+
+  for (const auto& sample : channel) {
+    EXPECT_LT(0.9f * kMaxAbsFloatS16Value, sample);
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/agc2/interpolated_gain_curve.cc b/modules/audio_processing/agc2/interpolated_gain_curve.cc
new file mode 100644
index 0000000..69602b5
--- /dev/null
+++ b/modules/audio_processing/agc2/interpolated_gain_curve.cc
@@ -0,0 +1,139 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/interpolated_gain_curve.h"
+
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+void LogRegionStats(const InterpolatedGainCurve::Stats& stats) {
+  using Region = InterpolatedGainCurve::GainCurveRegion;
+
+  std::string histogram_name = "WebRTC.Audio.AGC2.FixedDigitalGainCurveRegion.";
+  if (stats.region == Region::kIdentity) {
+    histogram_name += "Identity";
+  } else if (stats.region == Region::kKnee) {
+    histogram_name += "Knee";
+  } else if (stats.region == Region::kLimiter) {
+    histogram_name += "Limiter";
+  } else {
+    histogram_name += "Saturation";
+  }
+  RTC_HISTOGRAM_COUNTS_10000(histogram_name,
+                             stats.region_duration_frames / 100);
+}
+}  // namespace
+
+constexpr std::array<float, kInterpolatedGainCurveTotalPoints>
+    InterpolatedGainCurve::approximation_params_x_;
+
+constexpr std::array<float, kInterpolatedGainCurveTotalPoints>
+    InterpolatedGainCurve::approximation_params_m_;
+
+constexpr std::array<float, kInterpolatedGainCurveTotalPoints>
+    InterpolatedGainCurve::approximation_params_q_;
+
+InterpolatedGainCurve::InterpolatedGainCurve(ApmDataDumper* apm_data_dumper)
+    : apm_data_dumper_(apm_data_dumper) {}
+
+InterpolatedGainCurve::~InterpolatedGainCurve() {
+  if (stats_.available) {
+    RTC_DCHECK(apm_data_dumper_);
+    apm_data_dumper_->DumpRaw("agc2_interp_gain_curve_lookups_identity",
+                              stats_.look_ups_identity_region);
+    apm_data_dumper_->DumpRaw("agc2_interp_gain_curve_lookups_knee",
+                              stats_.look_ups_knee_region);
+    apm_data_dumper_->DumpRaw("agc2_interp_gain_curve_lookups_limiter",
+                              stats_.look_ups_limiter_region);
+    apm_data_dumper_->DumpRaw("agc2_interp_gain_curve_lookups_saturation",
+                              stats_.look_ups_saturation_region);
+    LogRegionStats(stats_);
+  }
+}
+
+void InterpolatedGainCurve::UpdateStats(float input_level) const {
+  stats_.available = true;
+
+  GainCurveRegion region;
+
+  if (input_level < approximation_params_x_[0]) {
+    stats_.look_ups_identity_region++;
+    region = GainCurveRegion::kIdentity;
+  } else if (input_level <
+             approximation_params_x_[kInterpolatedGainCurveKneePoints - 1]) {
+    stats_.look_ups_knee_region++;
+    region = GainCurveRegion::kKnee;
+  } else if (input_level < kMaxInputLevelLinear) {
+    stats_.look_ups_limiter_region++;
+    region = GainCurveRegion::kLimiter;
+  } else {
+    stats_.look_ups_saturation_region++;
+    region = GainCurveRegion::kSaturation;
+  }
+
+  if (region == stats_.region) {
+    ++stats_.region_duration_frames;
+  } else {
+    LogRegionStats(stats_);
+
+    stats_.region_duration_frames = 0;
+    stats_.region = region;
+  }
+}
+
+// Looks up a gain to apply given a non-negative input level.
+// The cost of this operation depends on the region in which |input_level|
+// falls.
+// For the identity and the saturation regions the cost is O(1).
+// For the other regions, namely knee and limiter, the cost is
+// O(2 + log2(|LightkInterpolatedGainCurveTotalPoints|), plus O(1) for the
+// linear interpolation (one product and one sum).
+float InterpolatedGainCurve::LookUpGainToApply(float input_level) const {
+  UpdateStats(input_level);
+
+  if (input_level <= approximation_params_x_[0]) {
+    // Identity region.
+    return 1.0f;
+  }
+
+  if (input_level >= kMaxInputLevelLinear) {
+    // Saturating lower bound. The saturing samples exactly hit the clipping
+    // level. This method achieves has the lowest harmonic distorsion, but it
+    // may reduce the amplitude of the non-saturating samples too much.
+    return 32768.f / input_level;
+  }
+
+  // Knee and limiter regions; find the linear piece index. Spelling
+  // out the complete type was the only way to silence both the clang
+  // plugin and the windows compilers.
+  std::array<float, kInterpolatedGainCurveTotalPoints>::const_iterator it =
+      std::lower_bound(approximation_params_x_.begin(),
+                       approximation_params_x_.end(), input_level);
+  const size_t index = std::distance(approximation_params_x_.begin(), it) - 1;
+  RTC_DCHECK_LE(0, index);
+  RTC_DCHECK_LT(index, approximation_params_m_.size());
+  RTC_DCHECK_LE(approximation_params_x_[index], input_level);
+  if (index < approximation_params_m_.size() - 1) {
+    RTC_DCHECK_LE(input_level, approximation_params_x_[index + 1]);
+  }
+
+  // Piece-wise linear interploation.
+  const float gain = approximation_params_m_[index] * input_level +
+                     approximation_params_q_[index];
+  RTC_DCHECK_LE(0.f, gain);
+  return gain;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/agc2/interpolated_gain_curve.h b/modules/audio_processing/agc2/interpolated_gain_curve.h
new file mode 100644
index 0000000..ddceaaa
--- /dev/null
+++ b/modules/audio_processing/agc2/interpolated_gain_curve.h
@@ -0,0 +1,134 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_INTERPOLATED_GAIN_CURVE_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_INTERPOLATED_GAIN_CURVE_H_
+
+#include <array>
+
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "rtc_base/basictypes.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/gtest_prod_util.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+
+constexpr float kInputLevelScalingFactor = 32768.0f;
+
+// Defined as DbfsToLinear(kLimiterMaxInputLevelDbFs)
+constexpr float kMaxInputLevelLinear = static_cast<float>(36766.300710566735);
+
+// Interpolated gain curve using under-approximation to avoid saturation.
+//
+// The goal of this class is allowing fast look ups to get an accurate
+// estimates of the gain to apply given an estimated input level.
+class InterpolatedGainCurve {
+ public:
+  enum class GainCurveRegion {
+    kIdentity = 0,
+    kKnee = 1,
+    kLimiter = 2,
+    kSaturation = 3
+  };
+
+  struct Stats {
+    // Region in which the output level equals the input one.
+    size_t look_ups_identity_region = 0;
+    // Smoothing between the identity and the limiter regions.
+    size_t look_ups_knee_region = 0;
+    // Limiter region in which the output and input levels are linearly related.
+    size_t look_ups_limiter_region = 0;
+    // Region in which saturation may occur since the input level is beyond the
+    // maximum expected by the limiter.
+    size_t look_ups_saturation_region = 0;
+    // True if stats have been populated.
+    bool available = false;
+
+    // The current region, and for how many frames the level has been
+    // in that region.
+    GainCurveRegion region = GainCurveRegion::kIdentity;
+    int64_t region_duration_frames = 0;
+  };
+
+  // InterpolatedGainCurve(InterpolatedGainCurve&&);
+  explicit InterpolatedGainCurve(ApmDataDumper* apm_data_dumper);
+  ~InterpolatedGainCurve();
+
+  Stats get_stats() const { return stats_; }
+
+  // Given a non-negative input level (linear scale), a scalar factor to apply
+  // to a sub-frame is returned.
+  // Levels above kLimiterMaxInputLevelDbFs will be reduced to 0 dBFS
+  // after applying this gain
+  float LookUpGainToApply(float input_level) const;
+
+ private:
+  // For comparing 'approximation_params_*_' with ones computed by
+  // ComputeInterpolatedGainCurve.
+  FRIEND_TEST_ALL_PREFIXES(AutomaticGainController2InterpolatedGainCurve,
+                           CheckApproximationParams);
+  void UpdateStats(float input_level) const;
+
+  ApmDataDumper* const apm_data_dumper_;
+
+  static constexpr std::array<float, kInterpolatedGainCurveTotalPoints>
+      approximation_params_x_ = {
+          {30057.296875,    30148.986328125, 30240.67578125,  30424.052734375,
+           30607.4296875,   30790.806640625, 30974.18359375,  31157.560546875,
+           31340.939453125, 31524.31640625,  31707.693359375, 31891.0703125,
+           32074.447265625, 32257.82421875,  32441.201171875, 32624.580078125,
+           32807.95703125,  32991.33203125,  33174.7109375,   33358.08984375,
+           33541.46484375,  33724.84375,     33819.53515625,  34009.5390625,
+           34200.05859375,  34389.81640625,  34674.48828125,  35054.375,
+           35434.86328125,  35814.81640625,  36195.16796875,  36575.03125}};
+  static constexpr std::array<float, kInterpolatedGainCurveTotalPoints>
+      approximation_params_m_ = {
+          {-3.515235675877192989e-07, -1.050251626111275982e-06,
+           -2.085213736791047268e-06, -3.443004743530764244e-06,
+           -4.773849468620028347e-06, -6.077375928725814447e-06,
+           -7.353257842623861507e-06, -8.601219633419532329e-06,
+           -9.821013009059242904e-06, -1.101243378798244521e-05,
+           -1.217532644659513608e-05, -1.330956911260727793e-05,
+           -1.441507538402220234e-05, -1.549179251014720649e-05,
+           -1.653970684856176376e-05, -1.755882840370759368e-05,
+           -1.854918446042574942e-05, -1.951086778717581183e-05,
+           -2.044398024736437947e-05, -2.1348627342376858e-05,
+           -2.222496914328075945e-05, -2.265374678245279938e-05,
+           -2.242570917587727308e-05, -2.220122041762806475e-05,
+           -2.19802095671184361e-05,  -2.176260204578284174e-05,
+           -2.133731686626560986e-05, -2.092481918225530535e-05,
+           -2.052459603874012828e-05, -2.013615448959171772e-05,
+           -1.975903069251216948e-05, -1.939277899509761482e-05}};
+
+  static constexpr std::array<float, kInterpolatedGainCurveTotalPoints>
+      approximation_params_q_ = {
+          {1.010565876960754395, 1.031631827354431152, 1.062929749488830566,
+           1.104239225387573242, 1.144973039627075195, 1.185109615325927734,
+           1.224629044532775879, 1.263512492179870605, 1.301741957664489746,
+           1.339300632476806641, 1.376173257827758789, 1.412345528602600098,
+           1.447803974151611328, 1.482536554336547852, 1.516532182693481445,
+           1.549780607223510742, 1.582272171974182129, 1.613999366760253906,
+           1.644955039024353027, 1.675132393836975098, 1.704526185989379883,
+           1.718986630439758301, 1.711274504661560059, 1.703639745712280273,
+           1.696081161499023438, 1.688597679138183594, 1.673851132392883301,
+           1.659391283988952637, 1.645209431648254395, 1.631297469139099121,
+           1.617647409439086914, 1.604251742362976074}};
+
+  // Stats.
+  mutable Stats stats_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(InterpolatedGainCurve);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AGC2_INTERPOLATED_GAIN_CURVE_H_
diff --git a/modules/audio_processing/agc2/interpolated_gain_curve_unittest.cc b/modules/audio_processing/agc2/interpolated_gain_curve_unittest.cc
new file mode 100644
index 0000000..d75ddbd
--- /dev/null
+++ b/modules/audio_processing/agc2/interpolated_gain_curve_unittest.cc
@@ -0,0 +1,202 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <array>
+#include <vector>
+
+#include "api/array_view.h"
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "modules/audio_processing/agc2/compute_interpolated_gain_curve.h"
+#include "modules/audio_processing/agc2/interpolated_gain_curve.h"
+#include "modules/audio_processing/agc2/limiter.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/gunit.h"
+
+namespace webrtc {
+namespace {
+
+constexpr double kLevelEpsilon = 1e-2 * kMaxAbsFloatS16Value;
+constexpr float kInterpolatedGainCurveTolerance = 1.f / 32768.f;
+ApmDataDumper apm_data_dumper(0);
+const Limiter limiter;
+
+}  // namespace
+
+TEST(AutomaticGainController2InterpolatedGainCurve, CreateUse) {
+  InterpolatedGainCurve igc(&apm_data_dumper);
+
+  const auto levels = test::LinSpace(
+      kLevelEpsilon, DbfsToFloatS16(limiter.max_input_level_db() + 1), 500);
+  for (const auto level : levels) {
+    EXPECT_GE(igc.LookUpGainToApply(level), 0.0f);
+  }
+}
+
+TEST(AutomaticGainController2InterpolatedGainCurve, CheckValidOutput) {
+  InterpolatedGainCurve igc(&apm_data_dumper);
+
+  const auto levels = test::LinSpace(
+      kLevelEpsilon, limiter.max_input_level_linear() * 2.0, 500);
+  for (const auto level : levels) {
+    SCOPED_TRACE(std::to_string(level));
+    const float gain = igc.LookUpGainToApply(level);
+    EXPECT_LE(0.0f, gain);
+    EXPECT_LE(gain, 1.0f);
+  }
+}
+
+TEST(AutomaticGainController2InterpolatedGainCurve, CheckMonotonicity) {
+  InterpolatedGainCurve igc(&apm_data_dumper);
+
+  const auto levels = test::LinSpace(
+      kLevelEpsilon, limiter.max_input_level_linear() + kLevelEpsilon + 0.5,
+      500);
+  float prev_gain = igc.LookUpGainToApply(0.0f);
+  for (const auto level : levels) {
+    const float gain = igc.LookUpGainToApply(level);
+    EXPECT_GE(prev_gain, gain);
+    prev_gain = gain;
+  }
+}
+
+TEST(AutomaticGainController2InterpolatedGainCurve, CheckApproximation) {
+  InterpolatedGainCurve igc(&apm_data_dumper);
+
+  const auto levels = test::LinSpace(
+      kLevelEpsilon, limiter.max_input_level_linear() - kLevelEpsilon, 500);
+  for (const auto level : levels) {
+    SCOPED_TRACE(std::to_string(level));
+    EXPECT_LT(
+        std::fabs(limiter.GetGainLinear(level) - igc.LookUpGainToApply(level)),
+        kInterpolatedGainCurveTolerance);
+  }
+}
+
+TEST(AutomaticGainController2InterpolatedGainCurve, CheckRegionBoundaries) {
+  InterpolatedGainCurve igc(&apm_data_dumper);
+
+  const std::vector<double> levels{
+      {kLevelEpsilon, limiter.knee_start_linear() + kLevelEpsilon,
+       limiter.limiter_start_linear() + kLevelEpsilon,
+       limiter.max_input_level_linear() + kLevelEpsilon}};
+  for (const auto level : levels) {
+    igc.LookUpGainToApply(level);
+  }
+
+  const auto stats = igc.get_stats();
+  EXPECT_EQ(1ul, stats.look_ups_identity_region);
+  EXPECT_EQ(1ul, stats.look_ups_knee_region);
+  EXPECT_EQ(1ul, stats.look_ups_limiter_region);
+  EXPECT_EQ(1ul, stats.look_ups_saturation_region);
+}
+
+TEST(AutomaticGainController2InterpolatedGainCurve, CheckIdentityRegion) {
+  constexpr size_t kNumSteps = 10;
+  InterpolatedGainCurve igc(&apm_data_dumper);
+
+  const auto levels =
+      test::LinSpace(kLevelEpsilon, limiter.knee_start_linear(), kNumSteps);
+  for (const auto level : levels) {
+    SCOPED_TRACE(std::to_string(level));
+    EXPECT_EQ(1.0f, igc.LookUpGainToApply(level));
+  }
+
+  const auto stats = igc.get_stats();
+  EXPECT_EQ(kNumSteps - 1, stats.look_ups_identity_region);
+  EXPECT_EQ(1ul, stats.look_ups_knee_region);
+  EXPECT_EQ(0ul, stats.look_ups_limiter_region);
+  EXPECT_EQ(0ul, stats.look_ups_saturation_region);
+}
+
+TEST(AutomaticGainController2InterpolatedGainCurve,
+     CheckNoOverApproximationKnee) {
+  constexpr size_t kNumSteps = 10;
+  InterpolatedGainCurve igc(&apm_data_dumper);
+
+  const auto levels =
+      test::LinSpace(limiter.knee_start_linear() + kLevelEpsilon,
+                     limiter.limiter_start_linear(), kNumSteps);
+  for (const auto level : levels) {
+    SCOPED_TRACE(std::to_string(level));
+    // Small tolerance added (needed because comparing a float with a double).
+    EXPECT_LE(igc.LookUpGainToApply(level),
+              limiter.GetGainLinear(level) + 1e-7);
+  }
+
+  const auto stats = igc.get_stats();
+  EXPECT_EQ(0ul, stats.look_ups_identity_region);
+  EXPECT_EQ(kNumSteps - 1, stats.look_ups_knee_region);
+  EXPECT_EQ(1ul, stats.look_ups_limiter_region);
+  EXPECT_EQ(0ul, stats.look_ups_saturation_region);
+}
+
+TEST(AutomaticGainController2InterpolatedGainCurve,
+     CheckNoOverApproximationBeyondKnee) {
+  constexpr size_t kNumSteps = 10;
+  InterpolatedGainCurve igc(&apm_data_dumper);
+
+  const auto levels = test::LinSpace(
+      limiter.limiter_start_linear() + kLevelEpsilon,
+      limiter.max_input_level_linear() - kLevelEpsilon, kNumSteps);
+  for (const auto level : levels) {
+    SCOPED_TRACE(std::to_string(level));
+    // Small tolerance added (needed because comparing a float with a double).
+    EXPECT_LE(igc.LookUpGainToApply(level),
+              limiter.GetGainLinear(level) + 1e-7);
+  }
+
+  const auto stats = igc.get_stats();
+  EXPECT_EQ(0ul, stats.look_ups_identity_region);
+  EXPECT_EQ(0ul, stats.look_ups_knee_region);
+  EXPECT_EQ(kNumSteps, stats.look_ups_limiter_region);
+  EXPECT_EQ(0ul, stats.look_ups_saturation_region);
+}
+
+TEST(AutomaticGainController2InterpolatedGainCurve,
+     CheckNoOverApproximationWithSaturation) {
+  constexpr size_t kNumSteps = 3;
+  InterpolatedGainCurve igc(&apm_data_dumper);
+
+  const auto levels = test::LinSpace(
+      limiter.max_input_level_linear() + kLevelEpsilon,
+      limiter.max_input_level_linear() + kLevelEpsilon + 0.5, kNumSteps);
+  for (const auto level : levels) {
+    SCOPED_TRACE(std::to_string(level));
+    EXPECT_LE(igc.LookUpGainToApply(level), limiter.GetGainLinear(level));
+  }
+
+  const auto stats = igc.get_stats();
+  EXPECT_EQ(0ul, stats.look_ups_identity_region);
+  EXPECT_EQ(0ul, stats.look_ups_knee_region);
+  EXPECT_EQ(0ul, stats.look_ups_limiter_region);
+  EXPECT_EQ(kNumSteps, stats.look_ups_saturation_region);
+}
+
+TEST(AutomaticGainController2InterpolatedGainCurve, CheckApproximationParams) {
+  test::InterpolatedParameters parameters =
+      test::ComputeInterpolatedGainCurveApproximationParams();
+
+  InterpolatedGainCurve igc(&apm_data_dumper);
+
+  for (size_t i = 0; i < kInterpolatedGainCurveTotalPoints; ++i) {
+    // The tolerance levels are chosen to account for deviations due
+    // to computing with single precision floating point numbers.
+    EXPECT_NEAR(igc.approximation_params_x_[i],
+                parameters.computed_approximation_params_x[i], 0.9f);
+    EXPECT_NEAR(igc.approximation_params_m_[i],
+                parameters.computed_approximation_params_m[i], 0.00001f);
+    EXPECT_NEAR(igc.approximation_params_q_[i],
+                parameters.computed_approximation_params_q[i], 0.001f);
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/agc2/limiter.cc b/modules/audio_processing/agc2/limiter.cc
new file mode 100644
index 0000000..d2b9877
--- /dev/null
+++ b/modules/audio_processing/agc2/limiter.cc
@@ -0,0 +1,137 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/limiter.h"
+
+#include <cmath>
+
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_processing/agc2/agc2_common.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+double ComputeKneeStart(double max_input_level_db,
+                        double knee_smoothness_db,
+                        double compression_ratio) {
+  RTC_CHECK_LT((compression_ratio - 1.0) * knee_smoothness_db /
+                   (2.0 * compression_ratio),
+               max_input_level_db);
+  return -knee_smoothness_db / 2.0 -
+         max_input_level_db / (compression_ratio - 1.0);
+}
+
+std::array<double, 3> ComputeKneeRegionPolynomial(double knee_start_dbfs,
+                                                  double knee_smoothness_db,
+                                                  double compression_ratio) {
+  const double a = (1.0 - compression_ratio) /
+                   (2.0 * knee_smoothness_db * compression_ratio);
+  const double b = 1.0 - 2.0 * a * knee_start_dbfs;
+  const double c = a * knee_start_dbfs * knee_start_dbfs;
+  return {{a, b, c}};
+}
+
+double ComputeLimiterD1(double max_input_level_db, double compression_ratio) {
+  return (std::pow(10.0, -max_input_level_db / (20.0 * compression_ratio)) *
+          (1.0 - compression_ratio) / compression_ratio) /
+         kMaxAbsFloatS16Value;
+}
+
+constexpr double ComputeLimiterD2(double compression_ratio) {
+  return (1.0 - 2.0 * compression_ratio) / compression_ratio;
+}
+
+double ComputeLimiterI2(double max_input_level_db,
+                        double compression_ratio,
+                        double gain_curve_limiter_i1) {
+  RTC_CHECK_NE(gain_curve_limiter_i1, 0.f);
+  return std::pow(10.0, -max_input_level_db / (20.0 * compression_ratio)) /
+         gain_curve_limiter_i1 /
+         std::pow(kMaxAbsFloatS16Value, gain_curve_limiter_i1 - 1);
+}
+
+}  // namespace
+
+Limiter::Limiter()
+    : max_input_level_linear_(DbfsToFloatS16(max_input_level_db_)),
+      knee_start_dbfs_(ComputeKneeStart(max_input_level_db_,
+                                        knee_smoothness_db_,
+                                        compression_ratio_)),
+      knee_start_linear_(DbfsToFloatS16(knee_start_dbfs_)),
+      limiter_start_dbfs_(knee_start_dbfs_ + knee_smoothness_db_),
+      limiter_start_linear_(DbfsToFloatS16(limiter_start_dbfs_)),
+      knee_region_polynomial_(ComputeKneeRegionPolynomial(knee_start_dbfs_,
+                                                          knee_smoothness_db_,
+                                                          compression_ratio_)),
+      gain_curve_limiter_d1_(
+          ComputeLimiterD1(max_input_level_db_, compression_ratio_)),
+      gain_curve_limiter_d2_(ComputeLimiterD2(compression_ratio_)),
+      gain_curve_limiter_i1_(1.0 / compression_ratio_),
+      gain_curve_limiter_i2_(ComputeLimiterI2(max_input_level_db_,
+                                              compression_ratio_,
+                                              gain_curve_limiter_i1_)) {
+  static_assert(knee_smoothness_db_ > 0.0f, "");
+  static_assert(compression_ratio_ > 1.0f, "");
+  RTC_CHECK_GE(max_input_level_db_, knee_start_dbfs_ + knee_smoothness_db_);
+}
+
+constexpr double Limiter::max_input_level_db_;
+constexpr double Limiter::knee_smoothness_db_;
+constexpr double Limiter::compression_ratio_;
+
+double Limiter::GetOutputLevelDbfs(double input_level_dbfs) const {
+  if (input_level_dbfs < knee_start_dbfs_) {
+    return input_level_dbfs;
+  } else if (input_level_dbfs < limiter_start_dbfs_) {
+    return GetKneeRegionOutputLevelDbfs(input_level_dbfs);
+  }
+  return GetCompressorRegionOutputLevelDbfs(input_level_dbfs);
+}
+
+double Limiter::GetGainLinear(double input_level_linear) const {
+  if (input_level_linear < knee_start_linear_) {
+    return 1.0;
+  }
+  return DbfsToFloatS16(
+             GetOutputLevelDbfs(FloatS16ToDbfs(input_level_linear))) /
+         input_level_linear;
+}
+
+// Computes the first derivative of GetGainLinear() in |x|.
+double Limiter::GetGainFirstDerivativeLinear(double x) const {
+  // Beyond-knee region only.
+  RTC_CHECK_GE(x, limiter_start_linear_ - 1e-7 * kMaxAbsFloatS16Value);
+  return gain_curve_limiter_d1_ *
+         std::pow(x / kMaxAbsFloatS16Value, gain_curve_limiter_d2_);
+}
+
+// Computes the integral of GetGainLinear() in the range [x0, x1].
+double Limiter::GetGainIntegralLinear(double x0, double x1) const {
+  RTC_CHECK_LE(x0, x1);                     // Valid interval.
+  RTC_CHECK_GE(x0, limiter_start_linear_);  // Beyond-knee region only.
+  auto limiter_integral = [this](const double& x) {
+    return gain_curve_limiter_i2_ * std::pow(x, gain_curve_limiter_i1_);
+  };
+  return limiter_integral(x1) - limiter_integral(x0);
+}
+
+double Limiter::GetKneeRegionOutputLevelDbfs(double input_level_dbfs) const {
+  return knee_region_polynomial_[0] * input_level_dbfs * input_level_dbfs +
+         knee_region_polynomial_[1] * input_level_dbfs +
+         knee_region_polynomial_[2];
+}
+
+double Limiter::GetCompressorRegionOutputLevelDbfs(
+    double input_level_dbfs) const {
+  return (input_level_dbfs - max_input_level_db_) / compression_ratio_;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/agc2/limiter.h b/modules/audio_processing/agc2/limiter.h
new file mode 100644
index 0000000..f350bae
--- /dev/null
+++ b/modules/audio_processing/agc2/limiter.h
@@ -0,0 +1,74 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_LIMITER_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_LIMITER_H_
+
+#include <array>
+
+#include "modules/audio_processing/agc2/agc2_testing_common.h"
+
+namespace webrtc {
+
+// A class for computing gain curve parameters. The gain curve is
+// defined by constants kLimiterMaxInputLevelDbFs, kLimiterKneeSmoothnessDb,
+// kLimiterCompressionRatio. The curve consints of one linear part,
+// one quadratic polynomial part and another linear part. The
+// constants define the parameters of the parts.
+class Limiter {
+ public:
+  Limiter();
+
+  double max_input_level_db() const { return max_input_level_db_; }
+  double max_input_level_linear() const { return max_input_level_linear_; }
+  double knee_start_linear() const { return knee_start_linear_; }
+  double limiter_start_linear() const { return limiter_start_linear_; }
+
+  // These methods can be marked 'constexpr' in C++ 14.
+  double GetOutputLevelDbfs(double input_level_dbfs) const;
+  double GetGainLinear(double input_level_linear) const;
+  double GetGainFirstDerivativeLinear(double x) const;
+  double GetGainIntegralLinear(double x0, double x1) const;
+
+ private:
+  double GetKneeRegionOutputLevelDbfs(double input_level_dbfs) const;
+  double GetCompressorRegionOutputLevelDbfs(double input_level_dbfs) const;
+
+  static constexpr double max_input_level_db_ = test::kLimiterMaxInputLevelDbFs;
+  static constexpr double knee_smoothness_db_ = test::kLimiterKneeSmoothnessDb;
+  static constexpr double compression_ratio_ = test::kLimiterCompressionRatio;
+
+  const double max_input_level_linear_;
+
+  // Do not modify signal with level <= knee_start_dbfs_.
+  const double knee_start_dbfs_;
+  const double knee_start_linear_;
+
+  // The upper end of the knee region, which is between knee_start_dbfs_ and
+  // limiter_start_dbfs_.
+  const double limiter_start_dbfs_;
+  const double limiter_start_linear_;
+
+  // Coefficients {a, b, c} of the knee region polynomial
+  // ax^2 + bx + c in the DB scale.
+  const std::array<double, 3> knee_region_polynomial_;
+
+  // Parameters for the computation of the first derivative of GetGainLinear().
+  const double gain_curve_limiter_d1_;
+  const double gain_curve_limiter_d2_;
+
+  // Parameters for the computation of the integral of GetGainLinear().
+  const double gain_curve_limiter_i1_;
+  const double gain_curve_limiter_i2_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AGC2_LIMITER_H_
diff --git a/modules/audio_processing/agc2/limiter_unittest.cc b/modules/audio_processing/agc2/limiter_unittest.cc
new file mode 100644
index 0000000..7079812
--- /dev/null
+++ b/modules/audio_processing/agc2/limiter_unittest.cc
@@ -0,0 +1,60 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/limiter.h"
+
+#include "rtc_base/gunit.h"
+
+namespace webrtc {
+
+TEST(FixedDigitalGainController2Limiter, ConstructDestruct) {
+  Limiter l;
+}
+
+TEST(FixedDigitalGainController2Limiter, GainCurveShouldBeMonotone) {
+  Limiter l;
+  float last_output_level = 0.f;
+  bool has_last_output_level = false;
+  for (float level = -90.f; level <= l.max_input_level_db(); level += 0.5f) {
+    const float current_output_level = l.GetOutputLevelDbfs(level);
+    if (!has_last_output_level) {
+      last_output_level = current_output_level;
+      has_last_output_level = true;
+    }
+    EXPECT_LE(last_output_level, current_output_level);
+    last_output_level = current_output_level;
+  }
+}
+
+TEST(FixedDigitalGainController2Limiter, GainCurveShouldBeContinuous) {
+  Limiter l;
+  float last_output_level = 0.f;
+  bool has_last_output_level = false;
+  constexpr float kMaxDelta = 0.5f;
+  for (float level = -90.f; level <= l.max_input_level_db(); level += 0.5f) {
+    const float current_output_level = l.GetOutputLevelDbfs(level);
+    if (!has_last_output_level) {
+      last_output_level = current_output_level;
+      has_last_output_level = true;
+    }
+    EXPECT_LE(current_output_level, last_output_level + kMaxDelta);
+    last_output_level = current_output_level;
+  }
+}
+
+TEST(FixedDigitalGainController2Limiter, OutputGainShouldBeLessThanFullScale) {
+  Limiter l;
+  for (float level = -90.f; level <= l.max_input_level_db(); level += 0.5f) {
+    const float current_output_level = l.GetOutputLevelDbfs(level);
+    EXPECT_LE(current_output_level, 0.f);
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/agc2/vector_float_frame.cc b/modules/audio_processing/agc2/vector_float_frame.cc
new file mode 100644
index 0000000..a70d815
--- /dev/null
+++ b/modules/audio_processing/agc2/vector_float_frame.cc
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/vector_float_frame.h"
+
+namespace webrtc {
+
+namespace {
+
+std::vector<float*> ConstructChannelPointers(
+    std::vector<std::vector<float>>* x) {
+  std::vector<float*> channel_ptrs;
+  for (auto& v : *x) {
+    channel_ptrs.push_back(v.data());
+  }
+  return channel_ptrs;
+}
+}  // namespace
+
+VectorFloatFrame::VectorFloatFrame(int num_channels,
+                                   int samples_per_channel,
+                                   float start_value)
+    : channels_(num_channels,
+                std::vector<float>(samples_per_channel, start_value)),
+      channel_ptrs_(ConstructChannelPointers(&channels_)),
+      float_frame_view_(channel_ptrs_.data(),
+                        channels_.size(),
+                        samples_per_channel) {}
+
+VectorFloatFrame::~VectorFloatFrame() = default;
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/agc2/vector_float_frame.h b/modules/audio_processing/agc2/vector_float_frame.h
new file mode 100644
index 0000000..b521f34
--- /dev/null
+++ b/modules/audio_processing/agc2/vector_float_frame.h
@@ -0,0 +1,42 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC2_VECTOR_FLOAT_FRAME_H_
+#define MODULES_AUDIO_PROCESSING_AGC2_VECTOR_FLOAT_FRAME_H_
+
+#include <vector>
+
+#include "modules/audio_processing/include/audio_frame_view.h"
+
+namespace webrtc {
+
+// A construct consisting of a multi-channel audio frame, and a FloatFrame view
+// of it.
+class VectorFloatFrame {
+ public:
+  VectorFloatFrame(int num_channels,
+                   int samples_per_channel,
+                   float start_value);
+  const AudioFrameView<float>& float_frame_view() { return float_frame_view_; }
+  AudioFrameView<const float> float_frame_view() const {
+    return float_frame_view_;
+  }
+
+  ~VectorFloatFrame();
+
+ private:
+  std::vector<std::vector<float>> channels_;
+  std::vector<float*> channel_ptrs_;
+  AudioFrameView<float> float_frame_view_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AGC2_VECTOR_FLOAT_FRAME_H_
diff --git a/modules/audio_processing/audio_buffer.cc b/modules/audio_processing/audio_buffer.cc
new file mode 100644
index 0000000..16f1174
--- /dev/null
+++ b/modules/audio_processing/audio_buffer.cc
@@ -0,0 +1,475 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/audio_buffer.h"
+
+#include "common_audio/channel_buffer.h"
+#include "common_audio/include/audio_util.h"
+#include "common_audio/resampler/push_sinc_resampler.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_processing/common.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+const size_t kSamplesPer16kHzChannel = 160;
+const size_t kSamplesPer32kHzChannel = 320;
+const size_t kSamplesPer48kHzChannel = 480;
+
+int KeyboardChannelIndex(const StreamConfig& stream_config) {
+  if (!stream_config.has_keyboard()) {
+    RTC_NOTREACHED();
+    return 0;
+  }
+
+  return stream_config.num_channels();
+}
+
+size_t NumBandsFromSamplesPerChannel(size_t num_frames) {
+  size_t num_bands = 1;
+  if (num_frames == kSamplesPer32kHzChannel ||
+      num_frames == kSamplesPer48kHzChannel) {
+    num_bands = rtc::CheckedDivExact(num_frames, kSamplesPer16kHzChannel);
+  }
+  return num_bands;
+}
+
+}  // namespace
+
+AudioBuffer::AudioBuffer(size_t input_num_frames,
+                         size_t num_input_channels,
+                         size_t process_num_frames,
+                         size_t num_process_channels,
+                         size_t output_num_frames)
+  : input_num_frames_(input_num_frames),
+    num_input_channels_(num_input_channels),
+    proc_num_frames_(process_num_frames),
+    num_proc_channels_(num_process_channels),
+    output_num_frames_(output_num_frames),
+    num_channels_(num_process_channels),
+    num_bands_(NumBandsFromSamplesPerChannel(proc_num_frames_)),
+    num_split_frames_(rtc::CheckedDivExact(proc_num_frames_, num_bands_)),
+    mixed_low_pass_valid_(false),
+    reference_copied_(false),
+    activity_(AudioFrame::kVadUnknown),
+    keyboard_data_(NULL),
+    data_(new IFChannelBuffer(proc_num_frames_, num_proc_channels_)),
+    output_buffer_(new IFChannelBuffer(output_num_frames_, num_channels_)) {
+  RTC_DCHECK_GT(input_num_frames_, 0);
+  RTC_DCHECK_GT(proc_num_frames_, 0);
+  RTC_DCHECK_GT(output_num_frames_, 0);
+  RTC_DCHECK_GT(num_input_channels_, 0);
+  RTC_DCHECK_GT(num_proc_channels_, 0);
+  RTC_DCHECK_LE(num_proc_channels_, num_input_channels_);
+
+  if (input_num_frames_ != proc_num_frames_ ||
+      output_num_frames_ != proc_num_frames_) {
+    // Create an intermediate buffer for resampling.
+    process_buffer_.reset(new ChannelBuffer<float>(proc_num_frames_,
+                                                   num_proc_channels_));
+
+    if (input_num_frames_ != proc_num_frames_) {
+      for (size_t i = 0; i < num_proc_channels_; ++i) {
+        input_resamplers_.push_back(std::unique_ptr<PushSincResampler>(
+            new PushSincResampler(input_num_frames_, proc_num_frames_)));
+      }
+    }
+
+    if (output_num_frames_ != proc_num_frames_) {
+      for (size_t i = 0; i < num_proc_channels_; ++i) {
+        output_resamplers_.push_back(std::unique_ptr<PushSincResampler>(
+            new PushSincResampler(proc_num_frames_, output_num_frames_)));
+      }
+    }
+  }
+
+  if (num_bands_ > 1) {
+    split_data_.reset(new IFChannelBuffer(proc_num_frames_,
+                                          num_proc_channels_,
+                                          num_bands_));
+    splitting_filter_.reset(new SplittingFilter(num_proc_channels_,
+                                                num_bands_,
+                                                proc_num_frames_));
+  }
+}
+
+AudioBuffer::~AudioBuffer() {}
+
+void AudioBuffer::CopyFrom(const float* const* data,
+                           const StreamConfig& stream_config) {
+  RTC_DCHECK_EQ(stream_config.num_frames(), input_num_frames_);
+  RTC_DCHECK_EQ(stream_config.num_channels(), num_input_channels_);
+  InitForNewData();
+  // Initialized lazily because there's a different condition in
+  // DeinterleaveFrom.
+  const bool need_to_downmix =
+      num_input_channels_ > 1 && num_proc_channels_ == 1;
+  if (need_to_downmix && !input_buffer_) {
+    input_buffer_.reset(
+        new IFChannelBuffer(input_num_frames_, num_proc_channels_));
+  }
+
+  if (stream_config.has_keyboard()) {
+    keyboard_data_ = data[KeyboardChannelIndex(stream_config)];
+  }
+
+  // Downmix.
+  const float* const* data_ptr = data;
+  if (need_to_downmix) {
+    DownmixToMono<float, float>(data, input_num_frames_, num_input_channels_,
+                                input_buffer_->fbuf()->channels()[0]);
+    data_ptr = input_buffer_->fbuf_const()->channels();
+  }
+
+  // Resample.
+  if (input_num_frames_ != proc_num_frames_) {
+    for (size_t i = 0; i < num_proc_channels_; ++i) {
+      input_resamplers_[i]->Resample(data_ptr[i],
+                                     input_num_frames_,
+                                     process_buffer_->channels()[i],
+                                     proc_num_frames_);
+    }
+    data_ptr = process_buffer_->channels();
+  }
+
+  // Convert to the S16 range.
+  for (size_t i = 0; i < num_proc_channels_; ++i) {
+    FloatToFloatS16(data_ptr[i],
+                    proc_num_frames_,
+                    data_->fbuf()->channels()[i]);
+  }
+}
+
+void AudioBuffer::CopyTo(const StreamConfig& stream_config,
+                         float* const* data) {
+  RTC_DCHECK_EQ(stream_config.num_frames(), output_num_frames_);
+  RTC_DCHECK(stream_config.num_channels() == num_channels_ ||
+             num_channels_ == 1);
+
+  // Convert to the float range.
+  float* const* data_ptr = data;
+  if (output_num_frames_ != proc_num_frames_) {
+    // Convert to an intermediate buffer for subsequent resampling.
+    data_ptr = process_buffer_->channels();
+  }
+  for (size_t i = 0; i < num_channels_; ++i) {
+    FloatS16ToFloat(data_->fbuf()->channels()[i],
+                    proc_num_frames_,
+                    data_ptr[i]);
+  }
+
+  // Resample.
+  if (output_num_frames_ != proc_num_frames_) {
+    for (size_t i = 0; i < num_channels_; ++i) {
+      output_resamplers_[i]->Resample(data_ptr[i],
+                                      proc_num_frames_,
+                                      data[i],
+                                      output_num_frames_);
+    }
+  }
+
+  // Upmix.
+  for (size_t i = num_channels_; i < stream_config.num_channels(); ++i) {
+    memcpy(data[i], data[0], output_num_frames_ * sizeof(**data));
+  }
+}
+
+void AudioBuffer::InitForNewData() {
+  keyboard_data_ = NULL;
+  mixed_low_pass_valid_ = false;
+  reference_copied_ = false;
+  activity_ = AudioFrame::kVadUnknown;
+  num_channels_ = num_proc_channels_;
+  data_->set_num_channels(num_proc_channels_);
+  if (split_data_.get()) {
+    split_data_->set_num_channels(num_proc_channels_);
+  }
+}
+
+const int16_t* const* AudioBuffer::channels_const() const {
+  return data_->ibuf_const()->channels();
+}
+
+int16_t* const* AudioBuffer::channels() {
+  mixed_low_pass_valid_ = false;
+  return data_->ibuf()->channels();
+}
+
+const int16_t* const* AudioBuffer::split_bands_const(size_t channel) const {
+  return split_data_.get() ?
+         split_data_->ibuf_const()->bands(channel) :
+         data_->ibuf_const()->bands(channel);
+}
+
+int16_t* const* AudioBuffer::split_bands(size_t channel) {
+  mixed_low_pass_valid_ = false;
+  return split_data_.get() ?
+         split_data_->ibuf()->bands(channel) :
+         data_->ibuf()->bands(channel);
+}
+
+const int16_t* const* AudioBuffer::split_channels_const(Band band) const {
+  if (split_data_.get()) {
+    return split_data_->ibuf_const()->channels(band);
+  } else {
+    return band == kBand0To8kHz ? data_->ibuf_const()->channels() : nullptr;
+  }
+}
+
+int16_t* const* AudioBuffer::split_channels(Band band) {
+  mixed_low_pass_valid_ = false;
+  if (split_data_.get()) {
+    return split_data_->ibuf()->channels(band);
+  } else {
+    return band == kBand0To8kHz ? data_->ibuf()->channels() : nullptr;
+  }
+}
+
+ChannelBuffer<int16_t>* AudioBuffer::data() {
+  mixed_low_pass_valid_ = false;
+  return data_->ibuf();
+}
+
+const ChannelBuffer<int16_t>* AudioBuffer::data() const {
+  return data_->ibuf_const();
+}
+
+ChannelBuffer<int16_t>* AudioBuffer::split_data() {
+  mixed_low_pass_valid_ = false;
+  return split_data_.get() ? split_data_->ibuf() : data_->ibuf();
+}
+
+const ChannelBuffer<int16_t>* AudioBuffer::split_data() const {
+  return split_data_.get() ? split_data_->ibuf_const() : data_->ibuf_const();
+}
+
+const float* const* AudioBuffer::channels_const_f() const {
+  return data_->fbuf_const()->channels();
+}
+
+float* const* AudioBuffer::channels_f() {
+  mixed_low_pass_valid_ = false;
+  return data_->fbuf()->channels();
+}
+
+const float* const* AudioBuffer::split_bands_const_f(size_t channel) const {
+  return split_data_.get() ?
+         split_data_->fbuf_const()->bands(channel) :
+         data_->fbuf_const()->bands(channel);
+}
+
+float* const* AudioBuffer::split_bands_f(size_t channel) {
+  mixed_low_pass_valid_ = false;
+  return split_data_.get() ?
+         split_data_->fbuf()->bands(channel) :
+         data_->fbuf()->bands(channel);
+}
+
+const float* const* AudioBuffer::split_channels_const_f(Band band) const {
+  if (split_data_.get()) {
+    return split_data_->fbuf_const()->channels(band);
+  } else {
+    return band == kBand0To8kHz ? data_->fbuf_const()->channels() : nullptr;
+  }
+}
+
+float* const* AudioBuffer::split_channels_f(Band band) {
+  mixed_low_pass_valid_ = false;
+  if (split_data_.get()) {
+    return split_data_->fbuf()->channels(band);
+  } else {
+    return band == kBand0To8kHz ? data_->fbuf()->channels() : nullptr;
+  }
+}
+
+ChannelBuffer<float>* AudioBuffer::data_f() {
+  mixed_low_pass_valid_ = false;
+  return data_->fbuf();
+}
+
+const ChannelBuffer<float>* AudioBuffer::data_f() const {
+  return data_->fbuf_const();
+}
+
+ChannelBuffer<float>* AudioBuffer::split_data_f() {
+  mixed_low_pass_valid_ = false;
+  return split_data_.get() ? split_data_->fbuf() : data_->fbuf();
+}
+
+const ChannelBuffer<float>* AudioBuffer::split_data_f() const {
+  return split_data_.get() ? split_data_->fbuf_const() : data_->fbuf_const();
+}
+
+const int16_t* AudioBuffer::mixed_low_pass_data() {
+  if (num_proc_channels_ == 1) {
+    return split_bands_const(0)[kBand0To8kHz];
+  }
+
+  if (!mixed_low_pass_valid_) {
+    if (!mixed_low_pass_channels_.get()) {
+      mixed_low_pass_channels_.reset(
+          new ChannelBuffer<int16_t>(num_split_frames_, 1));
+    }
+
+    DownmixToMono<int16_t, int32_t>(split_channels_const(kBand0To8kHz),
+                                    num_split_frames_, num_channels_,
+                                    mixed_low_pass_channels_->channels()[0]);
+    mixed_low_pass_valid_ = true;
+  }
+  return mixed_low_pass_channels_->channels()[0];
+}
+
+const int16_t* AudioBuffer::low_pass_reference(int channel) const {
+  if (!reference_copied_) {
+    return NULL;
+  }
+
+  return low_pass_reference_channels_->channels()[channel];
+}
+
+const float* AudioBuffer::keyboard_data() const {
+  return keyboard_data_;
+}
+
+void AudioBuffer::set_activity(AudioFrame::VADActivity activity) {
+  activity_ = activity;
+}
+
+AudioFrame::VADActivity AudioBuffer::activity() const {
+  return activity_;
+}
+
+size_t AudioBuffer::num_channels() const {
+  return num_channels_;
+}
+
+void AudioBuffer::set_num_channels(size_t num_channels) {
+  num_channels_ = num_channels;
+  data_->set_num_channels(num_channels);
+  if (split_data_.get()) {
+    split_data_->set_num_channels(num_channels);
+  }
+}
+
+size_t AudioBuffer::num_frames() const {
+  return proc_num_frames_;
+}
+
+size_t AudioBuffer::num_frames_per_band() const {
+  return num_split_frames_;
+}
+
+size_t AudioBuffer::num_keyboard_frames() const {
+  // We don't resample the keyboard channel.
+  return input_num_frames_;
+}
+
+size_t AudioBuffer::num_bands() const {
+  return num_bands_;
+}
+
+// The resampler is only for supporting 48kHz to 16kHz in the reverse stream.
+void AudioBuffer::DeinterleaveFrom(AudioFrame* frame) {
+  RTC_DCHECK_EQ(frame->num_channels_, num_input_channels_);
+  RTC_DCHECK_EQ(frame->samples_per_channel_, input_num_frames_);
+  InitForNewData();
+  // Initialized lazily because there's a different condition in CopyFrom.
+  if ((input_num_frames_ != proc_num_frames_) && !input_buffer_) {
+    input_buffer_.reset(
+        new IFChannelBuffer(input_num_frames_, num_proc_channels_));
+  }
+  activity_ = frame->vad_activity_;
+
+  int16_t* const* deinterleaved;
+  if (input_num_frames_ == proc_num_frames_) {
+    deinterleaved = data_->ibuf()->channels();
+  } else {
+    deinterleaved = input_buffer_->ibuf()->channels();
+  }
+  // TODO(yujo): handle muted frames more efficiently.
+  if (num_proc_channels_ == 1) {
+    // Downmix and deinterleave simultaneously.
+    DownmixInterleavedToMono(frame->data(), input_num_frames_,
+                             num_input_channels_, deinterleaved[0]);
+  } else {
+    RTC_DCHECK_EQ(num_proc_channels_, num_input_channels_);
+    Deinterleave(frame->data(),
+                 input_num_frames_,
+                 num_proc_channels_,
+                 deinterleaved);
+  }
+
+  // Resample.
+  if (input_num_frames_ != proc_num_frames_) {
+    for (size_t i = 0; i < num_proc_channels_; ++i) {
+      input_resamplers_[i]->Resample(input_buffer_->fbuf_const()->channels()[i],
+                                     input_num_frames_,
+                                     data_->fbuf()->channels()[i],
+                                     proc_num_frames_);
+    }
+  }
+}
+
+void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) const {
+  frame->vad_activity_ = activity_;
+  if (!data_changed) {
+    return;
+  }
+
+  RTC_DCHECK(frame->num_channels_ == num_channels_ || num_channels_ == 1);
+  RTC_DCHECK_EQ(frame->samples_per_channel_, output_num_frames_);
+
+  // Resample if necessary.
+  IFChannelBuffer* data_ptr = data_.get();
+  if (proc_num_frames_ != output_num_frames_) {
+    for (size_t i = 0; i < num_channels_; ++i) {
+      output_resamplers_[i]->Resample(
+          data_->fbuf()->channels()[i], proc_num_frames_,
+          output_buffer_->fbuf()->channels()[i], output_num_frames_);
+    }
+    data_ptr = output_buffer_.get();
+  }
+
+  // TODO(yujo): handle muted frames more efficiently.
+  if (frame->num_channels_ == num_channels_) {
+    Interleave(data_ptr->ibuf()->channels(), output_num_frames_, num_channels_,
+               frame->mutable_data());
+  } else {
+    UpmixMonoToInterleaved(data_ptr->ibuf()->channels()[0], output_num_frames_,
+                           frame->num_channels_, frame->mutable_data());
+  }
+}
+
+void AudioBuffer::CopyLowPassToReference() {
+  reference_copied_ = true;
+  if (!low_pass_reference_channels_.get() ||
+      low_pass_reference_channels_->num_channels() != num_channels_) {
+    low_pass_reference_channels_.reset(
+        new ChannelBuffer<int16_t>(num_split_frames_,
+                                   num_proc_channels_));
+  }
+  for (size_t i = 0; i < num_proc_channels_; i++) {
+    memcpy(low_pass_reference_channels_->channels()[i],
+           split_bands_const(i)[kBand0To8kHz],
+           low_pass_reference_channels_->num_frames_per_band() *
+               sizeof(split_bands_const(i)[kBand0To8kHz][0]));
+  }
+}
+
+void AudioBuffer::SplitIntoFrequencyBands() {
+  splitting_filter_->Analysis(data_.get(), split_data_.get());
+}
+
+void AudioBuffer::MergeFrequencyBands() {
+  splitting_filter_->Synthesis(split_data_.get(), data_.get());
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/audio_buffer.h b/modules/audio_processing/audio_buffer.h
new file mode 100644
index 0000000..8451bde
--- /dev/null
+++ b/modules/audio_processing/audio_buffer.h
@@ -0,0 +1,166 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AUDIO_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_AUDIO_BUFFER_H_
+
+#include <memory>
+#include <vector>
+
+#include "common_audio/channel_buffer.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/splitting_filter.h"
+#include "modules/include/module_common_types.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class PushSincResampler;
+class IFChannelBuffer;
+
+enum Band {
+  kBand0To8kHz = 0,
+  kBand8To16kHz = 1,
+  kBand16To24kHz = 2
+};
+
+class AudioBuffer {
+ public:
+  // TODO(ajm): Switch to take ChannelLayouts.
+  AudioBuffer(size_t input_num_frames,
+              size_t num_input_channels,
+              size_t process_num_frames,
+              size_t num_process_channels,
+              size_t output_num_frames);
+  virtual ~AudioBuffer();
+
+  size_t num_channels() const;
+  void set_num_channels(size_t num_channels);
+  size_t num_frames() const;
+  size_t num_frames_per_band() const;
+  size_t num_keyboard_frames() const;
+  size_t num_bands() const;
+
+  // Returns a pointer array to the full-band channels.
+  // Usage:
+  // channels()[channel][sample].
+  // Where:
+  // 0 <= channel < |num_proc_channels_|
+  // 0 <= sample < |proc_num_frames_|
+  int16_t* const* channels();
+  const int16_t* const* channels_const() const;
+  float* const* channels_f();
+  const float* const* channels_const_f() const;
+
+  // Returns a pointer array to the bands for a specific channel.
+  // Usage:
+  // split_bands(channel)[band][sample].
+  // Where:
+  // 0 <= channel < |num_proc_channels_|
+  // 0 <= band < |num_bands_|
+  // 0 <= sample < |num_split_frames_|
+  int16_t* const* split_bands(size_t channel);
+  const int16_t* const* split_bands_const(size_t channel) const;
+  float* const* split_bands_f(size_t channel);
+  const float* const* split_bands_const_f(size_t channel) const;
+
+  // Returns a pointer array to the channels for a specific band.
+  // Usage:
+  // split_channels(band)[channel][sample].
+  // Where:
+  // 0 <= band < |num_bands_|
+  // 0 <= channel < |num_proc_channels_|
+  // 0 <= sample < |num_split_frames_|
+  int16_t* const* split_channels(Band band);
+  const int16_t* const* split_channels_const(Band band) const;
+  float* const* split_channels_f(Band band);
+  const float* const* split_channels_const_f(Band band) const;
+
+  // Returns a pointer to the ChannelBuffer that encapsulates the full-band
+  // data.
+  ChannelBuffer<int16_t>* data();
+  const ChannelBuffer<int16_t>* data() const;
+  ChannelBuffer<float>* data_f();
+  const ChannelBuffer<float>* data_f() const;
+
+  // Returns a pointer to the ChannelBuffer that encapsulates the split data.
+  ChannelBuffer<int16_t>* split_data();
+  const ChannelBuffer<int16_t>* split_data() const;
+  ChannelBuffer<float>* split_data_f();
+  const ChannelBuffer<float>* split_data_f() const;
+
+  // Returns a pointer to the low-pass data downmixed to mono. If this data
+  // isn't already available it re-calculates it.
+  const int16_t* mixed_low_pass_data();
+  const int16_t* low_pass_reference(int channel) const;
+
+  const float* keyboard_data() const;
+
+  void set_activity(AudioFrame::VADActivity activity);
+  AudioFrame::VADActivity activity() const;
+
+  // Use for int16 interleaved data.
+  void DeinterleaveFrom(AudioFrame* audioFrame);
+  // If |data_changed| is false, only the non-audio data members will be copied
+  // to |frame|.
+  void InterleaveTo(AudioFrame* frame, bool data_changed) const;
+
+  // Use for float deinterleaved data.
+  void CopyFrom(const float* const* data, const StreamConfig& stream_config);
+  void CopyTo(const StreamConfig& stream_config, float* const* data);
+  void CopyLowPassToReference();
+
+  // Splits the signal into different bands.
+  void SplitIntoFrequencyBands();
+  // Recombine the different bands into one signal.
+  void MergeFrequencyBands();
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(AudioBufferTest,
+                           SetNumChannelsSetsChannelBuffersNumChannels);
+  // Called from DeinterleaveFrom() and CopyFrom().
+  void InitForNewData();
+
+  // The audio is passed into DeinterleaveFrom() or CopyFrom() with input
+  // format (samples per channel and number of channels).
+  const size_t input_num_frames_;
+  const size_t num_input_channels_;
+  // The audio is stored by DeinterleaveFrom() or CopyFrom() with processing
+  // format.
+  const size_t proc_num_frames_;
+  const size_t num_proc_channels_;
+  // The audio is returned by InterleaveTo() and CopyTo() with output samples
+  // per channels and the current number of channels. This last one can be
+  // changed at any time using set_num_channels().
+  const size_t output_num_frames_;
+  size_t num_channels_;
+
+  size_t num_bands_;
+  size_t num_split_frames_;
+  bool mixed_low_pass_valid_;
+  bool reference_copied_;
+  AudioFrame::VADActivity activity_;
+
+  const float* keyboard_data_;
+  std::unique_ptr<IFChannelBuffer> data_;
+  std::unique_ptr<IFChannelBuffer> split_data_;
+  std::unique_ptr<SplittingFilter> splitting_filter_;
+  std::unique_ptr<ChannelBuffer<int16_t> > mixed_low_pass_channels_;
+  std::unique_ptr<ChannelBuffer<int16_t> > low_pass_reference_channels_;
+  std::unique_ptr<IFChannelBuffer> input_buffer_;
+  std::unique_ptr<IFChannelBuffer> output_buffer_;
+  std::unique_ptr<ChannelBuffer<float> > process_buffer_;
+  std::vector<std::unique_ptr<PushSincResampler>> input_resamplers_;
+  std::vector<std::unique_ptr<PushSincResampler>> output_resamplers_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AUDIO_BUFFER_H_
diff --git a/modules/audio_processing/audio_buffer_unittest.cc b/modules/audio_processing/audio_buffer_unittest.cc
new file mode 100644
index 0000000..4cbb98e
--- /dev/null
+++ b/modules/audio_processing/audio_buffer_unittest.cc
@@ -0,0 +1,48 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/audio_buffer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+const size_t kNumFrames = 480u;
+const size_t kStereo = 2u;
+const size_t kMono = 1u;
+
+void ExpectNumChannels(const AudioBuffer& ab, size_t num_channels) {
+  EXPECT_EQ(ab.data()->num_channels(), num_channels);
+  EXPECT_EQ(ab.data_f()->num_channels(), num_channels);
+  EXPECT_EQ(ab.split_data()->num_channels(), num_channels);
+  EXPECT_EQ(ab.split_data_f()->num_channels(), num_channels);
+  EXPECT_EQ(ab.num_channels(), num_channels);
+}
+
+}  // namespace
+
+TEST(AudioBufferTest, SetNumChannelsSetsChannelBuffersNumChannels) {
+  AudioBuffer ab(kNumFrames, kStereo, kNumFrames, kStereo, kNumFrames);
+  ExpectNumChannels(ab, kStereo);
+  ab.set_num_channels(kMono);
+  ExpectNumChannels(ab, kMono);
+  ab.InitForNewData();
+  ExpectNumChannels(ab, kStereo);
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST(AudioBufferTest, SetNumChannelsDeathTest) {
+  AudioBuffer ab(kNumFrames, kMono, kNumFrames, kMono, kNumFrames);
+  EXPECT_DEATH(ab.set_num_channels(kStereo), "num_channels");
+}
+#endif
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/audio_frame_view_unittest.cc b/modules/audio_processing/audio_frame_view_unittest.cc
new file mode 100644
index 0000000..cf30496
--- /dev/null
+++ b/modules/audio_processing/audio_frame_view_unittest.cc
@@ -0,0 +1,49 @@
+/*
+ *  Copyright 2018 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/include/audio_frame_view.h"
+
+#include "modules/audio_processing/audio_buffer.h"
+#include "test/gtest.h"
+
+TEST(AudioFrameTest, ConstructFromAudioBuffer) {
+  constexpr int kSampleRateHz = 48000;
+  constexpr int kNumChannels = 2;
+  constexpr float kFloatConstant = 1272.f;
+  constexpr float kIntConstant = 17252;
+  const webrtc::StreamConfig stream_config(kSampleRateHz, kNumChannels, false);
+  webrtc::AudioBuffer buffer(
+      stream_config.num_frames(), stream_config.num_channels(),
+      stream_config.num_frames(), stream_config.num_channels(),
+      stream_config.num_frames());
+
+  AudioFrameView<float> non_const_view(
+      buffer.channels_f(), buffer.num_channels(), buffer.num_frames());
+  // Modification is allowed.
+  non_const_view.channel(0)[0] = kFloatConstant;
+  EXPECT_EQ(buffer.channels_f()[0][0], kFloatConstant);
+
+  AudioFrameView<const float> const_view(
+      buffer.channels_f(), buffer.num_channels(), buffer.num_frames());
+  // Modification is not allowed.
+  // const_view.channel(0)[0] = kFloatConstant;
+
+  // Assignment is allowed.
+  AudioFrameView<const float> other_const_view = non_const_view;
+  static_cast<void>(other_const_view);
+
+  // But not the other way. The following will fail:
+  // non_const_view = other_const_view;
+
+  AudioFrameView<int16_t> non_const_int16_view(
+      buffer.channels(), buffer.num_channels(), buffer.num_frames());
+  non_const_int16_view.channel(0)[0] = kIntConstant;
+  EXPECT_EQ(buffer.channels()[0][0], kIntConstant);
+}
diff --git a/modules/audio_processing/audio_generator/file_audio_generator.cc b/modules/audio_processing/audio_generator/file_audio_generator.cc
new file mode 100644
index 0000000..0d691b8
--- /dev/null
+++ b/modules/audio_processing/audio_generator/file_audio_generator.cc
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/audio_generator/file_audio_generator.h"
+
+namespace webrtc {
+
+FileAudioGenerator::FileAudioGenerator(
+    std::unique_ptr<WavReader> input_audio_file) {
+  // TODO(bugs.webrtc.org/8882) Stub.
+  // Read audio from file into internal buffer.
+}
+
+FileAudioGenerator::~FileAudioGenerator() = default;
+
+void FileAudioGenerator::FillFrame(AudioFrameView<float> audio) {
+  // TODO(bugs.webrtc.org/8882) Stub.
+  // Fill |audio| from internal buffer.
+}
+
+size_t FileAudioGenerator::NumChannels() {
+  return num_channels_;
+}
+
+size_t FileAudioGenerator::SampleRateHz() {
+  return sample_rate_hz_;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/audio_generator/file_audio_generator.h b/modules/audio_processing/audio_generator/file_audio_generator.h
new file mode 100644
index 0000000..01979a4
--- /dev/null
+++ b/modules/audio_processing/audio_generator/file_audio_generator.h
@@ -0,0 +1,48 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AUDIO_GENERATOR_FILE_AUDIO_GENERATOR_H_
+#define MODULES_AUDIO_PROCESSING_AUDIO_GENERATOR_FILE_AUDIO_GENERATOR_H_
+
+#include <memory>
+
+#include "common_audio/wav_file.h"
+#include "modules/audio_processing/include/audio_generator.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Provides looping audio from a file. The file is read in its entirety on
+// construction and then closed. This class wraps a webrtc::WavReader, and is
+// hence unsuitable for non-diagnostic code.
+class FileAudioGenerator : public AudioGenerator {
+ public:
+  // Reads the playout audio from a given WAV file.
+  explicit FileAudioGenerator(std::unique_ptr<WavReader> input_audio_file);
+
+  ~FileAudioGenerator() override;
+
+  // Fill |audio| with audio from a file.
+  void FillFrame(AudioFrameView<float> audio) override;
+
+  size_t NumChannels() override;
+
+  size_t SampleRateHz() override;
+
+ private:
+  size_t num_channels_;
+  size_t sample_rate_hz_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(FileAudioGenerator);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AUDIO_GENERATOR_FILE_AUDIO_GENERATOR_H_
diff --git a/modules/audio_processing/audio_generator/file_audio_generator_unittest.cc b/modules/audio_processing/audio_generator/file_audio_generator_unittest.cc
new file mode 100644
index 0000000..6ed3b94
--- /dev/null
+++ b/modules/audio_processing/audio_generator/file_audio_generator_unittest.cc
@@ -0,0 +1,31 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string>
+
+#include "modules/audio_processing/include/audio_generator_factory.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+namespace test {
+
+// TODO(bugs.webrtc.org/8882) Stub.
+// Add unit tests for both file audio and generated audio.
+
+TEST(FileAudioGeneratorTest, CreationDeletion) {
+  const std::string audio_filename =
+      test::ResourcePath("voice_engine/audio_tiny48", "wav");
+  auto audio_generator = AudioGeneratorFactory::Create(audio_filename);
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_processing/audio_processing_impl.cc b/modules/audio_processing/audio_processing_impl.cc
new file mode 100644
index 0000000..554dead
--- /dev/null
+++ b/modules/audio_processing/audio_processing_impl.cc
@@ -0,0 +1,2029 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/audio_processing_impl.h"
+
+#include <math.h>
+#include <algorithm>
+#include <string>
+
+#include "common_audio/audio_converter.h"
+#include "common_audio/channel_buffer.h"
+#include "common_audio/include/audio_util.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_processing/aec/aec_core.h"
+#include "modules/audio_processing/agc/agc_manager_direct.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/beamformer/nonlinear_beamformer.h"
+#include "modules/audio_processing/common.h"
+#include "modules/audio_processing/echo_cancellation_impl.h"
+#include "modules/audio_processing/echo_control_mobile_impl.h"
+#include "modules/audio_processing/gain_control_for_experimental_agc.h"
+#include "modules/audio_processing/gain_control_impl.h"
+#include "modules/audio_processing/gain_controller2.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_file.h"
+#include "rtc_base/refcountedobject.h"
+#include "rtc_base/trace_event.h"
+#if WEBRTC_INTELLIGIBILITY_ENHANCER
+#include "modules/audio_processing/intelligibility/intelligibility_enhancer.h"
+#endif
+#include "modules/audio_processing/level_estimator_impl.h"
+#include "modules/audio_processing/low_cut_filter.h"
+#include "modules/audio_processing/noise_suppression_impl.h"
+#include "modules/audio_processing/residual_echo_detector.h"
+#include "modules/audio_processing/transient/transient_suppressor.h"
+#include "modules/audio_processing/voice_detection_impl.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/atomicops.h"
+#include "system_wrappers/include/file_wrapper.h"
+#include "system_wrappers/include/metrics.h"
+
+// Check to verify that the define for the intelligibility enhancer is properly
+// set.
+#if !defined(WEBRTC_INTELLIGIBILITY_ENHANCER) || \
+    (WEBRTC_INTELLIGIBILITY_ENHANCER != 0 &&     \
+     WEBRTC_INTELLIGIBILITY_ENHANCER != 1)
+#error "Set WEBRTC_INTELLIGIBILITY_ENHANCER to either 0 or 1"
+#endif
+
+#define RETURN_ON_ERR(expr) \
+  do {                      \
+    int err = (expr);       \
+    if (err != kNoError) {  \
+      return err;           \
+    }                       \
+  } while (0)
+
+namespace webrtc {
+
+constexpr int AudioProcessing::kNativeSampleRatesHz[];
+
+namespace {
+
+static bool LayoutHasKeyboard(AudioProcessing::ChannelLayout layout) {
+  switch (layout) {
+    case AudioProcessing::kMono:
+    case AudioProcessing::kStereo:
+      return false;
+    case AudioProcessing::kMonoAndKeyboard:
+    case AudioProcessing::kStereoAndKeyboard:
+      return true;
+  }
+
+  RTC_NOTREACHED();
+  return false;
+}
+
+bool SampleRateSupportsMultiBand(int sample_rate_hz) {
+  return sample_rate_hz == AudioProcessing::kSampleRate32kHz ||
+         sample_rate_hz == AudioProcessing::kSampleRate48kHz;
+}
+
+int FindNativeProcessRateToUse(int minimum_rate, bool band_splitting_required) {
+#ifdef WEBRTC_ARCH_ARM_FAMILY
+  constexpr int kMaxSplittingNativeProcessRate =
+      AudioProcessing::kSampleRate32kHz;
+#else
+  constexpr int kMaxSplittingNativeProcessRate =
+      AudioProcessing::kSampleRate48kHz;
+#endif
+  static_assert(
+      kMaxSplittingNativeProcessRate <= AudioProcessing::kMaxNativeSampleRateHz,
+      "");
+  const int uppermost_native_rate = band_splitting_required
+                                        ? kMaxSplittingNativeProcessRate
+                                        : AudioProcessing::kSampleRate48kHz;
+
+  for (auto rate : AudioProcessing::kNativeSampleRatesHz) {
+    if (rate >= uppermost_native_rate) {
+      return uppermost_native_rate;
+    }
+    if (rate >= minimum_rate) {
+      return rate;
+    }
+  }
+  RTC_NOTREACHED();
+  return uppermost_native_rate;
+}
+
+// Maximum lengths that frame of samples being passed from the render side to
+// the capture side can have (does not apply to AEC3).
+static const size_t kMaxAllowedValuesOfSamplesPerBand = 160;
+static const size_t kMaxAllowedValuesOfSamplesPerFrame = 480;
+
+// Maximum number of frames to buffer in the render queue.
+// TODO(peah): Decrease this once we properly handle hugely unbalanced
+// reverse and forward call numbers.
+static const size_t kMaxNumFramesToBuffer = 100;
+
+class HighPassFilterImpl : public HighPassFilter {
+ public:
+  explicit HighPassFilterImpl(AudioProcessingImpl* apm) : apm_(apm) {}
+  ~HighPassFilterImpl() override = default;
+
+  // HighPassFilter implementation.
+  int Enable(bool enable) override {
+    apm_->MutateConfig([enable](AudioProcessing::Config* config) {
+      config->high_pass_filter.enabled = enable;
+    });
+
+    return AudioProcessing::kNoError;
+  }
+
+  bool is_enabled() const override {
+    return apm_->GetConfig().high_pass_filter.enabled;
+  }
+
+ private:
+  AudioProcessingImpl* apm_;
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(HighPassFilterImpl);
+};
+
+webrtc::InternalAPMStreamsConfig ToStreamsConfig(
+    const ProcessingConfig& api_format) {
+  webrtc::InternalAPMStreamsConfig result;
+  result.input_sample_rate = api_format.input_stream().sample_rate_hz();
+  result.input_num_channels = api_format.input_stream().num_channels();
+  result.output_num_channels = api_format.output_stream().num_channels();
+  result.render_input_num_channels =
+      api_format.reverse_input_stream().num_channels();
+  result.render_input_sample_rate =
+      api_format.reverse_input_stream().sample_rate_hz();
+  result.output_sample_rate = api_format.output_stream().sample_rate_hz();
+  result.render_output_sample_rate =
+      api_format.reverse_output_stream().sample_rate_hz();
+  result.render_output_num_channels =
+      api_format.reverse_output_stream().num_channels();
+  return result;
+}
+}  // namespace
+
+// Throughout webrtc, it's assumed that success is represented by zero.
+static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero");
+
+AudioProcessingImpl::ApmSubmoduleStates::ApmSubmoduleStates(
+    bool capture_post_processor_enabled,
+    bool render_pre_processor_enabled)
+    : capture_post_processor_enabled_(capture_post_processor_enabled),
+      render_pre_processor_enabled_(render_pre_processor_enabled) {}
+
+bool AudioProcessingImpl::ApmSubmoduleStates::Update(
+    bool low_cut_filter_enabled,
+    bool echo_canceller_enabled,
+    bool mobile_echo_controller_enabled,
+    bool residual_echo_detector_enabled,
+    bool noise_suppressor_enabled,
+    bool intelligibility_enhancer_enabled,
+    bool beamformer_enabled,
+    bool adaptive_gain_controller_enabled,
+    bool gain_controller2_enabled,
+    bool echo_controller_enabled,
+    bool voice_activity_detector_enabled,
+    bool level_estimator_enabled,
+    bool transient_suppressor_enabled) {
+  bool changed = false;
+  changed |= (low_cut_filter_enabled != low_cut_filter_enabled_);
+  changed |= (echo_canceller_enabled != echo_canceller_enabled_);
+  changed |=
+      (mobile_echo_controller_enabled != mobile_echo_controller_enabled_);
+  changed |=
+      (residual_echo_detector_enabled != residual_echo_detector_enabled_);
+  changed |= (noise_suppressor_enabled != noise_suppressor_enabled_);
+  changed |=
+      (intelligibility_enhancer_enabled != intelligibility_enhancer_enabled_);
+  changed |= (beamformer_enabled != beamformer_enabled_);
+  changed |=
+      (adaptive_gain_controller_enabled != adaptive_gain_controller_enabled_);
+  changed |=
+      (gain_controller2_enabled != gain_controller2_enabled_);
+  changed |= (echo_controller_enabled != echo_controller_enabled_);
+  changed |= (level_estimator_enabled != level_estimator_enabled_);
+  changed |=
+      (voice_activity_detector_enabled != voice_activity_detector_enabled_);
+  changed |= (transient_suppressor_enabled != transient_suppressor_enabled_);
+  if (changed) {
+    low_cut_filter_enabled_ = low_cut_filter_enabled;
+    echo_canceller_enabled_ = echo_canceller_enabled;
+    mobile_echo_controller_enabled_ = mobile_echo_controller_enabled;
+    residual_echo_detector_enabled_ = residual_echo_detector_enabled;
+    noise_suppressor_enabled_ = noise_suppressor_enabled;
+    intelligibility_enhancer_enabled_ = intelligibility_enhancer_enabled;
+    beamformer_enabled_ = beamformer_enabled;
+    adaptive_gain_controller_enabled_ = adaptive_gain_controller_enabled;
+    gain_controller2_enabled_ = gain_controller2_enabled;
+    echo_controller_enabled_ = echo_controller_enabled;
+    level_estimator_enabled_ = level_estimator_enabled;
+    voice_activity_detector_enabled_ = voice_activity_detector_enabled;
+    transient_suppressor_enabled_ = transient_suppressor_enabled;
+  }
+
+  changed |= first_update_;
+  first_update_ = false;
+  return changed;
+}
+
+bool AudioProcessingImpl::ApmSubmoduleStates::CaptureMultiBandSubModulesActive()
+    const {
+#if WEBRTC_INTELLIGIBILITY_ENHANCER
+  return CaptureMultiBandProcessingActive() ||
+         intelligibility_enhancer_enabled_ || voice_activity_detector_enabled_;
+#else
+  return CaptureMultiBandProcessingActive() || voice_activity_detector_enabled_;
+#endif
+}
+
+bool AudioProcessingImpl::ApmSubmoduleStates::CaptureMultiBandProcessingActive()
+    const {
+  return low_cut_filter_enabled_ || echo_canceller_enabled_ ||
+         mobile_echo_controller_enabled_ || noise_suppressor_enabled_ ||
+         beamformer_enabled_ || adaptive_gain_controller_enabled_ ||
+         echo_controller_enabled_;
+}
+
+bool AudioProcessingImpl::ApmSubmoduleStates::CaptureFullBandProcessingActive()
+    const {
+  return gain_controller2_enabled_ || capture_post_processor_enabled_;
+}
+
+bool AudioProcessingImpl::ApmSubmoduleStates::RenderMultiBandSubModulesActive()
+    const {
+  return RenderMultiBandProcessingActive() || echo_canceller_enabled_ ||
+         mobile_echo_controller_enabled_ || adaptive_gain_controller_enabled_ ||
+         echo_controller_enabled_;
+}
+
+bool AudioProcessingImpl::ApmSubmoduleStates::RenderFullBandProcessingActive()
+    const {
+  return render_pre_processor_enabled_;
+}
+
+bool AudioProcessingImpl::ApmSubmoduleStates::RenderMultiBandProcessingActive()
+    const {
+#if WEBRTC_INTELLIGIBILITY_ENHANCER
+  return intelligibility_enhancer_enabled_;
+#else
+  return false;
+#endif
+}
+
+struct AudioProcessingImpl::ApmPublicSubmodules {
+  ApmPublicSubmodules() {}
+  // Accessed externally of APM without any lock acquired.
+  std::unique_ptr<EchoCancellationImpl> echo_cancellation;
+  std::unique_ptr<EchoControlMobileImpl> echo_control_mobile;
+  std::unique_ptr<GainControlImpl> gain_control;
+  std::unique_ptr<LevelEstimatorImpl> level_estimator;
+  std::unique_ptr<NoiseSuppressionImpl> noise_suppression;
+  std::unique_ptr<VoiceDetectionImpl> voice_detection;
+  std::unique_ptr<GainControlForExperimentalAgc>
+      gain_control_for_experimental_agc;
+
+  // Accessed internally from both render and capture.
+  std::unique_ptr<TransientSuppressor> transient_suppressor;
+#if WEBRTC_INTELLIGIBILITY_ENHANCER
+  std::unique_ptr<IntelligibilityEnhancer> intelligibility_enhancer;
+#endif
+};
+
+struct AudioProcessingImpl::ApmPrivateSubmodules {
+  ApmPrivateSubmodules(NonlinearBeamformer* beamformer,
+                       std::unique_ptr<CustomProcessing> capture_post_processor,
+                       std::unique_ptr<CustomProcessing> render_pre_processor,
+                       std::unique_ptr<EchoDetector> echo_detector)
+      : beamformer(beamformer),
+        echo_detector(std::move(echo_detector)),
+        capture_post_processor(std::move(capture_post_processor)),
+        render_pre_processor(std::move(render_pre_processor)) {}
+  // Accessed internally from capture or during initialization
+  std::unique_ptr<NonlinearBeamformer> beamformer;
+  std::unique_ptr<AgcManagerDirect> agc_manager;
+  std::unique_ptr<GainController2> gain_controller2;
+  std::unique_ptr<LowCutFilter> low_cut_filter;
+  std::unique_ptr<EchoDetector> echo_detector;
+  std::unique_ptr<EchoControl> echo_controller;
+  std::unique_ptr<CustomProcessing> capture_post_processor;
+  std::unique_ptr<CustomProcessing> render_pre_processor;
+};
+
+AudioProcessingBuilder::AudioProcessingBuilder() = default;
+AudioProcessingBuilder::~AudioProcessingBuilder() = default;
+
+AudioProcessingBuilder& AudioProcessingBuilder::SetCapturePostProcessing(
+    std::unique_ptr<CustomProcessing> capture_post_processing) {
+  capture_post_processing_ = std::move(capture_post_processing);
+  return *this;
+}
+
+AudioProcessingBuilder& AudioProcessingBuilder::SetRenderPreProcessing(
+    std::unique_ptr<CustomProcessing> render_pre_processing) {
+  render_pre_processing_ = std::move(render_pre_processing);
+  return *this;
+}
+
+AudioProcessingBuilder& AudioProcessingBuilder::SetEchoControlFactory(
+    std::unique_ptr<EchoControlFactory> echo_control_factory) {
+  echo_control_factory_ = std::move(echo_control_factory);
+  return *this;
+}
+
+AudioProcessingBuilder& AudioProcessingBuilder::SetNonlinearBeamformer(
+    std::unique_ptr<NonlinearBeamformer> nonlinear_beamformer) {
+  nonlinear_beamformer_ = std::move(nonlinear_beamformer);
+  return *this;
+}
+
+AudioProcessingBuilder& AudioProcessingBuilder::SetEchoDetector(
+    std::unique_ptr<EchoDetector> echo_detector) {
+  echo_detector_ = std::move(echo_detector);
+  return *this;
+}
+
+AudioProcessing* AudioProcessingBuilder::Create() {
+  webrtc::Config config;
+  return Create(config);
+}
+
+AudioProcessing* AudioProcessingBuilder::Create(const webrtc::Config& config) {
+  AudioProcessingImpl* apm = new rtc::RefCountedObject<AudioProcessingImpl>(
+      config, std::move(capture_post_processing_),
+      std::move(render_pre_processing_), std::move(echo_control_factory_),
+      std::move(echo_detector_), nonlinear_beamformer_.release());
+  if (apm->Initialize() != AudioProcessing::kNoError) {
+    delete apm;
+    apm = nullptr;
+  }
+  return apm;
+}
+
+AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config)
+    : AudioProcessingImpl(config, nullptr, nullptr, nullptr, nullptr, nullptr) {
+}
+
+int AudioProcessingImpl::instance_count_ = 0;
+
+AudioProcessingImpl::AudioProcessingImpl(
+    const webrtc::Config& config,
+    std::unique_ptr<CustomProcessing> capture_post_processor,
+    std::unique_ptr<CustomProcessing> render_pre_processor,
+    std::unique_ptr<EchoControlFactory> echo_control_factory,
+    std::unique_ptr<EchoDetector> echo_detector,
+    NonlinearBeamformer* beamformer)
+    : data_dumper_(
+          new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))),
+      high_pass_filter_impl_(new HighPassFilterImpl(this)),
+      echo_control_factory_(std::move(echo_control_factory)),
+      submodule_states_(!!capture_post_processor, !!render_pre_processor),
+      public_submodules_(new ApmPublicSubmodules()),
+      private_submodules_(
+          new ApmPrivateSubmodules(beamformer,
+                                   std::move(capture_post_processor),
+                                   std::move(render_pre_processor),
+                                   std::move(echo_detector))),
+      constants_(config.Get<ExperimentalAgc>().startup_min_volume,
+                 config.Get<ExperimentalAgc>().clipped_level_min,
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
+                 false),
+#else
+                 config.Get<ExperimentalAgc>().enabled),
+#endif
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
+      capture_(false,
+#else
+      capture_(config.Get<ExperimentalNs>().enabled,
+#endif
+               config.Get<Beamforming>().array_geometry,
+               config.Get<Beamforming>().target_direction),
+      capture_nonlocked_(config.Get<Beamforming>().enabled,
+                         config.Get<Intelligibility>().enabled) {
+  {
+    rtc::CritScope cs_render(&crit_render_);
+    rtc::CritScope cs_capture(&crit_capture_);
+
+    // Mark Echo Controller enabled if a factory is injected.
+    capture_nonlocked_.echo_controller_enabled =
+        static_cast<bool>(echo_control_factory_);
+
+    public_submodules_->echo_cancellation.reset(
+        new EchoCancellationImpl(&crit_render_, &crit_capture_));
+    public_submodules_->echo_control_mobile.reset(
+        new EchoControlMobileImpl(&crit_render_, &crit_capture_));
+    public_submodules_->gain_control.reset(
+        new GainControlImpl(&crit_capture_, &crit_capture_));
+    public_submodules_->level_estimator.reset(
+        new LevelEstimatorImpl(&crit_capture_));
+    public_submodules_->noise_suppression.reset(
+        new NoiseSuppressionImpl(&crit_capture_));
+    public_submodules_->voice_detection.reset(
+        new VoiceDetectionImpl(&crit_capture_));
+    public_submodules_->gain_control_for_experimental_agc.reset(
+        new GainControlForExperimentalAgc(
+            public_submodules_->gain_control.get(), &crit_capture_));
+
+    // If no echo detector is injected, use the ResidualEchoDetector.
+    if (!private_submodules_->echo_detector) {
+      private_submodules_->echo_detector.reset(new ResidualEchoDetector());
+    }
+
+    // TODO(alessiob): Move the injected gain controller once injection is
+    // implemented.
+    private_submodules_->gain_controller2.reset(new GainController2());
+
+    RTC_LOG(LS_INFO) << "Capture post processor activated: "
+                     << !!private_submodules_->capture_post_processor
+                     << "\nRender pre processor activated: "
+                     << !!private_submodules_->render_pre_processor;
+  }
+
+  SetExtraOptions(config);
+}
+
+AudioProcessingImpl::~AudioProcessingImpl() {
+  // Depends on gain_control_ and
+  // public_submodules_->gain_control_for_experimental_agc.
+  private_submodules_->agc_manager.reset();
+  // Depends on gain_control_.
+  public_submodules_->gain_control_for_experimental_agc.reset();
+}
+
+int AudioProcessingImpl::Initialize() {
+  // Run in a single-threaded manner during initialization.
+  rtc::CritScope cs_render(&crit_render_);
+  rtc::CritScope cs_capture(&crit_capture_);
+  return InitializeLocked();
+}
+
+int AudioProcessingImpl::Initialize(int capture_input_sample_rate_hz,
+                                    int capture_output_sample_rate_hz,
+                                    int render_input_sample_rate_hz,
+                                    ChannelLayout capture_input_layout,
+                                    ChannelLayout capture_output_layout,
+                                    ChannelLayout render_input_layout) {
+  const ProcessingConfig processing_config = {
+      {{capture_input_sample_rate_hz, ChannelsFromLayout(capture_input_layout),
+        LayoutHasKeyboard(capture_input_layout)},
+       {capture_output_sample_rate_hz,
+        ChannelsFromLayout(capture_output_layout),
+        LayoutHasKeyboard(capture_output_layout)},
+       {render_input_sample_rate_hz, ChannelsFromLayout(render_input_layout),
+        LayoutHasKeyboard(render_input_layout)},
+       {render_input_sample_rate_hz, ChannelsFromLayout(render_input_layout),
+        LayoutHasKeyboard(render_input_layout)}}};
+
+  return Initialize(processing_config);
+}
+
+int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) {
+  // Run in a single-threaded manner during initialization.
+  rtc::CritScope cs_render(&crit_render_);
+  rtc::CritScope cs_capture(&crit_capture_);
+  return InitializeLocked(processing_config);
+}
+
+int AudioProcessingImpl::MaybeInitializeRender(
+    const ProcessingConfig& processing_config) {
+  return MaybeInitialize(processing_config, false);
+}
+
+int AudioProcessingImpl::MaybeInitializeCapture(
+    const ProcessingConfig& processing_config,
+    bool force_initialization) {
+  return MaybeInitialize(processing_config, force_initialization);
+}
+
+// Calls InitializeLocked() if any of the audio parameters have changed from
+// their current values (needs to be called while holding the crit_render_lock).
+int AudioProcessingImpl::MaybeInitialize(
+    const ProcessingConfig& processing_config,
+    bool force_initialization) {
+  // Called from both threads. Thread check is therefore not possible.
+  if (processing_config == formats_.api_format && !force_initialization) {
+    return kNoError;
+  }
+
+  rtc::CritScope cs_capture(&crit_capture_);
+  return InitializeLocked(processing_config);
+}
+
+int AudioProcessingImpl::InitializeLocked() {
+  UpdateActiveSubmoduleStates();
+
+  const int capture_audiobuffer_num_channels =
+      capture_nonlocked_.beamformer_enabled
+          ? formats_.api_format.input_stream().num_channels()
+          : formats_.api_format.output_stream().num_channels();
+
+  const int render_audiobuffer_num_output_frames =
+      formats_.api_format.reverse_output_stream().num_frames() == 0
+          ? formats_.render_processing_format.num_frames()
+          : formats_.api_format.reverse_output_stream().num_frames();
+  if (formats_.api_format.reverse_input_stream().num_channels() > 0) {
+    render_.render_audio.reset(new AudioBuffer(
+        formats_.api_format.reverse_input_stream().num_frames(),
+        formats_.api_format.reverse_input_stream().num_channels(),
+        formats_.render_processing_format.num_frames(),
+        formats_.render_processing_format.num_channels(),
+        render_audiobuffer_num_output_frames));
+    if (formats_.api_format.reverse_input_stream() !=
+        formats_.api_format.reverse_output_stream()) {
+      render_.render_converter = AudioConverter::Create(
+          formats_.api_format.reverse_input_stream().num_channels(),
+          formats_.api_format.reverse_input_stream().num_frames(),
+          formats_.api_format.reverse_output_stream().num_channels(),
+          formats_.api_format.reverse_output_stream().num_frames());
+    } else {
+      render_.render_converter.reset(nullptr);
+    }
+  } else {
+    render_.render_audio.reset(nullptr);
+    render_.render_converter.reset(nullptr);
+  }
+
+  capture_.capture_audio.reset(
+      new AudioBuffer(formats_.api_format.input_stream().num_frames(),
+                      formats_.api_format.input_stream().num_channels(),
+                      capture_nonlocked_.capture_processing_format.num_frames(),
+                      capture_audiobuffer_num_channels,
+                      formats_.api_format.output_stream().num_frames()));
+
+  public_submodules_->echo_cancellation->Initialize(
+      proc_sample_rate_hz(), num_reverse_channels(), num_output_channels(),
+      num_proc_channels());
+  AllocateRenderQueue();
+
+  int success = public_submodules_->echo_cancellation->enable_metrics(true);
+  RTC_DCHECK_EQ(0, success);
+  success = public_submodules_->echo_cancellation->enable_delay_logging(true);
+  RTC_DCHECK_EQ(0, success);
+  public_submodules_->echo_control_mobile->Initialize(
+      proc_split_sample_rate_hz(), num_reverse_channels(),
+      num_output_channels());
+
+  public_submodules_->gain_control->Initialize(num_proc_channels(),
+                                               proc_sample_rate_hz());
+  if (constants_.use_experimental_agc) {
+    if (!private_submodules_->agc_manager.get()) {
+      private_submodules_->agc_manager.reset(new AgcManagerDirect(
+          public_submodules_->gain_control.get(),
+          public_submodules_->gain_control_for_experimental_agc.get(),
+          constants_.agc_startup_min_volume, constants_.agc_clipped_level_min));
+    }
+    private_submodules_->agc_manager->Initialize();
+    private_submodules_->agc_manager->SetCaptureMuted(
+        capture_.output_will_be_muted);
+    public_submodules_->gain_control_for_experimental_agc->Initialize();
+  }
+  InitializeTransient();
+  InitializeBeamformer();
+#if WEBRTC_INTELLIGIBILITY_ENHANCER
+  InitializeIntelligibility();
+#endif
+  InitializeLowCutFilter();
+  public_submodules_->noise_suppression->Initialize(num_proc_channels(),
+                                                    proc_sample_rate_hz());
+  public_submodules_->voice_detection->Initialize(proc_split_sample_rate_hz());
+  public_submodules_->level_estimator->Initialize();
+  InitializeResidualEchoDetector();
+  InitializeEchoController();
+  InitializeGainController2();
+  InitializePostProcessor();
+  InitializePreProcessor();
+
+  if (aec_dump_) {
+    aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format));
+  }
+  return kNoError;
+}
+
+int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) {
+  UpdateActiveSubmoduleStates();
+
+  for (const auto& stream : config.streams) {
+    if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) {
+      return kBadSampleRateError;
+    }
+  }
+
+  const size_t num_in_channels = config.input_stream().num_channels();
+  const size_t num_out_channels = config.output_stream().num_channels();
+
+  // Need at least one input channel.
+  // Need either one output channel or as many outputs as there are inputs.
+  if (num_in_channels == 0 ||
+      !(num_out_channels == 1 || num_out_channels == num_in_channels)) {
+    return kBadNumberChannelsError;
+  }
+
+  if (capture_nonlocked_.beamformer_enabled &&
+      num_in_channels != capture_.array_geometry.size()) {
+    return kBadNumberChannelsError;
+  }
+
+  formats_.api_format = config;
+
+  int capture_processing_rate = FindNativeProcessRateToUse(
+      std::min(formats_.api_format.input_stream().sample_rate_hz(),
+               formats_.api_format.output_stream().sample_rate_hz()),
+      submodule_states_.CaptureMultiBandSubModulesActive() ||
+          submodule_states_.RenderMultiBandSubModulesActive());
+
+  capture_nonlocked_.capture_processing_format =
+      StreamConfig(capture_processing_rate);
+
+  int render_processing_rate;
+  if (!capture_nonlocked_.echo_controller_enabled) {
+    render_processing_rate = FindNativeProcessRateToUse(
+        std::min(formats_.api_format.reverse_input_stream().sample_rate_hz(),
+                 formats_.api_format.reverse_output_stream().sample_rate_hz()),
+        submodule_states_.CaptureMultiBandSubModulesActive() ||
+            submodule_states_.RenderMultiBandSubModulesActive());
+  } else {
+    render_processing_rate = capture_processing_rate;
+  }
+
+  // TODO(aluebs): Remove this restriction once we figure out why the 3-band
+  // splitting filter degrades the AEC performance.
+  if (render_processing_rate > kSampleRate32kHz &&
+      !capture_nonlocked_.echo_controller_enabled) {
+    render_processing_rate = submodule_states_.RenderMultiBandProcessingActive()
+                                 ? kSampleRate32kHz
+                                 : kSampleRate16kHz;
+  }
+
+  // If the forward sample rate is 8 kHz, the render stream is also processed
+  // at this rate.
+  if (capture_nonlocked_.capture_processing_format.sample_rate_hz() ==
+      kSampleRate8kHz) {
+    render_processing_rate = kSampleRate8kHz;
+  } else {
+    render_processing_rate =
+        std::max(render_processing_rate, static_cast<int>(kSampleRate16kHz));
+  }
+
+  // Always downmix the render stream to mono for analysis. This has been
+  // demonstrated to work well for AEC in most practical scenarios.
+  if (submodule_states_.RenderMultiBandSubModulesActive()) {
+    formats_.render_processing_format = StreamConfig(render_processing_rate, 1);
+  } else {
+    formats_.render_processing_format = StreamConfig(
+        formats_.api_format.reverse_input_stream().sample_rate_hz(),
+        formats_.api_format.reverse_input_stream().num_channels());
+  }
+
+  if (capture_nonlocked_.capture_processing_format.sample_rate_hz() ==
+          kSampleRate32kHz ||
+      capture_nonlocked_.capture_processing_format.sample_rate_hz() ==
+          kSampleRate48kHz) {
+    capture_nonlocked_.split_rate = kSampleRate16kHz;
+  } else {
+    capture_nonlocked_.split_rate =
+        capture_nonlocked_.capture_processing_format.sample_rate_hz();
+  }
+
+  return InitializeLocked();
+}
+
+void AudioProcessingImpl::ApplyConfig(const AudioProcessing::Config& config) {
+  config_ = config;
+
+  // Run in a single-threaded manner when applying the settings.
+  rtc::CritScope cs_render(&crit_render_);
+  rtc::CritScope cs_capture(&crit_capture_);
+
+  InitializeLowCutFilter();
+
+  RTC_LOG(LS_INFO) << "Highpass filter activated: "
+                   << config_.high_pass_filter.enabled;
+
+  const bool config_ok = GainController2::Validate(config_.gain_controller2);
+  if (!config_ok) {
+    RTC_LOG(LS_ERROR) << "AudioProcessing module config error\n"
+                         "Gain Controller 2: "
+                      << GainController2::ToString(config_.gain_controller2)
+                      << "\nReverting to default parameter set";
+    config_.gain_controller2 = AudioProcessing::Config::GainController2();
+  }
+  InitializeGainController2();
+  private_submodules_->gain_controller2->ApplyConfig(config_.gain_controller2);
+  RTC_LOG(LS_INFO) << "Gain Controller 2 activated: "
+                   << config_.gain_controller2.enabled;
+}
+
+void AudioProcessingImpl::SetExtraOptions(const webrtc::Config& config) {
+  // Run in a single-threaded manner when setting the extra options.
+  rtc::CritScope cs_render(&crit_render_);
+  rtc::CritScope cs_capture(&crit_capture_);
+
+  public_submodules_->echo_cancellation->SetExtraOptions(config);
+
+  if (capture_.transient_suppressor_enabled !=
+      config.Get<ExperimentalNs>().enabled) {
+    capture_.transient_suppressor_enabled =
+        config.Get<ExperimentalNs>().enabled;
+    InitializeTransient();
+  }
+
+#if WEBRTC_INTELLIGIBILITY_ENHANCER
+  if (capture_nonlocked_.intelligibility_enabled !=
+     config.Get<Intelligibility>().enabled) {
+    capture_nonlocked_.intelligibility_enabled =
+        config.Get<Intelligibility>().enabled;
+    InitializeIntelligibility();
+  }
+#endif
+
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+  if (capture_nonlocked_.beamformer_enabled !=
+          config.Get<Beamforming>().enabled) {
+    capture_nonlocked_.beamformer_enabled = config.Get<Beamforming>().enabled;
+    if (config.Get<Beamforming>().array_geometry.size() > 1) {
+      capture_.array_geometry = config.Get<Beamforming>().array_geometry;
+    }
+    capture_.target_direction = config.Get<Beamforming>().target_direction;
+    InitializeBeamformer();
+  }
+#endif  // WEBRTC_ANDROID_PLATFORM_BUILD
+}
+
+int AudioProcessingImpl::proc_sample_rate_hz() const {
+  // Used as callback from submodules, hence locking is not allowed.
+  return capture_nonlocked_.capture_processing_format.sample_rate_hz();
+}
+
+int AudioProcessingImpl::proc_split_sample_rate_hz() const {
+  // Used as callback from submodules, hence locking is not allowed.
+  return capture_nonlocked_.split_rate;
+}
+
+size_t AudioProcessingImpl::num_reverse_channels() const {
+  // Used as callback from submodules, hence locking is not allowed.
+  return formats_.render_processing_format.num_channels();
+}
+
+size_t AudioProcessingImpl::num_input_channels() const {
+  // Used as callback from submodules, hence locking is not allowed.
+  return formats_.api_format.input_stream().num_channels();
+}
+
+size_t AudioProcessingImpl::num_proc_channels() const {
+  // Used as callback from submodules, hence locking is not allowed.
+  return (capture_nonlocked_.beamformer_enabled ||
+          capture_nonlocked_.echo_controller_enabled)
+             ? 1
+             : num_output_channels();
+}
+
+size_t AudioProcessingImpl::num_output_channels() const {
+  // Used as callback from submodules, hence locking is not allowed.
+  return formats_.api_format.output_stream().num_channels();
+}
+
+void AudioProcessingImpl::set_output_will_be_muted(bool muted) {
+  rtc::CritScope cs(&crit_capture_);
+  capture_.output_will_be_muted = muted;
+  if (private_submodules_->agc_manager.get()) {
+    private_submodules_->agc_manager->SetCaptureMuted(
+        capture_.output_will_be_muted);
+  }
+}
+
+
+int AudioProcessingImpl::ProcessStream(const float* const* src,
+                                       size_t samples_per_channel,
+                                       int input_sample_rate_hz,
+                                       ChannelLayout input_layout,
+                                       int output_sample_rate_hz,
+                                       ChannelLayout output_layout,
+                                       float* const* dest) {
+  TRACE_EVENT0("webrtc", "AudioProcessing::ProcessStream_ChannelLayout");
+  StreamConfig input_stream;
+  StreamConfig output_stream;
+  {
+    // Access the formats_.api_format.input_stream beneath the capture lock.
+    // The lock must be released as it is later required in the call
+    // to ProcessStream(,,,);
+    rtc::CritScope cs(&crit_capture_);
+    input_stream = formats_.api_format.input_stream();
+    output_stream = formats_.api_format.output_stream();
+  }
+
+  input_stream.set_sample_rate_hz(input_sample_rate_hz);
+  input_stream.set_num_channels(ChannelsFromLayout(input_layout));
+  input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout));
+  output_stream.set_sample_rate_hz(output_sample_rate_hz);
+  output_stream.set_num_channels(ChannelsFromLayout(output_layout));
+  output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout));
+
+  if (samples_per_channel != input_stream.num_frames()) {
+    return kBadDataLengthError;
+  }
+  return ProcessStream(src, input_stream, output_stream, dest);
+}
+
+int AudioProcessingImpl::ProcessStream(const float* const* src,
+                                       const StreamConfig& input_config,
+                                       const StreamConfig& output_config,
+                                       float* const* dest) {
+  TRACE_EVENT0("webrtc", "AudioProcessing::ProcessStream_StreamConfig");
+  ProcessingConfig processing_config;
+  bool reinitialization_required = false;
+  {
+    // Acquire the capture lock in order to safely call the function
+    // that retrieves the render side data. This function accesses apm
+    // getters that need the capture lock held when being called.
+    rtc::CritScope cs_capture(&crit_capture_);
+    EmptyQueuedRenderAudio();
+
+    if (!src || !dest) {
+      return kNullPointerError;
+    }
+
+    processing_config = formats_.api_format;
+    reinitialization_required = UpdateActiveSubmoduleStates();
+  }
+
+  processing_config.input_stream() = input_config;
+  processing_config.output_stream() = output_config;
+
+  {
+    // Do conditional reinitialization.
+    rtc::CritScope cs_render(&crit_render_);
+    RETURN_ON_ERR(
+        MaybeInitializeCapture(processing_config, reinitialization_required));
+  }
+  rtc::CritScope cs_capture(&crit_capture_);
+  RTC_DCHECK_EQ(processing_config.input_stream().num_frames(),
+                formats_.api_format.input_stream().num_frames());
+
+  if (aec_dump_) {
+    RecordUnprocessedCaptureStream(src);
+  }
+
+  capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream());
+  RETURN_ON_ERR(ProcessCaptureStreamLocked());
+  capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest);
+
+  if (aec_dump_) {
+    RecordProcessedCaptureStream(dest);
+  }
+  return kNoError;
+}
+
+void AudioProcessingImpl::QueueBandedRenderAudio(AudioBuffer* audio) {
+  EchoCancellationImpl::PackRenderAudioBuffer(audio, num_output_channels(),
+                                              num_reverse_channels(),
+                                              &aec_render_queue_buffer_);
+
+  RTC_DCHECK_GE(160, audio->num_frames_per_band());
+
+  // Insert the samples into the queue.
+  if (!aec_render_signal_queue_->Insert(&aec_render_queue_buffer_)) {
+    // The data queue is full and needs to be emptied.
+    EmptyQueuedRenderAudio();
+
+    // Retry the insert (should always work).
+    bool result = aec_render_signal_queue_->Insert(&aec_render_queue_buffer_);
+    RTC_DCHECK(result);
+  }
+
+  EchoControlMobileImpl::PackRenderAudioBuffer(audio, num_output_channels(),
+                                               num_reverse_channels(),
+                                               &aecm_render_queue_buffer_);
+
+  // Insert the samples into the queue.
+  if (!aecm_render_signal_queue_->Insert(&aecm_render_queue_buffer_)) {
+    // The data queue is full and needs to be emptied.
+    EmptyQueuedRenderAudio();
+
+    // Retry the insert (should always work).
+    bool result = aecm_render_signal_queue_->Insert(&aecm_render_queue_buffer_);
+    RTC_DCHECK(result);
+  }
+
+  if (!constants_.use_experimental_agc) {
+    GainControlImpl::PackRenderAudioBuffer(audio, &agc_render_queue_buffer_);
+    // Insert the samples into the queue.
+    if (!agc_render_signal_queue_->Insert(&agc_render_queue_buffer_)) {
+      // The data queue is full and needs to be emptied.
+      EmptyQueuedRenderAudio();
+
+      // Retry the insert (should always work).
+      bool result = agc_render_signal_queue_->Insert(&agc_render_queue_buffer_);
+      RTC_DCHECK(result);
+    }
+  }
+}
+
+void AudioProcessingImpl::QueueNonbandedRenderAudio(AudioBuffer* audio) {
+  ResidualEchoDetector::PackRenderAudioBuffer(audio, &red_render_queue_buffer_);
+
+  // Insert the samples into the queue.
+  if (!red_render_signal_queue_->Insert(&red_render_queue_buffer_)) {
+    // The data queue is full and needs to be emptied.
+    EmptyQueuedRenderAudio();
+
+    // Retry the insert (should always work).
+    bool result = red_render_signal_queue_->Insert(&red_render_queue_buffer_);
+    RTC_DCHECK(result);
+  }
+}
+
+void AudioProcessingImpl::AllocateRenderQueue() {
+  const size_t new_aec_render_queue_element_max_size =
+      std::max(static_cast<size_t>(1),
+               kMaxAllowedValuesOfSamplesPerBand *
+                   EchoCancellationImpl::NumCancellersRequired(
+                       num_output_channels(), num_reverse_channels()));
+
+  const size_t new_aecm_render_queue_element_max_size =
+      std::max(static_cast<size_t>(1),
+               kMaxAllowedValuesOfSamplesPerBand *
+                   EchoControlMobileImpl::NumCancellersRequired(
+                       num_output_channels(), num_reverse_channels()));
+
+  const size_t new_agc_render_queue_element_max_size =
+      std::max(static_cast<size_t>(1), kMaxAllowedValuesOfSamplesPerBand);
+
+  const size_t new_red_render_queue_element_max_size =
+      std::max(static_cast<size_t>(1), kMaxAllowedValuesOfSamplesPerFrame);
+
+  // Reallocate the queues if the queue item sizes are too small to fit the
+  // data to put in the queues.
+  if (aec_render_queue_element_max_size_ <
+      new_aec_render_queue_element_max_size) {
+    aec_render_queue_element_max_size_ = new_aec_render_queue_element_max_size;
+
+    std::vector<float> template_queue_element(
+        aec_render_queue_element_max_size_);
+
+    aec_render_signal_queue_.reset(
+        new SwapQueue<std::vector<float>, RenderQueueItemVerifier<float>>(
+            kMaxNumFramesToBuffer, template_queue_element,
+            RenderQueueItemVerifier<float>(
+                aec_render_queue_element_max_size_)));
+
+    aec_render_queue_buffer_.resize(aec_render_queue_element_max_size_);
+    aec_capture_queue_buffer_.resize(aec_render_queue_element_max_size_);
+  } else {
+    aec_render_signal_queue_->Clear();
+  }
+
+  if (aecm_render_queue_element_max_size_ <
+      new_aecm_render_queue_element_max_size) {
+    aecm_render_queue_element_max_size_ =
+        new_aecm_render_queue_element_max_size;
+
+    std::vector<int16_t> template_queue_element(
+        aecm_render_queue_element_max_size_);
+
+    aecm_render_signal_queue_.reset(
+        new SwapQueue<std::vector<int16_t>, RenderQueueItemVerifier<int16_t>>(
+            kMaxNumFramesToBuffer, template_queue_element,
+            RenderQueueItemVerifier<int16_t>(
+                aecm_render_queue_element_max_size_)));
+
+    aecm_render_queue_buffer_.resize(aecm_render_queue_element_max_size_);
+    aecm_capture_queue_buffer_.resize(aecm_render_queue_element_max_size_);
+  } else {
+    aecm_render_signal_queue_->Clear();
+  }
+
+  if (agc_render_queue_element_max_size_ <
+      new_agc_render_queue_element_max_size) {
+    agc_render_queue_element_max_size_ = new_agc_render_queue_element_max_size;
+
+    std::vector<int16_t> template_queue_element(
+        agc_render_queue_element_max_size_);
+
+    agc_render_signal_queue_.reset(
+        new SwapQueue<std::vector<int16_t>, RenderQueueItemVerifier<int16_t>>(
+            kMaxNumFramesToBuffer, template_queue_element,
+            RenderQueueItemVerifier<int16_t>(
+                agc_render_queue_element_max_size_)));
+
+    agc_render_queue_buffer_.resize(agc_render_queue_element_max_size_);
+    agc_capture_queue_buffer_.resize(agc_render_queue_element_max_size_);
+  } else {
+    agc_render_signal_queue_->Clear();
+  }
+
+  if (red_render_queue_element_max_size_ <
+      new_red_render_queue_element_max_size) {
+    red_render_queue_element_max_size_ = new_red_render_queue_element_max_size;
+
+    std::vector<float> template_queue_element(
+        red_render_queue_element_max_size_);
+
+    red_render_signal_queue_.reset(
+        new SwapQueue<std::vector<float>, RenderQueueItemVerifier<float>>(
+            kMaxNumFramesToBuffer, template_queue_element,
+            RenderQueueItemVerifier<float>(
+                red_render_queue_element_max_size_)));
+
+    red_render_queue_buffer_.resize(red_render_queue_element_max_size_);
+    red_capture_queue_buffer_.resize(red_render_queue_element_max_size_);
+  } else {
+    red_render_signal_queue_->Clear();
+  }
+}
+
+void AudioProcessingImpl::EmptyQueuedRenderAudio() {
+  rtc::CritScope cs_capture(&crit_capture_);
+  while (aec_render_signal_queue_->Remove(&aec_capture_queue_buffer_)) {
+    public_submodules_->echo_cancellation->ProcessRenderAudio(
+        aec_capture_queue_buffer_);
+  }
+
+  while (aecm_render_signal_queue_->Remove(&aecm_capture_queue_buffer_)) {
+    public_submodules_->echo_control_mobile->ProcessRenderAudio(
+        aecm_capture_queue_buffer_);
+  }
+
+  while (agc_render_signal_queue_->Remove(&agc_capture_queue_buffer_)) {
+    public_submodules_->gain_control->ProcessRenderAudio(
+        agc_capture_queue_buffer_);
+  }
+
+  while (red_render_signal_queue_->Remove(&red_capture_queue_buffer_)) {
+    RTC_DCHECK(private_submodules_->echo_detector);
+    private_submodules_->echo_detector->AnalyzeRenderAudio(
+        red_capture_queue_buffer_);
+  }
+}
+
+int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
+  TRACE_EVENT0("webrtc", "AudioProcessing::ProcessStream_AudioFrame");
+  {
+    // Acquire the capture lock in order to safely call the function
+    // that retrieves the render side data. This function accesses apm
+    // getters that need the capture lock held when being called.
+    // The lock needs to be released as
+    // public_submodules_->echo_control_mobile->is_enabled() aquires this lock
+    // as well.
+    rtc::CritScope cs_capture(&crit_capture_);
+    EmptyQueuedRenderAudio();
+  }
+
+  if (!frame) {
+    return kNullPointerError;
+  }
+  // Must be a native rate.
+  if (frame->sample_rate_hz_ != kSampleRate8kHz &&
+      frame->sample_rate_hz_ != kSampleRate16kHz &&
+      frame->sample_rate_hz_ != kSampleRate32kHz &&
+      frame->sample_rate_hz_ != kSampleRate48kHz) {
+    return kBadSampleRateError;
+  }
+
+  ProcessingConfig processing_config;
+  bool reinitialization_required = false;
+  {
+    // Aquire lock for the access of api_format.
+    // The lock is released immediately due to the conditional
+    // reinitialization.
+    rtc::CritScope cs_capture(&crit_capture_);
+    // TODO(ajm): The input and output rates and channels are currently
+    // constrained to be identical in the int16 interface.
+    processing_config = formats_.api_format;
+
+    reinitialization_required = UpdateActiveSubmoduleStates();
+  }
+  processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_);
+  processing_config.input_stream().set_num_channels(frame->num_channels_);
+  processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_);
+  processing_config.output_stream().set_num_channels(frame->num_channels_);
+
+  {
+    // Do conditional reinitialization.
+    rtc::CritScope cs_render(&crit_render_);
+    RETURN_ON_ERR(
+        MaybeInitializeCapture(processing_config, reinitialization_required));
+  }
+  rtc::CritScope cs_capture(&crit_capture_);
+  if (frame->samples_per_channel_ !=
+      formats_.api_format.input_stream().num_frames()) {
+    return kBadDataLengthError;
+  }
+
+  if (aec_dump_) {
+    RecordUnprocessedCaptureStream(*frame);
+  }
+
+  capture_.capture_audio->DeinterleaveFrom(frame);
+  RETURN_ON_ERR(ProcessCaptureStreamLocked());
+  capture_.capture_audio->InterleaveTo(
+      frame, submodule_states_.CaptureMultiBandProcessingActive() ||
+                 submodule_states_.CaptureFullBandProcessingActive());
+
+  if (aec_dump_) {
+    RecordProcessedCaptureStream(*frame);
+  }
+
+  return kNoError;
+}
+
+int AudioProcessingImpl::ProcessCaptureStreamLocked() {
+  // Ensure that not both the AEC and AECM are active at the same time.
+  // TODO(peah): Simplify once the public API Enable functions for these
+  // are moved to APM.
+  RTC_DCHECK(!(public_submodules_->echo_cancellation->is_enabled() &&
+               public_submodules_->echo_control_mobile->is_enabled()));
+
+  MaybeUpdateHistograms();
+
+  AudioBuffer* capture_buffer = capture_.capture_audio.get();  // For brevity.
+
+  capture_input_rms_.Analyze(rtc::ArrayView<const int16_t>(
+      capture_buffer->channels_const()[0],
+      capture_nonlocked_.capture_processing_format.num_frames()));
+  const bool log_rms = ++capture_rms_interval_counter_ >= 1000;
+  if (log_rms) {
+    capture_rms_interval_counter_ = 0;
+    RmsLevel::Levels levels = capture_input_rms_.AverageAndPeak();
+    RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.ApmCaptureInputLevelAverageRms",
+                                levels.average, 1, RmsLevel::kMinLevelDb, 64);
+    RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.ApmCaptureInputLevelPeakRms",
+                                levels.peak, 1, RmsLevel::kMinLevelDb, 64);
+  }
+
+  if (private_submodules_->echo_controller) {
+    // TODO(peah): Reactivate analogue AGC gain detection once the analogue AGC
+    // issues have been addressed.
+    capture_.echo_path_gain_change = false;
+    private_submodules_->echo_controller->AnalyzeCapture(capture_buffer);
+  }
+
+  if (constants_.use_experimental_agc &&
+      public_submodules_->gain_control->is_enabled()) {
+    private_submodules_->agc_manager->AnalyzePreProcess(
+        capture_buffer->channels()[0], capture_buffer->num_channels(),
+        capture_nonlocked_.capture_processing_format.num_frames());
+  }
+
+  if (submodule_states_.CaptureMultiBandSubModulesActive() &&
+      SampleRateSupportsMultiBand(
+          capture_nonlocked_.capture_processing_format.sample_rate_hz())) {
+    capture_buffer->SplitIntoFrequencyBands();
+  }
+
+  if (private_submodules_->echo_controller) {
+    // Force down-mixing of the number of channels after the detection of
+    // capture signal saturation.
+    // TODO(peah): Look into ensuring that this kind of tampering with the
+    // AudioBuffer functionality should not be needed.
+    capture_buffer->set_num_channels(1);
+  }
+
+  if (capture_nonlocked_.beamformer_enabled) {
+    private_submodules_->beamformer->AnalyzeChunk(
+        *capture_buffer->split_data_f());
+    // Discards all channels by the leftmost one.
+    capture_buffer->set_num_channels(1);
+  }
+
+  // TODO(peah): Move the AEC3 low-cut filter to this place.
+  if (private_submodules_->low_cut_filter &&
+      !private_submodules_->echo_controller) {
+    private_submodules_->low_cut_filter->Process(capture_buffer);
+  }
+  RETURN_ON_ERR(
+      public_submodules_->gain_control->AnalyzeCaptureAudio(capture_buffer));
+  public_submodules_->noise_suppression->AnalyzeCaptureAudio(capture_buffer);
+
+  // Ensure that the stream delay was set before the call to the
+  // AEC ProcessCaptureAudio function.
+  if (public_submodules_->echo_cancellation->is_enabled() &&
+      !was_stream_delay_set()) {
+    return AudioProcessing::kStreamParameterNotSetError;
+  }
+
+  if (private_submodules_->echo_controller) {
+    data_dumper_->DumpRaw("stream_delay", stream_delay_ms());
+
+    private_submodules_->echo_controller->ProcessCapture(
+        capture_buffer, capture_.echo_path_gain_change);
+  } else {
+    RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessCaptureAudio(
+        capture_buffer, stream_delay_ms()));
+  }
+
+  if (public_submodules_->echo_control_mobile->is_enabled() &&
+      public_submodules_->noise_suppression->is_enabled()) {
+    capture_buffer->CopyLowPassToReference();
+  }
+  public_submodules_->noise_suppression->ProcessCaptureAudio(capture_buffer);
+#if WEBRTC_INTELLIGIBILITY_ENHANCER
+  if (capture_nonlocked_.intelligibility_enabled) {
+    RTC_DCHECK(public_submodules_->noise_suppression->is_enabled());
+    const int gain_db =
+        public_submodules_->gain_control->is_enabled()
+            ? public_submodules_->gain_control->compression_gain_db()
+            : 0;
+    const float gain = DbToRatio(gain_db);
+    public_submodules_->intelligibility_enhancer->SetCaptureNoiseEstimate(
+        public_submodules_->noise_suppression->NoiseEstimate(), gain);
+  }
+#endif
+
+  // Ensure that the stream delay was set before the call to the
+  // AECM ProcessCaptureAudio function.
+  if (public_submodules_->echo_control_mobile->is_enabled() &&
+      !was_stream_delay_set()) {
+    return AudioProcessing::kStreamParameterNotSetError;
+  }
+
+  if (!(private_submodules_->echo_controller ||
+        public_submodules_->echo_cancellation->is_enabled())) {
+    RETURN_ON_ERR(public_submodules_->echo_control_mobile->ProcessCaptureAudio(
+        capture_buffer, stream_delay_ms()));
+  }
+
+  if (capture_nonlocked_.beamformer_enabled) {
+    private_submodules_->beamformer->PostFilter(capture_buffer->split_data_f());
+  }
+
+  public_submodules_->voice_detection->ProcessCaptureAudio(capture_buffer);
+
+  if (constants_.use_experimental_agc &&
+      public_submodules_->gain_control->is_enabled() &&
+      (!capture_nonlocked_.beamformer_enabled ||
+       private_submodules_->beamformer->is_target_present())) {
+    private_submodules_->agc_manager->Process(
+        capture_buffer->split_bands_const(0)[kBand0To8kHz],
+        capture_buffer->num_frames_per_band(), capture_nonlocked_.split_rate);
+  }
+  RETURN_ON_ERR(public_submodules_->gain_control->ProcessCaptureAudio(
+      capture_buffer, echo_cancellation()->stream_has_echo()));
+
+  if (submodule_states_.CaptureMultiBandProcessingActive() &&
+      SampleRateSupportsMultiBand(
+          capture_nonlocked_.capture_processing_format.sample_rate_hz())) {
+    capture_buffer->MergeFrequencyBands();
+  }
+
+  if (config_.residual_echo_detector.enabled) {
+    RTC_DCHECK(private_submodules_->echo_detector);
+    private_submodules_->echo_detector->AnalyzeCaptureAudio(
+        rtc::ArrayView<const float>(capture_buffer->channels_f()[0],
+                                    capture_buffer->num_frames()));
+  }
+
+  // TODO(aluebs): Investigate if the transient suppression placement should be
+  // before or after the AGC.
+  if (capture_.transient_suppressor_enabled) {
+    float voice_probability =
+        private_submodules_->agc_manager.get()
+            ? private_submodules_->agc_manager->voice_probability()
+            : 1.f;
+
+    public_submodules_->transient_suppressor->Suppress(
+        capture_buffer->channels_f()[0], capture_buffer->num_frames(),
+        capture_buffer->num_channels(),
+        capture_buffer->split_bands_const_f(0)[kBand0To8kHz],
+        capture_buffer->num_frames_per_band(), capture_buffer->keyboard_data(),
+        capture_buffer->num_keyboard_frames(), voice_probability,
+        capture_.key_pressed);
+  }
+
+  if (config_.gain_controller2.enabled) {
+    private_submodules_->gain_controller2->Process(capture_buffer);
+  }
+
+  if (private_submodules_->capture_post_processor) {
+    private_submodules_->capture_post_processor->Process(capture_buffer);
+  }
+
+  // The level estimator operates on the recombined data.
+  public_submodules_->level_estimator->ProcessStream(capture_buffer);
+
+  capture_output_rms_.Analyze(rtc::ArrayView<const int16_t>(
+      capture_buffer->channels_const()[0],
+      capture_nonlocked_.capture_processing_format.num_frames()));
+  if (log_rms) {
+    RmsLevel::Levels levels = capture_output_rms_.AverageAndPeak();
+    RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.ApmCaptureOutputLevelAverageRms",
+                                levels.average, 1, RmsLevel::kMinLevelDb, 64);
+    RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.ApmCaptureOutputLevelPeakRms",
+                                levels.peak, 1, RmsLevel::kMinLevelDb, 64);
+  }
+
+  capture_.was_stream_delay_set = false;
+  return kNoError;
+}
+
+int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data,
+                                              size_t samples_per_channel,
+                                              int sample_rate_hz,
+                                              ChannelLayout layout) {
+  TRACE_EVENT0("webrtc", "AudioProcessing::AnalyzeReverseStream_ChannelLayout");
+  rtc::CritScope cs(&crit_render_);
+  const StreamConfig reverse_config = {
+      sample_rate_hz, ChannelsFromLayout(layout), LayoutHasKeyboard(layout),
+  };
+  if (samples_per_channel != reverse_config.num_frames()) {
+    return kBadDataLengthError;
+  }
+  return AnalyzeReverseStreamLocked(data, reverse_config, reverse_config);
+}
+
+int AudioProcessingImpl::ProcessReverseStream(const float* const* src,
+                                              const StreamConfig& input_config,
+                                              const StreamConfig& output_config,
+                                              float* const* dest) {
+  TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_StreamConfig");
+  rtc::CritScope cs(&crit_render_);
+  RETURN_ON_ERR(AnalyzeReverseStreamLocked(src, input_config, output_config));
+  if (submodule_states_.RenderMultiBandProcessingActive() ||
+      submodule_states_.RenderFullBandProcessingActive()) {
+    render_.render_audio->CopyTo(formats_.api_format.reverse_output_stream(),
+                                 dest);
+  } else if (formats_.api_format.reverse_input_stream() !=
+             formats_.api_format.reverse_output_stream()) {
+    render_.render_converter->Convert(src, input_config.num_samples(), dest,
+                                      output_config.num_samples());
+  } else {
+    CopyAudioIfNeeded(src, input_config.num_frames(),
+                      input_config.num_channels(), dest);
+  }
+
+  return kNoError;
+}
+
+int AudioProcessingImpl::AnalyzeReverseStreamLocked(
+    const float* const* src,
+    const StreamConfig& input_config,
+    const StreamConfig& output_config) {
+  if (src == nullptr) {
+    return kNullPointerError;
+  }
+
+  if (input_config.num_channels() == 0) {
+    return kBadNumberChannelsError;
+  }
+
+  ProcessingConfig processing_config = formats_.api_format;
+  processing_config.reverse_input_stream() = input_config;
+  processing_config.reverse_output_stream() = output_config;
+
+  RETURN_ON_ERR(MaybeInitializeRender(processing_config));
+  assert(input_config.num_frames() ==
+         formats_.api_format.reverse_input_stream().num_frames());
+
+  if (aec_dump_) {
+    const size_t channel_size =
+        formats_.api_format.reverse_input_stream().num_frames();
+    const size_t num_channels =
+        formats_.api_format.reverse_input_stream().num_channels();
+    aec_dump_->WriteRenderStreamMessage(
+        AudioFrameView<const float>(src, num_channels, channel_size));
+  }
+  render_.render_audio->CopyFrom(src,
+                                 formats_.api_format.reverse_input_stream());
+  return ProcessRenderStreamLocked();
+}
+
+int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) {
+  TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame");
+  rtc::CritScope cs(&crit_render_);
+  if (frame == nullptr) {
+    return kNullPointerError;
+  }
+  // Must be a native rate.
+  if (frame->sample_rate_hz_ != kSampleRate8kHz &&
+      frame->sample_rate_hz_ != kSampleRate16kHz &&
+      frame->sample_rate_hz_ != kSampleRate32kHz &&
+      frame->sample_rate_hz_ != kSampleRate48kHz) {
+    return kBadSampleRateError;
+  }
+
+  if (frame->num_channels_ <= 0) {
+    return kBadNumberChannelsError;
+  }
+
+  ProcessingConfig processing_config = formats_.api_format;
+  processing_config.reverse_input_stream().set_sample_rate_hz(
+      frame->sample_rate_hz_);
+  processing_config.reverse_input_stream().set_num_channels(
+      frame->num_channels_);
+  processing_config.reverse_output_stream().set_sample_rate_hz(
+      frame->sample_rate_hz_);
+  processing_config.reverse_output_stream().set_num_channels(
+      frame->num_channels_);
+
+  RETURN_ON_ERR(MaybeInitializeRender(processing_config));
+  if (frame->samples_per_channel_ !=
+      formats_.api_format.reverse_input_stream().num_frames()) {
+    return kBadDataLengthError;
+  }
+
+  if (aec_dump_) {
+    aec_dump_->WriteRenderStreamMessage(*frame);
+  }
+
+  render_.render_audio->DeinterleaveFrom(frame);
+  RETURN_ON_ERR(ProcessRenderStreamLocked());
+  render_.render_audio->InterleaveTo(
+      frame, submodule_states_.RenderMultiBandProcessingActive() ||
+                 submodule_states_.RenderFullBandProcessingActive());
+  return kNoError;
+}
+
+int AudioProcessingImpl::ProcessRenderStreamLocked() {
+  AudioBuffer* render_buffer = render_.render_audio.get();  // For brevity.
+
+  QueueNonbandedRenderAudio(render_buffer);
+
+  if (private_submodules_->render_pre_processor) {
+    private_submodules_->render_pre_processor->Process(render_buffer);
+  }
+
+  if (submodule_states_.RenderMultiBandSubModulesActive() &&
+      SampleRateSupportsMultiBand(
+          formats_.render_processing_format.sample_rate_hz())) {
+    render_buffer->SplitIntoFrequencyBands();
+  }
+
+#if WEBRTC_INTELLIGIBILITY_ENHANCER
+  if (capture_nonlocked_.intelligibility_enabled) {
+    public_submodules_->intelligibility_enhancer->ProcessRenderAudio(
+        render_buffer);
+  }
+#endif
+
+  if (submodule_states_.RenderMultiBandSubModulesActive()) {
+    QueueBandedRenderAudio(render_buffer);
+  }
+
+  // TODO(peah): Perform the queueing ínside QueueRenderAudiuo().
+  if (private_submodules_->echo_controller) {
+    private_submodules_->echo_controller->AnalyzeRender(render_buffer);
+  }
+
+  if (submodule_states_.RenderMultiBandProcessingActive() &&
+      SampleRateSupportsMultiBand(
+          formats_.render_processing_format.sample_rate_hz())) {
+    render_buffer->MergeFrequencyBands();
+  }
+
+  return kNoError;
+}
+
+int AudioProcessingImpl::set_stream_delay_ms(int delay) {
+  rtc::CritScope cs(&crit_capture_);
+  Error retval = kNoError;
+  capture_.was_stream_delay_set = true;
+  delay += capture_.delay_offset_ms;
+
+  if (delay < 0) {
+    delay = 0;
+    retval = kBadStreamParameterWarning;
+  }
+
+  // TODO(ajm): the max is rather arbitrarily chosen; investigate.
+  if (delay > 500) {
+    delay = 500;
+    retval = kBadStreamParameterWarning;
+  }
+
+  capture_nonlocked_.stream_delay_ms = delay;
+  return retval;
+}
+
+int AudioProcessingImpl::stream_delay_ms() const {
+  // Used as callback from submodules, hence locking is not allowed.
+  return capture_nonlocked_.stream_delay_ms;
+}
+
+bool AudioProcessingImpl::was_stream_delay_set() const {
+  // Used as callback from submodules, hence locking is not allowed.
+  return capture_.was_stream_delay_set;
+}
+
+void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) {
+  rtc::CritScope cs(&crit_capture_);
+  capture_.key_pressed = key_pressed;
+}
+
+void AudioProcessingImpl::set_delay_offset_ms(int offset) {
+  rtc::CritScope cs(&crit_capture_);
+  capture_.delay_offset_ms = offset;
+}
+
+int AudioProcessingImpl::delay_offset_ms() const {
+  rtc::CritScope cs(&crit_capture_);
+  return capture_.delay_offset_ms;
+}
+
+void AudioProcessingImpl::AttachAecDump(std::unique_ptr<AecDump> aec_dump) {
+  RTC_DCHECK(aec_dump);
+  rtc::CritScope cs_render(&crit_render_);
+  rtc::CritScope cs_capture(&crit_capture_);
+
+  // The previously attached AecDump will be destroyed with the
+  // 'aec_dump' parameter, which is after locks are released.
+  aec_dump_.swap(aec_dump);
+  WriteAecDumpConfigMessage(true);
+  aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format));
+}
+
+void AudioProcessingImpl::DetachAecDump() {
+  // The d-tor of a task-queue based AecDump blocks until all pending
+  // tasks are done. This construction avoids blocking while holding
+  // the render and capture locks.
+  std::unique_ptr<AecDump> aec_dump = nullptr;
+  {
+    rtc::CritScope cs_render(&crit_render_);
+    rtc::CritScope cs_capture(&crit_capture_);
+    aec_dump = std::move(aec_dump_);
+  }
+}
+
+void AudioProcessingImpl::AttachPlayoutAudioGenerator(
+    std::unique_ptr<AudioGenerator> audio_generator) {
+  // TODO(bugs.webrtc.org/8882) Stub.
+  // Reset internal audio generator with audio_generator.
+}
+
+void AudioProcessingImpl::DetachPlayoutAudioGenerator() {
+  // TODO(bugs.webrtc.org/8882) Stub.
+  // Delete audio generator, if one is attached.
+}
+
+AudioProcessing::AudioProcessingStatistics::AudioProcessingStatistics() {
+  residual_echo_return_loss.Set(-100.0f, -100.0f, -100.0f, -100.0f);
+  echo_return_loss.Set(-100.0f, -100.0f, -100.0f, -100.0f);
+  echo_return_loss_enhancement.Set(-100.0f, -100.0f, -100.0f, -100.0f);
+  a_nlp.Set(-100.0f, -100.0f, -100.0f, -100.0f);
+}
+
+AudioProcessing::AudioProcessingStatistics::AudioProcessingStatistics(
+    const AudioProcessingStatistics& other) = default;
+
+AudioProcessing::AudioProcessingStatistics::~AudioProcessingStatistics() =
+    default;
+
+// TODO(ivoc): Remove this when GetStatistics() becomes pure virtual.
+AudioProcessing::AudioProcessingStatistics AudioProcessing::GetStatistics()
+    const {
+  return AudioProcessingStatistics();
+}
+
+// TODO(ivoc): Remove this when GetStatistics() becomes pure virtual.
+AudioProcessingStats AudioProcessing::GetStatistics(
+    bool has_remote_tracks) const {
+  return AudioProcessingStats();
+}
+
+AudioProcessing::AudioProcessingStatistics AudioProcessingImpl::GetStatistics()
+    const {
+  AudioProcessingStatistics stats;
+  EchoCancellation::Metrics metrics;
+  if (private_submodules_->echo_controller) {
+    rtc::CritScope cs_capture(&crit_capture_);
+    auto ec_metrics = private_submodules_->echo_controller->GetMetrics();
+    float erl = static_cast<float>(ec_metrics.echo_return_loss);
+    float erle = static_cast<float>(ec_metrics.echo_return_loss_enhancement);
+    // Instant value will also be used for min, max and average.
+    stats.echo_return_loss.Set(erl, erl, erl, erl);
+    stats.echo_return_loss_enhancement.Set(erle, erle, erle, erle);
+  } else if (public_submodules_->echo_cancellation->GetMetrics(&metrics) ==
+             Error::kNoError) {
+    stats.a_nlp.Set(metrics.a_nlp);
+    stats.divergent_filter_fraction = metrics.divergent_filter_fraction;
+    stats.echo_return_loss.Set(metrics.echo_return_loss);
+    stats.echo_return_loss_enhancement.Set(
+        metrics.echo_return_loss_enhancement);
+    stats.residual_echo_return_loss.Set(metrics.residual_echo_return_loss);
+  }
+  {
+    rtc::CritScope cs_capture(&crit_capture_);
+    RTC_DCHECK(private_submodules_->echo_detector);
+    auto ed_metrics = private_submodules_->echo_detector->GetMetrics();
+    stats.residual_echo_likelihood = ed_metrics.echo_likelihood;
+    stats.residual_echo_likelihood_recent_max =
+        ed_metrics.echo_likelihood_recent_max;
+  }
+  public_submodules_->echo_cancellation->GetDelayMetrics(
+      &stats.delay_median, &stats.delay_standard_deviation,
+      &stats.fraction_poor_delays);
+  return stats;
+}
+
+AudioProcessingStats AudioProcessingImpl::GetStatistics(
+    bool has_remote_tracks) const {
+  AudioProcessingStats stats;
+  if (has_remote_tracks) {
+    EchoCancellation::Metrics metrics;
+    if (private_submodules_->echo_controller) {
+      rtc::CritScope cs_capture(&crit_capture_);
+      auto ec_metrics = private_submodules_->echo_controller->GetMetrics();
+      stats.echo_return_loss = ec_metrics.echo_return_loss;
+      stats.echo_return_loss_enhancement =
+          ec_metrics.echo_return_loss_enhancement;
+      stats.delay_ms = ec_metrics.delay_ms;
+    } else if (public_submodules_->echo_cancellation->GetMetrics(&metrics) ==
+               Error::kNoError) {
+      if (metrics.divergent_filter_fraction != -1.0f) {
+        stats.divergent_filter_fraction =
+            rtc::Optional<double>(metrics.divergent_filter_fraction);
+      }
+      if (metrics.echo_return_loss.instant != -100) {
+        stats.echo_return_loss =
+            rtc::Optional<double>(metrics.echo_return_loss.instant);
+      }
+      if (metrics.echo_return_loss_enhancement.instant != -100) {
+        stats.echo_return_loss_enhancement =
+            rtc::Optional<double>(metrics.echo_return_loss_enhancement.instant);
+      }
+    }
+    if (config_.residual_echo_detector.enabled) {
+      rtc::CritScope cs_capture(&crit_capture_);
+      RTC_DCHECK(private_submodules_->echo_detector);
+      auto ed_metrics = private_submodules_->echo_detector->GetMetrics();
+      stats.residual_echo_likelihood = ed_metrics.echo_likelihood;
+      stats.residual_echo_likelihood_recent_max =
+          ed_metrics.echo_likelihood_recent_max;
+    }
+    int delay_median, delay_std;
+    float fraction_poor_delays;
+    if (public_submodules_->echo_cancellation->GetDelayMetrics(
+            &delay_median, &delay_std, &fraction_poor_delays) ==
+        Error::kNoError) {
+      if (delay_median >= 0) {
+        stats.delay_median_ms = rtc::Optional<int32_t>(delay_median);
+      }
+      if (delay_std >= 0) {
+        stats.delay_standard_deviation_ms = rtc::Optional<int32_t>(delay_std);
+      }
+    }
+  }
+  return stats;
+}
+
+EchoCancellation* AudioProcessingImpl::echo_cancellation() const {
+  return public_submodules_->echo_cancellation.get();
+}
+
+EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const {
+  return public_submodules_->echo_control_mobile.get();
+}
+
+GainControl* AudioProcessingImpl::gain_control() const {
+  if (constants_.use_experimental_agc) {
+    return public_submodules_->gain_control_for_experimental_agc.get();
+  }
+  return public_submodules_->gain_control.get();
+}
+
+HighPassFilter* AudioProcessingImpl::high_pass_filter() const {
+  return high_pass_filter_impl_.get();
+}
+
+LevelEstimator* AudioProcessingImpl::level_estimator() const {
+  return public_submodules_->level_estimator.get();
+}
+
+NoiseSuppression* AudioProcessingImpl::noise_suppression() const {
+  return public_submodules_->noise_suppression.get();
+}
+
+VoiceDetection* AudioProcessingImpl::voice_detection() const {
+  return public_submodules_->voice_detection.get();
+}
+
+void AudioProcessingImpl::MutateConfig(
+    rtc::FunctionView<void(AudioProcessing::Config*)> mutator) {
+  rtc::CritScope cs_render(&crit_render_);
+  rtc::CritScope cs_capture(&crit_capture_);
+  mutator(&config_);
+  ApplyConfig(config_);
+}
+
+AudioProcessing::Config AudioProcessingImpl::GetConfig() const {
+  rtc::CritScope cs_render(&crit_render_);
+  rtc::CritScope cs_capture(&crit_capture_);
+  return config_;
+}
+
+bool AudioProcessingImpl::UpdateActiveSubmoduleStates() {
+  return submodule_states_.Update(
+      config_.high_pass_filter.enabled,
+      public_submodules_->echo_cancellation->is_enabled(),
+      public_submodules_->echo_control_mobile->is_enabled(),
+      config_.residual_echo_detector.enabled,
+      public_submodules_->noise_suppression->is_enabled(),
+      capture_nonlocked_.intelligibility_enabled,
+      capture_nonlocked_.beamformer_enabled,
+      public_submodules_->gain_control->is_enabled(),
+      config_.gain_controller2.enabled,
+      capture_nonlocked_.echo_controller_enabled,
+      public_submodules_->voice_detection->is_enabled(),
+      public_submodules_->level_estimator->is_enabled(),
+      capture_.transient_suppressor_enabled);
+}
+
+
+void AudioProcessingImpl::InitializeTransient() {
+  if (capture_.transient_suppressor_enabled) {
+    if (!public_submodules_->transient_suppressor.get()) {
+      public_submodules_->transient_suppressor.reset(new TransientSuppressor());
+    }
+    public_submodules_->transient_suppressor->Initialize(
+        capture_nonlocked_.capture_processing_format.sample_rate_hz(),
+        capture_nonlocked_.split_rate, num_proc_channels());
+  }
+}
+
+void AudioProcessingImpl::InitializeBeamformer() {
+  if (capture_nonlocked_.beamformer_enabled) {
+    if (!private_submodules_->beamformer) {
+      private_submodules_->beamformer.reset(new NonlinearBeamformer(
+          capture_.array_geometry, 1u, capture_.target_direction));
+    }
+    private_submodules_->beamformer->Initialize(kChunkSizeMs,
+                                                capture_nonlocked_.split_rate);
+  }
+}
+
+void AudioProcessingImpl::InitializeIntelligibility() {
+#if WEBRTC_INTELLIGIBILITY_ENHANCER
+  if (capture_nonlocked_.intelligibility_enabled) {
+    public_submodules_->intelligibility_enhancer.reset(
+        new IntelligibilityEnhancer(capture_nonlocked_.split_rate,
+                                    render_.render_audio->num_channels(),
+                                    render_.render_audio->num_bands(),
+                                    NoiseSuppressionImpl::num_noise_bins()));
+  }
+#endif
+}
+
+void AudioProcessingImpl::InitializeLowCutFilter() {
+  if (config_.high_pass_filter.enabled) {
+    private_submodules_->low_cut_filter.reset(
+        new LowCutFilter(num_proc_channels(), proc_sample_rate_hz()));
+  } else {
+    private_submodules_->low_cut_filter.reset();
+  }
+}
+
+void AudioProcessingImpl::InitializeEchoController() {
+  if (echo_control_factory_) {
+    private_submodules_->echo_controller =
+        echo_control_factory_->Create(proc_sample_rate_hz());
+  } else {
+    private_submodules_->echo_controller.reset();
+  }
+}
+
+void AudioProcessingImpl::InitializeGainController2() {
+  if (config_.gain_controller2.enabled) {
+    private_submodules_->gain_controller2->Initialize(proc_sample_rate_hz());
+  }
+}
+
+void AudioProcessingImpl::InitializeResidualEchoDetector() {
+  RTC_DCHECK(private_submodules_->echo_detector);
+  private_submodules_->echo_detector->Initialize(
+      proc_sample_rate_hz(), num_proc_channels(),
+      formats_.render_processing_format.sample_rate_hz(),
+      formats_.render_processing_format.num_channels());
+}
+
+void AudioProcessingImpl::InitializePostProcessor() {
+  if (private_submodules_->capture_post_processor) {
+    private_submodules_->capture_post_processor->Initialize(
+        proc_sample_rate_hz(), num_proc_channels());
+  }
+}
+
+void AudioProcessingImpl::InitializePreProcessor() {
+  if (private_submodules_->render_pre_processor) {
+    private_submodules_->render_pre_processor->Initialize(
+        formats_.render_processing_format.sample_rate_hz(),
+        formats_.render_processing_format.num_channels());
+  }
+}
+
+void AudioProcessingImpl::MaybeUpdateHistograms() {
+  static const int kMinDiffDelayMs = 60;
+
+  if (echo_cancellation()->is_enabled()) {
+    // Activate delay_jumps_ counters if we know echo_cancellation is running.
+    // If a stream has echo we know that the echo_cancellation is in process.
+    if (capture_.stream_delay_jumps == -1 &&
+        echo_cancellation()->stream_has_echo()) {
+      capture_.stream_delay_jumps = 0;
+    }
+    if (capture_.aec_system_delay_jumps == -1 &&
+        echo_cancellation()->stream_has_echo()) {
+      capture_.aec_system_delay_jumps = 0;
+    }
+
+    // Detect a jump in platform reported system delay and log the difference.
+    const int diff_stream_delay_ms =
+        capture_nonlocked_.stream_delay_ms - capture_.last_stream_delay_ms;
+    if (diff_stream_delay_ms > kMinDiffDelayMs &&
+        capture_.last_stream_delay_ms != 0) {
+      RTC_HISTOGRAM_COUNTS("WebRTC.Audio.PlatformReportedStreamDelayJump",
+                           diff_stream_delay_ms, kMinDiffDelayMs, 1000, 100);
+      if (capture_.stream_delay_jumps == -1) {
+        capture_.stream_delay_jumps = 0;  // Activate counter if needed.
+      }
+      capture_.stream_delay_jumps++;
+    }
+    capture_.last_stream_delay_ms = capture_nonlocked_.stream_delay_ms;
+
+    // Detect a jump in AEC system delay and log the difference.
+    const int samples_per_ms =
+        rtc::CheckedDivExact(capture_nonlocked_.split_rate, 1000);
+    RTC_DCHECK_LT(0, samples_per_ms);
+    const int aec_system_delay_ms =
+        public_submodules_->echo_cancellation->GetSystemDelayInSamples() /
+        samples_per_ms;
+    const int diff_aec_system_delay_ms =
+        aec_system_delay_ms - capture_.last_aec_system_delay_ms;
+    if (diff_aec_system_delay_ms > kMinDiffDelayMs &&
+        capture_.last_aec_system_delay_ms != 0) {
+      RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AecSystemDelayJump",
+                           diff_aec_system_delay_ms, kMinDiffDelayMs, 1000,
+                           100);
+      if (capture_.aec_system_delay_jumps == -1) {
+        capture_.aec_system_delay_jumps = 0;  // Activate counter if needed.
+      }
+      capture_.aec_system_delay_jumps++;
+    }
+    capture_.last_aec_system_delay_ms = aec_system_delay_ms;
+  }
+}
+
+void AudioProcessingImpl::UpdateHistogramsOnCallEnd() {
+  // Run in a single-threaded manner.
+  rtc::CritScope cs_render(&crit_render_);
+  rtc::CritScope cs_capture(&crit_capture_);
+
+  if (capture_.stream_delay_jumps > -1) {
+    RTC_HISTOGRAM_ENUMERATION(
+        "WebRTC.Audio.NumOfPlatformReportedStreamDelayJumps",
+        capture_.stream_delay_jumps, 51);
+  }
+  capture_.stream_delay_jumps = -1;
+  capture_.last_stream_delay_ms = 0;
+
+  if (capture_.aec_system_delay_jumps > -1) {
+    RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps",
+                              capture_.aec_system_delay_jumps, 51);
+  }
+  capture_.aec_system_delay_jumps = -1;
+  capture_.last_aec_system_delay_ms = 0;
+}
+
+void AudioProcessingImpl::WriteAecDumpConfigMessage(bool forced) {
+  if (!aec_dump_) {
+    return;
+  }
+  std::string experiments_description =
+      public_submodules_->echo_cancellation->GetExperimentsDescription();
+  // TODO(peah): Add semicolon-separated concatenations of experiment
+  // descriptions for other submodules.
+  if (constants_.agc_clipped_level_min != kClippedLevelMin) {
+    experiments_description += "AgcClippingLevelExperiment;";
+  }
+  if (capture_nonlocked_.echo_controller_enabled) {
+    experiments_description += "EchoController;";
+  }
+  if (config_.gain_controller2.enabled) {
+    experiments_description += "GainController2;";
+  }
+
+  InternalAPMConfig apm_config;
+
+  apm_config.aec_enabled = public_submodules_->echo_cancellation->is_enabled();
+  apm_config.aec_delay_agnostic_enabled =
+      public_submodules_->echo_cancellation->is_delay_agnostic_enabled();
+  apm_config.aec_drift_compensation_enabled =
+      public_submodules_->echo_cancellation->is_drift_compensation_enabled();
+  apm_config.aec_extended_filter_enabled =
+      public_submodules_->echo_cancellation->is_extended_filter_enabled();
+  apm_config.aec_suppression_level = static_cast<int>(
+      public_submodules_->echo_cancellation->suppression_level());
+
+  apm_config.aecm_enabled =
+      public_submodules_->echo_control_mobile->is_enabled();
+  apm_config.aecm_comfort_noise_enabled =
+      public_submodules_->echo_control_mobile->is_comfort_noise_enabled();
+  apm_config.aecm_routing_mode =
+      static_cast<int>(public_submodules_->echo_control_mobile->routing_mode());
+
+  apm_config.agc_enabled = public_submodules_->gain_control->is_enabled();
+  apm_config.agc_mode =
+      static_cast<int>(public_submodules_->gain_control->mode());
+  apm_config.agc_limiter_enabled =
+      public_submodules_->gain_control->is_limiter_enabled();
+  apm_config.noise_robust_agc_enabled = constants_.use_experimental_agc;
+
+  apm_config.hpf_enabled = config_.high_pass_filter.enabled;
+
+  apm_config.ns_enabled = public_submodules_->noise_suppression->is_enabled();
+  apm_config.ns_level =
+      static_cast<int>(public_submodules_->noise_suppression->level());
+
+  apm_config.transient_suppression_enabled =
+      capture_.transient_suppressor_enabled;
+  apm_config.intelligibility_enhancer_enabled =
+      capture_nonlocked_.intelligibility_enabled;
+  apm_config.experiments_description = experiments_description;
+
+  if (!forced && apm_config == apm_config_for_aec_dump_) {
+    return;
+  }
+  aec_dump_->WriteConfig(apm_config);
+  apm_config_for_aec_dump_ = apm_config;
+}
+
+void AudioProcessingImpl::RecordUnprocessedCaptureStream(
+    const float* const* src) {
+  RTC_DCHECK(aec_dump_);
+  WriteAecDumpConfigMessage(false);
+
+  const size_t channel_size = formats_.api_format.input_stream().num_frames();
+  const size_t num_channels = formats_.api_format.input_stream().num_channels();
+  aec_dump_->AddCaptureStreamInput(
+      AudioFrameView<const float>(src, num_channels, channel_size));
+  RecordAudioProcessingState();
+}
+
+void AudioProcessingImpl::RecordUnprocessedCaptureStream(
+    const AudioFrame& capture_frame) {
+  RTC_DCHECK(aec_dump_);
+  WriteAecDumpConfigMessage(false);
+
+  aec_dump_->AddCaptureStreamInput(capture_frame);
+  RecordAudioProcessingState();
+}
+
+void AudioProcessingImpl::RecordProcessedCaptureStream(
+    const float* const* processed_capture_stream) {
+  RTC_DCHECK(aec_dump_);
+
+  const size_t channel_size = formats_.api_format.output_stream().num_frames();
+  const size_t num_channels =
+      formats_.api_format.output_stream().num_channels();
+  aec_dump_->AddCaptureStreamOutput(AudioFrameView<const float>(
+      processed_capture_stream, num_channels, channel_size));
+  aec_dump_->WriteCaptureStreamMessage();
+}
+
+void AudioProcessingImpl::RecordProcessedCaptureStream(
+    const AudioFrame& processed_capture_frame) {
+  RTC_DCHECK(aec_dump_);
+
+  aec_dump_->AddCaptureStreamOutput(processed_capture_frame);
+  aec_dump_->WriteCaptureStreamMessage();
+}
+
+void AudioProcessingImpl::RecordAudioProcessingState() {
+  RTC_DCHECK(aec_dump_);
+  AecDump::AudioProcessingState audio_proc_state;
+  audio_proc_state.delay = capture_nonlocked_.stream_delay_ms;
+  audio_proc_state.drift =
+      public_submodules_->echo_cancellation->stream_drift_samples();
+  audio_proc_state.level = gain_control()->stream_analog_level();
+  audio_proc_state.keypress = capture_.key_pressed;
+  aec_dump_->AddAudioProcessingState(audio_proc_state);
+}
+
+AudioProcessingImpl::ApmCaptureState::ApmCaptureState(
+    bool transient_suppressor_enabled,
+    const std::vector<Point>& array_geometry,
+    SphericalPointf target_direction)
+    : aec_system_delay_jumps(-1),
+      delay_offset_ms(0),
+      was_stream_delay_set(false),
+      last_stream_delay_ms(0),
+      last_aec_system_delay_ms(0),
+      stream_delay_jumps(-1),
+      output_will_be_muted(false),
+      key_pressed(false),
+      transient_suppressor_enabled(transient_suppressor_enabled),
+      array_geometry(array_geometry),
+      target_direction(target_direction),
+      capture_processing_format(kSampleRate16kHz),
+      split_rate(kSampleRate16kHz),
+      echo_path_gain_change(false) {}
+
+AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default;
+
+AudioProcessingImpl::ApmRenderState::ApmRenderState() = default;
+
+AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default;
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/audio_processing_impl.h b/modules/audio_processing/audio_processing_impl.h
new file mode 100644
index 0000000..55c47ac
--- /dev/null
+++ b/modules/audio_processing/audio_processing_impl.h
@@ -0,0 +1,435 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AUDIO_PROCESSING_IMPL_H_
+#define MODULES_AUDIO_PROCESSING_AUDIO_PROCESSING_IMPL_H_
+
+#include <list>
+#include <memory>
+#include <vector>
+
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/include/aec_dump.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/render_queue_item_verifier.h"
+#include "modules/audio_processing/rms_level.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/function_view.h"
+#include "rtc_base/gtest_prod_util.h"
+#include "rtc_base/ignore_wundef.h"
+#include "rtc_base/protobuf_utils.h"
+#include "rtc_base/swap_queue.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/file_wrapper.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+class AudioConverter;
+class NonlinearBeamformer;
+
+class AudioProcessingImpl : public AudioProcessing {
+ public:
+  // Methods forcing APM to run in a single-threaded manner.
+  // Acquires both the render and capture locks.
+  explicit AudioProcessingImpl(const webrtc::Config& config);
+  // AudioProcessingImpl takes ownership of capture post processor and
+  // beamformer.
+  AudioProcessingImpl(const webrtc::Config& config,
+                      std::unique_ptr<CustomProcessing> capture_post_processor,
+                      std::unique_ptr<CustomProcessing> render_pre_processor,
+                      std::unique_ptr<EchoControlFactory> echo_control_factory,
+                      std::unique_ptr<EchoDetector> echo_detector,
+                      NonlinearBeamformer* beamformer);
+  ~AudioProcessingImpl() override;
+  int Initialize() override;
+  int Initialize(int capture_input_sample_rate_hz,
+                 int capture_output_sample_rate_hz,
+                 int render_sample_rate_hz,
+                 ChannelLayout capture_input_layout,
+                 ChannelLayout capture_output_layout,
+                 ChannelLayout render_input_layout) override;
+  int Initialize(const ProcessingConfig& processing_config) override;
+  void ApplyConfig(const AudioProcessing::Config& config) override;
+  void SetExtraOptions(const webrtc::Config& config) override;
+  void UpdateHistogramsOnCallEnd() override;
+  void AttachAecDump(std::unique_ptr<AecDump> aec_dump) override;
+  void DetachAecDump() override;
+  void AttachPlayoutAudioGenerator(
+      std::unique_ptr<AudioGenerator> audio_generator) override;
+  void DetachPlayoutAudioGenerator() override;
+
+  // Capture-side exclusive methods possibly running APM in a
+  // multi-threaded manner. Acquire the capture lock.
+  int ProcessStream(AudioFrame* frame) override;
+  int ProcessStream(const float* const* src,
+                    size_t samples_per_channel,
+                    int input_sample_rate_hz,
+                    ChannelLayout input_layout,
+                    int output_sample_rate_hz,
+                    ChannelLayout output_layout,
+                    float* const* dest) override;
+  int ProcessStream(const float* const* src,
+                    const StreamConfig& input_config,
+                    const StreamConfig& output_config,
+                    float* const* dest) override;
+  void set_output_will_be_muted(bool muted) override;
+  int set_stream_delay_ms(int delay) override;
+  void set_delay_offset_ms(int offset) override;
+  int delay_offset_ms() const override;
+  void set_stream_key_pressed(bool key_pressed) override;
+
+  // Render-side exclusive methods possibly running APM in a
+  // multi-threaded manner. Acquire the render lock.
+  int ProcessReverseStream(AudioFrame* frame) override;
+  int AnalyzeReverseStream(const float* const* data,
+                           size_t samples_per_channel,
+                           int sample_rate_hz,
+                           ChannelLayout layout) override;
+  int ProcessReverseStream(const float* const* src,
+                           const StreamConfig& input_config,
+                           const StreamConfig& output_config,
+                           float* const* dest) override;
+
+  // Methods only accessed from APM submodules or
+  // from AudioProcessing tests in a single-threaded manner.
+  // Hence there is no need for locks in these.
+  int proc_sample_rate_hz() const override;
+  int proc_split_sample_rate_hz() const override;
+  size_t num_input_channels() const override;
+  size_t num_proc_channels() const override;
+  size_t num_output_channels() const override;
+  size_t num_reverse_channels() const override;
+  int stream_delay_ms() const override;
+  bool was_stream_delay_set() const override
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+
+  AudioProcessingStatistics GetStatistics() const override;
+  AudioProcessingStats GetStatistics(bool has_remote_tracks) const override;
+
+  // Methods returning pointers to APM submodules.
+  // No locks are aquired in those, as those locks
+  // would offer no protection (the submodules are
+  // created only once in a single-treaded manner
+  // during APM creation).
+  EchoCancellation* echo_cancellation() const override;
+  EchoControlMobile* echo_control_mobile() const override;
+  GainControl* gain_control() const override;
+  // TODO(peah): Deprecate this API call.
+  HighPassFilter* high_pass_filter() const override;
+  LevelEstimator* level_estimator() const override;
+  NoiseSuppression* noise_suppression() const override;
+  VoiceDetection* voice_detection() const override;
+
+  // TODO(peah): Remove MutateConfig once the new API allows that.
+  void MutateConfig(rtc::FunctionView<void(AudioProcessing::Config*)> mutator);
+  AudioProcessing::Config GetConfig() const override;
+
+ protected:
+  // Overridden in a mock.
+  virtual int InitializeLocked()
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
+
+ private:
+  // TODO(peah): These friend classes should be removed as soon as the new
+  // parameter setting scheme allows.
+  FRIEND_TEST_ALL_PREFIXES(ApmConfiguration, DefaultBehavior);
+  FRIEND_TEST_ALL_PREFIXES(ApmConfiguration, ValidConfigBehavior);
+  FRIEND_TEST_ALL_PREFIXES(ApmConfiguration, InValidConfigBehavior);
+  struct ApmPublicSubmodules;
+  struct ApmPrivateSubmodules;
+
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  static int instance_count_;
+
+  // Submodule interface implementations.
+  std::unique_ptr<HighPassFilter> high_pass_filter_impl_;
+
+  // EchoControl factory.
+  std::unique_ptr<EchoControlFactory> echo_control_factory_;
+
+  class ApmSubmoduleStates {
+   public:
+    ApmSubmoduleStates(bool capture_post_processor_enabled,
+                       bool render_pre_processor_enabled);
+    // Updates the submodule state and returns true if it has changed.
+    bool Update(bool low_cut_filter_enabled,
+                bool echo_canceller_enabled,
+                bool mobile_echo_controller_enabled,
+                bool residual_echo_detector_enabled,
+                bool noise_suppressor_enabled,
+                bool intelligibility_enhancer_enabled,
+                bool beamformer_enabled,
+                bool adaptive_gain_controller_enabled,
+                bool gain_controller2_enabled,
+                bool echo_controller_enabled,
+                bool voice_activity_detector_enabled,
+                bool level_estimator_enabled,
+                bool transient_suppressor_enabled);
+    bool CaptureMultiBandSubModulesActive() const;
+    bool CaptureMultiBandProcessingActive() const;
+    bool CaptureFullBandProcessingActive() const;
+    bool RenderMultiBandSubModulesActive() const;
+    bool RenderFullBandProcessingActive() const;
+    bool RenderMultiBandProcessingActive() const;
+
+   private:
+    const bool capture_post_processor_enabled_ = false;
+    const bool render_pre_processor_enabled_ = false;
+    bool low_cut_filter_enabled_ = false;
+    bool echo_canceller_enabled_ = false;
+    bool mobile_echo_controller_enabled_ = false;
+    bool residual_echo_detector_enabled_ = false;
+    bool noise_suppressor_enabled_ = false;
+    bool intelligibility_enhancer_enabled_ = false;
+    bool beamformer_enabled_ = false;
+    bool adaptive_gain_controller_enabled_ = false;
+    bool gain_controller2_enabled_ = false;
+    bool echo_controller_enabled_ = false;
+    bool level_estimator_enabled_ = false;
+    bool voice_activity_detector_enabled_ = false;
+    bool transient_suppressor_enabled_ = false;
+    bool first_update_ = true;
+  };
+
+  // Method for modifying the formats struct that are called from both
+  // the render and capture threads. The check for whether modifications
+  // are needed is done while holding the render lock only, thereby avoiding
+  // that the capture thread blocks the render thread.
+  // The struct is modified in a single-threaded manner by holding both the
+  // render and capture locks.
+  int MaybeInitialize(const ProcessingConfig& config, bool force_initialization)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+
+  int MaybeInitializeRender(const ProcessingConfig& processing_config)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+
+  int MaybeInitializeCapture(const ProcessingConfig& processing_config,
+                             bool force_initialization)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+
+  // Method for updating the state keeping track of the active submodules.
+  // Returns a bool indicating whether the state has changed.
+  bool UpdateActiveSubmoduleStates()
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+
+  // Methods requiring APM running in a single-threaded manner.
+  // Are called with both the render and capture locks already
+  // acquired.
+  void InitializeTransient()
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
+  void InitializeBeamformer()
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
+  void InitializeIntelligibility()
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
+  int InitializeLocked(const ProcessingConfig& config)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
+  void InitializeResidualEchoDetector()
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
+  void InitializeLowCutFilter() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+  void InitializeEchoController() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+  void InitializeGainController2() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+  void InitializePostProcessor() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+  void InitializePreProcessor() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+
+  void EmptyQueuedRenderAudio();
+  void AllocateRenderQueue()
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
+  void QueueBandedRenderAudio(AudioBuffer* audio)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+  void QueueNonbandedRenderAudio(AudioBuffer* audio)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+
+  // Capture-side exclusive methods possibly running APM in a multi-threaded
+  // manner that are called with the render lock already acquired.
+  int ProcessCaptureStreamLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+  void MaybeUpdateHistograms() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+
+  // Render-side exclusive methods possibly running APM in a multi-threaded
+  // manner that are called with the render lock already acquired.
+  // TODO(ekm): Remove once all clients updated to new interface.
+  int AnalyzeReverseStreamLocked(const float* const* src,
+                                 const StreamConfig& input_config,
+                                 const StreamConfig& output_config)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+  int ProcessRenderStreamLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+
+  // Collects configuration settings from public and private
+  // submodules to be saved as an audioproc::Config message on the
+  // AecDump if it is attached.  If not |forced|, only writes the current
+  // config if it is different from the last saved one; if |forced|,
+  // writes the config regardless of the last saved.
+  void WriteAecDumpConfigMessage(bool forced)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+
+  // Notifies attached AecDump of current configuration and capture data.
+  void RecordUnprocessedCaptureStream(const float* const* capture_stream)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+
+  void RecordUnprocessedCaptureStream(const AudioFrame& capture_frame)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+
+  // Notifies attached AecDump of current configuration and
+  // processed capture data and issues a capture stream recording
+  // request.
+  void RecordProcessedCaptureStream(
+      const float* const* processed_capture_stream)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+
+  void RecordProcessedCaptureStream(const AudioFrame& processed_capture_frame)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+
+  // Notifies attached AecDump about current state (delay, drift, etc).
+  void RecordAudioProcessingState() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+
+  // AecDump instance used for optionally logging APM config, input
+  // and output to file in the AEC-dump format defined in debug.proto.
+  std::unique_ptr<AecDump> aec_dump_;
+
+  // Hold the last config written with AecDump for avoiding writing
+  // the same config twice.
+  InternalAPMConfig apm_config_for_aec_dump_ RTC_GUARDED_BY(crit_capture_);
+
+  // Critical sections.
+  rtc::CriticalSection crit_render_ RTC_ACQUIRED_BEFORE(crit_capture_);
+  rtc::CriticalSection crit_capture_;
+
+  // Struct containing the Config specifying the behavior of APM.
+  AudioProcessing::Config config_;
+
+  // Class containing information about what submodules are active.
+  ApmSubmoduleStates submodule_states_;
+
+  // Structs containing the pointers to the submodules.
+  std::unique_ptr<ApmPublicSubmodules> public_submodules_;
+  std::unique_ptr<ApmPrivateSubmodules> private_submodules_;
+
+  // State that is written to while holding both the render and capture locks
+  // but can be read without any lock being held.
+  // As this is only accessed internally of APM, and all internal methods in APM
+  // either are holding the render or capture locks, this construct is safe as
+  // it is not possible to read the variables while writing them.
+  struct ApmFormatState {
+    ApmFormatState()
+        :  // Format of processing streams at input/output call sites.
+          api_format({{{kSampleRate16kHz, 1, false},
+                       {kSampleRate16kHz, 1, false},
+                       {kSampleRate16kHz, 1, false},
+                       {kSampleRate16kHz, 1, false}}}),
+          render_processing_format(kSampleRate16kHz, 1) {}
+    ProcessingConfig api_format;
+    StreamConfig render_processing_format;
+  } formats_;
+
+  // APM constants.
+  const struct ApmConstants {
+    ApmConstants(int agc_startup_min_volume,
+                 int agc_clipped_level_min,
+                 bool use_experimental_agc)
+        :  // Format of processing streams at input/output call sites.
+          agc_startup_min_volume(agc_startup_min_volume),
+          agc_clipped_level_min(agc_clipped_level_min),
+          use_experimental_agc(use_experimental_agc) {}
+    int agc_startup_min_volume;
+    int agc_clipped_level_min;
+    bool use_experimental_agc;
+  } constants_;
+
+  struct ApmCaptureState {
+    ApmCaptureState(bool transient_suppressor_enabled,
+                    const std::vector<Point>& array_geometry,
+                    SphericalPointf target_direction);
+    ~ApmCaptureState();
+    int aec_system_delay_jumps;
+    int delay_offset_ms;
+    bool was_stream_delay_set;
+    int last_stream_delay_ms;
+    int last_aec_system_delay_ms;
+    int stream_delay_jumps;
+    bool output_will_be_muted;
+    bool key_pressed;
+    bool transient_suppressor_enabled;
+    std::vector<Point> array_geometry;
+    SphericalPointf target_direction;
+    std::unique_ptr<AudioBuffer> capture_audio;
+    // Only the rate and samples fields of capture_processing_format_ are used
+    // because the capture processing number of channels is mutable and is
+    // tracked by the capture_audio_.
+    StreamConfig capture_processing_format;
+    int split_rate;
+    bool echo_path_gain_change;
+  } capture_ RTC_GUARDED_BY(crit_capture_);
+
+  struct ApmCaptureNonLockedState {
+    ApmCaptureNonLockedState(bool beamformer_enabled,
+                             bool intelligibility_enabled)
+        : capture_processing_format(kSampleRate16kHz),
+          split_rate(kSampleRate16kHz),
+          stream_delay_ms(0),
+          beamformer_enabled(beamformer_enabled),
+          intelligibility_enabled(intelligibility_enabled) {}
+    // Only the rate and samples fields of capture_processing_format_ are used
+    // because the forward processing number of channels is mutable and is
+    // tracked by the capture_audio_.
+    StreamConfig capture_processing_format;
+    int split_rate;
+    int stream_delay_ms;
+    bool beamformer_enabled;
+    bool intelligibility_enabled;
+    bool echo_controller_enabled = false;
+  } capture_nonlocked_;
+
+  struct ApmRenderState {
+    ApmRenderState();
+    ~ApmRenderState();
+    std::unique_ptr<AudioConverter> render_converter;
+    std::unique_ptr<AudioBuffer> render_audio;
+  } render_ RTC_GUARDED_BY(crit_render_);
+
+  size_t aec_render_queue_element_max_size_ RTC_GUARDED_BY(crit_render_)
+      RTC_GUARDED_BY(crit_capture_) = 0;
+  std::vector<float> aec_render_queue_buffer_ RTC_GUARDED_BY(crit_render_);
+  std::vector<float> aec_capture_queue_buffer_ RTC_GUARDED_BY(crit_capture_);
+
+  size_t aecm_render_queue_element_max_size_ RTC_GUARDED_BY(crit_render_)
+      RTC_GUARDED_BY(crit_capture_) = 0;
+  std::vector<int16_t> aecm_render_queue_buffer_ RTC_GUARDED_BY(crit_render_);
+  std::vector<int16_t> aecm_capture_queue_buffer_ RTC_GUARDED_BY(crit_capture_);
+
+  size_t agc_render_queue_element_max_size_ RTC_GUARDED_BY(crit_render_)
+      RTC_GUARDED_BY(crit_capture_) = 0;
+  std::vector<int16_t> agc_render_queue_buffer_ RTC_GUARDED_BY(crit_render_);
+  std::vector<int16_t> agc_capture_queue_buffer_ RTC_GUARDED_BY(crit_capture_);
+
+  size_t red_render_queue_element_max_size_ RTC_GUARDED_BY(crit_render_)
+      RTC_GUARDED_BY(crit_capture_) = 0;
+  std::vector<float> red_render_queue_buffer_ RTC_GUARDED_BY(crit_render_);
+  std::vector<float> red_capture_queue_buffer_ RTC_GUARDED_BY(crit_capture_);
+
+  RmsLevel capture_input_rms_ RTC_GUARDED_BY(crit_capture_);
+  RmsLevel capture_output_rms_ RTC_GUARDED_BY(crit_capture_);
+  int capture_rms_interval_counter_ RTC_GUARDED_BY(crit_capture_) = 0;
+
+  // Lock protection not needed.
+  std::unique_ptr<SwapQueue<std::vector<float>, RenderQueueItemVerifier<float>>>
+      aec_render_signal_queue_;
+  std::unique_ptr<
+      SwapQueue<std::vector<int16_t>, RenderQueueItemVerifier<int16_t>>>
+      aecm_render_signal_queue_;
+  std::unique_ptr<
+      SwapQueue<std::vector<int16_t>, RenderQueueItemVerifier<int16_t>>>
+      agc_render_signal_queue_;
+  std::unique_ptr<SwapQueue<std::vector<float>, RenderQueueItemVerifier<float>>>
+      red_render_signal_queue_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AUDIO_PROCESSING_IMPL_H_
diff --git a/modules/audio_processing/audio_processing_impl_locking_unittest.cc b/modules/audio_processing/audio_processing_impl_locking_unittest.cc
new file mode 100644
index 0000000..d4cff45
--- /dev/null
+++ b/modules/audio_processing/audio_processing_impl_locking_unittest.cc
@@ -0,0 +1,1135 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/audio_processing_impl.h"
+
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/test/test_utils.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/event.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/random.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+class AudioProcessingImplLockTest;
+
+// Type of the render thread APM API call to use in the test.
+enum class RenderApiImpl {
+  ProcessReverseStreamImpl1,
+  ProcessReverseStreamImpl2,
+  AnalyzeReverseStreamImpl
+};
+
+// Type of the capture thread APM API call to use in the test.
+enum class CaptureApiImpl {
+  ProcessStreamImpl1,
+  ProcessStreamImpl2,
+  ProcessStreamImpl3
+};
+
+// The runtime parameter setting scheme to use in the test.
+enum class RuntimeParameterSettingScheme {
+  SparseStreamMetadataChangeScheme,
+  ExtremeStreamMetadataChangeScheme,
+  FixedMonoStreamMetadataScheme,
+  FixedStereoStreamMetadataScheme
+};
+
+// Variant of echo canceller settings to use in the test.
+enum class AecType {
+  BasicWebRtcAecSettings,
+  AecTurnedOff,
+  BasicWebRtcAecSettingsWithExtentedFilter,
+  BasicWebRtcAecSettingsWithDelayAgnosticAec,
+  BasicWebRtcAecSettingsWithAecMobile
+};
+
+// Thread-safe random number generator wrapper.
+class RandomGenerator {
+ public:
+  RandomGenerator() : rand_gen_(42U) {}
+
+  int RandInt(int min, int max) {
+    rtc::CritScope cs(&crit_);
+    return rand_gen_.Rand(min, max);
+  }
+
+  int RandInt(int max) {
+    rtc::CritScope cs(&crit_);
+    return rand_gen_.Rand(max);
+  }
+
+  float RandFloat() {
+    rtc::CritScope cs(&crit_);
+    return rand_gen_.Rand<float>();
+  }
+
+ private:
+  rtc::CriticalSection crit_;
+  Random rand_gen_ RTC_GUARDED_BY(crit_);
+};
+
+// Variables related to the audio data and formats.
+struct AudioFrameData {
+  explicit AudioFrameData(int max_frame_size) {
+    // Set up the two-dimensional arrays needed for the APM API calls.
+    input_framechannels.resize(2 * max_frame_size);
+    input_frame.resize(2);
+    input_frame[0] = &input_framechannels[0];
+    input_frame[1] = &input_framechannels[max_frame_size];
+
+    output_frame_channels.resize(2 * max_frame_size);
+    output_frame.resize(2);
+    output_frame[0] = &output_frame_channels[0];
+    output_frame[1] = &output_frame_channels[max_frame_size];
+  }
+
+  AudioFrame frame;
+  std::vector<float*> output_frame;
+  std::vector<float> output_frame_channels;
+  AudioProcessing::ChannelLayout output_channel_layout =
+      AudioProcessing::ChannelLayout::kMono;
+  int input_sample_rate_hz = 16000;
+  int input_number_of_channels = -1;
+  std::vector<float*> input_frame;
+  std::vector<float> input_framechannels;
+  AudioProcessing::ChannelLayout input_channel_layout =
+      AudioProcessing::ChannelLayout::kMono;
+  int output_sample_rate_hz = 16000;
+  int output_number_of_channels = -1;
+  StreamConfig input_stream_config;
+  StreamConfig output_stream_config;
+  int input_samples_per_channel = -1;
+  int output_samples_per_channel = -1;
+};
+
+// The configuration for the test.
+struct TestConfig {
+  // Test case generator for the test configurations to use in the brief tests.
+  static std::vector<TestConfig> GenerateBriefTestConfigs() {
+    std::vector<TestConfig> test_configs;
+    AecType aec_types[] = {AecType::BasicWebRtcAecSettingsWithDelayAgnosticAec,
+                           AecType::BasicWebRtcAecSettingsWithAecMobile};
+    for (auto aec_type : aec_types) {
+      TestConfig test_config;
+      test_config.aec_type = aec_type;
+
+      test_config.min_number_of_calls = 300;
+
+      // Perform tests only with the extreme runtime parameter setting scheme.
+      test_config.runtime_parameter_setting_scheme =
+          RuntimeParameterSettingScheme::ExtremeStreamMetadataChangeScheme;
+
+      // Only test 16 kHz for this test suite.
+      test_config.initial_sample_rate_hz = 16000;
+
+      // Create test config for the second processing API function set.
+      test_config.render_api_function =
+          RenderApiImpl::ProcessReverseStreamImpl2;
+      test_config.capture_api_function = CaptureApiImpl::ProcessStreamImpl2;
+
+      // Create test config for the first processing API function set.
+      test_configs.push_back(test_config);
+      test_config.render_api_function =
+          RenderApiImpl::AnalyzeReverseStreamImpl;
+      test_config.capture_api_function = CaptureApiImpl::ProcessStreamImpl3;
+      test_configs.push_back(test_config);
+    }
+
+    // Return the created test configurations.
+    return test_configs;
+  }
+
+  // Test case generator for the test configurations to use in the extensive
+  // tests.
+  static std::vector<TestConfig> GenerateExtensiveTestConfigs() {
+    // Lambda functions for the test config generation.
+    auto add_processing_apis = [](TestConfig test_config) {
+      struct AllowedApiCallCombinations {
+        RenderApiImpl render_api;
+        CaptureApiImpl capture_api;
+      };
+
+      const AllowedApiCallCombinations api_calls[] = {
+          {RenderApiImpl::ProcessReverseStreamImpl1,
+           CaptureApiImpl::ProcessStreamImpl1},
+          {RenderApiImpl::ProcessReverseStreamImpl2,
+           CaptureApiImpl::ProcessStreamImpl2},
+          {RenderApiImpl::ProcessReverseStreamImpl2,
+           CaptureApiImpl::ProcessStreamImpl3},
+          {RenderApiImpl::AnalyzeReverseStreamImpl,
+           CaptureApiImpl::ProcessStreamImpl2},
+          {RenderApiImpl::AnalyzeReverseStreamImpl,
+           CaptureApiImpl::ProcessStreamImpl3}};
+      std::vector<TestConfig> out;
+      for (auto api_call : api_calls) {
+        test_config.render_api_function = api_call.render_api;
+        test_config.capture_api_function = api_call.capture_api;
+        out.push_back(test_config);
+      }
+      return out;
+    };
+
+    auto add_aec_settings = [](const std::vector<TestConfig>& in) {
+      std::vector<TestConfig> out;
+      AecType aec_types[] = {
+          AecType::BasicWebRtcAecSettings, AecType::AecTurnedOff,
+          AecType::BasicWebRtcAecSettingsWithExtentedFilter,
+          AecType::BasicWebRtcAecSettingsWithDelayAgnosticAec,
+          AecType::BasicWebRtcAecSettingsWithAecMobile};
+      for (auto test_config : in) {
+        // Due to a VisualStudio 2015 compiler issue, the internal loop
+        // variable here cannot override a previously defined name.
+        // In other words "type" cannot be named "aec_type" here.
+        // https://connect.microsoft.com/VisualStudio/feedback/details/2291755
+        for (auto type : aec_types) {
+          test_config.aec_type = type;
+          out.push_back(test_config);
+        }
+      }
+      return out;
+    };
+
+    auto add_settings_scheme = [](const std::vector<TestConfig>& in) {
+      std::vector<TestConfig> out;
+      RuntimeParameterSettingScheme schemes[] = {
+          RuntimeParameterSettingScheme::SparseStreamMetadataChangeScheme,
+          RuntimeParameterSettingScheme::ExtremeStreamMetadataChangeScheme,
+          RuntimeParameterSettingScheme::FixedMonoStreamMetadataScheme,
+          RuntimeParameterSettingScheme::FixedStereoStreamMetadataScheme};
+
+      for (auto test_config : in) {
+        for (auto scheme : schemes) {
+          test_config.runtime_parameter_setting_scheme = scheme;
+          out.push_back(test_config);
+        }
+      }
+      return out;
+    };
+
+    auto add_sample_rates = [](const std::vector<TestConfig>& in) {
+      const int sample_rates[] = {8000, 16000, 32000, 48000};
+
+      std::vector<TestConfig> out;
+      for (auto test_config : in) {
+        auto available_rates =
+            (test_config.aec_type ==
+                     AecType::BasicWebRtcAecSettingsWithAecMobile
+                 ? rtc::ArrayView<const int>(sample_rates, 2)
+                 : rtc::ArrayView<const int>(sample_rates));
+
+        for (auto rate : available_rates) {
+          test_config.initial_sample_rate_hz = rate;
+          out.push_back(test_config);
+        }
+      }
+      return out;
+    };
+
+    // Generate test configurations of the relevant combinations of the
+    // parameters to
+    // test.
+    TestConfig test_config;
+    test_config.min_number_of_calls = 10000;
+    return add_sample_rates(add_settings_scheme(
+        add_aec_settings(add_processing_apis(test_config))));
+  }
+
+  RenderApiImpl render_api_function = RenderApiImpl::ProcessReverseStreamImpl2;
+  CaptureApiImpl capture_api_function = CaptureApiImpl::ProcessStreamImpl2;
+  RuntimeParameterSettingScheme runtime_parameter_setting_scheme =
+      RuntimeParameterSettingScheme::ExtremeStreamMetadataChangeScheme;
+  int initial_sample_rate_hz = 16000;
+  AecType aec_type = AecType::BasicWebRtcAecSettingsWithDelayAgnosticAec;
+  int min_number_of_calls = 300;
+};
+
+// Handler for the frame counters.
+class FrameCounters {
+ public:
+  void IncreaseRenderCounter() {
+    rtc::CritScope cs(&crit_);
+    render_count++;
+  }
+
+  void IncreaseCaptureCounter() {
+    rtc::CritScope cs(&crit_);
+    capture_count++;
+  }
+
+  int GetCaptureCounter() const {
+    rtc::CritScope cs(&crit_);
+    return capture_count;
+  }
+
+  int GetRenderCounter() const {
+    rtc::CritScope cs(&crit_);
+    return render_count;
+  }
+
+  int CaptureMinusRenderCounters() const {
+    rtc::CritScope cs(&crit_);
+    return capture_count - render_count;
+  }
+
+  int RenderMinusCaptureCounters() const {
+    return -CaptureMinusRenderCounters();
+  }
+
+  bool BothCountersExceedeThreshold(int threshold) {
+    rtc::CritScope cs(&crit_);
+    return (render_count > threshold && capture_count > threshold);
+  }
+
+ private:
+  rtc::CriticalSection crit_;
+  int render_count RTC_GUARDED_BY(crit_) = 0;
+  int capture_count RTC_GUARDED_BY(crit_) = 0;
+};
+
+// Class for handling the capture side processing.
+class CaptureProcessor {
+ public:
+  CaptureProcessor(int max_frame_size,
+                   RandomGenerator* rand_gen,
+                   rtc::Event* render_call_event,
+                   rtc::Event* capture_call_event,
+                   FrameCounters* shared_counters_state,
+                   AudioProcessingImplLockTest* test_framework,
+                   TestConfig* test_config,
+                   AudioProcessing* apm);
+  bool Process();
+
+ private:
+  static const int kMaxCallDifference = 10;
+  static const float kCaptureInputFloatLevel;
+  static const int kCaptureInputFixLevel = 1024;
+
+  void PrepareFrame();
+  void CallApmCaptureSide();
+  void ApplyRuntimeSettingScheme();
+
+  RandomGenerator* const rand_gen_ = nullptr;
+  rtc::Event* const render_call_event_ = nullptr;
+  rtc::Event* const capture_call_event_ = nullptr;
+  FrameCounters* const frame_counters_ = nullptr;
+  AudioProcessingImplLockTest* const test_ = nullptr;
+  const TestConfig* const test_config_ = nullptr;
+  AudioProcessing* const apm_ = nullptr;
+  AudioFrameData frame_data_;
+};
+
+// Class for handling the stats processing.
+class StatsProcessor {
+ public:
+  StatsProcessor(RandomGenerator* rand_gen,
+                 TestConfig* test_config,
+                 AudioProcessing* apm);
+  bool Process();
+
+ private:
+  RandomGenerator* rand_gen_ = nullptr;
+  TestConfig* test_config_ = nullptr;
+  AudioProcessing* apm_ = nullptr;
+};
+
+// Class for handling the render side processing.
+class RenderProcessor {
+ public:
+  RenderProcessor(int max_frame_size,
+                  RandomGenerator* rand_gen,
+                  rtc::Event* render_call_event,
+                  rtc::Event* capture_call_event,
+                  FrameCounters* shared_counters_state,
+                  AudioProcessingImplLockTest* test_framework,
+                  TestConfig* test_config,
+                  AudioProcessing* apm);
+  bool Process();
+
+ private:
+  static const int kMaxCallDifference = 10;
+  static const int kRenderInputFixLevel = 16384;
+  static const float kRenderInputFloatLevel;
+
+  void PrepareFrame();
+  void CallApmRenderSide();
+  void ApplyRuntimeSettingScheme();
+
+  RandomGenerator* const rand_gen_ = nullptr;
+  rtc::Event* const render_call_event_ = nullptr;
+  rtc::Event* const capture_call_event_ = nullptr;
+  FrameCounters* const frame_counters_ = nullptr;
+  AudioProcessingImplLockTest* const test_ = nullptr;
+  const TestConfig* const test_config_ = nullptr;
+  AudioProcessing* const apm_ = nullptr;
+  AudioFrameData frame_data_;
+  bool first_render_call_ = true;
+};
+
+class AudioProcessingImplLockTest
+    : public ::testing::TestWithParam<TestConfig> {
+ public:
+  AudioProcessingImplLockTest();
+  bool RunTest();
+  bool MaybeEndTest();
+
+ private:
+  static const int kTestTimeOutLimit = 10 * 60 * 1000;
+  static const int kMaxFrameSize = 480;
+
+  // ::testing::TestWithParam<> implementation
+  void SetUp() override;
+  void TearDown() override;
+
+  // Thread callback for the render thread
+  static bool RenderProcessorThreadFunc(void* context) {
+    return reinterpret_cast<AudioProcessingImplLockTest*>(context)
+        ->render_thread_state_.Process();
+  }
+
+  // Thread callback for the capture thread
+  static bool CaptureProcessorThreadFunc(void* context) {
+    return reinterpret_cast<AudioProcessingImplLockTest*>(context)
+        ->capture_thread_state_.Process();
+  }
+
+  // Thread callback for the stats thread
+  static bool StatsProcessorThreadFunc(void* context) {
+    return reinterpret_cast<AudioProcessingImplLockTest*>(context)
+        ->stats_thread_state_.Process();
+  }
+
+  // Tests whether all the required render and capture side calls have been
+  // done.
+  bool TestDone() {
+    return frame_counters_.BothCountersExceedeThreshold(
+        test_config_.min_number_of_calls);
+  }
+
+  // Start the threads used in the test.
+  void StartThreads() {
+    render_thread_.Start();
+    render_thread_.SetPriority(rtc::kRealtimePriority);
+    capture_thread_.Start();
+    capture_thread_.SetPriority(rtc::kRealtimePriority);
+    stats_thread_.Start();
+    stats_thread_.SetPriority(rtc::kNormalPriority);
+  }
+
+  // Event handlers for the test.
+  rtc::Event test_complete_;
+  rtc::Event render_call_event_;
+  rtc::Event capture_call_event_;
+
+  // Thread related variables.
+  rtc::PlatformThread render_thread_;
+  rtc::PlatformThread capture_thread_;
+  rtc::PlatformThread stats_thread_;
+  mutable RandomGenerator rand_gen_;
+
+  std::unique_ptr<AudioProcessing> apm_;
+  TestConfig test_config_;
+  FrameCounters frame_counters_;
+  RenderProcessor render_thread_state_;
+  CaptureProcessor capture_thread_state_;
+  StatsProcessor stats_thread_state_;
+};
+
+// Sleeps a random time between 0 and max_sleep milliseconds.
+void SleepRandomMs(int max_sleep, RandomGenerator* rand_gen) {
+  int sleeptime = rand_gen->RandInt(0, max_sleep);
+  SleepMs(sleeptime);
+}
+
+// Populates a float audio frame with random data.
+void PopulateAudioFrame(float** frame,
+                        float amplitude,
+                        size_t num_channels,
+                        size_t samples_per_channel,
+                        RandomGenerator* rand_gen) {
+  for (size_t ch = 0; ch < num_channels; ch++) {
+    for (size_t k = 0; k < samples_per_channel; k++) {
+      // Store random 16 bit quantized float number between +-amplitude.
+      frame[ch][k] = amplitude * (2 * rand_gen->RandFloat() - 1);
+    }
+  }
+}
+
+// Populates an audioframe frame of AudioFrame type with random data.
+void PopulateAudioFrame(AudioFrame* frame,
+                        int16_t amplitude,
+                        RandomGenerator* rand_gen) {
+  ASSERT_GT(amplitude, 0);
+  ASSERT_LE(amplitude, 32767);
+  int16_t* frame_data = frame->mutable_data();
+  for (size_t ch = 0; ch < frame->num_channels_; ch++) {
+    for (size_t k = 0; k < frame->samples_per_channel_; k++) {
+      // Store random 16 bit number between -(amplitude+1) and
+      // amplitude.
+      frame_data[k * ch] =
+          rand_gen->RandInt(2 * amplitude + 1) - amplitude - 1;
+    }
+  }
+}
+
+AudioProcessingImplLockTest::AudioProcessingImplLockTest()
+    : test_complete_(false, false),
+      render_call_event_(false, false),
+      capture_call_event_(false, false),
+      render_thread_(RenderProcessorThreadFunc, this, "render"),
+      capture_thread_(CaptureProcessorThreadFunc, this, "capture"),
+      stats_thread_(StatsProcessorThreadFunc, this, "stats"),
+      apm_(AudioProcessingBuilder().Create()),
+      render_thread_state_(kMaxFrameSize,
+                           &rand_gen_,
+                           &render_call_event_,
+                           &capture_call_event_,
+                           &frame_counters_,
+                           this,
+                           &test_config_,
+                           apm_.get()),
+      capture_thread_state_(kMaxFrameSize,
+                            &rand_gen_,
+                            &render_call_event_,
+                            &capture_call_event_,
+                            &frame_counters_,
+                            this,
+                            &test_config_,
+                            apm_.get()),
+      stats_thread_state_(&rand_gen_, &test_config_, apm_.get()) {}
+
+// Run the test with a timeout.
+bool AudioProcessingImplLockTest::RunTest() {
+  StartThreads();
+  return test_complete_.Wait(kTestTimeOutLimit);
+}
+
+bool AudioProcessingImplLockTest::MaybeEndTest() {
+  if (HasFatalFailure() || TestDone()) {
+    test_complete_.Set();
+    return true;
+  }
+  return false;
+}
+
+// Setup of test and APM.
+void AudioProcessingImplLockTest::SetUp() {
+  test_config_ = static_cast<TestConfig>(GetParam());
+
+  ASSERT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
+  ASSERT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+
+  ASSERT_EQ(apm_->kNoError,
+            apm_->gain_control()->set_mode(GainControl::kAdaptiveDigital));
+  ASSERT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+
+  ASSERT_EQ(apm_->kNoError, apm_->noise_suppression()->Enable(true));
+  ASSERT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
+
+  Config config;
+  if (test_config_.aec_type == AecType::AecTurnedOff) {
+    ASSERT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(false));
+    ASSERT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(false));
+  } else if (test_config_.aec_type ==
+             AecType::BasicWebRtcAecSettingsWithAecMobile) {
+    ASSERT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));
+    ASSERT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(false));
+  } else {
+    ASSERT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(false));
+    ASSERT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+    ASSERT_EQ(apm_->kNoError, apm_->echo_cancellation()->enable_metrics(true));
+    ASSERT_EQ(apm_->kNoError,
+              apm_->echo_cancellation()->enable_delay_logging(true));
+
+    config.Set<ExtendedFilter>(
+        new ExtendedFilter(test_config_.aec_type ==
+                           AecType::BasicWebRtcAecSettingsWithExtentedFilter));
+
+    config.Set<DelayAgnostic>(
+        new DelayAgnostic(test_config_.aec_type ==
+                          AecType::BasicWebRtcAecSettingsWithDelayAgnosticAec));
+
+    apm_->SetExtraOptions(config);
+  }
+}
+
+void AudioProcessingImplLockTest::TearDown() {
+  render_call_event_.Set();
+  capture_call_event_.Set();
+  render_thread_.Stop();
+  capture_thread_.Stop();
+  stats_thread_.Stop();
+}
+
+StatsProcessor::StatsProcessor(RandomGenerator* rand_gen,
+                               TestConfig* test_config,
+                               AudioProcessing* apm)
+    : rand_gen_(rand_gen), test_config_(test_config), apm_(apm) {}
+
+// Implements the callback functionality for the statistics
+// collection thread.
+bool StatsProcessor::Process() {
+  SleepRandomMs(100, rand_gen_);
+
+  EXPECT_EQ(apm_->echo_cancellation()->is_enabled(),
+            ((test_config_->aec_type != AecType::AecTurnedOff) &&
+             (test_config_->aec_type !=
+              AecType::BasicWebRtcAecSettingsWithAecMobile)));
+  apm_->echo_cancellation()->stream_drift_samples();
+  EXPECT_EQ(apm_->echo_control_mobile()->is_enabled(),
+            (test_config_->aec_type != AecType::AecTurnedOff) &&
+                (test_config_->aec_type ==
+                 AecType::BasicWebRtcAecSettingsWithAecMobile));
+  EXPECT_TRUE(apm_->gain_control()->is_enabled());
+  EXPECT_TRUE(apm_->noise_suppression()->is_enabled());
+
+  // The below return values are not testable.
+  apm_->noise_suppression()->speech_probability();
+  apm_->voice_detection()->is_enabled();
+
+  return true;
+}
+
+const float CaptureProcessor::kCaptureInputFloatLevel = 0.03125f;
+
+CaptureProcessor::CaptureProcessor(int max_frame_size,
+                                   RandomGenerator* rand_gen,
+                                   rtc::Event* render_call_event,
+                                   rtc::Event* capture_call_event,
+                                   FrameCounters* shared_counters_state,
+                                   AudioProcessingImplLockTest* test_framework,
+                                   TestConfig* test_config,
+                                   AudioProcessing* apm)
+    : rand_gen_(rand_gen),
+      render_call_event_(render_call_event),
+      capture_call_event_(capture_call_event),
+      frame_counters_(shared_counters_state),
+      test_(test_framework),
+      test_config_(test_config),
+      apm_(apm),
+      frame_data_(max_frame_size) {}
+
+// Implements the callback functionality for the capture thread.
+bool CaptureProcessor::Process() {
+  // Sleep a random time to simulate thread jitter.
+  SleepRandomMs(3, rand_gen_);
+
+  // Check whether the test is done.
+  if (test_->MaybeEndTest()) {
+    return false;
+  }
+
+  // Ensure that the number of render and capture calls do not
+  // differ too much.
+  if (frame_counters_->CaptureMinusRenderCounters() > kMaxCallDifference) {
+    render_call_event_->Wait(rtc::Event::kForever);
+  }
+
+  // Apply any specified capture side APM non-processing runtime calls.
+  ApplyRuntimeSettingScheme();
+
+  // Apply the capture side processing call.
+  CallApmCaptureSide();
+
+  // Increase the number of capture-side calls.
+  frame_counters_->IncreaseCaptureCounter();
+
+  // Flag to the render thread that another capture API call has occurred
+  // by triggering this threads call event.
+  capture_call_event_->Set();
+
+  return true;
+}
+
+// Prepares a frame with relevant audio data and metadata.
+void CaptureProcessor::PrepareFrame() {
+  // Restrict to a common fixed sample rate if the AudioFrame
+  // interface is used.
+  if (test_config_->capture_api_function ==
+      CaptureApiImpl::ProcessStreamImpl1) {
+    frame_data_.input_sample_rate_hz = test_config_->initial_sample_rate_hz;
+    frame_data_.output_sample_rate_hz = test_config_->initial_sample_rate_hz;
+  }
+
+  // Prepare the audioframe data and metadata.
+  frame_data_.input_samples_per_channel =
+      frame_data_.input_sample_rate_hz * AudioProcessing::kChunkSizeMs / 1000;
+  frame_data_.frame.sample_rate_hz_ = frame_data_.input_sample_rate_hz;
+  frame_data_.frame.num_channels_ = frame_data_.input_number_of_channels;
+  frame_data_.frame.samples_per_channel_ =
+      frame_data_.input_samples_per_channel;
+  PopulateAudioFrame(&frame_data_.frame, kCaptureInputFixLevel, rand_gen_);
+
+  // Prepare the float audio input data and metadata.
+  frame_data_.input_stream_config.set_sample_rate_hz(
+      frame_data_.input_sample_rate_hz);
+  frame_data_.input_stream_config.set_num_channels(
+      frame_data_.input_number_of_channels);
+  frame_data_.input_stream_config.set_has_keyboard(false);
+  PopulateAudioFrame(&frame_data_.input_frame[0], kCaptureInputFloatLevel,
+                     frame_data_.input_number_of_channels,
+                     frame_data_.input_samples_per_channel, rand_gen_);
+  frame_data_.input_channel_layout =
+      (frame_data_.input_number_of_channels == 1
+           ? AudioProcessing::ChannelLayout::kMono
+           : AudioProcessing::ChannelLayout::kStereo);
+
+  // Prepare the float audio output data and metadata.
+  frame_data_.output_samples_per_channel =
+      frame_data_.output_sample_rate_hz * AudioProcessing::kChunkSizeMs / 1000;
+  frame_data_.output_stream_config.set_sample_rate_hz(
+      frame_data_.output_sample_rate_hz);
+  frame_data_.output_stream_config.set_num_channels(
+      frame_data_.output_number_of_channels);
+  frame_data_.output_stream_config.set_has_keyboard(false);
+  frame_data_.output_channel_layout =
+      (frame_data_.output_number_of_channels == 1
+           ? AudioProcessing::ChannelLayout::kMono
+           : AudioProcessing::ChannelLayout::kStereo);
+}
+
+// Applies the capture side processing API call.
+void CaptureProcessor::CallApmCaptureSide() {
+  // Prepare a proper capture side processing API call input.
+  PrepareFrame();
+
+  // Set the stream delay.
+  apm_->set_stream_delay_ms(30);
+
+  // Set the analog level.
+  apm_->gain_control()->set_stream_analog_level(80);
+
+  // Call the specified capture side API processing method.
+  int result = AudioProcessing::kNoError;
+  switch (test_config_->capture_api_function) {
+    case CaptureApiImpl::ProcessStreamImpl1:
+      result = apm_->ProcessStream(&frame_data_.frame);
+      break;
+    case CaptureApiImpl::ProcessStreamImpl2:
+      result = apm_->ProcessStream(
+          &frame_data_.input_frame[0], frame_data_.input_samples_per_channel,
+          frame_data_.input_sample_rate_hz, frame_data_.input_channel_layout,
+          frame_data_.output_sample_rate_hz, frame_data_.output_channel_layout,
+          &frame_data_.output_frame[0]);
+      break;
+    case CaptureApiImpl::ProcessStreamImpl3:
+      result = apm_->ProcessStream(
+          &frame_data_.input_frame[0], frame_data_.input_stream_config,
+          frame_data_.output_stream_config, &frame_data_.output_frame[0]);
+      break;
+    default:
+      FAIL();
+  }
+
+  // Retrieve the new analog level.
+  apm_->gain_control()->stream_analog_level();
+
+  // Check the return code for error.
+  ASSERT_EQ(AudioProcessing::kNoError, result);
+}
+
+// Applies any runtime capture APM API calls and audio stream characteristics
+// specified by the scheme for the test.
+void CaptureProcessor::ApplyRuntimeSettingScheme() {
+  const int capture_count_local = frame_counters_->GetCaptureCounter();
+
+  // Update the number of channels and sample rates for the input and output.
+  // Note that the counts frequencies for when to set parameters
+  // are set using prime numbers in order to ensure that the
+  // permutation scheme in the parameter setting changes.
+  switch (test_config_->runtime_parameter_setting_scheme) {
+    case RuntimeParameterSettingScheme::SparseStreamMetadataChangeScheme:
+      if (capture_count_local == 0)
+        frame_data_.input_sample_rate_hz = 16000;
+      else if (capture_count_local % 11 == 0)
+        frame_data_.input_sample_rate_hz = 32000;
+      else if (capture_count_local % 73 == 0)
+        frame_data_.input_sample_rate_hz = 48000;
+      else if (capture_count_local % 89 == 0)
+        frame_data_.input_sample_rate_hz = 16000;
+      else if (capture_count_local % 97 == 0)
+        frame_data_.input_sample_rate_hz = 8000;
+
+      if (capture_count_local == 0)
+        frame_data_.input_number_of_channels = 1;
+      else if (capture_count_local % 4 == 0)
+        frame_data_.input_number_of_channels =
+            (frame_data_.input_number_of_channels == 1 ? 2 : 1);
+
+      if (capture_count_local == 0)
+        frame_data_.output_sample_rate_hz = 16000;
+      else if (capture_count_local % 5 == 0)
+        frame_data_.output_sample_rate_hz = 32000;
+      else if (capture_count_local % 47 == 0)
+        frame_data_.output_sample_rate_hz = 48000;
+      else if (capture_count_local % 53 == 0)
+        frame_data_.output_sample_rate_hz = 16000;
+      else if (capture_count_local % 71 == 0)
+        frame_data_.output_sample_rate_hz = 8000;
+
+      if (capture_count_local == 0)
+        frame_data_.output_number_of_channels = 1;
+      else if (capture_count_local % 8 == 0)
+        frame_data_.output_number_of_channels =
+            (frame_data_.output_number_of_channels == 1 ? 2 : 1);
+      break;
+    case RuntimeParameterSettingScheme::ExtremeStreamMetadataChangeScheme:
+      if (capture_count_local % 2 == 0) {
+        frame_data_.input_number_of_channels = 1;
+        frame_data_.input_sample_rate_hz = 16000;
+        frame_data_.output_number_of_channels = 1;
+        frame_data_.output_sample_rate_hz = 16000;
+      } else {
+        frame_data_.input_number_of_channels =
+            (frame_data_.input_number_of_channels == 1 ? 2 : 1);
+        if (frame_data_.input_sample_rate_hz == 8000)
+          frame_data_.input_sample_rate_hz = 16000;
+        else if (frame_data_.input_sample_rate_hz == 16000)
+          frame_data_.input_sample_rate_hz = 32000;
+        else if (frame_data_.input_sample_rate_hz == 32000)
+          frame_data_.input_sample_rate_hz = 48000;
+        else if (frame_data_.input_sample_rate_hz == 48000)
+          frame_data_.input_sample_rate_hz = 8000;
+
+        frame_data_.output_number_of_channels =
+            (frame_data_.output_number_of_channels == 1 ? 2 : 1);
+        if (frame_data_.output_sample_rate_hz == 8000)
+          frame_data_.output_sample_rate_hz = 16000;
+        else if (frame_data_.output_sample_rate_hz == 16000)
+          frame_data_.output_sample_rate_hz = 32000;
+        else if (frame_data_.output_sample_rate_hz == 32000)
+          frame_data_.output_sample_rate_hz = 48000;
+        else if (frame_data_.output_sample_rate_hz == 48000)
+          frame_data_.output_sample_rate_hz = 8000;
+      }
+      break;
+    case RuntimeParameterSettingScheme::FixedMonoStreamMetadataScheme:
+      if (capture_count_local == 0) {
+        frame_data_.input_sample_rate_hz = 16000;
+        frame_data_.input_number_of_channels = 1;
+        frame_data_.output_sample_rate_hz = 16000;
+        frame_data_.output_number_of_channels = 1;
+      }
+      break;
+    case RuntimeParameterSettingScheme::FixedStereoStreamMetadataScheme:
+      if (capture_count_local == 0) {
+        frame_data_.input_sample_rate_hz = 16000;
+        frame_data_.input_number_of_channels = 2;
+        frame_data_.output_sample_rate_hz = 16000;
+        frame_data_.output_number_of_channels = 2;
+      }
+      break;
+    default:
+      FAIL();
+  }
+
+  // Call any specified runtime APM setter and
+  // getter calls.
+  switch (test_config_->runtime_parameter_setting_scheme) {
+    case RuntimeParameterSettingScheme::SparseStreamMetadataChangeScheme:
+    case RuntimeParameterSettingScheme::FixedMonoStreamMetadataScheme:
+      break;
+    case RuntimeParameterSettingScheme::ExtremeStreamMetadataChangeScheme:
+    case RuntimeParameterSettingScheme::FixedStereoStreamMetadataScheme:
+      if (capture_count_local % 2 == 0) {
+        ASSERT_EQ(AudioProcessing::Error::kNoError,
+                  apm_->set_stream_delay_ms(30));
+        apm_->set_stream_key_pressed(true);
+        apm_->set_delay_offset_ms(15);
+        EXPECT_EQ(apm_->delay_offset_ms(), 15);
+      } else {
+        ASSERT_EQ(AudioProcessing::Error::kNoError,
+                  apm_->set_stream_delay_ms(50));
+        apm_->set_stream_key_pressed(false);
+        apm_->set_delay_offset_ms(20);
+        EXPECT_EQ(apm_->delay_offset_ms(), 20);
+        apm_->delay_offset_ms();
+      }
+      break;
+    default:
+      FAIL();
+  }
+
+  // Restric the number of output channels not to exceed
+  // the number of input channels.
+  frame_data_.output_number_of_channels =
+      std::min(frame_data_.output_number_of_channels,
+               frame_data_.input_number_of_channels);
+}
+
+const float RenderProcessor::kRenderInputFloatLevel = 0.5f;
+
+RenderProcessor::RenderProcessor(int max_frame_size,
+                                 RandomGenerator* rand_gen,
+                                 rtc::Event* render_call_event,
+                                 rtc::Event* capture_call_event,
+                                 FrameCounters* shared_counters_state,
+                                 AudioProcessingImplLockTest* test_framework,
+                                 TestConfig* test_config,
+                                 AudioProcessing* apm)
+    : rand_gen_(rand_gen),
+      render_call_event_(render_call_event),
+      capture_call_event_(capture_call_event),
+      frame_counters_(shared_counters_state),
+      test_(test_framework),
+      test_config_(test_config),
+      apm_(apm),
+      frame_data_(max_frame_size) {}
+
+// Implements the callback functionality for the render thread.
+bool RenderProcessor::Process() {
+  // Conditional wait to ensure that a capture call has been done
+  // before the first render call is performed (implicitly
+  // required by the APM API).
+  if (first_render_call_) {
+    capture_call_event_->Wait(rtc::Event::kForever);
+    first_render_call_ = false;
+  }
+
+  // Sleep a random time to simulate thread jitter.
+  SleepRandomMs(3, rand_gen_);
+
+  // Check whether the test is done.
+  if (test_->MaybeEndTest()) {
+    return false;
+  }
+
+  // Ensure that the number of render and capture calls do not
+  // differ too much.
+  if (frame_counters_->RenderMinusCaptureCounters() > kMaxCallDifference) {
+    capture_call_event_->Wait(rtc::Event::kForever);
+  }
+
+  // Apply any specified render side APM non-processing runtime calls.
+  ApplyRuntimeSettingScheme();
+
+  // Apply the render side processing call.
+  CallApmRenderSide();
+
+  // Increase the number of render-side calls.
+  frame_counters_->IncreaseRenderCounter();
+
+  // Flag to the capture thread that another render API call has occurred
+  // by triggering this threads call event.
+  render_call_event_->Set();
+  return true;
+}
+
+// Prepares the render side frame and the accompanying metadata
+// with the appropriate information.
+void RenderProcessor::PrepareFrame() {
+  // Restrict to a common fixed sample rate if the AudioFrame interface is
+  // used.
+  if ((test_config_->render_api_function ==
+       RenderApiImpl::ProcessReverseStreamImpl1) ||
+      (test_config_->aec_type !=
+       AecType::BasicWebRtcAecSettingsWithAecMobile)) {
+    frame_data_.input_sample_rate_hz = test_config_->initial_sample_rate_hz;
+    frame_data_.output_sample_rate_hz = test_config_->initial_sample_rate_hz;
+  }
+
+  // Prepare the audioframe data and metadata
+  frame_data_.input_samples_per_channel =
+      frame_data_.input_sample_rate_hz * AudioProcessing::kChunkSizeMs / 1000;
+  frame_data_.frame.sample_rate_hz_ = frame_data_.input_sample_rate_hz;
+  frame_data_.frame.num_channels_ = frame_data_.input_number_of_channels;
+  frame_data_.frame.samples_per_channel_ =
+      frame_data_.input_samples_per_channel;
+  PopulateAudioFrame(&frame_data_.frame, kRenderInputFixLevel, rand_gen_);
+
+  // Prepare the float audio input data and metadata.
+  frame_data_.input_stream_config.set_sample_rate_hz(
+      frame_data_.input_sample_rate_hz);
+  frame_data_.input_stream_config.set_num_channels(
+      frame_data_.input_number_of_channels);
+  frame_data_.input_stream_config.set_has_keyboard(false);
+  PopulateAudioFrame(&frame_data_.input_frame[0], kRenderInputFloatLevel,
+                     frame_data_.input_number_of_channels,
+                     frame_data_.input_samples_per_channel, rand_gen_);
+  frame_data_.input_channel_layout =
+      (frame_data_.input_number_of_channels == 1
+           ? AudioProcessing::ChannelLayout::kMono
+           : AudioProcessing::ChannelLayout::kStereo);
+
+  // Prepare the float audio output data and metadata.
+  frame_data_.output_samples_per_channel =
+      frame_data_.output_sample_rate_hz * AudioProcessing::kChunkSizeMs / 1000;
+  frame_data_.output_stream_config.set_sample_rate_hz(
+      frame_data_.output_sample_rate_hz);
+  frame_data_.output_stream_config.set_num_channels(
+      frame_data_.output_number_of_channels);
+  frame_data_.output_stream_config.set_has_keyboard(false);
+  frame_data_.output_channel_layout =
+      (frame_data_.output_number_of_channels == 1
+           ? AudioProcessing::ChannelLayout::kMono
+           : AudioProcessing::ChannelLayout::kStereo);
+}
+
+// Makes the render side processing API call.
+void RenderProcessor::CallApmRenderSide() {
+  // Prepare a proper render side processing API call input.
+  PrepareFrame();
+
+  // Call the specified render side API processing method.
+  int result = AudioProcessing::kNoError;
+  switch (test_config_->render_api_function) {
+    case RenderApiImpl::ProcessReverseStreamImpl1:
+      result = apm_->ProcessReverseStream(&frame_data_.frame);
+      break;
+    case RenderApiImpl::ProcessReverseStreamImpl2:
+      result = apm_->ProcessReverseStream(
+          &frame_data_.input_frame[0], frame_data_.input_stream_config,
+          frame_data_.output_stream_config, &frame_data_.output_frame[0]);
+      break;
+    case RenderApiImpl::AnalyzeReverseStreamImpl:
+      result = apm_->AnalyzeReverseStream(
+          &frame_data_.input_frame[0], frame_data_.input_samples_per_channel,
+          frame_data_.input_sample_rate_hz, frame_data_.input_channel_layout);
+      break;
+    default:
+      FAIL();
+  }
+
+  // Check the return code for error.
+  ASSERT_EQ(AudioProcessing::kNoError, result);
+}
+
+// Applies any render capture side APM API calls and audio stream
+// characteristics
+// specified by the scheme for the test.
+void RenderProcessor::ApplyRuntimeSettingScheme() {
+  const int render_count_local = frame_counters_->GetRenderCounter();
+
+  // Update the number of channels and sample rates for the input and output.
+  // Note that the counts frequencies for when to set parameters
+  // are set using prime numbers in order to ensure that the
+  // permutation scheme in the parameter setting changes.
+  switch (test_config_->runtime_parameter_setting_scheme) {
+    case RuntimeParameterSettingScheme::SparseStreamMetadataChangeScheme:
+      if (render_count_local == 0)
+        frame_data_.input_sample_rate_hz = 16000;
+      else if (render_count_local % 47 == 0)
+        frame_data_.input_sample_rate_hz = 32000;
+      else if (render_count_local % 71 == 0)
+        frame_data_.input_sample_rate_hz = 48000;
+      else if (render_count_local % 79 == 0)
+        frame_data_.input_sample_rate_hz = 16000;
+      else if (render_count_local % 83 == 0)
+        frame_data_.input_sample_rate_hz = 8000;
+
+      if (render_count_local == 0)
+        frame_data_.input_number_of_channels = 1;
+      else if (render_count_local % 4 == 0)
+        frame_data_.input_number_of_channels =
+            (frame_data_.input_number_of_channels == 1 ? 2 : 1);
+
+      if (render_count_local == 0)
+        frame_data_.output_sample_rate_hz = 16000;
+      else if (render_count_local % 17 == 0)
+        frame_data_.output_sample_rate_hz = 32000;
+      else if (render_count_local % 19 == 0)
+        frame_data_.output_sample_rate_hz = 48000;
+      else if (render_count_local % 29 == 0)
+        frame_data_.output_sample_rate_hz = 16000;
+      else if (render_count_local % 61 == 0)
+        frame_data_.output_sample_rate_hz = 8000;
+
+      if (render_count_local == 0)
+        frame_data_.output_number_of_channels = 1;
+      else if (render_count_local % 8 == 0)
+        frame_data_.output_number_of_channels =
+            (frame_data_.output_number_of_channels == 1 ? 2 : 1);
+      break;
+    case RuntimeParameterSettingScheme::ExtremeStreamMetadataChangeScheme:
+      if (render_count_local == 0) {
+        frame_data_.input_number_of_channels = 1;
+        frame_data_.input_sample_rate_hz = 16000;
+        frame_data_.output_number_of_channels = 1;
+        frame_data_.output_sample_rate_hz = 16000;
+      } else {
+        frame_data_.input_number_of_channels =
+            (frame_data_.input_number_of_channels == 1 ? 2 : 1);
+        if (frame_data_.input_sample_rate_hz == 8000)
+          frame_data_.input_sample_rate_hz = 16000;
+        else if (frame_data_.input_sample_rate_hz == 16000)
+          frame_data_.input_sample_rate_hz = 32000;
+        else if (frame_data_.input_sample_rate_hz == 32000)
+          frame_data_.input_sample_rate_hz = 48000;
+        else if (frame_data_.input_sample_rate_hz == 48000)
+          frame_data_.input_sample_rate_hz = 8000;
+
+        frame_data_.output_number_of_channels =
+            (frame_data_.output_number_of_channels == 1 ? 2 : 1);
+        if (frame_data_.output_sample_rate_hz == 8000)
+          frame_data_.output_sample_rate_hz = 16000;
+        else if (frame_data_.output_sample_rate_hz == 16000)
+          frame_data_.output_sample_rate_hz = 32000;
+        else if (frame_data_.output_sample_rate_hz == 32000)
+          frame_data_.output_sample_rate_hz = 48000;
+        else if (frame_data_.output_sample_rate_hz == 48000)
+          frame_data_.output_sample_rate_hz = 8000;
+      }
+      break;
+    case RuntimeParameterSettingScheme::FixedMonoStreamMetadataScheme:
+      if (render_count_local == 0) {
+        frame_data_.input_sample_rate_hz = 16000;
+        frame_data_.input_number_of_channels = 1;
+        frame_data_.output_sample_rate_hz = 16000;
+        frame_data_.output_number_of_channels = 1;
+      }
+      break;
+    case RuntimeParameterSettingScheme::FixedStereoStreamMetadataScheme:
+      if (render_count_local == 0) {
+        frame_data_.input_sample_rate_hz = 16000;
+        frame_data_.input_number_of_channels = 2;
+        frame_data_.output_sample_rate_hz = 16000;
+        frame_data_.output_number_of_channels = 2;
+      }
+      break;
+    default:
+      FAIL();
+  }
+
+  // Restric the number of output channels not to exceed
+  // the number of input channels.
+  frame_data_.output_number_of_channels =
+      std::min(frame_data_.output_number_of_channels,
+               frame_data_.input_number_of_channels);
+}
+
+}  // anonymous namespace
+
+TEST_P(AudioProcessingImplLockTest, LockTest) {
+  // Run test and verify that it did not time out.
+  ASSERT_TRUE(RunTest());
+}
+
+// Instantiate tests from the extreme test configuration set.
+INSTANTIATE_TEST_CASE_P(
+    DISABLED_AudioProcessingImplLockExtensive,
+    AudioProcessingImplLockTest,
+    ::testing::ValuesIn(TestConfig::GenerateExtensiveTestConfigs()));
+
+INSTANTIATE_TEST_CASE_P(
+    AudioProcessingImplLockBrief,
+    AudioProcessingImplLockTest,
+    ::testing::ValuesIn(TestConfig::GenerateBriefTestConfigs()));
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/audio_processing_impl_unittest.cc b/modules/audio_processing/audio_processing_impl_unittest.cc
new file mode 100644
index 0000000..e152bef
--- /dev/null
+++ b/modules/audio_processing/audio_processing_impl_unittest.cc
@@ -0,0 +1,78 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/audio_processing_impl.h"
+
+#include "modules/audio_processing/test/test_utils.h"
+#include "modules/include/module_common_types.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::Invoke;
+
+namespace webrtc {
+namespace {
+
+class MockInitialize : public AudioProcessingImpl {
+ public:
+  explicit MockInitialize(const webrtc::Config& config)
+      : AudioProcessingImpl(config) {}
+
+  MOCK_METHOD0(InitializeLocked, int());
+  int RealInitializeLocked() RTC_NO_THREAD_SAFETY_ANALYSIS {
+    return AudioProcessingImpl::InitializeLocked();
+  }
+
+  MOCK_CONST_METHOD0(AddRef, void());
+  MOCK_CONST_METHOD0(Release, rtc::RefCountReleaseStatus());
+};
+
+}  // namespace
+
+TEST(AudioProcessingImplTest, AudioParameterChangeTriggersInit) {
+  webrtc::Config config;
+  MockInitialize mock(config);
+  ON_CALL(mock, InitializeLocked())
+      .WillByDefault(Invoke(&mock, &MockInitialize::RealInitializeLocked));
+
+  EXPECT_CALL(mock, InitializeLocked()).Times(1);
+  mock.Initialize();
+
+  AudioFrame frame;
+  // Call with the default parameters; there should be an init.
+  frame.num_channels_ = 1;
+  SetFrameSampleRate(&frame, 16000);
+  EXPECT_CALL(mock, InitializeLocked()).Times(0);
+  EXPECT_NOERR(mock.ProcessStream(&frame));
+  EXPECT_NOERR(mock.ProcessReverseStream(&frame));
+
+  // New sample rate. (Only impacts ProcessStream).
+  SetFrameSampleRate(&frame, 32000);
+  EXPECT_CALL(mock, InitializeLocked())
+      .Times(1);
+  EXPECT_NOERR(mock.ProcessStream(&frame));
+
+  // New number of channels.
+  // TODO(peah): Investigate why this causes 2 inits.
+  frame.num_channels_ = 2;
+  EXPECT_CALL(mock, InitializeLocked())
+      .Times(2);
+  EXPECT_NOERR(mock.ProcessStream(&frame));
+  // ProcessStream sets num_channels_ == num_output_channels.
+  frame.num_channels_ = 2;
+  EXPECT_NOERR(mock.ProcessReverseStream(&frame));
+
+  // A new sample rate passed to ProcessReverseStream should cause an init.
+  SetFrameSampleRate(&frame, 16000);
+  EXPECT_CALL(mock, InitializeLocked()).Times(1);
+  EXPECT_NOERR(mock.ProcessReverseStream(&frame));
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/audio_processing_performance_unittest.cc b/modules/audio_processing/audio_processing_performance_unittest.cc
new file mode 100644
index 0000000..8dd81b2
--- /dev/null
+++ b/modules/audio_processing/audio_processing_performance_unittest.cc
@@ -0,0 +1,713 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/audio_processing_impl.h"
+
+#include <math.h>
+
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/test/test_utils.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/atomicops.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/random.h"
+#include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/event_wrapper.h"
+#include "test/gtest.h"
+#include "test/testsupport/perf_test.h"
+
+// Check to verify that the define for the intelligibility enhancer is properly
+// set.
+#if !defined(WEBRTC_INTELLIGIBILITY_ENHANCER) || \
+    (WEBRTC_INTELLIGIBILITY_ENHANCER != 0 &&     \
+     WEBRTC_INTELLIGIBILITY_ENHANCER != 1)
+#error "Set WEBRTC_INTELLIGIBILITY_ENHANCER to either 0 or 1"
+#endif
+
+namespace webrtc {
+
+namespace {
+
+static const bool kPrintAllDurations = false;
+
+class CallSimulator;
+
+// Type of the render thread APM API call to use in the test.
+enum class ProcessorType { kRender, kCapture };
+
+// Variant of APM processing settings to use in the test.
+enum class SettingsType {
+  kDefaultApmDesktop,
+  kDefaultApmMobile,
+  kDefaultApmDesktopAndBeamformer,
+  kDefaultApmDesktopAndIntelligibilityEnhancer,
+  kAllSubmodulesTurnedOff,
+  kDefaultApmDesktopWithoutDelayAgnostic,
+  kDefaultApmDesktopWithoutExtendedFilter
+};
+
+// Variables related to the audio data and formats.
+struct AudioFrameData {
+  explicit AudioFrameData(size_t max_frame_size) {
+    // Set up the two-dimensional arrays needed for the APM API calls.
+    input_framechannels.resize(2 * max_frame_size);
+    input_frame.resize(2);
+    input_frame[0] = &input_framechannels[0];
+    input_frame[1] = &input_framechannels[max_frame_size];
+
+    output_frame_channels.resize(2 * max_frame_size);
+    output_frame.resize(2);
+    output_frame[0] = &output_frame_channels[0];
+    output_frame[1] = &output_frame_channels[max_frame_size];
+  }
+
+  std::vector<float> output_frame_channels;
+  std::vector<float*> output_frame;
+  std::vector<float> input_framechannels;
+  std::vector<float*> input_frame;
+  StreamConfig input_stream_config;
+  StreamConfig output_stream_config;
+};
+
+// The configuration for the test.
+struct SimulationConfig {
+  SimulationConfig(int sample_rate_hz, SettingsType simulation_settings)
+      : sample_rate_hz(sample_rate_hz),
+        simulation_settings(simulation_settings) {}
+
+  static std::vector<SimulationConfig> GenerateSimulationConfigs() {
+    std::vector<SimulationConfig> simulation_configs;
+#ifndef WEBRTC_ANDROID
+    const SettingsType desktop_settings[] = {
+        SettingsType::kDefaultApmDesktop, SettingsType::kAllSubmodulesTurnedOff,
+        SettingsType::kDefaultApmDesktopWithoutDelayAgnostic,
+        SettingsType::kDefaultApmDesktopWithoutExtendedFilter};
+
+    const int desktop_sample_rates[] = {8000, 16000, 32000, 48000};
+
+    for (auto sample_rate : desktop_sample_rates) {
+      for (auto settings : desktop_settings) {
+        simulation_configs.push_back(SimulationConfig(sample_rate, settings));
+      }
+    }
+
+#if WEBRTC_INTELLIGIBILITY_ENHANCER == 1
+    const SettingsType intelligibility_enhancer_settings[] = {
+        SettingsType::kDefaultApmDesktopAndIntelligibilityEnhancer};
+
+    const int intelligibility_enhancer_sample_rates[] = {8000, 16000, 32000,
+                                                         48000};
+
+    for (auto sample_rate : intelligibility_enhancer_sample_rates) {
+      for (auto settings : intelligibility_enhancer_settings) {
+        simulation_configs.push_back(SimulationConfig(sample_rate, settings));
+      }
+    }
+#endif
+
+    const SettingsType beamformer_settings[] = {
+        SettingsType::kDefaultApmDesktopAndBeamformer};
+
+    const int beamformer_sample_rates[] = {8000, 16000, 32000, 48000};
+
+    for (auto sample_rate : beamformer_sample_rates) {
+      for (auto settings : beamformer_settings) {
+        simulation_configs.push_back(SimulationConfig(sample_rate, settings));
+      }
+    }
+#endif
+
+    const SettingsType mobile_settings[] = {SettingsType::kDefaultApmMobile};
+
+    const int mobile_sample_rates[] = {8000, 16000};
+
+    for (auto sample_rate : mobile_sample_rates) {
+      for (auto settings : mobile_settings) {
+        simulation_configs.push_back(SimulationConfig(sample_rate, settings));
+      }
+    }
+
+    return simulation_configs;
+  }
+
+  std::string SettingsDescription() const {
+    std::string description;
+    switch (simulation_settings) {
+      case SettingsType::kDefaultApmMobile:
+        description = "DefaultApmMobile";
+        break;
+      case SettingsType::kDefaultApmDesktop:
+        description = "DefaultApmDesktop";
+        break;
+      case SettingsType::kDefaultApmDesktopAndBeamformer:
+        description = "DefaultApmDesktopAndBeamformer";
+        break;
+      case SettingsType::kDefaultApmDesktopAndIntelligibilityEnhancer:
+        description = "DefaultApmDesktopAndIntelligibilityEnhancer";
+        break;
+      case SettingsType::kAllSubmodulesTurnedOff:
+        description = "AllSubmodulesOff";
+        break;
+      case SettingsType::kDefaultApmDesktopWithoutDelayAgnostic:
+        description = "DefaultApmDesktopWithoutDelayAgnostic";
+        break;
+      case SettingsType::kDefaultApmDesktopWithoutExtendedFilter:
+        description = "DefaultApmDesktopWithoutExtendedFilter";
+        break;
+    }
+    return description;
+  }
+
+  int sample_rate_hz = 16000;
+  SettingsType simulation_settings = SettingsType::kDefaultApmDesktop;
+};
+
+// Handler for the frame counters.
+class FrameCounters {
+ public:
+  void IncreaseRenderCounter() {
+    rtc::AtomicOps::Increment(&render_count_);
+  }
+
+  void IncreaseCaptureCounter() {
+    rtc::AtomicOps::Increment(&capture_count_);
+  }
+
+  int CaptureMinusRenderCounters() const {
+    // The return value will be approximate, but that's good enough since
+    // by the time we return the value, it's not guaranteed to be correct
+    // anyway.
+    return rtc::AtomicOps::AcquireLoad(&capture_count_) -
+           rtc::AtomicOps::AcquireLoad(&render_count_);
+  }
+
+  int RenderMinusCaptureCounters() const {
+    return -CaptureMinusRenderCounters();
+  }
+
+  bool BothCountersExceedeThreshold(int threshold) const {
+    // TODO(tommi): We could use an event to signal this so that we don't need
+    // to be polling from the main thread and possibly steal cycles.
+    const int capture_count = rtc::AtomicOps::AcquireLoad(&capture_count_);
+    const int render_count = rtc::AtomicOps::AcquireLoad(&render_count_);
+    return (render_count > threshold && capture_count > threshold);
+  }
+
+ private:
+  int render_count_ = 0;
+  int capture_count_ = 0;
+};
+
+// Class that represents a flag that can only be raised.
+class LockedFlag {
+ public:
+  bool get_flag() const {
+    return rtc::AtomicOps::AcquireLoad(&flag_);
+  }
+
+  void set_flag() {
+    if (!get_flag())  // read-only operation to avoid affecting the cache-line.
+      rtc::AtomicOps::CompareAndSwap(&flag_, 0, 1);
+  }
+
+ private:
+  int flag_ = 0;
+};
+
+// Parent class for the thread processors.
+class TimedThreadApiProcessor {
+ public:
+  TimedThreadApiProcessor(ProcessorType processor_type,
+                          Random* rand_gen,
+                          FrameCounters* shared_counters_state,
+                          LockedFlag* capture_call_checker,
+                          CallSimulator* test_framework,
+                          const SimulationConfig* simulation_config,
+                          AudioProcessing* apm,
+                          int num_durations_to_store,
+                          float input_level,
+                          int num_channels)
+      : rand_gen_(rand_gen),
+        frame_counters_(shared_counters_state),
+        capture_call_checker_(capture_call_checker),
+        test_(test_framework),
+        simulation_config_(simulation_config),
+        apm_(apm),
+        frame_data_(kMaxFrameSize),
+        clock_(webrtc::Clock::GetRealTimeClock()),
+        num_durations_to_store_(num_durations_to_store),
+        input_level_(input_level),
+        processor_type_(processor_type),
+        num_channels_(num_channels) {
+    api_call_durations_.reserve(num_durations_to_store_);
+  }
+
+  // Implements the callback functionality for the threads.
+  bool Process();
+
+  // Method for printing out the simulation statistics.
+  void print_processor_statistics(const std::string& processor_name) const {
+    const std::string modifier = "_api_call_duration";
+
+    const std::string sample_rate_name =
+        "_" + std::to_string(simulation_config_->sample_rate_hz) + "Hz";
+
+    webrtc::test::PrintResultMeanAndError(
+        "apm_timing", sample_rate_name, processor_name,
+        GetDurationAverage(), GetDurationStandardDeviation(),
+        "us", false);
+
+    if (kPrintAllDurations) {
+      webrtc::test::PrintResultList("apm_call_durations", sample_rate_name,
+                                    processor_name, api_call_durations_, "us",
+                                    false);
+    }
+  }
+
+  void AddDuration(int64_t duration) {
+    if (api_call_durations_.size() < num_durations_to_store_) {
+      api_call_durations_.push_back(duration);
+    }
+  }
+
+ private:
+  static const int kMaxCallDifference = 10;
+  static const int kMaxFrameSize = 480;
+  static const int kNumInitializationFrames = 5;
+
+  int64_t GetDurationStandardDeviation() const {
+    double variance = 0;
+    const int64_t average_duration = GetDurationAverage();
+    for (size_t k = kNumInitializationFrames; k < api_call_durations_.size();
+         k++) {
+      int64_t tmp = api_call_durations_[k] - average_duration;
+      variance += static_cast<double>(tmp * tmp);
+    }
+    const int denominator = rtc::checked_cast<int>(api_call_durations_.size()) -
+                            kNumInitializationFrames;
+    return (denominator > 0
+                ? rtc::checked_cast<int64_t>(sqrt(variance / denominator))
+                : -1);
+  }
+
+  int64_t GetDurationAverage() const {
+    int64_t average_duration = 0;
+    for (size_t k = kNumInitializationFrames; k < api_call_durations_.size();
+         k++) {
+      average_duration += api_call_durations_[k];
+    }
+    const int denominator = rtc::checked_cast<int>(api_call_durations_.size()) -
+                            kNumInitializationFrames;
+    return (denominator > 0 ? average_duration / denominator : -1);
+  }
+
+  int ProcessCapture() {
+    // Set the stream delay.
+    apm_->set_stream_delay_ms(30);
+
+    // Call and time the specified capture side API processing method.
+    const int64_t start_time = clock_->TimeInMicroseconds();
+    const int result = apm_->ProcessStream(
+        &frame_data_.input_frame[0], frame_data_.input_stream_config,
+        frame_data_.output_stream_config, &frame_data_.output_frame[0]);
+    const int64_t end_time = clock_->TimeInMicroseconds();
+
+    frame_counters_->IncreaseCaptureCounter();
+
+    AddDuration(end_time - start_time);
+
+    if (first_process_call_) {
+      // Flag that the capture side has been called at least once
+      // (needed to ensure that a capture call has been done
+      // before the first render call is performed (implicitly
+      // required by the APM API).
+      capture_call_checker_->set_flag();
+      first_process_call_ = false;
+    }
+    return result;
+  }
+
+  bool ReadyToProcessCapture() {
+    return (frame_counters_->CaptureMinusRenderCounters() <=
+            kMaxCallDifference);
+  }
+
+  int ProcessRender() {
+    // Call and time the specified render side API processing method.
+    const int64_t start_time = clock_->TimeInMicroseconds();
+    const int result = apm_->ProcessReverseStream(
+        &frame_data_.input_frame[0], frame_data_.input_stream_config,
+        frame_data_.output_stream_config, &frame_data_.output_frame[0]);
+    const int64_t end_time = clock_->TimeInMicroseconds();
+    frame_counters_->IncreaseRenderCounter();
+
+    AddDuration(end_time - start_time);
+
+    return result;
+  }
+
+  bool ReadyToProcessRender() {
+    // Do not process until at least one capture call has been done.
+    // (implicitly required by the APM API).
+    if (first_process_call_ && !capture_call_checker_->get_flag()) {
+      return false;
+    }
+
+    // Ensure that the number of render and capture calls do not differ too
+    // much.
+    if (frame_counters_->RenderMinusCaptureCounters() > kMaxCallDifference) {
+      return false;
+    }
+
+    first_process_call_ = false;
+    return true;
+  }
+
+  void PrepareFrame() {
+    // Lambda function for populating a float multichannel audio frame
+    // with random data.
+    auto populate_audio_frame = [](float amplitude, size_t num_channels,
+                                   size_t samples_per_channel, Random* rand_gen,
+                                   float** frame) {
+      for (size_t ch = 0; ch < num_channels; ch++) {
+        for (size_t k = 0; k < samples_per_channel; k++) {
+          // Store random float number with a value between +-amplitude.
+          frame[ch][k] = amplitude * (2 * rand_gen->Rand<float>() - 1);
+        }
+      }
+    };
+
+    // Prepare the audio input data and metadata.
+    frame_data_.input_stream_config.set_sample_rate_hz(
+        simulation_config_->sample_rate_hz);
+    frame_data_.input_stream_config.set_num_channels(num_channels_);
+    frame_data_.input_stream_config.set_has_keyboard(false);
+    populate_audio_frame(input_level_, num_channels_,
+                         (simulation_config_->sample_rate_hz *
+                          AudioProcessing::kChunkSizeMs / 1000),
+                         rand_gen_, &frame_data_.input_frame[0]);
+
+    // Prepare the float audio output data and metadata.
+    frame_data_.output_stream_config.set_sample_rate_hz(
+        simulation_config_->sample_rate_hz);
+    frame_data_.output_stream_config.set_num_channels(1);
+    frame_data_.output_stream_config.set_has_keyboard(false);
+  }
+
+  bool ReadyToProcess() {
+    switch (processor_type_) {
+      case ProcessorType::kRender:
+        return ReadyToProcessRender();
+
+      case ProcessorType::kCapture:
+        return ReadyToProcessCapture();
+    }
+
+    // Should not be reached, but the return statement is needed for the code to
+    // build successfully on Android.
+    RTC_NOTREACHED();
+    return false;
+  }
+
+  Random* rand_gen_ = nullptr;
+  FrameCounters* frame_counters_ = nullptr;
+  LockedFlag* capture_call_checker_ = nullptr;
+  CallSimulator* test_ = nullptr;
+  const SimulationConfig* const simulation_config_ = nullptr;
+  AudioProcessing* apm_ = nullptr;
+  AudioFrameData frame_data_;
+  webrtc::Clock* clock_;
+  const size_t num_durations_to_store_;
+  std::vector<double> api_call_durations_;
+  const float input_level_;
+  bool first_process_call_ = true;
+  const ProcessorType processor_type_;
+  const int num_channels_ = 1;
+};
+
+// Class for managing the test simulation.
+class CallSimulator : public ::testing::TestWithParam<SimulationConfig> {
+ public:
+  CallSimulator()
+      : test_complete_(EventWrapper::Create()),
+        render_thread_(
+            new rtc::PlatformThread(RenderProcessorThreadFunc, this, "render")),
+        capture_thread_(new rtc::PlatformThread(CaptureProcessorThreadFunc,
+                                                this,
+                                                "capture")),
+        rand_gen_(42U),
+        simulation_config_(static_cast<SimulationConfig>(GetParam())) {}
+
+  // Run the call simulation with a timeout.
+  EventTypeWrapper Run() {
+    StartThreads();
+
+    EventTypeWrapper result = test_complete_->Wait(kTestTimeout);
+
+    StopThreads();
+
+    render_thread_state_->print_processor_statistics(
+        simulation_config_.SettingsDescription() + "_render");
+    capture_thread_state_->print_processor_statistics(
+        simulation_config_.SettingsDescription() + "_capture");
+
+    return result;
+  }
+
+  // Tests whether all the required render and capture side calls have been
+  // done.
+  bool MaybeEndTest() {
+    if (frame_counters_.BothCountersExceedeThreshold(kMinNumFramesToProcess)) {
+      test_complete_->Set();
+      return true;
+    }
+    return false;
+  }
+
+ private:
+  static const float kCaptureInputFloatLevel;
+  static const float kRenderInputFloatLevel;
+  static const int kMinNumFramesToProcess = 150;
+  static const int32_t kTestTimeout = 3 * 10 * kMinNumFramesToProcess;
+
+  // ::testing::TestWithParam<> implementation.
+  void TearDown() override { StopThreads(); }
+
+  // Stop all running threads.
+  void StopThreads() {
+    render_thread_->Stop();
+    capture_thread_->Stop();
+  }
+
+  // Simulator and APM setup.
+  void SetUp() override {
+    // Lambda function for setting the default APM runtime settings for desktop.
+    auto set_default_desktop_apm_runtime_settings = [](AudioProcessing* apm) {
+      ASSERT_EQ(apm->kNoError, apm->level_estimator()->Enable(true));
+      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+                apm->gain_control()->set_mode(GainControl::kAdaptiveDigital));
+      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+      ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
+      ASSERT_EQ(apm->kNoError, apm->voice_detection()->Enable(true));
+      ASSERT_EQ(apm->kNoError, apm->echo_control_mobile()->Enable(false));
+      ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
+      ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->enable_metrics(true));
+      ASSERT_EQ(apm->kNoError,
+                apm->echo_cancellation()->enable_delay_logging(true));
+    };
+
+    // Lambda function for setting the default APM runtime settings for mobile.
+    auto set_default_mobile_apm_runtime_settings = [](AudioProcessing* apm) {
+      ASSERT_EQ(apm->kNoError, apm->level_estimator()->Enable(true));
+      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+                apm->gain_control()->set_mode(GainControl::kAdaptiveDigital));
+      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+      ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
+      ASSERT_EQ(apm->kNoError, apm->voice_detection()->Enable(true));
+      ASSERT_EQ(apm->kNoError, apm->echo_control_mobile()->Enable(true));
+      ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(false));
+    };
+
+    // Lambda function for turning off all of the APM runtime settings
+    // submodules.
+    auto turn_off_default_apm_runtime_settings = [](AudioProcessing* apm) {
+      ASSERT_EQ(apm->kNoError, apm->level_estimator()->Enable(false));
+      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(false));
+      ASSERT_EQ(apm->kNoError,
+                apm->gain_control()->set_mode(GainControl::kAdaptiveDigital));
+      ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(false));
+      ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(false));
+      ASSERT_EQ(apm->kNoError, apm->voice_detection()->Enable(false));
+      ASSERT_EQ(apm->kNoError, apm->echo_control_mobile()->Enable(false));
+      ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(false));
+      ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->enable_metrics(false));
+      ASSERT_EQ(apm->kNoError,
+                apm->echo_cancellation()->enable_delay_logging(false));
+    };
+
+    // Lambda function for adding default desktop APM settings to a config.
+    auto add_default_desktop_config = [](Config* config) {
+      config->Set<ExtendedFilter>(new ExtendedFilter(true));
+      config->Set<DelayAgnostic>(new DelayAgnostic(true));
+    };
+
+    // Lambda function for adding beamformer settings to a config.
+    auto add_beamformer_config = [](Config* config) {
+      const size_t num_mics = 2;
+      const std::vector<Point> array_geometry =
+          ParseArrayGeometry("0 0 0 0.05 0 0", num_mics);
+      RTC_CHECK_EQ(array_geometry.size(), num_mics);
+
+      config->Set<Beamforming>(
+          new Beamforming(true, array_geometry,
+                          SphericalPointf(DegreesToRadians(90), 0.f, 1.f)));
+    };
+
+    int num_capture_channels = 1;
+    switch (simulation_config_.simulation_settings) {
+      case SettingsType::kDefaultApmMobile: {
+        apm_.reset(AudioProcessingBuilder().Create());
+        ASSERT_TRUE(!!apm_);
+        set_default_mobile_apm_runtime_settings(apm_.get());
+        break;
+      }
+      case SettingsType::kDefaultApmDesktop: {
+        Config config;
+        add_default_desktop_config(&config);
+        apm_.reset(AudioProcessingBuilder().Create(config));
+        ASSERT_TRUE(!!apm_);
+        set_default_desktop_apm_runtime_settings(apm_.get());
+        apm_->SetExtraOptions(config);
+        break;
+      }
+      case SettingsType::kDefaultApmDesktopAndBeamformer: {
+        Config config;
+        add_beamformer_config(&config);
+        add_default_desktop_config(&config);
+        apm_.reset(AudioProcessingBuilder().Create(config));
+        ASSERT_TRUE(!!apm_);
+        set_default_desktop_apm_runtime_settings(apm_.get());
+        apm_->SetExtraOptions(config);
+        num_capture_channels = 2;
+        break;
+      }
+      case SettingsType::kDefaultApmDesktopAndIntelligibilityEnhancer: {
+        Config config;
+        config.Set<Intelligibility>(new Intelligibility(true));
+        add_default_desktop_config(&config);
+        apm_.reset(AudioProcessingBuilder().Create(config));
+        ASSERT_TRUE(!!apm_);
+        set_default_desktop_apm_runtime_settings(apm_.get());
+        apm_->SetExtraOptions(config);
+        break;
+      }
+      case SettingsType::kAllSubmodulesTurnedOff: {
+        apm_.reset(AudioProcessingBuilder().Create());
+        ASSERT_TRUE(!!apm_);
+        turn_off_default_apm_runtime_settings(apm_.get());
+        break;
+      }
+      case SettingsType::kDefaultApmDesktopWithoutDelayAgnostic: {
+        Config config;
+        config.Set<ExtendedFilter>(new ExtendedFilter(true));
+        config.Set<DelayAgnostic>(new DelayAgnostic(false));
+        apm_.reset(AudioProcessingBuilder().Create(config));
+        ASSERT_TRUE(!!apm_);
+        set_default_desktop_apm_runtime_settings(apm_.get());
+        apm_->SetExtraOptions(config);
+        break;
+      }
+      case SettingsType::kDefaultApmDesktopWithoutExtendedFilter: {
+        Config config;
+        config.Set<ExtendedFilter>(new ExtendedFilter(false));
+        config.Set<DelayAgnostic>(new DelayAgnostic(true));
+        apm_.reset(AudioProcessingBuilder().Create(config));
+        ASSERT_TRUE(!!apm_);
+        set_default_desktop_apm_runtime_settings(apm_.get());
+        apm_->SetExtraOptions(config);
+        break;
+      }
+    }
+
+    render_thread_state_.reset(new TimedThreadApiProcessor(
+        ProcessorType::kRender, &rand_gen_, &frame_counters_,
+        &capture_call_checker_, this, &simulation_config_, apm_.get(),
+        kMinNumFramesToProcess, kRenderInputFloatLevel, 1));
+    capture_thread_state_.reset(new TimedThreadApiProcessor(
+        ProcessorType::kCapture, &rand_gen_, &frame_counters_,
+        &capture_call_checker_, this, &simulation_config_, apm_.get(),
+        kMinNumFramesToProcess, kCaptureInputFloatLevel, num_capture_channels));
+  }
+
+  // Thread callback for the render thread.
+  static bool RenderProcessorThreadFunc(void* context) {
+    return reinterpret_cast<CallSimulator*>(context)
+        ->render_thread_state_->Process();
+  }
+
+  // Thread callback for the capture thread.
+  static bool CaptureProcessorThreadFunc(void* context) {
+    return reinterpret_cast<CallSimulator*>(context)
+        ->capture_thread_state_->Process();
+  }
+
+  // Start the threads used in the test.
+  void StartThreads() {
+    ASSERT_NO_FATAL_FAILURE(render_thread_->Start());
+    render_thread_->SetPriority(rtc::kRealtimePriority);
+    ASSERT_NO_FATAL_FAILURE(capture_thread_->Start());
+    capture_thread_->SetPriority(rtc::kRealtimePriority);
+  }
+
+  // Event handler for the test.
+  const std::unique_ptr<EventWrapper> test_complete_;
+
+  // Thread related variables.
+  std::unique_ptr<rtc::PlatformThread> render_thread_;
+  std::unique_ptr<rtc::PlatformThread> capture_thread_;
+  Random rand_gen_;
+
+  std::unique_ptr<AudioProcessing> apm_;
+  const SimulationConfig simulation_config_;
+  FrameCounters frame_counters_;
+  LockedFlag capture_call_checker_;
+  std::unique_ptr<TimedThreadApiProcessor> render_thread_state_;
+  std::unique_ptr<TimedThreadApiProcessor> capture_thread_state_;
+};
+
+// Implements the callback functionality for the threads.
+bool TimedThreadApiProcessor::Process() {
+  PrepareFrame();
+
+  // Wait in a spinlock manner until it is ok to start processing.
+  // Note that SleepMs is not applicable since it only allows sleeping
+  // on a millisecond basis which is too long.
+  // TODO(tommi): This loop may affect the performance of the test that it's
+  // meant to measure.  See if we could use events instead to signal readiness.
+  while (!ReadyToProcess()) {
+  }
+
+  int result = AudioProcessing::kNoError;
+  switch (processor_type_) {
+    case ProcessorType::kRender:
+      result = ProcessRender();
+      break;
+    case ProcessorType::kCapture:
+      result = ProcessCapture();
+      break;
+  }
+
+  EXPECT_EQ(result, AudioProcessing::kNoError);
+
+  return !test_->MaybeEndTest();
+}
+
+const float CallSimulator::kRenderInputFloatLevel = 0.5f;
+const float CallSimulator::kCaptureInputFloatLevel = 0.03125f;
+}  // anonymous namespace
+
+// TODO(peah): Reactivate once issue 7712 has been resolved.
+TEST_P(CallSimulator, DISABLED_ApiCallDurationTest) {
+  // Run test and verify that it did not time out.
+  EXPECT_EQ(kEventSignaled, Run());
+}
+
+INSTANTIATE_TEST_CASE_P(
+    AudioProcessingPerformanceTest,
+    CallSimulator,
+    ::testing::ValuesIn(SimulationConfig::GenerateSimulationConfigs()));
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/audio_processing_unittest.cc b/modules/audio_processing/audio_processing_unittest.cc
new file mode 100644
index 0000000..89d6cb9
--- /dev/null
+++ b/modules/audio_processing/audio_processing_unittest.cc
@@ -0,0 +1,3053 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include <math.h>
+#include <stdio.h>
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+#include <queue>
+
+#include "common_audio/include/audio_util.h"
+#include "common_audio/resampler/include/push_resampler.h"
+#include "common_audio/resampler/push_sinc_resampler.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_processing/aec_dump/aec_dump_factory.h"
+#include "modules/audio_processing/audio_processing_impl.h"
+#include "modules/audio_processing/beamformer/mock_nonlinear_beamformer.h"
+#include "modules/audio_processing/common.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/include/mock_audio_processing.h"
+#include "modules/audio_processing/test/protobuf_utils.h"
+#include "modules/audio_processing/test/test_utils.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/gtest_prod_util.h"
+#include "rtc_base/ignore_wundef.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "rtc_base/protobuf_utils.h"
+#include "rtc_base/refcountedobject.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/thread.h"
+#include "system_wrappers/include/event_wrapper.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+RTC_PUSH_IGNORING_WUNDEF()
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_processing/test/unittest.pb.h"
+#else
+#include "modules/audio_processing/test/unittest.pb.h"
+#endif
+RTC_POP_IGNORING_WUNDEF()
+
+namespace webrtc {
+namespace {
+
+// TODO(ekmeyerson): Switch to using StreamConfig and ProcessingConfig where
+// applicable.
+
+// TODO(bjornv): This is not feasible until the functionality has been
+// re-implemented; see comment at the bottom of this file. For now, the user has
+// to hard code the |write_ref_data| value.
+// When false, this will compare the output data with the results stored to
+// file. This is the typical case. When the file should be updated, it can
+// be set to true with the command-line switch --write_ref_data.
+bool write_ref_data = false;
+const int32_t kChannels[] = {1, 2};
+const int kSampleRates[] = {8000, 16000, 32000, 48000};
+
+#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
+// Android doesn't support 48kHz.
+const int kProcessSampleRates[] = {8000, 16000, 32000};
+#elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+const int kProcessSampleRates[] = {8000, 16000, 32000, 48000};
+#endif
+
+enum StreamDirection { kForward = 0, kReverse };
+
+void ConvertToFloat(const int16_t* int_data, ChannelBuffer<float>* cb) {
+  ChannelBuffer<int16_t> cb_int(cb->num_frames(),
+                                cb->num_channels());
+  Deinterleave(int_data,
+               cb->num_frames(),
+               cb->num_channels(),
+               cb_int.channels());
+  for (size_t i = 0; i < cb->num_channels(); ++i) {
+    S16ToFloat(cb_int.channels()[i],
+               cb->num_frames(),
+               cb->channels()[i]);
+  }
+}
+
+void ConvertToFloat(const AudioFrame& frame, ChannelBuffer<float>* cb) {
+  ConvertToFloat(frame.data(), cb);
+}
+
+// Number of channels including the keyboard channel.
+size_t TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
+  switch (layout) {
+    case AudioProcessing::kMono:
+      return 1;
+    case AudioProcessing::kMonoAndKeyboard:
+    case AudioProcessing::kStereo:
+      return 2;
+    case AudioProcessing::kStereoAndKeyboard:
+      return 3;
+  }
+  RTC_NOTREACHED();
+  return 0;
+}
+
+int TruncateToMultipleOf10(int value) {
+  return (value / 10) * 10;
+}
+
+void MixStereoToMono(const float* stereo, float* mono,
+                     size_t samples_per_channel) {
+  for (size_t i = 0; i < samples_per_channel; ++i)
+    mono[i] = (stereo[i * 2] + stereo[i * 2 + 1]) / 2;
+}
+
+void MixStereoToMono(const int16_t* stereo, int16_t* mono,
+                     size_t samples_per_channel) {
+  for (size_t i = 0; i < samples_per_channel; ++i)
+    mono[i] = (stereo[i * 2] + stereo[i * 2 + 1]) >> 1;
+}
+
+void CopyLeftToRightChannel(int16_t* stereo, size_t samples_per_channel) {
+  for (size_t i = 0; i < samples_per_channel; i++) {
+    stereo[i * 2 + 1] = stereo[i * 2];
+  }
+}
+
+void VerifyChannelsAreEqual(const int16_t* stereo, size_t samples_per_channel) {
+  for (size_t i = 0; i < samples_per_channel; i++) {
+    EXPECT_EQ(stereo[i * 2 + 1], stereo[i * 2]);
+  }
+}
+
+void SetFrameTo(AudioFrame* frame, int16_t value) {
+  int16_t* frame_data = frame->mutable_data();
+  for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
+       ++i) {
+    frame_data[i] = value;
+  }
+}
+
+void SetFrameTo(AudioFrame* frame, int16_t left, int16_t right) {
+  ASSERT_EQ(2u, frame->num_channels_);
+  int16_t* frame_data = frame->mutable_data();
+  for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
+    frame_data[i] = left;
+    frame_data[i + 1] = right;
+  }
+}
+
+void ScaleFrame(AudioFrame* frame, float scale) {
+  int16_t* frame_data = frame->mutable_data();
+  for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
+       ++i) {
+    frame_data[i] = FloatS16ToS16(frame_data[i] * scale);
+  }
+}
+
+bool FrameDataAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) {
+  if (frame1.samples_per_channel_ != frame2.samples_per_channel_) {
+    return false;
+  }
+  if (frame1.num_channels_ != frame2.num_channels_) {
+    return false;
+  }
+  if (memcmp(frame1.data(), frame2.data(),
+             frame1.samples_per_channel_ * frame1.num_channels_ *
+                 sizeof(int16_t))) {
+    return false;
+  }
+  return true;
+}
+
+void EnableAllAPComponents(AudioProcessing* ap) {
+#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
+  EXPECT_NOERR(ap->echo_control_mobile()->Enable(true));
+
+  EXPECT_NOERR(ap->gain_control()->set_mode(GainControl::kAdaptiveDigital));
+  EXPECT_NOERR(ap->gain_control()->Enable(true));
+#elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+  EXPECT_NOERR(ap->echo_cancellation()->enable_drift_compensation(true));
+  EXPECT_NOERR(ap->echo_cancellation()->enable_metrics(true));
+  EXPECT_NOERR(ap->echo_cancellation()->enable_delay_logging(true));
+  EXPECT_NOERR(ap->echo_cancellation()->Enable(true));
+
+  EXPECT_NOERR(ap->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
+  EXPECT_NOERR(ap->gain_control()->set_analog_level_limits(0, 255));
+  EXPECT_NOERR(ap->gain_control()->Enable(true));
+#endif
+
+  AudioProcessing::Config apm_config;
+  apm_config.high_pass_filter.enabled = true;
+  ap->ApplyConfig(apm_config);
+
+  EXPECT_NOERR(ap->level_estimator()->Enable(true));
+  EXPECT_NOERR(ap->noise_suppression()->Enable(true));
+
+  EXPECT_NOERR(ap->voice_detection()->Enable(true));
+}
+
+// These functions are only used by ApmTest.Process.
+template <class T>
+T AbsValue(T a) {
+  return a > 0 ? a: -a;
+}
+
+int16_t MaxAudioFrame(const AudioFrame& frame) {
+  const size_t length = frame.samples_per_channel_ * frame.num_channels_;
+  const int16_t* frame_data = frame.data();
+  int16_t max_data = AbsValue(frame_data[0]);
+  for (size_t i = 1; i < length; i++) {
+    max_data = std::max(max_data, AbsValue(frame_data[i]));
+  }
+
+  return max_data;
+}
+
+#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+void TestStats(const AudioProcessing::Statistic& test,
+               const audioproc::Test::Statistic& reference) {
+  EXPECT_EQ(reference.instant(), test.instant);
+  EXPECT_EQ(reference.average(), test.average);
+  EXPECT_EQ(reference.maximum(), test.maximum);
+  EXPECT_EQ(reference.minimum(), test.minimum);
+}
+
+void WriteStatsMessage(const AudioProcessing::Statistic& output,
+                       audioproc::Test::Statistic* msg) {
+  msg->set_instant(output.instant);
+  msg->set_average(output.average);
+  msg->set_maximum(output.maximum);
+  msg->set_minimum(output.minimum);
+}
+#endif
+
+void OpenFileAndWriteMessage(const std::string& filename,
+                             const MessageLite& msg) {
+  FILE* file = fopen(filename.c_str(), "wb");
+  ASSERT_TRUE(file != NULL);
+
+  int32_t size = rtc::checked_cast<int32_t>(msg.ByteSizeLong());
+  ASSERT_GT(size, 0);
+  std::unique_ptr<uint8_t[]> array(new uint8_t[size]);
+  ASSERT_TRUE(msg.SerializeToArray(array.get(), size));
+
+  ASSERT_EQ(1u, fwrite(&size, sizeof(size), 1, file));
+  ASSERT_EQ(static_cast<size_t>(size),
+      fwrite(array.get(), sizeof(array[0]), size, file));
+  fclose(file);
+}
+
+std::string ResourceFilePath(const std::string& name, int sample_rate_hz) {
+  std::ostringstream ss;
+  // Resource files are all stereo.
+  ss << name << sample_rate_hz / 1000 << "_stereo";
+  return test::ResourcePath(ss.str(), "pcm");
+}
+
+// Temporary filenames unique to this process. Used to be able to run these
+// tests in parallel as each process needs to be running in isolation they can't
+// have competing filenames.
+std::map<std::string, std::string> temp_filenames;
+
+std::string OutputFilePath(const std::string& name,
+                           int input_rate,
+                           int output_rate,
+                           int reverse_input_rate,
+                           int reverse_output_rate,
+                           size_t num_input_channels,
+                           size_t num_output_channels,
+                           size_t num_reverse_input_channels,
+                           size_t num_reverse_output_channels,
+                           StreamDirection file_direction) {
+  std::ostringstream ss;
+  ss << name << "_i" << num_input_channels << "_" << input_rate / 1000 << "_ir"
+     << num_reverse_input_channels << "_" << reverse_input_rate / 1000 << "_";
+  if (num_output_channels == 1) {
+    ss << "mono";
+  } else if (num_output_channels == 2) {
+    ss << "stereo";
+  } else {
+    RTC_NOTREACHED();
+  }
+  ss << output_rate / 1000;
+  if (num_reverse_output_channels == 1) {
+    ss << "_rmono";
+  } else if (num_reverse_output_channels == 2) {
+    ss << "_rstereo";
+  } else {
+    RTC_NOTREACHED();
+  }
+  ss << reverse_output_rate / 1000;
+  ss << "_d" << file_direction << "_pcm";
+
+  std::string filename = ss.str();
+  if (temp_filenames[filename].empty())
+    temp_filenames[filename] = test::TempFilename(test::OutputPath(), filename);
+  return temp_filenames[filename];
+}
+
+void ClearTempFiles() {
+  for (auto& kv : temp_filenames)
+    remove(kv.second.c_str());
+}
+
+// Only remove "out" files. Keep "ref" files.
+void ClearTempOutFiles() {
+  for (auto it = temp_filenames.begin(); it != temp_filenames.end();) {
+    const std::string& filename = it->first;
+    if (filename.substr(0, 3).compare("out") == 0) {
+      remove(it->second.c_str());
+      temp_filenames.erase(it++);
+    } else {
+      it++;
+    }
+  }
+}
+
+void OpenFileAndReadMessage(const std::string& filename, MessageLite* msg) {
+  FILE* file = fopen(filename.c_str(), "rb");
+  ASSERT_TRUE(file != NULL);
+  ReadMessageFromFile(file, msg);
+  fclose(file);
+}
+
+// Reads a 10 ms chunk of int16 interleaved audio from the given (assumed
+// stereo) file, converts to deinterleaved float (optionally downmixing) and
+// returns the result in |cb|. Returns false if the file ended (or on error) and
+// true otherwise.
+//
+// |int_data| and |float_data| are just temporary space that must be
+// sufficiently large to hold the 10 ms chunk.
+bool ReadChunk(FILE* file, int16_t* int_data, float* float_data,
+               ChannelBuffer<float>* cb) {
+  // The files always contain stereo audio.
+  size_t frame_size = cb->num_frames() * 2;
+  size_t read_count = fread(int_data, sizeof(int16_t), frame_size, file);
+  if (read_count != frame_size) {
+    // Check that the file really ended.
+    RTC_DCHECK(feof(file));
+    return false;  // This is expected.
+  }
+
+  S16ToFloat(int_data, frame_size, float_data);
+  if (cb->num_channels() == 1) {
+    MixStereoToMono(float_data, cb->channels()[0], cb->num_frames());
+  } else {
+    Deinterleave(float_data, cb->num_frames(), 2,
+                 cb->channels());
+  }
+
+  return true;
+}
+
+class ApmTest : public ::testing::Test {
+ protected:
+  ApmTest();
+  virtual void SetUp();
+  virtual void TearDown();
+
+  static void SetUpTestCase() {
+  }
+
+  static void TearDownTestCase() {
+    ClearTempFiles();
+  }
+
+  // Used to select between int and float interface tests.
+  enum Format {
+    kIntFormat,
+    kFloatFormat
+  };
+
+  void Init(int sample_rate_hz,
+            int output_sample_rate_hz,
+            int reverse_sample_rate_hz,
+            size_t num_input_channels,
+            size_t num_output_channels,
+            size_t num_reverse_channels,
+            bool open_output_file);
+  void Init(AudioProcessing* ap);
+  void EnableAllComponents();
+  bool ReadFrame(FILE* file, AudioFrame* frame);
+  bool ReadFrame(FILE* file, AudioFrame* frame, ChannelBuffer<float>* cb);
+  void ReadFrameWithRewind(FILE* file, AudioFrame* frame);
+  void ReadFrameWithRewind(FILE* file, AudioFrame* frame,
+                           ChannelBuffer<float>* cb);
+  void ProcessWithDefaultStreamParameters(AudioFrame* frame);
+  void ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
+                                    int delay_min, int delay_max);
+  void TestChangingChannelsInt16Interface(
+      size_t num_channels,
+      AudioProcessing::Error expected_return);
+  void TestChangingForwardChannels(size_t num_in_channels,
+                                   size_t num_out_channels,
+                                   AudioProcessing::Error expected_return);
+  void TestChangingReverseChannels(size_t num_rev_channels,
+                                   AudioProcessing::Error expected_return);
+  void RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate);
+  void RunManualVolumeChangeIsPossibleTest(int sample_rate);
+  void StreamParametersTest(Format format);
+  int ProcessStreamChooser(Format format);
+  int AnalyzeReverseStreamChooser(Format format);
+  void ProcessDebugDump(const std::string& in_filename,
+                        const std::string& out_filename,
+                        Format format,
+                        int max_size_bytes);
+  void VerifyDebugDumpTest(Format format);
+
+  const std::string output_path_;
+  const std::string ref_filename_;
+  std::unique_ptr<AudioProcessing> apm_;
+  AudioFrame* frame_;
+  AudioFrame* revframe_;
+  std::unique_ptr<ChannelBuffer<float> > float_cb_;
+  std::unique_ptr<ChannelBuffer<float> > revfloat_cb_;
+  int output_sample_rate_hz_;
+  size_t num_output_channels_;
+  FILE* far_file_;
+  FILE* near_file_;
+  FILE* out_file_;
+};
+
+ApmTest::ApmTest()
+    : output_path_(test::OutputPath()),
+#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
+      ref_filename_(test::ResourcePath("audio_processing/output_data_fixed",
+                                       "pb")),
+#elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+#if defined(WEBRTC_MAC)
+      // A different file for Mac is needed because on this platform the AEC
+      // constant |kFixedDelayMs| value is 20 and not 50 as it is on the rest.
+      ref_filename_(test::ResourcePath("audio_processing/output_data_mac",
+                                       "pb")),
+#else
+      ref_filename_(test::ResourcePath("audio_processing/output_data_float",
+                                       "pb")),
+#endif
+#endif
+      frame_(NULL),
+      revframe_(NULL),
+      output_sample_rate_hz_(0),
+      num_output_channels_(0),
+      far_file_(NULL),
+      near_file_(NULL),
+      out_file_(NULL) {
+  Config config;
+  config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
+  apm_.reset(AudioProcessingBuilder().Create(config));
+}
+
+void ApmTest::SetUp() {
+  ASSERT_TRUE(apm_.get() != NULL);
+
+  frame_ = new AudioFrame();
+  revframe_ = new AudioFrame();
+
+  Init(32000, 32000, 32000, 2, 2, 2, false);
+}
+
+void ApmTest::TearDown() {
+  if (frame_) {
+    delete frame_;
+  }
+  frame_ = NULL;
+
+  if (revframe_) {
+    delete revframe_;
+  }
+  revframe_ = NULL;
+
+  if (far_file_) {
+    ASSERT_EQ(0, fclose(far_file_));
+  }
+  far_file_ = NULL;
+
+  if (near_file_) {
+    ASSERT_EQ(0, fclose(near_file_));
+  }
+  near_file_ = NULL;
+
+  if (out_file_) {
+    ASSERT_EQ(0, fclose(out_file_));
+  }
+  out_file_ = NULL;
+}
+
+void ApmTest::Init(AudioProcessing* ap) {
+  ASSERT_EQ(kNoErr,
+            ap->Initialize(
+                {{{frame_->sample_rate_hz_, frame_->num_channels_},
+                  {output_sample_rate_hz_, num_output_channels_},
+                  {revframe_->sample_rate_hz_, revframe_->num_channels_},
+                  {revframe_->sample_rate_hz_, revframe_->num_channels_}}}));
+}
+
+void ApmTest::Init(int sample_rate_hz,
+                   int output_sample_rate_hz,
+                   int reverse_sample_rate_hz,
+                   size_t num_input_channels,
+                   size_t num_output_channels,
+                   size_t num_reverse_channels,
+                   bool open_output_file) {
+  SetContainerFormat(sample_rate_hz, num_input_channels, frame_, &float_cb_);
+  output_sample_rate_hz_ = output_sample_rate_hz;
+  num_output_channels_ = num_output_channels;
+
+  SetContainerFormat(reverse_sample_rate_hz, num_reverse_channels, revframe_,
+                     &revfloat_cb_);
+  Init(apm_.get());
+
+  if (far_file_) {
+    ASSERT_EQ(0, fclose(far_file_));
+  }
+  std::string filename = ResourceFilePath("far", sample_rate_hz);
+  far_file_ = fopen(filename.c_str(), "rb");
+  ASSERT_TRUE(far_file_ != NULL) << "Could not open file " <<
+      filename << "\n";
+
+  if (near_file_) {
+    ASSERT_EQ(0, fclose(near_file_));
+  }
+  filename = ResourceFilePath("near", sample_rate_hz);
+  near_file_ = fopen(filename.c_str(), "rb");
+  ASSERT_TRUE(near_file_ != NULL) << "Could not open file " <<
+        filename << "\n";
+
+  if (open_output_file) {
+    if (out_file_) {
+      ASSERT_EQ(0, fclose(out_file_));
+    }
+    filename = OutputFilePath(
+        "out", sample_rate_hz, output_sample_rate_hz, reverse_sample_rate_hz,
+        reverse_sample_rate_hz, num_input_channels, num_output_channels,
+        num_reverse_channels, num_reverse_channels, kForward);
+    out_file_ = fopen(filename.c_str(), "wb");
+    ASSERT_TRUE(out_file_ != NULL) << "Could not open file " <<
+          filename << "\n";
+  }
+}
+
+void ApmTest::EnableAllComponents() {
+  EnableAllAPComponents(apm_.get());
+}
+
+bool ApmTest::ReadFrame(FILE* file, AudioFrame* frame,
+                        ChannelBuffer<float>* cb) {
+  // The files always contain stereo audio.
+  size_t frame_size = frame->samples_per_channel_ * 2;
+  size_t read_count = fread(frame->mutable_data(),
+                            sizeof(int16_t),
+                            frame_size,
+                            file);
+  if (read_count != frame_size) {
+    // Check that the file really ended.
+    EXPECT_NE(0, feof(file));
+    return false;  // This is expected.
+  }
+
+  if (frame->num_channels_ == 1) {
+    MixStereoToMono(frame->data(), frame->mutable_data(),
+                    frame->samples_per_channel_);
+  }
+
+  if (cb) {
+    ConvertToFloat(*frame, cb);
+  }
+  return true;
+}
+
+bool ApmTest::ReadFrame(FILE* file, AudioFrame* frame) {
+  return ReadFrame(file, frame, NULL);
+}
+
+// If the end of the file has been reached, rewind it and attempt to read the
+// frame again.
+void ApmTest::ReadFrameWithRewind(FILE* file, AudioFrame* frame,
+                                  ChannelBuffer<float>* cb) {
+  if (!ReadFrame(near_file_, frame_, cb)) {
+    rewind(near_file_);
+    ASSERT_TRUE(ReadFrame(near_file_, frame_, cb));
+  }
+}
+
+void ApmTest::ReadFrameWithRewind(FILE* file, AudioFrame* frame) {
+  ReadFrameWithRewind(file, frame, NULL);
+}
+
+void ApmTest::ProcessWithDefaultStreamParameters(AudioFrame* frame) {
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
+  apm_->echo_cancellation()->set_stream_drift_samples(0);
+  EXPECT_EQ(apm_->kNoError,
+      apm_->gain_control()->set_stream_analog_level(127));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame));
+}
+
+int ApmTest::ProcessStreamChooser(Format format) {
+  if (format == kIntFormat) {
+    return apm_->ProcessStream(frame_);
+  }
+  return apm_->ProcessStream(float_cb_->channels(),
+                             frame_->samples_per_channel_,
+                             frame_->sample_rate_hz_,
+                             LayoutFromChannels(frame_->num_channels_),
+                             output_sample_rate_hz_,
+                             LayoutFromChannels(num_output_channels_),
+                             float_cb_->channels());
+}
+
+int ApmTest::AnalyzeReverseStreamChooser(Format format) {
+  if (format == kIntFormat) {
+    return apm_->ProcessReverseStream(revframe_);
+  }
+  return apm_->AnalyzeReverseStream(
+      revfloat_cb_->channels(),
+      revframe_->samples_per_channel_,
+      revframe_->sample_rate_hz_,
+      LayoutFromChannels(revframe_->num_channels_));
+}
+
+void ApmTest::ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
+                                           int delay_min, int delay_max) {
+  // The |revframe_| and |frame_| should include the proper frame information,
+  // hence can be used for extracting information.
+  AudioFrame tmp_frame;
+  std::queue<AudioFrame*> frame_queue;
+  bool causal = true;
+
+  tmp_frame.CopyFrom(*revframe_);
+  SetFrameTo(&tmp_frame, 0);
+
+  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+  // Initialize the |frame_queue| with empty frames.
+  int frame_delay = delay_ms / 10;
+  while (frame_delay < 0) {
+    AudioFrame* frame = new AudioFrame();
+    frame->CopyFrom(tmp_frame);
+    frame_queue.push(frame);
+    frame_delay++;
+    causal = false;
+  }
+  while (frame_delay > 0) {
+    AudioFrame* frame = new AudioFrame();
+    frame->CopyFrom(tmp_frame);
+    frame_queue.push(frame);
+    frame_delay--;
+  }
+  // Run for 4.5 seconds, skipping statistics from the first 2.5 seconds.  We
+  // need enough frames with audio to have reliable estimates, but as few as
+  // possible to keep processing time down.  4.5 seconds seemed to be a good
+  // compromise for this recording.
+  for (int frame_count = 0; frame_count < 450; ++frame_count) {
+    AudioFrame* frame = new AudioFrame();
+    frame->CopyFrom(tmp_frame);
+    // Use the near end recording, since that has more speech in it.
+    ASSERT_TRUE(ReadFrame(near_file_, frame));
+    frame_queue.push(frame);
+    AudioFrame* reverse_frame = frame;
+    AudioFrame* process_frame = frame_queue.front();
+    if (!causal) {
+      reverse_frame = frame_queue.front();
+      // When we call ProcessStream() the frame is modified, so we can't use the
+      // pointer directly when things are non-causal. Use an intermediate frame
+      // and copy the data.
+      process_frame = &tmp_frame;
+      process_frame->CopyFrom(*frame);
+    }
+    EXPECT_EQ(apm_->kNoError, apm_->ProcessReverseStream(reverse_frame));
+    EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(system_delay_ms));
+    EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(process_frame));
+    frame = frame_queue.front();
+    frame_queue.pop();
+    delete frame;
+
+    if (frame_count == 250) {
+      int median;
+      int std;
+      float poor_fraction;
+      // Discard the first delay metrics to avoid convergence effects.
+      EXPECT_EQ(apm_->kNoError,
+                apm_->echo_cancellation()->GetDelayMetrics(&median, &std,
+                                                           &poor_fraction));
+    }
+  }
+
+  rewind(near_file_);
+  while (!frame_queue.empty()) {
+    AudioFrame* frame = frame_queue.front();
+    frame_queue.pop();
+    delete frame;
+  }
+  // Calculate expected delay estimate and acceptable regions. Further,
+  // limit them w.r.t. AEC delay estimation support.
+  const size_t samples_per_ms =
+      rtc::SafeMin<size_t>(16u, frame_->samples_per_channel_ / 10);
+  const int expected_median =
+      rtc::SafeClamp<int>(delay_ms - system_delay_ms, delay_min, delay_max);
+  const int expected_median_high = rtc::SafeClamp<int>(
+      expected_median + rtc::dchecked_cast<int>(96 / samples_per_ms), delay_min,
+      delay_max);
+  const int expected_median_low = rtc::SafeClamp<int>(
+      expected_median - rtc::dchecked_cast<int>(96 / samples_per_ms), delay_min,
+      delay_max);
+  // Verify delay metrics.
+  int median;
+  int std;
+  float poor_fraction;
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->GetDelayMetrics(&median, &std,
+                                                       &poor_fraction));
+  EXPECT_GE(expected_median_high, median);
+  EXPECT_LE(expected_median_low, median);
+}
+
+void ApmTest::StreamParametersTest(Format format) {
+  // No errors when the components are disabled.
+  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+
+  // -- Missing AGC level --
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError,
+            ProcessStreamChooser(format));
+
+  // Resets after successful ProcessStream().
+  EXPECT_EQ(apm_->kNoError,
+            apm_->gain_control()->set_stream_analog_level(127));
+  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError,
+            ProcessStreamChooser(format));
+
+  // Other stream parameters set correctly.
+  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_drift_compensation(true));
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+  apm_->echo_cancellation()->set_stream_drift_samples(0);
+  EXPECT_EQ(apm_->kStreamParameterNotSetError,
+            ProcessStreamChooser(format));
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_drift_compensation(false));
+
+  // -- Missing delay --
+  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError,
+            ProcessStreamChooser(format));
+
+  // Resets after successful ProcessStream().
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError,
+            ProcessStreamChooser(format));
+
+  // Other stream parameters set correctly.
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_drift_compensation(true));
+  apm_->echo_cancellation()->set_stream_drift_samples(0);
+  EXPECT_EQ(apm_->kNoError,
+            apm_->gain_control()->set_stream_analog_level(127));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError,
+            ProcessStreamChooser(format));
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
+
+  // -- Missing drift --
+  EXPECT_EQ(apm_->kStreamParameterNotSetError,
+            ProcessStreamChooser(format));
+
+  // Resets after successful ProcessStream().
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+  apm_->echo_cancellation()->set_stream_drift_samples(0);
+  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError,
+            ProcessStreamChooser(format));
+
+  // Other stream parameters set correctly.
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->gain_control()->set_stream_analog_level(127));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError,
+            ProcessStreamChooser(format));
+
+  // -- No stream parameters --
+  EXPECT_EQ(apm_->kNoError,
+            AnalyzeReverseStreamChooser(format));
+  EXPECT_EQ(apm_->kStreamParameterNotSetError,
+            ProcessStreamChooser(format));
+
+  // -- All there --
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+  apm_->echo_cancellation()->set_stream_drift_samples(0);
+  EXPECT_EQ(apm_->kNoError,
+            apm_->gain_control()->set_stream_analog_level(127));
+  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
+}
+
+TEST_F(ApmTest, StreamParametersInt) {
+  StreamParametersTest(kIntFormat);
+}
+
+TEST_F(ApmTest, StreamParametersFloat) {
+  StreamParametersTest(kFloatFormat);
+}
+
+TEST_F(ApmTest, DefaultDelayOffsetIsZero) {
+  EXPECT_EQ(0, apm_->delay_offset_ms());
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(50));
+  EXPECT_EQ(50, apm_->stream_delay_ms());
+}
+
+TEST_F(ApmTest, DelayOffsetWithLimitsIsSetProperly) {
+  // High limit of 500 ms.
+  apm_->set_delay_offset_ms(100);
+  EXPECT_EQ(100, apm_->delay_offset_ms());
+  EXPECT_EQ(apm_->kBadStreamParameterWarning, apm_->set_stream_delay_ms(450));
+  EXPECT_EQ(500, apm_->stream_delay_ms());
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+  EXPECT_EQ(200, apm_->stream_delay_ms());
+
+  // Low limit of 0 ms.
+  apm_->set_delay_offset_ms(-50);
+  EXPECT_EQ(-50, apm_->delay_offset_ms());
+  EXPECT_EQ(apm_->kBadStreamParameterWarning, apm_->set_stream_delay_ms(20));
+  EXPECT_EQ(0, apm_->stream_delay_ms());
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+  EXPECT_EQ(50, apm_->stream_delay_ms());
+}
+
+void ApmTest::TestChangingChannelsInt16Interface(
+    size_t num_channels,
+    AudioProcessing::Error expected_return) {
+  frame_->num_channels_ = num_channels;
+  EXPECT_EQ(expected_return, apm_->ProcessStream(frame_));
+  EXPECT_EQ(expected_return, apm_->ProcessReverseStream(frame_));
+}
+
+void ApmTest::TestChangingForwardChannels(
+    size_t num_in_channels,
+    size_t num_out_channels,
+    AudioProcessing::Error expected_return) {
+  const StreamConfig input_stream = {frame_->sample_rate_hz_, num_in_channels};
+  const StreamConfig output_stream = {output_sample_rate_hz_, num_out_channels};
+
+  EXPECT_EQ(expected_return,
+            apm_->ProcessStream(float_cb_->channels(), input_stream,
+                                output_stream, float_cb_->channels()));
+}
+
+void ApmTest::TestChangingReverseChannels(
+    size_t num_rev_channels,
+    AudioProcessing::Error expected_return) {
+  const ProcessingConfig processing_config = {
+      {{frame_->sample_rate_hz_, apm_->num_input_channels()},
+       {output_sample_rate_hz_, apm_->num_output_channels()},
+       {frame_->sample_rate_hz_, num_rev_channels},
+       {frame_->sample_rate_hz_, num_rev_channels}}};
+
+  EXPECT_EQ(
+      expected_return,
+      apm_->ProcessReverseStream(
+          float_cb_->channels(), processing_config.reverse_input_stream(),
+          processing_config.reverse_output_stream(), float_cb_->channels()));
+}
+
+TEST_F(ApmTest, ChannelsInt16Interface) {
+  // Testing number of invalid and valid channels.
+  Init(16000, 16000, 16000, 4, 4, 4, false);
+
+  TestChangingChannelsInt16Interface(0, apm_->kBadNumberChannelsError);
+
+  for (size_t i = 1; i < 4; i++) {
+    TestChangingChannelsInt16Interface(i, kNoErr);
+    EXPECT_EQ(i, apm_->num_input_channels());
+  }
+}
+
+TEST_F(ApmTest, Channels) {
+  // Testing number of invalid and valid channels.
+  Init(16000, 16000, 16000, 4, 4, 4, false);
+
+  TestChangingForwardChannels(0, 1, apm_->kBadNumberChannelsError);
+  TestChangingReverseChannels(0, apm_->kBadNumberChannelsError);
+
+  for (size_t i = 1; i < 4; ++i) {
+    for (size_t j = 0; j < 1; ++j) {
+      // Output channels much be one or match input channels.
+      if (j == 1 || i == j) {
+        TestChangingForwardChannels(i, j, kNoErr);
+        TestChangingReverseChannels(i, kNoErr);
+
+        EXPECT_EQ(i, apm_->num_input_channels());
+        EXPECT_EQ(j, apm_->num_output_channels());
+        // The number of reverse channels used for processing to is always 1.
+        EXPECT_EQ(1u, apm_->num_reverse_channels());
+      } else {
+        TestChangingForwardChannels(i, j,
+                                    AudioProcessing::kBadNumberChannelsError);
+      }
+    }
+  }
+}
+
+TEST_F(ApmTest, SampleRatesInt) {
+  // Testing invalid sample rates
+  SetContainerFormat(10000, 2, frame_, &float_cb_);
+  EXPECT_EQ(apm_->kBadSampleRateError, ProcessStreamChooser(kIntFormat));
+  // Testing valid sample rates
+  int fs[] = {8000, 16000, 32000, 48000};
+  for (size_t i = 0; i < arraysize(fs); i++) {
+    SetContainerFormat(fs[i], 2, frame_, &float_cb_);
+    EXPECT_NOERR(ProcessStreamChooser(kIntFormat));
+  }
+}
+
+TEST_F(ApmTest, EchoCancellation) {
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_drift_compensation(true));
+  EXPECT_TRUE(apm_->echo_cancellation()->is_drift_compensation_enabled());
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_drift_compensation(false));
+  EXPECT_FALSE(apm_->echo_cancellation()->is_drift_compensation_enabled());
+
+  EchoCancellation::SuppressionLevel level[] = {
+    EchoCancellation::kLowSuppression,
+    EchoCancellation::kModerateSuppression,
+    EchoCancellation::kHighSuppression,
+  };
+  for (size_t i = 0; i < arraysize(level); i++) {
+    EXPECT_EQ(apm_->kNoError,
+        apm_->echo_cancellation()->set_suppression_level(level[i]));
+    EXPECT_EQ(level[i],
+        apm_->echo_cancellation()->suppression_level());
+  }
+
+  EchoCancellation::Metrics metrics;
+  EXPECT_EQ(apm_->kNotEnabledError,
+            apm_->echo_cancellation()->GetMetrics(&metrics));
+
+  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+  EXPECT_TRUE(apm_->echo_cancellation()->is_enabled());
+
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_metrics(true));
+  EXPECT_TRUE(apm_->echo_cancellation()->are_metrics_enabled());
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_metrics(false));
+  EXPECT_FALSE(apm_->echo_cancellation()->are_metrics_enabled());
+
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_delay_logging(true));
+  EXPECT_TRUE(apm_->echo_cancellation()->is_delay_logging_enabled());
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_delay_logging(false));
+  EXPECT_FALSE(apm_->echo_cancellation()->is_delay_logging_enabled());
+
+  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(false));
+  EXPECT_FALSE(apm_->echo_cancellation()->is_enabled());
+
+  int median = 0;
+  int std = 0;
+  float poor_fraction = 0;
+  EXPECT_EQ(apm_->kNotEnabledError, apm_->echo_cancellation()->GetDelayMetrics(
+                                        &median, &std, &poor_fraction));
+
+  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+  EXPECT_TRUE(apm_->echo_cancellation()->is_enabled());
+  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(false));
+  EXPECT_FALSE(apm_->echo_cancellation()->is_enabled());
+
+  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+  EXPECT_TRUE(apm_->echo_cancellation()->is_enabled());
+  EXPECT_TRUE(apm_->echo_cancellation()->aec_core() != NULL);
+  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(false));
+  EXPECT_FALSE(apm_->echo_cancellation()->is_enabled());
+  EXPECT_FALSE(apm_->echo_cancellation()->aec_core() != NULL);
+}
+
+TEST_F(ApmTest, DISABLED_EchoCancellationReportsCorrectDelays) {
+  // TODO(bjornv): Fix this test to work with DA-AEC.
+  // Enable AEC only.
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_drift_compensation(false));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_metrics(false));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_cancellation()->enable_delay_logging(true));
+  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+  Config config;
+  config.Set<DelayAgnostic>(new DelayAgnostic(false));
+  apm_->SetExtraOptions(config);
+
+  // Internally in the AEC the amount of lookahead the delay estimation can
+  // handle is 15 blocks and the maximum delay is set to 60 blocks.
+  const int kLookaheadBlocks = 15;
+  const int kMaxDelayBlocks = 60;
+  // The AEC has a startup time before it actually starts to process. This
+  // procedure can flush the internal far-end buffer, which of course affects
+  // the delay estimation. Therefore, we set a system_delay high enough to
+  // avoid that. The smallest system_delay you can report without flushing the
+  // buffer is 66 ms in 8 kHz.
+  //
+  // It is known that for 16 kHz (and 32 kHz) sampling frequency there is an
+  // additional stuffing of 8 ms on the fly, but it seems to have no impact on
+  // delay estimation. This should be noted though. In case of test failure,
+  // this could be the cause.
+  const int kSystemDelayMs = 66;
+  // Test a couple of corner cases and verify that the estimated delay is
+  // within a valid region (set to +-1.5 blocks). Note that these cases are
+  // sampling frequency dependent.
+  for (size_t i = 0; i < arraysize(kProcessSampleRates); i++) {
+    Init(kProcessSampleRates[i],
+         kProcessSampleRates[i],
+         kProcessSampleRates[i],
+         2,
+         2,
+         2,
+         false);
+    // Sampling frequency dependent variables.
+    const int num_ms_per_block =
+        std::max(4, static_cast<int>(640 / frame_->samples_per_channel_));
+    const int delay_min_ms = -kLookaheadBlocks * num_ms_per_block;
+    const int delay_max_ms = (kMaxDelayBlocks - 1) * num_ms_per_block;
+
+    // 1) Verify correct delay estimate at lookahead boundary.
+    int delay_ms = TruncateToMultipleOf10(kSystemDelayMs + delay_min_ms);
+    ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
+                                 delay_max_ms);
+    // 2) A delay less than maximum lookahead should give an delay estimate at
+    //    the boundary (= -kLookaheadBlocks * num_ms_per_block).
+    delay_ms -= 20;
+    ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
+                                 delay_max_ms);
+    // 3) Three values around zero delay. Note that we need to compensate for
+    //    the fake system_delay.
+    delay_ms = TruncateToMultipleOf10(kSystemDelayMs - 10);
+    ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
+                                 delay_max_ms);
+    delay_ms = TruncateToMultipleOf10(kSystemDelayMs);
+    ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
+                                 delay_max_ms);
+    delay_ms = TruncateToMultipleOf10(kSystemDelayMs + 10);
+    ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
+                                 delay_max_ms);
+    // 4) Verify correct delay estimate at maximum delay boundary.
+    delay_ms = TruncateToMultipleOf10(kSystemDelayMs + delay_max_ms);
+    ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
+                                 delay_max_ms);
+    // 5) A delay above the maximum delay should give an estimate at the
+    //    boundary (= (kMaxDelayBlocks - 1) * num_ms_per_block).
+    delay_ms += 20;
+    ProcessDelayVerificationTest(delay_ms, kSystemDelayMs, delay_min_ms,
+                                 delay_max_ms);
+  }
+}
+
+TEST_F(ApmTest, EchoControlMobile) {
+  // Turn AECM on (and AEC off)
+  Init(16000, 16000, 16000, 2, 2, 2, false);
+  EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));
+  EXPECT_TRUE(apm_->echo_control_mobile()->is_enabled());
+
+  // Toggle routing modes
+  EchoControlMobile::RoutingMode mode[] = {
+      EchoControlMobile::kQuietEarpieceOrHeadset,
+      EchoControlMobile::kEarpiece,
+      EchoControlMobile::kLoudEarpiece,
+      EchoControlMobile::kSpeakerphone,
+      EchoControlMobile::kLoudSpeakerphone,
+  };
+  for (size_t i = 0; i < arraysize(mode); i++) {
+    EXPECT_EQ(apm_->kNoError,
+        apm_->echo_control_mobile()->set_routing_mode(mode[i]));
+    EXPECT_EQ(mode[i],
+        apm_->echo_control_mobile()->routing_mode());
+  }
+  // Turn comfort noise off/on
+  EXPECT_EQ(apm_->kNoError,
+      apm_->echo_control_mobile()->enable_comfort_noise(false));
+  EXPECT_FALSE(apm_->echo_control_mobile()->is_comfort_noise_enabled());
+  EXPECT_EQ(apm_->kNoError,
+      apm_->echo_control_mobile()->enable_comfort_noise(true));
+  EXPECT_TRUE(apm_->echo_control_mobile()->is_comfort_noise_enabled());
+  // Set and get echo path
+  const size_t echo_path_size =
+      apm_->echo_control_mobile()->echo_path_size_bytes();
+  std::unique_ptr<char[]> echo_path_in(new char[echo_path_size]);
+  std::unique_ptr<char[]> echo_path_out(new char[echo_path_size]);
+  EXPECT_EQ(apm_->kNullPointerError,
+            apm_->echo_control_mobile()->SetEchoPath(NULL, echo_path_size));
+  EXPECT_EQ(apm_->kNullPointerError,
+            apm_->echo_control_mobile()->GetEchoPath(NULL, echo_path_size));
+  EXPECT_EQ(apm_->kBadParameterError,
+            apm_->echo_control_mobile()->GetEchoPath(echo_path_out.get(), 1));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_control_mobile()->GetEchoPath(echo_path_out.get(),
+                                                     echo_path_size));
+  for (size_t i = 0; i < echo_path_size; i++) {
+    echo_path_in[i] = echo_path_out[i] + 1;
+  }
+  EXPECT_EQ(apm_->kBadParameterError,
+            apm_->echo_control_mobile()->SetEchoPath(echo_path_in.get(), 1));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_control_mobile()->SetEchoPath(echo_path_in.get(),
+                                                     echo_path_size));
+  EXPECT_EQ(apm_->kNoError,
+            apm_->echo_control_mobile()->GetEchoPath(echo_path_out.get(),
+                                                     echo_path_size));
+  for (size_t i = 0; i < echo_path_size; i++) {
+    EXPECT_EQ(echo_path_in[i], echo_path_out[i]);
+  }
+
+  // Process a few frames with NS in the default disabled state. This exercises
+  // a different codepath than with it enabled.
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+
+  // Turn AECM off
+  EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(false));
+  EXPECT_FALSE(apm_->echo_control_mobile()->is_enabled());
+}
+
+TEST_F(ApmTest, GainControl) {
+  // Testing gain modes
+  EXPECT_EQ(apm_->kNoError,
+      apm_->gain_control()->set_mode(
+      apm_->gain_control()->mode()));
+
+  GainControl::Mode mode[] = {
+    GainControl::kAdaptiveAnalog,
+    GainControl::kAdaptiveDigital,
+    GainControl::kFixedDigital
+  };
+  for (size_t i = 0; i < arraysize(mode); i++) {
+    EXPECT_EQ(apm_->kNoError,
+        apm_->gain_control()->set_mode(mode[i]));
+    EXPECT_EQ(mode[i], apm_->gain_control()->mode());
+  }
+  // Testing invalid target levels
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->gain_control()->set_target_level_dbfs(-3));
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->gain_control()->set_target_level_dbfs(-40));
+  // Testing valid target levels
+  EXPECT_EQ(apm_->kNoError,
+      apm_->gain_control()->set_target_level_dbfs(
+      apm_->gain_control()->target_level_dbfs()));
+
+  int level_dbfs[] = {0, 6, 31};
+  for (size_t i = 0; i < arraysize(level_dbfs); i++) {
+    EXPECT_EQ(apm_->kNoError,
+        apm_->gain_control()->set_target_level_dbfs(level_dbfs[i]));
+    EXPECT_EQ(level_dbfs[i], apm_->gain_control()->target_level_dbfs());
+  }
+
+  // Testing invalid compression gains
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->gain_control()->set_compression_gain_db(-1));
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->gain_control()->set_compression_gain_db(100));
+
+  // Testing valid compression gains
+  EXPECT_EQ(apm_->kNoError,
+      apm_->gain_control()->set_compression_gain_db(
+      apm_->gain_control()->compression_gain_db()));
+
+  int gain_db[] = {0, 10, 90};
+  for (size_t i = 0; i < arraysize(gain_db); i++) {
+    EXPECT_EQ(apm_->kNoError,
+        apm_->gain_control()->set_compression_gain_db(gain_db[i]));
+    EXPECT_EQ(gain_db[i], apm_->gain_control()->compression_gain_db());
+  }
+
+  // Testing limiter off/on
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->enable_limiter(false));
+  EXPECT_FALSE(apm_->gain_control()->is_limiter_enabled());
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->enable_limiter(true));
+  EXPECT_TRUE(apm_->gain_control()->is_limiter_enabled());
+
+  // Testing invalid level limits
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->gain_control()->set_analog_level_limits(-1, 512));
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->gain_control()->set_analog_level_limits(100000, 512));
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->gain_control()->set_analog_level_limits(512, -1));
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->gain_control()->set_analog_level_limits(512, 100000));
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->gain_control()->set_analog_level_limits(512, 255));
+
+  // Testing valid level limits
+  EXPECT_EQ(apm_->kNoError,
+      apm_->gain_control()->set_analog_level_limits(
+      apm_->gain_control()->analog_level_minimum(),
+      apm_->gain_control()->analog_level_maximum()));
+
+  int min_level[] = {0, 255, 1024};
+  for (size_t i = 0; i < arraysize(min_level); i++) {
+    EXPECT_EQ(apm_->kNoError,
+        apm_->gain_control()->set_analog_level_limits(min_level[i], 1024));
+    EXPECT_EQ(min_level[i], apm_->gain_control()->analog_level_minimum());
+  }
+
+  int max_level[] = {0, 1024, 65535};
+  for (size_t i = 0; i < arraysize(min_level); i++) {
+    EXPECT_EQ(apm_->kNoError,
+        apm_->gain_control()->set_analog_level_limits(0, max_level[i]));
+    EXPECT_EQ(max_level[i], apm_->gain_control()->analog_level_maximum());
+  }
+
+  // TODO(ajm): stream_is_saturated() and stream_analog_level()
+
+  // Turn AGC off
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
+  EXPECT_FALSE(apm_->gain_control()->is_enabled());
+}
+
+void ApmTest::RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate) {
+  Init(sample_rate, sample_rate, sample_rate, 2, 2, 2, false);
+  EXPECT_EQ(apm_->kNoError,
+            apm_->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+
+  int out_analog_level = 0;
+  for (int i = 0; i < 2000; ++i) {
+    ReadFrameWithRewind(near_file_, frame_);
+    // Ensure the audio is at a low level, so the AGC will try to increase it.
+    ScaleFrame(frame_, 0.25);
+
+    // Always pass in the same volume.
+    EXPECT_EQ(apm_->kNoError,
+        apm_->gain_control()->set_stream_analog_level(100));
+    EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+    out_analog_level = apm_->gain_control()->stream_analog_level();
+  }
+
+  // Ensure the AGC is still able to reach the maximum.
+  EXPECT_EQ(255, out_analog_level);
+}
+
+// Verifies that despite volume slider quantization, the AGC can continue to
+// increase its volume.
+TEST_F(ApmTest, QuantizedVolumeDoesNotGetStuck) {
+  for (size_t i = 0; i < arraysize(kSampleRates); ++i) {
+    RunQuantizedVolumeDoesNotGetStuckTest(kSampleRates[i]);
+  }
+}
+
+void ApmTest::RunManualVolumeChangeIsPossibleTest(int sample_rate) {
+  Init(sample_rate, sample_rate, sample_rate, 2, 2, 2, false);
+  EXPECT_EQ(apm_->kNoError,
+            apm_->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
+  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+
+  int out_analog_level = 100;
+  for (int i = 0; i < 1000; ++i) {
+    ReadFrameWithRewind(near_file_, frame_);
+    // Ensure the audio is at a low level, so the AGC will try to increase it.
+    ScaleFrame(frame_, 0.25);
+
+    EXPECT_EQ(apm_->kNoError,
+        apm_->gain_control()->set_stream_analog_level(out_analog_level));
+    EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+    out_analog_level = apm_->gain_control()->stream_analog_level();
+  }
+
+  // Ensure the volume was raised.
+  EXPECT_GT(out_analog_level, 100);
+  int highest_level_reached = out_analog_level;
+  // Simulate a user manual volume change.
+  out_analog_level = 100;
+
+  for (int i = 0; i < 300; ++i) {
+    ReadFrameWithRewind(near_file_, frame_);
+    ScaleFrame(frame_, 0.25);
+
+    EXPECT_EQ(apm_->kNoError,
+        apm_->gain_control()->set_stream_analog_level(out_analog_level));
+    EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+    out_analog_level = apm_->gain_control()->stream_analog_level();
+    // Check that AGC respected the manually adjusted volume.
+    EXPECT_LT(out_analog_level, highest_level_reached);
+  }
+  // Check that the volume was still raised.
+  EXPECT_GT(out_analog_level, 100);
+}
+
+TEST_F(ApmTest, ManualVolumeChangeIsPossible) {
+  for (size_t i = 0; i < arraysize(kSampleRates); ++i) {
+    RunManualVolumeChangeIsPossibleTest(kSampleRates[i]);
+  }
+}
+
+#if !defined(WEBRTC_ANDROID) && !defined(WEBRTC_IOS)
+TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
+  const int kSampleRateHz = 16000;
+  const size_t kSamplesPerChannel =
+      static_cast<size_t>(AudioProcessing::kChunkSizeMs * kSampleRateHz / 1000);
+  const size_t kNumInputChannels = 2;
+  const size_t kNumOutputChannels = 1;
+  const size_t kNumChunks = 700;
+  const float kScaleFactor = 0.25f;
+  Config config;
+  std::vector<webrtc::Point> geometry;
+  geometry.push_back(webrtc::Point(0.f, 0.f, 0.f));
+  geometry.push_back(webrtc::Point(0.05f, 0.f, 0.f));
+  config.Set<Beamforming>(new Beamforming(true, geometry));
+  testing::NiceMock<MockNonlinearBeamformer>* beamformer =
+      new testing::NiceMock<MockNonlinearBeamformer>(geometry, 1u);
+  std::unique_ptr<AudioProcessing> apm(
+      AudioProcessingBuilder()
+          .SetNonlinearBeamformer(
+              std::unique_ptr<webrtc::NonlinearBeamformer>(beamformer))
+          .Create(config));
+  EXPECT_EQ(kNoErr, apm->gain_control()->Enable(true));
+  ChannelBuffer<float> src_buf(kSamplesPerChannel, kNumInputChannels);
+  ChannelBuffer<float> dest_buf(kSamplesPerChannel, kNumOutputChannels);
+  const size_t max_length = kSamplesPerChannel * std::max(kNumInputChannels,
+                                                          kNumOutputChannels);
+  std::unique_ptr<int16_t[]> int_data(new int16_t[max_length]);
+  std::unique_ptr<float[]> float_data(new float[max_length]);
+  std::string filename = ResourceFilePath("far", kSampleRateHz);
+  FILE* far_file = fopen(filename.c_str(), "rb");
+  ASSERT_TRUE(far_file != NULL) << "Could not open file " << filename << "\n";
+  const int kDefaultVolume = apm->gain_control()->stream_analog_level();
+  const int kDefaultCompressionGain =
+      apm->gain_control()->compression_gain_db();
+  bool is_target = false;
+  EXPECT_CALL(*beamformer, is_target_present())
+      .WillRepeatedly(testing::ReturnPointee(&is_target));
+  for (size_t i = 0; i < kNumChunks; ++i) {
+    ASSERT_TRUE(ReadChunk(far_file,
+                          int_data.get(),
+                          float_data.get(),
+                          &src_buf));
+    for (size_t j = 0; j < kNumInputChannels; ++j) {
+      for (size_t k = 0; k < kSamplesPerChannel; ++k) {
+        src_buf.channels()[j][k] *= kScaleFactor;
+      }
+    }
+    EXPECT_EQ(kNoErr,
+              apm->ProcessStream(src_buf.channels(),
+                                 src_buf.num_frames(),
+                                 kSampleRateHz,
+                                 LayoutFromChannels(src_buf.num_channels()),
+                                 kSampleRateHz,
+                                 LayoutFromChannels(dest_buf.num_channels()),
+                                 dest_buf.channels()));
+  }
+  EXPECT_EQ(kDefaultVolume,
+            apm->gain_control()->stream_analog_level());
+  EXPECT_EQ(kDefaultCompressionGain,
+            apm->gain_control()->compression_gain_db());
+  rewind(far_file);
+  is_target = true;
+  for (size_t i = 0; i < kNumChunks; ++i) {
+    ASSERT_TRUE(ReadChunk(far_file,
+                          int_data.get(),
+                          float_data.get(),
+                          &src_buf));
+    for (size_t j = 0; j < kNumInputChannels; ++j) {
+      for (size_t k = 0; k < kSamplesPerChannel; ++k) {
+        src_buf.channels()[j][k] *= kScaleFactor;
+      }
+    }
+    EXPECT_EQ(kNoErr,
+              apm->ProcessStream(src_buf.channels(),
+                                 src_buf.num_frames(),
+                                 kSampleRateHz,
+                                 LayoutFromChannels(src_buf.num_channels()),
+                                 kSampleRateHz,
+                                 LayoutFromChannels(dest_buf.num_channels()),
+                                 dest_buf.channels()));
+  }
+  EXPECT_LT(kDefaultVolume,
+            apm->gain_control()->stream_analog_level());
+  EXPECT_LT(kDefaultCompressionGain,
+            apm->gain_control()->compression_gain_db());
+  ASSERT_EQ(0, fclose(far_file));
+}
+#endif
+
+TEST_F(ApmTest, NoiseSuppression) {
+  // Test valid suppression levels.
+  NoiseSuppression::Level level[] = {
+    NoiseSuppression::kLow,
+    NoiseSuppression::kModerate,
+    NoiseSuppression::kHigh,
+    NoiseSuppression::kVeryHigh
+  };
+  for (size_t i = 0; i < arraysize(level); i++) {
+    EXPECT_EQ(apm_->kNoError,
+        apm_->noise_suppression()->set_level(level[i]));
+    EXPECT_EQ(level[i], apm_->noise_suppression()->level());
+  }
+
+  // Turn NS on/off
+  EXPECT_EQ(apm_->kNoError, apm_->noise_suppression()->Enable(true));
+  EXPECT_TRUE(apm_->noise_suppression()->is_enabled());
+  EXPECT_EQ(apm_->kNoError, apm_->noise_suppression()->Enable(false));
+  EXPECT_FALSE(apm_->noise_suppression()->is_enabled());
+}
+
+TEST_F(ApmTest, HighPassFilter) {
+  // Turn HP filter on/off
+  AudioProcessing::Config apm_config;
+  apm_config.high_pass_filter.enabled = true;
+  apm_->ApplyConfig(apm_config);
+  apm_config.high_pass_filter.enabled = false;
+  apm_->ApplyConfig(apm_config);
+}
+
+TEST_F(ApmTest, LevelEstimator) {
+  // Turn level estimator on/off
+  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
+  EXPECT_FALSE(apm_->level_estimator()->is_enabled());
+
+  EXPECT_EQ(apm_->kNotEnabledError, apm_->level_estimator()->RMS());
+
+  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
+  EXPECT_TRUE(apm_->level_estimator()->is_enabled());
+
+  // Run this test in wideband; in super-wb, the splitting filter distorts the
+  // audio enough to cause deviation from the expectation for small values.
+  frame_->samples_per_channel_ = 160;
+  frame_->num_channels_ = 2;
+  frame_->sample_rate_hz_ = 16000;
+
+  // Min value if no frames have been processed.
+  EXPECT_EQ(127, apm_->level_estimator()->RMS());
+
+  // Min value on zero frames.
+  SetFrameTo(frame_, 0);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(127, apm_->level_estimator()->RMS());
+
+  // Try a few RMS values.
+  // (These also test that the value resets after retrieving it.)
+  SetFrameTo(frame_, 32767);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(0, apm_->level_estimator()->RMS());
+
+  SetFrameTo(frame_, 30000);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(1, apm_->level_estimator()->RMS());
+
+  SetFrameTo(frame_, 10000);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(10, apm_->level_estimator()->RMS());
+
+  SetFrameTo(frame_, 10);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(70, apm_->level_estimator()->RMS());
+
+  // Verify reset after enable/disable.
+  SetFrameTo(frame_, 32767);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
+  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
+  SetFrameTo(frame_, 1);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(90, apm_->level_estimator()->RMS());
+
+  // Verify reset after initialize.
+  SetFrameTo(frame_, 32767);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+  SetFrameTo(frame_, 1);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(90, apm_->level_estimator()->RMS());
+}
+
+TEST_F(ApmTest, VoiceDetection) {
+  // Test external VAD
+  EXPECT_EQ(apm_->kNoError,
+            apm_->voice_detection()->set_stream_has_voice(true));
+  EXPECT_TRUE(apm_->voice_detection()->stream_has_voice());
+  EXPECT_EQ(apm_->kNoError,
+            apm_->voice_detection()->set_stream_has_voice(false));
+  EXPECT_FALSE(apm_->voice_detection()->stream_has_voice());
+
+  // Test valid likelihoods
+  VoiceDetection::Likelihood likelihood[] = {
+      VoiceDetection::kVeryLowLikelihood,
+      VoiceDetection::kLowLikelihood,
+      VoiceDetection::kModerateLikelihood,
+      VoiceDetection::kHighLikelihood
+  };
+  for (size_t i = 0; i < arraysize(likelihood); i++) {
+    EXPECT_EQ(apm_->kNoError,
+              apm_->voice_detection()->set_likelihood(likelihood[i]));
+    EXPECT_EQ(likelihood[i], apm_->voice_detection()->likelihood());
+  }
+
+  /* TODO(bjornv): Enable once VAD supports other frame lengths than 10 ms
+  // Test invalid frame sizes
+  EXPECT_EQ(apm_->kBadParameterError,
+      apm_->voice_detection()->set_frame_size_ms(12));
+
+  // Test valid frame sizes
+  for (int i = 10; i <= 30; i += 10) {
+    EXPECT_EQ(apm_->kNoError,
+        apm_->voice_detection()->set_frame_size_ms(i));
+    EXPECT_EQ(i, apm_->voice_detection()->frame_size_ms());
+  }
+  */
+
+  // Turn VAD on/off
+  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
+  EXPECT_TRUE(apm_->voice_detection()->is_enabled());
+  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
+  EXPECT_FALSE(apm_->voice_detection()->is_enabled());
+
+  // Test that AudioFrame activity is maintained when VAD is disabled.
+  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
+  AudioFrame::VADActivity activity[] = {
+      AudioFrame::kVadActive,
+      AudioFrame::kVadPassive,
+      AudioFrame::kVadUnknown
+  };
+  for (size_t i = 0; i < arraysize(activity); i++) {
+    frame_->vad_activity_ = activity[i];
+    EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+    EXPECT_EQ(activity[i], frame_->vad_activity_);
+  }
+
+  // Test that AudioFrame activity is set when VAD is enabled.
+  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
+  frame_->vad_activity_ = AudioFrame::kVadUnknown;
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_NE(AudioFrame::kVadUnknown, frame_->vad_activity_);
+
+  // TODO(bjornv): Add tests for streamed voice; stream_has_voice()
+}
+
+TEST_F(ApmTest, AllProcessingDisabledByDefault) {
+  EXPECT_FALSE(apm_->echo_cancellation()->is_enabled());
+  EXPECT_FALSE(apm_->echo_control_mobile()->is_enabled());
+  EXPECT_FALSE(apm_->gain_control()->is_enabled());
+  EXPECT_FALSE(apm_->high_pass_filter()->is_enabled());
+  EXPECT_FALSE(apm_->level_estimator()->is_enabled());
+  EXPECT_FALSE(apm_->noise_suppression()->is_enabled());
+  EXPECT_FALSE(apm_->voice_detection()->is_enabled());
+}
+
+TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabled) {
+  for (size_t i = 0; i < arraysize(kSampleRates); i++) {
+    Init(kSampleRates[i], kSampleRates[i], kSampleRates[i], 2, 2, 2, false);
+    SetFrameTo(frame_, 1000, 2000);
+    AudioFrame frame_copy;
+    frame_copy.CopyFrom(*frame_);
+    for (int j = 0; j < 1000; j++) {
+      EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+      EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
+      EXPECT_EQ(apm_->kNoError, apm_->ProcessReverseStream(frame_));
+      EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
+    }
+  }
+}
+
+TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabledFloat) {
+  // Test that ProcessStream copies input to output even with no processing.
+  const size_t kSamples = 80;
+  const int sample_rate = 8000;
+  const float src[kSamples] = {
+    -1.0f, 0.0f, 1.0f
+  };
+  float dest[kSamples] = {};
+
+  auto src_channels = &src[0];
+  auto dest_channels = &dest[0];
+
+  apm_.reset(AudioProcessingBuilder().Create());
+  EXPECT_NOERR(apm_->ProcessStream(
+      &src_channels, kSamples, sample_rate, LayoutFromChannels(1),
+      sample_rate, LayoutFromChannels(1), &dest_channels));
+
+  for (size_t i = 0; i < kSamples; ++i) {
+    EXPECT_EQ(src[i], dest[i]);
+  }
+
+  // Same for ProcessReverseStream.
+  float rev_dest[kSamples] = {};
+  auto rev_dest_channels = &rev_dest[0];
+
+  StreamConfig input_stream = {sample_rate, 1};
+  StreamConfig output_stream = {sample_rate, 1};
+  EXPECT_NOERR(apm_->ProcessReverseStream(&src_channels, input_stream,
+                                          output_stream, &rev_dest_channels));
+
+  for (size_t i = 0; i < kSamples; ++i) {
+    EXPECT_EQ(src[i], rev_dest[i]);
+  }
+}
+
+TEST_F(ApmTest, IdenticalInputChannelsResultInIdenticalOutputChannels) {
+  EnableAllComponents();
+
+  for (size_t i = 0; i < arraysize(kProcessSampleRates); i++) {
+    Init(kProcessSampleRates[i],
+         kProcessSampleRates[i],
+         kProcessSampleRates[i],
+         2,
+         2,
+         2,
+         false);
+    int analog_level = 127;
+    ASSERT_EQ(0, feof(far_file_));
+    ASSERT_EQ(0, feof(near_file_));
+    while (ReadFrame(far_file_, revframe_) && ReadFrame(near_file_, frame_)) {
+      CopyLeftToRightChannel(revframe_->mutable_data(),
+                             revframe_->samples_per_channel_);
+
+      ASSERT_EQ(kNoErr, apm_->ProcessReverseStream(revframe_));
+
+      CopyLeftToRightChannel(frame_->mutable_data(),
+                             frame_->samples_per_channel_);
+      frame_->vad_activity_ = AudioFrame::kVadUnknown;
+
+      ASSERT_EQ(kNoErr, apm_->set_stream_delay_ms(0));
+      apm_->echo_cancellation()->set_stream_drift_samples(0);
+      ASSERT_EQ(kNoErr,
+          apm_->gain_control()->set_stream_analog_level(analog_level));
+      ASSERT_EQ(kNoErr, apm_->ProcessStream(frame_));
+      analog_level = apm_->gain_control()->stream_analog_level();
+
+      VerifyChannelsAreEqual(frame_->data(), frame_->samples_per_channel_);
+    }
+    rewind(far_file_);
+    rewind(near_file_);
+  }
+}
+
+TEST_F(ApmTest, SplittingFilter) {
+  // Verify the filter is not active through undistorted audio when:
+  // 1. No components are enabled...
+  SetFrameTo(frame_, 1000);
+  AudioFrame frame_copy;
+  frame_copy.CopyFrom(*frame_);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
+
+  // 2. Only the level estimator is enabled...
+  SetFrameTo(frame_, 1000);
+  frame_copy.CopyFrom(*frame_);
+  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
+  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
+
+  // 3. Only VAD is enabled...
+  SetFrameTo(frame_, 1000);
+  frame_copy.CopyFrom(*frame_);
+  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
+  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
+
+  // 4. Both VAD and the level estimator are enabled...
+  SetFrameTo(frame_, 1000);
+  frame_copy.CopyFrom(*frame_);
+  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
+  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
+  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
+  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
+
+  // 5. Not using super-wb.
+  frame_->samples_per_channel_ = 160;
+  frame_->num_channels_ = 2;
+  frame_->sample_rate_hz_ = 16000;
+  // Enable AEC, which would require the filter in super-wb. We rely on the
+  // first few frames of data being unaffected by the AEC.
+  // TODO(andrew): This test, and the one below, rely rather tenuously on the
+  // behavior of the AEC. Think of something more robust.
+  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+  // Make sure we have extended filter enabled. This makes sure nothing is
+  // touched until we have a farend frame.
+  Config config;
+  config.Set<ExtendedFilter>(new ExtendedFilter(true));
+  apm_->SetExtraOptions(config);
+  SetFrameTo(frame_, 1000);
+  frame_copy.CopyFrom(*frame_);
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
+  apm_->echo_cancellation()->set_stream_drift_samples(0);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
+  apm_->echo_cancellation()->set_stream_drift_samples(0);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
+
+  // Check the test is valid. We should have distortion from the filter
+  // when AEC is enabled (which won't affect the audio).
+  frame_->samples_per_channel_ = 320;
+  frame_->num_channels_ = 2;
+  frame_->sample_rate_hz_ = 32000;
+  SetFrameTo(frame_, 1000);
+  frame_copy.CopyFrom(*frame_);
+  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
+  apm_->echo_cancellation()->set_stream_drift_samples(0);
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));
+}
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+void ApmTest::ProcessDebugDump(const std::string& in_filename,
+                               const std::string& out_filename,
+                               Format format,
+                               int max_size_bytes) {
+  rtc::TaskQueue worker_queue("ApmTest_worker_queue");
+  FILE* in_file = fopen(in_filename.c_str(), "rb");
+  ASSERT_TRUE(in_file != NULL);
+  audioproc::Event event_msg;
+  bool first_init = true;
+
+  while (ReadMessageFromFile(in_file, &event_msg)) {
+    if (event_msg.type() == audioproc::Event::INIT) {
+      const audioproc::Init msg = event_msg.init();
+      int reverse_sample_rate = msg.sample_rate();
+      if (msg.has_reverse_sample_rate()) {
+        reverse_sample_rate = msg.reverse_sample_rate();
+      }
+      int output_sample_rate = msg.sample_rate();
+      if (msg.has_output_sample_rate()) {
+        output_sample_rate = msg.output_sample_rate();
+      }
+
+      Init(msg.sample_rate(),
+           output_sample_rate,
+           reverse_sample_rate,
+           msg.num_input_channels(),
+           msg.num_output_channels(),
+           msg.num_reverse_channels(),
+           false);
+      if (first_init) {
+        // AttachAecDump() writes an additional init message. Don't start
+        // recording until after the first init to avoid the extra message.
+        auto aec_dump =
+            AecDumpFactory::Create(out_filename, max_size_bytes, &worker_queue);
+        EXPECT_TRUE(aec_dump);
+        apm_->AttachAecDump(std::move(aec_dump));
+        first_init = false;
+      }
+
+    } else if (event_msg.type() == audioproc::Event::REVERSE_STREAM) {
+      const audioproc::ReverseStream msg = event_msg.reverse_stream();
+
+      if (msg.channel_size() > 0) {
+        ASSERT_EQ(revframe_->num_channels_,
+                  static_cast<size_t>(msg.channel_size()));
+        for (int i = 0; i < msg.channel_size(); ++i) {
+           memcpy(revfloat_cb_->channels()[i],
+                  msg.channel(i).data(),
+                  msg.channel(i).size());
+        }
+      } else {
+        memcpy(revframe_->mutable_data(), msg.data().data(), msg.data().size());
+        if (format == kFloatFormat) {
+          // We're using an int16 input file; convert to float.
+          ConvertToFloat(*revframe_, revfloat_cb_.get());
+        }
+      }
+      AnalyzeReverseStreamChooser(format);
+
+    } else if (event_msg.type() == audioproc::Event::STREAM) {
+      const audioproc::Stream msg = event_msg.stream();
+      // ProcessStream could have changed this for the output frame.
+      frame_->num_channels_ = apm_->num_input_channels();
+
+      EXPECT_NOERR(apm_->gain_control()->set_stream_analog_level(msg.level()));
+      EXPECT_NOERR(apm_->set_stream_delay_ms(msg.delay()));
+      apm_->echo_cancellation()->set_stream_drift_samples(msg.drift());
+      if (msg.has_keypress()) {
+        apm_->set_stream_key_pressed(msg.keypress());
+      } else {
+        apm_->set_stream_key_pressed(true);
+      }
+
+      if (msg.input_channel_size() > 0) {
+        ASSERT_EQ(frame_->num_channels_,
+                  static_cast<size_t>(msg.input_channel_size()));
+        for (int i = 0; i < msg.input_channel_size(); ++i) {
+           memcpy(float_cb_->channels()[i],
+                  msg.input_channel(i).data(),
+                  msg.input_channel(i).size());
+        }
+      } else {
+        memcpy(frame_->mutable_data(), msg.input_data().data(),
+               msg.input_data().size());
+        if (format == kFloatFormat) {
+          // We're using an int16 input file; convert to float.
+          ConvertToFloat(*frame_, float_cb_.get());
+        }
+      }
+      ProcessStreamChooser(format);
+    }
+  }
+  apm_->DetachAecDump();
+  fclose(in_file);
+}
+
+void ApmTest::VerifyDebugDumpTest(Format format) {
+  const std::string in_filename = test::ResourcePath("ref03", "aecdump");
+  std::string format_string;
+  switch (format) {
+    case kIntFormat:
+      format_string = "_int";
+      break;
+    case kFloatFormat:
+      format_string = "_float";
+      break;
+  }
+  const std::string ref_filename = test::TempFilename(
+      test::OutputPath(), std::string("ref") + format_string + "_aecdump");
+  const std::string out_filename = test::TempFilename(
+      test::OutputPath(), std::string("out") + format_string + "_aecdump");
+  const std::string limited_filename = test::TempFilename(
+      test::OutputPath(), std::string("limited") + format_string + "_aecdump");
+  const size_t logging_limit_bytes = 100000;
+  // We expect at least this many bytes in the created logfile.
+  const size_t logging_expected_bytes = 95000;
+  EnableAllComponents();
+  ProcessDebugDump(in_filename, ref_filename, format, -1);
+  ProcessDebugDump(ref_filename, out_filename, format, -1);
+  ProcessDebugDump(ref_filename, limited_filename, format, logging_limit_bytes);
+
+  FILE* ref_file = fopen(ref_filename.c_str(), "rb");
+  FILE* out_file = fopen(out_filename.c_str(), "rb");
+  FILE* limited_file = fopen(limited_filename.c_str(), "rb");
+  ASSERT_TRUE(ref_file != NULL);
+  ASSERT_TRUE(out_file != NULL);
+  ASSERT_TRUE(limited_file != NULL);
+  std::unique_ptr<uint8_t[]> ref_bytes;
+  std::unique_ptr<uint8_t[]> out_bytes;
+  std::unique_ptr<uint8_t[]> limited_bytes;
+
+  size_t ref_size = ReadMessageBytesFromFile(ref_file, &ref_bytes);
+  size_t out_size = ReadMessageBytesFromFile(out_file, &out_bytes);
+  size_t limited_size = ReadMessageBytesFromFile(limited_file, &limited_bytes);
+  size_t bytes_read = 0;
+  size_t bytes_read_limited = 0;
+  while (ref_size > 0 && out_size > 0) {
+    bytes_read += ref_size;
+    bytes_read_limited += limited_size;
+    EXPECT_EQ(ref_size, out_size);
+    EXPECT_GE(ref_size, limited_size);
+    EXPECT_EQ(0, memcmp(ref_bytes.get(), out_bytes.get(), ref_size));
+    EXPECT_EQ(0, memcmp(ref_bytes.get(), limited_bytes.get(), limited_size));
+    ref_size = ReadMessageBytesFromFile(ref_file, &ref_bytes);
+    out_size = ReadMessageBytesFromFile(out_file, &out_bytes);
+    limited_size = ReadMessageBytesFromFile(limited_file, &limited_bytes);
+  }
+  EXPECT_GT(bytes_read, 0u);
+  EXPECT_GT(bytes_read_limited, logging_expected_bytes);
+  EXPECT_LE(bytes_read_limited, logging_limit_bytes);
+  EXPECT_NE(0, feof(ref_file));
+  EXPECT_NE(0, feof(out_file));
+  EXPECT_NE(0, feof(limited_file));
+  ASSERT_EQ(0, fclose(ref_file));
+  ASSERT_EQ(0, fclose(out_file));
+  ASSERT_EQ(0, fclose(limited_file));
+  remove(ref_filename.c_str());
+  remove(out_filename.c_str());
+  remove(limited_filename.c_str());
+}
+
+TEST_F(ApmTest, VerifyDebugDumpInt) {
+  VerifyDebugDumpTest(kIntFormat);
+}
+
+TEST_F(ApmTest, VerifyDebugDumpFloat) {
+  VerifyDebugDumpTest(kFloatFormat);
+}
+#endif
+
+// TODO(andrew): expand test to verify output.
+TEST_F(ApmTest, DebugDump) {
+  rtc::TaskQueue worker_queue("ApmTest_worker_queue");
+  const std::string filename =
+      test::TempFilename(test::OutputPath(), "debug_aec");
+  {
+    auto aec_dump = AecDumpFactory::Create("", -1, &worker_queue);
+    EXPECT_FALSE(aec_dump);
+  }
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+  // Stopping without having started should be OK.
+  apm_->DetachAecDump();
+
+  auto aec_dump = AecDumpFactory::Create(filename, -1, &worker_queue);
+  EXPECT_TRUE(aec_dump);
+  apm_->AttachAecDump(std::move(aec_dump));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessReverseStream(revframe_));
+  apm_->DetachAecDump();
+
+  // Verify the file has been written.
+  FILE* fid = fopen(filename.c_str(), "r");
+  ASSERT_TRUE(fid != NULL);
+
+  // Clean it up.
+  ASSERT_EQ(0, fclose(fid));
+  ASSERT_EQ(0, remove(filename.c_str()));
+#else
+  // Verify the file has NOT been written.
+  ASSERT_TRUE(fopen(filename.c_str(), "r") == NULL);
+#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
+}
+
+// TODO(andrew): expand test to verify output.
+TEST_F(ApmTest, DebugDumpFromFileHandle) {
+  rtc::TaskQueue worker_queue("ApmTest_worker_queue");
+
+  const std::string filename =
+      test::TempFilename(test::OutputPath(), "debug_aec");
+  FILE* fid = fopen(filename.c_str(), "w");
+  ASSERT_TRUE(fid);
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+  // Stopping without having started should be OK.
+  apm_->DetachAecDump();
+
+  auto aec_dump = AecDumpFactory::Create(fid, -1, &worker_queue);
+  EXPECT_TRUE(aec_dump);
+  apm_->AttachAecDump(std::move(aec_dump));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessReverseStream(revframe_));
+  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+  apm_->DetachAecDump();
+
+  // Verify the file has been written.
+  fid = fopen(filename.c_str(), "r");
+  ASSERT_TRUE(fid != NULL);
+
+  // Clean it up.
+  ASSERT_EQ(0, fclose(fid));
+  ASSERT_EQ(0, remove(filename.c_str()));
+#else
+  ASSERT_EQ(0, fclose(fid));
+#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
+}
+
+TEST_F(ApmTest, FloatAndIntInterfacesGiveSimilarResults) {
+  audioproc::OutputData ref_data;
+  OpenFileAndReadMessage(ref_filename_, &ref_data);
+
+  Config config;
+  config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
+  std::unique_ptr<AudioProcessing> fapm(
+      AudioProcessingBuilder().Create(config));
+  EnableAllComponents();
+  EnableAllAPComponents(fapm.get());
+  for (int i = 0; i < ref_data.test_size(); i++) {
+    printf("Running test %d of %d...\n", i + 1, ref_data.test_size());
+
+    audioproc::Test* test = ref_data.mutable_test(i);
+    // TODO(ajm): Restore downmixing test cases.
+    if (test->num_input_channels() != test->num_output_channels())
+      continue;
+
+    const size_t num_render_channels =
+        static_cast<size_t>(test->num_reverse_channels());
+    const size_t num_input_channels =
+        static_cast<size_t>(test->num_input_channels());
+    const size_t num_output_channels =
+        static_cast<size_t>(test->num_output_channels());
+    const size_t samples_per_channel = static_cast<size_t>(
+        test->sample_rate() * AudioProcessing::kChunkSizeMs / 1000);
+
+    Init(test->sample_rate(), test->sample_rate(), test->sample_rate(),
+         num_input_channels, num_output_channels, num_render_channels, true);
+    Init(fapm.get());
+
+    ChannelBuffer<int16_t> output_cb(samples_per_channel, num_input_channels);
+    ChannelBuffer<int16_t> output_int16(samples_per_channel,
+                                        num_input_channels);
+
+    int analog_level = 127;
+    size_t num_bad_chunks = 0;
+    while (ReadFrame(far_file_, revframe_, revfloat_cb_.get()) &&
+           ReadFrame(near_file_, frame_, float_cb_.get())) {
+      frame_->vad_activity_ = AudioFrame::kVadUnknown;
+
+      EXPECT_NOERR(apm_->ProcessReverseStream(revframe_));
+      EXPECT_NOERR(fapm->AnalyzeReverseStream(
+          revfloat_cb_->channels(),
+          samples_per_channel,
+          test->sample_rate(),
+          LayoutFromChannels(num_render_channels)));
+
+      EXPECT_NOERR(apm_->set_stream_delay_ms(0));
+      EXPECT_NOERR(fapm->set_stream_delay_ms(0));
+      apm_->echo_cancellation()->set_stream_drift_samples(0);
+      fapm->echo_cancellation()->set_stream_drift_samples(0);
+      EXPECT_NOERR(apm_->gain_control()->set_stream_analog_level(analog_level));
+      EXPECT_NOERR(fapm->gain_control()->set_stream_analog_level(analog_level));
+
+      EXPECT_NOERR(apm_->ProcessStream(frame_));
+      Deinterleave(frame_->data(), samples_per_channel, num_output_channels,
+                   output_int16.channels());
+
+      EXPECT_NOERR(fapm->ProcessStream(
+          float_cb_->channels(),
+          samples_per_channel,
+          test->sample_rate(),
+          LayoutFromChannels(num_input_channels),
+          test->sample_rate(),
+          LayoutFromChannels(num_output_channels),
+          float_cb_->channels()));
+      for (size_t j = 0; j < num_output_channels; ++j) {
+        FloatToS16(float_cb_->channels()[j],
+                   samples_per_channel,
+                   output_cb.channels()[j]);
+        float variance = 0;
+        float snr = ComputeSNR(output_int16.channels()[j],
+                               output_cb.channels()[j],
+                               samples_per_channel, &variance);
+
+        const float kVarianceThreshold = 20;
+        const float kSNRThreshold = 20;
+
+        // Skip frames with low energy.
+        if (sqrt(variance) > kVarianceThreshold && snr < kSNRThreshold) {
+          ++num_bad_chunks;
+        }
+      }
+
+      analog_level = fapm->gain_control()->stream_analog_level();
+      EXPECT_EQ(apm_->gain_control()->stream_analog_level(),
+                fapm->gain_control()->stream_analog_level());
+      EXPECT_EQ(apm_->echo_cancellation()->stream_has_echo(),
+                fapm->echo_cancellation()->stream_has_echo());
+      EXPECT_NEAR(apm_->noise_suppression()->speech_probability(),
+                  fapm->noise_suppression()->speech_probability(),
+                  0.01);
+
+      // Reset in case of downmixing.
+      frame_->num_channels_ = static_cast<size_t>(test->num_input_channels());
+    }
+
+#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+    const size_t kMaxNumBadChunks = 0;
+#elif defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
+    // There are a few chunks in the fixed-point profile that give low SNR.
+    // Listening confirmed the difference is acceptable.
+    const size_t kMaxNumBadChunks = 60;
+#endif
+    EXPECT_LE(num_bad_chunks, kMaxNumBadChunks);
+
+    rewind(far_file_);
+    rewind(near_file_);
+  }
+}
+
+// TODO(andrew): Add a test to process a few frames with different combinations
+// of enabled components.
+
+TEST_F(ApmTest, Process) {
+  GOOGLE_PROTOBUF_VERIFY_VERSION;
+  audioproc::OutputData ref_data;
+
+  if (!write_ref_data) {
+    OpenFileAndReadMessage(ref_filename_, &ref_data);
+  } else {
+    // Write the desired tests to the protobuf reference file.
+    for (size_t i = 0; i < arraysize(kChannels); i++) {
+      for (size_t j = 0; j < arraysize(kChannels); j++) {
+        for (size_t l = 0; l < arraysize(kProcessSampleRates); l++) {
+          audioproc::Test* test = ref_data.add_test();
+          test->set_num_reverse_channels(kChannels[i]);
+          test->set_num_input_channels(kChannels[j]);
+          test->set_num_output_channels(kChannels[j]);
+          test->set_sample_rate(kProcessSampleRates[l]);
+          test->set_use_aec_extended_filter(false);
+        }
+      }
+    }
+#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+    // To test the extended filter mode.
+    audioproc::Test* test = ref_data.add_test();
+    test->set_num_reverse_channels(2);
+    test->set_num_input_channels(2);
+    test->set_num_output_channels(2);
+    test->set_sample_rate(AudioProcessing::kSampleRate32kHz);
+    test->set_use_aec_extended_filter(true);
+#endif
+  }
+
+  for (int i = 0; i < ref_data.test_size(); i++) {
+    printf("Running test %d of %d...\n", i + 1, ref_data.test_size());
+
+    audioproc::Test* test = ref_data.mutable_test(i);
+    // TODO(ajm): We no longer allow different input and output channels. Skip
+    // these tests for now, but they should be removed from the set.
+    if (test->num_input_channels() != test->num_output_channels())
+      continue;
+
+    Config config;
+    config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
+    config.Set<ExtendedFilter>(
+        new ExtendedFilter(test->use_aec_extended_filter()));
+    apm_.reset(AudioProcessingBuilder().Create(config));
+
+    EnableAllComponents();
+
+    Init(test->sample_rate(),
+         test->sample_rate(),
+         test->sample_rate(),
+         static_cast<size_t>(test->num_input_channels()),
+         static_cast<size_t>(test->num_output_channels()),
+         static_cast<size_t>(test->num_reverse_channels()),
+         true);
+
+    int frame_count = 0;
+    int has_echo_count = 0;
+    int has_voice_count = 0;
+    int is_saturated_count = 0;
+    int analog_level = 127;
+    int analog_level_average = 0;
+    int max_output_average = 0;
+    float ns_speech_prob_average = 0.0f;
+#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+  int stats_index = 0;
+#endif
+
+    while (ReadFrame(far_file_, revframe_) && ReadFrame(near_file_, frame_)) {
+      EXPECT_EQ(apm_->kNoError, apm_->ProcessReverseStream(revframe_));
+
+      frame_->vad_activity_ = AudioFrame::kVadUnknown;
+
+      EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
+      apm_->echo_cancellation()->set_stream_drift_samples(0);
+      EXPECT_EQ(apm_->kNoError,
+          apm_->gain_control()->set_stream_analog_level(analog_level));
+
+      EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+
+      // Ensure the frame was downmixed properly.
+      EXPECT_EQ(static_cast<size_t>(test->num_output_channels()),
+                frame_->num_channels_);
+
+      max_output_average += MaxAudioFrame(*frame_);
+
+      if (apm_->echo_cancellation()->stream_has_echo()) {
+        has_echo_count++;
+      }
+
+      analog_level = apm_->gain_control()->stream_analog_level();
+      analog_level_average += analog_level;
+      if (apm_->gain_control()->stream_is_saturated()) {
+        is_saturated_count++;
+      }
+      if (apm_->voice_detection()->stream_has_voice()) {
+        has_voice_count++;
+        EXPECT_EQ(AudioFrame::kVadActive, frame_->vad_activity_);
+      } else {
+        EXPECT_EQ(AudioFrame::kVadPassive, frame_->vad_activity_);
+      }
+
+      ns_speech_prob_average += apm_->noise_suppression()->speech_probability();
+
+      size_t frame_size = frame_->samples_per_channel_ * frame_->num_channels_;
+      size_t write_count = fwrite(frame_->data(),
+                                  sizeof(int16_t),
+                                  frame_size,
+                                  out_file_);
+      ASSERT_EQ(frame_size, write_count);
+
+      // Reset in case of downmixing.
+      frame_->num_channels_ = static_cast<size_t>(test->num_input_channels());
+      frame_count++;
+
+#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+      const int kStatsAggregationFrameNum = 100;  // 1 second.
+      if (frame_count % kStatsAggregationFrameNum == 0) {
+        // Get echo metrics.
+        EchoCancellation::Metrics echo_metrics;
+        EXPECT_EQ(apm_->kNoError,
+                  apm_->echo_cancellation()->GetMetrics(&echo_metrics));
+
+        // Get delay metrics.
+        int median = 0;
+        int std = 0;
+        float fraction_poor_delays = 0;
+        EXPECT_EQ(apm_->kNoError,
+                  apm_->echo_cancellation()->GetDelayMetrics(
+                      &median, &std, &fraction_poor_delays));
+
+        // Get RMS.
+        int rms_level = apm_->level_estimator()->RMS();
+        EXPECT_LE(0, rms_level);
+        EXPECT_GE(127, rms_level);
+
+        if (!write_ref_data) {
+          const audioproc::Test::EchoMetrics& reference =
+              test->echo_metrics(stats_index);
+          TestStats(echo_metrics.residual_echo_return_loss,
+                    reference.residual_echo_return_loss());
+          TestStats(echo_metrics.echo_return_loss,
+                    reference.echo_return_loss());
+          TestStats(echo_metrics.echo_return_loss_enhancement,
+                    reference.echo_return_loss_enhancement());
+          TestStats(echo_metrics.a_nlp,
+                    reference.a_nlp());
+          EXPECT_EQ(echo_metrics.divergent_filter_fraction,
+                    reference.divergent_filter_fraction());
+
+          const audioproc::Test::DelayMetrics& reference_delay =
+              test->delay_metrics(stats_index);
+          EXPECT_EQ(reference_delay.median(), median);
+          EXPECT_EQ(reference_delay.std(), std);
+          EXPECT_EQ(reference_delay.fraction_poor_delays(),
+                    fraction_poor_delays);
+
+          EXPECT_EQ(test->rms_level(stats_index), rms_level);
+
+          ++stats_index;
+        } else {
+          audioproc::Test::EchoMetrics* message =
+              test->add_echo_metrics();
+          WriteStatsMessage(echo_metrics.residual_echo_return_loss,
+                            message->mutable_residual_echo_return_loss());
+          WriteStatsMessage(echo_metrics.echo_return_loss,
+                            message->mutable_echo_return_loss());
+          WriteStatsMessage(echo_metrics.echo_return_loss_enhancement,
+                            message->mutable_echo_return_loss_enhancement());
+          WriteStatsMessage(echo_metrics.a_nlp,
+                            message->mutable_a_nlp());
+          message->set_divergent_filter_fraction(
+              echo_metrics.divergent_filter_fraction);
+
+          audioproc::Test::DelayMetrics* message_delay =
+              test->add_delay_metrics();
+          message_delay->set_median(median);
+          message_delay->set_std(std);
+          message_delay->set_fraction_poor_delays(fraction_poor_delays);
+
+          test->add_rms_level(rms_level);
+        }
+      }
+#endif  // defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE).
+    }
+    max_output_average /= frame_count;
+    analog_level_average /= frame_count;
+    ns_speech_prob_average /= frame_count;
+
+    if (!write_ref_data) {
+      const int kIntNear = 1;
+      // When running the test on a N7 we get a {2, 6} difference of
+      // |has_voice_count| and |max_output_average| is up to 18 higher.
+      // All numbers being consistently higher on N7 compare to ref_data.
+      // TODO(bjornv): If we start getting more of these offsets on Android we
+      // should consider a different approach. Either using one slack for all,
+      // or generate a separate android reference.
+#if defined(WEBRTC_ANDROID)
+      const int kHasVoiceCountOffset = 3;
+      const int kHasVoiceCountNear = 4;
+      const int kMaxOutputAverageOffset = 9;
+      const int kMaxOutputAverageNear = 9;
+#else
+      const int kHasVoiceCountOffset = 0;
+      const int kHasVoiceCountNear = kIntNear;
+      const int kMaxOutputAverageOffset = 0;
+      const int kMaxOutputAverageNear = kIntNear;
+#endif
+      EXPECT_NEAR(test->has_echo_count(), has_echo_count, kIntNear);
+      EXPECT_NEAR(test->has_voice_count(),
+                  has_voice_count - kHasVoiceCountOffset,
+                  kHasVoiceCountNear);
+      EXPECT_NEAR(test->is_saturated_count(), is_saturated_count, kIntNear);
+
+      EXPECT_NEAR(test->analog_level_average(), analog_level_average, kIntNear);
+      EXPECT_NEAR(test->max_output_average(),
+                  max_output_average - kMaxOutputAverageOffset,
+                  kMaxOutputAverageNear);
+#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+      const double kFloatNear = 0.0005;
+      EXPECT_NEAR(test->ns_speech_probability_average(),
+                  ns_speech_prob_average,
+                  kFloatNear);
+#endif
+    } else {
+      test->set_has_echo_count(has_echo_count);
+      test->set_has_voice_count(has_voice_count);
+      test->set_is_saturated_count(is_saturated_count);
+
+      test->set_analog_level_average(analog_level_average);
+      test->set_max_output_average(max_output_average);
+
+#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+      EXPECT_LE(0.0f, ns_speech_prob_average);
+      EXPECT_GE(1.0f, ns_speech_prob_average);
+      test->set_ns_speech_probability_average(ns_speech_prob_average);
+#endif
+    }
+
+    rewind(far_file_);
+    rewind(near_file_);
+  }
+
+  if (write_ref_data) {
+    OpenFileAndWriteMessage(ref_filename_, ref_data);
+  }
+}
+
+TEST_F(ApmTest, NoErrorsWithKeyboardChannel) {
+  struct ChannelFormat {
+    AudioProcessing::ChannelLayout in_layout;
+    AudioProcessing::ChannelLayout out_layout;
+  };
+  ChannelFormat cf[] = {
+    {AudioProcessing::kMonoAndKeyboard, AudioProcessing::kMono},
+    {AudioProcessing::kStereoAndKeyboard, AudioProcessing::kMono},
+    {AudioProcessing::kStereoAndKeyboard, AudioProcessing::kStereo},
+  };
+
+  std::unique_ptr<AudioProcessing> ap(AudioProcessingBuilder().Create());
+  // Enable one component just to ensure some processing takes place.
+  ap->noise_suppression()->Enable(true);
+  for (size_t i = 0; i < arraysize(cf); ++i) {
+    const int in_rate = 44100;
+    const int out_rate = 48000;
+    ChannelBuffer<float> in_cb(SamplesFromRate(in_rate),
+                               TotalChannelsFromLayout(cf[i].in_layout));
+    ChannelBuffer<float> out_cb(SamplesFromRate(out_rate),
+                                ChannelsFromLayout(cf[i].out_layout));
+
+    // Run over a few chunks.
+    for (int j = 0; j < 10; ++j) {
+      EXPECT_NOERR(ap->ProcessStream(
+          in_cb.channels(),
+          in_cb.num_frames(),
+          in_rate,
+          cf[i].in_layout,
+          out_rate,
+          cf[i].out_layout,
+          out_cb.channels()));
+    }
+  }
+}
+
+// Compares the reference and test arrays over a region around the expected
+// delay. Finds the highest SNR in that region and adds the variance and squared
+// error results to the supplied accumulators.
+void UpdateBestSNR(const float* ref,
+                   const float* test,
+                   size_t length,
+                   int expected_delay,
+                   double* variance_acc,
+                   double* sq_error_acc) {
+  double best_snr = std::numeric_limits<double>::min();
+  double best_variance = 0;
+  double best_sq_error = 0;
+  // Search over a region of eight samples around the expected delay.
+  for (int delay = std::max(expected_delay - 4, 0); delay <= expected_delay + 4;
+       ++delay) {
+    double sq_error = 0;
+    double variance = 0;
+    for (size_t i = 0; i < length - delay; ++i) {
+      double error = test[i + delay] - ref[i];
+      sq_error += error * error;
+      variance += ref[i] * ref[i];
+    }
+
+    if (sq_error == 0) {
+      *variance_acc += variance;
+      return;
+    }
+    double snr = variance / sq_error;
+    if (snr > best_snr) {
+      best_snr = snr;
+      best_variance = variance;
+      best_sq_error = sq_error;
+    }
+  }
+
+  *variance_acc += best_variance;
+  *sq_error_acc += best_sq_error;
+}
+
+// Used to test a multitude of sample rate and channel combinations. It works
+// by first producing a set of reference files (in SetUpTestCase) that are
+// assumed to be correct, as the used parameters are verified by other tests
+// in this collection. Primarily the reference files are all produced at
+// "native" rates which do not involve any resampling.
+
+// Each test pass produces an output file with a particular format. The output
+// is matched against the reference file closest to its internal processing
+// format. If necessary the output is resampled back to its process format.
+// Due to the resampling distortion, we don't expect identical results, but
+// enforce SNR thresholds which vary depending on the format. 0 is a special
+// case SNR which corresponds to inf, or zero error.
+typedef std::tuple<int, int, int, int, double, double> AudioProcessingTestData;
+class AudioProcessingTest
+    : public testing::TestWithParam<AudioProcessingTestData> {
+ public:
+  AudioProcessingTest()
+      : input_rate_(std::get<0>(GetParam())),
+        output_rate_(std::get<1>(GetParam())),
+        reverse_input_rate_(std::get<2>(GetParam())),
+        reverse_output_rate_(std::get<3>(GetParam())),
+        expected_snr_(std::get<4>(GetParam())),
+        expected_reverse_snr_(std::get<5>(GetParam())) {}
+
+  virtual ~AudioProcessingTest() {}
+
+  static void SetUpTestCase() {
+    // Create all needed output reference files.
+    const int kNativeRates[] = {8000, 16000, 32000, 48000};
+    const size_t kNumChannels[] = {1, 2};
+    for (size_t i = 0; i < arraysize(kNativeRates); ++i) {
+      for (size_t j = 0; j < arraysize(kNumChannels); ++j) {
+        for (size_t k = 0; k < arraysize(kNumChannels); ++k) {
+          // The reference files always have matching input and output channels.
+          ProcessFormat(kNativeRates[i], kNativeRates[i], kNativeRates[i],
+                        kNativeRates[i], kNumChannels[j], kNumChannels[j],
+                        kNumChannels[k], kNumChannels[k], "ref");
+        }
+      }
+    }
+  }
+
+  void TearDown() {
+    // Remove "out" files after each test.
+    ClearTempOutFiles();
+  }
+
+  static void TearDownTestCase() {
+    ClearTempFiles();
+  }
+
+  // Runs a process pass on files with the given parameters and dumps the output
+  // to a file specified with |output_file_prefix|. Both forward and reverse
+  // output streams are dumped.
+  static void ProcessFormat(int input_rate,
+                            int output_rate,
+                            int reverse_input_rate,
+                            int reverse_output_rate,
+                            size_t num_input_channels,
+                            size_t num_output_channels,
+                            size_t num_reverse_input_channels,
+                            size_t num_reverse_output_channels,
+                            const std::string& output_file_prefix) {
+    Config config;
+    config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
+    std::unique_ptr<AudioProcessing> ap(
+        AudioProcessingBuilder().Create(config));
+    EnableAllAPComponents(ap.get());
+
+    ProcessingConfig processing_config = {
+        {{input_rate, num_input_channels},
+         {output_rate, num_output_channels},
+         {reverse_input_rate, num_reverse_input_channels},
+         {reverse_output_rate, num_reverse_output_channels}}};
+    ap->Initialize(processing_config);
+
+    FILE* far_file =
+        fopen(ResourceFilePath("far", reverse_input_rate).c_str(), "rb");
+    FILE* near_file = fopen(ResourceFilePath("near", input_rate).c_str(), "rb");
+    FILE* out_file =
+        fopen(OutputFilePath(output_file_prefix, input_rate, output_rate,
+                             reverse_input_rate, reverse_output_rate,
+                             num_input_channels, num_output_channels,
+                             num_reverse_input_channels,
+                             num_reverse_output_channels, kForward).c_str(),
+              "wb");
+    FILE* rev_out_file =
+        fopen(OutputFilePath(output_file_prefix, input_rate, output_rate,
+                             reverse_input_rate, reverse_output_rate,
+                             num_input_channels, num_output_channels,
+                             num_reverse_input_channels,
+                             num_reverse_output_channels, kReverse).c_str(),
+              "wb");
+    ASSERT_TRUE(far_file != NULL);
+    ASSERT_TRUE(near_file != NULL);
+    ASSERT_TRUE(out_file != NULL);
+    ASSERT_TRUE(rev_out_file != NULL);
+
+    ChannelBuffer<float> fwd_cb(SamplesFromRate(input_rate),
+                                num_input_channels);
+    ChannelBuffer<float> rev_cb(SamplesFromRate(reverse_input_rate),
+                                num_reverse_input_channels);
+    ChannelBuffer<float> out_cb(SamplesFromRate(output_rate),
+                                num_output_channels);
+    ChannelBuffer<float> rev_out_cb(SamplesFromRate(reverse_output_rate),
+                                    num_reverse_output_channels);
+
+    // Temporary buffers.
+    const int max_length =
+        2 * std::max(std::max(out_cb.num_frames(), rev_out_cb.num_frames()),
+                     std::max(fwd_cb.num_frames(), rev_cb.num_frames()));
+    std::unique_ptr<float[]> float_data(new float[max_length]);
+    std::unique_ptr<int16_t[]> int_data(new int16_t[max_length]);
+
+    int analog_level = 127;
+    while (ReadChunk(far_file, int_data.get(), float_data.get(), &rev_cb) &&
+           ReadChunk(near_file, int_data.get(), float_data.get(), &fwd_cb)) {
+      EXPECT_NOERR(ap->ProcessReverseStream(
+          rev_cb.channels(), processing_config.reverse_input_stream(),
+          processing_config.reverse_output_stream(), rev_out_cb.channels()));
+
+      EXPECT_NOERR(ap->set_stream_delay_ms(0));
+      ap->echo_cancellation()->set_stream_drift_samples(0);
+      EXPECT_NOERR(ap->gain_control()->set_stream_analog_level(analog_level));
+
+      EXPECT_NOERR(ap->ProcessStream(
+          fwd_cb.channels(),
+          fwd_cb.num_frames(),
+          input_rate,
+          LayoutFromChannels(num_input_channels),
+          output_rate,
+          LayoutFromChannels(num_output_channels),
+          out_cb.channels()));
+
+      // Dump forward output to file.
+      Interleave(out_cb.channels(), out_cb.num_frames(), out_cb.num_channels(),
+                 float_data.get());
+      size_t out_length = out_cb.num_channels() * out_cb.num_frames();
+
+      ASSERT_EQ(out_length,
+                fwrite(float_data.get(), sizeof(float_data[0]),
+                       out_length, out_file));
+
+      // Dump reverse output to file.
+      Interleave(rev_out_cb.channels(), rev_out_cb.num_frames(),
+                 rev_out_cb.num_channels(), float_data.get());
+      size_t rev_out_length =
+          rev_out_cb.num_channels() * rev_out_cb.num_frames();
+
+      ASSERT_EQ(rev_out_length,
+                fwrite(float_data.get(), sizeof(float_data[0]), rev_out_length,
+                       rev_out_file));
+
+      analog_level = ap->gain_control()->stream_analog_level();
+    }
+    fclose(far_file);
+    fclose(near_file);
+    fclose(out_file);
+    fclose(rev_out_file);
+  }
+
+ protected:
+  int input_rate_;
+  int output_rate_;
+  int reverse_input_rate_;
+  int reverse_output_rate_;
+  double expected_snr_;
+  double expected_reverse_snr_;
+};
+
+TEST_P(AudioProcessingTest, Formats) {
+  struct ChannelFormat {
+    int num_input;
+    int num_output;
+    int num_reverse_input;
+    int num_reverse_output;
+  };
+  ChannelFormat cf[] = {
+      {1, 1, 1, 1},
+      {1, 1, 2, 1},
+      {2, 1, 1, 1},
+      {2, 1, 2, 1},
+      {2, 2, 1, 1},
+      {2, 2, 2, 2},
+  };
+
+  for (size_t i = 0; i < arraysize(cf); ++i) {
+    ProcessFormat(input_rate_, output_rate_, reverse_input_rate_,
+                  reverse_output_rate_, cf[i].num_input, cf[i].num_output,
+                  cf[i].num_reverse_input, cf[i].num_reverse_output, "out");
+
+    // Verify output for both directions.
+    std::vector<StreamDirection> stream_directions;
+    stream_directions.push_back(kForward);
+    stream_directions.push_back(kReverse);
+    for (StreamDirection file_direction : stream_directions) {
+      const int in_rate = file_direction ? reverse_input_rate_ : input_rate_;
+      const int out_rate = file_direction ? reverse_output_rate_ : output_rate_;
+      const int out_num =
+          file_direction ? cf[i].num_reverse_output : cf[i].num_output;
+      const double expected_snr =
+          file_direction ? expected_reverse_snr_ : expected_snr_;
+
+      const int min_ref_rate = std::min(in_rate, out_rate);
+      int ref_rate;
+
+      if (min_ref_rate > 32000) {
+        ref_rate = 48000;
+      } else if (min_ref_rate > 16000) {
+        ref_rate = 32000;
+      } else if (min_ref_rate > 8000) {
+        ref_rate = 16000;
+      } else {
+        ref_rate = 8000;
+      }
+#ifdef WEBRTC_ARCH_ARM_FAMILY
+      if (file_direction == kForward) {
+        ref_rate = std::min(ref_rate, 32000);
+      }
+#endif
+      FILE* out_file = fopen(
+          OutputFilePath("out", input_rate_, output_rate_, reverse_input_rate_,
+                         reverse_output_rate_, cf[i].num_input,
+                         cf[i].num_output, cf[i].num_reverse_input,
+                         cf[i].num_reverse_output, file_direction).c_str(),
+          "rb");
+      // The reference files always have matching input and output channels.
+      FILE* ref_file = fopen(
+          OutputFilePath("ref", ref_rate, ref_rate, ref_rate, ref_rate,
+                         cf[i].num_output, cf[i].num_output,
+                         cf[i].num_reverse_output, cf[i].num_reverse_output,
+                         file_direction).c_str(),
+          "rb");
+      ASSERT_TRUE(out_file != NULL);
+      ASSERT_TRUE(ref_file != NULL);
+
+      const size_t ref_length = SamplesFromRate(ref_rate) * out_num;
+      const size_t out_length = SamplesFromRate(out_rate) * out_num;
+      // Data from the reference file.
+      std::unique_ptr<float[]> ref_data(new float[ref_length]);
+      // Data from the output file.
+      std::unique_ptr<float[]> out_data(new float[out_length]);
+      // Data from the resampled output, in case the reference and output rates
+      // don't match.
+      std::unique_ptr<float[]> cmp_data(new float[ref_length]);
+
+      PushResampler<float> resampler;
+      resampler.InitializeIfNeeded(out_rate, ref_rate, out_num);
+
+      // Compute the resampling delay of the output relative to the reference,
+      // to find the region over which we should search for the best SNR.
+      float expected_delay_sec = 0;
+      if (in_rate != ref_rate) {
+        // Input resampling delay.
+        expected_delay_sec +=
+            PushSincResampler::AlgorithmicDelaySeconds(in_rate);
+      }
+      if (out_rate != ref_rate) {
+        // Output resampling delay.
+        expected_delay_sec +=
+            PushSincResampler::AlgorithmicDelaySeconds(ref_rate);
+        // Delay of converting the output back to its processing rate for
+        // testing.
+        expected_delay_sec +=
+            PushSincResampler::AlgorithmicDelaySeconds(out_rate);
+      }
+      int expected_delay =
+          floor(expected_delay_sec * ref_rate + 0.5f) * out_num;
+
+      double variance = 0;
+      double sq_error = 0;
+      while (fread(out_data.get(), sizeof(out_data[0]), out_length, out_file) &&
+             fread(ref_data.get(), sizeof(ref_data[0]), ref_length, ref_file)) {
+        float* out_ptr = out_data.get();
+        if (out_rate != ref_rate) {
+          // Resample the output back to its internal processing rate if
+          // necssary.
+          ASSERT_EQ(ref_length,
+                    static_cast<size_t>(resampler.Resample(
+                        out_ptr, out_length, cmp_data.get(), ref_length)));
+          out_ptr = cmp_data.get();
+        }
+
+        // Update the |sq_error| and |variance| accumulators with the highest
+        // SNR of reference vs output.
+        UpdateBestSNR(ref_data.get(), out_ptr, ref_length, expected_delay,
+                      &variance, &sq_error);
+      }
+
+      std::cout << "(" << input_rate_ << ", " << output_rate_ << ", "
+                << reverse_input_rate_ << ", " << reverse_output_rate_ << ", "
+                << cf[i].num_input << ", " << cf[i].num_output << ", "
+                << cf[i].num_reverse_input << ", " << cf[i].num_reverse_output
+                << ", " << file_direction << "): ";
+      if (sq_error > 0) {
+        double snr = 10 * log10(variance / sq_error);
+        EXPECT_GE(snr, expected_snr);
+        EXPECT_NE(0, expected_snr);
+        std::cout << "SNR=" << snr << " dB" << std::endl;
+      } else {
+        std::cout << "SNR=inf dB" << std::endl;
+      }
+
+      fclose(out_file);
+      fclose(ref_file);
+    }
+  }
+}
+
+#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+INSTANTIATE_TEST_CASE_P(
+    CommonFormats,
+    AudioProcessingTest,
+    testing::Values(std::make_tuple(48000, 48000, 48000, 48000, 0, 0),
+                    std::make_tuple(48000, 48000, 32000, 48000, 40, 30),
+                    std::make_tuple(48000, 48000, 16000, 48000, 40, 20),
+                    std::make_tuple(48000, 44100, 48000, 44100, 20, 20),
+                    std::make_tuple(48000, 44100, 32000, 44100, 20, 15),
+                    std::make_tuple(48000, 44100, 16000, 44100, 20, 15),
+                    std::make_tuple(48000, 32000, 48000, 32000, 30, 35),
+                    std::make_tuple(48000, 32000, 32000, 32000, 30, 0),
+                    std::make_tuple(48000, 32000, 16000, 32000, 30, 20),
+                    std::make_tuple(48000, 16000, 48000, 16000, 25, 20),
+                    std::make_tuple(48000, 16000, 32000, 16000, 25, 20),
+                    std::make_tuple(48000, 16000, 16000, 16000, 25, 0),
+
+                    std::make_tuple(44100, 48000, 48000, 48000, 30, 0),
+                    std::make_tuple(44100, 48000, 32000, 48000, 30, 30),
+                    std::make_tuple(44100, 48000, 16000, 48000, 30, 20),
+                    std::make_tuple(44100, 44100, 48000, 44100, 20, 20),
+                    std::make_tuple(44100, 44100, 32000, 44100, 20, 15),
+                    std::make_tuple(44100, 44100, 16000, 44100, 20, 15),
+                    std::make_tuple(44100, 32000, 48000, 32000, 30, 35),
+                    std::make_tuple(44100, 32000, 32000, 32000, 30, 0),
+                    std::make_tuple(44100, 32000, 16000, 32000, 30, 20),
+                    std::make_tuple(44100, 16000, 48000, 16000, 25, 20),
+                    std::make_tuple(44100, 16000, 32000, 16000, 25, 20),
+                    std::make_tuple(44100, 16000, 16000, 16000, 25, 0),
+
+                    std::make_tuple(32000, 48000, 48000, 48000, 30, 0),
+                    std::make_tuple(32000, 48000, 32000, 48000, 35, 30),
+                    std::make_tuple(32000, 48000, 16000, 48000, 30, 20),
+                    std::make_tuple(32000, 44100, 48000, 44100, 20, 20),
+                    std::make_tuple(32000, 44100, 32000, 44100, 20, 15),
+                    std::make_tuple(32000, 44100, 16000, 44100, 20, 15),
+                    std::make_tuple(32000, 32000, 48000, 32000, 40, 35),
+                    std::make_tuple(32000, 32000, 32000, 32000, 0, 0),
+                    std::make_tuple(32000, 32000, 16000, 32000, 40, 20),
+                    std::make_tuple(32000, 16000, 48000, 16000, 25, 20),
+                    std::make_tuple(32000, 16000, 32000, 16000, 25, 20),
+                    std::make_tuple(32000, 16000, 16000, 16000, 25, 0),
+
+                    std::make_tuple(16000, 48000, 48000, 48000, 25, 0),
+                    std::make_tuple(16000, 48000, 32000, 48000, 25, 30),
+                    std::make_tuple(16000, 48000, 16000, 48000, 25, 20),
+                    std::make_tuple(16000, 44100, 48000, 44100, 15, 20),
+                    std::make_tuple(16000, 44100, 32000, 44100, 15, 15),
+                    std::make_tuple(16000, 44100, 16000, 44100, 15, 15),
+                    std::make_tuple(16000, 32000, 48000, 32000, 25, 35),
+                    std::make_tuple(16000, 32000, 32000, 32000, 25, 0),
+                    std::make_tuple(16000, 32000, 16000, 32000, 25, 20),
+                    std::make_tuple(16000, 16000, 48000, 16000, 40, 20),
+                    std::make_tuple(16000, 16000, 32000, 16000, 40, 20),
+                    std::make_tuple(16000, 16000, 16000, 16000, 0, 0)));
+
+#elif defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
+INSTANTIATE_TEST_CASE_P(
+    CommonFormats,
+    AudioProcessingTest,
+    testing::Values(std::make_tuple(48000, 48000, 48000, 48000, 20, 0),
+                    std::make_tuple(48000, 48000, 32000, 48000, 20, 30),
+                    std::make_tuple(48000, 48000, 16000, 48000, 20, 20),
+                    std::make_tuple(48000, 44100, 48000, 44100, 15, 20),
+                    std::make_tuple(48000, 44100, 32000, 44100, 15, 15),
+                    std::make_tuple(48000, 44100, 16000, 44100, 15, 15),
+                    std::make_tuple(48000, 32000, 48000, 32000, 20, 35),
+                    std::make_tuple(48000, 32000, 32000, 32000, 20, 0),
+                    std::make_tuple(48000, 32000, 16000, 32000, 20, 20),
+                    std::make_tuple(48000, 16000, 48000, 16000, 20, 20),
+                    std::make_tuple(48000, 16000, 32000, 16000, 20, 20),
+                    std::make_tuple(48000, 16000, 16000, 16000, 20, 0),
+
+                    std::make_tuple(44100, 48000, 48000, 48000, 15, 0),
+                    std::make_tuple(44100, 48000, 32000, 48000, 15, 30),
+                    std::make_tuple(44100, 48000, 16000, 48000, 15, 20),
+                    std::make_tuple(44100, 44100, 48000, 44100, 15, 20),
+                    std::make_tuple(44100, 44100, 32000, 44100, 15, 15),
+                    std::make_tuple(44100, 44100, 16000, 44100, 15, 15),
+                    std::make_tuple(44100, 32000, 48000, 32000, 20, 35),
+                    std::make_tuple(44100, 32000, 32000, 32000, 20, 0),
+                    std::make_tuple(44100, 32000, 16000, 32000, 20, 20),
+                    std::make_tuple(44100, 16000, 48000, 16000, 20, 20),
+                    std::make_tuple(44100, 16000, 32000, 16000, 20, 20),
+                    std::make_tuple(44100, 16000, 16000, 16000, 20, 0),
+
+                    std::make_tuple(32000, 48000, 48000, 48000, 35, 0),
+                    std::make_tuple(32000, 48000, 32000, 48000, 65, 30),
+                    std::make_tuple(32000, 48000, 16000, 48000, 40, 20),
+                    std::make_tuple(32000, 44100, 48000, 44100, 20, 20),
+                    std::make_tuple(32000, 44100, 32000, 44100, 20, 15),
+                    std::make_tuple(32000, 44100, 16000, 44100, 20, 15),
+                    std::make_tuple(32000, 32000, 48000, 32000, 35, 35),
+                    std::make_tuple(32000, 32000, 32000, 32000, 0, 0),
+                    std::make_tuple(32000, 32000, 16000, 32000, 40, 20),
+                    std::make_tuple(32000, 16000, 48000, 16000, 20, 20),
+                    std::make_tuple(32000, 16000, 32000, 16000, 20, 20),
+                    std::make_tuple(32000, 16000, 16000, 16000, 20, 0),
+
+                    std::make_tuple(16000, 48000, 48000, 48000, 25, 0),
+                    std::make_tuple(16000, 48000, 32000, 48000, 25, 30),
+                    std::make_tuple(16000, 48000, 16000, 48000, 25, 20),
+                    std::make_tuple(16000, 44100, 48000, 44100, 15, 20),
+                    std::make_tuple(16000, 44100, 32000, 44100, 15, 15),
+                    std::make_tuple(16000, 44100, 16000, 44100, 15, 15),
+                    std::make_tuple(16000, 32000, 48000, 32000, 25, 35),
+                    std::make_tuple(16000, 32000, 32000, 32000, 25, 0),
+                    std::make_tuple(16000, 32000, 16000, 32000, 25, 20),
+                    std::make_tuple(16000, 16000, 48000, 16000, 35, 20),
+                    std::make_tuple(16000, 16000, 32000, 16000, 35, 20),
+                    std::make_tuple(16000, 16000, 16000, 16000, 0, 0)));
+#endif
+
+}  // namespace
+
+TEST(ApmConfiguration, EnablePostProcessing) {
+  // Verify that apm uses a capture post processing module if one is provided.
+  webrtc::Config webrtc_config;
+  auto mock_post_processor_ptr =
+      new testing::NiceMock<test::MockCustomProcessing>();
+  auto mock_post_processor =
+      std::unique_ptr<CustomProcessing>(mock_post_processor_ptr);
+  rtc::scoped_refptr<AudioProcessing> apm =
+      AudioProcessingBuilder()
+          .SetCapturePostProcessing(std::move(mock_post_processor))
+          .Create(webrtc_config);
+
+  AudioFrame audio;
+  audio.num_channels_ = 1;
+  SetFrameSampleRate(&audio, AudioProcessing::NativeRate::kSampleRate16kHz);
+
+  EXPECT_CALL(*mock_post_processor_ptr, Process(testing::_)).Times(1);
+  apm->ProcessStream(&audio);
+}
+
+TEST(ApmConfiguration, EnablePreProcessing) {
+  // Verify that apm uses a capture post processing module if one is provided.
+  webrtc::Config webrtc_config;
+  auto mock_pre_processor_ptr =
+      new testing::NiceMock<test::MockCustomProcessing>();
+  auto mock_pre_processor =
+      std::unique_ptr<CustomProcessing>(mock_pre_processor_ptr);
+  rtc::scoped_refptr<AudioProcessing> apm =
+      AudioProcessingBuilder()
+          .SetRenderPreProcessing(std::move(mock_pre_processor))
+          .Create(webrtc_config);
+
+  AudioFrame audio;
+  audio.num_channels_ = 1;
+  SetFrameSampleRate(&audio, AudioProcessing::NativeRate::kSampleRate16kHz);
+
+  EXPECT_CALL(*mock_pre_processor_ptr, Process(testing::_)).Times(1);
+  apm->ProcessReverseStream(&audio);
+}
+
+class MyEchoControlFactory : public EchoControlFactory {
+ public:
+  std::unique_ptr<EchoControl> Create(int sample_rate_hz) {
+    auto ec = new test::MockEchoControl();
+    EXPECT_CALL(*ec, AnalyzeRender(testing::_)).Times(1);
+    EXPECT_CALL(*ec, AnalyzeCapture(testing::_)).Times(2);
+    EXPECT_CALL(*ec, ProcessCapture(testing::_, testing::_)).Times(2);
+    return std::unique_ptr<EchoControl>(ec);
+  }
+};
+
+TEST(ApmConfiguration, EchoControlInjection) {
+  // Verify that apm uses an injected echo controller if one is provided.
+  webrtc::Config webrtc_config;
+  std::unique_ptr<EchoControlFactory> echo_control_factory(
+      new MyEchoControlFactory());
+
+  rtc::scoped_refptr<AudioProcessing> apm =
+      AudioProcessingBuilder()
+          .SetEchoControlFactory(std::move(echo_control_factory))
+          .Create(webrtc_config);
+
+  AudioFrame audio;
+  audio.num_channels_ = 1;
+  SetFrameSampleRate(&audio, AudioProcessing::NativeRate::kSampleRate16kHz);
+  apm->ProcessStream(&audio);
+  apm->ProcessReverseStream(&audio);
+  apm->ProcessStream(&audio);
+}
+
+std::unique_ptr<AudioProcessing> CreateApm(bool use_AEC2) {
+  Config old_config;
+  if (use_AEC2) {
+    old_config.Set<ExtendedFilter>(new ExtendedFilter(true));
+    old_config.Set<DelayAgnostic>(new DelayAgnostic(true));
+  }
+  std::unique_ptr<AudioProcessing> apm(
+      AudioProcessingBuilder().Create(old_config));
+  if (!apm) {
+    return apm;
+  }
+
+  ProcessingConfig processing_config = {
+      {{32000, 1}, {32000, 1}, {32000, 1}, {32000, 1}}};
+
+  if (apm->Initialize(processing_config) != 0) {
+    return nullptr;
+  }
+
+  // Disable all components except for an AEC and the residual echo detector.
+  AudioProcessing::Config config;
+  config.residual_echo_detector.enabled = true;
+  config.high_pass_filter.enabled = false;
+  config.gain_controller2.enabled = false;
+  apm->ApplyConfig(config);
+  EXPECT_EQ(apm->gain_control()->Enable(false), 0);
+  EXPECT_EQ(apm->level_estimator()->Enable(false), 0);
+  EXPECT_EQ(apm->noise_suppression()->Enable(false), 0);
+  EXPECT_EQ(apm->voice_detection()->Enable(false), 0);
+
+  if (use_AEC2) {
+    EXPECT_EQ(apm->echo_control_mobile()->Enable(false), 0);
+    EXPECT_EQ(apm->echo_cancellation()->enable_metrics(true), 0);
+    EXPECT_EQ(apm->echo_cancellation()->enable_delay_logging(true), 0);
+    EXPECT_EQ(apm->echo_cancellation()->Enable(true), 0);
+  } else {
+    EXPECT_EQ(apm->echo_cancellation()->Enable(false), 0);
+    EXPECT_EQ(apm->echo_control_mobile()->Enable(true), 0);
+  }
+  return apm;
+}
+
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) || defined(WEBRTC_MAC)
+#define MAYBE_ApmStatistics DISABLED_ApmStatistics
+#else
+#define MAYBE_ApmStatistics ApmStatistics
+#endif
+
+TEST(MAYBE_ApmStatistics, AEC2EnabledTest) {
+  // Set up APM with AEC2 and process some audio.
+  std::unique_ptr<AudioProcessing> apm = CreateApm(true);
+  ASSERT_TRUE(apm);
+
+  // Set up an audioframe.
+  AudioFrame frame;
+  frame.num_channels_ = 1;
+  SetFrameSampleRate(&frame, AudioProcessing::NativeRate::kSampleRate48kHz);
+
+  // Fill the audio frame with a sawtooth pattern.
+  int16_t* ptr = frame.mutable_data();
+  for (size_t i = 0; i < frame.kMaxDataSizeSamples; i++) {
+    ptr[i] = 10000 * ((i % 3) - 1);
+  }
+
+  // Do some processing.
+  for (int i = 0; i < 200; i++) {
+    EXPECT_EQ(apm->ProcessReverseStream(&frame), 0);
+    EXPECT_EQ(apm->set_stream_delay_ms(0), 0);
+    EXPECT_EQ(apm->ProcessStream(&frame), 0);
+  }
+
+  // Test statistics interface.
+  AudioProcessingStats stats = apm->GetStatistics(true);
+  // We expect all statistics to be set and have a sensible value.
+  ASSERT_TRUE(stats.residual_echo_likelihood);
+  EXPECT_GE(*stats.residual_echo_likelihood, 0.0);
+  EXPECT_LE(*stats.residual_echo_likelihood, 1.0);
+  ASSERT_TRUE(stats.residual_echo_likelihood_recent_max);
+  EXPECT_GE(*stats.residual_echo_likelihood_recent_max, 0.0);
+  EXPECT_LE(*stats.residual_echo_likelihood_recent_max, 1.0);
+  ASSERT_TRUE(stats.echo_return_loss);
+  EXPECT_NE(*stats.echo_return_loss, -100.0);
+  ASSERT_TRUE(stats.echo_return_loss_enhancement);
+  EXPECT_NE(*stats.echo_return_loss_enhancement, -100.0);
+  ASSERT_TRUE(stats.divergent_filter_fraction);
+  EXPECT_NE(*stats.divergent_filter_fraction, -1.0);
+  ASSERT_TRUE(stats.delay_standard_deviation_ms);
+  EXPECT_GE(*stats.delay_standard_deviation_ms, 0);
+  // We don't check stats.delay_median_ms since it takes too long to settle to a
+  // value. At least 20 seconds of data need to be processed before it will get
+  // a value, which would make this test take too much time.
+
+  // If there are no receive streams, we expect the stats not to be set. The
+  // 'false' argument signals to APM that no receive streams are currently
+  // active. In that situation the statistics would get stuck at their last
+  // calculated value (AEC and echo detection need at least one stream in each
+  // direction), so to avoid that, they should not be set by APM.
+  stats = apm->GetStatistics(false);
+  EXPECT_FALSE(stats.residual_echo_likelihood);
+  EXPECT_FALSE(stats.residual_echo_likelihood_recent_max);
+  EXPECT_FALSE(stats.echo_return_loss);
+  EXPECT_FALSE(stats.echo_return_loss_enhancement);
+  EXPECT_FALSE(stats.divergent_filter_fraction);
+  EXPECT_FALSE(stats.delay_median_ms);
+  EXPECT_FALSE(stats.delay_standard_deviation_ms);
+}
+
+TEST(MAYBE_ApmStatistics, AECMEnabledTest) {
+  // Set up APM with AECM and process some audio.
+  std::unique_ptr<AudioProcessing> apm = CreateApm(false);
+  ASSERT_TRUE(apm);
+
+  // Set up an audioframe.
+  AudioFrame frame;
+  frame.num_channels_ = 1;
+  SetFrameSampleRate(&frame, AudioProcessing::NativeRate::kSampleRate48kHz);
+
+  // Fill the audio frame with a sawtooth pattern.
+  int16_t* ptr = frame.mutable_data();
+  for (size_t i = 0; i < frame.kMaxDataSizeSamples; i++) {
+    ptr[i] = 10000 * ((i % 3) - 1);
+  }
+
+  // Do some processing.
+  for (int i = 0; i < 200; i++) {
+    EXPECT_EQ(apm->ProcessReverseStream(&frame), 0);
+    EXPECT_EQ(apm->set_stream_delay_ms(0), 0);
+    EXPECT_EQ(apm->ProcessStream(&frame), 0);
+  }
+
+  // Test statistics interface.
+  AudioProcessingStats stats = apm->GetStatistics(true);
+  // We expect only the residual echo detector statistics to be set and have a
+  // sensible value.
+  EXPECT_TRUE(stats.residual_echo_likelihood);
+  if (stats.residual_echo_likelihood) {
+    EXPECT_GE(*stats.residual_echo_likelihood, 0.0);
+    EXPECT_LE(*stats.residual_echo_likelihood, 1.0);
+  }
+  EXPECT_TRUE(stats.residual_echo_likelihood_recent_max);
+  if (stats.residual_echo_likelihood_recent_max) {
+    EXPECT_GE(*stats.residual_echo_likelihood_recent_max, 0.0);
+    EXPECT_LE(*stats.residual_echo_likelihood_recent_max, 1.0);
+  }
+  EXPECT_FALSE(stats.echo_return_loss);
+  EXPECT_FALSE(stats.echo_return_loss_enhancement);
+  EXPECT_FALSE(stats.divergent_filter_fraction);
+  EXPECT_FALSE(stats.delay_median_ms);
+  EXPECT_FALSE(stats.delay_standard_deviation_ms);
+
+  // If there are no receive streams, we expect the stats not to be set.
+  stats = apm->GetStatistics(false);
+  EXPECT_FALSE(stats.residual_echo_likelihood);
+  EXPECT_FALSE(stats.residual_echo_likelihood_recent_max);
+  EXPECT_FALSE(stats.echo_return_loss);
+  EXPECT_FALSE(stats.echo_return_loss_enhancement);
+  EXPECT_FALSE(stats.divergent_filter_fraction);
+  EXPECT_FALSE(stats.delay_median_ms);
+  EXPECT_FALSE(stats.delay_standard_deviation_ms);
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/beamformer/array_util.cc b/modules/audio_processing/beamformer/array_util.cc
new file mode 100644
index 0000000..e853559
--- /dev/null
+++ b/modules/audio_processing/beamformer/array_util.cc
@@ -0,0 +1,119 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/beamformer/array_util.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+const float kMaxDotProduct = 1e-6f;
+
+}  // namespace
+
+float GetMinimumSpacing(const std::vector<Point>& array_geometry) {
+  RTC_CHECK_GT(array_geometry.size(), 1);
+  float mic_spacing = std::numeric_limits<float>::max();
+  for (size_t i = 0; i < (array_geometry.size() - 1); ++i) {
+    for (size_t j = i + 1; j < array_geometry.size(); ++j) {
+      mic_spacing =
+          std::min(mic_spacing, Distance(array_geometry[i], array_geometry[j]));
+    }
+  }
+  return mic_spacing;
+}
+
+Point PairDirection(const Point& a, const Point& b) {
+  return {b.x() - a.x(), b.y() - a.y(), b.z() - a.z()};
+}
+
+float DotProduct(const Point& a, const Point& b) {
+  return a.x() * b.x() + a.y() * b.y() + a.z() * b.z();
+}
+
+Point CrossProduct(const Point& a, const Point& b) {
+  return {a.y() * b.z() - a.z() * b.y(), a.z() * b.x() - a.x() * b.z(),
+          a.x() * b.y() - a.y() * b.x()};
+}
+
+bool AreParallel(const Point& a, const Point& b) {
+  Point cross_product = CrossProduct(a, b);
+  return DotProduct(cross_product, cross_product) < kMaxDotProduct;
+}
+
+bool ArePerpendicular(const Point& a, const Point& b) {
+  return std::abs(DotProduct(a, b)) < kMaxDotProduct;
+}
+
+rtc::Optional<Point> GetDirectionIfLinear(
+    const std::vector<Point>& array_geometry) {
+  RTC_DCHECK_GT(array_geometry.size(), 1);
+  const Point first_pair_direction =
+      PairDirection(array_geometry[0], array_geometry[1]);
+  for (size_t i = 2u; i < array_geometry.size(); ++i) {
+    const Point pair_direction =
+        PairDirection(array_geometry[i - 1], array_geometry[i]);
+    if (!AreParallel(first_pair_direction, pair_direction)) {
+      return rtc::nullopt;
+    }
+  }
+  return first_pair_direction;
+}
+
+rtc::Optional<Point> GetNormalIfPlanar(
+    const std::vector<Point>& array_geometry) {
+  RTC_DCHECK_GT(array_geometry.size(), 1);
+  const Point first_pair_direction =
+      PairDirection(array_geometry[0], array_geometry[1]);
+  Point pair_direction(0.f, 0.f, 0.f);
+  size_t i = 2u;
+  bool is_linear = true;
+  for (; i < array_geometry.size() && is_linear; ++i) {
+    pair_direction = PairDirection(array_geometry[i - 1], array_geometry[i]);
+    if (!AreParallel(first_pair_direction, pair_direction)) {
+      is_linear = false;
+    }
+  }
+  if (is_linear) {
+    return rtc::nullopt;
+  }
+  const Point normal_direction =
+      CrossProduct(first_pair_direction, pair_direction);
+  for (; i < array_geometry.size(); ++i) {
+    pair_direction = PairDirection(array_geometry[i - 1], array_geometry[i]);
+    if (!ArePerpendicular(normal_direction, pair_direction)) {
+      return rtc::nullopt;
+    }
+  }
+  return normal_direction;
+}
+
+rtc::Optional<Point> GetArrayNormalIfExists(
+    const std::vector<Point>& array_geometry) {
+  const rtc::Optional<Point> direction = GetDirectionIfLinear(array_geometry);
+  if (direction) {
+    return Point(direction->y(), -direction->x(), 0.f);
+  }
+  const rtc::Optional<Point> normal = GetNormalIfPlanar(array_geometry);
+  if (normal && normal->z() < kMaxDotProduct) {
+    return normal;
+  }
+  return rtc::nullopt;
+}
+
+Point AzimuthToPoint(float azimuth) {
+  return Point(std::cos(azimuth), std::sin(azimuth), 0.f);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/beamformer/array_util.h b/modules/audio_processing/beamformer/array_util.h
new file mode 100644
index 0000000..f234929
--- /dev/null
+++ b/modules/audio_processing/beamformer/array_util.h
@@ -0,0 +1,117 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_BEAMFORMER_ARRAY_UTIL_H_
+#define MODULES_AUDIO_PROCESSING_BEAMFORMER_ARRAY_UTIL_H_
+
+#include <cmath>
+#include <vector>
+
+#include "api/optional.h"
+
+namespace webrtc {
+
+// Coordinates in meters. The convention used is:
+// x: the horizontal dimension, with positive to the right from the camera's
+//    perspective.
+// y: the depth dimension, with positive forward from the camera's
+//    perspective.
+// z: the vertical dimension, with positive upwards.
+template<typename T>
+struct CartesianPoint {
+  CartesianPoint() {
+    c[0] = 0;
+    c[1] = 0;
+    c[2] = 0;
+  }
+  CartesianPoint(T x, T y, T z) {
+    c[0] = x;
+    c[1] = y;
+    c[2] = z;
+  }
+  T x() const { return c[0]; }
+  T y() const { return c[1]; }
+  T z() const { return c[2]; }
+  T c[3];
+};
+
+using Point = CartesianPoint<float>;
+
+// Calculates the direction from a to b.
+Point PairDirection(const Point& a, const Point& b);
+
+float DotProduct(const Point& a, const Point& b);
+Point CrossProduct(const Point& a, const Point& b);
+
+bool AreParallel(const Point& a, const Point& b);
+bool ArePerpendicular(const Point& a, const Point& b);
+
+// Returns the minimum distance between any two Points in the given
+// |array_geometry|.
+float GetMinimumSpacing(const std::vector<Point>& array_geometry);
+
+// If the given array geometry is linear it returns the direction without
+// normalizing.
+rtc::Optional<Point> GetDirectionIfLinear(
+    const std::vector<Point>& array_geometry);
+
+// If the given array geometry is planar it returns the normal without
+// normalizing.
+rtc::Optional<Point> GetNormalIfPlanar(
+    const std::vector<Point>& array_geometry);
+
+// Returns the normal of an array if it has one and it is in the xy-plane.
+rtc::Optional<Point> GetArrayNormalIfExists(
+    const std::vector<Point>& array_geometry);
+
+// The resulting Point will be in the xy-plane.
+Point AzimuthToPoint(float azimuth);
+
+template<typename T>
+float Distance(CartesianPoint<T> a, CartesianPoint<T> b) {
+  return std::sqrt((a.x() - b.x()) * (a.x() - b.x()) +
+                   (a.y() - b.y()) * (a.y() - b.y()) +
+                   (a.z() - b.z()) * (a.z() - b.z()));
+}
+
+// The convention used:
+// azimuth: zero is to the right from the camera's perspective, with positive
+//          angles in radians counter-clockwise.
+// elevation: zero is horizontal, with positive angles in radians upwards.
+// radius: distance from the camera in meters.
+template <typename T>
+struct SphericalPoint {
+  SphericalPoint(T azimuth, T elevation, T radius) {
+    s[0] = azimuth;
+    s[1] = elevation;
+    s[2] = radius;
+  }
+  T azimuth() const { return s[0]; }
+  T elevation() const { return s[1]; }
+  T distance() const { return s[2]; }
+  T s[3];
+};
+
+using SphericalPointf = SphericalPoint<float>;
+
+// Helper functions to transform degrees to radians and the inverse.
+template <typename T>
+T DegreesToRadians(T angle_degrees) {
+  return M_PI * angle_degrees / 180;
+}
+
+template <typename T>
+T RadiansToDegrees(T angle_radians) {
+  return 180 * angle_radians / M_PI;
+}
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_BEAMFORMER_ARRAY_UTIL_H_
diff --git a/modules/audio_processing/beamformer/array_util_unittest.cc b/modules/audio_processing/beamformer/array_util_unittest.cc
new file mode 100644
index 0000000..a5c075a
--- /dev/null
+++ b/modules/audio_processing/beamformer/array_util_unittest.cc
@@ -0,0 +1,185 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+
+#include "modules/audio_processing/beamformer/array_util.h"
+
+#include <math.h>
+#include <vector>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+bool operator==(const Point& lhs, const Point& rhs) {
+  return lhs.x() == rhs.x() && lhs.y() == rhs.y() && lhs.z() == rhs.z();
+}
+
+TEST(ArrayUtilTest, PairDirection) {
+  EXPECT_EQ(Point(1.f, 2.f, 3.f),
+            PairDirection(Point(0.f, 0.f, 0.f), Point(1.f, 2.f, 3.f)));
+  EXPECT_EQ(Point(-1.f, -2.f, -3.f),
+            PairDirection(Point(1.f, 2.f, 3.f), Point(0.f, 0.f, 0.f)));
+  EXPECT_EQ(Point(0.f, 0.f, 0.f),
+            PairDirection(Point(1.f, 0.f, 0.f), Point(1.f, 0.f, 0.f)));
+  EXPECT_EQ(Point(-1.f, 2.f, 0.f),
+            PairDirection(Point(1.f, 0.f, 0.f), Point(0.f, 2.f, 0.f)));
+  EXPECT_EQ(Point(-4.f, 4.f, -4.f),
+            PairDirection(Point(1.f, -2.f, 3.f), Point(-3.f, 2.f, -1.f)));
+}
+
+TEST(ArrayUtilTest, DotProduct) {
+  EXPECT_FLOAT_EQ(0.f, DotProduct(Point(0.f, 0.f, 0.f), Point(1.f, 2.f, 3.f)));
+  EXPECT_FLOAT_EQ(0.f, DotProduct(Point(1.f, 0.f, 2.f), Point(0.f, 3.f, 0.f)));
+  EXPECT_FLOAT_EQ(0.f, DotProduct(Point(1.f, 1.f, 0.f), Point(1.f, -1.f, 0.f)));
+  EXPECT_FLOAT_EQ(2.f, DotProduct(Point(1.f, 0.f, 0.f), Point(2.f, 0.f, 0.f)));
+  EXPECT_FLOAT_EQ(-6.f,
+                  DotProduct(Point(-2.f, 0.f, 0.f), Point(3.f, 0.f, 0.f)));
+  EXPECT_FLOAT_EQ(-10.f,
+                  DotProduct(Point(1.f, -2.f, 3.f), Point(-3.f, 2.f, -1.f)));
+}
+
+TEST(ArrayUtilTest, CrossProduct) {
+  EXPECT_EQ(Point(0.f, 0.f, 0.f),
+            CrossProduct(Point(0.f, 0.f, 0.f), Point(1.f, 2.f, 3.f)));
+  EXPECT_EQ(Point(0.f, 0.f, 1.f),
+            CrossProduct(Point(1.f, 0.f, 0.f), Point(0.f, 1.f, 0.f)));
+  EXPECT_EQ(Point(1.f, 0.f, 0.f),
+            CrossProduct(Point(0.f, 1.f, 0.f), Point(0.f, 0.f, 1.f)));
+  EXPECT_EQ(Point(0.f, -1.f, 0.f),
+            CrossProduct(Point(1.f, 0.f, 0.f), Point(0.f, 0.f, 1.f)));
+  EXPECT_EQ(Point(-4.f, -8.f, -4.f),
+            CrossProduct(Point(1.f, -2.f, 3.f), Point(-3.f, 2.f, -1.f)));
+}
+
+TEST(ArrayUtilTest, AreParallel) {
+  EXPECT_TRUE(AreParallel(Point(0.f, 0.f, 0.f), Point(1.f, 2.f, 3.f)));
+  EXPECT_FALSE(AreParallel(Point(1.f, 0.f, 2.f), Point(0.f, 3.f, 0.f)));
+  EXPECT_FALSE(AreParallel(Point(1.f, 2.f, 0.f), Point(1.f, -0.5f, 0.f)));
+  EXPECT_FALSE(AreParallel(Point(1.f, -2.f, 3.f), Point(-3.f, 2.f, -1.f)));
+  EXPECT_TRUE(AreParallel(Point(1.f, 0.f, 0.f), Point(2.f, 0.f, 0.f)));
+  EXPECT_TRUE(AreParallel(Point(1.f, 2.f, 3.f), Point(-2.f, -4.f, -6.f)));
+}
+
+TEST(ArrayUtilTest, ArePerpendicular) {
+  EXPECT_TRUE(ArePerpendicular(Point(0.f, 0.f, 0.f), Point(1.f, 2.f, 3.f)));
+  EXPECT_TRUE(ArePerpendicular(Point(1.f, 0.f, 2.f), Point(0.f, 3.f, 0.f)));
+  EXPECT_TRUE(ArePerpendicular(Point(1.f, 2.f, 0.f), Point(1.f, -0.5f, 0.f)));
+  EXPECT_FALSE(ArePerpendicular(Point(1.f, -2.f, 3.f), Point(-3.f, 2.f, -1.f)));
+  EXPECT_FALSE(ArePerpendicular(Point(1.f, 0.f, 0.f), Point(2.f, 0.f, 0.f)));
+  EXPECT_FALSE(ArePerpendicular(Point(1.f, 2.f, 3.f), Point(-2.f, -4.f, -6.f)));
+}
+
+TEST(ArrayUtilTest, GetMinimumSpacing) {
+  std::vector<Point> geometry;
+  geometry.push_back(Point(0.f, 0.f, 0.f));
+  geometry.push_back(Point(0.1f, 0.f, 0.f));
+  EXPECT_FLOAT_EQ(0.1f, GetMinimumSpacing(geometry));
+  geometry.push_back(Point(0.f, 0.05f, 0.f));
+  EXPECT_FLOAT_EQ(0.05f, GetMinimumSpacing(geometry));
+  geometry.push_back(Point(0.f, 0.f, 0.02f));
+  EXPECT_FLOAT_EQ(0.02f, GetMinimumSpacing(geometry));
+  geometry.push_back(Point(-0.003f, -0.004f, 0.02f));
+  EXPECT_FLOAT_EQ(0.005f, GetMinimumSpacing(geometry));
+}
+
+TEST(ArrayUtilTest, GetDirectionIfLinear) {
+  std::vector<Point> geometry;
+  geometry.push_back(Point(0.f, 0.f, 0.f));
+  geometry.push_back(Point(0.1f, 0.f, 0.f));
+  EXPECT_TRUE(
+      AreParallel(Point(1.f, 0.f, 0.f), *GetDirectionIfLinear(geometry)));
+  geometry.push_back(Point(0.15f, 0.f, 0.f));
+  EXPECT_TRUE(
+      AreParallel(Point(1.f, 0.f, 0.f), *GetDirectionIfLinear(geometry)));
+  geometry.push_back(Point(-0.2f, 0.f, 0.f));
+  EXPECT_TRUE(
+      AreParallel(Point(1.f, 0.f, 0.f), *GetDirectionIfLinear(geometry)));
+  geometry.push_back(Point(0.05f, 0.f, 0.f));
+  EXPECT_TRUE(
+      AreParallel(Point(1.f, 0.f, 0.f), *GetDirectionIfLinear(geometry)));
+  geometry.push_back(Point(0.1f, 0.1f, 0.f));
+  EXPECT_FALSE(GetDirectionIfLinear(geometry));
+  geometry.push_back(Point(0.f, 0.f, -0.2f));
+  EXPECT_FALSE(GetDirectionIfLinear(geometry));
+}
+
+TEST(ArrayUtilTest, GetNormalIfPlanar) {
+  std::vector<Point> geometry;
+  geometry.push_back(Point(0.f, 0.f, 0.f));
+  geometry.push_back(Point(0.1f, 0.f, 0.f));
+  EXPECT_FALSE(GetNormalIfPlanar(geometry));
+  geometry.push_back(Point(0.15f, 0.f, 0.f));
+  EXPECT_FALSE(GetNormalIfPlanar(geometry));
+  geometry.push_back(Point(0.1f, 0.2f, 0.f));
+  EXPECT_TRUE(AreParallel(Point(0.f, 0.f, 1.f), *GetNormalIfPlanar(geometry)));
+  geometry.push_back(Point(0.f, -0.15f, 0.f));
+  EXPECT_TRUE(AreParallel(Point(0.f, 0.f, 1.f), *GetNormalIfPlanar(geometry)));
+  geometry.push_back(Point(0.f, 0.1f, 0.2f));
+  EXPECT_FALSE(GetNormalIfPlanar(geometry));
+  geometry.push_back(Point(0.f, 0.f, -0.15f));
+  EXPECT_FALSE(GetNormalIfPlanar(geometry));
+  geometry.push_back(Point(0.1f, 0.2f, 0.f));
+  EXPECT_FALSE(GetNormalIfPlanar(geometry));
+}
+
+TEST(ArrayUtilTest, GetArrayNormalIfExists) {
+  std::vector<Point> geometry;
+  geometry.push_back(Point(0.f, 0.f, 0.f));
+  geometry.push_back(Point(0.1f, 0.f, 0.f));
+  EXPECT_TRUE(
+      AreParallel(Point(0.f, 1.f, 0.f), *GetArrayNormalIfExists(geometry)));
+  geometry.push_back(Point(0.15f, 0.f, 0.f));
+  EXPECT_TRUE(
+      AreParallel(Point(0.f, 1.f, 0.f), *GetArrayNormalIfExists(geometry)));
+  geometry.push_back(Point(0.1f, 0.f, 0.2f));
+  EXPECT_TRUE(
+      AreParallel(Point(0.f, 1.f, 0.f), *GetArrayNormalIfExists(geometry)));
+  geometry.push_back(Point(0.f, 0.f, -0.1f));
+  EXPECT_TRUE(
+      AreParallel(Point(0.f, 1.f, 0.f), *GetArrayNormalIfExists(geometry)));
+  geometry.push_back(Point(0.1f, 0.2f, 0.3f));
+  EXPECT_FALSE(GetArrayNormalIfExists(geometry));
+  geometry.push_back(Point(0.f, -0.1f, 0.f));
+  EXPECT_FALSE(GetArrayNormalIfExists(geometry));
+  geometry.push_back(Point(1.f, 0.f, -0.2f));
+  EXPECT_FALSE(GetArrayNormalIfExists(geometry));
+}
+
+TEST(ArrayUtilTest, DegreesToRadians) {
+  EXPECT_FLOAT_EQ(0.f, DegreesToRadians(0.f));
+  EXPECT_FLOAT_EQ(static_cast<float>(M_PI) / 6.f, DegreesToRadians(30.f));
+  EXPECT_FLOAT_EQ(-static_cast<float>(M_PI) / 4.f, DegreesToRadians(-45.f));
+  EXPECT_FLOAT_EQ(static_cast<float>(M_PI) / 3.f, DegreesToRadians(60.f));
+  EXPECT_FLOAT_EQ(-static_cast<float>(M_PI) / 2.f, DegreesToRadians(-90.f));
+  EXPECT_FLOAT_EQ(2.f * static_cast<float>(M_PI) / 3.f,
+                  DegreesToRadians(120.f));
+  EXPECT_FLOAT_EQ(-3.f * static_cast<float>(M_PI) / 4.f,
+                  DegreesToRadians(-135.f));
+  EXPECT_FLOAT_EQ(5.f * static_cast<float>(M_PI) / 6.f,
+                  DegreesToRadians(150.f));
+  EXPECT_FLOAT_EQ(-static_cast<float>(M_PI), DegreesToRadians(-180.f));
+}
+
+TEST(ArrayUtilTest, RadiansToDegrees) {
+  EXPECT_FLOAT_EQ(0.f, RadiansToDegrees(0.f));
+  EXPECT_FLOAT_EQ(30.f, RadiansToDegrees(M_PI / 6.f));
+  EXPECT_FLOAT_EQ(-45.f, RadiansToDegrees(-M_PI / 4.f));
+  EXPECT_FLOAT_EQ(60.f, RadiansToDegrees(M_PI / 3.f));
+  EXPECT_FLOAT_EQ(-90.f, RadiansToDegrees(-M_PI / 2.f));
+  EXPECT_FLOAT_EQ(120.f, RadiansToDegrees(2.f * M_PI / 3.f));
+  EXPECT_FLOAT_EQ(-135.f, RadiansToDegrees(-3.f * M_PI / 4.f));
+  EXPECT_FLOAT_EQ(150.f, RadiansToDegrees(5.f * M_PI / 6.f));
+  EXPECT_FLOAT_EQ(-180.f, RadiansToDegrees(-M_PI));
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/beamformer/complex_matrix.h b/modules/audio_processing/beamformer/complex_matrix.h
new file mode 100644
index 0000000..9960e1d
--- /dev/null
+++ b/modules/audio_processing/beamformer/complex_matrix.h
@@ -0,0 +1,96 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_BEAMFORMER_COMPLEX_MATRIX_H_
+#define MODULES_AUDIO_PROCESSING_BEAMFORMER_COMPLEX_MATRIX_H_
+
+#include <complex>
+
+#include "modules/audio_processing/beamformer/matrix.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+using std::complex;
+
+// An extension of Matrix for operations that only work on a complex type.
+template <typename T>
+class ComplexMatrix : public Matrix<complex<T> > {
+ public:
+  ComplexMatrix() : Matrix<complex<T> >() {}
+
+  ComplexMatrix(size_t num_rows, size_t num_columns)
+      : Matrix<complex<T> >(num_rows, num_columns) {}
+
+  ComplexMatrix(const complex<T>* data, size_t num_rows, size_t num_columns)
+      : Matrix<complex<T> >(data, num_rows, num_columns) {}
+
+  // Complex Matrix operations.
+  ComplexMatrix& PointwiseConjugate() {
+    complex<T>* const data = this->data();
+    size_t size = this->num_rows() * this->num_columns();
+    for (size_t i = 0; i < size; ++i) {
+      data[i] = conj(data[i]);
+    }
+
+    return *this;
+  }
+
+  ComplexMatrix& PointwiseConjugate(const ComplexMatrix& operand) {
+    this->CopyFrom(operand);
+    return PointwiseConjugate();
+  }
+
+  ComplexMatrix& ConjugateTranspose() {
+    this->CopyDataToScratch();
+    size_t num_rows = this->num_rows();
+    this->SetNumRows(this->num_columns());
+    this->SetNumColumns(num_rows);
+    this->Resize();
+    return ConjugateTranspose(this->scratch_elements());
+  }
+
+  ComplexMatrix& ConjugateTranspose(const ComplexMatrix& operand) {
+    RTC_CHECK_EQ(operand.num_rows(), this->num_columns());
+    RTC_CHECK_EQ(operand.num_columns(), this->num_rows());
+    return ConjugateTranspose(operand.elements());
+  }
+
+  ComplexMatrix& ZeroImag() {
+    complex<T>* const data = this->data();
+    size_t size = this->num_rows() * this->num_columns();
+    for (size_t i = 0; i < size; ++i) {
+      data[i] = complex<T>(data[i].real(), 0);
+    }
+
+    return *this;
+  }
+
+  ComplexMatrix& ZeroImag(const ComplexMatrix& operand) {
+    this->CopyFrom(operand);
+    return ZeroImag();
+  }
+
+ private:
+  ComplexMatrix& ConjugateTranspose(const complex<T>* const* src) {
+    complex<T>* const* elements = this->elements();
+    for (size_t i = 0; i < this->num_rows(); ++i) {
+      for (size_t j = 0; j < this->num_columns(); ++j) {
+        elements[i][j] = conj(src[j][i]);
+      }
+    }
+
+    return *this;
+  }
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_BEAMFORMER_COMPLEX_MATRIX_H_
diff --git a/modules/audio_processing/beamformer/complex_matrix_unittest.cc b/modules/audio_processing/beamformer/complex_matrix_unittest.cc
new file mode 100644
index 0000000..e11dfd2
--- /dev/null
+++ b/modules/audio_processing/beamformer/complex_matrix_unittest.cc
@@ -0,0 +1,102 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/beamformer/complex_matrix.h"
+#include "modules/audio_processing/beamformer/matrix_test_helpers.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(ComplexMatrixTest, TestPointwiseConjugate) {
+  const int kNumRows = 2;
+  const int kNumCols = 4;
+
+  const complex<float> kValuesInitial[kNumRows][kNumCols] = {
+      {complex<float>(1.1f, 1.1f), complex<float>(2.2f, -2.2f),
+       complex<float>(3.3f, 3.3f), complex<float>(4.4f, -4.4f)},
+      {complex<float>(5.5f, 5.5f), complex<float>(6.6f, -6.6f),
+       complex<float>(7.7f, 7.7f), complex<float>(8.8f, -8.8f)}};
+
+  const complex<float> kValuesExpected[kNumRows][kNumCols] = {
+      {complex<float>(1.1f, -1.1f), complex<float>(2.2f, 2.2f),
+       complex<float>(3.3f, -3.3f), complex<float>(4.4f, 4.4f)},
+      {complex<float>(5.5f, -5.5f), complex<float>(6.6f, 6.6f),
+       complex<float>(7.7f, -7.7f), complex<float>(8.8f, 8.8f)}};
+
+  ComplexMatrix<float> initial_mat(*kValuesInitial, kNumRows, kNumCols);
+  ComplexMatrix<float> expected_result(*kValuesExpected, kNumRows, kNumCols);
+  ComplexMatrix<float> actual_result(kNumRows, kNumCols);
+
+  actual_result.PointwiseConjugate(initial_mat);
+  MatrixTestHelpers::ValidateMatrixEqualityComplexFloat(expected_result,
+                                                        actual_result);
+
+  initial_mat.PointwiseConjugate();
+  MatrixTestHelpers::ValidateMatrixEqualityComplexFloat(initial_mat,
+                                                        actual_result);
+}
+
+TEST(ComplexMatrixTest, TestConjugateTranspose) {
+  const int kNumInitialRows = 2;
+  const int kNumInitialCols = 4;
+  const int kNumResultRows = 4;
+  const int kNumResultCols = 2;
+
+  const complex<float> kValuesInitial[kNumInitialRows][kNumInitialCols] = {
+      {complex<float>(1.1f, 1.1f), complex<float>(2.2f, 2.2f),
+       complex<float>(3.3f, 3.3f), complex<float>(4.4f, 4.4f)},
+      {complex<float>(5.5f, 5.5f), complex<float>(6.6f, 6.6f),
+       complex<float>(7.7f, 7.7f), complex<float>(8.8f, 8.8f)}};
+
+  const complex<float> kValuesExpected[kNumResultRows][kNumResultCols] = {
+      {complex<float>(1.1f, -1.1f), complex<float>(5.5f, -5.5f)},
+      {complex<float>(2.2f, -2.2f), complex<float>(6.6f, -6.6f)},
+      {complex<float>(3.3f, -3.3f), complex<float>(7.7f, -7.7f)},
+      {complex<float>(4.4f, -4.4f), complex<float>(8.8f, -8.8f)}};
+
+  ComplexMatrix<float> initial_mat(
+      *kValuesInitial, kNumInitialRows, kNumInitialCols);
+  ComplexMatrix<float> expected_result(
+      *kValuesExpected, kNumResultRows, kNumResultCols);
+  ComplexMatrix<float> actual_result(kNumResultRows, kNumResultCols);
+
+  actual_result.ConjugateTranspose(initial_mat);
+  MatrixTestHelpers::ValidateMatrixEqualityComplexFloat(expected_result,
+                                                        actual_result);
+
+  initial_mat.ConjugateTranspose();
+  MatrixTestHelpers::ValidateMatrixEqualityComplexFloat(initial_mat,
+                                                        actual_result);
+}
+
+TEST(ComplexMatrixTest, TestZeroImag) {
+  const int kNumRows = 2;
+  const int kNumCols = 2;
+  const complex<float> kValuesInitial[kNumRows][kNumCols] = {
+      {complex<float>(1.1f, 1.1f), complex<float>(2.2f, 2.2f)},
+      {complex<float>(3.3f, 3.3f), complex<float>(4.4f, 4.4f)}};
+  const complex<float> kValuesExpected[kNumRows][kNumCols] = {
+      {complex<float>(1.1f, 0.f), complex<float>(2.2f, 0.f)},
+      {complex<float>(3.3f, 0.f), complex<float>(4.4f, 0.f)}};
+
+  ComplexMatrix<float> initial_mat(*kValuesInitial, kNumRows, kNumCols);
+  ComplexMatrix<float> expected_result(*kValuesExpected, kNumRows, kNumCols);
+  ComplexMatrix<float> actual_result;
+
+  actual_result.ZeroImag(initial_mat);
+  MatrixTestHelpers::ValidateMatrixEqualityComplexFloat(expected_result,
+                                                        actual_result);
+
+  initial_mat.ZeroImag();
+  MatrixTestHelpers::ValidateMatrixEqualityComplexFloat(initial_mat,
+                                                        actual_result);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/beamformer/covariance_matrix_generator.cc b/modules/audio_processing/beamformer/covariance_matrix_generator.cc
new file mode 100644
index 0000000..df36d59
--- /dev/null
+++ b/modules/audio_processing/beamformer/covariance_matrix_generator.cc
@@ -0,0 +1,114 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#define _USE_MATH_DEFINES
+
+#include "modules/audio_processing/beamformer/covariance_matrix_generator.h"
+
+#include <cmath>
+
+namespace webrtc {
+namespace {
+
+float BesselJ0(float x) {
+#ifdef WEBRTC_WIN
+  return _j0(x);
+#else
+  return j0(x);
+#endif
+}
+
+// Calculates the Euclidean norm for a row vector.
+float Norm(const ComplexMatrix<float>& x) {
+  RTC_CHECK_EQ(1, x.num_rows());
+  const size_t length = x.num_columns();
+  const complex<float>* elems = x.elements()[0];
+  float result = 0.f;
+  for (size_t i = 0u; i < length; ++i) {
+    result += std::norm(elems[i]);
+  }
+  return std::sqrt(result);
+}
+
+}  // namespace
+
+void CovarianceMatrixGenerator::UniformCovarianceMatrix(
+    float wave_number,
+    const std::vector<Point>& geometry,
+    ComplexMatrix<float>* mat) {
+  RTC_CHECK_EQ(geometry.size(), mat->num_rows());
+  RTC_CHECK_EQ(geometry.size(), mat->num_columns());
+
+  complex<float>* const* mat_els = mat->elements();
+  for (size_t i = 0; i < geometry.size(); ++i) {
+    for (size_t j = 0; j < geometry.size(); ++j) {
+      if (wave_number > 0.f) {
+        mat_els[i][j] =
+            BesselJ0(wave_number * Distance(geometry[i], geometry[j]));
+      } else {
+        mat_els[i][j] = i == j ? 1.f : 0.f;
+      }
+    }
+  }
+}
+
+void CovarianceMatrixGenerator::AngledCovarianceMatrix(
+    float sound_speed,
+    float angle,
+    size_t frequency_bin,
+    size_t fft_size,
+    size_t num_freq_bins,
+    int sample_rate,
+    const std::vector<Point>& geometry,
+    ComplexMatrix<float>* mat) {
+  RTC_CHECK_EQ(geometry.size(), mat->num_rows());
+  RTC_CHECK_EQ(geometry.size(), mat->num_columns());
+
+  ComplexMatrix<float> interf_cov_vector(1, geometry.size());
+  ComplexMatrix<float> interf_cov_vector_transposed(geometry.size(), 1);
+  PhaseAlignmentMasks(frequency_bin,
+                      fft_size,
+                      sample_rate,
+                      sound_speed,
+                      geometry,
+                      angle,
+                      &interf_cov_vector);
+  interf_cov_vector.Scale(1.f / Norm(interf_cov_vector));
+  interf_cov_vector_transposed.Transpose(interf_cov_vector);
+  interf_cov_vector.PointwiseConjugate();
+  mat->Multiply(interf_cov_vector_transposed, interf_cov_vector);
+}
+
+void CovarianceMatrixGenerator::PhaseAlignmentMasks(
+    size_t frequency_bin,
+    size_t fft_size,
+    int sample_rate,
+    float sound_speed,
+    const std::vector<Point>& geometry,
+    float angle,
+    ComplexMatrix<float>* mat) {
+  RTC_CHECK_EQ(1, mat->num_rows());
+  RTC_CHECK_EQ(geometry.size(), mat->num_columns());
+
+  float freq_in_hertz =
+      (static_cast<float>(frequency_bin) / fft_size) * sample_rate;
+
+  complex<float>* const* mat_els = mat->elements();
+  for (size_t c_ix = 0; c_ix < geometry.size(); ++c_ix) {
+    float distance = std::cos(angle) * geometry[c_ix].x() +
+                     std::sin(angle) * geometry[c_ix].y();
+    float phase_shift = -2.f * M_PI * distance * freq_in_hertz / sound_speed;
+
+    // Euler's formula for mat[0][c_ix] = e^(j * phase_shift).
+    mat_els[0][c_ix] = complex<float>(cos(phase_shift), sin(phase_shift));
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/beamformer/covariance_matrix_generator.h b/modules/audio_processing/beamformer/covariance_matrix_generator.h
new file mode 100644
index 0000000..6a5841f
--- /dev/null
+++ b/modules/audio_processing/beamformer/covariance_matrix_generator.h
@@ -0,0 +1,54 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_BEAMFORMER_COVARIANCE_MATRIX_GENERATOR_H_
+#define MODULES_AUDIO_PROCESSING_BEAMFORMER_COVARIANCE_MATRIX_GENERATOR_H_
+
+#include "modules/audio_processing/beamformer/complex_matrix.h"
+#include "modules/audio_processing/beamformer/array_util.h"
+
+namespace webrtc {
+
+// Helper class for Beamformer in charge of generating covariance matrices. For
+// each function, the passed-in ComplexMatrix is expected to be of size
+// |num_input_channels| x |num_input_channels|.
+class CovarianceMatrixGenerator {
+ public:
+  // A uniform covariance matrix with a gap at the target location. WARNING:
+  // The target angle is assumed to be 0.
+  static void UniformCovarianceMatrix(float wave_number,
+                                      const std::vector<Point>& geometry,
+                                      ComplexMatrix<float>* mat);
+
+  // The covariance matrix of a source at the given angle.
+  static void AngledCovarianceMatrix(float sound_speed,
+                                     float angle,
+                                     size_t frequency_bin,
+                                     size_t fft_size,
+                                     size_t num_freq_bins,
+                                     int sample_rate,
+                                     const std::vector<Point>& geometry,
+                                     ComplexMatrix<float>* mat);
+
+  // Calculates phase shifts that, when applied to a multichannel signal and
+  // added together, cause constructive interferernce for sources located at
+  // the given angle.
+  static void PhaseAlignmentMasks(size_t frequency_bin,
+                                  size_t fft_size,
+                                  int sample_rate,
+                                  float sound_speed,
+                                  const std::vector<Point>& geometry,
+                                  float angle,
+                                  ComplexMatrix<float>* mat);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_BEAMFORMER_BF_HELPERS_H_
diff --git a/modules/audio_processing/beamformer/covariance_matrix_generator_unittest.cc b/modules/audio_processing/beamformer/covariance_matrix_generator_unittest.cc
new file mode 100644
index 0000000..a6518e5
--- /dev/null
+++ b/modules/audio_processing/beamformer/covariance_matrix_generator_unittest.cc
@@ -0,0 +1,230 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#define _USE_MATH_DEFINES
+
+#include "modules/audio_processing/beamformer/covariance_matrix_generator.h"
+
+#include <cmath>
+
+#include "modules/audio_processing/beamformer/matrix_test_helpers.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+using std::complex;
+
+TEST(CovarianceMatrixGeneratorTest, TestUniformCovarianceMatrix2Mics) {
+  const float kWaveNumber = 0.5775f;
+  const int kNumberMics = 2;
+  const float kMicSpacing = 0.05f;
+  const float kTolerance = 0.0001f;
+  std::vector<Point> geometry;
+  float first_mic = (kNumberMics - 1) * kMicSpacing / 2.f;
+  for (int i = 0; i < kNumberMics; ++i) {
+    geometry.push_back(Point(i * kMicSpacing - first_mic, 0.f, 0.f));
+  }
+  ComplexMatrix<float> actual_covariance_matrix(kNumberMics, kNumberMics);
+  CovarianceMatrixGenerator::UniformCovarianceMatrix(kWaveNumber,
+                                                     geometry,
+                                                     &actual_covariance_matrix);
+
+  complex<float>* const* actual_els = actual_covariance_matrix.elements();
+
+  EXPECT_NEAR(actual_els[0][0].real(), 1.f, kTolerance);
+  EXPECT_NEAR(actual_els[0][1].real(), 0.9998f, kTolerance);
+  EXPECT_NEAR(actual_els[1][0].real(), 0.9998f, kTolerance);
+  EXPECT_NEAR(actual_els[1][1].real(), 1.f, kTolerance);
+
+  EXPECT_NEAR(actual_els[0][0].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[0][1].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[1][0].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[1][1].imag(), 0.f, kTolerance);
+}
+
+TEST(CovarianceMatrixGeneratorTest, TestUniformCovarianceMatrix3Mics) {
+  const float kWaveNumber = 10.3861f;
+  const int kNumberMics = 3;
+  const float kMicSpacing = 0.04f;
+  const float kTolerance = 0.0001f;
+  std::vector<Point> geometry;
+  float first_mic = (kNumberMics - 1) * kMicSpacing / 2.f;
+  for (int i = 0; i < kNumberMics; ++i) {
+    geometry.push_back(Point(i * kMicSpacing - first_mic, 0.f, 0.f));
+  }
+  ComplexMatrix<float> actual_covariance_matrix(kNumberMics, kNumberMics);
+  CovarianceMatrixGenerator::UniformCovarianceMatrix(kWaveNumber,
+                                                     geometry,
+                                                     &actual_covariance_matrix);
+
+  complex<float>* const* actual_els = actual_covariance_matrix.elements();
+
+  EXPECT_NEAR(actual_els[0][0].real(), 1.f, kTolerance);
+  EXPECT_NEAR(actual_els[0][1].real(), 0.9573f, kTolerance);
+  EXPECT_NEAR(actual_els[0][2].real(), 0.8347f, kTolerance);
+  EXPECT_NEAR(actual_els[1][0].real(), 0.9573f, kTolerance);
+  EXPECT_NEAR(actual_els[1][1].real(), 1.f, kTolerance);
+  EXPECT_NEAR(actual_els[1][2].real(), 0.9573f, kTolerance);
+  EXPECT_NEAR(actual_els[2][0].real(), 0.8347f, kTolerance);
+  EXPECT_NEAR(actual_els[2][1].real(), 0.9573f, kTolerance);
+  EXPECT_NEAR(actual_els[2][2].real(), 1.f, kTolerance);
+
+  EXPECT_NEAR(actual_els[0][0].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[0][1].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[0][2].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[1][0].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[1][1].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[1][2].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[2][0].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[2][1].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[2][2].imag(), 0.f, kTolerance);
+}
+
+TEST(CovarianceMatrixGeneratorTest, TestUniformCovarianceMatrix3DArray) {
+  const float kWaveNumber = 1.2345f;
+  const int kNumberMics = 4;
+  const float kTolerance = 0.0001f;
+  std::vector<Point> geometry;
+  geometry.push_back(Point(-0.025f, -0.05f, -0.075f));
+  geometry.push_back(Point(0.075f, -0.05f, -0.075f));
+  geometry.push_back(Point(-0.025f, 0.15f, -0.075f));
+  geometry.push_back(Point(-0.025f, -0.05f, 0.225f));
+  ComplexMatrix<float> actual_covariance_matrix(kNumberMics, kNumberMics);
+  CovarianceMatrixGenerator::UniformCovarianceMatrix(kWaveNumber,
+                                                     geometry,
+                                                     &actual_covariance_matrix);
+
+  complex<float>* const* actual_els = actual_covariance_matrix.elements();
+
+  EXPECT_NEAR(actual_els[0][0].real(), 1.f, kTolerance);
+  EXPECT_NEAR(actual_els[0][1].real(), 0.9962f, kTolerance);
+  EXPECT_NEAR(actual_els[0][2].real(), 0.9848f, kTolerance);
+  EXPECT_NEAR(actual_els[0][3].real(), 0.9660f, kTolerance);
+  EXPECT_NEAR(actual_els[1][0].real(), 0.9962f, kTolerance);
+  EXPECT_NEAR(actual_els[1][1].real(), 1.f, kTolerance);
+  EXPECT_NEAR(actual_els[1][2].real(), 0.9810f, kTolerance);
+  EXPECT_NEAR(actual_els[1][3].real(), 0.9623f, kTolerance);
+  EXPECT_NEAR(actual_els[2][0].real(), 0.9848f, kTolerance);
+  EXPECT_NEAR(actual_els[2][1].real(), 0.9810f, kTolerance);
+  EXPECT_NEAR(actual_els[2][2].real(), 1.f, kTolerance);
+  EXPECT_NEAR(actual_els[2][3].real(), 0.9511f, kTolerance);
+  EXPECT_NEAR(actual_els[3][0].real(), 0.9660f, kTolerance);
+  EXPECT_NEAR(actual_els[3][1].real(), 0.9623f, kTolerance);
+  EXPECT_NEAR(actual_els[3][2].real(), 0.9511f, kTolerance);
+  EXPECT_NEAR(actual_els[3][3].real(), 1.f, kTolerance);
+
+  EXPECT_NEAR(actual_els[0][0].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[0][1].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[0][2].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[0][3].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[1][0].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[1][1].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[1][2].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[1][3].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[2][0].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[2][1].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[2][2].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[2][3].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[3][0].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[3][1].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[3][2].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[3][3].imag(), 0.f, kTolerance);
+}
+
+TEST(CovarianceMatrixGeneratorTest, TestAngledCovarianceMatrix2Mics) {
+  const float kSpeedOfSound = 340;
+  const float kAngle = static_cast<float>(M_PI) / 4.f;
+  const float kFrequencyBin = 6;
+  const float kFftSize = 512;
+  const int kNumberFrequencyBins = 257;
+  const int kSampleRate = 16000;
+  const int kNumberMics = 2;
+  const float kMicSpacing = 0.04f;
+  const float kTolerance = 0.0001f;
+  std::vector<Point> geometry;
+  float first_mic = (kNumberMics - 1) * kMicSpacing / 2.f;
+  for (int i = 0; i < kNumberMics; ++i) {
+    geometry.push_back(Point(i * kMicSpacing - first_mic, 0.f, 0.f));
+  }
+  ComplexMatrix<float> actual_covariance_matrix(kNumberMics, kNumberMics);
+  CovarianceMatrixGenerator::AngledCovarianceMatrix(kSpeedOfSound,
+                                                    kAngle,
+                                                    kFrequencyBin,
+                                                    kFftSize,
+                                                    kNumberFrequencyBins,
+                                                    kSampleRate,
+                                                    geometry,
+                                                    &actual_covariance_matrix);
+
+  complex<float>* const* actual_els = actual_covariance_matrix.elements();
+
+  EXPECT_NEAR(actual_els[0][0].real(), 0.5f, kTolerance);
+  EXPECT_NEAR(actual_els[0][1].real(), 0.4976f, kTolerance);
+  EXPECT_NEAR(actual_els[1][0].real(), 0.4976f, kTolerance);
+  EXPECT_NEAR(actual_els[1][1].real(), 0.5f, kTolerance);
+
+  EXPECT_NEAR(actual_els[0][0].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[0][1].imag(), 0.0489f, kTolerance);
+  EXPECT_NEAR(actual_els[1][0].imag(), -0.0489f, kTolerance);
+  EXPECT_NEAR(actual_els[1][1].imag(), 0.f, kTolerance);
+}
+
+TEST(CovarianceMatrixGeneratorTest, TestAngledCovarianceMatrix3Mics) {
+  const float kSpeedOfSound = 340;
+  const float kAngle = static_cast<float>(M_PI) / 4.f;
+  const float kFrequencyBin = 9;
+  const float kFftSize = 512;
+  const int kNumberFrequencyBins = 257;
+  const int kSampleRate = 42000;
+  const int kNumberMics = 3;
+  const float kMicSpacing = 0.05f;
+  const float kTolerance = 0.0001f;
+  std::vector<Point> geometry;
+  float first_mic = (kNumberMics - 1) * kMicSpacing / 2.f;
+  for (int i = 0; i < kNumberMics; ++i) {
+    geometry.push_back(Point(i * kMicSpacing - first_mic, 0.f, 0.f));
+  }
+  ComplexMatrix<float> actual_covariance_matrix(kNumberMics, kNumberMics);
+  CovarianceMatrixGenerator::AngledCovarianceMatrix(kSpeedOfSound,
+                                                    kAngle,
+                                                    kFrequencyBin,
+                                                    kFftSize,
+                                                    kNumberFrequencyBins,
+                                                    kSampleRate,
+                                                    geometry,
+                                                    &actual_covariance_matrix);
+
+  complex<float>* const* actual_els = actual_covariance_matrix.elements();
+
+  EXPECT_NEAR(actual_els[0][0].real(), 0.3333f, kTolerance);
+  EXPECT_NEAR(actual_els[0][1].real(), 0.2953f, kTolerance);
+  EXPECT_NEAR(actual_els[0][2].real(), 0.1899f, kTolerance);
+  EXPECT_NEAR(actual_els[1][0].real(), 0.2953f, kTolerance);
+  EXPECT_NEAR(actual_els[1][1].real(), 0.3333f, kTolerance);
+  EXPECT_NEAR(actual_els[1][2].real(), 0.2953f, kTolerance);
+  EXPECT_NEAR(actual_els[2][0].real(), 0.1899f, kTolerance);
+  EXPECT_NEAR(actual_els[2][1].real(), 0.2953f, kTolerance);
+  EXPECT_NEAR(actual_els[2][2].real(), 0.3333f, kTolerance);
+
+  EXPECT_NEAR(actual_els[0][0].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[0][1].imag(), 0.1546f, kTolerance);
+  EXPECT_NEAR(actual_els[0][2].imag(), 0.274f, kTolerance);
+  EXPECT_NEAR(actual_els[1][0].imag(), -0.1546f, kTolerance);
+  EXPECT_NEAR(actual_els[1][1].imag(), 0.f, kTolerance);
+  EXPECT_NEAR(actual_els[1][2].imag(), 0.1546f, kTolerance);
+  EXPECT_NEAR(actual_els[2][0].imag(), -0.274f, kTolerance);
+  EXPECT_NEAR(actual_els[2][1].imag(), -0.1546f, kTolerance);
+  EXPECT_NEAR(actual_els[2][2].imag(), 0.f, kTolerance);
+}
+
+// PhaseAlignmentMasks is tested by AngledCovarianceMatrix and by
+// InitBeamformerWeights in BeamformerUnittest.
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/beamformer/matrix.h b/modules/audio_processing/beamformer/matrix.h
new file mode 100644
index 0000000..bf94c25
--- /dev/null
+++ b/modules/audio_processing/beamformer/matrix.h
@@ -0,0 +1,369 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_BEAMFORMER_MATRIX_H_
+#define MODULES_AUDIO_PROCESSING_BEAMFORMER_MATRIX_H_
+
+#include <algorithm>
+#include <cstring>
+#include <string>
+#include <vector>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+
+namespace {
+
+// Wrappers to get around the compiler warning resulting from the fact that
+// there's no std::sqrt overload for ints. We cast all non-complex types to
+// a double for the sqrt method.
+template <typename T>
+T sqrt_wrapper(T x) {
+  return sqrt(static_cast<double>(x));
+}
+
+template <typename S>
+std::complex<S> sqrt_wrapper(std::complex<S> x) {
+  return sqrt(x);
+}
+} // namespace
+
+namespace webrtc {
+
+// Matrix is a class for doing standard matrix operations on 2 dimensional
+// matrices of any size. Results of matrix operations are stored in the
+// calling object. Function overloads exist for both in-place (the calling
+// object is used as both an operand and the result) and out-of-place (all
+// operands are passed in as parameters) operations. If operand dimensions
+// mismatch, the program crashes. Out-of-place operations change the size of
+// the calling object, if necessary, before operating.
+//
+// 'In-place' operations that inherently change the size of the matrix (eg.
+// Transpose, Multiply on different-sized matrices) must make temporary copies
+// (|scratch_elements_| and |scratch_data_|) of existing data to complete the
+// operations.
+//
+// The data is stored contiguously. Data can be accessed internally as a flat
+// array, |data_|, or as an array of row pointers, |elements_|, but is
+// available to users only as an array of row pointers through |elements()|.
+// Memory for storage is allocated when a matrix is resized only if the new
+// size overflows capacity. Memory needed temporarily for any operations is
+// similarly resized only if the new size overflows capacity.
+//
+// If you pass in storage through the ctor, that storage is copied into the
+// matrix. TODO(claguna): albeit tricky, allow for data to be referenced
+// instead of copied, and owned by the user.
+template <typename T>
+class Matrix {
+ public:
+  Matrix() : num_rows_(0), num_columns_(0) {}
+
+  // Allocates space for the elements and initializes all values to zero.
+  Matrix(size_t num_rows, size_t num_columns)
+      : num_rows_(num_rows), num_columns_(num_columns) {
+    Resize();
+    scratch_data_.resize(num_rows_ * num_columns_);
+    scratch_elements_.resize(num_rows_);
+  }
+
+  // Copies |data| into the new Matrix.
+  Matrix(const T* data, size_t num_rows, size_t num_columns)
+      : num_rows_(0), num_columns_(0) {
+    CopyFrom(data, num_rows, num_columns);
+    scratch_data_.resize(num_rows_ * num_columns_);
+    scratch_elements_.resize(num_rows_);
+  }
+
+  virtual ~Matrix() {}
+
+  // Deep copy an existing matrix.
+  void CopyFrom(const Matrix& other) {
+    CopyFrom(&other.data_[0], other.num_rows_, other.num_columns_);
+  }
+
+  // Copy |data| into the Matrix. The current data is lost.
+  void CopyFrom(const T* const data, size_t num_rows, size_t num_columns) {
+    Resize(num_rows, num_columns);
+    memcpy(&data_[0], data, num_rows_ * num_columns_ * sizeof(data_[0]));
+  }
+
+  Matrix& CopyFromColumn(const T* const* src,
+                         size_t column_index,
+                         size_t num_rows) {
+    Resize(1, num_rows);
+    for (size_t i = 0; i < num_columns_; ++i) {
+      data_[i] = src[i][column_index];
+    }
+
+    return *this;
+  }
+
+  void Resize(size_t num_rows, size_t num_columns) {
+    if (num_rows != num_rows_ || num_columns != num_columns_) {
+      num_rows_ = num_rows;
+      num_columns_ = num_columns;
+      Resize();
+    }
+  }
+
+  // Accessors and mutators.
+  size_t num_rows() const { return num_rows_; }
+  size_t num_columns() const { return num_columns_; }
+  T* const* elements() { return &elements_[0]; }
+  const T* const* elements() const { return &elements_[0]; }
+
+  T Trace() {
+    RTC_CHECK_EQ(num_rows_, num_columns_);
+
+    T trace = 0;
+    for (size_t i = 0; i < num_rows_; ++i) {
+      trace += elements_[i][i];
+    }
+    return trace;
+  }
+
+  // Matrix Operations. Returns *this to support method chaining.
+  Matrix& Transpose() {
+    CopyDataToScratch();
+    Resize(num_columns_, num_rows_);
+    return Transpose(scratch_elements());
+  }
+
+  Matrix& Transpose(const Matrix& operand) {
+    RTC_CHECK_EQ(operand.num_rows_, num_columns_);
+    RTC_CHECK_EQ(operand.num_columns_, num_rows_);
+
+    return Transpose(operand.elements());
+  }
+
+  template <typename S>
+  Matrix& Scale(const S& scalar) {
+    for (size_t i = 0; i < data_.size(); ++i) {
+      data_[i] *= scalar;
+    }
+
+    return *this;
+  }
+
+  template <typename S>
+  Matrix& Scale(const Matrix& operand, const S& scalar) {
+    CopyFrom(operand);
+    return Scale(scalar);
+  }
+
+  Matrix& Add(const Matrix& operand) {
+    RTC_CHECK_EQ(num_rows_, operand.num_rows_);
+    RTC_CHECK_EQ(num_columns_, operand.num_columns_);
+
+    for (size_t i = 0; i < data_.size(); ++i) {
+      data_[i] += operand.data_[i];
+    }
+
+    return *this;
+  }
+
+  Matrix& Add(const Matrix& lhs, const Matrix& rhs) {
+    CopyFrom(lhs);
+    return Add(rhs);
+  }
+
+  Matrix& Subtract(const Matrix& operand) {
+    RTC_CHECK_EQ(num_rows_, operand.num_rows_);
+    RTC_CHECK_EQ(num_columns_, operand.num_columns_);
+
+    for (size_t i = 0; i < data_.size(); ++i) {
+      data_[i] -= operand.data_[i];
+    }
+
+    return *this;
+  }
+
+  Matrix& Subtract(const Matrix& lhs, const Matrix& rhs) {
+    CopyFrom(lhs);
+    return Subtract(rhs);
+  }
+
+  Matrix& PointwiseMultiply(const Matrix& operand) {
+    RTC_CHECK_EQ(num_rows_, operand.num_rows_);
+    RTC_CHECK_EQ(num_columns_, operand.num_columns_);
+
+    for (size_t i = 0; i < data_.size(); ++i) {
+      data_[i] *= operand.data_[i];
+    }
+
+    return *this;
+  }
+
+  Matrix& PointwiseMultiply(const Matrix& lhs, const Matrix& rhs) {
+    CopyFrom(lhs);
+    return PointwiseMultiply(rhs);
+  }
+
+  Matrix& PointwiseDivide(const Matrix& operand) {
+    RTC_CHECK_EQ(num_rows_, operand.num_rows_);
+    RTC_CHECK_EQ(num_columns_, operand.num_columns_);
+
+    for (size_t i = 0; i < data_.size(); ++i) {
+      data_[i] /= operand.data_[i];
+    }
+
+    return *this;
+  }
+
+  Matrix& PointwiseDivide(const Matrix& lhs, const Matrix& rhs) {
+    CopyFrom(lhs);
+    return PointwiseDivide(rhs);
+  }
+
+  Matrix& PointwiseSquareRoot() {
+    for (size_t i = 0; i < data_.size(); ++i) {
+      data_[i] = sqrt_wrapper(data_[i]);
+    }
+
+    return *this;
+  }
+
+  Matrix& PointwiseSquareRoot(const Matrix& operand) {
+    CopyFrom(operand);
+    return PointwiseSquareRoot();
+  }
+
+  Matrix& PointwiseAbsoluteValue() {
+    for (size_t i = 0; i < data_.size(); ++i) {
+      data_[i] = abs(data_[i]);
+    }
+
+    return *this;
+  }
+
+  Matrix& PointwiseAbsoluteValue(const Matrix& operand) {
+    CopyFrom(operand);
+    return PointwiseAbsoluteValue();
+  }
+
+  Matrix& PointwiseSquare() {
+    for (size_t i = 0; i < data_.size(); ++i) {
+      data_[i] *= data_[i];
+    }
+
+    return *this;
+  }
+
+  Matrix& PointwiseSquare(const Matrix& operand) {
+    CopyFrom(operand);
+    return PointwiseSquare();
+  }
+
+  Matrix& Multiply(const Matrix& lhs, const Matrix& rhs) {
+    RTC_CHECK_EQ(lhs.num_columns_, rhs.num_rows_);
+    RTC_CHECK_EQ(num_rows_, lhs.num_rows_);
+    RTC_CHECK_EQ(num_columns_, rhs.num_columns_);
+
+    return Multiply(lhs.elements(), rhs.num_rows_, rhs.elements());
+  }
+
+  Matrix& Multiply(const Matrix& rhs) {
+    RTC_CHECK_EQ(num_columns_, rhs.num_rows_);
+
+    CopyDataToScratch();
+    Resize(num_rows_, rhs.num_columns_);
+    return Multiply(scratch_elements(), rhs.num_rows_, rhs.elements());
+  }
+
+  std::string ToString() const {
+    std::ostringstream ss;
+    ss << std::endl << "Matrix" << std::endl;
+
+    for (size_t i = 0; i < num_rows_; ++i) {
+      for (size_t j = 0; j < num_columns_; ++j) {
+        ss << elements_[i][j] << " ";
+      }
+      ss << std::endl;
+    }
+    ss << std::endl;
+
+    return ss.str();
+  }
+
+ protected:
+  void SetNumRows(const size_t num_rows) { num_rows_ = num_rows; }
+  void SetNumColumns(const size_t num_columns) { num_columns_ = num_columns; }
+  T* data() { return &data_[0]; }
+  const T* data() const { return &data_[0]; }
+  const T* const* scratch_elements() const { return &scratch_elements_[0]; }
+
+  // Resize the matrix. If an increase in capacity is required, the current
+  // data is lost.
+  void Resize() {
+    size_t size = num_rows_ * num_columns_;
+    data_.resize(size);
+    elements_.resize(num_rows_);
+
+    for (size_t i = 0; i < num_rows_; ++i) {
+      elements_[i] = &data_[i * num_columns_];
+    }
+  }
+
+  // Copies data_ into scratch_data_ and updates scratch_elements_ accordingly.
+  void CopyDataToScratch() {
+    scratch_data_ = data_;
+    scratch_elements_.resize(num_rows_);
+
+    for (size_t i = 0; i < num_rows_; ++i) {
+      scratch_elements_[i] = &scratch_data_[i * num_columns_];
+    }
+  }
+
+ private:
+  size_t num_rows_;
+  size_t num_columns_;
+  std::vector<T> data_;
+  std::vector<T*> elements_;
+
+  // Stores temporary copies of |data_| and |elements_| for in-place operations
+  // where referring to original data is necessary.
+  std::vector<T> scratch_data_;
+  std::vector<T*> scratch_elements_;
+
+  // Helpers for Transpose and Multiply operations that unify in-place and
+  // out-of-place solutions.
+  Matrix& Transpose(const T* const* src) {
+    for (size_t i = 0; i < num_rows_; ++i) {
+      for (size_t j = 0; j < num_columns_; ++j) {
+        elements_[i][j] = src[j][i];
+      }
+    }
+
+    return *this;
+  }
+
+  Matrix& Multiply(const T* const* lhs,
+                   size_t num_rows_rhs,
+                   const T* const* rhs) {
+    for (size_t row = 0; row < num_rows_; ++row) {
+      for (size_t col = 0; col < num_columns_; ++col) {
+        T cur_element = 0;
+        for (size_t i = 0; i < num_rows_rhs; ++i) {
+          cur_element += lhs[row][i] * rhs[i][col];
+        }
+
+        elements_[row][col] = cur_element;
+      }
+    }
+
+    return *this;
+  }
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(Matrix);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_BEAMFORMER_MATRIX_H_
diff --git a/modules/audio_processing/beamformer/matrix_test_helpers.h b/modules/audio_processing/beamformer/matrix_test_helpers.h
new file mode 100644
index 0000000..62b4708
--- /dev/null
+++ b/modules/audio_processing/beamformer/matrix_test_helpers.h
@@ -0,0 +1,102 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_BEAMFORMER_MATRIX_TEST_HELPERS_H_
+#define MODULES_AUDIO_PROCESSING_BEAMFORMER_MATRIX_TEST_HELPERS_H_
+
+#include "modules/audio_processing/beamformer/complex_matrix.h"
+#include "modules/audio_processing/beamformer/matrix.h"
+#include "test/gtest.h"
+
+namespace {
+const float kTolerance = 0.001f;
+}
+
+namespace webrtc {
+
+using std::complex;
+
+// Functions used in both matrix_unittest and complex_matrix_unittest.
+class MatrixTestHelpers {
+ public:
+  template <typename T>
+  static void ValidateMatrixEquality(const Matrix<T>& expected,
+                                     const Matrix<T>& actual) {
+    EXPECT_EQ(expected.num_rows(), actual.num_rows());
+    EXPECT_EQ(expected.num_columns(), actual.num_columns());
+
+    const T* const* expected_elements = expected.elements();
+    const T* const* actual_elements = actual.elements();
+    for (size_t i = 0; i < expected.num_rows(); ++i) {
+      for (size_t j = 0; j < expected.num_columns(); ++j) {
+        EXPECT_EQ(expected_elements[i][j], actual_elements[i][j]);
+      }
+    }
+  }
+
+  static void ValidateMatrixEqualityFloat(const Matrix<float>& expected,
+                                          const Matrix<float>& actual) {
+    EXPECT_EQ(expected.num_rows(), actual.num_rows());
+    EXPECT_EQ(expected.num_columns(), actual.num_columns());
+
+    const float* const* expected_elements = expected.elements();
+    const float* const* actual_elements = actual.elements();
+    for (size_t i = 0; i < expected.num_rows(); ++i) {
+      for (size_t j = 0; j < expected.num_columns(); ++j) {
+        EXPECT_NEAR(expected_elements[i][j], actual_elements[i][j], kTolerance);
+      }
+    }
+  }
+
+  static void ValidateMatrixEqualityComplexFloat(
+      const Matrix<complex<float> >& expected,
+      const Matrix<complex<float> >& actual) {
+    EXPECT_EQ(expected.num_rows(), actual.num_rows());
+    EXPECT_EQ(expected.num_columns(), actual.num_columns());
+
+    const complex<float>* const* expected_elements = expected.elements();
+    const complex<float>* const* actual_elements = actual.elements();
+    for (size_t i = 0; i < expected.num_rows(); ++i) {
+      for (size_t j = 0; j < expected.num_columns(); ++j) {
+        EXPECT_NEAR(expected_elements[i][j].real(),
+                    actual_elements[i][j].real(),
+                    kTolerance);
+        EXPECT_NEAR(expected_elements[i][j].imag(),
+                    actual_elements[i][j].imag(),
+                    kTolerance);
+      }
+    }
+  }
+
+  static void ValidateMatrixNearEqualityComplexFloat(
+      const Matrix<complex<float> >& expected,
+      const Matrix<complex<float> >& actual,
+      float tolerance) {
+    EXPECT_EQ(expected.num_rows(), actual.num_rows());
+    EXPECT_EQ(expected.num_columns(), actual.num_columns());
+
+    const complex<float>* const* expected_elements = expected.elements();
+    const complex<float>* const* actual_elements = actual.elements();
+    for (size_t i = 0; i < expected.num_rows(); ++i) {
+      for (size_t j = 0; j < expected.num_columns(); ++j) {
+        EXPECT_NEAR(expected_elements[i][j].real(),
+                    actual_elements[i][j].real(),
+                    tolerance);
+        EXPECT_NEAR(expected_elements[i][j].imag(),
+                    actual_elements[i][j].imag(),
+                    tolerance);
+      }
+    }
+  }
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_BEAMFORMER_MATRIX_TEST_HELPERS_H_
diff --git a/modules/audio_processing/beamformer/matrix_unittest.cc b/modules/audio_processing/beamformer/matrix_unittest.cc
new file mode 100644
index 0000000..4badfd0
--- /dev/null
+++ b/modules/audio_processing/beamformer/matrix_unittest.cc
@@ -0,0 +1,326 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <complex>
+
+#include "modules/audio_processing/beamformer/matrix.h"
+#include "modules/audio_processing/beamformer/matrix_test_helpers.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+using std::complex;
+
+TEST(MatrixTest, TestMultiplySameSize) {
+  const int kNumRows = 2;
+  const int kNumCols = 2;
+  const float kValuesLeft[kNumRows][kNumCols] = {{1.1f, 2.2f}, {3.3f, 4.4f}};
+  const float kValuesRight[kNumRows][kNumCols] = {{5.4f, 127.f},
+                                                  {4600.f, -555.f}};
+  const float kValuesExpected[kNumRows][kNumCols] = {{10125.94f, -1081.3f},
+                                                     {20257.82f, -2022.9f}};
+
+  Matrix<float> lh_mat(*kValuesLeft, kNumRows, kNumCols);
+  Matrix<float> rh_mat(*kValuesRight, kNumRows, kNumCols);
+  Matrix<float> expected_result(*kValuesExpected, kNumRows, kNumCols);
+  Matrix<float> actual_result(kNumRows, kNumCols);
+
+  actual_result.Multiply(lh_mat, rh_mat);
+  MatrixTestHelpers::ValidateMatrixEquality(expected_result, actual_result);
+
+  lh_mat.Multiply(rh_mat);
+  MatrixTestHelpers::ValidateMatrixEquality(lh_mat, actual_result);
+}
+
+TEST(MatrixTest, TestMultiplyDifferentSize) {
+  const int kNumRowsLeft = 2;
+  const int kNumColsLeft = 3;
+  const int kNumRowsRight = 3;
+  const int kNumColsRight = 2;
+  const int kValuesLeft[kNumRowsLeft][kNumColsLeft] = {{35, 466, -15},
+                                                       {-3, 3422, 9}};
+  const int kValuesRight[kNumRowsRight][kNumColsRight] = {
+      {765, -42}, {0, 194}, {625, 66321}};
+  const int kValuesExpected[kNumRowsLeft][kNumColsRight] = {{17400, -905881},
+                                                            {3330, 1260883}};
+
+  Matrix<int> lh_mat(*kValuesLeft, kNumRowsLeft, kNumColsLeft);
+  Matrix<int> rh_mat(*kValuesRight, kNumRowsRight, kNumColsRight);
+  Matrix<int> expected_result(*kValuesExpected, kNumRowsLeft, kNumColsRight);
+  Matrix<int> actual_result(kNumRowsLeft, kNumColsRight);
+
+  actual_result.Multiply(lh_mat, rh_mat);
+  MatrixTestHelpers::ValidateMatrixEquality(expected_result, actual_result);
+
+  lh_mat.Multiply(rh_mat);
+  MatrixTestHelpers::ValidateMatrixEquality(lh_mat, actual_result);
+}
+
+TEST(MatrixTest, TestTranspose) {
+  const int kNumInitialRows = 2;
+  const int kNumInitialCols = 4;
+  const int kNumResultRows = 4;
+  const int kNumResultCols = 2;
+  const float kValuesInitial[kNumInitialRows][kNumInitialCols] = {
+      {1.1f, 2.2f, 3.3f, 4.4f}, {5.5f, 6.6f, 7.7f, 8.8f}};
+  const float kValuesExpected[kNumResultRows][kNumResultCols] = {
+      {1.1f, 5.5f}, {2.2f, 6.6f}, {3.3f, 7.7f}, {4.4f, 8.8f}};
+
+  Matrix<float> initial_mat(*kValuesInitial, kNumInitialRows, kNumInitialCols);
+  Matrix<float> expected_result(
+      *kValuesExpected, kNumResultRows, kNumResultCols);
+  Matrix<float> actual_result(kNumResultRows, kNumResultCols);
+
+  actual_result.Transpose(initial_mat);
+  MatrixTestHelpers::ValidateMatrixEqualityFloat(expected_result,
+                                                 actual_result);
+  initial_mat.Transpose();
+  MatrixTestHelpers::ValidateMatrixEqualityFloat(initial_mat, actual_result);
+}
+
+TEST(MatrixTest, TestScale) {
+  const int kNumRows = 3;
+  const int kNumCols = 3;
+  const int kScaleFactor = -9;
+  const int kValuesInitial[kNumRows][kNumCols] = {
+      {1, 20, 5000}, {-3, -29, 66}, {7654, 0, -23455}};
+  const int kValuesExpected[kNumRows][kNumCols] = {
+      {-9, -180, -45000}, {27, 261, -594}, {-68886, 0, 211095}};
+
+  Matrix<int> initial_mat(*kValuesInitial, kNumRows, kNumCols);
+  Matrix<int> expected_result(*kValuesExpected, kNumRows, kNumCols);
+  Matrix<int> actual_result;
+
+  actual_result.Scale(initial_mat, kScaleFactor);
+  MatrixTestHelpers::ValidateMatrixEquality(expected_result, actual_result);
+
+  initial_mat.Scale(kScaleFactor);
+  MatrixTestHelpers::ValidateMatrixEquality(initial_mat, actual_result);
+}
+
+TEST(MatrixTest, TestPointwiseAdd) {
+  const int kNumRows = 2;
+  const int kNumCols = 3;
+  const float kValuesLeft[kNumRows][kNumCols] = {{1.1f, 210.45f, -549.2f},
+                                                 {11.876f, 586.7f, -64.35f}};
+  const float kValuesRight[kNumRows][kNumCols] = {{-50.4f, 1.f, 0.5f},
+                                                  {460.f, -554.2f, 4566.f}};
+  const float kValuesExpected[kNumRows][kNumCols] = {
+      {-49.3f, 211.45f, -548.7f}, {471.876f, 32.5f, 4501.65f}};
+
+  Matrix<float> lh_mat(*kValuesLeft, kNumRows, kNumCols);
+  Matrix<float> rh_mat(*kValuesRight, kNumRows, kNumCols);
+  Matrix<float> expected_result(*kValuesExpected, kNumRows, kNumCols);
+  Matrix<float> actual_result;
+
+  actual_result.Add(lh_mat, rh_mat);
+  MatrixTestHelpers::ValidateMatrixEqualityFloat(expected_result,
+                                                 actual_result);
+  lh_mat.Add(rh_mat);
+  MatrixTestHelpers::ValidateMatrixEqualityFloat(lh_mat, actual_result);
+}
+
+TEST(MatrixTest, TestPointwiseSubtract) {
+  const int kNumRows = 3;
+  const int kNumCols = 2;
+  const float kValuesLeft[kNumRows][kNumCols] = {
+      {1.1f, 210.45f}, {-549.2f, 11.876f}, {586.7f, -64.35f}};
+  const float kValuesRight[kNumRows][kNumCols] = {
+      {-50.4f, 1.f}, {0.5f, 460.f}, {-554.2f, 4566.f}};
+  const float kValuesExpected[kNumRows][kNumCols] = {
+      {51.5f, 209.45f}, {-549.7f, -448.124f}, {1140.9f, -4630.35f}};
+
+  Matrix<float> lh_mat(*kValuesLeft, kNumRows, kNumCols);
+  Matrix<float> rh_mat(*kValuesRight, kNumRows, kNumCols);
+  Matrix<float> expected_result(*kValuesExpected, kNumRows, kNumCols);
+  Matrix<float> actual_result;
+
+  actual_result.Subtract(lh_mat, rh_mat);
+  MatrixTestHelpers::ValidateMatrixEqualityFloat(expected_result,
+                                                 actual_result);
+
+  lh_mat.Subtract(rh_mat);
+  MatrixTestHelpers::ValidateMatrixEqualityFloat(lh_mat, actual_result);
+}
+
+TEST(MatrixTest, TestPointwiseMultiply) {
+  const int kNumRows = 1;
+  const int kNumCols = 5;
+  const float kValuesLeft[kNumRows][kNumCols] = {
+      {1.1f, 6.4f, 0.f, -1.f, -88.3f}};
+  const float kValuesRight[kNumRows][kNumCols] = {
+      {53.2f, -210.45f, -549.2f, 99.99f, -45.2f}};
+  const float kValuesExpected[kNumRows][kNumCols] = {
+      {58.52f, -1346.88f, 0.f, -99.99f, 3991.16f}};
+
+  Matrix<float> lh_mat(*kValuesLeft, kNumRows, kNumCols);
+  Matrix<float> rh_mat(*kValuesRight, kNumRows, kNumCols);
+  Matrix<float> expected_result(*kValuesExpected, kNumRows, kNumCols);
+  Matrix<float> actual_result;
+
+  actual_result.PointwiseMultiply(lh_mat, rh_mat);
+  MatrixTestHelpers::ValidateMatrixEqualityFloat(expected_result,
+                                                 actual_result);
+
+  lh_mat.PointwiseMultiply(rh_mat);
+  MatrixTestHelpers::ValidateMatrixEqualityFloat(lh_mat, actual_result);
+}
+
+TEST(MatrixTest, TestPointwiseDivide) {
+  const int kNumRows = 5;
+  const int kNumCols = 1;
+  const float kValuesLeft[kNumRows][kNumCols] = {
+      {1.1f}, {6.4f}, {0.f}, {-1.f}, {-88.3f}};
+  const float kValuesRight[kNumRows][kNumCols] = {
+      {53.2f}, {-210.45f}, {-549.2f}, {99.99f}, {-45.2f}};
+  const float kValuesExpected[kNumRows][kNumCols] = {
+      {0.020676691f}, {-0.03041102399f}, {0.f}, {-0.010001f}, {1.9535398f}};
+
+  Matrix<float> lh_mat(*kValuesLeft, kNumRows, kNumCols);
+  Matrix<float> rh_mat(*kValuesRight, kNumRows, kNumCols);
+  Matrix<float> expected_result(*kValuesExpected, kNumRows, kNumCols);
+  Matrix<float> actual_result;
+
+  actual_result.PointwiseDivide(lh_mat, rh_mat);
+  MatrixTestHelpers::ValidateMatrixEqualityFloat(expected_result,
+                                                 actual_result);
+
+  lh_mat.PointwiseDivide(rh_mat);
+  MatrixTestHelpers::ValidateMatrixEqualityFloat(lh_mat, actual_result);
+}
+
+TEST(MatrixTest, TestPointwiseSquareRoot) {
+  const int kNumRows = 2;
+  const int kNumCols = 2;
+  const int kValues[kNumRows][kNumCols] = {{4, 9}, {16, 0}};
+  const int kValuesExpected[kNumRows][kNumCols] = {{2, 3}, {4, 0}};
+
+  Matrix<int> operand_mat(*kValues, kNumRows, kNumCols);
+  Matrix<int> expected_result(*kValuesExpected, kNumRows, kNumCols);
+  Matrix<int> actual_result;
+
+  actual_result.PointwiseSquareRoot(operand_mat);
+  MatrixTestHelpers::ValidateMatrixEquality(expected_result, actual_result);
+
+  operand_mat.PointwiseSquareRoot();
+  MatrixTestHelpers::ValidateMatrixEquality(operand_mat, actual_result);
+}
+
+TEST(MatrixTest, TestPointwiseSquareRootComplex) {
+  const int kNumRows = 1;
+  const int kNumCols = 3;
+  const complex<float> kValues[kNumRows][kNumCols] = {
+      {complex<float>(-4.f, 0), complex<float>(0, 9), complex<float>(3, -4)}};
+  const complex<float> kValuesExpected[kNumRows][kNumCols] = {
+      {complex<float>(0.f, 2.f), complex<float>(2.1213202f, 2.1213202f),
+       complex<float>(2.f, -1.f)}};
+
+  Matrix<complex<float> > operand_mat(*kValues, kNumRows, kNumCols);
+  Matrix<complex<float> > expected_result(*kValuesExpected, kNumRows, kNumCols);
+  Matrix<complex<float> > actual_result;
+
+  actual_result.PointwiseSquareRoot(operand_mat);
+  MatrixTestHelpers::ValidateMatrixEqualityComplexFloat(expected_result,
+                                                        actual_result);
+
+  operand_mat.PointwiseSquareRoot();
+  MatrixTestHelpers::ValidateMatrixEqualityComplexFloat(operand_mat,
+                                                        actual_result);
+}
+
+TEST(MatrixTest, TestPointwiseAbsoluteValue) {
+  const int kNumRows = 1;
+  const int kNumCols = 3;
+  const complex<float> kValues[kNumRows][kNumCols] = {
+      {complex<float>(-4.f, 0), complex<float>(0, 9), complex<float>(3, -4)}};
+  const complex<float> kValuesExpected[kNumRows][kNumCols] = {
+      {complex<float>(4.f, 0), complex<float>(9.f, 0), complex<float>(5.f, 0)}};
+
+  Matrix<complex<float> > operand_mat(*kValues, kNumRows, kNumCols);
+  Matrix<complex<float> > expected_result(*kValuesExpected, kNumRows, kNumCols);
+  Matrix<complex<float> > actual_result;
+
+  actual_result.PointwiseAbsoluteValue(operand_mat);
+  MatrixTestHelpers::ValidateMatrixEqualityComplexFloat(expected_result,
+                                                        actual_result);
+
+  operand_mat.PointwiseAbsoluteValue();
+  MatrixTestHelpers::ValidateMatrixEqualityComplexFloat(operand_mat,
+                                                        actual_result);
+}
+
+TEST(MatrixTest, TestPointwiseSquare) {
+  const int kNumRows = 1;
+  const int kNumCols = 3;
+  const float kValues[kNumRows][kNumCols] = {{2.4f, -4.f, 3.3f}};
+  const float kValuesExpected[kNumRows][kNumCols] = {{5.76f, 16.f, 10.89f}};
+
+  Matrix<float> operand_mat(*kValues, kNumRows, kNumCols);
+  Matrix<float> expected_result(*kValuesExpected, kNumRows, kNumCols);
+  Matrix<float> actual_result;
+
+  actual_result.PointwiseSquare(operand_mat);
+  MatrixTestHelpers::ValidateMatrixEqualityFloat(expected_result,
+                                                 actual_result);
+
+  operand_mat.PointwiseSquare();
+  MatrixTestHelpers::ValidateMatrixEqualityFloat(operand_mat, actual_result);
+}
+
+TEST(MatrixTest, TestComplexOperations) {
+  const int kNumRows = 2;
+  const int kNumCols = 2;
+
+  const complex<float> kValuesLeft[kNumRows][kNumCols] = {
+      {complex<float>(1.f, 1.f), complex<float>(2.f, 2.f)},
+      {complex<float>(3.f, 3.f), complex<float>(4.f, 4.f)}};
+
+  const complex<float> kValuesRight[kNumRows][kNumCols] = {
+      {complex<float>(5.f, 5.f), complex<float>(6.f, 6.f)},
+      {complex<float>(7.f, 7.f), complex<float>(8.f, 8.f)}};
+
+  const complex<float> kValuesExpectedAdd[kNumRows][kNumCols] = {
+      {complex<float>(6.f, 6.f), complex<float>(8.f, 8.f)},
+      {complex<float>(10.f, 10.f), complex<float>(12.f, 12.f)}};
+
+  const complex<float> kValuesExpectedMultiply[kNumRows][kNumCols] = {
+      {complex<float>(0.f, 38.f), complex<float>(0.f, 44.f)},
+      {complex<float>(0.f, 86.f), complex<float>(0.f, 100.f)}};
+
+  const complex<float> kValuesExpectedPointwiseDivide[kNumRows][kNumCols] = {
+      {complex<float>(0.2f, 0.f), complex<float>(0.33333333f, 0.f)},
+      {complex<float>(0.42857143f, 0.f), complex<float>(0.5f, 0.f)}};
+
+  Matrix<complex<float> > lh_mat(*kValuesLeft, kNumRows, kNumCols);
+  Matrix<complex<float> > rh_mat(*kValuesRight, kNumRows, kNumCols);
+  Matrix<complex<float> > expected_result_add(
+      *kValuesExpectedAdd, kNumRows, kNumCols);
+  Matrix<complex<float> > expected_result_multiply(
+      *kValuesExpectedMultiply, kNumRows, kNumCols);
+  Matrix<complex<float> > expected_result_pointwise_divide(
+      *kValuesExpectedPointwiseDivide, kNumRows, kNumCols);
+  Matrix<complex<float> > actual_result_add;
+  Matrix<complex<float> > actual_result_multiply(kNumRows, kNumCols);
+  Matrix<complex<float> > actual_result_pointwise_divide;
+
+  actual_result_add.Add(lh_mat, rh_mat);
+  MatrixTestHelpers::ValidateMatrixEqualityComplexFloat(expected_result_add,
+                                                        actual_result_add);
+
+  actual_result_multiply.Multiply(lh_mat, rh_mat);
+  MatrixTestHelpers::ValidateMatrixEqualityComplexFloat(
+      expected_result_multiply, actual_result_multiply);
+
+  actual_result_pointwise_divide.PointwiseDivide(lh_mat, rh_mat);
+  MatrixTestHelpers::ValidateMatrixEqualityComplexFloat(
+      expected_result_pointwise_divide, actual_result_pointwise_divide);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/beamformer/mock_nonlinear_beamformer.h b/modules/audio_processing/beamformer/mock_nonlinear_beamformer.h
new file mode 100644
index 0000000..c4c7358
--- /dev/null
+++ b/modules/audio_processing/beamformer/mock_nonlinear_beamformer.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_BEAMFORMER_MOCK_BEAMFORMER_H_
+#define MODULES_AUDIO_PROCESSING_BEAMFORMER_MOCK_BEAMFORMER_H_
+
+#include <vector>
+
+#include "modules/audio_processing/beamformer/nonlinear_beamformer.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+class MockNonlinearBeamformer : public NonlinearBeamformer {
+ public:
+  MockNonlinearBeamformer(const std::vector<Point>& array_geometry,
+                          size_t num_postfilter_channels)
+      : NonlinearBeamformer(array_geometry, num_postfilter_channels) {}
+
+  MockNonlinearBeamformer(const std::vector<Point>& array_geometry)
+      : NonlinearBeamformer(array_geometry, 1u) {}
+
+  MOCK_METHOD2(Initialize, void(int chunk_size_ms, int sample_rate_hz));
+  MOCK_METHOD1(AnalyzeChunk, void(const ChannelBuffer<float>& data));
+  MOCK_METHOD1(PostFilter, void(ChannelBuffer<float>* data));
+  MOCK_METHOD1(IsInBeam, bool(const SphericalPointf& spherical_point));
+  MOCK_METHOD0(is_target_present, bool());
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_BEAMFORMER_MOCK_BEAMFORMER_H_
diff --git a/modules/audio_processing/beamformer/nonlinear_beamformer.cc b/modules/audio_processing/beamformer/nonlinear_beamformer.cc
new file mode 100644
index 0000000..12f6d2f
--- /dev/null
+++ b/modules/audio_processing/beamformer/nonlinear_beamformer.cc
@@ -0,0 +1,599 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#define _USE_MATH_DEFINES
+
+#include "modules/audio_processing/beamformer/nonlinear_beamformer.h"
+
+#include <algorithm>
+#include <cmath>
+#include <numeric>
+#include <vector>
+
+#include "common_audio/window_generator.h"
+#include "modules/audio_processing/beamformer/covariance_matrix_generator.h"
+#include "rtc_base/arraysize.h"
+
+namespace webrtc {
+namespace {
+
+// Alpha for the Kaiser Bessel Derived window.
+const float kKbdAlpha = 1.5f;
+
+const float kSpeedOfSoundMeterSeconds = 343;
+
+// The minimum separation in radians between the target direction and an
+// interferer scenario.
+const float kMinAwayRadians = 0.2f;
+
+// The separation between the target direction and the closest interferer
+// scenario is proportional to this constant.
+const float kAwaySlope = 0.008f;
+
+// When calculating the interference covariance matrix, this is the weight for
+// the weighted average between the uniform covariance matrix and the angled
+// covariance matrix.
+// Rpsi = Rpsi_angled * kBalance + Rpsi_uniform * (1 - kBalance)
+const float kBalance = 0.95f;
+
+// Alpha coefficients for mask smoothing.
+const float kMaskTimeSmoothAlpha = 0.2f;
+const float kMaskFrequencySmoothAlpha = 0.6f;
+
+// The average mask is computed from masks in this mid-frequency range. If these
+// ranges are changed |kMaskQuantile| might need to be adjusted.
+const int kLowMeanStartHz = 200;
+const int kLowMeanEndHz = 400;
+
+// Range limiter for subtractive terms in the nominator and denominator of the
+// postfilter expression. It handles the scenario mismatch between the true and
+// model sources (target and interference).
+const float kCutOffConstant = 0.9999f;
+
+// Quantile of mask values which is used to estimate target presence.
+const float kMaskQuantile = 0.7f;
+// Mask threshold over which the data is considered signal and not interference.
+// It has to be updated every time the postfilter calculation is changed
+// significantly.
+// TODO(aluebs): Write a tool to tune the target threshold automatically based
+// on files annotated with target and interference ground truth.
+const float kMaskTargetThreshold = 0.01f;
+// Time in seconds after which the data is considered interference if the mask
+// does not pass |kMaskTargetThreshold|.
+const float kHoldTargetSeconds = 0.25f;
+
+// To compensate for the attenuation this algorithm introduces to the target
+// signal. It was estimated empirically from a low-noise low-reverberation
+// recording from broadside.
+const float kCompensationGain = 2.f;
+
+// Does conjugate(|norm_mat|) * |mat| * transpose(|norm_mat|). No extra space is
+// used; to accomplish this, we compute both multiplications in the same loop.
+// The returned norm is clamped to be non-negative.
+float Norm(const ComplexMatrix<float>& mat,
+           const ComplexMatrix<float>& norm_mat) {
+  RTC_CHECK_EQ(1, norm_mat.num_rows());
+  RTC_CHECK_EQ(norm_mat.num_columns(), mat.num_rows());
+  RTC_CHECK_EQ(norm_mat.num_columns(), mat.num_columns());
+
+  complex<float> first_product = complex<float>(0.f, 0.f);
+  complex<float> second_product = complex<float>(0.f, 0.f);
+
+  const complex<float>* const* mat_els = mat.elements();
+  const complex<float>* const* norm_mat_els = norm_mat.elements();
+
+  for (size_t i = 0; i < norm_mat.num_columns(); ++i) {
+    for (size_t j = 0; j < norm_mat.num_columns(); ++j) {
+      first_product += conj(norm_mat_els[0][j]) * mat_els[j][i];
+    }
+    second_product += first_product * norm_mat_els[0][i];
+    first_product = 0.f;
+  }
+  return std::max(second_product.real(), 0.f);
+}
+
+// Does conjugate(|lhs|) * |rhs| for row vectors |lhs| and |rhs|.
+complex<float> ConjugateDotProduct(const ComplexMatrix<float>& lhs,
+                                   const ComplexMatrix<float>& rhs) {
+  RTC_CHECK_EQ(1, lhs.num_rows());
+  RTC_CHECK_EQ(1, rhs.num_rows());
+  RTC_CHECK_EQ(lhs.num_columns(), rhs.num_columns());
+
+  const complex<float>* const* lhs_elements = lhs.elements();
+  const complex<float>* const* rhs_elements = rhs.elements();
+
+  complex<float> result = complex<float>(0.f, 0.f);
+  for (size_t i = 0; i < lhs.num_columns(); ++i) {
+    result += conj(lhs_elements[0][i]) * rhs_elements[0][i];
+  }
+
+  return result;
+}
+
+// Works for positive numbers only.
+size_t Round(float x) {
+  return static_cast<size_t>(std::floor(x + 0.5f));
+}
+
+// Calculates the sum of squares of a complex matrix.
+float SumSquares(const ComplexMatrix<float>& mat) {
+  float sum_squares = 0.f;
+  const complex<float>* const* mat_els = mat.elements();
+  for (size_t i = 0; i < mat.num_rows(); ++i) {
+    for (size_t j = 0; j < mat.num_columns(); ++j) {
+      float abs_value = std::abs(mat_els[i][j]);
+      sum_squares += abs_value * abs_value;
+    }
+  }
+  return sum_squares;
+}
+
+// Does |out| = |in|.' * conj(|in|) for row vector |in|.
+void TransposedConjugatedProduct(const ComplexMatrix<float>& in,
+                                 ComplexMatrix<float>* out) {
+  RTC_CHECK_EQ(1, in.num_rows());
+  RTC_CHECK_EQ(out->num_rows(), in.num_columns());
+  RTC_CHECK_EQ(out->num_columns(), in.num_columns());
+  const complex<float>* in_elements = in.elements()[0];
+  complex<float>* const* out_elements = out->elements();
+  for (size_t i = 0; i < out->num_rows(); ++i) {
+    for (size_t j = 0; j < out->num_columns(); ++j) {
+      out_elements[i][j] = in_elements[i] * conj(in_elements[j]);
+    }
+  }
+}
+
+std::vector<Point> GetCenteredArray(std::vector<Point> array_geometry) {
+  for (size_t dim = 0; dim < 3; ++dim) {
+    float center = 0.f;
+    for (size_t i = 0; i < array_geometry.size(); ++i) {
+      center += array_geometry[i].c[dim];
+    }
+    center /= array_geometry.size();
+    for (size_t i = 0; i < array_geometry.size(); ++i) {
+      array_geometry[i].c[dim] -= center;
+    }
+  }
+  return array_geometry;
+}
+
+}  // namespace
+
+const float NonlinearBeamformer::kHalfBeamWidthRadians = DegreesToRadians(20.f);
+
+// static
+const size_t NonlinearBeamformer::kNumFreqBins;
+
+PostFilterTransform::PostFilterTransform(size_t num_channels,
+                                         size_t chunk_length,
+                                         float* window,
+                                         size_t fft_size)
+    : transform_(num_channels,
+                 num_channels,
+                 chunk_length,
+                 window,
+                 fft_size,
+                 fft_size / 2,
+                 this),
+      num_freq_bins_(fft_size / 2 + 1) {}
+
+void PostFilterTransform::ProcessChunk(float* const* data, float* final_mask) {
+  final_mask_ = final_mask;
+  transform_.ProcessChunk(data, data);
+}
+
+void PostFilterTransform::ProcessAudioBlock(const complex<float>* const* input,
+                                            size_t num_input_channels,
+                                            size_t num_freq_bins,
+                                            size_t num_output_channels,
+                                            complex<float>* const* output) {
+  RTC_DCHECK_EQ(num_freq_bins_, num_freq_bins);
+  RTC_DCHECK_EQ(num_input_channels, num_output_channels);
+
+  for (size_t ch = 0; ch < num_input_channels; ++ch) {
+    for (size_t f_ix = 0; f_ix < num_freq_bins_; ++f_ix) {
+      output[ch][f_ix] =
+          kCompensationGain * final_mask_[f_ix] * input[ch][f_ix];
+    }
+  }
+}
+
+NonlinearBeamformer::NonlinearBeamformer(
+    const std::vector<Point>& array_geometry,
+    size_t num_postfilter_channels,
+    SphericalPointf target_direction)
+    : num_input_channels_(array_geometry.size()),
+      num_postfilter_channels_(num_postfilter_channels),
+      array_geometry_(GetCenteredArray(array_geometry)),
+      array_normal_(GetArrayNormalIfExists(array_geometry)),
+      min_mic_spacing_(GetMinimumSpacing(array_geometry)),
+      target_angle_radians_(target_direction.azimuth()),
+      away_radians_(std::min(
+          static_cast<float>(M_PI),
+          std::max(kMinAwayRadians,
+                   kAwaySlope * static_cast<float>(M_PI) / min_mic_spacing_))) {
+  WindowGenerator::KaiserBesselDerived(kKbdAlpha, kFftSize, window_);
+}
+
+NonlinearBeamformer::~NonlinearBeamformer() = default;
+
+void NonlinearBeamformer::Initialize(int chunk_size_ms, int sample_rate_hz) {
+  chunk_length_ =
+      static_cast<size_t>(sample_rate_hz / (1000.f / chunk_size_ms));
+  sample_rate_hz_ = sample_rate_hz;
+
+  high_pass_postfilter_mask_ = 1.f;
+  is_target_present_ = false;
+  hold_target_blocks_ = kHoldTargetSeconds * 2 * sample_rate_hz / kFftSize;
+  interference_blocks_count_ = hold_target_blocks_;
+
+  process_transform_.reset(new LappedTransform(num_input_channels_,
+                                               0u,
+                                               chunk_length_,
+                                               window_,
+                                               kFftSize,
+                                               kFftSize / 2,
+                                               this));
+  postfilter_transform_.reset(new PostFilterTransform(
+      num_postfilter_channels_, chunk_length_, window_, kFftSize));
+  const float wave_number_step =
+      (2.f * M_PI * sample_rate_hz_) / (kFftSize * kSpeedOfSoundMeterSeconds);
+  for (size_t i = 0; i < kNumFreqBins; ++i) {
+    time_smooth_mask_[i] = 1.f;
+    final_mask_[i] = 1.f;
+    wave_numbers_[i] = i * wave_number_step;
+  }
+
+  InitLowFrequencyCorrectionRanges();
+  InitDiffuseCovMats();
+  AimAt(SphericalPointf(target_angle_radians_, 0.f, 1.f));
+}
+
+// These bin indexes determine the regions over which a mean is taken. This is
+// applied as a constant value over the adjacent end "frequency correction"
+// regions.
+//
+//             low_mean_start_bin_     high_mean_start_bin_
+//                   v                         v              constant
+// |----------------|--------|----------------|-------|----------------|
+//   constant               ^                        ^
+//             low_mean_end_bin_       high_mean_end_bin_
+//
+void NonlinearBeamformer::InitLowFrequencyCorrectionRanges() {
+  low_mean_start_bin_ = Round(static_cast<float>(kLowMeanStartHz) *
+                                  kFftSize / sample_rate_hz_);
+  low_mean_end_bin_ = Round(static_cast<float>(kLowMeanEndHz) *
+                                  kFftSize / sample_rate_hz_);
+
+  RTC_DCHECK_GT(low_mean_start_bin_, 0U);
+  RTC_DCHECK_LT(low_mean_start_bin_, low_mean_end_bin_);
+}
+
+void NonlinearBeamformer::InitHighFrequencyCorrectionRanges() {
+  const float kAliasingFreqHz =
+      kSpeedOfSoundMeterSeconds /
+      (min_mic_spacing_ * (1.f + std::abs(std::cos(target_angle_radians_))));
+  const float kHighMeanStartHz = std::min(0.5f *  kAliasingFreqHz,
+                                          sample_rate_hz_ / 2.f);
+  const float kHighMeanEndHz = std::min(0.75f *  kAliasingFreqHz,
+                                        sample_rate_hz_ / 2.f);
+  high_mean_start_bin_ = Round(kHighMeanStartHz * kFftSize / sample_rate_hz_);
+  high_mean_end_bin_ = Round(kHighMeanEndHz * kFftSize / sample_rate_hz_);
+
+  RTC_DCHECK_LT(low_mean_end_bin_, high_mean_end_bin_);
+  RTC_DCHECK_LT(high_mean_start_bin_, high_mean_end_bin_);
+  RTC_DCHECK_LT(high_mean_end_bin_, kNumFreqBins - 1);
+}
+
+void NonlinearBeamformer::InitInterfAngles() {
+  interf_angles_radians_.clear();
+  const Point target_direction = AzimuthToPoint(target_angle_radians_);
+  const Point clockwise_interf_direction =
+      AzimuthToPoint(target_angle_radians_ - away_radians_);
+  if (!array_normal_ ||
+      DotProduct(*array_normal_, target_direction) *
+              DotProduct(*array_normal_, clockwise_interf_direction) >=
+          0.f) {
+    // The target and clockwise interferer are in the same half-plane defined
+    // by the array.
+    interf_angles_radians_.push_back(target_angle_radians_ - away_radians_);
+  } else {
+    // Otherwise, the interferer will begin reflecting back at the target.
+    // Instead rotate it away 180 degrees.
+    interf_angles_radians_.push_back(target_angle_radians_ - away_radians_ +
+                                     M_PI);
+  }
+  const Point counterclock_interf_direction =
+      AzimuthToPoint(target_angle_radians_ + away_radians_);
+  if (!array_normal_ ||
+      DotProduct(*array_normal_, target_direction) *
+              DotProduct(*array_normal_, counterclock_interf_direction) >=
+          0.f) {
+    // The target and counter-clockwise interferer are in the same half-plane
+    // defined by the array.
+    interf_angles_radians_.push_back(target_angle_radians_ + away_radians_);
+  } else {
+    // Otherwise, the interferer will begin reflecting back at the target.
+    // Instead rotate it away 180 degrees.
+    interf_angles_radians_.push_back(target_angle_radians_ + away_radians_ -
+                                     M_PI);
+  }
+}
+
+void NonlinearBeamformer::InitDelaySumMasks() {
+  for (size_t f_ix = 0; f_ix < kNumFreqBins; ++f_ix) {
+    delay_sum_masks_[f_ix].Resize(1, num_input_channels_);
+    CovarianceMatrixGenerator::PhaseAlignmentMasks(
+        f_ix, kFftSize, sample_rate_hz_, kSpeedOfSoundMeterSeconds,
+        array_geometry_, target_angle_radians_, &delay_sum_masks_[f_ix]);
+
+    complex_f norm_factor = sqrt(
+        ConjugateDotProduct(delay_sum_masks_[f_ix], delay_sum_masks_[f_ix]));
+    delay_sum_masks_[f_ix].Scale(1.f / norm_factor);
+  }
+}
+
+void NonlinearBeamformer::InitTargetCovMats() {
+  for (size_t i = 0; i < kNumFreqBins; ++i) {
+    target_cov_mats_[i].Resize(num_input_channels_, num_input_channels_);
+    TransposedConjugatedProduct(delay_sum_masks_[i], &target_cov_mats_[i]);
+  }
+}
+
+void NonlinearBeamformer::InitDiffuseCovMats() {
+  for (size_t i = 0; i < kNumFreqBins; ++i) {
+    uniform_cov_mat_[i].Resize(num_input_channels_, num_input_channels_);
+    CovarianceMatrixGenerator::UniformCovarianceMatrix(
+        wave_numbers_[i], array_geometry_, &uniform_cov_mat_[i]);
+    complex_f normalization_factor = uniform_cov_mat_[i].elements()[0][0];
+    uniform_cov_mat_[i].Scale(1.f / normalization_factor);
+    uniform_cov_mat_[i].Scale(1 - kBalance);
+  }
+}
+
+void NonlinearBeamformer::InitInterfCovMats() {
+  for (size_t i = 0; i < kNumFreqBins; ++i) {
+    interf_cov_mats_[i].clear();
+    for (size_t j = 0; j < interf_angles_radians_.size(); ++j) {
+      interf_cov_mats_[i].push_back(std::unique_ptr<ComplexMatrixF>(
+          new ComplexMatrixF(num_input_channels_, num_input_channels_)));
+      ComplexMatrixF angled_cov_mat(num_input_channels_, num_input_channels_);
+      CovarianceMatrixGenerator::AngledCovarianceMatrix(
+          kSpeedOfSoundMeterSeconds,
+          interf_angles_radians_[j],
+          i,
+          kFftSize,
+          kNumFreqBins,
+          sample_rate_hz_,
+          array_geometry_,
+          &angled_cov_mat);
+      // Normalize matrices before averaging them.
+      complex_f normalization_factor = angled_cov_mat.elements()[0][0];
+      angled_cov_mat.Scale(1.f / normalization_factor);
+      // Weighted average of matrices.
+      angled_cov_mat.Scale(kBalance);
+      interf_cov_mats_[i][j]->Add(uniform_cov_mat_[i], angled_cov_mat);
+    }
+  }
+}
+
+void NonlinearBeamformer::NormalizeCovMats() {
+  for (size_t i = 0; i < kNumFreqBins; ++i) {
+    rxiws_[i] = Norm(target_cov_mats_[i], delay_sum_masks_[i]);
+    rpsiws_[i].clear();
+    for (size_t j = 0; j < interf_angles_radians_.size(); ++j) {
+      rpsiws_[i].push_back(Norm(*interf_cov_mats_[i][j], delay_sum_masks_[i]));
+    }
+  }
+}
+
+void NonlinearBeamformer::AnalyzeChunk(const ChannelBuffer<float>& data) {
+  RTC_DCHECK_EQ(data.num_channels(), num_input_channels_);
+  RTC_DCHECK_EQ(data.num_frames_per_band(), chunk_length_);
+
+  old_high_pass_mask_ = high_pass_postfilter_mask_;
+  process_transform_->ProcessChunk(data.channels(0), nullptr);
+}
+
+void NonlinearBeamformer::PostFilter(ChannelBuffer<float>* data) {
+  RTC_DCHECK_EQ(data->num_frames_per_band(), chunk_length_);
+  // TODO(aluebs): Change to RTC_CHECK_EQ once the ChannelBuffer is updated.
+  RTC_DCHECK_GE(data->num_channels(), num_postfilter_channels_);
+
+  postfilter_transform_->ProcessChunk(data->channels(0), final_mask_);
+
+  // Ramp up/down for smoothing is needed in order to avoid discontinuities in
+  // the transitions between 10 ms frames.
+  const float ramp_increment =
+      (high_pass_postfilter_mask_ - old_high_pass_mask_) /
+      data->num_frames_per_band();
+  for (size_t i = 1; i < data->num_bands(); ++i) {
+    float smoothed_mask = old_high_pass_mask_;
+    for (size_t j = 0; j < data->num_frames_per_band(); ++j) {
+      smoothed_mask += ramp_increment;
+      for (size_t k = 0; k < num_postfilter_channels_; ++k) {
+        data->channels(i)[k][j] *= smoothed_mask;
+      }
+    }
+  }
+}
+
+void NonlinearBeamformer::AimAt(const SphericalPointf& target_direction) {
+  target_angle_radians_ = target_direction.azimuth();
+  InitHighFrequencyCorrectionRanges();
+  InitInterfAngles();
+  InitDelaySumMasks();
+  InitTargetCovMats();
+  InitInterfCovMats();
+  NormalizeCovMats();
+}
+
+bool NonlinearBeamformer::IsInBeam(const SphericalPointf& spherical_point) {
+  // If more than half-beamwidth degrees away from the beam's center,
+  // you are out of the beam.
+  return fabs(spherical_point.azimuth() - target_angle_radians_) <
+         kHalfBeamWidthRadians;
+}
+
+bool NonlinearBeamformer::is_target_present() { return is_target_present_; }
+
+void NonlinearBeamformer::ProcessAudioBlock(const complex_f* const* input,
+                                            size_t num_input_channels,
+                                            size_t num_freq_bins,
+                                            size_t num_output_channels,
+                                            complex_f* const* output) {
+  RTC_CHECK_EQ(kNumFreqBins, num_freq_bins);
+  RTC_CHECK_EQ(num_input_channels_, num_input_channels);
+  RTC_CHECK_EQ(0, num_output_channels);
+
+  // Calculating the post-filter masks. Note that we need two for each
+  // frequency bin to account for the positive and negative interferer
+  // angle.
+  for (size_t i = low_mean_start_bin_; i <= high_mean_end_bin_; ++i) {
+    eig_m_.CopyFromColumn(input, i, num_input_channels_);
+    float eig_m_norm_factor = std::sqrt(SumSquares(eig_m_));
+    if (eig_m_norm_factor != 0.f) {
+      eig_m_.Scale(1.f / eig_m_norm_factor);
+    }
+
+    float rxim = Norm(target_cov_mats_[i], eig_m_);
+    float ratio_rxiw_rxim = 0.f;
+    if (rxim > 0.f) {
+      ratio_rxiw_rxim = rxiws_[i] / rxim;
+    }
+
+    complex_f rmw = abs(ConjugateDotProduct(delay_sum_masks_[i], eig_m_));
+    rmw *= rmw;
+    float rmw_r = rmw.real();
+
+    new_mask_[i] = CalculatePostfilterMask(*interf_cov_mats_[i][0],
+                                           rpsiws_[i][0],
+                                           ratio_rxiw_rxim,
+                                           rmw_r);
+    for (size_t j = 1; j < interf_angles_radians_.size(); ++j) {
+      float tmp_mask = CalculatePostfilterMask(*interf_cov_mats_[i][j],
+                                               rpsiws_[i][j],
+                                               ratio_rxiw_rxim,
+                                               rmw_r);
+      if (tmp_mask < new_mask_[i]) {
+        new_mask_[i] = tmp_mask;
+      }
+    }
+  }
+
+  ApplyMaskTimeSmoothing();
+  EstimateTargetPresence();
+  ApplyLowFrequencyCorrection();
+  ApplyHighFrequencyCorrection();
+  ApplyMaskFrequencySmoothing();
+}
+
+float NonlinearBeamformer::CalculatePostfilterMask(
+    const ComplexMatrixF& interf_cov_mat,
+    float rpsiw,
+    float ratio_rxiw_rxim,
+    float rmw_r) {
+  float rpsim = Norm(interf_cov_mat, eig_m_);
+
+  float ratio = 0.f;
+  if (rpsim > 0.f) {
+    ratio = rpsiw / rpsim;
+  }
+
+  float numerator = 1.f - kCutOffConstant;
+  if (rmw_r > 0.f) {
+    numerator = 1.f - std::min(kCutOffConstant, ratio / rmw_r);
+  }
+
+  float denominator = 1.f - kCutOffConstant;
+  if (ratio_rxiw_rxim > 0.f) {
+    denominator = 1.f - std::min(kCutOffConstant, ratio / ratio_rxiw_rxim);
+  }
+
+  return numerator / denominator;
+}
+
+// Smooth new_mask_ into time_smooth_mask_.
+void NonlinearBeamformer::ApplyMaskTimeSmoothing() {
+  for (size_t i = low_mean_start_bin_; i <= high_mean_end_bin_; ++i) {
+    time_smooth_mask_[i] = kMaskTimeSmoothAlpha * new_mask_[i] +
+                           (1 - kMaskTimeSmoothAlpha) * time_smooth_mask_[i];
+  }
+}
+
+// Copy time_smooth_mask_ to final_mask_ and smooth over frequency.
+void NonlinearBeamformer::ApplyMaskFrequencySmoothing() {
+  // Smooth over frequency in both directions. The "frequency correction"
+  // regions have constant value, but we enter them to smooth over the jump
+  // that exists at the boundary. However, this does mean when smoothing "away"
+  // from the region that we only need to use the last element.
+  //
+  // Upward smoothing:
+  //   low_mean_start_bin_
+  //         v
+  // |------|------------|------|
+  //       ^------------------>^
+  //
+  // Downward smoothing:
+  //         high_mean_end_bin_
+  //                    v
+  // |------|------------|------|
+  //  ^<------------------^
+  std::copy(time_smooth_mask_, time_smooth_mask_ + kNumFreqBins, final_mask_);
+  for (size_t i = low_mean_start_bin_; i < kNumFreqBins; ++i) {
+    final_mask_[i] = kMaskFrequencySmoothAlpha * final_mask_[i] +
+                     (1 - kMaskFrequencySmoothAlpha) * final_mask_[i - 1];
+  }
+  for (size_t i = high_mean_end_bin_ + 1; i > 0; --i) {
+    final_mask_[i - 1] = kMaskFrequencySmoothAlpha * final_mask_[i - 1] +
+                         (1 - kMaskFrequencySmoothAlpha) * final_mask_[i];
+  }
+}
+
+// Apply low frequency correction to time_smooth_mask_.
+void NonlinearBeamformer::ApplyLowFrequencyCorrection() {
+  const float low_frequency_mask =
+      MaskRangeMean(low_mean_start_bin_, low_mean_end_bin_ + 1);
+  std::fill(time_smooth_mask_, time_smooth_mask_ + low_mean_start_bin_,
+            low_frequency_mask);
+}
+
+// Apply high frequency correction to time_smooth_mask_. Update
+// high_pass_postfilter_mask_ to use for the high frequency time-domain bands.
+void NonlinearBeamformer::ApplyHighFrequencyCorrection() {
+  high_pass_postfilter_mask_ =
+      MaskRangeMean(high_mean_start_bin_, high_mean_end_bin_ + 1);
+  std::fill(time_smooth_mask_ + high_mean_end_bin_ + 1,
+            time_smooth_mask_ + kNumFreqBins, high_pass_postfilter_mask_);
+}
+
+// Compute mean over the given range of time_smooth_mask_, [first, last).
+float NonlinearBeamformer::MaskRangeMean(size_t first, size_t last) {
+  RTC_DCHECK_GT(last, first);
+  const float sum = std::accumulate(time_smooth_mask_ + first,
+                                    time_smooth_mask_ + last, 0.f);
+  return sum / (last - first);
+}
+
+void NonlinearBeamformer::EstimateTargetPresence() {
+  const size_t quantile = static_cast<size_t>(
+      (high_mean_end_bin_ - low_mean_start_bin_) * kMaskQuantile +
+      low_mean_start_bin_);
+  std::nth_element(new_mask_ + low_mean_start_bin_, new_mask_ + quantile,
+                   new_mask_ + high_mean_end_bin_ + 1);
+  if (new_mask_[quantile] > kMaskTargetThreshold) {
+    is_target_present_ = true;
+    interference_blocks_count_ = 0;
+  } else {
+    is_target_present_ = interference_blocks_count_++ < hold_target_blocks_;
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/beamformer/nonlinear_beamformer.h b/modules/audio_processing/beamformer/nonlinear_beamformer.h
new file mode 100644
index 0000000..5b79dc4
--- /dev/null
+++ b/modules/audio_processing/beamformer/nonlinear_beamformer.h
@@ -0,0 +1,230 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_BEAMFORMER_NONLINEAR_BEAMFORMER_H_
+#define MODULES_AUDIO_PROCESSING_BEAMFORMER_NONLINEAR_BEAMFORMER_H_
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#ifndef _USE_MATH_DEFINES
+#define _USE_MATH_DEFINES
+#endif
+
+#include <math.h>
+
+#include <memory>
+#include <vector>
+
+#include "common_audio/lapped_transform.h"
+#include "common_audio/channel_buffer.h"
+#include "modules/audio_processing/beamformer/array_util.h"
+#include "modules/audio_processing/beamformer/complex_matrix.h"
+
+namespace webrtc {
+
+class PostFilterTransform : public LappedTransform::Callback {
+ public:
+  PostFilterTransform(size_t num_channels,
+                      size_t chunk_length,
+                      float* window,
+                      size_t fft_size);
+
+  void ProcessChunk(float* const* data, float* final_mask);
+
+ protected:
+  void ProcessAudioBlock(const complex<float>* const* input,
+                         size_t num_input_channels,
+                         size_t num_freq_bins,
+                         size_t num_output_channels,
+                         complex<float>* const* output) override;
+
+ private:
+  LappedTransform transform_;
+  const size_t num_freq_bins_;
+  float* final_mask_;
+};
+
+// Enhances sound sources coming directly in front of a uniform linear array
+// and suppresses sound sources coming from all other directions. Operates on
+// multichannel signals and produces single-channel output.
+//
+// The implemented nonlinear postfilter algorithm taken from "A Robust Nonlinear
+// Beamforming Postprocessor" by Bastiaan Kleijn.
+class NonlinearBeamformer : public LappedTransform::Callback {
+ public:
+  static const float kHalfBeamWidthRadians;
+
+  explicit NonlinearBeamformer(
+      const std::vector<Point>& array_geometry,
+      size_t num_postfilter_channels = 1u,
+      SphericalPointf target_direction =
+          SphericalPointf(static_cast<float>(M_PI) / 2.f, 0.f, 1.f));
+  ~NonlinearBeamformer() override;
+
+  // Sample rate corresponds to the lower band.
+  // Needs to be called before the NonlinearBeamformer can be used.
+  virtual void Initialize(int chunk_size_ms, int sample_rate_hz);
+
+  // Analyzes one time-domain chunk of audio. The audio is expected to be split
+  // into frequency bands inside the ChannelBuffer. The number of frames and
+  // channels must correspond to the constructor parameters.
+  virtual void AnalyzeChunk(const ChannelBuffer<float>& data);
+
+  // Applies the postfilter mask to one chunk of audio. The audio is expected to
+  // be split into frequency bands inside the ChannelBuffer. The number of
+  // frames and channels must correspond to the constructor parameters.
+  virtual void PostFilter(ChannelBuffer<float>* data);
+
+  virtual void AimAt(const SphericalPointf& target_direction);
+
+  virtual bool IsInBeam(const SphericalPointf& spherical_point);
+
+  // After processing each block |is_target_present_| is set to true if the
+  // target signal es present and to false otherwise. This methods can be called
+  // to know if the data is target signal or interference and process it
+  // accordingly.
+  virtual bool is_target_present();
+
+ protected:
+  // Process one frequency-domain block of audio. This is where the fun
+  // happens. Implements LappedTransform::Callback.
+  void ProcessAudioBlock(const complex<float>* const* input,
+                         size_t num_input_channels,
+                         size_t num_freq_bins,
+                         size_t num_output_channels,
+                         complex<float>* const* output) override;
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(NonlinearBeamformerTest,
+                           InterfAnglesTakeAmbiguityIntoAccount);
+
+  typedef Matrix<float> MatrixF;
+  typedef ComplexMatrix<float> ComplexMatrixF;
+  typedef complex<float> complex_f;
+
+  void InitLowFrequencyCorrectionRanges();
+  void InitHighFrequencyCorrectionRanges();
+  void InitInterfAngles();
+  void InitDelaySumMasks();
+  void InitTargetCovMats();
+  void InitDiffuseCovMats();
+  void InitInterfCovMats();
+  void NormalizeCovMats();
+
+  // Calculates postfilter masks that minimize the mean squared error of our
+  // estimation of the desired signal.
+  float CalculatePostfilterMask(const ComplexMatrixF& interf_cov_mat,
+                                float rpsiw,
+                                float ratio_rxiw_rxim,
+                                float rmxi_r);
+
+  // Prevents the postfilter masks from degenerating too quickly (a cause of
+  // musical noise).
+  void ApplyMaskTimeSmoothing();
+  void ApplyMaskFrequencySmoothing();
+
+  // The postfilter masks are unreliable at low frequencies. Calculates a better
+  // mask by averaging mid-low frequency values.
+  void ApplyLowFrequencyCorrection();
+
+  // Postfilter masks are also unreliable at high frequencies. Average mid-high
+  // frequency masks to calculate a single mask per block which can be applied
+  // in the time-domain. Further, we average these block-masks over a chunk,
+  // resulting in one postfilter mask per audio chunk. This allows us to skip
+  // both transforming and blocking the high-frequency signal.
+  void ApplyHighFrequencyCorrection();
+
+  // Compute the means needed for the above frequency correction.
+  float MaskRangeMean(size_t start_bin, size_t end_bin);
+
+  // Applies post-filter mask to |input| and store in |output|.
+  void ApplyPostFilter(const complex_f* input, complex_f* output);
+
+  void EstimateTargetPresence();
+
+  static const size_t kFftSize = 256;
+  static const size_t kNumFreqBins = kFftSize / 2 + 1;
+
+  // Deals with the fft transform and blocking.
+  size_t chunk_length_;
+  std::unique_ptr<LappedTransform> process_transform_;
+  std::unique_ptr<PostFilterTransform> postfilter_transform_;
+  float window_[kFftSize];
+
+  // Parameters exposed to the user.
+  const size_t num_input_channels_;
+  const size_t num_postfilter_channels_;
+  int sample_rate_hz_;
+
+  const std::vector<Point> array_geometry_;
+  // The normal direction of the array if it has one and it is in the xy-plane.
+  const rtc::Optional<Point> array_normal_;
+
+  // Minimum spacing between microphone pairs.
+  const float min_mic_spacing_;
+
+  // Calculated based on user-input and constants in the .cc file.
+  size_t low_mean_start_bin_;
+  size_t low_mean_end_bin_;
+  size_t high_mean_start_bin_;
+  size_t high_mean_end_bin_;
+
+  // Quickly varying mask updated every block.
+  float new_mask_[kNumFreqBins];
+  // Time smoothed mask.
+  float time_smooth_mask_[kNumFreqBins];
+  // Time and frequency smoothed mask.
+  float final_mask_[kNumFreqBins];
+
+  float target_angle_radians_;
+  // Angles of the interferer scenarios.
+  std::vector<float> interf_angles_radians_;
+  // The angle between the target and the interferer scenarios.
+  const float away_radians_;
+
+  // Array of length |kNumFreqBins|, Matrix of size |1| x |num_channels_|.
+  ComplexMatrixF delay_sum_masks_[kNumFreqBins];
+
+  // Arrays of length |kNumFreqBins|, Matrix of size |num_input_channels_| x
+  // |num_input_channels_|.
+  ComplexMatrixF target_cov_mats_[kNumFreqBins];
+  ComplexMatrixF uniform_cov_mat_[kNumFreqBins];
+  // Array of length |kNumFreqBins|, Matrix of size |num_input_channels_| x
+  // |num_input_channels_|. The vector has a size equal to the number of
+  // interferer scenarios.
+  std::vector<std::unique_ptr<ComplexMatrixF>> interf_cov_mats_[kNumFreqBins];
+
+  // Of length |kNumFreqBins|.
+  float wave_numbers_[kNumFreqBins];
+
+  // Preallocated for ProcessAudioBlock()
+  // Of length |kNumFreqBins|.
+  float rxiws_[kNumFreqBins];
+  // The vector has a size equal to the number of interferer scenarios.
+  std::vector<float> rpsiws_[kNumFreqBins];
+
+  // The microphone normalization factor.
+  ComplexMatrixF eig_m_;
+
+  // For processing the high-frequency input signal.
+  float high_pass_postfilter_mask_;
+  float old_high_pass_mask_;
+
+  // True when the target signal is present.
+  bool is_target_present_;
+  // Number of blocks after which the data is considered interference if the
+  // mask does not pass |kMaskSignalThreshold|.
+  size_t hold_target_blocks_;
+  // Number of blocks since the last mask that passed |kMaskSignalThreshold|.
+  size_t interference_blocks_count_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_BEAMFORMER_NONLINEAR_BEAMFORMER_H_
diff --git a/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc b/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc
new file mode 100644
index 0000000..296cd6d
--- /dev/null
+++ b/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc
@@ -0,0 +1,96 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <vector>
+
+#include "common_audio/channel_buffer.h"
+#include "common_audio/wav_file.h"
+#include "modules/audio_processing/beamformer/nonlinear_beamformer.h"
+#include "modules/audio_processing/test/test_utils.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/flags.h"
+#include "rtc_base/format_macros.h"
+
+DEFINE_string(i, "", "The name of the input file to read from.");
+DEFINE_string(o, "out.wav", "Name of the output file to write to.");
+DEFINE_string(mic_positions, "",
+    "Space delimited cartesian coordinates of microphones in meters. "
+    "The coordinates of each point are contiguous. "
+    "For a two element array: \"x1 y1 z1 x2 y2 z2\"");
+DEFINE_bool(help, false, "Prints this message.");
+
+namespace webrtc {
+namespace {
+
+const int kChunksPerSecond = 100;
+const int kChunkSizeMs = 1000 / kChunksPerSecond;
+
+const char kUsage[] =
+    "Command-line tool to run beamforming on WAV files. The signal is passed\n"
+    "in as a single band, unlike the audio processing interface which splits\n"
+    "signals into multiple bands.\n";
+
+}  // namespace
+
+int main(int argc, char* argv[]) {
+  if (rtc::FlagList::SetFlagsFromCommandLine(&argc, argv, true) ||
+      FLAG_help || argc != 1) {
+    printf("%s", kUsage);
+    if (FLAG_help) {
+      rtc::FlagList::Print(nullptr, false);
+      return 0;
+    }
+    return 1;
+  }
+
+  WavReader in_file(FLAG_i);
+  WavWriter out_file(FLAG_o, in_file.sample_rate(), in_file.num_channels());
+
+  const size_t num_mics = in_file.num_channels();
+  const std::vector<Point> array_geometry =
+      ParseArrayGeometry(FLAG_mic_positions, num_mics);
+  RTC_CHECK_EQ(array_geometry.size(), num_mics);
+
+  NonlinearBeamformer bf(array_geometry, array_geometry.size());
+  bf.Initialize(kChunkSizeMs, in_file.sample_rate());
+
+  printf("Input file: %s\nChannels: %" PRIuS ", Sample rate: %d Hz\n\n",
+         FLAG_i, in_file.num_channels(), in_file.sample_rate());
+  printf("Output file: %s\nChannels: %" PRIuS ", Sample rate: %d Hz\n\n",
+         FLAG_o, out_file.num_channels(), out_file.sample_rate());
+
+  ChannelBuffer<float> buf(
+      rtc::CheckedDivExact(in_file.sample_rate(), kChunksPerSecond),
+      in_file.num_channels());
+
+  std::vector<float> interleaved(buf.size());
+  while (in_file.ReadSamples(interleaved.size(),
+                             &interleaved[0]) == interleaved.size()) {
+    FloatS16ToFloat(&interleaved[0], interleaved.size(), &interleaved[0]);
+    Deinterleave(&interleaved[0], buf.num_frames(),
+                 buf.num_channels(), buf.channels());
+
+    bf.AnalyzeChunk(buf);
+    bf.PostFilter(&buf);
+
+    Interleave(buf.channels(), buf.num_frames(),
+               buf.num_channels(), &interleaved[0]);
+    FloatToFloatS16(&interleaved[0], interleaved.size(), &interleaved[0]);
+    out_file.WriteSamples(&interleaved[0], interleaved.size());
+  }
+
+  return 0;
+}
+
+}  // namespace webrtc
+
+int main(int argc, char* argv[]) {
+  return webrtc::main(argc, argv);
+}
diff --git a/modules/audio_processing/beamformer/nonlinear_beamformer_unittest.cc b/modules/audio_processing/beamformer/nonlinear_beamformer_unittest.cc
new file mode 100644
index 0000000..78b2f0a
--- /dev/null
+++ b/modules/audio_processing/beamformer/nonlinear_beamformer_unittest.cc
@@ -0,0 +1,375 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+
+#include "modules/audio_processing/beamformer/nonlinear_beamformer.h"
+
+#include <math.h>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/test/audio_buffer_tools.h"
+#include "modules/audio_processing/test/bitexactness_tools.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+const int kChunkSizeMs = 10;
+const int kSampleRateHz = 16000;
+
+SphericalPointf AzimuthToSphericalPoint(float azimuth_radians) {
+  return SphericalPointf(azimuth_radians, 0.f, 1.f);
+}
+
+void Verify(NonlinearBeamformer* bf, float target_azimuth_radians) {
+  EXPECT_TRUE(bf->IsInBeam(AzimuthToSphericalPoint(target_azimuth_radians)));
+  EXPECT_TRUE(bf->IsInBeam(AzimuthToSphericalPoint(
+      target_azimuth_radians - NonlinearBeamformer::kHalfBeamWidthRadians +
+      0.001f)));
+  EXPECT_TRUE(bf->IsInBeam(AzimuthToSphericalPoint(
+      target_azimuth_radians + NonlinearBeamformer::kHalfBeamWidthRadians -
+      0.001f)));
+  EXPECT_FALSE(bf->IsInBeam(AzimuthToSphericalPoint(
+      target_azimuth_radians - NonlinearBeamformer::kHalfBeamWidthRadians -
+      0.001f)));
+  EXPECT_FALSE(bf->IsInBeam(AzimuthToSphericalPoint(
+      target_azimuth_radians + NonlinearBeamformer::kHalfBeamWidthRadians +
+      0.001f)));
+}
+
+void AimAndVerify(NonlinearBeamformer* bf, float target_azimuth_radians) {
+  bf->AimAt(AzimuthToSphericalPoint(target_azimuth_radians));
+  Verify(bf, target_azimuth_radians);
+}
+
+// Bitexactness test code.
+const size_t kNumFramesToProcess = 1000;
+
+void ProcessOneFrame(int sample_rate_hz,
+                     AudioBuffer* capture_audio_buffer,
+                     NonlinearBeamformer* beamformer) {
+  if (sample_rate_hz > AudioProcessing::kSampleRate16kHz) {
+    capture_audio_buffer->SplitIntoFrequencyBands();
+  }
+
+  beamformer->AnalyzeChunk(*capture_audio_buffer->split_data_f());
+  capture_audio_buffer->set_num_channels(1);
+  beamformer->PostFilter(capture_audio_buffer->split_data_f());
+
+  if (sample_rate_hz > AudioProcessing::kSampleRate16kHz) {
+    capture_audio_buffer->MergeFrequencyBands();
+  }
+}
+
+int BeamformerSampleRate(int sample_rate_hz) {
+  return (sample_rate_hz > AudioProcessing::kSampleRate16kHz
+              ? AudioProcessing::kSampleRate16kHz
+              : sample_rate_hz);
+}
+
+void RunBitExactnessTest(int sample_rate_hz,
+                         const std::vector<Point>& array_geometry,
+                         const SphericalPointf& target_direction,
+                         rtc::ArrayView<const float> output_reference) {
+  NonlinearBeamformer beamformer(array_geometry, 1u, target_direction);
+  beamformer.Initialize(AudioProcessing::kChunkSizeMs,
+                        BeamformerSampleRate(sample_rate_hz));
+
+  const StreamConfig capture_config(sample_rate_hz, array_geometry.size(),
+                                    false);
+  AudioBuffer capture_buffer(
+      capture_config.num_frames(), capture_config.num_channels(),
+      capture_config.num_frames(), capture_config.num_channels(),
+      capture_config.num_frames());
+  test::InputAudioFile capture_file(
+      test::GetApmCaptureTestVectorFileName(sample_rate_hz));
+  std::vector<float> capture_input(capture_config.num_frames() *
+                                   capture_config.num_channels());
+  for (size_t frame_no = 0u; frame_no < kNumFramesToProcess; ++frame_no) {
+    ReadFloatSamplesFromStereoFile(capture_config.num_frames(),
+                                   capture_config.num_channels(), &capture_file,
+                                   capture_input);
+
+    test::CopyVectorToAudioBuffer(capture_config, capture_input,
+                                  &capture_buffer);
+
+    ProcessOneFrame(sample_rate_hz, &capture_buffer, &beamformer);
+  }
+
+  // Extract and verify the test results.
+  std::vector<float> capture_output;
+  test::ExtractVectorFromAudioBuffer(capture_config, &capture_buffer,
+                                     &capture_output);
+
+  const float kElementErrorBound = 1.f / static_cast<float>(1 << 15);
+
+  // Compare the output with the reference. Only the first values of the output
+  // from last frame processed are compared in order not having to specify all
+  // preceeding frames as testvectors. As the algorithm being tested has a
+  // memory, testing only the last frame implicitly also tests the preceeding
+  // frames.
+  EXPECT_TRUE(test::VerifyDeinterleavedArray(
+      capture_config.num_frames(), capture_config.num_channels(),
+      output_reference, capture_output, kElementErrorBound));
+}
+
+// TODO(peah): Add bitexactness tests for scenarios with more than 2 input
+// channels.
+std::vector<Point> CreateArrayGeometry(int variant) {
+  std::vector<Point> array_geometry;
+  switch (variant) {
+    case 1:
+      array_geometry.push_back(Point(-0.025f, 0.f, 0.f));
+      array_geometry.push_back(Point(0.025f, 0.f, 0.f));
+      break;
+    case 2:
+      array_geometry.push_back(Point(-0.035f, 0.f, 0.f));
+      array_geometry.push_back(Point(0.035f, 0.f, 0.f));
+      break;
+    case 3:
+      array_geometry.push_back(Point(-0.5f, 0.f, 0.f));
+      array_geometry.push_back(Point(0.5f, 0.f, 0.f));
+      break;
+    default:
+      RTC_CHECK(false);
+  }
+  return array_geometry;
+}
+
+const SphericalPointf TargetDirection1(0.4f * static_cast<float>(M_PI) / 2.f,
+                                       0.f,
+                                       1.f);
+const SphericalPointf TargetDirection2(static_cast<float>(M_PI) / 2.f,
+                                       1.f,
+                                       2.f);
+
+}  // namespace
+
+TEST(NonlinearBeamformerTest, AimingModifiesBeam) {
+  std::vector<Point> array_geometry;
+  array_geometry.push_back(Point(-0.025f, 0.f, 0.f));
+  array_geometry.push_back(Point(0.025f, 0.f, 0.f));
+  NonlinearBeamformer bf(array_geometry, 1u);
+  bf.Initialize(kChunkSizeMs, kSampleRateHz);
+  // The default constructor parameter sets the target angle to PI / 2.
+  Verify(&bf, static_cast<float>(M_PI) / 2.f);
+  AimAndVerify(&bf, static_cast<float>(M_PI) / 3.f);
+  AimAndVerify(&bf, 3.f * static_cast<float>(M_PI) / 4.f);
+  AimAndVerify(&bf, static_cast<float>(M_PI) / 6.f);
+  AimAndVerify(&bf, static_cast<float>(M_PI));
+}
+
+TEST(NonlinearBeamformerTest, InterfAnglesTakeAmbiguityIntoAccount) {
+  {
+    // For linear arrays there is ambiguity.
+    std::vector<Point> array_geometry;
+    array_geometry.push_back(Point(-0.1f, 0.f, 0.f));
+    array_geometry.push_back(Point(0.f, 0.f, 0.f));
+    array_geometry.push_back(Point(0.2f, 0.f, 0.f));
+    NonlinearBeamformer bf(array_geometry, 1u);
+    bf.Initialize(kChunkSizeMs, kSampleRateHz);
+    EXPECT_EQ(2u, bf.interf_angles_radians_.size());
+    EXPECT_FLOAT_EQ(M_PI / 2.f - bf.away_radians_,
+                    bf.interf_angles_radians_[0]);
+    EXPECT_FLOAT_EQ(M_PI / 2.f + bf.away_radians_,
+                    bf.interf_angles_radians_[1]);
+    bf.AimAt(AzimuthToSphericalPoint(bf.away_radians_ / 2.f));
+    EXPECT_EQ(2u, bf.interf_angles_radians_.size());
+    EXPECT_FLOAT_EQ(M_PI - bf.away_radians_ / 2.f,
+                    bf.interf_angles_radians_[0]);
+    EXPECT_FLOAT_EQ(3.f * bf.away_radians_ / 2.f, bf.interf_angles_radians_[1]);
+  }
+  {
+    // For planar arrays with normal in the xy-plane there is ambiguity.
+    std::vector<Point> array_geometry;
+    array_geometry.push_back(Point(-0.1f, 0.f, 0.f));
+    array_geometry.push_back(Point(0.f, 0.f, 0.f));
+    array_geometry.push_back(Point(0.2f, 0.f, 0.f));
+    array_geometry.push_back(Point(0.1f, 0.f, 0.2f));
+    array_geometry.push_back(Point(0.f, 0.f, -0.1f));
+    NonlinearBeamformer bf(array_geometry, 1u);
+    bf.Initialize(kChunkSizeMs, kSampleRateHz);
+    EXPECT_EQ(2u, bf.interf_angles_radians_.size());
+    EXPECT_FLOAT_EQ(M_PI / 2.f - bf.away_radians_,
+                    bf.interf_angles_radians_[0]);
+    EXPECT_FLOAT_EQ(M_PI / 2.f + bf.away_radians_,
+                    bf.interf_angles_radians_[1]);
+    bf.AimAt(AzimuthToSphericalPoint(bf.away_radians_ / 2.f));
+    EXPECT_EQ(2u, bf.interf_angles_radians_.size());
+    EXPECT_FLOAT_EQ(M_PI - bf.away_radians_ / 2.f,
+                    bf.interf_angles_radians_[0]);
+    EXPECT_FLOAT_EQ(3.f * bf.away_radians_ / 2.f, bf.interf_angles_radians_[1]);
+  }
+  {
+    // For planar arrays with normal not in the xy-plane there is no ambiguity.
+    std::vector<Point> array_geometry;
+    array_geometry.push_back(Point(0.f, 0.f, 0.f));
+    array_geometry.push_back(Point(0.2f, 0.f, 0.f));
+    array_geometry.push_back(Point(0.f, 0.1f, -0.2f));
+    NonlinearBeamformer bf(array_geometry, 1u);
+    bf.Initialize(kChunkSizeMs, kSampleRateHz);
+    EXPECT_EQ(2u, bf.interf_angles_radians_.size());
+    EXPECT_FLOAT_EQ(M_PI / 2.f - bf.away_radians_,
+                    bf.interf_angles_radians_[0]);
+    EXPECT_FLOAT_EQ(M_PI / 2.f + bf.away_radians_,
+                    bf.interf_angles_radians_[1]);
+    bf.AimAt(AzimuthToSphericalPoint(bf.away_radians_ / 2.f));
+    EXPECT_EQ(2u, bf.interf_angles_radians_.size());
+    EXPECT_FLOAT_EQ(-bf.away_radians_ / 2.f, bf.interf_angles_radians_[0]);
+    EXPECT_FLOAT_EQ(3.f * bf.away_radians_ / 2.f, bf.interf_angles_radians_[1]);
+  }
+  {
+    // For arrays which are not linear or planar there is no ambiguity.
+    std::vector<Point> array_geometry;
+    array_geometry.push_back(Point(0.f, 0.f, 0.f));
+    array_geometry.push_back(Point(0.1f, 0.f, 0.f));
+    array_geometry.push_back(Point(0.f, 0.2f, 0.f));
+    array_geometry.push_back(Point(0.f, 0.f, 0.3f));
+    NonlinearBeamformer bf(array_geometry, 1u);
+    bf.Initialize(kChunkSizeMs, kSampleRateHz);
+    EXPECT_EQ(2u, bf.interf_angles_radians_.size());
+    EXPECT_FLOAT_EQ(M_PI / 2.f - bf.away_radians_,
+                    bf.interf_angles_radians_[0]);
+    EXPECT_FLOAT_EQ(M_PI / 2.f + bf.away_radians_,
+                    bf.interf_angles_radians_[1]);
+    bf.AimAt(AzimuthToSphericalPoint(bf.away_radians_ / 2.f));
+    EXPECT_EQ(2u, bf.interf_angles_radians_.size());
+    EXPECT_FLOAT_EQ(-bf.away_radians_ / 2.f, bf.interf_angles_radians_[0]);
+    EXPECT_FLOAT_EQ(3.f * bf.away_radians_ / 2.f, bf.interf_angles_radians_[1]);
+  }
+}
+
+// TODO(peah): Investigate why the nonlinear_beamformer.cc causes a DCHECK in
+// this setup.
+TEST(BeamformerBitExactnessTest,
+     DISABLED_Stereo8kHz_ArrayGeometry1_TargetDirection1) {
+  const float kOutputReference[] = {0.001318f, -0.001091f, 0.000990f,
+                                    0.001318f, -0.001091f, 0.000990f};
+
+  RunBitExactnessTest(AudioProcessing::kSampleRate8kHz, CreateArrayGeometry(1),
+                      TargetDirection1, kOutputReference);
+}
+
+TEST(BeamformerBitExactnessTest,
+     Stereo16kHz_ArrayGeometry1_TargetDirection1) {
+  const float kOutputReference[] = {-0.000077f, -0.000147f, -0.000138f,
+                                    -0.000077f, -0.000147f, -0.000138f};
+
+  RunBitExactnessTest(AudioProcessing::kSampleRate16kHz, CreateArrayGeometry(1),
+                      TargetDirection1, kOutputReference);
+}
+
+TEST(BeamformerBitExactnessTest,
+     Stereo32kHz_ArrayGeometry1_TargetDirection1) {
+  const float kOutputReference[] = {-0.000061f, -0.000061f, -0.000061f,
+                                    -0.000061f, -0.000061f, -0.000061f};
+
+  RunBitExactnessTest(AudioProcessing::kSampleRate32kHz, CreateArrayGeometry(1),
+                      TargetDirection1, kOutputReference);
+}
+
+TEST(BeamformerBitExactnessTest,
+     Stereo48kHz_ArrayGeometry1_TargetDirection1) {
+  const float kOutputReference[] = {0.000450f, 0.000436f, 0.000433f,
+                                    0.000450f, 0.000436f, 0.000433f};
+
+  RunBitExactnessTest(AudioProcessing::kSampleRate48kHz, CreateArrayGeometry(1),
+                      TargetDirection1, kOutputReference);
+}
+
+// TODO(peah): Investigate why the nonlinear_beamformer.cc causes a DCHECK in
+// this setup.
+TEST(BeamformerBitExactnessTest,
+     DISABLED_Stereo8kHz_ArrayGeometry1_TargetDirection2) {
+  const float kOutputReference[] = {0.001144f,  -0.001026f, 0.001074f,
+                                    -0.016205f, -0.007324f, -0.015656f};
+
+  RunBitExactnessTest(AudioProcessing::kSampleRate8kHz, CreateArrayGeometry(1),
+                      TargetDirection2, kOutputReference);
+}
+
+TEST(BeamformerBitExactnessTest,
+     Stereo16kHz_ArrayGeometry1_TargetDirection2) {
+  const float kOutputReference[] = {0.000221f, -0.000249f, 0.000140f,
+                                    0.000221f, -0.000249f, 0.000140f};
+
+  RunBitExactnessTest(AudioProcessing::kSampleRate16kHz, CreateArrayGeometry(1),
+                      TargetDirection2, kOutputReference);
+}
+
+TEST(BeamformerBitExactnessTest,
+     Stereo32kHz_ArrayGeometry1_TargetDirection2) {
+  const float kOutputReference[] = {0.000763f, -0.000336f, 0.000549f,
+                                    0.000763f, -0.000336f, 0.000549f};
+
+  RunBitExactnessTest(AudioProcessing::kSampleRate32kHz, CreateArrayGeometry(1),
+                      TargetDirection2, kOutputReference);
+}
+
+TEST(BeamformerBitExactnessTest,
+     Stereo48kHz_ArrayGeometry1_TargetDirection2) {
+  const float kOutputReference[] = {-0.000004f, -0.000494f, 0.000255f,
+                                    -0.000004f, -0.000494f, 0.000255f};
+
+  RunBitExactnessTest(AudioProcessing::kSampleRate48kHz, CreateArrayGeometry(1),
+                      TargetDirection2, kOutputReference);
+}
+
+TEST(BeamformerBitExactnessTest,
+     Stereo8kHz_ArrayGeometry2_TargetDirection2) {
+  const float kOutputReference[] = {-0.000914f, 0.002170f, -0.002382f,
+                                    -0.000914f, 0.002170f, -0.002382f};
+
+  RunBitExactnessTest(AudioProcessing::kSampleRate8kHz, CreateArrayGeometry(2),
+                      TargetDirection2, kOutputReference);
+}
+
+TEST(BeamformerBitExactnessTest,
+     Stereo16kHz_ArrayGeometry2_TargetDirection2) {
+  const float kOutputReference[] = {0.000179f, -0.000179f, 0.000081f,
+                                    0.000179f, -0.000179f, 0.000081f};
+
+  RunBitExactnessTest(AudioProcessing::kSampleRate16kHz, CreateArrayGeometry(2),
+                      TargetDirection2, kOutputReference);
+}
+
+TEST(BeamformerBitExactnessTest,
+     Stereo32kHz_ArrayGeometry2_TargetDirection2) {
+  const float kOutputReference[] = {0.000549f, -0.000214f, 0.000366f,
+                                    0.000549f, -0.000214f, 0.000366f};
+
+  RunBitExactnessTest(AudioProcessing::kSampleRate32kHz, CreateArrayGeometry(2),
+                      TargetDirection2, kOutputReference);
+}
+
+TEST(BeamformerBitExactnessTest,
+     Stereo48kHz_ArrayGeometry2_TargetDirection2) {
+  const float kOutputReference[] = {0.000019f, -0.000310f, 0.000182f,
+                                    0.000019f, -0.000310f, 0.000182f};
+
+  RunBitExactnessTest(AudioProcessing::kSampleRate48kHz, CreateArrayGeometry(2),
+                      TargetDirection2, kOutputReference);
+}
+
+// TODO(peah): Investigate why the nonlinear_beamformer.cc causes a DCHECK in
+// this setup.
+TEST(BeamformerBitExactnessTest,
+     DISABLED_Stereo16kHz_ArrayGeometry3_TargetDirection1) {
+  const float kOutputReference[] = {-0.000161f, 0.000171f, -0.000096f,
+                                    0.001007f,  0.000427f, 0.000977f};
+
+  RunBitExactnessTest(AudioProcessing::kSampleRate16kHz, CreateArrayGeometry(3),
+                      TargetDirection1, kOutputReference);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/common.h b/modules/audio_processing/common.h
new file mode 100644
index 0000000..d8532c5
--- /dev/null
+++ b/modules/audio_processing/common.h
@@ -0,0 +1,34 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_COMMON_H_
+#define MODULES_AUDIO_PROCESSING_COMMON_H_
+
+#include "modules/audio_processing/include/audio_processing.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+static inline size_t ChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
+  switch (layout) {
+    case AudioProcessing::kMono:
+    case AudioProcessing::kMonoAndKeyboard:
+      return 1;
+    case AudioProcessing::kStereo:
+    case AudioProcessing::kStereoAndKeyboard:
+      return 2;
+  }
+  RTC_NOTREACHED();
+  return 0;
+}
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_COMMON_H_
diff --git a/modules/audio_processing/config_unittest.cc b/modules/audio_processing/config_unittest.cc
new file mode 100644
index 0000000..8776ee3
--- /dev/null
+++ b/modules/audio_processing/config_unittest.cc
@@ -0,0 +1,83 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/include/config.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+struct MyExperiment {
+  static const ConfigOptionID identifier = ConfigOptionID::kMyExperimentForTest;
+  static const int kDefaultFactor;
+  static const int kDefaultOffset;
+
+  MyExperiment()
+    : factor(kDefaultFactor), offset(kDefaultOffset) {}
+
+  MyExperiment(int factor, int offset)
+    : factor(factor), offset(offset) {}
+
+  int factor;
+  int offset;
+};
+
+const int MyExperiment::kDefaultFactor = 1;
+const int MyExperiment::kDefaultOffset = 2;
+
+TEST(Config, ReturnsDefaultInstanceIfNotConfigured) {
+  Config config;
+  const MyExperiment& my_exp = config.Get<MyExperiment>();
+  EXPECT_EQ(MyExperiment::kDefaultFactor, my_exp.factor);
+  EXPECT_EQ(MyExperiment::kDefaultOffset, my_exp.offset);
+}
+
+TEST(Config, ReturnOptionWhenSet) {
+  Config config;
+  config.Set<MyExperiment>(new MyExperiment(5, 1));
+  const MyExperiment& my_exp = config.Get<MyExperiment>();
+  EXPECT_EQ(5, my_exp.factor);
+  EXPECT_EQ(1, my_exp.offset);
+}
+
+TEST(Config, SetNullSetsTheOptionBackToDefault) {
+  Config config;
+  config.Set<MyExperiment>(new MyExperiment(5, 1));
+  config.Set<MyExperiment>(NULL);
+  const MyExperiment& my_exp = config.Get<MyExperiment>();
+  EXPECT_EQ(MyExperiment::kDefaultFactor, my_exp.factor);
+  EXPECT_EQ(MyExperiment::kDefaultOffset, my_exp.offset);
+}
+
+struct Algo1_CostFunction {
+  static const ConfigOptionID identifier =
+      ConfigOptionID::kAlgo1CostFunctionForTest;
+  Algo1_CostFunction() {}
+
+  virtual int cost(int x) const {
+    return x;
+  }
+
+  virtual ~Algo1_CostFunction() {}
+};
+
+struct SqrCost : Algo1_CostFunction {
+  virtual int cost(int x) const {
+    return x*x;
+  }
+};
+
+TEST(Config, SupportsPolymorphism) {
+  Config config;
+  config.Set<Algo1_CostFunction>(new SqrCost());
+  EXPECT_EQ(25, config.Get<Algo1_CostFunction>().cost(5));
+}
+}  // namespace
+}  // namespace webrtc
diff --git a/modules/audio_processing/debug.proto b/modules/audio_processing/debug.proto
new file mode 100644
index 0000000..4417773
--- /dev/null
+++ b/modules/audio_processing/debug.proto
@@ -0,0 +1,94 @@
+syntax = "proto2";
+option optimize_for = LITE_RUNTIME;
+package webrtc.audioproc;
+
+// Contains the format of input/output/reverse audio. An Init message is added
+// when any of the fields are changed.
+message Init {
+  optional int32 sample_rate = 1;
+  optional int32 device_sample_rate = 2 [deprecated=true];
+  optional int32 num_input_channels = 3;
+  optional int32 num_output_channels = 4;
+  optional int32 num_reverse_channels = 5;
+  optional int32 reverse_sample_rate = 6;
+  optional int32 output_sample_rate = 7;
+  optional int32 reverse_output_sample_rate = 8;
+  optional int32 num_reverse_output_channels = 9;
+}
+
+// May contain interleaved or deinterleaved data, but don't store both formats.
+message ReverseStream {
+  // int16 interleaved data.
+  optional bytes data = 1;
+
+  // float deinterleaved data, where each repeated element points to a single
+  // channel buffer of data.
+  repeated bytes channel = 2;
+}
+
+// May contain interleaved or deinterleaved data, but don't store both formats.
+message Stream {
+  // int16 interleaved data.
+  optional bytes input_data = 1;
+  optional bytes output_data = 2;
+
+  optional int32 delay = 3;
+  optional sint32 drift = 4;
+  optional int32 level = 5;
+  optional bool keypress = 6;
+
+  // float deinterleaved data, where each repeated element points to a single
+  // channel buffer of data.
+  repeated bytes input_channel = 7;
+  repeated bytes output_channel = 8;
+}
+
+// Contains the configurations of various APM component. A Config message is
+// added when any of the fields are changed.
+message Config {
+  // Next field number 19.
+  // Acoustic echo canceler.
+  optional bool aec_enabled = 1;
+  optional bool aec_delay_agnostic_enabled = 2;
+  optional bool aec_drift_compensation_enabled = 3;
+  optional bool aec_extended_filter_enabled = 4;
+  optional int32 aec_suppression_level = 5;
+  // Mobile AEC.
+  optional bool aecm_enabled = 6;
+  optional bool aecm_comfort_noise_enabled = 7;
+  optional int32 aecm_routing_mode = 8;
+  // Automatic gain controller.
+  optional bool agc_enabled = 9;
+  optional int32 agc_mode = 10;
+  optional bool agc_limiter_enabled = 11;
+  optional bool noise_robust_agc_enabled = 12;
+  // High pass filter.
+  optional bool hpf_enabled = 13;
+  // Noise suppression.
+  optional bool ns_enabled = 14;
+  optional int32 ns_level = 15;
+  // Transient suppression.
+  optional bool transient_suppression_enabled = 16;
+  // Semicolon-separated string containing experimental feature
+  // descriptions.
+  optional string experiments_description = 17;
+  // Intelligibility Enhancer
+  optional bool intelligibility_enhancer_enabled = 18;
+}
+
+message Event {
+  enum Type {
+    INIT = 0;
+    REVERSE_STREAM = 1;
+    STREAM = 2;
+    CONFIG = 3;
+    UNKNOWN_EVENT = 4;
+  }
+
+  required Type type = 1;
+
+  optional Init init = 2;
+  optional ReverseStream reverse_stream = 3;
+  optional Stream stream = 4;
+  optional Config config = 5;
+}
diff --git a/modules/audio_processing/echo_cancellation_bit_exact_unittest.cc b/modules/audio_processing/echo_cancellation_bit_exact_unittest.cc
new file mode 100644
index 0000000..857cb1c
--- /dev/null
+++ b/modules/audio_processing/echo_cancellation_bit_exact_unittest.cc
@@ -0,0 +1,357 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/echo_cancellation_impl.h"
+#include "modules/audio_processing/test/audio_buffer_tools.h"
+#include "modules/audio_processing/test/bitexactness_tools.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+const int kNumFramesToProcess = 100;
+
+void SetupComponent(int sample_rate_hz,
+                    EchoCancellation::SuppressionLevel suppression_level,
+                    bool drift_compensation_enabled,
+                    EchoCancellationImpl* echo_canceller) {
+  echo_canceller->Initialize(sample_rate_hz, 1, 1, 1);
+  EchoCancellation* ec = static_cast<EchoCancellation*>(echo_canceller);
+  ec->Enable(true);
+  ec->set_suppression_level(suppression_level);
+  ec->enable_drift_compensation(drift_compensation_enabled);
+
+  Config config;
+  config.Set<DelayAgnostic>(new DelayAgnostic(true));
+  config.Set<ExtendedFilter>(new ExtendedFilter(true));
+  echo_canceller->SetExtraOptions(config);
+}
+
+void ProcessOneFrame(int sample_rate_hz,
+                     int stream_delay_ms,
+                     bool drift_compensation_enabled,
+                     int stream_drift_samples,
+                     AudioBuffer* render_audio_buffer,
+                     AudioBuffer* capture_audio_buffer,
+                     EchoCancellationImpl* echo_canceller) {
+  if (sample_rate_hz > AudioProcessing::kSampleRate16kHz) {
+    render_audio_buffer->SplitIntoFrequencyBands();
+    capture_audio_buffer->SplitIntoFrequencyBands();
+  }
+
+  std::vector<float> render_audio;
+  EchoCancellationImpl::PackRenderAudioBuffer(
+      render_audio_buffer, 1, render_audio_buffer->num_channels(),
+      &render_audio);
+  echo_canceller->ProcessRenderAudio(render_audio);
+
+  if (drift_compensation_enabled) {
+    static_cast<EchoCancellation*>(echo_canceller)
+        ->set_stream_drift_samples(stream_drift_samples);
+  }
+
+  echo_canceller->ProcessCaptureAudio(capture_audio_buffer, stream_delay_ms);
+
+  if (sample_rate_hz > AudioProcessing::kSampleRate16kHz) {
+    capture_audio_buffer->MergeFrequencyBands();
+  }
+}
+
+void RunBitexactnessTest(int sample_rate_hz,
+                         size_t num_channels,
+                         int stream_delay_ms,
+                         bool drift_compensation_enabled,
+                         int stream_drift_samples,
+                         EchoCancellation::SuppressionLevel suppression_level,
+                         bool stream_has_echo_reference,
+                         const rtc::ArrayView<const float>& output_reference) {
+  rtc::CriticalSection crit_render;
+  rtc::CriticalSection crit_capture;
+  EchoCancellationImpl echo_canceller(&crit_render, &crit_capture);
+  SetupComponent(sample_rate_hz, suppression_level, drift_compensation_enabled,
+                 &echo_canceller);
+
+  const int samples_per_channel = rtc::CheckedDivExact(sample_rate_hz, 100);
+  const StreamConfig render_config(sample_rate_hz, num_channels, false);
+  AudioBuffer render_buffer(
+      render_config.num_frames(), render_config.num_channels(),
+      render_config.num_frames(), 1, render_config.num_frames());
+  test::InputAudioFile render_file(
+      test::GetApmRenderTestVectorFileName(sample_rate_hz));
+  std::vector<float> render_input(samples_per_channel * num_channels);
+
+  const StreamConfig capture_config(sample_rate_hz, num_channels, false);
+  AudioBuffer capture_buffer(
+      capture_config.num_frames(), capture_config.num_channels(),
+      capture_config.num_frames(), 1, capture_config.num_frames());
+  test::InputAudioFile capture_file(
+      test::GetApmCaptureTestVectorFileName(sample_rate_hz));
+  std::vector<float> capture_input(samples_per_channel * num_channels);
+
+  for (int frame_no = 0; frame_no < kNumFramesToProcess; ++frame_no) {
+    ReadFloatSamplesFromStereoFile(samples_per_channel, num_channels,
+                                   &render_file, render_input);
+    ReadFloatSamplesFromStereoFile(samples_per_channel, num_channels,
+                                   &capture_file, capture_input);
+
+    test::CopyVectorToAudioBuffer(render_config, render_input, &render_buffer);
+    test::CopyVectorToAudioBuffer(capture_config, capture_input,
+                                  &capture_buffer);
+
+    ProcessOneFrame(sample_rate_hz, stream_delay_ms, drift_compensation_enabled,
+                    stream_drift_samples, &render_buffer, &capture_buffer,
+                    &echo_canceller);
+  }
+
+  // Extract and verify the test results.
+  std::vector<float> capture_output;
+  test::ExtractVectorFromAudioBuffer(capture_config, &capture_buffer,
+                                     &capture_output);
+
+  EXPECT_EQ(stream_has_echo_reference,
+            static_cast<EchoCancellation*>(&echo_canceller)->stream_has_echo());
+
+  // Compare the output with the reference. Only the first values of the output
+  // from last frame processed are compared in order not having to specify all
+  // preceeding frames as testvectors. As the algorithm being tested has a
+  // memory, testing only the last frame implicitly also tests the preceeding
+  // frames.
+  const float kElementErrorBound = 1.0f / 32768.0f;
+  EXPECT_TRUE(test::VerifyDeinterleavedArray(
+      capture_config.num_frames(), capture_config.num_channels(),
+      output_reference, capture_output, kElementErrorBound));
+}
+
+const bool kStreamHasEchoReference = true;
+
+}  // namespace
+
+// TODO(peah): Activate all these tests for ARM and ARM64 once the issue on the
+// Chromium ARM and ARM64 boths have been identified. This is tracked in the
+// issue https://bugs.chromium.org/p/webrtc/issues/detail?id=5711.
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(EchoCancellationBitExactnessTest,
+     Mono8kHz_HighLevel_NoDrift_StreamDelay0) {
+#else
+TEST(EchoCancellationBitExactnessTest,
+     DISABLED_Mono8kHz_HighLevel_NoDrift_StreamDelay0) {
+#endif
+  const float kOutputReference[] = {-0.000646f, -0.001525f, 0.002688f};
+  RunBitexactnessTest(8000, 1, 0, false, 0,
+                      EchoCancellation::SuppressionLevel::kHighSuppression,
+                      kStreamHasEchoReference, kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(EchoCancellationBitExactnessTest,
+     Mono16kHz_HighLevel_NoDrift_StreamDelay0) {
+#else
+TEST(EchoCancellationBitExactnessTest,
+     DISABLED_Mono16kHz_HighLevel_NoDrift_StreamDelay0) {
+#endif
+  const float kOutputReference[] = {0.000055f, 0.000421f, 0.001149f};
+  RunBitexactnessTest(16000, 1, 0, false, 0,
+                      EchoCancellation::SuppressionLevel::kHighSuppression,
+                      kStreamHasEchoReference, kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(EchoCancellationBitExactnessTest,
+     Mono32kHz_HighLevel_NoDrift_StreamDelay0) {
+#else
+TEST(EchoCancellationBitExactnessTest,
+     DISABLED_Mono32kHz_HighLevel_NoDrift_StreamDelay0) {
+#endif
+  const float kOutputReference[] = {-0.000671f, 0.000061f, -0.000031f};
+  RunBitexactnessTest(32000, 1, 0, false, 0,
+                      EchoCancellation::SuppressionLevel::kHighSuppression,
+                      kStreamHasEchoReference, kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(EchoCancellationBitExactnessTest,
+     Mono48kHz_HighLevel_NoDrift_StreamDelay0) {
+#else
+TEST(EchoCancellationBitExactnessTest,
+     DISABLED_Mono48kHz_HighLevel_NoDrift_StreamDelay0) {
+#endif
+  const float kOutputReference[] = {-0.001403f, -0.001411f, -0.000755f};
+  RunBitexactnessTest(48000, 1, 0, false, 0,
+                      EchoCancellation::SuppressionLevel::kHighSuppression,
+                      kStreamHasEchoReference, kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(EchoCancellationBitExactnessTest,
+     Mono16kHz_LowLevel_NoDrift_StreamDelay0) {
+#else
+TEST(EchoCancellationBitExactnessTest,
+     DISABLED_Mono16kHz_LowLevel_NoDrift_StreamDelay0) {
+#endif
+#if defined(WEBRTC_MAC)
+  const float kOutputReference[] = {-0.000145f, 0.000179f, 0.000917f};
+#else
+  const float kOutputReference[] = {-0.000009f, 0.000363f, 0.001094f};
+#endif
+  RunBitexactnessTest(16000, 1, 0, false, 0,
+                      EchoCancellation::SuppressionLevel::kLowSuppression,
+                      kStreamHasEchoReference, kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(EchoCancellationBitExactnessTest,
+     Mono16kHz_ModerateLevel_NoDrift_StreamDelay0) {
+#else
+TEST(EchoCancellationBitExactnessTest,
+     DISABLED_Mono16kHz_ModerateLevel_NoDrift_StreamDelay0) {
+#endif
+  const float kOutputReference[] = {0.000055f, 0.000421f, 0.001149f};
+  RunBitexactnessTest(16000, 1, 0, false, 0,
+                      EchoCancellation::SuppressionLevel::kModerateSuppression,
+                      kStreamHasEchoReference, kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(EchoCancellationBitExactnessTest,
+     Mono16kHz_HighLevel_NoDrift_StreamDelay10) {
+#else
+TEST(EchoCancellationBitExactnessTest,
+     DISABLED_Mono16kHz_HighLevel_NoDrift_StreamDelay10) {
+#endif
+  const float kOutputReference[] = {0.000055f, 0.000421f, 0.001149f};
+  RunBitexactnessTest(16000, 1, 10, false, 0,
+                      EchoCancellation::SuppressionLevel::kHighSuppression,
+                      kStreamHasEchoReference, kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(EchoCancellationBitExactnessTest,
+     Mono16kHz_HighLevel_NoDrift_StreamDelay20) {
+#else
+TEST(EchoCancellationBitExactnessTest,
+     DISABLED_Mono16kHz_HighLevel_NoDrift_StreamDelay20) {
+#endif
+  const float kOutputReference[] = {0.000055f, 0.000421f, 0.001149f};
+  RunBitexactnessTest(16000, 1, 20, false, 0,
+                      EchoCancellation::SuppressionLevel::kHighSuppression,
+                      kStreamHasEchoReference, kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(EchoCancellationBitExactnessTest,
+     Mono16kHz_HighLevel_Drift0_StreamDelay0) {
+#else
+TEST(EchoCancellationBitExactnessTest,
+     DISABLED_Mono16kHz_HighLevel_Drift0_StreamDelay0) {
+#endif
+  const float kOutputReference[] = {0.000055f, 0.000421f, 0.001149f};
+  RunBitexactnessTest(16000, 1, 0, true, 0,
+                      EchoCancellation::SuppressionLevel::kHighSuppression,
+                      kStreamHasEchoReference, kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(EchoCancellationBitExactnessTest,
+     Mono16kHz_HighLevel_Drift5_StreamDelay0) {
+#else
+TEST(EchoCancellationBitExactnessTest,
+     DISABLED_Mono16kHz_HighLevel_Drift5_StreamDelay0) {
+#endif
+  const float kOutputReference[] = {0.000055f, 0.000421f, 0.001149f};
+  RunBitexactnessTest(16000, 1, 0, true, 5,
+                      EchoCancellation::SuppressionLevel::kHighSuppression,
+                      kStreamHasEchoReference, kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(EchoCancellationBitExactnessTest,
+     Stereo8kHz_HighLevel_NoDrift_StreamDelay0) {
+#else
+TEST(EchoCancellationBitExactnessTest,
+     DISABLED_Stereo8kHz_HighLevel_NoDrift_StreamDelay0) {
+#endif
+#if defined(WEBRTC_MAC)
+  const float kOutputReference[] = {-0.000392f, -0.001449f, 0.003004f,
+                                    -0.000392f, -0.001449f, 0.003004f};
+#else
+  const float kOutputReference[] = {-0.000464f, -0.001525f, 0.002933f,
+                                    -0.000464f, -0.001525f, 0.002933f};
+#endif
+  RunBitexactnessTest(8000, 2, 0, false, 0,
+                      EchoCancellation::SuppressionLevel::kHighSuppression,
+                      kStreamHasEchoReference, kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(EchoCancellationBitExactnessTest,
+     Stereo16kHz_HighLevel_NoDrift_StreamDelay0) {
+#else
+TEST(EchoCancellationBitExactnessTest,
+     DISABLED_Stereo16kHz_HighLevel_NoDrift_StreamDelay0) {
+#endif
+  const float kOutputReference[] = {0.000166f, 0.000735f, 0.000841f,
+                                    0.000166f, 0.000735f, 0.000841f};
+  RunBitexactnessTest(16000, 2, 0, false, 0,
+                      EchoCancellation::SuppressionLevel::kHighSuppression,
+                      kStreamHasEchoReference, kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(EchoCancellationBitExactnessTest,
+     Stereo32kHz_HighLevel_NoDrift_StreamDelay0) {
+#else
+TEST(EchoCancellationBitExactnessTest,
+     DISABLED_Stereo32kHz_HighLevel_NoDrift_StreamDelay0) {
+#endif
+#if defined(WEBRTC_MAC)
+  const float kOutputReference[] = {-0.000458f, 0.000244f, 0.000153f,
+                                    -0.000458f, 0.000244f, 0.000153f};
+#else
+  const float kOutputReference[] = {-0.000427f, 0.000183f, 0.000183f,
+                                    -0.000427f, 0.000183f, 0.000183f};
+#endif
+  RunBitexactnessTest(32000, 2, 0, false, 0,
+                      EchoCancellation::SuppressionLevel::kHighSuppression,
+                      kStreamHasEchoReference, kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(EchoCancellationBitExactnessTest,
+     Stereo48kHz_HighLevel_NoDrift_StreamDelay0) {
+#else
+TEST(EchoCancellationBitExactnessTest,
+     DISABLED_Stereo48kHz_HighLevel_NoDrift_StreamDelay0) {
+#endif
+  const float kOutputReference[] = {-0.001101f, -0.001101f, -0.000449f,
+                                    -0.001101f, -0.001101f, -0.000449f};
+  RunBitexactnessTest(48000, 2, 0, false, 0,
+                      EchoCancellation::SuppressionLevel::kHighSuppression,
+                      kStreamHasEchoReference, kOutputReference);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/echo_cancellation_impl.cc b/modules/audio_processing/echo_cancellation_impl.cc
new file mode 100644
index 0000000..99f676c
--- /dev/null
+++ b/modules/audio_processing/echo_cancellation_impl.cc
@@ -0,0 +1,516 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/echo_cancellation_impl.h"
+
+#include <string.h>
+
+#include "modules/audio_processing/aec/aec_core.h"
+#include "modules/audio_processing/aec/echo_cancellation.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+namespace {
+int16_t MapSetting(EchoCancellation::SuppressionLevel level) {
+  switch (level) {
+    case EchoCancellation::kLowSuppression:
+      return kAecNlpConservative;
+    case EchoCancellation::kModerateSuppression:
+      return kAecNlpModerate;
+    case EchoCancellation::kHighSuppression:
+      return kAecNlpAggressive;
+  }
+  RTC_NOTREACHED();
+  return -1;
+}
+
+AudioProcessing::Error MapError(int err) {
+  switch (err) {
+    case AEC_UNSUPPORTED_FUNCTION_ERROR:
+      return AudioProcessing::kUnsupportedFunctionError;
+    case AEC_BAD_PARAMETER_ERROR:
+      return AudioProcessing::kBadParameterError;
+    case AEC_BAD_PARAMETER_WARNING:
+      return AudioProcessing::kBadStreamParameterWarning;
+    default:
+      // AEC_UNSPECIFIED_ERROR
+      // AEC_UNINITIALIZED_ERROR
+      // AEC_NULL_POINTER_ERROR
+      return AudioProcessing::kUnspecifiedError;
+  }
+}
+
+bool EnforceZeroStreamDelay() {
+#if defined(CHROMEOS)
+  return !field_trial::IsEnabled("WebRTC-Aec2ZeroStreamDelayKillSwitch");
+#else
+  return false;
+#endif
+}
+
+}  // namespace
+
+struct EchoCancellationImpl::StreamProperties {
+  StreamProperties() = delete;
+  StreamProperties(int sample_rate_hz,
+                   size_t num_reverse_channels,
+                   size_t num_output_channels,
+                   size_t num_proc_channels)
+      : sample_rate_hz(sample_rate_hz),
+        num_reverse_channels(num_reverse_channels),
+        num_output_channels(num_output_channels),
+        num_proc_channels(num_proc_channels) {}
+
+  const int sample_rate_hz;
+  const size_t num_reverse_channels;
+  const size_t num_output_channels;
+  const size_t num_proc_channels;
+};
+
+class EchoCancellationImpl::Canceller {
+ public:
+  Canceller() {
+    state_ = WebRtcAec_Create();
+    RTC_DCHECK(state_);
+  }
+
+  ~Canceller() {
+    RTC_CHECK(state_);
+    WebRtcAec_Free(state_);
+  }
+
+  void* state() { return state_; }
+
+  void Initialize(int sample_rate_hz) {
+    // TODO(ajm): Drift compensation is disabled in practice. If restored, it
+    // should be managed internally and not depend on the hardware sample rate.
+    // For now, just hardcode a 48 kHz value.
+    const int error = WebRtcAec_Init(state_, sample_rate_hz, 48000);
+    RTC_DCHECK_EQ(0, error);
+  }
+
+ private:
+  void* state_;
+};
+
+EchoCancellationImpl::EchoCancellationImpl(rtc::CriticalSection* crit_render,
+                                           rtc::CriticalSection* crit_capture)
+    : crit_render_(crit_render),
+      crit_capture_(crit_capture),
+      drift_compensation_enabled_(false),
+      metrics_enabled_(false),
+      suppression_level_(kModerateSuppression),
+      stream_drift_samples_(0),
+      was_stream_drift_set_(false),
+      stream_has_echo_(false),
+      delay_logging_enabled_(false),
+      extended_filter_enabled_(false),
+      delay_agnostic_enabled_(false),
+      enforce_zero_stream_delay_(EnforceZeroStreamDelay()) {
+  RTC_DCHECK(crit_render);
+  RTC_DCHECK(crit_capture);
+}
+
+EchoCancellationImpl::~EchoCancellationImpl() = default;
+
+void EchoCancellationImpl::ProcessRenderAudio(
+    rtc::ArrayView<const float> packed_render_audio) {
+  rtc::CritScope cs_capture(crit_capture_);
+  if (!enabled_) {
+    return;
+  }
+
+  RTC_DCHECK(stream_properties_);
+  size_t handle_index = 0;
+  size_t buffer_index = 0;
+  const size_t num_frames_per_band =
+      packed_render_audio.size() / (stream_properties_->num_output_channels *
+                                    stream_properties_->num_reverse_channels);
+  for (size_t i = 0; i < stream_properties_->num_output_channels; i++) {
+    for (size_t j = 0; j < stream_properties_->num_reverse_channels; j++) {
+      WebRtcAec_BufferFarend(cancellers_[handle_index++]->state(),
+                             &packed_render_audio[buffer_index],
+                             num_frames_per_band);
+
+      buffer_index += num_frames_per_band;
+    }
+  }
+}
+
+
+int EchoCancellationImpl::ProcessCaptureAudio(AudioBuffer* audio,
+                                              int stream_delay_ms) {
+  rtc::CritScope cs_capture(crit_capture_);
+  if (!enabled_) {
+    return AudioProcessing::kNoError;
+  }
+
+  const int stream_delay_ms_use =
+      enforce_zero_stream_delay_ ? 0 : stream_delay_ms;
+
+  if (drift_compensation_enabled_ && !was_stream_drift_set_) {
+    return AudioProcessing::kStreamParameterNotSetError;
+  }
+
+  RTC_DCHECK(stream_properties_);
+  RTC_DCHECK_GE(160, audio->num_frames_per_band());
+  RTC_DCHECK_EQ(audio->num_channels(), stream_properties_->num_proc_channels);
+
+  int err = AudioProcessing::kNoError;
+
+  // The ordering convention must be followed to pass to the correct AEC.
+  size_t handle_index = 0;
+  stream_has_echo_ = false;
+  for (size_t i = 0; i < audio->num_channels(); i++) {
+    for (size_t j = 0; j < stream_properties_->num_reverse_channels; j++) {
+      err = WebRtcAec_Process(cancellers_[handle_index]->state(),
+                              audio->split_bands_const_f(i), audio->num_bands(),
+                              audio->split_bands_f(i),
+                              audio->num_frames_per_band(), stream_delay_ms_use,
+                              stream_drift_samples_);
+
+      if (err != AudioProcessing::kNoError) {
+        err = MapError(err);
+        // TODO(ajm): Figure out how to return warnings properly.
+        if (err != AudioProcessing::kBadStreamParameterWarning) {
+          return err;
+        }
+      }
+
+      int status = 0;
+      err = WebRtcAec_get_echo_status(cancellers_[handle_index]->state(),
+                                      &status);
+      if (err != AudioProcessing::kNoError) {
+        return MapError(err);
+      }
+
+      if (status == 1) {
+        stream_has_echo_ = true;
+      }
+
+      handle_index++;
+    }
+  }
+
+  was_stream_drift_set_ = false;
+  return AudioProcessing::kNoError;
+}
+
+int EchoCancellationImpl::Enable(bool enable) {
+  // Run in a single-threaded manner.
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
+
+  if (enable && !enabled_) {
+    enabled_ = enable;  // Must be set before Initialize() is called.
+
+    // TODO(peah): Simplify once the Enable function has been removed from
+    // the public APM API.
+    RTC_DCHECK(stream_properties_);
+    Initialize(stream_properties_->sample_rate_hz,
+               stream_properties_->num_reverse_channels,
+               stream_properties_->num_output_channels,
+               stream_properties_->num_proc_channels);
+  } else {
+    enabled_ = enable;
+  }
+  return AudioProcessing::kNoError;
+}
+
+bool EchoCancellationImpl::is_enabled() const {
+  rtc::CritScope cs(crit_capture_);
+  return enabled_;
+}
+
+int EchoCancellationImpl::set_suppression_level(SuppressionLevel level) {
+  {
+    if (MapSetting(level) == -1) {
+      return AudioProcessing::kBadParameterError;
+    }
+    rtc::CritScope cs(crit_capture_);
+    suppression_level_ = level;
+  }
+  return Configure();
+}
+
+EchoCancellation::SuppressionLevel EchoCancellationImpl::suppression_level()
+    const {
+  rtc::CritScope cs(crit_capture_);
+  return suppression_level_;
+}
+
+int EchoCancellationImpl::enable_drift_compensation(bool enable) {
+  {
+    rtc::CritScope cs(crit_capture_);
+    drift_compensation_enabled_ = enable;
+  }
+  return Configure();
+}
+
+bool EchoCancellationImpl::is_drift_compensation_enabled() const {
+  rtc::CritScope cs(crit_capture_);
+  return drift_compensation_enabled_;
+}
+
+void EchoCancellationImpl::set_stream_drift_samples(int drift) {
+  rtc::CritScope cs(crit_capture_);
+  was_stream_drift_set_ = true;
+  stream_drift_samples_ = drift;
+}
+
+int EchoCancellationImpl::stream_drift_samples() const {
+  rtc::CritScope cs(crit_capture_);
+  return stream_drift_samples_;
+}
+
+int EchoCancellationImpl::enable_metrics(bool enable) {
+  {
+    rtc::CritScope cs(crit_capture_);
+    metrics_enabled_ = enable;
+  }
+  return Configure();
+}
+
+bool EchoCancellationImpl::are_metrics_enabled() const {
+  rtc::CritScope cs(crit_capture_);
+  return enabled_ && metrics_enabled_;
+}
+
+// TODO(ajm): we currently just use the metrics from the first AEC. Think more
+//            aboue the best way to extend this to multi-channel.
+int EchoCancellationImpl::GetMetrics(Metrics* metrics) {
+  rtc::CritScope cs(crit_capture_);
+  if (metrics == NULL) {
+    return AudioProcessing::kNullPointerError;
+  }
+
+  if (!enabled_ || !metrics_enabled_) {
+    return AudioProcessing::kNotEnabledError;
+  }
+
+  AecMetrics my_metrics;
+  memset(&my_metrics, 0, sizeof(my_metrics));
+  memset(metrics, 0, sizeof(Metrics));
+
+  const int err = WebRtcAec_GetMetrics(cancellers_[0]->state(), &my_metrics);
+  if (err != AudioProcessing::kNoError) {
+    return MapError(err);
+  }
+
+  metrics->residual_echo_return_loss.instant = my_metrics.rerl.instant;
+  metrics->residual_echo_return_loss.average = my_metrics.rerl.average;
+  metrics->residual_echo_return_loss.maximum = my_metrics.rerl.max;
+  metrics->residual_echo_return_loss.minimum = my_metrics.rerl.min;
+
+  metrics->echo_return_loss.instant = my_metrics.erl.instant;
+  metrics->echo_return_loss.average = my_metrics.erl.average;
+  metrics->echo_return_loss.maximum = my_metrics.erl.max;
+  metrics->echo_return_loss.minimum = my_metrics.erl.min;
+
+  metrics->echo_return_loss_enhancement.instant = my_metrics.erle.instant;
+  metrics->echo_return_loss_enhancement.average = my_metrics.erle.average;
+  metrics->echo_return_loss_enhancement.maximum = my_metrics.erle.max;
+  metrics->echo_return_loss_enhancement.minimum = my_metrics.erle.min;
+
+  metrics->a_nlp.instant = my_metrics.aNlp.instant;
+  metrics->a_nlp.average = my_metrics.aNlp.average;
+  metrics->a_nlp.maximum = my_metrics.aNlp.max;
+  metrics->a_nlp.minimum = my_metrics.aNlp.min;
+
+  metrics->divergent_filter_fraction = my_metrics.divergent_filter_fraction;
+  return AudioProcessing::kNoError;
+}
+
+bool EchoCancellationImpl::stream_has_echo() const {
+  rtc::CritScope cs(crit_capture_);
+  return stream_has_echo_;
+}
+
+int EchoCancellationImpl::enable_delay_logging(bool enable) {
+  {
+    rtc::CritScope cs(crit_capture_);
+    delay_logging_enabled_ = enable;
+  }
+  return Configure();
+}
+
+bool EchoCancellationImpl::is_delay_logging_enabled() const {
+  rtc::CritScope cs(crit_capture_);
+  return enabled_ && delay_logging_enabled_;
+}
+
+bool EchoCancellationImpl::is_delay_agnostic_enabled() const {
+  rtc::CritScope cs(crit_capture_);
+  return delay_agnostic_enabled_;
+}
+
+std::string EchoCancellationImpl::GetExperimentsDescription() {
+  rtc::CritScope cs(crit_capture_);
+  return refined_adaptive_filter_enabled_ ? "RefinedAdaptiveFilter;" : "";
+}
+
+bool EchoCancellationImpl::is_refined_adaptive_filter_enabled() const {
+  rtc::CritScope cs(crit_capture_);
+  return refined_adaptive_filter_enabled_;
+}
+
+bool EchoCancellationImpl::is_extended_filter_enabled() const {
+  rtc::CritScope cs(crit_capture_);
+  return extended_filter_enabled_;
+}
+
+// TODO(bjornv): How should we handle the multi-channel case?
+int EchoCancellationImpl::GetDelayMetrics(int* median, int* std) {
+  rtc::CritScope cs(crit_capture_);
+  float fraction_poor_delays = 0;
+  return GetDelayMetrics(median, std, &fraction_poor_delays);
+}
+
+int EchoCancellationImpl::GetDelayMetrics(int* median, int* std,
+                                          float* fraction_poor_delays) {
+  rtc::CritScope cs(crit_capture_);
+  if (median == NULL) {
+    return AudioProcessing::kNullPointerError;
+  }
+  if (std == NULL) {
+    return AudioProcessing::kNullPointerError;
+  }
+
+  if (!enabled_ || !delay_logging_enabled_) {
+    return AudioProcessing::kNotEnabledError;
+  }
+
+  const int err = WebRtcAec_GetDelayMetrics(cancellers_[0]->state(), median,
+                                            std, fraction_poor_delays);
+  if (err != AudioProcessing::kNoError) {
+    return MapError(err);
+  }
+
+  return AudioProcessing::kNoError;
+}
+
+struct AecCore* EchoCancellationImpl::aec_core() const {
+  rtc::CritScope cs(crit_capture_);
+  if (!enabled_) {
+    return NULL;
+  }
+  return WebRtcAec_aec_core(cancellers_[0]->state());
+}
+
+void EchoCancellationImpl::Initialize(int sample_rate_hz,
+                                      size_t num_reverse_channels,
+                                      size_t num_output_channels,
+                                      size_t num_proc_channels) {
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
+
+  stream_properties_.reset(
+      new StreamProperties(sample_rate_hz, num_reverse_channels,
+                           num_output_channels, num_proc_channels));
+
+  if (!enabled_) {
+    return;
+  }
+
+  const size_t num_cancellers_required =
+      NumCancellersRequired(stream_properties_->num_output_channels,
+                            stream_properties_->num_reverse_channels);
+  if (num_cancellers_required > cancellers_.size()) {
+    const size_t cancellers_old_size = cancellers_.size();
+    cancellers_.resize(num_cancellers_required);
+
+    for (size_t i = cancellers_old_size; i < cancellers_.size(); ++i) {
+      cancellers_[i].reset(new Canceller());
+    }
+  }
+
+  for (auto& canceller : cancellers_) {
+    canceller->Initialize(sample_rate_hz);
+  }
+
+  Configure();
+}
+
+int EchoCancellationImpl::GetSystemDelayInSamples() const {
+  rtc::CritScope cs(crit_capture_);
+  RTC_DCHECK(enabled_);
+  // Report the delay for the first AEC component.
+  return WebRtcAec_system_delay(
+      WebRtcAec_aec_core(cancellers_[0]->state()));
+}
+
+void EchoCancellationImpl::PackRenderAudioBuffer(
+    const AudioBuffer* audio,
+    size_t num_output_channels,
+    size_t num_channels,
+    std::vector<float>* packed_buffer) {
+  RTC_DCHECK_GE(160, audio->num_frames_per_band());
+  RTC_DCHECK_EQ(num_channels, audio->num_channels());
+
+  packed_buffer->clear();
+  // The ordering convention must be followed to pass the correct data.
+  for (size_t i = 0; i < num_output_channels; i++) {
+    for (size_t j = 0; j < audio->num_channels(); j++) {
+      // Buffer the samples in the render queue.
+      packed_buffer->insert(packed_buffer->end(),
+                            audio->split_bands_const_f(j)[kBand0To8kHz],
+                            (audio->split_bands_const_f(j)[kBand0To8kHz] +
+                             audio->num_frames_per_band()));
+    }
+  }
+}
+
+void EchoCancellationImpl::SetExtraOptions(const webrtc::Config& config) {
+  {
+    rtc::CritScope cs(crit_capture_);
+    extended_filter_enabled_ = config.Get<ExtendedFilter>().enabled;
+    delay_agnostic_enabled_ = config.Get<DelayAgnostic>().enabled;
+    refined_adaptive_filter_enabled_ =
+        config.Get<RefinedAdaptiveFilter>().enabled;
+  }
+  Configure();
+}
+
+int EchoCancellationImpl::Configure() {
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
+  AecConfig config;
+  config.metricsMode = metrics_enabled_;
+  config.nlpMode = MapSetting(suppression_level_);
+  config.skewMode = drift_compensation_enabled_;
+  config.delay_logging = delay_logging_enabled_;
+
+  int error = AudioProcessing::kNoError;
+  for (auto& canceller : cancellers_) {
+    WebRtcAec_enable_extended_filter(WebRtcAec_aec_core(canceller->state()),
+                                     extended_filter_enabled_ ? 1 : 0);
+    WebRtcAec_enable_delay_agnostic(WebRtcAec_aec_core(canceller->state()),
+                                    delay_agnostic_enabled_ ? 1 : 0);
+    WebRtcAec_enable_refined_adaptive_filter(
+        WebRtcAec_aec_core(canceller->state()),
+        refined_adaptive_filter_enabled_);
+    const int handle_error = WebRtcAec_set_config(canceller->state(), config);
+    if (handle_error != AudioProcessing::kNoError) {
+      error = AudioProcessing::kNoError;
+    }
+  }
+  return error;
+}
+
+size_t EchoCancellationImpl::NumCancellersRequired(
+    size_t num_output_channels,
+    size_t num_reverse_channels) {
+  return num_output_channels * num_reverse_channels;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/echo_cancellation_impl.h b/modules/audio_processing/echo_cancellation_impl.h
new file mode 100644
index 0000000..6700249
--- /dev/null
+++ b/modules/audio_processing/echo_cancellation_impl.h
@@ -0,0 +1,119 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_H_
+#define MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_H_
+
+#include <memory>
+#include <vector>
+
+#include "modules/audio_processing/include/audio_processing.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+
+namespace webrtc {
+
+class AudioBuffer;
+
+class EchoCancellationImpl : public EchoCancellation {
+ public:
+  EchoCancellationImpl(rtc::CriticalSection* crit_render,
+                       rtc::CriticalSection* crit_capture);
+  ~EchoCancellationImpl() override;
+
+  void ProcessRenderAudio(rtc::ArrayView<const float> packed_render_audio);
+  int ProcessCaptureAudio(AudioBuffer* audio, int stream_delay_ms);
+
+  // EchoCancellation implementation.
+  bool is_enabled() const override;
+  int stream_drift_samples() const override;
+  SuppressionLevel suppression_level() const override;
+  bool is_drift_compensation_enabled() const override;
+
+  void Initialize(int sample_rate_hz,
+                  size_t num_reverse_channels_,
+                  size_t num_output_channels_,
+                  size_t num_proc_channels_);
+  void SetExtraOptions(const webrtc::Config& config);
+  bool is_delay_agnostic_enabled() const;
+  bool is_extended_filter_enabled() const;
+  std::string GetExperimentsDescription();
+  bool is_refined_adaptive_filter_enabled() const;
+
+  // Returns the system delay of the first AEC component.
+  int GetSystemDelayInSamples() const;
+
+  static void PackRenderAudioBuffer(const AudioBuffer* audio,
+                                    size_t num_output_channels,
+                                    size_t num_channels,
+                                    std::vector<float>* packed_buffer);
+  static size_t NumCancellersRequired(size_t num_output_channels,
+                                      size_t num_reverse_channels);
+
+  // Enable logging of various AEC statistics.
+  int enable_metrics(bool enable) override;
+
+  // Provides various statistics about the AEC.
+  int GetMetrics(Metrics* metrics) override;
+
+  // Enable logging of delay metrics.
+  int enable_delay_logging(bool enable) override;
+
+  // Provides delay metrics.
+  int GetDelayMetrics(int* median,
+                      int* std,
+                      float* fraction_poor_delays) override;
+
+ private:
+  class Canceller;
+  struct StreamProperties;
+
+  // EchoCancellation implementation.
+  int Enable(bool enable) override;
+  int enable_drift_compensation(bool enable) override;
+  void set_stream_drift_samples(int drift) override;
+  int set_suppression_level(SuppressionLevel level) override;
+  bool are_metrics_enabled() const override;
+  bool stream_has_echo() const override;
+  bool is_delay_logging_enabled() const override;
+  int GetDelayMetrics(int* median, int* std) override;
+
+  struct AecCore* aec_core() const override;
+
+  void AllocateRenderQueue();
+  int Configure();
+
+  rtc::CriticalSection* const crit_render_ RTC_ACQUIRED_BEFORE(crit_capture_);
+  rtc::CriticalSection* const crit_capture_;
+
+  bool enabled_ = false;
+  bool drift_compensation_enabled_ RTC_GUARDED_BY(crit_capture_);
+  bool metrics_enabled_ RTC_GUARDED_BY(crit_capture_);
+  SuppressionLevel suppression_level_ RTC_GUARDED_BY(crit_capture_);
+  int stream_drift_samples_ RTC_GUARDED_BY(crit_capture_);
+  bool was_stream_drift_set_ RTC_GUARDED_BY(crit_capture_);
+  bool stream_has_echo_ RTC_GUARDED_BY(crit_capture_);
+  bool delay_logging_enabled_ RTC_GUARDED_BY(crit_capture_);
+  bool extended_filter_enabled_ RTC_GUARDED_BY(crit_capture_);
+  bool delay_agnostic_enabled_ RTC_GUARDED_BY(crit_capture_);
+  bool refined_adaptive_filter_enabled_ RTC_GUARDED_BY(crit_capture_) = false;
+
+  // Only active on Chrome OS devices.
+  const bool enforce_zero_stream_delay_ RTC_GUARDED_BY(crit_capture_);
+
+  std::vector<std::unique_ptr<Canceller>> cancellers_;
+  std::unique_ptr<StreamProperties> stream_properties_;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(EchoCancellationImpl);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_H_
diff --git a/modules/audio_processing/echo_cancellation_impl_unittest.cc b/modules/audio_processing/echo_cancellation_impl_unittest.cc
new file mode 100644
index 0000000..ec30abc
--- /dev/null
+++ b/modules/audio_processing/echo_cancellation_impl_unittest.cc
@@ -0,0 +1,79 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "modules/audio_processing/aec/aec_core.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(EchoCancellationInternalTest, ExtendedFilter) {
+  std::unique_ptr<AudioProcessing> ap(AudioProcessingBuilder().Create());
+  EXPECT_TRUE(ap->echo_cancellation()->aec_core() == NULL);
+
+  EXPECT_EQ(ap->kNoError, ap->echo_cancellation()->Enable(true));
+  EXPECT_TRUE(ap->echo_cancellation()->is_enabled());
+
+  AecCore* aec_core = ap->echo_cancellation()->aec_core();
+  ASSERT_TRUE(aec_core != NULL);
+  // Disabled by default.
+  EXPECT_EQ(0, WebRtcAec_extended_filter_enabled(aec_core));
+
+  Config config;
+  config.Set<ExtendedFilter>(new ExtendedFilter(true));
+  ap->SetExtraOptions(config);
+  EXPECT_EQ(1, WebRtcAec_extended_filter_enabled(aec_core));
+
+  // Retains setting after initialization.
+  EXPECT_EQ(ap->kNoError, ap->Initialize());
+  EXPECT_EQ(1, WebRtcAec_extended_filter_enabled(aec_core));
+
+  config.Set<ExtendedFilter>(new ExtendedFilter(false));
+  ap->SetExtraOptions(config);
+  EXPECT_EQ(0, WebRtcAec_extended_filter_enabled(aec_core));
+
+  // Retains setting after initialization.
+  EXPECT_EQ(ap->kNoError, ap->Initialize());
+  EXPECT_EQ(0, WebRtcAec_extended_filter_enabled(aec_core));
+}
+
+TEST(EchoCancellationInternalTest, DelayAgnostic) {
+  std::unique_ptr<AudioProcessing> ap(AudioProcessingBuilder().Create());
+  EXPECT_TRUE(ap->echo_cancellation()->aec_core() == NULL);
+
+  EXPECT_EQ(ap->kNoError, ap->echo_cancellation()->Enable(true));
+  EXPECT_TRUE(ap->echo_cancellation()->is_enabled());
+
+  AecCore* aec_core = ap->echo_cancellation()->aec_core();
+  ASSERT_TRUE(aec_core != NULL);
+  // Enabled by default.
+  EXPECT_EQ(0, WebRtcAec_delay_agnostic_enabled(aec_core));
+
+  Config config;
+  config.Set<DelayAgnostic>(new DelayAgnostic(true));
+  ap->SetExtraOptions(config);
+  EXPECT_EQ(1, WebRtcAec_delay_agnostic_enabled(aec_core));
+
+  // Retains setting after initialization.
+  EXPECT_EQ(ap->kNoError, ap->Initialize());
+  EXPECT_EQ(1, WebRtcAec_delay_agnostic_enabled(aec_core));
+
+  config.Set<DelayAgnostic>(new DelayAgnostic(false));
+  ap->SetExtraOptions(config);
+  EXPECT_EQ(0, WebRtcAec_delay_agnostic_enabled(aec_core));
+
+  // Retains setting after initialization.
+  EXPECT_EQ(ap->kNoError, ap->Initialize());
+  EXPECT_EQ(0, WebRtcAec_delay_agnostic_enabled(aec_core));
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/echo_control_mobile_impl.cc b/modules/audio_processing/echo_control_mobile_impl.cc
new file mode 100644
index 0000000..0ff1bce
--- /dev/null
+++ b/modules/audio_processing/echo_control_mobile_impl.cc
@@ -0,0 +1,391 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/echo_control_mobile_impl.h"
+
+#include <string.h>
+
+#include "modules/audio_processing/aecm/echo_control_mobile.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+namespace {
+int16_t MapSetting(EchoControlMobile::RoutingMode mode) {
+  switch (mode) {
+    case EchoControlMobile::kQuietEarpieceOrHeadset:
+      return 0;
+    case EchoControlMobile::kEarpiece:
+      return 1;
+    case EchoControlMobile::kLoudEarpiece:
+      return 2;
+    case EchoControlMobile::kSpeakerphone:
+      return 3;
+    case EchoControlMobile::kLoudSpeakerphone:
+      return 4;
+  }
+  RTC_NOTREACHED();
+  return -1;
+}
+
+AudioProcessing::Error MapError(int err) {
+  switch (err) {
+    case AECM_UNSUPPORTED_FUNCTION_ERROR:
+      return AudioProcessing::kUnsupportedFunctionError;
+    case AECM_NULL_POINTER_ERROR:
+      return AudioProcessing::kNullPointerError;
+    case AECM_BAD_PARAMETER_ERROR:
+      return AudioProcessing::kBadParameterError;
+    case AECM_BAD_PARAMETER_WARNING:
+      return AudioProcessing::kBadStreamParameterWarning;
+    default:
+      // AECM_UNSPECIFIED_ERROR
+      // AECM_UNINITIALIZED_ERROR
+      return AudioProcessing::kUnspecifiedError;
+  }
+}
+}  // namespace
+
+size_t EchoControlMobile::echo_path_size_bytes() {
+  return WebRtcAecm_echo_path_size_bytes();
+}
+
+struct EchoControlMobileImpl::StreamProperties {
+  StreamProperties() = delete;
+  StreamProperties(int sample_rate_hz,
+                   size_t num_reverse_channels,
+                   size_t num_output_channels)
+      : sample_rate_hz(sample_rate_hz),
+        num_reverse_channels(num_reverse_channels),
+        num_output_channels(num_output_channels) {}
+
+  int sample_rate_hz;
+  size_t num_reverse_channels;
+  size_t num_output_channels;
+};
+
+class EchoControlMobileImpl::Canceller {
+ public:
+  Canceller() {
+    state_ = WebRtcAecm_Create();
+    RTC_CHECK(state_);
+  }
+
+  ~Canceller() {
+    RTC_DCHECK(state_);
+    WebRtcAecm_Free(state_);
+  }
+
+  void* state() {
+    RTC_DCHECK(state_);
+    return state_;
+  }
+
+  void Initialize(int sample_rate_hz,
+                  unsigned char* external_echo_path,
+                  size_t echo_path_size_bytes) {
+    RTC_DCHECK(state_);
+    int error = WebRtcAecm_Init(state_, sample_rate_hz);
+    RTC_DCHECK_EQ(AudioProcessing::kNoError, error);
+    if (external_echo_path != NULL) {
+      error = WebRtcAecm_InitEchoPath(state_, external_echo_path,
+                                      echo_path_size_bytes);
+      RTC_DCHECK_EQ(AudioProcessing::kNoError, error);
+    }
+  }
+
+ private:
+  void* state_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(Canceller);
+};
+
+EchoControlMobileImpl::EchoControlMobileImpl(rtc::CriticalSection* crit_render,
+                                             rtc::CriticalSection* crit_capture)
+    : crit_render_(crit_render),
+      crit_capture_(crit_capture),
+      routing_mode_(kSpeakerphone),
+      comfort_noise_enabled_(true),
+      external_echo_path_(NULL) {
+  RTC_DCHECK(crit_render);
+  RTC_DCHECK(crit_capture);
+}
+
+EchoControlMobileImpl::~EchoControlMobileImpl() {
+    if (external_echo_path_ != NULL) {
+      delete [] external_echo_path_;
+      external_echo_path_ = NULL;
+    }
+}
+
+void EchoControlMobileImpl::ProcessRenderAudio(
+    rtc::ArrayView<const int16_t> packed_render_audio) {
+  rtc::CritScope cs_capture(crit_capture_);
+  if (!enabled_) {
+    return;
+  }
+
+  RTC_DCHECK(stream_properties_);
+
+  size_t buffer_index = 0;
+  size_t num_frames_per_band =
+      packed_render_audio.size() / (stream_properties_->num_output_channels *
+                                    stream_properties_->num_reverse_channels);
+
+  for (auto& canceller : cancellers_) {
+    WebRtcAecm_BufferFarend(canceller->state(),
+                            &packed_render_audio[buffer_index],
+                            num_frames_per_band);
+
+    buffer_index += num_frames_per_band;
+  }
+}
+
+void EchoControlMobileImpl::PackRenderAudioBuffer(
+    const AudioBuffer* audio,
+    size_t num_output_channels,
+    size_t num_channels,
+    std::vector<int16_t>* packed_buffer) {
+  RTC_DCHECK_GE(160, audio->num_frames_per_band());
+  RTC_DCHECK_EQ(num_channels, audio->num_channels());
+
+  // The ordering convention must be followed to pass to the correct AECM.
+  packed_buffer->clear();
+  int render_channel = 0;
+  for (size_t i = 0; i < num_output_channels; i++) {
+    for (size_t j = 0; j < audio->num_channels(); j++) {
+      // Buffer the samples in the render queue.
+      packed_buffer->insert(
+          packed_buffer->end(),
+          audio->split_bands_const(render_channel)[kBand0To8kHz],
+          (audio->split_bands_const(render_channel)[kBand0To8kHz] +
+           audio->num_frames_per_band()));
+      render_channel = (render_channel + 1) % audio->num_channels();
+    }
+  }
+}
+
+size_t EchoControlMobileImpl::NumCancellersRequired(
+    size_t num_output_channels,
+    size_t num_reverse_channels) {
+  return num_output_channels * num_reverse_channels;
+}
+
+int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio,
+                                               int stream_delay_ms) {
+  rtc::CritScope cs_capture(crit_capture_);
+  if (!enabled_) {
+    return AudioProcessing::kNoError;
+  }
+
+  RTC_DCHECK(stream_properties_);
+  RTC_DCHECK_GE(160, audio->num_frames_per_band());
+  RTC_DCHECK_EQ(audio->num_channels(), stream_properties_->num_output_channels);
+  RTC_DCHECK_GE(cancellers_.size(), stream_properties_->num_reverse_channels *
+                                        audio->num_channels());
+
+  int err = AudioProcessing::kNoError;
+
+  // The ordering convention must be followed to pass to the correct AECM.
+  size_t handle_index = 0;
+  for (size_t capture = 0; capture < audio->num_channels(); ++capture) {
+    // TODO(ajm): improve how this works, possibly inside AECM.
+    //            This is kind of hacked up.
+    const int16_t* noisy = audio->low_pass_reference(capture);
+    const int16_t* clean = audio->split_bands_const(capture)[kBand0To8kHz];
+    if (noisy == NULL) {
+      noisy = clean;
+      clean = NULL;
+    }
+    for (size_t render = 0; render < stream_properties_->num_reverse_channels;
+         ++render) {
+      err = WebRtcAecm_Process(cancellers_[handle_index]->state(), noisy, clean,
+                               audio->split_bands(capture)[kBand0To8kHz],
+                               audio->num_frames_per_band(), stream_delay_ms);
+
+      if (err != AudioProcessing::kNoError) {
+        return MapError(err);
+      }
+
+      ++handle_index;
+    }
+    for (size_t band = 1u; band < audio->num_bands(); ++band) {
+      memset(audio->split_bands(capture)[band],
+             0,
+             audio->num_frames_per_band() *
+                 sizeof(audio->split_bands(capture)[band][0]));
+    }
+  }
+  return AudioProcessing::kNoError;
+}
+
+int EchoControlMobileImpl::Enable(bool enable) {
+  // Ensure AEC and AECM are not both enabled.
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
+  RTC_DCHECK(stream_properties_);
+
+  if (enable &&
+      stream_properties_->sample_rate_hz > AudioProcessing::kSampleRate16kHz) {
+    return AudioProcessing::kBadSampleRateError;
+  }
+
+  if (enable && !enabled_) {
+    enabled_ = enable;  // Must be set before Initialize() is called.
+
+    // TODO(peah): Simplify once the Enable function has been removed from
+    // the public APM API.
+    Initialize(stream_properties_->sample_rate_hz,
+               stream_properties_->num_reverse_channels,
+               stream_properties_->num_output_channels);
+  } else {
+    enabled_ = enable;
+  }
+  return AudioProcessing::kNoError;
+}
+
+bool EchoControlMobileImpl::is_enabled() const {
+  rtc::CritScope cs(crit_capture_);
+  return enabled_;
+}
+
+int EchoControlMobileImpl::set_routing_mode(RoutingMode mode) {
+  if (MapSetting(mode) == -1) {
+    return AudioProcessing::kBadParameterError;
+  }
+
+  {
+    rtc::CritScope cs(crit_capture_);
+    routing_mode_ = mode;
+  }
+  return Configure();
+}
+
+EchoControlMobile::RoutingMode EchoControlMobileImpl::routing_mode()
+    const {
+  rtc::CritScope cs(crit_capture_);
+  return routing_mode_;
+}
+
+int EchoControlMobileImpl::enable_comfort_noise(bool enable) {
+  {
+    rtc::CritScope cs(crit_capture_);
+    comfort_noise_enabled_ = enable;
+  }
+  return Configure();
+}
+
+bool EchoControlMobileImpl::is_comfort_noise_enabled() const {
+  rtc::CritScope cs(crit_capture_);
+  return comfort_noise_enabled_;
+}
+
+int EchoControlMobileImpl::SetEchoPath(const void* echo_path,
+                                       size_t size_bytes) {
+  {
+    rtc::CritScope cs_render(crit_render_);
+    rtc::CritScope cs_capture(crit_capture_);
+    if (echo_path == NULL) {
+      return AudioProcessing::kNullPointerError;
+    }
+    if (size_bytes != echo_path_size_bytes()) {
+      // Size mismatch
+      return AudioProcessing::kBadParameterError;
+    }
+
+    if (external_echo_path_ == NULL) {
+      external_echo_path_ = new unsigned char[size_bytes];
+    }
+    memcpy(external_echo_path_, echo_path, size_bytes);
+  }
+
+  // TODO(peah): Simplify once the Enable function has been removed from
+  // the public APM API.
+  RTC_DCHECK(stream_properties_);
+  Initialize(stream_properties_->sample_rate_hz,
+             stream_properties_->num_reverse_channels,
+             stream_properties_->num_output_channels);
+  return AudioProcessing::kNoError;
+}
+
+int EchoControlMobileImpl::GetEchoPath(void* echo_path,
+                                       size_t size_bytes) const {
+  rtc::CritScope cs(crit_capture_);
+  if (echo_path == NULL) {
+    return AudioProcessing::kNullPointerError;
+  }
+  if (size_bytes != echo_path_size_bytes()) {
+    // Size mismatch
+    return AudioProcessing::kBadParameterError;
+  }
+  if (!enabled_) {
+    return AudioProcessing::kNotEnabledError;
+  }
+
+  // Get the echo path from the first channel
+  int32_t err =
+      WebRtcAecm_GetEchoPath(cancellers_[0]->state(), echo_path, size_bytes);
+  if (err != 0) {
+    return MapError(err);
+  }
+
+  return AudioProcessing::kNoError;
+}
+
+void EchoControlMobileImpl::Initialize(int sample_rate_hz,
+                                       size_t num_reverse_channels,
+                                       size_t num_output_channels) {
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
+
+  stream_properties_.reset(new StreamProperties(
+      sample_rate_hz, num_reverse_channels, num_output_channels));
+
+  if (!enabled_) {
+    return;
+  }
+
+  // AECM only supports 16 kHz or lower sample rates.
+  RTC_DCHECK_LE(stream_properties_->sample_rate_hz,
+                AudioProcessing::kSampleRate16kHz);
+
+  cancellers_.resize(
+      NumCancellersRequired(stream_properties_->num_output_channels,
+                            stream_properties_->num_reverse_channels));
+
+  for (auto& canceller : cancellers_) {
+    if (!canceller) {
+      canceller.reset(new Canceller());
+    }
+    canceller->Initialize(sample_rate_hz, external_echo_path_,
+                          echo_path_size_bytes());
+  }
+
+  Configure();
+}
+
+int EchoControlMobileImpl::Configure() {
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
+  AecmConfig config;
+  config.cngMode = comfort_noise_enabled_;
+  config.echoMode = MapSetting(routing_mode_);
+  int error = AudioProcessing::kNoError;
+  for (auto& canceller : cancellers_) {
+    int handle_error = WebRtcAecm_set_config(canceller->state(), config);
+    if (handle_error != AudioProcessing::kNoError) {
+      error = handle_error;
+    }
+  }
+  return error;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/echo_control_mobile_impl.h b/modules/audio_processing/echo_control_mobile_impl.h
new file mode 100644
index 0000000..a03ab4d
--- /dev/null
+++ b/modules/audio_processing/echo_control_mobile_impl.h
@@ -0,0 +1,84 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_ECHO_CONTROL_MOBILE_IMPL_H_
+#define MODULES_AUDIO_PROCESSING_ECHO_CONTROL_MOBILE_IMPL_H_
+
+#include <memory>
+#include <vector>
+
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/render_queue_item_verifier.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/swap_queue.h"
+
+namespace webrtc {
+
+class AudioBuffer;
+
+class EchoControlMobileImpl : public EchoControlMobile {
+ public:
+  EchoControlMobileImpl(rtc::CriticalSection* crit_render,
+                        rtc::CriticalSection* crit_capture);
+
+  ~EchoControlMobileImpl() override;
+
+  void ProcessRenderAudio(rtc::ArrayView<const int16_t> packed_render_audio);
+  int ProcessCaptureAudio(AudioBuffer* audio, int stream_delay_ms);
+
+  // EchoControlMobile implementation.
+  bool is_enabled() const override;
+  RoutingMode routing_mode() const override;
+  bool is_comfort_noise_enabled() const override;
+
+  void Initialize(int sample_rate_hz,
+                  size_t num_reverse_channels,
+                  size_t num_output_channels);
+
+  static void PackRenderAudioBuffer(const AudioBuffer* audio,
+                                    size_t num_output_channels,
+                                    size_t num_channels,
+                                    std::vector<int16_t>* packed_buffer);
+
+  static size_t NumCancellersRequired(size_t num_output_channels,
+                                      size_t num_reverse_channels);
+
+ private:
+  class Canceller;
+  struct StreamProperties;
+
+  // EchoControlMobile implementation.
+  int Enable(bool enable) override;
+  int set_routing_mode(RoutingMode mode) override;
+  int enable_comfort_noise(bool enable) override;
+  int SetEchoPath(const void* echo_path, size_t size_bytes) override;
+  int GetEchoPath(void* echo_path, size_t size_bytes) const override;
+
+  int Configure();
+
+  rtc::CriticalSection* const crit_render_ RTC_ACQUIRED_BEFORE(crit_capture_);
+  rtc::CriticalSection* const crit_capture_;
+
+  bool enabled_ = false;
+
+  RoutingMode routing_mode_ RTC_GUARDED_BY(crit_capture_);
+  bool comfort_noise_enabled_ RTC_GUARDED_BY(crit_capture_);
+  unsigned char* external_echo_path_ RTC_GUARDED_BY(crit_render_)
+      RTC_GUARDED_BY(crit_capture_);
+
+  std::vector<std::unique_ptr<Canceller>> cancellers_;
+  std::unique_ptr<StreamProperties> stream_properties_;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(EchoControlMobileImpl);
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_ECHO_CONTROL_MOBILE_IMPL_H_
diff --git a/modules/audio_processing/echo_control_mobile_unittest.cc b/modules/audio_processing/echo_control_mobile_unittest.cc
new file mode 100644
index 0000000..fb58a5b
--- /dev/null
+++ b/modules/audio_processing/echo_control_mobile_unittest.cc
@@ -0,0 +1,224 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/echo_control_mobile_impl.h"
+#include "modules/audio_processing/test/audio_buffer_tools.h"
+#include "modules/audio_processing/test/bitexactness_tools.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+// TODO(peah): Increase the number of frames to proces when the issue of
+// non repeatable test results have been found.
+const int kNumFramesToProcess = 200;
+
+void SetupComponent(int sample_rate_hz,
+                    EchoControlMobile::RoutingMode routing_mode,
+                    bool comfort_noise_enabled,
+                    EchoControlMobileImpl* echo_control_mobile) {
+  echo_control_mobile->Initialize(
+      sample_rate_hz > 16000 ? 16000 : sample_rate_hz, 1, 1);
+  EchoControlMobile* ec = static_cast<EchoControlMobile*>(echo_control_mobile);
+  ec->Enable(true);
+  ec->set_routing_mode(routing_mode);
+  ec->enable_comfort_noise(comfort_noise_enabled);
+}
+
+void ProcessOneFrame(int sample_rate_hz,
+                     int stream_delay_ms,
+                     AudioBuffer* render_audio_buffer,
+                     AudioBuffer* capture_audio_buffer,
+                     EchoControlMobileImpl* echo_control_mobile) {
+  if (sample_rate_hz > AudioProcessing::kSampleRate16kHz) {
+    render_audio_buffer->SplitIntoFrequencyBands();
+    capture_audio_buffer->SplitIntoFrequencyBands();
+  }
+
+  std::vector<int16_t> render_audio;
+  EchoControlMobileImpl::PackRenderAudioBuffer(
+      render_audio_buffer, 1, render_audio_buffer->num_channels(),
+      &render_audio);
+  echo_control_mobile->ProcessRenderAudio(render_audio);
+
+  echo_control_mobile->ProcessCaptureAudio(capture_audio_buffer,
+                                           stream_delay_ms);
+
+  if (sample_rate_hz > AudioProcessing::kSampleRate16kHz) {
+    capture_audio_buffer->MergeFrequencyBands();
+  }
+}
+
+void RunBitexactnessTest(int sample_rate_hz,
+                         size_t num_channels,
+                         int stream_delay_ms,
+                         EchoControlMobile::RoutingMode routing_mode,
+                         bool comfort_noise_enabled,
+                         const rtc::ArrayView<const float>& output_reference) {
+  rtc::CriticalSection crit_render;
+  rtc::CriticalSection crit_capture;
+  EchoControlMobileImpl echo_control_mobile(&crit_render, &crit_capture);
+  SetupComponent(sample_rate_hz, routing_mode, comfort_noise_enabled,
+                 &echo_control_mobile);
+
+  const int samples_per_channel = rtc::CheckedDivExact(sample_rate_hz, 100);
+  const StreamConfig render_config(sample_rate_hz, num_channels, false);
+  AudioBuffer render_buffer(
+      render_config.num_frames(), render_config.num_channels(),
+      render_config.num_frames(), 1, render_config.num_frames());
+  test::InputAudioFile render_file(
+      test::GetApmRenderTestVectorFileName(sample_rate_hz));
+  std::vector<float> render_input(samples_per_channel * num_channels);
+
+  const StreamConfig capture_config(sample_rate_hz, num_channels, false);
+  AudioBuffer capture_buffer(
+      capture_config.num_frames(), capture_config.num_channels(),
+      capture_config.num_frames(), 1, capture_config.num_frames());
+  test::InputAudioFile capture_file(
+      test::GetApmCaptureTestVectorFileName(sample_rate_hz));
+  std::vector<float> capture_input(samples_per_channel * num_channels);
+
+  for (int frame_no = 0; frame_no < kNumFramesToProcess; ++frame_no) {
+    ReadFloatSamplesFromStereoFile(samples_per_channel, num_channels,
+                                   &render_file, render_input);
+    ReadFloatSamplesFromStereoFile(samples_per_channel, num_channels,
+                                   &capture_file, capture_input);
+
+    test::CopyVectorToAudioBuffer(render_config, render_input, &render_buffer);
+    test::CopyVectorToAudioBuffer(capture_config, capture_input,
+                                  &capture_buffer);
+
+    ProcessOneFrame(sample_rate_hz, stream_delay_ms, &render_buffer,
+                    &capture_buffer, &echo_control_mobile);
+  }
+
+  // Extract and verify the test results.
+  std::vector<float> capture_output;
+  test::ExtractVectorFromAudioBuffer(capture_config, &capture_buffer,
+                                     &capture_output);
+
+  // Compare the output with the reference. Only the first values of the output
+  // from last frame processed are compared in order not having to specify all
+  // preceeding frames as testvectors. As the algorithm being tested has a
+  // memory, testing only the last frame implicitly also tests the preceeding
+  // frames.
+  const float kElementErrorBound = 1.0f / 32768.0f;
+  EXPECT_TRUE(test::VerifyDeinterleavedArray(
+      capture_config.num_frames(), capture_config.num_channels(),
+      output_reference, capture_output, kElementErrorBound));
+}
+
+}  // namespace
+
+// TODO(peah): Renable once the integer overflow issue in aecm_core.c:932:69
+// has been solved.
+TEST(EchoControlMobileBitExactnessTest,
+     DISABLED_Mono8kHz_LoudSpeakerPhone_CngOn_StreamDelay0) {
+  const float kOutputReference[] = {0.005280f, 0.002380f, -0.000427f};
+
+  RunBitexactnessTest(8000, 1, 0,
+                      EchoControlMobile::RoutingMode::kLoudSpeakerphone, true,
+                      kOutputReference);
+}
+
+TEST(EchoControlMobileBitExactnessTest,
+     DISABLED_Mono16kHz_LoudSpeakerPhone_CngOn_StreamDelay0) {
+  const float kOutputReference[] = {0.003601f, 0.002991f, 0.001923f};
+  RunBitexactnessTest(16000, 1, 0,
+                      EchoControlMobile::RoutingMode::kLoudSpeakerphone, true,
+                      kOutputReference);
+}
+
+TEST(EchoControlMobileBitExactnessTest,
+     DISABLED_Mono32kHz_LoudSpeakerPhone_CngOn_StreamDelay0) {
+  const float kOutputReference[] = {0.002258f, 0.002899f, 0.003906f};
+
+  RunBitexactnessTest(32000, 1, 0,
+                      EchoControlMobile::RoutingMode::kLoudSpeakerphone, true,
+                      kOutputReference);
+}
+
+TEST(EchoControlMobileBitExactnessTest,
+     DISABLED_Mono48kHz_LoudSpeakerPhone_CngOn_StreamDelay0) {
+  const float kOutputReference[] = {-0.000046f, 0.000041f, 0.000249f};
+
+  RunBitexactnessTest(48000, 1, 0,
+                      EchoControlMobile::RoutingMode::kLoudSpeakerphone, true,
+                      kOutputReference);
+}
+
+TEST(EchoControlMobileBitExactnessTest,
+     DISABLED_Mono16kHz_LoudSpeakerPhone_CngOff_StreamDelay0) {
+  const float kOutputReference[] = {0.000000f, 0.000000f, 0.000000f};
+
+  RunBitexactnessTest(16000, 1, 0,
+                      EchoControlMobile::RoutingMode::kLoudSpeakerphone, false,
+                      kOutputReference);
+}
+
+// TODO(peah): Renable once the integer overflow issue in aecm_core.c:932:69
+// has been solved.
+TEST(EchoControlMobileBitExactnessTest,
+     DISABLED_Mono16kHz_LoudSpeakerPhone_CngOn_StreamDelay5) {
+  const float kOutputReference[] = {0.003693f, 0.002930f, 0.001801f};
+
+  RunBitexactnessTest(16000, 1, 5,
+                      EchoControlMobile::RoutingMode::kLoudSpeakerphone, true,
+                      kOutputReference);
+}
+
+TEST(EchoControlMobileBitExactnessTest,
+     Mono16kHz_LoudSpeakerPhone_CngOn_StreamDelay10) {
+  const float kOutputReference[] = {-0.002380f, -0.002533f, -0.002563f};
+
+  RunBitexactnessTest(16000, 1, 10,
+                      EchoControlMobile::RoutingMode::kLoudSpeakerphone, true,
+                      kOutputReference);
+}
+
+TEST(EchoControlMobileBitExactnessTest,
+     DISABLED_Mono16kHz_QuietEarpieceOrHeadset_CngOn_StreamDelay0) {
+  const float kOutputReference[] = {0.000397f, 0.000000f, -0.000305f};
+
+  RunBitexactnessTest(16000, 1, 0,
+                      EchoControlMobile::RoutingMode::kQuietEarpieceOrHeadset,
+                      true, kOutputReference);
+}
+
+TEST(EchoControlMobileBitExactnessTest,
+     DISABLED_Mono16kHz_Earpiece_CngOn_StreamDelay0) {
+  const float kOutputReference[] = {0.002167f, 0.001617f, 0.001038f};
+
+  RunBitexactnessTest(16000, 1, 0, EchoControlMobile::RoutingMode::kEarpiece,
+                      true, kOutputReference);
+}
+
+TEST(EchoControlMobileBitExactnessTest,
+     DISABLED_Mono16kHz_LoudEarpiece_CngOn_StreamDelay0) {
+  const float kOutputReference[] = {0.003540f, 0.002899f, 0.001862f};
+
+  RunBitexactnessTest(16000, 1, 0,
+                      EchoControlMobile::RoutingMode::kLoudEarpiece, true,
+                      kOutputReference);
+}
+
+TEST(EchoControlMobileBitExactnessTest,
+     DISABLED_Mono16kHz_SpeakerPhone_CngOn_StreamDelay0) {
+  const float kOutputReference[] = {0.003632f, 0.003052f, 0.001984f};
+
+  RunBitexactnessTest(16000, 1, 0,
+                      EchoControlMobile::RoutingMode::kSpeakerphone, true,
+                      kOutputReference);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/echo_detector/circular_buffer.cc b/modules/audio_processing/echo_detector/circular_buffer.cc
new file mode 100644
index 0000000..0c6cc8a
--- /dev/null
+++ b/modules/audio_processing/echo_detector/circular_buffer.cc
@@ -0,0 +1,49 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/echo_detector/circular_buffer.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+CircularBuffer::CircularBuffer(size_t size) : buffer_(size) {}
+CircularBuffer::~CircularBuffer() = default;
+
+void CircularBuffer::Push(float value) {
+  buffer_[next_insertion_index_] = value;
+  ++next_insertion_index_;
+  next_insertion_index_ %= buffer_.size();
+  RTC_DCHECK_LT(next_insertion_index_, buffer_.size());
+  nr_elements_in_buffer_ = std::min(nr_elements_in_buffer_ + 1, buffer_.size());
+  RTC_DCHECK_LE(nr_elements_in_buffer_, buffer_.size());
+}
+
+rtc::Optional<float> CircularBuffer::Pop() {
+  if (nr_elements_in_buffer_ == 0) {
+    return rtc::nullopt;
+  }
+  const size_t index =
+      (buffer_.size() + next_insertion_index_ - nr_elements_in_buffer_) %
+      buffer_.size();
+  RTC_DCHECK_LT(index, buffer_.size());
+  --nr_elements_in_buffer_;
+  return buffer_[index];
+}
+
+void CircularBuffer::Clear() {
+  std::fill(buffer_.begin(), buffer_.end(), 0.f);
+  next_insertion_index_ = 0;
+  nr_elements_in_buffer_ = 0;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/echo_detector/circular_buffer.h b/modules/audio_processing/echo_detector/circular_buffer.h
new file mode 100644
index 0000000..53d4afb
--- /dev/null
+++ b/modules/audio_processing/echo_detector/circular_buffer.h
@@ -0,0 +1,42 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_CIRCULAR_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_CIRCULAR_BUFFER_H_
+
+#include <vector>
+
+#include "api/optional.h"
+
+namespace webrtc {
+
+// Ring buffer containing floating point values.
+struct CircularBuffer {
+ public:
+  explicit CircularBuffer(size_t size);
+  ~CircularBuffer();
+
+  void Push(float value);
+  rtc::Optional<float> Pop();
+  size_t Size() const { return nr_elements_in_buffer_; }
+  // This function fills the buffer with zeros, but does not change its size.
+  void Clear();
+
+ private:
+  std::vector<float> buffer_;
+  size_t next_insertion_index_ = 0;
+  // This is the number of elements that have been pushed into the circular
+  // buffer, not the allocated buffer size.
+  size_t nr_elements_in_buffer_ = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_CIRCULAR_BUFFER_H_
diff --git a/modules/audio_processing/echo_detector/circular_buffer_unittest.cc b/modules/audio_processing/echo_detector/circular_buffer_unittest.cc
new file mode 100644
index 0000000..657bd05
--- /dev/null
+++ b/modules/audio_processing/echo_detector/circular_buffer_unittest.cc
@@ -0,0 +1,52 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/echo_detector/circular_buffer.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(CircularBufferTests, LessThanMaxTest) {
+  CircularBuffer test_buffer(3);
+  test_buffer.Push(1.f);
+  test_buffer.Push(2.f);
+  EXPECT_EQ(1.f, test_buffer.Pop());
+  EXPECT_EQ(2.f, test_buffer.Pop());
+}
+
+TEST(CircularBufferTests, FillTest) {
+  CircularBuffer test_buffer(3);
+  test_buffer.Push(1.f);
+  test_buffer.Push(2.f);
+  test_buffer.Push(3.f);
+  EXPECT_EQ(1.f, test_buffer.Pop());
+  EXPECT_EQ(2.f, test_buffer.Pop());
+  EXPECT_EQ(3.f, test_buffer.Pop());
+}
+
+TEST(CircularBufferTests, OverflowTest) {
+  CircularBuffer test_buffer(3);
+  test_buffer.Push(1.f);
+  test_buffer.Push(2.f);
+  test_buffer.Push(3.f);
+  test_buffer.Push(4.f);
+  // Because the circular buffer has a size of 3, the first insert should have
+  // been forgotten.
+  EXPECT_EQ(2.f, test_buffer.Pop());
+  EXPECT_EQ(3.f, test_buffer.Pop());
+  EXPECT_EQ(4.f, test_buffer.Pop());
+}
+
+TEST(CircularBufferTests, ReadFromEmpty) {
+  CircularBuffer test_buffer(3);
+  EXPECT_EQ(rtc::nullopt, test_buffer.Pop());
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/echo_detector/mean_variance_estimator.cc b/modules/audio_processing/echo_detector/mean_variance_estimator.cc
new file mode 100644
index 0000000..a857403
--- /dev/null
+++ b/modules/audio_processing/echo_detector/mean_variance_estimator.cc
@@ -0,0 +1,47 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/echo_detector/mean_variance_estimator.h"
+
+#include <math.h>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+// Parameter controlling the adaptation speed.
+constexpr float kAlpha = 0.001f;
+
+}  // namespace
+
+void MeanVarianceEstimator::Update(float value) {
+  mean_ = (1.f - kAlpha) * mean_ + kAlpha * value;
+  variance_ =
+      (1.f - kAlpha) * variance_ + kAlpha * (value - mean_) * (value - mean_);
+  RTC_DCHECK(isfinite(mean_));
+  RTC_DCHECK(isfinite(variance_));
+}
+
+float MeanVarianceEstimator::std_deviation() const {
+  RTC_DCHECK_GE(variance_, 0.f);
+  return sqrtf(variance_);
+}
+
+float MeanVarianceEstimator::mean() const {
+  return mean_;
+}
+
+void MeanVarianceEstimator::Clear() {
+  mean_ = 0.f;
+  variance_ = 0.f;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/echo_detector/mean_variance_estimator.h b/modules/audio_processing/echo_detector/mean_variance_estimator.h
new file mode 100644
index 0000000..7f793df
--- /dev/null
+++ b/modules/audio_processing/echo_detector/mean_variance_estimator.h
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_MEAN_VARIANCE_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_MEAN_VARIANCE_ESTIMATOR_H_
+
+namespace webrtc {
+
+// This class iteratively estimates the mean and variance of a signal.
+class MeanVarianceEstimator {
+ public:
+  void Update(float value);
+  float std_deviation() const;
+  float mean() const;
+  void Clear();
+
+ private:
+  // Estimate of the expected value of the input values.
+  float mean_ = 0.f;
+  // Estimate of the variance of the input values.
+  float variance_ = 0.f;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_MEAN_VARIANCE_ESTIMATOR_H_
diff --git a/modules/audio_processing/echo_detector/mean_variance_estimator_unittest.cc b/modules/audio_processing/echo_detector/mean_variance_estimator_unittest.cc
new file mode 100644
index 0000000..f8efc3a
--- /dev/null
+++ b/modules/audio_processing/echo_detector/mean_variance_estimator_unittest.cc
@@ -0,0 +1,64 @@
+
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/echo_detector/mean_variance_estimator.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(MeanVarianceEstimatorTests, InsertTwoValues) {
+  MeanVarianceEstimator test_estimator;
+  // Insert two values.
+  test_estimator.Update(3.f);
+  test_estimator.Update(5.f);
+
+  EXPECT_GT(test_estimator.mean(), 0.f);
+  EXPECT_GT(test_estimator.std_deviation(), 0.f);
+  // Test Clear method
+  test_estimator.Clear();
+  EXPECT_EQ(test_estimator.mean(), 0.f);
+  EXPECT_EQ(test_estimator.std_deviation(), 0.f);
+}
+
+TEST(MeanVarianceEstimatorTests, InsertZeroes) {
+  MeanVarianceEstimator test_estimator;
+  // Insert the same value many times.
+  for (size_t i = 0; i < 20000; i++) {
+    test_estimator.Update(0.f);
+  }
+  EXPECT_EQ(test_estimator.mean(), 0.f);
+  EXPECT_EQ(test_estimator.std_deviation(), 0.f);
+}
+
+TEST(MeanVarianceEstimatorTests, ConstantValueTest) {
+  MeanVarianceEstimator test_estimator;
+  for (size_t i = 0; i < 20000; i++) {
+    test_estimator.Update(3.f);
+  }
+  // The mean should be close to three, and the standard deviation should be
+  // close to zero.
+  EXPECT_NEAR(3.0f, test_estimator.mean(), 0.01f);
+  EXPECT_NEAR(0.0f, test_estimator.std_deviation(), 0.01f);
+}
+
+TEST(MeanVarianceEstimatorTests, AlternatingValueTest) {
+  MeanVarianceEstimator test_estimator;
+  for (size_t i = 0; i < 20000; i++) {
+    test_estimator.Update(1.f);
+    test_estimator.Update(-1.f);
+  }
+  // The mean should be close to zero, and the standard deviation should be
+  // close to one.
+  EXPECT_NEAR(0.0f, test_estimator.mean(), 0.01f);
+  EXPECT_NEAR(1.0f, test_estimator.std_deviation(), 0.01f);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/echo_detector/moving_max.cc b/modules/audio_processing/echo_detector/moving_max.cc
new file mode 100644
index 0000000..3054e98
--- /dev/null
+++ b/modules/audio_processing/echo_detector/moving_max.cc
@@ -0,0 +1,52 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/echo_detector/moving_max.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+// Parameter for controlling how fast the estimated maximum decays after the
+// previous maximum is no longer valid. With a value of 0.99, the maximum will
+// decay to 1% of its former value after 460 updates.
+constexpr float kDecayFactor = 0.99f;
+
+}  // namespace
+
+MovingMax::MovingMax(size_t window_size) : window_size_(window_size) {
+  RTC_DCHECK_GT(window_size, 0);
+}
+
+MovingMax::~MovingMax() {}
+
+void MovingMax::Update(float value) {
+  if (counter_ >= window_size_ - 1) {
+    max_value_ *= kDecayFactor;
+  } else {
+    ++counter_;
+  }
+  if (value > max_value_) {
+    max_value_ = value;
+    counter_ = 0;
+  }
+}
+
+float MovingMax::max() const {
+  return max_value_;
+}
+
+void MovingMax::Clear() {
+  max_value_ = 0.f;
+  counter_ = 0;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/echo_detector/moving_max.h b/modules/audio_processing/echo_detector/moving_max.h
new file mode 100644
index 0000000..f7d8ee8
--- /dev/null
+++ b/modules/audio_processing/echo_detector/moving_max.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_MOVING_MAX_H_
+#define MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_MOVING_MAX_H_
+
+#include <stddef.h>
+
+namespace webrtc {
+
+class MovingMax {
+ public:
+  explicit MovingMax(size_t window_size);
+  ~MovingMax();
+
+  void Update(float value);
+  float max() const;
+  // Reset all of the state in this class.
+  void Clear();
+
+ private:
+  float max_value_ = 0.f;
+  size_t counter_ = 0;
+  size_t window_size_ = 1;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_MOVING_MAX_H_
diff --git a/modules/audio_processing/echo_detector/moving_max_unittest.cc b/modules/audio_processing/echo_detector/moving_max_unittest.cc
new file mode 100644
index 0000000..b67b86f
--- /dev/null
+++ b/modules/audio_processing/echo_detector/moving_max_unittest.cc
@@ -0,0 +1,67 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/echo_detector/moving_max.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+// Test if the maximum is correctly found.
+TEST(MovingMaxTests, SimpleTest) {
+  MovingMax test_moving_max(5);
+  test_moving_max.Update(1.0f);
+  test_moving_max.Update(1.1f);
+  test_moving_max.Update(1.9f);
+  test_moving_max.Update(1.87f);
+  test_moving_max.Update(1.89f);
+  EXPECT_EQ(1.9f, test_moving_max.max());
+}
+
+// Test if values fall out of the window when expected.
+TEST(MovingMaxTests, SlidingWindowTest) {
+  MovingMax test_moving_max(5);
+  test_moving_max.Update(1.0f);
+  test_moving_max.Update(1.9f);
+  test_moving_max.Update(1.7f);
+  test_moving_max.Update(1.87f);
+  test_moving_max.Update(1.89f);
+  test_moving_max.Update(1.3f);
+  test_moving_max.Update(1.2f);
+  EXPECT_LT(test_moving_max.max(), 1.9f);
+}
+
+// Test if Clear() works as expected.
+TEST(MovingMaxTests, ClearTest) {
+  MovingMax test_moving_max(5);
+  test_moving_max.Update(1.0f);
+  test_moving_max.Update(1.1f);
+  test_moving_max.Update(1.9f);
+  test_moving_max.Update(1.87f);
+  test_moving_max.Update(1.89f);
+  EXPECT_EQ(1.9f, test_moving_max.max());
+  test_moving_max.Clear();
+  EXPECT_EQ(0.f, test_moving_max.max());
+}
+
+// Test the decay of the estimated maximum.
+TEST(MovingMaxTests, DecayTest) {
+  MovingMax test_moving_max(1);
+  test_moving_max.Update(1.0f);
+  float previous_value = 1.0f;
+  for (int i = 0; i < 500; i++) {
+    test_moving_max.Update(0.0f);
+    EXPECT_LT(test_moving_max.max(), previous_value);
+    EXPECT_GT(test_moving_max.max(), 0.0f);
+    previous_value = test_moving_max.max();
+  }
+  EXPECT_LT(test_moving_max.max(), 0.01f);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/echo_detector/normalized_covariance_estimator.cc b/modules/audio_processing/echo_detector/normalized_covariance_estimator.cc
new file mode 100644
index 0000000..8ec9fe9
--- /dev/null
+++ b/modules/audio_processing/echo_detector/normalized_covariance_estimator.cc
@@ -0,0 +1,43 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/echo_detector/normalized_covariance_estimator.h"
+
+#include <math.h>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+// Parameter controlling the adaptation speed.
+constexpr float kAlpha = 0.001f;
+
+}  // namespace
+
+void NormalizedCovarianceEstimator::Update(float x,
+                                           float x_mean,
+                                           float x_sigma,
+                                           float y,
+                                           float y_mean,
+                                           float y_sigma) {
+  covariance_ =
+      (1.f - kAlpha) * covariance_ + kAlpha * (x - x_mean) * (y - y_mean);
+  normalized_cross_correlation_ = covariance_ / (x_sigma * y_sigma + .0001f);
+  RTC_DCHECK(isfinite(covariance_));
+  RTC_DCHECK(isfinite(normalized_cross_correlation_));
+}
+
+void NormalizedCovarianceEstimator::Clear() {
+  covariance_ = 0.f;
+  normalized_cross_correlation_ = 0.f;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/echo_detector/normalized_covariance_estimator.h b/modules/audio_processing/echo_detector/normalized_covariance_estimator.h
new file mode 100644
index 0000000..e3c36d8
--- /dev/null
+++ b/modules/audio_processing/echo_detector/normalized_covariance_estimator.h
@@ -0,0 +1,43 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_NORMALIZED_COVARIANCE_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_NORMALIZED_COVARIANCE_ESTIMATOR_H_
+
+namespace webrtc {
+
+// This class iteratively estimates the normalized covariance between two
+// signals.
+class NormalizedCovarianceEstimator {
+ public:
+  void Update(float x,
+              float x_mean,
+              float x_var,
+              float y,
+              float y_mean,
+              float y_var);
+  // This function returns an estimate of the Pearson product-moment correlation
+  // coefficient of the two signals.
+  float normalized_cross_correlation() const {
+    return normalized_cross_correlation_;
+  }
+  float covariance() const { return covariance_; }
+  // This function resets the estimated values to zero.
+  void Clear();
+
+ private:
+  float normalized_cross_correlation_ = 0.f;
+  // Estimate of the covariance value.
+  float covariance_ = 0.f;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_ECHO_DETECTOR_NORMALIZED_COVARIANCE_ESTIMATOR_H_
diff --git a/modules/audio_processing/echo_detector/normalized_covariance_estimator_unittest.cc b/modules/audio_processing/echo_detector/normalized_covariance_estimator_unittest.cc
new file mode 100644
index 0000000..7e0512e
--- /dev/null
+++ b/modules/audio_processing/echo_detector/normalized_covariance_estimator_unittest.cc
@@ -0,0 +1,40 @@
+
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/echo_detector/normalized_covariance_estimator.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(NormalizedCovarianceEstimatorTests, IdenticalSignalTest) {
+  NormalizedCovarianceEstimator test_estimator;
+  for (size_t i = 0; i < 10000; i++) {
+    test_estimator.Update(1.f, 0.f, 1.f, 1.f, 0.f, 1.f);
+    test_estimator.Update(-1.f, 0.f, 1.f, -1.f, 0.f, 1.f);
+  }
+  // A normalized covariance value close to 1 is expected.
+  EXPECT_NEAR(1.f, test_estimator.normalized_cross_correlation(), 0.01f);
+  test_estimator.Clear();
+  EXPECT_EQ(0.f, test_estimator.normalized_cross_correlation());
+}
+
+TEST(NormalizedCovarianceEstimatorTests, OppositeSignalTest) {
+  NormalizedCovarianceEstimator test_estimator;
+  // Insert the same value many times.
+  for (size_t i = 0; i < 10000; i++) {
+    test_estimator.Update(1.f, 0.f, 1.f, -1.f, 0.f, 1.f);
+    test_estimator.Update(-1.f, 0.f, 1.f, 1.f, 0.f, 1.f);
+  }
+  // A normalized covariance value close to -1 is expected.
+  EXPECT_NEAR(-1.f, test_estimator.normalized_cross_correlation(), 0.01f);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/gain_control_for_experimental_agc.cc b/modules/audio_processing/gain_control_for_experimental_agc.cc
new file mode 100644
index 0000000..d5d978c
--- /dev/null
+++ b/modules/audio_processing/gain_control_for_experimental_agc.cc
@@ -0,0 +1,120 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/gain_control_for_experimental_agc.h"
+
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/criticalsection.h"
+
+namespace webrtc {
+
+int GainControlForExperimentalAgc::instance_counter_ = 0;
+
+GainControlForExperimentalAgc::GainControlForExperimentalAgc(
+    GainControl* gain_control,
+    rtc::CriticalSection* crit_capture)
+    : data_dumper_(new ApmDataDumper(instance_counter_)),
+      real_gain_control_(gain_control),
+      volume_(0),
+      crit_capture_(crit_capture) {
+  instance_counter_++;
+}
+
+GainControlForExperimentalAgc::~GainControlForExperimentalAgc() = default;
+
+int GainControlForExperimentalAgc::Enable(bool enable) {
+  return real_gain_control_->Enable(enable);
+}
+
+bool GainControlForExperimentalAgc::is_enabled() const {
+  return real_gain_control_->is_enabled();
+}
+
+int GainControlForExperimentalAgc::set_stream_analog_level(int level) {
+  rtc::CritScope cs_capture(crit_capture_);
+  data_dumper_->DumpRaw("experimental_gain_control_set_stream_analog_level", 1,
+                        &level);
+  volume_ = level;
+  return AudioProcessing::kNoError;
+}
+
+int GainControlForExperimentalAgc::stream_analog_level() {
+  rtc::CritScope cs_capture(crit_capture_);
+  data_dumper_->DumpRaw("experimental_gain_control_stream_analog_level", 1,
+                        &volume_);
+  return volume_;
+}
+
+int GainControlForExperimentalAgc::set_mode(Mode mode) {
+  return AudioProcessing::kNoError;
+}
+
+GainControl::Mode GainControlForExperimentalAgc::mode() const {
+  return GainControl::kAdaptiveAnalog;
+}
+
+int GainControlForExperimentalAgc::set_target_level_dbfs(int level) {
+  return AudioProcessing::kNoError;
+}
+
+int GainControlForExperimentalAgc::target_level_dbfs() const {
+  return real_gain_control_->target_level_dbfs();
+}
+
+int GainControlForExperimentalAgc::set_compression_gain_db(int gain) {
+  return AudioProcessing::kNoError;
+}
+
+int GainControlForExperimentalAgc::compression_gain_db() const {
+  return real_gain_control_->compression_gain_db();
+}
+
+int GainControlForExperimentalAgc::enable_limiter(bool enable) {
+  return AudioProcessing::kNoError;
+}
+
+bool GainControlForExperimentalAgc::is_limiter_enabled() const {
+  return real_gain_control_->is_limiter_enabled();
+}
+
+int GainControlForExperimentalAgc::set_analog_level_limits(int minimum,
+                                                           int maximum) {
+  return AudioProcessing::kNoError;
+}
+
+int GainControlForExperimentalAgc::analog_level_minimum() const {
+  return real_gain_control_->analog_level_minimum();
+}
+
+int GainControlForExperimentalAgc::analog_level_maximum() const {
+  return real_gain_control_->analog_level_maximum();
+}
+
+bool GainControlForExperimentalAgc::stream_is_saturated() const {
+  return real_gain_control_->stream_is_saturated();
+}
+
+void GainControlForExperimentalAgc::SetMicVolume(int volume) {
+  rtc::CritScope cs_capture(crit_capture_);
+  volume_ = volume;
+}
+
+int GainControlForExperimentalAgc::GetMicVolume() {
+  rtc::CritScope cs_capture(crit_capture_);
+  return volume_;
+}
+
+void GainControlForExperimentalAgc::Initialize() {
+  data_dumper_->InitiateNewSetOfRecordings();
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/gain_control_for_experimental_agc.h b/modules/audio_processing/gain_control_for_experimental_agc.h
new file mode 100644
index 0000000..0894a0e
--- /dev/null
+++ b/modules/audio_processing/gain_control_for_experimental_agc.h
@@ -0,0 +1,77 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_GAIN_CONTROL_FOR_EXPERIMENTAL_AGC_H_
+#define MODULES_AUDIO_PROCESSING_GAIN_CONTROL_FOR_EXPERIMENTAL_AGC_H_
+
+#include "modules/audio_processing/agc/agc_manager_direct.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/thread_checker.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+
+// This class has two main purposes:
+//
+// 1) It is returned instead of the real GainControl after the new AGC has been
+//    enabled in order to prevent an outside user from overriding compression
+//    settings. It doesn't do anything in its implementation, except for
+//    delegating the const methods and Enable calls to the real GainControl, so
+//    AGC can still be disabled.
+//
+// 2) It is injected into AgcManagerDirect and implements volume callbacks for
+//    getting and setting the volume level. It just caches this value to be used
+//    in VoiceEngine later.
+class GainControlForExperimentalAgc : public GainControl,
+                                      public VolumeCallbacks {
+ public:
+  GainControlForExperimentalAgc(GainControl* gain_control,
+                                rtc::CriticalSection* crit_capture);
+  ~GainControlForExperimentalAgc() override;
+
+  // GainControl implementation.
+  int Enable(bool enable) override;
+  bool is_enabled() const override;
+  int set_stream_analog_level(int level) override;
+  int stream_analog_level() override;
+  int set_mode(Mode mode) override;
+  Mode mode() const override;
+  int set_target_level_dbfs(int level) override;
+  int target_level_dbfs() const override;
+  int set_compression_gain_db(int gain) override;
+  int compression_gain_db() const override;
+  int enable_limiter(bool enable) override;
+  bool is_limiter_enabled() const override;
+  int set_analog_level_limits(int minimum, int maximum) override;
+  int analog_level_minimum() const override;
+  int analog_level_maximum() const override;
+  bool stream_is_saturated() const override;
+
+  // VolumeCallbacks implementation.
+  void SetMicVolume(int volume) override;
+  int GetMicVolume() override;
+
+  void Initialize();
+
+ private:
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  GainControl* real_gain_control_;
+  int volume_;
+  rtc::CriticalSection* crit_capture_;
+  static int instance_counter_;
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(GainControlForExperimentalAgc);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_GAIN_CONTROL_FOR_EXPERIMENTAL_AGC_H_
diff --git a/modules/audio_processing/gain_control_impl.cc b/modules/audio_processing/gain_control_impl.cc
new file mode 100644
index 0000000..e550ebb
--- /dev/null
+++ b/modules/audio_processing/gain_control_impl.cc
@@ -0,0 +1,439 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/gain_control_impl.h"
+
+#include "api/optional.h"
+#include "modules/audio_processing/agc/legacy/gain_control.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+typedef void Handle;
+
+namespace {
+int16_t MapSetting(GainControl::Mode mode) {
+  switch (mode) {
+    case GainControl::kAdaptiveAnalog:
+      return kAgcModeAdaptiveAnalog;
+    case GainControl::kAdaptiveDigital:
+      return kAgcModeAdaptiveDigital;
+    case GainControl::kFixedDigital:
+      return kAgcModeFixedDigital;
+  }
+  RTC_NOTREACHED();
+  return -1;
+}
+
+}  // namespace
+
+class GainControlImpl::GainController {
+ public:
+  explicit GainController() {
+    state_ = WebRtcAgc_Create();
+    RTC_CHECK(state_);
+  }
+
+  ~GainController() {
+    RTC_DCHECK(state_);
+    WebRtcAgc_Free(state_);
+  }
+
+  Handle* state() {
+    RTC_DCHECK(state_);
+    return state_;
+  }
+
+  void Initialize(int minimum_capture_level,
+                  int maximum_capture_level,
+                  Mode mode,
+                  int sample_rate_hz,
+                  int capture_level) {
+    RTC_DCHECK(state_);
+    int error =
+        WebRtcAgc_Init(state_, minimum_capture_level, maximum_capture_level,
+                       MapSetting(mode), sample_rate_hz);
+    RTC_DCHECK_EQ(0, error);
+
+    set_capture_level(capture_level);
+  }
+
+  void set_capture_level(int capture_level) {
+    capture_level_ = capture_level;
+  }
+
+  int get_capture_level() {
+    RTC_DCHECK(capture_level_);
+    return *capture_level_;
+  }
+
+ private:
+  Handle* state_;
+  // TODO(peah): Remove the optional once the initialization is moved into the
+  // ctor.
+  rtc::Optional<int> capture_level_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(GainController);
+};
+
+int GainControlImpl::instance_counter_ = 0;
+
+GainControlImpl::GainControlImpl(rtc::CriticalSection* crit_render,
+                                 rtc::CriticalSection* crit_capture)
+    : crit_render_(crit_render),
+      crit_capture_(crit_capture),
+      data_dumper_(new ApmDataDumper(instance_counter_)),
+      mode_(kAdaptiveAnalog),
+      minimum_capture_level_(0),
+      maximum_capture_level_(255),
+      limiter_enabled_(true),
+      target_level_dbfs_(3),
+      compression_gain_db_(9),
+      analog_capture_level_(0),
+      was_analog_level_set_(false),
+      stream_is_saturated_(false) {
+  RTC_DCHECK(crit_render);
+  RTC_DCHECK(crit_capture);
+}
+
+GainControlImpl::~GainControlImpl() {}
+
+void GainControlImpl::ProcessRenderAudio(
+    rtc::ArrayView<const int16_t> packed_render_audio) {
+  rtc::CritScope cs_capture(crit_capture_);
+  if (!enabled_) {
+    return;
+  }
+
+  for (auto& gain_controller : gain_controllers_) {
+    WebRtcAgc_AddFarend(gain_controller->state(), packed_render_audio.data(),
+                        packed_render_audio.size());
+  }
+}
+
+void GainControlImpl::PackRenderAudioBuffer(
+    AudioBuffer* audio,
+    std::vector<int16_t>* packed_buffer) {
+  RTC_DCHECK_GE(160, audio->num_frames_per_band());
+
+  packed_buffer->clear();
+  packed_buffer->insert(
+      packed_buffer->end(), audio->mixed_low_pass_data(),
+      (audio->mixed_low_pass_data() + audio->num_frames_per_band()));
+}
+
+int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
+  rtc::CritScope cs(crit_capture_);
+
+  if (!enabled_) {
+    return AudioProcessing::kNoError;
+  }
+
+  RTC_DCHECK(num_proc_channels_);
+  RTC_DCHECK_GE(160, audio->num_frames_per_band());
+  RTC_DCHECK_EQ(audio->num_channels(), *num_proc_channels_);
+  RTC_DCHECK_LE(*num_proc_channels_, gain_controllers_.size());
+
+  if (mode_ == kAdaptiveAnalog) {
+    int capture_channel = 0;
+    for (auto& gain_controller : gain_controllers_) {
+      gain_controller->set_capture_level(analog_capture_level_);
+      int err = WebRtcAgc_AddMic(
+          gain_controller->state(), audio->split_bands(capture_channel),
+          audio->num_bands(), audio->num_frames_per_band());
+
+      if (err != AudioProcessing::kNoError) {
+        return AudioProcessing::kUnspecifiedError;
+      }
+      ++capture_channel;
+    }
+  } else if (mode_ == kAdaptiveDigital) {
+    int capture_channel = 0;
+    for (auto& gain_controller : gain_controllers_) {
+      int32_t capture_level_out = 0;
+      int err = WebRtcAgc_VirtualMic(
+          gain_controller->state(), audio->split_bands(capture_channel),
+          audio->num_bands(), audio->num_frames_per_band(),
+          analog_capture_level_, &capture_level_out);
+
+      gain_controller->set_capture_level(capture_level_out);
+
+      if (err != AudioProcessing::kNoError) {
+        return AudioProcessing::kUnspecifiedError;
+      }
+      ++capture_channel;
+    }
+  }
+
+  return AudioProcessing::kNoError;
+}
+
+int GainControlImpl::ProcessCaptureAudio(AudioBuffer* audio,
+                                         bool stream_has_echo) {
+  rtc::CritScope cs(crit_capture_);
+
+  if (!enabled_) {
+    return AudioProcessing::kNoError;
+  }
+
+  if (mode_ == kAdaptiveAnalog && !was_analog_level_set_) {
+    return AudioProcessing::kStreamParameterNotSetError;
+  }
+
+  RTC_DCHECK(num_proc_channels_);
+  RTC_DCHECK_GE(160, audio->num_frames_per_band());
+  RTC_DCHECK_EQ(audio->num_channels(), *num_proc_channels_);
+
+  stream_is_saturated_ = false;
+  int capture_channel = 0;
+  for (auto& gain_controller : gain_controllers_) {
+    int32_t capture_level_out = 0;
+    uint8_t saturation_warning = 0;
+
+    // The call to stream_has_echo() is ok from a deadlock perspective
+    // as the capture lock is allready held.
+    int err = WebRtcAgc_Process(
+        gain_controller->state(), audio->split_bands_const(capture_channel),
+        audio->num_bands(), audio->num_frames_per_band(),
+        audio->split_bands(capture_channel),
+        gain_controller->get_capture_level(), &capture_level_out,
+        stream_has_echo, &saturation_warning);
+
+    if (err != AudioProcessing::kNoError) {
+      return AudioProcessing::kUnspecifiedError;
+    }
+
+    gain_controller->set_capture_level(capture_level_out);
+    if (saturation_warning == 1) {
+      stream_is_saturated_ = true;
+    }
+
+    ++capture_channel;
+  }
+
+  RTC_DCHECK_LT(0ul, *num_proc_channels_);
+  if (mode_ == kAdaptiveAnalog) {
+    // Take the analog level to be the average across the handles.
+    analog_capture_level_ = 0;
+    for (auto& gain_controller : gain_controllers_) {
+      analog_capture_level_ += gain_controller->get_capture_level();
+    }
+
+    analog_capture_level_ /= (*num_proc_channels_);
+  }
+
+  was_analog_level_set_ = false;
+  return AudioProcessing::kNoError;
+}
+
+int GainControlImpl::compression_gain_db() const {
+  rtc::CritScope cs(crit_capture_);
+  return compression_gain_db_;
+}
+
+// TODO(ajm): ensure this is called under kAdaptiveAnalog.
+int GainControlImpl::set_stream_analog_level(int level) {
+  rtc::CritScope cs(crit_capture_);
+  data_dumper_->DumpRaw("gain_control_set_stream_analog_level", 1, &level);
+
+  was_analog_level_set_ = true;
+  if (level < minimum_capture_level_ || level > maximum_capture_level_) {
+    return AudioProcessing::kBadParameterError;
+  }
+  analog_capture_level_ = level;
+
+  return AudioProcessing::kNoError;
+}
+
+int GainControlImpl::stream_analog_level() {
+  rtc::CritScope cs(crit_capture_);
+  data_dumper_->DumpRaw("gain_control_stream_analog_level", 1,
+                        &analog_capture_level_);
+  // TODO(ajm): enable this assertion?
+  //RTC_DCHECK_EQ(kAdaptiveAnalog, mode_);
+
+  return analog_capture_level_;
+}
+
+int GainControlImpl::Enable(bool enable) {
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
+  if (enable && !enabled_) {
+    enabled_ = enable;  // Must be set before Initialize() is called.
+
+    RTC_DCHECK(num_proc_channels_);
+    RTC_DCHECK(sample_rate_hz_);
+    Initialize(*num_proc_channels_, *sample_rate_hz_);
+  } else {
+    enabled_ = enable;
+  }
+  return AudioProcessing::kNoError;
+}
+
+bool GainControlImpl::is_enabled() const {
+  rtc::CritScope cs(crit_capture_);
+  return enabled_;
+}
+
+int GainControlImpl::set_mode(Mode mode) {
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
+  if (MapSetting(mode) == -1) {
+    return AudioProcessing::kBadParameterError;
+  }
+
+  mode_ = mode;
+  RTC_DCHECK(num_proc_channels_);
+  RTC_DCHECK(sample_rate_hz_);
+  Initialize(*num_proc_channels_, *sample_rate_hz_);
+  return AudioProcessing::kNoError;
+}
+
+GainControl::Mode GainControlImpl::mode() const {
+  rtc::CritScope cs(crit_capture_);
+  return mode_;
+}
+
+int GainControlImpl::set_analog_level_limits(int minimum,
+                                             int maximum) {
+  if (minimum < 0) {
+    return AudioProcessing::kBadParameterError;
+  }
+
+  if (maximum > 65535) {
+    return AudioProcessing::kBadParameterError;
+  }
+
+  if (maximum < minimum) {
+    return AudioProcessing::kBadParameterError;
+  }
+
+  size_t num_proc_channels_local = 0u;
+  int sample_rate_hz_local = 0;
+  {
+    rtc::CritScope cs(crit_capture_);
+
+    minimum_capture_level_ = minimum;
+    maximum_capture_level_ = maximum;
+
+    RTC_DCHECK(num_proc_channels_);
+    RTC_DCHECK(sample_rate_hz_);
+    num_proc_channels_local = *num_proc_channels_;
+    sample_rate_hz_local = *sample_rate_hz_;
+  }
+  Initialize(num_proc_channels_local, sample_rate_hz_local);
+  return AudioProcessing::kNoError;
+}
+
+int GainControlImpl::analog_level_minimum() const {
+  rtc::CritScope cs(crit_capture_);
+  return minimum_capture_level_;
+}
+
+int GainControlImpl::analog_level_maximum() const {
+  rtc::CritScope cs(crit_capture_);
+  return maximum_capture_level_;
+}
+
+bool GainControlImpl::stream_is_saturated() const {
+  rtc::CritScope cs(crit_capture_);
+  return stream_is_saturated_;
+}
+
+int GainControlImpl::set_target_level_dbfs(int level) {
+  if (level > 31 || level < 0) {
+    return AudioProcessing::kBadParameterError;
+  }
+  {
+    rtc::CritScope cs(crit_capture_);
+    target_level_dbfs_ = level;
+  }
+  return Configure();
+}
+
+int GainControlImpl::target_level_dbfs() const {
+  rtc::CritScope cs(crit_capture_);
+  return target_level_dbfs_;
+}
+
+int GainControlImpl::set_compression_gain_db(int gain) {
+  if (gain < 0 || gain > 90) {
+    return AudioProcessing::kBadParameterError;
+  }
+  {
+    rtc::CritScope cs(crit_capture_);
+    compression_gain_db_ = gain;
+  }
+  return Configure();
+}
+
+int GainControlImpl::enable_limiter(bool enable) {
+  {
+    rtc::CritScope cs(crit_capture_);
+    limiter_enabled_ = enable;
+  }
+  return Configure();
+}
+
+bool GainControlImpl::is_limiter_enabled() const {
+  rtc::CritScope cs(crit_capture_);
+  return limiter_enabled_;
+}
+
+void GainControlImpl::Initialize(size_t num_proc_channels, int sample_rate_hz) {
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
+  data_dumper_->InitiateNewSetOfRecordings();
+
+  num_proc_channels_ = num_proc_channels;
+  sample_rate_hz_ = sample_rate_hz;
+
+  if (!enabled_) {
+    return;
+  }
+
+  gain_controllers_.resize(*num_proc_channels_);
+  for (auto& gain_controller : gain_controllers_) {
+    if (!gain_controller) {
+      gain_controller.reset(new GainController());
+    }
+    gain_controller->Initialize(minimum_capture_level_, maximum_capture_level_,
+                                mode_, *sample_rate_hz_, analog_capture_level_);
+  }
+
+  Configure();
+}
+
+int GainControlImpl::Configure() {
+  rtc::CritScope cs_render(crit_render_);
+  rtc::CritScope cs_capture(crit_capture_);
+  WebRtcAgcConfig config;
+  // TODO(ajm): Flip the sign here (since AGC expects a positive value) if we
+  //            change the interface.
+  //RTC_DCHECK_LE(target_level_dbfs_, 0);
+  //config.targetLevelDbfs = static_cast<int16_t>(-target_level_dbfs_);
+  config.targetLevelDbfs = static_cast<int16_t>(target_level_dbfs_);
+  config.compressionGaindB =
+      static_cast<int16_t>(compression_gain_db_);
+  config.limiterEnable = limiter_enabled_;
+
+  int error = AudioProcessing::kNoError;
+  for (auto& gain_controller : gain_controllers_) {
+    const int handle_error =
+        WebRtcAgc_set_config(gain_controller->state(), config);
+    if (handle_error != AudioProcessing::kNoError) {
+      error = handle_error;
+    }
+  }
+  return error;
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/gain_control_impl.h b/modules/audio_processing/gain_control_impl.h
new file mode 100644
index 0000000..2674506
--- /dev/null
+++ b/modules/audio_processing/gain_control_impl.h
@@ -0,0 +1,97 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_GAIN_CONTROL_IMPL_H_
+#define MODULES_AUDIO_PROCESSING_GAIN_CONTROL_IMPL_H_
+
+#include <memory>
+#include <vector>
+
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/render_queue_item_verifier.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/swap_queue.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+class AudioBuffer;
+
+class GainControlImpl : public GainControl {
+ public:
+  GainControlImpl(rtc::CriticalSection* crit_render,
+                  rtc::CriticalSection* crit_capture);
+  ~GainControlImpl() override;
+
+  void ProcessRenderAudio(rtc::ArrayView<const int16_t> packed_render_audio);
+  int AnalyzeCaptureAudio(AudioBuffer* audio);
+  int ProcessCaptureAudio(AudioBuffer* audio, bool stream_has_echo);
+
+  void Initialize(size_t num_proc_channels, int sample_rate_hz);
+
+  static void PackRenderAudioBuffer(AudioBuffer* audio,
+                                    std::vector<int16_t>* packed_buffer);
+
+  // GainControl implementation.
+  bool is_enabled() const override;
+  int stream_analog_level() override;
+  bool is_limiter_enabled() const override;
+  Mode mode() const override;
+
+  int compression_gain_db() const override;
+
+ private:
+  class GainController;
+
+  // GainControl implementation.
+  int Enable(bool enable) override;
+  int set_stream_analog_level(int level) override;
+  int set_mode(Mode mode) override;
+  int set_target_level_dbfs(int level) override;
+  int target_level_dbfs() const override;
+  int set_compression_gain_db(int gain) override;
+  int enable_limiter(bool enable) override;
+  int set_analog_level_limits(int minimum, int maximum) override;
+  int analog_level_minimum() const override;
+  int analog_level_maximum() const override;
+  bool stream_is_saturated() const override;
+
+  int Configure();
+
+  rtc::CriticalSection* const crit_render_ RTC_ACQUIRED_BEFORE(crit_capture_);
+  rtc::CriticalSection* const crit_capture_;
+
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+
+  bool enabled_ = false;
+
+  Mode mode_ RTC_GUARDED_BY(crit_capture_);
+  int minimum_capture_level_ RTC_GUARDED_BY(crit_capture_);
+  int maximum_capture_level_ RTC_GUARDED_BY(crit_capture_);
+  bool limiter_enabled_ RTC_GUARDED_BY(crit_capture_);
+  int target_level_dbfs_ RTC_GUARDED_BY(crit_capture_);
+  int compression_gain_db_ RTC_GUARDED_BY(crit_capture_);
+  int analog_capture_level_ RTC_GUARDED_BY(crit_capture_);
+  bool was_analog_level_set_ RTC_GUARDED_BY(crit_capture_);
+  bool stream_is_saturated_ RTC_GUARDED_BY(crit_capture_);
+
+  std::vector<std::unique_ptr<GainController>> gain_controllers_;
+
+  rtc::Optional<size_t> num_proc_channels_ RTC_GUARDED_BY(crit_capture_);
+  rtc::Optional<int> sample_rate_hz_ RTC_GUARDED_BY(crit_capture_);
+
+  static int instance_counter_;
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(GainControlImpl);
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_GAIN_CONTROL_IMPL_H_
diff --git a/modules/audio_processing/gain_control_unittest.cc b/modules/audio_processing/gain_control_unittest.cc
new file mode 100644
index 0000000..62908c7
--- /dev/null
+++ b/modules/audio_processing/gain_control_unittest.cc
@@ -0,0 +1,441 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/gain_control_impl.h"
+#include "modules/audio_processing/test/audio_buffer_tools.h"
+#include "modules/audio_processing/test/bitexactness_tools.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+const int kNumFramesToProcess = 100;
+
+void ProcessOneFrame(int sample_rate_hz,
+                     AudioBuffer* render_audio_buffer,
+                     AudioBuffer* capture_audio_buffer,
+                     GainControlImpl* gain_controller) {
+  if (sample_rate_hz > AudioProcessing::kSampleRate16kHz) {
+    render_audio_buffer->SplitIntoFrequencyBands();
+    capture_audio_buffer->SplitIntoFrequencyBands();
+  }
+
+  std::vector<int16_t> render_audio;
+  GainControlImpl::PackRenderAudioBuffer(render_audio_buffer, &render_audio);
+  gain_controller->ProcessRenderAudio(render_audio);
+  gain_controller->AnalyzeCaptureAudio(capture_audio_buffer);
+  gain_controller->ProcessCaptureAudio(capture_audio_buffer, false);
+
+  if (sample_rate_hz > AudioProcessing::kSampleRate16kHz) {
+    capture_audio_buffer->MergeFrequencyBands();
+  }
+}
+
+void SetupComponent(int sample_rate_hz,
+                    GainControl::Mode mode,
+                    int target_level_dbfs,
+                    int stream_analog_level,
+                    int compression_gain_db,
+                    bool enable_limiter,
+                    int analog_level_min,
+                    int analog_level_max,
+                    GainControlImpl* gain_controller) {
+  gain_controller->Initialize(1, sample_rate_hz);
+  GainControl* gc = static_cast<GainControl*>(gain_controller);
+  gc->Enable(true);
+  gc->set_mode(mode);
+  gc->set_stream_analog_level(stream_analog_level);
+  gc->set_target_level_dbfs(target_level_dbfs);
+  gc->set_compression_gain_db(compression_gain_db);
+  gc->enable_limiter(enable_limiter);
+  gc->set_analog_level_limits(analog_level_min, analog_level_max);
+}
+
+void RunBitExactnessTest(int sample_rate_hz,
+                         size_t num_channels,
+                         GainControl::Mode mode,
+                         int target_level_dbfs,
+                         int stream_analog_level,
+                         int compression_gain_db,
+                         bool enable_limiter,
+                         int analog_level_min,
+                         int analog_level_max,
+                         int achieved_stream_analog_level_reference,
+                         rtc::ArrayView<const float> output_reference) {
+  rtc::CriticalSection crit_render;
+  rtc::CriticalSection crit_capture;
+  GainControlImpl gain_controller(&crit_render, &crit_capture);
+  SetupComponent(sample_rate_hz, mode, target_level_dbfs, stream_analog_level,
+                 compression_gain_db, enable_limiter, analog_level_min,
+                 analog_level_max, &gain_controller);
+
+  const int samples_per_channel = rtc::CheckedDivExact(sample_rate_hz, 100);
+  const StreamConfig render_config(sample_rate_hz, num_channels, false);
+  AudioBuffer render_buffer(
+      render_config.num_frames(), render_config.num_channels(),
+      render_config.num_frames(), 1, render_config.num_frames());
+  test::InputAudioFile render_file(
+      test::GetApmRenderTestVectorFileName(sample_rate_hz));
+  std::vector<float> render_input(samples_per_channel * num_channels);
+
+  const StreamConfig capture_config(sample_rate_hz, num_channels, false);
+  AudioBuffer capture_buffer(
+      capture_config.num_frames(), capture_config.num_channels(),
+      capture_config.num_frames(), 1, capture_config.num_frames());
+  test::InputAudioFile capture_file(
+      test::GetApmCaptureTestVectorFileName(sample_rate_hz));
+  std::vector<float> capture_input(samples_per_channel * num_channels);
+
+  for (int frame_no = 0; frame_no < kNumFramesToProcess; ++frame_no) {
+    ReadFloatSamplesFromStereoFile(samples_per_channel, num_channels,
+                                   &render_file, render_input);
+    ReadFloatSamplesFromStereoFile(samples_per_channel, num_channels,
+                                   &capture_file, capture_input);
+
+    test::CopyVectorToAudioBuffer(render_config, render_input, &render_buffer);
+    test::CopyVectorToAudioBuffer(capture_config, capture_input,
+                                  &capture_buffer);
+
+    ProcessOneFrame(sample_rate_hz, &render_buffer, &capture_buffer,
+                    &gain_controller);
+  }
+
+  // Extract and verify the test results.
+  std::vector<float> capture_output;
+  test::ExtractVectorFromAudioBuffer(capture_config, &capture_buffer,
+                                     &capture_output);
+
+  EXPECT_EQ(achieved_stream_analog_level_reference,
+            gain_controller.stream_analog_level());
+
+  // Compare the output with the reference. Only the first values of the output
+  // from last frame processed are compared in order not having to specify all
+  // preceeding frames as testvectors. As the algorithm being tested has a
+  // memory, testing only the last frame implicitly also tests the preceeding
+  // frames.
+  const float kElementErrorBound = 1.0f / 32768.0f;
+  EXPECT_TRUE(test::VerifyDeinterleavedArray(
+      capture_config.num_frames(), capture_config.num_channels(),
+      output_reference, capture_output, kElementErrorBound));
+}
+
+}  // namespace
+
+// TODO(peah): Activate all these tests for ARM and ARM64 once the issue on the
+// Chromium ARM and ARM64 boths have been identified. This is tracked in the
+// issue https://bugs.chromium.org/p/webrtc/issues/detail?id=5711.
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+     Mono8kHz_AdaptiveAnalog_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+     DISABLED_Mono8kHz_AdaptiveAnalog_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+  const int kStreamAnalogLevelReference = 50;
+  const float kOutputReference[] = {-0.006622f, -0.002747f, 0.001587f};
+  RunBitExactnessTest(8000, 1, GainControl::Mode::kAdaptiveAnalog, 10, 50, 5,
+                      true, 0, 100, kStreamAnalogLevelReference,
+                      kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+     Mono16kHz_AdaptiveAnalog_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+     DISABLED_Mono16kHz_AdaptiveAnalog_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+  const int kStreamAnalogLevelReference = 50;
+  const float kOutputReference[] = {-0.006561f, -0.004608f, -0.002899f};
+  RunBitExactnessTest(16000, 1, GainControl::Mode::kAdaptiveAnalog, 10, 50, 5,
+                      true, 0, 100, kStreamAnalogLevelReference,
+                      kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+     Stereo16kHz_AdaptiveAnalog_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+     DISABLED_Stereo16kHz_AdaptiveAnalog_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+  const int kStreamAnalogLevelReference = 50;
+  const float kOutputReference[] = {-0.027313f, -0.015900f, -0.028107f,
+                                    -0.027313f, -0.015900f, -0.028107f};
+  RunBitExactnessTest(16000, 2, GainControl::Mode::kAdaptiveAnalog, 10, 50, 5,
+                      true, 0, 100, kStreamAnalogLevelReference,
+                      kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+     Mono32kHz_AdaptiveAnalog_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+     DISABLED_Mono32kHz_AdaptiveAnalog_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+  const int kStreamAnalogLevelReference = 50;
+  const float kOutputReference[] = {-0.010162f, -0.009155f, -0.008301f};
+  RunBitExactnessTest(32000, 1, GainControl::Mode::kAdaptiveAnalog, 10, 50, 5,
+                      true, 0, 100, kStreamAnalogLevelReference,
+                      kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+     Mono48kHz_AdaptiveAnalog_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+     DISABLED_Mono48kHz_AdaptiveAnalog_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+  const int kStreamAnalogLevelReference = 50;
+  const float kOutputReference[] = {-0.010162f, -0.009155f, -0.008301f};
+  RunBitExactnessTest(32000, 1, GainControl::Mode::kAdaptiveAnalog, 10, 50, 5,
+                      true, 0, 100, kStreamAnalogLevelReference,
+                      kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+     Mono8kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+     DISABLED_Mono8kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+  const int kStreamAnalogLevelReference = 50;
+  const float kOutputReference[] = {-0.004028f, -0.001678f, 0.000946f};
+  RunBitExactnessTest(8000, 1, GainControl::Mode::kAdaptiveDigital, 10, 50, 5,
+                      true, 0, 100, kStreamAnalogLevelReference,
+                      kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+     Mono16kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+     DISABLED_Mono16kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+  const int kStreamAnalogLevelReference = 50;
+  const float kOutputReference[] = {-0.003967f, -0.002777f, -0.001770f};
+  RunBitExactnessTest(16000, 1, GainControl::Mode::kAdaptiveDigital, 10, 50, 5,
+                      true, 0, 100, kStreamAnalogLevelReference,
+                      kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+     Stereo16kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+     DISABLED_Stereo16kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+  const int kStreamAnalogLevelReference = 50;
+  const float kOutputReference[] = {-0.015411f, -0.008972f, -0.015839f,
+                                    -0.015411f, -0.008972f, -0.015839f};
+  RunBitExactnessTest(16000, 2, GainControl::Mode::kAdaptiveDigital, 10, 50, 5,
+                      true, 0, 100, kStreamAnalogLevelReference,
+                      kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+     Mono32kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+     DISABLED_Mono32kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+  const int kStreamAnalogLevelReference = 50;
+  const float kOutputReference[] = {-0.006104f, -0.005524f, -0.004974f};
+  RunBitExactnessTest(32000, 1, GainControl::Mode::kAdaptiveDigital, 10, 50, 5,
+                      true, 0, 100, kStreamAnalogLevelReference,
+                      kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+     Mono48kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+     DISABLED_Mono48kHz_AdaptiveDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+  const int kStreamAnalogLevelReference = 50;
+  const float kOutputReference[] = {-0.006104f, -0.005524f, -0.004974f};
+  RunBitExactnessTest(32000, 1, GainControl::Mode::kAdaptiveDigital, 10, 50, 5,
+                      true, 0, 100, kStreamAnalogLevelReference,
+                      kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+     Mono8kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+     DISABLED_Mono8kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+  const int kStreamAnalogLevelReference = 50;
+  const float kOutputReference[] = {-0.011871f, -0.004944f, 0.002838f};
+  RunBitExactnessTest(8000, 1, GainControl::Mode::kFixedDigital, 10, 50, 5,
+                      true, 0, 100, kStreamAnalogLevelReference,
+                      kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+     Mono16kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+     DISABLED_Mono16kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+  const int kStreamAnalogLevelReference = 50;
+  const float kOutputReference[] = {-0.011749f, -0.008270f, -0.005219f};
+  RunBitExactnessTest(16000, 1, GainControl::Mode::kFixedDigital, 10, 50, 5,
+                      true, 0, 100, kStreamAnalogLevelReference,
+                      kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+     Stereo16kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+     DISABLED_Stereo16kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+  const int kStreamAnalogLevelReference = 50;
+  const float kOutputReference[] = {-0.048950f, -0.028503f, -0.050354f,
+                                    -0.048950f, -0.028503f, -0.050354f};
+  RunBitExactnessTest(16000, 2, GainControl::Mode::kFixedDigital, 10, 50, 5,
+                      true, 0, 100, kStreamAnalogLevelReference,
+                      kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+     Mono32kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+     DISABLED_Mono32kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+  const int kStreamAnalogLevelReference = 50;
+  const float kOutputReference[] = {-0.018188f, -0.016418f, -0.014862f};
+  RunBitExactnessTest(32000, 1, GainControl::Mode::kFixedDigital, 10, 50, 5,
+                      true, 0, 100, kStreamAnalogLevelReference,
+                      kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+     Mono48kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+     DISABLED_Mono48kHz_FixedDigital_Tl10_SL50_CG5_Lim_AL0_100) {
+#endif
+  const int kStreamAnalogLevelReference = 50;
+  const float kOutputReference[] = {-0.018188f, -0.016418f, -0.014862f};
+  RunBitExactnessTest(32000, 1, GainControl::Mode::kFixedDigital, 10, 50, 5,
+                      true, 0, 100, kStreamAnalogLevelReference,
+                      kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+     Mono16kHz_AdaptiveAnalog_Tl10_SL10_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+     DISABLED_Mono16kHz_AdaptiveAnalog_Tl10_SL10_CG5_Lim_AL0_100) {
+#endif
+  const int kStreamAnalogLevelReference = 12;
+  const float kOutputReference[] = {-0.006561f, -0.004608f, -0.002899f};
+  RunBitExactnessTest(16000, 1, GainControl::Mode::kAdaptiveAnalog, 10, 10, 5,
+                      true, 0, 100, kStreamAnalogLevelReference,
+                      kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+     Mono16kHz_AdaptiveAnalog_Tl10_SL100_CG5_Lim_AL70_80) {
+#else
+TEST(GainControlBitExactnessTest,
+     DISABLED_Mono16kHz_AdaptiveAnalog_Tl10_SL100_CG5_Lim_AL70_80) {
+#endif
+  const int kStreamAnalogLevelReference = 100;
+  const float kOutputReference[] = {-0.003998f, -0.002808f, -0.001770f};
+  RunBitExactnessTest(16000, 1, GainControl::Mode::kAdaptiveAnalog, 10, 100, 5,
+                      true, 70, 80, kStreamAnalogLevelReference,
+                      kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+     Mono16kHz_AdaptiveDigital_Tl10_SL100_CG5_NoLim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+     DISABLED_Mono16kHz_AdaptiveDigital_Tl10_SL100_CG5_NoLim_AL0_100) {
+#endif
+  const int kStreamAnalogLevelReference = 100;
+  const float kOutputReference[] = {-0.004028f, -0.002838f, -0.001770f};
+  RunBitExactnessTest(16000, 1, GainControl::Mode::kAdaptiveDigital, 10, 100, 5,
+                      false, 0, 100, kStreamAnalogLevelReference,
+                      kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+     Mono16kHz_AdaptiveDigital_Tl40_SL100_CG5_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+     DISABLED_Mono16kHz_AdaptiveDigital_Tl40_SL100_CG5_Lim_AL0_100) {
+#endif
+  const int kStreamAnalogLevelReference = 100;
+  const float kOutputReference[] = {-0.008728f, -0.006134f, -0.003845f};
+  RunBitExactnessTest(16000, 1, GainControl::Mode::kAdaptiveDigital, 40, 100, 5,
+                      true, 0, 100, kStreamAnalogLevelReference,
+                      kOutputReference);
+}
+
+#if !(defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM) || \
+      defined(WEBRTC_ANDROID))
+TEST(GainControlBitExactnessTest,
+     Mono16kHz_AdaptiveDigital_Tl10_SL100_CG30_Lim_AL0_100) {
+#else
+TEST(GainControlBitExactnessTest,
+     DISABLED_Mono16kHz_AdaptiveDigital_Tl10_SL100_CG30_Lim_AL0_100) {
+#endif
+  const int kStreamAnalogLevelReference = 100;
+  const float kOutputReference[] = {-0.005859f, -0.004120f, -0.002594f};
+  RunBitExactnessTest(16000, 1, GainControl::Mode::kAdaptiveDigital, 10, 100,
+                      30, true, 0, 100, kStreamAnalogLevelReference,
+                      kOutputReference);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/gain_controller2.cc b/modules/audio_processing/gain_controller2.cc
new file mode 100644
index 0000000..aa866c1
--- /dev/null
+++ b/modules/audio_processing/gain_controller2.cc
@@ -0,0 +1,68 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/gain_controller2.h"
+
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/include/audio_frame_view.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/atomicops.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+int GainController2::instance_count_ = 0;
+
+GainController2::GainController2()
+    : data_dumper_(
+          new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))),
+      gain_controller_(data_dumper_.get()) {}
+
+GainController2::~GainController2() = default;
+
+void GainController2::Initialize(int sample_rate_hz) {
+  RTC_DCHECK(sample_rate_hz == AudioProcessing::kSampleRate8kHz ||
+             sample_rate_hz == AudioProcessing::kSampleRate16kHz ||
+             sample_rate_hz == AudioProcessing::kSampleRate32kHz ||
+             sample_rate_hz == AudioProcessing::kSampleRate48kHz);
+  gain_controller_.SetSampleRate(sample_rate_hz);
+  data_dumper_->InitiateNewSetOfRecordings();
+  data_dumper_->DumpRaw("sample_rate_hz", sample_rate_hz);
+}
+
+void GainController2::Process(AudioBuffer* audio) {
+  AudioFrameView<float> float_frame(audio->channels_f(), audio->num_channels(),
+                                    audio->num_frames());
+  gain_controller_.Process(float_frame);
+}
+
+void GainController2::ApplyConfig(
+    const AudioProcessing::Config::GainController2& config) {
+  RTC_DCHECK(Validate(config));
+  config_ = config;
+  gain_controller_.SetGain(config_.fixed_gain_db);
+  gain_controller_.EnableLimiter(config_.enable_limiter);
+}
+
+bool GainController2::Validate(
+    const AudioProcessing::Config::GainController2& config) {
+  return config.fixed_gain_db >= 0.f;
+}
+
+std::string GainController2::ToString(
+    const AudioProcessing::Config::GainController2& config) {
+  std::stringstream ss;
+  ss << "{enabled: " << (config.enabled ? "true" : "false") << ", "
+     << "fixed_gain_dB: " << config.fixed_gain_db << ", "
+     << "enable_limiter: " << (config.enable_limiter ? "true" : "false") << "}";
+  return ss.str();
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/gain_controller2.h b/modules/audio_processing/gain_controller2.h
new file mode 100644
index 0000000..6de4564
--- /dev/null
+++ b/modules/audio_processing/gain_controller2.h
@@ -0,0 +1,52 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_GAIN_CONTROLLER2_H_
+#define MODULES_AUDIO_PROCESSING_GAIN_CONTROLLER2_H_
+
+#include <memory>
+#include <string>
+
+#include "modules/audio_processing/agc2/fixed_gain_controller.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+class AudioBuffer;
+
+// Gain Controller 2 aims to automatically adjust levels by acting on the
+// microphone gain and/or applying digital gain.
+class GainController2 {
+ public:
+  GainController2();
+  ~GainController2();
+
+  void Initialize(int sample_rate_hz);
+  void Process(AudioBuffer* audio);
+
+  void ApplyConfig(const AudioProcessing::Config::GainController2& config);
+  static bool Validate(const AudioProcessing::Config::GainController2& config);
+  static std::string ToString(
+      const AudioProcessing::Config::GainController2& config);
+
+ private:
+  static int instance_count_;
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  FixedGainController gain_controller_;
+  AudioProcessing::Config::GainController2 config_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(GainController2);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_GAIN_CONTROLLER2_H_
diff --git a/modules/audio_processing/gain_controller2_unittest.cc b/modules/audio_processing/gain_controller2_unittest.cc
new file mode 100644
index 0000000..0c3b383
--- /dev/null
+++ b/modules/audio_processing/gain_controller2_unittest.cc
@@ -0,0 +1,98 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/gain_controller2.h"
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+constexpr size_t kFrameSizeMs = 10u;
+constexpr size_t kStereo = 2u;
+
+void SetAudioBufferSamples(float value, AudioBuffer* ab) {
+  // Sets all the samples in |ab| to |value|.
+  for (size_t k = 0; k < ab->num_channels(); ++k) {
+    std::fill(ab->channels_f()[k], ab->channels_f()[k] + ab->num_frames(),
+              value);
+  }
+}
+
+}  // namespace
+
+TEST(GainController2, CreateApplyConfig) {
+  // Instances GainController2 and applies different configurations.
+  std::unique_ptr<GainController2> gain_controller2(new GainController2());
+
+  // Check that the default config is valid.
+  AudioProcessing::Config::GainController2 config;
+  EXPECT_TRUE(GainController2::Validate(config));
+  gain_controller2->ApplyConfig(config);
+
+  // Check that attenuation is not allowed.
+  config.fixed_gain_db = -5.f;
+  EXPECT_FALSE(GainController2::Validate(config));
+
+  // Check that valid configurations are applied.
+  for (const float& fixed_gain_db : {0.f, 5.f, 10.f, 50.f}) {
+    config.fixed_gain_db = fixed_gain_db;
+    EXPECT_TRUE(GainController2::Validate(config));
+    gain_controller2->ApplyConfig(config);
+  }
+}
+
+TEST(GainController2, ToString) {
+  // Tests GainController2::ToString().
+  AudioProcessing::Config::GainController2 config;
+  config.fixed_gain_db = 5.f;
+
+  config.enabled = false;
+  config.enable_limiter = true;
+  EXPECT_EQ("{enabled: false, fixed_gain_dB: 5, enable_limiter: true}",
+            GainController2::ToString(config));
+
+  config.enabled = true;
+  EXPECT_EQ("{enabled: true, fixed_gain_dB: 5, enable_limiter: true}",
+            GainController2::ToString(config));
+}
+
+TEST(GainController2, Usage) {
+  // Tests GainController2::Process() on an AudioBuffer instance.
+  std::unique_ptr<GainController2> gain_controller2(new GainController2());
+  gain_controller2->Initialize(AudioProcessing::kSampleRate48kHz);
+  const size_t num_frames = rtc::CheckedDivExact<size_t>(
+      kFrameSizeMs * AudioProcessing::kSampleRate48kHz, 1000);
+  AudioBuffer ab(num_frames, kStereo, num_frames, kStereo, num_frames);
+  constexpr float sample_value = 1000.f;
+  SetAudioBufferSamples(sample_value, &ab);
+  AudioProcessing::Config::GainController2 config;
+
+  // Check that samples are not modified when the fixed gain is 0 dB.
+  config.fixed_gain_db = 0.f;
+  gain_controller2->ApplyConfig(config);
+  gain_controller2->Process(&ab);
+  EXPECT_EQ(ab.channels_f()[0][0], sample_value);
+
+  // Check that samples are amplified when the fixed gain is greater than 0 dB.
+  config.fixed_gain_db = 5.f;
+  gain_controller2->ApplyConfig(config);
+  gain_controller2->Process(&ab);
+  EXPECT_LT(sample_value, ab.channels_f()[0][0]);
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_processing/include/aec_dump.cc b/modules/audio_processing/include/aec_dump.cc
new file mode 100644
index 0000000..365d015
--- /dev/null
+++ b/modules/audio_processing/include/aec_dump.cc
@@ -0,0 +1,40 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/include/aec_dump.h"
+
+namespace webrtc {
+InternalAPMConfig::InternalAPMConfig() = default;
+InternalAPMConfig::InternalAPMConfig(const InternalAPMConfig&) = default;
+InternalAPMConfig::InternalAPMConfig(InternalAPMConfig&&) = default;
+InternalAPMConfig& InternalAPMConfig::operator=(const InternalAPMConfig&) =
+    default;
+
+bool InternalAPMConfig::operator==(const InternalAPMConfig& other) {
+  return aec_enabled == other.aec_enabled &&
+         aec_delay_agnostic_enabled == other.aec_delay_agnostic_enabled &&
+         aec_drift_compensation_enabled ==
+             other.aec_drift_compensation_enabled &&
+         aec_extended_filter_enabled == other.aec_extended_filter_enabled &&
+         aec_suppression_level == other.aec_suppression_level &&
+         aecm_enabled == other.aecm_enabled &&
+         aecm_comfort_noise_enabled == other.aecm_comfort_noise_enabled &&
+         aecm_routing_mode == other.aecm_routing_mode &&
+         agc_enabled == other.agc_enabled && agc_mode == other.agc_mode &&
+         agc_limiter_enabled == other.agc_limiter_enabled &&
+         hpf_enabled == other.hpf_enabled && ns_enabled == other.ns_enabled &&
+         ns_level == other.ns_level &&
+         transient_suppression_enabled == other.transient_suppression_enabled &&
+         intelligibility_enhancer_enabled ==
+             other.intelligibility_enhancer_enabled &&
+         noise_robust_agc_enabled == other.noise_robust_agc_enabled &&
+         experiments_description == other.experiments_description;
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/include/aec_dump.h b/modules/audio_processing/include/aec_dump.h
new file mode 100644
index 0000000..2035bf4
--- /dev/null
+++ b/modules/audio_processing/include/aec_dump.h
@@ -0,0 +1,114 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_INCLUDE_AEC_DUMP_H_
+#define MODULES_AUDIO_PROCESSING_INCLUDE_AEC_DUMP_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/include/audio_frame_view.h"
+
+namespace webrtc {
+
+class AudioFrame;
+
+// Struct for passing current config from APM without having to
+// include protobuf headers.
+struct InternalAPMConfig {
+  InternalAPMConfig();
+  InternalAPMConfig(const InternalAPMConfig&);
+  InternalAPMConfig(InternalAPMConfig&&);
+
+  InternalAPMConfig& operator=(const InternalAPMConfig&);
+  InternalAPMConfig& operator=(InternalAPMConfig&&) = delete;
+
+  bool operator==(const InternalAPMConfig& other);
+
+  bool aec_enabled = false;
+  bool aec_delay_agnostic_enabled = false;
+  bool aec_drift_compensation_enabled = false;
+  bool aec_extended_filter_enabled = false;
+  int aec_suppression_level = 0;
+  bool aecm_enabled = false;
+  bool aecm_comfort_noise_enabled = false;
+  int aecm_routing_mode = 0;
+  bool agc_enabled = false;
+  int agc_mode = 0;
+  bool agc_limiter_enabled = false;
+  bool hpf_enabled = false;
+  bool ns_enabled = false;
+  int ns_level = 0;
+  bool transient_suppression_enabled = false;
+  bool intelligibility_enhancer_enabled = false;
+  bool noise_robust_agc_enabled = false;
+  std::string experiments_description = "";
+};
+
+struct InternalAPMStreamsConfig {
+  int input_sample_rate = 0;
+  int output_sample_rate = 0;
+  int render_input_sample_rate = 0;
+  int render_output_sample_rate = 0;
+
+  size_t input_num_channels = 0;
+  size_t output_num_channels = 0;
+  size_t render_input_num_channels = 0;
+  size_t render_output_num_channels = 0;
+};
+
+// An interface for recording configuration and input/output streams
+// of the Audio Processing Module. The recordings are called
+// 'aec-dumps' and are stored in a protobuf format defined in
+// debug.proto.
+// The Write* methods are always safe to call concurrently or
+// otherwise for all implementing subclasses. The intended mode of
+// operation is to create a protobuf object from the input, and send
+// it away to be written to file asynchronously.
+class AecDump {
+ public:
+  struct AudioProcessingState {
+    int delay;
+    int drift;
+    int level;
+    bool keypress;
+  };
+
+  virtual ~AecDump() = default;
+
+  // Logs Event::Type INIT message.
+  virtual void WriteInitMessage(
+      const InternalAPMStreamsConfig& streams_config) = 0;
+
+  // Logs Event::Type STREAM message. To log an input/output pair,
+  // call the AddCapture* and AddAudioProcessingState methods followed
+  // by a WriteCaptureStreamMessage call.
+  virtual void AddCaptureStreamInput(
+      const AudioFrameView<const float>& src) = 0;
+  virtual void AddCaptureStreamOutput(
+      const AudioFrameView<const float>& src) = 0;
+  virtual void AddCaptureStreamInput(const AudioFrame& frame) = 0;
+  virtual void AddCaptureStreamOutput(const AudioFrame& frame) = 0;
+  virtual void AddAudioProcessingState(const AudioProcessingState& state) = 0;
+  virtual void WriteCaptureStreamMessage() = 0;
+
+  // Logs Event::Type REVERSE_STREAM message.
+  virtual void WriteRenderStreamMessage(const AudioFrame& frame) = 0;
+  virtual void WriteRenderStreamMessage(
+      const AudioFrameView<const float>& src) = 0;
+
+  // Logs Event::Type CONFIG message.
+  virtual void WriteConfig(const InternalAPMConfig& config) = 0;
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_INCLUDE_AEC_DUMP_H_
diff --git a/modules/audio_processing/include/audio_frame_view.h b/modules/audio_processing/include/audio_frame_view.h
new file mode 100644
index 0000000..366fc32
--- /dev/null
+++ b/modules/audio_processing/include/audio_frame_view.h
@@ -0,0 +1,64 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_FRAME_VIEW_H_
+#define MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_FRAME_VIEW_H_
+
+#include "api/array_view.h"
+
+// Class to pass audio data in T** format, where T is a numeric type.
+template <class T>
+class AudioFrameView {
+ public:
+  // |num_channels| and |channel_size| describe the T**
+  // |audio_samples|. |audio_samples| is assumed to point to a
+  // two-dimensional |num_channels * channel_size| array of floats.
+  AudioFrameView(T* const* audio_samples,
+                 size_t num_channels,
+                 size_t channel_size)
+      : audio_samples_(audio_samples),
+        num_channels_(num_channels),
+        channel_size_(channel_size) {}
+
+  // Implicit cast to allow converting Frame<float> to
+  // Frame<const float>.
+  template <class U>
+  AudioFrameView(AudioFrameView<U> other)
+      : audio_samples_(other.data()),
+        num_channels_(other.num_channels()),
+        channel_size_(other.samples_per_channel()) {}
+
+  AudioFrameView() = delete;
+
+  size_t num_channels() const { return num_channels_; }
+
+  size_t samples_per_channel() const { return channel_size_; }
+
+  rtc::ArrayView<T> channel(size_t idx) {
+    RTC_DCHECK_LE(0, idx);
+    RTC_DCHECK_LE(idx, num_channels_);
+    return rtc::ArrayView<T>(audio_samples_[idx], channel_size_);
+  }
+
+  rtc::ArrayView<const T> channel(size_t idx) const {
+    RTC_DCHECK_LE(0, idx);
+    RTC_DCHECK_LE(idx, num_channels_);
+    return rtc::ArrayView<const T>(audio_samples_[idx], channel_size_);
+  }
+
+  T* const* data() { return audio_samples_; }
+
+ private:
+  T* const* audio_samples_;
+  size_t num_channels_;
+  size_t channel_size_;
+};
+
+#endif  // MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_FRAME_VIEW_H_
diff --git a/modules/audio_processing/include/audio_generator.h b/modules/audio_processing/include/audio_generator.h
new file mode 100644
index 0000000..77e6284
--- /dev/null
+++ b/modules/audio_processing/include/audio_generator.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_GENERATOR_H_
+#define MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_GENERATOR_H_
+
+#include "modules/audio_processing/include/audio_frame_view.h"
+
+namespace webrtc {
+// This class is used as input sink for the APM, for diagnostic purposes.
+// Generates an infinite audio signal, [-1, 1] floating point values, in frames
+// of fixed channel count and sample rate.
+class AudioGenerator {
+ public:
+  virtual ~AudioGenerator() {}
+
+  // Fill |audio| with the next samples of the audio signal.
+  virtual void FillFrame(AudioFrameView<float> audio) = 0;
+
+  // Return the number of channels output by the AudioGenerator.
+  virtual size_t NumChannels() = 0;
+
+  // Return the sample rate output by the AudioGenerator.
+  virtual size_t SampleRateHz() = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_GENERATOR_H_
diff --git a/modules/audio_processing/include/audio_generator_factory.cc b/modules/audio_processing/include/audio_generator_factory.cc
new file mode 100644
index 0000000..9084a1e
--- /dev/null
+++ b/modules/audio_processing/include/audio_generator_factory.cc
@@ -0,0 +1,25 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/include/audio_generator_factory.h"
+
+#include "common_audio/wav_file.h"
+#include "modules/audio_processing/audio_generator/file_audio_generator.h"
+#include "rtc_base/ptr_util.h"
+
+namespace webrtc {
+
+std::unique_ptr<AudioGenerator> AudioGeneratorFactory::Create(
+    const std::string& file_name) {
+  std::unique_ptr<WavReader> input_audio_file(new WavReader(file_name));
+  return rtc::MakeUnique<FileAudioGenerator>(std::move(input_audio_file));
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/include/audio_generator_factory.h b/modules/audio_processing/include/audio_generator_factory.h
new file mode 100644
index 0000000..a9513ef
--- /dev/null
+++ b/modules/audio_processing/include/audio_generator_factory.h
@@ -0,0 +1,31 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_GENERATOR_FACTORY_H_
+#define MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_GENERATOR_FACTORY_H_
+
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "modules/audio_processing/include/audio_generator.h"
+
+namespace webrtc {
+
+class AudioGeneratorFactory {
+ public:
+  // Creates an AudioGenerator that reads the playout audio from a given 16-bit
+  // int-encoded WAV file.
+  static std::unique_ptr<AudioGenerator> Create(const std::string& file_name);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_GENERATOR_FACTORY_H_
diff --git a/modules/audio_processing/include/audio_processing.cc b/modules/audio_processing/include/audio_processing.cc
new file mode 100644
index 0000000..e9c56e8
--- /dev/null
+++ b/modules/audio_processing/include/audio_processing.cc
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/include/audio_processing.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+Beamforming::Beamforming()
+    : enabled(false),
+      array_geometry(),
+      target_direction(
+          SphericalPointf(static_cast<float>(M_PI) / 2.f, 0.f, 1.f)) {}
+Beamforming::Beamforming(bool enabled, const std::vector<Point>& array_geometry)
+    : Beamforming(enabled,
+                  array_geometry,
+                  SphericalPointf(static_cast<float>(M_PI) / 2.f, 0.f, 1.f)) {}
+
+Beamforming::Beamforming(bool enabled,
+                         const std::vector<Point>& array_geometry,
+                         SphericalPointf target_direction)
+    : enabled(enabled),
+      array_geometry(array_geometry),
+      target_direction(target_direction) {}
+
+Beamforming::~Beamforming() {}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/include/audio_processing.h b/modules/audio_processing/include/audio_processing.h
new file mode 100644
index 0000000..d31174a
--- /dev/null
+++ b/modules/audio_processing/include/audio_processing.h
@@ -0,0 +1,1174 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_H_
+#define MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_H_
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#ifndef _USE_MATH_DEFINES
+#define _USE_MATH_DEFINES
+#endif
+
+#include <math.h>
+#include <stddef.h>  // size_t
+#include <stdio.h>  // FILE
+#include <string.h>
+#include <vector>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "api/audio/echo_control.h"
+#include "api/optional.h"
+#include "modules/audio_processing/beamformer/array_util.h"
+#include "modules/audio_processing/include/audio_generator.h"
+#include "modules/audio_processing/include/audio_processing_statistics.h"
+#include "modules/audio_processing/include/config.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/deprecation.h"
+#include "rtc_base/platform_file.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+struct AecCore;
+
+class AecDump;
+class AudioBuffer;
+class AudioFrame;
+
+class NonlinearBeamformer;
+
+class StreamConfig;
+class ProcessingConfig;
+
+class EchoCancellation;
+class EchoControlMobile;
+class EchoDetector;
+class GainControl;
+class HighPassFilter;
+class LevelEstimator;
+class NoiseSuppression;
+class CustomProcessing;
+class VoiceDetection;
+
+// webrtc:8665, addedd temporarily to avoid breaking dependencies.
+typedef CustomProcessing PostProcessing;
+
+// Use to enable the extended filter mode in the AEC, along with robustness
+// measures around the reported system delays. It comes with a significant
+// increase in AEC complexity, but is much more robust to unreliable reported
+// delays.
+//
+// Detailed changes to the algorithm:
+// - The filter length is changed from 48 to 128 ms. This comes with tuning of
+//   several parameters: i) filter adaptation stepsize and error threshold;
+//   ii) non-linear processing smoothing and overdrive.
+// - Option to ignore the reported delays on platforms which we deem
+//   sufficiently unreliable. See WEBRTC_UNTRUSTED_DELAY in echo_cancellation.c.
+// - Faster startup times by removing the excessive "startup phase" processing
+//   of reported delays.
+// - Much more conservative adjustments to the far-end read pointer. We smooth
+//   the delay difference more heavily, and back off from the difference more.
+//   Adjustments force a readaptation of the filter, so they should be avoided
+//   except when really necessary.
+struct ExtendedFilter {
+  ExtendedFilter() : enabled(false) {}
+  explicit ExtendedFilter(bool enabled) : enabled(enabled) {}
+  static const ConfigOptionID identifier = ConfigOptionID::kExtendedFilter;
+  bool enabled;
+};
+
+// Enables the refined linear filter adaptation in the echo canceller.
+// This configuration only applies to EchoCancellation and not
+// EchoControlMobile. It can be set in the constructor
+// or using AudioProcessing::SetExtraOptions().
+struct RefinedAdaptiveFilter {
+  RefinedAdaptiveFilter() : enabled(false) {}
+  explicit RefinedAdaptiveFilter(bool enabled) : enabled(enabled) {}
+  static const ConfigOptionID identifier =
+      ConfigOptionID::kAecRefinedAdaptiveFilter;
+  bool enabled;
+};
+
+// Enables delay-agnostic echo cancellation. This feature relies on internally
+// estimated delays between the process and reverse streams, thus not relying
+// on reported system delays. This configuration only applies to
+// EchoCancellation and not EchoControlMobile. It can be set in the constructor
+// or using AudioProcessing::SetExtraOptions().
+struct DelayAgnostic {
+  DelayAgnostic() : enabled(false) {}
+  explicit DelayAgnostic(bool enabled) : enabled(enabled) {}
+  static const ConfigOptionID identifier = ConfigOptionID::kDelayAgnostic;
+  bool enabled;
+};
+
+// Use to enable experimental gain control (AGC). At startup the experimental
+// AGC moves the microphone volume up to |startup_min_volume| if the current
+// microphone volume is set too low. The value is clamped to its operating range
+// [12, 255]. Here, 255 maps to 100%.
+//
+// Must be provided through AudioProcessingBuilder().Create(config).
+#if defined(WEBRTC_CHROMIUM_BUILD)
+static const int kAgcStartupMinVolume = 85;
+#else
+static const int kAgcStartupMinVolume = 0;
+#endif  // defined(WEBRTC_CHROMIUM_BUILD)
+static constexpr int kClippedLevelMin = 70;
+struct ExperimentalAgc {
+  ExperimentalAgc() = default;
+  explicit ExperimentalAgc(bool enabled) : enabled(enabled) {}
+  ExperimentalAgc(bool enabled, int startup_min_volume)
+      : enabled(enabled), startup_min_volume(startup_min_volume) {}
+  ExperimentalAgc(bool enabled, int startup_min_volume, int clipped_level_min)
+      : enabled(enabled),
+        startup_min_volume(startup_min_volume),
+        clipped_level_min(clipped_level_min) {}
+  static const ConfigOptionID identifier = ConfigOptionID::kExperimentalAgc;
+  bool enabled = true;
+  int startup_min_volume = kAgcStartupMinVolume;
+  // Lowest microphone level that will be applied in response to clipping.
+  int clipped_level_min = kClippedLevelMin;
+};
+
+// Use to enable experimental noise suppression. It can be set in the
+// constructor or using AudioProcessing::SetExtraOptions().
+struct ExperimentalNs {
+  ExperimentalNs() : enabled(false) {}
+  explicit ExperimentalNs(bool enabled) : enabled(enabled) {}
+  static const ConfigOptionID identifier = ConfigOptionID::kExperimentalNs;
+  bool enabled;
+};
+
+// Use to enable beamforming. Must be provided through the constructor. It will
+// have no impact if used with AudioProcessing::SetExtraOptions().
+struct Beamforming {
+  Beamforming();
+  Beamforming(bool enabled, const std::vector<Point>& array_geometry);
+  Beamforming(bool enabled,
+              const std::vector<Point>& array_geometry,
+              SphericalPointf target_direction);
+  ~Beamforming();
+
+  static const ConfigOptionID identifier = ConfigOptionID::kBeamforming;
+  const bool enabled;
+  const std::vector<Point> array_geometry;
+  const SphericalPointf target_direction;
+};
+
+// Use to enable intelligibility enhancer in audio processing.
+//
+// Note: If enabled and the reverse stream has more than one output channel,
+// the reverse stream will become an upmixed mono signal.
+struct Intelligibility {
+  Intelligibility() : enabled(false) {}
+  explicit Intelligibility(bool enabled) : enabled(enabled) {}
+  static const ConfigOptionID identifier = ConfigOptionID::kIntelligibility;
+  bool enabled;
+};
+
+// The Audio Processing Module (APM) provides a collection of voice processing
+// components designed for real-time communications software.
+//
+// APM operates on two audio streams on a frame-by-frame basis. Frames of the
+// primary stream, on which all processing is applied, are passed to
+// |ProcessStream()|. Frames of the reverse direction stream are passed to
+// |ProcessReverseStream()|. On the client-side, this will typically be the
+// near-end (capture) and far-end (render) streams, respectively. APM should be
+// placed in the signal chain as close to the audio hardware abstraction layer
+// (HAL) as possible.
+//
+// On the server-side, the reverse stream will normally not be used, with
+// processing occurring on each incoming stream.
+//
+// Component interfaces follow a similar pattern and are accessed through
+// corresponding getters in APM. All components are disabled at create-time,
+// with default settings that are recommended for most situations. New settings
+// can be applied without enabling a component. Enabling a component triggers
+// memory allocation and initialization to allow it to start processing the
+// streams.
+//
+// Thread safety is provided with the following assumptions to reduce locking
+// overhead:
+//   1. The stream getters and setters are called from the same thread as
+//      ProcessStream(). More precisely, stream functions are never called
+//      concurrently with ProcessStream().
+//   2. Parameter getters are never called concurrently with the corresponding
+//      setter.
+//
+// APM accepts only linear PCM audio data in chunks of 10 ms. The int16
+// interfaces use interleaved data, while the float interfaces use deinterleaved
+// data.
+//
+// Usage example, omitting error checking:
+// AudioProcessing* apm = AudioProcessingBuilder().Create();
+//
+// AudioProcessing::Config config;
+// config.high_pass_filter.enabled = true;
+// config.gain_controller2.enabled = true;
+// apm->ApplyConfig(config)
+//
+// apm->echo_cancellation()->enable_drift_compensation(false);
+// apm->echo_cancellation()->Enable(true);
+//
+// apm->noise_reduction()->set_level(kHighSuppression);
+// apm->noise_reduction()->Enable(true);
+//
+// apm->gain_control()->set_analog_level_limits(0, 255);
+// apm->gain_control()->set_mode(kAdaptiveAnalog);
+// apm->gain_control()->Enable(true);
+//
+// apm->voice_detection()->Enable(true);
+//
+// // Start a voice call...
+//
+// // ... Render frame arrives bound for the audio HAL ...
+// apm->ProcessReverseStream(render_frame);
+//
+// // ... Capture frame arrives from the audio HAL ...
+// // Call required set_stream_ functions.
+// apm->set_stream_delay_ms(delay_ms);
+// apm->gain_control()->set_stream_analog_level(analog_level);
+//
+// apm->ProcessStream(capture_frame);
+//
+// // Call required stream_ functions.
+// analog_level = apm->gain_control()->stream_analog_level();
+// has_voice = apm->stream_has_voice();
+//
+// // Repeate render and capture processing for the duration of the call...
+// // Start a new call...
+// apm->Initialize();
+//
+// // Close the application...
+// delete apm;
+//
+class AudioProcessing : public rtc::RefCountInterface {
+ public:
+  // The struct below constitutes the new parameter scheme for the audio
+  // processing. It is being introduced gradually and until it is fully
+  // introduced, it is prone to change.
+  // TODO(peah): Remove this comment once the new config scheme is fully rolled
+  // out.
+  //
+  // The parameters and behavior of the audio processing module are controlled
+  // by changing the default values in the AudioProcessing::Config struct.
+  // The config is applied by passing the struct to the ApplyConfig method.
+  struct Config {
+    struct ResidualEchoDetector {
+      bool enabled = true;
+    } residual_echo_detector;
+
+    struct HighPassFilter {
+      bool enabled = false;
+    } high_pass_filter;
+
+    // Enables the next generation AGC functionality. This feature replaces the
+    // standard methods of gain control in the previous AGC.
+    // The functionality is not yet activated in the code and turning this on
+    // does not yet have the desired behavior.
+    struct GainController2 {
+      bool enabled = false;
+      float fixed_gain_db = 0.f;
+      bool enable_limiter = true;
+    } gain_controller2;
+
+    // Explicit copy assignment implementation to avoid issues with memory
+    // sanitizer complaints in case of self-assignment.
+    // TODO(peah): Add buildflag to ensure that this is only included for memory
+    // sanitizer builds.
+    Config& operator=(const Config& config) {
+      if (this != &config) {
+        memcpy(this, &config, sizeof(*this));
+      }
+      return *this;
+    }
+  };
+
+  // TODO(mgraczyk): Remove once all methods that use ChannelLayout are gone.
+  enum ChannelLayout {
+    kMono,
+    // Left, right.
+    kStereo,
+    // Mono, keyboard, and mic.
+    kMonoAndKeyboard,
+    // Left, right, keyboard, and mic.
+    kStereoAndKeyboard
+  };
+
+  ~AudioProcessing() override {}
+
+  // Initializes internal states, while retaining all user settings. This
+  // should be called before beginning to process a new audio stream. However,
+  // it is not necessary to call before processing the first stream after
+  // creation.
+  //
+  // It is also not necessary to call if the audio parameters (sample
+  // rate and number of channels) have changed. Passing updated parameters
+  // directly to |ProcessStream()| and |ProcessReverseStream()| is permissible.
+  // If the parameters are known at init-time though, they may be provided.
+  virtual int Initialize() = 0;
+
+  // The int16 interfaces require:
+  //   - only |NativeRate|s be used
+  //   - that the input, output and reverse rates must match
+  //   - that |processing_config.output_stream()| matches
+  //     |processing_config.input_stream()|.
+  //
+  // The float interfaces accept arbitrary rates and support differing input and
+  // output layouts, but the output must have either one channel or the same
+  // number of channels as the input.
+  virtual int Initialize(const ProcessingConfig& processing_config) = 0;
+
+  // Initialize with unpacked parameters. See Initialize() above for details.
+  //
+  // TODO(mgraczyk): Remove once clients are updated to use the new interface.
+  virtual int Initialize(int capture_input_sample_rate_hz,
+                         int capture_output_sample_rate_hz,
+                         int render_sample_rate_hz,
+                         ChannelLayout capture_input_layout,
+                         ChannelLayout capture_output_layout,
+                         ChannelLayout render_input_layout) = 0;
+
+  // TODO(peah): This method is a temporary solution used to take control
+  // over the parameters in the audio processing module and is likely to change.
+  virtual void ApplyConfig(const Config& config) = 0;
+
+  // Pass down additional options which don't have explicit setters. This
+  // ensures the options are applied immediately.
+  virtual void SetExtraOptions(const webrtc::Config& config) = 0;
+
+  // TODO(ajm): Only intended for internal use. Make private and friend the
+  // necessary classes?
+  virtual int proc_sample_rate_hz() const = 0;
+  virtual int proc_split_sample_rate_hz() const = 0;
+  virtual size_t num_input_channels() const = 0;
+  virtual size_t num_proc_channels() const = 0;
+  virtual size_t num_output_channels() const = 0;
+  virtual size_t num_reverse_channels() const = 0;
+
+  // Set to true when the output of AudioProcessing will be muted or in some
+  // other way not used. Ideally, the captured audio would still be processed,
+  // but some components may change behavior based on this information.
+  // Default false.
+  virtual void set_output_will_be_muted(bool muted) = 0;
+
+  // Processes a 10 ms |frame| of the primary audio stream. On the client-side,
+  // this is the near-end (or captured) audio.
+  //
+  // If needed for enabled functionality, any function with the set_stream_ tag
+  // must be called prior to processing the current frame. Any getter function
+  // with the stream_ tag which is needed should be called after processing.
+  //
+  // The |sample_rate_hz_|, |num_channels_|, and |samples_per_channel_|
+  // members of |frame| must be valid. If changed from the previous call to this
+  // method, it will trigger an initialization.
+  virtual int ProcessStream(AudioFrame* frame) = 0;
+
+  // Accepts deinterleaved float audio with the range [-1, 1]. Each element
+  // of |src| points to a channel buffer, arranged according to
+  // |input_layout|. At output, the channels will be arranged according to
+  // |output_layout| at |output_sample_rate_hz| in |dest|.
+  //
+  // The output layout must have one channel or as many channels as the input.
+  // |src| and |dest| may use the same memory, if desired.
+  //
+  // TODO(mgraczyk): Remove once clients are updated to use the new interface.
+  virtual int ProcessStream(const float* const* src,
+                            size_t samples_per_channel,
+                            int input_sample_rate_hz,
+                            ChannelLayout input_layout,
+                            int output_sample_rate_hz,
+                            ChannelLayout output_layout,
+                            float* const* dest) = 0;
+
+  // Accepts deinterleaved float audio with the range [-1, 1]. Each element of
+  // |src| points to a channel buffer, arranged according to |input_stream|. At
+  // output, the channels will be arranged according to |output_stream| in
+  // |dest|.
+  //
+  // The output must have one channel or as many channels as the input. |src|
+  // and |dest| may use the same memory, if desired.
+  virtual int ProcessStream(const float* const* src,
+                            const StreamConfig& input_config,
+                            const StreamConfig& output_config,
+                            float* const* dest) = 0;
+
+  // Processes a 10 ms |frame| of the reverse direction audio stream. The frame
+  // may be modified. On the client-side, this is the far-end (or to be
+  // rendered) audio.
+  //
+  // It is necessary to provide this if echo processing is enabled, as the
+  // reverse stream forms the echo reference signal. It is recommended, but not
+  // necessary, to provide if gain control is enabled. On the server-side this
+  // typically will not be used. If you're not sure what to pass in here,
+  // chances are you don't need to use it.
+  //
+  // The |sample_rate_hz_|, |num_channels_|, and |samples_per_channel_|
+  // members of |frame| must be valid.
+  virtual int ProcessReverseStream(AudioFrame* frame) = 0;
+
+  // Accepts deinterleaved float audio with the range [-1, 1]. Each element
+  // of |data| points to a channel buffer, arranged according to |layout|.
+  // TODO(mgraczyk): Remove once clients are updated to use the new interface.
+  virtual int AnalyzeReverseStream(const float* const* data,
+                                   size_t samples_per_channel,
+                                   int sample_rate_hz,
+                                   ChannelLayout layout) = 0;
+
+  // Accepts deinterleaved float audio with the range [-1, 1]. Each element of
+  // |data| points to a channel buffer, arranged according to |reverse_config|.
+  virtual int ProcessReverseStream(const float* const* src,
+                                   const StreamConfig& input_config,
+                                   const StreamConfig& output_config,
+                                   float* const* dest) = 0;
+
+  // This must be called if and only if echo processing is enabled.
+  //
+  // Sets the |delay| in ms between ProcessReverseStream() receiving a far-end
+  // frame and ProcessStream() receiving a near-end frame containing the
+  // corresponding echo. On the client-side this can be expressed as
+  //   delay = (t_render - t_analyze) + (t_process - t_capture)
+  // where,
+  //   - t_analyze is the time a frame is passed to ProcessReverseStream() and
+  //     t_render is the time the first sample of the same frame is rendered by
+  //     the audio hardware.
+  //   - t_capture is the time the first sample of a frame is captured by the
+  //     audio hardware and t_process is the time the same frame is passed to
+  //     ProcessStream().
+  virtual int set_stream_delay_ms(int delay) = 0;
+  virtual int stream_delay_ms() const = 0;
+  virtual bool was_stream_delay_set() const = 0;
+
+  // Call to signal that a key press occurred (true) or did not occur (false)
+  // with this chunk of audio.
+  virtual void set_stream_key_pressed(bool key_pressed) = 0;
+
+  // Sets a delay |offset| in ms to add to the values passed in through
+  // set_stream_delay_ms(). May be positive or negative.
+  //
+  // Note that this could cause an otherwise valid value passed to
+  // set_stream_delay_ms() to return an error.
+  virtual void set_delay_offset_ms(int offset) = 0;
+  virtual int delay_offset_ms() const = 0;
+
+  // Attaches provided webrtc::AecDump for recording debugging
+  // information. Log file and maximum file size logic is supposed to
+  // be handled by implementing instance of AecDump. Calling this
+  // method when another AecDump is attached resets the active AecDump
+  // with a new one. This causes the d-tor of the earlier AecDump to
+  // be called. The d-tor call may block until all pending logging
+  // tasks are completed.
+  virtual void AttachAecDump(std::unique_ptr<AecDump> aec_dump) = 0;
+
+  // If no AecDump is attached, this has no effect. If an AecDump is
+  // attached, it's destructor is called. The d-tor may block until
+  // all pending logging tasks are completed.
+  virtual void DetachAecDump() = 0;
+
+  // Attaches provided webrtc::AudioGenerator for modifying playout audio.
+  // Calling this method when another AudioGenerator is attached replaces the
+  // active AudioGenerator with a new one.
+  virtual void AttachPlayoutAudioGenerator(
+      std::unique_ptr<AudioGenerator> audio_generator) = 0;
+
+  // If no AudioGenerator is attached, this has no effect. If an AecDump is
+  // attached, its destructor is called.
+  virtual void DetachPlayoutAudioGenerator() = 0;
+
+  // Use to send UMA histograms at end of a call. Note that all histogram
+  // specific member variables are reset.
+  virtual void UpdateHistogramsOnCallEnd() = 0;
+
+  // TODO(ivoc): Remove when the calling code no longer uses the old Statistics
+  //             API.
+  struct Statistic {
+    int instant = 0;  // Instantaneous value.
+    int average = 0;  // Long-term average.
+    int maximum = 0;  // Long-term maximum.
+    int minimum = 0;  // Long-term minimum.
+  };
+
+  struct Stat {
+    void Set(const Statistic& other) {
+      Set(other.instant, other.average, other.maximum, other.minimum);
+    }
+    void Set(float instant, float average, float maximum, float minimum) {
+      instant_ = instant;
+      average_ = average;
+      maximum_ = maximum;
+      minimum_ = minimum;
+    }
+    float instant() const { return instant_; }
+    float average() const { return average_; }
+    float maximum() const { return maximum_; }
+    float minimum() const { return minimum_; }
+
+   private:
+    float instant_ = 0.0f;  // Instantaneous value.
+    float average_ = 0.0f;  // Long-term average.
+    float maximum_ = 0.0f;  // Long-term maximum.
+    float minimum_ = 0.0f;  // Long-term minimum.
+  };
+
+  struct AudioProcessingStatistics {
+    AudioProcessingStatistics();
+    AudioProcessingStatistics(const AudioProcessingStatistics& other);
+    ~AudioProcessingStatistics();
+
+    // AEC Statistics.
+    // RERL = ERL + ERLE
+    Stat residual_echo_return_loss;
+    // ERL = 10log_10(P_far / P_echo)
+    Stat echo_return_loss;
+    // ERLE = 10log_10(P_echo / P_out)
+    Stat echo_return_loss_enhancement;
+    // (Pre non-linear processing suppression) A_NLP = 10log_10(P_echo / P_a)
+    Stat a_nlp;
+    // Fraction of time that the AEC linear filter is divergent, in a 1-second
+    // non-overlapped aggregation window.
+    float divergent_filter_fraction = -1.0f;
+
+    // The delay metrics consists of the delay median and standard deviation. It
+    // also consists of the fraction of delay estimates that can make the echo
+    // cancellation perform poorly. The values are aggregated until the first
+    // call to |GetStatistics()| and afterwards aggregated and updated every
+    // second. Note that if there are several clients pulling metrics from
+    // |GetStatistics()| during a session the first call from any of them will
+    // change to one second aggregation window for all.
+    int delay_median = -1;
+    int delay_standard_deviation = -1;
+    float fraction_poor_delays = -1.0f;
+
+    // Residual echo detector likelihood.
+    float residual_echo_likelihood = -1.0f;
+    // Maximum residual echo likelihood from the last time period.
+    float residual_echo_likelihood_recent_max = -1.0f;
+  };
+
+  // TODO(ivoc): Make this pure virtual when all subclasses have been updated.
+  virtual AudioProcessingStatistics GetStatistics() const;
+
+  // This returns the stats as optionals and it will replace the regular
+  // GetStatistics.
+  virtual AudioProcessingStats GetStatistics(bool has_remote_tracks) const;
+
+  // These provide access to the component interfaces and should never return
+  // NULL. The pointers will be valid for the lifetime of the APM instance.
+  // The memory for these objects is entirely managed internally.
+  virtual EchoCancellation* echo_cancellation() const = 0;
+  virtual EchoControlMobile* echo_control_mobile() const = 0;
+  virtual GainControl* gain_control() const = 0;
+  // TODO(peah): Deprecate this API call.
+  virtual HighPassFilter* high_pass_filter() const = 0;
+  virtual LevelEstimator* level_estimator() const = 0;
+  virtual NoiseSuppression* noise_suppression() const = 0;
+  virtual VoiceDetection* voice_detection() const = 0;
+
+  // Returns the last applied configuration.
+  virtual AudioProcessing::Config GetConfig() const = 0;
+
+  enum Error {
+    // Fatal errors.
+    kNoError = 0,
+    kUnspecifiedError = -1,
+    kCreationFailedError = -2,
+    kUnsupportedComponentError = -3,
+    kUnsupportedFunctionError = -4,
+    kNullPointerError = -5,
+    kBadParameterError = -6,
+    kBadSampleRateError = -7,
+    kBadDataLengthError = -8,
+    kBadNumberChannelsError = -9,
+    kFileError = -10,
+    kStreamParameterNotSetError = -11,
+    kNotEnabledError = -12,
+
+    // Warnings are non-fatal.
+    // This results when a set_stream_ parameter is out of range. Processing
+    // will continue, but the parameter may have been truncated.
+    kBadStreamParameterWarning = -13
+  };
+
+  enum NativeRate {
+    kSampleRate8kHz = 8000,
+    kSampleRate16kHz = 16000,
+    kSampleRate32kHz = 32000,
+    kSampleRate48kHz = 48000
+  };
+
+  // TODO(kwiberg): We currently need to support a compiler (Visual C++) that
+  // complains if we don't explicitly state the size of the array here. Remove
+  // the size when that's no longer the case.
+  static constexpr int kNativeSampleRatesHz[4] = {
+      kSampleRate8kHz, kSampleRate16kHz, kSampleRate32kHz, kSampleRate48kHz};
+  static constexpr size_t kNumNativeSampleRates =
+      arraysize(kNativeSampleRatesHz);
+  static constexpr int kMaxNativeSampleRateHz =
+      kNativeSampleRatesHz[kNumNativeSampleRates - 1];
+
+  static const int kChunkSizeMs = 10;
+};
+
+class AudioProcessingBuilder {
+ public:
+  AudioProcessingBuilder();
+  ~AudioProcessingBuilder();
+  // The AudioProcessingBuilder takes ownership of the echo_control_factory.
+  AudioProcessingBuilder& SetEchoControlFactory(
+      std::unique_ptr<EchoControlFactory> echo_control_factory);
+  // The AudioProcessingBuilder takes ownership of the capture_post_processing.
+  AudioProcessingBuilder& SetCapturePostProcessing(
+      std::unique_ptr<CustomProcessing> capture_post_processing);
+  // The AudioProcessingBuilder takes ownership of the render_pre_processing.
+  AudioProcessingBuilder& SetRenderPreProcessing(
+      std::unique_ptr<CustomProcessing> render_pre_processing);
+  // The AudioProcessingBuilder takes ownership of the nonlinear beamformer.
+  AudioProcessingBuilder& SetNonlinearBeamformer(
+      std::unique_ptr<NonlinearBeamformer> nonlinear_beamformer);
+  // The AudioProcessingBuilder takes ownership of the echo_detector.
+  AudioProcessingBuilder& SetEchoDetector(
+      std::unique_ptr<EchoDetector> echo_detector);
+  // This creates an APM instance using the previously set components. Calling
+  // the Create function resets the AudioProcessingBuilder to its initial state.
+  AudioProcessing* Create();
+  AudioProcessing* Create(const webrtc::Config& config);
+
+ private:
+  std::unique_ptr<EchoControlFactory> echo_control_factory_;
+  std::unique_ptr<CustomProcessing> capture_post_processing_;
+  std::unique_ptr<CustomProcessing> render_pre_processing_;
+  std::unique_ptr<NonlinearBeamformer> nonlinear_beamformer_;
+  std::unique_ptr<EchoDetector> echo_detector_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(AudioProcessingBuilder);
+};
+
+class StreamConfig {
+ public:
+  // sample_rate_hz: The sampling rate of the stream.
+  //
+  // num_channels: The number of audio channels in the stream, excluding the
+  //               keyboard channel if it is present. When passing a
+  //               StreamConfig with an array of arrays T*[N],
+  //
+  //                N == {num_channels + 1  if  has_keyboard
+  //                     {num_channels      if  !has_keyboard
+  //
+  // has_keyboard: True if the stream has a keyboard channel. When has_keyboard
+  //               is true, the last channel in any corresponding list of
+  //               channels is the keyboard channel.
+  StreamConfig(int sample_rate_hz = 0,
+               size_t num_channels = 0,
+               bool has_keyboard = false)
+      : sample_rate_hz_(sample_rate_hz),
+        num_channels_(num_channels),
+        has_keyboard_(has_keyboard),
+        num_frames_(calculate_frames(sample_rate_hz)) {}
+
+  void set_sample_rate_hz(int value) {
+    sample_rate_hz_ = value;
+    num_frames_ = calculate_frames(value);
+  }
+  void set_num_channels(size_t value) { num_channels_ = value; }
+  void set_has_keyboard(bool value) { has_keyboard_ = value; }
+
+  int sample_rate_hz() const { return sample_rate_hz_; }
+
+  // The number of channels in the stream, not including the keyboard channel if
+  // present.
+  size_t num_channels() const { return num_channels_; }
+
+  bool has_keyboard() const { return has_keyboard_; }
+  size_t num_frames() const { return num_frames_; }
+  size_t num_samples() const { return num_channels_ * num_frames_; }
+
+  bool operator==(const StreamConfig& other) const {
+    return sample_rate_hz_ == other.sample_rate_hz_ &&
+           num_channels_ == other.num_channels_ &&
+           has_keyboard_ == other.has_keyboard_;
+  }
+
+  bool operator!=(const StreamConfig& other) const { return !(*this == other); }
+
+ private:
+  static size_t calculate_frames(int sample_rate_hz) {
+    return static_cast<size_t>(
+        AudioProcessing::kChunkSizeMs * sample_rate_hz / 1000);
+  }
+
+  int sample_rate_hz_;
+  size_t num_channels_;
+  bool has_keyboard_;
+  size_t num_frames_;
+};
+
+class ProcessingConfig {
+ public:
+  enum StreamName {
+    kInputStream,
+    kOutputStream,
+    kReverseInputStream,
+    kReverseOutputStream,
+    kNumStreamNames,
+  };
+
+  const StreamConfig& input_stream() const {
+    return streams[StreamName::kInputStream];
+  }
+  const StreamConfig& output_stream() const {
+    return streams[StreamName::kOutputStream];
+  }
+  const StreamConfig& reverse_input_stream() const {
+    return streams[StreamName::kReverseInputStream];
+  }
+  const StreamConfig& reverse_output_stream() const {
+    return streams[StreamName::kReverseOutputStream];
+  }
+
+  StreamConfig& input_stream() { return streams[StreamName::kInputStream]; }
+  StreamConfig& output_stream() { return streams[StreamName::kOutputStream]; }
+  StreamConfig& reverse_input_stream() {
+    return streams[StreamName::kReverseInputStream];
+  }
+  StreamConfig& reverse_output_stream() {
+    return streams[StreamName::kReverseOutputStream];
+  }
+
+  bool operator==(const ProcessingConfig& other) const {
+    for (int i = 0; i < StreamName::kNumStreamNames; ++i) {
+      if (this->streams[i] != other.streams[i]) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  bool operator!=(const ProcessingConfig& other) const {
+    return !(*this == other);
+  }
+
+  StreamConfig streams[StreamName::kNumStreamNames];
+};
+
+// The acoustic echo cancellation (AEC) component provides better performance
+// than AECM but also requires more processing power and is dependent on delay
+// stability and reporting accuracy. As such it is well-suited and recommended
+// for PC and IP phone applications.
+//
+// Not recommended to be enabled on the server-side.
+class EchoCancellation {
+ public:
+  // EchoCancellation and EchoControlMobile may not be enabled simultaneously.
+  // Enabling one will disable the other.
+  virtual int Enable(bool enable) = 0;
+  virtual bool is_enabled() const = 0;
+
+  // Differences in clock speed on the primary and reverse streams can impact
+  // the AEC performance. On the client-side, this could be seen when different
+  // render and capture devices are used, particularly with webcams.
+  //
+  // This enables a compensation mechanism, and requires that
+  // set_stream_drift_samples() be called.
+  virtual int enable_drift_compensation(bool enable) = 0;
+  virtual bool is_drift_compensation_enabled() const = 0;
+
+  // Sets the difference between the number of samples rendered and captured by
+  // the audio devices since the last call to |ProcessStream()|. Must be called
+  // if drift compensation is enabled, prior to |ProcessStream()|.
+  virtual void set_stream_drift_samples(int drift) = 0;
+  virtual int stream_drift_samples() const = 0;
+
+  enum SuppressionLevel {
+    kLowSuppression,
+    kModerateSuppression,
+    kHighSuppression
+  };
+
+  // Sets the aggressiveness of the suppressor. A higher level trades off
+  // double-talk performance for increased echo suppression.
+  virtual int set_suppression_level(SuppressionLevel level) = 0;
+  virtual SuppressionLevel suppression_level() const = 0;
+
+  // Returns false if the current frame almost certainly contains no echo
+  // and true if it _might_ contain echo.
+  virtual bool stream_has_echo() const = 0;
+
+  // Enables the computation of various echo metrics. These are obtained
+  // through |GetMetrics()|.
+  virtual int enable_metrics(bool enable) = 0;
+  virtual bool are_metrics_enabled() const = 0;
+
+  // Each statistic is reported in dB.
+  // P_far:  Far-end (render) signal power.
+  // P_echo: Near-end (capture) echo signal power.
+  // P_out:  Signal power at the output of the AEC.
+  // P_a:    Internal signal power at the point before the AEC's non-linear
+  //         processor.
+  struct Metrics {
+    // RERL = ERL + ERLE
+    AudioProcessing::Statistic residual_echo_return_loss;
+
+    // ERL = 10log_10(P_far / P_echo)
+    AudioProcessing::Statistic echo_return_loss;
+
+    // ERLE = 10log_10(P_echo / P_out)
+    AudioProcessing::Statistic echo_return_loss_enhancement;
+
+    // (Pre non-linear processing suppression) A_NLP = 10log_10(P_echo / P_a)
+    AudioProcessing::Statistic a_nlp;
+
+    // Fraction of time that the AEC linear filter is divergent, in a 1-second
+    // non-overlapped aggregation window.
+    float divergent_filter_fraction;
+  };
+
+  // Deprecated. Use GetStatistics on the AudioProcessing interface instead.
+  // TODO(ajm): discuss the metrics update period.
+  virtual int GetMetrics(Metrics* metrics) = 0;
+
+  // Enables computation and logging of delay values. Statistics are obtained
+  // through |GetDelayMetrics()|.
+  virtual int enable_delay_logging(bool enable) = 0;
+  virtual bool is_delay_logging_enabled() const = 0;
+
+  // The delay metrics consists of the delay |median| and the delay standard
+  // deviation |std|. It also consists of the fraction of delay estimates
+  // |fraction_poor_delays| that can make the echo cancellation perform poorly.
+  // The values are aggregated until the first call to |GetDelayMetrics()| and
+  // afterwards aggregated and updated every second.
+  // Note that if there are several clients pulling metrics from
+  // |GetDelayMetrics()| during a session the first call from any of them will
+  // change to one second aggregation window for all.
+  // Deprecated. Use GetStatistics on the AudioProcessing interface instead.
+  virtual int GetDelayMetrics(int* median, int* std) = 0;
+  // Deprecated. Use GetStatistics on the AudioProcessing interface instead.
+  virtual int GetDelayMetrics(int* median, int* std,
+                              float* fraction_poor_delays) = 0;
+
+  // Returns a pointer to the low level AEC component.  In case of multiple
+  // channels, the pointer to the first one is returned.  A NULL pointer is
+  // returned when the AEC component is disabled or has not been initialized
+  // successfully.
+  virtual struct AecCore* aec_core() const = 0;
+
+ protected:
+  virtual ~EchoCancellation() {}
+};
+
+// The acoustic echo control for mobile (AECM) component is a low complexity
+// robust option intended for use on mobile devices.
+//
+// Not recommended to be enabled on the server-side.
+class EchoControlMobile {
+ public:
+  // EchoCancellation and EchoControlMobile may not be enabled simultaneously.
+  // Enabling one will disable the other.
+  virtual int Enable(bool enable) = 0;
+  virtual bool is_enabled() const = 0;
+
+  // Recommended settings for particular audio routes. In general, the louder
+  // the echo is expected to be, the higher this value should be set. The
+  // preferred setting may vary from device to device.
+  enum RoutingMode {
+    kQuietEarpieceOrHeadset,
+    kEarpiece,
+    kLoudEarpiece,
+    kSpeakerphone,
+    kLoudSpeakerphone
+  };
+
+  // Sets echo control appropriate for the audio routing |mode| on the device.
+  // It can and should be updated during a call if the audio routing changes.
+  virtual int set_routing_mode(RoutingMode mode) = 0;
+  virtual RoutingMode routing_mode() const = 0;
+
+  // Comfort noise replaces suppressed background noise to maintain a
+  // consistent signal level.
+  virtual int enable_comfort_noise(bool enable) = 0;
+  virtual bool is_comfort_noise_enabled() const = 0;
+
+  // A typical use case is to initialize the component with an echo path from a
+  // previous call. The echo path is retrieved using |GetEchoPath()|, typically
+  // at the end of a call. The data can then be stored for later use as an
+  // initializer before the next call, using |SetEchoPath()|.
+  //
+  // Controlling the echo path this way requires the data |size_bytes| to match
+  // the internal echo path size. This size can be acquired using
+  // |echo_path_size_bytes()|. |SetEchoPath()| causes an entire reset, worth
+  // noting if it is to be called during an ongoing call.
+  //
+  // It is possible that version incompatibilities may result in a stored echo
+  // path of the incorrect size. In this case, the stored path should be
+  // discarded.
+  virtual int SetEchoPath(const void* echo_path, size_t size_bytes) = 0;
+  virtual int GetEchoPath(void* echo_path, size_t size_bytes) const = 0;
+
+  // The returned path size is guaranteed not to change for the lifetime of
+  // the application.
+  static size_t echo_path_size_bytes();
+
+ protected:
+  virtual ~EchoControlMobile() {}
+};
+
+// The automatic gain control (AGC) component brings the signal to an
+// appropriate range. This is done by applying a digital gain directly and, in
+// the analog mode, prescribing an analog gain to be applied at the audio HAL.
+//
+// Recommended to be enabled on the client-side.
+class GainControl {
+ public:
+  virtual int Enable(bool enable) = 0;
+  virtual bool is_enabled() const = 0;
+
+  // When an analog mode is set, this must be called prior to |ProcessStream()|
+  // to pass the current analog level from the audio HAL. Must be within the
+  // range provided to |set_analog_level_limits()|.
+  virtual int set_stream_analog_level(int level) = 0;
+
+  // When an analog mode is set, this should be called after |ProcessStream()|
+  // to obtain the recommended new analog level for the audio HAL. It is the
+  // users responsibility to apply this level.
+  virtual int stream_analog_level() = 0;
+
+  enum Mode {
+    // Adaptive mode intended for use if an analog volume control is available
+    // on the capture device. It will require the user to provide coupling
+    // between the OS mixer controls and AGC through the |stream_analog_level()|
+    // functions.
+    //
+    // It consists of an analog gain prescription for the audio device and a
+    // digital compression stage.
+    kAdaptiveAnalog,
+
+    // Adaptive mode intended for situations in which an analog volume control
+    // is unavailable. It operates in a similar fashion to the adaptive analog
+    // mode, but with scaling instead applied in the digital domain. As with
+    // the analog mode, it additionally uses a digital compression stage.
+    kAdaptiveDigital,
+
+    // Fixed mode which enables only the digital compression stage also used by
+    // the two adaptive modes.
+    //
+    // It is distinguished from the adaptive modes by considering only a
+    // short time-window of the input signal. It applies a fixed gain through
+    // most of the input level range, and compresses (gradually reduces gain
+    // with increasing level) the input signal at higher levels. This mode is
+    // preferred on embedded devices where the capture signal level is
+    // predictable, so that a known gain can be applied.
+    kFixedDigital
+  };
+
+  virtual int set_mode(Mode mode) = 0;
+  virtual Mode mode() const = 0;
+
+  // Sets the target peak |level| (or envelope) of the AGC in dBFs (decibels
+  // from digital full-scale). The convention is to use positive values. For
+  // instance, passing in a value of 3 corresponds to -3 dBFs, or a target
+  // level 3 dB below full-scale. Limited to [0, 31].
+  //
+  // TODO(ajm): use a negative value here instead, if/when VoE will similarly
+  //            update its interface.
+  virtual int set_target_level_dbfs(int level) = 0;
+  virtual int target_level_dbfs() const = 0;
+
+  // Sets the maximum |gain| the digital compression stage may apply, in dB. A
+  // higher number corresponds to greater compression, while a value of 0 will
+  // leave the signal uncompressed. Limited to [0, 90].
+  virtual int set_compression_gain_db(int gain) = 0;
+  virtual int compression_gain_db() const = 0;
+
+  // When enabled, the compression stage will hard limit the signal to the
+  // target level. Otherwise, the signal will be compressed but not limited
+  // above the target level.
+  virtual int enable_limiter(bool enable) = 0;
+  virtual bool is_limiter_enabled() const = 0;
+
+  // Sets the |minimum| and |maximum| analog levels of the audio capture device.
+  // Must be set if and only if an analog mode is used. Limited to [0, 65535].
+  virtual int set_analog_level_limits(int minimum,
+                                      int maximum) = 0;
+  virtual int analog_level_minimum() const = 0;
+  virtual int analog_level_maximum() const = 0;
+
+  // Returns true if the AGC has detected a saturation event (period where the
+  // signal reaches digital full-scale) in the current frame and the analog
+  // level cannot be reduced.
+  //
+  // This could be used as an indicator to reduce or disable analog mic gain at
+  // the audio HAL.
+  virtual bool stream_is_saturated() const = 0;
+
+ protected:
+  virtual ~GainControl() {}
+};
+// TODO(peah): Remove this interface.
+// A filtering component which removes DC offset and low-frequency noise.
+// Recommended to be enabled on the client-side.
+class HighPassFilter {
+ public:
+  virtual int Enable(bool enable) = 0;
+  virtual bool is_enabled() const = 0;
+
+  virtual ~HighPassFilter() {}
+};
+
+// An estimation component used to retrieve level metrics.
+class LevelEstimator {
+ public:
+  virtual int Enable(bool enable) = 0;
+  virtual bool is_enabled() const = 0;
+
+  // Returns the root mean square (RMS) level in dBFs (decibels from digital
+  // full-scale), or alternately dBov. It is computed over all primary stream
+  // frames since the last call to RMS(). The returned value is positive but
+  // should be interpreted as negative. It is constrained to [0, 127].
+  //
+  // The computation follows: https://tools.ietf.org/html/rfc6465
+  // with the intent that it can provide the RTP audio level indication.
+  //
+  // Frames passed to ProcessStream() with an |_energy| of zero are considered
+  // to have been muted. The RMS of the frame will be interpreted as -127.
+  virtual int RMS() = 0;
+
+ protected:
+  virtual ~LevelEstimator() {}
+};
+
+// The noise suppression (NS) component attempts to remove noise while
+// retaining speech. Recommended to be enabled on the client-side.
+//
+// Recommended to be enabled on the client-side.
+class NoiseSuppression {
+ public:
+  virtual int Enable(bool enable) = 0;
+  virtual bool is_enabled() const = 0;
+
+  // Determines the aggressiveness of the suppression. Increasing the level
+  // will reduce the noise level at the expense of a higher speech distortion.
+  enum Level {
+    kLow,
+    kModerate,
+    kHigh,
+    kVeryHigh
+  };
+
+  virtual int set_level(Level level) = 0;
+  virtual Level level() const = 0;
+
+  // Returns the internally computed prior speech probability of current frame
+  // averaged over output channels. This is not supported in fixed point, for
+  // which |kUnsupportedFunctionError| is returned.
+  virtual float speech_probability() const = 0;
+
+  // Returns the noise estimate per frequency bin averaged over all channels.
+  virtual std::vector<float> NoiseEstimate() = 0;
+
+ protected:
+  virtual ~NoiseSuppression() {}
+};
+
+// Interface for a custom processing submodule.
+class CustomProcessing {
+ public:
+  // (Re-)Initializes the submodule.
+  virtual void Initialize(int sample_rate_hz, int num_channels) = 0;
+  // Processes the given capture or render signal.
+  virtual void Process(AudioBuffer* audio) = 0;
+  // Returns a string representation of the module state.
+  virtual std::string ToString() const = 0;
+
+  virtual ~CustomProcessing() {}
+};
+
+// Interface for an echo detector submodule.
+class EchoDetector {
+ public:
+  // (Re-)Initializes the submodule.
+  virtual void Initialize(int capture_sample_rate_hz,
+                          int num_capture_channels,
+                          int render_sample_rate_hz,
+                          int num_render_channels) = 0;
+
+  // Analysis (not changing) of the render signal.
+  virtual void AnalyzeRenderAudio(rtc::ArrayView<const float> render_audio) = 0;
+
+  // Analysis (not changing) of the capture signal.
+  virtual void AnalyzeCaptureAudio(
+      rtc::ArrayView<const float> capture_audio) = 0;
+
+  // Pack an AudioBuffer into a vector<float>.
+  static void PackRenderAudioBuffer(AudioBuffer* audio,
+                                    std::vector<float>* packed_buffer);
+
+  struct Metrics {
+    double echo_likelihood;
+    double echo_likelihood_recent_max;
+  };
+
+  // Collect current metrics from the echo detector.
+  virtual Metrics GetMetrics() const = 0;
+
+  virtual ~EchoDetector() {}
+};
+
+// The voice activity detection (VAD) component analyzes the stream to
+// determine if voice is present. A facility is also provided to pass in an
+// external VAD decision.
+//
+// In addition to |stream_has_voice()| the VAD decision is provided through the
+// |AudioFrame| passed to |ProcessStream()|. The |vad_activity_| member will be
+// modified to reflect the current decision.
+class VoiceDetection {
+ public:
+  virtual int Enable(bool enable) = 0;
+  virtual bool is_enabled() const = 0;
+
+  // Returns true if voice is detected in the current frame. Should be called
+  // after |ProcessStream()|.
+  virtual bool stream_has_voice() const = 0;
+
+  // Some of the APM functionality requires a VAD decision. In the case that
+  // a decision is externally available for the current frame, it can be passed
+  // in here, before |ProcessStream()| is called.
+  //
+  // VoiceDetection does _not_ need to be enabled to use this. If it happens to
+  // be enabled, detection will be skipped for any frame in which an external
+  // VAD decision is provided.
+  virtual int set_stream_has_voice(bool has_voice) = 0;
+
+  // Specifies the likelihood that a frame will be declared to contain voice.
+  // A higher value makes it more likely that speech will not be clipped, at
+  // the expense of more noise being detected as voice.
+  enum Likelihood {
+    kVeryLowLikelihood,
+    kLowLikelihood,
+    kModerateLikelihood,
+    kHighLikelihood
+  };
+
+  virtual int set_likelihood(Likelihood likelihood) = 0;
+  virtual Likelihood likelihood() const = 0;
+
+  // Sets the |size| of the frames in ms on which the VAD will operate. Larger
+  // frames will improve detection accuracy, but reduce the frequency of
+  // updates.
+  //
+  // This does not impact the size of frames passed to |ProcessStream()|.
+  virtual int set_frame_size_ms(int size) = 0;
+  virtual int frame_size_ms() const = 0;
+
+ protected:
+  virtual ~VoiceDetection() {}
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_H_
diff --git a/modules/audio_processing/include/audio_processing_statistics.cc b/modules/audio_processing/include/audio_processing_statistics.cc
new file mode 100644
index 0000000..7139ee5
--- /dev/null
+++ b/modules/audio_processing/include/audio_processing_statistics.cc
@@ -0,0 +1,22 @@
+/*
+ *  Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/include/audio_processing_statistics.h"
+
+namespace webrtc {
+
+AudioProcessingStats::AudioProcessingStats() = default;
+
+AudioProcessingStats::AudioProcessingStats(const AudioProcessingStats& other) =
+    default;
+
+AudioProcessingStats::~AudioProcessingStats() = default;
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/include/audio_processing_statistics.h b/modules/audio_processing/include/audio_processing_statistics.h
new file mode 100644
index 0000000..05c5905
--- /dev/null
+++ b/modules/audio_processing/include/audio_processing_statistics.h
@@ -0,0 +1,56 @@
+/*
+ *  Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_STATISTICS_H_
+#define MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_STATISTICS_H_
+
+#include "api/optional.h"
+
+namespace webrtc {
+// This version of the stats uses Optionals, it will replace the regular
+// AudioProcessingStatistics struct.
+struct AudioProcessingStats {
+  AudioProcessingStats();
+  AudioProcessingStats(const AudioProcessingStats& other);
+  ~AudioProcessingStats();
+
+  // AEC Statistics.
+  // ERL = 10log_10(P_far / P_echo)
+  rtc::Optional<double> echo_return_loss;
+  // ERLE = 10log_10(P_echo / P_out)
+  rtc::Optional<double> echo_return_loss_enhancement;
+  // Fraction of time that the AEC linear filter is divergent, in a 1-second
+  // non-overlapped aggregation window.
+  rtc::Optional<double> divergent_filter_fraction;
+
+  // The delay metrics consists of the delay median and standard deviation. It
+  // also consists of the fraction of delay estimates that can make the echo
+  // cancellation perform poorly. The values are aggregated until the first
+  // call to |GetStatistics()| and afterwards aggregated and updated every
+  // second. Note that if there are several clients pulling metrics from
+  // |GetStatistics()| during a session the first call from any of them will
+  // change to one second aggregation window for all.
+  rtc::Optional<int32_t> delay_median_ms;
+  rtc::Optional<int32_t> delay_standard_deviation_ms;
+
+  // Residual echo detector likelihood.
+  rtc::Optional<double> residual_echo_likelihood;
+  // Maximum residual echo likelihood from the last time period.
+  rtc::Optional<double> residual_echo_likelihood_recent_max;
+
+  // The instantaneous delay estimate produced in the AEC. The unit is in
+  // milliseconds and the value is the instantaneous value at the time of the
+  // call to |GetStatistics()|.
+  rtc::Optional<int32_t> delay_ms;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_STATISTICS_H_
diff --git a/modules/audio_processing/include/config.cc b/modules/audio_processing/include/config.cc
new file mode 100644
index 0000000..14240db
--- /dev/null
+++ b/modules/audio_processing/include/config.cc
@@ -0,0 +1,23 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/include/config.h"
+
+namespace webrtc {
+
+Config::Config() {}
+
+Config::~Config() {
+  for (OptionMap::iterator it = options_.begin(); it != options_.end(); ++it) {
+    delete it->second;
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/include/config.h b/modules/audio_processing/include/config.h
new file mode 100644
index 0000000..7615f62
--- /dev/null
+++ b/modules/audio_processing/include/config.h
@@ -0,0 +1,133 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_INCLUDE_CONFIG_H_
+#define MODULES_AUDIO_PROCESSING_INCLUDE_CONFIG_H_
+
+#include <map>
+
+#include "rtc_base/basictypes.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// Only add new values to the end of the enumeration and never remove (only
+// deprecate) to maintain binary compatibility.
+enum class ConfigOptionID {
+  kMyExperimentForTest,
+  kAlgo1CostFunctionForTest,
+  kTemporalLayersFactory,  // Deprecated
+  kNetEqCapacityConfig,    // Deprecated
+  kNetEqFastAccelerate,    // Deprecated
+  kVoicePacing,            // Deprecated
+  kExtendedFilter,
+  kDelayAgnostic,
+  kExperimentalAgc,
+  kExperimentalNs,
+  kBeamforming,
+  kIntelligibility,
+  kEchoCanceller3,  // Deprecated
+  kAecRefinedAdaptiveFilter,
+  kLevelControl  // Deprecated
+};
+
+// Class Config is designed to ease passing a set of options across webrtc code.
+// Options are identified by typename in order to avoid incorrect casts.
+//
+// Usage:
+// * declaring an option:
+//    struct Algo1_CostFunction {
+//      virtual float cost(int x) const { return x; }
+//      virtual ~Algo1_CostFunction() {}
+//    };
+//
+// * accessing an option:
+//    config.Get<Algo1_CostFunction>().cost(value);
+//
+// * setting an option:
+//    struct SqrCost : Algo1_CostFunction {
+//      virtual float cost(int x) const { return x*x; }
+//    };
+//    config.Set<Algo1_CostFunction>(new SqrCost());
+//
+// Note: This class is thread-compatible (like STL containers).
+class Config {
+ public:
+  // Returns the option if set or a default constructed one.
+  // Callers that access options too often are encouraged to cache the result.
+  // Returned references are owned by this.
+  //
+  // Requires std::is_default_constructible<T>
+  template<typename T> const T& Get() const;
+
+  // Set the option, deleting any previous instance of the same.
+  // This instance gets ownership of the newly set value.
+  template<typename T> void Set(T* value);
+
+  Config();
+  ~Config();
+
+ private:
+  struct BaseOption {
+    virtual ~BaseOption() {}
+  };
+
+  template<typename T>
+  struct Option : BaseOption {
+    explicit Option(T* v): value(v) {}
+    ~Option() {
+      delete value;
+    }
+    T* value;
+  };
+
+  template<typename T>
+  static ConfigOptionID identifier() {
+    return T::identifier;
+  }
+
+  // Used to instantiate a default constructed object that doesn't needs to be
+  // owned. This allows Get<T> to be implemented without requiring explicitly
+  // locks.
+  template<typename T>
+  static const T& default_value() {
+    RTC_DEFINE_STATIC_LOCAL(const T, def, ());
+    return def;
+  }
+
+  typedef std::map<ConfigOptionID, BaseOption*> OptionMap;
+  OptionMap options_;
+
+  // RTC_DISALLOW_COPY_AND_ASSIGN
+  Config(const Config&);
+  void operator=(const Config&);
+};
+
+template<typename T>
+const T& Config::Get() const {
+  OptionMap::const_iterator it = options_.find(identifier<T>());
+  if (it != options_.end()) {
+    const T* t = static_cast<Option<T>*>(it->second)->value;
+    if (t) {
+      return *t;
+    }
+  }
+  return default_value<T>();
+}
+
+template<typename T>
+void Config::Set(T* value) {
+  BaseOption*& it = options_[identifier<T>()];
+  delete it;
+  it = new Option<T>(value);
+}
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_INCLUDE_CONFIG_H_
diff --git a/modules/audio_processing/include/mock_audio_processing.h b/modules/audio_processing/include/mock_audio_processing.h
new file mode 100644
index 0000000..96a04ef
--- /dev/null
+++ b/modules/audio_processing/include/mock_audio_processing.h
@@ -0,0 +1,245 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_INCLUDE_MOCK_AUDIO_PROCESSING_H_
+#define MODULES_AUDIO_PROCESSING_INCLUDE_MOCK_AUDIO_PROCESSING_H_
+
+#include <memory>
+
+#include "modules/audio_processing/include/aec_dump.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/include/audio_processing_statistics.h"
+#include "test/gmock.h"
+
+namespace webrtc {
+
+namespace test {
+
+class MockEchoCancellation : public EchoCancellation {
+ public:
+  virtual ~MockEchoCancellation() {}
+  MOCK_METHOD1(Enable, int(bool enable));
+  MOCK_CONST_METHOD0(is_enabled, bool());
+  MOCK_METHOD1(enable_drift_compensation, int(bool enable));
+  MOCK_CONST_METHOD0(is_drift_compensation_enabled, bool());
+  MOCK_METHOD1(set_stream_drift_samples, void(int drift));
+  MOCK_CONST_METHOD0(stream_drift_samples, int());
+  MOCK_METHOD1(set_suppression_level, int(SuppressionLevel level));
+  MOCK_CONST_METHOD0(suppression_level, SuppressionLevel());
+  MOCK_CONST_METHOD0(stream_has_echo, bool());
+  MOCK_METHOD1(enable_metrics, int(bool enable));
+  MOCK_CONST_METHOD0(are_metrics_enabled, bool());
+  MOCK_METHOD1(GetMetrics, int(Metrics* metrics));
+  MOCK_METHOD1(enable_delay_logging, int(bool enable));
+  MOCK_CONST_METHOD0(is_delay_logging_enabled, bool());
+  MOCK_METHOD2(GetDelayMetrics, int(int* median, int* std));
+  MOCK_METHOD3(GetDelayMetrics, int(int* median, int* std,
+                                    float* fraction_poor_delays));
+  MOCK_CONST_METHOD0(aec_core, struct AecCore*());
+};
+
+class MockEchoControlMobile : public EchoControlMobile {
+ public:
+  virtual ~MockEchoControlMobile() {}
+  MOCK_METHOD1(Enable, int(bool enable));
+  MOCK_CONST_METHOD0(is_enabled, bool());
+  MOCK_METHOD1(set_routing_mode, int(RoutingMode mode));
+  MOCK_CONST_METHOD0(routing_mode, RoutingMode());
+  MOCK_METHOD1(enable_comfort_noise, int(bool enable));
+  MOCK_CONST_METHOD0(is_comfort_noise_enabled, bool());
+  MOCK_METHOD2(SetEchoPath, int(const void* echo_path, size_t size_bytes));
+  MOCK_CONST_METHOD2(GetEchoPath, int(void* echo_path, size_t size_bytes));
+};
+
+class MockGainControl : public GainControl {
+ public:
+  virtual ~MockGainControl() {}
+  MOCK_METHOD1(Enable, int(bool enable));
+  MOCK_CONST_METHOD0(is_enabled, bool());
+  MOCK_METHOD1(set_stream_analog_level, int(int level));
+  MOCK_METHOD0(stream_analog_level, int());
+  MOCK_METHOD1(set_mode, int(Mode mode));
+  MOCK_CONST_METHOD0(mode, Mode());
+  MOCK_METHOD1(set_target_level_dbfs, int(int level));
+  MOCK_CONST_METHOD0(target_level_dbfs, int());
+  MOCK_METHOD1(set_compression_gain_db, int(int gain));
+  MOCK_CONST_METHOD0(compression_gain_db, int());
+  MOCK_METHOD1(enable_limiter, int(bool enable));
+  MOCK_CONST_METHOD0(is_limiter_enabled, bool());
+  MOCK_METHOD2(set_analog_level_limits, int(int minimum, int maximum));
+  MOCK_CONST_METHOD0(analog_level_minimum, int());
+  MOCK_CONST_METHOD0(analog_level_maximum, int());
+  MOCK_CONST_METHOD0(stream_is_saturated, bool());
+};
+
+class MockHighPassFilter : public HighPassFilter {
+ public:
+  virtual ~MockHighPassFilter() {}
+  MOCK_METHOD1(Enable, int(bool enable));
+  MOCK_CONST_METHOD0(is_enabled, bool());
+};
+
+class MockLevelEstimator : public LevelEstimator {
+ public:
+  virtual ~MockLevelEstimator() {}
+  MOCK_METHOD1(Enable, int(bool enable));
+  MOCK_CONST_METHOD0(is_enabled, bool());
+  MOCK_METHOD0(RMS, int());
+};
+
+class MockNoiseSuppression : public NoiseSuppression {
+ public:
+  virtual ~MockNoiseSuppression() {}
+  MOCK_METHOD1(Enable, int(bool enable));
+  MOCK_CONST_METHOD0(is_enabled, bool());
+  MOCK_METHOD1(set_level, int(Level level));
+  MOCK_CONST_METHOD0(level, Level());
+  MOCK_CONST_METHOD0(speech_probability, float());
+  MOCK_METHOD0(NoiseEstimate, std::vector<float>());
+};
+
+class MockCustomProcessing : public CustomProcessing {
+ public:
+  virtual ~MockCustomProcessing() {}
+  MOCK_METHOD2(Initialize, void(int sample_rate_hz, int num_channels));
+  MOCK_METHOD1(Process, void(AudioBuffer* audio));
+  MOCK_CONST_METHOD0(ToString, std::string());
+};
+
+class MockEchoControl : public EchoControl {
+ public:
+  virtual ~MockEchoControl() {}
+  MOCK_METHOD1(AnalyzeRender, void(AudioBuffer* render));
+  MOCK_METHOD1(AnalyzeCapture, void(AudioBuffer* capture));
+  MOCK_METHOD2(ProcessCapture,
+               void(AudioBuffer* capture, bool echo_path_change));
+  MOCK_CONST_METHOD0(GetMetrics, Metrics());
+};
+
+class MockVoiceDetection : public VoiceDetection {
+ public:
+  virtual ~MockVoiceDetection() {}
+  MOCK_METHOD1(Enable, int(bool enable));
+  MOCK_CONST_METHOD0(is_enabled, bool());
+  MOCK_CONST_METHOD0(stream_has_voice, bool());
+  MOCK_METHOD1(set_stream_has_voice, int(bool has_voice));
+  MOCK_METHOD1(set_likelihood, int(Likelihood likelihood));
+  MOCK_CONST_METHOD0(likelihood, Likelihood());
+  MOCK_METHOD1(set_frame_size_ms, int(int size));
+  MOCK_CONST_METHOD0(frame_size_ms, int());
+};
+
+class MockAudioProcessing : public testing::NiceMock<AudioProcessing> {
+ public:
+  MockAudioProcessing()
+      : echo_cancellation_(new testing::NiceMock<MockEchoCancellation>()),
+        echo_control_mobile_(new testing::NiceMock<MockEchoControlMobile>()),
+        gain_control_(new testing::NiceMock<MockGainControl>()),
+        high_pass_filter_(new testing::NiceMock<MockHighPassFilter>()),
+        level_estimator_(new testing::NiceMock<MockLevelEstimator>()),
+        noise_suppression_(new testing::NiceMock<MockNoiseSuppression>()),
+        voice_detection_(new testing::NiceMock<MockVoiceDetection>()) {
+  }
+
+  virtual ~MockAudioProcessing() {}
+
+  MOCK_METHOD0(Initialize, int());
+  MOCK_METHOD6(Initialize, int(int capture_input_sample_rate_hz,
+                               int capture_output_sample_rate_hz,
+                               int render_sample_rate_hz,
+                               ChannelLayout capture_input_layout,
+                               ChannelLayout capture_output_layout,
+                               ChannelLayout render_input_layout));
+  MOCK_METHOD1(Initialize, int(const ProcessingConfig& processing_config));
+  MOCK_METHOD1(ApplyConfig, void(const Config& config));
+  MOCK_METHOD1(SetExtraOptions, void(const webrtc::Config& config));
+  MOCK_CONST_METHOD0(proc_sample_rate_hz, int());
+  MOCK_CONST_METHOD0(proc_split_sample_rate_hz, int());
+  MOCK_CONST_METHOD0(num_input_channels, size_t());
+  MOCK_CONST_METHOD0(num_proc_channels, size_t());
+  MOCK_CONST_METHOD0(num_output_channels, size_t());
+  MOCK_CONST_METHOD0(num_reverse_channels, size_t());
+  MOCK_METHOD1(set_output_will_be_muted, void(bool muted));
+  MOCK_METHOD1(ProcessStream, int(AudioFrame* frame));
+  MOCK_METHOD7(ProcessStream, int(const float* const* src,
+                                  size_t samples_per_channel,
+                                  int input_sample_rate_hz,
+                                  ChannelLayout input_layout,
+                                  int output_sample_rate_hz,
+                                  ChannelLayout output_layout,
+                                  float* const* dest));
+  MOCK_METHOD4(ProcessStream, int(const float* const* src,
+                                 const StreamConfig& input_config,
+                                 const StreamConfig& output_config,
+                                 float* const* dest));
+  MOCK_METHOD1(ProcessReverseStream, int(AudioFrame* frame));
+  MOCK_METHOD4(AnalyzeReverseStream, int(const float* const* data,
+                                         size_t samples_per_channel,
+                                         int sample_rate_hz,
+                                         ChannelLayout layout));
+  MOCK_METHOD4(ProcessReverseStream, int(const float* const* src,
+                                         const StreamConfig& input_config,
+                                         const StreamConfig& output_config,
+                                         float* const* dest));
+  MOCK_METHOD1(set_stream_delay_ms, int(int delay));
+  MOCK_CONST_METHOD0(stream_delay_ms, int());
+  MOCK_CONST_METHOD0(was_stream_delay_set, bool());
+  MOCK_METHOD1(set_stream_key_pressed, void(bool key_pressed));
+  MOCK_METHOD1(set_delay_offset_ms, void(int offset));
+  MOCK_CONST_METHOD0(delay_offset_ms, int());
+
+  virtual void AttachAecDump(std::unique_ptr<AecDump> aec_dump) {}
+  MOCK_METHOD0(DetachAecDump, void());
+
+  virtual void AttachPlayoutAudioGenerator(
+      std::unique_ptr<AudioGenerator> audio_generator) {}
+  MOCK_METHOD0(DetachPlayoutAudioGenerator, void());
+
+  MOCK_METHOD0(UpdateHistogramsOnCallEnd, void());
+  MOCK_CONST_METHOD0(GetStatistics, AudioProcessingStatistics());
+  MOCK_CONST_METHOD1(GetStatistics, AudioProcessingStats(bool));
+  virtual MockEchoCancellation* echo_cancellation() const {
+    return echo_cancellation_.get();
+  }
+  virtual MockEchoControlMobile* echo_control_mobile() const {
+    return echo_control_mobile_.get();
+  }
+  virtual MockGainControl* gain_control() const {
+    return gain_control_.get();
+  }
+  virtual MockHighPassFilter* high_pass_filter() const {
+    return high_pass_filter_.get();
+  }
+  virtual MockLevelEstimator* level_estimator() const {
+    return level_estimator_.get();
+  }
+  virtual MockNoiseSuppression* noise_suppression() const {
+    return noise_suppression_.get();
+  }
+  virtual MockVoiceDetection* voice_detection() const {
+    return voice_detection_.get();
+  }
+
+  MOCK_CONST_METHOD0(GetConfig, AudioProcessing::Config());
+
+ private:
+  std::unique_ptr<MockEchoCancellation> echo_cancellation_;
+  std::unique_ptr<MockEchoControlMobile> echo_control_mobile_;
+  std::unique_ptr<MockGainControl> gain_control_;
+  std::unique_ptr<MockHighPassFilter> high_pass_filter_;
+  std::unique_ptr<MockLevelEstimator> level_estimator_;
+  std::unique_ptr<MockNoiseSuppression> noise_suppression_;
+  std::unique_ptr<MockVoiceDetection> voice_detection_;
+};
+
+}  // namespace test
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_INCLUDE_MOCK_AUDIO_PROCESSING_H_
diff --git a/modules/audio_processing/intelligibility/intelligibility_enhancer.cc b/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
new file mode 100644
index 0000000..0e696d9
--- /dev/null
+++ b/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
@@ -0,0 +1,392 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/intelligibility/intelligibility_enhancer.h"
+
+#include <math.h>
+#include <stdlib.h>
+#include <algorithm>
+#include <limits>
+#include <numeric>
+
+#include "common_audio/include/audio_util.h"
+#include "common_audio/window_generator.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+
+namespace {
+
+const size_t kErbResolution = 2;
+const int kWindowSizeMs = 16;
+const int kChunkSizeMs = 10;  // Size provided by APM.
+const float kClipFreqKhz = 0.2f;
+const float kKbdAlpha = 1.5f;
+const float kLambdaBot = -1.f;      // Extreme values in bisection
+const float kLambdaTop = -1e-5f;      // search for lamda.
+const float kVoiceProbabilityThreshold = 0.5f;
+// Number of chunks after voice activity which is still considered speech.
+const size_t kSpeechOffsetDelay = 10;
+const float kDecayRate = 0.995f;              // Power estimation decay rate.
+const float kMaxRelativeGainChange = 0.005f;
+const float kRho = 0.0004f;  // Default production and interpretation SNR.
+const float kPowerNormalizationFactor = 1.f / (1 << 30);
+const float kMaxActiveSNR = 128.f;  // 21dB
+const float kMinInactiveSNR = 32.f;  // 15dB
+const size_t kGainUpdatePeriod = 10u;
+
+// Returns dot product of vectors |a| and |b| with size |length|.
+float DotProduct(const float* a, const float* b, size_t length) {
+  float ret = 0.f;
+  for (size_t i = 0; i < length; ++i) {
+    ret += a[i] * b[i];
+  }
+  return ret;
+}
+
+// Computes the power across ERB bands from the power spectral density |pow|.
+// Stores it in |result|.
+void MapToErbBands(const float* pow,
+                   const std::vector<std::vector<float>>& filter_bank,
+                   float* result) {
+  for (size_t i = 0; i < filter_bank.size(); ++i) {
+    RTC_DCHECK_GT(filter_bank[i].size(), 0);
+    result[i] = kPowerNormalizationFactor *
+                DotProduct(filter_bank[i].data(), pow, filter_bank[i].size());
+  }
+}
+
+}  // namespace
+
+IntelligibilityEnhancer::IntelligibilityEnhancer(int sample_rate_hz,
+                                                 size_t num_render_channels,
+                                                 size_t num_bands,
+                                                 size_t num_noise_bins)
+    : freqs_(RealFourier::ComplexLength(
+          RealFourier::FftOrder(sample_rate_hz * kWindowSizeMs / 1000))),
+      num_noise_bins_(num_noise_bins),
+      chunk_length_(static_cast<size_t>(sample_rate_hz * kChunkSizeMs / 1000)),
+      bank_size_(GetBankSize(sample_rate_hz, kErbResolution)),
+      sample_rate_hz_(sample_rate_hz),
+      num_render_channels_(num_render_channels),
+      clear_power_estimator_(freqs_, kDecayRate),
+      noise_power_estimator_(num_noise_bins, kDecayRate),
+      filtered_clear_pow_(bank_size_, 0.f),
+      filtered_noise_pow_(num_noise_bins, 0.f),
+      center_freqs_(bank_size_),
+      capture_filter_bank_(CreateErbBank(num_noise_bins)),
+      render_filter_bank_(CreateErbBank(freqs_)),
+      gains_eq_(bank_size_),
+      gain_applier_(freqs_, kMaxRelativeGainChange),
+      audio_s16_(chunk_length_),
+      chunks_since_voice_(kSpeechOffsetDelay),
+      is_speech_(false),
+      snr_(kMaxActiveSNR),
+      is_active_(false),
+      num_chunks_(0u),
+      num_active_chunks_(0u),
+      noise_estimation_buffer_(num_noise_bins),
+      noise_estimation_queue_(kMaxNumNoiseEstimatesToBuffer,
+                              std::vector<float>(num_noise_bins),
+                              RenderQueueItemVerifier<float>(num_noise_bins)) {
+  RTC_DCHECK_LE(kRho, 1.f);
+
+  const size_t erb_index = static_cast<size_t>(
+      ceilf(11.17f * logf((kClipFreqKhz + 0.312f) / (kClipFreqKhz + 14.6575f)) +
+            43.f));
+  start_freq_ = std::max(static_cast<size_t>(1), erb_index * kErbResolution);
+
+  size_t window_size = static_cast<size_t>(1) << RealFourier::FftOrder(freqs_);
+  std::vector<float> kbd_window(window_size);
+  WindowGenerator::KaiserBesselDerived(kKbdAlpha, window_size,
+                                       kbd_window.data());
+  render_mangler_.reset(new LappedTransform(
+      num_render_channels_, num_render_channels_, chunk_length_,
+      kbd_window.data(), window_size, window_size / 2, this));
+
+  const size_t initial_delay = render_mangler_->initial_delay();
+  for (size_t i = 0u; i < num_bands - 1; ++i) {
+    high_bands_buffers_.push_back(std::unique_ptr<intelligibility::DelayBuffer>(
+        new intelligibility::DelayBuffer(initial_delay, num_render_channels_)));
+  }
+}
+
+IntelligibilityEnhancer::~IntelligibilityEnhancer() {
+  // Don't rely on this log, since the destructor isn't called when the
+  // app/tab is killed.
+  if (num_chunks_ > 0) {
+    RTC_LOG(LS_INFO) << "Intelligibility Enhancer was active for "
+                     << 100.f * static_cast<float>(num_active_chunks_) /
+                            num_chunks_
+                     << "% of the call.";
+  } else {
+    RTC_LOG(LS_INFO) << "Intelligibility Enhancer processed no chunk.";
+  }
+}
+
+void IntelligibilityEnhancer::SetCaptureNoiseEstimate(
+    std::vector<float> noise, float gain) {
+  RTC_DCHECK_EQ(noise.size(), num_noise_bins_);
+  for (auto& bin : noise) {
+    bin *= gain;
+  }
+  // Disregarding return value since buffer overflow is acceptable, because it
+  // is not critical to get each noise estimate.
+  if (noise_estimation_queue_.Insert(&noise)) {
+  };
+}
+
+void IntelligibilityEnhancer::ProcessRenderAudio(AudioBuffer* audio) {
+  RTC_DCHECK_EQ(num_render_channels_, audio->num_channels());
+  while (noise_estimation_queue_.Remove(&noise_estimation_buffer_)) {
+    noise_power_estimator_.Step(noise_estimation_buffer_.data());
+  }
+  float* const* low_band = audio->split_channels_f(kBand0To8kHz);
+  is_speech_ = IsSpeech(low_band[0]);
+  render_mangler_->ProcessChunk(low_band, low_band);
+  DelayHighBands(audio);
+}
+
+void IntelligibilityEnhancer::ProcessAudioBlock(
+    const std::complex<float>* const* in_block,
+    size_t in_channels,
+    size_t frames,
+    size_t /* out_channels */,
+    std::complex<float>* const* out_block) {
+  RTC_DCHECK_EQ(freqs_, frames);
+  if (is_speech_) {
+    clear_power_estimator_.Step(in_block[0]);
+  }
+  SnrBasedEffectActivation();
+  ++num_chunks_;
+  if (is_active_) {
+    ++num_active_chunks_;
+    if (num_chunks_ % kGainUpdatePeriod == 0) {
+      MapToErbBands(clear_power_estimator_.power().data(), render_filter_bank_,
+                    filtered_clear_pow_.data());
+      MapToErbBands(noise_power_estimator_.power().data(), capture_filter_bank_,
+                    filtered_noise_pow_.data());
+      SolveForGainsGivenLambda(kLambdaTop, start_freq_, gains_eq_.data());
+      const float power_target = std::accumulate(
+          filtered_clear_pow_.data(),
+          filtered_clear_pow_.data() + bank_size_,
+          0.f);
+      const float power_top =
+          DotProduct(gains_eq_.data(), filtered_clear_pow_.data(), bank_size_);
+      SolveForGainsGivenLambda(kLambdaBot, start_freq_, gains_eq_.data());
+      const float power_bot =
+          DotProduct(gains_eq_.data(), filtered_clear_pow_.data(), bank_size_);
+      if (power_target >= power_bot && power_target <= power_top) {
+        SolveForLambda(power_target);
+        UpdateErbGains();
+      }  // Else experiencing power underflow, so do nothing.
+    }
+  }
+  for (size_t i = 0; i < in_channels; ++i) {
+    gain_applier_.Apply(in_block[i], out_block[i]);
+  }
+}
+
+void IntelligibilityEnhancer::SnrBasedEffectActivation() {
+  const float* clear_psd = clear_power_estimator_.power().data();
+  const float* noise_psd = noise_power_estimator_.power().data();
+  const float clear_power =
+      std::accumulate(clear_psd, clear_psd + freqs_, 0.f);
+  const float noise_power =
+      std::accumulate(noise_psd, noise_psd + freqs_, 0.f);
+  snr_ = kDecayRate * snr_ + (1.f - kDecayRate) * clear_power /
+      (noise_power + std::numeric_limits<float>::epsilon());
+  if (is_active_) {
+    if (snr_ > kMaxActiveSNR) {
+      RTC_LOG(LS_INFO) << "Intelligibility Enhancer was deactivated at chunk "
+                       << num_chunks_;
+      is_active_ = false;
+      // Set the target gains to unity.
+      float* gains = gain_applier_.target();
+      for (size_t i = 0; i < freqs_; ++i) {
+        gains[i] = 1.f;
+      }
+    }
+  } else {
+    if (snr_ < kMinInactiveSNR) {
+      RTC_LOG(LS_INFO) << "Intelligibility Enhancer was activated at chunk "
+                       << num_chunks_;
+      is_active_ = true;
+    }
+  }
+}
+
+void IntelligibilityEnhancer::SolveForLambda(float power_target) {
+  const float kConvergeThresh = 0.001f;  // TODO(ekmeyerson): Find best values
+  const int kMaxIters = 100;             // for these, based on experiments.
+
+  const float reciprocal_power_target =
+      1.f / (power_target + std::numeric_limits<float>::epsilon());
+  float lambda_bot = kLambdaBot;
+  float lambda_top = kLambdaTop;
+  float power_ratio = 2.f;  // Ratio of achieved power to target power.
+  int iters = 0;
+  while (std::fabs(power_ratio - 1.f) > kConvergeThresh && iters <= kMaxIters) {
+    const float lambda = (lambda_bot + lambda_top) / 2.f;
+    SolveForGainsGivenLambda(lambda, start_freq_, gains_eq_.data());
+    const float power =
+        DotProduct(gains_eq_.data(), filtered_clear_pow_.data(), bank_size_);
+    if (power < power_target) {
+      lambda_bot = lambda;
+    } else {
+      lambda_top = lambda;
+    }
+    power_ratio = std::fabs(power * reciprocal_power_target);
+    ++iters;
+  }
+}
+
+void IntelligibilityEnhancer::UpdateErbGains() {
+  // (ERB gain) = filterbank' * (freq gain)
+  float* gains = gain_applier_.target();
+  for (size_t i = 0; i < freqs_; ++i) {
+    gains[i] = 0.f;
+    for (size_t j = 0; j < bank_size_; ++j) {
+      gains[i] += render_filter_bank_[j][i] * gains_eq_[j];
+    }
+  }
+}
+
+size_t IntelligibilityEnhancer::GetBankSize(int sample_rate,
+                                            size_t erb_resolution) {
+  float freq_limit = sample_rate / 2000.f;
+  size_t erb_scale = static_cast<size_t>(ceilf(
+      11.17f * logf((freq_limit + 0.312f) / (freq_limit + 14.6575f)) + 43.f));
+  return erb_scale * erb_resolution;
+}
+
+std::vector<std::vector<float>> IntelligibilityEnhancer::CreateErbBank(
+    size_t num_freqs) {
+  std::vector<std::vector<float>> filter_bank(bank_size_);
+  size_t lf = 1, rf = 4;
+
+  for (size_t i = 0; i < bank_size_; ++i) {
+    float abs_temp = fabsf((i + 1.f) / static_cast<float>(kErbResolution));
+    center_freqs_[i] = 676170.4f / (47.06538f - expf(0.08950404f * abs_temp));
+    center_freqs_[i] -= 14678.49f;
+  }
+  float last_center_freq = center_freqs_[bank_size_ - 1];
+  for (size_t i = 0; i < bank_size_; ++i) {
+    center_freqs_[i] *= 0.5f * sample_rate_hz_ / last_center_freq;
+  }
+
+  for (size_t i = 0; i < bank_size_; ++i) {
+    filter_bank[i].resize(num_freqs);
+  }
+
+  for (size_t i = 1; i <= bank_size_; ++i) {
+    size_t lll = static_cast<size_t>(
+        round(center_freqs_[rtc::SafeMax<size_t>(1, i - lf) - 1] * num_freqs /
+              (0.5f * sample_rate_hz_)));
+    size_t ll = static_cast<size_t>(
+        round(center_freqs_[rtc::SafeMax<size_t>(1, i) - 1] * num_freqs /
+              (0.5f * sample_rate_hz_)));
+    lll = rtc::SafeClamp<size_t>(lll, 1, num_freqs) - 1;
+    ll = rtc::SafeClamp<size_t>(ll, 1, num_freqs) - 1;
+
+    size_t rrr = static_cast<size_t>(
+        round(center_freqs_[rtc::SafeMin<size_t>(bank_size_, i + rf) - 1] *
+              num_freqs / (0.5f * sample_rate_hz_)));
+    size_t rr = static_cast<size_t>(
+        round(center_freqs_[rtc::SafeMin<size_t>(bank_size_, i + 1) - 1] *
+              num_freqs / (0.5f * sample_rate_hz_)));
+    rrr = rtc::SafeClamp<size_t>(rrr, 1, num_freqs) - 1;
+    rr = rtc::SafeClamp<size_t>(rr, 1, num_freqs) - 1;
+
+    float step = ll == lll ? 0.f : 1.f / (ll - lll);
+    float element = 0.f;
+    for (size_t j = lll; j <= ll; ++j) {
+      filter_bank[i - 1][j] = element;
+      element += step;
+    }
+    step = rr == rrr ? 0.f : 1.f / (rrr - rr);
+    element = 1.f;
+    for (size_t j = rr; j <= rrr; ++j) {
+      filter_bank[i - 1][j] = element;
+      element -= step;
+    }
+    for (size_t j = ll; j <= rr; ++j) {
+      filter_bank[i - 1][j] = 1.f;
+    }
+  }
+
+  for (size_t i = 0; i < num_freqs; ++i) {
+    float sum = 0.f;
+    for (size_t j = 0; j < bank_size_; ++j) {
+      sum += filter_bank[j][i];
+    }
+    for (size_t j = 0; j < bank_size_; ++j) {
+      filter_bank[j][i] /= sum;
+    }
+  }
+  return filter_bank;
+}
+
+void IntelligibilityEnhancer::SolveForGainsGivenLambda(float lambda,
+                                                       size_t start_freq,
+                                                       float* sols) {
+  const float kMinPower = 1e-5f;
+
+  const float* pow_x0 = filtered_clear_pow_.data();
+  const float* pow_n0 = filtered_noise_pow_.data();
+
+  for (size_t n = 0; n < start_freq; ++n) {
+    sols[n] = 1.f;
+  }
+
+  // Analytic solution for optimal gains. See paper for derivation.
+  for (size_t n = start_freq; n < bank_size_; ++n) {
+    if (pow_x0[n] < kMinPower || pow_n0[n] < kMinPower) {
+      sols[n] = 1.f;
+    } else {
+      const float gamma0 = 0.5f * kRho * pow_x0[n] * pow_n0[n] +
+                           lambda * pow_x0[n] * pow_n0[n] * pow_n0[n];
+      const float beta0 =
+          lambda * pow_x0[n] * (2.f - kRho) * pow_x0[n] * pow_n0[n];
+      const float alpha0 =
+          lambda * pow_x0[n] * (1.f - kRho) * pow_x0[n] * pow_x0[n];
+      RTC_DCHECK_LT(alpha0, 0.f);
+      // The quadratic equation should always have real roots, but to guard
+      // against numerical errors we limit it to a minimum of zero.
+      sols[n] = std::max(
+          0.f, (-beta0 - std::sqrt(std::max(
+                             0.f, beta0 * beta0 - 4.f * alpha0 * gamma0))) /
+                   (2.f * alpha0));
+    }
+  }
+}
+
+bool IntelligibilityEnhancer::IsSpeech(const float* audio) {
+  FloatToS16(audio, chunk_length_, audio_s16_.data());
+  vad_.ProcessChunk(audio_s16_.data(), chunk_length_, sample_rate_hz_);
+  if (vad_.last_voice_probability() > kVoiceProbabilityThreshold) {
+    chunks_since_voice_ = 0;
+  } else if (chunks_since_voice_ < kSpeechOffsetDelay) {
+    ++chunks_since_voice_;
+  }
+  return chunks_since_voice_ < kSpeechOffsetDelay;
+}
+
+void IntelligibilityEnhancer::DelayHighBands(AudioBuffer* audio) {
+  RTC_DCHECK_EQ(audio->num_bands(), high_bands_buffers_.size() + 1);
+  for (size_t i = 0u; i < high_bands_buffers_.size(); ++i) {
+    Band band = static_cast<Band>(i + 1);
+    high_bands_buffers_[i]->Delay(audio->split_channels_f(band), chunk_length_);
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/intelligibility/intelligibility_enhancer.h b/modules/audio_processing/intelligibility/intelligibility_enhancer.h
new file mode 100644
index 0000000..3e0e269
--- /dev/null
+++ b/modules/audio_processing/intelligibility/intelligibility_enhancer.h
@@ -0,0 +1,137 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_ENHANCER_H_
+#define MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_ENHANCER_H_
+
+#include <complex>
+#include <memory>
+#include <vector>
+
+#include "common_audio/channel_buffer.h"
+#include "common_audio/lapped_transform.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/intelligibility/intelligibility_utils.h"
+#include "modules/audio_processing/render_queue_item_verifier.h"
+#include "modules/audio_processing/vad/voice_activity_detector.h"
+#include "rtc_base/swap_queue.h"
+
+namespace webrtc {
+
+// Speech intelligibility enhancement module. Reads render and capture
+// audio streams and modifies the render stream with a set of gains per
+// frequency bin to enhance speech against the noise background.
+// Details of the model and algorithm can be found in the original paper:
+// http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6882788
+class IntelligibilityEnhancer : public LappedTransform::Callback {
+ public:
+  IntelligibilityEnhancer(int sample_rate_hz,
+                          size_t num_render_channels,
+                          size_t num_bands,
+                          size_t num_noise_bins);
+
+  ~IntelligibilityEnhancer() override;
+
+  // Sets the capture noise magnitude spectrum estimate.
+  void SetCaptureNoiseEstimate(std::vector<float> noise, float gain);
+
+  // Reads chunk of speech in time domain and updates with modified signal.
+  void ProcessRenderAudio(AudioBuffer* audio);
+  bool active() const;
+
+ protected:
+  // All in frequency domain, receives input |in_block|, applies
+  // intelligibility enhancement, and writes result to |out_block|.
+  void ProcessAudioBlock(const std::complex<float>* const* in_block,
+                         size_t in_channels,
+                         size_t frames,
+                         size_t out_channels,
+                         std::complex<float>* const* out_block) override;
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(IntelligibilityEnhancerTest, TestRenderUpdate);
+  FRIEND_TEST_ALL_PREFIXES(IntelligibilityEnhancerTest, TestErbCreation);
+  FRIEND_TEST_ALL_PREFIXES(IntelligibilityEnhancerTest, TestSolveForGains);
+  FRIEND_TEST_ALL_PREFIXES(IntelligibilityEnhancerTest,
+                           TestNoiseGainHasExpectedResult);
+  FRIEND_TEST_ALL_PREFIXES(IntelligibilityEnhancerTest,
+                           TestAllBandsHaveSameDelay);
+
+  // Updates the SNR estimation and enables or disables this component using a
+  // hysteresis.
+  void SnrBasedEffectActivation();
+
+  // Bisection search for optimal |lambda|.
+  void SolveForLambda(float power_target);
+
+  // Transforms freq gains to ERB gains.
+  void UpdateErbGains();
+
+  // Returns number of ERB filters.
+  static size_t GetBankSize(int sample_rate, size_t erb_resolution);
+
+  // Initializes ERB filterbank.
+  std::vector<std::vector<float>> CreateErbBank(size_t num_freqs);
+
+  // Analytically solves quadratic for optimal gains given |lambda|.
+  // Negative gains are set to 0. Stores the results in |sols|.
+  void SolveForGainsGivenLambda(float lambda, size_t start_freq, float* sols);
+
+  // Returns true if the audio is speech.
+  bool IsSpeech(const float* audio);
+
+  // Delays the high bands to compensate for the processing delay in the low
+  // band.
+  void DelayHighBands(AudioBuffer* audio);
+
+  static const size_t kMaxNumNoiseEstimatesToBuffer = 5;
+
+  const size_t freqs_;         // Num frequencies in frequency domain.
+  const size_t num_noise_bins_;
+  const size_t chunk_length_;  // Chunk size in samples.
+  const size_t bank_size_;     // Num ERB filters.
+  const int sample_rate_hz_;
+  const size_t num_render_channels_;
+
+  intelligibility::PowerEstimator<std::complex<float>> clear_power_estimator_;
+  intelligibility::PowerEstimator<float> noise_power_estimator_;
+  std::vector<float> filtered_clear_pow_;
+  std::vector<float> filtered_noise_pow_;
+  std::vector<float> center_freqs_;
+  std::vector<std::vector<float>> capture_filter_bank_;
+  std::vector<std::vector<float>> render_filter_bank_;
+  size_t start_freq_;
+
+  std::vector<float> gains_eq_;  // Pre-filter modified gains.
+  intelligibility::GainApplier gain_applier_;
+
+  std::unique_ptr<LappedTransform> render_mangler_;
+
+  VoiceActivityDetector vad_;
+  std::vector<int16_t> audio_s16_;
+  size_t chunks_since_voice_;
+  bool is_speech_;
+  float snr_;
+  bool is_active_;
+
+  unsigned long int num_chunks_;
+  unsigned long int num_active_chunks_;
+
+  std::vector<float> noise_estimation_buffer_;
+  SwapQueue<std::vector<float>, RenderQueueItemVerifier<float>>
+      noise_estimation_queue_;
+
+  std::vector<std::unique_ptr<intelligibility::DelayBuffer>>
+      high_bands_buffers_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_ENHANCER_H_
diff --git a/modules/audio_processing/intelligibility/intelligibility_enhancer_unittest.cc b/modules/audio_processing/intelligibility/intelligibility_enhancer_unittest.cc
new file mode 100644
index 0000000..98a8dae
--- /dev/null
+++ b/modules/audio_processing/intelligibility/intelligibility_enhancer_unittest.cc
@@ -0,0 +1,536 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <stdlib.h>
+
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/intelligibility/intelligibility_enhancer.h"
+#include "modules/audio_processing/noise_suppression_impl.h"
+#include "modules/audio_processing/test/audio_buffer_tools.h"
+#include "modules/audio_processing/test/bitexactness_tools.h"
+#include "rtc_base/arraysize.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+// Target output for ERB create test. Generated with matlab.
+const float kTestCenterFreqs[] = {
+    14.5213f, 29.735f,  45.6781f, 62.3884f, 79.9058f, 98.2691f, 117.521f,
+    137.708f, 158.879f, 181.084f, 204.378f, 228.816f, 254.459f, 281.371f,
+    309.618f, 339.273f, 370.411f, 403.115f, 437.469f, 473.564f, 511.497f,
+    551.371f, 593.293f, 637.386f, 683.77f,  732.581f, 783.96f,  838.06f,
+    895.046f, 955.09f,  1018.38f, 1085.13f, 1155.54f, 1229.85f, 1308.32f,
+    1391.22f, 1478.83f, 1571.5f,  1669.55f, 1773.37f, 1883.37f, 2000.f};
+const float kTestFilterBank[][33] = {
+    {0.2f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f,  0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f,  0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
+    {0.2f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f,  0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f,  0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
+    {0.2f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f,  0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f,  0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
+    {0.2f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f,  0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f,  0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
+    {0.2f, 0.25f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f,  0.f,   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f,  0.f,   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
+    {0.f, 0.25f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f,   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f,   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
+    {0.f, 0.25f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f,   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f,   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
+    {0.f, 0.25f, 0.25f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f,   0.f,   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f,   0.f,   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
+    {0.f, 0.f, 0.25f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f,   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f,   0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
+    {0.f, 0.f, 0.25f, 0.142857f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f,   0.f,       0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f,   0.f,       0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
+    {0.f, 0.f, 0.25f, 0.285714f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f,   0.f,       0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f,   0.f,       0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
+    {0.f, 0.f, 0.f, 0.285714f, 0.142857f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f, 0.f,       0.f,       0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f, 0.f,       0.f,       0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
+    {0.f, 0.f, 0.f, 0.285714f, 0.285714f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f, 0.f,       0.f,       0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f, 0.f,       0.f,       0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
+    {0.f, 0.f, 0.f, 0.f, 0.285714f, 0.142857f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f, 0.f, 0.f, 0.f, 0.f},
+    {0.f, 0.f, 0.f, 0.f, 0.285714f, 0.285714f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f, 0.f, 0.f, 0.f, 0.f},
+    {0.f, 0.f, 0.f, 0.f, 0.f, 0.285714f, 0.142857f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f, 0.f, 0.f, 0.f},
+    {0.f, 0.f, 0.f, 0.f, 0.f, 0.285714f, 0.285714f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f, 0.f, 0.f, 0.f},
+    {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.285714f, 0.142857f, 0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f, 0.f, 0.f},
+    {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.285714f, 0.285714f, 0.157895f, 0.f, 0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f,       0.f, 0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f,       0.f, 0.f},
+    {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.285714f, 0.210526f, 0.117647f, 0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f,       0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f,       0.f},
+    {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.285714f, 0.315789f, 0.176471f, 0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f,       0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f,       0.f},
+    {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.315789f, 0.352941f, 0.142857f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f},
+    {0.f,       0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.352941f, 0.285714f,
+     0.157895f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,
+     0.f,       0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f},
+    {0.f,       0.f,       0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.285714f,
+     0.210526f, 0.111111f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f,       0.f,       0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
+    {0.f, 0.f,       0.f,       0.f,       0.f,       0.f, 0.f, 0.f, 0.f,
+     0.f, 0.285714f, 0.315789f, 0.222222f, 0.111111f, 0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f,       0.f,       0.f,       0.f,       0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f,       0.f,       0.f,       0.f,       0.f},
+    {0.f, 0.f, 0.f,       0.f,       0.f,       0.f,       0.f, 0.f, 0.f,
+     0.f, 0.f, 0.315789f, 0.333333f, 0.222222f, 0.111111f, 0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f,       0.f,       0.f,       0.f,       0.f, 0.f, 0.f,
+     0.f, 0.f, 0.f,       0.f,       0.f,       0.f},
+    {0.f, 0.f, 0.f, 0.f,       0.f,       0.f,       0.f,       0.f, 0.f,
+     0.f, 0.f, 0.f, 0.333333f, 0.333333f, 0.222222f, 0.111111f, 0.f, 0.f,
+     0.f, 0.f, 0.f, 0.f,       0.f,       0.f,       0.f,       0.f, 0.f,
+     0.f, 0.f, 0.f, 0.f,       0.f,       0.f},
+    {0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f,       0.f,       0.f,
+     0.f, 0.f, 0.f, 0.f, 0.333333f, 0.333333f, 0.222222f, 0.111111f, 0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f,       0.f,       0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f,       0.f},
+    {0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f,       0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.333333f, 0.333333f, 0.222222f, 0.111111f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f,       0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.f},
+    {0.f,       0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f,
+     0.f,       0.f, 0.f, 0.f, 0.f, 0.f, 0.333333f, 0.333333f, 0.222222f,
+     0.108108f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f,
+     0.f,       0.f, 0.f, 0.f, 0.f, 0.f},
+    {0.f,       0.f,       0.f,        0.f, 0.f, 0.f, 0.f, 0.f,       0.f,
+     0.f,       0.f,       0.f,        0.f, 0.f, 0.f, 0.f, 0.333333f, 0.333333f,
+     0.243243f, 0.153846f, 0.0833333f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,
+     0.f,       0.f,       0.f,        0.f, 0.f, 0.f},
+    {0.f,       0.f,       0.f,       0.f,        0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f,       0.f,       0.f,       0.f,        0.f, 0.f, 0.f, 0.f, 0.333333f,
+     0.324324f, 0.230769f, 0.166667f, 0.0909091f, 0.f, 0.f, 0.f, 0.f, 0.f,
+     0.f,       0.f,       0.f,       0.f,        0.f, 0.f},
+    {0.f,       0.f,       0.f,   0.f,       0.f,        0.f, 0.f, 0.f, 0.f,
+     0.f,       0.f,       0.f,   0.f,       0.f,        0.f, 0.f, 0.f, 0.f,
+     0.324324f, 0.307692f, 0.25f, 0.181818f, 0.0833333f, 0.f, 0.f, 0.f, 0.f,
+     0.f,       0.f,       0.f,   0.f,       0.f,        0.f},
+    {0.f,       0.f,   0.f,       0.f,        0.f, 0.f,       0.f,
+     0.f,       0.f,   0.f,       0.f,        0.f, 0.f,       0.f,
+     0.f,       0.f,   0.f,       0.f,        0.f, 0.307692f, 0.333333f,
+     0.363636f, 0.25f, 0.151515f, 0.0793651f, 0.f, 0.f,       0.f,
+     0.f,       0.f,   0.f,       0.f,        0.f},
+    {0.f,       0.f,       0.f,        0.f,       0.f,       0.f,
+     0.f,       0.f,       0.f,        0.f,       0.f,       0.f,
+     0.f,       0.f,       0.f,        0.f,       0.f,       0.f,
+     0.f,       0.f,       0.166667f,  0.363636f, 0.333333f, 0.242424f,
+     0.190476f, 0.133333f, 0.0689655f, 0.f,       0.f,       0.f,
+     0.f,       0.f,       0.f},
+    {0.f,        0.f, 0.f, 0.f, 0.f,       0.f,      0.f,       0.f,  0.f,
+     0.f,        0.f, 0.f, 0.f, 0.f,       0.f,      0.f,       0.f,  0.f,
+     0.f,        0.f, 0.f, 0.f, 0.333333f, 0.30303f, 0.253968f, 0.2f, 0.137931f,
+     0.0714286f, 0.f, 0.f, 0.f, 0.f,       0.f},
+    {0.f,    0.f,        0.f,      0.f,      0.f,       0.f,       0.f,
+     0.f,    0.f,        0.f,      0.f,      0.f,       0.f,       0.f,
+     0.f,    0.f,        0.f,      0.f,      0.f,       0.f,       0.f,
+     0.f,    0.f,        0.30303f, 0.31746f, 0.333333f, 0.275862f, 0.214286f,
+     0.125f, 0.0655738f, 0.f,      0.f,      0.f},
+    {0.f,   0.f,       0.f,       0.f,        0.f,       0.f,       0.f,
+     0.f,   0.f,       0.f,       0.f,        0.f,       0.f,       0.f,
+     0.f,   0.f,       0.f,       0.f,        0.f,       0.f,       0.f,
+     0.f,   0.f,       0.f,       0.15873f,   0.333333f, 0.344828f, 0.357143f,
+     0.25f, 0.196721f, 0.137931f, 0.0816327f, 0.f},
+    {0.f,     0.f,       0.f,       0.f,       0.f, 0.f,       0.f,
+     0.f,     0.f,       0.f,       0.f,       0.f, 0.f,       0.f,
+     0.f,     0.f,       0.f,       0.f,       0.f, 0.f,       0.f,
+     0.f,     0.f,       0.f,       0.f,       0.f, 0.172414f, 0.357143f,
+     0.3125f, 0.245902f, 0.172414f, 0.102041f, 0.f},
+    {0.f, 0.f,     0.f,       0.f,       0.f,       0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f,     0.f,       0.f,       0.f,       0.f, 0.f, 0.f, 0.f,
+     0.f, 0.f,     0.f,       0.f,       0.f,       0.f, 0.f, 0.f, 0.f,
+     0.f, 0.3125f, 0.327869f, 0.344828f, 0.204082f, 0.f},
+    {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f,       0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,       0.f,       0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.163934f, 0.344828f, 0.408163f, 0.5f},
+    {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,       0.f,
+     0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.204082f, 0.5f}};
+static_assert(arraysize(kTestCenterFreqs) == arraysize(kTestFilterBank),
+              "Test filterbank badly initialized.");
+
+// Target output for gain solving test. Generated with matlab.
+const size_t kTestStartFreq = 12;  // Lowest integral frequency for ERBs.
+const float kTestZeroVar = 1.f;
+const float kTestNonZeroVarLambdaTop[] = {
+    1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 0.f, 0.f,
+    0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+    0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
+static_assert(arraysize(kTestCenterFreqs) ==
+                  arraysize(kTestNonZeroVarLambdaTop),
+              "Power test data badly initialized.");
+const float kMaxTestError = 0.005f;
+
+// Enhancer initialization parameters.
+const int kSamples = 10000;
+const int kSampleRate = 4000;
+const int kNumChannels = 1;
+const int kFragmentSize = kSampleRate / 100;
+const size_t kNumNoiseBins = 129;
+const size_t kNumBands = 1;
+
+// Number of frames to process in the bitexactness tests.
+const size_t kNumFramesToProcess = 1000;
+
+int IntelligibilityEnhancerSampleRate(int sample_rate_hz) {
+  return (sample_rate_hz > AudioProcessing::kSampleRate16kHz
+              ? AudioProcessing::kSampleRate16kHz
+              : sample_rate_hz);
+}
+
+// Process one frame of data and produce the output.
+void ProcessOneFrame(int sample_rate_hz,
+                     AudioBuffer* render_audio_buffer,
+                     AudioBuffer* capture_audio_buffer,
+                     NoiseSuppressionImpl* noise_suppressor,
+                     IntelligibilityEnhancer* intelligibility_enhancer) {
+  if (sample_rate_hz > AudioProcessing::kSampleRate16kHz) {
+    render_audio_buffer->SplitIntoFrequencyBands();
+    capture_audio_buffer->SplitIntoFrequencyBands();
+  }
+
+  intelligibility_enhancer->ProcessRenderAudio(render_audio_buffer);
+
+  noise_suppressor->AnalyzeCaptureAudio(capture_audio_buffer);
+  noise_suppressor->ProcessCaptureAudio(capture_audio_buffer);
+
+  intelligibility_enhancer->SetCaptureNoiseEstimate(
+      noise_suppressor->NoiseEstimate(), 0);
+
+  if (sample_rate_hz > AudioProcessing::kSampleRate16kHz) {
+    render_audio_buffer->MergeFrequencyBands();
+  }
+}
+
+// Processes a specified amount of frames, verifies the results and reports
+// any errors.
+void RunBitexactnessTest(int sample_rate_hz,
+                         size_t num_channels,
+                         rtc::ArrayView<const float> output_reference) {
+  const StreamConfig render_config(sample_rate_hz, num_channels, false);
+  AudioBuffer render_buffer(
+      render_config.num_frames(), render_config.num_channels(),
+      render_config.num_frames(), render_config.num_channels(),
+      render_config.num_frames());
+  test::InputAudioFile render_file(
+      test::GetApmRenderTestVectorFileName(sample_rate_hz));
+  std::vector<float> render_input(render_buffer.num_frames() *
+                                  render_buffer.num_channels());
+
+  const StreamConfig capture_config(sample_rate_hz, num_channels, false);
+  AudioBuffer capture_buffer(
+      capture_config.num_frames(), capture_config.num_channels(),
+      capture_config.num_frames(), capture_config.num_channels(),
+      capture_config.num_frames());
+  test::InputAudioFile capture_file(
+      test::GetApmCaptureTestVectorFileName(sample_rate_hz));
+  std::vector<float> capture_input(render_buffer.num_frames() *
+                                   capture_buffer.num_channels());
+
+  rtc::CriticalSection crit_capture;
+  NoiseSuppressionImpl noise_suppressor(&crit_capture);
+  noise_suppressor.Initialize(capture_config.num_channels(), sample_rate_hz);
+  noise_suppressor.Enable(true);
+
+  IntelligibilityEnhancer intelligibility_enhancer(
+      IntelligibilityEnhancerSampleRate(sample_rate_hz),
+      render_config.num_channels(), kNumBands,
+      NoiseSuppressionImpl::num_noise_bins());
+
+  for (size_t frame_no = 0u; frame_no < kNumFramesToProcess; ++frame_no) {
+    ReadFloatSamplesFromStereoFile(render_buffer.num_frames(),
+                                   render_buffer.num_channels(), &render_file,
+                                   render_input);
+    ReadFloatSamplesFromStereoFile(capture_buffer.num_frames(),
+                                   capture_buffer.num_channels(), &capture_file,
+                                   capture_input);
+
+    test::CopyVectorToAudioBuffer(render_config, render_input, &render_buffer);
+    test::CopyVectorToAudioBuffer(capture_config, capture_input,
+                                  &capture_buffer);
+
+    ProcessOneFrame(sample_rate_hz, &render_buffer, &capture_buffer,
+                    &noise_suppressor, &intelligibility_enhancer);
+  }
+
+  // Extract and verify the test results.
+  std::vector<float> render_output;
+  test::ExtractVectorFromAudioBuffer(render_config, &render_buffer,
+                                     &render_output);
+
+  const float kElementErrorBound = 1.f / static_cast<float>(1 << 15);
+
+  // Compare the output with the reference. Only the first values of the output
+  // from last frame processed are compared in order not having to specify all
+  // preceeding frames as testvectors. As the algorithm being tested has a
+  // memory, testing only the last frame implicitly also tests the preceeding
+  // frames.
+  EXPECT_TRUE(test::VerifyDeinterleavedArray(
+      render_buffer.num_frames(), render_config.num_channels(),
+      output_reference, render_output, kElementErrorBound));
+}
+
+float float_rand() {
+  return std::rand() * 2.f / RAND_MAX - 1;
+}
+
+}  // namespace
+
+class IntelligibilityEnhancerTest : public ::testing::Test {
+ protected:
+  IntelligibilityEnhancerTest()
+      : clear_buffer_(kFragmentSize,
+                      kNumChannels,
+                      kFragmentSize,
+                      kNumChannels,
+                      kFragmentSize),
+        stream_config_(kSampleRate, kNumChannels),
+        clear_data_(kSamples),
+        noise_data_(kNumNoiseBins),
+        orig_data_(kSamples) {
+    std::srand(1);
+    enh_.reset(new IntelligibilityEnhancer(kSampleRate, kNumChannels, kNumBands,
+                                           kNumNoiseBins));
+  }
+
+  bool CheckUpdate() {
+    enh_.reset(new IntelligibilityEnhancer(kSampleRate, kNumChannels, kNumBands,
+                                           kNumNoiseBins));
+    float* clear_cursor = clear_data_.data();
+    for (int i = 0; i < kSamples; i += kFragmentSize) {
+      enh_->SetCaptureNoiseEstimate(noise_data_, 1);
+      clear_buffer_.CopyFrom(&clear_cursor, stream_config_);
+      enh_->ProcessRenderAudio(&clear_buffer_);
+      clear_buffer_.CopyTo(stream_config_, &clear_cursor);
+      clear_cursor += kFragmentSize;
+    }
+    for (int i = initial_delay_; i < kSamples; i++) {
+      if (std::fabs(clear_data_[i] - orig_data_[i - initial_delay_]) >
+          kMaxTestError) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  std::unique_ptr<IntelligibilityEnhancer> enh_;
+  // Render clean speech buffer.
+  AudioBuffer clear_buffer_;
+  StreamConfig stream_config_;
+  std::vector<float> clear_data_;
+  std::vector<float> noise_data_;
+  std::vector<float> orig_data_;
+  size_t initial_delay_;
+};
+
+// For each class of generated data, tests that render stream is updated when
+// it should be.
+TEST_F(IntelligibilityEnhancerTest, TestRenderUpdate) {
+  initial_delay_ = enh_->render_mangler_->initial_delay();
+  std::fill(noise_data_.begin(), noise_data_.end(), 0.f);
+  std::fill(orig_data_.begin(), orig_data_.end(), 0.f);
+  std::fill(clear_data_.begin(), clear_data_.end(), 0.f);
+  EXPECT_FALSE(CheckUpdate());
+  std::generate(clear_data_.begin(), clear_data_.end(), float_rand);
+  orig_data_ = clear_data_;
+  EXPECT_FALSE(CheckUpdate());
+  std::generate(clear_data_.begin(), clear_data_.end(), float_rand);
+  orig_data_ = clear_data_;
+  std::generate(noise_data_.begin(), noise_data_.end(), float_rand);
+  FloatToFloatS16(noise_data_.data(), noise_data_.size(), noise_data_.data());
+  EXPECT_TRUE(CheckUpdate());
+}
+
+// Tests ERB bank creation, comparing against matlab output.
+TEST_F(IntelligibilityEnhancerTest, TestErbCreation) {
+  ASSERT_EQ(arraysize(kTestCenterFreqs), enh_->bank_size_);
+  for (size_t i = 0; i < enh_->bank_size_; ++i) {
+    EXPECT_NEAR(kTestCenterFreqs[i], enh_->center_freqs_[i], kMaxTestError);
+    ASSERT_EQ(arraysize(kTestFilterBank[0]), enh_->freqs_);
+    for (size_t j = 0; j < enh_->freqs_; ++j) {
+      EXPECT_NEAR(kTestFilterBank[i][j], enh_->render_filter_bank_[i][j],
+                  kMaxTestError);
+    }
+  }
+}
+
+// Tests analytic solution for optimal gains, comparing
+// against matlab output.
+TEST_F(IntelligibilityEnhancerTest, TestSolveForGains) {
+  ASSERT_EQ(kTestStartFreq, enh_->start_freq_);
+  std::vector<float> sols(enh_->bank_size_);
+  float lambda = -0.001f;
+  for (size_t i = 0; i < enh_->bank_size_; i++) {
+    enh_->filtered_clear_pow_[i] = 0.f;
+    enh_->filtered_noise_pow_[i] = 0.f;
+  }
+  enh_->SolveForGainsGivenLambda(lambda, enh_->start_freq_, sols.data());
+  for (size_t i = 0; i < enh_->bank_size_; i++) {
+    EXPECT_NEAR(kTestZeroVar, sols[i], kMaxTestError);
+  }
+  for (size_t i = 0; i < enh_->bank_size_; i++) {
+    enh_->filtered_clear_pow_[i] = static_cast<float>(i + 1);
+    enh_->filtered_noise_pow_[i] = static_cast<float>(enh_->bank_size_ - i);
+  }
+  enh_->SolveForGainsGivenLambda(lambda, enh_->start_freq_, sols.data());
+  for (size_t i = 0; i < enh_->bank_size_; i++) {
+    EXPECT_NEAR(kTestNonZeroVarLambdaTop[i], sols[i], kMaxTestError);
+  }
+  lambda = -1.f;
+  enh_->SolveForGainsGivenLambda(lambda, enh_->start_freq_, sols.data());
+  for (size_t i = 0; i < enh_->bank_size_; i++) {
+    EXPECT_NEAR(kTestNonZeroVarLambdaTop[i], sols[i], kMaxTestError);
+  }
+}
+
+TEST_F(IntelligibilityEnhancerTest, TestNoiseGainHasExpectedResult) {
+  const float kGain = 2.f;
+  const float kTolerance = 0.007f;
+  std::vector<float> noise(kNumNoiseBins);
+  std::vector<float> noise_psd(kNumNoiseBins);
+  std::generate(noise.begin(), noise.end(), float_rand);
+  for (size_t i = 0; i < kNumNoiseBins; ++i) {
+    noise_psd[i] = kGain * kGain * noise[i] * noise[i];
+  }
+  float* clear_cursor = clear_data_.data();
+  for (size_t i = 0; i < kNumFramesToProcess; ++i) {
+    enh_->SetCaptureNoiseEstimate(noise, kGain);
+    clear_buffer_.CopyFrom(&clear_cursor, stream_config_);
+    enh_->ProcessRenderAudio(&clear_buffer_);
+  }
+  const std::vector<float>& estimated_psd =
+      enh_->noise_power_estimator_.power();
+  for (size_t i = 0; i < kNumNoiseBins; ++i) {
+    EXPECT_LT(std::abs(estimated_psd[i] - noise_psd[i]) / noise_psd[i],
+              kTolerance);
+  }
+}
+
+TEST_F(IntelligibilityEnhancerTest, TestAllBandsHaveSameDelay) {
+  const int kTestSampleRate = AudioProcessing::kSampleRate32kHz;
+  const int kTestSplitRate = AudioProcessing::kSampleRate16kHz;
+  const size_t kTestNumBands =
+      rtc::CheckedDivExact(kTestSampleRate, kTestSplitRate);
+  const size_t kTestFragmentSize = rtc::CheckedDivExact(kTestSampleRate, 100);
+  const size_t kTestSplitFragmentSize =
+      rtc::CheckedDivExact(kTestSplitRate, 100);
+  enh_.reset(new IntelligibilityEnhancer(kTestSplitRate, kNumChannels,
+                                         kTestNumBands, kNumNoiseBins));
+  size_t initial_delay = enh_->render_mangler_->initial_delay();
+  std::vector<float> rand_gen_buf(kTestFragmentSize);
+  AudioBuffer original_buffer(kTestFragmentSize, kNumChannels,
+                              kTestFragmentSize, kNumChannels,
+                              kTestFragmentSize);
+  AudioBuffer audio_buffer(kTestFragmentSize, kNumChannels, kTestFragmentSize,
+                           kNumChannels, kTestFragmentSize);
+  for (size_t i = 0u; i < kTestNumBands; ++i) {
+    std::generate(rand_gen_buf.begin(), rand_gen_buf.end(), float_rand);
+    original_buffer.split_data_f()->SetDataForTesting(rand_gen_buf.data(),
+                                                      rand_gen_buf.size());
+    audio_buffer.split_data_f()->SetDataForTesting(rand_gen_buf.data(),
+                                                   rand_gen_buf.size());
+  }
+  enh_->ProcessRenderAudio(&audio_buffer);
+  for (size_t i = 0u; i < kTestNumBands; ++i) {
+    const float* original_ptr = original_buffer.split_bands_const_f(0)[i];
+    const float* audio_ptr = audio_buffer.split_bands_const_f(0)[i];
+    for (size_t j = initial_delay; j < kTestSplitFragmentSize; ++j) {
+      EXPECT_LT(std::fabs(original_ptr[j - initial_delay] - audio_ptr[j]),
+                kMaxTestError);
+    }
+  }
+}
+
+TEST(IntelligibilityEnhancerBitExactnessTest, DISABLED_Mono8kHz) {
+  const float kOutputReference[] = {-0.001892f, -0.003296f, -0.001953f};
+
+  RunBitexactnessTest(AudioProcessing::kSampleRate8kHz, 1, kOutputReference);
+}
+
+TEST(IntelligibilityEnhancerBitExactnessTest, DISABLED_Mono16kHz) {
+  const float kOutputReference[] = {-0.000977f, -0.003296f, -0.002441f};
+
+  RunBitexactnessTest(AudioProcessing::kSampleRate16kHz, 1, kOutputReference);
+}
+
+TEST(IntelligibilityEnhancerBitExactnessTest, DISABLED_Mono32kHz) {
+  const float kOutputReference[] = {0.003021f, -0.011780f, -0.008209f};
+
+  RunBitexactnessTest(AudioProcessing::kSampleRate32kHz, 1, kOutputReference);
+}
+
+TEST(IntelligibilityEnhancerBitExactnessTest, DISABLED_Mono48kHz) {
+  const float kOutputReference[] = {-0.027696f, -0.026253f, -0.018001f};
+
+  RunBitexactnessTest(AudioProcessing::kSampleRate48kHz, 1, kOutputReference);
+}
+
+TEST(IntelligibilityEnhancerBitExactnessTest, DISABLED_Stereo8kHz) {
+  const float kOutputReference[] = {0.021454f,  0.035919f, 0.026428f,
+                                    -0.000641f, 0.000366f, 0.000641f};
+
+  RunBitexactnessTest(AudioProcessing::kSampleRate8kHz, 2, kOutputReference);
+}
+
+TEST(IntelligibilityEnhancerBitExactnessTest, DISABLED_Stereo16kHz) {
+  const float kOutputReference[] = {0.021362f,  0.035736f,  0.023895f,
+                                    -0.001404f, -0.001465f, 0.000549f};
+
+  RunBitexactnessTest(AudioProcessing::kSampleRate16kHz, 2, kOutputReference);
+}
+
+TEST(IntelligibilityEnhancerBitExactnessTest, DISABLED_Stereo32kHz) {
+  const float kOutputReference[] = {0.030641f,  0.027406f,  0.028321f,
+                                    -0.001343f, -0.004578f, 0.000977f};
+
+  RunBitexactnessTest(AudioProcessing::kSampleRate32kHz, 2, kOutputReference);
+}
+
+TEST(IntelligibilityEnhancerBitExactnessTest, DISABLED_Stereo48kHz) {
+  const float kOutputReference[] = {-0.009276f, -0.001601f, -0.008255f,
+                                    -0.012975f, -0.015940f, -0.017820f};
+
+  RunBitexactnessTest(AudioProcessing::kSampleRate48kHz, 2, kOutputReference);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/intelligibility/intelligibility_utils.cc b/modules/audio_processing/intelligibility/intelligibility_utils.cc
new file mode 100644
index 0000000..b6917f4
--- /dev/null
+++ b/modules/audio_processing/intelligibility/intelligibility_utils.cc
@@ -0,0 +1,94 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/intelligibility/intelligibility_utils.h"
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include <algorithm>
+#include <limits>
+
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+
+namespace intelligibility {
+
+namespace {
+
+const float kMinFactor = 0.01f;
+const float kMaxFactor = 100.f;
+
+// Return |current| changed towards |target|, with the relative change being at
+// most |limit|.
+float UpdateFactor(float target, float current, float limit) {
+  const float gain = target / (current + std::numeric_limits<float>::epsilon());
+  const float clamped_gain = rtc::SafeClamp(gain, 1 - limit, 1 + limit);
+  return rtc::SafeClamp(current * clamped_gain, kMinFactor, kMaxFactor);
+}
+
+}  // namespace
+
+template<typename T>
+PowerEstimator<T>::PowerEstimator(size_t num_freqs, float decay)
+    : power_(num_freqs, 0.f), decay_(decay) {}
+
+template<typename T>
+void PowerEstimator<T>::Step(const T* data) {
+  for (size_t i = 0; i < power_.size(); ++i) {
+    power_[i] = decay_ * power_[i] +
+                (1.f - decay_) * std::abs(data[i]) * std::abs(data[i]);
+  }
+}
+
+template class PowerEstimator<float>;
+template class PowerEstimator<std::complex<float>>;
+
+GainApplier::GainApplier(size_t freqs, float relative_change_limit)
+    : num_freqs_(freqs),
+      relative_change_limit_(relative_change_limit),
+      target_(freqs, 1.f),
+      current_(freqs, 1.f) {}
+
+GainApplier::~GainApplier() {}
+
+void GainApplier::Apply(const std::complex<float>* in_block,
+                        std::complex<float>* out_block) {
+  for (size_t i = 0; i < num_freqs_; ++i) {
+    current_[i] = UpdateFactor(target_[i], current_[i], relative_change_limit_);
+    out_block[i] = sqrtf(fabsf(current_[i])) * in_block[i];
+  }
+}
+
+DelayBuffer::DelayBuffer(size_t delay, size_t num_channels)
+    : buffer_(num_channels, std::vector<float>(delay, 0.f)), read_index_(0u) {}
+
+DelayBuffer::~DelayBuffer() {}
+
+void DelayBuffer::Delay(float* const* data, size_t length) {
+  size_t sample_index = read_index_;
+  for (size_t i = 0u; i < buffer_.size(); ++i) {
+    sample_index = read_index_;
+    for (size_t j = 0u; j < length; ++j) {
+      float swap = data[i][j];
+      data[i][j] = buffer_[i][sample_index];
+      buffer_[i][sample_index] = swap;
+      if (++sample_index == buffer_.size()) {
+        sample_index = 0u;
+      }
+    }
+  }
+  read_index_ = sample_index;
+}
+
+}  // namespace intelligibility
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/intelligibility/intelligibility_utils.h b/modules/audio_processing/intelligibility/intelligibility_utils.h
new file mode 100644
index 0000000..4dc17d5
--- /dev/null
+++ b/modules/audio_processing/intelligibility/intelligibility_utils.h
@@ -0,0 +1,86 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_UTILS_H_
+#define MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_UTILS_H_
+
+#include <complex>
+#include <vector>
+
+namespace webrtc {
+
+namespace intelligibility {
+
+// Internal helper for computing the power of a stream of arrays.
+// The result is an array of power per position: the i-th power is the power of
+// the stream of data on the i-th positions in the input arrays.
+template <typename T>
+class PowerEstimator {
+ public:
+  // Construct an instance for the given input array length (|freqs|), with the
+  // appropriate parameters. |decay| is the forgetting factor.
+  PowerEstimator(size_t freqs, float decay);
+
+  // Add a new data point to the series.
+  void Step(const T* data);
+
+  // The current power array.
+  const std::vector<float>& power() { return power_; };
+
+ private:
+  // The current power array.
+  std::vector<float> power_;
+
+  const float decay_;
+};
+
+// Helper class for smoothing gain changes. On each application step, the
+// currently used gains are changed towards a set of settable target gains,
+// constrained by a limit on the relative changes.
+class GainApplier {
+ public:
+  GainApplier(size_t freqs, float relative_change_limit);
+
+  ~GainApplier();
+
+  // Copy |in_block| to |out_block|, multiplied by the current set of gains,
+  // and step the current set of gains towards the target set.
+  void Apply(const std::complex<float>* in_block,
+             std::complex<float>* out_block);
+
+  // Return the current target gain set. Modify this array to set the targets.
+  float* target() { return target_.data(); }
+
+ private:
+  const size_t num_freqs_;
+  const float relative_change_limit_;
+  std::vector<float> target_;
+  std::vector<float> current_;
+};
+
+// Helper class to delay a signal by an integer number of samples.
+class DelayBuffer {
+ public:
+  DelayBuffer(size_t delay, size_t num_channels);
+
+  ~DelayBuffer();
+
+  void Delay(float* const* data, size_t length);
+
+ private:
+  std::vector<std::vector<float>> buffer_;
+  size_t read_index_;
+};
+
+}  // namespace intelligibility
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_UTILS_H_
diff --git a/modules/audio_processing/intelligibility/intelligibility_utils_unittest.cc b/modules/audio_processing/intelligibility/intelligibility_utils_unittest.cc
new file mode 100644
index 0000000..fea394c
--- /dev/null
+++ b/modules/audio_processing/intelligibility/intelligibility_utils_unittest.cc
@@ -0,0 +1,79 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cmath>
+#include <complex>
+#include <vector>
+
+#include "modules/audio_processing/intelligibility/intelligibility_utils.h"
+#include "rtc_base/arraysize.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace intelligibility {
+
+std::vector<std::vector<std::complex<float>>> GenerateTestData(size_t freqs,
+                                                               size_t samples) {
+  std::vector<std::vector<std::complex<float>>> data(samples);
+  for (size_t i = 0; i < samples; ++i) {
+    for (size_t j = 0; j < freqs; ++j) {
+      const float val = 0.99f / ((i + 1) * (j + 1));
+      data[i].push_back(std::complex<float>(val, val));
+    }
+  }
+  return data;
+}
+
+// Tests PowerEstimator, for all power step types.
+TEST(IntelligibilityUtilsTest, TestPowerEstimator) {
+  const size_t kFreqs = 10;
+  const size_t kSamples = 100;
+  const float kDecay = 0.5f;
+  const std::vector<std::vector<std::complex<float>>> test_data(
+      GenerateTestData(kFreqs, kSamples));
+  PowerEstimator<std::complex<float>> power_estimator(kFreqs, kDecay);
+  EXPECT_EQ(0, power_estimator.power()[0]);
+
+  // Makes sure Step is doing something.
+  power_estimator.Step(test_data[0].data());
+  for (size_t i = 1; i < kSamples; ++i) {
+    power_estimator.Step(test_data[i].data());
+    for (size_t j = 0; j < kFreqs; ++j) {
+      EXPECT_GE(power_estimator.power()[j], 0.f);
+      EXPECT_LE(power_estimator.power()[j], 1.f);
+    }
+  }
+}
+
+// Tests gain applier.
+TEST(IntelligibilityUtilsTest, TestGainApplier) {
+  const size_t kFreqs = 10;
+  const size_t kSamples = 100;
+  const float kChangeLimit = 0.1f;
+  GainApplier gain_applier(kFreqs, kChangeLimit);
+  const std::vector<std::vector<std::complex<float>>> in_data(
+      GenerateTestData(kFreqs, kSamples));
+  std::vector<std::vector<std::complex<float>>> out_data(
+      GenerateTestData(kFreqs, kSamples));
+  for (size_t i = 0; i < kSamples; ++i) {
+    gain_applier.Apply(in_data[i].data(), out_data[i].data());
+    for (size_t j = 0; j < kFreqs; ++j) {
+      EXPECT_GT(out_data[i][j].real(), 0.f);
+      EXPECT_LT(out_data[i][j].real(), 1.f);
+      EXPECT_GT(out_data[i][j].imag(), 0.f);
+      EXPECT_LT(out_data[i][j].imag(), 1.f);
+    }
+  }
+}
+
+}  // namespace intelligibility
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/level_estimator_impl.cc b/modules/audio_processing/level_estimator_impl.cc
new file mode 100644
index 0000000..c937f84
--- /dev/null
+++ b/modules/audio_processing/level_estimator_impl.cc
@@ -0,0 +1,66 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/level_estimator_impl.h"
+
+#include "api/array_view.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/rms_level.h"
+
+namespace webrtc {
+
+LevelEstimatorImpl::LevelEstimatorImpl(rtc::CriticalSection* crit)
+    : crit_(crit), rms_(new RmsLevel()) {
+  RTC_DCHECK(crit);
+}
+
+LevelEstimatorImpl::~LevelEstimatorImpl() {}
+
+void LevelEstimatorImpl::Initialize() {
+  rtc::CritScope cs(crit_);
+  rms_->Reset();
+}
+
+void LevelEstimatorImpl::ProcessStream(AudioBuffer* audio) {
+  RTC_DCHECK(audio);
+  rtc::CritScope cs(crit_);
+  if (!enabled_) {
+    return;
+  }
+
+  for (size_t i = 0; i < audio->num_channels(); i++) {
+    rms_->Analyze(rtc::ArrayView<const int16_t>(audio->channels_const()[i],
+                                                audio->num_frames()));
+  }
+}
+
+int LevelEstimatorImpl::Enable(bool enable) {
+  rtc::CritScope cs(crit_);
+  if (enable && !enabled_) {
+    rms_->Reset();
+  }
+  enabled_ = enable;
+  return AudioProcessing::kNoError;
+}
+
+bool LevelEstimatorImpl::is_enabled() const {
+  rtc::CritScope cs(crit_);
+  return enabled_;
+}
+
+int LevelEstimatorImpl::RMS() {
+  rtc::CritScope cs(crit_);
+  if (!enabled_) {
+    return AudioProcessing::kNotEnabledError;
+  }
+
+  return rms_->Average();
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/level_estimator_impl.h b/modules/audio_processing/level_estimator_impl.h
new file mode 100644
index 0000000..901ae4c
--- /dev/null
+++ b/modules/audio_processing/level_estimator_impl.h
@@ -0,0 +1,47 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_LEVEL_ESTIMATOR_IMPL_H_
+#define MODULES_AUDIO_PROCESSING_LEVEL_ESTIMATOR_IMPL_H_
+
+#include <memory>
+
+#include "modules/audio_processing/include/audio_processing.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+
+namespace webrtc {
+
+class AudioBuffer;
+class RmsLevel;
+
+class LevelEstimatorImpl : public LevelEstimator {
+ public:
+  explicit LevelEstimatorImpl(rtc::CriticalSection* crit);
+  ~LevelEstimatorImpl() override;
+
+  // TODO(peah): Fold into ctor, once public API is removed.
+  void Initialize();
+  void ProcessStream(AudioBuffer* audio);
+
+  // LevelEstimator implementation.
+  int Enable(bool enable) override;
+  bool is_enabled() const override;
+  int RMS() override;
+
+ private:
+  rtc::CriticalSection* const crit_ = nullptr;
+  bool enabled_ RTC_GUARDED_BY(crit_) = false;
+  std::unique_ptr<RmsLevel> rms_ RTC_GUARDED_BY(crit_);
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(LevelEstimatorImpl);
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_LEVEL_ESTIMATOR_IMPL_H_
diff --git a/modules/audio_processing/level_estimator_unittest.cc b/modules/audio_processing/level_estimator_unittest.cc
new file mode 100644
index 0000000..94b84bb
--- /dev/null
+++ b/modules/audio_processing/level_estimator_unittest.cc
@@ -0,0 +1,93 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/level_estimator_impl.h"
+#include "modules/audio_processing/test/audio_buffer_tools.h"
+#include "modules/audio_processing/test/bitexactness_tools.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+const int kNumFramesToProcess = 1000;
+
+// Processes a specified amount of frames, verifies the results and reports
+// any errors.
+void RunBitexactnessTest(int sample_rate_hz,
+                         size_t num_channels,
+                         int rms_reference) {
+  rtc::CriticalSection crit_capture;
+  LevelEstimatorImpl level_estimator(&crit_capture);
+  level_estimator.Initialize();
+  level_estimator.Enable(true);
+
+  int samples_per_channel = rtc::CheckedDivExact(sample_rate_hz, 100);
+  StreamConfig capture_config(sample_rate_hz, num_channels, false);
+  AudioBuffer capture_buffer(
+      capture_config.num_frames(), capture_config.num_channels(),
+      capture_config.num_frames(), capture_config.num_channels(),
+      capture_config.num_frames());
+
+  test::InputAudioFile capture_file(
+      test::GetApmCaptureTestVectorFileName(sample_rate_hz));
+  std::vector<float> capture_input(samples_per_channel * num_channels);
+  for (size_t frame_no = 0; frame_no < kNumFramesToProcess; ++frame_no) {
+    ReadFloatSamplesFromStereoFile(samples_per_channel, num_channels,
+                                   &capture_file, capture_input);
+
+    test::CopyVectorToAudioBuffer(capture_config, capture_input,
+                                  &capture_buffer);
+
+    level_estimator.ProcessStream(&capture_buffer);
+  }
+
+  // Extract test results.
+  int rms = level_estimator.RMS();
+
+  // Compare the output to the reference.
+  EXPECT_EQ(rms_reference, rms);
+}
+
+}  // namespace
+
+TEST(LevelEstimatorBitExactnessTest, Mono8kHz) {
+  const int kRmsReference = 31;
+
+  RunBitexactnessTest(8000, 1, kRmsReference);
+}
+
+TEST(LevelEstimatorBitExactnessTest, Mono16kHz) {
+  const int kRmsReference = 31;
+
+  RunBitexactnessTest(16000, 1, kRmsReference);
+}
+
+TEST(LevelEstimatorBitExactnessTest, Mono32kHz) {
+  const int kRmsReference = 31;
+
+  RunBitexactnessTest(32000, 1, kRmsReference);
+}
+
+TEST(LevelEstimatorBitExactnessTest, Mono48kHz) {
+  const int kRmsReference = 31;
+
+  RunBitexactnessTest(48000, 1, kRmsReference);
+}
+
+TEST(LevelEstimatorBitExactnessTest, Stereo16kHz) {
+  const int kRmsReference = 30;
+
+  RunBitexactnessTest(16000, 2, kRmsReference);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/logging/apm_data_dumper.cc b/modules/audio_processing/logging/apm_data_dumper.cc
new file mode 100644
index 0000000..2f6a6d6
--- /dev/null
+++ b/modules/audio_processing/logging/apm_data_dumper.cc
@@ -0,0 +1,74 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+
+#include <sstream>
+
+#include "rtc_base/stringutils.h"
+
+// Check to verify that the define is properly set.
+#if !defined(WEBRTC_APM_DEBUG_DUMP) || \
+    (WEBRTC_APM_DEBUG_DUMP != 0 && WEBRTC_APM_DEBUG_DUMP != 1)
+#error "Set WEBRTC_APM_DEBUG_DUMP to either 0 or 1"
+#endif
+
+namespace webrtc {
+
+namespace {
+
+#if WEBRTC_APM_DEBUG_DUMP == 1
+std::string FormFileName(const char* name,
+                         int instance_index,
+                         int reinit_index,
+                         const std::string& suffix) {
+  std::stringstream ss;
+  ss << name << "_" << instance_index << "-" << reinit_index << suffix;
+  return ss.str();
+}
+#endif
+
+}  // namespace
+
+#if WEBRTC_APM_DEBUG_DUMP == 1
+ApmDataDumper::ApmDataDumper(int instance_index)
+    : instance_index_(instance_index) {}
+#else
+ApmDataDumper::ApmDataDumper(int instance_index) {}
+#endif
+
+ApmDataDumper::~ApmDataDumper() {}
+
+#if WEBRTC_APM_DEBUG_DUMP == 1
+FILE* ApmDataDumper::GetRawFile(const char* name) {
+  std::string filename =
+      FormFileName(name, instance_index_, recording_set_index_, ".dat");
+  auto& f = raw_files_[filename];
+  if (!f) {
+    f.reset(fopen(filename.c_str(), "wb"));
+  }
+  return f.get();
+}
+
+WavWriter* ApmDataDumper::GetWavFile(const char* name,
+                                     int sample_rate_hz,
+                                     int num_channels) {
+  std::string filename =
+      FormFileName(name, instance_index_, recording_set_index_, ".wav");
+  auto& f = wav_files_[filename];
+  if (!f) {
+    f.reset(new WavWriter(filename.c_str(), sample_rate_hz, num_channels));
+  }
+  return f.get();
+}
+
+#endif
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/logging/apm_data_dumper.h b/modules/audio_processing/logging/apm_data_dumper.h
new file mode 100644
index 0000000..d045027
--- /dev/null
+++ b/modules/audio_processing/logging/apm_data_dumper.h
@@ -0,0 +1,212 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_LOGGING_APM_DATA_DUMPER_H_
+#define MODULES_AUDIO_PROCESSING_LOGGING_APM_DATA_DUMPER_H_
+
+#include <stdio.h>
+
+#include <memory>
+#include <string>
+#include <unordered_map>
+
+#include "api/array_view.h"
+#include "common_audio/wav_file.h"
+#include "rtc_base/constructormagic.h"
+
+// Check to verify that the define is properly set.
+#if !defined(WEBRTC_APM_DEBUG_DUMP) || \
+    (WEBRTC_APM_DEBUG_DUMP != 0 && WEBRTC_APM_DEBUG_DUMP != 1)
+#error "Set WEBRTC_APM_DEBUG_DUMP to either 0 or 1"
+#endif
+
+namespace webrtc {
+
+#if WEBRTC_APM_DEBUG_DUMP == 1
+// Functor used to use as a custom deleter in the map of file pointers to raw
+// files.
+struct RawFileCloseFunctor {
+  void operator()(FILE* f) const { fclose(f); }
+};
+#endif
+
+// Class that handles dumping of variables into files.
+class ApmDataDumper {
+ public:
+  // Constructor that takes an instance index that may
+  // be used to distinguish data dumped from different
+  // instances of the code.
+  explicit ApmDataDumper(int instance_index);
+
+  ~ApmDataDumper();
+
+  // Reinitializes the data dumping such that new versions
+  // of all files being dumped to are created.
+  void InitiateNewSetOfRecordings() {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    ++recording_set_index_;
+#endif
+  }
+
+  // Methods for performing dumping of data of various types into
+  // various formats.
+  void DumpRaw(const char* name, double v) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    FILE* file = GetRawFile(name);
+    fwrite(&v, sizeof(v), 1, file);
+#endif
+  }
+
+  void DumpRaw(const char* name, size_t v_length, const double* v) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    FILE* file = GetRawFile(name);
+    fwrite(v, sizeof(v[0]), v_length, file);
+#endif
+  }
+
+  void DumpRaw(const char* name, rtc::ArrayView<const double> v) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    DumpRaw(name, v.size(), v.data());
+#endif
+  }
+
+  void DumpRaw(const char* name, float v) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    FILE* file = GetRawFile(name);
+    fwrite(&v, sizeof(v), 1, file);
+#endif
+  }
+
+  void DumpRaw(const char* name, size_t v_length, const float* v) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    FILE* file = GetRawFile(name);
+    fwrite(v, sizeof(v[0]), v_length, file);
+#endif
+  }
+
+  void DumpRaw(const char* name, rtc::ArrayView<const float> v) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    DumpRaw(name, v.size(), v.data());
+#endif
+  }
+
+  void DumpRaw(const char* name, bool v) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    DumpRaw(name, static_cast<int16_t>(v));
+#endif
+  }
+
+  void DumpRaw(const char* name, size_t v_length, const bool* v) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    FILE* file = GetRawFile(name);
+    for (size_t k = 0; k < v_length; ++k) {
+      int16_t value = static_cast<int16_t>(v[k]);
+      fwrite(&value, sizeof(value), 1, file);
+    }
+#endif
+  }
+
+  void DumpRaw(const char* name, rtc::ArrayView<const bool> v) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    DumpRaw(name, v.size(), v.data());
+#endif
+  }
+
+  void DumpRaw(const char* name, int16_t v) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    FILE* file = GetRawFile(name);
+    fwrite(&v, sizeof(v), 1, file);
+#endif
+  }
+
+  void DumpRaw(const char* name, size_t v_length, const int16_t* v) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    FILE* file = GetRawFile(name);
+    fwrite(v, sizeof(v[0]), v_length, file);
+#endif
+  }
+
+  void DumpRaw(const char* name, rtc::ArrayView<const int16_t> v) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    DumpRaw(name, v.size(), v.data());
+#endif
+  }
+
+  void DumpRaw(const char* name, int32_t v) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    FILE* file = GetRawFile(name);
+    fwrite(&v, sizeof(v), 1, file);
+#endif
+  }
+
+  void DumpRaw(const char* name, size_t v_length, const int32_t* v) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    FILE* file = GetRawFile(name);
+    fwrite(v, sizeof(v[0]), v_length, file);
+#endif
+  }
+
+  void DumpRaw(const char* name, size_t v) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    FILE* file = GetRawFile(name);
+    fwrite(&v, sizeof(v), 1, file);
+#endif
+  }
+
+  void DumpRaw(const char* name, size_t v_length, const size_t* v) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    FILE* file = GetRawFile(name);
+    fwrite(v, sizeof(v[0]), v_length, file);
+#endif
+  }
+
+  void DumpRaw(const char* name, rtc::ArrayView<const int32_t> v) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    DumpRaw(name, v.size(), v.data());
+#endif
+  }
+
+  void DumpWav(const char* name,
+               size_t v_length,
+               const float* v,
+               int sample_rate_hz,
+               int num_channels) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    WavWriter* file = GetWavFile(name, sample_rate_hz, num_channels);
+    file->WriteSamples(v, v_length);
+#endif
+  }
+
+  void DumpWav(const char* name,
+               rtc::ArrayView<const float> v,
+               int sample_rate_hz,
+               int num_channels) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    DumpWav(name, v.size(), v.data(), sample_rate_hz, num_channels);
+#endif
+  }
+
+ private:
+#if WEBRTC_APM_DEBUG_DUMP == 1
+  const int instance_index_;
+  int recording_set_index_ = 0;
+  std::unordered_map<std::string, std::unique_ptr<FILE, RawFileCloseFunctor>>
+      raw_files_;
+  std::unordered_map<std::string, std::unique_ptr<WavWriter>> wav_files_;
+
+  FILE* GetRawFile(const char* name);
+  WavWriter* GetWavFile(const char* name, int sample_rate_hz, int num_channels);
+#endif
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(ApmDataDumper);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_LOGGING_APM_DATA_DUMPER_H_
diff --git a/modules/audio_processing/low_cut_filter.cc b/modules/audio_processing/low_cut_filter.cc
new file mode 100644
index 0000000..5245c68
--- /dev/null
+++ b/modules/audio_processing/low_cut_filter.cc
@@ -0,0 +1,101 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/low_cut_filter.h"
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_processing/audio_buffer.h"
+
+namespace webrtc {
+namespace {
+const int16_t kFilterCoefficients8kHz[5] = {3798, -7596, 3798, 7807, -3733};
+const int16_t kFilterCoefficients[5] = {4012, -8024, 4012, 8002, -3913};
+}  // namespace
+
+class LowCutFilter::BiquadFilter {
+ public:
+  explicit BiquadFilter(int sample_rate_hz)
+      : ba_(sample_rate_hz == AudioProcessing::kSampleRate8kHz
+                ? kFilterCoefficients8kHz
+                : kFilterCoefficients) {
+    std::memset(x_, 0, sizeof(x_));
+    std::memset(y_, 0, sizeof(y_));
+  }
+
+  void Process(int16_t* data, size_t length) {
+    const int16_t* const ba = ba_;
+    int16_t* x = x_;
+    int16_t* y = y_;
+    int32_t tmp_int32 = 0;
+
+    for (size_t i = 0; i < length; i++) {
+      //  y[i] = b[0] * x[i] +  b[1] * x[i-1] +  b[2] * x[i-2]
+      //                     + -a[1] * y[i-1] + -a[2] * y[i-2];
+
+      tmp_int32 = y[1] * ba[3];   // -a[1] * y[i-1] (low part)
+      tmp_int32 += y[3] * ba[4];  // -a[2] * y[i-2] (low part)
+      tmp_int32 = (tmp_int32 >> 15);
+      tmp_int32 += y[0] * ba[3];  // -a[1] * y[i-1] (high part)
+      tmp_int32 += y[2] * ba[4];  // -a[2] * y[i-2] (high part)
+      tmp_int32 *= 2;
+
+      tmp_int32 += data[i] * ba[0];  // b[0] * x[0]
+      tmp_int32 += x[0] * ba[1];     // b[1] * x[i-1]
+      tmp_int32 += x[1] * ba[2];     // b[2] * x[i-2]
+
+      // Update state (input part).
+      x[1] = x[0];
+      x[0] = data[i];
+
+      // Update state (filtered part).
+      y[2] = y[0];
+      y[3] = y[1];
+      y[0] = static_cast<int16_t>(tmp_int32 >> 13);
+
+      y[1] = static_cast<int16_t>((tmp_int32 & 0x00001FFF) * 4);
+
+      // Rounding in Q12, i.e. add 2^11.
+      tmp_int32 += 2048;
+
+      // Saturate (to 2^27) so that the HP filtered signal does not overflow.
+      tmp_int32 = WEBRTC_SPL_SAT(static_cast<int32_t>(134217727), tmp_int32,
+                                 static_cast<int32_t>(-134217728));
+
+      // Convert back to Q0 and use rounding.
+      data[i] = static_cast<int16_t>(tmp_int32 >> 12);
+    }
+  }
+
+ private:
+  const int16_t* const ba_ = nullptr;
+  int16_t x_[2];
+  int16_t y_[4];
+};
+
+LowCutFilter::LowCutFilter(size_t channels, int sample_rate_hz) {
+  filters_.resize(channels);
+  for (size_t i = 0; i < channels; i++) {
+    filters_[i].reset(new BiquadFilter(sample_rate_hz));
+  }
+}
+
+LowCutFilter::~LowCutFilter() {}
+
+void LowCutFilter::Process(AudioBuffer* audio) {
+  RTC_DCHECK(audio);
+  RTC_DCHECK_GE(160, audio->num_frames_per_band());
+  RTC_DCHECK_EQ(filters_.size(), audio->num_channels());
+  for (size_t i = 0; i < filters_.size(); i++) {
+    filters_[i]->Process(audio->split_bands(i)[kBand0To8kHz],
+                         audio->num_frames_per_band());
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/low_cut_filter.h b/modules/audio_processing/low_cut_filter.h
new file mode 100644
index 0000000..fd4c6f1
--- /dev/null
+++ b/modules/audio_processing/low_cut_filter.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_LOW_CUT_FILTER_H_
+#define MODULES_AUDIO_PROCESSING_LOW_CUT_FILTER_H_
+
+#include <memory>
+#include <vector>
+
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class AudioBuffer;
+
+class LowCutFilter {
+ public:
+  LowCutFilter(size_t channels, int sample_rate_hz);
+  ~LowCutFilter();
+  void Process(AudioBuffer* audio);
+
+ private:
+  class BiquadFilter;
+  std::vector<std::unique_ptr<BiquadFilter>> filters_;
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(LowCutFilter);
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_LOW_CUT_FILTER_H_
diff --git a/modules/audio_processing/low_cut_filter_unittest.cc b/modules/audio_processing/low_cut_filter_unittest.cc
new file mode 100644
index 0000000..d98d665
--- /dev/null
+++ b/modules/audio_processing/low_cut_filter_unittest.cc
@@ -0,0 +1,682 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/low_cut_filter.h"
+#include "modules/audio_processing/test/audio_buffer_tools.h"
+#include "modules/audio_processing/test/bitexactness_tools.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+// Process one frame of data and produce the output.
+std::vector<float> ProcessOneFrame(const std::vector<float>& frame_input,
+                                   const StreamConfig& stream_config,
+                                   LowCutFilter* low_cut_filter) {
+  AudioBuffer audio_buffer(
+      stream_config.num_frames(), stream_config.num_channels(),
+      stream_config.num_frames(), stream_config.num_channels(),
+      stream_config.num_frames());
+
+  test::CopyVectorToAudioBuffer(stream_config, frame_input, &audio_buffer);
+  low_cut_filter->Process(&audio_buffer);
+  std::vector<float> frame_output;
+  test::ExtractVectorFromAudioBuffer(stream_config, &audio_buffer,
+                                     &frame_output);
+  return frame_output;
+}
+
+// Processes a specified amount of frames, verifies the results and reports
+// any errors.
+void RunBitexactnessTest(int sample_rate,
+                         int num_channels,
+                         const std::vector<float>& input,
+                         const std::vector<float>& reference) {
+  const StreamConfig stream_config(sample_rate, num_channels, false);
+  LowCutFilter low_cut_filter(num_channels, sample_rate);
+
+  std::vector<float> output;
+  const size_t num_frames_to_process =
+      input.size() /
+      (stream_config.num_frames() * stream_config.num_channels());
+  for (size_t frame_no = 0; frame_no < num_frames_to_process; ++frame_no) {
+    std::vector<float> frame_input(
+        input.begin() +
+            stream_config.num_frames() * stream_config.num_channels() *
+                frame_no,
+        input.begin() +
+            stream_config.num_frames() * stream_config.num_channels() *
+                (frame_no + 1));
+
+    output = ProcessOneFrame(frame_input, stream_config, &low_cut_filter);
+  }
+
+  // Form vector to compare the reference to. Only the last frame processed
+  // is compared in order not having to specify all preceeding frames as
+  // inputs. As the algorithm being tested has a memory, testing only
+  // the last frame implicitly also tests the preceeding frames.
+  const size_t reference_frame_length =
+      reference.size() / stream_config.num_channels();
+  std::vector<float> output_to_verify;
+  for (size_t channel_no = 0; channel_no < stream_config.num_channels();
+       ++channel_no) {
+    output_to_verify.insert(
+        output_to_verify.end(),
+        output.begin() + channel_no * stream_config.num_frames(),
+        output.begin() + channel_no * stream_config.num_frames() +
+            reference_frame_length);
+  }
+
+  const float kElementErrorBound = 1.0f / 32768.0f;
+  EXPECT_TRUE(test::VerifyDeinterleavedArray(
+      reference_frame_length, num_channels, reference, output_to_verify,
+      kElementErrorBound));
+}
+
+// Method for forming a vector out of an array.
+// TODO(peah): Remove once braced initialization is allowed.
+std::vector<float> CreateVector(const rtc::ArrayView<const float>& array_view) {
+  std::vector<float> v;
+  for (auto value : array_view) {
+    v.push_back(value);
+  }
+  return v;
+}
+}  // namespace
+
+TEST(LowCutFilterBitExactnessTest, Mono8kHzInitial) {
+  const float kReferenceInput[] = {
+      0.153442f,  -0.436920f, -0.057602f, -0.141767f, 0.108608f,  0.116834f,
+      0.114979f,  -0.103151f, -0.169925f, -0.167180f, 0.242024f,  -0.525426f,
+      -0.058781f, 0.076667f,  -0.185095f, 0.135319f,  -0.020223f, -0.266058f,
+      0.045755f,  -0.076044f, -0.116221f, -0.201698f, 0.017423f,  -0.523475f,
+      -0.112949f, -0.154125f, -0.258572f, 0.185075f,  -0.208205f, 0.153298f,
+      0.276703f,  -0.044481f, 0.078771f,  0.181337f,  -0.022962f, 0.153365f,
+      -0.358004f, 0.314864f,  -0.280593f, -0.518572f, 0.392579f,  -0.017786f,
+      0.127293f,  -0.103003f, -0.289389f, -0.871355f, 0.177583f,  -0.081290f,
+      -0.055957f, 0.115011f,  -0.402460f, -0.206836f, 0.325328f,  0.169526f,
+      -0.363311f, -0.624742f, -0.161979f, 0.060679f,  0.267214f,  0.026576f,
+      -0.318235f, 0.086812f,  -0.332419f, -0.272485f, -0.185369f, -0.348598f,
+      -0.076833f, -0.255184f, -0.081007f, -0.131121f, -0.116196f, -0.142780f,
+      0.349705f,  0.173054f,  0.016750f,  -0.415957f, -0.461001f, -0.557111f,
+      0.738711f,  0.275720f};
+
+  const float kReference[] = {0.142277f,  -0.418518f, -0.028229f, -0.102112f,
+                              0.141270f,  0.137791f,  0.124577f,  -0.088715f,
+                              -0.142273f, -0.125885f, 0.266640f,  -0.468079f};
+
+  RunBitexactnessTest(
+      8000, 1, CreateVector(rtc::ArrayView<const float>(kReferenceInput)),
+      CreateVector(rtc::ArrayView<const float>(kReference)));
+}
+
+TEST(LowCutFilterBitExactnessTest, Mono8kHzConverged) {
+  const float kReferenceInput[] = {
+      0.153442f,  -0.436920f, -0.057602f, -0.141767f, 0.108608f,  0.116834f,
+      0.114979f,  -0.103151f, -0.169925f, -0.167180f, 0.242024f,  -0.525426f,
+      -0.058781f, 0.076667f,  -0.185095f, 0.135319f,  -0.020223f, -0.266058f,
+      0.045755f,  -0.076044f, -0.116221f, -0.201698f, 0.017423f,  -0.523475f,
+      -0.112949f, -0.154125f, -0.258572f, 0.185075f,  -0.208205f, 0.153298f,
+      0.276703f,  -0.044481f, 0.078771f,  0.181337f,  -0.022962f, 0.153365f,
+      -0.358004f, 0.314864f,  -0.280593f, -0.518572f, 0.392579f,  -0.017786f,
+      0.127293f,  -0.103003f, -0.289389f, -0.871355f, 0.177583f,  -0.081290f,
+      -0.055957f, 0.115011f,  -0.402460f, -0.206836f, 0.325328f,  0.169526f,
+      -0.363311f, -0.624742f, -0.161979f, 0.060679f,  0.267214f,  0.026576f,
+      -0.318235f, 0.086812f,  -0.332419f, -0.272485f, -0.185369f, -0.348598f,
+      -0.076833f, -0.255184f, -0.081007f, -0.131121f, -0.116196f, -0.142780f,
+      0.349705f,  0.173054f,  0.016750f,  -0.415957f, -0.461001f, -0.557111f,
+      0.738711f,  0.275720f,  0.072868f,  -0.276249f, -0.325055f, 0.155285f,
+      0.443784f,  -0.480153f, -0.127428f, -0.023901f, -0.564837f, 0.238538f,
+      -0.117578f, 0.542205f,  -0.110840f, 0.116025f,  -0.323939f, -0.177182f,
+      -0.331395f, 0.111316f,  0.369140f,  -0.168329f, 0.123736f,  -0.143013f,
+      0.028953f,  0.339200f,  0.034107f,  -0.294000f, -0.243034f, -0.048168f,
+      -0.054348f, -0.245504f, 0.051228f,  0.359128f,  -0.071220f, -0.058006f,
+      -0.624248f, -0.219615f, -0.395067f, -0.109518f, 0.149032f,  0.431928f,
+      0.509968f,  -0.033143f, -0.090793f, 0.231809f,  0.138986f,  0.216989f,
+      0.220683f,  -0.419745f, 0.153222f,  -0.025956f, -0.215572f, -0.196671f,
+      0.363361f,  -0.229604f, -0.350704f, 0.060875f,  0.570160f,  0.007246f,
+      0.087419f,  -0.266043f, 0.474729f,  0.035441f,  0.150312f,  -0.269962f,
+      0.242166f,  0.110343f,  -0.327788f, 0.011268f,  -0.127769f, 0.030978f,
+      -0.071045f, -0.053847f, -0.292886f, -0.091670f, 0.217351f,  0.494707f,
+      -0.329069f, 0.674122f,  0.432724f,  0.047781f,  -0.085408f, -0.198105f,
+      0.236135f,  -0.196957f, -0.130968f, 0.250552f,  0.123613f,  0.254275f,
+      0.143118f,  -0.113676f, -0.145703f, 0.225812f,  -0.190318f, 0.336481f,
+      0.224206f,  0.081584f,  0.000915f,  0.103672f,  1.000000f,  -0.031882f,
+      -0.441377f, 0.543033f,  0.172924f,  -0.183717f, 0.742153f,  0.156224f,
+      0.083422f,  -0.220560f, -0.301964f, -0.501439f, -0.119920f, -0.298610f,
+      0.183673f,  -0.090064f, 0.501603f,  0.428330f,  0.046506f,  -0.080178f,
+      0.326700f,  -0.325096f, 0.191029f,  -0.189729f, -0.113513f, -0.190492f,
+      0.163221f,  -0.220631f, -0.301576f, 0.156799f,  -0.120065f, 0.102529f,
+      -0.099779f, 0.076429f,  -0.727157f, 0.132097f,  0.525583f,  0.294694f,
+      0.258287f,  -0.067977f, 0.051323f,  0.069258f,  0.027332f,  -0.235482f,
+      -0.099882f, -0.049558f, -0.136291f, 0.237288f,  0.719757f,  -0.375235f,
+      0.036391f,  -0.408991f, 0.369330f,  0.399785f,  -0.471419f, 0.551138f,
+      -0.307569f, 0.064315f,  0.311605f,  0.041736f,  0.650943f,  0.780496f};
+
+  const float kReference[] = {-0.173553f, -0.265778f, 0.158757f,  -0.259399f,
+                              -0.176361f, 0.192877f,  0.056825f,  0.171453f,
+                              0.050752f,  -0.194580f, -0.208679f, 0.153722f};
+
+  RunBitexactnessTest(
+      8000, 1, CreateVector(rtc::ArrayView<const float>(kReferenceInput)),
+      CreateVector(rtc::ArrayView<const float>(kReference)));
+}
+
+TEST(LowCutFilterBitExactnessTest, Stereo8kHzInitial) {
+  const float kReferenceInput[] = {
+      0.790847f,  0.165037f,  0.165494f,  0.709852f,  -0.930269f, 0.770840f,
+      -0.184538f, -0.927236f, 0.492296f,  -0.690342f, -0.712183f, 0.211918f,
+      -0.491038f, -0.351692f, -0.196418f, -0.187253f, -0.227618f, 0.219604f,
+      -0.666219f, -0.623816f, -0.810742f, -0.353627f, 0.539194f,  -0.531764f,
+      0.480731f,  0.385637f,  0.648156f,  0.655955f,  -0.413264f, -0.381262f,
+      0.046060f,  -0.349402f, 0.663685f,  0.620590f,  0.113997f,  -0.474072f,
+      0.361132f,  -0.532694f, -0.087149f, -0.230866f, 0.077203f,  0.983407f,
+      0.510441f,  0.960910f,  -0.530435f, 0.057118f,  -0.897128f, 0.513751f,
+      0.203960f,  0.714337f,  0.976554f,  0.858969f,  -0.180970f, -0.999317f,
+      0.081757f,  -0.584539f, -0.561433f, -0.348387f, -0.808101f, 0.495067f,
+      0.497018f,  0.086599f,  -0.323735f, 0.664667f,  0.105144f,  0.915086f,
+      0.785667f,  -0.286993f, 0.092804f,  -0.306636f, 0.245606f,  0.593249f,
+      0.491750f,  -0.748928f, 0.644788f,  -0.949699f, -0.171142f, 0.462815f,
+      0.562748f,  -0.265428f, 0.489736f,  0.784534f,  -0.514793f, -0.740806f,
+      -0.549864f, -0.299972f, -0.425831f, 0.854976f,  -0.897372f, 0.185334f,
+      -0.674202f, 0.676812f,  -0.664878f, 0.004401f,  0.998659f,  -0.289186f,
+      -0.905845f, -0.572679f, -0.204322f, -0.332664f, -0.540795f, 0.872240f,
+      0.366378f,  0.924228f,  -0.124054f, 0.880673f,  -0.988331f, 0.220614f,
+      0.602152f,  -0.534037f, 0.864937f,  0.526526f,  0.652899f,  0.146927f,
+      0.585163f,  -0.341918f, -0.553076f, -0.375227f, 0.169047f,  0.659828f,
+      -0.419075f, -0.194891f, 0.724115f,  0.229479f,  0.982376f,  -0.592602f,
+      0.654418f,  0.351723f,  -0.502101f, -0.048429f, -0.201850f, 0.198876f,
+      0.601046f,  -0.789862f, 0.642884f,  0.682173f,  -0.290988f, -0.139861f,
+      0.144478f,  0.401649f,  0.484940f,  0.515768f,  -0.221742f, -0.141395f,
+      0.912689f,  0.145943f,  0.699444f,  -0.447309f, 0.244647f,  0.176723f,
+      0.926937f,  -0.828195f, 0.000998f,  0.043179f,  -0.819668f, 0.809333f,
+      0.768778f,  -0.122021f, 0.563445f,  -0.703070f};
+
+  const float kReference[] = {
+      0.733329f,  0.084109f,  0.072695f,  0.566210f,  -1.000000f, 0.652120f,
+      -0.297424f, -0.964020f, 0.438551f,  -0.698364f, -0.654449f, 0.266243f,
+      0.454115f,  0.684774f,  -0.586823f, -0.747345f, -0.503021f, -0.222961f,
+      -0.314972f, 0.907224f,  -0.796265f, 0.284280f,  -0.533417f, 0.773980f};
+
+  RunBitexactnessTest(
+      8000, 2, CreateVector(rtc::ArrayView<const float>(kReferenceInput)),
+      CreateVector(rtc::ArrayView<const float>(kReference)));
+}
+
+TEST(LowCutFilterBitExactnessTest, Stereo8kHzConverged) {
+  const float kReferenceInput[] = {
+      -0.502095f, -0.227154f, -0.137133f, 0.661773f,  0.649294f,  -0.094003f,
+      -0.238880f, 0.851737f,  0.481687f,  0.475266f,  0.893832f,  0.020199f,
+      0.583758f,  -0.095653f, 0.698397f,  -0.219138f, 0.476753f,  0.952877f,
+      0.046598f,  -0.140169f, -0.585684f, -0.353197f, -0.778260f, -0.249580f,
+      -0.340192f, -0.315790f, 0.634238f,  0.063371f,  0.042244f,  0.548619f,
+      -0.759474f, 0.250900f,  -0.306703f, -0.330761f, 0.149233f,  0.727875f,
+      -0.602874f, 0.344902f,  0.803663f,  -0.601686f, -0.403432f, -0.006959f,
+      0.779808f,  0.002829f,  -0.446010f, 0.067916f,  0.148499f,  -0.174391f,
+      -0.970473f, 0.405530f,  0.013494f,  -0.237468f, -0.870137f, -0.282840f,
+      -0.531498f, -0.592992f, 0.627559f,  -0.213131f, -0.892850f, -0.249897f,
+      0.549988f,  -0.669405f, 0.824438f,  -0.361588f, -0.340441f, -0.591529f,
+      0.534429f,  -0.860054f, 0.900068f,  -0.683580f, -0.427108f, 0.374258f,
+      -0.717700f, 0.024173f,  0.442654f,  0.857690f,  0.464208f,  0.499696f,
+      -0.185361f, -0.521017f, 0.041701f,  -0.561845f, 0.684776f,  0.325866f,
+      0.632471f,  0.587755f,  -0.061790f, -0.380950f, 0.375158f,  0.973704f,
+      0.539868f,  0.659162f,  0.412170f,  0.190673f,  0.505748f,  -0.006556f,
+      0.730265f,  -0.863945f, 0.937092f,  -0.802487f, 0.093954f,  -0.194060f,
+      -0.785920f, 0.448332f,  0.227365f,  0.565936f,  0.133241f,  0.622638f,
+      0.153552f,  0.888058f,  0.742904f,  0.015204f,  0.577646f,  -0.053939f,
+      0.657603f,  -0.355037f, 0.952293f,  -0.443578f, -0.854338f, 0.502447f,
+      0.662377f,  0.844676f,  -0.345951f, 0.608139f,  0.076501f,  -0.073410f,
+      0.641501f,  0.903813f,  -0.847454f, 0.417342f,  -0.530147f, -0.202209f,
+      -0.463751f, 0.665027f,  0.990748f,  0.299502f,  0.407906f,  0.864606f,
+      0.375305f,  0.136708f,  -0.238305f, 0.269159f,  -0.273543f, -0.184761f,
+      -0.262601f, -0.063202f, 0.006828f,  0.821072f,  -0.587138f, -0.322793f,
+      0.148251f,  -0.026135f, -0.475562f, 0.159187f,  0.756655f,  -0.878100f,
+      -0.118247f, -0.831484f, 0.126475f,  0.078621f,  0.536116f,  -0.533819f,
+      0.174723f,  -0.082052f, 0.721963f,  0.321672f,  -0.292242f, -0.305627f,
+      -0.492564f, 0.905056f,  -0.403598f, -0.683188f, -0.277406f, 0.483258f,
+      0.411800f,  0.401784f,  -0.987548f, -0.251309f, 0.802991f,  -0.363310f,
+      0.194166f,  -0.404410f, -0.749971f, -0.223289f, 0.635375f,  0.962351f,
+      0.723980f,  -0.832358f, -0.324576f, -0.527742f, -0.364389f, 0.968897f,
+      0.096502f,  0.498503f,  0.683703f,  -0.666221f, 0.806195f,  -0.789752f,
+      0.490186f,  0.458744f,  0.434939f,  -0.733136f, -0.108422f, 0.017574f,
+      0.060981f,  0.719434f,  0.355450f,  0.611677f,  0.062486f,  0.911792f,
+      -0.866646f, 0.083036f,  -0.436679f, -0.038199f, 0.369728f,  -0.583483f,
+      0.216322f,  -0.347648f, 0.761694f,  -0.733211f, -0.795184f, 0.918234f,
+      -0.694196f, -0.694924f, -0.688895f, -0.820861f, -0.091151f, 0.337791f,
+      0.662603f,  0.580470f,  0.425422f,  -0.054805f, 0.417176f,  0.916119f,
+      0.011551f,  -0.389894f, 0.579622f,  -0.527226f, -0.531394f, -0.070601f,
+      0.238774f,  0.230659f,  -0.754752f, -0.752413f, -0.431082f, 0.471466f,
+      -0.177384f, 0.657964f,  0.870228f,  -0.201867f, -0.895577f, 0.142372f,
+      0.495340f,  -0.359513f, -0.014131f, -0.556694f, 0.878547f,  -0.035389f,
+      0.079992f,  -0.557886f, -0.808110f, -0.879669f, 0.639018f,  0.542957f,
+      -0.608609f, 0.790236f,  0.368600f,  0.313693f,  0.980762f,  -0.932616f,
+      -0.151493f, -0.020033f, 0.167009f,  -0.833461f, 0.320309f,  -0.895390f,
+      0.113661f,  0.424050f,  -0.024179f, 0.235201f,  -0.572445f, 0.291317f,
+      -0.238715f, -0.792574f, -0.244977f, -0.474278f, -0.517429f, 0.245848f,
+      0.045856f,  -0.173525f, -0.564416f, 0.717107f,  0.722017f,  -0.432122f,
+      0.230786f,  0.558979f,  0.909695f,  0.839206f,  -0.230369f, -0.674714f,
+      0.593503f,  -0.772366f, -0.682351f, -0.288344f, 0.695517f,  0.165562f,
+      0.172355f,  0.851676f,  0.150157f,  -0.980045f, 0.618755f,  0.217617f,
+      -0.040173f, -0.463120f, -0.483807f, -0.037981f, -0.545317f, -0.902795f,
+      -0.661516f, -0.483107f, -0.604180f, 0.211386f,  0.647407f,  0.621230f,
+      0.604474f,  0.416227f,  0.718756f,  0.562169f,  -0.592406f, 0.986686f,
+      -0.812751f, 0.301237f,  -0.569647f, -0.512254f, -0.320624f, -0.604275f,
+      0.013667f,  0.901516f,  -0.210786f, 0.168930f,  0.213074f,  0.429286f,
+      -0.196927f, 0.717382f,  0.840970f,  0.501678f,  -0.428817f, 0.593632f,
+      -0.714468f, 0.009100f,  0.221376f,  0.407593f,  -0.233320f, 0.457367f,
+      0.774569f,  -0.888303f, -0.723567f, 0.726130f,  -0.156507f, -0.177372f,
+      0.918283f,  0.500491f,  0.961994f,  -0.532968f, -0.807546f, -0.230836f,
+      0.000545f,  0.140512f,  0.953263f,  -0.014290f, -0.198234f, 0.989981f,
+      -0.478004f, 0.330649f,  0.928513f,  0.342302f,  -0.401650f, 0.062253f,
+      -0.997074f, 0.767578f,  -0.191232f, -0.397589f, 0.901163f,  -0.078704f,
+      -0.424705f, -0.830747f, 0.164363f,  -0.693863f, -0.853811f, 0.161130f,
+      -0.425970f, -0.276160f, 0.449649f,  0.716623f,  -0.304169f, 0.923491f,
+      0.907138f,  -0.587925f, 0.536490f,  0.231064f,  0.837845f,  0.205075f,
+      0.404276f,  0.487350f,  -0.229795f, -0.496992f, -0.926481f, -0.055754f,
+      0.290145f,  -0.442060f, 0.035722f,  -0.508667f, -0.404984f, 0.300948f,
+      0.782787f,  0.722213f,  -0.580170f, -0.201812f, 0.775766f,  -0.486944f,
+      0.933603f,  0.238315f,  -0.669308f, 0.652398f,  0.311386f,  0.092905f,
+      -0.497341f, -0.919687f, -0.533249f, -0.277774f, 0.266910f,  0.972196f,
+      -0.585687f, 0.514168f,  0.772656f,  -0.055540f, -0.682173f, 0.621842f,
+      -0.046984f, -0.767425f, 0.751441f,  0.270373f,  -0.805437f, 0.816879f,
+      -0.929968f, -0.920501f, 0.977136f,  0.372363f,  -0.246622f, 0.008649f,
+      0.526991f,  -0.902250f, 0.451855f,  0.402656f,  -0.082218f, 0.164590f,
+      -0.321820f, -0.658749f, -0.201613f, 0.839554f,  -0.547909f, -0.277987f,
+      -0.350876f, -0.832836f, 0.025331f,  0.665730f,  0.809227f,  0.447192f,
+      -0.234008f, -0.403966f, 0.383423f,  0.760914f,  0.849097f,  -0.837494f,
+      -0.034654f, -0.743470f, -0.494178f, 0.767923f,  -0.607446f, -0.757293f};
+
+  const float kReference[] = {
+      -0.544495f, 0.264199f, 0.647938f,  0.565569f,  0.496231f,  0.271340f,
+      0.519944f,  0.318094f, -0.792999f, 0.733421f,  -1.000000f, 0.103977f,
+      0.981719f,  0.314859f, 0.476882f,  0.514267f,  -0.196381f, -0.425781f,
+      -0.783081f, 0.101108f, 0.419782f,  -0.291718f, 0.183355f,  -0.332489f};
+
+  RunBitexactnessTest(
+      8000, 2, CreateVector(rtc::ArrayView<const float>(kReferenceInput)),
+      CreateVector(rtc::ArrayView<const float>(kReference)));
+}
+
+TEST(LowCutFilterBitExactnessTest, Mono16kHzInitial) {
+  const float kReferenceInput[] = {
+      0.150254f,  0.512488f,  -0.631245f, 0.240938f,  0.089080f,  -0.365440f,
+      -0.121169f, 0.095748f,  1.000000f,  0.773932f,  -0.377232f, 0.848124f,
+      0.202718f,  -0.017621f, 0.199738f,  -0.057279f, -0.034693f, 0.416303f,
+      0.393761f,  0.396041f,  0.187653f,  -0.337438f, 0.200436f,  0.455577f,
+      0.136624f,  0.289150f,  0.203131f,  -0.084798f, 0.082124f,  -0.220010f,
+      0.248266f,  -0.320554f, -0.298701f, -0.226218f, -0.822794f, 0.401962f,
+      0.090876f,  -0.210968f, 0.382936f,  -0.478291f, -0.028572f, -0.067474f,
+      0.089204f,  0.087430f,  -0.241695f, -0.008398f, -0.046076f, 0.175416f,
+      0.305518f,  0.309992f,  -0.241352f, 0.021618f,  -0.339291f, -0.311173f,
+      -0.001914f, 0.428301f,  -0.215087f, 0.103784f,  -0.063041f, 0.312250f,
+      -0.304344f, 0.009098f,  0.154406f,  0.307571f,  0.431537f,  0.024014f,
+      -0.416832f, -0.207440f, -0.296664f, 0.656846f,  -0.172033f, 0.209054f,
+      -0.053772f, 0.248326f,  -0.213741f, -0.391871f, -0.397490f, 0.136428f,
+      -0.049568f, -0.054788f, 0.396633f,  0.081485f,  0.055279f,  0.443690f,
+      -0.224812f, 0.194675f,  0.233369f,  -0.068107f, 0.060270f,  -0.325801f,
+      -0.320801f, 0.029308f,  0.201837f,  0.722528f,  -0.186366f, 0.052351f,
+      -0.023053f, -0.540192f, -0.122671f, -0.501532f, 0.234847f,  -0.248165f,
+      0.027971f,  -0.152171f, 0.084820f,  -0.167764f, 0.136923f,  0.206619f,
+      0.478395f,  -0.054249f, -0.597574f, -0.234627f, 0.378548f,  -0.299619f,
+      0.268543f,  0.034666f,  0.401492f,  -0.547983f, -0.055248f, -0.337538f,
+      0.812657f,  0.230611f,  0.385360f,  -0.295713f, -0.130957f, -0.076143f,
+      0.306960f,  -0.077653f, 0.196049f,  -0.573390f, -0.098885f, -0.230155f,
+      -0.440716f, 0.141956f,  0.078802f,  0.009356f,  -0.372703f, 0.315083f,
+      0.097859f,  -0.083575f, 0.006397f,  -0.073216f, -0.489105f, -0.079827f,
+      -0.232329f, -0.273644f, -0.323162f, -0.149105f, -0.559646f, 0.269458f,
+      0.145333f,  -0.005597f, -0.009717f, -0.223051f, 0.284676f,  -0.037228f,
+      -0.199679f, 0.377651f,  -0.062813f, -0.164607f};
+
+  const float kReference[] = {0.147160f, 0.495163f,  -0.648346f, 0.234931f,
+                              0.075289f, -0.373779f, -0.117676f, 0.100345f,
+                              0.981719f, 0.714896f,  -0.447357f, 0.770867f};
+
+  RunBitexactnessTest(
+      16000, 1, CreateVector(rtc::ArrayView<const float>(kReferenceInput)),
+      CreateVector(rtc::ArrayView<const float>(kReference)));
+}
+
+TEST(LowCutFilterBitExactnessTest, Mono16kHzConverged) {
+  const float kReferenceInput[] = {
+      0.150254f,  0.512488f,  -0.631245f, 0.240938f,  0.089080f,  -0.365440f,
+      -0.121169f, 0.095748f,  1.000000f,  0.773932f,  -0.377232f, 0.848124f,
+      0.202718f,  -0.017621f, 0.199738f,  -0.057279f, -0.034693f, 0.416303f,
+      0.393761f,  0.396041f,  0.187653f,  -0.337438f, 0.200436f,  0.455577f,
+      0.136624f,  0.289150f,  0.203131f,  -0.084798f, 0.082124f,  -0.220010f,
+      0.248266f,  -0.320554f, -0.298701f, -0.226218f, -0.822794f, 0.401962f,
+      0.090876f,  -0.210968f, 0.382936f,  -0.478291f, -0.028572f, -0.067474f,
+      0.089204f,  0.087430f,  -0.241695f, -0.008398f, -0.046076f, 0.175416f,
+      0.305518f,  0.309992f,  -0.241352f, 0.021618f,  -0.339291f, -0.311173f,
+      -0.001914f, 0.428301f,  -0.215087f, 0.103784f,  -0.063041f, 0.312250f,
+      -0.304344f, 0.009098f,  0.154406f,  0.307571f,  0.431537f,  0.024014f,
+      -0.416832f, -0.207440f, -0.296664f, 0.656846f,  -0.172033f, 0.209054f,
+      -0.053772f, 0.248326f,  -0.213741f, -0.391871f, -0.397490f, 0.136428f,
+      -0.049568f, -0.054788f, 0.396633f,  0.081485f,  0.055279f,  0.443690f,
+      -0.224812f, 0.194675f,  0.233369f,  -0.068107f, 0.060270f,  -0.325801f,
+      -0.320801f, 0.029308f,  0.201837f,  0.722528f,  -0.186366f, 0.052351f,
+      -0.023053f, -0.540192f, -0.122671f, -0.501532f, 0.234847f,  -0.248165f,
+      0.027971f,  -0.152171f, 0.084820f,  -0.167764f, 0.136923f,  0.206619f,
+      0.478395f,  -0.054249f, -0.597574f, -0.234627f, 0.378548f,  -0.299619f,
+      0.268543f,  0.034666f,  0.401492f,  -0.547983f, -0.055248f, -0.337538f,
+      0.812657f,  0.230611f,  0.385360f,  -0.295713f, -0.130957f, -0.076143f,
+      0.306960f,  -0.077653f, 0.196049f,  -0.573390f, -0.098885f, -0.230155f,
+      -0.440716f, 0.141956f,  0.078802f,  0.009356f,  -0.372703f, 0.315083f,
+      0.097859f,  -0.083575f, 0.006397f,  -0.073216f, -0.489105f, -0.079827f,
+      -0.232329f, -0.273644f, -0.323162f, -0.149105f, -0.559646f, 0.269458f,
+      0.145333f,  -0.005597f, -0.009717f, -0.223051f, 0.284676f,  -0.037228f,
+      -0.199679f, 0.377651f,  -0.062813f, -0.164607f, -0.082091f, -0.236957f,
+      -0.313025f, 0.705903f,  0.462637f,  0.085942f,  -0.351308f, -0.241859f,
+      -0.049333f, 0.221165f,  -0.372235f, -0.651092f, -0.404957f, 0.093201f,
+      0.109366f,  0.126224f,  -0.036409f, 0.051333f,  -0.133063f, 0.240896f,
+      -0.380532f, 0.127160f,  -0.237176f, -0.093586f, 0.154478f,  0.290379f,
+      -0.312329f, 0.352297f,  0.184480f,  -0.018965f, -0.054555f, -0.060811f,
+      -0.084705f, 0.006440f,  0.014333f,  0.230847f,  0.426721f,  0.130481f,
+      -0.058605f, 0.174712f,  0.051204f,  -0.287773f, 0.265265f,  0.085810f,
+      0.037775f,  0.143988f,  0.073051f,  -0.263103f, -0.045366f, -0.040816f,
+      -0.148673f, 0.470072f,  -0.244727f, -0.135204f, -0.198973f, -0.328139f,
+      -0.053722f, -0.076590f, 0.427586f,  -0.069591f, -0.297399f, 0.448094f,
+      0.345037f,  -0.064170f, -0.420903f, -0.124253f, -0.043578f, 0.077149f,
+      -0.072983f, 0.123916f,  0.109517f,  -0.349508f, -0.264912f, -0.207106f,
+      -0.141912f, -0.089586f, 0.003485f,  -0.846518f, -0.127715f, 0.347208f,
+      -0.298095f, 0.260935f,  0.097899f,  -0.008106f, 0.050987f,  -0.437362f,
+      -0.023625f, 0.448230f,  0.027484f,  0.011562f,  -0.205167f, -0.008611f,
+      0.064930f,  0.119156f,  -0.104183f, -0.066078f, 0.565530f,  -0.631108f,
+      0.623029f,  0.094334f,  0.279472f,  -0.465059f, -0.164888f, -0.077706f,
+      0.118130f,  -0.466746f, 0.131800f,  -0.338936f, 0.018497f,  0.182304f,
+      0.091398f,  0.302547f,  0.281153f,  -0.181899f, 0.071836f,  -0.263911f,
+      -0.369380f, 0.258447f,  0.000014f,  -0.015347f, 0.254619f,  0.166159f,
+      0.097865f,  0.349389f,  0.259834f,  0.067003f,  -0.192925f, -0.182080f,
+      0.333139f,  -0.450434f, -0.006836f, -0.544615f, 0.285183f,  0.240811f,
+      0.000325f,  -0.019796f, -0.694804f, 0.162411f,  -0.612686f, -0.648134f,
+      0.022338f,  -0.265058f, 0.114993f,  0.189185f,  0.239697f,  -0.193148f,
+      0.125581f,  0.028122f,  0.230849f,  0.149832f,  0.250919f,  -0.036871f,
+      -0.041136f, 0.281627f,  -0.593466f, -0.141009f, -0.355074f, -0.106915f,
+      0.181276f,  0.230753f,  -0.283631f, -0.131643f, 0.038292f,  -0.081563f,
+      0.084345f,  0.111763f,  -0.259882f, -0.049416f, -0.595824f, 0.320077f,
+      -0.175802f, -0.336422f, -0.070966f, -0.399242f, -0.005829f, -0.156680f,
+      0.608591f,  0.318150f,  -0.697767f, 0.123331f,  -0.390716f, -0.071276f,
+      0.045943f,  0.208958f,  -0.076304f, 0.440505f,  -0.134400f, 0.091525f,
+      0.185763f,  0.023806f,  0.246186f,  0.090323f,  -0.219133f, -0.504520f,
+      0.519393f,  -0.168939f, 0.028884f,  0.157380f,  0.031745f,  -0.252830f,
+      -0.130705f, -0.034901f, 0.413302f,  -0.240559f, 0.219279f,  0.086246f,
+      -0.065353f, -0.295376f, -0.079405f, -0.024226f, -0.410629f, 0.053706f,
+      -0.229794f, -0.026336f, 0.093956f,  -0.252810f, -0.080555f, 0.097827f,
+      -0.513040f, 0.289508f,  0.677527f,  0.268109f,  -0.088244f, 0.119781f,
+      -0.289511f, 0.524778f,  0.262884f,  0.220028f,  -0.244767f, 0.089411f,
+      -0.156018f, -0.087030f, -0.159292f, -0.286646f, -0.253953f, -0.058657f,
+      -0.474756f, 0.169797f,  -0.032919f, 0.195384f,  0.075355f,  0.138131f,
+      -0.414465f, -0.285118f, -0.124915f, 0.030645f,  0.315431f,  -0.081032f,
+      0.352546f,  0.132860f,  0.328112f,  0.035476f,  -0.183550f, -0.413984f,
+      0.043452f,  0.228748f,  -0.081765f, -0.151125f, -0.086251f, -0.306448f,
+      -0.137774f, -0.050508f, 0.012811f,  -0.017824f, 0.170841f,  0.030549f,
+      0.506935f,  0.087197f,  0.504274f,  -0.202080f, 0.147146f,  -0.072728f,
+      0.167713f,  0.165977f,  -0.610894f, -0.370849f, -0.402698f, 0.112297f,
+      0.410855f,  -0.091330f, 0.227008f,  0.152454f,  -0.293884f, 0.111074f,
+      -0.210121f, 0.423728f,  -0.009101f, 0.457188f,  -0.118785f, 0.164720f,
+      -0.017547f, -0.565046f, -0.274461f, 0.171169f,  -0.015338f, -0.312635f,
+      -0.175044f, 0.069729f,  -0.277504f, 0.272454f,  -0.179049f, 0.505495f,
+      -0.301774f, 0.055664f,  -0.425058f, -0.202222f, -0.165787f, 0.112155f,
+      0.263284f,  0.083972f,  -0.104256f, 0.227892f,  0.223253f,  0.033592f,
+      0.159638f,  0.115358f,  -0.275811f, 0.212265f,  -0.183658f, -0.168768f};
+
+  const float kReference[] = {-0.248962f, -0.088257f, 0.083041f,  -0.037323f,
+                              0.127659f,  0.149388f,  -0.220978f, -0.004242f,
+                              -0.538544f, 0.384289f,  -0.117615f, -0.268524f};
+
+  RunBitexactnessTest(
+      16000, 1, CreateVector(rtc::ArrayView<const float>(kReferenceInput)),
+      CreateVector(rtc::ArrayView<const float>(kReference)));
+}
+
+TEST(LowCutFilterBitExactnessTest, Stereo16kHzInitial) {
+  const float kReferenceInput[] = {
+      0.087390f,  -0.370759f, -0.235918f, 0.583079f,  0.678359f,  0.360473f,
+      -0.166156f, 0.285780f,  -0.571837f, 0.234542f,  0.350382f,  0.202047f,
+      -0.307381f, -0.271197f, -0.657038f, 0.590723f,  -0.014666f, -0.290754f,
+      0.550122f,  -0.526390f, 0.689667f,  0.633054f,  0.692457f,  -0.259626f,
+      -0.233541f, 0.722669f,  -0.072182f, 0.141096f,  0.390614f,  0.921835f,
+      0.092626f,  0.273153f,  0.141785f,  0.854224f,  0.727531f,  -0.660321f,
+      -0.642602f, -0.512991f, 0.503559f,  -0.601731f, 0.965881f,  0.419277f,
+      -0.649128f, 0.716595f,  0.818823f,  0.923326f,  0.141199f,  0.125758f,
+      -0.646678f, 0.027358f,  0.096944f,  -0.669445f, -0.012214f, 0.070235f,
+      -0.602386f, 0.246338f,  -0.947369f, -0.362418f, 0.065999f,  -0.346453f,
+      0.204381f,  -0.276135f, -0.730159f, 0.827627f,  0.281118f,  0.317548f,
+      0.350661f,  0.489115f,  0.684355f,  0.033314f,  -0.696263f, -0.238671f,
+      0.642039f,  -0.657271f, -0.340049f, 0.932944f,  0.612585f,  -0.555624f,
+      0.999546f,  -0.872523f, -0.149034f, -0.191324f, -0.199414f, -0.776155f,
+      -0.151378f, 0.227092f,  0.976123f,  -0.560198f, -0.291838f, -0.467516f,
+      -0.417004f, -0.623221f, -0.954281f, -0.101192f, -0.512720f, 0.737453f,
+      0.057222f,  0.828270f,  0.947860f,  0.170852f,  -0.762049f, 0.853065f,
+      0.187122f,  0.767231f,  -0.151048f, 0.214515f,  -0.858473f, 0.849545f,
+      0.284159f,  -0.791001f, 0.400450f,  -0.208391f, -0.830190f, -0.571042f,
+      -0.502402f, -0.546694f, 0.406009f,  0.508305f,  0.094573f,  0.106967f,
+      0.261146f,  0.970914f,  0.268556f,  0.200911f,  0.818374f,  0.141673f,
+      -0.329160f, 0.914278f,  -0.120154f, 0.203085f,  0.440525f,  0.357557f,
+      -0.574482f, -0.836753f, -0.451041f, 0.735037f,  0.118714f,  -0.070744f,
+      -0.139398f, 0.547972f,  0.307841f,  0.315459f,  -0.677958f, -0.135246f,
+      0.010172f,  -0.249335f, -0.039256f, -0.315157f, 0.554293f,  -0.232112f,
+      0.423113f,  -0.038133f, 0.458360f,  0.875118f,  0.034509f,  0.806137f,
+      -0.563615f, 0.746439f,  -0.834614f, -0.069193f, -0.956140f, 0.616561f,
+      -0.641581f, -0.669216f, -0.636793f, 0.382873f,  -0.572473f, -0.403790f,
+      0.536670f,  0.002300f,  0.818930f,  -0.884294f, -0.126496f, 0.144509f,
+      0.130134f,  0.647633f,  -0.747802f, -0.399766f, -0.995756f, 0.902215f,
+      0.532599f,  0.502608f,  -0.722270f, -0.301361f, -0.697319f, -0.006559f,
+      0.617305f,  0.265738f,  0.376803f,  0.279140f,  0.458643f,  0.719691f,
+      0.253911f,  -0.638817f, 0.146613f,  -0.672868f, 0.812103f,  -0.845314f,
+      -0.322931f, 0.161235f,  -0.049530f, 0.610641f,  0.061556f,  -0.545379f,
+      0.418970f,  -0.702735f, 0.316232f,  0.267965f,  -0.541387f, -0.635544f,
+      -0.667295f, -0.700786f, -0.594505f, 0.909918f,  -0.968183f, 0.915029f,
+      -0.948615f, 0.942221f,  -0.404809f, 0.050146f,  0.724678f,  0.792810f,
+      -0.621979f, 0.321439f,  0.882462f,  0.951414f,  -0.784129f, -0.642202f,
+      0.493103f,  -0.901063f, -0.857430f, -0.021749f, 0.699788f,  0.994083f,
+      -0.991215f, 0.085215f,  0.722696f,  0.818278f,  0.690701f,  0.757746f,
+      0.492364f,  -0.765021f, 0.018045f,  -0.662336f, 0.662223f,  0.856022f,
+      -0.661031f, 0.767475f,  -0.224274f, -0.234861f, -0.457094f, 0.735766f,
+      0.483005f,  -0.104255f, 0.419278f,  0.888663f,  -0.651764f, -0.510807f,
+      0.281858f,  0.617225f,  0.706742f,  -0.203765f, -0.769012f, -0.839438f,
+      -0.279065f, 0.657811f,  -0.570781f, 0.582081f,  0.309377f,  -0.947707f,
+      0.571553f,  0.845126f,  -0.015374f, 0.668023f,  -0.737293f, 0.519567f,
+      0.851472f,  0.665415f,  -0.481198f, -0.573956f, 0.044630f,  -0.205286f,
+      -0.041780f, 0.987807f,  0.208957f,  0.889817f,  -0.019116f, -0.124107f,
+      0.545311f,  0.488133f,  -0.114192f, -0.894000f, -0.824356f, 0.595972f,
+      0.311165f,  -0.935329f, 0.114134f,  0.439603f,  -0.779184f, -0.566705f,
+      0.622040f,  -0.722676f, 0.763798f,  0.847112f,  -0.974489f, -0.245681f,
+      -0.664377f, 0.080446f,  -0.796675f, -0.921465f, 0.866458f,  0.943184f,
+      -0.278144f, 0.288411f,  -0.864105f, -0.584176f, -0.920792f, -0.061281f,
+      -0.699807f, 0.982614f};
+
+  const float kReference[] = {
+      0.085604f,  -0.367126f, -0.218170f, 0.594653f,  0.661245f,  0.319041f,
+      -0.212891f, 0.237800f,  -0.614716f, 0.201758f,  0.305032f,  0.144414f,
+      -0.936523f, 0.647359f,  -0.613403f, -0.611542f, -0.549835f, 0.477004f,
+      -0.477386f, -0.287262f, 0.650746f,  0.101169f,  0.899258f,  -0.808014f};
+
+  RunBitexactnessTest(
+      16000, 2, CreateVector(rtc::ArrayView<const float>(kReferenceInput)),
+      CreateVector(rtc::ArrayView<const float>(kReference)));
+}
+
+TEST(LowCutFilterBitExactnessTest, Stereo16kHzConverged) {
+  const float kReferenceInput[] = {
+      -0.145875f, 0.910744f,  0.448494f,  0.161783f,  0.080516f,  0.410882f,
+      -0.989942f, 0.565032f,  0.853719f,  -0.983409f, 0.649257f,  0.534672f,
+      0.994274f,  -0.544694f, 0.839084f,  0.283999f,  -0.789360f, -0.463678f,
+      0.527688f,  0.611020f,  -0.791494f, -0.060482f, -0.561876f, 0.845416f,
+      -0.359355f, 0.715088f,  -0.480307f, 0.756126f,  -0.623465f, 0.518388f,
+      -0.936621f, 0.284678f,  0.133742f,  -0.247181f, -0.574903f, 0.584314f,
+      -0.709113f, -0.021715f, -0.974309f, -0.626776f, -0.029539f, 0.676452f,
+      -0.717886f, 0.464434f,  0.382134f,  -0.931015f, -0.022285f, 0.942781f,
+      -0.775097f, 0.486428f,  0.277083f,  0.188366f,  -0.002755f, 0.135705f,
+      -0.146991f, -0.847521f, -0.418827f, 0.122670f,  0.266667f,  0.861552f,
+      0.955538f,  -0.812807f, 0.323470f,  0.205546f,  -0.052364f, -0.287487f,
+      -0.048843f, 0.342044f,  0.919290f,  -0.821831f, 0.595485f,  0.181551f,
+      0.824394f,  -0.797741f, -0.413411f, -0.896824f, 0.008256f,  0.536752f,
+      -0.434029f, -0.549280f, -0.337421f, -0.093497f, 0.474769f,  0.019771f,
+      -0.234972f, 0.810966f,  0.930515f,  0.256535f,  -0.735938f, 0.236604f,
+      -0.233960f, 0.982387f,  -0.426345f, 0.412383f,  0.070412f,  -0.613578f,
+      0.378870f,  -0.899090f, -0.631132f, -0.908683f, 0.770083f,  0.679589f,
+      -0.763690f, -0.179170f, -0.759543f, 0.144185f,  0.898780f,  -0.487230f,
+      0.979731f,  -0.300384f, -0.582955f, 0.331654f,  0.946689f,  0.245400f,
+      -0.872924f, -0.252981f, -0.667497f, -0.537444f, -0.895583f, 0.803513f,
+      0.586583f,  -0.253971f, 0.664109f,  0.507669f,  0.243726f,  -0.211814f,
+      -0.281444f, -0.822295f, -0.316646f, 0.097341f,  -0.078905f, 0.290905f,
+      0.027042f,  0.628853f,  -0.805634f, -0.072573f, 0.179635f,  -0.625656f,
+      0.222660f,  -0.896116f, 0.151454f,  0.684689f,  -0.000548f, -0.121950f,
+      -0.701886f, -0.943441f, 0.513340f,  0.592212f,  -0.412889f, -0.769587f,
+      -0.249817f, 0.657787f,  0.683553f,  0.330477f,  0.920280f,  0.886236f,
+      -0.774601f, 0.296575f,  -0.038392f, -0.866959f, 0.795542f,  -0.005540f,
+      0.542607f,  -0.879276f, -0.475085f, 0.302139f,  -0.732792f, 0.277091f,
+      -0.230114f, 0.531396f,  0.305831f,  -0.237022f, -0.399963f, -0.319721f,
+      0.837853f,  -0.087466f, -0.115006f, -0.091628f, 0.890564f,  -0.561762f,
+      0.764806f,  -0.960249f, -0.316470f, 0.532055f,  -0.314393f, 0.237613f,
+      -0.093958f, -0.979675f, 0.198162f,  0.203137f,  0.298835f,  -0.314559f,
+      -0.013401f, 0.403548f,  0.775605f,  -0.889884f, -0.803276f, 0.299566f,
+      0.528142f,  0.975918f,  -0.749350f, -0.271046f, 0.352460f,  -0.248484f,
+      0.726917f,  -0.416046f, -0.733050f, 0.345301f,  -0.594830f, 0.737030f,
+      0.502315f,  -0.161241f, -0.999538f, -0.701073f, -0.452331f, 0.744850f,
+      0.202502f,  -0.357623f, -0.431414f, -0.129368f, 0.807518f,  0.850211f,
+      0.010585f,  0.255164f,  0.438528f,  -0.952174f, 0.149865f,  -0.906931f,
+      -0.154937f, -0.064531f, -0.954744f, -0.869852f, 0.847913f,  0.068286f,
+      -0.266407f, -0.272108f, -0.697253f, -0.700783f, -0.298396f, -0.328068f,
+      0.568056f,  -0.026522f, -0.070404f, -0.737495f, 0.772783f,  0.349115f,
+      0.670319f,  0.312976f,  0.967834f,  0.959580f,  -0.499694f, 0.249141f,
+      0.456485f,  -0.003659f, 0.699657f,  -0.618164f, -0.751712f, -0.994419f,
+      -0.694094f, 0.068322f,  0.021267f,  -0.229568f, -0.378807f, -0.992889f,
+      0.630485f,  0.276837f,  -0.103321f, -0.511828f, 0.606770f,  0.647942f,
+      0.704381f,  -0.065496f, 0.941398f,  0.682488f,  -0.842904f, -0.524802f,
+      0.635142f,  -0.188343f, -0.067376f, 0.903072f,  0.930011f,  0.530570f,
+      0.149067f,  0.831850f,  -0.009135f, -0.667975f, -0.348005f, -0.407128f,
+      0.116597f,  -0.865046f, -0.862044f, -0.666431f, 0.894877f,  0.622177f,
+      0.420911f,  0.940491f,  0.996854f,  0.974910f,  -0.699827f, 0.916958f,
+      0.060918f,  -0.851827f, -0.376358f, 0.790342f,  0.669537f,  -0.995302f,
+      0.280420f,  0.606365f,  -0.509738f, -0.871756f, -0.473703f, -0.794559f,
+      -0.032562f, -0.162231f, -0.237422f, 0.773530f,  -0.158885f, -0.432304f,
+      -0.903638f, -0.561668f, -0.521648f, -0.941483f, 0.404622f,  -0.984729f,
+      0.221841f,  -0.183821f, -0.502107f, 0.304919f,  -0.359446f, -0.792656f,
+      0.071130f,  -0.670260f, 0.766877f,  0.332914f,  0.695485f,  0.525322f,
+      0.614028f,  0.265905f,  0.420855f,  0.377327f,  -0.358104f, 0.063297f,
+      0.746388f,  -0.890921f, 0.000802f,  -0.134474f, 0.808565f,  0.260367f,
+      0.966072f,  0.170401f,  0.681273f,  -0.062372f, 0.090445f,  -0.641792f,
+      0.268923f,  0.925918f,  0.068028f,  -0.040771f, 0.587332f,  -0.814573f,
+      0.761599f,  -0.992253f, 0.023058f,  0.356927f,  0.131495f,  -0.043083f,
+      -0.358974f, 0.203160f,  0.826305f,  0.365036f,  0.893467f,  -0.801822f,
+      0.022058f,  -0.779743f, 0.090524f,  0.377572f,  -0.705166f, 0.555122f,
+      -0.201898f, 0.796600f,  -0.385912f, -0.877898f, -0.561058f, -0.834334f,
+      0.900791f,  -0.967259f, -0.770663f, -0.975180f, -0.567545f, -0.977145f,
+      0.284899f,  0.033982f,  -0.508916f, -0.612505f, -0.818259f, -0.263117f,
+      -0.984414f, 0.205403f,  -0.042291f, -0.383765f, 0.488889f,  0.678699f,
+      -0.475136f, 0.028476f,  -0.106452f, -0.317578f, 0.678284f,  0.964985f,
+      0.252929f,  -0.637450f, -0.753966f, 0.159937f,  -0.342928f, -0.463627f,
+      0.100478f,  -0.638966f, 0.356984f,  -0.888623f, -0.931886f, -0.426963f,
+      -0.845220f, 0.801145f,  0.693212f,  -0.208603f, -0.661569f, -0.139095f,
+      -0.167564f, 0.457527f,  -0.187053f, 0.903615f,  0.823970f,  0.902829f,
+      -0.307998f, -0.419512f, 0.773402f,  -0.579938f, -0.738247f, 0.041032f,
+      0.810925f,  -0.194940f, -0.568477f, -0.842521f, 0.866120f,  0.205743f,
+      -0.245016f, 0.329863f,  0.584381f,  -0.333016f, 0.385318f,  -0.592369f,
+      0.917427f,  0.423665f,  -0.666187f, -0.114446f, 0.265987f,  0.859934f,
+      0.058662f,  0.252949f,  0.361638f,  0.846395f,  -0.694332f, -0.188558f,
+      -0.375048f, 0.387798f,  0.781376f,  -0.018658f, 0.611647f,  -0.347122f,
+      0.099758f,  -0.222431f, 0.793658f,  0.352240f,  0.656794f,  -0.779822f,
+      -0.441545f, 0.535272f,  -0.567887f, -0.931876f, -0.126896f, 0.873727f,
+      -0.475822f, 0.139491f,  -0.280894f, -0.946323f, 0.000838f,  0.654030f,
+      -0.482035f, -0.908230f, -0.507057f, 0.321464f,  -0.341181f, 0.318992f,
+      -0.973992f, 0.436136f,  -0.217762f, -0.932989f, -0.187969f, 0.432615f,
+      0.842673f,  0.968031f,  0.966842f,  0.792612f,  0.731406f,  0.601922f,
+      0.109958f,  -0.162256f, -0.745755f, 0.309241f,  0.727930f,  -0.450803f,
+      0.680328f,  -0.858490f, -0.242416f, -0.463661f, -0.694158f, 0.261999f,
+      -0.367250f, 0.918224f,  -0.002652f, 0.477217f,  -0.974489f, 0.210706f,
+      0.152903f,  0.614758f,  0.309936f,  0.756457f,  0.804746f,  -0.695534f,
+      -0.614840f, 0.581951f,  -0.878590f, -0.220346f, -0.400068f, 0.468360f,
+      -0.791581f, 0.585151f,  0.565458f,  0.064795f,  -0.493295f, -0.858091f,
+      0.251607f,  -0.950637f, -0.875915f, -0.740776f, -0.098772f, 0.344672f,
+      0.712222f,  -0.003109f, -0.902431f, -0.372335f, 0.283262f,  0.572773f,
+      -0.421699f, -0.004264f, 0.636869f,  0.190257f,  0.072849f,  -0.338254f,
+      -0.176620f, 0.588012f,  -0.313584f, -0.074787f, -0.264353f, 0.359141f,
+      0.135558f,  0.303554f,  -0.017773f, -0.203084f, -0.045032f, -0.866825f,
+      -0.177943f, 0.938184f,  0.561442f,  0.458036f,  0.531301f,  0.513162f,
+      0.686541f,  0.540314f,  0.957322f,  -0.777281f, -0.207846f, -0.015879f,
+      -0.483811f, -0.926068f, 0.948763f,  0.452852f,  -0.704070f, -0.704211f,
+      0.409648f,  -0.238013f, -0.847177f, -0.178319f, -0.714019f, 0.597840f,
+      0.860496f,  -0.990561f, 0.300081f,  0.357065f,  -0.492754f, 0.686362f,
+      -0.412082f, -0.946279f, -0.813386f, 0.595770f,  0.422805f,  0.566814f,
+      0.247845f,  0.650831f,  -0.929955f, -0.189050f, -0.500662f, -0.038206f,
+      0.761678f,  -0.438630f, 0.198285f,  -0.947548f, -0.689603f, 0.667822f,
+      -0.610213f, 0.659576f,  -0.323850f, 0.342233f,  -0.895267f, 0.468618f,
+      -0.001036f, 0.886600f,  -0.420455f, -0.246879f, -0.772489f, 0.929701f,
+      -0.134977f, -0.830874f, 0.433353f,  0.013575f,  -0.343825f, 0.507048f,
+      0.672012f,  -0.492567f, 0.068850f,  -0.129670f, -0.684592f, 0.200962f,
+      0.874902f,  -0.784483f, 0.799963f,  0.100930f,  -0.145287f, -0.695238f,
+      -0.504908f, -0.105262f, 0.065567f,  -0.290698f, 0.546230f,  0.763362f,
+      0.468184f,  -0.187136f, 0.208357f,  0.282210f,  -0.745066f, -0.007616f,
+      -0.379061f, 0.157149f,  0.887218f,  -0.146121f, -0.933743f, 0.858868f,
+      0.849965f,  -0.283386f, -0.480022f, 0.573719f,  0.023164f,  0.125054f,
+      0.369588f,  -0.815207f, 0.745158f,  0.885876f,  -0.806812f, 0.691765f,
+      0.818791f,  -0.977318f, 0.047365f,  0.300691f,  -0.229709f, 0.298604f,
+      0.525707f,  0.151372f,  0.263838f,  -0.443592f, 0.679673f,  -0.146330f,
+      0.263245f,  0.666934f,  -0.459629f, -0.198399f, 0.108509f,  -0.112269f,
+      -0.819232f, 0.488763f,  -0.934769f, -0.140515f, -0.925475f, 0.951596f,
+      0.044680f,  0.819260f,  -0.233504f, 0.768904f,  -0.489965f, 0.818100f,
+      0.789121f,  -0.202966f, 0.250040f,  0.135195f,  0.789024f,  -0.571668f,
+      -0.992282f, 0.761163f,  -0.529757f, -0.510271f, 0.281834f,  -0.390951f,
+      0.651242f,  0.767377f,  0.890746f,  -0.218409f, 0.602640f,  -0.685773f,
+      0.250331f,  0.397971f,  -0.828262f, 0.062359f,  0.777133f,  -0.472668f,
+      -0.530429f, 0.679314f,  -0.008920f, -0.695267f, -0.538464f, 0.315908f,
+      0.125897f,  -0.416343f, 0.244610f,  0.431811f,  -0.438538f, -0.175454f,
+      -0.275589f, 0.562784f,  -0.729026f, 0.804139f,  -0.420728f, -0.000884f,
+      0.567181f,  0.354124f,  -0.700377f, 0.393239f,  -0.741974f, 0.891893f,
+      0.772824f,  0.030009f,  0.358817f,  0.953587f,  -0.749079f, 0.504486f,
+      0.654104f,  0.562861f,  -0.618235f, -0.142717f, -0.971087f, -0.349429f,
+      -0.730596f, -0.098965f, 0.144550f,  0.584047f,  -0.160527f, 0.065073f,
+      0.851409f,  0.798164f,  0.089667f,  0.802248f,  -0.896347f, 0.617205f,
+      -0.330191f, -0.542634f, 0.644804f,  -0.303531f, -0.669059f, -0.943733f,
+      0.910740f,  0.360581f,  0.721124f,  0.878187f,  0.360388f,  0.834847f,
+      -0.486617f, 0.771236f,  0.840086f,  -0.399873f, -0.853218f, 0.534797f,
+      -0.830096f, 0.457528f,  -0.104221f, 0.302497f,  -0.660996f, 0.062898f,
+      0.267602f,  -0.971808f, -0.059257f, 0.772652f,  -0.771943f, -0.114918f,
+      0.319096f,  -0.410454f, 0.900737f,  0.388572f,  -0.586387f, 0.109525f,
+      0.758557f,  0.115715f,  0.504668f,  0.789802f,  0.683688f,  -0.738287f,
+      -0.621692f, -0.692720f, -0.942196f, -0.981830f, 0.192903f,  0.218099f,
+      0.837847f,  0.467149f,  -0.397706f, -0.008851f, -0.483674f, 0.465709f,
+      -0.766478f, 0.492083f,  0.619578f,  0.490467f,  -0.325713f, 0.168650f,
+      -0.062096f, -0.825470f, 0.657435f,  0.371889f,  -0.465350f, 0.938967f,
+      -0.632452f, -0.400118f, -0.177630f, -0.527022f, -0.609889f, 0.410759f,
+      -0.638903f, 0.044666f,  -0.407656f, -0.074436f, 0.850465f,  -0.568222f,
+      -0.997982f, 0.813212f,  0.360084f,  0.029904f,  0.044138f,  -0.794163f,
+      0.993761f,  -0.282062f, 0.250485f,  -0.213267f, -0.984675f, 0.090570f,
+      0.018221f,  -0.506442f, -0.909209f, 0.683459f,  -0.903500f, -0.367359f,
+      0.566839f,  0.944800f,  0.172928f,  0.556088f,  0.455395f,  0.301974f,
+      0.329230f,  0.877560f,  0.070163f,  -0.203120f, 0.340915f,  -0.118931f,
+      -0.734252f, -0.121593f, 0.095285f,  -0.209727f, -0.203456f, 0.502697f,
+      0.044701f,  -0.019134f, -0.822642f, -0.498297f, -0.104882f, 0.275922f,
+      0.418891f,  0.985240f,  0.864390f,  -0.815541f, 0.907080f,  -0.674409f,
+      0.940910f,  0.194013f,  -0.519546f, -0.859410f, -0.399918f, 0.627090f,
+      -0.846580f, -0.291054f, -0.735978f, -0.683641f, -0.875706f, 0.403687f,
+      -0.827037f, 0.233574f,  -0.652457f, 0.302802f,  -0.002607f, -0.430979f,
+      0.661119f,  0.636720f,  0.876339f,  -0.999348f, 0.280778f,  -0.985289f,
+      -0.787158f, -0.786411f, -0.265782f, -0.520785f, -0.307720f, -0.500760f,
+      -0.225871f, -0.157923f, 0.280155f,  0.575106f,  -0.460011f, 0.687965f,
+      0.480937f,  0.652204f,  -0.635616f, -0.869128f, 0.220701f,  0.403106f,
+      -0.776765f, -0.808353f, 0.195668f,  0.624465f,  0.629156f,  -0.821126f,
+      0.462557f,  0.807713f,  -0.095536f, -0.858625f, -0.517444f, 0.463730f};
+
+  const float kReference[] = {
+      -0.816528f, 0.085421f,  0.739647f,  -0.922089f, 0.669301f,  -0.048187f,
+      -0.290039f, -0.818085f, -0.596008f, -0.177826f, -0.002197f, -0.350647f,
+      -0.064301f, 0.337291f,  -0.621765f, 0.115909f,  0.311899f,  -0.915924f,
+      0.020478f,  0.836055f,  -0.714020f, -0.037140f, 0.391125f,  -0.340118f};
+
+  RunBitexactnessTest(
+      16000, 2, CreateVector(rtc::ArrayView<const float>(kReferenceInput)),
+      CreateVector(rtc::ArrayView<const float>(kReference)));
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/noise_suppression_impl.cc b/modules/audio_processing/noise_suppression_impl.cc
new file mode 100644
index 0000000..8dd713f
--- /dev/null
+++ b/modules/audio_processing/noise_suppression_impl.cc
@@ -0,0 +1,213 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/noise_suppression_impl.h"
+
+#include "modules/audio_processing/audio_buffer.h"
+#include "rtc_base/constructormagic.h"
+#if defined(WEBRTC_NS_FLOAT)
+#include "modules/audio_processing/ns/noise_suppression.h"
+#define NS_CREATE WebRtcNs_Create
+#define NS_FREE WebRtcNs_Free
+#define NS_INIT WebRtcNs_Init
+#define NS_SET_POLICY WebRtcNs_set_policy
+typedef NsHandle NsState;
+#elif defined(WEBRTC_NS_FIXED)
+#include "modules/audio_processing/ns/noise_suppression_x.h"
+#define NS_CREATE WebRtcNsx_Create
+#define NS_FREE WebRtcNsx_Free
+#define NS_INIT WebRtcNsx_Init
+#define NS_SET_POLICY WebRtcNsx_set_policy
+typedef NsxHandle NsState;
+#endif
+
+namespace webrtc {
+class NoiseSuppressionImpl::Suppressor {
+ public:
+  explicit Suppressor(int sample_rate_hz) {
+    state_ = NS_CREATE();
+    RTC_CHECK(state_);
+    int error = NS_INIT(state_, sample_rate_hz);
+    RTC_DCHECK_EQ(0, error);
+  }
+  ~Suppressor() {
+    NS_FREE(state_);
+  }
+  NsState* state() { return state_; }
+ private:
+  NsState* state_ = nullptr;
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(Suppressor);
+};
+
+NoiseSuppressionImpl::NoiseSuppressionImpl(rtc::CriticalSection* crit)
+    : crit_(crit) {
+  RTC_DCHECK(crit);
+}
+
+NoiseSuppressionImpl::~NoiseSuppressionImpl() {}
+
+void NoiseSuppressionImpl::Initialize(size_t channels, int sample_rate_hz) {
+  rtc::CritScope cs(crit_);
+  channels_ = channels;
+  sample_rate_hz_ = sample_rate_hz;
+  std::vector<std::unique_ptr<Suppressor>> new_suppressors;
+  if (enabled_) {
+    new_suppressors.resize(channels);
+    for (size_t i = 0; i < channels; i++) {
+      new_suppressors[i].reset(new Suppressor(sample_rate_hz));
+    }
+  }
+  suppressors_.swap(new_suppressors);
+  set_level(level_);
+}
+
+void NoiseSuppressionImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
+  RTC_DCHECK(audio);
+#if defined(WEBRTC_NS_FLOAT)
+  rtc::CritScope cs(crit_);
+  if (!enabled_) {
+    return;
+  }
+
+  RTC_DCHECK_GE(160, audio->num_frames_per_band());
+  RTC_DCHECK_EQ(suppressors_.size(), audio->num_channels());
+  for (size_t i = 0; i < suppressors_.size(); i++) {
+    WebRtcNs_Analyze(suppressors_[i]->state(),
+                     audio->split_bands_const_f(i)[kBand0To8kHz]);
+  }
+#endif
+}
+
+void NoiseSuppressionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+  RTC_DCHECK(audio);
+  rtc::CritScope cs(crit_);
+  if (!enabled_) {
+    return;
+  }
+
+  RTC_DCHECK_GE(160, audio->num_frames_per_band());
+  RTC_DCHECK_EQ(suppressors_.size(), audio->num_channels());
+  for (size_t i = 0; i < suppressors_.size(); i++) {
+#if defined(WEBRTC_NS_FLOAT)
+    WebRtcNs_Process(suppressors_[i]->state(),
+                     audio->split_bands_const_f(i),
+                     audio->num_bands(),
+                     audio->split_bands_f(i));
+#elif defined(WEBRTC_NS_FIXED)
+    WebRtcNsx_Process(suppressors_[i]->state(),
+                      audio->split_bands_const(i),
+                      audio->num_bands(),
+                      audio->split_bands(i));
+#endif
+  }
+}
+
+int NoiseSuppressionImpl::Enable(bool enable) {
+  rtc::CritScope cs(crit_);
+  if (enabled_ != enable) {
+    enabled_ = enable;
+    Initialize(channels_, sample_rate_hz_);
+  }
+  return AudioProcessing::kNoError;
+}
+
+bool NoiseSuppressionImpl::is_enabled() const {
+  rtc::CritScope cs(crit_);
+  return enabled_;
+}
+
+int NoiseSuppressionImpl::set_level(Level level) {
+  int policy = 1;
+  switch (level) {
+    case NoiseSuppression::kLow:
+      policy = 0;
+      break;
+    case NoiseSuppression::kModerate:
+      policy = 1;
+      break;
+    case NoiseSuppression::kHigh:
+      policy = 2;
+      break;
+    case NoiseSuppression::kVeryHigh:
+      policy = 3;
+      break;
+    default:
+      RTC_NOTREACHED();
+  }
+  rtc::CritScope cs(crit_);
+  level_ = level;
+  for (auto& suppressor : suppressors_) {
+    int error = NS_SET_POLICY(suppressor->state(), policy);
+    RTC_DCHECK_EQ(0, error);
+  }
+  return AudioProcessing::kNoError;
+}
+
+NoiseSuppression::Level NoiseSuppressionImpl::level() const {
+  rtc::CritScope cs(crit_);
+  return level_;
+}
+
+float NoiseSuppressionImpl::speech_probability() const {
+  rtc::CritScope cs(crit_);
+#if defined(WEBRTC_NS_FLOAT)
+  float probability_average = 0.0f;
+  for (auto& suppressor : suppressors_) {
+    probability_average +=
+        WebRtcNs_prior_speech_probability(suppressor->state());
+  }
+  if (!suppressors_.empty()) {
+    probability_average /= suppressors_.size();
+  }
+  return probability_average;
+#elif defined(WEBRTC_NS_FIXED)
+  // TODO(peah): Returning error code as a float! Remove this.
+  // Currently not available for the fixed point implementation.
+  return AudioProcessing::kUnsupportedFunctionError;
+#endif
+}
+
+std::vector<float> NoiseSuppressionImpl::NoiseEstimate() {
+  rtc::CritScope cs(crit_);
+  std::vector<float> noise_estimate;
+#if defined(WEBRTC_NS_FLOAT)
+  const float kNumChannelsFraction = 1.f / suppressors_.size();
+  noise_estimate.assign(WebRtcNs_num_freq(), 0.f);
+  for (auto& suppressor : suppressors_) {
+    const float* noise = WebRtcNs_noise_estimate(suppressor->state());
+    for (size_t i = 0; i < noise_estimate.size(); ++i) {
+      noise_estimate[i] += kNumChannelsFraction * noise[i];
+    }
+  }
+#elif defined(WEBRTC_NS_FIXED)
+  noise_estimate.assign(WebRtcNsx_num_freq(), 0.f);
+  for (auto& suppressor : suppressors_) {
+    int q_noise;
+    const uint32_t* noise = WebRtcNsx_noise_estimate(suppressor->state(),
+                                                     &q_noise);
+    const float kNormalizationFactor =
+        1.f / ((1 << q_noise) * suppressors_.size());
+    for (size_t i = 0; i < noise_estimate.size(); ++i) {
+      noise_estimate[i] += kNormalizationFactor * noise[i];
+    }
+  }
+#endif
+  return noise_estimate;
+}
+
+size_t NoiseSuppressionImpl::num_noise_bins() {
+#if defined(WEBRTC_NS_FLOAT)
+  return WebRtcNs_num_freq();
+#elif defined(WEBRTC_NS_FIXED)
+  return WebRtcNsx_num_freq();
+#endif
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/noise_suppression_impl.h b/modules/audio_processing/noise_suppression_impl.h
new file mode 100644
index 0000000..fba716e
--- /dev/null
+++ b/modules/audio_processing/noise_suppression_impl.h
@@ -0,0 +1,56 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NOISE_SUPPRESSION_IMPL_H_
+#define MODULES_AUDIO_PROCESSING_NOISE_SUPPRESSION_IMPL_H_
+
+#include <memory>
+#include <vector>
+
+#include "modules/audio_processing/include/audio_processing.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+
+namespace webrtc {
+
+class AudioBuffer;
+
+class NoiseSuppressionImpl : public NoiseSuppression {
+ public:
+  explicit NoiseSuppressionImpl(rtc::CriticalSection* crit);
+  ~NoiseSuppressionImpl() override;
+
+  // TODO(peah): Fold into ctor, once public API is removed.
+  void Initialize(size_t channels, int sample_rate_hz);
+  void AnalyzeCaptureAudio(AudioBuffer* audio);
+  void ProcessCaptureAudio(AudioBuffer* audio);
+
+  // NoiseSuppression implementation.
+  int Enable(bool enable) override;
+  bool is_enabled() const override;
+  int set_level(Level level) override;
+  Level level() const override;
+  float speech_probability() const override;
+  std::vector<float> NoiseEstimate() override;
+  static size_t num_noise_bins();
+
+ private:
+  class Suppressor;
+  rtc::CriticalSection* const crit_;
+  bool enabled_ RTC_GUARDED_BY(crit_) = false;
+  Level level_ RTC_GUARDED_BY(crit_) = kModerate;
+  size_t channels_ RTC_GUARDED_BY(crit_) = 0;
+  int sample_rate_hz_ RTC_GUARDED_BY(crit_) = 0;
+  std::vector<std::unique_ptr<Suppressor>> suppressors_ RTC_GUARDED_BY(crit_);
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(NoiseSuppressionImpl);
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_NOISE_SUPPRESSION_IMPL_H_
diff --git a/modules/audio_processing/noise_suppression_unittest.cc b/modules/audio_processing/noise_suppression_unittest.cc
new file mode 100644
index 0000000..0b734fd
--- /dev/null
+++ b/modules/audio_processing/noise_suppression_unittest.cc
@@ -0,0 +1,284 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/noise_suppression_impl.h"
+#include "modules/audio_processing/test/audio_buffer_tools.h"
+#include "modules/audio_processing/test/bitexactness_tools.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+const int kNumFramesToProcess = 1000;
+
+// Process one frame of data and produce the output.
+void ProcessOneFrame(int sample_rate_hz,
+                     AudioBuffer* capture_buffer,
+                     NoiseSuppressionImpl* noise_suppressor) {
+  if (sample_rate_hz > AudioProcessing::kSampleRate16kHz) {
+    capture_buffer->SplitIntoFrequencyBands();
+  }
+
+  noise_suppressor->AnalyzeCaptureAudio(capture_buffer);
+  noise_suppressor->ProcessCaptureAudio(capture_buffer);
+
+  if (sample_rate_hz > AudioProcessing::kSampleRate16kHz) {
+    capture_buffer->MergeFrequencyBands();
+  }
+}
+
+// Processes a specified amount of frames, verifies the results and reports
+// any errors.
+void RunBitexactnessTest(int sample_rate_hz,
+                         size_t num_channels,
+                         NoiseSuppressionImpl::Level level,
+                         float speech_probability_reference,
+                         rtc::ArrayView<const float> noise_estimate_reference,
+                         rtc::ArrayView<const float> output_reference) {
+  rtc::CriticalSection crit_capture;
+  NoiseSuppressionImpl noise_suppressor(&crit_capture);
+  noise_suppressor.Initialize(num_channels, sample_rate_hz);
+  noise_suppressor.Enable(true);
+  noise_suppressor.set_level(level);
+
+  int samples_per_channel = rtc::CheckedDivExact(sample_rate_hz, 100);
+  const StreamConfig capture_config(sample_rate_hz, num_channels, false);
+  AudioBuffer capture_buffer(
+      capture_config.num_frames(), capture_config.num_channels(),
+      capture_config.num_frames(), capture_config.num_channels(),
+      capture_config.num_frames());
+  test::InputAudioFile capture_file(
+      test::GetApmCaptureTestVectorFileName(sample_rate_hz));
+  std::vector<float> capture_input(samples_per_channel * num_channels);
+  for (size_t frame_no = 0; frame_no < kNumFramesToProcess; ++frame_no) {
+    ReadFloatSamplesFromStereoFile(samples_per_channel, num_channels,
+                                   &capture_file, capture_input);
+
+    test::CopyVectorToAudioBuffer(capture_config, capture_input,
+                                  &capture_buffer);
+
+    ProcessOneFrame(sample_rate_hz, &capture_buffer, &noise_suppressor);
+  }
+
+  // Extract test results.
+  std::vector<float> capture_output;
+  test::ExtractVectorFromAudioBuffer(capture_config, &capture_buffer,
+                                     &capture_output);
+  float speech_probability = noise_suppressor.speech_probability();
+  std::vector<float> noise_estimate = noise_suppressor.NoiseEstimate();
+
+  const float kVectorElementErrorBound = 1.0f / 32768.0f;
+  EXPECT_FLOAT_EQ(speech_probability_reference, speech_probability);
+  EXPECT_TRUE(test::VerifyArray(noise_estimate_reference, noise_estimate,
+                                kVectorElementErrorBound));
+
+  // Compare the output with the reference. Only the first values of the output
+  // from last frame processed are compared in order not having to specify all
+  // preceeding frames as testvectors. As the algorithm being tested has a
+  // memory, testing only the last frame implicitly also tests the preceeding
+  // frames.
+  EXPECT_TRUE(test::VerifyDeinterleavedArray(
+      capture_config.num_frames(), capture_config.num_channels(),
+      output_reference, capture_output, kVectorElementErrorBound));
+}
+
+}  // namespace
+
+TEST(NoiseSuppresionBitExactnessTest, Mono8kHzLow) {
+#if defined(WEBRTC_ARCH_ARM64)
+  const float kSpeechProbabilityReference = -4.0f;
+  const float kNoiseEstimateReference[] =
+      {1432.341431f, 3321.919922f, 7677.521973f};
+  const float kOutputReference[] = {0.003510f, 0.004517f, 0.004669f};
+#elif defined(WEBRTC_ARCH_ARM)
+  const float kSpeechProbabilityReference = -4.0f;
+  const float kNoiseEstimateReference[] =
+      {1432.341431f, 3321.919922f, 7677.521973f};
+  const float kOutputReference[] = {0.003510f, 0.004517f, 0.004669f};
+#else
+  const float kSpeechProbabilityReference = 0.73421317f;
+  const float kNoiseEstimateReference[] =
+      {1175.266113f, 3289.305908f, 7532.991211f};
+  const float kOutputReference[] = {0.003263f, 0.004402f, 0.004537f};
+#endif
+
+  RunBitexactnessTest(8000, 1, NoiseSuppression::Level::kLow,
+                      kSpeechProbabilityReference, kNoiseEstimateReference,
+                      kOutputReference);
+}
+
+TEST(NoiseSuppresionBitExactnessTest, Mono16kHzLow) {
+#if defined(WEBRTC_ARCH_ARM64)
+  const float kSpeechProbabilityReference = -4.0f;
+  const float kNoiseEstimateReference[] =
+      {2534.461914f, 6277.638672f, 14367.499023f};
+  const float kOutputReference[] = {0.003449f, 0.004334f, 0.004303f};
+#elif defined(WEBRTC_ARCH_ARM)
+  const float kSpeechProbabilityReference = -4.0f;
+  const float kNoiseEstimateReference[] =
+      {2534.461914f, 6277.638672f, 14367.499023f};
+  const float kOutputReference[] = {0.003449f, 0.004334f, 0.004303f};
+#else
+  const float kSpeechProbabilityReference = 0.71672988f;
+  const float kNoiseEstimateReference[] =
+      {2151.313965f, 6509.765137f, 15658.848633f};
+  const float kOutputReference[] = {0.003574f, 0.004494f, 0.004499f};
+#endif
+
+  RunBitexactnessTest(16000, 1, NoiseSuppression::Level::kLow,
+                      kSpeechProbabilityReference, kNoiseEstimateReference,
+                      kOutputReference);
+}
+
+TEST(NoiseSuppresionBitExactnessTest, Mono32kHzLow) {
+#if defined(WEBRTC_ARCH_ARM64)
+  const float kSpeechProbabilityReference = -4.0f;
+  const float kNoiseEstimateReference[] =
+      {2540.059082f, 6317.822754f, 14440.845703f};
+  const float kOutputReference[] = {0.001679f, 0.002411f, 0.002594f};
+#elif defined(WEBRTC_ARCH_ARM)
+  const float kSpeechProbabilityReference = -4.0f;
+  const float kNoiseEstimateReference[] =
+      {2540.059082f, 6317.822754f, 14440.845703f};
+  const float kOutputReference[] = {0.001679f, 0.002411f, 0.002594f};
+#else
+  const float kSpeechProbabilityReference = 0.67999554f;
+  const float kNoiseEstimateReference[] =
+      {2149.780518f, 7076.936035f, 14939.945312f};
+  const float kOutputReference[] = {0.001221f, 0.001984f, 0.002228f};
+#endif
+
+  RunBitexactnessTest(32000, 1, NoiseSuppression::Level::kLow,
+                      kSpeechProbabilityReference, kNoiseEstimateReference,
+                      kOutputReference);
+}
+
+TEST(NoiseSuppresionBitExactnessTest, Mono48kHzLow) {
+#if defined(WEBRTC_ARCH_ARM64)
+  const float kSpeechProbabilityReference = -4.0f;
+  const float kNoiseEstimateReference[] =
+      {2564.605713f, 6213.656250f, 13372.284180f};
+  const float kOutputReference[] = {-0.013185f, -0.012769f, -0.012023f};
+#elif defined(WEBRTC_ARCH_ARM)
+  const float kSpeechProbabilityReference = -4.0f;
+  const float kNoiseEstimateReference[] =
+      {2564.605713f, 6213.656250f, 13372.284180f};
+  const float kOutputReference[] = {-0.013185f, -0.012769f, -0.012023f};
+#else
+  const float kSpeechProbabilityReference = 0.70645678f;
+  const float kNoiseEstimateReference[] =
+      {2168.783203f, 6902.895508f, 13190.677734f};
+  const float kOutputReference[] = {-0.013062f, -0.012657f, -0.011934f};
+#endif
+
+  RunBitexactnessTest(48000, 1, NoiseSuppression::Level::kLow,
+                      kSpeechProbabilityReference, kNoiseEstimateReference,
+                      kOutputReference);
+}
+
+TEST(NoiseSuppresionBitExactnessTest, Stereo16kHzLow) {
+#if defined(WEBRTC_ARCH_ARM64)
+  const float kSpeechProbabilityReference = -4.0f;
+  const float kNoiseEstimateReference[] =
+      {9992.127930f, 12689.569336f, 11589.296875f};
+  const float kOutputReference[] = {-0.011108f, -0.007904f, -0.012390f,
+                                    -0.002441f, 0.000855f,  -0.003204f};
+#elif defined(WEBRTC_ARCH_ARM)
+  const float kSpeechProbabilityReference = -4.0f;
+  const float kNoiseEstimateReference[] =
+      {10321.353516f, 12133.852539f, 10923.060547f};
+  const float kOutputReference[] = {-0.011108f, -0.007904f, -0.012390f,
+                                    -0.002472f, 0.000916f,  -0.003235f};
+#else
+  const float kSpeechProbabilityReference = 0.67230678f;
+  const float kNoiseEstimateReference[] =
+      {9771.250000f, 11329.377930f, 10503.052734f};
+  const float kOutputReference[] = {-0.011459f, -0.008110f, -0.012728f,
+                                    -0.002399f, 0.001018f,  -0.003189f};
+#endif
+
+  RunBitexactnessTest(16000, 2, NoiseSuppression::Level::kLow,
+                      kSpeechProbabilityReference, kNoiseEstimateReference,
+                      kOutputReference);
+}
+
+TEST(NoiseSuppresionBitExactnessTest, Mono16kHzModerate) {
+#if defined(WEBRTC_ARCH_ARM64)
+  const float kSpeechProbabilityReference = -4.0f;
+  const float kNoiseEstimateReference[] =
+      {2057.085938f, 7601.055176f, 19666.187500f};
+  const float kOutputReference[] = {0.004669f, 0.005524f, 0.005432f};
+#elif defined(WEBRTC_ARCH_ARM)
+  const float kSpeechProbabilityReference = -4.0f;
+  const float kNoiseEstimateReference[] =
+      {2244.497803f, 6864.164062f, 16726.523438f};
+  const float kOutputReference[] = {0.004669f, 0.005615f, 0.005585f};
+#else
+  const float kSpeechProbabilityReference = 0.70897013f;
+  const float kNoiseEstimateReference[] =
+      {2171.490723f, 6553.567871f, 15626.562500f};
+  const float kOutputReference[] = {0.004513f, 0.005590f, 0.005614f};
+#endif
+
+  RunBitexactnessTest(16000, 1, NoiseSuppression::Level::kModerate,
+                      kSpeechProbabilityReference, kNoiseEstimateReference,
+                      kOutputReference);
+}
+
+TEST(NoiseSuppresionBitExactnessTest, Mono16kHzHigh) {
+#if defined(WEBRTC_ARCH_ARM64)
+  const float kSpeechProbabilityReference = -4.0f;
+  const float kNoiseEstimateReference[] =
+      {2095.148193f, 7698.553711f, 19689.533203f};
+  const float kOutputReference[] = {0.004639f, 0.005402f, 0.005310f};
+#elif defined(WEBRTC_ARCH_ARM)
+  const float kSpeechProbabilityReference = -4.0f;
+  const float kNoiseEstimateReference[] =
+      {2282.515625f, 6984.408203f, 16920.960938f};
+  const float kOutputReference[] = {0.004547f, 0.005432f, 0.005402f};
+#else
+  const float kSpeechProbabilityReference = 0.70106733f;
+  const float kNoiseEstimateReference[] =
+      {2224.968506f, 6712.025879f, 15785.087891f};
+  const float kOutputReference[] = {0.004394f, 0.005406f, 0.005416f};
+#endif
+
+  RunBitexactnessTest(16000, 1, NoiseSuppression::Level::kHigh,
+                      kSpeechProbabilityReference, kNoiseEstimateReference,
+                      kOutputReference);
+}
+
+TEST(NoiseSuppresionBitExactnessTest, Mono16kHzVeryHigh) {
+#if defined(WEBRTC_ARCH_ARM64)
+  const float kSpeechProbabilityReference = -4.0f;
+  const float kNoiseEstimateReference[] =
+      {2677.733398f, 6186.987305f, 14365.744141f};
+  const float kOutputReference[] = {0.004273f, 0.005127f, 0.005188f};
+#elif defined(WEBRTC_ARCH_ARM)
+  const float kSpeechProbabilityReference = -4.0f;
+  const float kNoiseEstimateReference[] =
+      {2677.733398f, 6186.987305f, 14365.744141f};
+  const float kOutputReference[] = {0.004273f, 0.005127f, 0.005188f};
+#else
+  const float kSpeechProbabilityReference = 0.70281971f;
+  const float kNoiseEstimateReference[] =
+      {2254.347900f, 6723.699707f, 15771.625977f};
+  const float kOutputReference[] = {0.004321f, 0.005247f, 0.005263f};
+#endif
+
+  RunBitexactnessTest(16000, 1, NoiseSuppression::Level::kVeryHigh,
+                      kSpeechProbabilityReference, kNoiseEstimateReference,
+                      kOutputReference);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/ns/defines.h b/modules/audio_processing/ns/defines.h
new file mode 100644
index 0000000..66b45a9
--- /dev/null
+++ b/modules/audio_processing/ns/defines.h
@@ -0,0 +1,49 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_DEFINES_H_
+#define MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_DEFINES_H_
+
+#define BLOCKL_MAX          160 // max processing block length: 160
+#define ANAL_BLOCKL_MAX     256 // max analysis block length: 256
+#define HALF_ANAL_BLOCKL    129 // half max analysis block length + 1
+#define NUM_HIGH_BANDS_MAX  2   // max number of high bands: 2
+
+#define QUANTILE            (float)0.25
+
+#define SIMULT              3
+#define END_STARTUP_LONG    200
+#define END_STARTUP_SHORT   50
+#define FACTOR              (float)40.0
+#define WIDTH               (float)0.01
+
+// Length of fft work arrays.
+#define IP_LENGTH (ANAL_BLOCKL_MAX >> 1) // must be at least ceil(2 + sqrt(ANAL_BLOCKL_MAX/2))
+#define W_LENGTH (ANAL_BLOCKL_MAX >> 1)
+
+//PARAMETERS FOR NEW METHOD
+#define DD_PR_SNR           (float)0.98 // DD update of prior SNR
+#define LRT_TAVG            (float)0.50 // tavg parameter for LRT (previously 0.90)
+#define SPECT_FL_TAVG       (float)0.30 // tavg parameter for spectral flatness measure
+#define SPECT_DIFF_TAVG     (float)0.30 // tavg parameter for spectral difference measure
+#define PRIOR_UPDATE        (float)0.10 // update parameter of prior model
+#define NOISE_UPDATE        (float)0.90 // update parameter for noise
+#define SPEECH_UPDATE       (float)0.99 // update parameter when likely speech
+#define WIDTH_PR_MAP        (float)4.0  // width parameter in sigmoid map for prior model
+#define LRT_FEATURE_THR     (float)0.5  // default threshold for LRT feature
+#define SF_FEATURE_THR      (float)0.5  // default threshold for Spectral Flatness feature
+#define SD_FEATURE_THR      (float)0.5  // default threshold for Spectral Difference feature
+#define PROB_RANGE          (float)0.20 // probability threshold for noise state in
+                                        // speech/noise likelihood
+#define HIST_PAR_EST         1000       // histogram size for estimation of parameters
+#define GAMMA_PAUSE         (float)0.05 // update for conservative noise estimate
+//
+#define B_LIM               (float)0.5  // threshold in final energy gain factor calculation
+#endif // MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_DEFINES_H_
diff --git a/modules/audio_processing/ns/noise_suppression.c b/modules/audio_processing/ns/noise_suppression.c
new file mode 100644
index 0000000..e21416f
--- /dev/null
+++ b/modules/audio_processing/ns/noise_suppression.c
@@ -0,0 +1,71 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/ns/noise_suppression.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_processing/ns/defines.h"
+#include "modules/audio_processing/ns/ns_core.h"
+
+NsHandle* WebRtcNs_Create() {
+  NoiseSuppressionC* self = malloc(sizeof(NoiseSuppressionC));
+  self->initFlag = 0;
+  return (NsHandle*)self;
+}
+
+void WebRtcNs_Free(NsHandle* NS_inst) {
+  free(NS_inst);
+}
+
+int WebRtcNs_Init(NsHandle* NS_inst, uint32_t fs) {
+  return WebRtcNs_InitCore((NoiseSuppressionC*)NS_inst, fs);
+}
+
+int WebRtcNs_set_policy(NsHandle* NS_inst, int mode) {
+  return WebRtcNs_set_policy_core((NoiseSuppressionC*)NS_inst, mode);
+}
+
+void WebRtcNs_Analyze(NsHandle* NS_inst, const float* spframe) {
+  WebRtcNs_AnalyzeCore((NoiseSuppressionC*)NS_inst, spframe);
+}
+
+void WebRtcNs_Process(NsHandle* NS_inst,
+                      const float* const* spframe,
+                      size_t num_bands,
+                      float* const* outframe) {
+  WebRtcNs_ProcessCore((NoiseSuppressionC*)NS_inst, spframe, num_bands,
+                       outframe);
+}
+
+float WebRtcNs_prior_speech_probability(NsHandle* handle) {
+  NoiseSuppressionC* self = (NoiseSuppressionC*)handle;
+  if (handle == NULL) {
+    return -1;
+  }
+  if (self->initFlag == 0) {
+    return -1;
+  }
+  return self->priorSpeechProb;
+}
+
+const float* WebRtcNs_noise_estimate(const NsHandle* handle) {
+  const NoiseSuppressionC* self = (const NoiseSuppressionC*)handle;
+  if (handle == NULL || self->initFlag == 0) {
+    return NULL;
+  }
+  return self->noise;
+}
+
+size_t WebRtcNs_num_freq() {
+  return HALF_ANAL_BLOCKL;
+}
diff --git a/modules/audio_processing/ns/noise_suppression.h b/modules/audio_processing/ns/noise_suppression.h
new file mode 100644
index 0000000..a167142
--- /dev/null
+++ b/modules/audio_processing/ns/noise_suppression.h
@@ -0,0 +1,135 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSION_H_
+#define MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSION_H_
+
+#include <stddef.h>
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+typedef struct NsHandleT NsHandle;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * This function creates an instance of the floating point Noise Suppression.
+ */
+NsHandle* WebRtcNs_Create();
+
+/*
+ * This function frees the dynamic memory of a specified noise suppression
+ * instance.
+ *
+ * Input:
+ *      - NS_inst       : Pointer to NS instance that should be freed
+ */
+void WebRtcNs_Free(NsHandle* NS_inst);
+
+/*
+ * This function initializes a NS instance and has to be called before any other
+ * processing is made.
+ *
+ * Input:
+ *      - NS_inst       : Instance that should be initialized
+ *      - fs            : sampling frequency
+ *
+ * Output:
+ *      - NS_inst       : Initialized instance
+ *
+ * Return value         :  0 - Ok
+ *                        -1 - Error
+ */
+int WebRtcNs_Init(NsHandle* NS_inst, uint32_t fs);
+
+/*
+ * This changes the aggressiveness of the noise suppression method.
+ *
+ * Input:
+ *      - NS_inst       : Noise suppression instance.
+ *      - mode          : 0: Mild, 1: Medium , 2: Aggressive
+ *
+ * Output:
+ *      - NS_inst       : Updated instance.
+ *
+ * Return value         :  0 - Ok
+ *                        -1 - Error
+ */
+int WebRtcNs_set_policy(NsHandle* NS_inst, int mode);
+
+/*
+ * This functions estimates the background noise for the inserted speech frame.
+ * The input and output signals should always be 10ms (80 or 160 samples).
+ *
+ * Input
+ *      - NS_inst       : Noise suppression instance.
+ *      - spframe       : Pointer to speech frame buffer for L band
+ *
+ * Output:
+ *      - NS_inst       : Updated NS instance
+ */
+void WebRtcNs_Analyze(NsHandle* NS_inst, const float* spframe);
+
+/*
+ * This functions does Noise Suppression for the inserted speech frame. The
+ * input and output signals should always be 10ms (80 or 160 samples).
+ *
+ * Input
+ *      - NS_inst       : Noise suppression instance.
+ *      - spframe       : Pointer to speech frame buffer for each band
+ *      - num_bands     : Number of bands
+ *
+ * Output:
+ *      - NS_inst       : Updated NS instance
+ *      - outframe      : Pointer to output frame for each band
+ */
+void WebRtcNs_Process(NsHandle* NS_inst,
+                     const float* const* spframe,
+                     size_t num_bands,
+                     float* const* outframe);
+
+/* Returns the internally used prior speech probability of the current frame.
+ * There is a frequency bin based one as well, with which this should not be
+ * confused.
+ *
+ * Input
+ *      - handle        : Noise suppression instance.
+ *
+ * Return value         : Prior speech probability in interval [0.0, 1.0].
+ *                        -1 - NULL pointer or uninitialized instance.
+ */
+float WebRtcNs_prior_speech_probability(NsHandle* handle);
+
+/* Returns a pointer to the noise estimate per frequency bin. The number of
+ * frequency bins can be provided using WebRtcNs_num_freq().
+ *
+ * Input
+ *      - handle        : Noise suppression instance.
+ *
+ * Return value         : Pointer to the noise estimate per frequency bin.
+ *                        Returns NULL if the input is a NULL pointer or an
+ *                        uninitialized instance.
+ */
+const float* WebRtcNs_noise_estimate(const NsHandle* handle);
+
+/* Returns the number of frequency bins, which is the length of the noise
+ * estimate for example.
+ *
+ * Return value         : Number of frequency bins.
+ */
+size_t WebRtcNs_num_freq();
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSION_H_
diff --git a/modules/audio_processing/ns/noise_suppression_x.c b/modules/audio_processing/ns/noise_suppression_x.c
new file mode 100644
index 0000000..1fd3ebc
--- /dev/null
+++ b/modules/audio_processing/ns/noise_suppression_x.c
@@ -0,0 +1,61 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/ns/noise_suppression_x.h"
+
+#include <stdlib.h>
+
+#include "common_audio/signal_processing/include/real_fft.h"
+#include "modules/audio_processing/ns/nsx_core.h"
+#include "modules/audio_processing/ns/nsx_defines.h"
+
+NsxHandle* WebRtcNsx_Create() {
+  NoiseSuppressionFixedC* self = malloc(sizeof(NoiseSuppressionFixedC));
+  WebRtcSpl_Init();
+  self->real_fft = NULL;
+  self->initFlag = 0;
+  return (NsxHandle*)self;
+}
+
+void WebRtcNsx_Free(NsxHandle* nsxInst) {
+  WebRtcSpl_FreeRealFFT(((NoiseSuppressionFixedC*)nsxInst)->real_fft);
+  free(nsxInst);
+}
+
+int WebRtcNsx_Init(NsxHandle* nsxInst, uint32_t fs) {
+  return WebRtcNsx_InitCore((NoiseSuppressionFixedC*)nsxInst, fs);
+}
+
+int WebRtcNsx_set_policy(NsxHandle* nsxInst, int mode) {
+  return WebRtcNsx_set_policy_core((NoiseSuppressionFixedC*)nsxInst, mode);
+}
+
+void WebRtcNsx_Process(NsxHandle* nsxInst,
+                      const short* const* speechFrame,
+                      int num_bands,
+                      short* const* outFrame) {
+  WebRtcNsx_ProcessCore((NoiseSuppressionFixedC*)nsxInst, speechFrame,
+                        num_bands, outFrame);
+}
+
+const uint32_t* WebRtcNsx_noise_estimate(const NsxHandle* nsxInst,
+                                         int* q_noise) {
+  *q_noise = 11;
+  const NoiseSuppressionFixedC* self = (const NoiseSuppressionFixedC*)nsxInst;
+  if (nsxInst == NULL || self->initFlag == 0) {
+    return NULL;
+  }
+  *q_noise += self->prevQNoise;
+  return self->prevNoiseU32;
+}
+
+size_t WebRtcNsx_num_freq() {
+  return HALF_ANAL_BLOCKL;
+}
diff --git a/modules/audio_processing/ns/noise_suppression_x.h b/modules/audio_processing/ns/noise_suppression_x.h
new file mode 100644
index 0000000..838861d
--- /dev/null
+++ b/modules/audio_processing/ns/noise_suppression_x.h
@@ -0,0 +1,113 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSION_X_H_
+#define MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSION_X_H_
+
+#include <stddef.h>
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+typedef struct NsxHandleT NsxHandle;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * This function creates an instance of the fixed point Noise Suppression.
+ */
+NsxHandle* WebRtcNsx_Create();
+
+/*
+ * This function frees the dynamic memory of a specified Noise Suppression
+ * instance.
+ *
+ * Input:
+ *      - nsxInst       : Pointer to NS instance that should be freed
+ */
+void WebRtcNsx_Free(NsxHandle* nsxInst);
+
+/*
+ * This function initializes a NS instance
+ *
+ * Input:
+ *      - nsxInst       : Instance that should be initialized
+ *      - fs            : sampling frequency
+ *
+ * Output:
+ *      - nsxInst       : Initialized instance
+ *
+ * Return value         :  0 - Ok
+ *                        -1 - Error
+ */
+int WebRtcNsx_Init(NsxHandle* nsxInst, uint32_t fs);
+
+/*
+ * This changes the aggressiveness of the noise suppression method.
+ *
+ * Input:
+ *      - nsxInst       : Instance that should be initialized
+ *      - mode          : 0: Mild, 1: Medium , 2: Aggressive
+ *
+ * Output:
+ *      - nsxInst       : Initialized instance
+ *
+ * Return value         :  0 - Ok
+ *                        -1 - Error
+ */
+int WebRtcNsx_set_policy(NsxHandle* nsxInst, int mode);
+
+/*
+ * This functions does noise suppression for the inserted speech frame. The
+ * input and output signals should always be 10ms (80 or 160 samples).
+ *
+ * Input
+ *      - nsxInst       : NSx instance. Needs to be initiated before call.
+ *      - speechFrame   : Pointer to speech frame buffer for each band
+ *      - num_bands     : Number of bands
+ *
+ * Output:
+ *      - nsxInst       : Updated NSx instance
+ *      - outFrame      : Pointer to output frame for each band
+ */
+void WebRtcNsx_Process(NsxHandle* nsxInst,
+                       const short* const* speechFrame,
+                       int num_bands,
+                       short* const* outFrame);
+
+/* Returns a pointer to the noise estimate per frequency bin. The number of
+ * frequency bins can be provided using WebRtcNsx_num_freq().
+ *
+ * Input
+ *      - nsxInst       : NSx instance. Needs to be initiated before call.
+ *      - q_noise       : Q value of the noise estimate, which is the number of
+ *                        bits that it needs to be right-shifted to be
+ *                        normalized.
+ *
+ * Return value         : Pointer to the noise estimate per frequency bin.
+ *                        Returns NULL if the input is a NULL pointer or an
+ *                        uninitialized instance.
+ */
+const uint32_t* WebRtcNsx_noise_estimate(const NsxHandle* nsxInst,
+                                         int* q_noise);
+
+/* Returns the number of frequency bins, which is the length of the noise
+ * estimate for example.
+ *
+ * Return value         : Number of frequency bins.
+ */
+size_t WebRtcNsx_num_freq();
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSION_X_H_
diff --git a/modules/audio_processing/ns/ns_core.c b/modules/audio_processing/ns/ns_core.c
new file mode 100644
index 0000000..c87713a
--- /dev/null
+++ b/modules/audio_processing/ns/ns_core.c
@@ -0,0 +1,1420 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "rtc_base/checks.h"
+#include "common_audio/fft4g.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_processing/ns/noise_suppression.h"
+#include "modules/audio_processing/ns/ns_core.h"
+#include "modules/audio_processing/ns/windows_private.h"
+
+// Set Feature Extraction Parameters.
+static void set_feature_extraction_parameters(NoiseSuppressionC* self) {
+  // Bin size of histogram.
+  self->featureExtractionParams.binSizeLrt = 0.1f;
+  self->featureExtractionParams.binSizeSpecFlat = 0.05f;
+  self->featureExtractionParams.binSizeSpecDiff = 0.1f;
+
+  // Range of histogram over which LRT threshold is computed.
+  self->featureExtractionParams.rangeAvgHistLrt = 1.f;
+
+  // Scale parameters: multiply dominant peaks of the histograms by scale factor
+  // to obtain thresholds for prior model.
+  // For LRT and spectral difference.
+  self->featureExtractionParams.factor1ModelPars = 1.2f;
+  // For spectral_flatness: used when noise is flatter than speech.
+  self->featureExtractionParams.factor2ModelPars = 0.9f;
+
+  // Peak limit for spectral flatness (varies between 0 and 1).
+  self->featureExtractionParams.thresPosSpecFlat = 0.6f;
+
+  // Limit on spacing of two highest peaks in histogram: spacing determined by
+  // bin size.
+  self->featureExtractionParams.limitPeakSpacingSpecFlat =
+      2 * self->featureExtractionParams.binSizeSpecFlat;
+  self->featureExtractionParams.limitPeakSpacingSpecDiff =
+      2 * self->featureExtractionParams.binSizeSpecDiff;
+
+  // Limit on relevance of second peak.
+  self->featureExtractionParams.limitPeakWeightsSpecFlat = 0.5f;
+  self->featureExtractionParams.limitPeakWeightsSpecDiff = 0.5f;
+
+  // Fluctuation limit of LRT feature.
+  self->featureExtractionParams.thresFluctLrt = 0.05f;
+
+  // Limit on the max and min values for the feature thresholds.
+  self->featureExtractionParams.maxLrt = 1.f;
+  self->featureExtractionParams.minLrt = 0.2f;
+
+  self->featureExtractionParams.maxSpecFlat = 0.95f;
+  self->featureExtractionParams.minSpecFlat = 0.1f;
+
+  self->featureExtractionParams.maxSpecDiff = 1.f;
+  self->featureExtractionParams.minSpecDiff = 0.16f;
+
+  // Criteria of weight of histogram peak to accept/reject feature.
+  self->featureExtractionParams.thresWeightSpecFlat =
+      (int)(0.3 * (self->modelUpdatePars[1]));  // For spectral flatness.
+  self->featureExtractionParams.thresWeightSpecDiff =
+      (int)(0.3 * (self->modelUpdatePars[1]));  // For spectral difference.
+}
+
+// Initialize state.
+int WebRtcNs_InitCore(NoiseSuppressionC* self, uint32_t fs) {
+  int i;
+  // Check for valid pointer.
+  if (self == NULL) {
+    return -1;
+  }
+
+  // Initialization of struct.
+  if (fs == 8000 || fs == 16000 || fs == 32000 || fs == 48000) {
+    self->fs = fs;
+  } else {
+    return -1;
+  }
+  self->windShift = 0;
+  // We only support 10ms frames.
+  if (fs == 8000) {
+    self->blockLen = 80;
+    self->anaLen = 128;
+    self->window = kBlocks80w128;
+  } else {
+    self->blockLen = 160;
+    self->anaLen = 256;
+    self->window = kBlocks160w256;
+  }
+  self->magnLen = self->anaLen / 2 + 1;  // Number of frequency bins.
+
+  // Initialize FFT work arrays.
+  self->ip[0] = 0;  // Setting this triggers initialization.
+  memset(self->dataBuf, 0, sizeof(float) * ANAL_BLOCKL_MAX);
+  WebRtc_rdft(self->anaLen, 1, self->dataBuf, self->ip, self->wfft);
+
+  memset(self->analyzeBuf, 0, sizeof(float) * ANAL_BLOCKL_MAX);
+  memset(self->dataBuf, 0, sizeof(float) * ANAL_BLOCKL_MAX);
+  memset(self->syntBuf, 0, sizeof(float) * ANAL_BLOCKL_MAX);
+
+  // For HB processing.
+  memset(self->dataBufHB,
+         0,
+         sizeof(float) * NUM_HIGH_BANDS_MAX * ANAL_BLOCKL_MAX);
+
+  // For quantile noise estimation.
+  memset(self->quantile, 0, sizeof(float) * HALF_ANAL_BLOCKL);
+  for (i = 0; i < SIMULT * HALF_ANAL_BLOCKL; i++) {
+    self->lquantile[i] = 8.f;
+    self->density[i] = 0.3f;
+  }
+
+  for (i = 0; i < SIMULT; i++) {
+    self->counter[i] =
+        (int)floor((float)(END_STARTUP_LONG * (i + 1)) / (float)SIMULT);
+  }
+
+  self->updates = 0;
+
+  // Wiener filter initialization.
+  for (i = 0; i < HALF_ANAL_BLOCKL; i++) {
+    self->smooth[i] = 1.f;
+  }
+
+  // Set the aggressiveness: default.
+  self->aggrMode = 0;
+
+  // Initialize variables for new method.
+  self->priorSpeechProb = 0.5f;  // Prior prob for speech/noise.
+  // Previous analyze mag spectrum.
+  memset(self->magnPrevAnalyze, 0, sizeof(float) * HALF_ANAL_BLOCKL);
+  // Previous process mag spectrum.
+  memset(self->magnPrevProcess, 0, sizeof(float) * HALF_ANAL_BLOCKL);
+  // Current noise-spectrum.
+  memset(self->noise, 0, sizeof(float) * HALF_ANAL_BLOCKL);
+  // Previous noise-spectrum.
+  memset(self->noisePrev, 0, sizeof(float) * HALF_ANAL_BLOCKL);
+  // Conservative noise spectrum estimate.
+  memset(self->magnAvgPause, 0, sizeof(float) * HALF_ANAL_BLOCKL);
+  // For estimation of HB in second pass.
+  memset(self->speechProb, 0, sizeof(float) * HALF_ANAL_BLOCKL);
+  // Initial average magnitude spectrum.
+  memset(self->initMagnEst, 0, sizeof(float) * HALF_ANAL_BLOCKL);
+  for (i = 0; i < HALF_ANAL_BLOCKL; i++) {
+    // Smooth LR (same as threshold).
+    self->logLrtTimeAvg[i] = LRT_FEATURE_THR;
+  }
+
+  // Feature quantities.
+  // Spectral flatness (start on threshold).
+  self->featureData[0] = SF_FEATURE_THR;
+  self->featureData[1] = 0.f;  // Spectral entropy: not used in this version.
+  self->featureData[2] = 0.f;  // Spectral variance: not used in this version.
+  // Average LRT factor (start on threshold).
+  self->featureData[3] = LRT_FEATURE_THR;
+  // Spectral template diff (start on threshold).
+  self->featureData[4] = SF_FEATURE_THR;
+  self->featureData[5] = 0.f;  // Normalization for spectral difference.
+  // Window time-average of input magnitude spectrum.
+  self->featureData[6] = 0.f;
+
+  memset(self->parametricNoise, 0, sizeof(float) * HALF_ANAL_BLOCKL);
+
+  // Histogram quantities: used to estimate/update thresholds for features.
+  memset(self->histLrt, 0, sizeof(int) * HIST_PAR_EST);
+  memset(self->histSpecFlat, 0, sizeof(int) * HIST_PAR_EST);
+  memset(self->histSpecDiff, 0, sizeof(int) * HIST_PAR_EST);
+
+
+  self->blockInd = -1;  // Frame counter.
+  // Default threshold for LRT feature.
+  self->priorModelPars[0] = LRT_FEATURE_THR;
+  // Threshold for spectral flatness: determined on-line.
+  self->priorModelPars[1] = 0.5f;
+  // sgn_map par for spectral measure: 1 for flatness measure.
+  self->priorModelPars[2] = 1.f;
+  // Threshold for template-difference feature: determined on-line.
+  self->priorModelPars[3] = 0.5f;
+  // Default weighting parameter for LRT feature.
+  self->priorModelPars[4] = 1.f;
+  // Default weighting parameter for spectral flatness feature.
+  self->priorModelPars[5] = 0.f;
+  // Default weighting parameter for spectral difference feature.
+  self->priorModelPars[6] = 0.f;
+
+  // Update flag for parameters:
+  // 0 no update, 1 = update once, 2 = update every window.
+  self->modelUpdatePars[0] = 2;
+  self->modelUpdatePars[1] = 500;  // Window for update.
+  // Counter for update of conservative noise spectrum.
+  self->modelUpdatePars[2] = 0;
+  // Counter if the feature thresholds are updated during the sequence.
+  self->modelUpdatePars[3] = self->modelUpdatePars[1];
+
+  self->signalEnergy = 0.0;
+  self->sumMagn = 0.0;
+  self->whiteNoiseLevel = 0.0;
+  self->pinkNoiseNumerator = 0.0;
+  self->pinkNoiseExp = 0.0;
+
+  set_feature_extraction_parameters(self);
+
+  // Default mode.
+  WebRtcNs_set_policy_core(self, 0);
+
+  self->initFlag = 1;
+  return 0;
+}
+
+// Estimate noise.
+static void NoiseEstimation(NoiseSuppressionC* self,
+                            float* magn,
+                            float* noise) {
+  size_t i, s, offset;
+  float lmagn[HALF_ANAL_BLOCKL], delta;
+
+  if (self->updates < END_STARTUP_LONG) {
+    self->updates++;
+  }
+
+  for (i = 0; i < self->magnLen; i++) {
+    lmagn[i] = (float)log(magn[i]);
+  }
+
+  // Loop over simultaneous estimates.
+  for (s = 0; s < SIMULT; s++) {
+    offset = s * self->magnLen;
+
+    // newquantest(...)
+    for (i = 0; i < self->magnLen; i++) {
+      // Compute delta.
+      if (self->density[offset + i] > 1.0) {
+        delta = FACTOR * 1.f / self->density[offset + i];
+      } else {
+        delta = FACTOR;
+      }
+
+      // Update log quantile estimate.
+      if (lmagn[i] > self->lquantile[offset + i]) {
+        self->lquantile[offset + i] +=
+            QUANTILE * delta / (float)(self->counter[s] + 1);
+      } else {
+        self->lquantile[offset + i] -=
+            (1.f - QUANTILE) * delta / (float)(self->counter[s] + 1);
+      }
+
+      // Update density estimate.
+      if (fabs(lmagn[i] - self->lquantile[offset + i]) < WIDTH) {
+        self->density[offset + i] =
+            ((float)self->counter[s] * self->density[offset + i] +
+             1.f / (2.f * WIDTH)) /
+            (float)(self->counter[s] + 1);
+      }
+    }  // End loop over magnitude spectrum.
+
+    if (self->counter[s] >= END_STARTUP_LONG) {
+      self->counter[s] = 0;
+      if (self->updates >= END_STARTUP_LONG) {
+        for (i = 0; i < self->magnLen; i++) {
+          self->quantile[i] = (float)exp(self->lquantile[offset + i]);
+        }
+      }
+    }
+
+    self->counter[s]++;
+  }  // End loop over simultaneous estimates.
+
+  // Sequentially update the noise during startup.
+  if (self->updates < END_STARTUP_LONG) {
+    // Use the last "s" to get noise during startup that differ from zero.
+    for (i = 0; i < self->magnLen; i++) {
+      self->quantile[i] = (float)exp(self->lquantile[offset + i]);
+    }
+  }
+
+  for (i = 0; i < self->magnLen; i++) {
+    noise[i] = self->quantile[i];
+  }
+}
+
+// Extract thresholds for feature parameters.
+// Histograms are computed over some window size (given by
+// self->modelUpdatePars[1]).
+// Thresholds and weights are extracted every window.
+// |flag| = 0 updates histogram only, |flag| = 1 computes the threshold/weights.
+// Threshold and weights are returned in: self->priorModelPars.
+static void FeatureParameterExtraction(NoiseSuppressionC* self, int flag) {
+  int i, useFeatureSpecFlat, useFeatureSpecDiff, numHistLrt;
+  int maxPeak1, maxPeak2;
+  int weightPeak1SpecFlat, weightPeak2SpecFlat, weightPeak1SpecDiff,
+      weightPeak2SpecDiff;
+
+  float binMid, featureSum;
+  float posPeak1SpecFlat, posPeak2SpecFlat, posPeak1SpecDiff, posPeak2SpecDiff;
+  float fluctLrt, avgHistLrt, avgSquareHistLrt, avgHistLrtCompl;
+
+  // 3 features: LRT, flatness, difference.
+  // lrt_feature = self->featureData[3];
+  // flat_feature = self->featureData[0];
+  // diff_feature = self->featureData[4];
+
+  // Update histograms.
+  if (flag == 0) {
+    // LRT
+    if ((self->featureData[3] <
+         HIST_PAR_EST * self->featureExtractionParams.binSizeLrt) &&
+        (self->featureData[3] >= 0.0)) {
+      i = (int)(self->featureData[3] /
+                self->featureExtractionParams.binSizeLrt);
+      self->histLrt[i]++;
+    }
+    // Spectral flatness.
+    if ((self->featureData[0] <
+         HIST_PAR_EST * self->featureExtractionParams.binSizeSpecFlat) &&
+        (self->featureData[0] >= 0.0)) {
+      i = (int)(self->featureData[0] /
+                self->featureExtractionParams.binSizeSpecFlat);
+      self->histSpecFlat[i]++;
+    }
+    // Spectral difference.
+    if ((self->featureData[4] <
+         HIST_PAR_EST * self->featureExtractionParams.binSizeSpecDiff) &&
+        (self->featureData[4] >= 0.0)) {
+      i = (int)(self->featureData[4] /
+                self->featureExtractionParams.binSizeSpecDiff);
+      self->histSpecDiff[i]++;
+    }
+  }
+
+  // Extract parameters for speech/noise probability.
+  if (flag == 1) {
+    // LRT feature: compute the average over
+    // self->featureExtractionParams.rangeAvgHistLrt.
+    avgHistLrt = 0.0;
+    avgHistLrtCompl = 0.0;
+    avgSquareHistLrt = 0.0;
+    numHistLrt = 0;
+    for (i = 0; i < HIST_PAR_EST; i++) {
+      binMid = ((float)i + 0.5f) * self->featureExtractionParams.binSizeLrt;
+      if (binMid <= self->featureExtractionParams.rangeAvgHistLrt) {
+        avgHistLrt += self->histLrt[i] * binMid;
+        numHistLrt += self->histLrt[i];
+      }
+      avgSquareHistLrt += self->histLrt[i] * binMid * binMid;
+      avgHistLrtCompl += self->histLrt[i] * binMid;
+    }
+    if (numHistLrt > 0) {
+      avgHistLrt = avgHistLrt / ((float)numHistLrt);
+    }
+    avgHistLrtCompl = avgHistLrtCompl / ((float)self->modelUpdatePars[1]);
+    avgSquareHistLrt = avgSquareHistLrt / ((float)self->modelUpdatePars[1]);
+    fluctLrt = avgSquareHistLrt - avgHistLrt * avgHistLrtCompl;
+    // Get threshold for LRT feature.
+    if (fluctLrt < self->featureExtractionParams.thresFluctLrt) {
+      // Very low fluctuation, so likely noise.
+      self->priorModelPars[0] = self->featureExtractionParams.maxLrt;
+    } else {
+      self->priorModelPars[0] =
+          self->featureExtractionParams.factor1ModelPars * avgHistLrt;
+      // Check if value is within min/max range.
+      if (self->priorModelPars[0] < self->featureExtractionParams.minLrt) {
+        self->priorModelPars[0] = self->featureExtractionParams.minLrt;
+      }
+      if (self->priorModelPars[0] > self->featureExtractionParams.maxLrt) {
+        self->priorModelPars[0] = self->featureExtractionParams.maxLrt;
+      }
+    }
+    // Done with LRT feature.
+
+    // For spectral flatness and spectral difference: compute the main peaks of
+    // histogram.
+    maxPeak1 = 0;
+    maxPeak2 = 0;
+    posPeak1SpecFlat = 0.0;
+    posPeak2SpecFlat = 0.0;
+    weightPeak1SpecFlat = 0;
+    weightPeak2SpecFlat = 0;
+
+    // Peaks for flatness.
+    for (i = 0; i < HIST_PAR_EST; i++) {
+      binMid =
+          (i + 0.5f) * self->featureExtractionParams.binSizeSpecFlat;
+      if (self->histSpecFlat[i] > maxPeak1) {
+        // Found new "first" peak.
+        maxPeak2 = maxPeak1;
+        weightPeak2SpecFlat = weightPeak1SpecFlat;
+        posPeak2SpecFlat = posPeak1SpecFlat;
+
+        maxPeak1 = self->histSpecFlat[i];
+        weightPeak1SpecFlat = self->histSpecFlat[i];
+        posPeak1SpecFlat = binMid;
+      } else if (self->histSpecFlat[i] > maxPeak2) {
+        // Found new "second" peak.
+        maxPeak2 = self->histSpecFlat[i];
+        weightPeak2SpecFlat = self->histSpecFlat[i];
+        posPeak2SpecFlat = binMid;
+      }
+    }
+
+    // Compute two peaks for spectral difference.
+    maxPeak1 = 0;
+    maxPeak2 = 0;
+    posPeak1SpecDiff = 0.0;
+    posPeak2SpecDiff = 0.0;
+    weightPeak1SpecDiff = 0;
+    weightPeak2SpecDiff = 0;
+    // Peaks for spectral difference.
+    for (i = 0; i < HIST_PAR_EST; i++) {
+      binMid =
+          ((float)i + 0.5f) * self->featureExtractionParams.binSizeSpecDiff;
+      if (self->histSpecDiff[i] > maxPeak1) {
+        // Found new "first" peak.
+        maxPeak2 = maxPeak1;
+        weightPeak2SpecDiff = weightPeak1SpecDiff;
+        posPeak2SpecDiff = posPeak1SpecDiff;
+
+        maxPeak1 = self->histSpecDiff[i];
+        weightPeak1SpecDiff = self->histSpecDiff[i];
+        posPeak1SpecDiff = binMid;
+      } else if (self->histSpecDiff[i] > maxPeak2) {
+        // Found new "second" peak.
+        maxPeak2 = self->histSpecDiff[i];
+        weightPeak2SpecDiff = self->histSpecDiff[i];
+        posPeak2SpecDiff = binMid;
+      }
+    }
+
+    // For spectrum flatness feature.
+    useFeatureSpecFlat = 1;
+    // Merge the two peaks if they are close.
+    if ((fabs(posPeak2SpecFlat - posPeak1SpecFlat) <
+         self->featureExtractionParams.limitPeakSpacingSpecFlat) &&
+        (weightPeak2SpecFlat >
+         self->featureExtractionParams.limitPeakWeightsSpecFlat *
+             weightPeak1SpecFlat)) {
+      weightPeak1SpecFlat += weightPeak2SpecFlat;
+      posPeak1SpecFlat = 0.5f * (posPeak1SpecFlat + posPeak2SpecFlat);
+    }
+    // Reject if weight of peaks is not large enough, or peak value too small.
+    if (weightPeak1SpecFlat <
+            self->featureExtractionParams.thresWeightSpecFlat ||
+        posPeak1SpecFlat < self->featureExtractionParams.thresPosSpecFlat) {
+      useFeatureSpecFlat = 0;
+    }
+    // If selected, get the threshold.
+    if (useFeatureSpecFlat == 1) {
+      // Compute the threshold.
+      self->priorModelPars[1] =
+          self->featureExtractionParams.factor2ModelPars * posPeak1SpecFlat;
+      // Check if value is within min/max range.
+      if (self->priorModelPars[1] < self->featureExtractionParams.minSpecFlat) {
+        self->priorModelPars[1] = self->featureExtractionParams.minSpecFlat;
+      }
+      if (self->priorModelPars[1] > self->featureExtractionParams.maxSpecFlat) {
+        self->priorModelPars[1] = self->featureExtractionParams.maxSpecFlat;
+      }
+    }
+    // Done with flatness feature.
+
+    // For template feature.
+    useFeatureSpecDiff = 1;
+    // Merge the two peaks if they are close.
+    if ((fabs(posPeak2SpecDiff - posPeak1SpecDiff) <
+         self->featureExtractionParams.limitPeakSpacingSpecDiff) &&
+        (weightPeak2SpecDiff >
+         self->featureExtractionParams.limitPeakWeightsSpecDiff *
+             weightPeak1SpecDiff)) {
+      weightPeak1SpecDiff += weightPeak2SpecDiff;
+      posPeak1SpecDiff = 0.5f * (posPeak1SpecDiff + posPeak2SpecDiff);
+    }
+    // Get the threshold value.
+    self->priorModelPars[3] =
+        self->featureExtractionParams.factor1ModelPars * posPeak1SpecDiff;
+    // Reject if weight of peaks is not large enough.
+    if (weightPeak1SpecDiff <
+        self->featureExtractionParams.thresWeightSpecDiff) {
+      useFeatureSpecDiff = 0;
+    }
+    // Check if value is within min/max range.
+    if (self->priorModelPars[3] < self->featureExtractionParams.minSpecDiff) {
+      self->priorModelPars[3] = self->featureExtractionParams.minSpecDiff;
+    }
+    if (self->priorModelPars[3] > self->featureExtractionParams.maxSpecDiff) {
+      self->priorModelPars[3] = self->featureExtractionParams.maxSpecDiff;
+    }
+    // Done with spectral difference feature.
+
+    // Don't use template feature if fluctuation of LRT feature is very low:
+    // most likely just noise state.
+    if (fluctLrt < self->featureExtractionParams.thresFluctLrt) {
+      useFeatureSpecDiff = 0;
+    }
+
+    // Select the weights between the features.
+    // self->priorModelPars[4] is weight for LRT: always selected.
+    // self->priorModelPars[5] is weight for spectral flatness.
+    // self->priorModelPars[6] is weight for spectral difference.
+    featureSum = (float)(1 + useFeatureSpecFlat + useFeatureSpecDiff);
+    self->priorModelPars[4] = 1.f / featureSum;
+    self->priorModelPars[5] = ((float)useFeatureSpecFlat) / featureSum;
+    self->priorModelPars[6] = ((float)useFeatureSpecDiff) / featureSum;
+
+    // Set hists to zero for next update.
+    if (self->modelUpdatePars[0] >= 1) {
+      for (i = 0; i < HIST_PAR_EST; i++) {
+        self->histLrt[i] = 0;
+        self->histSpecFlat[i] = 0;
+        self->histSpecDiff[i] = 0;
+      }
+    }
+  }  // End of flag == 1.
+}
+
+// Compute spectral flatness on input spectrum.
+// |magnIn| is the magnitude spectrum.
+// Spectral flatness is returned in self->featureData[0].
+static void ComputeSpectralFlatness(NoiseSuppressionC* self,
+                                    const float* magnIn) {
+  size_t i;
+  size_t shiftLP = 1;  // Option to remove first bin(s) from spectral measures.
+  float avgSpectralFlatnessNum, avgSpectralFlatnessDen, spectralTmp;
+
+  // Compute spectral measures.
+  // For flatness.
+  avgSpectralFlatnessNum = 0.0;
+  avgSpectralFlatnessDen = self->sumMagn;
+  for (i = 0; i < shiftLP; i++) {
+    avgSpectralFlatnessDen -= magnIn[i];
+  }
+  // Compute log of ratio of the geometric to arithmetic mean: check for log(0)
+  // case.
+  for (i = shiftLP; i < self->magnLen; i++) {
+    if (magnIn[i] > 0.0) {
+      avgSpectralFlatnessNum += (float)log(magnIn[i]);
+    } else {
+      self->featureData[0] -= SPECT_FL_TAVG * self->featureData[0];
+      return;
+    }
+  }
+  // Normalize.
+  avgSpectralFlatnessDen = avgSpectralFlatnessDen / self->magnLen;
+  avgSpectralFlatnessNum = avgSpectralFlatnessNum / self->magnLen;
+
+  // Ratio and inverse log: check for case of log(0).
+  spectralTmp = (float)exp(avgSpectralFlatnessNum) / avgSpectralFlatnessDen;
+
+  // Time-avg update of spectral flatness feature.
+  self->featureData[0] += SPECT_FL_TAVG * (spectralTmp - self->featureData[0]);
+  // Done with flatness feature.
+}
+
+// Compute prior and post SNR based on quantile noise estimation.
+// Compute DD estimate of prior SNR.
+// Inputs:
+//   * |magn| is the signal magnitude spectrum estimate.
+//   * |noise| is the magnitude noise spectrum estimate.
+// Outputs:
+//   * |snrLocPrior| is the computed prior SNR.
+//   * |snrLocPost| is the computed post SNR.
+static void ComputeSnr(const NoiseSuppressionC* self,
+                       const float* magn,
+                       const float* noise,
+                       float* snrLocPrior,
+                       float* snrLocPost) {
+  size_t i;
+
+  for (i = 0; i < self->magnLen; i++) {
+    // Previous post SNR.
+    // Previous estimate: based on previous frame with gain filter.
+    float previousEstimateStsa = self->magnPrevAnalyze[i] /
+        (self->noisePrev[i] + 0.0001f) * self->smooth[i];
+    // Post SNR.
+    snrLocPost[i] = 0.f;
+    if (magn[i] > noise[i]) {
+      snrLocPost[i] = magn[i] / (noise[i] + 0.0001f) - 1.f;
+    }
+    // DD estimate is sum of two terms: current estimate and previous estimate.
+    // Directed decision update of snrPrior.
+    snrLocPrior[i] =
+        DD_PR_SNR * previousEstimateStsa + (1.f - DD_PR_SNR) * snrLocPost[i];
+  }  // End of loop over frequencies.
+}
+
+// Compute the difference measure between input spectrum and a template/learned
+// noise spectrum.
+// |magnIn| is the input spectrum.
+// The reference/template spectrum is self->magnAvgPause[i].
+// Returns (normalized) spectral difference in self->featureData[4].
+static void ComputeSpectralDifference(NoiseSuppressionC* self,
+                                      const float* magnIn) {
+  // avgDiffNormMagn = var(magnIn) - cov(magnIn, magnAvgPause)^2 /
+  // var(magnAvgPause)
+  size_t i;
+  float avgPause, avgMagn, covMagnPause, varPause, varMagn, avgDiffNormMagn;
+
+  avgPause = 0.0;
+  avgMagn = self->sumMagn;
+  // Compute average quantities.
+  for (i = 0; i < self->magnLen; i++) {
+    // Conservative smooth noise spectrum from pause frames.
+    avgPause += self->magnAvgPause[i];
+  }
+  avgPause /= self->magnLen;
+  avgMagn /= self->magnLen;
+
+  covMagnPause = 0.0;
+  varPause = 0.0;
+  varMagn = 0.0;
+  // Compute variance and covariance quantities.
+  for (i = 0; i < self->magnLen; i++) {
+    covMagnPause += (magnIn[i] - avgMagn) * (self->magnAvgPause[i] - avgPause);
+    varPause +=
+        (self->magnAvgPause[i] - avgPause) * (self->magnAvgPause[i] - avgPause);
+    varMagn += (magnIn[i] - avgMagn) * (magnIn[i] - avgMagn);
+  }
+  covMagnPause /= self->magnLen;
+  varPause /= self->magnLen;
+  varMagn /= self->magnLen;
+  // Update of average magnitude spectrum.
+  self->featureData[6] += self->signalEnergy;
+
+  avgDiffNormMagn =
+      varMagn - (covMagnPause * covMagnPause) / (varPause + 0.0001f);
+  // Normalize and compute time-avg update of difference feature.
+  avgDiffNormMagn = (float)(avgDiffNormMagn / (self->featureData[5] + 0.0001f));
+  self->featureData[4] +=
+      SPECT_DIFF_TAVG * (avgDiffNormMagn - self->featureData[4]);
+}
+
+// Compute speech/noise probability.
+// Speech/noise probability is returned in |probSpeechFinal|.
+// |magn| is the input magnitude spectrum.
+// |noise| is the noise spectrum.
+// |snrLocPrior| is the prior SNR for each frequency.
+// |snrLocPost| is the post SNR for each frequency.
+static void SpeechNoiseProb(NoiseSuppressionC* self,
+                            float* probSpeechFinal,
+                            const float* snrLocPrior,
+                            const float* snrLocPost) {
+  size_t i;
+  int sgnMap;
+  float invLrt, gainPrior, indPrior;
+  float logLrtTimeAvgKsum, besselTmp;
+  float indicator0, indicator1, indicator2;
+  float tmpFloat1, tmpFloat2;
+  float weightIndPrior0, weightIndPrior1, weightIndPrior2;
+  float threshPrior0, threshPrior1, threshPrior2;
+  float widthPrior, widthPrior0, widthPrior1, widthPrior2;
+
+  widthPrior0 = WIDTH_PR_MAP;
+  // Width for pause region: lower range, so increase width in tanh map.
+  widthPrior1 = 2.f * WIDTH_PR_MAP;
+  widthPrior2 = 2.f * WIDTH_PR_MAP;  // For spectral-difference measure.
+
+  // Threshold parameters for features.
+  threshPrior0 = self->priorModelPars[0];
+  threshPrior1 = self->priorModelPars[1];
+  threshPrior2 = self->priorModelPars[3];
+
+  // Sign for flatness feature.
+  sgnMap = (int)(self->priorModelPars[2]);
+
+  // Weight parameters for features.
+  weightIndPrior0 = self->priorModelPars[4];
+  weightIndPrior1 = self->priorModelPars[5];
+  weightIndPrior2 = self->priorModelPars[6];
+
+  // Compute feature based on average LR factor.
+  // This is the average over all frequencies of the smooth log LRT.
+  logLrtTimeAvgKsum = 0.0;
+  for (i = 0; i < self->magnLen; i++) {
+    tmpFloat1 = 1.f + 2.f * snrLocPrior[i];
+    tmpFloat2 = 2.f * snrLocPrior[i] / (tmpFloat1 + 0.0001f);
+    besselTmp = (snrLocPost[i] + 1.f) * tmpFloat2;
+    self->logLrtTimeAvg[i] +=
+        LRT_TAVG * (besselTmp - (float)log(tmpFloat1) - self->logLrtTimeAvg[i]);
+    logLrtTimeAvgKsum += self->logLrtTimeAvg[i];
+  }
+  logLrtTimeAvgKsum = (float)logLrtTimeAvgKsum / (self->magnLen);
+  self->featureData[3] = logLrtTimeAvgKsum;
+  // Done with computation of LR factor.
+
+  // Compute the indicator functions.
+  // Average LRT feature.
+  widthPrior = widthPrior0;
+  // Use larger width in tanh map for pause regions.
+  if (logLrtTimeAvgKsum < threshPrior0) {
+    widthPrior = widthPrior1;
+  }
+  // Compute indicator function: sigmoid map.
+  indicator0 =
+      0.5f *
+      ((float)tanh(widthPrior * (logLrtTimeAvgKsum - threshPrior0)) + 1.f);
+
+  // Spectral flatness feature.
+  tmpFloat1 = self->featureData[0];
+  widthPrior = widthPrior0;
+  // Use larger width in tanh map for pause regions.
+  if (sgnMap == 1 && (tmpFloat1 > threshPrior1)) {
+    widthPrior = widthPrior1;
+  }
+  if (sgnMap == -1 && (tmpFloat1 < threshPrior1)) {
+    widthPrior = widthPrior1;
+  }
+  // Compute indicator function: sigmoid map.
+  indicator1 =
+      0.5f *
+      ((float)tanh((float)sgnMap * widthPrior * (threshPrior1 - tmpFloat1)) +
+       1.f);
+
+  // For template spectrum-difference.
+  tmpFloat1 = self->featureData[4];
+  widthPrior = widthPrior0;
+  // Use larger width in tanh map for pause regions.
+  if (tmpFloat1 < threshPrior2) {
+    widthPrior = widthPrior2;
+  }
+  // Compute indicator function: sigmoid map.
+  indicator2 =
+      0.5f * ((float)tanh(widthPrior * (tmpFloat1 - threshPrior2)) + 1.f);
+
+  // Combine the indicator function with the feature weights.
+  indPrior = weightIndPrior0 * indicator0 + weightIndPrior1 * indicator1 +
+             weightIndPrior2 * indicator2;
+  // Done with computing indicator function.
+
+  // Compute the prior probability.
+  self->priorSpeechProb += PRIOR_UPDATE * (indPrior - self->priorSpeechProb);
+  // Make sure probabilities are within range: keep floor to 0.01.
+  if (self->priorSpeechProb > 1.f) {
+    self->priorSpeechProb = 1.f;
+  }
+  if (self->priorSpeechProb < 0.01f) {
+    self->priorSpeechProb = 0.01f;
+  }
+
+  // Final speech probability: combine prior model with LR factor:.
+  gainPrior = (1.f - self->priorSpeechProb) / (self->priorSpeechProb + 0.0001f);
+  for (i = 0; i < self->magnLen; i++) {
+    invLrt = (float)exp(-self->logLrtTimeAvg[i]);
+    invLrt = (float)gainPrior * invLrt;
+    probSpeechFinal[i] = 1.f / (1.f + invLrt);
+  }
+}
+
+// Update the noise features.
+// Inputs:
+//   * |magn| is the signal magnitude spectrum estimate.
+//   * |updateParsFlag| is an update flag for parameters.
+static void FeatureUpdate(NoiseSuppressionC* self,
+                          const float* magn,
+                          int updateParsFlag) {
+  // Compute spectral flatness on input spectrum.
+  ComputeSpectralFlatness(self, magn);
+  // Compute difference of input spectrum with learned/estimated noise spectrum.
+  ComputeSpectralDifference(self, magn);
+  // Compute histograms for parameter decisions (thresholds and weights for
+  // features).
+  // Parameters are extracted once every window time.
+  // (=self->modelUpdatePars[1])
+  if (updateParsFlag >= 1) {
+    // Counter update.
+    self->modelUpdatePars[3]--;
+    // Update histogram.
+    if (self->modelUpdatePars[3] > 0) {
+      FeatureParameterExtraction(self, 0);
+    }
+    // Compute model parameters.
+    if (self->modelUpdatePars[3] == 0) {
+      FeatureParameterExtraction(self, 1);
+      self->modelUpdatePars[3] = self->modelUpdatePars[1];
+      // If wish to update only once, set flag to zero.
+      if (updateParsFlag == 1) {
+        self->modelUpdatePars[0] = 0;
+      } else {
+        // Update every window:
+        // Get normalization for spectral difference for next window estimate.
+        self->featureData[6] =
+            self->featureData[6] / ((float)self->modelUpdatePars[1]);
+        self->featureData[5] =
+            0.5f * (self->featureData[6] + self->featureData[5]);
+        self->featureData[6] = 0.f;
+      }
+    }
+  }
+}
+
+// Update the noise estimate.
+// Inputs:
+//   * |magn| is the signal magnitude spectrum estimate.
+//   * |snrLocPrior| is the prior SNR.
+//   * |snrLocPost| is the post SNR.
+// Output:
+//   * |noise| is the updated noise magnitude spectrum estimate.
+static void UpdateNoiseEstimate(NoiseSuppressionC* self,
+                                const float* magn,
+                                const float* snrLocPrior,
+                                const float* snrLocPost,
+                                float* noise) {
+  size_t i;
+  float probSpeech, probNonSpeech;
+  // Time-avg parameter for noise update.
+  float gammaNoiseTmp = NOISE_UPDATE;
+  float gammaNoiseOld;
+  float noiseUpdateTmp;
+
+  for (i = 0; i < self->magnLen; i++) {
+    probSpeech = self->speechProb[i];
+    probNonSpeech = 1.f - probSpeech;
+    // Temporary noise update:
+    // Use it for speech frames if update value is less than previous.
+    noiseUpdateTmp = gammaNoiseTmp * self->noisePrev[i] +
+                     (1.f - gammaNoiseTmp) * (probNonSpeech * magn[i] +
+                                              probSpeech * self->noisePrev[i]);
+    // Time-constant based on speech/noise state.
+    gammaNoiseOld = gammaNoiseTmp;
+    gammaNoiseTmp = NOISE_UPDATE;
+    // Increase gamma (i.e., less noise update) for frame likely to be speech.
+    if (probSpeech > PROB_RANGE) {
+      gammaNoiseTmp = SPEECH_UPDATE;
+    }
+    // Conservative noise update.
+    if (probSpeech < PROB_RANGE) {
+      self->magnAvgPause[i] += GAMMA_PAUSE * (magn[i] - self->magnAvgPause[i]);
+    }
+    // Noise update.
+    if (gammaNoiseTmp == gammaNoiseOld) {
+      noise[i] = noiseUpdateTmp;
+    } else {
+      noise[i] = gammaNoiseTmp * self->noisePrev[i] +
+                 (1.f - gammaNoiseTmp) * (probNonSpeech * magn[i] +
+                                          probSpeech * self->noisePrev[i]);
+      // Allow for noise update downwards:
+      // If noise update decreases the noise, it is safe, so allow it to
+      // happen.
+      if (noiseUpdateTmp < noise[i]) {
+        noise[i] = noiseUpdateTmp;
+      }
+    }
+  }  // End of freq loop.
+}
+
+// Updates |buffer| with a new |frame|.
+// Inputs:
+//   * |frame| is a new speech frame or NULL for setting to zero.
+//   * |frame_length| is the length of the new frame.
+//   * |buffer_length| is the length of the buffer.
+// Output:
+//   * |buffer| is the updated buffer.
+static void UpdateBuffer(const float* frame,
+                         size_t frame_length,
+                         size_t buffer_length,
+                         float* buffer) {
+  RTC_DCHECK_LT(buffer_length, 2 * frame_length);
+
+  memcpy(buffer,
+         buffer + frame_length,
+         sizeof(*buffer) * (buffer_length - frame_length));
+  if (frame) {
+    memcpy(buffer + buffer_length - frame_length,
+           frame,
+           sizeof(*buffer) * frame_length);
+  } else {
+    memset(buffer + buffer_length - frame_length,
+           0,
+           sizeof(*buffer) * frame_length);
+  }
+}
+
+// Transforms the signal from time to frequency domain.
+// Inputs:
+//   * |time_data| is the signal in the time domain.
+//   * |time_data_length| is the length of the analysis buffer.
+//   * |magnitude_length| is the length of the spectrum magnitude, which equals
+//     the length of both |real| and |imag| (time_data_length / 2 + 1).
+// Outputs:
+//   * |time_data| is the signal in the frequency domain.
+//   * |real| is the real part of the frequency domain.
+//   * |imag| is the imaginary part of the frequency domain.
+//   * |magn| is the calculated signal magnitude in the frequency domain.
+static void FFT(NoiseSuppressionC* self,
+                float* time_data,
+                size_t time_data_length,
+                size_t magnitude_length,
+                float* real,
+                float* imag,
+                float* magn) {
+  size_t i;
+
+  RTC_DCHECK_EQ(magnitude_length, time_data_length / 2 + 1);
+
+  WebRtc_rdft(time_data_length, 1, time_data, self->ip, self->wfft);
+
+  imag[0] = 0;
+  real[0] = time_data[0];
+  magn[0] = fabsf(real[0]) + 1.f;
+  imag[magnitude_length - 1] = 0;
+  real[magnitude_length - 1] = time_data[1];
+  magn[magnitude_length - 1] = fabsf(real[magnitude_length - 1]) + 1.f;
+  for (i = 1; i < magnitude_length - 1; ++i) {
+    real[i] = time_data[2 * i];
+    imag[i] = time_data[2 * i + 1];
+    // Magnitude spectrum.
+    magn[i] = sqrtf(real[i] * real[i] + imag[i] * imag[i]) + 1.f;
+  }
+}
+
+// Transforms the signal from frequency to time domain.
+// Inputs:
+//   * |real| is the real part of the frequency domain.
+//   * |imag| is the imaginary part of the frequency domain.
+//   * |magnitude_length| is the length of the spectrum magnitude, which equals
+//     the length of both |real| and |imag|.
+//   * |time_data_length| is the length of the analysis buffer
+//     (2 * (magnitude_length - 1)).
+// Output:
+//   * |time_data| is the signal in the time domain.
+static void IFFT(NoiseSuppressionC* self,
+                 const float* real,
+                 const float* imag,
+                 size_t magnitude_length,
+                 size_t time_data_length,
+                 float* time_data) {
+  size_t i;
+
+  RTC_DCHECK_EQ(time_data_length, 2 * (magnitude_length - 1));
+
+  time_data[0] = real[0];
+  time_data[1] = real[magnitude_length - 1];
+  for (i = 1; i < magnitude_length - 1; ++i) {
+    time_data[2 * i] = real[i];
+    time_data[2 * i + 1] = imag[i];
+  }
+  WebRtc_rdft(time_data_length, -1, time_data, self->ip, self->wfft);
+
+  for (i = 0; i < time_data_length; ++i) {
+    time_data[i] *= 2.f / time_data_length;  // FFT scaling.
+  }
+}
+
+// Calculates the energy of a buffer.
+// Inputs:
+//   * |buffer| is the buffer over which the energy is calculated.
+//   * |length| is the length of the buffer.
+// Returns the calculated energy.
+static float Energy(const float* buffer, size_t length) {
+  size_t i;
+  float energy = 0.f;
+
+  for (i = 0; i < length; ++i) {
+    energy += buffer[i] * buffer[i];
+  }
+
+  return energy;
+}
+
+// Windows a buffer.
+// Inputs:
+//   * |window| is the window by which to multiply.
+//   * |data| is the data without windowing.
+//   * |length| is the length of the window and data.
+// Output:
+//   * |data_windowed| is the windowed data.
+static void Windowing(const float* window,
+                      const float* data,
+                      size_t length,
+                      float* data_windowed) {
+  size_t i;
+
+  for (i = 0; i < length; ++i) {
+    data_windowed[i] = window[i] * data[i];
+  }
+}
+
+// Estimate prior SNR decision-directed and compute DD based Wiener Filter.
+// Input:
+//   * |magn| is the signal magnitude spectrum estimate.
+// Output:
+//   * |theFilter| is the frequency response of the computed Wiener filter.
+static void ComputeDdBasedWienerFilter(const NoiseSuppressionC* self,
+                                       const float* magn,
+                                       float* theFilter) {
+  size_t i;
+  float snrPrior, previousEstimateStsa, currentEstimateStsa;
+
+  for (i = 0; i < self->magnLen; i++) {
+    // Previous estimate: based on previous frame with gain filter.
+    previousEstimateStsa = self->magnPrevProcess[i] /
+                           (self->noisePrev[i] + 0.0001f) * self->smooth[i];
+    // Post and prior SNR.
+    currentEstimateStsa = 0.f;
+    if (magn[i] > self->noise[i]) {
+      currentEstimateStsa = magn[i] / (self->noise[i] + 0.0001f) - 1.f;
+    }
+    // DD estimate is sum of two terms: current estimate and previous estimate.
+    // Directed decision update of |snrPrior|.
+    snrPrior = DD_PR_SNR * previousEstimateStsa +
+               (1.f - DD_PR_SNR) * currentEstimateStsa;
+    // Gain filter.
+    theFilter[i] = snrPrior / (self->overdrive + snrPrior);
+  }  // End of loop over frequencies.
+}
+
+// Changes the aggressiveness of the noise suppression method.
+// |mode| = 0 is mild (6dB), |mode| = 1 is medium (10dB) and |mode| = 2 is
+// aggressive (15dB).
+// Returns 0 on success and -1 otherwise.
+int WebRtcNs_set_policy_core(NoiseSuppressionC* self, int mode) {
+  // Allow for modes: 0, 1, 2, 3.
+  if (mode < 0 || mode > 3) {
+    return (-1);
+  }
+
+  self->aggrMode = mode;
+  if (mode == 0) {
+    self->overdrive = 1.f;
+    self->denoiseBound = 0.5f;
+    self->gainmap = 0;
+  } else if (mode == 1) {
+    // self->overdrive = 1.25f;
+    self->overdrive = 1.f;
+    self->denoiseBound = 0.25f;
+    self->gainmap = 1;
+  } else if (mode == 2) {
+    // self->overdrive = 1.25f;
+    self->overdrive = 1.1f;
+    self->denoiseBound = 0.125f;
+    self->gainmap = 1;
+  } else if (mode == 3) {
+    // self->overdrive = 1.3f;
+    self->overdrive = 1.25f;
+    self->denoiseBound = 0.09f;
+    self->gainmap = 1;
+  }
+  return 0;
+}
+
+void WebRtcNs_AnalyzeCore(NoiseSuppressionC* self, const float* speechFrame) {
+  size_t i;
+  const size_t kStartBand = 5;  // Skip first frequency bins during estimation.
+  int updateParsFlag;
+  float energy;
+  float signalEnergy = 0.f;
+  float sumMagn = 0.f;
+  float tmpFloat1, tmpFloat2, tmpFloat3;
+  float winData[ANAL_BLOCKL_MAX];
+  float magn[HALF_ANAL_BLOCKL], noise[HALF_ANAL_BLOCKL];
+  float snrLocPost[HALF_ANAL_BLOCKL], snrLocPrior[HALF_ANAL_BLOCKL];
+  float real[ANAL_BLOCKL_MAX], imag[HALF_ANAL_BLOCKL];
+  // Variables during startup.
+  float sum_log_i = 0.0;
+  float sum_log_i_square = 0.0;
+  float sum_log_magn = 0.0;
+  float sum_log_i_log_magn = 0.0;
+  float parametric_exp = 0.0;
+  float parametric_num = 0.0;
+
+  // Check that initiation has been done.
+  RTC_DCHECK_EQ(1, self->initFlag);
+  updateParsFlag = self->modelUpdatePars[0];
+
+  // Update analysis buffer for L band.
+  UpdateBuffer(speechFrame, self->blockLen, self->anaLen, self->analyzeBuf);
+
+  Windowing(self->window, self->analyzeBuf, self->anaLen, winData);
+  energy = Energy(winData, self->anaLen);
+  if (energy == 0.0) {
+    // We want to avoid updating statistics in this case:
+    // Updating feature statistics when we have zeros only will cause
+    // thresholds to move towards zero signal situations. This in turn has the
+    // effect that once the signal is "turned on" (non-zero values) everything
+    // will be treated as speech and there is no noise suppression effect.
+    // Depending on the duration of the inactive signal it takes a
+    // considerable amount of time for the system to learn what is noise and
+    // what is speech.
+    self->signalEnergy = 0;
+    return;
+  }
+
+  self->blockInd++;  // Update the block index only when we process a block.
+
+  FFT(self, winData, self->anaLen, self->magnLen, real, imag, magn);
+
+  for (i = 0; i < self->magnLen; i++) {
+    signalEnergy += real[i] * real[i] + imag[i] * imag[i];
+    sumMagn += magn[i];
+    if (self->blockInd < END_STARTUP_SHORT) {
+      if (i >= kStartBand) {
+        tmpFloat2 = logf((float)i);
+        sum_log_i += tmpFloat2;
+        sum_log_i_square += tmpFloat2 * tmpFloat2;
+        tmpFloat1 = logf(magn[i]);
+        sum_log_magn += tmpFloat1;
+        sum_log_i_log_magn += tmpFloat2 * tmpFloat1;
+      }
+    }
+  }
+  signalEnergy /= self->magnLen;
+  self->signalEnergy = signalEnergy;
+  self->sumMagn = sumMagn;
+
+  // Quantile noise estimate.
+  NoiseEstimation(self, magn, noise);
+  // Compute simplified noise model during startup.
+  if (self->blockInd < END_STARTUP_SHORT) {
+    // Estimate White noise.
+    self->whiteNoiseLevel += sumMagn / self->magnLen * self->overdrive;
+    // Estimate Pink noise parameters.
+    tmpFloat1 = sum_log_i_square * (self->magnLen - kStartBand);
+    tmpFloat1 -= (sum_log_i * sum_log_i);
+    tmpFloat2 =
+        (sum_log_i_square * sum_log_magn - sum_log_i * sum_log_i_log_magn);
+    tmpFloat3 = tmpFloat2 / tmpFloat1;
+    // Constrain the estimated spectrum to be positive.
+    if (tmpFloat3 < 0.f) {
+      tmpFloat3 = 0.f;
+    }
+    self->pinkNoiseNumerator += tmpFloat3;
+    tmpFloat2 = (sum_log_i * sum_log_magn);
+    tmpFloat2 -= (self->magnLen - kStartBand) * sum_log_i_log_magn;
+    tmpFloat3 = tmpFloat2 / tmpFloat1;
+    // Constrain the pink noise power to be in the interval [0, 1].
+    if (tmpFloat3 < 0.f) {
+      tmpFloat3 = 0.f;
+    }
+    if (tmpFloat3 > 1.f) {
+      tmpFloat3 = 1.f;
+    }
+    self->pinkNoiseExp += tmpFloat3;
+
+    // Calculate frequency independent parts of parametric noise estimate.
+    if (self->pinkNoiseExp > 0.f) {
+      // Use pink noise estimate.
+      parametric_num =
+          expf(self->pinkNoiseNumerator / (float)(self->blockInd + 1));
+      parametric_num *= (float)(self->blockInd + 1);
+      parametric_exp = self->pinkNoiseExp / (float)(self->blockInd + 1);
+    }
+    for (i = 0; i < self->magnLen; i++) {
+      // Estimate the background noise using the white and pink noise
+      // parameters.
+      if (self->pinkNoiseExp == 0.f) {
+        // Use white noise estimate.
+        self->parametricNoise[i] = self->whiteNoiseLevel;
+      } else {
+        // Use pink noise estimate.
+        float use_band = (float)(i < kStartBand ? kStartBand : i);
+        self->parametricNoise[i] =
+            parametric_num / powf(use_band, parametric_exp);
+      }
+      // Weight quantile noise with modeled noise.
+      noise[i] *= (self->blockInd);
+      tmpFloat2 =
+          self->parametricNoise[i] * (END_STARTUP_SHORT - self->blockInd);
+      noise[i] += (tmpFloat2 / (float)(self->blockInd + 1));
+      noise[i] /= END_STARTUP_SHORT;
+    }
+  }
+  // Compute average signal during END_STARTUP_LONG time:
+  // used to normalize spectral difference measure.
+  if (self->blockInd < END_STARTUP_LONG) {
+    self->featureData[5] *= self->blockInd;
+    self->featureData[5] += signalEnergy;
+    self->featureData[5] /= (self->blockInd + 1);
+  }
+
+  // Post and prior SNR needed for SpeechNoiseProb.
+  ComputeSnr(self, magn, noise, snrLocPrior, snrLocPost);
+
+  FeatureUpdate(self, magn, updateParsFlag);
+  SpeechNoiseProb(self, self->speechProb, snrLocPrior, snrLocPost);
+  UpdateNoiseEstimate(self, magn, snrLocPrior, snrLocPost, noise);
+
+  // Keep track of noise spectrum for next frame.
+  memcpy(self->noise, noise, sizeof(*noise) * self->magnLen);
+  memcpy(self->magnPrevAnalyze, magn, sizeof(*magn) * self->magnLen);
+}
+
+void WebRtcNs_ProcessCore(NoiseSuppressionC* self,
+                          const float* const* speechFrame,
+                          size_t num_bands,
+                          float* const* outFrame) {
+  // Main routine for noise reduction.
+  int flagHB = 0;
+  size_t i, j;
+
+  float energy1, energy2, gain, factor, factor1, factor2;
+  float fout[BLOCKL_MAX];
+  float winData[ANAL_BLOCKL_MAX];
+  float magn[HALF_ANAL_BLOCKL];
+  float theFilter[HALF_ANAL_BLOCKL], theFilterTmp[HALF_ANAL_BLOCKL];
+  float real[ANAL_BLOCKL_MAX], imag[HALF_ANAL_BLOCKL];
+
+  // SWB variables.
+  int deltaBweHB = 1;
+  int deltaGainHB = 1;
+  float decayBweHB = 1.0;
+  float gainMapParHB = 1.0;
+  float gainTimeDomainHB = 1.0;
+  float avgProbSpeechHB, avgProbSpeechHBTmp, avgFilterGainHB, gainModHB;
+  float sumMagnAnalyze, sumMagnProcess;
+
+  // Check that initiation has been done.
+  RTC_DCHECK_EQ(1, self->initFlag);
+  RTC_DCHECK_LE(num_bands - 1, NUM_HIGH_BANDS_MAX);
+
+  const float* const* speechFrameHB = NULL;
+  float* const* outFrameHB = NULL;
+  size_t num_high_bands = 0;
+  if (num_bands > 1) {
+    speechFrameHB = &speechFrame[1];
+    outFrameHB = &outFrame[1];
+    num_high_bands = num_bands - 1;
+    flagHB = 1;
+    // Range for averaging low band quantities for H band gain.
+    deltaBweHB = (int)self->magnLen / 4;
+    deltaGainHB = deltaBweHB;
+  }
+
+  // Update analysis buffer for L band.
+  UpdateBuffer(speechFrame[0], self->blockLen, self->anaLen, self->dataBuf);
+
+  if (flagHB == 1) {
+    // Update analysis buffer for H bands.
+    for (i = 0; i < num_high_bands; ++i) {
+      UpdateBuffer(speechFrameHB[i],
+                   self->blockLen,
+                   self->anaLen,
+                   self->dataBufHB[i]);
+    }
+  }
+
+  Windowing(self->window, self->dataBuf, self->anaLen, winData);
+  energy1 = Energy(winData, self->anaLen);
+  if (energy1 == 0.0 || self->signalEnergy == 0) {
+    // Synthesize the special case of zero input.
+    // Read out fully processed segment.
+    for (i = self->windShift; i < self->blockLen + self->windShift; i++) {
+      fout[i - self->windShift] = self->syntBuf[i];
+    }
+    // Update synthesis buffer.
+    UpdateBuffer(NULL, self->blockLen, self->anaLen, self->syntBuf);
+
+    for (i = 0; i < self->blockLen; ++i)
+      outFrame[0][i] =
+          WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, fout[i], WEBRTC_SPL_WORD16_MIN);
+
+    // For time-domain gain of HB.
+    if (flagHB == 1) {
+      for (i = 0; i < num_high_bands; ++i) {
+        for (j = 0; j < self->blockLen; ++j) {
+          outFrameHB[i][j] = WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX,
+                                            self->dataBufHB[i][j],
+                                            WEBRTC_SPL_WORD16_MIN);
+        }
+      }
+    }
+
+    return;
+  }
+
+  FFT(self, winData, self->anaLen, self->magnLen, real, imag, magn);
+
+  if (self->blockInd < END_STARTUP_SHORT) {
+    for (i = 0; i < self->magnLen; i++) {
+      self->initMagnEst[i] += magn[i];
+    }
+  }
+
+  ComputeDdBasedWienerFilter(self, magn, theFilter);
+
+  for (i = 0; i < self->magnLen; i++) {
+    // Flooring bottom.
+    if (theFilter[i] < self->denoiseBound) {
+      theFilter[i] = self->denoiseBound;
+    }
+    // Flooring top.
+    if (theFilter[i] > 1.f) {
+      theFilter[i] = 1.f;
+    }
+    if (self->blockInd < END_STARTUP_SHORT) {
+      theFilterTmp[i] =
+          (self->initMagnEst[i] - self->overdrive * self->parametricNoise[i]);
+      theFilterTmp[i] /= (self->initMagnEst[i] + 0.0001f);
+      // Flooring bottom.
+      if (theFilterTmp[i] < self->denoiseBound) {
+        theFilterTmp[i] = self->denoiseBound;
+      }
+      // Flooring top.
+      if (theFilterTmp[i] > 1.f) {
+        theFilterTmp[i] = 1.f;
+      }
+      // Weight the two suppression filters.
+      theFilter[i] *= (self->blockInd);
+      theFilterTmp[i] *= (END_STARTUP_SHORT - self->blockInd);
+      theFilter[i] += theFilterTmp[i];
+      theFilter[i] /= (END_STARTUP_SHORT);
+    }
+
+    self->smooth[i] = theFilter[i];
+    real[i] *= self->smooth[i];
+    imag[i] *= self->smooth[i];
+  }
+  // Keep track of |magn| spectrum for next frame.
+  memcpy(self->magnPrevProcess, magn, sizeof(*magn) * self->magnLen);
+  memcpy(self->noisePrev, self->noise, sizeof(self->noise[0]) * self->magnLen);
+  // Back to time domain.
+  IFFT(self, real, imag, self->magnLen, self->anaLen, winData);
+
+  // Scale factor: only do it after END_STARTUP_LONG time.
+  factor = 1.f;
+  if (self->gainmap == 1 && self->blockInd > END_STARTUP_LONG) {
+    factor1 = 1.f;
+    factor2 = 1.f;
+
+    energy2 = Energy(winData, self->anaLen);
+    gain = (float)sqrt(energy2 / (energy1 + 1.f));
+
+    // Scaling for new version.
+    if (gain > B_LIM) {
+      factor1 = 1.f + 1.3f * (gain - B_LIM);
+      if (gain * factor1 > 1.f) {
+        factor1 = 1.f / gain;
+      }
+    }
+    if (gain < B_LIM) {
+      // Don't reduce scale too much for pause regions:
+      // attenuation here should be controlled by flooring.
+      if (gain <= self->denoiseBound) {
+        gain = self->denoiseBound;
+      }
+      factor2 = 1.f - 0.3f * (B_LIM - gain);
+    }
+    // Combine both scales with speech/noise prob:
+    // note prior (priorSpeechProb) is not frequency dependent.
+    factor = self->priorSpeechProb * factor1 +
+             (1.f - self->priorSpeechProb) * factor2;
+  }  // Out of self->gainmap == 1.
+
+  Windowing(self->window, winData, self->anaLen, winData);
+
+  // Synthesis.
+  for (i = 0; i < self->anaLen; i++) {
+    self->syntBuf[i] += factor * winData[i];
+  }
+  // Read out fully processed segment.
+  for (i = self->windShift; i < self->blockLen + self->windShift; i++) {
+    fout[i - self->windShift] = self->syntBuf[i];
+  }
+  // Update synthesis buffer.
+  UpdateBuffer(NULL, self->blockLen, self->anaLen, self->syntBuf);
+
+  for (i = 0; i < self->blockLen; ++i)
+    outFrame[0][i] =
+        WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX, fout[i], WEBRTC_SPL_WORD16_MIN);
+
+  // For time-domain gain of HB.
+  if (flagHB == 1) {
+    // Average speech prob from low band.
+    // Average over second half (i.e., 4->8kHz) of frequencies spectrum.
+    avgProbSpeechHB = 0.0;
+    for (i = self->magnLen - deltaBweHB - 1; i < self->magnLen - 1; i++) {
+      avgProbSpeechHB += self->speechProb[i];
+    }
+    avgProbSpeechHB = avgProbSpeechHB / ((float)deltaBweHB);
+    // If the speech was suppressed by a component between Analyze and
+    // Process, for example the AEC, then it should not be considered speech
+    // for high band suppression purposes.
+    sumMagnAnalyze = 0;
+    sumMagnProcess = 0;
+    for (i = 0; i < self->magnLen; ++i) {
+      sumMagnAnalyze += self->magnPrevAnalyze[i];
+      sumMagnProcess += self->magnPrevProcess[i];
+    }
+    RTC_DCHECK_GT(sumMagnAnalyze, 0);
+    avgProbSpeechHB *= sumMagnProcess / sumMagnAnalyze;
+    // Average filter gain from low band.
+    // Average over second half (i.e., 4->8kHz) of frequencies spectrum.
+    avgFilterGainHB = 0.0;
+    for (i = self->magnLen - deltaGainHB - 1; i < self->magnLen - 1; i++) {
+      avgFilterGainHB += self->smooth[i];
+    }
+    avgFilterGainHB = avgFilterGainHB / ((float)(deltaGainHB));
+    avgProbSpeechHBTmp = 2.f * avgProbSpeechHB - 1.f;
+    // Gain based on speech probability.
+    gainModHB = 0.5f * (1.f + (float)tanh(gainMapParHB * avgProbSpeechHBTmp));
+    // Combine gain with low band gain.
+    gainTimeDomainHB = 0.5f * gainModHB + 0.5f * avgFilterGainHB;
+    if (avgProbSpeechHB >= 0.5f) {
+      gainTimeDomainHB = 0.25f * gainModHB + 0.75f * avgFilterGainHB;
+    }
+    gainTimeDomainHB = gainTimeDomainHB * decayBweHB;
+    // Make sure gain is within flooring range.
+    // Flooring bottom.
+    if (gainTimeDomainHB < self->denoiseBound) {
+      gainTimeDomainHB = self->denoiseBound;
+    }
+    // Flooring top.
+    if (gainTimeDomainHB > 1.f) {
+      gainTimeDomainHB = 1.f;
+    }
+    // Apply gain.
+    for (i = 0; i < num_high_bands; ++i) {
+      for (j = 0; j < self->blockLen; j++) {
+        outFrameHB[i][j] =
+            WEBRTC_SPL_SAT(WEBRTC_SPL_WORD16_MAX,
+                           gainTimeDomainHB * self->dataBufHB[i][j],
+                           WEBRTC_SPL_WORD16_MIN);
+      }
+    }
+  }  // End of H band gain computation.
+}
diff --git a/modules/audio_processing/ns/ns_core.h b/modules/audio_processing/ns/ns_core.h
new file mode 100644
index 0000000..990d363
--- /dev/null
+++ b/modules/audio_processing/ns/ns_core.h
@@ -0,0 +1,189 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_NS_CORE_H_
+#define MODULES_AUDIO_PROCESSING_NS_NS_CORE_H_
+
+#include "modules/audio_processing/ns/defines.h"
+
+typedef struct NSParaExtract_ {
+  // Bin size of histogram.
+  float binSizeLrt;
+  float binSizeSpecFlat;
+  float binSizeSpecDiff;
+  // Range of histogram over which LRT threshold is computed.
+  float rangeAvgHistLrt;
+  // Scale parameters: multiply dominant peaks of the histograms by scale factor
+  // to obtain thresholds for prior model.
+  float factor1ModelPars;  // For LRT and spectral difference.
+  float factor2ModelPars;  // For spectral_flatness: used when noise is flatter
+                           // than speech.
+  // Peak limit for spectral flatness (varies between 0 and 1).
+  float thresPosSpecFlat;
+  // Limit on spacing of two highest peaks in histogram: spacing determined by
+  // bin size.
+  float limitPeakSpacingSpecFlat;
+  float limitPeakSpacingSpecDiff;
+  // Limit on relevance of second peak.
+  float limitPeakWeightsSpecFlat;
+  float limitPeakWeightsSpecDiff;
+  // Limit on fluctuation of LRT feature.
+  float thresFluctLrt;
+  // Limit on the max and min values for the feature thresholds.
+  float maxLrt;
+  float minLrt;
+  float maxSpecFlat;
+  float minSpecFlat;
+  float maxSpecDiff;
+  float minSpecDiff;
+  // Criteria of weight of histogram peak to accept/reject feature.
+  int thresWeightSpecFlat;
+  int thresWeightSpecDiff;
+
+} NSParaExtract;
+
+typedef struct NoiseSuppressionC_ {
+  uint32_t fs;
+  size_t blockLen;
+  size_t windShift;
+  size_t anaLen;
+  size_t magnLen;
+  int aggrMode;
+  const float* window;
+  float analyzeBuf[ANAL_BLOCKL_MAX];
+  float dataBuf[ANAL_BLOCKL_MAX];
+  float syntBuf[ANAL_BLOCKL_MAX];
+
+  int initFlag;
+  // Parameters for quantile noise estimation.
+  float density[SIMULT * HALF_ANAL_BLOCKL];
+  float lquantile[SIMULT * HALF_ANAL_BLOCKL];
+  float quantile[HALF_ANAL_BLOCKL];
+  int counter[SIMULT];
+  int updates;
+  // Parameters for Wiener filter.
+  float smooth[HALF_ANAL_BLOCKL];
+  float overdrive;
+  float denoiseBound;
+  int gainmap;
+  // FFT work arrays.
+  size_t ip[IP_LENGTH];
+  float wfft[W_LENGTH];
+
+  // Parameters for new method: some not needed, will reduce/cleanup later.
+  int32_t blockInd;  // Frame index counter.
+  int modelUpdatePars[4];  // Parameters for updating or estimating.
+  // Thresholds/weights for prior model.
+  float priorModelPars[7];  // Parameters for prior model.
+  float noise[HALF_ANAL_BLOCKL];  // Noise spectrum from current frame.
+  float noisePrev[HALF_ANAL_BLOCKL];  // Noise spectrum from previous frame.
+  // Magnitude spectrum of previous analyze frame.
+  float magnPrevAnalyze[HALF_ANAL_BLOCKL];
+  // Magnitude spectrum of previous process frame.
+  float magnPrevProcess[HALF_ANAL_BLOCKL];
+  float logLrtTimeAvg[HALF_ANAL_BLOCKL];  // Log LRT factor with time-smoothing.
+  float priorSpeechProb;  // Prior speech/noise probability.
+  float featureData[7];
+  // Conservative noise spectrum estimate.
+  float magnAvgPause[HALF_ANAL_BLOCKL];
+  float signalEnergy;  // Energy of |magn|.
+  float sumMagn;
+  float whiteNoiseLevel;  // Initial noise estimate.
+  float initMagnEst[HALF_ANAL_BLOCKL];  // Initial magnitude spectrum estimate.
+  float pinkNoiseNumerator;  // Pink noise parameter: numerator.
+  float pinkNoiseExp;  // Pink noise parameter: power of frequencies.
+  float parametricNoise[HALF_ANAL_BLOCKL];
+  // Parameters for feature extraction.
+  NSParaExtract featureExtractionParams;
+  // Histograms for parameter estimation.
+  int histLrt[HIST_PAR_EST];
+  int histSpecFlat[HIST_PAR_EST];
+  int histSpecDiff[HIST_PAR_EST];
+  // Quantities for high band estimate.
+  float speechProb[HALF_ANAL_BLOCKL];  // Final speech/noise prob: prior + LRT.
+  // Buffering data for HB.
+  float dataBufHB[NUM_HIGH_BANDS_MAX][ANAL_BLOCKL_MAX];
+} NoiseSuppressionC;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/****************************************************************************
+ * WebRtcNs_InitCore(...)
+ *
+ * This function initializes a noise suppression instance
+ *
+ * Input:
+ *      - self          : Instance that should be initialized
+ *      - fs            : Sampling frequency
+ *
+ * Output:
+ *      - self          : Initialized instance
+ *
+ * Return value         :  0 - Ok
+ *                        -1 - Error
+ */
+int WebRtcNs_InitCore(NoiseSuppressionC* self, uint32_t fs);
+
+/****************************************************************************
+ * WebRtcNs_set_policy_core(...)
+ *
+ * This changes the aggressiveness of the noise suppression method.
+ *
+ * Input:
+ *      - self          : Instance that should be initialized
+ *      - mode          : 0: Mild (6dB), 1: Medium (10dB), 2: Aggressive (15dB)
+ *
+ * Output:
+ *      - self          : Initialized instance
+ *
+ * Return value         :  0 - Ok
+ *                        -1 - Error
+ */
+int WebRtcNs_set_policy_core(NoiseSuppressionC* self, int mode);
+
+/****************************************************************************
+ * WebRtcNs_AnalyzeCore
+ *
+ * Estimate the background noise.
+ *
+ * Input:
+ *      - self          : Instance that should be initialized
+ *      - speechFrame   : Input speech frame for lower band
+ *
+ * Output:
+ *      - self          : Updated instance
+ */
+void WebRtcNs_AnalyzeCore(NoiseSuppressionC* self, const float* speechFrame);
+
+/****************************************************************************
+ * WebRtcNs_ProcessCore
+ *
+ * Do noise suppression.
+ *
+ * Input:
+ *      - self          : Instance that should be initialized
+ *      - inFrame       : Input speech frame for each band
+ *      - num_bands     : Number of bands
+ *
+ * Output:
+ *      - self          : Updated instance
+ *      - outFrame      : Output speech frame for each band
+ */
+void WebRtcNs_ProcessCore(NoiseSuppressionC* self,
+                          const float* const* inFrame,
+                          size_t num_bands,
+                          float* const* outFrame);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MODULES_AUDIO_PROCESSING_NS_NS_CORE_H_
diff --git a/modules/audio_processing/ns/nsx_core.c b/modules/audio_processing/ns/nsx_core.c
new file mode 100644
index 0000000..8043656
--- /dev/null
+++ b/modules/audio_processing/ns/nsx_core.c
@@ -0,0 +1,2107 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/ns/noise_suppression_x.h"
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "rtc_base/checks.h"
+#include "common_audio/signal_processing/include/real_fft.h"
+#include "modules/audio_processing/ns/nsx_core.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+
+#if defined(WEBRTC_HAS_NEON)
+/* Tables are defined in ARM assembly files. */
+extern const int16_t WebRtcNsx_kLogTable[9];
+extern const int16_t WebRtcNsx_kCounterDiv[201];
+extern const int16_t WebRtcNsx_kLogTableFrac[256];
+#else
+static const int16_t WebRtcNsx_kLogTable[9] = {
+  0, 177, 355, 532, 710, 887, 1065, 1242, 1420
+};
+
+static const int16_t WebRtcNsx_kCounterDiv[201] = {
+  32767, 16384, 10923, 8192, 6554, 5461, 4681, 4096, 3641, 3277, 2979, 2731,
+  2521, 2341, 2185, 2048, 1928, 1820, 1725, 1638, 1560, 1489, 1425, 1365, 1311,
+  1260, 1214, 1170, 1130, 1092, 1057, 1024, 993, 964, 936, 910, 886, 862, 840,
+  819, 799, 780, 762, 745, 728, 712, 697, 683, 669, 655, 643, 630, 618, 607,
+  596, 585, 575, 565, 555, 546, 537, 529, 520, 512, 504, 496, 489, 482, 475,
+  468, 462, 455, 449, 443, 437, 431, 426, 420, 415, 410, 405, 400, 395, 390,
+  386, 381, 377, 372, 368, 364, 360, 356, 352, 349, 345, 341, 338, 334, 331,
+  328, 324, 321, 318, 315, 312, 309, 306, 303, 301, 298, 295, 293, 290, 287,
+  285, 282, 280, 278, 275, 273, 271, 269, 266, 264, 262, 260, 258, 256, 254,
+  252, 250, 248, 246, 245, 243, 241, 239, 237, 236, 234, 232, 231, 229, 228,
+  226, 224, 223, 221, 220, 218, 217, 216, 214, 213, 211, 210, 209, 207, 206,
+  205, 204, 202, 201, 200, 199, 197, 196, 195, 194, 193, 192, 191, 189, 188,
+  187, 186, 185, 184, 183, 182, 181, 180, 179, 178, 177, 176, 175, 174, 173,
+  172, 172, 171, 170, 169, 168, 167, 166, 165, 165, 164, 163
+};
+
+static const int16_t WebRtcNsx_kLogTableFrac[256] = {
+  0,   1,   3,   4,   6,   7,   9,  10,  11,  13,  14,  16,  17,  18,  20,  21,
+  22,  24,  25,  26,  28,  29,  30,  32,  33,  34,  36,  37,  38,  40,  41,  42,
+  44,  45,  46,  47,  49,  50,  51,  52,  54,  55,  56,  57,  59,  60,  61,  62,
+  63,  65,  66,  67,  68,  69,  71,  72,  73,  74,  75,  77,  78,  79,  80,  81,
+  82,  84,  85,  86,  87,  88,  89,  90,  92,  93,  94,  95,  96,  97,  98,  99,
+  100, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 116,
+  117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,
+  132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146,
+  147, 148, 149, 150, 151, 152, 153, 154, 155, 155, 156, 157, 158, 159, 160,
+  161, 162, 163, 164, 165, 166, 167, 168, 169, 169, 170, 171, 172, 173, 174,
+  175, 176, 177, 178, 178, 179, 180, 181, 182, 183, 184, 185, 185, 186, 187,
+  188, 189, 190, 191, 192, 192, 193, 194, 195, 196, 197, 198, 198, 199, 200,
+  201, 202, 203, 203, 204, 205, 206, 207, 208, 208, 209, 210, 211, 212, 212,
+  213, 214, 215, 216, 216, 217, 218, 219, 220, 220, 221, 222, 223, 224, 224,
+  225, 226, 227, 228, 228, 229, 230, 231, 231, 232, 233, 234, 234, 235, 236,
+  237, 238, 238, 239, 240, 241, 241, 242, 243, 244, 244, 245, 246, 247, 247,
+  248, 249, 249, 250, 251, 252, 252, 253, 254, 255, 255
+};
+#endif  // WEBRTC_HAS_NEON
+
+// Skip first frequency bins during estimation. (0 <= value < 64)
+static const size_t kStartBand = 5;
+
+// hybrib Hanning & flat window
+static const int16_t kBlocks80w128x[128] = {
+  0,    536,   1072,   1606,   2139,   2669,   3196,   3720,   4240,   4756,   5266,
+  5771,   6270,   6762,   7246,   7723,   8192,   8652,   9102,   9543,   9974,  10394,
+  10803,  11200,  11585,  11958,  12318,  12665,  12998,  13318,  13623,  13913,  14189,
+  14449,  14694,  14924,  15137,  15334,  15515,  15679,  15826,  15956,  16069,  16165,
+  16244,  16305,  16349,  16375,  16384,  16384,  16384,  16384,  16384,  16384,  16384,
+  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,
+  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,  16384,
+  16384,  16384,  16384,  16384,  16375,  16349,  16305,  16244,  16165,  16069,  15956,
+  15826,  15679,  15515,  15334,  15137,  14924,  14694,  14449,  14189,  13913,  13623,
+  13318,  12998,  12665,  12318,  11958,  11585,  11200,  10803,  10394,   9974,   9543,
+  9102,   8652,   8192,   7723,   7246,   6762,   6270,   5771,   5266,   4756,   4240,
+  3720,   3196,   2669,   2139,   1606,   1072,    536
+};
+
+// hybrib Hanning & flat window
+static const int16_t kBlocks160w256x[256] = {
+  0,   268,   536,   804,  1072,  1339,  1606,  1872,
+  2139,  2404,  2669,  2933,  3196,  3459,  3720,  3981,
+  4240,  4499,  4756,  5012,  5266,  5520,  5771,  6021,
+  6270,  6517,  6762,  7005,  7246,  7486,  7723,  7959,
+  8192,  8423,  8652,  8878,  9102,  9324,  9543,  9760,
+  9974, 10185, 10394, 10600, 10803, 11003, 11200, 11394,
+  11585, 11773, 11958, 12140, 12318, 12493, 12665, 12833,
+  12998, 13160, 13318, 13472, 13623, 13770, 13913, 14053,
+  14189, 14321, 14449, 14574, 14694, 14811, 14924, 15032,
+  15137, 15237, 15334, 15426, 15515, 15599, 15679, 15754,
+  15826, 15893, 15956, 16015, 16069, 16119, 16165, 16207,
+  16244, 16277, 16305, 16329, 16349, 16364, 16375, 16382,
+  16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
+  16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
+  16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
+  16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
+  16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
+  16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
+  16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
+  16384, 16384, 16384, 16384, 16384, 16384, 16384, 16384,
+  16384, 16382, 16375, 16364, 16349, 16329, 16305, 16277,
+  16244, 16207, 16165, 16119, 16069, 16015, 15956, 15893,
+  15826, 15754, 15679, 15599, 15515, 15426, 15334, 15237,
+  15137, 15032, 14924, 14811, 14694, 14574, 14449, 14321,
+  14189, 14053, 13913, 13770, 13623, 13472, 13318, 13160,
+  12998, 12833, 12665, 12493, 12318, 12140, 11958, 11773,
+  11585, 11394, 11200, 11003, 10803, 10600, 10394, 10185,
+  9974,  9760,  9543,  9324,  9102,  8878,  8652,  8423,
+  8192,  7959,  7723,  7486,  7246,  7005,  6762,  6517,
+  6270,  6021,  5771,  5520,  5266,  5012,  4756,  4499,
+  4240,  3981,  3720,  3459,  3196,  2933,  2669,  2404,
+  2139,  1872,  1606,  1339,  1072,   804,   536,   268
+};
+
+// Gain factor1 table: Input value in Q8 and output value in Q13
+// original floating point code
+//  if (gain > blim) {
+//    factor1 = 1.0 + 1.3 * (gain - blim);
+//    if (gain * factor1 > 1.0) {
+//      factor1 = 1.0 / gain;
+//    }
+//  } else {
+//    factor1 = 1.0;
+//  }
+static const int16_t kFactor1Table[257] = {
+  8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8233, 8274, 8315, 8355, 8396, 8436, 8475, 8515, 8554, 8592, 8631, 8669,
+  8707, 8745, 8783, 8820, 8857, 8894, 8931, 8967, 9003, 9039, 9075, 9111, 9146, 9181,
+  9216, 9251, 9286, 9320, 9354, 9388, 9422, 9456, 9489, 9523, 9556, 9589, 9622, 9655,
+  9687, 9719, 9752, 9784, 9816, 9848, 9879, 9911, 9942, 9973, 10004, 10035, 10066,
+  10097, 10128, 10158, 10188, 10218, 10249, 10279, 10308, 10338, 10368, 10397, 10426,
+  10456, 10485, 10514, 10543, 10572, 10600, 10629, 10657, 10686, 10714, 10742, 10770,
+  10798, 10826, 10854, 10882, 10847, 10810, 10774, 10737, 10701, 10666, 10631, 10596,
+  10562, 10527, 10494, 10460, 10427, 10394, 10362, 10329, 10297, 10266, 10235, 10203,
+  10173, 10142, 10112, 10082, 10052, 10023, 9994, 9965, 9936, 9908, 9879, 9851, 9824,
+  9796, 9769, 9742, 9715, 9689, 9662, 9636, 9610, 9584, 9559, 9534, 9508, 9484, 9459,
+  9434, 9410, 9386, 9362, 9338, 9314, 9291, 9268, 9245, 9222, 9199, 9176, 9154, 9132,
+  9110, 9088, 9066, 9044, 9023, 9002, 8980, 8959, 8939, 8918, 8897, 8877, 8857, 8836,
+  8816, 8796, 8777, 8757, 8738, 8718, 8699, 8680, 8661, 8642, 8623, 8605, 8586, 8568,
+  8550, 8532, 8514, 8496, 8478, 8460, 8443, 8425, 8408, 8391, 8373, 8356, 8339, 8323,
+  8306, 8289, 8273, 8256, 8240, 8224, 8208, 8192
+};
+
+// For Factor2 tables
+// original floating point code
+// if (gain > blim) {
+//   factor2 = 1.0;
+// } else {
+//   factor2 = 1.0 - 0.3 * (blim - gain);
+//   if (gain <= inst->denoiseBound) {
+//     factor2 = 1.0 - 0.3 * (blim - inst->denoiseBound);
+//   }
+// }
+//
+// Gain factor table: Input value in Q8 and output value in Q13
+static const int16_t kFactor2Aggressiveness1[257] = {
+  7577, 7577, 7577, 7577, 7577, 7577,
+  7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7577, 7596, 7614, 7632,
+  7650, 7667, 7683, 7699, 7715, 7731, 7746, 7761, 7775, 7790, 7804, 7818, 7832, 7845,
+  7858, 7871, 7884, 7897, 7910, 7922, 7934, 7946, 7958, 7970, 7982, 7993, 8004, 8016,
+  8027, 8038, 8049, 8060, 8070, 8081, 8091, 8102, 8112, 8122, 8132, 8143, 8152, 8162,
+  8172, 8182, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192
+};
+
+// Gain factor table: Input value in Q8 and output value in Q13
+static const int16_t kFactor2Aggressiveness2[257] = {
+  7270, 7270, 7270, 7270, 7270, 7306,
+  7339, 7369, 7397, 7424, 7448, 7472, 7495, 7517, 7537, 7558, 7577, 7596, 7614, 7632,
+  7650, 7667, 7683, 7699, 7715, 7731, 7746, 7761, 7775, 7790, 7804, 7818, 7832, 7845,
+  7858, 7871, 7884, 7897, 7910, 7922, 7934, 7946, 7958, 7970, 7982, 7993, 8004, 8016,
+  8027, 8038, 8049, 8060, 8070, 8081, 8091, 8102, 8112, 8122, 8132, 8143, 8152, 8162,
+  8172, 8182, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192
+};
+
+// Gain factor table: Input value in Q8 and output value in Q13
+static const int16_t kFactor2Aggressiveness3[257] = {
+  7184, 7184, 7184, 7229, 7270, 7306,
+  7339, 7369, 7397, 7424, 7448, 7472, 7495, 7517, 7537, 7558, 7577, 7596, 7614, 7632,
+  7650, 7667, 7683, 7699, 7715, 7731, 7746, 7761, 7775, 7790, 7804, 7818, 7832, 7845,
+  7858, 7871, 7884, 7897, 7910, 7922, 7934, 7946, 7958, 7970, 7982, 7993, 8004, 8016,
+  8027, 8038, 8049, 8060, 8070, 8081, 8091, 8102, 8112, 8122, 8132, 8143, 8152, 8162,
+  8172, 8182, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
+  8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192
+};
+
+// sum of log2(i) from table index to inst->anaLen2 in Q5
+// Note that the first table value is invalid, since log2(0) = -infinity
+static const int16_t kSumLogIndex[66] = {
+  0,  22917,  22917,  22885,  22834,  22770,  22696,  22613,
+  22524,  22428,  22326,  22220,  22109,  21994,  21876,  21754,
+  21629,  21501,  21370,  21237,  21101,  20963,  20822,  20679,
+  20535,  20388,  20239,  20089,  19937,  19783,  19628,  19470,
+  19312,  19152,  18991,  18828,  18664,  18498,  18331,  18164,
+  17994,  17824,  17653,  17480,  17306,  17132,  16956,  16779,
+  16602,  16423,  16243,  16063,  15881,  15699,  15515,  15331,
+  15146,  14960,  14774,  14586,  14398,  14209,  14019,  13829,
+  13637,  13445
+};
+
+// sum of log2(i)^2 from table index to inst->anaLen2 in Q2
+// Note that the first table value is invalid, since log2(0) = -infinity
+static const int16_t kSumSquareLogIndex[66] = {
+  0,  16959,  16959,  16955,  16945,  16929,  16908,  16881,
+  16850,  16814,  16773,  16729,  16681,  16630,  16575,  16517,
+  16456,  16392,  16325,  16256,  16184,  16109,  16032,  15952,
+  15870,  15786,  15700,  15612,  15521,  15429,  15334,  15238,
+  15140,  15040,  14938,  14834,  14729,  14622,  14514,  14404,
+  14292,  14179,  14064,  13947,  13830,  13710,  13590,  13468,
+  13344,  13220,  13094,  12966,  12837,  12707,  12576,  12444,
+  12310,  12175,  12039,  11902,  11763,  11624,  11483,  11341,
+  11198,  11054
+};
+
+// log2(table index) in Q12
+// Note that the first table value is invalid, since log2(0) = -infinity
+static const int16_t kLogIndex[129] = {
+  0,      0,   4096,   6492,   8192,   9511,  10588,  11499,
+  12288,  12984,  13607,  14170,  14684,  15157,  15595,  16003,
+  16384,  16742,  17080,  17400,  17703,  17991,  18266,  18529,
+  18780,  19021,  19253,  19476,  19691,  19898,  20099,  20292,
+  20480,  20662,  20838,  21010,  21176,  21338,  21496,  21649,
+  21799,  21945,  22087,  22226,  22362,  22495,  22625,  22752,
+  22876,  22998,  23117,  23234,  23349,  23462,  23572,  23680,
+  23787,  23892,  23994,  24095,  24195,  24292,  24388,  24483,
+  24576,  24668,  24758,  24847,  24934,  25021,  25106,  25189,
+  25272,  25354,  25434,  25513,  25592,  25669,  25745,  25820,
+  25895,  25968,  26041,  26112,  26183,  26253,  26322,  26390,
+  26458,  26525,  26591,  26656,  26721,  26784,  26848,  26910,
+  26972,  27033,  27094,  27154,  27213,  27272,  27330,  27388,
+  27445,  27502,  27558,  27613,  27668,  27722,  27776,  27830,
+  27883,  27935,  27988,  28039,  28090,  28141,  28191,  28241,
+  28291,  28340,  28388,  28437,  28484,  28532,  28579,  28626,
+  28672
+};
+
+// determinant of estimation matrix in Q0 corresponding to the log2 tables above
+// Note that the first table value is invalid, since log2(0) = -infinity
+static const int16_t kDeterminantEstMatrix[66] = {
+  0,  29814,  25574,  22640,  20351,  18469,  16873,  15491,
+  14277,  13199,  12233,  11362,  10571,   9851,   9192,   8587,
+  8030,   7515,   7038,   6596,   6186,   5804,   5448,   5115,
+  4805,   4514,   4242,   3988,   3749,   3524,   3314,   3116,
+  2930,   2755,   2590,   2435,   2289,   2152,   2022,   1900,
+  1785,   1677,   1575,   1478,   1388,   1302,   1221,   1145,
+  1073,   1005,    942,    881,    825,    771,    721,    674,
+  629,    587,    547,    510,    475,    442,    411,    382,
+  355,    330
+};
+
+// Update the noise estimation information.
+static void UpdateNoiseEstimate(NoiseSuppressionFixedC* inst, int offset) {
+  int32_t tmp32no1 = 0;
+  int32_t tmp32no2 = 0;
+  int16_t tmp16 = 0;
+  const int16_t kExp2Const = 11819; // Q13
+
+  size_t i = 0;
+
+  tmp16 = WebRtcSpl_MaxValueW16(inst->noiseEstLogQuantile + offset,
+                                   inst->magnLen);
+  // Guarantee a Q-domain as high as possible and still fit in int16
+  inst->qNoise = 14 - (int) WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+                   kExp2Const, tmp16, 21);
+  for (i = 0; i < inst->magnLen; i++) {
+    // inst->quantile[i]=exp(inst->lquantile[offset+i]);
+    // in Q21
+    tmp32no2 = kExp2Const * inst->noiseEstLogQuantile[offset + i];
+    tmp32no1 = (0x00200000 | (tmp32no2 & 0x001FFFFF)); // 2^21 + frac
+    tmp16 = (int16_t)(tmp32no2 >> 21);
+    tmp16 -= 21;// shift 21 to get result in Q0
+    tmp16 += (int16_t) inst->qNoise; //shift to get result in Q(qNoise)
+    if (tmp16 < 0) {
+      tmp32no1 >>= -tmp16;
+    } else {
+      tmp32no1 <<= tmp16;
+    }
+    inst->noiseEstQuantile[i] = WebRtcSpl_SatW32ToW16(tmp32no1);
+  }
+}
+
+// Noise Estimation
+static void NoiseEstimationC(NoiseSuppressionFixedC* inst,
+                             uint16_t* magn,
+                             uint32_t* noise,
+                             int16_t* q_noise) {
+  int16_t lmagn[HALF_ANAL_BLOCKL], counter, countDiv;
+  int16_t countProd, delta, zeros, frac;
+  int16_t log2, tabind, logval, tmp16, tmp16no1, tmp16no2;
+  const int16_t log2_const = 22713; // Q15
+  const int16_t width_factor = 21845;
+
+  size_t i, s, offset;
+
+  tabind = inst->stages - inst->normData;
+  RTC_DCHECK_LT(tabind, 9);
+  RTC_DCHECK_GT(tabind, -9);
+  if (tabind < 0) {
+    logval = -WebRtcNsx_kLogTable[-tabind];
+  } else {
+    logval = WebRtcNsx_kLogTable[tabind];
+  }
+
+  // lmagn(i)=log(magn(i))=log(2)*log2(magn(i))
+  // magn is in Q(-stages), and the real lmagn values are:
+  // real_lmagn(i)=log(magn(i)*2^stages)=log(magn(i))+log(2^stages)
+  // lmagn in Q8
+  for (i = 0; i < inst->magnLen; i++) {
+    if (magn[i]) {
+      zeros = WebRtcSpl_NormU32((uint32_t)magn[i]);
+      frac = (int16_t)((((uint32_t)magn[i] << zeros)
+                              & 0x7FFFFFFF) >> 23);
+      // log2(magn(i))
+      RTC_DCHECK_LT(frac, 256);
+      log2 = (int16_t)(((31 - zeros) << 8)
+                             + WebRtcNsx_kLogTableFrac[frac]);
+      // log2(magn(i))*log(2)
+      lmagn[i] = (int16_t)((log2 * log2_const) >> 15);
+      // + log(2^stages)
+      lmagn[i] += logval;
+    } else {
+      lmagn[i] = logval;//0;
+    }
+  }
+
+  // loop over simultaneous estimates
+  for (s = 0; s < SIMULT; s++) {
+    offset = s * inst->magnLen;
+
+    // Get counter values from state
+    counter = inst->noiseEstCounter[s];
+    RTC_DCHECK_LT(counter, 201);
+    countDiv = WebRtcNsx_kCounterDiv[counter];
+    countProd = (int16_t)(counter * countDiv);
+
+    // quant_est(...)
+    for (i = 0; i < inst->magnLen; i++) {
+      // compute delta
+      if (inst->noiseEstDensity[offset + i] > 512) {
+        // Get the value for delta by shifting intead of dividing.
+        int factor = WebRtcSpl_NormW16(inst->noiseEstDensity[offset + i]);
+        delta = (int16_t)(FACTOR_Q16 >> (14 - factor));
+      } else {
+        delta = FACTOR_Q7;
+        if (inst->blockIndex < END_STARTUP_LONG) {
+          // Smaller step size during startup. This prevents from using
+          // unrealistic values causing overflow.
+          delta = FACTOR_Q7_STARTUP;
+        }
+      }
+
+      // update log quantile estimate
+      tmp16 = (int16_t)((delta * countDiv) >> 14);
+      if (lmagn[i] > inst->noiseEstLogQuantile[offset + i]) {
+        // +=QUANTILE*delta/(inst->counter[s]+1) QUANTILE=0.25, =1 in Q2
+        // CounterDiv=1/(inst->counter[s]+1) in Q15
+        tmp16 += 2;
+        inst->noiseEstLogQuantile[offset + i] += tmp16 / 4;
+      } else {
+        tmp16 += 1;
+        // *(1-QUANTILE), in Q2 QUANTILE=0.25, 1-0.25=0.75=3 in Q2
+        // TODO(bjornv): investigate why we need to truncate twice.
+        tmp16no2 = (int16_t)((tmp16 / 2) * 3 / 2);
+        inst->noiseEstLogQuantile[offset + i] -= tmp16no2;
+        if (inst->noiseEstLogQuantile[offset + i] < logval) {
+          // This is the smallest fixed point representation we can
+          // have, hence we limit the output.
+          inst->noiseEstLogQuantile[offset + i] = logval;
+        }
+      }
+
+      // update density estimate
+      if (WEBRTC_SPL_ABS_W16(lmagn[i] - inst->noiseEstLogQuantile[offset + i])
+          < WIDTH_Q8) {
+        tmp16no1 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+                     inst->noiseEstDensity[offset + i], countProd, 15);
+        tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+                     width_factor, countDiv, 15);
+        inst->noiseEstDensity[offset + i] = tmp16no1 + tmp16no2;
+      }
+    }  // end loop over magnitude spectrum
+
+    if (counter >= END_STARTUP_LONG) {
+      inst->noiseEstCounter[s] = 0;
+      if (inst->blockIndex >= END_STARTUP_LONG) {
+        UpdateNoiseEstimate(inst, offset);
+      }
+    }
+    inst->noiseEstCounter[s]++;
+
+  }  // end loop over simultaneous estimates
+
+  // Sequentially update the noise during startup
+  if (inst->blockIndex < END_STARTUP_LONG) {
+    UpdateNoiseEstimate(inst, offset);
+  }
+
+  for (i = 0; i < inst->magnLen; i++) {
+    noise[i] = (uint32_t)(inst->noiseEstQuantile[i]); // Q(qNoise)
+  }
+  (*q_noise) = (int16_t)inst->qNoise;
+}
+
+// Filter the data in the frequency domain, and create spectrum.
+static void PrepareSpectrumC(NoiseSuppressionFixedC* inst, int16_t* freq_buf) {
+  size_t i = 0, j = 0;
+
+  for (i = 0; i < inst->magnLen; i++) {
+    inst->real[i] = (int16_t)((inst->real[i] *
+        (int16_t)(inst->noiseSupFilter[i])) >> 14);  // Q(normData-stages)
+    inst->imag[i] = (int16_t)((inst->imag[i] *
+        (int16_t)(inst->noiseSupFilter[i])) >> 14);  // Q(normData-stages)
+  }
+
+  freq_buf[0] = inst->real[0];
+  freq_buf[1] = -inst->imag[0];
+  for (i = 1, j = 2; i < inst->anaLen2; i += 1, j += 2) {
+    freq_buf[j] = inst->real[i];
+    freq_buf[j + 1] = -inst->imag[i];
+  }
+  freq_buf[inst->anaLen] = inst->real[inst->anaLen2];
+  freq_buf[inst->anaLen + 1] = -inst->imag[inst->anaLen2];
+}
+
+// Denormalize the real-valued signal |in|, the output from inverse FFT.
+static void DenormalizeC(NoiseSuppressionFixedC* inst,
+                         int16_t* in,
+                         int factor) {
+  size_t i = 0;
+  int32_t tmp32 = 0;
+  for (i = 0; i < inst->anaLen; i += 1) {
+    tmp32 = WEBRTC_SPL_SHIFT_W32((int32_t)in[i],
+                                 factor - inst->normData);
+    inst->real[i] = WebRtcSpl_SatW32ToW16(tmp32); // Q0
+  }
+}
+
+// For the noise supression process, synthesis, read out fully processed
+// segment, and update synthesis buffer.
+static void SynthesisUpdateC(NoiseSuppressionFixedC* inst,
+                             int16_t* out_frame,
+                             int16_t gain_factor) {
+  size_t i = 0;
+  int16_t tmp16a = 0;
+  int16_t tmp16b = 0;
+  int32_t tmp32 = 0;
+
+  // synthesis
+  for (i = 0; i < inst->anaLen; i++) {
+    tmp16a = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+                 inst->window[i], inst->real[i], 14); // Q0, window in Q14
+    tmp32 = WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(tmp16a, gain_factor, 13); // Q0
+    // Down shift with rounding
+    tmp16b = WebRtcSpl_SatW32ToW16(tmp32); // Q0
+    inst->synthesisBuffer[i] = WebRtcSpl_AddSatW16(inst->synthesisBuffer[i],
+                                                   tmp16b); // Q0
+  }
+
+  // read out fully processed segment
+  for (i = 0; i < inst->blockLen10ms; i++) {
+    out_frame[i] = inst->synthesisBuffer[i]; // Q0
+  }
+
+  // update synthesis buffer
+  memcpy(inst->synthesisBuffer, inst->synthesisBuffer + inst->blockLen10ms,
+      (inst->anaLen - inst->blockLen10ms) * sizeof(*inst->synthesisBuffer));
+  WebRtcSpl_ZerosArrayW16(inst->synthesisBuffer
+      + inst->anaLen - inst->blockLen10ms, inst->blockLen10ms);
+}
+
+// Update analysis buffer for lower band, and window data before FFT.
+static void AnalysisUpdateC(NoiseSuppressionFixedC* inst,
+                            int16_t* out,
+                            int16_t* new_speech) {
+  size_t i = 0;
+
+  // For lower band update analysis buffer.
+  memcpy(inst->analysisBuffer, inst->analysisBuffer + inst->blockLen10ms,
+      (inst->anaLen - inst->blockLen10ms) * sizeof(*inst->analysisBuffer));
+  memcpy(inst->analysisBuffer + inst->anaLen - inst->blockLen10ms, new_speech,
+      inst->blockLen10ms * sizeof(*inst->analysisBuffer));
+
+  // Window data before FFT.
+  for (i = 0; i < inst->anaLen; i++) {
+    out[i] = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+               inst->window[i], inst->analysisBuffer[i], 14); // Q0
+  }
+}
+
+// Normalize the real-valued signal |in|, the input to forward FFT.
+static void NormalizeRealBufferC(NoiseSuppressionFixedC* inst,
+                                 const int16_t* in,
+                                 int16_t* out) {
+  size_t i = 0;
+  RTC_DCHECK_GE(inst->normData, 0);
+  for (i = 0; i < inst->anaLen; ++i) {
+    out[i] = in[i] << inst->normData;  // Q(normData)
+  }
+}
+
+// Declare function pointers.
+NoiseEstimation WebRtcNsx_NoiseEstimation;
+PrepareSpectrum WebRtcNsx_PrepareSpectrum;
+SynthesisUpdate WebRtcNsx_SynthesisUpdate;
+AnalysisUpdate WebRtcNsx_AnalysisUpdate;
+Denormalize WebRtcNsx_Denormalize;
+NormalizeRealBuffer WebRtcNsx_NormalizeRealBuffer;
+
+#if defined(WEBRTC_HAS_NEON)
+// Initialize function pointers for ARM Neon platform.
+static void WebRtcNsx_InitNeon(void) {
+  WebRtcNsx_NoiseEstimation = WebRtcNsx_NoiseEstimationNeon;
+  WebRtcNsx_PrepareSpectrum = WebRtcNsx_PrepareSpectrumNeon;
+  WebRtcNsx_SynthesisUpdate = WebRtcNsx_SynthesisUpdateNeon;
+  WebRtcNsx_AnalysisUpdate = WebRtcNsx_AnalysisUpdateNeon;
+}
+#endif
+
+#if defined(MIPS32_LE)
+// Initialize function pointers for MIPS platform.
+static void WebRtcNsx_InitMips(void) {
+  WebRtcNsx_PrepareSpectrum = WebRtcNsx_PrepareSpectrum_mips;
+  WebRtcNsx_SynthesisUpdate = WebRtcNsx_SynthesisUpdate_mips;
+  WebRtcNsx_AnalysisUpdate = WebRtcNsx_AnalysisUpdate_mips;
+  WebRtcNsx_NormalizeRealBuffer = WebRtcNsx_NormalizeRealBuffer_mips;
+#if defined(MIPS_DSP_R1_LE)
+  WebRtcNsx_Denormalize = WebRtcNsx_Denormalize_mips;
+#endif
+}
+#endif
+
+void WebRtcNsx_CalcParametricNoiseEstimate(NoiseSuppressionFixedC* inst,
+                                           int16_t pink_noise_exp_avg,
+                                           int32_t pink_noise_num_avg,
+                                           int freq_index,
+                                           uint32_t* noise_estimate,
+                                           uint32_t* noise_estimate_avg) {
+  int32_t tmp32no1 = 0;
+  int32_t tmp32no2 = 0;
+
+  int16_t int_part = 0;
+  int16_t frac_part = 0;
+
+  // Use pink noise estimate
+  // noise_estimate = 2^(pinkNoiseNumerator + pinkNoiseExp * log2(j))
+  RTC_DCHECK_GE(freq_index, 0);
+  RTC_DCHECK_LT(freq_index, 129);
+  tmp32no2 = (pink_noise_exp_avg * kLogIndex[freq_index]) >> 15;  // Q11
+  tmp32no1 = pink_noise_num_avg - tmp32no2; // Q11
+
+  // Calculate output: 2^tmp32no1
+  // Output in Q(minNorm-stages)
+  tmp32no1 += (inst->minNorm - inst->stages) << 11;
+  if (tmp32no1 > 0) {
+    int_part = (int16_t)(tmp32no1 >> 11);
+    frac_part = (int16_t)(tmp32no1 & 0x000007ff); // Q11
+    // Piecewise linear approximation of 'b' in
+    // 2^(int_part+frac_part) = 2^int_part * (1 + b)
+    // 'b' is given in Q11 and below stored in frac_part.
+    if (frac_part >> 10) {
+      // Upper fractional part
+      tmp32no2 = (2048 - frac_part) * 1244;  // Q21
+      tmp32no2 = 2048 - (tmp32no2 >> 10);
+    } else {
+      // Lower fractional part
+      tmp32no2 = (frac_part * 804) >> 10;
+    }
+    // Shift fractional part to Q(minNorm-stages)
+    tmp32no2 = WEBRTC_SPL_SHIFT_W32(tmp32no2, int_part - 11);
+    *noise_estimate_avg = (1 << int_part) + (uint32_t)tmp32no2;
+    // Scale up to initMagnEst, which is not block averaged
+    *noise_estimate = (*noise_estimate_avg) * (uint32_t)(inst->blockIndex + 1);
+  }
+}
+
+// Initialize state
+int32_t WebRtcNsx_InitCore(NoiseSuppressionFixedC* inst, uint32_t fs) {
+  int i;
+
+  //check for valid pointer
+  if (inst == NULL) {
+    return -1;
+  }
+  //
+
+  // Initialization of struct
+  if (fs == 8000 || fs == 16000 || fs == 32000 || fs == 48000) {
+    inst->fs = fs;
+  } else {
+    return -1;
+  }
+
+  if (fs == 8000) {
+    inst->blockLen10ms = 80;
+    inst->anaLen = 128;
+    inst->stages = 7;
+    inst->window = kBlocks80w128x;
+    inst->thresholdLogLrt = 131072; //default threshold for LRT feature
+    inst->maxLrt = 0x0040000;
+    inst->minLrt = 52429;
+  } else {
+    inst->blockLen10ms = 160;
+    inst->anaLen = 256;
+    inst->stages = 8;
+    inst->window = kBlocks160w256x;
+    inst->thresholdLogLrt = 212644; //default threshold for LRT feature
+    inst->maxLrt = 0x0080000;
+    inst->minLrt = 104858;
+  }
+  inst->anaLen2 = inst->anaLen / 2;
+  inst->magnLen = inst->anaLen2 + 1;
+
+  if (inst->real_fft != NULL) {
+    WebRtcSpl_FreeRealFFT(inst->real_fft);
+  }
+  inst->real_fft = WebRtcSpl_CreateRealFFT(inst->stages);
+  if (inst->real_fft == NULL) {
+    return -1;
+  }
+
+  WebRtcSpl_ZerosArrayW16(inst->analysisBuffer, ANAL_BLOCKL_MAX);
+  WebRtcSpl_ZerosArrayW16(inst->synthesisBuffer, ANAL_BLOCKL_MAX);
+
+  // for HB processing
+  WebRtcSpl_ZerosArrayW16(inst->dataBufHBFX[0],
+                          NUM_HIGH_BANDS_MAX * ANAL_BLOCKL_MAX);
+  // for quantile noise estimation
+  WebRtcSpl_ZerosArrayW16(inst->noiseEstQuantile, HALF_ANAL_BLOCKL);
+  for (i = 0; i < SIMULT * HALF_ANAL_BLOCKL; i++) {
+    inst->noiseEstLogQuantile[i] = 2048; // Q8
+    inst->noiseEstDensity[i] = 153; // Q9
+  }
+  for (i = 0; i < SIMULT; i++) {
+    inst->noiseEstCounter[i] = (int16_t)(END_STARTUP_LONG * (i + 1)) / SIMULT;
+  }
+
+  // Initialize suppression filter with ones
+  WebRtcSpl_MemSetW16((int16_t*)inst->noiseSupFilter, 16384, HALF_ANAL_BLOCKL);
+
+  // Set the aggressiveness: default
+  inst->aggrMode = 0;
+
+  //initialize variables for new method
+  inst->priorNonSpeechProb = 8192; // Q14(0.5) prior probability for speech/noise
+  for (i = 0; i < HALF_ANAL_BLOCKL; i++) {
+    inst->prevMagnU16[i] = 0;
+    inst->prevNoiseU32[i] = 0; //previous noise-spectrum
+    inst->logLrtTimeAvgW32[i] = 0; //smooth LR ratio
+    inst->avgMagnPause[i] = 0; //conservative noise spectrum estimate
+    inst->initMagnEst[i] = 0; //initial average magnitude spectrum
+  }
+
+  //feature quantities
+  inst->thresholdSpecDiff = 50; //threshold for difference feature: determined on-line
+  inst->thresholdSpecFlat = 20480; //threshold for flatness: determined on-line
+  inst->featureLogLrt = inst->thresholdLogLrt; //average LRT factor (= threshold)
+  inst->featureSpecFlat = inst->thresholdSpecFlat; //spectral flatness (= threshold)
+  inst->featureSpecDiff = inst->thresholdSpecDiff; //spectral difference (= threshold)
+  inst->weightLogLrt = 6; //default weighting par for LRT feature
+  inst->weightSpecFlat = 0; //default weighting par for spectral flatness feature
+  inst->weightSpecDiff = 0; //default weighting par for spectral difference feature
+
+  inst->curAvgMagnEnergy = 0; //window time-average of input magnitude spectrum
+  inst->timeAvgMagnEnergy = 0; //normalization for spectral difference
+  inst->timeAvgMagnEnergyTmp = 0; //normalization for spectral difference
+
+  //histogram quantities: used to estimate/update thresholds for features
+  WebRtcSpl_ZerosArrayW16(inst->histLrt, HIST_PAR_EST);
+  WebRtcSpl_ZerosArrayW16(inst->histSpecDiff, HIST_PAR_EST);
+  WebRtcSpl_ZerosArrayW16(inst->histSpecFlat, HIST_PAR_EST);
+
+  inst->blockIndex = -1; //frame counter
+
+  //inst->modelUpdate    = 500;   //window for update
+  inst->modelUpdate = (1 << STAT_UPDATES); //window for update
+  inst->cntThresUpdate = 0; //counter feature thresholds updates
+
+  inst->sumMagn = 0;
+  inst->magnEnergy = 0;
+  inst->prevQMagn = 0;
+  inst->qNoise = 0;
+  inst->prevQNoise = 0;
+
+  inst->energyIn = 0;
+  inst->scaleEnergyIn = 0;
+
+  inst->whiteNoiseLevel = 0;
+  inst->pinkNoiseNumerator = 0;
+  inst->pinkNoiseExp = 0;
+  inst->minNorm = 15; // Start with full scale
+  inst->zeroInputSignal = 0;
+
+  //default mode
+  WebRtcNsx_set_policy_core(inst, 0);
+
+#ifdef NS_FILEDEBUG
+  inst->infile = fopen("indebug.pcm", "wb");
+  inst->outfile = fopen("outdebug.pcm", "wb");
+  inst->file1 = fopen("file1.pcm", "wb");
+  inst->file2 = fopen("file2.pcm", "wb");
+  inst->file3 = fopen("file3.pcm", "wb");
+  inst->file4 = fopen("file4.pcm", "wb");
+  inst->file5 = fopen("file5.pcm", "wb");
+#endif
+
+  // Initialize function pointers.
+  WebRtcNsx_NoiseEstimation = NoiseEstimationC;
+  WebRtcNsx_PrepareSpectrum = PrepareSpectrumC;
+  WebRtcNsx_SynthesisUpdate = SynthesisUpdateC;
+  WebRtcNsx_AnalysisUpdate = AnalysisUpdateC;
+  WebRtcNsx_Denormalize = DenormalizeC;
+  WebRtcNsx_NormalizeRealBuffer = NormalizeRealBufferC;
+
+#if defined(WEBRTC_HAS_NEON)
+  WebRtcNsx_InitNeon();
+#endif
+
+#if defined(MIPS32_LE)
+  WebRtcNsx_InitMips();
+#endif
+
+  inst->initFlag = 1;
+
+  return 0;
+}
+
+int WebRtcNsx_set_policy_core(NoiseSuppressionFixedC* inst, int mode) {
+  // allow for modes:0,1,2,3
+  if (mode < 0 || mode > 3) {
+    return -1;
+  }
+
+  inst->aggrMode = mode;
+  if (mode == 0) {
+    inst->overdrive = 256; // Q8(1.0)
+    inst->denoiseBound = 8192; // Q14(0.5)
+    inst->gainMap = 0; // No gain compensation
+  } else if (mode == 1) {
+    inst->overdrive = 256; // Q8(1.0)
+    inst->denoiseBound = 4096; // Q14(0.25)
+    inst->factor2Table = kFactor2Aggressiveness1;
+    inst->gainMap = 1;
+  } else if (mode == 2) {
+    inst->overdrive = 282; // ~= Q8(1.1)
+    inst->denoiseBound = 2048; // Q14(0.125)
+    inst->factor2Table = kFactor2Aggressiveness2;
+    inst->gainMap = 1;
+  } else if (mode == 3) {
+    inst->overdrive = 320; // Q8(1.25)
+    inst->denoiseBound = 1475; // ~= Q14(0.09)
+    inst->factor2Table = kFactor2Aggressiveness3;
+    inst->gainMap = 1;
+  }
+  return 0;
+}
+
+// Extract thresholds for feature parameters
+// histograms are computed over some window_size (given by window_pars)
+// thresholds and weights are extracted every window
+// flag 0 means update histogram only, flag 1 means compute the thresholds/weights
+// threshold and weights are returned in: inst->priorModelPars
+void WebRtcNsx_FeatureParameterExtraction(NoiseSuppressionFixedC* inst,
+                                          int flag) {
+  uint32_t tmpU32;
+  uint32_t histIndex;
+  uint32_t posPeak1SpecFlatFX, posPeak2SpecFlatFX;
+  uint32_t posPeak1SpecDiffFX, posPeak2SpecDiffFX;
+
+  int32_t tmp32;
+  int32_t fluctLrtFX, thresFluctLrtFX;
+  int32_t avgHistLrtFX, avgSquareHistLrtFX, avgHistLrtComplFX;
+
+  int16_t j;
+  int16_t numHistLrt;
+
+  int i;
+  int useFeatureSpecFlat, useFeatureSpecDiff, featureSum;
+  int maxPeak1, maxPeak2;
+  int weightPeak1SpecFlat, weightPeak2SpecFlat;
+  int weightPeak1SpecDiff, weightPeak2SpecDiff;
+
+  //update histograms
+  if (!flag) {
+    // LRT
+    // Type casting to UWord32 is safe since negative values will not be wrapped to larger
+    // values than HIST_PAR_EST
+    histIndex = (uint32_t)(inst->featureLogLrt);
+    if (histIndex < HIST_PAR_EST) {
+      inst->histLrt[histIndex]++;
+    }
+    // Spectral flatness
+    // (inst->featureSpecFlat*20)>>10 = (inst->featureSpecFlat*5)>>8
+    histIndex = (inst->featureSpecFlat * 5) >> 8;
+    if (histIndex < HIST_PAR_EST) {
+      inst->histSpecFlat[histIndex]++;
+    }
+    // Spectral difference
+    histIndex = HIST_PAR_EST;
+    if (inst->timeAvgMagnEnergy > 0) {
+      // Guard against division by zero
+      // If timeAvgMagnEnergy == 0 we have no normalizing statistics and
+      // therefore can't update the histogram
+      histIndex = ((inst->featureSpecDiff * 5) >> inst->stages) /
+          inst->timeAvgMagnEnergy;
+    }
+    if (histIndex < HIST_PAR_EST) {
+      inst->histSpecDiff[histIndex]++;
+    }
+  }
+
+  // extract parameters for speech/noise probability
+  if (flag) {
+    useFeatureSpecDiff = 1;
+    //for LRT feature:
+    // compute the average over inst->featureExtractionParams.rangeAvgHistLrt
+    avgHistLrtFX = 0;
+    avgSquareHistLrtFX = 0;
+    numHistLrt = 0;
+    for (i = 0; i < BIN_SIZE_LRT; i++) {
+      j = (2 * i + 1);
+      tmp32 = inst->histLrt[i] * j;
+      avgHistLrtFX += tmp32;
+      numHistLrt += inst->histLrt[i];
+      avgSquareHistLrtFX += tmp32 * j;
+    }
+    avgHistLrtComplFX = avgHistLrtFX;
+    for (; i < HIST_PAR_EST; i++) {
+      j = (2 * i + 1);
+      tmp32 = inst->histLrt[i] * j;
+      avgHistLrtComplFX += tmp32;
+      avgSquareHistLrtFX += tmp32 * j;
+    }
+    fluctLrtFX = avgSquareHistLrtFX * numHistLrt -
+        avgHistLrtFX * avgHistLrtComplFX;
+    thresFluctLrtFX = THRES_FLUCT_LRT * numHistLrt;
+    // get threshold for LRT feature:
+    tmpU32 = (FACTOR_1_LRT_DIFF * (uint32_t)avgHistLrtFX);
+    if ((fluctLrtFX < thresFluctLrtFX) || (numHistLrt == 0) ||
+        (tmpU32 > (uint32_t)(100 * numHistLrt))) {
+      //very low fluctuation, so likely noise
+      inst->thresholdLogLrt = inst->maxLrt;
+    } else {
+      tmp32 = (int32_t)((tmpU32 << (9 + inst->stages)) / numHistLrt /
+                              25);
+      // check if value is within min/max range
+      inst->thresholdLogLrt = WEBRTC_SPL_SAT(inst->maxLrt,
+                                             tmp32,
+                                             inst->minLrt);
+    }
+    if (fluctLrtFX < thresFluctLrtFX) {
+      // Do not use difference feature if fluctuation of LRT feature is very low:
+      // most likely just noise state
+      useFeatureSpecDiff = 0;
+    }
+
+    // for spectral flatness and spectral difference: compute the main peaks of histogram
+    maxPeak1 = 0;
+    maxPeak2 = 0;
+    posPeak1SpecFlatFX = 0;
+    posPeak2SpecFlatFX = 0;
+    weightPeak1SpecFlat = 0;
+    weightPeak2SpecFlat = 0;
+
+    // peaks for flatness
+    for (i = 0; i < HIST_PAR_EST; i++) {
+      if (inst->histSpecFlat[i] > maxPeak1) {
+        // Found new "first" peak
+        maxPeak2 = maxPeak1;
+        weightPeak2SpecFlat = weightPeak1SpecFlat;
+        posPeak2SpecFlatFX = posPeak1SpecFlatFX;
+
+        maxPeak1 = inst->histSpecFlat[i];
+        weightPeak1SpecFlat = inst->histSpecFlat[i];
+        posPeak1SpecFlatFX = (uint32_t)(2 * i + 1);
+      } else if (inst->histSpecFlat[i] > maxPeak2) {
+        // Found new "second" peak
+        maxPeak2 = inst->histSpecFlat[i];
+        weightPeak2SpecFlat = inst->histSpecFlat[i];
+        posPeak2SpecFlatFX = (uint32_t)(2 * i + 1);
+      }
+    }
+
+    // for spectral flatness feature
+    useFeatureSpecFlat = 1;
+    // merge the two peaks if they are close
+    if ((posPeak1SpecFlatFX - posPeak2SpecFlatFX < LIM_PEAK_SPACE_FLAT_DIFF)
+        && (weightPeak2SpecFlat * LIM_PEAK_WEIGHT_FLAT_DIFF > weightPeak1SpecFlat)) {
+      weightPeak1SpecFlat += weightPeak2SpecFlat;
+      posPeak1SpecFlatFX = (posPeak1SpecFlatFX + posPeak2SpecFlatFX) >> 1;
+    }
+    //reject if weight of peaks is not large enough, or peak value too small
+    if (weightPeak1SpecFlat < THRES_WEIGHT_FLAT_DIFF || posPeak1SpecFlatFX
+        < THRES_PEAK_FLAT) {
+      useFeatureSpecFlat = 0;
+    } else { // if selected, get the threshold
+      // compute the threshold and check if value is within min/max range
+      inst->thresholdSpecFlat = WEBRTC_SPL_SAT(MAX_FLAT_Q10, FACTOR_2_FLAT_Q10
+                                               * posPeak1SpecFlatFX, MIN_FLAT_Q10); //Q10
+    }
+    // done with flatness feature
+
+    if (useFeatureSpecDiff) {
+      //compute two peaks for spectral difference
+      maxPeak1 = 0;
+      maxPeak2 = 0;
+      posPeak1SpecDiffFX = 0;
+      posPeak2SpecDiffFX = 0;
+      weightPeak1SpecDiff = 0;
+      weightPeak2SpecDiff = 0;
+      // peaks for spectral difference
+      for (i = 0; i < HIST_PAR_EST; i++) {
+        if (inst->histSpecDiff[i] > maxPeak1) {
+          // Found new "first" peak
+          maxPeak2 = maxPeak1;
+          weightPeak2SpecDiff = weightPeak1SpecDiff;
+          posPeak2SpecDiffFX = posPeak1SpecDiffFX;
+
+          maxPeak1 = inst->histSpecDiff[i];
+          weightPeak1SpecDiff = inst->histSpecDiff[i];
+          posPeak1SpecDiffFX = (uint32_t)(2 * i + 1);
+        } else if (inst->histSpecDiff[i] > maxPeak2) {
+          // Found new "second" peak
+          maxPeak2 = inst->histSpecDiff[i];
+          weightPeak2SpecDiff = inst->histSpecDiff[i];
+          posPeak2SpecDiffFX = (uint32_t)(2 * i + 1);
+        }
+      }
+
+      // merge the two peaks if they are close
+      if ((posPeak1SpecDiffFX - posPeak2SpecDiffFX < LIM_PEAK_SPACE_FLAT_DIFF)
+          && (weightPeak2SpecDiff * LIM_PEAK_WEIGHT_FLAT_DIFF > weightPeak1SpecDiff)) {
+        weightPeak1SpecDiff += weightPeak2SpecDiff;
+        posPeak1SpecDiffFX = (posPeak1SpecDiffFX + posPeak2SpecDiffFX) >> 1;
+      }
+      // get the threshold value and check if value is within min/max range
+      inst->thresholdSpecDiff = WEBRTC_SPL_SAT(MAX_DIFF, FACTOR_1_LRT_DIFF
+                                               * posPeak1SpecDiffFX, MIN_DIFF); //5x bigger
+      //reject if weight of peaks is not large enough
+      if (weightPeak1SpecDiff < THRES_WEIGHT_FLAT_DIFF) {
+        useFeatureSpecDiff = 0;
+      }
+      // done with spectral difference feature
+    }
+
+    // select the weights between the features
+    // inst->priorModelPars[4] is weight for LRT: always selected
+    featureSum = 6 / (1 + useFeatureSpecFlat + useFeatureSpecDiff);
+    inst->weightLogLrt = featureSum;
+    inst->weightSpecFlat = useFeatureSpecFlat * featureSum;
+    inst->weightSpecDiff = useFeatureSpecDiff * featureSum;
+
+    // set histograms to zero for next update
+    WebRtcSpl_ZerosArrayW16(inst->histLrt, HIST_PAR_EST);
+    WebRtcSpl_ZerosArrayW16(inst->histSpecDiff, HIST_PAR_EST);
+    WebRtcSpl_ZerosArrayW16(inst->histSpecFlat, HIST_PAR_EST);
+  }  // end of flag == 1
+}
+
+
+// Compute spectral flatness on input spectrum
+// magn is the magnitude spectrum
+// spectral flatness is returned in inst->featureSpecFlat
+void WebRtcNsx_ComputeSpectralFlatness(NoiseSuppressionFixedC* inst,
+                                       uint16_t* magn) {
+  uint32_t tmpU32;
+  uint32_t avgSpectralFlatnessNum, avgSpectralFlatnessDen;
+
+  int32_t tmp32;
+  int32_t currentSpectralFlatness, logCurSpectralFlatness;
+
+  int16_t zeros, frac, intPart;
+
+  size_t i;
+
+  // for flatness
+  avgSpectralFlatnessNum = 0;
+  avgSpectralFlatnessDen = inst->sumMagn - (uint32_t)magn[0]; // Q(normData-stages)
+
+  // compute log of ratio of the geometric to arithmetic mean: check for log(0) case
+  // flatness = exp( sum(log(magn[i]))/N - log(sum(magn[i])/N) )
+  //          = exp( sum(log(magn[i]))/N ) * N / sum(magn[i])
+  //          = 2^( sum(log2(magn[i]))/N - (log2(sum(magn[i])) - log2(N)) ) [This is used]
+  for (i = 1; i < inst->magnLen; i++) {
+    // First bin is excluded from spectrum measures. Number of bins is now a power of 2
+    if (magn[i]) {
+      zeros = WebRtcSpl_NormU32((uint32_t)magn[i]);
+      frac = (int16_t)(((uint32_t)((uint32_t)(magn[i]) << zeros)
+                              & 0x7FFFFFFF) >> 23);
+      // log2(magn(i))
+      RTC_DCHECK_LT(frac, 256);
+      tmpU32 = (uint32_t)(((31 - zeros) << 8)
+                                + WebRtcNsx_kLogTableFrac[frac]); // Q8
+      avgSpectralFlatnessNum += tmpU32; // Q8
+    } else {
+      //if at least one frequency component is zero, treat separately
+      tmpU32 = WEBRTC_SPL_UMUL_32_16(inst->featureSpecFlat, SPECT_FLAT_TAVG_Q14); // Q24
+      inst->featureSpecFlat -= tmpU32 >> 14;  // Q10
+      return;
+    }
+  }
+  //ratio and inverse log: check for case of log(0)
+  zeros = WebRtcSpl_NormU32(avgSpectralFlatnessDen);
+  frac = (int16_t)(((avgSpectralFlatnessDen << zeros) & 0x7FFFFFFF) >> 23);
+  // log2(avgSpectralFlatnessDen)
+  RTC_DCHECK_LT(frac, 256);
+  tmp32 = (int32_t)(((31 - zeros) << 8) + WebRtcNsx_kLogTableFrac[frac]); // Q8
+  logCurSpectralFlatness = (int32_t)avgSpectralFlatnessNum;
+  logCurSpectralFlatness += ((int32_t)(inst->stages - 1) << (inst->stages + 7)); // Q(8+stages-1)
+  logCurSpectralFlatness -= (tmp32 << (inst->stages - 1));
+  logCurSpectralFlatness <<= (10 - inst->stages);  // Q17
+  tmp32 = (int32_t)(0x00020000 | (WEBRTC_SPL_ABS_W32(logCurSpectralFlatness)
+                                        & 0x0001FFFF)); //Q17
+  intPart = 7 - (logCurSpectralFlatness >> 17);  // Add 7 for output in Q10.
+  if (intPart > 0) {
+    currentSpectralFlatness = tmp32 >> intPart;
+  } else {
+    currentSpectralFlatness = tmp32 << -intPart;
+  }
+
+  //time average update of spectral flatness feature
+  tmp32 = currentSpectralFlatness - (int32_t)inst->featureSpecFlat; // Q10
+  tmp32 *= SPECT_FLAT_TAVG_Q14;  // Q24
+  inst->featureSpecFlat += tmp32 >> 14;  // Q10
+  // done with flatness feature
+}
+
+
+// Compute the difference measure between input spectrum and a template/learned noise spectrum
+// magn_tmp is the input spectrum
+// the reference/template spectrum is  inst->magn_avg_pause[i]
+// returns (normalized) spectral difference in inst->featureSpecDiff
+void WebRtcNsx_ComputeSpectralDifference(NoiseSuppressionFixedC* inst,
+                                         uint16_t* magnIn) {
+  // This is to be calculated:
+  // avgDiffNormMagn = var(magnIn) - cov(magnIn, magnAvgPause)^2 / var(magnAvgPause)
+
+  uint32_t tmpU32no1, tmpU32no2;
+  uint32_t varMagnUFX, varPauseUFX, avgDiffNormMagnUFX;
+
+  int32_t tmp32no1, tmp32no2;
+  int32_t avgPauseFX, avgMagnFX, covMagnPauseFX;
+  int32_t maxPause, minPause;
+
+  int16_t tmp16no1;
+
+  size_t i;
+  int norm32, nShifts;
+
+  avgPauseFX = 0;
+  maxPause = 0;
+  minPause = inst->avgMagnPause[0]; // Q(prevQMagn)
+  // compute average quantities
+  for (i = 0; i < inst->magnLen; i++) {
+    // Compute mean of magn_pause
+    avgPauseFX += inst->avgMagnPause[i]; // in Q(prevQMagn)
+    maxPause = WEBRTC_SPL_MAX(maxPause, inst->avgMagnPause[i]);
+    minPause = WEBRTC_SPL_MIN(minPause, inst->avgMagnPause[i]);
+  }
+  // normalize by replacing div of "inst->magnLen" with "inst->stages-1" shifts
+  avgPauseFX >>= inst->stages - 1;
+  avgMagnFX = inst->sumMagn >> (inst->stages - 1);
+  // Largest possible deviation in magnPause for (co)var calculations
+  tmp32no1 = WEBRTC_SPL_MAX(maxPause - avgPauseFX, avgPauseFX - minPause);
+  // Get number of shifts to make sure we don't get wrap around in varPause
+  nShifts = WEBRTC_SPL_MAX(0, 10 + inst->stages - WebRtcSpl_NormW32(tmp32no1));
+
+  varMagnUFX = 0;
+  varPauseUFX = 0;
+  covMagnPauseFX = 0;
+  for (i = 0; i < inst->magnLen; i++) {
+    // Compute var and cov of magn and magn_pause
+    tmp16no1 = (int16_t)((int32_t)magnIn[i] - avgMagnFX);
+    tmp32no2 = inst->avgMagnPause[i] - avgPauseFX;
+    varMagnUFX += (uint32_t)(tmp16no1 * tmp16no1);  // Q(2*qMagn)
+    tmp32no1 = tmp32no2 * tmp16no1;  // Q(prevQMagn+qMagn)
+    covMagnPauseFX += tmp32no1; // Q(prevQMagn+qMagn)
+    tmp32no1 = tmp32no2 >> nShifts;  // Q(prevQMagn-minPause).
+    varPauseUFX += tmp32no1 * tmp32no1;  // Q(2*(prevQMagn-minPause))
+  }
+  //update of average magnitude spectrum: Q(-2*stages) and averaging replaced by shifts
+  inst->curAvgMagnEnergy +=
+      inst->magnEnergy >> (2 * inst->normData + inst->stages - 1);
+
+  avgDiffNormMagnUFX = varMagnUFX; // Q(2*qMagn)
+  if ((varPauseUFX) && (covMagnPauseFX)) {
+    tmpU32no1 = (uint32_t)WEBRTC_SPL_ABS_W32(covMagnPauseFX); // Q(prevQMagn+qMagn)
+    norm32 = WebRtcSpl_NormU32(tmpU32no1) - 16;
+    if (norm32 > 0) {
+      tmpU32no1 <<= norm32;  // Q(prevQMagn+qMagn+norm32)
+    } else {
+      tmpU32no1 >>= -norm32;  // Q(prevQMagn+qMagn+norm32)
+    }
+    tmpU32no2 = WEBRTC_SPL_UMUL(tmpU32no1, tmpU32no1); // Q(2*(prevQMagn+qMagn-norm32))
+
+    nShifts += norm32;
+    nShifts <<= 1;
+    if (nShifts < 0) {
+      varPauseUFX >>= (-nShifts); // Q(2*(qMagn+norm32+minPause))
+      nShifts = 0;
+    }
+    if (varPauseUFX > 0) {
+      // Q(2*(qMagn+norm32-16+minPause))
+      tmpU32no1 = tmpU32no2 / varPauseUFX;
+      tmpU32no1 >>= nShifts;
+
+      // Q(2*qMagn)
+      avgDiffNormMagnUFX -= WEBRTC_SPL_MIN(avgDiffNormMagnUFX, tmpU32no1);
+    } else {
+      avgDiffNormMagnUFX = 0;
+    }
+  }
+  //normalize and compute time average update of difference feature
+  tmpU32no1 = avgDiffNormMagnUFX >> (2 * inst->normData);
+  if (inst->featureSpecDiff > tmpU32no1) {
+    tmpU32no2 = WEBRTC_SPL_UMUL_32_16(inst->featureSpecDiff - tmpU32no1,
+                                      SPECT_DIFF_TAVG_Q8); // Q(8-2*stages)
+    inst->featureSpecDiff -= tmpU32no2 >> 8;  // Q(-2*stages)
+  } else {
+    tmpU32no2 = WEBRTC_SPL_UMUL_32_16(tmpU32no1 - inst->featureSpecDiff,
+                                      SPECT_DIFF_TAVG_Q8); // Q(8-2*stages)
+    inst->featureSpecDiff += tmpU32no2 >> 8;  // Q(-2*stages)
+  }
+}
+
+// Transform input (speechFrame) to frequency domain magnitude (magnU16)
+void WebRtcNsx_DataAnalysis(NoiseSuppressionFixedC* inst,
+                            short* speechFrame,
+                            uint16_t* magnU16) {
+  uint32_t tmpU32no1;
+
+  int32_t   tmp_1_w32 = 0;
+  int32_t   tmp_2_w32 = 0;
+  int32_t   sum_log_magn = 0;
+  int32_t   sum_log_i_log_magn = 0;
+
+  uint16_t  sum_log_magn_u16 = 0;
+  uint16_t  tmp_u16 = 0;
+
+  int16_t   sum_log_i = 0;
+  int16_t   sum_log_i_square = 0;
+  int16_t   frac = 0;
+  int16_t   log2 = 0;
+  int16_t   matrix_determinant = 0;
+  int16_t   maxWinData;
+
+  size_t i, j;
+  int zeros;
+  int net_norm = 0;
+  int right_shifts_in_magnU16 = 0;
+  int right_shifts_in_initMagnEst = 0;
+
+  int16_t winData_buff[ANAL_BLOCKL_MAX * 2 + 16];
+  int16_t realImag_buff[ANAL_BLOCKL_MAX * 2 + 16];
+
+  // Align the structures to 32-byte boundary for the FFT function.
+  int16_t* winData = (int16_t*) (((uintptr_t)winData_buff + 31) & ~31);
+  int16_t* realImag = (int16_t*) (((uintptr_t) realImag_buff + 31) & ~31);
+
+  // Update analysis buffer for lower band, and window data before FFT.
+  WebRtcNsx_AnalysisUpdate(inst, winData, speechFrame);
+
+  // Get input energy
+  inst->energyIn =
+      WebRtcSpl_Energy(winData, inst->anaLen, &inst->scaleEnergyIn);
+
+  // Reset zero input flag
+  inst->zeroInputSignal = 0;
+  // Acquire norm for winData
+  maxWinData = WebRtcSpl_MaxAbsValueW16(winData, inst->anaLen);
+  inst->normData = WebRtcSpl_NormW16(maxWinData);
+  if (maxWinData == 0) {
+    // Treat zero input separately.
+    inst->zeroInputSignal = 1;
+    return;
+  }
+
+  // Determine the net normalization in the frequency domain
+  net_norm = inst->stages - inst->normData;
+  // Track lowest normalization factor and use it to prevent wrap around in shifting
+  right_shifts_in_magnU16 = inst->normData - inst->minNorm;
+  right_shifts_in_initMagnEst = WEBRTC_SPL_MAX(-right_shifts_in_magnU16, 0);
+  inst->minNorm -= right_shifts_in_initMagnEst;
+  right_shifts_in_magnU16 = WEBRTC_SPL_MAX(right_shifts_in_magnU16, 0);
+
+  // create realImag as winData interleaved with zeros (= imag. part), normalize it
+  WebRtcNsx_NormalizeRealBuffer(inst, winData, realImag);
+
+  // FFT output will be in winData[].
+  WebRtcSpl_RealForwardFFT(inst->real_fft, realImag, winData);
+
+  inst->imag[0] = 0; // Q(normData-stages)
+  inst->imag[inst->anaLen2] = 0;
+  inst->real[0] = winData[0]; // Q(normData-stages)
+  inst->real[inst->anaLen2] = winData[inst->anaLen];
+  // Q(2*(normData-stages))
+  inst->magnEnergy = (uint32_t)(inst->real[0] * inst->real[0]);
+  inst->magnEnergy += (uint32_t)(inst->real[inst->anaLen2] *
+                                 inst->real[inst->anaLen2]);
+  magnU16[0] = (uint16_t)WEBRTC_SPL_ABS_W16(inst->real[0]); // Q(normData-stages)
+  magnU16[inst->anaLen2] = (uint16_t)WEBRTC_SPL_ABS_W16(inst->real[inst->anaLen2]);
+  inst->sumMagn = (uint32_t)magnU16[0]; // Q(normData-stages)
+  inst->sumMagn += (uint32_t)magnU16[inst->anaLen2];
+
+  if (inst->blockIndex >= END_STARTUP_SHORT) {
+    for (i = 1, j = 2; i < inst->anaLen2; i += 1, j += 2) {
+      inst->real[i] = winData[j];
+      inst->imag[i] = -winData[j + 1];
+      // magnitude spectrum
+      // energy in Q(2*(normData-stages))
+      tmpU32no1 = (uint32_t)(winData[j] * winData[j]);
+      tmpU32no1 += (uint32_t)(winData[j + 1] * winData[j + 1]);
+      inst->magnEnergy += tmpU32no1; // Q(2*(normData-stages))
+
+      magnU16[i] = (uint16_t)WebRtcSpl_SqrtFloor(tmpU32no1); // Q(normData-stages)
+      inst->sumMagn += (uint32_t)magnU16[i]; // Q(normData-stages)
+    }
+  } else {
+    //
+    // Gather information during startup for noise parameter estimation
+    //
+
+    // Switch initMagnEst to Q(minNorm-stages)
+    inst->initMagnEst[0] >>= right_shifts_in_initMagnEst;
+    inst->initMagnEst[inst->anaLen2] >>= right_shifts_in_initMagnEst;
+
+    // Update initMagnEst with magnU16 in Q(minNorm-stages).
+    inst->initMagnEst[0] += magnU16[0] >> right_shifts_in_magnU16;
+    inst->initMagnEst[inst->anaLen2] +=
+        magnU16[inst->anaLen2] >> right_shifts_in_magnU16;
+
+    log2 = 0;
+    if (magnU16[inst->anaLen2]) {
+      // Calculate log2(magnU16[inst->anaLen2])
+      zeros = WebRtcSpl_NormU32((uint32_t)magnU16[inst->anaLen2]);
+      frac = (int16_t)((((uint32_t)magnU16[inst->anaLen2] << zeros) &
+                              0x7FFFFFFF) >> 23); // Q8
+      // log2(magnU16(i)) in Q8
+      RTC_DCHECK_LT(frac, 256);
+      log2 = (int16_t)(((31 - zeros) << 8) + WebRtcNsx_kLogTableFrac[frac]);
+    }
+
+    sum_log_magn = (int32_t)log2; // Q8
+    // sum_log_i_log_magn in Q17
+    sum_log_i_log_magn = (kLogIndex[inst->anaLen2] * log2) >> 3;
+
+    for (i = 1, j = 2; i < inst->anaLen2; i += 1, j += 2) {
+      inst->real[i] = winData[j];
+      inst->imag[i] = -winData[j + 1];
+      // magnitude spectrum
+      // energy in Q(2*(normData-stages))
+      tmpU32no1 = (uint32_t)(winData[j] * winData[j]);
+      tmpU32no1 += (uint32_t)(winData[j + 1] * winData[j + 1]);
+      inst->magnEnergy += tmpU32no1; // Q(2*(normData-stages))
+
+      magnU16[i] = (uint16_t)WebRtcSpl_SqrtFloor(tmpU32no1); // Q(normData-stages)
+      inst->sumMagn += (uint32_t)magnU16[i]; // Q(normData-stages)
+
+      // Switch initMagnEst to Q(minNorm-stages)
+      inst->initMagnEst[i] >>= right_shifts_in_initMagnEst;
+
+      // Update initMagnEst with magnU16 in Q(minNorm-stages).
+      inst->initMagnEst[i] += magnU16[i] >> right_shifts_in_magnU16;
+
+      if (i >= kStartBand) {
+        // For pink noise estimation. Collect data neglecting lower frequency band
+        log2 = 0;
+        if (magnU16[i]) {
+          zeros = WebRtcSpl_NormU32((uint32_t)magnU16[i]);
+          frac = (int16_t)((((uint32_t)magnU16[i] << zeros) &
+                                  0x7FFFFFFF) >> 23);
+          // log2(magnU16(i)) in Q8
+          RTC_DCHECK_LT(frac, 256);
+          log2 = (int16_t)(((31 - zeros) << 8)
+                                 + WebRtcNsx_kLogTableFrac[frac]);
+        }
+        sum_log_magn += (int32_t)log2; // Q8
+        // sum_log_i_log_magn in Q17
+        sum_log_i_log_magn += (kLogIndex[i] * log2) >> 3;
+      }
+    }
+
+    //
+    //compute simplified noise model during startup
+    //
+
+    // Estimate White noise
+
+    // Switch whiteNoiseLevel to Q(minNorm-stages)
+    inst->whiteNoiseLevel >>= right_shifts_in_initMagnEst;
+
+    // Update the average magnitude spectrum, used as noise estimate.
+    tmpU32no1 = WEBRTC_SPL_UMUL_32_16(inst->sumMagn, inst->overdrive);
+    tmpU32no1 >>= inst->stages + 8;
+
+    // Replacing division above with 'stages' shifts
+    // Shift to same Q-domain as whiteNoiseLevel
+    tmpU32no1 >>= right_shifts_in_magnU16;
+    // This operation is safe from wrap around as long as END_STARTUP_SHORT < 128
+    RTC_DCHECK_LT(END_STARTUP_SHORT, 128);
+    inst->whiteNoiseLevel += tmpU32no1; // Q(minNorm-stages)
+
+    // Estimate Pink noise parameters
+    // Denominator used in both parameter estimates.
+    // The value is only dependent on the size of the frequency band (kStartBand)
+    // and to reduce computational complexity stored in a table (kDeterminantEstMatrix[])
+    RTC_DCHECK_LT(kStartBand, 66);
+    matrix_determinant = kDeterminantEstMatrix[kStartBand]; // Q0
+    sum_log_i = kSumLogIndex[kStartBand]; // Q5
+    sum_log_i_square = kSumSquareLogIndex[kStartBand]; // Q2
+    if (inst->fs == 8000) {
+      // Adjust values to shorter blocks in narrow band.
+      tmp_1_w32 = (int32_t)matrix_determinant;
+      tmp_1_w32 += (kSumLogIndex[65] * sum_log_i) >> 9;
+      tmp_1_w32 -= (kSumLogIndex[65] * kSumLogIndex[65]) >> 10;
+      tmp_1_w32 -= (int32_t)sum_log_i_square << 4;
+      tmp_1_w32 -= ((inst->magnLen - kStartBand) * kSumSquareLogIndex[65]) >> 2;
+      matrix_determinant = (int16_t)tmp_1_w32;
+      sum_log_i -= kSumLogIndex[65]; // Q5
+      sum_log_i_square -= kSumSquareLogIndex[65]; // Q2
+    }
+
+    // Necessary number of shifts to fit sum_log_magn in a word16
+    zeros = 16 - WebRtcSpl_NormW32(sum_log_magn);
+    if (zeros < 0) {
+      zeros = 0;
+    }
+    tmp_1_w32 = sum_log_magn << 1;  // Q9
+    sum_log_magn_u16 = (uint16_t)(tmp_1_w32 >> zeros);  // Q(9-zeros).
+
+    // Calculate and update pinkNoiseNumerator. Result in Q11.
+    tmp_2_w32 = WEBRTC_SPL_MUL_16_U16(sum_log_i_square, sum_log_magn_u16); // Q(11-zeros)
+    tmpU32no1 = sum_log_i_log_magn >> 12;  // Q5
+
+    // Shift the largest value of sum_log_i and tmp32no3 before multiplication
+    tmp_u16 = ((uint16_t)sum_log_i << 1);  // Q6
+    if ((uint32_t)sum_log_i > tmpU32no1) {
+      tmp_u16 >>= zeros;
+    } else {
+      tmpU32no1 >>= zeros;
+    }
+    tmp_2_w32 -= (int32_t)WEBRTC_SPL_UMUL_32_16(tmpU32no1, tmp_u16); // Q(11-zeros)
+    matrix_determinant >>= zeros;  // Q(-zeros)
+    tmp_2_w32 = WebRtcSpl_DivW32W16(tmp_2_w32, matrix_determinant); // Q11
+    tmp_2_w32 += (int32_t)net_norm << 11;  // Q11
+    if (tmp_2_w32 < 0) {
+      tmp_2_w32 = 0;
+    }
+    inst->pinkNoiseNumerator += tmp_2_w32; // Q11
+
+    // Calculate and update pinkNoiseExp. Result in Q14.
+    tmp_2_w32 = WEBRTC_SPL_MUL_16_U16(sum_log_i, sum_log_magn_u16); // Q(14-zeros)
+    tmp_1_w32 = sum_log_i_log_magn >> (3 + zeros);
+    tmp_1_w32 *= inst->magnLen - kStartBand;
+    tmp_2_w32 -= tmp_1_w32; // Q(14-zeros)
+    if (tmp_2_w32 > 0) {
+      // If the exponential parameter is negative force it to zero, which means a
+      // flat spectrum.
+      tmp_1_w32 = WebRtcSpl_DivW32W16(tmp_2_w32, matrix_determinant); // Q14
+      inst->pinkNoiseExp += WEBRTC_SPL_SAT(16384, tmp_1_w32, 0); // Q14
+    }
+  }
+}
+
+void WebRtcNsx_DataSynthesis(NoiseSuppressionFixedC* inst, short* outFrame) {
+  int32_t energyOut;
+
+  int16_t realImag_buff[ANAL_BLOCKL_MAX * 2 + 16];
+  int16_t rfft_out_buff[ANAL_BLOCKL_MAX * 2 + 16];
+
+  // Align the structures to 32-byte boundary for the FFT function.
+  int16_t* realImag = (int16_t*) (((uintptr_t)realImag_buff + 31) & ~31);
+  int16_t* rfft_out = (int16_t*) (((uintptr_t) rfft_out_buff + 31) & ~31);
+
+  int16_t tmp16no1, tmp16no2;
+  int16_t energyRatio;
+  int16_t gainFactor, gainFactor1, gainFactor2;
+
+  size_t i;
+  int outCIFFT;
+  int scaleEnergyOut = 0;
+
+  if (inst->zeroInputSignal) {
+    // synthesize the special case of zero input
+    // read out fully processed segment
+    for (i = 0; i < inst->blockLen10ms; i++) {
+      outFrame[i] = inst->synthesisBuffer[i]; // Q0
+    }
+    // update synthesis buffer
+    memcpy(inst->synthesisBuffer, inst->synthesisBuffer + inst->blockLen10ms,
+        (inst->anaLen - inst->blockLen10ms) * sizeof(*inst->synthesisBuffer));
+    WebRtcSpl_ZerosArrayW16(inst->synthesisBuffer + inst->anaLen - inst->blockLen10ms,
+                            inst->blockLen10ms);
+    return;
+  }
+
+  // Filter the data in the frequency domain, and create spectrum.
+  WebRtcNsx_PrepareSpectrum(inst, realImag);
+
+  // Inverse FFT output will be in rfft_out[].
+  outCIFFT = WebRtcSpl_RealInverseFFT(inst->real_fft, realImag, rfft_out);
+
+  WebRtcNsx_Denormalize(inst, rfft_out, outCIFFT);
+
+  //scale factor: only do it after END_STARTUP_LONG time
+  gainFactor = 8192; // 8192 = Q13(1.0)
+  if (inst->gainMap == 1 &&
+      inst->blockIndex > END_STARTUP_LONG &&
+      inst->energyIn > 0) {
+    // Q(-scaleEnergyOut)
+    energyOut = WebRtcSpl_Energy(inst->real, inst->anaLen, &scaleEnergyOut);
+    if (scaleEnergyOut == 0 && !(energyOut & 0x7f800000)) {
+      energyOut = WEBRTC_SPL_SHIFT_W32(energyOut, 8 + scaleEnergyOut
+                                       - inst->scaleEnergyIn);
+    } else {
+      // |energyIn| is currently in Q(|scaleEnergyIn|), but to later on end up
+      // with an |energyRatio| in Q8 we need to change the Q-domain to
+      // Q(-8-scaleEnergyOut).
+      inst->energyIn >>= 8 + scaleEnergyOut - inst->scaleEnergyIn;
+    }
+
+    RTC_DCHECK_GT(inst->energyIn, 0);
+    energyRatio = (energyOut + inst->energyIn / 2) / inst->energyIn;  // Q8
+    // Limit the ratio to [0, 1] in Q8, i.e., [0, 256]
+    energyRatio = WEBRTC_SPL_SAT(256, energyRatio, 0);
+
+    // all done in lookup tables now
+    RTC_DCHECK_LT(energyRatio, 257);
+    gainFactor1 = kFactor1Table[energyRatio]; // Q8
+    gainFactor2 = inst->factor2Table[energyRatio]; // Q8
+
+    //combine both scales with speech/noise prob: note prior (priorSpeechProb) is not frequency dependent
+
+    // factor = inst->priorSpeechProb*factor1 + (1.0-inst->priorSpeechProb)*factor2; // original code
+    tmp16no1 = (int16_t)(((16384 - inst->priorNonSpeechProb) * gainFactor1) >>
+        14);  // in Q13, where 16384 = Q14(1.0)
+    tmp16no2 = (int16_t)((inst->priorNonSpeechProb * gainFactor2) >> 14);
+    gainFactor = tmp16no1 + tmp16no2; // Q13
+  }  // out of flag_gain_map==1
+
+  // Synthesis, read out fully processed segment, and update synthesis buffer.
+  WebRtcNsx_SynthesisUpdate(inst, outFrame, gainFactor);
+}
+
+void WebRtcNsx_ProcessCore(NoiseSuppressionFixedC* inst,
+                           const short* const* speechFrame,
+                           int num_bands,
+                           short* const* outFrame) {
+  // main routine for noise suppression
+
+  uint32_t tmpU32no1, tmpU32no2, tmpU32no3;
+  uint32_t satMax, maxNoiseU32;
+  uint32_t tmpMagnU32, tmpNoiseU32;
+  uint32_t nearMagnEst;
+  uint32_t noiseUpdateU32;
+  uint32_t noiseU32[HALF_ANAL_BLOCKL];
+  uint32_t postLocSnr[HALF_ANAL_BLOCKL];
+  uint32_t priorLocSnr[HALF_ANAL_BLOCKL];
+  uint32_t prevNearSnr[HALF_ANAL_BLOCKL];
+  uint32_t curNearSnr;
+  uint32_t priorSnr;
+  uint32_t noise_estimate = 0;
+  uint32_t noise_estimate_avg = 0;
+  uint32_t numerator = 0;
+
+  int32_t tmp32no1, tmp32no2;
+  int32_t pink_noise_num_avg = 0;
+
+  uint16_t tmpU16no1;
+  uint16_t magnU16[HALF_ANAL_BLOCKL];
+  uint16_t prevNoiseU16[HALF_ANAL_BLOCKL];
+  uint16_t nonSpeechProbFinal[HALF_ANAL_BLOCKL];
+  uint16_t gammaNoise, prevGammaNoise;
+  uint16_t noiseSupFilterTmp[HALF_ANAL_BLOCKL];
+
+  int16_t qMagn, qNoise;
+  int16_t avgProbSpeechHB, gainModHB, avgFilterGainHB, gainTimeDomainHB;
+  int16_t pink_noise_exp_avg = 0;
+
+  size_t i, j;
+  int nShifts, postShifts;
+  int norm32no1, norm32no2;
+  int flag, sign;
+  int q_domain_to_use = 0;
+
+  // Code for ARMv7-Neon platform assumes the following:
+  RTC_DCHECK_GT(inst->anaLen, 0);
+  RTC_DCHECK_GT(inst->anaLen2, 0);
+  RTC_DCHECK_EQ(0, inst->anaLen % 16);
+  RTC_DCHECK_EQ(0, inst->anaLen2 % 8);
+  RTC_DCHECK_GT(inst->blockLen10ms, 0);
+  RTC_DCHECK_EQ(0, inst->blockLen10ms % 16);
+  RTC_DCHECK_EQ(inst->magnLen, inst->anaLen2 + 1);
+
+#ifdef NS_FILEDEBUG
+  if (fwrite(spframe, sizeof(short),
+             inst->blockLen10ms, inst->infile) != inst->blockLen10ms) {
+    RTC_NOTREACHED();
+  }
+#endif
+
+  // Check that initialization has been done
+  RTC_DCHECK_EQ(1, inst->initFlag);
+  RTC_DCHECK_LE(num_bands - 1, NUM_HIGH_BANDS_MAX);
+
+  const short* const* speechFrameHB = NULL;
+  short* const* outFrameHB = NULL;
+  size_t num_high_bands = 0;
+  if (num_bands > 1) {
+    speechFrameHB = &speechFrame[1];
+    outFrameHB = &outFrame[1];
+    num_high_bands = (size_t)(num_bands - 1);
+  }
+
+  // Store speechFrame and transform to frequency domain
+  WebRtcNsx_DataAnalysis(inst, (short*)speechFrame[0], magnU16);
+
+  if (inst->zeroInputSignal) {
+    WebRtcNsx_DataSynthesis(inst, outFrame[0]);
+
+    if (num_bands > 1) {
+      // update analysis buffer for H band
+      // append new data to buffer FX
+      for (i = 0; i < num_high_bands; ++i) {
+        int block_shift = inst->anaLen - inst->blockLen10ms;
+        memcpy(inst->dataBufHBFX[i], inst->dataBufHBFX[i] + inst->blockLen10ms,
+            block_shift * sizeof(*inst->dataBufHBFX[i]));
+        memcpy(inst->dataBufHBFX[i] + block_shift, speechFrameHB[i],
+            inst->blockLen10ms * sizeof(*inst->dataBufHBFX[i]));
+        for (j = 0; j < inst->blockLen10ms; j++) {
+          outFrameHB[i][j] = inst->dataBufHBFX[i][j]; // Q0
+        }
+      }
+    }  // end of H band gain computation
+    return;
+  }
+
+  // Update block index when we have something to process
+  inst->blockIndex++;
+  //
+
+  // Norm of magn
+  qMagn = inst->normData - inst->stages;
+
+  // Compute spectral flatness on input spectrum
+  WebRtcNsx_ComputeSpectralFlatness(inst, magnU16);
+
+  // quantile noise estimate
+  WebRtcNsx_NoiseEstimation(inst, magnU16, noiseU32, &qNoise);
+
+  //noise estimate from previous frame
+  for (i = 0; i < inst->magnLen; i++) {
+    prevNoiseU16[i] = (uint16_t)(inst->prevNoiseU32[i] >> 11);  // Q(prevQNoise)
+  }
+
+  if (inst->blockIndex < END_STARTUP_SHORT) {
+    // Noise Q-domain to be used later; see description at end of section.
+    q_domain_to_use = WEBRTC_SPL_MIN((int)qNoise, inst->minNorm - inst->stages);
+
+    // Calculate frequency independent parts in parametric noise estimate and calculate
+    // the estimate for the lower frequency band (same values for all frequency bins)
+    if (inst->pinkNoiseExp) {
+      pink_noise_exp_avg = (int16_t)WebRtcSpl_DivW32W16(inst->pinkNoiseExp,
+                                                              (int16_t)(inst->blockIndex + 1)); // Q14
+      pink_noise_num_avg = WebRtcSpl_DivW32W16(inst->pinkNoiseNumerator,
+                                               (int16_t)(inst->blockIndex + 1)); // Q11
+      WebRtcNsx_CalcParametricNoiseEstimate(inst,
+                                            pink_noise_exp_avg,
+                                            pink_noise_num_avg,
+                                            kStartBand,
+                                            &noise_estimate,
+                                            &noise_estimate_avg);
+    } else {
+      // Use white noise estimate if we have poor pink noise parameter estimates
+      noise_estimate = inst->whiteNoiseLevel; // Q(minNorm-stages)
+      noise_estimate_avg = noise_estimate / (inst->blockIndex + 1); // Q(minNorm-stages)
+    }
+    for (i = 0; i < inst->magnLen; i++) {
+      // Estimate the background noise using the pink noise parameters if permitted
+      if ((inst->pinkNoiseExp) && (i >= kStartBand)) {
+        // Reset noise_estimate
+        noise_estimate = 0;
+        noise_estimate_avg = 0;
+        // Calculate the parametric noise estimate for current frequency bin
+        WebRtcNsx_CalcParametricNoiseEstimate(inst,
+                                              pink_noise_exp_avg,
+                                              pink_noise_num_avg,
+                                              i,
+                                              &noise_estimate,
+                                              &noise_estimate_avg);
+      }
+      // Calculate parametric Wiener filter
+      noiseSupFilterTmp[i] = inst->denoiseBound;
+      if (inst->initMagnEst[i]) {
+        // numerator = (initMagnEst - noise_estimate * overdrive)
+        // Result in Q(8+minNorm-stages)
+        tmpU32no1 = WEBRTC_SPL_UMUL_32_16(noise_estimate, inst->overdrive);
+        numerator = inst->initMagnEst[i] << 8;
+        if (numerator > tmpU32no1) {
+          // Suppression filter coefficient larger than zero, so calculate.
+          numerator -= tmpU32no1;
+
+          // Determine number of left shifts in numerator for best accuracy after
+          // division
+          nShifts = WebRtcSpl_NormU32(numerator);
+          nShifts = WEBRTC_SPL_SAT(6, nShifts, 0);
+
+          // Shift numerator to Q(nShifts+8+minNorm-stages)
+          numerator <<= nShifts;
+
+          // Shift denominator to Q(nShifts-6+minNorm-stages)
+          tmpU32no1 = inst->initMagnEst[i] >> (6 - nShifts);
+          if (tmpU32no1 == 0) {
+            // This is only possible if numerator = 0, in which case
+            // we don't need any division.
+            tmpU32no1 = 1;
+          }
+          tmpU32no2 = numerator / tmpU32no1;  // Q14
+          noiseSupFilterTmp[i] = (uint16_t)WEBRTC_SPL_SAT(16384, tmpU32no2,
+              (uint32_t)(inst->denoiseBound)); // Q14
+        }
+      }
+      // Weight quantile noise 'noiseU32' with modeled noise 'noise_estimate_avg'
+      // 'noiseU32 is in Q(qNoise) and 'noise_estimate' in Q(minNorm-stages)
+      // To guarantee that we do not get wrap around when shifting to the same domain
+      // we use the lowest one. Furthermore, we need to save 6 bits for the weighting.
+      // 'noise_estimate_avg' can handle this operation by construction, but 'noiseU32'
+      // may not.
+
+      // Shift 'noiseU32' to 'q_domain_to_use'
+      tmpU32no1 = noiseU32[i] >> (qNoise - q_domain_to_use);
+      // Shift 'noise_estimate_avg' to 'q_domain_to_use'
+      tmpU32no2 = noise_estimate_avg >>
+          (inst->minNorm - inst->stages - q_domain_to_use);
+      // Make a simple check to see if we have enough room for weighting 'tmpU32no1'
+      // without wrap around
+      nShifts = 0;
+      if (tmpU32no1 & 0xfc000000) {
+        tmpU32no1 >>= 6;
+        tmpU32no2 >>= 6;
+        nShifts = 6;
+      }
+      tmpU32no1 *= inst->blockIndex;
+      tmpU32no2 *= (END_STARTUP_SHORT - inst->blockIndex);
+      // Add them together and divide by startup length
+      noiseU32[i] = WebRtcSpl_DivU32U16(tmpU32no1 + tmpU32no2, END_STARTUP_SHORT);
+      // Shift back if necessary
+      noiseU32[i] <<= nShifts;
+    }
+    // Update new Q-domain for 'noiseU32'
+    qNoise = q_domain_to_use;
+  }
+  // compute average signal during END_STARTUP_LONG time:
+  // used to normalize spectral difference measure
+  if (inst->blockIndex < END_STARTUP_LONG) {
+    // substituting division with shift ending up in Q(-2*stages)
+    inst->timeAvgMagnEnergyTmp +=
+        inst->magnEnergy >> (2 * inst->normData + inst->stages - 1);
+    inst->timeAvgMagnEnergy = WebRtcSpl_DivU32U16(inst->timeAvgMagnEnergyTmp,
+                                                  inst->blockIndex + 1);
+  }
+
+  //start processing at frames == converged+1
+  // STEP 1: compute prior and post SNR based on quantile noise estimates
+
+  // compute direct decision (DD) estimate of prior SNR: needed for new method
+  satMax = (uint32_t)1048575;// Largest possible value without getting overflow despite shifting 12 steps
+  postShifts = 6 + qMagn - qNoise;
+  nShifts = 5 - inst->prevQMagn + inst->prevQNoise;
+  for (i = 0; i < inst->magnLen; i++) {
+    // FLOAT:
+    // post SNR
+    // postLocSnr[i] = 0.0;
+    // if (magn[i] > noise[i])
+    // {
+    //   postLocSnr[i] = magn[i] / (noise[i] + 0.0001);
+    // }
+    // // previous post SNR
+    // // previous estimate: based on previous frame with gain filter (smooth is previous filter)
+    //
+    // prevNearSnr[i] = inst->prevMagnU16[i] / (inst->noisePrev[i] + 0.0001) * (inst->smooth[i]);
+    //
+    // // DD estimate is sum of two terms: current estimate and previous estimate
+    // // directed decision update of priorSnr (or we actually store [2*priorSnr+1])
+    //
+    // priorLocSnr[i] = DD_PR_SNR * prevNearSnr[i] + (1.0 - DD_PR_SNR) * (postLocSnr[i] - 1.0);
+
+    // calculate post SNR: output in Q11
+    postLocSnr[i] = 2048; // 1.0 in Q11
+    tmpU32no1 = (uint32_t)magnU16[i] << 6;  // Q(6+qMagn)
+    if (postShifts < 0) {
+      tmpU32no2 = noiseU32[i] >> -postShifts;  // Q(6+qMagn)
+    } else {
+      tmpU32no2 = noiseU32[i] << postShifts;  // Q(6+qMagn)
+    }
+    if (tmpU32no1 > tmpU32no2) {
+      // Current magnitude larger than noise
+      tmpU32no1 <<= 11;  // Q(17+qMagn)
+      if (tmpU32no2 > 0) {
+        tmpU32no1 /= tmpU32no2;  // Q11
+        postLocSnr[i] = WEBRTC_SPL_MIN(satMax, tmpU32no1); // Q11
+      } else {
+        postLocSnr[i] = satMax;
+      }
+    }
+
+    // calculate prevNearSnr[i] and save for later instead of recalculating it later
+    // |nearMagnEst| in Q(prevQMagn + 14)
+    nearMagnEst = inst->prevMagnU16[i] * inst->noiseSupFilter[i];
+    tmpU32no1 = nearMagnEst << 3;  // Q(prevQMagn+17)
+    tmpU32no2 = inst->prevNoiseU32[i] >> nShifts;  // Q(prevQMagn+6)
+
+    if (tmpU32no2 > 0) {
+      tmpU32no1 /= tmpU32no2;  // Q11
+      tmpU32no1 = WEBRTC_SPL_MIN(satMax, tmpU32no1); // Q11
+    } else {
+      tmpU32no1 = satMax; // Q11
+    }
+    prevNearSnr[i] = tmpU32no1; // Q11
+
+    //directed decision update of priorSnr
+    tmpU32no1 = WEBRTC_SPL_UMUL_32_16(prevNearSnr[i], DD_PR_SNR_Q11); // Q22
+    tmpU32no2 = WEBRTC_SPL_UMUL_32_16(postLocSnr[i] - 2048, ONE_MINUS_DD_PR_SNR_Q11); // Q22
+    priorSnr = tmpU32no1 + tmpU32no2 + 512; // Q22 (added 512 for rounding)
+    // priorLocSnr = 1 + 2*priorSnr
+    priorLocSnr[i] = 2048 + (priorSnr >> 10);  // Q11
+  }  // end of loop over frequencies
+  // done with step 1: DD computation of prior and post SNR
+
+  // STEP 2: compute speech/noise likelihood
+
+  //compute difference of input spectrum with learned/estimated noise spectrum
+  WebRtcNsx_ComputeSpectralDifference(inst, magnU16);
+  //compute histograms for determination of parameters (thresholds and weights for features)
+  //parameters are extracted once every window time (=inst->modelUpdate)
+  //counter update
+  inst->cntThresUpdate++;
+  flag = (int)(inst->cntThresUpdate == inst->modelUpdate);
+  //update histogram
+  WebRtcNsx_FeatureParameterExtraction(inst, flag);
+  //compute model parameters
+  if (flag) {
+    inst->cntThresUpdate = 0; // Reset counter
+    //update every window:
+    // get normalization for spectral difference for next window estimate
+
+    // Shift to Q(-2*stages)
+    inst->curAvgMagnEnergy >>= STAT_UPDATES;
+
+    tmpU32no1 = (inst->curAvgMagnEnergy + inst->timeAvgMagnEnergy + 1) >> 1; //Q(-2*stages)
+    // Update featureSpecDiff
+    if ((tmpU32no1 != inst->timeAvgMagnEnergy) && (inst->featureSpecDiff) &&
+        (inst->timeAvgMagnEnergy > 0)) {
+      norm32no1 = 0;
+      tmpU32no3 = tmpU32no1;
+      while (0xFFFF0000 & tmpU32no3) {
+        tmpU32no3 >>= 1;
+        norm32no1++;
+      }
+      tmpU32no2 = inst->featureSpecDiff;
+      while (0xFFFF0000 & tmpU32no2) {
+        tmpU32no2 >>= 1;
+        norm32no1++;
+      }
+      tmpU32no3 = WEBRTC_SPL_UMUL(tmpU32no3, tmpU32no2);
+      tmpU32no3 /= inst->timeAvgMagnEnergy;
+      if (WebRtcSpl_NormU32(tmpU32no3) < norm32no1) {
+        inst->featureSpecDiff = 0x007FFFFF;
+      } else {
+        inst->featureSpecDiff = WEBRTC_SPL_MIN(0x007FFFFF,
+                                               tmpU32no3 << norm32no1);
+      }
+    }
+
+    inst->timeAvgMagnEnergy = tmpU32no1; // Q(-2*stages)
+    inst->curAvgMagnEnergy = 0;
+  }
+
+  //compute speech/noise probability
+  WebRtcNsx_SpeechNoiseProb(inst, nonSpeechProbFinal, priorLocSnr, postLocSnr);
+
+  //time-avg parameter for noise update
+  gammaNoise = NOISE_UPDATE_Q8; // Q8
+
+  maxNoiseU32 = 0;
+  postShifts = inst->prevQNoise - qMagn;
+  nShifts = inst->prevQMagn - qMagn;
+  for (i = 0; i < inst->magnLen; i++) {
+    // temporary noise update: use it for speech frames if update value is less than previous
+    // the formula has been rewritten into:
+    // noiseUpdate = noisePrev[i] + (1 - gammaNoise) * nonSpeechProb * (magn[i] - noisePrev[i])
+
+    if (postShifts < 0) {
+      tmpU32no2 = magnU16[i] >> -postShifts;  // Q(prevQNoise)
+    } else {
+      tmpU32no2 = (uint32_t)magnU16[i] << postShifts;  // Q(prevQNoise)
+    }
+    if (prevNoiseU16[i] > tmpU32no2) {
+      sign = -1;
+      tmpU32no1 = prevNoiseU16[i] - tmpU32no2;
+    } else {
+      sign = 1;
+      tmpU32no1 = tmpU32no2 - prevNoiseU16[i];
+    }
+    noiseUpdateU32 = inst->prevNoiseU32[i]; // Q(prevQNoise+11)
+    tmpU32no3 = 0;
+    if ((tmpU32no1) && (nonSpeechProbFinal[i])) {
+      // This value will be used later, if gammaNoise changes
+      tmpU32no3 = WEBRTC_SPL_UMUL_32_16(tmpU32no1, nonSpeechProbFinal[i]); // Q(prevQNoise+8)
+      if (0x7c000000 & tmpU32no3) {
+        // Shifting required before multiplication
+        tmpU32no2 = (tmpU32no3 >> 5) * gammaNoise;  // Q(prevQNoise+11)
+      } else {
+        // We can do shifting after multiplication
+        tmpU32no2 = (tmpU32no3 * gammaNoise) >> 5;  // Q(prevQNoise+11)
+      }
+      if (sign > 0) {
+        noiseUpdateU32 += tmpU32no2; // Q(prevQNoise+11)
+      } else {
+        // This operation is safe. We can never get wrap around, since worst
+        // case scenario means magnU16 = 0
+        noiseUpdateU32 -= tmpU32no2; // Q(prevQNoise+11)
+      }
+    }
+
+    //increase gamma (i.e., less noise update) for frame likely to be speech
+    prevGammaNoise = gammaNoise;
+    gammaNoise = NOISE_UPDATE_Q8;
+    //time-constant based on speech/noise state
+    //increase gamma (i.e., less noise update) for frames likely to be speech
+    if (nonSpeechProbFinal[i] < ONE_MINUS_PROB_RANGE_Q8) {
+      gammaNoise = GAMMA_NOISE_TRANS_AND_SPEECH_Q8;
+    }
+
+    if (prevGammaNoise != gammaNoise) {
+      // new noise update
+      // this line is the same as above, only that the result is stored in a different variable and the gammaNoise
+      // has changed
+      //
+      // noiseUpdate = noisePrev[i] + (1 - gammaNoise) * nonSpeechProb * (magn[i] - noisePrev[i])
+
+      if (0x7c000000 & tmpU32no3) {
+        // Shifting required before multiplication
+        tmpU32no2 = (tmpU32no3 >> 5) * gammaNoise;  // Q(prevQNoise+11)
+      } else {
+        // We can do shifting after multiplication
+        tmpU32no2 = (tmpU32no3 * gammaNoise) >> 5;  // Q(prevQNoise+11)
+      }
+      if (sign > 0) {
+        tmpU32no1 = inst->prevNoiseU32[i] + tmpU32no2; // Q(prevQNoise+11)
+      } else {
+        tmpU32no1 = inst->prevNoiseU32[i] - tmpU32no2; // Q(prevQNoise+11)
+      }
+      if (noiseUpdateU32 > tmpU32no1) {
+        noiseUpdateU32 = tmpU32no1; // Q(prevQNoise+11)
+      }
+    }
+    noiseU32[i] = noiseUpdateU32; // Q(prevQNoise+11)
+    if (noiseUpdateU32 > maxNoiseU32) {
+      maxNoiseU32 = noiseUpdateU32;
+    }
+
+    // conservative noise update
+    // // original FLOAT code
+    // if (prob_speech < PROB_RANGE) {
+    // inst->avgMagnPause[i] = inst->avgMagnPause[i] + (1.0 - gamma_pause)*(magn[i] - inst->avgMagnPause[i]);
+    // }
+
+    tmp32no2 = WEBRTC_SPL_SHIFT_W32(inst->avgMagnPause[i], -nShifts);
+    if (nonSpeechProbFinal[i] > ONE_MINUS_PROB_RANGE_Q8) {
+      if (nShifts < 0) {
+        tmp32no1 = (int32_t)magnU16[i] - tmp32no2; // Q(qMagn)
+        tmp32no1 *= ONE_MINUS_GAMMA_PAUSE_Q8;  // Q(8+prevQMagn+nShifts)
+        tmp32no1 = (tmp32no1 + 128) >> 8;  // Q(qMagn).
+      } else {
+        // In Q(qMagn+nShifts)
+        tmp32no1 = ((int32_t)magnU16[i] << nShifts) - inst->avgMagnPause[i];
+        tmp32no1 *= ONE_MINUS_GAMMA_PAUSE_Q8;  // Q(8+prevQMagn+nShifts)
+        tmp32no1 = (tmp32no1 + (128 << nShifts)) >> (8 + nShifts);  // Q(qMagn).
+      }
+      tmp32no2 += tmp32no1; // Q(qMagn)
+    }
+    inst->avgMagnPause[i] = tmp32no2;
+  }  // end of frequency loop
+
+  norm32no1 = WebRtcSpl_NormU32(maxNoiseU32);
+  qNoise = inst->prevQNoise + norm32no1 - 5;
+  // done with step 2: noise update
+
+  // STEP 3: compute dd update of prior snr and post snr based on new noise estimate
+  nShifts = inst->prevQNoise + 11 - qMagn;
+  for (i = 0; i < inst->magnLen; i++) {
+    // FLOAT code
+    // // post and prior SNR
+    // curNearSnr = 0.0;
+    // if (magn[i] > noise[i])
+    // {
+    // curNearSnr = magn[i] / (noise[i] + 0.0001) - 1.0;
+    // }
+    // // DD estimate is sum of two terms: current estimate and previous estimate
+    // // directed decision update of snrPrior
+    // snrPrior = DD_PR_SNR * prevNearSnr[i] + (1.0 - DD_PR_SNR) * curNearSnr;
+    // // gain filter
+    // tmpFloat1 = inst->overdrive + snrPrior;
+    // tmpFloat2 = snrPrior / tmpFloat1;
+    // theFilter[i] = tmpFloat2;
+
+    // calculate curNearSnr again, this is necessary because a new noise estimate has been made since then. for the original
+    curNearSnr = 0; // Q11
+    if (nShifts < 0) {
+      // This case is equivalent with magn < noise which implies curNearSnr = 0;
+      tmpMagnU32 = (uint32_t)magnU16[i]; // Q(qMagn)
+      tmpNoiseU32 = noiseU32[i] << -nShifts;  // Q(qMagn)
+    } else if (nShifts > 17) {
+      tmpMagnU32 = (uint32_t)magnU16[i] << 17;  // Q(qMagn+17)
+      tmpNoiseU32 = noiseU32[i] >> (nShifts - 17);  // Q(qMagn+17)
+    } else {
+      tmpMagnU32 = (uint32_t)magnU16[i] << nShifts;  // Q(qNoise_prev+11)
+      tmpNoiseU32 = noiseU32[i]; // Q(qNoise_prev+11)
+    }
+    if (tmpMagnU32 > tmpNoiseU32) {
+      tmpU32no1 = tmpMagnU32 - tmpNoiseU32; // Q(qCur)
+      norm32no2 = WEBRTC_SPL_MIN(11, WebRtcSpl_NormU32(tmpU32no1));
+      tmpU32no1 <<= norm32no2;  // Q(qCur+norm32no2)
+      tmpU32no2 = tmpNoiseU32 >> (11 - norm32no2);  // Q(qCur+norm32no2-11)
+      if (tmpU32no2 > 0) {
+        tmpU32no1 /= tmpU32no2;  // Q11
+      }
+      curNearSnr = WEBRTC_SPL_MIN(satMax, tmpU32no1); // Q11
+    }
+
+    //directed decision update of priorSnr
+    // FLOAT
+    // priorSnr = DD_PR_SNR * prevNearSnr + (1.0-DD_PR_SNR) * curNearSnr;
+
+    tmpU32no1 = WEBRTC_SPL_UMUL_32_16(prevNearSnr[i], DD_PR_SNR_Q11); // Q22
+    tmpU32no2 = WEBRTC_SPL_UMUL_32_16(curNearSnr, ONE_MINUS_DD_PR_SNR_Q11); // Q22
+    priorSnr = tmpU32no1 + tmpU32no2; // Q22
+
+    //gain filter
+    tmpU32no1 = inst->overdrive + ((priorSnr + 8192) >> 14);  // Q8
+    RTC_DCHECK_GT(inst->overdrive, 0);
+    tmpU16no1 = (priorSnr + tmpU32no1 / 2) / tmpU32no1;  // Q14
+    inst->noiseSupFilter[i] = WEBRTC_SPL_SAT(16384, tmpU16no1, inst->denoiseBound); // 16384 = Q14(1.0) // Q14
+
+    // Weight in the parametric Wiener filter during startup
+    if (inst->blockIndex < END_STARTUP_SHORT) {
+      // Weight the two suppression filters
+      tmpU32no1 = inst->noiseSupFilter[i] * inst->blockIndex;
+      tmpU32no2 = noiseSupFilterTmp[i] *
+          (END_STARTUP_SHORT - inst->blockIndex);
+      tmpU32no1 += tmpU32no2;
+      inst->noiseSupFilter[i] = (uint16_t)WebRtcSpl_DivU32U16(tmpU32no1,
+                                                                    END_STARTUP_SHORT);
+    }
+  }  // end of loop over frequencies
+  //done with step3
+
+  // save noise and magnitude spectrum for next frame
+  inst->prevQNoise = qNoise;
+  inst->prevQMagn = qMagn;
+  if (norm32no1 > 5) {
+    for (i = 0; i < inst->magnLen; i++) {
+      inst->prevNoiseU32[i] = noiseU32[i] << (norm32no1 - 5);  // Q(qNoise+11)
+      inst->prevMagnU16[i] = magnU16[i]; // Q(qMagn)
+    }
+  } else {
+    for (i = 0; i < inst->magnLen; i++) {
+      inst->prevNoiseU32[i] = noiseU32[i] >> (5 - norm32no1);  // Q(qNoise+11)
+      inst->prevMagnU16[i] = magnU16[i]; // Q(qMagn)
+    }
+  }
+
+  WebRtcNsx_DataSynthesis(inst, outFrame[0]);
+#ifdef NS_FILEDEBUG
+  if (fwrite(outframe, sizeof(short),
+             inst->blockLen10ms, inst->outfile) != inst->blockLen10ms) {
+    RTC_NOTREACHED();
+  }
+#endif
+
+  //for H band:
+  // only update data buffer, then apply time-domain gain is applied derived from L band
+  if (num_bands > 1) {
+    // update analysis buffer for H band
+    // append new data to buffer FX
+    for (i = 0; i < num_high_bands; ++i) {
+      memcpy(inst->dataBufHBFX[i], inst->dataBufHBFX[i] + inst->blockLen10ms,
+          (inst->anaLen - inst->blockLen10ms) * sizeof(*inst->dataBufHBFX[i]));
+      memcpy(inst->dataBufHBFX[i] + inst->anaLen - inst->blockLen10ms,
+          speechFrameHB[i], inst->blockLen10ms * sizeof(*inst->dataBufHBFX[i]));
+    }
+    // range for averaging low band quantities for H band gain
+
+    gainTimeDomainHB = 16384; // 16384 = Q14(1.0)
+    //average speech prob from low band
+    //average filter gain from low band
+    //avg over second half (i.e., 4->8kHz) of freq. spectrum
+    tmpU32no1 = 0; // Q12
+    tmpU16no1 = 0; // Q8
+    for (i = inst->anaLen2 - (inst->anaLen2 >> 2); i < inst->anaLen2; i++) {
+      tmpU16no1 += nonSpeechProbFinal[i]; // Q8
+      tmpU32no1 += (uint32_t)(inst->noiseSupFilter[i]); // Q14
+    }
+    RTC_DCHECK_GE(inst->stages, 7);
+    avgProbSpeechHB = (4096 - (tmpU16no1 >> (inst->stages - 7)));  // Q12
+    avgFilterGainHB = (int16_t)(tmpU32no1 >> (inst->stages - 3));  // Q14
+
+    // // original FLOAT code
+    // // gain based on speech probability:
+    // avg_prob_speech_tt=(float)2.0*avg_prob_speech-(float)1.0;
+    // gain_mod=(float)0.5*((float)1.0+(float)tanh(avg_prob_speech_tt)); // between 0 and 1
+
+    // gain based on speech probability:
+    // original expression: "0.5 * (1 + tanh(2x-1))"
+    // avgProbSpeechHB has been anyway saturated to a value between 0 and 1 so the other cases don't have to be dealt with
+    // avgProbSpeechHB and gainModHB are in Q12, 3607 = Q12(0.880615234375) which is a zero point of
+    // |0.5 * (1 + tanh(2x-1)) - x| - |0.5 * (1 + tanh(2x-1)) - 0.880615234375| meaning that from that point the error of approximating
+    // the expression with f(x) = x would be greater than the error of approximating the expression with f(x) = 0.880615234375
+    // error: "|0.5 * (1 + tanh(2x-1)) - x| from x=0 to 0.880615234375" -> http://www.wolframalpha.com/input/?i=|0.5+*+(1+%2B+tanh(2x-1))+-+x|+from+x%3D0+to+0.880615234375
+    // and:  "|0.5 * (1 + tanh(2x-1)) - 0.880615234375| from x=0.880615234375 to 1" -> http://www.wolframalpha.com/input/?i=+|0.5+*+(1+%2B+tanh(2x-1))+-+0.880615234375|+from+x%3D0.880615234375+to+1
+    gainModHB = WEBRTC_SPL_MIN(avgProbSpeechHB, 3607);
+
+    // // original FLOAT code
+    // //combine gain with low band gain
+    // if (avg_prob_speech < (float)0.5) {
+    // gain_time_domain_HB=(float)0.5*gain_mod+(float)0.5*avg_filter_gain;
+    // }
+    // else {
+    // gain_time_domain_HB=(float)0.25*gain_mod+(float)0.75*avg_filter_gain;
+    // }
+
+
+    //combine gain with low band gain
+    if (avgProbSpeechHB < 2048) {
+      // 2048 = Q12(0.5)
+      // the next two lines in float are  "gain_time_domain = 0.5 * gain_mod + 0.5 * avg_filter_gain"; Q2(0.5) = 2 equals one left shift
+      gainTimeDomainHB = (gainModHB << 1) + (avgFilterGainHB >> 1); // Q14
+    } else {
+      // "gain_time_domain = 0.25 * gain_mod + 0.75 * agv_filter_gain;"
+      gainTimeDomainHB = (int16_t)((3 * avgFilterGainHB) >> 2);  // 3 = Q2(0.75)
+      gainTimeDomainHB += gainModHB; // Q14
+    }
+    //make sure gain is within flooring range
+    gainTimeDomainHB
+      = WEBRTC_SPL_SAT(16384, gainTimeDomainHB, (int16_t)(inst->denoiseBound)); // 16384 = Q14(1.0)
+
+
+    //apply gain
+    for (i = 0; i < num_high_bands; ++i) {
+      for (j = 0; j < inst->blockLen10ms; j++) {
+        outFrameHB[i][j] = (int16_t)((gainTimeDomainHB *
+            inst->dataBufHBFX[i][j]) >> 14);  // Q0
+      }
+    }
+  }  // end of H band gain computation
+}
diff --git a/modules/audio_processing/ns/nsx_core.h b/modules/audio_processing/ns/nsx_core.h
new file mode 100644
index 0000000..c8097f7
--- /dev/null
+++ b/modules/audio_processing/ns/nsx_core.h
@@ -0,0 +1,263 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NSX_CORE_H_
+#define MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NSX_CORE_H_
+
+#ifdef NS_FILEDEBUG
+#include <stdio.h>
+#endif
+
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_processing/ns/nsx_defines.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+typedef struct NoiseSuppressionFixedC_ {
+  uint32_t                fs;
+
+  const int16_t*          window;
+  int16_t                 analysisBuffer[ANAL_BLOCKL_MAX];
+  int16_t                 synthesisBuffer[ANAL_BLOCKL_MAX];
+  uint16_t                noiseSupFilter[HALF_ANAL_BLOCKL];
+  uint16_t                overdrive; /* Q8 */
+  uint16_t                denoiseBound; /* Q14 */
+  const int16_t*          factor2Table;
+  int16_t                 noiseEstLogQuantile[SIMULT* HALF_ANAL_BLOCKL];
+  int16_t                 noiseEstDensity[SIMULT* HALF_ANAL_BLOCKL];
+  int16_t                 noiseEstCounter[SIMULT];
+  int16_t                 noiseEstQuantile[HALF_ANAL_BLOCKL];
+
+  size_t                  anaLen;
+  size_t                  anaLen2;
+  size_t                  magnLen;
+  int                     aggrMode;
+  int                     stages;
+  int                     initFlag;
+  int                     gainMap;
+
+  int32_t                 maxLrt;
+  int32_t                 minLrt;
+  // Log LRT factor with time-smoothing in Q8.
+  int32_t                 logLrtTimeAvgW32[HALF_ANAL_BLOCKL];
+  int32_t                 featureLogLrt;
+  int32_t                 thresholdLogLrt;
+  int16_t                 weightLogLrt;
+
+  uint32_t                featureSpecDiff;
+  uint32_t                thresholdSpecDiff;
+  int16_t                 weightSpecDiff;
+
+  uint32_t                featureSpecFlat;
+  uint32_t                thresholdSpecFlat;
+  int16_t                 weightSpecFlat;
+
+  // Conservative estimate of noise spectrum.
+  int32_t                 avgMagnPause[HALF_ANAL_BLOCKL];
+  uint32_t                magnEnergy;
+  uint32_t                sumMagn;
+  uint32_t                curAvgMagnEnergy;
+  uint32_t                timeAvgMagnEnergy;
+  uint32_t                timeAvgMagnEnergyTmp;
+
+  uint32_t                whiteNoiseLevel;  // Initial noise estimate.
+  // Initial magnitude spectrum estimate.
+  uint32_t                initMagnEst[HALF_ANAL_BLOCKL];
+  // Pink noise parameters:
+  int32_t                 pinkNoiseNumerator;  // Numerator.
+  int32_t                 pinkNoiseExp;  // Power of freq.
+  int                     minNorm;  // Smallest normalization factor.
+  int                     zeroInputSignal;  // Zero input signal flag.
+
+  // Noise spectrum from previous frame.
+  uint32_t                prevNoiseU32[HALF_ANAL_BLOCKL];
+  // Magnitude spectrum from previous frame.
+  uint16_t                prevMagnU16[HALF_ANAL_BLOCKL];
+  // Prior speech/noise probability in Q14.
+  int16_t                 priorNonSpeechProb;
+
+  int                     blockIndex;  // Frame index counter.
+  // Parameter for updating or estimating thresholds/weights for prior model.
+  int                     modelUpdate;
+  int                     cntThresUpdate;
+
+  // Histograms for parameter estimation.
+  int16_t                 histLrt[HIST_PAR_EST];
+  int16_t                 histSpecFlat[HIST_PAR_EST];
+  int16_t                 histSpecDiff[HIST_PAR_EST];
+
+  // Quantities for high band estimate.
+  int16_t                 dataBufHBFX[NUM_HIGH_BANDS_MAX][ANAL_BLOCKL_MAX];
+
+  int                     qNoise;
+  int                     prevQNoise;
+  int                     prevQMagn;
+  size_t                  blockLen10ms;
+
+  int16_t                 real[ANAL_BLOCKL_MAX];
+  int16_t                 imag[ANAL_BLOCKL_MAX];
+  int32_t                 energyIn;
+  int                     scaleEnergyIn;
+  int                     normData;
+
+  struct RealFFT* real_fft;
+} NoiseSuppressionFixedC;
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/****************************************************************************
+ * WebRtcNsx_InitCore(...)
+ *
+ * This function initializes a noise suppression instance
+ *
+ * Input:
+ *      - inst          : Instance that should be initialized
+ *      - fs            : Sampling frequency
+ *
+ * Output:
+ *      - inst          : Initialized instance
+ *
+ * Return value         :  0 - Ok
+ *                        -1 - Error
+ */
+int32_t WebRtcNsx_InitCore(NoiseSuppressionFixedC* inst, uint32_t fs);
+
+/****************************************************************************
+ * WebRtcNsx_set_policy_core(...)
+ *
+ * This changes the aggressiveness of the noise suppression method.
+ *
+ * Input:
+ *      - inst       : Instance that should be initialized
+ *      - mode       : 0: Mild (6 dB), 1: Medium (10 dB), 2: Aggressive (15 dB)
+ *
+ * Output:
+ *      - inst       : Initialized instance
+ *
+ * Return value      :  0 - Ok
+ *                     -1 - Error
+ */
+int WebRtcNsx_set_policy_core(NoiseSuppressionFixedC* inst, int mode);
+
+/****************************************************************************
+ * WebRtcNsx_ProcessCore
+ *
+ * Do noise suppression.
+ *
+ * Input:
+ *      - inst          : Instance that should be initialized
+ *      - inFrame       : Input speech frame for each band
+ *      - num_bands     : Number of bands
+ *
+ * Output:
+ *      - inst          : Updated instance
+ *      - outFrame      : Output speech frame for each band
+ */
+void WebRtcNsx_ProcessCore(NoiseSuppressionFixedC* inst,
+                           const short* const* inFrame,
+                           int num_bands,
+                           short* const* outFrame);
+
+/****************************************************************************
+ * Some function pointers, for internal functions shared by ARM NEON and
+ * generic C code.
+ */
+// Noise Estimation.
+typedef void (*NoiseEstimation)(NoiseSuppressionFixedC* inst,
+                                uint16_t* magn,
+                                uint32_t* noise,
+                                int16_t* q_noise);
+extern NoiseEstimation WebRtcNsx_NoiseEstimation;
+
+// Filter the data in the frequency domain, and create spectrum.
+typedef void (*PrepareSpectrum)(NoiseSuppressionFixedC* inst,
+                                int16_t* freq_buff);
+extern PrepareSpectrum WebRtcNsx_PrepareSpectrum;
+
+// For the noise supression process, synthesis, read out fully processed
+// segment, and update synthesis buffer.
+typedef void (*SynthesisUpdate)(NoiseSuppressionFixedC* inst,
+                                int16_t* out_frame,
+                                int16_t gain_factor);
+extern SynthesisUpdate WebRtcNsx_SynthesisUpdate;
+
+// Update analysis buffer for lower band, and window data before FFT.
+typedef void (*AnalysisUpdate)(NoiseSuppressionFixedC* inst,
+                               int16_t* out,
+                               int16_t* new_speech);
+extern AnalysisUpdate WebRtcNsx_AnalysisUpdate;
+
+// Denormalize the real-valued signal |in|, the output from inverse FFT.
+typedef void (*Denormalize)(NoiseSuppressionFixedC* inst,
+                            int16_t* in,
+                            int factor);
+extern Denormalize WebRtcNsx_Denormalize;
+
+// Normalize the real-valued signal |in|, the input to forward FFT.
+typedef void (*NormalizeRealBuffer)(NoiseSuppressionFixedC* inst,
+                                    const int16_t* in,
+                                    int16_t* out);
+extern NormalizeRealBuffer WebRtcNsx_NormalizeRealBuffer;
+
+// Compute speech/noise probability.
+// Intended to be private.
+void WebRtcNsx_SpeechNoiseProb(NoiseSuppressionFixedC* inst,
+                               uint16_t* nonSpeechProbFinal,
+                               uint32_t* priorLocSnr,
+                               uint32_t* postLocSnr);
+
+#if defined(WEBRTC_HAS_NEON)
+// For the above function pointers, functions for generic platforms are declared
+// and defined as static in file nsx_core.c, while those for ARM Neon platforms
+// are declared below and defined in file nsx_core_neon.c.
+void WebRtcNsx_NoiseEstimationNeon(NoiseSuppressionFixedC* inst,
+                                   uint16_t* magn,
+                                   uint32_t* noise,
+                                   int16_t* q_noise);
+void WebRtcNsx_SynthesisUpdateNeon(NoiseSuppressionFixedC* inst,
+                                   int16_t* out_frame,
+                                   int16_t gain_factor);
+void WebRtcNsx_AnalysisUpdateNeon(NoiseSuppressionFixedC* inst,
+                                  int16_t* out,
+                                  int16_t* new_speech);
+void WebRtcNsx_PrepareSpectrumNeon(NoiseSuppressionFixedC* inst,
+                                   int16_t* freq_buff);
+#endif
+
+#if defined(MIPS32_LE)
+// For the above function pointers, functions for generic platforms are declared
+// and defined as static in file nsx_core.c, while those for MIPS platforms
+// are declared below and defined in file nsx_core_mips.c.
+void WebRtcNsx_SynthesisUpdate_mips(NoiseSuppressionFixedC* inst,
+                                    int16_t* out_frame,
+                                    int16_t gain_factor);
+void WebRtcNsx_AnalysisUpdate_mips(NoiseSuppressionFixedC* inst,
+                                   int16_t* out,
+                                   int16_t* new_speech);
+void WebRtcNsx_PrepareSpectrum_mips(NoiseSuppressionFixedC* inst,
+                                    int16_t* freq_buff);
+void WebRtcNsx_NormalizeRealBuffer_mips(NoiseSuppressionFixedC* inst,
+                                        const int16_t* in,
+                                        int16_t* out);
+#if defined(MIPS_DSP_R1_LE)
+void WebRtcNsx_Denormalize_mips(NoiseSuppressionFixedC* inst,
+                                int16_t* in,
+                                int factor);
+#endif
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NSX_CORE_H_
diff --git a/modules/audio_processing/ns/nsx_core_c.c b/modules/audio_processing/ns/nsx_core_c.c
new file mode 100644
index 0000000..162fb19
--- /dev/null
+++ b/modules/audio_processing/ns/nsx_core_c.c
@@ -0,0 +1,260 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/checks.h"
+#include "modules/audio_processing/ns/noise_suppression_x.h"
+#include "modules/audio_processing/ns/nsx_core.h"
+#include "modules/audio_processing/ns/nsx_defines.h"
+
+static const int16_t kIndicatorTable[17] = {
+  0, 2017, 3809, 5227, 6258, 6963, 7424, 7718,
+  7901, 8014, 8084, 8126, 8152, 8168, 8177, 8183, 8187
+};
+
+// Compute speech/noise probability
+// speech/noise probability is returned in: probSpeechFinal
+//snrLocPrior is the prior SNR for each frequency (in Q11)
+//snrLocPost is the post SNR for each frequency (in Q11)
+void WebRtcNsx_SpeechNoiseProb(NoiseSuppressionFixedC* inst,
+                               uint16_t* nonSpeechProbFinal,
+                               uint32_t* priorLocSnr,
+                               uint32_t* postLocSnr) {
+  uint32_t zeros, num, den, tmpU32no1, tmpU32no2, tmpU32no3;
+  int32_t invLrtFX, indPriorFX, tmp32, tmp32no1, tmp32no2, besselTmpFX32;
+  int32_t frac32, logTmp;
+  int32_t logLrtTimeAvgKsumFX;
+  int16_t indPriorFX16;
+  int16_t tmp16, tmp16no1, tmp16no2, tmpIndFX, tableIndex, frac, intPart;
+  size_t i;
+  int normTmp, normTmp2, nShifts;
+
+  // compute feature based on average LR factor
+  // this is the average over all frequencies of the smooth log LRT
+  logLrtTimeAvgKsumFX = 0;
+  for (i = 0; i < inst->magnLen; i++) {
+    besselTmpFX32 = (int32_t)postLocSnr[i]; // Q11
+    normTmp = WebRtcSpl_NormU32(postLocSnr[i]);
+    num = postLocSnr[i] << normTmp;  // Q(11+normTmp)
+    if (normTmp > 10) {
+      den = priorLocSnr[i] << (normTmp - 11);  // Q(normTmp)
+    } else {
+      den = priorLocSnr[i] >> (11 - normTmp);  // Q(normTmp)
+    }
+    if (den > 0) {
+      besselTmpFX32 -= num / den;  // Q11
+    } else {
+      besselTmpFX32 = 0;
+    }
+
+    // inst->logLrtTimeAvg[i] += LRT_TAVG * (besselTmp - log(snrLocPrior)
+    //                                       - inst->logLrtTimeAvg[i]);
+    // Here, LRT_TAVG = 0.5
+    zeros = WebRtcSpl_NormU32(priorLocSnr[i]);
+    frac32 = (int32_t)(((priorLocSnr[i] << zeros) & 0x7FFFFFFF) >> 19);
+    tmp32 = (frac32 * frac32 * -43) >> 19;
+    tmp32 += ((int16_t)frac32 * 5412) >> 12;
+    frac32 = tmp32 + 37;
+    // tmp32 = log2(priorLocSnr[i])
+    tmp32 = (int32_t)(((31 - zeros) << 12) + frac32) - (11 << 12); // Q12
+    logTmp = (tmp32 * 178) >> 8;  // log2(priorLocSnr[i])*log(2)
+    // tmp32no1 = LRT_TAVG * (log(snrLocPrior) + inst->logLrtTimeAvg[i]) in Q12.
+    tmp32no1 = (logTmp + inst->logLrtTimeAvgW32[i]) / 2;
+    inst->logLrtTimeAvgW32[i] += (besselTmpFX32 - tmp32no1); // Q12
+
+    logLrtTimeAvgKsumFX += inst->logLrtTimeAvgW32[i]; // Q12
+  }
+  inst->featureLogLrt = (logLrtTimeAvgKsumFX * BIN_SIZE_LRT) >>
+      (inst->stages + 11);
+
+  // done with computation of LR factor
+
+  //
+  //compute the indicator functions
+  //
+
+  // average LRT feature
+  // FLOAT code
+  // indicator0 = 0.5 * (tanh(widthPrior *
+  //                      (logLrtTimeAvgKsum - threshPrior0)) + 1.0);
+  tmpIndFX = 16384; // Q14(1.0)
+  tmp32no1 = logLrtTimeAvgKsumFX - inst->thresholdLogLrt; // Q12
+  nShifts = 7 - inst->stages; // WIDTH_PR_MAP_SHIFT - inst->stages + 5;
+  //use larger width in tanh map for pause regions
+  if (tmp32no1 < 0) {
+    tmpIndFX = 0;
+    tmp32no1 = -tmp32no1;
+    //widthPrior = widthPrior * 2.0;
+    nShifts++;
+  }
+  tmp32no1 = WEBRTC_SPL_SHIFT_W32(tmp32no1, nShifts); // Q14
+  // compute indicator function: sigmoid map
+  if (tmp32no1 < (16 << 14) && tmp32no1 >= 0) {
+    tableIndex = (int16_t)(tmp32no1 >> 14);
+    tmp16no2 = kIndicatorTable[tableIndex];
+    tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
+    frac = (int16_t)(tmp32no1 & 0x00003fff); // Q14
+    tmp16no2 += (int16_t)((tmp16no1 * frac) >> 14);
+    if (tmpIndFX == 0) {
+      tmpIndFX = 8192 - tmp16no2; // Q14
+    } else {
+      tmpIndFX = 8192 + tmp16no2; // Q14
+    }
+  }
+  indPriorFX = inst->weightLogLrt * tmpIndFX;  // 6*Q14
+
+  //spectral flatness feature
+  if (inst->weightSpecFlat) {
+    tmpU32no1 = WEBRTC_SPL_UMUL(inst->featureSpecFlat, 400); // Q10
+    tmpIndFX = 16384; // Q14(1.0)
+    //use larger width in tanh map for pause regions
+    tmpU32no2 = inst->thresholdSpecFlat - tmpU32no1; //Q10
+    nShifts = 4;
+    if (inst->thresholdSpecFlat < tmpU32no1) {
+      tmpIndFX = 0;
+      tmpU32no2 = tmpU32no1 - inst->thresholdSpecFlat;
+      //widthPrior = widthPrior * 2.0;
+      nShifts++;
+    }
+    tmpU32no1 = WebRtcSpl_DivU32U16(tmpU32no2 << nShifts, 25);  // Q14
+    // compute indicator function: sigmoid map
+    // FLOAT code
+    // indicator1 = 0.5 * (tanh(sgnMap * widthPrior *
+    //                          (threshPrior1 - tmpFloat1)) + 1.0);
+    if (tmpU32no1 < (16 << 14)) {
+      tableIndex = (int16_t)(tmpU32no1 >> 14);
+      tmp16no2 = kIndicatorTable[tableIndex];
+      tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
+      frac = (int16_t)(tmpU32no1 & 0x00003fff); // Q14
+      tmp16no2 += (int16_t)((tmp16no1 * frac) >> 14);
+      if (tmpIndFX) {
+        tmpIndFX = 8192 + tmp16no2; // Q14
+      } else {
+        tmpIndFX = 8192 - tmp16no2; // Q14
+      }
+    }
+    indPriorFX += inst->weightSpecFlat * tmpIndFX;  // 6*Q14
+  }
+
+  //for template spectral-difference
+  if (inst->weightSpecDiff) {
+    tmpU32no1 = 0;
+    if (inst->featureSpecDiff) {
+      normTmp = WEBRTC_SPL_MIN(20 - inst->stages,
+                               WebRtcSpl_NormU32(inst->featureSpecDiff));
+      RTC_DCHECK_GE(normTmp, 0);
+      tmpU32no1 = inst->featureSpecDiff << normTmp;  // Q(normTmp-2*stages)
+      tmpU32no2 = inst->timeAvgMagnEnergy >> (20 - inst->stages - normTmp);
+      if (tmpU32no2 > 0) {
+        // Q(20 - inst->stages)
+        tmpU32no1 /= tmpU32no2;
+      } else {
+        tmpU32no1 = (uint32_t)(0x7fffffff);
+      }
+    }
+    tmpU32no3 = (inst->thresholdSpecDiff << 17) / 25;
+    tmpU32no2 = tmpU32no1 - tmpU32no3;
+    nShifts = 1;
+    tmpIndFX = 16384; // Q14(1.0)
+    //use larger width in tanh map for pause regions
+    if (tmpU32no2 & 0x80000000) {
+      tmpIndFX = 0;
+      tmpU32no2 = tmpU32no3 - tmpU32no1;
+      //widthPrior = widthPrior * 2.0;
+      nShifts--;
+    }
+    tmpU32no1 = tmpU32no2 >> nShifts;
+    // compute indicator function: sigmoid map
+    /* FLOAT code
+     indicator2 = 0.5 * (tanh(widthPrior * (tmpFloat1 - threshPrior2)) + 1.0);
+     */
+    if (tmpU32no1 < (16 << 14)) {
+      tableIndex = (int16_t)(tmpU32no1 >> 14);
+      tmp16no2 = kIndicatorTable[tableIndex];
+      tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
+      frac = (int16_t)(tmpU32no1 & 0x00003fff); // Q14
+      tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+                    tmp16no1, frac, 14);
+      if (tmpIndFX) {
+        tmpIndFX = 8192 + tmp16no2;
+      } else {
+        tmpIndFX = 8192 - tmp16no2;
+      }
+    }
+    indPriorFX += inst->weightSpecDiff * tmpIndFX;  // 6*Q14
+  }
+
+  //combine the indicator function with the feature weights
+  // FLOAT code
+  // indPrior = 1 - (weightIndPrior0 * indicator0 + weightIndPrior1 *
+  //                 indicator1 + weightIndPrior2 * indicator2);
+  indPriorFX16 = WebRtcSpl_DivW32W16ResW16(98307 - indPriorFX, 6); // Q14
+  // done with computing indicator function
+
+  //compute the prior probability
+  // FLOAT code
+  // inst->priorNonSpeechProb += PRIOR_UPDATE *
+  //                             (indPriorNonSpeech - inst->priorNonSpeechProb);
+  tmp16 = indPriorFX16 - inst->priorNonSpeechProb; // Q14
+  inst->priorNonSpeechProb += (int16_t)((PRIOR_UPDATE_Q14 * tmp16) >> 14);
+
+  //final speech probability: combine prior model with LR factor:
+
+  memset(nonSpeechProbFinal, 0, sizeof(uint16_t) * inst->magnLen);
+
+  if (inst->priorNonSpeechProb > 0) {
+    for (i = 0; i < inst->magnLen; i++) {
+      // FLOAT code
+      // invLrt = exp(inst->logLrtTimeAvg[i]);
+      // invLrt = inst->priorSpeechProb * invLrt;
+      // nonSpeechProbFinal[i] = (1.0 - inst->priorSpeechProb) /
+      //                         (1.0 - inst->priorSpeechProb + invLrt);
+      // invLrt = (1.0 - inst->priorNonSpeechProb) * invLrt;
+      // nonSpeechProbFinal[i] = inst->priorNonSpeechProb /
+      //                         (inst->priorNonSpeechProb + invLrt);
+      if (inst->logLrtTimeAvgW32[i] < 65300) {
+        tmp32no1 = (inst->logLrtTimeAvgW32[i] * 23637) >> 14;  // Q12
+        intPart = (int16_t)(tmp32no1 >> 12);
+        if (intPart < -8) {
+          intPart = -8;
+        }
+        frac = (int16_t)(tmp32no1 & 0x00000fff); // Q12
+
+        // Quadratic approximation of 2^frac
+        tmp32no2 = (frac * frac * 44) >> 19;  // Q12.
+        tmp32no2 += (frac * 84) >> 7;  // Q12
+        invLrtFX = (1 << (8 + intPart)) +
+            WEBRTC_SPL_SHIFT_W32(tmp32no2, intPart - 4); // Q8
+
+        normTmp = WebRtcSpl_NormW32(invLrtFX);
+        normTmp2 = WebRtcSpl_NormW16((16384 - inst->priorNonSpeechProb));
+        if (normTmp + normTmp2 >= 7) {
+          if (normTmp + normTmp2 < 15) {
+            invLrtFX >>= 15 - normTmp2 - normTmp;
+            // Q(normTmp+normTmp2-7)
+            tmp32no1 = invLrtFX * (16384 - inst->priorNonSpeechProb);
+            // Q(normTmp+normTmp2+7)
+            invLrtFX = WEBRTC_SPL_SHIFT_W32(tmp32no1, 7 - normTmp - normTmp2);
+                                                                  // Q14
+          } else {
+            tmp32no1 = invLrtFX * (16384 - inst->priorNonSpeechProb);
+                                                                  // Q22
+            invLrtFX = tmp32no1 >> 8;  // Q14.
+          }
+
+          tmp32no1 = (int32_t)inst->priorNonSpeechProb << 8;  // Q22
+
+          nonSpeechProbFinal[i] = tmp32no1 /
+              (inst->priorNonSpeechProb + invLrtFX);  // Q8
+        }
+      }
+    }
+  }
+}
+
diff --git a/modules/audio_processing/ns/nsx_core_mips.c b/modules/audio_processing/ns/nsx_core_mips.c
new file mode 100644
index 0000000..d58a9b2
--- /dev/null
+++ b/modules/audio_processing/ns/nsx_core_mips.c
@@ -0,0 +1,1002 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+
+#include "rtc_base/checks.h"
+#include "modules/audio_processing/ns/noise_suppression_x.h"
+#include "modules/audio_processing/ns/nsx_core.h"
+
+static const int16_t kIndicatorTable[17] = {
+  0, 2017, 3809, 5227, 6258, 6963, 7424, 7718,
+  7901, 8014, 8084, 8126, 8152, 8168, 8177, 8183, 8187
+};
+
+// Compute speech/noise probability
+// speech/noise probability is returned in: probSpeechFinal
+//snrLocPrior is the prior SNR for each frequency (in Q11)
+//snrLocPost is the post SNR for each frequency (in Q11)
+void WebRtcNsx_SpeechNoiseProb(NoiseSuppressionFixedC* inst,
+                               uint16_t* nonSpeechProbFinal,
+                               uint32_t* priorLocSnr,
+                               uint32_t* postLocSnr) {
+  uint32_t tmpU32no1, tmpU32no2, tmpU32no3;
+  int32_t indPriorFX, tmp32no1;
+  int32_t logLrtTimeAvgKsumFX;
+  int16_t indPriorFX16;
+  int16_t tmp16, tmp16no1, tmp16no2, tmpIndFX, tableIndex, frac;
+  size_t i;
+  int normTmp, nShifts;
+
+  int32_t r0, r1, r2, r3, r4, r5, r6, r7, r8, r9;
+  int32_t const_max = 0x7fffffff;
+  int32_t const_neg43 = -43;
+  int32_t const_5412 = 5412;
+  int32_t const_11rsh12 = (11 << 12);
+  int32_t const_178 = 178;
+
+
+  // compute feature based on average LR factor
+  // this is the average over all frequencies of the smooth log LRT
+  logLrtTimeAvgKsumFX = 0;
+  for (i = 0; i < inst->magnLen; i++) {
+    r0 = postLocSnr[i]; // Q11
+    r1 = priorLocSnr[i];
+    r2 = inst->logLrtTimeAvgW32[i];
+
+    __asm __volatile(
+      ".set       push                                    \n\t"
+      ".set       noreorder                               \n\t"
+      "clz        %[r3],    %[r0]                         \n\t"
+      "clz        %[r5],    %[r1]                         \n\t"
+      "slti       %[r4],    %[r3],    32                  \n\t"
+      "slti       %[r6],    %[r5],    32                  \n\t"
+      "movz       %[r3],    $0,       %[r4]               \n\t"
+      "movz       %[r5],    $0,       %[r6]               \n\t"
+      "slti       %[r4],    %[r3],    11                  \n\t"
+      "addiu      %[r6],    %[r3],    -11                 \n\t"
+      "neg        %[r7],    %[r6]                         \n\t"
+      "sllv       %[r6],    %[r1],    %[r6]               \n\t"
+      "srav       %[r7],    %[r1],    %[r7]               \n\t"
+      "movn       %[r6],    %[r7],    %[r4]               \n\t"
+      "sllv       %[r1],    %[r1],    %[r5]               \n\t"
+      "and        %[r1],    %[r1],    %[const_max]        \n\t"
+      "sra        %[r1],    %[r1],    19                  \n\t"
+      "mul        %[r7],    %[r1],    %[r1]               \n\t"
+      "sllv       %[r3],    %[r0],    %[r3]               \n\t"
+      "divu       %[r8],    %[r3],    %[r6]               \n\t"
+      "slti       %[r6],    %[r6],    1                   \n\t"
+      "mul        %[r7],    %[r7],    %[const_neg43]      \n\t"
+      "sra        %[r7],    %[r7],    19                  \n\t"
+      "movz       %[r3],    %[r8],    %[r6]               \n\t"
+      "subu       %[r0],    %[r0],    %[r3]               \n\t"
+      "movn       %[r0],    $0,       %[r6]               \n\t"
+      "mul        %[r1],    %[r1],    %[const_5412]       \n\t"
+      "sra        %[r1],    %[r1],    12                  \n\t"
+      "addu       %[r7],    %[r7],    %[r1]               \n\t"
+      "addiu      %[r1],    %[r7],    37                  \n\t"
+      "addiu      %[r5],    %[r5],    -31                 \n\t"
+      "neg        %[r5],    %[r5]                         \n\t"
+      "sll        %[r5],    %[r5],    12                  \n\t"
+      "addu       %[r5],    %[r5],    %[r1]               \n\t"
+      "subu       %[r7],    %[r5],    %[const_11rsh12]    \n\t"
+      "mul        %[r7],    %[r7],    %[const_178]        \n\t"
+      "sra        %[r7],    %[r7],    8                   \n\t"
+      "addu       %[r7],    %[r7],    %[r2]               \n\t"
+      "sra        %[r7],    %[r7],    1                   \n\t"
+      "subu       %[r2],    %[r2],    %[r7]               \n\t"
+      "addu       %[r2],    %[r2],    %[r0]               \n\t"
+      ".set       pop                                     \n\t"
+      : [r0] "+r" (r0), [r1] "+r" (r1), [r2] "+r" (r2),
+        [r3] "=&r" (r3), [r4] "=&r" (r4), [r5] "=&r" (r5),
+        [r6] "=&r" (r6), [r7] "=&r" (r7), [r8] "=&r" (r8)
+      : [const_max] "r" (const_max), [const_neg43] "r" (const_neg43),
+        [const_5412] "r" (const_5412), [const_11rsh12] "r" (const_11rsh12),
+        [const_178] "r" (const_178)
+      : "hi", "lo"
+    );
+    inst->logLrtTimeAvgW32[i] = r2;
+    logLrtTimeAvgKsumFX += r2;
+  }
+
+  inst->featureLogLrt = (logLrtTimeAvgKsumFX * BIN_SIZE_LRT) >>
+      (inst->stages + 11);
+
+  // done with computation of LR factor
+
+  //
+  // compute the indicator functions
+  //
+
+  // average LRT feature
+  // FLOAT code
+  // indicator0 = 0.5 * (tanh(widthPrior *
+  //                      (logLrtTimeAvgKsum - threshPrior0)) + 1.0);
+  tmpIndFX = 16384; // Q14(1.0)
+  tmp32no1 = logLrtTimeAvgKsumFX - inst->thresholdLogLrt; // Q12
+  nShifts = 7 - inst->stages; // WIDTH_PR_MAP_SHIFT - inst->stages + 5;
+  //use larger width in tanh map for pause regions
+  if (tmp32no1 < 0) {
+    tmpIndFX = 0;
+    tmp32no1 = -tmp32no1;
+    //widthPrior = widthPrior * 2.0;
+    nShifts++;
+  }
+  tmp32no1 = WEBRTC_SPL_SHIFT_W32(tmp32no1, nShifts); // Q14
+  // compute indicator function: sigmoid map
+  if (tmp32no1 < (16 << 14) && tmp32no1 >= 0) {
+    tableIndex = (int16_t)(tmp32no1 >> 14);
+    tmp16no2 = kIndicatorTable[tableIndex];
+    tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
+    frac = (int16_t)(tmp32no1 & 0x00003fff); // Q14
+    tmp16no2 += (int16_t)((tmp16no1 * frac) >> 14);
+    if (tmpIndFX == 0) {
+      tmpIndFX = 8192 - tmp16no2; // Q14
+    } else {
+      tmpIndFX = 8192 + tmp16no2; // Q14
+    }
+  }
+  indPriorFX = inst->weightLogLrt * tmpIndFX;  // 6*Q14
+
+  //spectral flatness feature
+  if (inst->weightSpecFlat) {
+    tmpU32no1 = WEBRTC_SPL_UMUL(inst->featureSpecFlat, 400); // Q10
+    tmpIndFX = 16384; // Q14(1.0)
+    //use larger width in tanh map for pause regions
+    tmpU32no2 = inst->thresholdSpecFlat - tmpU32no1; //Q10
+    nShifts = 4;
+    if (inst->thresholdSpecFlat < tmpU32no1) {
+      tmpIndFX = 0;
+      tmpU32no2 = tmpU32no1 - inst->thresholdSpecFlat;
+      //widthPrior = widthPrior * 2.0;
+      nShifts++;
+    }
+    tmpU32no1 = WebRtcSpl_DivU32U16(tmpU32no2 << nShifts, 25);  //Q14
+    // compute indicator function: sigmoid map
+    // FLOAT code
+    // indicator1 = 0.5 * (tanh(sgnMap * widthPrior *
+    //                          (threshPrior1 - tmpFloat1)) + 1.0);
+    if (tmpU32no1 < (16 << 14)) {
+      tableIndex = (int16_t)(tmpU32no1 >> 14);
+      tmp16no2 = kIndicatorTable[tableIndex];
+      tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
+      frac = (int16_t)(tmpU32no1 & 0x00003fff); // Q14
+      tmp16no2 += (int16_t)((tmp16no1 * frac) >> 14);
+      if (tmpIndFX) {
+        tmpIndFX = 8192 + tmp16no2; // Q14
+      } else {
+        tmpIndFX = 8192 - tmp16no2; // Q14
+      }
+    }
+    indPriorFX += inst->weightSpecFlat * tmpIndFX;  // 6*Q14
+  }
+
+  //for template spectral-difference
+  if (inst->weightSpecDiff) {
+    tmpU32no1 = 0;
+    if (inst->featureSpecDiff) {
+      normTmp = WEBRTC_SPL_MIN(20 - inst->stages,
+                               WebRtcSpl_NormU32(inst->featureSpecDiff));
+      RTC_DCHECK_GE(normTmp, 0);
+      tmpU32no1 = inst->featureSpecDiff << normTmp;  // Q(normTmp-2*stages)
+      tmpU32no2 = inst->timeAvgMagnEnergy >> (20 - inst->stages - normTmp);
+      if (tmpU32no2 > 0) {
+        // Q(20 - inst->stages)
+        tmpU32no1 /= tmpU32no2;
+      } else {
+        tmpU32no1 = (uint32_t)(0x7fffffff);
+      }
+    }
+    tmpU32no3 = (inst->thresholdSpecDiff << 17) / 25;
+    tmpU32no2 = tmpU32no1 - tmpU32no3;
+    nShifts = 1;
+    tmpIndFX = 16384; // Q14(1.0)
+    //use larger width in tanh map for pause regions
+    if (tmpU32no2 & 0x80000000) {
+      tmpIndFX = 0;
+      tmpU32no2 = tmpU32no3 - tmpU32no1;
+      //widthPrior = widthPrior * 2.0;
+      nShifts--;
+    }
+    tmpU32no1 = tmpU32no2 >> nShifts;
+    // compute indicator function: sigmoid map
+    /* FLOAT code
+     indicator2 = 0.5 * (tanh(widthPrior * (tmpFloat1 - threshPrior2)) + 1.0);
+     */
+    if (tmpU32no1 < (16 << 14)) {
+      tableIndex = (int16_t)(tmpU32no1 >> 14);
+      tmp16no2 = kIndicatorTable[tableIndex];
+      tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
+      frac = (int16_t)(tmpU32no1 & 0x00003fff); // Q14
+      tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+                    tmp16no1, frac, 14);
+      if (tmpIndFX) {
+        tmpIndFX = 8192 + tmp16no2;
+      } else {
+        tmpIndFX = 8192 - tmp16no2;
+      }
+    }
+    indPriorFX += inst->weightSpecDiff * tmpIndFX;  // 6*Q14
+  }
+
+  //combine the indicator function with the feature weights
+  // FLOAT code
+  // indPrior = 1 - (weightIndPrior0 * indicator0 + weightIndPrior1 *
+  //                 indicator1 + weightIndPrior2 * indicator2);
+  indPriorFX16 = WebRtcSpl_DivW32W16ResW16(98307 - indPriorFX, 6); // Q14
+  // done with computing indicator function
+
+  //compute the prior probability
+  // FLOAT code
+  // inst->priorNonSpeechProb += PRIOR_UPDATE *
+  //                             (indPriorNonSpeech - inst->priorNonSpeechProb);
+  tmp16 = indPriorFX16 - inst->priorNonSpeechProb; // Q14
+  inst->priorNonSpeechProb += (int16_t)((PRIOR_UPDATE_Q14 * tmp16) >> 14);
+
+  //final speech probability: combine prior model with LR factor:
+
+  memset(nonSpeechProbFinal, 0, sizeof(uint16_t) * inst->magnLen);
+
+  if (inst->priorNonSpeechProb > 0) {
+    r0 = inst->priorNonSpeechProb;
+    r1 = 16384 - r0;
+    int32_t const_23637 = 23637;
+    int32_t const_44 = 44;
+    int32_t const_84 = 84;
+    int32_t const_1 = 1;
+    int32_t const_neg8 = -8;
+    for (i = 0; i < inst->magnLen; i++) {
+      r2 = inst->logLrtTimeAvgW32[i];
+      if (r2 < 65300) {
+        __asm __volatile(
+          ".set         push                                      \n\t"
+          ".set         noreorder                                 \n\t"
+          "mul          %[r2],    %[r2],          %[const_23637]  \n\t"
+          "sll          %[r6],    %[r1],          16              \n\t"
+          "clz          %[r7],    %[r6]                           \n\t"
+          "clo          %[r8],    %[r6]                           \n\t"
+          "slt          %[r9],    %[r6],          $0              \n\t"
+          "movn         %[r7],    %[r8],          %[r9]           \n\t"
+          "sra          %[r2],    %[r2],          14              \n\t"
+          "andi         %[r3],    %[r2],          0xfff           \n\t"
+          "mul          %[r4],    %[r3],          %[r3]           \n\t"
+          "mul          %[r3],    %[r3],          %[const_84]     \n\t"
+          "sra          %[r2],    %[r2],          12              \n\t"
+          "slt          %[r5],    %[r2],          %[const_neg8]   \n\t"
+          "movn         %[r2],    %[const_neg8],  %[r5]           \n\t"
+          "mul          %[r4],    %[r4],          %[const_44]     \n\t"
+          "sra          %[r3],    %[r3],          7               \n\t"
+          "addiu        %[r7],    %[r7],          -1              \n\t"
+          "slti         %[r9],    %[r7],          31              \n\t"
+          "movz         %[r7],    $0,             %[r9]           \n\t"
+          "sra          %[r4],    %[r4],          19              \n\t"
+          "addu         %[r4],    %[r4],          %[r3]           \n\t"
+          "addiu        %[r3],    %[r2],          8               \n\t"
+          "addiu        %[r2],    %[r2],          -4              \n\t"
+          "neg          %[r5],    %[r2]                           \n\t"
+          "sllv         %[r6],    %[r4],          %[r2]           \n\t"
+          "srav         %[r5],    %[r4],          %[r5]           \n\t"
+          "slt          %[r2],    %[r2],          $0              \n\t"
+          "movn         %[r6],    %[r5],          %[r2]           \n\t"
+          "sllv         %[r3],    %[const_1],     %[r3]           \n\t"
+          "addu         %[r2],    %[r3],          %[r6]           \n\t"
+          "clz          %[r4],    %[r2]                           \n\t"
+          "clo          %[r5],    %[r2]                           \n\t"
+          "slt          %[r8],    %[r2],          $0              \n\t"
+          "movn         %[r4],    %[r5],          %[r8]           \n\t"
+          "addiu        %[r4],    %[r4],          -1              \n\t"
+          "slt          %[r5],    $0,             %[r2]           \n\t"
+          "or           %[r5],    %[r5],          %[r7]           \n\t"
+          "movz         %[r4],    $0,             %[r5]           \n\t"
+          "addiu        %[r6],    %[r7],          -7              \n\t"
+          "addu         %[r6],    %[r6],          %[r4]           \n\t"
+          "bltz         %[r6],    1f                              \n\t"
+          " nop                                                   \n\t"
+          "addiu        %[r4],    %[r6],          -8              \n\t"
+          "neg          %[r3],    %[r4]                           \n\t"
+          "srav         %[r5],    %[r2],          %[r3]           \n\t"
+          "mul          %[r5],    %[r5],          %[r1]           \n\t"
+          "mul          %[r2],    %[r2],          %[r1]           \n\t"
+          "slt          %[r4],    %[r4],          $0              \n\t"
+          "srav         %[r5],    %[r5],          %[r6]           \n\t"
+          "sra          %[r2],    %[r2],          8               \n\t"
+          "movn         %[r2],    %[r5],          %[r4]           \n\t"
+          "sll          %[r3],    %[r0],          8               \n\t"
+          "addu         %[r2],    %[r0],          %[r2]           \n\t"
+          "divu         %[r3],    %[r3],          %[r2]           \n\t"
+         "1:                                                      \n\t"
+          ".set         pop                                       \n\t"
+          : [r2] "+r" (r2), [r3] "=&r" (r3), [r4] "=&r" (r4),
+            [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
+            [r8] "=&r" (r8), [r9] "=&r" (r9)
+          : [r0] "r" (r0), [r1] "r" (r1), [const_23637] "r" (const_23637),
+            [const_neg8] "r" (const_neg8), [const_84] "r" (const_84),
+            [const_1] "r" (const_1), [const_44] "r" (const_44)
+          : "hi", "lo"
+        );
+        nonSpeechProbFinal[i] = r3;
+      }
+    }
+  }
+}
+
+// Update analysis buffer for lower band, and window data before FFT.
+void WebRtcNsx_AnalysisUpdate_mips(NoiseSuppressionFixedC* inst,
+                                   int16_t* out,
+                                   int16_t* new_speech) {
+  int iters, after;
+  int anaLen = (int)inst->anaLen;
+  int *window = (int*)inst->window;
+  int *anaBuf = (int*)inst->analysisBuffer;
+  int *outBuf = (int*)out;
+  int r0, r1, r2, r3, r4, r5, r6, r7;
+#if defined(MIPS_DSP_R1_LE)
+  int r8;
+#endif
+
+  // For lower band update analysis buffer.
+  memcpy(inst->analysisBuffer, inst->analysisBuffer + inst->blockLen10ms,
+      (inst->anaLen - inst->blockLen10ms) * sizeof(*inst->analysisBuffer));
+  memcpy(inst->analysisBuffer + inst->anaLen - inst->blockLen10ms, new_speech,
+      inst->blockLen10ms * sizeof(*inst->analysisBuffer));
+
+  // Window data before FFT.
+#if defined(MIPS_DSP_R1_LE)
+  __asm __volatile(
+    ".set              push                                \n\t"
+    ".set              noreorder                           \n\t"
+    "sra               %[iters],   %[anaLen],    3         \n\t"
+   "1:                                                     \n\t"
+    "blez              %[iters],   2f                      \n\t"
+    " nop                                                  \n\t"
+    "lw                %[r0],      0(%[window])            \n\t"
+    "lw                %[r1],      0(%[anaBuf])            \n\t"
+    "lw                %[r2],      4(%[window])            \n\t"
+    "lw                %[r3],      4(%[anaBuf])            \n\t"
+    "lw                %[r4],      8(%[window])            \n\t"
+    "lw                %[r5],      8(%[anaBuf])            \n\t"
+    "lw                %[r6],      12(%[window])           \n\t"
+    "lw                %[r7],      12(%[anaBuf])           \n\t"
+    "muleq_s.w.phl     %[r8],      %[r0],        %[r1]     \n\t"
+    "muleq_s.w.phr     %[r0],      %[r0],        %[r1]     \n\t"
+    "muleq_s.w.phl     %[r1],      %[r2],        %[r3]     \n\t"
+    "muleq_s.w.phr     %[r2],      %[r2],        %[r3]     \n\t"
+    "muleq_s.w.phl     %[r3],      %[r4],        %[r5]     \n\t"
+    "muleq_s.w.phr     %[r4],      %[r4],        %[r5]     \n\t"
+    "muleq_s.w.phl     %[r5],      %[r6],        %[r7]     \n\t"
+    "muleq_s.w.phr     %[r6],      %[r6],        %[r7]     \n\t"
+#if defined(MIPS_DSP_R2_LE)
+    "precr_sra_r.ph.w  %[r8],      %[r0],        15        \n\t"
+    "precr_sra_r.ph.w  %[r1],      %[r2],        15        \n\t"
+    "precr_sra_r.ph.w  %[r3],      %[r4],        15        \n\t"
+    "precr_sra_r.ph.w  %[r5],      %[r6],        15        \n\t"
+    "sw                %[r8],      0(%[outBuf])            \n\t"
+    "sw                %[r1],      4(%[outBuf])            \n\t"
+    "sw                %[r3],      8(%[outBuf])            \n\t"
+    "sw                %[r5],      12(%[outBuf])           \n\t"
+#else
+    "shra_r.w          %[r8],      %[r8],        15        \n\t"
+    "shra_r.w          %[r0],      %[r0],        15        \n\t"
+    "shra_r.w          %[r1],      %[r1],        15        \n\t"
+    "shra_r.w          %[r2],      %[r2],        15        \n\t"
+    "shra_r.w          %[r3],      %[r3],        15        \n\t"
+    "shra_r.w          %[r4],      %[r4],        15        \n\t"
+    "shra_r.w          %[r5],      %[r5],        15        \n\t"
+    "shra_r.w          %[r6],      %[r6],        15        \n\t"
+    "sll               %[r0],      %[r0],        16        \n\t"
+    "sll               %[r2],      %[r2],        16        \n\t"
+    "sll               %[r4],      %[r4],        16        \n\t"
+    "sll               %[r6],      %[r6],        16        \n\t"
+    "packrl.ph         %[r0],      %[r8],        %[r0]     \n\t"
+    "packrl.ph         %[r2],      %[r1],        %[r2]     \n\t"
+    "packrl.ph         %[r4],      %[r3],        %[r4]     \n\t"
+    "packrl.ph         %[r6],      %[r5],        %[r6]     \n\t"
+    "sw                %[r0],      0(%[outBuf])            \n\t"
+    "sw                %[r2],      4(%[outBuf])            \n\t"
+    "sw                %[r4],      8(%[outBuf])            \n\t"
+    "sw                %[r6],      12(%[outBuf])           \n\t"
+#endif
+    "addiu             %[window],  %[window],    16        \n\t"
+    "addiu             %[anaBuf],  %[anaBuf],    16        \n\t"
+    "addiu             %[outBuf],  %[outBuf],    16        \n\t"
+    "b                 1b                                  \n\t"
+    " addiu            %[iters],   %[iters],     -1        \n\t"
+   "2:                                                     \n\t"
+    "andi              %[after],   %[anaLen],    7         \n\t"
+   "3:                                                     \n\t"
+    "blez              %[after],   4f                      \n\t"
+    " nop                                                  \n\t"
+    "lh                %[r0],      0(%[window])            \n\t"
+    "lh                %[r1],      0(%[anaBuf])            \n\t"
+    "mul               %[r0],      %[r0],        %[r1]     \n\t"
+    "addiu             %[window],  %[window],    2         \n\t"
+    "addiu             %[anaBuf],  %[anaBuf],    2         \n\t"
+    "addiu             %[outBuf],  %[outBuf],    2         \n\t"
+    "shra_r.w          %[r0],      %[r0],        14        \n\t"
+    "sh                %[r0],      -2(%[outBuf])           \n\t"
+    "b                 3b                                  \n\t"
+    " addiu            %[after],   %[after],     -1        \n\t"
+   "4:                                                     \n\t"
+    ".set              pop                                 \n\t"
+    : [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2),
+      [r3] "=&r" (r3), [r4] "=&r" (r4), [r5] "=&r" (r5),
+      [r6] "=&r" (r6), [r7] "=&r" (r7), [r8] "=&r" (r8),
+      [iters] "=&r" (iters), [after] "=&r" (after),
+      [window] "+r" (window),[anaBuf] "+r" (anaBuf),
+      [outBuf] "+r" (outBuf)
+    : [anaLen] "r" (anaLen)
+    : "memory", "hi", "lo"
+  );
+#else
+  __asm  __volatile(
+    ".set           push                                    \n\t"
+    ".set           noreorder                               \n\t"
+    "sra            %[iters],   %[anaLen],      2           \n\t"
+   "1:                                                      \n\t"
+    "blez           %[iters],   2f                          \n\t"
+    " nop                                                   \n\t"
+    "lh             %[r0],      0(%[window])                \n\t"
+    "lh             %[r1],      0(%[anaBuf])                \n\t"
+    "lh             %[r2],      2(%[window])                \n\t"
+    "lh             %[r3],      2(%[anaBuf])                \n\t"
+    "lh             %[r4],      4(%[window])                \n\t"
+    "lh             %[r5],      4(%[anaBuf])                \n\t"
+    "lh             %[r6],      6(%[window])                \n\t"
+    "lh             %[r7],      6(%[anaBuf])                \n\t"
+    "mul            %[r0],      %[r0],          %[r1]       \n\t"
+    "mul            %[r2],      %[r2],          %[r3]       \n\t"
+    "mul            %[r4],      %[r4],          %[r5]       \n\t"
+    "mul            %[r6],      %[r6],          %[r7]       \n\t"
+    "addiu          %[window],  %[window],      8           \n\t"
+    "addiu          %[anaBuf],  %[anaBuf],      8           \n\t"
+    "addiu          %[r0],      %[r0],          0x2000      \n\t"
+    "addiu          %[r2],      %[r2],          0x2000      \n\t"
+    "addiu          %[r4],      %[r4],          0x2000      \n\t"
+    "addiu          %[r6],      %[r6],          0x2000      \n\t"
+    "sra            %[r0],      %[r0],          14          \n\t"
+    "sra            %[r2],      %[r2],          14          \n\t"
+    "sra            %[r4],      %[r4],          14          \n\t"
+    "sra            %[r6],      %[r6],          14          \n\t"
+    "sh             %[r0],      0(%[outBuf])                \n\t"
+    "sh             %[r2],      2(%[outBuf])                \n\t"
+    "sh             %[r4],      4(%[outBuf])                \n\t"
+    "sh             %[r6],      6(%[outBuf])                \n\t"
+    "addiu          %[outBuf],  %[outBuf],      8           \n\t"
+    "b              1b                                      \n\t"
+    " addiu         %[iters],   %[iters],       -1          \n\t"
+   "2:                                                      \n\t"
+    "andi           %[after],   %[anaLen],      3           \n\t"
+   "3:                                                      \n\t"
+    "blez           %[after],   4f                          \n\t"
+    " nop                                                   \n\t"
+    "lh             %[r0],      0(%[window])                \n\t"
+    "lh             %[r1],      0(%[anaBuf])                \n\t"
+    "mul            %[r0],      %[r0],          %[r1]       \n\t"
+    "addiu          %[window],  %[window],      2           \n\t"
+    "addiu          %[anaBuf],  %[anaBuf],      2           \n\t"
+    "addiu          %[outBuf],  %[outBuf],      2           \n\t"
+    "addiu          %[r0],      %[r0],          0x2000      \n\t"
+    "sra            %[r0],      %[r0],          14          \n\t"
+    "sh             %[r0],      -2(%[outBuf])               \n\t"
+    "b              3b                                      \n\t"
+    " addiu         %[after],   %[after],       -1          \n\t"
+   "4:                                                      \n\t"
+    ".set           pop                                     \n\t"
+    : [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2),
+      [r3] "=&r" (r3), [r4] "=&r" (r4), [r5] "=&r" (r5),
+      [r6] "=&r" (r6), [r7] "=&r" (r7), [iters] "=&r" (iters),
+      [after] "=&r" (after), [window] "+r" (window),
+      [anaBuf] "+r" (anaBuf), [outBuf] "+r" (outBuf)
+    : [anaLen] "r" (anaLen)
+    : "memory", "hi", "lo"
+  );
+#endif
+}
+
+// For the noise supression process, synthesis, read out fully processed
+// segment, and update synthesis buffer.
+void WebRtcNsx_SynthesisUpdate_mips(NoiseSuppressionFixedC* inst,
+                                    int16_t* out_frame,
+                                    int16_t gain_factor) {
+  int iters = (int)inst->blockLen10ms >> 2;
+  int after = inst->blockLen10ms & 3;
+  int r0, r1, r2, r3, r4, r5, r6, r7;
+  int16_t *window = (int16_t*)inst->window;
+  int16_t *real = inst->real;
+  int16_t *synthBuf = inst->synthesisBuffer;
+  int16_t *out = out_frame;
+  int sat_pos = 0x7fff;
+  int sat_neg = 0xffff8000;
+  int block10 = (int)inst->blockLen10ms;
+  int anaLen = (int)inst->anaLen;
+
+  __asm __volatile(
+    ".set       push                                        \n\t"
+    ".set       noreorder                                   \n\t"
+   "1:                                                      \n\t"
+    "blez       %[iters],   2f                              \n\t"
+    " nop                                                   \n\t"
+    "lh         %[r0],      0(%[window])                    \n\t"
+    "lh         %[r1],      0(%[real])                      \n\t"
+    "lh         %[r2],      2(%[window])                    \n\t"
+    "lh         %[r3],      2(%[real])                      \n\t"
+    "lh         %[r4],      4(%[window])                    \n\t"
+    "lh         %[r5],      4(%[real])                      \n\t"
+    "lh         %[r6],      6(%[window])                    \n\t"
+    "lh         %[r7],      6(%[real])                      \n\t"
+    "mul        %[r0],      %[r0],          %[r1]           \n\t"
+    "mul        %[r2],      %[r2],          %[r3]           \n\t"
+    "mul        %[r4],      %[r4],          %[r5]           \n\t"
+    "mul        %[r6],      %[r6],          %[r7]           \n\t"
+    "addiu      %[r0],      %[r0],          0x2000          \n\t"
+    "addiu      %[r2],      %[r2],          0x2000          \n\t"
+    "addiu      %[r4],      %[r4],          0x2000          \n\t"
+    "addiu      %[r6],      %[r6],          0x2000          \n\t"
+    "sra        %[r0],      %[r0],          14              \n\t"
+    "sra        %[r2],      %[r2],          14              \n\t"
+    "sra        %[r4],      %[r4],          14              \n\t"
+    "sra        %[r6],      %[r6],          14              \n\t"
+    "mul        %[r0],      %[r0],          %[gain_factor]  \n\t"
+    "mul        %[r2],      %[r2],          %[gain_factor]  \n\t"
+    "mul        %[r4],      %[r4],          %[gain_factor]  \n\t"
+    "mul        %[r6],      %[r6],          %[gain_factor]  \n\t"
+    "addiu      %[r0],      %[r0],          0x1000          \n\t"
+    "addiu      %[r2],      %[r2],          0x1000          \n\t"
+    "addiu      %[r4],      %[r4],          0x1000          \n\t"
+    "addiu      %[r6],      %[r6],          0x1000          \n\t"
+    "sra        %[r0],      %[r0],          13              \n\t"
+    "sra        %[r2],      %[r2],          13              \n\t"
+    "sra        %[r4],      %[r4],          13              \n\t"
+    "sra        %[r6],      %[r6],          13              \n\t"
+    "slt        %[r1],      %[r0],          %[sat_pos]      \n\t"
+    "slt        %[r3],      %[r2],          %[sat_pos]      \n\t"
+    "slt        %[r5],      %[r4],          %[sat_pos]      \n\t"
+    "slt        %[r7],      %[r6],          %[sat_pos]      \n\t"
+    "movz       %[r0],      %[sat_pos],     %[r1]           \n\t"
+    "movz       %[r2],      %[sat_pos],     %[r3]           \n\t"
+    "movz       %[r4],      %[sat_pos],     %[r5]           \n\t"
+    "movz       %[r6],      %[sat_pos],     %[r7]           \n\t"
+    "lh         %[r1],      0(%[synthBuf])                  \n\t"
+    "lh         %[r3],      2(%[synthBuf])                  \n\t"
+    "lh         %[r5],      4(%[synthBuf])                  \n\t"
+    "lh         %[r7],      6(%[synthBuf])                  \n\t"
+    "addu       %[r0],      %[r0],          %[r1]           \n\t"
+    "addu       %[r2],      %[r2],          %[r3]           \n\t"
+    "addu       %[r4],      %[r4],          %[r5]           \n\t"
+    "addu       %[r6],      %[r6],          %[r7]           \n\t"
+    "slt        %[r1],      %[r0],          %[sat_pos]      \n\t"
+    "slt        %[r3],      %[r2],          %[sat_pos]      \n\t"
+    "slt        %[r5],      %[r4],          %[sat_pos]      \n\t"
+    "slt        %[r7],      %[r6],          %[sat_pos]      \n\t"
+    "movz       %[r0],      %[sat_pos],     %[r1]           \n\t"
+    "movz       %[r2],      %[sat_pos],     %[r3]           \n\t"
+    "movz       %[r4],      %[sat_pos],     %[r5]           \n\t"
+    "movz       %[r6],      %[sat_pos],     %[r7]           \n\t"
+    "slt        %[r1],      %[r0],          %[sat_neg]      \n\t"
+    "slt        %[r3],      %[r2],          %[sat_neg]      \n\t"
+    "slt        %[r5],      %[r4],          %[sat_neg]      \n\t"
+    "slt        %[r7],      %[r6],          %[sat_neg]      \n\t"
+    "movn       %[r0],      %[sat_neg],     %[r1]           \n\t"
+    "movn       %[r2],      %[sat_neg],     %[r3]           \n\t"
+    "movn       %[r4],      %[sat_neg],     %[r5]           \n\t"
+    "movn       %[r6],      %[sat_neg],     %[r7]           \n\t"
+    "sh         %[r0],      0(%[synthBuf])                  \n\t"
+    "sh         %[r2],      2(%[synthBuf])                  \n\t"
+    "sh         %[r4],      4(%[synthBuf])                  \n\t"
+    "sh         %[r6],      6(%[synthBuf])                  \n\t"
+    "sh         %[r0],      0(%[out])                       \n\t"
+    "sh         %[r2],      2(%[out])                       \n\t"
+    "sh         %[r4],      4(%[out])                       \n\t"
+    "sh         %[r6],      6(%[out])                       \n\t"
+    "addiu      %[window],  %[window],      8               \n\t"
+    "addiu      %[real],    %[real],        8               \n\t"
+    "addiu      %[synthBuf],%[synthBuf],    8               \n\t"
+    "addiu      %[out],     %[out],         8               \n\t"
+    "b          1b                                          \n\t"
+    " addiu     %[iters],   %[iters],       -1              \n\t"
+   "2:                                                      \n\t"
+    "blez       %[after],   3f                              \n\t"
+    " subu      %[block10], %[anaLen],      %[block10]      \n\t"
+    "lh         %[r0],      0(%[window])                    \n\t"
+    "lh         %[r1],      0(%[real])                      \n\t"
+    "mul        %[r0],      %[r0],          %[r1]           \n\t"
+    "addiu      %[window],  %[window],      2               \n\t"
+    "addiu      %[real],    %[real],        2               \n\t"
+    "addiu      %[r0],      %[r0],          0x2000          \n\t"
+    "sra        %[r0],      %[r0],          14              \n\t"
+    "mul        %[r0],      %[r0],          %[gain_factor]  \n\t"
+    "addiu      %[r0],      %[r0],          0x1000          \n\t"
+    "sra        %[r0],      %[r0],          13              \n\t"
+    "slt        %[r1],      %[r0],          %[sat_pos]      \n\t"
+    "movz       %[r0],      %[sat_pos],     %[r1]           \n\t"
+    "lh         %[r1],      0(%[synthBuf])                  \n\t"
+    "addu       %[r0],      %[r0],          %[r1]           \n\t"
+    "slt        %[r1],      %[r0],          %[sat_pos]      \n\t"
+    "movz       %[r0],      %[sat_pos],     %[r1]           \n\t"
+    "slt        %[r1],      %[r0],          %[sat_neg]      \n\t"
+    "movn       %[r0],      %[sat_neg],     %[r1]           \n\t"
+    "sh         %[r0],      0(%[synthBuf])                  \n\t"
+    "sh         %[r0],      0(%[out])                       \n\t"
+    "addiu      %[synthBuf],%[synthBuf],    2               \n\t"
+    "addiu      %[out],     %[out],         2               \n\t"
+    "b          2b                                          \n\t"
+    " addiu     %[after],   %[after],       -1              \n\t"
+   "3:                                                      \n\t"
+    "sra        %[iters],   %[block10],     2               \n\t"
+   "4:                                                      \n\t"
+    "blez       %[iters],   5f                              \n\t"
+    " andi      %[after],   %[block10],     3               \n\t"
+    "lh         %[r0],      0(%[window])                    \n\t"
+    "lh         %[r1],      0(%[real])                      \n\t"
+    "lh         %[r2],      2(%[window])                    \n\t"
+    "lh         %[r3],      2(%[real])                      \n\t"
+    "lh         %[r4],      4(%[window])                    \n\t"
+    "lh         %[r5],      4(%[real])                      \n\t"
+    "lh         %[r6],      6(%[window])                    \n\t"
+    "lh         %[r7],      6(%[real])                      \n\t"
+    "mul        %[r0],      %[r0],          %[r1]           \n\t"
+    "mul        %[r2],      %[r2],          %[r3]           \n\t"
+    "mul        %[r4],      %[r4],          %[r5]           \n\t"
+    "mul        %[r6],      %[r6],          %[r7]           \n\t"
+    "addiu      %[r0],      %[r0],          0x2000          \n\t"
+    "addiu      %[r2],      %[r2],          0x2000          \n\t"
+    "addiu      %[r4],      %[r4],          0x2000          \n\t"
+    "addiu      %[r6],      %[r6],          0x2000          \n\t"
+    "sra        %[r0],      %[r0],          14              \n\t"
+    "sra        %[r2],      %[r2],          14              \n\t"
+    "sra        %[r4],      %[r4],          14              \n\t"
+    "sra        %[r6],      %[r6],          14              \n\t"
+    "mul        %[r0],      %[r0],          %[gain_factor]  \n\t"
+    "mul        %[r2],      %[r2],          %[gain_factor]  \n\t"
+    "mul        %[r4],      %[r4],          %[gain_factor]  \n\t"
+    "mul        %[r6],      %[r6],          %[gain_factor]  \n\t"
+    "addiu      %[r0],      %[r0],          0x1000          \n\t"
+    "addiu      %[r2],      %[r2],          0x1000          \n\t"
+    "addiu      %[r4],      %[r4],          0x1000          \n\t"
+    "addiu      %[r6],      %[r6],          0x1000          \n\t"
+    "sra        %[r0],      %[r0],          13              \n\t"
+    "sra        %[r2],      %[r2],          13              \n\t"
+    "sra        %[r4],      %[r4],          13              \n\t"
+    "sra        %[r6],      %[r6],          13              \n\t"
+    "slt        %[r1],      %[r0],          %[sat_pos]      \n\t"
+    "slt        %[r3],      %[r2],          %[sat_pos]      \n\t"
+    "slt        %[r5],      %[r4],          %[sat_pos]      \n\t"
+    "slt        %[r7],      %[r6],          %[sat_pos]      \n\t"
+    "movz       %[r0],      %[sat_pos],     %[r1]           \n\t"
+    "movz       %[r2],      %[sat_pos],     %[r3]           \n\t"
+    "movz       %[r4],      %[sat_pos],     %[r5]           \n\t"
+    "movz       %[r6],      %[sat_pos],     %[r7]           \n\t"
+    "lh         %[r1],      0(%[synthBuf])                  \n\t"
+    "lh         %[r3],      2(%[synthBuf])                  \n\t"
+    "lh         %[r5],      4(%[synthBuf])                  \n\t"
+    "lh         %[r7],      6(%[synthBuf])                  \n\t"
+    "addu       %[r0],      %[r0],          %[r1]           \n\t"
+    "addu       %[r2],      %[r2],          %[r3]           \n\t"
+    "addu       %[r4],      %[r4],          %[r5]           \n\t"
+    "addu       %[r6],      %[r6],          %[r7]           \n\t"
+    "slt        %[r1],      %[r0],          %[sat_pos]      \n\t"
+    "slt        %[r3],      %[r2],          %[sat_pos]      \n\t"
+    "slt        %[r5],      %[r4],          %[sat_pos]      \n\t"
+    "slt        %[r7],      %[r6],          %[sat_pos]      \n\t"
+    "movz       %[r0],      %[sat_pos],     %[r1]           \n\t"
+    "movz       %[r2],      %[sat_pos],     %[r3]           \n\t"
+    "movz       %[r4],      %[sat_pos],     %[r5]           \n\t"
+    "movz       %[r6],      %[sat_pos],     %[r7]           \n\t"
+    "slt        %[r1],      %[r0],          %[sat_neg]      \n\t"
+    "slt        %[r3],      %[r2],          %[sat_neg]      \n\t"
+    "slt        %[r5],      %[r4],          %[sat_neg]      \n\t"
+    "slt        %[r7],      %[r6],          %[sat_neg]      \n\t"
+    "movn       %[r0],      %[sat_neg],     %[r1]           \n\t"
+    "movn       %[r2],      %[sat_neg],     %[r3]           \n\t"
+    "movn       %[r4],      %[sat_neg],     %[r5]           \n\t"
+    "movn       %[r6],      %[sat_neg],     %[r7]           \n\t"
+    "sh         %[r0],      0(%[synthBuf])                  \n\t"
+    "sh         %[r2],      2(%[synthBuf])                  \n\t"
+    "sh         %[r4],      4(%[synthBuf])                  \n\t"
+    "sh         %[r6],      6(%[synthBuf])                  \n\t"
+    "addiu      %[window],  %[window],      8               \n\t"
+    "addiu      %[real],    %[real],        8               \n\t"
+    "addiu      %[synthBuf],%[synthBuf],    8               \n\t"
+    "b          4b                                          \n\t"
+    " addiu     %[iters],   %[iters],       -1              \n\t"
+   "5:                                                      \n\t"
+    "blez       %[after],   6f                              \n\t"
+    " nop                                                   \n\t"
+    "lh         %[r0],      0(%[window])                    \n\t"
+    "lh         %[r1],      0(%[real])                      \n\t"
+    "mul        %[r0],      %[r0],          %[r1]           \n\t"
+    "addiu      %[window],  %[window],      2               \n\t"
+    "addiu      %[real],    %[real],        2               \n\t"
+    "addiu      %[r0],      %[r0],          0x2000          \n\t"
+    "sra        %[r0],      %[r0],          14              \n\t"
+    "mul        %[r0],      %[r0],          %[gain_factor]  \n\t"
+    "addiu      %[r0],      %[r0],          0x1000          \n\t"
+    "sra        %[r0],      %[r0],          13              \n\t"
+    "slt        %[r1],      %[r0],          %[sat_pos]      \n\t"
+    "movz       %[r0],      %[sat_pos],     %[r1]           \n\t"
+    "lh         %[r1],      0(%[synthBuf])                  \n\t"
+    "addu       %[r0],      %[r0],          %[r1]           \n\t"
+    "slt        %[r1],      %[r0],          %[sat_pos]      \n\t"
+    "movz       %[r0],      %[sat_pos],     %[r1]           \n\t"
+    "slt        %[r1],      %[r0],          %[sat_neg]      \n\t"
+    "movn       %[r0],      %[sat_neg],     %[r1]           \n\t"
+    "sh         %[r0],      0(%[synthBuf])                  \n\t"
+    "addiu      %[synthBuf],%[synthBuf],    2               \n\t"
+    "b          2b                                          \n\t"
+    " addiu     %[after],   %[after],       -1              \n\t"
+   "6:                                                      \n\t"
+    ".set       pop                                         \n\t"
+    : [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2),
+      [r3] "=&r" (r3), [r4] "=&r" (r4), [r5] "=&r" (r5),
+      [r6] "=&r" (r6), [r7] "=&r" (r7), [iters] "+r" (iters),
+      [after] "+r" (after), [block10] "+r" (block10),
+      [window] "+r" (window), [real] "+r" (real),
+      [synthBuf] "+r" (synthBuf), [out] "+r" (out)
+    : [gain_factor] "r" (gain_factor), [sat_pos] "r" (sat_pos),
+      [sat_neg] "r" (sat_neg), [anaLen] "r" (anaLen)
+    : "memory", "hi", "lo"
+  );
+
+  // update synthesis buffer
+  memcpy(inst->synthesisBuffer, inst->synthesisBuffer + inst->blockLen10ms,
+      (inst->anaLen - inst->blockLen10ms) * sizeof(*inst->synthesisBuffer));
+  WebRtcSpl_ZerosArrayW16(inst->synthesisBuffer
+      + inst->anaLen - inst->blockLen10ms, inst->blockLen10ms);
+}
+
+// Filter the data in the frequency domain, and create spectrum.
+void WebRtcNsx_PrepareSpectrum_mips(NoiseSuppressionFixedC* inst,
+                                    int16_t* freq_buf) {
+  uint16_t *noiseSupFilter = inst->noiseSupFilter;
+  int16_t *real = inst->real;
+  int16_t *imag = inst->imag;
+  int32_t loop_count = 2;
+  int16_t tmp_1, tmp_2, tmp_3, tmp_4, tmp_5, tmp_6;
+  int16_t tmp16 = (int16_t)(inst->anaLen << 1) - 4;
+  int16_t* freq_buf_f = freq_buf;
+  int16_t* freq_buf_s = &freq_buf[tmp16];
+
+  __asm __volatile (
+    ".set       push                                                 \n\t"
+    ".set       noreorder                                            \n\t"
+    //first sample
+    "lh         %[tmp_1],           0(%[noiseSupFilter])             \n\t"
+    "lh         %[tmp_2],           0(%[real])                       \n\t"
+    "lh         %[tmp_3],           0(%[imag])                       \n\t"
+    "mul        %[tmp_2],           %[tmp_2],             %[tmp_1]   \n\t"
+    "mul        %[tmp_3],           %[tmp_3],             %[tmp_1]   \n\t"
+    "sra        %[tmp_2],           %[tmp_2],             14         \n\t"
+    "sra        %[tmp_3],           %[tmp_3],             14         \n\t"
+    "sh         %[tmp_2],           0(%[real])                       \n\t"
+    "sh         %[tmp_3],           0(%[imag])                       \n\t"
+    "negu       %[tmp_3],           %[tmp_3]                         \n\t"
+    "sh         %[tmp_2],           0(%[freq_buf_f])                 \n\t"
+    "sh         %[tmp_3],           2(%[freq_buf_f])                 \n\t"
+    "addiu      %[real],            %[real],              2          \n\t"
+    "addiu      %[imag],            %[imag],              2          \n\t"
+    "addiu      %[noiseSupFilter],  %[noiseSupFilter],    2          \n\t"
+    "addiu      %[freq_buf_f],      %[freq_buf_f],        4          \n\t"
+   "1:                                                               \n\t"
+    "lh         %[tmp_1],           0(%[noiseSupFilter])             \n\t"
+    "lh         %[tmp_2],           0(%[real])                       \n\t"
+    "lh         %[tmp_3],           0(%[imag])                       \n\t"
+    "lh         %[tmp_4],           2(%[noiseSupFilter])             \n\t"
+    "lh         %[tmp_5],           2(%[real])                       \n\t"
+    "lh         %[tmp_6],           2(%[imag])                       \n\t"
+    "mul        %[tmp_2],           %[tmp_2],             %[tmp_1]   \n\t"
+    "mul        %[tmp_3],           %[tmp_3],             %[tmp_1]   \n\t"
+    "mul        %[tmp_5],           %[tmp_5],             %[tmp_4]   \n\t"
+    "mul        %[tmp_6],           %[tmp_6],             %[tmp_4]   \n\t"
+    "addiu      %[loop_count],      %[loop_count],        2          \n\t"
+    "sra        %[tmp_2],           %[tmp_2],             14         \n\t"
+    "sra        %[tmp_3],           %[tmp_3],             14         \n\t"
+    "sra        %[tmp_5],           %[tmp_5],             14         \n\t"
+    "sra        %[tmp_6],           %[tmp_6],             14         \n\t"
+    "addiu      %[noiseSupFilter],  %[noiseSupFilter],    4          \n\t"
+    "sh         %[tmp_2],           0(%[real])                       \n\t"
+    "sh         %[tmp_2],           4(%[freq_buf_s])                 \n\t"
+    "sh         %[tmp_3],           0(%[imag])                       \n\t"
+    "sh         %[tmp_3],           6(%[freq_buf_s])                 \n\t"
+    "negu       %[tmp_3],           %[tmp_3]                         \n\t"
+    "sh         %[tmp_5],           2(%[real])                       \n\t"
+    "sh         %[tmp_5],           0(%[freq_buf_s])                 \n\t"
+    "sh         %[tmp_6],           2(%[imag])                       \n\t"
+    "sh         %[tmp_6],           2(%[freq_buf_s])                 \n\t"
+    "negu       %[tmp_6],           %[tmp_6]                         \n\t"
+    "addiu      %[freq_buf_s],      %[freq_buf_s],        -8         \n\t"
+    "addiu      %[real],            %[real],              4          \n\t"
+    "addiu      %[imag],            %[imag],              4          \n\t"
+    "sh         %[tmp_2],           0(%[freq_buf_f])                 \n\t"
+    "sh         %[tmp_3],           2(%[freq_buf_f])                 \n\t"
+    "sh         %[tmp_5],           4(%[freq_buf_f])                 \n\t"
+    "sh         %[tmp_6],           6(%[freq_buf_f])                 \n\t"
+    "blt        %[loop_count],      %[loop_size],         1b         \n\t"
+    " addiu     %[freq_buf_f],      %[freq_buf_f],        8          \n\t"
+    //last two samples:
+    "lh         %[tmp_1],           0(%[noiseSupFilter])             \n\t"
+    "lh         %[tmp_2],           0(%[real])                       \n\t"
+    "lh         %[tmp_3],           0(%[imag])                       \n\t"
+    "lh         %[tmp_4],           2(%[noiseSupFilter])             \n\t"
+    "lh         %[tmp_5],           2(%[real])                       \n\t"
+    "lh         %[tmp_6],           2(%[imag])                       \n\t"
+    "mul        %[tmp_2],           %[tmp_2],             %[tmp_1]   \n\t"
+    "mul        %[tmp_3],           %[tmp_3],             %[tmp_1]   \n\t"
+    "mul        %[tmp_5],           %[tmp_5],             %[tmp_4]   \n\t"
+    "mul        %[tmp_6],           %[tmp_6],             %[tmp_4]   \n\t"
+    "sra        %[tmp_2],           %[tmp_2],             14         \n\t"
+    "sra        %[tmp_3],           %[tmp_3],             14         \n\t"
+    "sra        %[tmp_5],           %[tmp_5],             14         \n\t"
+    "sra        %[tmp_6],           %[tmp_6],             14         \n\t"
+    "sh         %[tmp_2],           0(%[real])                       \n\t"
+    "sh         %[tmp_2],           4(%[freq_buf_s])                 \n\t"
+    "sh         %[tmp_3],           0(%[imag])                       \n\t"
+    "sh         %[tmp_3],           6(%[freq_buf_s])                 \n\t"
+    "negu       %[tmp_3],           %[tmp_3]                         \n\t"
+    "sh         %[tmp_2],           0(%[freq_buf_f])                 \n\t"
+    "sh         %[tmp_3],           2(%[freq_buf_f])                 \n\t"
+    "sh         %[tmp_5],           4(%[freq_buf_f])                 \n\t"
+    "sh         %[tmp_6],           6(%[freq_buf_f])                 \n\t"
+    "sh         %[tmp_5],           2(%[real])                       \n\t"
+    "sh         %[tmp_6],           2(%[imag])                       \n\t"
+    ".set       pop                                                  \n\t"
+    : [real] "+r" (real), [imag] "+r" (imag),
+      [freq_buf_f] "+r" (freq_buf_f), [freq_buf_s] "+r" (freq_buf_s),
+      [loop_count] "+r" (loop_count), [noiseSupFilter] "+r" (noiseSupFilter),
+      [tmp_1] "=&r" (tmp_1), [tmp_2] "=&r" (tmp_2), [tmp_3] "=&r" (tmp_3),
+      [tmp_4] "=&r" (tmp_4), [tmp_5] "=&r" (tmp_5), [tmp_6] "=&r" (tmp_6)
+    : [loop_size] "r" (inst->anaLen2)
+    : "memory", "hi", "lo"
+  );
+}
+
+#if defined(MIPS_DSP_R1_LE)
+// Denormalize the real-valued signal |in|, the output from inverse FFT.
+void WebRtcNsx_Denormalize_mips(NoiseSuppressionFixedC* inst,
+                                int16_t* in,
+                                int factor) {
+  int32_t r0, r1, r2, r3, t0;
+  int len = (int)inst->anaLen;
+  int16_t *out = &inst->real[0];
+  int shift = factor - inst->normData;
+
+  __asm __volatile (
+    ".set          push                                \n\t"
+    ".set          noreorder                           \n\t"
+    "beqz          %[len],     8f                      \n\t"
+    " nop                                              \n\t"
+    "bltz          %[shift],   4f                      \n\t"
+    " sra          %[t0],      %[len],      2          \n\t"
+    "beqz          %[t0],      2f                      \n\t"
+    " andi         %[len],     %[len],      3          \n\t"
+   "1:                                                 \n\t"
+    "lh            %[r0],      0(%[in])                \n\t"
+    "lh            %[r1],      2(%[in])                \n\t"
+    "lh            %[r2],      4(%[in])                \n\t"
+    "lh            %[r3],      6(%[in])                \n\t"
+    "shllv_s.ph    %[r0],      %[r0],       %[shift]   \n\t"
+    "shllv_s.ph    %[r1],      %[r1],       %[shift]   \n\t"
+    "shllv_s.ph    %[r2],      %[r2],       %[shift]   \n\t"
+    "shllv_s.ph    %[r3],      %[r3],       %[shift]   \n\t"
+    "addiu         %[in],      %[in],       8          \n\t"
+    "addiu         %[t0],      %[t0],       -1         \n\t"
+    "sh            %[r0],      0(%[out])               \n\t"
+    "sh            %[r1],      2(%[out])               \n\t"
+    "sh            %[r2],      4(%[out])               \n\t"
+    "sh            %[r3],      6(%[out])               \n\t"
+    "bgtz          %[t0],      1b                      \n\t"
+    " addiu        %[out],     %[out],      8          \n\t"
+   "2:                                                 \n\t"
+    "beqz          %[len],     8f                      \n\t"
+    " nop                                              \n\t"
+   "3:                                                 \n\t"
+    "lh            %[r0],      0(%[in])                \n\t"
+    "addiu         %[in],      %[in],       2          \n\t"
+    "addiu         %[len],     %[len],      -1         \n\t"
+    "shllv_s.ph    %[r0],      %[r0],       %[shift]   \n\t"
+    "addiu         %[out],     %[out],      2          \n\t"
+    "bgtz          %[len],     3b                      \n\t"
+    " sh           %[r0],      -2(%[out])              \n\t"
+    "b             8f                                  \n\t"
+   "4:                                                 \n\t"
+    "negu          %[shift],   %[shift]                \n\t"
+    "beqz          %[t0],      6f                      \n\t"
+    " andi         %[len],     %[len],      3          \n\t"
+   "5:                                                 \n\t"
+    "lh            %[r0],      0(%[in])                \n\t"
+    "lh            %[r1],      2(%[in])                \n\t"
+    "lh            %[r2],      4(%[in])                \n\t"
+    "lh            %[r3],      6(%[in])                \n\t"
+    "srav          %[r0],      %[r0],       %[shift]   \n\t"
+    "srav          %[r1],      %[r1],       %[shift]   \n\t"
+    "srav          %[r2],      %[r2],       %[shift]   \n\t"
+    "srav          %[r3],      %[r3],       %[shift]   \n\t"
+    "addiu         %[in],      %[in],       8          \n\t"
+    "addiu         %[t0],      %[t0],       -1         \n\t"
+    "sh            %[r0],      0(%[out])               \n\t"
+    "sh            %[r1],      2(%[out])               \n\t"
+    "sh            %[r2],      4(%[out])               \n\t"
+    "sh            %[r3],      6(%[out])               \n\t"
+    "bgtz          %[t0],      5b                      \n\t"
+    " addiu        %[out],     %[out],      8          \n\t"
+   "6:                                                 \n\t"
+    "beqz          %[len],     8f                      \n\t"
+    " nop                                              \n\t"
+   "7:                                                 \n\t"
+    "lh            %[r0],      0(%[in])                \n\t"
+    "addiu         %[in],      %[in],       2          \n\t"
+    "addiu         %[len],     %[len],      -1         \n\t"
+    "srav          %[r0],      %[r0],       %[shift]   \n\t"
+    "addiu         %[out],     %[out],      2          \n\t"
+    "bgtz          %[len],     7b                      \n\t"
+    " sh           %[r0],      -2(%[out])              \n\t"
+   "8:                                                 \n\t"
+    ".set          pop                                 \n\t"
+    : [t0] "=&r" (t0), [r0] "=&r" (r0), [r1] "=&r" (r1),
+      [r2] "=&r" (r2), [r3] "=&r" (r3)
+    : [len] "r" (len), [shift] "r" (shift), [in] "r" (in),
+      [out] "r" (out)
+    : "memory"
+  );
+}
+#endif
+
+// Normalize the real-valued signal |in|, the input to forward FFT.
+void WebRtcNsx_NormalizeRealBuffer_mips(NoiseSuppressionFixedC* inst,
+                                        const int16_t* in,
+                                        int16_t* out) {
+  int32_t r0, r1, r2, r3, t0;
+  int len = (int)inst->anaLen;
+  int shift = inst->normData;
+
+  __asm __volatile (
+    ".set          push                                \n\t"
+    ".set          noreorder                           \n\t"
+    "beqz          %[len],     4f                      \n\t"
+    " sra          %[t0],      %[len],      2          \n\t"
+    "beqz          %[t0],      2f                      \n\t"
+    " andi         %[len],     %[len],      3          \n\t"
+   "1:                                                 \n\t"
+    "lh            %[r0],      0(%[in])                \n\t"
+    "lh            %[r1],      2(%[in])                \n\t"
+    "lh            %[r2],      4(%[in])                \n\t"
+    "lh            %[r3],      6(%[in])                \n\t"
+    "sllv          %[r0],      %[r0],       %[shift]   \n\t"
+    "sllv          %[r1],      %[r1],       %[shift]   \n\t"
+    "sllv          %[r2],      %[r2],       %[shift]   \n\t"
+    "sllv          %[r3],      %[r3],       %[shift]   \n\t"
+    "addiu         %[in],      %[in],       8          \n\t"
+    "addiu         %[t0],      %[t0],       -1         \n\t"
+    "sh            %[r0],      0(%[out])               \n\t"
+    "sh            %[r1],      2(%[out])               \n\t"
+    "sh            %[r2],      4(%[out])               \n\t"
+    "sh            %[r3],      6(%[out])               \n\t"
+    "bgtz          %[t0],      1b                      \n\t"
+    " addiu        %[out],     %[out],      8          \n\t"
+   "2:                                                 \n\t"
+    "beqz          %[len],     4f                      \n\t"
+    " nop                                              \n\t"
+   "3:                                                 \n\t"
+    "lh            %[r0],      0(%[in])                \n\t"
+    "addiu         %[in],      %[in],       2          \n\t"
+    "addiu         %[len],     %[len],      -1         \n\t"
+    "sllv          %[r0],      %[r0],       %[shift]   \n\t"
+    "addiu         %[out],     %[out],      2          \n\t"
+    "bgtz          %[len],     3b                      \n\t"
+    " sh           %[r0],      -2(%[out])              \n\t"
+   "4:                                                 \n\t"
+    ".set          pop                                 \n\t"
+    : [t0] "=&r" (t0), [r0] "=&r" (r0), [r1] "=&r" (r1),
+      [r2] "=&r" (r2), [r3] "=&r" (r3)
+    : [len] "r" (len), [shift] "r" (shift), [in] "r" (in),
+      [out] "r" (out)
+    : "memory"
+  );
+}
+
diff --git a/modules/audio_processing/ns/nsx_core_neon.c b/modules/audio_processing/ns/nsx_core_neon.c
new file mode 100644
index 0000000..64ce99c
--- /dev/null
+++ b/modules/audio_processing/ns/nsx_core_neon.c
@@ -0,0 +1,606 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/ns/nsx_core.h"
+
+#include <arm_neon.h>
+
+#include "rtc_base/checks.h"
+
+// Constants to compensate for shifting signal log(2^shifts).
+const int16_t WebRtcNsx_kLogTable[9] = {
+  0, 177, 355, 532, 710, 887, 1065, 1242, 1420
+};
+
+const int16_t WebRtcNsx_kCounterDiv[201] = {
+  32767, 16384, 10923, 8192, 6554, 5461, 4681, 4096, 3641, 3277, 2979, 2731,
+  2521, 2341, 2185, 2048, 1928, 1820, 1725, 1638, 1560, 1489, 1425, 1365, 1311,
+  1260, 1214, 1170, 1130, 1092, 1057, 1024, 993, 964, 936, 910, 886, 862, 840,
+  819, 799, 780, 762, 745, 728, 712, 697, 683, 669, 655, 643, 630, 618, 607,
+  596, 585, 575, 565, 555, 546, 537, 529, 520, 512, 504, 496, 489, 482, 475,
+  468, 462, 455, 449, 443, 437, 431, 426, 420, 415, 410, 405, 400, 395, 390,
+  386, 381, 377, 372, 368, 364, 360, 356, 352, 349, 345, 341, 338, 334, 331,
+  328, 324, 321, 318, 315, 312, 309, 306, 303, 301, 298, 295, 293, 290, 287,
+  285, 282, 280, 278, 275, 273, 271, 269, 266, 264, 262, 260, 258, 256, 254,
+  252, 250, 248, 246, 245, 243, 241, 239, 237, 236, 234, 232, 231, 229, 228,
+  226, 224, 223, 221, 220, 218, 217, 216, 214, 213, 211, 210, 209, 207, 206,
+  205, 204, 202, 201, 200, 199, 197, 196, 195, 194, 193, 192, 191, 189, 188,
+  187, 186, 185, 184, 183, 182, 181, 180, 179, 178, 177, 176, 175, 174, 173,
+  172, 172, 171, 170, 169, 168, 167, 166, 165, 165, 164, 163
+};
+
+const int16_t WebRtcNsx_kLogTableFrac[256] = {
+  0, 1, 3, 4, 6, 7, 9, 10, 11, 13, 14, 16, 17, 18, 20, 21,
+  22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42,
+  44, 45, 46, 47, 49, 50, 51, 52, 54, 55, 56, 57, 59, 60, 61, 62,
+  63, 65, 66, 67, 68, 69, 71, 72, 73, 74, 75, 77, 78, 79, 80, 81,
+  82, 84, 85, 86, 87, 88, 89, 90, 92, 93, 94, 95, 96, 97, 98, 99,
+  100, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 116,
+  117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,
+  132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146,
+  147, 148, 149, 150, 151, 152, 153, 154, 155, 155, 156, 157, 158, 159, 160,
+  161, 162, 163, 164, 165, 166, 167, 168, 169, 169, 170, 171, 172, 173, 174,
+  175, 176, 177, 178, 178, 179, 180, 181, 182, 183, 184, 185, 185, 186, 187,
+  188, 189, 190, 191, 192, 192, 193, 194, 195, 196, 197, 198, 198, 199, 200,
+  201, 202, 203, 203, 204, 205, 206, 207, 208, 208, 209, 210, 211, 212, 212,
+  213, 214, 215, 216, 216, 217, 218, 219, 220, 220, 221, 222, 223, 224, 224,
+  225, 226, 227, 228, 228, 229, 230, 231, 231, 232, 233, 234, 234, 235, 236,
+  237, 238, 238, 239, 240, 241, 241, 242, 243, 244, 244, 245, 246, 247, 247,
+  248, 249, 249, 250, 251, 252, 252, 253, 254, 255, 255
+};
+
+// Update the noise estimation information.
+static void UpdateNoiseEstimateNeon(NoiseSuppressionFixedC* inst, int offset) {
+  const int16_t kExp2Const = 11819; // Q13
+  int16_t* ptr_noiseEstLogQuantile = NULL;
+  int16_t* ptr_noiseEstQuantile = NULL;
+  int16x4_t kExp2Const16x4 = vdup_n_s16(kExp2Const);
+  int32x4_t twentyOne32x4 = vdupq_n_s32(21);
+  int32x4_t constA32x4 = vdupq_n_s32(0x1fffff);
+  int32x4_t constB32x4 = vdupq_n_s32(0x200000);
+
+  int16_t tmp16 = WebRtcSpl_MaxValueW16(inst->noiseEstLogQuantile + offset,
+                                        inst->magnLen);
+
+  // Guarantee a Q-domain as high as possible and still fit in int16
+  inst->qNoise = 14 - (int) WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(kExp2Const,
+                                                                 tmp16,
+                                                                 21);
+
+  int32x4_t qNoise32x4 = vdupq_n_s32(inst->qNoise);
+
+  for (ptr_noiseEstLogQuantile = &inst->noiseEstLogQuantile[offset],
+       ptr_noiseEstQuantile = &inst->noiseEstQuantile[0];
+       ptr_noiseEstQuantile < &inst->noiseEstQuantile[inst->magnLen - 3];
+       ptr_noiseEstQuantile += 4, ptr_noiseEstLogQuantile += 4) {
+
+    // tmp32no2 = kExp2Const * inst->noiseEstLogQuantile[offset + i];
+    int16x4_t v16x4 = vld1_s16(ptr_noiseEstLogQuantile);
+    int32x4_t v32x4B = vmull_s16(v16x4, kExp2Const16x4);
+
+    // tmp32no1 = (0x00200000 | (tmp32no2 & 0x001FFFFF)); // 2^21 + frac
+    int32x4_t v32x4A = vandq_s32(v32x4B, constA32x4);
+    v32x4A = vorrq_s32(v32x4A, constB32x4);
+
+    // tmp16 = (int16_t)(tmp32no2 >> 21);
+    v32x4B = vshrq_n_s32(v32x4B, 21);
+
+    // tmp16 -= 21;// shift 21 to get result in Q0
+    v32x4B = vsubq_s32(v32x4B, twentyOne32x4);
+
+    // tmp16 += (int16_t) inst->qNoise;
+    // shift to get result in Q(qNoise)
+    v32x4B = vaddq_s32(v32x4B, qNoise32x4);
+
+    // if (tmp16 < 0) {
+    //   tmp32no1 >>= -tmp16;
+    // } else {
+    //   tmp32no1 <<= tmp16;
+    // }
+    v32x4B = vshlq_s32(v32x4A, v32x4B);
+
+    // tmp16 = WebRtcSpl_SatW32ToW16(tmp32no1);
+    v16x4 = vqmovn_s32(v32x4B);
+
+    //inst->noiseEstQuantile[i] = tmp16;
+    vst1_s16(ptr_noiseEstQuantile, v16x4);
+  }
+
+  // Last iteration:
+
+  // inst->quantile[i]=exp(inst->lquantile[offset+i]);
+  // in Q21
+  int32_t tmp32no2 = kExp2Const * *ptr_noiseEstLogQuantile;
+  int32_t tmp32no1 = (0x00200000 | (tmp32no2 & 0x001FFFFF)); // 2^21 + frac
+
+  tmp16 = (int16_t)(tmp32no2 >> 21);
+  tmp16 -= 21;// shift 21 to get result in Q0
+  tmp16 += (int16_t) inst->qNoise; //shift to get result in Q(qNoise)
+  if (tmp16 < 0) {
+    tmp32no1 >>= -tmp16;
+  } else {
+    tmp32no1 <<= tmp16;
+  }
+  *ptr_noiseEstQuantile = WebRtcSpl_SatW32ToW16(tmp32no1);
+}
+
+// Noise Estimation
+void WebRtcNsx_NoiseEstimationNeon(NoiseSuppressionFixedC* inst,
+                                   uint16_t* magn,
+                                   uint32_t* noise,
+                                   int16_t* q_noise) {
+  int16_t lmagn[HALF_ANAL_BLOCKL], counter, countDiv;
+  int16_t countProd, delta, zeros, frac;
+  int16_t log2, tabind, logval, tmp16, tmp16no1, tmp16no2;
+  const int16_t log2_const = 22713;
+  const int16_t width_factor = 21845;
+
+  size_t i, s, offset;
+
+  tabind = inst->stages - inst->normData;
+  RTC_DCHECK_LT(tabind, 9);
+  RTC_DCHECK_GT(tabind, -9);
+  if (tabind < 0) {
+    logval = -WebRtcNsx_kLogTable[-tabind];
+  } else {
+    logval = WebRtcNsx_kLogTable[tabind];
+  }
+
+  int16x8_t logval_16x8 = vdupq_n_s16(logval);
+
+  // lmagn(i)=log(magn(i))=log(2)*log2(magn(i))
+  // magn is in Q(-stages), and the real lmagn values are:
+  // real_lmagn(i)=log(magn(i)*2^stages)=log(magn(i))+log(2^stages)
+  // lmagn in Q8
+  for (i = 0; i < inst->magnLen; i++) {
+    if (magn[i]) {
+      zeros = WebRtcSpl_NormU32((uint32_t)magn[i]);
+      frac = (int16_t)((((uint32_t)magn[i] << zeros)
+                        & 0x7FFFFFFF) >> 23);
+      RTC_DCHECK_LT(frac, 256);
+      // log2(magn(i))
+      log2 = (int16_t)(((31 - zeros) << 8)
+                       + WebRtcNsx_kLogTableFrac[frac]);
+      // log2(magn(i))*log(2)
+      lmagn[i] = (int16_t)((log2 * log2_const) >> 15);
+      // + log(2^stages)
+      lmagn[i] += logval;
+    } else {
+      lmagn[i] = logval;
+    }
+  }
+
+  int16x4_t Q3_16x4  = vdup_n_s16(3);
+  int16x8_t WIDTHQ8_16x8 = vdupq_n_s16(WIDTH_Q8);
+  int16x8_t WIDTHFACTOR_16x8 = vdupq_n_s16(width_factor);
+
+  int16_t factor = FACTOR_Q7;
+  if (inst->blockIndex < END_STARTUP_LONG)
+    factor = FACTOR_Q7_STARTUP;
+
+  // Loop over simultaneous estimates
+  for (s = 0; s < SIMULT; s++) {
+    offset = s * inst->magnLen;
+
+    // Get counter values from state
+    counter = inst->noiseEstCounter[s];
+    RTC_DCHECK_LT(counter, 201);
+    countDiv = WebRtcNsx_kCounterDiv[counter];
+    countProd = (int16_t)(counter * countDiv);
+
+    // quant_est(...)
+    int16_t deltaBuff[8];
+    int16x4_t tmp16x4_0;
+    int16x4_t tmp16x4_1;
+    int16x4_t countDiv_16x4 = vdup_n_s16(countDiv);
+    int16x8_t countProd_16x8 = vdupq_n_s16(countProd);
+    int16x8_t tmp16x8_0 = vdupq_n_s16(countDiv);
+    int16x8_t prod16x8 = vqrdmulhq_s16(WIDTHFACTOR_16x8, tmp16x8_0);
+    int16x8_t tmp16x8_1;
+    int16x8_t tmp16x8_2;
+    int16x8_t tmp16x8_3;
+    uint16x8_t tmp16x8_4;
+    int32x4_t tmp32x4;
+
+    for (i = 0; i + 7 < inst->magnLen; i += 8) {
+      // Compute delta.
+      // Smaller step size during startup. This prevents from using
+      // unrealistic values causing overflow.
+      tmp16x8_0 = vdupq_n_s16(factor);
+      vst1q_s16(deltaBuff, tmp16x8_0);
+
+      int j;
+      for (j = 0; j < 8; j++) {
+        if (inst->noiseEstDensity[offset + i + j] > 512) {
+          // Get values for deltaBuff by shifting intead of dividing.
+          int factor = WebRtcSpl_NormW16(inst->noiseEstDensity[offset + i + j]);
+          deltaBuff[j] = (int16_t)(FACTOR_Q16 >> (14 - factor));
+        }
+      }
+
+      // Update log quantile estimate
+
+      // tmp16 = (int16_t)((delta * countDiv) >> 14);
+      tmp32x4 = vmull_s16(vld1_s16(&deltaBuff[0]), countDiv_16x4);
+      tmp16x4_1 = vshrn_n_s32(tmp32x4, 14);
+      tmp32x4 = vmull_s16(vld1_s16(&deltaBuff[4]), countDiv_16x4);
+      tmp16x4_0 = vshrn_n_s32(tmp32x4, 14);
+      tmp16x8_0 = vcombine_s16(tmp16x4_1, tmp16x4_0); // Keep for several lines.
+
+      // prepare for the "if" branch
+      // tmp16 += 2;
+      // tmp16_1 = (Word16)(tmp16>>2);
+      tmp16x8_1 = vrshrq_n_s16(tmp16x8_0, 2);
+
+      // inst->noiseEstLogQuantile[offset+i] + tmp16_1;
+      tmp16x8_2 = vld1q_s16(&inst->noiseEstLogQuantile[offset + i]); // Keep
+      tmp16x8_1 = vaddq_s16(tmp16x8_2, tmp16x8_1); // Keep for several lines
+
+      // Prepare for the "else" branch
+      // tmp16 += 1;
+      // tmp16_1 = (Word16)(tmp16>>1);
+      tmp16x8_0 = vrshrq_n_s16(tmp16x8_0, 1);
+
+      // tmp16_2 = (int16_t)((tmp16_1 * 3) >> 1);
+      tmp32x4 = vmull_s16(vget_low_s16(tmp16x8_0), Q3_16x4);
+      tmp16x4_1 = vshrn_n_s32(tmp32x4, 1);
+
+      // tmp16_2 = (int16_t)((tmp16_1 * 3) >> 1);
+      tmp32x4 = vmull_s16(vget_high_s16(tmp16x8_0), Q3_16x4);
+      tmp16x4_0 = vshrn_n_s32(tmp32x4, 1);
+
+      // inst->noiseEstLogQuantile[offset + i] - tmp16_2;
+      tmp16x8_0 = vcombine_s16(tmp16x4_1, tmp16x4_0); // keep
+      tmp16x8_0 = vsubq_s16(tmp16x8_2, tmp16x8_0);
+
+      // logval is the smallest fixed point representation we can have. Values
+      // below that will correspond to values in the interval [0, 1], which
+      // can't possibly occur.
+      tmp16x8_0 = vmaxq_s16(tmp16x8_0, logval_16x8);
+
+      // Do the if-else branches:
+      tmp16x8_3 = vld1q_s16(&lmagn[i]); // keep for several lines
+      tmp16x8_4 = vcgtq_s16(tmp16x8_3, tmp16x8_2);
+      tmp16x8_2 = vbslq_s16(tmp16x8_4, tmp16x8_1, tmp16x8_0);
+      vst1q_s16(&inst->noiseEstLogQuantile[offset + i], tmp16x8_2);
+
+      // Update density estimate
+      // tmp16_1 + tmp16_2
+      tmp16x8_1 = vld1q_s16(&inst->noiseEstDensity[offset + i]);
+      tmp16x8_0 = vqrdmulhq_s16(tmp16x8_1, countProd_16x8);
+      tmp16x8_0 = vaddq_s16(tmp16x8_0, prod16x8);
+
+      // lmagn[i] - inst->noiseEstLogQuantile[offset + i]
+      tmp16x8_3 = vsubq_s16(tmp16x8_3, tmp16x8_2);
+      tmp16x8_3 = vabsq_s16(tmp16x8_3);
+      tmp16x8_4 = vcgtq_s16(WIDTHQ8_16x8, tmp16x8_3);
+      tmp16x8_1 = vbslq_s16(tmp16x8_4, tmp16x8_0, tmp16x8_1);
+      vst1q_s16(&inst->noiseEstDensity[offset + i], tmp16x8_1);
+    }  // End loop over magnitude spectrum
+
+    // Last iteration over magnitude spectrum:
+    // compute delta
+    if (inst->noiseEstDensity[offset + i] > 512) {
+      // Get values for deltaBuff by shifting intead of dividing.
+      int factor = WebRtcSpl_NormW16(inst->noiseEstDensity[offset + i]);
+      delta = (int16_t)(FACTOR_Q16 >> (14 - factor));
+    } else {
+      delta = FACTOR_Q7;
+      if (inst->blockIndex < END_STARTUP_LONG) {
+        // Smaller step size during startup. This prevents from using
+        // unrealistic values causing overflow.
+        delta = FACTOR_Q7_STARTUP;
+      }
+    }
+    // update log quantile estimate
+    tmp16 = (int16_t)((delta * countDiv) >> 14);
+    if (lmagn[i] > inst->noiseEstLogQuantile[offset + i]) {
+      // +=QUANTILE*delta/(inst->counter[s]+1) QUANTILE=0.25, =1 in Q2
+      // CounterDiv=1/(inst->counter[s]+1) in Q15
+      tmp16 += 2;
+      inst->noiseEstLogQuantile[offset + i] += tmp16 / 4;
+    } else {
+      tmp16 += 1;
+      // *(1-QUANTILE), in Q2 QUANTILE=0.25, 1-0.25=0.75=3 in Q2
+      // TODO(bjornv): investigate why we need to truncate twice.
+      tmp16no2 = (int16_t)((tmp16 / 2) * 3 / 2);
+      inst->noiseEstLogQuantile[offset + i] -= tmp16no2;
+      if (inst->noiseEstLogQuantile[offset + i] < logval) {
+        // logval is the smallest fixed point representation we can have.
+        // Values below that will correspond to values in the interval
+        // [0, 1], which can't possibly occur.
+        inst->noiseEstLogQuantile[offset + i] = logval;
+      }
+    }
+
+    // update density estimate
+    if (WEBRTC_SPL_ABS_W16(lmagn[i] - inst->noiseEstLogQuantile[offset + i])
+        < WIDTH_Q8) {
+      tmp16no1 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+                   inst->noiseEstDensity[offset + i], countProd, 15);
+      tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+                   width_factor, countDiv, 15);
+      inst->noiseEstDensity[offset + i] = tmp16no1 + tmp16no2;
+    }
+
+
+    if (counter >= END_STARTUP_LONG) {
+      inst->noiseEstCounter[s] = 0;
+      if (inst->blockIndex >= END_STARTUP_LONG) {
+        UpdateNoiseEstimateNeon(inst, offset);
+      }
+    }
+    inst->noiseEstCounter[s]++;
+
+  }  // end loop over simultaneous estimates
+
+  // Sequentially update the noise during startup
+  if (inst->blockIndex < END_STARTUP_LONG) {
+    UpdateNoiseEstimateNeon(inst, offset);
+  }
+
+  for (i = 0; i < inst->magnLen; i++) {
+    noise[i] = (uint32_t)(inst->noiseEstQuantile[i]); // Q(qNoise)
+  }
+  (*q_noise) = (int16_t)inst->qNoise;
+}
+
+// Filter the data in the frequency domain, and create spectrum.
+void WebRtcNsx_PrepareSpectrumNeon(NoiseSuppressionFixedC* inst,
+                                   int16_t* freq_buf) {
+  RTC_DCHECK_EQ(1, inst->magnLen % 8);
+  RTC_DCHECK_EQ(0, inst->anaLen2 % 16);
+
+  // (1) Filtering.
+
+  // Fixed point C code for the next block is as follows:
+  // for (i = 0; i < inst->magnLen; i++) {
+  //   inst->real[i] = (int16_t)((inst->real[i] *
+  //      (int16_t)(inst->noiseSupFilter[i])) >> 14);  // Q(normData-stages)
+  //   inst->imag[i] = (int16_t)((inst->imag[i] *
+  //      (int16_t)(inst->noiseSupFilter[i])) >> 14);  // Q(normData-stages)
+  // }
+
+  int16_t* preal = &inst->real[0];
+  int16_t* pimag = &inst->imag[0];
+  int16_t* pns_filter = (int16_t*)&inst->noiseSupFilter[0];
+  int16_t* pimag_end = pimag + inst->magnLen - 4;
+
+  while (pimag < pimag_end) {
+    int16x8_t real = vld1q_s16(preal);
+    int16x8_t imag = vld1q_s16(pimag);
+    int16x8_t ns_filter = vld1q_s16(pns_filter);
+
+    int32x4_t tmp_r_0 = vmull_s16(vget_low_s16(real), vget_low_s16(ns_filter));
+    int32x4_t tmp_i_0 = vmull_s16(vget_low_s16(imag), vget_low_s16(ns_filter));
+    int32x4_t tmp_r_1 = vmull_s16(vget_high_s16(real),
+                                  vget_high_s16(ns_filter));
+    int32x4_t tmp_i_1 = vmull_s16(vget_high_s16(imag),
+                                  vget_high_s16(ns_filter));
+
+    int16x4_t result_r_0 = vshrn_n_s32(tmp_r_0, 14);
+    int16x4_t result_i_0 = vshrn_n_s32(tmp_i_0, 14);
+    int16x4_t result_r_1 = vshrn_n_s32(tmp_r_1, 14);
+    int16x4_t result_i_1 = vshrn_n_s32(tmp_i_1, 14);
+
+    vst1q_s16(preal, vcombine_s16(result_r_0, result_r_1));
+    vst1q_s16(pimag, vcombine_s16(result_i_0, result_i_1));
+    preal += 8;
+    pimag += 8;
+    pns_filter += 8;
+  }
+
+  // Filter the last element
+  *preal = (int16_t)((*preal * *pns_filter) >> 14);
+  *pimag = (int16_t)((*pimag * *pns_filter) >> 14);
+
+  // (2) Create spectrum.
+
+  // Fixed point C code for the rest of the function is as follows:
+  // freq_buf[0] = inst->real[0];
+  // freq_buf[1] = -inst->imag[0];
+  // for (i = 1, j = 2; i < inst->anaLen2; i += 1, j += 2) {
+  //   freq_buf[j] = inst->real[i];
+  //   freq_buf[j + 1] = -inst->imag[i];
+  // }
+  // freq_buf[inst->anaLen] = inst->real[inst->anaLen2];
+  // freq_buf[inst->anaLen + 1] = -inst->imag[inst->anaLen2];
+
+  preal = &inst->real[0];
+  pimag = &inst->imag[0];
+  pimag_end = pimag + inst->anaLen2;
+  int16_t * freq_buf_start = freq_buf;
+  while (pimag < pimag_end) {
+    // loop unroll
+    int16x8x2_t real_imag_0;
+    int16x8x2_t real_imag_1;
+    real_imag_0.val[1] = vld1q_s16(pimag);
+    real_imag_0.val[0] = vld1q_s16(preal);
+    preal += 8;
+    pimag += 8;
+    real_imag_1.val[1] = vld1q_s16(pimag);
+    real_imag_1.val[0] = vld1q_s16(preal);
+    preal += 8;
+    pimag += 8;
+
+    real_imag_0.val[1] = vnegq_s16(real_imag_0.val[1]);
+    real_imag_1.val[1] = vnegq_s16(real_imag_1.val[1]);
+    vst2q_s16(freq_buf_start, real_imag_0);
+    freq_buf_start += 16;
+    vst2q_s16(freq_buf_start, real_imag_1);
+    freq_buf_start += 16;
+  }
+  freq_buf[inst->anaLen] = inst->real[inst->anaLen2];
+  freq_buf[inst->anaLen + 1] = -inst->imag[inst->anaLen2];
+}
+
+// For the noise supress process, synthesis, read out fully processed segment,
+// and update synthesis buffer.
+void WebRtcNsx_SynthesisUpdateNeon(NoiseSuppressionFixedC* inst,
+                                   int16_t* out_frame,
+                                   int16_t gain_factor) {
+  RTC_DCHECK_EQ(0, inst->anaLen % 16);
+  RTC_DCHECK_EQ(0, inst->blockLen10ms % 16);
+
+  int16_t* preal_start = inst->real;
+  const int16_t* pwindow = inst->window;
+  int16_t* preal_end = preal_start + inst->anaLen;
+  int16_t* psynthesis_buffer = inst->synthesisBuffer;
+
+  while (preal_start < preal_end) {
+    // Loop unroll.
+    int16x8_t window_0 = vld1q_s16(pwindow);
+    int16x8_t real_0 = vld1q_s16(preal_start);
+    int16x8_t synthesis_buffer_0 = vld1q_s16(psynthesis_buffer);
+
+    int16x8_t window_1 = vld1q_s16(pwindow + 8);
+    int16x8_t real_1 = vld1q_s16(preal_start + 8);
+    int16x8_t synthesis_buffer_1 = vld1q_s16(psynthesis_buffer + 8);
+
+    int32x4_t tmp32a_0_low = vmull_s16(vget_low_s16(real_0),
+                                       vget_low_s16(window_0));
+    int32x4_t tmp32a_0_high = vmull_s16(vget_high_s16(real_0),
+                                        vget_high_s16(window_0));
+
+    int32x4_t tmp32a_1_low = vmull_s16(vget_low_s16(real_1),
+                                       vget_low_s16(window_1));
+    int32x4_t tmp32a_1_high = vmull_s16(vget_high_s16(real_1),
+                                        vget_high_s16(window_1));
+
+    int16x4_t tmp16a_0_low = vqrshrn_n_s32(tmp32a_0_low, 14);
+    int16x4_t tmp16a_0_high = vqrshrn_n_s32(tmp32a_0_high, 14);
+
+    int16x4_t tmp16a_1_low = vqrshrn_n_s32(tmp32a_1_low, 14);
+    int16x4_t tmp16a_1_high = vqrshrn_n_s32(tmp32a_1_high, 14);
+
+    int32x4_t tmp32b_0_low = vmull_n_s16(tmp16a_0_low, gain_factor);
+    int32x4_t tmp32b_0_high = vmull_n_s16(tmp16a_0_high, gain_factor);
+
+    int32x4_t tmp32b_1_low = vmull_n_s16(tmp16a_1_low, gain_factor);
+    int32x4_t tmp32b_1_high = vmull_n_s16(tmp16a_1_high, gain_factor);
+
+    int16x4_t tmp16b_0_low = vqrshrn_n_s32(tmp32b_0_low, 13);
+    int16x4_t tmp16b_0_high = vqrshrn_n_s32(tmp32b_0_high, 13);
+
+    int16x4_t tmp16b_1_low = vqrshrn_n_s32(tmp32b_1_low, 13);
+    int16x4_t tmp16b_1_high = vqrshrn_n_s32(tmp32b_1_high, 13);
+
+    synthesis_buffer_0 = vqaddq_s16(vcombine_s16(tmp16b_0_low, tmp16b_0_high),
+                                    synthesis_buffer_0);
+    synthesis_buffer_1 = vqaddq_s16(vcombine_s16(tmp16b_1_low, tmp16b_1_high),
+                                    synthesis_buffer_1);
+    vst1q_s16(psynthesis_buffer, synthesis_buffer_0);
+    vst1q_s16(psynthesis_buffer + 8, synthesis_buffer_1);
+
+    pwindow += 16;
+    preal_start += 16;
+    psynthesis_buffer += 16;
+  }
+
+  // Read out fully processed segment.
+  int16_t * p_start = inst->synthesisBuffer;
+  int16_t * p_end = inst->synthesisBuffer + inst->blockLen10ms;
+  int16_t * p_frame = out_frame;
+  while (p_start < p_end) {
+    int16x8_t frame_0 = vld1q_s16(p_start);
+    vst1q_s16(p_frame, frame_0);
+    p_start += 8;
+    p_frame += 8;
+  }
+
+  // Update synthesis buffer.
+  int16_t* p_start_src = inst->synthesisBuffer + inst->blockLen10ms;
+  int16_t* p_end_src = inst->synthesisBuffer + inst->anaLen;
+  int16_t* p_start_dst = inst->synthesisBuffer;
+  while (p_start_src < p_end_src) {
+    int16x8_t frame = vld1q_s16(p_start_src);
+    vst1q_s16(p_start_dst, frame);
+    p_start_src += 8;
+    p_start_dst += 8;
+  }
+
+  p_start = inst->synthesisBuffer + inst->anaLen - inst->blockLen10ms;
+  p_end = p_start + inst->blockLen10ms;
+  int16x8_t zero = vdupq_n_s16(0);
+  for (;p_start < p_end; p_start += 8) {
+    vst1q_s16(p_start, zero);
+  }
+}
+
+// Update analysis buffer for lower band, and window data before FFT.
+void WebRtcNsx_AnalysisUpdateNeon(NoiseSuppressionFixedC* inst,
+                                  int16_t* out,
+                                  int16_t* new_speech) {
+  RTC_DCHECK_EQ(0, inst->blockLen10ms % 16);
+  RTC_DCHECK_EQ(0, inst->anaLen % 16);
+
+  // For lower band update analysis buffer.
+  // memcpy(inst->analysisBuffer, inst->analysisBuffer + inst->blockLen10ms,
+  //     (inst->anaLen - inst->blockLen10ms) * sizeof(*inst->analysisBuffer));
+  int16_t* p_start_src = inst->analysisBuffer + inst->blockLen10ms;
+  int16_t* p_end_src = inst->analysisBuffer + inst->anaLen;
+  int16_t* p_start_dst = inst->analysisBuffer;
+  while (p_start_src < p_end_src) {
+    int16x8_t frame = vld1q_s16(p_start_src);
+    vst1q_s16(p_start_dst, frame);
+
+    p_start_src += 8;
+    p_start_dst += 8;
+  }
+
+  // memcpy(inst->analysisBuffer + inst->anaLen - inst->blockLen10ms,
+  //     new_speech, inst->blockLen10ms * sizeof(*inst->analysisBuffer));
+  p_start_src = new_speech;
+  p_end_src = new_speech + inst->blockLen10ms;
+  p_start_dst = inst->analysisBuffer + inst->anaLen - inst->blockLen10ms;
+  while (p_start_src < p_end_src) {
+    int16x8_t frame = vld1q_s16(p_start_src);
+    vst1q_s16(p_start_dst, frame);
+
+    p_start_src += 8;
+    p_start_dst += 8;
+  }
+
+  // Window data before FFT.
+  int16_t* p_start_window = (int16_t*) inst->window;
+  int16_t* p_start_buffer = inst->analysisBuffer;
+  int16_t* p_end_buffer = inst->analysisBuffer + inst->anaLen;
+  int16_t* p_start_out = out;
+
+  // Load the first element to reduce pipeline bubble.
+  int16x8_t window = vld1q_s16(p_start_window);
+  int16x8_t buffer = vld1q_s16(p_start_buffer);
+  p_start_window += 8;
+  p_start_buffer += 8;
+
+  while (p_start_buffer < p_end_buffer) {
+    // Unroll loop.
+    int32x4_t tmp32_low = vmull_s16(vget_low_s16(window), vget_low_s16(buffer));
+    int32x4_t tmp32_high = vmull_s16(vget_high_s16(window),
+                                     vget_high_s16(buffer));
+    window = vld1q_s16(p_start_window);
+    buffer = vld1q_s16(p_start_buffer);
+
+    int16x4_t result_low = vrshrn_n_s32(tmp32_low, 14);
+    int16x4_t result_high = vrshrn_n_s32(tmp32_high, 14);
+    vst1q_s16(p_start_out, vcombine_s16(result_low, result_high));
+
+    p_start_buffer += 8;
+    p_start_window += 8;
+    p_start_out += 8;
+  }
+  int32x4_t tmp32_low = vmull_s16(vget_low_s16(window), vget_low_s16(buffer));
+  int32x4_t tmp32_high = vmull_s16(vget_high_s16(window),
+                                   vget_high_s16(buffer));
+
+  int16x4_t result_low = vrshrn_n_s32(tmp32_low, 14);
+  int16x4_t result_high = vrshrn_n_s32(tmp32_high, 14);
+  vst1q_s16(p_start_out, vcombine_s16(result_low, result_high));
+}
diff --git a/modules/audio_processing/ns/nsx_defines.h b/modules/audio_processing/ns/nsx_defines.h
new file mode 100644
index 0000000..12869b3
--- /dev/null
+++ b/modules/audio_processing/ns/nsx_defines.h
@@ -0,0 +1,64 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NSX_DEFINES_H_
+#define MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NSX_DEFINES_H_
+
+#define ANAL_BLOCKL_MAX         256 /* Max analysis block length */
+#define HALF_ANAL_BLOCKL        129 /* Half max analysis block length + 1 */
+#define NUM_HIGH_BANDS_MAX      2   /* Max number of high bands */
+#define SIMULT                  3
+#define END_STARTUP_LONG        200
+#define END_STARTUP_SHORT       50
+#define FACTOR_Q16              2621440 /* 40 in Q16 */
+#define FACTOR_Q7               5120 /* 40 in Q7 */
+#define FACTOR_Q7_STARTUP       1024 /* 8 in Q7 */
+#define WIDTH_Q8                3 /* 0.01 in Q8 (or 25 ) */
+
+/* PARAMETERS FOR NEW METHOD */
+#define DD_PR_SNR_Q11           2007 /* ~= Q11(0.98) DD update of prior SNR */
+#define ONE_MINUS_DD_PR_SNR_Q11 41 /* DD update of prior SNR */
+#define SPECT_FLAT_TAVG_Q14     4915 /* (0.30) tavg parameter for spectral flatness measure */
+#define SPECT_DIFF_TAVG_Q8      77 /* (0.30) tavg parameter for spectral flatness measure */
+#define PRIOR_UPDATE_Q14        1638 /* Q14(0.1) Update parameter of prior model */
+#define NOISE_UPDATE_Q8         26 /* 26 ~= Q8(0.1) Update parameter for noise */
+
+/* Probability threshold for noise state in speech/noise likelihood. */
+#define ONE_MINUS_PROB_RANGE_Q8 205 /* 205 ~= Q8(0.8) */
+#define HIST_PAR_EST            1000 /* Histogram size for estimation of parameters */
+
+/* FEATURE EXTRACTION CONFIG  */
+/* Bin size of histogram */
+#define BIN_SIZE_LRT            10
+/* Scale parameters: multiply dominant peaks of the histograms by scale factor to obtain. */
+/* Thresholds for prior model */
+#define FACTOR_1_LRT_DIFF       6 /* For LRT and spectral difference (5 times bigger) */
+/* For spectral_flatness: used when noise is flatter than speech (10 times bigger). */
+#define FACTOR_2_FLAT_Q10       922
+/* Peak limit for spectral flatness (varies between 0 and 1) */
+#define THRES_PEAK_FLAT         24 /* * 2 * BIN_SIZE_FLAT_FX */
+/* Limit on spacing of two highest peaks in histogram: spacing determined by bin size. */
+#define LIM_PEAK_SPACE_FLAT_DIFF    4 /* * 2 * BIN_SIZE_DIFF_FX */
+/* Limit on relevance of second peak */
+#define LIM_PEAK_WEIGHT_FLAT_DIFF   2
+#define THRES_FLUCT_LRT         10240 /* = 20 * inst->modelUpdate; fluctuation limit of LRT feat. */
+/* Limit on the max and min values for the feature thresholds */
+#define MAX_FLAT_Q10            38912 /*  * 2 * BIN_SIZE_FLAT_FX */
+#define MIN_FLAT_Q10            4096 /*  * 2 * BIN_SIZE_FLAT_FX */
+#define MAX_DIFF                100 /* * 2 * BIN_SIZE_DIFF_FX */
+#define MIN_DIFF                16 /* * 2 * BIN_SIZE_DIFF_FX */
+/* Criteria of weight of histogram peak  to accept/reject feature */
+#define THRES_WEIGHT_FLAT_DIFF  154 /*(int)(0.3*(inst->modelUpdate)) for flatness and difference */
+
+#define STAT_UPDATES            9 /* Update every 512 = 1 << 9 block */
+#define ONE_MINUS_GAMMA_PAUSE_Q8    13 /* ~= Q8(0.05) Update for conservative noise estimate */
+#define GAMMA_NOISE_TRANS_AND_SPEECH_Q8 3 /* ~= Q8(0.01) Update for transition and noise region */
+
+#endif /* MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NSX_DEFINES_H_ */
diff --git a/modules/audio_processing/ns/windows_private.h b/modules/audio_processing/ns/windows_private.h
new file mode 100644
index 0000000..2ffd693
--- /dev/null
+++ b/modules/audio_processing/ns/windows_private.h
@@ -0,0 +1,574 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_WINDOWS_PRIVATE_H_
+#define MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_WINDOWS_PRIVATE_H_
+
+// Hanning window for 4ms 16kHz
+static const float kHanning64w128[128] = {
+  0.00000000000000f, 0.02454122852291f, 0.04906767432742f,
+  0.07356456359967f, 0.09801714032956f, 0.12241067519922f,
+  0.14673047445536f, 0.17096188876030f, 0.19509032201613f,
+  0.21910124015687f, 0.24298017990326f, 0.26671275747490f,
+  0.29028467725446f, 0.31368174039889f, 0.33688985339222f,
+  0.35989503653499f, 0.38268343236509f, 0.40524131400499f,
+  0.42755509343028f, 0.44961132965461f, 0.47139673682600f,
+  0.49289819222978f, 0.51410274419322f, 0.53499761988710f,
+  0.55557023301960f, 0.57580819141785f, 0.59569930449243f,
+  0.61523159058063f, 0.63439328416365f, 0.65317284295378f,
+  0.67155895484702f, 0.68954054473707f, 0.70710678118655f,
+  0.72424708295147f, 0.74095112535496f, 0.75720884650648f,
+  0.77301045336274f, 0.78834642762661f, 0.80320753148064f,
+  0.81758481315158f, 0.83146961230255f, 0.84485356524971f,
+  0.85772861000027f, 0.87008699110871f, 0.88192126434835f,
+  0.89322430119552f, 0.90398929312344f, 0.91420975570353f,
+  0.92387953251129f, 0.93299279883474f, 0.94154406518302f,
+  0.94952818059304f, 0.95694033573221f, 0.96377606579544f,
+  0.97003125319454f, 0.97570213003853f, 0.98078528040323f,
+  0.98527764238894f, 0.98917650996478f, 0.99247953459871f,
+  0.99518472667220f, 0.99729045667869f, 0.99879545620517f,
+  0.99969881869620f, 1.00000000000000f,
+  0.99969881869620f, 0.99879545620517f, 0.99729045667869f,
+  0.99518472667220f, 0.99247953459871f, 0.98917650996478f,
+  0.98527764238894f, 0.98078528040323f, 0.97570213003853f,
+  0.97003125319454f, 0.96377606579544f, 0.95694033573221f,
+  0.94952818059304f, 0.94154406518302f, 0.93299279883474f,
+  0.92387953251129f, 0.91420975570353f, 0.90398929312344f,
+  0.89322430119552f, 0.88192126434835f, 0.87008699110871f,
+  0.85772861000027f, 0.84485356524971f, 0.83146961230255f,
+  0.81758481315158f, 0.80320753148064f, 0.78834642762661f,
+  0.77301045336274f, 0.75720884650648f, 0.74095112535496f,
+  0.72424708295147f, 0.70710678118655f, 0.68954054473707f,
+  0.67155895484702f, 0.65317284295378f, 0.63439328416365f,
+  0.61523159058063f, 0.59569930449243f, 0.57580819141785f,
+  0.55557023301960f, 0.53499761988710f, 0.51410274419322f,
+  0.49289819222978f, 0.47139673682600f, 0.44961132965461f,
+  0.42755509343028f, 0.40524131400499f, 0.38268343236509f,
+  0.35989503653499f, 0.33688985339222f, 0.31368174039889f,
+  0.29028467725446f, 0.26671275747490f, 0.24298017990326f,
+  0.21910124015687f, 0.19509032201613f, 0.17096188876030f,
+  0.14673047445536f, 0.12241067519922f, 0.09801714032956f,
+  0.07356456359967f, 0.04906767432742f, 0.02454122852291f
+};
+
+
+
+// hybrib Hanning & flat window
+static const float kBlocks80w128[128] = {
+  (float)0.00000000, (float)0.03271908, (float)0.06540313, (float)0.09801714, (float)0.13052619,
+  (float)0.16289547, (float)0.19509032, (float)0.22707626, (float)0.25881905, (float)0.29028468,
+  (float)0.32143947, (float)0.35225005, (float)0.38268343, (float)0.41270703, (float)0.44228869,
+  (float)0.47139674, (float)0.50000000, (float)0.52806785, (float)0.55557023, (float)0.58247770,
+  (float)0.60876143, (float)0.63439328, (float)0.65934582, (float)0.68359230, (float)0.70710678,
+  (float)0.72986407, (float)0.75183981, (float)0.77301045, (float)0.79335334, (float)0.81284668,
+  (float)0.83146961, (float)0.84920218, (float)0.86602540, (float)0.88192126, (float)0.89687274,
+  (float)0.91086382, (float)0.92387953, (float)0.93590593, (float)0.94693013, (float)0.95694034,
+  (float)0.96592583, (float)0.97387698, (float)0.98078528, (float)0.98664333, (float)0.99144486,
+  (float)0.99518473, (float)0.99785892, (float)0.99946459, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)0.99946459, (float)0.99785892, (float)0.99518473, (float)0.99144486,
+  (float)0.98664333, (float)0.98078528, (float)0.97387698, (float)0.96592583, (float)0.95694034,
+  (float)0.94693013, (float)0.93590593, (float)0.92387953, (float)0.91086382, (float)0.89687274,
+  (float)0.88192126, (float)0.86602540, (float)0.84920218, (float)0.83146961, (float)0.81284668,
+  (float)0.79335334, (float)0.77301045, (float)0.75183981, (float)0.72986407, (float)0.70710678,
+  (float)0.68359230, (float)0.65934582, (float)0.63439328, (float)0.60876143, (float)0.58247770,
+  (float)0.55557023, (float)0.52806785, (float)0.50000000, (float)0.47139674, (float)0.44228869,
+  (float)0.41270703, (float)0.38268343, (float)0.35225005, (float)0.32143947, (float)0.29028468,
+  (float)0.25881905, (float)0.22707626, (float)0.19509032, (float)0.16289547, (float)0.13052619,
+  (float)0.09801714, (float)0.06540313, (float)0.03271908
+};
+
+// hybrib Hanning & flat window
+static const float kBlocks160w256[256] = {
+  (float)0.00000000, (float)0.01636173, (float)0.03271908, (float)0.04906767, (float)0.06540313,
+  (float)0.08172107, (float)0.09801714, (float)0.11428696, (float)0.13052619, (float)0.14673047,
+  (float)0.16289547, (float)0.17901686, (float)0.19509032, (float)0.21111155, (float)0.22707626,
+  (float)0.24298018, (float)0.25881905, (float)0.27458862, (float)0.29028468, (float)0.30590302,
+  (float)0.32143947, (float)0.33688985, (float)0.35225005, (float)0.36751594, (float)0.38268343,
+  (float)0.39774847, (float)0.41270703, (float)0.42755509, (float)0.44228869, (float)0.45690388,
+  (float)0.47139674, (float)0.48576339, (float)0.50000000, (float)0.51410274, (float)0.52806785,
+  (float)0.54189158, (float)0.55557023, (float)0.56910015, (float)0.58247770, (float)0.59569930,
+  (float)0.60876143, (float)0.62166057, (float)0.63439328, (float)0.64695615, (float)0.65934582,
+  (float)0.67155895, (float)0.68359230, (float)0.69544264, (float)0.70710678, (float)0.71858162,
+  (float)0.72986407, (float)0.74095113, (float)0.75183981, (float)0.76252720, (float)0.77301045,
+  (float)0.78328675, (float)0.79335334, (float)0.80320753, (float)0.81284668, (float)0.82226822,
+  (float)0.83146961, (float)0.84044840, (float)0.84920218, (float)0.85772861, (float)0.86602540,
+  (float)0.87409034, (float)0.88192126, (float)0.88951608, (float)0.89687274, (float)0.90398929,
+  (float)0.91086382, (float)0.91749450, (float)0.92387953, (float)0.93001722, (float)0.93590593,
+  (float)0.94154407, (float)0.94693013, (float)0.95206268, (float)0.95694034, (float)0.96156180,
+  (float)0.96592583, (float)0.97003125, (float)0.97387698, (float)0.97746197, (float)0.98078528,
+  (float)0.98384601, (float)0.98664333, (float)0.98917651, (float)0.99144486, (float)0.99344778,
+  (float)0.99518473, (float)0.99665524, (float)0.99785892, (float)0.99879546, (float)0.99946459,
+  (float)0.99986614, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)0.99986614, (float)0.99946459, (float)0.99879546, (float)0.99785892,
+  (float)0.99665524, (float)0.99518473, (float)0.99344778, (float)0.99144486, (float)0.98917651,
+  (float)0.98664333, (float)0.98384601, (float)0.98078528, (float)0.97746197, (float)0.97387698,
+  (float)0.97003125, (float)0.96592583, (float)0.96156180, (float)0.95694034, (float)0.95206268,
+  (float)0.94693013, (float)0.94154407, (float)0.93590593, (float)0.93001722, (float)0.92387953,
+  (float)0.91749450, (float)0.91086382, (float)0.90398929, (float)0.89687274, (float)0.88951608,
+  (float)0.88192126, (float)0.87409034, (float)0.86602540, (float)0.85772861, (float)0.84920218,
+  (float)0.84044840, (float)0.83146961, (float)0.82226822, (float)0.81284668, (float)0.80320753,
+  (float)0.79335334, (float)0.78328675, (float)0.77301045, (float)0.76252720, (float)0.75183981,
+  (float)0.74095113, (float)0.72986407, (float)0.71858162, (float)0.70710678, (float)0.69544264,
+  (float)0.68359230, (float)0.67155895, (float)0.65934582, (float)0.64695615, (float)0.63439328,
+  (float)0.62166057, (float)0.60876143, (float)0.59569930, (float)0.58247770, (float)0.56910015,
+  (float)0.55557023, (float)0.54189158, (float)0.52806785, (float)0.51410274, (float)0.50000000,
+  (float)0.48576339, (float)0.47139674, (float)0.45690388, (float)0.44228869, (float)0.42755509,
+  (float)0.41270703, (float)0.39774847, (float)0.38268343, (float)0.36751594, (float)0.35225005,
+  (float)0.33688985, (float)0.32143947, (float)0.30590302, (float)0.29028468, (float)0.27458862,
+  (float)0.25881905, (float)0.24298018, (float)0.22707626, (float)0.21111155, (float)0.19509032,
+  (float)0.17901686, (float)0.16289547, (float)0.14673047, (float)0.13052619, (float)0.11428696,
+  (float)0.09801714, (float)0.08172107, (float)0.06540313, (float)0.04906767, (float)0.03271908,
+  (float)0.01636173
+};
+
+// hybrib Hanning & flat window: for 20ms
+static const float kBlocks320w512[512] = {
+  (float)0.00000000, (float)0.00818114, (float)0.01636173, (float)0.02454123, (float)0.03271908,
+  (float)0.04089475, (float)0.04906767, (float)0.05723732, (float)0.06540313, (float)0.07356456,
+  (float)0.08172107, (float)0.08987211, (float)0.09801714, (float)0.10615561, (float)0.11428696,
+  (float)0.12241068, (float)0.13052619, (float)0.13863297, (float)0.14673047, (float)0.15481816,
+  (float)0.16289547, (float)0.17096189, (float)0.17901686, (float)0.18705985, (float)0.19509032,
+  (float)0.20310773, (float)0.21111155, (float)0.21910124, (float)0.22707626, (float)0.23503609,
+  (float)0.24298018, (float)0.25090801, (float)0.25881905, (float)0.26671276, (float)0.27458862,
+  (float)0.28244610, (float)0.29028468, (float)0.29810383, (float)0.30590302, (float)0.31368174,
+  (float)0.32143947, (float)0.32917568, (float)0.33688985, (float)0.34458148, (float)0.35225005,
+  (float)0.35989504, (float)0.36751594, (float)0.37511224, (float)0.38268343, (float)0.39022901,
+  (float)0.39774847, (float)0.40524131, (float)0.41270703, (float)0.42014512, (float)0.42755509,
+  (float)0.43493645, (float)0.44228869, (float)0.44961133, (float)0.45690388, (float)0.46416584,
+  (float)0.47139674, (float)0.47859608, (float)0.48576339, (float)0.49289819, (float)0.50000000,
+  (float)0.50706834, (float)0.51410274, (float)0.52110274, (float)0.52806785, (float)0.53499762,
+  (float)0.54189158, (float)0.54874927, (float)0.55557023, (float)0.56235401, (float)0.56910015,
+  (float)0.57580819, (float)0.58247770, (float)0.58910822, (float)0.59569930, (float)0.60225052,
+  (float)0.60876143, (float)0.61523159, (float)0.62166057, (float)0.62804795, (float)0.63439328,
+  (float)0.64069616, (float)0.64695615, (float)0.65317284, (float)0.65934582, (float)0.66547466,
+  (float)0.67155895, (float)0.67759830, (float)0.68359230, (float)0.68954054, (float)0.69544264,
+  (float)0.70129818, (float)0.70710678, (float)0.71286806, (float)0.71858162, (float)0.72424708,
+  (float)0.72986407, (float)0.73543221, (float)0.74095113, (float)0.74642045, (float)0.75183981,
+  (float)0.75720885, (float)0.76252720, (float)0.76779452, (float)0.77301045, (float)0.77817464,
+  (float)0.78328675, (float)0.78834643, (float)0.79335334, (float)0.79830715, (float)0.80320753,
+  (float)0.80805415, (float)0.81284668, (float)0.81758481, (float)0.82226822, (float)0.82689659,
+  (float)0.83146961, (float)0.83598698, (float)0.84044840, (float)0.84485357, (float)0.84920218,
+  (float)0.85349396, (float)0.85772861, (float)0.86190585, (float)0.86602540, (float)0.87008699,
+  (float)0.87409034, (float)0.87803519, (float)0.88192126, (float)0.88574831, (float)0.88951608,
+  (float)0.89322430, (float)0.89687274, (float)0.90046115, (float)0.90398929, (float)0.90745693,
+  (float)0.91086382, (float)0.91420976, (float)0.91749450, (float)0.92071783, (float)0.92387953,
+  (float)0.92697940, (float)0.93001722, (float)0.93299280, (float)0.93590593, (float)0.93875641,
+  (float)0.94154407, (float)0.94426870, (float)0.94693013, (float)0.94952818, (float)0.95206268,
+  (float)0.95453345, (float)0.95694034, (float)0.95928317, (float)0.96156180, (float)0.96377607,
+  (float)0.96592583, (float)0.96801094, (float)0.97003125, (float)0.97198664, (float)0.97387698,
+  (float)0.97570213, (float)0.97746197, (float)0.97915640, (float)0.98078528, (float)0.98234852,
+  (float)0.98384601, (float)0.98527764, (float)0.98664333, (float)0.98794298, (float)0.98917651,
+  (float)0.99034383, (float)0.99144486, (float)0.99247953, (float)0.99344778, (float)0.99434953,
+  (float)0.99518473, (float)0.99595331, (float)0.99665524, (float)0.99729046, (float)0.99785892,
+  (float)0.99836060, (float)0.99879546, (float)0.99916346, (float)0.99946459, (float)0.99969882,
+  (float)0.99986614, (float)0.99996653, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000, (float)1.00000000,
+  (float)1.00000000, (float)0.99996653, (float)0.99986614, (float)0.99969882, (float)0.99946459,
+  (float)0.99916346, (float)0.99879546, (float)0.99836060, (float)0.99785892, (float)0.99729046,
+  (float)0.99665524, (float)0.99595331, (float)0.99518473, (float)0.99434953, (float)0.99344778,
+  (float)0.99247953, (float)0.99144486, (float)0.99034383, (float)0.98917651, (float)0.98794298,
+  (float)0.98664333, (float)0.98527764, (float)0.98384601, (float)0.98234852, (float)0.98078528,
+  (float)0.97915640, (float)0.97746197, (float)0.97570213, (float)0.97387698, (float)0.97198664,
+  (float)0.97003125, (float)0.96801094, (float)0.96592583, (float)0.96377607, (float)0.96156180,
+  (float)0.95928317, (float)0.95694034, (float)0.95453345, (float)0.95206268, (float)0.94952818,
+  (float)0.94693013, (float)0.94426870, (float)0.94154407, (float)0.93875641, (float)0.93590593,
+  (float)0.93299280, (float)0.93001722, (float)0.92697940, (float)0.92387953, (float)0.92071783,
+  (float)0.91749450, (float)0.91420976, (float)0.91086382, (float)0.90745693, (float)0.90398929,
+  (float)0.90046115, (float)0.89687274, (float)0.89322430, (float)0.88951608, (float)0.88574831,
+  (float)0.88192126, (float)0.87803519, (float)0.87409034, (float)0.87008699, (float)0.86602540,
+  (float)0.86190585, (float)0.85772861, (float)0.85349396, (float)0.84920218, (float)0.84485357,
+  (float)0.84044840, (float)0.83598698, (float)0.83146961, (float)0.82689659, (float)0.82226822,
+  (float)0.81758481, (float)0.81284668, (float)0.80805415, (float)0.80320753, (float)0.79830715,
+  (float)0.79335334, (float)0.78834643, (float)0.78328675, (float)0.77817464, (float)0.77301045,
+  (float)0.76779452, (float)0.76252720, (float)0.75720885, (float)0.75183981, (float)0.74642045,
+  (float)0.74095113, (float)0.73543221, (float)0.72986407, (float)0.72424708, (float)0.71858162,
+  (float)0.71286806, (float)0.70710678, (float)0.70129818, (float)0.69544264, (float)0.68954054,
+  (float)0.68359230, (float)0.67759830, (float)0.67155895, (float)0.66547466, (float)0.65934582,
+  (float)0.65317284, (float)0.64695615, (float)0.64069616, (float)0.63439328, (float)0.62804795,
+  (float)0.62166057, (float)0.61523159, (float)0.60876143, (float)0.60225052, (float)0.59569930,
+  (float)0.58910822, (float)0.58247770, (float)0.57580819, (float)0.56910015, (float)0.56235401,
+  (float)0.55557023, (float)0.54874927, (float)0.54189158, (float)0.53499762, (float)0.52806785,
+  (float)0.52110274, (float)0.51410274, (float)0.50706834, (float)0.50000000, (float)0.49289819,
+  (float)0.48576339, (float)0.47859608, (float)0.47139674, (float)0.46416584, (float)0.45690388,
+  (float)0.44961133, (float)0.44228869, (float)0.43493645, (float)0.42755509, (float)0.42014512,
+  (float)0.41270703, (float)0.40524131, (float)0.39774847, (float)0.39022901, (float)0.38268343,
+  (float)0.37511224, (float)0.36751594, (float)0.35989504, (float)0.35225005, (float)0.34458148,
+  (float)0.33688985, (float)0.32917568, (float)0.32143947, (float)0.31368174, (float)0.30590302,
+  (float)0.29810383, (float)0.29028468, (float)0.28244610, (float)0.27458862, (float)0.26671276,
+  (float)0.25881905, (float)0.25090801, (float)0.24298018, (float)0.23503609, (float)0.22707626,
+  (float)0.21910124, (float)0.21111155, (float)0.20310773, (float)0.19509032, (float)0.18705985,
+  (float)0.17901686, (float)0.17096189, (float)0.16289547, (float)0.15481816, (float)0.14673047,
+  (float)0.13863297, (float)0.13052619, (float)0.12241068, (float)0.11428696, (float)0.10615561,
+  (float)0.09801714, (float)0.08987211, (float)0.08172107, (float)0.07356456, (float)0.06540313,
+  (float)0.05723732, (float)0.04906767, (float)0.04089475, (float)0.03271908, (float)0.02454123,
+  (float)0.01636173, (float)0.00818114
+};
+
+
+// Hanning window: for 15ms at 16kHz with symmetric zeros
+static const float kBlocks240w512[512] = {
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00654494, (float)0.01308960, (float)0.01963369,
+  (float)0.02617695, (float)0.03271908, (float)0.03925982, (float)0.04579887, (float)0.05233596,
+  (float)0.05887080, (float)0.06540313, (float)0.07193266, (float)0.07845910, (float)0.08498218,
+  (float)0.09150162, (float)0.09801714, (float)0.10452846, (float)0.11103531, (float)0.11753740,
+  (float)0.12403446, (float)0.13052620, (float)0.13701233, (float)0.14349262, (float)0.14996676,
+  (float)0.15643448, (float)0.16289547, (float)0.16934951, (float)0.17579629, (float)0.18223552,
+  (float)0.18866697, (float)0.19509032, (float)0.20150533, (float)0.20791170, (float)0.21430916,
+  (float)0.22069745, (float)0.22707628, (float)0.23344538, (float)0.23980446, (float)0.24615330,
+  (float)0.25249159, (float)0.25881904, (float)0.26513544, (float)0.27144045, (float)0.27773386,
+  (float)0.28401536, (float)0.29028466, (float)0.29654160, (float)0.30278578, (float)0.30901700,
+  (float)0.31523499, (float)0.32143945, (float)0.32763019, (float)0.33380687, (float)0.33996925,
+  (float)0.34611708, (float)0.35225007, (float)0.35836795, (float)0.36447051, (float)0.37055743,
+  (float)0.37662852, (float)0.38268346, (float)0.38872197, (float)0.39474389, (float)0.40074885,
+  (float)0.40673664, (float)0.41270703, (float)0.41865975, (float)0.42459452, (float)0.43051112,
+  (float)0.43640924, (float)0.44228873, (float)0.44814920, (float)0.45399052, (float)0.45981237,
+  (float)0.46561453, (float)0.47139674, (float)0.47715878, (float)0.48290035, (float)0.48862126,
+  (float)0.49432120, (float)0.50000000, (float)0.50565743, (float)0.51129311, (float)0.51690692,
+  (float)0.52249855, (float)0.52806789, (float)0.53361452, (float)0.53913832, (float)0.54463905,
+  (float)0.55011642, (float)0.55557024, (float)0.56100029, (float)0.56640625, (float)0.57178795,
+  (float)0.57714522, (float)0.58247769, (float)0.58778524, (float)0.59306765, (float)0.59832460,
+  (float)0.60355598, (float)0.60876143, (float)0.61394083, (float)0.61909395, (float)0.62422055,
+  (float)0.62932038, (float)0.63439333, (float)0.63943899, (float)0.64445734, (float)0.64944810,
+  (float)0.65441096, (float)0.65934587, (float)0.66425246, (float)0.66913062, (float)0.67398012,
+  (float)0.67880076, (float)0.68359232, (float)0.68835455, (float)0.69308740, (float)0.69779050,
+  (float)0.70246369, (float)0.70710677, (float)0.71171963, (float)0.71630198, (float)0.72085363,
+  (float)0.72537440, (float)0.72986406, (float)0.73432255, (float)0.73874950, (float)0.74314487,
+  (float)0.74750835, (float)0.75183982, (float)0.75613910, (float)0.76040596, (float)0.76464027,
+  (float)0.76884186, (float)0.77301043, (float)0.77714598, (float)0.78124821, (float)0.78531694,
+  (float)0.78935206, (float)0.79335338, (float)0.79732066, (float)0.80125386, (float)0.80515265,
+  (float)0.80901700, (float)0.81284672, (float)0.81664157, (float)0.82040149, (float)0.82412618,
+  (float)0.82781565, (float)0.83146966, (float)0.83508795, (float)0.83867061, (float)0.84221727,
+  (float)0.84572780, (float)0.84920216, (float)0.85264021, (float)0.85604161, (float)0.85940641,
+  (float)0.86273444, (float)0.86602545, (float)0.86927933, (float)0.87249607, (float)0.87567532,
+  (float)0.87881714, (float)0.88192129, (float)0.88498765, (float)0.88801610, (float)0.89100653,
+  (float)0.89395881, (float)0.89687276, (float)0.89974827, (float)0.90258533, (float)0.90538365,
+  (float)0.90814316, (float)0.91086388, (float)0.91354549, (float)0.91618794, (float)0.91879123,
+  (float)0.92135513, (float)0.92387950, (float)0.92636442, (float)0.92880958, (float)0.93121493,
+  (float)0.93358046, (float)0.93590593, (float)0.93819135, (float)0.94043654, (float)0.94264150,
+  (float)0.94480604, (float)0.94693011, (float)0.94901365, (float)0.95105654, (float)0.95305866,
+  (float)0.95501995, (float)0.95694035, (float)0.95881975, (float)0.96065807, (float)0.96245527,
+  (float)0.96421117, (float)0.96592581, (float)0.96759909, (float)0.96923089, (float)0.97082120,
+  (float)0.97236991, (float)0.97387701, (float)0.97534233, (float)0.97676587, (float)0.97814763,
+  (float)0.97948742, (float)0.98078531, (float)0.98204112, (float)0.98325491, (float)0.98442656,
+  (float)0.98555607, (float)0.98664331, (float)0.98768836, (float)0.98869103, (float)0.98965138,
+  (float)0.99056935, (float)0.99144489, (float)0.99227792, (float)0.99306846, (float)0.99381649,
+  (float)0.99452192, (float)0.99518472, (float)0.99580491, (float)0.99638247, (float)0.99691731,
+  (float)0.99740952, (float)0.99785894, (float)0.99826562, (float)0.99862951, (float)0.99895066,
+  (float)0.99922901, (float)0.99946457, (float)0.99965733, (float)0.99980724, (float)0.99991435,
+  (float)0.99997860, (float)1.00000000, (float)0.99997860, (float)0.99991435, (float)0.99980724,
+  (float)0.99965733, (float)0.99946457, (float)0.99922901, (float)0.99895066, (float)0.99862951,
+  (float)0.99826562, (float)0.99785894, (float)0.99740946, (float)0.99691731, (float)0.99638247,
+  (float)0.99580491, (float)0.99518472, (float)0.99452192, (float)0.99381644, (float)0.99306846,
+  (float)0.99227792, (float)0.99144489, (float)0.99056935, (float)0.98965138, (float)0.98869103,
+  (float)0.98768836, (float)0.98664331, (float)0.98555607, (float)0.98442656, (float)0.98325491,
+  (float)0.98204112, (float)0.98078525, (float)0.97948742, (float)0.97814757, (float)0.97676587,
+  (float)0.97534227, (float)0.97387695, (float)0.97236991, (float)0.97082120, (float)0.96923089,
+  (float)0.96759909, (float)0.96592581, (float)0.96421117, (float)0.96245521, (float)0.96065807,
+  (float)0.95881969, (float)0.95694029, (float)0.95501995, (float)0.95305860, (float)0.95105648,
+  (float)0.94901365, (float)0.94693011, (float)0.94480604, (float)0.94264150, (float)0.94043654,
+  (float)0.93819129, (float)0.93590593, (float)0.93358046, (float)0.93121493, (float)0.92880952,
+  (float)0.92636436, (float)0.92387950, (float)0.92135507, (float)0.91879123, (float)0.91618794,
+  (float)0.91354543, (float)0.91086382, (float)0.90814310, (float)0.90538365, (float)0.90258527,
+  (float)0.89974827, (float)0.89687276, (float)0.89395875, (float)0.89100647, (float)0.88801610,
+  (float)0.88498759, (float)0.88192123, (float)0.87881714, (float)0.87567532, (float)0.87249595,
+  (float)0.86927933, (float)0.86602539, (float)0.86273432, (float)0.85940641, (float)0.85604161,
+  (float)0.85264009, (float)0.84920216, (float)0.84572780, (float)0.84221715, (float)0.83867055,
+  (float)0.83508795, (float)0.83146954, (float)0.82781565, (float)0.82412612, (float)0.82040137,
+  (float)0.81664157, (float)0.81284660, (float)0.80901700, (float)0.80515265, (float)0.80125374,
+  (float)0.79732066, (float)0.79335332, (float)0.78935200, (float)0.78531694, (float)0.78124815,
+  (float)0.77714586, (float)0.77301049, (float)0.76884180, (float)0.76464021, (float)0.76040596,
+  (float)0.75613904, (float)0.75183970, (float)0.74750835, (float)0.74314481, (float)0.73874938,
+  (float)0.73432249, (float)0.72986400, (float)0.72537428, (float)0.72085363, (float)0.71630186,
+  (float)0.71171951, (float)0.70710677, (float)0.70246363, (float)0.69779032, (float)0.69308734,
+  (float)0.68835449, (float)0.68359220, (float)0.67880070, (float)0.67398006, (float)0.66913044,
+  (float)0.66425240, (float)0.65934575, (float)0.65441096, (float)0.64944804, (float)0.64445722,
+  (float)0.63943905, (float)0.63439327, (float)0.62932026, (float)0.62422055, (float)0.61909389,
+  (float)0.61394072, (float)0.60876143, (float)0.60355592, (float)0.59832448, (float)0.59306765,
+  (float)0.58778518, (float)0.58247757, (float)0.57714522, (float)0.57178789, (float)0.56640613,
+  (float)0.56100023, (float)0.55557019, (float)0.55011630, (float)0.54463905, (float)0.53913826,
+  (float)0.53361434, (float)0.52806783, (float)0.52249849, (float)0.51690674, (float)0.51129305,
+  (float)0.50565726, (float)0.50000006, (float)0.49432117, (float)0.48862115, (float)0.48290038,
+  (float)0.47715873, (float)0.47139663, (float)0.46561456, (float)0.45981231, (float)0.45399037,
+  (float)0.44814920, (float)0.44228864, (float)0.43640912, (float)0.43051112, (float)0.42459446,
+  (float)0.41865960, (float)0.41270703, (float)0.40673658, (float)0.40074870, (float)0.39474386,
+  (float)0.38872188, (float)0.38268328, (float)0.37662849, (float)0.37055734, (float)0.36447033,
+  (float)0.35836792, (float)0.35224995, (float)0.34611690, (float)0.33996922, (float)0.33380675,
+  (float)0.32763001, (float)0.32143945, (float)0.31523487, (float)0.30901679, (float)0.30278572,
+  (float)0.29654145, (float)0.29028472, (float)0.28401530, (float)0.27773371, (float)0.27144048,
+  (float)0.26513538, (float)0.25881892, (float)0.25249159, (float)0.24615324, (float)0.23980433,
+  (float)0.23344538, (float)0.22707619, (float)0.22069728, (float)0.21430916, (float)0.20791161,
+  (float)0.20150517, (float)0.19509031, (float)0.18866688, (float)0.18223536, (float)0.17579627,
+  (float)0.16934940, (float)0.16289529, (float)0.15643445, (float)0.14996666, (float)0.14349243,
+  (float)0.13701232, (float)0.13052608, (float)0.12403426, (float)0.11753736, (float)0.11103519,
+  (float)0.10452849, (float)0.09801710, (float)0.09150149, (float)0.08498220, (float)0.07845904,
+  (float)0.07193252, (float)0.06540315, (float)0.05887074, (float)0.05233581, (float)0.04579888,
+  (float)0.03925974, (float)0.03271893, (float)0.02617695, (float)0.01963361, (float)0.01308943,
+  (float)0.00654493, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000
+};
+
+
+// Hanning window: for 30ms with 1024 fft with symmetric zeros at 16kHz
+static const float kBlocks480w1024[1024] = {
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00327249, (float)0.00654494,
+  (float)0.00981732, (float)0.01308960, (float)0.01636173, (float)0.01963369, (float)0.02290544,
+  (float)0.02617695, (float)0.02944817, (float)0.03271908, (float)0.03598964, (float)0.03925982,
+  (float)0.04252957, (float)0.04579887, (float)0.04906768, (float)0.05233596, (float)0.05560368,
+  (float)0.05887080, (float)0.06213730, (float)0.06540313, (float)0.06866825, (float)0.07193266,
+  (float)0.07519628, (float)0.07845910, (float)0.08172107, (float)0.08498218, (float)0.08824237,
+  (float)0.09150162, (float)0.09475989, (float)0.09801714, (float)0.10127335, (float)0.10452846,
+  (float)0.10778246, (float)0.11103531, (float)0.11428697, (float)0.11753740, (float)0.12078657,
+  (float)0.12403446, (float)0.12728101, (float)0.13052620, (float)0.13376999, (float)0.13701233,
+  (float)0.14025325, (float)0.14349262, (float)0.14673047, (float)0.14996676, (float)0.15320145,
+  (float)0.15643448, (float)0.15966582, (float)0.16289547, (float)0.16612339, (float)0.16934951,
+  (float)0.17257382, (float)0.17579629, (float)0.17901687, (float)0.18223552, (float)0.18545224,
+  (float)0.18866697, (float)0.19187967, (float)0.19509032, (float)0.19829889, (float)0.20150533,
+  (float)0.20470962, (float)0.20791170, (float)0.21111156, (float)0.21430916, (float)0.21750447,
+  (float)0.22069745, (float)0.22388805, (float)0.22707628, (float)0.23026206, (float)0.23344538,
+  (float)0.23662618, (float)0.23980446, (float)0.24298020, (float)0.24615330, (float)0.24932377,
+  (float)0.25249159, (float)0.25565669, (float)0.25881904, (float)0.26197866, (float)0.26513544,
+  (float)0.26828939, (float)0.27144045, (float)0.27458861, (float)0.27773386, (float)0.28087610,
+  (float)0.28401536, (float)0.28715158, (float)0.29028466, (float)0.29341471, (float)0.29654160,
+  (float)0.29966527, (float)0.30278578, (float)0.30590302, (float)0.30901700, (float)0.31212768,
+  (float)0.31523499, (float)0.31833893, (float)0.32143945, (float)0.32453656, (float)0.32763019,
+  (float)0.33072028, (float)0.33380687, (float)0.33688986, (float)0.33996925, (float)0.34304500,
+  (float)0.34611708, (float)0.34918544, (float)0.35225007, (float)0.35531089, (float)0.35836795,
+  (float)0.36142117, (float)0.36447051, (float)0.36751595, (float)0.37055743, (float)0.37359497,
+  (float)0.37662852, (float)0.37965801, (float)0.38268346, (float)0.38570479, (float)0.38872197,
+  (float)0.39173502, (float)0.39474389, (float)0.39774847, (float)0.40074885, (float)0.40374491,
+  (float)0.40673664, (float)0.40972406, (float)0.41270703, (float)0.41568562, (float)0.41865975,
+  (float)0.42162940, (float)0.42459452, (float)0.42755508, (float)0.43051112, (float)0.43346250,
+  (float)0.43640924, (float)0.43935132, (float)0.44228873, (float)0.44522133, (float)0.44814920,
+  (float)0.45107228, (float)0.45399052, (float)0.45690390, (float)0.45981237, (float)0.46271592,
+  (float)0.46561453, (float)0.46850815, (float)0.47139674, (float)0.47428030, (float)0.47715878,
+  (float)0.48003215, (float)0.48290035, (float)0.48576337, (float)0.48862126, (float)0.49147385,
+  (float)0.49432120, (float)0.49716330, (float)0.50000000, (float)0.50283140, (float)0.50565743,
+  (float)0.50847799, (float)0.51129311, (float)0.51410276, (float)0.51690692, (float)0.51970553,
+  (float)0.52249855, (float)0.52528602, (float)0.52806789, (float)0.53084403, (float)0.53361452,
+  (float)0.53637928, (float)0.53913832, (float)0.54189163, (float)0.54463905, (float)0.54738063,
+  (float)0.55011642, (float)0.55284631, (float)0.55557024, (float)0.55828828, (float)0.56100029,
+  (float)0.56370628, (float)0.56640625, (float)0.56910014, (float)0.57178795, (float)0.57446963,
+  (float)0.57714522, (float)0.57981455, (float)0.58247769, (float)0.58513463, (float)0.58778524,
+  (float)0.59042960, (float)0.59306765, (float)0.59569931, (float)0.59832460, (float)0.60094351,
+  (float)0.60355598, (float)0.60616195, (float)0.60876143, (float)0.61135441, (float)0.61394083,
+  (float)0.61652070, (float)0.61909395, (float)0.62166059, (float)0.62422055, (float)0.62677383,
+  (float)0.62932038, (float)0.63186020, (float)0.63439333, (float)0.63691956, (float)0.63943899,
+  (float)0.64195162, (float)0.64445734, (float)0.64695615, (float)0.64944810, (float)0.65193301,
+  (float)0.65441096, (float)0.65688187, (float)0.65934587, (float)0.66180271, (float)0.66425246,
+  (float)0.66669512, (float)0.66913062, (float)0.67155898, (float)0.67398012, (float)0.67639405,
+  (float)0.67880076, (float)0.68120021, (float)0.68359232, (float)0.68597710, (float)0.68835455,
+  (float)0.69072467, (float)0.69308740, (float)0.69544262, (float)0.69779050, (float)0.70013082,
+  (float)0.70246369, (float)0.70478904, (float)0.70710677, (float)0.70941699, (float)0.71171963,
+  (float)0.71401459, (float)0.71630198, (float)0.71858168, (float)0.72085363, (float)0.72311789,
+  (float)0.72537440, (float)0.72762316, (float)0.72986406, (float)0.73209721, (float)0.73432255,
+  (float)0.73653996, (float)0.73874950, (float)0.74095118, (float)0.74314487, (float)0.74533057,
+  (float)0.74750835, (float)0.74967808, (float)0.75183982, (float)0.75399351, (float)0.75613910,
+  (float)0.75827658, (float)0.76040596, (float)0.76252723, (float)0.76464027, (float)0.76674515,
+  (float)0.76884186, (float)0.77093029, (float)0.77301043, (float)0.77508241, (float)0.77714598,
+  (float)0.77920127, (float)0.78124821, (float)0.78328675, (float)0.78531694, (float)0.78733873,
+  (float)0.78935206, (float)0.79135692, (float)0.79335338, (float)0.79534125, (float)0.79732066,
+  (float)0.79929149, (float)0.80125386, (float)0.80320752, (float)0.80515265, (float)0.80708915,
+  (float)0.80901700, (float)0.81093621, (float)0.81284672, (float)0.81474853, (float)0.81664157,
+  (float)0.81852591, (float)0.82040149, (float)0.82226825, (float)0.82412618, (float)0.82597536,
+  (float)0.82781565, (float)0.82964706, (float)0.83146966, (float)0.83328325, (float)0.83508795,
+  (float)0.83688378, (float)0.83867061, (float)0.84044838, (float)0.84221727, (float)0.84397703,
+  (float)0.84572780, (float)0.84746957, (float)0.84920216, (float)0.85092574, (float)0.85264021,
+  (float)0.85434544, (float)0.85604161, (float)0.85772866, (float)0.85940641, (float)0.86107504,
+  (float)0.86273444, (float)0.86438453, (float)0.86602545, (float)0.86765707, (float)0.86927933,
+  (float)0.87089235, (float)0.87249607, (float)0.87409031, (float)0.87567532, (float)0.87725097,
+  (float)0.87881714, (float)0.88037390, (float)0.88192129, (float)0.88345921, (float)0.88498765,
+  (float)0.88650668, (float)0.88801610, (float)0.88951612, (float)0.89100653, (float)0.89248741,
+  (float)0.89395881, (float)0.89542055, (float)0.89687276, (float)0.89831537, (float)0.89974827,
+  (float)0.90117162, (float)0.90258533, (float)0.90398932, (float)0.90538365, (float)0.90676826,
+  (float)0.90814316, (float)0.90950841, (float)0.91086388, (float)0.91220951, (float)0.91354549,
+  (float)0.91487163, (float)0.91618794, (float)0.91749454, (float)0.91879123, (float)0.92007810,
+  (float)0.92135513, (float)0.92262226, (float)0.92387950, (float)0.92512691, (float)0.92636442,
+  (float)0.92759192, (float)0.92880958, (float)0.93001723, (float)0.93121493, (float)0.93240267,
+  (float)0.93358046, (float)0.93474817, (float)0.93590593, (float)0.93705362, (float)0.93819135,
+  (float)0.93931901, (float)0.94043654, (float)0.94154406, (float)0.94264150, (float)0.94372880,
+  (float)0.94480604, (float)0.94587320, (float)0.94693011, (float)0.94797695, (float)0.94901365,
+  (float)0.95004016, (float)0.95105654, (float)0.95206273, (float)0.95305866, (float)0.95404440,
+  (float)0.95501995, (float)0.95598525, (float)0.95694035, (float)0.95788521, (float)0.95881975,
+  (float)0.95974404, (float)0.96065807, (float)0.96156180, (float)0.96245527, (float)0.96333838,
+  (float)0.96421117, (float)0.96507370, (float)0.96592581, (float)0.96676767, (float)0.96759909,
+  (float)0.96842021, (float)0.96923089, (float)0.97003126, (float)0.97082120, (float)0.97160077,
+  (float)0.97236991, (float)0.97312868, (float)0.97387701, (float)0.97461486, (float)0.97534233,
+  (float)0.97605932, (float)0.97676587, (float)0.97746199, (float)0.97814763, (float)0.97882277,
+  (float)0.97948742, (float)0.98014158, (float)0.98078531, (float)0.98141843, (float)0.98204112,
+  (float)0.98265332, (float)0.98325491, (float)0.98384601, (float)0.98442656, (float)0.98499662,
+  (float)0.98555607, (float)0.98610497, (float)0.98664331, (float)0.98717111, (float)0.98768836,
+  (float)0.98819500, (float)0.98869103, (float)0.98917651, (float)0.98965138, (float)0.99011570,
+  (float)0.99056935, (float)0.99101239, (float)0.99144489, (float)0.99186671, (float)0.99227792,
+  (float)0.99267852, (float)0.99306846, (float)0.99344778, (float)0.99381649, (float)0.99417448,
+  (float)0.99452192, (float)0.99485862, (float)0.99518472, (float)0.99550015, (float)0.99580491,
+  (float)0.99609905, (float)0.99638247, (float)0.99665523, (float)0.99691731, (float)0.99716878,
+  (float)0.99740952, (float)0.99763954, (float)0.99785894, (float)0.99806762, (float)0.99826562,
+  (float)0.99845290, (float)0.99862951, (float)0.99879545, (float)0.99895066, (float)0.99909520,
+  (float)0.99922901, (float)0.99935216, (float)0.99946457, (float)0.99956632, (float)0.99965733,
+  (float)0.99973762, (float)0.99980724, (float)0.99986613, (float)0.99991435, (float)0.99995178,
+  (float)0.99997860, (float)0.99999464, (float)1.00000000, (float)0.99999464, (float)0.99997860,
+  (float)0.99995178, (float)0.99991435, (float)0.99986613, (float)0.99980724, (float)0.99973762,
+  (float)0.99965733, (float)0.99956632, (float)0.99946457, (float)0.99935216, (float)0.99922901,
+  (float)0.99909520, (float)0.99895066, (float)0.99879545, (float)0.99862951, (float)0.99845290,
+  (float)0.99826562, (float)0.99806762, (float)0.99785894, (float)0.99763954, (float)0.99740946,
+  (float)0.99716872, (float)0.99691731, (float)0.99665523, (float)0.99638247, (float)0.99609905,
+  (float)0.99580491, (float)0.99550015, (float)0.99518472, (float)0.99485862, (float)0.99452192,
+  (float)0.99417448, (float)0.99381644, (float)0.99344778, (float)0.99306846, (float)0.99267852,
+  (float)0.99227792, (float)0.99186671, (float)0.99144489, (float)0.99101239, (float)0.99056935,
+  (float)0.99011564, (float)0.98965138, (float)0.98917651, (float)0.98869103, (float)0.98819494,
+  (float)0.98768836, (float)0.98717111, (float)0.98664331, (float)0.98610497, (float)0.98555607,
+  (float)0.98499656, (float)0.98442656, (float)0.98384601, (float)0.98325491, (float)0.98265326,
+  (float)0.98204112, (float)0.98141843, (float)0.98078525, (float)0.98014158, (float)0.97948742,
+  (float)0.97882277, (float)0.97814757, (float)0.97746193, (float)0.97676587, (float)0.97605932,
+  (float)0.97534227, (float)0.97461486, (float)0.97387695, (float)0.97312862, (float)0.97236991,
+  (float)0.97160077, (float)0.97082120, (float)0.97003126, (float)0.96923089, (float)0.96842015,
+  (float)0.96759909, (float)0.96676761, (float)0.96592581, (float)0.96507365, (float)0.96421117,
+  (float)0.96333838, (float)0.96245521, (float)0.96156180, (float)0.96065807, (float)0.95974404,
+  (float)0.95881969, (float)0.95788515, (float)0.95694029, (float)0.95598525, (float)0.95501995,
+  (float)0.95404440, (float)0.95305860, (float)0.95206267, (float)0.95105648, (float)0.95004016,
+  (float)0.94901365, (float)0.94797695, (float)0.94693011, (float)0.94587314, (float)0.94480604,
+  (float)0.94372880, (float)0.94264150, (float)0.94154406, (float)0.94043654, (float)0.93931895,
+  (float)0.93819129, (float)0.93705362, (float)0.93590593, (float)0.93474817, (float)0.93358046,
+  (float)0.93240267, (float)0.93121493, (float)0.93001723, (float)0.92880952, (float)0.92759192,
+  (float)0.92636436, (float)0.92512691, (float)0.92387950, (float)0.92262226, (float)0.92135507,
+  (float)0.92007804, (float)0.91879123, (float)0.91749448, (float)0.91618794, (float)0.91487157,
+  (float)0.91354543, (float)0.91220951, (float)0.91086382, (float)0.90950835, (float)0.90814310,
+  (float)0.90676820, (float)0.90538365, (float)0.90398932, (float)0.90258527, (float)0.90117157,
+  (float)0.89974827, (float)0.89831525, (float)0.89687276, (float)0.89542055, (float)0.89395875,
+  (float)0.89248741, (float)0.89100647, (float)0.88951600, (float)0.88801610, (float)0.88650662,
+  (float)0.88498759, (float)0.88345915, (float)0.88192123, (float)0.88037384, (float)0.87881714,
+  (float)0.87725091, (float)0.87567532, (float)0.87409031, (float)0.87249595, (float)0.87089223,
+  (float)0.86927933, (float)0.86765701, (float)0.86602539, (float)0.86438447, (float)0.86273432,
+  (float)0.86107504, (float)0.85940641, (float)0.85772860, (float)0.85604161, (float)0.85434544,
+  (float)0.85264009, (float)0.85092574, (float)0.84920216, (float)0.84746951, (float)0.84572780,
+  (float)0.84397697, (float)0.84221715, (float)0.84044844, (float)0.83867055, (float)0.83688372,
+  (float)0.83508795, (float)0.83328319, (float)0.83146954, (float)0.82964706, (float)0.82781565,
+  (float)0.82597530, (float)0.82412612, (float)0.82226813, (float)0.82040137, (float)0.81852591,
+  (float)0.81664157, (float)0.81474847, (float)0.81284660, (float)0.81093609, (float)0.80901700,
+  (float)0.80708915, (float)0.80515265, (float)0.80320752, (float)0.80125374, (float)0.79929143,
+  (float)0.79732066, (float)0.79534125, (float)0.79335332, (float)0.79135686, (float)0.78935200,
+  (float)0.78733861, (float)0.78531694, (float)0.78328675, (float)0.78124815, (float)0.77920121,
+  (float)0.77714586, (float)0.77508223, (float)0.77301049, (float)0.77093029, (float)0.76884180,
+  (float)0.76674509, (float)0.76464021, (float)0.76252711, (float)0.76040596, (float)0.75827658,
+  (float)0.75613904, (float)0.75399339, (float)0.75183970, (float)0.74967796, (float)0.74750835,
+  (float)0.74533057, (float)0.74314481, (float)0.74095106, (float)0.73874938, (float)0.73653996,
+  (float)0.73432249, (float)0.73209721, (float)0.72986400, (float)0.72762305, (float)0.72537428,
+  (float)0.72311789, (float)0.72085363, (float)0.71858162, (float)0.71630186, (float)0.71401453,
+  (float)0.71171951, (float)0.70941705, (float)0.70710677, (float)0.70478898, (float)0.70246363,
+  (float)0.70013070, (float)0.69779032, (float)0.69544268, (float)0.69308734, (float)0.69072461,
+  (float)0.68835449, (float)0.68597704, (float)0.68359220, (float)0.68120021, (float)0.67880070,
+  (float)0.67639399, (float)0.67398006, (float)0.67155886, (float)0.66913044, (float)0.66669512,
+  (float)0.66425240, (float)0.66180259, (float)0.65934575, (float)0.65688181, (float)0.65441096,
+  (float)0.65193301, (float)0.64944804, (float)0.64695609, (float)0.64445722, (float)0.64195150,
+  (float)0.63943905, (float)0.63691956, (float)0.63439327, (float)0.63186014, (float)0.62932026,
+  (float)0.62677372, (float)0.62422055, (float)0.62166059, (float)0.61909389, (float)0.61652064,
+  (float)0.61394072, (float)0.61135429, (float)0.60876143, (float)0.60616189, (float)0.60355592,
+  (float)0.60094339, (float)0.59832448, (float)0.59569913, (float)0.59306765, (float)0.59042960,
+  (float)0.58778518, (float)0.58513451, (float)0.58247757, (float)0.57981461, (float)0.57714522,
+  (float)0.57446963, (float)0.57178789, (float)0.56910002, (float)0.56640613, (float)0.56370628,
+  (float)0.56100023, (float)0.55828822, (float)0.55557019, (float)0.55284619, (float)0.55011630,
+  (float)0.54738069, (float)0.54463905, (float)0.54189152, (float)0.53913826, (float)0.53637916,
+  (float)0.53361434, (float)0.53084403, (float)0.52806783, (float)0.52528596, (float)0.52249849,
+  (float)0.51970541, (float)0.51690674, (float)0.51410276, (float)0.51129305, (float)0.50847787,
+  (float)0.50565726, (float)0.50283122, (float)0.50000006, (float)0.49716327, (float)0.49432117,
+  (float)0.49147379, (float)0.48862115, (float)0.48576325, (float)0.48290038, (float)0.48003212,
+  (float)0.47715873, (float)0.47428021, (float)0.47139663, (float)0.46850798, (float)0.46561456,
+  (float)0.46271589, (float)0.45981231, (float)0.45690379, (float)0.45399037, (float)0.45107210,
+  (float)0.44814920, (float)0.44522130, (float)0.44228864, (float)0.43935123, (float)0.43640912,
+  (float)0.43346232, (float)0.43051112, (float)0.42755505, (float)0.42459446, (float)0.42162928,
+  (float)0.41865960, (float)0.41568545, (float)0.41270703, (float)0.40972400, (float)0.40673658,
+  (float)0.40374479, (float)0.40074870, (float)0.39774850, (float)0.39474386, (float)0.39173496,
+  (float)0.38872188, (float)0.38570464, (float)0.38268328, (float)0.37965804, (float)0.37662849,
+  (float)0.37359491, (float)0.37055734, (float)0.36751580, (float)0.36447033, (float)0.36142117,
+  (float)0.35836792, (float)0.35531086, (float)0.35224995, (float)0.34918529, (float)0.34611690,
+  (float)0.34304500, (float)0.33996922, (float)0.33688980, (float)0.33380675, (float)0.33072016,
+  (float)0.32763001, (float)0.32453656, (float)0.32143945, (float)0.31833887, (float)0.31523487,
+  (float)0.31212750, (float)0.30901679, (float)0.30590302, (float)0.30278572, (float)0.29966521,
+  (float)0.29654145, (float)0.29341453, (float)0.29028472, (float)0.28715155, (float)0.28401530,
+  (float)0.28087601, (float)0.27773371, (float)0.27458847, (float)0.27144048, (float)0.26828936,
+  (float)0.26513538, (float)0.26197854, (float)0.25881892, (float)0.25565651, (float)0.25249159,
+  (float)0.24932374, (float)0.24615324, (float)0.24298008, (float)0.23980433, (float)0.23662600,
+  (float)0.23344538, (float)0.23026201, (float)0.22707619, (float)0.22388794, (float)0.22069728,
+  (float)0.21750426, (float)0.21430916, (float)0.21111152, (float)0.20791161, (float)0.20470949,
+  (float)0.20150517, (float)0.19829892, (float)0.19509031, (float)0.19187963, (float)0.18866688,
+  (float)0.18545210, (float)0.18223536, (float)0.17901689, (float)0.17579627, (float)0.17257376,
+  (float)0.16934940, (float)0.16612324, (float)0.16289529, (float)0.15966584, (float)0.15643445,
+  (float)0.15320137, (float)0.14996666, (float)0.14673033, (float)0.14349243, (float)0.14025325,
+  (float)0.13701232, (float)0.13376991, (float)0.13052608, (float)0.12728085, (float)0.12403426,
+  (float)0.12078657, (float)0.11753736, (float)0.11428688, (float)0.11103519, (float)0.10778230,
+  (float)0.10452849, (float)0.10127334, (float)0.09801710, (float)0.09475980, (float)0.09150149,
+  (float)0.08824220, (float)0.08498220, (float)0.08172106, (float)0.07845904, (float)0.07519618,
+  (float)0.07193252, (float)0.06866808, (float)0.06540315, (float)0.06213728, (float)0.05887074,
+  (float)0.05560357, (float)0.05233581, (float)0.04906749, (float)0.04579888, (float)0.04252954,
+  (float)0.03925974, (float)0.03598953, (float)0.03271893, (float)0.02944798, (float)0.02617695,
+  (float)0.02290541, (float)0.01963361, (float)0.01636161, (float)0.01308943, (float)0.00981712,
+  (float)0.00654493, (float)0.00327244, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000,
+  (float)0.00000000, (float)0.00000000, (float)0.00000000, (float)0.00000000
+};
+
+#endif  // MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_WINDOWS_PRIVATE_H_
diff --git a/modules/audio_processing/render_queue_item_verifier.h b/modules/audio_processing/render_queue_item_verifier.h
new file mode 100644
index 0000000..b8aff4a
--- /dev/null
+++ b/modules/audio_processing/render_queue_item_verifier.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_RENDER_QUEUE_ITEM_VERIFIER_H_
+#define MODULES_AUDIO_PROCESSING_RENDER_QUEUE_ITEM_VERIFIER_H_
+
+#include <vector>
+
+namespace webrtc {
+
+// Functor to use when supplying a verifier function for the queue item
+// verifcation.
+template <typename T>
+class RenderQueueItemVerifier {
+ public:
+  explicit RenderQueueItemVerifier(size_t minimum_capacity)
+      : minimum_capacity_(minimum_capacity) {}
+
+  bool operator()(const std::vector<T>& v) const {
+    return v.capacity() >= minimum_capacity_;
+  }
+
+ private:
+  size_t minimum_capacity_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_RENDER_QUEUE_ITEM_VERIFIER_H__
diff --git a/modules/audio_processing/residual_echo_detector.cc b/modules/audio_processing/residual_echo_detector.cc
new file mode 100644
index 0000000..f506579
--- /dev/null
+++ b/modules/audio_processing/residual_echo_detector.cc
@@ -0,0 +1,212 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/residual_echo_detector.h"
+
+#include <algorithm>
+#include <numeric>
+
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+#include "rtc_base/atomicops.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace {
+
+float Power(rtc::ArrayView<const float> input) {
+  if (input.empty()) {
+    return 0.f;
+  }
+  return std::inner_product(input.begin(), input.end(), input.begin(), 0.f) /
+         input.size();
+}
+
+constexpr size_t kLookbackFrames = 650;
+// TODO(ivoc): Verify the size of this buffer.
+constexpr size_t kRenderBufferSize = 30;
+constexpr float kAlpha = 0.001f;
+// 10 seconds of data, updated every 10 ms.
+constexpr size_t kAggregationBufferSize = 10 * 100;
+
+}  // namespace
+
+namespace webrtc {
+
+int ResidualEchoDetector::instance_count_ = 0;
+
+ResidualEchoDetector::ResidualEchoDetector()
+    : data_dumper_(
+          new ApmDataDumper(rtc::AtomicOps::Increment(&instance_count_))),
+      render_buffer_(kRenderBufferSize),
+      render_power_(kLookbackFrames),
+      render_power_mean_(kLookbackFrames),
+      render_power_std_dev_(kLookbackFrames),
+      covariances_(kLookbackFrames),
+      recent_likelihood_max_(kAggregationBufferSize) {}
+
+ResidualEchoDetector::~ResidualEchoDetector() = default;
+
+void ResidualEchoDetector::AnalyzeRenderAudio(
+    rtc::ArrayView<const float> render_audio) {
+  // Dump debug data assuming 48 kHz sample rate (if this assumption is not
+  // valid the dumped audio will need to be converted offline accordingly).
+  data_dumper_->DumpWav("ed_render", render_audio.size(), render_audio.data(),
+                        48000, 1);
+
+  if (render_buffer_.Size() == 0) {
+    frames_since_zero_buffer_size_ = 0;
+  } else if (frames_since_zero_buffer_size_ >= kRenderBufferSize) {
+    // This can happen in a few cases: at the start of a call, due to a glitch
+    // or due to clock drift. The excess capture value will be ignored.
+    // TODO(ivoc): Include how often this happens in APM stats.
+    render_buffer_.Pop();
+    frames_since_zero_buffer_size_ = 0;
+  }
+  ++frames_since_zero_buffer_size_;
+  float power = Power(render_audio);
+  render_buffer_.Push(power);
+}
+
+void ResidualEchoDetector::AnalyzeCaptureAudio(
+    rtc::ArrayView<const float> capture_audio) {
+  // Dump debug data assuming 48 kHz sample rate (if this assumption is not
+  // valid the dumped audio will need to be converted offline accordingly).
+  data_dumper_->DumpWav("ed_capture", capture_audio.size(),
+                        capture_audio.data(), 48000, 1);
+
+  if (first_process_call_) {
+    // On the first process call (so the start of a call), we must flush the
+    // render buffer, otherwise the render data will be delayed.
+    render_buffer_.Clear();
+    first_process_call_ = false;
+  }
+
+  // Get the next render value.
+  const rtc::Optional<float> buffered_render_power = render_buffer_.Pop();
+  if (!buffered_render_power) {
+    // This can happen in a few cases: at the start of a call, due to a glitch
+    // or due to clock drift. The excess capture value will be ignored.
+    // TODO(ivoc): Include how often this happens in APM stats.
+    return;
+  }
+  // Update the render statistics, and store the statistics in circular buffers.
+  render_statistics_.Update(*buffered_render_power);
+  RTC_DCHECK_LT(next_insertion_index_, kLookbackFrames);
+  render_power_[next_insertion_index_] = *buffered_render_power;
+  render_power_mean_[next_insertion_index_] = render_statistics_.mean();
+  render_power_std_dev_[next_insertion_index_] =
+      render_statistics_.std_deviation();
+
+  // Get the next capture value, update capture statistics and add the relevant
+  // values to the buffers.
+  const float capture_power = Power(capture_audio);
+  capture_statistics_.Update(capture_power);
+  const float capture_mean = capture_statistics_.mean();
+  const float capture_std_deviation = capture_statistics_.std_deviation();
+
+  // Update the covariance values and determine the new echo likelihood.
+  echo_likelihood_ = 0.f;
+  size_t read_index = next_insertion_index_;
+
+  int best_delay = -1;
+  for (size_t delay = 0; delay < covariances_.size(); ++delay) {
+    RTC_DCHECK_LT(read_index, render_power_.size());
+    covariances_[delay].Update(capture_power, capture_mean,
+                               capture_std_deviation, render_power_[read_index],
+                               render_power_mean_[read_index],
+                               render_power_std_dev_[read_index]);
+    read_index = read_index > 0 ? read_index - 1 : kLookbackFrames - 1;
+
+    if (covariances_[delay].normalized_cross_correlation() > echo_likelihood_) {
+      echo_likelihood_ = covariances_[delay].normalized_cross_correlation();
+      best_delay = static_cast<int>(delay);
+    }
+  }
+  // This is a temporary log message to help find the underlying cause for echo
+  // likelihoods > 1.0.
+  // TODO(ivoc): Remove once the issue is resolved.
+  if (echo_likelihood_ > 1.1f) {
+    // Make sure we don't spam the log.
+    if (log_counter_ < 5 && best_delay != -1) {
+      size_t read_index = kLookbackFrames + next_insertion_index_ - best_delay;
+      if (read_index >= kLookbackFrames) {
+        read_index -= kLookbackFrames;
+      }
+      RTC_DCHECK_LT(read_index, render_power_.size());
+      RTC_LOG_F(LS_ERROR)
+          << "Echo detector internal state: {"
+             "Echo likelihood: " << echo_likelihood_
+          << ", Best Delay: " << best_delay
+          << ", Covariance: " << covariances_[best_delay].covariance()
+          << ", Last capture power: " << capture_power
+          << ", Capture mean: " << capture_mean
+          << ", Capture_standard deviation: " << capture_std_deviation
+          << ", Last render power: " << render_power_[read_index]
+          << ", Render mean: " << render_power_mean_[read_index]
+          << ", Render standard deviation: "
+          << render_power_std_dev_[read_index]
+          << ", Reliability: " << reliability_ << "}";
+      log_counter_++;
+    }
+  }
+  RTC_DCHECK_LT(echo_likelihood_, 1.1f);
+
+  reliability_ = (1.0f - kAlpha) * reliability_ + kAlpha * 1.0f;
+  echo_likelihood_ *= reliability_;
+  // This is a temporary fix to prevent echo likelihood values > 1.0.
+  // TODO(ivoc): Find the root cause of this issue and fix it.
+  echo_likelihood_ = std::min(echo_likelihood_, 1.0f);
+  int echo_percentage = static_cast<int>(echo_likelihood_ * 100);
+  RTC_HISTOGRAM_COUNTS("WebRTC.Audio.ResidualEchoDetector.EchoLikelihood",
+                       echo_percentage, 0, 100, 100 /* number of bins */);
+
+  // Update the buffer of recent likelihood values.
+  recent_likelihood_max_.Update(echo_likelihood_);
+
+  // Update the next insertion index.
+  next_insertion_index_ = next_insertion_index_ < (kLookbackFrames - 1)
+                              ? next_insertion_index_ + 1
+                              : 0;
+}
+
+void ResidualEchoDetector::Initialize(int /*capture_sample_rate_hz*/,
+                                      int /*num_capture_channels*/,
+                                      int /*render_sample_rate_hz*/,
+                                      int /*num_render_channels*/) {
+  render_buffer_.Clear();
+  std::fill(render_power_.begin(), render_power_.end(), 0.f);
+  std::fill(render_power_mean_.begin(), render_power_mean_.end(), 0.f);
+  std::fill(render_power_std_dev_.begin(), render_power_std_dev_.end(), 0.f);
+  render_statistics_.Clear();
+  capture_statistics_.Clear();
+  recent_likelihood_max_.Clear();
+  for (auto& cov : covariances_) {
+    cov.Clear();
+  }
+  echo_likelihood_ = 0.f;
+  next_insertion_index_ = 0;
+  reliability_ = 0.f;
+}
+
+void EchoDetector::PackRenderAudioBuffer(AudioBuffer* audio,
+                                         std::vector<float>* packed_buffer) {
+  packed_buffer->clear();
+  packed_buffer->insert(packed_buffer->end(), audio->channels_f()[0],
+                        audio->channels_f()[0] + audio->num_frames());
+}
+
+EchoDetector::Metrics ResidualEchoDetector::GetMetrics() const {
+  EchoDetector::Metrics metrics;
+  metrics.echo_likelihood = echo_likelihood_;
+  metrics.echo_likelihood_recent_max = recent_likelihood_max_.max();
+  return metrics;
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/residual_echo_detector.h b/modules/audio_processing/residual_echo_detector.h
new file mode 100644
index 0000000..5d18ecb
--- /dev/null
+++ b/modules/audio_processing/residual_echo_detector.h
@@ -0,0 +1,90 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_RESIDUAL_ECHO_DETECTOR_H_
+#define MODULES_AUDIO_PROCESSING_RESIDUAL_ECHO_DETECTOR_H_
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/echo_detector/circular_buffer.h"
+#include "modules/audio_processing/echo_detector/mean_variance_estimator.h"
+#include "modules/audio_processing/echo_detector/moving_max.h"
+#include "modules/audio_processing/echo_detector/normalized_covariance_estimator.h"
+#include "modules/audio_processing/include/audio_processing.h"
+
+namespace webrtc {
+
+class ApmDataDumper;
+class AudioBuffer;
+
+class ResidualEchoDetector : public EchoDetector {
+ public:
+  ResidualEchoDetector();
+  ~ResidualEchoDetector() override;
+
+  // This function should be called while holding the render lock.
+  void AnalyzeRenderAudio(rtc::ArrayView<const float> render_audio) override;
+
+  // This function should be called while holding the capture lock.
+  void AnalyzeCaptureAudio(rtc::ArrayView<const float> capture_audio) override;
+
+  // This function should be called while holding the capture lock.
+  void Initialize(int capture_sample_rate_hz,
+                  int num_capture_channels,
+                  int render_sample_rate_hz,
+                  int num_render_channels) override;
+
+  // This function is for testing purposes only.
+  void SetReliabilityForTest(float value) { reliability_ = value; }
+
+  // This function should be called while holding the capture lock.
+  EchoDetector::Metrics GetMetrics() const override;
+
+ private:
+  static int instance_count_;
+  std::unique_ptr<ApmDataDumper> data_dumper_;
+  // Keep track if the |Process| function has been previously called.
+  bool first_process_call_ = true;
+  // Buffer for storing the power of incoming farend buffers. This is needed for
+  // cases where calls to BufferFarend and Process are jittery.
+  CircularBuffer render_buffer_;
+  // Count how long ago it was that the size of |render_buffer_| was zero. This
+  // value is also reset to zero when clock drift is detected and a value from
+  // the renderbuffer is discarded, even though the buffer is not actually zero
+  // at that point. This is done to avoid repeatedly removing elements in this
+  // situation.
+  size_t frames_since_zero_buffer_size_ = 0;
+
+  // Circular buffers containing delayed versions of the power, mean and
+  // standard deviation, for calculating the delayed covariance values.
+  std::vector<float> render_power_;
+  std::vector<float> render_power_mean_;
+  std::vector<float> render_power_std_dev_;
+  // Covariance estimates for different delay values.
+  std::vector<NormalizedCovarianceEstimator> covariances_;
+  // Index where next element should be inserted in all of the above circular
+  // buffers.
+  size_t next_insertion_index_ = 0;
+
+  MeanVarianceEstimator render_statistics_;
+  MeanVarianceEstimator capture_statistics_;
+  // Current echo likelihood.
+  float echo_likelihood_ = 0.f;
+  // Reliability of the current likelihood.
+  float reliability_ = 0.f;
+  MovingMax recent_likelihood_max_;
+
+  int log_counter_ = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_RESIDUAL_ECHO_DETECTOR_H_
diff --git a/modules/audio_processing/residual_echo_detector_unittest.cc b/modules/audio_processing/residual_echo_detector_unittest.cc
new file mode 100644
index 0000000..7bfa0d2
--- /dev/null
+++ b/modules/audio_processing/residual_echo_detector_unittest.cc
@@ -0,0 +1,132 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <vector>
+
+#include "modules/audio_processing/residual_echo_detector.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(ResidualEchoDetectorTests, Echo) {
+  ResidualEchoDetector echo_detector;
+  echo_detector.SetReliabilityForTest(1.0f);
+  std::vector<float> ones(160, 1.f);
+  std::vector<float> zeros(160, 0.f);
+
+  // In this test the capture signal has a delay of 10 frames w.r.t. the render
+  // signal, but is otherwise identical. Both signals are periodic with a 20
+  // frame interval.
+  for (int i = 0; i < 1000; i++) {
+    if (i % 20 == 0) {
+      echo_detector.AnalyzeRenderAudio(ones);
+      echo_detector.AnalyzeCaptureAudio(zeros);
+    } else if (i % 20 == 10) {
+      echo_detector.AnalyzeRenderAudio(zeros);
+      echo_detector.AnalyzeCaptureAudio(ones);
+    } else {
+      echo_detector.AnalyzeRenderAudio(zeros);
+      echo_detector.AnalyzeCaptureAudio(zeros);
+    }
+  }
+  // We expect to detect echo with near certain likelihood.
+  auto ed_metrics = echo_detector.GetMetrics();
+  EXPECT_NEAR(1.f, ed_metrics.echo_likelihood, 0.01f);
+}
+
+TEST(ResidualEchoDetectorTests, NoEcho) {
+  ResidualEchoDetector echo_detector;
+  echo_detector.SetReliabilityForTest(1.0f);
+  std::vector<float> ones(160, 1.f);
+  std::vector<float> zeros(160, 0.f);
+
+  // In this test the capture signal is always zero, so no echo should be
+  // detected.
+  for (int i = 0; i < 1000; i++) {
+    if (i % 20 == 0) {
+      echo_detector.AnalyzeRenderAudio(ones);
+    } else {
+      echo_detector.AnalyzeRenderAudio(zeros);
+    }
+    echo_detector.AnalyzeCaptureAudio(zeros);
+  }
+  // We expect to not detect any echo.
+  auto ed_metrics = echo_detector.GetMetrics();
+  EXPECT_NEAR(0.f, ed_metrics.echo_likelihood, 0.01f);
+}
+
+TEST(ResidualEchoDetectorTests, EchoWithRenderClockDrift) {
+  ResidualEchoDetector echo_detector;
+  echo_detector.SetReliabilityForTest(1.0f);
+  std::vector<float> ones(160, 1.f);
+  std::vector<float> zeros(160, 0.f);
+
+  // In this test the capture signal has a delay of 10 frames w.r.t. the render
+  // signal, but is otherwise identical. Both signals are periodic with a 20
+  // frame interval. There is a simulated clock drift of 1% in this test, with
+  // the render side producing data slightly faster.
+  for (int i = 0; i < 1000; i++) {
+    if (i % 20 == 0) {
+      echo_detector.AnalyzeRenderAudio(ones);
+      echo_detector.AnalyzeCaptureAudio(zeros);
+    } else if (i % 20 == 10) {
+      echo_detector.AnalyzeRenderAudio(zeros);
+      echo_detector.AnalyzeCaptureAudio(ones);
+    } else {
+      echo_detector.AnalyzeRenderAudio(zeros);
+      echo_detector.AnalyzeCaptureAudio(zeros);
+    }
+    if (i % 100 == 0) {
+      // This is causing the simulated clock drift.
+      echo_detector.AnalyzeRenderAudio(zeros);
+    }
+  }
+  // We expect to detect echo with high likelihood. Clock drift is harder to
+  // correct on the render side than on the capture side. This is due to the
+  // render buffer, clock drift can only be discovered after a certain delay.
+  // A growing buffer can be caused by jitter or clock drift and it's not
+  // possible to make this decision right away. For this reason we only expect
+  // an echo likelihood of 75% in this test.
+  auto ed_metrics = echo_detector.GetMetrics();
+  EXPECT_GT(ed_metrics.echo_likelihood, 0.75f);
+}
+
+TEST(ResidualEchoDetectorTests, EchoWithCaptureClockDrift) {
+  ResidualEchoDetector echo_detector;
+  echo_detector.SetReliabilityForTest(1.0f);
+  std::vector<float> ones(160, 1.f);
+  std::vector<float> zeros(160, 0.f);
+
+  // In this test the capture signal has a delay of 10 frames w.r.t. the render
+  // signal, but is otherwise identical. Both signals are periodic with a 20
+  // frame interval. There is a simulated clock drift of 1% in this test, with
+  // the capture side producing data slightly faster.
+  for (int i = 0; i < 1000; i++) {
+    if (i % 20 == 0) {
+      echo_detector.AnalyzeRenderAudio(ones);
+      echo_detector.AnalyzeCaptureAudio(zeros);
+    } else if (i % 20 == 10) {
+      echo_detector.AnalyzeRenderAudio(zeros);
+      echo_detector.AnalyzeCaptureAudio(ones);
+    } else {
+      echo_detector.AnalyzeRenderAudio(zeros);
+      echo_detector.AnalyzeCaptureAudio(zeros);
+    }
+    if (i % 100 == 0) {
+      // This is causing the simulated clock drift.
+      echo_detector.AnalyzeCaptureAudio(zeros);
+    }
+  }
+  // We expect to detect echo with near certain likelihood.
+  auto ed_metrics = echo_detector.GetMetrics();
+  EXPECT_NEAR(1.f, ed_metrics.echo_likelihood, 0.01f);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/rms_level.cc b/modules/audio_processing/rms_level.cc
new file mode 100644
index 0000000..55db226
--- /dev/null
+++ b/modules/audio_processing/rms_level.cc
@@ -0,0 +1,107 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/rms_level.h"
+
+#include <math.h>
+#include <algorithm>
+#include <numeric>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+static constexpr float kMaxSquaredLevel = 32768 * 32768;
+// kMinLevel is the level corresponding to kMinLevelDb, that is 10^(-127/10).
+static constexpr float kMinLevel = 1.995262314968883e-13f;
+
+// Calculates the normalized RMS value from a mean square value. The input
+// should be the sum of squared samples divided by the number of samples. The
+// value will be normalized to full range before computing the RMS, wich is
+// returned as a negated dBfs. That is, 0 is full amplitude while 127 is very
+// faint.
+int ComputeRms(float mean_square) {
+  if (mean_square <= kMinLevel * kMaxSquaredLevel) {
+    // Very faint; simply return the minimum value.
+    return RmsLevel::kMinLevelDb;
+  }
+  // Normalize by the max level.
+  const float mean_square_norm = mean_square / kMaxSquaredLevel;
+  RTC_DCHECK_GT(mean_square_norm, kMinLevel);
+  // 20log_10(x^0.5) = 10log_10(x)
+  const float rms = 10.f * log10(mean_square_norm);
+  RTC_DCHECK_LE(rms, 0.f);
+  RTC_DCHECK_GT(rms, -RmsLevel::kMinLevelDb);
+  // Return the negated value.
+  return static_cast<int>(-rms + 0.5f);
+}
+}  // namespace
+
+RmsLevel::RmsLevel() {
+  Reset();
+}
+
+RmsLevel::~RmsLevel() = default;
+
+void RmsLevel::Reset() {
+  sum_square_ = 0.f;
+  sample_count_ = 0;
+  max_sum_square_ = 0.f;
+  block_size_ = rtc::nullopt;
+}
+
+void RmsLevel::Analyze(rtc::ArrayView<const int16_t> data) {
+  if (data.empty()) {
+    return;
+  }
+
+  CheckBlockSize(data.size());
+
+  const float sum_square =
+      std::accumulate(data.begin(), data.end(), 0.f,
+                      [](float a, int16_t b) { return a + b * b; });
+  RTC_DCHECK_GE(sum_square, 0.f);
+  sum_square_ += sum_square;
+  sample_count_ += data.size();
+
+  max_sum_square_ = std::max(max_sum_square_, sum_square);
+}
+
+void RmsLevel::AnalyzeMuted(size_t length) {
+  CheckBlockSize(length);
+  sample_count_ += length;
+}
+
+int RmsLevel::Average() {
+  int rms = (sample_count_ == 0) ? RmsLevel::kMinLevelDb
+                                 : ComputeRms(sum_square_ / sample_count_);
+  Reset();
+  return rms;
+}
+
+RmsLevel::Levels RmsLevel::AverageAndPeak() {
+  // Note that block_size_ should by design always be non-empty when
+  // sample_count_ != 0. Also, the * operator of rtc::Optional enforces this
+  // with a DCHECK.
+  Levels levels = (sample_count_ == 0)
+                      ? Levels{RmsLevel::kMinLevelDb, RmsLevel::kMinLevelDb}
+                      : Levels{ComputeRms(sum_square_ / sample_count_),
+                               ComputeRms(max_sum_square_ / *block_size_)};
+  Reset();
+  return levels;
+}
+
+void RmsLevel::CheckBlockSize(size_t block_size) {
+  if (block_size_ != block_size) {
+    Reset();
+    block_size_ = block_size;
+  }
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/rms_level.h b/modules/audio_processing/rms_level.h
new file mode 100644
index 0000000..6fe22fd
--- /dev/null
+++ b/modules/audio_processing/rms_level.h
@@ -0,0 +1,75 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_RMS_LEVEL_H_
+#define MODULES_AUDIO_PROCESSING_RMS_LEVEL_H_
+
+#include "api/array_view.h"
+#include "api/optional.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Computes the root mean square (RMS) level in dBFs (decibels from digital
+// full-scale) of audio data. The computation follows RFC 6465:
+// https://tools.ietf.org/html/rfc6465
+// with the intent that it can provide the RTP audio level indication.
+//
+// The expected approach is to provide constant-sized chunks of audio to
+// Analyze(). When enough chunks have been accumulated to form a packet, call
+// Average() to get the audio level indicator for the RTP header.
+class RmsLevel {
+ public:
+  struct Levels {
+    int average;
+    int peak;
+  };
+
+  static constexpr int kMinLevelDb = 127;
+
+  RmsLevel();
+  ~RmsLevel();
+
+  // Can be called to reset internal states, but is not required during normal
+  // operation.
+  void Reset();
+
+  // Pass each chunk of audio to Analyze() to accumulate the level.
+  void Analyze(rtc::ArrayView<const int16_t> data);
+
+  // If all samples with the given |length| have a magnitude of zero, this is
+  // a shortcut to avoid some computation.
+  void AnalyzeMuted(size_t length);
+
+  // Computes the RMS level over all data passed to Analyze() since the last
+  // call to Average(). The returned value is positive but should be interpreted
+  // as negative as per the RFC. It is constrained to [0, 127]. Resets the
+  // internal state to start a new measurement period.
+  int Average();
+
+  // Like Average() above, but also returns the RMS peak value. Resets the
+  // internal state to start a new measurement period.
+  Levels AverageAndPeak();
+
+ private:
+  // Compares |block_size| with |block_size_|. If they are different, calls
+  // Reset() and stores the new size.
+  void CheckBlockSize(size_t block_size);
+
+  float sum_square_;
+  size_t sample_count_;
+  float max_sum_square_;
+  rtc::Optional<size_t> block_size_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_RMS_LEVEL_H_
+
diff --git a/modules/audio_processing/rms_level_unittest.cc b/modules/audio_processing/rms_level_unittest.cc
new file mode 100644
index 0000000..cf7683d
--- /dev/null
+++ b/modules/audio_processing/rms_level_unittest.cc
@@ -0,0 +1,150 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include <cmath>
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/rms_level.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/mathutils.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+constexpr int kSampleRateHz = 48000;
+constexpr size_t kBlockSizeSamples = kSampleRateHz / 100;
+
+std::unique_ptr<RmsLevel> RunTest(rtc::ArrayView<const int16_t> input) {
+  std::unique_ptr<RmsLevel> level(new RmsLevel);
+  for (size_t n = 0; n + kBlockSizeSamples <= input.size();
+       n += kBlockSizeSamples) {
+    level->Analyze(input.subview(n, kBlockSizeSamples));
+  }
+  return level;
+}
+
+std::vector<int16_t> CreateSinusoid(int frequency_hz,
+                                    int amplitude,
+                                    size_t num_samples) {
+  std::vector<int16_t> x(num_samples);
+  for (size_t n = 0; n < num_samples; ++n) {
+    x[n] = rtc::saturated_cast<int16_t>(
+        amplitude * std::sin(2 * M_PI * n * frequency_hz / kSampleRateHz));
+  }
+  return x;
+}
+}  // namespace
+
+TEST(RmsLevelTest, Run1000HzFullScale) {
+  auto x = CreateSinusoid(1000, INT16_MAX, kSampleRateHz);
+  auto level = RunTest(x);
+  EXPECT_EQ(3, level->Average());  // -3 dBFS
+}
+
+TEST(RmsLevelTest, Run1000HzFullScaleAverageAndPeak) {
+  auto x = CreateSinusoid(1000, INT16_MAX, kSampleRateHz);
+  auto level = RunTest(x);
+  auto stats = level->AverageAndPeak();
+  EXPECT_EQ(3, stats.average);  // -3 dBFS
+  EXPECT_EQ(3, stats.peak);
+}
+
+TEST(RmsLevelTest, Run1000HzHalfScale) {
+  auto x = CreateSinusoid(1000, INT16_MAX / 2, kSampleRateHz);
+  auto level = RunTest(x);
+  EXPECT_EQ(9, level->Average());  // -9 dBFS
+}
+
+TEST(RmsLevelTest, RunZeros) {
+  std::vector<int16_t> x(kSampleRateHz, 0);  // 1 second of pure silence.
+  auto level = RunTest(x);
+  EXPECT_EQ(127, level->Average());
+}
+
+TEST(RmsLevelTest, RunZerosAverageAndPeak) {
+  std::vector<int16_t> x(kSampleRateHz, 0);  // 1 second of pure silence.
+  auto level = RunTest(x);
+  auto stats = level->AverageAndPeak();
+  EXPECT_EQ(127, stats.average);
+  EXPECT_EQ(127, stats.peak);
+}
+
+TEST(RmsLevelTest, NoSamples) {
+  RmsLevel level;
+  EXPECT_EQ(127, level.Average());  // Return minimum if no samples are given.
+}
+
+TEST(RmsLevelTest, NoSamplesAverageAndPeak) {
+  RmsLevel level;
+  auto stats = level.AverageAndPeak();
+  EXPECT_EQ(127, stats.average);
+  EXPECT_EQ(127, stats.peak);
+}
+
+TEST(RmsLevelTest, PollTwice) {
+  auto x = CreateSinusoid(1000, INT16_MAX, kSampleRateHz);
+  auto level = RunTest(x);
+  level->Average();
+  EXPECT_EQ(127, level->Average());  // Stats should be reset at this point.
+}
+
+TEST(RmsLevelTest, Reset) {
+  auto x = CreateSinusoid(1000, INT16_MAX, kSampleRateHz);
+  auto level = RunTest(x);
+  level->Reset();
+  EXPECT_EQ(127, level->Average());  // Stats should be reset at this point.
+}
+
+// Inserts 1 second of full-scale sinusoid, followed by 1 second of muted.
+TEST(RmsLevelTest, ProcessMuted) {
+  auto x = CreateSinusoid(1000, INT16_MAX, kSampleRateHz);
+  auto level = RunTest(x);
+  const size_t kBlocksPerSecond = rtc::CheckedDivExact(
+      static_cast<size_t>(kSampleRateHz), kBlockSizeSamples);
+  for (size_t i = 0; i < kBlocksPerSecond; ++i) {
+    level->AnalyzeMuted(kBlockSizeSamples);
+  }
+  EXPECT_EQ(6, level->Average());  // Average RMS halved due to the silence.
+}
+
+// Inserts 1 second of half-scale sinusoid, follwed by 10 ms of full-scale, and
+// finally 1 second of half-scale again. Expect the average to be -9 dBFS due
+// to the vast majority of the signal being half-scale, and the peak to be
+// -3 dBFS.
+TEST(RmsLevelTest, RunHalfScaleAndInsertFullScale) {
+  auto half_scale = CreateSinusoid(1000, INT16_MAX / 2, kSampleRateHz);
+  auto full_scale = CreateSinusoid(1000, INT16_MAX, kSampleRateHz / 100);
+  auto x = half_scale;
+  x.insert(x.end(), full_scale.begin(), full_scale.end());
+  x.insert(x.end(), half_scale.begin(), half_scale.end());
+  ASSERT_EQ(static_cast<size_t>(2 * kSampleRateHz + kSampleRateHz / 100),
+            x.size());
+  auto level = RunTest(x);
+  auto stats = level->AverageAndPeak();
+  EXPECT_EQ(9, stats.average);
+  EXPECT_EQ(3, stats.peak);
+}
+
+TEST(RmsLevelTest, ResetOnBlockSizeChange) {
+  auto x = CreateSinusoid(1000, INT16_MAX, kSampleRateHz);
+  auto level = RunTest(x);
+  // Create a new signal with half amplitude, but double block length.
+  auto y = CreateSinusoid(1000, INT16_MAX / 2, kBlockSizeSamples * 2);
+  level->Analyze(y);
+  auto stats = level->AverageAndPeak();
+  // Expect all stats to only be influenced by the last signal (y), since the
+  // changed block size should reset the stats.
+  EXPECT_EQ(9, stats.average);
+  EXPECT_EQ(9, stats.peak);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/splitting_filter.cc b/modules/audio_processing/splitting_filter.cc
new file mode 100644
index 0000000..e2b8f82
--- /dev/null
+++ b/modules/audio_processing/splitting_filter.cc
@@ -0,0 +1,108 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/splitting_filter.h"
+
+#include "common_audio/channel_buffer.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+SplittingFilter::SplittingFilter(size_t num_channels,
+                                 size_t num_bands,
+                                 size_t num_frames)
+    : num_bands_(num_bands) {
+  RTC_CHECK(num_bands_ == 2 || num_bands_ == 3);
+  if (num_bands_ == 2) {
+    two_bands_states_.resize(num_channels);
+  } else if (num_bands_ == 3) {
+    for (size_t i = 0; i < num_channels; ++i) {
+      three_band_filter_banks_.push_back(std::unique_ptr<ThreeBandFilterBank>(
+          new ThreeBandFilterBank(num_frames)));
+    }
+  }
+}
+
+SplittingFilter::~SplittingFilter() = default;
+
+void SplittingFilter::Analysis(const IFChannelBuffer* data,
+                               IFChannelBuffer* bands) {
+  RTC_DCHECK_EQ(num_bands_, bands->num_bands());
+  RTC_DCHECK_EQ(data->num_channels(), bands->num_channels());
+  RTC_DCHECK_EQ(data->num_frames(),
+                bands->num_frames_per_band() * bands->num_bands());
+  if (bands->num_bands() == 2) {
+    TwoBandsAnalysis(data, bands);
+  } else if (bands->num_bands() == 3) {
+    ThreeBandsAnalysis(data, bands);
+  }
+}
+
+void SplittingFilter::Synthesis(const IFChannelBuffer* bands,
+                                IFChannelBuffer* data) {
+  RTC_DCHECK_EQ(num_bands_, bands->num_bands());
+  RTC_DCHECK_EQ(data->num_channels(), bands->num_channels());
+  RTC_DCHECK_EQ(data->num_frames(),
+                bands->num_frames_per_band() * bands->num_bands());
+  if (bands->num_bands() == 2) {
+    TwoBandsSynthesis(bands, data);
+  } else if (bands->num_bands() == 3) {
+    ThreeBandsSynthesis(bands, data);
+  }
+}
+
+void SplittingFilter::TwoBandsAnalysis(const IFChannelBuffer* data,
+                                       IFChannelBuffer* bands) {
+  RTC_DCHECK_EQ(two_bands_states_.size(), data->num_channels());
+  for (size_t i = 0; i < two_bands_states_.size(); ++i) {
+    WebRtcSpl_AnalysisQMF(data->ibuf_const()->channels()[i],
+                          data->num_frames(),
+                          bands->ibuf()->channels(0)[i],
+                          bands->ibuf()->channels(1)[i],
+                          two_bands_states_[i].analysis_state1,
+                          two_bands_states_[i].analysis_state2);
+  }
+}
+
+void SplittingFilter::TwoBandsSynthesis(const IFChannelBuffer* bands,
+                                        IFChannelBuffer* data) {
+  RTC_DCHECK_LE(data->num_channels(), two_bands_states_.size());
+  for (size_t i = 0; i < data->num_channels(); ++i) {
+    WebRtcSpl_SynthesisQMF(bands->ibuf_const()->channels(0)[i],
+                           bands->ibuf_const()->channels(1)[i],
+                           bands->num_frames_per_band(),
+                           data->ibuf()->channels()[i],
+                           two_bands_states_[i].synthesis_state1,
+                           two_bands_states_[i].synthesis_state2);
+  }
+}
+
+void SplittingFilter::ThreeBandsAnalysis(const IFChannelBuffer* data,
+                                         IFChannelBuffer* bands) {
+  RTC_DCHECK_EQ(three_band_filter_banks_.size(), data->num_channels());
+  for (size_t i = 0; i < three_band_filter_banks_.size(); ++i) {
+    three_band_filter_banks_[i]->Analysis(data->fbuf_const()->channels()[i],
+                                          data->num_frames(),
+                                          bands->fbuf()->bands(i));
+  }
+}
+
+void SplittingFilter::ThreeBandsSynthesis(const IFChannelBuffer* bands,
+                                          IFChannelBuffer* data) {
+  RTC_DCHECK_LE(data->num_channels(), three_band_filter_banks_.size());
+  for (size_t i = 0; i < data->num_channels(); ++i) {
+    three_band_filter_banks_[i]->Synthesis(bands->fbuf_const()->bands(i),
+                                           bands->num_frames_per_band(),
+                                           data->fbuf()->channels()[i]);
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/splitting_filter.h b/modules/audio_processing/splitting_filter.h
new file mode 100644
index 0000000..7d60c82
--- /dev/null
+++ b/modules/audio_processing/splitting_filter.h
@@ -0,0 +1,69 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_SPLITTING_FILTER_H_
+#define MODULES_AUDIO_PROCESSING_SPLITTING_FILTER_H_
+
+#include <cstring>
+#include <memory>
+#include <vector>
+
+#include "modules/audio_processing/three_band_filter_bank.h"
+
+namespace webrtc {
+
+class IFChannelBuffer;
+
+struct TwoBandsStates {
+  TwoBandsStates() {
+    memset(analysis_state1, 0, sizeof(analysis_state1));
+    memset(analysis_state2, 0, sizeof(analysis_state2));
+    memset(synthesis_state1, 0, sizeof(synthesis_state1));
+    memset(synthesis_state2, 0, sizeof(synthesis_state2));
+  }
+
+  static const int kStateSize = 6;
+  int analysis_state1[kStateSize];
+  int analysis_state2[kStateSize];
+  int synthesis_state1[kStateSize];
+  int synthesis_state2[kStateSize];
+};
+
+// Splitting filter which is able to split into and merge from 2 or 3 frequency
+// bands. The number of channels needs to be provided at construction time.
+//
+// For each block, Analysis() is called to split into bands and then Synthesis()
+// to merge these bands again. The input and output signals are contained in
+// IFChannelBuffers and for the different bands an array of IFChannelBuffers is
+// used.
+class SplittingFilter {
+ public:
+  SplittingFilter(size_t num_channels, size_t num_bands, size_t num_frames);
+  ~SplittingFilter();
+
+  void Analysis(const IFChannelBuffer* data, IFChannelBuffer* bands);
+  void Synthesis(const IFChannelBuffer* bands, IFChannelBuffer* data);
+
+ private:
+  // Two-band analysis and synthesis work for 640 samples or less.
+  void TwoBandsAnalysis(const IFChannelBuffer* data, IFChannelBuffer* bands);
+  void TwoBandsSynthesis(const IFChannelBuffer* bands, IFChannelBuffer* data);
+  void ThreeBandsAnalysis(const IFChannelBuffer* data, IFChannelBuffer* bands);
+  void ThreeBandsSynthesis(const IFChannelBuffer* bands, IFChannelBuffer* data);
+  void InitBuffers();
+
+  const size_t num_bands_;
+  std::vector<TwoBandsStates> two_bands_states_;
+  std::vector<std::unique_ptr<ThreeBandFilterBank>> three_band_filter_banks_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_SPLITTING_FILTER_H_
diff --git a/modules/audio_processing/splitting_filter_unittest.cc b/modules/audio_processing/splitting_filter_unittest.cc
new file mode 100644
index 0000000..3e0dbb9
--- /dev/null
+++ b/modules/audio_processing/splitting_filter_unittest.cc
@@ -0,0 +1,106 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+
+#include <cmath>
+
+#include "common_audio/channel_buffer.h"
+#include "modules/audio_processing/splitting_filter.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+const size_t kSamplesPer16kHzChannel = 160;
+const size_t kSamplesPer48kHzChannel = 480;
+
+}  // namespace
+
+// Generates a signal from presence or absence of sine waves of different
+// frequencies.
+// Splits into 3 bands and checks their presence or absence.
+// Recombines the bands.
+// Calculates the delay.
+// Checks that the cross correlation of input and output is high enough at the
+// calculated delay.
+TEST(SplittingFilterTest, SplitsIntoThreeBandsAndReconstructs) {
+  static const int kChannels = 1;
+  static const int kSampleRateHz = 48000;
+  static const size_t kNumBands = 3;
+  static const int kFrequenciesHz[kNumBands] = {1000, 12000, 18000};
+  static const float kAmplitude = 8192.f;
+  static const size_t kChunks = 8;
+  SplittingFilter splitting_filter(kChannels,
+                                   kNumBands,
+                                   kSamplesPer48kHzChannel);
+  IFChannelBuffer in_data(kSamplesPer48kHzChannel, kChannels, kNumBands);
+  IFChannelBuffer bands(kSamplesPer48kHzChannel, kChannels, kNumBands);
+  IFChannelBuffer out_data(kSamplesPer48kHzChannel, kChannels, kNumBands);
+  for (size_t i = 0; i < kChunks; ++i) {
+    // Input signal generation.
+    bool is_present[kNumBands];
+    memset(in_data.fbuf()->channels()[0],
+           0,
+           kSamplesPer48kHzChannel * sizeof(in_data.fbuf()->channels()[0][0]));
+    for (size_t j = 0; j < kNumBands; ++j) {
+      is_present[j] = i & (static_cast<size_t>(1) << j);
+      float amplitude = is_present[j] ? kAmplitude : 0.f;
+      for (size_t k = 0; k < kSamplesPer48kHzChannel; ++k) {
+        in_data.fbuf()->channels()[0][k] +=
+            amplitude * sin(2.f * M_PI * kFrequenciesHz[j] *
+                (i * kSamplesPer48kHzChannel + k) / kSampleRateHz);
+      }
+    }
+    // Three band splitting filter.
+    splitting_filter.Analysis(&in_data, &bands);
+    // Energy calculation.
+    float energy[kNumBands];
+    for (size_t j = 0; j < kNumBands; ++j) {
+      energy[j] = 0.f;
+      for (size_t k = 0; k < kSamplesPer16kHzChannel; ++k) {
+        energy[j] += bands.fbuf_const()->channels(j)[0][k] *
+                     bands.fbuf_const()->channels(j)[0][k];
+      }
+      energy[j] /= kSamplesPer16kHzChannel;
+      if (is_present[j]) {
+        EXPECT_GT(energy[j], kAmplitude * kAmplitude / 4);
+      } else {
+        EXPECT_LT(energy[j], kAmplitude * kAmplitude / 4);
+      }
+    }
+    // Three band merge.
+    splitting_filter.Synthesis(&bands, &out_data);
+    // Delay and cross correlation estimation.
+    float xcorr = 0.f;
+    for (size_t delay = 0; delay < kSamplesPer48kHzChannel; ++delay) {
+      float tmpcorr = 0.f;
+      for (size_t j = delay; j < kSamplesPer48kHzChannel; ++j) {
+        tmpcorr += in_data.fbuf_const()->channels()[0][j - delay] *
+                   out_data.fbuf_const()->channels()[0][j];
+      }
+      tmpcorr /= kSamplesPer48kHzChannel;
+      if (tmpcorr > xcorr) {
+        xcorr = tmpcorr;
+      }
+    }
+    // High cross correlation check.
+    bool any_present = false;
+    for (size_t j = 0; j < kNumBands; ++j) {
+      any_present |= is_present[j];
+    }
+    if (any_present) {
+      EXPECT_GT(xcorr, kAmplitude * kAmplitude / 4);
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/three_band_filter_bank.cc b/modules/audio_processing/three_band_filter_bank.cc
new file mode 100644
index 0000000..f5a319b
--- /dev/null
+++ b/modules/audio_processing/three_band_filter_bank.cc
@@ -0,0 +1,216 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// An implementation of a 3-band FIR filter-bank with DCT modulation, similar to
+// the proposed in "Multirate Signal Processing for Communication Systems" by
+// Fredric J Harris.
+//
+// The idea is to take a heterodyne system and change the order of the
+// components to get something which is efficient to implement digitally.
+//
+// It is possible to separate the filter using the noble identity as follows:
+//
+// H(z) = H0(z^3) + z^-1 * H1(z^3) + z^-2 * H2(z^3)
+//
+// This is used in the analysis stage to first downsample serial to parallel
+// and then filter each branch with one of these polyphase decompositions of the
+// lowpass prototype. Because each filter is only a modulation of the prototype,
+// it is enough to multiply each coefficient by the respective cosine value to
+// shift it to the desired band. But because the cosine period is 12 samples,
+// it requires separating the prototype even further using the noble identity.
+// After filtering and modulating for each band, the output of all filters is
+// accumulated to get the downsampled bands.
+//
+// A similar logic can be applied to the synthesis stage.
+
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+
+#include "modules/audio_processing/three_band_filter_bank.h"
+
+#include <cmath>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+const size_t kNumBands = 3;
+const size_t kSparsity = 4;
+
+// Factors to take into account when choosing |kNumCoeffs|:
+//   1. Higher |kNumCoeffs|, means faster transition, which ensures less
+//      aliasing. This is especially important when there is non-linear
+//      processing between the splitting and merging.
+//   2. The delay that this filter bank introduces is
+//      |kNumBands| * |kSparsity| * |kNumCoeffs| / 2, so it increases linearly
+//      with |kNumCoeffs|.
+//   3. The computation complexity also increases linearly with |kNumCoeffs|.
+const size_t kNumCoeffs = 4;
+
+// The Matlab code to generate these |kLowpassCoeffs| is:
+//
+// N = kNumBands * kSparsity * kNumCoeffs - 1;
+// h = fir1(N, 1 / (2 * kNumBands), kaiser(N + 1, 3.5));
+// reshape(h, kNumBands * kSparsity, kNumCoeffs);
+//
+// Because the total bandwidth of the lower and higher band is double the middle
+// one (because of the spectrum parity), the low-pass prototype is half the
+// bandwidth of 1 / (2 * |kNumBands|) and is then shifted with cosine modulation
+// to the right places.
+// A Kaiser window is used because of its flexibility and the alpha is set to
+// 3.5, since that sets a stop band attenuation of 40dB ensuring a fast
+// transition.
+const float kLowpassCoeffs[kNumBands * kSparsity][kNumCoeffs] =
+    {{-0.00047749f, -0.00496888f, +0.16547118f, +0.00425496f},
+     {-0.00173287f, -0.01585778f, +0.14989004f, +0.00994113f},
+     {-0.00304815f, -0.02536082f, +0.12154542f, +0.01157993f},
+     {-0.00383509f, -0.02982767f, +0.08543175f, +0.00983212f},
+     {-0.00346946f, -0.02587886f, +0.04760441f, +0.00607594f},
+     {-0.00154717f, -0.01136076f, +0.01387458f, +0.00186353f},
+     {+0.00186353f, +0.01387458f, -0.01136076f, -0.00154717f},
+     {+0.00607594f, +0.04760441f, -0.02587886f, -0.00346946f},
+     {+0.00983212f, +0.08543175f, -0.02982767f, -0.00383509f},
+     {+0.01157993f, +0.12154542f, -0.02536082f, -0.00304815f},
+     {+0.00994113f, +0.14989004f, -0.01585778f, -0.00173287f},
+     {+0.00425496f, +0.16547118f, -0.00496888f, -0.00047749f}};
+
+// Downsamples |in| into |out|, taking one every |kNumbands| starting from
+// |offset|. |split_length| is the |out| length. |in| has to be at least
+// |kNumBands| * |split_length| long.
+void Downsample(const float* in,
+                size_t split_length,
+                size_t offset,
+                float* out) {
+  for (size_t i = 0; i < split_length; ++i) {
+    out[i] = in[kNumBands * i + offset];
+  }
+}
+
+// Upsamples |in| into |out|, scaling by |kNumBands| and accumulating it every
+// |kNumBands| starting from |offset|. |split_length| is the |in| length. |out|
+// has to be at least |kNumBands| * |split_length| long.
+void Upsample(const float* in, size_t split_length, size_t offset, float* out) {
+  for (size_t i = 0; i < split_length; ++i) {
+    out[kNumBands * i + offset] += kNumBands * in[i];
+  }
+}
+
+}  // namespace
+
+// Because the low-pass filter prototype has half bandwidth it is possible to
+// use a DCT to shift it in both directions at the same time, to the center
+// frequencies [1 / 12, 3 / 12, 5 / 12].
+ThreeBandFilterBank::ThreeBandFilterBank(size_t length)
+    : in_buffer_(rtc::CheckedDivExact(length, kNumBands)),
+      out_buffer_(in_buffer_.size()) {
+  for (size_t i = 0; i < kSparsity; ++i) {
+    for (size_t j = 0; j < kNumBands; ++j) {
+      analysis_filters_.push_back(
+          std::unique_ptr<SparseFIRFilter>(new SparseFIRFilter(
+              kLowpassCoeffs[i * kNumBands + j], kNumCoeffs, kSparsity, i)));
+      synthesis_filters_.push_back(
+          std::unique_ptr<SparseFIRFilter>(new SparseFIRFilter(
+              kLowpassCoeffs[i * kNumBands + j], kNumCoeffs, kSparsity, i)));
+    }
+  }
+  dct_modulation_.resize(kNumBands * kSparsity);
+  for (size_t i = 0; i < dct_modulation_.size(); ++i) {
+    dct_modulation_[i].resize(kNumBands);
+    for (size_t j = 0; j < kNumBands; ++j) {
+      dct_modulation_[i][j] =
+          2.f * cos(2.f * M_PI * i * (2.f * j + 1.f) / dct_modulation_.size());
+    }
+  }
+}
+
+ThreeBandFilterBank::~ThreeBandFilterBank() = default;
+
+// The analysis can be separated in these steps:
+//   1. Serial to parallel downsampling by a factor of |kNumBands|.
+//   2. Filtering of |kSparsity| different delayed signals with polyphase
+//      decomposition of the low-pass prototype filter and upsampled by a factor
+//      of |kSparsity|.
+//   3. Modulating with cosines and accumulating to get the desired band.
+void ThreeBandFilterBank::Analysis(const float* in,
+                                   size_t length,
+                                   float* const* out) {
+  RTC_CHECK_EQ(in_buffer_.size(), rtc::CheckedDivExact(length, kNumBands));
+  for (size_t i = 0; i < kNumBands; ++i) {
+    memset(out[i], 0, in_buffer_.size() * sizeof(*out[i]));
+  }
+  for (size_t i = 0; i < kNumBands; ++i) {
+    Downsample(in, in_buffer_.size(), kNumBands - i - 1, &in_buffer_[0]);
+    for (size_t j = 0; j < kSparsity; ++j) {
+      const size_t offset = i + j * kNumBands;
+      analysis_filters_[offset]->Filter(&in_buffer_[0],
+                                        in_buffer_.size(),
+                                        &out_buffer_[0]);
+      DownModulate(&out_buffer_[0], out_buffer_.size(), offset, out);
+    }
+  }
+}
+
+// The synthesis can be separated in these steps:
+//   1. Modulating with cosines.
+//   2. Filtering each one with a polyphase decomposition of the low-pass
+//      prototype filter upsampled by a factor of |kSparsity| and accumulating
+//      |kSparsity| signals with different delays.
+//   3. Parallel to serial upsampling by a factor of |kNumBands|.
+void ThreeBandFilterBank::Synthesis(const float* const* in,
+                                    size_t split_length,
+                                    float* out) {
+  RTC_CHECK_EQ(in_buffer_.size(), split_length);
+  memset(out, 0, kNumBands * in_buffer_.size() * sizeof(*out));
+  for (size_t i = 0; i < kNumBands; ++i) {
+    for (size_t j = 0; j < kSparsity; ++j) {
+      const size_t offset = i + j * kNumBands;
+      UpModulate(in, in_buffer_.size(), offset, &in_buffer_[0]);
+      synthesis_filters_[offset]->Filter(&in_buffer_[0],
+                                         in_buffer_.size(),
+                                         &out_buffer_[0]);
+      Upsample(&out_buffer_[0], out_buffer_.size(), i, out);
+    }
+  }
+}
+
+
+// Modulates |in| by |dct_modulation_| and accumulates it in each of the
+// |kNumBands| bands of |out|. |offset| is the index in the period of the
+// cosines used for modulation. |split_length| is the length of |in| and each
+// band of |out|.
+void ThreeBandFilterBank::DownModulate(const float* in,
+                                       size_t split_length,
+                                       size_t offset,
+                                       float* const* out) {
+  for (size_t i = 0; i < kNumBands; ++i) {
+    for (size_t j = 0; j < split_length; ++j) {
+      out[i][j] += dct_modulation_[offset][i] * in[j];
+    }
+  }
+}
+
+// Modulates each of the |kNumBands| bands of |in| by |dct_modulation_| and
+// accumulates them in |out|. |out| is cleared before starting to accumulate.
+// |offset| is the index in the period of the cosines used for modulation.
+// |split_length| is the length of each band of |in| and |out|.
+void ThreeBandFilterBank::UpModulate(const float* const* in,
+                                     size_t split_length,
+                                     size_t offset,
+                                     float* out) {
+  memset(out, 0, split_length * sizeof(*out));
+  for (size_t i = 0; i < kNumBands; ++i) {
+    for (size_t j = 0; j < split_length; ++j) {
+      out[j] += dct_modulation_[offset][i] * in[i][j];
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/three_band_filter_bank.h b/modules/audio_processing/three_band_filter_bank.h
new file mode 100644
index 0000000..ccbf2dd
--- /dev/null
+++ b/modules/audio_processing/three_band_filter_bank.h
@@ -0,0 +1,69 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_THREE_BAND_FILTER_BANK_H_
+#define MODULES_AUDIO_PROCESSING_THREE_BAND_FILTER_BANK_H_
+
+#include <cstring>
+#include <memory>
+#include <vector>
+
+#include "common_audio/sparse_fir_filter.h"
+
+namespace webrtc {
+
+// An implementation of a 3-band FIR filter-bank with DCT modulation, similar to
+// the proposed in "Multirate Signal Processing for Communication Systems" by
+// Fredric J Harris.
+// The low-pass filter prototype has these characteristics:
+// * Pass-band ripple = 0.3dB
+// * Pass-band frequency = 0.147 (7kHz at 48kHz)
+// * Stop-band attenuation = 40dB
+// * Stop-band frequency = 0.192 (9.2kHz at 48kHz)
+// * Delay = 24 samples (500us at 48kHz)
+// * Linear phase
+// This filter bank does not satisfy perfect reconstruction. The SNR after
+// analysis and synthesis (with no processing in between) is approximately 9.5dB
+// depending on the input signal after compensating for the delay.
+class ThreeBandFilterBank final {
+ public:
+  explicit ThreeBandFilterBank(size_t length);
+  ~ThreeBandFilterBank();
+
+  // Splits |in| into 3 downsampled frequency bands in |out|.
+  // |length| is the |in| length. Each of the 3 bands of |out| has to have a
+  // length of |length| / 3.
+  void Analysis(const float* in, size_t length, float* const* out);
+
+  // Merges the 3 downsampled frequency bands in |in| into |out|.
+  // |split_length| is the length of each band of |in|. |out| has to have at
+  // least a length of 3 * |split_length|.
+  void Synthesis(const float* const* in, size_t split_length, float* out);
+
+ private:
+  void DownModulate(const float* in,
+                    size_t split_length,
+                    size_t offset,
+                    float* const* out);
+  void UpModulate(const float* const* in,
+                  size_t split_length,
+                  size_t offset,
+                  float* out);
+
+  std::vector<float> in_buffer_;
+  std::vector<float> out_buffer_;
+  std::vector<std::unique_ptr<SparseFIRFilter>> analysis_filters_;
+  std::vector<std::unique_ptr<SparseFIRFilter>> synthesis_filters_;
+  std::vector<std::vector<float>> dct_modulation_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_THREE_BAND_FILTER_BANK_H_
diff --git a/modules/audio_processing/transient/click_annotate.cc b/modules/audio_processing/transient/click_annotate.cc
new file mode 100644
index 0000000..a8b4a30
--- /dev/null
+++ b/modules/audio_processing/transient/click_annotate.cc
@@ -0,0 +1,113 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cfloat>
+#include <cstdio>
+#include <cstdlib>
+#include <memory>
+#include <vector>
+
+#include "modules/audio_processing/transient/transient_detector.h"
+#include "modules/audio_processing/transient/file_utils.h"
+#include "system_wrappers/include/file_wrapper.h"
+
+using webrtc::FileWrapper;
+using webrtc::TransientDetector;
+
+// Application to generate a RTP timing file.
+// Opens the PCM file and divides the signal in frames.
+// Creates a send times array, one for each step.
+// Each block that contains a transient, has an infinite send time.
+// The resultant array is written to a DAT file
+// Returns -1 on error or |lost_packets| otherwise.
+int main(int argc, char* argv[]) {
+  if (argc != 5) {
+    printf("\n%s - Application to generate a RTP timing file.\n\n", argv[0]);
+    printf("%s PCMfile DATfile chunkSize sampleRate\n\n", argv[0]);
+    printf("Opens the PCMfile with sampleRate in Hertz.\n");
+    printf("Creates a send times array, one for each chunkSize ");
+    printf("milliseconds step.\n");
+    printf("Each block that contains a transient, has an infinite send time. ");
+    printf("The resultant array is written to a DATfile.\n\n");
+    return 0;
+  }
+
+  std::unique_ptr<FileWrapper> pcm_file(FileWrapper::Create());
+  pcm_file->OpenFile(argv[1], true);
+  if (!pcm_file->is_open()) {
+    printf("\nThe %s could not be opened.\n\n", argv[1]);
+    return -1;
+  }
+
+  std::unique_ptr<FileWrapper> dat_file(FileWrapper::Create());
+  dat_file->OpenFile(argv[2], false);
+  if (!dat_file->is_open()) {
+    printf("\nThe %s could not be opened.\n\n", argv[2]);
+    return -1;
+  }
+
+  int chunk_size_ms = atoi(argv[3]);
+  if (chunk_size_ms <= 0) {
+    printf("\nThe chunkSize must be a positive integer\n\n");
+    return -1;
+  }
+
+  int sample_rate_hz = atoi(argv[4]);
+  if (sample_rate_hz <= 0) {
+    printf("\nThe sampleRate must be a positive integer\n\n");
+    return -1;
+  }
+
+  TransientDetector detector(sample_rate_hz);
+  int lost_packets = 0;
+  size_t audio_buffer_length = chunk_size_ms * sample_rate_hz / 1000;
+  std::unique_ptr<float[]> audio_buffer(new float[audio_buffer_length]);
+  std::vector<float> send_times;
+
+  // Read first buffer from the PCM test file.
+  size_t file_samples_read = ReadInt16FromFileToFloatBuffer(
+      pcm_file.get(),
+      audio_buffer_length,
+      audio_buffer.get());
+  for (int time = 0; file_samples_read > 0; time += chunk_size_ms) {
+    // Pad the rest of the buffer with zeros.
+    for (size_t i = file_samples_read; i < audio_buffer_length; ++i) {
+      audio_buffer[i] = 0.0;
+    }
+    float value =
+        detector.Detect(audio_buffer.get(), audio_buffer_length, NULL, 0);
+    if (value < 0.5f) {
+      value = time;
+    } else {
+      value = FLT_MAX;
+      ++lost_packets;
+    }
+    send_times.push_back(value);
+
+    // Read next buffer from the PCM test file.
+    file_samples_read = ReadInt16FromFileToFloatBuffer(pcm_file.get(),
+                                                       audio_buffer_length,
+                                                       audio_buffer.get());
+  }
+
+  size_t floats_written = WriteFloatBufferToFile(dat_file.get(),
+                                                 send_times.size(),
+                                                 &send_times[0]);
+
+  if (floats_written == 0) {
+    printf("\nThe send times could not be written to DAT file\n\n");
+    return -1;
+  }
+
+  pcm_file->CloseFile();
+  dat_file->CloseFile();
+
+  return lost_packets;
+}
diff --git a/modules/audio_processing/transient/common.h b/modules/audio_processing/transient/common.h
new file mode 100644
index 0000000..69546fc
--- /dev/null
+++ b/modules/audio_processing/transient/common.h
@@ -0,0 +1,27 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_COMMON_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_COMMON_H_
+namespace webrtc {
+namespace ts {
+
+static const float kPi = 3.14159265358979323846f;
+static const int kChunkSizeMs = 10;
+enum {
+  kSampleRate8kHz = 8000,
+  kSampleRate16kHz = 16000,
+  kSampleRate32kHz = 32000,
+  kSampleRate48kHz = 48000
+};
+
+} // namespace ts
+} // namespace webrtc
+#endif  // MODULES_AUDIO_PROCESSING_TRANSIENT_COMMON_H_
diff --git a/modules/audio_processing/transient/daubechies_8_wavelet_coeffs.h b/modules/audio_processing/transient/daubechies_8_wavelet_coeffs.h
new file mode 100644
index 0000000..4de24e0
--- /dev/null
+++ b/modules/audio_processing/transient/daubechies_8_wavelet_coeffs.h
@@ -0,0 +1,63 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This header file defines the coefficients of the FIR based approximation of
+// the Meyer Wavelet
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_DAUBECHIES_8_WAVELET_COEFFS_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_DAUBECHIES_8_WAVELET_COEFFS_H_
+
+// Decomposition coefficients Daubechies 8.
+
+namespace webrtc {
+
+const int kDaubechies8CoefficientsLength = 16;
+
+const float kDaubechies8HighPassCoefficients[kDaubechies8CoefficientsLength]
+    = {
+  -5.44158422430816093862e-02f,
+  3.12871590914465924627e-01f,
+  -6.75630736298012846142e-01f,
+  5.85354683654869090148e-01f,
+  1.58291052560238926228e-02f,
+  -2.84015542962428091389e-01f,
+  -4.72484573997972536787e-04f,
+  1.28747426620186011803e-01f,
+  1.73693010020221083600e-02f,
+  -4.40882539310647192377e-02f,
+  -1.39810279170155156436e-02f,
+  8.74609404701565465445e-03f,
+  4.87035299301066034600e-03f,
+  -3.91740372995977108837e-04f,
+  -6.75449405998556772109e-04f,
+  -1.17476784002281916305e-04f
+};
+
+const float kDaubechies8LowPassCoefficients[kDaubechies8CoefficientsLength] = {
+  -1.17476784002281916305e-04f,
+  6.75449405998556772109e-04f,
+  -3.91740372995977108837e-04f,
+  -4.87035299301066034600e-03f,
+  8.74609404701565465445e-03f,
+  1.39810279170155156436e-02f,
+  -4.40882539310647192377e-02f,
+  -1.73693010020221083600e-02f,
+  1.28747426620186011803e-01f,
+  4.72484573997972536787e-04f,
+  -2.84015542962428091389e-01f,
+  -1.58291052560238926228e-02f,
+  5.85354683654869090148e-01f,
+  6.75630736298012846142e-01f,
+  3.12871590914465924627e-01f,
+  5.44158422430816093862e-02f
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_TRANSIENT_DAUBECHIES_8_WAVELET_COEFFS_H_
diff --git a/modules/audio_processing/transient/dyadic_decimator.h b/modules/audio_processing/transient/dyadic_decimator.h
new file mode 100644
index 0000000..104f95d
--- /dev/null
+++ b/modules/audio_processing/transient/dyadic_decimator.h
@@ -0,0 +1,70 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_DYADIC_DECIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_DYADIC_DECIMATOR_H_
+
+#include <cstdlib>
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+// Provides a set of static methods to perform dyadic decimations.
+
+namespace webrtc {
+
+// Returns the proper length of the output buffer that you should use for the
+// given |in_length| and decimation |odd_sequence|.
+// Return -1 on error.
+inline size_t GetOutLengthToDyadicDecimate(size_t in_length,
+                                           bool odd_sequence) {
+  size_t out_length = in_length / 2;
+
+  if (in_length % 2 == 1 && !odd_sequence) {
+    ++out_length;
+  }
+
+  return out_length;
+}
+
+// Performs a dyadic decimation: removes every odd/even member of a sequence
+// halving its overall length.
+// Arguments:
+//    in: array of |in_length|.
+//    odd_sequence: If false, the odd members will be removed (1, 3, 5, ...);
+//                  if true, the even members will be removed (0, 2, 4, ...).
+//    out: array of |out_length|. |out_length| must be large enough to
+//         hold the decimated output. The necessary length can be provided by
+//         GetOutLengthToDyadicDecimate().
+//         Must be previously allocated.
+// Returns the number of output samples, -1 on error.
+template<typename T>
+static size_t DyadicDecimate(const T* in,
+                             size_t in_length,
+                             bool odd_sequence,
+                             T* out,
+                             size_t out_length) {
+  size_t half_length = GetOutLengthToDyadicDecimate(in_length, odd_sequence);
+
+  if (!in || !out || in_length <= 0 || out_length < half_length) {
+    return 0;
+  }
+
+  size_t output_samples = 0;
+  size_t index_adjustment = odd_sequence ? 1 : 0;
+  for (output_samples = 0; output_samples < half_length; ++output_samples) {
+    out[output_samples] = in[output_samples * 2 + index_adjustment];
+  }
+
+  return output_samples;
+}
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_TRANSIENT_DYADIC_DECIMATOR_H_
diff --git a/modules/audio_processing/transient/dyadic_decimator_unittest.cc b/modules/audio_processing/transient/dyadic_decimator_unittest.cc
new file mode 100644
index 0000000..c407f47
--- /dev/null
+++ b/modules/audio_processing/transient/dyadic_decimator_unittest.cc
@@ -0,0 +1,126 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/dyadic_decimator.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+static const size_t kEvenBufferLength = 6;
+static const size_t kOddBufferLength = 5;
+static const size_t kOutBufferLength = 3;
+
+int16_t const test_buffer_even_len[] = {0, 1, 2, 3, 4, 5};
+int16_t const test_buffer_odd_len[]  = {0, 1, 2, 3, 4};
+int16_t test_buffer_out[kOutBufferLength];
+
+TEST(DyadicDecimatorTest, GetOutLengthToDyadicDecimate) {
+  EXPECT_EQ(3u, GetOutLengthToDyadicDecimate(6, false));
+  EXPECT_EQ(3u, GetOutLengthToDyadicDecimate(6, true));
+  EXPECT_EQ(3u, GetOutLengthToDyadicDecimate(5, false));
+  EXPECT_EQ(2u, GetOutLengthToDyadicDecimate(5, true));
+}
+
+
+TEST(DyadicDecimatorTest, DyadicDecimateErrorValues) {
+  size_t out_samples = 0;
+
+  out_samples = DyadicDecimate(static_cast<int16_t*>(NULL),
+                               kEvenBufferLength,
+                               false,  // Even sequence.
+                               test_buffer_out,
+                               kOutBufferLength);
+  EXPECT_EQ(0u, out_samples);
+
+  out_samples = DyadicDecimate(test_buffer_even_len,
+                               kEvenBufferLength,
+                               false,  // Even sequence.
+                               static_cast<int16_t*>(NULL),
+                               kOutBufferLength);
+  EXPECT_EQ(0u, out_samples);
+
+  // Less than required |out_length|.
+  out_samples = DyadicDecimate(test_buffer_even_len,
+                               kEvenBufferLength,
+                               false,  // Even sequence.
+                               test_buffer_out,
+                               2);
+  EXPECT_EQ(0u, out_samples);
+}
+
+TEST(DyadicDecimatorTest, DyadicDecimateEvenLengthEvenSequence) {
+  size_t expected_out_samples =
+      GetOutLengthToDyadicDecimate(kEvenBufferLength, false);
+
+  size_t out_samples = DyadicDecimate(test_buffer_even_len,
+                                      kEvenBufferLength,
+                                      false,  // Even sequence.
+                                      test_buffer_out,
+                                      kOutBufferLength);
+
+  EXPECT_EQ(expected_out_samples, out_samples);
+
+  EXPECT_EQ(0, test_buffer_out[0]);
+  EXPECT_EQ(2, test_buffer_out[1]);
+  EXPECT_EQ(4, test_buffer_out[2]);
+}
+
+TEST(DyadicDecimatorTest, DyadicDecimateEvenLengthOddSequence) {
+  size_t expected_out_samples =
+      GetOutLengthToDyadicDecimate(kEvenBufferLength, true);
+
+  size_t out_samples = DyadicDecimate(test_buffer_even_len,
+                                      kEvenBufferLength,
+                                      true,  // Odd sequence.
+                                      test_buffer_out,
+                                      kOutBufferLength);
+
+  EXPECT_EQ(expected_out_samples, out_samples);
+
+  EXPECT_EQ(1, test_buffer_out[0]);
+  EXPECT_EQ(3, test_buffer_out[1]);
+  EXPECT_EQ(5, test_buffer_out[2]);
+}
+
+TEST(DyadicDecimatorTest, DyadicDecimateOddLengthEvenSequence) {
+  size_t expected_out_samples =
+      GetOutLengthToDyadicDecimate(kOddBufferLength, false);
+
+  size_t out_samples = DyadicDecimate(test_buffer_odd_len,
+                                      kOddBufferLength,
+                                      false,  // Even sequence.
+                                      test_buffer_out,
+                                      kOutBufferLength);
+
+  EXPECT_EQ(expected_out_samples, out_samples);
+
+  EXPECT_EQ(0, test_buffer_out[0]);
+  EXPECT_EQ(2, test_buffer_out[1]);
+  EXPECT_EQ(4, test_buffer_out[2]);
+}
+
+TEST(DyadicDecimatorTest, DyadicDecimateOddLengthOddSequence) {
+  size_t expected_out_samples =
+      GetOutLengthToDyadicDecimate(kOddBufferLength, true);
+
+  size_t out_samples = DyadicDecimate(test_buffer_odd_len,
+                                      kOddBufferLength,
+                                      true,  // Odd sequence.
+                                      test_buffer_out,
+                                      kOutBufferLength);
+
+  EXPECT_EQ(expected_out_samples, out_samples);
+
+  EXPECT_EQ(1, test_buffer_out[0]);
+  EXPECT_EQ(3, test_buffer_out[1]);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/transient/file_utils.cc b/modules/audio_processing/transient/file_utils.cc
new file mode 100644
index 0000000..7bf2e08
--- /dev/null
+++ b/modules/audio_processing/transient/file_utils.cc
@@ -0,0 +1,258 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/file_utils.h"
+
+#include <memory>
+
+#include "system_wrappers/include/file_wrapper.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+int ConvertByteArrayToFloat(const uint8_t bytes[4], float* out) {
+  if (!bytes || !out) {
+    return -1;
+  }
+
+  uint32_t binary_value = 0;
+  for (int i = 3; i >= 0; --i) {
+    binary_value <<= 8;
+    binary_value += bytes[i];
+  }
+
+  *out = bit_cast<float>(binary_value);
+
+  return 0;
+}
+
+int ConvertByteArrayToDouble(const uint8_t bytes[8], double* out) {
+  if (!bytes || !out) {
+    return -1;
+  }
+
+  uint64_t binary_value = 0;
+  for (int i = 7; i >= 0; --i) {
+    binary_value <<= 8;
+    binary_value += bytes[i];
+  }
+
+  *out = bit_cast<double>(binary_value);
+
+  return 0;
+}
+
+int ConvertFloatToByteArray(float value, uint8_t out_bytes[4]) {
+  if (!out_bytes) {
+    return -1;
+  }
+
+  uint32_t binary_value = bit_cast<uint32_t>(value);
+  for (size_t i = 0; i < 4; ++i) {
+    out_bytes[i] = binary_value;
+    binary_value >>= 8;
+  }
+
+  return 0;
+}
+
+int ConvertDoubleToByteArray(double value, uint8_t out_bytes[8]) {
+  if (!out_bytes) {
+    return -1;
+  }
+
+  uint64_t binary_value = bit_cast<uint64_t>(value);
+  for (size_t i = 0; i < 8; ++i) {
+    out_bytes[i] = binary_value;
+    binary_value >>= 8;
+  }
+
+  return 0;
+}
+
+size_t ReadInt16BufferFromFile(FileWrapper* file,
+                               size_t length,
+                               int16_t* buffer) {
+  if (!file || !file->is_open() || !buffer || length <= 0) {
+    return 0;
+  }
+
+  std::unique_ptr<uint8_t[]> byte_array(new uint8_t[2]);
+
+  size_t int16s_read = 0;
+
+  while (int16s_read < length) {
+    size_t bytes_read = file->Read(byte_array.get(), 2);
+    if (bytes_read < 2) {
+      break;
+    }
+    int16_t value = byte_array[1];
+    value <<= 8;
+    value += byte_array[0];
+    buffer[int16s_read] = value;
+    ++int16s_read;
+  }
+
+  return int16s_read;
+}
+
+size_t ReadInt16FromFileToFloatBuffer(FileWrapper* file,
+                                      size_t length,
+                                      float* buffer) {
+  if (!file || !file->is_open() || !buffer || length <= 0) {
+    return 0;
+  }
+
+  std::unique_ptr<int16_t[]> buffer16(new int16_t[length]);
+
+  size_t int16s_read = ReadInt16BufferFromFile(file, length, buffer16.get());
+
+  for (size_t i = 0; i < int16s_read; ++i) {
+    buffer[i] = buffer16[i];
+  }
+
+  return int16s_read;
+}
+
+size_t ReadInt16FromFileToDoubleBuffer(FileWrapper* file,
+                                       size_t length,
+                                       double* buffer) {
+  if (!file || !file->is_open() || !buffer || length <= 0) {
+    return 0;
+  }
+
+  std::unique_ptr<int16_t[]> buffer16(new int16_t[length]);
+
+  size_t int16s_read = ReadInt16BufferFromFile(file, length, buffer16.get());
+
+  for (size_t i = 0; i < int16s_read; ++i) {
+    buffer[i] = buffer16[i];
+  }
+
+  return int16s_read;
+}
+
+size_t ReadFloatBufferFromFile(FileWrapper* file,
+                               size_t length,
+                               float* buffer) {
+  if (!file || !file->is_open() || !buffer || length <= 0) {
+    return 0;
+  }
+
+  std::unique_ptr<uint8_t[]> byte_array(new uint8_t[4]);
+
+  size_t floats_read = 0;
+
+  while (floats_read < length) {
+    size_t bytes_read = file->Read(byte_array.get(), 4);
+    if (bytes_read < 4) {
+      break;
+    }
+    ConvertByteArrayToFloat(byte_array.get(), &buffer[floats_read]);
+    ++floats_read;
+  }
+
+  return floats_read;
+}
+
+size_t ReadDoubleBufferFromFile(FileWrapper* file,
+                                size_t length,
+                                double* buffer) {
+  if (!file || !file->is_open() || !buffer || length <= 0) {
+    return 0;
+  }
+
+  std::unique_ptr<uint8_t[]> byte_array(new uint8_t[8]);
+
+  size_t doubles_read = 0;
+
+  while (doubles_read < length) {
+    size_t bytes_read = file->Read(byte_array.get(), 8);
+    if (bytes_read < 8) {
+      break;
+    }
+    ConvertByteArrayToDouble(byte_array.get(), &buffer[doubles_read]);
+    ++doubles_read;
+  }
+
+  return doubles_read;
+}
+
+size_t WriteInt16BufferToFile(FileWrapper* file,
+                              size_t length,
+                              const int16_t* buffer) {
+  if (!file || !file->is_open() || !buffer || length <= 0) {
+    return 0;
+  }
+
+  std::unique_ptr<uint8_t[]> byte_array(new uint8_t[2]);
+
+  size_t int16s_written = 0;
+
+  for (int16s_written = 0; int16s_written < length; ++int16s_written) {
+    // Get byte representation.
+    byte_array[0] = buffer[int16s_written] & 0xFF;
+    byte_array[1] = (buffer[int16s_written] >> 8) & 0xFF;
+
+    file->Write(byte_array.get(), 2);
+  }
+
+  file->Flush();
+
+  return int16s_written;
+}
+
+size_t WriteFloatBufferToFile(FileWrapper* file,
+                              size_t length,
+                              const float* buffer) {
+  if (!file || !file->is_open() || !buffer || length <= 0) {
+    return 0;
+  }
+
+  std::unique_ptr<uint8_t[]> byte_array(new uint8_t[4]);
+
+  size_t floats_written = 0;
+
+  for (floats_written = 0; floats_written < length; ++floats_written) {
+    // Get byte representation.
+    ConvertFloatToByteArray(buffer[floats_written], byte_array.get());
+
+    file->Write(byte_array.get(), 4);
+  }
+
+  file->Flush();
+
+  return floats_written;
+}
+
+size_t WriteDoubleBufferToFile(FileWrapper* file,
+                               size_t length,
+                               const double* buffer) {
+  if (!file || !file->is_open() || !buffer || length <= 0) {
+    return 0;
+  }
+
+  std::unique_ptr<uint8_t[]> byte_array(new uint8_t[8]);
+
+  size_t doubles_written = 0;
+
+  for (doubles_written = 0; doubles_written < length; ++doubles_written) {
+    // Get byte representation.
+    ConvertDoubleToByteArray(buffer[doubles_written], byte_array.get());
+
+    file->Write(byte_array.get(), 8);
+  }
+
+  file->Flush();
+
+  return doubles_written;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/transient/file_utils.h b/modules/audio_processing/transient/file_utils.h
new file mode 100644
index 0000000..3f05c1d
--- /dev/null
+++ b/modules/audio_processing/transient/file_utils.h
@@ -0,0 +1,118 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_FILE_UTILS_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_FILE_UTILS_H_
+
+#include <string.h>
+
+#include "system_wrappers/include/file_wrapper.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// This is a copy of the cast included in the Chromium codebase here:
+// http://cs.chromium.org/src/third_party/cld/base/casts.h
+template <class Dest, class Source>
+inline Dest bit_cast(const Source& source) {
+  // A compile error here means your Dest and Source have different sizes.
+  static_assert(sizeof(Dest) == sizeof(Source),
+                "Dest and Source have different sizes");
+
+  Dest dest;
+  memcpy(&dest, &source, sizeof(dest));
+  return dest;
+}
+
+// Converts the byte array with binary float representation to float.
+// Bytes must be in little-endian order.
+// Returns 0 if correct, -1 on error.
+int ConvertByteArrayToFloat(const uint8_t bytes[4], float* out);
+
+// Converts the byte array with binary double representation to double.
+// Bytes must be in little-endian order.
+// Returns 0 if correct, -1 on error.
+int ConvertByteArrayToDouble(const uint8_t bytes[8], double* out);
+
+// Converts a float to a byte array with binary float representation.
+// Bytes will be in little-endian order.
+// Returns 0 if correct, -1 on error.
+int ConvertFloatToByteArray(float value, uint8_t out_bytes[4]);
+
+// Converts a double to a byte array with binary double representation.
+// Bytes will be in little-endian order.
+// Returns 0 if correct, -1 on error.
+int ConvertDoubleToByteArray(double value, uint8_t out_bytes[8]);
+
+// Reads |length| 16-bit integers from |file| to |buffer|.
+// |file| must be previously opened.
+// Returns the number of 16-bit integers read or -1 on error.
+size_t ReadInt16BufferFromFile(FileWrapper* file,
+                               size_t length,
+                               int16_t* buffer);
+
+// Reads |length| 16-bit integers from |file| and stores those values
+// (converting them) in |buffer|.
+// |file| must be previously opened.
+// Returns the number of 16-bit integers read or -1 on error.
+size_t ReadInt16FromFileToFloatBuffer(FileWrapper* file,
+                                      size_t length,
+                                      float* buffer);
+
+// Reads |length| 16-bit integers from |file| and stores those values
+// (converting them) in |buffer|.
+// |file| must be previously opened.
+// Returns the number of 16-bit integers read or -1 on error.
+size_t ReadInt16FromFileToDoubleBuffer(FileWrapper* file,
+                                       size_t length,
+                                       double* buffer);
+
+// Reads |length| floats in binary representation (4 bytes) from |file| to
+// |buffer|.
+// |file| must be previously opened.
+// Returns the number of floats read or -1 on error.
+size_t ReadFloatBufferFromFile(FileWrapper* file, size_t length, float* buffer);
+
+// Reads |length| doubles in binary representation (8 bytes) from |file| to
+// |buffer|.
+// |file| must be previously opened.
+// Returns the number of doubles read or -1 on error.
+size_t ReadDoubleBufferFromFile(FileWrapper* file,
+                                size_t length,
+                                double* buffer);
+
+// Writes |length| 16-bit integers from |buffer| in binary representation (2
+// bytes) to |file|. It flushes |file|, so after this call there are no
+// writings pending.
+// |file| must be previously opened.
+// Returns the number of doubles written or -1 on error.
+size_t WriteInt16BufferToFile(FileWrapper* file,
+                              size_t length,
+                              const int16_t* buffer);
+
+// Writes |length| floats from |buffer| in binary representation (4 bytes) to
+// |file|. It flushes |file|, so after this call there are no writtings pending.
+// |file| must be previously opened.
+// Returns the number of doubles written or -1 on error.
+size_t WriteFloatBufferToFile(FileWrapper* file,
+                              size_t length,
+                              const float* buffer);
+
+// Writes |length| doubles from |buffer| in binary representation (8 bytes) to
+// |file|. It flushes |file|, so after this call there are no writings pending.
+// |file| must be previously opened.
+// Returns the number of doubles written or -1 on error.
+size_t WriteDoubleBufferToFile(FileWrapper* file,
+                               size_t length,
+                               const double* buffer);
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_TRANSIENT_FILE_UTILS_H_
diff --git a/modules/audio_processing/transient/file_utils_unittest.cc b/modules/audio_processing/transient/file_utils_unittest.cc
new file mode 100644
index 0000000..c5e0399
--- /dev/null
+++ b/modules/audio_processing/transient/file_utils_unittest.cc
@@ -0,0 +1,539 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/file_utils.h"
+
+#include <string.h>
+#include <string>
+#include <memory>
+#include <vector>
+
+#include "system_wrappers/include/file_wrapper.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+static const uint8_t kPiBytesf[4] = {0xDB, 0x0F, 0x49, 0x40};
+static const uint8_t kEBytesf[4] = {0x54, 0xF8, 0x2D, 0x40};
+static const uint8_t kAvogadroBytesf[4] = {0x2F, 0x0C, 0xFF, 0x66};
+
+static const uint8_t kPiBytes[8] =
+    {0x18, 0x2D, 0x44, 0x54, 0xFB, 0x21, 0x09, 0x40};
+static const uint8_t kEBytes[8] =
+    {0x69, 0x57, 0x14, 0x8B, 0x0A, 0xBF, 0x05, 0x40};
+static const uint8_t kAvogadroBytes[8] =
+    {0xF4, 0xBC, 0xA8, 0xDF, 0x85, 0xE1, 0xDF, 0x44};
+
+static const double kPi = 3.14159265358979323846;
+static const double kE = 2.71828182845904523536;
+static const double kAvogadro = 602214100000000000000000.0;
+
+class TransientFileUtilsTest: public ::testing::Test {
+ protected:
+  TransientFileUtilsTest()
+      : kTestFileName(
+            test::ResourcePath("audio_processing/transient/double-utils",
+                               "dat")),
+        kTestFileNamef(
+            test::ResourcePath("audio_processing/transient/float-utils",
+                               "dat")) {}
+
+  ~TransientFileUtilsTest() override {
+    CleanupTempFiles();
+  }
+
+  std::string CreateTempFilename(const std::string& dir,
+      const std::string& prefix) {
+    std::string filename = test::TempFilename(dir, prefix);
+    temp_filenames_.push_back(filename);
+    return filename;
+  }
+
+  void CleanupTempFiles() {
+    for (const std::string& filename : temp_filenames_) {
+      remove(filename.c_str());
+    }
+    temp_filenames_.clear();
+  }
+
+  // This file (used in some tests) contains binary data. The data correspond to
+  // the double representation of the constants: Pi, E, and the Avogadro's
+  // Number;
+  // appended in that order.
+  const std::string kTestFileName;
+
+  // This file (used in some tests) contains binary data. The data correspond to
+  // the float representation of the constants: Pi, E, and the Avogadro's
+  // Number;
+  // appended in that order.
+  const std::string kTestFileNamef;
+
+  // List of temporary filenames created by CreateTempFilename.
+  std::vector<std::string> temp_filenames_;
+};
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_ConvertByteArrayToFloat DISABLED_ConvertByteArrayToFloat
+#else
+#define MAYBE_ConvertByteArrayToFloat ConvertByteArrayToFloat
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ConvertByteArrayToFloat) {
+  float value = 0.0;
+
+  EXPECT_EQ(0, ConvertByteArrayToFloat(kPiBytesf, &value));
+  EXPECT_FLOAT_EQ(kPi, value);
+
+  EXPECT_EQ(0, ConvertByteArrayToFloat(kEBytesf, &value));
+  EXPECT_FLOAT_EQ(kE, value);
+
+  EXPECT_EQ(0, ConvertByteArrayToFloat(kAvogadroBytesf, &value));
+  EXPECT_FLOAT_EQ(kAvogadro, value);
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_ConvertByteArrayToDouble DISABLED_ConvertByteArrayToDouble
+#else
+#define MAYBE_ConvertByteArrayToDouble ConvertByteArrayToDouble
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ConvertByteArrayToDouble) {
+  double value = 0.0;
+
+  EXPECT_EQ(0, ConvertByteArrayToDouble(kPiBytes, &value));
+  EXPECT_DOUBLE_EQ(kPi, value);
+
+  EXPECT_EQ(0, ConvertByteArrayToDouble(kEBytes, &value));
+  EXPECT_DOUBLE_EQ(kE, value);
+
+  EXPECT_EQ(0, ConvertByteArrayToDouble(kAvogadroBytes, &value));
+  EXPECT_DOUBLE_EQ(kAvogadro, value);
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_ConvertFloatToByteArray DISABLED_ConvertFloatToByteArray
+#else
+#define MAYBE_ConvertFloatToByteArray ConvertFloatToByteArray
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ConvertFloatToByteArray) {
+  std::unique_ptr<uint8_t[]> bytes(new uint8_t[4]);
+
+  EXPECT_EQ(0, ConvertFloatToByteArray(kPi, bytes.get()));
+  EXPECT_EQ(0, memcmp(bytes.get(), kPiBytesf, 4));
+
+  EXPECT_EQ(0, ConvertFloatToByteArray(kE, bytes.get()));
+  EXPECT_EQ(0, memcmp(bytes.get(), kEBytesf, 4));
+
+  EXPECT_EQ(0, ConvertFloatToByteArray(kAvogadro, bytes.get()));
+  EXPECT_EQ(0, memcmp(bytes.get(), kAvogadroBytesf, 4));
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_ConvertDoubleToByteArray DISABLED_ConvertDoubleToByteArray
+#else
+#define MAYBE_ConvertDoubleToByteArray ConvertDoubleToByteArray
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ConvertDoubleToByteArray) {
+  std::unique_ptr<uint8_t[]> bytes(new uint8_t[8]);
+
+  EXPECT_EQ(0, ConvertDoubleToByteArray(kPi, bytes.get()));
+  EXPECT_EQ(0, memcmp(bytes.get(), kPiBytes, 8));
+
+  EXPECT_EQ(0, ConvertDoubleToByteArray(kE, bytes.get()));
+  EXPECT_EQ(0, memcmp(bytes.get(), kEBytes, 8));
+
+  EXPECT_EQ(0, ConvertDoubleToByteArray(kAvogadro, bytes.get()));
+  EXPECT_EQ(0, memcmp(bytes.get(), kAvogadroBytes, 8));
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_ReadInt16BufferFromFile DISABLED_ReadInt16BufferFromFile
+#else
+#define MAYBE_ReadInt16BufferFromFile ReadInt16BufferFromFile
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ReadInt16BufferFromFile) {
+  std::string test_filename = kTestFileName;
+
+  std::unique_ptr<FileWrapper> file(FileWrapper::Create());
+
+  file->OpenFile(test_filename.c_str(), true);  // Read only.
+  ASSERT_TRUE(file->is_open()) << "File could not be opened:\n"
+                               << kTestFileName.c_str();
+
+  const size_t kBufferLength = 12;
+  std::unique_ptr<int16_t[]> buffer(new int16_t[kBufferLength]);
+
+  EXPECT_EQ(kBufferLength, ReadInt16BufferFromFile(file.get(),
+                                                   kBufferLength,
+                                                   buffer.get()));
+  EXPECT_EQ(22377, buffer[4]);
+  EXPECT_EQ(16389, buffer[7]);
+  EXPECT_EQ(17631, buffer[kBufferLength - 1]);
+
+  file->Rewind();
+
+  // The next test is for checking the case where there are not as much data as
+  // needed in the file, but reads to the end, and it returns the number of
+  // int16s read.
+  const size_t kBufferLenghtLargerThanFile = kBufferLength * 2;
+  buffer.reset(new int16_t[kBufferLenghtLargerThanFile]);
+  EXPECT_EQ(kBufferLength, ReadInt16BufferFromFile(file.get(),
+                                                   kBufferLenghtLargerThanFile,
+                                                   buffer.get()));
+  EXPECT_EQ(11544, buffer[0]);
+  EXPECT_EQ(22377, buffer[4]);
+  EXPECT_EQ(16389, buffer[7]);
+  EXPECT_EQ(17631, buffer[kBufferLength - 1]);
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_ReadInt16FromFileToFloatBuffer \
+  DISABLED_ReadInt16FromFileToFloatBuffer
+#else
+#define MAYBE_ReadInt16FromFileToFloatBuffer ReadInt16FromFileToFloatBuffer
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ReadInt16FromFileToFloatBuffer) {
+  std::string test_filename = kTestFileName;
+
+  std::unique_ptr<FileWrapper> file(FileWrapper::Create());
+
+  file->OpenFile(test_filename.c_str(), true);  // Read only.
+  ASSERT_TRUE(file->is_open()) << "File could not be opened:\n"
+                               << kTestFileName.c_str();
+
+  const size_t kBufferLength = 12;
+  std::unique_ptr<float[]> buffer(new float[kBufferLength]);
+
+  EXPECT_EQ(kBufferLength, ReadInt16FromFileToFloatBuffer(file.get(),
+                                                          kBufferLength,
+                                                          buffer.get()));
+
+  EXPECT_DOUBLE_EQ(11544, buffer[0]);
+  EXPECT_DOUBLE_EQ(22377, buffer[4]);
+  EXPECT_DOUBLE_EQ(16389, buffer[7]);
+  EXPECT_DOUBLE_EQ(17631, buffer[kBufferLength - 1]);
+
+  file->Rewind();
+
+  // The next test is for checking the case where there are not as much data as
+  // needed in the file, but reads to the end, and it returns the number of
+  // int16s read.
+  const size_t kBufferLenghtLargerThanFile = kBufferLength * 2;
+  buffer.reset(new float[kBufferLenghtLargerThanFile]);
+  EXPECT_EQ(kBufferLength,
+            ReadInt16FromFileToFloatBuffer(file.get(),
+                                           kBufferLenghtLargerThanFile,
+                                           buffer.get()));
+  EXPECT_DOUBLE_EQ(11544, buffer[0]);
+  EXPECT_DOUBLE_EQ(22377, buffer[4]);
+  EXPECT_DOUBLE_EQ(16389, buffer[7]);
+  EXPECT_DOUBLE_EQ(17631, buffer[kBufferLength - 1]);
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_ReadInt16FromFileToDoubleBuffer \
+  DISABLED_ReadInt16FromFileToDoubleBuffer
+#else
+#define MAYBE_ReadInt16FromFileToDoubleBuffer ReadInt16FromFileToDoubleBuffer
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ReadInt16FromFileToDoubleBuffer) {
+  std::string test_filename = kTestFileName;
+
+  std::unique_ptr<FileWrapper> file(FileWrapper::Create());
+
+  file->OpenFile(test_filename.c_str(), true);  // Read only.
+  ASSERT_TRUE(file->is_open()) << "File could not be opened:\n"
+                               << kTestFileName.c_str();
+
+  const size_t kBufferLength = 12;
+  std::unique_ptr<double[]> buffer(new double[kBufferLength]);
+
+  EXPECT_EQ(kBufferLength, ReadInt16FromFileToDoubleBuffer(file.get(),
+                                                           kBufferLength,
+                                                           buffer.get()));
+  EXPECT_DOUBLE_EQ(11544, buffer[0]);
+  EXPECT_DOUBLE_EQ(22377, buffer[4]);
+  EXPECT_DOUBLE_EQ(16389, buffer[7]);
+  EXPECT_DOUBLE_EQ(17631, buffer[kBufferLength - 1]);
+
+  file->Rewind();
+
+  // The next test is for checking the case where there are not as much data as
+  // needed in the file, but reads to the end, and it returns the number of
+  // int16s read.
+  const size_t kBufferLenghtLargerThanFile = kBufferLength * 2;
+  buffer.reset(new double[kBufferLenghtLargerThanFile]);
+  EXPECT_EQ(kBufferLength,
+            ReadInt16FromFileToDoubleBuffer(file.get(),
+                                            kBufferLenghtLargerThanFile,
+                                            buffer.get()));
+  EXPECT_DOUBLE_EQ(11544, buffer[0]);
+  EXPECT_DOUBLE_EQ(22377, buffer[4]);
+  EXPECT_DOUBLE_EQ(16389, buffer[7]);
+  EXPECT_DOUBLE_EQ(17631, buffer[kBufferLength - 1]);
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_ReadFloatBufferFromFile DISABLED_ReadFloatBufferFromFile
+#else
+#define MAYBE_ReadFloatBufferFromFile ReadFloatBufferFromFile
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ReadFloatBufferFromFile) {
+  std::string test_filename = kTestFileNamef;
+
+  std::unique_ptr<FileWrapper> file(FileWrapper::Create());
+
+  file->OpenFile(test_filename.c_str(), true);  // Read only.
+  ASSERT_TRUE(file->is_open()) << "File could not be opened:\n"
+                               << kTestFileNamef.c_str();
+
+  const size_t kBufferLength = 3;
+  std::unique_ptr<float[]> buffer(new float[kBufferLength]);
+
+  EXPECT_EQ(kBufferLength, ReadFloatBufferFromFile(file.get(),
+                                                   kBufferLength,
+                                                   buffer.get()));
+  EXPECT_FLOAT_EQ(kPi, buffer[0]);
+  EXPECT_FLOAT_EQ(kE, buffer[1]);
+  EXPECT_FLOAT_EQ(kAvogadro, buffer[2]);
+
+  file->Rewind();
+
+  // The next test is for checking the case where there are not as much data as
+  // needed in the file, but reads to the end, and it returns the number of
+  // doubles read.
+  const size_t kBufferLenghtLargerThanFile = kBufferLength * 2;
+  buffer.reset(new float[kBufferLenghtLargerThanFile]);
+  EXPECT_EQ(kBufferLength, ReadFloatBufferFromFile(file.get(),
+                                                   kBufferLenghtLargerThanFile,
+                                                   buffer.get()));
+  EXPECT_FLOAT_EQ(kPi, buffer[0]);
+  EXPECT_FLOAT_EQ(kE, buffer[1]);
+  EXPECT_FLOAT_EQ(kAvogadro, buffer[2]);
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_ReadDoubleBufferFromFile DISABLED_ReadDoubleBufferFromFile
+#else
+#define MAYBE_ReadDoubleBufferFromFile ReadDoubleBufferFromFile
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ReadDoubleBufferFromFile) {
+  std::string test_filename = kTestFileName;
+
+  std::unique_ptr<FileWrapper> file(FileWrapper::Create());
+
+  file->OpenFile(test_filename.c_str(), true);  // Read only.
+  ASSERT_TRUE(file->is_open()) << "File could not be opened:\n"
+                               << kTestFileName.c_str();
+
+  const size_t kBufferLength = 3;
+  std::unique_ptr<double[]> buffer(new double[kBufferLength]);
+
+  EXPECT_EQ(kBufferLength, ReadDoubleBufferFromFile(file.get(),
+                                                    kBufferLength,
+                                                    buffer.get()));
+  EXPECT_DOUBLE_EQ(kPi, buffer[0]);
+  EXPECT_DOUBLE_EQ(kE, buffer[1]);
+  EXPECT_DOUBLE_EQ(kAvogadro, buffer[2]);
+
+  file->Rewind();
+
+  // The next test is for checking the case where there are not as much data as
+  // needed in the file, but reads to the end, and it returns the number of
+  // doubles read.
+  const size_t kBufferLenghtLargerThanFile = kBufferLength * 2;
+  buffer.reset(new double[kBufferLenghtLargerThanFile]);
+  EXPECT_EQ(kBufferLength, ReadDoubleBufferFromFile(file.get(),
+                                                    kBufferLenghtLargerThanFile,
+                                                    buffer.get()));
+  EXPECT_DOUBLE_EQ(kPi, buffer[0]);
+  EXPECT_DOUBLE_EQ(kE, buffer[1]);
+  EXPECT_DOUBLE_EQ(kAvogadro, buffer[2]);
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_WriteInt16BufferToFile DISABLED_WriteInt16BufferToFile
+#else
+#define MAYBE_WriteInt16BufferToFile WriteInt16BufferToFile
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_WriteInt16BufferToFile) {
+  std::unique_ptr<FileWrapper> file(FileWrapper::Create());
+
+  std::string kOutFileName = CreateTempFilename(test::OutputPath(),
+                                                "utils_test");
+
+  file->OpenFile(kOutFileName.c_str(), false);  // Write mode.
+  ASSERT_TRUE(file->is_open()) << "File could not be opened:\n"
+                               << kOutFileName.c_str();
+
+  const size_t kBufferLength = 3;
+  std::unique_ptr<int16_t[]> written_buffer(new int16_t[kBufferLength]);
+  std::unique_ptr<int16_t[]> read_buffer(new int16_t[kBufferLength]);
+
+  written_buffer[0] = 1;
+  written_buffer[1] = 2;
+  written_buffer[2] = 3;
+
+  EXPECT_EQ(kBufferLength, WriteInt16BufferToFile(file.get(),
+                                                  kBufferLength,
+                                                  written_buffer.get()));
+
+  file->CloseFile();
+
+  file->OpenFile(kOutFileName.c_str(), true);  // Read only.
+  ASSERT_TRUE(file->is_open()) << "File could not be opened:\n"
+                               << kOutFileName.c_str();
+
+  EXPECT_EQ(kBufferLength, ReadInt16BufferFromFile(file.get(),
+                                                   kBufferLength,
+                                                   read_buffer.get()));
+  EXPECT_EQ(0, memcmp(written_buffer.get(),
+                      read_buffer.get(),
+                      kBufferLength * sizeof(written_buffer[0])));
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_WriteFloatBufferToFile DISABLED_WriteFloatBufferToFile
+#else
+#define MAYBE_WriteFloatBufferToFile WriteFloatBufferToFile
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_WriteFloatBufferToFile) {
+  std::unique_ptr<FileWrapper> file(FileWrapper::Create());
+
+  std::string kOutFileName = CreateTempFilename(test::OutputPath(),
+                                                "utils_test");
+
+  file->OpenFile(kOutFileName.c_str(), false);  // Write mode.
+  ASSERT_TRUE(file->is_open()) << "File could not be opened:\n"
+                               << kOutFileName.c_str();
+
+  const size_t kBufferLength = 3;
+  std::unique_ptr<float[]> written_buffer(new float[kBufferLength]);
+  std::unique_ptr<float[]> read_buffer(new float[kBufferLength]);
+
+  written_buffer[0] = static_cast<float>(kPi);
+  written_buffer[1] = static_cast<float>(kE);
+  written_buffer[2] = static_cast<float>(kAvogadro);
+
+  EXPECT_EQ(kBufferLength, WriteFloatBufferToFile(file.get(),
+                                                  kBufferLength,
+                                                  written_buffer.get()));
+
+  file->CloseFile();
+
+  file->OpenFile(kOutFileName.c_str(), true);  // Read only.
+  ASSERT_TRUE(file->is_open()) << "File could not be opened:\n"
+                               << kOutFileName.c_str();
+
+  EXPECT_EQ(kBufferLength, ReadFloatBufferFromFile(file.get(),
+                                                   kBufferLength,
+                                                   read_buffer.get()));
+  EXPECT_EQ(0, memcmp(written_buffer.get(),
+                      read_buffer.get(),
+                      kBufferLength * sizeof(written_buffer[0])));
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_WriteDoubleBufferToFile DISABLED_WriteDoubleBufferToFile
+#else
+#define MAYBE_WriteDoubleBufferToFile WriteDoubleBufferToFile
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_WriteDoubleBufferToFile) {
+  std::unique_ptr<FileWrapper> file(FileWrapper::Create());
+
+  std::string kOutFileName = CreateTempFilename(test::OutputPath(),
+                                                "utils_test");
+
+  file->OpenFile(kOutFileName.c_str(), false);  // Write mode.
+  ASSERT_TRUE(file->is_open()) << "File could not be opened:\n"
+                               << kOutFileName.c_str();
+
+  const size_t kBufferLength = 3;
+  std::unique_ptr<double[]> written_buffer(new double[kBufferLength]);
+  std::unique_ptr<double[]> read_buffer(new double[kBufferLength]);
+
+  written_buffer[0] = kPi;
+  written_buffer[1] = kE;
+  written_buffer[2] = kAvogadro;
+
+  EXPECT_EQ(kBufferLength, WriteDoubleBufferToFile(file.get(),
+                                                   kBufferLength,
+                                                   written_buffer.get()));
+
+  file->CloseFile();
+
+  file->OpenFile(kOutFileName.c_str(), true);  // Read only.
+  ASSERT_TRUE(file->is_open()) << "File could not be opened:\n"
+                               << kOutFileName.c_str();
+
+  EXPECT_EQ(kBufferLength, ReadDoubleBufferFromFile(file.get(),
+                                                    kBufferLength,
+                                                    read_buffer.get()));
+  EXPECT_EQ(0, memcmp(written_buffer.get(),
+                      read_buffer.get(),
+                      kBufferLength * sizeof(written_buffer[0])));
+}
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_ExpectedErrorReturnValues DISABLED_ExpectedErrorReturnValues
+#else
+#define MAYBE_ExpectedErrorReturnValues ExpectedErrorReturnValues
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ExpectedErrorReturnValues) {
+  std::string test_filename = kTestFileName;
+
+  double value;
+  std::unique_ptr<int16_t[]> int16_buffer(new int16_t[1]);
+  std::unique_ptr<double[]> double_buffer(new double[1]);
+  std::unique_ptr<FileWrapper> file(FileWrapper::Create());
+
+  EXPECT_EQ(-1, ConvertByteArrayToDouble(NULL, &value));
+  EXPECT_EQ(-1, ConvertByteArrayToDouble(kPiBytes, NULL));
+
+  EXPECT_EQ(-1, ConvertDoubleToByteArray(kPi, NULL));
+
+  // Tests with file not opened.
+  EXPECT_EQ(0u, ReadInt16BufferFromFile(file.get(), 1, int16_buffer.get()));
+  EXPECT_EQ(0u, ReadInt16FromFileToDoubleBuffer(file.get(),
+                                                1,
+                                                double_buffer.get()));
+  EXPECT_EQ(0u, ReadDoubleBufferFromFile(file.get(), 1, double_buffer.get()));
+  EXPECT_EQ(0u, WriteInt16BufferToFile(file.get(), 1, int16_buffer.get()));
+  EXPECT_EQ(0u, WriteDoubleBufferToFile(file.get(), 1, double_buffer.get()));
+
+  file->OpenFile(test_filename.c_str(), true);  // Read only.
+  ASSERT_TRUE(file->is_open()) << "File could not be opened:\n"
+                               << kTestFileName.c_str();
+
+  EXPECT_EQ(0u, ReadInt16BufferFromFile(NULL, 1, int16_buffer.get()));
+  EXPECT_EQ(0u, ReadInt16BufferFromFile(file.get(), 1, NULL));
+  EXPECT_EQ(0u, ReadInt16BufferFromFile(file.get(), 0, int16_buffer.get()));
+
+  EXPECT_EQ(0u, ReadInt16FromFileToDoubleBuffer(NULL, 1, double_buffer.get()));
+  EXPECT_EQ(0u, ReadInt16FromFileToDoubleBuffer(file.get(), 1, NULL));
+  EXPECT_EQ(0u, ReadInt16FromFileToDoubleBuffer(file.get(),
+                                                0,
+                                                double_buffer.get()));
+
+  EXPECT_EQ(0u, ReadDoubleBufferFromFile(NULL, 1, double_buffer.get()));
+  EXPECT_EQ(0u, ReadDoubleBufferFromFile(file.get(), 1, NULL));
+  EXPECT_EQ(0u, ReadDoubleBufferFromFile(file.get(), 0, double_buffer.get()));
+
+  EXPECT_EQ(0u, WriteInt16BufferToFile(NULL, 1, int16_buffer.get()));
+  EXPECT_EQ(0u, WriteInt16BufferToFile(file.get(), 1, NULL));
+  EXPECT_EQ(0u, WriteInt16BufferToFile(file.get(), 0, int16_buffer.get()));
+
+  EXPECT_EQ(0u, WriteDoubleBufferToFile(NULL, 1, double_buffer.get()));
+  EXPECT_EQ(0u, WriteDoubleBufferToFile(file.get(), 1, NULL));
+  EXPECT_EQ(0u, WriteDoubleBufferToFile(file.get(), 0, double_buffer.get()));
+}
+
+}  // namespace webrtc
+
diff --git a/modules/audio_processing/transient/moving_moments.cc b/modules/audio_processing/transient/moving_moments.cc
new file mode 100644
index 0000000..46b16b8
--- /dev/null
+++ b/modules/audio_processing/transient/moving_moments.cc
@@ -0,0 +1,52 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/moving_moments.h"
+
+#include <math.h>
+#include <string.h>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+MovingMoments::MovingMoments(size_t length)
+    : length_(length),
+      queue_(),
+      sum_(0.0),
+      sum_of_squares_(0.0) {
+  RTC_DCHECK_GT(length, 0);
+  for (size_t i = 0; i < length; ++i) {
+    queue_.push(0.0);
+  }
+}
+
+MovingMoments::~MovingMoments() {}
+
+void MovingMoments::CalculateMoments(const float* in, size_t in_length,
+                                     float* first, float* second) {
+  RTC_DCHECK(in);
+  RTC_DCHECK_GT(in_length, 0);
+  RTC_DCHECK(first);
+  RTC_DCHECK(second);
+
+  for (size_t i = 0; i < in_length; ++i) {
+    const float old_value = queue_.front();
+    queue_.pop();
+    queue_.push(in[i]);
+
+    sum_ += in[i] - old_value;
+    sum_of_squares_ += in[i] * in[i] - old_value * old_value;
+    first[i] = sum_ / length_;
+    second[i] = sum_of_squares_ / length_;
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/transient/moving_moments.h b/modules/audio_processing/transient/moving_moments.h
new file mode 100644
index 0000000..f1b3e38
--- /dev/null
+++ b/modules/audio_processing/transient/moving_moments.h
@@ -0,0 +1,52 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_MOVING_MOMENTS_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_MOVING_MOMENTS_H_
+
+#include <stddef.h>
+
+#include <queue>
+
+namespace webrtc {
+
+// Calculates the first and second moments for each value of a buffer taking
+// into account a given number of previous values.
+// It preserves its state, so it can be multiple-called.
+// TODO(chadan): Implement a function that takes a buffer of first moments and a
+// buffer of second moments; and calculates the variances. When needed.
+// TODO(chadan): Add functionality to update with a buffer but only output are
+// the last values of the moments. When needed.
+class MovingMoments {
+ public:
+  // Creates a Moving Moments object, that uses the last |length| values
+  // (including the new value introduced in every new calculation).
+  explicit MovingMoments(size_t length);
+  ~MovingMoments();
+
+  // Calculates the new values using |in|. Results will be in the out buffers.
+  // |first| and |second| must be allocated with at least |in_length|.
+  void CalculateMoments(const float* in, size_t in_length,
+                        float* first, float* second);
+
+ private:
+  size_t length_;
+  // A queue holding the |length_| latest input values.
+  std::queue<float> queue_;
+  // Sum of the values of the queue.
+  float sum_;
+  // Sum of the squares of the values of the queue.
+  float sum_of_squares_;
+};
+
+}  // namespace webrtc
+
+
+#endif  // MODULES_AUDIO_PROCESSING_TRANSIENT_MOVING_MOMENTS_H_
diff --git a/modules/audio_processing/transient/moving_moments_unittest.cc b/modules/audio_processing/transient/moving_moments_unittest.cc
new file mode 100644
index 0000000..057bc45
--- /dev/null
+++ b/modules/audio_processing/transient/moving_moments_unittest.cc
@@ -0,0 +1,207 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/moving_moments.h"
+
+#include <memory>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+static const float kTolerance = 0.0001f;
+
+class MovingMomentsTest : public ::testing::Test {
+ protected:
+  static const size_t kMovingMomentsBufferLength = 5;
+  static const size_t kMaxOutputLength = 20;  // Valid for this tests only.
+
+  virtual void SetUp();
+  // Calls CalculateMoments and verifies that it produces the expected
+  // outputs.
+  void CalculateMomentsAndVerify(const float* input, size_t input_length,
+                                 const float* expected_mean,
+                                 const float* expected_mean_squares);
+
+  std::unique_ptr<MovingMoments> moving_moments_;
+  float output_mean_[kMaxOutputLength];
+  float output_mean_squares_[kMaxOutputLength];
+};
+
+const size_t MovingMomentsTest::kMaxOutputLength;
+
+void MovingMomentsTest::SetUp() {
+  moving_moments_.reset(new MovingMoments(kMovingMomentsBufferLength));
+}
+
+void MovingMomentsTest::CalculateMomentsAndVerify(
+    const float* input, size_t input_length,
+    const float* expected_mean,
+    const float* expected_mean_squares) {
+  ASSERT_LE(input_length, kMaxOutputLength);
+
+  moving_moments_->CalculateMoments(input,
+                                    input_length,
+                                    output_mean_,
+                                    output_mean_squares_);
+
+  for (size_t i = 1; i < input_length; ++i) {
+    EXPECT_NEAR(expected_mean[i], output_mean_[i], kTolerance);
+    EXPECT_NEAR(expected_mean_squares[i], output_mean_squares_[i], kTolerance);
+  }
+}
+
+TEST_F(MovingMomentsTest, CorrectMomentsOfAnAllZerosBuffer) {
+  const float kInput[] = {0.f, 0.f, 0.f, 0.f, 0.f};
+  const size_t kInputLength = sizeof(kInput) / sizeof(kInput[0]);
+
+  const float expected_mean[kInputLength] = {0.f, 0.f, 0.f, 0.f, 0.f};
+  const float expected_mean_squares[kInputLength] = {0.f, 0.f, 0.f, 0.f, 0.f};
+
+  CalculateMomentsAndVerify(kInput, kInputLength, expected_mean,
+                            expected_mean_squares);
+}
+
+TEST_F(MovingMomentsTest, CorrectMomentsOfAConstantBuffer) {
+  const float kInput[] = {5.f, 5.f, 5.f, 5.f, 5.f, 5.f, 5.f, 5.f, 5.f, 5.f};
+  const size_t kInputLength = sizeof(kInput) / sizeof(kInput[0]);
+
+  const float expected_mean[kInputLength] =
+      {1.f, 2.f, 3.f, 4.f, 5.f, 5.f, 5.f, 5.f, 5.f, 5.f};
+  const float expected_mean_squares[kInputLength] =
+      {5.f, 10.f, 15.f, 20.f, 25.f, 25.f, 25.f, 25.f, 25.f, 25.f};
+
+  CalculateMomentsAndVerify(kInput, kInputLength, expected_mean,
+                            expected_mean_squares);
+}
+
+TEST_F(MovingMomentsTest, CorrectMomentsOfAnIncreasingBuffer) {
+  const float kInput[] = {1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f};
+  const size_t kInputLength = sizeof(kInput) / sizeof(kInput[0]);
+
+  const float expected_mean[kInputLength] =
+      {0.2f, 0.6f, 1.2f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f};
+  const float expected_mean_squares[kInputLength] =
+      {0.2f, 1.f, 2.8f, 6.f, 11.f, 18.f, 27.f, 38.f, 51.f};
+
+  CalculateMomentsAndVerify(kInput, kInputLength, expected_mean,
+                            expected_mean_squares);
+}
+
+TEST_F(MovingMomentsTest, CorrectMomentsOfADecreasingBuffer) {
+  const float kInput[] =
+      {-1.f, -2.f, -3.f, -4.f, -5.f, -6.f, -7.f, -8.f, -9.f};
+  const size_t kInputLength = sizeof(kInput) / sizeof(kInput[0]);
+
+  const float expected_mean[kInputLength] =
+      {-0.2f, -0.6f, -1.2f, -2.f, -3.f, -4.f, -5.f, -6.f, -7.f};
+  const float expected_mean_squares[kInputLength] =
+      {0.2f, 1.f, 2.8f, 6.f, 11.f, 18.f, 27.f, 38.f, 51.f};
+
+  CalculateMomentsAndVerify(kInput, kInputLength, expected_mean,
+                            expected_mean_squares);
+}
+
+TEST_F(MovingMomentsTest, CorrectMomentsOfAZeroMeanSequence) {
+  const size_t kMovingMomentsBufferLength = 4;
+  moving_moments_.reset(new MovingMoments(kMovingMomentsBufferLength));
+  const float kInput[] =
+      {1.f, -1.f, 1.f, -1.f, 1.f, -1.f, 1.f, -1.f, 1.f, -1.f};
+  const size_t kInputLength = sizeof(kInput) / sizeof(kInput[0]);
+
+  const float expected_mean[kInputLength] =
+      {0.25f, 0.f, 0.25f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
+  const float expected_mean_squares[kInputLength] =
+      {0.25f, 0.5f, 0.75f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f};
+
+  CalculateMomentsAndVerify(kInput, kInputLength, expected_mean,
+                            expected_mean_squares);
+}
+
+TEST_F(MovingMomentsTest, CorrectMomentsOfAnArbitraryBuffer) {
+  const float kInput[] =
+      {0.2f, 0.3f, 0.5f, 0.7f, 0.11f, 0.13f, 0.17f, 0.19f, 0.23f};
+  const size_t kInputLength = sizeof(kInput) / sizeof(kInput[0]);
+
+  const float expected_mean[kInputLength] =
+      {0.04f, 0.1f, 0.2f, 0.34f, 0.362f, 0.348f, 0.322f, 0.26f, 0.166f};
+  const float expected_mean_squares[kInputLength] =
+      {0.008f, 0.026f, 0.076f, 0.174f, 0.1764f, 0.1718f, 0.1596f, 0.1168f,
+      0.0294f};
+
+  CalculateMomentsAndVerify(kInput, kInputLength, expected_mean,
+                            expected_mean_squares);
+}
+
+TEST_F(MovingMomentsTest, MutipleCalculateMomentsCalls) {
+  const float kInputFirstCall[] =
+      {0.2f, 0.3f, 0.5f, 0.7f, 0.11f, 0.13f, 0.17f, 0.19f, 0.23f};
+  const size_t kInputFirstCallLength = sizeof(kInputFirstCall) /
+                                    sizeof(kInputFirstCall[0]);
+  const float kInputSecondCall[] = {0.29f, 0.31f};
+  const size_t kInputSecondCallLength = sizeof(kInputSecondCall) /
+                                     sizeof(kInputSecondCall[0]);
+  const float kInputThirdCall[] = {0.37f, 0.41f, 0.43f, 0.47f};
+  const size_t kInputThirdCallLength = sizeof(kInputThirdCall) /
+                                    sizeof(kInputThirdCall[0]);
+
+  const float expected_mean_first_call[kInputFirstCallLength] =
+      {0.04f, 0.1f, 0.2f, 0.34f, 0.362f, 0.348f, 0.322f, 0.26f, 0.166f};
+  const float expected_mean_squares_first_call[kInputFirstCallLength] =
+      {0.008f, 0.026f, 0.076f, 0.174f, 0.1764f, 0.1718f, 0.1596f, 0.1168f,
+      0.0294f};
+
+  const float expected_mean_second_call[kInputSecondCallLength] =
+      {0.202f, 0.238f};
+  const float expected_mean_squares_second_call[kInputSecondCallLength] =
+      {0.0438f, 0.0596f};
+
+  const float expected_mean_third_call[kInputThirdCallLength] =
+      {0.278f, 0.322f, 0.362f, 0.398f};
+  const float expected_mean_squares_third_call[kInputThirdCallLength] =
+      {0.0812f, 0.1076f, 0.134f, 0.1614f};
+
+  CalculateMomentsAndVerify(kInputFirstCall, kInputFirstCallLength,
+      expected_mean_first_call, expected_mean_squares_first_call);
+
+  CalculateMomentsAndVerify(kInputSecondCall, kInputSecondCallLength,
+      expected_mean_second_call, expected_mean_squares_second_call);
+
+  CalculateMomentsAndVerify(kInputThirdCall, kInputThirdCallLength,
+      expected_mean_third_call, expected_mean_squares_third_call);
+}
+
+TEST_F(MovingMomentsTest,
+       VerifySampleBasedVsBlockBasedCalculation) {
+  const float kInput[] =
+      {0.2f, 0.3f, 0.5f, 0.7f, 0.11f, 0.13f, 0.17f, 0.19f, 0.23f};
+  const size_t kInputLength = sizeof(kInput) / sizeof(kInput[0]);
+
+  float output_mean_block_based[kInputLength];
+  float output_mean_squares_block_based[kInputLength];
+
+  float output_mean_sample_based;
+  float output_mean_squares_sample_based;
+
+  moving_moments_->CalculateMoments(
+      kInput, kInputLength, output_mean_block_based,
+      output_mean_squares_block_based);
+  moving_moments_.reset(new MovingMoments(kMovingMomentsBufferLength));
+  for (size_t i = 0; i < kInputLength; ++i) {
+    moving_moments_->CalculateMoments(
+        &kInput[i], 1, &output_mean_sample_based,
+        &output_mean_squares_sample_based);
+    EXPECT_FLOAT_EQ(output_mean_block_based[i], output_mean_sample_based);
+    EXPECT_FLOAT_EQ(output_mean_squares_block_based[i],
+                     output_mean_squares_sample_based);
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/transient/transient_detector.cc b/modules/audio_processing/transient/transient_detector.cc
new file mode 100644
index 0000000..1bb6f9f
--- /dev/null
+++ b/modules/audio_processing/transient/transient_detector.cc
@@ -0,0 +1,176 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/transient_detector.h"
+
+#include <float.h>
+#include <math.h>
+#include <string.h>
+
+#include <algorithm>
+
+#include "modules/audio_processing/transient/common.h"
+#include "modules/audio_processing/transient/daubechies_8_wavelet_coeffs.h"
+#include "modules/audio_processing/transient/moving_moments.h"
+#include "modules/audio_processing/transient/wpd_tree.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+static const int kTransientLengthMs = 30;
+static const int kChunksAtStartupLeftToDelete =
+    kTransientLengthMs / ts::kChunkSizeMs;
+static const float kDetectThreshold = 16.f;
+
+TransientDetector::TransientDetector(int sample_rate_hz)
+    : samples_per_chunk_(sample_rate_hz * ts::kChunkSizeMs / 1000),
+      last_first_moment_(),
+      last_second_moment_(),
+      chunks_at_startup_left_to_delete_(kChunksAtStartupLeftToDelete),
+      reference_energy_(1.f),
+      using_reference_(false) {
+  RTC_DCHECK(sample_rate_hz == ts::kSampleRate8kHz ||
+             sample_rate_hz == ts::kSampleRate16kHz ||
+             sample_rate_hz == ts::kSampleRate32kHz ||
+             sample_rate_hz == ts::kSampleRate48kHz);
+  int samples_per_transient = sample_rate_hz * kTransientLengthMs / 1000;
+  // Adjustment to avoid data loss while downsampling, making
+  // |samples_per_chunk_| and |samples_per_transient| always divisible by
+  // |kLeaves|.
+  samples_per_chunk_ -= samples_per_chunk_ % kLeaves;
+  samples_per_transient -= samples_per_transient % kLeaves;
+
+  tree_leaves_data_length_ = samples_per_chunk_ / kLeaves;
+  wpd_tree_.reset(new WPDTree(samples_per_chunk_,
+                              kDaubechies8HighPassCoefficients,
+                              kDaubechies8LowPassCoefficients,
+                              kDaubechies8CoefficientsLength,
+                              kLevels));
+  for (size_t i = 0; i < kLeaves; ++i) {
+    moving_moments_[i].reset(
+        new MovingMoments(samples_per_transient / kLeaves));
+  }
+
+  first_moments_.reset(new float[tree_leaves_data_length_]);
+  second_moments_.reset(new float[tree_leaves_data_length_]);
+
+  for (int i = 0; i < kChunksAtStartupLeftToDelete; ++i) {
+    previous_results_.push_back(0.f);
+  }
+}
+
+TransientDetector::~TransientDetector() {}
+
+float TransientDetector::Detect(const float* data,
+                                size_t data_length,
+                                const float* reference_data,
+                                size_t reference_length) {
+  RTC_DCHECK(data);
+  RTC_DCHECK_EQ(samples_per_chunk_, data_length);
+
+  // TODO(aluebs): Check if these errors can logically happen and if not assert
+  // on them.
+  if (wpd_tree_->Update(data, samples_per_chunk_) != 0) {
+    return -1.f;
+  }
+
+  float result = 0.f;
+
+  for (size_t i = 0; i < kLeaves; ++i) {
+    WPDNode* leaf = wpd_tree_->NodeAt(kLevels, i);
+
+    moving_moments_[i]->CalculateMoments(leaf->data(),
+                                         tree_leaves_data_length_,
+                                         first_moments_.get(),
+                                         second_moments_.get());
+
+    // Add value delayed (Use the last moments from the last call to Detect).
+    float unbiased_data = leaf->data()[0] - last_first_moment_[i];
+    result +=
+        unbiased_data * unbiased_data / (last_second_moment_[i] + FLT_MIN);
+
+    // Add new values.
+    for (size_t j = 1; j < tree_leaves_data_length_; ++j) {
+      unbiased_data = leaf->data()[j] - first_moments_[j - 1];
+      result +=
+          unbiased_data * unbiased_data / (second_moments_[j - 1] + FLT_MIN);
+    }
+
+    last_first_moment_[i] = first_moments_[tree_leaves_data_length_ - 1];
+    last_second_moment_[i] = second_moments_[tree_leaves_data_length_ - 1];
+  }
+
+  result /= tree_leaves_data_length_;
+
+  result *= ReferenceDetectionValue(reference_data, reference_length);
+
+  if (chunks_at_startup_left_to_delete_ > 0) {
+    chunks_at_startup_left_to_delete_--;
+    result = 0.f;
+  }
+
+  if (result >= kDetectThreshold) {
+    result = 1.f;
+  } else {
+    // Get proportional value.
+    // Proportion achieved with a squared raised cosine function with domain
+    // [0, kDetectThreshold) and image [0, 1), it's always increasing.
+    const float horizontal_scaling = ts::kPi / kDetectThreshold;
+    const float kHorizontalShift = ts::kPi;
+    const float kVerticalScaling = 0.5f;
+    const float kVerticalShift = 1.f;
+
+    result = (cos(result * horizontal_scaling + kHorizontalShift)
+        + kVerticalShift) * kVerticalScaling;
+    result *= result;
+  }
+
+  previous_results_.pop_front();
+  previous_results_.push_back(result);
+
+  // In the current implementation we return the max of the current result and
+  // the previous results, so the high results have a width equals to
+  // |transient_length|.
+  return *std::max_element(previous_results_.begin(), previous_results_.end());
+}
+
+// Looks for the highest slope and compares it with the previous ones.
+// An exponential transformation takes this to the [0, 1] range. This value is
+// multiplied by the detection result to avoid false positives.
+float TransientDetector::ReferenceDetectionValue(const float* data,
+                                                 size_t length) {
+  if (data == NULL) {
+    using_reference_ = false;
+    return 1.f;
+  }
+  static const float kEnergyRatioThreshold = 0.2f;
+  static const float kReferenceNonLinearity = 20.f;
+  static const float kMemory = 0.99f;
+  float reference_energy = 0.f;
+  for (size_t i = 1; i < length; ++i) {
+    reference_energy += data[i] * data[i];
+  }
+  if (reference_energy == 0.f) {
+    using_reference_ = false;
+    return 1.f;
+  }
+  RTC_DCHECK_NE(0, reference_energy_);
+  float result = 1.f / (1.f + exp(kReferenceNonLinearity *
+                                  (kEnergyRatioThreshold -
+                                   reference_energy / reference_energy_)));
+  reference_energy_ =
+      kMemory * reference_energy_ + (1.f - kMemory) * reference_energy;
+
+  using_reference_ = true;
+
+  return result;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/transient/transient_detector.h b/modules/audio_processing/transient/transient_detector.h
new file mode 100644
index 0000000..3267b3a
--- /dev/null
+++ b/modules/audio_processing/transient/transient_detector.h
@@ -0,0 +1,87 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_TRANSIENT_DETECTOR_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_TRANSIENT_DETECTOR_H_
+
+#include <deque>
+#include <memory>
+
+#include "modules/audio_processing/transient/moving_moments.h"
+#include "modules/audio_processing/transient/wpd_tree.h"
+
+namespace webrtc {
+
+// This is an implementation of the transient detector described in "Causal
+// Wavelet based transient detector".
+// Calculates the log-likelihood of a transient to happen on a signal at any
+// given time based on the previous samples; it uses a WPD tree to analyze the
+// signal.  It preserves its state, so it can be multiple-called.
+class TransientDetector {
+ public:
+  // TODO(chadan): The only supported wavelet is Daubechies 8 using a WPD tree
+  // of 3 levels. Make an overloaded constructor to allow different wavelets and
+  // depths of the tree. When needed.
+
+  // Creates a wavelet based transient detector.
+  TransientDetector(int sample_rate_hz);
+
+  ~TransientDetector();
+
+  // Calculates the log-likelihood of the existence of a transient in |data|.
+  // |data_length| has to be equal to |samples_per_chunk_|.
+  // Returns a value between 0 and 1, as a non linear representation of this
+  // likelihood.
+  // Returns a negative value on error.
+  float Detect(const float* data,
+               size_t data_length,
+               const float* reference_data,
+               size_t reference_length);
+
+  bool using_reference() { return using_reference_; }
+
+ private:
+  float ReferenceDetectionValue(const float* data, size_t length);
+
+  static const size_t kLevels = 3;
+  static const size_t kLeaves = 1 << kLevels;
+
+  size_t samples_per_chunk_;
+
+  std::unique_ptr<WPDTree> wpd_tree_;
+  size_t tree_leaves_data_length_;
+
+  // A MovingMoments object is needed for each leaf in the WPD tree.
+  std::unique_ptr<MovingMoments> moving_moments_[kLeaves];
+
+  std::unique_ptr<float[]> first_moments_;
+  std::unique_ptr<float[]> second_moments_;
+
+  // Stores the last calculated moments from the previous detection.
+  float last_first_moment_[kLeaves];
+  float last_second_moment_[kLeaves];
+
+  // We keep track of the previous results from the previous chunks, so it can
+  // be used to effectively give results according to the |transient_length|.
+  std::deque<float> previous_results_;
+
+  // Number of chunks that are going to return only zeros at the beginning of
+  // the detection. It helps to avoid infs and nans due to the lack of
+  // information.
+  int chunks_at_startup_left_to_delete_;
+
+  float reference_energy_;
+
+  bool using_reference_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_TRANSIENT_TRANSIENT_DETECTOR_H_
diff --git a/modules/audio_processing/transient/transient_detector_unittest.cc b/modules/audio_processing/transient/transient_detector_unittest.cc
new file mode 100644
index 0000000..96af179
--- /dev/null
+++ b/modules/audio_processing/transient/transient_detector_unittest.cc
@@ -0,0 +1,104 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/transient_detector.h"
+
+#include <memory>
+#include <sstream>
+#include <string>
+
+#include "modules/audio_processing/transient/common.h"
+#include "modules/audio_processing/transient/file_utils.h"
+#include "system_wrappers/include/file_wrapper.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+static const int kSampleRatesHz[] = {ts::kSampleRate8kHz,
+                                     ts::kSampleRate16kHz,
+                                     ts::kSampleRate32kHz,
+                                     ts::kSampleRate48kHz};
+static const size_t kNumberOfSampleRates =
+    sizeof(kSampleRatesHz) / sizeof(*kSampleRatesHz);
+
+// This test is for the correctness of the transient detector.
+// Checks the results comparing them with the ones stored in the detect files in
+// the directory: resources/audio_processing/transient/
+// The files contain all the results in double precision (Little endian).
+// The audio files used with different sample rates are stored in the same
+// directory.
+#if defined(WEBRTC_IOS)
+TEST(TransientDetectorTest, DISABLED_CorrectnessBasedOnFiles) {
+#else
+TEST(TransientDetectorTest, CorrectnessBasedOnFiles) {
+#endif
+  for (size_t i = 0; i < kNumberOfSampleRates; ++i) {
+    int sample_rate_hz = kSampleRatesHz[i];
+
+    // Prepare detect file.
+    std::stringstream detect_file_name;
+    detect_file_name << "audio_processing/transient/detect"
+                     << (sample_rate_hz / 1000) << "kHz";
+
+    std::unique_ptr<FileWrapper> detect_file(FileWrapper::Create());
+
+    detect_file->OpenFile(
+        test::ResourcePath(detect_file_name.str(), "dat").c_str(),
+        true);  // Read only.
+
+    bool file_opened = detect_file->is_open();
+    ASSERT_TRUE(file_opened) << "File could not be opened.\n"
+          << detect_file_name.str().c_str();
+
+    // Prepare audio file.
+    std::stringstream audio_file_name;
+    audio_file_name << "audio_processing/transient/audio"
+                    << (sample_rate_hz / 1000) << "kHz";
+
+    std::unique_ptr<FileWrapper> audio_file(FileWrapper::Create());
+
+    audio_file->OpenFile(
+        test::ResourcePath(audio_file_name.str(), "pcm").c_str(),
+        true);  // Read only.
+
+    // Create detector.
+    TransientDetector detector(sample_rate_hz);
+
+    const size_t buffer_length = sample_rate_hz * ts::kChunkSizeMs / 1000;
+    std::unique_ptr<float[]> buffer(new float[buffer_length]);
+
+    const float kTolerance = 0.02f;
+
+    size_t frames_read = 0;
+
+    while (ReadInt16FromFileToFloatBuffer(audio_file.get(),
+                                          buffer_length,
+                                          buffer.get()) == buffer_length) {
+      ++frames_read;
+
+      float detector_value =
+          detector.Detect(buffer.get(), buffer_length, NULL, 0);
+      double file_value;
+      ASSERT_EQ(1u, ReadDoubleBufferFromFile(detect_file.get(), 1, &file_value))
+          << "Detect test file is malformed.\n";
+
+      // Compare results with data from the matlab test file.
+      EXPECT_NEAR(file_value, detector_value, kTolerance) << "Frame: "
+          << frames_read;
+    }
+
+    detect_file->CloseFile();
+    audio_file->CloseFile();
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/transient/transient_suppression_test.cc b/modules/audio_processing/transient/transient_suppression_test.cc
new file mode 100644
index 0000000..14fe4f8
--- /dev/null
+++ b/modules/audio_processing/transient/transient_suppression_test.cc
@@ -0,0 +1,250 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/transient_suppressor.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <memory>
+#include <string>
+
+#include "common_audio/include/audio_util.h"
+#include "modules/audio_processing/agc/agc.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/flags.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+DEFINE_string(in_file_name, "", "PCM file that contains the signal.");
+DEFINE_string(detection_file_name,
+              "",
+              "PCM file that contains the detection signal.");
+DEFINE_string(reference_file_name,
+              "",
+              "PCM file that contains the reference signal.");
+
+DEFINE_int(chunk_size_ms,
+           10,
+           "Time between each chunk of samples in milliseconds.");
+
+DEFINE_int(sample_rate_hz,
+           16000,
+           "Sampling frequency of the signal in Hertz.");
+DEFINE_int(detection_rate_hz,
+           0,
+           "Sampling frequency of the detection signal in Hertz.");
+
+DEFINE_int(num_channels, 1, "Number of channels.");
+
+DEFINE_bool(help, false, "Print this message.");
+
+namespace webrtc {
+
+const char kUsage[] =
+    "\nDetects and suppresses transients from file.\n\n"
+    "This application loads the signal from the in_file_name with a specific\n"
+    "num_channels and sample_rate_hz, the detection signal from the\n"
+    "detection_file_name with a specific detection_rate_hz, and the reference\n"
+    "signal from the reference_file_name with sample_rate_hz, divides them\n"
+    "into chunk_size_ms blocks, computes its voice value and depending on the\n"
+    "voice_threshold does the respective restoration. You can always get the\n"
+    "all-voiced or all-unvoiced cases by setting the voice_threshold to 0 or\n"
+    "1 respectively.\n\n";
+
+// Read next buffers from the test files (signed 16-bit host-endian PCM
+// format). audio_buffer has int16 samples, detection_buffer has float samples
+// with range [-32768,32767], and reference_buffer has float samples with range
+// [-1,1]. Return true iff all the buffers were filled completely.
+bool ReadBuffers(FILE* in_file,
+                 size_t audio_buffer_size,
+                 int num_channels,
+                 int16_t* audio_buffer,
+                 FILE* detection_file,
+                 size_t detection_buffer_size,
+                 float* detection_buffer,
+                 FILE* reference_file,
+                 float* reference_buffer) {
+  std::unique_ptr<int16_t[]> tmpbuf;
+  int16_t* read_ptr = audio_buffer;
+  if (num_channels > 1) {
+    tmpbuf.reset(new int16_t[num_channels * audio_buffer_size]);
+    read_ptr = tmpbuf.get();
+  }
+  if (fread(read_ptr,
+            sizeof(*read_ptr),
+            num_channels * audio_buffer_size,
+            in_file) != num_channels * audio_buffer_size) {
+    return false;
+  }
+  // De-interleave.
+  if (num_channels > 1) {
+    for (int i = 0; i < num_channels; ++i) {
+      for (size_t j = 0; j < audio_buffer_size; ++j) {
+        audio_buffer[i * audio_buffer_size + j] =
+            read_ptr[i + j * num_channels];
+      }
+    }
+  }
+  if (detection_file) {
+    std::unique_ptr<int16_t[]> ibuf(new int16_t[detection_buffer_size]);
+    if (fread(ibuf.get(), sizeof(ibuf[0]), detection_buffer_size,
+              detection_file) != detection_buffer_size)
+      return false;
+    for (size_t i = 0; i < detection_buffer_size; ++i)
+      detection_buffer[i] = ibuf[i];
+  }
+  if (reference_file) {
+    std::unique_ptr<int16_t[]> ibuf(new int16_t[audio_buffer_size]);
+    if (fread(ibuf.get(), sizeof(ibuf[0]), audio_buffer_size, reference_file)
+        != audio_buffer_size)
+      return false;
+    S16ToFloat(ibuf.get(), audio_buffer_size, reference_buffer);
+  }
+  return true;
+}
+
+// Write a number of samples to an open signed 16-bit host-endian PCM file.
+static void WritePCM(FILE* f,
+                     size_t num_samples,
+                     int num_channels,
+                     const float* buffer) {
+  std::unique_ptr<int16_t[]> ibuf(new int16_t[num_channels * num_samples]);
+  // Interleave.
+  for (int i = 0; i < num_channels; ++i) {
+    for (size_t j = 0; j < num_samples; ++j) {
+      ibuf[i + j * num_channels] = FloatS16ToS16(buffer[i * num_samples + j]);
+    }
+  }
+  fwrite(ibuf.get(), sizeof(ibuf[0]), num_channels * num_samples, f);
+}
+
+// This application tests the transient suppression by providing a processed
+// PCM file, which has to be listened to in order to evaluate the
+// performance.
+// It gets an audio file, and its voice gain information, and the suppressor
+// process it giving the output file "suppressed_keystrokes.pcm".
+void void_main() {
+  // TODO(aluebs): Remove all FileWrappers.
+  // Prepare the input file.
+  FILE* in_file = fopen(FLAG_in_file_name, "rb");
+  ASSERT_TRUE(in_file != NULL);
+
+  // Prepare the detection file.
+  FILE* detection_file = NULL;
+  if (strlen(FLAG_detection_file_name) > 0) {
+    detection_file = fopen(FLAG_detection_file_name, "rb");
+  }
+
+  // Prepare the reference file.
+  FILE* reference_file = NULL;
+  if (strlen(FLAG_reference_file_name) > 0) {
+    reference_file = fopen(FLAG_reference_file_name, "rb");
+  }
+
+  // Prepare the output file.
+  std::string out_file_name = test::OutputPath() + "suppressed_keystrokes.pcm";
+  FILE* out_file = fopen(out_file_name.c_str(), "wb");
+  ASSERT_TRUE(out_file != NULL);
+
+  int detection_rate_hz = FLAG_detection_rate_hz;
+  if (detection_rate_hz == 0) {
+    detection_rate_hz = FLAG_sample_rate_hz;
+  }
+
+  Agc agc;
+
+  TransientSuppressor suppressor;
+  suppressor.Initialize(
+      FLAG_sample_rate_hz, detection_rate_hz, FLAG_num_channels);
+
+  const size_t audio_buffer_size =
+      FLAG_chunk_size_ms * FLAG_sample_rate_hz / 1000;
+  const size_t detection_buffer_size =
+      FLAG_chunk_size_ms * detection_rate_hz / 1000;
+
+  // int16 and float variants of the same data.
+  std::unique_ptr<int16_t[]> audio_buffer_i(
+      new int16_t[FLAG_num_channels * audio_buffer_size]);
+  std::unique_ptr<float[]> audio_buffer_f(
+      new float[FLAG_num_channels * audio_buffer_size]);
+
+  std::unique_ptr<float[]> detection_buffer, reference_buffer;
+
+  if (detection_file)
+    detection_buffer.reset(new float[detection_buffer_size]);
+  if (reference_file)
+    reference_buffer.reset(new float[audio_buffer_size]);
+
+  while (ReadBuffers(in_file,
+                     audio_buffer_size,
+                     FLAG_num_channels,
+                     audio_buffer_i.get(),
+                     detection_file,
+                     detection_buffer_size,
+                     detection_buffer.get(),
+                     reference_file,
+                     reference_buffer.get())) {
+    agc.Process(audio_buffer_i.get(),
+                static_cast<int>(audio_buffer_size),
+                FLAG_sample_rate_hz);
+
+    for (size_t i = 0; i < FLAG_num_channels * audio_buffer_size; ++i) {
+      audio_buffer_f[i] = audio_buffer_i[i];
+    }
+
+    ASSERT_EQ(0,
+              suppressor.Suppress(audio_buffer_f.get(),
+                                  audio_buffer_size,
+                                  FLAG_num_channels,
+                                  detection_buffer.get(),
+                                  detection_buffer_size,
+                                  reference_buffer.get(),
+                                  audio_buffer_size,
+                                  agc.voice_probability(),
+                                  true))
+        << "The transient suppressor could not suppress the frame";
+
+    // Write result to out file.
+    WritePCM(
+        out_file, audio_buffer_size, FLAG_num_channels, audio_buffer_f.get());
+  }
+
+  fclose(in_file);
+  if (detection_file) {
+    fclose(detection_file);
+  }
+  if (reference_file) {
+    fclose(reference_file);
+  }
+  fclose(out_file);
+}
+
+}  // namespace webrtc
+
+int main(int argc, char* argv[]) {
+  if (rtc::FlagList::SetFlagsFromCommandLine(&argc, argv, true) ||
+      FLAG_help || argc != 1) {
+    printf("%s", webrtc::kUsage);
+    if (FLAG_help) {
+      rtc::FlagList::Print(nullptr, false);
+      return 0;
+    }
+    return 1;
+  }
+  RTC_CHECK_GT(FLAG_chunk_size_ms, 0);
+  RTC_CHECK_GT(FLAG_sample_rate_hz, 0);
+  RTC_CHECK_GT(FLAG_num_channels, 0);
+
+  webrtc::void_main();
+  return 0;
+}
diff --git a/modules/audio_processing/transient/transient_suppressor.cc b/modules/audio_processing/transient/transient_suppressor.cc
new file mode 100644
index 0000000..9bbd7d9
--- /dev/null
+++ b/modules/audio_processing/transient/transient_suppressor.cc
@@ -0,0 +1,424 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/transient_suppressor.h"
+
+#include <math.h>
+#include <string.h>
+#include <cmath>
+#include <complex>
+#include <deque>
+#include <set>
+
+#include "common_audio/fft4g.h"
+#include "common_audio/include/audio_util.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+#include "modules/audio_processing/ns/windows_private.h"
+#include "modules/audio_processing/transient/common.h"
+#include "modules/audio_processing/transient/transient_detector.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+static const float kMeanIIRCoefficient = 0.5f;
+static const float kVoiceThreshold = 0.02f;
+
+// TODO(aluebs): Check if these values work also for 48kHz.
+static const size_t kMinVoiceBin = 3;
+static const size_t kMaxVoiceBin = 60;
+
+namespace {
+
+float ComplexMagnitude(float a, float b) {
+  return std::abs(a) + std::abs(b);
+}
+
+}  // namespace
+
+TransientSuppressor::TransientSuppressor()
+    : data_length_(0),
+      detection_length_(0),
+      analysis_length_(0),
+      buffer_delay_(0),
+      complex_analysis_length_(0),
+      num_channels_(0),
+      window_(NULL),
+      detector_smoothed_(0.f),
+      keypress_counter_(0),
+      chunks_since_keypress_(0),
+      detection_enabled_(false),
+      suppression_enabled_(false),
+      use_hard_restoration_(false),
+      chunks_since_voice_change_(0),
+      seed_(182),
+      using_reference_(false) {
+}
+
+TransientSuppressor::~TransientSuppressor() {}
+
+int TransientSuppressor::Initialize(int sample_rate_hz,
+                                    int detection_rate_hz,
+                                    int num_channels) {
+  switch (sample_rate_hz) {
+    case ts::kSampleRate8kHz:
+      analysis_length_ = 128u;
+      window_ = kBlocks80w128;
+      break;
+    case ts::kSampleRate16kHz:
+      analysis_length_ = 256u;
+      window_ = kBlocks160w256;
+      break;
+    case ts::kSampleRate32kHz:
+      analysis_length_ = 512u;
+      window_ = kBlocks320w512;
+      break;
+    case ts::kSampleRate48kHz:
+      analysis_length_ = 1024u;
+      window_ = kBlocks480w1024;
+      break;
+    default:
+      return -1;
+  }
+  if (detection_rate_hz != ts::kSampleRate8kHz &&
+      detection_rate_hz != ts::kSampleRate16kHz &&
+      detection_rate_hz != ts::kSampleRate32kHz &&
+      detection_rate_hz != ts::kSampleRate48kHz) {
+    return -1;
+  }
+  if (num_channels <= 0) {
+    return -1;
+  }
+
+  detector_.reset(new TransientDetector(detection_rate_hz));
+  data_length_ = sample_rate_hz * ts::kChunkSizeMs / 1000;
+  if (data_length_ > analysis_length_) {
+    RTC_NOTREACHED();
+    return -1;
+  }
+  buffer_delay_ = analysis_length_ - data_length_;
+
+  complex_analysis_length_ = analysis_length_ / 2 + 1;
+  RTC_DCHECK_GE(complex_analysis_length_, kMaxVoiceBin);
+  num_channels_ = num_channels;
+  in_buffer_.reset(new float[analysis_length_ * num_channels_]);
+  memset(in_buffer_.get(),
+         0,
+         analysis_length_ * num_channels_ * sizeof(in_buffer_[0]));
+  detection_length_ = detection_rate_hz * ts::kChunkSizeMs / 1000;
+  detection_buffer_.reset(new float[detection_length_]);
+  memset(detection_buffer_.get(),
+         0,
+         detection_length_ * sizeof(detection_buffer_[0]));
+  out_buffer_.reset(new float[analysis_length_ * num_channels_]);
+  memset(out_buffer_.get(),
+         0,
+         analysis_length_ * num_channels_ * sizeof(out_buffer_[0]));
+  // ip[0] must be zero to trigger initialization using rdft().
+  size_t ip_length = 2 + sqrtf(analysis_length_);
+  ip_.reset(new size_t[ip_length]());
+  memset(ip_.get(), 0, ip_length * sizeof(ip_[0]));
+  wfft_.reset(new float[complex_analysis_length_ - 1]);
+  memset(wfft_.get(), 0, (complex_analysis_length_ - 1) * sizeof(wfft_[0]));
+  spectral_mean_.reset(new float[complex_analysis_length_ * num_channels_]);
+  memset(spectral_mean_.get(),
+         0,
+         complex_analysis_length_ * num_channels_ * sizeof(spectral_mean_[0]));
+  fft_buffer_.reset(new float[analysis_length_ + 2]);
+  memset(fft_buffer_.get(), 0, (analysis_length_ + 2) * sizeof(fft_buffer_[0]));
+  magnitudes_.reset(new float[complex_analysis_length_]);
+  memset(magnitudes_.get(),
+         0,
+         complex_analysis_length_ * sizeof(magnitudes_[0]));
+  mean_factor_.reset(new float[complex_analysis_length_]);
+
+  static const float kFactorHeight = 10.f;
+  static const float kLowSlope = 1.f;
+  static const float kHighSlope = 0.3f;
+  for (size_t i = 0; i < complex_analysis_length_; ++i) {
+    mean_factor_[i] =
+        kFactorHeight /
+            (1.f + exp(kLowSlope * static_cast<int>(i - kMinVoiceBin))) +
+        kFactorHeight /
+            (1.f + exp(kHighSlope * static_cast<int>(kMaxVoiceBin - i)));
+  }
+  detector_smoothed_ = 0.f;
+  keypress_counter_ = 0;
+  chunks_since_keypress_ = 0;
+  detection_enabled_ = false;
+  suppression_enabled_ = false;
+  use_hard_restoration_ = false;
+  chunks_since_voice_change_ = 0;
+  seed_ = 182;
+  using_reference_ = false;
+  return 0;
+}
+
+int TransientSuppressor::Suppress(float* data,
+                                  size_t data_length,
+                                  int num_channels,
+                                  const float* detection_data,
+                                  size_t detection_length,
+                                  const float* reference_data,
+                                  size_t reference_length,
+                                  float voice_probability,
+                                  bool key_pressed) {
+  if (!data || data_length != data_length_ || num_channels != num_channels_ ||
+      detection_length != detection_length_ || voice_probability < 0 ||
+      voice_probability > 1) {
+    return -1;
+  }
+
+  UpdateKeypress(key_pressed);
+  UpdateBuffers(data);
+
+  int result = 0;
+  if (detection_enabled_) {
+    UpdateRestoration(voice_probability);
+
+    if (!detection_data) {
+      // Use the input data  of the first channel if special detection data is
+      // not supplied.
+      detection_data = &in_buffer_[buffer_delay_];
+    }
+
+    float detector_result = detector_->Detect(
+        detection_data, detection_length, reference_data, reference_length);
+    if (detector_result < 0) {
+      return -1;
+    }
+
+    using_reference_ = detector_->using_reference();
+
+    // |detector_smoothed_| follows the |detector_result| when this last one is
+    // increasing, but has an exponential decaying tail to be able to suppress
+    // the ringing of keyclicks.
+    float smooth_factor = using_reference_ ? 0.6 : 0.1;
+    detector_smoothed_ = detector_result >= detector_smoothed_
+                             ? detector_result
+                             : smooth_factor * detector_smoothed_ +
+                                   (1 - smooth_factor) * detector_result;
+
+    for (int i = 0; i < num_channels_; ++i) {
+      Suppress(&in_buffer_[i * analysis_length_],
+               &spectral_mean_[i * complex_analysis_length_],
+               &out_buffer_[i * analysis_length_]);
+    }
+  }
+
+  // If the suppression isn't enabled, we use the in buffer to delay the signal
+  // appropriately. This also gives time for the out buffer to be refreshed with
+  // new data between detection and suppression getting enabled.
+  for (int i = 0; i < num_channels_; ++i) {
+    memcpy(&data[i * data_length_],
+           suppression_enabled_ ? &out_buffer_[i * analysis_length_]
+                                : &in_buffer_[i * analysis_length_],
+           data_length_ * sizeof(*data));
+  }
+  return result;
+}
+
+// This should only be called when detection is enabled. UpdateBuffers() must
+// have been called. At return, |out_buffer_| will be filled with the
+// processed output.
+void TransientSuppressor::Suppress(float* in_ptr,
+                                   float* spectral_mean,
+                                   float* out_ptr) {
+  // Go to frequency domain.
+  for (size_t i = 0; i < analysis_length_; ++i) {
+    // TODO(aluebs): Rename windows
+    fft_buffer_[i] = in_ptr[i] * window_[i];
+  }
+
+  WebRtc_rdft(analysis_length_, 1, fft_buffer_.get(), ip_.get(), wfft_.get());
+
+  // Since WebRtc_rdft puts R[n/2] in fft_buffer_[1], we move it to the end
+  // for convenience.
+  fft_buffer_[analysis_length_] = fft_buffer_[1];
+  fft_buffer_[analysis_length_ + 1] = 0.f;
+  fft_buffer_[1] = 0.f;
+
+  for (size_t i = 0; i < complex_analysis_length_; ++i) {
+    magnitudes_[i] = ComplexMagnitude(fft_buffer_[i * 2],
+                                      fft_buffer_[i * 2 + 1]);
+  }
+  // Restore audio if necessary.
+  if (suppression_enabled_) {
+    if (use_hard_restoration_) {
+      HardRestoration(spectral_mean);
+    } else {
+      SoftRestoration(spectral_mean);
+    }
+  }
+
+  // Update the spectral mean.
+  for (size_t i = 0; i < complex_analysis_length_; ++i) {
+    spectral_mean[i] = (1 - kMeanIIRCoefficient) * spectral_mean[i] +
+                       kMeanIIRCoefficient * magnitudes_[i];
+  }
+
+  // Back to time domain.
+  // Put R[n/2] back in fft_buffer_[1].
+  fft_buffer_[1] = fft_buffer_[analysis_length_];
+
+  WebRtc_rdft(analysis_length_,
+              -1,
+              fft_buffer_.get(),
+              ip_.get(),
+              wfft_.get());
+  const float fft_scaling = 2.f / analysis_length_;
+
+  for (size_t i = 0; i < analysis_length_; ++i) {
+    out_ptr[i] += fft_buffer_[i] * window_[i] * fft_scaling;
+  }
+}
+
+void TransientSuppressor::UpdateKeypress(bool key_pressed) {
+  const int kKeypressPenalty = 1000 / ts::kChunkSizeMs;
+  const int kIsTypingThreshold = 1000 / ts::kChunkSizeMs;
+  const int kChunksUntilNotTyping = 4000 / ts::kChunkSizeMs;  // 4 seconds.
+
+  if (key_pressed) {
+    keypress_counter_ += kKeypressPenalty;
+    chunks_since_keypress_ = 0;
+    detection_enabled_ = true;
+  }
+  keypress_counter_ = std::max(0, keypress_counter_ - 1);
+
+  if (keypress_counter_ > kIsTypingThreshold) {
+    if (!suppression_enabled_) {
+      RTC_LOG(LS_INFO) << "[ts] Transient suppression is now enabled.";
+    }
+    suppression_enabled_ = true;
+    keypress_counter_ = 0;
+  }
+
+  if (detection_enabled_ &&
+      ++chunks_since_keypress_ > kChunksUntilNotTyping) {
+    if (suppression_enabled_) {
+      RTC_LOG(LS_INFO) << "[ts] Transient suppression is now disabled.";
+    }
+    detection_enabled_ = false;
+    suppression_enabled_ = false;
+    keypress_counter_ = 0;
+  }
+}
+
+void TransientSuppressor::UpdateRestoration(float voice_probability) {
+  const int kHardRestorationOffsetDelay = 3;
+  const int kHardRestorationOnsetDelay = 80;
+
+  bool not_voiced = voice_probability < kVoiceThreshold;
+
+  if (not_voiced == use_hard_restoration_) {
+    chunks_since_voice_change_ = 0;
+  } else {
+    ++chunks_since_voice_change_;
+
+    if ((use_hard_restoration_ &&
+         chunks_since_voice_change_ > kHardRestorationOffsetDelay) ||
+        (!use_hard_restoration_ &&
+         chunks_since_voice_change_ > kHardRestorationOnsetDelay)) {
+      use_hard_restoration_ = not_voiced;
+      chunks_since_voice_change_ = 0;
+    }
+  }
+}
+
+// Shift buffers to make way for new data. Must be called after
+// |detection_enabled_| is updated by UpdateKeypress().
+void TransientSuppressor::UpdateBuffers(float* data) {
+  // TODO(aluebs): Change to ring buffer.
+  memmove(in_buffer_.get(),
+          &in_buffer_[data_length_],
+          (buffer_delay_ + (num_channels_ - 1) * analysis_length_) *
+              sizeof(in_buffer_[0]));
+  // Copy new chunk to buffer.
+  for (int i = 0; i < num_channels_; ++i) {
+    memcpy(&in_buffer_[buffer_delay_ + i * analysis_length_],
+           &data[i * data_length_],
+           data_length_ * sizeof(*data));
+  }
+  if (detection_enabled_) {
+    // Shift previous chunk in out buffer.
+    memmove(out_buffer_.get(),
+            &out_buffer_[data_length_],
+            (buffer_delay_ + (num_channels_ - 1) * analysis_length_) *
+                sizeof(out_buffer_[0]));
+    // Initialize new chunk in out buffer.
+    for (int i = 0; i < num_channels_; ++i) {
+      memset(&out_buffer_[buffer_delay_ + i * analysis_length_],
+             0,
+             data_length_ * sizeof(out_buffer_[0]));
+    }
+  }
+}
+
+// Restores the unvoiced signal if a click is present.
+// Attenuates by a certain factor every peak in the |fft_buffer_| that exceeds
+// the spectral mean. The attenuation depends on |detector_smoothed_|.
+// If a restoration takes place, the |magnitudes_| are updated to the new value.
+void TransientSuppressor::HardRestoration(float* spectral_mean) {
+  const float detector_result =
+      1.f - pow(1.f - detector_smoothed_, using_reference_ ? 200.f : 50.f);
+  // To restore, we get the peaks in the spectrum. If higher than the previous
+  // spectral mean we adjust them.
+  for (size_t i = 0; i < complex_analysis_length_; ++i) {
+    if (magnitudes_[i] > spectral_mean[i] && magnitudes_[i] > 0) {
+      // RandU() generates values on [0, int16::max()]
+      const float phase = 2 * ts::kPi * WebRtcSpl_RandU(&seed_) /
+          std::numeric_limits<int16_t>::max();
+      const float scaled_mean = detector_result * spectral_mean[i];
+
+      fft_buffer_[i * 2] = (1 - detector_result) * fft_buffer_[i * 2] +
+                           scaled_mean * cosf(phase);
+      fft_buffer_[i * 2 + 1] = (1 - detector_result) * fft_buffer_[i * 2 + 1] +
+                               scaled_mean * sinf(phase);
+      magnitudes_[i] = magnitudes_[i] -
+                       detector_result * (magnitudes_[i] - spectral_mean[i]);
+    }
+  }
+}
+
+// Restores the voiced signal if a click is present.
+// Attenuates by a certain factor every peak in the |fft_buffer_| that exceeds
+// the spectral mean and that is lower than some function of the current block
+// frequency mean. The attenuation depends on |detector_smoothed_|.
+// If a restoration takes place, the |magnitudes_| are updated to the new value.
+void TransientSuppressor::SoftRestoration(float* spectral_mean) {
+  // Get the spectral magnitude mean of the current block.
+  float block_frequency_mean = 0;
+  for (size_t i = kMinVoiceBin; i < kMaxVoiceBin; ++i) {
+    block_frequency_mean += magnitudes_[i];
+  }
+  block_frequency_mean /= (kMaxVoiceBin - kMinVoiceBin);
+
+  // To restore, we get the peaks in the spectrum. If higher than the
+  // previous spectral mean and lower than a factor of the block mean
+  // we adjust them. The factor is a double sigmoid that has a minimum in the
+  // voice frequency range (300Hz - 3kHz).
+  for (size_t i = 0; i < complex_analysis_length_; ++i) {
+    if (magnitudes_[i] > spectral_mean[i] && magnitudes_[i] > 0 &&
+        (using_reference_ ||
+         magnitudes_[i] < block_frequency_mean * mean_factor_[i])) {
+      const float new_magnitude =
+          magnitudes_[i] -
+          detector_smoothed_ * (magnitudes_[i] - spectral_mean[i]);
+      const float magnitude_ratio = new_magnitude / magnitudes_[i];
+
+      fft_buffer_[i * 2] *= magnitude_ratio;
+      fft_buffer_[i * 2 + 1] *= magnitude_ratio;
+      magnitudes_[i] = new_magnitude;
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/transient/transient_suppressor.h b/modules/audio_processing/transient/transient_suppressor.h
new file mode 100644
index 0000000..27b096c
--- /dev/null
+++ b/modules/audio_processing/transient/transient_suppressor.h
@@ -0,0 +1,120 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_TRANSIENT_SUPPRESSOR_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_TRANSIENT_SUPPRESSOR_H_
+
+#include <deque>
+#include <memory>
+#include <set>
+
+#include "rtc_base/gtest_prod_util.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class TransientDetector;
+
+// Detects transients in an audio stream and suppress them using a simple
+// restoration algorithm that attenuates unexpected spikes in the spectrum.
+class TransientSuppressor {
+ public:
+  TransientSuppressor();
+  ~TransientSuppressor();
+
+  int Initialize(int sample_rate_hz, int detector_rate_hz, int num_channels);
+
+  // Processes a |data| chunk, and returns it with keystrokes suppressed from
+  // it. The float format is assumed to be int16 ranged. If there are more than
+  // one channel, the chunks are concatenated one after the other in |data|.
+  // |data_length| must be equal to |data_length_|.
+  // |num_channels| must be equal to |num_channels_|.
+  // A sub-band, ideally the higher, can be used as |detection_data|. If it is
+  // NULL, |data| is used for the detection too. The |detection_data| is always
+  // assumed mono.
+  // If a reference signal (e.g. keyboard microphone) is available, it can be
+  // passed in as |reference_data|. It is assumed mono and must have the same
+  // length as |data|. NULL is accepted if unavailable.
+  // This suppressor performs better if voice information is available.
+  // |voice_probability| is the probability of voice being present in this chunk
+  // of audio. If voice information is not available, |voice_probability| must
+  // always be set to 1.
+  // |key_pressed| determines if a key was pressed on this audio chunk.
+  // Returns 0 on success and -1 otherwise.
+  int Suppress(float* data,
+               size_t data_length,
+               int num_channels,
+               const float* detection_data,
+               size_t detection_length,
+               const float* reference_data,
+               size_t reference_length,
+               float voice_probability,
+               bool key_pressed);
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(TransientSuppressorTest,
+                           TypingDetectionLogicWorksAsExpectedForMono);
+  void Suppress(float* in_ptr, float* spectral_mean, float* out_ptr);
+
+  void UpdateKeypress(bool key_pressed);
+  void UpdateRestoration(float voice_probability);
+
+  void UpdateBuffers(float* data);
+
+  void HardRestoration(float* spectral_mean);
+  void SoftRestoration(float* spectral_mean);
+
+  std::unique_ptr<TransientDetector> detector_;
+
+  size_t data_length_;
+  size_t detection_length_;
+  size_t analysis_length_;
+  size_t buffer_delay_;
+  size_t complex_analysis_length_;
+  int num_channels_;
+  // Input buffer where the original samples are stored.
+  std::unique_ptr<float[]> in_buffer_;
+  std::unique_ptr<float[]> detection_buffer_;
+  // Output buffer where the restored samples are stored.
+  std::unique_ptr<float[]> out_buffer_;
+
+  // Arrays for fft.
+  std::unique_ptr<size_t[]> ip_;
+  std::unique_ptr<float[]> wfft_;
+
+  std::unique_ptr<float[]> spectral_mean_;
+
+  // Stores the data for the fft.
+  std::unique_ptr<float[]> fft_buffer_;
+
+  std::unique_ptr<float[]> magnitudes_;
+
+  const float* window_;
+
+  std::unique_ptr<float[]> mean_factor_;
+
+  float detector_smoothed_;
+
+  int keypress_counter_;
+  int chunks_since_keypress_;
+  bool detection_enabled_;
+  bool suppression_enabled_;
+
+  bool use_hard_restoration_;
+  int chunks_since_voice_change_;
+
+  uint32_t seed_;
+
+  bool using_reference_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_TRANSIENT_TRANSIENT_SUPPRESSOR_H_
diff --git a/modules/audio_processing/transient/transient_suppressor_unittest.cc b/modules/audio_processing/transient/transient_suppressor_unittest.cc
new file mode 100644
index 0000000..32d9858
--- /dev/null
+++ b/modules/audio_processing/transient/transient_suppressor_unittest.cc
@@ -0,0 +1,85 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/transient_suppressor.h"
+
+#include "modules/audio_processing/transient/common.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(TransientSuppressorTest, TypingDetectionLogicWorksAsExpectedForMono) {
+  static const int kNumChannels = 1;
+
+  TransientSuppressor ts;
+  ts.Initialize(ts::kSampleRate16kHz, ts::kSampleRate16kHz, kNumChannels);
+
+  // Each key-press enables detection.
+  EXPECT_FALSE(ts.detection_enabled_);
+  ts.UpdateKeypress(true);
+  EXPECT_TRUE(ts.detection_enabled_);
+
+  // It takes four seconds without any key-press to disable the detection
+  for (int time_ms = 0; time_ms < 3990; time_ms += ts::kChunkSizeMs) {
+    ts.UpdateKeypress(false);
+    EXPECT_TRUE(ts.detection_enabled_);
+  }
+  ts.UpdateKeypress(false);
+  EXPECT_FALSE(ts.detection_enabled_);
+
+  // Key-presses that are more than a second apart from each other don't enable
+  // suppression.
+  for (int i = 0; i < 100; ++i) {
+    EXPECT_FALSE(ts.suppression_enabled_);
+    ts.UpdateKeypress(true);
+    EXPECT_TRUE(ts.detection_enabled_);
+    EXPECT_FALSE(ts.suppression_enabled_);
+    for (int time_ms = 0; time_ms < 990; time_ms += ts::kChunkSizeMs) {
+      ts.UpdateKeypress(false);
+      EXPECT_TRUE(ts.detection_enabled_);
+      EXPECT_FALSE(ts.suppression_enabled_);
+    }
+    ts.UpdateKeypress(false);
+  }
+
+  // Two consecutive key-presses is enough to enable the suppression.
+  ts.UpdateKeypress(true);
+  EXPECT_FALSE(ts.suppression_enabled_);
+  ts.UpdateKeypress(true);
+  EXPECT_TRUE(ts.suppression_enabled_);
+
+  // Key-presses that are less than a second apart from each other don't disable
+  // detection nor suppression.
+  for (int i = 0; i < 100; ++i) {
+    for (int time_ms = 0; time_ms < 1000; time_ms += ts::kChunkSizeMs) {
+      ts.UpdateKeypress(false);
+      EXPECT_TRUE(ts.detection_enabled_);
+      EXPECT_TRUE(ts.suppression_enabled_);
+    }
+    ts.UpdateKeypress(true);
+    EXPECT_TRUE(ts.detection_enabled_);
+    EXPECT_TRUE(ts.suppression_enabled_);
+  }
+
+  // It takes four seconds without any key-press to disable the detection and
+  // suppression.
+  for (int time_ms = 0; time_ms < 3990; time_ms += ts::kChunkSizeMs) {
+    ts.UpdateKeypress(false);
+    EXPECT_TRUE(ts.detection_enabled_);
+    EXPECT_TRUE(ts.suppression_enabled_);
+  }
+  for (int time_ms = 0; time_ms < 1000; time_ms += ts::kChunkSizeMs) {
+    ts.UpdateKeypress(false);
+    EXPECT_FALSE(ts.detection_enabled_);
+    EXPECT_FALSE(ts.suppression_enabled_);
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/transient/wpd_node.cc b/modules/audio_processing/transient/wpd_node.cc
new file mode 100644
index 0000000..20d6a90
--- /dev/null
+++ b/modules/audio_processing/transient/wpd_node.cc
@@ -0,0 +1,73 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/wpd_node.h"
+
+#include <math.h>
+#include <string.h>
+
+#include "common_audio/fir_filter.h"
+#include "common_audio/fir_filter_factory.h"
+#include "modules/audio_processing/transient/dyadic_decimator.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+WPDNode::WPDNode(size_t length,
+                 const float* coefficients,
+                 size_t coefficients_length)
+    : // The data buffer has parent data length to be able to contain and filter
+      // it.
+      data_(new float[2 * length + 1]),
+      length_(length),
+      filter_(CreateFirFilter(coefficients,
+                              coefficients_length,
+                              2 * length + 1)) {
+  RTC_DCHECK_GT(length, 0);
+  RTC_DCHECK(coefficients);
+  RTC_DCHECK_GT(coefficients_length, 0);
+  memset(data_.get(), 0.f, (2 * length + 1) * sizeof(data_[0]));
+}
+
+WPDNode::~WPDNode() {}
+
+int WPDNode::Update(const float* parent_data, size_t parent_data_length) {
+  if (!parent_data || (parent_data_length / 2) != length_) {
+    return -1;
+  }
+
+  // Filter data.
+  filter_->Filter(parent_data, parent_data_length, data_.get());
+
+  // Decimate data.
+  const bool kOddSequence = true;
+  size_t output_samples = DyadicDecimate(
+      data_.get(), parent_data_length, kOddSequence, data_.get(), length_);
+  if (output_samples != length_) {
+    return -1;
+  }
+
+  // Get abs to all values.
+  for (size_t i = 0; i < length_; ++i) {
+    data_[i] = fabs(data_[i]);
+  }
+
+  return 0;
+}
+
+int WPDNode::set_data(const float* new_data, size_t length) {
+  if (!new_data || length != length_) {
+    return -1;
+  }
+  memcpy(data_.get(), new_data, length * sizeof(data_[0]));
+  return 0;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/transient/wpd_node.h b/modules/audio_processing/transient/wpd_node.h
new file mode 100644
index 0000000..117a633
--- /dev/null
+++ b/modules/audio_processing/transient/wpd_node.h
@@ -0,0 +1,47 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_WPD_NODE_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_WPD_NODE_H_
+
+#include <memory>
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class FIRFilter;
+
+// A single node of a Wavelet Packet Decomposition (WPD) tree.
+class WPDNode {
+ public:
+  // Creates a WPDNode. The data vector will contain zeros. The filter will have
+  // the coefficients provided.
+  WPDNode(size_t length, const float* coefficients, size_t coefficients_length);
+  ~WPDNode();
+
+  // Updates the node data. |parent_data| / 2 must be equals to |length_|.
+  // Returns 0 if correct, and -1 otherwise.
+  int Update(const float* parent_data, size_t parent_data_length);
+
+  const float* data() const { return data_.get(); }
+  // Returns 0 if correct, and -1 otherwise.
+  int set_data(const float* new_data, size_t length);
+  size_t length() const { return length_; }
+
+ private:
+  std::unique_ptr<float[]> data_;
+  size_t length_;
+  std::unique_ptr<FIRFilter> filter_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_TRANSIENT_WPD_NODE_H_
diff --git a/modules/audio_processing/transient/wpd_node_unittest.cc b/modules/audio_processing/transient/wpd_node_unittest.cc
new file mode 100644
index 0000000..1929361
--- /dev/null
+++ b/modules/audio_processing/transient/wpd_node_unittest.cc
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/wpd_node.h"
+
+#include <string.h>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+static const size_t kDataLength = 5;
+static const float kTolerance = 0.0001f;
+
+static const size_t kParentDataLength = kDataLength * 2;
+static const float kParentData[kParentDataLength] =
+    {1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f};
+
+static const float kCoefficients[] = {0.2f, -0.3f, 0.5f, -0.7f, 0.11f};
+static const size_t kCoefficientsLength = sizeof(kCoefficients) /
+                                       sizeof(kCoefficients[0]);
+
+TEST(WPDNodeTest, Accessors) {
+  WPDNode node(kDataLength, kCoefficients, kCoefficientsLength);
+  EXPECT_EQ(0, node.set_data(kParentData, kDataLength));
+  EXPECT_EQ(0, memcmp(node.data(),
+                      kParentData,
+                      kDataLength * sizeof(node.data()[0])));
+}
+
+TEST(WPDNodeTest, UpdateThatOnlyDecimates) {
+  const float kIndentyCoefficient = 1.f;
+  WPDNode node(kDataLength, &kIndentyCoefficient, 1);
+  EXPECT_EQ(0, node.Update(kParentData, kParentDataLength));
+  for (size_t i = 0; i < kDataLength; ++i) {
+    EXPECT_FLOAT_EQ(kParentData[i * 2 + 1], node.data()[i]);
+  }
+}
+
+TEST(WPDNodeTest, UpdateWithArbitraryDataAndArbitraryFilter) {
+  WPDNode node(kDataLength, kCoefficients, kCoefficientsLength);
+  EXPECT_EQ(0, node.Update(kParentData, kParentDataLength));
+  EXPECT_NEAR(0.1f, node.data()[0], kTolerance);
+  EXPECT_NEAR(0.2f, node.data()[1], kTolerance);
+  EXPECT_NEAR(0.18f, node.data()[2], kTolerance);
+  EXPECT_NEAR(0.56f, node.data()[3], kTolerance);
+  EXPECT_NEAR(0.94f, node.data()[4], kTolerance);
+}
+
+TEST(WPDNodeTest, ExpectedErrorReturnValue) {
+  WPDNode node(kDataLength, kCoefficients, kCoefficientsLength);
+  EXPECT_EQ(-1, node.Update(kParentData, kParentDataLength - 1));
+  EXPECT_EQ(-1, node.Update(NULL, kParentDataLength));
+  EXPECT_EQ(-1, node.set_data(kParentData, kDataLength - 1));
+  EXPECT_EQ(-1, node.set_data(NULL, kDataLength));
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/transient/wpd_tree.cc b/modules/audio_processing/transient/wpd_tree.cc
new file mode 100644
index 0000000..a01b816
--- /dev/null
+++ b/modules/audio_processing/transient/wpd_tree.cc
@@ -0,0 +1,118 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/wpd_tree.h"
+
+#include <math.h>
+#include <string.h>
+
+#include "modules/audio_processing/transient/dyadic_decimator.h"
+#include "modules/audio_processing/transient/wpd_node.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+WPDTree::WPDTree(size_t data_length, const float* high_pass_coefficients,
+                 const float* low_pass_coefficients, size_t coefficients_length,
+                 int levels)
+    : data_length_(data_length),
+      levels_(levels),
+      num_nodes_((1 << (levels + 1)) - 1) {
+  RTC_DCHECK_GT(data_length, (static_cast<size_t>(1) << levels));
+  RTC_DCHECK(high_pass_coefficients);
+  RTC_DCHECK(low_pass_coefficients);
+  RTC_DCHECK_GT(levels, 0);
+  // Size is 1 more, so we can use the array as 1-based. nodes_[0] is never
+  // allocated.
+  nodes_.reset(new std::unique_ptr<WPDNode>[num_nodes_ + 1]);
+
+  // Create the first node
+  const float kRootCoefficient = 1.f;  // Identity Coefficient.
+  nodes_[1].reset(new WPDNode(data_length, &kRootCoefficient, 1));
+  // Variables used to create the rest of the nodes.
+  size_t index = 1;
+  size_t index_left_child = 0;
+  size_t index_right_child = 0;
+
+  int num_nodes_at_curr_level = 0;
+
+  // Branching each node in each level to create its children. The last level is
+  // not branched (all the nodes of that level are leaves).
+  for (int current_level = 0; current_level < levels; ++current_level) {
+    num_nodes_at_curr_level = 1 << current_level;
+    for (int i = 0; i < num_nodes_at_curr_level; ++i) {
+      index = (1 << current_level) + i;
+      // Obtain the index of the current node children.
+      index_left_child = index * 2;
+      index_right_child = index_left_child + 1;
+      nodes_[index_left_child].reset(new WPDNode(nodes_[index]->length() / 2,
+                                                 low_pass_coefficients,
+                                                 coefficients_length));
+      nodes_[index_right_child].reset(new WPDNode(nodes_[index]->length() / 2,
+                                                  high_pass_coefficients,
+                                                  coefficients_length));
+    }
+  }
+}
+
+WPDTree::~WPDTree() {}
+
+WPDNode* WPDTree::NodeAt(int level, int index) {
+  if (level < 0 || level > levels_ || index < 0 || index >= 1 << level) {
+    return NULL;
+  }
+
+  return nodes_[(1 << level) + index].get();
+}
+
+int WPDTree::Update(const float* data, size_t data_length) {
+  if (!data || data_length != data_length_) {
+    return -1;
+  }
+
+  // Update the root node.
+  int update_result = nodes_[1]->set_data(data, data_length);
+  if (update_result != 0) {
+    return -1;
+  }
+
+  // Variables used to update the rest of the nodes.
+  size_t index = 1;
+  size_t index_left_child = 0;
+  size_t index_right_child = 0;
+
+  int num_nodes_at_curr_level = 0;
+
+  for (int current_level = 0; current_level < levels_; ++current_level) {
+    num_nodes_at_curr_level = 1 << current_level;
+    for (int i = 0; i < num_nodes_at_curr_level; ++i) {
+      index = (1 << current_level) + i;
+      // Obtain the index of the current node children.
+      index_left_child = index * 2;
+      index_right_child = index_left_child + 1;
+
+      update_result = nodes_[index_left_child]->Update(
+          nodes_[index]->data(), nodes_[index]->length());
+      if (update_result != 0) {
+        return -1;
+      }
+
+      update_result = nodes_[index_right_child]->Update(
+          nodes_[index]->data(), nodes_[index]->length());
+      if (update_result != 0) {
+        return -1;
+      }
+    }
+  }
+
+  return 0;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/transient/wpd_tree.h b/modules/audio_processing/transient/wpd_tree.h
new file mode 100644
index 0000000..53fc06b
--- /dev/null
+++ b/modules/audio_processing/transient/wpd_tree.h
@@ -0,0 +1,92 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TRANSIENT_WPD_TREE_H_
+#define MODULES_AUDIO_PROCESSING_TRANSIENT_WPD_TREE_H_
+
+#include <memory>
+
+#include "modules/audio_processing/transient/wpd_node.h"
+
+namespace webrtc {
+
+// Tree of a Wavelet Packet Decomposition (WPD).
+//
+// The root node contains all the data provided; for each node in the tree, the
+// left child contains the approximation coefficients extracted from the node,
+// and the right child contains the detail coefficients.
+// It preserves its state, so it can be multiple-called.
+//
+// The number of nodes in the tree will be 2 ^ levels - 1.
+//
+// Implementation details: Since the tree always will be a complete binary tree,
+// it is implemented using a single linear array instead of managing the
+// relationships in each node. For convience is better to use a array that
+// starts in 1 (instead of 0). Taking that into account, the following formulas
+// apply:
+// Root node index: 1.
+// Node(Level, Index in that level): 2 ^ Level + (Index in that level).
+// Left Child: Current node index * 2.
+// Right Child: Current node index * 2 + 1.
+// Parent: Current Node Index / 2 (Integer division).
+class WPDTree {
+ public:
+  // Creates a WPD tree using the data length and coefficients provided.
+  WPDTree(size_t data_length,
+          const float* high_pass_coefficients,
+          const float* low_pass_coefficients,
+          size_t coefficients_length,
+          int levels);
+  ~WPDTree();
+
+  // Returns the number of nodes at any given level.
+  static int NumberOfNodesAtLevel(int level) {
+    return 1 << level;
+  }
+
+  // Returns a pointer to the node at the given level and index(of that level).
+  // Level goes from 0 to levels().
+  // Index goes from 0 to the number of NumberOfNodesAtLevel(level) - 1.
+  //
+  // You can use the following formulas to get any node within the tree:
+  // Notation: (Level, Index of node in that level).
+  // Root node: (0/0).
+  // Left Child: (Current node level + 1, Current node index * 2).
+  // Right Child: (Current node level + 1, Current node index * 2 + 1).
+  // Parent: (Current node level - 1, Current node index / 2) (Integer division)
+  //
+  // If level or index are out of bounds the function will return NULL.
+  WPDNode* NodeAt(int level, int index);
+
+  // Updates all the nodes of the tree with the new data. |data_length| must be
+  // teh same that was used for the creation of the tree.
+  // Returns 0 if correct, and -1 otherwise.
+  int Update(const float* data, size_t data_length);
+
+  // Returns the total number of levels below the root. Root is cosidered level
+  // 0.
+  int levels() const { return levels_; }
+
+  // Returns the total number of nodes.
+  int num_nodes() const { return num_nodes_; }
+
+  // Returns the total number of leaves.
+  int num_leaves() const { return 1 << levels_; }
+
+ private:
+  size_t data_length_;
+  int levels_;
+  int num_nodes_;
+  std::unique_ptr<std::unique_ptr<WPDNode>[]> nodes_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_TRANSIENT_WPD_TREE_H_
diff --git a/modules/audio_processing/transient/wpd_tree_unittest.cc b/modules/audio_processing/transient/wpd_tree_unittest.cc
new file mode 100644
index 0000000..a90af77
--- /dev/null
+++ b/modules/audio_processing/transient/wpd_tree_unittest.cc
@@ -0,0 +1,193 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/transient/wpd_tree.h"
+
+#include <memory>
+#include <sstream>
+#include <string>
+
+#include "modules/audio_processing/transient/daubechies_8_wavelet_coeffs.h"
+#include "modules/audio_processing/transient/file_utils.h"
+#include "system_wrappers/include/file_wrapper.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+TEST(WPDTreeTest, Construction) {
+  const size_t kTestBufferSize = 100;
+  const int kLevels = 5;
+  const int kExpectedNumberOfNodes = (1 << (kLevels + 1)) - 1;
+
+  float test_buffer[kTestBufferSize];
+  memset(test_buffer, 0.f, kTestBufferSize * sizeof(*test_buffer));
+  float test_coefficients[] = {1.f, 2.f, 3.f, 4.f, 5.f};
+  const size_t kTestCoefficientsLength = sizeof(test_coefficients) /
+      sizeof(test_coefficients[0]);
+  WPDTree tree(kTestBufferSize,
+               test_coefficients,
+               test_coefficients,
+               kTestCoefficientsLength,
+               kLevels);
+  ASSERT_EQ(kExpectedNumberOfNodes, tree.num_nodes());
+  // Checks for NodeAt(level, index).
+  int nodes_at_level = 0;
+  for (int level = 0; level <= kLevels; ++level) {
+    nodes_at_level = 1 << level;
+    for (int i = 0; i < nodes_at_level; ++i) {
+      ASSERT_TRUE(NULL != tree.NodeAt(level, i));
+    }
+    // Out of bounds.
+    EXPECT_EQ(NULL, tree.NodeAt(level, -1));
+    EXPECT_EQ(NULL, tree.NodeAt(level, -12));
+    EXPECT_EQ(NULL, tree.NodeAt(level, nodes_at_level));
+    EXPECT_EQ(NULL, tree.NodeAt(level, nodes_at_level + 5));
+  }
+  // Out of bounds.
+  EXPECT_EQ(NULL, tree.NodeAt(-1, 0));
+  EXPECT_EQ(NULL, tree.NodeAt(-12, 0));
+  EXPECT_EQ(NULL, tree.NodeAt(kLevels + 1, 0));
+  EXPECT_EQ(NULL, tree.NodeAt(kLevels + 5, 0));
+  // Checks for Update().
+  EXPECT_EQ(0, tree.Update(test_buffer, kTestBufferSize));
+  EXPECT_EQ(-1, tree.Update(NULL, kTestBufferSize));
+  EXPECT_EQ(-1, tree.Update(test_buffer, kTestBufferSize - 1));
+}
+
+// This test is for the correctness of the tree.
+// Checks the results from the Matlab equivalent, it is done comparing the
+// results that are stored in the output files from Matlab.
+// It also writes the results in its own set of files in the out directory.
+// Matlab and output files contain all the results in double precision (Little
+// endian) appended.
+#if defined(WEBRTC_IOS)
+TEST(WPDTreeTest, DISABLED_CorrectnessBasedOnMatlabFiles) {
+#else
+TEST(WPDTreeTest, CorrectnessBasedOnMatlabFiles) {
+#endif
+  // 10 ms at 16000 Hz.
+  const size_t kTestBufferSize = 160;
+  const int kLevels = 3;
+  const int kLeaves = 1 << kLevels;
+  const size_t kLeavesSamples = kTestBufferSize >> kLevels;
+  // Create tree with Discrete Meyer Wavelet Coefficients.
+  WPDTree tree(kTestBufferSize,
+               kDaubechies8HighPassCoefficients,
+               kDaubechies8LowPassCoefficients,
+               kDaubechies8CoefficientsLength,
+               kLevels);
+  // Allocate and open all matlab and out files.
+  std::unique_ptr<FileWrapper> matlab_files_data[kLeaves];
+  std::unique_ptr<FileWrapper> out_files_data[kLeaves];
+
+  for (int i = 0; i < kLeaves; ++i) {
+    // Matlab files.
+    matlab_files_data[i].reset(FileWrapper::Create());
+
+    std::ostringstream matlab_stream;
+    matlab_stream << "audio_processing/transient/wpd" << i;
+    std::string matlab_string = test::ResourcePath(matlab_stream.str(), "dat");
+    matlab_files_data[i]->OpenFile(matlab_string.c_str(), true);  // Read only.
+
+    bool file_opened = matlab_files_data[i]->is_open();
+    ASSERT_TRUE(file_opened) << "File could not be opened.\n" << matlab_string;
+
+    // Out files.
+    out_files_data[i].reset(FileWrapper::Create());
+
+    std::ostringstream out_stream;
+    out_stream << test::OutputPath() << "wpd_" << i << ".out";
+    std::string out_string = out_stream.str();
+
+    out_files_data[i]->OpenFile(out_string.c_str(), false);  // Write mode.
+
+    file_opened = out_files_data[i]->is_open();
+    ASSERT_TRUE(file_opened) << "File could not be opened.\n" << out_string;
+  }
+
+  // Prepare the test file.
+  std::string test_file_name = test::ResourcePath(
+      "audio_processing/transient/ajm-macbook-1-spke16m", "pcm");
+
+  std::unique_ptr<FileWrapper> test_file(FileWrapper::Create());
+
+  test_file->OpenFile(test_file_name.c_str(), true);  // Read only.
+
+  bool file_opened = test_file->is_open();
+  ASSERT_TRUE(file_opened) << "File could not be opened.\n" << test_file_name;
+
+  float test_buffer[kTestBufferSize];
+
+  // Only the first frames of the audio file are tested. The matlab files also
+  // only contains information about the first frames.
+  const size_t kMaxFramesToTest = 100;
+  const float kTolerance = 0.03f;
+
+  size_t frames_read = 0;
+
+  // Read first buffer from the PCM test file.
+  size_t file_samples_read = ReadInt16FromFileToFloatBuffer(test_file.get(),
+                                                            kTestBufferSize,
+                                                            test_buffer);
+  while (file_samples_read > 0 && frames_read < kMaxFramesToTest) {
+    ++frames_read;
+
+    if (file_samples_read < kTestBufferSize) {
+      // Pad the rest of the buffer with zeros.
+      for (size_t i = file_samples_read; i < kTestBufferSize; ++i) {
+        test_buffer[i] = 0.0;
+      }
+    }
+    tree.Update(test_buffer, kTestBufferSize);
+    double matlab_buffer[kTestBufferSize];
+
+    // Compare results with data from the matlab test files.
+    for (int i = 0; i < kLeaves; ++i) {
+      // Compare data values
+      size_t matlab_samples_read =
+          ReadDoubleBufferFromFile(matlab_files_data[i].get(),
+                                   kLeavesSamples,
+                                   matlab_buffer);
+
+      ASSERT_EQ(kLeavesSamples, matlab_samples_read)
+          << "Matlab test files are malformed.\n"
+          << "File: 3_" << i;
+      // Get output data from the corresponding node
+      const float* node_data = tree.NodeAt(kLevels, i)->data();
+      // Compare with matlab files.
+      for (size_t j = 0; j < kLeavesSamples; ++j) {
+        EXPECT_NEAR(matlab_buffer[j], node_data[j], kTolerance)
+            << "\nLeaf: " << i << "\nSample: " << j
+            << "\nFrame: " << frames_read - 1;
+      }
+
+      // Write results to out files.
+      WriteFloatBufferToFile(out_files_data[i].get(),
+                             kLeavesSamples,
+                             node_data);
+    }
+
+    // Read next buffer from the PCM test file.
+    file_samples_read = ReadInt16FromFileToFloatBuffer(test_file.get(),
+                                                       kTestBufferSize,
+                                                       test_buffer);
+  }
+
+  // Close all matlab and out files.
+  for (int i = 0; i < kLeaves; ++i) {
+    matlab_files_data[i]->CloseFile();
+    out_files_data[i]->CloseFile();
+  }
+
+  test_file->CloseFile();
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/typing_detection.cc b/modules/audio_processing/typing_detection.cc
new file mode 100644
index 0000000..6e18124
--- /dev/null
+++ b/modules/audio_processing/typing_detection.cc
@@ -0,0 +1,90 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/typing_detection.h"
+
+namespace webrtc {
+
+TypingDetection::TypingDetection()
+    : time_active_(0),
+      time_since_last_typing_(0),
+      penalty_counter_(0),
+      counter_since_last_detection_update_(0),
+      detection_to_report_(false),
+      new_detection_to_report_(false),
+      time_window_(10),
+      cost_per_typing_(100),
+      reporting_threshold_(300),
+      penalty_decay_(1),
+      type_event_delay_(2),
+      report_detection_update_period_(1) {
+}
+
+TypingDetection::~TypingDetection() {}
+
+bool TypingDetection::Process(bool key_pressed, bool vad_activity) {
+  if (vad_activity)
+    time_active_++;
+  else
+    time_active_ = 0;
+
+  // Keep track if time since last typing event
+  if (key_pressed)
+    time_since_last_typing_ = 0;
+  else
+    ++time_since_last_typing_;
+
+  if (time_since_last_typing_ < type_event_delay_ &&
+      vad_activity &&
+      time_active_ < time_window_) {
+    penalty_counter_ += cost_per_typing_;
+    if (penalty_counter_ > reporting_threshold_)
+      new_detection_to_report_ = true;
+  }
+
+  if (penalty_counter_ > 0)
+    penalty_counter_ -= penalty_decay_;
+
+  if (++counter_since_last_detection_update_ ==
+      report_detection_update_period_) {
+    detection_to_report_ = new_detection_to_report_;
+    new_detection_to_report_ = false;
+    counter_since_last_detection_update_ = 0;
+  }
+
+  return detection_to_report_;
+}
+
+int TypingDetection::TimeSinceLastDetectionInSeconds() {
+  // Round to whole seconds.
+  return (time_since_last_typing_ + 50) / 100;
+}
+
+void TypingDetection::SetParameters(int time_window,
+                                    int cost_per_typing,
+                                    int reporting_threshold,
+                                    int penalty_decay,
+                                    int type_event_delay,
+                                    int report_detection_update_period) {
+  if (time_window) time_window_ = time_window;
+
+  if (cost_per_typing) cost_per_typing_ = cost_per_typing;
+
+  if (reporting_threshold) reporting_threshold_ = reporting_threshold;
+
+  if (penalty_decay) penalty_decay_ = penalty_decay;
+
+  if (type_event_delay) type_event_delay_ = type_event_delay;
+
+  if (report_detection_update_period)
+    report_detection_update_period_ = report_detection_update_period;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/typing_detection.h b/modules/audio_processing/typing_detection.h
new file mode 100644
index 0000000..fe74a59
--- /dev/null
+++ b/modules/audio_processing/typing_detection.h
@@ -0,0 +1,93 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TYPING_DETECTION_H_
+#define MODULES_AUDIO_PROCESSING_TYPING_DETECTION_H_
+
+#include "modules/include/module_common_types.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class TypingDetection {
+ public:
+  TypingDetection();
+  virtual ~TypingDetection();
+
+  // Run the detection algortihm. Shall be called every 10 ms. Returns true if
+  // typing is detected, or false if not, based on the update period as set with
+  // SetParameters(). See |report_detection_update_period_| description below.
+  bool Process(bool key_pressed, bool vad_activity);
+
+  // Gets the time in seconds since the last detection.
+  int TimeSinceLastDetectionInSeconds();
+
+  // Sets the algorithm parameters. A parameter value of 0 leaves it unchanged.
+  // See the correspondning member variables below for descriptions.
+  void SetParameters(int time_window,
+                     int cost_per_typing,
+                     int reporting_threshold,
+                     int penalty_decay,
+                     int type_event_delay,
+                     int report_detection_update_period);
+
+ private:
+  int time_active_;
+  int time_since_last_typing_;
+  int penalty_counter_;
+
+  // Counter since last time the detection status reported by Process() was
+  // updated. See also |report_detection_update_period_|.
+  int counter_since_last_detection_update_;
+
+  // The detection status to report. Updated every
+  // |report_detection_update_period_| call to Process().
+  bool detection_to_report_;
+
+  // What |detection_to_report_| should be set to next time it is updated.
+  bool new_detection_to_report_;
+
+  // Settable threshold values.
+
+  // Number of 10 ms slots accepted to count as a hit.
+  int time_window_;
+
+  // Penalty added for a typing + activity coincide.
+  int cost_per_typing_;
+
+  // Threshold for |penalty_counter_|.
+  int reporting_threshold_;
+
+  // How much we reduce |penalty_counter_| every 10 ms.
+  int penalty_decay_;
+
+  // How old typing events we allow.
+  int type_event_delay_;
+
+  // Settable update period.
+
+  // Number of 10 ms slots between each update of the detection status returned
+  // by Process(). This inertia added to the algorithm is usually desirable and
+  // provided so that consumers of the class don't have to implement that
+  // themselves if they don't wish.
+  // If set to 1, each call to Process() will return the detection status for
+  // that 10 ms slot.
+  // If set to N (where N > 1), the detection status returned from Process()
+  // will remain the same until Process() has been called N times. Then, if none
+  // of the last N calls to Process() has detected typing for each respective
+  // 10 ms slot, Process() will return false. If at least one of the last N
+  // calls has detected typing, Process() will return true. And that returned
+  // status will then remain the same until the next N calls have been done.
+  int report_detection_update_period_;
+};
+
+}  // namespace webrtc
+
+#endif  // #ifndef MODULES_AUDIO_PROCESSING_TYPING_DETECTION_H_
diff --git a/modules/audio_processing/utility/block_mean_calculator.cc b/modules/audio_processing/utility/block_mean_calculator.cc
new file mode 100644
index 0000000..3d76692
--- /dev/null
+++ b/modules/audio_processing/utility/block_mean_calculator.cc
@@ -0,0 +1,53 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/utility/block_mean_calculator.h"
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+BlockMeanCalculator::BlockMeanCalculator(size_t block_length)
+    : block_length_(block_length),
+      count_(0),
+      sum_(0.0),
+      mean_(0.0) {
+  RTC_DCHECK(block_length_ != 0);
+}
+
+void BlockMeanCalculator::Reset() {
+  Clear();
+  mean_ = 0.0;
+}
+
+void BlockMeanCalculator::AddValue(float value) {
+  sum_ += value;
+  ++count_;
+  if (count_ == block_length_) {
+    mean_ = sum_ / block_length_;
+    Clear();
+  }
+}
+
+bool BlockMeanCalculator::EndOfBlock() const {
+  return count_ == 0;
+}
+
+float BlockMeanCalculator::GetLatestMean() const {
+  return mean_;
+}
+
+// Flush all samples added.
+void BlockMeanCalculator::Clear() {
+  count_ = 0;
+  sum_ = 0.0;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/utility/block_mean_calculator.h b/modules/audio_processing/utility/block_mean_calculator.h
new file mode 100644
index 0000000..cfa7cfb
--- /dev/null
+++ b/modules/audio_processing/utility/block_mean_calculator.h
@@ -0,0 +1,52 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_UTILITY_BLOCK_MEAN_CALCULATOR_H_
+#define MODULES_AUDIO_PROCESSING_UTILITY_BLOCK_MEAN_CALCULATOR_H_
+
+#include <stddef.h>
+
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+// BlockMeanCalculator calculates the mean of a block of values. Values are
+// added one after another, and the mean is updated at the end of every block.
+class BlockMeanCalculator {
+ public:
+  explicit BlockMeanCalculator(size_t block_length);
+
+  // Reset.
+  void Reset();
+
+  // Add one value to the sequence.
+  void AddValue(float value);
+
+  // Return whether the latest added value was at the end of a block.
+  bool EndOfBlock() const;
+
+  // Return the latest mean.
+  float GetLatestMean() const;
+
+ private:
+  // Clear all values added.
+  void Clear();
+
+  const size_t block_length_;
+  size_t count_;
+  float sum_;
+  float mean_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(BlockMeanCalculator);
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_UTILITY_BLOCK_MEAN_CALCULATOR_H_
diff --git a/modules/audio_processing/utility/block_mean_calculator_unittest.cc b/modules/audio_processing/utility/block_mean_calculator_unittest.cc
new file mode 100644
index 0000000..1f4ebf1
--- /dev/null
+++ b/modules/audio_processing/utility/block_mean_calculator_unittest.cc
@@ -0,0 +1,58 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/utility/block_mean_calculator.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(MeanCalculatorTest, Correctness) {
+  const size_t kBlockLength = 10;
+  BlockMeanCalculator mean_calculator(kBlockLength);
+  size_t i = 0;
+  float reference = 0.0;
+
+  for (; i < kBlockLength - 1; ++i) {
+    mean_calculator.AddValue(static_cast<float>(i));
+    EXPECT_FALSE(mean_calculator.EndOfBlock());
+  }
+  mean_calculator.AddValue(static_cast<float>(i++));
+  EXPECT_TRUE(mean_calculator.EndOfBlock());
+
+  for (; i < 3 * kBlockLength; ++i) {
+    const bool end_of_block = i % kBlockLength == 0;
+    if (end_of_block) {
+      // Sum of (i - kBlockLength) ... (i - 1)
+      reference = i - 0.5 * (1 + kBlockLength);
+    }
+    EXPECT_EQ(mean_calculator.EndOfBlock(), end_of_block);
+    EXPECT_EQ(reference, mean_calculator.GetLatestMean());
+    mean_calculator.AddValue(static_cast<float>(i));
+  }
+}
+
+TEST(MeanCalculatorTest, Reset) {
+  const size_t kBlockLength = 10;
+  BlockMeanCalculator mean_calculator(kBlockLength);
+  for (size_t i = 0; i < kBlockLength - 1; ++i) {
+    mean_calculator.AddValue(static_cast<float>(i));
+  }
+  mean_calculator.Reset();
+  size_t i = 0;
+  for (; i < kBlockLength - 1; ++i) {
+    mean_calculator.AddValue(static_cast<float>(i));
+    EXPECT_FALSE(mean_calculator.EndOfBlock());
+  }
+  mean_calculator.AddValue(static_cast<float>(i));
+  EXPECT_TRUE(mean_calculator.EndOfBlock());
+  EXPECT_EQ(mean_calculator.GetLatestMean(), 0.5 * (kBlockLength - 1));
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/utility/delay_estimator.cc b/modules/audio_processing/utility/delay_estimator.cc
new file mode 100644
index 0000000..871b541
--- /dev/null
+++ b/modules/audio_processing/utility/delay_estimator.cc
@@ -0,0 +1,703 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/utility/delay_estimator.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+// Number of right shifts for scaling is linearly depending on number of bits in
+// the far-end binary spectrum.
+static const int kShiftsAtZero = 13;  // Right shifts at zero binary spectrum.
+static const int kShiftsLinearSlope = 3;
+
+static const int32_t kProbabilityOffset = 1024;  // 2 in Q9.
+static const int32_t kProbabilityLowerLimit = 8704;  // 17 in Q9.
+static const int32_t kProbabilityMinSpread = 2816;  // 5.5 in Q9.
+
+// Robust validation settings
+static const float kHistogramMax = 3000.f;
+static const float kLastHistogramMax = 250.f;
+static const float kMinHistogramThreshold = 1.5f;
+static const int kMinRequiredHits = 10;
+static const int kMaxHitsWhenPossiblyNonCausal = 10;
+static const int kMaxHitsWhenPossiblyCausal = 1000;
+static const float kQ14Scaling = 1.f / (1 << 14);  // Scaling by 2^14 to get Q0.
+static const float kFractionSlope = 0.05f;
+static const float kMinFractionWhenPossiblyCausal = 0.5f;
+static const float kMinFractionWhenPossiblyNonCausal = 0.25f;
+
+// Counts and returns number of bits of a 32-bit word.
+static int BitCount(uint32_t u32) {
+  uint32_t tmp = u32 - ((u32 >> 1) & 033333333333) -
+      ((u32 >> 2) & 011111111111);
+  tmp = ((tmp + (tmp >> 3)) & 030707070707);
+  tmp = (tmp + (tmp >> 6));
+  tmp = (tmp + (tmp >> 12) + (tmp >> 24)) & 077;
+
+  return ((int) tmp);
+}
+
+// Compares the |binary_vector| with all rows of the |binary_matrix| and counts
+// per row the number of times they have the same value.
+//
+// Inputs:
+//      - binary_vector     : binary "vector" stored in a long
+//      - binary_matrix     : binary "matrix" stored as a vector of long
+//      - matrix_size       : size of binary "matrix"
+//
+// Output:
+//      - bit_counts        : "Vector" stored as a long, containing for each
+//                            row the number of times the matrix row and the
+//                            input vector have the same value
+//
+static void BitCountComparison(uint32_t binary_vector,
+                               const uint32_t* binary_matrix,
+                               int matrix_size,
+                               int32_t* bit_counts) {
+  int n = 0;
+
+  // Compare |binary_vector| with all rows of the |binary_matrix|
+  for (; n < matrix_size; n++) {
+    bit_counts[n] = (int32_t) BitCount(binary_vector ^ binary_matrix[n]);
+  }
+}
+
+// Collects necessary statistics for the HistogramBasedValidation().  This
+// function has to be called prior to calling HistogramBasedValidation().  The
+// statistics updated and used by the HistogramBasedValidation() are:
+//  1. the number of |candidate_hits|, which states for how long we have had the
+//     same |candidate_delay|
+//  2. the |histogram| of candidate delays over time.  This histogram is
+//     weighted with respect to a reliability measure and time-varying to cope
+//     with possible delay shifts.
+// For further description see commented code.
+//
+// Inputs:
+//  - candidate_delay   : The delay to validate.
+//  - valley_depth_q14  : The cost function has a valley/minimum at the
+//                        |candidate_delay| location.  |valley_depth_q14| is the
+//                        cost function difference between the minimum and
+//                        maximum locations.  The value is in the Q14 domain.
+//  - valley_level_q14  : Is the cost function value at the minimum, in Q14.
+static void UpdateRobustValidationStatistics(BinaryDelayEstimator* self,
+                                             int candidate_delay,
+                                             int32_t valley_depth_q14,
+                                             int32_t valley_level_q14) {
+  const float valley_depth = valley_depth_q14 * kQ14Scaling;
+  float decrease_in_last_set = valley_depth;
+  const int max_hits_for_slow_change = (candidate_delay < self->last_delay) ?
+      kMaxHitsWhenPossiblyNonCausal : kMaxHitsWhenPossiblyCausal;
+  int i = 0;
+
+  RTC_DCHECK_EQ(self->history_size, self->farend->history_size);
+  // Reset |candidate_hits| if we have a new candidate.
+  if (candidate_delay != self->last_candidate_delay) {
+    self->candidate_hits = 0;
+    self->last_candidate_delay = candidate_delay;
+  }
+  self->candidate_hits++;
+
+  // The |histogram| is updated differently across the bins.
+  // 1. The |candidate_delay| histogram bin is increased with the
+  //    |valley_depth|, which is a simple measure of how reliable the
+  //    |candidate_delay| is.  The histogram is not increased above
+  //    |kHistogramMax|.
+  self->histogram[candidate_delay] += valley_depth;
+  if (self->histogram[candidate_delay] > kHistogramMax) {
+    self->histogram[candidate_delay] = kHistogramMax;
+  }
+  // 2. The histogram bins in the neighborhood of |candidate_delay| are
+  //    unaffected.  The neighborhood is defined as x + {-2, -1, 0, 1}.
+  // 3. The histogram bins in the neighborhood of |last_delay| are decreased
+  //    with |decrease_in_last_set|.  This value equals the difference between
+  //    the cost function values at the locations |candidate_delay| and
+  //    |last_delay| until we reach |max_hits_for_slow_change| consecutive hits
+  //    at the |candidate_delay|.  If we exceed this amount of hits the
+  //    |candidate_delay| is a "potential" candidate and we start decreasing
+  //    these histogram bins more rapidly with |valley_depth|.
+  if (self->candidate_hits < max_hits_for_slow_change) {
+    decrease_in_last_set = (self->mean_bit_counts[self->compare_delay] -
+        valley_level_q14) * kQ14Scaling;
+  }
+  // 4. All other bins are decreased with |valley_depth|.
+  // TODO(bjornv): Investigate how to make this loop more efficient.  Split up
+  // the loop?  Remove parts that doesn't add too much.
+  for (i = 0; i < self->history_size; ++i) {
+    int is_in_last_set = (i >= self->last_delay - 2) &&
+        (i <= self->last_delay + 1) && (i != candidate_delay);
+    int is_in_candidate_set = (i >= candidate_delay - 2) &&
+        (i <= candidate_delay + 1);
+    self->histogram[i] -= decrease_in_last_set * is_in_last_set +
+        valley_depth * (!is_in_last_set && !is_in_candidate_set);
+    // 5. No histogram bin can go below 0.
+    if (self->histogram[i] < 0) {
+      self->histogram[i] = 0;
+    }
+  }
+}
+
+// Validates the |candidate_delay|, estimated in WebRtc_ProcessBinarySpectrum(),
+// based on a mix of counting concurring hits with a modified histogram
+// of recent delay estimates.  In brief a candidate is valid (returns 1) if it
+// is the most likely according to the histogram.  There are a couple of
+// exceptions that are worth mentioning:
+//  1. If the |candidate_delay| < |last_delay| it can be that we are in a
+//     non-causal state, breaking a possible echo control algorithm.  Hence, we
+//     open up for a quicker change by allowing the change even if the
+//     |candidate_delay| is not the most likely one according to the histogram.
+//  2. There's a minimum number of hits (kMinRequiredHits) and the histogram
+//     value has to reached a minimum (kMinHistogramThreshold) to be valid.
+//  3. The action is also depending on the filter length used for echo control.
+//     If the delay difference is larger than what the filter can capture, we
+//     also move quicker towards a change.
+// For further description see commented code.
+//
+// Input:
+//  - candidate_delay     : The delay to validate.
+//
+// Return value:
+//  - is_histogram_valid  : 1 - The |candidate_delay| is valid.
+//                          0 - Otherwise.
+static int HistogramBasedValidation(const BinaryDelayEstimator* self,
+                                    int candidate_delay) {
+  float fraction = 1.f;
+  float histogram_threshold = self->histogram[self->compare_delay];
+  const int delay_difference = candidate_delay - self->last_delay;
+  int is_histogram_valid = 0;
+
+  // The histogram based validation of |candidate_delay| is done by comparing
+  // the |histogram| at bin |candidate_delay| with a |histogram_threshold|.
+  // This |histogram_threshold| equals a |fraction| of the |histogram| at bin
+  // |last_delay|.  The |fraction| is a piecewise linear function of the
+  // |delay_difference| between the |candidate_delay| and the |last_delay|
+  // allowing for a quicker move if
+  //  i) a potential echo control filter can not handle these large differences.
+  // ii) keeping |last_delay| instead of updating to |candidate_delay| could
+  //     force an echo control into a non-causal state.
+  // We further require the histogram to have reached a minimum value of
+  // |kMinHistogramThreshold|.  In addition, we also require the number of
+  // |candidate_hits| to be more than |kMinRequiredHits| to remove spurious
+  // values.
+
+  // Calculate a comparison histogram value (|histogram_threshold|) that is
+  // depending on the distance between the |candidate_delay| and |last_delay|.
+  // TODO(bjornv): How much can we gain by turning the fraction calculation
+  // into tables?
+  if (delay_difference > self->allowed_offset) {
+    fraction = 1.f - kFractionSlope * (delay_difference - self->allowed_offset);
+    fraction = (fraction > kMinFractionWhenPossiblyCausal ? fraction :
+        kMinFractionWhenPossiblyCausal);
+  } else if (delay_difference < 0) {
+    fraction = kMinFractionWhenPossiblyNonCausal -
+        kFractionSlope * delay_difference;
+    fraction = (fraction > 1.f ? 1.f : fraction);
+  }
+  histogram_threshold *= fraction;
+  histogram_threshold = (histogram_threshold > kMinHistogramThreshold ?
+      histogram_threshold : kMinHistogramThreshold);
+
+  is_histogram_valid =
+      (self->histogram[candidate_delay] >= histogram_threshold) &&
+      (self->candidate_hits > kMinRequiredHits);
+
+  return is_histogram_valid;
+}
+
+// Performs a robust validation of the |candidate_delay| estimated in
+// WebRtc_ProcessBinarySpectrum().  The algorithm takes the
+// |is_instantaneous_valid| and the |is_histogram_valid| and combines them
+// into a robust validation.  The HistogramBasedValidation() has to be called
+// prior to this call.
+// For further description on how the combination is done, see commented code.
+//
+// Inputs:
+//  - candidate_delay         : The delay to validate.
+//  - is_instantaneous_valid  : The instantaneous validation performed in
+//                              WebRtc_ProcessBinarySpectrum().
+//  - is_histogram_valid      : The histogram based validation.
+//
+// Return value:
+//  - is_robust               : 1 - The candidate_delay is valid according to a
+//                                  combination of the two inputs.
+//                            : 0 - Otherwise.
+static int RobustValidation(const BinaryDelayEstimator* self,
+                            int candidate_delay,
+                            int is_instantaneous_valid,
+                            int is_histogram_valid) {
+  int is_robust = 0;
+
+  // The final robust validation is based on the two algorithms; 1) the
+  // |is_instantaneous_valid| and 2) the histogram based with result stored in
+  // |is_histogram_valid|.
+  //   i) Before we actually have a valid estimate (|last_delay| == -2), we say
+  //      a candidate is valid if either algorithm states so
+  //      (|is_instantaneous_valid| OR |is_histogram_valid|).
+  is_robust = (self->last_delay < 0) &&
+      (is_instantaneous_valid || is_histogram_valid);
+  //  ii) Otherwise, we need both algorithms to be certain
+  //      (|is_instantaneous_valid| AND |is_histogram_valid|)
+  is_robust |= is_instantaneous_valid && is_histogram_valid;
+  // iii) With one exception, i.e., the histogram based algorithm can overrule
+  //      the instantaneous one if |is_histogram_valid| = 1 and the histogram
+  //      is significantly strong.
+  is_robust |= is_histogram_valid &&
+      (self->histogram[candidate_delay] > self->last_delay_histogram);
+
+  return is_robust;
+}
+
+void WebRtc_FreeBinaryDelayEstimatorFarend(BinaryDelayEstimatorFarend* self) {
+
+  if (self == NULL) {
+    return;
+  }
+
+  free(self->binary_far_history);
+  self->binary_far_history = NULL;
+
+  free(self->far_bit_counts);
+  self->far_bit_counts = NULL;
+
+  free(self);
+}
+
+BinaryDelayEstimatorFarend* WebRtc_CreateBinaryDelayEstimatorFarend(
+    int history_size) {
+  BinaryDelayEstimatorFarend* self = NULL;
+
+  if (history_size > 1) {
+    // Sanity conditions fulfilled.
+    self = static_cast<BinaryDelayEstimatorFarend*>(
+        malloc(sizeof(BinaryDelayEstimatorFarend)));
+  }
+  if (self == NULL) {
+    return NULL;
+  }
+
+  self->history_size = 0;
+  self->binary_far_history = NULL;
+  self->far_bit_counts = NULL;
+  if (WebRtc_AllocateFarendBufferMemory(self, history_size) == 0) {
+    WebRtc_FreeBinaryDelayEstimatorFarend(self);
+    self = NULL;
+  }
+  return self;
+}
+
+int WebRtc_AllocateFarendBufferMemory(BinaryDelayEstimatorFarend* self,
+                                      int history_size) {
+  RTC_DCHECK(self);
+  // (Re-)Allocate memory for history buffers.
+  self->binary_far_history = static_cast<uint32_t*>(
+      realloc(self->binary_far_history,
+              history_size * sizeof(*self->binary_far_history)));
+  self->far_bit_counts = static_cast<int*>(
+      realloc(self->far_bit_counts,
+              history_size * sizeof(*self->far_bit_counts)));
+  if ((self->binary_far_history == NULL) || (self->far_bit_counts == NULL)) {
+    history_size = 0;
+  }
+  // Fill with zeros if we have expanded the buffers.
+  if (history_size > self->history_size) {
+    int size_diff = history_size - self->history_size;
+    memset(&self->binary_far_history[self->history_size],
+           0,
+           sizeof(*self->binary_far_history) * size_diff);
+    memset(&self->far_bit_counts[self->history_size],
+           0,
+           sizeof(*self->far_bit_counts) * size_diff);
+  }
+  self->history_size = history_size;
+
+  return self->history_size;
+}
+
+void WebRtc_InitBinaryDelayEstimatorFarend(BinaryDelayEstimatorFarend* self) {
+  RTC_DCHECK(self);
+  memset(self->binary_far_history, 0, sizeof(uint32_t) * self->history_size);
+  memset(self->far_bit_counts, 0, sizeof(int) * self->history_size);
+}
+
+void WebRtc_SoftResetBinaryDelayEstimatorFarend(
+    BinaryDelayEstimatorFarend* self, int delay_shift) {
+  int abs_shift = abs(delay_shift);
+  int shift_size = 0;
+  int dest_index = 0;
+  int src_index = 0;
+  int padding_index = 0;
+
+  RTC_DCHECK(self);
+  shift_size = self->history_size - abs_shift;
+  RTC_DCHECK_GT(shift_size, 0);
+  if (delay_shift == 0) {
+    return;
+  } else if (delay_shift > 0) {
+    dest_index = abs_shift;
+  } else if (delay_shift < 0) {
+    src_index = abs_shift;
+    padding_index = shift_size;
+  }
+
+  // Shift and zero pad buffers.
+  memmove(&self->binary_far_history[dest_index],
+          &self->binary_far_history[src_index],
+          sizeof(*self->binary_far_history) * shift_size);
+  memset(&self->binary_far_history[padding_index], 0,
+         sizeof(*self->binary_far_history) * abs_shift);
+  memmove(&self->far_bit_counts[dest_index],
+          &self->far_bit_counts[src_index],
+          sizeof(*self->far_bit_counts) * shift_size);
+  memset(&self->far_bit_counts[padding_index], 0,
+         sizeof(*self->far_bit_counts) * abs_shift);
+}
+
+void WebRtc_AddBinaryFarSpectrum(BinaryDelayEstimatorFarend* handle,
+                                 uint32_t binary_far_spectrum) {
+  RTC_DCHECK(handle);
+  // Shift binary spectrum history and insert current |binary_far_spectrum|.
+  memmove(&(handle->binary_far_history[1]), &(handle->binary_far_history[0]),
+          (handle->history_size - 1) * sizeof(uint32_t));
+  handle->binary_far_history[0] = binary_far_spectrum;
+
+  // Shift history of far-end binary spectrum bit counts and insert bit count
+  // of current |binary_far_spectrum|.
+  memmove(&(handle->far_bit_counts[1]), &(handle->far_bit_counts[0]),
+          (handle->history_size - 1) * sizeof(int));
+  handle->far_bit_counts[0] = BitCount(binary_far_spectrum);
+}
+
+void WebRtc_FreeBinaryDelayEstimator(BinaryDelayEstimator* self) {
+
+  if (self == NULL) {
+    return;
+  }
+
+  free(self->mean_bit_counts);
+  self->mean_bit_counts = NULL;
+
+  free(self->bit_counts);
+  self->bit_counts = NULL;
+
+  free(self->binary_near_history);
+  self->binary_near_history = NULL;
+
+  free(self->histogram);
+  self->histogram = NULL;
+
+  // BinaryDelayEstimator does not have ownership of |farend|, hence we do not
+  // free the memory here. That should be handled separately by the user.
+  self->farend = NULL;
+
+  free(self);
+}
+
+BinaryDelayEstimator* WebRtc_CreateBinaryDelayEstimator(
+    BinaryDelayEstimatorFarend* farend, int max_lookahead) {
+  BinaryDelayEstimator* self = NULL;
+
+  if ((farend != NULL) && (max_lookahead >= 0)) {
+    // Sanity conditions fulfilled.
+    self = static_cast<BinaryDelayEstimator*>(
+        malloc(sizeof(BinaryDelayEstimator)));
+  }
+  if (self == NULL) {
+    return NULL;
+  }
+
+  self->farend = farend;
+  self->near_history_size = max_lookahead + 1;
+  self->history_size = 0;
+  self->robust_validation_enabled = 0;  // Disabled by default.
+  self->allowed_offset = 0;
+
+  self->lookahead = max_lookahead;
+
+  // Allocate memory for spectrum and history buffers.
+  self->mean_bit_counts = NULL;
+  self->bit_counts = NULL;
+  self->histogram = NULL;
+  self->binary_near_history = static_cast<uint32_t*>(
+      malloc((max_lookahead + 1) * sizeof(*self->binary_near_history)));
+  if (self->binary_near_history == NULL ||
+      WebRtc_AllocateHistoryBufferMemory(self, farend->history_size) == 0) {
+    WebRtc_FreeBinaryDelayEstimator(self);
+    self = NULL;
+  }
+
+  return self;
+}
+
+int WebRtc_AllocateHistoryBufferMemory(BinaryDelayEstimator* self,
+                                       int history_size) {
+  BinaryDelayEstimatorFarend* far = self->farend;
+  // (Re-)Allocate memory for spectrum and history buffers.
+  if (history_size != far->history_size) {
+    // Only update far-end buffers if we need.
+    history_size = WebRtc_AllocateFarendBufferMemory(far, history_size);
+  }
+  // The extra array element in |mean_bit_counts| and |histogram| is a dummy
+  // element only used while |last_delay| == -2, i.e., before we have a valid
+  // estimate.
+  self->mean_bit_counts = static_cast<int32_t*>(
+      realloc(self->mean_bit_counts,
+              (history_size + 1) * sizeof(*self->mean_bit_counts)));
+  self->bit_counts = static_cast<int32_t*>(
+      realloc(self->bit_counts, history_size * sizeof(*self->bit_counts)));
+  self->histogram = static_cast<float*>(
+      realloc(self->histogram, (history_size + 1) * sizeof(*self->histogram)));
+
+  if ((self->mean_bit_counts == NULL) ||
+      (self->bit_counts == NULL) ||
+      (self->histogram == NULL)) {
+    history_size = 0;
+  }
+  // Fill with zeros if we have expanded the buffers.
+  if (history_size > self->history_size) {
+    int size_diff = history_size - self->history_size;
+    memset(&self->mean_bit_counts[self->history_size],
+           0,
+           sizeof(*self->mean_bit_counts) * size_diff);
+    memset(&self->bit_counts[self->history_size],
+           0,
+           sizeof(*self->bit_counts) * size_diff);
+    memset(&self->histogram[self->history_size],
+           0,
+           sizeof(*self->histogram) * size_diff);
+  }
+  self->history_size = history_size;
+
+  return self->history_size;
+}
+
+void WebRtc_InitBinaryDelayEstimator(BinaryDelayEstimator* self) {
+  int i = 0;
+  RTC_DCHECK(self);
+
+  memset(self->bit_counts, 0, sizeof(int32_t) * self->history_size);
+  memset(self->binary_near_history,
+         0,
+         sizeof(uint32_t) * self->near_history_size);
+  for (i = 0; i <= self->history_size; ++i) {
+    self->mean_bit_counts[i] = (20 << 9);  // 20 in Q9.
+    self->histogram[i] = 0.f;
+  }
+  self->minimum_probability = kMaxBitCountsQ9;  // 32 in Q9.
+  self->last_delay_probability = (int) kMaxBitCountsQ9;  // 32 in Q9.
+
+  // Default return value if we're unable to estimate. -1 is used for errors.
+  self->last_delay = -2;
+
+  self->last_candidate_delay = -2;
+  self->compare_delay = self->history_size;
+  self->candidate_hits = 0;
+  self->last_delay_histogram = 0.f;
+}
+
+int WebRtc_SoftResetBinaryDelayEstimator(BinaryDelayEstimator* self,
+                                         int delay_shift) {
+  int lookahead = 0;
+  RTC_DCHECK(self);
+  lookahead = self->lookahead;
+  self->lookahead -= delay_shift;
+  if (self->lookahead < 0) {
+    self->lookahead = 0;
+  }
+  if (self->lookahead > self->near_history_size - 1) {
+    self->lookahead = self->near_history_size - 1;
+  }
+  return lookahead - self->lookahead;
+}
+
+int WebRtc_ProcessBinarySpectrum(BinaryDelayEstimator* self,
+                                 uint32_t binary_near_spectrum) {
+  int i = 0;
+  int candidate_delay = -1;
+  int valid_candidate = 0;
+
+  int32_t value_best_candidate = kMaxBitCountsQ9;
+  int32_t value_worst_candidate = 0;
+  int32_t valley_depth = 0;
+
+  RTC_DCHECK(self);
+  if (self->farend->history_size != self->history_size) {
+    // Non matching history sizes.
+    return -1;
+  }
+  if (self->near_history_size > 1) {
+    // If we apply lookahead, shift near-end binary spectrum history. Insert
+    // current |binary_near_spectrum| and pull out the delayed one.
+    memmove(&(self->binary_near_history[1]), &(self->binary_near_history[0]),
+            (self->near_history_size - 1) * sizeof(uint32_t));
+    self->binary_near_history[0] = binary_near_spectrum;
+    binary_near_spectrum = self->binary_near_history[self->lookahead];
+  }
+
+  // Compare with delayed spectra and store the |bit_counts| for each delay.
+  BitCountComparison(binary_near_spectrum, self->farend->binary_far_history,
+                     self->history_size, self->bit_counts);
+
+  // Update |mean_bit_counts|, which is the smoothed version of |bit_counts|.
+  for (i = 0; i < self->history_size; i++) {
+    // |bit_counts| is constrained to [0, 32], meaning we can smooth with a
+    // factor up to 2^26. We use Q9.
+    int32_t bit_count = (self->bit_counts[i] << 9);  // Q9.
+
+    // Update |mean_bit_counts| only when far-end signal has something to
+    // contribute. If |far_bit_counts| is zero the far-end signal is weak and
+    // we likely have a poor echo condition, hence don't update.
+    if (self->farend->far_bit_counts[i] > 0) {
+      // Make number of right shifts piecewise linear w.r.t. |far_bit_counts|.
+      int shifts = kShiftsAtZero;
+      shifts -= (kShiftsLinearSlope * self->farend->far_bit_counts[i]) >> 4;
+      WebRtc_MeanEstimatorFix(bit_count, shifts, &(self->mean_bit_counts[i]));
+    }
+  }
+
+  // Find |candidate_delay|, |value_best_candidate| and |value_worst_candidate|
+  // of |mean_bit_counts|.
+  for (i = 0; i < self->history_size; i++) {
+    if (self->mean_bit_counts[i] < value_best_candidate) {
+      value_best_candidate = self->mean_bit_counts[i];
+      candidate_delay = i;
+    }
+    if (self->mean_bit_counts[i] > value_worst_candidate) {
+      value_worst_candidate = self->mean_bit_counts[i];
+    }
+  }
+  valley_depth = value_worst_candidate - value_best_candidate;
+
+  // The |value_best_candidate| is a good indicator on the probability of
+  // |candidate_delay| being an accurate delay (a small |value_best_candidate|
+  // means a good binary match). In the following sections we make a decision
+  // whether to update |last_delay| or not.
+  // 1) If the difference bit counts between the best and the worst delay
+  //    candidates is too small we consider the situation to be unreliable and
+  //    don't update |last_delay|.
+  // 2) If the situation is reliable we update |last_delay| if the value of the
+  //    best candidate delay has a value less than
+  //     i) an adaptive threshold |minimum_probability|, or
+  //    ii) this corresponding value |last_delay_probability|, but updated at
+  //        this time instant.
+
+  // Update |minimum_probability|.
+  if ((self->minimum_probability > kProbabilityLowerLimit) &&
+      (valley_depth > kProbabilityMinSpread)) {
+    // The "hard" threshold can't be lower than 17 (in Q9).
+    // The valley in the curve also has to be distinct, i.e., the
+    // difference between |value_worst_candidate| and |value_best_candidate| has
+    // to be large enough.
+    int32_t threshold = value_best_candidate + kProbabilityOffset;
+    if (threshold < kProbabilityLowerLimit) {
+      threshold = kProbabilityLowerLimit;
+    }
+    if (self->minimum_probability > threshold) {
+      self->minimum_probability = threshold;
+    }
+  }
+  // Update |last_delay_probability|.
+  // We use a Markov type model, i.e., a slowly increasing level over time.
+  self->last_delay_probability++;
+  // Validate |candidate_delay|.  We have a reliable instantaneous delay
+  // estimate if
+  //  1) The valley is distinct enough (|valley_depth| > |kProbabilityOffset|)
+  // and
+  //  2) The depth of the valley is deep enough
+  //      (|value_best_candidate| < |minimum_probability|)
+  //     and deeper than the best estimate so far
+  //      (|value_best_candidate| < |last_delay_probability|)
+  valid_candidate = ((valley_depth > kProbabilityOffset) &&
+      ((value_best_candidate < self->minimum_probability) ||
+          (value_best_candidate < self->last_delay_probability)));
+
+  // Check for nonstationary farend signal.
+  const bool non_stationary_farend =
+      std::any_of(self->farend->far_bit_counts,
+                  self->farend->far_bit_counts + self->history_size,
+                  [](int a) { return a > 0; });
+
+  if (non_stationary_farend) {
+    // Only update the validation statistics when the farend is nonstationary
+    // as the underlying estimates are otherwise frozen.
+    UpdateRobustValidationStatistics(self, candidate_delay, valley_depth,
+                                     value_best_candidate);
+  }
+
+  if (self->robust_validation_enabled) {
+    int is_histogram_valid = HistogramBasedValidation(self, candidate_delay);
+    valid_candidate = RobustValidation(self, candidate_delay, valid_candidate,
+                                       is_histogram_valid);
+
+  }
+
+  // Only update the delay estimate when the farend is nonstationary and when
+  // a valid delay candidate is available.
+  if (non_stationary_farend && valid_candidate) {
+    if (candidate_delay != self->last_delay) {
+      self->last_delay_histogram =
+          (self->histogram[candidate_delay] > kLastHistogramMax ?
+              kLastHistogramMax : self->histogram[candidate_delay]);
+      // Adjust the histogram if we made a change to |last_delay|, though it was
+      // not the most likely one according to the histogram.
+      if (self->histogram[candidate_delay] <
+          self->histogram[self->compare_delay]) {
+        self->histogram[self->compare_delay] = self->histogram[candidate_delay];
+      }
+    }
+    self->last_delay = candidate_delay;
+    if (value_best_candidate < self->last_delay_probability) {
+      self->last_delay_probability = value_best_candidate;
+    }
+    self->compare_delay = self->last_delay;
+  }
+
+  return self->last_delay;
+}
+
+int WebRtc_binary_last_delay(BinaryDelayEstimator* self) {
+  RTC_DCHECK(self);
+  return self->last_delay;
+}
+
+float WebRtc_binary_last_delay_quality(BinaryDelayEstimator* self) {
+  float quality = 0;
+  RTC_DCHECK(self);
+
+  if (self->robust_validation_enabled) {
+    // Simply a linear function of the histogram height at delay estimate.
+    quality = self->histogram[self->compare_delay] / kHistogramMax;
+  } else {
+    // Note that |last_delay_probability| states how deep the minimum of the
+    // cost function is, so it is rather an error probability.
+    quality = (float) (kMaxBitCountsQ9 - self->last_delay_probability) /
+        kMaxBitCountsQ9;
+    if (quality < 0) {
+      quality = 0;
+    }
+  }
+  return quality;
+}
+
+void WebRtc_MeanEstimatorFix(int32_t new_value,
+                             int factor,
+                             int32_t* mean_value) {
+  int32_t diff = new_value - *mean_value;
+
+  // mean_new = mean_value + ((new_value - mean_value) >> factor);
+  if (diff < 0) {
+    diff = -((-diff) >> factor);
+  } else {
+    diff = (diff >> factor);
+  }
+  *mean_value += diff;
+}
diff --git a/modules/audio_processing/utility/delay_estimator.h b/modules/audio_processing/utility/delay_estimator.h
new file mode 100644
index 0000000..cce6113
--- /dev/null
+++ b/modules/audio_processing/utility/delay_estimator.h
@@ -0,0 +1,251 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Performs delay estimation on binary converted spectra.
+// The return value is  0 - OK and -1 - Error, unless otherwise stated.
+
+#ifndef MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+static const int32_t kMaxBitCountsQ9 = (32 << 9);  // 32 matching bits in Q9.
+
+typedef struct {
+  // Pointer to bit counts.
+  int* far_bit_counts;
+  // Binary history variables.
+  uint32_t* binary_far_history;
+  int history_size;
+} BinaryDelayEstimatorFarend;
+
+typedef struct {
+  // Pointer to bit counts.
+  int32_t* mean_bit_counts;
+  // Array only used locally in ProcessBinarySpectrum() but whose size is
+  // determined at run-time.
+  int32_t* bit_counts;
+
+  // Binary history variables.
+  uint32_t* binary_near_history;
+  int near_history_size;
+  int history_size;
+
+  // Delay estimation variables.
+  int32_t minimum_probability;
+  int last_delay_probability;
+
+  // Delay memory.
+  int last_delay;
+
+  // Robust validation
+  int robust_validation_enabled;
+  int allowed_offset;
+  int last_candidate_delay;
+  int compare_delay;
+  int candidate_hits;
+  float* histogram;
+  float last_delay_histogram;
+
+  // For dynamically changing the lookahead when using SoftReset...().
+  int lookahead;
+
+  // Far-end binary spectrum history buffer etc.
+  BinaryDelayEstimatorFarend* farend;
+} BinaryDelayEstimator;
+
+// Releases the memory allocated by
+// WebRtc_CreateBinaryDelayEstimatorFarend(...).
+// Input:
+//    - self              : Pointer to the binary delay estimation far-end
+//                          instance which is the return value of
+//                          WebRtc_CreateBinaryDelayEstimatorFarend().
+//
+void WebRtc_FreeBinaryDelayEstimatorFarend(BinaryDelayEstimatorFarend* self);
+
+// Allocates the memory needed by the far-end part of the binary delay
+// estimation. The memory needs to be initialized separately through
+// WebRtc_InitBinaryDelayEstimatorFarend(...).
+//
+// Inputs:
+//      - history_size    : Size of the far-end binary spectrum history.
+//
+// Return value:
+//      - BinaryDelayEstimatorFarend*
+//                        : Created |handle|. If the memory can't be allocated
+//                          or if any of the input parameters are invalid NULL
+//                          is returned.
+//
+BinaryDelayEstimatorFarend* WebRtc_CreateBinaryDelayEstimatorFarend(
+    int history_size);
+
+// Re-allocates the buffers.
+//
+// Inputs:
+//      - self            : Pointer to the binary estimation far-end instance
+//                          which is the return value of
+//                          WebRtc_CreateBinaryDelayEstimatorFarend().
+//      - history_size    : Size of the far-end binary spectrum history.
+//
+// Return value:
+//      - history_size    : The history size allocated.
+int WebRtc_AllocateFarendBufferMemory(BinaryDelayEstimatorFarend* self,
+                                      int history_size);
+
+// Initializes the delay estimation far-end instance created with
+// WebRtc_CreateBinaryDelayEstimatorFarend(...).
+//
+// Input:
+//    - self              : Pointer to the delay estimation far-end instance.
+//
+// Output:
+//    - self              : Initialized far-end instance.
+//
+void WebRtc_InitBinaryDelayEstimatorFarend(BinaryDelayEstimatorFarend* self);
+
+// Soft resets the delay estimation far-end instance created with
+// WebRtc_CreateBinaryDelayEstimatorFarend(...).
+//
+// Input:
+//    - delay_shift   : The amount of blocks to shift history buffers.
+//
+void WebRtc_SoftResetBinaryDelayEstimatorFarend(
+    BinaryDelayEstimatorFarend* self, int delay_shift);
+
+// Adds the binary far-end spectrum to the internal far-end history buffer. This
+// spectrum is used as reference when calculating the delay using
+// WebRtc_ProcessBinarySpectrum().
+//
+// Inputs:
+//    - self                  : Pointer to the delay estimation far-end
+//                              instance.
+//    - binary_far_spectrum   : Far-end binary spectrum.
+//
+// Output:
+//    - self                  : Updated far-end instance.
+//
+void WebRtc_AddBinaryFarSpectrum(BinaryDelayEstimatorFarend* self,
+                                 uint32_t binary_far_spectrum);
+
+// Releases the memory allocated by WebRtc_CreateBinaryDelayEstimator(...).
+//
+// Note that BinaryDelayEstimator utilizes BinaryDelayEstimatorFarend, but does
+// not take ownership of it, hence the BinaryDelayEstimator has to be torn down
+// before the far-end.
+//
+// Input:
+//    - self              : Pointer to the binary delay estimation instance
+//                          which is the return value of
+//                          WebRtc_CreateBinaryDelayEstimator().
+//
+void WebRtc_FreeBinaryDelayEstimator(BinaryDelayEstimator* self);
+
+// Allocates the memory needed by the binary delay estimation. The memory needs
+// to be initialized separately through WebRtc_InitBinaryDelayEstimator(...).
+//
+// See WebRtc_CreateDelayEstimator(..) in delay_estimator_wrapper.c for detailed
+// description.
+BinaryDelayEstimator* WebRtc_CreateBinaryDelayEstimator(
+    BinaryDelayEstimatorFarend* farend, int max_lookahead);
+
+// Re-allocates |history_size| dependent buffers. The far-end buffers will be
+// updated at the same time if needed.
+//
+// Input:
+//      - self            : Pointer to the binary estimation instance which is
+//                          the return value of
+//                          WebRtc_CreateBinaryDelayEstimator().
+//      - history_size    : Size of the history buffers.
+//
+// Return value:
+//      - history_size    : The history size allocated.
+int WebRtc_AllocateHistoryBufferMemory(BinaryDelayEstimator* self,
+                                       int history_size);
+
+// Initializes the delay estimation instance created with
+// WebRtc_CreateBinaryDelayEstimator(...).
+//
+// Input:
+//    - self              : Pointer to the delay estimation instance.
+//
+// Output:
+//    - self              : Initialized instance.
+//
+void WebRtc_InitBinaryDelayEstimator(BinaryDelayEstimator* self);
+
+// Soft resets the delay estimation instance created with
+// WebRtc_CreateBinaryDelayEstimator(...).
+//
+// Input:
+//    - delay_shift   : The amount of blocks to shift history buffers.
+//
+// Return value:
+//    - actual_shifts : The actual number of shifts performed.
+//
+int WebRtc_SoftResetBinaryDelayEstimator(BinaryDelayEstimator* self,
+                                         int delay_shift);
+
+// Estimates and returns the delay between the binary far-end and binary near-
+// end spectra. It is assumed the binary far-end spectrum has been added using
+// WebRtc_AddBinaryFarSpectrum() prior to this call. The value will be offset by
+// the lookahead (i.e. the lookahead should be subtracted from the returned
+// value).
+//
+// Inputs:
+//    - self                  : Pointer to the delay estimation instance.
+//    - binary_near_spectrum  : Near-end binary spectrum of the current block.
+//
+// Output:
+//    - self                  : Updated instance.
+//
+// Return value:
+//    - delay                 :  >= 0 - Calculated delay value.
+//                              -2    - Insufficient data for estimation.
+//
+int WebRtc_ProcessBinarySpectrum(BinaryDelayEstimator* self,
+                                 uint32_t binary_near_spectrum);
+
+// Returns the last calculated delay updated by the function
+// WebRtc_ProcessBinarySpectrum(...).
+//
+// Input:
+//    - self                  : Pointer to the delay estimation instance.
+//
+// Return value:
+//    - delay                 :  >= 0 - Last calculated delay value
+//                              -2    - Insufficient data for estimation.
+//
+int WebRtc_binary_last_delay(BinaryDelayEstimator* self);
+
+// Returns the estimation quality of the last calculated delay updated by the
+// function WebRtc_ProcessBinarySpectrum(...). The estimation quality is a value
+// in the interval [0, 1].  The higher the value, the better the quality.
+//
+// Return value:
+//    - delay_quality         :  >= 0 - Estimation quality of last calculated
+//                                      delay value.
+float WebRtc_binary_last_delay_quality(BinaryDelayEstimator* self);
+
+// Updates the |mean_value| recursively with a step size of 2^-|factor|. This
+// function is used internally in the Binary Delay Estimator as well as the
+// Fixed point wrapper.
+//
+// Inputs:
+//    - new_value             : The new value the mean should be updated with.
+//    - factor                : The step size, in number of right shifts.
+//
+// Input/Output:
+//    - mean_value            : Pointer to the mean value.
+//
+void WebRtc_MeanEstimatorFix(int32_t new_value,
+                             int factor,
+                             int32_t* mean_value);
+
+#endif  // MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_H_
diff --git a/modules/audio_processing/utility/delay_estimator_internal.h b/modules/audio_processing/utility/delay_estimator_internal.h
new file mode 100644
index 0000000..46eea3e
--- /dev/null
+++ b/modules/audio_processing/utility/delay_estimator_internal.h
@@ -0,0 +1,48 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Header file including the delay estimator handle used for testing.
+
+#ifndef MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_INTERNAL_H_
+#define MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_INTERNAL_H_
+
+#include "modules/audio_processing/utility/delay_estimator.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+typedef union {
+  float float_;
+  int32_t int32_;
+} SpectrumType;
+
+typedef struct {
+  // Pointers to mean values of spectrum.
+  SpectrumType* mean_far_spectrum;
+  // |mean_far_spectrum| initialization indicator.
+  int far_spectrum_initialized;
+
+  int spectrum_size;
+
+  // Far-end part of binary spectrum based delay estimation.
+  BinaryDelayEstimatorFarend* binary_farend;
+} DelayEstimatorFarend;
+
+typedef struct {
+  // Pointers to mean values of spectrum.
+  SpectrumType* mean_near_spectrum;
+  // |mean_near_spectrum| initialization indicator.
+  int near_spectrum_initialized;
+
+  int spectrum_size;
+
+  // Binary spectrum based delay estimator
+  BinaryDelayEstimator* binary_handle;
+} DelayEstimator;
+
+#endif  // MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_INTERNAL_H_
diff --git a/modules/audio_processing/utility/delay_estimator_unittest.cc b/modules/audio_processing/utility/delay_estimator_unittest.cc
new file mode 100644
index 0000000..36700e5
--- /dev/null
+++ b/modules/audio_processing/utility/delay_estimator_unittest.cc
@@ -0,0 +1,618 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/utility/delay_estimator.h"
+#include "modules/audio_processing/utility/delay_estimator_internal.h"
+#include "modules/audio_processing/utility/delay_estimator_wrapper.h"
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace {
+
+enum { kSpectrumSize = 65 };
+// Delay history sizes.
+enum { kMaxDelay = 100 };
+enum { kLookahead = 10 };
+enum { kHistorySize = kMaxDelay + kLookahead };
+// Length of binary spectrum sequence.
+enum { kSequenceLength = 400 };
+
+const int kDifferentHistorySize = 3;
+const int kDifferentLookahead = 1;
+
+const int kEnable[] = { 0, 1 };
+const size_t kSizeEnable = sizeof(kEnable) / sizeof(*kEnable);
+
+class DelayEstimatorTest : public ::testing::Test {
+ protected:
+  DelayEstimatorTest();
+  virtual void SetUp();
+  virtual void TearDown();
+
+  void Init();
+  void InitBinary();
+  void VerifyDelay(BinaryDelayEstimator* binary_handle, int offset, int delay);
+  void RunBinarySpectra(BinaryDelayEstimator* binary1,
+                        BinaryDelayEstimator* binary2,
+                        int near_offset, int lookahead_offset, int far_offset);
+  void RunBinarySpectraTest(int near_offset, int lookahead_offset,
+                            int ref_robust_validation, int robust_validation);
+
+  void* handle_;
+  DelayEstimator* self_;
+  void* farend_handle_;
+  DelayEstimatorFarend* farend_self_;
+  BinaryDelayEstimator* binary_;
+  BinaryDelayEstimatorFarend* binary_farend_;
+  int spectrum_size_;
+  // Dummy input spectra.
+  float far_f_[kSpectrumSize];
+  float near_f_[kSpectrumSize];
+  uint16_t far_u16_[kSpectrumSize];
+  uint16_t near_u16_[kSpectrumSize];
+  uint32_t binary_spectrum_[kSequenceLength + kHistorySize];
+};
+
+DelayEstimatorTest::DelayEstimatorTest()
+    : handle_(NULL),
+      self_(NULL),
+      farend_handle_(NULL),
+      farend_self_(NULL),
+      binary_(NULL),
+      binary_farend_(NULL),
+      spectrum_size_(kSpectrumSize) {
+  // Dummy input data are set with more or less arbitrary non-zero values.
+  memset(far_f_, 1, sizeof(far_f_));
+  memset(near_f_, 2, sizeof(near_f_));
+  memset(far_u16_, 1, sizeof(far_u16_));
+  memset(near_u16_, 2, sizeof(near_u16_));
+  // Construct a sequence of binary spectra used to verify delay estimate. The
+  // |kSequenceLength| has to be long enough for the delay estimation to leave
+  // the initialized state.
+  binary_spectrum_[0] = 1;
+  for (int i = 1; i < (kSequenceLength + kHistorySize); i++) {
+    binary_spectrum_[i] = 3 * binary_spectrum_[i - 1];
+  }
+}
+
+void DelayEstimatorTest::SetUp() {
+  farend_handle_ = WebRtc_CreateDelayEstimatorFarend(kSpectrumSize,
+                                                     kHistorySize);
+  ASSERT_TRUE(farend_handle_ != NULL);
+  farend_self_ = reinterpret_cast<DelayEstimatorFarend*>(farend_handle_);
+  handle_ = WebRtc_CreateDelayEstimator(farend_handle_, kLookahead);
+  ASSERT_TRUE(handle_ != NULL);
+  self_ = reinterpret_cast<DelayEstimator*>(handle_);
+  binary_farend_ = WebRtc_CreateBinaryDelayEstimatorFarend(kHistorySize);
+  ASSERT_TRUE(binary_farend_ != NULL);
+  binary_ = WebRtc_CreateBinaryDelayEstimator(binary_farend_, kLookahead);
+  ASSERT_TRUE(binary_ != NULL);
+}
+
+void DelayEstimatorTest::TearDown() {
+  WebRtc_FreeDelayEstimator(handle_);
+  handle_ = NULL;
+  self_ = NULL;
+  WebRtc_FreeDelayEstimatorFarend(farend_handle_);
+  farend_handle_ = NULL;
+  farend_self_ = NULL;
+  WebRtc_FreeBinaryDelayEstimator(binary_);
+  binary_ = NULL;
+  WebRtc_FreeBinaryDelayEstimatorFarend(binary_farend_);
+  binary_farend_ = NULL;
+}
+
+void DelayEstimatorTest::Init() {
+  // Initialize Delay Estimator
+  EXPECT_EQ(0, WebRtc_InitDelayEstimatorFarend(farend_handle_));
+  EXPECT_EQ(0, WebRtc_InitDelayEstimator(handle_));
+  // Verify initialization.
+  EXPECT_EQ(0, farend_self_->far_spectrum_initialized);
+  EXPECT_EQ(0, self_->near_spectrum_initialized);
+  EXPECT_EQ(-2, WebRtc_last_delay(handle_));  // Delay in initial state.
+  EXPECT_FLOAT_EQ(0, WebRtc_last_delay_quality(handle_));  // Zero quality.
+}
+
+void DelayEstimatorTest::InitBinary() {
+  // Initialize Binary Delay Estimator (far-end part).
+  WebRtc_InitBinaryDelayEstimatorFarend(binary_farend_);
+  // Initialize Binary Delay Estimator
+  WebRtc_InitBinaryDelayEstimator(binary_);
+  // Verify initialization. This does not guarantee a complete check, since
+  // |last_delay| may be equal to -2 before initialization if done on the fly.
+  EXPECT_EQ(-2, binary_->last_delay);
+}
+
+void DelayEstimatorTest::VerifyDelay(BinaryDelayEstimator* binary_handle,
+                                     int offset, int delay) {
+  // Verify that we WebRtc_binary_last_delay() returns correct delay.
+  EXPECT_EQ(delay, WebRtc_binary_last_delay(binary_handle));
+
+  if (delay != -2) {
+    // Verify correct delay estimate. In the non-causal case the true delay
+    // is equivalent with the |offset|.
+    EXPECT_EQ(offset, delay);
+  }
+}
+
+void DelayEstimatorTest::RunBinarySpectra(BinaryDelayEstimator* binary1,
+                                          BinaryDelayEstimator* binary2,
+                                          int near_offset,
+                                          int lookahead_offset,
+                                          int far_offset) {
+  int different_validations = binary1->robust_validation_enabled ^
+      binary2->robust_validation_enabled;
+  WebRtc_InitBinaryDelayEstimatorFarend(binary_farend_);
+  WebRtc_InitBinaryDelayEstimator(binary1);
+  WebRtc_InitBinaryDelayEstimator(binary2);
+  // Verify initialization. This does not guarantee a complete check, since
+  // |last_delay| may be equal to -2 before initialization if done on the fly.
+  EXPECT_EQ(-2, binary1->last_delay);
+  EXPECT_EQ(-2, binary2->last_delay);
+  for (int i = kLookahead; i < (kSequenceLength + kLookahead); i++) {
+    WebRtc_AddBinaryFarSpectrum(binary_farend_,
+                                binary_spectrum_[i + far_offset]);
+    int delay_1 = WebRtc_ProcessBinarySpectrum(binary1, binary_spectrum_[i]);
+    int delay_2 =
+        WebRtc_ProcessBinarySpectrum(binary2,
+                                     binary_spectrum_[i - near_offset]);
+
+    VerifyDelay(binary1, far_offset + kLookahead, delay_1);
+    VerifyDelay(binary2,
+                far_offset + kLookahead + lookahead_offset + near_offset,
+                delay_2);
+    // Expect the two delay estimates to be offset by |lookahead_offset| +
+    // |near_offset| when we have left the initial state.
+    if ((delay_1 != -2) && (delay_2 != -2)) {
+      EXPECT_EQ(delay_1, delay_2 - lookahead_offset - near_offset);
+    }
+    // For the case of identical signals |delay_1| and |delay_2| should match
+    // all the time, unless one of them has robust validation turned on.  In
+    // that case the robust validation leaves the initial state faster.
+    if ((near_offset == 0) && (lookahead_offset == 0)) {
+      if  (!different_validations) {
+        EXPECT_EQ(delay_1, delay_2);
+      } else {
+        if (binary1->robust_validation_enabled) {
+          EXPECT_GE(delay_1, delay_2);
+        } else {
+          EXPECT_GE(delay_2, delay_1);
+        }
+      }
+    }
+  }
+  // Verify that we have left the initialized state.
+  EXPECT_NE(-2, WebRtc_binary_last_delay(binary1));
+  EXPECT_LT(0, WebRtc_binary_last_delay_quality(binary1));
+  EXPECT_NE(-2, WebRtc_binary_last_delay(binary2));
+  EXPECT_LT(0, WebRtc_binary_last_delay_quality(binary2));
+}
+
+void DelayEstimatorTest::RunBinarySpectraTest(int near_offset,
+                                              int lookahead_offset,
+                                              int ref_robust_validation,
+                                              int robust_validation) {
+  BinaryDelayEstimator* binary2 =
+      WebRtc_CreateBinaryDelayEstimator(binary_farend_,
+                                        kLookahead + lookahead_offset);
+  // Verify the delay for both causal and non-causal systems. For causal systems
+  // the delay is equivalent with a positive |offset| of the far-end sequence.
+  // For non-causal systems the delay is equivalent with a negative |offset| of
+  // the far-end sequence.
+  binary_->robust_validation_enabled = ref_robust_validation;
+  binary2->robust_validation_enabled = robust_validation;
+  for (int offset = -kLookahead;
+      offset < kMaxDelay - lookahead_offset - near_offset;
+      offset++) {
+    RunBinarySpectra(binary_, binary2, near_offset, lookahead_offset, offset);
+  }
+  WebRtc_FreeBinaryDelayEstimator(binary2);
+  binary2 = NULL;
+  binary_->robust_validation_enabled = 0;  // Reset reference.
+}
+
+TEST_F(DelayEstimatorTest, CorrectErrorReturnsOfWrapper) {
+  // In this test we verify correct error returns on invalid API calls.
+
+  // WebRtc_CreateDelayEstimatorFarend() and WebRtc_CreateDelayEstimator()
+  // should return a NULL pointer on invalid input values.
+  // Make sure we have a non-NULL value at start, so we can detect NULL after
+  // create failure.
+  void* handle = farend_handle_;
+  handle = WebRtc_CreateDelayEstimatorFarend(33, kHistorySize);
+  EXPECT_TRUE(handle == NULL);
+  handle = WebRtc_CreateDelayEstimatorFarend(kSpectrumSize, 1);
+  EXPECT_TRUE(handle == NULL);
+
+  handle = handle_;
+  handle = WebRtc_CreateDelayEstimator(NULL, kLookahead);
+  EXPECT_TRUE(handle == NULL);
+  handle = WebRtc_CreateDelayEstimator(farend_handle_, -1);
+  EXPECT_TRUE(handle == NULL);
+
+  // WebRtc_InitDelayEstimatorFarend() and WebRtc_InitDelayEstimator() should
+  // return -1 if we have a NULL pointer as |handle|.
+  EXPECT_EQ(-1, WebRtc_InitDelayEstimatorFarend(NULL));
+  EXPECT_EQ(-1, WebRtc_InitDelayEstimator(NULL));
+
+  // WebRtc_AddFarSpectrumFloat() should return -1 if we have:
+  // 1) NULL pointer as |handle|.
+  // 2) NULL pointer as far-end spectrum.
+  // 3) Incorrect spectrum size.
+  EXPECT_EQ(-1, WebRtc_AddFarSpectrumFloat(NULL, far_f_, spectrum_size_));
+  // Use |farend_handle_| which is properly created at SetUp().
+  EXPECT_EQ(-1, WebRtc_AddFarSpectrumFloat(farend_handle_, NULL,
+                                           spectrum_size_));
+  EXPECT_EQ(-1, WebRtc_AddFarSpectrumFloat(farend_handle_, far_f_,
+                                           spectrum_size_ + 1));
+
+  // WebRtc_AddFarSpectrumFix() should return -1 if we have:
+  // 1) NULL pointer as |handle|.
+  // 2) NULL pointer as far-end spectrum.
+  // 3) Incorrect spectrum size.
+  // 4) Too high precision in far-end spectrum (Q-domain > 15).
+  EXPECT_EQ(-1, WebRtc_AddFarSpectrumFix(NULL, far_u16_, spectrum_size_, 0));
+  EXPECT_EQ(-1, WebRtc_AddFarSpectrumFix(farend_handle_, NULL, spectrum_size_,
+                                         0));
+  EXPECT_EQ(-1, WebRtc_AddFarSpectrumFix(farend_handle_, far_u16_,
+                                         spectrum_size_ + 1, 0));
+  EXPECT_EQ(-1, WebRtc_AddFarSpectrumFix(farend_handle_, far_u16_,
+                                         spectrum_size_, 16));
+
+  // WebRtc_set_history_size() should return -1 if:
+  // 1) |handle| is a NULL.
+  // 2) |history_size| <= 1.
+  EXPECT_EQ(-1, WebRtc_set_history_size(NULL, 1));
+  EXPECT_EQ(-1, WebRtc_set_history_size(handle_, 1));
+  // WebRtc_history_size() should return -1 if:
+  // 1) NULL pointer input.
+  EXPECT_EQ(-1, WebRtc_history_size(NULL));
+  // 2) there is a mismatch between history size.
+  void* tmp_handle = WebRtc_CreateDelayEstimator(farend_handle_, kHistorySize);
+  EXPECT_EQ(0, WebRtc_InitDelayEstimator(tmp_handle));
+  EXPECT_EQ(kDifferentHistorySize,
+            WebRtc_set_history_size(tmp_handle, kDifferentHistorySize));
+  EXPECT_EQ(kDifferentHistorySize, WebRtc_history_size(tmp_handle));
+  EXPECT_EQ(kHistorySize, WebRtc_set_history_size(handle_, kHistorySize));
+  EXPECT_EQ(-1, WebRtc_history_size(tmp_handle));
+
+  // WebRtc_set_lookahead() should return -1 if we try a value outside the
+  /// buffer.
+  EXPECT_EQ(-1, WebRtc_set_lookahead(handle_, kLookahead + 1));
+  EXPECT_EQ(-1, WebRtc_set_lookahead(handle_, -1));
+
+  // WebRtc_set_allowed_offset() should return -1 if we have:
+  // 1) NULL pointer as |handle|.
+  // 2) |allowed_offset| < 0.
+  EXPECT_EQ(-1, WebRtc_set_allowed_offset(NULL, 0));
+  EXPECT_EQ(-1, WebRtc_set_allowed_offset(handle_, -1));
+
+  EXPECT_EQ(-1, WebRtc_get_allowed_offset(NULL));
+
+  // WebRtc_enable_robust_validation() should return -1 if we have:
+  // 1) NULL pointer as |handle|.
+  // 2) Incorrect |enable| value (not 0 or 1).
+  EXPECT_EQ(-1, WebRtc_enable_robust_validation(NULL, kEnable[0]));
+  EXPECT_EQ(-1, WebRtc_enable_robust_validation(handle_, -1));
+  EXPECT_EQ(-1, WebRtc_enable_robust_validation(handle_, 2));
+
+  // WebRtc_is_robust_validation_enabled() should return -1 if we have NULL
+  // pointer as |handle|.
+  EXPECT_EQ(-1, WebRtc_is_robust_validation_enabled(NULL));
+
+  // WebRtc_DelayEstimatorProcessFloat() should return -1 if we have:
+  // 1) NULL pointer as |handle|.
+  // 2) NULL pointer as near-end spectrum.
+  // 3) Incorrect spectrum size.
+  // 4) Non matching history sizes if multiple delay estimators using the same
+  //    far-end reference.
+  EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFloat(NULL, near_f_,
+                                                  spectrum_size_));
+  // Use |handle_| which is properly created at SetUp().
+  EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFloat(handle_, NULL,
+                                                  spectrum_size_));
+  EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFloat(handle_, near_f_,
+                                                  spectrum_size_ + 1));
+  // |tmp_handle| is already in a non-matching state.
+  EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFloat(tmp_handle,
+                                                  near_f_,
+                                                  spectrum_size_));
+
+  // WebRtc_DelayEstimatorProcessFix() should return -1 if we have:
+  // 1) NULL pointer as |handle|.
+  // 2) NULL pointer as near-end spectrum.
+  // 3) Incorrect spectrum size.
+  // 4) Too high precision in near-end spectrum (Q-domain > 15).
+  // 5) Non matching history sizes if multiple delay estimators using the same
+  //    far-end reference.
+  EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFix(NULL, near_u16_, spectrum_size_,
+                                                0));
+  EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFix(handle_, NULL, spectrum_size_,
+                                                0));
+  EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFix(handle_, near_u16_,
+                                                spectrum_size_ + 1, 0));
+  EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFix(handle_, near_u16_,
+                                                spectrum_size_, 16));
+  // |tmp_handle| is already in a non-matching state.
+  EXPECT_EQ(-1, WebRtc_DelayEstimatorProcessFix(tmp_handle,
+                                                near_u16_,
+                                                spectrum_size_,
+                                                0));
+  WebRtc_FreeDelayEstimator(tmp_handle);
+
+  // WebRtc_last_delay() should return -1 if we have a NULL pointer as |handle|.
+  EXPECT_EQ(-1, WebRtc_last_delay(NULL));
+
+  // Free any local memory if needed.
+  WebRtc_FreeDelayEstimator(handle);
+}
+
+TEST_F(DelayEstimatorTest, VerifyAllowedOffset) {
+  // Is set to zero by default.
+  EXPECT_EQ(0, WebRtc_get_allowed_offset(handle_));
+  for (int i = 1; i >= 0; i--) {
+    EXPECT_EQ(0, WebRtc_set_allowed_offset(handle_, i));
+    EXPECT_EQ(i, WebRtc_get_allowed_offset(handle_));
+    Init();
+    // Unaffected over a reset.
+    EXPECT_EQ(i, WebRtc_get_allowed_offset(handle_));
+  }
+}
+
+TEST_F(DelayEstimatorTest, VerifyEnableRobustValidation) {
+  // Disabled by default.
+  EXPECT_EQ(0, WebRtc_is_robust_validation_enabled(handle_));
+  for (size_t i = 0; i < kSizeEnable; ++i) {
+    EXPECT_EQ(0, WebRtc_enable_robust_validation(handle_, kEnable[i]));
+    EXPECT_EQ(kEnable[i], WebRtc_is_robust_validation_enabled(handle_));
+    Init();
+    // Unaffected over a reset.
+    EXPECT_EQ(kEnable[i], WebRtc_is_robust_validation_enabled(handle_));
+  }
+}
+
+TEST_F(DelayEstimatorTest, InitializedSpectrumAfterProcess) {
+  // In this test we verify that the mean spectra are initialized after first
+  // time we call WebRtc_AddFarSpectrum() and Process() respectively. The test
+  // also verifies the state is not left for zero spectra.
+  const float kZerosFloat[kSpectrumSize] = { 0.0 };
+  const uint16_t kZerosU16[kSpectrumSize] = { 0 };
+
+  // For floating point operations, process one frame and verify initialization
+  // flag.
+  Init();
+  EXPECT_EQ(0, WebRtc_AddFarSpectrumFloat(farend_handle_, kZerosFloat,
+                                          spectrum_size_));
+  EXPECT_EQ(0, farend_self_->far_spectrum_initialized);
+  EXPECT_EQ(0, WebRtc_AddFarSpectrumFloat(farend_handle_, far_f_,
+                                           spectrum_size_));
+  EXPECT_EQ(1, farend_self_->far_spectrum_initialized);
+  EXPECT_EQ(-2, WebRtc_DelayEstimatorProcessFloat(handle_, kZerosFloat,
+                                                  spectrum_size_));
+  EXPECT_EQ(0, self_->near_spectrum_initialized);
+  EXPECT_EQ(-2, WebRtc_DelayEstimatorProcessFloat(handle_, near_f_,
+                                                  spectrum_size_));
+  EXPECT_EQ(1, self_->near_spectrum_initialized);
+
+  // For fixed point operations, process one frame and verify initialization
+  // flag.
+  Init();
+  EXPECT_EQ(0, WebRtc_AddFarSpectrumFix(farend_handle_, kZerosU16,
+                                        spectrum_size_, 0));
+  EXPECT_EQ(0, farend_self_->far_spectrum_initialized);
+  EXPECT_EQ(0, WebRtc_AddFarSpectrumFix(farend_handle_, far_u16_,
+                                         spectrum_size_, 0));
+  EXPECT_EQ(1, farend_self_->far_spectrum_initialized);
+  EXPECT_EQ(-2, WebRtc_DelayEstimatorProcessFix(handle_, kZerosU16,
+                                                spectrum_size_, 0));
+  EXPECT_EQ(0, self_->near_spectrum_initialized);
+  EXPECT_EQ(-2, WebRtc_DelayEstimatorProcessFix(handle_, near_u16_,
+                                                spectrum_size_, 0));
+  EXPECT_EQ(1, self_->near_spectrum_initialized);
+}
+
+TEST_F(DelayEstimatorTest, CorrectLastDelay) {
+  // In this test we verify that we get the correct last delay upon valid call.
+  // We simply process the same data until we leave the initialized state
+  // (|last_delay| = -2). Then we compare the Process() output with the
+  // last_delay() call.
+
+  // TODO(bjornv): Update quality values for robust validation.
+  int last_delay = 0;
+  // Floating point operations.
+  Init();
+  for (int i = 0; i < 200; i++) {
+    EXPECT_EQ(0, WebRtc_AddFarSpectrumFloat(farend_handle_, far_f_,
+                                            spectrum_size_));
+    last_delay = WebRtc_DelayEstimatorProcessFloat(handle_, near_f_,
+                                                   spectrum_size_);
+    if (last_delay != -2) {
+      EXPECT_EQ(last_delay, WebRtc_last_delay(handle_));
+      if (!WebRtc_is_robust_validation_enabled(handle_)) {
+        EXPECT_FLOAT_EQ(7203.f / kMaxBitCountsQ9,
+                        WebRtc_last_delay_quality(handle_));
+      }
+      break;
+    }
+  }
+  // Verify that we have left the initialized state.
+  EXPECT_NE(-2, WebRtc_last_delay(handle_));
+  EXPECT_LT(0, WebRtc_last_delay_quality(handle_));
+
+  // Fixed point operations.
+  Init();
+  for (int i = 0; i < 200; i++) {
+    EXPECT_EQ(0, WebRtc_AddFarSpectrumFix(farend_handle_, far_u16_,
+                                          spectrum_size_, 0));
+    last_delay = WebRtc_DelayEstimatorProcessFix(handle_, near_u16_,
+                                                 spectrum_size_, 0);
+    if (last_delay != -2) {
+      EXPECT_EQ(last_delay, WebRtc_last_delay(handle_));
+      if (!WebRtc_is_robust_validation_enabled(handle_)) {
+        EXPECT_FLOAT_EQ(7203.f / kMaxBitCountsQ9,
+                        WebRtc_last_delay_quality(handle_));
+      }
+      break;
+    }
+  }
+  // Verify that we have left the initialized state.
+  EXPECT_NE(-2, WebRtc_last_delay(handle_));
+  EXPECT_LT(0, WebRtc_last_delay_quality(handle_));
+}
+
+TEST_F(DelayEstimatorTest, CorrectErrorReturnsOfBinaryEstimatorFarend) {
+  // In this test we verify correct output on invalid API calls to the Binary
+  // Delay Estimator (far-end part).
+
+  BinaryDelayEstimatorFarend* binary = binary_farend_;
+  // WebRtc_CreateBinaryDelayEstimatorFarend() should return -1 if the input
+  // history size is less than 2. This is to make sure the buffer shifting
+  // applies properly.
+  // Make sure we have a non-NULL value at start, so we can detect NULL after
+  // create failure.
+  binary = WebRtc_CreateBinaryDelayEstimatorFarend(1);
+  EXPECT_TRUE(binary == NULL);
+}
+
+TEST_F(DelayEstimatorTest, CorrectErrorReturnsOfBinaryEstimator) {
+  // In this test we verify correct output on invalid API calls to the Binary
+  // Delay Estimator.
+
+  BinaryDelayEstimator* binary_handle = binary_;
+  // WebRtc_CreateBinaryDelayEstimator() should return -1 if we have a NULL
+  // pointer as |binary_farend| or invalid input values. Upon failure, the
+  // |binary_handle| should be NULL.
+  // Make sure we have a non-NULL value at start, so we can detect NULL after
+  // create failure.
+  binary_handle = WebRtc_CreateBinaryDelayEstimator(NULL, kLookahead);
+  EXPECT_TRUE(binary_handle == NULL);
+  binary_handle = WebRtc_CreateBinaryDelayEstimator(binary_farend_, -1);
+  EXPECT_TRUE(binary_handle == NULL);
+}
+
+TEST_F(DelayEstimatorTest, MeanEstimatorFix) {
+  // In this test we verify that we update the mean value in correct direction
+  // only. With "direction" we mean increase or decrease.
+
+  int32_t mean_value = 4000;
+  int32_t mean_value_before = mean_value;
+  int32_t new_mean_value = mean_value * 2;
+
+  // Increasing |mean_value|.
+  WebRtc_MeanEstimatorFix(new_mean_value, 10, &mean_value);
+  EXPECT_LT(mean_value_before, mean_value);
+  EXPECT_GT(new_mean_value, mean_value);
+
+  // Decreasing |mean_value|.
+  new_mean_value = mean_value / 2;
+  mean_value_before = mean_value;
+  WebRtc_MeanEstimatorFix(new_mean_value, 10, &mean_value);
+  EXPECT_GT(mean_value_before, mean_value);
+  EXPECT_LT(new_mean_value, mean_value);
+}
+
+TEST_F(DelayEstimatorTest, ExactDelayEstimateMultipleNearSameSpectrum) {
+  // In this test we verify that we get the correct delay estimates if we shift
+  // the signal accordingly. We create two Binary Delay Estimators and feed them
+  // with the same signals, so they should output the same results.
+  // We verify both causal and non-causal delays.
+  // For these noise free signals, the robust validation should not have an
+  // impact, hence we turn robust validation on/off for both reference and
+  // delayed near end.
+
+  for (size_t i = 0; i < kSizeEnable; ++i) {
+    for (size_t j = 0; j < kSizeEnable; ++j) {
+      RunBinarySpectraTest(0, 0, kEnable[i], kEnable[j]);
+    }
+  }
+}
+
+TEST_F(DelayEstimatorTest, ExactDelayEstimateMultipleNearDifferentSpectrum) {
+  // In this test we use the same setup as above, but we now feed the two Binary
+  // Delay Estimators with different signals, so they should output different
+  // results.
+  // For these noise free signals, the robust validation should not have an
+  // impact, hence we turn robust validation on/off for both reference and
+  // delayed near end.
+
+  const int kNearOffset = 1;
+  for (size_t i = 0; i < kSizeEnable; ++i) {
+    for (size_t j = 0; j < kSizeEnable; ++j) {
+      RunBinarySpectraTest(kNearOffset, 0, kEnable[i], kEnable[j]);
+    }
+  }
+}
+
+TEST_F(DelayEstimatorTest, ExactDelayEstimateMultipleNearDifferentLookahead) {
+  // In this test we use the same setup as above, feeding the two Binary
+  // Delay Estimators with the same signals. The difference is that we create
+  // them with different lookahead.
+  // For these noise free signals, the robust validation should not have an
+  // impact, hence we turn robust validation on/off for both reference and
+  // delayed near end.
+
+  const int kLookaheadOffset = 1;
+  for (size_t i = 0; i < kSizeEnable; ++i) {
+    for (size_t j = 0; j < kSizeEnable; ++j) {
+      RunBinarySpectraTest(0, kLookaheadOffset, kEnable[i], kEnable[j]);
+    }
+  }
+}
+
+TEST_F(DelayEstimatorTest, AllowedOffsetNoImpactWhenRobustValidationDisabled) {
+  // The same setup as in ExactDelayEstimateMultipleNearSameSpectrum with the
+  // difference that |allowed_offset| is set for the reference binary delay
+  // estimator.
+
+  binary_->allowed_offset = 10;
+  RunBinarySpectraTest(0, 0, 0, 0);
+  binary_->allowed_offset = 0;  // Reset reference.
+}
+
+TEST_F(DelayEstimatorTest, VerifyLookaheadAtCreate) {
+  void* farend_handle = WebRtc_CreateDelayEstimatorFarend(kSpectrumSize,
+                                                          kMaxDelay);
+  ASSERT_TRUE(farend_handle != NULL);
+  void* handle = WebRtc_CreateDelayEstimator(farend_handle, kLookahead);
+  ASSERT_TRUE(handle != NULL);
+  EXPECT_EQ(kLookahead, WebRtc_lookahead(handle));
+  WebRtc_FreeDelayEstimator(handle);
+  WebRtc_FreeDelayEstimatorFarend(farend_handle);
+}
+
+TEST_F(DelayEstimatorTest, VerifyLookaheadIsSetAndKeptAfterInit) {
+  EXPECT_EQ(kLookahead, WebRtc_lookahead(handle_));
+  EXPECT_EQ(kDifferentLookahead,
+            WebRtc_set_lookahead(handle_, kDifferentLookahead));
+  EXPECT_EQ(kDifferentLookahead, WebRtc_lookahead(handle_));
+  EXPECT_EQ(0, WebRtc_InitDelayEstimatorFarend(farend_handle_));
+  EXPECT_EQ(kDifferentLookahead, WebRtc_lookahead(handle_));
+  EXPECT_EQ(0, WebRtc_InitDelayEstimator(handle_));
+  EXPECT_EQ(kDifferentLookahead, WebRtc_lookahead(handle_));
+}
+
+TEST_F(DelayEstimatorTest, VerifyHistorySizeAtCreate) {
+  EXPECT_EQ(kHistorySize, WebRtc_history_size(handle_));
+}
+
+TEST_F(DelayEstimatorTest, VerifyHistorySizeIsSetAndKeptAfterInit) {
+  EXPECT_EQ(kHistorySize, WebRtc_history_size(handle_));
+  EXPECT_EQ(kDifferentHistorySize,
+            WebRtc_set_history_size(handle_, kDifferentHistorySize));
+  EXPECT_EQ(kDifferentHistorySize, WebRtc_history_size(handle_));
+  EXPECT_EQ(0, WebRtc_InitDelayEstimator(handle_));
+  EXPECT_EQ(kDifferentHistorySize, WebRtc_history_size(handle_));
+  EXPECT_EQ(0, WebRtc_InitDelayEstimatorFarend(farend_handle_));
+  EXPECT_EQ(kDifferentHistorySize, WebRtc_history_size(handle_));
+}
+
+// TODO(bjornv): Add tests for SoftReset...(...).
+
+}  // namespace
diff --git a/modules/audio_processing/utility/delay_estimator_wrapper.cc b/modules/audio_processing/utility/delay_estimator_wrapper.cc
new file mode 100644
index 0000000..f907c80
--- /dev/null
+++ b/modules/audio_processing/utility/delay_estimator_wrapper.cc
@@ -0,0 +1,486 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/utility/delay_estimator_wrapper.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "modules/audio_processing/utility/delay_estimator.h"
+#include "modules/audio_processing/utility/delay_estimator_internal.h"
+#include "rtc_base/checks.h"
+
+// Only bit |kBandFirst| through bit |kBandLast| are processed and
+// |kBandFirst| - |kBandLast| must be < 32.
+enum { kBandFirst = 12 };
+enum { kBandLast = 43 };
+
+static __inline uint32_t SetBit(uint32_t in, int pos) {
+  uint32_t mask = (1 << pos);
+  uint32_t out = (in | mask);
+
+  return out;
+}
+
+// Calculates the mean recursively. Same version as WebRtc_MeanEstimatorFix(),
+// but for float.
+//
+// Inputs:
+//    - new_value             : New additional value.
+//    - scale                 : Scale for smoothing (should be less than 1.0).
+//
+// Input/Output:
+//    - mean_value            : Pointer to the mean value for updating.
+//
+static void MeanEstimatorFloat(float new_value,
+                               float scale,
+                               float* mean_value) {
+  RTC_DCHECK_LT(scale, 1.0f);
+  *mean_value += (new_value - *mean_value) * scale;
+}
+
+// Computes the binary spectrum by comparing the input |spectrum| with a
+// |threshold_spectrum|. Float and fixed point versions.
+//
+// Inputs:
+//      - spectrum            : Spectrum of which the binary spectrum should be
+//                              calculated.
+//      - threshold_spectrum  : Threshold spectrum with which the input
+//                              spectrum is compared.
+// Return:
+//      - out                 : Binary spectrum.
+//
+static uint32_t BinarySpectrumFix(const uint16_t* spectrum,
+                                  SpectrumType* threshold_spectrum,
+                                  int q_domain,
+                                  int* threshold_initialized) {
+  int i = kBandFirst;
+  uint32_t out = 0;
+
+  RTC_DCHECK_LT(q_domain, 16);
+
+  if (!(*threshold_initialized)) {
+    // Set the |threshold_spectrum| to half the input |spectrum| as starting
+    // value. This speeds up the convergence.
+    for (i = kBandFirst; i <= kBandLast; i++) {
+      if (spectrum[i] > 0) {
+        // Convert input spectrum from Q(|q_domain|) to Q15.
+        int32_t spectrum_q15 = ((int32_t) spectrum[i]) << (15 - q_domain);
+        threshold_spectrum[i].int32_ = (spectrum_q15 >> 1);
+        *threshold_initialized = 1;
+      }
+    }
+  }
+  for (i = kBandFirst; i <= kBandLast; i++) {
+    // Convert input spectrum from Q(|q_domain|) to Q15.
+    int32_t spectrum_q15 = ((int32_t) spectrum[i]) << (15 - q_domain);
+    // Update the |threshold_spectrum|.
+    WebRtc_MeanEstimatorFix(spectrum_q15, 6, &(threshold_spectrum[i].int32_));
+    // Convert |spectrum| at current frequency bin to a binary value.
+    if (spectrum_q15 > threshold_spectrum[i].int32_) {
+      out = SetBit(out, i - kBandFirst);
+    }
+  }
+
+  return out;
+}
+
+static uint32_t BinarySpectrumFloat(const float* spectrum,
+                                    SpectrumType* threshold_spectrum,
+                                    int* threshold_initialized) {
+  int i = kBandFirst;
+  uint32_t out = 0;
+  const float kScale = 1 / 64.0;
+
+  if (!(*threshold_initialized)) {
+    // Set the |threshold_spectrum| to half the input |spectrum| as starting
+    // value. This speeds up the convergence.
+    for (i = kBandFirst; i <= kBandLast; i++) {
+      if (spectrum[i] > 0.0f) {
+        threshold_spectrum[i].float_ = (spectrum[i] / 2);
+        *threshold_initialized = 1;
+      }
+    }
+  }
+
+  for (i = kBandFirst; i <= kBandLast; i++) {
+    // Update the |threshold_spectrum|.
+    MeanEstimatorFloat(spectrum[i], kScale, &(threshold_spectrum[i].float_));
+    // Convert |spectrum| at current frequency bin to a binary value.
+    if (spectrum[i] > threshold_spectrum[i].float_) {
+      out = SetBit(out, i - kBandFirst);
+    }
+  }
+
+  return out;
+}
+
+void WebRtc_FreeDelayEstimatorFarend(void* handle) {
+  DelayEstimatorFarend* self = (DelayEstimatorFarend*) handle;
+
+  if (handle == NULL) {
+    return;
+  }
+
+  free(self->mean_far_spectrum);
+  self->mean_far_spectrum = NULL;
+
+  WebRtc_FreeBinaryDelayEstimatorFarend(self->binary_farend);
+  self->binary_farend = NULL;
+
+  free(self);
+}
+
+void* WebRtc_CreateDelayEstimatorFarend(int spectrum_size, int history_size) {
+  DelayEstimatorFarend* self = NULL;
+
+  // Check if the sub band used in the delay estimation is small enough to fit
+  // the binary spectra in a uint32_t.
+  static_assert(kBandLast - kBandFirst < 32, "");
+
+  if (spectrum_size >= kBandLast) {
+    self = static_cast<DelayEstimatorFarend*>(
+        malloc(sizeof(DelayEstimatorFarend)));
+  }
+
+  if (self != NULL) {
+    int memory_fail = 0;
+
+    // Allocate memory for the binary far-end spectrum handling.
+    self->binary_farend = WebRtc_CreateBinaryDelayEstimatorFarend(history_size);
+    memory_fail |= (self->binary_farend == NULL);
+
+    // Allocate memory for spectrum buffers.
+    self->mean_far_spectrum =
+        static_cast<SpectrumType*>(malloc(spectrum_size * sizeof(SpectrumType)));
+    memory_fail |= (self->mean_far_spectrum == NULL);
+
+    self->spectrum_size = spectrum_size;
+
+    if (memory_fail) {
+      WebRtc_FreeDelayEstimatorFarend(self);
+      self = NULL;
+    }
+  }
+
+  return self;
+}
+
+int WebRtc_InitDelayEstimatorFarend(void* handle) {
+  DelayEstimatorFarend* self = (DelayEstimatorFarend*) handle;
+
+  if (self == NULL) {
+    return -1;
+  }
+
+  // Initialize far-end part of binary delay estimator.
+  WebRtc_InitBinaryDelayEstimatorFarend(self->binary_farend);
+
+  // Set averaged far and near end spectra to zero.
+  memset(self->mean_far_spectrum, 0,
+         sizeof(SpectrumType) * self->spectrum_size);
+  // Reset initialization indicators.
+  self->far_spectrum_initialized = 0;
+
+  return 0;
+}
+
+void WebRtc_SoftResetDelayEstimatorFarend(void* handle, int delay_shift) {
+  DelayEstimatorFarend* self = (DelayEstimatorFarend*) handle;
+  RTC_DCHECK(self);
+  WebRtc_SoftResetBinaryDelayEstimatorFarend(self->binary_farend, delay_shift);
+}
+
+int WebRtc_AddFarSpectrumFix(void* handle,
+                             const uint16_t* far_spectrum,
+                             int spectrum_size,
+                             int far_q) {
+  DelayEstimatorFarend* self = (DelayEstimatorFarend*) handle;
+  uint32_t binary_spectrum = 0;
+
+  if (self == NULL) {
+    return -1;
+  }
+  if (far_spectrum == NULL) {
+    // Empty far end spectrum.
+    return -1;
+  }
+  if (spectrum_size != self->spectrum_size) {
+    // Data sizes don't match.
+    return -1;
+  }
+  if (far_q > 15) {
+    // If |far_q| is larger than 15 we cannot guarantee no wrap around.
+    return -1;
+  }
+
+  // Get binary spectrum.
+  binary_spectrum = BinarySpectrumFix(far_spectrum, self->mean_far_spectrum,
+                                      far_q, &(self->far_spectrum_initialized));
+  WebRtc_AddBinaryFarSpectrum(self->binary_farend, binary_spectrum);
+
+  return 0;
+}
+
+int WebRtc_AddFarSpectrumFloat(void* handle,
+                               const float* far_spectrum,
+                               int spectrum_size) {
+  DelayEstimatorFarend* self = (DelayEstimatorFarend*) handle;
+  uint32_t binary_spectrum = 0;
+
+  if (self == NULL) {
+    return -1;
+  }
+  if (far_spectrum == NULL) {
+    // Empty far end spectrum.
+    return -1;
+  }
+  if (spectrum_size != self->spectrum_size) {
+    // Data sizes don't match.
+    return -1;
+  }
+
+  // Get binary spectrum.
+  binary_spectrum = BinarySpectrumFloat(far_spectrum, self->mean_far_spectrum,
+                                        &(self->far_spectrum_initialized));
+  WebRtc_AddBinaryFarSpectrum(self->binary_farend, binary_spectrum);
+
+  return 0;
+}
+
+void WebRtc_FreeDelayEstimator(void* handle) {
+  DelayEstimator* self = (DelayEstimator*) handle;
+
+  if (handle == NULL) {
+    return;
+  }
+
+  free(self->mean_near_spectrum);
+  self->mean_near_spectrum = NULL;
+
+  WebRtc_FreeBinaryDelayEstimator(self->binary_handle);
+  self->binary_handle = NULL;
+
+  free(self);
+}
+
+void* WebRtc_CreateDelayEstimator(void* farend_handle, int max_lookahead) {
+  DelayEstimator* self = NULL;
+  DelayEstimatorFarend* farend = (DelayEstimatorFarend*) farend_handle;
+
+  if (farend_handle != NULL) {
+    self = static_cast<DelayEstimator*>(malloc(sizeof(DelayEstimator)));
+  }
+
+  if (self != NULL) {
+    int memory_fail = 0;
+
+    // Allocate memory for the farend spectrum handling.
+    self->binary_handle =
+        WebRtc_CreateBinaryDelayEstimator(farend->binary_farend, max_lookahead);
+    memory_fail |= (self->binary_handle == NULL);
+
+    // Allocate memory for spectrum buffers.
+    self->mean_near_spectrum = static_cast<SpectrumType*>(
+        malloc(farend->spectrum_size * sizeof(SpectrumType)));
+    memory_fail |= (self->mean_near_spectrum == NULL);
+
+    self->spectrum_size = farend->spectrum_size;
+
+    if (memory_fail) {
+      WebRtc_FreeDelayEstimator(self);
+      self = NULL;
+    }
+  }
+
+  return self;
+}
+
+int WebRtc_InitDelayEstimator(void* handle) {
+  DelayEstimator* self = (DelayEstimator*) handle;
+
+  if (self == NULL) {
+    return -1;
+  }
+
+  // Initialize binary delay estimator.
+  WebRtc_InitBinaryDelayEstimator(self->binary_handle);
+
+  // Set averaged far and near end spectra to zero.
+  memset(self->mean_near_spectrum, 0,
+         sizeof(SpectrumType) * self->spectrum_size);
+  // Reset initialization indicators.
+  self->near_spectrum_initialized = 0;
+
+  return 0;
+}
+
+int WebRtc_SoftResetDelayEstimator(void* handle, int delay_shift) {
+  DelayEstimator* self = (DelayEstimator*) handle;
+  RTC_DCHECK(self);
+  return WebRtc_SoftResetBinaryDelayEstimator(self->binary_handle, delay_shift);
+}
+
+int WebRtc_set_history_size(void* handle, int history_size) {
+  DelayEstimator* self = static_cast<DelayEstimator*>(handle);
+
+  if ((self == NULL) || (history_size <= 1)) {
+    return -1;
+  }
+  return WebRtc_AllocateHistoryBufferMemory(self->binary_handle, history_size);
+}
+
+int WebRtc_history_size(const void* handle) {
+  const DelayEstimator* self = static_cast<const DelayEstimator*>(handle);
+
+  if (self == NULL) {
+    return -1;
+  }
+  if (self->binary_handle->farend->history_size !=
+      self->binary_handle->history_size) {
+    // Non matching history sizes.
+    return -1;
+  }
+  return self->binary_handle->history_size;
+}
+
+int WebRtc_set_lookahead(void* handle, int lookahead) {
+  DelayEstimator* self = (DelayEstimator*) handle;
+  RTC_DCHECK(self);
+  RTC_DCHECK(self->binary_handle);
+  if ((lookahead > self->binary_handle->near_history_size - 1) ||
+      (lookahead < 0)) {
+    return -1;
+  }
+  self->binary_handle->lookahead = lookahead;
+  return self->binary_handle->lookahead;
+}
+
+int WebRtc_lookahead(void* handle) {
+  DelayEstimator* self = (DelayEstimator*) handle;
+  RTC_DCHECK(self);
+  RTC_DCHECK(self->binary_handle);
+  return self->binary_handle->lookahead;
+}
+
+int WebRtc_set_allowed_offset(void* handle, int allowed_offset) {
+  DelayEstimator* self = (DelayEstimator*) handle;
+
+  if ((self == NULL) || (allowed_offset < 0)) {
+    return -1;
+  }
+  self->binary_handle->allowed_offset = allowed_offset;
+  return 0;
+}
+
+int WebRtc_get_allowed_offset(const void* handle) {
+  const DelayEstimator* self = (const DelayEstimator*) handle;
+
+  if (self == NULL) {
+    return -1;
+  }
+  return self->binary_handle->allowed_offset;
+}
+
+int WebRtc_enable_robust_validation(void* handle, int enable) {
+  DelayEstimator* self = (DelayEstimator*) handle;
+
+  if (self == NULL) {
+    return -1;
+  }
+  if ((enable < 0) || (enable > 1)) {
+    return -1;
+  }
+  RTC_DCHECK(self->binary_handle);
+  self->binary_handle->robust_validation_enabled = enable;
+  return 0;
+}
+
+int WebRtc_is_robust_validation_enabled(const void* handle) {
+  const DelayEstimator* self = (const DelayEstimator*) handle;
+
+  if (self == NULL) {
+    return -1;
+  }
+  return self->binary_handle->robust_validation_enabled;
+}
+
+int WebRtc_DelayEstimatorProcessFix(void* handle,
+                                    const uint16_t* near_spectrum,
+                                    int spectrum_size,
+                                    int near_q) {
+  DelayEstimator* self = (DelayEstimator*) handle;
+  uint32_t binary_spectrum = 0;
+
+  if (self == NULL) {
+    return -1;
+  }
+  if (near_spectrum == NULL) {
+    // Empty near end spectrum.
+    return -1;
+  }
+  if (spectrum_size != self->spectrum_size) {
+    // Data sizes don't match.
+    return -1;
+  }
+  if (near_q > 15) {
+    // If |near_q| is larger than 15 we cannot guarantee no wrap around.
+    return -1;
+  }
+
+  // Get binary spectra.
+  binary_spectrum = BinarySpectrumFix(near_spectrum,
+                                      self->mean_near_spectrum,
+                                      near_q,
+                                      &(self->near_spectrum_initialized));
+
+  return WebRtc_ProcessBinarySpectrum(self->binary_handle, binary_spectrum);
+}
+
+int WebRtc_DelayEstimatorProcessFloat(void* handle,
+                                      const float* near_spectrum,
+                                      int spectrum_size) {
+  DelayEstimator* self = (DelayEstimator*) handle;
+  uint32_t binary_spectrum = 0;
+
+  if (self == NULL) {
+    return -1;
+  }
+  if (near_spectrum == NULL) {
+    // Empty near end spectrum.
+    return -1;
+  }
+  if (spectrum_size != self->spectrum_size) {
+    // Data sizes don't match.
+    return -1;
+  }
+
+  // Get binary spectrum.
+  binary_spectrum = BinarySpectrumFloat(near_spectrum, self->mean_near_spectrum,
+                                        &(self->near_spectrum_initialized));
+
+  return WebRtc_ProcessBinarySpectrum(self->binary_handle, binary_spectrum);
+}
+
+int WebRtc_last_delay(void* handle) {
+  DelayEstimator* self = (DelayEstimator*) handle;
+
+  if (self == NULL) {
+    return -1;
+  }
+
+  return WebRtc_binary_last_delay(self->binary_handle);
+}
+
+float WebRtc_last_delay_quality(void* handle) {
+  DelayEstimator* self = (DelayEstimator*) handle;
+  RTC_DCHECK(self);
+  return WebRtc_binary_last_delay_quality(self->binary_handle);
+}
diff --git a/modules/audio_processing/utility/delay_estimator_wrapper.h b/modules/audio_processing/utility/delay_estimator_wrapper.h
new file mode 100644
index 0000000..6b6e51f
--- /dev/null
+++ b/modules/audio_processing/utility/delay_estimator_wrapper.h
@@ -0,0 +1,244 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Performs delay estimation on block by block basis.
+// The return value is  0 - OK and -1 - Error, unless otherwise stated.
+
+#ifndef MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_WRAPPER_H_
+#define MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_WRAPPER_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+// Releases the memory allocated by WebRtc_CreateDelayEstimatorFarend(...)
+void WebRtc_FreeDelayEstimatorFarend(void* handle);
+
+// Allocates the memory needed by the far-end part of the delay estimation. The
+// memory needs to be initialized separately through
+// WebRtc_InitDelayEstimatorFarend(...).
+//
+// Inputs:
+//  - spectrum_size     : Size of the spectrum used both in far-end and
+//                        near-end. Used to allocate memory for spectrum
+//                        specific buffers.
+//  - history_size      : The far-end history buffer size. A change in buffer
+//                        size can be forced with WebRtc_set_history_size().
+//                        Note that the maximum delay which can be estimated is
+//                        determined together with WebRtc_set_lookahead().
+//
+// Return value:
+//  - void*             : Created |handle|. If the memory can't be allocated or
+//                        if any of the input parameters are invalid NULL is
+//                        returned.
+void* WebRtc_CreateDelayEstimatorFarend(int spectrum_size, int history_size);
+
+// Initializes the far-end part of the delay estimation instance returned by
+// WebRtc_CreateDelayEstimatorFarend(...)
+int WebRtc_InitDelayEstimatorFarend(void* handle);
+
+// Soft resets the far-end part of the delay estimation instance returned by
+// WebRtc_CreateDelayEstimatorFarend(...).
+// Input:
+//      - delay_shift   : The amount of blocks to shift history buffers.
+void WebRtc_SoftResetDelayEstimatorFarend(void* handle, int delay_shift);
+
+// Adds the far-end spectrum to the far-end history buffer. This spectrum is
+// used as reference when calculating the delay using
+// WebRtc_ProcessSpectrum().
+//
+// Inputs:
+//    - far_spectrum    : Far-end spectrum.
+//    - spectrum_size   : The size of the data arrays (same for both far- and
+//                        near-end).
+//    - far_q           : The Q-domain of the far-end data.
+//
+// Output:
+//    - handle          : Updated far-end instance.
+//
+int WebRtc_AddFarSpectrumFix(void* handle,
+                             const uint16_t* far_spectrum,
+                             int spectrum_size,
+                             int far_q);
+
+// See WebRtc_AddFarSpectrumFix() for description.
+int WebRtc_AddFarSpectrumFloat(void* handle,
+                               const float* far_spectrum,
+                               int spectrum_size);
+
+// Releases the memory allocated by WebRtc_CreateDelayEstimator(...)
+void WebRtc_FreeDelayEstimator(void* handle);
+
+// Allocates the memory needed by the delay estimation. The memory needs to be
+// initialized separately through WebRtc_InitDelayEstimator(...).
+//
+// Inputs:
+//      - farend_handle : Pointer to the far-end part of the delay estimation
+//                        instance created prior to this call using
+//                        WebRtc_CreateDelayEstimatorFarend().
+//
+//                        Note that WebRtc_CreateDelayEstimator does not take
+//                        ownership of |farend_handle|, which has to be torn
+//                        down properly after this instance.
+//
+//      - max_lookahead : Maximum amount of non-causal lookahead allowed. The
+//                        actual amount of lookahead used can be controlled by
+//                        WebRtc_set_lookahead(...). The default |lookahead| is
+//                        set to |max_lookahead| at create time. Use
+//                        WebRtc_set_lookahead(...) before start if a different
+//                        value is desired.
+//
+//                        Using lookahead can detect cases in which a near-end
+//                        signal occurs before the corresponding far-end signal.
+//                        It will delay the estimate for the current block by an
+//                        equal amount, and the returned values will be offset
+//                        by it.
+//
+//                        A value of zero is the typical no-lookahead case.
+//                        This also represents the minimum delay which can be
+//                        estimated.
+//
+//                        Note that the effective range of delay estimates is
+//                        [-|lookahead|,... ,|history_size|-|lookahead|)
+//                        where |history_size| is set through
+//                        WebRtc_set_history_size().
+//
+// Return value:
+//      - void*         : Created |handle|. If the memory can't be allocated or
+//                        if any of the input parameters are invalid NULL is
+//                        returned.
+void* WebRtc_CreateDelayEstimator(void* farend_handle, int max_lookahead);
+
+// Initializes the delay estimation instance returned by
+// WebRtc_CreateDelayEstimator(...)
+int WebRtc_InitDelayEstimator(void* handle);
+
+// Soft resets the delay estimation instance returned by
+// WebRtc_CreateDelayEstimator(...)
+// Input:
+//      - delay_shift   : The amount of blocks to shift history buffers.
+//
+// Return value:
+//      - actual_shifts : The actual number of shifts performed.
+int WebRtc_SoftResetDelayEstimator(void* handle, int delay_shift);
+
+// Sets the effective |history_size| used. Valid values from 2. We simply need
+// at least two delays to compare to perform an estimate. If |history_size| is
+// changed, buffers are reallocated filling in with zeros if necessary.
+// Note that changing the |history_size| affects both buffers in far-end and
+// near-end. Hence it is important to change all DelayEstimators that use the
+// same reference far-end, to the same |history_size| value.
+// Inputs:
+//  - handle            : Pointer to the delay estimation instance.
+//  - history_size      : Effective history size to be used.
+// Return value:
+//  - new_history_size  : The new history size used. If the memory was not able
+//                        to be allocated 0 is returned.
+int WebRtc_set_history_size(void* handle, int history_size);
+
+// Returns the history_size currently used.
+// Input:
+//      - handle        : Pointer to the delay estimation instance.
+int WebRtc_history_size(const void* handle);
+
+// Sets the amount of |lookahead| to use. Valid values are [0, max_lookahead]
+// where |max_lookahead| was set at create time through
+// WebRtc_CreateDelayEstimator(...).
+//
+// Input:
+//      - handle        : Pointer to the delay estimation instance.
+//      - lookahead     : The amount of lookahead to be used.
+//
+// Return value:
+//      - new_lookahead : The actual amount of lookahead set, unless |handle| is
+//                        a NULL pointer or |lookahead| is invalid, for which an
+//                        error is returned.
+int WebRtc_set_lookahead(void* handle, int lookahead);
+
+// Returns the amount of lookahead we currently use.
+// Input:
+//      - handle        : Pointer to the delay estimation instance.
+int WebRtc_lookahead(void* handle);
+
+// Sets the |allowed_offset| used in the robust validation scheme.  If the
+// delay estimator is used in an echo control component, this parameter is
+// related to the filter length.  In principle |allowed_offset| should be set to
+// the echo control filter length minus the expected echo duration, i.e., the
+// delay offset the echo control can handle without quality regression.  The
+// default value, used if not set manually, is zero.  Note that |allowed_offset|
+// has to be non-negative.
+// Inputs:
+//  - handle            : Pointer to the delay estimation instance.
+//  - allowed_offset    : The amount of delay offset, measured in partitions,
+//                        the echo control filter can handle.
+int WebRtc_set_allowed_offset(void* handle, int allowed_offset);
+
+// Returns the |allowed_offset| in number of partitions.
+int WebRtc_get_allowed_offset(const void* handle);
+
+// Enables/Disables a robust validation functionality in the delay estimation.
+// This is by default set to disabled at create time.  The state is preserved
+// over a reset.
+// Inputs:
+//      - handle        : Pointer to the delay estimation instance.
+//      - enable        : Enable (1) or disable (0) this feature.
+int WebRtc_enable_robust_validation(void* handle, int enable);
+
+// Returns 1 if robust validation is enabled and 0 if disabled.
+int WebRtc_is_robust_validation_enabled(const void* handle);
+
+// Estimates and returns the delay between the far-end and near-end blocks. The
+// value will be offset by the lookahead (i.e. the lookahead should be
+// subtracted from the returned value).
+// Inputs:
+//      - handle        : Pointer to the delay estimation instance.
+//      - near_spectrum : Pointer to the near-end spectrum data of the current
+//                        block.
+//      - spectrum_size : The size of the data arrays (same for both far- and
+//                        near-end).
+//      - near_q        : The Q-domain of the near-end data.
+//
+// Output:
+//      - handle        : Updated instance.
+//
+// Return value:
+//      - delay         :  >= 0 - Calculated delay value.
+//                        -1    - Error.
+//                        -2    - Insufficient data for estimation.
+int WebRtc_DelayEstimatorProcessFix(void* handle,
+                                    const uint16_t* near_spectrum,
+                                    int spectrum_size,
+                                    int near_q);
+
+// See WebRtc_DelayEstimatorProcessFix() for description.
+int WebRtc_DelayEstimatorProcessFloat(void* handle,
+                                      const float* near_spectrum,
+                                      int spectrum_size);
+
+// Returns the last calculated delay updated by the function
+// WebRtc_DelayEstimatorProcess(...).
+//
+// Input:
+//      - handle        : Pointer to the delay estimation instance.
+//
+// Return value:
+//      - delay         : >= 0  - Last calculated delay value.
+//                        -1    - Error.
+//                        -2    - Insufficient data for estimation.
+int WebRtc_last_delay(void* handle);
+
+// Returns the estimation quality/probability of the last calculated delay
+// updated by the function WebRtc_DelayEstimatorProcess(...). The estimation
+// quality is a value in the interval [0, 1]. The higher the value, the better
+// the quality.
+//
+// Return value:
+//      - delay_quality : >= 0  - Estimation quality of last calculated delay.
+float WebRtc_last_delay_quality(void* handle);
+
+#endif  // MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_WRAPPER_H_
diff --git a/modules/audio_processing/utility/ooura_fft.cc b/modules/audio_processing/utility/ooura_fft.cc
new file mode 100644
index 0000000..d753a81
--- /dev/null
+++ b/modules/audio_processing/utility/ooura_fft.cc
@@ -0,0 +1,543 @@
+/*
+ * http://www.kurims.kyoto-u.ac.jp/~ooura/fft.html
+ * Copyright Takuya OOURA, 1996-2001
+ *
+ * You may use, copy, modify and distribute this code for any purpose (include
+ * commercial use) and without fee. Please refer to this package when you modify
+ * this code.
+ *
+ * Changes by the WebRTC authors:
+ *    - Trivial type modifications.
+ *    - Minimal code subset to do rdft of length 128.
+ *    - Optimizations because of known length.
+ *    - Removed the global variables by moving the code in to a class in order
+ *      to make it thread safe.
+ *
+ *  All changes are covered by the WebRTC license and IP grant:
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/utility/ooura_fft.h"
+
+#include <math.h>
+
+#include "modules/audio_processing/utility/ooura_fft_tables_common.h"
+#include "system_wrappers/include/cpu_features_wrapper.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+namespace {
+
+#if !(defined(MIPS_FPU_LE) || defined(WEBRTC_HAS_NEON))
+static void cft1st_128_C(float* a) {
+  const int n = 128;
+  int j, k1, k2;
+  float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i;
+  float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
+
+  // The processing of the first set of elements was simplified in C to avoid
+  // some operations (multiplication by zero or one, addition of two elements
+  // multiplied by the same weight, ...).
+  x0r = a[0] + a[2];
+  x0i = a[1] + a[3];
+  x1r = a[0] - a[2];
+  x1i = a[1] - a[3];
+  x2r = a[4] + a[6];
+  x2i = a[5] + a[7];
+  x3r = a[4] - a[6];
+  x3i = a[5] - a[7];
+  a[0] = x0r + x2r;
+  a[1] = x0i + x2i;
+  a[4] = x0r - x2r;
+  a[5] = x0i - x2i;
+  a[2] = x1r - x3i;
+  a[3] = x1i + x3r;
+  a[6] = x1r + x3i;
+  a[7] = x1i - x3r;
+  wk1r = rdft_w[2];
+  x0r = a[8] + a[10];
+  x0i = a[9] + a[11];
+  x1r = a[8] - a[10];
+  x1i = a[9] - a[11];
+  x2r = a[12] + a[14];
+  x2i = a[13] + a[15];
+  x3r = a[12] - a[14];
+  x3i = a[13] - a[15];
+  a[8] = x0r + x2r;
+  a[9] = x0i + x2i;
+  a[12] = x2i - x0i;
+  a[13] = x0r - x2r;
+  x0r = x1r - x3i;
+  x0i = x1i + x3r;
+  a[10] = wk1r * (x0r - x0i);
+  a[11] = wk1r * (x0r + x0i);
+  x0r = x3i + x1r;
+  x0i = x3r - x1i;
+  a[14] = wk1r * (x0i - x0r);
+  a[15] = wk1r * (x0i + x0r);
+  k1 = 0;
+  for (j = 16; j < n; j += 16) {
+    k1 += 2;
+    k2 = 2 * k1;
+    wk2r = rdft_w[k1 + 0];
+    wk2i = rdft_w[k1 + 1];
+    wk1r = rdft_w[k2 + 0];
+    wk1i = rdft_w[k2 + 1];
+    wk3r = rdft_wk3ri_first[k1 + 0];
+    wk3i = rdft_wk3ri_first[k1 + 1];
+    x0r = a[j + 0] + a[j + 2];
+    x0i = a[j + 1] + a[j + 3];
+    x1r = a[j + 0] - a[j + 2];
+    x1i = a[j + 1] - a[j + 3];
+    x2r = a[j + 4] + a[j + 6];
+    x2i = a[j + 5] + a[j + 7];
+    x3r = a[j + 4] - a[j + 6];
+    x3i = a[j + 5] - a[j + 7];
+    a[j + 0] = x0r + x2r;
+    a[j + 1] = x0i + x2i;
+    x0r -= x2r;
+    x0i -= x2i;
+    a[j + 4] = wk2r * x0r - wk2i * x0i;
+    a[j + 5] = wk2r * x0i + wk2i * x0r;
+    x0r = x1r - x3i;
+    x0i = x1i + x3r;
+    a[j + 2] = wk1r * x0r - wk1i * x0i;
+    a[j + 3] = wk1r * x0i + wk1i * x0r;
+    x0r = x1r + x3i;
+    x0i = x1i - x3r;
+    a[j + 6] = wk3r * x0r - wk3i * x0i;
+    a[j + 7] = wk3r * x0i + wk3i * x0r;
+    wk1r = rdft_w[k2 + 2];
+    wk1i = rdft_w[k2 + 3];
+    wk3r = rdft_wk3ri_second[k1 + 0];
+    wk3i = rdft_wk3ri_second[k1 + 1];
+    x0r = a[j + 8] + a[j + 10];
+    x0i = a[j + 9] + a[j + 11];
+    x1r = a[j + 8] - a[j + 10];
+    x1i = a[j + 9] - a[j + 11];
+    x2r = a[j + 12] + a[j + 14];
+    x2i = a[j + 13] + a[j + 15];
+    x3r = a[j + 12] - a[j + 14];
+    x3i = a[j + 13] - a[j + 15];
+    a[j + 8] = x0r + x2r;
+    a[j + 9] = x0i + x2i;
+    x0r -= x2r;
+    x0i -= x2i;
+    a[j + 12] = -wk2i * x0r - wk2r * x0i;
+    a[j + 13] = -wk2i * x0i + wk2r * x0r;
+    x0r = x1r - x3i;
+    x0i = x1i + x3r;
+    a[j + 10] = wk1r * x0r - wk1i * x0i;
+    a[j + 11] = wk1r * x0i + wk1i * x0r;
+    x0r = x1r + x3i;
+    x0i = x1i - x3r;
+    a[j + 14] = wk3r * x0r - wk3i * x0i;
+    a[j + 15] = wk3r * x0i + wk3i * x0r;
+  }
+}
+
+static void cftmdl_128_C(float* a) {
+  const int l = 8;
+  const int n = 128;
+  const int m = 32;
+  int j0, j1, j2, j3, k, k1, k2, m2;
+  float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i;
+  float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
+
+  for (j0 = 0; j0 < l; j0 += 2) {
+    j1 = j0 + 8;
+    j2 = j0 + 16;
+    j3 = j0 + 24;
+    x0r = a[j0 + 0] + a[j1 + 0];
+    x0i = a[j0 + 1] + a[j1 + 1];
+    x1r = a[j0 + 0] - a[j1 + 0];
+    x1i = a[j0 + 1] - a[j1 + 1];
+    x2r = a[j2 + 0] + a[j3 + 0];
+    x2i = a[j2 + 1] + a[j3 + 1];
+    x3r = a[j2 + 0] - a[j3 + 0];
+    x3i = a[j2 + 1] - a[j3 + 1];
+    a[j0 + 0] = x0r + x2r;
+    a[j0 + 1] = x0i + x2i;
+    a[j2 + 0] = x0r - x2r;
+    a[j2 + 1] = x0i - x2i;
+    a[j1 + 0] = x1r - x3i;
+    a[j1 + 1] = x1i + x3r;
+    a[j3 + 0] = x1r + x3i;
+    a[j3 + 1] = x1i - x3r;
+  }
+  wk1r = rdft_w[2];
+  for (j0 = m; j0 < l + m; j0 += 2) {
+    j1 = j0 + 8;
+    j2 = j0 + 16;
+    j3 = j0 + 24;
+    x0r = a[j0 + 0] + a[j1 + 0];
+    x0i = a[j0 + 1] + a[j1 + 1];
+    x1r = a[j0 + 0] - a[j1 + 0];
+    x1i = a[j0 + 1] - a[j1 + 1];
+    x2r = a[j2 + 0] + a[j3 + 0];
+    x2i = a[j2 + 1] + a[j3 + 1];
+    x3r = a[j2 + 0] - a[j3 + 0];
+    x3i = a[j2 + 1] - a[j3 + 1];
+    a[j0 + 0] = x0r + x2r;
+    a[j0 + 1] = x0i + x2i;
+    a[j2 + 0] = x2i - x0i;
+    a[j2 + 1] = x0r - x2r;
+    x0r = x1r - x3i;
+    x0i = x1i + x3r;
+    a[j1 + 0] = wk1r * (x0r - x0i);
+    a[j1 + 1] = wk1r * (x0r + x0i);
+    x0r = x3i + x1r;
+    x0i = x3r - x1i;
+    a[j3 + 0] = wk1r * (x0i - x0r);
+    a[j3 + 1] = wk1r * (x0i + x0r);
+  }
+  k1 = 0;
+  m2 = 2 * m;
+  for (k = m2; k < n; k += m2) {
+    k1 += 2;
+    k2 = 2 * k1;
+    wk2r = rdft_w[k1 + 0];
+    wk2i = rdft_w[k1 + 1];
+    wk1r = rdft_w[k2 + 0];
+    wk1i = rdft_w[k2 + 1];
+    wk3r = rdft_wk3ri_first[k1 + 0];
+    wk3i = rdft_wk3ri_first[k1 + 1];
+    for (j0 = k; j0 < l + k; j0 += 2) {
+      j1 = j0 + 8;
+      j2 = j0 + 16;
+      j3 = j0 + 24;
+      x0r = a[j0 + 0] + a[j1 + 0];
+      x0i = a[j0 + 1] + a[j1 + 1];
+      x1r = a[j0 + 0] - a[j1 + 0];
+      x1i = a[j0 + 1] - a[j1 + 1];
+      x2r = a[j2 + 0] + a[j3 + 0];
+      x2i = a[j2 + 1] + a[j3 + 1];
+      x3r = a[j2 + 0] - a[j3 + 0];
+      x3i = a[j2 + 1] - a[j3 + 1];
+      a[j0 + 0] = x0r + x2r;
+      a[j0 + 1] = x0i + x2i;
+      x0r -= x2r;
+      x0i -= x2i;
+      a[j2 + 0] = wk2r * x0r - wk2i * x0i;
+      a[j2 + 1] = wk2r * x0i + wk2i * x0r;
+      x0r = x1r - x3i;
+      x0i = x1i + x3r;
+      a[j1 + 0] = wk1r * x0r - wk1i * x0i;
+      a[j1 + 1] = wk1r * x0i + wk1i * x0r;
+      x0r = x1r + x3i;
+      x0i = x1i - x3r;
+      a[j3 + 0] = wk3r * x0r - wk3i * x0i;
+      a[j3 + 1] = wk3r * x0i + wk3i * x0r;
+    }
+    wk1r = rdft_w[k2 + 2];
+    wk1i = rdft_w[k2 + 3];
+    wk3r = rdft_wk3ri_second[k1 + 0];
+    wk3i = rdft_wk3ri_second[k1 + 1];
+    for (j0 = k + m; j0 < l + (k + m); j0 += 2) {
+      j1 = j0 + 8;
+      j2 = j0 + 16;
+      j3 = j0 + 24;
+      x0r = a[j0 + 0] + a[j1 + 0];
+      x0i = a[j0 + 1] + a[j1 + 1];
+      x1r = a[j0 + 0] - a[j1 + 0];
+      x1i = a[j0 + 1] - a[j1 + 1];
+      x2r = a[j2 + 0] + a[j3 + 0];
+      x2i = a[j2 + 1] + a[j3 + 1];
+      x3r = a[j2 + 0] - a[j3 + 0];
+      x3i = a[j2 + 1] - a[j3 + 1];
+      a[j0 + 0] = x0r + x2r;
+      a[j0 + 1] = x0i + x2i;
+      x0r -= x2r;
+      x0i -= x2i;
+      a[j2 + 0] = -wk2i * x0r - wk2r * x0i;
+      a[j2 + 1] = -wk2i * x0i + wk2r * x0r;
+      x0r = x1r - x3i;
+      x0i = x1i + x3r;
+      a[j1 + 0] = wk1r * x0r - wk1i * x0i;
+      a[j1 + 1] = wk1r * x0i + wk1i * x0r;
+      x0r = x1r + x3i;
+      x0i = x1i - x3r;
+      a[j3 + 0] = wk3r * x0r - wk3i * x0i;
+      a[j3 + 1] = wk3r * x0i + wk3i * x0r;
+    }
+  }
+}
+
+static void rftfsub_128_C(float* a) {
+  const float* c = rdft_w + 32;
+  int j1, j2, k1, k2;
+  float wkr, wki, xr, xi, yr, yi;
+
+  for (j1 = 1, j2 = 2; j2 < 64; j1 += 1, j2 += 2) {
+    k2 = 128 - j2;
+    k1 = 32 - j1;
+    wkr = 0.5f - c[k1];
+    wki = c[j1];
+    xr = a[j2 + 0] - a[k2 + 0];
+    xi = a[j2 + 1] + a[k2 + 1];
+    yr = wkr * xr - wki * xi;
+    yi = wkr * xi + wki * xr;
+    a[j2 + 0] -= yr;
+    a[j2 + 1] -= yi;
+    a[k2 + 0] += yr;
+    a[k2 + 1] -= yi;
+  }
+}
+
+static void rftbsub_128_C(float* a) {
+  const float* c = rdft_w + 32;
+  int j1, j2, k1, k2;
+  float wkr, wki, xr, xi, yr, yi;
+
+  a[1] = -a[1];
+  for (j1 = 1, j2 = 2; j2 < 64; j1 += 1, j2 += 2) {
+    k2 = 128 - j2;
+    k1 = 32 - j1;
+    wkr = 0.5f - c[k1];
+    wki = c[j1];
+    xr = a[j2 + 0] - a[k2 + 0];
+    xi = a[j2 + 1] + a[k2 + 1];
+    yr = wkr * xr + wki * xi;
+    yi = wkr * xi - wki * xr;
+    a[j2 + 0] = a[j2 + 0] - yr;
+    a[j2 + 1] = yi - a[j2 + 1];
+    a[k2 + 0] = yr + a[k2 + 0];
+    a[k2 + 1] = yi - a[k2 + 1];
+  }
+  a[65] = -a[65];
+}
+#endif
+
+
+}  // namespace
+
+OouraFft::OouraFft() {
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+  use_sse2_ = (WebRtc_GetCPUInfo(kSSE2) != 0);
+#else
+  use_sse2_ = false;
+#endif
+}
+
+OouraFft::~OouraFft() = default;
+
+void OouraFft::Fft(float* a) const {
+  float xi;
+  bitrv2_128(a);
+  cftfsub_128(a);
+  rftfsub_128(a);
+  xi = a[0] - a[1];
+  a[0] += a[1];
+  a[1] = xi;
+}
+void OouraFft::InverseFft(float* a) const {
+  a[1] = 0.5f * (a[0] - a[1]);
+  a[0] -= a[1];
+  rftbsub_128(a);
+  bitrv2_128(a);
+  cftbsub_128(a);
+}
+
+void OouraFft::cft1st_128(float* a) const {
+#if defined(MIPS_FPU_LE)
+  cft1st_128_mips(a);
+#elif defined(WEBRTC_HAS_NEON)
+  cft1st_128_neon(a);
+#elif defined(WEBRTC_ARCH_X86_FAMILY)
+  if (use_sse2_) {
+    cft1st_128_SSE2(a);
+  } else {
+    cft1st_128_C(a);
+  }
+#else
+  cft1st_128_C(a);
+#endif
+}
+void OouraFft::cftmdl_128(float* a) const {
+#if defined(MIPS_FPU_LE)
+  cftmdl_128_mips(a);
+#elif defined(WEBRTC_HAS_NEON)
+  cftmdl_128_neon(a);
+#elif defined(WEBRTC_ARCH_X86_FAMILY)
+  if (use_sse2_) {
+    cftmdl_128_SSE2(a);
+  } else {
+    cftmdl_128_C(a);
+  }
+#else
+  cftmdl_128_C(a);
+#endif
+}
+void OouraFft::rftfsub_128(float* a) const {
+#if defined(MIPS_FPU_LE)
+  rftfsub_128_mips(a);
+#elif defined(WEBRTC_HAS_NEON)
+  rftfsub_128_neon(a);
+#elif defined(WEBRTC_ARCH_X86_FAMILY)
+  if (use_sse2_) {
+    rftfsub_128_SSE2(a);
+  } else {
+    rftfsub_128_C(a);
+  }
+#else
+  rftfsub_128_C(a);
+#endif
+}
+
+void OouraFft::rftbsub_128(float* a) const {
+#if defined(MIPS_FPU_LE)
+  rftbsub_128_mips(a);
+#elif defined(WEBRTC_HAS_NEON)
+  rftbsub_128_neon(a);
+#elif defined(WEBRTC_ARCH_X86_FAMILY)
+  if (use_sse2_) {
+    rftbsub_128_SSE2(a);
+  } else {
+    rftbsub_128_C(a);
+  }
+#else
+  rftbsub_128_C(a);
+#endif
+}
+
+void OouraFft::cftbsub_128(float* a) const {
+  int j, j1, j2, j3, l;
+  float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
+
+  cft1st_128(a);
+  cftmdl_128(a);
+  l = 32;
+
+  for (j = 0; j < l; j += 2) {
+    j1 = j + l;
+    j2 = j1 + l;
+    j3 = j2 + l;
+    x0r = a[j] + a[j1];
+    x0i = -a[j + 1] - a[j1 + 1];
+    x1r = a[j] - a[j1];
+    x1i = -a[j + 1] + a[j1 + 1];
+    x2r = a[j2] + a[j3];
+    x2i = a[j2 + 1] + a[j3 + 1];
+    x3r = a[j2] - a[j3];
+    x3i = a[j2 + 1] - a[j3 + 1];
+    a[j] = x0r + x2r;
+    a[j + 1] = x0i - x2i;
+    a[j2] = x0r - x2r;
+    a[j2 + 1] = x0i + x2i;
+    a[j1] = x1r - x3i;
+    a[j1 + 1] = x1i - x3r;
+    a[j3] = x1r + x3i;
+    a[j3 + 1] = x1i + x3r;
+  }
+}
+
+void OouraFft::cftfsub_128(float* a) const {
+  int j, j1, j2, j3, l;
+  float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
+
+  cft1st_128(a);
+  cftmdl_128(a);
+  l = 32;
+  for (j = 0; j < l; j += 2) {
+    j1 = j + l;
+    j2 = j1 + l;
+    j3 = j2 + l;
+    x0r = a[j] + a[j1];
+    x0i = a[j + 1] + a[j1 + 1];
+    x1r = a[j] - a[j1];
+    x1i = a[j + 1] - a[j1 + 1];
+    x2r = a[j2] + a[j3];
+    x2i = a[j2 + 1] + a[j3 + 1];
+    x3r = a[j2] - a[j3];
+    x3i = a[j2 + 1] - a[j3 + 1];
+    a[j] = x0r + x2r;
+    a[j + 1] = x0i + x2i;
+    a[j2] = x0r - x2r;
+    a[j2 + 1] = x0i - x2i;
+    a[j1] = x1r - x3i;
+    a[j1 + 1] = x1i + x3r;
+    a[j3] = x1r + x3i;
+    a[j3 + 1] = x1i - x3r;
+  }
+}
+
+void OouraFft::bitrv2_128(float* a) const {
+  /*
+      Following things have been attempted but are no faster:
+      (a) Storing the swap indexes in a LUT (index calculations are done
+          for 'free' while waiting on memory/L1).
+      (b) Consolidate the load/store of two consecutive floats by a 64 bit
+          integer (execution is memory/L1 bound).
+      (c) Do a mix of floats and 64 bit integer to maximize register
+          utilization (execution is memory/L1 bound).
+      (d) Replacing ip[i] by ((k<<31)>>25) + ((k >> 1)<<5).
+      (e) Hard-coding of the offsets to completely eliminates index
+          calculations.
+  */
+
+  unsigned int j, j1, k, k1;
+  float xr, xi, yr, yi;
+
+  const int ip[4] = {0, 64, 32, 96};
+  for (k = 0; k < 4; k++) {
+    for (j = 0; j < k; j++) {
+      j1 = 2 * j + ip[k];
+      k1 = 2 * k + ip[j];
+      xr = a[j1 + 0];
+      xi = a[j1 + 1];
+      yr = a[k1 + 0];
+      yi = a[k1 + 1];
+      a[j1 + 0] = yr;
+      a[j1 + 1] = yi;
+      a[k1 + 0] = xr;
+      a[k1 + 1] = xi;
+      j1 += 8;
+      k1 += 16;
+      xr = a[j1 + 0];
+      xi = a[j1 + 1];
+      yr = a[k1 + 0];
+      yi = a[k1 + 1];
+      a[j1 + 0] = yr;
+      a[j1 + 1] = yi;
+      a[k1 + 0] = xr;
+      a[k1 + 1] = xi;
+      j1 += 8;
+      k1 -= 8;
+      xr = a[j1 + 0];
+      xi = a[j1 + 1];
+      yr = a[k1 + 0];
+      yi = a[k1 + 1];
+      a[j1 + 0] = yr;
+      a[j1 + 1] = yi;
+      a[k1 + 0] = xr;
+      a[k1 + 1] = xi;
+      j1 += 8;
+      k1 += 16;
+      xr = a[j1 + 0];
+      xi = a[j1 + 1];
+      yr = a[k1 + 0];
+      yi = a[k1 + 1];
+      a[j1 + 0] = yr;
+      a[j1 + 1] = yi;
+      a[k1 + 0] = xr;
+      a[k1 + 1] = xi;
+    }
+    j1 = 2 * k + 8 + ip[k];
+    k1 = j1 + 8;
+    xr = a[j1 + 0];
+    xi = a[j1 + 1];
+    yr = a[k1 + 0];
+    yi = a[k1 + 1];
+    a[j1 + 0] = yr;
+    a[j1 + 1] = yi;
+    a[k1 + 0] = xr;
+    a[k1 + 1] = xi;
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/utility/ooura_fft.h b/modules/audio_processing/utility/ooura_fft.h
new file mode 100644
index 0000000..96d57dc
--- /dev/null
+++ b/modules/audio_processing/utility/ooura_fft.h
@@ -0,0 +1,60 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_H_
+#define MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+void cft1st_128_SSE2(float* a);
+void cftmdl_128_SSE2(float* a);
+void rftfsub_128_SSE2(float* a);
+void rftbsub_128_SSE2(float* a);
+#endif
+
+#if defined(MIPS_FPU_LE)
+void cft1st_128_mips(float* a);
+void cftmdl_128_mips(float* a);
+void rftfsub_128_mips(float* a);
+void rftbsub_128_mips(float* a);
+#endif
+
+#if defined(WEBRTC_HAS_NEON)
+void cft1st_128_neon(float* a);
+void cftmdl_128_neon(float* a);
+void rftfsub_128_neon(float* a);
+void rftbsub_128_neon(float* a);
+#endif
+
+class OouraFft {
+ public:
+  OouraFft();
+  ~OouraFft();
+  void Fft(float* a) const;
+  void InverseFft(float* a) const;
+
+ private:
+  void cft1st_128(float* a) const;
+  void cftmdl_128(float* a) const;
+  void rftfsub_128(float* a) const;
+  void rftbsub_128(float* a) const;
+
+  void cftfsub_128(float* a) const;
+  void cftbsub_128(float* a) const;
+  void bitrv2_128(float* a) const;
+  bool use_sse2_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_H_
diff --git a/modules/audio_processing/utility/ooura_fft_mips.cc b/modules/audio_processing/utility/ooura_fft_mips.cc
new file mode 100644
index 0000000..569e1d7
--- /dev/null
+++ b/modules/audio_processing/utility/ooura_fft_mips.cc
@@ -0,0 +1,1185 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/utility/ooura_fft.h"
+
+#include "modules/audio_processing/utility/ooura_fft_tables_common.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+#if defined(MIPS_FPU_LE)
+void bitrv2_128_mips(float* a) {
+  // n is 128
+  float xr, xi, yr, yi;
+
+  xr = a[8];
+  xi = a[9];
+  yr = a[16];
+  yi = a[17];
+  a[8] = yr;
+  a[9] = yi;
+  a[16] = xr;
+  a[17] = xi;
+
+  xr = a[64];
+  xi = a[65];
+  yr = a[2];
+  yi = a[3];
+  a[64] = yr;
+  a[65] = yi;
+  a[2] = xr;
+  a[3] = xi;
+
+  xr = a[72];
+  xi = a[73];
+  yr = a[18];
+  yi = a[19];
+  a[72] = yr;
+  a[73] = yi;
+  a[18] = xr;
+  a[19] = xi;
+
+  xr = a[80];
+  xi = a[81];
+  yr = a[10];
+  yi = a[11];
+  a[80] = yr;
+  a[81] = yi;
+  a[10] = xr;
+  a[11] = xi;
+
+  xr = a[88];
+  xi = a[89];
+  yr = a[26];
+  yi = a[27];
+  a[88] = yr;
+  a[89] = yi;
+  a[26] = xr;
+  a[27] = xi;
+
+  xr = a[74];
+  xi = a[75];
+  yr = a[82];
+  yi = a[83];
+  a[74] = yr;
+  a[75] = yi;
+  a[82] = xr;
+  a[83] = xi;
+
+  xr = a[32];
+  xi = a[33];
+  yr = a[4];
+  yi = a[5];
+  a[32] = yr;
+  a[33] = yi;
+  a[4] = xr;
+  a[5] = xi;
+
+  xr = a[40];
+  xi = a[41];
+  yr = a[20];
+  yi = a[21];
+  a[40] = yr;
+  a[41] = yi;
+  a[20] = xr;
+  a[21] = xi;
+
+  xr = a[48];
+  xi = a[49];
+  yr = a[12];
+  yi = a[13];
+  a[48] = yr;
+  a[49] = yi;
+  a[12] = xr;
+  a[13] = xi;
+
+  xr = a[56];
+  xi = a[57];
+  yr = a[28];
+  yi = a[29];
+  a[56] = yr;
+  a[57] = yi;
+  a[28] = xr;
+  a[29] = xi;
+
+  xr = a[34];
+  xi = a[35];
+  yr = a[68];
+  yi = a[69];
+  a[34] = yr;
+  a[35] = yi;
+  a[68] = xr;
+  a[69] = xi;
+
+  xr = a[42];
+  xi = a[43];
+  yr = a[84];
+  yi = a[85];
+  a[42] = yr;
+  a[43] = yi;
+  a[84] = xr;
+  a[85] = xi;
+
+  xr = a[50];
+  xi = a[51];
+  yr = a[76];
+  yi = a[77];
+  a[50] = yr;
+  a[51] = yi;
+  a[76] = xr;
+  a[77] = xi;
+
+  xr = a[58];
+  xi = a[59];
+  yr = a[92];
+  yi = a[93];
+  a[58] = yr;
+  a[59] = yi;
+  a[92] = xr;
+  a[93] = xi;
+
+  xr = a[44];
+  xi = a[45];
+  yr = a[52];
+  yi = a[53];
+  a[44] = yr;
+  a[45] = yi;
+  a[52] = xr;
+  a[53] = xi;
+
+  xr = a[96];
+  xi = a[97];
+  yr = a[6];
+  yi = a[7];
+  a[96] = yr;
+  a[97] = yi;
+  a[6] = xr;
+  a[7] = xi;
+
+  xr = a[104];
+  xi = a[105];
+  yr = a[22];
+  yi = a[23];
+  a[104] = yr;
+  a[105] = yi;
+  a[22] = xr;
+  a[23] = xi;
+
+  xr = a[112];
+  xi = a[113];
+  yr = a[14];
+  yi = a[15];
+  a[112] = yr;
+  a[113] = yi;
+  a[14] = xr;
+  a[15] = xi;
+
+  xr = a[120];
+  xi = a[121];
+  yr = a[30];
+  yi = a[31];
+  a[120] = yr;
+  a[121] = yi;
+  a[30] = xr;
+  a[31] = xi;
+
+  xr = a[98];
+  xi = a[99];
+  yr = a[70];
+  yi = a[71];
+  a[98] = yr;
+  a[99] = yi;
+  a[70] = xr;
+  a[71] = xi;
+
+  xr = a[106];
+  xi = a[107];
+  yr = a[86];
+  yi = a[87];
+  a[106] = yr;
+  a[107] = yi;
+  a[86] = xr;
+  a[87] = xi;
+
+  xr = a[114];
+  xi = a[115];
+  yr = a[78];
+  yi = a[79];
+  a[114] = yr;
+  a[115] = yi;
+  a[78] = xr;
+  a[79] = xi;
+
+  xr = a[122];
+  xi = a[123];
+  yr = a[94];
+  yi = a[95];
+  a[122] = yr;
+  a[123] = yi;
+  a[94] = xr;
+  a[95] = xi;
+
+  xr = a[100];
+  xi = a[101];
+  yr = a[38];
+  yi = a[39];
+  a[100] = yr;
+  a[101] = yi;
+  a[38] = xr;
+  a[39] = xi;
+
+  xr = a[108];
+  xi = a[109];
+  yr = a[54];
+  yi = a[55];
+  a[108] = yr;
+  a[109] = yi;
+  a[54] = xr;
+  a[55] = xi;
+
+  xr = a[116];
+  xi = a[117];
+  yr = a[46];
+  yi = a[47];
+  a[116] = yr;
+  a[117] = yi;
+  a[46] = xr;
+  a[47] = xi;
+
+  xr = a[124];
+  xi = a[125];
+  yr = a[62];
+  yi = a[63];
+  a[124] = yr;
+  a[125] = yi;
+  a[62] = xr;
+  a[63] = xi;
+
+  xr = a[110];
+  xi = a[111];
+  yr = a[118];
+  yi = a[119];
+  a[110] = yr;
+  a[111] = yi;
+  a[118] = xr;
+  a[119] = xi;
+}
+
+void cft1st_128_mips(float* a) {
+  float f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14;
+  int a_ptr, p1_rdft, p2_rdft, count;
+  const float* first = rdft_wk3ri_first;
+  const float* second = rdft_wk3ri_second;
+
+  __asm __volatile (
+    ".set       push                                                    \n\t"
+    ".set       noreorder                                               \n\t"
+    // first 8
+    "lwc1       %[f0],        0(%[a])                                   \n\t"
+    "lwc1       %[f1],        4(%[a])                                   \n\t"
+    "lwc1       %[f2],        8(%[a])                                   \n\t"
+    "lwc1       %[f3],        12(%[a])                                  \n\t"
+    "lwc1       %[f4],        16(%[a])                                  \n\t"
+    "lwc1       %[f5],        20(%[a])                                  \n\t"
+    "lwc1       %[f6],        24(%[a])                                  \n\t"
+    "lwc1       %[f7],        28(%[a])                                  \n\t"
+    "add.s      %[f8],        %[f0],        %[f2]                       \n\t"
+    "sub.s      %[f0],        %[f0],        %[f2]                       \n\t"
+    "add.s      %[f2],        %[f4],        %[f6]                       \n\t"
+    "sub.s      %[f4],        %[f4],        %[f6]                       \n\t"
+    "add.s      %[f6],        %[f1],        %[f3]                       \n\t"
+    "sub.s      %[f1],        %[f1],        %[f3]                       \n\t"
+    "add.s      %[f3],        %[f5],        %[f7]                       \n\t"
+    "sub.s      %[f5],        %[f5],        %[f7]                       \n\t"
+    "add.s      %[f7],        %[f8],        %[f2]                       \n\t"
+    "sub.s      %[f8],        %[f8],        %[f2]                       \n\t"
+    "sub.s      %[f2],        %[f1],        %[f4]                       \n\t"
+    "add.s      %[f1],        %[f1],        %[f4]                       \n\t"
+    "add.s      %[f4],        %[f6],        %[f3]                       \n\t"
+    "sub.s      %[f6],        %[f6],        %[f3]                       \n\t"
+    "sub.s      %[f3],        %[f0],        %[f5]                       \n\t"
+    "add.s      %[f0],        %[f0],        %[f5]                       \n\t"
+    "swc1       %[f7],        0(%[a])                                   \n\t"
+    "swc1       %[f8],        16(%[a])                                  \n\t"
+    "swc1       %[f2],        28(%[a])                                  \n\t"
+    "swc1       %[f1],        12(%[a])                                  \n\t"
+    "swc1       %[f4],        4(%[a])                                   \n\t"
+    "swc1       %[f6],        20(%[a])                                  \n\t"
+    "swc1       %[f3],        8(%[a])                                   \n\t"
+    "swc1       %[f0],        24(%[a])                                  \n\t"
+    // second 8
+    "lwc1       %[f0],        32(%[a])                                  \n\t"
+    "lwc1       %[f1],        36(%[a])                                  \n\t"
+    "lwc1       %[f2],        40(%[a])                                  \n\t"
+    "lwc1       %[f3],        44(%[a])                                  \n\t"
+    "lwc1       %[f4],        48(%[a])                                  \n\t"
+    "lwc1       %[f5],        52(%[a])                                  \n\t"
+    "lwc1       %[f6],        56(%[a])                                  \n\t"
+    "lwc1       %[f7],        60(%[a])                                  \n\t"
+    "add.s      %[f8],        %[f4],        %[f6]                       \n\t"
+    "sub.s      %[f4],        %[f4],        %[f6]                       \n\t"
+    "add.s      %[f6],        %[f1],        %[f3]                       \n\t"
+    "sub.s      %[f1],        %[f1],        %[f3]                       \n\t"
+    "add.s      %[f3],        %[f0],        %[f2]                       \n\t"
+    "sub.s      %[f0],        %[f0],        %[f2]                       \n\t"
+    "add.s      %[f2],        %[f5],        %[f7]                       \n\t"
+    "sub.s      %[f5],        %[f5],        %[f7]                       \n\t"
+    "add.s      %[f7],        %[f4],        %[f1]                       \n\t"
+    "sub.s      %[f4],        %[f4],        %[f1]                       \n\t"
+    "add.s      %[f1],        %[f3],        %[f8]                       \n\t"
+    "sub.s      %[f3],        %[f3],        %[f8]                       \n\t"
+    "sub.s      %[f8],        %[f0],        %[f5]                       \n\t"
+    "add.s      %[f0],        %[f0],        %[f5]                       \n\t"
+    "add.s      %[f5],        %[f6],        %[f2]                       \n\t"
+    "sub.s      %[f6],        %[f2],        %[f6]                       \n\t"
+    "lwc1       %[f9],        8(%[rdft_w])                              \n\t"
+    "sub.s      %[f2],        %[f8],        %[f7]                       \n\t"
+    "add.s      %[f8],        %[f8],        %[f7]                       \n\t"
+    "sub.s      %[f7],        %[f4],        %[f0]                       \n\t"
+    "add.s      %[f4],        %[f4],        %[f0]                       \n\t"
+    // prepare for loop
+    "addiu      %[a_ptr],     %[a],         64                          \n\t"
+    "addiu      %[p1_rdft],   %[rdft_w],    8                           \n\t"
+    "addiu      %[p2_rdft],   %[rdft_w],    16                          \n\t"
+    "addiu      %[count],     $zero,        7                           \n\t"
+    // finish second 8
+    "mul.s      %[f2],        %[f9],        %[f2]                       \n\t"
+    "mul.s      %[f8],        %[f9],        %[f8]                       \n\t"
+    "mul.s      %[f7],        %[f9],        %[f7]                       \n\t"
+    "mul.s      %[f4],        %[f9],        %[f4]                       \n\t"
+    "swc1       %[f1],        32(%[a])                                  \n\t"
+    "swc1       %[f3],        52(%[a])                                  \n\t"
+    "swc1       %[f5],        36(%[a])                                  \n\t"
+    "swc1       %[f6],        48(%[a])                                  \n\t"
+    "swc1       %[f2],        40(%[a])                                  \n\t"
+    "swc1       %[f8],        44(%[a])                                  \n\t"
+    "swc1       %[f7],        56(%[a])                                  \n\t"
+    "swc1       %[f4],        60(%[a])                                  \n\t"
+    // loop
+   "1:                                                                  \n\t"
+    "lwc1       %[f0],        0(%[a_ptr])                               \n\t"
+    "lwc1       %[f1],        4(%[a_ptr])                               \n\t"
+    "lwc1       %[f2],        8(%[a_ptr])                               \n\t"
+    "lwc1       %[f3],        12(%[a_ptr])                              \n\t"
+    "lwc1       %[f4],        16(%[a_ptr])                              \n\t"
+    "lwc1       %[f5],        20(%[a_ptr])                              \n\t"
+    "lwc1       %[f6],        24(%[a_ptr])                              \n\t"
+    "lwc1       %[f7],        28(%[a_ptr])                              \n\t"
+    "add.s      %[f8],        %[f0],        %[f2]                       \n\t"
+    "sub.s      %[f0],        %[f0],        %[f2]                       \n\t"
+    "add.s      %[f2],        %[f4],        %[f6]                       \n\t"
+    "sub.s      %[f4],        %[f4],        %[f6]                       \n\t"
+    "add.s      %[f6],        %[f1],        %[f3]                       \n\t"
+    "sub.s      %[f1],        %[f1],        %[f3]                       \n\t"
+    "add.s      %[f3],        %[f5],        %[f7]                       \n\t"
+    "sub.s      %[f5],        %[f5],        %[f7]                       \n\t"
+    "lwc1       %[f10],       4(%[p1_rdft])                             \n\t"
+    "lwc1       %[f11],       0(%[p2_rdft])                             \n\t"
+    "lwc1       %[f12],       4(%[p2_rdft])                             \n\t"
+    "lwc1       %[f13],       8(%[first])                               \n\t"
+    "lwc1       %[f14],       12(%[first])                              \n\t"
+    "add.s      %[f7],        %[f8],        %[f2]                       \n\t"
+    "sub.s      %[f8],        %[f8],        %[f2]                       \n\t"
+    "add.s      %[f2],        %[f6],        %[f3]                       \n\t"
+    "sub.s      %[f6],        %[f6],        %[f3]                       \n\t"
+    "add.s      %[f3],        %[f0],        %[f5]                       \n\t"
+    "sub.s      %[f0],        %[f0],        %[f5]                       \n\t"
+    "add.s      %[f5],        %[f1],        %[f4]                       \n\t"
+    "sub.s      %[f1],        %[f1],        %[f4]                       \n\t"
+    "swc1       %[f7],        0(%[a_ptr])                               \n\t"
+    "swc1       %[f2],        4(%[a_ptr])                               \n\t"
+    "mul.s      %[f4],        %[f9],        %[f8]                       \n\t"
+#if defined(MIPS32_R2_LE)
+    "mul.s      %[f8],        %[f10],       %[f8]                       \n\t"
+    "mul.s      %[f7],        %[f11],       %[f0]                       \n\t"
+    "mul.s      %[f0],        %[f12],       %[f0]                       \n\t"
+    "mul.s      %[f2],        %[f13],       %[f3]                       \n\t"
+    "mul.s      %[f3],        %[f14],       %[f3]                       \n\t"
+    "nmsub.s    %[f4],        %[f4],        %[f10],       %[f6]         \n\t"
+    "madd.s     %[f8],        %[f8],        %[f9],        %[f6]         \n\t"
+    "nmsub.s    %[f7],        %[f7],        %[f12],       %[f5]         \n\t"
+    "madd.s     %[f0],        %[f0],        %[f11],       %[f5]         \n\t"
+    "nmsub.s    %[f2],        %[f2],        %[f14],       %[f1]         \n\t"
+    "madd.s     %[f3],        %[f3],        %[f13],       %[f1]         \n\t"
+#else
+    "mul.s      %[f7],        %[f10],       %[f6]                       \n\t"
+    "mul.s      %[f6],        %[f9],        %[f6]                       \n\t"
+    "mul.s      %[f8],        %[f10],       %[f8]                       \n\t"
+    "mul.s      %[f2],        %[f11],       %[f0]                       \n\t"
+    "mul.s      %[f11],       %[f11],       %[f5]                       \n\t"
+    "mul.s      %[f5],        %[f12],       %[f5]                       \n\t"
+    "mul.s      %[f0],        %[f12],       %[f0]                       \n\t"
+    "mul.s      %[f12],       %[f13],       %[f3]                       \n\t"
+    "mul.s      %[f13],       %[f13],       %[f1]                       \n\t"
+    "mul.s      %[f1],        %[f14],       %[f1]                       \n\t"
+    "mul.s      %[f3],        %[f14],       %[f3]                       \n\t"
+    "sub.s      %[f4],        %[f4],        %[f7]                       \n\t"
+    "add.s      %[f8],        %[f6],        %[f8]                       \n\t"
+    "sub.s      %[f7],        %[f2],        %[f5]                       \n\t"
+    "add.s      %[f0],        %[f11],       %[f0]                       \n\t"
+    "sub.s      %[f2],        %[f12],       %[f1]                       \n\t"
+    "add.s      %[f3],        %[f13],       %[f3]                       \n\t"
+#endif
+    "swc1       %[f4],        16(%[a_ptr])                              \n\t"
+    "swc1       %[f8],        20(%[a_ptr])                              \n\t"
+    "swc1       %[f7],        8(%[a_ptr])                               \n\t"
+    "swc1       %[f0],        12(%[a_ptr])                              \n\t"
+    "swc1       %[f2],        24(%[a_ptr])                              \n\t"
+    "swc1       %[f3],        28(%[a_ptr])                              \n\t"
+    "lwc1       %[f0],        32(%[a_ptr])                              \n\t"
+    "lwc1       %[f1],        36(%[a_ptr])                              \n\t"
+    "lwc1       %[f2],        40(%[a_ptr])                              \n\t"
+    "lwc1       %[f3],        44(%[a_ptr])                              \n\t"
+    "lwc1       %[f4],        48(%[a_ptr])                              \n\t"
+    "lwc1       %[f5],        52(%[a_ptr])                              \n\t"
+    "lwc1       %[f6],        56(%[a_ptr])                              \n\t"
+    "lwc1       %[f7],        60(%[a_ptr])                              \n\t"
+    "add.s      %[f8],        %[f0],        %[f2]                       \n\t"
+    "sub.s      %[f0],        %[f0],        %[f2]                       \n\t"
+    "add.s      %[f2],        %[f4],        %[f6]                       \n\t"
+    "sub.s      %[f4],        %[f4],        %[f6]                       \n\t"
+    "add.s      %[f6],        %[f1],        %[f3]                       \n\t"
+    "sub.s      %[f1],        %[f1],        %[f3]                       \n\t"
+    "add.s      %[f3],        %[f5],        %[f7]                       \n\t"
+    "sub.s      %[f5],        %[f5],        %[f7]                       \n\t"
+    "lwc1       %[f11],       8(%[p2_rdft])                             \n\t"
+    "lwc1       %[f12],       12(%[p2_rdft])                            \n\t"
+    "lwc1       %[f13],       8(%[second])                              \n\t"
+    "lwc1       %[f14],       12(%[second])                             \n\t"
+    "add.s      %[f7],        %[f8],        %[f2]                       \n\t"
+    "sub.s      %[f8],        %[f2],        %[f8]                       \n\t"
+    "add.s      %[f2],        %[f6],        %[f3]                       \n\t"
+    "sub.s      %[f6],        %[f3],        %[f6]                       \n\t"
+    "add.s      %[f3],        %[f0],        %[f5]                       \n\t"
+    "sub.s      %[f0],        %[f0],        %[f5]                       \n\t"
+    "add.s      %[f5],        %[f1],        %[f4]                       \n\t"
+    "sub.s      %[f1],        %[f1],        %[f4]                       \n\t"
+    "swc1       %[f7],        32(%[a_ptr])                              \n\t"
+    "swc1       %[f2],        36(%[a_ptr])                              \n\t"
+    "mul.s      %[f4],        %[f10],       %[f8]                       \n\t"
+#if defined(MIPS32_R2_LE)
+    "mul.s      %[f10],       %[f10],       %[f6]                       \n\t"
+    "mul.s      %[f7],        %[f11],       %[f0]                       \n\t"
+    "mul.s      %[f11],       %[f11],       %[f5]                       \n\t"
+    "mul.s      %[f2],        %[f13],       %[f3]                       \n\t"
+    "mul.s      %[f13],       %[f13],       %[f1]                       \n\t"
+    "madd.s     %[f4],        %[f4],        %[f9],        %[f6]         \n\t"
+    "nmsub.s    %[f10],       %[f10],       %[f9],        %[f8]         \n\t"
+    "nmsub.s    %[f7],        %[f7],        %[f12],       %[f5]         \n\t"
+    "madd.s     %[f11],       %[f11],       %[f12],       %[f0]         \n\t"
+    "nmsub.s    %[f2],        %[f2],        %[f14],       %[f1]         \n\t"
+    "madd.s     %[f13],       %[f13],       %[f14],       %[f3]         \n\t"
+#else
+    "mul.s      %[f2],        %[f9],        %[f6]                       \n\t"
+    "mul.s      %[f10],       %[f10],       %[f6]                       \n\t"
+    "mul.s      %[f9],        %[f9],        %[f8]                       \n\t"
+    "mul.s      %[f7],        %[f11],       %[f0]                       \n\t"
+    "mul.s      %[f8],        %[f12],       %[f5]                       \n\t"
+    "mul.s      %[f11],       %[f11],       %[f5]                       \n\t"
+    "mul.s      %[f12],       %[f12],       %[f0]                       \n\t"
+    "mul.s      %[f5],        %[f13],       %[f3]                       \n\t"
+    "mul.s      %[f0],        %[f14],       %[f1]                       \n\t"
+    "mul.s      %[f13],       %[f13],       %[f1]                       \n\t"
+    "mul.s      %[f14],       %[f14],       %[f3]                       \n\t"
+    "add.s      %[f4],        %[f4],        %[f2]                       \n\t"
+    "sub.s      %[f10],       %[f10],       %[f9]                       \n\t"
+    "sub.s      %[f7],        %[f7],        %[f8]                       \n\t"
+    "add.s      %[f11],       %[f11],       %[f12]                      \n\t"
+    "sub.s      %[f2],        %[f5],        %[f0]                       \n\t"
+    "add.s      %[f13],       %[f13],       %[f14]                      \n\t"
+#endif
+    "swc1       %[f4],        48(%[a_ptr])                              \n\t"
+    "swc1       %[f10],       52(%[a_ptr])                              \n\t"
+    "swc1       %[f7],        40(%[a_ptr])                              \n\t"
+    "swc1       %[f11],       44(%[a_ptr])                              \n\t"
+    "swc1       %[f2],        56(%[a_ptr])                              \n\t"
+    "swc1       %[f13],       60(%[a_ptr])                              \n\t"
+    "addiu      %[count],     %[count],     -1                          \n\t"
+    "lwc1       %[f9],        8(%[p1_rdft])                             \n\t"
+    "addiu      %[a_ptr],     %[a_ptr],     64                          \n\t"
+    "addiu      %[p1_rdft],   %[p1_rdft],   8                           \n\t"
+    "addiu      %[p2_rdft],   %[p2_rdft],   16                          \n\t"
+    "addiu      %[first],     %[first],     8                           \n\t"
+    "bgtz       %[count],     1b                                        \n\t"
+    " addiu     %[second],    %[second],    8                           \n\t"
+    ".set       pop                                                     \n\t"
+    : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2), [f3] "=&f" (f3),
+      [f4] "=&f" (f4), [f5] "=&f" (f5), [f6] "=&f" (f6), [f7] "=&f" (f7),
+      [f8] "=&f" (f8), [f9] "=&f" (f9), [f10] "=&f" (f10), [f11] "=&f" (f11),
+      [f12] "=&f" (f12), [f13] "=&f" (f13), [f14] "=&f" (f14),
+      [a_ptr] "=&r" (a_ptr), [p1_rdft] "=&r" (p1_rdft), [first] "+r" (first),
+      [p2_rdft] "=&r" (p2_rdft), [count] "=&r" (count), [second] "+r" (second)
+    : [a] "r" (a), [rdft_w] "r" (rdft_w)
+    : "memory"
+  );
+}
+
+void cftmdl_128_mips(float* a) {
+  float f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14;
+  int tmp_a, count;
+  __asm __volatile (
+    ".set       push                                      \n\t"
+    ".set       noreorder                                 \n\t"
+    "addiu      %[tmp_a],   %[a],         0               \n\t"
+    "addiu      %[count],   $zero,        4               \n\t"
+   "1:                                                    \n\t"
+    "addiu      %[count],   %[count],     -1              \n\t"
+    "lwc1       %[f0],      0(%[tmp_a])                   \n\t"
+    "lwc1       %[f2],      32(%[tmp_a])                  \n\t"
+    "lwc1       %[f4],      64(%[tmp_a])                  \n\t"
+    "lwc1       %[f6],      96(%[tmp_a])                  \n\t"
+    "lwc1       %[f1],      4(%[tmp_a])                   \n\t"
+    "lwc1       %[f3],      36(%[tmp_a])                  \n\t"
+    "lwc1       %[f5],      68(%[tmp_a])                  \n\t"
+    "lwc1       %[f7],      100(%[tmp_a])                 \n\t"
+    "add.s      %[f8],      %[f0],        %[f2]           \n\t"
+    "sub.s      %[f0],      %[f0],        %[f2]           \n\t"
+    "add.s      %[f2],      %[f4],        %[f6]           \n\t"
+    "sub.s      %[f4],      %[f4],        %[f6]           \n\t"
+    "add.s      %[f6],      %[f1],        %[f3]           \n\t"
+    "sub.s      %[f1],      %[f1],        %[f3]           \n\t"
+    "add.s      %[f3],      %[f5],        %[f7]           \n\t"
+    "sub.s      %[f5],      %[f5],        %[f7]           \n\t"
+    "add.s      %[f7],      %[f8],        %[f2]           \n\t"
+    "sub.s      %[f8],      %[f8],        %[f2]           \n\t"
+    "add.s      %[f2],      %[f1],        %[f4]           \n\t"
+    "sub.s      %[f1],      %[f1],        %[f4]           \n\t"
+    "add.s      %[f4],      %[f6],        %[f3]           \n\t"
+    "sub.s      %[f6],      %[f6],        %[f3]           \n\t"
+    "sub.s      %[f3],      %[f0],        %[f5]           \n\t"
+    "add.s      %[f0],      %[f0],        %[f5]           \n\t"
+    "swc1       %[f7],      0(%[tmp_a])                   \n\t"
+    "swc1       %[f8],      64(%[tmp_a])                  \n\t"
+    "swc1       %[f2],      36(%[tmp_a])                  \n\t"
+    "swc1       %[f1],      100(%[tmp_a])                 \n\t"
+    "swc1       %[f4],      4(%[tmp_a])                   \n\t"
+    "swc1       %[f6],      68(%[tmp_a])                  \n\t"
+    "swc1       %[f3],      32(%[tmp_a])                  \n\t"
+    "swc1       %[f0],      96(%[tmp_a])                  \n\t"
+    "bgtz       %[count],   1b                            \n\t"
+    " addiu     %[tmp_a],   %[tmp_a],     8               \n\t"
+    ".set       pop                                       \n\t"
+    : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2), [f3] "=&f" (f3),
+      [f4] "=&f" (f4), [f5] "=&f" (f5), [f6] "=&f" (f6), [f7] "=&f" (f7),
+      [f8] "=&f" (f8), [tmp_a] "=&r" (tmp_a), [count] "=&r" (count)
+    : [a] "r" (a)
+    : "memory"
+  );
+  f9 = rdft_w[2];
+  __asm __volatile (
+    ".set       push                                      \n\t"
+    ".set       noreorder                                 \n\t"
+    "addiu      %[tmp_a],   %[a],         128             \n\t"
+    "addiu      %[count],   $zero,        4               \n\t"
+   "1:                                                    \n\t"
+    "addiu      %[count],   %[count],     -1              \n\t"
+    "lwc1       %[f0],      0(%[tmp_a])                   \n\t"
+    "lwc1       %[f2],      32(%[tmp_a])                  \n\t"
+    "lwc1       %[f5],      68(%[tmp_a])                  \n\t"
+    "lwc1       %[f7],      100(%[tmp_a])                 \n\t"
+    "lwc1       %[f1],      4(%[tmp_a])                   \n\t"
+    "lwc1       %[f3],      36(%[tmp_a])                  \n\t"
+    "lwc1       %[f4],      64(%[tmp_a])                  \n\t"
+    "lwc1       %[f6],      96(%[tmp_a])                  \n\t"
+    "sub.s      %[f8],      %[f0],        %[f2]           \n\t"
+    "add.s      %[f0],      %[f0],        %[f2]           \n\t"
+    "sub.s      %[f2],      %[f5],        %[f7]           \n\t"
+    "add.s      %[f5],      %[f5],        %[f7]           \n\t"
+    "sub.s      %[f7],      %[f1],        %[f3]           \n\t"
+    "add.s      %[f1],      %[f1],        %[f3]           \n\t"
+    "sub.s      %[f3],      %[f4],        %[f6]           \n\t"
+    "add.s      %[f4],      %[f4],        %[f6]           \n\t"
+    "sub.s      %[f6],      %[f8],        %[f2]           \n\t"
+    "add.s      %[f8],      %[f8],        %[f2]           \n\t"
+    "add.s      %[f2],      %[f5],        %[f1]           \n\t"
+    "sub.s      %[f5],      %[f5],        %[f1]           \n\t"
+    "add.s      %[f1],      %[f3],        %[f7]           \n\t"
+    "sub.s      %[f3],      %[f3],        %[f7]           \n\t"
+    "add.s      %[f7],      %[f0],        %[f4]           \n\t"
+    "sub.s      %[f0],      %[f0],        %[f4]           \n\t"
+    "sub.s      %[f4],      %[f6],        %[f1]           \n\t"
+    "add.s      %[f6],      %[f6],        %[f1]           \n\t"
+    "sub.s      %[f1],      %[f3],        %[f8]           \n\t"
+    "add.s      %[f3],      %[f3],        %[f8]           \n\t"
+    "mul.s      %[f4],      %[f4],        %[f9]           \n\t"
+    "mul.s      %[f6],      %[f6],        %[f9]           \n\t"
+    "mul.s      %[f1],      %[f1],        %[f9]           \n\t"
+    "mul.s      %[f3],      %[f3],        %[f9]           \n\t"
+    "swc1       %[f7],      0(%[tmp_a])                   \n\t"
+    "swc1       %[f2],      4(%[tmp_a])                   \n\t"
+    "swc1       %[f5],      64(%[tmp_a])                  \n\t"
+    "swc1       %[f0],      68(%[tmp_a])                  \n\t"
+    "swc1       %[f4],      32(%[tmp_a])                  \n\t"
+    "swc1       %[f6],      36(%[tmp_a])                  \n\t"
+    "swc1       %[f1],      96(%[tmp_a])                  \n\t"
+    "swc1       %[f3],      100(%[tmp_a])                 \n\t"
+    "bgtz       %[count],   1b                            \n\t"
+    " addiu     %[tmp_a],   %[tmp_a],     8               \n\t"
+    ".set       pop                                       \n\t"
+    : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2), [f3] "=&f" (f3),
+      [f4] "=&f" (f4), [f5] "=&f" (f5), [f6] "=&f" (f6), [f7] "=&f" (f7),
+      [f8] "=&f" (f8), [tmp_a] "=&r" (tmp_a), [count] "=&r" (count)
+    : [a] "r" (a), [f9] "f" (f9)
+    : "memory"
+  );
+  f10 = rdft_w[3];
+  f11 = rdft_w[4];
+  f12 = rdft_w[5];
+  f13 = rdft_wk3ri_first[2];
+  f14 = rdft_wk3ri_first[3];
+
+  __asm __volatile (
+    ".set       push                                                    \n\t"
+    ".set       noreorder                                               \n\t"
+    "addiu      %[tmp_a],     %[a],         256                         \n\t"
+    "addiu      %[count],     $zero,        4                           \n\t"
+   "1:                                                                  \n\t"
+    "addiu      %[count],     %[count],     -1                          \n\t"
+    "lwc1       %[f0],        0(%[tmp_a])                               \n\t"
+    "lwc1       %[f2],        32(%[tmp_a])                              \n\t"
+    "lwc1       %[f4],        64(%[tmp_a])                              \n\t"
+    "lwc1       %[f6],        96(%[tmp_a])                              \n\t"
+    "lwc1       %[f1],        4(%[tmp_a])                               \n\t"
+    "lwc1       %[f3],        36(%[tmp_a])                              \n\t"
+    "lwc1       %[f5],        68(%[tmp_a])                              \n\t"
+    "lwc1       %[f7],        100(%[tmp_a])                             \n\t"
+    "add.s      %[f8],        %[f0],        %[f2]                       \n\t"
+    "sub.s      %[f0],        %[f0],        %[f2]                       \n\t"
+    "add.s      %[f2],        %[f4],        %[f6]                       \n\t"
+    "sub.s      %[f4],        %[f4],        %[f6]                       \n\t"
+    "add.s      %[f6],        %[f1],        %[f3]                       \n\t"
+    "sub.s      %[f1],        %[f1],        %[f3]                       \n\t"
+    "add.s      %[f3],        %[f5],        %[f7]                       \n\t"
+    "sub.s      %[f5],        %[f5],        %[f7]                       \n\t"
+    "sub.s      %[f7],        %[f8],        %[f2]                       \n\t"
+    "add.s      %[f8],        %[f8],        %[f2]                       \n\t"
+    "add.s      %[f2],        %[f1],        %[f4]                       \n\t"
+    "sub.s      %[f1],        %[f1],        %[f4]                       \n\t"
+    "sub.s      %[f4],        %[f6],        %[f3]                       \n\t"
+    "add.s      %[f6],        %[f6],        %[f3]                       \n\t"
+    "sub.s      %[f3],        %[f0],        %[f5]                       \n\t"
+    "add.s      %[f0],        %[f0],        %[f5]                       \n\t"
+    "swc1       %[f8],        0(%[tmp_a])                               \n\t"
+    "swc1       %[f6],        4(%[tmp_a])                               \n\t"
+    "mul.s      %[f5],        %[f9],        %[f7]                       \n\t"
+#if defined(MIPS32_R2_LE)
+    "mul.s      %[f7],        %[f10],       %[f7]                       \n\t"
+    "mul.s      %[f8],        %[f11],       %[f3]                       \n\t"
+    "mul.s      %[f3],        %[f12],       %[f3]                       \n\t"
+    "mul.s      %[f6],        %[f13],       %[f0]                       \n\t"
+    "mul.s      %[f0],        %[f14],       %[f0]                       \n\t"
+    "nmsub.s    %[f5],        %[f5],        %[f10],       %[f4]         \n\t"
+    "madd.s     %[f7],        %[f7],        %[f9],        %[f4]         \n\t"
+    "nmsub.s    %[f8],        %[f8],        %[f12],       %[f2]         \n\t"
+    "madd.s     %[f3],        %[f3],        %[f11],       %[f2]         \n\t"
+    "nmsub.s    %[f6],        %[f6],        %[f14],       %[f1]         \n\t"
+    "madd.s     %[f0],        %[f0],        %[f13],       %[f1]         \n\t"
+    "swc1       %[f5],        64(%[tmp_a])                              \n\t"
+    "swc1       %[f7],        68(%[tmp_a])                              \n\t"
+#else
+    "mul.s      %[f8],        %[f10],       %[f4]                       \n\t"
+    "mul.s      %[f4],        %[f9],        %[f4]                       \n\t"
+    "mul.s      %[f7],        %[f10],       %[f7]                       \n\t"
+    "mul.s      %[f6],        %[f11],       %[f3]                       \n\t"
+    "mul.s      %[f3],        %[f12],       %[f3]                       \n\t"
+    "sub.s      %[f5],        %[f5],        %[f8]                       \n\t"
+    "mul.s      %[f8],        %[f12],       %[f2]                       \n\t"
+    "mul.s      %[f2],        %[f11],       %[f2]                       \n\t"
+    "add.s      %[f7],        %[f4],        %[f7]                       \n\t"
+    "mul.s      %[f4],        %[f13],       %[f0]                       \n\t"
+    "mul.s      %[f0],        %[f14],       %[f0]                       \n\t"
+    "sub.s      %[f8],        %[f6],        %[f8]                       \n\t"
+    "mul.s      %[f6],        %[f14],       %[f1]                       \n\t"
+    "mul.s      %[f1],        %[f13],       %[f1]                       \n\t"
+    "add.s      %[f3],        %[f2],        %[f3]                       \n\t"
+    "swc1       %[f5],        64(%[tmp_a])                              \n\t"
+    "swc1       %[f7],        68(%[tmp_a])                              \n\t"
+    "sub.s      %[f6],        %[f4],        %[f6]                       \n\t"
+    "add.s      %[f0],        %[f1],        %[f0]                       \n\t"
+#endif
+    "swc1       %[f8],        32(%[tmp_a])                              \n\t"
+    "swc1       %[f3],        36(%[tmp_a])                              \n\t"
+    "swc1       %[f6],        96(%[tmp_a])                              \n\t"
+    "swc1       %[f0],        100(%[tmp_a])                             \n\t"
+    "bgtz       %[count],     1b                                        \n\t"
+    " addiu     %[tmp_a],     %[tmp_a],     8                           \n\t"
+    ".set       pop                                                     \n\t"
+    : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2), [f3] "=&f" (f3),
+      [f4] "=&f" (f4), [f5] "=&f" (f5), [f6] "=&f" (f6), [f7] "=&f" (f7),
+      [f8] "=&f" (f8), [tmp_a] "=&r" (tmp_a), [count] "=&r" (count)
+    : [a] "r" (a),  [f9] "f" (f9), [f10] "f" (f10), [f11] "f" (f11),
+      [f12] "f" (f12), [f13] "f" (f13), [f14] "f" (f14)
+    : "memory"
+  );
+  f11 = rdft_w[6];
+  f12 = rdft_w[7];
+  f13 = rdft_wk3ri_second[2];
+  f14 = rdft_wk3ri_second[3];
+  __asm __volatile (
+    ".set       push                                                       \n\t"
+    ".set       noreorder                                                  \n\t"
+    "addiu      %[tmp_a],       %[a],           384                        \n\t"
+    "addiu      %[count],       $zero,          4                          \n\t"
+   "1:                                                                     \n\t"
+    "addiu      %[count],       %[count],       -1                         \n\t"
+    "lwc1       %[f0],          0(%[tmp_a])                                \n\t"
+    "lwc1       %[f1],          4(%[tmp_a])                                \n\t"
+    "lwc1       %[f2],          32(%[tmp_a])                               \n\t"
+    "lwc1       %[f3],          36(%[tmp_a])                               \n\t"
+    "lwc1       %[f4],          64(%[tmp_a])                               \n\t"
+    "lwc1       %[f5],          68(%[tmp_a])                               \n\t"
+    "lwc1       %[f6],          96(%[tmp_a])                               \n\t"
+    "lwc1       %[f7],          100(%[tmp_a])                              \n\t"
+    "add.s      %[f8],          %[f0],          %[f2]                      \n\t"
+    "sub.s      %[f0],          %[f0],          %[f2]                      \n\t"
+    "add.s      %[f2],          %[f4],          %[f6]                      \n\t"
+    "sub.s      %[f4],          %[f4],          %[f6]                      \n\t"
+    "add.s      %[f6],          %[f1],          %[f3]                      \n\t"
+    "sub.s      %[f1],          %[f1],          %[f3]                      \n\t"
+    "add.s      %[f3],          %[f5],          %[f7]                      \n\t"
+    "sub.s      %[f5],          %[f5],          %[f7]                      \n\t"
+    "sub.s      %[f7],          %[f2],          %[f8]                      \n\t"
+    "add.s      %[f2],          %[f2],          %[f8]                      \n\t"
+    "add.s      %[f8],          %[f1],          %[f4]                      \n\t"
+    "sub.s      %[f1],          %[f1],          %[f4]                      \n\t"
+    "sub.s      %[f4],          %[f3],          %[f6]                      \n\t"
+    "add.s      %[f3],          %[f3],          %[f6]                      \n\t"
+    "sub.s      %[f6],          %[f0],          %[f5]                      \n\t"
+    "add.s      %[f0],          %[f0],          %[f5]                      \n\t"
+    "swc1       %[f2],          0(%[tmp_a])                                \n\t"
+    "swc1       %[f3],          4(%[tmp_a])                                \n\t"
+    "mul.s      %[f5],          %[f10],         %[f7]                      \n\t"
+#if defined(MIPS32_R2_LE)
+    "mul.s      %[f7],          %[f9],          %[f7]                      \n\t"
+    "mul.s      %[f2],          %[f12],         %[f8]                      \n\t"
+    "mul.s      %[f8],          %[f11],         %[f8]                      \n\t"
+    "mul.s      %[f3],          %[f14],         %[f1]                      \n\t"
+    "mul.s      %[f1],          %[f13],         %[f1]                      \n\t"
+    "madd.s     %[f5],          %[f5],          %[f9],       %[f4]         \n\t"
+    "msub.s     %[f7],          %[f7],          %[f10],      %[f4]         \n\t"
+    "msub.s     %[f2],          %[f2],          %[f11],      %[f6]         \n\t"
+    "madd.s     %[f8],          %[f8],          %[f12],      %[f6]         \n\t"
+    "msub.s     %[f3],          %[f3],          %[f13],      %[f0]         \n\t"
+    "madd.s     %[f1],          %[f1],          %[f14],      %[f0]         \n\t"
+    "swc1       %[f5],          64(%[tmp_a])                               \n\t"
+    "swc1       %[f7],          68(%[tmp_a])                               \n\t"
+#else
+    "mul.s      %[f2],          %[f9],          %[f4]                      \n\t"
+    "mul.s      %[f4],          %[f10],         %[f4]                      \n\t"
+    "mul.s      %[f7],          %[f9],          %[f7]                      \n\t"
+    "mul.s      %[f3],          %[f11],         %[f6]                      \n\t"
+    "mul.s      %[f6],          %[f12],         %[f6]                      \n\t"
+    "add.s      %[f5],          %[f5],          %[f2]                      \n\t"
+    "sub.s      %[f7],          %[f4],          %[f7]                      \n\t"
+    "mul.s      %[f2],          %[f12],         %[f8]                      \n\t"
+    "mul.s      %[f8],          %[f11],         %[f8]                      \n\t"
+    "mul.s      %[f4],          %[f14],         %[f1]                      \n\t"
+    "mul.s      %[f1],          %[f13],         %[f1]                      \n\t"
+    "sub.s      %[f2],          %[f3],          %[f2]                      \n\t"
+    "mul.s      %[f3],          %[f13],         %[f0]                      \n\t"
+    "mul.s      %[f0],          %[f14],         %[f0]                      \n\t"
+    "add.s      %[f8],          %[f8],          %[f6]                      \n\t"
+    "swc1       %[f5],          64(%[tmp_a])                               \n\t"
+    "swc1       %[f7],          68(%[tmp_a])                               \n\t"
+    "sub.s      %[f3],          %[f3],          %[f4]                      \n\t"
+    "add.s      %[f1],          %[f1],          %[f0]                      \n\t"
+#endif
+    "swc1       %[f2],          32(%[tmp_a])                               \n\t"
+    "swc1       %[f8],          36(%[tmp_a])                               \n\t"
+    "swc1       %[f3],          96(%[tmp_a])                               \n\t"
+    "swc1       %[f1],          100(%[tmp_a])                              \n\t"
+    "bgtz       %[count],       1b                                         \n\t"
+    " addiu     %[tmp_a],       %[tmp_a],       8                          \n\t"
+    ".set       pop                                                        \n\t"
+    : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2), [f3] "=&f" (f3),
+      [f4] "=&f" (f4), [f5] "=&f" (f5), [f6] "=&f" (f6), [f7] "=&f" (f7),
+      [f8] "=&f" (f8), [tmp_a] "=&r" (tmp_a), [count] "=&r" (count)
+    : [a] "r" (a), [f9] "f" (f9), [f10] "f" (f10), [f11] "f" (f11),
+      [f12] "f" (f12), [f13] "f" (f13), [f14] "f" (f14)
+    : "memory"
+  );
+}
+
+void cftfsub_128_mips(float* a) {
+  float f0, f1, f2, f3, f4, f5, f6, f7, f8;
+  int tmp_a, count;
+
+  cft1st_128_mips(a);
+  cftmdl_128_mips(a);
+
+  __asm __volatile (
+    ".set       push                                      \n\t"
+    ".set       noreorder                                 \n\t"
+    "addiu      %[tmp_a],       %[a],         0           \n\t"
+    "addiu      %[count],       $zero,        16          \n\t"
+   "1:                                                    \n\t"
+    "addiu      %[count],       %[count],     -1          \n\t"
+    "lwc1       %[f0],          0(%[tmp_a])               \n\t"
+    "lwc1       %[f2],          128(%[tmp_a])             \n\t"
+    "lwc1       %[f4],          256(%[tmp_a])             \n\t"
+    "lwc1       %[f6],          384(%[tmp_a])             \n\t"
+    "lwc1       %[f1],          4(%[tmp_a])               \n\t"
+    "lwc1       %[f3],          132(%[tmp_a])             \n\t"
+    "lwc1       %[f5],          260(%[tmp_a])             \n\t"
+    "lwc1       %[f7],          388(%[tmp_a])             \n\t"
+    "add.s      %[f8],          %[f0],        %[f2]       \n\t"
+    "sub.s      %[f0],          %[f0],        %[f2]       \n\t"
+    "add.s      %[f2],          %[f4],        %[f6]       \n\t"
+    "sub.s      %[f4],          %[f4],        %[f6]       \n\t"
+    "add.s      %[f6],          %[f1],        %[f3]       \n\t"
+    "sub.s      %[f1],          %[f1],        %[f3]       \n\t"
+    "add.s      %[f3],          %[f5],        %[f7]       \n\t"
+    "sub.s      %[f5],          %[f5],        %[f7]       \n\t"
+    "add.s      %[f7],          %[f8],        %[f2]       \n\t"
+    "sub.s      %[f8],          %[f8],        %[f2]       \n\t"
+    "add.s      %[f2],          %[f1],        %[f4]       \n\t"
+    "sub.s      %[f1],          %[f1],        %[f4]       \n\t"
+    "add.s      %[f4],          %[f6],        %[f3]       \n\t"
+    "sub.s      %[f6],          %[f6],        %[f3]       \n\t"
+    "sub.s      %[f3],          %[f0],        %[f5]       \n\t"
+    "add.s      %[f0],          %[f0],        %[f5]       \n\t"
+    "swc1       %[f7],          0(%[tmp_a])               \n\t"
+    "swc1       %[f8],          256(%[tmp_a])             \n\t"
+    "swc1       %[f2],          132(%[tmp_a])             \n\t"
+    "swc1       %[f1],          388(%[tmp_a])             \n\t"
+    "swc1       %[f4],          4(%[tmp_a])               \n\t"
+    "swc1       %[f6],          260(%[tmp_a])             \n\t"
+    "swc1       %[f3],          128(%[tmp_a])             \n\t"
+    "swc1       %[f0],          384(%[tmp_a])             \n\t"
+    "bgtz       %[count],       1b                        \n\t"
+    " addiu     %[tmp_a],       %[tmp_a],   8             \n\t"
+    ".set       pop                                       \n\t"
+    : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2), [f3] "=&f" (f3),
+      [f4] "=&f" (f4), [f5] "=&f" (f5), [f6] "=&f" (f6), [f7] "=&f" (f7),
+      [f8] "=&f" (f8), [tmp_a] "=&r" (tmp_a),
+      [count] "=&r" (count)
+    : [a] "r" (a)
+    : "memory"
+  );
+}
+
+void cftbsub_128_mips(float* a) {
+  float f0, f1, f2, f3, f4, f5, f6, f7, f8;
+  int tmp_a, count;
+
+  cft1st_128_mips(a);
+  cftmdl_128_mips(a);
+
+  __asm __volatile (
+    ".set       push                                        \n\t"
+    ".set       noreorder                                   \n\t"
+    "addiu      %[tmp_a],   %[a],           0               \n\t"
+    "addiu      %[count],   $zero,          16              \n\t"
+   "1:                                                      \n\t"
+    "addiu      %[count],   %[count],       -1              \n\t"
+    "lwc1       %[f0],      0(%[tmp_a])                     \n\t"
+    "lwc1       %[f2],      128(%[tmp_a])                   \n\t"
+    "lwc1       %[f4],      256(%[tmp_a])                   \n\t"
+    "lwc1       %[f6],      384(%[tmp_a])                   \n\t"
+    "lwc1       %[f1],      4(%[tmp_a])                     \n\t"
+    "lwc1       %[f3],      132(%[tmp_a])                   \n\t"
+    "lwc1       %[f5],      260(%[tmp_a])                   \n\t"
+    "lwc1       %[f7],      388(%[tmp_a])                   \n\t"
+    "add.s      %[f8],      %[f0],          %[f2]           \n\t"
+    "sub.s      %[f0],      %[f0],          %[f2]           \n\t"
+    "add.s      %[f2],      %[f4],          %[f6]           \n\t"
+    "sub.s      %[f4],      %[f4],          %[f6]           \n\t"
+    "add.s      %[f6],      %[f1],          %[f3]           \n\t"
+    "sub.s      %[f1],      %[f3],          %[f1]           \n\t"
+    "add.s      %[f3],      %[f5],          %[f7]           \n\t"
+    "sub.s      %[f5],      %[f5],          %[f7]           \n\t"
+    "add.s      %[f7],      %[f8],          %[f2]           \n\t"
+    "sub.s      %[f8],      %[f8],          %[f2]           \n\t"
+    "sub.s      %[f2],      %[f1],          %[f4]           \n\t"
+    "add.s      %[f1],      %[f1],          %[f4]           \n\t"
+    "add.s      %[f4],      %[f3],          %[f6]           \n\t"
+    "sub.s      %[f6],      %[f3],          %[f6]           \n\t"
+    "sub.s      %[f3],      %[f0],          %[f5]           \n\t"
+    "add.s      %[f0],      %[f0],          %[f5]           \n\t"
+    "neg.s      %[f4],      %[f4]                           \n\t"
+    "swc1       %[f7],      0(%[tmp_a])                     \n\t"
+    "swc1       %[f8],      256(%[tmp_a])                   \n\t"
+    "swc1       %[f2],      132(%[tmp_a])                   \n\t"
+    "swc1       %[f1],      388(%[tmp_a])                   \n\t"
+    "swc1       %[f6],      260(%[tmp_a])                   \n\t"
+    "swc1       %[f3],      128(%[tmp_a])                   \n\t"
+    "swc1       %[f0],      384(%[tmp_a])                   \n\t"
+    "swc1       %[f4],       4(%[tmp_a])                     \n\t"
+    "bgtz       %[count],   1b                              \n\t"
+    " addiu     %[tmp_a],   %[tmp_a],       8               \n\t"
+    ".set       pop                                         \n\t"
+    : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2), [f3] "=&f" (f3),
+      [f4] "=&f" (f4), [f5] "=&f" (f5), [f6] "=&f" (f6), [f7] "=&f" (f7),
+      [f8] "=&f" (f8), [tmp_a] "=&r" (tmp_a), [count] "=&r" (count)
+    : [a] "r" (a)
+    : "memory"
+  );
+}
+
+void rftfsub_128_mips(float* a) {
+  const float* c = rdft_w + 32;
+  const float f0 = 0.5f;
+  float* a1 = &a[2];
+  float* a2 = &a[126];
+  const float* c1 = &c[1];
+  const float* c2 = &c[31];
+  float f1, f2, f3 ,f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15;
+  int count;
+
+  __asm __volatile (
+    ".set      push                                             \n\t"
+    ".set      noreorder                                        \n\t"
+    "lwc1      %[f6],       0(%[c2])                            \n\t"
+    "lwc1      %[f1],       0(%[a1])                            \n\t"
+    "lwc1      %[f2],       0(%[a2])                            \n\t"
+    "lwc1      %[f3],       4(%[a1])                            \n\t"
+    "lwc1      %[f4],       4(%[a2])                            \n\t"
+    "lwc1      %[f5],       0(%[c1])                            \n\t"
+    "sub.s     %[f6],       %[f0],        %[f6]                 \n\t"
+    "sub.s     %[f7],       %[f1],        %[f2]                 \n\t"
+    "add.s     %[f8],       %[f3],        %[f4]                 \n\t"
+    "addiu     %[count],    $zero,        15                    \n\t"
+    "mul.s     %[f9],       %[f6],        %[f7]                 \n\t"
+    "mul.s     %[f6],       %[f6],        %[f8]                 \n\t"
+#if !defined(MIPS32_R2_LE)
+    "mul.s     %[f8],       %[f5],        %[f8]                 \n\t"
+    "mul.s     %[f5],       %[f5],        %[f7]                 \n\t"
+    "sub.s     %[f9],       %[f9],        %[f8]                 \n\t"
+    "add.s     %[f6],       %[f6],        %[f5]                 \n\t"
+#else
+    "nmsub.s   %[f9],       %[f9],        %[f5],      %[f8]     \n\t"
+    "madd.s    %[f6],       %[f6],        %[f5],      %[f7]     \n\t"
+#endif
+    "sub.s     %[f1],       %[f1],        %[f9]                 \n\t"
+    "add.s     %[f2],       %[f2],        %[f9]                 \n\t"
+    "sub.s     %[f3],       %[f3],        %[f6]                 \n\t"
+    "sub.s     %[f4],       %[f4],        %[f6]                 \n\t"
+    "swc1      %[f1],       0(%[a1])                            \n\t"
+    "swc1      %[f2],       0(%[a2])                            \n\t"
+    "swc1      %[f3],       4(%[a1])                            \n\t"
+    "swc1      %[f4],       4(%[a2])                            \n\t"
+    "addiu     %[a1],       %[a1],        8                     \n\t"
+    "addiu     %[a2],       %[a2],        -8                    \n\t"
+    "addiu     %[c1],       %[c1],        4                     \n\t"
+    "addiu     %[c2],       %[c2],        -4                    \n\t"
+   "1:                                                          \n\t"
+    "lwc1      %[f6],       0(%[c2])                            \n\t"
+    "lwc1      %[f1],       0(%[a1])                            \n\t"
+    "lwc1      %[f2],       0(%[a2])                            \n\t"
+    "lwc1      %[f3],       4(%[a1])                            \n\t"
+    "lwc1      %[f4],       4(%[a2])                            \n\t"
+    "lwc1      %[f5],       0(%[c1])                            \n\t"
+    "sub.s     %[f6],       %[f0],        %[f6]                 \n\t"
+    "sub.s     %[f7],       %[f1],        %[f2]                 \n\t"
+    "add.s     %[f8],       %[f3],        %[f4]                 \n\t"
+    "lwc1      %[f10],      -4(%[c2])                           \n\t"
+    "lwc1      %[f11],      8(%[a1])                            \n\t"
+    "lwc1      %[f12],      -8(%[a2])                           \n\t"
+    "mul.s     %[f9],       %[f6],        %[f7]                 \n\t"
+    "mul.s     %[f6],       %[f6],        %[f8]                 \n\t"
+#if !defined(MIPS32_R2_LE)
+    "mul.s     %[f8],       %[f5],        %[f8]                 \n\t"
+    "mul.s     %[f5],       %[f5],        %[f7]                 \n\t"
+    "lwc1      %[f13],      12(%[a1])                           \n\t"
+    "lwc1      %[f14],      -4(%[a2])                           \n\t"
+    "lwc1      %[f15],      4(%[c1])                            \n\t"
+    "sub.s     %[f9],       %[f9],        %[f8]                 \n\t"
+    "add.s     %[f6],       %[f6],        %[f5]                 \n\t"
+#else
+    "lwc1      %[f13],      12(%[a1])                           \n\t"
+    "lwc1      %[f14],      -4(%[a2])                           \n\t"
+    "lwc1      %[f15],      4(%[c1])                            \n\t"
+    "nmsub.s   %[f9],       %[f9],        %[f5],      %[f8]     \n\t"
+    "madd.s    %[f6],       %[f6],        %[f5],      %[f7]     \n\t"
+#endif
+    "sub.s     %[f10],      %[f0],        %[f10]                \n\t"
+    "sub.s     %[f5],       %[f11],       %[f12]                \n\t"
+    "add.s     %[f7],       %[f13],       %[f14]                \n\t"
+    "sub.s     %[f1],       %[f1],        %[f9]                 \n\t"
+    "add.s     %[f2],       %[f2],        %[f9]                 \n\t"
+    "sub.s     %[f3],       %[f3],        %[f6]                 \n\t"
+    "mul.s     %[f8],       %[f10],       %[f5]                 \n\t"
+    "mul.s     %[f10],      %[f10],       %[f7]                 \n\t"
+#if !defined(MIPS32_R2_LE)
+    "mul.s     %[f9],       %[f15],       %[f7]                 \n\t"
+    "mul.s     %[f15],      %[f15],       %[f5]                 \n\t"
+    "sub.s     %[f4],       %[f4],        %[f6]                 \n\t"
+    "swc1      %[f1],       0(%[a1])                            \n\t"
+    "swc1      %[f2],       0(%[a2])                            \n\t"
+    "sub.s     %[f8],       %[f8],        %[f9]                 \n\t"
+    "add.s     %[f10],      %[f10],       %[f15]                \n\t"
+#else
+    "swc1      %[f1],       0(%[a1])                            \n\t"
+    "swc1      %[f2],       0(%[a2])                            \n\t"
+    "sub.s     %[f4],       %[f4],        %[f6]                 \n\t"
+    "nmsub.s   %[f8],       %[f8],        %[f15],     %[f7]     \n\t"
+    "madd.s    %[f10],      %[f10],       %[f15],     %[f5]     \n\t"
+#endif
+    "swc1      %[f3],       4(%[a1])                            \n\t"
+    "swc1      %[f4],       4(%[a2])                            \n\t"
+    "sub.s     %[f11],      %[f11],       %[f8]                 \n\t"
+    "add.s     %[f12],      %[f12],       %[f8]                 \n\t"
+    "sub.s     %[f13],      %[f13],       %[f10]                \n\t"
+    "sub.s     %[f14],      %[f14],       %[f10]                \n\t"
+    "addiu     %[c2],       %[c2],        -8                    \n\t"
+    "addiu     %[c1],       %[c1],        8                     \n\t"
+    "swc1      %[f11],      8(%[a1])                            \n\t"
+    "swc1      %[f12],      -8(%[a2])                           \n\t"
+    "swc1      %[f13],      12(%[a1])                           \n\t"
+    "swc1      %[f14],      -4(%[a2])                           \n\t"
+    "addiu     %[a1],       %[a1],        16                    \n\t"
+    "addiu     %[count],    %[count],     -1                    \n\t"
+    "bgtz      %[count],    1b                                  \n\t"
+    " addiu    %[a2],       %[a2],        -16                   \n\t"
+    ".set      pop                                              \n\t"
+    : [a1] "+r" (a1), [a2] "+r" (a2), [c1] "+r" (c1), [c2] "+r" (c2),
+      [f1] "=&f" (f1), [f2] "=&f" (f2), [f3] "=&f" (f3), [f4] "=&f" (f4),
+      [f5] "=&f" (f5), [f6] "=&f" (f6), [f7] "=&f" (f7), [f8] "=&f" (f8),
+      [f9] "=&f" (f9), [f10] "=&f" (f10), [f11] "=&f" (f11), [f12] "=&f" (f12),
+      [f13] "=&f" (f13), [f14] "=&f" (f14), [f15] "=&f" (f15),
+      [count] "=&r" (count)
+    : [f0] "f" (f0)
+    : "memory"
+  );
+}
+
+void rftbsub_128_mips(float* a) {
+  const float *c = rdft_w + 32;
+  const float f0 = 0.5f;
+  float* a1 = &a[2];
+  float* a2 = &a[126];
+  const float* c1 = &c[1];
+  const float* c2 = &c[31];
+  float f1, f2, f3 ,f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15;
+  int count;
+
+  a[1] = -a[1];
+  a[65] = -a[65];
+
+  __asm __volatile (
+    ".set      push                                             \n\t"
+    ".set      noreorder                                        \n\t"
+    "lwc1      %[f6],       0(%[c2])                            \n\t"
+    "lwc1      %[f1],       0(%[a1])                            \n\t"
+    "lwc1      %[f2],       0(%[a2])                            \n\t"
+    "lwc1      %[f3],       4(%[a1])                            \n\t"
+    "lwc1      %[f4],       4(%[a2])                            \n\t"
+    "lwc1      %[f5],       0(%[c1])                            \n\t"
+    "sub.s     %[f6],       %[f0],        %[f6]                 \n\t"
+    "sub.s     %[f7],       %[f1],        %[f2]                 \n\t"
+    "add.s     %[f8],       %[f3],        %[f4]                 \n\t"
+    "addiu     %[count],    $zero,        15                    \n\t"
+    "mul.s     %[f9],       %[f6],        %[f7]                 \n\t"
+    "mul.s     %[f6],       %[f6],        %[f8]                 \n\t"
+#if !defined(MIPS32_R2_LE)
+    "mul.s     %[f8],       %[f5],        %[f8]                 \n\t"
+    "mul.s     %[f5],       %[f5],        %[f7]                 \n\t"
+    "add.s     %[f9],       %[f9],        %[f8]                 \n\t"
+    "sub.s     %[f6],       %[f6],        %[f5]                 \n\t"
+#else
+    "madd.s    %[f9],       %[f9],        %[f5],      %[f8]     \n\t"
+    "nmsub.s   %[f6],       %[f6],        %[f5],      %[f7]     \n\t"
+#endif
+    "sub.s     %[f1],       %[f1],        %[f9]                 \n\t"
+    "add.s     %[f2],       %[f2],        %[f9]                 \n\t"
+    "sub.s     %[f3],       %[f6],        %[f3]                 \n\t"
+    "sub.s     %[f4],       %[f6],        %[f4]                 \n\t"
+    "swc1      %[f1],       0(%[a1])                            \n\t"
+    "swc1      %[f2],       0(%[a2])                            \n\t"
+    "swc1      %[f3],       4(%[a1])                            \n\t"
+    "swc1      %[f4],       4(%[a2])                            \n\t"
+    "addiu     %[a1],       %[a1],        8                     \n\t"
+    "addiu     %[a2],       %[a2],        -8                    \n\t"
+    "addiu     %[c1],       %[c1],        4                     \n\t"
+    "addiu     %[c2],       %[c2],        -4                    \n\t"
+   "1:                                                          \n\t"
+    "lwc1      %[f6],       0(%[c2])                            \n\t"
+    "lwc1      %[f1],       0(%[a1])                            \n\t"
+    "lwc1      %[f2],       0(%[a2])                            \n\t"
+    "lwc1      %[f3],       4(%[a1])                            \n\t"
+    "lwc1      %[f4],       4(%[a2])                            \n\t"
+    "lwc1      %[f5],       0(%[c1])                            \n\t"
+    "sub.s     %[f6],       %[f0],        %[f6]                 \n\t"
+    "sub.s     %[f7],       %[f1],        %[f2]                 \n\t"
+    "add.s     %[f8],       %[f3],        %[f4]                 \n\t"
+    "lwc1      %[f10],      -4(%[c2])                           \n\t"
+    "lwc1      %[f11],      8(%[a1])                            \n\t"
+    "lwc1      %[f12],      -8(%[a2])                           \n\t"
+    "mul.s     %[f9],       %[f6],        %[f7]                 \n\t"
+    "mul.s     %[f6],       %[f6],        %[f8]                 \n\t"
+#if !defined(MIPS32_R2_LE)
+    "mul.s     %[f8],       %[f5],        %[f8]                 \n\t"
+    "mul.s     %[f5],       %[f5],        %[f7]                 \n\t"
+    "lwc1      %[f13],      12(%[a1])                           \n\t"
+    "lwc1      %[f14],      -4(%[a2])                           \n\t"
+    "lwc1      %[f15],      4(%[c1])                            \n\t"
+    "add.s     %[f9],       %[f9],        %[f8]                 \n\t"
+    "sub.s     %[f6],       %[f6],        %[f5]                 \n\t"
+#else
+    "lwc1      %[f13],      12(%[a1])                           \n\t"
+    "lwc1      %[f14],      -4(%[a2])                           \n\t"
+    "lwc1      %[f15],      4(%[c1])                            \n\t"
+    "madd.s    %[f9],       %[f9],        %[f5],      %[f8]     \n\t"
+    "nmsub.s   %[f6],       %[f6],        %[f5],      %[f7]     \n\t"
+#endif
+    "sub.s     %[f10],      %[f0],        %[f10]                \n\t"
+    "sub.s     %[f5],       %[f11],       %[f12]                \n\t"
+    "add.s     %[f7],       %[f13],       %[f14]                \n\t"
+    "sub.s     %[f1],       %[f1],        %[f9]                 \n\t"
+    "add.s     %[f2],       %[f2],        %[f9]                 \n\t"
+    "sub.s     %[f3],       %[f6],        %[f3]                 \n\t"
+    "mul.s     %[f8],       %[f10],       %[f5]                 \n\t"
+    "mul.s     %[f10],      %[f10],       %[f7]                 \n\t"
+#if !defined(MIPS32_R2_LE)
+    "mul.s     %[f9],       %[f15],       %[f7]                 \n\t"
+    "mul.s     %[f15],      %[f15],       %[f5]                 \n\t"
+    "sub.s     %[f4],       %[f6],        %[f4]                 \n\t"
+    "swc1      %[f1],       0(%[a1])                            \n\t"
+    "swc1      %[f2],       0(%[a2])                            \n\t"
+    "add.s     %[f8],       %[f8],        %[f9]                 \n\t"
+    "sub.s     %[f10],      %[f10],       %[f15]                \n\t"
+#else
+    "swc1      %[f1],       0(%[a1])                            \n\t"
+    "swc1      %[f2],       0(%[a2])                            \n\t"
+    "sub.s     %[f4],       %[f6],        %[f4]                 \n\t"
+    "madd.s    %[f8],       %[f8],        %[f15],     %[f7]     \n\t"
+    "nmsub.s   %[f10],      %[f10],       %[f15],     %[f5]     \n\t"
+#endif
+    "swc1      %[f3],       4(%[a1])                            \n\t"
+    "swc1      %[f4],       4(%[a2])                            \n\t"
+    "sub.s     %[f11],      %[f11],       %[f8]                 \n\t"
+    "add.s     %[f12],      %[f12],       %[f8]                 \n\t"
+    "sub.s     %[f13],      %[f10],       %[f13]                \n\t"
+    "sub.s     %[f14],      %[f10],       %[f14]                \n\t"
+    "addiu     %[c2],       %[c2],        -8                    \n\t"
+    "addiu     %[c1],       %[c1],        8                     \n\t"
+    "swc1      %[f11],      8(%[a1])                            \n\t"
+    "swc1      %[f12],      -8(%[a2])                           \n\t"
+    "swc1      %[f13],      12(%[a1])                           \n\t"
+    "swc1      %[f14],      -4(%[a2])                           \n\t"
+    "addiu     %[a1],       %[a1],        16                    \n\t"
+    "addiu     %[count],    %[count],     -1                    \n\t"
+    "bgtz      %[count],    1b                                  \n\t"
+    " addiu    %[a2],       %[a2],        -16                   \n\t"
+    ".set      pop                                              \n\t"
+    : [a1] "+r" (a1), [a2] "+r" (a2), [c1] "+r" (c1), [c2] "+r" (c2),
+      [f1] "=&f" (f1), [f2] "=&f" (f2), [f3] "=&f" (f3), [f4] "=&f" (f4),
+      [f5] "=&f" (f5), [f6] "=&f" (f6), [f7] "=&f" (f7), [f8] "=&f" (f8),
+      [f9] "=&f" (f9), [f10] "=&f" (f10), [f11] "=&f" (f11), [f12] "=&f" (f12),
+      [f13] "=&f" (f13), [f14] "=&f" (f14), [f15] "=&f" (f15),
+      [count] "=&r" (count)
+    : [f0] "f" (f0)
+    : "memory"
+  );
+}
+#endif
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/utility/ooura_fft_neon.cc b/modules/audio_processing/utility/ooura_fft_neon.cc
new file mode 100644
index 0000000..401387a
--- /dev/null
+++ b/modules/audio_processing/utility/ooura_fft_neon.cc
@@ -0,0 +1,352 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * The rdft AEC algorithm, neon version of speed-critical functions.
+ *
+ * Based on the sse2 version.
+ */
+
+#include "modules/audio_processing/utility/ooura_fft.h"
+
+#include <arm_neon.h>
+
+#include "modules/audio_processing/utility/ooura_fft_tables_common.h"
+#include "modules/audio_processing/utility/ooura_fft_tables_neon_sse2.h"
+
+namespace webrtc {
+
+#if defined(WEBRTC_HAS_NEON)
+void cft1st_128_neon(float* a) {
+  const float32x4_t vec_swap_sign = vld1q_f32((float32_t*)k_swap_sign);
+  int j, k2;
+
+  for (k2 = 0, j = 0; j < 128; j += 16, k2 += 4) {
+    float32x4_t a00v = vld1q_f32(&a[j + 0]);
+    float32x4_t a04v = vld1q_f32(&a[j + 4]);
+    float32x4_t a08v = vld1q_f32(&a[j + 8]);
+    float32x4_t a12v = vld1q_f32(&a[j + 12]);
+    float32x4_t a01v = vcombine_f32(vget_low_f32(a00v), vget_low_f32(a08v));
+    float32x4_t a23v = vcombine_f32(vget_high_f32(a00v), vget_high_f32(a08v));
+    float32x4_t a45v = vcombine_f32(vget_low_f32(a04v), vget_low_f32(a12v));
+    float32x4_t a67v = vcombine_f32(vget_high_f32(a04v), vget_high_f32(a12v));
+    const float32x4_t wk1rv = vld1q_f32(&rdft_wk1r[k2]);
+    const float32x4_t wk1iv = vld1q_f32(&rdft_wk1i[k2]);
+    const float32x4_t wk2rv = vld1q_f32(&rdft_wk2r[k2]);
+    const float32x4_t wk2iv = vld1q_f32(&rdft_wk2i[k2]);
+    const float32x4_t wk3rv = vld1q_f32(&rdft_wk3r[k2]);
+    const float32x4_t wk3iv = vld1q_f32(&rdft_wk3i[k2]);
+    float32x4_t x0v = vaddq_f32(a01v, a23v);
+    const float32x4_t x1v = vsubq_f32(a01v, a23v);
+    const float32x4_t x2v = vaddq_f32(a45v, a67v);
+    const float32x4_t x3v = vsubq_f32(a45v, a67v);
+    const float32x4_t x3w = vrev64q_f32(x3v);
+    float32x4_t x0w;
+    a01v = vaddq_f32(x0v, x2v);
+    x0v = vsubq_f32(x0v, x2v);
+    x0w = vrev64q_f32(x0v);
+    a45v = vmulq_f32(wk2rv, x0v);
+    a45v = vmlaq_f32(a45v, wk2iv, x0w);
+    x0v = vmlaq_f32(x1v, x3w, vec_swap_sign);
+    x0w = vrev64q_f32(x0v);
+    a23v = vmulq_f32(wk1rv, x0v);
+    a23v = vmlaq_f32(a23v, wk1iv, x0w);
+    x0v = vmlsq_f32(x1v, x3w, vec_swap_sign);
+    x0w = vrev64q_f32(x0v);
+    a67v = vmulq_f32(wk3rv, x0v);
+    a67v = vmlaq_f32(a67v, wk3iv, x0w);
+    a00v = vcombine_f32(vget_low_f32(a01v), vget_low_f32(a23v));
+    a04v = vcombine_f32(vget_low_f32(a45v), vget_low_f32(a67v));
+    a08v = vcombine_f32(vget_high_f32(a01v), vget_high_f32(a23v));
+    a12v = vcombine_f32(vget_high_f32(a45v), vget_high_f32(a67v));
+    vst1q_f32(&a[j + 0], a00v);
+    vst1q_f32(&a[j + 4], a04v);
+    vst1q_f32(&a[j + 8], a08v);
+    vst1q_f32(&a[j + 12], a12v);
+  }
+}
+
+void cftmdl_128_neon(float* a) {
+  int j;
+  const int l = 8;
+  const float32x4_t vec_swap_sign = vld1q_f32((float32_t*)k_swap_sign);
+  float32x4_t wk1rv = vld1q_f32(cftmdl_wk1r);
+
+  for (j = 0; j < l; j += 2) {
+    const float32x2_t a_00 = vld1_f32(&a[j + 0]);
+    const float32x2_t a_08 = vld1_f32(&a[j + 8]);
+    const float32x2_t a_32 = vld1_f32(&a[j + 32]);
+    const float32x2_t a_40 = vld1_f32(&a[j + 40]);
+    const float32x4_t a_00_32 = vcombine_f32(a_00, a_32);
+    const float32x4_t a_08_40 = vcombine_f32(a_08, a_40);
+    const float32x4_t x0r0_0i0_0r1_x0i1 = vaddq_f32(a_00_32, a_08_40);
+    const float32x4_t x1r0_1i0_1r1_x1i1 = vsubq_f32(a_00_32, a_08_40);
+    const float32x2_t a_16 = vld1_f32(&a[j + 16]);
+    const float32x2_t a_24 = vld1_f32(&a[j + 24]);
+    const float32x2_t a_48 = vld1_f32(&a[j + 48]);
+    const float32x2_t a_56 = vld1_f32(&a[j + 56]);
+    const float32x4_t a_16_48 = vcombine_f32(a_16, a_48);
+    const float32x4_t a_24_56 = vcombine_f32(a_24, a_56);
+    const float32x4_t x2r0_2i0_2r1_x2i1 = vaddq_f32(a_16_48, a_24_56);
+    const float32x4_t x3r0_3i0_3r1_x3i1 = vsubq_f32(a_16_48, a_24_56);
+    const float32x4_t xx0 = vaddq_f32(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);
+    const float32x4_t xx1 = vsubq_f32(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);
+    const float32x4_t x3i0_3r0_3i1_x3r1 = vrev64q_f32(x3r0_3i0_3r1_x3i1);
+    const float32x4_t x1_x3_add =
+        vmlaq_f32(x1r0_1i0_1r1_x1i1, vec_swap_sign, x3i0_3r0_3i1_x3r1);
+    const float32x4_t x1_x3_sub =
+        vmlsq_f32(x1r0_1i0_1r1_x1i1, vec_swap_sign, x3i0_3r0_3i1_x3r1);
+    const float32x2_t yy0_a = vdup_lane_f32(vget_high_f32(x1_x3_add), 0);
+    const float32x2_t yy0_s = vdup_lane_f32(vget_high_f32(x1_x3_sub), 0);
+    const float32x4_t yy0_as = vcombine_f32(yy0_a, yy0_s);
+    const float32x2_t yy1_a = vdup_lane_f32(vget_high_f32(x1_x3_add), 1);
+    const float32x2_t yy1_s = vdup_lane_f32(vget_high_f32(x1_x3_sub), 1);
+    const float32x4_t yy1_as = vcombine_f32(yy1_a, yy1_s);
+    const float32x4_t yy0 = vmlaq_f32(yy0_as, vec_swap_sign, yy1_as);
+    const float32x4_t yy4 = vmulq_f32(wk1rv, yy0);
+    const float32x4_t xx1_rev = vrev64q_f32(xx1);
+    const float32x4_t yy4_rev = vrev64q_f32(yy4);
+
+    vst1_f32(&a[j + 0], vget_low_f32(xx0));
+    vst1_f32(&a[j + 32], vget_high_f32(xx0));
+    vst1_f32(&a[j + 16], vget_low_f32(xx1));
+    vst1_f32(&a[j + 48], vget_high_f32(xx1_rev));
+
+    a[j + 48] = -a[j + 48];
+
+    vst1_f32(&a[j + 8], vget_low_f32(x1_x3_add));
+    vst1_f32(&a[j + 24], vget_low_f32(x1_x3_sub));
+    vst1_f32(&a[j + 40], vget_low_f32(yy4));
+    vst1_f32(&a[j + 56], vget_high_f32(yy4_rev));
+  }
+
+  {
+    const int k = 64;
+    const int k1 = 2;
+    const int k2 = 2 * k1;
+    const float32x4_t wk2rv = vld1q_f32(&rdft_wk2r[k2 + 0]);
+    const float32x4_t wk2iv = vld1q_f32(&rdft_wk2i[k2 + 0]);
+    const float32x4_t wk1iv = vld1q_f32(&rdft_wk1i[k2 + 0]);
+    const float32x4_t wk3rv = vld1q_f32(&rdft_wk3r[k2 + 0]);
+    const float32x4_t wk3iv = vld1q_f32(&rdft_wk3i[k2 + 0]);
+    wk1rv = vld1q_f32(&rdft_wk1r[k2 + 0]);
+    for (j = k; j < l + k; j += 2) {
+      const float32x2_t a_00 = vld1_f32(&a[j + 0]);
+      const float32x2_t a_08 = vld1_f32(&a[j + 8]);
+      const float32x2_t a_32 = vld1_f32(&a[j + 32]);
+      const float32x2_t a_40 = vld1_f32(&a[j + 40]);
+      const float32x4_t a_00_32 = vcombine_f32(a_00, a_32);
+      const float32x4_t a_08_40 = vcombine_f32(a_08, a_40);
+      const float32x4_t x0r0_0i0_0r1_x0i1 = vaddq_f32(a_00_32, a_08_40);
+      const float32x4_t x1r0_1i0_1r1_x1i1 = vsubq_f32(a_00_32, a_08_40);
+      const float32x2_t a_16 = vld1_f32(&a[j + 16]);
+      const float32x2_t a_24 = vld1_f32(&a[j + 24]);
+      const float32x2_t a_48 = vld1_f32(&a[j + 48]);
+      const float32x2_t a_56 = vld1_f32(&a[j + 56]);
+      const float32x4_t a_16_48 = vcombine_f32(a_16, a_48);
+      const float32x4_t a_24_56 = vcombine_f32(a_24, a_56);
+      const float32x4_t x2r0_2i0_2r1_x2i1 = vaddq_f32(a_16_48, a_24_56);
+      const float32x4_t x3r0_3i0_3r1_x3i1 = vsubq_f32(a_16_48, a_24_56);
+      const float32x4_t xx = vaddq_f32(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);
+      const float32x4_t xx1 = vsubq_f32(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);
+      const float32x4_t x3i0_3r0_3i1_x3r1 = vrev64q_f32(x3r0_3i0_3r1_x3i1);
+      const float32x4_t x1_x3_add =
+          vmlaq_f32(x1r0_1i0_1r1_x1i1, vec_swap_sign, x3i0_3r0_3i1_x3r1);
+      const float32x4_t x1_x3_sub =
+          vmlsq_f32(x1r0_1i0_1r1_x1i1, vec_swap_sign, x3i0_3r0_3i1_x3r1);
+      float32x4_t xx4 = vmulq_f32(wk2rv, xx1);
+      float32x4_t xx12 = vmulq_f32(wk1rv, x1_x3_add);
+      float32x4_t xx22 = vmulq_f32(wk3rv, x1_x3_sub);
+      xx4 = vmlaq_f32(xx4, wk2iv, vrev64q_f32(xx1));
+      xx12 = vmlaq_f32(xx12, wk1iv, vrev64q_f32(x1_x3_add));
+      xx22 = vmlaq_f32(xx22, wk3iv, vrev64q_f32(x1_x3_sub));
+
+      vst1_f32(&a[j + 0], vget_low_f32(xx));
+      vst1_f32(&a[j + 32], vget_high_f32(xx));
+      vst1_f32(&a[j + 16], vget_low_f32(xx4));
+      vst1_f32(&a[j + 48], vget_high_f32(xx4));
+      vst1_f32(&a[j + 8], vget_low_f32(xx12));
+      vst1_f32(&a[j + 40], vget_high_f32(xx12));
+      vst1_f32(&a[j + 24], vget_low_f32(xx22));
+      vst1_f32(&a[j + 56], vget_high_f32(xx22));
+    }
+  }
+}
+
+__inline static float32x4_t reverse_order_f32x4(float32x4_t in) {
+  // A B C D -> C D A B
+  const float32x4_t rev = vcombine_f32(vget_high_f32(in), vget_low_f32(in));
+  // C D A B -> D C B A
+  return vrev64q_f32(rev);
+}
+
+void rftfsub_128_neon(float* a) {
+  const float* c = rdft_w + 32;
+  int j1, j2;
+  const float32x4_t mm_half = vdupq_n_f32(0.5f);
+
+  // Vectorized code (four at once).
+  // Note: commented number are indexes for the first iteration of the loop.
+  for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) {
+    // Load 'wk'.
+    const float32x4_t c_j1 = vld1q_f32(&c[j1]);          //  1,  2,  3,  4,
+    const float32x4_t c_k1 = vld1q_f32(&c[29 - j1]);     // 28, 29, 30, 31,
+    const float32x4_t wkrt = vsubq_f32(mm_half, c_k1);   // 28, 29, 30, 31,
+    const float32x4_t wkr_ = reverse_order_f32x4(wkrt);  // 31, 30, 29, 28,
+    const float32x4_t wki_ = c_j1;                       //  1,  2,  3,  4,
+    // Load and shuffle 'a'.
+    //   2,   4,   6,   8,   3,   5,   7,   9
+    float32x4x2_t a_j2_p = vld2q_f32(&a[0 + j2]);
+    // 120, 122, 124, 126, 121, 123, 125, 127,
+    const float32x4x2_t k2_0_4 = vld2q_f32(&a[122 - j2]);
+    // 126, 124, 122, 120
+    const float32x4_t a_k2_p0 = reverse_order_f32x4(k2_0_4.val[0]);
+    // 127, 125, 123, 121
+    const float32x4_t a_k2_p1 = reverse_order_f32x4(k2_0_4.val[1]);
+    // Calculate 'x'.
+    const float32x4_t xr_ = vsubq_f32(a_j2_p.val[0], a_k2_p0);
+    // 2-126, 4-124, 6-122, 8-120,
+    const float32x4_t xi_ = vaddq_f32(a_j2_p.val[1], a_k2_p1);
+    // 3-127, 5-125, 7-123, 9-121,
+    // Calculate product into 'y'.
+    //    yr = wkr * xr - wki * xi;
+    //    yi = wkr * xi + wki * xr;
+    const float32x4_t a_ = vmulq_f32(wkr_, xr_);
+    const float32x4_t b_ = vmulq_f32(wki_, xi_);
+    const float32x4_t c_ = vmulq_f32(wkr_, xi_);
+    const float32x4_t d_ = vmulq_f32(wki_, xr_);
+    const float32x4_t yr_ = vsubq_f32(a_, b_);  // 2-126, 4-124, 6-122, 8-120,
+    const float32x4_t yi_ = vaddq_f32(c_, d_);  // 3-127, 5-125, 7-123, 9-121,
+                                                // Update 'a'.
+                                                //    a[j2 + 0] -= yr;
+                                                //    a[j2 + 1] -= yi;
+                                                //    a[k2 + 0] += yr;
+                                                //    a[k2 + 1] -= yi;
+    // 126, 124, 122, 120,
+    const float32x4_t a_k2_p0n = vaddq_f32(a_k2_p0, yr_);
+    // 127, 125, 123, 121,
+    const float32x4_t a_k2_p1n = vsubq_f32(a_k2_p1, yi_);
+    // Shuffle in right order and store.
+    const float32x4_t a_k2_p0nr = vrev64q_f32(a_k2_p0n);
+    const float32x4_t a_k2_p1nr = vrev64q_f32(a_k2_p1n);
+    // 124, 125, 126, 127, 120, 121, 122, 123
+    const float32x4x2_t a_k2_n = vzipq_f32(a_k2_p0nr, a_k2_p1nr);
+    //   2,   4,   6,   8,
+    a_j2_p.val[0] = vsubq_f32(a_j2_p.val[0], yr_);
+    //   3,   5,   7,   9,
+    a_j2_p.val[1] = vsubq_f32(a_j2_p.val[1], yi_);
+    //   2,   3,   4,   5,   6,   7,   8,   9,
+    vst2q_f32(&a[0 + j2], a_j2_p);
+
+    vst1q_f32(&a[122 - j2], a_k2_n.val[1]);
+    vst1q_f32(&a[126 - j2], a_k2_n.val[0]);
+  }
+
+  // Scalar code for the remaining items.
+  for (; j2 < 64; j1 += 1, j2 += 2) {
+    const int k2 = 128 - j2;
+    const int k1 = 32 - j1;
+    const float wkr = 0.5f - c[k1];
+    const float wki = c[j1];
+    const float xr = a[j2 + 0] - a[k2 + 0];
+    const float xi = a[j2 + 1] + a[k2 + 1];
+    const float yr = wkr * xr - wki * xi;
+    const float yi = wkr * xi + wki * xr;
+    a[j2 + 0] -= yr;
+    a[j2 + 1] -= yi;
+    a[k2 + 0] += yr;
+    a[k2 + 1] -= yi;
+  }
+}
+
+void rftbsub_128_neon(float* a) {
+  const float* c = rdft_w + 32;
+  int j1, j2;
+  const float32x4_t mm_half = vdupq_n_f32(0.5f);
+
+  a[1] = -a[1];
+  // Vectorized code (four at once).
+  //    Note: commented number are indexes for the first iteration of the loop.
+  for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) {
+    // Load 'wk'.
+    const float32x4_t c_j1 = vld1q_f32(&c[j1]);          //  1,  2,  3,  4,
+    const float32x4_t c_k1 = vld1q_f32(&c[29 - j1]);     // 28, 29, 30, 31,
+    const float32x4_t wkrt = vsubq_f32(mm_half, c_k1);   // 28, 29, 30, 31,
+    const float32x4_t wkr_ = reverse_order_f32x4(wkrt);  // 31, 30, 29, 28,
+    const float32x4_t wki_ = c_j1;                       //  1,  2,  3,  4,
+    // Load and shuffle 'a'.
+    //   2,   4,   6,   8,   3,   5,   7,   9
+    float32x4x2_t a_j2_p = vld2q_f32(&a[0 + j2]);
+    // 120, 122, 124, 126, 121, 123, 125, 127,
+    const float32x4x2_t k2_0_4 = vld2q_f32(&a[122 - j2]);
+    // 126, 124, 122, 120
+    const float32x4_t a_k2_p0 = reverse_order_f32x4(k2_0_4.val[0]);
+    // 127, 125, 123, 121
+    const float32x4_t a_k2_p1 = reverse_order_f32x4(k2_0_4.val[1]);
+    // Calculate 'x'.
+    const float32x4_t xr_ = vsubq_f32(a_j2_p.val[0], a_k2_p0);
+    // 2-126, 4-124, 6-122, 8-120,
+    const float32x4_t xi_ = vaddq_f32(a_j2_p.val[1], a_k2_p1);
+    // 3-127, 5-125, 7-123, 9-121,
+    // Calculate product into 'y'.
+    //    yr = wkr * xr - wki * xi;
+    //    yi = wkr * xi + wki * xr;
+    const float32x4_t a_ = vmulq_f32(wkr_, xr_);
+    const float32x4_t b_ = vmulq_f32(wki_, xi_);
+    const float32x4_t c_ = vmulq_f32(wkr_, xi_);
+    const float32x4_t d_ = vmulq_f32(wki_, xr_);
+    const float32x4_t yr_ = vaddq_f32(a_, b_);  // 2-126, 4-124, 6-122, 8-120,
+    const float32x4_t yi_ = vsubq_f32(c_, d_);  // 3-127, 5-125, 7-123, 9-121,
+                                                // Update 'a'.
+                                                //    a[j2 + 0] -= yr;
+                                                //    a[j2 + 1] -= yi;
+                                                //    a[k2 + 0] += yr;
+                                                //    a[k2 + 1] -= yi;
+    // 126, 124, 122, 120,
+    const float32x4_t a_k2_p0n = vaddq_f32(a_k2_p0, yr_);
+    // 127, 125, 123, 121,
+    const float32x4_t a_k2_p1n = vsubq_f32(yi_, a_k2_p1);
+    // Shuffle in right order and store.
+    //   2,   3,   4,   5,   6,   7,   8,   9,
+    const float32x4_t a_k2_p0nr = vrev64q_f32(a_k2_p0n);
+    const float32x4_t a_k2_p1nr = vrev64q_f32(a_k2_p1n);
+    // 124, 125, 126, 127, 120, 121, 122, 123
+    const float32x4x2_t a_k2_n = vzipq_f32(a_k2_p0nr, a_k2_p1nr);
+    //   2,   4,   6,   8,
+    a_j2_p.val[0] = vsubq_f32(a_j2_p.val[0], yr_);
+    //   3,   5,   7,   9,
+    a_j2_p.val[1] = vsubq_f32(yi_, a_j2_p.val[1]);
+    //   2,   3,   4,   5,   6,   7,   8,   9,
+    vst2q_f32(&a[0 + j2], a_j2_p);
+
+    vst1q_f32(&a[122 - j2], a_k2_n.val[1]);
+    vst1q_f32(&a[126 - j2], a_k2_n.val[0]);
+  }
+
+  // Scalar code for the remaining items.
+  for (; j2 < 64; j1 += 1, j2 += 2) {
+    const int k2 = 128 - j2;
+    const int k1 = 32 - j1;
+    const float wkr = 0.5f - c[k1];
+    const float wki = c[j1];
+    const float xr = a[j2 + 0] - a[k2 + 0];
+    const float xi = a[j2 + 1] + a[k2 + 1];
+    const float yr = wkr * xr + wki * xi;
+    const float yi = wkr * xi - wki * xr;
+    a[j2 + 0] = a[j2 + 0] - yr;
+    a[j2 + 1] = yi - a[j2 + 1];
+    a[k2 + 0] = yr + a[k2 + 0];
+    a[k2 + 1] = yi - a[k2 + 1];
+  }
+  a[65] = -a[65];
+}
+#endif
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/utility/ooura_fft_sse2.cc b/modules/audio_processing/utility/ooura_fft_sse2.cc
new file mode 100644
index 0000000..b44458e
--- /dev/null
+++ b/modules/audio_processing/utility/ooura_fft_sse2.cc
@@ -0,0 +1,438 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/utility/ooura_fft.h"
+
+#include <emmintrin.h>
+
+#include "modules/audio_processing/utility/ooura_fft_tables_common.h"
+#include "modules/audio_processing/utility/ooura_fft_tables_neon_sse2.h"
+
+namespace webrtc {
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+
+namespace {
+// These intrinsics were unavailable before VS 2008.
+// TODO(andrew): move to a common file.
+#if defined(_MSC_VER) && _MSC_VER < 1500
+static __inline __m128 _mm_castsi128_ps(__m128i a) {
+  return *(__m128*)&a;
+}
+static __inline __m128i _mm_castps_si128(__m128 a) {
+  return *(__m128i*)&a;
+}
+#endif
+
+}  // namespace
+
+void cft1st_128_SSE2(float* a) {
+  const __m128 mm_swap_sign = _mm_load_ps(k_swap_sign);
+  int j, k2;
+
+  for (k2 = 0, j = 0; j < 128; j += 16, k2 += 4) {
+    __m128 a00v = _mm_loadu_ps(&a[j + 0]);
+    __m128 a04v = _mm_loadu_ps(&a[j + 4]);
+    __m128 a08v = _mm_loadu_ps(&a[j + 8]);
+    __m128 a12v = _mm_loadu_ps(&a[j + 12]);
+    __m128 a01v = _mm_shuffle_ps(a00v, a08v, _MM_SHUFFLE(1, 0, 1, 0));
+    __m128 a23v = _mm_shuffle_ps(a00v, a08v, _MM_SHUFFLE(3, 2, 3, 2));
+    __m128 a45v = _mm_shuffle_ps(a04v, a12v, _MM_SHUFFLE(1, 0, 1, 0));
+    __m128 a67v = _mm_shuffle_ps(a04v, a12v, _MM_SHUFFLE(3, 2, 3, 2));
+
+    const __m128 wk1rv = _mm_load_ps(&rdft_wk1r[k2]);
+    const __m128 wk1iv = _mm_load_ps(&rdft_wk1i[k2]);
+    const __m128 wk2rv = _mm_load_ps(&rdft_wk2r[k2]);
+    const __m128 wk2iv = _mm_load_ps(&rdft_wk2i[k2]);
+    const __m128 wk3rv = _mm_load_ps(&rdft_wk3r[k2]);
+    const __m128 wk3iv = _mm_load_ps(&rdft_wk3i[k2]);
+    __m128 x0v = _mm_add_ps(a01v, a23v);
+    const __m128 x1v = _mm_sub_ps(a01v, a23v);
+    const __m128 x2v = _mm_add_ps(a45v, a67v);
+    const __m128 x3v = _mm_sub_ps(a45v, a67v);
+    __m128 x0w;
+    a01v = _mm_add_ps(x0v, x2v);
+    x0v = _mm_sub_ps(x0v, x2v);
+    x0w = _mm_shuffle_ps(x0v, x0v, _MM_SHUFFLE(2, 3, 0, 1));
+    {
+      const __m128 a45_0v = _mm_mul_ps(wk2rv, x0v);
+      const __m128 a45_1v = _mm_mul_ps(wk2iv, x0w);
+      a45v = _mm_add_ps(a45_0v, a45_1v);
+    }
+    {
+      __m128 a23_0v, a23_1v;
+      const __m128 x3w = _mm_shuffle_ps(x3v, x3v, _MM_SHUFFLE(2, 3, 0, 1));
+      const __m128 x3s = _mm_mul_ps(mm_swap_sign, x3w);
+      x0v = _mm_add_ps(x1v, x3s);
+      x0w = _mm_shuffle_ps(x0v, x0v, _MM_SHUFFLE(2, 3, 0, 1));
+      a23_0v = _mm_mul_ps(wk1rv, x0v);
+      a23_1v = _mm_mul_ps(wk1iv, x0w);
+      a23v = _mm_add_ps(a23_0v, a23_1v);
+
+      x0v = _mm_sub_ps(x1v, x3s);
+      x0w = _mm_shuffle_ps(x0v, x0v, _MM_SHUFFLE(2, 3, 0, 1));
+    }
+    {
+      const __m128 a67_0v = _mm_mul_ps(wk3rv, x0v);
+      const __m128 a67_1v = _mm_mul_ps(wk3iv, x0w);
+      a67v = _mm_add_ps(a67_0v, a67_1v);
+    }
+
+    a00v = _mm_shuffle_ps(a01v, a23v, _MM_SHUFFLE(1, 0, 1, 0));
+    a04v = _mm_shuffle_ps(a45v, a67v, _MM_SHUFFLE(1, 0, 1, 0));
+    a08v = _mm_shuffle_ps(a01v, a23v, _MM_SHUFFLE(3, 2, 3, 2));
+    a12v = _mm_shuffle_ps(a45v, a67v, _MM_SHUFFLE(3, 2, 3, 2));
+    _mm_storeu_ps(&a[j + 0], a00v);
+    _mm_storeu_ps(&a[j + 4], a04v);
+    _mm_storeu_ps(&a[j + 8], a08v);
+    _mm_storeu_ps(&a[j + 12], a12v);
+  }
+}
+
+void cftmdl_128_SSE2(float* a) {
+  const int l = 8;
+  const __m128 mm_swap_sign = _mm_load_ps(k_swap_sign);
+  int j0;
+
+  __m128 wk1rv = _mm_load_ps(cftmdl_wk1r);
+  for (j0 = 0; j0 < l; j0 += 2) {
+    const __m128i a_00 = _mm_loadl_epi64((__m128i*)&a[j0 + 0]);
+    const __m128i a_08 = _mm_loadl_epi64((__m128i*)&a[j0 + 8]);
+    const __m128i a_32 = _mm_loadl_epi64((__m128i*)&a[j0 + 32]);
+    const __m128i a_40 = _mm_loadl_epi64((__m128i*)&a[j0 + 40]);
+    const __m128 a_00_32 =
+        _mm_shuffle_ps(_mm_castsi128_ps(a_00), _mm_castsi128_ps(a_32),
+                       _MM_SHUFFLE(1, 0, 1, 0));
+    const __m128 a_08_40 =
+        _mm_shuffle_ps(_mm_castsi128_ps(a_08), _mm_castsi128_ps(a_40),
+                       _MM_SHUFFLE(1, 0, 1, 0));
+    __m128 x0r0_0i0_0r1_x0i1 = _mm_add_ps(a_00_32, a_08_40);
+    const __m128 x1r0_1i0_1r1_x1i1 = _mm_sub_ps(a_00_32, a_08_40);
+
+    const __m128i a_16 = _mm_loadl_epi64((__m128i*)&a[j0 + 16]);
+    const __m128i a_24 = _mm_loadl_epi64((__m128i*)&a[j0 + 24]);
+    const __m128i a_48 = _mm_loadl_epi64((__m128i*)&a[j0 + 48]);
+    const __m128i a_56 = _mm_loadl_epi64((__m128i*)&a[j0 + 56]);
+    const __m128 a_16_48 =
+        _mm_shuffle_ps(_mm_castsi128_ps(a_16), _mm_castsi128_ps(a_48),
+                       _MM_SHUFFLE(1, 0, 1, 0));
+    const __m128 a_24_56 =
+        _mm_shuffle_ps(_mm_castsi128_ps(a_24), _mm_castsi128_ps(a_56),
+                       _MM_SHUFFLE(1, 0, 1, 0));
+    const __m128 x2r0_2i0_2r1_x2i1 = _mm_add_ps(a_16_48, a_24_56);
+    const __m128 x3r0_3i0_3r1_x3i1 = _mm_sub_ps(a_16_48, a_24_56);
+
+    const __m128 xx0 = _mm_add_ps(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);
+    const __m128 xx1 = _mm_sub_ps(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);
+
+    const __m128 x3i0_3r0_3i1_x3r1 = _mm_castsi128_ps(_mm_shuffle_epi32(
+        _mm_castps_si128(x3r0_3i0_3r1_x3i1), _MM_SHUFFLE(2, 3, 0, 1)));
+    const __m128 x3_swapped = _mm_mul_ps(mm_swap_sign, x3i0_3r0_3i1_x3r1);
+    const __m128 x1_x3_add = _mm_add_ps(x1r0_1i0_1r1_x1i1, x3_swapped);
+    const __m128 x1_x3_sub = _mm_sub_ps(x1r0_1i0_1r1_x1i1, x3_swapped);
+
+    const __m128 yy0 =
+        _mm_shuffle_ps(x1_x3_add, x1_x3_sub, _MM_SHUFFLE(2, 2, 2, 2));
+    const __m128 yy1 =
+        _mm_shuffle_ps(x1_x3_add, x1_x3_sub, _MM_SHUFFLE(3, 3, 3, 3));
+    const __m128 yy2 = _mm_mul_ps(mm_swap_sign, yy1);
+    const __m128 yy3 = _mm_add_ps(yy0, yy2);
+    const __m128 yy4 = _mm_mul_ps(wk1rv, yy3);
+
+    _mm_storel_epi64((__m128i*)&a[j0 + 0], _mm_castps_si128(xx0));
+    _mm_storel_epi64(
+        (__m128i*)&a[j0 + 32],
+        _mm_shuffle_epi32(_mm_castps_si128(xx0), _MM_SHUFFLE(3, 2, 3, 2)));
+
+    _mm_storel_epi64((__m128i*)&a[j0 + 16], _mm_castps_si128(xx1));
+    _mm_storel_epi64(
+        (__m128i*)&a[j0 + 48],
+        _mm_shuffle_epi32(_mm_castps_si128(xx1), _MM_SHUFFLE(2, 3, 2, 3)));
+    a[j0 + 48] = -a[j0 + 48];
+
+    _mm_storel_epi64((__m128i*)&a[j0 + 8], _mm_castps_si128(x1_x3_add));
+    _mm_storel_epi64((__m128i*)&a[j0 + 24], _mm_castps_si128(x1_x3_sub));
+
+    _mm_storel_epi64((__m128i*)&a[j0 + 40], _mm_castps_si128(yy4));
+    _mm_storel_epi64(
+        (__m128i*)&a[j0 + 56],
+        _mm_shuffle_epi32(_mm_castps_si128(yy4), _MM_SHUFFLE(2, 3, 2, 3)));
+  }
+
+  {
+    int k = 64;
+    int k1 = 2;
+    int k2 = 2 * k1;
+    const __m128 wk2rv = _mm_load_ps(&rdft_wk2r[k2 + 0]);
+    const __m128 wk2iv = _mm_load_ps(&rdft_wk2i[k2 + 0]);
+    const __m128 wk1iv = _mm_load_ps(&rdft_wk1i[k2 + 0]);
+    const __m128 wk3rv = _mm_load_ps(&rdft_wk3r[k2 + 0]);
+    const __m128 wk3iv = _mm_load_ps(&rdft_wk3i[k2 + 0]);
+    wk1rv = _mm_load_ps(&rdft_wk1r[k2 + 0]);
+    for (j0 = k; j0 < l + k; j0 += 2) {
+      const __m128i a_00 = _mm_loadl_epi64((__m128i*)&a[j0 + 0]);
+      const __m128i a_08 = _mm_loadl_epi64((__m128i*)&a[j0 + 8]);
+      const __m128i a_32 = _mm_loadl_epi64((__m128i*)&a[j0 + 32]);
+      const __m128i a_40 = _mm_loadl_epi64((__m128i*)&a[j0 + 40]);
+      const __m128 a_00_32 =
+          _mm_shuffle_ps(_mm_castsi128_ps(a_00), _mm_castsi128_ps(a_32),
+                         _MM_SHUFFLE(1, 0, 1, 0));
+      const __m128 a_08_40 =
+          _mm_shuffle_ps(_mm_castsi128_ps(a_08), _mm_castsi128_ps(a_40),
+                         _MM_SHUFFLE(1, 0, 1, 0));
+      __m128 x0r0_0i0_0r1_x0i1 = _mm_add_ps(a_00_32, a_08_40);
+      const __m128 x1r0_1i0_1r1_x1i1 = _mm_sub_ps(a_00_32, a_08_40);
+
+      const __m128i a_16 = _mm_loadl_epi64((__m128i*)&a[j0 + 16]);
+      const __m128i a_24 = _mm_loadl_epi64((__m128i*)&a[j0 + 24]);
+      const __m128i a_48 = _mm_loadl_epi64((__m128i*)&a[j0 + 48]);
+      const __m128i a_56 = _mm_loadl_epi64((__m128i*)&a[j0 + 56]);
+      const __m128 a_16_48 =
+          _mm_shuffle_ps(_mm_castsi128_ps(a_16), _mm_castsi128_ps(a_48),
+                         _MM_SHUFFLE(1, 0, 1, 0));
+      const __m128 a_24_56 =
+          _mm_shuffle_ps(_mm_castsi128_ps(a_24), _mm_castsi128_ps(a_56),
+                         _MM_SHUFFLE(1, 0, 1, 0));
+      const __m128 x2r0_2i0_2r1_x2i1 = _mm_add_ps(a_16_48, a_24_56);
+      const __m128 x3r0_3i0_3r1_x3i1 = _mm_sub_ps(a_16_48, a_24_56);
+
+      const __m128 xx = _mm_add_ps(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);
+      const __m128 xx1 = _mm_sub_ps(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);
+      const __m128 xx2 = _mm_mul_ps(xx1, wk2rv);
+      const __m128 xx3 = _mm_mul_ps(
+          wk2iv, _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(xx1),
+                                                    _MM_SHUFFLE(2, 3, 0, 1))));
+      const __m128 xx4 = _mm_add_ps(xx2, xx3);
+
+      const __m128 x3i0_3r0_3i1_x3r1 = _mm_castsi128_ps(_mm_shuffle_epi32(
+          _mm_castps_si128(x3r0_3i0_3r1_x3i1), _MM_SHUFFLE(2, 3, 0, 1)));
+      const __m128 x3_swapped = _mm_mul_ps(mm_swap_sign, x3i0_3r0_3i1_x3r1);
+      const __m128 x1_x3_add = _mm_add_ps(x1r0_1i0_1r1_x1i1, x3_swapped);
+      const __m128 x1_x3_sub = _mm_sub_ps(x1r0_1i0_1r1_x1i1, x3_swapped);
+
+      const __m128 xx10 = _mm_mul_ps(x1_x3_add, wk1rv);
+      const __m128 xx11 = _mm_mul_ps(
+          wk1iv, _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(x1_x3_add),
+                                                    _MM_SHUFFLE(2, 3, 0, 1))));
+      const __m128 xx12 = _mm_add_ps(xx10, xx11);
+
+      const __m128 xx20 = _mm_mul_ps(x1_x3_sub, wk3rv);
+      const __m128 xx21 = _mm_mul_ps(
+          wk3iv, _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(x1_x3_sub),
+                                                    _MM_SHUFFLE(2, 3, 0, 1))));
+      const __m128 xx22 = _mm_add_ps(xx20, xx21);
+
+      _mm_storel_epi64((__m128i*)&a[j0 + 0], _mm_castps_si128(xx));
+      _mm_storel_epi64(
+          (__m128i*)&a[j0 + 32],
+          _mm_shuffle_epi32(_mm_castps_si128(xx), _MM_SHUFFLE(3, 2, 3, 2)));
+
+      _mm_storel_epi64((__m128i*)&a[j0 + 16], _mm_castps_si128(xx4));
+      _mm_storel_epi64(
+          (__m128i*)&a[j0 + 48],
+          _mm_shuffle_epi32(_mm_castps_si128(xx4), _MM_SHUFFLE(3, 2, 3, 2)));
+
+      _mm_storel_epi64((__m128i*)&a[j0 + 8], _mm_castps_si128(xx12));
+      _mm_storel_epi64(
+          (__m128i*)&a[j0 + 40],
+          _mm_shuffle_epi32(_mm_castps_si128(xx12), _MM_SHUFFLE(3, 2, 3, 2)));
+
+      _mm_storel_epi64((__m128i*)&a[j0 + 24], _mm_castps_si128(xx22));
+      _mm_storel_epi64(
+          (__m128i*)&a[j0 + 56],
+          _mm_shuffle_epi32(_mm_castps_si128(xx22), _MM_SHUFFLE(3, 2, 3, 2)));
+    }
+  }
+}
+
+void rftfsub_128_SSE2(float* a) {
+  const float* c = rdft_w + 32;
+  int j1, j2, k1, k2;
+  float wkr, wki, xr, xi, yr, yi;
+
+  static const ALIGN16_BEG float ALIGN16_END k_half[4] = {0.5f, 0.5f, 0.5f,
+                                                          0.5f};
+  const __m128 mm_half = _mm_load_ps(k_half);
+
+  // Vectorized code (four at once).
+  //    Note: commented number are indexes for the first iteration of the loop.
+  for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) {
+    // Load 'wk'.
+    const __m128 c_j1 = _mm_loadu_ps(&c[j1]);       //  1,  2,  3,  4,
+    const __m128 c_k1 = _mm_loadu_ps(&c[29 - j1]);  // 28, 29, 30, 31,
+    const __m128 wkrt = _mm_sub_ps(mm_half, c_k1);  // 28, 29, 30, 31,
+    const __m128 wkr_ =
+        _mm_shuffle_ps(wkrt, wkrt, _MM_SHUFFLE(0, 1, 2, 3));  // 31, 30, 29, 28,
+    const __m128 wki_ = c_j1;                                 //  1,  2,  3,  4,
+    // Load and shuffle 'a'.
+    const __m128 a_j2_0 = _mm_loadu_ps(&a[0 + j2]);    //   2,   3,   4,   5,
+    const __m128 a_j2_4 = _mm_loadu_ps(&a[4 + j2]);    //   6,   7,   8,   9,
+    const __m128 a_k2_0 = _mm_loadu_ps(&a[122 - j2]);  // 120, 121, 122, 123,
+    const __m128 a_k2_4 = _mm_loadu_ps(&a[126 - j2]);  // 124, 125, 126, 127,
+    const __m128 a_j2_p0 = _mm_shuffle_ps(
+        a_j2_0, a_j2_4, _MM_SHUFFLE(2, 0, 2, 0));  //   2,   4,   6,   8,
+    const __m128 a_j2_p1 = _mm_shuffle_ps(
+        a_j2_0, a_j2_4, _MM_SHUFFLE(3, 1, 3, 1));  //   3,   5,   7,   9,
+    const __m128 a_k2_p0 = _mm_shuffle_ps(
+        a_k2_4, a_k2_0, _MM_SHUFFLE(0, 2, 0, 2));  // 126, 124, 122, 120,
+    const __m128 a_k2_p1 = _mm_shuffle_ps(
+        a_k2_4, a_k2_0, _MM_SHUFFLE(1, 3, 1, 3));  // 127, 125, 123, 121,
+    // Calculate 'x'.
+    const __m128 xr_ = _mm_sub_ps(a_j2_p0, a_k2_p0);
+    // 2-126, 4-124, 6-122, 8-120,
+    const __m128 xi_ = _mm_add_ps(a_j2_p1, a_k2_p1);
+    // 3-127, 5-125, 7-123, 9-121,
+    // Calculate product into 'y'.
+    //    yr = wkr * xr - wki * xi;
+    //    yi = wkr * xi + wki * xr;
+    const __m128 a_ = _mm_mul_ps(wkr_, xr_);
+    const __m128 b_ = _mm_mul_ps(wki_, xi_);
+    const __m128 c_ = _mm_mul_ps(wkr_, xi_);
+    const __m128 d_ = _mm_mul_ps(wki_, xr_);
+    const __m128 yr_ = _mm_sub_ps(a_, b_);  // 2-126, 4-124, 6-122, 8-120,
+    const __m128 yi_ = _mm_add_ps(c_, d_);  // 3-127, 5-125, 7-123, 9-121,
+                                            // Update 'a'.
+                                            //    a[j2 + 0] -= yr;
+                                            //    a[j2 + 1] -= yi;
+                                            //    a[k2 + 0] += yr;
+    //    a[k2 + 1] -= yi;
+    const __m128 a_j2_p0n = _mm_sub_ps(a_j2_p0, yr_);  //   2,   4,   6,   8,
+    const __m128 a_j2_p1n = _mm_sub_ps(a_j2_p1, yi_);  //   3,   5,   7,   9,
+    const __m128 a_k2_p0n = _mm_add_ps(a_k2_p0, yr_);  // 126, 124, 122, 120,
+    const __m128 a_k2_p1n = _mm_sub_ps(a_k2_p1, yi_);  // 127, 125, 123, 121,
+    // Shuffle in right order and store.
+    const __m128 a_j2_0n = _mm_unpacklo_ps(a_j2_p0n, a_j2_p1n);
+    //   2,   3,   4,   5,
+    const __m128 a_j2_4n = _mm_unpackhi_ps(a_j2_p0n, a_j2_p1n);
+    //   6,   7,   8,   9,
+    const __m128 a_k2_0nt = _mm_unpackhi_ps(a_k2_p0n, a_k2_p1n);
+    // 122, 123, 120, 121,
+    const __m128 a_k2_4nt = _mm_unpacklo_ps(a_k2_p0n, a_k2_p1n);
+    // 126, 127, 124, 125,
+    const __m128 a_k2_0n = _mm_shuffle_ps(
+        a_k2_0nt, a_k2_0nt, _MM_SHUFFLE(1, 0, 3, 2));  // 120, 121, 122, 123,
+    const __m128 a_k2_4n = _mm_shuffle_ps(
+        a_k2_4nt, a_k2_4nt, _MM_SHUFFLE(1, 0, 3, 2));  // 124, 125, 126, 127,
+    _mm_storeu_ps(&a[0 + j2], a_j2_0n);
+    _mm_storeu_ps(&a[4 + j2], a_j2_4n);
+    _mm_storeu_ps(&a[122 - j2], a_k2_0n);
+    _mm_storeu_ps(&a[126 - j2], a_k2_4n);
+  }
+  // Scalar code for the remaining items.
+  for (; j2 < 64; j1 += 1, j2 += 2) {
+    k2 = 128 - j2;
+    k1 = 32 - j1;
+    wkr = 0.5f - c[k1];
+    wki = c[j1];
+    xr = a[j2 + 0] - a[k2 + 0];
+    xi = a[j2 + 1] + a[k2 + 1];
+    yr = wkr * xr - wki * xi;
+    yi = wkr * xi + wki * xr;
+    a[j2 + 0] -= yr;
+    a[j2 + 1] -= yi;
+    a[k2 + 0] += yr;
+    a[k2 + 1] -= yi;
+  }
+}
+
+void rftbsub_128_SSE2(float* a) {
+  const float* c = rdft_w + 32;
+  int j1, j2, k1, k2;
+  float wkr, wki, xr, xi, yr, yi;
+
+  static const ALIGN16_BEG float ALIGN16_END k_half[4] = {0.5f, 0.5f, 0.5f,
+                                                          0.5f};
+  const __m128 mm_half = _mm_load_ps(k_half);
+
+  a[1] = -a[1];
+  // Vectorized code (four at once).
+  //    Note: commented number are indexes for the first iteration of the loop.
+  for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) {
+    // Load 'wk'.
+    const __m128 c_j1 = _mm_loadu_ps(&c[j1]);       //  1,  2,  3,  4,
+    const __m128 c_k1 = _mm_loadu_ps(&c[29 - j1]);  // 28, 29, 30, 31,
+    const __m128 wkrt = _mm_sub_ps(mm_half, c_k1);  // 28, 29, 30, 31,
+    const __m128 wkr_ =
+        _mm_shuffle_ps(wkrt, wkrt, _MM_SHUFFLE(0, 1, 2, 3));  // 31, 30, 29, 28,
+    const __m128 wki_ = c_j1;                                 //  1,  2,  3,  4,
+    // Load and shuffle 'a'.
+    const __m128 a_j2_0 = _mm_loadu_ps(&a[0 + j2]);    //   2,   3,   4,   5,
+    const __m128 a_j2_4 = _mm_loadu_ps(&a[4 + j2]);    //   6,   7,   8,   9,
+    const __m128 a_k2_0 = _mm_loadu_ps(&a[122 - j2]);  // 120, 121, 122, 123,
+    const __m128 a_k2_4 = _mm_loadu_ps(&a[126 - j2]);  // 124, 125, 126, 127,
+    const __m128 a_j2_p0 = _mm_shuffle_ps(
+        a_j2_0, a_j2_4, _MM_SHUFFLE(2, 0, 2, 0));  //   2,   4,   6,   8,
+    const __m128 a_j2_p1 = _mm_shuffle_ps(
+        a_j2_0, a_j2_4, _MM_SHUFFLE(3, 1, 3, 1));  //   3,   5,   7,   9,
+    const __m128 a_k2_p0 = _mm_shuffle_ps(
+        a_k2_4, a_k2_0, _MM_SHUFFLE(0, 2, 0, 2));  // 126, 124, 122, 120,
+    const __m128 a_k2_p1 = _mm_shuffle_ps(
+        a_k2_4, a_k2_0, _MM_SHUFFLE(1, 3, 1, 3));  // 127, 125, 123, 121,
+    // Calculate 'x'.
+    const __m128 xr_ = _mm_sub_ps(a_j2_p0, a_k2_p0);
+    // 2-126, 4-124, 6-122, 8-120,
+    const __m128 xi_ = _mm_add_ps(a_j2_p1, a_k2_p1);
+    // 3-127, 5-125, 7-123, 9-121,
+    // Calculate product into 'y'.
+    //    yr = wkr * xr + wki * xi;
+    //    yi = wkr * xi - wki * xr;
+    const __m128 a_ = _mm_mul_ps(wkr_, xr_);
+    const __m128 b_ = _mm_mul_ps(wki_, xi_);
+    const __m128 c_ = _mm_mul_ps(wkr_, xi_);
+    const __m128 d_ = _mm_mul_ps(wki_, xr_);
+    const __m128 yr_ = _mm_add_ps(a_, b_);  // 2-126, 4-124, 6-122, 8-120,
+    const __m128 yi_ = _mm_sub_ps(c_, d_);  // 3-127, 5-125, 7-123, 9-121,
+                                            // Update 'a'.
+                                            //    a[j2 + 0] = a[j2 + 0] - yr;
+                                            //    a[j2 + 1] = yi - a[j2 + 1];
+                                            //    a[k2 + 0] = yr + a[k2 + 0];
+    //    a[k2 + 1] = yi - a[k2 + 1];
+    const __m128 a_j2_p0n = _mm_sub_ps(a_j2_p0, yr_);  //   2,   4,   6,   8,
+    const __m128 a_j2_p1n = _mm_sub_ps(yi_, a_j2_p1);  //   3,   5,   7,   9,
+    const __m128 a_k2_p0n = _mm_add_ps(a_k2_p0, yr_);  // 126, 124, 122, 120,
+    const __m128 a_k2_p1n = _mm_sub_ps(yi_, a_k2_p1);  // 127, 125, 123, 121,
+    // Shuffle in right order and store.
+    const __m128 a_j2_0n = _mm_unpacklo_ps(a_j2_p0n, a_j2_p1n);
+    //   2,   3,   4,   5,
+    const __m128 a_j2_4n = _mm_unpackhi_ps(a_j2_p0n, a_j2_p1n);
+    //   6,   7,   8,   9,
+    const __m128 a_k2_0nt = _mm_unpackhi_ps(a_k2_p0n, a_k2_p1n);
+    // 122, 123, 120, 121,
+    const __m128 a_k2_4nt = _mm_unpacklo_ps(a_k2_p0n, a_k2_p1n);
+    // 126, 127, 124, 125,
+    const __m128 a_k2_0n = _mm_shuffle_ps(
+        a_k2_0nt, a_k2_0nt, _MM_SHUFFLE(1, 0, 3, 2));  // 120, 121, 122, 123,
+    const __m128 a_k2_4n = _mm_shuffle_ps(
+        a_k2_4nt, a_k2_4nt, _MM_SHUFFLE(1, 0, 3, 2));  // 124, 125, 126, 127,
+    _mm_storeu_ps(&a[0 + j2], a_j2_0n);
+    _mm_storeu_ps(&a[4 + j2], a_j2_4n);
+    _mm_storeu_ps(&a[122 - j2], a_k2_0n);
+    _mm_storeu_ps(&a[126 - j2], a_k2_4n);
+  }
+  // Scalar code for the remaining items.
+  for (; j2 < 64; j1 += 1, j2 += 2) {
+    k2 = 128 - j2;
+    k1 = 32 - j1;
+    wkr = 0.5f - c[k1];
+    wki = c[j1];
+    xr = a[j2 + 0] - a[k2 + 0];
+    xi = a[j2 + 1] + a[k2 + 1];
+    yr = wkr * xr + wki * xi;
+    yi = wkr * xi - wki * xr;
+    a[j2 + 0] = a[j2 + 0] - yr;
+    a[j2 + 1] = yi - a[j2 + 1];
+    a[k2 + 0] = yr + a[k2 + 0];
+    a[k2 + 1] = yi - a[k2 + 1];
+  }
+  a[65] = -a[65];
+}
+#endif
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/utility/ooura_fft_tables_common.h b/modules/audio_processing/utility/ooura_fft_tables_common.h
new file mode 100644
index 0000000..47d076e
--- /dev/null
+++ b/modules/audio_processing/utility/ooura_fft_tables_common.h
@@ -0,0 +1,54 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_TABLES_COMMON_H_
+#define MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_TABLES_COMMON_H_
+
+#include "modules/audio_processing/utility/ooura_fft.h"
+
+namespace webrtc {
+
+// This tables used to be computed at run-time. For example, refer to:
+// https://code.google.com/p/webrtc/source/browse/trunk/webrtc/modules/audio_processing/utility/apm_rdft.c?r=6564
+// to see the initialization code.
+// Constants shared by all paths (C, SSE2, NEON).
+const float rdft_w[64] = {
+    1.0000000000f, 0.0000000000f, 0.7071067691f, 0.7071067691f, 0.9238795638f,
+    0.3826834559f, 0.3826834559f, 0.9238795638f, 0.9807852507f, 0.1950903237f,
+    0.5555702448f, 0.8314695954f, 0.8314695954f, 0.5555702448f, 0.1950903237f,
+    0.9807852507f, 0.9951847196f, 0.0980171412f, 0.6343933344f, 0.7730104327f,
+    0.8819212914f, 0.4713967443f, 0.2902846634f, 0.9569403529f, 0.9569403529f,
+    0.2902846634f, 0.4713967443f, 0.8819212914f, 0.7730104327f, 0.6343933344f,
+    0.0980171412f, 0.9951847196f, 0.7071067691f, 0.4993977249f, 0.4975923598f,
+    0.4945882559f, 0.4903926253f, 0.4850156307f, 0.4784701765f, 0.4707720280f,
+    0.4619397819f, 0.4519946277f, 0.4409606457f, 0.4288643003f, 0.4157347977f,
+    0.4016037583f, 0.3865052164f, 0.3704755902f, 0.3535533845f, 0.3357794881f,
+    0.3171966672f, 0.2978496552f, 0.2777851224f, 0.2570513785f, 0.2356983721f,
+    0.2137775421f, 0.1913417280f, 0.1684449315f, 0.1451423317f, 0.1214900985f,
+    0.0975451618f, 0.0733652338f, 0.0490085706f, 0.0245338380f,
+};
+
+// Constants used by the C and MIPS paths.
+const float rdft_wk3ri_first[16] = {
+    1.000000000f, 0.000000000f, 0.382683456f,  0.923879564f,
+    0.831469536f, 0.555570245f, -0.195090353f, 0.980785251f,
+    0.956940353f, 0.290284693f, 0.098017156f,  0.995184720f,
+    0.634393334f, 0.773010492f, -0.471396863f, 0.881921172f,
+};
+const float rdft_wk3ri_second[16] = {
+    -0.707106769f, 0.707106769f,  -0.923879564f, -0.382683456f,
+    -0.980785251f, 0.195090353f,  -0.555570245f, -0.831469536f,
+    -0.881921172f, 0.471396863f,  -0.773010492f, -0.634393334f,
+    -0.995184720f, -0.098017156f, -0.290284693f, -0.956940353f,
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_TABLES_COMMON_H_
diff --git a/modules/audio_processing/utility/ooura_fft_tables_neon_sse2.h b/modules/audio_processing/utility/ooura_fft_tables_neon_sse2.h
new file mode 100644
index 0000000..1c44ae7
--- /dev/null
+++ b/modules/audio_processing/utility/ooura_fft_tables_neon_sse2.h
@@ -0,0 +1,94 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_TABLES_NEON_SSE2_H_
+#define MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_TABLES_NEON_SSE2_H_
+
+#include "modules/audio_processing/utility/ooura_fft.h"
+
+#ifdef _MSC_VER /* visual c++ */
+#define ALIGN16_BEG __declspec(align(16))
+#define ALIGN16_END
+#else /* gcc or icc */
+#define ALIGN16_BEG
+#define ALIGN16_END __attribute__((aligned(16)))
+#endif
+
+namespace webrtc {
+
+// These tables used to be computed at run-time. For example, refer to:
+// https://code.google.com/p/webrtc/source/browse/trunk/webrtc/modules/audio_processing/utility/apm_rdft.c?r=6564
+// to see the initialization code.
+#if defined(WEBRTC_ARCH_X86_FAMILY) || defined(WEBRTC_HAS_NEON)
+// Constants used by SSE2 and NEON but initialized in the C path.
+const ALIGN16_BEG float ALIGN16_END k_swap_sign[4] = {-1.f, 1.f, -1.f, 1.f};
+
+ALIGN16_BEG const float ALIGN16_END rdft_wk1r[32] = {
+    1.000000000f, 1.000000000f, 0.707106769f, 0.707106769f, 0.923879564f,
+    0.923879564f, 0.382683456f, 0.382683456f, 0.980785251f, 0.980785251f,
+    0.555570245f, 0.555570245f, 0.831469595f, 0.831469595f, 0.195090324f,
+    0.195090324f, 0.995184720f, 0.995184720f, 0.634393334f, 0.634393334f,
+    0.881921291f, 0.881921291f, 0.290284663f, 0.290284663f, 0.956940353f,
+    0.956940353f, 0.471396744f, 0.471396744f, 0.773010433f, 0.773010433f,
+    0.098017141f, 0.098017141f,
+};
+ALIGN16_BEG const float ALIGN16_END rdft_wk2r[32] = {
+    1.000000000f,  1.000000000f,  -0.000000000f, -0.000000000f, 0.707106769f,
+    0.707106769f,  -0.707106769f, -0.707106769f, 0.923879564f,  0.923879564f,
+    -0.382683456f, -0.382683456f, 0.382683456f,  0.382683456f,  -0.923879564f,
+    -0.923879564f, 0.980785251f,  0.980785251f,  -0.195090324f, -0.195090324f,
+    0.555570245f,  0.555570245f,  -0.831469595f, -0.831469595f, 0.831469595f,
+    0.831469595f,  -0.555570245f, -0.555570245f, 0.195090324f,  0.195090324f,
+    -0.980785251f, -0.980785251f,
+};
+ALIGN16_BEG const float ALIGN16_END rdft_wk3r[32] = {
+    1.000000000f,  1.000000000f,  -0.707106769f, -0.707106769f, 0.382683456f,
+    0.382683456f,  -0.923879564f, -0.923879564f, 0.831469536f,  0.831469536f,
+    -0.980785251f, -0.980785251f, -0.195090353f, -0.195090353f, -0.555570245f,
+    -0.555570245f, 0.956940353f,  0.956940353f,  -0.881921172f, -0.881921172f,
+    0.098017156f,  0.098017156f,  -0.773010492f, -0.773010492f, 0.634393334f,
+    0.634393334f,  -0.995184720f, -0.995184720f, -0.471396863f, -0.471396863f,
+    -0.290284693f, -0.290284693f,
+};
+ALIGN16_BEG const float ALIGN16_END rdft_wk1i[32] = {
+    -0.000000000f, 0.000000000f,  -0.707106769f, 0.707106769f,  -0.382683456f,
+    0.382683456f,  -0.923879564f, 0.923879564f,  -0.195090324f, 0.195090324f,
+    -0.831469595f, 0.831469595f,  -0.555570245f, 0.555570245f,  -0.980785251f,
+    0.980785251f,  -0.098017141f, 0.098017141f,  -0.773010433f, 0.773010433f,
+    -0.471396744f, 0.471396744f,  -0.956940353f, 0.956940353f,  -0.290284663f,
+    0.290284663f,  -0.881921291f, 0.881921291f,  -0.634393334f, 0.634393334f,
+    -0.995184720f, 0.995184720f,
+};
+ALIGN16_BEG const float ALIGN16_END rdft_wk2i[32] = {
+    -0.000000000f, 0.000000000f,  -1.000000000f, 1.000000000f,  -0.707106769f,
+    0.707106769f,  -0.707106769f, 0.707106769f,  -0.382683456f, 0.382683456f,
+    -0.923879564f, 0.923879564f,  -0.923879564f, 0.923879564f,  -0.382683456f,
+    0.382683456f,  -0.195090324f, 0.195090324f,  -0.980785251f, 0.980785251f,
+    -0.831469595f, 0.831469595f,  -0.555570245f, 0.555570245f,  -0.555570245f,
+    0.555570245f,  -0.831469595f, 0.831469595f,  -0.980785251f, 0.980785251f,
+    -0.195090324f, 0.195090324f,
+};
+ALIGN16_BEG const float ALIGN16_END rdft_wk3i[32] = {
+    -0.000000000f, 0.000000000f,  -0.707106769f, 0.707106769f,  -0.923879564f,
+    0.923879564f,  0.382683456f,  -0.382683456f, -0.555570245f, 0.555570245f,
+    -0.195090353f, 0.195090353f,  -0.980785251f, 0.980785251f,  0.831469536f,
+    -0.831469536f, -0.290284693f, 0.290284693f,  -0.471396863f, 0.471396863f,
+    -0.995184720f, 0.995184720f,  0.634393334f,  -0.634393334f, -0.773010492f,
+    0.773010492f,  0.098017156f,  -0.098017156f, -0.881921172f, 0.881921172f,
+    0.956940353f,  -0.956940353f,
+};
+ALIGN16_BEG const float ALIGN16_END cftmdl_wk1r[4] = {
+    0.707106769f, 0.707106769f, 0.707106769f, -0.707106769f,
+};
+#endif
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_UTILITY_OOURA_FFT_TABLES_NEON_SSE2_H_
diff --git a/modules/audio_processing/vad/BUILD.gn b/modules/audio_processing/vad/BUILD.gn
new file mode 100644
index 0000000..9976b78
--- /dev/null
+++ b/modules/audio_processing/vad/BUILD.gn
@@ -0,0 +1,71 @@
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../../webrtc.gni")
+rtc_static_library("vad") {
+  visibility = [
+    "../*",
+    "../../../rtc_tools:*",
+  ]
+  sources = [
+    "common.h",
+    "gmm.cc",
+    "gmm.h",
+    "noise_gmm_tables.h",
+    "pitch_based_vad.cc",
+    "pitch_based_vad.h",
+    "pitch_internal.cc",
+    "pitch_internal.h",
+    "pole_zero_filter.cc",
+    "pole_zero_filter.h",
+    "standalone_vad.cc",
+    "standalone_vad.h",
+    "vad_audio_proc.cc",
+    "vad_audio_proc.h",
+    "vad_audio_proc_internal.h",
+    "vad_circular_buffer.cc",
+    "vad_circular_buffer.h",
+    "voice_activity_detector.cc",
+    "voice_activity_detector.h",
+    "voice_gmm_tables.h",
+  ]
+  deps = [
+    "../..:module_api",
+    "../../..:typedefs",
+    "../../../audio/utility:audio_frame_operations",
+    "../../../common_audio",
+    "../../../common_audio:common_audio_c",
+    "../../../rtc_base:checks",
+    "../../audio_coding:isac",
+  ]
+}
+
+if (rtc_include_tests) {
+  rtc_static_library("vad_unittests") {
+    testonly = true
+    sources = [
+      "gmm_unittest.cc",
+      "pitch_based_vad_unittest.cc",
+      "pitch_internal_unittest.cc",
+      "pole_zero_filter_unittest.cc",
+      "standalone_vad_unittest.cc",
+      "vad_audio_proc_unittest.cc",
+      "vad_circular_buffer_unittest.cc",
+      "voice_activity_detector_unittest.cc",
+    ]
+    deps = [
+      ":vad",
+      "../..:module_api",
+      "../../../common_audio",
+      "../../../test:fileutils",
+      "../../../test:test_support",
+      "//testing/gmock",
+      "//testing/gtest",
+    ]
+  }
+}
diff --git a/modules/audio_processing/vad/common.h b/modules/audio_processing/vad/common.h
new file mode 100644
index 0000000..b5a5fb3
--- /dev/null
+++ b/modules/audio_processing/vad/common.h
@@ -0,0 +1,29 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_COMMON_H_
+#define MODULES_AUDIO_PROCESSING_VAD_COMMON_H_
+
+#include <stddef.h>
+
+static const int kSampleRateHz = 16000;
+static const size_t kLength10Ms = kSampleRateHz / 100;
+static const size_t kMaxNumFrames = 4;
+
+struct AudioFeatures {
+  double log_pitch_gain[kMaxNumFrames];
+  double pitch_lag_hz[kMaxNumFrames];
+  double spectral_peak[kMaxNumFrames];
+  double rms[kMaxNumFrames];
+  size_t num_frames;
+  bool silence;
+};
+
+#endif  // MODULES_AUDIO_PROCESSING_VAD_COMMON_H_
diff --git a/modules/audio_processing/vad/gmm.cc b/modules/audio_processing/vad/gmm.cc
new file mode 100644
index 0000000..266ca44
--- /dev/null
+++ b/modules/audio_processing/vad/gmm.cc
@@ -0,0 +1,64 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/gmm.h"
+
+#include <math.h>
+#include <stdlib.h>
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+static const int kMaxDimension = 10;
+
+static void RemoveMean(const double* in,
+                       const double* mean_vec,
+                       int dimension,
+                       double* out) {
+  for (int n = 0; n < dimension; ++n)
+    out[n] = in[n] - mean_vec[n];
+}
+
+static double ComputeExponent(const double* in,
+                              const double* covar_inv,
+                              int dimension) {
+  double q = 0;
+  for (int i = 0; i < dimension; ++i) {
+    double v = 0;
+    for (int j = 0; j < dimension; j++)
+      v += (*covar_inv++) * in[j];
+    q += v * in[i];
+  }
+  q *= -0.5;
+  return q;
+}
+
+double EvaluateGmm(const double* x, const GmmParameters& gmm_parameters) {
+  if (gmm_parameters.dimension > kMaxDimension) {
+    return -1;  // This is invalid pdf so the caller can check this.
+  }
+  double f = 0;
+  double v[kMaxDimension];
+  const double* mean_vec = gmm_parameters.mean;
+  const double* covar_inv = gmm_parameters.covar_inverse;
+
+  for (int n = 0; n < gmm_parameters.num_mixtures; n++) {
+    RemoveMean(x, mean_vec, gmm_parameters.dimension, v);
+    double q = ComputeExponent(v, covar_inv, gmm_parameters.dimension) +
+               gmm_parameters.weight[n];
+    f += exp(q);
+    mean_vec += gmm_parameters.dimension;
+    covar_inv += gmm_parameters.dimension * gmm_parameters.dimension;
+  }
+  return f;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/vad/gmm.h b/modules/audio_processing/vad/gmm.h
new file mode 100644
index 0000000..93eb675
--- /dev/null
+++ b/modules/audio_processing/vad/gmm.h
@@ -0,0 +1,45 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_GMM_H_
+#define MODULES_AUDIO_PROCESSING_VAD_GMM_H_
+
+namespace webrtc {
+
+// A structure that specifies a GMM.
+// A GMM is formulated as
+//  f(x) = w[0] * mixture[0] + w[1] * mixture[1] + ... +
+//         w[num_mixtures - 1] * mixture[num_mixtures - 1];
+// Where a 'mixture' is a Gaussian density.
+
+struct GmmParameters {
+  // weight[n] = log(w[n]) - |dimension|/2 * log(2*pi) - 1/2 * log(det(cov[n]));
+  // where cov[n] is the covariance matrix of mixture n;
+  const double* weight;
+  // pointer to the first element of a |num_mixtures|x|dimension| matrix
+  // where kth row is the mean of the kth mixture.
+  const double* mean;
+  // pointer to the first element of a |num_mixtures|x|dimension|x|dimension|
+  // 3D-matrix, where the kth 2D-matrix is the inverse of the covariance
+  // matrix of the kth mixture.
+  const double* covar_inverse;
+  // Dimensionality of the mixtures.
+  int dimension;
+  // number of the mixtures.
+  int num_mixtures;
+};
+
+// Evaluate the given GMM, according to |gmm_parameters|, at the given point
+// |x|. If the dimensionality of the given GMM is larger that the maximum
+// acceptable dimension by the following function -1 is returned.
+double EvaluateGmm(const double* x, const GmmParameters& gmm_parameters);
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_PROCESSING_VAD_GMM_H_
diff --git a/modules/audio_processing/vad/gmm_unittest.cc b/modules/audio_processing/vad/gmm_unittest.cc
new file mode 100644
index 0000000..dfc8855
--- /dev/null
+++ b/modules/audio_processing/vad/gmm_unittest.cc
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/gmm.h"
+
+#include <math.h>
+
+#include "modules/audio_processing/vad/noise_gmm_tables.h"
+#include "modules/audio_processing/vad/voice_gmm_tables.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(GmmTest, EvaluateGmm) {
+  GmmParameters noise_gmm;
+  GmmParameters voice_gmm;
+
+  // Setup noise GMM.
+  noise_gmm.dimension = kNoiseGmmDim;
+  noise_gmm.num_mixtures = kNoiseGmmNumMixtures;
+  noise_gmm.weight = kNoiseGmmWeights;
+  noise_gmm.mean = &kNoiseGmmMean[0][0];
+  noise_gmm.covar_inverse = &kNoiseGmmCovarInverse[0][0][0];
+
+  // Setup voice GMM.
+  voice_gmm.dimension = kVoiceGmmDim;
+  voice_gmm.num_mixtures = kVoiceGmmNumMixtures;
+  voice_gmm.weight = kVoiceGmmWeights;
+  voice_gmm.mean = &kVoiceGmmMean[0][0];
+  voice_gmm.covar_inverse = &kVoiceGmmCovarInverse[0][0][0];
+
+  // Test vectors. These are the mean of the GMM means.
+  const double kXVoice[kVoiceGmmDim] = {
+      -1.35893162459863, 602.862491970368, 178.022069191324};
+  const double kXNoise[kNoiseGmmDim] = {
+      -2.33443722724409, 2827.97828765184, 141.114178166812};
+
+  // Expected pdf values. These values are computed in MATLAB using EvalGmm.m
+  const double kPdfNoise = 1.88904409403101e-07;
+  const double kPdfVoice = 1.30453996982266e-06;
+
+  // Relative error should be smaller that the following value.
+  const double kAcceptedRelativeErr = 1e-10;
+
+  // Test Voice.
+  double pdf = EvaluateGmm(kXVoice, voice_gmm);
+  EXPECT_GT(pdf, 0);
+  double relative_error = fabs(pdf - kPdfVoice) / kPdfVoice;
+  EXPECT_LE(relative_error, kAcceptedRelativeErr);
+
+  // Test Noise.
+  pdf = EvaluateGmm(kXNoise, noise_gmm);
+  EXPECT_GT(pdf, 0);
+  relative_error = fabs(pdf - kPdfNoise) / kPdfNoise;
+  EXPECT_LE(relative_error, kAcceptedRelativeErr);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/vad/noise_gmm_tables.h b/modules/audio_processing/vad/noise_gmm_tables.h
new file mode 100644
index 0000000..c07dade
--- /dev/null
+++ b/modules/audio_processing/vad/noise_gmm_tables.h
@@ -0,0 +1,85 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// GMM tables for inactive segments. Generated by MakeGmmTables.m.
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_NOISE_GMM_TABLES_H_
+#define MODULES_AUDIO_PROCESSING_VAD_NOISE_GMM_TABLES_H_
+
+static const int kNoiseGmmNumMixtures = 12;
+static const int kNoiseGmmDim = 3;
+
+static const double
+    kNoiseGmmCovarInverse[kNoiseGmmNumMixtures][kNoiseGmmDim][kNoiseGmmDim] = {
+        {{7.36219567592941e+00, 4.83060785179861e-03, 1.23335151497610e-02},
+         {4.83060785179861e-03, 1.65289507047817e-04, -2.41490588169997e-04},
+         {1.23335151497610e-02, -2.41490588169997e-04, 6.59472060689382e-03}},
+        {{8.70265239309140e+00, -5.30636201431086e-04, 5.44014966585347e-03},
+         {-5.30636201431086e-04, 3.11095453521008e-04, -1.86287206836035e-04},
+         {5.44014966585347e-03, -1.86287206836035e-04, 6.29493388790744e-04}},
+        {{4.53467851955055e+00, -3.92977536695197e-03, -2.46521420693317e-03},
+         {-3.92977536695197e-03, 4.94650752632750e-05, -1.08587438501826e-05},
+         {-2.46521420693317e-03, -1.08587438501826e-05, 9.28793975422261e-05}},
+        {{9.26817997114275e-01, -4.03976069276753e-04, -3.56441427392165e-03},
+         {-4.03976069276753e-04, 2.51976251631430e-06, 1.46914206734572e-07},
+         {-3.56441427392165e-03, 1.46914206734572e-07, 8.19914567685373e-05}},
+        {{7.61715986787441e+00, -1.54889041216888e-04, 2.41756280071656e-02},
+         {-1.54889041216888e-04, 3.50282550461672e-07, -6.27251196972490e-06},
+         {2.41756280071656e-02, -6.27251196972490e-06, 1.45061847649872e-02}},
+        {{8.31193642663158e+00, -3.84070508164323e-04, -3.09750630821876e-02},
+         {-3.84070508164323e-04, 3.80433432277336e-07, -1.14321142836636e-06},
+         {-3.09750630821876e-02, -1.14321142836636e-06, 8.35091486289997e-04}},
+        {{9.67283151270894e-01, 5.82465812445039e-05, -3.18350798617053e-03},
+         {5.82465812445039e-05, 2.23762672000318e-07, -7.74196587408623e-07},
+         {-3.18350798617053e-03, -7.74196587408623e-07, 3.85120938338325e-04}},
+        {{8.28066236985388e+00, 5.87634508319763e-05, 6.99303090891743e-03},
+         {5.87634508319763e-05, 2.93746018618058e-07, 3.40843332882272e-07},
+         {6.99303090891743e-03, 3.40843332882272e-07, 1.99379171190344e-04}},
+        {{6.07488998675646e+00, -1.11494526618473e-02, 5.10013111123381e-03},
+         {-1.11494526618473e-02, 6.99238879921751e-04, 5.36718550370870e-05},
+         {5.10013111123381e-03, 5.36718550370870e-05, 5.26909853276753e-04}},
+        {{6.90492021419175e+00, 4.20639355257863e-04, -2.38612752336481e-03},
+         {4.20639355257863e-04, 3.31246767338153e-06, -2.42052288150859e-08},
+         {-2.38612752336481e-03, -2.42052288150859e-08, 4.46608368363412e-04}},
+        {{1.31069150869715e+01, -1.73718583865670e-04, -1.97591814508578e-02},
+         {-1.73718583865670e-04, 2.80451716300124e-07, 9.96570755379865e-07},
+         {-1.97591814508578e-02, 9.96570755379865e-07, 2.41361900868847e-03}},
+        {{4.69566344239814e+00, -2.61077567563690e-04, 5.26359000761433e-03},
+         {-2.61077567563690e-04, 1.82420859823767e-06, -7.83645887541601e-07},
+         {5.26359000761433e-03, -7.83645887541601e-07, 1.33586288288802e-02}}};
+
+static const double kNoiseGmmMean[kNoiseGmmNumMixtures][kNoiseGmmDim] = {
+    {-2.01386094766163e+00, 1.69702162045397e+02, 7.41715804872181e+01},
+    {-1.94684591777290e+00, 1.42398396732668e+02, 1.64186321157831e+02},
+    {-2.29319297562437e+00, 3.86415425589868e+02, 2.13452215267125e+02},
+    {-3.25487177070268e+00, 1.08668712553616e+03, 2.33119949467419e+02},
+    {-2.13159632447467e+00, 4.83821702557717e+03, 6.86786166673740e+01},
+    {-2.26171410780526e+00, 4.79420193982422e+03, 1.53222513286450e+02},
+    {-3.32166740703185e+00, 4.35161135834358e+03, 1.33206448431316e+02},
+    {-2.19290322814343e+00, 3.98325506609408e+03, 2.13249167359934e+02},
+    {-2.02898459255404e+00, 7.37039893155007e+03, 1.12518527491926e+02},
+    {-2.26150236399500e+00, 1.54896745196145e+03, 1.49717357868579e+02},
+    {-2.00417668301790e+00, 3.82434760310304e+03, 1.07438913004312e+02},
+    {-2.30193040814533e+00, 1.43953696546439e+03, 7.04085275122649e+01}};
+
+static const double kNoiseGmmWeights[kNoiseGmmNumMixtures] = {
+    -1.09422832086193e+01,
+    -1.10847897513425e+01,
+    -1.36767587732187e+01,
+    -1.79789356118641e+01,
+    -1.42830169160894e+01,
+    -1.56500228061379e+01,
+    -1.83124990950113e+01,
+    -1.69979436177477e+01,
+    -1.12329424387828e+01,
+    -1.41311785780639e+01,
+    -1.47171861448585e+01,
+    -1.35963362781839e+01};
+#endif  // MODULES_AUDIO_PROCESSING_VAD_NOISE_GMM_TABLES_H_
diff --git a/modules/audio_processing/vad/pitch_based_vad.cc b/modules/audio_processing/vad/pitch_based_vad.cc
new file mode 100644
index 0000000..bca2552
--- /dev/null
+++ b/modules/audio_processing/vad/pitch_based_vad.cc
@@ -0,0 +1,123 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/pitch_based_vad.h"
+
+#include <math.h>
+#include <string.h>
+
+#include "modules/audio_processing/vad/vad_circular_buffer.h"
+#include "modules/audio_processing/vad/common.h"
+#include "modules/audio_processing/vad/noise_gmm_tables.h"
+#include "modules/audio_processing/vad/voice_gmm_tables.h"
+#include "modules/include/module_common_types.h"
+
+namespace webrtc {
+
+static_assert(kNoiseGmmDim == kVoiceGmmDim,
+              "noise and voice gmm dimension not equal");
+
+// These values should match MATLAB counterparts for unit-tests to pass.
+static const int kPosteriorHistorySize = 500;  // 5 sec of 10 ms frames.
+static const double kInitialPriorProbability = 0.3;
+static const int kTransientWidthThreshold = 7;
+static const double kLowProbabilityThreshold = 0.2;
+
+static double LimitProbability(double p) {
+  const double kLimHigh = 0.99;
+  const double kLimLow = 0.01;
+
+  if (p > kLimHigh)
+    p = kLimHigh;
+  else if (p < kLimLow)
+    p = kLimLow;
+  return p;
+}
+
+PitchBasedVad::PitchBasedVad()
+    : p_prior_(kInitialPriorProbability),
+      circular_buffer_(VadCircularBuffer::Create(kPosteriorHistorySize)) {
+  // Setup noise GMM.
+  noise_gmm_.dimension = kNoiseGmmDim;
+  noise_gmm_.num_mixtures = kNoiseGmmNumMixtures;
+  noise_gmm_.weight = kNoiseGmmWeights;
+  noise_gmm_.mean = &kNoiseGmmMean[0][0];
+  noise_gmm_.covar_inverse = &kNoiseGmmCovarInverse[0][0][0];
+
+  // Setup voice GMM.
+  voice_gmm_.dimension = kVoiceGmmDim;
+  voice_gmm_.num_mixtures = kVoiceGmmNumMixtures;
+  voice_gmm_.weight = kVoiceGmmWeights;
+  voice_gmm_.mean = &kVoiceGmmMean[0][0];
+  voice_gmm_.covar_inverse = &kVoiceGmmCovarInverse[0][0][0];
+}
+
+PitchBasedVad::~PitchBasedVad() {
+}
+
+int PitchBasedVad::VoicingProbability(const AudioFeatures& features,
+                                      double* p_combined) {
+  double p;
+  double gmm_features[3];
+  double pdf_features_given_voice;
+  double pdf_features_given_noise;
+  // These limits are the same in matlab implementation 'VoicingProbGMM().'
+  const double kLimLowLogPitchGain = -2.0;
+  const double kLimHighLogPitchGain = -0.9;
+  const double kLimLowSpectralPeak = 200;
+  const double kLimHighSpectralPeak = 2000;
+  const double kEps = 1e-12;
+  for (size_t n = 0; n < features.num_frames; n++) {
+    gmm_features[0] = features.log_pitch_gain[n];
+    gmm_features[1] = features.spectral_peak[n];
+    gmm_features[2] = features.pitch_lag_hz[n];
+
+    pdf_features_given_voice = EvaluateGmm(gmm_features, voice_gmm_);
+    pdf_features_given_noise = EvaluateGmm(gmm_features, noise_gmm_);
+
+    if (features.spectral_peak[n] < kLimLowSpectralPeak ||
+        features.spectral_peak[n] > kLimHighSpectralPeak ||
+        features.log_pitch_gain[n] < kLimLowLogPitchGain) {
+      pdf_features_given_voice = kEps * pdf_features_given_noise;
+    } else if (features.log_pitch_gain[n] > kLimHighLogPitchGain) {
+      pdf_features_given_noise = kEps * pdf_features_given_voice;
+    }
+
+    p = p_prior_ * pdf_features_given_voice /
+        (pdf_features_given_voice * p_prior_ +
+         pdf_features_given_noise * (1 - p_prior_));
+
+    p = LimitProbability(p);
+
+    // Combine pitch-based probability with standalone probability, before
+    // updating prior probabilities.
+    double prod_active = p * p_combined[n];
+    double prod_inactive = (1 - p) * (1 - p_combined[n]);
+    p_combined[n] = prod_active / (prod_active + prod_inactive);
+
+    if (UpdatePrior(p_combined[n]) < 0)
+      return -1;
+    // Limit prior probability. With a zero prior probability the posterior
+    // probability is always zero.
+    p_prior_ = LimitProbability(p_prior_);
+  }
+  return 0;
+}
+
+int PitchBasedVad::UpdatePrior(double p) {
+  circular_buffer_->Insert(p);
+  if (circular_buffer_->RemoveTransient(kTransientWidthThreshold,
+                                        kLowProbabilityThreshold) < 0)
+    return -1;
+  p_prior_ = circular_buffer_->Mean();
+  return 0;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/vad/pitch_based_vad.h b/modules/audio_processing/vad/pitch_based_vad.h
new file mode 100644
index 0000000..584dcc7
--- /dev/null
+++ b/modules/audio_processing/vad/pitch_based_vad.h
@@ -0,0 +1,58 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_PITCH_BASED_VAD_H_
+#define MODULES_AUDIO_PROCESSING_VAD_PITCH_BASED_VAD_H_
+
+#include <memory>
+
+#include "modules/audio_processing/vad/common.h"
+#include "modules/audio_processing/vad/gmm.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class AudioFrame;
+class VadCircularBuffer;
+
+// Computes the probability of the input audio frame to be active given
+// the corresponding pitch-gain and lag of the frame.
+class PitchBasedVad {
+ public:
+  PitchBasedVad();
+  ~PitchBasedVad();
+
+  // Compute pitch-based voicing probability, given the features.
+  //   features: a structure containing features required for computing voicing
+  //             probabilities.
+  //
+  //   p_combined: an array which contains the combined activity probabilities
+  //               computed prior to the call of this function. The method,
+  //               then, computes the voicing probabilities and combine them
+  //               with the given values. The result are returned in |p|.
+  int VoicingProbability(const AudioFeatures& features, double* p_combined);
+
+ private:
+  int UpdatePrior(double p);
+
+  // TODO(turajs): maybe defining this at a higher level (maybe enum) so that
+  // all the code recognize it as "no-error."
+  static const int kNoError = 0;
+
+  GmmParameters noise_gmm_;
+  GmmParameters voice_gmm_;
+
+  double p_prior_;
+
+  std::unique_ptr<VadCircularBuffer> circular_buffer_;
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_PROCESSING_VAD_PITCH_BASED_VAD_H_
diff --git a/modules/audio_processing/vad/pitch_based_vad_unittest.cc b/modules/audio_processing/vad/pitch_based_vad_unittest.cc
new file mode 100644
index 0000000..fb6daa5
--- /dev/null
+++ b/modules/audio_processing/vad/pitch_based_vad_unittest.cc
@@ -0,0 +1,75 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/pitch_based_vad.h"
+
+#include <math.h>
+#include <stdio.h>
+
+#include <string>
+
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+TEST(PitchBasedVadTest, VoicingProbabilityTest) {
+  std::string spectral_peak_file_name =
+      test::ResourcePath("audio_processing/agc/agc_spectral_peak", "dat");
+  FILE* spectral_peak_file = fopen(spectral_peak_file_name.c_str(), "rb");
+  ASSERT_TRUE(spectral_peak_file != NULL);
+
+  std::string pitch_gain_file_name =
+      test::ResourcePath("audio_processing/agc/agc_pitch_gain", "dat");
+  FILE* pitch_gain_file = fopen(pitch_gain_file_name.c_str(), "rb");
+  ASSERT_TRUE(pitch_gain_file != NULL);
+
+  std::string pitch_lag_file_name =
+      test::ResourcePath("audio_processing/agc/agc_pitch_lag", "dat");
+  FILE* pitch_lag_file = fopen(pitch_lag_file_name.c_str(), "rb");
+  ASSERT_TRUE(pitch_lag_file != NULL);
+
+  std::string voicing_prob_file_name =
+      test::ResourcePath("audio_processing/agc/agc_voicing_prob", "dat");
+  FILE* voicing_prob_file = fopen(voicing_prob_file_name.c_str(), "rb");
+  ASSERT_TRUE(voicing_prob_file != NULL);
+
+  PitchBasedVad vad_;
+
+  double reference_activity_probability;
+
+  AudioFeatures audio_features;
+  memset(&audio_features, 0, sizeof(audio_features));
+  audio_features.num_frames = 1;
+  while (fread(audio_features.spectral_peak,
+               sizeof(audio_features.spectral_peak[0]), 1,
+               spectral_peak_file) == 1u) {
+    double p;
+    ASSERT_EQ(1u, fread(audio_features.log_pitch_gain,
+                        sizeof(audio_features.log_pitch_gain[0]), 1,
+                        pitch_gain_file));
+    ASSERT_EQ(1u,
+              fread(audio_features.pitch_lag_hz,
+                    sizeof(audio_features.pitch_lag_hz[0]), 1, pitch_lag_file));
+    ASSERT_EQ(1u, fread(&reference_activity_probability,
+                        sizeof(reference_activity_probability), 1,
+                        voicing_prob_file));
+
+    p = 0.5;  // Initialize to the neutral value for combining probabilities.
+    EXPECT_EQ(0, vad_.VoicingProbability(audio_features, &p));
+    EXPECT_NEAR(p, reference_activity_probability, 0.01);
+  }
+
+  fclose(spectral_peak_file);
+  fclose(pitch_gain_file);
+  fclose(pitch_lag_file);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/vad/pitch_internal.cc b/modules/audio_processing/vad/pitch_internal.cc
new file mode 100644
index 0000000..7e6bd3e
--- /dev/null
+++ b/modules/audio_processing/vad/pitch_internal.cc
@@ -0,0 +1,51 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/pitch_internal.h"
+
+#include <cmath>
+
+// A 4-to-3 linear interpolation.
+// The interpolation constants are derived as following:
+// Input pitch parameters are updated every 7.5 ms. Within a 30-ms interval
+// we are interested in pitch parameters of 0-5 ms, 10-15ms and 20-25ms. This is
+// like interpolating 4-to-6 and keep the odd samples.
+// The reason behind this is that LPC coefficients are computed for the first
+// half of each 10ms interval.
+static void PitchInterpolation(double old_val, const double* in, double* out) {
+  out[0] = 1. / 6. * old_val + 5. / 6. * in[0];
+  out[1] = 5. / 6. * in[1] + 1. / 6. * in[2];
+  out[2] = 0.5 * in[2] + 0.5 * in[3];
+}
+
+void GetSubframesPitchParameters(int sampling_rate_hz,
+                                 double* gains,
+                                 double* lags,
+                                 int num_in_frames,
+                                 int num_out_frames,
+                                 double* log_old_gain,
+                                 double* old_lag,
+                                 double* log_pitch_gain,
+                                 double* pitch_lag_hz) {
+  // Gain interpolation is in log-domain, also returned in log-domain.
+  for (int n = 0; n < num_in_frames; n++)
+    gains[n] = log(gains[n] + 1e-12);
+
+  // Interpolate lags and gains.
+  PitchInterpolation(*log_old_gain, gains, log_pitch_gain);
+  *log_old_gain = gains[num_in_frames - 1];
+  PitchInterpolation(*old_lag, lags, pitch_lag_hz);
+  *old_lag = lags[num_in_frames - 1];
+
+  // Convert pitch-lags to Hertz.
+  for (int n = 0; n < num_out_frames; n++) {
+    pitch_lag_hz[n] = (sampling_rate_hz) / (pitch_lag_hz[n]);
+  }
+}
diff --git a/modules/audio_processing/vad/pitch_internal.h b/modules/audio_processing/vad/pitch_internal.h
new file mode 100644
index 0000000..67e0522
--- /dev/null
+++ b/modules/audio_processing/vad/pitch_internal.h
@@ -0,0 +1,26 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_PITCH_INTERNAL_H_
+#define MODULES_AUDIO_PROCESSING_VAD_PITCH_INTERNAL_H_
+
+// TODO(turajs): Write a description of this function. Also be consistent with
+// usage of |sampling_rate_hz| vs |kSamplingFreqHz|.
+void GetSubframesPitchParameters(int sampling_rate_hz,
+                                 double* gains,
+                                 double* lags,
+                                 int num_in_frames,
+                                 int num_out_frames,
+                                 double* log_old_gain,
+                                 double* old_lag,
+                                 double* log_pitch_gain,
+                                 double* pitch_lag_hz);
+
+#endif  // MODULES_AUDIO_PROCESSING_VAD_PITCH_INTERNAL_H_
diff --git a/modules/audio_processing/vad/pitch_internal_unittest.cc b/modules/audio_processing/vad/pitch_internal_unittest.cc
new file mode 100644
index 0000000..c1fde10
--- /dev/null
+++ b/modules/audio_processing/vad/pitch_internal_unittest.cc
@@ -0,0 +1,50 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/pitch_internal.h"
+
+#include <math.h>
+
+#include "test/gtest.h"
+
+TEST(PitchInternalTest, test) {
+  const int kSamplingRateHz = 8000;
+  const int kNumInputParameters = 4;
+  const int kNumOutputParameters = 3;
+  // Inputs
+  double log_old_gain = log(0.5);
+  double gains[] = {0.6, 0.2, 0.5, 0.4};
+
+  double old_lag = 70;
+  double lags[] = {90, 111, 122, 50};
+
+  // Expected outputs
+  double expected_log_pitch_gain[] = {
+      -0.541212549898316, -1.45672279045507, -0.80471895621705};
+  double expected_log_old_gain = log(gains[kNumInputParameters - 1]);
+
+  double expected_pitch_lag_hz[] = {
+      92.3076923076923, 70.9010339734121, 93.0232558139535};
+  double expected_old_lag = lags[kNumInputParameters - 1];
+
+  double log_pitch_gain[kNumOutputParameters];
+  double pitch_lag_hz[kNumInputParameters];
+
+  GetSubframesPitchParameters(kSamplingRateHz, gains, lags, kNumInputParameters,
+                              kNumOutputParameters, &log_old_gain, &old_lag,
+                              log_pitch_gain, pitch_lag_hz);
+
+  for (int n = 0; n < 3; n++) {
+    EXPECT_NEAR(pitch_lag_hz[n], expected_pitch_lag_hz[n], 1e-6);
+    EXPECT_NEAR(log_pitch_gain[n], expected_log_pitch_gain[n], 1e-8);
+  }
+  EXPECT_NEAR(old_lag, expected_old_lag, 1e-6);
+  EXPECT_NEAR(log_old_gain, expected_log_old_gain, 1e-8);
+}
diff --git a/modules/audio_processing/vad/pole_zero_filter.cc b/modules/audio_processing/vad/pole_zero_filter.cc
new file mode 100644
index 0000000..fa56a3c
--- /dev/null
+++ b/modules/audio_processing/vad/pole_zero_filter.cc
@@ -0,0 +1,106 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/pole_zero_filter.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <algorithm>
+
+namespace webrtc {
+
+PoleZeroFilter* PoleZeroFilter::Create(const float* numerator_coefficients,
+                                       size_t order_numerator,
+                                       const float* denominator_coefficients,
+                                       size_t order_denominator) {
+  if (order_numerator > kMaxFilterOrder ||
+      order_denominator > kMaxFilterOrder || denominator_coefficients[0] == 0 ||
+      numerator_coefficients == NULL || denominator_coefficients == NULL)
+    return NULL;
+  return new PoleZeroFilter(numerator_coefficients, order_numerator,
+                            denominator_coefficients, order_denominator);
+}
+
+PoleZeroFilter::PoleZeroFilter(const float* numerator_coefficients,
+                               size_t order_numerator,
+                               const float* denominator_coefficients,
+                               size_t order_denominator)
+    : past_input_(),
+      past_output_(),
+      numerator_coefficients_(),
+      denominator_coefficients_(),
+      order_numerator_(order_numerator),
+      order_denominator_(order_denominator),
+      highest_order_(std::max(order_denominator, order_numerator)) {
+  memcpy(numerator_coefficients_, numerator_coefficients,
+         sizeof(numerator_coefficients_[0]) * (order_numerator_ + 1));
+  memcpy(denominator_coefficients_, denominator_coefficients,
+         sizeof(denominator_coefficients_[0]) * (order_denominator_ + 1));
+
+  if (denominator_coefficients_[0] != 1) {
+    for (size_t n = 0; n <= order_numerator_; n++)
+      numerator_coefficients_[n] /= denominator_coefficients_[0];
+    for (size_t n = 0; n <= order_denominator_; n++)
+      denominator_coefficients_[n] /= denominator_coefficients_[0];
+  }
+}
+
+template <typename T>
+static float FilterArPast(const T* past, size_t order,
+                          const float* coefficients) {
+  float sum = 0.0f;
+  size_t past_index = order - 1;
+  for (size_t k = 1; k <= order; k++, past_index--)
+    sum += coefficients[k] * past[past_index];
+  return sum;
+}
+
+int PoleZeroFilter::Filter(const int16_t* in,
+                           size_t num_input_samples,
+                           float* output) {
+  if (in == NULL || output == NULL)
+    return -1;
+  // This is the typical case, just a memcpy.
+  const size_t k = std::min(num_input_samples, highest_order_);
+  size_t n;
+  for (n = 0; n < k; n++) {
+    output[n] = in[n] * numerator_coefficients_[0];
+    output[n] += FilterArPast(&past_input_[n], order_numerator_,
+                              numerator_coefficients_);
+    output[n] -= FilterArPast(&past_output_[n], order_denominator_,
+                              denominator_coefficients_);
+
+    past_input_[n + order_numerator_] = in[n];
+    past_output_[n + order_denominator_] = output[n];
+  }
+  if (highest_order_ < num_input_samples) {
+    for (size_t m = 0; n < num_input_samples; n++, m++) {
+      output[n] = in[n] * numerator_coefficients_[0];
+      output[n] +=
+          FilterArPast(&in[m], order_numerator_, numerator_coefficients_);
+      output[n] -= FilterArPast(&output[m], order_denominator_,
+                                denominator_coefficients_);
+    }
+    // Record into the past signal.
+    memcpy(past_input_, &in[num_input_samples - order_numerator_],
+           sizeof(in[0]) * order_numerator_);
+    memcpy(past_output_, &output[num_input_samples - order_denominator_],
+           sizeof(output[0]) * order_denominator_);
+  } else {
+    // Odd case that the length of the input is shorter that filter order.
+    memmove(past_input_, &past_input_[num_input_samples],
+            order_numerator_ * sizeof(past_input_[0]));
+    memmove(past_output_, &past_output_[num_input_samples],
+            order_denominator_ * sizeof(past_output_[0]));
+  }
+  return 0;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/vad/pole_zero_filter.h b/modules/audio_processing/vad/pole_zero_filter.h
new file mode 100644
index 0000000..283deec
--- /dev/null
+++ b/modules/audio_processing/vad/pole_zero_filter.h
@@ -0,0 +1,52 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_POLE_ZERO_FILTER_H_
+#define MODULES_AUDIO_PROCESSING_VAD_POLE_ZERO_FILTER_H_
+
+#include <cstddef>
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class PoleZeroFilter {
+ public:
+  ~PoleZeroFilter() {}
+
+  static PoleZeroFilter* Create(const float* numerator_coefficients,
+                                size_t order_numerator,
+                                const float* denominator_coefficients,
+                                size_t order_denominator);
+
+  int Filter(const int16_t* in, size_t num_input_samples, float* output);
+
+ private:
+  PoleZeroFilter(const float* numerator_coefficients,
+                 size_t order_numerator,
+                 const float* denominator_coefficients,
+                 size_t order_denominator);
+
+  static const int kMaxFilterOrder = 24;
+
+  int16_t past_input_[kMaxFilterOrder * 2];
+  float past_output_[kMaxFilterOrder * 2];
+
+  float numerator_coefficients_[kMaxFilterOrder + 1];
+  float denominator_coefficients_[kMaxFilterOrder + 1];
+
+  size_t order_numerator_;
+  size_t order_denominator_;
+  size_t highest_order_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_VAD_POLE_ZERO_FILTER_H_
diff --git a/modules/audio_processing/vad/pole_zero_filter_unittest.cc b/modules/audio_processing/vad/pole_zero_filter_unittest.cc
new file mode 100644
index 0000000..38ee10f
--- /dev/null
+++ b/modules/audio_processing/vad/pole_zero_filter_unittest.cc
@@ -0,0 +1,103 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/pole_zero_filter.h"
+
+#include <math.h>
+#include <stdio.h>
+
+#include <memory>
+
+#include "modules/audio_processing/vad/vad_audio_proc_internal.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+static const int kInputSamples = 50;
+
+static const int16_t kInput[kInputSamples] = {
+    -2136,  -7116, 10715,  2464,   3164,   8139,   11393, 24013, -32117, -5544,
+    -27740, 10181, 14190,  -24055, -15912, 17393,  6359,  -9950, -13894, 32432,
+    -23944, 3437,  -8381,  19768,  3087,   -19795, -5920, 13310, 1407,   3876,
+    4059,   3524,  -23130, 19121,  -27900, -24840, 4089,  21422, -3625,  3015,
+    -11236, 28856, 13424,  6571,   -19761, -6361,  15821, -9469, 29727,  32229};
+
+static const float kReferenceOutput[kInputSamples] = {
+    -2082.230472f,  -6878.572941f,  10697.090871f,  2358.373952f,
+    2973.936512f,   7738.580650f,   10690.803213f,  22687.091576f,
+    -32676.684717f, -5879.621684f,  -27359.297432f, 10368.735888f,
+    13994.584604f,  -23676.126249f, -15078.250390f, 17818.253338f,
+    6577.743123f,   -9498.369315f,  -13073.651079f, 32460.026588f,
+    -23391.849347f, 3953.805667f,   -7667.761363f,  19995.153447f,
+    3185.575477f,   -19207.365160f, -5143.103201f,  13756.317237f,
+    1779.654794f,   4142.269755f,   4209.475034f,   3572.991789f,
+    -22509.089546f, 19307.878964f,  -27060.439759f, -23319.042810f,
+    5547.685267f,   22312.718676f,  -2707.309027f,  3852.358490f,
+    -10135.510093f, 29241.509970f,  13394.397233f,  6340.721417f,
+    -19510.207905f, -5908.442086f,  15882.301634f,  -9211.335255f,
+    29253.056735f,  30874.443046f};
+
+class PoleZeroFilterTest : public ::testing::Test {
+ protected:
+  PoleZeroFilterTest()
+      : my_filter_(PoleZeroFilter::Create(kCoeffNumerator,
+                                          kFilterOrder,
+                                          kCoeffDenominator,
+                                          kFilterOrder)) {}
+
+  ~PoleZeroFilterTest() override {}
+
+  void FilterSubframes(int num_subframes);
+
+ private:
+  void TestClean();
+  std::unique_ptr<PoleZeroFilter> my_filter_;
+};
+
+void PoleZeroFilterTest::FilterSubframes(int num_subframes) {
+  float output[kInputSamples];
+  const int num_subframe_samples = kInputSamples / num_subframes;
+  EXPECT_EQ(num_subframe_samples * num_subframes, kInputSamples);
+
+  for (int n = 0; n < num_subframes; n++) {
+    my_filter_->Filter(&kInput[n * num_subframe_samples], num_subframe_samples,
+                       &output[n * num_subframe_samples]);
+  }
+  for (int n = 0; n < kInputSamples; n++) {
+    EXPECT_NEAR(output[n], kReferenceOutput[n], 1);
+  }
+}
+
+TEST_F(PoleZeroFilterTest, OneSubframe) {
+  FilterSubframes(1);
+}
+
+TEST_F(PoleZeroFilterTest, TwoSubframes) {
+  FilterSubframes(2);
+}
+
+TEST_F(PoleZeroFilterTest, FiveSubframes) {
+  FilterSubframes(5);
+}
+
+TEST_F(PoleZeroFilterTest, TenSubframes) {
+  FilterSubframes(10);
+}
+
+TEST_F(PoleZeroFilterTest, TwentyFiveSubframes) {
+  FilterSubframes(25);
+}
+
+TEST_F(PoleZeroFilterTest, FiftySubframes) {
+  FilterSubframes(50);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/vad/standalone_vad.cc b/modules/audio_processing/vad/standalone_vad.cc
new file mode 100644
index 0000000..004cefe
--- /dev/null
+++ b/modules/audio_processing/vad/standalone_vad.cc
@@ -0,0 +1,92 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/standalone_vad.h"
+
+#include "audio/utility/audio_frame_operations.h"
+#include "modules/include/module_common_types.h"
+#include "rtc_base/checks.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+static const int kDefaultStandaloneVadMode = 3;
+
+StandaloneVad::StandaloneVad(VadInst* vad)
+    : vad_(vad), buffer_(), index_(0), mode_(kDefaultStandaloneVadMode) {
+}
+
+StandaloneVad::~StandaloneVad() {
+  WebRtcVad_Free(vad_);
+}
+
+StandaloneVad* StandaloneVad::Create() {
+  VadInst* vad = WebRtcVad_Create();
+  if (!vad)
+    return nullptr;
+
+  int err = WebRtcVad_Init(vad);
+  err |= WebRtcVad_set_mode(vad, kDefaultStandaloneVadMode);
+  if (err != 0) {
+    WebRtcVad_Free(vad);
+    return nullptr;
+  }
+  return new StandaloneVad(vad);
+}
+
+int StandaloneVad::AddAudio(const int16_t* data, size_t length) {
+  if (length != kLength10Ms)
+    return -1;
+
+  if (index_ + length > kLength10Ms * kMaxNum10msFrames)
+    // Reset the buffer if it's full.
+    // TODO(ajm): Instead, consider just processing every 10 ms frame. Then we
+    // can forgo the buffering.
+    index_ = 0;
+
+  memcpy(&buffer_[index_], data, sizeof(int16_t) * length);
+  index_ += length;
+  return 0;
+}
+
+int StandaloneVad::GetActivity(double* p, size_t length_p) {
+  if (index_ == 0)
+    return -1;
+
+  const size_t num_frames = index_ / kLength10Ms;
+  if (num_frames > length_p)
+    return -1;
+  RTC_DCHECK_EQ(0, WebRtcVad_ValidRateAndFrameLength(kSampleRateHz, index_));
+
+  int activity = WebRtcVad_Process(vad_, kSampleRateHz, buffer_, index_);
+  if (activity < 0)
+    return -1;
+  else if (activity == 0)
+    p[0] = 0.01;  // Arbitrary but small and non-zero.
+  else
+    p[0] = 0.5;  // 0.5 is neutral values when combinned by other probabilities.
+  for (size_t n = 1; n < num_frames; n++)
+    p[n] = p[0];
+  // Reset the buffer to start from the beginning.
+  index_ = 0;
+  return activity;
+}
+
+int StandaloneVad::set_mode(int mode) {
+  if (mode < 0 || mode > 3)
+    return -1;
+  if (WebRtcVad_set_mode(vad_, mode) != 0)
+    return -1;
+
+  mode_ = mode;
+  return 0;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/vad/standalone_vad.h b/modules/audio_processing/vad/standalone_vad.h
new file mode 100644
index 0000000..b85de0a
--- /dev/null
+++ b/modules/audio_processing/vad/standalone_vad.h
@@ -0,0 +1,69 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AGC_STANDALONE_VAD_H_
+#define MODULES_AUDIO_PROCESSING_AGC_STANDALONE_VAD_H_
+
+#include "modules/audio_processing/vad/common.h"
+#include "common_audio/vad/include/webrtc_vad.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class AudioFrame;
+
+class StandaloneVad {
+ public:
+  static StandaloneVad* Create();
+  ~StandaloneVad();
+
+  // Outputs
+  //   p: a buffer where probabilities are written to.
+  //   length_p: number of elements of |p|.
+  //
+  // return value:
+  //    -1: if no audio is stored or VAD returns error.
+  //     0: in success.
+  // In case of error the content of |activity| is unchanged.
+  //
+  // Note that due to a high false-positive (VAD decision is active while the
+  // processed audio is just background noise) rate, stand-alone VAD is used as
+  // a one-sided indicator. The activity probability is 0.5 if the frame is
+  // classified as active, and the probability is 0.01 if the audio is
+  // classified as passive. In this way, when probabilities are combined, the
+  // effect of the stand-alone VAD is neutral if the input is classified as
+  // active.
+  int GetActivity(double* p, size_t length_p);
+
+  // Expecting 10 ms of 16 kHz audio to be pushed in.
+  int AddAudio(const int16_t* data, size_t length);
+
+  // Set aggressiveness of VAD, 0 is the least aggressive and 3 is the most
+  // aggressive mode. Returns -1 if the input is less than 0 or larger than 3,
+  // otherwise 0 is returned.
+  int set_mode(int mode);
+  // Get the agressiveness of the current VAD.
+  int mode() const { return mode_; }
+
+ private:
+  explicit StandaloneVad(VadInst* vad);
+
+  static const size_t kMaxNum10msFrames = 3;
+
+  // TODO(turajs): Is there a way to use scoped-pointer here?
+  VadInst* vad_;
+  int16_t buffer_[kMaxNum10msFrames * kLength10Ms];
+  size_t index_;
+  int mode_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AGC_STANDALONE_VAD_H_
diff --git a/modules/audio_processing/vad/standalone_vad_unittest.cc b/modules/audio_processing/vad/standalone_vad_unittest.cc
new file mode 100644
index 0000000..28d1349
--- /dev/null
+++ b/modules/audio_processing/vad/standalone_vad_unittest.cc
@@ -0,0 +1,108 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/standalone_vad.h"
+
+#include <string.h>
+
+#include <memory>
+
+#include "modules/include/module_common_types.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+TEST(StandaloneVadTest, Api) {
+  std::unique_ptr<StandaloneVad> vad(StandaloneVad::Create());
+  int16_t data[kLength10Ms] = {0};
+
+  // Valid frame length (for 32 kHz rate), but not what the VAD is expecting.
+  EXPECT_EQ(-1, vad->AddAudio(data, 320));
+
+  const size_t kMaxNumFrames = 3;
+  double p[kMaxNumFrames];
+  for (size_t n = 0; n < kMaxNumFrames; n++)
+    EXPECT_EQ(0, vad->AddAudio(data, kLength10Ms));
+
+  // Pretend |p| is shorter that it should be.
+  EXPECT_EQ(-1, vad->GetActivity(p, kMaxNumFrames - 1));
+
+  EXPECT_EQ(0, vad->GetActivity(p, kMaxNumFrames));
+
+  // Ask for activity when buffer is empty.
+  EXPECT_EQ(-1, vad->GetActivity(p, kMaxNumFrames));
+
+  // Should reset and result in one buffer.
+  for (size_t n = 0; n < kMaxNumFrames + 1; n++)
+    EXPECT_EQ(0, vad->AddAudio(data, kLength10Ms));
+  EXPECT_EQ(0, vad->GetActivity(p, 1));
+
+  // Wrong modes
+  EXPECT_EQ(-1, vad->set_mode(-1));
+  EXPECT_EQ(-1, vad->set_mode(4));
+
+  // Valid mode.
+  const int kMode = 2;
+  EXPECT_EQ(0, vad->set_mode(kMode));
+  EXPECT_EQ(kMode, vad->mode());
+}
+
+#if defined(WEBRTC_IOS)
+TEST(StandaloneVadTest, DISABLED_ActivityDetection) {
+#else
+TEST(StandaloneVadTest, ActivityDetection) {
+#endif
+  std::unique_ptr<StandaloneVad> vad(StandaloneVad::Create());
+  const size_t kDataLength = kLength10Ms;
+  int16_t data[kDataLength] = {0};
+
+  FILE* pcm_file =
+      fopen(test::ResourcePath("audio_processing/agc/agc_audio", "pcm").c_str(),
+            "rb");
+  ASSERT_TRUE(pcm_file != NULL);
+
+  FILE* reference_file = fopen(
+      test::ResourcePath("audio_processing/agc/agc_vad", "dat").c_str(), "rb");
+  ASSERT_TRUE(reference_file != NULL);
+
+  // Reference activities are prepared with 0 aggressiveness.
+  ASSERT_EQ(0, vad->set_mode(0));
+
+  // Stand-alone VAD can operate on 1, 2 or 3 frames of length 10 ms. The
+  // reference file is created for 30 ms frame.
+  const int kNumVadFramesToProcess = 3;
+  int num_frames = 0;
+  while (fread(data, sizeof(int16_t), kDataLength, pcm_file) == kDataLength) {
+    vad->AddAudio(data, kDataLength);
+    num_frames++;
+    if (num_frames == kNumVadFramesToProcess) {
+      num_frames = 0;
+      int referece_activity;
+      double p[kNumVadFramesToProcess];
+      EXPECT_EQ(1u, fread(&referece_activity, sizeof(referece_activity), 1,
+                          reference_file));
+      int activity = vad->GetActivity(p, kNumVadFramesToProcess);
+      EXPECT_EQ(referece_activity, activity);
+      if (activity != 0) {
+        // When active, probabilities are set to 0.5.
+        for (int n = 0; n < kNumVadFramesToProcess; n++)
+          EXPECT_EQ(0.5, p[n]);
+      } else {
+        // When inactive, probabilities are set to 0.01.
+        for (int n = 0; n < kNumVadFramesToProcess; n++)
+          EXPECT_EQ(0.01, p[n]);
+      }
+    }
+  }
+  fclose(reference_file);
+  fclose(pcm_file);
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/vad/vad_audio_proc.cc b/modules/audio_processing/vad/vad_audio_proc.cc
new file mode 100644
index 0000000..b1841d0
--- /dev/null
+++ b/modules/audio_processing/vad/vad_audio_proc.cc
@@ -0,0 +1,276 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/vad_audio_proc.h"
+
+#include <math.h>
+#include <stdio.h>
+
+#include "common_audio/fft4g.h"
+#include "modules/audio_processing/vad/pitch_internal.h"
+#include "modules/audio_processing/vad/pole_zero_filter.h"
+#include "modules/audio_processing/vad/vad_audio_proc_internal.h"
+#include "rtc_base/checks.h"
+extern "C" {
+#include "modules/audio_coding/codecs/isac/main/source/codec.h"
+#include "modules/audio_coding/codecs/isac/main/source/lpc_analysis.h"
+#include "modules/audio_coding/codecs/isac/main/source/pitch_estimator.h"
+#include "modules/audio_coding/codecs/isac/main/source/structs.h"
+}
+#include "modules/include/module_common_types.h"
+
+namespace webrtc {
+
+// The following structures are declared anonymous in iSAC's structs.h. To
+// forward declare them, we use this derived class trick.
+struct VadAudioProc::PitchAnalysisStruct : public ::PitchAnalysisStruct {};
+struct VadAudioProc::PreFiltBankstr : public ::PreFiltBankstr {};
+
+static constexpr float kFrequencyResolution =
+    kSampleRateHz / static_cast<float>(VadAudioProc::kDftSize);
+static constexpr int kSilenceRms = 5;
+
+// TODO(turajs): Make a Create or Init for VadAudioProc.
+VadAudioProc::VadAudioProc()
+    : audio_buffer_(),
+      num_buffer_samples_(kNumPastSignalSamples),
+      log_old_gain_(-2),
+      old_lag_(50),  // Arbitrary but valid as pitch-lag (in samples).
+      pitch_analysis_handle_(new PitchAnalysisStruct),
+      pre_filter_handle_(new PreFiltBankstr),
+      high_pass_filter_(PoleZeroFilter::Create(kCoeffNumerator,
+                                               kFilterOrder,
+                                               kCoeffDenominator,
+                                               kFilterOrder)) {
+  static_assert(kNumPastSignalSamples + kNumSubframeSamples ==
+                    sizeof(kLpcAnalWin) / sizeof(kLpcAnalWin[0]),
+                "lpc analysis window incorrect size");
+  static_assert(kLpcOrder + 1 == sizeof(kCorrWeight) / sizeof(kCorrWeight[0]),
+                "correlation weight incorrect size");
+
+  // TODO(turajs): Are we doing too much in the constructor?
+  float data[kDftSize];
+  // Make FFT to initialize.
+  ip_[0] = 0;
+  WebRtc_rdft(kDftSize, 1, data, ip_, w_fft_);
+  // TODO(turajs): Need to initialize high-pass filter.
+
+  // Initialize iSAC components.
+  WebRtcIsac_InitPreFilterbank(pre_filter_handle_.get());
+  WebRtcIsac_InitPitchAnalysis(pitch_analysis_handle_.get());
+}
+
+VadAudioProc::~VadAudioProc() {
+}
+
+void VadAudioProc::ResetBuffer() {
+  memcpy(audio_buffer_, &audio_buffer_[kNumSamplesToProcess],
+         sizeof(audio_buffer_[0]) * kNumPastSignalSamples);
+  num_buffer_samples_ = kNumPastSignalSamples;
+}
+
+int VadAudioProc::ExtractFeatures(const int16_t* frame,
+                                  size_t length,
+                                  AudioFeatures* features) {
+  features->num_frames = 0;
+  if (length != kNumSubframeSamples) {
+    return -1;
+  }
+
+  // High-pass filter to remove the DC component and very low frequency content.
+  // We have experienced that this high-pass filtering improves voice/non-voiced
+  // classification.
+  if (high_pass_filter_->Filter(frame, kNumSubframeSamples,
+                                &audio_buffer_[num_buffer_samples_]) != 0) {
+    return -1;
+  }
+
+  num_buffer_samples_ += kNumSubframeSamples;
+  if (num_buffer_samples_ < kBufferLength) {
+    return 0;
+  }
+  RTC_DCHECK_EQ(num_buffer_samples_, kBufferLength);
+  features->num_frames = kNum10msSubframes;
+  features->silence = false;
+
+  Rms(features->rms, kMaxNumFrames);
+  for (size_t i = 0; i < kNum10msSubframes; ++i) {
+    if (features->rms[i] < kSilenceRms) {
+      // PitchAnalysis can cause NaNs in the pitch gain if it's fed silence.
+      // Bail out here instead.
+      features->silence = true;
+      ResetBuffer();
+      return 0;
+    }
+  }
+
+  PitchAnalysis(features->log_pitch_gain, features->pitch_lag_hz,
+                kMaxNumFrames);
+  FindFirstSpectralPeaks(features->spectral_peak, kMaxNumFrames);
+  ResetBuffer();
+  return 0;
+}
+
+// Computes |kLpcOrder + 1| correlation coefficients.
+void VadAudioProc::SubframeCorrelation(double* corr,
+                                       size_t length_corr,
+                                       size_t subframe_index) {
+  RTC_DCHECK_GE(length_corr, kLpcOrder + 1);
+  double windowed_audio[kNumSubframeSamples + kNumPastSignalSamples];
+  size_t buffer_index = subframe_index * kNumSubframeSamples;
+
+  for (size_t n = 0; n < kNumSubframeSamples + kNumPastSignalSamples; n++)
+    windowed_audio[n] = audio_buffer_[buffer_index++] * kLpcAnalWin[n];
+
+  WebRtcIsac_AutoCorr(corr, windowed_audio,
+                      kNumSubframeSamples + kNumPastSignalSamples, kLpcOrder);
+}
+
+// Compute |kNum10msSubframes| sets of LPC coefficients, one per 10 ms input.
+// The analysis window is 15 ms long and it is centered on the first half of
+// each 10ms sub-frame. This is equivalent to computing LPC coefficients for the
+// first half of each 10 ms subframe.
+void VadAudioProc::GetLpcPolynomials(double* lpc, size_t length_lpc) {
+  RTC_DCHECK_GE(length_lpc, kNum10msSubframes * (kLpcOrder + 1));
+  double corr[kLpcOrder + 1];
+  double reflec_coeff[kLpcOrder];
+  for (size_t i = 0, offset_lpc = 0; i < kNum10msSubframes;
+       i++, offset_lpc += kLpcOrder + 1) {
+    SubframeCorrelation(corr, kLpcOrder + 1, i);
+    corr[0] *= 1.0001;
+    // This makes Lev-Durb a bit more stable.
+    for (size_t k = 0; k < kLpcOrder + 1; k++) {
+      corr[k] *= kCorrWeight[k];
+    }
+    WebRtcIsac_LevDurb(&lpc[offset_lpc], reflec_coeff, corr, kLpcOrder);
+  }
+}
+
+// Fit a second order curve to these 3 points and find the location of the
+// extremum. The points are inverted before curve fitting.
+static float QuadraticInterpolation(float prev_val,
+                                    float curr_val,
+                                    float next_val) {
+  // Doing the interpolation in |1 / A(z)|^2.
+  float fractional_index = 0;
+  next_val = 1.0f / next_val;
+  prev_val = 1.0f / prev_val;
+  curr_val = 1.0f / curr_val;
+
+  fractional_index =
+      -(next_val - prev_val) * 0.5f / (next_val + prev_val - 2.f * curr_val);
+  RTC_DCHECK_LT(fabs(fractional_index), 1);
+  return fractional_index;
+}
+
+// 1 / A(z), where A(z) is defined by |lpc| is a model of the spectral envelope
+// of the input signal. The local maximum of the spectral envelope corresponds
+// with the local minimum of A(z). It saves complexity, as we save one
+// inversion. Furthermore, we find the first local maximum of magnitude squared,
+// to save on one square root.
+void VadAudioProc::FindFirstSpectralPeaks(double* f_peak,
+                                          size_t length_f_peak) {
+  RTC_DCHECK_GE(length_f_peak, kNum10msSubframes);
+  double lpc[kNum10msSubframes * (kLpcOrder + 1)];
+  // For all sub-frames.
+  GetLpcPolynomials(lpc, kNum10msSubframes * (kLpcOrder + 1));
+
+  const size_t kNumDftCoefficients = kDftSize / 2 + 1;
+  float data[kDftSize];
+
+  for (size_t i = 0; i < kNum10msSubframes; i++) {
+    // Convert to float with zero pad.
+    memset(data, 0, sizeof(data));
+    for (size_t n = 0; n < kLpcOrder + 1; n++) {
+      data[n] = static_cast<float>(lpc[i * (kLpcOrder + 1) + n]);
+    }
+    // Transform to frequency domain.
+    WebRtc_rdft(kDftSize, 1, data, ip_, w_fft_);
+
+    size_t index_peak = 0;
+    float prev_magn_sqr = data[0] * data[0];
+    float curr_magn_sqr = data[2] * data[2] + data[3] * data[3];
+    float next_magn_sqr;
+    bool found_peak = false;
+    for (size_t n = 2; n < kNumDftCoefficients - 1; n++) {
+      next_magn_sqr =
+          data[2 * n] * data[2 * n] + data[2 * n + 1] * data[2 * n + 1];
+      if (curr_magn_sqr < prev_magn_sqr && curr_magn_sqr < next_magn_sqr) {
+        found_peak = true;
+        index_peak = n - 1;
+        break;
+      }
+      prev_magn_sqr = curr_magn_sqr;
+      curr_magn_sqr = next_magn_sqr;
+    }
+    float fractional_index = 0;
+    if (!found_peak) {
+      // Checking if |kNumDftCoefficients - 1| is the local minimum.
+      next_magn_sqr = data[1] * data[1];
+      if (curr_magn_sqr < prev_magn_sqr && curr_magn_sqr < next_magn_sqr) {
+        index_peak = kNumDftCoefficients - 1;
+      }
+    } else {
+      // A peak is found, do a simple quadratic interpolation to get a more
+      // accurate estimate of the peak location.
+      fractional_index =
+          QuadraticInterpolation(prev_magn_sqr, curr_magn_sqr, next_magn_sqr);
+    }
+    f_peak[i] = (index_peak + fractional_index) * kFrequencyResolution;
+  }
+}
+
+// Using iSAC functions to estimate pitch gains & lags.
+void VadAudioProc::PitchAnalysis(double* log_pitch_gains,
+                                 double* pitch_lags_hz,
+                                 size_t length) {
+  // TODO(turajs): This can be "imported" from iSAC & and the next two
+  // constants.
+  RTC_DCHECK_GE(length, kNum10msSubframes);
+  const int kNumPitchSubframes = 4;
+  double gains[kNumPitchSubframes];
+  double lags[kNumPitchSubframes];
+
+  const int kNumSubbandFrameSamples = 240;
+  const int kNumLookaheadSamples = 24;
+
+  float lower[kNumSubbandFrameSamples];
+  float upper[kNumSubbandFrameSamples];
+  double lower_lookahead[kNumSubbandFrameSamples];
+  double upper_lookahead[kNumSubbandFrameSamples];
+  double lower_lookahead_pre_filter[kNumSubbandFrameSamples +
+                                    kNumLookaheadSamples];
+
+  // Split signal to lower and upper bands
+  WebRtcIsac_SplitAndFilterFloat(&audio_buffer_[kNumPastSignalSamples], lower,
+                                 upper, lower_lookahead, upper_lookahead,
+                                 pre_filter_handle_.get());
+  WebRtcIsac_PitchAnalysis(lower_lookahead, lower_lookahead_pre_filter,
+                           pitch_analysis_handle_.get(), lags, gains);
+
+  // Lags are computed on lower-band signal with sampling rate half of the
+  // input signal.
+  GetSubframesPitchParameters(
+      kSampleRateHz / 2, gains, lags, kNumPitchSubframes, kNum10msSubframes,
+      &log_old_gain_, &old_lag_, log_pitch_gains, pitch_lags_hz);
+}
+
+void VadAudioProc::Rms(double* rms, size_t length_rms) {
+  RTC_DCHECK_GE(length_rms, kNum10msSubframes);
+  size_t offset = kNumPastSignalSamples;
+  for (size_t i = 0; i < kNum10msSubframes; i++) {
+    rms[i] = 0;
+    for (size_t n = 0; n < kNumSubframeSamples; n++, offset++)
+      rms[i] += audio_buffer_[offset] * audio_buffer_[offset];
+    rms[i] = sqrt(rms[i] / kNumSubframeSamples);
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/vad/vad_audio_proc.h b/modules/audio_processing/vad/vad_audio_proc.h
new file mode 100644
index 0000000..b1441a0
--- /dev/null
+++ b/modules/audio_processing/vad/vad_audio_proc.h
@@ -0,0 +1,93 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_VAD_AUDIO_PROC_H_
+#define MODULES_AUDIO_PROCESSING_VAD_VAD_AUDIO_PROC_H_
+
+#include <memory>
+
+#include "modules/audio_processing/vad/common.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class AudioFrame;
+class PoleZeroFilter;
+
+class VadAudioProc {
+ public:
+  // Forward declare iSAC structs.
+  struct PitchAnalysisStruct;
+  struct PreFiltBankstr;
+
+  VadAudioProc();
+  ~VadAudioProc();
+
+  int ExtractFeatures(const int16_t* audio_frame,
+                      size_t length,
+                      AudioFeatures* audio_features);
+
+  static const size_t kDftSize = 512;
+
+ private:
+  void PitchAnalysis(double* pitch_gains, double* pitch_lags_hz, size_t length);
+  void SubframeCorrelation(double* corr,
+                           size_t length_corr,
+                           size_t subframe_index);
+  void GetLpcPolynomials(double* lpc, size_t length_lpc);
+  void FindFirstSpectralPeaks(double* f_peak, size_t length_f_peak);
+  void Rms(double* rms, size_t length_rms);
+  void ResetBuffer();
+
+  // To compute spectral peak we perform LPC analysis to get spectral envelope.
+  // For every 30 ms we compute 3 spectral peak there for 3 LPC analysis.
+  // LPC is computed over 15 ms of windowed audio. For every 10 ms sub-frame
+  // we need 5 ms of past signal to create the input of LPC analysis.
+  enum : size_t {
+    kNumPastSignalSamples = static_cast<size_t>(kSampleRateHz / 200)
+  };
+
+  // TODO(turajs): maybe defining this at a higher level (maybe enum) so that
+  // all the code recognize it as "no-error."
+  enum : int { kNoError = 0 };
+
+  enum : size_t { kNum10msSubframes = 3 };
+  enum : size_t {
+    kNumSubframeSamples = static_cast<size_t>(kSampleRateHz / 100)
+  };
+  enum : size_t {
+    // Samples in 30 ms @ given sampling rate.
+    kNumSamplesToProcess = kNum10msSubframes * kNumSubframeSamples
+  };
+  enum : size_t {
+    kBufferLength = kNumPastSignalSamples + kNumSamplesToProcess
+  };
+  enum : size_t { kIpLength = kDftSize >> 1 };
+  enum : size_t { kWLength = kDftSize >> 1 };
+  enum : size_t { kLpcOrder = 16 };
+
+  size_t ip_[kIpLength];
+  float w_fft_[kWLength];
+
+  // A buffer of 5 ms (past audio) + 30 ms (one iSAC frame ).
+  float audio_buffer_[kBufferLength];
+  size_t num_buffer_samples_;
+
+  double log_old_gain_;
+  double old_lag_;
+
+  std::unique_ptr<PitchAnalysisStruct> pitch_analysis_handle_;
+  std::unique_ptr<PreFiltBankstr> pre_filter_handle_;
+  std::unique_ptr<PoleZeroFilter> high_pass_filter_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_VAD_VAD_AUDIO_PROC_H_
diff --git a/modules/audio_processing/vad/vad_audio_proc_internal.h b/modules/audio_processing/vad/vad_audio_proc_internal.h
new file mode 100644
index 0000000..ab1e636
--- /dev/null
+++ b/modules/audio_processing/vad/vad_audio_proc_internal.h
@@ -0,0 +1,94 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_VAD_AUDIO_PROC_INTERNAL_H_
+#define MODULES_AUDIO_PROCESSING_VAD_VAD_AUDIO_PROC_INTERNAL_H_
+
+namespace webrtc {
+
+// These values should match MATLAB counterparts for unit-tests to pass.
+static const double kCorrWeight[] = {1.000000,
+                                     0.985000,
+                                     0.970225,
+                                     0.955672,
+                                     0.941337,
+                                     0.927217,
+                                     0.913308,
+                                     0.899609,
+                                     0.886115,
+                                     0.872823,
+                                     0.859730,
+                                     0.846834,
+                                     0.834132,
+                                     0.821620,
+                                     0.809296,
+                                     0.797156,
+                                     0.785199};
+
+static const double kLpcAnalWin[] = {
+    0.00000000, 0.01314436, 0.02628645, 0.03942400, 0.05255473, 0.06567639,
+    0.07878670, 0.09188339, 0.10496421, 0.11802689, 0.13106918, 0.14408883,
+    0.15708358, 0.17005118, 0.18298941, 0.19589602, 0.20876878, 0.22160547,
+    0.23440387, 0.24716177, 0.25987696, 0.27254725, 0.28517045, 0.29774438,
+    0.31026687, 0.32273574, 0.33514885, 0.34750406, 0.35979922, 0.37203222,
+    0.38420093, 0.39630327, 0.40833713, 0.42030043, 0.43219112, 0.44400713,
+    0.45574642, 0.46740697, 0.47898676, 0.49048379, 0.50189608, 0.51322164,
+    0.52445853, 0.53560481, 0.54665854, 0.55761782, 0.56848075, 0.57924546,
+    0.58991008, 0.60047278, 0.61093173, 0.62128512, 0.63153117, 0.64166810,
+    0.65169416, 0.66160761, 0.67140676, 0.68108990, 0.69065536, 0.70010148,
+    0.70942664, 0.71862923, 0.72770765, 0.73666033, 0.74548573, 0.75418233,
+    0.76274862, 0.77118312, 0.77948437, 0.78765094, 0.79568142, 0.80357442,
+    0.81132858, 0.81894256, 0.82641504, 0.83374472, 0.84093036, 0.84797069,
+    0.85486451, 0.86161063, 0.86820787, 0.87465511, 0.88095122, 0.88709512,
+    0.89308574, 0.89892206, 0.90460306, 0.91012776, 0.91549520, 0.92070447,
+    0.92575465, 0.93064488, 0.93537432, 0.93994213, 0.94434755, 0.94858979,
+    0.95266814, 0.95658189, 0.96033035, 0.96391289, 0.96732888, 0.97057773,
+    0.97365889, 0.97657181, 0.97931600, 0.98189099, 0.98429632, 0.98653158,
+    0.98859639, 0.99049038, 0.99221324, 0.99376466, 0.99514438, 0.99635215,
+    0.99738778, 0.99825107, 0.99894188, 0.99946010, 0.99980562, 0.99997840,
+    0.99997840, 0.99980562, 0.99946010, 0.99894188, 0.99825107, 0.99738778,
+    0.99635215, 0.99514438, 0.99376466, 0.99221324, 0.99049038, 0.98859639,
+    0.98653158, 0.98429632, 0.98189099, 0.97931600, 0.97657181, 0.97365889,
+    0.97057773, 0.96732888, 0.96391289, 0.96033035, 0.95658189, 0.95266814,
+    0.94858979, 0.94434755, 0.93994213, 0.93537432, 0.93064488, 0.92575465,
+    0.92070447, 0.91549520, 0.91012776, 0.90460306, 0.89892206, 0.89308574,
+    0.88709512, 0.88095122, 0.87465511, 0.86820787, 0.86161063, 0.85486451,
+    0.84797069, 0.84093036, 0.83374472, 0.82641504, 0.81894256, 0.81132858,
+    0.80357442, 0.79568142, 0.78765094, 0.77948437, 0.77118312, 0.76274862,
+    0.75418233, 0.74548573, 0.73666033, 0.72770765, 0.71862923, 0.70942664,
+    0.70010148, 0.69065536, 0.68108990, 0.67140676, 0.66160761, 0.65169416,
+    0.64166810, 0.63153117, 0.62128512, 0.61093173, 0.60047278, 0.58991008,
+    0.57924546, 0.56848075, 0.55761782, 0.54665854, 0.53560481, 0.52445853,
+    0.51322164, 0.50189608, 0.49048379, 0.47898676, 0.46740697, 0.45574642,
+    0.44400713, 0.43219112, 0.42030043, 0.40833713, 0.39630327, 0.38420093,
+    0.37203222, 0.35979922, 0.34750406, 0.33514885, 0.32273574, 0.31026687,
+    0.29774438, 0.28517045, 0.27254725, 0.25987696, 0.24716177, 0.23440387,
+    0.22160547, 0.20876878, 0.19589602, 0.18298941, 0.17005118, 0.15708358,
+    0.14408883, 0.13106918, 0.11802689, 0.10496421, 0.09188339, 0.07878670,
+    0.06567639, 0.05255473, 0.03942400, 0.02628645, 0.01314436, 0.00000000};
+
+static const size_t kFilterOrder = 2;
+static const float kCoeffNumerator[kFilterOrder + 1] = {0.974827f,
+                                                        -1.949650f,
+                                                        0.974827f};
+static const float kCoeffDenominator[kFilterOrder + 1] = {1.0f,
+                                                          -1.971999f,
+                                                          0.972457f};
+
+static_assert(kFilterOrder + 1 ==
+                  sizeof(kCoeffNumerator) / sizeof(kCoeffNumerator[0]),
+              "numerator coefficients incorrect size");
+static_assert(kFilterOrder + 1 ==
+                  sizeof(kCoeffDenominator) / sizeof(kCoeffDenominator[0]),
+              "denominator coefficients incorrect size");
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_VAD_VAD_AUDIO_PROCESSING_H_
diff --git a/modules/audio_processing/vad/vad_audio_proc_unittest.cc b/modules/audio_processing/vad/vad_audio_proc_unittest.cc
new file mode 100644
index 0000000..c520257
--- /dev/null
+++ b/modules/audio_processing/vad/vad_audio_proc_unittest.cc
@@ -0,0 +1,63 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// We don't test the value of pitch gain and lags as they are created by iSAC
+// routines. However, interpolation of pitch-gain and lags is in a separate
+// class and has its own unit-test.
+
+#include "modules/audio_processing/vad/vad_audio_proc.h"
+
+#include <math.h>
+#include <stdio.h>
+
+#include <string>
+
+#include "modules/audio_processing/vad/common.h"
+#include "modules/include/module_common_types.h"
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+TEST(AudioProcessingTest, DISABLED_ComputingFirstSpectralPeak) {
+  VadAudioProc audioproc;
+
+  std::string peak_file_name =
+      test::ResourcePath("audio_processing/agc/agc_spectral_peak", "dat");
+  FILE* peak_file = fopen(peak_file_name.c_str(), "rb");
+  ASSERT_TRUE(peak_file != NULL);
+
+  std::string pcm_file_name =
+      test::ResourcePath("audio_processing/agc/agc_audio", "pcm");
+  FILE* pcm_file = fopen(pcm_file_name.c_str(), "rb");
+  ASSERT_TRUE(pcm_file != NULL);
+
+  // Read 10 ms audio in each iteration.
+  const size_t kDataLength = kLength10Ms;
+  int16_t data[kDataLength] = {0};
+  AudioFeatures features;
+  double sp[kMaxNumFrames];
+  while (fread(data, sizeof(int16_t), kDataLength, pcm_file) == kDataLength) {
+    audioproc.ExtractFeatures(data, kDataLength, &features);
+    if (features.num_frames > 0) {
+      ASSERT_LT(features.num_frames, kMaxNumFrames);
+      // Read reference values.
+      const size_t num_frames = features.num_frames;
+      ASSERT_EQ(num_frames, fread(sp, sizeof(sp[0]), num_frames, peak_file));
+      for (size_t n = 0; n < features.num_frames; n++)
+        EXPECT_NEAR(features.spectral_peak[n], sp[n], 3);
+    }
+  }
+
+  fclose(peak_file);
+  fclose(pcm_file);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/vad/vad_circular_buffer.cc b/modules/audio_processing/vad/vad_circular_buffer.cc
new file mode 100644
index 0000000..3c4d5ad
--- /dev/null
+++ b/modules/audio_processing/vad/vad_circular_buffer.cc
@@ -0,0 +1,137 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/vad_circular_buffer.h"
+
+#include <stdlib.h>
+
+namespace webrtc {
+
+VadCircularBuffer::VadCircularBuffer(int buffer_size)
+    : buffer_(new double[buffer_size]),
+      is_full_(false),
+      index_(0),
+      buffer_size_(buffer_size),
+      sum_(0) {
+}
+
+VadCircularBuffer::~VadCircularBuffer() {
+}
+
+void VadCircularBuffer::Reset() {
+  is_full_ = false;
+  index_ = 0;
+  sum_ = 0;
+}
+
+VadCircularBuffer* VadCircularBuffer::Create(int buffer_size) {
+  if (buffer_size <= 0)
+    return NULL;
+  return new VadCircularBuffer(buffer_size);
+}
+
+double VadCircularBuffer::Oldest() const {
+  if (!is_full_)
+    return buffer_[0];
+  else
+    return buffer_[index_];
+}
+
+double VadCircularBuffer::Mean() {
+  double m;
+  if (is_full_) {
+    m = sum_ / buffer_size_;
+  } else {
+    if (index_ > 0)
+      m = sum_ / index_;
+    else
+      m = 0;
+  }
+  return m;
+}
+
+void VadCircularBuffer::Insert(double value) {
+  if (is_full_) {
+    sum_ -= buffer_[index_];
+  }
+  sum_ += value;
+  buffer_[index_] = value;
+  index_++;
+  if (index_ >= buffer_size_) {
+    is_full_ = true;
+    index_ = 0;
+  }
+}
+int VadCircularBuffer::BufferLevel() {
+  if (is_full_)
+    return buffer_size_;
+  return index_;
+}
+
+int VadCircularBuffer::Get(int index, double* value) const {
+  int err = ConvertToLinearIndex(&index);
+  if (err < 0)
+    return -1;
+  *value = buffer_[index];
+  return 0;
+}
+
+int VadCircularBuffer::Set(int index, double value) {
+  int err = ConvertToLinearIndex(&index);
+  if (err < 0)
+    return -1;
+
+  sum_ -= buffer_[index];
+  buffer_[index] = value;
+  sum_ += value;
+  return 0;
+}
+
+int VadCircularBuffer::ConvertToLinearIndex(int* index) const {
+  if (*index < 0 || *index >= buffer_size_)
+    return -1;
+
+  if (!is_full_ && *index >= index_)
+    return -1;
+
+  *index = index_ - 1 - *index;
+  if (*index < 0)
+    *index += buffer_size_;
+  return 0;
+}
+
+int VadCircularBuffer::RemoveTransient(int width_threshold,
+                                       double val_threshold) {
+  if (!is_full_ && index_ < width_threshold + 2)
+    return 0;
+
+  int index_1 = 0;
+  int index_2 = width_threshold + 1;
+  double v = 0;
+  if (Get(index_1, &v) < 0)
+    return -1;
+  if (v < val_threshold) {
+    Set(index_1, 0);
+    int index;
+    for (index = index_2; index > index_1; index--) {
+      if (Get(index, &v) < 0)
+        return -1;
+      if (v < val_threshold)
+        break;
+    }
+    for (; index > index_1; index--) {
+      if (Set(index, 0.0) < 0)
+        return -1;
+    }
+  }
+  return 0;
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/vad/vad_circular_buffer.h b/modules/audio_processing/vad/vad_circular_buffer.h
new file mode 100644
index 0000000..46b03d4
--- /dev/null
+++ b/modules/audio_processing/vad/vad_circular_buffer.h
@@ -0,0 +1,69 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_VAD_CIRCULAR_BUFFER_H_
+#define MODULES_AUDIO_PROCESSING_VAD_VAD_CIRCULAR_BUFFER_H_
+
+#include <memory>
+
+namespace webrtc {
+
+// A circular buffer tailored to the need of this project. It stores last
+// K samples of the input, and keeps track of the mean of the last samples.
+//
+// It is used in class "PitchBasedActivity" to keep track of posterior
+// probabilities in the past few seconds. The posterior probabilities are used
+// to recursively update prior probabilities.
+class VadCircularBuffer {
+ public:
+  static VadCircularBuffer* Create(int buffer_size);
+  ~VadCircularBuffer();
+
+  // If buffer is wrapped around.
+  bool is_full() const { return is_full_; }
+  // Get the oldest entry in the buffer.
+  double Oldest() const;
+  // Insert new value into the buffer.
+  void Insert(double value);
+  // Reset buffer, forget the past, start fresh.
+  void Reset();
+
+  // The mean value of the elements in the buffer. The return value is zero if
+  // buffer is empty, i.e. no value is inserted.
+  double Mean();
+  // Remove transients. If the values exceed |val_threshold| for a period
+  // shorter then or equal to |width_threshold|, then that period is considered
+  // transient and set to zero.
+  int RemoveTransient(int width_threshold, double val_threshold);
+
+ private:
+  explicit VadCircularBuffer(int buffer_size);
+  // Get previous values. |index = 0| corresponds to the most recent
+  // insertion. |index = 1| is the one before the most recent insertion, and
+  // so on.
+  int Get(int index, double* value) const;
+  // Set a given position to |value|. |index| is interpreted as above.
+  int Set(int index, double value);
+  // Return the number of valid elements in the buffer.
+  int BufferLevel();
+
+  // Convert an index with the interpretation as get() method to the
+  // corresponding linear index.
+  int ConvertToLinearIndex(int* index) const;
+
+  std::unique_ptr<double[]> buffer_;
+  bool is_full_;
+  int index_;
+  int buffer_size_;
+  double sum_;
+};
+
+}  // namespace webrtc
+#endif  // MODULES_AUDIO_PROCESSING_VAD_VAD_CIRCULAR_BUFFER_H_
diff --git a/modules/audio_processing/vad/vad_circular_buffer_unittest.cc b/modules/audio_processing/vad/vad_circular_buffer_unittest.cc
new file mode 100644
index 0000000..efbd70d
--- /dev/null
+++ b/modules/audio_processing/vad/vad_circular_buffer_unittest.cc
@@ -0,0 +1,134 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/vad_circular_buffer.h"
+
+#include <stdio.h>
+
+#include <memory>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+static const int kWidthThreshold = 7;
+static const double kValThreshold = 1.0;
+static const int kLongBuffSize = 100;
+static const int kShortBuffSize = 10;
+
+static void InsertSequentially(int k, VadCircularBuffer* circular_buffer) {
+  double mean_val;
+  for (int n = 1; n <= k; n++) {
+    EXPECT_TRUE(!circular_buffer->is_full());
+    circular_buffer->Insert(n);
+    mean_val = circular_buffer->Mean();
+    EXPECT_EQ((n + 1.0) / 2., mean_val);
+  }
+}
+
+static void Insert(double value,
+                   int num_insertion,
+                   VadCircularBuffer* circular_buffer) {
+  for (int n = 0; n < num_insertion; n++)
+    circular_buffer->Insert(value);
+}
+
+static void InsertZeros(int num_zeros, VadCircularBuffer* circular_buffer) {
+  Insert(0.0, num_zeros, circular_buffer);
+}
+
+TEST(VadCircularBufferTest, GeneralTest) {
+  std::unique_ptr<VadCircularBuffer> circular_buffer(
+      VadCircularBuffer::Create(kShortBuffSize));
+  double mean_val;
+
+  // Mean should return zero if nothing is inserted.
+  mean_val = circular_buffer->Mean();
+  EXPECT_DOUBLE_EQ(0.0, mean_val);
+  InsertSequentially(kShortBuffSize, circular_buffer.get());
+
+  // Should be full.
+  EXPECT_TRUE(circular_buffer->is_full());
+  // Correct update after being full.
+  for (int n = 1; n < kShortBuffSize; n++) {
+    circular_buffer->Insert(n);
+    mean_val = circular_buffer->Mean();
+    EXPECT_DOUBLE_EQ((kShortBuffSize + 1.) / 2., mean_val);
+    EXPECT_TRUE(circular_buffer->is_full());
+  }
+
+  // Check reset. This should be like starting fresh.
+  circular_buffer->Reset();
+  mean_val = circular_buffer->Mean();
+  EXPECT_DOUBLE_EQ(0, mean_val);
+  InsertSequentially(kShortBuffSize, circular_buffer.get());
+  EXPECT_TRUE(circular_buffer->is_full());
+}
+
+TEST(VadCircularBufferTest, TransientsRemoval) {
+  std::unique_ptr<VadCircularBuffer> circular_buffer(
+      VadCircularBuffer::Create(kLongBuffSize));
+  // Let the first transient be in wrap-around.
+  InsertZeros(kLongBuffSize - kWidthThreshold / 2, circular_buffer.get());
+
+  double push_val = kValThreshold;
+  double mean_val;
+  for (int k = kWidthThreshold; k >= 1; k--) {
+    Insert(push_val, k, circular_buffer.get());
+    circular_buffer->Insert(0);
+    mean_val = circular_buffer->Mean();
+    EXPECT_DOUBLE_EQ(k * push_val / kLongBuffSize, mean_val);
+    circular_buffer->RemoveTransient(kWidthThreshold, kValThreshold);
+    mean_val = circular_buffer->Mean();
+    EXPECT_DOUBLE_EQ(0, mean_val);
+  }
+}
+
+TEST(VadCircularBufferTest, TransientDetection) {
+  std::unique_ptr<VadCircularBuffer> circular_buffer(
+      VadCircularBuffer::Create(kLongBuffSize));
+  // Let the first transient be in wrap-around.
+  int num_insertion = kLongBuffSize - kWidthThreshold / 2;
+  InsertZeros(num_insertion, circular_buffer.get());
+
+  double push_val = 2;
+  // This is longer than a transient and shouldn't be removed.
+  int num_non_zero_elements = kWidthThreshold + 1;
+  Insert(push_val, num_non_zero_elements, circular_buffer.get());
+
+  double mean_val = circular_buffer->Mean();
+  EXPECT_DOUBLE_EQ(num_non_zero_elements * push_val / kLongBuffSize, mean_val);
+  circular_buffer->Insert(0);
+  EXPECT_EQ(0,
+            circular_buffer->RemoveTransient(kWidthThreshold, kValThreshold));
+  mean_val = circular_buffer->Mean();
+  EXPECT_DOUBLE_EQ(num_non_zero_elements * push_val / kLongBuffSize, mean_val);
+
+  // A transient right after a non-transient, should be removed and mean is
+  // not changed.
+  num_insertion = 3;
+  Insert(push_val, num_insertion, circular_buffer.get());
+  circular_buffer->Insert(0);
+  EXPECT_EQ(0,
+            circular_buffer->RemoveTransient(kWidthThreshold, kValThreshold));
+  mean_val = circular_buffer->Mean();
+  EXPECT_DOUBLE_EQ(num_non_zero_elements * push_val / kLongBuffSize, mean_val);
+
+  // Last input is larger than threshold, although the sequence is short but
+  // it shouldn't be considered transient.
+  Insert(push_val, num_insertion, circular_buffer.get());
+  num_non_zero_elements += num_insertion;
+  EXPECT_EQ(0,
+            circular_buffer->RemoveTransient(kWidthThreshold, kValThreshold));
+  mean_val = circular_buffer->Mean();
+  EXPECT_DOUBLE_EQ(num_non_zero_elements * push_val / kLongBuffSize, mean_val);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/vad/voice_activity_detector.cc b/modules/audio_processing/vad/voice_activity_detector.cc
new file mode 100644
index 0000000..66a704f
--- /dev/null
+++ b/modules/audio_processing/vad/voice_activity_detector.cc
@@ -0,0 +1,85 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/voice_activity_detector.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+
+const size_t kNumChannels = 1;
+
+const double kDefaultVoiceValue = 1.0;
+const double kNeutralProbability = 0.5;
+const double kLowProbability = 0.01;
+
+}  // namespace
+
+VoiceActivityDetector::VoiceActivityDetector()
+    : last_voice_probability_(kDefaultVoiceValue),
+      standalone_vad_(StandaloneVad::Create()) {
+}
+
+VoiceActivityDetector::~VoiceActivityDetector() = default;
+
+// Because ISAC has a different chunk length, it updates
+// |chunkwise_voice_probabilities_| and |chunkwise_rms_| when there is new data.
+// Otherwise it clears them.
+void VoiceActivityDetector::ProcessChunk(const int16_t* audio,
+                                         size_t length,
+                                         int sample_rate_hz) {
+  RTC_DCHECK_EQ(length, sample_rate_hz / 100);
+  // Resample to the required rate.
+  const int16_t* resampled_ptr = audio;
+  if (sample_rate_hz != kSampleRateHz) {
+    RTC_CHECK_EQ(
+        resampler_.ResetIfNeeded(sample_rate_hz, kSampleRateHz, kNumChannels),
+        0);
+    resampler_.Push(audio, length, resampled_, kLength10Ms, length);
+    resampled_ptr = resampled_;
+  }
+  RTC_DCHECK_EQ(length, kLength10Ms);
+
+  // Each chunk needs to be passed into |standalone_vad_|, because internally it
+  // buffers the audio and processes it all at once when GetActivity() is
+  // called.
+  RTC_CHECK_EQ(standalone_vad_->AddAudio(resampled_ptr, length), 0);
+
+  audio_processing_.ExtractFeatures(resampled_ptr, length, &features_);
+
+  chunkwise_voice_probabilities_.resize(features_.num_frames);
+  chunkwise_rms_.resize(features_.num_frames);
+  std::copy(features_.rms, features_.rms + chunkwise_rms_.size(),
+            chunkwise_rms_.begin());
+  if (features_.num_frames > 0) {
+    if (features_.silence) {
+      // The other features are invalid, so set the voice probabilities to an
+      // arbitrary low value.
+      std::fill(chunkwise_voice_probabilities_.begin(),
+                chunkwise_voice_probabilities_.end(), kLowProbability);
+    } else {
+      std::fill(chunkwise_voice_probabilities_.begin(),
+                chunkwise_voice_probabilities_.end(), kNeutralProbability);
+      RTC_CHECK_GE(
+          standalone_vad_->GetActivity(&chunkwise_voice_probabilities_[0],
+                                       chunkwise_voice_probabilities_.size()),
+          0);
+      RTC_CHECK_GE(pitch_based_vad_.VoicingProbability(
+                       features_, &chunkwise_voice_probabilities_[0]),
+                   0);
+    }
+    last_voice_probability_ = chunkwise_voice_probabilities_.back();
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/vad/voice_activity_detector.h b/modules/audio_processing/vad/voice_activity_detector.h
new file mode 100644
index 0000000..0079cb2
--- /dev/null
+++ b/modules/audio_processing/vad/voice_activity_detector.h
@@ -0,0 +1,69 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_VOICE_ACTIVITY_DETECTOR_H_
+#define MODULES_AUDIO_PROCESSING_VAD_VOICE_ACTIVITY_DETECTOR_H_
+
+#include <memory>
+#include <vector>
+
+#include "common_audio/resampler/include/resampler.h"
+#include "modules/audio_processing/vad/vad_audio_proc.h"
+#include "modules/audio_processing/vad/common.h"
+#include "modules/audio_processing/vad/pitch_based_vad.h"
+#include "modules/audio_processing/vad/standalone_vad.h"
+
+namespace webrtc {
+
+// A Voice Activity Detector (VAD) that combines the voice probability from the
+// StandaloneVad and PitchBasedVad to get a more robust estimation.
+class VoiceActivityDetector {
+ public:
+  VoiceActivityDetector();
+  ~VoiceActivityDetector();
+
+  // Processes each audio chunk and estimates the voice probability.
+  void ProcessChunk(const int16_t* audio, size_t length, int sample_rate_hz);
+
+  // Returns a vector of voice probabilities for each chunk. It can be empty for
+  // some chunks, but it catches up afterwards returning multiple values at
+  // once.
+  const std::vector<double>& chunkwise_voice_probabilities() const {
+    return chunkwise_voice_probabilities_;
+  }
+
+  // Returns a vector of RMS values for each chunk. It has the same length as
+  // chunkwise_voice_probabilities().
+  const std::vector<double>& chunkwise_rms() const { return chunkwise_rms_; }
+
+  // Returns the last voice probability, regardless of the internal
+  // implementation, although it has a few chunks of delay.
+  float last_voice_probability() const { return last_voice_probability_; }
+
+ private:
+  // TODO(aluebs): Change these to float.
+  std::vector<double> chunkwise_voice_probabilities_;
+  std::vector<double> chunkwise_rms_;
+
+  float last_voice_probability_;
+
+  Resampler resampler_;
+  VadAudioProc audio_processing_;
+
+  std::unique_ptr<StandaloneVad> standalone_vad_;
+  PitchBasedVad pitch_based_vad_;
+
+  int16_t resampled_[kLength10Ms];
+  AudioFeatures features_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_VAD_VOICE_ACTIVITY_DETECTOR_H_
diff --git a/modules/audio_processing/vad/voice_activity_detector_unittest.cc b/modules/audio_processing/vad/voice_activity_detector_unittest.cc
new file mode 100644
index 0000000..62eda2b
--- /dev/null
+++ b/modules/audio_processing/vad/voice_activity_detector_unittest.cc
@@ -0,0 +1,168 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/vad/voice_activity_detector.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "test/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+namespace {
+
+const int kStartTimeSec = 16;
+const float kMeanSpeechProbability = 0.3f;
+const float kMaxNoiseProbability = 0.1f;
+const size_t kNumChunks = 300u;
+const size_t kNumChunksPerIsacBlock = 3;
+
+void GenerateNoise(std::vector<int16_t>* data) {
+  for (size_t i = 0; i < data->size(); ++i) {
+    // std::rand returns between 0 and RAND_MAX, but this will work because it
+    // wraps into some random place.
+    (*data)[i] = std::rand();
+  }
+}
+
+}  // namespace
+
+TEST(VoiceActivityDetectorTest, ConstructorSetsDefaultValues) {
+  const float kDefaultVoiceValue = 1.f;
+
+  VoiceActivityDetector vad;
+
+  std::vector<double> p = vad.chunkwise_voice_probabilities();
+  std::vector<double> rms = vad.chunkwise_rms();
+
+  EXPECT_EQ(p.size(), 0u);
+  EXPECT_EQ(rms.size(), 0u);
+
+  EXPECT_FLOAT_EQ(vad.last_voice_probability(), kDefaultVoiceValue);
+}
+
+TEST(VoiceActivityDetectorTest, Speech16kHzHasHighVoiceProbabilities) {
+  const int kSampleRateHz = 16000;
+  const int kLength10Ms = kSampleRateHz / 100;
+
+  VoiceActivityDetector vad;
+
+  std::vector<int16_t> data(kLength10Ms);
+  float mean_probability = 0.f;
+
+  FILE* pcm_file =
+      fopen(test::ResourcePath("audio_processing/transient/audio16kHz", "pcm")
+                .c_str(),
+            "rb");
+  ASSERT_TRUE(pcm_file != nullptr);
+  // The silences in the file are skipped to get a more robust voice probability
+  // for speech.
+  ASSERT_EQ(fseek(pcm_file, kStartTimeSec * kSampleRateHz * sizeof(data[0]),
+                  SEEK_SET),
+            0);
+
+  size_t num_chunks = 0;
+  while (fread(&data[0], sizeof(data[0]), data.size(), pcm_file) ==
+         data.size()) {
+    vad.ProcessChunk(&data[0], data.size(), kSampleRateHz);
+
+    mean_probability += vad.last_voice_probability();
+
+    ++num_chunks;
+  }
+
+  mean_probability /= num_chunks;
+
+  EXPECT_GT(mean_probability, kMeanSpeechProbability);
+}
+
+TEST(VoiceActivityDetectorTest, Speech32kHzHasHighVoiceProbabilities) {
+  const int kSampleRateHz = 32000;
+  const int kLength10Ms = kSampleRateHz / 100;
+
+  VoiceActivityDetector vad;
+
+  std::vector<int16_t> data(kLength10Ms);
+  float mean_probability = 0.f;
+
+  FILE* pcm_file =
+      fopen(test::ResourcePath("audio_processing/transient/audio32kHz", "pcm")
+                .c_str(),
+            "rb");
+  ASSERT_TRUE(pcm_file != nullptr);
+  // The silences in the file are skipped to get a more robust voice probability
+  // for speech.
+  ASSERT_EQ(fseek(pcm_file, kStartTimeSec * kSampleRateHz * sizeof(data[0]),
+                  SEEK_SET),
+            0);
+
+  size_t num_chunks = 0;
+  while (fread(&data[0], sizeof(data[0]), data.size(), pcm_file) ==
+         data.size()) {
+    vad.ProcessChunk(&data[0], data.size(), kSampleRateHz);
+
+    mean_probability += vad.last_voice_probability();
+
+    ++num_chunks;
+  }
+
+  mean_probability /= num_chunks;
+
+  EXPECT_GT(mean_probability, kMeanSpeechProbability);
+}
+
+TEST(VoiceActivityDetectorTest, Noise16kHzHasLowVoiceProbabilities) {
+  VoiceActivityDetector vad;
+
+  std::vector<int16_t> data(kLength10Ms);
+  float max_probability = 0.f;
+
+  std::srand(42);
+
+  for (size_t i = 0; i < kNumChunks; ++i) {
+    GenerateNoise(&data);
+
+    vad.ProcessChunk(&data[0], data.size(), kSampleRateHz);
+
+    // Before the |vad has enough data to process an ISAC block it will return
+    // the default value, 1.f, which would ruin the |max_probability| value.
+    if (i > kNumChunksPerIsacBlock) {
+      max_probability = std::max(max_probability, vad.last_voice_probability());
+    }
+  }
+
+  EXPECT_LT(max_probability, kMaxNoiseProbability);
+}
+
+TEST(VoiceActivityDetectorTest, Noise32kHzHasLowVoiceProbabilities) {
+  VoiceActivityDetector vad;
+
+  std::vector<int16_t> data(2 * kLength10Ms);
+  float max_probability = 0.f;
+
+  std::srand(42);
+
+  for (size_t i = 0; i < kNumChunks; ++i) {
+    GenerateNoise(&data);
+
+    vad.ProcessChunk(&data[0], data.size(), 2 * kSampleRateHz);
+
+    // Before the |vad has enough data to process an ISAC block it will return
+    // the default value, 1.f, which would ruin the |max_probability| value.
+    if (i > kNumChunksPerIsacBlock) {
+      max_probability = std::max(max_probability, vad.last_voice_probability());
+    }
+  }
+
+  EXPECT_LT(max_probability, kMaxNoiseProbability);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/vad/voice_gmm_tables.h b/modules/audio_processing/vad/voice_gmm_tables.h
new file mode 100644
index 0000000..29cc7d6
--- /dev/null
+++ b/modules/audio_processing/vad/voice_gmm_tables.h
@@ -0,0 +1,85 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// GMM tables for active segments. Generated by MakeGmmTables.m.
+
+#ifndef MODULES_AUDIO_PROCESSING_VAD_VOICE_GMM_TABLES_H_
+#define MODULES_AUDIO_PROCESSING_VAD_VOICE_GMM_TABLES_H_
+
+static const int kVoiceGmmNumMixtures = 12;
+static const int kVoiceGmmDim = 3;
+
+static const double
+    kVoiceGmmCovarInverse[kVoiceGmmNumMixtures][kVoiceGmmDim][kVoiceGmmDim] = {
+        {{1.83673825579513e+00, -8.09791637570095e-04, 4.60106414365986e-03},
+         {-8.09791637570095e-04, 8.89351738394608e-04, -9.80188953277734e-04},
+         {4.60106414365986e-03, -9.80188953277734e-04, 1.38706060206582e-03}},
+        {{6.76228912850703e+01, -1.98893120119660e-02, -3.53548357253551e-03},
+         {-1.98893120119660e-02, 3.96216858500530e-05, -4.08492938394097e-05},
+         {-3.53548357253551e-03, -4.08492938394097e-05, 9.31864352856416e-04}},
+        {{9.98612435944558e+00, -5.27880954316893e-03, -6.30342541619017e-03},
+         {-5.27880954316893e-03, 4.54359480225226e-05, 6.30804591626044e-05},
+         {-6.30342541619017e-03, 6.30804591626044e-05, 5.36466441382942e-04}},
+        {{3.39917474216349e+01, -1.56213579433191e-03, -4.01459014990225e-02},
+         {-1.56213579433191e-03, 6.40415424897724e-05, 6.20076342427833e-05},
+         {-4.01459014990225e-02, 6.20076342427833e-05, 3.51199070103063e-03}},
+        {{1.34545062271428e+01, -7.94513610147144e-03, -5.34401019341728e-02},
+         {-7.94513610147144e-03, 1.16511820098649e-04, 4.66063702069293e-05},
+         {-5.34401019341728e-02, 4.66063702069293e-05, 2.72354323774163e-03}},
+        {{1.08557844314806e+02, -1.54885805673668e-02, -1.88029692674851e-02},
+         {-1.54885805673668e-02, 1.16404042786406e-04, 6.45579292702802e-06},
+         {-1.88029692674851e-02, 6.45579292702802e-06, 4.32330478391416e-04}},
+        {{8.22940066541450e+01, -1.15903110231303e-02, -4.92166764865343e-02},
+         {-1.15903110231303e-02, 7.42510742165261e-05, 3.73007314191290e-06},
+         {-4.92166764865343e-02, 3.73007314191290e-06, 3.64005221593244e-03}},
+        {{2.31133605685660e+00, -7.83261568950254e-04, 7.45744012346313e-04},
+         {-7.83261568950254e-04, 1.29460648214142e-05, -2.22774455093730e-06},
+         {7.45744012346313e-04, -2.22774455093730e-06, 1.05117294093010e-04}},
+        {{3.78767849189611e+02, 1.57759761011568e-03, -2.08551217988774e-02},
+         {1.57759761011568e-03, 4.76066236886865e-05, -2.33977412299324e-05},
+         {-2.08551217988774e-02, -2.33977412299324e-05, 5.24261005371196e-04}},
+        {{6.98580096506135e-01, -5.13850255217378e-04, -4.01124551717056e-04},
+         {-5.13850255217378e-04, 1.40501021984840e-06, -2.09496928716569e-06},
+         {-4.01124551717056e-04, -2.09496928716569e-06, 2.82879357740037e-04}},
+        {{2.62770945162399e+00, -2.31825753241430e-03, -5.30447217466318e-03},
+         {-2.31825753241430e-03, 4.59108572227649e-05, 7.67631886355405e-05},
+         {-5.30447217466318e-03, 7.67631886355405e-05, 2.28521601674098e-03}},
+        {{1.89940391362152e+02, -4.23280856852379e-03, -2.70608873541399e-02},
+         {-4.23280856852379e-03, 6.77547582742563e-05, 2.69154203800467e-05},
+         {-2.70608873541399e-02, 2.69154203800467e-05, 3.88574543373470e-03}}};
+
+static const double kVoiceGmmMean[kVoiceGmmNumMixtures][kVoiceGmmDim] = {
+    {-2.15020241646536e+00, 4.97079062999877e+02, 4.77078119504505e+02},
+    {-8.92097680029190e-01, 5.92064964199921e+02, 1.81045145941059e+02},
+    {-1.29435784144398e+00, 4.98450293410611e+02, 1.71991263804064e+02},
+    {-1.03925228397884e+00, 4.99511274321571e+02, 1.05838336539105e+02},
+    {-1.29229047206129e+00, 4.15026762566707e+02, 1.12861119017125e+02},
+    {-7.88748114599810e-01, 4.48739336688113e+02, 1.89784216956337e+02},
+    {-8.77777402332642e-01, 4.86620285054533e+02, 1.13477708016491e+02},
+    {-2.06465957063057e+00, 6.33385049870607e+02, 2.32758546796149e+02},
+    {-6.98893789231685e-01, 5.93622051503385e+02, 1.92536982473203e+02},
+    {-2.55901217508894e+00, 1.55914919756205e+03, 1.39769980835570e+02},
+    {-1.92070024165837e+00, 4.87983940444185e+02, 1.02745468128289e+02},
+    {-7.29187507662854e-01, 5.22717685022855e+02, 1.16377942283991e+02}};
+
+static const double kVoiceGmmWeights[kVoiceGmmNumMixtures] = {
+    -1.39789694361035e+01,
+    -1.19527720202104e+01,
+    -1.32396317929055e+01,
+    -1.09436815209238e+01,
+    -1.13440027478149e+01,
+    -1.12200721834504e+01,
+    -1.02537324043693e+01,
+    -1.60789861938302e+01,
+    -1.03394494048344e+01,
+    -1.83207938586818e+01,
+    -1.31186044948288e+01,
+    -9.52479998673554e+00};
+#endif  // MODULES_AUDIO_PROCESSING_VAD_VOICE_GMM_TABLES_H_
diff --git a/modules/audio_processing/voice_detection_impl.cc b/modules/audio_processing/voice_detection_impl.cc
new file mode 100644
index 0000000..5ee0c7b
--- /dev/null
+++ b/modules/audio_processing/voice_detection_impl.cc
@@ -0,0 +1,155 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/voice_detection_impl.h"
+
+#include "common_audio/vad/include/webrtc_vad.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+class VoiceDetectionImpl::Vad {
+ public:
+  Vad() {
+    state_ = WebRtcVad_Create();
+    RTC_CHECK(state_);
+    int error = WebRtcVad_Init(state_);
+    RTC_DCHECK_EQ(0, error);
+  }
+  ~Vad() {
+    WebRtcVad_Free(state_);
+  }
+  VadInst* state() { return state_; }
+ private:
+  VadInst* state_ = nullptr;
+  RTC_DISALLOW_COPY_AND_ASSIGN(Vad);
+};
+
+VoiceDetectionImpl::VoiceDetectionImpl(rtc::CriticalSection* crit)
+    : crit_(crit) {
+  RTC_DCHECK(crit);
+}
+
+VoiceDetectionImpl::~VoiceDetectionImpl() {}
+
+void VoiceDetectionImpl::Initialize(int sample_rate_hz) {
+  rtc::CritScope cs(crit_);
+  sample_rate_hz_ = sample_rate_hz;
+  std::unique_ptr<Vad> new_vad;
+  if (enabled_) {
+    new_vad.reset(new Vad());
+  }
+  vad_.swap(new_vad);
+  using_external_vad_ = false;
+  frame_size_samples_ =
+      static_cast<size_t>(frame_size_ms_ * sample_rate_hz_) / 1000;
+  set_likelihood(likelihood_);
+}
+
+void VoiceDetectionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+  rtc::CritScope cs(crit_);
+  if (!enabled_) {
+    return;
+  }
+  if (using_external_vad_) {
+    using_external_vad_ = false;
+    return;
+  }
+
+  RTC_DCHECK_GE(160, audio->num_frames_per_band());
+  // TODO(ajm): concatenate data in frame buffer here.
+  int vad_ret = WebRtcVad_Process(vad_->state(), sample_rate_hz_,
+                                  audio->mixed_low_pass_data(),
+                                  frame_size_samples_);
+  if (vad_ret == 0) {
+    stream_has_voice_ = false;
+    audio->set_activity(AudioFrame::kVadPassive);
+  } else if (vad_ret == 1) {
+    stream_has_voice_ = true;
+    audio->set_activity(AudioFrame::kVadActive);
+  } else {
+    RTC_NOTREACHED();
+  }
+}
+
+int VoiceDetectionImpl::Enable(bool enable) {
+  rtc::CritScope cs(crit_);
+  if (enabled_ != enable) {
+    enabled_ = enable;
+    Initialize(sample_rate_hz_);
+  }
+  return AudioProcessing::kNoError;
+}
+
+bool VoiceDetectionImpl::is_enabled() const {
+  rtc::CritScope cs(crit_);
+  return enabled_;
+}
+
+int VoiceDetectionImpl::set_stream_has_voice(bool has_voice) {
+  rtc::CritScope cs(crit_);
+  using_external_vad_ = true;
+  stream_has_voice_ = has_voice;
+  return AudioProcessing::kNoError;
+}
+
+bool VoiceDetectionImpl::stream_has_voice() const {
+  rtc::CritScope cs(crit_);
+  // TODO(ajm): enable this assertion?
+  //RTC_DCHECK(using_external_vad_ || is_component_enabled());
+  return stream_has_voice_;
+}
+
+int VoiceDetectionImpl::set_likelihood(VoiceDetection::Likelihood likelihood) {
+  rtc::CritScope cs(crit_);
+  likelihood_ = likelihood;
+  if (enabled_) {
+    int mode = 2;
+    switch (likelihood) {
+      case VoiceDetection::kVeryLowLikelihood:
+        mode = 3;
+        break;
+      case VoiceDetection::kLowLikelihood:
+        mode = 2;
+        break;
+      case VoiceDetection::kModerateLikelihood:
+        mode = 1;
+        break;
+      case VoiceDetection::kHighLikelihood:
+        mode = 0;
+        break;
+      default:
+        RTC_NOTREACHED();
+        break;
+    }
+    int error = WebRtcVad_set_mode(vad_->state(), mode);
+    RTC_DCHECK_EQ(0, error);
+  }
+  return AudioProcessing::kNoError;
+}
+
+VoiceDetection::Likelihood VoiceDetectionImpl::likelihood() const {
+  rtc::CritScope cs(crit_);
+  return likelihood_;
+}
+
+int VoiceDetectionImpl::set_frame_size_ms(int size) {
+  rtc::CritScope cs(crit_);
+  RTC_DCHECK_EQ(10, size); // TODO(ajm): remove when supported.
+  frame_size_ms_ = size;
+  Initialize(sample_rate_hz_);
+  return AudioProcessing::kNoError;
+}
+
+int VoiceDetectionImpl::frame_size_ms() const {
+  rtc::CritScope cs(crit_);
+  return frame_size_ms_;
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/voice_detection_impl.h b/modules/audio_processing/voice_detection_impl.h
new file mode 100644
index 0000000..4b724bd
--- /dev/null
+++ b/modules/audio_processing/voice_detection_impl.h
@@ -0,0 +1,58 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_VOICE_DETECTION_IMPL_H_
+#define MODULES_AUDIO_PROCESSING_VOICE_DETECTION_IMPL_H_
+
+#include <memory>
+
+#include "modules/audio_processing/include/audio_processing.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+
+namespace webrtc {
+
+class AudioBuffer;
+
+class VoiceDetectionImpl : public VoiceDetection {
+ public:
+  explicit VoiceDetectionImpl(rtc::CriticalSection* crit);
+  ~VoiceDetectionImpl() override;
+
+  // TODO(peah): Fold into ctor, once public API is removed.
+  void Initialize(int sample_rate_hz);
+  void ProcessCaptureAudio(AudioBuffer* audio);
+
+  // VoiceDetection implementation.
+  int Enable(bool enable) override;
+  bool is_enabled() const override;
+  int set_stream_has_voice(bool has_voice) override;
+  bool stream_has_voice() const override;
+  int set_likelihood(Likelihood likelihood) override;
+  Likelihood likelihood() const override;
+  int set_frame_size_ms(int size) override;
+  int frame_size_ms() const override;
+
+ private:
+  class Vad;
+  rtc::CriticalSection* const crit_;
+  bool enabled_ RTC_GUARDED_BY(crit_) = false;
+  bool stream_has_voice_ RTC_GUARDED_BY(crit_) = false;
+  bool using_external_vad_ RTC_GUARDED_BY(crit_) = false;
+  Likelihood likelihood_ RTC_GUARDED_BY(crit_) = kLowLikelihood;
+  int frame_size_ms_ RTC_GUARDED_BY(crit_) = 10;
+  size_t frame_size_samples_ RTC_GUARDED_BY(crit_) = 0;
+  int sample_rate_hz_ RTC_GUARDED_BY(crit_) = 0;
+  std::unique_ptr<Vad> vad_ RTC_GUARDED_BY(crit_);
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(VoiceDetectionImpl);
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_VOICE_DETECTION_IMPL_H_
diff --git a/modules/audio_processing/voice_detection_unittest.cc b/modules/audio_processing/voice_detection_unittest.cc
new file mode 100644
index 0000000..663913b
--- /dev/null
+++ b/modules/audio_processing/voice_detection_unittest.cc
@@ -0,0 +1,123 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include <vector>
+
+#include "api/array_view.h"
+#include "modules/audio_processing/audio_buffer.h"
+#include "modules/audio_processing/test/audio_buffer_tools.h"
+#include "modules/audio_processing/test/bitexactness_tools.h"
+#include "modules/audio_processing/voice_detection_impl.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+const int kNumFramesToProcess = 1000;
+
+// Process one frame of data and produce the output.
+void ProcessOneFrame(int sample_rate_hz,
+                     AudioBuffer* audio_buffer,
+                     VoiceDetectionImpl* voice_detection) {
+  if (sample_rate_hz > AudioProcessing::kSampleRate16kHz) {
+    audio_buffer->SplitIntoFrequencyBands();
+  }
+
+  voice_detection->ProcessCaptureAudio(audio_buffer);
+}
+
+// Processes a specified amount of frames, verifies the results and reports
+// any errors.
+void RunBitexactnessTest(int sample_rate_hz,
+                         size_t num_channels,
+                         int frame_size_ms_reference,
+                         bool stream_has_voice_reference,
+                         VoiceDetection::Likelihood likelihood_reference) {
+  rtc::CriticalSection crit_capture;
+  VoiceDetectionImpl voice_detection(&crit_capture);
+  voice_detection.Initialize(sample_rate_hz > 16000 ? 16000 : sample_rate_hz);
+  voice_detection.Enable(true);
+
+  int samples_per_channel = rtc::CheckedDivExact(sample_rate_hz, 100);
+  const StreamConfig capture_config(sample_rate_hz, num_channels, false);
+  AudioBuffer capture_buffer(
+      capture_config.num_frames(), capture_config.num_channels(),
+      capture_config.num_frames(), capture_config.num_channels(),
+      capture_config.num_frames());
+  test::InputAudioFile capture_file(
+      test::GetApmCaptureTestVectorFileName(sample_rate_hz));
+  std::vector<float> capture_input(samples_per_channel * num_channels);
+  for (int frame_no = 0; frame_no < kNumFramesToProcess; ++frame_no) {
+    ReadFloatSamplesFromStereoFile(samples_per_channel, num_channels,
+                                   &capture_file, capture_input);
+
+    test::CopyVectorToAudioBuffer(capture_config, capture_input,
+                                  &capture_buffer);
+
+    ProcessOneFrame(sample_rate_hz, &capture_buffer, &voice_detection);
+  }
+
+  int frame_size_ms = voice_detection.frame_size_ms();
+  bool stream_has_voice = voice_detection.stream_has_voice();
+  VoiceDetection::Likelihood likelihood = voice_detection.likelihood();
+
+  // Compare the outputs to the references.
+  EXPECT_EQ(frame_size_ms_reference, frame_size_ms);
+  EXPECT_EQ(stream_has_voice_reference, stream_has_voice);
+  EXPECT_EQ(likelihood_reference, likelihood);
+}
+
+const int kFrameSizeMsReference = 10;
+const bool kStreamHasVoiceReference = true;
+const VoiceDetection::Likelihood kLikelihoodReference =
+    VoiceDetection::kLowLikelihood;
+
+}  // namespace
+
+TEST(VoiceDetectionBitExactnessTest, Mono8kHz) {
+  RunBitexactnessTest(8000, 1, kFrameSizeMsReference, kStreamHasVoiceReference,
+                      kLikelihoodReference);
+}
+
+TEST(VoiceDetectionBitExactnessTest, Mono16kHz) {
+  RunBitexactnessTest(16000, 1, kFrameSizeMsReference, kStreamHasVoiceReference,
+                      kLikelihoodReference);
+}
+
+TEST(VoiceDetectionBitExactnessTest, Mono32kHz) {
+  RunBitexactnessTest(32000, 1, kFrameSizeMsReference, kStreamHasVoiceReference,
+                      kLikelihoodReference);
+}
+
+TEST(VoiceDetectionBitExactnessTest, Mono48kHz) {
+  RunBitexactnessTest(48000, 1, kFrameSizeMsReference, kStreamHasVoiceReference,
+                      kLikelihoodReference);
+}
+
+TEST(VoiceDetectionBitExactnessTest, Stereo8kHz) {
+  RunBitexactnessTest(8000, 2, kFrameSizeMsReference, kStreamHasVoiceReference,
+                      kLikelihoodReference);
+}
+
+TEST(VoiceDetectionBitExactnessTest, Stereo16kHz) {
+  RunBitexactnessTest(16000, 2, kFrameSizeMsReference, kStreamHasVoiceReference,
+                      kLikelihoodReference);
+}
+
+TEST(VoiceDetectionBitExactnessTest, Stereo32kHz) {
+  RunBitexactnessTest(32000, 2, kFrameSizeMsReference, kStreamHasVoiceReference,
+                      kLikelihoodReference);
+}
+
+TEST(VoiceDetectionBitExactnessTest, Stereo48kHz) {
+  RunBitexactnessTest(48000, 2, kFrameSizeMsReference, kStreamHasVoiceReference,
+                      kLikelihoodReference);
+}
+
+}  // namespace webrtc
diff --git a/modules/include/DEPS b/modules/include/DEPS
new file mode 100644
index 0000000..1b4439d
--- /dev/null
+++ b/modules/include/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+  "+common_video",
+]
diff --git a/modules/include/module.h b/modules/include/module.h
new file mode 100644
index 0000000..fc2a1b5
--- /dev/null
+++ b/modules/include/module.h
@@ -0,0 +1,63 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_INCLUDE_MODULE_H_
+#define MODULES_INCLUDE_MODULE_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class ProcessThread;
+
+class Module {
+ public:
+  // Returns the number of milliseconds until the module wants a worker
+  // thread to call Process.
+  // This method is called on the same worker thread as Process will
+  // be called on.
+  // TODO(tommi): Almost all implementations of this function, need to know
+  // the current tick count.  Consider passing it as an argument.  It could
+  // also improve the accuracy of when the next callback occurs since the
+  // thread that calls Process() will also have it's tick count reference
+  // which might not match with what the implementations use.
+  virtual int64_t TimeUntilNextProcess() = 0;
+
+  // Process any pending tasks such as timeouts.
+  // Called on a worker thread.
+  virtual void Process() = 0;
+
+  // This method is called when the module is attached to a *running* process
+  // thread or detached from one.  In the case of detaching, |process_thread|
+  // will be nullptr.
+  //
+  // This method will be called in the following cases:
+  //
+  // * Non-null process_thread:
+  //   * ProcessThread::RegisterModule() is called while the thread is running.
+  //   * ProcessThread::Start() is called and RegisterModule has previously
+  //     been called.  The thread will be started immediately after notifying
+  //     all modules.
+  //
+  // * Null process_thread:
+  //   * ProcessThread::DeRegisterModule() is called while the thread is
+  //     running.
+  //   * ProcessThread::Stop() was called and the thread has been stopped.
+  //
+  // NOTE: This method is not called from the worker thread itself, but from
+  //       the thread that registers/deregisters the module or calls Start/Stop.
+  virtual void ProcessThreadAttached(ProcessThread* process_thread) {}
+
+ protected:
+  virtual ~Module() {}
+};
+}  // namespace webrtc
+
+#endif  // MODULES_INCLUDE_MODULE_H_
diff --git a/modules/include/module_common_types.h b/modules/include/module_common_types.h
new file mode 100644
index 0000000..1290075
--- /dev/null
+++ b/modules/include/module_common_types.h
@@ -0,0 +1,304 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_INCLUDE_MODULE_COMMON_TYPES_H_
+#define MODULES_INCLUDE_MODULE_COMMON_TYPES_H_
+
+#include <assert.h>
+#include <string.h>  // memcpy
+
+#include <algorithm>
+#include <limits>
+
+#include "api/optional.h"
+// TODO(bugs.webrtc.org/7504): Included here because users of this header expect
+// it to declare AudioFrame. Delete as soon as all known users are updated.
+#include "api/audio/audio_frame.h"
+#include "api/rtp_headers.h"
+#include "api/video/video_rotation.h"
+#include "common_types.h"  // NOLINT(build/include)
+#include "modules/include/module_common_types_public.h"
+#include "modules/include/module_fec_types.h"
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
+#include "modules/video_coding/codecs/vp8/include/vp8_globals.h"
+#include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/deprecation.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/timeutils.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+struct RTPAudioHeader {
+  uint8_t numEnergy;                  // number of valid entries in arrOfEnergy
+  uint8_t arrOfEnergy[kRtpCsrcSize];  // one energy byte (0-9) per channel
+  bool isCNG;                         // is this CNG
+  size_t channel;                     // number of channels 2 = stereo
+};
+
+enum RtpVideoCodecTypes {
+  kRtpVideoNone = 0,
+  kRtpVideoGeneric = 1,
+  kRtpVideoVp8 = 2,
+  kRtpVideoVp9 = 3,
+  kRtpVideoH264 = 4
+};
+
+union RTPVideoTypeHeader {
+  RTPVideoHeaderVP8 VP8;
+  RTPVideoHeaderVP9 VP9;
+  RTPVideoHeaderH264 H264;
+};
+
+// Since RTPVideoHeader is used as a member of a union, it can't have a
+// non-trivial default constructor.
+struct RTPVideoHeader {
+  uint16_t width;  // size
+  uint16_t height;
+  VideoRotation rotation;
+
+  PlayoutDelay playout_delay;
+
+  VideoContentType content_type;
+
+  VideoSendTiming video_timing;
+
+  bool is_first_packet_in_frame;
+  uint8_t simulcastIdx;  // Index if the simulcast encoder creating
+                         // this frame, 0 if not using simulcast.
+  RtpVideoCodecTypes codec;
+  RTPVideoTypeHeader codecHeader;
+};
+union RTPTypeHeader {
+  RTPAudioHeader Audio;
+  RTPVideoHeader Video;
+};
+
+struct WebRtcRTPHeader {
+  RTPHeader header;
+  FrameType frameType;
+  RTPTypeHeader type;
+  // NTP time of the capture time in local timebase in milliseconds.
+  int64_t ntp_time_ms;
+};
+
+class RTPFragmentationHeader {
+ public:
+  RTPFragmentationHeader()
+      : fragmentationVectorSize(0),
+        fragmentationOffset(NULL),
+        fragmentationLength(NULL),
+        fragmentationTimeDiff(NULL),
+        fragmentationPlType(NULL) {}
+
+  RTPFragmentationHeader(RTPFragmentationHeader&& other)
+      : RTPFragmentationHeader() {
+    std::swap(*this, other);
+  }
+
+  ~RTPFragmentationHeader() {
+    delete[] fragmentationOffset;
+    delete[] fragmentationLength;
+    delete[] fragmentationTimeDiff;
+    delete[] fragmentationPlType;
+  }
+
+  void operator=(RTPFragmentationHeader&& other) { std::swap(*this, other); }
+
+  friend void swap(RTPFragmentationHeader& a, RTPFragmentationHeader& b) {
+    using std::swap;
+    swap(a.fragmentationVectorSize, b.fragmentationVectorSize);
+    swap(a.fragmentationOffset, b.fragmentationOffset);
+    swap(a.fragmentationLength, b.fragmentationLength);
+    swap(a.fragmentationTimeDiff, b.fragmentationTimeDiff);
+    swap(a.fragmentationPlType, b.fragmentationPlType);
+  }
+
+  void CopyFrom(const RTPFragmentationHeader& src) {
+    if (this == &src) {
+      return;
+    }
+
+    if (src.fragmentationVectorSize != fragmentationVectorSize) {
+      // new size of vectors
+
+      // delete old
+      delete[] fragmentationOffset;
+      fragmentationOffset = NULL;
+      delete[] fragmentationLength;
+      fragmentationLength = NULL;
+      delete[] fragmentationTimeDiff;
+      fragmentationTimeDiff = NULL;
+      delete[] fragmentationPlType;
+      fragmentationPlType = NULL;
+
+      if (src.fragmentationVectorSize > 0) {
+        // allocate new
+        if (src.fragmentationOffset) {
+          fragmentationOffset = new size_t[src.fragmentationVectorSize];
+        }
+        if (src.fragmentationLength) {
+          fragmentationLength = new size_t[src.fragmentationVectorSize];
+        }
+        if (src.fragmentationTimeDiff) {
+          fragmentationTimeDiff = new uint16_t[src.fragmentationVectorSize];
+        }
+        if (src.fragmentationPlType) {
+          fragmentationPlType = new uint8_t[src.fragmentationVectorSize];
+        }
+      }
+      // set new size
+      fragmentationVectorSize = src.fragmentationVectorSize;
+    }
+
+    if (src.fragmentationVectorSize > 0) {
+      // copy values
+      if (src.fragmentationOffset) {
+        memcpy(fragmentationOffset, src.fragmentationOffset,
+               src.fragmentationVectorSize * sizeof(size_t));
+      }
+      if (src.fragmentationLength) {
+        memcpy(fragmentationLength, src.fragmentationLength,
+               src.fragmentationVectorSize * sizeof(size_t));
+      }
+      if (src.fragmentationTimeDiff) {
+        memcpy(fragmentationTimeDiff, src.fragmentationTimeDiff,
+               src.fragmentationVectorSize * sizeof(uint16_t));
+      }
+      if (src.fragmentationPlType) {
+        memcpy(fragmentationPlType, src.fragmentationPlType,
+               src.fragmentationVectorSize * sizeof(uint8_t));
+      }
+    }
+  }
+
+  void VerifyAndAllocateFragmentationHeader(const size_t size) {
+    assert(size <= std::numeric_limits<uint16_t>::max());
+    const uint16_t size16 = static_cast<uint16_t>(size);
+    if (fragmentationVectorSize < size16) {
+      uint16_t oldVectorSize = fragmentationVectorSize;
+      {
+        // offset
+        size_t* oldOffsets = fragmentationOffset;
+        fragmentationOffset = new size_t[size16];
+        memset(fragmentationOffset + oldVectorSize, 0,
+               sizeof(size_t) * (size16 - oldVectorSize));
+        // copy old values
+        memcpy(fragmentationOffset, oldOffsets,
+               sizeof(size_t) * oldVectorSize);
+        delete[] oldOffsets;
+      }
+      // length
+      {
+        size_t* oldLengths = fragmentationLength;
+        fragmentationLength = new size_t[size16];
+        memset(fragmentationLength + oldVectorSize, 0,
+               sizeof(size_t) * (size16 - oldVectorSize));
+        memcpy(fragmentationLength, oldLengths,
+               sizeof(size_t) * oldVectorSize);
+        delete[] oldLengths;
+      }
+      // time diff
+      {
+        uint16_t* oldTimeDiffs = fragmentationTimeDiff;
+        fragmentationTimeDiff = new uint16_t[size16];
+        memset(fragmentationTimeDiff + oldVectorSize, 0,
+               sizeof(uint16_t) * (size16 - oldVectorSize));
+        memcpy(fragmentationTimeDiff, oldTimeDiffs,
+               sizeof(uint16_t) * oldVectorSize);
+        delete[] oldTimeDiffs;
+      }
+      // payload type
+      {
+        uint8_t* oldTimePlTypes = fragmentationPlType;
+        fragmentationPlType = new uint8_t[size16];
+        memset(fragmentationPlType + oldVectorSize, 0,
+               sizeof(uint8_t) * (size16 - oldVectorSize));
+        memcpy(fragmentationPlType, oldTimePlTypes,
+               sizeof(uint8_t) * oldVectorSize);
+        delete[] oldTimePlTypes;
+      }
+      fragmentationVectorSize = size16;
+    }
+  }
+
+  uint16_t fragmentationVectorSize;  // Number of fragmentations
+  size_t* fragmentationOffset;       // Offset of pointer to data for each
+                                     // fragmentation
+  size_t* fragmentationLength;       // Data size for each fragmentation
+  uint16_t* fragmentationTimeDiff;   // Timestamp difference relative "now" for
+                                     // each fragmentation
+  uint8_t* fragmentationPlType;      // Payload type of each fragmentation
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(RTPFragmentationHeader);
+};
+
+struct RTCPVoIPMetric {
+  // RFC 3611 4.7
+  uint8_t lossRate;
+  uint8_t discardRate;
+  uint8_t burstDensity;
+  uint8_t gapDensity;
+  uint16_t burstDuration;
+  uint16_t gapDuration;
+  uint16_t roundTripDelay;
+  uint16_t endSystemDelay;
+  uint8_t signalLevel;
+  uint8_t noiseLevel;
+  uint8_t RERL;
+  uint8_t Gmin;
+  uint8_t Rfactor;
+  uint8_t extRfactor;
+  uint8_t MOSLQ;
+  uint8_t MOSCQ;
+  uint8_t RXconfig;
+  uint16_t JBnominal;
+  uint16_t JBmax;
+  uint16_t JBabsMax;
+};
+
+// Interface used by the CallStats class to distribute call statistics.
+// Callbacks will be triggered as soon as the class has been registered to a
+// CallStats object using RegisterStatsObserver.
+class CallStatsObserver {
+ public:
+  virtual void OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) = 0;
+
+  virtual ~CallStatsObserver() {}
+};
+
+struct PacedPacketInfo {
+  PacedPacketInfo() {}
+  PacedPacketInfo(int probe_cluster_id,
+                  int probe_cluster_min_probes,
+                  int probe_cluster_min_bytes)
+      : probe_cluster_id(probe_cluster_id),
+        probe_cluster_min_probes(probe_cluster_min_probes),
+        probe_cluster_min_bytes(probe_cluster_min_bytes) {}
+
+  bool operator==(const PacedPacketInfo& rhs) const {
+    return send_bitrate_bps == rhs.send_bitrate_bps &&
+           probe_cluster_id == rhs.probe_cluster_id &&
+           probe_cluster_min_probes == rhs.probe_cluster_min_probes &&
+           probe_cluster_min_bytes == rhs.probe_cluster_min_bytes;
+  }
+
+  static constexpr int kNotAProbe = -1;
+  int send_bitrate_bps = -1;
+  int probe_cluster_id = kNotAProbe;
+  int probe_cluster_min_probes = -1;
+  int probe_cluster_min_bytes = -1;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_INCLUDE_MODULE_COMMON_TYPES_H_
diff --git a/modules/include/module_common_types_public.h b/modules/include/module_common_types_public.h
new file mode 100644
index 0000000..2fbb49a
--- /dev/null
+++ b/modules/include/module_common_types_public.h
@@ -0,0 +1,110 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_INCLUDE_MODULE_COMMON_TYPES_PUBLIC_H_
+#define MODULES_INCLUDE_MODULE_COMMON_TYPES_PUBLIC_H_
+
+#include <limits>
+
+#include "api/optional.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+template <typename U>
+inline bool IsNewer(U value, U prev_value) {
+  static_assert(!std::numeric_limits<U>::is_signed, "U must be unsigned");
+  // kBreakpoint is the half-way mark for the type U. For instance, for a
+  // uint16_t it will be 0x8000, and for a uint32_t, it will be 0x8000000.
+  constexpr U kBreakpoint = (std::numeric_limits<U>::max() >> 1) + 1;
+  // Distinguish between elements that are exactly kBreakpoint apart.
+  // If t1>t2 and |t1-t2| = kBreakpoint: IsNewer(t1,t2)=true,
+  // IsNewer(t2,t1)=false
+  // rather than having IsNewer(t1,t2) = IsNewer(t2,t1) = false.
+  if (value - prev_value == kBreakpoint) {
+    return value > prev_value;
+  }
+  return value != prev_value &&
+         static_cast<U>(value - prev_value) < kBreakpoint;
+}
+
+// Utility class to unwrap a number to a larger type. The numbers will never be
+// unwrapped to a negative value.
+template <typename U>
+class Unwrapper {
+  static_assert(!std::numeric_limits<U>::is_signed, "U must be unsigned");
+  static_assert(std::numeric_limits<U>::max() <=
+                    std::numeric_limits<uint32_t>::max(),
+                "U must not be wider than 32 bits");
+
+ public:
+  // Get the unwrapped value, but don't update the internal state.
+  int64_t UnwrapWithoutUpdate(U value) const {
+    if (!last_value_)
+      return value;
+
+    constexpr int64_t kMaxPlusOne =
+        static_cast<int64_t>(std::numeric_limits<U>::max()) + 1;
+
+    U cropped_last = static_cast<U>(*last_value_);
+    int64_t delta = value - cropped_last;
+    if (IsNewer(value, cropped_last)) {
+      if (delta < 0)
+        delta += kMaxPlusOne;  // Wrap forwards.
+    } else if (delta > 0 && (*last_value_ + delta - kMaxPlusOne) >= 0) {
+      // If value is older but delta is positive, this is a backwards
+      // wrap-around. However, don't wrap backwards past 0 (unwrapped).
+      delta -= kMaxPlusOne;
+    }
+
+    return *last_value_ + delta;
+  }
+
+  // Only update the internal state to the specified last (unwrapped) value.
+  void UpdateLast(int64_t last_value) {
+    last_value_ = last_value;
+  }
+
+  // Unwrap the value and update the internal state.
+  int64_t Unwrap(U value) {
+    int64_t unwrapped = UnwrapWithoutUpdate(value);
+    UpdateLast(unwrapped);
+    return unwrapped;
+  }
+
+ private:
+  rtc::Optional<int64_t> last_value_;
+};
+
+using SequenceNumberUnwrapper = Unwrapper<uint16_t>;
+using TimestampUnwrapper = Unwrapper<uint32_t>;
+
+inline bool IsNewerSequenceNumber(uint16_t sequence_number,
+                                  uint16_t prev_sequence_number) {
+  return IsNewer(sequence_number, prev_sequence_number);
+}
+
+inline bool IsNewerTimestamp(uint32_t timestamp, uint32_t prev_timestamp) {
+  return IsNewer(timestamp, prev_timestamp);
+}
+
+inline uint16_t LatestSequenceNumber(uint16_t sequence_number1,
+                                     uint16_t sequence_number2) {
+  return IsNewerSequenceNumber(sequence_number1, sequence_number2)
+             ? sequence_number1
+             : sequence_number2;
+}
+
+inline uint32_t LatestTimestamp(uint32_t timestamp1, uint32_t timestamp2) {
+  return IsNewerTimestamp(timestamp1, timestamp2) ? timestamp1 : timestamp2;
+}
+
+}  // namespace webrtc
+#endif  // MODULES_INCLUDE_MODULE_COMMON_TYPES_PUBLIC_H_
diff --git a/modules/include/module_fec_types.h b/modules/include/module_fec_types.h
new file mode 100644
index 0000000..25d6bc5
--- /dev/null
+++ b/modules/include/module_fec_types.h
@@ -0,0 +1,34 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_INCLUDE_MODULE_FEC_TYPES_H_
+#define MODULES_INCLUDE_MODULE_FEC_TYPES_H_
+
+namespace webrtc {
+
+// Types for the FEC packet masks. The type |kFecMaskRandom| is based on a
+// random loss model. The type |kFecMaskBursty| is based on a bursty/consecutive
+// loss model. The packet masks are defined in
+// modules/rtp_rtcp/fec_private_tables_random(bursty).h
+enum FecMaskType {
+  kFecMaskRandom,
+  kFecMaskBursty,
+};
+
+// Struct containing forward error correction settings.
+struct FecProtectionParams {
+  int fec_rate;
+  int max_fec_frames;
+  FecMaskType fec_mask_type;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_INCLUDE_MODULE_FEC_TYPES_H_
diff --git a/modules/video_coding/codecs/h264/include/h264.h b/modules/video_coding/codecs/h264/include/h264.h
new file mode 100644
index 0000000..e23818b
--- /dev/null
+++ b/modules/video_coding/codecs/h264/include/h264.h
@@ -0,0 +1,54 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_H_
+#define MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_H_
+
+#include <memory>
+#include <vector>
+
+#include "media/base/codec.h"
+#include "modules/video_coding/include/video_codec_interface.h"
+
+namespace webrtc {
+
+struct SdpVideoFormat;
+
+// Set to disable the H.264 encoder/decoder implementations that are provided if
+// |rtc_use_h264| build flag is true (if false, this function does nothing).
+// This function should only be called before or during WebRTC initialization
+// and is not thread-safe.
+void DisableRtcUseH264();
+
+// Returns a vector with all supported internal H264 profiles that we can
+// negotiate in SDP, in order of preference.
+std::vector<SdpVideoFormat> SupportedH264Codecs();
+
+class H264Encoder : public VideoEncoder {
+ public:
+  static std::unique_ptr<H264Encoder> Create(const cricket::VideoCodec& codec);
+  // If H.264 is supported (any implementation).
+  static bool IsSupported();
+
+  ~H264Encoder() override {}
+};
+
+class H264Decoder : public VideoDecoder {
+ public:
+  static std::unique_ptr<H264Decoder> Create();
+  static bool IsSupported();
+
+  ~H264Decoder() override {}
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_H_
diff --git a/modules/video_coding/codecs/h264/include/h264_globals.h b/modules/video_coding/codecs/h264/include/h264_globals.h
new file mode 100644
index 0000000..cae270c
--- /dev/null
+++ b/modules/video_coding/codecs/h264/include/h264_globals.h
@@ -0,0 +1,82 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains codec dependent definitions that are needed in
+// order to compile the WebRTC codebase, even if this codec is not used.
+
+#ifndef MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_GLOBALS_H_
+#define MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_GLOBALS_H_
+
+namespace webrtc {
+
+// The packetization types that we support: single, aggregated, and fragmented.
+enum H264PacketizationTypes {
+  kH264SingleNalu,  // This packet contains a single NAL unit.
+  kH264StapA,       // This packet contains STAP-A (single time
+                    // aggregation) packets. If this packet has an
+                    // associated NAL unit type, it'll be for the
+                    // first such aggregated packet.
+  kH264FuA,         // This packet contains a FU-A (fragmentation
+                    // unit) packet, meaning it is a part of a frame
+                    // that was too large to fit into a single packet.
+};
+
+// Packetization modes are defined in RFC 6184 section 6
+// Due to the structure containing this being initialized with zeroes
+// in some places, and mode 1 being default, mode 1 needs to have the value
+// zero. https://crbug.com/webrtc/6803
+enum class H264PacketizationMode {
+  NonInterleaved = 0,  // Mode 1 - STAP-A, FU-A is allowed
+  SingleNalUnit        // Mode 0 - only single NALU allowed
+};
+
+// This function is declared inline because it is not clear which
+// .cc file it should belong to.
+// TODO(hta): Refactor. https://bugs.webrtc.org/6842
+inline std::ostream& operator<<(std::ostream& stream,
+                                H264PacketizationMode mode) {
+  switch (mode) {
+    case H264PacketizationMode::NonInterleaved:
+      stream << "NonInterleaved";
+      break;
+    case H264PacketizationMode::SingleNalUnit:
+      stream << "SingleNalUnit";
+      break;
+  }
+  return stream;
+}
+
+struct NaluInfo {
+  uint8_t type;
+  int sps_id;
+  int pps_id;
+};
+
+const size_t kMaxNalusPerPacket = 10;
+
+struct RTPVideoHeaderH264 {
+  // The NAL unit type. If this is a header for a
+  // fragmented packet, it's the NAL unit type of
+  // the original data. If this is the header for an
+  // aggregated packet, it's the NAL unit type of
+  // the first NAL unit in the packet.
+  uint8_t nalu_type;
+  // The packetization type of this buffer - single, aggregated or fragmented.
+  H264PacketizationTypes packetization_type;
+  NaluInfo nalus[kMaxNalusPerPacket];
+  size_t nalus_length;
+  // The packetization mode of this transport. Packetization mode
+  // determines which packetization types are allowed when packetizing.
+  H264PacketizationMode packetization_mode;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_VIDEO_CODING_CODECS_H264_INCLUDE_H264_GLOBALS_H_
diff --git a/modules/video_coding/codecs/interface/common_constants.h b/modules/video_coding/codecs/interface/common_constants.h
new file mode 100644
index 0000000..2fdcb90
--- /dev/null
+++ b/modules/video_coding/codecs/interface/common_constants.h
@@ -0,0 +1,26 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains constants that are used by multiple global
+// codec definitions (modules/video_coding/codecs/*/include/*_globals.h)
+
+#ifndef MODULES_VIDEO_CODING_CODECS_INTERFACE_COMMON_CONSTANTS_H_
+#define MODULES_VIDEO_CODING_CODECS_INTERFACE_COMMON_CONSTANTS_H_
+
+namespace webrtc {
+
+const int16_t kNoPictureId = -1;
+const int16_t kNoTl0PicIdx = -1;
+const uint8_t kNoTemporalIdx = 0xFF;
+const int kNoKeyIdx = -1;
+
+}  // namespace webrtc
+
+#endif  // MODULES_VIDEO_CODING_CODECS_INTERFACE_COMMON_CONSTANTS_H_
diff --git a/modules/video_coding/codecs/vp8/include/vp8.h b/modules/video_coding/codecs/vp8/include/vp8.h
new file mode 100644
index 0000000..00808e2
--- /dev/null
+++ b/modules/video_coding/codecs/vp8/include/vp8.h
@@ -0,0 +1,37 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ *
+ *  WEBRTC VP8 wrapper interface
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
+
+#include <memory>
+
+#include "modules/video_coding/include/video_codec_interface.h"
+
+namespace webrtc {
+
+class VP8Encoder : public VideoEncoder {
+ public:
+  static std::unique_ptr<VP8Encoder> Create();
+
+  virtual ~VP8Encoder() {}
+};  // end of VP8Encoder class
+
+class VP8Decoder : public VideoDecoder {
+ public:
+  static std::unique_ptr<VP8Decoder> Create();
+
+  virtual ~VP8Decoder() {}
+};  // end of VP8Decoder class
+}  // namespace webrtc
+
+#endif  // MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
diff --git a/modules/video_coding/codecs/vp8/include/vp8_common_types.h b/modules/video_coding/codecs/vp8/include/vp8_common_types.h
new file mode 100644
index 0000000..dff70ac
--- /dev/null
+++ b/modules/video_coding/codecs/vp8/include/vp8_common_types.h
@@ -0,0 +1,29 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_
+
+#include "common_types.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Ratio allocation between temporal streams:
+// Values as required for the VP8 codec (accumulating).
+static const float
+    kVp8LayerRateAlloction[kMaxSimulcastStreams][kMaxTemporalStreams] = {
+        {1.0f, 1.0f, 1.0f, 1.0f},  // 1 layer
+        {0.6f, 1.0f, 1.0f, 1.0f},  // 2 layers {60%, 40%}
+        {0.4f, 0.6f, 1.0f, 1.0f},  // 3 layers {40%, 20%, 40%}
+        {0.25f, 0.4f, 0.6f, 1.0f}  // 4 layers {25%, 15%, 20%, 40%}
+};
+
+}  // namespace webrtc
+#endif  // MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_
diff --git a/modules/video_coding/codecs/vp8/include/vp8_globals.h b/modules/video_coding/codecs/vp8/include/vp8_globals.h
new file mode 100644
index 0000000..1fab5f4
--- /dev/null
+++ b/modules/video_coding/codecs/vp8/include/vp8_globals.h
@@ -0,0 +1,49 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains codec dependent definitions that are needed in
+// order to compile the WebRTC codebase, even if this codec is not used.
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_GLOBALS_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_GLOBALS_H_
+
+#include "modules/video_coding/codecs/interface/common_constants.h"
+
+namespace webrtc {
+
+struct RTPVideoHeaderVP8 {
+  void InitRTPVideoHeaderVP8() {
+    nonReference = false;
+    pictureId = kNoPictureId;
+    tl0PicIdx = kNoTl0PicIdx;
+    temporalIdx = kNoTemporalIdx;
+    layerSync = false;
+    keyIdx = kNoKeyIdx;
+    partitionId = 0;
+    beginningOfPartition = false;
+  }
+
+  bool nonReference;          // Frame is discardable.
+  int16_t pictureId;          // Picture ID index, 15 bits;
+                              // kNoPictureId if PictureID does not exist.
+  int16_t tl0PicIdx;          // TL0PIC_IDX, 8 bits;
+                              // kNoTl0PicIdx means no value provided.
+  uint8_t temporalIdx;        // Temporal layer index, or kNoTemporalIdx.
+  bool layerSync;             // This frame is a layer sync frame.
+                              // Disabled if temporalIdx == kNoTemporalIdx.
+  int keyIdx;                 // 5 bits; kNoKeyIdx means not used.
+  int partitionId;            // VP8 partition ID
+  bool beginningOfPartition;  // True if this packet is the first
+                              // in a VP8 partition. Otherwise false
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_GLOBALS_H_
diff --git a/modules/video_coding/codecs/vp9/include/vp9.h b/modules/video_coding/codecs/vp9/include/vp9.h
new file mode 100644
index 0000000..172e69e
--- /dev/null
+++ b/modules/video_coding/codecs/vp9/include/vp9.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_H_
+#define MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_H_
+
+#include <memory>
+
+#include "modules/video_coding/include/video_codec_interface.h"
+
+namespace webrtc {
+
+class VP9Encoder : public VideoEncoder {
+ public:
+  static bool IsSupported();
+  static std::unique_ptr<VP9Encoder> Create();
+
+  virtual ~VP9Encoder() {}
+};
+
+class VP9Decoder : public VideoDecoder {
+ public:
+  static bool IsSupported();
+  static std::unique_ptr<VP9Decoder> Create();
+
+  virtual ~VP9Decoder() {}
+};
+}  // namespace webrtc
+
+#endif  // MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_H_
diff --git a/modules/video_coding/codecs/vp9/include/vp9_globals.h b/modules/video_coding/codecs/vp9/include/vp9_globals.h
new file mode 100644
index 0000000..945eb0c
--- /dev/null
+++ b/modules/video_coding/codecs/vp9/include/vp9_globals.h
@@ -0,0 +1,210 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains codec dependent definitions that are needed in
+// order to compile the WebRTC codebase, even if this codec is not used.
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_GLOBALS_H_
+#define MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_GLOBALS_H_
+
+#include "modules/video_coding/codecs/interface/common_constants.h"
+
+namespace webrtc {
+
+const int16_t kMaxOneBytePictureId = 0x7F;    // 7 bits
+const int16_t kMaxTwoBytePictureId = 0x7FFF;  // 15 bits
+const uint8_t kNoSpatialIdx = 0xFF;
+const uint8_t kNoGofIdx = 0xFF;
+const uint8_t kNumVp9Buffers = 8;
+const size_t kMaxVp9RefPics = 3;
+const size_t kMaxVp9FramesInGof = 0xFF;  // 8 bits
+const size_t kMaxVp9NumberOfSpatialLayers = 8;
+
+enum TemporalStructureMode {
+  kTemporalStructureMode1,  // 1 temporal layer structure - i.e., IPPP...
+  kTemporalStructureMode2,  // 2 temporal layers 01...
+  kTemporalStructureMode3,  // 3 temporal layers 0212...
+  kTemporalStructureMode4   // 3 temporal layers 02120212...
+};
+
+struct GofInfoVP9 {
+  void SetGofInfoVP9(TemporalStructureMode tm) {
+    switch (tm) {
+      case kTemporalStructureMode1:
+        num_frames_in_gof = 1;
+        temporal_idx[0] = 0;
+        temporal_up_switch[0] = false;
+        num_ref_pics[0] = 1;
+        pid_diff[0][0] = 1;
+        break;
+      case kTemporalStructureMode2:
+        num_frames_in_gof = 2;
+        temporal_idx[0] = 0;
+        temporal_up_switch[0] = false;
+        num_ref_pics[0] = 1;
+        pid_diff[0][0] = 2;
+
+        temporal_idx[1] = 1;
+        temporal_up_switch[1] = true;
+        num_ref_pics[1] = 1;
+        pid_diff[1][0] = 1;
+        break;
+      case kTemporalStructureMode3:
+        num_frames_in_gof = 4;
+        temporal_idx[0] = 0;
+        temporal_up_switch[0] = false;
+        num_ref_pics[0] = 1;
+        pid_diff[0][0] = 4;
+
+        temporal_idx[1] = 2;
+        temporal_up_switch[1] = true;
+        num_ref_pics[1] = 1;
+        pid_diff[1][0] = 1;
+
+        temporal_idx[2] = 1;
+        temporal_up_switch[2] = true;
+        num_ref_pics[2] = 1;
+        pid_diff[2][0] = 2;
+
+        temporal_idx[3] = 2;
+        temporal_up_switch[3] = false;
+        num_ref_pics[3] = 2;
+        pid_diff[3][0] = 1;
+        pid_diff[3][1] = 2;
+        break;
+      case kTemporalStructureMode4:
+        num_frames_in_gof = 8;
+        temporal_idx[0] = 0;
+        temporal_up_switch[0] = false;
+        num_ref_pics[0] = 1;
+        pid_diff[0][0] = 4;
+
+        temporal_idx[1] = 2;
+        temporal_up_switch[1] = true;
+        num_ref_pics[1] = 1;
+        pid_diff[1][0] = 1;
+
+        temporal_idx[2] = 1;
+        temporal_up_switch[2] = true;
+        num_ref_pics[2] = 1;
+        pid_diff[2][0] = 2;
+
+        temporal_idx[3] = 2;
+        temporal_up_switch[3] = false;
+        num_ref_pics[3] = 2;
+        pid_diff[3][0] = 1;
+        pid_diff[3][1] = 2;
+
+        temporal_idx[4] = 0;
+        temporal_up_switch[0] = false;
+        num_ref_pics[4] = 1;
+        pid_diff[4][0] = 4;
+
+        temporal_idx[5] = 2;
+        temporal_up_switch[1] = false;
+        num_ref_pics[5] = 2;
+        pid_diff[5][0] = 1;
+        pid_diff[5][1] = 2;
+
+        temporal_idx[6] = 1;
+        temporal_up_switch[2] = false;
+        num_ref_pics[6] = 2;
+        pid_diff[6][0] = 2;
+        pid_diff[6][1] = 4;
+
+        temporal_idx[7] = 2;
+        temporal_up_switch[3] = false;
+        num_ref_pics[7] = 2;
+        pid_diff[7][0] = 1;
+        pid_diff[7][1] = 2;
+        break;
+      default:
+        assert(false);
+    }
+  }
+
+  void CopyGofInfoVP9(const GofInfoVP9& src) {
+    num_frames_in_gof = src.num_frames_in_gof;
+    for (size_t i = 0; i < num_frames_in_gof; ++i) {
+      temporal_idx[i] = src.temporal_idx[i];
+      temporal_up_switch[i] = src.temporal_up_switch[i];
+      num_ref_pics[i] = src.num_ref_pics[i];
+      for (uint8_t r = 0; r < num_ref_pics[i]; ++r) {
+        pid_diff[i][r] = src.pid_diff[i][r];
+      }
+    }
+  }
+
+  size_t num_frames_in_gof;
+  uint8_t temporal_idx[kMaxVp9FramesInGof];
+  bool temporal_up_switch[kMaxVp9FramesInGof];
+  uint8_t num_ref_pics[kMaxVp9FramesInGof];
+  uint8_t pid_diff[kMaxVp9FramesInGof][kMaxVp9RefPics];
+  uint16_t pid_start;
+};
+
+struct RTPVideoHeaderVP9 {
+  void InitRTPVideoHeaderVP9() {
+    inter_pic_predicted = false;
+    flexible_mode = false;
+    beginning_of_frame = false;
+    end_of_frame = false;
+    ss_data_available = false;
+    picture_id = kNoPictureId;
+    max_picture_id = kMaxTwoBytePictureId;
+    tl0_pic_idx = kNoTl0PicIdx;
+    temporal_idx = kNoTemporalIdx;
+    spatial_idx = kNoSpatialIdx;
+    temporal_up_switch = false;
+    inter_layer_predicted = false;
+    gof_idx = kNoGofIdx;
+    num_ref_pics = 0;
+    num_spatial_layers = 1;
+  }
+
+  bool inter_pic_predicted;  // This layer frame is dependent on previously
+                             // coded frame(s).
+  bool flexible_mode;        // This frame is in flexible mode.
+  bool beginning_of_frame;   // True if this packet is the first in a VP9 layer
+                             // frame.
+  bool end_of_frame;  // True if this packet is the last in a VP9 layer frame.
+  bool ss_data_available;   // True if SS data is available in this payload
+                            // descriptor.
+  int16_t picture_id;       // PictureID index, 15 bits;
+                            // kNoPictureId if PictureID does not exist.
+  int16_t max_picture_id;   // Maximum picture ID index; either 0x7F or 0x7FFF;
+  int16_t tl0_pic_idx;      // TL0PIC_IDX, 8 bits;
+                            // kNoTl0PicIdx means no value provided.
+  uint8_t temporal_idx;     // Temporal layer index, or kNoTemporalIdx.
+  uint8_t spatial_idx;      // Spatial layer index, or kNoSpatialIdx.
+  bool temporal_up_switch;  // True if upswitch to higher frame rate is possible
+                            // starting from this frame.
+  bool inter_layer_predicted;  // Frame is dependent on directly lower spatial
+                               // layer frame.
+
+  uint8_t gof_idx;  // Index to predefined temporal frame info in SS data.
+
+  uint8_t num_ref_pics;  // Number of reference pictures used by this layer
+                         // frame.
+  uint8_t pid_diff[kMaxVp9RefPics];  // P_DIFF signaled to derive the PictureID
+                                     // of the reference pictures.
+  int16_t ref_picture_id[kMaxVp9RefPics];  // PictureID of reference pictures.
+
+  // SS data.
+  size_t num_spatial_layers;  // Always populated.
+  bool spatial_layer_resolution_present;
+  uint16_t width[kMaxVp9NumberOfSpatialLayers];
+  uint16_t height[kMaxVp9NumberOfSpatialLayers];
+  GofInfoVP9 gof;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_GLOBALS_H_
diff --git a/rtc_base/BUILD.gn b/rtc_base/BUILD.gn
new file mode 100644
index 0000000..50e0b24
--- /dev/null
+++ b/rtc_base/BUILD.gn
@@ -0,0 +1,1179 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("//build/config/crypto.gni")
+import("//build/config/ui.gni")
+import("../webrtc.gni")
+
+if (is_android) {
+  import("//build/config/android/config.gni")
+  import("//build/config/android/rules.gni")
+}
+if (is_win) {
+  import("//build/config/clang/clang.gni")
+}
+
+group("base") {
+  public_deps = [
+    ":rtc_base",
+    ":rtc_base_approved",
+    ":rtc_task_queue",
+    ":sequenced_task_checker",
+    ":weak_ptr",
+  ]
+  if (is_android) {
+    public_deps += [ ":base_java" ]
+  }
+}
+
+config("rtc_base_chromium_config") {
+  defines = [ "NO_MAIN_THREAD_WRAPPING" ]
+}
+
+config("rtc_base_all_dependent_config") {
+  if (is_ios) {
+    libs = [
+      "CFNetwork.framework",
+      "Foundation.framework",
+      "Security.framework",
+      "SystemConfiguration.framework",
+      "UIKit.framework",
+    ]
+  }
+  if (is_mac) {
+    libs = [
+      "Cocoa.framework",
+      "Foundation.framework",
+      "IOKit.framework",
+      "Security.framework",
+      "SystemConfiguration.framework",
+    ]
+  }
+}
+
+if (!rtc_build_ssl) {
+  config("external_ssl_library") {
+    assert(rtc_ssl_root != "",
+           "You must specify rtc_ssl_root when rtc_build_ssl==0.")
+    include_dirs = [ rtc_ssl_root ]
+  }
+}
+
+source_set("protobuf_utils") {
+  visibility = [ "*" ]
+  sources = [
+    "protobuf_utils.h",
+  ]
+  if (rtc_enable_protobuf) {
+    public_deps = [
+      "//third_party/protobuf:protobuf_lite",
+    ]
+  }
+}
+
+source_set("compile_assert_c") {
+  sources = [
+    "compile_assert_c.h",
+  ]
+}
+
+rtc_source_set("rtc_base_approved") {
+  visibility = [ "*" ]
+  public_deps = [
+    ":rtc_base_approved_generic",
+  ]
+  if (is_mac && !build_with_chromium) {
+    public_deps += [ ":rtc_base_approved_objc" ]
+  }
+}
+
+rtc_source_set("checks") {
+  sources = [
+    "checks.cc",
+    "checks.h",
+  ]
+  deps = [
+    ":safe_compare",
+    "..:typedefs",
+  ]
+}
+
+rtc_source_set("rate_limiter") {
+  sources = [
+    "rate_limiter.cc",
+    "rate_limiter.h",
+  ]
+  deps = [
+    ":rtc_base_approved",
+    "../system_wrappers",
+  ]
+}
+
+rtc_source_set("sanitizer") {
+  sources = [
+    "sanitizer.h",
+  ]
+}
+
+rtc_source_set("safe_compare") {
+  sources = [
+    "numerics/safe_compare.h",
+  ]
+  deps = [
+    ":type_traits",
+  ]
+}
+
+rtc_source_set("safe_minmax") {
+  sources = [
+    "numerics/safe_minmax.h",
+  ]
+  deps = [
+    ":checks",
+    ":safe_compare",
+    ":type_traits",
+  ]
+}
+
+rtc_source_set("stringutils") {
+  sources = [
+    "strings/string_builder.cc",
+    "strings/string_builder.h",
+    "stringutils.cc",
+    "stringutils.h",
+  ]
+  deps = [
+    ":checks",
+    ":safe_minmax",
+    "../api:array_view",
+  ]
+}
+
+rtc_source_set("type_traits") {
+  sources = [
+    "type_traits.h",
+  ]
+}
+
+rtc_source_set("deprecation") {
+  sources = [
+    "deprecation.h",
+  ]
+}
+
+# The subset of rtc_base approved for use outside of libjingle.
+rtc_source_set("rtc_base_approved_generic") {
+  visibility = [
+    ":rtc_base_approved",
+    ":rtc_base_approved_objc",
+    ":weak_ptr_unittests",
+  ]
+
+  cflags = []
+  defines = []
+  libs = []
+  deps = [
+    ":checks",
+    ":safe_compare",
+    ":stringutils",
+    ":type_traits",
+    "../:typedefs",
+  ]
+
+  sources = [
+    "arraysize.h",
+    "atomicops.h",
+    "base64.cc",
+    "base64.h",
+    "basictypes.h",
+    "bind.h",
+    "bitbuffer.cc",
+    "bitbuffer.h",
+    "bitrateallocationstrategy.cc",
+    "bitrateallocationstrategy.h",
+    "buffer.h",
+    "bufferqueue.cc",
+    "bufferqueue.h",
+    "bytebuffer.cc",
+    "bytebuffer.h",
+    "byteorder.h",
+    "constructormagic.h",
+    "copyonwritebuffer.cc",
+    "copyonwritebuffer.h",
+    "criticalsection.cc",
+    "criticalsection.h",
+    "event_tracer.cc",
+    "event_tracer.h",
+    "file.cc",
+    "file.h",
+    "flags.cc",
+    "flags.h",
+    "format_macros.h",
+    "function_view.h",
+    "ignore_wundef.h",
+    "location.cc",
+    "location.h",
+    "numerics/histogram_percentile_counter.cc",
+    "numerics/histogram_percentile_counter.h",
+    "numerics/mod_ops.h",
+    "numerics/moving_max_counter.h",
+    "numerics/safe_conversions.h",
+    "numerics/safe_conversions_impl.h",
+    "onetimeevent.h",
+    "pathutils.cc",
+    "pathutils.h",
+    "platform_file.cc",
+    "platform_file.h",
+    "platform_thread.cc",
+    "platform_thread.h",
+    "platform_thread_types.cc",
+    "platform_thread_types.h",
+    "ptr_util.h",
+    "race_checker.cc",
+    "race_checker.h",
+    "random.cc",
+    "random.h",
+    "rate_statistics.cc",
+    "rate_statistics.h",
+    "ratetracker.cc",
+    "ratetracker.h",
+    "refcount.h",
+    "refcountedobject.h",
+    "refcounter.h",
+    "scoped_ref_ptr.h",
+    "string_to_number.cc",
+    "string_to_number.h",
+    "stringencode.cc",
+    "stringencode.h",
+    "stringize_macros.h",
+    "swap_queue.h",
+    "template_util.h",
+    "thread_annotations.h",
+    "thread_checker.h",
+    "thread_checker_impl.cc",
+    "thread_checker_impl.h",
+    "timestampaligner.cc",
+    "timestampaligner.h",
+    "timeutils.cc",
+    "timeutils.h",
+    "trace_event.h",
+    "zero_memory.cc",
+    "zero_memory.h",
+  ]
+
+  deps += [
+    "..:webrtc_common",
+    "../api:array_view",
+    "../api:optional",
+  ]
+
+  if (is_android) {
+    libs += [ "log" ]
+  }
+
+  if (is_posix) {
+    sources += [ "file_posix.cc" ]
+  }
+
+  if (is_win) {
+    sources += [ "file_win.cc" ]
+  }
+
+  if (build_with_chromium) {
+    # Dependency on chromium's logging (in //base).
+    deps += [ "//base:base" ]
+    sources += [
+      "../../webrtc_overrides/rtc_base/event.cc",
+      "../../webrtc_overrides/rtc_base/event.h",
+      "../../webrtc_overrides/rtc_base/logging.cc",
+      "../../webrtc_overrides/rtc_base/logging.h",
+    ]
+  } else {
+    sources += [
+      "event.cc",
+      "event.h",
+      "logging.cc",
+      "logging.h",
+    ]
+
+    # logging.h needs the deprecation header while downstream projects are
+    # removing code that depends on logging implementation details.
+    deps += [ ":deprecation" ]
+  }
+  if (is_component_build && is_win) {
+    # Copy the VS runtime DLLs into the isolate so that they don't have to be
+    # preinstalled on the target machine. The debug runtimes have a "d" at
+    # the end.
+    # This is a copy of https://codereview.chromium.org/1783973002.
+    # TODO(ehmaldonado): We'd like Chromium to make this changes easier to use,
+    # so we don't have to copy their changes and risk breakages.
+    # See http://crbug.com/653569
+    if (is_debug) {
+      vcrt_suffix = "d"
+    } else {
+      vcrt_suffix = ""
+    }
+
+    # These runtime files are copied to the output directory by the
+    # vs_toolchain script that runs as part of toolchain configuration.
+    data = [
+      "$root_out_dir/msvcp140${vcrt_suffix}.dll",
+      "$root_out_dir/vccorlib140${vcrt_suffix}.dll",
+      "$root_out_dir/vcruntime140${vcrt_suffix}.dll",
+
+      # Universal Windows 10 CRT files
+      "$root_out_dir/api-ms-win-core-console-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-core-datetime-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-core-debug-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-core-errorhandling-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-core-file-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-core-file-l1-2-0.dll",
+      "$root_out_dir/api-ms-win-core-file-l2-1-0.dll",
+      "$root_out_dir/api-ms-win-core-handle-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-core-heap-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-core-interlocked-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-core-libraryloader-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-core-localization-l1-2-0.dll",
+      "$root_out_dir/api-ms-win-core-memory-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-core-namedpipe-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-core-processenvironment-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-core-processthreads-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-core-processthreads-l1-1-1.dll",
+      "$root_out_dir/api-ms-win-core-profile-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-core-rtlsupport-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-core-string-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-core-synch-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-core-synch-l1-2-0.dll",
+      "$root_out_dir/api-ms-win-core-sysinfo-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-core-timezone-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-core-util-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-crt-conio-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-crt-convert-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-crt-environment-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-crt-filesystem-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-crt-heap-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-crt-locale-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-crt-math-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-crt-multibyte-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-crt-private-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-crt-process-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-crt-runtime-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-crt-stdio-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-crt-string-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-crt-time-l1-1-0.dll",
+      "$root_out_dir/api-ms-win-crt-utility-l1-1-0.dll",
+      "$root_out_dir/ucrtbase${vcrt_suffix}.dll",
+    ]
+    if (is_asan) {
+      if (current_cpu == "x64") {
+        data += [ "$clang_base_path/lib/clang/$clang_version/lib/windows/clang_rt.asan_dynamic-x86_64.dll" ]
+      } else {
+        data += [ "$clang_base_path/lib/clang/$clang_version/lib/windows/clang_rt.asan_dynamic-i386.dll" ]
+      }
+    }
+  }
+  if (is_nacl) {
+    deps += [ "//native_client_sdk/src/libraries/nacl_io" ]
+  }
+}
+
+if (is_mac && !build_with_chromium) {
+  config("rtc_base_approved_objc_all_dependent_config") {
+    visibility = [ ":rtc_base_approved_objc" ]
+    libs = [ "Foundation.framework" ]  # needed for logging_mac.mm
+  }
+
+  rtc_source_set("rtc_base_approved_objc") {
+    visibility = [ ":rtc_base_approved" ]
+    all_dependent_configs = [ ":rtc_base_approved_objc_all_dependent_config" ]
+    sources = [
+      "logging_mac.mm",
+    ]
+    deps = [
+      ":rtc_base_approved_generic",
+    ]
+  }
+}
+
+rtc_source_set("rtc_task_queue") {
+  visibility = [ "*" ]
+  deps = [
+    ":rtc_base_approved",
+  ]
+  public_deps = [
+    ":rtc_task_queue_api",
+  ]
+
+  if (rtc_link_task_queue_impl) {
+    deps += [ ":rtc_task_queue_impl" ]
+  }
+}
+
+# WebRTC targets must not directly depend on rtc_task_queue_api or
+# rtc_task_queue_impl. Instead, depend on rtc_task_queue.
+# The build flag |rtc_link_task_queue_impl| decides if WebRTC targets will link
+# to the default implemenation in rtc_task_queue_impl or if an externally
+# provided implementation should be used. An external implementation should
+# depend on rtc_task_queue_api.
+rtc_source_set("rtc_task_queue_api") {
+  sources = [
+    "task_queue.h",
+  ]
+  deps = [
+    ":rtc_base_approved",
+  ]
+}
+
+rtc_source_set("rtc_task_queue_impl") {
+  visibility = [ "*" ]
+  deps = [
+    ":checks",
+    ":rtc_base_approved",
+    ":rtc_task_queue_api",
+  ]
+  if (rtc_build_libevent) {
+    deps += [ "//base/third_party/libevent" ]
+  }
+  if (rtc_enable_libevent) {
+    sources = [
+      "task_queue_libevent.cc",
+      "task_queue_posix.cc",
+      "task_queue_posix.h",
+    ]
+  } else {
+    if (is_mac || is_ios) {
+      sources = [
+        "task_queue_gcd.cc",
+        "task_queue_posix.cc",
+        "task_queue_posix.h",
+      ]
+    }
+    if (is_win) {
+      sources = [
+        "task_queue_win.cc",
+      ]
+    }
+  }
+}
+
+rtc_static_library("sequenced_task_checker") {
+  sources = [
+    "sequenced_task_checker.h",
+    "sequenced_task_checker_impl.cc",
+    "sequenced_task_checker_impl.h",
+  ]
+  deps = [
+    ":checks",
+    ":rtc_base_approved",
+    ":rtc_task_queue",
+  ]
+}
+
+rtc_static_library("weak_ptr") {
+  sources = [
+    "weak_ptr.cc",
+    "weak_ptr.h",
+  ]
+  deps = [
+    ":rtc_base_approved",
+    ":sequenced_task_checker",
+  ]
+}
+
+rtc_static_library("rtc_numerics") {
+  sources = [
+    "numerics/exp_filter.cc",
+    "numerics/exp_filter.h",
+    "numerics/moving_median_filter.h",
+    "numerics/percentile_filter.h",
+    "numerics/sequence_number_util.h",
+  ]
+  deps = [
+    ":checks",
+    ":rtc_base_approved",
+    ":safe_compare",
+    "../api:optional",
+  ]
+}
+
+config("rtc_base_warnings_config") {
+  if (is_win && is_clang) {
+    cflags = [
+      # Disable warnings failing when compiling with Clang on Windows.
+      # https://bugs.chromium.org/p/webrtc/issues/detail?id=5366
+      "-Wno-sign-compare",
+      "-Wno-missing-braces",
+    ]
+  }
+}
+
+rtc_source_set("rtc_json") {
+  defines = []
+  sources = [
+    "json.cc",
+    "json.h",
+  ]
+  all_dependent_configs = [ "//third_party/jsoncpp:jsoncpp_config" ]
+  if (rtc_build_json) {
+    public_deps = [
+      "//third_party/jsoncpp",
+    ]
+  } else {
+    include_dirs = [ "$rtc_jsoncpp_root" ]
+
+    # When defined changes the include path for json.h to where it is
+    # expected to be when building json outside of the standalone build.
+    defines += [ "WEBRTC_EXTERNAL_JSON" ]
+  }
+}
+
+rtc_static_library("rtc_base") {
+  visibility = [ "*" ]
+  public_deps = []
+  if (!build_with_mozilla) {
+    public_deps += [ ":rtc_base_generic" ]
+  }
+  if (is_win) {
+    sources = [
+      "noop.cc",
+    ]
+  }
+  if (is_ios || is_mac) {
+    sources = [
+      "noop.mm",
+    ]
+    public_deps += [ ":rtc_base_objc" ]
+  }
+}
+
+if (is_ios || is_mac) {
+  rtc_source_set("rtc_base_objc") {
+    sources = [
+      "thread_darwin.mm",
+    ]
+    deps = [
+      ":rtc_base_generic",
+    ]
+    visibility = [ ":rtc_base" ]
+  }
+}
+
+rtc_static_library("rtc_base_generic") {
+  cflags = []
+  cflags_cc = []
+  libs = []
+  defines = []
+  deps = [
+    ":checks",
+    ":stringutils",
+    "..:webrtc_common",
+    "../api:array_view",
+    "../api:optional",
+  ]
+  public_deps = [
+    ":rtc_base_approved",
+  ]
+  public_configs = []
+
+  all_dependent_configs = [ ":rtc_base_all_dependent_config" ]
+
+  sources = [
+    "asyncinvoker-inl.h",
+    "asyncinvoker.cc",
+    "asyncinvoker.h",
+    "asyncpacketsocket.cc",
+    "asyncpacketsocket.h",
+    "asyncresolverinterface.cc",
+    "asyncresolverinterface.h",
+    "asyncsocket.cc",
+    "asyncsocket.h",
+    "asynctcpsocket.cc",
+    "asynctcpsocket.h",
+    "asyncudpsocket.cc",
+    "asyncudpsocket.h",
+    "crc32.cc",
+    "crc32.h",
+    "cryptstring.cc",
+    "cryptstring.h",
+    "data_rate_limiter.cc",
+    "data_rate_limiter.h",
+    "dscp.h",
+    "filerotatingstream.cc",
+    "filerotatingstream.h",
+    "fileutils.cc",
+    "fileutils.h",
+    "gunit_prod.h",
+    "helpers.cc",
+    "helpers.h",
+    "httpcommon-inl.h",
+    "httpcommon.cc",
+    "httpcommon.h",
+    "ipaddress.cc",
+    "ipaddress.h",
+    "keep_ref_until_done.h",
+    "messagedigest.cc",
+    "messagedigest.h",
+    "messagehandler.cc",
+    "messagehandler.h",
+    "messagequeue.cc",
+    "messagequeue.h",
+    "nethelper.cc",
+    "nethelper.h",
+    "nethelpers.cc",
+    "nethelpers.h",
+    "network.cc",
+    "network.h",
+    "network_constants.h",
+    "networkmonitor.cc",
+    "networkmonitor.h",
+    "networkroute.h",
+    "nullsocketserver.cc",
+    "nullsocketserver.h",
+    "openssl.h",
+    "openssladapter.cc",
+    "openssladapter.h",
+    "openssldigest.cc",
+    "openssldigest.h",
+    "opensslidentity.cc",
+    "opensslidentity.h",
+    "opensslstreamadapter.cc",
+    "opensslstreamadapter.h",
+    "physicalsocketserver.cc",
+    "physicalsocketserver.h",
+    "proxyinfo.cc",
+    "proxyinfo.h",
+    "ratelimiter.h",
+    "rtccertificate.cc",
+    "rtccertificate.h",
+    "rtccertificategenerator.cc",
+    "rtccertificategenerator.h",
+    "signalthread.cc",
+    "signalthread.h",
+    "sigslot.cc",
+    "sigslot.h",
+    "sigslotrepeater.h",
+    "socket.h",
+    "socketadapters.cc",
+    "socketadapters.h",
+    "socketaddress.cc",
+    "socketaddress.h",
+    "socketaddresspair.cc",
+    "socketaddresspair.h",
+    "socketfactory.h",
+    "socketserver.h",
+    "socketstream.cc",
+    "socketstream.h",
+    "ssladapter.cc",
+    "ssladapter.h",
+    "sslfingerprint.cc",
+    "sslfingerprint.h",
+    "sslidentity.cc",
+    "sslidentity.h",
+    "sslstreamadapter.cc",
+    "sslstreamadapter.h",
+    "stream.cc",
+    "stream.h",
+    "thread.cc",
+    "thread.h",
+  ]
+
+  visibility = [
+    ":rtc_base",
+    ":rtc_base_objc",
+  ]
+
+  # TODO(henrike): issue 3307, make rtc_base build with the Chromium default
+  # compiler settings.
+  suppressed_configs += [ "//build/config/compiler:chromium_code" ]
+  configs += [ "//build/config/compiler:no_chromium_code" ]
+  if (!is_win) {
+    cflags += [ "-Wno-uninitialized" ]
+  }
+
+  if (build_with_chromium) {
+    if (is_win) {
+      sources += [ "../../webrtc_overrides/rtc_base/win32socketinit.cc" ]
+    }
+    include_dirs = [ "../../boringssl/src/include" ]
+    public_configs += [ ":rtc_base_chromium_config" ]
+  } else {
+    configs += [ ":rtc_base_warnings_config" ]
+    sources += [
+      "callback.h",
+      "logsinks.cc",
+      "logsinks.h",
+      "numerics/mathutils.h",
+      "optionsfile.cc",
+      "optionsfile.h",
+      "rollingaccumulator.h",
+      "sslroots.h",
+      "transformadapter.cc",
+      "transformadapter.h",
+    ]
+
+    if (is_win) {
+      sources += [
+        "win32socketinit.cc",
+        "win32socketinit.h",
+        "win32socketserver.cc",
+        "win32socketserver.h",
+      ]
+    }
+  }  # !build_with_chromium
+
+  if (rtc_build_ssl) {
+    deps += [ "//third_party/boringssl" ]
+  } else {
+    configs += [ ":external_ssl_library" ]
+  }
+
+  if (is_android) {
+    sources += [
+      "ifaddrs-android.cc",
+      "ifaddrs-android.h",
+    ]
+
+    libs += [
+      "log",
+      "GLESv2",
+    ]
+  }
+
+  if (is_ios || is_mac) {
+    sources += [ "macifaddrs_converter.cc" ]
+  }
+
+  if (rtc_use_x11) {
+    libs += [
+      "dl",
+      "rt",
+      "Xext",
+      "X11",
+      "Xcomposite",
+      "Xrender",
+    ]
+  }
+
+  if (is_linux) {
+    libs += [
+      "dl",
+      "rt",
+    ]
+  }
+
+  if (is_mac) {
+    sources += [
+      "macutils.cc",
+      "macutils.h",
+    ]
+    libs += [
+      # For ProcessInformationCopyDictionary in unixfilesystem.cc.
+      "ApplicationServices.framework",
+    ]
+  }
+
+  if (is_win) {
+    sources += [
+      "win32.cc",
+      "win32.h",
+      "win32filesystem.cc",
+      "win32filesystem.h",
+      "win32window.cc",
+      "win32window.h",
+    ]
+
+    libs += [
+      "crypt32.lib",
+      "iphlpapi.lib",
+      "secur32.lib",
+    ]
+
+    cflags += [
+      # Suppress warnings about WIN32_LEAN_AND_MEAN.
+      "/wd4005",
+      "/wd4703",
+    ]
+
+    defines += [ "_CRT_NONSTDC_NO_DEPRECATE" ]
+  }
+
+  if (is_posix) {
+    sources += [
+      "ifaddrs_converter.cc",
+      "ifaddrs_converter.h",
+      "unixfilesystem.cc",
+      "unixfilesystem.h",
+    ]
+  }
+
+  if (is_nacl) {
+    deps += [ "//native_client_sdk/src/libraries/nacl_io" ]
+    defines += [ "timezone=_timezone" ]
+    sources -= [ "ifaddrs_converter.cc" ]
+  }
+}
+
+rtc_source_set("gtest_prod") {
+  visibility = [ "*" ]
+  sources = [
+    "gtest_prod_util.h",
+  ]
+}
+
+config("rtc_base_tests_utils_exported_config") {
+  defines = [ "GTEST_RELATIVE_PATH" ]
+}
+
+config("rtc_base_tests_utils_warnings_config") {
+  if (is_win && is_clang) {
+    cflags = [
+      # See https://bugs.chromium.org/p/webrtc/issues/detail?id=6270
+      "-Wno-reorder",
+      "-Wno-sign-compare",
+    ]
+  }
+}
+
+rtc_source_set("rtc_base_tests_utils") {
+  testonly = true
+  sources = [
+    # Also use this as a convenient dumping ground for misc files that are
+    # included by multiple targets below.
+    "cpu_time.cc",
+    "cpu_time.h",
+    "fakeclock.cc",
+    "fakeclock.h",
+    "fakenetwork.h",
+    "fakesslidentity.cc",
+    "fakesslidentity.h",
+    "firewallsocketserver.cc",
+    "firewallsocketserver.h",
+    "gunit.cc",
+    "gunit.h",
+    "httpbase.cc",
+    "httpbase.h",
+    "httpserver.cc",
+    "httpserver.h",
+    "memory_usage.cc",
+    "memory_usage.h",
+    "natserver.cc",
+    "natserver.h",
+    "natsocketfactory.cc",
+    "natsocketfactory.h",
+    "nattypes.cc",
+    "nattypes.h",
+    "proxyserver.cc",
+    "proxyserver.h",
+    "refcount.h",
+    "sigslottester.h",
+    "sigslottester.h.pump",
+    "testbase64.h",
+    "testclient.cc",
+    "testclient.h",
+    "testechoserver.cc",
+    "testechoserver.h",
+    "testutils.cc",
+    "testutils.h",
+    "timedelta.h",
+    "virtualsocketserver.cc",
+    "virtualsocketserver.h",
+  ]
+  configs += [ ":rtc_base_tests_utils_warnings_config" ]
+  public_configs = [ ":rtc_base_tests_utils_exported_config" ]
+  deps = [
+    ":checks",
+    ":rtc_base",
+    ":stringutils",
+    "../test:field_trial",
+    "../test:test_support",
+    "system:fallthrough",
+  ]
+  public_deps = [
+    "//testing/gtest",
+  ]
+}
+
+rtc_source_set("rtc_task_queue_for_test") {
+  visibility = [ "*" ]
+  testonly = true
+
+  sources = [
+    "task_queue_for_test.cc",
+    "task_queue_for_test.h",
+  ]
+  deps = [
+    ":checks",
+    ":rtc_base_approved",
+    ":rtc_task_queue",
+  ]
+}
+
+if (rtc_include_tests) {
+  rtc_source_set("rtc_base_tests_main") {
+    testonly = true
+    sources = [
+      "unittest_main.cc",
+    ]
+    public_configs = [ ":rtc_base_tests_utils_exported_config" ]
+    deps = [
+      ":rtc_base",
+      ":rtc_base_approved",
+      ":rtc_base_tests_utils",
+      "../system_wrappers:field_trial_default",
+      "../test:field_trial",
+      "../test:fileutils",
+      "../test:test_support",
+    ]
+
+    public_deps = [
+      "//testing/gtest",
+    ]
+  }
+
+  rtc_source_set("rtc_base_nonparallel_tests") {
+    testonly = true
+
+    sources = [
+      "cpu_time_unittest.cc",
+      "filerotatingstream_unittest.cc",
+      "nullsocketserver_unittest.cc",
+      "physicalsocketserver_unittest.cc",
+      "socket_unittest.cc",
+      "socket_unittest.h",
+      "socketaddress_unittest.cc",
+    ]
+    deps = [
+      ":checks",
+      ":rtc_base",
+      ":rtc_base_tests_main",
+      ":rtc_base_tests_utils",
+      "../system_wrappers:system_wrappers",
+      "../test:fileutils",
+      "../test:test_support",
+      "//testing/gtest",
+    ]
+    if (is_win) {
+      sources += [ "win32socketserver_unittest.cc" ]
+    }
+  }
+
+  rtc_source_set("rtc_base_approved_unittests") {
+    testonly = true
+
+    sources = [
+      "atomicops_unittest.cc",
+      "base64_unittest.cc",
+      "basictypes_unittest.cc",
+      "bind_unittest.cc",
+      "bitbuffer_unittest.cc",
+      "bitrateallocationstrategy_unittest.cc",
+      "buffer_unittest.cc",
+      "bufferqueue_unittest.cc",
+      "bytebuffer_unittest.cc",
+      "byteorder_unittest.cc",
+      "copyonwritebuffer_unittest.cc",
+      "criticalsection_unittest.cc",
+      "event_tracer_unittest.cc",
+      "event_unittest.cc",
+      "file_unittest.cc",
+      "function_view_unittest.cc",
+      "logging_unittest.cc",
+      "numerics/histogram_percentile_counter_unittest.cc",
+      "numerics/mod_ops_unittest.cc",
+      "numerics/moving_max_counter_unittest.cc",
+      "numerics/safe_compare_unittest.cc",
+      "numerics/safe_minmax_unittest.cc",
+      "onetimeevent_unittest.cc",
+      "pathutils_unittest.cc",
+      "platform_thread_unittest.cc",
+      "random_unittest.cc",
+      "rate_limiter_unittest.cc",
+      "rate_statistics_unittest.cc",
+      "ratetracker_unittest.cc",
+      "refcountedobject_unittest.cc",
+      "string_to_number_unittest.cc",
+      "stringencode_unittest.cc",
+      "stringize_macros_unittest.cc",
+      "strings/string_builder_unittest.cc",
+      "stringutils_unittest.cc",
+      "swap_queue_unittest.cc",
+      "thread_annotations_unittest.cc",
+      "thread_checker_unittest.cc",
+      "timestampaligner_unittest.cc",
+      "timeutils_unittest.cc",
+      "virtualsocket_unittest.cc",
+      "zero_memory_unittest.cc",
+    ]
+    deps = [
+      ":checks",
+      ":rate_limiter",
+      ":rtc_base",
+      ":rtc_base_approved",
+      ":rtc_base_tests_main",
+      ":rtc_base_tests_utils",
+      ":rtc_task_queue",
+      ":safe_compare",
+      ":safe_minmax",
+      ":stringutils",
+      "../api:array_view",
+      "../system_wrappers:system_wrappers",
+      "../test:fileutils",
+      "../test:test_support",
+    ]
+  }
+
+  rtc_source_set("rtc_task_queue_unittests") {
+    visibility = [ "*" ]
+    testonly = true
+
+    sources = [
+      "task_queue_unittest.cc",
+    ]
+    deps = [
+      ":rtc_base_approved",
+      ":rtc_base_tests_main",
+      ":rtc_base_tests_utils",
+      ":rtc_task_queue",
+      ":rtc_task_queue_for_test",
+      "../test:test_support",
+    ]
+  }
+
+  rtc_source_set("sequenced_task_checker_unittests") {
+    testonly = true
+
+    sources = [
+      "sequenced_task_checker_unittest.cc",
+    ]
+    deps = [
+      ":checks",
+      ":rtc_base_approved",
+      ":rtc_base_tests_main",
+      ":rtc_task_queue",
+      ":sequenced_task_checker",
+      "../test:test_support",
+    ]
+  }
+
+  rtc_source_set("weak_ptr_unittests") {
+    testonly = true
+
+    sources = [
+      "weak_ptr_unittest.cc",
+    ]
+    deps = [
+      ":rtc_base_approved_generic",
+      ":rtc_base_tests_main",
+      ":rtc_base_tests_utils",
+      ":rtc_task_queue",
+      ":weak_ptr",
+      "../test:test_support",
+    ]
+  }
+
+  rtc_source_set("rtc_numerics_unittests") {
+    testonly = true
+
+    sources = [
+      "numerics/exp_filter_unittest.cc",
+      "numerics/moving_median_filter_unittest.cc",
+      "numerics/percentile_filter_unittest.cc",
+      "numerics/sequence_number_util_unittest.cc",
+    ]
+    deps = [
+      ":rtc_base_approved",
+      ":rtc_base_tests_main",
+      ":rtc_numerics",
+      "../test:test_support",
+    ]
+  }
+
+  config("rtc_base_unittests_config") {
+    if (is_clang) {
+      cflags = [ "-Wno-unused-const-variable" ]
+    }
+  }
+  rtc_source_set("rtc_base_unittests") {
+    testonly = true
+
+    sources = [
+      "callback_unittest.cc",
+      "crc32_unittest.cc",
+      "data_rate_limiter_unittest.cc",
+      "helpers_unittest.cc",
+      "httpbase_unittest.cc",
+      "httpcommon_unittest.cc",
+      "httpserver_unittest.cc",
+      "ipaddress_unittest.cc",
+      "memory_usage_unittest.cc",
+      "messagedigest_unittest.cc",
+      "messagequeue_unittest.cc",
+      "nat_unittest.cc",
+      "network_unittest.cc",
+      "optionsfile_unittest.cc",
+      "proxy_unittest.cc",
+      "ptr_util_unittest.cc",
+      "rollingaccumulator_unittest.cc",
+      "rtccertificate_unittest.cc",
+      "rtccertificategenerator_unittest.cc",
+      "signalthread_unittest.cc",
+      "sigslot_unittest.cc",
+      "sigslottester_unittest.cc",
+      "stream_unittest.cc",
+      "testclient_unittest.cc",
+      "thread_unittest.cc",
+    ]
+    if (is_win) {
+      sources += [
+        "win32_unittest.cc",
+        "win32window_unittest.cc",
+      ]
+    }
+    if (is_posix) {
+      sources += [
+        "openssladapter_unittest.cc",
+        "ssladapter_unittest.cc",
+        "sslidentity_unittest.cc",
+        "sslstreamadapter_unittest.cc",
+      ]
+    }
+    deps = [
+      ":checks",
+      ":rtc_base_tests_main",
+      ":rtc_base_tests_utils",
+      ":stringutils",
+      "../api:array_view",
+      "../api:optional",
+      "../test:fileutils",
+      "../test:test_support",
+    ]
+    public_deps = [
+      ":rtc_base",
+    ]
+    configs += [ ":rtc_base_unittests_config" ]
+    if (build_with_chromium) {
+      include_dirs = [ "../../boringssl/src/include" ]
+    }
+    if (rtc_build_ssl) {
+      deps += [ "//third_party/boringssl" ]
+    } else {
+      configs += [ ":external_ssl_library" ]
+    }
+  }
+}
+
+if (is_android) {
+  rtc_android_library("base_java") {
+    java_files = [
+      "java/src/org/webrtc/ContextUtils.java",
+      "java/src/org/webrtc/Logging.java",
+      "java/src/org/webrtc/Size.java",
+      "java/src/org/webrtc/ThreadUtils.java",
+    ]
+  }
+}
diff --git a/rtc_base/DEPS b/rtc_base/DEPS
new file mode 100644
index 0000000..0dff88e
--- /dev/null
+++ b/rtc_base/DEPS
@@ -0,0 +1,19 @@
+include_rules = [
+  "+base/third_party/libevent",
+  "+json",
+  "+third_party/jsoncpp",
+  "+system_wrappers",
+]
+
+specific_include_rules = {
+  "gunit_prod.h": [
+    "+gtest",
+    "+testing/base/gunit_prod.h",
+  ],
+  "protobuf_utils.h": [
+    "+third_party/protobuf",
+  ],
+  "gunit\.h": [
+    "+testing/base/public/gunit.h"
+  ],
+}
diff --git a/rtc_base/Dummy.java b/rtc_base/Dummy.java
new file mode 100644
index 0000000..d8f02c9
--- /dev/null
+++ b/rtc_base/Dummy.java
@@ -0,0 +1,19 @@
+/*
+ *  Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/**
+ * This class only exists as glue in a transition.
+ * TODO(kjellander): Remove.
+ * See https://bugs.webrtc.org/7634 for more details.
+ */
+class Dummy {
+  Dummy() {
+  }
+}
diff --git a/rtc_base/OWNERS b/rtc_base/OWNERS
new file mode 100644
index 0000000..9e8ef1f
--- /dev/null
+++ b/rtc_base/OWNERS
@@ -0,0 +1,18 @@
+henrikg@webrtc.org
+hta@webrtc.org
+juberti@webrtc.org
+mflodman@webrtc.org
+perkj@webrtc.org
+pthatcher@webrtc.org
+sergeyu@chromium.org
+tommi@webrtc.org
+deadbeef@webrtc.org
+kwiberg@webrtc.org
+
+# These are for the common case of adding or renaming files. If you're doing
+# structural changes, please get a review from a reviewer in this file.
+per-file *.gn=*
+per-file *.gni=*
+
+per-file rate_statistics*=sprang@webrtc.org
+per-file rate_statistics*=stefan@webrtc.org
diff --git a/rtc_base/arraysize.h b/rtc_base/arraysize.h
new file mode 100644
index 0000000..f7845b5
--- /dev/null
+++ b/rtc_base/arraysize.h
@@ -0,0 +1,31 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_ARRAYSIZE_H_
+#define RTC_BASE_ARRAYSIZE_H_
+
+#include <stddef.h>
+
+// This file defines the arraysize() macro and is derived from Chromium's
+// base/macros.h.
+
+// The arraysize(arr) macro returns the # of elements in an array arr.
+// The expression is a compile-time constant, and therefore can be
+// used in defining new arrays, for example.  If you use arraysize on
+// a pointer by mistake, you will get a compile-time error.
+
+// This template function declaration is used in defining arraysize.
+// Note that the function doesn't need an implementation, as we only
+// use its type.
+template <typename T, size_t N> char (&ArraySizeHelper(T (&array)[N]))[N];
+
+#define arraysize(array) (sizeof(ArraySizeHelper(array)))
+
+#endif  // RTC_BASE_ARRAYSIZE_H_
diff --git a/rtc_base/asyncinvoker-inl.h b/rtc_base/asyncinvoker-inl.h
new file mode 100644
index 0000000..0d546b1
--- /dev/null
+++ b/rtc_base/asyncinvoker-inl.h
@@ -0,0 +1,63 @@
+/*
+ *  Copyright 2014 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_ASYNCINVOKER_INL_H_
+#define RTC_BASE_ASYNCINVOKER_INL_H_
+
+#include "rtc_base/bind.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/event.h"
+#include "rtc_base/messagehandler.h"
+#include "rtc_base/refcountedobject.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "rtc_base/sigslot.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace rtc {
+
+class AsyncInvoker;
+
+// Helper class for AsyncInvoker. Runs a task and triggers a callback
+// on the calling thread if necessary.
+class AsyncClosure {
+ public:
+  explicit AsyncClosure(AsyncInvoker* invoker);
+  virtual ~AsyncClosure();
+  // Runs the asynchronous task, and triggers a callback to the calling
+  // thread if needed. Should be called from the target thread.
+  virtual void Execute() = 0;
+
+ protected:
+  AsyncInvoker* invoker_;
+  // Reference counted so that if the AsyncInvoker destructor finishes before
+  // an AsyncClosure's destructor that's about to call
+  // "invocation_complete_->Set()", it's not dereferenced after being
+  // destroyed.
+  scoped_refptr<RefCountedObject<Event>> invocation_complete_;
+};
+
+// Simple closure that doesn't trigger a callback for the calling thread.
+template <class FunctorT>
+class FireAndForgetAsyncClosure : public AsyncClosure {
+ public:
+  explicit FireAndForgetAsyncClosure(AsyncInvoker* invoker,
+                                     FunctorT&& functor)
+      : AsyncClosure(invoker), functor_(std::forward<FunctorT>(functor)) {}
+  virtual void Execute() {
+    functor_();
+  }
+ private:
+  typename std::decay<FunctorT>::type functor_;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_ASYNCINVOKER_INL_H_
diff --git a/rtc_base/asyncinvoker.cc b/rtc_base/asyncinvoker.cc
new file mode 100644
index 0000000..7033c1a
--- /dev/null
+++ b/rtc_base/asyncinvoker.cc
@@ -0,0 +1,147 @@
+/*
+ *  Copyright 2014 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/asyncinvoker.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace rtc {
+
+AsyncInvoker::AsyncInvoker()
+    : pending_invocations_(0),
+      invocation_complete_(new RefCountedObject<Event>(false, false)),
+      destroying_(false) {}
+
+AsyncInvoker::~AsyncInvoker() {
+  destroying_.store(true, std::memory_order_relaxed);
+  // Messages for this need to be cleared *before* our destructor is complete.
+  MessageQueueManager::Clear(this);
+  // And we need to wait for any invocations that are still in progress on
+  // other threads. Using memory_order_acquire for synchronization with
+  // AsyncClosure destructors.
+  while (pending_invocations_.load(std::memory_order_acquire) > 0) {
+    // If the destructor was called while AsyncInvoke was being called by
+    // another thread, WITHIN an AsyncInvoked functor, it may do another
+    // Thread::Post even after we called MessageQueueManager::Clear(this). So
+    // we need to keep calling Clear to discard these posts.
+    Thread::Current()->Clear(this);
+    invocation_complete_->Wait(Event::kForever);
+  }
+}
+
+void AsyncInvoker::OnMessage(Message* msg) {
+  // Get the AsyncClosure shared ptr from this message's data.
+  ScopedMessageData<AsyncClosure>* data =
+      static_cast<ScopedMessageData<AsyncClosure>*>(msg->pdata);
+  // Execute the closure and trigger the return message if needed.
+  data->inner_data().Execute();
+  delete data;
+}
+
+void AsyncInvoker::Flush(Thread* thread, uint32_t id /*= MQID_ANY*/) {
+  // If the destructor is waiting for invocations to finish, don't start
+  // running even more tasks.
+  if (destroying_.load(std::memory_order_relaxed))
+    return;
+
+  // Run this on |thread| to reduce the number of context switches.
+  if (Thread::Current() != thread) {
+    thread->Invoke<void>(RTC_FROM_HERE,
+                         Bind(&AsyncInvoker::Flush, this, thread, id));
+    return;
+  }
+
+  MessageList removed;
+  thread->Clear(this, id, &removed);
+  for (MessageList::iterator it = removed.begin(); it != removed.end(); ++it) {
+    // This message was pending on this thread, so run it now.
+    thread->Send(it->posted_from, it->phandler, it->message_id, it->pdata);
+  }
+}
+
+void AsyncInvoker::Clear() {
+  MessageQueueManager::Clear(this);
+}
+
+void AsyncInvoker::DoInvoke(const Location& posted_from,
+                            Thread* thread,
+                            std::unique_ptr<AsyncClosure> closure,
+                            uint32_t id) {
+  if (destroying_.load(std::memory_order_relaxed)) {
+    // Note that this may be expected, if the application is AsyncInvoking
+    // tasks that AsyncInvoke other tasks. But otherwise it indicates a race
+    // between a thread destroying the AsyncInvoker and a thread still trying
+    // to use it.
+    RTC_LOG(LS_WARNING) << "Tried to invoke while destroying the invoker.";
+    return;
+  }
+  thread->Post(posted_from, this, id,
+               new ScopedMessageData<AsyncClosure>(std::move(closure)));
+}
+
+void AsyncInvoker::DoInvokeDelayed(const Location& posted_from,
+                                   Thread* thread,
+                                   std::unique_ptr<AsyncClosure> closure,
+                                   uint32_t delay_ms,
+                                   uint32_t id) {
+  if (destroying_.load(std::memory_order_relaxed)) {
+    // See above comment.
+    RTC_LOG(LS_WARNING) << "Tried to invoke while destroying the invoker.";
+    return;
+  }
+  thread->PostDelayed(posted_from, delay_ms, this, id,
+                      new ScopedMessageData<AsyncClosure>(std::move(closure)));
+}
+
+GuardedAsyncInvoker::GuardedAsyncInvoker() : thread_(Thread::Current()) {
+  thread_->SignalQueueDestroyed.connect(this,
+                                        &GuardedAsyncInvoker::ThreadDestroyed);
+}
+
+GuardedAsyncInvoker::~GuardedAsyncInvoker() {
+}
+
+bool GuardedAsyncInvoker::Flush(uint32_t id) {
+  CritScope cs(&crit_);
+  if (thread_ == nullptr)
+    return false;
+  invoker_.Flush(thread_, id);
+  return true;
+}
+
+void GuardedAsyncInvoker::ThreadDestroyed() {
+  CritScope cs(&crit_);
+  // We should never get more than one notification about the thread dying.
+  RTC_DCHECK(thread_ != nullptr);
+  thread_ = nullptr;
+}
+
+AsyncClosure::AsyncClosure(AsyncInvoker* invoker)
+    : invoker_(invoker), invocation_complete_(invoker_->invocation_complete_) {
+  invoker_->pending_invocations_.fetch_add(1, std::memory_order_relaxed);
+}
+
+AsyncClosure::~AsyncClosure() {
+  // Using memory_order_release for synchronization with the AsyncInvoker
+  // destructor.
+  invoker_->pending_invocations_.fetch_sub(1, std::memory_order_release);
+
+  // After |pending_invocations_| is decremented, we may need to signal
+  // |invocation_complete_| in case the AsyncInvoker is being destroyed and
+  // waiting for pending tasks to complete.
+  //
+  // It's also possible that the destructor finishes before "Set()" is called,
+  // which is safe because the event is reference counted (and in a thread-safe
+  // way).
+  invocation_complete_->Set();
+}
+
+}  // namespace rtc
diff --git a/rtc_base/asyncinvoker.h b/rtc_base/asyncinvoker.h
new file mode 100644
index 0000000..74e8689
--- /dev/null
+++ b/rtc_base/asyncinvoker.h
@@ -0,0 +1,265 @@
+/*
+ *  Copyright 2014 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_ASYNCINVOKER_H_
+#define RTC_BASE_ASYNCINVOKER_H_
+
+#include <atomic>
+#include <memory>
+#include <utility>
+
+#include "rtc_base/asyncinvoker-inl.h"
+#include "rtc_base/bind.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/event.h"
+#include "rtc_base/refcountedobject.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "rtc_base/sigslot.h"
+#include "rtc_base/thread.h"
+
+namespace rtc {
+
+// Invokes function objects (aka functors) asynchronously on a Thread, and
+// owns the lifetime of calls (ie, when this object is destroyed, calls in
+// flight are cancelled). AsyncInvoker can optionally execute a user-specified
+// function when the asynchronous call is complete, or operates in
+// fire-and-forget mode otherwise.
+//
+// AsyncInvoker does not own the thread it calls functors on.
+//
+// A note about async calls and object lifetimes: users should
+// be mindful of object lifetimes when calling functions asynchronously and
+// ensure objects used by the function _cannot_ be deleted between the
+// invocation and execution of the functor. AsyncInvoker is designed to
+// help: any calls in flight will be cancelled when the AsyncInvoker used to
+// make the call is destructed, and any calls executing will be allowed to
+// complete before AsyncInvoker destructs.
+//
+// The easiest way to ensure lifetimes are handled correctly is to create a
+// class that owns the Thread and AsyncInvoker objects, and then call its
+// methods asynchronously as needed.
+//
+// Example:
+//   class MyClass {
+//    public:
+//     void FireAsyncTaskWithResult(Thread* thread, int x) {
+//       // Specify a callback to get the result upon completion.
+//       invoker_.AsyncInvoke<int>(RTC_FROM_HERE,
+//           thread, Bind(&MyClass::AsyncTaskWithResult, this, x),
+//           &MyClass::OnTaskComplete, this);
+//     }
+//     void FireAnotherAsyncTask(Thread* thread) {
+//       // No callback specified means fire-and-forget.
+//       invoker_.AsyncInvoke<void>(RTC_FROM_HERE,
+//           thread, Bind(&MyClass::AnotherAsyncTask, this));
+//
+//    private:
+//     int AsyncTaskWithResult(int x) {
+//       // Some long running process...
+//       return x * x;
+//     }
+//     void AnotherAsyncTask() {
+//       // Some other long running process...
+//     }
+//     void OnTaskComplete(int result) { result_ = result; }
+//
+//     AsyncInvoker invoker_;
+//     int result_;
+//   };
+//
+// More details about threading:
+// - It's safe to construct/destruct AsyncInvoker on different threads.
+// - It's safe to call AsyncInvoke from different threads.
+// - It's safe to call AsyncInvoke recursively from *within* a functor that's
+//   being AsyncInvoked.
+// - However, it's *not* safe to call AsyncInvoke from *outside* a functor
+//   that's being AsyncInvoked while the AsyncInvoker is being destroyed on
+//   another thread. This is just inherently unsafe and there's no way to
+//   prevent that. So, the user of this class should ensure that the start of
+//   each "chain" of invocations is synchronized somehow with the AsyncInvoker's
+//   destruction. This can be done by starting each chain of invocations on the
+//   same thread on which it will be destroyed, or by using some other
+//   synchronization method.
+class AsyncInvoker : public MessageHandler {
+ public:
+  AsyncInvoker();
+  ~AsyncInvoker() override;
+
+  // Call |functor| asynchronously on |thread|, with no callback upon
+  // completion. Returns immediately.
+  template <class ReturnT, class FunctorT>
+  void AsyncInvoke(const Location& posted_from,
+                   Thread* thread,
+                   FunctorT&& functor,
+                   uint32_t id = 0) {
+    std::unique_ptr<AsyncClosure> closure(
+        new FireAndForgetAsyncClosure<FunctorT>(
+            this, std::forward<FunctorT>(functor)));
+    DoInvoke(posted_from, thread, std::move(closure), id);
+  }
+
+  // Call |functor| asynchronously on |thread| with |delay_ms|, with no callback
+  // upon completion. Returns immediately.
+  template <class ReturnT, class FunctorT>
+  void AsyncInvokeDelayed(const Location& posted_from,
+                          Thread* thread,
+                          FunctorT&& functor,
+                          uint32_t delay_ms,
+                          uint32_t id = 0) {
+    std::unique_ptr<AsyncClosure> closure(
+        new FireAndForgetAsyncClosure<FunctorT>(
+            this, std::forward<FunctorT>(functor)));
+    DoInvokeDelayed(posted_from, thread, std::move(closure), delay_ms, id);
+  }
+
+  // Synchronously execute on |thread| all outstanding calls we own
+  // that are pending on |thread|, and wait for calls to complete
+  // before returning. Optionally filter by message id.
+  // The destructor will not wait for outstanding calls, so if that
+  // behavior is desired, call Flush() before destroying this object.
+  void Flush(Thread* thread, uint32_t id = MQID_ANY);
+
+  // Cancels any outstanding calls we own that are pending on any thread, and
+  // which have not yet started to execute. This does not wait for any calls
+  // that have already started executing to complete.
+  void Clear();
+
+ private:
+  void OnMessage(Message* msg) override;
+  void DoInvoke(const Location& posted_from,
+                Thread* thread,
+                std::unique_ptr<AsyncClosure> closure,
+                uint32_t id);
+  void DoInvokeDelayed(const Location& posted_from,
+                       Thread* thread,
+                       std::unique_ptr<AsyncClosure> closure,
+                       uint32_t delay_ms,
+                       uint32_t id);
+
+  // Used to keep track of how many invocations (AsyncClosures) are still
+  // alive, so that the destructor can wait for them to finish, as described in
+  // the class documentation.
+  //
+  // TODO(deadbeef): Using a raw std::atomic like this is prone to error and
+  // difficult to maintain. We should try to wrap this functionality in a
+  // separate class to reduce the chance of errors being introduced in the
+  // future.
+  std::atomic<int> pending_invocations_;
+
+  // Reference counted so that if the AsyncInvoker destructor finishes before
+  // an AsyncClosure's destructor that's about to call
+  // "invocation_complete_->Set()", it's not dereferenced after being
+  // destroyed.
+  scoped_refptr<RefCountedObject<Event>> invocation_complete_;
+
+  // This flag is used to ensure that if an application AsyncInvokes tasks that
+  // recursively AsyncInvoke other tasks ad infinitum, the cycle eventually
+  // terminates.
+  std::atomic<bool> destroying_;
+
+  friend class AsyncClosure;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(AsyncInvoker);
+};
+
+// Similar to AsyncInvoker, but guards against the Thread being destroyed while
+// there are outstanding dangling pointers to it. It will connect to the current
+// thread in the constructor, and will get notified when that thread is
+// destroyed. After GuardedAsyncInvoker is constructed, it can be used from
+// other threads to post functors to the thread it was constructed on. If that
+// thread dies, any further calls to AsyncInvoke() will be safely ignored.
+class GuardedAsyncInvoker : public sigslot::has_slots<> {
+ public:
+  GuardedAsyncInvoker();
+  ~GuardedAsyncInvoker() override;
+
+  // Synchronously execute all outstanding calls we own, and wait for calls to
+  // complete before returning. Optionally filter by message id. The destructor
+  // will not wait for outstanding calls, so if that behavior is desired, call
+  // Flush() first. Returns false if the thread has died.
+  bool Flush(uint32_t id = MQID_ANY);
+
+  // Call |functor| asynchronously with no callback upon completion. Returns
+  // immediately. Returns false if the thread has died.
+  template <class ReturnT, class FunctorT>
+  bool AsyncInvoke(const Location& posted_from,
+                   FunctorT&& functor,
+                   uint32_t id = 0) {
+    CritScope cs(&crit_);
+    if (thread_ == nullptr)
+      return false;
+    invoker_.AsyncInvoke<ReturnT, FunctorT>(
+        posted_from, thread_, std::forward<FunctorT>(functor), id);
+    return true;
+  }
+
+  // Call |functor| asynchronously with |delay_ms|, with no callback upon
+  // completion. Returns immediately. Returns false if the thread has died.
+  template <class ReturnT, class FunctorT>
+  bool AsyncInvokeDelayed(const Location& posted_from,
+                          FunctorT&& functor,
+                          uint32_t delay_ms,
+                          uint32_t id = 0) {
+    CritScope cs(&crit_);
+    if (thread_ == nullptr)
+      return false;
+    invoker_.AsyncInvokeDelayed<ReturnT, FunctorT>(
+        posted_from, thread_, std::forward<FunctorT>(functor), delay_ms, id);
+    return true;
+  }
+
+  // Call |functor| asynchronously, calling |callback| when done. Returns false
+  // if the thread has died.
+  template <class ReturnT, class FunctorT, class HostT>
+  bool AsyncInvoke(const Location& posted_from,
+                   const Location& callback_posted_from,
+                   FunctorT&& functor,
+                   void (HostT::*callback)(ReturnT),
+                   HostT* callback_host,
+                   uint32_t id = 0) {
+    CritScope cs(&crit_);
+    if (thread_ == nullptr)
+      return false;
+    invoker_.AsyncInvoke<ReturnT, FunctorT, HostT>(
+        posted_from, callback_posted_from, thread_,
+        std::forward<FunctorT>(functor), callback, callback_host, id);
+    return true;
+  }
+
+  // Call |functor| asynchronously calling |callback| when done. Overloaded for
+  // void return. Returns false if the thread has died.
+  template <class ReturnT, class FunctorT, class HostT>
+  bool AsyncInvoke(const Location& posted_from,
+                   const Location& callback_posted_from,
+                   FunctorT&& functor,
+                   void (HostT::*callback)(),
+                   HostT* callback_host,
+                   uint32_t id = 0) {
+    CritScope cs(&crit_);
+    if (thread_ == nullptr)
+      return false;
+    invoker_.AsyncInvoke<ReturnT, FunctorT, HostT>(
+        posted_from, callback_posted_from, thread_,
+        std::forward<FunctorT>(functor), callback, callback_host, id);
+    return true;
+  }
+
+ private:
+  // Callback when |thread_| is destroyed.
+  void ThreadDestroyed();
+
+  CriticalSection crit_;
+  Thread* thread_ RTC_GUARDED_BY(crit_);
+  AsyncInvoker invoker_ RTC_GUARDED_BY(crit_);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_ASYNCINVOKER_H_
diff --git a/rtc_base/asyncpacketsocket.cc b/rtc_base/asyncpacketsocket.cc
new file mode 100644
index 0000000..d945039
--- /dev/null
+++ b/rtc_base/asyncpacketsocket.cc
@@ -0,0 +1,29 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/asyncpacketsocket.h"
+
+namespace rtc {
+
+PacketTimeUpdateParams::PacketTimeUpdateParams()
+    : rtp_sendtime_extension_id(-1),
+      srtp_auth_tag_len(-1),
+      srtp_packet_index(-1) {
+}
+
+PacketTimeUpdateParams::~PacketTimeUpdateParams() = default;
+
+AsyncPacketSocket::AsyncPacketSocket() {
+}
+
+AsyncPacketSocket::~AsyncPacketSocket() {
+}
+
+};  // namespace rtc
diff --git a/rtc_base/asyncpacketsocket.h b/rtc_base/asyncpacketsocket.h
new file mode 100644
index 0000000..16f4de0
--- /dev/null
+++ b/rtc_base/asyncpacketsocket.h
@@ -0,0 +1,143 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_ASYNCPACKETSOCKET_H_
+#define RTC_BASE_ASYNCPACKETSOCKET_H_
+
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/dscp.h"
+#include "rtc_base/sigslot.h"
+#include "rtc_base/socket.h"
+#include "rtc_base/timeutils.h"
+
+namespace rtc {
+
+// This structure holds the info needed to update the packet send time header
+// extension, including the information needed to update the authentication tag
+// after changing the value.
+struct PacketTimeUpdateParams {
+  PacketTimeUpdateParams();
+  ~PacketTimeUpdateParams();
+
+  int rtp_sendtime_extension_id;    // extension header id present in packet.
+  std::vector<char> srtp_auth_key;  // Authentication key.
+  int srtp_auth_tag_len;            // Authentication tag length.
+  int64_t srtp_packet_index;        // Required for Rtp Packet authentication.
+};
+
+// This structure holds meta information for the packet which is about to send
+// over network.
+struct PacketOptions {
+  PacketOptions() : dscp(DSCP_NO_CHANGE), packet_id(-1) {}
+  explicit PacketOptions(DiffServCodePoint dscp) : dscp(dscp), packet_id(-1) {}
+
+  DiffServCodePoint dscp;
+  int packet_id;  // 16 bits, -1 represents "not set".
+  PacketTimeUpdateParams packet_time_params;
+};
+
+// This structure will have the information about when packet is actually
+// received by socket.
+struct PacketTime {
+  PacketTime() : timestamp(-1), not_before(-1) {}
+  PacketTime(int64_t timestamp, int64_t not_before)
+      : timestamp(timestamp), not_before(not_before) {}
+
+  int64_t timestamp;   // Receive time after socket delivers the data.
+
+  // Earliest possible time the data could have arrived, indicating the
+  // potential error in the |timestamp| value, in case the system, is busy. For
+  // example, the time of the last select() call.
+  // If unknown, this value will be set to zero.
+  int64_t not_before;
+};
+
+inline PacketTime CreatePacketTime(int64_t not_before) {
+  return PacketTime(TimeMicros(), not_before);
+}
+
+// Provides the ability to receive packets asynchronously. Sends are not
+// buffered since it is acceptable to drop packets under high load.
+class AsyncPacketSocket : public sigslot::has_slots<> {
+ public:
+  enum State {
+    STATE_CLOSED,
+    STATE_BINDING,
+    STATE_BOUND,
+    STATE_CONNECTING,
+    STATE_CONNECTED
+  };
+
+  AsyncPacketSocket();
+  ~AsyncPacketSocket() override;
+
+  // Returns current local address. Address may be set to null if the
+  // socket is not bound yet (GetState() returns STATE_BINDING).
+  virtual SocketAddress GetLocalAddress() const = 0;
+
+  // Returns remote address. Returns zeroes if this is not a client TCP socket.
+  virtual SocketAddress GetRemoteAddress() const = 0;
+
+  // Send a packet.
+  virtual int Send(const void *pv, size_t cb, const PacketOptions& options) = 0;
+  virtual int SendTo(const void *pv, size_t cb, const SocketAddress& addr,
+                     const PacketOptions& options) = 0;
+
+  // Close the socket.
+  virtual int Close() = 0;
+
+  // Returns current state of the socket.
+  virtual State GetState() const = 0;
+
+  // Get/set options.
+  virtual int GetOption(Socket::Option opt, int* value) = 0;
+  virtual int SetOption(Socket::Option opt, int value) = 0;
+
+  // Get/Set current error.
+  // TODO: Remove SetError().
+  virtual int GetError() const = 0;
+  virtual void SetError(int error) = 0;
+
+  // Emitted each time a packet is read. Used only for UDP and
+  // connected TCP sockets.
+  sigslot::signal5<AsyncPacketSocket*, const char*, size_t,
+                   const SocketAddress&,
+                   const PacketTime&> SignalReadPacket;
+
+  // Emitted each time a packet is sent.
+  sigslot::signal2<AsyncPacketSocket*, const SentPacket&> SignalSentPacket;
+
+  // Emitted when the socket is currently able to send.
+  sigslot::signal1<AsyncPacketSocket*> SignalReadyToSend;
+
+  // Emitted after address for the socket is allocated, i.e. binding
+  // is finished. State of the socket is changed from BINDING to BOUND
+  // (for UDP and server TCP sockets) or CONNECTING (for client TCP
+  // sockets).
+  sigslot::signal2<AsyncPacketSocket*, const SocketAddress&> SignalAddressReady;
+
+  // Emitted for client TCP sockets when state is changed from
+  // CONNECTING to CONNECTED.
+  sigslot::signal1<AsyncPacketSocket*> SignalConnect;
+
+  // Emitted for client TCP sockets when state is changed from
+  // CONNECTED to CLOSED.
+  sigslot::signal2<AsyncPacketSocket*, int> SignalClose;
+
+  // Used only for listening TCP sockets.
+  sigslot::signal2<AsyncPacketSocket*, AsyncPacketSocket*> SignalNewConnection;
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(AsyncPacketSocket);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_ASYNCPACKETSOCKET_H_
diff --git a/rtc_base/asyncresolverinterface.cc b/rtc_base/asyncresolverinterface.cc
new file mode 100644
index 0000000..62dd36a
--- /dev/null
+++ b/rtc_base/asyncresolverinterface.cc
@@ -0,0 +1,20 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/asyncresolverinterface.h"
+
+namespace rtc {
+
+AsyncResolverInterface::AsyncResolverInterface() {
+}
+
+AsyncResolverInterface::~AsyncResolverInterface() = default;
+
+};  // namespace rtc
diff --git a/rtc_base/asyncresolverinterface.h b/rtc_base/asyncresolverinterface.h
new file mode 100644
index 0000000..96b5ec1
--- /dev/null
+++ b/rtc_base/asyncresolverinterface.h
@@ -0,0 +1,47 @@
+/*
+ *  Copyright 2013 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_ASYNCRESOLVERINTERFACE_H_
+#define RTC_BASE_ASYNCRESOLVERINTERFACE_H_
+
+#include "rtc_base/sigslot.h"
+#include "rtc_base/socketaddress.h"
+
+namespace rtc {
+
+// This interface defines the methods to resolve the address asynchronously.
+class AsyncResolverInterface {
+ public:
+  AsyncResolverInterface();
+  virtual ~AsyncResolverInterface();
+
+  // Start address resolve process.
+  virtual void Start(const SocketAddress& addr) = 0;
+  // Returns top most resolved address of |family|
+  virtual bool GetResolvedAddress(int family, SocketAddress* addr) const = 0;
+  // Returns error from resolver.
+  virtual int GetError() const = 0;
+  // Delete the resolver.
+  virtual void Destroy(bool wait) = 0;
+  // Returns top most resolved IPv4 address if address is resolved successfully.
+  // Otherwise returns address set in SetAddress.
+  SocketAddress address() const {
+    SocketAddress addr;
+    GetResolvedAddress(AF_INET, &addr);
+    return addr;
+  }
+
+  // This signal is fired when address resolve process is completed.
+  sigslot::signal1<AsyncResolverInterface*> SignalDone;
+};
+
+}  // namespace rtc
+
+#endif
diff --git a/rtc_base/asyncsocket.cc b/rtc_base/asyncsocket.cc
new file mode 100644
index 0000000..b28b2f9
--- /dev/null
+++ b/rtc_base/asyncsocket.cc
@@ -0,0 +1,127 @@
+/*
+ *  Copyright 2010 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/asyncsocket.h"
+#include "rtc_base/checks.h"
+
+namespace rtc {
+
+AsyncSocket::AsyncSocket() {
+}
+
+AsyncSocket::~AsyncSocket() {
+}
+
+AsyncSocketAdapter::AsyncSocketAdapter(AsyncSocket* socket) : socket_(nullptr) {
+  Attach(socket);
+}
+
+AsyncSocketAdapter::~AsyncSocketAdapter() {
+  delete socket_;
+}
+
+void AsyncSocketAdapter::Attach(AsyncSocket* socket) {
+  RTC_DCHECK(!socket_);
+  socket_ = socket;
+  if (socket_) {
+    socket_->SignalConnectEvent.connect(this,
+                                        &AsyncSocketAdapter::OnConnectEvent);
+    socket_->SignalReadEvent.connect(this, &AsyncSocketAdapter::OnReadEvent);
+    socket_->SignalWriteEvent.connect(this, &AsyncSocketAdapter::OnWriteEvent);
+    socket_->SignalCloseEvent.connect(this, &AsyncSocketAdapter::OnCloseEvent);
+  }
+}
+
+SocketAddress AsyncSocketAdapter::GetLocalAddress() const {
+  return socket_->GetLocalAddress();
+}
+
+SocketAddress AsyncSocketAdapter::GetRemoteAddress() const {
+  return socket_->GetRemoteAddress();
+}
+
+int AsyncSocketAdapter::Bind(const SocketAddress& addr) {
+  return socket_->Bind(addr);
+}
+
+int AsyncSocketAdapter::Connect(const SocketAddress& addr) {
+  return socket_->Connect(addr);
+}
+
+int AsyncSocketAdapter::Send(const void* pv, size_t cb) {
+  return socket_->Send(pv, cb);
+}
+
+int AsyncSocketAdapter::SendTo(const void* pv,
+                               size_t cb,
+                               const SocketAddress& addr) {
+  return socket_->SendTo(pv, cb, addr);
+}
+
+int AsyncSocketAdapter::Recv(void* pv, size_t cb, int64_t* timestamp) {
+  return socket_->Recv(pv, cb, timestamp);
+}
+
+int AsyncSocketAdapter::RecvFrom(void* pv,
+                                 size_t cb,
+                                 SocketAddress* paddr,
+                                 int64_t* timestamp) {
+  return socket_->RecvFrom(pv, cb, paddr, timestamp);
+}
+
+int AsyncSocketAdapter::Listen(int backlog) {
+  return socket_->Listen(backlog);
+}
+
+AsyncSocket* AsyncSocketAdapter::Accept(SocketAddress* paddr) {
+  return socket_->Accept(paddr);
+}
+
+int AsyncSocketAdapter::Close() {
+  return socket_->Close();
+}
+
+int AsyncSocketAdapter::GetError() const {
+  return socket_->GetError();
+}
+
+void AsyncSocketAdapter::SetError(int error) {
+  return socket_->SetError(error);
+}
+
+AsyncSocket::ConnState AsyncSocketAdapter::GetState() const {
+  return socket_->GetState();
+}
+
+int AsyncSocketAdapter::GetOption(Option opt, int* value) {
+  return socket_->GetOption(opt, value);
+}
+
+int AsyncSocketAdapter::SetOption(Option opt, int value) {
+  return socket_->SetOption(opt, value);
+}
+
+void AsyncSocketAdapter::OnConnectEvent(AsyncSocket* socket) {
+  SignalConnectEvent(this);
+}
+
+void AsyncSocketAdapter::OnReadEvent(AsyncSocket* socket) {
+  SignalReadEvent(this);
+}
+
+void AsyncSocketAdapter::OnWriteEvent(AsyncSocket* socket) {
+  SignalWriteEvent(this);
+}
+
+void AsyncSocketAdapter::OnCloseEvent(AsyncSocket* socket, int err) {
+  SignalCloseEvent(this, err);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/asyncsocket.h b/rtc_base/asyncsocket.h
new file mode 100644
index 0000000..c018c23
--- /dev/null
+++ b/rtc_base/asyncsocket.h
@@ -0,0 +1,83 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_ASYNCSOCKET_H_
+#define RTC_BASE_ASYNCSOCKET_H_
+
+#include "rtc_base/sigslot.h"
+#include "rtc_base/socket.h"
+
+namespace rtc {
+
+// TODO: Remove Socket and rename AsyncSocket to Socket.
+
+// Provides the ability to perform socket I/O asynchronously.
+class AsyncSocket : public Socket {
+ public:
+  AsyncSocket();
+  ~AsyncSocket() override;
+
+  AsyncSocket* Accept(SocketAddress* paddr) override = 0;
+
+  // SignalReadEvent and SignalWriteEvent use multi_threaded_local to allow
+  // access concurrently from different thread.
+  // For example SignalReadEvent::connect will be called in AsyncUDPSocket ctor
+  // but at the same time the SocketDispatcher maybe signaling the read event.
+  // ready to read
+  sigslot::signal1<AsyncSocket*,
+                   sigslot::multi_threaded_local> SignalReadEvent;
+  // ready to write
+  sigslot::signal1<AsyncSocket*,
+                   sigslot::multi_threaded_local> SignalWriteEvent;
+  sigslot::signal1<AsyncSocket*> SignalConnectEvent;     // connected
+  sigslot::signal2<AsyncSocket*, int> SignalCloseEvent;  // closed
+};
+
+class AsyncSocketAdapter : public AsyncSocket, public sigslot::has_slots<> {
+ public:
+  // The adapted socket may explicitly be null, and later assigned using Attach.
+  // However, subclasses which support detached mode must override any methods
+  // that will be called during the detached period (usually GetState()), to
+  // avoid dereferencing a null pointer.
+  explicit AsyncSocketAdapter(AsyncSocket* socket);
+  ~AsyncSocketAdapter() override;
+  void Attach(AsyncSocket* socket);
+  SocketAddress GetLocalAddress() const override;
+  SocketAddress GetRemoteAddress() const override;
+  int Bind(const SocketAddress& addr) override;
+  int Connect(const SocketAddress& addr) override;
+  int Send(const void* pv, size_t cb) override;
+  int SendTo(const void* pv, size_t cb, const SocketAddress& addr) override;
+  int Recv(void* pv, size_t cb, int64_t* timestamp) override;
+  int RecvFrom(void* pv,
+               size_t cb,
+               SocketAddress* paddr,
+               int64_t* timestamp) override;
+  int Listen(int backlog) override;
+  AsyncSocket* Accept(SocketAddress* paddr) override;
+  int Close() override;
+  int GetError() const override;
+  void SetError(int error) override;
+  ConnState GetState() const override;
+  int GetOption(Option opt, int* value) override;
+  int SetOption(Option opt, int value) override;
+
+ protected:
+  virtual void OnConnectEvent(AsyncSocket* socket);
+  virtual void OnReadEvent(AsyncSocket* socket);
+  virtual void OnWriteEvent(AsyncSocket* socket);
+  virtual void OnCloseEvent(AsyncSocket* socket, int err);
+
+  AsyncSocket* socket_;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_ASYNCSOCKET_H_
diff --git a/rtc_base/asynctcpsocket.cc b/rtc_base/asynctcpsocket.cc
new file mode 100644
index 0000000..9e0589c
--- /dev/null
+++ b/rtc_base/asynctcpsocket.cc
@@ -0,0 +1,332 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/asynctcpsocket.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <memory>
+
+#include "rtc_base/byteorder.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+#if defined(WEBRTC_POSIX)
+#include <errno.h>
+#endif  // WEBRTC_POSIX
+
+namespace rtc {
+
+static const size_t kMaxPacketSize = 64 * 1024;
+
+typedef uint16_t PacketLength;
+static const size_t kPacketLenSize = sizeof(PacketLength);
+
+static const size_t kBufSize = kMaxPacketSize + kPacketLenSize;
+
+// The input buffer will be resized so that at least kMinimumRecvSize bytes can
+// be received (but it will not grow above the maximum size passed to the
+// constructor).
+static const size_t kMinimumRecvSize = 128;
+
+static const int kListenBacklog = 5;
+
+// Binds and connects |socket|
+AsyncSocket* AsyncTCPSocketBase::ConnectSocket(
+    rtc::AsyncSocket* socket,
+    const rtc::SocketAddress& bind_address,
+    const rtc::SocketAddress& remote_address) {
+  std::unique_ptr<rtc::AsyncSocket> owned_socket(socket);
+  if (socket->Bind(bind_address) < 0) {
+    RTC_LOG(LS_ERROR) << "Bind() failed with error " << socket->GetError();
+    return nullptr;
+  }
+  if (socket->Connect(remote_address) < 0) {
+    RTC_LOG(LS_ERROR) << "Connect() failed with error " << socket->GetError();
+    return nullptr;
+  }
+  return owned_socket.release();
+}
+
+AsyncTCPSocketBase::AsyncTCPSocketBase(AsyncSocket* socket, bool listen,
+                                       size_t max_packet_size)
+    : socket_(socket),
+      listen_(listen),
+      max_insize_(max_packet_size),
+      max_outsize_(max_packet_size) {
+  if (!listen_) {
+    // Listening sockets don't send/receive data, so they don't need buffers.
+    inbuf_.EnsureCapacity(kMinimumRecvSize);
+  }
+
+  RTC_DCHECK(socket_.get() != nullptr);
+  socket_->SignalConnectEvent.connect(
+      this, &AsyncTCPSocketBase::OnConnectEvent);
+  socket_->SignalReadEvent.connect(this, &AsyncTCPSocketBase::OnReadEvent);
+  socket_->SignalWriteEvent.connect(this, &AsyncTCPSocketBase::OnWriteEvent);
+  socket_->SignalCloseEvent.connect(this, &AsyncTCPSocketBase::OnCloseEvent);
+
+  if (listen_) {
+    if (socket_->Listen(kListenBacklog) < 0) {
+      RTC_LOG(LS_ERROR) << "Listen() failed with error " << socket_->GetError();
+    }
+  }
+}
+
+AsyncTCPSocketBase::~AsyncTCPSocketBase() {}
+
+SocketAddress AsyncTCPSocketBase::GetLocalAddress() const {
+  return socket_->GetLocalAddress();
+}
+
+SocketAddress AsyncTCPSocketBase::GetRemoteAddress() const {
+  return socket_->GetRemoteAddress();
+}
+
+int AsyncTCPSocketBase::Close() {
+  return socket_->Close();
+}
+
+AsyncTCPSocket::State AsyncTCPSocketBase::GetState() const {
+  switch (socket_->GetState()) {
+    case Socket::CS_CLOSED:
+      return STATE_CLOSED;
+    case Socket::CS_CONNECTING:
+      if (listen_) {
+        return STATE_BOUND;
+      } else {
+        return STATE_CONNECTING;
+      }
+    case Socket::CS_CONNECTED:
+      return STATE_CONNECTED;
+    default:
+      RTC_NOTREACHED();
+      return STATE_CLOSED;
+  }
+}
+
+int AsyncTCPSocketBase::GetOption(Socket::Option opt, int* value) {
+  return socket_->GetOption(opt, value);
+}
+
+int AsyncTCPSocketBase::SetOption(Socket::Option opt, int value) {
+  return socket_->SetOption(opt, value);
+}
+
+int AsyncTCPSocketBase::GetError() const {
+  return socket_->GetError();
+}
+
+void AsyncTCPSocketBase::SetError(int error) {
+  return socket_->SetError(error);
+}
+
+int AsyncTCPSocketBase::SendTo(const void *pv, size_t cb,
+                               const SocketAddress& addr,
+                               const rtc::PacketOptions& options) {
+  const SocketAddress& remote_address = GetRemoteAddress();
+  if (addr == remote_address)
+    return Send(pv, cb, options);
+  // Remote address may be empty if there is a sudden network change.
+  RTC_DCHECK(remote_address.IsNil());
+  socket_->SetError(ENOTCONN);
+  return -1;
+}
+
+int AsyncTCPSocketBase::SendRaw(const void * pv, size_t cb) {
+  if (outbuf_.size() + cb > max_outsize_) {
+    socket_->SetError(EMSGSIZE);
+    return -1;
+  }
+
+  RTC_DCHECK(!listen_);
+  outbuf_.AppendData(static_cast<const uint8_t*>(pv), cb);
+
+  return FlushOutBuffer();
+}
+
+int AsyncTCPSocketBase::FlushOutBuffer() {
+  RTC_DCHECK(!listen_);
+  int res = socket_->Send(outbuf_.data(), outbuf_.size());
+  if (res <= 0) {
+    return res;
+  }
+  if (static_cast<size_t>(res) > outbuf_.size()) {
+    RTC_NOTREACHED();
+    return -1;
+  }
+  size_t new_size = outbuf_.size() - res;
+  if (new_size > 0) {
+    memmove(outbuf_.data(), outbuf_.data() + res, new_size);
+  }
+  outbuf_.SetSize(new_size);
+  return res;
+}
+
+void AsyncTCPSocketBase::AppendToOutBuffer(const void* pv, size_t cb) {
+  RTC_DCHECK(outbuf_.size() + cb <= max_outsize_);
+  RTC_DCHECK(!listen_);
+  outbuf_.AppendData(static_cast<const uint8_t*>(pv), cb);
+}
+
+void AsyncTCPSocketBase::OnConnectEvent(AsyncSocket* socket) {
+  SignalConnect(this);
+}
+
+void AsyncTCPSocketBase::OnReadEvent(AsyncSocket* socket) {
+  RTC_DCHECK(socket_.get() == socket);
+
+  if (listen_) {
+    rtc::SocketAddress address;
+    rtc::AsyncSocket* new_socket = socket->Accept(&address);
+    if (!new_socket) {
+      // TODO(stefan): Do something better like forwarding the error
+      // to the user.
+      RTC_LOG(LS_ERROR) << "TCP accept failed with error "
+                        << socket_->GetError();
+      return;
+    }
+
+    HandleIncomingConnection(new_socket);
+
+    // Prime a read event in case data is waiting.
+    new_socket->SignalReadEvent(new_socket);
+  } else {
+    size_t total_recv = 0;
+    while (true) {
+      size_t free_size = inbuf_.capacity() - inbuf_.size();
+      if (free_size < kMinimumRecvSize && inbuf_.capacity() < max_insize_) {
+        inbuf_.EnsureCapacity(std::min(max_insize_, inbuf_.capacity() * 2));
+        free_size = inbuf_.capacity() - inbuf_.size();
+      }
+
+      int len =
+          socket_->Recv(inbuf_.data() + inbuf_.size(), free_size, nullptr);
+      if (len < 0) {
+        // TODO(stefan): Do something better like forwarding the error to the
+        // user.
+        if (!socket_->IsBlocking()) {
+          RTC_LOG(LS_ERROR) << "Recv() returned error: " << socket_->GetError();
+        }
+        break;
+      }
+
+      total_recv += len;
+      inbuf_.SetSize(inbuf_.size() + len);
+      if (!len || static_cast<size_t>(len) < free_size) {
+        break;
+      }
+    }
+
+    if (!total_recv) {
+      return;
+    }
+
+    size_t size = inbuf_.size();
+    ProcessInput(inbuf_.data<char>(), &size);
+
+    if (size > inbuf_.size()) {
+      RTC_LOG(LS_ERROR) << "input buffer overflow";
+      RTC_NOTREACHED();
+      inbuf_.Clear();
+    } else {
+      inbuf_.SetSize(size);
+    }
+  }
+}
+
+void AsyncTCPSocketBase::OnWriteEvent(AsyncSocket* socket) {
+  RTC_DCHECK(socket_.get() == socket);
+
+  if (outbuf_.size() > 0) {
+    FlushOutBuffer();
+  }
+
+  if (outbuf_.size() == 0) {
+    SignalReadyToSend(this);
+  }
+}
+
+void AsyncTCPSocketBase::OnCloseEvent(AsyncSocket* socket, int error) {
+  SignalClose(this, error);
+}
+
+// AsyncTCPSocket
+// Binds and connects |socket| and creates AsyncTCPSocket for
+// it. Takes ownership of |socket|. Returns null if bind() or
+// connect() fail (|socket| is destroyed in that case).
+AsyncTCPSocket* AsyncTCPSocket::Create(
+    AsyncSocket* socket,
+    const SocketAddress& bind_address,
+    const SocketAddress& remote_address) {
+  return new AsyncTCPSocket(AsyncTCPSocketBase::ConnectSocket(
+      socket, bind_address, remote_address), false);
+}
+
+AsyncTCPSocket::AsyncTCPSocket(AsyncSocket* socket, bool listen)
+    : AsyncTCPSocketBase(socket, listen, kBufSize) {
+}
+
+int AsyncTCPSocket::Send(const void *pv, size_t cb,
+                         const rtc::PacketOptions& options) {
+  if (cb > kBufSize) {
+    SetError(EMSGSIZE);
+    return -1;
+  }
+
+  // If we are blocking on send, then silently drop this packet
+  if (!IsOutBufferEmpty())
+    return static_cast<int>(cb);
+
+  PacketLength pkt_len = HostToNetwork16(static_cast<PacketLength>(cb));
+  AppendToOutBuffer(&pkt_len, kPacketLenSize);
+  AppendToOutBuffer(pv, cb);
+
+  int res = FlushOutBuffer();
+  if (res <= 0) {
+    // drop packet if we made no progress
+    ClearOutBuffer();
+    return res;
+  }
+
+  rtc::SentPacket sent_packet(options.packet_id, rtc::TimeMillis());
+  SignalSentPacket(this, sent_packet);
+
+  // We claim to have sent the whole thing, even if we only sent partial
+  return static_cast<int>(cb);
+}
+
+void AsyncTCPSocket::ProcessInput(char * data, size_t* len) {
+  SocketAddress remote_addr(GetRemoteAddress());
+
+  while (true) {
+    if (*len < kPacketLenSize)
+      return;
+
+    PacketLength pkt_len = rtc::GetBE16(data);
+    if (*len < kPacketLenSize + pkt_len)
+      return;
+
+    SignalReadPacket(this, data + kPacketLenSize, pkt_len, remote_addr,
+                     CreatePacketTime(0));
+
+    *len -= kPacketLenSize + pkt_len;
+    if (*len > 0) {
+      memmove(data, data + kPacketLenSize + pkt_len, *len);
+    }
+  }
+}
+
+void AsyncTCPSocket::HandleIncomingConnection(AsyncSocket* socket) {
+  SignalNewConnection(this, new AsyncTCPSocket(socket, false));
+}
+
+}  // namespace rtc
diff --git a/rtc_base/asynctcpsocket.h b/rtc_base/asynctcpsocket.h
new file mode 100644
index 0000000..0a548d0
--- /dev/null
+++ b/rtc_base/asynctcpsocket.h
@@ -0,0 +1,108 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_ASYNCTCPSOCKET_H_
+#define RTC_BASE_ASYNCTCPSOCKET_H_
+
+#include <memory>
+
+#include "rtc_base/asyncpacketsocket.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/socketfactory.h"
+
+namespace rtc {
+
+// Simulates UDP semantics over TCP.  Send and Recv packet sizes
+// are preserved, and drops packets silently on Send, rather than
+// buffer them in user space.
+class AsyncTCPSocketBase : public AsyncPacketSocket {
+ public:
+  AsyncTCPSocketBase(AsyncSocket* socket, bool listen, size_t max_packet_size);
+  ~AsyncTCPSocketBase() override;
+
+  // Pure virtual methods to send and recv data.
+  int Send(const void *pv, size_t cb,
+                   const rtc::PacketOptions& options) override = 0;
+  virtual void ProcessInput(char* data, size_t* len) = 0;
+  // Signals incoming connection.
+  virtual void HandleIncomingConnection(AsyncSocket* socket) = 0;
+
+  SocketAddress GetLocalAddress() const override;
+  SocketAddress GetRemoteAddress() const override;
+  int SendTo(const void* pv,
+             size_t cb,
+             const SocketAddress& addr,
+             const rtc::PacketOptions& options) override;
+  int Close() override;
+
+  State GetState() const override;
+  int GetOption(Socket::Option opt, int* value) override;
+  int SetOption(Socket::Option opt, int value) override;
+  int GetError() const override;
+  void SetError(int error) override;
+
+ protected:
+  // Binds and connects |socket| and creates AsyncTCPSocket for
+  // it. Takes ownership of |socket|. Returns null if bind() or
+  // connect() fail (|socket| is destroyed in that case).
+  static AsyncSocket* ConnectSocket(AsyncSocket* socket,
+                                    const SocketAddress& bind_address,
+                                    const SocketAddress& remote_address);
+  virtual int SendRaw(const void* pv, size_t cb);
+  int FlushOutBuffer();
+  // Add data to |outbuf_|.
+  void AppendToOutBuffer(const void* pv, size_t cb);
+
+  // Helper methods for |outpos_|.
+  bool IsOutBufferEmpty() const { return outbuf_.size() == 0; }
+  void ClearOutBuffer() { outbuf_.Clear(); }
+
+ private:
+  // Called by the underlying socket
+  void OnConnectEvent(AsyncSocket* socket);
+  void OnReadEvent(AsyncSocket* socket);
+  void OnWriteEvent(AsyncSocket* socket);
+  void OnCloseEvent(AsyncSocket* socket, int error);
+
+  std::unique_ptr<AsyncSocket> socket_;
+  bool listen_;
+  Buffer inbuf_;
+  Buffer outbuf_;
+  size_t max_insize_;
+  size_t max_outsize_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(AsyncTCPSocketBase);
+};
+
+class AsyncTCPSocket : public AsyncTCPSocketBase {
+ public:
+  // Binds and connects |socket| and creates AsyncTCPSocket for
+  // it. Takes ownership of |socket|. Returns null if bind() or
+  // connect() fail (|socket| is destroyed in that case).
+  static AsyncTCPSocket* Create(AsyncSocket* socket,
+                                const SocketAddress& bind_address,
+                                const SocketAddress& remote_address);
+  AsyncTCPSocket(AsyncSocket* socket, bool listen);
+  ~AsyncTCPSocket() override {}
+
+  int Send(const void* pv,
+           size_t cb,
+           const rtc::PacketOptions& options) override;
+  void ProcessInput(char* data, size_t* len) override;
+  void HandleIncomingConnection(AsyncSocket* socket) override;
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(AsyncTCPSocket);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_ASYNCTCPSOCKET_H_
diff --git a/rtc_base/asynctcpsocket_unittest.cc b/rtc_base/asynctcpsocket_unittest.cc
new file mode 100644
index 0000000..7081411
--- /dev/null
+++ b/rtc_base/asynctcpsocket_unittest.cc
@@ -0,0 +1,50 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <string>
+
+#include "rtc_base/asynctcpsocket.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/virtualsocketserver.h"
+
+namespace rtc {
+
+class AsyncTCPSocketTest
+    : public testing::Test,
+      public sigslot::has_slots<> {
+ public:
+  AsyncTCPSocketTest()
+      : vss_(new rtc::VirtualSocketServer()),
+        socket_(vss_->CreateAsyncSocket(SOCK_STREAM)),
+        tcp_socket_(new AsyncTCPSocket(socket_, true)),
+        ready_to_send_(false) {
+    tcp_socket_->SignalReadyToSend.connect(this,
+                                           &AsyncTCPSocketTest::OnReadyToSend);
+  }
+
+  void OnReadyToSend(rtc::AsyncPacketSocket* socket) {
+    ready_to_send_ = true;
+  }
+
+ protected:
+  std::unique_ptr<VirtualSocketServer> vss_;
+  AsyncSocket* socket_;
+  std::unique_ptr<AsyncTCPSocket> tcp_socket_;
+  bool ready_to_send_;
+};
+
+TEST_F(AsyncTCPSocketTest, OnWriteEvent) {
+  EXPECT_FALSE(ready_to_send_);
+  socket_->SignalWriteEvent(socket_);
+  EXPECT_TRUE(ready_to_send_);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/asyncudpsocket.cc b/rtc_base/asyncudpsocket.cc
new file mode 100644
index 0000000..5a50ae3
--- /dev/null
+++ b/rtc_base/asyncudpsocket.cc
@@ -0,0 +1,131 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/asyncudpsocket.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace rtc {
+
+static const int BUF_SIZE = 64 * 1024;
+
+AsyncUDPSocket* AsyncUDPSocket::Create(
+    AsyncSocket* socket,
+    const SocketAddress& bind_address) {
+  std::unique_ptr<AsyncSocket> owned_socket(socket);
+  if (socket->Bind(bind_address) < 0) {
+    RTC_LOG(LS_ERROR) << "Bind() failed with error " << socket->GetError();
+    return nullptr;
+  }
+  return new AsyncUDPSocket(owned_socket.release());
+}
+
+AsyncUDPSocket* AsyncUDPSocket::Create(SocketFactory* factory,
+                                       const SocketAddress& bind_address) {
+  AsyncSocket* socket =
+      factory->CreateAsyncSocket(bind_address.family(), SOCK_DGRAM);
+  if (!socket)
+    return nullptr;
+  return Create(socket, bind_address);
+}
+
+AsyncUDPSocket::AsyncUDPSocket(AsyncSocket* socket)
+    : socket_(socket) {
+  size_ = BUF_SIZE;
+  buf_ = new char[size_];
+
+  // The socket should start out readable but not writable.
+  socket_->SignalReadEvent.connect(this, &AsyncUDPSocket::OnReadEvent);
+  socket_->SignalWriteEvent.connect(this, &AsyncUDPSocket::OnWriteEvent);
+}
+
+AsyncUDPSocket::~AsyncUDPSocket() {
+  delete [] buf_;
+}
+
+SocketAddress AsyncUDPSocket::GetLocalAddress() const {
+  return socket_->GetLocalAddress();
+}
+
+SocketAddress AsyncUDPSocket::GetRemoteAddress() const {
+  return socket_->GetRemoteAddress();
+}
+
+int AsyncUDPSocket::Send(const void *pv, size_t cb,
+                         const rtc::PacketOptions& options) {
+  rtc::SentPacket sent_packet(options.packet_id, rtc::TimeMillis());
+  int ret = socket_->Send(pv, cb);
+  SignalSentPacket(this, sent_packet);
+  return ret;
+}
+
+int AsyncUDPSocket::SendTo(const void *pv, size_t cb,
+                           const SocketAddress& addr,
+                           const rtc::PacketOptions& options) {
+  rtc::SentPacket sent_packet(options.packet_id, rtc::TimeMillis());
+  int ret = socket_->SendTo(pv, cb, addr);
+  SignalSentPacket(this, sent_packet);
+  return ret;
+}
+
+int AsyncUDPSocket::Close() {
+  return socket_->Close();
+}
+
+AsyncUDPSocket::State AsyncUDPSocket::GetState() const {
+  return STATE_BOUND;
+}
+
+int AsyncUDPSocket::GetOption(Socket::Option opt, int* value) {
+  return socket_->GetOption(opt, value);
+}
+
+int AsyncUDPSocket::SetOption(Socket::Option opt, int value) {
+  return socket_->SetOption(opt, value);
+}
+
+int AsyncUDPSocket::GetError() const {
+  return socket_->GetError();
+}
+
+void AsyncUDPSocket::SetError(int error) {
+  return socket_->SetError(error);
+}
+
+void AsyncUDPSocket::OnReadEvent(AsyncSocket* socket) {
+  RTC_DCHECK(socket_.get() == socket);
+
+  SocketAddress remote_addr;
+  int64_t timestamp;
+  int len = socket_->RecvFrom(buf_, size_, &remote_addr, &timestamp);
+  if (len < 0) {
+    // An error here typically means we got an ICMP error in response to our
+    // send datagram, indicating the remote address was unreachable.
+    // When doing ICE, this kind of thing will often happen.
+    // TODO: Do something better like forwarding the error to the user.
+    SocketAddress local_addr = socket_->GetLocalAddress();
+    RTC_LOG(LS_INFO) << "AsyncUDPSocket[" << local_addr.ToSensitiveString()
+                     << "] receive failed with error "
+                     << socket_->GetError();
+    return;
+  }
+
+  // TODO: Make sure that we got all of the packet.
+  // If we did not, then we should resize our buffer to be large enough.
+  SignalReadPacket(
+      this, buf_, static_cast<size_t>(len), remote_addr,
+      (timestamp > -1 ? PacketTime(timestamp, 0) : CreatePacketTime(0)));
+}
+
+void AsyncUDPSocket::OnWriteEvent(AsyncSocket* socket) {
+  SignalReadyToSend(this);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/asyncudpsocket.h b/rtc_base/asyncudpsocket.h
new file mode 100644
index 0000000..d814b4b
--- /dev/null
+++ b/rtc_base/asyncudpsocket.h
@@ -0,0 +1,67 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_ASYNCUDPSOCKET_H_
+#define RTC_BASE_ASYNCUDPSOCKET_H_
+
+#include <memory>
+
+#include "rtc_base/asyncpacketsocket.h"
+#include "rtc_base/socketfactory.h"
+
+namespace rtc {
+
+// Provides the ability to receive packets asynchronously.  Sends are not
+// buffered since it is acceptable to drop packets under high load.
+class AsyncUDPSocket : public AsyncPacketSocket {
+ public:
+  // Binds |socket| and creates AsyncUDPSocket for it. Takes ownership
+  // of |socket|. Returns null if bind() fails (|socket| is destroyed
+  // in that case).
+  static AsyncUDPSocket* Create(AsyncSocket* socket,
+                                const SocketAddress& bind_address);
+  // Creates a new socket for sending asynchronous UDP packets using an
+  // asynchronous socket from the given factory.
+  static AsyncUDPSocket* Create(SocketFactory* factory,
+                                const SocketAddress& bind_address);
+  explicit AsyncUDPSocket(AsyncSocket* socket);
+  ~AsyncUDPSocket() override;
+
+  SocketAddress GetLocalAddress() const override;
+  SocketAddress GetRemoteAddress() const override;
+  int Send(const void* pv,
+           size_t cb,
+           const rtc::PacketOptions& options) override;
+  int SendTo(const void* pv,
+             size_t cb,
+             const SocketAddress& addr,
+             const rtc::PacketOptions& options) override;
+  int Close() override;
+
+  State GetState() const override;
+  int GetOption(Socket::Option opt, int* value) override;
+  int SetOption(Socket::Option opt, int value) override;
+  int GetError() const override;
+  void SetError(int error) override;
+
+ private:
+  // Called when the underlying socket is ready to be read from.
+  void OnReadEvent(AsyncSocket* socket);
+  // Called when the underlying socket is ready to send.
+  void OnWriteEvent(AsyncSocket* socket);
+
+  std::unique_ptr<AsyncSocket> socket_;
+  char* buf_;
+  size_t size_;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_ASYNCUDPSOCKET_H_
diff --git a/rtc_base/asyncudpsocket_unittest.cc b/rtc_base/asyncudpsocket_unittest.cc
new file mode 100644
index 0000000..af7cc20
--- /dev/null
+++ b/rtc_base/asyncudpsocket_unittest.cc
@@ -0,0 +1,53 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <string>
+
+#include "rtc_base/asyncudpsocket.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/physicalsocketserver.h"
+#include "rtc_base/virtualsocketserver.h"
+
+namespace rtc {
+
+class AsyncUdpSocketTest
+    : public testing::Test,
+      public sigslot::has_slots<> {
+ public:
+  AsyncUdpSocketTest()
+      : pss_(new rtc::PhysicalSocketServer),
+        vss_(new rtc::VirtualSocketServer(pss_.get())),
+        socket_(vss_->CreateAsyncSocket(SOCK_DGRAM)),
+        udp_socket_(new AsyncUDPSocket(socket_)),
+        ready_to_send_(false) {
+    udp_socket_->SignalReadyToSend.connect(this,
+                                           &AsyncUdpSocketTest::OnReadyToSend);
+  }
+
+  void OnReadyToSend(rtc::AsyncPacketSocket* socket) {
+    ready_to_send_ = true;
+  }
+
+ protected:
+  std::unique_ptr<PhysicalSocketServer> pss_;
+  std::unique_ptr<VirtualSocketServer> vss_;
+  AsyncSocket* socket_;
+  std::unique_ptr<AsyncUDPSocket> udp_socket_;
+  bool ready_to_send_;
+};
+
+TEST_F(AsyncUdpSocketTest, OnWriteEvent) {
+  EXPECT_FALSE(ready_to_send_);
+  socket_->SignalWriteEvent(socket_);
+  EXPECT_TRUE(ready_to_send_);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/atomicops.h b/rtc_base/atomicops.h
new file mode 100644
index 0000000..c0ff1a6
--- /dev/null
+++ b/rtc_base/atomicops.h
@@ -0,0 +1,87 @@
+/*
+ *  Copyright 2011 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_ATOMICOPS_H_
+#define RTC_BASE_ATOMICOPS_H_
+
+#if defined(WEBRTC_WIN)
+// Include winsock2.h before including <windows.h> to maintain consistency with
+// win32.h.  We can't include win32.h directly here since it pulls in
+// headers such as basictypes.h which causes problems in Chromium where webrtc
+// exists as two separate projects, webrtc and libjingle.
+#include <winsock2.h>
+#include <windows.h>
+#endif  // defined(WEBRTC_WIN)
+
+namespace rtc {
+class AtomicOps {
+ public:
+#if defined(WEBRTC_WIN)
+  // Assumes sizeof(int) == sizeof(LONG), which it is on Win32 and Win64.
+  static int Increment(volatile int* i) {
+    return ::InterlockedIncrement(reinterpret_cast<volatile LONG*>(i));
+  }
+  static int Decrement(volatile int* i) {
+    return ::InterlockedDecrement(reinterpret_cast<volatile LONG*>(i));
+  }
+  static int AcquireLoad(volatile const int* i) {
+    return *i;
+  }
+  static void ReleaseStore(volatile int* i, int value) {
+    *i = value;
+  }
+  static int CompareAndSwap(volatile int* i, int old_value, int new_value) {
+    return ::InterlockedCompareExchange(reinterpret_cast<volatile LONG*>(i),
+                                        new_value,
+                                        old_value);
+  }
+  // Pointer variants.
+  template <typename T>
+  static T* AcquireLoadPtr(T* volatile* ptr) {
+    return *ptr;
+  }
+  template <typename T>
+  static T* CompareAndSwapPtr(T* volatile* ptr, T* old_value, T* new_value) {
+    return static_cast<T*>(::InterlockedCompareExchangePointer(
+        reinterpret_cast<PVOID volatile*>(ptr), new_value, old_value));
+  }
+#else
+  static int Increment(volatile int* i) {
+    return __sync_add_and_fetch(i, 1);
+  }
+  static int Decrement(volatile int* i) {
+    return __sync_sub_and_fetch(i, 1);
+  }
+  static int AcquireLoad(volatile const int* i) {
+    return __atomic_load_n(i, __ATOMIC_ACQUIRE);
+  }
+  static void ReleaseStore(volatile int* i, int value) {
+    __atomic_store_n(i, value, __ATOMIC_RELEASE);
+  }
+  static int CompareAndSwap(volatile int* i, int old_value, int new_value) {
+    return __sync_val_compare_and_swap(i, old_value, new_value);
+  }
+  // Pointer variants.
+  template <typename T>
+  static T* AcquireLoadPtr(T* volatile* ptr) {
+    return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
+  }
+  template <typename T>
+  static T* CompareAndSwapPtr(T* volatile* ptr, T* old_value, T* new_value) {
+    return __sync_val_compare_and_swap(ptr, old_value, new_value);
+  }
+#endif
+};
+
+
+
+}
+
+#endif  // RTC_BASE_ATOMICOPS_H_
diff --git a/rtc_base/atomicops_unittest.cc b/rtc_base/atomicops_unittest.cc
new file mode 100644
index 0000000..d5a1105
--- /dev/null
+++ b/rtc_base/atomicops_unittest.cc
@@ -0,0 +1,12 @@
+/*
+ *  Copyright 2011 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// TODO(pbos): Move AtomicOps tests to here from
+// webrtc/rtc_base/criticalsection_unittest.cc.
diff --git a/rtc_base/base64.cc b/rtc_base/base64.cc
new file mode 100644
index 0000000..0ccec0e
--- /dev/null
+++ b/rtc_base/base64.cc
@@ -0,0 +1,278 @@
+
+//*********************************************************************
+//* Base64 - a simple base64 encoder and decoder.
+//*
+//*     Copyright (c) 1999, Bob Withers - bwit@pobox.com
+//*
+//* This code may be freely used for any purpose, either personal
+//* or commercial, provided the authors copyright notice remains
+//* intact.
+//*
+//* Enhancements by Stanley Yamane:
+//*     o reverse lookup table for the decode function
+//*     o reserve string buffer space in advance
+//*
+//*********************************************************************
+
+#include "rtc_base/base64.h"
+
+#include <string.h>
+
+#include "rtc_base/checks.h"
+
+using std::vector;
+
+namespace rtc {
+
+static const char kPad = '=';
+static const unsigned char pd = 0xFD;  // Padding
+static const unsigned char sp = 0xFE;  // Whitespace
+static const unsigned char il = 0xFF;  // Illegal base64 character
+
+const char Base64::Base64Table[] =
+    // 0000000000111111111122222222223333333333444444444455555555556666
+    // 0123456789012345678901234567890123456789012345678901234567890123
+    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+// Decode Table gives the index of any valid base64 character in the
+// Base64 table
+// 65 == A, 97 == a, 48 == 0, 43 == +, 47 == /
+
+const unsigned char Base64::DecodeTable[] = {
+    // 0  1  2  3  4  5  6  7  8  9
+    il, il, il, il, il, il, il, il, il, sp,  //   0 -   9
+    sp, sp, sp, sp, il, il, il, il, il, il,  //  10 -  19
+    il, il, il, il, il, il, il, il, il, il,  //  20 -  29
+    il, il, sp, il, il, il, il, il, il, il,  //  30 -  39
+    il, il, il, 62, il, il, il, 63, 52, 53,  //  40 -  49
+    54, 55, 56, 57, 58, 59, 60, 61, il, il,  //  50 -  59
+    il, pd, il, il, il, 0,  1,  2,  3,  4,   //  60 -  69
+    5,  6,  7,  8,  9,  10, 11, 12, 13, 14,  //  70 -  79
+    15, 16, 17, 18, 19, 20, 21, 22, 23, 24,  //  80 -  89
+    25, il, il, il, il, il, il, 26, 27, 28,  //  90 -  99
+    29, 30, 31, 32, 33, 34, 35, 36, 37, 38,  // 100 - 109
+    39, 40, 41, 42, 43, 44, 45, 46, 47, 48,  // 110 - 119
+    49, 50, 51, il, il, il, il, il, il, il,  // 120 - 129
+    il, il, il, il, il, il, il, il, il, il,  // 130 - 139
+    il, il, il, il, il, il, il, il, il, il,  // 140 - 149
+    il, il, il, il, il, il, il, il, il, il,  // 150 - 159
+    il, il, il, il, il, il, il, il, il, il,  // 160 - 169
+    il, il, il, il, il, il, il, il, il, il,  // 170 - 179
+    il, il, il, il, il, il, il, il, il, il,  // 180 - 189
+    il, il, il, il, il, il, il, il, il, il,  // 190 - 199
+    il, il, il, il, il, il, il, il, il, il,  // 200 - 209
+    il, il, il, il, il, il, il, il, il, il,  // 210 - 219
+    il, il, il, il, il, il, il, il, il, il,  // 220 - 229
+    il, il, il, il, il, il, il, il, il, il,  // 230 - 239
+    il, il, il, il, il, il, il, il, il, il,  // 240 - 249
+    il, il, il, il, il, il                   // 250 - 255
+};
+
+bool Base64::IsBase64Char(char ch) {
+  return (('A' <= ch) && (ch <= 'Z')) || (('a' <= ch) && (ch <= 'z')) ||
+         (('0' <= ch) && (ch <= '9')) || (ch == '+') || (ch == '/');
+}
+
+bool Base64::GetNextBase64Char(char ch, char* next_ch) {
+  if (next_ch == nullptr) {
+    return false;
+  }
+  const char* p = strchr(Base64Table, ch);
+  if (!p)
+    return false;
+  ++p;
+  *next_ch = (*p) ? *p : Base64Table[0];
+  return true;
+}
+
+bool Base64::IsBase64Encoded(const std::string& str) {
+  for (size_t i = 0; i < str.size(); ++i) {
+    if (!IsBase64Char(str.at(i)))
+      return false;
+  }
+  return true;
+}
+
+void Base64::EncodeFromArray(const void* data,
+                             size_t len,
+                             std::string* result) {
+  RTC_DCHECK(nullptr != result);
+  result->clear();
+  result->resize(((len + 2) / 3) * 4);
+  const unsigned char* byte_data = static_cast<const unsigned char*>(data);
+
+  unsigned char c;
+  size_t i = 0;
+  size_t dest_ix = 0;
+  while (i < len) {
+    c = (byte_data[i] >> 2) & 0x3f;
+    (*result)[dest_ix++] = Base64Table[c];
+
+    c = (byte_data[i] << 4) & 0x3f;
+    if (++i < len) {
+      c |= (byte_data[i] >> 4) & 0x0f;
+    }
+    (*result)[dest_ix++] = Base64Table[c];
+
+    if (i < len) {
+      c = (byte_data[i] << 2) & 0x3f;
+      if (++i < len) {
+        c |= (byte_data[i] >> 6) & 0x03;
+      }
+      (*result)[dest_ix++] = Base64Table[c];
+    } else {
+      (*result)[dest_ix++] = kPad;
+    }
+
+    if (i < len) {
+      c = byte_data[i] & 0x3f;
+      (*result)[dest_ix++] = Base64Table[c];
+      ++i;
+    } else {
+      (*result)[dest_ix++] = kPad;
+    }
+  }
+}
+
+size_t Base64::GetNextQuantum(DecodeFlags parse_flags,
+                              bool illegal_pads,
+                              const char* data,
+                              size_t len,
+                              size_t* dpos,
+                              unsigned char qbuf[4],
+                              bool* padded) {
+  size_t byte_len = 0, pad_len = 0, pad_start = 0;
+  for (; (byte_len < 4) && (*dpos < len); ++*dpos) {
+    qbuf[byte_len] = DecodeTable[static_cast<unsigned char>(data[*dpos])];
+    if ((il == qbuf[byte_len]) || (illegal_pads && (pd == qbuf[byte_len]))) {
+      if (parse_flags != DO_PARSE_ANY)
+        break;
+      // Ignore illegal characters
+    } else if (sp == qbuf[byte_len]) {
+      if (parse_flags == DO_PARSE_STRICT)
+        break;
+      // Ignore spaces
+    } else if (pd == qbuf[byte_len]) {
+      if (byte_len < 2) {
+        if (parse_flags != DO_PARSE_ANY)
+          break;
+        // Ignore unexpected padding
+      } else if (byte_len + pad_len >= 4) {
+        if (parse_flags != DO_PARSE_ANY)
+          break;
+        // Ignore extra pads
+      } else {
+        if (1 == ++pad_len) {
+          pad_start = *dpos;
+        }
+      }
+    } else {
+      if (pad_len > 0) {
+        if (parse_flags != DO_PARSE_ANY)
+          break;
+        // Ignore pads which are followed by data
+        pad_len = 0;
+      }
+      ++byte_len;
+    }
+  }
+  for (size_t i = byte_len; i < 4; ++i) {
+    qbuf[i] = 0;
+  }
+  if (4 == byte_len + pad_len) {
+    *padded = true;
+  } else {
+    *padded = false;
+    if (pad_len) {
+      // Roll back illegal padding
+      *dpos = pad_start;
+    }
+  }
+  return byte_len;
+}
+
+bool Base64::DecodeFromArray(const char* data,
+                             size_t len,
+                             DecodeFlags flags,
+                             std::string* result,
+                             size_t* data_used) {
+  return DecodeFromArrayTemplate<std::string>(data, len, flags, result,
+                                              data_used);
+}
+
+bool Base64::DecodeFromArray(const char* data,
+                             size_t len,
+                             DecodeFlags flags,
+                             vector<char>* result,
+                             size_t* data_used) {
+  return DecodeFromArrayTemplate<vector<char>>(data, len, flags, result,
+                                               data_used);
+}
+
+bool Base64::DecodeFromArray(const char* data,
+                             size_t len,
+                             DecodeFlags flags,
+                             vector<uint8_t>* result,
+                             size_t* data_used) {
+  return DecodeFromArrayTemplate<vector<uint8_t>>(data, len, flags, result,
+                                                  data_used);
+}
+
+template <typename T>
+bool Base64::DecodeFromArrayTemplate(const char* data,
+                                     size_t len,
+                                     DecodeFlags flags,
+                                     T* result,
+                                     size_t* data_used) {
+  RTC_DCHECK(nullptr != result);
+  RTC_DCHECK(flags <= (DO_PARSE_MASK | DO_PAD_MASK | DO_TERM_MASK));
+
+  const DecodeFlags parse_flags = flags & DO_PARSE_MASK;
+  const DecodeFlags pad_flags = flags & DO_PAD_MASK;
+  const DecodeFlags term_flags = flags & DO_TERM_MASK;
+  RTC_DCHECK(0 != parse_flags);
+  RTC_DCHECK(0 != pad_flags);
+  RTC_DCHECK(0 != term_flags);
+
+  result->clear();
+  result->reserve(len);
+
+  size_t dpos = 0;
+  bool success = true, padded;
+  unsigned char c, qbuf[4];
+  while (dpos < len) {
+    size_t qlen = GetNextQuantum(parse_flags, (DO_PAD_NO == pad_flags), data,
+                                 len, &dpos, qbuf, &padded);
+    c = (qbuf[0] << 2) | ((qbuf[1] >> 4) & 0x3);
+    if (qlen >= 2) {
+      result->push_back(c);
+      c = ((qbuf[1] << 4) & 0xf0) | ((qbuf[2] >> 2) & 0xf);
+      if (qlen >= 3) {
+        result->push_back(c);
+        c = ((qbuf[2] << 6) & 0xc0) | qbuf[3];
+        if (qlen >= 4) {
+          result->push_back(c);
+          c = 0;
+        }
+      }
+    }
+    if (qlen < 4) {
+      if ((DO_TERM_ANY != term_flags) && (0 != c)) {
+        success = false;  // unused bits
+      }
+      if ((DO_PAD_YES == pad_flags) && !padded) {
+        success = false;  // expected padding
+      }
+      break;
+    }
+  }
+  if ((DO_TERM_BUFFER == term_flags) && (dpos != len)) {
+    success = false;  // unused chars
+  }
+  if (data_used) {
+    *data_used = dpos;
+  }
+  return success;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/base64.h b/rtc_base/base64.h
new file mode 100644
index 0000000..bfe2fe6
--- /dev/null
+++ b/rtc_base/base64.h
@@ -0,0 +1,123 @@
+
+//*********************************************************************
+//* C_Base64 - a simple base64 encoder and decoder.
+//*
+//*     Copyright (c) 1999, Bob Withers - bwit@pobox.com
+//*
+//* This code may be freely used for any purpose, either personal
+//* or commercial, provided the authors copyright notice remains
+//* intact.
+//*********************************************************************
+
+#ifndef RTC_BASE_BASE64_H_
+#define RTC_BASE_BASE64_H_
+
+#include <string>
+#include <vector>
+
+namespace rtc {
+
+class Base64 {
+ public:
+  enum DecodeOption {
+    DO_PARSE_STRICT = 1,  // Parse only base64 characters
+    DO_PARSE_WHITE = 2,   // Parse only base64 and whitespace characters
+    DO_PARSE_ANY = 3,     // Parse all characters
+    DO_PARSE_MASK = 3,
+
+    DO_PAD_YES = 4,  // Padding is required
+    DO_PAD_ANY = 8,  // Padding is optional
+    DO_PAD_NO = 12,  // Padding is disallowed
+    DO_PAD_MASK = 12,
+
+    DO_TERM_BUFFER = 16,  // Must termiante at end of buffer
+    DO_TERM_CHAR = 32,    // May terminate at any character boundary
+    DO_TERM_ANY = 48,     // May terminate at a sub-character bit offset
+    DO_TERM_MASK = 48,
+
+    // Strictest interpretation
+    DO_STRICT = DO_PARSE_STRICT | DO_PAD_YES | DO_TERM_BUFFER,
+
+    DO_LAX = DO_PARSE_ANY | DO_PAD_ANY | DO_TERM_CHAR,
+  };
+  typedef int DecodeFlags;
+
+  static bool IsBase64Char(char ch);
+
+  // Get the char next to the |ch| from the Base64Table.
+  // If the |ch| is the last one in the Base64Table then returns
+  // the first one from the table.
+  // Expects the |ch| be a base64 char.
+  // The result will be saved in |next_ch|.
+  // Returns true on success.
+  static bool GetNextBase64Char(char ch, char* next_ch);
+
+  // Determines whether the given string consists entirely of valid base64
+  // encoded characters.
+  static bool IsBase64Encoded(const std::string& str);
+
+  static void EncodeFromArray(const void* data,
+                              size_t len,
+                              std::string* result);
+  static bool DecodeFromArray(const char* data,
+                              size_t len,
+                              DecodeFlags flags,
+                              std::string* result,
+                              size_t* data_used);
+  static bool DecodeFromArray(const char* data,
+                              size_t len,
+                              DecodeFlags flags,
+                              std::vector<char>* result,
+                              size_t* data_used);
+  static bool DecodeFromArray(const char* data,
+                              size_t len,
+                              DecodeFlags flags,
+                              std::vector<uint8_t>* result,
+                              size_t* data_used);
+
+  // Convenience Methods
+  static inline std::string Encode(const std::string& data) {
+    std::string result;
+    EncodeFromArray(data.data(), data.size(), &result);
+    return result;
+  }
+  static inline std::string Decode(const std::string& data, DecodeFlags flags) {
+    std::string result;
+    DecodeFromArray(data.data(), data.size(), flags, &result, nullptr);
+    return result;
+  }
+  static inline bool Decode(const std::string& data,
+                            DecodeFlags flags,
+                            std::string* result,
+                            size_t* data_used) {
+    return DecodeFromArray(data.data(), data.size(), flags, result, data_used);
+  }
+  static inline bool Decode(const std::string& data,
+                            DecodeFlags flags,
+                            std::vector<char>* result,
+                            size_t* data_used) {
+    return DecodeFromArray(data.data(), data.size(), flags, result, data_used);
+  }
+
+ private:
+  static const char Base64Table[];
+  static const unsigned char DecodeTable[];
+
+  static size_t GetNextQuantum(DecodeFlags parse_flags,
+                               bool illegal_pads,
+                               const char* data,
+                               size_t len,
+                               size_t* dpos,
+                               unsigned char qbuf[4],
+                               bool* padded);
+  template <typename T>
+  static bool DecodeFromArrayTemplate(const char* data,
+                                      size_t len,
+                                      DecodeFlags flags,
+                                      T* result,
+                                      size_t* data_used);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_BASE64_H_
diff --git a/rtc_base/base64_unittest.cc b/rtc_base/base64_unittest.cc
new file mode 100644
index 0000000..0f7c80d
--- /dev/null
+++ b/rtc_base/base64_unittest.cc
@@ -0,0 +1,999 @@
+/*
+ *  Copyright 2011 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/base64.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/stringutils.h"
+
+#include "rtc_base/testbase64.h"
+
+using namespace std;
+using namespace rtc;
+
+static struct {
+  size_t plain_length;
+  const char* plaintext;
+  const char* cyphertext;
+} base64_tests[] = {
+
+  // Basic bit patterns;
+  // values obtained with "echo -n '...' | uuencode -m test"
+
+  { 1, "\000", "AA==" },
+  { 1, "\001", "AQ==" },
+  { 1, "\002", "Ag==" },
+  { 1, "\004", "BA==" },
+  { 1, "\010", "CA==" },
+  { 1, "\020", "EA==" },
+  { 1, "\040", "IA==" },
+  { 1, "\100", "QA==" },
+  { 1, "\200", "gA==" },
+
+  { 1, "\377", "/w==" },
+  { 1, "\376", "/g==" },
+  { 1, "\375", "/Q==" },
+  { 1, "\373", "+w==" },
+  { 1, "\367", "9w==" },
+  { 1, "\357", "7w==" },
+  { 1, "\337", "3w==" },
+  { 1, "\277", "vw==" },
+  { 1, "\177", "fw==" },
+  { 2, "\000\000", "AAA=" },
+  { 2, "\000\001", "AAE=" },
+  { 2, "\000\002", "AAI=" },
+  { 2, "\000\004", "AAQ=" },
+  { 2, "\000\010", "AAg=" },
+  { 2, "\000\020", "ABA=" },
+  { 2, "\000\040", "ACA=" },
+  { 2, "\000\100", "AEA=" },
+  { 2, "\000\200", "AIA=" },
+  { 2, "\001\000", "AQA=" },
+  { 2, "\002\000", "AgA=" },
+  { 2, "\004\000", "BAA=" },
+  { 2, "\010\000", "CAA=" },
+  { 2, "\020\000", "EAA=" },
+  { 2, "\040\000", "IAA=" },
+  { 2, "\100\000", "QAA=" },
+  { 2, "\200\000", "gAA=" },
+
+  { 2, "\377\377", "//8=" },
+  { 2, "\377\376", "//4=" },
+  { 2, "\377\375", "//0=" },
+  { 2, "\377\373", "//s=" },
+  { 2, "\377\367", "//c=" },
+  { 2, "\377\357", "/+8=" },
+  { 2, "\377\337", "/98=" },
+  { 2, "\377\277", "/78=" },
+  { 2, "\377\177", "/38=" },
+  { 2, "\376\377", "/v8=" },
+  { 2, "\375\377", "/f8=" },
+  { 2, "\373\377", "+/8=" },
+  { 2, "\367\377", "9/8=" },
+  { 2, "\357\377", "7/8=" },
+  { 2, "\337\377", "3/8=" },
+  { 2, "\277\377", "v/8=" },
+  { 2, "\177\377", "f/8=" },
+
+  { 3, "\000\000\000", "AAAA" },
+  { 3, "\000\000\001", "AAAB" },
+  { 3, "\000\000\002", "AAAC" },
+  { 3, "\000\000\004", "AAAE" },
+  { 3, "\000\000\010", "AAAI" },
+  { 3, "\000\000\020", "AAAQ" },
+  { 3, "\000\000\040", "AAAg" },
+  { 3, "\000\000\100", "AABA" },
+  { 3, "\000\000\200", "AACA" },
+  { 3, "\000\001\000", "AAEA" },
+  { 3, "\000\002\000", "AAIA" },
+  { 3, "\000\004\000", "AAQA" },
+  { 3, "\000\010\000", "AAgA" },
+  { 3, "\000\020\000", "ABAA" },
+  { 3, "\000\040\000", "ACAA" },
+  { 3, "\000\100\000", "AEAA" },
+  { 3, "\000\200\000", "AIAA" },
+  { 3, "\001\000\000", "AQAA" },
+  { 3, "\002\000\000", "AgAA" },
+  { 3, "\004\000\000", "BAAA" },
+  { 3, "\010\000\000", "CAAA" },
+  { 3, "\020\000\000", "EAAA" },
+  { 3, "\040\000\000", "IAAA" },
+  { 3, "\100\000\000", "QAAA" },
+  { 3, "\200\000\000", "gAAA" },
+
+  { 3, "\377\377\377", "////" },
+  { 3, "\377\377\376", "///+" },
+  { 3, "\377\377\375", "///9" },
+  { 3, "\377\377\373", "///7" },
+  { 3, "\377\377\367", "///3" },
+  { 3, "\377\377\357", "///v" },
+  { 3, "\377\377\337", "///f" },
+  { 3, "\377\377\277", "//+/" },
+  { 3, "\377\377\177", "//9/" },
+  { 3, "\377\376\377", "//7/" },
+  { 3, "\377\375\377", "//3/" },
+  { 3, "\377\373\377", "//v/" },
+  { 3, "\377\367\377", "//f/" },
+  { 3, "\377\357\377", "/+//" },
+  { 3, "\377\337\377", "/9//" },
+  { 3, "\377\277\377", "/7//" },
+  { 3, "\377\177\377", "/3//" },
+  { 3, "\376\377\377", "/v//" },
+  { 3, "\375\377\377", "/f//" },
+  { 3, "\373\377\377", "+///" },
+  { 3, "\367\377\377", "9///" },
+  { 3, "\357\377\377", "7///" },
+  { 3, "\337\377\377", "3///" },
+  { 3, "\277\377\377", "v///" },
+  { 3, "\177\377\377", "f///" },
+
+  // Random numbers: values obtained with
+  //
+  //  #! /bin/bash
+  //  dd bs=$1 count=1 if=/dev/random of=/tmp/bar.random
+  //  od -N $1 -t o1 /tmp/bar.random
+  //  uuencode -m test < /tmp/bar.random
+  //
+  // where $1 is the number of bytes (2, 3)
+
+  { 2, "\243\361", "o/E=" },
+  { 2, "\024\167", "FHc=" },
+  { 2, "\313\252", "y6o=" },
+  { 2, "\046\041", "JiE=" },
+  { 2, "\145\236", "ZZ4=" },
+  { 2, "\254\325", "rNU=" },
+  { 2, "\061\330", "Mdg=" },
+  { 2, "\245\032", "pRo=" },
+  { 2, "\006\000", "BgA=" },
+  { 2, "\375\131", "/Vk=" },
+  { 2, "\303\210", "w4g=" },
+  { 2, "\040\037", "IB8=" },
+  { 2, "\261\372", "sfo=" },
+  { 2, "\335\014", "3Qw=" },
+  { 2, "\233\217", "m48=" },
+  { 2, "\373\056", "+y4=" },
+  { 2, "\247\232", "p5o=" },
+  { 2, "\107\053", "Rys=" },
+  { 2, "\204\077", "hD8=" },
+  { 2, "\276\211", "vok=" },
+  { 2, "\313\110", "y0g=" },
+  { 2, "\363\376", "8/4=" },
+  { 2, "\251\234", "qZw=" },
+  { 2, "\103\262", "Q7I=" },
+  { 2, "\142\312", "Yso=" },
+  { 2, "\067\211", "N4k=" },
+  { 2, "\220\001", "kAE=" },
+  { 2, "\152\240", "aqA=" },
+  { 2, "\367\061", "9zE=" },
+  { 2, "\133\255", "W60=" },
+  { 2, "\176\035", "fh0=" },
+  { 2, "\032\231", "Gpk=" },
+
+  { 3, "\013\007\144", "Cwdk" },
+  { 3, "\030\112\106", "GEpG" },
+  { 3, "\047\325\046", "J9Um" },
+  { 3, "\310\160\022", "yHAS" },
+  { 3, "\131\100\237", "WUCf" },
+  { 3, "\064\342\134", "NOJc" },
+  { 3, "\010\177\004", "CH8E" },
+  { 3, "\345\147\205", "5WeF" },
+  { 3, "\300\343\360", "wOPw" },
+  { 3, "\061\240\201", "MaCB" },
+  { 3, "\225\333\044", "ldsk" },
+  { 3, "\215\137\352", "jV/q" },
+  { 3, "\371\147\160", "+Wdw" },
+  { 3, "\030\320\051", "GNAp" },
+  { 3, "\044\174\241", "JHyh" },
+  { 3, "\260\127\037", "sFcf" },
+  { 3, "\111\045\033", "SSUb" },
+  { 3, "\202\114\107", "gkxH" },
+  { 3, "\057\371\042", "L/ki" },
+  { 3, "\223\247\244", "k6ek" },
+  { 3, "\047\216\144", "J45k" },
+  { 3, "\203\070\327", "gzjX" },
+  { 3, "\247\140\072", "p2A6" },
+  { 3, "\124\115\116", "VE1O" },
+  { 3, "\157\162\050", "b3Io" },
+  { 3, "\357\223\004", "75ME" },
+  { 3, "\052\117\156", "Kk9u" },
+  { 3, "\347\154\000", "52wA" },
+  { 3, "\303\012\142", "wwpi" },
+  { 3, "\060\035\362", "MB3y" },
+  { 3, "\130\226\361", "WJbx" },
+  { 3, "\173\013\071", "ews5" },
+  { 3, "\336\004\027", "3gQX" },
+  { 3, "\357\366\234", "7/ac" },
+  { 3, "\353\304\111", "68RJ" },
+  { 3, "\024\264\131", "FLRZ" },
+  { 3, "\075\114\251", "PUyp" },
+  { 3, "\315\031\225", "zRmV" },
+  { 3, "\154\201\276", "bIG+" },
+  { 3, "\200\066\072", "gDY6" },
+  { 3, "\142\350\267", "Yui3" },
+  { 3, "\033\000\166", "GwB2" },
+  { 3, "\210\055\077", "iC0/" },
+  { 3, "\341\037\124", "4R9U" },
+  { 3, "\161\103\152", "cUNq" },
+  { 3, "\270\142\131", "uGJZ" },
+  { 3, "\337\076\074", "3z48" },
+  { 3, "\375\106\362", "/Uby" },
+  { 3, "\227\301\127", "l8FX" },
+  { 3, "\340\002\234", "4AKc" },
+  { 3, "\121\064\033", "UTQb" },
+  { 3, "\157\134\143", "b1xj" },
+  { 3, "\247\055\327", "py3X" },
+  { 3, "\340\142\005", "4GIF" },
+  { 3, "\060\260\143", "MLBj" },
+  { 3, "\075\203\170", "PYN4" },
+  { 3, "\143\160\016", "Y3AO" },
+  { 3, "\313\013\063", "ywsz" },
+  { 3, "\174\236\135", "fJ5d" },
+  { 3, "\103\047\026", "QycW" },
+  { 3, "\365\005\343", "9QXj" },
+  { 3, "\271\160\223", "uXCT" },
+  { 3, "\362\255\172", "8q16" },
+  { 3, "\113\012\015", "SwoN" },
+
+  // various lengths, generated by this python script:
+  //
+  // from string import lowercase as lc
+  // for i in range(27):
+  //   print '{ %2d, "%s",%s "%s" },' % (i, lc[:i], ' ' * (26-i),
+  //                                     lc[:i].encode('base64').strip())
+
+  {  0, "abcdefghijklmnopqrstuvwxyz", "" },
+  {  1, "abcdefghijklmnopqrstuvwxyz", "YQ==" },
+  {  2, "abcdefghijklmnopqrstuvwxyz", "YWI=" },
+  {  3, "abcdefghijklmnopqrstuvwxyz", "YWJj" },
+  {  4, "abcdefghijklmnopqrstuvwxyz", "YWJjZA==" },
+  {  5, "abcdefghijklmnopqrstuvwxyz", "YWJjZGU=" },
+  {  6, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVm" },
+  {  7, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZw==" },
+  {  8, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZ2g=" },
+  {  9, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZ2hp" },
+  { 10, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZ2hpag==" },
+  { 11, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZ2hpams=" },
+  { 12, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZ2hpamts" },
+  { 13, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZ2hpamtsbQ==" },
+  { 14, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZ2hpamtsbW4=" },
+  { 15, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZ2hpamtsbW5v" },
+  { 16, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZ2hpamtsbW5vcA==" },
+  { 17, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZ2hpamtsbW5vcHE=" },
+  { 18, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZ2hpamtsbW5vcHFy" },
+  { 19, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZ2hpamtsbW5vcHFycw==" },
+  { 20, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZ2hpamtsbW5vcHFyc3Q=" },
+  { 21, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZ2hpamtsbW5vcHFyc3R1" },
+  { 22, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dg==" },
+  { 23, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnc=" },
+  { 24, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4" },
+  { 25, "abcdefghijklmnopqrstuvwxy",  "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eQ==" },
+  { 26, "abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXo=" },
+};
+#if 0
+static struct {
+  const char* plaintext;
+  const char* cyphertext;
+} base64_strings[] = {
+
+  // The first few Google quotes
+  // Cyphertext created with "uuencode - GNU sharutils 4.2.1"
+  {
+    "Everyone!  We're teetering on the brink of disaster."
+    " - Sergey Brin, 6/24/99, regarding the company's state "
+    "after the unleashing of Netscape/Google search",
+
+    "RXZlcnlvbmUhICBXZSdyZSB0ZWV0ZXJpbmcgb24gdGhlIGJyaW5rIG9mIGRp"
+    "c2FzdGVyLiAtIFNlcmdleSBCcmluLCA2LzI0Lzk5LCByZWdhcmRpbmcgdGhl"
+    "IGNvbXBhbnkncyBzdGF0ZSBhZnRlciB0aGUgdW5sZWFzaGluZyBvZiBOZXRz"
+    "Y2FwZS9Hb29nbGUgc2VhcmNo" },
+
+  {
+    "I'm not sure why we're still alive, but we seem to be."
+    " - Larry Page, 6/24/99, while hiding in the kitchenette "
+    "during the Netscape traffic overflow",
+
+    "SSdtIG5vdCBzdXJlIHdoeSB3ZSdyZSBzdGlsbCBhbGl2ZSwgYnV0IHdlIHNl"
+    "ZW0gdG8gYmUuIC0gTGFycnkgUGFnZSwgNi8yNC85OSwgd2hpbGUgaGlkaW5n"
+    "IGluIHRoZSBraXRjaGVuZXR0ZSBkdXJpbmcgdGhlIE5ldHNjYXBlIHRyYWZm"
+    "aWMgb3ZlcmZsb3c" },
+
+  {
+    "I think kids want porn."
+    " - Sergey Brin, 6/99, on why Google shouldn't prioritize a "
+    "filtered search for children and families",
+
+    "SSB0aGluayBraWRzIHdhbnQgcG9ybi4gLSBTZXJnZXkgQnJpbiwgNi85OSwg"
+    "b24gd2h5IEdvb2dsZSBzaG91bGRuJ3QgcHJpb3JpdGl6ZSBhIGZpbHRlcmVk"
+    "IHNlYXJjaCBmb3IgY2hpbGRyZW4gYW5kIGZhbWlsaWVz" },
+};
+#endif
+// Compare bytes 0..len-1 of x and y.  If not equal, abort with verbose error
+// message showing position and numeric value that differed.
+// Handles embedded nulls just like any other byte.
+// Only added because string.compare() in gcc-3.3.3 seems to misbehave with
+// embedded nulls.
+// TODO: switch back to string.compare() if/when gcc is fixed
+#define EXPECT_EQ_ARRAY(len, x, y, msg)                        \
+  for (size_t j = 0; j < len; ++j) {                           \
+    if (x[j] != y[j]) {                                        \
+      RTC_LOG(LS_ERROR) << "" #x << " != " #y << " byte " << j \
+                        << " msg: " << msg;                    \
+    }                                                          \
+  }
+
+size_t Base64Escape(const unsigned char *src, size_t szsrc, char *dest,
+                    size_t szdest) {
+  std::string escaped;
+  Base64::EncodeFromArray((const char *)src, szsrc, &escaped);
+  memcpy(dest, escaped.data(), min(escaped.size(), szdest));
+  return escaped.size();
+}
+
+size_t Base64Unescape(const char *src, size_t szsrc, char *dest,
+                      size_t szdest) {
+  std::string unescaped;
+  EXPECT_TRUE(
+      Base64::DecodeFromArray(src, szsrc, Base64::DO_LAX, &unescaped, nullptr));
+  memcpy(dest, unescaped.data(), min(unescaped.size(), szdest));
+  return unescaped.size();
+}
+
+size_t Base64Unescape(const char *src, size_t szsrc, std::string *s) {
+  EXPECT_TRUE(Base64::DecodeFromArray(src, szsrc, Base64::DO_LAX, s, nullptr));
+  return s->size();
+}
+
+TEST(Base64, EncodeDecodeBattery) {
+  RTC_LOG(LS_VERBOSE) << "Testing base-64";
+
+  size_t i;
+
+  // Check the short strings; this tests the math (and boundaries)
+  for( i = 0; i < sizeof(base64_tests) / sizeof(base64_tests[0]); ++i ) {
+    char encode_buffer[100];
+    size_t encode_length;
+    char decode_buffer[100];
+    size_t decode_length;
+    size_t cypher_length;
+
+    RTC_LOG(LS_VERBOSE) << "B64: " << base64_tests[i].cyphertext;
+
+    const unsigned char* unsigned_plaintext =
+      reinterpret_cast<const unsigned char*>(base64_tests[i].plaintext);
+
+    cypher_length = strlen(base64_tests[i].cyphertext);
+
+    // The basic escape function:
+    memset(encode_buffer, 0, sizeof(encode_buffer));
+    encode_length = Base64Escape(unsigned_plaintext,
+                                 base64_tests[i].plain_length,
+                                 encode_buffer,
+                                 sizeof(encode_buffer));
+    //    Is it of the expected length?
+    EXPECT_EQ(encode_length, cypher_length);
+
+    //    Is it the expected encoded value?
+    EXPECT_STREQ(encode_buffer, base64_tests[i].cyphertext);
+
+    // If we encode it into a buffer of exactly the right length...
+    memset(encode_buffer, 0, sizeof(encode_buffer));
+    encode_length = Base64Escape(unsigned_plaintext,
+                                 base64_tests[i].plain_length,
+                                 encode_buffer,
+                                 cypher_length);
+    //    Is it still of the expected length?
+    EXPECT_EQ(encode_length, cypher_length);
+
+    //    And is the value still correct?  (i.e., not losing the last byte)
+    EXPECT_STREQ(encode_buffer, base64_tests[i].cyphertext);
+
+    // If we decode it back:
+    memset(decode_buffer, 0, sizeof(decode_buffer));
+    decode_length = Base64Unescape(encode_buffer,
+                                   cypher_length,
+                                   decode_buffer,
+                                   sizeof(decode_buffer));
+
+    //    Is it of the expected length?
+    EXPECT_EQ(decode_length, base64_tests[i].plain_length);
+
+    //    Is it the expected decoded value?
+    EXPECT_EQ(0,  memcmp(decode_buffer, base64_tests[i].plaintext, decode_length));
+
+    // Our decoder treats the padding '=' characters at the end as
+    // optional.  If encode_buffer has any, run some additional
+    // tests that fiddle with them.
+    char* first_equals = strchr(encode_buffer, '=');
+    if (first_equals) {
+      // How many equals signs does the string start with?
+      int equals = (*(first_equals+1) == '=') ? 2 : 1;
+
+      // Try chopping off the equals sign(s) entirely.  The decoder
+      // should still be okay with this.
+      std::string decoded2("this junk should also be ignored");
+      *first_equals = '\0';
+      EXPECT_NE(0U, Base64Unescape(encode_buffer, first_equals-encode_buffer,
+                           &decoded2));
+      EXPECT_EQ(decoded2.size(), base64_tests[i].plain_length);
+      EXPECT_EQ_ARRAY(decoded2.size(), decoded2.data(), base64_tests[i].plaintext, i);
+
+      size_t len;
+
+      // try putting some extra stuff after the equals signs, or in between them
+      if (equals == 2) {
+        sprintfn(first_equals, 6, " = = ");
+        len = first_equals - encode_buffer + 5;
+      } else {
+        sprintfn(first_equals, 6, " = ");
+        len = first_equals - encode_buffer + 3;
+      }
+      decoded2.assign("this junk should be ignored");
+      EXPECT_NE(0U, Base64Unescape(encode_buffer, len, &decoded2));
+      EXPECT_EQ(decoded2.size(), base64_tests[i].plain_length);
+      EXPECT_EQ_ARRAY(decoded2.size(), decoded2, base64_tests[i].plaintext, i);
+    }
+  }
+}
+
+// here's a weird case: a giant base64 encoded stream which broke our base64
+// decoding.  Let's test it explicitly.
+const char SpecificTest[] =
+  "/9j/4AAQSkZJRgABAgEASABIAAD/4Q0HRXhpZgAATU0AKgAAAAgADAEOAAIAAAAgAAAAngEPAAI\n"
+  "AAAAFAAAAvgEQAAIAAAAJAAAAwwESAAMAAAABAAEAAAEaAAUAAAABAAAAzAEbAAUAAAABAAAA1A\n"
+  "EoAAMAAAABAAIAAAExAAIAAAAUAAAA3AEyAAIAAAAUAAAA8AE8AAIAAAAQAAABBAITAAMAAAABA\n"
+  "AIAAIdpAAQAAAABAAABFAAAAsQgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgAFNPTlkA\n"
+  "RFNDLVAyMDAAAAAASAAAAAEAAABIAAAAAUFkb2JlIFBob3Rvc2hvcCA3LjAAMjAwNzowMTozMCA\n"
+  "yMzoxMDowNABNYWMgT1MgWCAxMC40LjgAAByCmgAFAAAAAQAAAmqCnQAFAAAAAQAAAnKIIgADAA\n"
+  "AAAQACAACIJwADAAAAAQBkAACQAAAHAAAABDAyMjCQAwACAAAAFAAAAnqQBAACAAAAFAAAAo6RA\n"
+  "QAHAAAABAECAwCRAgAFAAAAAQAAAqKSBAAKAAAAAQAAAqqSBQAFAAAAAQAAArKSBwADAAAAAQAF\n"
+  "AACSCAADAAAAAQAAAACSCQADAAAAAQAPAACSCgAFAAAAAQAAArqgAAAHAAAABDAxMDCgAQADAAA\n"
+  "AAf//AACgAgAEAAAAAQAAAGSgAwAEAAAAAQAAAGSjAAAHAAAAAQMAAACjAQAHAAAAAQEAAACkAQ\n"
+  "ADAAAAAQAAAACkAgADAAAAAQAAAACkAwADAAAAAQAAAACkBgADAAAAAQAAAACkCAADAAAAAQAAA\n"
+  "ACkCQADAAAAAQAAAACkCgADAAAAAQAAAAAAAAAAAAAACgAAAZAAAAAcAAAACjIwMDc6MDE6MjAg\n"
+  "MjM6MDU6NTIAMjAwNzowMToyMCAyMzowNTo1MgAAAAAIAAAAAQAAAAAAAAAKAAAAMAAAABAAAAB\n"
+  "PAAAACgAAAAYBAwADAAAAAQAGAAABGgAFAAAAAQAAAxIBGwAFAAAAAQAAAxoBKAADAAAAAQACAA\n"
+  "ACAQAEAAAAAQAAAyICAgAEAAAAAQAACd0AAAAAAAAASAAAAAEAAABIAAAAAf/Y/+AAEEpGSUYAA\n"
+  "QIBAEgASAAA/+0ADEFkb2JlX0NNAAL/7gAOQWRvYmUAZIAAAAAB/9sAhAAMCAgICQgMCQkMEQsK\n"
+  "CxEVDwwMDxUYExMVExMYEQwMDAwMDBEMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMAQ0LCw0\n"
+  "ODRAODhAUDg4OFBQODg4OFBEMDAwMDBERDAwMDAwMEQwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDA\n"
+  "wMDAz/wAARCABkAGQDASIAAhEBAxEB/90ABAAH/8QBPwAAAQUBAQEBAQEAAAAAAAAAAwABAgQFB\n"
+  "gcICQoLAQABBQEBAQEBAQAAAAAAAAABAAIDBAUGBwgJCgsQAAEEAQMCBAIFBwYIBQMMMwEAAhED\n"
+  "BCESMQVBUWETInGBMgYUkaGxQiMkFVLBYjM0coLRQwclklPw4fFjczUWorKDJkSTVGRFwqN0Nhf\n"
+  "SVeJl8rOEw9N14/NGJ5SkhbSVxNTk9KW1xdXl9VZmdoaWprbG1ub2N0dXZ3eHl6e3x9fn9xEAAg\n"
+  "IBAgQEAwQFBgcHBgU1AQACEQMhMRIEQVFhcSITBTKBkRShsUIjwVLR8DMkYuFygpJDUxVjczTxJ\n"
+  "QYWorKDByY1wtJEk1SjF2RFVTZ0ZeLys4TD03Xj80aUpIW0lcTU5PSltcXV5fVWZnaGlqa2xtbm\n"
+  "9ic3R1dnd4eXp7fH/9oADAMBAAIRAxEAPwDy7bKNTUXNLz9EaJPDWMjxH4ozhtpYwaACT8ShaaW\n"
+  "bW0uEc9/JFfjj0Q4Hk/PRDxwX7y47W9z/AN9Cv4+O3ILK2DcRqT2CaSvEbcl1Jbz37KG1dBldLo\n"
+  "qaS4l9xGjG9v6yoDAdYIaIjUk+AREgo4y5sapirb8Yl0NHHdKvBNm4yA1o5Pc+SPEFvCWqB3HZF\n"
+  "Hj2SbWQ/afGFP0bHP8ATY0uc4w1o1JPkkimGiS2KvqlnmBkOZQTyydzgPMM9v8A0lp4v1Nx9gF1\n"
+  "tpdqJaGtH/S3I0i3lISXW/8AMqnd/O2bfg2eUkqVYf/Q8zuncO4Bj7lZ+n7f5Mj5KsJcY8NUZ4d\n"
+  "uEDVo1HkeU0rg3Om4H2rabCWUN7DQuK1n5FWKW4uCwG92gDRJBS6exhxmMboQI+Cv4WFTQ42Bs2\n"
+  "fvnkkqEmy2YxoMMbpVzaz6jt+RbpHZs8lzkHqrasKkYOKP0jgDfZ4N/wDM1tNrcWfSPmRyq9uNV\n"
+  "DnFg2s97i7UkjxKVrq0eVz3spZsja+ASDzwsh9jnOk/JFzb3XZD3v1c4yT8UACTCniKDUnKz5Nj\n"
+  "G33XV1DV73BrT8dF23SejV4zg9g33cOsPb+SxVvqv9ViwNy8vS0iWs/daf8A0Y5dpTi1sADGxCR\n"
+  "K1o0YBEmInlXWYbDBcDLdPJXa8f71Yrx2jnUoAqLnfZK5hJaW2vdwEk5a/wD/0fN6Ia/e76IiVf\n"
+  "xavUL7CPpnT4LNbYXAVjuQt/AqDmNYO/Kjnoy4hr5J8SwMhrRMaeSvbsxrfUazcOw4UX0Cisem2\n"
+  "SBoD4+Kz8nC6llbSLCRrubJA8kwUWbUDa29X1PMa7aQWjuDC0MXMdbDbhI7eazBiUfZ6GOYRe1s\n"
+  "WvGgJ8Vbw2+m4Bx9s6JpNHuuGo1FF53r/SHYua61gLse0lzXeBP5rkvqx0o5vVWz7WY49QkiQSP\n"
+  "oN/tLoevW/ogxv0HA7tJ0AnhT+pdDGYVl/wCdcTPkGn2NU0JWNWvlgAbHV6fEqdu2gR/r2WlWwt\n"
+  "AA5VXAEsLXTqJafArQY5rRr9LiPBJiZsZCI1pJjxCi0j4oncSICSkWwzwkjeaSch//0vO7sP7Lm\n"
+  "enO9ogtd5FbPT3Q5pCpZVc4ld3Lmn3O8j9EI2BYdunKjOobMQIyI+rusc2wx4d0eutwGnHh/uQc\n"
+  "Ha7ladj6mVANGvcqOgz0Go7HJ12/GEHcwvB/dPY6ImbbaMaASGuIBjkN7qofs9Ubg9g7OI9p/t/\n"
+  "RTSmhTHr0v6eSz6UgCPP2/wAVu9Ex2V49dVY2iACB4BZeVXQ/AJ3gzGnnOi2+kACpru8flUsNmt\n"
+  "zHRf6xfWCnoeAfTh2ZaQKazx/Ke7+QxcKz61fWA2uuObaC4zGhaPJrXBL64ZFmR124O09ENraPK\n"
+  "N3/AH5GqxIrZVUyp2K2vfdkENsDnxuex9m4Ox9n82xSgNd9D+p/XR1npgseR9ppOy4Dx/NfH/CL\n"
+  "oQJGunmvMv8AFq3KHVcq3HkYQbD2nuSf0I/rMavSg6TLjLigQhJ7Z58v9QkmlsTOqSCn/9PzL7R\n"
+  "d6Qq3n0wZ2zotXpT9xLfFYvkr/S7jXeB8E0jRkhKpC3q8LcJ/kmCrTnkuAPCq4do9Q/ytVbuAeY\n"
+  "Gg5lQybQK+82GBqEQUA1kOHPYf3LLsoyN36G5w8iUfHxepbXE2l0cApALgLHzBq9UxhTXU5hMC1\n"
+  "ktnSCup6S4Ctk+C5XqVGcaHPfuiuHkeTTuWz0+9zaKiH6CC0/yXBSQ2a/MxojV57634rq+v2PLY\n"
+  "be1r2nsYG13/AFKxbfCBMcr0brGAzrGEwCG31ncx0SfBzf7S4+zoHUWWsJq3hz9oLfcBH77R9H+\n"
+  "0pA13u/qPgDp/Q6ri39JlfpXkDx+h/msWn1L6wdO6bSbcrIbU2Q0xLnSe21kuVejJspbVS5+4bd\n"
+  "ocBAkD/orG+tP1ar67Wy7GtZTm1SCXfRsb+a18fRe38x6SG3/44H1Z3f0y2I+l6DoSXD/8xPrDs\n"
+  "3enVu3bdnqN3R+//USSVo//1PLohhce+gRWS0Nsby3lRgFkKxQyW7SgUh3em5Tbq2uB9wWw1wey\n"
+  "J1XGV2XYdm5k7e4WzidXY9oMwo5RZ4T6Hd1ixwfp96PWbAJBVTHzK7O6Ky5oJB1HZMqmUEFlkGy\n"
+  "xpa4zI1Hkq31dy7bMN9BAc3HeWAnnbyxEycmuup1jiAGglZ31PyrmZ9tQg1WtNj54EHR3/S2qTH\n"
+  "1Yc5GgD1FFtzPdWGkd2AyflogZmRmsz6PSrbXbdo+txOrP337f3fzVo15DK2uyrTtqpBOnBKx6b\n"
+  "7MjJsz7tHWOAYP3WD6LU6cqGjFCNl1MmvLcxv6YtDTLSAqP27LrdtYHXFnJZI+Tp3MWg68OpDPv\n"
+  "UMUM2lkQBoouKQ6swjE9Nml+1sz1PW+z6xt27zuj+skrX2ZvqR5z8kkuOfdPt43/1fMm/grFG6f\n"
+  "Lss9JA7JG7tnZs/SfJUrfS3foJ9TvHCopJsV8nWx/t24bJn8Fo/5TjWJXMJIS+i+G36TsZ/7Q9P\n"
+  "8ATfzfeOFofVSZv2/zvt+O3X/v65dJPjt/BiyfN1/wn0zre79nVej/ADG8ep4x2/6Srjd6TdviF\n"
+  "52ko8m6/Ht9X1KnftEo+POwxzK8mSTF46vrH6T1/OEl5Okkl//Z/+0uHFBob3Rvc2hvcCAzLjAA\n"
+  "OEJJTQQEAAAAAAArHAIAAAIAAhwCeAAfICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAA\n"
+  "4QklNBCUAAAAAABD7Caa9B0wqNp2P4sxXqayFOEJJTQPqAAAAAB2wPD94bWwgdmVyc2lvbj0iMS\n"
+  "4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPCFET0NUWVBFIHBsaXN0IFBVQkxJQyAiLS8vQXBwbGUgQ\n"
+  "29tcHV0ZXIvL0RURCBQTElTVCAxLjAvL0VOIiAiaHR0cDovL3d3dy5hcHBsZS5jb20vRFREcy9Q\n"
+  "cm9wZXJ0eUxpc3QtMS4wLmR0ZCI+CjxwbGlzdCB2ZXJzaW9uPSIxLjAiPgo8ZGljdD4KCTxrZXk\n"
+  "+Y29tLmFwcGxlLnByaW50LlBhZ2VGb3JtYXQuUE1Ib3Jpem9udGFsUmVzPC9rZXk+Cgk8ZGljdD\n"
+  "4KCQk8a2V5PmNvbS5hcHBsZS5wcmludC50aWNrZXQuY3JlYXRvcjwva2V5PgoJCTxzdHJpbmc+Y\n"
+  "29tLmFwcGxlLnByaW50aW5nbWFuYWdlcjwvc3RyaW5nPgoJCTxrZXk+Y29tLmFwcGxlLnByaW50\n"
+  "LnRpY2tldC5pdGVtQXJyYXk8L2tleT4KCQk8YXJyYXk+CgkJCTxkaWN0PgoJCQkJPGtleT5jb20\n"
+  "uYXBwbGUucHJpbnQuUGFnZUZvcm1hdC5QTUhvcml6b250YWxSZXM8L2tleT4KCQkJCTxyZWFsPj\n"
+  "cyPC9yZWFsPgoJCQkJPGtleT5jb20uYXBwbGUucHJpbnQudGlja2V0LmNsaWVudDwva2V5PgoJC\n"
+  "QkJPHN0cmluZz5jb20uYXBwbGUucHJpbnRpbmdtYW5hZ2VyPC9zdHJpbmc+CgkJCQk8a2V5PmNv\n"
+  "bS5hcHBsZS5wcmludC50aWNrZXQubW9kRGF0ZTwva2V5PgoJCQkJPGRhdGU+MjAwNy0wMS0zMFQ\n"
+  "yMjowODo0MVo8L2RhdGU+CgkJCQk8a2V5PmNvbS5hcHBsZS5wcmludC50aWNrZXQuc3RhdGVGbG\n"
+  "FnPC9rZXk+CgkJCQk8aW50ZWdlcj4wPC9pbnRlZ2VyPgoJCQk8L2RpY3Q+CgkJPC9hcnJheT4KC\n"
+  "TwvZGljdD4KCTxrZXk+Y29tLmFwcGxlLnByaW50LlBhZ2VGb3JtYXQuUE1PcmllbnRhdGlvbjwv\n"
+  "a2V5PgoJPGRpY3Q+CgkJPGtleT5jb20uYXBwbGUucHJpbnQudGlja2V0LmNyZWF0b3I8L2tleT4\n"
+  "KCQk8c3RyaW5nPmNvbS5hcHBsZS5wcmludGluZ21hbmFnZXI8L3N0cmluZz4KCQk8a2V5PmNvbS\n"
+  "5hcHBsZS5wcmludC50aWNrZXQuaXRlbUFycmF5PC9rZXk+CgkJPGFycmF5PgoJCQk8ZGljdD4KC\n"
+  "QkJCTxrZXk+Y29tLmFwcGxlLnByaW50LlBhZ2VGb3JtYXQuUE1PcmllbnRhdGlvbjwva2V5PgoJ\n"
+  "CQkJPGludGVnZXI+MTwvaW50ZWdlcj4KCQkJCTxrZXk+Y29tLmFwcGxlLnByaW50LnRpY2tldC5\n"
+  "jbGllbnQ8L2tleT4KCQkJCTxzdHJpbmc+Y29tLmFwcGxlLnByaW50aW5nbWFuYWdlcjwvc3RyaW\n"
+  "5nPgoJCQkJPGtleT5jb20uYXBwbGUucHJpbnQudGlja2V0Lm1vZERhdGU8L2tleT4KCQkJCTxkY\n"
+  "XRlPjIwMDctMDEtMzBUMjI6MDg6NDFaPC9kYXRlPgoJCQkJPGtleT5jb20uYXBwbGUucHJpbnQu\n"
+  "dGlja2V0LnN0YXRlRmxhZzwva2V5PgoJCQkJPGludGVnZXI+MDwvaW50ZWdlcj4KCQkJPC9kaWN\n"
+  "0PgoJCTwvYXJyYXk+Cgk8L2RpY3Q+Cgk8a2V5PmNvbS5hcHBsZS5wcmludC5QYWdlRm9ybWF0Ll\n"
+  "BNU2NhbGluZzwva2V5PgoJPGRpY3Q+CgkJPGtleT5jb20uYXBwbGUucHJpbnQudGlja2V0LmNyZ\n"
+  "WF0b3I8L2tleT4KCQk8c3RyaW5nPmNvbS5hcHBsZS5wcmludGluZ21hbmFnZXI8L3N0cmluZz4K\n"
+  "CQk8a2V5PmNvbS5hcHBsZS5wcmludC50aWNrZXQuaXRlbUFycmF5PC9rZXk+CgkJPGFycmF5Pgo\n"
+  "JCQk8ZGljdD4KCQkJCTxrZXk+Y29tLmFwcGxlLnByaW50LlBhZ2VGb3JtYXQuUE1TY2FsaW5nPC\n"
+  "9rZXk+CgkJCQk8cmVhbD4xPC9yZWFsPgoJCQkJPGtleT5jb20uYXBwbGUucHJpbnQudGlja2V0L\n"
+  "mNsaWVudDwva2V5PgoJCQkJPHN0cmluZz5jb20uYXBwbGUucHJpbnRpbmdtYW5hZ2VyPC9zdHJp\n"
+  "bmc+CgkJCQk8a2V5PmNvbS5hcHBsZS5wcmludC50aWNrZXQubW9kRGF0ZTwva2V5PgoJCQkJPGR\n"
+  "hdGU+MjAwNy0wMS0zMFQyMjowODo0MVo8L2RhdGU+CgkJCQk8a2V5PmNvbS5hcHBsZS5wcmludC\n"
+  "50aWNrZXQuc3RhdGVGbGFnPC9rZXk+CgkJCQk8aW50ZWdlcj4wPC9pbnRlZ2VyPgoJCQk8L2RpY\n"
+  "3Q+CgkJPC9hcnJheT4KCTwvZGljdD4KCTxrZXk+Y29tLmFwcGxlLnByaW50LlBhZ2VGb3JtYXQu\n"
+  "UE1WZXJ0aWNhbFJlczwva2V5PgoJPGRpY3Q+CgkJPGtleT5jb20uYXBwbGUucHJpbnQudGlja2V\n"
+  "0LmNyZWF0b3I8L2tleT4KCQk8c3RyaW5nPmNvbS5hcHBsZS5wcmludGluZ21hbmFnZXI8L3N0cm\n"
+  "luZz4KCQk8a2V5PmNvbS5hcHBsZS5wcmludC50aWNrZXQuaXRlbUFycmF5PC9rZXk+CgkJPGFyc\n"
+  "mF5PgoJCQk8ZGljdD4KCQkJCTxrZXk+Y29tLmFwcGxlLnByaW50LlBhZ2VGb3JtYXQuUE1WZXJ0\n"
+  "aWNhbFJlczwva2V5PgoJCQkJPHJlYWw+NzI8L3JlYWw+CgkJCQk8a2V5PmNvbS5hcHBsZS5wcml\n"
+  "udC50aWNrZXQuY2xpZW50PC9rZXk+CgkJCQk8c3RyaW5nPmNvbS5hcHBsZS5wcmludGluZ21hbm\n"
+  "FnZXI8L3N0cmluZz4KCQkJCTxrZXk+Y29tLmFwcGxlLnByaW50LnRpY2tldC5tb2REYXRlPC9rZ\n"
+  "Xk+CgkJCQk8ZGF0ZT4yMDA3LTAxLTMwVDIyOjA4OjQxWjwvZGF0ZT4KCQkJCTxrZXk+Y29tLmFw\n"
+  "cGxlLnByaW50LnRpY2tldC5zdGF0ZUZsYWc8L2tleT4KCQkJCTxpbnRlZ2VyPjA8L2ludGVnZXI\n"
+  "+CgkJCTwvZGljdD4KCQk8L2FycmF5PgoJPC9kaWN0PgoJPGtleT5jb20uYXBwbGUucHJpbnQuUG\n"
+  "FnZUZvcm1hdC5QTVZlcnRpY2FsU2NhbGluZzwva2V5PgoJPGRpY3Q+CgkJPGtleT5jb20uYXBwb\n"
+  "GUucHJpbnQudGlja2V0LmNyZWF0b3I8L2tleT4KCQk8c3RyaW5nPmNvbS5hcHBsZS5wcmludGlu\n"
+  "Z21hbmFnZXI8L3N0cmluZz4KCQk8a2V5PmNvbS5hcHBsZS5wcmludC50aWNrZXQuaXRlbUFycmF\n"
+  "5PC9rZXk+CgkJPGFycmF5PgoJCQk8ZGljdD4KCQkJCTxrZXk+Y29tLmFwcGxlLnByaW50LlBhZ2\n"
+  "VGb3JtYXQuUE1WZXJ0aWNhbFNjYWxpbmc8L2tleT4KCQkJCTxyZWFsPjE8L3JlYWw+CgkJCQk8a\n"
+  "2V5PmNvbS5hcHBsZS5wcmludC50aWNrZXQuY2xpZW50PC9rZXk+CgkJCQk8c3RyaW5nPmNvbS5h\n"
+  "cHBsZS5wcmludGluZ21hbmFnZXI8L3N0cmluZz4KCQkJCTxrZXk+Y29tLmFwcGxlLnByaW50LnR\n"
+  "pY2tldC5tb2REYXRlPC9rZXk+CgkJCQk8ZGF0ZT4yMDA3LTAxLTMwVDIyOjA4OjQxWjwvZGF0ZT\n"
+  "4KCQkJCTxrZXk+Y29tLmFwcGxlLnByaW50LnRpY2tldC5zdGF0ZUZsYWc8L2tleT4KCQkJCTxpb\n"
+  "nRlZ2VyPjA8L2ludGVnZXI+CgkJCTwvZGljdD4KCQk8L2FycmF5PgoJPC9kaWN0PgoJPGtleT5j\n"
+  "b20uYXBwbGUucHJpbnQuc3ViVGlja2V0LnBhcGVyX2luZm9fdGlja2V0PC9rZXk+Cgk8ZGljdD4\n"
+  "KCQk8a2V5PmNvbS5hcHBsZS5wcmludC5QYWdlRm9ybWF0LlBNQWRqdXN0ZWRQYWdlUmVjdDwva2\n"
+  "V5PgoJCTxkaWN0PgoJCQk8a2V5PmNvbS5hcHBsZS5wcmludC50aWNrZXQuY3JlYXRvcjwva2V5P\n"
+  "goJCQk8c3RyaW5nPmNvbS5hcHBsZS5wcmludGluZ21hbmFnZXI8L3N0cmluZz4KCQkJPGtleT5j\n"
+  "b20uYXBwbGUucHJpbnQudGlja2V0Lml0ZW1BcnJheTwva2V5PgoJCQk8YXJyYXk+CgkJCQk8ZGl\n"
+  "jdD4KCQkJCQk8a2V5PmNvbS5hcHBsZS5wcmludC5QYWdlRm9ybWF0LlBNQWRqdXN0ZWRQYWdlUm\n"
+  "VjdDwva2V5PgoJCQkJCTxhcnJheT4KCQkJCQkJPHJlYWw+MC4wPC9yZWFsPgoJCQkJCQk8cmVhb\n"
+  "D4wLjA8L3JlYWw+CgkJCQkJCTxyZWFsPjczNDwvcmVhbD4KCQkJCQkJPHJlYWw+NTc2PC9yZWFs\n"
+  "PgoJCQkJCTwvYXJyYXk+CgkJCQkJPGtleT5jb20uYXBwbGUucHJpbnQudGlja2V0LmNsaWVudDw\n"
+  "va2V5PgoJCQkJCTxzdHJpbmc+Y29tLmFwcGxlLnByaW50aW5nbWFuYWdlcjwvc3RyaW5nPgoJCQ\n"
+  "kJCTxrZXk+Y29tLmFwcGxlLnByaW50LnRpY2tldC5tb2REYXRlPC9rZXk+CgkJCQkJPGRhdGU+M\n"
+  "jAwNy0wMS0zMFQyMjowODo0MVo8L2RhdGU+CgkJCQkJPGtleT5jb20uYXBwbGUucHJpbnQudGlj\n"
+  "a2V0LnN0YXRlRmxhZzwva2V5PgoJCQkJCTxpbnRlZ2VyPjA8L2ludGVnZXI+CgkJCQk8L2RpY3Q\n"
+  "+CgkJCTwvYXJyYXk+CgkJPC9kaWN0PgoJCTxrZXk+Y29tLmFwcGxlLnByaW50LlBhZ2VGb3JtYX\n"
+  "QuUE1BZGp1c3RlZFBhcGVyUmVjdDwva2V5PgoJCTxkaWN0PgoJCQk8a2V5PmNvbS5hcHBsZS5wc\n"
+  "mludC50aWNrZXQuY3JlYXRvcjwva2V5PgoJCQk8c3RyaW5nPmNvbS5hcHBsZS5wcmludGluZ21h\n"
+  "bmFnZXI8L3N0cmluZz4KCQkJPGtleT5jb20uYXBwbGUucHJpbnQudGlja2V0Lml0ZW1BcnJheTw\n"
+  "va2V5PgoJCQk8YXJyYXk+CgkJCQk8ZGljdD4KCQkJCQk8a2V5PmNvbS5hcHBsZS5wcmludC5QYW\n"
+  "dlRm9ybWF0LlBNQWRqdXN0ZWRQYXBlclJlY3Q8L2tleT4KCQkJCQk8YXJyYXk+CgkJCQkJCTxyZ\n"
+  "WFsPi0xODwvcmVhbD4KCQkJCQkJPHJlYWw+LTE4PC9yZWFsPgoJCQkJCQk8cmVhbD43NzQ8L3Jl\n"
+  "YWw+CgkJCQkJCTxyZWFsPjU5NDwvcmVhbD4KCQkJCQk8L2FycmF5PgoJCQkJCTxrZXk+Y29tLmF\n"
+  "wcGxlLnByaW50LnRpY2tldC5jbGllbnQ8L2tleT4KCQkJCQk8c3RyaW5nPmNvbS5hcHBsZS5wcm\n"
+  "ludGluZ21hbmFnZXI8L3N0cmluZz4KCQkJCQk8a2V5PmNvbS5hcHBsZS5wcmludC50aWNrZXQub\n"
+  "W9kRGF0ZTwva2V5PgoJCQkJCTxkYXRlPjIwMDctMDEtMzBUMjI6MDg6NDFaPC9kYXRlPgoJCQkJ\n"
+  "CTxrZXk+Y29tLmFwcGxlLnByaW50LnRpY2tldC5zdGF0ZUZsYWc8L2tleT4KCQkJCQk8aW50ZWd\n"
+  "lcj4wPC9pbnRlZ2VyPgoJCQkJPC9kaWN0PgoJCQk8L2FycmF5PgoJCTwvZGljdD4KCQk8a2V5Pm\n"
+  "NvbS5hcHBsZS5wcmludC5QYXBlckluZm8uUE1QYXBlck5hbWU8L2tleT4KCQk8ZGljdD4KCQkJP\n"
+  "GtleT5jb20uYXBwbGUucHJpbnQudGlja2V0LmNyZWF0b3I8L2tleT4KCQkJPHN0cmluZz5jb20u\n"
+  "YXBwbGUucHJpbnQucG0uUG9zdFNjcmlwdDwvc3RyaW5nPgoJCQk8a2V5PmNvbS5hcHBsZS5wcml\n"
+  "udC50aWNrZXQuaXRlbUFycmF5PC9rZXk+CgkJCTxhcnJheT4KCQkJCTxkaWN0PgoJCQkJCTxrZX\n"
+  "k+Y29tLmFwcGxlLnByaW50LlBhcGVySW5mby5QTVBhcGVyTmFtZTwva2V5PgoJCQkJCTxzdHJpb\n"
+  "mc+bmEtbGV0dGVyPC9zdHJpbmc+CgkJCQkJPGtleT5jb20uYXBwbGUucHJpbnQudGlja2V0LmNs\n"
+  "aWVudDwva2V5PgoJCQkJCTxzdHJpbmc+Y29tLmFwcGxlLnByaW50LnBtLlBvc3RTY3JpcHQ8L3N\n"
+  "0cmluZz4KCQkJCQk8a2V5PmNvbS5hcHBsZS5wcmludC50aWNrZXQubW9kRGF0ZTwva2V5PgoJCQ\n"
+  "kJCTxkYXRlPjIwMDMtMDctMDFUMTc6NDk6MzZaPC9kYXRlPgoJCQkJCTxrZXk+Y29tLmFwcGxlL\n"
+  "nByaW50LnRpY2tldC5zdGF0ZUZsYWc8L2tleT4KCQkJCQk8aW50ZWdlcj4xPC9pbnRlZ2VyPgoJ\n"
+  "CQkJPC9kaWN0PgoJCQk8L2FycmF5PgoJCTwvZGljdD4KCQk8a2V5PmNvbS5hcHBsZS5wcmludC5\n"
+  "QYXBlckluZm8uUE1VbmFkanVzdGVkUGFnZVJlY3Q8L2tleT4KCQk8ZGljdD4KCQkJPGtleT5jb2\n"
+  "0uYXBwbGUucHJpbnQudGlja2V0LmNyZWF0b3I8L2tleT4KCQkJPHN0cmluZz5jb20uYXBwbGUuc\n"
+  "HJpbnQucG0uUG9zdFNjcmlwdDwvc3RyaW5nPgoJCQk8a2V5PmNvbS5hcHBsZS5wcmludC50aWNr\n"
+  "ZXQuaXRlbUFycmF5PC9rZXk+CgkJCTxhcnJheT4KCQkJCTxkaWN0PgoJCQkJCTxrZXk+Y29tLmF\n"
+  "wcGxlLnByaW50LlBhcGVySW5mby5QTVVuYWRqdXN0ZWRQYWdlUmVjdDwva2V5PgoJCQkJCTxhcn\n"
+  "JheT4KCQkJCQkJPHJlYWw+MC4wPC9yZWFsPgoJCQkJCQk8cmVhbD4wLjA8L3JlYWw+CgkJCQkJC\n"
+  "TxyZWFsPjczNDwvcmVhbD4KCQkJCQkJPHJlYWw+NTc2PC9yZWFsPgoJCQkJCTwvYXJyYXk+CgkJ\n"
+  "CQkJPGtleT5jb20uYXBwbGUucHJpbnQudGlja2V0LmNsaWVudDwva2V5PgoJCQkJCTxzdHJpbmc\n"
+  "+Y29tLmFwcGxlLnByaW50aW5nbWFuYWdlcjwvc3RyaW5nPgoJCQkJCTxrZXk+Y29tLmFwcGxlLn\n"
+  "ByaW50LnRpY2tldC5tb2REYXRlPC9rZXk+CgkJCQkJPGRhdGU+MjAwNy0wMS0zMFQyMjowODo0M\n"
+  "Vo8L2RhdGU+CgkJCQkJPGtleT5jb20uYXBwbGUucHJpbnQudGlja2V0LnN0YXRlRmxhZzwva2V5\n"
+  "PgoJCQkJCTxpbnRlZ2VyPjA8L2ludGVnZXI+CgkJCQk8L2RpY3Q+CgkJCTwvYXJyYXk+CgkJPC9\n"
+  "kaWN0PgoJCTxrZXk+Y29tLmFwcGxlLnByaW50LlBhcGVySW5mby5QTVVuYWRqdXN0ZWRQYXBlcl\n"
+  "JlY3Q8L2tleT4KCQk8ZGljdD4KCQkJPGtleT5jb20uYXBwbGUucHJpbnQudGlja2V0LmNyZWF0b\n"
+  "3I8L2tleT4KCQkJPHN0cmluZz5jb20uYXBwbGUucHJpbnQucG0uUG9zdFNjcmlwdDwvc3RyaW5n\n"
+  "PgoJCQk8a2V5PmNvbS5hcHBsZS5wcmludC50aWNrZXQuaXRlbUFycmF5PC9rZXk+CgkJCTxhcnJ\n"
+  "heT4KCQkJCTxkaWN0PgoJCQkJCTxrZXk+Y29tLmFwcGxlLnByaW50LlBhcGVySW5mby5QTVVuYW\n"
+  "RqdXN0ZWRQYXBlclJlY3Q8L2tleT4KCQkJCQk8YXJyYXk+CgkJCQkJCTxyZWFsPi0xODwvcmVhb\n"
+  "D4KCQkJCQkJPHJlYWw+LTE4PC9yZWFsPgoJCQkJCQk8cmVhbD43NzQ8L3JlYWw+CgkJCQkJCTxy\n"
+  "ZWFsPjU5NDwvcmVhbD4KCQkJCQk8L2FycmF5PgoJCQkJCTxrZXk+Y29tLmFwcGxlLnByaW50LnR\n"
+  "pY2tldC5jbGllbnQ8L2tleT4KCQkJCQk8c3RyaW5nPmNvbS5hcHBsZS5wcmludGluZ21hbmFnZX\n"
+  "I8L3N0cmluZz4KCQkJCQk8a2V5PmNvbS5hcHBsZS5wcmludC50aWNrZXQubW9kRGF0ZTwva2V5P\n"
+  "goJCQkJCTxkYXRlPjIwMDctMDEtMzBUMjI6MDg6NDFaPC9kYXRlPgoJCQkJCTxrZXk+Y29tLmFw\n"
+  "cGxlLnByaW50LnRpY2tldC5zdGF0ZUZsYWc8L2tleT4KCQkJCQk8aW50ZWdlcj4wPC9pbnRlZ2V\n"
+  "yPgoJCQkJPC9kaWN0PgoJCQk8L2FycmF5PgoJCTwvZGljdD4KCQk8a2V5PmNvbS5hcHBsZS5wcm\n"
+  "ludC5QYXBlckluZm8ucHBkLlBNUGFwZXJOYW1lPC9rZXk+CgkJPGRpY3Q+CgkJCTxrZXk+Y29tL\n"
+  "mFwcGxlLnByaW50LnRpY2tldC5jcmVhdG9yPC9rZXk+CgkJCTxzdHJpbmc+Y29tLmFwcGxlLnBy\n"
+  "aW50LnBtLlBvc3RTY3JpcHQ8L3N0cmluZz4KCQkJPGtleT5jb20uYXBwbGUucHJpbnQudGlja2V\n"
+  "0Lml0ZW1BcnJheTwva2V5PgoJCQk8YXJyYXk+CgkJCQk8ZGljdD4KCQkJCQk8a2V5PmNvbS5hcH\n"
+  "BsZS5wcmludC5QYXBlckluZm8ucHBkLlBNUGFwZXJOYW1lPC9rZXk+CgkJCQkJPHN0cmluZz5VU\n"
+  "yBMZXR0ZXI8L3N0cmluZz4KCQkJCQk8a2V5PmNvbS5hcHBsZS5wcmludC50aWNrZXQuY2xpZW50\n"
+  "PC9rZXk+CgkJCQkJPHN0cmluZz5jb20uYXBwbGUucHJpbnQucG0uUG9zdFNjcmlwdDwvc3RyaW5\n"
+  "nPgoJCQkJCTxrZXk+Y29tLmFwcGxlLnByaW50LnRpY2tldC5tb2REYXRlPC9rZXk+CgkJCQkJPG\n"
+  "RhdGU+MjAwMy0wNy0wMVQxNzo0OTozNlo8L2RhdGU+CgkJCQkJPGtleT5jb20uYXBwbGUucHJpb\n"
+  "nQudGlja2V0LnN0YXRlRmxhZzwva2V5PgoJCQkJCTxpbnRlZ2VyPjE8L2ludGVnZXI+CgkJCQk8\n"
+  "L2RpY3Q+CgkJCTwvYXJyYXk+CgkJPC9kaWN0PgoJCTxrZXk+Y29tLmFwcGxlLnByaW50LnRpY2t\n"
+  "ldC5BUElWZXJzaW9uPC9rZXk+CgkJPHN0cmluZz4wMC4yMDwvc3RyaW5nPgoJCTxrZXk+Y29tLm\n"
+  "FwcGxlLnByaW50LnRpY2tldC5wcml2YXRlTG9jazwva2V5PgoJCTxmYWxzZS8+CgkJPGtleT5jb\n"
+  "20uYXBwbGUucHJpbnQudGlja2V0LnR5cGU8L2tleT4KCQk8c3RyaW5nPmNvbS5hcHBsZS5wcmlu\n"
+  "dC5QYXBlckluZm9UaWNrZXQ8L3N0cmluZz4KCTwvZGljdD4KCTxrZXk+Y29tLmFwcGxlLnByaW5\n"
+  "0LnRpY2tldC5BUElWZXJzaW9uPC9rZXk+Cgk8c3RyaW5nPjAwLjIwPC9zdHJpbmc+Cgk8a2V5Pm\n"
+  "NvbS5hcHBsZS5wcmludC50aWNrZXQucHJpdmF0ZUxvY2s8L2tleT4KCTxmYWxzZS8+Cgk8a2V5P\n"
+  "mNvbS5hcHBsZS5wcmludC50aWNrZXQudHlwZTwva2V5PgoJPHN0cmluZz5jb20uYXBwbGUucHJp\n"
+  "bnQuUGFnZUZvcm1hdFRpY2tldDwvc3RyaW5nPgo8L2RpY3Q+CjwvcGxpc3Q+CjhCSU0D6QAAAAA\n"
+  "AeAADAAAASABIAAAAAALeAkD/7v/uAwYCUgNnBSgD/AACAAAASABIAAAAAALYAigAAQAAAGQAAA\n"
+  "ABAAMDAwAAAAF//wABAAEAAAAAAAAAAAAAAABoCAAZAZAAAAAAACAAAAAAAAAAAAAAAAAAAAAAA\n"
+  "AAAAAAAAAAAADhCSU0D7QAAAAAAEABIAAAAAQABAEgAAAABAAE4QklNBCYAAAAAAA4AAAAAAAAA\n"
+  "AAAAP4AAADhCSU0EDQAAAAAABAAAAB44QklNBBkAAAAAAAQAAAAeOEJJTQPzAAAAAAAJAAAAAAA\n"
+  "AAAABADhCSU0ECgAAAAAAAQAAOEJJTScQAAAAAAAKAAEAAAAAAAAAAThCSU0D9QAAAAAASAAvZm\n"
+  "YAAQBsZmYABgAAAAAAAQAvZmYAAQChmZoABgAAAAAAAQAyAAAAAQBaAAAABgAAAAAAAQA1AAAAA\n"
+  "QAtAAAABgAAAAAAAThCSU0D+AAAAAAAcAAA/////////////////////////////wPoAAAAAP//\n"
+  "//////////////////////////8D6AAAAAD/////////////////////////////A+gAAAAA///\n"
+  "//////////////////////////wPoAAA4QklNBAgAAAAAABAAAAABAAACQAAAAkAAAAAAOEJJTQ\n"
+  "QeAAAAAAAEAAAAADhCSU0EGgAAAAADRQAAAAYAAAAAAAAAAAAAAGQAAABkAAAACABEAFMAQwAwA\n"
+  "DIAMwAyADUAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAGQAAABkAAAAAAAAAAAA\n"
+  "AAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAEAAAAAAABudWxsAAAAAgAAAAZib3VuZHN\n"
+  "PYmpjAAAAAQAAAAAAAFJjdDEAAAAEAAAAAFRvcCBsb25nAAAAAAAAAABMZWZ0bG9uZwAAAAAAAA\n"
+  "AAQnRvbWxvbmcAAABkAAAAAFJnaHRsb25nAAAAZAAAAAZzbGljZXNWbExzAAAAAU9iamMAAAABA\n"
+  "AAAAAAFc2xpY2UAAAASAAAAB3NsaWNlSURsb25nAAAAAAAAAAdncm91cElEbG9uZwAAAAAAAAAG\n"
+  "b3JpZ2luZW51bQAAAAxFU2xpY2VPcmlnaW4AAAANYXV0b0dlbmVyYXRlZAAAAABUeXBlZW51bQA\n"
+  "AAApFU2xpY2VUeXBlAAAAAEltZyAAAAAGYm91bmRzT2JqYwAAAAEAAAAAAABSY3QxAAAABAAAAA\n"
+  "BUb3AgbG9uZwAAAAAAAAAATGVmdGxvbmcAAAAAAAAAAEJ0b21sb25nAAAAZAAAAABSZ2h0bG9uZ\n"
+  "wAAAGQAAAADdXJsVEVYVAAAAAEAAAAAAABudWxsVEVYVAAAAAEAAAAAAABNc2dlVEVYVAAAAAEA\n"
+  "AAAAAAZhbHRUYWdURVhUAAAAAQAAAAAADmNlbGxUZXh0SXNIVE1MYm9vbAEAAAAIY2VsbFRleHR\n"
+  "URVhUAAAAAQAAAAAACWhvcnpBbGlnbmVudW0AAAAPRVNsaWNlSG9yekFsaWduAAAAB2RlZmF1bH\n"
+  "QAAAAJdmVydEFsaWduZW51bQAAAA9FU2xpY2VWZXJ0QWxpZ24AAAAHZGVmYXVsdAAAAAtiZ0Nvb\n"
+  "G9yVHlwZWVudW0AAAARRVNsaWNlQkdDb2xvclR5cGUAAAAATm9uZQAAAAl0b3BPdXRzZXRsb25n\n"
+  "AAAAAAAAAApsZWZ0T3V0c2V0bG9uZwAAAAAAAAAMYm90dG9tT3V0c2V0bG9uZwAAAAAAAAALcml\n"
+  "naHRPdXRzZXRsb25nAAAAAAA4QklNBBEAAAAAAAEBADhCSU0EFAAAAAAABAAAAAE4QklNBAwAAA\n"
+  "AACfkAAAABAAAAZAAAAGQAAAEsAAB1MAAACd0AGAAB/9j/4AAQSkZJRgABAgEASABIAAD/7QAMQ\n"
+  "WRvYmVfQ00AAv/uAA5BZG9iZQBkgAAAAAH/2wCEAAwICAgJCAwJCQwRCwoLERUPDAwPFRgTExUT\n"
+  "ExgRDAwMDAwMEQwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwBDQsLDQ4NEA4OEBQODg4UFA4\n"
+  "ODg4UEQwMDAwMEREMDAwMDAwRDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDP/AABEIAGQAZA\n"
+  "MBIgACEQEDEQH/3QAEAAf/xAE/AAABBQEBAQEBAQAAAAAAAAADAAECBAUGBwgJCgsBAAEFAQEBA\n"
+  "QEBAAAAAAAAAAEAAgMEBQYHCAkKCxAAAQQBAwIEAgUHBggFAwwzAQACEQMEIRIxBUFRYRMicYEy\n"
+  "BhSRobFCIyQVUsFiMzRygtFDByWSU/Dh8WNzNRaisoMmRJNUZEXCo3Q2F9JV4mXys4TD03Xj80Y\n"
+  "nlKSFtJXE1OT0pbXF1eX1VmZ2hpamtsbW5vY3R1dnd4eXp7fH1+f3EQACAgECBAQDBAUGBwcGBT\n"
+  "UBAAIRAyExEgRBUWFxIhMFMoGRFKGxQiPBUtHwMyRi4XKCkkNTFWNzNPElBhaisoMHJjXC0kSTV\n"
+  "KMXZEVVNnRl4vKzhMPTdePzRpSkhbSVxNTk9KW1xdXl9VZmdoaWprbG1ub2JzdHV2d3h5ent8f/\n"
+  "2gAMAwEAAhEDEQA/APLtso1NRc0vP0Rok8NYyPEfijOG2ljBoAJPxKFppZtbS4Rz38kV+OPRDge\n"
+  "T89EPHBfvLjtb3P8A30K/j47cgsrYNxGpPYJpK8RtyXUlvPfsobV0GV0uippLiX3EaMb2/rKgMB\n"
+  "1ghoiNST4BESCjjLmxqmKtvxiXQ0cd0q8E2bjIDWjk9z5I8QW8JaoHcdkUePZJtZD9p8YU/Rsc/\n"
+  "wBNjS5zjDWjUk+SSKYaJLYq+qWeYGQ5lBPLJ3OA8wz2/wDSWni/U3H2AXW2l2oloa0f9LcjSLeU\n"
+  "hJdb/wAyqd387Zt+DZ5SSpVh/9DzO6dw7gGPuVn6ft/kyPkqwlxjw1Rnh24QNWjUeR5TSuDc6bg\n"
+  "fatpsJZQ3sNC4rWfkVYpbi4LAb3aANEkFLp7GHGYxuhAj4K/hYVNDjYGzZ++eSSoSbLZjGgwxul\n"
+  "XNrPqO35FukdmzyXOQeqtqwqRg4o/SOAN9ng3/AMzW02txZ9I+ZHKr241UOcWDaz3uLtSSPEpWu\n"
+  "rR5XPeylmyNr4BIPPCyH2Oc6T8kXNvddkPe/VzjJPxQAJMKeIoNScrPk2MbfddXUNXvcGtPx0Xb\n"
+  "dJ6NXjOD2Dfdw6w9v5LFW+q/1WLA3Ly9LSJaz91p/wDRjl2lOLWwAMbEJErWjRgESYieVdZhsMF\n"
+  "wMt08ldrx/vVivHaOdSgCoud9krmElpba93ASTlr/AP/R83ohr97voiJV/Fq9QvsI+mdPgs1thc\n"
+  "BWO5C38CoOY1g78qOejLiGvknxLAyGtExp5K9uzGt9RrNw7DhRfQKKx6bZIGgPj4rPycLqWVtIs\n"
+  "JGu5skDyTBRZtQNrb1fU8xrtpBaO4MLQxcx1sNuEjt5rMGJR9noY5hF7Wxa8aAnxVvDb6bgHH2z\n"
+  "omk0e64ajUUXnev9Idi5rrWAux7SXNd4E/muS+rHSjm9VbPtZjj1CSJBI+g3+0uh69b+iDG/QcD\n"
+  "u0nQCeFP6l0MZhWX/AJ1xM+QafY1TQlY1a+WABsdXp8Sp27aBH+vZaVbC0ADlVcASwtdOolp8Ct\n"
+  "BjmtGv0uI8EmJmxkIjWkmPEKLSPiidxIgJKRbDPCSN5pJyH//S87uw/suZ6c72iC13kVs9PdDmk\n"
+  "KllVziV3cuafc7yP0QjYFh26cqM6hsxAjIj6u6xzbDHh3R663AaceH+5BwdruVp2PqZUA0a9yo6\n"
+  "DPQajscnXb8YQdzC8H909joiZttoxoBIa4gGOQ3uqh+z1RuD2Ds4j2n+39FNKaFMevS/p5LPpSA\n"
+  "I8/b/ABW70THZXj11VjaIAIHgFl5VdD8AneDMaec6Lb6QAKmu7x+VSw2a3MdF/rF9YKeh4B9OHZ\n"
+  "lpAprPH8p7v5DFwrPrV9YDa645toLjMaFo8mtcEvrhkWZHXbg7T0Q2to8o3f8AfkarEitlVTKnY\n"
+  "ra992QQ2wOfG57H2bg7H2fzbFKA130P6n9dHWemCx5H2mk7LgPH818f8IuhAka6ea8y/wAWrcod\n"
+  "VyrceRhBsPae5J/Qj+sxq9KDpMuMuKBCEntnny/1CSaWxM6pIKf/0/MvtF3pCrefTBnbOi1elP3\n"
+  "Et8Vi+Sv9LuNd4HwTSNGSEqkLerwtwn+SYKtOeS4A8Krh2j1D/K1Vu4B5gaDmVDJtAr7zYYGoRB\n"
+  "QDWQ4c9h/csuyjI3fobnDyJR8fF6ltcTaXRwCkAuAsfMGr1TGFNdTmEwLWS2dIK6npLgK2T4Lle\n"
+  "pUZxoc9+6K4eR5NO5bPT73NoqIfoILT/JcFJDZr8zGiNXnvrfiur6/Y8tht7WvaexgbXf8AUrFt\n"
+  "8IExyvRusYDOsYTAIbfWdzHRJ8HN/tLj7OgdRZawmreHP2gt9wEfvtH0f7SkDXe7+o+AOn9DquL\n"
+  "f0mV+leQPH6H+axafUvrB07ptJtyshtTZDTEudJ7bWS5V6MmyltVLn7ht2hwECQP+isb60/Vqvr\n"
+  "tbLsa1lObVIJd9Gxv5rXx9F7fzHpIbf/jgfVnd/TLYj6XoOhJcP/zE+sOzd6dW7dt2eo3dH7/9R\n"
+  "JJWj//U8uiGFx76BFZLQ2xvLeVGAWQrFDJbtKBSHd6blNura4H3BbDXB7InVcZXZdh2bmTt7hbO\n"
+  "J1dj2gzCjlFnhPod3WLHB+n3o9ZsAkFVMfMrs7orLmgkHUdkyqZQQWWQbLGlrjMjUeSrfV3Ltsw\n"
+  "30EBzcd5YCedvLETJya66nWOIAaCVnfU/KuZn21CDVa02PngQdHf9LapMfVhzkaAPUUW3M91YaR\n"
+  "3YDJ+WiBmZGazPo9Kttdt2j63E6s/fft/d/NWjXkMra7KtO2qkE6cErHpvsyMmzPu0dY4Bg/dYP\n"
+  "otTpyoaMUI2XUya8tzG/pi0NMtICo/bsut21gdcWclkj5OncxaDrw6kM+9QxQzaWRAGii4pDqzC\n"
+  "MT02aX7WzPU9b7PrG3bvO6P6yStfZm+pHnPySS4590+3jf/V8yb+CsUbp8uyz0kDskbu2dmz9J8\n"
+  "lSt9Ld+gn1O8cKikmxXydbH+3bhsmfwWj/lONYlcwkhL6L4bfpOxn/tD0/wBN/N944Wh9VJm/b/\n"
+  "O+347df+/rl0k+O38GLJ83X/CfTOt7v2dV6P8AMbx6njHb/pKuN3pN2+IXnaSjybr8e31fUqd+0\n"
+  "Sj487DHMryZJMXjq+sfpPX84SXk6SSX/9kAOEJJTQQhAAAAAABVAAAAAQEAAAAPAEEAZABvAGIA\n"
+  "ZQAgAFAAaABvAHQAbwBzAGgAbwBwAAAAEwBBAGQAbwBiAGUAIABQAGgAbwB0AG8AcwBoAG8AcAA\n"
+  "gADcALgAwAAAAAQA4QklNBAYAAAAAAAcABQAAAAEBAP/hFWdodHRwOi8vbnMuYWRvYmUuY29tL3\n"
+  "hhcC8xLjAvADw/eHBhY2tldCBiZWdpbj0n77u/JyBpZD0nVzVNME1wQ2VoaUh6cmVTek5UY3prY\n"
+  "zlkJz8+Cjw/YWRvYmUteGFwLWZpbHRlcnMgZXNjPSJDUiI/Pgo8eDp4YXBtZXRhIHhtbG5zOng9\n"
+  "J2Fkb2JlOm5zOm1ldGEvJyB4OnhhcHRrPSdYTVAgdG9vbGtpdCAyLjguMi0zMywgZnJhbWV3b3J\n"
+  "rIDEuNSc+CjxyZGY6UkRGIHhtbG5zOnJkZj0naHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi\n"
+  "1yZGYtc3ludGF4LW5zIycgeG1sbnM6aVg9J2h0dHA6Ly9ucy5hZG9iZS5jb20vaVgvMS4wLyc+C\n"
+  "gogPHJkZjpEZXNjcmlwdGlvbiBhYm91dD0ndXVpZDoyMmQwMmIwYS1iMjQ5LTExZGItOGFmOC05\n"
+  "MWQ1NDAzZjkyZjknCiAgeG1sbnM6cGRmPSdodHRwOi8vbnMuYWRvYmUuY29tL3BkZi8xLjMvJz4\n"
+  "KICA8IS0tIHBkZjpTdWJqZWN0IGlzIGFsaWFzZWQgLS0+CiA8L3JkZjpEZXNjcmlwdGlvbj4KCi\n"
+  "A8cmRmOkRlc2NyaXB0aW9uIGFib3V0PSd1dWlkOjIyZDAyYjBhLWIyNDktMTFkYi04YWY4LTkxZ\n"
+  "DU0MDNmOTJmOScKICB4bWxuczpwaG90b3Nob3A9J2h0dHA6Ly9ucy5hZG9iZS5jb20vcGhvdG9z\n"
+  "aG9wLzEuMC8nPgogIDwhLS0gcGhvdG9zaG9wOkNhcHRpb24gaXMgYWxpYXNlZCAtLT4KIDwvcmR\n"
+  "mOkRlc2NyaXB0aW9uPgoKIDxyZGY6RGVzY3JpcHRpb24gYWJvdXQ9J3V1aWQ6MjJkMDJiMGEtYj\n"
+  "I0OS0xMWRiLThhZjgtOTFkNTQwM2Y5MmY5JwogIHhtbG5zOnhhcD0naHR0cDovL25zLmFkb2JlL\n"
+  "mNvbS94YXAvMS4wLyc+CiAgPCEtLSB4YXA6RGVzY3JpcHRpb24gaXMgYWxpYXNlZCAtLT4KIDwv\n"
+  "cmRmOkRlc2NyaXB0aW9uPgoKIDxyZGY6RGVzY3JpcHRpb24gYWJvdXQ9J3V1aWQ6MjJkMDJiMGE\n"
+  "tYjI0OS0xMWRiLThhZjgtOTFkNTQwM2Y5MmY5JwogIHhtbG5zOnhhcE1NPSdodHRwOi8vbnMuYW\n"
+  "RvYmUuY29tL3hhcC8xLjAvbW0vJz4KICA8eGFwTU06RG9jdW1lbnRJRD5hZG9iZTpkb2NpZDpwa\n"
+  "G90b3Nob3A6MjJkMDJiMDYtYjI0OS0xMWRiLThhZjgtOTFkNTQwM2Y5MmY5PC94YXBNTTpEb2N1\n"
+  "bWVudElEPgogPC9yZGY6RGVzY3JpcHRpb24+CgogPHJkZjpEZXNjcmlwdGlvbiBhYm91dD0ndXV\n"
+  "pZDoyMmQwMmIwYS1iMjQ5LTExZGItOGFmOC05MWQ1NDAzZjkyZjknCiAgeG1sbnM6ZGM9J2h0dH\n"
+  "A6Ly9wdXJsLm9yZy9kYy9lbGVtZW50cy8xLjEvJz4KICA8ZGM6ZGVzY3JpcHRpb24+CiAgIDxyZ\n"
+  "GY6QWx0PgogICAgPHJkZjpsaSB4bWw6bGFuZz0neC1kZWZhdWx0Jz4gICAgICAgICAgICAgICAg\n"
+  "ICAgICAgICAgICAgICAgPC9yZGY6bGk+CiAgIDwvcmRmOkFsdD4KICA8L2RjOmRlc2NyaXB0aW9\n"
+  "uPgogPC9yZGY6RGVzY3JpcHRpb24+Cgo8L3JkZjpSREY+CjwveDp4YXBtZXRhPgogICAgICAgIC\n"
+  "AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI\n"
+  "CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAg\n"
+  "ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA\n"
+  "gICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC\n"
+  "AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI\n"
+  "CAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg\n"
+  "ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA\n"
+  "gCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC\n"
+  "AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgI\n"
+  "CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg\n"
+  "ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICA\n"
+  "gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC\n"
+  "AgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI\n"
+  "CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg\n"
+  "ICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA\n"
+  "gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIA\n"
+  "ogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI\n"
+  "CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAg\n"
+  "ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA\n"
+  "gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgIC\n"
+  "AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI\n"
+  "CAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg\n"
+  "ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA\n"
+  "gICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC\n"
+  "AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKI\n"
+  "CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg\n"
+  "ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICA\n"
+  "gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC\n"
+  "AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgI\n"
+  "CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg\n"
+  "ICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA\n"
+  "gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC\n"
+  "AgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI\n"
+  "CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAg\n"
+  "ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA\n"
+  "gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgIC\n"
+  "AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI\n"
+  "CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAg\n"
+  "ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA\n"
+  "gICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC\n"
+  "AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI\n"
+  "CAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg\n"
+  "ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICA\n"
+  "gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC\n"
+  "AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgI\n"
+  "CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg\n"
+  "ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICA\n"
+  "gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC\n"
+  "AgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI\n"
+  "CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg\n"
+  "ICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA\n"
+  "gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgIC\n"
+  "AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI\n"
+  "CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAg\n"
+  "ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA\n"
+  "gICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgIC\n"
+  "AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI\n"
+  "CAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg\n"
+  "ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA\n"
+  "gICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC\n"
+  "AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgI\n"
+  "CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg\n"
+  "ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICA\n"
+  "gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC\n"
+  "AgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgI\n"
+  "CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg\n"
+  "ICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA\n"
+  "gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC\n"
+  "AgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI\n"
+  "CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAg\n"
+  "ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA\n"
+  "gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgIC\n"
+  "AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKPD94cGFja2V0IGVuZD0ndyc/P\n"
+  "v/uAA5BZG9iZQBkQAAAAAH/2wCEAAQDAwMDAwQDAwQGBAMEBgcFBAQFBwgGBgcGBggKCAkJCQkI\n"
+  "CgoMDAwMDAoMDAwMDAwMDAwMDAwMDAwMDAwMDAwBBAUFCAcIDwoKDxQODg4UFA4ODg4UEQwMDAw\n"
+  "MEREMDAwMDAwRDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDP/AABEIAGQAZAMBEQACEQEDEQ\n"
+  "H/3QAEAA3/xAGiAAAABwEBAQEBAAAAAAAAAAAEBQMCBgEABwgJCgsBAAICAwEBAQEBAAAAAAAAA\n"
+  "AEAAgMEBQYHCAkKCxAAAgEDAwIEAgYHAwQCBgJzAQIDEQQABSESMUFRBhNhInGBFDKRoQcVsUIj\n"
+  "wVLR4TMWYvAkcoLxJUM0U5KismNzwjVEJ5OjszYXVGR0w9LiCCaDCQoYGYSURUaktFbTVSga8uP\n"
+  "zxNTk9GV1hZWltcXV5fVmdoaWprbG1ub2N0dXZ3eHl6e3x9fn9zhIWGh4iJiouMjY6PgpOUlZaX\n"
+  "mJmam5ydnp+So6SlpqeoqaqrrK2ur6EQACAgECAwUFBAUGBAgDA20BAAIRAwQhEjFBBVETYSIGc\n"
+  "YGRMqGx8BTB0eEjQhVSYnLxMyQ0Q4IWklMlomOywgdz0jXiRIMXVJMICQoYGSY2RRonZHRVN/Kj\n"
+  "s8MoKdPj84SUpLTE1OT0ZXWFlaW1xdXl9UZWZnaGlqa2xtbm9kdXZ3eHl6e3x9fn9zhIWGh4iJi\n"
+  "ouMjY6Pg5SVlpeYmZqbnJ2en5KjpKWmp6ipqqusra6vr/2gAMAwEAAhEDEQA/APBnplwPAdR+GB\n"
+  "KY6dYtNG1w39yh4+xb+zIksgEfFaRSSoIx8f7RPRRkSWQimM+lRmwWVXFWYigHxUUVoMiJM+Fj0\n"
+  "tg0RBegLE0Wu+3c+GTBazFCGI7HtSp9slbFYYzyoBsegw2hY1Afl3wqqRqahk+0tDgKpgu4DAUU\n"
+  "+HY+GRS2ePiMKtUB3G+KGuONq//Q8OzpFbW5WnxMop4k9crG5ZnZNJkEOn21utVRYw7HxZtz+OR\n"
+  "vdsrZ2lRtci4aVxFEQA0neg/ZXxJpTITNNuOFss0vSotYNvZ2qGRkPKSTqiU8Sdqk5SZU5Ix8XJ\n"
+  "NNZ8k6bp8TtM73OputUtYq0Unux/hkRkJOzZLCAN2KR+VpbtSkCBaDnIzdlWu59u+XeJTjeASk8\n"
+  "+juZOESEAVqx8BvU/PJibScTrTy09560hkWOGFd2YgFnPQKD19zhOSkxw2l8Vm6XAiYb8gg+k5O\n"
+  "9mnhoon9H3cs5s7WF5pp29OGGMFndyaAKBuTiEEPQLD8h/NDmNdYlttNkYjlbFjcXCr3LLH8II8\n"
+  "C2WUGviZvon/OPWkm3RNSv72SYllMkKxQRV67CQMSKYQAxMkR/wBC56d61P0heel4cYuVOXWvTp\n"
+  "h4Qjjf/9Hw5qBYyISaqjBV+QpvkAzKcki4HomnIxck/wBhtlR2bhunvlDywddMUl4zW+kQ9FQ8X\n"
+  "nfuSewrtmPkycPvc/DhMhvyegXOrWWhmLQPKlsj6xIAiLCoZkY96nv7npmJvI2XOjQFMl0fyRqM\n"
+  "NoxvZvrGt33wlATwiMnVnY1LEdSfuyXF3KIDmUu88w2XlnTl8raAlb2ZFfVL0jdYRtQnxc7BfDC\n"
+  "OaJR7nm3me5tdOtjbMvp3ZRXkV6chVQRX79hmVjgZG+jgZ5jHGhzecXF5LPL6jEjstSSaDM51Ka\n"
+  "6MZ9S1C0sEBe8uZo4YCBXdjxGw60wEWyEqfUHkT8vLXRJFuLdTcaqfhlvWUErtukZ3ABPUjIXTE\n"
+  "m3rGmeV2Tk5UKz/AG/E/wAcgZKya20C3b02kjYtH8AqCygbkUH0nLYlgUb+gbWtPbpXt/n2ybB/\n"
+  "/9Lw4oaVxGd+PxH3qBkGaY3KyiSP01IkiUclH8sg+LKydm6INvZvKsFu+kWtvD8LRoFNRup6moO\n"
+  "aqd277HsGW+XPLmn6XM17FF6l7vW4fd2Zuu+RFls2tmUNrLJb7TSBertGQGqetDkxE0na0pvtHs\n"
+  "QkszWyiGAG5laYlnkeMVHJj8sA5rPk+SvMepTalqlxd3B5zTOXdj/MxqafLpm5xioh5nPK5kpRG\n"
+  "pkcKAST0A6k5NpfUP5K/ki1ssHmHzF+71KRQ8Nud/Qibb/kYw6/yjbrXISlSH07YaHbWyxx2kXE\n"
+  "KACB2zHJtLI7XSelBRvH2xCpvaaTDHXkOTVBPcUG2479RlsdmJVPRtvV+ylenQ0y62FP/9PxRpo\n"
+  "WG5FxKKxKFDA+GVS5NsebLdFsRePc3siVW4f4QR0QVAGYeSXR2unhtZ6s60K6jt+MMSFwtF2+xX\n"
+  "wr7eGUGLlRPQMsE2vxQm7itxKg3VCfT2+nb8cDYaCDtfOXmCCcROrQrUhkkCHYn6emRMqZxjbLd\n"
+  "F1+W/4xajHzjNCtQKMffETWUdngX5p+QZ9A8xS6hbo0ui37NNDPT7DOalHpsCD08Rmyw5ARTpdV\n"
+  "gIPEF35MeRn80ed4S5EdrpKm9kZ15K0iH92hB7Me/tmS60vt/QrCYyekiBdgSTXcjqV9q9MokFD\n"
+  "N7S3aFVVR8RoK9zldqndvAY6nffr/AGYQqLhjdpCoIAZW22HavU/LJBUP9WblX0xTw7fOmWsX/9\n"
+  "Tw7FdvMqWkQ3Z1qfED+mQIbI77PX/LFis9vBajZm2Y+x65rMh3t30Bsze400aVaIbSLk6r8CMRT\n"
+  "l/NmOcllnGDD9Y8uecNfEEiXrMgDGWAyGOOu5WlB+vMrHODTlxZCdjsyFdB006VpVtLasurQxBL\n"
+  "64WiLI4/aFT1ANOXemV5piR2b9NiljB4yyHy9CLOVI5GJhB+CvXY9R8xmINzs5HNZ+Z96BZpbxA\n"
+  "fVJo39UFefwopYgL4nMiMd2qZoIn/AJx00u3t/Lt7qpp9Yv5GLf5MUTERqfbvmzBeezjd9H+VlL\n"
+  "wSQzBqsvOGQD7L12rXsemPNxmXQSxxIPU2nFV4HYqR1xEUWj4ZAxBryr2G+J2VGDZlLrxUH6KZA\n"
+  "Fkqb15VFelfwy+2FP8A/9Xxlf6AdA182Yk9eFeLxSjoVfcfSMo4uIOfkweFOnpvlWYrLEwNFAA+\n"
+  "nMOYdrhFvQLeSO7coBXiK8iKiv07Zj8Ac4QtNrW1njUcKcT+yAR/xGmR4WcsStLpTuPU9IFaEsV\n"
+  "BP3k4m2AgBzSwyQNcIwNTE1aI3wnam9O2Ug7s5Ckk/NDndeVXa2H78MqqV6jmeBp9+ZWKXqDjZ4\n"
+  "+gvVvy30qCy0qzsLRBCnBI2VdgUTqPvOZ7y+Q7pz+bn5q6d+VflZxZlJ/NN4ypptk5qtB9qRwDX\n"
+  "gn/AAx2y2ItpfKFv+eH5qNeTajJ5ovVaVywSqvEtTUKqupAA6D2y0BNPtv/AJx//M5PzL8mJeXT\n"
+  "L+ndPf6rqarSpkAqsnEAAeoN6DpkJRYci9lROSgSUUH9o9K5Tw0ztfSHnXkOtK9q+PHwydq//9b\n"
+  "yxrVoZNBtNSA5zRMPXmH8j0CLXuBmHE+qneamHpEuqYeV7pzFVTRgQK5XMNmnlb1vyyY5QA1OwJ\n"
+  "+eUF2seTOLu5s7azVIVAkpVn/hhnIALG73Yz5jvb1dICqzpDNIqyFD8SxH7R28cxibZCiWOsdJs\n"
+  "PTM6XNstPhnkjIhcHuJBVfvOCiUSn0TfWrTTLjyw8guA/PifTO3xcxxA8a5ZAbimvJP0m3p/kFF\n"
+  "WxhmpWQJ9NW3zZPHz5vlb/nIDVbrWfzO1RJhxGnpDaRL/khA1T7ktmSOTAJhZaAUtLawsbayl8v\n"
+  "xWi3Gpay0cF3HPcFRJJHJMXVrcJ8UaAFG5LWjF8tAYW9H/wCcOo9bTzxrt/owkTyksZW5gkIKvI\n"
+  "7k26nvyReRJHyyBWT7dWQyOWlbnK2526e1O1MqIUFE84uPLkOdK9RXI0E2/wD/1/DA1bURZLY/W\n"
+  "ZDZqwb0eXw7dMgIi7bjllVXsz7yNcfWC0Vd3Ip92Y2UOz0cnsPlwyx8xQ/u24sMxCadoJp9LOXk\n"
+  "VX/uwRUE0BI8cokbLMyoKouHu2MaKGXw7fLDwgoGSkbHpaNZyLLHRSKcFFQQRvUdMlwUFOQyLzr\n"
+  "ztpCaba6fPau4ijv4OURY8AjVFKV7ZZiO+7Vnh6XvXkSWNbW2WTb92KDxIFMzwHlZc3zX+fuizW\n"
+  "f5p3ty8XGDU4YLmCQiisyII3+4rvl8UB5ffEghRGvOm7AbnvWvjk1fen/ONPldPKP5aWOpPCfr2\n"
+  "uE31y6q2wbaMEn+VAMDSdyzrzj+avlHyTp0l/r2rxWFuHWJuIeacu4qFCRgsajfBwsty89/6Gr/\n"
+  "ACa9an+JL/hSnrfoubhXwpXpjwhaL//Q8E1AqtcAZMs8l6i1nqMa1oSVP0VynKLDmaWdSfQXl69\n"
+  "jF1Jv8MhDb5rpB3AO7INRRLhhGp4R05FgaGvTMU8200xS70zVDMRp2pTIOvBmB3PgQP15kxIcnD\n"
+  "LH/EEz0rRvOJhldr9pQtCqyd6VrShGTqw5d4ARv9jHfOGl+ZJNMluLkyenaFbiRdqFYW5nrWuwO\n"
+  "MKB5MdSMRxnhlu9N8p6lLFpti63FUjCtFJTrDKvse2bEDZ4XJ9RZB+YPli2/Mjy5bxoUi1a0YS2\n"
+  "85UOwIXiy9jRu+TBppfOF1+V3m22vrdpNPM8cs/oo0VJlUqQPjValR3+IZNNvtLS9Yu9Mi0/TJr\n"
+  "kyp6QhWVVCIWRATsKBemwwFrDzT87fybs/wA1bW21PRb+DTvNlgGSRp6iC8i3KJJx+y6n7D0Pwm\n"
+  "hxBZXT55/6Fi/Nf0PW+qWXq+t6X1X67F6vD/ftK04V/wBl344U8b//0fBapxheVh9ocV+nviqY2\n"
+  "/qQJDew/bioWHiuQ8m0bbvaPKGtQ6jaxSo9JloCK75gZI0Xb4sgkHo8MouoAvP94BsRmGY7uWJU\n"
+  "gzbypOQpNOvIdK4Nw2WCE2tXulTkjEEbdafgclxMhFBas93dwyQzsWDghlJFONKHJCZtjOFBJfy\n"
+  "j1y9vPL9zpbIs0WkXL2sUjA8hDXlGCRXtt07ZuYvL5KJeo6bfajbkzWkcToR8dqshZ6in2fhNK/\n"
+  "PDTUlXmHVvMdr5o0v9H2kdrqGpfu7m0nkY87Uf7tkKAU4/s03ynLkEBbfihx7dGT6va67LbRMNR\n"
+  "aKOBuUTKgIBXoK1BOYR1M3aQ0mOt9yxUeZNdtJhFapLqMluSXkg5oxJrUMW5KevQ9MmNXXNqOiH\n"
+  "Rr/Hmv8A1r9I/oj95w+r+j9Yf1+NP5+nXtTD+dF8tkfkOlv/0vC3ph7f0/alcVTbS4A8QibuKb5\n"
+  "RI05EBYRFpdX3ly79a2qYCavH/EY7TCYyMD5PSdD8+wXUSn1ArDqOhBzFlipz4ZwWbaV5htbsgF\n"
+  "qg9crMXKErGyYwajFGzxyHlGSePbbwyqg5UZlCaxrFpaWU95LIqrEjMAT4Dp9OShGy1ZslBhv/A\n"
+  "Dj9rd/a+aL+xUK+m38L3d0HrxRo2HFtu5D8c27y8t30raarbWkU+u6g4gsNORn+EcUaSh2Pc0/4\n"
+  "lgtAjezzbT9SutY1i782al8Nxdyotqh6xWybIg+jc5q8s+I27bFDgFPQp9RE+nrag70+L6crrZu\n"
+  "4jajokdv6LW/Dii1Wo61PXKQN3KPK0L+h4/rnD/K5V78a5LhXxd3/0/DMXXtwxVNtL9Xkaf3f7N\n"
+  "etfbKMjdjtkZ9D6ufrlK0+HpX8coF9HJ26sXvfqXrf7i/U+uften/d/wCyrmQL6uOav0pvpP8Ai\n"
+  "b1F+rV59+vH6a5XLhcjH4nRmY/xpxHP0/UptWvT6Mx/RbmjxWK+aP8AFf1M/pCv1Kvxen9inavf\n"
+  "MrFwXtzcLUeLXq5Mv/I3nz1b0v8AjofuKVry9KrUpTanOlf9jmQ68va/zH9b/COn/o7/AI431mP\n"
+  "65SvLh+zWvbl9rMfNfC34K4kmj9T6lD6FKclp/DNYXZx5srsPrHor6nXvkgxTPS/U+rv6dPU5mt\n"
+  "fngFN5ulv+l/pL/Lp/scerHo//2Q==\n";
+
+static std::string gCommandLine;
+
+TEST(Base64, LargeSample) {
+  RTC_LOG(LS_VERBOSE) << "Testing specific base64 file";
+
+  char unescaped[64 * 1024];
+
+  // unescape that massive blob above
+  size_t size = Base64Unescape(SpecificTest,
+                            sizeof(SpecificTest),
+                            unescaped,
+                            sizeof(unescaped));
+
+  EXPECT_EQ(size, sizeof(testbase64));
+  EXPECT_EQ(0, memcmp(testbase64, unescaped, sizeof(testbase64)));
+}
+
+bool DecodeTest(const char* encoded, size_t expect_unparsed,
+                const char* decoded, Base64::DecodeFlags flags)
+{
+  std::string result;
+  size_t consumed = 0, encoded_len = strlen(encoded);
+  bool success = Base64::DecodeFromArray(encoded, encoded_len, flags,
+                                         &result, &consumed);
+  size_t unparsed = encoded_len - consumed;
+  EXPECT_EQ(expect_unparsed, unparsed) << "\"" << encoded
+                                       << "\" -> \"" << decoded
+                                       << "\"";
+  EXPECT_STREQ(decoded, result.c_str());
+  return success;
+}
+
+#define Flags(x,y,z) \
+  Base64::DO_PARSE_##x | Base64::DO_PAD_##y | Base64::DO_TERM_##z
+
+TEST(Base64, DecodeParseOptions) {
+  // Trailing whitespace
+  EXPECT_TRUE (DecodeTest("YWJjZA== ", 1, "abcd", Flags(STRICT, YES, CHAR)));
+  EXPECT_TRUE (DecodeTest("YWJjZA== ", 0, "abcd", Flags(WHITE,  YES, CHAR)));
+  EXPECT_TRUE (DecodeTest("YWJjZA== ", 0, "abcd", Flags(ANY,    YES, CHAR)));
+
+  // Embedded whitespace
+  EXPECT_FALSE(DecodeTest("YWJjZA= =", 3, "abcd", Flags(STRICT, YES, CHAR)));
+  EXPECT_TRUE (DecodeTest("YWJjZA= =", 0, "abcd", Flags(WHITE,  YES, CHAR)));
+  EXPECT_TRUE (DecodeTest("YWJjZA= =", 0, "abcd", Flags(ANY,    YES, CHAR)));
+
+  // Embedded non-base64 characters
+  EXPECT_FALSE(DecodeTest("YWJjZA=*=", 3, "abcd", Flags(STRICT, YES, CHAR)));
+  EXPECT_FALSE(DecodeTest("YWJjZA=*=", 3, "abcd", Flags(WHITE,  YES, CHAR)));
+  EXPECT_TRUE (DecodeTest("YWJjZA=*=", 0, "abcd", Flags(ANY,    YES, CHAR)));
+
+  // Unexpected padding characters
+  EXPECT_FALSE(DecodeTest("YW=JjZA==", 7, "a",    Flags(STRICT, YES, CHAR)));
+  EXPECT_FALSE(DecodeTest("YW=JjZA==", 7, "a",    Flags(WHITE,  YES, CHAR)));
+  EXPECT_TRUE (DecodeTest("YW=JjZA==", 0, "abcd", Flags(ANY,    YES, CHAR)));
+}
+
+TEST(Base64, DecodePadOptions) {
+  // Padding
+  EXPECT_TRUE (DecodeTest("YWJjZA==",  0, "abcd", Flags(STRICT, YES, CHAR)));
+  EXPECT_TRUE (DecodeTest("YWJjZA==",  0, "abcd", Flags(STRICT, ANY, CHAR)));
+  EXPECT_TRUE (DecodeTest("YWJjZA==",  2, "abcd", Flags(STRICT, NO,  CHAR)));
+
+  // Incomplete padding
+  EXPECT_FALSE(DecodeTest("YWJjZA=",   1, "abcd", Flags(STRICT, YES, CHAR)));
+  EXPECT_TRUE (DecodeTest("YWJjZA=",   1, "abcd", Flags(STRICT, ANY, CHAR)));
+  EXPECT_TRUE (DecodeTest("YWJjZA=",   1, "abcd", Flags(STRICT, NO,  CHAR)));
+
+  // No padding
+  EXPECT_FALSE(DecodeTest("YWJjZA",    0, "abcd", Flags(STRICT, YES, CHAR)));
+  EXPECT_TRUE (DecodeTest("YWJjZA",    0, "abcd", Flags(STRICT, ANY, CHAR)));
+  EXPECT_TRUE (DecodeTest("YWJjZA",    0, "abcd", Flags(STRICT, NO,  CHAR)));
+}
+
+TEST(Base64, DecodeTerminateOptions) {
+  // Complete quantum
+  EXPECT_TRUE (DecodeTest("YWJj",      0, "abc",  Flags(STRICT, NO,  BUFFER)));
+  EXPECT_TRUE (DecodeTest("YWJj",      0, "abc",  Flags(STRICT, NO,  CHAR)));
+  EXPECT_TRUE (DecodeTest("YWJj",      0, "abc",  Flags(STRICT, NO,  ANY)));
+
+  // Complete quantum with trailing data
+  EXPECT_FALSE(DecodeTest("YWJj*",     1, "abc",  Flags(STRICT, NO,  BUFFER)));
+  EXPECT_TRUE (DecodeTest("YWJj*",     1, "abc",  Flags(STRICT, NO,  CHAR)));
+  EXPECT_TRUE (DecodeTest("YWJj*",     1, "abc",  Flags(STRICT, NO,  ANY)));
+
+  // Incomplete quantum
+  EXPECT_FALSE(DecodeTest("YWJ",       0, "ab",   Flags(STRICT, NO,  BUFFER)));
+  EXPECT_FALSE(DecodeTest("YWJ",       0, "ab",   Flags(STRICT, NO,  CHAR)));
+  EXPECT_TRUE (DecodeTest("YWJ",       0, "ab",   Flags(STRICT, NO,  ANY)));
+}
+
+TEST(Base64, GetNextBase64Char) {
+  // The table looks like this:
+  // "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
+  char next_char;
+  EXPECT_TRUE(Base64::GetNextBase64Char('A', &next_char));
+  EXPECT_EQ('B', next_char);
+  EXPECT_TRUE(Base64::GetNextBase64Char('Z', &next_char));
+  EXPECT_EQ('a', next_char);
+  EXPECT_TRUE(Base64::GetNextBase64Char('/', &next_char));
+  EXPECT_EQ('A', next_char);
+  EXPECT_FALSE(Base64::GetNextBase64Char('&', &next_char));
+  EXPECT_FALSE(Base64::GetNextBase64Char('Z', nullptr));
+}
diff --git a/rtc_base/basictypes.h b/rtc_base/basictypes.h
new file mode 100644
index 0000000..42226e7
--- /dev/null
+++ b/rtc_base/basictypes.h
@@ -0,0 +1,70 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_BASICTYPES_H_
+#define RTC_BASE_BASICTYPES_H_
+
+#include <stddef.h>  // for NULL, size_t
+#include <stdint.h>  // for uintptr_t and (u)int_t types.
+
+// Detect compiler is for x86 or x64.
+#if defined(__x86_64__) || defined(_M_X64) || \
+    defined(__i386__) || defined(_M_IX86)
+#define CPU_X86 1
+#endif
+
+// Detect compiler is for arm.
+#if defined(__arm__) || defined(_M_ARM)
+#define CPU_ARM 1
+#endif
+
+#if defined(CPU_X86) && defined(CPU_ARM)
+#error CPU_X86 and CPU_ARM both defined.
+#endif
+
+#if !defined(RTC_ARCH_CPU_BIG_ENDIAN) && !defined(RTC_ARCH_CPU_LITTLE_ENDIAN)
+// x86, arm or GCC provided __BYTE_ORDER__ macros
+#if defined(CPU_X86) || defined(CPU_ARM) ||                             \
+  (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+#define RTC_ARCH_CPU_LITTLE_ENDIAN
+#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define RTC_ARCH_CPU_BIG_ENDIAN
+#else
+#error RTC_ARCH_CPU_BIG_ENDIAN or RTC_ARCH_CPU_LITTLE_ENDIAN should be defined.
+#endif
+#endif
+
+#if defined(RTC_ARCH_CPU_BIG_ENDIAN) && defined(RTC_ARCH_CPU_LITTLE_ENDIAN)
+#error RTC_ARCH_CPU_BIG_ENDIAN and RTC_ARCH_CPU_LITTLE_ENDIAN both defined.
+#endif
+
+#if defined(WEBRTC_WIN)
+typedef int socklen_t;
+#endif
+
+// The following only works for C++
+#ifdef __cplusplus
+
+#ifndef ALIGNP
+#define ALIGNP(p, t)                                             \
+  (reinterpret_cast<uint8_t*>(((reinterpret_cast<uintptr_t>(p) + \
+  ((t) - 1)) & ~((t) - 1))))
+#endif
+
+#define RTC_IS_ALIGNED(p, a) (!((uintptr_t)(p) & ((a) - 1)))
+
+// Use these to declare and define a static local variable that gets leaked so
+// that its destructors are not called at exit.
+#define RTC_DEFINE_STATIC_LOCAL(type, name, arguments) \
+  static type& name = *new type arguments
+
+#endif  // __cplusplus
+
+#endif  // RTC_BASE_BASICTYPES_H_
diff --git a/rtc_base/basictypes_unittest.cc b/rtc_base/basictypes_unittest.cc
new file mode 100644
index 0000000..a8b0533
--- /dev/null
+++ b/rtc_base/basictypes_unittest.cc
@@ -0,0 +1,48 @@
+/*
+ *  Copyright 2012 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/basictypes.h"
+
+#include "rtc_base/gunit.h"
+
+namespace rtc {
+
+TEST(BasicTypesTest, Endian) {
+  uint16_t v16 = 0x1234u;
+  uint8_t first_byte = *reinterpret_cast<uint8_t*>(&v16);
+#if defined(RTC_ARCH_CPU_LITTLE_ENDIAN)
+  EXPECT_EQ(0x34u, first_byte);
+#elif defined(RTC_ARCH_CPU_BIG_ENDIAN)
+  EXPECT_EQ(0x12u, first_byte);
+#endif
+}
+
+TEST(BasicTypesTest, SizeOfConstants) {
+  EXPECT_EQ(8u, sizeof(INT64_C(0)));
+  EXPECT_EQ(8u, sizeof(UINT64_C(0)));
+  EXPECT_EQ(8u, sizeof(INT64_C(0x1234567887654321)));
+  EXPECT_EQ(8u, sizeof(UINT64_C(0x8765432112345678)));
+}
+
+// Test CPU_ macros
+#if !defined(CPU_ARM) && defined(__arm__)
+#error expected CPU_ARM to be defined.
+#endif
+#if !defined(CPU_X86) && (defined(WEBRTC_WIN) || defined(WEBRTC_MAC) && !defined(WEBRTC_IOS))
+#error expected CPU_X86 to be defined.
+#endif
+#if !defined(RTC_ARCH_CPU_LITTLE_ENDIAN) && \
+  (defined(WEBRTC_WIN) || defined(WEBRTC_MAC) && !defined(WEBRTC_IOS) || defined(CPU_X86))
+#error expected RTC_ARCH_CPU_LITTLE_ENDIAN to be defined.
+#endif
+
+// TODO(fbarchard): Test all macros in basictypes.h
+
+}  // namespace rtc
diff --git a/rtc_base/bind.h b/rtc_base/bind.h
new file mode 100644
index 0000000..b9f98b9
--- /dev/null
+++ b/rtc_base/bind.h
@@ -0,0 +1,284 @@
+/*
+ *  Copyright 2012 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Bind() is an overloaded function that converts method calls into function
+// objects (aka functors). The method object is captured as a scoped_refptr<> if
+// possible, and as a raw pointer otherwise. Any arguments to the method are
+// captured by value. The return value of Bind is a stateful, nullary function
+// object. Care should be taken about the lifetime of objects captured by
+// Bind(); the returned functor knows nothing about the lifetime of a non
+// ref-counted method object or any arguments passed by pointer, and calling the
+// functor with a destroyed object will surely do bad things.
+//
+// To prevent the method object from being captured as a scoped_refptr<>, you
+// can use Unretained. But this should only be done when absolutely necessary,
+// and when the caller knows the extra reference isn't needed.
+//
+// Example usage:
+//   struct Foo {
+//     int Test1() { return 42; }
+//     int Test2() const { return 52; }
+//     int Test3(int x) { return x*x; }
+//     float Test4(int x, float y) { return x + y; }
+//   };
+//
+//   int main() {
+//     Foo foo;
+//     cout << rtc::Bind(&Foo::Test1, &foo)() << endl;
+//     cout << rtc::Bind(&Foo::Test2, &foo)() << endl;
+//     cout << rtc::Bind(&Foo::Test3, &foo, 3)() << endl;
+//     cout << rtc::Bind(&Foo::Test4, &foo, 7, 8.5f)() << endl;
+//   }
+//
+// Example usage of ref counted objects:
+//   struct Bar {
+//     int AddRef();
+//     int Release();
+//
+//     void Test() {}
+//     void BindThis() {
+//       // The functor passed to AsyncInvoke() will keep this object alive.
+//       invoker.AsyncInvoke(RTC_FROM_HERE,rtc::Bind(&Bar::Test, this));
+//     }
+//   };
+//
+//   int main() {
+//     rtc::scoped_refptr<Bar> bar = new rtc::RefCountedObject<Bar>();
+//     auto functor = rtc::Bind(&Bar::Test, bar);
+//     bar = nullptr;
+//     // The functor stores an internal scoped_refptr<Bar>, so this is safe.
+//     functor();
+//   }
+//
+
+#ifndef RTC_BASE_BIND_H_
+#define RTC_BASE_BIND_H_
+
+#include <tuple>
+#include <type_traits>
+
+#include "rtc_base/scoped_ref_ptr.h"
+#include "rtc_base/template_util.h"
+
+#define NONAME
+
+namespace rtc {
+namespace detail {
+// This is needed because the template parameters in Bind can't be resolved
+// if they're used both as parameters of the function pointer type and as
+// parameters to Bind itself: the function pointer parameters are exact
+// matches to the function prototype, but the parameters to bind have
+// references stripped. This trick allows the compiler to dictate the Bind
+// parameter types rather than deduce them.
+template <class T> struct identity { typedef T type; };
+
+// IsRefCounted<T>::value will be true for types that can be used in
+// rtc::scoped_refptr<T>, i.e. types that implements nullary functions AddRef()
+// and Release(), regardless of their return types. AddRef() and Release() can
+// be defined in T or any superclass of T.
+template <typename T>
+class IsRefCounted {
+  // This is a complex implementation detail done with SFINAE.
+
+  // Define types such that sizeof(Yes) != sizeof(No).
+  struct Yes { char dummy[1]; };
+  struct No { char dummy[2]; };
+  // Define two overloaded template functions with return types of different
+  // size. This way, we can use sizeof() on the return type to determine which
+  // function the compiler would have chosen. One function will be preferred
+  // over the other if it is possible to create it without compiler errors,
+  // otherwise the compiler will simply remove it, and default to the less
+  // preferred function.
+  template <typename R>
+  static Yes test(R* r, decltype(r->AddRef(), r->Release(), 42));
+  template <typename C> static No test(...);
+
+public:
+  // Trick the compiler to tell if it's possible to call AddRef() and Release().
+  static const bool value = sizeof(test<T>((T*)nullptr, 42)) == sizeof(Yes);
+};
+
+// TernaryTypeOperator is a helper class to select a type based on a static bool
+// value.
+template <bool condition, typename IfTrueT, typename IfFalseT>
+struct TernaryTypeOperator {};
+
+template <typename IfTrueT, typename IfFalseT>
+struct TernaryTypeOperator<true, IfTrueT, IfFalseT> {
+  typedef IfTrueT type;
+};
+
+template <typename IfTrueT, typename IfFalseT>
+struct TernaryTypeOperator<false, IfTrueT, IfFalseT> {
+  typedef IfFalseT type;
+};
+
+// PointerType<T>::type will be scoped_refptr<T> for ref counted types, and T*
+// otherwise.
+template <class T>
+struct PointerType {
+  typedef typename TernaryTypeOperator<IsRefCounted<T>::value,
+                                       scoped_refptr<T>,
+                                       T*>::type type;
+};
+
+template <typename T>
+class UnretainedWrapper {
+ public:
+  explicit UnretainedWrapper(T* o) : ptr_(o) {}
+  T* get() const { return ptr_; }
+
+ private:
+  T* ptr_;
+};
+
+}  // namespace detail
+
+template <typename T>
+static inline detail::UnretainedWrapper<T> Unretained(T* o) {
+  return detail::UnretainedWrapper<T>(o);
+}
+
+template <class ObjectT, class MethodT, class R, typename... Args>
+class MethodFunctor {
+ public:
+  MethodFunctor(MethodT method, ObjectT* object, Args... args)
+      : method_(method), object_(object), args_(args...) {}
+  R operator()() const {
+    return CallMethod(typename sequence_generator<sizeof...(Args)>::type());
+  }
+
+ private:
+  // Use sequence_generator (see template_util.h) to expand a MethodFunctor
+  // with 2 arguments to (std::get<0>(args_), std::get<1>(args_)), for
+  // instance.
+  template <int... S>
+  R CallMethod(sequence<S...>) const {
+    return (object_->*method_)(std::get<S>(args_)...);
+  }
+
+  MethodT method_;
+  typename detail::PointerType<ObjectT>::type object_;
+  typename std::tuple<typename std::remove_reference<Args>::type...> args_;
+};
+
+template <class ObjectT, class MethodT, class R, typename... Args>
+class UnretainedMethodFunctor {
+ public:
+  UnretainedMethodFunctor(MethodT method,
+                          detail::UnretainedWrapper<ObjectT> object,
+                          Args... args)
+      : method_(method), object_(object.get()), args_(args...) {}
+  R operator()() const {
+    return CallMethod(typename sequence_generator<sizeof...(Args)>::type());
+  }
+
+ private:
+  // Use sequence_generator (see template_util.h) to expand an
+  // UnretainedMethodFunctor with 2 arguments to (std::get<0>(args_),
+  // std::get<1>(args_)), for instance.
+  template <int... S>
+  R CallMethod(sequence<S...>) const {
+    return (object_->*method_)(std::get<S>(args_)...);
+  }
+
+  MethodT method_;
+  ObjectT* object_;
+  typename std::tuple<typename std::remove_reference<Args>::type...> args_;
+};
+
+template <class FunctorT, class R, typename... Args>
+class Functor {
+ public:
+  Functor(const FunctorT& functor, Args... args)
+      : functor_(functor), args_(args...) {}
+  R operator()() const {
+    return CallFunction(typename sequence_generator<sizeof...(Args)>::type());
+  }
+
+ private:
+  // Use sequence_generator (see template_util.h) to expand a Functor
+  // with 2 arguments to (std::get<0>(args_), std::get<1>(args_)), for
+  // instance.
+  template <int... S>
+  R CallFunction(sequence<S...>) const {
+    return functor_(std::get<S>(args_)...);
+  }
+
+  FunctorT functor_;
+  typename std::tuple<typename std::remove_reference<Args>::type...> args_;
+};
+
+#define FP_T(x) R (ObjectT::*x)(Args...)
+
+template <class ObjectT, class R, typename... Args>
+MethodFunctor<ObjectT, FP_T(NONAME), R, Args...> Bind(
+    FP_T(method),
+    ObjectT* object,
+    typename detail::identity<Args>::type... args) {
+  return MethodFunctor<ObjectT, FP_T(NONAME), R, Args...>(method, object,
+                                                          args...);
+}
+
+template <class ObjectT, class R, typename... Args>
+MethodFunctor<ObjectT, FP_T(NONAME), R, Args...> Bind(
+    FP_T(method),
+    const scoped_refptr<ObjectT>& object,
+    typename detail::identity<Args>::type... args) {
+  return MethodFunctor<ObjectT, FP_T(NONAME), R, Args...>(method, object.get(),
+                                                          args...);
+}
+
+template <class ObjectT, class R, typename... Args>
+UnretainedMethodFunctor<ObjectT, FP_T(NONAME), R, Args...> Bind(
+    FP_T(method),
+    detail::UnretainedWrapper<ObjectT> object,
+    typename detail::identity<Args>::type... args) {
+  return UnretainedMethodFunctor<ObjectT, FP_T(NONAME), R, Args...>(
+      method, object, args...);
+}
+
+#undef FP_T
+#define FP_T(x) R (ObjectT::*x)(Args...) const
+
+template <class ObjectT, class R, typename... Args>
+MethodFunctor<const ObjectT, FP_T(NONAME), R, Args...> Bind(
+    FP_T(method),
+    const ObjectT* object,
+    typename detail::identity<Args>::type... args) {
+  return MethodFunctor<const ObjectT, FP_T(NONAME), R, Args...>(method, object,
+                                                                args...);
+}
+template <class ObjectT, class R, typename... Args>
+UnretainedMethodFunctor<const ObjectT, FP_T(NONAME), R, Args...> Bind(
+    FP_T(method),
+    detail::UnretainedWrapper<const ObjectT> object,
+    typename detail::identity<Args>::type... args) {
+  return UnretainedMethodFunctor<const ObjectT, FP_T(NONAME), R, Args...>(
+      method, object, args...);
+}
+
+#undef FP_T
+#define FP_T(x) R (*x)(Args...)
+
+template <class R, typename... Args>
+Functor<FP_T(NONAME), R, Args...> Bind(
+    FP_T(function),
+    typename detail::identity<Args>::type... args) {
+  return Functor<FP_T(NONAME), R, Args...>(function, args...);
+}
+
+#undef FP_T
+
+}  // namespace rtc
+
+#undef NONAME
+
+#endif  // RTC_BASE_BIND_H_
diff --git a/rtc_base/bind_unittest.cc b/rtc_base/bind_unittest.cc
new file mode 100644
index 0000000..8703be4
--- /dev/null
+++ b/rtc_base/bind_unittest.cc
@@ -0,0 +1,223 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <type_traits>
+
+#include "rtc_base/bind.h"
+#include "rtc_base/gunit.h"
+
+#include "rtc_base/refcount.h"
+#include "rtc_base/refcountedobject.h"
+
+namespace rtc {
+
+namespace {
+
+struct LifeTimeCheck;
+
+struct MethodBindTester {
+  void NullaryVoid() { ++call_count; }
+  int NullaryInt() { ++call_count; return 1; }
+  int NullaryConst() const { ++call_count; return 2; }
+  void UnaryVoid(int dummy) { ++call_count; }
+  template <class T> T Identity(T value) { ++call_count; return value; }
+  int UnaryByPointer(int* value) const {
+    ++call_count;
+    return ++(*value);
+  }
+  int UnaryByRef(const int& value) const {
+    ++call_count;
+    return ++const_cast<int&>(value);
+  }
+  int Multiply(int a, int b) const { ++call_count; return a * b; }
+  void RefArgument(const scoped_refptr<LifeTimeCheck>& object) {
+    EXPECT_TRUE(object.get() != nullptr);
+  }
+
+  mutable int call_count;
+};
+
+struct A { int dummy; };
+struct B: public RefCountInterface { int dummy; };
+struct C: public A, B {};
+struct D {
+  int AddRef();
+};
+struct E: public D {
+  int Release();
+};
+struct F {
+  void AddRef();
+  void Release();
+};
+
+struct LifeTimeCheck {
+  LifeTimeCheck() : ref_count_(0) {}
+  void AddRef() { ++ref_count_; }
+  void Release() { --ref_count_; }
+  void NullaryVoid() {}
+  int ref_count_;
+};
+
+int Return42() { return 42; }
+int Negate(int a) { return -a; }
+int Multiply(int a, int b) { return a * b; }
+
+}  // namespace
+
+// Try to catch any problem with scoped_refptr type deduction in rtc::Bind at
+// compile time.
+#define EXPECT_IS_CAPTURED_AS_PTR(T)                              \
+  static_assert(is_same<detail::PointerType<T>::type, T*>::value, \
+                "PointerType")
+#define EXPECT_IS_CAPTURED_AS_SCOPED_REFPTR(T)                        \
+  static_assert(                                                      \
+      is_same<detail::PointerType<T>::type, scoped_refptr<T>>::value, \
+      "PointerType")
+
+EXPECT_IS_CAPTURED_AS_PTR(void);
+EXPECT_IS_CAPTURED_AS_PTR(int);
+EXPECT_IS_CAPTURED_AS_PTR(double);
+EXPECT_IS_CAPTURED_AS_PTR(A);
+EXPECT_IS_CAPTURED_AS_PTR(D);
+EXPECT_IS_CAPTURED_AS_PTR(RefCountInterface*);
+EXPECT_IS_CAPTURED_AS_PTR(
+    decltype(Unretained<RefCountedObject<RefCountInterface>>));
+
+EXPECT_IS_CAPTURED_AS_SCOPED_REFPTR(RefCountInterface);
+EXPECT_IS_CAPTURED_AS_SCOPED_REFPTR(B);
+EXPECT_IS_CAPTURED_AS_SCOPED_REFPTR(C);
+EXPECT_IS_CAPTURED_AS_SCOPED_REFPTR(E);
+EXPECT_IS_CAPTURED_AS_SCOPED_REFPTR(F);
+EXPECT_IS_CAPTURED_AS_SCOPED_REFPTR(RefCountedObject<RefCountInterface>);
+EXPECT_IS_CAPTURED_AS_SCOPED_REFPTR(RefCountedObject<B>);
+EXPECT_IS_CAPTURED_AS_SCOPED_REFPTR(RefCountedObject<C>);
+EXPECT_IS_CAPTURED_AS_SCOPED_REFPTR(const RefCountedObject<RefCountInterface>);
+
+TEST(BindTest, BindToMethod) {
+  MethodBindTester object = {0};
+  EXPECT_EQ(0, object.call_count);
+  Bind(&MethodBindTester::NullaryVoid, &object)();
+  EXPECT_EQ(1, object.call_count);
+  EXPECT_EQ(1, Bind(&MethodBindTester::NullaryInt, &object)());
+  EXPECT_EQ(2, object.call_count);
+  EXPECT_EQ(2, Bind(&MethodBindTester::NullaryConst,
+                    static_cast<const MethodBindTester*>(&object))());
+  EXPECT_EQ(3, object.call_count);
+  Bind(&MethodBindTester::UnaryVoid, &object, 5)();
+  EXPECT_EQ(4, object.call_count);
+  EXPECT_EQ(100, Bind(&MethodBindTester::Identity<int>, &object, 100)());
+  EXPECT_EQ(5, object.call_count);
+  const std::string string_value("test string");
+  EXPECT_EQ(string_value, Bind(&MethodBindTester::Identity<std::string>,
+                               &object, string_value)());
+  EXPECT_EQ(6, object.call_count);
+  int value = 11;
+  // Bind binds by value, even if the method signature is by reference, so
+  // "reference" binds require pointers.
+  EXPECT_EQ(12, Bind(&MethodBindTester::UnaryByPointer, &object, &value)());
+  EXPECT_EQ(12, value);
+  EXPECT_EQ(7, object.call_count);
+  // It's possible to bind to a function that takes a const reference, though
+  // the capture will be a copy. See UnaryByRef hackery above where it removes
+  // the const to make sure the underlying storage is, in fact, a copy.
+  EXPECT_EQ(13, Bind(&MethodBindTester::UnaryByRef, &object, value)());
+  // But the original value is unmodified.
+  EXPECT_EQ(12, value);
+  EXPECT_EQ(8, object.call_count);
+  EXPECT_EQ(56, Bind(&MethodBindTester::Multiply, &object, 7, 8)());
+  EXPECT_EQ(9, object.call_count);
+}
+
+TEST(BindTest, BindToFunction) {
+  EXPECT_EQ(42, Bind(&Return42)());
+  EXPECT_EQ(3, Bind(&Negate, -3)());
+  EXPECT_EQ(56, Bind(&Multiply, 8, 7)());
+}
+
+// Test Bind where method object implements RefCountInterface and is passed as a
+// pointer.
+TEST(BindTest, CapturePointerAsScopedRefPtr) {
+  LifeTimeCheck object;
+  EXPECT_EQ(object.ref_count_, 0);
+  scoped_refptr<LifeTimeCheck> scoped_object(&object);
+  EXPECT_EQ(object.ref_count_, 1);
+  {
+    auto functor = Bind(&LifeTimeCheck::NullaryVoid, &object);
+    EXPECT_EQ(object.ref_count_, 2);
+    scoped_object = nullptr;
+    EXPECT_EQ(object.ref_count_, 1);
+  }
+  EXPECT_EQ(object.ref_count_, 0);
+}
+
+// Test Bind where method object implements RefCountInterface and is passed as a
+// scoped_refptr<>.
+TEST(BindTest, CaptureScopedRefPtrAsScopedRefPtr) {
+  LifeTimeCheck object;
+  EXPECT_EQ(object.ref_count_, 0);
+  scoped_refptr<LifeTimeCheck> scoped_object(&object);
+  EXPECT_EQ(object.ref_count_, 1);
+  {
+    auto functor = Bind(&LifeTimeCheck::NullaryVoid, scoped_object);
+    EXPECT_EQ(object.ref_count_, 2);
+    scoped_object = nullptr;
+    EXPECT_EQ(object.ref_count_, 1);
+  }
+  EXPECT_EQ(object.ref_count_, 0);
+}
+
+// Test Bind where method object is captured as scoped_refptr<> and the functor
+// dies while there are references left.
+TEST(BindTest, FunctorReleasesObjectOnDestruction) {
+  LifeTimeCheck object;
+  EXPECT_EQ(object.ref_count_, 0);
+  scoped_refptr<LifeTimeCheck> scoped_object(&object);
+  EXPECT_EQ(object.ref_count_, 1);
+  Bind(&LifeTimeCheck::NullaryVoid, &object)();
+  EXPECT_EQ(object.ref_count_, 1);
+  scoped_object = nullptr;
+  EXPECT_EQ(object.ref_count_, 0);
+}
+
+// Test Bind with scoped_refptr<> argument.
+TEST(BindTest, ScopedRefPointerArgument) {
+  LifeTimeCheck object;
+  EXPECT_EQ(object.ref_count_, 0);
+  scoped_refptr<LifeTimeCheck> scoped_object(&object);
+  EXPECT_EQ(object.ref_count_, 1);
+  {
+    MethodBindTester bind_tester;
+    auto functor =
+        Bind(&MethodBindTester::RefArgument, &bind_tester, scoped_object);
+    EXPECT_EQ(object.ref_count_, 2);
+  }
+  EXPECT_EQ(object.ref_count_, 1);
+  scoped_object = nullptr;
+  EXPECT_EQ(object.ref_count_, 0);
+}
+
+namespace {
+
+const int* Ref(const int& a) { return &a; }
+
+}  // anonymous namespace
+
+// Test Bind with non-scoped_refptr<> reference argument, which should be
+// modified to a non-reference capture.
+TEST(BindTest, RefArgument) {
+  const int x = 42;
+  EXPECT_EQ(&x, Ref(x));
+  // Bind() should make a copy of |x|, i.e. the pointers should be different.
+  auto functor = Bind(&Ref, x);
+  EXPECT_NE(&x, functor());
+}
+
+}  // namespace rtc
diff --git a/rtc_base/bitbuffer.cc b/rtc_base/bitbuffer.cc
new file mode 100644
index 0000000..86247af
--- /dev/null
+++ b/rtc_base/bitbuffer.cc
@@ -0,0 +1,310 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/bitbuffer.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "rtc_base/checks.h"
+
+namespace {
+
+// Returns the lowest (right-most) |bit_count| bits in |byte|.
+uint8_t LowestBits(uint8_t byte, size_t bit_count) {
+  RTC_DCHECK_LE(bit_count, 8);
+  return byte & ((1 << bit_count) - 1);
+}
+
+// Returns the highest (left-most) |bit_count| bits in |byte|, shifted to the
+// lowest bits (to the right).
+uint8_t HighestBits(uint8_t byte, size_t bit_count) {
+  RTC_DCHECK_LE(bit_count, 8);
+  uint8_t shift = 8 - static_cast<uint8_t>(bit_count);
+  uint8_t mask = 0xFF << shift;
+  return (byte & mask) >> shift;
+}
+
+// Returns the highest byte of |val| in a uint8_t.
+uint8_t HighestByte(uint64_t val) {
+  return static_cast<uint8_t>(val >> 56);
+}
+
+// Returns the result of writing partial data from |source|, of
+// |source_bit_count| size in the highest bits, to |target| at
+// |target_bit_offset| from the highest bit.
+uint8_t WritePartialByte(uint8_t source,
+                         size_t source_bit_count,
+                         uint8_t target,
+                         size_t target_bit_offset) {
+  RTC_DCHECK(target_bit_offset < 8);
+  RTC_DCHECK(source_bit_count < 9);
+  RTC_DCHECK(source_bit_count <= (8 - target_bit_offset));
+  // Generate a mask for just the bits we're going to overwrite, so:
+  uint8_t mask =
+      // The number of bits we want, in the most significant bits...
+      static_cast<uint8_t>(0xFF << (8 - source_bit_count))
+      // ...shifted over to the target offset from the most signficant bit.
+      >> target_bit_offset;
+
+  // We want the target, with the bits we'll overwrite masked off, or'ed with
+  // the bits from the source we want.
+  return (target & ~mask) | (source >> target_bit_offset);
+}
+
+// Counts the number of bits used in the binary representation of val.
+size_t CountBits(uint64_t val) {
+  size_t bit_count = 0;
+  while (val != 0) {
+    bit_count++;
+    val >>= 1;
+  }
+  return bit_count;
+}
+
+}  // namespace
+
+namespace rtc {
+
+BitBuffer::BitBuffer(const uint8_t* bytes, size_t byte_count)
+    : bytes_(bytes), byte_count_(byte_count), byte_offset_(), bit_offset_() {
+  RTC_DCHECK(static_cast<uint64_t>(byte_count_) <=
+             std::numeric_limits<uint32_t>::max());
+}
+
+uint64_t BitBuffer::RemainingBitCount() const {
+  return (static_cast<uint64_t>(byte_count_) - byte_offset_) * 8 - bit_offset_;
+}
+
+bool BitBuffer::ReadUInt8(uint8_t* val) {
+  uint32_t bit_val;
+  if (!ReadBits(&bit_val, sizeof(uint8_t) * 8)) {
+    return false;
+  }
+  RTC_DCHECK(bit_val <= std::numeric_limits<uint8_t>::max());
+  *val = static_cast<uint8_t>(bit_val);
+  return true;
+}
+
+bool BitBuffer::ReadUInt16(uint16_t* val) {
+  uint32_t bit_val;
+  if (!ReadBits(&bit_val, sizeof(uint16_t) * 8)) {
+    return false;
+  }
+  RTC_DCHECK(bit_val <= std::numeric_limits<uint16_t>::max());
+  *val = static_cast<uint16_t>(bit_val);
+  return true;
+}
+
+bool BitBuffer::ReadUInt32(uint32_t* val) {
+  return ReadBits(val, sizeof(uint32_t) * 8);
+}
+
+bool BitBuffer::PeekBits(uint32_t* val, size_t bit_count) {
+  if (!val || bit_count > RemainingBitCount() || bit_count > 32) {
+    return false;
+  }
+  const uint8_t* bytes = bytes_ + byte_offset_;
+  size_t remaining_bits_in_current_byte = 8 - bit_offset_;
+  uint32_t bits = LowestBits(*bytes++, remaining_bits_in_current_byte);
+  // If we're reading fewer bits than what's left in the current byte, just
+  // return the portion of this byte that we need.
+  if (bit_count < remaining_bits_in_current_byte) {
+    *val = HighestBits(bits, bit_offset_ + bit_count);
+    return true;
+  }
+  // Otherwise, subtract what we've read from the bit count and read as many
+  // full bytes as we can into bits.
+  bit_count -= remaining_bits_in_current_byte;
+  while (bit_count >= 8) {
+    bits = (bits << 8) | *bytes++;
+    bit_count -= 8;
+  }
+  // Whatever we have left is smaller than a byte, so grab just the bits we need
+  // and shift them into the lowest bits.
+  if (bit_count > 0) {
+    bits <<= bit_count;
+    bits |= HighestBits(*bytes, bit_count);
+  }
+  *val = bits;
+  return true;
+}
+
+bool BitBuffer::ReadBits(uint32_t* val, size_t bit_count) {
+  return PeekBits(val, bit_count) && ConsumeBits(bit_count);
+}
+
+bool BitBuffer::ConsumeBytes(size_t byte_count) {
+  return ConsumeBits(byte_count * 8);
+}
+
+bool BitBuffer::ConsumeBits(size_t bit_count) {
+  if (bit_count > RemainingBitCount()) {
+    return false;
+  }
+
+  byte_offset_ += (bit_offset_ + bit_count) / 8;
+  bit_offset_ = (bit_offset_ + bit_count) % 8;
+  return true;
+}
+
+bool BitBuffer::ReadExponentialGolomb(uint32_t* val) {
+  if (!val) {
+    return false;
+  }
+  // Store off the current byte/bit offset, in case we want to restore them due
+  // to a failed parse.
+  size_t original_byte_offset = byte_offset_;
+  size_t original_bit_offset = bit_offset_;
+
+  // Count the number of leading 0 bits by peeking/consuming them one at a time.
+  size_t zero_bit_count = 0;
+  uint32_t peeked_bit;
+  while (PeekBits(&peeked_bit, 1) && peeked_bit == 0) {
+    zero_bit_count++;
+    ConsumeBits(1);
+  }
+
+  // We should either be at the end of the stream, or the next bit should be 1.
+  RTC_DCHECK(!PeekBits(&peeked_bit, 1) || peeked_bit == 1);
+
+  // The bit count of the value is the number of zeros + 1. Make sure that many
+  // bits fits in a uint32_t and that we have enough bits left for it, and then
+  // read the value.
+  size_t value_bit_count = zero_bit_count + 1;
+  if (value_bit_count > 32 || !ReadBits(val, value_bit_count)) {
+    RTC_CHECK(Seek(original_byte_offset, original_bit_offset));
+    return false;
+  }
+  *val -= 1;
+  return true;
+}
+
+bool BitBuffer::ReadSignedExponentialGolomb(int32_t* val) {
+  uint32_t unsigned_val;
+  if (!ReadExponentialGolomb(&unsigned_val)) {
+    return false;
+  }
+  if ((unsigned_val & 1) == 0) {
+    *val = -static_cast<int32_t>(unsigned_val / 2);
+  } else {
+    *val = (unsigned_val + 1) / 2;
+  }
+  return true;
+}
+
+void BitBuffer::GetCurrentOffset(
+    size_t* out_byte_offset, size_t* out_bit_offset) {
+  RTC_CHECK(out_byte_offset != nullptr);
+  RTC_CHECK(out_bit_offset != nullptr);
+  *out_byte_offset = byte_offset_;
+  *out_bit_offset = bit_offset_;
+}
+
+bool BitBuffer::Seek(size_t byte_offset, size_t bit_offset) {
+  if (byte_offset > byte_count_ || bit_offset > 7 ||
+      (byte_offset == byte_count_ && bit_offset > 0)) {
+    return false;
+  }
+  byte_offset_ = byte_offset;
+  bit_offset_ = bit_offset;
+  return true;
+}
+
+BitBufferWriter::BitBufferWriter(uint8_t* bytes, size_t byte_count)
+    : BitBuffer(bytes, byte_count), writable_bytes_(bytes) {
+}
+
+bool BitBufferWriter::WriteUInt8(uint8_t val) {
+  return WriteBits(val, sizeof(uint8_t) * 8);
+}
+
+bool BitBufferWriter::WriteUInt16(uint16_t val) {
+  return WriteBits(val, sizeof(uint16_t) * 8);
+}
+
+bool BitBufferWriter::WriteUInt32(uint32_t val) {
+  return WriteBits(val, sizeof(uint32_t) * 8);
+}
+
+bool BitBufferWriter::WriteBits(uint64_t val, size_t bit_count) {
+  if (bit_count > RemainingBitCount()) {
+    return false;
+  }
+  size_t total_bits = bit_count;
+
+  // For simplicity, push the bits we want to read from val to the highest bits.
+  val <<= (sizeof(uint64_t) * 8 - bit_count);
+
+  uint8_t* bytes = writable_bytes_ + byte_offset_;
+
+  // The first byte is relatively special; the bit offset to write to may put us
+  // in the middle of the byte, and the total bit count to write may require we
+  // save the bits at the end of the byte.
+  size_t remaining_bits_in_current_byte = 8 - bit_offset_;
+  size_t bits_in_first_byte =
+      std::min(bit_count, remaining_bits_in_current_byte);
+  *bytes = WritePartialByte(
+      HighestByte(val), bits_in_first_byte, *bytes, bit_offset_);
+  if (bit_count <= remaining_bits_in_current_byte) {
+    // Nothing left to write, so quit early.
+    return ConsumeBits(total_bits);
+  }
+
+  // Subtract what we've written from the bit count, shift it off the value, and
+  // write the remaining full bytes.
+  val <<= bits_in_first_byte;
+  bytes++;
+  bit_count -= bits_in_first_byte;
+  while (bit_count >= 8) {
+    *bytes++ = HighestByte(val);
+    val <<= 8;
+    bit_count -= 8;
+  }
+
+  // Last byte may also be partial, so write the remaining bits from the top of
+  // val.
+  if (bit_count > 0) {
+    *bytes = WritePartialByte(HighestByte(val), bit_count, *bytes, 0);
+  }
+
+  // All done! Consume the bits we've written.
+  return ConsumeBits(total_bits);
+}
+
+bool BitBufferWriter::WriteExponentialGolomb(uint32_t val) {
+  // We don't support reading UINT32_MAX, because it doesn't fit in a uint32_t
+  // when encoded, so don't support writing it either.
+  if (val == std::numeric_limits<uint32_t>::max()) {
+    return false;
+  }
+  uint64_t val_to_encode = static_cast<uint64_t>(val) + 1;
+
+  // We need to write CountBits(val+1) 0s and then val+1. Since val (as a
+  // uint64_t) has leading zeros, we can just write the total golomb encoded
+  // size worth of bits, knowing the value will appear last.
+  return WriteBits(val_to_encode, CountBits(val_to_encode) * 2 - 1);
+}
+
+bool BitBufferWriter::WriteSignedExponentialGolomb(int32_t val) {
+  if (val == 0) {
+    return WriteExponentialGolomb(0);
+  } else if (val > 0) {
+    uint32_t signed_val = val;
+    return WriteExponentialGolomb((signed_val * 2) - 1);
+  } else {
+    if (val == std::numeric_limits<int32_t>::min())
+      return false;  // Not supported, would cause overflow.
+    uint32_t signed_val = -val;
+    return WriteExponentialGolomb(signed_val * 2);
+  }
+}
+
+}  // namespace rtc
diff --git a/rtc_base/bitbuffer.h b/rtc_base/bitbuffer.h
new file mode 100644
index 0000000..8519414
--- /dev/null
+++ b/rtc_base/bitbuffer.h
@@ -0,0 +1,126 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_BITBUFFER_H_
+#define RTC_BASE_BITBUFFER_H_
+
+#include <stdint.h>  // For integer types.
+#include <stddef.h>  // For size_t.
+
+#include "rtc_base/constructormagic.h"
+
+namespace rtc {
+
+// A class, similar to ByteBuffer, that can parse bit-sized data out of a set of
+// bytes. Has a similar API to ByteBuffer, plus methods for reading bit-sized
+// and exponential golomb encoded data. For a writable version, use
+// BitBufferWriter. Unlike ByteBuffer, this class doesn't make a copy of the
+// source bytes, so it can be used on read-only data.
+// Sizes/counts specify bits/bytes, for clarity.
+// Byte order is assumed big-endian/network.
+class BitBuffer {
+ public:
+  BitBuffer(const uint8_t* bytes, size_t byte_count);
+
+  // Gets the current offset, in bytes/bits, from the start of the buffer. The
+  // bit offset is the offset into the current byte, in the range [0,7].
+  void GetCurrentOffset(size_t* out_byte_offset, size_t* out_bit_offset);
+
+  // The remaining bits in the byte buffer.
+  uint64_t RemainingBitCount() const;
+
+  // Reads byte-sized values from the buffer. Returns false if there isn't
+  // enough data left for the specified type.
+  bool ReadUInt8(uint8_t* val);
+  bool ReadUInt16(uint16_t* val);
+  bool ReadUInt32(uint32_t* val);
+
+  // Reads bit-sized values from the buffer. Returns false if there isn't enough
+  // data left for the specified bit count..
+  bool ReadBits(uint32_t* val, size_t bit_count);
+
+  // Peeks bit-sized values from the buffer. Returns false if there isn't enough
+  // data left for the specified number of bits. Doesn't move the current
+  // offset.
+  bool PeekBits(uint32_t* val, size_t bit_count);
+
+  // Reads the exponential golomb encoded value at the current offset.
+  // Exponential golomb values are encoded as:
+  // 1) x = source val + 1
+  // 2) In binary, write [countbits(x) - 1] 0s, then x
+  // To decode, we count the number of leading 0 bits, read that many + 1 bits,
+  // and increment the result by 1.
+  // Returns false if there isn't enough data left for the specified type, or if
+  // the value wouldn't fit in a uint32_t.
+  bool ReadExponentialGolomb(uint32_t* val);
+  // Reads signed exponential golomb values at the current offset. Signed
+  // exponential golomb values are just the unsigned values mapped to the
+  // sequence 0, 1, -1, 2, -2, etc. in order.
+  bool ReadSignedExponentialGolomb(int32_t* val);
+
+  // Moves current position |byte_count| bytes forward. Returns false if
+  // there aren't enough bytes left in the buffer.
+  bool ConsumeBytes(size_t byte_count);
+  // Moves current position |bit_count| bits forward. Returns false if
+  // there aren't enough bits left in the buffer.
+  bool ConsumeBits(size_t bit_count);
+
+  // Sets the current offset to the provied byte/bit offsets. The bit
+  // offset is from the given byte, in the range [0,7].
+  bool Seek(size_t byte_offset, size_t bit_offset);
+
+ protected:
+  const uint8_t* const bytes_;
+  // The total size of |bytes_|.
+  size_t byte_count_;
+  // The current offset, in bytes, from the start of |bytes_|.
+  size_t byte_offset_;
+  // The current offset, in bits, into the current byte.
+  size_t bit_offset_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(BitBuffer);
+};
+
+// A BitBuffer API for write operations. Supports symmetric write APIs to the
+// reading APIs of BitBuffer. Note that the read/write offset is shared with the
+// BitBuffer API, so both reading and writing will consume bytes/bits.
+class BitBufferWriter : public BitBuffer {
+ public:
+  // Constructs a bit buffer for the writable buffer of |bytes|.
+  BitBufferWriter(uint8_t* bytes, size_t byte_count);
+
+  // Writes byte-sized values from the buffer. Returns false if there isn't
+  // enough data left for the specified type.
+  bool WriteUInt8(uint8_t val);
+  bool WriteUInt16(uint16_t val);
+  bool WriteUInt32(uint32_t val);
+
+  // Writes bit-sized values to the buffer. Returns false if there isn't enough
+  // room left for the specified number of bits.
+  bool WriteBits(uint64_t val, size_t bit_count);
+
+  // Writes the exponential golomb encoded version of the supplied value.
+  // Returns false if there isn't enough room left for the value.
+  bool WriteExponentialGolomb(uint32_t val);
+  // Writes the signed exponential golomb version of the supplied value.
+  // Signed exponential golomb values are just the unsigned values mapped to the
+  // sequence 0, 1, -1, 2, -2, etc. in order.
+  bool WriteSignedExponentialGolomb(int32_t val);
+
+ private:
+  // The buffer, as a writable array.
+  uint8_t* const writable_bytes_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(BitBufferWriter);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_BITBUFFER_H_
diff --git a/rtc_base/bitbuffer_unittest.cc b/rtc_base/bitbuffer_unittest.cc
new file mode 100644
index 0000000..abf7232
--- /dev/null
+++ b/rtc_base/bitbuffer_unittest.cc
@@ -0,0 +1,329 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/bitbuffer.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/bytebuffer.h"
+#include "rtc_base/gunit.h"
+
+namespace rtc {
+
+TEST(BitBufferTest, ConsumeBits) {
+  const uint8_t bytes[64] = {0};
+  BitBuffer buffer(bytes, 32);
+  uint64_t total_bits = 32 * 8;
+  EXPECT_EQ(total_bits, buffer.RemainingBitCount());
+  EXPECT_TRUE(buffer.ConsumeBits(3));
+  total_bits -= 3;
+  EXPECT_EQ(total_bits, buffer.RemainingBitCount());
+  EXPECT_TRUE(buffer.ConsumeBits(3));
+  total_bits -= 3;
+  EXPECT_EQ(total_bits, buffer.RemainingBitCount());
+  EXPECT_TRUE(buffer.ConsumeBits(15));
+  total_bits -= 15;
+  EXPECT_EQ(total_bits, buffer.RemainingBitCount());
+  EXPECT_TRUE(buffer.ConsumeBits(37));
+  total_bits -= 37;
+  EXPECT_EQ(total_bits, buffer.RemainingBitCount());
+
+  EXPECT_FALSE(buffer.ConsumeBits(32 * 8));
+  EXPECT_EQ(total_bits, buffer.RemainingBitCount());
+}
+
+TEST(BitBufferTest, ReadBytesAligned) {
+  const uint8_t bytes[] = {0x0A, 0xBC, 0xDE, 0xF1, 0x23, 0x45, 0x67, 0x89};
+  uint8_t val8;
+  uint16_t val16;
+  uint32_t val32;
+  BitBuffer buffer(bytes, 8);
+  EXPECT_TRUE(buffer.ReadUInt8(&val8));
+  EXPECT_EQ(0x0Au, val8);
+  EXPECT_TRUE(buffer.ReadUInt8(&val8));
+  EXPECT_EQ(0xBCu, val8);
+  EXPECT_TRUE(buffer.ReadUInt16(&val16));
+  EXPECT_EQ(0xDEF1u, val16);
+  EXPECT_TRUE(buffer.ReadUInt32(&val32));
+  EXPECT_EQ(0x23456789u, val32);
+}
+
+TEST(BitBufferTest, ReadBytesOffset4) {
+  const uint8_t bytes[] = {0x0A, 0xBC, 0xDE, 0xF1, 0x23,
+                           0x45, 0x67, 0x89, 0x0A};
+  uint8_t val8;
+  uint16_t val16;
+  uint32_t val32;
+  BitBuffer buffer(bytes, 9);
+  EXPECT_TRUE(buffer.ConsumeBits(4));
+
+  EXPECT_TRUE(buffer.ReadUInt8(&val8));
+  EXPECT_EQ(0xABu, val8);
+  EXPECT_TRUE(buffer.ReadUInt8(&val8));
+  EXPECT_EQ(0xCDu, val8);
+  EXPECT_TRUE(buffer.ReadUInt16(&val16));
+  EXPECT_EQ(0xEF12u, val16);
+  EXPECT_TRUE(buffer.ReadUInt32(&val32));
+  EXPECT_EQ(0x34567890u, val32);
+}
+
+TEST(BitBufferTest, ReadBytesOffset3) {
+  // The pattern we'll check against is counting down from 0b1111. It looks
+  // weird here because it's all offset by 3.
+  // Byte pattern is:
+  //    56701234
+  //  0b00011111,
+  //  0b11011011,
+  //  0b10010111,
+  //  0b01010011,
+  //  0b00001110,
+  //  0b11001010,
+  //  0b10000110,
+  //  0b01000010
+  //       xxxxx <-- last 5 bits unused.
+
+  // The bytes. It almost looks like counting down by two at a time, except the
+  // jump at 5->3->0, since that's when the high bit is turned off.
+  const uint8_t bytes[] = {0x1F, 0xDB, 0x97, 0x53, 0x0E, 0xCA, 0x86, 0x42};
+
+  uint8_t val8;
+  uint16_t val16;
+  uint32_t val32;
+  BitBuffer buffer(bytes, 8);
+  EXPECT_TRUE(buffer.ConsumeBits(3));
+  EXPECT_TRUE(buffer.ReadUInt8(&val8));
+  EXPECT_EQ(0xFEu, val8);
+  EXPECT_TRUE(buffer.ReadUInt16(&val16));
+  EXPECT_EQ(0xDCBAu, val16);
+  EXPECT_TRUE(buffer.ReadUInt32(&val32));
+  EXPECT_EQ(0x98765432u, val32);
+  // 5 bits left unread. Not enough to read a uint8_t.
+  EXPECT_EQ(5u, buffer.RemainingBitCount());
+  EXPECT_FALSE(buffer.ReadUInt8(&val8));
+}
+
+TEST(BitBufferTest, ReadBits) {
+  // Bit values are:
+  //  0b01001101,
+  //  0b00110010
+  const uint8_t bytes[] = {0x4D, 0x32};
+  uint32_t val;
+  BitBuffer buffer(bytes, 2);
+  EXPECT_TRUE(buffer.ReadBits(&val, 3));
+  // 0b010
+  EXPECT_EQ(0x2u, val);
+  EXPECT_TRUE(buffer.ReadBits(&val, 2));
+  // 0b01
+  EXPECT_EQ(0x1u, val);
+  EXPECT_TRUE(buffer.ReadBits(&val, 7));
+  // 0b1010011
+  EXPECT_EQ(0x53u, val);
+  EXPECT_TRUE(buffer.ReadBits(&val, 2));
+  // 0b00
+  EXPECT_EQ(0x0u, val);
+  EXPECT_TRUE(buffer.ReadBits(&val, 1));
+  // 0b1
+  EXPECT_EQ(0x1u, val);
+  EXPECT_TRUE(buffer.ReadBits(&val, 1));
+  // 0b0
+  EXPECT_EQ(0x0u, val);
+
+  EXPECT_FALSE(buffer.ReadBits(&val, 1));
+}
+
+TEST(BitBufferTest, SetOffsetValues) {
+  uint8_t bytes[4] = {0};
+  BitBufferWriter buffer(bytes, 4);
+
+  size_t byte_offset, bit_offset;
+  // Bit offsets are [0,7].
+  EXPECT_TRUE(buffer.Seek(0, 0));
+  EXPECT_TRUE(buffer.Seek(0, 7));
+  buffer.GetCurrentOffset(&byte_offset, &bit_offset);
+  EXPECT_EQ(0u, byte_offset);
+  EXPECT_EQ(7u, bit_offset);
+  EXPECT_FALSE(buffer.Seek(0, 8));
+  buffer.GetCurrentOffset(&byte_offset, &bit_offset);
+  EXPECT_EQ(0u, byte_offset);
+  EXPECT_EQ(7u, bit_offset);
+  // Byte offsets are [0,length]. At byte offset length, the bit offset must be
+  // 0.
+  EXPECT_TRUE(buffer.Seek(0, 0));
+  EXPECT_TRUE(buffer.Seek(2, 4));
+  buffer.GetCurrentOffset(&byte_offset, &bit_offset);
+  EXPECT_EQ(2u, byte_offset);
+  EXPECT_EQ(4u, bit_offset);
+  EXPECT_TRUE(buffer.Seek(4, 0));
+  EXPECT_FALSE(buffer.Seek(5, 0));
+  buffer.GetCurrentOffset(&byte_offset, &bit_offset);
+  EXPECT_EQ(4u, byte_offset);
+  EXPECT_EQ(0u, bit_offset);
+  EXPECT_FALSE(buffer.Seek(4, 1));
+
+  // Disable death test on Android because it relies on fork() and doesn't play
+  // nicely.
+#if GTEST_HAS_DEATH_TEST
+#if !defined(WEBRTC_ANDROID)
+  // Passing a null out parameter is death.
+  EXPECT_DEATH(buffer.GetCurrentOffset(&byte_offset, nullptr), "");
+#endif
+#endif
+}
+
+uint64_t GolombEncoded(uint32_t val) {
+  val++;
+  uint32_t bit_counter = val;
+  uint64_t bit_count = 0;
+  while (bit_counter > 0) {
+    bit_count++;
+    bit_counter >>= 1;
+  }
+  return static_cast<uint64_t>(val) << (64 - (bit_count * 2 - 1));
+}
+
+TEST(BitBufferTest, GolombUint32Values) {
+  ByteBufferWriter byteBuffer;
+  byteBuffer.Resize(16);
+  BitBuffer buffer(reinterpret_cast<const uint8_t*>(byteBuffer.Data()),
+                   byteBuffer.Capacity());
+  // Test over the uint32_t range with a large enough step that the test doesn't
+  // take forever. Around 20,000 iterations should do.
+  const int kStep = std::numeric_limits<uint32_t>::max() / 20000;
+  for (uint32_t i = 0; i < std::numeric_limits<uint32_t>::max() - kStep;
+       i += kStep) {
+    uint64_t encoded_val = GolombEncoded(i);
+    byteBuffer.Clear();
+    byteBuffer.WriteUInt64(encoded_val);
+    uint32_t decoded_val;
+    EXPECT_TRUE(buffer.Seek(0, 0));
+    EXPECT_TRUE(buffer.ReadExponentialGolomb(&decoded_val));
+    EXPECT_EQ(i, decoded_val);
+  }
+}
+
+TEST(BitBufferTest, SignedGolombValues) {
+  uint8_t golomb_bits[] = {
+      0x80,  // 1
+      0x40,  // 010
+      0x60,  // 011
+      0x20,  // 00100
+      0x38,  // 00111
+  };
+  int32_t expected[] = {0, 1, -1, 2, -3};
+  for (size_t i = 0; i < sizeof(golomb_bits); ++i) {
+    BitBuffer buffer(&golomb_bits[i], 1);
+    int32_t decoded_val;
+    ASSERT_TRUE(buffer.ReadSignedExponentialGolomb(&decoded_val));
+    EXPECT_EQ(expected[i], decoded_val)
+        << "Mismatch in expected/decoded value for golomb_bits[" << i
+        << "]: " << static_cast<int>(golomb_bits[i]);
+  }
+}
+
+TEST(BitBufferTest, NoGolombOverread) {
+  const uint8_t bytes[] = {0x00, 0xFF, 0xFF};
+  // Make sure the bit buffer correctly enforces byte length on golomb reads.
+  // If it didn't, the above buffer would be valid at 3 bytes.
+  BitBuffer buffer(bytes, 1);
+  uint32_t decoded_val;
+  EXPECT_FALSE(buffer.ReadExponentialGolomb(&decoded_val));
+
+  BitBuffer longer_buffer(bytes, 2);
+  EXPECT_FALSE(longer_buffer.ReadExponentialGolomb(&decoded_val));
+
+  BitBuffer longest_buffer(bytes, 3);
+  EXPECT_TRUE(longest_buffer.ReadExponentialGolomb(&decoded_val));
+  // Golomb should have read 9 bits, so 0x01FF, and since it is golomb, the
+  // result is 0x01FF - 1 = 0x01FE.
+  EXPECT_EQ(0x01FEu, decoded_val);
+}
+
+TEST(BitBufferWriterTest, SymmetricReadWrite) {
+  uint8_t bytes[16] = {0};
+  BitBufferWriter buffer(bytes, 4);
+
+  // Write some bit data at various sizes.
+  EXPECT_TRUE(buffer.WriteBits(0x2u, 3));
+  EXPECT_TRUE(buffer.WriteBits(0x1u, 2));
+  EXPECT_TRUE(buffer.WriteBits(0x53u, 7));
+  EXPECT_TRUE(buffer.WriteBits(0x0u, 2));
+  EXPECT_TRUE(buffer.WriteBits(0x1u, 1));
+  EXPECT_TRUE(buffer.WriteBits(0x1ABCDu, 17));
+  // That should be all that fits in the buffer.
+  EXPECT_FALSE(buffer.WriteBits(1, 1));
+
+  EXPECT_TRUE(buffer.Seek(0, 0));
+  uint32_t val;
+  EXPECT_TRUE(buffer.ReadBits(&val, 3));
+  EXPECT_EQ(0x2u, val);
+  EXPECT_TRUE(buffer.ReadBits(&val, 2));
+  EXPECT_EQ(0x1u, val);
+  EXPECT_TRUE(buffer.ReadBits(&val, 7));
+  EXPECT_EQ(0x53u, val);
+  EXPECT_TRUE(buffer.ReadBits(&val, 2));
+  EXPECT_EQ(0x0u, val);
+  EXPECT_TRUE(buffer.ReadBits(&val, 1));
+  EXPECT_EQ(0x1u, val);
+  EXPECT_TRUE(buffer.ReadBits(&val, 17));
+  EXPECT_EQ(0x1ABCDu, val);
+  // And there should be nothing left.
+  EXPECT_FALSE(buffer.ReadBits(&val, 1));
+}
+
+TEST(BitBufferWriterTest, SymmetricBytesMisaligned) {
+  uint8_t bytes[16] = {0};
+  BitBufferWriter buffer(bytes, 16);
+
+  // Offset 3, to get things misaligned.
+  EXPECT_TRUE(buffer.ConsumeBits(3));
+  EXPECT_TRUE(buffer.WriteUInt8(0x12u));
+  EXPECT_TRUE(buffer.WriteUInt16(0x3456u));
+  EXPECT_TRUE(buffer.WriteUInt32(0x789ABCDEu));
+
+  buffer.Seek(0, 3);
+  uint8_t val8;
+  uint16_t val16;
+  uint32_t val32;
+  EXPECT_TRUE(buffer.ReadUInt8(&val8));
+  EXPECT_EQ(0x12u, val8);
+  EXPECT_TRUE(buffer.ReadUInt16(&val16));
+  EXPECT_EQ(0x3456u, val16);
+  EXPECT_TRUE(buffer.ReadUInt32(&val32));
+  EXPECT_EQ(0x789ABCDEu, val32);
+}
+
+TEST(BitBufferWriterTest, SymmetricGolomb) {
+  char test_string[] = "my precious";
+  uint8_t bytes[64] = {0};
+  BitBufferWriter buffer(bytes, 64);
+  for (size_t i = 0; i < arraysize(test_string); ++i) {
+    EXPECT_TRUE(buffer.WriteExponentialGolomb(test_string[i]));
+  }
+  buffer.Seek(0, 0);
+  for (size_t i = 0; i < arraysize(test_string); ++i) {
+    uint32_t val;
+    EXPECT_TRUE(buffer.ReadExponentialGolomb(&val));
+    EXPECT_LE(val, std::numeric_limits<uint8_t>::max());
+    EXPECT_EQ(test_string[i], static_cast<char>(val));
+  }
+}
+
+TEST(BitBufferWriterTest, WriteClearsBits) {
+  uint8_t bytes[] = {0xFF, 0xFF};
+  BitBufferWriter buffer(bytes, 2);
+  EXPECT_TRUE(buffer.ConsumeBits(3));
+  EXPECT_TRUE(buffer.WriteBits(0, 1));
+  EXPECT_EQ(0xEFu, bytes[0]);
+  EXPECT_TRUE(buffer.WriteBits(0, 3));
+  EXPECT_EQ(0xE1u, bytes[0]);
+  EXPECT_TRUE(buffer.WriteBits(0, 2));
+  EXPECT_EQ(0xE0u, bytes[0]);
+  EXPECT_EQ(0x7F, bytes[1]);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/bitrateallocationstrategy.cc b/rtc_base/bitrateallocationstrategy.cc
new file mode 100644
index 0000000..d2a06cd
--- /dev/null
+++ b/rtc_base/bitrateallocationstrategy.cc
@@ -0,0 +1,152 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/bitrateallocationstrategy.h"
+#include <algorithm>
+#include <utility>
+
+namespace rtc {
+
+// The purpose of this is to allow video streams to use extra bandwidth for FEC.
+// TODO(bugs.webrtc.org/8541): May be worth to refactor to keep this logic in
+// video send stream. Similar logic is implemented in BitrateAllocator.
+
+const int kTransmissionMaxBitrateMultiplier = 2;
+
+std::vector<uint32_t> BitrateAllocationStrategy::SetAllBitratesToMinimum(
+    const ArrayView<const TrackConfig*> track_configs) {
+  std::vector<uint32_t> track_allocations;
+  for (const auto* track_config : track_configs) {
+    track_allocations.push_back(track_config->min_bitrate_bps);
+  }
+  return track_allocations;
+}
+
+std::vector<uint32_t> BitrateAllocationStrategy::DistributeBitratesEvenly(
+    const ArrayView<const TrackConfig*> track_configs,
+    uint32_t available_bitrate) {
+  std::vector<uint32_t> track_allocations =
+      SetAllBitratesToMinimum(track_configs);
+  uint32_t sum_min_bitrates = 0;
+  uint32_t sum_max_bitrates = 0;
+  for (const auto* track_config : track_configs) {
+    sum_min_bitrates += track_config->min_bitrate_bps;
+    sum_max_bitrates += track_config->max_bitrate_bps;
+  }
+  if (sum_min_bitrates >= available_bitrate) {
+    return track_allocations;
+  } else if (available_bitrate >= sum_max_bitrates) {
+    auto track_allocations_it = track_allocations.begin();
+    for (const auto* track_config : track_configs) {
+      *track_allocations_it++ = track_config->max_bitrate_bps;
+    }
+    return track_allocations;
+  } else {
+    // If sum_min_bitrates < available_bitrate < sum_max_bitrates allocate
+    // bitrates evenly up to max_bitrate_bps starting from the track with the
+    // lowest max_bitrate_bps. Remainder of available bitrate split evenly among
+    // remaining tracks.
+    std::multimap<uint32_t, size_t> max_bitrate_sorted_configs;
+    for (const TrackConfig** track_configs_it = track_configs.begin();
+         track_configs_it != track_configs.end(); ++track_configs_it) {
+      max_bitrate_sorted_configs.insert(
+          std::make_pair((*track_configs_it)->max_bitrate_bps,
+                         track_configs_it - track_configs.begin()));
+    }
+    uint32_t total_available_increase = available_bitrate - sum_min_bitrates;
+    int processed_configs = 0;
+    for (const auto& track_config_pair : max_bitrate_sorted_configs) {
+      uint32_t available_increase =
+          total_available_increase /
+          (static_cast<uint32_t>(track_configs.size() - processed_configs));
+      uint32_t consumed_increase =
+          std::min(track_configs[track_config_pair.second]->max_bitrate_bps -
+                       track_configs[track_config_pair.second]->min_bitrate_bps,
+                   available_increase);
+      track_allocations[track_config_pair.second] += consumed_increase;
+      total_available_increase -= consumed_increase;
+      ++processed_configs;
+    }
+    return track_allocations;
+  }
+}
+
+AudioPriorityBitrateAllocationStrategy::AudioPriorityBitrateAllocationStrategy(
+    std::string audio_track_id,
+    uint32_t sufficient_audio_bitrate)
+    : audio_track_id_(audio_track_id),
+      sufficient_audio_bitrate_(sufficient_audio_bitrate) {}
+
+std::vector<uint32_t> AudioPriorityBitrateAllocationStrategy::AllocateBitrates(
+    uint32_t available_bitrate,
+    const ArrayView<const TrackConfig*> track_configs) {
+  const TrackConfig* audio_track_config = NULL;
+  size_t audio_config_index = 0;
+  uint32_t sum_min_bitrates = 0;
+  uint32_t sum_max_bitrates = 0;
+
+  for (const auto*& track_config : track_configs) {
+    sum_min_bitrates += track_config->min_bitrate_bps;
+    sum_max_bitrates += track_config->max_bitrate_bps;
+    if (track_config->track_id == audio_track_id_) {
+      audio_track_config = track_config;
+      audio_config_index = &track_config - &track_configs[0];
+    }
+  }
+  if (sum_max_bitrates < available_bitrate) {
+    // Allow non audio streams to go above max upto
+    // kTransmissionMaxBitrateMultiplier * max_bitrate_bps
+    size_t track_configs_size = track_configs.size();
+    std::vector<TrackConfig> increased_track_configs(track_configs_size);
+    std::vector<const TrackConfig*> increased_track_configs_ptr(
+        track_configs_size);
+    for (unsigned long i = 0; i < track_configs_size; i++) {
+      increased_track_configs[i] = (*track_configs[i]);
+      increased_track_configs_ptr[i] = &increased_track_configs[i];
+      if (track_configs[i]->track_id != audio_track_id_) {
+        increased_track_configs[i].max_bitrate_bps =
+            track_configs[i]->max_bitrate_bps *
+            kTransmissionMaxBitrateMultiplier;
+      }
+    }
+    return DistributeBitratesEvenly(increased_track_configs_ptr,
+                                    available_bitrate);
+  }
+  if (audio_track_config == nullptr) {
+    return DistributeBitratesEvenly(track_configs, available_bitrate);
+  }
+  auto safe_sufficient_audio_bitrate = std::min(
+      std::max(audio_track_config->min_bitrate_bps, sufficient_audio_bitrate_),
+      audio_track_config->max_bitrate_bps);
+  if (available_bitrate <= sum_min_bitrates) {
+    return SetAllBitratesToMinimum(track_configs);
+  } else {
+    if (available_bitrate <= sum_min_bitrates + safe_sufficient_audio_bitrate -
+                                 audio_track_config->min_bitrate_bps) {
+      std::vector<uint32_t> track_allocations =
+          SetAllBitratesToMinimum(track_configs);
+      track_allocations[audio_config_index] +=
+          available_bitrate - sum_min_bitrates;
+      return track_allocations;
+    } else {
+      // Setting audio track minimum to safe_sufficient_audio_bitrate will
+      // allow using DistributeBitratesEvenly to allocate at least sufficient
+      // bitrate for audio and the rest evenly.
+      TrackConfig sufficient_track_config(*track_configs[audio_config_index]);
+      sufficient_track_config.min_bitrate_bps = safe_sufficient_audio_bitrate;
+      track_configs[audio_config_index] = &sufficient_track_config;
+      std::vector<uint32_t> track_allocations =
+          DistributeBitratesEvenly(track_configs, available_bitrate);
+      return track_allocations;
+    }
+  }
+}
+
+}  // namespace rtc
diff --git a/rtc_base/bitrateallocationstrategy.h b/rtc_base/bitrateallocationstrategy.h
new file mode 100644
index 0000000..f711d1f
--- /dev/null
+++ b/rtc_base/bitrateallocationstrategy.h
@@ -0,0 +1,101 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_BITRATEALLOCATIONSTRATEGY_H_
+#define RTC_BASE_BITRATEALLOCATIONSTRATEGY_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+#include "api/array_view.h"
+#include "rtc_base/checks.h"
+
+namespace rtc {
+
+// Pluggable strategy allows configuration of bitrate allocation per media
+// track.
+//
+// The strategy should provide allocation for every track passed with
+// track_configs in AllocateBitrates. The allocations are constrained by
+// max_bitrate_bps, min_bitrate_bps defining the track supported range and
+// enforce_min_bitrate indicating if the track my be paused by allocating 0
+// bitrate.
+class BitrateAllocationStrategy {
+ public:
+  struct TrackConfig {
+    TrackConfig(uint32_t min_bitrate_bps,
+                uint32_t max_bitrate_bps,
+                bool enforce_min_bitrate,
+                std::string track_id)
+        : min_bitrate_bps(min_bitrate_bps),
+          max_bitrate_bps(max_bitrate_bps),
+          enforce_min_bitrate(enforce_min_bitrate),
+          track_id(track_id) {}
+    TrackConfig(const TrackConfig& track_config) = default;
+    virtual ~TrackConfig() = default;
+    TrackConfig() {}
+
+    // Minimum bitrate supported by track.
+    uint32_t min_bitrate_bps;
+
+    // Maximum bitrate supported by track.
+    uint32_t max_bitrate_bps;
+
+    // True means track may not be paused by allocating 0 bitrate.
+    bool enforce_min_bitrate;
+
+    // MediaStreamTrack ID as defined by application. May be empty.
+    std::string track_id;
+  };
+
+  static std::vector<uint32_t> SetAllBitratesToMinimum(
+      const ArrayView<const TrackConfig*> track_configs);
+  static std::vector<uint32_t> DistributeBitratesEvenly(
+      const ArrayView<const TrackConfig*> track_configs,
+      uint32_t available_bitrate);
+
+  // Strategy is expected to allocate all available_bitrate up to the sum of
+  // max_bitrate_bps of all tracks. If available_bitrate is less than the sum of
+  // min_bitrate_bps of all tracks, tracks having enforce_min_bitrate set to
+  // false may get 0 allocation and are suppoused to pause, tracks with
+  // enforce_min_bitrate set to true are expecting to get min_bitrate_bps.
+  //
+  // If the strategy will allocate more than available_bitrate it may cause
+  // overuse of the currently available network capacity and may cause increase
+  // in RTT and packet loss. Allocating less than available bitrate may cause
+  // available_bitrate decrease.
+  virtual std::vector<uint32_t> AllocateBitrates(
+      uint32_t available_bitrate,
+      const ArrayView<const TrackConfig*> track_configs) = 0;
+
+  virtual ~BitrateAllocationStrategy() = default;
+};
+
+// Simple allocation strategy giving priority to audio until
+// sufficient_audio_bitrate is reached. Bitrate is distributed evenly between
+// the tracks after sufficient_audio_bitrate is reached. This implementation
+// does not pause tracks even if enforce_min_bitrate is false.
+class AudioPriorityBitrateAllocationStrategy
+    : public BitrateAllocationStrategy {
+ public:
+  AudioPriorityBitrateAllocationStrategy(std::string audio_track_id,
+                                         uint32_t sufficient_audio_bitrate);
+  std::vector<uint32_t> AllocateBitrates(
+      uint32_t available_bitrate,
+      const ArrayView<const TrackConfig*> track_configs) override;
+
+ private:
+  std::string audio_track_id_;
+  uint32_t sufficient_audio_bitrate_;
+};
+}  // namespace rtc
+
+#endif  // RTC_BASE_BITRATEALLOCATIONSTRATEGY_H_
diff --git a/rtc_base/bitrateallocationstrategy_unittest.cc b/rtc_base/bitrateallocationstrategy_unittest.cc
new file mode 100644
index 0000000..bfc41f5
--- /dev/null
+++ b/rtc_base/bitrateallocationstrategy_unittest.cc
@@ -0,0 +1,244 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/bitrateallocationstrategy.h"
+#include "rtc_base/gunit.h"
+
+namespace rtc {
+
+std::vector<const rtc::BitrateAllocationStrategy::TrackConfig*>
+MakeTrackConfigPtrsVector(
+    const std::vector<BitrateAllocationStrategy::TrackConfig>& track_configs) {
+  std::vector<const rtc::BitrateAllocationStrategy::TrackConfig*>
+      track_config_ptrs(track_configs.size());
+  int i = 0;
+  for (const auto& c : track_configs) {
+    track_config_ptrs[i++] = &c;
+  }
+  return track_config_ptrs;
+}
+
+TEST(BitrateAllocationStrategyTest, SetAllBitratesToMinimum) {
+  const std::string audio_track_id = "audio_track";
+  constexpr uint32_t min_audio_bitrate = 6000;
+  constexpr uint32_t max_audio_bitrate = 64000;
+  const std::string video_track_id = "video_track";
+  constexpr uint32_t min_video_bitrate = 30000;
+  constexpr uint32_t max_video_bitrate = 300000;
+  constexpr uint32_t min_other_bitrate = 3000;
+  constexpr uint32_t max_other_bitrate = 30000;
+
+  std::vector<BitrateAllocationStrategy::TrackConfig> track_configs = {
+      BitrateAllocationStrategy::TrackConfig(
+          min_audio_bitrate, max_audio_bitrate, false, audio_track_id),
+      BitrateAllocationStrategy::TrackConfig(
+          min_video_bitrate, max_video_bitrate, false, video_track_id),
+      BitrateAllocationStrategy::TrackConfig(min_other_bitrate,
+                                             max_other_bitrate, false, "")};
+
+  std::vector<const rtc::BitrateAllocationStrategy::TrackConfig*>
+      track_config_ptrs = MakeTrackConfigPtrsVector(track_configs);
+
+  std::vector<uint32_t> allocations =
+      BitrateAllocationStrategy::SetAllBitratesToMinimum(track_config_ptrs);
+  EXPECT_EQ(min_audio_bitrate, allocations[0]);
+  EXPECT_EQ(min_video_bitrate, allocations[1]);
+  EXPECT_EQ(min_other_bitrate, allocations[2]);
+}
+
+TEST(BitrateAllocationStrategyTest, DistributeBitratesEvenly) {
+  const std::string audio_track_id = "audio_track";
+  constexpr uint32_t min_audio_bitrate = 16000;
+  constexpr uint32_t max_audio_bitrate = 64000;
+  const std::string video_track_id = "video_track";
+  constexpr uint32_t min_video_bitrate = 30000;
+  constexpr uint32_t max_video_bitrate = 300000;
+  constexpr uint32_t min_other_bitrate = 3000;
+  constexpr uint32_t max_other_bitrate = 30000;
+  constexpr uint32_t available_bitrate = 52000;
+  constexpr uint32_t even_bitrate_increase =
+      (available_bitrate - min_audio_bitrate - min_video_bitrate -
+       min_other_bitrate) /
+      3;
+
+  std::vector<BitrateAllocationStrategy::TrackConfig> track_configs = {
+      BitrateAllocationStrategy::TrackConfig(
+          min_audio_bitrate, max_audio_bitrate, false, audio_track_id),
+      BitrateAllocationStrategy::TrackConfig(
+          min_video_bitrate, max_video_bitrate, false, video_track_id),
+      BitrateAllocationStrategy::TrackConfig(min_other_bitrate,
+                                             max_other_bitrate, false, "")};
+
+  std::vector<const rtc::BitrateAllocationStrategy::TrackConfig*>
+      track_config_ptrs = MakeTrackConfigPtrsVector(track_configs);
+
+  std::vector<uint32_t> allocations =
+      BitrateAllocationStrategy::DistributeBitratesEvenly(track_config_ptrs,
+                                                          available_bitrate);
+  EXPECT_EQ(min_audio_bitrate + even_bitrate_increase, allocations[0]);
+  EXPECT_EQ(min_video_bitrate + even_bitrate_increase, allocations[1]);
+  EXPECT_EQ(min_other_bitrate + even_bitrate_increase, allocations[2]);
+}
+
+std::vector<uint32_t> RunAudioPriorityAllocation(
+    uint32_t sufficient_audio_bitrate,
+    std::string audio_track_id,
+    uint32_t min_audio_bitrate,
+    uint32_t max_audio_bitrate,
+    std::string video_track_id,
+    uint32_t min_video_bitrate,
+    uint32_t max_video_bitrate,
+    uint32_t min_other_bitrate,
+    uint32_t max_other_bitrate,
+    uint32_t available_bitrate) {
+  AudioPriorityBitrateAllocationStrategy allocation_strategy(
+      audio_track_id, sufficient_audio_bitrate);
+  std::vector<BitrateAllocationStrategy::TrackConfig> track_configs = {
+      BitrateAllocationStrategy::TrackConfig(
+          min_audio_bitrate, max_audio_bitrate, false, audio_track_id),
+      BitrateAllocationStrategy::TrackConfig(
+          min_video_bitrate, max_video_bitrate, false, video_track_id),
+      BitrateAllocationStrategy::TrackConfig(min_other_bitrate,
+                                             max_other_bitrate, false, "")};
+
+  std::vector<const rtc::BitrateAllocationStrategy::TrackConfig*>
+      track_config_ptrs = MakeTrackConfigPtrsVector(track_configs);
+
+  return allocation_strategy.AllocateBitrates(available_bitrate,
+                                              track_config_ptrs);
+}
+
+// Test that when the available bitrate is less than the sum of the minimum
+// bitrates, the minimum bitrate is allocated for each track.
+TEST(AudioPriorityBitrateAllocationStrategyTest, MinAllocateBitrate) {
+  constexpr uint32_t sufficient_audio_bitrate = 16000;
+  const std::string audio_track_id = "audio_track";
+  constexpr uint32_t min_audio_bitrate = 6000;
+  constexpr uint32_t max_audio_bitrate = 64000;
+  const std::string video_track_id = "video_track";
+  constexpr uint32_t min_video_bitrate = 30000;
+  constexpr uint32_t max_video_bitrate = 300000;
+  constexpr uint32_t min_other_bitrate = 3000;
+  constexpr uint32_t max_other_bitrate = 30000;
+  constexpr uint32_t available_bitrate = 10000;
+
+  std::vector<uint32_t> allocations = RunAudioPriorityAllocation(
+      sufficient_audio_bitrate, audio_track_id, min_audio_bitrate,
+      max_audio_bitrate, video_track_id, min_video_bitrate, max_video_bitrate,
+      min_other_bitrate, max_other_bitrate, available_bitrate);
+  EXPECT_EQ(min_audio_bitrate, allocations[0]);
+  EXPECT_EQ(min_video_bitrate, allocations[1]);
+  EXPECT_EQ(min_other_bitrate, allocations[2]);
+}
+
+// Test that when the available bitrate is more than the sum of the max
+// bitrates, the max bitrate is allocated for each track.
+TEST(AudioPriorityBitrateAllocationStrategyTest, MaxAllocateBitrate) {
+  constexpr uint32_t sufficient_audio_bitrate = 16000;
+  const std::string audio_track_id = "audio_track";
+  constexpr uint32_t min_audio_bitrate = 6000;
+  constexpr uint32_t max_audio_bitrate = 64000;
+  const std::string video_track_id = "video_track";
+  constexpr uint32_t min_video_bitrate = 30000;
+  constexpr uint32_t max_video_bitrate = 300000;
+  constexpr uint32_t min_other_bitrate = 3000;
+  constexpr uint32_t max_other_bitrate = 30000;
+  constexpr uint32_t available_bitrate = 400000;
+
+  std::vector<uint32_t> allocations = RunAudioPriorityAllocation(
+      sufficient_audio_bitrate, audio_track_id, min_audio_bitrate,
+      max_audio_bitrate, video_track_id, min_video_bitrate, max_video_bitrate,
+      min_other_bitrate, max_other_bitrate, available_bitrate);
+
+  // TODO(bugs.webrtc.org/8541): Until the bug is fixed not audio streams will
+  // get up to kTransmissionMaxBitrateMultiplier*max_bitrate
+  constexpr uint32_t video_bitrate =
+      (available_bitrate - max_audio_bitrate - max_other_bitrate * 2);
+  EXPECT_EQ(max_audio_bitrate, allocations[0]);
+  EXPECT_EQ(video_bitrate, allocations[1]);
+  EXPECT_EQ(max_other_bitrate * 2, allocations[2]);
+}
+
+// Test that audio track will get up to sufficient bitrate before video and
+// other bitrate will be allocated.
+TEST(AudioPriorityBitrateAllocationStrategyTest, AudioPriorityAllocateBitrate) {
+  constexpr uint32_t sufficient_audio_bitrate = 16000;
+  const std::string audio_track_id = "audio_track";
+  constexpr uint32_t min_audio_bitrate = 6000;
+  constexpr uint32_t max_audio_bitrate = 64000;
+  const std::string video_track_id = "video_track";
+  constexpr uint32_t min_video_bitrate = 30000;
+  constexpr uint32_t max_video_bitrate = 300000;
+  constexpr uint32_t min_other_bitrate = 3000;
+  constexpr uint32_t max_other_bitrate = 30000;
+  constexpr uint32_t available_bitrate = 49000;
+
+  std::vector<uint32_t> allocations = RunAudioPriorityAllocation(
+      sufficient_audio_bitrate, audio_track_id, min_audio_bitrate,
+      max_audio_bitrate, video_track_id, min_video_bitrate, max_video_bitrate,
+      min_other_bitrate, max_other_bitrate, available_bitrate);
+  EXPECT_EQ(sufficient_audio_bitrate, allocations[0]);
+  EXPECT_EQ(min_video_bitrate, allocations[1]);
+  EXPECT_EQ(min_other_bitrate, allocations[2]);
+}
+
+// Test that bitrate will be allocated evenly after sufficient audio bitrate is
+// allocated.
+TEST(AudioPriorityBitrateAllocationStrategyTest, EvenAllocateBitrate) {
+  constexpr uint32_t sufficient_audio_bitrate = 16000;
+  const std::string audio_track_id = "audio_track";
+  constexpr uint32_t min_audio_bitrate = 6000;
+  constexpr uint32_t max_audio_bitrate = 64000;
+  const std::string video_track_id = "video_track";
+  constexpr uint32_t min_video_bitrate = 30000;
+  constexpr uint32_t max_video_bitrate = 300000;
+  constexpr uint32_t min_other_bitrate = 3000;
+  constexpr uint32_t max_other_bitrate = 30000;
+  constexpr uint32_t available_bitrate = 52000;
+  constexpr uint32_t even_bitrate_increase =
+      (available_bitrate - sufficient_audio_bitrate - min_video_bitrate -
+       min_other_bitrate) /
+      3;
+
+  std::vector<uint32_t> allocations = RunAudioPriorityAllocation(
+      sufficient_audio_bitrate, audio_track_id, min_audio_bitrate,
+      max_audio_bitrate, video_track_id, min_video_bitrate, max_video_bitrate,
+      min_other_bitrate, max_other_bitrate, available_bitrate);
+  EXPECT_EQ(sufficient_audio_bitrate + even_bitrate_increase, allocations[0]);
+  EXPECT_EQ(min_video_bitrate + even_bitrate_increase, allocations[1]);
+  EXPECT_EQ(min_other_bitrate + even_bitrate_increase, allocations[2]);
+}
+
+// Test that bitrate will be allocated to video after audio and other max
+// allocation.
+TEST(AudioPriorityBitrateAllocationStrategyTest, VideoAllocateBitrate) {
+  constexpr uint32_t sufficient_audio_bitrate = 16000;
+  const std::string audio_track_id = "audio_track";
+  constexpr uint32_t min_audio_bitrate = 6000;
+  constexpr uint32_t max_audio_bitrate = 64000;
+  const std::string video_track_id = "video_track";
+  constexpr uint32_t min_video_bitrate = 30000;
+  constexpr uint32_t max_video_bitrate = 300000;
+  constexpr uint32_t min_other_bitrate = 3000;
+  constexpr uint32_t max_other_bitrate = 30000;
+  constexpr uint32_t available_bitrate = 200000;
+  constexpr uint32_t video_bitrate =
+      available_bitrate - max_audio_bitrate - max_other_bitrate;
+
+  std::vector<uint32_t> allocations = RunAudioPriorityAllocation(
+      sufficient_audio_bitrate, audio_track_id, min_audio_bitrate,
+      max_audio_bitrate, video_track_id, min_video_bitrate, max_video_bitrate,
+      min_other_bitrate, max_other_bitrate, available_bitrate);
+  EXPECT_EQ(max_audio_bitrate, allocations[0]);
+  EXPECT_EQ(video_bitrate, allocations[1]);
+  EXPECT_EQ(max_other_bitrate, allocations[2]);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/buffer.h b/rtc_base/buffer.h
new file mode 100644
index 0000000..64974d3
--- /dev/null
+++ b/rtc_base/buffer.h
@@ -0,0 +1,429 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_BUFFER_H_
+#define RTC_BASE_BUFFER_H_
+
+#include <algorithm>
+#include <cstring>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "api/array_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/type_traits.h"
+#include "rtc_base/zero_memory.h"
+
+namespace rtc {
+
+namespace internal {
+
+// (Internal; please don't use outside this file.) Determines if elements of
+// type U are compatible with a BufferT<T>. For most types, we just ignore
+// top-level const and forbid top-level volatile and require T and U to be
+// otherwise equal, but all byte-sized integers (notably char, int8_t, and
+// uint8_t) are compatible with each other. (Note: We aim to get rid of this
+// behavior, and treat all types the same.)
+template <typename T, typename U>
+struct BufferCompat {
+  static constexpr bool value =
+      !std::is_volatile<U>::value &&
+      ((std::is_integral<T>::value && sizeof(T) == 1)
+           ? (std::is_integral<U>::value && sizeof(U) == 1)
+           : (std::is_same<T, typename std::remove_const<U>::type>::value));
+};
+
+}  // namespace internal
+
+// Basic buffer class, can be grown and shrunk dynamically.
+// Unlike std::string/vector, does not initialize data when increasing size.
+// If "ZeroOnFree" is true, any memory is explicitly cleared before releasing.
+// The type alias "ZeroOnFreeBuffer" below should be used instead of setting
+// "ZeroOnFree" in the template manually to "true".
+template <typename T, bool ZeroOnFree = false>
+class BufferT {
+  // We want T's destructor and default constructor to be trivial, i.e. perform
+  // no action, so that we don't have to touch the memory we allocate and
+  // deallocate. And we want T to be trivially copyable, so that we can copy T
+  // instances with std::memcpy. This is precisely the definition of a trivial
+  // type.
+  static_assert(std::is_trivial<T>::value, "T must be a trivial type.");
+
+  // This class relies heavily on being able to mutate its data.
+  static_assert(!std::is_const<T>::value, "T may not be const");
+
+ public:
+  using value_type = T;
+
+  // An empty BufferT.
+  BufferT() : size_(0), capacity_(0), data_(nullptr) {
+    RTC_DCHECK(IsConsistent());
+  }
+
+  // Disable copy construction and copy assignment, since copying a buffer is
+  // expensive enough that we want to force the user to be explicit about it.
+  BufferT(const BufferT&) = delete;
+  BufferT& operator=(const BufferT&) = delete;
+
+  BufferT(BufferT&& buf)
+      : size_(buf.size()),
+        capacity_(buf.capacity()),
+        data_(std::move(buf.data_)) {
+    RTC_DCHECK(IsConsistent());
+    buf.OnMovedFrom();
+  }
+
+  // Construct a buffer with the specified number of uninitialized elements.
+  explicit BufferT(size_t size) : BufferT(size, size) {}
+
+  BufferT(size_t size, size_t capacity)
+      : size_(size),
+        capacity_(std::max(size, capacity)),
+        data_(new T[capacity_]) {
+    RTC_DCHECK(IsConsistent());
+  }
+
+  // Construct a buffer and copy the specified number of elements into it.
+  template <typename U,
+            typename std::enable_if<
+                internal::BufferCompat<T, U>::value>::type* = nullptr>
+  BufferT(const U* data, size_t size) : BufferT(data, size, size) {}
+
+  template <typename U,
+            typename std::enable_if<
+                internal::BufferCompat<T, U>::value>::type* = nullptr>
+  BufferT(U* data, size_t size, size_t capacity) : BufferT(size, capacity) {
+    static_assert(sizeof(T) == sizeof(U), "");
+    std::memcpy(data_.get(), data, size * sizeof(U));
+  }
+
+  // Construct a buffer from the contents of an array.
+  template <typename U,
+            size_t N,
+            typename std::enable_if<
+                internal::BufferCompat<T, U>::value>::type* = nullptr>
+  BufferT(U (&array)[N]) : BufferT(array, N) {}
+
+  ~BufferT() { MaybeZeroCompleteBuffer(); }
+
+  // Get a pointer to the data. Just .data() will give you a (const) T*, but if
+  // T is a byte-sized integer, you may also use .data<U>() for any other
+  // byte-sized integer U.
+  template <typename U = T,
+            typename std::enable_if<
+                internal::BufferCompat<T, U>::value>::type* = nullptr>
+  const U* data() const {
+    RTC_DCHECK(IsConsistent());
+    return reinterpret_cast<U*>(data_.get());
+  }
+
+  template <typename U = T,
+            typename std::enable_if<
+                internal::BufferCompat<T, U>::value>::type* = nullptr>
+  U* data() {
+    RTC_DCHECK(IsConsistent());
+    return reinterpret_cast<U*>(data_.get());
+  }
+
+  bool empty() const {
+    RTC_DCHECK(IsConsistent());
+    return size_ == 0;
+  }
+
+  size_t size() const {
+    RTC_DCHECK(IsConsistent());
+    return size_;
+  }
+
+  size_t capacity() const {
+    RTC_DCHECK(IsConsistent());
+    return capacity_;
+  }
+
+  BufferT& operator=(BufferT&& buf) {
+    RTC_DCHECK(IsConsistent());
+    RTC_DCHECK(buf.IsConsistent());
+    size_ = buf.size_;
+    capacity_ = buf.capacity_;
+    data_ = std::move(buf.data_);
+    buf.OnMovedFrom();
+    return *this;
+  }
+
+  bool operator==(const BufferT& buf) const {
+    RTC_DCHECK(IsConsistent());
+    if (size_ != buf.size_) {
+      return false;
+    }
+    if (std::is_integral<T>::value) {
+      // Optimization.
+      return std::memcmp(data_.get(), buf.data_.get(), size_ * sizeof(T)) == 0;
+    }
+    for (size_t i = 0; i < size_; ++i) {
+      if (data_[i] != buf.data_[i]) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  bool operator!=(const BufferT& buf) const { return !(*this == buf); }
+
+  T& operator[](size_t index) {
+    RTC_DCHECK_LT(index, size_);
+    return data()[index];
+  }
+
+  T operator[](size_t index) const {
+    RTC_DCHECK_LT(index, size_);
+    return data()[index];
+  }
+
+  T* begin() { return data(); }
+  T* end() { return data() + size(); }
+  const T* begin() const { return data(); }
+  const T* end() const { return data() + size(); }
+  const T* cbegin() const { return data(); }
+  const T* cend() const { return data() + size(); }
+
+  // The SetData functions replace the contents of the buffer. They accept the
+  // same input types as the constructors.
+  template <typename U,
+            typename std::enable_if<
+                internal::BufferCompat<T, U>::value>::type* = nullptr>
+  void SetData(const U* data, size_t size) {
+    RTC_DCHECK(IsConsistent());
+    const size_t old_size = size_;
+    size_ = 0;
+    AppendData(data, size);
+    if (ZeroOnFree && size_ < old_size) {
+      ZeroTrailingData(old_size - size_);
+    }
+  }
+
+  template <typename U,
+            size_t N,
+            typename std::enable_if<
+                internal::BufferCompat<T, U>::value>::type* = nullptr>
+  void SetData(const U (&array)[N]) {
+    SetData(array, N);
+  }
+
+  template <typename W,
+            typename std::enable_if<
+                HasDataAndSize<const W, const T>::value>::type* = nullptr>
+  void SetData(const W& w) {
+    SetData(w.data(), w.size());
+  }
+
+  // Replaces the data in the buffer with at most |max_elements| of data, using
+  // the function |setter|, which should have the following signature:
+  //
+  //   size_t setter(ArrayView<U> view)
+  //
+  // |setter| is given an appropriately typed ArrayView of length exactly
+  // |max_elements| that describes the area where it should write the data; it
+  // should return the number of elements actually written. (If it doesn't fill
+  // the whole ArrayView, it should leave the unused space at the end.)
+  template <typename U = T,
+            typename F,
+            typename std::enable_if<
+                internal::BufferCompat<T, U>::value>::type* = nullptr>
+  size_t SetData(size_t max_elements, F&& setter) {
+    RTC_DCHECK(IsConsistent());
+    const size_t old_size = size_;
+    size_ = 0;
+    const size_t written = AppendData<U>(max_elements, std::forward<F>(setter));
+    if (ZeroOnFree && size_ < old_size) {
+      ZeroTrailingData(old_size - size_);
+    }
+    return written;
+  }
+
+  // The AppendData functions add data to the end of the buffer. They accept
+  // the same input types as the constructors.
+  template <typename U,
+            typename std::enable_if<
+                internal::BufferCompat<T, U>::value>::type* = nullptr>
+  void AppendData(const U* data, size_t size) {
+    RTC_DCHECK(IsConsistent());
+    const size_t new_size = size_ + size;
+    EnsureCapacityWithHeadroom(new_size, true);
+    static_assert(sizeof(T) == sizeof(U), "");
+    std::memcpy(data_.get() + size_, data, size * sizeof(U));
+    size_ = new_size;
+    RTC_DCHECK(IsConsistent());
+  }
+
+  template <typename U,
+            size_t N,
+            typename std::enable_if<
+                internal::BufferCompat<T, U>::value>::type* = nullptr>
+  void AppendData(const U (&array)[N]) {
+    AppendData(array, N);
+  }
+
+  template <typename W,
+            typename std::enable_if<
+                HasDataAndSize<const W, const T>::value>::type* = nullptr>
+  void AppendData(const W& w) {
+    AppendData(w.data(), w.size());
+  }
+
+  template <typename U,
+            typename std::enable_if<
+                internal::BufferCompat<T, U>::value>::type* = nullptr>
+  void AppendData(const U& item) {
+    AppendData(&item, 1);
+  }
+
+  // Appends at most |max_elements| to the end of the buffer, using the function
+  // |setter|, which should have the following signature:
+  //
+  //   size_t setter(ArrayView<U> view)
+  //
+  // |setter| is given an appropriately typed ArrayView of length exactly
+  // |max_elements| that describes the area where it should write the data; it
+  // should return the number of elements actually written. (If it doesn't fill
+  // the whole ArrayView, it should leave the unused space at the end.)
+  template <typename U = T,
+            typename F,
+            typename std::enable_if<
+                internal::BufferCompat<T, U>::value>::type* = nullptr>
+  size_t AppendData(size_t max_elements, F&& setter) {
+    RTC_DCHECK(IsConsistent());
+    const size_t old_size = size_;
+    SetSize(old_size + max_elements);
+    U* base_ptr = data<U>() + old_size;
+    size_t written_elements = setter(rtc::ArrayView<U>(base_ptr, max_elements));
+
+    RTC_CHECK_LE(written_elements, max_elements);
+    size_ = old_size + written_elements;
+    RTC_DCHECK(IsConsistent());
+    return written_elements;
+  }
+
+  // Sets the size of the buffer. If the new size is smaller than the old, the
+  // buffer contents will be kept but truncated; if the new size is greater,
+  // the existing contents will be kept and the new space will be
+  // uninitialized.
+  void SetSize(size_t size) {
+    const size_t old_size = size_;
+    EnsureCapacityWithHeadroom(size, true);
+    size_ = size;
+    if (ZeroOnFree && size_ < old_size) {
+      ZeroTrailingData(old_size - size_);
+    }
+  }
+
+  // Ensure that the buffer size can be increased to at least capacity without
+  // further reallocation. (Of course, this operation might need to reallocate
+  // the buffer.)
+  void EnsureCapacity(size_t capacity) {
+    // Don't allocate extra headroom, since the user is asking for a specific
+    // capacity.
+    EnsureCapacityWithHeadroom(capacity, false);
+  }
+
+  // Resets the buffer to zero size without altering capacity. Works even if the
+  // buffer has been moved from.
+  void Clear() {
+    MaybeZeroCompleteBuffer();
+    size_ = 0;
+    RTC_DCHECK(IsConsistent());
+  }
+
+  // Swaps two buffers. Also works for buffers that have been moved from.
+  friend void swap(BufferT& a, BufferT& b) {
+    using std::swap;
+    swap(a.size_, b.size_);
+    swap(a.capacity_, b.capacity_);
+    swap(a.data_, b.data_);
+  }
+
+ private:
+  void EnsureCapacityWithHeadroom(size_t capacity, bool extra_headroom) {
+    RTC_DCHECK(IsConsistent());
+    if (capacity <= capacity_)
+      return;
+
+    // If the caller asks for extra headroom, ensure that the new capacity is
+    // >= 1.5 times the old capacity. Any constant > 1 is sufficient to prevent
+    // quadratic behavior; as to why we pick 1.5 in particular, see
+    // https://github.com/facebook/folly/blob/master/folly/docs/FBVector.md and
+    // http://www.gahcep.com/cpp-internals-stl-vector-part-1/.
+    const size_t new_capacity =
+        extra_headroom ? std::max(capacity, capacity_ + capacity_ / 2)
+                       : capacity;
+
+    std::unique_ptr<T[]> new_data(new T[new_capacity]);
+    std::memcpy(new_data.get(), data_.get(), size_ * sizeof(T));
+    MaybeZeroCompleteBuffer();
+    data_ = std::move(new_data);
+    capacity_ = new_capacity;
+    RTC_DCHECK(IsConsistent());
+  }
+
+  // Zero the complete buffer if template argument "ZeroOnFree" is true.
+  void MaybeZeroCompleteBuffer() {
+    if (ZeroOnFree && capacity_) {
+      // It would be sufficient to only zero "size_" elements, as all other
+      // methods already ensure that the unused capacity contains no sensitive
+      // data - but better safe than sorry.
+      ExplicitZeroMemory(data_.get(), capacity_ * sizeof(T));
+    }
+  }
+
+  // Zero the first "count" elements of unused capacity.
+  void ZeroTrailingData(size_t count) {
+    RTC_DCHECK(IsConsistent());
+    RTC_DCHECK_LE(count, capacity_ - size_);
+    ExplicitZeroMemory(data_.get() + size_, count * sizeof(T));
+  }
+
+  // Precondition for all methods except Clear and the destructor.
+  // Postcondition for all methods except move construction and move
+  // assignment, which leave the moved-from object in a possibly inconsistent
+  // state.
+  bool IsConsistent() const {
+    return (data_ || capacity_ == 0) && capacity_ >= size_;
+  }
+
+  // Called when *this has been moved from. Conceptually it's a no-op, but we
+  // can mutate the state slightly to help subsequent sanity checks catch bugs.
+  void OnMovedFrom() {
+#if RTC_DCHECK_IS_ON
+    // Make *this consistent and empty. Shouldn't be necessary, but better safe
+    // than sorry.
+    size_ = 0;
+    capacity_ = 0;
+#else
+    // Ensure that *this is always inconsistent, to provoke bugs.
+    size_ = 1;
+    capacity_ = 0;
+#endif
+  }
+
+  size_t size_;
+  size_t capacity_;
+  std::unique_ptr<T[]> data_;
+};
+
+// By far the most common sort of buffer.
+using Buffer = BufferT<uint8_t>;
+
+// A buffer that zeros memory before releasing it.
+template <typename T>
+using ZeroOnFreeBuffer = BufferT<T, true>;
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_BUFFER_H_
diff --git a/rtc_base/buffer_unittest.cc b/rtc_base/buffer_unittest.cc
new file mode 100644
index 0000000..e3b7d46
--- /dev/null
+++ b/rtc_base/buffer_unittest.cc
@@ -0,0 +1,514 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/buffer.h"
+
+#include "api/array_view.h"
+#include "rtc_base/gunit.h"
+
+#include <type_traits>
+#include <utility>
+
+namespace rtc {
+
+namespace {
+
+// clang-format off
+const uint8_t kTestData[] = {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
+                             0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
+// clang-format on
+
+void TestBuf(const Buffer& b1, size_t size, size_t capacity) {
+  EXPECT_EQ(b1.size(), size);
+  EXPECT_EQ(b1.capacity(), capacity);
+}
+
+}  // namespace
+
+TEST(BufferTest, TestConstructEmpty) {
+  TestBuf(Buffer(), 0, 0);
+  TestBuf(Buffer(Buffer()), 0, 0);
+  TestBuf(Buffer(0), 0, 0);
+
+  // We can't use a literal 0 for the first argument, because C++ will allow
+  // that to be considered a null pointer, which makes the call ambiguous.
+  TestBuf(Buffer(0 + 0, 10), 0, 10);
+
+  TestBuf(Buffer(kTestData, 0), 0, 0);
+  TestBuf(Buffer(kTestData, 0, 20), 0, 20);
+}
+
+TEST(BufferTest, TestConstructData) {
+  Buffer buf(kTestData, 7);
+  EXPECT_EQ(buf.size(), 7u);
+  EXPECT_EQ(buf.capacity(), 7u);
+  EXPECT_FALSE(buf.empty());
+  EXPECT_EQ(0, memcmp(buf.data(), kTestData, 7));
+}
+
+TEST(BufferTest, TestConstructDataWithCapacity) {
+  Buffer buf(kTestData, 7, 14);
+  EXPECT_EQ(buf.size(), 7u);
+  EXPECT_EQ(buf.capacity(), 14u);
+  EXPECT_FALSE(buf.empty());
+  EXPECT_EQ(0, memcmp(buf.data(), kTestData, 7));
+}
+
+TEST(BufferTest, TestConstructArray) {
+  Buffer buf(kTestData);
+  EXPECT_EQ(buf.size(), 16u);
+  EXPECT_EQ(buf.capacity(), 16u);
+  EXPECT_FALSE(buf.empty());
+  EXPECT_EQ(0, memcmp(buf.data(), kTestData, 16));
+}
+
+TEST(BufferTest, TestSetData) {
+  Buffer buf(kTestData + 4, 7);
+  buf.SetData(kTestData, 9);
+  EXPECT_EQ(buf.size(), 9u);
+  EXPECT_EQ(buf.capacity(), 7u * 3 / 2);
+  EXPECT_FALSE(buf.empty());
+  EXPECT_EQ(0, memcmp(buf.data(), kTestData, 9));
+  Buffer buf2;
+  buf2.SetData(buf);
+  EXPECT_EQ(buf.size(), 9u);
+  EXPECT_EQ(buf.capacity(), 7u * 3 / 2);
+  EXPECT_EQ(0, memcmp(buf.data(), kTestData, 9));
+}
+
+TEST(BufferTest, TestAppendData) {
+  Buffer buf(kTestData + 4, 3);
+  buf.AppendData(kTestData + 10, 2);
+  const int8_t exp[] = {0x4, 0x5, 0x6, 0xa, 0xb};
+  EXPECT_EQ(buf, Buffer(exp));
+  Buffer buf2;
+  buf2.AppendData(buf);
+  buf2.AppendData(rtc::ArrayView<uint8_t>(buf));
+  const int8_t exp2[] = {0x4, 0x5, 0x6, 0xa, 0xb, 0x4, 0x5, 0x6, 0xa, 0xb};
+  EXPECT_EQ(buf2, Buffer(exp2));
+}
+
+TEST(BufferTest, TestSetAndAppendWithUnknownArg) {
+  struct TestDataContainer {
+    size_t size() const { return 3; }
+    const uint8_t* data() const { return kTestData; }
+  };
+  Buffer buf;
+  buf.SetData(TestDataContainer());
+  EXPECT_EQ(3u, buf.size());
+  EXPECT_EQ(Buffer(kTestData, 3), buf);
+  buf.AppendData(TestDataContainer());
+  EXPECT_EQ(6u, buf.size());
+  EXPECT_EQ(0, memcmp(buf.data(), kTestData, 3));
+  EXPECT_EQ(0, memcmp(buf.data() + 3, kTestData, 3));
+}
+
+TEST(BufferTest, TestSetSizeSmaller) {
+  Buffer buf;
+  buf.SetData(kTestData, 15);
+  buf.SetSize(10);
+  EXPECT_EQ(buf.size(), 10u);
+  EXPECT_EQ(buf.capacity(), 15u);  // Hasn't shrunk.
+  EXPECT_FALSE(buf.empty());
+  EXPECT_EQ(buf, Buffer(kTestData, 10));
+}
+
+TEST(BufferTest, TestSetSizeLarger) {
+  Buffer buf;
+  buf.SetData(kTestData, 15);
+  EXPECT_EQ(buf.size(), 15u);
+  EXPECT_EQ(buf.capacity(), 15u);
+  EXPECT_FALSE(buf.empty());
+  buf.SetSize(20);
+  EXPECT_EQ(buf.size(), 20u);
+  EXPECT_EQ(buf.capacity(), 15u * 3 / 2);  // Has grown.
+  EXPECT_FALSE(buf.empty());
+  EXPECT_EQ(0, memcmp(buf.data(), kTestData, 15));
+}
+
+TEST(BufferTest, TestEnsureCapacitySmaller) {
+  Buffer buf(kTestData);
+  const char* data = buf.data<char>();
+  buf.EnsureCapacity(4);
+  EXPECT_EQ(buf.capacity(), 16u);     // Hasn't shrunk.
+  EXPECT_EQ(buf.data<char>(), data);  // No reallocation.
+  EXPECT_FALSE(buf.empty());
+  EXPECT_EQ(buf, Buffer(kTestData));
+}
+
+TEST(BufferTest, TestEnsureCapacityLarger) {
+  Buffer buf(kTestData, 5);
+  buf.EnsureCapacity(10);
+  const int8_t* data = buf.data<int8_t>();
+  EXPECT_EQ(buf.capacity(), 10u);
+  buf.AppendData(kTestData + 5, 5);
+  EXPECT_EQ(buf.data<int8_t>(), data);  // No reallocation.
+  EXPECT_FALSE(buf.empty());
+  EXPECT_EQ(buf, Buffer(kTestData, 10));
+}
+
+TEST(BufferTest, TestMoveConstruct) {
+  Buffer buf1(kTestData, 3, 40);
+  const uint8_t* data = buf1.data();
+  Buffer buf2(std::move(buf1));
+  EXPECT_EQ(buf2.size(), 3u);
+  EXPECT_EQ(buf2.capacity(), 40u);
+  EXPECT_EQ(buf2.data(), data);
+  EXPECT_FALSE(buf2.empty());
+  buf1.Clear();
+  EXPECT_EQ(buf1.size(), 0u);
+  EXPECT_EQ(buf1.capacity(), 0u);
+  EXPECT_EQ(buf1.data(), nullptr);
+  EXPECT_TRUE(buf1.empty());
+}
+
+TEST(BufferTest, TestMoveAssign) {
+  Buffer buf1(kTestData, 3, 40);
+  const uint8_t* data = buf1.data();
+  Buffer buf2(kTestData);
+  buf2 = std::move(buf1);
+  EXPECT_EQ(buf2.size(), 3u);
+  EXPECT_EQ(buf2.capacity(), 40u);
+  EXPECT_EQ(buf2.data(), data);
+  EXPECT_FALSE(buf2.empty());
+  buf1.Clear();
+  EXPECT_EQ(buf1.size(), 0u);
+  EXPECT_EQ(buf1.capacity(), 0u);
+  EXPECT_EQ(buf1.data(), nullptr);
+  EXPECT_TRUE(buf1.empty());
+}
+
+TEST(BufferTest, TestSwap) {
+  Buffer buf1(kTestData, 3);
+  Buffer buf2(kTestData, 6, 40);
+  uint8_t* data1 = buf1.data();
+  uint8_t* data2 = buf2.data();
+  using std::swap;
+  swap(buf1, buf2);
+  EXPECT_EQ(buf1.size(), 6u);
+  EXPECT_EQ(buf1.capacity(), 40u);
+  EXPECT_EQ(buf1.data(), data2);
+  EXPECT_FALSE(buf1.empty());
+  EXPECT_EQ(buf2.size(), 3u);
+  EXPECT_EQ(buf2.capacity(), 3u);
+  EXPECT_EQ(buf2.data(), data1);
+  EXPECT_FALSE(buf2.empty());
+}
+
+TEST(BufferTest, TestClear) {
+  Buffer buf;
+  buf.SetData(kTestData, 15);
+  EXPECT_EQ(buf.size(), 15u);
+  EXPECT_EQ(buf.capacity(), 15u);
+  EXPECT_FALSE(buf.empty());
+  const char *data = buf.data<char>();
+  buf.Clear();
+  EXPECT_EQ(buf.size(), 0u);
+  EXPECT_EQ(buf.capacity(), 15u);  // Hasn't shrunk.
+  EXPECT_EQ(buf.data<char>(), data); // No reallocation.
+  EXPECT_TRUE(buf.empty());
+}
+
+TEST(BufferTest, TestLambdaSetAppend) {
+  auto setter = [] (rtc::ArrayView<uint8_t> av) {
+    for (int i = 0; i != 15; ++i)
+      av[i] = kTestData[i];
+    return 15;
+  };
+
+  Buffer buf1;
+  buf1.SetData(kTestData, 15);
+  buf1.AppendData(kTestData, 15);
+
+  Buffer buf2;
+  EXPECT_EQ(buf2.SetData(15, setter), 15u);
+  EXPECT_EQ(buf2.AppendData(15, setter), 15u);
+  EXPECT_EQ(buf1, buf2);
+  EXPECT_EQ(buf1.capacity(), buf2.capacity());
+  EXPECT_FALSE(buf1.empty());
+  EXPECT_FALSE(buf2.empty());
+}
+
+TEST(BufferTest, TestLambdaSetAppendSigned) {
+  auto setter = [] (rtc::ArrayView<int8_t> av) {
+    for (int i = 0; i != 15; ++i)
+      av[i] = kTestData[i];
+    return 15;
+  };
+
+  Buffer buf1;
+  buf1.SetData(kTestData, 15);
+  buf1.AppendData(kTestData, 15);
+
+  Buffer buf2;
+  EXPECT_EQ(buf2.SetData<int8_t>(15, setter), 15u);
+  EXPECT_EQ(buf2.AppendData<int8_t>(15, setter), 15u);
+  EXPECT_EQ(buf1, buf2);
+  EXPECT_EQ(buf1.capacity(), buf2.capacity());
+  EXPECT_FALSE(buf1.empty());
+  EXPECT_FALSE(buf2.empty());
+}
+
+TEST(BufferTest, TestLambdaAppendEmpty) {
+  auto setter = [] (rtc::ArrayView<uint8_t> av) {
+    for (int i = 0; i != 15; ++i)
+      av[i] = kTestData[i];
+    return 15;
+  };
+
+  Buffer buf1;
+  buf1.SetData(kTestData, 15);
+
+  Buffer buf2;
+  EXPECT_EQ(buf2.AppendData(15, setter), 15u);
+  EXPECT_EQ(buf1, buf2);
+  EXPECT_EQ(buf1.capacity(), buf2.capacity());
+  EXPECT_FALSE(buf1.empty());
+  EXPECT_FALSE(buf2.empty());
+}
+
+TEST(BufferTest, TestLambdaAppendPartial) {
+  auto setter = [] (rtc::ArrayView<uint8_t> av) {
+    for (int i = 0; i != 7; ++i)
+      av[i] = kTestData[i];
+    return 7;
+  };
+
+  Buffer buf;
+  EXPECT_EQ(buf.AppendData(15, setter), 7u);
+  EXPECT_EQ(buf.size(), 7u);            // Size is exactly what we wrote.
+  EXPECT_GE(buf.capacity(), 7u);        // Capacity is valid.
+  EXPECT_NE(buf.data<char>(), nullptr); // Data is actually stored.
+  EXPECT_FALSE(buf.empty());
+}
+
+TEST(BufferTest, TestMutableLambdaSetAppend) {
+  uint8_t magic_number = 17;
+  auto setter = [magic_number] (rtc::ArrayView<uint8_t> av) mutable {
+    for (int i = 0; i != 15; ++i) {
+      av[i] = magic_number;
+      ++magic_number;
+    }
+    return 15;
+  };
+
+  EXPECT_EQ(magic_number, 17);
+
+  Buffer buf;
+  EXPECT_EQ(buf.SetData(15, setter), 15u);
+  EXPECT_EQ(buf.AppendData(15, setter), 15u);
+  EXPECT_EQ(buf.size(), 30u);           // Size is exactly what we wrote.
+  EXPECT_GE(buf.capacity(), 30u);       // Capacity is valid.
+  EXPECT_NE(buf.data<char>(), nullptr); // Data is actually stored.
+  EXPECT_FALSE(buf.empty());
+
+  for (uint8_t i = 0; i != buf.size(); ++i) {
+    EXPECT_EQ(buf.data()[i], magic_number + i);
+  }
+}
+
+TEST(BufferTest, TestBracketRead) {
+  Buffer buf(kTestData, 7);
+  EXPECT_EQ(buf.size(), 7u);
+  EXPECT_EQ(buf.capacity(), 7u);
+  EXPECT_NE(buf.data(), nullptr);
+  EXPECT_FALSE(buf.empty());
+
+  for (size_t i = 0; i != 7u; ++i) {
+    EXPECT_EQ(buf[i], kTestData[i]);
+  }
+}
+
+TEST(BufferTest, TestBracketReadConst) {
+  Buffer buf(kTestData, 7);
+  EXPECT_EQ(buf.size(), 7u);
+  EXPECT_EQ(buf.capacity(), 7u);
+  EXPECT_NE(buf.data(), nullptr);
+  EXPECT_FALSE(buf.empty());
+
+  const Buffer& cbuf = buf;
+
+  for (size_t i = 0; i != 7u; ++i) {
+    EXPECT_EQ(cbuf[i], kTestData[i]);
+  }
+}
+
+TEST(BufferTest, TestBracketWrite) {
+  Buffer buf(7);
+  EXPECT_EQ(buf.size(), 7u);
+  EXPECT_EQ(buf.capacity(), 7u);
+  EXPECT_NE(buf.data(), nullptr);
+  EXPECT_FALSE(buf.empty());
+
+  for (size_t i = 0; i != 7u; ++i) {
+    buf[i] = kTestData[i];
+  }
+
+  for (size_t i = 0; i != 7u; ++i) {
+    EXPECT_EQ(buf[i], kTestData[i]);
+  }
+}
+
+TEST(BufferTest, TestBeginEnd) {
+  const Buffer cbuf(kTestData);
+  Buffer buf(kTestData);
+  auto* b1 = cbuf.begin();
+  for (auto& x : buf) {
+    EXPECT_EQ(*b1, x);
+    ++b1;
+    ++x;
+  }
+  EXPECT_EQ(cbuf.end(), b1);
+  auto* b2 = buf.begin();
+  for (auto& y : cbuf) {
+    EXPECT_EQ(*b2, y + 1);
+    ++b2;
+  }
+  EXPECT_EQ(buf.end(), b2);
+}
+
+TEST(BufferTest, TestInt16) {
+  static constexpr int16_t test_data[] = {14, 15, 16, 17, 18};
+  BufferT<int16_t> buf(test_data);
+  EXPECT_EQ(buf.size(), 5u);
+  EXPECT_EQ(buf.capacity(), 5u);
+  EXPECT_NE(buf.data(), nullptr);
+  EXPECT_FALSE(buf.empty());
+  for (size_t i = 0; i != buf.size(); ++i) {
+    EXPECT_EQ(test_data[i], buf[i]);
+  }
+  BufferT<int16_t> buf2(test_data);
+  EXPECT_EQ(buf, buf2);
+  buf2[0] = 9;
+  EXPECT_NE(buf, buf2);
+}
+
+TEST(BufferTest, TestFloat) {
+  static constexpr float test_data[] = {14, 15, 16, 17, 18};
+  BufferT<float> buf;
+  EXPECT_EQ(buf.size(), 0u);
+  EXPECT_EQ(buf.capacity(), 0u);
+  EXPECT_EQ(buf.data(), nullptr);
+  EXPECT_TRUE(buf.empty());
+  buf.SetData(test_data);
+  EXPECT_EQ(buf.size(), 5u);
+  EXPECT_EQ(buf.capacity(), 5u);
+  EXPECT_NE(buf.data(), nullptr);
+  EXPECT_FALSE(buf.empty());
+  float* p1 = buf.data();
+  while (buf.data() == p1) {
+    buf.AppendData(test_data);
+  }
+  EXPECT_EQ(buf.size(), buf.capacity());
+  EXPECT_GT(buf.size(), 5u);
+  EXPECT_EQ(buf.size() % 5, 0u);
+  EXPECT_NE(buf.data(), nullptr);
+  for (size_t i = 0; i != buf.size(); ++i) {
+    EXPECT_EQ(test_data[i % 5], buf[i]);
+  }
+}
+
+TEST(BufferTest, TestStruct) {
+  struct BloodStone {
+    bool blood;
+    const char* stone;
+  };
+  BufferT<BloodStone> buf(4);
+  EXPECT_EQ(buf.size(), 4u);
+  EXPECT_EQ(buf.capacity(), 4u);
+  EXPECT_NE(buf.data(), nullptr);
+  EXPECT_FALSE(buf.empty());
+  BufferT<BloodStone*> buf2(4);
+  for (size_t i = 0; i < buf2.size(); ++i) {
+    buf2[i] = &buf[i];
+  }
+  static const char kObsidian[] = "obsidian";
+  buf2[2]->stone = kObsidian;
+  EXPECT_EQ(kObsidian, buf[2].stone);
+}
+
+TEST(ZeroOnFreeBufferTest, TestZeroOnSetData) {
+  ZeroOnFreeBuffer<uint8_t> buf(kTestData, 7);
+  const uint8_t* old_data = buf.data();
+  const size_t old_capacity = buf.capacity();
+  const size_t old_size = buf.size();
+  constexpr size_t offset = 1;
+  buf.SetData(kTestData + offset, 2);
+  // Sanity checks to make sure the underlying heap memory was not reallocated.
+  EXPECT_EQ(old_data, buf.data());
+  EXPECT_EQ(old_capacity, buf.capacity());
+  // The first two elements have been overwritten, and the remaining five have
+  // been zeroed.
+  EXPECT_EQ(kTestData[offset], buf[0]);
+  EXPECT_EQ(kTestData[offset + 1], buf[1]);
+  for (size_t i = 2; i < old_size; i++) {
+    EXPECT_EQ(0, old_data[i]);
+  }
+}
+
+TEST(ZeroOnFreeBufferTest, TestZeroOnSetDataFromSetter) {
+  static constexpr size_t offset = 1;
+  const auto setter = [](rtc::ArrayView<uint8_t> av) {
+    for (int i = 0; i != 2; ++i)
+      av[i] = kTestData[offset + i];
+    return 2;
+  };
+
+  ZeroOnFreeBuffer<uint8_t> buf(kTestData, 7);
+  const uint8_t* old_data = buf.data();
+  const size_t old_capacity = buf.capacity();
+  const size_t old_size = buf.size();
+  buf.SetData(2, setter);
+  // Sanity checks to make sure the underlying heap memory was not reallocated.
+  EXPECT_EQ(old_data, buf.data());
+  EXPECT_EQ(old_capacity, buf.capacity());
+  // The first two elements have been overwritten, and the remaining five have
+  // been zeroed.
+  EXPECT_EQ(kTestData[offset], buf[0]);
+  EXPECT_EQ(kTestData[offset + 1], buf[1]);
+  for (size_t i = 2; i < old_size; i++) {
+    EXPECT_EQ(0, old_data[i]);
+  }
+}
+
+TEST(ZeroOnFreeBufferTest, TestZeroOnSetSize) {
+  ZeroOnFreeBuffer<uint8_t> buf(kTestData, 7);
+  const uint8_t* old_data = buf.data();
+  const size_t old_capacity = buf.capacity();
+  const size_t old_size = buf.size();
+  buf.SetSize(2);
+  // Sanity checks to make sure the underlying heap memory was not reallocated.
+  EXPECT_EQ(old_data, buf.data());
+  EXPECT_EQ(old_capacity, buf.capacity());
+  // The first two elements have not been modified and the remaining five have
+  // been zeroed.
+  EXPECT_EQ(kTestData[0], buf[0]);
+  EXPECT_EQ(kTestData[1], buf[1]);
+  for (size_t i = 2; i < old_size; i++) {
+    EXPECT_EQ(0, old_data[i]);
+  }
+}
+
+TEST(ZeroOnFreeBufferTest, TestZeroOnClear) {
+  ZeroOnFreeBuffer<uint8_t> buf(kTestData, 7);
+  const uint8_t* old_data = buf.data();
+  const size_t old_capacity = buf.capacity();
+  const size_t old_size = buf.size();
+  buf.Clear();
+  // Sanity checks to make sure the underlying heap memory was not reallocated.
+  EXPECT_EQ(old_data, buf.data());
+  EXPECT_EQ(old_capacity, buf.capacity());
+  // The underlying memory was not released but cleared.
+  for (size_t i = 0; i < old_size; i++) {
+    EXPECT_EQ(0, old_data[i]);
+  }
+}
+
+}  // namespace rtc
diff --git a/rtc_base/bufferqueue.cc b/rtc_base/bufferqueue.cc
new file mode 100644
index 0000000..3b082a5
--- /dev/null
+++ b/rtc_base/bufferqueue.cc
@@ -0,0 +1,94 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/bufferqueue.h"
+
+#include <algorithm>
+
+namespace rtc {
+
+BufferQueue::BufferQueue(size_t capacity, size_t default_size)
+    : capacity_(capacity), default_size_(default_size) {
+}
+
+BufferQueue::~BufferQueue() {
+  CritScope cs(&crit_);
+
+  for (Buffer* buffer : queue_) {
+    delete buffer;
+  }
+  for (Buffer* buffer : free_list_) {
+    delete buffer;
+  }
+}
+
+size_t BufferQueue::size() const {
+  CritScope cs(&crit_);
+  return queue_.size();
+}
+
+void BufferQueue::Clear() {
+  CritScope cs(&crit_);
+  while (!queue_.empty()) {
+    free_list_.push_back(queue_.front());
+    queue_.pop_front();
+  }
+}
+
+bool BufferQueue::ReadFront(void* buffer, size_t bytes, size_t* bytes_read) {
+  CritScope cs(&crit_);
+  if (queue_.empty()) {
+    return false;
+  }
+
+  bool was_writable = queue_.size() < capacity_;
+  Buffer* packet = queue_.front();
+  queue_.pop_front();
+
+  bytes = std::min(bytes, packet->size());
+  memcpy(buffer, packet->data(), bytes);
+  if (bytes_read) {
+    *bytes_read = bytes;
+  }
+  free_list_.push_back(packet);
+  if (!was_writable) {
+    NotifyWritableForTest();
+  }
+  return true;
+}
+
+bool BufferQueue::WriteBack(const void* buffer, size_t bytes,
+                            size_t* bytes_written) {
+  CritScope cs(&crit_);
+  if (queue_.size() == capacity_) {
+    return false;
+  }
+
+  bool was_readable = !queue_.empty();
+  Buffer* packet;
+  if (!free_list_.empty()) {
+    packet = free_list_.back();
+    free_list_.pop_back();
+  } else {
+    packet = new Buffer(bytes, default_size_);
+  }
+
+  packet->SetData(static_cast<const uint8_t*>(buffer), bytes);
+  if (bytes_written) {
+    *bytes_written = bytes;
+  }
+  queue_.push_back(packet);
+  if (!was_readable) {
+    NotifyReadableForTest();
+  }
+  return true;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/bufferqueue.h b/rtc_base/bufferqueue.h
new file mode 100644
index 0000000..94ab0ca
--- /dev/null
+++ b/rtc_base/bufferqueue.h
@@ -0,0 +1,61 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_BUFFERQUEUE_H_
+#define RTC_BASE_BUFFERQUEUE_H_
+
+#include <deque>
+#include <vector>
+
+#include "rtc_base/buffer.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+
+namespace rtc {
+
+class BufferQueue {
+ public:
+  // Creates a buffer queue with a given capacity and default buffer size.
+  BufferQueue(size_t capacity, size_t default_size);
+  virtual ~BufferQueue();
+
+  // Return number of queued buffers.
+  size_t size() const;
+
+  // Clear the BufferQueue by moving all Buffers from |queue_| to |free_list_|.
+  void Clear();
+
+  // ReadFront will only read one buffer at a time and will truncate buffers
+  // that don't fit in the passed memory.
+  // Returns true unless no data could be returned.
+  bool ReadFront(void* data, size_t bytes, size_t* bytes_read);
+
+  // WriteBack always writes either the complete memory or nothing.
+  // Returns true unless no data could be written.
+  bool WriteBack(const void* data, size_t bytes, size_t* bytes_written);
+
+ protected:
+  // These methods are called when the state of the queue changes.
+  virtual void NotifyReadableForTest() {}
+  virtual void NotifyWritableForTest() {}
+
+ private:
+  size_t capacity_;
+  size_t default_size_;
+  CriticalSection crit_;
+  std::deque<Buffer*> queue_ RTC_GUARDED_BY(crit_);
+  std::vector<Buffer*> free_list_ RTC_GUARDED_BY(crit_);
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(BufferQueue);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_BUFFERQUEUE_H_
diff --git a/rtc_base/bufferqueue_unittest.cc b/rtc_base/bufferqueue_unittest.cc
new file mode 100644
index 0000000..11d115f
--- /dev/null
+++ b/rtc_base/bufferqueue_unittest.cc
@@ -0,0 +1,86 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/bufferqueue.h"
+#include "rtc_base/gunit.h"
+
+namespace rtc {
+
+TEST(BufferQueueTest, TestAll) {
+  const size_t kSize = 16;
+  const char in[kSize * 2 + 1] = "0123456789ABCDEFGHIJKLMNOPQRSTUV";
+  char out[kSize * 2];
+  size_t bytes;
+  BufferQueue queue1(1, kSize);
+  BufferQueue queue2(2, kSize);
+
+  // The queue is initially empty.
+  EXPECT_EQ(0u, queue1.size());
+  EXPECT_FALSE(queue1.ReadFront(out, kSize, &bytes));
+
+  // A write should succeed.
+  EXPECT_TRUE(queue1.WriteBack(in, kSize, &bytes));
+  EXPECT_EQ(kSize, bytes);
+  EXPECT_EQ(1u, queue1.size());
+
+  // The queue is full now (only one buffer allowed).
+  EXPECT_FALSE(queue1.WriteBack(in, kSize, &bytes));
+  EXPECT_EQ(1u, queue1.size());
+
+  // Reading previously written buffer.
+  EXPECT_TRUE(queue1.ReadFront(out, kSize, &bytes));
+  EXPECT_EQ(kSize, bytes);
+  EXPECT_EQ(0, memcmp(in, out, kSize));
+
+  // The queue is empty again now.
+  EXPECT_FALSE(queue1.ReadFront(out, kSize, &bytes));
+  EXPECT_EQ(0u, queue1.size());
+
+  // Reading only returns available data.
+  EXPECT_TRUE(queue1.WriteBack(in, kSize, &bytes));
+  EXPECT_EQ(kSize, bytes);
+  EXPECT_EQ(1u, queue1.size());
+  EXPECT_TRUE(queue1.ReadFront(out, kSize * 2, &bytes));
+  EXPECT_EQ(kSize, bytes);
+  EXPECT_EQ(0, memcmp(in, out, kSize));
+  EXPECT_EQ(0u, queue1.size());
+
+  // Reading maintains buffer boundaries.
+  EXPECT_TRUE(queue2.WriteBack(in, kSize / 2, &bytes));
+  EXPECT_EQ(1u, queue2.size());
+  EXPECT_TRUE(queue2.WriteBack(in + kSize / 2, kSize / 2, &bytes));
+  EXPECT_EQ(2u, queue2.size());
+  EXPECT_TRUE(queue2.ReadFront(out, kSize, &bytes));
+  EXPECT_EQ(kSize / 2, bytes);
+  EXPECT_EQ(0, memcmp(in, out, kSize / 2));
+  EXPECT_EQ(1u, queue2.size());
+  EXPECT_TRUE(queue2.ReadFront(out, kSize, &bytes));
+  EXPECT_EQ(kSize / 2, bytes);
+  EXPECT_EQ(0, memcmp(in + kSize / 2, out, kSize / 2));
+  EXPECT_EQ(0u, queue2.size());
+
+  // Reading truncates buffers.
+  EXPECT_TRUE(queue2.WriteBack(in, kSize / 2, &bytes));
+  EXPECT_EQ(1u, queue2.size());
+  EXPECT_TRUE(queue2.WriteBack(in + kSize / 2, kSize / 2, &bytes));
+  EXPECT_EQ(2u, queue2.size());
+  // Read first packet partially in too-small buffer.
+  EXPECT_TRUE(queue2.ReadFront(out, kSize / 4, &bytes));
+  EXPECT_EQ(kSize / 4, bytes);
+  EXPECT_EQ(0, memcmp(in, out, kSize / 4));
+  EXPECT_EQ(1u, queue2.size());
+  // Remainder of first packet is truncated, reading starts with next packet.
+  EXPECT_TRUE(queue2.ReadFront(out, kSize, &bytes));
+  EXPECT_EQ(kSize / 2, bytes);
+  EXPECT_EQ(0, memcmp(in + kSize / 2, out, kSize / 2));
+  EXPECT_EQ(0u, queue2.size());
+}
+
+}  // namespace rtc
diff --git a/rtc_base/bytebuffer.cc b/rtc_base/bytebuffer.cc
new file mode 100644
index 0000000..f152d4f
--- /dev/null
+++ b/rtc_base/bytebuffer.cc
@@ -0,0 +1,178 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/bytebuffer.h"
+
+#include <string.h>
+
+#include <algorithm>
+
+#include "rtc_base/basictypes.h"
+
+namespace rtc {
+
+ByteBufferWriter::ByteBufferWriter() : ByteBufferWriterT() {}
+
+ByteBufferWriter::ByteBufferWriter(ByteOrder byte_order)
+    : ByteBufferWriterT(byte_order) {}
+
+ByteBufferWriter::ByteBufferWriter(const char* bytes, size_t len)
+    : ByteBufferWriterT(bytes, len) {}
+
+ByteBufferWriter::ByteBufferWriter(const char* bytes,
+                                   size_t len,
+                                   ByteOrder byte_order)
+    : ByteBufferWriterT(bytes, len, byte_order) {}
+
+ByteBufferReader::ByteBufferReader(const char* bytes, size_t len)
+    : ByteBuffer(ORDER_NETWORK) {
+  Construct(bytes, len);
+}
+
+ByteBufferReader::ByteBufferReader(const char* bytes, size_t len,
+                                   ByteOrder byte_order)
+    : ByteBuffer(byte_order) {
+  Construct(bytes, len);
+}
+
+ByteBufferReader::ByteBufferReader(const char* bytes)
+    : ByteBuffer(ORDER_NETWORK) {
+  Construct(bytes, strlen(bytes));
+}
+
+ByteBufferReader::ByteBufferReader(const Buffer& buf)
+    : ByteBuffer(ORDER_NETWORK) {
+  Construct(buf.data<char>(), buf.size());
+}
+
+ByteBufferReader::ByteBufferReader(const ByteBufferWriter& buf)
+    : ByteBuffer(buf.Order()) {
+  Construct(buf.Data(), buf.Length());
+}
+
+void ByteBufferReader::Construct(const char* bytes, size_t len) {
+  bytes_ = bytes;
+  size_ = len;
+  start_ = 0;
+  end_ = len;
+}
+
+bool ByteBufferReader::ReadUInt8(uint8_t* val) {
+  if (!val) return false;
+
+  return ReadBytes(reinterpret_cast<char*>(val), 1);
+}
+
+bool ByteBufferReader::ReadUInt16(uint16_t* val) {
+  if (!val) return false;
+
+  uint16_t v;
+  if (!ReadBytes(reinterpret_cast<char*>(&v), 2)) {
+    return false;
+  } else {
+    *val = (Order() == ORDER_NETWORK) ? NetworkToHost16(v) : v;
+    return true;
+  }
+}
+
+bool ByteBufferReader::ReadUInt24(uint32_t* val) {
+  if (!val) return false;
+
+  uint32_t v = 0;
+  char* read_into = reinterpret_cast<char*>(&v);
+  if (Order() == ORDER_NETWORK || IsHostBigEndian()) {
+    ++read_into;
+  }
+
+  if (!ReadBytes(read_into, 3)) {
+    return false;
+  } else {
+    *val = (Order() == ORDER_NETWORK) ? NetworkToHost32(v) : v;
+    return true;
+  }
+}
+
+bool ByteBufferReader::ReadUInt32(uint32_t* val) {
+  if (!val) return false;
+
+  uint32_t v;
+  if (!ReadBytes(reinterpret_cast<char*>(&v), 4)) {
+    return false;
+  } else {
+    *val = (Order() == ORDER_NETWORK) ? NetworkToHost32(v) : v;
+    return true;
+  }
+}
+
+bool ByteBufferReader::ReadUInt64(uint64_t* val) {
+  if (!val) return false;
+
+  uint64_t v;
+  if (!ReadBytes(reinterpret_cast<char*>(&v), 8)) {
+    return false;
+  } else {
+    *val = (Order() == ORDER_NETWORK) ? NetworkToHost64(v) : v;
+    return true;
+  }
+}
+
+bool ByteBufferReader::ReadUVarint(uint64_t* val) {
+  if (!val) {
+    return false;
+  }
+  // Integers are deserialized 7 bits at a time, with each byte having a
+  // continuation byte (msb=1) if there are more bytes to be read.
+  uint64_t v = 0;
+  for (int i = 0; i < 64; i += 7) {
+    char byte;
+    if (!ReadBytes(&byte, 1)) {
+      return false;
+    }
+    // Read the first 7 bits of the byte, then offset by bits read so far.
+    v |= (static_cast<uint64_t>(byte) & 0x7F) << i;
+    // True if the msb is not a continuation byte.
+    if (static_cast<uint64_t>(byte) < 0x80) {
+      *val = v;
+      return true;
+    }
+  }
+  return false;
+}
+
+bool ByteBufferReader::ReadString(std::string* val, size_t len) {
+  if (!val) return false;
+
+  if (len > Length()) {
+    return false;
+  } else {
+    val->append(bytes_ + start_, len);
+    start_ += len;
+    return true;
+  }
+}
+
+bool ByteBufferReader::ReadBytes(char* val, size_t len) {
+  if (len > Length()) {
+    return false;
+  } else {
+    memcpy(val, bytes_ + start_, len);
+    start_ += len;
+    return true;
+  }
+}
+
+bool ByteBufferReader::Consume(size_t size) {
+  if (size > Length())
+    return false;
+  start_ += size;
+  return true;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/bytebuffer.h b/rtc_base/bytebuffer.h
new file mode 100644
index 0000000..740bff2
--- /dev/null
+++ b/rtc_base/bytebuffer.h
@@ -0,0 +1,202 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_BYTEBUFFER_H_
+#define RTC_BASE_BYTEBUFFER_H_
+
+#include <string>
+
+#include "rtc_base/basictypes.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/byteorder.h"
+#include "rtc_base/constructormagic.h"
+
+namespace rtc {
+
+class ByteBuffer {
+ public:
+  enum ByteOrder {
+    ORDER_NETWORK = 0,  // Default, use network byte order (big endian).
+    ORDER_HOST,         // Use the native order of the host.
+  };
+
+  explicit ByteBuffer(ByteOrder byte_order) : byte_order_(byte_order) {}
+
+  ByteOrder Order() const { return byte_order_; }
+
+ private:
+  ByteOrder byte_order_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(ByteBuffer);
+};
+
+template <class BufferClassT>
+class ByteBufferWriterT : public ByteBuffer {
+ public:
+  // |byte_order| defines order of bytes in the buffer.
+  ByteBufferWriterT() : ByteBuffer(ORDER_NETWORK) {
+    Construct(nullptr, kDefaultCapacity);
+  }
+  explicit ByteBufferWriterT(ByteOrder byte_order) : ByteBuffer(byte_order) {
+    Construct(nullptr, kDefaultCapacity);
+  }
+  ByteBufferWriterT(const char* bytes, size_t len) : ByteBuffer(ORDER_NETWORK) {
+    Construct(bytes, len);
+  }
+  ByteBufferWriterT(const char* bytes, size_t len, ByteOrder byte_order)
+      : ByteBuffer(byte_order) {
+    Construct(bytes, len);
+  }
+
+  const char* Data() const { return buffer_.data(); }
+  size_t Length() const { return buffer_.size(); }
+  size_t Capacity() const { return buffer_.capacity(); }
+
+  // Write value to the buffer. Resizes the buffer when it is
+  // neccessary.
+  void WriteUInt8(uint8_t val) {
+    WriteBytes(reinterpret_cast<const char*>(&val), 1);
+  }
+  void WriteUInt16(uint16_t val) {
+    uint16_t v = (Order() == ORDER_NETWORK) ? HostToNetwork16(val) : val;
+    WriteBytes(reinterpret_cast<const char*>(&v), 2);
+  }
+  void WriteUInt24(uint32_t val) {
+    uint32_t v = (Order() == ORDER_NETWORK) ? HostToNetwork32(val) : val;
+    char* start = reinterpret_cast<char*>(&v);
+    if (Order() == ORDER_NETWORK || IsHostBigEndian()) {
+      ++start;
+    }
+    WriteBytes(start, 3);
+  }
+  void WriteUInt32(uint32_t val) {
+    uint32_t v = (Order() == ORDER_NETWORK) ? HostToNetwork32(val) : val;
+    WriteBytes(reinterpret_cast<const char*>(&v), 4);
+  }
+  void WriteUInt64(uint64_t val) {
+    uint64_t v = (Order() == ORDER_NETWORK) ? HostToNetwork64(val) : val;
+    WriteBytes(reinterpret_cast<const char*>(&v), 8);
+  }
+  // Serializes an unsigned varint in the format described by
+  // https://developers.google.com/protocol-buffers/docs/encoding#varints
+  // with the caveat that integers are 64-bit, not 128-bit.
+  void WriteUVarint(uint64_t val) {
+    while (val >= 0x80) {
+      // Write 7 bits at a time, then set the msb to a continuation byte
+      // (msb=1).
+      char byte = static_cast<char>(val) | 0x80;
+      WriteBytes(&byte, 1);
+      val >>= 7;
+    }
+    char last_byte = static_cast<char>(val);
+    WriteBytes(&last_byte, 1);
+  }
+  void WriteString(const std::string& val) {
+    WriteBytes(val.c_str(), val.size());
+  }
+  void WriteBytes(const char* val, size_t len) { buffer_.AppendData(val, len); }
+
+  // Reserves the given number of bytes and returns a char* that can be written
+  // into. Useful for functions that require a char* buffer and not a
+  // ByteBufferWriter.
+  char* ReserveWriteBuffer(size_t len) {
+    buffer_.SetSize(buffer_.size() + len);
+    return buffer_.data();
+  }
+
+  // Resize the buffer to the specified |size|.
+  void Resize(size_t size) { buffer_.SetSize(size); }
+
+  // Clears the contents of the buffer. After this, Length() will be 0.
+  void Clear() { buffer_.Clear(); }
+
+ private:
+  static constexpr size_t kDefaultCapacity = 4096;
+
+  void Construct(const char* bytes, size_t size) {
+    if (bytes) {
+      buffer_.AppendData(bytes, size);
+    } else {
+      buffer_.EnsureCapacity(size);
+    }
+  }
+
+  BufferClassT buffer_;
+
+  // There are sensible ways to define these, but they aren't needed in our code
+  // base.
+  RTC_DISALLOW_COPY_AND_ASSIGN(ByteBufferWriterT);
+};
+
+class ByteBufferWriter : public ByteBufferWriterT<BufferT<char>> {
+ public:
+  // |byte_order| defines order of bytes in the buffer.
+  ByteBufferWriter();
+  explicit ByteBufferWriter(ByteOrder byte_order);
+  ByteBufferWriter(const char* bytes, size_t len);
+  ByteBufferWriter(const char* bytes, size_t len, ByteOrder byte_order);
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(ByteBufferWriter);
+};
+
+// The ByteBufferReader references the passed data, i.e. the pointer must be
+// valid during the lifetime of the reader.
+class ByteBufferReader : public ByteBuffer {
+ public:
+  ByteBufferReader(const char* bytes, size_t len);
+  ByteBufferReader(const char* bytes, size_t len, ByteOrder byte_order);
+
+  // Initializes buffer from a zero-terminated string.
+  explicit ByteBufferReader(const char* bytes);
+
+  explicit ByteBufferReader(const Buffer& buf);
+
+  explicit ByteBufferReader(const ByteBufferWriter& buf);
+
+  // Returns start of unprocessed data.
+  const char* Data() const { return bytes_ + start_; }
+  // Returns number of unprocessed bytes.
+  size_t Length() const { return end_ - start_; }
+
+  // Read a next value from the buffer. Return false if there isn't
+  // enough data left for the specified type.
+  bool ReadUInt8(uint8_t* val);
+  bool ReadUInt16(uint16_t* val);
+  bool ReadUInt24(uint32_t* val);
+  bool ReadUInt32(uint32_t* val);
+  bool ReadUInt64(uint64_t* val);
+  bool ReadUVarint(uint64_t* val);
+  bool ReadBytes(char* val, size_t len);
+
+  // Appends next |len| bytes from the buffer to |val|. Returns false
+  // if there is less than |len| bytes left.
+  bool ReadString(std::string* val, size_t len);
+
+  // Moves current position |size| bytes forward. Returns false if
+  // there is less than |size| bytes left in the buffer. Consume doesn't
+  // permanently remove data, so remembered read positions are still valid
+  // after this call.
+  bool Consume(size_t size);
+
+ private:
+  void Construct(const char* bytes, size_t size);
+
+  const char* bytes_;
+  size_t size_;
+  size_t start_;
+  size_t end_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(ByteBufferReader);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_BYTEBUFFER_H_
diff --git a/rtc_base/bytebuffer_unittest.cc b/rtc_base/bytebuffer_unittest.cc
new file mode 100644
index 0000000..6140e9f
--- /dev/null
+++ b/rtc_base/bytebuffer_unittest.cc
@@ -0,0 +1,258 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/bytebuffer.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/byteorder.h"
+#include "rtc_base/gunit.h"
+
+namespace rtc {
+
+TEST(ByteBufferTest, TestByteOrder) {
+  uint16_t n16 = 1;
+  uint32_t n32 = 1;
+  uint64_t n64 = 1;
+
+  EXPECT_EQ(n16, NetworkToHost16(HostToNetwork16(n16)));
+  EXPECT_EQ(n32, NetworkToHost32(HostToNetwork32(n32)));
+  EXPECT_EQ(n64, NetworkToHost64(HostToNetwork64(n64)));
+
+  if (IsHostBigEndian()) {
+    // The host is the network (big) endian.
+    EXPECT_EQ(n16, HostToNetwork16(n16));
+    EXPECT_EQ(n32, HostToNetwork32(n32));
+    EXPECT_EQ(n64, HostToNetwork64(n64));
+
+    // GetBE converts big endian to little endian here.
+    EXPECT_EQ(n16 >> 8, GetBE16(&n16));
+    EXPECT_EQ(n32 >> 24, GetBE32(&n32));
+    EXPECT_EQ(n64 >> 56, GetBE64(&n64));
+  } else {
+    // The host is little endian.
+    EXPECT_NE(n16, HostToNetwork16(n16));
+    EXPECT_NE(n32, HostToNetwork32(n32));
+    EXPECT_NE(n64, HostToNetwork64(n64));
+
+    // GetBE converts little endian to big endian here.
+    EXPECT_EQ(GetBE16(&n16), HostToNetwork16(n16));
+    EXPECT_EQ(GetBE32(&n32), HostToNetwork32(n32));
+    EXPECT_EQ(GetBE64(&n64), HostToNetwork64(n64));
+
+    // GetBE converts little endian to big endian here.
+    EXPECT_EQ(n16 << 8, GetBE16(&n16));
+    EXPECT_EQ(n32 << 24, GetBE32(&n32));
+    EXPECT_EQ(n64 << 56, GetBE64(&n64));
+  }
+}
+
+TEST(ByteBufferTest, TestBufferLength) {
+  ByteBufferWriter buffer;
+  size_t size = 0;
+  EXPECT_EQ(size, buffer.Length());
+
+  buffer.WriteUInt8(1);
+  ++size;
+  EXPECT_EQ(size, buffer.Length());
+
+  buffer.WriteUInt16(1);
+  size += 2;
+  EXPECT_EQ(size, buffer.Length());
+
+  buffer.WriteUInt24(1);
+  size += 3;
+  EXPECT_EQ(size, buffer.Length());
+
+  buffer.WriteUInt32(1);
+  size += 4;
+  EXPECT_EQ(size, buffer.Length());
+
+  buffer.WriteUInt64(1);
+  size += 8;
+  EXPECT_EQ(size, buffer.Length());
+}
+
+TEST(ByteBufferTest, TestReadWriteBuffer) {
+  ByteBufferWriter::ByteOrder orders[2] = { ByteBufferWriter::ORDER_HOST,
+                                            ByteBufferWriter::ORDER_NETWORK };
+  for (size_t i = 0; i < arraysize(orders); i++) {
+    ByteBufferWriter buffer(orders[i]);
+    EXPECT_EQ(orders[i], buffer.Order());
+    ByteBufferReader read_buf(nullptr, 0, orders[i]);
+    EXPECT_EQ(orders[i], read_buf.Order());
+    uint8_t ru8;
+    EXPECT_FALSE(read_buf.ReadUInt8(&ru8));
+
+    // Write and read uint8_t.
+    uint8_t wu8 = 1;
+    buffer.WriteUInt8(wu8);
+    ByteBufferReader read_buf1(buffer.Data(), buffer.Length(), orders[i]);
+    EXPECT_TRUE(read_buf1.ReadUInt8(&ru8));
+    EXPECT_EQ(wu8, ru8);
+    EXPECT_EQ(0U, read_buf1.Length());
+    buffer.Clear();
+
+    // Write and read uint16_t.
+    uint16_t wu16 = (1 << 8) + 1;
+    buffer.WriteUInt16(wu16);
+    ByteBufferReader read_buf2(buffer.Data(), buffer.Length(), orders[i]);
+    uint16_t ru16;
+    EXPECT_TRUE(read_buf2.ReadUInt16(&ru16));
+    EXPECT_EQ(wu16, ru16);
+    EXPECT_EQ(0U, read_buf2.Length());
+    buffer.Clear();
+
+    // Write and read uint24.
+    uint32_t wu24 = (3 << 16) + (2 << 8) + 1;
+    buffer.WriteUInt24(wu24);
+    ByteBufferReader read_buf3(buffer.Data(), buffer.Length(), orders[i]);
+    uint32_t ru24;
+    EXPECT_TRUE(read_buf3.ReadUInt24(&ru24));
+    EXPECT_EQ(wu24, ru24);
+    EXPECT_EQ(0U, read_buf3.Length());
+    buffer.Clear();
+
+    // Write and read uint32_t.
+    uint32_t wu32 = (4 << 24) + (3 << 16) + (2 << 8) + 1;
+    buffer.WriteUInt32(wu32);
+    ByteBufferReader read_buf4(buffer.Data(), buffer.Length(), orders[i]);
+    uint32_t ru32;
+    EXPECT_TRUE(read_buf4.ReadUInt32(&ru32));
+    EXPECT_EQ(wu32, ru32);
+    EXPECT_EQ(0U, read_buf3.Length());
+    buffer.Clear();
+
+    // Write and read uint64_t.
+    uint32_t another32 = (8 << 24) + (7 << 16) + (6 << 8) + 5;
+    uint64_t wu64 = (static_cast<uint64_t>(another32) << 32) + wu32;
+    buffer.WriteUInt64(wu64);
+    ByteBufferReader read_buf5(buffer.Data(), buffer.Length(), orders[i]);
+    uint64_t ru64;
+    EXPECT_TRUE(read_buf5.ReadUInt64(&ru64));
+    EXPECT_EQ(wu64, ru64);
+    EXPECT_EQ(0U, read_buf5.Length());
+    buffer.Clear();
+
+    // Write and read string.
+    std::string write_string("hello");
+    buffer.WriteString(write_string);
+    ByteBufferReader read_buf6(buffer.Data(), buffer.Length(), orders[i]);
+    std::string read_string;
+    EXPECT_TRUE(read_buf6.ReadString(&read_string, write_string.size()));
+    EXPECT_EQ(write_string, read_string);
+    EXPECT_EQ(0U, read_buf6.Length());
+    buffer.Clear();
+
+    // Write and read bytes
+    char write_bytes[] = "foo";
+    buffer.WriteBytes(write_bytes, 3);
+    ByteBufferReader read_buf7(buffer.Data(), buffer.Length(), orders[i]);
+    char read_bytes[3];
+    EXPECT_TRUE(read_buf7.ReadBytes(read_bytes, 3));
+    for (int i = 0; i < 3; ++i) {
+      EXPECT_EQ(write_bytes[i], read_bytes[i]);
+    }
+    EXPECT_EQ(0U, read_buf7.Length());
+    buffer.Clear();
+
+    // Write and read reserved buffer space
+    char* write_dst = buffer.ReserveWriteBuffer(3);
+    memcpy(write_dst, write_bytes, 3);
+    ByteBufferReader read_buf8(buffer.Data(), buffer.Length(), orders[i]);
+    memset(read_bytes, 0, 3);
+    EXPECT_TRUE(read_buf8.ReadBytes(read_bytes, 3));
+    for (int i = 0; i < 3; ++i) {
+      EXPECT_EQ(write_bytes[i], read_bytes[i]);
+    }
+    EXPECT_EQ(0U, read_buf8.Length());
+    buffer.Clear();
+
+    // Write and read in order.
+    buffer.WriteUInt8(wu8);
+    buffer.WriteUInt16(wu16);
+    buffer.WriteUInt24(wu24);
+    buffer.WriteUInt32(wu32);
+    buffer.WriteUInt64(wu64);
+    ByteBufferReader read_buf9(buffer.Data(), buffer.Length(), orders[i]);
+    EXPECT_TRUE(read_buf9.ReadUInt8(&ru8));
+    EXPECT_EQ(wu8, ru8);
+    EXPECT_TRUE(read_buf9.ReadUInt16(&ru16));
+    EXPECT_EQ(wu16, ru16);
+    EXPECT_TRUE(read_buf9.ReadUInt24(&ru24));
+    EXPECT_EQ(wu24, ru24);
+    EXPECT_TRUE(read_buf9.ReadUInt32(&ru32));
+    EXPECT_EQ(wu32, ru32);
+    EXPECT_TRUE(read_buf9.ReadUInt64(&ru64));
+    EXPECT_EQ(wu64, ru64);
+    EXPECT_EQ(0U, read_buf9.Length());
+    buffer.Clear();
+  }
+}
+
+TEST(ByteBufferTest, TestReadWriteUVarint) {
+  ByteBufferWriter::ByteOrder orders[2] = {ByteBufferWriter::ORDER_HOST,
+                                           ByteBufferWriter::ORDER_NETWORK};
+  for (ByteBufferWriter::ByteOrder& order : orders) {
+    ByteBufferWriter write_buffer(order);
+    size_t size = 0;
+    EXPECT_EQ(size, write_buffer.Length());
+
+    write_buffer.WriteUVarint(1u);
+    ++size;
+    EXPECT_EQ(size, write_buffer.Length());
+
+    write_buffer.WriteUVarint(2u);
+    ++size;
+    EXPECT_EQ(size, write_buffer.Length());
+
+    write_buffer.WriteUVarint(27u);
+    ++size;
+    EXPECT_EQ(size, write_buffer.Length());
+
+    write_buffer.WriteUVarint(149u);
+    size += 2;
+    EXPECT_EQ(size, write_buffer.Length());
+
+    write_buffer.WriteUVarint(68719476736u);
+    size += 6;
+    EXPECT_EQ(size, write_buffer.Length());
+
+    ByteBufferReader read_buffer(write_buffer.Data(), write_buffer.Length(),
+                                 order);
+    EXPECT_EQ(size, read_buffer.Length());
+    uint64_t val1, val2, val3, val4, val5;
+
+    ASSERT_TRUE(read_buffer.ReadUVarint(&val1));
+    EXPECT_EQ(1u, val1);
+    --size;
+    EXPECT_EQ(size, read_buffer.Length());
+
+    ASSERT_TRUE(read_buffer.ReadUVarint(&val2));
+    EXPECT_EQ(2u, val2);
+    --size;
+    EXPECT_EQ(size, read_buffer.Length());
+
+    ASSERT_TRUE(read_buffer.ReadUVarint(&val3));
+    EXPECT_EQ(27u, val3);
+    --size;
+    EXPECT_EQ(size, read_buffer.Length());
+
+    ASSERT_TRUE(read_buffer.ReadUVarint(&val4));
+    EXPECT_EQ(149u, val4);
+    size -= 2;
+    EXPECT_EQ(size, read_buffer.Length());
+
+    ASSERT_TRUE(read_buffer.ReadUVarint(&val5));
+    EXPECT_EQ(68719476736u, val5);
+    size -= 6;
+    EXPECT_EQ(size, read_buffer.Length());
+  }
+}
+
+}  // namespace rtc
diff --git a/rtc_base/byteorder.h b/rtc_base/byteorder.h
new file mode 100644
index 0000000..85f0cc5
--- /dev/null
+++ b/rtc_base/byteorder.h
@@ -0,0 +1,178 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_BYTEORDER_H_
+#define RTC_BASE_BYTEORDER_H_
+
+#if defined(WEBRTC_POSIX) && !defined(__native_client__)
+#include <arpa/inet.h>
+#endif
+
+#include "rtc_base/basictypes.h"
+
+#if defined(WEBRTC_MAC)
+#include <libkern/OSByteOrder.h>
+
+#define htobe16(v) OSSwapHostToBigInt16(v)
+#define htobe32(v) OSSwapHostToBigInt32(v)
+#define htobe64(v) OSSwapHostToBigInt64(v)
+#define be16toh(v) OSSwapBigToHostInt16(v)
+#define be32toh(v) OSSwapBigToHostInt32(v)
+#define be64toh(v) OSSwapBigToHostInt64(v)
+
+#define htole16(v) OSSwapHostToLittleInt16(v)
+#define htole32(v) OSSwapHostToLittleInt32(v)
+#define htole64(v) OSSwapHostToLittleInt64(v)
+#define le16toh(v) OSSwapLittleToHostInt16(v)
+#define le32toh(v) OSSwapLittleToHostInt32(v)
+#define le64toh(v) OSSwapLittleToHostInt64(v)
+#elif defined(WEBRTC_WIN) || defined(__native_client__)
+
+#if defined(WEBRTC_WIN)
+#include <stdlib.h>
+#include <winsock2.h>
+#else
+#include <netinet/in.h>
+#endif
+
+#define htobe16(v) htons(v)
+#define htobe32(v) htonl(v)
+#define be16toh(v) ntohs(v)
+#define be32toh(v) ntohl(v)
+#if defined(WEBRTC_WIN)
+#define htobe64(v) htonll(v)
+#define be64toh(v) ntohll(v)
+#endif
+
+#if defined(RTC_ARCH_CPU_LITTLE_ENDIAN)
+#define htole16(v) (v)
+#define htole32(v) (v)
+#define htole64(v) (v)
+#define le16toh(v) (v)
+#define le32toh(v) (v)
+#define le64toh(v) (v)
+#if defined(__native_client__)
+#define htobe64(v) __builtin_bswap64(v)
+#define be64toh(v) __builtin_bswap64(v)
+#endif
+#elif defined(RTC_ARCH_CPU_BIG_ENDIAN)
+#define htole16(v) __builtin_bswap16(v)
+#define htole32(v) __builtin_bswap32(v)
+#define htole64(v) __builtin_bswap64(v)
+#define le16toh(v) __builtin_bswap16(v)
+#define le32toh(v) __builtin_bswap32(v)
+#define le64toh(v) __builtin_bswap64(v)
+#if defined(__native_client__)
+#define htobe64(v) (v)
+#define be64toh(v) (v)
+#endif
+#else
+#error RTC_ARCH_CPU_BIG_ENDIAN or RTC_ARCH_CPU_LITTLE_ENDIAN must be defined.
+#endif  // defined(RTC_ARCH_CPU_LITTLE_ENDIAN)
+#elif defined(WEBRTC_POSIX)
+#include <endian.h>
+#endif
+
+namespace rtc {
+
+// Reading and writing of little and big-endian numbers from memory
+
+inline void Set8(void* memory, size_t offset, uint8_t v) {
+  static_cast<uint8_t*>(memory)[offset] = v;
+}
+
+inline uint8_t Get8(const void* memory, size_t offset) {
+  return static_cast<const uint8_t*>(memory)[offset];
+}
+
+inline void SetBE16(void* memory, uint16_t v) {
+  *static_cast<uint16_t*>(memory) = htobe16(v);
+}
+
+inline void SetBE32(void* memory, uint32_t v) {
+  *static_cast<uint32_t*>(memory) = htobe32(v);
+}
+
+inline void SetBE64(void* memory, uint64_t v) {
+  *static_cast<uint64_t*>(memory) = htobe64(v);
+}
+
+inline uint16_t GetBE16(const void* memory) {
+  return be16toh(*static_cast<const uint16_t*>(memory));
+}
+
+inline uint32_t GetBE32(const void* memory) {
+  return be32toh(*static_cast<const uint32_t*>(memory));
+}
+
+inline uint64_t GetBE64(const void* memory) {
+  return be64toh(*static_cast<const uint64_t*>(memory));
+}
+
+inline void SetLE16(void* memory, uint16_t v) {
+  *static_cast<uint16_t*>(memory) = htole16(v);
+}
+
+inline void SetLE32(void* memory, uint32_t v) {
+  *static_cast<uint32_t*>(memory) = htole32(v);
+}
+
+inline void SetLE64(void* memory, uint64_t v) {
+  *static_cast<uint64_t*>(memory) = htole64(v);
+}
+
+inline uint16_t GetLE16(const void* memory) {
+  return le16toh(*static_cast<const uint16_t*>(memory));
+}
+
+inline uint32_t GetLE32(const void* memory) {
+  return le32toh(*static_cast<const uint32_t*>(memory));
+}
+
+inline uint64_t GetLE64(const void* memory) {
+  return le64toh(*static_cast<const uint64_t*>(memory));
+}
+
+// Check if the current host is big endian.
+inline bool IsHostBigEndian() {
+#if defined(RTC_ARCH_CPU_BIG_ENDIAN)
+  return true;
+#else
+  return false;
+#endif
+}
+
+inline uint16_t HostToNetwork16(uint16_t n) {
+  return htobe16(n);
+}
+
+inline uint32_t HostToNetwork32(uint32_t n) {
+  return htobe32(n);
+}
+
+inline uint64_t HostToNetwork64(uint64_t n) {
+  return htobe64(n);
+}
+
+inline uint16_t NetworkToHost16(uint16_t n) {
+  return be16toh(n);
+}
+
+inline uint32_t NetworkToHost32(uint32_t n) {
+  return be32toh(n);
+}
+
+inline uint64_t NetworkToHost64(uint64_t n) {
+  return be64toh(n);
+}
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_BYTEORDER_H_
diff --git a/rtc_base/byteorder_unittest.cc b/rtc_base/byteorder_unittest.cc
new file mode 100644
index 0000000..30dc5fa
--- /dev/null
+++ b/rtc_base/byteorder_unittest.cc
@@ -0,0 +1,83 @@
+/*
+ *  Copyright 2012 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdint.h>
+
+#include "rtc_base/byteorder.h"
+#include "rtc_base/gunit.h"
+
+namespace rtc {
+
+// Test memory set functions put values into memory in expected order.
+TEST(ByteOrderTest, TestSet) {
+  uint8_t buf[8] = {0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u};
+  Set8(buf, 0, 0xfb);
+  Set8(buf, 1, 0x12);
+  EXPECT_EQ(0xfb, buf[0]);
+  EXPECT_EQ(0x12, buf[1]);
+  SetBE16(buf, 0x1234);
+  EXPECT_EQ(0x12, buf[0]);
+  EXPECT_EQ(0x34, buf[1]);
+  SetLE16(buf, 0x1234);
+  EXPECT_EQ(0x34, buf[0]);
+  EXPECT_EQ(0x12, buf[1]);
+  SetBE32(buf, 0x12345678);
+  EXPECT_EQ(0x12, buf[0]);
+  EXPECT_EQ(0x34, buf[1]);
+  EXPECT_EQ(0x56, buf[2]);
+  EXPECT_EQ(0x78, buf[3]);
+  SetLE32(buf, 0x12345678);
+  EXPECT_EQ(0x78, buf[0]);
+  EXPECT_EQ(0x56, buf[1]);
+  EXPECT_EQ(0x34, buf[2]);
+  EXPECT_EQ(0x12, buf[3]);
+  SetBE64(buf, UINT64_C(0x0123456789abcdef));
+  EXPECT_EQ(0x01, buf[0]);
+  EXPECT_EQ(0x23, buf[1]);
+  EXPECT_EQ(0x45, buf[2]);
+  EXPECT_EQ(0x67, buf[3]);
+  EXPECT_EQ(0x89, buf[4]);
+  EXPECT_EQ(0xab, buf[5]);
+  EXPECT_EQ(0xcd, buf[6]);
+  EXPECT_EQ(0xef, buf[7]);
+  SetLE64(buf, UINT64_C(0x0123456789abcdef));
+  EXPECT_EQ(0xef, buf[0]);
+  EXPECT_EQ(0xcd, buf[1]);
+  EXPECT_EQ(0xab, buf[2]);
+  EXPECT_EQ(0x89, buf[3]);
+  EXPECT_EQ(0x67, buf[4]);
+  EXPECT_EQ(0x45, buf[5]);
+  EXPECT_EQ(0x23, buf[6]);
+  EXPECT_EQ(0x01, buf[7]);
+}
+
+// Test memory get functions get values from memory in expected order.
+TEST(ByteOrderTest, TestGet) {
+  uint8_t buf[8];
+  buf[0] = 0x01u;
+  buf[1] = 0x23u;
+  buf[2] = 0x45u;
+  buf[3] = 0x67u;
+  buf[4] = 0x89u;
+  buf[5] = 0xabu;
+  buf[6] = 0xcdu;
+  buf[7] = 0xefu;
+  EXPECT_EQ(0x01u, Get8(buf, 0));
+  EXPECT_EQ(0x23u, Get8(buf, 1));
+  EXPECT_EQ(0x0123u, GetBE16(buf));
+  EXPECT_EQ(0x2301u, GetLE16(buf));
+  EXPECT_EQ(0x01234567u, GetBE32(buf));
+  EXPECT_EQ(0x67452301u, GetLE32(buf));
+  EXPECT_EQ(UINT64_C(0x0123456789abcdef), GetBE64(buf));
+  EXPECT_EQ(UINT64_C(0xefcdab8967452301), GetLE64(buf));
+}
+
+}  // namespace rtc
+
diff --git a/rtc_base/callback.h b/rtc_base/callback.h
new file mode 100644
index 0000000..0e035ad
--- /dev/null
+++ b/rtc_base/callback.h
@@ -0,0 +1,261 @@
+// This file was GENERATED by command:
+//     pump.py callback.h.pump
+// DO NOT EDIT BY HAND!!!
+
+/*
+ *  Copyright 2012 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// To generate callback.h from callback.h.pump, execute:
+// ../third_party/googletest/src/googletest/scripts/pump.py callback.h.pump
+
+// Callbacks are callable object containers. They can hold a function pointer
+// or a function object and behave like a value type. Internally, data is
+// reference-counted, making copies and pass-by-value inexpensive.
+//
+// Callbacks are typed using template arguments.  The format is:
+//   CallbackN<ReturnType, ParamType1, ..., ParamTypeN>
+// where N is the number of arguments supplied to the callable object.
+// Callbacks are invoked using operator(), just like a function or a function
+// object. Default-constructed callbacks are "empty," and executing an empty
+// callback does nothing. A callback can be made empty by assigning it from
+// a default-constructed callback.
+//
+// Callbacks are similar in purpose to std::function (which isn't available on
+// all platforms we support) and a lightweight alternative to sigslots. Since
+// they effectively hide the type of the object they call, they're useful in
+// breaking dependencies between objects that need to interact with one another.
+// Notably, they can hold the results of Bind(), std::bind*, etc, without
+// needing
+// to know the resulting object type of those calls.
+//
+// Sigslots, on the other hand, provide a fuller feature set, such as multiple
+// subscriptions to a signal, optional thread-safety, and lifetime tracking of
+// slots. When these features are needed, choose sigslots.
+//
+// Example:
+//   int sqr(int x) { return x * x; }
+//   struct AddK {
+//     int k;
+//     int operator()(int x) const { return x + k; }
+//   } add_k = {5};
+//
+//   Callback1<int, int> my_callback;
+//   cout << my_callback.empty() << endl;  // true
+//
+//   my_callback = Callback1<int, int>(&sqr);
+//   cout << my_callback.empty() << endl;  // false
+//   cout << my_callback(3) << endl;  // 9
+//
+//   my_callback = Callback1<int, int>(add_k);
+//   cout << my_callback(10) << endl;  // 15
+//
+//   my_callback = Callback1<int, int>();
+//   cout << my_callback.empty() << endl;  // true
+
+#ifndef RTC_BASE_CALLBACK_H_
+#define RTC_BASE_CALLBACK_H_
+
+#include "rtc_base/refcount.h"
+#include "rtc_base/refcountedobject.h"
+#include "rtc_base/scoped_ref_ptr.h"
+
+namespace rtc {
+
+template <class R>
+class Callback0 {
+ public:
+  // Default copy operations are appropriate for this class.
+  Callback0() {}
+  template <class T> Callback0(const T& functor)
+      : helper_(new RefCountedObject< HelperImpl<T> >(functor)) {}
+  R operator()() {
+    if (empty())
+      return R();
+    return helper_->Run();
+  }
+  bool empty() const { return !helper_; }
+
+ private:
+  struct Helper : RefCountInterface {
+    virtual ~Helper() {}
+    virtual R Run() = 0;
+  };
+  template <class T> struct HelperImpl : Helper {
+    explicit HelperImpl(const T& functor) : functor_(functor) {}
+    virtual R Run() {
+      return functor_();
+    }
+    T functor_;
+  };
+  scoped_refptr<Helper> helper_;
+};
+
+template <class R,
+          class P1>
+class Callback1 {
+ public:
+  // Default copy operations are appropriate for this class.
+  Callback1() {}
+  template <class T> Callback1(const T& functor)
+      : helper_(new RefCountedObject< HelperImpl<T> >(functor)) {}
+  R operator()(P1 p1) {
+    if (empty())
+      return R();
+    return helper_->Run(p1);
+  }
+  bool empty() const { return !helper_; }
+
+ private:
+  struct Helper : RefCountInterface {
+    virtual ~Helper() {}
+    virtual R Run(P1 p1) = 0;
+  };
+  template <class T> struct HelperImpl : Helper {
+    explicit HelperImpl(const T& functor) : functor_(functor) {}
+    virtual R Run(P1 p1) {
+      return functor_(p1);
+    }
+    T functor_;
+  };
+  scoped_refptr<Helper> helper_;
+};
+
+template <class R,
+          class P1,
+          class P2>
+class Callback2 {
+ public:
+  // Default copy operations are appropriate for this class.
+  Callback2() {}
+  template <class T> Callback2(const T& functor)
+      : helper_(new RefCountedObject< HelperImpl<T> >(functor)) {}
+  R operator()(P1 p1, P2 p2) {
+    if (empty())
+      return R();
+    return helper_->Run(p1, p2);
+  }
+  bool empty() const { return !helper_; }
+
+ private:
+  struct Helper : RefCountInterface {
+    virtual ~Helper() {}
+    virtual R Run(P1 p1, P2 p2) = 0;
+  };
+  template <class T> struct HelperImpl : Helper {
+    explicit HelperImpl(const T& functor) : functor_(functor) {}
+    virtual R Run(P1 p1, P2 p2) {
+      return functor_(p1, p2);
+    }
+    T functor_;
+  };
+  scoped_refptr<Helper> helper_;
+};
+
+template <class R,
+          class P1,
+          class P2,
+          class P3>
+class Callback3 {
+ public:
+  // Default copy operations are appropriate for this class.
+  Callback3() {}
+  template <class T> Callback3(const T& functor)
+      : helper_(new RefCountedObject< HelperImpl<T> >(functor)) {}
+  R operator()(P1 p1, P2 p2, P3 p3) {
+    if (empty())
+      return R();
+    return helper_->Run(p1, p2, p3);
+  }
+  bool empty() const { return !helper_; }
+
+ private:
+  struct Helper : RefCountInterface {
+    virtual ~Helper() {}
+    virtual R Run(P1 p1, P2 p2, P3 p3) = 0;
+  };
+  template <class T> struct HelperImpl : Helper {
+    explicit HelperImpl(const T& functor) : functor_(functor) {}
+    virtual R Run(P1 p1, P2 p2, P3 p3) {
+      return functor_(p1, p2, p3);
+    }
+    T functor_;
+  };
+  scoped_refptr<Helper> helper_;
+};
+
+template <class R,
+          class P1,
+          class P2,
+          class P3,
+          class P4>
+class Callback4 {
+ public:
+  // Default copy operations are appropriate for this class.
+  Callback4() {}
+  template <class T> Callback4(const T& functor)
+      : helper_(new RefCountedObject< HelperImpl<T> >(functor)) {}
+  R operator()(P1 p1, P2 p2, P3 p3, P4 p4) {
+    if (empty())
+      return R();
+    return helper_->Run(p1, p2, p3, p4);
+  }
+  bool empty() const { return !helper_; }
+
+ private:
+  struct Helper : RefCountInterface {
+    virtual ~Helper() {}
+    virtual R Run(P1 p1, P2 p2, P3 p3, P4 p4) = 0;
+  };
+  template <class T> struct HelperImpl : Helper {
+    explicit HelperImpl(const T& functor) : functor_(functor) {}
+    virtual R Run(P1 p1, P2 p2, P3 p3, P4 p4) {
+      return functor_(p1, p2, p3, p4);
+    }
+    T functor_;
+  };
+  scoped_refptr<Helper> helper_;
+};
+
+template <class R,
+          class P1,
+          class P2,
+          class P3,
+          class P4,
+          class P5>
+class Callback5 {
+ public:
+  // Default copy operations are appropriate for this class.
+  Callback5() {}
+  template <class T> Callback5(const T& functor)
+      : helper_(new RefCountedObject< HelperImpl<T> >(functor)) {}
+  R operator()(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
+    if (empty())
+      return R();
+    return helper_->Run(p1, p2, p3, p4, p5);
+  }
+  bool empty() const { return !helper_; }
+
+ private:
+  struct Helper : RefCountInterface {
+    virtual ~Helper() {}
+    virtual R Run(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) = 0;
+  };
+  template <class T> struct HelperImpl : Helper {
+    explicit HelperImpl(const T& functor) : functor_(functor) {}
+    virtual R Run(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
+      return functor_(p1, p2, p3, p4, p5);
+    }
+    T functor_;
+  };
+  scoped_refptr<Helper> helper_;
+};
+}  // namespace rtc
+
+#endif  // RTC_BASE_CALLBACK_H_
diff --git a/rtc_base/callback.h.pump b/rtc_base/callback.h.pump
new file mode 100644
index 0000000..2c40eab
--- /dev/null
+++ b/rtc_base/callback.h.pump
@@ -0,0 +1,104 @@
+/*
+ *  Copyright 2012 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// To generate callback.h from callback.h.pump, execute:
+// ../third_party/googletest/src/googletest/scripts/pump.py callback.h.pump
+
+// Callbacks are callable object containers. They can hold a function pointer
+// or a function object and behave like a value type. Internally, data is
+// reference-counted, making copies and pass-by-value inexpensive.
+//
+// Callbacks are typed using template arguments.  The format is:
+//   CallbackN<ReturnType, ParamType1, ..., ParamTypeN>
+// where N is the number of arguments supplied to the callable object.
+// Callbacks are invoked using operator(), just like a function or a function
+// object. Default-constructed callbacks are "empty," and executing an empty
+// callback does nothing. A callback can be made empty by assigning it from
+// a default-constructed callback.
+//
+// Callbacks are similar in purpose to std::function (which isn't available on
+// all platforms we support) and a lightweight alternative to sigslots. Since
+// they effectively hide the type of the object they call, they're useful in
+// breaking dependencies between objects that need to interact with one another.
+// Notably, they can hold the results of Bind(), std::bind*, etc, without needing
+// to know the resulting object type of those calls.
+//
+// Sigslots, on the other hand, provide a fuller feature set, such as multiple
+// subscriptions to a signal, optional thread-safety, and lifetime tracking of
+// slots. When these features are needed, choose sigslots.
+//
+// Example:
+//   int sqr(int x) { return x * x; }
+//   struct AddK {
+//     int k;
+//     int operator()(int x) const { return x + k; }
+//   } add_k = {5};
+//
+//   Callback1<int, int> my_callback;
+//   cout << my_callback.empty() << endl;  // true
+//
+//   my_callback = Callback1<int, int>(&sqr);
+//   cout << my_callback.empty() << endl;  // false
+//   cout << my_callback(3) << endl;  // 9
+//
+//   my_callback = Callback1<int, int>(add_k);
+//   cout << my_callback(10) << endl;  // 15
+//
+//   my_callback = Callback1<int, int>();
+//   cout << my_callback.empty() << endl;  // true
+
+#ifndef RTC_BASE_CALLBACK_H_
+#define RTC_BASE_CALLBACK_H_
+
+#include "rtc_base/refcount.h"
+#include "rtc_base/refcountedobject.h"
+#include "rtc_base/scoped_ref_ptr.h"
+
+namespace rtc {
+
+$var n = 5
+$range i 0..n
+$for i [[
+$range j 1..i
+
+template <class R$for j [[,
+          class P$j]]>
+class Callback$i {
+ public:
+  // Default copy operations are appropriate for this class.
+  Callback$i() {}
+  template <class T> Callback$i(const T& functor)
+      : helper_(new RefCountedObject< HelperImpl<T> >(functor)) {}
+  R operator()($for j , [[P$j p$j]]) {
+    if (empty())
+      return R();
+    return helper_->Run($for j , [[p$j]]);
+  }
+  bool empty() const { return !helper_; }
+
+ private:
+  struct Helper : RefCountInterface {
+    virtual ~Helper() {}
+    virtual R Run($for j , [[P$j p$j]]) = 0;
+  };
+  template <class T> struct HelperImpl : Helper {
+    explicit HelperImpl(const T& functor) : functor_(functor) {}
+    virtual R Run($for j , [[P$j p$j]]) {
+      return functor_($for j , [[p$j]]);
+    }
+    T functor_;
+  };
+  scoped_refptr<Helper> helper_;
+};
+
+]]
+}  // namespace rtc
+
+#endif  // RTC_BASE_CALLBACK_H_
diff --git a/rtc_base/callback_unittest.cc b/rtc_base/callback_unittest.cc
new file mode 100644
index 0000000..26bfd5d
--- /dev/null
+++ b/rtc_base/callback_unittest.cc
@@ -0,0 +1,140 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/callback.h"
+#include "rtc_base/bind.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/keep_ref_until_done.h"
+#include "rtc_base/refcount.h"
+
+namespace rtc {
+
+namespace {
+
+void f() {}
+int g() { return 42; }
+int h(int x) { return x * x; }
+void i(int& x) { x *= x; }  // NOLINT: Testing refs
+
+struct BindTester {
+  int a() { return 24; }
+  int b(int x) const { return x * x; }
+};
+
+class RefCountedBindTester : public RefCountInterface {
+ public:
+  RefCountedBindTester() : count_(0) {}
+  void AddRef() const override { ++count_; }
+  RefCountReleaseStatus Release() const override {
+    --count_;
+    return count_ == 0 ? RefCountReleaseStatus::kDroppedLastRef
+                       : RefCountReleaseStatus::kOtherRefsRemained;
+  }
+  int RefCount() const { return count_; }
+
+ private:
+  mutable int count_;
+};
+
+}  // namespace
+
+TEST(CallbackTest, VoidReturn) {
+  Callback0<void> cb;
+  EXPECT_TRUE(cb.empty());
+  cb();  // Executing an empty callback should not crash.
+  cb = Callback0<void>(&f);
+  EXPECT_FALSE(cb.empty());
+  cb();
+}
+
+TEST(CallbackTest, IntReturn) {
+  Callback0<int> cb;
+  EXPECT_TRUE(cb.empty());
+  cb = Callback0<int>(&g);
+  EXPECT_FALSE(cb.empty());
+  EXPECT_EQ(42, cb());
+  EXPECT_EQ(42, cb());
+}
+
+TEST(CallbackTest, OneParam) {
+  Callback1<int, int> cb1(&h);
+  EXPECT_FALSE(cb1.empty());
+  EXPECT_EQ(9, cb1(-3));
+  EXPECT_EQ(100, cb1(10));
+
+  // Try clearing a callback.
+  cb1 = Callback1<int, int>();
+  EXPECT_TRUE(cb1.empty());
+
+  // Try a callback with a ref parameter.
+  Callback1<void, int&> cb2(&i);
+  int x = 3;
+  cb2(x);
+  EXPECT_EQ(9, x);
+  cb2(x);
+  EXPECT_EQ(81, x);
+}
+
+TEST(CallbackTest, WithBind) {
+  BindTester t;
+  Callback0<int> cb1 = Bind(&BindTester::a, &t);
+  EXPECT_EQ(24, cb1());
+  EXPECT_EQ(24, cb1());
+  cb1 = Bind(&BindTester::b, &t, 10);
+  EXPECT_EQ(100, cb1());
+  EXPECT_EQ(100, cb1());
+  cb1 = Bind(&BindTester::b, &t, 5);
+  EXPECT_EQ(25, cb1());
+  EXPECT_EQ(25, cb1());
+}
+
+TEST(KeepRefUntilDoneTest, simple) {
+  RefCountedBindTester t;
+  EXPECT_EQ(0, t.RefCount());
+  {
+    Callback0<void> cb = KeepRefUntilDone(&t);
+    EXPECT_EQ(1, t.RefCount());
+    cb();
+    EXPECT_EQ(1, t.RefCount());
+    cb();
+    EXPECT_EQ(1, t.RefCount());
+  }
+  EXPECT_EQ(0, t.RefCount());
+}
+
+TEST(KeepRefUntilDoneTest, copy) {
+  RefCountedBindTester t;
+  EXPECT_EQ(0, t.RefCount());
+  Callback0<void> cb2;
+  {
+    Callback0<void> cb = KeepRefUntilDone(&t);
+    EXPECT_EQ(1, t.RefCount());
+    cb2 = cb;
+  }
+  EXPECT_EQ(1, t.RefCount());
+  cb2 = Callback0<void>();
+  EXPECT_EQ(0, t.RefCount());
+}
+
+TEST(KeepRefUntilDoneTest, scopedref) {
+  RefCountedBindTester t;
+  EXPECT_EQ(0, t.RefCount());
+  {
+    scoped_refptr<RefCountedBindTester> t_scoped_ref(&t);
+    Callback0<void> cb = KeepRefUntilDone(t_scoped_ref);
+    t_scoped_ref = nullptr;
+    EXPECT_EQ(1, t.RefCount());
+    cb();
+    EXPECT_EQ(1, t.RefCount());
+  }
+  EXPECT_EQ(0, t.RefCount());
+}
+
+}  // namespace rtc
diff --git a/rtc_base/checks.cc b/rtc_base/checks.cc
new file mode 100644
index 0000000..820ca96
--- /dev/null
+++ b/rtc_base/checks.cc
@@ -0,0 +1,117 @@
+/*
+ *  Copyright 2006 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Most of this was borrowed (with minor modifications) from V8's and Chromium's
+// src/base/logging.cc.
+
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>
+
+#if defined(WEBRTC_ANDROID)
+#define RTC_LOG_TAG_ANDROID "rtc"
+#include <android/log.h>  // NOLINT
+#endif
+
+#if defined(WEBRTC_WIN)
+#include <windows.h>
+#endif
+
+#if defined(WEBRTC_WIN)
+#define LAST_SYSTEM_ERROR (::GetLastError())
+#elif defined(__native_client__) && __native_client__
+#define LAST_SYSTEM_ERROR (0)
+#elif defined(WEBRTC_POSIX)
+#include <errno.h>
+#define LAST_SYSTEM_ERROR (errno)
+#endif  // WEBRTC_WIN
+
+#include "rtc_base/checks.h"
+
+#if defined(_MSC_VER)
+// Warning C4722: destructor never returns, potential memory leak.
+// FatalMessage's dtor very intentionally aborts.
+#pragma warning(disable:4722)
+#endif
+
+namespace rtc {
+namespace {
+
+void VPrintError(const char* format, va_list args) {
+#if defined(WEBRTC_ANDROID)
+  __android_log_vprint(ANDROID_LOG_ERROR, RTC_LOG_TAG_ANDROID, format, args);
+#else
+  vfprintf(stderr, format, args);
+#endif
+}
+
+#if defined(__GNUC__)
+void PrintError(const char* format, ...)
+    __attribute__((__format__(__printf__, 1, 2)));
+#endif
+
+void PrintError(const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VPrintError(format, args);
+  va_end(args);
+}
+
+}  // namespace
+
+FatalMessage::FatalMessage(const char* file, int line) {
+  Init(file, line);
+}
+
+FatalMessage::FatalMessage(const char* file, int line, std::string* result) {
+  Init(file, line);
+  stream_ << "Check failed: " << *result << std::endl << "# ";
+  delete result;
+}
+
+NO_RETURN FatalMessage::~FatalMessage() {
+  fflush(stdout);
+  fflush(stderr);
+  stream_ << std::endl << "#" << std::endl;
+  PrintError("%s", stream_.str().c_str());
+  fflush(stderr);
+  abort();
+}
+
+void FatalMessage::Init(const char* file, int line) {
+  stream_ << std::endl
+          << std::endl
+          << "#" << std::endl
+          << "# Fatal error in " << file << ", line " << line << std::endl
+          << "# last system error: " << LAST_SYSTEM_ERROR << std::endl
+          << "# ";
+}
+
+// MSVC doesn't like complex extern templates and DLLs.
+#if !defined(COMPILER_MSVC)
+// Explicit instantiations for commonly used comparisons.
+template std::string* MakeCheckOpString<int, int>(
+    const int&, const int&, const char* names);
+template std::string* MakeCheckOpString<unsigned long, unsigned long>(
+    const unsigned long&, const unsigned long&, const char* names);
+template std::string* MakeCheckOpString<unsigned long, unsigned int>(
+    const unsigned long&, const unsigned int&, const char* names);
+template std::string* MakeCheckOpString<unsigned int, unsigned long>(
+    const unsigned int&, const unsigned long&, const char* names);
+template std::string* MakeCheckOpString<std::string, std::string>(
+    const std::string&, const std::string&, const char* name);
+#endif
+
+}  // namespace rtc
+
+// Function to call from the C version of the RTC_CHECK and RTC_DCHECK macros.
+NO_RETURN void rtc_FatalMessage(const char* file, int line, const char* msg) {
+  rtc::FatalMessage(file, line).stream() << msg;
+}
diff --git a/rtc_base/checks.h b/rtc_base/checks.h
new file mode 100644
index 0000000..e9a19e6
--- /dev/null
+++ b/rtc_base/checks.h
@@ -0,0 +1,290 @@
+/*
+ *  Copyright 2006 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_CHECKS_H_
+#define RTC_BASE_CHECKS_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+// If you for some reson need to know if DCHECKs are on, test the value of
+// RTC_DCHECK_IS_ON. (Test its value, not if it's defined; it'll always be
+// defined, to either a true or a false value.)
+#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#define RTC_DCHECK_IS_ON 1
+#else
+#define RTC_DCHECK_IS_ON 0
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+NO_RETURN void rtc_FatalMessage(const char* file, int line, const char* msg);
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#ifdef __cplusplus
+// C++ version.
+
+#include <sstream>
+#include <string>
+
+#include "rtc_base/numerics/safe_compare.h"
+
+// The macros here print a message to stderr and abort under various
+// conditions. All will accept additional stream messages. For example:
+// RTC_DCHECK_EQ(foo, bar) << "I'm printed when foo != bar.";
+//
+// - RTC_CHECK(x) is an assertion that x is always true, and that if it isn't,
+//   it's better to terminate the process than to continue. During development,
+//   the reason that it's better to terminate might simply be that the error
+//   handling code isn't in place yet; in production, the reason might be that
+//   the author of the code truly believes that x will always be true, but that
+//   she recognizes that if she is wrong, abrupt and unpleasant process
+//   termination is still better than carrying on with the assumption violated.
+//
+//   RTC_CHECK always evaluates its argument, so it's OK for x to have side
+//   effects.
+//
+// - RTC_DCHECK(x) is the same as RTC_CHECK(x)---an assertion that x is always
+//   true---except that x will only be evaluated in debug builds; in production
+//   builds, x is simply assumed to be true. This is useful if evaluating x is
+//   expensive and the expected cost of failing to detect the violated
+//   assumption is acceptable. You should not handle cases where a production
+//   build fails to spot a violated condition, even those that would result in
+//   crashes. If the code needs to cope with the error, make it cope, but don't
+//   call RTC_DCHECK; if the condition really can't occur, but you'd sleep
+//   better at night knowing that the process will suicide instead of carrying
+//   on in case you were wrong, use RTC_CHECK instead of RTC_DCHECK.
+//
+//   RTC_DCHECK only evaluates its argument in debug builds, so if x has visible
+//   side effects, you need to write e.g.
+//     bool w = x; RTC_DCHECK(w);
+//
+// - RTC_CHECK_EQ, _NE, _GT, ..., and RTC_DCHECK_EQ, _NE, _GT, ... are
+//   specialized variants of RTC_CHECK and RTC_DCHECK that print prettier
+//   messages if the condition doesn't hold. Prefer them to raw RTC_CHECK and
+//   RTC_DCHECK.
+//
+// - FATAL() aborts unconditionally.
+//
+// TODO(ajm): Ideally, checks.h would be combined with logging.h, but
+// consolidation with system_wrappers/logging.h should happen first.
+
+namespace rtc {
+
+// Helper macro which avoids evaluating the arguments to a stream if
+// the condition doesn't hold.
+#define RTC_LAZY_STREAM(stream, condition)                                    \
+  !(condition) ? static_cast<void>(0) : rtc::FatalMessageVoidify() & (stream)
+
+// The actual stream used isn't important. We reference |ignored| in the code
+// but don't evaluate it; this is to avoid "unused variable" warnings (we do so
+// in a particularly convoluted way with an extra ?: because that appears to be
+// the simplest construct that keeps Visual Studio from complaining about
+// condition being unused).
+#define RTC_EAT_STREAM_PARAMETERS(ignored) \
+  (true ? true : ((void)(ignored), true))  \
+      ? static_cast<void>(0)               \
+      : rtc::FatalMessageVoidify() & rtc::FatalMessage("", 0).stream()
+
+// Call RTC_EAT_STREAM_PARAMETERS with an argument that fails to compile if
+// values of the same types as |a| and |b| can't be compared with the given
+// operation, and that would evaluate |a| and |b| if evaluated.
+#define RTC_EAT_STREAM_PARAMETERS_OP(op, a, b) \
+  RTC_EAT_STREAM_PARAMETERS(((void)rtc::Safe##op(a, b)))
+
+// RTC_CHECK dies with a fatal error if condition is not true. It is *not*
+// controlled by NDEBUG or anything else, so the check will be executed
+// regardless of compilation mode.
+//
+// We make sure RTC_CHECK et al. always evaluates their arguments, as
+// doing RTC_CHECK(FunctionWithSideEffect()) is a common idiom.
+#define RTC_CHECK(condition)                                      \
+  RTC_LAZY_STREAM(rtc::FatalMessage(__FILE__, __LINE__).stream(), \
+                  !(condition))                                   \
+      << "Check failed: " #condition << std::endl << "# "
+
+// Helper macro for binary operators.
+// Don't use this macro directly in your code, use RTC_CHECK_EQ et al below.
+//
+// TODO(akalin): Rewrite this so that constructs like if (...)
+// RTC_CHECK_EQ(...) else { ... } work properly.
+#define RTC_CHECK_OP(name, op, val1, val2)                                 \
+  if (std::string* _result =                                               \
+          rtc::Check##name##Impl((val1), (val2), #val1 " " #op " " #val2)) \
+    rtc::FatalMessage(__FILE__, __LINE__, _result).stream()
+
+// Build the error message string.  This is separate from the "Impl"
+// function template because it is not performance critical and so can
+// be out of line, while the "Impl" code should be inline.  Caller
+// takes ownership of the returned string.
+template<class t1, class t2>
+std::string* MakeCheckOpString(const t1& v1, const t2& v2, const char* names) {
+  std::ostringstream ss;
+  ss << names << " (" << v1 << " vs. " << v2 << ")";
+  std::string* msg = new std::string(ss.str());
+  return msg;
+}
+
+// MSVC doesn't like complex extern templates and DLLs.
+#if !defined(COMPILER_MSVC)
+// Commonly used instantiations of MakeCheckOpString<>. Explicitly instantiated
+// in logging.cc.
+extern template std::string* MakeCheckOpString<int, int>(
+    const int&, const int&, const char* names);
+extern template
+std::string* MakeCheckOpString<unsigned long, unsigned long>(
+    const unsigned long&, const unsigned long&, const char* names);
+extern template
+std::string* MakeCheckOpString<unsigned long, unsigned int>(
+    const unsigned long&, const unsigned int&, const char* names);
+extern template
+std::string* MakeCheckOpString<unsigned int, unsigned long>(
+    const unsigned int&, const unsigned long&, const char* names);
+extern template
+std::string* MakeCheckOpString<std::string, std::string>(
+    const std::string&, const std::string&, const char* name);
+#endif
+
+// Helper functions for RTC_CHECK_OP macro.
+// The (int, int) specialization works around the issue that the compiler
+// will not instantiate the template version of the function on values of
+// unnamed enum type - see comment below.
+#define DEFINE_RTC_CHECK_OP_IMPL(name)                                       \
+  template <class t1, class t2>                                              \
+  inline std::string* Check##name##Impl(const t1& v1, const t2& v2,          \
+                                        const char* names) {                 \
+    if (rtc::Safe##name(v1, v2))                                             \
+      return nullptr;                                                        \
+    else                                                                     \
+      return rtc::MakeCheckOpString(v1, v2, names);                          \
+  }                                                                          \
+  inline std::string* Check##name##Impl(int v1, int v2, const char* names) { \
+    if (rtc::Safe##name(v1, v2))                                             \
+      return nullptr;                                                        \
+    else                                                                     \
+      return rtc::MakeCheckOpString(v1, v2, names);                          \
+  }
+DEFINE_RTC_CHECK_OP_IMPL(Eq)
+DEFINE_RTC_CHECK_OP_IMPL(Ne)
+DEFINE_RTC_CHECK_OP_IMPL(Le)
+DEFINE_RTC_CHECK_OP_IMPL(Lt)
+DEFINE_RTC_CHECK_OP_IMPL(Ge)
+DEFINE_RTC_CHECK_OP_IMPL(Gt)
+#undef DEFINE_RTC_CHECK_OP_IMPL
+
+#define RTC_CHECK_EQ(val1, val2) RTC_CHECK_OP(Eq, ==, val1, val2)
+#define RTC_CHECK_NE(val1, val2) RTC_CHECK_OP(Ne, !=, val1, val2)
+#define RTC_CHECK_LE(val1, val2) RTC_CHECK_OP(Le, <=, val1, val2)
+#define RTC_CHECK_LT(val1, val2) RTC_CHECK_OP(Lt, <, val1, val2)
+#define RTC_CHECK_GE(val1, val2) RTC_CHECK_OP(Ge, >=, val1, val2)
+#define RTC_CHECK_GT(val1, val2) RTC_CHECK_OP(Gt, >, val1, val2)
+
+// The RTC_DCHECK macro is equivalent to RTC_CHECK except that it only generates
+// code in debug builds. It does reference the condition parameter in all cases,
+// though, so callers won't risk getting warnings about unused variables.
+#if RTC_DCHECK_IS_ON
+#define RTC_DCHECK(condition) RTC_CHECK(condition)
+#define RTC_DCHECK_EQ(v1, v2) RTC_CHECK_EQ(v1, v2)
+#define RTC_DCHECK_NE(v1, v2) RTC_CHECK_NE(v1, v2)
+#define RTC_DCHECK_LE(v1, v2) RTC_CHECK_LE(v1, v2)
+#define RTC_DCHECK_LT(v1, v2) RTC_CHECK_LT(v1, v2)
+#define RTC_DCHECK_GE(v1, v2) RTC_CHECK_GE(v1, v2)
+#define RTC_DCHECK_GT(v1, v2) RTC_CHECK_GT(v1, v2)
+#else
+#define RTC_DCHECK(condition) RTC_EAT_STREAM_PARAMETERS(condition)
+#define RTC_DCHECK_EQ(v1, v2) RTC_EAT_STREAM_PARAMETERS_OP(Eq, v1, v2)
+#define RTC_DCHECK_NE(v1, v2) RTC_EAT_STREAM_PARAMETERS_OP(Ne, v1, v2)
+#define RTC_DCHECK_LE(v1, v2) RTC_EAT_STREAM_PARAMETERS_OP(Le, v1, v2)
+#define RTC_DCHECK_LT(v1, v2) RTC_EAT_STREAM_PARAMETERS_OP(Lt, v1, v2)
+#define RTC_DCHECK_GE(v1, v2) RTC_EAT_STREAM_PARAMETERS_OP(Ge, v1, v2)
+#define RTC_DCHECK_GT(v1, v2) RTC_EAT_STREAM_PARAMETERS_OP(Gt, v1, v2)
+#endif
+
+// This is identical to LogMessageVoidify but in name.
+class FatalMessageVoidify {
+ public:
+  FatalMessageVoidify() { }
+  // This has to be an operator with a precedence lower than << but
+  // higher than ?:
+  void operator&(std::ostream&) { }
+};
+
+#define RTC_UNREACHABLE_CODE_HIT false
+#define RTC_NOTREACHED() RTC_DCHECK(RTC_UNREACHABLE_CODE_HIT)
+
+// TODO(bugs.webrtc.org/8454): Add an RTC_ prefix or rename differently.
+#define FATAL() rtc::FatalMessage(__FILE__, __LINE__).stream()
+// TODO(ajm): Consider adding RTC_NOTIMPLEMENTED macro when
+// base/logging.h and system_wrappers/logging.h are consolidated such that we
+// can match the Chromium behavior.
+
+// Like a stripped-down LogMessage from logging.h, except that it aborts.
+class FatalMessage {
+ public:
+  FatalMessage(const char* file, int line);
+  // Used for RTC_CHECK_EQ(), etc. Takes ownership of the given string.
+  FatalMessage(const char* file, int line, std::string* result);
+  NO_RETURN ~FatalMessage();
+
+  std::ostream& stream() { return stream_; }
+
+ private:
+  void Init(const char* file, int line);
+
+  std::ostringstream stream_;
+};
+
+// Performs the integer division a/b and returns the result. CHECKs that the
+// remainder is zero.
+template <typename T>
+inline T CheckedDivExact(T a, T b) {
+  RTC_CHECK_EQ(a % b, 0) << a << " is not evenly divisible by " << b;
+  return a / b;
+}
+
+}  // namespace rtc
+
+#else  // __cplusplus not defined
+// C version. Lacks many features compared to the C++ version, but usage
+// guidelines are the same.
+
+#define RTC_CHECK(condition)                                             \
+  do {                                                                   \
+    if (!(condition)) {                                                  \
+      rtc_FatalMessage(__FILE__, __LINE__, "CHECK failed: " #condition); \
+    }                                                                    \
+  } while (0)
+
+#define RTC_CHECK_EQ(a, b) RTC_CHECK((a) == (b))
+#define RTC_CHECK_NE(a, b) RTC_CHECK((a) != (b))
+#define RTC_CHECK_LE(a, b) RTC_CHECK((a) <= (b))
+#define RTC_CHECK_LT(a, b) RTC_CHECK((a) < (b))
+#define RTC_CHECK_GE(a, b) RTC_CHECK((a) >= (b))
+#define RTC_CHECK_GT(a, b) RTC_CHECK((a) > (b))
+
+#define RTC_DCHECK(condition)                                             \
+  do {                                                                    \
+    if (RTC_DCHECK_IS_ON && !(condition)) {                               \
+      rtc_FatalMessage(__FILE__, __LINE__, "DCHECK failed: " #condition); \
+    }                                                                     \
+  } while (0)
+
+#define RTC_DCHECK_EQ(a, b) RTC_DCHECK((a) == (b))
+#define RTC_DCHECK_NE(a, b) RTC_DCHECK((a) != (b))
+#define RTC_DCHECK_LE(a, b) RTC_DCHECK((a) <= (b))
+#define RTC_DCHECK_LT(a, b) RTC_DCHECK((a) < (b))
+#define RTC_DCHECK_GE(a, b) RTC_DCHECK((a) >= (b))
+#define RTC_DCHECK_GT(a, b) RTC_DCHECK((a) > (b))
+
+#endif  // __cplusplus
+
+#endif  // RTC_BASE_CHECKS_H_
diff --git a/rtc_base/compile_assert_c.h b/rtc_base/compile_assert_c.h
new file mode 100644
index 0000000..c83d314
--- /dev/null
+++ b/rtc_base/compile_assert_c.h
@@ -0,0 +1,21 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_COMPILE_ASSERT_C_H_
+#define RTC_BASE_COMPILE_ASSERT_C_H_
+
+// Use this macro to verify at compile time that certain restrictions are met.
+// The argument is the boolean expression to evaluate.
+// Example:
+//   RTC_COMPILE_ASSERT(sizeof(foo) < 128);
+// Note: In C++, use static_assert instead!
+#define RTC_COMPILE_ASSERT(expression) switch (0) {case 0: case expression:;}
+
+#endif  // RTC_BASE_COMPILE_ASSERT_C_H_
diff --git a/rtc_base/constructormagic.h b/rtc_base/constructormagic.h
new file mode 100644
index 0000000..8a953aa
--- /dev/null
+++ b/rtc_base/constructormagic.h
@@ -0,0 +1,34 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_CONSTRUCTORMAGIC_H_
+#define RTC_BASE_CONSTRUCTORMAGIC_H_
+
+// Put this in the declarations for a class to be unassignable.
+#define RTC_DISALLOW_ASSIGN(TypeName) \
+  void operator=(const TypeName&) = delete
+
+// A macro to disallow the copy constructor and operator= functions. This should
+// be used in the declarations for a class.
+#define RTC_DISALLOW_COPY_AND_ASSIGN(TypeName) \
+  TypeName(const TypeName&) = delete;          \
+  RTC_DISALLOW_ASSIGN(TypeName)
+
+// A macro to disallow all the implicit constructors, namely the default
+// constructor, copy constructor and operator= functions.
+//
+// This should be used in the declarations for a class that wants to prevent
+// anyone from instantiating it. This is especially useful for classes
+// containing only static methods.
+#define RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+  TypeName() = delete;                               \
+  RTC_DISALLOW_COPY_AND_ASSIGN(TypeName)
+
+#endif  // RTC_BASE_CONSTRUCTORMAGIC_H_
diff --git a/rtc_base/copyonwritebuffer.cc b/rtc_base/copyonwritebuffer.cc
new file mode 100644
index 0000000..579dd46
--- /dev/null
+++ b/rtc_base/copyonwritebuffer.cc
@@ -0,0 +1,112 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/copyonwritebuffer.h"
+
+namespace rtc {
+
+CopyOnWriteBuffer::CopyOnWriteBuffer() {
+  RTC_DCHECK(IsConsistent());
+}
+
+CopyOnWriteBuffer::CopyOnWriteBuffer(const CopyOnWriteBuffer& buf)
+    : buffer_(buf.buffer_) {
+}
+
+CopyOnWriteBuffer::CopyOnWriteBuffer(CopyOnWriteBuffer&& buf)
+    : buffer_(std::move(buf.buffer_)) {
+}
+
+CopyOnWriteBuffer::CopyOnWriteBuffer(size_t size)
+    : buffer_(size > 0 ? new RefCountedObject<Buffer>(size) : nullptr) {
+  RTC_DCHECK(IsConsistent());
+}
+
+CopyOnWriteBuffer::CopyOnWriteBuffer(size_t size, size_t capacity)
+    : buffer_(size > 0 || capacity > 0
+          ? new RefCountedObject<Buffer>(size, capacity)
+          : nullptr) {
+  RTC_DCHECK(IsConsistent());
+}
+
+CopyOnWriteBuffer::~CopyOnWriteBuffer() = default;
+
+bool CopyOnWriteBuffer::operator==(const CopyOnWriteBuffer& buf) const {
+  // Must either use the same buffer internally or have the same contents.
+  RTC_DCHECK(IsConsistent());
+  RTC_DCHECK(buf.IsConsistent());
+  return buffer_.get() == buf.buffer_.get() ||
+      (buffer_.get() && buf.buffer_.get() &&
+      *buffer_.get() == *buf.buffer_.get());
+}
+
+void CopyOnWriteBuffer::SetSize(size_t size) {
+  RTC_DCHECK(IsConsistent());
+  if (!buffer_) {
+    if (size > 0) {
+      buffer_ = new RefCountedObject<Buffer>(size);
+    }
+    RTC_DCHECK(IsConsistent());
+    return;
+  }
+
+  // Clone data if referenced.
+  if (!buffer_->HasOneRef()) {
+    buffer_ = new RefCountedObject<Buffer>(
+        buffer_->data(),
+        std::min(buffer_->size(), size),
+        std::max(buffer_->capacity(), size));
+  }
+  buffer_->SetSize(size);
+  RTC_DCHECK(IsConsistent());
+}
+
+void CopyOnWriteBuffer::EnsureCapacity(size_t capacity) {
+  RTC_DCHECK(IsConsistent());
+  if (!buffer_) {
+    if (capacity > 0) {
+      buffer_ = new RefCountedObject<Buffer>(0, capacity);
+    }
+    RTC_DCHECK(IsConsistent());
+    return;
+  } else if (capacity <= buffer_->capacity()) {
+    return;
+  }
+
+  CloneDataIfReferenced(std::max(buffer_->capacity(), capacity));
+  buffer_->EnsureCapacity(capacity);
+  RTC_DCHECK(IsConsistent());
+}
+
+void CopyOnWriteBuffer::Clear() {
+  if (!buffer_)
+    return;
+
+  if (buffer_->HasOneRef()) {
+    buffer_->Clear();
+  } else {
+    buffer_ = new RefCountedObject<Buffer>(0, buffer_->capacity());
+  }
+  RTC_DCHECK(IsConsistent());
+}
+
+void CopyOnWriteBuffer::CloneDataIfReferenced(size_t new_capacity) {
+  if (buffer_->HasOneRef()) {
+    return;
+  }
+
+  buffer_ = new RefCountedObject<Buffer>(buffer_->data(), buffer_->size(),
+      new_capacity);
+  RTC_DCHECK(IsConsistent());
+}
+
+
+
+}  // namespace rtc
diff --git a/rtc_base/copyonwritebuffer.h b/rtc_base/copyonwritebuffer.h
new file mode 100644
index 0000000..c4bba87
--- /dev/null
+++ b/rtc_base/copyonwritebuffer.h
@@ -0,0 +1,242 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_COPYONWRITEBUFFER_H_
+#define RTC_BASE_COPYONWRITEBUFFER_H_
+
+#include <algorithm>
+#include <utility>
+
+#include "rtc_base/buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/refcountedobject.h"
+#include "rtc_base/scoped_ref_ptr.h"
+
+namespace rtc {
+
+class CopyOnWriteBuffer {
+ public:
+  // An empty buffer.
+  CopyOnWriteBuffer();
+  // Copy size and contents of an existing buffer.
+  CopyOnWriteBuffer(const CopyOnWriteBuffer& buf);
+  // Move contents from an existing buffer.
+  CopyOnWriteBuffer(CopyOnWriteBuffer&& buf);
+
+  // Construct a buffer with the specified number of uninitialized bytes.
+  explicit CopyOnWriteBuffer(size_t size);
+  CopyOnWriteBuffer(size_t size, size_t capacity);
+
+  // Construct a buffer and copy the specified number of bytes into it. The
+  // source array may be (const) uint8_t*, int8_t*, or char*.
+  template <typename T,
+            typename std::enable_if<
+                internal::BufferCompat<uint8_t, T>::value>::type* = nullptr>
+  CopyOnWriteBuffer(const T* data, size_t size)
+      : CopyOnWriteBuffer(data, size, size) {}
+  template <typename T,
+            typename std::enable_if<
+                internal::BufferCompat<uint8_t, T>::value>::type* = nullptr>
+  CopyOnWriteBuffer(const T* data, size_t size, size_t capacity)
+      : CopyOnWriteBuffer(size, capacity) {
+    if (buffer_) {
+      std::memcpy(buffer_->data(), data, size);
+    }
+  }
+
+  // Construct a buffer from the contents of an array.
+  template <typename T,
+            size_t N,
+            typename std::enable_if<
+                internal::BufferCompat<uint8_t, T>::value>::type* = nullptr>
+  CopyOnWriteBuffer(const T (&array)[N])  // NOLINT: runtime/explicit
+      : CopyOnWriteBuffer(array, N) {}
+
+  ~CopyOnWriteBuffer();
+
+  // Get a pointer to the data. Just .data() will give you a (const) uint8_t*,
+  // but you may also use .data<int8_t>() and .data<char>().
+  template <typename T = uint8_t,
+            typename std::enable_if<
+                internal::BufferCompat<uint8_t, T>::value>::type* = nullptr>
+  const T* data() const {
+    return cdata<T>();
+  }
+
+  // Get writable pointer to the data. This will create a copy of the underlying
+  // data if it is shared with other buffers.
+  template <typename T = uint8_t,
+            typename std::enable_if<
+                internal::BufferCompat<uint8_t, T>::value>::type* = nullptr>
+  T* data() {
+    RTC_DCHECK(IsConsistent());
+    if (!buffer_) {
+      return nullptr;
+    }
+    CloneDataIfReferenced(buffer_->capacity());
+    return buffer_->data<T>();
+  }
+
+  // Get const pointer to the data. This will not create a copy of the
+  // underlying data if it is shared with other buffers.
+  template <typename T = uint8_t,
+            typename std::enable_if<
+                internal::BufferCompat<uint8_t, T>::value>::type* = nullptr>
+  const T* cdata() const {
+    RTC_DCHECK(IsConsistent());
+    if (!buffer_) {
+      return nullptr;
+    }
+    return buffer_->data<T>();
+  }
+
+  size_t size() const {
+    RTC_DCHECK(IsConsistent());
+    return buffer_ ? buffer_->size() : 0;
+  }
+
+  size_t capacity() const {
+    RTC_DCHECK(IsConsistent());
+    return buffer_ ? buffer_->capacity() : 0;
+  }
+
+  CopyOnWriteBuffer& operator=(const CopyOnWriteBuffer& buf) {
+    RTC_DCHECK(IsConsistent());
+    RTC_DCHECK(buf.IsConsistent());
+    if (&buf != this) {
+      buffer_ = buf.buffer_;
+    }
+    return *this;
+  }
+
+  CopyOnWriteBuffer& operator=(CopyOnWriteBuffer&& buf) {
+    RTC_DCHECK(IsConsistent());
+    RTC_DCHECK(buf.IsConsistent());
+    buffer_ = std::move(buf.buffer_);
+    return *this;
+  }
+
+  bool operator==(const CopyOnWriteBuffer& buf) const;
+
+  bool operator!=(const CopyOnWriteBuffer& buf) const {
+    return !(*this == buf);
+  }
+
+  uint8_t& operator[](size_t index) {
+    RTC_DCHECK_LT(index, size());
+    return data()[index];
+  }
+
+  uint8_t operator[](size_t index) const {
+    RTC_DCHECK_LT(index, size());
+    return cdata()[index];
+  }
+
+  // Replace the contents of the buffer. Accepts the same types as the
+  // constructors.
+  template <typename T,
+            typename std::enable_if<
+                internal::BufferCompat<uint8_t, T>::value>::type* = nullptr>
+  void SetData(const T* data, size_t size) {
+    RTC_DCHECK(IsConsistent());
+    if (!buffer_) {
+      buffer_ = size > 0 ? new RefCountedObject<Buffer>(data, size) : nullptr;
+    } else if (!buffer_->HasOneRef()) {
+      buffer_ = new RefCountedObject<Buffer>(data, size, buffer_->capacity());
+    } else {
+      buffer_->SetData(data, size);
+    }
+    RTC_DCHECK(IsConsistent());
+  }
+
+  template <typename T,
+            size_t N,
+            typename std::enable_if<
+                internal::BufferCompat<uint8_t, T>::value>::type* = nullptr>
+  void SetData(const T (&array)[N]) {
+    SetData(array, N);
+  }
+
+  void SetData(const CopyOnWriteBuffer& buf) {
+    RTC_DCHECK(IsConsistent());
+    RTC_DCHECK(buf.IsConsistent());
+    if (&buf != this) {
+      buffer_ = buf.buffer_;
+    }
+  }
+
+  // Append data to the buffer. Accepts the same types as the constructors.
+  template <typename T,
+            typename std::enable_if<
+                internal::BufferCompat<uint8_t, T>::value>::type* = nullptr>
+  void AppendData(const T* data, size_t size) {
+    RTC_DCHECK(IsConsistent());
+    if (!buffer_) {
+      buffer_ = new RefCountedObject<Buffer>(data, size);
+      RTC_DCHECK(IsConsistent());
+      return;
+    }
+
+    CloneDataIfReferenced(std::max(buffer_->capacity(),
+        buffer_->size() + size));
+    buffer_->AppendData(data, size);
+    RTC_DCHECK(IsConsistent());
+  }
+
+  template <typename T,
+            size_t N,
+            typename std::enable_if<
+                internal::BufferCompat<uint8_t, T>::value>::type* = nullptr>
+  void AppendData(const T (&array)[N]) {
+    AppendData(array, N);
+  }
+
+  void AppendData(const CopyOnWriteBuffer& buf) {
+    AppendData(buf.data(), buf.size());
+  }
+
+  // Sets the size of the buffer. If the new size is smaller than the old, the
+  // buffer contents will be kept but truncated; if the new size is greater,
+  // the existing contents will be kept and the new space will be
+  // uninitialized.
+  void SetSize(size_t size);
+
+  // Ensure that the buffer size can be increased to at least capacity without
+  // further reallocation. (Of course, this operation might need to reallocate
+  // the buffer.)
+  void EnsureCapacity(size_t capacity);
+
+  // Resets the buffer to zero size without altering capacity. Works even if the
+  // buffer has been moved from.
+  void Clear();
+
+  // Swaps two buffers.
+  friend void swap(CopyOnWriteBuffer& a, CopyOnWriteBuffer& b) {
+    std::swap(a.buffer_, b.buffer_);
+  }
+
+ private:
+  // Create a copy of the underlying data if it is referenced from other Buffer
+  // objects.
+  void CloneDataIfReferenced(size_t new_capacity);
+
+  // Pre- and postcondition of all methods.
+  bool IsConsistent() const {
+    return (!buffer_ || buffer_->capacity() > 0);
+  }
+
+  // buffer_ is either null, or points to an rtc::Buffer with capacity > 0.
+  scoped_refptr<RefCountedObject<Buffer>> buffer_;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_COPYONWRITEBUFFER_H_
diff --git a/rtc_base/copyonwritebuffer_unittest.cc b/rtc_base/copyonwritebuffer_unittest.cc
new file mode 100644
index 0000000..24a57d4
--- /dev/null
+++ b/rtc_base/copyonwritebuffer_unittest.cc
@@ -0,0 +1,319 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/copyonwritebuffer.h"
+#include "rtc_base/gunit.h"
+
+namespace rtc {
+
+namespace {
+
+// clang-format off
+const uint8_t kTestData[] = {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
+                             0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
+// clang-format on
+
+}  // namespace
+
+void EnsureBuffersShareData(const CopyOnWriteBuffer& buf1,
+                            const CopyOnWriteBuffer& buf2) {
+  // Data is shared between buffers.
+  EXPECT_EQ(buf1.size(), buf2.size());
+  EXPECT_EQ(buf1.capacity(), buf2.capacity());
+  const uint8_t* data1 = buf1.data();
+  const uint8_t* data2 = buf2.data();
+  EXPECT_EQ(data1, data2);
+  EXPECT_EQ(buf1, buf2);
+}
+
+void EnsureBuffersDontShareData(const CopyOnWriteBuffer& buf1,
+                                const CopyOnWriteBuffer& buf2) {
+  // Data is not shared between buffers.
+  const uint8_t* data1 = buf1.cdata();
+  const uint8_t* data2 = buf2.cdata();
+  EXPECT_NE(data1, data2);
+}
+
+TEST(CopyOnWriteBufferTest, TestCreateEmptyData) {
+  CopyOnWriteBuffer buf(static_cast<const uint8_t*>(nullptr), 0);
+  EXPECT_EQ(buf.size(), 0u);
+  EXPECT_EQ(buf.capacity(), 0u);
+  EXPECT_EQ(buf.data(), nullptr);
+}
+
+TEST(CopyOnWriteBufferTest, TestMoveConstruct) {
+  CopyOnWriteBuffer buf1(kTestData, 3, 10);
+  size_t buf1_size = buf1.size();
+  size_t buf1_capacity = buf1.capacity();
+  const uint8_t* buf1_data = buf1.cdata();
+
+  CopyOnWriteBuffer buf2(std::move(buf1));
+  EXPECT_EQ(buf1.size(), 0u);
+  EXPECT_EQ(buf1.capacity(), 0u);
+  EXPECT_EQ(buf1.data(), nullptr);
+  EXPECT_EQ(buf2.size(), buf1_size);
+  EXPECT_EQ(buf2.capacity(), buf1_capacity);
+  EXPECT_EQ(buf2.data(), buf1_data);
+}
+
+TEST(CopyOnWriteBufferTest, TestMoveAssign) {
+  CopyOnWriteBuffer buf1(kTestData, 3, 10);
+  size_t buf1_size = buf1.size();
+  size_t buf1_capacity = buf1.capacity();
+  const uint8_t* buf1_data = buf1.cdata();
+
+  CopyOnWriteBuffer buf2;
+  buf2 = std::move(buf1);
+  EXPECT_EQ(buf1.size(), 0u);
+  EXPECT_EQ(buf1.capacity(), 0u);
+  EXPECT_EQ(buf1.data(), nullptr);
+  EXPECT_EQ(buf2.size(), buf1_size);
+  EXPECT_EQ(buf2.capacity(), buf1_capacity);
+  EXPECT_EQ(buf2.data(), buf1_data);
+}
+
+TEST(CopyOnWriteBufferTest, TestSwap) {
+  CopyOnWriteBuffer buf1(kTestData, 3, 10);
+  size_t buf1_size = buf1.size();
+  size_t buf1_capacity = buf1.capacity();
+  const uint8_t* buf1_data = buf1.cdata();
+
+  CopyOnWriteBuffer buf2(kTestData, 6, 20);
+  size_t buf2_size = buf2.size();
+  size_t buf2_capacity = buf2.capacity();
+  const uint8_t* buf2_data = buf2.cdata();
+
+  std::swap(buf1, buf2);
+  EXPECT_EQ(buf1.size(), buf2_size);
+  EXPECT_EQ(buf1.capacity(), buf2_capacity);
+  EXPECT_EQ(buf1.data(), buf2_data);
+  EXPECT_EQ(buf2.size(), buf1_size);
+  EXPECT_EQ(buf2.capacity(), buf1_capacity);
+  EXPECT_EQ(buf2.data(), buf1_data);
+}
+
+TEST(CopyOnWriteBufferTest, TestAppendData) {
+  CopyOnWriteBuffer buf1(kTestData, 3, 10);
+  CopyOnWriteBuffer buf2(buf1);
+
+  EnsureBuffersShareData(buf1, buf2);
+
+  // AppendData copies the underlying buffer.
+  buf2.AppendData("foo");
+  EXPECT_EQ(buf2.size(), buf1.size() + 4);  // "foo" + trailing 0x00
+  EXPECT_EQ(buf2.capacity(), buf1.capacity());
+  EXPECT_NE(buf2.data(), buf1.data());
+
+  EXPECT_EQ(buf1, CopyOnWriteBuffer(kTestData, 3));
+  const int8_t exp[] = {0x0, 0x1, 0x2, 'f', 'o', 'o', 0x0};
+  EXPECT_EQ(buf2, CopyOnWriteBuffer(exp));
+}
+
+TEST(CopyOnWriteBufferTest, SetEmptyData) {
+  CopyOnWriteBuffer buf(10);
+
+  buf.SetData<uint8_t>(nullptr, 0);
+
+  EXPECT_EQ(0u, buf.size());
+}
+
+TEST(CopyOnWriteBufferTest, SetDataNoMoreThanCapacityDoesntCauseReallocation) {
+  CopyOnWriteBuffer buf1(3, 10);
+  const uint8_t* const original_allocation = buf1.cdata();
+
+  buf1.SetData(kTestData, 10);
+
+  EXPECT_EQ(original_allocation, buf1.cdata());
+  EXPECT_EQ(buf1, CopyOnWriteBuffer(kTestData, 10));
+}
+
+TEST(CopyOnWriteBufferTest, SetDataMakeReferenceCopy) {
+  CopyOnWriteBuffer buf1(kTestData, 3, 10);
+  CopyOnWriteBuffer buf2;
+
+  buf2.SetData(buf1);
+
+  EnsureBuffersShareData(buf1, buf2);
+}
+
+TEST(CopyOnWriteBufferTest, SetDataOnSharedKeepsOriginal) {
+  const uint8_t data[] = "foo";
+  CopyOnWriteBuffer buf1(kTestData, 3, 10);
+  const uint8_t* const original_allocation = buf1.cdata();
+  CopyOnWriteBuffer buf2(buf1);
+
+  buf2.SetData(data);
+
+  EnsureBuffersDontShareData(buf1, buf2);
+  EXPECT_EQ(original_allocation, buf1.cdata());
+  EXPECT_EQ(buf1, CopyOnWriteBuffer(kTestData, 3));
+  EXPECT_EQ(buf2, CopyOnWriteBuffer(data));
+}
+
+TEST(CopyOnWriteBufferTest, SetDataOnSharedKeepsCapacity) {
+  CopyOnWriteBuffer buf1(kTestData, 3, 10);
+  CopyOnWriteBuffer buf2(buf1);
+  EnsureBuffersShareData(buf1, buf2);
+
+  buf2.SetData(kTestData, 2);
+
+  EnsureBuffersDontShareData(buf1, buf2);
+  EXPECT_EQ(2u, buf2.size());
+  EXPECT_EQ(10u, buf2.capacity());
+}
+
+TEST(CopyOnWriteBufferTest, TestEnsureCapacity) {
+  CopyOnWriteBuffer buf1(kTestData, 3, 10);
+  CopyOnWriteBuffer buf2(buf1);
+
+  // Smaller than existing capacity -> no change and still same contents.
+  buf2.EnsureCapacity(8);
+  EnsureBuffersShareData(buf1, buf2);
+  EXPECT_EQ(buf1.size(), 3u);
+  EXPECT_EQ(buf1.capacity(), 10u);
+  EXPECT_EQ(buf2.size(), 3u);
+  EXPECT_EQ(buf2.capacity(), 10u);
+
+  // Lager than existing capacity -> data is cloned.
+  buf2.EnsureCapacity(16);
+  EnsureBuffersDontShareData(buf1, buf2);
+  EXPECT_EQ(buf1.size(), 3u);
+  EXPECT_EQ(buf1.capacity(), 10u);
+  EXPECT_EQ(buf2.size(), 3u);
+  EXPECT_EQ(buf2.capacity(), 16u);
+  // The size and contents are still the same.
+  EXPECT_EQ(buf1, buf2);
+}
+
+TEST(CopyOnWriteBufferTest, SetSizeDoesntChangeOriginal) {
+  CopyOnWriteBuffer buf1(kTestData, 3, 10);
+  const uint8_t* const original_allocation = buf1.cdata();
+  CopyOnWriteBuffer buf2(buf1);
+
+  buf2.SetSize(16);
+
+  EnsureBuffersDontShareData(buf1, buf2);
+  EXPECT_EQ(original_allocation, buf1.cdata());
+  EXPECT_EQ(3u, buf1.size());
+  EXPECT_EQ(10u, buf1.capacity());
+}
+
+TEST(CopyOnWriteBufferTest, SetSizeCloneContent) {
+  CopyOnWriteBuffer buf1(kTestData, 3, 10);
+  CopyOnWriteBuffer buf2(buf1);
+
+  buf2.SetSize(16);
+
+  EXPECT_EQ(buf2.size(), 16u);
+  EXPECT_EQ(0, memcmp(buf2.data(), kTestData, 3));
+}
+
+TEST(CopyOnWriteBufferTest, SetSizeMayIncreaseCapacity) {
+  CopyOnWriteBuffer buf(kTestData, 3, 10);
+
+  buf.SetSize(16);
+
+  EXPECT_EQ(16u, buf.size());
+  EXPECT_EQ(16u, buf.capacity());
+}
+
+TEST(CopyOnWriteBufferTest, SetSizeDoesntDecreaseCapacity) {
+  CopyOnWriteBuffer buf1(kTestData, 5, 10);
+  CopyOnWriteBuffer buf2(buf1);
+
+  buf2.SetSize(2);
+
+  EXPECT_EQ(2u, buf2.size());
+  EXPECT_EQ(10u, buf2.capacity());
+}
+
+TEST(CopyOnWriteBufferTest, ClearDoesntChangeOriginal) {
+  CopyOnWriteBuffer buf1(kTestData, 3, 10);
+  const uint8_t* const original_allocation = buf1.cdata();
+  CopyOnWriteBuffer buf2(buf1);
+
+  buf2.Clear();
+
+  EnsureBuffersDontShareData(buf1, buf2);
+  EXPECT_EQ(3u, buf1.size());
+  EXPECT_EQ(10u, buf1.capacity());
+  EXPECT_EQ(original_allocation, buf1.cdata());
+  EXPECT_EQ(0u, buf2.size());
+}
+
+TEST(CopyOnWriteBufferTest, ClearDoesntChangeCapacity) {
+  CopyOnWriteBuffer buf1(kTestData, 3, 10);
+  CopyOnWriteBuffer buf2(buf1);
+
+  buf2.Clear();
+
+  EXPECT_EQ(0u, buf2.size());
+  EXPECT_EQ(10u, buf2.capacity());
+}
+
+TEST(CopyOnWriteBufferTest, TestConstDataAccessor) {
+  CopyOnWriteBuffer buf1(kTestData, 3, 10);
+  CopyOnWriteBuffer buf2(buf1);
+
+  // .cdata() doesn't clone data.
+  const uint8_t* cdata1 = buf1.cdata();
+  const uint8_t* cdata2 = buf2.cdata();
+  EXPECT_EQ(cdata1, cdata2);
+
+  // Non-const .data() clones data if shared.
+  const uint8_t* data1 = buf1.data();
+  const uint8_t* data2 = buf2.data();
+  EXPECT_NE(data1, data2);
+  // buf1 was cloned above.
+  EXPECT_NE(data1, cdata1);
+  // Therefore buf2 was no longer sharing data and was not cloned.
+  EXPECT_EQ(data2, cdata1);
+}
+
+TEST(CopyOnWriteBufferTest, TestBacketRead) {
+  CopyOnWriteBuffer buf1(kTestData, 3, 10);
+  CopyOnWriteBuffer buf2(buf1);
+
+  EnsureBuffersShareData(buf1, buf2);
+  // Non-const reads clone the data if shared.
+  for (size_t i = 0; i != 3u; ++i) {
+    EXPECT_EQ(buf1[i], kTestData[i]);
+  }
+  EnsureBuffersDontShareData(buf1, buf2);
+}
+
+TEST(CopyOnWriteBufferTest, TestBacketReadConst) {
+  CopyOnWriteBuffer buf1(kTestData, 3, 10);
+  CopyOnWriteBuffer buf2(buf1);
+
+  EnsureBuffersShareData(buf1, buf2);
+  const CopyOnWriteBuffer& cbuf1 = buf1;
+  for (size_t i = 0; i != 3u; ++i) {
+    EXPECT_EQ(cbuf1[i], kTestData[i]);
+  }
+  EnsureBuffersShareData(buf1, buf2);
+}
+
+TEST(CopyOnWriteBufferTest, TestBacketWrite) {
+  CopyOnWriteBuffer buf1(kTestData, 3, 10);
+  CopyOnWriteBuffer buf2(buf1);
+
+  EnsureBuffersShareData(buf1, buf2);
+  for (size_t i = 0; i != 3u; ++i) {
+    buf1[i] = kTestData[i] + 1;
+  }
+  EXPECT_EQ(buf1.size(), 3u);
+  EXPECT_EQ(buf1.capacity(), 10u);
+  EXPECT_EQ(buf2.size(), 3u);
+  EXPECT_EQ(buf2.capacity(), 10u);
+  EXPECT_EQ(0, memcmp(buf2.cdata(), kTestData, 3));
+}
+
+}  // namespace rtc
diff --git a/rtc_base/cpu_time.cc b/rtc_base/cpu_time.cc
new file mode 100644
index 0000000..6c22880
--- /dev/null
+++ b/rtc_base/cpu_time.cc
@@ -0,0 +1,114 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/cpu_time.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/timeutils.h"
+
+#if defined(WEBRTC_LINUX)
+#include <time.h>
+#elif defined(WEBRTC_MAC)
+#include <sys/resource.h>
+#include <sys/types.h>
+#include <sys/times.h>
+#include <mach/thread_info.h>
+#include <mach/thread_act.h>
+#include <mach/mach_init.h>
+#include <unistd.h>
+#elif defined(WEBRTC_WIN)
+#include <windows.h>
+#endif
+
+#if defined(WEBRTC_WIN)
+namespace {
+// FILETIME resolution is 100 nanosecs.
+const int64_t kNanosecsPerFiletime = 100;
+}  // namespace
+#endif
+
+namespace rtc {
+
+int64_t GetProcessCpuTimeNanos() {
+#if defined(WEBRTC_LINUX)
+  struct timespec ts;
+  if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
+    return ts.tv_sec * kNumNanosecsPerSec + ts.tv_nsec;
+  } else {
+    RTC_LOG_ERR(LS_ERROR) << "clock_gettime() failed.";
+  }
+#elif defined(WEBRTC_MAC)
+  struct rusage rusage;
+  if (getrusage(RUSAGE_SELF, &rusage) == 0) {
+    return rusage.ru_utime.tv_sec * kNumNanosecsPerSec +
+           rusage.ru_utime.tv_usec * kNumNanosecsPerMicrosec;
+  } else {
+    RTC_LOG_ERR(LS_ERROR) << "getrusage() failed.";
+  }
+#elif defined(WEBRTC_WIN)
+  FILETIME createTime;
+  FILETIME exitTime;
+  FILETIME kernelTime;
+  FILETIME userTime;
+  if (GetProcessTimes(GetCurrentProcess(), &createTime, &exitTime, &kernelTime,
+                      &userTime) != 0) {
+    return ((static_cast<uint64_t>(userTime.dwHighDateTime) << 32) +
+            userTime.dwLowDateTime) *
+           kNanosecsPerFiletime;
+  } else {
+    RTC_LOG_ERR(LS_ERROR) << "GetProcessTimes() failed.";
+  }
+#else
+  // Not implemented yet.
+  static_assert(
+      false, "GetProcessCpuTimeNanos() platform support not yet implemented.");
+#endif
+  return -1;
+}
+
+int64_t GetThreadCpuTimeNanos() {
+#if defined(WEBRTC_LINUX)
+  struct timespec ts;
+  if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) {
+    return ts.tv_sec * kNumNanosecsPerSec + ts.tv_nsec;
+  } else {
+    RTC_LOG_ERR(LS_ERROR) << "clock_gettime() failed.";
+  }
+#elif defined(WEBRTC_MAC)
+  thread_basic_info_data_t info;
+  mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
+  if (thread_info(mach_thread_self(), THREAD_BASIC_INFO, (thread_info_t)&info,
+                  &count) == KERN_SUCCESS) {
+    return info.user_time.seconds * kNumNanosecsPerSec +
+           info.user_time.microseconds * kNumNanosecsPerMicrosec;
+  } else {
+    RTC_LOG_ERR(LS_ERROR) << "thread_info() failed.";
+  }
+#elif defined(WEBRTC_WIN)
+  FILETIME createTime;
+  FILETIME exitTime;
+  FILETIME kernelTime;
+  FILETIME userTime;
+  if (GetThreadTimes(GetCurrentThread(), &createTime, &exitTime, &kernelTime,
+                     &userTime) != 0) {
+    return ((static_cast<uint64_t>(userTime.dwHighDateTime) << 32) +
+            userTime.dwLowDateTime) *
+           kNanosecsPerFiletime;
+  } else {
+    RTC_LOG_ERR(LS_ERROR) << "GetThreadTimes() failed.";
+  }
+#else
+  // Not implemented yet.
+  static_assert(
+      false, "GetProcessCpuTimeNanos() platform support not yet implemented.");
+#endif
+  return -1;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/cpu_time.h b/rtc_base/cpu_time.h
new file mode 100644
index 0000000..f712f62
--- /dev/null
+++ b/rtc_base/cpu_time.h
@@ -0,0 +1,28 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_CPU_TIME_H_
+#define RTC_BASE_CPU_TIME_H_
+
+#include <stdint.h>
+
+namespace rtc {
+
+// Returns total CPU time of a current process in nanoseconds.
+// Time base is unknown, therefore use only to calculate deltas.
+int64_t GetProcessCpuTimeNanos();
+
+// Returns total CPU time of a current thread in nanoseconds.
+// Time base is unknown, therefore use only to calculate deltas.
+int64_t GetThreadCpuTimeNanos();
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_CPU_TIME_H_
diff --git a/rtc_base/cpu_time_unittest.cc b/rtc_base/cpu_time_unittest.cc
new file mode 100644
index 0000000..ba97378
--- /dev/null
+++ b/rtc_base/cpu_time_unittest.cc
@@ -0,0 +1,106 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/cpu_time.h"
+#include <algorithm>
+#include <memory>
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/timeutils.h"
+#include "system_wrappers/include/cpu_info.h"
+#include "system_wrappers/include/sleep.h"
+#include "test/gtest.h"
+
+// Only run these tests on non-instrumented builds, because timing on
+// instrumented builds is unreliable, causing the test to be flaky.
+#if defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER) || \
+    defined(ADDRESS_SANITIZER)
+#define MAYBE_TEST(test_name) DISABLED_##test_name
+#else
+#define MAYBE_TEST(test_name) test_name
+#endif
+
+namespace {
+const int kAllowedErrorMillisecs = 30;
+const int kProcessingTimeMillisecs = 300;
+const int kWorkingThreads = 2;
+
+// Consumes approximately kProcessingTimeMillisecs of CPU time in single thread.
+bool WorkingFunction(void* counter_pointer) {
+  int64_t* counter = reinterpret_cast<int64_t*>(counter_pointer);
+  *counter = 0;
+  int64_t stop_cpu_time =
+      rtc::GetThreadCpuTimeNanos() +
+      kProcessingTimeMillisecs * rtc::kNumNanosecsPerMillisec;
+  while (rtc::GetThreadCpuTimeNanos() < stop_cpu_time) {
+    (*counter)++;
+  }
+  return false;
+}
+}  // namespace
+
+namespace rtc {
+
+// A minimal test which can be run on instrumented builds, so that they're at
+// least exercising the code to check for memory leaks/etc.
+TEST(CpuTimeTest, BasicTest) {
+  int64_t process_start_time_nanos = GetProcessCpuTimeNanos();
+  int64_t thread_start_time_nanos = GetThreadCpuTimeNanos();
+  int64_t process_duration_nanos =
+      GetProcessCpuTimeNanos() - process_start_time_nanos;
+  int64_t thread_duration_nanos =
+      GetThreadCpuTimeNanos() - thread_start_time_nanos;
+  EXPECT_GE(process_duration_nanos, 0);
+  EXPECT_GE(thread_duration_nanos, 0);
+}
+
+TEST(CpuTimeTest, MAYBE_TEST(TwoThreads)) {
+  int64_t process_start_time_nanos = GetProcessCpuTimeNanos();
+  int64_t thread_start_time_nanos = GetThreadCpuTimeNanos();
+  int64_t counter1;
+  int64_t counter2;
+  PlatformThread thread1(WorkingFunction, reinterpret_cast<void*>(&counter1),
+                         "Thread1");
+  PlatformThread thread2(WorkingFunction, reinterpret_cast<void*>(&counter2),
+                         "Thread2");
+  thread1.Start();
+  thread2.Start();
+  thread1.Stop();
+  thread2.Stop();
+
+  EXPECT_GE(counter1, 0);
+  EXPECT_GE(counter2, 0);
+  int64_t process_duration_nanos =
+      GetProcessCpuTimeNanos() - process_start_time_nanos;
+  int64_t thread_duration_nanos =
+      GetThreadCpuTimeNanos() - thread_start_time_nanos;
+  // This thread did almost nothing.
+  // Therefore GetThreadCpuTime is not a wall clock.
+  EXPECT_LE(thread_duration_nanos,
+            kAllowedErrorMillisecs * kNumNanosecsPerMillisec);
+  // Total process time is at least twice working threads' CPU time.
+  // Therefore process and thread times are correctly related.
+  EXPECT_GE(
+      process_duration_nanos,
+      kWorkingThreads * (kProcessingTimeMillisecs - kAllowedErrorMillisecs)
+      * kNumNanosecsPerMillisec);
+}
+
+TEST(CpuTimeTest, MAYBE_TEST(Sleeping)) {
+  int64_t process_start_time_nanos = GetProcessCpuTimeNanos();
+  webrtc::SleepMs(kProcessingTimeMillisecs);
+  int64_t process_duration_nanos =
+      GetProcessCpuTimeNanos() - process_start_time_nanos;
+  // Sleeping should not introduce any additional CPU time.
+  // Therefore GetProcessCpuTime is not a wall clock.
+  EXPECT_LE(process_duration_nanos,
+            kWorkingThreads * kAllowedErrorMillisecs * kNumNanosecsPerMillisec);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/crc32.cc b/rtc_base/crc32.cc
new file mode 100644
index 0000000..c214f38
--- /dev/null
+++ b/rtc_base/crc32.cc
@@ -0,0 +1,52 @@
+/*
+ *  Copyright 2012 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/crc32.h"
+
+#include "rtc_base/arraysize.h"
+
+namespace rtc {
+
+// This implementation is based on the sample implementation in RFC 1952.
+
+// CRC32 polynomial, in reversed form.
+// See RFC 1952, or http://en.wikipedia.org/wiki/Cyclic_redundancy_check
+static const uint32_t kCrc32Polynomial = 0xEDB88320;
+static uint32_t kCrc32Table[256] = {0};
+
+static void EnsureCrc32TableInited() {
+  if (kCrc32Table[arraysize(kCrc32Table) - 1])
+    return;  // already inited
+  for (uint32_t i = 0; i < arraysize(kCrc32Table); ++i) {
+    uint32_t c = i;
+    for (size_t j = 0; j < 8; ++j) {
+      if (c & 1) {
+        c = kCrc32Polynomial ^ (c >> 1);
+      } else {
+        c >>= 1;
+      }
+    }
+    kCrc32Table[i] = c;
+  }
+}
+
+uint32_t UpdateCrc32(uint32_t start, const void* buf, size_t len) {
+  EnsureCrc32TableInited();
+
+  uint32_t c = start ^ 0xFFFFFFFF;
+  const uint8_t* u = static_cast<const uint8_t*>(buf);
+  for (size_t i = 0; i < len; ++i) {
+    c = kCrc32Table[(c ^ u[i]) & 0xFF] ^ (c >> 8);
+  }
+  return c ^ 0xFFFFFFFF;
+}
+
+}  // namespace rtc
+
diff --git a/rtc_base/crc32.h b/rtc_base/crc32.h
new file mode 100644
index 0000000..a0ce432
--- /dev/null
+++ b/rtc_base/crc32.h
@@ -0,0 +1,34 @@
+/*
+ *  Copyright 2012 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_CRC32_H_
+#define RTC_BASE_CRC32_H_
+
+#include <string>
+
+#include "rtc_base/basictypes.h"
+
+namespace rtc {
+
+// Updates a CRC32 checksum with |len| bytes from |buf|. |initial| holds the
+// checksum result from the previous update; for the first call, it should be 0.
+uint32_t UpdateCrc32(uint32_t initial, const void* buf, size_t len);
+
+// Computes a CRC32 checksum using |len| bytes from |buf|.
+inline uint32_t ComputeCrc32(const void* buf, size_t len) {
+  return UpdateCrc32(0, buf, len);
+}
+inline uint32_t ComputeCrc32(const std::string& str) {
+  return ComputeCrc32(str.c_str(), str.size());
+}
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_CRC32_H_
diff --git a/rtc_base/crc32_unittest.cc b/rtc_base/crc32_unittest.cc
new file mode 100644
index 0000000..576b424
--- /dev/null
+++ b/rtc_base/crc32_unittest.cc
@@ -0,0 +1,35 @@
+/*
+ *  Copyright 2012 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/crc32.h"
+#include "rtc_base/gunit.h"
+
+#include <string>
+
+namespace rtc {
+
+TEST(Crc32Test, TestBasic) {
+  EXPECT_EQ(0U, ComputeCrc32(""));
+  EXPECT_EQ(0x352441C2U, ComputeCrc32("abc"));
+  EXPECT_EQ(0x171A3F5FU,
+      ComputeCrc32("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"));
+}
+
+TEST(Crc32Test, TestMultipleUpdates) {
+  std::string input =
+      "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq";
+  uint32_t c = 0;
+  for (size_t i = 0; i < input.size(); ++i) {
+    c = UpdateCrc32(c, &input[i], 1);
+  }
+  EXPECT_EQ(0x171A3F5FU, c);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/criticalsection.cc b/rtc_base/criticalsection.cc
new file mode 100644
index 0000000..f9168d8
--- /dev/null
+++ b/rtc_base/criticalsection.cc
@@ -0,0 +1,252 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/criticalsection.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/platform_thread_types.h"
+
+// TODO(tommi): Split this file up to per-platform implementation files.
+
+namespace rtc {
+
+CriticalSection::CriticalSection() {
+#if defined(WEBRTC_WIN)
+  InitializeCriticalSection(&crit_);
+#elif defined(WEBRTC_POSIX)
+# if defined(WEBRTC_MAC) && !USE_NATIVE_MUTEX_ON_MAC
+  lock_queue_ = 0;
+  owning_thread_ = 0;
+  recursion_ = 0;
+  semaphore_ = dispatch_semaphore_create(0);
+# else
+  pthread_mutexattr_t mutex_attribute;
+  pthread_mutexattr_init(&mutex_attribute);
+  pthread_mutexattr_settype(&mutex_attribute, PTHREAD_MUTEX_RECURSIVE);
+  pthread_mutex_init(&mutex_, &mutex_attribute);
+  pthread_mutexattr_destroy(&mutex_attribute);
+# endif
+  CS_DEBUG_CODE(thread_ = 0);
+  CS_DEBUG_CODE(recursion_count_ = 0);
+  RTC_UNUSED(thread_);
+  RTC_UNUSED(recursion_count_);
+#else
+# error Unsupported platform.
+#endif
+}
+
+CriticalSection::~CriticalSection() {
+#if defined(WEBRTC_WIN)
+  DeleteCriticalSection(&crit_);
+#elif defined(WEBRTC_POSIX)
+# if defined(WEBRTC_MAC) && !USE_NATIVE_MUTEX_ON_MAC
+  dispatch_release(semaphore_);
+# else
+  pthread_mutex_destroy(&mutex_);
+# endif
+#else
+# error Unsupported platform.
+#endif
+}
+
+void CriticalSection::Enter() const RTC_EXCLUSIVE_LOCK_FUNCTION() {
+#if defined(WEBRTC_WIN)
+  EnterCriticalSection(&crit_);
+#elif defined(WEBRTC_POSIX)
+# if defined(WEBRTC_MAC) && !USE_NATIVE_MUTEX_ON_MAC
+  int spin = 3000;
+  PlatformThreadRef self = CurrentThreadRef();
+  bool have_lock = false;
+  do {
+    // Instead of calling TryEnter() in this loop, we do two interlocked
+    // operations, first a read-only one in order to avoid affecting the lock
+    // cache-line while spinning, in case another thread is using the lock.
+    if (!IsThreadRefEqual(owning_thread_, self)) {
+      if (AtomicOps::AcquireLoad(&lock_queue_) == 0) {
+        if (AtomicOps::CompareAndSwap(&lock_queue_, 0, 1) == 0) {
+          have_lock = true;
+          break;
+        }
+      }
+    } else {
+      AtomicOps::Increment(&lock_queue_);
+      have_lock = true;
+      break;
+    }
+
+    sched_yield();
+  } while (--spin);
+
+  if (!have_lock && AtomicOps::Increment(&lock_queue_) > 1) {
+    // Owning thread cannot be the current thread since TryEnter() would
+    // have succeeded.
+    RTC_DCHECK(!IsThreadRefEqual(owning_thread_, self));
+    // Wait for the lock to become available.
+    dispatch_semaphore_wait(semaphore_, DISPATCH_TIME_FOREVER);
+    RTC_DCHECK(owning_thread_ == 0);
+    RTC_DCHECK(!recursion_);
+  }
+
+  owning_thread_ = self;
+  ++recursion_;
+
+# else
+  pthread_mutex_lock(&mutex_);
+# endif
+
+# if CS_DEBUG_CHECKS
+  if (!recursion_count_) {
+    RTC_DCHECK(!thread_);
+    thread_ = CurrentThreadRef();
+  } else {
+    RTC_DCHECK(CurrentThreadIsOwner());
+  }
+  ++recursion_count_;
+# endif
+#else
+# error Unsupported platform.
+#endif
+}
+
+bool CriticalSection::TryEnter() const RTC_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+#if defined(WEBRTC_WIN)
+  return TryEnterCriticalSection(&crit_) != FALSE;
+#elif defined(WEBRTC_POSIX)
+# if defined(WEBRTC_MAC) && !USE_NATIVE_MUTEX_ON_MAC
+  if (!IsThreadRefEqual(owning_thread_, CurrentThreadRef())) {
+    if (AtomicOps::CompareAndSwap(&lock_queue_, 0, 1) != 0)
+      return false;
+    owning_thread_ = CurrentThreadRef();
+    RTC_DCHECK(!recursion_);
+  } else {
+    AtomicOps::Increment(&lock_queue_);
+  }
+  ++recursion_;
+# else
+  if (pthread_mutex_trylock(&mutex_) != 0)
+    return false;
+# endif
+# if CS_DEBUG_CHECKS
+  if (!recursion_count_) {
+    RTC_DCHECK(!thread_);
+    thread_ = CurrentThreadRef();
+  } else {
+    RTC_DCHECK(CurrentThreadIsOwner());
+  }
+  ++recursion_count_;
+# endif
+  return true;
+#else
+# error Unsupported platform.
+#endif
+}
+
+void CriticalSection::Leave() const RTC_UNLOCK_FUNCTION() {
+  RTC_DCHECK(CurrentThreadIsOwner());
+#if defined(WEBRTC_WIN)
+  LeaveCriticalSection(&crit_);
+#elif defined(WEBRTC_POSIX)
+# if CS_DEBUG_CHECKS
+  --recursion_count_;
+  RTC_DCHECK(recursion_count_ >= 0);
+  if (!recursion_count_)
+    thread_ = 0;
+# endif
+# if defined(WEBRTC_MAC) && !USE_NATIVE_MUTEX_ON_MAC
+  RTC_DCHECK(IsThreadRefEqual(owning_thread_, CurrentThreadRef()));
+  RTC_DCHECK_GE(recursion_, 0);
+  --recursion_;
+  if (!recursion_)
+    owning_thread_ = 0;
+
+  if (AtomicOps::Decrement(&lock_queue_) > 0 && !recursion_)
+    dispatch_semaphore_signal(semaphore_);
+# else
+  pthread_mutex_unlock(&mutex_);
+# endif
+#else
+# error Unsupported platform.
+#endif
+}
+
+bool CriticalSection::CurrentThreadIsOwner() const {
+#if defined(WEBRTC_WIN)
+  // OwningThread has type HANDLE but actually contains the Thread ID:
+  // http://stackoverflow.com/questions/12675301/why-is-the-owningthread-member-of-critical-section-of-type-handle-when-it-is-de
+  // Converting through size_t avoids the VS 2015 warning C4312: conversion from
+  // 'type1' to 'type2' of greater size
+  return crit_.OwningThread ==
+         reinterpret_cast<HANDLE>(static_cast<size_t>(GetCurrentThreadId()));
+#elif defined(WEBRTC_POSIX)
+# if CS_DEBUG_CHECKS
+  return IsThreadRefEqual(thread_, CurrentThreadRef());
+# else
+  return true;
+# endif  // CS_DEBUG_CHECKS
+#else
+# error Unsupported platform.
+#endif
+}
+
+CritScope::CritScope(const CriticalSection* cs) : cs_(cs) { cs_->Enter(); }
+CritScope::~CritScope() { cs_->Leave(); }
+
+TryCritScope::TryCritScope(const CriticalSection* cs)
+    : cs_(cs), locked_(cs->TryEnter()) {
+  CS_DEBUG_CODE(lock_was_called_ = false);
+  RTC_UNUSED(lock_was_called_);
+}
+
+TryCritScope::~TryCritScope() {
+  CS_DEBUG_CODE(RTC_DCHECK(lock_was_called_));
+  if (locked_)
+    cs_->Leave();
+}
+
+bool TryCritScope::locked() const {
+  CS_DEBUG_CODE(lock_was_called_ = true);
+  return locked_;
+}
+
+void GlobalLockPod::Lock() {
+#if !defined(WEBRTC_WIN) && (!defined(WEBRTC_MAC) || USE_NATIVE_MUTEX_ON_MAC)
+  const struct timespec ts_null = {0};
+#endif
+
+  while (AtomicOps::CompareAndSwap(&lock_acquired, 0, 1)) {
+#if defined(WEBRTC_WIN)
+    ::Sleep(0);
+#elif defined(WEBRTC_MAC) && !USE_NATIVE_MUTEX_ON_MAC
+    sched_yield();
+#else
+    nanosleep(&ts_null, nullptr);
+#endif
+  }
+}
+
+void GlobalLockPod::Unlock() {
+  int old_value = AtomicOps::CompareAndSwap(&lock_acquired, 1, 0);
+  RTC_DCHECK_EQ(1, old_value) << "Unlock called without calling Lock first";
+}
+
+GlobalLock::GlobalLock() {
+  lock_acquired = 0;
+}
+
+GlobalLockScope::GlobalLockScope(GlobalLockPod* lock)
+    : lock_(lock) {
+  lock_->Lock();
+}
+
+GlobalLockScope::~GlobalLockScope() {
+  lock_->Unlock();
+}
+
+}  // namespace rtc
diff --git a/rtc_base/criticalsection.h b/rtc_base/criticalsection.h
new file mode 100644
index 0000000..1ef9634
--- /dev/null
+++ b/rtc_base/criticalsection.h
@@ -0,0 +1,159 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_CRITICALSECTION_H_
+#define RTC_BASE_CRITICALSECTION_H_
+
+#include "rtc_base/atomicops.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/platform_thread_types.h"
+#include "rtc_base/thread_annotations.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+#if defined(WEBRTC_WIN)
+// Include winsock2.h before including <windows.h> to maintain consistency with
+// win32.h.  We can't include win32.h directly here since it pulls in
+// headers such as basictypes.h which causes problems in Chromium where webrtc
+// exists as two separate projects, webrtc and libjingle.
+#include <winsock2.h>
+#include <windows.h>
+#include <sal.h>  // must come after windows headers.
+#endif  // defined(WEBRTC_WIN)
+
+#if defined(WEBRTC_POSIX)
+#include <pthread.h>
+#endif
+
+// See notes in the 'Performance' unit test for the effects of this flag.
+#define USE_NATIVE_MUTEX_ON_MAC 0
+
+#if defined(WEBRTC_MAC) && !USE_NATIVE_MUTEX_ON_MAC
+#include <dispatch/dispatch.h>
+#endif
+
+#define CS_DEBUG_CHECKS RTC_DCHECK_IS_ON
+
+#if CS_DEBUG_CHECKS
+#define CS_DEBUG_CODE(x) x
+#else  // !CS_DEBUG_CHECKS
+#define CS_DEBUG_CODE(x)
+#endif  // !CS_DEBUG_CHECKS
+
+namespace rtc {
+
+// Locking methods (Enter, TryEnter, Leave)are const to permit protecting
+// members inside a const context without requiring mutable CriticalSections
+// everywhere.
+class RTC_LOCKABLE CriticalSection {
+ public:
+  CriticalSection();
+  ~CriticalSection();
+
+  void Enter() const RTC_EXCLUSIVE_LOCK_FUNCTION();
+  bool TryEnter() const RTC_EXCLUSIVE_TRYLOCK_FUNCTION(true);
+  void Leave() const RTC_UNLOCK_FUNCTION();
+
+ private:
+  // Use only for RTC_DCHECKing.
+  bool CurrentThreadIsOwner() const;
+
+#if defined(WEBRTC_WIN)
+  mutable CRITICAL_SECTION crit_;
+#elif defined(WEBRTC_POSIX)
+# if defined(WEBRTC_MAC) && !USE_NATIVE_MUTEX_ON_MAC
+  // Number of times the lock has been locked + number of threads waiting.
+  // TODO(tommi): We could use this number and subtract the recursion count
+  // to find places where we have multiple threads contending on the same lock.
+  mutable volatile int lock_queue_;
+  // |recursion_| represents the recursion count + 1 for the thread that owns
+  // the lock. Only modified by the thread that owns the lock.
+  mutable int recursion_;
+  // Used to signal a single waiting thread when the lock becomes available.
+  mutable dispatch_semaphore_t semaphore_;
+  // The thread that currently holds the lock. Required to handle recursion.
+  mutable PlatformThreadRef owning_thread_;
+# else
+  mutable pthread_mutex_t mutex_;
+# endif
+  mutable PlatformThreadRef thread_;  // Only used by RTC_DCHECKs.
+  mutable int recursion_count_;       // Only used by RTC_DCHECKs.
+#else  // !defined(WEBRTC_WIN) && !defined(WEBRTC_POSIX)
+# error Unsupported platform.
+#endif
+};
+
+// CritScope, for serializing execution through a scope.
+class RTC_SCOPED_LOCKABLE CritScope {
+ public:
+  explicit CritScope(const CriticalSection* cs) RTC_EXCLUSIVE_LOCK_FUNCTION(cs);
+  ~CritScope() RTC_UNLOCK_FUNCTION();
+
+ private:
+  const CriticalSection* const cs_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(CritScope);
+};
+
+// Tries to lock a critical section on construction via
+// CriticalSection::TryEnter, and unlocks on destruction if the
+// lock was taken. Never blocks.
+//
+// IMPORTANT: Unlike CritScope, the lock may not be owned by this thread in
+// subsequent code. Users *must* check locked() to determine if the
+// lock was taken. If you're not calling locked(), you're doing it wrong!
+class TryCritScope {
+ public:
+  explicit TryCritScope(const CriticalSection* cs);
+  ~TryCritScope();
+#if defined(WEBRTC_WIN)
+  _Check_return_ bool locked() const;
+#elif defined(WEBRTC_POSIX)
+  bool locked() const __attribute__ ((__warn_unused_result__));
+#else  // !defined(WEBRTC_WIN) && !defined(WEBRTC_POSIX)
+# error Unsupported platform.
+#endif
+ private:
+  const CriticalSection* const cs_;
+  const bool locked_;
+  mutable bool lock_was_called_;  // Only used by RTC_DCHECKs.
+  RTC_DISALLOW_COPY_AND_ASSIGN(TryCritScope);
+};
+
+// A POD lock used to protect global variables. Do NOT use for other purposes.
+// No custom constructor or private data member should be added.
+class RTC_LOCKABLE GlobalLockPod {
+ public:
+  void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION();
+
+  void Unlock() RTC_UNLOCK_FUNCTION();
+
+  volatile int lock_acquired;
+};
+
+class GlobalLock : public GlobalLockPod {
+ public:
+  GlobalLock();
+};
+
+// GlobalLockScope, for serializing execution through a scope.
+class RTC_SCOPED_LOCKABLE GlobalLockScope {
+ public:
+  explicit GlobalLockScope(GlobalLockPod* lock)
+      RTC_EXCLUSIVE_LOCK_FUNCTION(lock);
+  ~GlobalLockScope() RTC_UNLOCK_FUNCTION();
+
+ private:
+  GlobalLockPod* const lock_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(GlobalLockScope);
+};
+
+} // namespace rtc
+
+#endif // RTC_BASE_CRITICALSECTION_H_
diff --git a/rtc_base/criticalsection_unittest.cc b/rtc_base/criticalsection_unittest.cc
new file mode 100644
index 0000000..f5d6957
--- /dev/null
+++ b/rtc_base/criticalsection_unittest.cc
@@ -0,0 +1,409 @@
+/*
+ *  Copyright 2014 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <set>
+#include <vector>
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/event.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/thread.h"
+
+namespace rtc {
+
+namespace {
+
+const int kLongTime = 10000;  // 10 seconds
+const int kNumThreads = 16;
+const int kOperationsToRun = 1000;
+
+class UniqueValueVerifier {
+ public:
+  void Verify(const std::vector<int>& values) {
+    for (size_t i = 0; i < values.size(); ++i) {
+      std::pair<std::set<int>::iterator, bool> result =
+          all_values_.insert(values[i]);
+      // Each value should only be taken by one thread, so if this value
+      // has already been added, something went wrong.
+      EXPECT_TRUE(result.second)
+          << " Thread=" << Thread::Current() << " value=" << values[i];
+    }
+  }
+
+  void Finalize() {}
+
+ private:
+  std::set<int> all_values_;
+};
+
+class CompareAndSwapVerifier {
+ public:
+  CompareAndSwapVerifier() : zero_count_(0) {}
+
+  void Verify(const std::vector<int>& values) {
+    for (auto v : values) {
+      if (v == 0) {
+        EXPECT_EQ(0, zero_count_) << "Thread=" << Thread::Current();
+        ++zero_count_;
+      } else {
+        EXPECT_EQ(1, v) << " Thread=" << Thread::Current();
+      }
+    }
+  }
+
+  void Finalize() {
+    EXPECT_EQ(1, zero_count_);
+  }
+ private:
+  int zero_count_;
+};
+
+class RunnerBase : public MessageHandler {
+ public:
+  explicit RunnerBase(int value)
+      : threads_active_(0),
+        start_event_(true, false),
+        done_event_(true, false),
+        shared_value_(value) {}
+
+  bool Run() {
+    // Signal all threads to start.
+    start_event_.Set();
+
+    // Wait for all threads to finish.
+    return done_event_.Wait(kLongTime);
+  }
+
+  void SetExpectedThreadCount(int count) {
+    threads_active_ = count;
+  }
+
+  int shared_value() const { return shared_value_; }
+
+ protected:
+  // Derived classes must override OnMessage, and call BeforeStart and AfterEnd
+  // at the beginning and the end of OnMessage respectively.
+  void BeforeStart() {
+    ASSERT_TRUE(start_event_.Wait(kLongTime));
+  }
+
+  // Returns true if all threads have finished.
+  bool AfterEnd() {
+    if (AtomicOps::Decrement(&threads_active_) == 0) {
+      done_event_.Set();
+      return true;
+    }
+    return false;
+  }
+
+  int threads_active_;
+  Event start_event_;
+  Event done_event_;
+  int shared_value_;
+};
+
+class RTC_LOCKABLE CriticalSectionLock {
+ public:
+  void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION() { cs_.Enter(); }
+  void Unlock() RTC_UNLOCK_FUNCTION() { cs_.Leave(); }
+
+ private:
+  CriticalSection cs_;
+};
+
+template <class Lock>
+class LockRunner : public RunnerBase {
+ public:
+  LockRunner() : RunnerBase(0) {}
+
+  void OnMessage(Message* msg) override {
+    BeforeStart();
+
+    lock_.Lock();
+
+    EXPECT_EQ(0, shared_value_);
+    int old = shared_value_;
+
+    // Use a loop to increase the chance of race.
+    for (int i = 0; i < kOperationsToRun; ++i) {
+      ++shared_value_;
+    }
+    EXPECT_EQ(old + kOperationsToRun, shared_value_);
+    shared_value_ = 0;
+
+    lock_.Unlock();
+
+    AfterEnd();
+  }
+
+ private:
+  Lock lock_;
+};
+
+template <class Op, class Verifier>
+class AtomicOpRunner : public RunnerBase {
+ public:
+  explicit AtomicOpRunner(int initial_value) : RunnerBase(initial_value) {}
+
+  void OnMessage(Message* msg) override {
+    BeforeStart();
+
+    std::vector<int> values;
+    values.reserve(kOperationsToRun);
+
+    // Generate a bunch of values by updating shared_value_ atomically.
+    for (int i = 0; i < kOperationsToRun; ++i) {
+      values.push_back(Op::AtomicOp(&shared_value_));
+    }
+
+    { // Add them all to the set.
+      CritScope cs(&all_values_crit_);
+      verifier_.Verify(values);
+    }
+
+    if (AfterEnd()) {
+      verifier_.Finalize();
+    }
+  }
+
+ private:
+  CriticalSection all_values_crit_;
+  Verifier verifier_;
+};
+
+struct IncrementOp {
+  static int AtomicOp(int* i) { return AtomicOps::Increment(i); }
+};
+
+struct DecrementOp {
+  static int AtomicOp(int* i) { return AtomicOps::Decrement(i); }
+};
+
+struct CompareAndSwapOp {
+  static int AtomicOp(int* i) { return AtomicOps::CompareAndSwap(i, 0, 1); }
+};
+
+void StartThreads(std::vector<std::unique_ptr<Thread>>* threads,
+                  MessageHandler* handler) {
+  for (int i = 0; i < kNumThreads; ++i) {
+    std::unique_ptr<Thread> thread(Thread::Create());
+    thread->Start();
+    thread->Post(RTC_FROM_HERE, handler);
+    threads->push_back(std::move(thread));
+  }
+}
+
+}  // namespace
+
+TEST(AtomicOpsTest, Simple) {
+  int value = 0;
+  EXPECT_EQ(1, AtomicOps::Increment(&value));
+  EXPECT_EQ(1, value);
+  EXPECT_EQ(2, AtomicOps::Increment(&value));
+  EXPECT_EQ(2, value);
+  EXPECT_EQ(1, AtomicOps::Decrement(&value));
+  EXPECT_EQ(1, value);
+  EXPECT_EQ(0, AtomicOps::Decrement(&value));
+  EXPECT_EQ(0, value);
+}
+
+TEST(AtomicOpsTest, SimplePtr) {
+  class Foo {};
+  Foo* volatile foo = nullptr;
+  std::unique_ptr<Foo> a(new Foo());
+  std::unique_ptr<Foo> b(new Foo());
+  // Reading the initial value should work as expected.
+  EXPECT_TRUE(rtc::AtomicOps::AcquireLoadPtr(&foo) == nullptr);
+  // Setting using compare and swap should work.
+  EXPECT_TRUE(rtc::AtomicOps::CompareAndSwapPtr(
+                  &foo, static_cast<Foo*>(nullptr), a.get()) == nullptr);
+  EXPECT_TRUE(rtc::AtomicOps::AcquireLoadPtr(&foo) == a.get());
+  // Setting another value but with the wrong previous pointer should fail
+  // (remain a).
+  EXPECT_TRUE(rtc::AtomicOps::CompareAndSwapPtr(
+                  &foo, static_cast<Foo*>(nullptr), b.get()) == a.get());
+  EXPECT_TRUE(rtc::AtomicOps::AcquireLoadPtr(&foo) == a.get());
+  // Replacing a with b should work.
+  EXPECT_TRUE(rtc::AtomicOps::CompareAndSwapPtr(&foo, a.get(), b.get()) ==
+              a.get());
+  EXPECT_TRUE(rtc::AtomicOps::AcquireLoadPtr(&foo) == b.get());
+}
+
+TEST(AtomicOpsTest, Increment) {
+  // Create and start lots of threads.
+  AtomicOpRunner<IncrementOp, UniqueValueVerifier> runner(0);
+  std::vector<std::unique_ptr<Thread>> threads;
+  StartThreads(&threads, &runner);
+  runner.SetExpectedThreadCount(kNumThreads);
+
+  // Release the hounds!
+  EXPECT_TRUE(runner.Run());
+  EXPECT_EQ(kOperationsToRun * kNumThreads, runner.shared_value());
+}
+
+TEST(AtomicOpsTest, Decrement) {
+  // Create and start lots of threads.
+  AtomicOpRunner<DecrementOp, UniqueValueVerifier> runner(
+      kOperationsToRun * kNumThreads);
+  std::vector<std::unique_ptr<Thread>> threads;
+  StartThreads(&threads, &runner);
+  runner.SetExpectedThreadCount(kNumThreads);
+
+  // Release the hounds!
+  EXPECT_TRUE(runner.Run());
+  EXPECT_EQ(0, runner.shared_value());
+}
+
+TEST(AtomicOpsTest, CompareAndSwap) {
+  // Create and start lots of threads.
+  AtomicOpRunner<CompareAndSwapOp, CompareAndSwapVerifier> runner(0);
+  std::vector<std::unique_ptr<Thread>> threads;
+  StartThreads(&threads, &runner);
+  runner.SetExpectedThreadCount(kNumThreads);
+
+  // Release the hounds!
+  EXPECT_TRUE(runner.Run());
+  EXPECT_EQ(1, runner.shared_value());
+}
+
+TEST(GlobalLockTest, Basic) {
+  // Create and start lots of threads.
+  LockRunner<GlobalLock> runner;
+  std::vector<std::unique_ptr<Thread>> threads;
+  StartThreads(&threads, &runner);
+  runner.SetExpectedThreadCount(kNumThreads);
+
+  // Release the hounds!
+  EXPECT_TRUE(runner.Run());
+  EXPECT_EQ(0, runner.shared_value());
+}
+
+TEST(CriticalSectionTest, Basic) {
+  // Create and start lots of threads.
+  LockRunner<CriticalSectionLock> runner;
+  std::vector<std::unique_ptr<Thread>> threads;
+  StartThreads(&threads, &runner);
+  runner.SetExpectedThreadCount(kNumThreads);
+
+  // Release the hounds!
+  EXPECT_TRUE(runner.Run());
+  EXPECT_EQ(0, runner.shared_value());
+}
+
+class PerfTestData {
+ public:
+  PerfTestData(int expected_count, Event* event)
+      : cache_line_barrier_1_(), cache_line_barrier_2_(),
+        expected_count_(expected_count), event_(event) {
+    cache_line_barrier_1_[0]++;  // Avoid 'is not used'.
+    cache_line_barrier_2_[0]++;  // Avoid 'is not used'.
+  }
+  ~PerfTestData() {}
+
+  void AddToCounter(int add) {
+    rtc::CritScope cs(&lock_);
+    my_counter_ += add;
+    if (my_counter_ == expected_count_)
+      event_->Set();
+  }
+
+  int64_t total() const {
+    // Assume that only one thread is running now.
+    return my_counter_;
+  }
+
+ private:
+  uint8_t cache_line_barrier_1_[64];
+  CriticalSection lock_;
+  uint8_t cache_line_barrier_2_[64];
+  int64_t my_counter_ = 0;
+  const int expected_count_;
+  Event* const event_;
+};
+
+class PerfTestThread {
+ public:
+  PerfTestThread() : thread_(&ThreadFunc, this, "CsPerf") {}
+
+  void Start(PerfTestData* data, int repeats, int id) {
+    RTC_DCHECK(!thread_.IsRunning());
+    RTC_DCHECK(!data_);
+    data_ = data;
+    repeats_ = repeats;
+    my_id_ = id;
+    thread_.Start();
+  }
+
+  void Stop() {
+    RTC_DCHECK(thread_.IsRunning());
+    RTC_DCHECK(data_);
+    thread_.Stop();
+    repeats_ = 0;
+    data_ = nullptr;
+    my_id_ = 0;
+  }
+
+ private:
+  static bool ThreadFunc(void* param) {
+    PerfTestThread* me = static_cast<PerfTestThread*>(param);
+    for (int i = 0; i < me->repeats_; ++i)
+      me->data_->AddToCounter(me->my_id_);
+    return false;
+  }
+
+  PlatformThread thread_;
+  PerfTestData* data_ = nullptr;
+  int repeats_ = 0;
+  int my_id_ = 0;
+};
+
+// Comparison of output of this test as tested on a MacBook Pro Retina, 15-inch,
+// Mid 2014, 2,8 GHz Intel Core i7, 16 GB 1600 MHz DDR3,
+// running OS X El Capitan, 10.11.2.
+//
+// Native mutex implementation:
+// Approximate CPU usage:
+//   System: ~16%
+//   User mode: ~1.3%
+//   Idle: ~82%
+// Unit test output:
+// [       OK ] CriticalSectionTest.Performance (234545 ms)
+//
+// Special partially spin lock based implementation:
+// Approximate CPU usage:
+//   System: ~75%
+//   User mode: ~16%
+//   Idle: ~8%
+// Unit test output:
+// [       OK ] CriticalSectionTest.Performance (2107 ms)
+//
+// The test is disabled by default to avoid unecessarily loading the bots.
+TEST(CriticalSectionTest, DISABLED_Performance) {
+  PerfTestThread threads[8];
+  Event event(false, false);
+
+  static const int kThreadRepeats = 10000000;
+  static const int kExpectedCount = kThreadRepeats * arraysize(threads);
+  PerfTestData test_data(kExpectedCount, &event);
+
+  for (auto& t : threads)
+    t.Start(&test_data, kThreadRepeats, 1);
+
+  event.Wait(Event::kForever);
+
+  for (auto& t : threads)
+    t.Stop();
+}
+
+}  // namespace rtc
diff --git a/rtc_base/cryptstring.cc b/rtc_base/cryptstring.cc
new file mode 100644
index 0000000..421710c
--- /dev/null
+++ b/rtc_base/cryptstring.cc
@@ -0,0 +1,75 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/cryptstring.h"
+
+namespace rtc {
+
+size_t EmptyCryptStringImpl::GetLength() const {
+  return 0;
+}
+
+void EmptyCryptStringImpl::CopyTo(char* dest, bool nullterminate) const {
+  if (nullterminate) {
+    *dest = '\0';
+  }
+}
+
+std::string EmptyCryptStringImpl::UrlEncode() const {
+  return "";
+}
+
+CryptStringImpl* EmptyCryptStringImpl::Copy() const {
+  return new EmptyCryptStringImpl();
+}
+
+void EmptyCryptStringImpl::CopyRawTo(std::vector<unsigned char>* dest) const {
+  dest->clear();
+}
+
+CryptString::CryptString() : impl_(new EmptyCryptStringImpl()) {
+}
+
+CryptString::CryptString(const CryptString& other)
+    : impl_(other.impl_->Copy()) {
+}
+
+CryptString::CryptString(const CryptStringImpl& impl) : impl_(impl.Copy()) {
+}
+
+CryptString::~CryptString() = default;
+
+size_t InsecureCryptStringImpl::GetLength() const {
+  return password_.size();
+}
+
+void InsecureCryptStringImpl::CopyTo(char* dest, bool nullterminate) const {
+  memcpy(dest, password_.data(), password_.size());
+  if (nullterminate)
+    dest[password_.size()] = 0;
+}
+
+std::string InsecureCryptStringImpl::UrlEncode() const {
+  return password_;
+}
+
+CryptStringImpl* InsecureCryptStringImpl::Copy() const {
+  InsecureCryptStringImpl* copy = new InsecureCryptStringImpl;
+  copy->password() = password_;
+  return copy;
+}
+
+void InsecureCryptStringImpl::CopyRawTo(
+    std::vector<unsigned char>* dest) const {
+  dest->resize(password_.size());
+  memcpy(&dest->front(), password_.data(), password_.size());
+}
+
+};  // namespace rtc
diff --git a/rtc_base/cryptstring.h b/rtc_base/cryptstring.h
new file mode 100644
index 0000000..c210487
--- /dev/null
+++ b/rtc_base/cryptstring.h
@@ -0,0 +1,86 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_CRYPTSTRING_H_
+#define RTC_BASE_CRYPTSTRING_H_
+
+#include <string.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace rtc {
+
+class CryptStringImpl {
+ public:
+  virtual ~CryptStringImpl() {}
+  virtual size_t GetLength() const = 0;
+  virtual void CopyTo(char * dest, bool nullterminate) const = 0;
+  virtual std::string UrlEncode() const = 0;
+  virtual CryptStringImpl * Copy() const = 0;
+  virtual void CopyRawTo(std::vector<unsigned char> * dest) const = 0;
+};
+
+class EmptyCryptStringImpl : public CryptStringImpl {
+ public:
+  ~EmptyCryptStringImpl() override {}
+  size_t GetLength() const override;
+  void CopyTo(char* dest, bool nullterminate) const override;
+  std::string UrlEncode() const override;
+  CryptStringImpl* Copy() const override;
+  void CopyRawTo(std::vector<unsigned char>* dest) const override;
+};
+
+class CryptString {
+ public:
+  CryptString();
+  size_t GetLength() const { return impl_->GetLength(); }
+  void CopyTo(char* dest, bool nullterminate) const {
+    impl_->CopyTo(dest, nullterminate);
+  }
+  CryptString(const CryptString& other);
+  explicit CryptString(const CryptStringImpl& impl);
+  ~CryptString();
+  CryptString & operator=(const CryptString & other) {
+    if (this != &other) {
+      impl_.reset(other.impl_->Copy());
+    }
+    return *this;
+  }
+  void Clear() { impl_.reset(new EmptyCryptStringImpl()); }
+  std::string UrlEncode() const { return impl_->UrlEncode(); }
+  void CopyRawTo(std::vector<unsigned char> * dest) const {
+    return impl_->CopyRawTo(dest);
+  }
+
+ private:
+  std::unique_ptr<const CryptStringImpl> impl_;
+};
+
+class InsecureCryptStringImpl : public CryptStringImpl {
+ public:
+  std::string& password() { return password_; }
+  const std::string& password() const { return password_; }
+
+  ~InsecureCryptStringImpl() override = default;
+  size_t GetLength() const override;
+  void CopyTo(char* dest, bool nullterminate) const override;
+  std::string UrlEncode() const override;
+  CryptStringImpl* Copy() const override;
+  void CopyRawTo(std::vector<unsigned char>* dest) const override;
+
+ private:
+  std::string password_;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_CRYPTSTRING_H_
diff --git a/rtc_base/data_rate_limiter.cc b/rtc_base/data_rate_limiter.cc
new file mode 100644
index 0000000..7288257
--- /dev/null
+++ b/rtc_base/data_rate_limiter.cc
@@ -0,0 +1,29 @@
+/*
+ *  Copyright 2012 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/data_rate_limiter.h"
+
+namespace rtc {
+
+bool DataRateLimiter::CanUse(size_t desired, double time) {
+  return ((time > period_end_ && desired <= max_per_period_) ||
+          (used_in_period_ + desired) <= max_per_period_);
+}
+
+void DataRateLimiter::Use(size_t used, double time) {
+  if (time > period_end_) {
+    period_start_ = time;
+    period_end_ = time + period_length_;
+    used_in_period_ = 0;
+  }
+  used_in_period_ += used;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/data_rate_limiter.h b/rtc_base/data_rate_limiter.h
new file mode 100644
index 0000000..d290816
--- /dev/null
+++ b/rtc_base/data_rate_limiter.h
@@ -0,0 +1,56 @@
+/*
+ *  Copyright 2012 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_DATA_RATE_LIMITER_H_
+#define RTC_BASE_DATA_RATE_LIMITER_H_
+
+#include <stddef.h>
+
+namespace rtc {
+
+// Limits the rate of use to a certain maximum quantity per period of
+// time.  Use, for example, for simple bandwidth throttling.
+//
+// It's implemented like a diet plan: You have so many calories per
+// day.  If you hit the limit, you can't eat any more until the next
+// day.
+class DataRateLimiter {
+ public:
+  // For example, 100kb per second.
+  DataRateLimiter(size_t max, double period)
+      : max_per_period_(max),
+        period_length_(period),
+        used_in_period_(0),
+        period_start_(0.0),
+        period_end_(period) {}
+  virtual ~DataRateLimiter() {}
+
+  // Returns true if if the desired quantity is available in the
+  // current period (< (max - used)).  Once the given time passes the
+  // end of the period, used is set to zero and more use is available.
+  bool CanUse(size_t desired, double time);
+  // Increment the quantity used this period.  If past the end of a
+  // period, a new period is started.
+  void Use(size_t used, double time);
+
+  size_t used_in_period() const { return used_in_period_; }
+
+  size_t max_per_period() const { return max_per_period_; }
+
+ private:
+  size_t max_per_period_;
+  double period_length_;
+  size_t used_in_period_;
+  double period_start_;
+  double period_end_;
+};
+}  // namespace rtc
+
+#endif  // RTC_BASE_DATA_RATE_LIMITER_H_
diff --git a/rtc_base/data_rate_limiter_unittest.cc b/rtc_base/data_rate_limiter_unittest.cc
new file mode 100644
index 0000000..8c410fe
--- /dev/null
+++ b/rtc_base/data_rate_limiter_unittest.cc
@@ -0,0 +1,59 @@
+/*
+ *  Copyright 2012 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/data_rate_limiter.h"
+#include "rtc_base/gunit.h"
+
+namespace rtc {
+
+TEST(RateLimiterTest, TestCanUse) {
+  // Diet: Can eat 2,000 calories per day.
+  DataRateLimiter limiter = DataRateLimiter(2000, 1.0);
+
+  double monday = 1.0;
+  double tuesday = 2.0;
+  double thursday = 4.0;
+
+  EXPECT_TRUE(limiter.CanUse(0, monday));
+  EXPECT_TRUE(limiter.CanUse(1000, monday));
+  EXPECT_TRUE(limiter.CanUse(1999, monday));
+  EXPECT_TRUE(limiter.CanUse(2000, monday));
+  EXPECT_FALSE(limiter.CanUse(2001, monday));
+
+  limiter.Use(1000, monday);
+
+  EXPECT_TRUE(limiter.CanUse(0, monday));
+  EXPECT_TRUE(limiter.CanUse(999, monday));
+  EXPECT_TRUE(limiter.CanUse(1000, monday));
+  EXPECT_FALSE(limiter.CanUse(1001, monday));
+
+  limiter.Use(1000, monday);
+
+  EXPECT_TRUE(limiter.CanUse(0, monday));
+  EXPECT_FALSE(limiter.CanUse(1, monday));
+
+  EXPECT_TRUE(limiter.CanUse(0, tuesday));
+  EXPECT_TRUE(limiter.CanUse(1, tuesday));
+  EXPECT_TRUE(limiter.CanUse(1999, tuesday));
+  EXPECT_TRUE(limiter.CanUse(2000, tuesday));
+  EXPECT_FALSE(limiter.CanUse(2001, tuesday));
+
+  limiter.Use(1000, tuesday);
+
+  EXPECT_TRUE(limiter.CanUse(1000, tuesday));
+  EXPECT_FALSE(limiter.CanUse(1001, tuesday));
+
+  limiter.Use(1000, thursday);
+
+  EXPECT_TRUE(limiter.CanUse(1000, tuesday));
+  EXPECT_FALSE(limiter.CanUse(1001, tuesday));
+}
+
+}  // namespace rtc
diff --git a/rtc_base/deprecation.h b/rtc_base/deprecation.h
new file mode 100644
index 0000000..af776d5
--- /dev/null
+++ b/rtc_base/deprecation.h
@@ -0,0 +1,45 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_DEPRECATION_H_
+#define RTC_BASE_DEPRECATION_H_
+
+// Annotate the declarations of deprecated functions with this to cause a
+// compiler warning when they're used. Like so:
+//
+//   RTC_DEPRECATED std::pony PonyPlz(const std::pony_spec& ps);
+//
+// NOTE 1: The annotation goes on the declaration in the .h file, not the
+// definition in the .cc file!
+//
+// NOTE 2: In order to keep unit testing the deprecated function without
+// getting warnings, do something like this:
+//
+//   std::pony DEPRECATED_PonyPlz(const std::pony_spec& ps);
+//   RTC_DEPRECATED inline std::pony PonyPlz(const std::pony_spec& ps) {
+//     return DEPRECATED_PonyPlz(ps);
+//   }
+//
+// In other words, rename the existing function, and provide an inline wrapper
+// using the original name that calls it. That way, callers who are willing to
+// call it using the DEPRECATED_-prefixed name don't get the warning.
+//
+// TODO(kwiberg): Remove this when we can use [[deprecated]] from C++14.
+#if defined(_MSC_VER)
+// Note: Deprecation warnings seem to fail to trigger on Windows
+// (https://bugs.chromium.org/p/webrtc/issues/detail?id=5368).
+#define RTC_DEPRECATED __declspec(deprecated)
+#elif defined(__GNUC__)
+#define RTC_DEPRECATED __attribute__ ((__deprecated__))
+#else
+#define RTC_DEPRECATED
+#endif
+
+#endif  // RTC_BASE_DEPRECATION_H_
diff --git a/rtc_base/dscp.h b/rtc_base/dscp.h
new file mode 100644
index 0000000..bdce466
--- /dev/null
+++ b/rtc_base/dscp.h
@@ -0,0 +1,45 @@
+/*
+ *  Copyright 2013 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_DSCP_H_
+#define RTC_BASE_DSCP_H_
+
+namespace rtc {
+// Differentiated Services Code Point.
+// See http://tools.ietf.org/html/rfc2474 for details.
+enum DiffServCodePoint {
+  DSCP_NO_CHANGE = -1,
+  DSCP_DEFAULT = 0,  // Same as DSCP_CS0
+  DSCP_CS0  = 0,   // The default
+  DSCP_CS1  = 8,   // Bulk/background traffic
+  DSCP_AF11 = 10,
+  DSCP_AF12 = 12,
+  DSCP_AF13 = 14,
+  DSCP_CS2  = 16,
+  DSCP_AF21 = 18,
+  DSCP_AF22 = 20,
+  DSCP_AF23 = 22,
+  DSCP_CS3  = 24,
+  DSCP_AF31 = 26,
+  DSCP_AF32 = 28,
+  DSCP_AF33 = 30,
+  DSCP_CS4  = 32,
+  DSCP_AF41 = 34,  // Video
+  DSCP_AF42 = 36,  // Video
+  DSCP_AF43 = 38,  // Video
+  DSCP_CS5  = 40,  // Video
+  DSCP_EF   = 46,  // Voice
+  DSCP_CS6  = 48,  // Voice
+  DSCP_CS7  = 56,  // Control messages
+};
+
+}  // namespace rtc
+
+ #endif  // RTC_BASE_DSCP_H_
diff --git a/rtc_base/event.cc b/rtc_base/event.cc
new file mode 100644
index 0000000..ff4faad
--- /dev/null
+++ b/rtc_base/event.cc
@@ -0,0 +1,124 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/event.h"
+
+#if defined(WEBRTC_WIN)
+#include <windows.h>
+#elif defined(WEBRTC_POSIX)
+#include <pthread.h>
+#include <sys/time.h>
+#include <time.h>
+#else
+#error "Must define either WEBRTC_WIN or WEBRTC_POSIX."
+#endif
+
+#include "rtc_base/checks.h"
+
+namespace rtc {
+
+#if defined(WEBRTC_WIN)
+
+Event::Event(bool manual_reset, bool initially_signaled) {
+  event_handle_ = ::CreateEvent(nullptr,  // Security attributes.
+                                manual_reset, initially_signaled,
+                                nullptr);  // Name.
+  RTC_CHECK(event_handle_);
+}
+
+Event::~Event() {
+  CloseHandle(event_handle_);
+}
+
+void Event::Set() {
+  SetEvent(event_handle_);
+}
+
+void Event::Reset() {
+  ResetEvent(event_handle_);
+}
+
+bool Event::Wait(int milliseconds) {
+  DWORD ms = (milliseconds == kForever) ? INFINITE : milliseconds;
+  return (WaitForSingleObject(event_handle_, ms) == WAIT_OBJECT_0);
+}
+
+#elif defined(WEBRTC_POSIX)
+
+Event::Event(bool manual_reset, bool initially_signaled)
+    : is_manual_reset_(manual_reset),
+      event_status_(initially_signaled) {
+  RTC_CHECK(pthread_mutex_init(&event_mutex_, nullptr) == 0);
+  RTC_CHECK(pthread_cond_init(&event_cond_, nullptr) == 0);
+}
+
+Event::~Event() {
+  pthread_mutex_destroy(&event_mutex_);
+  pthread_cond_destroy(&event_cond_);
+}
+
+void Event::Set() {
+  pthread_mutex_lock(&event_mutex_);
+  event_status_ = true;
+  pthread_cond_broadcast(&event_cond_);
+  pthread_mutex_unlock(&event_mutex_);
+}
+
+void Event::Reset() {
+  pthread_mutex_lock(&event_mutex_);
+  event_status_ = false;
+  pthread_mutex_unlock(&event_mutex_);
+}
+
+bool Event::Wait(int milliseconds) {
+  int error = 0;
+
+  struct timespec ts;
+  if (milliseconds != kForever) {
+    // Converting from seconds and microseconds (1e-6) plus
+    // milliseconds (1e-3) to seconds and nanoseconds (1e-9).
+
+    struct timeval tv;
+    gettimeofday(&tv, nullptr);
+
+    ts.tv_sec = tv.tv_sec + (milliseconds / 1000);
+    ts.tv_nsec = tv.tv_usec * 1000 + (milliseconds % 1000) * 1000000;
+
+    // Handle overflow.
+    if (ts.tv_nsec >= 1000000000) {
+      ts.tv_sec++;
+      ts.tv_nsec -= 1000000000;
+    }
+  }
+
+  pthread_mutex_lock(&event_mutex_);
+  if (milliseconds != kForever) {
+    while (!event_status_ && error == 0) {
+      error = pthread_cond_timedwait(&event_cond_, &event_mutex_, &ts);
+    }
+  } else {
+    while (!event_status_ && error == 0)
+      error = pthread_cond_wait(&event_cond_, &event_mutex_);
+  }
+
+  // NOTE(liulk): Exactly one thread will auto-reset this event. All
+  // the other threads will think it's unsignaled.  This seems to be
+  // consistent with auto-reset events in WEBRTC_WIN
+  if (error == 0 && !is_manual_reset_)
+    event_status_ = false;
+
+  pthread_mutex_unlock(&event_mutex_);
+
+  return (error == 0);
+}
+
+#endif
+
+}  // namespace rtc
diff --git a/rtc_base/event.h b/rtc_base/event.h
new file mode 100644
index 0000000..7e61c4c
--- /dev/null
+++ b/rtc_base/event.h
@@ -0,0 +1,68 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_EVENT_H_
+#define RTC_BASE_EVENT_H_
+
+#include "rtc_base/constructormagic.h"
+#if defined(WEBRTC_WIN)
+#include <windows.h>
+#elif defined(WEBRTC_POSIX)
+#include <pthread.h>
+#else
+#error "Must define either WEBRTC_WIN or WEBRTC_POSIX."
+#endif
+
+namespace rtc {
+
+class Event {
+ public:
+  static const int kForever = -1;
+
+  Event(bool manual_reset, bool initially_signaled);
+  ~Event();
+
+  void Set();
+  void Reset();
+
+  // Wait for the event to become signaled, for the specified number of
+  // |milliseconds|.  To wait indefinetly, pass kForever.
+  bool Wait(int milliseconds);
+
+ private:
+#if defined(WEBRTC_WIN)
+  HANDLE event_handle_;
+#elif defined(WEBRTC_POSIX)
+  pthread_mutex_t event_mutex_;
+  pthread_cond_t event_cond_;
+  const bool is_manual_reset_;
+  bool event_status_;
+#endif
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(Event);
+};
+
+// This class is provided for compatibility with Chromium.
+// The rtc::Event implementation is overriden inside of Chromium for the
+// purposes of detecting when threads are blocked that shouldn't be as well as
+// to use the more accurate event implementation that's there than is provided
+// by default on some platforms (e.g. Windows).
+// When building with standalone WebRTC, this class is a noop.
+// For further information, please see the ScopedAllowBaseSyncPrimitives class
+// in Chromium.
+class ScopedAllowBaseSyncPrimitives {
+ public:
+  ScopedAllowBaseSyncPrimitives() {}
+  ~ScopedAllowBaseSyncPrimitives() {}
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_EVENT_H_
diff --git a/rtc_base/event_tracer.cc b/rtc_base/event_tracer.cc
new file mode 100644
index 0000000..9cbda97
--- /dev/null
+++ b/rtc_base/event_tracer.cc
@@ -0,0 +1,414 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "rtc_base/event_tracer.h"
+
+#include <inttypes.h>
+
+#include <string>
+#include <vector>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/event.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/stringutils.h"
+#include "rtc_base/timeutils.h"
+#include "rtc_base/trace_event.h"
+
+// This is a guesstimate that should be enough in most cases.
+static const size_t kEventLoggerArgsStrBufferInitialSize = 256;
+static const size_t kTraceArgBufferLength = 32;
+
+namespace webrtc {
+
+namespace {
+
+GetCategoryEnabledPtr g_get_category_enabled_ptr = nullptr;
+AddTraceEventPtr g_add_trace_event_ptr = nullptr;
+
+}  // namespace
+
+void SetupEventTracer(GetCategoryEnabledPtr get_category_enabled_ptr,
+                      AddTraceEventPtr add_trace_event_ptr) {
+  g_get_category_enabled_ptr = get_category_enabled_ptr;
+  g_add_trace_event_ptr = add_trace_event_ptr;
+}
+
+const unsigned char* EventTracer::GetCategoryEnabled(const char* name) {
+  if (g_get_category_enabled_ptr)
+    return g_get_category_enabled_ptr(name);
+
+  // A string with null terminator means category is disabled.
+  return reinterpret_cast<const unsigned char*>("\0");
+}
+
+// Arguments to this function (phase, etc.) are as defined in
+// webrtc/rtc_base/trace_event.h.
+void EventTracer::AddTraceEvent(char phase,
+                                const unsigned char* category_enabled,
+                                const char* name,
+                                unsigned long long id,
+                                int num_args,
+                                const char** arg_names,
+                                const unsigned char* arg_types,
+                                const unsigned long long* arg_values,
+                                unsigned char flags) {
+  if (g_add_trace_event_ptr) {
+    g_add_trace_event_ptr(phase,
+                          category_enabled,
+                          name,
+                          id,
+                          num_args,
+                          arg_names,
+                          arg_types,
+                          arg_values,
+                          flags);
+  }
+}
+
+}  // namespace webrtc
+
+namespace rtc {
+namespace tracing {
+namespace {
+
+static void EventTracingThreadFunc(void* params);
+
+// Atomic-int fast path for avoiding logging when disabled.
+static volatile int g_event_logging_active = 0;
+
+// TODO(pbos): Log metadata for all threads, etc.
+class EventLogger final {
+ public:
+  EventLogger()
+      : logging_thread_(EventTracingThreadFunc,
+                        this,
+                        "EventTracingThread",
+                        kLowPriority),
+        shutdown_event_(false, false) {}
+  ~EventLogger() { RTC_DCHECK(thread_checker_.CalledOnValidThread()); }
+
+  void AddTraceEvent(const char* name,
+                     const unsigned char* category_enabled,
+                     char phase,
+                     int num_args,
+                     const char** arg_names,
+                     const unsigned char* arg_types,
+                     const unsigned long long* arg_values,
+                     uint64_t timestamp,
+                     int pid,
+                     rtc::PlatformThreadId thread_id) {
+    std::vector<TraceArg> args(num_args);
+    for (int i = 0; i < num_args; ++i) {
+      TraceArg& arg = args[i];
+      arg.name = arg_names[i];
+      arg.type = arg_types[i];
+      arg.value.as_uint = arg_values[i];
+
+      // Value is a pointer to a temporary string, so we have to make a copy.
+      if (arg.type == TRACE_VALUE_TYPE_COPY_STRING) {
+        // Space for the string and for the terminating null character.
+        size_t str_length = strlen(arg.value.as_string) + 1;
+        char* str_copy = new char[str_length];
+        memcpy(str_copy, arg.value.as_string, str_length);
+        arg.value.as_string = str_copy;
+      }
+    }
+    rtc::CritScope lock(&crit_);
+    trace_events_.push_back(
+        {name, category_enabled, phase, args, timestamp, 1, thread_id});
+  }
+
+// The TraceEvent format is documented here:
+// https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview
+  void Log() {
+    RTC_DCHECK(output_file_);
+    static const int kLoggingIntervalMs = 100;
+    fprintf(output_file_, "{ \"traceEvents\": [\n");
+    bool has_logged_event = false;
+    while (true) {
+      bool shutting_down = shutdown_event_.Wait(kLoggingIntervalMs);
+      std::vector<TraceEvent> events;
+      {
+        rtc::CritScope lock(&crit_);
+        trace_events_.swap(events);
+      }
+      std::string args_str;
+      args_str.reserve(kEventLoggerArgsStrBufferInitialSize);
+      for (TraceEvent& e : events) {
+        args_str.clear();
+        if (!e.args.empty()) {
+          args_str += ", \"args\": {";
+          bool is_first_argument = true;
+          for (TraceArg& arg : e.args) {
+            if (!is_first_argument)
+              args_str += ",";
+            is_first_argument = false;
+            args_str += " \"";
+            args_str += arg.name;
+            args_str += "\": ";
+            args_str += TraceArgValueAsString(arg);
+
+            // Delete our copy of the string.
+            if (arg.type == TRACE_VALUE_TYPE_COPY_STRING) {
+              delete[] arg.value.as_string;
+              arg.value.as_string = nullptr;
+            }
+          }
+          args_str += " }";
+        }
+        fprintf(output_file_,
+                "%s{ \"name\": \"%s\""
+                ", \"cat\": \"%s\""
+                ", \"ph\": \"%c\""
+                ", \"ts\": %" PRIu64
+                ", \"pid\": %d"
+#if defined(WEBRTC_WIN)
+                ", \"tid\": %lu"
+#else
+                ", \"tid\": %d"
+#endif  // defined(WEBRTC_WIN)
+                "%s"
+                "}\n",
+                has_logged_event ? "," : " ", e.name, e.category_enabled,
+                e.phase, e.timestamp, e.pid, e.tid, args_str.c_str());
+        has_logged_event = true;
+      }
+      if (shutting_down)
+        break;
+    }
+    fprintf(output_file_, "]}\n");
+    if (output_file_owned_)
+      fclose(output_file_);
+    output_file_ = nullptr;
+  }
+
+  void Start(FILE* file, bool owned) {
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(file);
+    RTC_DCHECK(!output_file_);
+    output_file_ = file;
+    output_file_owned_ = owned;
+    {
+      rtc::CritScope lock(&crit_);
+      // Since the atomic fast-path for adding events to the queue can be
+      // bypassed while the logging thread is shutting down there may be some
+      // stale events in the queue, hence the vector needs to be cleared to not
+      // log events from a previous logging session (which may be days old).
+      trace_events_.clear();
+    }
+    // Enable event logging (fast-path). This should be disabled since starting
+    // shouldn't be done twice.
+    RTC_CHECK_EQ(0,
+                 rtc::AtomicOps::CompareAndSwap(&g_event_logging_active, 0, 1));
+
+    // Finally start, everything should be set up now.
+    logging_thread_.Start();
+    TRACE_EVENT_INSTANT0("webrtc", "EventLogger::Start");
+  }
+
+  void Stop() {
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
+    TRACE_EVENT_INSTANT0("webrtc", "EventLogger::Stop");
+    // Try to stop. Abort if we're not currently logging.
+    if (rtc::AtomicOps::CompareAndSwap(&g_event_logging_active, 1, 0) == 0)
+      return;
+
+    // Wake up logging thread to finish writing.
+    shutdown_event_.Set();
+    // Join the logging thread.
+    logging_thread_.Stop();
+  }
+
+ private:
+  struct TraceArg {
+    const char* name;
+    unsigned char type;
+    // Copied from webrtc/rtc_base/trace_event.h TraceValueUnion.
+    union TraceArgValue {
+      bool as_bool;
+      unsigned long long as_uint;
+      long long as_int;
+      double as_double;
+      const void* as_pointer;
+      const char* as_string;
+    } value;
+
+    // Assert that the size of the union is equal to the size of the as_uint
+    // field since we are assigning to arbitrary types using it.
+    static_assert(sizeof(TraceArgValue) == sizeof(unsigned long long),
+                  "Size of TraceArg value union is not equal to the size of "
+                  "the uint field of that union.");
+  };
+
+  struct TraceEvent {
+    const char* name;
+    const unsigned char* category_enabled;
+    char phase;
+    std::vector<TraceArg> args;
+    uint64_t timestamp;
+    int pid;
+    rtc::PlatformThreadId tid;
+  };
+
+  static std::string TraceArgValueAsString(TraceArg arg) {
+    std::string output;
+
+    if (arg.type == TRACE_VALUE_TYPE_STRING ||
+        arg.type == TRACE_VALUE_TYPE_COPY_STRING) {
+      // Space for every character to be an espaced character + two for
+      // quatation marks.
+      output.reserve(strlen(arg.value.as_string) * 2 + 2);
+      output += '\"';
+      const char* c = arg.value.as_string;
+      do {
+        if (*c == '"' || *c == '\\') {
+          output += '\\';
+          output += *c;
+        } else {
+          output += *c;
+        }
+      } while (*++c);
+      output += '\"';
+    } else {
+      output.resize(kTraceArgBufferLength);
+      size_t print_length = 0;
+      switch (arg.type) {
+        case TRACE_VALUE_TYPE_BOOL:
+          if (arg.value.as_bool) {
+            strcpy(&output[0], "true");
+            print_length = 4;
+          } else {
+            strcpy(&output[0], "false");
+            print_length = 5;
+          }
+          break;
+        case TRACE_VALUE_TYPE_UINT:
+          print_length = sprintfn(&output[0], kTraceArgBufferLength, "%llu",
+                                  arg.value.as_uint);
+          break;
+        case TRACE_VALUE_TYPE_INT:
+          print_length = sprintfn(&output[0], kTraceArgBufferLength, "%lld",
+                                  arg.value.as_int);
+          break;
+        case TRACE_VALUE_TYPE_DOUBLE:
+          print_length = sprintfn(&output[0], kTraceArgBufferLength, "%f",
+                                  arg.value.as_double);
+          break;
+        case TRACE_VALUE_TYPE_POINTER:
+          print_length = sprintfn(&output[0], kTraceArgBufferLength, "\"%p\"",
+                                  arg.value.as_pointer);
+          break;
+      }
+      size_t output_length = print_length < kTraceArgBufferLength
+                                 ? print_length
+                                 : kTraceArgBufferLength - 1;
+      // This will hopefully be very close to nop. On most implementations, it
+      // just writes null byte and sets the length field of the string.
+      output.resize(output_length);
+    }
+
+    return output;
+  }
+
+  rtc::CriticalSection crit_;
+  std::vector<TraceEvent> trace_events_ RTC_GUARDED_BY(crit_);
+  rtc::PlatformThread logging_thread_;
+  rtc::Event shutdown_event_;
+  rtc::ThreadChecker thread_checker_;
+  FILE* output_file_ = nullptr;
+  bool output_file_owned_ = false;
+};
+
+static void EventTracingThreadFunc(void* params) {
+  static_cast<EventLogger*>(params)->Log();
+}
+
+static EventLogger* volatile g_event_logger = nullptr;
+static const char* const kDisabledTracePrefix = TRACE_DISABLED_BY_DEFAULT("");
+const unsigned char* InternalGetCategoryEnabled(const char* name) {
+  const char* prefix_ptr = &kDisabledTracePrefix[0];
+  const char* name_ptr = name;
+  // Check whether name contains the default-disabled prefix.
+  while (*prefix_ptr == *name_ptr && *prefix_ptr != '\0') {
+    ++prefix_ptr;
+    ++name_ptr;
+  }
+  return reinterpret_cast<const unsigned char*>(*prefix_ptr == '\0' ? ""
+                                                                    : name);
+}
+
+void InternalAddTraceEvent(char phase,
+                           const unsigned char* category_enabled,
+                           const char* name,
+                           unsigned long long id,
+                           int num_args,
+                           const char** arg_names,
+                           const unsigned char* arg_types,
+                           const unsigned long long* arg_values,
+                           unsigned char flags) {
+  // Fast path for when event tracing is inactive.
+  if (rtc::AtomicOps::AcquireLoad(&g_event_logging_active) == 0)
+    return;
+
+  g_event_logger->AddTraceEvent(name, category_enabled, phase, num_args,
+                                arg_names, arg_types, arg_values,
+                                rtc::TimeMicros(), 1, rtc::CurrentThreadId());
+}
+
+}  // namespace
+
+void SetupInternalTracer() {
+  RTC_CHECK(rtc::AtomicOps::CompareAndSwapPtr(
+                &g_event_logger, static_cast<EventLogger*>(nullptr),
+                new EventLogger()) == nullptr);
+  webrtc::SetupEventTracer(InternalGetCategoryEnabled, InternalAddTraceEvent);
+}
+
+void StartInternalCaptureToFile(FILE* file) {
+  if (g_event_logger) {
+    g_event_logger->Start(file, false);
+  }
+}
+
+bool StartInternalCapture(const char* filename) {
+  if (!g_event_logger)
+    return false;
+
+  FILE* file = fopen(filename, "w");
+  if (!file) {
+    RTC_LOG(LS_ERROR) << "Failed to open trace file '" << filename
+                      << "' for writing.";
+    return false;
+  }
+  g_event_logger->Start(file, true);
+  return true;
+}
+
+void StopInternalCapture() {
+  if (g_event_logger) {
+    g_event_logger->Stop();
+  }
+}
+
+void ShutdownInternalTracer() {
+  StopInternalCapture();
+  EventLogger* old_logger = rtc::AtomicOps::AcquireLoadPtr(&g_event_logger);
+  RTC_DCHECK(old_logger);
+  RTC_CHECK(rtc::AtomicOps::CompareAndSwapPtr(
+                &g_event_logger, old_logger,
+                static_cast<EventLogger*>(nullptr)) == old_logger);
+  delete old_logger;
+  webrtc::SetupEventTracer(nullptr, nullptr);
+}
+
+}  // namespace tracing
+}  // namespace rtc
diff --git a/rtc_base/event_tracer.h b/rtc_base/event_tracer.h
new file mode 100644
index 0000000..fc7ad04
--- /dev/null
+++ b/rtc_base/event_tracer.h
@@ -0,0 +1,85 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file defines the interface for event tracing in WebRTC.
+//
+// Event log handlers are set through SetupEventTracer(). User of this API will
+// provide two function pointers to handle event tracing calls.
+//
+// * GetCategoryEnabledPtr
+//   Event tracing system calls this function to determine if a particular
+//   event category is enabled.
+//
+// * AddTraceEventPtr
+//   Adds a tracing event. It is the user's responsibility to log the data
+//   provided.
+//
+// Parameters for the above two functions are described in trace_event.h.
+
+#ifndef RTC_BASE_EVENT_TRACER_H_
+#define RTC_BASE_EVENT_TRACER_H_
+
+#include <stdio.h>
+
+namespace webrtc {
+
+typedef const unsigned char* (*GetCategoryEnabledPtr)(const char* name);
+typedef void (*AddTraceEventPtr)(char phase,
+                                 const unsigned char* category_enabled,
+                                 const char* name,
+                                 unsigned long long id,
+                                 int num_args,
+                                 const char** arg_names,
+                                 const unsigned char* arg_types,
+                                 const unsigned long long* arg_values,
+                                 unsigned char flags);
+
+// User of WebRTC can call this method to setup event tracing.
+//
+// This method must be called before any WebRTC methods. Functions
+// provided should be thread-safe.
+void SetupEventTracer(
+    GetCategoryEnabledPtr get_category_enabled_ptr,
+    AddTraceEventPtr add_trace_event_ptr);
+
+// This class defines interface for the event tracing system to call
+// internally. Do not call these methods directly.
+class EventTracer {
+ public:
+  static const unsigned char* GetCategoryEnabled(
+      const char* name);
+
+  static void AddTraceEvent(
+      char phase,
+      const unsigned char* category_enabled,
+      const char* name,
+      unsigned long long id,
+      int num_args,
+      const char** arg_names,
+      const unsigned char* arg_types,
+      const unsigned long long* arg_values,
+      unsigned char flags);
+};
+
+}  // namespace webrtc
+
+namespace rtc {
+namespace tracing {
+// Set up internal event tracer.
+void SetupInternalTracer();
+bool StartInternalCapture(const char* filename);
+void StartInternalCaptureToFile(FILE* file);
+void StopInternalCapture();
+// Make sure we run this, this will tear down the internal tracing.
+void ShutdownInternalTracer();
+}  // namespace tracing
+}  // namespace rtc
+
+#endif  // RTC_BASE_EVENT_TRACER_H_
diff --git a/rtc_base/event_tracer_unittest.cc b/rtc_base/event_tracer_unittest.cc
new file mode 100644
index 0000000..25b300b
--- /dev/null
+++ b/rtc_base/event_tracer_unittest.cc
@@ -0,0 +1,81 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/event_tracer.h"
+
+#include "rtc_base/trace_event.h"
+#include "test/gtest.h"
+
+namespace {
+
+class TestStatistics {
+ public:
+  TestStatistics() : events_logged_(0) {
+  }
+
+  void Reset() {
+    events_logged_ = 0;
+  }
+
+  void Increment() {
+    ++events_logged_;
+  }
+
+  int Count() const { return events_logged_; }
+
+  static TestStatistics* Get() {
+    static TestStatistics* test_stats = nullptr;
+    if (!test_stats)
+      test_stats = new TestStatistics();
+    return test_stats;
+  }
+
+ private:
+  int events_logged_;
+};
+
+static const unsigned char* GetCategoryEnabledHandler(const char* name) {
+  return reinterpret_cast<const unsigned char*>("test");
+}
+
+static void AddTraceEventHandler(char phase,
+                                 const unsigned char* category_enabled,
+                                 const char* name,
+                                 unsigned long long id,
+                                 int num_args,
+                                 const char** arg_names,
+                                 const unsigned char* arg_types,
+                                 const unsigned long long* arg_values,
+                                 unsigned char flags) {
+  TestStatistics::Get()->Increment();
+}
+
+}  // namespace
+
+namespace webrtc {
+
+TEST(EventTracerTest, EventTracerDisabled) {
+  {
+    TRACE_EVENT0("test", "EventTracerDisabled");
+  }
+  EXPECT_FALSE(TestStatistics::Get()->Count());
+  TestStatistics::Get()->Reset();
+}
+
+TEST(EventTracerTest, ScopedTraceEvent) {
+  SetupEventTracer(&GetCategoryEnabledHandler, &AddTraceEventHandler);
+  {
+    TRACE_EVENT0("test", "ScopedTraceEvent");
+  }
+  EXPECT_EQ(2, TestStatistics::Get()->Count());
+  TestStatistics::Get()->Reset();
+}
+
+}  // namespace webrtc
diff --git a/rtc_base/event_unittest.cc b/rtc_base/event_unittest.cc
new file mode 100644
index 0000000..5bb6f77
--- /dev/null
+++ b/rtc_base/event_unittest.cc
@@ -0,0 +1,94 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/event.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/platform_thread.h"
+
+namespace rtc {
+
+TEST(EventTest, InitiallySignaled) {
+  Event event(false, true);
+  ASSERT_TRUE(event.Wait(0));
+}
+
+TEST(EventTest, ManualReset) {
+  Event event(true, false);
+  ASSERT_FALSE(event.Wait(0));
+
+  event.Set();
+  ASSERT_TRUE(event.Wait(0));
+  ASSERT_TRUE(event.Wait(0));
+
+  event.Reset();
+  ASSERT_FALSE(event.Wait(0));
+}
+
+TEST(EventTest, AutoReset) {
+  Event event(false, false);
+  ASSERT_FALSE(event.Wait(0));
+
+  event.Set();
+  ASSERT_TRUE(event.Wait(0));
+  ASSERT_FALSE(event.Wait(0));
+}
+
+class SignalerThread {
+public:
+  SignalerThread() : thread_(&ThreadFn, this, "EventPerf") {}
+  void Start(Event* writer, Event* reader) {
+    writer_ = writer;
+    reader_ = reader;
+    thread_.Start();
+  }
+  void Stop() {
+    stop_event_.Set();
+    thread_.Stop();
+  }
+  static void ThreadFn(void *param) {
+    auto* me = static_cast<SignalerThread*>(param);
+    while(!me->stop_event_.Wait(0)) {
+      me->writer_->Set();
+      me->reader_->Wait(Event::kForever);
+    }
+  }
+  Event stop_event_{false, false};
+  Event* writer_;
+  Event* reader_;
+  PlatformThread thread_;
+};
+
+// These tests are disabled by default and only intended to be run manually.
+TEST(EventTest, DISABLED_PerformanceSingleThread) {
+  static const int kNumIterations = 10000000;
+  Event event(false, false);
+  for (int i = 0; i < kNumIterations; ++i) {
+    event.Set();
+    event.Wait(0);
+  }
+}
+
+TEST(EventTest, DISABLED_PerformanceMultiThread) {
+  static const int kNumIterations = 10000;
+  Event read(false, false);
+  Event write(false, false);
+  SignalerThread thread;
+  thread.Start(&read, &write);
+
+  for (int i = 0; i < kNumIterations; ++i) {
+    write.Set();
+    read.Wait(Event::kForever);
+  }
+  write.Set();
+
+  thread.Stop();
+}
+
+}  // namespace rtc
diff --git a/rtc_base/experiments/BUILD.gn b/rtc_base/experiments/BUILD.gn
new file mode 100644
index 0000000..305ad24
--- /dev/null
+++ b/rtc_base/experiments/BUILD.gn
@@ -0,0 +1,21 @@
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+rtc_static_library("alr_experiment") {
+  sources = [
+    "alr_experiment.cc",
+    "alr_experiment.h",
+  ]
+  deps = [
+    "../:rtc_base_approved",
+    "../../api:optional",
+    "../../system_wrappers:field_trial_api",
+  ]
+}
diff --git a/rtc_base/experiments/DEPS b/rtc_base/experiments/DEPS
new file mode 100644
index 0000000..8a9adf1
--- /dev/null
+++ b/rtc_base/experiments/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+  "+system_wrappers",
+]
diff --git a/rtc_base/experiments/alr_experiment.cc b/rtc_base/experiments/alr_experiment.cc
new file mode 100644
index 0000000..c69caed
--- /dev/null
+++ b/rtc_base/experiments/alr_experiment.cc
@@ -0,0 +1,83 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/experiments/alr_experiment.h"
+
+#include <string>
+
+#include "rtc_base/format_macros.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+const char AlrExperimentSettings::kScreenshareProbingBweExperimentName[] =
+    "WebRTC-ProbingScreenshareBwe";
+const char AlrExperimentSettings::kStrictPacingAndProbingExperimentName[] =
+    "WebRTC-StrictPacingAndProbing";
+const char kDefaultProbingScreenshareBweSettings[] = "1.0,2875,80,40,-60,3";
+
+bool AlrExperimentSettings::MaxOneFieldTrialEnabled() {
+  return field_trial::FindFullName(kStrictPacingAndProbingExperimentName)
+             .empty() ||
+         field_trial::FindFullName(kScreenshareProbingBweExperimentName)
+             .empty();
+}
+
+rtc::Optional<AlrExperimentSettings>
+AlrExperimentSettings::CreateFromFieldTrial(const char* experiment_name) {
+  rtc::Optional<AlrExperimentSettings> ret;
+  std::string group_name = field_trial::FindFullName(experiment_name);
+
+  const std::string kIgnoredSuffix = "_Dogfood";
+  std::string::size_type suffix_pos = group_name.rfind(kIgnoredSuffix);
+  if (suffix_pos != std::string::npos &&
+      suffix_pos == group_name.length() - kIgnoredSuffix.length()) {
+    group_name.resize(group_name.length() - kIgnoredSuffix.length());
+  }
+
+  if (experiment_name == kScreenshareProbingBweExperimentName) {
+    // This experiment is now default-on with fixed settings.
+    // TODO(sprang): Remove this kill-switch and clean up experiment code.
+    if (group_name != "Disabled") {
+      group_name = kDefaultProbingScreenshareBweSettings;
+    }
+  }
+
+  if (group_name.empty())
+    return ret;
+
+  AlrExperimentSettings settings;
+  if (sscanf(group_name.c_str(), "%f,%" PRId64 ",%d,%d,%d,%d",
+             &settings.pacing_factor, &settings.max_paced_queue_time,
+             &settings.alr_bandwidth_usage_percent,
+             &settings.alr_start_budget_level_percent,
+             &settings.alr_stop_budget_level_percent,
+             &settings.group_id) == 6) {
+    ret.emplace(settings);
+    RTC_LOG(LS_INFO) << "Using ALR experiment settings: "
+                        "pacing factor: "
+                     << settings.pacing_factor << ", max pacer queue length: "
+                     << settings.max_paced_queue_time
+                     << ", ALR start bandwidth usage percent: "
+                     << settings.alr_bandwidth_usage_percent
+                     << ", ALR end budget level percent: "
+                     << settings.alr_start_budget_level_percent
+                     << ", ALR end budget level percent: "
+                     << settings.alr_stop_budget_level_percent
+                     << ", ALR experiment group ID: " << settings.group_id;
+  } else {
+    RTC_LOG(LS_INFO) << "Failed to parse ALR experiment: " << experiment_name;
+  }
+
+  return ret;
+}
+
+}  // namespace webrtc
diff --git a/rtc_base/experiments/alr_experiment.h b/rtc_base/experiments/alr_experiment.h
new file mode 100644
index 0000000..a9c483d
--- /dev/null
+++ b/rtc_base/experiments/alr_experiment.h
@@ -0,0 +1,40 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_EXPERIMENTS_ALR_EXPERIMENT_H_
+#define RTC_BASE_EXPERIMENTS_ALR_EXPERIMENT_H_
+
+#include "api/optional.h"
+
+namespace webrtc {
+struct AlrExperimentSettings {
+ public:
+  float pacing_factor;
+  int64_t max_paced_queue_time;
+  int alr_bandwidth_usage_percent;
+  int alr_start_budget_level_percent;
+  int alr_stop_budget_level_percent;
+  // Will be sent to the receive side for stats slicing.
+  // Can be 0..6, because it's sent as a 3 bits value and there's also
+  // reserved value to indicate absence of experiment.
+  int group_id;
+
+  static const char kScreenshareProbingBweExperimentName[];
+  static const char kStrictPacingAndProbingExperimentName[];
+  static rtc::Optional<AlrExperimentSettings> CreateFromFieldTrial(
+      const char* experiment_name);
+  static bool MaxOneFieldTrialEnabled();
+
+ private:
+  AlrExperimentSettings() = default;
+};
+}  // namespace webrtc
+
+#endif  // RTC_BASE_EXPERIMENTS_ALR_EXPERIMENT_H_
diff --git a/rtc_base/fakeclock.cc b/rtc_base/fakeclock.cc
new file mode 100644
index 0000000..721f699
--- /dev/null
+++ b/rtc_base/fakeclock.cc
@@ -0,0 +1,50 @@
+/*
+ *  Copyright 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/fakeclock.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/messagequeue.h"
+
+namespace rtc {
+
+int64_t FakeClock::TimeNanos() const {
+  CritScope cs(&lock_);
+  return time_;
+}
+
+void FakeClock::SetTimeNanos(int64_t nanos) {
+  {
+    CritScope cs(&lock_);
+    RTC_DCHECK(nanos >= time_);
+    time_ = nanos;
+  }
+  // If message queues are waiting in a socket select() with a timeout provided
+  // by the OS, they should wake up and dispatch all messages that are ready.
+  MessageQueueManager::ProcessAllMessageQueues();
+}
+
+void FakeClock::AdvanceTime(TimeDelta delta) {
+  {
+    CritScope cs(&lock_);
+    time_ += delta.ToNanoseconds();
+  }
+  MessageQueueManager::ProcessAllMessageQueues();
+}
+
+ScopedFakeClock::ScopedFakeClock() {
+  prev_clock_ = SetClockForTesting(this);
+}
+
+ScopedFakeClock::~ScopedFakeClock() {
+  SetClockForTesting(prev_clock_);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/fakeclock.h b/rtc_base/fakeclock.h
new file mode 100644
index 0000000..1b1ee71
--- /dev/null
+++ b/rtc_base/fakeclock.h
@@ -0,0 +1,71 @@
+/*
+ *  Copyright 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_FAKECLOCK_H_
+#define RTC_BASE_FAKECLOCK_H_
+
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/timedelta.h"
+#include "rtc_base/timeutils.h"
+
+namespace rtc {
+
+// Fake clock for use with unit tests, which does not tick on its own.
+// Starts at time 0.
+//
+// TODO(deadbeef): Unify with webrtc::SimulatedClock.
+class FakeClock : public ClockInterface {
+ public:
+  ~FakeClock() override {}
+
+  // ClockInterface implementation.
+  int64_t TimeNanos() const override;
+
+  // Methods that can be used by the test to control the time.
+
+  // Should only be used to set a time in the future.
+  void SetTimeNanos(int64_t nanos);
+  void SetTimeMicros(int64_t micros) {
+    SetTimeNanos(kNumNanosecsPerMicrosec * micros);
+  }
+
+  void AdvanceTime(TimeDelta delta);
+  void AdvanceTimeMicros(int64_t micros) {
+    AdvanceTime(rtc::TimeDelta::FromMicroseconds(micros));
+  }
+ private:
+  CriticalSection lock_;
+  int64_t time_ RTC_GUARDED_BY(lock_) = 0;
+};
+
+// Helper class that sets itself as the global clock in its constructor and
+// unsets it in its destructor.
+class ScopedFakeClock : public FakeClock {
+ public:
+  ScopedFakeClock();
+  ~ScopedFakeClock() override;
+
+ private:
+  ClockInterface* prev_clock_;
+};
+
+// Helper class to "undo" the fake clock temporarily.
+class ScopedRealClock {
+ public:
+  ScopedRealClock();
+  ~ScopedRealClock();
+
+ private:
+  ClockInterface* prev_clock_;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_FAKECLOCK_H_
diff --git a/rtc_base/fakenetwork.h b/rtc_base/fakenetwork.h
new file mode 100644
index 0000000..fd4eb61
--- /dev/null
+++ b/rtc_base/fakenetwork.h
@@ -0,0 +1,129 @@
+/*
+ *  Copyright 2009 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_FAKENETWORK_H_
+#define RTC_BASE_FAKENETWORK_H_
+
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "rtc_base/messagehandler.h"
+#include "rtc_base/network.h"
+#include "rtc_base/socketaddress.h"
+#include "rtc_base/stringencode.h"
+#include "rtc_base/thread.h"
+
+namespace rtc {
+
+const int kFakeIPv4NetworkPrefixLength = 24;
+const int kFakeIPv6NetworkPrefixLength = 64;
+
+// Fake network manager that allows us to manually specify the IPs to use.
+class FakeNetworkManager : public NetworkManagerBase,
+                           public MessageHandler {
+ public:
+  FakeNetworkManager() {}
+
+  typedef std::vector<std::pair<SocketAddress, AdapterType>> IfaceList;
+
+  void AddInterface(const SocketAddress& iface) {
+    // Ensure a unique name for the interface if its name is not given.
+    AddInterface(iface, "test" + rtc::ToString(next_index_++));
+  }
+
+  void AddInterface(const SocketAddress& iface, const std::string& if_name) {
+    AddInterface(iface, if_name, ADAPTER_TYPE_UNKNOWN);
+  }
+
+  void AddInterface(const SocketAddress& iface,
+                    const std::string& if_name,
+                    AdapterType type) {
+    SocketAddress address(if_name, 0);
+    address.SetResolvedIP(iface.ipaddr());
+    ifaces_.push_back(std::make_pair(address, type));
+    DoUpdateNetworks();
+  }
+
+  void RemoveInterface(const SocketAddress& iface) {
+    for (IfaceList::iterator it = ifaces_.begin();
+         it != ifaces_.end(); ++it) {
+      if (it->first.EqualIPs(iface)) {
+        ifaces_.erase(it);
+        break;
+      }
+    }
+    DoUpdateNetworks();
+  }
+
+  virtual void StartUpdating() {
+    ++start_count_;
+    if (start_count_ == 1) {
+      sent_first_update_ = false;
+      rtc::Thread::Current()->Post(RTC_FROM_HERE, this);
+    } else {
+      if (sent_first_update_) {
+        SignalNetworksChanged();
+      }
+    }
+  }
+
+  virtual void StopUpdating() { --start_count_; }
+
+  // MessageHandler interface.
+  virtual void OnMessage(Message* msg) {
+    DoUpdateNetworks();
+  }
+
+  using NetworkManagerBase::set_enumeration_permission;
+  using NetworkManagerBase::set_default_local_addresses;
+
+ private:
+  void DoUpdateNetworks() {
+    if (start_count_ == 0)
+      return;
+    std::vector<Network*> networks;
+    for (IfaceList::iterator it = ifaces_.begin();
+         it != ifaces_.end(); ++it) {
+      int prefix_length = 0;
+      if (it->first.ipaddr().family() == AF_INET) {
+        prefix_length = kFakeIPv4NetworkPrefixLength;
+      } else if (it->first.ipaddr().family() == AF_INET6) {
+        prefix_length = kFakeIPv6NetworkPrefixLength;
+      }
+      IPAddress prefix = TruncateIP(it->first.ipaddr(), prefix_length);
+      std::unique_ptr<Network> net(new Network(it->first.hostname(),
+                                               it->first.hostname(), prefix,
+                                               prefix_length, it->second));
+      net->set_default_local_address_provider(this);
+      net->AddIP(it->first.ipaddr());
+      networks.push_back(net.release());
+    }
+    bool changed;
+    MergeNetworkList(networks, &changed);
+    if (changed || !sent_first_update_) {
+      SignalNetworksChanged();
+      sent_first_update_ = true;
+    }
+  }
+
+  IfaceList ifaces_;
+  int next_index_ = 0;
+  int start_count_ = 0;
+  bool sent_first_update_ = false;
+
+  IPAddress default_local_ipv4_address_;
+  IPAddress default_local_ipv6_address_;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_FAKENETWORK_H_
diff --git a/rtc_base/fakesslidentity.cc b/rtc_base/fakesslidentity.cc
new file mode 100644
index 0000000..825c89b
--- /dev/null
+++ b/rtc_base/fakesslidentity.cc
@@ -0,0 +1,120 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/fakesslidentity.h"
+
+#include <algorithm>
+#include <string>
+#include <utility>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/messagedigest.h"
+#include "rtc_base/ptr_util.h"
+
+namespace rtc {
+
+FakeSSLCertificate::FakeSSLCertificate(const std::string& pem_string)
+    : pem_string_(pem_string),
+      digest_algorithm_(DIGEST_SHA_1),
+      expiration_time_(-1) {}
+
+FakeSSLCertificate::FakeSSLCertificate(const FakeSSLCertificate&) = default;
+
+FakeSSLCertificate::~FakeSSLCertificate() = default;
+
+FakeSSLCertificate* FakeSSLCertificate::GetReference() const {
+  return new FakeSSLCertificate(*this);
+}
+
+std::string FakeSSLCertificate::ToPEMString() const {
+  return pem_string_;
+}
+
+void FakeSSLCertificate::ToDER(Buffer* der_buffer) const {
+  std::string der_string;
+  RTC_CHECK(
+      SSLIdentity::PemToDer(kPemTypeCertificate, pem_string_, &der_string));
+  der_buffer->SetData(der_string.c_str(), der_string.size());
+}
+
+int64_t FakeSSLCertificate::CertificateExpirationTime() const {
+  return expiration_time_;
+}
+
+void FakeSSLCertificate::SetCertificateExpirationTime(int64_t expiration_time) {
+  expiration_time_ = expiration_time;
+}
+
+void FakeSSLCertificate::set_digest_algorithm(const std::string& algorithm) {
+  digest_algorithm_ = algorithm;
+}
+
+bool FakeSSLCertificate::GetSignatureDigestAlgorithm(
+    std::string* algorithm) const {
+  *algorithm = digest_algorithm_;
+  return true;
+}
+
+bool FakeSSLCertificate::ComputeDigest(const std::string& algorithm,
+                                       unsigned char* digest,
+                                       size_t size,
+                                       size_t* length) const {
+  *length = rtc::ComputeDigest(algorithm, pem_string_.c_str(),
+                               pem_string_.size(), digest, size);
+  return (*length != 0);
+}
+
+FakeSSLIdentity::FakeSSLIdentity(const std::string& pem_string)
+    : FakeSSLIdentity(FakeSSLCertificate(pem_string)) {}
+
+FakeSSLIdentity::FakeSSLIdentity(const std::vector<std::string>& pem_strings) {
+  std::vector<std::unique_ptr<SSLCertificate>> certs;
+  for (const std::string& pem_string : pem_strings) {
+    certs.push_back(MakeUnique<FakeSSLCertificate>(pem_string));
+  }
+  cert_chain_ = MakeUnique<SSLCertChain>(std::move(certs));
+}
+
+FakeSSLIdentity::FakeSSLIdentity(const FakeSSLCertificate& cert)
+    : cert_chain_(MakeUnique<SSLCertChain>(&cert)) {}
+
+FakeSSLIdentity::FakeSSLIdentity(const FakeSSLIdentity& o)
+    : cert_chain_(o.cert_chain_->UniqueCopy()) {}
+
+FakeSSLIdentity::~FakeSSLIdentity() = default;
+
+FakeSSLIdentity* FakeSSLIdentity::GetReference() const {
+  return new FakeSSLIdentity(*this);
+}
+
+const SSLCertificate& FakeSSLIdentity::certificate() const {
+  return cert_chain_->Get(0);
+}
+
+const SSLCertChain& FakeSSLIdentity::cert_chain() const {
+  return *cert_chain_.get();
+}
+
+std::string FakeSSLIdentity::PrivateKeyToPEMString() const {
+  RTC_NOTREACHED();  // Not implemented.
+  return "";
+}
+
+std::string FakeSSLIdentity::PublicKeyToPEMString() const {
+  RTC_NOTREACHED();  // Not implemented.
+  return "";
+}
+
+bool FakeSSLIdentity::operator==(const SSLIdentity& other) const {
+  RTC_NOTREACHED();  // Not implemented.
+  return false;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/fakesslidentity.h b/rtc_base/fakesslidentity.h
new file mode 100644
index 0000000..4494a52
--- /dev/null
+++ b/rtc_base/fakesslidentity.h
@@ -0,0 +1,80 @@
+/*
+ *  Copyright 2012 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_FAKESSLIDENTITY_H_
+#define RTC_BASE_FAKESSLIDENTITY_H_
+
+#include <memory>
+#include <vector>
+
+#include "rtc_base/sslidentity.h"
+
+namespace rtc {
+
+class FakeSSLCertificate : public SSLCertificate {
+ public:
+  // SHA-1 is the default digest algorithm because it is available in all build
+  // configurations used for unit testing.
+  explicit FakeSSLCertificate(const std::string& pem_string);
+
+  FakeSSLCertificate(const FakeSSLCertificate&);
+  ~FakeSSLCertificate() override;
+
+  // SSLCertificate implementation.
+  FakeSSLCertificate* GetReference() const override;
+  std::string ToPEMString() const override;
+  void ToDER(Buffer* der_buffer) const override;
+  int64_t CertificateExpirationTime() const override;
+  bool GetSignatureDigestAlgorithm(std::string* algorithm) const override;
+  bool ComputeDigest(const std::string& algorithm,
+                     unsigned char* digest,
+                     size_t size,
+                     size_t* length) const override;
+
+  void SetCertificateExpirationTime(int64_t expiration_time);
+
+  void set_digest_algorithm(const std::string& algorithm);
+
+ private:
+  std::string pem_string_;
+  std::string digest_algorithm_;
+  // Expiration time in seconds relative to epoch, 1970-01-01T00:00:00Z (UTC).
+  int64_t expiration_time_;
+};
+
+class FakeSSLIdentity : public SSLIdentity {
+ public:
+  explicit FakeSSLIdentity(const std::string& pem_string);
+  // For a certificate chain.
+  explicit FakeSSLIdentity(const std::vector<std::string>& pem_strings);
+  explicit FakeSSLIdentity(const FakeSSLCertificate& cert);
+
+  explicit FakeSSLIdentity(const FakeSSLIdentity& o);
+
+  ~FakeSSLIdentity() override;
+
+  // SSLIdentity implementation.
+  FakeSSLIdentity* GetReference() const override;
+  const SSLCertificate& certificate() const override;
+  const SSLCertChain& cert_chain() const override;
+  // Not implemented.
+  std::string PrivateKeyToPEMString() const override;
+  // Not implemented.
+  std::string PublicKeyToPEMString() const override;
+  // Not implemented.
+  virtual bool operator==(const SSLIdentity& other) const;
+
+ private:
+  std::unique_ptr<SSLCertChain> cert_chain_;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_FAKESSLIDENTITY_H_
diff --git a/rtc_base/file.cc b/rtc_base/file.cc
new file mode 100644
index 0000000..a6ee2aa
--- /dev/null
+++ b/rtc_base/file.cc
@@ -0,0 +1,94 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/file.h"
+
+#include <utility>
+
+namespace rtc {
+
+namespace {
+
+std::string NormalizePathname(Pathname&& path) {
+  path.Normalize();
+  return path.pathname();
+}
+
+}  // namespace
+
+File::File(PlatformFile file) : file_(file) {}
+
+File::File() : file_(kInvalidPlatformFileValue) {}
+
+File::~File() {
+  Close();
+}
+
+// static
+File File::Open(const std::string& path) {
+  return File(OpenPlatformFile(path));
+}
+
+// static
+File File::Open(Pathname&& path) {
+  return Open(NormalizePathname(std::move(path)));
+}
+
+// static
+File File::Open(const Pathname& path) {
+  return Open(Pathname(path));
+}
+
+// static
+File File::Create(const std::string& path) {
+  return File(CreatePlatformFile(path));
+}
+
+// static
+File File::Create(Pathname&& path) {
+  return Create(NormalizePathname(std::move(path)));
+}
+
+// static
+File File::Create(const Pathname& path) {
+  return Create(Pathname(path));
+}
+
+// static
+bool File::Remove(const std::string& path) {
+  return RemoveFile(path);
+}
+
+// static
+bool File::Remove(Pathname&& path) {
+  return Remove(NormalizePathname(std::move(path)));
+}
+
+// static
+bool File::Remove(const Pathname& path) {
+  return Remove(Pathname(path));
+}
+
+File::File(File&& other) : file_(other.file_) {
+  other.file_ = kInvalidPlatformFileValue;
+}
+
+File& File::operator=(File&& other) {
+  Close();
+  file_ = other.file_;
+  other.file_ = kInvalidPlatformFileValue;
+  return *this;
+}
+
+bool File::IsOpen() {
+  return file_ != kInvalidPlatformFileValue;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/file.h b/rtc_base/file.h
new file mode 100644
index 0000000..f87d9ce
--- /dev/null
+++ b/rtc_base/file.h
@@ -0,0 +1,82 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_FILE_H_
+#define RTC_BASE_FILE_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/pathutils.h"
+#include "rtc_base/platform_file.h"
+
+namespace rtc {
+
+// This class wraps the platform specific APIs for simple file interactions.
+//
+// The various read and write methods are best effort, i.e. if an underlying
+// call does not manage to read/write all the data more calls will be performed,
+// until an error is detected or all data is read/written.
+class File {
+ public:
+  // Wraps the given PlatformFile. This class is then responsible for closing
+  // the file, which will be done in the destructor if Close is never called.
+  explicit File(PlatformFile);
+  // The default constructor produces a closed file.
+  File();
+  ~File();
+
+  File(File&& other);
+  File& operator=(File&& other);
+
+  // Open and Create give files with both reading and writing enabled.
+  static File Open(const std::string& path);
+  static File Open(Pathname&& path);
+  static File Open(const Pathname& path);
+  // If the file already exists it will be overwritten.
+  static File Create(const std::string& path);
+  static File Create(Pathname&& path);
+  static File Create(const Pathname& path);
+
+  // Remove a file in the file system.
+  static bool Remove(const std::string& path);
+  static bool Remove(Pathname&& path);
+  static bool Remove(const Pathname& path);
+
+  size_t Write(const uint8_t* data, size_t length);
+  size_t Read(uint8_t* buffer, size_t length);
+
+  // The current position in the file after a call to these methods is platform
+  // dependent (MSVC gives position offset+length, most other
+  // compilers/platforms do not alter the position), i.e. do not depend on it,
+  // do a Seek before any subsequent Read/Write.
+  size_t WriteAt(const uint8_t* data, size_t length, size_t offset);
+  size_t ReadAt(uint8_t* buffer, size_t length, size_t offset);
+
+  // Attempt to position the file at the given offset from the start.
+  // Returns true if successful, false otherwise.
+  bool Seek(size_t offset);
+
+  // Attempt to close the file. Returns true if successful, false otherwise,
+  // most notably when the file is already closed.
+  bool Close();
+
+  bool IsOpen();
+
+ private:
+  PlatformFile file_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(File);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_FILE_H_
diff --git a/rtc_base/file_posix.cc b/rtc_base/file_posix.cc
new file mode 100644
index 0000000..b0fec9f
--- /dev/null
+++ b/rtc_base/file_posix.cc
@@ -0,0 +1,96 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/file.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <limits>
+
+#include "rtc_base/checks.h"
+
+namespace rtc {
+
+size_t File::Write(const uint8_t* data, size_t length) {
+  size_t total_written = 0;
+  do {
+    ssize_t written;
+    do {
+      written = ::write(file_, data + total_written, length - total_written);
+    } while (written == -1 && errno == EINTR);
+    if (written == -1)
+      break;
+    total_written += written;
+  } while (total_written < length);
+  return total_written;
+}
+
+size_t File::Read(uint8_t* buffer, size_t length) {
+  size_t total_read = 0;
+  do {
+    ssize_t read;
+    do {
+      read = ::read(file_, buffer + total_read, length - total_read);
+    } while (read == -1 && errno == EINTR);
+    if (read == -1)
+      break;
+    total_read += read;
+  } while (total_read < length);
+  return total_read;
+}
+
+size_t File::WriteAt(const uint8_t* data, size_t length, size_t offset) {
+  size_t total_written = 0;
+  do {
+    ssize_t written;
+    do {
+      written = ::pwrite(file_, data + total_written, length - total_written,
+                         offset + total_written);
+    } while (written == -1 && errno == EINTR);
+    if (written == -1)
+      break;
+    total_written += written;
+  } while (total_written < length);
+  return total_written;
+}
+
+size_t File::ReadAt(uint8_t* buffer, size_t length, size_t offset) {
+  size_t total_read = 0;
+  do {
+    ssize_t read;
+    do {
+      read = ::pread(file_, buffer + total_read, length - total_read,
+                     offset + total_read);
+    } while (read == -1 && errno == EINTR);
+    if (read == -1)
+      break;
+    total_read += read;
+  } while (total_read < length);
+  return total_read;
+}
+
+bool File::Seek(size_t offset) {
+  RTC_DCHECK_LE(offset, std::numeric_limits<off_t>::max());
+  return lseek(file_, static_cast<off_t>(offset), SEEK_SET) != -1;
+}
+
+bool File::Close() {
+  if (file_ == rtc::kInvalidPlatformFileValue)
+    return false;
+  bool ret = close(file_) == 0;
+  file_ = rtc::kInvalidPlatformFileValue;
+  return ret;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/file_unittest.cc b/rtc_base/file_unittest.cc
new file mode 100644
index 0000000..a8e39dd
--- /dev/null
+++ b/rtc_base/file_unittest.cc
@@ -0,0 +1,201 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits>
+#include <memory>
+#include <string>
+
+#include "rtc_base/file.h"
+#include "rtc_base/gunit.h"
+#include "test/testsupport/fileutils.h"
+
+#if defined(WEBRTC_WIN)
+
+#include "rtc_base/win32.h"
+
+#else  // if defined(WEBRTC_WIN)
+
+#include <errno.h>
+
+#endif
+
+namespace rtc {
+
+int LastError() {
+#if defined(WEBRTC_WIN)
+  return ::GetLastError();
+#else
+  return errno;
+#endif
+}
+
+bool VerifyBuffer(uint8_t* buffer, size_t length, uint8_t start_value) {
+  for (size_t i = 0; i < length; ++i) {
+    uint8_t val = start_value++;
+    EXPECT_EQ(val, buffer[i]);
+    if (buffer[i] != val)
+      return false;
+  }
+  // Prevent the same buffer from being verified multiple times simply
+  // because some operation that should have written to it failed
+  memset(buffer, 0, length);
+  return true;
+}
+
+class FileTest : public ::testing::Test {
+ protected:
+  std::string path_;
+  void SetUp() override {
+    path_ = webrtc::test::TempFilename(webrtc::test::OutputPath(), "test_file");
+    ASSERT_FALSE(path_.empty());
+  }
+  void TearDown() override { RemoveFile(path_); }
+};
+
+TEST_F(FileTest, DefaultConstructor) {
+  File file;
+  uint8_t buffer[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+
+  EXPECT_FALSE(file.IsOpen());
+  EXPECT_EQ(0u, file.Write(buffer, 10));
+  EXPECT_FALSE(file.Seek(0));
+  EXPECT_EQ(0u, file.Read(buffer, 10));
+  EXPECT_EQ(0u, file.WriteAt(buffer, 10, 0));
+  EXPECT_EQ(0u, file.ReadAt(buffer, 10, 0));
+  EXPECT_FALSE(file.Close());
+}
+
+TEST_F(FileTest, DoubleClose) {
+  File file = File::Open(path_);
+  ASSERT_TRUE(file.IsOpen()) << "Error: " << LastError();
+
+  EXPECT_TRUE(file.Close());
+  EXPECT_FALSE(file.Close());
+}
+
+TEST_F(FileTest, SimpleReadWrite) {
+  File file = File::Open(path_);
+  ASSERT_TRUE(file.IsOpen()) << "Error: " << LastError();
+
+  uint8_t data[100] = {0};
+  uint8_t out[100] = {0};
+  for (int i = 0; i < 100; ++i) {
+    data[i] = i;
+  }
+
+  EXPECT_EQ(10u, file.Write(data, 10));
+
+  EXPECT_TRUE(file.Seek(0));
+  EXPECT_EQ(10u, file.Read(out, 10));
+  EXPECT_TRUE(VerifyBuffer(out, 10, 0));
+
+  EXPECT_TRUE(file.Seek(0));
+  EXPECT_EQ(100u, file.Write(data, 100));
+
+  EXPECT_TRUE(file.Seek(0));
+  EXPECT_EQ(100u, file.Read(out, 100));
+  EXPECT_TRUE(VerifyBuffer(out, 100, 0));
+
+  EXPECT_TRUE(file.Seek(1));
+  EXPECT_EQ(50u, file.Write(data, 50));
+  EXPECT_EQ(50u, file.Write(data + 50, 50));
+
+  EXPECT_TRUE(file.Seek(1));
+  EXPECT_EQ(100u, file.Read(out, 100));
+  EXPECT_TRUE(VerifyBuffer(out, 100, 0));
+}
+
+TEST_F(FileTest, ReadWriteClose) {
+  File file = File::Open(path_);
+  ASSERT_TRUE(file.IsOpen()) << "Error: " << LastError();
+
+  uint8_t data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+  uint8_t out[10] = {0};
+  EXPECT_EQ(10u, file.Write(data, 10));
+  EXPECT_TRUE(file.Close());
+
+  File file2 = File::Open(path_);
+  ASSERT_TRUE(file2.IsOpen()) << "Error: " << LastError();
+  EXPECT_EQ(10u, file2.Read(out, 10));
+  EXPECT_TRUE(VerifyBuffer(out, 10, 0));
+}
+
+TEST_F(FileTest, RandomAccessRead) {
+  File file = File::Open(path_);
+  ASSERT_TRUE(file.IsOpen()) << "Error: " << LastError();
+
+  uint8_t data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+  uint8_t out[10] = {0};
+  EXPECT_EQ(10u, file.Write(data, 10));
+
+  EXPECT_EQ(4u, file.ReadAt(out, 4, 0));
+  EXPECT_TRUE(VerifyBuffer(out, 4, 0));
+
+  EXPECT_EQ(4u, file.ReadAt(out, 4, 4));
+  EXPECT_TRUE(VerifyBuffer(out, 4, 4));
+
+  EXPECT_EQ(5u, file.ReadAt(out, 5, 5));
+  EXPECT_TRUE(VerifyBuffer(out, 5, 5));
+}
+
+TEST_F(FileTest, RandomAccessReadWrite) {
+  File file = File::Open(path_);
+  ASSERT_TRUE(file.IsOpen()) << "Error: " << LastError();
+
+  uint8_t data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+  uint8_t out[10] = {0};
+  EXPECT_EQ(10u, file.Write(data, 10));
+  EXPECT_TRUE(file.Seek(4));
+
+  EXPECT_EQ(4u, file.WriteAt(data, 4, 4));
+  EXPECT_EQ(4u, file.ReadAt(out, 4, 4));
+  EXPECT_TRUE(VerifyBuffer(out, 4, 0));
+
+  EXPECT_EQ(2u, file.WriteAt(data, 2, 8));
+  EXPECT_EQ(2u, file.ReadAt(out, 2, 8));
+  EXPECT_TRUE(VerifyBuffer(out, 2, 0));
+}
+
+TEST_F(FileTest, OpenFromPathname) {
+  {
+    File file = File::Open(Pathname(path_));
+    ASSERT_TRUE(file.IsOpen()) << "Error: " << LastError();
+  }
+
+  {
+    Pathname path(path_);
+    File file = File::Open(path);
+    ASSERT_TRUE(file.IsOpen()) << "Error: " << LastError();
+  }
+}
+
+TEST_F(FileTest, CreateFromPathname) {
+  {
+    File file = File::Create(Pathname(path_));
+    ASSERT_TRUE(file.IsOpen()) << "Error: " << LastError();
+  }
+
+  {
+    Pathname path(path_);
+    File file = File::Create(path);
+    ASSERT_TRUE(file.IsOpen()) << "Error: " << LastError();
+  }
+}
+
+TEST_F(FileTest, ShouldBeAbleToRemoveFile) {
+  {
+    File file = File::Open(Pathname(path_));
+    ASSERT_TRUE(file.IsOpen()) << "Error: " << LastError();
+  }
+
+  ASSERT_TRUE(File::Remove(Pathname(path_))) << "Error: " << LastError();
+}
+
+}  // namespace rtc
diff --git a/rtc_base/file_win.cc b/rtc_base/file_win.cc
new file mode 100644
index 0000000..d7580aa
--- /dev/null
+++ b/rtc_base/file_win.cc
@@ -0,0 +1,113 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/file.h"
+
+#include <io.h>
+#include <windows.h>
+
+#include <limits>
+
+#include "rtc_base/checks.h"
+
+namespace rtc {
+
+size_t File::Write(const uint8_t* data, size_t length) {
+  RTC_DCHECK_LT(length, std::numeric_limits<DWORD>::max());
+  size_t total_written = 0;
+  do {
+    DWORD written;
+    if (!::WriteFile(file_, data + total_written,
+                     static_cast<DWORD>(length - total_written), &written,
+                     nullptr)) {
+      break;
+    }
+    total_written += written;
+  } while (total_written < length);
+  return total_written;
+}
+
+size_t File::Read(uint8_t* buffer, size_t length) {
+  RTC_DCHECK_LT(length, std::numeric_limits<DWORD>::max());
+  size_t total_read = 0;
+  do {
+    DWORD read;
+    if (!::ReadFile(file_, buffer + total_read,
+                    static_cast<DWORD>(length - total_read), &read, nullptr)) {
+      break;
+    }
+    total_read += read;
+  } while (total_read < length);
+  return total_read;
+}
+
+size_t File::WriteAt(const uint8_t* data, size_t length, size_t offset) {
+  RTC_DCHECK_LT(length, std::numeric_limits<DWORD>::max());
+  size_t total_written = 0;
+  do {
+    DWORD written;
+
+    LARGE_INTEGER offset_li;
+    offset_li.QuadPart = offset + total_written;
+
+    OVERLAPPED overlapped = {0};
+    overlapped.Offset = offset_li.LowPart;
+    overlapped.OffsetHigh = offset_li.HighPart;
+
+    if (!::WriteFile(file_, data + total_written,
+                     static_cast<DWORD>(length - total_written), &written,
+                     &overlapped)) {
+      break;
+    }
+
+    total_written += written;
+  } while (total_written < length);
+  return total_written;
+}
+
+size_t File::ReadAt(uint8_t* buffer, size_t length, size_t offset) {
+  RTC_DCHECK_LT(length, std::numeric_limits<DWORD>::max());
+  size_t total_read = 0;
+  do {
+    DWORD read;
+
+    LARGE_INTEGER offset_li;
+    offset_li.QuadPart = offset + total_read;
+
+    OVERLAPPED overlapped = {0};
+    overlapped.Offset = offset_li.LowPart;
+    overlapped.OffsetHigh = offset_li.HighPart;
+
+    if (!::ReadFile(file_, buffer + total_read,
+                    static_cast<DWORD>(length - total_read), &read,
+                    &overlapped)) {
+      break;
+    }
+
+    total_read += read;
+  } while (total_read < length);
+  return total_read;
+}
+
+bool File::Seek(size_t offset) {
+  LARGE_INTEGER distance;
+  distance.QuadPart = offset;
+  return SetFilePointerEx(file_, distance, nullptr, FILE_BEGIN) != 0;
+}
+
+bool File::Close() {
+  if (file_ == kInvalidPlatformFileValue)
+    return false;
+  bool ret = CloseHandle(file_) != 0;
+  file_ = kInvalidPlatformFileValue;
+  return ret;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/filerotatingstream.cc b/rtc_base/filerotatingstream.cc
new file mode 100644
index 0000000..4e3aa73
--- /dev/null
+++ b/rtc_base/filerotatingstream.cc
@@ -0,0 +1,401 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/filerotatingstream.h"
+
+#include <algorithm>
+#include <iostream>
+#include <string>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/fileutils.h"
+#include "rtc_base/pathutils.h"
+
+// Note: We use std::cerr for logging in the write paths of this stream to avoid
+// infinite loops when logging.
+
+namespace rtc {
+
+FileRotatingStream::FileRotatingStream(const std::string& dir_path,
+                                       const std::string& file_prefix)
+    : FileRotatingStream(dir_path, file_prefix, 0, 0, kRead) {
+}
+
+FileRotatingStream::FileRotatingStream(const std::string& dir_path,
+                                       const std::string& file_prefix,
+                                       size_t max_file_size,
+                                       size_t num_files)
+    : FileRotatingStream(dir_path,
+                         file_prefix,
+                         max_file_size,
+                         num_files,
+                         kWrite) {
+  RTC_DCHECK_GT(max_file_size, 0);
+  RTC_DCHECK_GT(num_files, 1);
+}
+
+FileRotatingStream::FileRotatingStream(const std::string& dir_path,
+                                       const std::string& file_prefix,
+                                       size_t max_file_size,
+                                       size_t num_files,
+                                       Mode mode)
+    : dir_path_(dir_path),
+      file_prefix_(file_prefix),
+      mode_(mode),
+      file_stream_(nullptr),
+      max_file_size_(max_file_size),
+      current_file_index_(0),
+      rotation_index_(0),
+      current_bytes_written_(0),
+      disable_buffering_(false) {
+  RTC_DCHECK(Filesystem::IsFolder(dir_path));
+  switch (mode) {
+    case kWrite: {
+      file_names_.clear();
+      for (size_t i = 0; i < num_files; ++i) {
+        file_names_.push_back(GetFilePath(i, num_files));
+      }
+      rotation_index_ = num_files - 1;
+      break;
+    }
+    case kRead: {
+      file_names_ = GetFilesWithPrefix();
+      std::sort(file_names_.begin(), file_names_.end());
+      if (file_names_.size() > 0) {
+        // |file_names_| is sorted newest first, so read from the end.
+        current_file_index_ = file_names_.size() - 1;
+      }
+      break;
+    }
+  }
+}
+
+FileRotatingStream::~FileRotatingStream() {
+}
+
+StreamState FileRotatingStream::GetState() const {
+  if (mode_ == kRead && current_file_index_ < file_names_.size()) {
+    return SS_OPEN;
+  }
+  if (!file_stream_) {
+    return SS_CLOSED;
+  }
+  return file_stream_->GetState();
+}
+
+StreamResult FileRotatingStream::Read(void* buffer,
+                                      size_t buffer_len,
+                                      size_t* read,
+                                      int* error) {
+  RTC_DCHECK(buffer);
+  if (mode_ != kRead) {
+    return SR_EOS;
+  }
+  if (current_file_index_ >= file_names_.size()) {
+    return SR_EOS;
+  }
+  // We will have no file stream initially, and when we are finished with the
+  // previous file.
+  if (!file_stream_) {
+    if (!OpenCurrentFile()) {
+      return SR_ERROR;
+    }
+  }
+  int local_error = 0;
+  if (!error) {
+    error = &local_error;
+  }
+  StreamResult result = file_stream_->Read(buffer, buffer_len, read, error);
+  if (result == SR_EOS || result == SR_ERROR) {
+    if (result == SR_ERROR) {
+      RTC_LOG(LS_ERROR) << "Failed to read from: "
+                        << file_names_[current_file_index_]
+                        << "Error: " << error;
+    }
+    // Reached the end of the file, read next file. If there is an error return
+    // the error status but allow for a next read by reading next file.
+    CloseCurrentFile();
+    if (current_file_index_ == 0) {
+      // Just finished reading the last file, signal EOS by setting index.
+      current_file_index_ = file_names_.size();
+    } else {
+      --current_file_index_;
+    }
+    if (read) {
+      *read = 0;
+    }
+    return result == SR_EOS ? SR_SUCCESS : result;
+  } else if (result == SR_SUCCESS) {
+    // Succeeded, continue reading from this file.
+    return SR_SUCCESS;
+  } else {
+    RTC_NOTREACHED();
+  }
+  return result;
+}
+
+StreamResult FileRotatingStream::Write(const void* data,
+                                       size_t data_len,
+                                       size_t* written,
+                                       int* error) {
+  if (mode_ != kWrite) {
+    return SR_EOS;
+  }
+  if (!file_stream_) {
+    std::cerr << "Open() must be called before Write." << std::endl;
+    return SR_ERROR;
+  }
+  // Write as much as will fit in to the current file.
+  RTC_DCHECK_LT(current_bytes_written_, max_file_size_);
+  size_t remaining_bytes = max_file_size_ - current_bytes_written_;
+  size_t write_length = std::min(data_len, remaining_bytes);
+  size_t local_written = 0;
+  if (!written) {
+    written = &local_written;
+  }
+  StreamResult result = file_stream_->Write(data, write_length, written, error);
+  current_bytes_written_ += *written;
+
+  // If we're done with this file, rotate it out.
+  if (current_bytes_written_ >= max_file_size_) {
+    RTC_DCHECK_EQ(current_bytes_written_, max_file_size_);
+    RotateFiles();
+  }
+  return result;
+}
+
+bool FileRotatingStream::Flush() {
+  if (!file_stream_) {
+    return false;
+  }
+  return file_stream_->Flush();
+}
+
+bool FileRotatingStream::GetSize(size_t* size) const {
+  if (mode_ != kRead) {
+    // Not possible to get accurate size on disk when writing because of
+    // potential buffering.
+    return false;
+  }
+  RTC_DCHECK(size);
+  *size = 0;
+  size_t total_size = 0;
+  for (auto file_name : file_names_) {
+    Pathname pathname(file_name);
+    size_t file_size = 0;
+    if (Filesystem::GetFileSize(file_name, &file_size)) {
+      total_size += file_size;
+    }
+  }
+  *size = total_size;
+  return true;
+}
+
+void FileRotatingStream::Close() {
+  CloseCurrentFile();
+}
+
+bool FileRotatingStream::Open() {
+  switch (mode_) {
+    case kRead:
+      // Defer opening to when we first read since we want to return read error
+      // if we fail to open next file.
+      return true;
+    case kWrite: {
+      // Delete existing files when opening for write.
+      std::vector<std::string> matching_files = GetFilesWithPrefix();
+      for (auto matching_file : matching_files) {
+        if (!Filesystem::DeleteFile(matching_file)) {
+          std::cerr << "Failed to delete: " << matching_file << std::endl;
+        }
+      }
+      return OpenCurrentFile();
+    }
+  }
+  return false;
+}
+
+bool FileRotatingStream::DisableBuffering() {
+  disable_buffering_ = true;
+  if (!file_stream_) {
+    std::cerr << "Open() must be called before DisableBuffering()."
+              << std::endl;
+    return false;
+  }
+  return file_stream_->DisableBuffering();
+}
+
+std::string FileRotatingStream::GetFilePath(size_t index) const {
+  RTC_DCHECK_LT(index, file_names_.size());
+  return file_names_[index];
+}
+
+bool FileRotatingStream::OpenCurrentFile() {
+  CloseCurrentFile();
+
+  // Opens the appropriate file in the appropriate mode.
+  RTC_DCHECK_LT(current_file_index_, file_names_.size());
+  std::string file_path = file_names_[current_file_index_];
+  file_stream_.reset(new FileStream());
+  const char* mode = nullptr;
+  switch (mode_) {
+    case kWrite:
+      mode = "w+";
+      // We should always we writing to the zero-th file.
+      RTC_DCHECK_EQ(current_file_index_, 0);
+      break;
+    case kRead:
+      mode = "r";
+      break;
+  }
+  int error = 0;
+  if (!file_stream_->Open(file_path, mode, &error)) {
+    std::cerr << "Failed to open: " << file_path << "Error: " << error
+              << std::endl;
+    file_stream_.reset();
+    return false;
+  }
+  if (disable_buffering_) {
+    file_stream_->DisableBuffering();
+  }
+  return true;
+}
+
+void FileRotatingStream::CloseCurrentFile() {
+  if (!file_stream_) {
+    return;
+  }
+  current_bytes_written_ = 0;
+  file_stream_.reset();
+}
+
+void FileRotatingStream::RotateFiles() {
+  RTC_DCHECK_EQ(mode_, kWrite);
+  CloseCurrentFile();
+  // Rotates the files by deleting the file at |rotation_index_|, which is the
+  // oldest file and then renaming the newer files to have an incremented index.
+  // See header file comments for example.
+  RTC_DCHECK_LT(rotation_index_, file_names_.size());
+  std::string file_to_delete = file_names_[rotation_index_];
+  if (Filesystem::IsFile(file_to_delete)) {
+    if (!Filesystem::DeleteFile(file_to_delete)) {
+      std::cerr << "Failed to delete: " << file_to_delete << std::endl;
+    }
+  }
+  for (auto i = rotation_index_; i > 0; --i) {
+    std::string rotated_name = file_names_[i];
+    std::string unrotated_name = file_names_[i - 1];
+    if (Filesystem::IsFile(unrotated_name)) {
+      if (!Filesystem::MoveFile(unrotated_name, rotated_name)) {
+        std::cerr << "Failed to move: " << unrotated_name << " to "
+                  << rotated_name << std::endl;
+      }
+    }
+  }
+  // Create a new file for 0th index.
+  OpenCurrentFile();
+  OnRotation();
+}
+
+std::vector<std::string> FileRotatingStream::GetFilesWithPrefix() const {
+  std::vector<std::string> files;
+  // Iterate over the files in the directory.
+  DirectoryIterator it;
+  Pathname dir_path;
+  dir_path.SetFolder(dir_path_);
+  if (!it.Iterate(dir_path)) {
+    return files;
+  }
+  do {
+    std::string current_name = it.Name();
+    if (current_name.size() && !it.IsDirectory() &&
+        current_name.compare(0, file_prefix_.size(), file_prefix_) == 0) {
+      Pathname path(dir_path_, current_name);
+      files.push_back(path.pathname());
+    }
+  } while (it.Next());
+  return files;
+}
+
+std::string FileRotatingStream::GetFilePath(size_t index,
+                                            size_t num_files) const {
+  RTC_DCHECK_LT(index, num_files);
+  std::ostringstream file_name;
+  // The format will be "_%<num_digits>zu". We want to zero pad the index so
+  // that it will sort nicely.
+  size_t max_digits = ((num_files - 1) / 10) + 1;
+  size_t num_digits = (index / 10) + 1;
+  RTC_DCHECK_LE(num_digits, max_digits);
+  size_t padding = max_digits - num_digits;
+
+  file_name << file_prefix_ << "_";
+  for (size_t i = 0; i < padding; ++i) {
+    file_name << "0";
+  }
+  file_name << index;
+
+  Pathname file_path(dir_path_, file_name.str());
+  return file_path.pathname();
+}
+
+CallSessionFileRotatingStream::CallSessionFileRotatingStream(
+    const std::string& dir_path)
+    : FileRotatingStream(dir_path, kLogPrefix),
+      max_total_log_size_(0),
+      num_rotations_(0) {
+}
+
+CallSessionFileRotatingStream::CallSessionFileRotatingStream(
+    const std::string& dir_path,
+    size_t max_total_log_size)
+    : FileRotatingStream(dir_path,
+                         kLogPrefix,
+                         max_total_log_size / 2,
+                         GetNumRotatingLogFiles(max_total_log_size) + 1),
+      max_total_log_size_(max_total_log_size),
+      num_rotations_(0) {
+  RTC_DCHECK_GE(max_total_log_size, 4);
+}
+
+const char* CallSessionFileRotatingStream::kLogPrefix = "webrtc_log";
+const size_t CallSessionFileRotatingStream::kRotatingLogFileDefaultSize =
+    1024 * 1024;
+
+void CallSessionFileRotatingStream::OnRotation() {
+  ++num_rotations_;
+  if (num_rotations_ == 1) {
+    // On the first rotation adjust the max file size so subsequent files after
+    // the first are smaller.
+    SetMaxFileSize(GetRotatingLogSize(max_total_log_size_));
+  } else if (num_rotations_ == (GetNumFiles() - 1)) {
+    // On the next rotation the very first file is going to be deleted. Change
+    // the rotation index so this doesn't happen.
+    SetRotationIndex(GetRotationIndex() - 1);
+  }
+}
+
+size_t CallSessionFileRotatingStream::GetRotatingLogSize(
+    size_t max_total_log_size) {
+  size_t num_rotating_log_files = GetNumRotatingLogFiles(max_total_log_size);
+  size_t rotating_log_size = num_rotating_log_files > 2
+                                 ? kRotatingLogFileDefaultSize
+                                 : max_total_log_size / 4;
+  return rotating_log_size;
+}
+
+size_t CallSessionFileRotatingStream::GetNumRotatingLogFiles(
+    size_t max_total_log_size) {
+  // At minimum have two rotating files. Otherwise split the available log size
+  // evenly across 1MB files.
+  return std::max((size_t)2,
+                  (max_total_log_size / 2) / kRotatingLogFileDefaultSize);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/filerotatingstream.h b/rtc_base/filerotatingstream.h
new file mode 100644
index 0000000..4dab345
--- /dev/null
+++ b/rtc_base/filerotatingstream.h
@@ -0,0 +1,173 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_FILEROTATINGSTREAM_H_
+#define RTC_BASE_FILEROTATINGSTREAM_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/stream.h"
+
+namespace rtc {
+
+// FileRotatingStream writes to a file in the directory specified in the
+// constructor. It rotates the files once the current file is full. The
+// individual file size and the number of files used is configurable in the
+// constructor. Open() must be called before using this stream.
+class FileRotatingStream : public StreamInterface {
+ public:
+  // Use this constructor for reading a directory previously written to with
+  // this stream.
+  FileRotatingStream(const std::string& dir_path,
+                     const std::string& file_prefix);
+
+  // Use this constructor for writing to a directory. Files in the directory
+  // matching the prefix will be deleted on open.
+  FileRotatingStream(const std::string& dir_path,
+                     const std::string& file_prefix,
+                     size_t max_file_size,
+                     size_t num_files);
+
+  ~FileRotatingStream() override;
+
+  // StreamInterface methods.
+  StreamState GetState() const override;
+  StreamResult Read(void* buffer,
+                    size_t buffer_len,
+                    size_t* read,
+                    int* error) override;
+  StreamResult Write(const void* data,
+                     size_t data_len,
+                     size_t* written,
+                     int* error) override;
+  bool Flush() override;
+  // Returns the total file size currently used on disk.
+  bool GetSize(size_t* size) const override;
+  void Close() override;
+
+  // Opens the appropriate file(s). Call this before using the stream.
+  bool Open();
+
+  // Disabling buffering causes writes to block until disk is updated. This is
+  // enabled by default for performance.
+  bool DisableBuffering();
+
+  // Returns the path used for the i-th newest file, where the 0th file is the
+  // newest file. The file may or may not exist, this is just used for
+  // formatting. Index must be less than GetNumFiles().
+  std::string GetFilePath(size_t index) const;
+
+  // Returns the number of files that will used by this stream.
+  size_t GetNumFiles() const { return file_names_.size(); }
+
+ protected:
+  size_t GetMaxFileSize() const { return max_file_size_; }
+
+  void SetMaxFileSize(size_t size) { max_file_size_ = size; }
+
+  size_t GetRotationIndex() const { return rotation_index_; }
+
+  void SetRotationIndex(size_t index) { rotation_index_ = index; }
+
+  virtual void OnRotation() {}
+
+ private:
+  enum Mode { kRead, kWrite };
+
+  FileRotatingStream(const std::string& dir_path,
+                     const std::string& file_prefix,
+                     size_t max_file_size,
+                     size_t num_files,
+                     Mode mode);
+
+  bool OpenCurrentFile();
+  void CloseCurrentFile();
+
+  // Rotates the files by creating a new current file, renaming the
+  // existing files, and deleting the oldest one. e.g.
+  // file_0 -> file_1
+  // file_1 -> file_2
+  // file_2 -> delete
+  // create new file_0
+  void RotateFiles();
+
+  // Returns a list of file names in the directory beginning with the prefix.
+  std::vector<std::string> GetFilesWithPrefix() const;
+  // Private version of GetFilePath.
+  std::string GetFilePath(size_t index, size_t num_files) const;
+
+  const std::string dir_path_;
+  const std::string file_prefix_;
+  const Mode mode_;
+
+  // FileStream is used to write to the current file.
+  std::unique_ptr<FileStream> file_stream_;
+  // Convenience storage for file names so we don't generate them over and over.
+  std::vector<std::string> file_names_;
+  size_t max_file_size_;
+  size_t current_file_index_;
+  // The rotation index indicates the index of the file that will be
+  // deleted first on rotation. Indices lower than this index will be rotated.
+  size_t rotation_index_;
+  // Number of bytes written to current file. We need this because with
+  // buffering the file size read from disk might not be accurate.
+  size_t current_bytes_written_;
+  bool disable_buffering_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(FileRotatingStream);
+};
+
+// CallSessionFileRotatingStream is meant to be used in situations where we will
+// have limited disk space. Its purpose is to read and write logs up to a
+// maximum size. Once the maximum size is exceeded, logs from the middle are
+// deleted whereas logs from the beginning and end are preserved. The reason for
+// this is because we anticipate that in WebRTC the beginning and end of the
+// logs are most useful for call diagnostics.
+//
+// This implementation simply writes to a single file until
+// |max_total_log_size| / 2 bytes are written to it, and subsequently writes to
+// a set of rotating files. We do this by inheriting FileRotatingStream and
+// setting the appropriate internal variables so that we don't delete the last
+// (earliest) file on rotate, and that that file's size is bigger.
+//
+// Open() must be called before using this stream.
+class CallSessionFileRotatingStream : public FileRotatingStream {
+ public:
+  // Use this constructor for reading a directory previously written to with
+  // this stream.
+  explicit CallSessionFileRotatingStream(const std::string& dir_path);
+  // Use this constructor for writing to a directory. Files in the directory
+  // matching what's used by the stream will be deleted. |max_total_log_size|
+  // must be at least 4.
+  CallSessionFileRotatingStream(const std::string& dir_path,
+                                size_t max_total_log_size);
+  ~CallSessionFileRotatingStream() override {}
+
+ protected:
+  void OnRotation() override;
+
+ private:
+  static size_t GetRotatingLogSize(size_t max_total_log_size);
+  static size_t GetNumRotatingLogFiles(size_t max_total_log_size);
+  static const char* kLogPrefix;
+  static const size_t kRotatingLogFileDefaultSize;
+
+  const size_t max_total_log_size_;
+  size_t num_rotations_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(CallSessionFileRotatingStream);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_FILEROTATINGSTREAM_H_
diff --git a/rtc_base/filerotatingstream_unittest.cc b/rtc_base/filerotatingstream_unittest.cc
new file mode 100644
index 0000000..16db280
--- /dev/null
+++ b/rtc_base/filerotatingstream_unittest.cc
@@ -0,0 +1,345 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/filerotatingstream.h"
+#include "rtc_base/fileutils.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/pathutils.h"
+#include "test/testsupport/fileutils.h"
+
+namespace rtc {
+
+namespace {
+
+void CleanupLogDirectory(const FileRotatingStream& stream) {
+  for (size_t i = 0; i < stream.GetNumFiles(); ++i) {
+    // Ignore return value, not all files are expected to exist.
+    webrtc::test::RemoveFile(stream.GetFilePath(i));
+  }
+}
+
+}  // namespace
+
+#if defined (WEBRTC_ANDROID)
+// Fails on Android: https://bugs.chromium.org/p/webrtc/issues/detail?id=4364.
+#define MAYBE_FileRotatingStreamTest DISABLED_FileRotatingStreamTest
+#else
+#define MAYBE_FileRotatingStreamTest FileRotatingStreamTest
+#endif
+
+class MAYBE_FileRotatingStreamTest : public ::testing::Test {
+ protected:
+  static const char* kFilePrefix;
+  static const size_t kMaxFileSize;
+
+  void Init(const std::string& dir_name,
+            const std::string& file_prefix,
+            size_t max_file_size,
+            size_t num_log_files) {
+    dir_path_ = webrtc::test::OutputPath();
+
+    // Append per-test output path in order to run within gtest parallel.
+    dir_path_.append(dir_name);
+    dir_path_.push_back(Pathname::DefaultFolderDelimiter());
+    ASSERT_TRUE(webrtc::test::CreateDir(dir_path_));
+    stream_.reset(new FileRotatingStream(dir_path_, file_prefix, max_file_size,
+                                         num_log_files));
+  }
+
+  void TearDown() override {
+    // On windows, open files can't be removed.
+    stream_->Close();
+    CleanupLogDirectory(*stream_);
+    EXPECT_TRUE(webrtc::test::RemoveDir(dir_path_));
+
+    stream_.reset();
+  }
+
+  // Writes the data to the stream and flushes it.
+  void WriteAndFlush(const void* data, const size_t data_len) {
+    EXPECT_EQ(SR_SUCCESS, stream_->WriteAll(data, data_len, nullptr, nullptr));
+    EXPECT_TRUE(stream_->Flush());
+  }
+
+  // Checks that the stream reads in the expected contents and then returns an
+  // end of stream result.
+  void VerifyStreamRead(const char* expected_contents,
+                        const size_t expected_length,
+                        const std::string& dir_path,
+                        const char* file_prefix) {
+    std::unique_ptr<FileRotatingStream> stream;
+    stream.reset(new FileRotatingStream(dir_path, file_prefix));
+    ASSERT_TRUE(stream->Open());
+    size_t read = 0;
+    size_t stream_size = 0;
+    EXPECT_TRUE(stream->GetSize(&stream_size));
+    std::unique_ptr<uint8_t[]> buffer(new uint8_t[expected_length]);
+    EXPECT_EQ(SR_SUCCESS,
+              stream->ReadAll(buffer.get(), expected_length, &read, nullptr));
+    EXPECT_EQ(0, memcmp(expected_contents, buffer.get(), expected_length));
+    EXPECT_EQ(SR_EOS, stream->ReadAll(buffer.get(), 1, nullptr, nullptr));
+    EXPECT_EQ(stream_size, read);
+  }
+
+  void VerifyFileContents(const char* expected_contents,
+                          const size_t expected_length,
+                          const std::string& file_path) {
+    std::unique_ptr<uint8_t[]> buffer(new uint8_t[expected_length]);
+    FileStream stream;
+    ASSERT_TRUE(stream.Open(file_path, "r", nullptr));
+    EXPECT_EQ(rtc::SR_SUCCESS,
+              stream.ReadAll(buffer.get(), expected_length, nullptr, nullptr));
+    EXPECT_EQ(0, memcmp(expected_contents, buffer.get(), expected_length));
+    size_t file_size = 0;
+    EXPECT_TRUE(stream.GetSize(&file_size));
+    EXPECT_EQ(file_size, expected_length);
+  }
+
+  std::unique_ptr<FileRotatingStream> stream_;
+  std::string dir_path_;
+};
+
+const char* MAYBE_FileRotatingStreamTest::kFilePrefix =
+    "FileRotatingStreamTest";
+const size_t MAYBE_FileRotatingStreamTest::kMaxFileSize = 2;
+
+// Tests that stream state is correct before and after Open / Close.
+TEST_F(MAYBE_FileRotatingStreamTest, State) {
+  Init("FileRotatingStreamTestState", kFilePrefix, kMaxFileSize, 3);
+
+  EXPECT_EQ(SS_CLOSED, stream_->GetState());
+  ASSERT_TRUE(stream_->Open());
+  EXPECT_EQ(SS_OPEN, stream_->GetState());
+  stream_->Close();
+  EXPECT_EQ(SS_CLOSED, stream_->GetState());
+}
+
+// Tests that nothing is written to file when data of length zero is written.
+TEST_F(MAYBE_FileRotatingStreamTest, EmptyWrite) {
+  Init("FileRotatingStreamTestEmptyWrite", kFilePrefix, kMaxFileSize, 3);
+
+  ASSERT_TRUE(stream_->Open());
+  WriteAndFlush("a", 0);
+
+  std::string logfile_path = stream_->GetFilePath(0);
+  FileStream stream;
+  ASSERT_TRUE(stream.Open(logfile_path, "r", nullptr));
+  size_t file_size = 0;
+  EXPECT_TRUE(stream.GetSize(&file_size));
+  EXPECT_EQ(0u, file_size);
+}
+
+// Tests that a write operation followed by a read returns the expected data
+// and writes to the expected files.
+TEST_F(MAYBE_FileRotatingStreamTest, WriteAndRead) {
+  Init("FileRotatingStreamTestWriteAndRead", kFilePrefix, kMaxFileSize, 3);
+
+  ASSERT_TRUE(stream_->Open());
+  // The test is set up to create three log files of length 2. Write and check
+  // contents.
+  std::string messages[3] = {"aa", "bb", "cc"};
+  for (size_t i = 0; i < arraysize(messages); ++i) {
+    const std::string& message = messages[i];
+    WriteAndFlush(message.c_str(), message.size());
+    // Since the max log size is 2, we will be causing rotation. Read from the
+    // next file.
+    VerifyFileContents(message.c_str(), message.size(),
+                       stream_->GetFilePath(1));
+  }
+  // Check that exactly three files exist.
+  for (size_t i = 0; i < arraysize(messages); ++i) {
+    EXPECT_TRUE(Filesystem::IsFile(stream_->GetFilePath(i)));
+  }
+  std::string message("d");
+  WriteAndFlush(message.c_str(), message.size());
+  for (size_t i = 0; i < arraysize(messages); ++i) {
+    EXPECT_TRUE(Filesystem::IsFile(stream_->GetFilePath(i)));
+  }
+  // TODO(tkchin): Maybe check all the files in the dir.
+
+  // Reopen for read.
+  std::string expected_contents("bbccd");
+  VerifyStreamRead(expected_contents.c_str(), expected_contents.size(),
+                   dir_path_, kFilePrefix);
+}
+
+// Tests that writing data greater than the total capacity of the files
+// overwrites the files correctly and is read correctly after.
+TEST_F(MAYBE_FileRotatingStreamTest, WriteOverflowAndRead) {
+  Init("FileRotatingStreamTestWriteOverflowAndRead", kFilePrefix, kMaxFileSize,
+       3);
+  ASSERT_TRUE(stream_->Open());
+  // This should cause overflow across all three files, such that the first file
+  // we wrote to also gets overwritten.
+  std::string message("foobarbaz");
+  WriteAndFlush(message.c_str(), message.size());
+  std::string expected_file_contents("z");
+  VerifyFileContents(expected_file_contents.c_str(),
+                     expected_file_contents.size(), stream_->GetFilePath(0));
+  std::string expected_stream_contents("arbaz");
+  VerifyStreamRead(expected_stream_contents.c_str(),
+                   expected_stream_contents.size(), dir_path_, kFilePrefix);
+}
+
+// Tests that the returned file paths have the right folder and prefix.
+TEST_F(MAYBE_FileRotatingStreamTest, GetFilePath) {
+  Init("FileRotatingStreamTestGetFilePath", kFilePrefix, kMaxFileSize, 20);
+  for (auto i = 0; i < 20; ++i) {
+    Pathname path(stream_->GetFilePath(i));
+    EXPECT_EQ(0, path.folder().compare(dir_path_));
+    EXPECT_EQ(0, path.filename().compare(0, strlen(kFilePrefix), kFilePrefix));
+  }
+}
+
+#if defined (WEBRTC_ANDROID)
+// Fails on Android: https://bugs.chromium.org/p/webrtc/issues/detail?id=4364.
+#define MAYBE_CallSessionFileRotatingStreamTest \
+    DISABLED_CallSessionFileRotatingStreamTest
+#else
+#define MAYBE_CallSessionFileRotatingStreamTest \
+    CallSessionFileRotatingStreamTest
+#endif
+
+class MAYBE_CallSessionFileRotatingStreamTest : public ::testing::Test {
+ protected:
+  void Init(const std::string& dir_name, size_t max_total_log_size) {
+    dir_path_ = webrtc::test::OutputPath();
+
+    // Append per-test output path in order to run within gtest parallel.
+    dir_path_.append(dir_name);
+    dir_path_.push_back(Pathname::DefaultFolderDelimiter());
+    ASSERT_TRUE(webrtc::test::CreateDir(dir_path_));
+    stream_.reset(
+        new CallSessionFileRotatingStream(dir_path_, max_total_log_size));
+  }
+
+  void TearDown() override {
+    // On windows, open files can't be removed.
+    stream_->Close();
+    CleanupLogDirectory(*stream_);
+    EXPECT_TRUE(webrtc::test::RemoveDir(dir_path_));
+
+    stream_.reset();
+  }
+
+  // Writes the data to the stream and flushes it.
+  void WriteAndFlush(const void* data, const size_t data_len) {
+    EXPECT_EQ(SR_SUCCESS, stream_->WriteAll(data, data_len, nullptr, nullptr));
+    EXPECT_TRUE(stream_->Flush());
+  }
+
+  // Checks that the stream reads in the expected contents and then returns an
+  // end of stream result.
+  void VerifyStreamRead(const char* expected_contents,
+                        const size_t expected_length,
+                        const std::string& dir_path) {
+    std::unique_ptr<CallSessionFileRotatingStream> stream(
+        new CallSessionFileRotatingStream(dir_path));
+    ASSERT_TRUE(stream->Open());
+    size_t read = 0;
+    size_t stream_size = 0;
+    EXPECT_TRUE(stream->GetSize(&stream_size));
+    std::unique_ptr<uint8_t[]> buffer(new uint8_t[expected_length]);
+    EXPECT_EQ(SR_SUCCESS,
+              stream->ReadAll(buffer.get(), expected_length, &read, nullptr));
+    EXPECT_EQ(0, memcmp(expected_contents, buffer.get(), expected_length));
+    EXPECT_EQ(SR_EOS, stream->ReadAll(buffer.get(), 1, nullptr, nullptr));
+    EXPECT_EQ(stream_size, read);
+  }
+
+  std::unique_ptr<CallSessionFileRotatingStream> stream_;
+  std::string dir_path_;
+};
+
+// Tests that writing and reading to a stream with the smallest possible
+// capacity works.
+TEST_F(MAYBE_CallSessionFileRotatingStreamTest, WriteAndReadSmallest) {
+  Init("CallSessionFileRotatingStreamTestWriteAndReadSmallest", 4);
+
+  ASSERT_TRUE(stream_->Open());
+  std::string message("abcde");
+  WriteAndFlush(message.c_str(), message.size());
+  std::string expected_contents("abe");
+  VerifyStreamRead(expected_contents.c_str(), expected_contents.size(),
+                   dir_path_);
+}
+
+// Tests that writing and reading to a stream with capacity lesser than 4MB
+// behaves correctly.
+TEST_F(MAYBE_CallSessionFileRotatingStreamTest, WriteAndReadSmall) {
+  Init("CallSessionFileRotatingStreamTestWriteAndReadSmall", 8);
+
+  ASSERT_TRUE(stream_->Open());
+  std::string message("123456789");
+  WriteAndFlush(message.c_str(), message.size());
+  std::string expected_contents("1234789");
+  VerifyStreamRead(expected_contents.c_str(), expected_contents.size(),
+                   dir_path_);
+}
+
+// Tests that writing and reading to a stream with capacity greater than 4MB
+// behaves correctly.
+TEST_F(MAYBE_CallSessionFileRotatingStreamTest, WriteAndReadLarge) {
+  Init("CallSessionFileRotatingStreamTestWriteAndReadLarge", 6 * 1024 * 1024);
+
+  ASSERT_TRUE(stream_->Open());
+  const size_t buffer_size = 1024 * 1024;
+  std::unique_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
+  for (int i = 0; i < 8; i++) {
+    memset(buffer.get(), i, buffer_size);
+    EXPECT_EQ(SR_SUCCESS,
+              stream_->WriteAll(buffer.get(), buffer_size, nullptr, nullptr));
+  }
+
+  stream_.reset(new CallSessionFileRotatingStream(dir_path_));
+  ASSERT_TRUE(stream_->Open());
+  std::unique_ptr<uint8_t[]> expected_buffer(new uint8_t[buffer_size]);
+  int expected_vals[] = {0, 1, 2, 6, 7};
+  for (size_t i = 0; i < arraysize(expected_vals); ++i) {
+    memset(expected_buffer.get(), expected_vals[i], buffer_size);
+    EXPECT_EQ(SR_SUCCESS,
+              stream_->ReadAll(buffer.get(), buffer_size, nullptr, nullptr));
+    EXPECT_EQ(0, memcmp(buffer.get(), expected_buffer.get(), buffer_size));
+  }
+  EXPECT_EQ(SR_EOS, stream_->ReadAll(buffer.get(), 1, nullptr, nullptr));
+}
+
+// Tests that writing and reading to a stream where only the first file is
+// written to behaves correctly.
+TEST_F(MAYBE_CallSessionFileRotatingStreamTest, WriteAndReadFirstHalf) {
+  Init("CallSessionFileRotatingStreamTestWriteAndReadFirstHalf",
+       6 * 1024 * 1024);
+  ASSERT_TRUE(stream_->Open());
+  const size_t buffer_size = 1024 * 1024;
+  std::unique_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
+  for (int i = 0; i < 2; i++) {
+    memset(buffer.get(), i, buffer_size);
+    EXPECT_EQ(SR_SUCCESS,
+              stream_->WriteAll(buffer.get(), buffer_size, nullptr, nullptr));
+  }
+
+  stream_.reset(new CallSessionFileRotatingStream(dir_path_));
+  ASSERT_TRUE(stream_->Open());
+  std::unique_ptr<uint8_t[]> expected_buffer(new uint8_t[buffer_size]);
+  int expected_vals[] = {0, 1};
+  for (size_t i = 0; i < arraysize(expected_vals); ++i) {
+    memset(expected_buffer.get(), expected_vals[i], buffer_size);
+    EXPECT_EQ(SR_SUCCESS,
+              stream_->ReadAll(buffer.get(), buffer_size, nullptr, nullptr));
+    EXPECT_EQ(0, memcmp(buffer.get(), expected_buffer.get(), buffer_size));
+  }
+  EXPECT_EQ(SR_EOS, stream_->ReadAll(buffer.get(), 1, nullptr, nullptr));
+}
+
+}  // namespace rtc
diff --git a/rtc_base/fileutils.cc b/rtc_base/fileutils.cc
new file mode 100644
index 0000000..40ec86f
--- /dev/null
+++ b/rtc_base/fileutils.cc
@@ -0,0 +1,133 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/fileutils.h"
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/pathutils.h"
+#include "rtc_base/stringutils.h"
+
+#if defined(WEBRTC_WIN)
+#include "rtc_base/win32filesystem.h"
+#else
+#include "rtc_base/unixfilesystem.h"
+#endif
+
+#if !defined(WEBRTC_WIN)
+#define MAX_PATH 260
+#endif
+
+namespace rtc {
+
+//////////////////////////
+// Directory Iterator   //
+//////////////////////////
+
+// A DirectoryIterator is created with a given directory. It originally points
+// to the first file in the directory, and can be advanecd with Next(). This
+// allows you to get information about each file.
+
+  // Constructor
+DirectoryIterator::DirectoryIterator()
+#ifdef WEBRTC_WIN
+    : handle_(INVALID_HANDLE_VALUE) {
+#else
+    : dir_(nullptr),
+      dirent_(nullptr){
+#endif
+}
+
+  // Destructor
+DirectoryIterator::~DirectoryIterator() {
+#if defined(WEBRTC_WIN)
+  if (handle_ != INVALID_HANDLE_VALUE)
+    ::FindClose(handle_);
+#else
+  if (dir_)
+    closedir(dir_);
+#endif
+}
+
+  // Starts traversing a directory.
+  // dir is the directory to traverse
+  // returns true if the directory exists and is valid
+bool DirectoryIterator::Iterate(const Pathname &dir) {
+  directory_ = dir.pathname();
+#if defined(WEBRTC_WIN)
+  if (handle_ != INVALID_HANDLE_VALUE)
+    ::FindClose(handle_);
+  std::string d = dir.pathname() + '*';
+  handle_ = ::FindFirstFile(ToUtf16(d).c_str(), &data_);
+  if (handle_ == INVALID_HANDLE_VALUE)
+    return false;
+#else
+  if (dir_ != nullptr)
+    closedir(dir_);
+  dir_ = ::opendir(directory_.c_str());
+  if (dir_ == nullptr)
+    return false;
+  dirent_ = readdir(dir_);
+  if (dirent_ == nullptr)
+    return false;
+
+  if (::stat(std::string(directory_ + Name()).c_str(), &stat_) != 0)
+    return false;
+#endif
+  return true;
+}
+
+  // Advances to the next file
+  // returns true if there were more files in the directory.
+bool DirectoryIterator::Next() {
+#if defined(WEBRTC_WIN)
+  return ::FindNextFile(handle_, &data_) == TRUE;
+#else
+  dirent_ = ::readdir(dir_);
+  if (dirent_ == nullptr)
+    return false;
+
+  return ::stat(std::string(directory_ + Name()).c_str(), &stat_) == 0;
+#endif
+}
+
+  // returns true if the file currently pointed to is a directory
+bool DirectoryIterator::IsDirectory() const {
+#if defined(WEBRTC_WIN)
+  return (data_.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != FALSE;
+#else
+  return S_ISDIR(stat_.st_mode);
+#endif
+}
+
+  // returns the name of the file currently pointed to
+std::string DirectoryIterator::Name() const {
+#if defined(WEBRTC_WIN)
+  return ToUtf8(data_.cFileName);
+#else
+  RTC_DCHECK(dirent_);
+  return dirent_->d_name;
+#endif
+}
+
+FilesystemInterface* Filesystem::default_filesystem_ = nullptr;
+
+FilesystemInterface *Filesystem::EnsureDefaultFilesystem() {
+  if (!default_filesystem_) {
+#if defined(WEBRTC_WIN)
+    default_filesystem_ = new Win32Filesystem();
+#else
+    default_filesystem_ = new UnixFilesystem();
+#endif
+  }
+  return default_filesystem_;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/fileutils.h b/rtc_base/fileutils.h
new file mode 100644
index 0000000..b9e8a05
--- /dev/null
+++ b/rtc_base/fileutils.h
@@ -0,0 +1,150 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_FILEUTILS_H_
+#define RTC_BASE_FILEUTILS_H_
+
+#include <string>
+
+#if defined(WEBRTC_WIN)
+#include <windows.h>
+#else
+#include <dirent.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif  // WEBRTC_WIN
+
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/platform_file.h"
+
+namespace rtc {
+
+class FileStream;
+class Pathname;
+
+//////////////////////////
+// Directory Iterator   //
+//////////////////////////
+
+// A DirectoryIterator is created with a given directory. It originally points
+// to the first file in the directory, and can be advanecd with Next(). This
+// allows you to get information about each file.
+
+class DirectoryIterator {
+  friend class Filesystem;
+ public:
+  // Constructor
+  DirectoryIterator();
+  // Destructor
+  virtual ~DirectoryIterator();
+
+  // Starts traversing a directory
+  // dir is the directory to traverse
+  // returns true if the directory exists and is valid
+  // The iterator will point to the first entry in the directory
+  virtual bool Iterate(const Pathname &path);
+
+  // Advances to the next file
+  // returns true if there were more files in the directory.
+  virtual bool Next();
+
+  // returns true if the file currently pointed to is a directory
+  virtual bool IsDirectory() const;
+
+  // returns the name of the file currently pointed to
+  virtual std::string Name() const;
+
+ private:
+  std::string directory_;
+#if defined(WEBRTC_WIN)
+  WIN32_FIND_DATA data_;
+  HANDLE handle_;
+#else
+  DIR *dir_;
+  struct dirent *dirent_;
+  struct stat stat_;
+#endif
+};
+
+class FilesystemInterface {
+ public:
+  virtual ~FilesystemInterface() {}
+
+  // This will attempt to delete the path located at filename.
+  // It DCHECKs and returns false if the path points to a folder or a
+  // non-existent file.
+  virtual bool DeleteFile(const Pathname &filename) = 0;
+
+  // This moves a file from old_path to new_path, where "old_path" is a
+  // plain file. This DCHECKs and returns false if old_path points to a
+  // directory, and returns true if the function succeeds.
+  virtual bool MoveFile(const Pathname &old_path, const Pathname &new_path) = 0;
+
+  // Returns true if pathname refers to a directory
+  virtual bool IsFolder(const Pathname& pathname) = 0;
+
+  // Returns true if pathname refers to a file
+  virtual bool IsFile(const Pathname& pathname) = 0;
+
+  // Determines the size of the file indicated by path.
+  virtual bool GetFileSize(const Pathname& path, size_t* size) = 0;
+};
+
+class Filesystem {
+ public:
+  static FilesystemInterface *default_filesystem() {
+    RTC_DCHECK(default_filesystem_);
+    return default_filesystem_;
+  }
+
+  static void set_default_filesystem(FilesystemInterface *filesystem) {
+    default_filesystem_ = filesystem;
+  }
+
+  static FilesystemInterface *swap_default_filesystem(
+      FilesystemInterface *filesystem) {
+    FilesystemInterface *cur = default_filesystem_;
+    default_filesystem_ = filesystem;
+    return cur;
+  }
+
+  static bool DeleteFile(const Pathname &filename) {
+    return EnsureDefaultFilesystem()->DeleteFile(filename);
+  }
+
+  static bool MoveFile(const Pathname &old_path, const Pathname &new_path) {
+    return EnsureDefaultFilesystem()->MoveFile(old_path, new_path);
+  }
+
+  static bool IsFolder(const Pathname& pathname) {
+    return EnsureDefaultFilesystem()->IsFolder(pathname);
+  }
+
+  static bool IsFile(const Pathname &pathname) {
+    return EnsureDefaultFilesystem()->IsFile(pathname);
+  }
+
+  static bool GetFileSize(const Pathname& path, size_t* size) {
+    return EnsureDefaultFilesystem()->GetFileSize(path, size);
+  }
+
+ private:
+  static FilesystemInterface* default_filesystem_;
+
+  static FilesystemInterface *EnsureDefaultFilesystem();
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(Filesystem);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_FILEUTILS_H_
diff --git a/rtc_base/firewallsocketserver.cc b/rtc_base/firewallsocketserver.cc
new file mode 100644
index 0000000..60f45ed
--- /dev/null
+++ b/rtc_base/firewallsocketserver.cc
@@ -0,0 +1,273 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/firewallsocketserver.h"
+
+#include <algorithm>
+
+#include "rtc_base/asyncsocket.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace rtc {
+
+class FirewallSocket : public AsyncSocketAdapter {
+ public:
+  FirewallSocket(FirewallSocketServer* server, AsyncSocket* socket, int type)
+    : AsyncSocketAdapter(socket), server_(server), type_(type) {
+  }
+
+  int Bind(const SocketAddress& addr) override {
+    if (!server_->IsBindableIp(addr.ipaddr())) {
+      SetError(EINVAL);
+      return SOCKET_ERROR;
+    }
+    return AsyncSocketAdapter::Bind(addr);
+  }
+
+  int Connect(const SocketAddress& addr) override {
+    if (type_ == SOCK_STREAM) {
+      if (!server_->Check(FP_TCP, GetLocalAddress(), addr)) {
+        RTC_LOG(LS_VERBOSE) << "FirewallSocket outbound TCP connection from "
+                            << GetLocalAddress().ToSensitiveString() << " to "
+                            << addr.ToSensitiveString() << " denied";
+        // TODO: Handle this asynchronously.
+        SetError(EHOSTUNREACH);
+        return SOCKET_ERROR;
+      }
+    }
+    return AsyncSocketAdapter::Connect(addr);
+  }
+  int Send(const void* pv, size_t cb) override {
+    return SendTo(pv, cb, GetRemoteAddress());
+  }
+  int SendTo(const void* pv, size_t cb, const SocketAddress& addr) override {
+    RTC_DCHECK(type_ == SOCK_DGRAM || type_ == SOCK_STREAM);
+    FirewallProtocol protocol = (type_ == SOCK_DGRAM) ? FP_UDP : FP_TCP;
+    if (!server_->Check(protocol, GetLocalAddress(), addr)) {
+      RTC_LOG(LS_VERBOSE) << "FirewallSocket outbound packet with type "
+                          << type_ << " from "
+                          << GetLocalAddress().ToSensitiveString() << " to "
+                          << addr.ToSensitiveString() << " dropped";
+      return static_cast<int>(cb);
+    }
+    return AsyncSocketAdapter::SendTo(pv, cb, addr);
+  }
+  int Recv(void* pv, size_t cb, int64_t* timestamp) override {
+    SocketAddress addr;
+    return RecvFrom(pv, cb, &addr, timestamp);
+  }
+  int RecvFrom(void* pv,
+               size_t cb,
+               SocketAddress* paddr,
+               int64_t* timestamp) override {
+    if (type_ == SOCK_DGRAM) {
+      while (true) {
+        int res = AsyncSocketAdapter::RecvFrom(pv, cb, paddr, timestamp);
+        if (res <= 0)
+          return res;
+        if (server_->Check(FP_UDP, *paddr, GetLocalAddress()))
+          return res;
+        RTC_LOG(LS_VERBOSE)
+            << "FirewallSocket inbound UDP packet from "
+            << paddr->ToSensitiveString() << " to "
+            << GetLocalAddress().ToSensitiveString() << " dropped";
+      }
+    }
+    return AsyncSocketAdapter::RecvFrom(pv, cb, paddr, timestamp);
+  }
+
+  int Listen(int backlog) override {
+    if (!server_->tcp_listen_enabled()) {
+      RTC_LOG(LS_VERBOSE) << "FirewallSocket listen attempt denied";
+      return -1;
+    }
+
+    return AsyncSocketAdapter::Listen(backlog);
+  }
+  AsyncSocket* Accept(SocketAddress* paddr) override {
+    SocketAddress addr;
+    while (AsyncSocket* sock = AsyncSocketAdapter::Accept(&addr)) {
+      if (server_->Check(FP_TCP, addr, GetLocalAddress())) {
+        if (paddr)
+          *paddr = addr;
+        return sock;
+      }
+      sock->Close();
+      delete sock;
+      RTC_LOG(LS_VERBOSE) << "FirewallSocket inbound TCP connection from "
+                          << addr.ToSensitiveString() << " to "
+                          << GetLocalAddress().ToSensitiveString() << " denied";
+    }
+    return 0;
+  }
+
+ private:
+  FirewallSocketServer* server_;
+  int type_;
+};
+
+FirewallSocketServer::FirewallSocketServer(SocketServer* server,
+                                           FirewallManager* manager,
+                                           bool should_delete_server)
+    : server_(server), manager_(manager),
+      should_delete_server_(should_delete_server),
+      udp_sockets_enabled_(true), tcp_sockets_enabled_(true),
+      tcp_listen_enabled_(true) {
+  if (manager_)
+    manager_->AddServer(this);
+}
+
+FirewallSocketServer::~FirewallSocketServer() {
+  if (manager_)
+    manager_->RemoveServer(this);
+
+  if (server_ && should_delete_server_) {
+    delete server_;
+    server_ = nullptr;
+  }
+}
+
+void FirewallSocketServer::AddRule(bool allow, FirewallProtocol p,
+                                   FirewallDirection d,
+                                   const SocketAddress& addr) {
+  SocketAddress any;
+  if (d == FD_IN || d == FD_ANY) {
+    AddRule(allow, p, any, addr);
+  }
+  if (d == FD_OUT || d == FD_ANY) {
+    AddRule(allow, p, addr, any);
+  }
+}
+
+
+void FirewallSocketServer::AddRule(bool allow, FirewallProtocol p,
+                                   const SocketAddress& src,
+                                   const SocketAddress& dst) {
+  Rule r;
+  r.allow = allow;
+  r.p = p;
+  r.src = src;
+  r.dst = dst;
+  CritScope scope(&crit_);
+  rules_.push_back(r);
+}
+
+void FirewallSocketServer::ClearRules() {
+  CritScope scope(&crit_);
+  rules_.clear();
+}
+
+bool FirewallSocketServer::Check(FirewallProtocol p,
+                                 const SocketAddress& src,
+                                 const SocketAddress& dst) {
+  CritScope scope(&crit_);
+  for (size_t i = 0; i < rules_.size(); ++i) {
+    const Rule& r = rules_[i];
+    if ((r.p != p) && (r.p != FP_ANY))
+      continue;
+    if ((r.src.ipaddr() != src.ipaddr()) && !r.src.IsNil())
+      continue;
+    if ((r.src.port() != src.port()) && (r.src.port() != 0))
+      continue;
+    if ((r.dst.ipaddr() != dst.ipaddr()) && !r.dst.IsNil())
+      continue;
+    if ((r.dst.port() != dst.port()) && (r.dst.port() != 0))
+      continue;
+    return r.allow;
+  }
+  return true;
+}
+
+void FirewallSocketServer::SetUnbindableIps(
+    const std::vector<rtc::IPAddress>& unbindable_ips) {
+  unbindable_ips_ = unbindable_ips;
+}
+
+bool FirewallSocketServer::IsBindableIp(const rtc::IPAddress& ip) {
+  return std::find(unbindable_ips_.begin(), unbindable_ips_.end(), ip) ==
+         unbindable_ips_.end();
+}
+
+Socket* FirewallSocketServer::CreateSocket(int type) {
+  return CreateSocket(AF_INET, type);
+}
+
+Socket* FirewallSocketServer::CreateSocket(int family, int type) {
+  return WrapSocket(server_->CreateAsyncSocket(family, type), type);
+}
+
+AsyncSocket* FirewallSocketServer::CreateAsyncSocket(int type) {
+  return CreateAsyncSocket(AF_INET, type);
+}
+
+AsyncSocket* FirewallSocketServer::CreateAsyncSocket(int family, int type) {
+  return WrapSocket(server_->CreateAsyncSocket(family, type), type);
+}
+
+void FirewallSocketServer::SetMessageQueue(MessageQueue* queue) {
+  server_->SetMessageQueue(queue);
+}
+
+bool FirewallSocketServer::Wait(int cms, bool process_io) {
+  return server_->Wait(cms, process_io);
+}
+
+void FirewallSocketServer::WakeUp() {
+  return server_->WakeUp();
+}
+
+AsyncSocket* FirewallSocketServer::WrapSocket(AsyncSocket* sock, int type) {
+  if (!sock ||
+      (type == SOCK_STREAM && !tcp_sockets_enabled_) ||
+      (type == SOCK_DGRAM && !udp_sockets_enabled_)) {
+    RTC_LOG(LS_VERBOSE) << "FirewallSocketServer socket creation denied";
+    delete sock;
+    return nullptr;
+  }
+  return new FirewallSocket(this, sock, type);
+}
+
+FirewallManager::FirewallManager() {
+}
+
+FirewallManager::~FirewallManager() {
+  RTC_DCHECK(servers_.empty());
+}
+
+void FirewallManager::AddServer(FirewallSocketServer* server) {
+  CritScope scope(&crit_);
+  servers_.push_back(server);
+}
+
+void FirewallManager::RemoveServer(FirewallSocketServer* server) {
+  CritScope scope(&crit_);
+  servers_.erase(std::remove(servers_.begin(), servers_.end(), server),
+                 servers_.end());
+}
+
+void FirewallManager::AddRule(bool allow, FirewallProtocol p,
+                              FirewallDirection d, const SocketAddress& addr) {
+  CritScope scope(&crit_);
+  for (std::vector<FirewallSocketServer*>::const_iterator it =
+      servers_.begin(); it != servers_.end(); ++it) {
+    (*it)->AddRule(allow, p, d, addr);
+  }
+}
+
+void FirewallManager::ClearRules() {
+  CritScope scope(&crit_);
+  for (std::vector<FirewallSocketServer*>::const_iterator it =
+      servers_.begin(); it != servers_.end(); ++it) {
+    (*it)->ClearRules();
+  }
+}
+
+}  // namespace rtc
diff --git a/rtc_base/firewallsocketserver.h b/rtc_base/firewallsocketserver.h
new file mode 100644
index 0000000..9f04946
--- /dev/null
+++ b/rtc_base/firewallsocketserver.h
@@ -0,0 +1,125 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_FIREWALLSOCKETSERVER_H_
+#define RTC_BASE_FIREWALLSOCKETSERVER_H_
+
+#include <vector>
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/socketserver.h"
+
+namespace rtc {
+
+class FirewallManager;
+
+// This SocketServer shim simulates a rule-based firewall server.
+
+enum FirewallProtocol { FP_UDP, FP_TCP, FP_ANY };
+enum FirewallDirection { FD_IN, FD_OUT, FD_ANY };
+
+class FirewallSocketServer : public SocketServer {
+ public:
+  FirewallSocketServer(SocketServer* server,
+                       FirewallManager* manager = nullptr,
+                       bool should_delete_server = false);
+  ~FirewallSocketServer() override;
+
+  SocketServer* socketserver() const { return server_; }
+  void set_socketserver(SocketServer* server) {
+    if (server_ && should_delete_server_) {
+      delete server_;
+      server_ = nullptr;
+      should_delete_server_ = false;
+    }
+    server_ = server;
+  }
+
+  // Settings to control whether CreateSocket or Socket::Listen succeed.
+  void set_udp_sockets_enabled(bool enabled) { udp_sockets_enabled_ = enabled; }
+  void set_tcp_sockets_enabled(bool enabled) { tcp_sockets_enabled_ = enabled; }
+  bool tcp_listen_enabled() const { return tcp_listen_enabled_; }
+  void set_tcp_listen_enabled(bool enabled) { tcp_listen_enabled_ = enabled; }
+
+  // Rules govern the behavior of Connect/Accept/Send/Recv attempts.
+  void AddRule(bool allow, FirewallProtocol p = FP_ANY,
+               FirewallDirection d = FD_ANY,
+               const SocketAddress& addr = SocketAddress());
+  void AddRule(bool allow, FirewallProtocol p,
+               const SocketAddress& src, const SocketAddress& dst);
+  void ClearRules();
+
+  bool Check(FirewallProtocol p,
+             const SocketAddress& src, const SocketAddress& dst);
+
+  // Set the IP addresses for which Bind will fail. By default this list is
+  // empty. This can be used to simulate a real OS that refuses to bind to
+  // addresses under various circumstances.
+  //
+  // No matter how many addresses are added (including INADDR_ANY), the server
+  // will still allow creating outgoing TCP connections, since they don't
+  // require explicitly binding a socket.
+  void SetUnbindableIps(const std::vector<rtc::IPAddress>& unbindable_ips);
+  bool IsBindableIp(const rtc::IPAddress& ip);
+
+  Socket* CreateSocket(int type) override;
+  Socket* CreateSocket(int family, int type) override;
+
+  AsyncSocket* CreateAsyncSocket(int type) override;
+  AsyncSocket* CreateAsyncSocket(int family, int type) override;
+
+  void SetMessageQueue(MessageQueue* queue) override;
+  bool Wait(int cms, bool process_io) override;
+  void WakeUp() override;
+
+  Socket * WrapSocket(Socket * sock, int type);
+  AsyncSocket * WrapSocket(AsyncSocket * sock, int type);
+
+ private:
+  SocketServer * server_;
+  FirewallManager * manager_;
+  CriticalSection crit_;
+  struct Rule {
+    bool allow;
+    FirewallProtocol p;
+    FirewallDirection d;
+    SocketAddress src;
+    SocketAddress dst;
+  };
+  std::vector<Rule> rules_;
+  std::vector<rtc::IPAddress> unbindable_ips_;
+  bool should_delete_server_;
+  bool udp_sockets_enabled_;
+  bool tcp_sockets_enabled_;
+  bool tcp_listen_enabled_;
+};
+
+// FirewallManager allows you to manage firewalls in multiple threads together
+
+class FirewallManager {
+ public:
+  FirewallManager();
+  ~FirewallManager();
+
+  void AddServer(FirewallSocketServer * server);
+  void RemoveServer(FirewallSocketServer * server);
+
+  void AddRule(bool allow, FirewallProtocol p = FP_ANY,
+               FirewallDirection d = FD_ANY,
+               const SocketAddress& addr = SocketAddress());
+  void ClearRules();
+
+ private:
+  CriticalSection crit_;
+  std::vector<FirewallSocketServer *> servers_;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_FIREWALLSOCKETSERVER_H_
diff --git a/rtc_base/flags.cc b/rtc_base/flags.cc
new file mode 100644
index 0000000..a2fb708
--- /dev/null
+++ b/rtc_base/flags.cc
@@ -0,0 +1,317 @@
+/*
+ *  Copyright 2006 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/flags.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/stringutils.h"
+
+#if defined(WEBRTC_WIN)
+#include <windows.h>
+#include <shellapi.h>
+#endif
+
+
+namespace {
+bool FlagEq(const char* arg, const char* flag) {
+  // Compare two flags for equality.
+  // 'arg' is the name of a flag passed via the command line and 'flag' is the
+  // name of a flag defined with the DEFINE_* macros.
+  // We compare the flags for equality, considering hyphens (-) and
+  // underscores (_) to be equivalent, so that --flag-name and --flag_name both
+  // match with --flag_name.
+  while (*arg != '\0' && (*arg == *flag || (*arg == '-' && *flag == '_'))) {
+    ++arg;
+    ++flag;
+  }
+  return *arg == '\0' && *flag == '\0';
+}
+}  // namespace
+
+namespace rtc {
+// -----------------------------------------------------------------------------
+// Implementation of Flag
+
+Flag::Flag(const char* file, const char* name, const char* comment,
+           Type type, void* variable, FlagValue default__)
+    : file_(file),
+      name_(name),
+      comment_(comment),
+      type_(type),
+      variable_(reinterpret_cast<FlagValue*>(variable)),
+      default_(default__) {
+  FlagList::Register(this);
+}
+
+
+void Flag::SetToDefault() {
+  // Note that we cannot simply do '*variable_ = default_;' since
+  // flag variables are not really of type FlagValue and thus may
+  // be smaller! The FlagValue union is simply 'overlayed' on top
+  // of a flag variable for convenient access. Since union members
+  // are guarantee to be aligned at the beginning, this works.
+  switch (type_) {
+    case Flag::BOOL:
+      variable_->b = default_.b;
+      return;
+    case Flag::INT:
+      variable_->i = default_.i;
+      return;
+    case Flag::FLOAT:
+      variable_->f = default_.f;
+      return;
+    case Flag::STRING:
+      variable_->s = default_.s;
+      return;
+  }
+  FATAL() << "unreachable code";
+}
+
+
+static const char* Type2String(Flag::Type type) {
+  switch (type) {
+    case Flag::BOOL: return "bool";
+    case Flag::INT: return "int";
+    case Flag::FLOAT: return "float";
+    case Flag::STRING: return "string";
+  }
+  FATAL() << "unreachable code";
+}
+
+
+static void PrintFlagValue(Flag::Type type, FlagValue* p) {
+  switch (type) {
+    case Flag::BOOL:
+      printf("%s", (p->b ? "true" : "false"));
+      return;
+    case Flag::INT:
+      printf("%d", p->i);
+      return;
+    case Flag::FLOAT:
+      printf("%f", p->f);
+      return;
+    case Flag::STRING:
+      printf("%s", p->s);
+      return;
+  }
+  FATAL() << "unreachable code";
+}
+
+
+void Flag::Print(bool print_current_value) {
+  printf("  --%s (%s)  type: %s  default: ", name_, comment_,
+          Type2String(type_));
+  PrintFlagValue(type_, &default_);
+  if (print_current_value) {
+    printf("  current value: ");
+    PrintFlagValue(type_, variable_);
+  }
+  printf("\n");
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of FlagList
+
+Flag* FlagList::list_ = nullptr;
+
+FlagList::FlagList() {
+  list_ = nullptr;
+}
+
+void FlagList::Print(const char* file, bool print_current_value) {
+  // Since flag registration is likely by file (= C++ file),
+  // we don't need to sort by file and still get grouped output.
+  const char* current = nullptr;
+  for (Flag* f = list_; f != nullptr; f = f->next()) {
+    if (file == nullptr || file == f->file()) {
+      if (current != f->file()) {
+        printf("Flags from %s:\n", f->file());
+        current = f->file();
+      }
+      f->Print(print_current_value);
+    }
+  }
+}
+
+
+Flag* FlagList::Lookup(const char* name) {
+  Flag* f = list_;
+  while (f != nullptr && !FlagEq(name, f->name()))
+    f = f->next();
+  return f;
+}
+
+
+void FlagList::SplitArgument(const char* arg,
+                             char* buffer, int buffer_size,
+                             const char** name, const char** value,
+                             bool* is_bool) {
+  *name = nullptr;
+  *value = nullptr;
+  *is_bool = false;
+
+  if (*arg == '-') {
+    // find the begin of the flag name
+    arg++;  // remove 1st '-'
+    if (*arg == '-')
+      arg++;  // remove 2nd '-'
+    if (arg[0] == 'n' && arg[1] == 'o' && Lookup(arg + 2)) {
+      arg += 2;  // remove "no"
+      *is_bool = true;
+    }
+    *name = arg;
+
+    // find the end of the flag name
+    while (*arg != '\0' && *arg != '=')
+      arg++;
+
+    // get the value if any
+    if (*arg == '=') {
+      // make a copy so we can NUL-terminate flag name
+      int n = static_cast<int>(arg - *name);
+      RTC_CHECK_LT(n, buffer_size);
+      memcpy(buffer, *name, n * sizeof(char));
+      buffer[n] = '\0';
+      *name = buffer;
+      // get the value
+      *value = arg + 1;
+    }
+  }
+}
+
+
+int FlagList::SetFlagsFromCommandLine(int* argc, const char** argv,
+                                      bool remove_flags) {
+  // parse arguments
+  for (int i = 1; i < *argc; /* see below */) {
+    int j = i;  // j > 0
+    const char* arg = argv[i++];
+
+    // split arg into flag components
+    char buffer[1024];
+    const char* name;
+    const char* value;
+    bool is_bool;
+    SplitArgument(arg, buffer, sizeof buffer, &name, &value, &is_bool);
+
+    if (name != nullptr) {
+      // lookup the flag
+      Flag* flag = Lookup(name);
+      if (flag == nullptr) {
+        fprintf(stderr, "Error: unrecognized flag %s\n", arg);
+        return j;
+      }
+
+      // if we still need a flag value, use the next argument if available
+      if (flag->type() != Flag::BOOL && value == nullptr) {
+        if (i < *argc) {
+          value = argv[i++];
+        } else {
+          fprintf(stderr, "Error: missing value for flag %s of type %s\n",
+            arg, Type2String(flag->type()));
+          return j;
+        }
+      }
+
+      // set the flag
+      char empty[] = { '\0' };
+      char* endp = empty;
+      switch (flag->type()) {
+        case Flag::BOOL:
+          *flag->bool_variable() = !is_bool;
+          break;
+        case Flag::INT:
+          *flag->int_variable() = strtol(value, &endp, 10);
+          break;
+        case Flag::FLOAT:
+          *flag->float_variable() = strtod(value, &endp);
+          break;
+        case Flag::STRING:
+          *flag->string_variable() = value;
+          break;
+      }
+
+      // handle errors
+      if ((flag->type() == Flag::BOOL && value != nullptr) ||
+          (flag->type() != Flag::BOOL && is_bool) || *endp != '\0') {
+        fprintf(stderr, "Error: illegal value for flag %s of type %s\n",
+          arg, Type2String(flag->type()));
+        return j;
+      }
+
+      // remove the flag & value from the command
+      if (remove_flags)
+        while (j < i)
+          argv[j++] = nullptr;
+    }
+  }
+
+  // shrink the argument list
+  if (remove_flags) {
+    int j = 1;
+    for (int i = 1; i < *argc; i++) {
+      if (argv[i] != nullptr)
+        argv[j++] = argv[i];
+    }
+    *argc = j;
+  }
+
+  // parsed all flags successfully
+  return 0;
+}
+
+void FlagList::Register(Flag* flag) {
+  RTC_DCHECK(flag);
+  RTC_DCHECK_GT(strlen(flag->name()), 0);
+  // NOTE: Don't call Lookup() within Register because it accesses the name_
+  // of other flags in list_, and if the flags are coming from two different
+  // compilation units, the initialization order between them is undefined, and
+  // this will trigger an asan initialization-order-fiasco error.
+  flag->next_ = list_;
+  list_ = flag;
+}
+
+#if defined(WEBRTC_WIN)
+WindowsCommandLineArguments::WindowsCommandLineArguments() {
+  // start by getting the command line.
+  LPTSTR command_line = ::GetCommandLine();
+   // now, convert it to a list of wide char strings.
+  LPWSTR *wide_argv = ::CommandLineToArgvW(command_line, &argc_);
+  // now allocate an array big enough to hold that many string pointers.
+  argv_ = new char*[argc_];
+
+  // iterate over the returned wide strings;
+  for(int i = 0; i < argc_; ++i) {
+    std::string s = rtc::ToUtf8(wide_argv[i], wcslen(wide_argv[i]));
+    char *buffer = new char[s.length() + 1];
+    rtc::strcpyn(buffer, s.length() + 1, s.c_str());
+
+    // make sure the argv array has the right string at this point.
+    argv_[i] = buffer;
+  }
+  LocalFree(wide_argv);
+}
+
+WindowsCommandLineArguments::~WindowsCommandLineArguments() {
+  // need to free each string in the array, and then the array.
+  for(int i = 0; i < argc_; i++) {
+    delete[] argv_[i];
+  }
+
+  delete[] argv_;
+}
+#endif  // WEBRTC_WIN
+
+}  // namespace rtc
diff --git a/rtc_base/flags.h b/rtc_base/flags.h
new file mode 100644
index 0000000..5a07b1a
--- /dev/null
+++ b/rtc_base/flags.h
@@ -0,0 +1,268 @@
+/*
+ *  Copyright 2006 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+// Originally comes from shared/commandlineflags/flags.h
+
+// Flags are defined and declared using DEFINE_xxx and DECLARE_xxx macros,
+// where xxx is the flag type. Flags are referred to via FLAG_yyy,
+// where yyy is the flag name. For intialization and iteration of flags,
+// see the FlagList class. For full programmatic access to any
+// flag, see the Flag class.
+//
+// The implementation only relies and basic C++ functionality
+// and needs no special library or STL support.
+
+#ifndef RTC_BASE_FLAGS_H_
+#define RTC_BASE_FLAGS_H_
+
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+
+namespace rtc {
+
+// Internal use only.
+union FlagValue {
+  // Note: Because in C++ non-bool values are silently converted into
+  // bool values ('bool b = "false";' results in b == true!), we pass
+  // and int argument to New_BOOL as this appears to be safer - sigh.
+  // In particular, it prevents the (not uncommon!) bug where a bool
+  // flag is defined via: DEFINE_bool(flag, "false", "some comment");.
+  static FlagValue New_BOOL(int b) {
+    FlagValue v;
+    v.b = (b != 0);
+    return v;
+  }
+
+  static FlagValue New_INT(int i) {
+    FlagValue v;
+    v.i = i;
+    return v;
+  }
+
+  static FlagValue New_FLOAT(float f) {
+    FlagValue v;
+    v.f = f;
+    return v;
+  }
+
+  static FlagValue New_STRING(const char* s) {
+    FlagValue v;
+    v.s = s;
+    return v;
+  }
+
+  bool b;
+  int i;
+  double f;
+  const char* s;
+};
+
+
+// Each flag can be accessed programmatically via a Flag object.
+class Flag {
+ public:
+  enum Type { BOOL, INT, FLOAT, STRING };
+
+  // Internal use only.
+  Flag(const char* file, const char* name, const char* comment,
+       Type type, void* variable, FlagValue default_);
+
+  // General flag information
+  const char* file() const  { return file_; }
+  const char* name() const  { return name_; }
+  const char* comment() const  { return comment_; }
+
+  // Flag type
+  Type type() const  { return type_; }
+
+  // Flag variables
+  bool* bool_variable() const {
+    RTC_DCHECK_EQ(BOOL, type_);
+    return &variable_->b;
+  }
+
+  int* int_variable() const {
+    RTC_DCHECK_EQ(INT, type_);
+    return &variable_->i;
+  }
+
+  double* float_variable() const {
+    RTC_DCHECK_EQ(FLOAT, type_);
+    return &variable_->f;
+  }
+
+  const char** string_variable() const {
+    RTC_DCHECK_EQ(STRING, type_);
+    return &variable_->s;
+  }
+
+  // Default values
+  bool bool_default() const {
+    RTC_DCHECK_EQ(BOOL, type_);
+    return default_.b;
+  }
+
+  int int_default() const {
+    RTC_DCHECK_EQ(INT, type_);
+    return default_.i;
+  }
+
+  double float_default() const {
+    RTC_DCHECK_EQ(FLOAT, type_);
+    return default_.f;
+  }
+
+  const char* string_default() const {
+    RTC_DCHECK_EQ(STRING, type_);
+    return default_.s;
+  }
+
+  // Resets a flag to its default value
+  void SetToDefault();
+
+  // Iteration support
+  Flag* next() const  { return next_; }
+
+  // Prints flag information. The current flag value is only printed
+  // if print_current_value is set.
+  void Print(bool print_current_value);
+
+ private:
+  const char* file_;
+  const char* name_;
+  const char* comment_;
+
+  Type type_;
+  FlagValue* variable_;
+  FlagValue default_;
+
+  Flag* next_;
+
+  friend class FlagList;  // accesses next_
+};
+
+
+// Internal use only.
+#define DEFINE_FLAG(type, c_type, name, default, comment) \
+  /* define and initialize the flag */                    \
+  c_type FLAG_##name = (default);                         \
+  /* register the flag */                                 \
+  static rtc::Flag Flag_##name(__FILE__, #name, (comment),      \
+                               rtc::Flag::type, &FLAG_##name,   \
+                               rtc::FlagValue::New_##type(default))
+
+
+// Internal use only.
+#define DECLARE_FLAG(c_type, name)              \
+  /* declare the external flag */               \
+  extern c_type FLAG_##name
+
+
+// Use the following macros to define a new flag:
+#define DEFINE_bool(name, default, comment) \
+  DEFINE_FLAG(BOOL, bool, name, default, comment)
+#define DEFINE_int(name, default, comment) \
+  DEFINE_FLAG(INT, int, name, default, comment)
+#define DEFINE_float(name, default, comment) \
+  DEFINE_FLAG(FLOAT, double, name, default, comment)
+#define DEFINE_string(name, default, comment) \
+  DEFINE_FLAG(STRING, const char*, name, default, comment)
+
+
+// Use the following macros to declare a flag defined elsewhere:
+#define DECLARE_bool(name)  DECLARE_FLAG(bool, name)
+#define DECLARE_int(name)  DECLARE_FLAG(int, name)
+#define DECLARE_float(name)  DECLARE_FLAG(double, name)
+#define DECLARE_string(name)  DECLARE_FLAG(const char*, name)
+
+
+// The global list of all flags.
+class FlagList {
+ public:
+  FlagList();
+
+  // The null-terminated list of all flags. Traverse with Flag::next().
+  static Flag* list()  { return list_; }
+
+  // If file != nullptr, prints information for all flags defined in file;
+  // otherwise prints information for all flags in all files. The current flag
+  // value is only printed if print_current_value is set.
+  static void Print(const char* file, bool print_current_value);
+
+  // Lookup a flag by name. Returns the matching flag or null.
+  static Flag* Lookup(const char* name);
+
+  // Helper function to parse flags: Takes an argument arg and splits it into
+  // a flag name and flag value (or null if they are missing). is_bool is set
+  // if the arg started with "-no" or "--no". The buffer may be used to NUL-
+  // terminate the name, it must be large enough to hold any possible name.
+  static void SplitArgument(const char* arg,
+                            char* buffer, int buffer_size,
+                            const char** name, const char** value,
+                            bool* is_bool);
+
+  // Set the flag values by parsing the command line. If remove_flags
+  // is set, the flags and associated values are removed from (argc,
+  // argv). Returns 0 if no error occurred. Otherwise, returns the
+  // argv index > 0 for the argument where an error occurred. In that
+  // case, (argc, argv) will remain unchanged indepdendent of the
+  // remove_flags value, and no assumptions about flag settings should
+  // be made.
+  //
+  // The following syntax for flags is accepted (both '-' and '--' are ok):
+  //
+  //   --flag        (bool flags only)
+  //   --noflag      (bool flags only)
+  //   --flag=value  (non-bool flags only, no spaces around '=')
+  //   --flag value  (non-bool flags only)
+  static int SetFlagsFromCommandLine(int* argc,
+                                     const char** argv,
+                                     bool remove_flags);
+  static inline int SetFlagsFromCommandLine(int* argc,
+                                            char** argv,
+                                            bool remove_flags) {
+    return SetFlagsFromCommandLine(argc, const_cast<const char**>(argv),
+                                   remove_flags);
+  }
+
+  // Registers a new flag. Called during program initialization. Not
+  // thread-safe.
+  static void Register(Flag* flag);
+
+ private:
+  static Flag* list_;
+};
+
+#if defined(WEBRTC_WIN)
+// A helper class to translate Windows command line arguments into UTF8,
+// which then allows us to just pass them to the flags system.
+// This encapsulates all the work of getting the command line and translating
+// it to an array of 8-bit strings; all you have to do is create one of these,
+// and then call argc() and argv().
+class WindowsCommandLineArguments {
+ public:
+  WindowsCommandLineArguments();
+  ~WindowsCommandLineArguments();
+
+  int argc() { return argc_; }
+  char **argv() { return argv_; }
+ private:
+  int argc_;
+  char **argv_;
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(WindowsCommandLineArguments);
+};
+#endif  // WEBRTC_WIN
+
+}  // namespace rtc
+
+#endif  // SHARED_COMMANDLINEFLAGS_FLAGS_H_
diff --git a/rtc_base/format_macros.h b/rtc_base/format_macros.h
new file mode 100644
index 0000000..d252a94
--- /dev/null
+++ b/rtc_base/format_macros.h
@@ -0,0 +1,96 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_FORMAT_MACROS_H_
+#define RTC_BASE_FORMAT_MACROS_H_
+
+// This file defines the format macros for some integer types and is derived
+// from Chromium's base/format_macros.h.
+
+// To print a 64-bit value in a portable way:
+//   int64_t value;
+//   printf("xyz:%" PRId64, value);
+// The "d" in the macro corresponds to %d; you can also use PRIu64 etc.
+//
+// To print a size_t value in a portable way:
+//   size_t size;
+//   printf("xyz: %" PRIuS, size);
+// The "u" in the macro corresponds to %u, and S is for "size".
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+#if defined(WEBRTC_POSIX)
+
+#if (defined(_INTTYPES_H) || defined(_INTTYPES_H_)) && !defined(PRId64)
+#error "inttypes.h has already been included before this header file, but "
+#error "without __STDC_FORMAT_MACROS defined."
+#endif
+
+#if !defined(__STDC_FORMAT_MACROS)
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include <inttypes.h>
+
+#if !defined(PRIuS)
+#define PRIuS "zu"
+#endif
+
+// The size of NSInteger and NSUInteger varies between 32-bit and 64-bit
+// architectures and Apple does not provides standard format macros and
+// recommends casting. This has many drawbacks, so instead define macros
+// for formatting those types.
+#if defined(WEBRTC_MAC)
+#if defined(WEBRTC_ARCH_64_BITS)
+#if !defined(PRIdNS)
+#define PRIdNS "ld"
+#endif
+#if !defined(PRIuNS)
+#define PRIuNS "lu"
+#endif
+#if !defined(PRIxNS)
+#define PRIxNS "lx"
+#endif
+#else  // defined(WEBRTC_ARCH_64_BITS)
+#if !defined(PRIdNS)
+#define PRIdNS "d"
+#endif
+#if !defined(PRIuNS)
+#define PRIuNS "u"
+#endif
+#if !defined(PRIxNS)
+#define PRIxNS "x"
+#endif
+#endif
+#endif  // defined(WEBRTC_MAC)
+
+#else  // WEBRTC_WIN
+
+#include <inttypes.h>
+
+#if !defined(PRId64)
+#define PRId64 "I64d"
+#endif
+
+#if !defined(PRIu64)
+#define PRIu64 "I64u"
+#endif
+
+#if !defined(PRIx64)
+#define PRIx64 "I64x"
+#endif
+
+#if !defined(PRIuS)
+#define PRIuS "Iu"
+#endif
+
+#endif
+
+#endif  // RTC_BASE_FORMAT_MACROS_H_
diff --git a/rtc_base/function_view.h b/rtc_base/function_view.h
new file mode 100644
index 0000000..91ab88e
--- /dev/null
+++ b/rtc_base/function_view.h
@@ -0,0 +1,130 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_FUNCTION_VIEW_H_
+#define RTC_BASE_FUNCTION_VIEW_H_
+
+#include <type_traits>
+#include <utility>
+
+#include "rtc_base/checks.h"
+
+// Just like std::function, FunctionView will wrap any callable and hide its
+// actual type, exposing only its signature. But unlike std::function,
+// FunctionView doesn't own its callable---it just points to it. Thus, it's a
+// good choice mainly as a function argument when the callable argument will
+// not be called again once the function has returned.
+//
+// Its constructors are implicit, so that callers won't have to convert lambdas
+// and other callables to FunctionView<Blah(Blah, Blah)> explicitly. This is
+// safe because FunctionView is only a reference to the real callable.
+//
+// Example use:
+//
+//   void SomeFunction(rtc::FunctionView<int(int)> index_transform);
+//   ...
+//   SomeFunction([](int i) { return 2 * i + 1; });
+//
+// Note: FunctionView is tiny (essentially just two pointers) and trivially
+// copyable, so it's probably cheaper to pass it by value than by const
+// reference.
+
+namespace rtc {
+
+template <typename T>
+class FunctionView;  // Undefined.
+
+template <typename RetT, typename... ArgT>
+class FunctionView<RetT(ArgT...)> final {
+ public:
+  // Constructor for lambdas and other callables; it accepts every type of
+  // argument except those noted in its enable_if call.
+  template <
+      typename F,
+      typename std::enable_if<
+          // Not for function pointers; we have another constructor for that
+          // below.
+          !std::is_function<typename std::remove_pointer<
+              typename std::remove_reference<F>::type>::type>::value &&
+
+          // Not for nullptr; we have another constructor for that below.
+          !std::is_same<std::nullptr_t,
+                        typename std::remove_cv<F>::type>::value &&
+
+          // Not for FunctionView objects; we have another constructor for that
+          // (the implicitly declared copy constructor).
+          !std::is_same<FunctionView,
+                        typename std::remove_cv<typename std::remove_reference<
+                            F>::type>::type>::value>::type* = nullptr>
+  FunctionView(F&& f)
+      : call_(CallVoidPtr<typename std::remove_reference<F>::type>) {
+    f_.void_ptr = &f;
+  }
+
+  // Constructor that accepts function pointers. If the argument is null, the
+  // result is an empty FunctionView.
+  template <
+      typename F,
+      typename std::enable_if<std::is_function<typename std::remove_pointer<
+          typename std::remove_reference<F>::type>::type>::value>::type* =
+          nullptr>
+  FunctionView(F&& f)
+      : call_(f ? CallFunPtr<typename std::remove_pointer<F>::type> : nullptr) {
+    f_.fun_ptr = reinterpret_cast<void (*)()>(f);
+  }
+
+  // Constructor that accepts nullptr. It creates an empty FunctionView.
+  template <typename F,
+            typename std::enable_if<std::is_same<
+                std::nullptr_t,
+                typename std::remove_cv<F>::type>::value>::type* = nullptr>
+  FunctionView(F&& f) : call_(nullptr) {}
+
+  // Default constructor. Creates an empty FunctionView.
+  FunctionView() : call_(nullptr) {}
+
+  RetT operator()(ArgT... args) const {
+    RTC_DCHECK(call_);
+    return call_(f_, std::forward<ArgT>(args)...);
+  }
+
+  // Returns true if we have a function, false if we don't (i.e., we're null).
+  explicit operator bool() const { return !!call_; }
+
+ private:
+  union VoidUnion {
+    void* void_ptr;
+    void (*fun_ptr)();
+  };
+
+  template <typename F>
+  static RetT CallVoidPtr(VoidUnion vu, ArgT... args) {
+    return (*static_cast<F*>(vu.void_ptr))(std::forward<ArgT>(args)...);
+  }
+  template <typename F>
+  static RetT CallFunPtr(VoidUnion vu, ArgT... args) {
+    return (reinterpret_cast<typename std::add_pointer<F>::type>(vu.fun_ptr))(
+        std::forward<ArgT>(args)...);
+  }
+
+  // A pointer to the callable thing, with type information erased. It's a
+  // union because we have to use separate types depending on if the callable
+  // thing is a function pointer or something else.
+  VoidUnion f_;
+
+  // Pointer to a dispatch function that knows the type of the callable thing
+  // that's stored in f_, and how to call it. A FunctionView object is empty
+  // (null) iff call_ is null.
+  RetT (*call_)(VoidUnion, ArgT...);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_FUNCTION_VIEW_H_
diff --git a/rtc_base/function_view_unittest.cc b/rtc_base/function_view_unittest.cc
new file mode 100644
index 0000000..98f78d6
--- /dev/null
+++ b/rtc_base/function_view_unittest.cc
@@ -0,0 +1,175 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <utility>
+
+#include "rtc_base/function_view.h"
+#include "rtc_base/gunit.h"
+
+namespace rtc {
+
+namespace {
+
+int CallWith33(rtc::FunctionView<int(int)> fv) {
+  return fv ? fv(33) : -1;
+}
+
+int Add33(int x) {
+  return x + 33;
+}
+
+}  // namespace
+
+// Test the main use case of FunctionView: implicitly converting a callable
+// argument.
+TEST(FunctionViewTest, ImplicitConversion) {
+  EXPECT_EQ(38, CallWith33([](int x) { return x + 5; }));
+  EXPECT_EQ(66, CallWith33(Add33));
+  EXPECT_EQ(-1, CallWith33(nullptr));
+}
+
+TEST(FunctionViewTest, IntIntLambdaWithoutState) {
+  auto f = [](int x) { return x + 1; };
+  EXPECT_EQ(18, f(17));
+  rtc::FunctionView<int(int)> fv(f);
+  EXPECT_TRUE(fv);
+  EXPECT_EQ(18, fv(17));
+}
+
+TEST(FunctionViewTest, IntVoidLambdaWithState) {
+  int x = 13;
+  auto f = [x]() mutable { return ++x; };
+  rtc::FunctionView<int()> fv(f);
+  EXPECT_TRUE(fv);
+  EXPECT_EQ(14, f());
+  EXPECT_EQ(15, fv());
+  EXPECT_EQ(16, f());
+  EXPECT_EQ(17, fv());
+}
+
+TEST(FunctionViewTest, IntIntFunction) {
+  rtc::FunctionView<int(int)> fv(Add33);
+  EXPECT_TRUE(fv);
+  EXPECT_EQ(50, fv(17));
+}
+
+TEST(FunctionViewTest, IntIntFunctionPointer) {
+  rtc::FunctionView<int(int)> fv(&Add33);
+  EXPECT_TRUE(fv);
+  EXPECT_EQ(50, fv(17));
+}
+
+TEST(FunctionViewTest, Null) {
+  // These two call constructors that statically construct null FunctionViews.
+  EXPECT_FALSE(rtc::FunctionView<int()>());
+  EXPECT_FALSE(rtc::FunctionView<int()>(nullptr));
+
+  // This calls the constructor for function pointers.
+  EXPECT_FALSE(rtc::FunctionView<int()>(reinterpret_cast<int(*)()>(0)));
+}
+
+// Ensure that FunctionView handles move-only arguments and return values.
+TEST(FunctionViewTest, UniquePtrPassthrough) {
+  auto f = [](std::unique_ptr<int> x) { return x; };
+  rtc::FunctionView<std::unique_ptr<int>(std::unique_ptr<int>)> fv(f);
+  std::unique_ptr<int> x(new int);
+  int* x_addr = x.get();
+  auto y = fv(std::move(x));
+  EXPECT_EQ(x_addr, y.get());
+}
+
+TEST(FunctionViewTest, CopyConstructor) {
+  auto f17 = [] { return 17; };
+  rtc::FunctionView<int()> fv1(f17);
+  rtc::FunctionView<int()> fv2(fv1);
+  EXPECT_EQ(17, fv1());
+  EXPECT_EQ(17, fv2());
+}
+
+TEST(FunctionViewTest, MoveConstructorIsCopy) {
+  auto f17 = [] { return 17; };
+  rtc::FunctionView<int()> fv1(f17);
+  rtc::FunctionView<int()> fv2(std::move(fv1));
+  EXPECT_EQ(17, fv1());
+  EXPECT_EQ(17, fv2());
+}
+
+TEST(FunctionViewTest, CopyAssignment) {
+  auto f17 = [] { return 17; };
+  rtc::FunctionView<int()> fv1(f17);
+  auto f23 = [] { return 23; };
+  rtc::FunctionView<int()> fv2(f23);
+  EXPECT_EQ(17, fv1());
+  EXPECT_EQ(23, fv2());
+  fv2 = fv1;
+  EXPECT_EQ(17, fv1());
+  EXPECT_EQ(17, fv2());
+}
+
+TEST(FunctionViewTest, MoveAssignmentIsCopy) {
+  auto f17 = [] { return 17; };
+  rtc::FunctionView<int()> fv1(f17);
+  auto f23 = [] { return 23; };
+  rtc::FunctionView<int()> fv2(f23);
+  EXPECT_EQ(17, fv1());
+  EXPECT_EQ(23, fv2());
+  fv2 = std::move(fv1);
+  EXPECT_EQ(17, fv1());
+  EXPECT_EQ(17, fv2());
+}
+
+TEST(FunctionViewTest, Swap) {
+  auto f17 = [] { return 17; };
+  rtc::FunctionView<int()> fv1(f17);
+  auto f23 = [] { return 23; };
+  rtc::FunctionView<int()> fv2(f23);
+  EXPECT_EQ(17, fv1());
+  EXPECT_EQ(23, fv2());
+  using std::swap;
+  swap(fv1, fv2);
+  EXPECT_EQ(23, fv1());
+  EXPECT_EQ(17, fv2());
+}
+
+// Ensure that when you copy-construct a FunctionView, the new object points to
+// the same function as the old one (as opposed to the new object pointing to
+// the old one).
+TEST(FunctionViewTest, CopyConstructorChaining) {
+  auto f17 = [] { return 17; };
+  rtc::FunctionView<int()> fv1(f17);
+  rtc::FunctionView<int()> fv2(fv1);
+  EXPECT_EQ(17, fv1());
+  EXPECT_EQ(17, fv2());
+  auto f23 = [] { return 23; };
+  fv1 = f23;
+  EXPECT_EQ(23, fv1());
+  EXPECT_EQ(17, fv2());
+}
+
+// Ensure that when you assign one FunctionView to another, we actually make a
+// copy (as opposed to making the second FunctionView point to the first one).
+TEST(FunctionViewTest, CopyAssignmentChaining) {
+  auto f17 = [] { return 17; };
+  rtc::FunctionView<int()> fv1(f17);
+  rtc::FunctionView<int()> fv2;
+  EXPECT_TRUE(fv1);
+  EXPECT_EQ(17, fv1());
+  EXPECT_FALSE(fv2);
+  fv2 = fv1;
+  EXPECT_EQ(17, fv1());
+  EXPECT_EQ(17, fv2());
+  auto f23 = [] { return 23; };
+  fv1 = f23;
+  EXPECT_EQ(23, fv1());
+  EXPECT_EQ(17, fv2());
+}
+
+}  // namespace rtc
diff --git a/rtc_base/gtest_prod_util.h b/rtc_base/gtest_prod_util.h
new file mode 100644
index 0000000..dc9679f
--- /dev/null
+++ b/rtc_base/gtest_prod_util.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_GTEST_PROD_UTIL_H_
+#define RTC_BASE_GTEST_PROD_UTIL_H_
+
+// Define our own version of FRIEND_TEST here rather than including
+// gtest_prod.h to avoid depending on any part of GTest in production code.
+#define FRIEND_TEST_WEBRTC(test_case_name, test_name)\
+friend class test_case_name##_##test_name##_Test
+
+// This file is a plain copy of Chromium's base/gtest_prod_util.h.
+//
+// This is a wrapper for gtest's FRIEND_TEST macro that friends
+// test with all possible prefixes. This is very helpful when changing the test
+// prefix, because the friend declarations don't need to be updated.
+//
+// Example usage:
+//
+// class MyClass {
+//  private:
+//   void MyMethod();
+//   FRIEND_TEST_ALL_PREFIXES(MyClassTest, MyMethod);
+// };
+#define FRIEND_TEST_ALL_PREFIXES(test_case_name, test_name) \
+  FRIEND_TEST_WEBRTC(test_case_name, test_name); \
+  FRIEND_TEST_WEBRTC(test_case_name, DISABLED_##test_name); \
+  FRIEND_TEST_WEBRTC(test_case_name, FLAKY_##test_name); \
+  FRIEND_TEST_WEBRTC(test_case_name, FAILS_##test_name)
+
+#endif  // RTC_BASE_GTEST_PROD_UTIL_H_
diff --git a/rtc_base/gunit.cc b/rtc_base/gunit.cc
new file mode 100644
index 0000000..0dd8f12
--- /dev/null
+++ b/rtc_base/gunit.cc
@@ -0,0 +1,41 @@
+/*
+ *  Copyright 2018 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/gunit.h"
+
+#include <string>
+
+#include "rtc_base/stringutils.h"
+
+::testing::AssertionResult AssertStartsWith(const char* str_expr,
+                                            const char* prefix_expr,
+                                            const std::string& str,
+                                            const std::string& prefix) {
+  if (rtc::starts_with(str.c_str(), prefix.c_str())) {
+    return ::testing::AssertionSuccess();
+  } else {
+    return ::testing::AssertionFailure()
+           << str_expr << "\nwhich is\n\"" << str << "\"\ndoes not start with\n"
+           << prefix_expr << "\nwhich is\n\"" << prefix << "\"";
+  }
+}
+
+::testing::AssertionResult AssertStringContains(const char* str_expr,
+                                                const char* substr_expr,
+                                                const std::string& str,
+                                                const std::string& substr) {
+  if (str.find(substr) != std::string::npos) {
+    return ::testing::AssertionSuccess();
+  } else {
+    return ::testing::AssertionFailure()
+           << str_expr << "\nwhich is\n\"" << str << "\"\ndoes not contain\n"
+           << substr_expr << "\nwhich is\n\"" << substr << "\"";
+  }
+}
diff --git a/rtc_base/gunit.h b/rtc_base/gunit.h
new file mode 100644
index 0000000..1d3019b
--- /dev/null
+++ b/rtc_base/gunit.h
@@ -0,0 +1,172 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_GUNIT_H_
+#define RTC_BASE_GUNIT_H_
+
+#include "rtc_base/fakeclock.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/thread.h"
+#if defined(GTEST_RELATIVE_PATH)
+#include "test/gtest.h"
+#else
+#include "testing/base/public/gunit.h"
+#endif
+
+// Wait until "ex" is true, or "timeout" expires.
+#define WAIT(ex, timeout)                                       \
+  for (int64_t start = rtc::SystemTimeMillis();                 \
+       !(ex) && rtc::SystemTimeMillis() < start + (timeout);) { \
+    rtc::Thread::Current()->ProcessMessages(0);                 \
+    rtc::Thread::Current()->SleepMs(1);                         \
+  }
+
+// This returns the result of the test in res, so that we don't re-evaluate
+// the expression in the XXXX_WAIT macros below, since that causes problems
+// when the expression is only true the first time you check it.
+#define WAIT_(ex, timeout, res)                                   \
+  do {                                                            \
+    int64_t start = rtc::SystemTimeMillis();                      \
+    res = (ex);                                                   \
+    while (!res && rtc::SystemTimeMillis() < start + (timeout)) { \
+      rtc::Thread::Current()->ProcessMessages(0);                 \
+      rtc::Thread::Current()->SleepMs(1);                         \
+      res = (ex);                                                 \
+    }                                                             \
+  } while (0)
+
+// The typical EXPECT_XXXX and ASSERT_XXXXs, but done until true or a timeout.
+// One can add failure message by appending "<< msg".
+#define EXPECT_TRUE_WAIT(ex, timeout)                   \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_                         \
+  if (bool res = true) {                                \
+    WAIT_(ex, timeout, res);                            \
+    if (!res)                                           \
+      goto GTEST_CONCAT_TOKEN_(gunit_label_, __LINE__); \
+  } else                                                \
+    GTEST_CONCAT_TOKEN_(gunit_label_, __LINE__) : EXPECT_TRUE(ex)
+
+#define EXPECT_EQ_WAIT(v1, v2, timeout)                 \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_                         \
+  if (bool res = true) {                                \
+    WAIT_(v1 == v2, timeout, res);                      \
+    if (!res)                                           \
+      goto GTEST_CONCAT_TOKEN_(gunit_label_, __LINE__); \
+  } else                                                \
+    GTEST_CONCAT_TOKEN_(gunit_label_, __LINE__) : EXPECT_EQ(v1, v2)
+
+#define ASSERT_TRUE_WAIT(ex, timeout)                   \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_                         \
+  if (bool res = true) {                                \
+    WAIT_(ex, timeout, res);                            \
+    if (!res)                                           \
+      goto GTEST_CONCAT_TOKEN_(gunit_label_, __LINE__); \
+  } else                                                \
+    GTEST_CONCAT_TOKEN_(gunit_label_, __LINE__) : ASSERT_TRUE(ex)
+
+#define ASSERT_EQ_WAIT(v1, v2, timeout)                 \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_                         \
+  if (bool res = true) {                                \
+    WAIT_(v1 == v2, timeout, res);                      \
+    if (!res)                                           \
+      goto GTEST_CONCAT_TOKEN_(gunit_label_, __LINE__); \
+  } else                                                \
+    GTEST_CONCAT_TOKEN_(gunit_label_, __LINE__) : ASSERT_EQ(v1, v2)
+
+// Version with a "soft" timeout and a margin. This logs if the timeout is
+// exceeded, but it only fails if the expression still isn't true after the
+// margin time passes.
+#define EXPECT_TRUE_WAIT_MARGIN(ex, timeout, margin)                           \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_                                                \
+  if (bool res = true) {                                                       \
+    WAIT_(ex, timeout, res);                                                   \
+    if (res)                                                                   \
+      break;                                                                   \
+    RTC_LOG(LS_WARNING) << "Expression " << #ex << " still not true after "    \
+                        << (timeout) << "ms; waiting an additional " << margin \
+                        << "ms";                                               \
+    WAIT_(ex, margin, res);                                                    \
+    if (!res)                                                                  \
+      goto GTEST_CONCAT_TOKEN_(gunit_label_, __LINE__);                        \
+  } else                                                                       \
+    GTEST_CONCAT_TOKEN_(gunit_label_, __LINE__) : EXPECT_TRUE(ex)
+
+// Wait until "ex" is true, or "timeout" expires, using fake clock where
+// messages are processed every millisecond.
+// TODO(pthatcher): Allow tests to control how many milliseconds to advance.
+#define SIMULATED_WAIT(ex, timeout, clock)                    \
+  for (int64_t start = rtc::TimeMillis();                     \
+       !(ex) && rtc::TimeMillis() < start + (timeout);) {     \
+    (clock).AdvanceTime(rtc::TimeDelta::FromMilliseconds(1)); \
+  }
+
+// This returns the result of the test in res, so that we don't re-evaluate
+// the expression in the XXXX_WAIT macros below, since that causes problems
+// when the expression is only true the first time you check it.
+#define SIMULATED_WAIT_(ex, timeout, res, clock)                \
+  do {                                                          \
+    int64_t start = rtc::TimeMillis();                          \
+    res = (ex);                                                 \
+    while (!res && rtc::TimeMillis() < start + (timeout)) {     \
+      (clock).AdvanceTime(rtc::TimeDelta::FromMilliseconds(1)); \
+      res = (ex);                                               \
+    }                                                           \
+  } while (0)
+
+// The typical EXPECT_XXXX, but done until true or a timeout with a fake clock.
+#define EXPECT_TRUE_SIMULATED_WAIT(ex, timeout, clock) \
+  do {                                                 \
+    bool res;                                          \
+    SIMULATED_WAIT_(ex, timeout, res, clock);          \
+    if (!res) {                                        \
+      EXPECT_TRUE(ex);                                 \
+    }                                                  \
+  } while (0)
+
+#define EXPECT_EQ_SIMULATED_WAIT(v1, v2, timeout, clock) \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_                          \
+  if (bool res = true) {                                 \
+    SIMULATED_WAIT_(v1 == v2, timeout, res, clock);      \
+    if (!res)                                            \
+      goto GTEST_CONCAT_TOKEN_(gunit_label_, __LINE__);  \
+  } else                                                 \
+    GTEST_CONCAT_TOKEN_(gunit_label_, __LINE__) : EXPECT_EQ(v1, v2)
+
+#define ASSERT_TRUE_SIMULATED_WAIT(ex, timeout, clock)  \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_                         \
+  if (bool res = true) {                                \
+    SIMULATED_WAIT_(ex, timeout, res, clock);           \
+    if (!res)                                           \
+      goto GTEST_CONCAT_TOKEN_(gunit_label_, __LINE__); \
+  } else                                                \
+    GTEST_CONCAT_TOKEN_(gunit_label_, __LINE__) : ASSERT_TRUE(ex)
+
+#define ASSERT_EQ_SIMULATED_WAIT(v1, v2, timeout, clock) \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_                          \
+  if (bool res = true) {                                 \
+    SIMULATED_WAIT_(v1 == v2, timeout, res, clock);      \
+    if (!res)                                            \
+      goto GTEST_CONCAT_TOKEN_(gunit_label_, __LINE__);  \
+  } else                                                 \
+    GTEST_CONCAT_TOKEN_(gunit_label_, __LINE__) : ASSERT_EQ(v1, v2)
+
+// Usage: EXPECT_PRED_FORMAT2(AssertStartsWith, str, "prefix");
+testing::AssertionResult AssertStartsWith(const char* str_expr,
+                                          const char* prefix_expr,
+                                          const std::string& str,
+                                          const std::string& prefix);
+
+// Usage: EXPECT_PRED_FORMAT2(AssertStringContains, str, "substring");
+testing::AssertionResult AssertStringContains(const char* str_expr,
+                                              const char* substr_expr,
+                                              const std::string& str,
+                                              const std::string& substr);
+
+#endif  // RTC_BASE_GUNIT_H_
diff --git a/rtc_base/gunit_prod.h b/rtc_base/gunit_prod.h
new file mode 100644
index 0000000..ae4157d
--- /dev/null
+++ b/rtc_base/gunit_prod.h
@@ -0,0 +1,24 @@
+/*
+ *  Copyright 2012 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_GUNIT_PROD_H_
+#define RTC_BASE_GUNIT_PROD_H_
+
+#if defined(WEBRTC_ANDROID)
+// Android doesn't use gtest at all, so anything that relies on gtest should
+// check this define first.
+#define NO_GTEST
+#elif defined (GTEST_RELATIVE_PATH)
+#include "gtest/gtest_prod.h"
+#else
+#include "testing/base/gunit_prod.h"
+#endif
+
+#endif  // RTC_BASE_GUNIT_PROD_H_
diff --git a/rtc_base/helpers.cc b/rtc_base/helpers.cc
new file mode 100644
index 0000000..9cb9268
--- /dev/null
+++ b/rtc_base/helpers.cc
@@ -0,0 +1,222 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/helpers.h"
+
+#include <limits>
+#include <memory>
+
+#include <openssl/rand.h>
+
+#include "rtc_base/base64.h"
+#include "rtc_base/basictypes.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/timeutils.h"
+
+// Protect against max macro inclusion.
+#undef max
+
+namespace rtc {
+
+// Base class for RNG implementations.
+class RandomGenerator {
+ public:
+  virtual ~RandomGenerator() {}
+  virtual bool Init(const void* seed, size_t len) = 0;
+  virtual bool Generate(void* buf, size_t len) = 0;
+};
+
+// The OpenSSL RNG.
+class SecureRandomGenerator : public RandomGenerator {
+ public:
+  SecureRandomGenerator() {}
+  ~SecureRandomGenerator() override {}
+  bool Init(const void* seed, size_t len) override { return true; }
+  bool Generate(void* buf, size_t len) override {
+    return (RAND_bytes(reinterpret_cast<unsigned char*>(buf), len) > 0);
+  }
+};
+
+// A test random generator, for predictable output.
+class TestRandomGenerator : public RandomGenerator {
+ public:
+  TestRandomGenerator() : seed_(7) {
+  }
+  ~TestRandomGenerator() override {
+  }
+  bool Init(const void* seed, size_t len) override { return true; }
+  bool Generate(void* buf, size_t len) override {
+    for (size_t i = 0; i < len; ++i) {
+      static_cast<uint8_t*>(buf)[i] = static_cast<uint8_t>(GetRandom());
+    }
+    return true;
+  }
+
+ private:
+  int GetRandom() {
+    return ((seed_ = seed_ * 214013L + 2531011L) >> 16) & 0x7fff;
+  }
+  int seed_;
+};
+
+namespace {
+
+// TODO: Use Base64::Base64Table instead.
+static const char kBase64[64] = {
+    'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
+    'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
+    'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
+    'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
+    '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/'};
+
+static const char kHex[16] = {'0', '1', '2', '3', '4', '5', '6', '7',
+                              '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
+
+static const char kUuidDigit17[4] = {'8', '9', 'a', 'b'};
+
+// This round about way of creating a global RNG is to safe-guard against
+// indeterminant static initialization order.
+std::unique_ptr<RandomGenerator>& GetGlobalRng() {
+  RTC_DEFINE_STATIC_LOCAL(std::unique_ptr<RandomGenerator>, global_rng,
+                          (new SecureRandomGenerator()));
+  return global_rng;
+}
+
+RandomGenerator& Rng() {
+  return *GetGlobalRng();
+}
+
+}  // namespace
+
+void SetRandomTestMode(bool test) {
+  if (!test) {
+    GetGlobalRng().reset(new SecureRandomGenerator());
+  } else {
+    GetGlobalRng().reset(new TestRandomGenerator());
+  }
+}
+
+bool InitRandom(int seed) {
+  return InitRandom(reinterpret_cast<const char*>(&seed), sizeof(seed));
+}
+
+bool InitRandom(const char* seed, size_t len) {
+  if (!Rng().Init(seed, len)) {
+    RTC_LOG(LS_ERROR) << "Failed to init random generator!";
+    return false;
+  }
+  return true;
+}
+
+std::string CreateRandomString(size_t len) {
+  std::string str;
+  RTC_CHECK(CreateRandomString(len, &str));
+  return str;
+}
+
+static bool CreateRandomString(size_t len,
+                        const char* table, int table_size,
+                        std::string* str) {
+  str->clear();
+  // Avoid biased modulo division below.
+  if (256 % table_size) {
+    RTC_LOG(LS_ERROR) << "Table size must divide 256 evenly!";
+    return false;
+  }
+  std::unique_ptr<uint8_t[]> bytes(new uint8_t[len]);
+  if (!Rng().Generate(bytes.get(), len)) {
+    RTC_LOG(LS_ERROR) << "Failed to generate random string!";
+    return false;
+  }
+  str->reserve(len);
+  for (size_t i = 0; i < len; ++i) {
+    str->push_back(table[bytes[i] % table_size]);
+  }
+  return true;
+}
+
+bool CreateRandomString(size_t len, std::string* str) {
+  return CreateRandomString(len, kBase64, 64, str);
+}
+
+bool CreateRandomString(size_t len, const std::string& table,
+                        std::string* str) {
+  return CreateRandomString(len, table.c_str(),
+                            static_cast<int>(table.size()), str);
+}
+
+bool CreateRandomData(size_t length, std::string* data) {
+  data->resize(length);
+  // std::string is guaranteed to use contiguous memory in c++11 so we can
+  // safely write directly to it.
+  return Rng().Generate(&data->at(0), length);
+}
+
+// Version 4 UUID is of the form:
+// xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx
+// Where 'x' is a hex digit, and 'y' is 8, 9, a or b.
+std::string CreateRandomUuid() {
+  std::string str;
+  std::unique_ptr<uint8_t[]> bytes(new uint8_t[31]);
+  RTC_CHECK(Rng().Generate(bytes.get(), 31));
+  str.reserve(36);
+  for (size_t i = 0; i < 8; ++i) {
+    str.push_back(kHex[bytes[i] % 16]);
+  }
+  str.push_back('-');
+  for (size_t i = 8; i < 12; ++i) {
+    str.push_back(kHex[bytes[i] % 16]);
+  }
+  str.push_back('-');
+  str.push_back('4');
+  for (size_t i = 12; i < 15; ++i) {
+    str.push_back(kHex[bytes[i] % 16]);
+  }
+  str.push_back('-');
+  str.push_back(kUuidDigit17[bytes[15] % 4]);
+  for (size_t i = 16; i < 19; ++i) {
+    str.push_back(kHex[bytes[i] % 16]);
+  }
+  str.push_back('-');
+  for (size_t i = 19; i < 31; ++i) {
+    str.push_back(kHex[bytes[i] % 16]);
+  }
+  return str;
+}
+
+uint32_t CreateRandomId() {
+  uint32_t id;
+  RTC_CHECK(Rng().Generate(&id, sizeof(id)));
+  return id;
+}
+
+uint64_t CreateRandomId64() {
+  return static_cast<uint64_t>(CreateRandomId()) << 32 | CreateRandomId();
+}
+
+uint32_t CreateRandomNonZeroId() {
+  uint32_t id;
+  do {
+    id = CreateRandomId();
+  } while (id == 0);
+  return id;
+}
+
+double CreateRandomDouble() {
+  return CreateRandomId() / (std::numeric_limits<uint32_t>::max() +
+                             std::numeric_limits<double>::epsilon());
+}
+
+double GetNextMovingAverage(double prev_average, double cur, double ratio) {
+  return (ratio * prev_average + cur) / (ratio + 1);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/helpers.h b/rtc_base/helpers.h
new file mode 100644
index 0000000..0100794
--- /dev/null
+++ b/rtc_base/helpers.h
@@ -0,0 +1,68 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_HELPERS_H_
+#define RTC_BASE_HELPERS_H_
+
+#include <string>
+#include "rtc_base/basictypes.h"
+
+namespace rtc {
+
+// For testing, we can return predictable data.
+void SetRandomTestMode(bool test);
+
+// Initializes the RNG, and seeds it with the specified entropy.
+bool InitRandom(int seed);
+bool InitRandom(const char* seed, size_t len);
+
+// Generates a (cryptographically) random string of the given length.
+// We generate base64 values so that they will be printable.
+std::string CreateRandomString(size_t length);
+
+// Generates a (cryptographically) random string of the given length.
+// We generate base64 values so that they will be printable.
+// Return false if the random number generator failed.
+bool CreateRandomString(size_t length, std::string* str);
+
+// Generates a (cryptographically) random string of the given length,
+// with characters from the given table. Return false if the random
+// number generator failed.
+// For ease of implementation, the function requires that the table
+// size evenly divide 256; otherwise, it returns false.
+bool CreateRandomString(size_t length, const std::string& table,
+                        std::string* str);
+
+// Generates (cryptographically) random data of the given length.
+// Return false if the random number generator failed.
+bool CreateRandomData(size_t length, std::string* data);
+
+// Generates a (cryptographically) random UUID version 4 string.
+std::string CreateRandomUuid();
+
+// Generates a random id.
+uint32_t CreateRandomId();
+
+// Generates a 64 bit random id.
+uint64_t CreateRandomId64();
+
+// Generates a random id > 0.
+uint32_t CreateRandomNonZeroId();
+
+// Generates a random double between 0.0 (inclusive) and 1.0 (exclusive).
+double CreateRandomDouble();
+
+// Compute moving average with the given ratio between the previous average
+// value and the current value.
+double GetNextMovingAverage(double prev_average, double cur, double ratio);
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_HELPERS_H_
diff --git a/rtc_base/helpers_unittest.cc b/rtc_base/helpers_unittest.cc
new file mode 100644
index 0000000..9fa16a5
--- /dev/null
+++ b/rtc_base/helpers_unittest.cc
@@ -0,0 +1,111 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string>
+
+#include "rtc_base/buffer.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/helpers.h"
+#include "rtc_base/ssladapter.h"
+
+namespace rtc {
+
+class RandomTest : public testing::Test {};
+
+TEST_F(RandomTest, TestCreateRandomId) {
+  CreateRandomId();
+}
+
+TEST_F(RandomTest, TestCreateRandomDouble) {
+  for (int i = 0; i < 100; ++i) {
+    double r = CreateRandomDouble();
+    EXPECT_GE(r, 0.0);
+    EXPECT_LT(r, 1.0);
+  }
+}
+
+TEST_F(RandomTest, TestCreateNonZeroRandomId) {
+  EXPECT_NE(0U, CreateRandomNonZeroId());
+}
+
+TEST_F(RandomTest, TestCreateRandomString) {
+  std::string random = CreateRandomString(256);
+  EXPECT_EQ(256U, random.size());
+  std::string random2;
+  EXPECT_TRUE(CreateRandomString(256, &random2));
+  EXPECT_NE(random, random2);
+  EXPECT_EQ(256U, random2.size());
+}
+
+TEST_F(RandomTest, TestCreateRandomData) {
+  static size_t kRandomDataLength = 32;
+  std::string random1;
+  std::string random2;
+  EXPECT_TRUE(CreateRandomData(kRandomDataLength, &random1));
+  EXPECT_EQ(kRandomDataLength, random1.size());
+  EXPECT_TRUE(CreateRandomData(kRandomDataLength, &random2));
+  EXPECT_EQ(kRandomDataLength, random2.size());
+  EXPECT_NE(0, memcmp(random1.data(), random2.data(), kRandomDataLength));
+}
+
+TEST_F(RandomTest, TestCreateRandomStringEvenlyDivideTable) {
+  static std::string kUnbiasedTable("01234567");
+  std::string random;
+  EXPECT_TRUE(CreateRandomString(256, kUnbiasedTable, &random));
+  EXPECT_EQ(256U, random.size());
+
+  static std::string kBiasedTable("0123456789");
+  EXPECT_FALSE(CreateRandomString(256, kBiasedTable, &random));
+  EXPECT_EQ(0U, random.size());
+}
+
+TEST_F(RandomTest, TestCreateRandomUuid) {
+  std::string random = CreateRandomUuid();
+  EXPECT_EQ(36U, random.size());
+}
+
+TEST_F(RandomTest, TestCreateRandomForTest) {
+  // Make sure we get the output we expect.
+  SetRandomTestMode(true);
+  EXPECT_EQ(2154761789U, CreateRandomId());
+  EXPECT_EQ("h0ISP4S5SJKH/9EY", CreateRandomString(16));
+  EXPECT_EQ("41706e92-cdd3-46d9-a22d-8ff1737ffb11", CreateRandomUuid());
+  static size_t kRandomDataLength = 32;
+  std::string random;
+  EXPECT_TRUE(CreateRandomData(kRandomDataLength, &random));
+  EXPECT_EQ(kRandomDataLength, random.size());
+  Buffer expected("\xbd\x52\x2a\x4b\x97\x93\x2f\x1c"
+      "\xc4\x72\xab\xa2\x88\x68\x3e\xcc"
+      "\xa3\x8d\xaf\x13\x3b\xbc\x83\xbb"
+      "\x16\xf1\xcf\x56\x0c\xf5\x4a\x8b", kRandomDataLength);
+  EXPECT_EQ(0, memcmp(expected.data(), random.data(), kRandomDataLength));
+
+  // Reset and make sure we get the same output.
+  SetRandomTestMode(true);
+  EXPECT_EQ(2154761789U, CreateRandomId());
+  EXPECT_EQ("h0ISP4S5SJKH/9EY", CreateRandomString(16));
+  EXPECT_EQ("41706e92-cdd3-46d9-a22d-8ff1737ffb11", CreateRandomUuid());
+  EXPECT_TRUE(CreateRandomData(kRandomDataLength, &random));
+  EXPECT_EQ(kRandomDataLength, random.size());
+  EXPECT_EQ(0, memcmp(expected.data(), random.data(), kRandomDataLength));
+
+  // Test different character sets.
+  SetRandomTestMode(true);
+  std::string str;
+  EXPECT_TRUE(CreateRandomString(16, "a", &str));
+  EXPECT_EQ("aaaaaaaaaaaaaaaa", str);
+  EXPECT_TRUE(CreateRandomString(16, "abcd", &str));
+  EXPECT_EQ("dbaaabdaccbcabbd", str);
+
+  // Turn off test mode for other tests.
+  SetRandomTestMode(false);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/httpbase.cc b/rtc_base/httpbase.cc
new file mode 100644
index 0000000..ca85b57
--- /dev/null
+++ b/rtc_base/httpbase.cc
@@ -0,0 +1,888 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#if defined(WEBRTC_WIN)
+#include "rtc_base/win32.h"
+#else  // !WEBRTC_WIN
+#define SEC_E_CERT_EXPIRED (-2146893016)
+#endif  // !WEBRTC_WIN
+
+#include "rtc_base/checks.h"
+#include "rtc_base/httpbase.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/socket.h"
+#include "rtc_base/stringutils.h"
+#include "rtc_base/system/fallthrough.h"
+#include "rtc_base/thread.h"
+
+namespace rtc {
+
+//////////////////////////////////////////////////////////////////////
+// Helpers
+//////////////////////////////////////////////////////////////////////
+
+bool MatchHeader(const char* str, size_t len, HttpHeader header) {
+  const char* const header_str = ToString(header);
+  const size_t header_len = strlen(header_str);
+  return (len == header_len) && (_strnicmp(str, header_str, header_len) == 0);
+}
+
+enum {
+  MSG_READ
+};
+
+//////////////////////////////////////////////////////////////////////
+// HttpParser
+//////////////////////////////////////////////////////////////////////
+
+HttpParser::HttpParser() {
+  reset();
+}
+
+HttpParser::~HttpParser() {
+}
+
+void
+HttpParser::reset() {
+  state_ = ST_LEADER;
+  chunked_ = false;
+  data_size_ = SIZE_UNKNOWN;
+}
+
+HttpParser::ProcessResult
+HttpParser::Process(const char* buffer, size_t len, size_t* processed,
+                    HttpError* error) {
+  *processed = 0;
+  *error = HE_NONE;
+
+  if (state_ >= ST_COMPLETE) {
+    RTC_NOTREACHED();
+    return PR_COMPLETE;
+  }
+
+  while (true) {
+    if (state_ < ST_DATA) {
+      size_t pos = *processed;
+      while ((pos < len) && (buffer[pos] != '\n')) {
+        pos += 1;
+      }
+      if (pos >= len) {
+        break;  // don't have a full header
+      }
+      const char* line = buffer + *processed;
+      size_t len = (pos - *processed);
+      *processed = pos + 1;
+      while ((len > 0) && isspace(static_cast<unsigned char>(line[len-1]))) {
+        len -= 1;
+      }
+      ProcessResult result = ProcessLine(line, len, error);
+      RTC_LOG(LS_VERBOSE) << "Processed line, result=" << result;
+
+      if (PR_CONTINUE != result) {
+        return result;
+      }
+    } else if (data_size_ == 0) {
+      if (chunked_) {
+        state_ = ST_CHUNKTERM;
+      } else {
+        return PR_COMPLETE;
+      }
+    } else {
+      size_t available = len - *processed;
+      if (available <= 0) {
+        break; // no more data
+      }
+      if ((data_size_ != SIZE_UNKNOWN) && (available > data_size_)) {
+        available = data_size_;
+      }
+      size_t read = 0;
+      ProcessResult result = ProcessData(buffer + *processed, available, read,
+                                         error);
+      RTC_LOG(LS_VERBOSE) << "Processed data, result: " << result
+                          << " read: " << read << " err: " << error;
+
+      if (PR_CONTINUE != result) {
+        return result;
+      }
+      *processed += read;
+      if (data_size_ != SIZE_UNKNOWN) {
+        data_size_ -= read;
+      }
+    }
+  }
+
+  return PR_CONTINUE;
+}
+
+HttpParser::ProcessResult
+HttpParser::ProcessLine(const char* line, size_t len, HttpError* error) {
+  RTC_LOG_F(LS_VERBOSE) << " state: " << state_
+                        << " line: " << std::string(line, len)
+                        << " len: " << len << " err: " << error;
+
+  switch (state_) {
+  case ST_LEADER:
+    state_ = ST_HEADERS;
+    return ProcessLeader(line, len, error);
+
+  case ST_HEADERS:
+    if (len > 0) {
+      const char* value = strchrn(line, len, ':');
+      if (!value) {
+        *error = HE_PROTOCOL;
+        return PR_COMPLETE;
+      }
+      size_t nlen = (value - line);
+      const char* eol = line + len;
+      do {
+        value += 1;
+      } while ((value < eol) && isspace(static_cast<unsigned char>(*value)));
+      size_t vlen = eol - value;
+      if (MatchHeader(line, nlen, HH_CONTENT_LENGTH)) {
+        // sscanf isn't safe with strings that aren't null-terminated, and there
+        // is no guarantee that |value| is.
+        // Create a local copy that is null-terminated.
+        std::string value_str(value, vlen);
+        unsigned int temp_size;
+        if (sscanf(value_str.c_str(), "%u", &temp_size) != 1) {
+          *error = HE_PROTOCOL;
+          return PR_COMPLETE;
+        }
+        data_size_ = static_cast<size_t>(temp_size);
+      } else if (MatchHeader(line, nlen, HH_TRANSFER_ENCODING)) {
+        if ((vlen == 7) && (_strnicmp(value, "chunked", 7) == 0)) {
+          chunked_ = true;
+        } else if ((vlen == 8) && (_strnicmp(value, "identity", 8) == 0)) {
+          chunked_ = false;
+        } else {
+          *error = HE_PROTOCOL;
+          return PR_COMPLETE;
+        }
+      }
+      return ProcessHeader(line, nlen, value, vlen, error);
+    } else {
+      state_ = chunked_ ? ST_CHUNKSIZE : ST_DATA;
+      return ProcessHeaderComplete(chunked_, data_size_, error);
+    }
+    break;
+
+  case ST_CHUNKSIZE:
+    if (len > 0) {
+      char* ptr = nullptr;
+      data_size_ = strtoul(line, &ptr, 16);
+      if (ptr != line + len) {
+        *error = HE_PROTOCOL;
+        return PR_COMPLETE;
+      }
+      state_ = (data_size_ == 0) ? ST_TRAILERS : ST_DATA;
+    } else {
+      *error = HE_PROTOCOL;
+      return PR_COMPLETE;
+    }
+    break;
+
+  case ST_CHUNKTERM:
+    if (len > 0) {
+      *error = HE_PROTOCOL;
+      return PR_COMPLETE;
+    } else {
+      state_ = chunked_ ? ST_CHUNKSIZE : ST_DATA;
+    }
+    break;
+
+  case ST_TRAILERS:
+    if (len == 0) {
+      return PR_COMPLETE;
+    }
+    // *error = onHttpRecvTrailer();
+    break;
+
+  default:
+    RTC_NOTREACHED();
+    break;
+  }
+
+  return PR_CONTINUE;
+}
+
+bool
+HttpParser::is_valid_end_of_input() const {
+  return (state_ == ST_DATA) && (data_size_ == SIZE_UNKNOWN);
+}
+
+void
+HttpParser::complete(HttpError error) {
+  if (state_ < ST_COMPLETE) {
+    state_ = ST_COMPLETE;
+    OnComplete(error);
+  }
+}
+
+//////////////////////////////////////////////////////////////////////
+// HttpBase::DocumentStream
+//////////////////////////////////////////////////////////////////////
+
+class BlockingMemoryStream : public ExternalMemoryStream {
+public:
+  BlockingMemoryStream(char* buffer, size_t size)
+  : ExternalMemoryStream(buffer, size) { }
+
+  StreamResult DoReserve(size_t size, int* error) override {
+    return (buffer_length_ >= size) ? SR_SUCCESS : SR_BLOCK;
+  }
+};
+
+class HttpBase::DocumentStream : public StreamInterface {
+public:
+  DocumentStream(HttpBase* base) : base_(base), error_(HE_DEFAULT) { }
+
+  StreamState GetState() const override {
+    if (nullptr == base_)
+      return SS_CLOSED;
+    if (HM_RECV == base_->mode_)
+      return SS_OPEN;
+    return SS_OPENING;
+  }
+
+  StreamResult Read(void* buffer,
+                    size_t buffer_len,
+                    size_t* read,
+                    int* error) override {
+    if (!base_) {
+      if (error) *error = error_;
+      return (HE_NONE == error_) ? SR_EOS : SR_ERROR;
+    }
+
+    if (HM_RECV != base_->mode_) {
+      return SR_BLOCK;
+    }
+
+    // DoReceiveLoop writes http document data to the StreamInterface* document
+    // member of HttpData.  In this case, we want this data to be written
+    // directly to our buffer.  To accomplish this, we wrap our buffer with a
+    // StreamInterface, and replace the existing document with our wrapper.
+    // When the method returns, we restore the old document.  Ideally, we would
+    // pass our StreamInterface* to DoReceiveLoop, but due to the callbacks
+    // of HttpParser, we would still need to store the pointer temporarily.
+    std::unique_ptr<StreamInterface> stream(
+        new BlockingMemoryStream(reinterpret_cast<char*>(buffer), buffer_len));
+
+    // Replace the existing document with our wrapped buffer.
+    base_->data_->document.swap(stream);
+
+    // Pump the I/O loop.  DoReceiveLoop is guaranteed not to attempt to
+    // complete the I/O process, which means that our wrapper is not in danger
+    // of being deleted.  To ensure this, DoReceiveLoop returns true when it
+    // wants complete to be called.  We make sure to uninstall our wrapper
+    // before calling complete().
+    HttpError http_error;
+    bool complete = base_->DoReceiveLoop(&http_error);
+
+    // Reinstall the original output document.
+    base_->data_->document.swap(stream);
+
+    // If we reach the end of the receive stream, we disconnect our stream
+    // adapter from the HttpBase, and further calls to read will either return
+    // EOS or ERROR, appropriately.  Finally, we call complete().
+    StreamResult result = SR_BLOCK;
+    if (complete) {
+      HttpBase* base = Disconnect(http_error);
+      if (error) *error = error_;
+      result = (HE_NONE == error_) ? SR_EOS : SR_ERROR;
+      base->complete(http_error);
+    }
+
+    // Even if we are complete, if some data was read we must return SUCCESS.
+    // Future Reads will return EOS or ERROR based on the error_ variable.
+    size_t position;
+    stream->GetPosition(&position);
+    if (position > 0) {
+      if (read) *read = position;
+      result = SR_SUCCESS;
+    }
+    return result;
+  }
+
+  StreamResult Write(const void* data,
+                     size_t data_len,
+                     size_t* written,
+                     int* error) override {
+    if (error) *error = -1;
+    return SR_ERROR;
+  }
+
+  void Close() override {
+    if (base_) {
+      HttpBase* base = Disconnect(HE_NONE);
+      if (HM_RECV == base->mode_ && base->http_stream_) {
+        // Read I/O could have been stalled on the user of this DocumentStream,
+        // so restart the I/O process now that we've removed ourselves.
+        base->http_stream_->PostEvent(SE_READ, 0);
+      }
+    }
+  }
+
+  bool GetAvailable(size_t* size) const override {
+    if (!base_ || HM_RECV != base_->mode_)
+      return false;
+    size_t data_size = base_->GetDataRemaining();
+    if (SIZE_UNKNOWN == data_size)
+      return false;
+    if (size)
+      *size = data_size;
+    return true;
+  }
+
+  HttpBase* Disconnect(HttpError error) {
+    RTC_DCHECK(nullptr != base_);
+    RTC_DCHECK(nullptr != base_->doc_stream_);
+    HttpBase* base = base_;
+    base_->doc_stream_ = nullptr;
+    base_ = nullptr;
+    error_ = error;
+    return base;
+  }
+
+private:
+  HttpBase* base_;
+  HttpError error_;
+};
+
+//////////////////////////////////////////////////////////////////////
+// HttpBase
+//////////////////////////////////////////////////////////////////////
+
+HttpBase::HttpBase()
+    : mode_(HM_NONE),
+      data_(nullptr),
+      notify_(nullptr),
+      http_stream_(nullptr),
+      doc_stream_(nullptr) {}
+
+HttpBase::~HttpBase() {
+  RTC_DCHECK(HM_NONE == mode_);
+}
+
+bool
+HttpBase::isConnected() const {
+  return (http_stream_ != nullptr) && (http_stream_->GetState() == SS_OPEN);
+}
+
+bool
+HttpBase::attach(StreamInterface* stream) {
+  if ((mode_ != HM_NONE) || (http_stream_ != nullptr) || (stream == nullptr)) {
+    RTC_NOTREACHED();
+    return false;
+  }
+  http_stream_ = stream;
+  http_stream_->SignalEvent.connect(this, &HttpBase::OnHttpStreamEvent);
+  mode_ = (http_stream_->GetState() == SS_OPENING) ? HM_CONNECT : HM_NONE;
+  return true;
+}
+
+StreamInterface*
+HttpBase::detach() {
+  RTC_DCHECK(HM_NONE == mode_);
+  if (mode_ != HM_NONE) {
+    return nullptr;
+  }
+  StreamInterface* stream = http_stream_;
+  http_stream_ = nullptr;
+  if (stream) {
+    stream->SignalEvent.disconnect(this);
+  }
+  return stream;
+}
+
+void
+HttpBase::send(HttpData* data) {
+  RTC_DCHECK(HM_NONE == mode_);
+  if (mode_ != HM_NONE) {
+    return;
+  } else if (!isConnected()) {
+    OnHttpStreamEvent(http_stream_, SE_CLOSE, HE_DISCONNECTED);
+    return;
+  }
+
+  mode_ = HM_SEND;
+  data_ = data;
+  len_ = 0;
+  ignore_data_ = chunk_data_ = false;
+
+  if (data_->document) {
+    data_->document->SignalEvent.connect(this, &HttpBase::OnDocumentEvent);
+  }
+
+  std::string encoding;
+  if (data_->hasHeader(HH_TRANSFER_ENCODING, &encoding)
+      && (encoding == "chunked")) {
+    chunk_data_ = true;
+  }
+
+  len_ = data_->formatLeader(buffer_, sizeof(buffer_));
+  len_ += strcpyn(buffer_ + len_, sizeof(buffer_) - len_, "\r\n");
+
+  header_ = data_->begin();
+  if (header_ == data_->end()) {
+    // We must call this at least once, in the case where there are no headers.
+    queue_headers();
+  }
+
+  flush_data();
+}
+
+void
+HttpBase::recv(HttpData* data) {
+  RTC_DCHECK(HM_NONE == mode_);
+  if (mode_ != HM_NONE) {
+    return;
+  } else if (!isConnected()) {
+    OnHttpStreamEvent(http_stream_, SE_CLOSE, HE_DISCONNECTED);
+    return;
+  }
+
+  mode_ = HM_RECV;
+  data_ = data;
+  len_ = 0;
+  ignore_data_ = chunk_data_ = false;
+
+  reset();
+  if (doc_stream_) {
+    doc_stream_->SignalEvent(doc_stream_, SE_OPEN | SE_READ, 0);
+  } else {
+    read_and_process_data();
+  }
+}
+
+void
+HttpBase::abort(HttpError err) {
+  if (mode_ != HM_NONE) {
+    if (http_stream_ != nullptr) {
+      http_stream_->Close();
+    }
+    do_complete(err);
+  }
+}
+
+StreamInterface* HttpBase::GetDocumentStream() {
+  if (doc_stream_)
+    return nullptr;
+  doc_stream_ = new DocumentStream(this);
+  return doc_stream_;
+}
+
+HttpError HttpBase::HandleStreamClose(int error) {
+  if (http_stream_ != nullptr) {
+    http_stream_->Close();
+  }
+  if (error == 0) {
+    if ((mode_ == HM_RECV) && is_valid_end_of_input()) {
+      return HE_NONE;
+    } else {
+      return HE_DISCONNECTED;
+    }
+  } else if (error == SOCKET_EACCES) {
+    return HE_AUTH;
+  } else if (error == SEC_E_CERT_EXPIRED) {
+    return HE_CERTIFICATE_EXPIRED;
+  }
+  RTC_LOG_F(LS_ERROR) << "(" << error << ")";
+  return (HM_CONNECT == mode_) ? HE_CONNECT_FAILED : HE_SOCKET_ERROR;
+}
+
+bool HttpBase::DoReceiveLoop(HttpError* error) {
+  RTC_DCHECK(HM_RECV == mode_);
+  RTC_DCHECK(nullptr != error);
+
+  // Do to the latency between receiving read notifications from
+  // pseudotcpchannel, we rely on repeated calls to read in order to acheive
+  // ideal throughput.  The number of reads is limited to prevent starving
+  // the caller.
+
+  size_t loop_count = 0;
+  const size_t kMaxReadCount = 20;
+  bool process_requires_more_data = false;
+  do {
+    // The most frequent use of this function is response to new data available
+    // on http_stream_.  Therefore, we optimize by attempting to read from the
+    // network first (as opposed to processing existing data first).
+
+    if (len_ < sizeof(buffer_)) {
+      // Attempt to buffer more data.
+      size_t read;
+      int read_error;
+      StreamResult read_result = http_stream_->Read(buffer_ + len_,
+                                                    sizeof(buffer_) - len_,
+                                                    &read, &read_error);
+      switch (read_result) {
+      case SR_SUCCESS:
+        RTC_DCHECK(len_ + read <= sizeof(buffer_));
+        len_ += read;
+        break;
+      case SR_BLOCK:
+        if (process_requires_more_data) {
+          // We're can't make progress until more data is available.
+          return false;
+        }
+        // Attempt to process the data already in our buffer.
+        break;
+      case SR_EOS:
+        // Clean close, with no error.
+        read_error = 0;
+        RTC_FALLTHROUGH();  // Fall through to HandleStreamClose.
+      case SR_ERROR:
+        *error = HandleStreamClose(read_error);
+        return true;
+      }
+    } else if (process_requires_more_data) {
+      // We have too much unprocessed data in our buffer.  This should only
+      // occur when a single HTTP header is longer than the buffer size (32K).
+      // Anything longer than that is almost certainly an error.
+      *error = HE_OVERFLOW;
+      return true;
+    }
+
+    // Process data in our buffer.  Process is not guaranteed to process all
+    // the buffered data.  In particular, it will wait until a complete
+    // protocol element (such as http header, or chunk size) is available,
+    // before processing it in its entirety.  Also, it is valid and sometimes
+    // necessary to call Process with an empty buffer, since the state machine
+    // may have interrupted state transitions to complete.
+    size_t processed;
+    ProcessResult process_result = Process(buffer_, len_, &processed,
+                                            error);
+    RTC_DCHECK(processed <= len_);
+    len_ -= processed;
+    memmove(buffer_, buffer_ + processed, len_);
+    switch (process_result) {
+    case PR_CONTINUE:
+      // We need more data to make progress.
+      process_requires_more_data = true;
+      break;
+    case PR_BLOCK:
+      // We're stalled on writing the processed data.
+      return false;
+    case PR_COMPLETE:
+      // *error already contains the correct code.
+      return true;
+    }
+  } while (++loop_count <= kMaxReadCount);
+
+  RTC_LOG_F(LS_WARNING) << "danger of starvation";
+  return false;
+}
+
+void
+HttpBase::read_and_process_data() {
+  HttpError error;
+  if (DoReceiveLoop(&error)) {
+    complete(error);
+  }
+}
+
+void
+HttpBase::flush_data() {
+  RTC_DCHECK(HM_SEND == mode_);
+
+  // When send_required is true, no more buffering can occur without a network
+  // write.
+  bool send_required = (len_ >= sizeof(buffer_));
+
+  while (true) {
+    RTC_DCHECK(len_ <= sizeof(buffer_));
+
+    // HTTP is inherently sensitive to round trip latency, since a frequent use
+    // case is for small requests and responses to be sent back and forth, and
+    // the lack of pipelining forces a single request to take a minimum of the
+    // round trip time.  As a result, it is to our benefit to pack as much data
+    // into each packet as possible.  Thus, we defer network writes until we've
+    // buffered as much data as possible.
+
+    if (!send_required && (header_ != data_->end())) {
+      // First, attempt to queue more header data.
+      send_required = queue_headers();
+    }
+
+    if (!send_required && data_->document) {
+      // Next, attempt to queue document data.
+
+      const size_t kChunkDigits = 8;
+      size_t offset, reserve;
+      if (chunk_data_) {
+        // Reserve characters at the start for X-byte hex value and \r\n
+        offset = len_ + kChunkDigits + 2;
+        // ... and 2 characters at the end for \r\n
+        reserve = offset + 2;
+      } else {
+        offset = len_;
+        reserve = offset;
+      }
+
+      if (reserve >= sizeof(buffer_)) {
+        send_required = true;
+      } else {
+        size_t read;
+        int error;
+        StreamResult result = data_->document->Read(buffer_ + offset,
+                                                    sizeof(buffer_) - reserve,
+                                                    &read, &error);
+        if (result == SR_SUCCESS) {
+          RTC_DCHECK(reserve + read <= sizeof(buffer_));
+          if (chunk_data_) {
+            // Prepend the chunk length in hex.
+            // Note: sprintfn appends a null terminator, which is why we can't
+            // combine it with the line terminator.
+            sprintfn(buffer_ + len_, kChunkDigits + 1, "%.*x",
+                     kChunkDigits, read);
+            // Add line terminator to the chunk length.
+            memcpy(buffer_ + len_ + kChunkDigits, "\r\n", 2);
+            // Add line terminator to the end of the chunk.
+            memcpy(buffer_ + offset + read, "\r\n", 2);
+          }
+          len_ = reserve + read;
+        } else if (result == SR_BLOCK) {
+          // Nothing to do but flush data to the network.
+          send_required = true;
+        } else if (result == SR_EOS) {
+          if (chunk_data_) {
+            // Append the empty chunk and empty trailers, then turn off
+            // chunking.
+            RTC_DCHECK(len_ + 5 <= sizeof(buffer_));
+            memcpy(buffer_ + len_, "0\r\n\r\n", 5);
+            len_ += 5;
+            chunk_data_ = false;
+          } else if (0 == len_) {
+            // No more data to read, and no more data to write.
+            do_complete();
+            return;
+          }
+          // Although we are done reading data, there is still data which needs
+          // to be flushed to the network.
+          send_required = true;
+        } else {
+          RTC_LOG_F(LS_ERROR) << "Read error: " << error;
+          do_complete(HE_STREAM);
+          return;
+        }
+      }
+    }
+
+    if (0 == len_) {
+      // No data currently available to send.
+      if (!data_->document) {
+        // If there is no source document, that means we're done.
+        do_complete();
+      }
+      return;
+    }
+
+    size_t written;
+    int error;
+    StreamResult result = http_stream_->Write(buffer_, len_, &written, &error);
+    if (result == SR_SUCCESS) {
+      RTC_DCHECK(written <= len_);
+      len_ -= written;
+      memmove(buffer_, buffer_ + written, len_);
+      send_required = false;
+    } else if (result == SR_BLOCK) {
+      if (send_required) {
+        // Nothing more we can do until network is writeable.
+        return;
+      }
+    } else {
+      RTC_DCHECK(result == SR_ERROR);
+      RTC_LOG_F(LS_ERROR) << "error";
+      OnHttpStreamEvent(http_stream_, SE_CLOSE, error);
+      return;
+    }
+  }
+
+  RTC_NOTREACHED();
+}
+
+bool
+HttpBase::queue_headers() {
+  RTC_DCHECK(HM_SEND == mode_);
+  while (header_ != data_->end()) {
+    size_t len = sprintfn(buffer_ + len_, sizeof(buffer_) - len_,
+                          "%.*s: %.*s\r\n",
+                          header_->first.size(), header_->first.data(),
+                          header_->second.size(), header_->second.data());
+    if (len_ + len < sizeof(buffer_) - 3) {
+      len_ += len;
+      ++header_;
+    } else if (len_ == 0) {
+      RTC_LOG(WARNING) << "discarding header that is too long: "
+                       << header_->first;
+      ++header_;
+    } else {
+      // Not enough room for the next header, write to network first.
+      return true;
+    }
+  }
+  // End of headers
+  len_ += strcpyn(buffer_ + len_, sizeof(buffer_) - len_, "\r\n");
+  return false;
+}
+
+void
+HttpBase::do_complete(HttpError err) {
+  RTC_DCHECK(mode_ != HM_NONE);
+  HttpMode mode = mode_;
+  mode_ = HM_NONE;
+  if (data_ && data_->document) {
+    data_->document->SignalEvent.disconnect(this);
+  }
+  data_ = nullptr;
+  if ((HM_RECV == mode) && doc_stream_) {
+    RTC_DCHECK(HE_NONE !=
+               err);  // We should have Disconnected doc_stream_ already.
+    DocumentStream* ds = doc_stream_;
+    ds->Disconnect(err);
+    ds->SignalEvent(ds, SE_CLOSE, err);
+  }
+  if (notify_) {
+    notify_->onHttpComplete(mode, err);
+  }
+}
+
+//
+// Stream Signals
+//
+
+void
+HttpBase::OnHttpStreamEvent(StreamInterface* stream, int events, int error) {
+  RTC_DCHECK(stream == http_stream_);
+  if ((events & SE_OPEN) && (mode_ == HM_CONNECT)) {
+    do_complete();
+    return;
+  }
+
+  if ((events & SE_WRITE) && (mode_ == HM_SEND)) {
+    flush_data();
+    return;
+  }
+
+  if ((events & SE_READ) && (mode_ == HM_RECV)) {
+    if (doc_stream_) {
+      doc_stream_->SignalEvent(doc_stream_, SE_READ, 0);
+    } else {
+      read_and_process_data();
+    }
+    return;
+  }
+
+  if ((events & SE_CLOSE) == 0)
+    return;
+
+  HttpError http_error = HandleStreamClose(error);
+  if (mode_ == HM_RECV) {
+    complete(http_error);
+  } else if (mode_ != HM_NONE) {
+    do_complete(http_error);
+  } else if (notify_) {
+    notify_->onHttpClosed(http_error);
+  }
+}
+
+void
+HttpBase::OnDocumentEvent(StreamInterface* stream, int events, int error) {
+  RTC_DCHECK(stream == data_->document.get());
+  if ((events & SE_WRITE) && (mode_ == HM_RECV)) {
+    read_and_process_data();
+    return;
+  }
+
+  if ((events & SE_READ) && (mode_ == HM_SEND)) {
+    flush_data();
+    return;
+  }
+
+  if (events & SE_CLOSE) {
+    RTC_LOG_F(LS_ERROR) << "Read error: " << error;
+    do_complete(HE_STREAM);
+    return;
+  }
+}
+
+//
+// HttpParser Implementation
+//
+
+HttpParser::ProcessResult
+HttpBase::ProcessLeader(const char* line, size_t len, HttpError* error) {
+  *error = data_->parseLeader(line, len);
+  return (HE_NONE == *error) ? PR_CONTINUE : PR_COMPLETE;
+}
+
+HttpParser::ProcessResult
+HttpBase::ProcessHeader(const char* name, size_t nlen, const char* value,
+                        size_t vlen, HttpError* error) {
+  std::string sname(name, nlen), svalue(value, vlen);
+  data_->addHeader(sname, svalue);
+  return PR_CONTINUE;
+}
+
+HttpParser::ProcessResult
+HttpBase::ProcessHeaderComplete(bool chunked, size_t& data_size,
+                                HttpError* error) {
+  StreamInterface* old_docstream = doc_stream_;
+  if (notify_) {
+    *error = notify_->onHttpHeaderComplete(chunked, data_size);
+    // The request must not be aborted as a result of this callback.
+    RTC_DCHECK(nullptr != data_);
+  }
+  if ((HE_NONE == *error) && data_->document) {
+    data_->document->SignalEvent.connect(this, &HttpBase::OnDocumentEvent);
+  }
+  if (HE_NONE != *error) {
+    return PR_COMPLETE;
+  }
+  if (old_docstream != doc_stream_) {
+    // Break out of Process loop, since our I/O model just changed.
+    return PR_BLOCK;
+  }
+  return PR_CONTINUE;
+}
+
+HttpParser::ProcessResult
+HttpBase::ProcessData(const char* data, size_t len, size_t& read,
+                      HttpError* error) {
+  if (ignore_data_ || !data_->document) {
+    read = len;
+    return PR_CONTINUE;
+  }
+  int write_error = 0;
+  switch (data_->document->Write(data, len, &read, &write_error)) {
+  case SR_SUCCESS:
+    return PR_CONTINUE;
+  case SR_BLOCK:
+    return PR_BLOCK;
+  case SR_EOS:
+    RTC_LOG_F(LS_ERROR) << "Unexpected EOS";
+    *error = HE_STREAM;
+    return PR_COMPLETE;
+  case SR_ERROR:
+  default:
+    RTC_LOG_F(LS_ERROR) << "Write error: " << write_error;
+    *error = HE_STREAM;
+    return PR_COMPLETE;
+  }
+}
+
+void
+HttpBase::OnComplete(HttpError err) {
+  RTC_LOG_F(LS_VERBOSE);
+  do_complete(err);
+}
+
+} // namespace rtc
diff --git a/rtc_base/httpbase.h b/rtc_base/httpbase.h
new file mode 100644
index 0000000..5ca0134
--- /dev/null
+++ b/rtc_base/httpbase.h
@@ -0,0 +1,187 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef RTC_BASE_HTTPBASE_H_
+#define RTC_BASE_HTTPBASE_H_
+
+#include "rtc_base/httpcommon.h"
+
+namespace rtc {
+
+class StreamInterface;
+
+///////////////////////////////////////////////////////////////////////////////
+// HttpParser - Parses an HTTP stream provided via Process and end_of_input, and
+// generates events for:
+//  Structural Elements: Leader, Headers, Document Data
+//  Events: End of Headers, End of Document, Errors
+///////////////////////////////////////////////////////////////////////////////
+
+class HttpParser {
+public:
+  enum ProcessResult { PR_CONTINUE, PR_BLOCK, PR_COMPLETE };
+  HttpParser();
+  virtual ~HttpParser();
+
+  void reset();
+  ProcessResult Process(const char* buffer, size_t len, size_t* processed,
+                        HttpError* error);
+  bool is_valid_end_of_input() const;
+  void complete(HttpError err);
+
+  size_t GetDataRemaining() const { return data_size_; }
+
+protected:
+  ProcessResult ProcessLine(const char* line, size_t len, HttpError* error);
+
+  // HttpParser Interface
+  virtual ProcessResult ProcessLeader(const char* line, size_t len,
+                                      HttpError* error) = 0;
+  virtual ProcessResult ProcessHeader(const char* name, size_t nlen,
+                                      const char* value, size_t vlen,
+                                      HttpError* error) = 0;
+  virtual ProcessResult ProcessHeaderComplete(bool chunked, size_t& data_size,
+                                              HttpError* error) = 0;
+  virtual ProcessResult ProcessData(const char* data, size_t len, size_t& read,
+                                    HttpError* error) = 0;
+  virtual void OnComplete(HttpError err) = 0;
+
+private:
+  enum State {
+    ST_LEADER, ST_HEADERS,
+    ST_CHUNKSIZE, ST_CHUNKTERM, ST_TRAILERS,
+    ST_DATA, ST_COMPLETE
+  } state_;
+  bool chunked_;
+  size_t data_size_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// IHttpNotify
+///////////////////////////////////////////////////////////////////////////////
+
+enum HttpMode { HM_NONE, HM_CONNECT, HM_RECV, HM_SEND };
+
+class IHttpNotify {
+public:
+  virtual ~IHttpNotify() {}
+  virtual HttpError onHttpHeaderComplete(bool chunked, size_t& data_size) = 0;
+  virtual void onHttpComplete(HttpMode mode, HttpError err) = 0;
+  virtual void onHttpClosed(HttpError err) = 0;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// HttpBase - Provides a state machine for implementing HTTP-based components.
+// Attach HttpBase to a StreamInterface which represents a bidirectional HTTP
+// stream, and then call send() or recv() to initiate sending or receiving one
+// side of an HTTP transaction.  By default, HttpBase operates as an I/O pump,
+// moving data from the HTTP stream to the HttpData object and vice versa.
+// However, it can also operate in stream mode, in which case the user of the
+// stream interface drives I/O via calls to Read().
+///////////////////////////////////////////////////////////////////////////////
+
+class HttpBase
+: private HttpParser,
+  public sigslot::has_slots<>
+{
+public:
+  HttpBase();
+  ~HttpBase() override;
+
+  void notify(IHttpNotify* notify) { notify_ = notify; }
+  bool attach(StreamInterface* stream);
+  StreamInterface* stream() { return http_stream_; }
+  StreamInterface* detach();
+  bool isConnected() const;
+
+  void send(HttpData* data);
+  void recv(HttpData* data);
+  void abort(HttpError err);
+
+  HttpMode mode() const { return mode_; }
+
+  void set_ignore_data(bool ignore) { ignore_data_ = ignore; }
+  bool ignore_data() const { return ignore_data_; }
+
+  // Obtaining this stream puts HttpBase into stream mode until the stream
+  // is closed.  HttpBase can only expose one open stream interface at a time.
+  // Further calls will return null.
+  StreamInterface* GetDocumentStream();
+
+protected:
+  // Do cleanup when the http stream closes (error may be 0 for a clean
+  // shutdown), and return the error code to signal.
+  HttpError HandleStreamClose(int error);
+
+  // DoReceiveLoop acts as a data pump, pulling data from the http stream,
+  // pushing it through the HttpParser, and then populating the HttpData object
+  // based on the callbacks from the parser.  One of the most interesting
+  // callbacks is ProcessData, which provides the actual http document body.
+  // This data is then written to the HttpData::document.  As a result, data
+  // flows from the network to the document, with some incidental protocol
+  // parsing in between.
+  // Ideally, we would pass in the document* to DoReceiveLoop, to more easily
+  // support GetDocumentStream().  However, since the HttpParser is callback
+  // driven, we are forced to store the pointer somewhere until the callback
+  // is triggered.
+  // Returns true if the received document has finished, and
+  // HttpParser::complete should be called.
+  bool DoReceiveLoop(HttpError* err);
+
+  void read_and_process_data();
+  void flush_data();
+  bool queue_headers();
+  void do_complete(HttpError err = HE_NONE);
+
+  void OnHttpStreamEvent(StreamInterface* stream, int events, int error);
+  void OnDocumentEvent(StreamInterface* stream, int events, int error);
+
+  // HttpParser Interface
+  ProcessResult ProcessLeader(const char* line,
+                              size_t len,
+                              HttpError* error) override;
+  ProcessResult ProcessHeader(const char* name,
+                              size_t nlen,
+                              const char* value,
+                              size_t vlen,
+                              HttpError* error) override;
+  ProcessResult ProcessHeaderComplete(bool chunked,
+                                      size_t& data_size,
+                                      HttpError* error) override;
+  ProcessResult ProcessData(const char* data,
+                            size_t len,
+                            size_t& read,
+                            HttpError* error) override;
+  void OnComplete(HttpError err) override;
+
+private:
+  class DocumentStream;
+  friend class DocumentStream;
+
+  enum { kBufferSize = 32 * 1024 };
+
+  HttpMode mode_;
+  HttpData* data_;
+  IHttpNotify* notify_;
+  StreamInterface* http_stream_;
+  DocumentStream* doc_stream_;
+  char buffer_[kBufferSize];
+  size_t len_;
+
+  bool ignore_data_, chunk_data_;
+  HttpData::const_iterator header_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+} // namespace rtc
+
+#endif // RTC_BASE_HTTPBASE_H_
diff --git a/rtc_base/httpbase_unittest.cc b/rtc_base/httpbase_unittest.cc
new file mode 100644
index 0000000..1b7ab7f
--- /dev/null
+++ b/rtc_base/httpbase_unittest.cc
@@ -0,0 +1,525 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+
+#include "rtc_base/gunit.h"
+#include "rtc_base/httpbase.h"
+#include "rtc_base/testutils.h"
+
+namespace rtc {
+
+const char* const kHttpResponse =
+  "HTTP/1.1 200\r\n"
+  "Connection: Keep-Alive\r\n"
+  "Content-Type: text/plain\r\n"
+  "Proxy-Authorization: 42\r\n"
+  "Transfer-Encoding: chunked\r\n"
+  "\r\n"
+  "00000008\r\n"
+  "Goodbye!\r\n"
+  "0\r\n\r\n";
+
+const char* const kHttpEmptyResponse =
+  "HTTP/1.1 200\r\n"
+  "Connection: Keep-Alive\r\n"
+  "Content-Length: 0\r\n"
+  "Proxy-Authorization: 42\r\n"
+  "\r\n";
+
+const char* const kHttpResponsePrefix =
+  "HTTP/1.1 200\r\n"
+  "Connection: Keep-Alive\r\n"
+  "Content-Type: text/plain\r\n"
+  "Proxy-Authorization: 42\r\n"
+  "Transfer-Encoding: chunked\r\n"
+  "\r\n"
+  "8\r\n"
+  "Goodbye!\r\n";
+
+class HttpBaseTest : public testing::Test, public IHttpNotify {
+public:
+  enum EventType { E_HEADER_COMPLETE, E_COMPLETE, E_CLOSED };
+  struct Event {
+    EventType event;
+    bool chunked;
+    size_t data_size;
+    HttpMode mode;
+    HttpError err;
+  };
+  HttpBaseTest() : mem(nullptr), obtain_stream(false), http_stream(nullptr) {}
+
+  void TearDown() override {
+    delete http_stream;
+    // Avoid an ASSERT, in case a test doesn't clean up properly
+    base.abort(HE_NONE);
+  }
+
+  HttpError onHttpHeaderComplete(bool chunked, size_t& data_size) override {
+    RTC_LOG_F(LS_VERBOSE) << "chunked: " << chunked << " size: " << data_size;
+    Event e = { E_HEADER_COMPLETE, chunked, data_size, HM_NONE, HE_NONE};
+    events.push_back(e);
+    if (obtain_stream) {
+      ObtainDocumentStream();
+    }
+    return HE_NONE;
+  }
+  void onHttpComplete(HttpMode mode, HttpError err) override {
+    RTC_LOG_F(LS_VERBOSE) << "mode: " << mode << " err: " << err;
+    Event e = { E_COMPLETE, false, 0, mode, err };
+    events.push_back(e);
+  }
+  void onHttpClosed(HttpError err) override {
+    RTC_LOG_F(LS_VERBOSE) << "err: " << err;
+    Event e = { E_CLOSED, false, 0, HM_NONE, err };
+    events.push_back(e);
+  }
+
+  void SetupSource(const char* response);
+
+  void VerifyHeaderComplete(size_t event_count, bool empty_doc);
+  void VerifyDocumentContents(const char* expected_data,
+                              size_t expected_length = SIZE_UNKNOWN);
+
+  void ObtainDocumentStream();
+  void VerifyDocumentStreamIsOpening();
+  void VerifyDocumentStreamOpenEvent();
+  void ReadDocumentStreamData(const char* expected_data);
+  void VerifyDocumentStreamIsEOS();
+
+  void SetupDocument(const char* response);
+  void VerifySourceContents(const char* expected_data,
+                            size_t expected_length = SIZE_UNKNOWN);
+
+  void VerifyTransferComplete(HttpMode mode, HttpError error);
+
+  HttpBase base;
+  MemoryStream* mem;
+  HttpResponseData data;
+
+  // The source of http data, and source events
+  webrtc::testing::StreamSource src;
+  std::vector<Event> events;
+
+  // Document stream, and stream events
+  bool obtain_stream;
+  StreamInterface* http_stream;
+  webrtc::testing::StreamSink sink;
+};
+
+void HttpBaseTest::SetupSource(const char* http_data) {
+  RTC_LOG_F(LS_VERBOSE) << "Enter";
+
+  src.SetState(SS_OPENING);
+  src.QueueString(http_data);
+
+  base.notify(this);
+  base.attach(&src);
+  EXPECT_TRUE(events.empty());
+
+  src.SetState(SS_OPEN);
+  ASSERT_EQ(1U, events.size());
+  EXPECT_EQ(E_COMPLETE, events[0].event);
+  EXPECT_EQ(HM_CONNECT, events[0].mode);
+  EXPECT_EQ(HE_NONE, events[0].err);
+  events.clear();
+
+  mem = new MemoryStream;
+  data.document.reset(mem);
+  RTC_LOG_F(LS_VERBOSE) << "Exit";
+}
+
+void HttpBaseTest::VerifyHeaderComplete(size_t event_count, bool empty_doc) {
+  RTC_LOG_F(LS_VERBOSE) << "Enter";
+
+  ASSERT_EQ(event_count, events.size());
+  EXPECT_EQ(E_HEADER_COMPLETE, events[0].event);
+
+  std::string header;
+  EXPECT_EQ(HVER_1_1, data.version);
+  EXPECT_EQ(static_cast<uint32_t>(HC_OK), data.scode);
+  EXPECT_TRUE(data.hasHeader(HH_PROXY_AUTHORIZATION, &header));
+  EXPECT_EQ("42", header);
+  EXPECT_TRUE(data.hasHeader(HH_CONNECTION, &header));
+  EXPECT_EQ("Keep-Alive", header);
+
+  if (empty_doc) {
+    EXPECT_FALSE(events[0].chunked);
+    EXPECT_EQ(0U, events[0].data_size);
+
+    EXPECT_TRUE(data.hasHeader(HH_CONTENT_LENGTH, &header));
+    EXPECT_EQ("0", header);
+  } else {
+    EXPECT_TRUE(events[0].chunked);
+    EXPECT_EQ(SIZE_UNKNOWN, events[0].data_size);
+
+    EXPECT_TRUE(data.hasHeader(HH_CONTENT_TYPE, &header));
+    EXPECT_EQ("text/plain", header);
+    EXPECT_TRUE(data.hasHeader(HH_TRANSFER_ENCODING, &header));
+    EXPECT_EQ("chunked", header);
+  }
+  RTC_LOG_F(LS_VERBOSE) << "Exit";
+}
+
+void HttpBaseTest::VerifyDocumentContents(const char* expected_data,
+                                          size_t expected_length) {
+  RTC_LOG_F(LS_VERBOSE) << "Enter";
+
+  if (SIZE_UNKNOWN == expected_length) {
+    expected_length = strlen(expected_data);
+  }
+  EXPECT_EQ(mem, data.document.get());
+
+  size_t length;
+  mem->GetSize(&length);
+  EXPECT_EQ(expected_length, length);
+  EXPECT_TRUE(0 == memcmp(expected_data, mem->GetBuffer(), length));
+  RTC_LOG_F(LS_VERBOSE) << "Exit";
+}
+
+void HttpBaseTest::ObtainDocumentStream() {
+  RTC_LOG_F(LS_VERBOSE) << "Enter";
+  EXPECT_FALSE(http_stream);
+  http_stream = base.GetDocumentStream();
+  ASSERT_TRUE(nullptr != http_stream);
+  sink.Monitor(http_stream);
+  RTC_LOG_F(LS_VERBOSE) << "Exit";
+}
+
+void HttpBaseTest::VerifyDocumentStreamIsOpening() {
+  RTC_LOG_F(LS_VERBOSE) << "Enter";
+  ASSERT_TRUE(nullptr != http_stream);
+  EXPECT_EQ(0, sink.Events(http_stream));
+  EXPECT_EQ(SS_OPENING, http_stream->GetState());
+
+  size_t read = 0;
+  char buffer[5] = { 0 };
+  EXPECT_EQ(SR_BLOCK,
+            http_stream->Read(buffer, sizeof(buffer), &read, nullptr));
+  RTC_LOG_F(LS_VERBOSE) << "Exit";
+}
+
+void HttpBaseTest::VerifyDocumentStreamOpenEvent() {
+  RTC_LOG_F(LS_VERBOSE) << "Enter";
+
+  ASSERT_TRUE(nullptr != http_stream);
+  EXPECT_EQ(SE_OPEN | SE_READ, sink.Events(http_stream));
+  EXPECT_EQ(SS_OPEN, http_stream->GetState());
+
+  // HTTP headers haven't arrived yet
+  EXPECT_EQ(0U, events.size());
+  EXPECT_EQ(static_cast<uint32_t>(HC_INTERNAL_SERVER_ERROR), data.scode);
+  RTC_LOG_F(LS_VERBOSE) << "Exit";
+}
+
+void HttpBaseTest::ReadDocumentStreamData(const char* expected_data) {
+  RTC_LOG_F(LS_VERBOSE) << "Enter";
+
+  ASSERT_TRUE(nullptr != http_stream);
+  EXPECT_EQ(SS_OPEN, http_stream->GetState());
+
+  // Pump the HTTP I/O using Read, and verify the results.
+  size_t verified_length = 0;
+  const size_t expected_length = strlen(expected_data);
+  while (verified_length < expected_length) {
+    size_t read = 0;
+    char buffer[5] = { 0 };
+    size_t amt_to_read =
+        std::min(expected_length - verified_length, sizeof(buffer));
+    EXPECT_EQ(SR_SUCCESS,
+              http_stream->Read(buffer, amt_to_read, &read, nullptr));
+    EXPECT_EQ(amt_to_read, read);
+    EXPECT_TRUE(0 == memcmp(expected_data + verified_length, buffer, read));
+    verified_length += read;
+  }
+  RTC_LOG_F(LS_VERBOSE) << "Exit";
+}
+
+void HttpBaseTest::VerifyDocumentStreamIsEOS() {
+  RTC_LOG_F(LS_VERBOSE) << "Enter";
+
+  ASSERT_TRUE(nullptr != http_stream);
+  size_t read = 0;
+  char buffer[5] = { 0 };
+  EXPECT_EQ(SR_EOS, http_stream->Read(buffer, sizeof(buffer), &read, nullptr));
+  EXPECT_EQ(SS_CLOSED, http_stream->GetState());
+
+  // When EOS is caused by Read, we don't expect SE_CLOSE
+  EXPECT_EQ(0, sink.Events(http_stream));
+  RTC_LOG_F(LS_VERBOSE) << "Exit";
+}
+
+void HttpBaseTest::SetupDocument(const char* document_data) {
+  RTC_LOG_F(LS_VERBOSE) << "Enter";
+  src.SetState(SS_OPEN);
+
+  base.notify(this);
+  base.attach(&src);
+  EXPECT_TRUE(events.empty());
+
+  if (document_data) {
+    // Note: we could just call data.set_success("text/plain", mem), but that
+    // won't allow us to use the chunked transfer encoding.
+    mem = new MemoryStream(document_data);
+    data.document.reset(mem);
+    data.setHeader(HH_CONTENT_TYPE, "text/plain");
+    data.setHeader(HH_TRANSFER_ENCODING, "chunked");
+  } else {
+    data.setHeader(HH_CONTENT_LENGTH, "0");
+  }
+  data.scode = HC_OK;
+  data.setHeader(HH_PROXY_AUTHORIZATION, "42");
+  data.setHeader(HH_CONNECTION, "Keep-Alive");
+  RTC_LOG_F(LS_VERBOSE) << "Exit";
+}
+
+void HttpBaseTest::VerifySourceContents(const char* expected_data,
+                                        size_t expected_length) {
+  RTC_LOG_F(LS_VERBOSE) << "Enter";
+  if (SIZE_UNKNOWN == expected_length) {
+    expected_length = strlen(expected_data);
+  }
+  std::string contents = src.ReadData();
+  EXPECT_EQ(expected_length, contents.length());
+  EXPECT_TRUE(0 == memcmp(expected_data, contents.data(), expected_length));
+  RTC_LOG_F(LS_VERBOSE) << "Exit";
+}
+
+void HttpBaseTest::VerifyTransferComplete(HttpMode mode, HttpError error) {
+  RTC_LOG_F(LS_VERBOSE) << "Enter";
+  // Verify that http operation has completed
+  ASSERT_TRUE(events.size() > 0);
+  size_t last_event = events.size() - 1;
+  EXPECT_EQ(E_COMPLETE, events[last_event].event);
+  EXPECT_EQ(mode, events[last_event].mode);
+  EXPECT_EQ(error, events[last_event].err);
+  RTC_LOG_F(LS_VERBOSE) << "Exit";
+}
+
+//
+// Tests
+//
+
+TEST_F(HttpBaseTest, SupportsSend) {
+  // Queue response document
+  SetupDocument("Goodbye!");
+
+  // Begin send
+  base.send(&data);
+
+  // Send completed successfully
+  VerifyTransferComplete(HM_SEND, HE_NONE);
+  VerifySourceContents(kHttpResponse);
+}
+
+TEST_F(HttpBaseTest, SupportsSendNoDocument) {
+  // Queue response document
+  SetupDocument(nullptr);
+
+  // Begin send
+  base.send(&data);
+
+  // Send completed successfully
+  VerifyTransferComplete(HM_SEND, HE_NONE);
+  VerifySourceContents(kHttpEmptyResponse);
+}
+
+TEST_F(HttpBaseTest, SignalsCompleteOnInterruptedSend) {
+  // This test is attempting to expose a bug that occurs when a particular
+  // base objects is used for receiving, and then used for sending.  In
+  // particular, the HttpParser state is different after receiving.  Simulate
+  // that here.
+  SetupSource(kHttpResponse);
+  base.recv(&data);
+  VerifyTransferComplete(HM_RECV, HE_NONE);
+
+  src.Clear();
+  data.clear(true);
+  events.clear();
+  base.detach();
+
+  // Queue response document
+  SetupDocument("Goodbye!");
+
+  // Prevent entire response from being sent
+  const size_t kInterruptedLength = strlen(kHttpResponse) - 1;
+  src.SetWriteBlock(kInterruptedLength);
+
+  // Begin send
+  base.send(&data);
+
+  // Document is mostly complete, but no completion signal yet.
+  EXPECT_TRUE(events.empty());
+  VerifySourceContents(kHttpResponse, kInterruptedLength);
+
+  src.SetState(SS_CLOSED);
+
+  // Send completed with disconnect error, and no additional data.
+  VerifyTransferComplete(HM_SEND, HE_DISCONNECTED);
+  EXPECT_TRUE(src.ReadData().empty());
+}
+
+TEST_F(HttpBaseTest, SupportsReceiveViaDocumentPush) {
+  // Queue response document
+  SetupSource(kHttpResponse);
+
+  // Begin receive
+  base.recv(&data);
+
+  // Document completed successfully
+  VerifyHeaderComplete(2, false);
+  VerifyTransferComplete(HM_RECV, HE_NONE);
+  VerifyDocumentContents("Goodbye!");
+}
+
+TEST_F(HttpBaseTest, SupportsReceiveViaStreamPull) {
+  // Switch to pull mode
+  ObtainDocumentStream();
+  VerifyDocumentStreamIsOpening();
+
+  // Queue response document
+  SetupSource(kHttpResponse);
+  VerifyDocumentStreamIsOpening();
+
+  // Begin receive
+  base.recv(&data);
+
+  // Pull document data
+  VerifyDocumentStreamOpenEvent();
+  ReadDocumentStreamData("Goodbye!");
+  VerifyDocumentStreamIsEOS();
+
+  // Document completed successfully
+  VerifyHeaderComplete(2, false);
+  VerifyTransferComplete(HM_RECV, HE_NONE);
+  VerifyDocumentContents("");
+}
+
+TEST_F(HttpBaseTest, DISABLED_AllowsCloseStreamBeforeDocumentIsComplete) {
+
+  // TODO: Remove extra logging once test failure is understood
+  LoggingSeverity old_sev = rtc::LogMessage::GetLogToDebug();
+  rtc::LogMessage::LogToDebug(LS_VERBOSE);
+
+
+  // Switch to pull mode
+  ObtainDocumentStream();
+  VerifyDocumentStreamIsOpening();
+
+  // Queue response document
+  SetupSource(kHttpResponse);
+  VerifyDocumentStreamIsOpening();
+
+  // Begin receive
+  base.recv(&data);
+
+  // Pull some of the data
+  VerifyDocumentStreamOpenEvent();
+  ReadDocumentStreamData("Goodb");
+
+  // We've seen the header by now
+  VerifyHeaderComplete(1, false);
+
+  // Close the pull stream, this will transition back to push I/O.
+  http_stream->Close();
+  Thread::Current()->ProcessMessages(0);
+
+  // Remainder of document completed successfully
+  VerifyTransferComplete(HM_RECV, HE_NONE);
+  VerifyDocumentContents("ye!");
+
+  rtc::LogMessage::LogToDebug(old_sev);
+}
+
+TEST_F(HttpBaseTest, AllowsGetDocumentStreamInResponseToHttpHeader) {
+  // Queue response document
+  SetupSource(kHttpResponse);
+
+  // Switch to pull mode in response to header arrival
+  obtain_stream = true;
+
+  // Begin receive
+  base.recv(&data);
+
+  // We've already seen the header, but not data has arrived
+  VerifyHeaderComplete(1, false);
+  VerifyDocumentContents("");
+
+  // Pull the document data
+  ReadDocumentStreamData("Goodbye!");
+  VerifyDocumentStreamIsEOS();
+
+  // Document completed successfully
+  VerifyTransferComplete(HM_RECV, HE_NONE);
+  VerifyDocumentContents("");
+}
+
+TEST_F(HttpBaseTest, AllowsGetDocumentStreamWithEmptyDocumentBody) {
+  // Queue empty response document
+  SetupSource(kHttpEmptyResponse);
+
+  // Switch to pull mode in response to header arrival
+  obtain_stream = true;
+
+  // Begin receive
+  base.recv(&data);
+
+  // We've already seen the header, but not data has arrived
+  VerifyHeaderComplete(1, true);
+  VerifyDocumentContents("");
+
+  // The document is still open, until we attempt to read
+  ASSERT_TRUE(nullptr != http_stream);
+  EXPECT_EQ(SS_OPEN, http_stream->GetState());
+
+  // Attempt to read data, and discover EOS
+  VerifyDocumentStreamIsEOS();
+
+  // Document completed successfully
+  VerifyTransferComplete(HM_RECV, HE_NONE);
+  VerifyDocumentContents("");
+}
+
+TEST_F(HttpBaseTest, SignalsDocumentStreamCloseOnUnexpectedClose) {
+  // Switch to pull mode
+  ObtainDocumentStream();
+  VerifyDocumentStreamIsOpening();
+
+  // Queue response document
+  SetupSource(kHttpResponsePrefix);
+  VerifyDocumentStreamIsOpening();
+
+  // Begin receive
+  base.recv(&data);
+
+  // Pull document data
+  VerifyDocumentStreamOpenEvent();
+  ReadDocumentStreamData("Goodbye!");
+
+  // Simulate unexpected close
+  src.SetState(SS_CLOSED);
+
+  // Observe error event on document stream
+  EXPECT_EQ(webrtc::testing::SSE_ERROR, sink.Events(http_stream));
+
+  // Future reads give an error
+  int error = 0;
+  char buffer[5] = { 0 };
+  EXPECT_EQ(SR_ERROR,
+            http_stream->Read(buffer, sizeof(buffer), nullptr, &error));
+  EXPECT_EQ(HE_DISCONNECTED, error);
+
+  // Document completed with error
+  VerifyHeaderComplete(2, false);
+  VerifyTransferComplete(HM_RECV, HE_DISCONNECTED);
+  VerifyDocumentContents("");
+}
+
+} // namespace rtc
diff --git a/rtc_base/httpcommon-inl.h b/rtc_base/httpcommon-inl.h
new file mode 100644
index 0000000..70263da
--- /dev/null
+++ b/rtc_base/httpcommon-inl.h
@@ -0,0 +1,132 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_HTTPCOMMON_INL_H_
+#define RTC_BASE_HTTPCOMMON_INL_H_
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/httpcommon.h"
+
+namespace rtc {
+
+///////////////////////////////////////////////////////////////////////////////
+// Url
+///////////////////////////////////////////////////////////////////////////////
+
+template<class CTYPE>
+void Url<CTYPE>::do_set_url(const CTYPE* val, size_t len) {
+  if (ascnicmp(val, "http://", 7) == 0) {
+    val += 7; len -= 7;
+    secure_ = false;
+  } else if (ascnicmp(val, "https://", 8) == 0) {
+    val += 8; len -= 8;
+    secure_ = true;
+  } else {
+    clear();
+    return;
+  }
+  const CTYPE* path = strchrn(val, len, static_cast<CTYPE>('/'));
+  if (!path) {
+    path = val + len;
+  }
+  size_t address_length = (path - val);
+  do_set_address(val, address_length);
+  do_set_full_path(path, len - address_length);
+}
+
+template<class CTYPE>
+void Url<CTYPE>::do_set_address(const CTYPE* val, size_t len) {
+  if (const CTYPE* at = strchrn(val, len, static_cast<CTYPE>('@'))) {
+    // Everything before the @ is a user:password combo, so skip it.
+    len -= at - val + 1;
+    val = at + 1;
+  }
+  if (const CTYPE* colon = strchrn(val, len, static_cast<CTYPE>(':'))) {
+    host_.assign(val, colon - val);
+    // Note: In every case, we're guaranteed that colon is followed by a null,
+    // or non-numeric character.
+    port_ = static_cast<uint16_t>(::strtoul(colon + 1, nullptr, 10));
+    // TODO: Consider checking for invalid data following port number.
+  } else {
+    host_.assign(val, len);
+    port_ = HttpDefaultPort(secure_);
+  }
+}
+
+template<class CTYPE>
+void Url<CTYPE>::do_set_full_path(const CTYPE* val, size_t len) {
+  const CTYPE* query = strchrn(val, len, static_cast<CTYPE>('?'));
+  if (!query) {
+    query = val + len;
+  }
+  size_t path_length = (query - val);
+  if (0 == path_length) {
+    // TODO: consider failing in this case.
+    path_.assign(1, static_cast<CTYPE>('/'));
+  } else {
+    RTC_DCHECK(val[0] == static_cast<CTYPE>('/'));
+    path_.assign(val, path_length);
+  }
+  query_.assign(query, len - path_length);
+}
+
+template<class CTYPE>
+void Url<CTYPE>::do_get_url(string* val) const {
+  CTYPE protocol[9];
+  asccpyn(protocol, arraysize(protocol), secure_ ? "https://" : "http://");
+  val->append(protocol);
+  do_get_address(val);
+  do_get_full_path(val);
+}
+
+template<class CTYPE>
+void Url<CTYPE>::do_get_address(string* val) const {
+  val->append(host_);
+  if (port_ != HttpDefaultPort(secure_)) {
+    CTYPE format[5], port[32];
+    asccpyn(format, arraysize(format), ":%hu");
+    sprintfn(port, arraysize(port), format, port_);
+    val->append(port);
+  }
+}
+
+template<class CTYPE>
+void Url<CTYPE>::do_get_full_path(string* val) const {
+  val->append(path_);
+  val->append(query_);
+}
+
+template<class CTYPE>
+bool Url<CTYPE>::get_attribute(const string& name, string* value) const {
+  if (query_.empty())
+    return false;
+
+  std::string::size_type pos = query_.find(name, 1);
+  if (std::string::npos == pos)
+    return false;
+
+  pos += name.length() + 1;
+  if ((pos > query_.length()) || (static_cast<CTYPE>('=') != query_[pos-1]))
+    return false;
+
+  std::string::size_type end = query_.find(static_cast<CTYPE>('&'), pos);
+  if (std::string::npos == end) {
+    end = query_.length();
+  }
+  value->assign(query_.substr(pos, end - pos));
+  return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_HTTPCOMMON_INL_H_
diff --git a/rtc_base/httpcommon.cc b/rtc_base/httpcommon.cc
new file mode 100644
index 0000000..f23cb63
--- /dev/null
+++ b/rtc_base/httpcommon.cc
@@ -0,0 +1,1052 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <time.h>
+
+#if defined(WEBRTC_WIN)
+#include <windows.h>
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#define SECURITY_WIN32
+#include <security.h>
+#endif
+
+#include <algorithm>
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/base64.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/cryptstring.h"
+#include "rtc_base/httpcommon-inl.h"
+#include "rtc_base/httpcommon.h"
+#include "rtc_base/messagedigest.h"
+#include "rtc_base/socketaddress.h"
+#include "rtc_base/zero_memory.h"
+
+namespace rtc {
+namespace {
+#if defined(WEBRTC_WIN)
+///////////////////////////////////////////////////////////////////////////////
+// ConstantToLabel can be used to easily generate string names from constant
+// values.  This can be useful for logging descriptive names of error messages.
+// Usage:
+//   const ConstantToLabel LIBRARY_ERRORS[] = {
+//     KLABEL(SOME_ERROR),
+//     KLABEL(SOME_OTHER_ERROR),
+//     ...
+//     LASTLABEL
+//   }
+//
+//   int err = LibraryFunc();
+//   LOG(LS_ERROR) << "LibraryFunc returned: "
+//                 << GetErrorName(err, LIBRARY_ERRORS);
+struct ConstantToLabel { int value; const char * label; };
+
+const char* LookupLabel(int value, const ConstantToLabel entries[]) {
+  for (int i = 0; entries[i].label; ++i) {
+    if (value == entries[i].value) {
+      return entries[i].label;
+    }
+  }
+  return 0;
+}
+
+std::string GetErrorName(int err, const ConstantToLabel* err_table) {
+  if (err == 0)
+    return "No error";
+
+  if (err_table != 0) {
+    if (const char* value = LookupLabel(err, err_table))
+      return value;
+  }
+
+  char buffer[16];
+  snprintf(buffer, sizeof(buffer), "0x%08x", err);
+  return buffer;
+}
+
+#define KLABEL(x) { x, #x }
+#define LASTLABEL { 0, 0 }
+
+const ConstantToLabel SECURITY_ERRORS[] = {
+  KLABEL(SEC_I_COMPLETE_AND_CONTINUE),
+  KLABEL(SEC_I_COMPLETE_NEEDED),
+  KLABEL(SEC_I_CONTEXT_EXPIRED),
+  KLABEL(SEC_I_CONTINUE_NEEDED),
+  KLABEL(SEC_I_INCOMPLETE_CREDENTIALS),
+  KLABEL(SEC_I_RENEGOTIATE),
+  KLABEL(SEC_E_CERT_EXPIRED),
+  KLABEL(SEC_E_INCOMPLETE_MESSAGE),
+  KLABEL(SEC_E_INSUFFICIENT_MEMORY),
+  KLABEL(SEC_E_INTERNAL_ERROR),
+  KLABEL(SEC_E_INVALID_HANDLE),
+  KLABEL(SEC_E_INVALID_TOKEN),
+  KLABEL(SEC_E_LOGON_DENIED),
+  KLABEL(SEC_E_NO_AUTHENTICATING_AUTHORITY),
+  KLABEL(SEC_E_NO_CREDENTIALS),
+  KLABEL(SEC_E_NOT_OWNER),
+  KLABEL(SEC_E_OK),
+  KLABEL(SEC_E_SECPKG_NOT_FOUND),
+  KLABEL(SEC_E_TARGET_UNKNOWN),
+  KLABEL(SEC_E_UNKNOWN_CREDENTIALS),
+  KLABEL(SEC_E_UNSUPPORTED_FUNCTION),
+  KLABEL(SEC_E_UNTRUSTED_ROOT),
+  KLABEL(SEC_E_WRONG_PRINCIPAL),
+  LASTLABEL
+};
+#undef KLABEL
+#undef LASTLABEL
+#endif  // defined(WEBRTC_WIN)
+}  // namespace
+
+//////////////////////////////////////////////////////////////////////
+// Enum - TODO: expose globally later?
+//////////////////////////////////////////////////////////////////////
+
+bool find_string(size_t& index, const std::string& needle,
+                 const char* const haystack[], size_t max_index) {
+  for (index=0; index<max_index; ++index) {
+    if (_stricmp(needle.c_str(), haystack[index]) == 0) {
+      return true;
+    }
+  }
+  return false;
+}
+
+template<class E>
+struct Enum {
+  static const char** Names;
+  static size_t Size;
+
+  static inline const char* Name(E val) { return Names[val]; }
+  static inline bool Parse(E& val, const std::string& name) {
+    size_t index;
+    if (!find_string(index, name, Names, Size))
+      return false;
+    val = static_cast<E>(index);
+    return true;
+  }
+
+  E val;
+
+  inline operator E&() { return val; }
+  inline Enum& operator=(E rhs) { val = rhs; return *this; }
+
+  inline const char* name() const { return Name(val); }
+  inline bool assign(const std::string& name) { return Parse(val, name); }
+  inline Enum& operator=(const std::string& rhs) { assign(rhs); return *this; }
+};
+
+#define ENUM(e,n) \
+  template<> const char** Enum<e>::Names = n; \
+  template<> size_t Enum<e>::Size = sizeof(n)/sizeof(n[0])
+
+//////////////////////////////////////////////////////////////////////
+// HttpCommon
+//////////////////////////////////////////////////////////////////////
+
+static const char* kHttpVersions[HVER_LAST+1] = {
+  "1.0", "1.1", "Unknown"
+};
+ENUM(HttpVersion, kHttpVersions);
+
+static const char* kHttpVerbs[HV_LAST+1] = {
+  "GET", "POST", "PUT", "DELETE", "CONNECT", "HEAD"
+};
+ENUM(HttpVerb, kHttpVerbs);
+
+static const char* kHttpHeaders[HH_LAST+1] = {
+  "Age",
+  "Cache-Control",
+  "Connection",
+  "Content-Disposition",
+  "Content-Length",
+  "Content-Range",
+  "Content-Type",
+  "Cookie",
+  "Date",
+  "ETag",
+  "Expires",
+  "Host",
+  "If-Modified-Since",
+  "If-None-Match",
+  "Keep-Alive",
+  "Last-Modified",
+  "Location",
+  "Proxy-Authenticate",
+  "Proxy-Authorization",
+  "Proxy-Connection",
+  "Range",
+  "Set-Cookie",
+  "TE",
+  "Trailers",
+  "Transfer-Encoding",
+  "Upgrade",
+  "User-Agent",
+  "WWW-Authenticate",
+};
+ENUM(HttpHeader, kHttpHeaders);
+
+const char* ToString(HttpVersion version) {
+  return Enum<HttpVersion>::Name(version);
+}
+
+bool FromString(HttpVersion& version, const std::string& str) {
+  return Enum<HttpVersion>::Parse(version, str);
+}
+
+const char* ToString(HttpVerb verb) {
+  return Enum<HttpVerb>::Name(verb);
+}
+
+bool FromString(HttpVerb& verb, const std::string& str) {
+  return Enum<HttpVerb>::Parse(verb, str);
+}
+
+const char* ToString(HttpHeader header) {
+  return Enum<HttpHeader>::Name(header);
+}
+
+bool FromString(HttpHeader& header, const std::string& str) {
+  return Enum<HttpHeader>::Parse(header, str);
+}
+
+bool HttpCodeHasBody(uint32_t code) {
+  return !HttpCodeIsInformational(code)
+         && (code != HC_NO_CONTENT) && (code != HC_NOT_MODIFIED);
+}
+
+bool HttpCodeIsCacheable(uint32_t code) {
+  switch (code) {
+  case HC_OK:
+  case HC_NON_AUTHORITATIVE:
+  case HC_PARTIAL_CONTENT:
+  case HC_MULTIPLE_CHOICES:
+  case HC_MOVED_PERMANENTLY:
+  case HC_GONE:
+    return true;
+  default:
+    return false;
+  }
+}
+
+bool HttpHeaderIsEndToEnd(HttpHeader header) {
+  switch (header) {
+  case HH_CONNECTION:
+  case HH_KEEP_ALIVE:
+  case HH_PROXY_AUTHENTICATE:
+  case HH_PROXY_AUTHORIZATION:
+  case HH_PROXY_CONNECTION:  // Note part of RFC... this is non-standard header
+  case HH_TE:
+  case HH_TRAILERS:
+  case HH_TRANSFER_ENCODING:
+  case HH_UPGRADE:
+    return false;
+  default:
+    return true;
+  }
+}
+
+bool HttpHeaderIsCollapsible(HttpHeader header) {
+  switch (header) {
+  case HH_SET_COOKIE:
+  case HH_PROXY_AUTHENTICATE:
+  case HH_WWW_AUTHENTICATE:
+    return false;
+  default:
+    return true;
+  }
+}
+
+bool HttpShouldKeepAlive(const HttpData& data) {
+  std::string connection;
+  if ((data.hasHeader(HH_PROXY_CONNECTION, &connection)
+      || data.hasHeader(HH_CONNECTION, &connection))) {
+    return (_stricmp(connection.c_str(), "Keep-Alive") == 0);
+  }
+  return (data.version >= HVER_1_1);
+}
+
+namespace {
+
+inline bool IsEndOfAttributeName(size_t pos, size_t len, const char * data) {
+  if (pos >= len)
+    return true;
+  if (isspace(static_cast<unsigned char>(data[pos])))
+    return true;
+  // The reason for this complexity is that some attributes may contain trailing
+  // equal signs (like base64 tokens in Negotiate auth headers)
+  if ((pos+1 < len) && (data[pos] == '=') &&
+      !isspace(static_cast<unsigned char>(data[pos+1])) &&
+      (data[pos+1] != '=')) {
+    return true;
+  }
+  return false;
+}
+
+}  // anonymous namespace
+
+void HttpParseAttributes(const char * data, size_t len,
+                         HttpAttributeList& attributes) {
+  size_t pos = 0;
+  while (true) {
+    // Skip leading whitespace
+    while ((pos < len) && isspace(static_cast<unsigned char>(data[pos]))) {
+      ++pos;
+    }
+
+    // End of attributes?
+    if (pos >= len)
+      return;
+
+    // Find end of attribute name
+    size_t start = pos;
+    while (!IsEndOfAttributeName(pos, len, data)) {
+      ++pos;
+    }
+
+    HttpAttribute attribute;
+    attribute.first.assign(data + start, data + pos);
+
+    // Attribute has value?
+    if ((pos < len) && (data[pos] == '=')) {
+      ++pos; // Skip '='
+      // Check if quoted value
+      if ((pos < len) && (data[pos] == '"')) {
+        while (++pos < len) {
+          if (data[pos] == '"') {
+            ++pos;
+            break;
+          }
+          if ((data[pos] == '\\') && (pos + 1 < len))
+            ++pos;
+          attribute.second.append(1, data[pos]);
+        }
+      } else {
+        while ((pos < len) &&
+            !isspace(static_cast<unsigned char>(data[pos])) &&
+            (data[pos] != ',')) {
+          attribute.second.append(1, data[pos++]);
+        }
+      }
+    }
+
+    attributes.push_back(attribute);
+    if ((pos < len) && (data[pos] == ',')) ++pos; // Skip ','
+  }
+}
+
+bool HttpHasAttribute(const HttpAttributeList& attributes,
+                      const std::string& name,
+                      std::string* value) {
+  for (HttpAttributeList::const_iterator it = attributes.begin();
+       it != attributes.end(); ++it) {
+    if (it->first == name) {
+      if (value) {
+        *value = it->second;
+      }
+      return true;
+    }
+  }
+  return false;
+}
+
+bool HttpHasNthAttribute(HttpAttributeList& attributes,
+                         size_t index,
+                         std::string* name,
+                         std::string* value) {
+  if (index >= attributes.size())
+    return false;
+
+  if (name)
+    *name = attributes[index].first;
+  if (value)
+    *value = attributes[index].second;
+  return true;
+}
+
+bool HttpDateToSeconds(const std::string& date, time_t* seconds) {
+  const char* const kTimeZones[] = {
+    "UT", "GMT", "EST", "EDT", "CST", "CDT", "MST", "MDT", "PST", "PDT",
+    "A", "B", "C", "D", "E", "F", "G", "H", "I", "K", "L", "M",
+    "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y"
+  };
+  const int kTimeZoneOffsets[] = {
+     0,  0, -5, -4, -6, -5, -7, -6, -8, -7,
+    -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12,
+     1,  2,  3,  4,  5,  6,  7,  8,  9,  10,  11,  12
+  };
+
+  RTC_DCHECK(nullptr != seconds);
+  struct tm tval;
+  memset(&tval, 0, sizeof(tval));
+  char month[4], zone[6];
+  memset(month, 0, sizeof(month));
+  memset(zone, 0, sizeof(zone));
+
+  if (7 != sscanf(date.c_str(), "%*3s, %d %3s %d %d:%d:%d %5c",
+                  &tval.tm_mday, month, &tval.tm_year,
+                  &tval.tm_hour, &tval.tm_min, &tval.tm_sec, zone)) {
+    return false;
+  }
+  switch (toupper(month[2])) {
+  case 'N': tval.tm_mon = (month[1] == 'A') ? 0 : 5; break;
+  case 'B': tval.tm_mon = 1; break;
+  case 'R': tval.tm_mon = (month[0] == 'M') ? 2 : 3; break;
+  case 'Y': tval.tm_mon = 4; break;
+  case 'L': tval.tm_mon = 6; break;
+  case 'G': tval.tm_mon = 7; break;
+  case 'P': tval.tm_mon = 8; break;
+  case 'T': tval.tm_mon = 9; break;
+  case 'V': tval.tm_mon = 10; break;
+  case 'C': tval.tm_mon = 11; break;
+  }
+  tval.tm_year -= 1900;
+  time_t gmt, non_gmt = mktime(&tval);
+  if ((zone[0] == '+') || (zone[0] == '-')) {
+    if (!isdigit(zone[1]) || !isdigit(zone[2])
+        || !isdigit(zone[3]) || !isdigit(zone[4])) {
+      return false;
+    }
+    int hours = (zone[1] - '0') * 10 + (zone[2] - '0');
+    int minutes = (zone[3] - '0') * 10 + (zone[4] - '0');
+    int offset = (hours * 60 + minutes) * 60;
+    gmt = non_gmt + ((zone[0] == '+') ? offset : -offset);
+  } else {
+    size_t zindex;
+    if (!find_string(zindex, zone, kTimeZones, arraysize(kTimeZones))) {
+      return false;
+    }
+    gmt = non_gmt + kTimeZoneOffsets[zindex] * 60 * 60;
+  }
+  // TODO: Android should support timezone, see b/2441195
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS) || defined(WEBRTC_ANDROID) || defined(BSD)
+  tm *tm_for_timezone = localtime(&gmt);
+  *seconds = gmt + tm_for_timezone->tm_gmtoff;
+#else
+#if defined(_MSC_VER) && _MSC_VER >= 1900
+  long timezone = 0;
+  _get_timezone(&timezone);
+#endif
+  *seconds = gmt - timezone;
+#endif
+  return true;
+}
+
+std::string HttpAddress(const SocketAddress& address, bool secure) {
+  return (address.port() == HttpDefaultPort(secure))
+          ? address.hostname() : address.ToString();
+}
+
+//////////////////////////////////////////////////////////////////////
+// HttpData
+//////////////////////////////////////////////////////////////////////
+
+HttpData::HttpData() : version(HVER_1_1) {
+}
+
+HttpData::~HttpData() = default;
+
+void
+HttpData::clear(bool release_document) {
+  // Clear headers first, since releasing a document may have far-reaching
+  // effects.
+  headers_.clear();
+  if (release_document) {
+    document.reset();
+  }
+}
+
+void
+HttpData::copy(const HttpData& src) {
+  headers_ = src.headers_;
+}
+
+void
+HttpData::changeHeader(const std::string& name, const std::string& value,
+                       HeaderCombine combine) {
+  if (combine == HC_AUTO) {
+    HttpHeader header;
+    // Unrecognized headers are collapsible
+    combine = !FromString(header, name) || HttpHeaderIsCollapsible(header)
+              ? HC_YES : HC_NO;
+  } else if (combine == HC_REPLACE) {
+    headers_.erase(name);
+    combine = HC_NO;
+  }
+  // At this point, combine is one of (YES, NO, NEW)
+  if (combine != HC_NO) {
+    HeaderMap::iterator it = headers_.find(name);
+    if (it != headers_.end()) {
+      if (combine == HC_YES) {
+        it->second.append(",");
+        it->second.append(value);
+      }
+      return;
+    }
+  }
+  headers_.insert(HeaderMap::value_type(name, value));
+}
+
+size_t HttpData::clearHeader(const std::string& name) {
+  return headers_.erase(name);
+}
+
+HttpData::iterator HttpData::clearHeader(iterator header) {
+  iterator deprecated = header++;
+  headers_.erase(deprecated);
+  return header;
+}
+
+bool
+HttpData::hasHeader(const std::string& name, std::string* value) const {
+  HeaderMap::const_iterator it = headers_.find(name);
+  if (it == headers_.end()) {
+    return false;
+  } else if (value) {
+    *value = it->second;
+  }
+  return true;
+}
+
+void HttpData::setContent(const std::string& content_type,
+                          StreamInterface* document) {
+  setHeader(HH_CONTENT_TYPE, content_type);
+  setDocumentAndLength(document);
+}
+
+void HttpData::setDocumentAndLength(StreamInterface* document) {
+  // TODO: Consider calling Rewind() here?
+  RTC_DCHECK(!hasHeader(HH_CONTENT_LENGTH, nullptr));
+  RTC_DCHECK(!hasHeader(HH_TRANSFER_ENCODING, nullptr));
+  RTC_DCHECK(document != nullptr);
+  this->document.reset(document);
+  size_t content_length = 0;
+  if (this->document->GetAvailable(&content_length)) {
+    char buffer[32];
+    sprintfn(buffer, sizeof(buffer), "%d", content_length);
+    setHeader(HH_CONTENT_LENGTH, buffer);
+  } else {
+    setHeader(HH_TRANSFER_ENCODING, "chunked");
+  }
+}
+
+//
+// HttpRequestData
+//
+
+void
+HttpRequestData::clear(bool release_document) {
+  verb = HV_GET;
+  path.clear();
+  HttpData::clear(release_document);
+}
+
+void
+HttpRequestData::copy(const HttpRequestData& src) {
+  verb = src.verb;
+  path = src.path;
+  HttpData::copy(src);
+}
+
+size_t
+HttpRequestData::formatLeader(char* buffer, size_t size) const {
+  RTC_DCHECK(path.find(' ') == std::string::npos);
+  return sprintfn(buffer, size, "%s %.*s HTTP/%s", ToString(verb), path.size(),
+                  path.data(), ToString(version));
+}
+
+HttpError
+HttpRequestData::parseLeader(const char* line, size_t len) {
+  unsigned int vmajor, vminor;
+  int vend, dstart, dend;
+  // sscanf isn't safe with strings that aren't null-terminated, and there is
+  // no guarantee that |line| is. Create a local copy that is null-terminated.
+  std::string line_str(line, len);
+  line = line_str.c_str();
+  if ((sscanf(line, "%*s%n %n%*s%n HTTP/%u.%u",
+              &vend, &dstart, &dend, &vmajor, &vminor) != 2)
+      || (vmajor != 1)) {
+    return HE_PROTOCOL;
+  }
+  if (vminor == 0) {
+    version = HVER_1_0;
+  } else if (vminor == 1) {
+    version = HVER_1_1;
+  } else {
+    return HE_PROTOCOL;
+  }
+  std::string sverb(line, vend);
+  if (!FromString(verb, sverb.c_str())) {
+    return HE_PROTOCOL; // !?! HC_METHOD_NOT_SUPPORTED?
+  }
+  path.assign(line + dstart, line + dend);
+  return HE_NONE;
+}
+
+bool HttpRequestData::getAbsoluteUri(std::string* uri) const {
+  if (HV_CONNECT == verb)
+    return false;
+  Url<char> url(path);
+  if (url.valid()) {
+    uri->assign(path);
+    return true;
+  }
+  std::string host;
+  if (!hasHeader(HH_HOST, &host))
+    return false;
+  url.set_address(host);
+  url.set_full_path(path);
+  uri->assign(url.url());
+  return url.valid();
+}
+
+bool HttpRequestData::getRelativeUri(std::string* host,
+                                     std::string* path) const
+{
+  if (HV_CONNECT == verb)
+    return false;
+  Url<char> url(this->path);
+  if (url.valid()) {
+    host->assign(url.address());
+    path->assign(url.full_path());
+    return true;
+  }
+  if (!hasHeader(HH_HOST, host))
+    return false;
+  path->assign(this->path);
+  return true;
+}
+
+//
+// HttpResponseData
+//
+
+void
+HttpResponseData::clear(bool release_document) {
+  scode = HC_INTERNAL_SERVER_ERROR;
+  message.clear();
+  HttpData::clear(release_document);
+}
+
+void
+HttpResponseData::copy(const HttpResponseData& src) {
+  scode = src.scode;
+  message = src.message;
+  HttpData::copy(src);
+}
+
+void HttpResponseData::set_success(uint32_t scode) {
+  this->scode = scode;
+  message.clear();
+  setHeader(HH_CONTENT_LENGTH, "0", false);
+}
+
+void HttpResponseData::set_success(const std::string& content_type,
+                                   StreamInterface* document,
+                                   uint32_t scode) {
+  this->scode = scode;
+  message.erase(message.begin(), message.end());
+  setContent(content_type, document);
+}
+
+void HttpResponseData::set_redirect(const std::string& location,
+                                    uint32_t scode) {
+  this->scode = scode;
+  message.clear();
+  setHeader(HH_LOCATION, location);
+  setHeader(HH_CONTENT_LENGTH, "0", false);
+}
+
+void HttpResponseData::set_error(uint32_t scode) {
+  this->scode = scode;
+  message.clear();
+  setHeader(HH_CONTENT_LENGTH, "0", false);
+}
+
+size_t
+HttpResponseData::formatLeader(char* buffer, size_t size) const {
+  size_t len = sprintfn(buffer, size, "HTTP/%s %lu", ToString(version), scode);
+  if (!message.empty()) {
+    len += sprintfn(buffer + len, size - len, " %.*s",
+                    message.size(), message.data());
+  }
+  return len;
+}
+
+HttpError
+HttpResponseData::parseLeader(const char* line, size_t len) {
+  size_t pos = 0;
+  unsigned int vmajor, vminor, temp_scode;
+  int temp_pos;
+  // sscanf isn't safe with strings that aren't null-terminated, and there is
+  // no guarantee that |line| is. Create a local copy that is null-terminated.
+  std::string line_str(line, len);
+  line = line_str.c_str();
+  if (sscanf(line, "HTTP %u%n",
+             &temp_scode, &temp_pos) == 1) {
+    // This server's response has no version. :( NOTE: This happens for every
+    // response to requests made from Chrome plugins, regardless of the server's
+    // behaviour.
+    RTC_LOG(LS_VERBOSE) << "HTTP version missing from response";
+    version = HVER_UNKNOWN;
+  } else if ((sscanf(line, "HTTP/%u.%u %u%n",
+                     &vmajor, &vminor, &temp_scode, &temp_pos) == 3)
+             && (vmajor == 1)) {
+    // This server's response does have a version.
+    if (vminor == 0) {
+      version = HVER_1_0;
+    } else if (vminor == 1) {
+      version = HVER_1_1;
+    } else {
+      return HE_PROTOCOL;
+    }
+  } else {
+    return HE_PROTOCOL;
+  }
+  scode = temp_scode;
+  pos = static_cast<size_t>(temp_pos);
+  while ((pos < len) && isspace(static_cast<unsigned char>(line[pos]))) ++pos;
+  message.assign(line + pos, len - pos);
+  return HE_NONE;
+}
+
+//////////////////////////////////////////////////////////////////////
+// Http Authentication
+//////////////////////////////////////////////////////////////////////
+
+std::string quote(const std::string& str) {
+  std::string result;
+  result.push_back('"');
+  for (size_t i=0; i<str.size(); ++i) {
+    if ((str[i] == '"') || (str[i] == '\\'))
+      result.push_back('\\');
+    result.push_back(str[i]);
+  }
+  result.push_back('"');
+  return result;
+}
+
+#if defined(WEBRTC_WIN)
+struct NegotiateAuthContext : public HttpAuthContext {
+  CredHandle cred;
+  CtxtHandle ctx;
+  size_t steps;
+  bool specified_credentials;
+
+  NegotiateAuthContext(const std::string& auth, CredHandle c1, CtxtHandle c2)
+  : HttpAuthContext(auth), cred(c1), ctx(c2), steps(0),
+    specified_credentials(false)
+  { }
+
+  ~NegotiateAuthContext() override {
+    DeleteSecurityContext(&ctx);
+    FreeCredentialsHandle(&cred);
+  }
+};
+#endif // WEBRTC_WIN
+
+HttpAuthResult HttpAuthenticate(
+  const char * challenge, size_t len,
+  const SocketAddress& server,
+  const std::string& method, const std::string& uri,
+  const std::string& username, const CryptString& password,
+  HttpAuthContext *& context, std::string& response, std::string& auth_method)
+{
+  HttpAttributeList args;
+  HttpParseAttributes(challenge, len, args);
+  HttpHasNthAttribute(args, 0, &auth_method, nullptr);
+
+  if (context && (context->auth_method != auth_method))
+    return HAR_IGNORE;
+
+  // BASIC
+  if (_stricmp(auth_method.c_str(), "basic") == 0) {
+    if (context)
+      return HAR_CREDENTIALS; // Bad credentials
+    if (username.empty())
+      return HAR_CREDENTIALS; // Missing credentials
+
+    context = new HttpAuthContext(auth_method);
+
+    // TODO(bugs.webrtc.org/8905): Convert sensitive to a CryptString and also
+    // return response as CryptString so contents get securely deleted
+    // automatically.
+    // std::string decoded = username + ":" + password;
+    size_t len = username.size() + password.GetLength() + 2;
+    char * sensitive = new char[len];
+    size_t pos = strcpyn(sensitive, len, username.data(), username.size());
+    pos += strcpyn(sensitive + pos, len - pos, ":");
+    password.CopyTo(sensitive + pos, true);
+
+    response = auth_method;
+    response.append(" ");
+    // TODO: create a sensitive-source version of Base64::encode
+    response.append(Base64::Encode(sensitive));
+    ExplicitZeroMemory(sensitive, len);
+    delete [] sensitive;
+    return HAR_RESPONSE;
+  }
+
+  // DIGEST
+  if (_stricmp(auth_method.c_str(), "digest") == 0) {
+    if (context)
+      return HAR_CREDENTIALS; // Bad credentials
+    if (username.empty())
+      return HAR_CREDENTIALS; // Missing credentials
+
+    context = new HttpAuthContext(auth_method);
+
+    std::string cnonce, ncount;
+    char buffer[256];
+    sprintf(buffer, "%d", static_cast<int>(time(0)));
+    cnonce = MD5(buffer);
+    ncount = "00000001";
+
+    std::string realm, nonce, qop, opaque;
+    HttpHasAttribute(args, "realm", &realm);
+    HttpHasAttribute(args, "nonce", &nonce);
+    bool has_qop = HttpHasAttribute(args, "qop", &qop);
+    bool has_opaque = HttpHasAttribute(args, "opaque", &opaque);
+
+    // TODO(bugs.webrtc.org/8905): Convert sensitive to a CryptString and also
+    // return response as CryptString so contents get securely deleted
+    // automatically.
+    // std::string A1 = username + ":" + realm + ":" + password;
+    size_t len = username.size() + realm.size() + password.GetLength() + 3;
+    char * sensitive = new char[len];  // A1
+    size_t pos = strcpyn(sensitive, len, username.data(), username.size());
+    pos += strcpyn(sensitive + pos, len - pos, ":");
+    pos += strcpyn(sensitive + pos, len - pos, realm.c_str());
+    pos += strcpyn(sensitive + pos, len - pos, ":");
+    password.CopyTo(sensitive + pos, true);
+
+    std::string A2 = method + ":" + uri;
+    std::string middle;
+    if (has_qop) {
+      qop = "auth";
+      middle = nonce + ":" + ncount + ":" + cnonce + ":" + qop;
+    } else {
+      middle = nonce;
+    }
+    std::string HA1 = MD5(sensitive);
+    ExplicitZeroMemory(sensitive, len);
+    delete [] sensitive;
+    std::string HA2 = MD5(A2);
+    std::string dig_response = MD5(HA1 + ":" + middle + ":" + HA2);
+
+    std::stringstream ss;
+    ss << auth_method;
+    ss << " username=" << quote(username);
+    ss << ", realm=" << quote(realm);
+    ss << ", nonce=" << quote(nonce);
+    ss << ", uri=" << quote(uri);
+    if (has_qop) {
+      ss << ", qop=" << qop;
+      ss << ", nc="  << ncount;
+      ss << ", cnonce=" << quote(cnonce);
+    }
+    ss << ", response=\"" << dig_response << "\"";
+    if (has_opaque) {
+      ss << ", opaque=" << quote(opaque);
+    }
+    response = ss.str();
+    return HAR_RESPONSE;
+  }
+
+#if defined(WEBRTC_WIN)
+#if 1
+  bool want_negotiate = (_stricmp(auth_method.c_str(), "negotiate") == 0);
+  bool want_ntlm = (_stricmp(auth_method.c_str(), "ntlm") == 0);
+  // SPNEGO & NTLM
+  if (want_negotiate || want_ntlm) {
+    const size_t MAX_MESSAGE = 12000, MAX_SPN = 256;
+    char out_buf[MAX_MESSAGE], spn[MAX_SPN];
+
+#if 0 // Requires funky windows versions
+    DWORD len = MAX_SPN;
+    if (DsMakeSpn("HTTP", server.HostAsURIString().c_str(), nullptr,
+                  server.port(),
+                  0, &len, spn) != ERROR_SUCCESS) {
+      RTC_LOG_F(WARNING) << "(Negotiate) - DsMakeSpn failed";
+      return HAR_IGNORE;
+    }
+#else
+    sprintfn(spn, MAX_SPN, "HTTP/%s", server.ToString().c_str());
+#endif
+
+    SecBuffer out_sec;
+    out_sec.pvBuffer   = out_buf;
+    out_sec.cbBuffer   = sizeof(out_buf);
+    out_sec.BufferType = SECBUFFER_TOKEN;
+
+    SecBufferDesc out_buf_desc;
+    out_buf_desc.ulVersion = 0;
+    out_buf_desc.cBuffers  = 1;
+    out_buf_desc.pBuffers  = &out_sec;
+
+    const ULONG NEG_FLAGS_DEFAULT =
+      //ISC_REQ_ALLOCATE_MEMORY
+      ISC_REQ_CONFIDENTIALITY
+      //| ISC_REQ_EXTENDED_ERROR
+      //| ISC_REQ_INTEGRITY
+      | ISC_REQ_REPLAY_DETECT
+      | ISC_REQ_SEQUENCE_DETECT
+      //| ISC_REQ_STREAM
+      //| ISC_REQ_USE_SUPPLIED_CREDS
+      ;
+
+    ::TimeStamp lifetime;
+    SECURITY_STATUS ret = S_OK;
+    ULONG ret_flags = 0, flags = NEG_FLAGS_DEFAULT;
+
+    bool specify_credentials = !username.empty();
+    size_t steps = 0;
+
+    // uint32_t now = Time();
+
+    NegotiateAuthContext * neg = static_cast<NegotiateAuthContext *>(context);
+    if (neg) {
+      const size_t max_steps = 10;
+      if (++neg->steps >= max_steps) {
+        RTC_LOG(WARNING) << "AsyncHttpsProxySocket::Authenticate(Negotiate) "
+                            "too many retries";
+        return HAR_ERROR;
+      }
+      steps = neg->steps;
+
+      std::string challenge, decoded_challenge;
+      if (HttpHasNthAttribute(args, 1, &challenge, nullptr) &&
+          Base64::Decode(challenge, Base64::DO_STRICT, &decoded_challenge,
+                         nullptr)) {
+        SecBuffer in_sec;
+        in_sec.pvBuffer   = const_cast<char *>(decoded_challenge.data());
+        in_sec.cbBuffer   = static_cast<unsigned long>(decoded_challenge.size());
+        in_sec.BufferType = SECBUFFER_TOKEN;
+
+        SecBufferDesc in_buf_desc;
+        in_buf_desc.ulVersion = 0;
+        in_buf_desc.cBuffers  = 1;
+        in_buf_desc.pBuffers  = &in_sec;
+
+        ret = InitializeSecurityContextA(&neg->cred, &neg->ctx, spn, flags, 0, SECURITY_NATIVE_DREP, &in_buf_desc, 0, &neg->ctx, &out_buf_desc, &ret_flags, &lifetime);
+        if (FAILED(ret)) {
+          RTC_LOG(LS_ERROR) << "InitializeSecurityContext returned: "
+                            << GetErrorName(ret, SECURITY_ERRORS);
+          return HAR_ERROR;
+        }
+      } else if (neg->specified_credentials) {
+        // Try again with default credentials
+        specify_credentials = false;
+        delete context;
+        context = neg = 0;
+      } else {
+        return HAR_CREDENTIALS;
+      }
+    }
+
+    if (!neg) {
+      unsigned char userbuf[256], passbuf[256], domainbuf[16];
+      SEC_WINNT_AUTH_IDENTITY_A auth_id, * pauth_id = 0;
+      if (specify_credentials) {
+        memset(&auth_id, 0, sizeof(auth_id));
+        size_t len = password.GetLength()+1;
+        char * sensitive = new char[len];
+        password.CopyTo(sensitive, true);
+        std::string::size_type pos = username.find('\\');
+        if (pos == std::string::npos) {
+          auth_id.UserLength = static_cast<unsigned long>(
+              std::min(sizeof(userbuf) - 1, username.size()));
+          memcpy(userbuf, username.c_str(), auth_id.UserLength);
+          userbuf[auth_id.UserLength] = 0;
+          auth_id.DomainLength = 0;
+          domainbuf[auth_id.DomainLength] = 0;
+          auth_id.PasswordLength = static_cast<unsigned long>(
+              std::min(sizeof(passbuf) - 1, password.GetLength()));
+          memcpy(passbuf, sensitive, auth_id.PasswordLength);
+          passbuf[auth_id.PasswordLength] = 0;
+        } else {
+          auth_id.UserLength = static_cast<unsigned long>(
+              std::min(sizeof(userbuf) - 1, username.size() - pos - 1));
+          memcpy(userbuf, username.c_str() + pos + 1, auth_id.UserLength);
+          userbuf[auth_id.UserLength] = 0;
+          auth_id.DomainLength =
+              static_cast<unsigned long>(std::min(sizeof(domainbuf) - 1, pos));
+          memcpy(domainbuf, username.c_str(), auth_id.DomainLength);
+          domainbuf[auth_id.DomainLength] = 0;
+          auth_id.PasswordLength = static_cast<unsigned long>(
+              std::min(sizeof(passbuf) - 1, password.GetLength()));
+          memcpy(passbuf, sensitive, auth_id.PasswordLength);
+          passbuf[auth_id.PasswordLength] = 0;
+        }
+        ExplicitZeroMemory(sensitive, len);
+        delete [] sensitive;
+        auth_id.User = userbuf;
+        auth_id.Domain = domainbuf;
+        auth_id.Password = passbuf;
+        auth_id.Flags = SEC_WINNT_AUTH_IDENTITY_ANSI;
+        pauth_id = &auth_id;
+        RTC_LOG(LS_VERBOSE)
+            << "Negotiate protocol: Using specified credentials";
+      } else {
+        RTC_LOG(LS_VERBOSE) << "Negotiate protocol: Using default credentials";
+      }
+
+      CredHandle cred;
+      ret = AcquireCredentialsHandleA(
+          0, const_cast<char*>(want_negotiate ? NEGOSSP_NAME_A : NTLMSP_NAME_A),
+          SECPKG_CRED_OUTBOUND, 0, pauth_id, 0, 0, &cred, &lifetime);
+      if (ret != SEC_E_OK) {
+        RTC_LOG(LS_ERROR) << "AcquireCredentialsHandle error: "
+                          << GetErrorName(ret, SECURITY_ERRORS);
+        return HAR_IGNORE;
+      }
+
+      //CSecBufferBundle<5, CSecBufferBase::FreeSSPI> sb_out;
+
+      CtxtHandle ctx;
+      ret = InitializeSecurityContextA(&cred, 0, spn, flags, 0, SECURITY_NATIVE_DREP, 0, 0, &ctx, &out_buf_desc, &ret_flags, &lifetime);
+      if (FAILED(ret)) {
+        RTC_LOG(LS_ERROR) << "InitializeSecurityContext returned: "
+                          << GetErrorName(ret, SECURITY_ERRORS);
+        FreeCredentialsHandle(&cred);
+        return HAR_IGNORE;
+      }
+
+      RTC_DCHECK(!context);
+      context = neg = new NegotiateAuthContext(auth_method, cred, ctx);
+      neg->specified_credentials = specify_credentials;
+      neg->steps = steps;
+    }
+
+    if ((ret == SEC_I_COMPLETE_NEEDED) || (ret == SEC_I_COMPLETE_AND_CONTINUE)) {
+      ret = CompleteAuthToken(&neg->ctx, &out_buf_desc);
+      RTC_LOG(LS_VERBOSE) << "CompleteAuthToken returned: "
+                          << GetErrorName(ret, SECURITY_ERRORS);
+      if (FAILED(ret)) {
+        return HAR_ERROR;
+      }
+    }
+
+    std::string decoded(out_buf, out_buf + out_sec.cbBuffer);
+    response = auth_method;
+    response.append(" ");
+    response.append(Base64::Encode(decoded));
+    return HAR_RESPONSE;
+  }
+#endif
+#endif // WEBRTC_WIN
+
+  return HAR_IGNORE;
+}
+
+//////////////////////////////////////////////////////////////////////
+
+} // namespace rtc
diff --git a/rtc_base/httpcommon.h b/rtc_base/httpcommon.h
new file mode 100644
index 0000000..4dd1172
--- /dev/null
+++ b/rtc_base/httpcommon.h
@@ -0,0 +1,457 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_HTTPCOMMON_H_
+#define RTC_BASE_HTTPCOMMON_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+#include "rtc_base/basictypes.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/stream.h"
+#include "rtc_base/stringutils.h"
+
+namespace rtc {
+
+class CryptString;
+class SocketAddress;
+
+//////////////////////////////////////////////////////////////////////
+// Constants
+//////////////////////////////////////////////////////////////////////
+
+enum HttpCode {
+  HC_OK = 200,
+  HC_NON_AUTHORITATIVE = 203,
+  HC_NO_CONTENT = 204,
+  HC_PARTIAL_CONTENT = 206,
+
+  HC_MULTIPLE_CHOICES = 300,
+  HC_MOVED_PERMANENTLY = 301,
+  HC_FOUND = 302,
+  HC_SEE_OTHER = 303,
+  HC_NOT_MODIFIED = 304,
+  HC_MOVED_TEMPORARILY = 307,
+
+  HC_BAD_REQUEST = 400,
+  HC_UNAUTHORIZED = 401,
+  HC_FORBIDDEN = 403,
+  HC_NOT_FOUND = 404,
+  HC_PROXY_AUTHENTICATION_REQUIRED = 407,
+  HC_GONE = 410,
+
+  HC_INTERNAL_SERVER_ERROR = 500,
+  HC_NOT_IMPLEMENTED = 501,
+  HC_SERVICE_UNAVAILABLE = 503,
+};
+
+enum HttpVersion {
+  HVER_1_0, HVER_1_1, HVER_UNKNOWN,
+  HVER_LAST = HVER_UNKNOWN
+};
+
+enum HttpVerb {
+  HV_GET, HV_POST, HV_PUT, HV_DELETE, HV_CONNECT, HV_HEAD,
+  HV_LAST = HV_HEAD
+};
+
+enum HttpError {
+  HE_NONE,
+  HE_PROTOCOL,            // Received non-valid HTTP data
+  HE_DISCONNECTED,        // Connection closed unexpectedly
+  HE_OVERFLOW,            // Received too much data for internal buffers
+  HE_CONNECT_FAILED,      // The socket failed to connect.
+  HE_SOCKET_ERROR,        // An error occurred on a connected socket
+  HE_SHUTDOWN,            // Http object is being destroyed
+  HE_OPERATION_CANCELLED, // Connection aborted locally
+  HE_AUTH,                // Proxy Authentication Required
+  HE_CERTIFICATE_EXPIRED, // During SSL negotiation
+  HE_STREAM,              // Problem reading or writing to the document
+  HE_CACHE,               // Problem reading from cache
+  HE_DEFAULT
+};
+
+enum HttpHeader {
+  HH_AGE,
+  HH_CACHE_CONTROL,
+  HH_CONNECTION,
+  HH_CONTENT_DISPOSITION,
+  HH_CONTENT_LENGTH,
+  HH_CONTENT_RANGE,
+  HH_CONTENT_TYPE,
+  HH_COOKIE,
+  HH_DATE,
+  HH_ETAG,
+  HH_EXPIRES,
+  HH_HOST,
+  HH_IF_MODIFIED_SINCE,
+  HH_IF_NONE_MATCH,
+  HH_KEEP_ALIVE,
+  HH_LAST_MODIFIED,
+  HH_LOCATION,
+  HH_PROXY_AUTHENTICATE,
+  HH_PROXY_AUTHORIZATION,
+  HH_PROXY_CONNECTION,
+  HH_RANGE,
+  HH_SET_COOKIE,
+  HH_TE,
+  HH_TRAILERS,
+  HH_TRANSFER_ENCODING,
+  HH_UPGRADE,
+  HH_USER_AGENT,
+  HH_WWW_AUTHENTICATE,
+  HH_LAST = HH_WWW_AUTHENTICATE
+};
+
+const uint16_t HTTP_DEFAULT_PORT = 80;
+const uint16_t HTTP_SECURE_PORT = 443;
+
+//////////////////////////////////////////////////////////////////////
+// Utility Functions
+//////////////////////////////////////////////////////////////////////
+
+inline HttpError mkerr(HttpError err, HttpError def_err = HE_DEFAULT) {
+  return (err != HE_NONE) ? err : def_err;
+}
+
+const char* ToString(HttpVersion version);
+bool FromString(HttpVersion& version, const std::string& str);
+
+const char* ToString(HttpVerb verb);
+bool FromString(HttpVerb& verb, const std::string& str);
+
+const char* ToString(HttpHeader header);
+bool FromString(HttpHeader& header, const std::string& str);
+
+inline bool HttpCodeIsInformational(uint32_t code) {
+  return ((code / 100) == 1);
+}
+inline bool HttpCodeIsSuccessful(uint32_t code) {
+  return ((code / 100) == 2);
+}
+inline bool HttpCodeIsRedirection(uint32_t code) {
+  return ((code / 100) == 3);
+}
+inline bool HttpCodeIsClientError(uint32_t code) {
+  return ((code / 100) == 4);
+}
+inline bool HttpCodeIsServerError(uint32_t code) {
+  return ((code / 100) == 5);
+}
+
+bool HttpCodeHasBody(uint32_t code);
+bool HttpCodeIsCacheable(uint32_t code);
+bool HttpHeaderIsEndToEnd(HttpHeader header);
+bool HttpHeaderIsCollapsible(HttpHeader header);
+
+struct HttpData;
+bool HttpShouldKeepAlive(const HttpData& data);
+
+typedef std::pair<std::string, std::string> HttpAttribute;
+typedef std::vector<HttpAttribute> HttpAttributeList;
+void HttpParseAttributes(const char * data, size_t len,
+                         HttpAttributeList& attributes);
+bool HttpHasAttribute(const HttpAttributeList& attributes,
+                      const std::string& name,
+                      std::string* value);
+bool HttpHasNthAttribute(HttpAttributeList& attributes,
+                         size_t index,
+                         std::string* name,
+                         std::string* value);
+
+// Convert RFC1123 date (DoW, DD Mon YYYY HH:MM:SS TZ) to unix timestamp
+bool HttpDateToSeconds(const std::string& date, time_t* seconds);
+
+inline uint16_t HttpDefaultPort(bool secure) {
+  return secure ? HTTP_SECURE_PORT : HTTP_DEFAULT_PORT;
+}
+
+// Returns the http server notation for a given address
+std::string HttpAddress(const SocketAddress& address, bool secure);
+
+// functional for insensitive std::string compare
+struct iless {
+  bool operator()(const std::string& lhs, const std::string& rhs) const {
+    return (::_stricmp(lhs.c_str(), rhs.c_str()) < 0);
+  }
+};
+
+// put quotes around a string and escape any quotes inside it
+std::string quote(const std::string& str);
+
+//////////////////////////////////////////////////////////////////////
+// Url
+//////////////////////////////////////////////////////////////////////
+
+template<class CTYPE>
+class Url {
+public:
+  typedef typename Traits<CTYPE>::string string;
+
+  // TODO: Implement Encode/Decode
+  static int Encode(const CTYPE* source, CTYPE* destination, size_t len);
+  static int Encode(const string& source, string& destination);
+  static int Decode(const CTYPE* source, CTYPE* destination, size_t len);
+  static int Decode(const string& source, string& destination);
+
+  Url(const string& url) { do_set_url(url.c_str(), url.size()); }
+  Url(const string& path, const string& host, uint16_t port = HTTP_DEFAULT_PORT)
+      : host_(host), port_(port), secure_(HTTP_SECURE_PORT == port) {
+    set_full_path(path);
+  }
+
+  bool valid() const { return !host_.empty(); }
+  void clear() {
+    host_.clear();
+    port_ = HTTP_DEFAULT_PORT;
+    secure_ = false;
+    path_.assign(1, static_cast<CTYPE>('/'));
+    query_.clear();
+  }
+
+  void set_url(const string& val) {
+    do_set_url(val.c_str(), val.size());
+  }
+  string url() const {
+    string val; do_get_url(&val); return val;
+  }
+
+  void set_address(const string& val) {
+    do_set_address(val.c_str(), val.size());
+  }
+  string address() const {
+    string val; do_get_address(&val); return val;
+  }
+
+  void set_full_path(const string& val) {
+    do_set_full_path(val.c_str(), val.size());
+  }
+  string full_path() const {
+    string val; do_get_full_path(&val); return val;
+  }
+
+  void set_host(const string& val) { host_ = val; }
+  const string& host() const { return host_; }
+
+  void set_port(uint16_t val) { port_ = val; }
+  uint16_t port() const { return port_; }
+
+  void set_secure(bool val) { secure_ = val; }
+  bool secure() const { return secure_; }
+
+  void set_path(const string& val) {
+    if (val.empty()) {
+      path_.assign(1, static_cast<CTYPE>('/'));
+    } else {
+      RTC_DCHECK(val[0] == static_cast<CTYPE>('/'));
+      path_ = val;
+    }
+  }
+  const string& path() const { return path_; }
+
+  void set_query(const string& val) {
+    RTC_DCHECK(val.empty() || (val[0] == static_cast<CTYPE>('?')));
+    query_ = val;
+  }
+  const string& query() const { return query_; }
+
+  bool get_attribute(const string& name, string* value) const;
+
+private:
+  void do_set_url(const CTYPE* val, size_t len);
+  void do_set_address(const CTYPE* val, size_t len);
+  void do_set_full_path(const CTYPE* val, size_t len);
+
+  void do_get_url(string* val) const;
+  void do_get_address(string* val) const;
+  void do_get_full_path(string* val) const;
+
+  string host_, path_, query_;
+  uint16_t port_;
+  bool secure_;
+};
+
+//////////////////////////////////////////////////////////////////////
+// HttpData
+//////////////////////////////////////////////////////////////////////
+
+struct HttpData {
+  typedef std::multimap<std::string, std::string, iless> HeaderMap;
+  typedef HeaderMap::const_iterator const_iterator;
+  typedef HeaderMap::iterator iterator;
+
+  HttpVersion version;
+  std::unique_ptr<StreamInterface> document;
+
+  HttpData();
+
+  enum HeaderCombine { HC_YES, HC_NO, HC_AUTO, HC_REPLACE, HC_NEW };
+  void changeHeader(const std::string& name, const std::string& value,
+                    HeaderCombine combine);
+  inline void addHeader(const std::string& name, const std::string& value,
+                        bool append = true) {
+    changeHeader(name, value, append ? HC_AUTO : HC_NO);
+  }
+  inline void setHeader(const std::string& name, const std::string& value,
+                        bool overwrite = true) {
+    changeHeader(name, value, overwrite ? HC_REPLACE : HC_NEW);
+  }
+  // Returns count of erased headers
+  size_t clearHeader(const std::string& name);
+  // Returns iterator to next header
+  iterator clearHeader(iterator header);
+
+  // keep in mind, this may not do what you want in the face of multiple headers
+  bool hasHeader(const std::string& name, std::string* value) const;
+
+  inline const_iterator begin() const {
+    return headers_.begin();
+  }
+  inline const_iterator end() const {
+    return headers_.end();
+  }
+  inline iterator begin() {
+    return headers_.begin();
+  }
+  inline iterator end() {
+    return headers_.end();
+  }
+  inline const_iterator begin(const std::string& name) const {
+    return headers_.lower_bound(name);
+  }
+  inline const_iterator end(const std::string& name) const {
+    return headers_.upper_bound(name);
+  }
+  inline iterator begin(const std::string& name) {
+    return headers_.lower_bound(name);
+  }
+  inline iterator end(const std::string& name) {
+    return headers_.upper_bound(name);
+  }
+
+  // Convenience methods using HttpHeader
+  inline void changeHeader(HttpHeader header, const std::string& value,
+                           HeaderCombine combine) {
+    changeHeader(ToString(header), value, combine);
+  }
+  inline void addHeader(HttpHeader header, const std::string& value,
+                        bool append = true) {
+    addHeader(ToString(header), value, append);
+  }
+  inline void setHeader(HttpHeader header, const std::string& value,
+                        bool overwrite = true) {
+    setHeader(ToString(header), value, overwrite);
+  }
+  inline void clearHeader(HttpHeader header) {
+    clearHeader(ToString(header));
+  }
+  inline bool hasHeader(HttpHeader header, std::string* value) const {
+    return hasHeader(ToString(header), value);
+  }
+  inline const_iterator begin(HttpHeader header) const {
+    return headers_.lower_bound(ToString(header));
+  }
+  inline const_iterator end(HttpHeader header) const {
+    return headers_.upper_bound(ToString(header));
+  }
+  inline iterator begin(HttpHeader header) {
+    return headers_.lower_bound(ToString(header));
+  }
+  inline iterator end(HttpHeader header) {
+    return headers_.upper_bound(ToString(header));
+  }
+
+  void setContent(const std::string& content_type, StreamInterface* document);
+  void setDocumentAndLength(StreamInterface* document);
+
+  virtual size_t formatLeader(char* buffer, size_t size) const = 0;
+  virtual HttpError parseLeader(const char* line, size_t len) = 0;
+
+protected:
+ virtual ~HttpData();
+  void clear(bool release_document);
+  void copy(const HttpData& src);
+
+private:
+  HeaderMap headers_;
+};
+
+struct HttpRequestData : public HttpData {
+  HttpVerb verb;
+  std::string path;
+
+  HttpRequestData() : verb(HV_GET) { }
+
+  void clear(bool release_document);
+  void copy(const HttpRequestData& src);
+
+  size_t formatLeader(char* buffer, size_t size) const override;
+  HttpError parseLeader(const char* line, size_t len) override;
+
+  bool getAbsoluteUri(std::string* uri) const;
+  bool getRelativeUri(std::string* host, std::string* path) const;
+};
+
+struct HttpResponseData : public HttpData {
+  uint32_t scode;
+  std::string message;
+
+  HttpResponseData() : scode(HC_INTERNAL_SERVER_ERROR) { }
+  void clear(bool release_document);
+  void copy(const HttpResponseData& src);
+
+  // Convenience methods
+  void set_success(uint32_t scode = HC_OK);
+  void set_success(const std::string& content_type,
+                   StreamInterface* document,
+                   uint32_t scode = HC_OK);
+  void set_redirect(const std::string& location,
+                    uint32_t scode = HC_MOVED_TEMPORARILY);
+  void set_error(uint32_t scode);
+
+  size_t formatLeader(char* buffer, size_t size) const override;
+  HttpError parseLeader(const char* line, size_t len) override;
+};
+
+struct HttpTransaction {
+  HttpRequestData request;
+  HttpResponseData response;
+};
+
+//////////////////////////////////////////////////////////////////////
+// Http Authentication
+//////////////////////////////////////////////////////////////////////
+
+struct HttpAuthContext {
+  std::string auth_method;
+  HttpAuthContext(const std::string& auth) : auth_method(auth) { }
+  virtual ~HttpAuthContext() { }
+};
+
+enum HttpAuthResult { HAR_RESPONSE, HAR_IGNORE, HAR_CREDENTIALS, HAR_ERROR };
+
+// 'context' is used by this function to record information between calls.
+// Start by passing a null pointer, then pass the same pointer each additional
+// call.  When the authentication attempt is finished, delete the context.
+// TODO(bugs.webrtc.org/8905): Change "response" to "ZeroOnFreeBuffer".
+HttpAuthResult HttpAuthenticate(
+  const char * challenge, size_t len,
+  const SocketAddress& server,
+  const std::string& method, const std::string& uri,
+  const std::string& username, const CryptString& password,
+  HttpAuthContext *& context, std::string& response, std::string& auth_method);
+
+//////////////////////////////////////////////////////////////////////
+
+} // namespace rtc
+
+#endif // RTC_BASE_HTTPCOMMON_H_
diff --git a/rtc_base/httpcommon_unittest.cc b/rtc_base/httpcommon_unittest.cc
new file mode 100644
index 0000000..997d6e3
--- /dev/null
+++ b/rtc_base/httpcommon_unittest.cc
@@ -0,0 +1,165 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/httpcommon.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/httpcommon-inl.h"
+
+namespace rtc {
+
+#define TEST_PROTOCOL "http://"
+#define TEST_HOST "www.google.com"
+#define TEST_PATH "/folder/file.html"
+#define TEST_QUERY "?query=x&attr=y"
+#define TEST_URL TEST_PROTOCOL TEST_HOST TEST_PATH TEST_QUERY
+
+TEST(Url, DecomposesUrls) {
+  Url<char> url(TEST_URL);
+  EXPECT_TRUE(url.valid());
+  EXPECT_FALSE(url.secure());
+  EXPECT_STREQ(TEST_HOST, url.host().c_str());
+  EXPECT_EQ(80, url.port());
+  EXPECT_STREQ(TEST_PATH, url.path().c_str());
+  EXPECT_STREQ(TEST_QUERY, url.query().c_str());
+  EXPECT_STREQ(TEST_HOST, url.address().c_str());
+  EXPECT_STREQ(TEST_PATH TEST_QUERY, url.full_path().c_str());
+  EXPECT_STREQ(TEST_URL, url.url().c_str());
+}
+
+TEST(Url, ComposesUrls) {
+  // Set in constructor
+  Url<char> url(TEST_PATH TEST_QUERY, TEST_HOST, 80);
+  EXPECT_TRUE(url.valid());
+  EXPECT_FALSE(url.secure());
+  EXPECT_STREQ(TEST_HOST, url.host().c_str());
+  EXPECT_EQ(80, url.port());
+  EXPECT_STREQ(TEST_PATH, url.path().c_str());
+  EXPECT_STREQ(TEST_QUERY, url.query().c_str());
+  EXPECT_STREQ(TEST_HOST, url.address().c_str());
+  EXPECT_STREQ(TEST_PATH TEST_QUERY, url.full_path().c_str());
+  EXPECT_STREQ(TEST_URL, url.url().c_str());
+
+  url.clear();
+  EXPECT_FALSE(url.valid());
+  EXPECT_FALSE(url.secure());
+  EXPECT_STREQ("", url.host().c_str());
+  EXPECT_EQ(80, url.port());
+  EXPECT_STREQ("/", url.path().c_str());
+  EXPECT_STREQ("", url.query().c_str());
+
+  // Set component-wise
+  url.set_host(TEST_HOST);
+  url.set_port(80);
+  url.set_path(TEST_PATH);
+  url.set_query(TEST_QUERY);
+  EXPECT_TRUE(url.valid());
+  EXPECT_FALSE(url.secure());
+  EXPECT_STREQ(TEST_HOST, url.host().c_str());
+  EXPECT_EQ(80, url.port());
+  EXPECT_STREQ(TEST_PATH, url.path().c_str());
+  EXPECT_STREQ(TEST_QUERY, url.query().c_str());
+  EXPECT_STREQ(TEST_HOST, url.address().c_str());
+  EXPECT_STREQ(TEST_PATH TEST_QUERY, url.full_path().c_str());
+  EXPECT_STREQ(TEST_URL, url.url().c_str());
+}
+
+TEST(Url, EnsuresNonEmptyPath) {
+  Url<char> url(TEST_PROTOCOL TEST_HOST);
+  EXPECT_TRUE(url.valid());
+  EXPECT_STREQ("/", url.path().c_str());
+
+  url.clear();
+  EXPECT_STREQ("/", url.path().c_str());
+  url.set_path("");
+  EXPECT_STREQ("/", url.path().c_str());
+
+  url.clear();
+  EXPECT_STREQ("/", url.path().c_str());
+  url.set_full_path("");
+  EXPECT_STREQ("/", url.path().c_str());
+}
+
+TEST(Url, GetQueryAttributes) {
+  Url<char> url(TEST_URL);
+  std::string value;
+  EXPECT_TRUE(url.get_attribute("query", &value));
+  EXPECT_STREQ("x", value.c_str());
+  value.clear();
+  EXPECT_TRUE(url.get_attribute("attr", &value));
+  EXPECT_STREQ("y", value.c_str());
+  value.clear();
+  EXPECT_FALSE(url.get_attribute("Query", &value));
+  EXPECT_TRUE(value.empty());
+}
+
+TEST(Url, SkipsUserAndPassword) {
+  Url<char> url("https://mail.google.com:pwd@badsite.com:12345/asdf");
+  EXPECT_TRUE(url.valid());
+  EXPECT_TRUE(url.secure());
+  EXPECT_STREQ("badsite.com", url.host().c_str());
+  EXPECT_EQ(12345, url.port());
+  EXPECT_STREQ("/asdf", url.path().c_str());
+  EXPECT_STREQ("badsite.com:12345", url.address().c_str());
+}
+
+TEST(Url, SkipsUser) {
+  Url<char> url("https://mail.google.com@badsite.com:12345/asdf");
+  EXPECT_TRUE(url.valid());
+  EXPECT_TRUE(url.secure());
+  EXPECT_STREQ("badsite.com", url.host().c_str());
+  EXPECT_EQ(12345, url.port());
+  EXPECT_STREQ("/asdf", url.path().c_str());
+  EXPECT_STREQ("badsite.com:12345", url.address().c_str());
+}
+
+TEST(HttpResponseData, parseLeaderHttp1_0) {
+  static const char kResponseString[] = "HTTP/1.0 200 OK";
+  HttpResponseData response;
+  EXPECT_EQ(HE_NONE, response.parseLeader(kResponseString,
+                                          sizeof(kResponseString) - 1));
+  EXPECT_EQ(HVER_1_0, response.version);
+  EXPECT_EQ(200U, response.scode);
+}
+
+TEST(HttpResponseData, parseLeaderHttp1_1) {
+  static const char kResponseString[] = "HTTP/1.1 200 OK";
+  HttpResponseData response;
+  EXPECT_EQ(HE_NONE, response.parseLeader(kResponseString,
+                                          sizeof(kResponseString) - 1));
+  EXPECT_EQ(HVER_1_1, response.version);
+  EXPECT_EQ(200U, response.scode);
+}
+
+TEST(HttpResponseData, parseLeaderHttpUnknown) {
+  static const char kResponseString[] = "HTTP 200 OK";
+  HttpResponseData response;
+  EXPECT_EQ(HE_NONE, response.parseLeader(kResponseString,
+                                          sizeof(kResponseString) - 1));
+  EXPECT_EQ(HVER_UNKNOWN, response.version);
+  EXPECT_EQ(200U, response.scode);
+}
+
+TEST(HttpResponseData, parseLeaderHttpFailure) {
+  static const char kResponseString[] = "HTTP/1.1 503 Service Unavailable";
+  HttpResponseData response;
+  EXPECT_EQ(HE_NONE, response.parseLeader(kResponseString,
+                                          sizeof(kResponseString) - 1));
+  EXPECT_EQ(HVER_1_1, response.version);
+  EXPECT_EQ(503U, response.scode);
+}
+
+TEST(HttpResponseData, parseLeaderHttpInvalid) {
+  static const char kResponseString[] = "Durrrrr, what's HTTP?";
+  HttpResponseData response;
+  EXPECT_EQ(HE_PROTOCOL, response.parseLeader(kResponseString,
+                                              sizeof(kResponseString) - 1));
+}
+
+} // namespace rtc
diff --git a/rtc_base/httpserver.cc b/rtc_base/httpserver.cc
new file mode 100644
index 0000000..c36b432
--- /dev/null
+++ b/rtc_base/httpserver.cc
@@ -0,0 +1,287 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+
+#include "rtc_base/httpcommon-inl.h"
+
+#include "rtc_base/asyncsocket.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/httpserver.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/socketstream.h"
+#include "rtc_base/thread.h"
+
+namespace rtc {
+
+///////////////////////////////////////////////////////////////////////////////
+// HttpServer
+///////////////////////////////////////////////////////////////////////////////
+
+HttpServer::HttpServer() : next_connection_id_(1), closing_(false) {
+}
+
+HttpServer::~HttpServer() {
+  if (closing_) {
+    RTC_LOG(LS_WARNING) << "HttpServer::CloseAll has not completed";
+  }
+  for (ConnectionMap::iterator it = connections_.begin();
+       it != connections_.end();
+       ++it) {
+    StreamInterface* stream = it->second->EndProcess();
+    delete stream;
+    delete it->second;
+  }
+}
+
+int
+HttpServer::HandleConnection(StreamInterface* stream) {
+  int connection_id = next_connection_id_++;
+  RTC_DCHECK(connection_id != HTTP_INVALID_CONNECTION_ID);
+  Connection* connection = new Connection(connection_id, this);
+  connections_.insert(ConnectionMap::value_type(connection_id, connection));
+  connection->BeginProcess(stream);
+  return connection_id;
+}
+
+void
+HttpServer::Respond(HttpServerTransaction* transaction) {
+  int connection_id = transaction->connection_id();
+  if (Connection* connection = Find(connection_id)) {
+    connection->Respond(transaction);
+  } else {
+    delete transaction;
+    // We may be tempted to SignalHttpComplete, but that implies that a
+    // connection still exists.
+  }
+}
+
+void
+HttpServer::Close(int connection_id, bool force) {
+  if (Connection* connection = Find(connection_id)) {
+    connection->InitiateClose(force);
+  }
+}
+
+void
+HttpServer::CloseAll(bool force) {
+  if (connections_.empty()) {
+    SignalCloseAllComplete(this);
+    return;
+  }
+  closing_ = true;
+  std::list<Connection*> connections;
+  for (ConnectionMap::const_iterator it = connections_.begin();
+       it != connections_.end(); ++it) {
+    connections.push_back(it->second);
+  }
+  for (std::list<Connection*>::const_iterator it = connections.begin();
+      it != connections.end(); ++it) {
+    (*it)->InitiateClose(force);
+  }
+}
+
+HttpServer::Connection*
+HttpServer::Find(int connection_id) {
+  ConnectionMap::iterator it = connections_.find(connection_id);
+  if (it == connections_.end())
+    return nullptr;
+  return it->second;
+}
+
+void
+HttpServer::Remove(int connection_id) {
+  ConnectionMap::iterator it = connections_.find(connection_id);
+  if (it == connections_.end()) {
+    RTC_NOTREACHED();
+    return;
+  }
+  Connection* connection = it->second;
+  connections_.erase(it);
+  SignalConnectionClosed(this, connection_id, connection->EndProcess());
+  delete connection;
+  if (closing_ && connections_.empty()) {
+    closing_ = false;
+    SignalCloseAllComplete(this);
+  }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// HttpServer::Connection
+///////////////////////////////////////////////////////////////////////////////
+
+HttpServer::Connection::Connection(int connection_id, HttpServer* server)
+    : connection_id_(connection_id),
+      server_(server),
+      current_(nullptr),
+      signalling_(false),
+      close_(false) {}
+
+HttpServer::Connection::~Connection() {
+  // It's possible that an object hosted inside this transaction signalled
+  // an event which caused the connection to close.
+  Thread::Current()->Dispose(current_);
+}
+
+void
+HttpServer::Connection::BeginProcess(StreamInterface* stream) {
+  base_.notify(this);
+  base_.attach(stream);
+  current_ = new HttpServerTransaction(connection_id_);
+  if (base_.mode() != HM_CONNECT)
+    base_.recv(&current_->request);
+}
+
+StreamInterface*
+HttpServer::Connection::EndProcess() {
+  base_.notify(nullptr);
+  base_.abort(HE_DISCONNECTED);
+  return base_.detach();
+}
+
+void
+HttpServer::Connection::Respond(HttpServerTransaction* transaction) {
+  RTC_DCHECK(current_ == nullptr);
+  current_ = transaction;
+  if (current_->response.begin() == current_->response.end()) {
+    current_->response.set_error(HC_INTERNAL_SERVER_ERROR);
+  }
+  bool keep_alive = HttpShouldKeepAlive(current_->request);
+  current_->response.setHeader(HH_CONNECTION,
+                               keep_alive ? "Keep-Alive" : "Close",
+                               false);
+  close_ = !HttpShouldKeepAlive(current_->response);
+  base_.send(&current_->response);
+}
+
+void
+HttpServer::Connection::InitiateClose(bool force) {
+  bool request_in_progress = (HM_SEND == base_.mode()) || (nullptr == current_);
+  if (!signalling_ && (force || !request_in_progress)) {
+    server_->Remove(connection_id_);
+  } else {
+    close_ = true;
+  }
+}
+
+//
+// IHttpNotify Implementation
+//
+
+HttpError
+HttpServer::Connection::onHttpHeaderComplete(bool chunked, size_t& data_size) {
+  if (data_size == SIZE_UNKNOWN) {
+    data_size = 0;
+  }
+  RTC_DCHECK(current_ != nullptr);
+  bool custom_document = false;
+  server_->SignalHttpRequestHeader(server_, current_, &custom_document);
+  if (!custom_document) {
+    current_->request.document.reset(new MemoryStream);
+  }
+  return HE_NONE;
+}
+
+void
+HttpServer::Connection::onHttpComplete(HttpMode mode, HttpError err) {
+  if (mode == HM_SEND) {
+    RTC_DCHECK(current_ != nullptr);
+    signalling_ = true;
+    server_->SignalHttpRequestComplete(server_, current_, err);
+    signalling_ = false;
+    if (close_) {
+      // Force a close
+      err = HE_DISCONNECTED;
+    }
+  }
+  if (err != HE_NONE) {
+    server_->Remove(connection_id_);
+  } else if (mode == HM_CONNECT) {
+    base_.recv(&current_->request);
+  } else if (mode == HM_RECV) {
+    RTC_DCHECK(current_ != nullptr);
+    // TODO: do we need this?
+    //request_.document_->rewind();
+    HttpServerTransaction* transaction = current_;
+    current_ = nullptr;
+    server_->SignalHttpRequest(server_, transaction);
+  } else if (mode == HM_SEND) {
+    Thread::Current()->Dispose(current_->response.document.release());
+    current_->request.clear(true);
+    current_->response.clear(true);
+    base_.recv(&current_->request);
+  } else {
+    RTC_NOTREACHED();
+  }
+}
+
+void
+HttpServer::Connection::onHttpClosed(HttpError err) {
+  server_->Remove(connection_id_);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// HttpListenServer
+///////////////////////////////////////////////////////////////////////////////
+
+HttpListenServer::HttpListenServer() {
+  SignalConnectionClosed.connect(this, &HttpListenServer::OnConnectionClosed);
+}
+
+HttpListenServer::~HttpListenServer() {
+}
+
+int HttpListenServer::Listen(const SocketAddress& address) {
+  AsyncSocket* sock =
+      Thread::Current()->socketserver()->CreateAsyncSocket(address.family(),
+                                                           SOCK_STREAM);
+  if (!sock) {
+    return SOCKET_ERROR;
+  }
+  listener_.reset(sock);
+  listener_->SignalReadEvent.connect(this, &HttpListenServer::OnReadEvent);
+  if ((listener_->Bind(address) != SOCKET_ERROR) &&
+      (listener_->Listen(5) != SOCKET_ERROR))
+    return 0;
+  return listener_->GetError();
+}
+
+bool HttpListenServer::GetAddress(SocketAddress* address) const {
+  if (!listener_) {
+    return false;
+  }
+  *address = listener_->GetLocalAddress();
+  return !address->IsNil();
+}
+
+void HttpListenServer::StopListening() {
+  if (listener_) {
+    listener_->Close();
+  }
+}
+
+void HttpListenServer::OnReadEvent(AsyncSocket* socket) {
+  RTC_DCHECK(socket == listener_.get());
+  AsyncSocket* incoming = listener_->Accept(nullptr);
+  if (incoming) {
+    StreamInterface* stream = new SocketStream(incoming);
+    HandleConnection(stream);
+  }
+}
+
+void HttpListenServer::OnConnectionClosed(HttpServer* server,
+                                          int connection_id,
+                                          StreamInterface* stream) {
+  Thread::Current()->Dispose(stream);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+}  // namespace rtc
diff --git a/rtc_base/httpserver.h b/rtc_base/httpserver.h
new file mode 100644
index 0000000..61442a9
--- /dev/null
+++ b/rtc_base/httpserver.h
@@ -0,0 +1,139 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_HTTPSERVER_H_
+#define RTC_BASE_HTTPSERVER_H_
+
+#include <map>
+#include <memory>
+
+#include "rtc_base/httpbase.h"
+
+namespace rtc {
+
+class AsyncSocket;
+class HttpServer;
+class SocketAddress;
+
+//////////////////////////////////////////////////////////////////////
+// HttpServer
+//////////////////////////////////////////////////////////////////////
+
+const int HTTP_INVALID_CONNECTION_ID = 0;
+
+struct HttpServerTransaction : public HttpTransaction {
+public:
+  HttpServerTransaction(int id) : connection_id_(id) { }
+  int connection_id() const { return connection_id_; }
+
+private:
+  int connection_id_;
+};
+
+class HttpServer {
+public:
+  HttpServer();
+  virtual ~HttpServer();
+
+  int HandleConnection(StreamInterface* stream);
+  // Due to sigslot issues, we can't destroy some streams at an arbitrary time.
+  sigslot::signal3<HttpServer*, int, StreamInterface*> SignalConnectionClosed;
+
+  // This signal occurs when the HTTP request headers have been received, but
+  // before the request body is written to the request document.  By default,
+  // the request document is a MemoryStream.  By handling this signal, the
+  // document can be overridden, in which case the third signal argument should
+  // be set to true.  In the case where the request body should be ignored,
+  // the document can be set to null.  Note that the transaction object is still
+  // owened by the HttpServer at this point.
+  sigslot::signal3<HttpServer*, HttpServerTransaction*, bool*>
+    SignalHttpRequestHeader;
+
+  // An HTTP request has been made, and is available in the transaction object.
+  // Populate the transaction's response, and then return the object via the
+  // Respond method.  Note that during this time, ownership of the transaction
+  // object is transferred, so it may be passed between threads, although
+  // respond must be called on the server's active thread.
+  sigslot::signal2<HttpServer*, HttpServerTransaction*> SignalHttpRequest;
+  void Respond(HttpServerTransaction* transaction);
+
+  // If you want to know when a request completes, listen to this event.
+  sigslot::signal3<HttpServer*, HttpServerTransaction*, int>
+    SignalHttpRequestComplete;
+
+  // Stop processing the connection indicated by connection_id.
+  // Unless force is true, the server will complete sending a response that is
+  // in progress.
+  void Close(int connection_id, bool force);
+  void CloseAll(bool force);
+
+  // After calling CloseAll, this event is signalled to indicate that all
+  // outstanding connections have closed.
+  sigslot::signal1<HttpServer*> SignalCloseAllComplete;
+
+private:
+  class Connection : private IHttpNotify {
+  public:
+    Connection(int connection_id, HttpServer* server);
+    ~Connection() override;
+
+    void BeginProcess(StreamInterface* stream);
+    StreamInterface* EndProcess();
+
+    void Respond(HttpServerTransaction* transaction);
+    void InitiateClose(bool force);
+
+    // IHttpNotify Interface
+    HttpError onHttpHeaderComplete(bool chunked, size_t& data_size) override;
+    void onHttpComplete(HttpMode mode, HttpError err) override;
+    void onHttpClosed(HttpError err) override;
+
+    int connection_id_;
+    HttpServer* server_;
+    HttpBase base_;
+    HttpServerTransaction* current_;
+    bool signalling_, close_;
+  };
+
+  Connection* Find(int connection_id);
+  void Remove(int connection_id);
+
+  friend class Connection;
+  typedef std::map<int,Connection*> ConnectionMap;
+
+  ConnectionMap connections_;
+  int next_connection_id_;
+  bool closing_;
+};
+
+//////////////////////////////////////////////////////////////////////
+
+class HttpListenServer : public HttpServer, public sigslot::has_slots<> {
+public:
+  HttpListenServer();
+  ~HttpListenServer() override;
+
+  int Listen(const SocketAddress& address);
+  bool GetAddress(SocketAddress* address) const;
+  void StopListening();
+
+private:
+  void OnReadEvent(AsyncSocket* socket);
+  void OnConnectionClosed(HttpServer* server, int connection_id,
+                          StreamInterface* stream);
+
+  std::unique_ptr<AsyncSocket> listener_;
+};
+
+//////////////////////////////////////////////////////////////////////
+
+}  // namespace rtc
+
+#endif // RTC_BASE_HTTPSERVER_H_
diff --git a/rtc_base/httpserver_unittest.cc b/rtc_base/httpserver_unittest.cc
new file mode 100644
index 0000000..5e86c88
--- /dev/null
+++ b/rtc_base/httpserver_unittest.cc
@@ -0,0 +1,130 @@
+/*
+ *  Copyright 2007 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/httpserver.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/testutils.h"
+
+using namespace webrtc::testing;
+
+namespace rtc {
+
+namespace {
+  const char* const kRequest =
+    "GET /index.html HTTP/1.1\r\n"
+    "Host: localhost\r\n"
+    "\r\n";
+
+  struct HttpServerMonitor : public sigslot::has_slots<> {
+    HttpServerTransaction* transaction;
+    bool server_closed, connection_closed;
+
+    HttpServerMonitor(HttpServer* server)
+        : transaction(nullptr), server_closed(false), connection_closed(false) {
+      server->SignalCloseAllComplete.connect(this,
+        &HttpServerMonitor::OnClosed);
+      server->SignalHttpRequest.connect(this, &HttpServerMonitor::OnRequest);
+      server->SignalHttpRequestComplete.connect(this,
+        &HttpServerMonitor::OnRequestComplete);
+      server->SignalConnectionClosed.connect(this,
+        &HttpServerMonitor::OnConnectionClosed);
+    }
+    void OnRequest(HttpServer*, HttpServerTransaction* t) {
+      ASSERT_FALSE(transaction);
+      transaction = t;
+      transaction->response.set_success();
+      transaction->response.setHeader(HH_CONNECTION, "Close");
+    }
+    void OnRequestComplete(HttpServer*, HttpServerTransaction* t, int) {
+      ASSERT_EQ(transaction, t);
+      transaction = nullptr;
+    }
+    void OnClosed(HttpServer*) {
+      server_closed = true;
+    }
+    void OnConnectionClosed(HttpServer*, int, StreamInterface* stream) {
+      connection_closed = true;
+      delete stream;
+    }
+  };
+
+  void CreateClientConnection(HttpServer& server,
+                              HttpServerMonitor& monitor,
+                              bool send_request) {
+    StreamSource* client = new StreamSource;
+    client->SetState(SS_OPEN);
+    server.HandleConnection(client);
+    EXPECT_FALSE(monitor.server_closed);
+    EXPECT_FALSE(monitor.transaction);
+
+    if (send_request) {
+      // Simulate a request
+      client->QueueString(kRequest);
+      EXPECT_FALSE(monitor.server_closed);
+    }
+  }
+}  // anonymous namespace
+
+TEST(HttpServer, DoesNotSignalCloseUnlessCloseAllIsCalled) {
+  HttpServer server;
+  HttpServerMonitor monitor(&server);
+  // Add an active client connection
+  CreateClientConnection(server, monitor, true);
+  // Simulate a response
+  ASSERT_TRUE(nullptr != monitor.transaction);
+  server.Respond(monitor.transaction);
+  EXPECT_FALSE(monitor.transaction);
+  // Connection has closed, but no server close signal
+  EXPECT_FALSE(monitor.server_closed);
+  EXPECT_TRUE(monitor.connection_closed);
+}
+
+TEST(HttpServer, SignalsCloseWhenNoConnectionsAreActive) {
+  HttpServer server;
+  HttpServerMonitor monitor(&server);
+  // Add an idle client connection
+  CreateClientConnection(server, monitor, false);
+  // Perform graceful close
+  server.CloseAll(false);
+  // Connections have all closed
+  EXPECT_TRUE(monitor.server_closed);
+  EXPECT_TRUE(monitor.connection_closed);
+}
+
+TEST(HttpServer, SignalsCloseAfterGracefulCloseAll) {
+  HttpServer server;
+  HttpServerMonitor monitor(&server);
+  // Add an active client connection
+  CreateClientConnection(server, monitor, true);
+  // Initiate a graceful close
+  server.CloseAll(false);
+  EXPECT_FALSE(monitor.server_closed);
+  // Simulate a response
+  ASSERT_TRUE(nullptr != monitor.transaction);
+  server.Respond(monitor.transaction);
+  EXPECT_FALSE(monitor.transaction);
+  // Connections have all closed
+  EXPECT_TRUE(monitor.server_closed);
+  EXPECT_TRUE(monitor.connection_closed);
+}
+
+TEST(HttpServer, SignalsCloseAfterForcedCloseAll) {
+  HttpServer server;
+  HttpServerMonitor monitor(&server);
+  // Add an active client connection
+  CreateClientConnection(server, monitor, true);
+  // Initiate a forceful close
+  server.CloseAll(true);
+  // Connections have all closed
+  EXPECT_TRUE(monitor.server_closed);
+  EXPECT_TRUE(monitor.connection_closed);
+}
+
+} // namespace rtc
diff --git a/rtc_base/ifaddrs-android.cc b/rtc_base/ifaddrs-android.cc
new file mode 100644
index 0000000..85a4497
--- /dev/null
+++ b/rtc_base/ifaddrs-android.cc
@@ -0,0 +1,223 @@
+/*
+ *  Copyright 2012 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#if defined(WEBRTC_ANDROID)
+#include "rtc_base/ifaddrs-android.h"
+#include <errno.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <net/if.h>
+#include <netinet/in.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/utsname.h>
+#include <unistd.h>
+
+namespace {
+
+struct netlinkrequest {
+  nlmsghdr header;
+  ifaddrmsg msg;
+};
+
+const int kMaxReadSize = 4096;
+
+}  // namespace
+
+namespace rtc {
+
+int set_ifname(struct ifaddrs* ifaddr, int interface) {
+  char buf[IFNAMSIZ] = {0};
+  char* name = if_indextoname(interface, buf);
+  if (name == nullptr) {
+    return -1;
+  }
+  ifaddr->ifa_name = new char[strlen(name) + 1];
+  strncpy(ifaddr->ifa_name, name, strlen(name) + 1);
+  return 0;
+}
+
+int set_flags(struct ifaddrs* ifaddr) {
+  int fd = socket(AF_INET, SOCK_DGRAM, 0);
+  if (fd == -1) {
+    return -1;
+  }
+  ifreq ifr;
+  memset(&ifr, 0, sizeof(ifr));
+  strncpy(ifr.ifr_name, ifaddr->ifa_name, IFNAMSIZ - 1);
+  int rc = ioctl(fd, SIOCGIFFLAGS, &ifr);
+  close(fd);
+  if (rc == -1) {
+    return -1;
+  }
+  ifaddr->ifa_flags = ifr.ifr_flags;
+  return 0;
+}
+
+int set_addresses(struct ifaddrs* ifaddr, ifaddrmsg* msg, void* data,
+                  size_t len) {
+  if (msg->ifa_family == AF_INET) {
+    sockaddr_in* sa = new sockaddr_in;
+    sa->sin_family = AF_INET;
+    memcpy(&sa->sin_addr, data, len);
+    ifaddr->ifa_addr = reinterpret_cast<sockaddr*>(sa);
+  } else if (msg->ifa_family == AF_INET6) {
+    sockaddr_in6* sa = new sockaddr_in6;
+    sa->sin6_family = AF_INET6;
+    sa->sin6_scope_id = msg->ifa_index;
+    memcpy(&sa->sin6_addr, data, len);
+    ifaddr->ifa_addr = reinterpret_cast<sockaddr*>(sa);
+  } else {
+    return -1;
+  }
+  return 0;
+}
+
+int make_prefixes(struct ifaddrs* ifaddr, int family, int prefixlen) {
+  char* prefix = nullptr;
+  if (family == AF_INET) {
+    sockaddr_in* mask = new sockaddr_in;
+    mask->sin_family = AF_INET;
+    memset(&mask->sin_addr, 0, sizeof(in_addr));
+    ifaddr->ifa_netmask = reinterpret_cast<sockaddr*>(mask);
+    if (prefixlen > 32) {
+      prefixlen = 32;
+    }
+    prefix = reinterpret_cast<char*>(&mask->sin_addr);
+  } else if (family == AF_INET6) {
+    sockaddr_in6* mask = new sockaddr_in6;
+    mask->sin6_family = AF_INET6;
+    memset(&mask->sin6_addr, 0, sizeof(in6_addr));
+    ifaddr->ifa_netmask = reinterpret_cast<sockaddr*>(mask);
+    if (prefixlen > 128) {
+      prefixlen = 128;
+    }
+    prefix = reinterpret_cast<char*>(&mask->sin6_addr);
+  } else {
+    return -1;
+  }
+  for (int i = 0; i < (prefixlen / 8); i++) {
+    *prefix++ = 0xFF;
+  }
+  char remainder = 0xff;
+  remainder <<= (8 - prefixlen % 8);
+  *prefix = remainder;
+  return 0;
+}
+
+int populate_ifaddrs(struct ifaddrs* ifaddr, ifaddrmsg* msg, void* bytes,
+                     size_t len) {
+  if (set_ifname(ifaddr, msg->ifa_index) != 0) {
+    return -1;
+  }
+  if (set_flags(ifaddr) != 0) {
+    return -1;
+  }
+  if (set_addresses(ifaddr, msg, bytes, len) != 0) {
+    return -1;
+  }
+  if (make_prefixes(ifaddr, msg->ifa_family, msg->ifa_prefixlen) != 0) {
+    return -1;
+  }
+  return 0;
+}
+
+int getifaddrs(struct ifaddrs** result) {
+  int fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+  if (fd < 0) {
+    return -1;
+  }
+
+  netlinkrequest ifaddr_request;
+  memset(&ifaddr_request, 0, sizeof(ifaddr_request));
+  ifaddr_request.header.nlmsg_flags = NLM_F_ROOT | NLM_F_REQUEST;
+  ifaddr_request.header.nlmsg_type = RTM_GETADDR;
+  ifaddr_request.header.nlmsg_len = NLMSG_LENGTH(sizeof(ifaddrmsg));
+
+  ssize_t count = send(fd, &ifaddr_request, ifaddr_request.header.nlmsg_len, 0);
+  if (static_cast<size_t>(count) != ifaddr_request.header.nlmsg_len) {
+    close(fd);
+    return -1;
+  }
+  struct ifaddrs* start = nullptr;
+  struct ifaddrs* current = nullptr;
+  char buf[kMaxReadSize];
+  ssize_t amount_read = recv(fd, &buf, kMaxReadSize, 0);
+  while (amount_read > 0) {
+    nlmsghdr* header = reinterpret_cast<nlmsghdr*>(&buf[0]);
+    size_t header_size = static_cast<size_t>(amount_read);
+    for ( ; NLMSG_OK(header, header_size);
+          header = NLMSG_NEXT(header, header_size)) {
+      switch (header->nlmsg_type) {
+        case NLMSG_DONE:
+          // Success. Return.
+          *result = start;
+          close(fd);
+          return 0;
+        case NLMSG_ERROR:
+          close(fd);
+          freeifaddrs(start);
+          return -1;
+        case RTM_NEWADDR: {
+          ifaddrmsg* address_msg =
+              reinterpret_cast<ifaddrmsg*>(NLMSG_DATA(header));
+          rtattr* rta = IFA_RTA(address_msg);
+          ssize_t payload_len = IFA_PAYLOAD(header);
+          while (RTA_OK(rta, payload_len)) {
+            if (rta->rta_type == IFA_ADDRESS) {
+              int family = address_msg->ifa_family;
+              if (family == AF_INET || family == AF_INET6) {
+                ifaddrs* newest = new ifaddrs;
+                memset(newest, 0, sizeof(ifaddrs));
+                if (current) {
+                  current->ifa_next = newest;
+                } else {
+                  start = newest;
+                }
+                if (populate_ifaddrs(newest, address_msg, RTA_DATA(rta),
+                                     RTA_PAYLOAD(rta)) != 0) {
+                  freeifaddrs(start);
+                  *result = nullptr;
+                  return -1;
+                }
+                current = newest;
+              }
+            }
+            rta = RTA_NEXT(rta, payload_len);
+          }
+          break;
+        }
+      }
+    }
+    amount_read = recv(fd, &buf, kMaxReadSize, 0);
+  }
+  close(fd);
+  freeifaddrs(start);
+  return -1;
+}
+
+void freeifaddrs(struct ifaddrs* addrs) {
+  struct ifaddrs* last = nullptr;
+  struct ifaddrs* cursor = addrs;
+  while (cursor) {
+    delete[] cursor->ifa_name;
+    delete cursor->ifa_addr;
+    delete cursor->ifa_netmask;
+    last = cursor;
+    cursor = cursor->ifa_next;
+    delete last;
+  }
+}
+
+}  // namespace rtc
+#endif  // defined(WEBRTC_ANDROID)
diff --git a/rtc_base/ifaddrs-android.h b/rtc_base/ifaddrs-android.h
new file mode 100644
index 0000000..82b4cb3
--- /dev/null
+++ b/rtc_base/ifaddrs-android.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright 2013 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_IFADDRS_ANDROID_H_
+#define RTC_BASE_IFADDRS_ANDROID_H_
+
+#include <stdio.h>
+#include <sys/socket.h>
+
+
+// Implementation of getifaddrs for Android.
+// Fills out a list of ifaddr structs (see below) which contain information
+// about every network interface available on the host.
+// See 'man getifaddrs' on Linux or OS X (nb: it is not a POSIX function).
+struct ifaddrs {
+  struct ifaddrs* ifa_next;
+  char* ifa_name;
+  unsigned int ifa_flags;
+  struct sockaddr* ifa_addr;
+  struct sockaddr* ifa_netmask;
+  // Real ifaddrs has broadcast, point to point and data members.
+  // We don't need them (yet?).
+};
+
+namespace rtc {
+
+int getifaddrs(struct ifaddrs** result);
+void freeifaddrs(struct ifaddrs* addrs);
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_IFADDRS_ANDROID_H_
diff --git a/rtc_base/ifaddrs_converter.cc b/rtc_base/ifaddrs_converter.cc
new file mode 100644
index 0000000..2db99ef
--- /dev/null
+++ b/rtc_base/ifaddrs_converter.cc
@@ -0,0 +1,60 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/ifaddrs_converter.h"
+
+namespace rtc {
+
+IfAddrsConverter::IfAddrsConverter() {}
+
+IfAddrsConverter::~IfAddrsConverter() {}
+
+bool IfAddrsConverter::ConvertIfAddrsToIPAddress(
+    const struct ifaddrs* interface,
+    InterfaceAddress* ip,
+    IPAddress* mask) {
+  switch (interface->ifa_addr->sa_family) {
+    case AF_INET: {
+      *ip = IPAddress(
+          reinterpret_cast<sockaddr_in*>(interface->ifa_addr)->sin_addr);
+      *mask = IPAddress(
+          reinterpret_cast<sockaddr_in*>(interface->ifa_netmask)->sin_addr);
+      return true;
+    }
+    case AF_INET6: {
+      int ip_attributes = IPV6_ADDRESS_FLAG_NONE;
+      if (!ConvertNativeAttributesToIPAttributes(interface, &ip_attributes)) {
+        return false;
+      }
+      *ip = InterfaceAddress(
+          reinterpret_cast<sockaddr_in6*>(interface->ifa_addr)->sin6_addr,
+          ip_attributes);
+      *mask = IPAddress(
+          reinterpret_cast<sockaddr_in6*>(interface->ifa_netmask)->sin6_addr);
+      return true;
+    }
+    default: { return false; }
+  }
+}
+
+bool IfAddrsConverter::ConvertNativeAttributesToIPAttributes(
+    const struct ifaddrs* interface,
+    int* ip_attributes) {
+  *ip_attributes = IPV6_ADDRESS_FLAG_NONE;
+  return true;
+}
+
+#if !defined(WEBRTC_MAC)
+// For MAC and IOS, it's defined in macifaddrs_converter.cc
+IfAddrsConverter* CreateIfAddrsConverter() {
+  return new IfAddrsConverter();
+}
+#endif
+}  // namespace rtc
diff --git a/rtc_base/ifaddrs_converter.h b/rtc_base/ifaddrs_converter.h
new file mode 100644
index 0000000..35bef5b
--- /dev/null
+++ b/rtc_base/ifaddrs_converter.h
@@ -0,0 +1,45 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_IFADDRS_CONVERTER_H_
+#define RTC_BASE_IFADDRS_CONVERTER_H_
+
+#if defined(WEBRTC_ANDROID)
+#include "rtc_base/ifaddrs-android.h"
+#else
+#include <ifaddrs.h>
+#endif  // WEBRTC_ANDROID
+
+#include "rtc_base/ipaddress.h"
+
+namespace rtc {
+
+// This class converts native interface addresses to our internal IPAddress
+// class. Subclasses should override ConvertNativeToIPAttributes to implement
+// the different ways of retrieving IPv6 attributes for various POSIX platforms.
+class IfAddrsConverter {
+ public:
+  IfAddrsConverter();
+  virtual ~IfAddrsConverter();
+  virtual bool ConvertIfAddrsToIPAddress(const struct ifaddrs* interface,
+                                         InterfaceAddress* ipaddress,
+                                         IPAddress* mask);
+
+ protected:
+  virtual bool ConvertNativeAttributesToIPAttributes(
+      const struct ifaddrs* interface,
+      int* ip_attributes);
+};
+
+IfAddrsConverter* CreateIfAddrsConverter();
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_IFADDRS_CONVERTER_H_
diff --git a/rtc_base/ignore_wundef.h b/rtc_base/ignore_wundef.h
new file mode 100644
index 0000000..1564096
--- /dev/null
+++ b/rtc_base/ignore_wundef.h
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_IGNORE_WUNDEF_H_
+#define RTC_BASE_IGNORE_WUNDEF_H_
+
+// If a header file uses #if on possibly undefined macros (and it's for some
+// reason not possible to just fix the header file), include it like this:
+//
+//   RTC_PUSH_IGNORING_WUNDEF()
+//   #include "misbehaving_header.h"
+//   RTC_POP_IGNORING_WUNDEF()
+//
+// This will cause the compiler to not emit -Wundef warnings for that file.
+
+#ifdef __clang__
+#define RTC_PUSH_IGNORING_WUNDEF() \
+  _Pragma("clang diagnostic push") \
+      _Pragma("clang diagnostic ignored \"-Wundef\"")
+#define RTC_POP_IGNORING_WUNDEF() _Pragma("clang diagnostic pop")
+#else
+#define RTC_PUSH_IGNORING_WUNDEF()
+#define RTC_POP_IGNORING_WUNDEF()
+#endif  // __clang__
+
+#endif  // RTC_BASE_IGNORE_WUNDEF_H_
diff --git a/rtc_base/ipaddress.cc b/rtc_base/ipaddress.cc
new file mode 100644
index 0000000..d441f07
--- /dev/null
+++ b/rtc_base/ipaddress.cc
@@ -0,0 +1,565 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#if defined(WEBRTC_POSIX)
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#ifdef OPENBSD
+#include <netinet/in_systm.h>
+#endif
+#ifndef __native_client__
+#include <netinet/ip.h>
+#endif
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <unistd.h>
+#endif
+
+#include <stdio.h>
+
+#include "rtc_base/byteorder.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/ipaddress.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/nethelpers.h"
+#include "rtc_base/stringutils.h"
+
+#if defined(WEBRTC_WIN)
+#include "rtc_base/win32.h"
+#endif  // WEBRTC_WIN
+
+namespace rtc {
+
+// Prefixes used for categorizing IPv6 addresses.
+static const in6_addr kV4MappedPrefix = {{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                                           0xFF, 0xFF, 0}}};
+static const in6_addr k6To4Prefix = {{{0x20, 0x02, 0}}};
+static const in6_addr kTeredoPrefix = {{{0x20, 0x01, 0x00, 0x00}}};
+static const in6_addr kV4CompatibilityPrefix = {{{0}}};
+static const in6_addr k6BonePrefix = {{{0x3f, 0xfe, 0}}};
+static const in6_addr kPrivateNetworkPrefix = {{{0xFD}}};
+
+static bool IPIsHelper(const IPAddress& ip,
+                       const in6_addr& tomatch, int length);
+static in_addr ExtractMappedAddress(const in6_addr& addr);
+
+uint32_t IPAddress::v4AddressAsHostOrderInteger() const {
+  if (family_ == AF_INET) {
+    return NetworkToHost32(u_.ip4.s_addr);
+  } else {
+    return 0;
+  }
+}
+
+bool IPAddress::IsNil() const {
+  return IPIsUnspec(*this);
+}
+
+size_t IPAddress::Size() const {
+  switch (family_) {
+    case AF_INET:
+      return sizeof(in_addr);
+    case AF_INET6:
+      return sizeof(in6_addr);
+  }
+  return 0;
+}
+
+
+bool IPAddress::operator==(const IPAddress &other) const {
+  if (family_ != other.family_) {
+    return false;
+  }
+  if (family_ == AF_INET) {
+    return memcmp(&u_.ip4, &other.u_.ip4, sizeof(u_.ip4)) == 0;
+  }
+  if (family_ == AF_INET6) {
+    return memcmp(&u_.ip6, &other.u_.ip6, sizeof(u_.ip6)) == 0;
+  }
+  return family_ == AF_UNSPEC;
+}
+
+bool IPAddress::operator!=(const IPAddress &other) const {
+  return !((*this) == other);
+}
+
+bool IPAddress::operator >(const IPAddress &other) const {
+  return (*this) != other && !((*this) < other);
+}
+
+bool IPAddress::operator <(const IPAddress &other) const {
+  // IPv4 is 'less than' IPv6
+  if (family_ != other.family_) {
+    if (family_ == AF_UNSPEC) {
+      return true;
+    }
+    if (family_ == AF_INET && other.family_ == AF_INET6) {
+      return true;
+    }
+    return false;
+  }
+  // Comparing addresses of the same family.
+  switch (family_) {
+    case AF_INET: {
+      return NetworkToHost32(u_.ip4.s_addr) <
+          NetworkToHost32(other.u_.ip4.s_addr);
+    }
+    case AF_INET6: {
+      return memcmp(&u_.ip6.s6_addr, &other.u_.ip6.s6_addr, 16) < 0;
+    }
+  }
+  // Catches AF_UNSPEC and invalid addresses.
+  return false;
+}
+
+std::ostream& operator<<(std::ostream& os, const IPAddress& ip) {
+  os << ip.ToString();
+  return os;
+}
+
+in6_addr IPAddress::ipv6_address() const {
+  return u_.ip6;
+}
+
+in_addr IPAddress::ipv4_address() const {
+  return u_.ip4;
+}
+
+std::string IPAddress::ToString() const {
+  if (family_ != AF_INET && family_ != AF_INET6) {
+    return std::string();
+  }
+  char buf[INET6_ADDRSTRLEN] = {0};
+  const void* src = &u_.ip4;
+  if (family_ == AF_INET6) {
+    src = &u_.ip6;
+  }
+  if (!rtc::inet_ntop(family_, src, buf, sizeof(buf))) {
+    return std::string();
+  }
+  return std::string(buf);
+}
+
+std::string IPAddress::ToSensitiveString() const {
+#if !defined(NDEBUG)
+  // Return non-stripped in debug.
+  return ToString();
+#else
+  switch (family_) {
+    case AF_INET: {
+      std::string address = ToString();
+      size_t find_pos = address.rfind('.');
+      if (find_pos == std::string::npos)
+        return std::string();
+      address.resize(find_pos);
+      address += ".x";
+      return address;
+    }
+    case AF_INET6: {
+      std::string result;
+      result.resize(INET6_ADDRSTRLEN);
+      in6_addr addr = ipv6_address();
+      size_t len =
+          rtc::sprintfn(&(result[0]), result.size(), "%x:%x:%x:x:x:x:x:x",
+                        (addr.s6_addr[0] << 8) + addr.s6_addr[1],
+                        (addr.s6_addr[2] << 8) + addr.s6_addr[3],
+                        (addr.s6_addr[4] << 8) + addr.s6_addr[5]);
+      result.resize(len);
+      return result;
+    }
+  }
+  return std::string();
+#endif
+}
+
+IPAddress IPAddress::Normalized() const {
+  if (family_ != AF_INET6) {
+    return *this;
+  }
+  if (!IPIsV4Mapped(*this)) {
+    return *this;
+  }
+  in_addr addr = ExtractMappedAddress(u_.ip6);
+  return IPAddress(addr);
+}
+
+IPAddress IPAddress::AsIPv6Address() const {
+  if (family_ != AF_INET) {
+    return *this;
+  }
+  in6_addr v6addr = kV4MappedPrefix;
+  ::memcpy(&v6addr.s6_addr[12], &u_.ip4.s_addr, sizeof(u_.ip4.s_addr));
+  return IPAddress(v6addr);
+}
+
+bool InterfaceAddress::operator==(const InterfaceAddress &other) const {
+  return ipv6_flags_ == other.ipv6_flags() &&
+    static_cast<const IPAddress&>(*this) == other;
+}
+
+bool InterfaceAddress::operator!=(const InterfaceAddress &other) const {
+  return !((*this) == other);
+}
+
+const InterfaceAddress& InterfaceAddress::operator=(
+  const InterfaceAddress& other) {
+  ipv6_flags_ = other.ipv6_flags_;
+  static_cast<IPAddress&>(*this) = other;
+  return *this;
+}
+
+std::ostream& operator<<(std::ostream& os, const InterfaceAddress& ip) {
+  os << static_cast<const IPAddress&>(ip);
+
+  if (ip.family() == AF_INET6)
+    os << "|flags:0x" << std::hex << ip.ipv6_flags();
+
+  return os;
+}
+
+static bool IPIsPrivateNetworkV4(const IPAddress& ip) {
+  uint32_t ip_in_host_order = ip.v4AddressAsHostOrderInteger();
+  return ((ip_in_host_order >> 24) == 10) ||
+      ((ip_in_host_order >> 20) == ((172 << 4) | 1)) ||
+      ((ip_in_host_order >> 16) == ((192 << 8) | 168));
+}
+
+static bool IPIsPrivateNetworkV6(const IPAddress& ip) {
+  return IPIsHelper(ip, kPrivateNetworkPrefix, 8);
+}
+
+bool IPIsPrivateNetwork(const IPAddress& ip) {
+  switch (ip.family()) {
+    case AF_INET: {
+      return IPIsPrivateNetworkV4(ip);
+    }
+    case AF_INET6: {
+      return IPIsPrivateNetworkV6(ip);
+    }
+  }
+  return false;
+}
+
+in_addr ExtractMappedAddress(const in6_addr& in6) {
+  in_addr ipv4;
+  ::memcpy(&ipv4.s_addr, &in6.s6_addr[12], sizeof(ipv4.s_addr));
+  return ipv4;
+}
+
+bool IPFromAddrInfo(struct addrinfo* info, IPAddress* out) {
+  if (!info || !info->ai_addr) {
+    return false;
+  }
+  if (info->ai_addr->sa_family == AF_INET) {
+    sockaddr_in* addr = reinterpret_cast<sockaddr_in*>(info->ai_addr);
+    *out = IPAddress(addr->sin_addr);
+    return true;
+  } else if (info->ai_addr->sa_family == AF_INET6) {
+    sockaddr_in6* addr = reinterpret_cast<sockaddr_in6*>(info->ai_addr);
+    *out = IPAddress(addr->sin6_addr);
+    return true;
+  }
+  return false;
+}
+
+bool IPFromString(const std::string& str, IPAddress* out) {
+  if (!out) {
+    return false;
+  }
+  in_addr addr;
+  if (rtc::inet_pton(AF_INET, str.c_str(), &addr) == 0) {
+    in6_addr addr6;
+    if (rtc::inet_pton(AF_INET6, str.c_str(), &addr6) == 0) {
+      *out = IPAddress();
+      return false;
+    }
+    *out = IPAddress(addr6);
+  } else {
+    *out = IPAddress(addr);
+  }
+  return true;
+}
+
+bool IPFromString(const std::string& str, int flags,
+                  InterfaceAddress* out) {
+  IPAddress ip;
+  if (!IPFromString(str, &ip)) {
+    return false;
+  }
+
+  *out = InterfaceAddress(ip, flags);
+  return true;
+}
+
+bool IPIsAny(const IPAddress& ip) {
+  switch (ip.family()) {
+    case AF_INET:
+      return ip == IPAddress(INADDR_ANY);
+    case AF_INET6:
+      return ip == IPAddress(in6addr_any) || ip == IPAddress(kV4MappedPrefix);
+    case AF_UNSPEC:
+      return false;
+  }
+  return false;
+}
+
+static bool IPIsLoopbackV4(const IPAddress& ip) {
+  uint32_t ip_in_host_order = ip.v4AddressAsHostOrderInteger();
+  return ((ip_in_host_order >> 24) == 127);
+}
+
+static bool IPIsLoopbackV6(const IPAddress& ip) {
+  return ip == IPAddress(in6addr_loopback);
+}
+
+bool IPIsLoopback(const IPAddress& ip) {
+  switch (ip.family()) {
+    case AF_INET: {
+      return IPIsLoopbackV4(ip);
+    }
+    case AF_INET6: {
+      return IPIsLoopbackV6(ip);
+    }
+  }
+  return false;
+}
+
+bool IPIsPrivate(const IPAddress& ip) {
+  return IPIsLinkLocal(ip) || IPIsLoopback(ip) || IPIsPrivateNetwork(ip);
+}
+
+bool IPIsUnspec(const IPAddress& ip) {
+  return ip.family() == AF_UNSPEC;
+}
+
+size_t HashIP(const IPAddress& ip) {
+  switch (ip.family()) {
+    case AF_INET: {
+      return ip.ipv4_address().s_addr;
+    }
+    case AF_INET6: {
+      in6_addr v6addr = ip.ipv6_address();
+      const uint32_t* v6_as_ints =
+          reinterpret_cast<const uint32_t*>(&v6addr.s6_addr);
+      return v6_as_ints[0] ^ v6_as_ints[1] ^ v6_as_ints[2] ^ v6_as_ints[3];
+    }
+  }
+  return 0;
+}
+
+IPAddress TruncateIP(const IPAddress& ip, int length) {
+  if (length < 0) {
+    return IPAddress();
+  }
+  if (ip.family() == AF_INET) {
+    if (length > 31) {
+      return ip;
+    }
+    if (length == 0) {
+      return IPAddress(INADDR_ANY);
+    }
+    int mask = (0xFFFFFFFF << (32 - length));
+    uint32_t host_order_ip = NetworkToHost32(ip.ipv4_address().s_addr);
+    in_addr masked;
+    masked.s_addr = HostToNetwork32(host_order_ip & mask);
+    return IPAddress(masked);
+  } else if (ip.family() == AF_INET6) {
+    if (length > 127) {
+      return ip;
+    }
+    if (length == 0) {
+      return IPAddress(in6addr_any);
+    }
+    in6_addr v6addr = ip.ipv6_address();
+    int position = length / 32;
+    int inner_length = 32 - (length - (position * 32));
+    // Note: 64bit mask constant needed to allow possible 32-bit left shift.
+    uint32_t inner_mask = 0xFFFFFFFFLL << inner_length;
+    uint32_t* v6_as_ints = reinterpret_cast<uint32_t*>(&v6addr.s6_addr);
+    for (int i = 0; i < 4; ++i) {
+      if (i == position) {
+        uint32_t host_order_inner = NetworkToHost32(v6_as_ints[i]);
+        v6_as_ints[i] = HostToNetwork32(host_order_inner & inner_mask);
+      } else if (i > position) {
+        v6_as_ints[i] = 0;
+      }
+    }
+    return IPAddress(v6addr);
+  }
+  return IPAddress();
+}
+
+int CountIPMaskBits(IPAddress mask) {
+  uint32_t word_to_count = 0;
+  int bits = 0;
+  switch (mask.family()) {
+    case AF_INET: {
+      word_to_count = NetworkToHost32(mask.ipv4_address().s_addr);
+      break;
+    }
+    case AF_INET6: {
+      in6_addr v6addr = mask.ipv6_address();
+      const uint32_t* v6_as_ints =
+          reinterpret_cast<const uint32_t*>(&v6addr.s6_addr);
+      int i = 0;
+      for (; i < 4; ++i) {
+        if (v6_as_ints[i] != 0xFFFFFFFF) {
+          break;
+        }
+      }
+      if (i < 4) {
+        word_to_count = NetworkToHost32(v6_as_ints[i]);
+      }
+      bits = (i * 32);
+      break;
+    }
+    default: {
+      return 0;
+    }
+  }
+  if (word_to_count == 0) {
+    return bits;
+  }
+
+  // Public domain bit-twiddling hack from:
+  // http://graphics.stanford.edu/~seander/bithacks.html
+  // Counts the trailing 0s in the word.
+  unsigned int zeroes = 32;
+  // This could also be written word_to_count &= -word_to_count, but
+  // MSVC emits warning C4146 when negating an unsigned number.
+  word_to_count &= ~word_to_count + 1;  // Isolate lowest set bit.
+  if (word_to_count) zeroes--;
+  if (word_to_count & 0x0000FFFF) zeroes -= 16;
+  if (word_to_count & 0x00FF00FF) zeroes -= 8;
+  if (word_to_count & 0x0F0F0F0F) zeroes -= 4;
+  if (word_to_count & 0x33333333) zeroes -= 2;
+  if (word_to_count & 0x55555555) zeroes -= 1;
+
+  return bits + (32 - zeroes);
+}
+
+bool IPIsHelper(const IPAddress& ip, const in6_addr& tomatch, int length) {
+  // Helper method for checking IP prefix matches (but only on whole byte
+  // lengths). Length is in bits.
+  in6_addr addr = ip.ipv6_address();
+  return ::memcmp(&addr, &tomatch, (length >> 3)) == 0;
+}
+
+bool IPIs6Bone(const IPAddress& ip) {
+  return IPIsHelper(ip, k6BonePrefix, 16);
+}
+
+bool IPIs6To4(const IPAddress& ip) {
+  return IPIsHelper(ip, k6To4Prefix, 16);
+}
+
+static bool IPIsLinkLocalV4(const IPAddress& ip) {
+  uint32_t ip_in_host_order = ip.v4AddressAsHostOrderInteger();
+  return ((ip_in_host_order >> 16) == ((169 << 8) | 254));
+}
+
+static bool IPIsLinkLocalV6(const IPAddress& ip) {
+  // Can't use the helper because the prefix is 10 bits.
+  in6_addr addr = ip.ipv6_address();
+  return (addr.s6_addr[0] == 0xFE) && ((addr.s6_addr[1] & 0xC0) == 0x80);
+}
+
+bool IPIsLinkLocal(const IPAddress& ip) {
+  switch (ip.family()) {
+    case AF_INET: {
+      return IPIsLinkLocalV4(ip);
+    }
+    case AF_INET6: {
+      return IPIsLinkLocalV6(ip);
+    }
+  }
+  return false;
+}
+
+// According to http://www.ietf.org/rfc/rfc2373.txt, Appendix A, page 19.  An
+// address which contains MAC will have its 11th and 12th bytes as FF:FE as well
+// as the U/L bit as 1.
+bool IPIsMacBased(const IPAddress& ip) {
+  in6_addr addr = ip.ipv6_address();
+  return ((addr.s6_addr[8] & 0x02) && addr.s6_addr[11] == 0xFF &&
+          addr.s6_addr[12] == 0xFE);
+}
+
+bool IPIsSiteLocal(const IPAddress& ip) {
+  // Can't use the helper because the prefix is 10 bits.
+  in6_addr addr = ip.ipv6_address();
+  return addr.s6_addr[0] == 0xFE && (addr.s6_addr[1] & 0xC0) == 0xC0;
+}
+
+bool IPIsULA(const IPAddress& ip) {
+  // Can't use the helper because the prefix is 7 bits.
+  in6_addr addr = ip.ipv6_address();
+  return (addr.s6_addr[0] & 0xFE) == 0xFC;
+}
+
+bool IPIsTeredo(const IPAddress& ip) {
+  return IPIsHelper(ip, kTeredoPrefix, 32);
+}
+
+bool IPIsV4Compatibility(const IPAddress& ip) {
+  return IPIsHelper(ip, kV4CompatibilityPrefix, 96);
+}
+
+bool IPIsV4Mapped(const IPAddress& ip) {
+  return IPIsHelper(ip, kV4MappedPrefix, 96);
+}
+
+int IPAddressPrecedence(const IPAddress& ip) {
+  // Precedence values from RFC 3484-bis. Prefers native v4 over 6to4/Teredo.
+  if (ip.family() == AF_INET) {
+    return 30;
+  } else if (ip.family() == AF_INET6) {
+    if (IPIsLoopback(ip)) {
+      return 60;
+    } else if (IPIsULA(ip)) {
+      return 50;
+    } else if (IPIsV4Mapped(ip)) {
+      return 30;
+    } else if (IPIs6To4(ip)) {
+      return 20;
+    } else if (IPIsTeredo(ip)) {
+      return 10;
+    } else if (IPIsV4Compatibility(ip) || IPIsSiteLocal(ip) || IPIs6Bone(ip)) {
+      return 1;
+    } else {
+      // A 'normal' IPv6 address.
+      return 40;
+    }
+  }
+  return 0;
+}
+
+IPAddress GetLoopbackIP(int family) {
+  if (family == AF_INET) {
+    return rtc::IPAddress(INADDR_LOOPBACK);
+  }
+  if (family == AF_INET6) {
+    return rtc::IPAddress(in6addr_loopback);
+  }
+  return rtc::IPAddress();
+}
+
+IPAddress GetAnyIP(int family) {
+  if (family == AF_INET) {
+    return rtc::IPAddress(INADDR_ANY);
+  }
+  if (family == AF_INET6) {
+    return rtc::IPAddress(in6addr_any);
+  }
+  return rtc::IPAddress();
+}
+
+}  // namespace rtc
diff --git a/rtc_base/ipaddress.h b/rtc_base/ipaddress.h
new file mode 100644
index 0000000..4ef7d08
--- /dev/null
+++ b/rtc_base/ipaddress.h
@@ -0,0 +1,193 @@
+/*
+ *  Copyright 2011 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_IPADDRESS_H_
+#define RTC_BASE_IPADDRESS_H_
+
+#if defined(WEBRTC_POSIX)
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#endif
+#if defined(WEBRTC_WIN)
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#endif
+#include <string.h>
+#include <string>
+#include <vector>
+
+#include "rtc_base/basictypes.h"
+#include "rtc_base/byteorder.h"
+#if defined(WEBRTC_WIN)
+#include "rtc_base/win32.h"
+#endif
+
+namespace rtc {
+
+enum IPv6AddressFlag {
+  IPV6_ADDRESS_FLAG_NONE =           0x00,
+
+  // Temporary address is dynamic by nature and will not carry MAC
+  // address.
+  IPV6_ADDRESS_FLAG_TEMPORARY =      1 << 0,
+
+  // Temporary address could become deprecated once the preferred
+  // lifetime is reached. It is still valid but just shouldn't be used
+  // to create new connection.
+  IPV6_ADDRESS_FLAG_DEPRECATED =     1 << 1,
+};
+
+// Version-agnostic IP address class, wraps a union of in_addr and in6_addr.
+class IPAddress {
+ public:
+  IPAddress() : family_(AF_UNSPEC) {
+    ::memset(&u_, 0, sizeof(u_));
+  }
+
+  explicit IPAddress(const in_addr& ip4) : family_(AF_INET) {
+    memset(&u_, 0, sizeof(u_));
+    u_.ip4 = ip4;
+  }
+
+  explicit IPAddress(const in6_addr& ip6) : family_(AF_INET6) {
+    u_.ip6 = ip6;
+  }
+
+  explicit IPAddress(uint32_t ip_in_host_byte_order) : family_(AF_INET) {
+    memset(&u_, 0, sizeof(u_));
+    u_.ip4.s_addr = HostToNetwork32(ip_in_host_byte_order);
+  }
+
+  IPAddress(const IPAddress& other) : family_(other.family_) {
+    ::memcpy(&u_, &other.u_, sizeof(u_));
+  }
+
+  virtual ~IPAddress() {}
+
+  const IPAddress & operator=(const IPAddress& other) {
+    family_ = other.family_;
+    ::memcpy(&u_, &other.u_, sizeof(u_));
+    return *this;
+  }
+
+  bool operator==(const IPAddress& other) const;
+  bool operator!=(const IPAddress& other) const;
+  bool operator <(const IPAddress& other) const;
+  bool operator >(const IPAddress& other) const;
+  friend std::ostream& operator<<(std::ostream& os, const IPAddress& addr);
+
+  int family() const { return family_; }
+  in_addr ipv4_address() const;
+  in6_addr ipv6_address() const;
+
+  // Returns the number of bytes needed to store the raw address.
+  size_t Size() const;
+
+  // Wraps inet_ntop.
+  std::string ToString() const;
+
+  // Same as ToString but anonymizes it by hiding the last part.
+  std::string ToSensitiveString() const;
+
+  // Returns an unmapped address from a possibly-mapped address.
+  // Returns the same address if this isn't a mapped address.
+  IPAddress Normalized() const;
+
+  // Returns this address as an IPv6 address.
+  // Maps v4 addresses (as ::ffff:a.b.c.d), returns v6 addresses unchanged.
+  IPAddress AsIPv6Address() const;
+
+  // For socketaddress' benefit. Returns the IP in host byte order.
+  uint32_t v4AddressAsHostOrderInteger() const;
+
+  // Whether this is an unspecified IP address.
+  bool IsNil() const;
+
+ private:
+  int family_;
+  union {
+    in_addr ip4;
+    in6_addr ip6;
+  } u_;
+};
+
+// IP class which could represent IPv6 address flags which is only
+// meaningful in IPv6 case.
+class InterfaceAddress : public IPAddress {
+ public:
+  InterfaceAddress() : ipv6_flags_(IPV6_ADDRESS_FLAG_NONE) {}
+
+  InterfaceAddress(IPAddress ip)
+    : IPAddress(ip), ipv6_flags_(IPV6_ADDRESS_FLAG_NONE) {}
+
+  InterfaceAddress(IPAddress addr, int ipv6_flags)
+    : IPAddress(addr), ipv6_flags_(ipv6_flags) {}
+
+  InterfaceAddress(const in6_addr& ip6, int ipv6_flags)
+    : IPAddress(ip6), ipv6_flags_(ipv6_flags) {}
+
+  const InterfaceAddress & operator=(const InterfaceAddress& other);
+
+  bool operator==(const InterfaceAddress& other) const;
+  bool operator!=(const InterfaceAddress& other) const;
+
+  int ipv6_flags() const { return ipv6_flags_; }
+  friend std::ostream& operator<<(std::ostream& os,
+                                  const InterfaceAddress& addr);
+
+ private:
+  int ipv6_flags_;
+};
+
+bool IPFromAddrInfo(struct addrinfo* info, IPAddress* out);
+bool IPFromString(const std::string& str, IPAddress* out);
+bool IPFromString(const std::string& str, int flags,
+                  InterfaceAddress* out);
+bool IPIsAny(const IPAddress& ip);
+bool IPIsLoopback(const IPAddress& ip);
+bool IPIsLinkLocal(const IPAddress& ip);
+// Identify a private network address like "192.168.111.222"
+// (see https://en.wikipedia.org/wiki/Private_network )
+bool IPIsPrivateNetwork(const IPAddress& ip);
+// Identify if an IP is "private", that is a loopback
+// or an address belonging to a link-local or a private network.
+bool IPIsPrivate(const IPAddress& ip);
+bool IPIsUnspec(const IPAddress& ip);
+size_t HashIP(const IPAddress& ip);
+
+// These are only really applicable for IPv6 addresses.
+bool IPIs6Bone(const IPAddress& ip);
+bool IPIs6To4(const IPAddress& ip);
+bool IPIsMacBased(const IPAddress& ip);
+bool IPIsSiteLocal(const IPAddress& ip);
+bool IPIsTeredo(const IPAddress& ip);
+bool IPIsULA(const IPAddress& ip);
+bool IPIsV4Compatibility(const IPAddress& ip);
+bool IPIsV4Mapped(const IPAddress& ip);
+
+// Returns the precedence value for this IP as given in RFC3484.
+int IPAddressPrecedence(const IPAddress& ip);
+
+// Returns 'ip' truncated to be 'length' bits long.
+IPAddress TruncateIP(const IPAddress& ip, int length);
+
+IPAddress GetLoopbackIP(int family);
+IPAddress GetAnyIP(int family);
+
+// Returns the number of contiguously set bits, counting from the MSB in network
+// byte order, in this IPAddress. Bits after the first 0 encountered are not
+// counted.
+int CountIPMaskBits(IPAddress mask);
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_IPADDRESS_H_
diff --git a/rtc_base/ipaddress_unittest.cc b/rtc_base/ipaddress_unittest.cc
new file mode 100644
index 0000000..90c9559
--- /dev/null
+++ b/rtc_base/ipaddress_unittest.cc
@@ -0,0 +1,979 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/ipaddress.h"
+#include "rtc_base/gunit.h"
+
+namespace rtc {
+
+static const unsigned int kIPv4AddrSize = 4;
+static const unsigned int kIPv6AddrSize = 16;
+static const unsigned int kIPv4RFC1918Addr = 0xC0A80701;
+static const unsigned int kIPv4PublicAddr = 0x01020304;
+static const unsigned int kIPv4LinkLocalAddr = 0xA9FE10C1; // 169.254.16.193
+static const in6_addr kIPv6LinkLocalAddr = {{{0xfe, 0x80, 0x00, 0x00,
+                                              0x00, 0x00, 0x00, 0x00,
+                                              0xbe, 0x30, 0x5b, 0xff,
+                                              0xfe, 0xe5, 0x00, 0xc3}}};
+static const in6_addr kIPv6PublicAddr = {{{0x24, 0x01, 0xfa, 0x00,
+                                           0x00, 0x04, 0x10, 0x00,
+                                           0xbe, 0x30, 0x5b, 0xff,
+                                           0xfe, 0xe5, 0x00, 0xc3}}};
+static const in6_addr kIPv6PublicAddr2 = {{{0x24, 0x01, 0x00, 0x00,
+                                            0x00, 0x00, 0x10, 0x00,
+                                            0xbe, 0x30, 0x5b, 0xff,
+                                            0xfe, 0xe5, 0x00, 0xc3}}};
+static const in6_addr kIPv4MappedAnyAddr = {{{0x00, 0x00, 0x00, 0x00,
+                                              0x00, 0x00, 0x00, 0x00,
+                                              0x00, 0x00, 0xff, 0xff,
+                                              0x00, 0x00, 0x00, 0x00}}};
+static const in6_addr kIPv4MappedRFC1918Addr = {{{0x00, 0x00, 0x00, 0x00,
+                                                  0x00, 0x00, 0x00, 0x00,
+                                                  0x00, 0x00, 0xff, 0xff,
+                                                  0xc0, 0xa8, 0x07, 0x01}}};
+static const in6_addr kIPv4MappedPublicAddr = {{{0x00, 0x00, 0x00, 0x00,
+                                                 0x00, 0x00, 0x00, 0x00,
+                                                 0x00, 0x00, 0xff, 0xff,
+                                                 0x01, 0x02, 0x03, 0x04}}};
+
+static const std::string kIPv4AnyAddrString = "0.0.0.0";
+static const std::string kIPv4LoopbackAddrString = "127.0.0.1";
+static const std::string kIPv4RFC1918AddrString = "192.168.7.1";
+static const std::string kIPv4PublicAddrString = "1.2.3.4";
+static const std::string kIPv4PublicAddrAnonymizedString = "1.2.3.x";
+static const std::string kIPv6AnyAddrString = "::";
+static const std::string kIPv6LoopbackAddrString = "::1";
+static const std::string kIPv6LinkLocalAddrString = "fe80::be30:5bff:fee5:c3";
+static const std::string kIPv6EuiAddrString =
+    "2620:0:1008:1201:a248:1cff:fe98:360";
+static const std::string kIPv6TemporaryAddrString =
+    "2620:0:1008:1201:2089:6dda:385e:80c0";
+static const std::string kIPv6PublicAddrString =
+    "2401:fa00:4:1000:be30:5bff:fee5:c3";
+static const std::string kIPv6PublicAddr2String =
+    "2401::1000:be30:5bff:fee5:c3";
+static const std::string kIPv6PublicAddrAnonymizedString =
+    "2401:fa00:4:x:x:x:x:x";
+static const std::string kIPv6PublicAddr2AnonymizedString =
+    "2401:0:0:x:x:x:x:x";
+static const std::string kIPv4MappedAnyAddrString = "::ffff:0:0";
+static const std::string kIPv4MappedRFC1918AddrString = "::ffff:c0a8:701";
+static const std::string kIPv4MappedLoopbackAddrString = "::ffff:7f00:1";
+static const std::string kIPv4MappedPublicAddrString = "::ffff:102:0304";
+static const std::string kIPv4MappedV4StyleAddrString = "::ffff:192.168.7.1";
+
+static const std::string kIPv4BrokenString1 = "192.168.7.";
+static const std::string kIPv4BrokenString2 = "192.168.7.1.1";
+static const std::string kIPv4BrokenString3 = "192.168.7.1:80";
+static const std::string kIPv4BrokenString4 = "192.168.7.ONE";
+static const std::string kIPv4BrokenString5 = "-192.168.7.1";
+static const std::string kIPv4BrokenString6 = "256.168.7.1";
+static const std::string kIPv6BrokenString1 = "2401:fa00:4:1000:be30";
+static const std::string kIPv6BrokenString2 =
+    "2401:fa00:4:1000:be30:5bff:fee5:c3:1";
+static const std::string kIPv6BrokenString3 =
+    "[2401:fa00:4:1000:be30:5bff:fee5:c3]:1";
+static const std::string kIPv6BrokenString4 =
+    "2401::4::be30";
+static const std::string kIPv6BrokenString5 =
+    "2401:::4:fee5:be30";
+static const std::string kIPv6BrokenString6 =
+    "2401f:fa00:4:1000:be30:5bff:fee5:c3";
+static const std::string kIPv6BrokenString7 =
+    "2401:ga00:4:1000:be30:5bff:fee5:c3";
+static const std::string kIPv6BrokenString8 =
+    "2401:fa000:4:1000:be30:5bff:fee5:c3";
+static const std::string kIPv6BrokenString9 =
+    "2401:fal0:4:1000:be30:5bff:fee5:c3";
+static const std::string kIPv6BrokenString10 =
+    "::ffff:192.168.7.";
+static const std::string kIPv6BrokenString11 =
+    "::ffff:192.168.7.1.1.1";
+static const std::string kIPv6BrokenString12 =
+    "::fffe:192.168.7.1";
+static const std::string kIPv6BrokenString13 =
+    "::ffff:192.168.7.ff";
+static const std::string kIPv6BrokenString14 =
+    "0x2401:fa00:4:1000:be30:5bff:fee5:c3";
+
+bool AreEqual(const IPAddress& addr,
+              const IPAddress& addr2) {
+  if ((IPIsAny(addr) != IPIsAny(addr2)) ||
+      (IPIsLoopback(addr) != IPIsLoopback(addr2)) ||
+      (IPIsPrivate(addr) != IPIsPrivate(addr2)) ||
+      (HashIP(addr) != HashIP(addr2)) ||
+      (addr.Size() != addr2.Size()) ||
+      (addr.family() != addr2.family()) ||
+      (addr.ToString() != addr2.ToString())) {
+    return false;
+  }
+  in_addr v4addr, v4addr2;
+  v4addr = addr.ipv4_address();
+  v4addr2 = addr2.ipv4_address();
+  if (0 != memcmp(&v4addr, &v4addr2, sizeof(v4addr))) {
+    return false;
+  }
+  in6_addr v6addr, v6addr2;
+  v6addr = addr.ipv6_address();
+  v6addr2 = addr2.ipv6_address();
+  if (0 != memcmp(&v6addr, &v6addr2, sizeof(v6addr))) {
+    return false;
+  }
+  return true;
+}
+
+bool BrokenIPStringFails(const std::string& broken) {
+  IPAddress addr(0);   // Intentionally make it v4.
+  if (IPFromString(kIPv4BrokenString1, &addr)) {
+    return false;
+  }
+  return addr.family() == AF_UNSPEC;
+}
+
+bool CheckMaskCount(const std::string& mask, int expected_length) {
+  IPAddress addr;
+  return IPFromString(mask, &addr) &&
+      (expected_length == CountIPMaskBits(addr));
+}
+
+bool TryInvalidMaskCount(const std::string& mask) {
+  // We don't care about the result at all, but we do want to know if
+  // CountIPMaskBits is going to crash or infinite loop or something.
+  IPAddress addr;
+  if (!IPFromString(mask, &addr)) {
+    return false;
+  }
+  CountIPMaskBits(addr);
+  return true;
+}
+
+bool CheckTruncateIP(const std::string& initial, int truncate_length,
+                     const std::string& expected_result) {
+  IPAddress addr, expected;
+  IPFromString(initial, &addr);
+  IPFromString(expected_result, &expected);
+  IPAddress truncated = TruncateIP(addr, truncate_length);
+  return truncated == expected;
+}
+
+TEST(IPAddressTest, TestDefaultCtor) {
+  IPAddress addr;
+  EXPECT_FALSE(IPIsAny(addr));
+  EXPECT_FALSE(IPIsLoopback(addr));
+  EXPECT_FALSE(IPIsPrivate(addr));
+
+  EXPECT_EQ(0U, addr.Size());
+  EXPECT_EQ(AF_UNSPEC, addr.family());
+  EXPECT_EQ("", addr.ToString());
+}
+
+TEST(IPAddressTest, TestInAddrCtor) {
+  in_addr v4addr;
+
+  // Test V4 Any address.
+  v4addr.s_addr = INADDR_ANY;
+  IPAddress addr(v4addr);
+  EXPECT_TRUE(IPIsAny(addr));
+  EXPECT_FALSE(IPIsLoopback(addr));
+  EXPECT_FALSE(IPIsPrivate(addr));
+  EXPECT_EQ(kIPv4AddrSize, addr.Size());
+  EXPECT_EQ(kIPv4AnyAddrString, addr.ToString());
+
+  // Test a V4 loopback address.
+  v4addr.s_addr = htonl(INADDR_LOOPBACK);
+  addr = IPAddress(v4addr);
+  EXPECT_FALSE(IPIsAny(addr));
+  EXPECT_TRUE(IPIsLoopback(addr));
+  EXPECT_TRUE(IPIsPrivate(addr));
+  EXPECT_EQ(kIPv4AddrSize, addr.Size());
+  EXPECT_EQ(kIPv4LoopbackAddrString, addr.ToString());
+
+  // Test an RFC1918 address.
+  v4addr.s_addr = htonl(kIPv4RFC1918Addr);
+  addr = IPAddress(v4addr);
+  EXPECT_FALSE(IPIsAny(addr));
+  EXPECT_FALSE(IPIsLoopback(addr));
+  EXPECT_TRUE(IPIsPrivate(addr));
+  EXPECT_EQ(kIPv4AddrSize, addr.Size());
+  EXPECT_EQ(kIPv4RFC1918AddrString, addr.ToString());
+
+  // Test a 'normal' v4 address.
+  v4addr.s_addr = htonl(kIPv4PublicAddr);
+  addr = IPAddress(v4addr);
+  EXPECT_FALSE(IPIsAny(addr));
+  EXPECT_FALSE(IPIsLoopback(addr));
+  EXPECT_FALSE(IPIsPrivate(addr));
+  EXPECT_EQ(kIPv4AddrSize, addr.Size());
+  EXPECT_EQ(kIPv4PublicAddrString, addr.ToString());
+}
+
+TEST(IPAddressTest, TestInAddr6Ctor) {
+  // Test v6 empty.
+  IPAddress addr(in6addr_any);
+  EXPECT_TRUE(IPIsAny(addr));
+  EXPECT_FALSE(IPIsLoopback(addr));
+  EXPECT_FALSE(IPIsPrivate(addr));
+  EXPECT_EQ(kIPv6AddrSize, addr.Size());
+  EXPECT_EQ(kIPv6AnyAddrString, addr.ToString());
+
+  // Test v6 loopback.
+  addr = IPAddress(in6addr_loopback);
+  EXPECT_FALSE(IPIsAny(addr));
+  EXPECT_TRUE(IPIsLoopback(addr));
+  EXPECT_TRUE(IPIsPrivate(addr));
+  EXPECT_EQ(kIPv6AddrSize, addr.Size());
+  EXPECT_EQ(kIPv6LoopbackAddrString, addr.ToString());
+
+  // Test v6 link-local.
+  addr = IPAddress(kIPv6LinkLocalAddr);
+  EXPECT_FALSE(IPIsAny(addr));
+  EXPECT_FALSE(IPIsLoopback(addr));
+  EXPECT_TRUE(IPIsPrivate(addr));
+  EXPECT_EQ(kIPv6AddrSize, addr.Size());
+  EXPECT_EQ(kIPv6LinkLocalAddrString, addr.ToString());
+
+  // Test v6 global address.
+  addr = IPAddress(kIPv6PublicAddr);
+  EXPECT_FALSE(IPIsAny(addr));
+  EXPECT_FALSE(IPIsLoopback(addr));
+  EXPECT_FALSE(IPIsPrivate(addr));
+  EXPECT_EQ(kIPv6AddrSize, addr.Size());
+  EXPECT_EQ(kIPv6PublicAddrString, addr.ToString());
+}
+
+TEST(IPAddressTest, TestUint32Ctor) {
+  // Test V4 Any address.
+  IPAddress addr(0);
+  EXPECT_TRUE(IPIsAny(addr));
+  EXPECT_FALSE(IPIsLoopback(addr));
+  EXPECT_FALSE(IPIsPrivate(addr));
+  EXPECT_EQ(kIPv4AddrSize, addr.Size());
+  EXPECT_EQ(kIPv4AnyAddrString, addr.ToString());
+
+  // Test a V4 loopback address.
+  addr = IPAddress(INADDR_LOOPBACK);
+  EXPECT_FALSE(IPIsAny(addr));
+  EXPECT_TRUE(IPIsLoopback(addr));
+  EXPECT_TRUE(IPIsPrivate(addr));
+  EXPECT_EQ(kIPv4AddrSize, addr.Size());
+  EXPECT_EQ(kIPv4LoopbackAddrString, addr.ToString());
+
+  // Test an RFC1918 address.
+  addr = IPAddress(kIPv4RFC1918Addr);
+  EXPECT_FALSE(IPIsAny(addr));
+  EXPECT_FALSE(IPIsLoopback(addr));
+  EXPECT_TRUE(IPIsPrivate(addr));
+  EXPECT_EQ(kIPv4AddrSize, addr.Size());
+  EXPECT_EQ(kIPv4RFC1918AddrString, addr.ToString());
+
+  // Test a 'normal' v4 address.
+  addr = IPAddress(kIPv4PublicAddr);
+  EXPECT_FALSE(IPIsAny(addr));
+  EXPECT_FALSE(IPIsLoopback(addr));
+  EXPECT_FALSE(IPIsPrivate(addr));
+  EXPECT_EQ(kIPv4AddrSize, addr.Size());
+  EXPECT_EQ(kIPv4PublicAddrString, addr.ToString());
+}
+
+TEST(IPAddressTest, TestCopyCtor) {
+  in_addr v4addr;
+  v4addr.s_addr = htonl(kIPv4PublicAddr);
+  IPAddress addr(v4addr);
+  IPAddress addr2(addr);
+
+  EXPECT_PRED2(AreEqual, addr, addr2);
+
+  addr = IPAddress(INADDR_ANY);
+  addr2 = IPAddress(addr);
+  EXPECT_PRED2(AreEqual, addr, addr2);
+
+  addr = IPAddress(INADDR_LOOPBACK);
+  addr2 = IPAddress(addr);
+  EXPECT_PRED2(AreEqual, addr, addr2);
+
+  addr = IPAddress(kIPv4PublicAddr);
+  addr2 = IPAddress(addr);
+  EXPECT_PRED2(AreEqual, addr, addr2);
+
+  addr = IPAddress(kIPv4RFC1918Addr);
+  addr2 = IPAddress(addr);
+  EXPECT_PRED2(AreEqual, addr, addr2);
+
+  addr = IPAddress(in6addr_any);
+  addr2 = IPAddress(addr);
+  EXPECT_PRED2(AreEqual, addr, addr2);
+
+  addr = IPAddress(in6addr_loopback);
+  addr2 = IPAddress(addr);
+  EXPECT_PRED2(AreEqual, addr, addr2);
+
+  addr = IPAddress(kIPv6LinkLocalAddr);
+  addr2 = IPAddress(addr);
+  EXPECT_PRED2(AreEqual, addr, addr2);
+
+  addr = IPAddress(kIPv6PublicAddr);
+  addr2 = IPAddress(addr);
+  EXPECT_PRED2(AreEqual, addr, addr2);
+}
+
+TEST(IPAddressTest, TestEquality) {
+  // Check v4 equality
+  in_addr v4addr, v4addr2;
+  v4addr.s_addr = htonl(kIPv4PublicAddr);
+  v4addr2.s_addr = htonl(kIPv4PublicAddr + 1);
+  IPAddress addr(v4addr);
+  IPAddress addr2(v4addr2);
+  IPAddress addr3(v4addr);
+
+  EXPECT_TRUE(addr == addr);
+  EXPECT_TRUE(addr2 == addr2);
+  EXPECT_TRUE(addr3 == addr3);
+  EXPECT_TRUE(addr == addr3);
+  EXPECT_TRUE(addr3 == addr);
+  EXPECT_FALSE(addr2 == addr);
+  EXPECT_FALSE(addr2 == addr3);
+  EXPECT_FALSE(addr == addr2);
+  EXPECT_FALSE(addr3 == addr2);
+
+  // Check v6 equality
+  IPAddress addr4(kIPv6PublicAddr);
+  IPAddress addr5(kIPv6LinkLocalAddr);
+  IPAddress addr6(kIPv6PublicAddr);
+
+  EXPECT_TRUE(addr4 == addr4);
+  EXPECT_TRUE(addr5 == addr5);
+  EXPECT_TRUE(addr4 == addr6);
+  EXPECT_TRUE(addr6 == addr4);
+  EXPECT_FALSE(addr4 == addr5);
+  EXPECT_FALSE(addr5 == addr4);
+  EXPECT_FALSE(addr6 == addr5);
+  EXPECT_FALSE(addr5 == addr6);
+
+  // Check v4/v6 cross-equality
+  EXPECT_FALSE(addr == addr4);
+  EXPECT_FALSE(addr == addr5);
+  EXPECT_FALSE(addr == addr6);
+  EXPECT_FALSE(addr4 == addr);
+  EXPECT_FALSE(addr5 == addr);
+  EXPECT_FALSE(addr6 == addr);
+  EXPECT_FALSE(addr2 == addr4);
+  EXPECT_FALSE(addr2 == addr5);
+  EXPECT_FALSE(addr2 == addr6);
+  EXPECT_FALSE(addr4 == addr2);
+  EXPECT_FALSE(addr5 == addr2);
+  EXPECT_FALSE(addr6 == addr2);
+  EXPECT_FALSE(addr3 == addr4);
+  EXPECT_FALSE(addr3 == addr5);
+  EXPECT_FALSE(addr3 == addr6);
+  EXPECT_FALSE(addr4 == addr3);
+  EXPECT_FALSE(addr5 == addr3);
+  EXPECT_FALSE(addr6 == addr3);
+
+  // Special cases: loopback and any.
+  // They're special but they're still not equal.
+  IPAddress v4loopback(htonl(INADDR_LOOPBACK));
+  IPAddress v6loopback(in6addr_loopback);
+  EXPECT_FALSE(v4loopback == v6loopback);
+
+  IPAddress v4any(0);
+  IPAddress v6any(in6addr_any);
+  EXPECT_FALSE(v4any == v6any);
+}
+
+TEST(IPAddressTest, TestComparison) {
+  // Defined in 'ascending' order.
+  // v6 > v4, and intra-family sorting is purely numerical
+  IPAddress addr0;  // AF_UNSPEC
+  IPAddress addr1(INADDR_ANY);  // 0.0.0.0
+  IPAddress addr2(kIPv4PublicAddr);  // 1.2.3.4
+  IPAddress addr3(INADDR_LOOPBACK);  // 127.0.0.1
+  IPAddress addr4(kIPv4RFC1918Addr);  // 192.168.7.1.
+  IPAddress addr5(in6addr_any);  // ::
+  IPAddress addr6(in6addr_loopback);  // ::1
+  IPAddress addr7(kIPv6PublicAddr);  // 2401....
+  IPAddress addr8(kIPv6LinkLocalAddr);  // fe80....
+
+  EXPECT_TRUE(addr0 < addr1);
+  EXPECT_TRUE(addr1 < addr2);
+  EXPECT_TRUE(addr2 < addr3);
+  EXPECT_TRUE(addr3 < addr4);
+  EXPECT_TRUE(addr4 < addr5);
+  EXPECT_TRUE(addr5 < addr6);
+  EXPECT_TRUE(addr6 < addr7);
+  EXPECT_TRUE(addr7 < addr8);
+
+  EXPECT_FALSE(addr0 > addr1);
+  EXPECT_FALSE(addr1 > addr2);
+  EXPECT_FALSE(addr2 > addr3);
+  EXPECT_FALSE(addr3 > addr4);
+  EXPECT_FALSE(addr4 > addr5);
+  EXPECT_FALSE(addr5 > addr6);
+  EXPECT_FALSE(addr6 > addr7);
+  EXPECT_FALSE(addr7 > addr8);
+
+  EXPECT_FALSE(addr0 > addr0);
+  EXPECT_FALSE(addr1 > addr1);
+  EXPECT_FALSE(addr2 > addr2);
+  EXPECT_FALSE(addr3 > addr3);
+  EXPECT_FALSE(addr4 > addr4);
+  EXPECT_FALSE(addr5 > addr5);
+  EXPECT_FALSE(addr6 > addr6);
+  EXPECT_FALSE(addr7 > addr7);
+  EXPECT_FALSE(addr8 > addr8);
+
+  EXPECT_FALSE(addr0 < addr0);
+  EXPECT_FALSE(addr1 < addr1);
+  EXPECT_FALSE(addr2 < addr2);
+  EXPECT_FALSE(addr3 < addr3);
+  EXPECT_FALSE(addr4 < addr4);
+  EXPECT_FALSE(addr5 < addr5);
+  EXPECT_FALSE(addr6 < addr6);
+  EXPECT_FALSE(addr7 < addr7);
+  EXPECT_FALSE(addr8 < addr8);
+}
+
+TEST(IPAddressTest, TestFromString) {
+  IPAddress addr;
+  IPAddress addr2;
+  addr2 = IPAddress(INADDR_ANY);
+
+  EXPECT_TRUE(IPFromString(kIPv4AnyAddrString, &addr));
+  EXPECT_EQ(addr.ToString(), kIPv4AnyAddrString);
+  EXPECT_PRED2(AreEqual, addr, addr2);
+
+  addr2 = IPAddress(INADDR_LOOPBACK);
+  EXPECT_TRUE(IPFromString(kIPv4LoopbackAddrString, &addr));
+  EXPECT_EQ(addr.ToString(), kIPv4LoopbackAddrString);
+  EXPECT_PRED2(AreEqual, addr, addr2);
+
+  addr2 = IPAddress(kIPv4RFC1918Addr);
+  EXPECT_TRUE(IPFromString(kIPv4RFC1918AddrString, &addr));
+  EXPECT_EQ(addr.ToString(), kIPv4RFC1918AddrString);
+  EXPECT_PRED2(AreEqual, addr, addr2);
+
+  addr2 = IPAddress(kIPv4PublicAddr);
+  EXPECT_TRUE(IPFromString(kIPv4PublicAddrString, &addr));
+  EXPECT_EQ(addr.ToString(), kIPv4PublicAddrString);
+  EXPECT_PRED2(AreEqual, addr, addr2);
+
+  addr2 = IPAddress(in6addr_any);
+  EXPECT_TRUE(IPFromString(kIPv6AnyAddrString, &addr));
+  EXPECT_EQ(addr.ToString(), kIPv6AnyAddrString);
+  EXPECT_PRED2(AreEqual, addr, addr2);
+
+  addr2 = IPAddress(in6addr_loopback);
+  EXPECT_TRUE(IPFromString(kIPv6LoopbackAddrString, &addr));
+  EXPECT_EQ(addr.ToString(), kIPv6LoopbackAddrString);
+  EXPECT_PRED2(AreEqual, addr, addr2);
+
+  addr2 = IPAddress(kIPv6LinkLocalAddr);
+  EXPECT_TRUE(IPFromString(kIPv6LinkLocalAddrString, &addr));
+  EXPECT_EQ(addr.ToString(), kIPv6LinkLocalAddrString);
+  EXPECT_PRED2(AreEqual, addr, addr2);
+
+  addr2 = IPAddress(kIPv6PublicAddr);
+  EXPECT_TRUE(IPFromString(kIPv6PublicAddrString, &addr));
+  EXPECT_EQ(addr.ToString(), kIPv6PublicAddrString);
+  EXPECT_PRED2(AreEqual, addr, addr2);
+
+  addr2 = IPAddress(kIPv4MappedRFC1918Addr);
+  EXPECT_TRUE(IPFromString(kIPv4MappedV4StyleAddrString, &addr));
+  EXPECT_PRED2(AreEqual, addr, addr2);
+
+  // Broken cases, should set addr to AF_UNSPEC.
+  EXPECT_PRED1(BrokenIPStringFails, kIPv4BrokenString1);
+  EXPECT_PRED1(BrokenIPStringFails, kIPv4BrokenString2);
+  EXPECT_PRED1(BrokenIPStringFails, kIPv4BrokenString3);
+  EXPECT_PRED1(BrokenIPStringFails, kIPv4BrokenString4);
+  EXPECT_PRED1(BrokenIPStringFails, kIPv4BrokenString5);
+  EXPECT_PRED1(BrokenIPStringFails, kIPv4BrokenString6);
+  EXPECT_PRED1(BrokenIPStringFails, kIPv6BrokenString1);
+  EXPECT_PRED1(BrokenIPStringFails, kIPv6BrokenString2);
+  EXPECT_PRED1(BrokenIPStringFails, kIPv6BrokenString3);
+  EXPECT_PRED1(BrokenIPStringFails, kIPv6BrokenString4);
+  EXPECT_PRED1(BrokenIPStringFails, kIPv6BrokenString5);
+  EXPECT_PRED1(BrokenIPStringFails, kIPv6BrokenString6);
+  EXPECT_PRED1(BrokenIPStringFails, kIPv6BrokenString7);
+  EXPECT_PRED1(BrokenIPStringFails, kIPv6BrokenString8);
+  EXPECT_PRED1(BrokenIPStringFails, kIPv6BrokenString9);
+  EXPECT_PRED1(BrokenIPStringFails, kIPv6BrokenString10);
+  EXPECT_PRED1(BrokenIPStringFails, kIPv6BrokenString11);
+  EXPECT_PRED1(BrokenIPStringFails, kIPv6BrokenString12);
+  EXPECT_PRED1(BrokenIPStringFails, kIPv6BrokenString13);
+  EXPECT_PRED1(BrokenIPStringFails, kIPv6BrokenString14);
+}
+
+TEST(IPAddressTest, TestIPFromAddrInfo) {
+  struct sockaddr_in expected4;
+  struct sockaddr_in6 expected6;
+  struct addrinfo test_info;
+  struct addrinfo next_info;
+  memset(&next_info, 'A', sizeof(next_info));
+  test_info.ai_next = &next_info;
+  // Check that we can get an IPv4 address out.
+  test_info.ai_addr = reinterpret_cast<struct sockaddr*>(&expected4);
+  expected4.sin_addr.s_addr = HostToNetwork32(kIPv4PublicAddr);
+  expected4.sin_family = AF_INET;
+  IPAddress expected(kIPv4PublicAddr);
+  IPAddress addr;
+  EXPECT_TRUE(IPFromAddrInfo(&test_info, &addr));
+  EXPECT_EQ(expected, addr);
+  // Check that we can get an IPv6 address out.
+  expected6.sin6_addr = kIPv6PublicAddr;
+  expected6.sin6_family = AF_INET6;
+  expected = IPAddress(kIPv6PublicAddr);
+  test_info.ai_addr = reinterpret_cast<struct sockaddr*>(&expected6);
+  EXPECT_TRUE(IPFromAddrInfo(&test_info, &addr));
+  EXPECT_EQ(expected, addr);
+  // Check that unspec fails.
+  expected6.sin6_family = AF_UNSPEC;
+  EXPECT_FALSE(IPFromAddrInfo(&test_info, &addr));
+  // Check a zeroed out addrinfo doesn't crash us.
+  memset(&next_info, 0, sizeof(next_info));
+  EXPECT_FALSE(IPFromAddrInfo(&next_info, &addr));
+}
+
+TEST(IPAddressTest, TestIsPrivate) {
+  EXPECT_FALSE(IPIsPrivate(IPAddress(INADDR_ANY)));
+  EXPECT_FALSE(IPIsPrivate(IPAddress(kIPv4PublicAddr)));
+  EXPECT_FALSE(IPIsPrivate(IPAddress(in6addr_any)));
+  EXPECT_FALSE(IPIsPrivate(IPAddress(kIPv6PublicAddr)));
+  EXPECT_FALSE(IPIsPrivate(IPAddress(kIPv4MappedAnyAddr)));
+  EXPECT_FALSE(IPIsPrivate(IPAddress(kIPv4MappedPublicAddr)));
+
+  EXPECT_TRUE(IPIsPrivate(IPAddress(kIPv4RFC1918Addr)));
+  EXPECT_TRUE(IPIsPrivate(IPAddress(INADDR_LOOPBACK)));
+  EXPECT_TRUE(IPIsPrivate(IPAddress(in6addr_loopback)));
+  EXPECT_TRUE(IPIsPrivate(IPAddress(kIPv6LinkLocalAddr)));
+}
+
+TEST(IPAddressTest, TestIsNil) {
+  IPAddress addr;
+  EXPECT_TRUE(IPAddress().IsNil());
+
+  EXPECT_TRUE(IPFromString(kIPv6AnyAddrString, &addr));
+  EXPECT_FALSE(addr.IsNil());
+
+  EXPECT_TRUE(IPFromString(kIPv4AnyAddrString, &addr));
+  EXPECT_FALSE(addr.IsNil());
+
+  EXPECT_FALSE(IPAddress(kIPv4PublicAddr).IsNil());
+}
+
+TEST(IPAddressTest, TestIsLoopback) {
+  EXPECT_FALSE(IPIsLoopback(IPAddress(INADDR_ANY)));
+  EXPECT_FALSE(IPIsLoopback(IPAddress(kIPv4PublicAddr)));
+  EXPECT_FALSE(IPIsLoopback(IPAddress(in6addr_any)));
+  EXPECT_FALSE(IPIsLoopback(IPAddress(kIPv6PublicAddr)));
+  EXPECT_FALSE(IPIsLoopback(IPAddress(kIPv4MappedAnyAddr)));
+  EXPECT_FALSE(IPIsLoopback(IPAddress(kIPv4MappedPublicAddr)));
+
+  EXPECT_TRUE(IPIsLoopback(IPAddress(INADDR_LOOPBACK)));
+  // Try an address in the loopback range (127.0.0.0/8) other than the typical
+  // 127.0.0.1.
+  EXPECT_TRUE(IPIsLoopback(IPAddress(0x7f010203)));
+  EXPECT_TRUE(IPIsLoopback(IPAddress(in6addr_loopback)));
+}
+
+TEST(IPAddressTest, TestIsLinkLocal) {
+  // "any" addresses
+  EXPECT_FALSE(IPIsLinkLocal(IPAddress(INADDR_ANY)));
+  EXPECT_FALSE(IPIsLinkLocal(IPAddress(in6addr_any)));
+  // loopback addresses
+  EXPECT_FALSE(IPIsLinkLocal(IPAddress(INADDR_LOOPBACK)));
+  EXPECT_FALSE(IPIsLinkLocal(IPAddress(in6addr_loopback)));
+  // public addresses
+  EXPECT_FALSE(IPIsLinkLocal(IPAddress(kIPv4PublicAddr)));
+  EXPECT_FALSE(IPIsLinkLocal(IPAddress(kIPv6PublicAddr)));
+  // private network addresses
+  EXPECT_FALSE(IPIsLinkLocal(IPAddress(kIPv4RFC1918Addr)));
+  // mapped addresses
+  EXPECT_FALSE(IPIsLinkLocal(IPAddress(kIPv4MappedAnyAddr)));
+  EXPECT_FALSE(IPIsLinkLocal(IPAddress(kIPv4MappedPublicAddr)));
+  EXPECT_FALSE(IPIsLinkLocal(IPAddress(kIPv4MappedRFC1918Addr)));
+
+  // link-local network addresses
+  EXPECT_TRUE(IPIsLinkLocal(IPAddress(kIPv4LinkLocalAddr)));
+  EXPECT_TRUE(IPIsLinkLocal(IPAddress(kIPv6LinkLocalAddr)));
+}
+
+// Verify that IPIsAny catches all cases of "any" address.
+TEST(IPAddressTest, TestIsAny) {
+  IPAddress addr;
+
+  EXPECT_TRUE(IPFromString(kIPv6AnyAddrString, &addr));
+  EXPECT_TRUE(IPIsAny(addr));
+
+  EXPECT_TRUE(IPFromString(kIPv4AnyAddrString, &addr));
+  EXPECT_TRUE(IPIsAny(addr));
+
+  EXPECT_TRUE(IPIsAny(IPAddress(kIPv4MappedAnyAddr)));
+}
+
+TEST(IPAddressTest, TestIsEui64) {
+  IPAddress addr;
+  EXPECT_TRUE(IPFromString(kIPv6EuiAddrString, &addr));
+  EXPECT_TRUE(IPIsMacBased(addr));
+
+  EXPECT_TRUE(IPFromString(kIPv6TemporaryAddrString, &addr));
+  EXPECT_FALSE(IPIsMacBased(addr));
+
+  EXPECT_TRUE(IPFromString(kIPv6LinkLocalAddrString, &addr));
+  EXPECT_TRUE(IPIsMacBased(addr));
+
+  EXPECT_TRUE(IPFromString(kIPv6AnyAddrString, &addr));
+  EXPECT_FALSE(IPIsMacBased(addr));
+
+  EXPECT_TRUE(IPFromString(kIPv6LoopbackAddrString, &addr));
+  EXPECT_FALSE(IPIsMacBased(addr));
+}
+
+TEST(IPAddressTest, TestNormalized) {
+  // Check normalizing a ::ffff:a.b.c.d address.
+  IPAddress addr;
+  EXPECT_TRUE(IPFromString(kIPv4MappedV4StyleAddrString, &addr));
+  IPAddress addr2(kIPv4RFC1918Addr);
+  addr = addr.Normalized();
+  EXPECT_EQ(addr2, addr);
+
+  // Check normalizing a ::ffff:aabb:ccdd address.
+  addr = IPAddress(kIPv4MappedPublicAddr);
+  addr2 = IPAddress(kIPv4PublicAddr);
+  addr = addr.Normalized();
+  EXPECT_EQ(addr, addr2);
+
+  // Check that a non-mapped v6 addresses isn't altered.
+  addr = IPAddress(kIPv6PublicAddr);
+  addr2 = IPAddress(kIPv6PublicAddr);
+  addr = addr.Normalized();
+  EXPECT_EQ(addr, addr2);
+
+  // Check that addresses that look a bit like mapped addresses aren't altered
+  EXPECT_TRUE(IPFromString("fe80::ffff:0102:0304", &addr));
+  addr2 = addr;
+  addr = addr.Normalized();
+  EXPECT_EQ(addr, addr2);
+  EXPECT_TRUE(IPFromString("::0102:0304", &addr));
+  addr2 = addr;
+  addr = addr.Normalized();
+  EXPECT_EQ(addr, addr2);
+  // This string should 'work' as an IP address but is not a mapped address,
+  // so it shouldn't change on normalization.
+  EXPECT_TRUE(IPFromString("::192.168.7.1", &addr));
+  addr2 = addr;
+  addr = addr.Normalized();
+  EXPECT_EQ(addr, addr2);
+
+  // Check that v4 addresses aren't altered.
+  addr = IPAddress(htonl(kIPv4PublicAddr));
+  addr2 = IPAddress(htonl(kIPv4PublicAddr));
+  addr = addr.Normalized();
+  EXPECT_EQ(addr, addr2);
+}
+
+TEST(IPAddressTest, TestAsIPv6Address) {
+  IPAddress addr(kIPv4PublicAddr);
+  IPAddress addr2(kIPv4MappedPublicAddr);
+  addr = addr.AsIPv6Address();
+  EXPECT_EQ(addr, addr2);
+
+  addr = IPAddress(kIPv4MappedPublicAddr);
+  addr2 = IPAddress(kIPv4MappedPublicAddr);
+  addr = addr.AsIPv6Address();
+  EXPECT_EQ(addr, addr2);
+
+  addr = IPAddress(kIPv6PublicAddr);
+  addr2 = IPAddress(kIPv6PublicAddr);
+  addr = addr.AsIPv6Address();
+  EXPECT_EQ(addr, addr2);
+}
+
+TEST(IPAddressTest, TestCountIPMaskBits) {
+  IPAddress mask;
+  // IPv4 on byte boundaries
+  EXPECT_PRED2(CheckMaskCount, "255.255.255.255", 32);
+  EXPECT_PRED2(CheckMaskCount, "255.255.255.0", 24);
+  EXPECT_PRED2(CheckMaskCount, "255.255.0.0", 16);
+  EXPECT_PRED2(CheckMaskCount, "255.0.0.0", 8);
+  EXPECT_PRED2(CheckMaskCount, "0.0.0.0", 0);
+
+  // IPv4 not on byte boundaries
+  EXPECT_PRED2(CheckMaskCount, "128.0.0.0", 1);
+  EXPECT_PRED2(CheckMaskCount, "224.0.0.0", 3);
+  EXPECT_PRED2(CheckMaskCount, "255.248.0.0", 13);
+  EXPECT_PRED2(CheckMaskCount, "255.255.224.0", 19);
+  EXPECT_PRED2(CheckMaskCount, "255.255.255.252", 30);
+
+  // V6 on byte boundaries
+  EXPECT_PRED2(CheckMaskCount, "::", 0);
+  EXPECT_PRED2(CheckMaskCount, "ff00::", 8);
+  EXPECT_PRED2(CheckMaskCount, "ffff::", 16);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ff00::", 24);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff::", 32);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ff00::", 40);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff::", 48);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff:ff00::", 56);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff:ffff::", 64);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff:ffff:ff00::", 72);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff:ffff:ffff::", 80);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff:ffff:ffff:ff00::", 88);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff:ffff:ffff:ffff::", 96);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff:ffff:ffff:ffff:ff00:0000", 104);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff:ffff:ffff:ffff:ffff:0000", 112);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ff00", 120);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", 128);
+
+  // V6 not on byte boundaries.
+  EXPECT_PRED2(CheckMaskCount, "8000::", 1);
+  EXPECT_PRED2(CheckMaskCount, "ff80::", 9);
+  EXPECT_PRED2(CheckMaskCount, "ffff:fe00::", 23);
+  EXPECT_PRED2(CheckMaskCount, "ffff:fffe::", 31);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:e000::", 35);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffe0::", 43);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff:f800::", 53);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff:fff8::", 61);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff:ffff:fc00::", 70);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff:ffff:fffc::", 78);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff:ffff:ffff:8000::", 81);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff:ffff:ffff:ff80::", 89);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff:ffff:ffff:ffff:fe00::", 103);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff:ffff:ffff:ffff:fffe:0000", 111);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff:ffff:ffff:ffff:ffff:fc00", 118);
+  EXPECT_PRED2(CheckMaskCount, "ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffc", 126);
+
+  // Non-contiguous ranges. These are invalid but lets test them
+  // to make sure they don't crash anything or infinite loop or something.
+  EXPECT_PRED1(TryInvalidMaskCount, "217.0.0.0");
+  EXPECT_PRED1(TryInvalidMaskCount, "255.185.0.0");
+  EXPECT_PRED1(TryInvalidMaskCount, "255.255.251.0");
+  EXPECT_PRED1(TryInvalidMaskCount, "255.255.251.255");
+  EXPECT_PRED1(TryInvalidMaskCount, "255.255.254.201");
+  EXPECT_PRED1(TryInvalidMaskCount, "::1");
+  EXPECT_PRED1(TryInvalidMaskCount, "fe80::1");
+  EXPECT_PRED1(TryInvalidMaskCount, "ff80::1");
+  EXPECT_PRED1(TryInvalidMaskCount, "ffff::1");
+  EXPECT_PRED1(TryInvalidMaskCount, "ffff:ff00:1::1");
+  EXPECT_PRED1(TryInvalidMaskCount, "ffff:ffff::ffff:1");
+  EXPECT_PRED1(TryInvalidMaskCount, "ffff:ffff:ff00:1::");
+  EXPECT_PRED1(TryInvalidMaskCount, "ffff:ffff:ffff::ff00");
+  EXPECT_PRED1(TryInvalidMaskCount, "ffff:ffff:ffff:ff00:1234::");
+  EXPECT_PRED1(TryInvalidMaskCount, "ffff:ffff:ffff:ffff:0012::ffff");
+  EXPECT_PRED1(TryInvalidMaskCount, "ffff:ffff:ffff:ffff:ff01::");
+  EXPECT_PRED1(TryInvalidMaskCount, "ffff:ffff:ffff:ffff:ffff:7f00::");
+  EXPECT_PRED1(TryInvalidMaskCount, "ffff:ffff:ffff:ffff:ffff:ff7a::");
+  EXPECT_PRED1(TryInvalidMaskCount, "ffff:ffff:ffff:ffff:ffff:ffff:7f00:0000");
+  EXPECT_PRED1(TryInvalidMaskCount, "ffff:ffff:ffff:ffff:ffff:ffff:ff70:0000");
+  EXPECT_PRED1(TryInvalidMaskCount, "ffff:ffff:ffff:ffff:ffff:ffff:ffff:0211");
+  EXPECT_PRED1(TryInvalidMaskCount, "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ff7f");
+}
+
+TEST(IPAddressTest, TestTruncateIP) {
+  EXPECT_PRED3(CheckTruncateIP, "255.255.255.255", 24, "255.255.255.0");
+  EXPECT_PRED3(CheckTruncateIP, "255.255.255.255", 16, "255.255.0.0");
+  EXPECT_PRED3(CheckTruncateIP, "255.255.255.255", 8, "255.0.0.0");
+  EXPECT_PRED3(CheckTruncateIP, "202.67.7.255", 24, "202.67.7.0");
+  EXPECT_PRED3(CheckTruncateIP, "202.129.65.205", 16, "202.129.0.0");
+  EXPECT_PRED3(CheckTruncateIP, "55.25.2.77", 8, "55.0.0.0");
+  EXPECT_PRED3(CheckTruncateIP, "74.128.99.254", 1, "0.0.0.0");
+  EXPECT_PRED3(CheckTruncateIP, "106.55.99.254", 3, "96.0.0.0");
+  EXPECT_PRED3(CheckTruncateIP, "172.167.53.222", 13, "172.160.0.0");
+  EXPECT_PRED3(CheckTruncateIP, "255.255.224.0", 18, "255.255.192.0");
+  EXPECT_PRED3(CheckTruncateIP, "255.255.255.252", 28, "255.255.255.240");
+
+  EXPECT_PRED3(CheckTruncateIP, "fe80:1111:2222:3333:4444:5555:6666:7777", 1,
+               "8000::");
+  EXPECT_PRED3(CheckTruncateIP, "fff0:1111:2222:3333:4444:5555:6666:7777", 9,
+               "ff80::");
+  EXPECT_PRED3(CheckTruncateIP, "ffff:ff80:1111:2222:3333:4444:5555:6666", 23,
+               "ffff:fe00::");
+  EXPECT_PRED3(CheckTruncateIP, "ffff:ff80:1111:2222:3333:4444:5555:6666", 32,
+               "ffff:ff80::");
+  EXPECT_PRED3(CheckTruncateIP, "2400:f9af:e456:1111:2222:3333:4444:5555", 35,
+               "2400:f9af:e000::");
+  EXPECT_PRED3(CheckTruncateIP, "9999:1111:2233:4444:5555:6666:7777:8888", 53,
+               "9999:1111:2233:4000::");
+  EXPECT_PRED3(CheckTruncateIP, "9999:1111:2233:4567:5555:6666:7777:8888", 64,
+               "9999:1111:2233:4567::");
+  EXPECT_PRED3(CheckTruncateIP, "1111:2222:3333:4444:5555:6666:7777:8888", 68,
+               "1111:2222:3333:4444:5000::");
+  EXPECT_PRED3(CheckTruncateIP, "1111:2222:3333:4444:5555:6666:7777:8888", 92,
+               "1111:2222:3333:4444:5555:6660::");
+  EXPECT_PRED3(CheckTruncateIP, "1111:2222:3333:4444:5555:6666:7777:8888", 96,
+               "1111:2222:3333:4444:5555:6666::");
+  EXPECT_PRED3(CheckTruncateIP, "1111:2222:3333:4444:5555:6666:7777:8888", 105,
+               "1111:2222:3333:4444:5555:6666:7700::");
+  EXPECT_PRED3(CheckTruncateIP, "1111:2222:3333:4444:5555:6666:7777:8888", 124,
+               "1111:2222:3333:4444:5555:6666:7777:8880");
+
+  // Slightly degenerate cases
+  EXPECT_PRED3(CheckTruncateIP, "202.165.33.127", 32, "202.165.33.127");
+  EXPECT_PRED3(CheckTruncateIP, "235.105.77.12", 0, "0.0.0.0");
+  EXPECT_PRED3(CheckTruncateIP, "1111:2222:3333:4444:5555:6666:7777:8888", 128,
+               "1111:2222:3333:4444:5555:6666:7777:8888");
+  EXPECT_PRED3(CheckTruncateIP, "1111:2222:3333:4444:5555:6666:7777:8888", 0,
+               "::");
+}
+
+TEST(IPAddressTest, TestCategorizeIPv6) {
+  // Test determining if an IPAddress is 6Bone/6To4/Teredo/etc.
+  // IPv4 address, should be none of these (not even v4compat/v4mapped).
+  IPAddress v4_addr(kIPv4PublicAddr);
+  EXPECT_FALSE(IPIs6Bone(v4_addr));
+  EXPECT_FALSE(IPIs6To4(v4_addr));
+  EXPECT_FALSE(IPIsSiteLocal(v4_addr));
+  EXPECT_FALSE(IPIsTeredo(v4_addr));
+  EXPECT_FALSE(IPIsULA(v4_addr));
+  EXPECT_FALSE(IPIsV4Compatibility(v4_addr));
+  EXPECT_FALSE(IPIsV4Mapped(v4_addr));
+  // Linklocal (fe80::/16) adddress; should be none of these.
+  IPAddress linklocal_addr(kIPv6LinkLocalAddr);
+  EXPECT_FALSE(IPIs6Bone(linklocal_addr));
+  EXPECT_FALSE(IPIs6To4(linklocal_addr));
+  EXPECT_FALSE(IPIsSiteLocal(linklocal_addr));
+  EXPECT_FALSE(IPIsTeredo(linklocal_addr));
+  EXPECT_FALSE(IPIsULA(linklocal_addr));
+  EXPECT_FALSE(IPIsV4Compatibility(linklocal_addr));
+  EXPECT_FALSE(IPIsV4Mapped(linklocal_addr));
+  // 'Normal' IPv6 address, should also be none of these.
+  IPAddress normal_addr(kIPv6PublicAddr);
+  EXPECT_FALSE(IPIs6Bone(normal_addr));
+  EXPECT_FALSE(IPIs6To4(normal_addr));
+  EXPECT_FALSE(IPIsSiteLocal(normal_addr));
+  EXPECT_FALSE(IPIsTeredo(normal_addr));
+  EXPECT_FALSE(IPIsULA(normal_addr));
+  EXPECT_FALSE(IPIsV4Compatibility(normal_addr));
+  EXPECT_FALSE(IPIsV4Mapped(normal_addr));
+  // IPv4 mapped address (::ffff:123.123.123.123)
+  IPAddress v4mapped_addr(kIPv4MappedPublicAddr);
+  EXPECT_TRUE(IPIsV4Mapped(v4mapped_addr));
+  EXPECT_FALSE(IPIsV4Compatibility(v4mapped_addr));
+  EXPECT_FALSE(IPIs6Bone(v4mapped_addr));
+  EXPECT_FALSE(IPIs6To4(v4mapped_addr));
+  EXPECT_FALSE(IPIsSiteLocal(v4mapped_addr));
+  EXPECT_FALSE(IPIsTeredo(v4mapped_addr));
+  EXPECT_FALSE(IPIsULA(v4mapped_addr));
+  // IPv4 compatibility address (::123.123.123.123)
+  IPAddress v4compat_addr;
+  IPFromString("::192.168.7.1", &v4compat_addr);
+  EXPECT_TRUE(IPIsV4Compatibility(v4compat_addr));
+  EXPECT_FALSE(IPIs6Bone(v4compat_addr));
+  EXPECT_FALSE(IPIs6To4(v4compat_addr));
+  EXPECT_FALSE(IPIsSiteLocal(v4compat_addr));
+  EXPECT_FALSE(IPIsTeredo(v4compat_addr));
+  EXPECT_FALSE(IPIsULA(v4compat_addr));
+  EXPECT_FALSE(IPIsV4Mapped(v4compat_addr));
+  // 6Bone address (3FFE::/16)
+  IPAddress sixbone_addr;
+  IPFromString("3FFE:123:456::789:123", &sixbone_addr);
+  EXPECT_TRUE(IPIs6Bone(sixbone_addr));
+  EXPECT_FALSE(IPIs6To4(sixbone_addr));
+  EXPECT_FALSE(IPIsSiteLocal(sixbone_addr));
+  EXPECT_FALSE(IPIsTeredo(sixbone_addr));
+  EXPECT_FALSE(IPIsULA(sixbone_addr));
+  EXPECT_FALSE(IPIsV4Mapped(sixbone_addr));
+  EXPECT_FALSE(IPIsV4Compatibility(sixbone_addr));
+  // Unique Local Address (FC::/7)
+  IPAddress ula_addr;
+  IPFromString("FC00:123:456::789:123", &ula_addr);
+  EXPECT_TRUE(IPIsULA(ula_addr));
+  EXPECT_FALSE(IPIs6Bone(ula_addr));
+  EXPECT_FALSE(IPIs6To4(ula_addr));
+  EXPECT_FALSE(IPIsSiteLocal(ula_addr));
+  EXPECT_FALSE(IPIsTeredo(ula_addr));
+  EXPECT_FALSE(IPIsV4Mapped(ula_addr));
+  EXPECT_FALSE(IPIsV4Compatibility(ula_addr));
+  // 6To4 Address (2002::/16)
+  IPAddress sixtofour_addr;
+  IPFromString("2002:123:456::789:123", &sixtofour_addr);
+  EXPECT_TRUE(IPIs6To4(sixtofour_addr));
+  EXPECT_FALSE(IPIs6Bone(sixtofour_addr));
+  EXPECT_FALSE(IPIsSiteLocal(sixtofour_addr));
+  EXPECT_FALSE(IPIsTeredo(sixtofour_addr));
+  EXPECT_FALSE(IPIsULA(sixtofour_addr));
+  EXPECT_FALSE(IPIsV4Compatibility(sixtofour_addr));
+  EXPECT_FALSE(IPIsV4Mapped(sixtofour_addr));
+  // Site Local address (FEC0::/10)
+  IPAddress sitelocal_addr;
+  IPFromString("FEC0:123:456::789:123", &sitelocal_addr);
+  EXPECT_TRUE(IPIsSiteLocal(sitelocal_addr));
+  EXPECT_FALSE(IPIs6Bone(sitelocal_addr));
+  EXPECT_FALSE(IPIs6To4(sitelocal_addr));
+  EXPECT_FALSE(IPIsTeredo(sitelocal_addr));
+  EXPECT_FALSE(IPIsULA(sitelocal_addr));
+  EXPECT_FALSE(IPIsV4Compatibility(sitelocal_addr));
+  EXPECT_FALSE(IPIsV4Mapped(sitelocal_addr));
+  // Teredo Address (2001:0000::/32)
+  IPAddress teredo_addr;
+  IPFromString("2001:0000:123:456::789:123", &teredo_addr);
+  EXPECT_TRUE(IPIsTeredo(teredo_addr));
+  EXPECT_FALSE(IPIsSiteLocal(teredo_addr));
+  EXPECT_FALSE(IPIs6Bone(teredo_addr));
+  EXPECT_FALSE(IPIs6To4(teredo_addr));
+  EXPECT_FALSE(IPIsULA(teredo_addr));
+  EXPECT_FALSE(IPIsV4Compatibility(teredo_addr));
+  EXPECT_FALSE(IPIsV4Mapped(teredo_addr));
+}
+
+TEST(IPAddressTest, TestToSensitiveString) {
+  IPAddress addr_v4 = IPAddress(kIPv4PublicAddr);
+  IPAddress addr_v6 = IPAddress(kIPv6PublicAddr);
+  IPAddress addr_v6_2 = IPAddress(kIPv6PublicAddr2);
+  EXPECT_EQ(kIPv4PublicAddrString, addr_v4.ToString());
+  EXPECT_EQ(kIPv6PublicAddrString, addr_v6.ToString());
+  EXPECT_EQ(kIPv6PublicAddr2String, addr_v6_2.ToString());
+#if defined(NDEBUG)
+  EXPECT_EQ(kIPv4PublicAddrAnonymizedString, addr_v4.ToSensitiveString());
+  EXPECT_EQ(kIPv6PublicAddrAnonymizedString, addr_v6.ToSensitiveString());
+  EXPECT_EQ(kIPv6PublicAddr2AnonymizedString, addr_v6_2.ToSensitiveString());
+#else
+  EXPECT_EQ(kIPv4PublicAddrString, addr_v4.ToSensitiveString());
+  EXPECT_EQ(kIPv6PublicAddrString, addr_v6.ToSensitiveString());
+  EXPECT_EQ(kIPv6PublicAddr2String, addr_v6_2.ToSensitiveString());
+#endif  // defined(NDEBUG)
+}
+
+TEST(IPAddressTest, TestInterfaceAddress) {
+  in6_addr addr;
+  InterfaceAddress addr1(kIPv6PublicAddr,
+                         IPV6_ADDRESS_FLAG_TEMPORARY);
+  EXPECT_EQ(addr1.ipv6_flags(), IPV6_ADDRESS_FLAG_TEMPORARY);
+  EXPECT_EQ(addr1.family(), AF_INET6);
+
+  addr = addr1.ipv6_address();
+  EXPECT_TRUE(IN6_ARE_ADDR_EQUAL(&addr, &kIPv6PublicAddr));
+
+  InterfaceAddress addr2 = addr1;
+  EXPECT_EQ(addr1, addr2);
+  EXPECT_EQ(addr2.ipv6_flags(), IPV6_ADDRESS_FLAG_TEMPORARY);
+  addr = addr2.ipv6_address();
+  EXPECT_TRUE(IN6_ARE_ADDR_EQUAL(&addr, &kIPv6PublicAddr));
+
+  InterfaceAddress addr3(addr1);
+  EXPECT_EQ(addr1, addr3);
+  EXPECT_EQ(addr3.ipv6_flags(), IPV6_ADDRESS_FLAG_TEMPORARY);
+  addr = addr3.ipv6_address();
+  EXPECT_TRUE(IN6_ARE_ADDR_EQUAL(&addr, &kIPv6PublicAddr));
+
+  InterfaceAddress addr4(kIPv6PublicAddr,
+                         IPV6_ADDRESS_FLAG_DEPRECATED);
+  EXPECT_NE(addr1, addr4);
+
+  // When you compare them as IPAddress, since operator==
+  // is not virtual, it'll be equal.
+  IPAddress *paddr1 = &addr1;
+  IPAddress *paddr4 = &addr4;
+  EXPECT_EQ(*paddr1, *paddr4);
+
+  InterfaceAddress addr5(kIPv6LinkLocalAddr,
+                         IPV6_ADDRESS_FLAG_TEMPORARY);
+  EXPECT_NE(addr1, addr5);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/java/src/org/webrtc/ContextUtils.java b/rtc_base/java/src/org/webrtc/ContextUtils.java
new file mode 100644
index 0000000..f0e6ef0
--- /dev/null
+++ b/rtc_base/java/src/org/webrtc/ContextUtils.java
@@ -0,0 +1,46 @@
+/*
+ *  Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc;
+
+import android.content.Context;
+import org.webrtc.Logging;
+
+/**
+ * Class for storing the application context and retrieving it in a static context. Similar to
+ * org.chromium.base.ContextUtils.
+ */
+public class ContextUtils {
+  private static final String TAG = "ContextUtils";
+  private static Context applicationContext;
+
+  /**
+   * Stores the application context that will be returned by getApplicationContext. This is called
+   * by PeerConnectionFactory.initialize. The application context must be set before creating
+   * a PeerConnectionFactory and must not be modified while it is alive.
+   */
+  public static void initialize(Context applicationContext) {
+    if (applicationContext == null) {
+      throw new IllegalArgumentException(
+          "Application context cannot be null for ContextUtils.initialize.");
+    }
+    ContextUtils.applicationContext = applicationContext;
+  }
+
+  /**
+   * Returns the stored application context.
+   *
+   * @deprecated crbug.com/webrtc/8937
+   */
+  @Deprecated
+  public static Context getApplicationContext() {
+    return applicationContext;
+  }
+}
diff --git a/rtc_base/java/src/org/webrtc/Logging.java b/rtc_base/java/src/org/webrtc/Logging.java
new file mode 100644
index 0000000..f143d2f
--- /dev/null
+++ b/rtc_base/java/src/org/webrtc/Logging.java
@@ -0,0 +1,158 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.EnumSet;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+/**
+ * Java wrapper for WebRTC logging. Logging defaults to java.util.logging.Logger, but will switch to
+ * native logging (rtc::LogMessage) if one of the following static functions are called from the
+ * app:
+ * - Logging.enableLogThreads
+ * - Logging.enableLogTimeStamps
+ * - Logging.enableLogToDebugOutput
+ *
+ * Using these APIs requires that the native library is loaded, using
+ * PeerConnectionFactory.initialize.
+ */
+public class Logging {
+  private static final Logger fallbackLogger = createFallbackLogger();
+  private static volatile boolean loggingEnabled;
+
+  private static Logger createFallbackLogger() {
+    final Logger fallbackLogger = Logger.getLogger("org.webrtc.Logging");
+    fallbackLogger.setLevel(Level.ALL);
+    return fallbackLogger;
+  }
+
+  // TODO(solenberg): Remove once dependent projects updated.
+  @Deprecated
+  public enum TraceLevel {
+    TRACE_NONE(0x0000),
+    TRACE_STATEINFO(0x0001),
+    TRACE_WARNING(0x0002),
+    TRACE_ERROR(0x0004),
+    TRACE_CRITICAL(0x0008),
+    TRACE_APICALL(0x0010),
+    TRACE_DEFAULT(0x00ff),
+    TRACE_MODULECALL(0x0020),
+    TRACE_MEMORY(0x0100),
+    TRACE_TIMER(0x0200),
+    TRACE_STREAM(0x0400),
+    TRACE_DEBUG(0x0800),
+    TRACE_INFO(0x1000),
+    TRACE_TERSEINFO(0x2000),
+    TRACE_ALL(0xffff);
+
+    public final int level;
+    TraceLevel(int level) {
+      this.level = level;
+    }
+  }
+
+  // Keep in sync with webrtc/rtc_base/logging.h:LoggingSeverity.
+  public enum Severity { LS_SENSITIVE, LS_VERBOSE, LS_INFO, LS_WARNING, LS_ERROR, LS_NONE }
+
+  public static void enableLogThreads() {
+    nativeEnableLogThreads();
+  }
+
+  public static void enableLogTimeStamps() {
+    nativeEnableLogTimeStamps();
+  }
+
+  // TODO(solenberg): Remove once dependent projects updated.
+  @Deprecated
+  public static void enableTracing(String path, EnumSet<TraceLevel> levels) {}
+
+  // Enable diagnostic logging for messages of |severity| to the platform debug
+  // output. On Android, the output will be directed to Logcat.
+  // Note: this function starts collecting the output of the RTC_LOG() macros.
+  // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression.
+  @SuppressWarnings("NoSynchronizedMethodCheck")
+  public static synchronized void enableLogToDebugOutput(Severity severity) {
+    nativeEnableLogToDebugOutput(severity.ordinal());
+    loggingEnabled = true;
+  }
+
+  public static void log(Severity severity, String tag, String message) {
+    if (loggingEnabled) {
+      nativeLog(severity.ordinal(), tag, message);
+      return;
+    }
+
+    // Fallback to system log.
+    Level level;
+    switch (severity) {
+      case LS_ERROR:
+        level = Level.SEVERE;
+        break;
+      case LS_WARNING:
+        level = Level.WARNING;
+        break;
+      case LS_INFO:
+        level = Level.INFO;
+        break;
+      default:
+        level = Level.FINE;
+        break;
+    }
+    fallbackLogger.log(level, tag + ": " + message);
+  }
+
+  public static void d(String tag, String message) {
+    log(Severity.LS_INFO, tag, message);
+  }
+
+  public static void e(String tag, String message) {
+    log(Severity.LS_ERROR, tag, message);
+  }
+
+  public static void w(String tag, String message) {
+    log(Severity.LS_WARNING, tag, message);
+  }
+
+  public static void e(String tag, String message, Throwable e) {
+    log(Severity.LS_ERROR, tag, message);
+    log(Severity.LS_ERROR, tag, e.toString());
+    log(Severity.LS_ERROR, tag, getStackTraceString(e));
+  }
+
+  public static void w(String tag, String message, Throwable e) {
+    log(Severity.LS_WARNING, tag, message);
+    log(Severity.LS_WARNING, tag, e.toString());
+    log(Severity.LS_WARNING, tag, getStackTraceString(e));
+  }
+
+  public static void v(String tag, String message) {
+    log(Severity.LS_VERBOSE, tag, message);
+  }
+
+  private static String getStackTraceString(Throwable e) {
+    if (e == null) {
+      return "";
+    }
+
+    StringWriter sw = new StringWriter();
+    PrintWriter pw = new PrintWriter(sw);
+    e.printStackTrace(pw);
+    return sw.toString();
+  }
+
+  private static native void nativeEnableLogToDebugOutput(int nativeSeverity);
+  private static native void nativeEnableLogThreads();
+  private static native void nativeEnableLogTimeStamps();
+  private static native void nativeLog(int severity, String tag, String message);
+}
diff --git a/rtc_base/java/src/org/webrtc/OWNERS b/rtc_base/java/src/org/webrtc/OWNERS
new file mode 100644
index 0000000..299e8b2
--- /dev/null
+++ b/rtc_base/java/src/org/webrtc/OWNERS
@@ -0,0 +1,2 @@
+magjed@webrtc.org
+sakal@webrtc.org
diff --git a/rtc_base/java/src/org/webrtc/Size.java b/rtc_base/java/src/org/webrtc/Size.java
new file mode 100644
index 0000000..a711b5d
--- /dev/null
+++ b/rtc_base/java/src/org/webrtc/Size.java
@@ -0,0 +1,45 @@
+/*
+ *  Copyright 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc;
+
+/**
+ * Class for representing size of an object. Very similar to android.util.Size but available on all
+ * devices.
+ */
+public class Size {
+  public int width;
+  public int height;
+
+  public Size(int width, int height) {
+    this.width = width;
+    this.height = height;
+  }
+
+  @Override
+  public String toString() {
+    return width + "x" + height;
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (!(other instanceof Size)) {
+      return false;
+    }
+    final Size otherSize = (Size) other;
+    return width == otherSize.width && height == otherSize.height;
+  }
+
+  @Override
+  public int hashCode() {
+    // Use prime close to 2^16 to avoid collisions for normal values less than 2^16.
+    return 1 + 65537 * width + height;
+  }
+}
diff --git a/rtc_base/java/src/org/webrtc/ThreadUtils.java b/rtc_base/java/src/org/webrtc/ThreadUtils.java
new file mode 100644
index 0000000..3cc80d3
--- /dev/null
+++ b/rtc_base/java/src/org/webrtc/ThreadUtils.java
@@ -0,0 +1,211 @@
+/*
+ *  Copyright 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc;
+
+import android.os.Handler;
+import android.os.Looper;
+import android.os.SystemClock;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+public class ThreadUtils {
+  /**
+   * Utility class to be used for checking that a method is called on the correct thread.
+   */
+  public static class ThreadChecker {
+    private Thread thread = Thread.currentThread();
+
+    public void checkIsOnValidThread() {
+      if (thread == null) {
+        thread = Thread.currentThread();
+      }
+      if (Thread.currentThread() != thread) {
+        throw new IllegalStateException("Wrong thread");
+      }
+    }
+
+    public void detachThread() {
+      thread = null;
+    }
+  }
+
+  /**
+   * Throws exception if called from other than main thread.
+   */
+  public static void checkIsOnMainThread() {
+    if (Thread.currentThread() != Looper.getMainLooper().getThread()) {
+      throw new IllegalStateException("Not on main thread!");
+    }
+  }
+
+  /**
+   * Utility interface to be used with executeUninterruptibly() to wait for blocking operations
+   * to complete without getting interrupted..
+   */
+  public interface BlockingOperation { void run() throws InterruptedException; }
+
+  /**
+   * Utility method to make sure a blocking operation is executed to completion without getting
+   * interrupted. This should be used in cases where the operation is waiting for some critical
+   * work, e.g. cleanup, that must complete before returning. If the thread is interrupted during
+   * the blocking operation, this function will re-run the operation until completion, and only then
+   * re-interrupt the thread.
+   */
+  public static void executeUninterruptibly(BlockingOperation operation) {
+    boolean wasInterrupted = false;
+    while (true) {
+      try {
+        operation.run();
+        break;
+      } catch (InterruptedException e) {
+        // Someone is asking us to return early at our convenience. We can't cancel this operation,
+        // but we should preserve the information and pass it along.
+        wasInterrupted = true;
+      }
+    }
+    // Pass interruption information along.
+    if (wasInterrupted) {
+      Thread.currentThread().interrupt();
+    }
+  }
+
+  public static boolean joinUninterruptibly(final Thread thread, long timeoutMs) {
+    final long startTimeMs = SystemClock.elapsedRealtime();
+    long timeRemainingMs = timeoutMs;
+    boolean wasInterrupted = false;
+    while (timeRemainingMs > 0) {
+      try {
+        thread.join(timeRemainingMs);
+        break;
+      } catch (InterruptedException e) {
+        // Someone is asking us to return early at our convenience. We can't cancel this operation,
+        // but we should preserve the information and pass it along.
+        wasInterrupted = true;
+        final long elapsedTimeMs = SystemClock.elapsedRealtime() - startTimeMs;
+        timeRemainingMs = timeoutMs - elapsedTimeMs;
+      }
+    }
+    // Pass interruption information along.
+    if (wasInterrupted) {
+      Thread.currentThread().interrupt();
+    }
+    return !thread.isAlive();
+  }
+
+  public static void joinUninterruptibly(final Thread thread) {
+    executeUninterruptibly(new BlockingOperation() {
+      @Override
+      public void run() throws InterruptedException {
+        thread.join();
+      }
+    });
+  }
+
+  public static void awaitUninterruptibly(final CountDownLatch latch) {
+    executeUninterruptibly(new BlockingOperation() {
+      @Override
+      public void run() throws InterruptedException {
+        latch.await();
+      }
+    });
+  }
+
+  public static boolean awaitUninterruptibly(CountDownLatch barrier, long timeoutMs) {
+    final long startTimeMs = SystemClock.elapsedRealtime();
+    long timeRemainingMs = timeoutMs;
+    boolean wasInterrupted = false;
+    boolean result = false;
+    do {
+      try {
+        result = barrier.await(timeRemainingMs, TimeUnit.MILLISECONDS);
+        break;
+      } catch (InterruptedException e) {
+        // Someone is asking us to return early at our convenience. We can't cancel this operation,
+        // but we should preserve the information and pass it along.
+        wasInterrupted = true;
+        final long elapsedTimeMs = SystemClock.elapsedRealtime() - startTimeMs;
+        timeRemainingMs = timeoutMs - elapsedTimeMs;
+      }
+    } while (timeRemainingMs > 0);
+    // Pass interruption information along.
+    if (wasInterrupted) {
+      Thread.currentThread().interrupt();
+    }
+    return result;
+  }
+
+  /**
+   * Post |callable| to |handler| and wait for the result.
+   */
+  public static <V> V invokeAtFrontUninterruptibly(
+      final Handler handler, final Callable<V> callable) {
+    if (handler.getLooper().getThread() == Thread.currentThread()) {
+      try {
+        return callable.call();
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+    }
+    // Place-holder classes that are assignable inside nested class.
+    class CaughtException {
+      Exception e;
+    }
+    class Result {
+      public V value;
+    }
+    final Result result = new Result();
+    final CaughtException caughtException = new CaughtException();
+    final CountDownLatch barrier = new CountDownLatch(1);
+    handler.post(new Runnable() {
+      @Override
+      public void run() {
+        try {
+          result.value = callable.call();
+        } catch (Exception e) {
+          caughtException.e = e;
+        }
+        barrier.countDown();
+      }
+    });
+    awaitUninterruptibly(barrier);
+    // Re-throw any runtime exception caught inside the other thread. Since this is an invoke, add
+    // stack trace for the waiting thread as well.
+    if (caughtException.e != null) {
+      final RuntimeException runtimeException = new RuntimeException(caughtException.e);
+      runtimeException.setStackTrace(
+          concatStackTraces(caughtException.e.getStackTrace(), runtimeException.getStackTrace()));
+      throw runtimeException;
+    }
+    return result.value;
+  }
+
+  /**
+   * Post |runner| to |handler|, at the front, and wait for completion.
+   */
+  public static void invokeAtFrontUninterruptibly(final Handler handler, final Runnable runner) {
+    invokeAtFrontUninterruptibly(handler, new Callable<Void>() {
+      @Override
+      public Void call() {
+        runner.run();
+        return null;
+      }
+    });
+  }
+
+  static StackTraceElement[] concatStackTraces(
+      StackTraceElement[] inner, StackTraceElement[] outer) {
+    final StackTraceElement[] combined = new StackTraceElement[inner.length + outer.length];
+    System.arraycopy(inner, 0, combined, 0, inner.length);
+    System.arraycopy(outer, 0, combined, inner.length, outer.length);
+    return combined;
+  }
+}
diff --git a/rtc_base/json.cc b/rtc_base/json.cc
new file mode 100644
index 0000000..b8071a9
--- /dev/null
+++ b/rtc_base/json.cc
@@ -0,0 +1,300 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/json.h"
+
+#include <errno.h>
+#include <limits.h>
+#include <stdlib.h>
+
+#include <sstream>
+
+namespace rtc {
+
+bool GetStringFromJson(const Json::Value& in, std::string* out) {
+  if (!in.isString()) {
+    std::ostringstream s;
+    if (in.isBool()) {
+      s << std::boolalpha << in.asBool();
+    } else if (in.isInt()) {
+      s << in.asInt();
+    } else if (in.isUInt()) {
+      s << in.asUInt();
+    } else if (in.isDouble()) {
+      s << in.asDouble();
+    } else {
+      return false;
+    }
+    *out = s.str();
+  } else {
+    *out = in.asString();
+  }
+  return true;
+}
+
+bool GetIntFromJson(const Json::Value& in, int* out) {
+  bool ret;
+  if (!in.isString()) {
+    ret = in.isConvertibleTo(Json::intValue);
+    if (ret) {
+      *out = in.asInt();
+    }
+  } else {
+    long val;  // NOLINT
+    const char* c_str = in.asCString();
+    char* end_ptr;
+    errno = 0;
+    val = strtol(c_str, &end_ptr, 10);  // NOLINT
+    ret = (end_ptr != c_str && *end_ptr == '\0' && !errno &&
+           val >= INT_MIN && val <= INT_MAX);
+    *out = val;
+  }
+  return ret;
+}
+
+bool GetUIntFromJson(const Json::Value& in, unsigned int* out) {
+  bool ret;
+  if (!in.isString()) {
+    ret = in.isConvertibleTo(Json::uintValue);
+    if (ret) {
+      *out = in.asUInt();
+    }
+  } else {
+    unsigned long val;  // NOLINT
+    const char* c_str = in.asCString();
+    char* end_ptr;
+    errno = 0;
+    val = strtoul(c_str, &end_ptr, 10);  // NOLINT
+    ret = (end_ptr != c_str && *end_ptr == '\0' && !errno &&
+           val <= UINT_MAX);
+    *out = val;
+  }
+  return ret;
+}
+
+bool GetBoolFromJson(const Json::Value& in, bool* out) {
+  bool ret;
+  if (!in.isString()) {
+    ret = in.isConvertibleTo(Json::booleanValue);
+    if (ret) {
+      *out = in.asBool();
+    }
+  } else {
+    if (in.asString() == "true") {
+      *out = true;
+      ret = true;
+    } else if (in.asString() == "false") {
+      *out = false;
+      ret = true;
+    } else {
+      ret = false;
+    }
+  }
+  return ret;
+}
+
+bool GetDoubleFromJson(const Json::Value& in, double* out) {
+  bool ret;
+  if (!in.isString()) {
+    ret = in.isConvertibleTo(Json::realValue);
+    if (ret) {
+      *out = in.asDouble();
+    }
+  } else {
+    double val;
+    const char* c_str = in.asCString();
+    char* end_ptr;
+    errno = 0;
+    val = strtod(c_str, &end_ptr);
+    ret = (end_ptr != c_str && *end_ptr == '\0' && !errno);
+    *out = val;
+  }
+  return ret;
+}
+
+namespace {
+template<typename T>
+bool JsonArrayToVector(const Json::Value& value,
+                       bool (*getter)(const Json::Value& in, T* out),
+                       std::vector<T> *vec) {
+  vec->clear();
+  if (!value.isArray()) {
+    return false;
+  }
+
+  for (Json::Value::ArrayIndex i = 0; i < value.size(); ++i) {
+    T val;
+    if (!getter(value[i], &val)) {
+      return false;
+    }
+    vec->push_back(val);
+  }
+
+  return true;
+}
+// Trivial getter helper
+bool GetValueFromJson(const Json::Value& in, Json::Value* out) {
+  *out = in;
+  return true;
+}
+}  // unnamed namespace
+
+bool JsonArrayToValueVector(const Json::Value& in,
+                            std::vector<Json::Value>* out) {
+  return JsonArrayToVector(in, GetValueFromJson, out);
+}
+
+bool JsonArrayToIntVector(const Json::Value& in,
+                          std::vector<int>* out) {
+  return JsonArrayToVector(in, GetIntFromJson, out);
+}
+
+bool JsonArrayToUIntVector(const Json::Value& in,
+                           std::vector<unsigned int>* out) {
+  return JsonArrayToVector(in, GetUIntFromJson, out);
+}
+
+bool JsonArrayToStringVector(const Json::Value& in,
+                             std::vector<std::string>* out) {
+  return JsonArrayToVector(in, GetStringFromJson, out);
+}
+
+bool JsonArrayToBoolVector(const Json::Value& in,
+                           std::vector<bool>* out) {
+  return JsonArrayToVector(in, GetBoolFromJson, out);
+}
+
+bool JsonArrayToDoubleVector(const Json::Value& in,
+                             std::vector<double>* out) {
+  return JsonArrayToVector(in, GetDoubleFromJson, out);
+}
+
+namespace {
+template<typename T>
+Json::Value VectorToJsonArray(const std::vector<T>& vec) {
+  Json::Value result(Json::arrayValue);
+  for (size_t i = 0; i < vec.size(); ++i) {
+    result.append(Json::Value(vec[i]));
+  }
+  return result;
+}
+}  // unnamed namespace
+
+Json::Value ValueVectorToJsonArray(const std::vector<Json::Value>& in) {
+  return VectorToJsonArray(in);
+}
+
+Json::Value IntVectorToJsonArray(const std::vector<int>& in) {
+  return VectorToJsonArray(in);
+}
+
+Json::Value UIntVectorToJsonArray(const std::vector<unsigned int>& in) {
+  return VectorToJsonArray(in);
+}
+
+Json::Value StringVectorToJsonArray(const std::vector<std::string>& in) {
+  return VectorToJsonArray(in);
+}
+
+Json::Value BoolVectorToJsonArray(const std::vector<bool>& in) {
+  return VectorToJsonArray(in);
+}
+
+Json::Value DoubleVectorToJsonArray(const std::vector<double>& in) {
+  return VectorToJsonArray(in);
+}
+
+bool GetValueFromJsonArray(const Json::Value& in, size_t n,
+                           Json::Value* out) {
+  if (!in.isArray() || !in.isValidIndex(static_cast<int>(n))) {
+    return false;
+  }
+
+  *out = in[static_cast<Json::Value::ArrayIndex>(n)];
+  return true;
+}
+
+bool GetIntFromJsonArray(const Json::Value& in, size_t n,
+                         int* out) {
+  Json::Value x;
+  return GetValueFromJsonArray(in, n, &x) && GetIntFromJson(x, out);
+}
+
+bool GetUIntFromJsonArray(const Json::Value& in, size_t n,
+                          unsigned int* out)  {
+  Json::Value x;
+  return GetValueFromJsonArray(in, n, &x) && GetUIntFromJson(x, out);
+}
+
+bool GetStringFromJsonArray(const Json::Value& in, size_t n,
+                            std::string* out) {
+  Json::Value x;
+  return GetValueFromJsonArray(in, n, &x) && GetStringFromJson(x, out);
+}
+
+bool GetBoolFromJsonArray(const Json::Value& in, size_t n,
+                          bool* out) {
+  Json::Value x;
+  return GetValueFromJsonArray(in, n, &x) && GetBoolFromJson(x, out);
+}
+
+bool GetDoubleFromJsonArray(const Json::Value& in, size_t n,
+                            double* out) {
+  Json::Value x;
+  return GetValueFromJsonArray(in, n, &x) && GetDoubleFromJson(x, out);
+}
+
+bool GetValueFromJsonObject(const Json::Value& in, const std::string& k,
+                            Json::Value* out) {
+  if (!in.isObject() || !in.isMember(k)) {
+    return false;
+  }
+
+  *out = in[k];
+  return true;
+}
+
+bool GetIntFromJsonObject(const Json::Value& in, const std::string& k,
+                          int* out) {
+  Json::Value x;
+  return GetValueFromJsonObject(in, k, &x) && GetIntFromJson(x, out);
+}
+
+bool GetUIntFromJsonObject(const Json::Value& in, const std::string& k,
+                           unsigned int* out)  {
+  Json::Value x;
+  return GetValueFromJsonObject(in, k, &x) && GetUIntFromJson(x, out);
+}
+
+bool GetStringFromJsonObject(const Json::Value& in, const std::string& k,
+                             std::string* out)  {
+  Json::Value x;
+  return GetValueFromJsonObject(in, k, &x) && GetStringFromJson(x, out);
+}
+
+bool GetBoolFromJsonObject(const Json::Value& in, const std::string& k,
+                           bool* out) {
+  Json::Value x;
+  return GetValueFromJsonObject(in, k, &x) && GetBoolFromJson(x, out);
+}
+
+bool GetDoubleFromJsonObject(const Json::Value& in, const std::string& k,
+                             double* out) {
+  Json::Value x;
+  return GetValueFromJsonObject(in, k, &x) && GetDoubleFromJson(x, out);
+}
+
+std::string JsonValueToString(const Json::Value& json) {
+  Json::FastWriter w;
+  std::string value = w.write(json);
+  return value.substr(0, value.size() - 1);  // trim trailing newline
+}
+
+}  // namespace rtc
diff --git a/rtc_base/json.h b/rtc_base/json.h
new file mode 100644
index 0000000..5db8bd6
--- /dev/null
+++ b/rtc_base/json.h
@@ -0,0 +1,91 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_JSON_H_
+#define RTC_BASE_JSON_H_
+
+#include <string>
+#include <vector>
+
+#if !defined(WEBRTC_EXTERNAL_JSON)
+#include "json/json.h"
+#else
+#include "third_party/jsoncpp/json.h"
+#endif
+
+namespace rtc {
+
+///////////////////////////////////////////////////////////////////////////////
+// JSON Helpers
+///////////////////////////////////////////////////////////////////////////////
+
+// Robust conversion operators, better than the ones in JsonCpp.
+bool GetIntFromJson(const Json::Value& in, int* out);
+bool GetUIntFromJson(const Json::Value& in, unsigned int* out);
+bool GetStringFromJson(const Json::Value& in, std::string* out);
+bool GetBoolFromJson(const Json::Value& in, bool* out);
+bool GetDoubleFromJson(const Json::Value& in, double* out);
+
+// Pull values out of a JSON array.
+bool GetValueFromJsonArray(const Json::Value& in, size_t n,
+                           Json::Value* out);
+bool GetIntFromJsonArray(const Json::Value& in, size_t n,
+                         int* out);
+bool GetUIntFromJsonArray(const Json::Value& in, size_t n,
+                          unsigned int* out);
+bool GetStringFromJsonArray(const Json::Value& in, size_t n,
+                            std::string* out);
+bool GetBoolFromJsonArray(const Json::Value& in, size_t n,
+                          bool* out);
+bool GetDoubleFromJsonArray(const Json::Value& in, size_t n,
+                            double* out);
+
+// Convert json arrays to std::vector
+bool JsonArrayToValueVector(const Json::Value& in,
+                            std::vector<Json::Value>* out);
+bool JsonArrayToIntVector(const Json::Value& in,
+                          std::vector<int>* out);
+bool JsonArrayToUIntVector(const Json::Value& in,
+                           std::vector<unsigned int>* out);
+bool JsonArrayToStringVector(const Json::Value& in,
+                             std::vector<std::string>* out);
+bool JsonArrayToBoolVector(const Json::Value& in,
+                           std::vector<bool>* out);
+bool JsonArrayToDoubleVector(const Json::Value& in,
+                             std::vector<double>* out);
+
+// Convert std::vector to json array
+Json::Value ValueVectorToJsonArray(const std::vector<Json::Value>& in);
+Json::Value IntVectorToJsonArray(const std::vector<int>& in);
+Json::Value UIntVectorToJsonArray(const std::vector<unsigned int>& in);
+Json::Value StringVectorToJsonArray(const std::vector<std::string>& in);
+Json::Value BoolVectorToJsonArray(const std::vector<bool>& in);
+Json::Value DoubleVectorToJsonArray(const std::vector<double>& in);
+
+// Pull values out of a JSON object.
+bool GetValueFromJsonObject(const Json::Value& in, const std::string& k,
+                            Json::Value* out);
+bool GetIntFromJsonObject(const Json::Value& in, const std::string& k,
+                          int* out);
+bool GetUIntFromJsonObject(const Json::Value& in, const std::string& k,
+                           unsigned int* out);
+bool GetStringFromJsonObject(const Json::Value& in, const std::string& k,
+                             std::string* out);
+bool GetBoolFromJsonObject(const Json::Value& in, const std::string& k,
+                           bool* out);
+bool GetDoubleFromJsonObject(const Json::Value& in, const std::string& k,
+                             double* out);
+
+// Writes out a Json value as a string.
+std::string JsonValueToString(const Json::Value& json);
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_JSON_H_
diff --git a/rtc_base/json_unittest.cc b/rtc_base/json_unittest.cc
new file mode 100644
index 0000000..17b126a
--- /dev/null
+++ b/rtc_base/json_unittest.cc
@@ -0,0 +1,283 @@
+/*
+ *  Copyright 2009 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/json.h"
+
+#include <vector>
+
+#include "rtc_base/gunit.h"
+
+namespace rtc {
+
+static Json::Value in_s("foo");
+static Json::Value in_sn("99");
+static Json::Value in_si("-99");
+static Json::Value in_sb("true");
+static Json::Value in_sd("1.2");
+static Json::Value in_n(12);
+static Json::Value in_i(-12);
+static Json::Value in_u(34U);
+static Json::Value in_b(true);
+static Json::Value in_d(1.2);
+static Json::Value big_sn("12345678901234567890");
+static Json::Value big_si("-12345678901234567890");
+static Json::Value big_u(0xFFFFFFFF);
+static Json::Value bad_a(Json::arrayValue);
+static Json::Value bad_o(Json::objectValue);
+
+TEST(JsonTest, GetString) {
+  std::string out;
+  EXPECT_TRUE(GetStringFromJson(in_s, &out));
+  EXPECT_EQ("foo", out);
+  EXPECT_TRUE(GetStringFromJson(in_sn, &out));
+  EXPECT_EQ("99", out);
+  EXPECT_TRUE(GetStringFromJson(in_si, &out));
+  EXPECT_EQ("-99", out);
+  EXPECT_TRUE(GetStringFromJson(in_i, &out));
+  EXPECT_EQ("-12", out);
+  EXPECT_TRUE(GetStringFromJson(in_n, &out));
+  EXPECT_EQ("12", out);
+  EXPECT_TRUE(GetStringFromJson(in_u, &out));
+  EXPECT_EQ("34", out);
+  EXPECT_TRUE(GetStringFromJson(in_b, &out));
+  EXPECT_EQ("true", out);
+  // Not supported here yet.
+  EXPECT_FALSE(GetStringFromJson(bad_a, &out));
+  EXPECT_FALSE(GetStringFromJson(bad_o, &out));
+}
+
+TEST(JsonTest, GetInt) {
+  int out;
+  EXPECT_TRUE(GetIntFromJson(in_sn, &out));
+  EXPECT_EQ(99, out);
+  EXPECT_TRUE(GetIntFromJson(in_si, &out));
+  EXPECT_EQ(-99, out);
+  EXPECT_TRUE(GetIntFromJson(in_n, &out));
+  EXPECT_EQ(12, out);
+  EXPECT_TRUE(GetIntFromJson(in_i, &out));
+  EXPECT_EQ(-12, out);
+  EXPECT_TRUE(GetIntFromJson(in_u, &out));
+  EXPECT_EQ(34, out);
+  EXPECT_TRUE(GetIntFromJson(in_b, &out));
+  EXPECT_EQ(1, out);
+  EXPECT_FALSE(GetIntFromJson(in_s, &out));
+  EXPECT_FALSE(GetIntFromJson(big_sn, &out));
+  EXPECT_FALSE(GetIntFromJson(big_si, &out));
+  EXPECT_FALSE(GetIntFromJson(big_u, &out));
+  EXPECT_FALSE(GetIntFromJson(bad_a, &out));
+  EXPECT_FALSE(GetIntFromJson(bad_o, &out));
+}
+
+TEST(JsonTest, GetUInt) {
+  unsigned int out;
+  EXPECT_TRUE(GetUIntFromJson(in_sn, &out));
+  EXPECT_EQ(99U, out);
+  EXPECT_TRUE(GetUIntFromJson(in_n, &out));
+  EXPECT_EQ(12U, out);
+  EXPECT_TRUE(GetUIntFromJson(in_u, &out));
+  EXPECT_EQ(34U, out);
+  EXPECT_TRUE(GetUIntFromJson(in_b, &out));
+  EXPECT_EQ(1U, out);
+  EXPECT_TRUE(GetUIntFromJson(big_u, &out));
+  EXPECT_EQ(0xFFFFFFFFU, out);
+  EXPECT_FALSE(GetUIntFromJson(in_s, &out));
+  // TODO: Fail reading negative strings.
+  // EXPECT_FALSE(GetUIntFromJson(in_si, &out));
+  EXPECT_FALSE(GetUIntFromJson(in_i, &out));
+  EXPECT_FALSE(GetUIntFromJson(big_sn, &out));
+  EXPECT_FALSE(GetUIntFromJson(big_si, &out));
+  EXPECT_FALSE(GetUIntFromJson(bad_a, &out));
+  EXPECT_FALSE(GetUIntFromJson(bad_o, &out));
+}
+
+TEST(JsonTest, GetBool) {
+  bool out;
+  EXPECT_TRUE(GetBoolFromJson(in_sb, &out));
+  EXPECT_EQ(true, out);
+  EXPECT_TRUE(GetBoolFromJson(in_n, &out));
+  EXPECT_EQ(true, out);
+  EXPECT_TRUE(GetBoolFromJson(in_i, &out));
+  EXPECT_EQ(true, out);
+  EXPECT_TRUE(GetBoolFromJson(in_u, &out));
+  EXPECT_EQ(true, out);
+  EXPECT_TRUE(GetBoolFromJson(in_b, &out));
+  EXPECT_EQ(true, out);
+  EXPECT_TRUE(GetBoolFromJson(big_u, &out));
+  EXPECT_EQ(true, out);
+  EXPECT_FALSE(GetBoolFromJson(in_s, &out));
+  EXPECT_FALSE(GetBoolFromJson(in_sn, &out));
+  EXPECT_FALSE(GetBoolFromJson(in_si, &out));
+  EXPECT_FALSE(GetBoolFromJson(big_sn, &out));
+  EXPECT_FALSE(GetBoolFromJson(big_si, &out));
+  EXPECT_FALSE(GetBoolFromJson(bad_a, &out));
+  EXPECT_FALSE(GetBoolFromJson(bad_o, &out));
+}
+
+TEST(JsonTest, GetDouble) {
+  double out;
+  EXPECT_TRUE(GetDoubleFromJson(in_sn, &out));
+  EXPECT_EQ(99, out);
+  EXPECT_TRUE(GetDoubleFromJson(in_si, &out));
+  EXPECT_EQ(-99, out);
+  EXPECT_TRUE(GetDoubleFromJson(in_sd, &out));
+  EXPECT_EQ(1.2, out);
+  EXPECT_TRUE(GetDoubleFromJson(in_n, &out));
+  EXPECT_EQ(12, out);
+  EXPECT_TRUE(GetDoubleFromJson(in_i, &out));
+  EXPECT_EQ(-12, out);
+  EXPECT_TRUE(GetDoubleFromJson(in_u, &out));
+  EXPECT_EQ(34, out);
+  EXPECT_TRUE(GetDoubleFromJson(in_b, &out));
+  EXPECT_EQ(1, out);
+  EXPECT_TRUE(GetDoubleFromJson(in_d, &out));
+  EXPECT_EQ(1.2, out);
+  EXPECT_FALSE(GetDoubleFromJson(in_s, &out));
+}
+
+TEST(JsonTest, GetFromArray) {
+  Json::Value a, out;
+  a.append(in_s);
+  a.append(in_i);
+  a.append(in_u);
+  a.append(in_b);
+  EXPECT_TRUE(GetValueFromJsonArray(a, 0, &out));
+  EXPECT_TRUE(GetValueFromJsonArray(a, 3, &out));
+  EXPECT_FALSE(GetValueFromJsonArray(a, 99, &out));
+  EXPECT_FALSE(GetValueFromJsonArray(a, 0xFFFFFFFF, &out));
+}
+
+TEST(JsonTest, GetFromObject) {
+  Json::Value o, out;
+  o["string"] = in_s;
+  o["int"] = in_i;
+  o["uint"] = in_u;
+  o["bool"] = in_b;
+  EXPECT_TRUE(GetValueFromJsonObject(o, "int", &out));
+  EXPECT_TRUE(GetValueFromJsonObject(o, "bool", &out));
+  EXPECT_FALSE(GetValueFromJsonObject(o, "foo", &out));
+  EXPECT_FALSE(GetValueFromJsonObject(o, "", &out));
+}
+
+namespace {
+template <typename T>
+std::vector<T> VecOf3(const T& a, const T& b, const T& c) {
+  std::vector<T> in;
+  in.push_back(a);
+  in.push_back(b);
+  in.push_back(c);
+  return in;
+}
+template <typename T>
+Json::Value JsonVecOf3(const T& a, const T& b, const T& c) {
+  Json::Value in(Json::arrayValue);
+  in.append(a);
+  in.append(b);
+  in.append(c);
+  return in;
+}
+}  // unnamed namespace
+
+TEST(JsonTest, ValueVectorToFromArray) {
+  std::vector<Json::Value> in = VecOf3<Json::Value>("a", "b", "c");
+  Json::Value out = ValueVectorToJsonArray(in);
+  EXPECT_EQ(in.size(), out.size());
+  for (Json::Value::ArrayIndex i = 0; i < in.size(); ++i) {
+    EXPECT_EQ(in[i].asString(), out[i].asString());
+  }
+  Json::Value inj = JsonVecOf3<Json::Value>("a", "b", "c");
+  EXPECT_EQ(inj, out);
+  std::vector<Json::Value> outj;
+  EXPECT_TRUE(JsonArrayToValueVector(inj, &outj));
+  for (Json::Value::ArrayIndex i = 0; i < in.size(); i++) {
+    EXPECT_EQ(in[i], outj[i]);
+  }
+}
+
+TEST(JsonTest, IntVectorToFromArray) {
+  std::vector<int> in = VecOf3<int>(1, 2, 3);
+  Json::Value out = IntVectorToJsonArray(in);
+  EXPECT_EQ(in.size(), out.size());
+  for (Json::Value::ArrayIndex i = 0; i < in.size(); ++i) {
+    EXPECT_EQ(in[i], out[i].asInt());
+  }
+  Json::Value inj = JsonVecOf3<int>(1, 2, 3);
+  EXPECT_EQ(inj, out);
+  std::vector<int> outj;
+  EXPECT_TRUE(JsonArrayToIntVector(inj, &outj));
+  for (Json::Value::ArrayIndex i = 0; i < in.size(); i++) {
+    EXPECT_EQ(in[i], outj[i]);
+  }
+}
+
+TEST(JsonTest, UIntVectorToFromArray) {
+  std::vector<unsigned int> in = VecOf3<unsigned int>(1, 2, 3);
+  Json::Value out = UIntVectorToJsonArray(in);
+  EXPECT_EQ(in.size(), out.size());
+  for (Json::Value::ArrayIndex i = 0; i < in.size(); ++i) {
+    EXPECT_EQ(in[i], out[i].asUInt());
+  }
+  Json::Value inj = JsonVecOf3<unsigned int>(1, 2, 3);
+  EXPECT_EQ(inj, out);
+  std::vector<unsigned int> outj;
+  EXPECT_TRUE(JsonArrayToUIntVector(inj, &outj));
+  for (Json::Value::ArrayIndex i = 0; i < in.size(); i++) {
+    EXPECT_EQ(in[i], outj[i]);
+  }
+}
+
+TEST(JsonTest, StringVectorToFromArray) {
+  std::vector<std::string> in = VecOf3<std::string>("a", "b", "c");
+  Json::Value out = StringVectorToJsonArray(in);
+  EXPECT_EQ(in.size(), out.size());
+  for (Json::Value::ArrayIndex i = 0; i < in.size(); ++i) {
+    EXPECT_EQ(in[i], out[i].asString());
+  }
+  Json::Value inj = JsonVecOf3<std::string>("a", "b", "c");
+  EXPECT_EQ(inj, out);
+  std::vector<std::string> outj;
+  EXPECT_TRUE(JsonArrayToStringVector(inj, &outj));
+  for (Json::Value::ArrayIndex i = 0; i < in.size(); i++) {
+    EXPECT_EQ(in[i], outj[i]);
+  }
+}
+
+TEST(JsonTest, BoolVectorToFromArray) {
+  std::vector<bool> in = VecOf3<bool>(false, true, false);
+  Json::Value out = BoolVectorToJsonArray(in);
+  EXPECT_EQ(in.size(), out.size());
+  for (Json::Value::ArrayIndex i = 0; i < in.size(); ++i) {
+    EXPECT_EQ(in[i], out[i].asBool());
+  }
+  Json::Value inj = JsonVecOf3<bool>(false, true, false);
+  EXPECT_EQ(inj, out);
+  std::vector<bool> outj;
+  EXPECT_TRUE(JsonArrayToBoolVector(inj, &outj));
+  for (Json::Value::ArrayIndex i = 0; i < in.size(); i++) {
+    EXPECT_EQ(in[i], outj[i]);
+  }
+}
+
+TEST(JsonTest, DoubleVectorToFromArray) {
+  std::vector<double> in = VecOf3<double>(1.0, 2.0, 3.0);
+  Json::Value out = DoubleVectorToJsonArray(in);
+  EXPECT_EQ(in.size(), out.size());
+  for (Json::Value::ArrayIndex i = 0; i < in.size(); ++i) {
+    EXPECT_EQ(in[i], out[i].asDouble());
+  }
+  Json::Value inj = JsonVecOf3<double>(1.0, 2.0, 3.0);
+  EXPECT_EQ(inj, out);
+  std::vector<double> outj;
+  EXPECT_TRUE(JsonArrayToDoubleVector(inj, &outj));
+  for (Json::Value::ArrayIndex i = 0; i < in.size(); i++) {
+    EXPECT_EQ(in[i], outj[i]);
+  }
+}
+
+}  // namespace rtc
diff --git a/rtc_base/keep_ref_until_done.h b/rtc_base/keep_ref_until_done.h
new file mode 100644
index 0000000..979415d
--- /dev/null
+++ b/rtc_base/keep_ref_until_done.h
@@ -0,0 +1,43 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_KEEP_REF_UNTIL_DONE_H_
+#define RTC_BASE_KEEP_REF_UNTIL_DONE_H_
+
+#include "rtc_base/bind.h"
+#include "rtc_base/callback.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/scoped_ref_ptr.h"
+
+namespace rtc {
+
+namespace impl {
+template <class T>
+static inline void DoNothing(const scoped_refptr<T>& object) {}
+}  // namespace impl
+
+// KeepRefUntilDone keeps a reference to |object| until the returned
+// callback goes out of scope. If the returned callback is copied, the
+// reference will be released when the last callback goes out of scope.
+template <class ObjectT>
+static inline Callback0<void> KeepRefUntilDone(ObjectT* object) {
+  return rtc::Bind(&impl::DoNothing<ObjectT>, scoped_refptr<ObjectT>(object));
+}
+
+template <class ObjectT>
+static inline Callback0<void> KeepRefUntilDone(
+    const scoped_refptr<ObjectT>& object) {
+  return rtc::Bind(&impl::DoNothing<ObjectT>, object);
+}
+
+}  // namespace rtc
+
+
+#endif  // RTC_BASE_KEEP_REF_UNTIL_DONE_H_
diff --git a/rtc_base/location.cc b/rtc_base/location.cc
new file mode 100644
index 0000000..9c90d9e
--- /dev/null
+++ b/rtc_base/location.cc
@@ -0,0 +1,38 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/location.h"
+
+#include "rtc_base/stringutils.h"
+
+namespace rtc {
+
+Location::Location(const char* function_name, const char* file_and_line)
+    : function_name_(function_name), file_and_line_(file_and_line) {}
+
+Location::Location() : function_name_("Unknown"), file_and_line_("Unknown") {}
+
+Location::Location(const Location& other)
+    : function_name_(other.function_name_),
+      file_and_line_(other.file_and_line_) {}
+
+Location& Location::operator=(const Location& other) {
+  function_name_ = other.function_name_;
+  file_and_line_ = other.file_and_line_;
+  return *this;
+}
+
+std::string Location::ToString() const {
+  char buf[256];
+  sprintfn(buf, sizeof(buf), "%s@%s", function_name_, file_and_line_);
+  return buf;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/location.h b/rtc_base/location.h
new file mode 100644
index 0000000..513bc26
--- /dev/null
+++ b/rtc_base/location.h
@@ -0,0 +1,57 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_LOCATION_H_
+#define RTC_BASE_LOCATION_H_
+
+#include <string>
+
+#include "rtc_base/stringize_macros.h"
+
+namespace rtc {
+
+// Location provides basic info where of an object was constructed, or was
+// significantly brought to life.
+// This is a stripped down version of:
+// https://code.google.com/p/chromium/codesearch#chromium/src/base/location.h
+class Location {
+ public:
+  // Constructor should be called with a long-lived char*, such as __FILE__.
+  // It assumes the provided value will persist as a global constant, and it
+  // will not make a copy of it.
+  //
+  // TODO(deadbeef): Tracing is currently limited to 2 arguments, which is
+  // why the file name and line number are combined into one argument.
+  //
+  // Once TracingV2 is available, separate the file name and line number.
+  Location(const char* function_name, const char* file_and_line);
+  Location();
+  Location(const Location& other);
+  Location& operator=(const Location& other);
+
+  const char* function_name() const { return function_name_; }
+  const char* file_and_line() const { return file_and_line_; }
+
+  std::string ToString() const;
+
+ private:
+  const char* function_name_;
+  const char* file_and_line_;
+};
+
+// Define a macro to record the current source location.
+#define RTC_FROM_HERE RTC_FROM_HERE_WITH_FUNCTION(__FUNCTION__)
+
+#define RTC_FROM_HERE_WITH_FUNCTION(function_name) \
+  ::rtc::Location(function_name, __FILE__ ":" STRINGIZE(__LINE__))
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_LOCATION_H_
diff --git a/rtc_base/logging.cc b/rtc_base/logging.cc
new file mode 100644
index 0000000..ec8cd7f
--- /dev/null
+++ b/rtc_base/logging.cc
@@ -0,0 +1,476 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#if defined(WEBRTC_WIN)
+#if !defined(WIN32_LEAN_AND_MEAN)
+#define WIN32_LEAN_AND_MEAN
+#endif
+#include <windows.h>
+#if _MSC_VER < 1900
+#define snprintf _snprintf
+#endif
+#undef ERROR  // wingdi.h
+#endif
+
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+#include <CoreServices/CoreServices.h>
+#elif defined(WEBRTC_ANDROID)
+#include <android/log.h>
+// Android has a 1024 limit on log inputs. We use 60 chars as an
+// approx for the header/tag portion.
+// See android/system/core/liblog/logd_write.c
+static const int kMaxLogLineSize = 1024 - 60;
+#endif  // WEBRTC_MAC && !defined(WEBRTC_IOS) || WEBRTC_ANDROID
+
+#include <time.h>
+#include <limits.h>
+
+#include <algorithm>
+#include <iomanip>
+#include <ostream>
+#include <vector>
+
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread_types.h"
+#include "rtc_base/stringencode.h"
+#include "rtc_base/strings/string_builder.h"
+#include "rtc_base/stringutils.h"
+#include "rtc_base/timeutils.h"
+
+namespace rtc {
+namespace {
+// By default, release builds don't log, debug builds at info level
+#if !defined(NDEBUG)
+static LoggingSeverity g_min_sev = LS_INFO;
+static LoggingSeverity g_dbg_sev = LS_INFO;
+#else
+static LoggingSeverity g_min_sev = LS_NONE;
+static LoggingSeverity g_dbg_sev = LS_NONE;
+#endif
+
+// Return the filename portion of the string (that following the last slash).
+const char* FilenameFromPath(const char* file) {
+  const char* end1 = ::strrchr(file, '/');
+  const char* end2 = ::strrchr(file, '\\');
+  if (!end1 && !end2)
+    return file;
+  else
+    return (end1 > end2) ? end1 + 1 : end2 + 1;
+}
+
+std::ostream& GetNoopStream() {
+  class NoopStreamBuf : public std::streambuf {
+   public:
+    int overflow(int c) override { return c; }
+  };
+  static NoopStreamBuf noop_buffer;
+  static std::ostream noop_stream(&noop_buffer);
+  return noop_stream;
+}
+
+// Global lock for log subsystem, only needed to serialize access to streams_.
+CriticalSection g_log_crit;
+}  // namespace
+
+
+/////////////////////////////////////////////////////////////////////////////
+// LogMessage
+/////////////////////////////////////////////////////////////////////////////
+
+bool LogMessage::log_to_stderr_ = true;
+
+// The list of logging streams currently configured.
+// Note: we explicitly do not clean this up, because of the uncertain ordering
+// of destructors at program exit.  Let the person who sets the stream trigger
+// cleanup by setting to null, or let it leak (safe at program exit).
+LogMessage::StreamList LogMessage::streams_ RTC_GUARDED_BY(g_log_crit);
+
+// Boolean options default to false (0)
+bool LogMessage::thread_, LogMessage::timestamp_;
+
+LogMessage::LogMessage(const char* file,
+                       int line,
+                       LoggingSeverity sev,
+                       LogErrorContext err_ctx,
+                       int err)
+    : severity_(sev), is_noop_(IsNoop(sev)) {
+  // If there's no need to do any work, let's not :)
+  if (is_noop_)
+    return;
+
+  if (timestamp_) {
+    // Use SystemTimeMillis so that even if tests use fake clocks, the timestamp
+    // in log messages represents the real system time.
+    int64_t time = TimeDiff(SystemTimeMillis(), LogStartTime());
+    // Also ensure WallClockStartTime is initialized, so that it matches
+    // LogStartTime.
+    WallClockStartTime();
+    print_stream_ << "[" << std::setfill('0') << std::setw(3) << (time / 1000)
+                  << ":" << std::setw(3) << (time % 1000) << std::setfill(' ')
+                  << "] ";
+  }
+
+  if (thread_) {
+    PlatformThreadId id = CurrentThreadId();
+    print_stream_ << "[" << std::dec << id << "] ";
+  }
+
+  if (file != nullptr)
+    print_stream_ << "(" << FilenameFromPath(file)  << ":" << line << "): ";
+
+  if (err_ctx != ERRCTX_NONE) {
+    char tmp_buf[1024];
+    SimpleStringBuilder tmp(tmp_buf);
+    tmp.AppendFormat("[0x%08X]", err);
+    switch (err_ctx) {
+      case ERRCTX_ERRNO:
+        tmp << " " << strerror(err);
+        break;
+#ifdef WEBRTC_WIN
+      case ERRCTX_HRESULT: {
+        char msgbuf[256];
+        DWORD flags = FORMAT_MESSAGE_FROM_SYSTEM |
+                      FORMAT_MESSAGE_IGNORE_INSERTS;
+        if (DWORD len = FormatMessageA(
+                flags, nullptr, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+                msgbuf, sizeof(msgbuf) / sizeof(msgbuf[0]), nullptr)) {
+          while ((len > 0) &&
+              isspace(static_cast<unsigned char>(msgbuf[len-1]))) {
+            msgbuf[--len] = 0;
+          }
+          tmp << " " << msgbuf;
+        }
+        break;
+      }
+#endif  // WEBRTC_WIN
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+      case ERRCTX_OSSTATUS: {
+        std::string desc(DescriptionFromOSStatus(err));
+        tmp << " " << (desc.empty() ? "Unknown error" : desc.c_str());
+        break;
+      }
+#endif  // WEBRTC_MAC && !defined(WEBRTC_IOS)
+      default:
+        break;
+    }
+    extra_ = tmp.str();
+  }
+}
+
+#if defined(WEBRTC_ANDROID)
+LogMessage::LogMessage(const char* file,
+                       int line,
+                       LoggingSeverity sev,
+                       const char* tag)
+    : LogMessage(file,
+                 line,
+                 sev,
+                 ERRCTX_NONE,
+                 0 /* err */) {
+  if (!is_noop_) {
+    tag_ = tag;
+    print_stream_ << tag << ": ";
+  }
+}
+#endif
+
+// DEPRECATED. Currently only used by downstream projects that use
+// implementation details of logging.h. Work is ongoing to remove those
+// dependencies.
+LogMessage::LogMessage(const char* file, int line, LoggingSeverity sev,
+                       const std::string& tag)
+    : LogMessage(file, line, sev) {
+  if (!is_noop_)
+    print_stream_ << tag << ": ";
+}
+
+LogMessage::~LogMessage() {
+  if (is_noop_)
+    return;
+
+  FinishPrintStream();
+
+  // TODO(tommi): Unfortunately |ostringstream::str()| always returns a copy
+  // of the constructed string. This means that we always end up creating
+  // two copies here (one owned by the stream, one by the return value of
+  // |str()|). It would be nice to switch to something else.
+  const std::string str = print_stream_.str();
+
+  if (severity_ >= g_dbg_sev) {
+#if defined(WEBRTC_ANDROID)
+    OutputToDebug(str, severity_, tag_);
+#else
+    OutputToDebug(str, severity_);
+#endif
+  }
+
+  CritScope cs(&g_log_crit);
+  for (auto& kv : streams_) {
+    if (severity_ >= kv.second) {
+      kv.first->OnLogMessage(str);
+    }
+  }
+}
+
+std::ostream& LogMessage::stream() {
+  return is_noop_ ? GetNoopStream() : print_stream_;
+}
+
+bool LogMessage::Loggable(LoggingSeverity sev) {
+  return sev >= g_min_sev;
+}
+
+int LogMessage::GetMinLogSeverity() {
+  return g_min_sev;
+}
+
+LoggingSeverity LogMessage::GetLogToDebug() {
+  return g_dbg_sev;
+}
+int64_t LogMessage::LogStartTime() {
+  static const int64_t g_start = SystemTimeMillis();
+  return g_start;
+}
+
+uint32_t LogMessage::WallClockStartTime() {
+  static const uint32_t g_start_wallclock = time(nullptr);
+  return g_start_wallclock;
+}
+
+void LogMessage::LogThreads(bool on) {
+  thread_ = on;
+}
+
+void LogMessage::LogTimestamps(bool on) {
+  timestamp_ = on;
+}
+
+void LogMessage::LogToDebug(LoggingSeverity min_sev) {
+  g_dbg_sev = min_sev;
+  CritScope cs(&g_log_crit);
+  UpdateMinLogSeverity();
+}
+
+void LogMessage::SetLogToStderr(bool log_to_stderr) {
+  log_to_stderr_ = log_to_stderr;
+}
+
+int LogMessage::GetLogToStream(LogSink* stream) {
+  CritScope cs(&g_log_crit);
+  LoggingSeverity sev = LS_NONE;
+  for (auto& kv : streams_) {
+    if (!stream || stream == kv.first) {
+      sev = std::min(sev, kv.second);
+    }
+  }
+  return sev;
+}
+
+void LogMessage::AddLogToStream(LogSink* stream, LoggingSeverity min_sev) {
+  CritScope cs(&g_log_crit);
+  streams_.push_back(std::make_pair(stream, min_sev));
+  UpdateMinLogSeverity();
+}
+
+void LogMessage::RemoveLogToStream(LogSink* stream) {
+  CritScope cs(&g_log_crit);
+  for (StreamList::iterator it = streams_.begin(); it != streams_.end(); ++it) {
+    if (stream == it->first) {
+      streams_.erase(it);
+      break;
+    }
+  }
+  UpdateMinLogSeverity();
+}
+
+void LogMessage::ConfigureLogging(const char* params) {
+  LoggingSeverity current_level = LS_VERBOSE;
+  LoggingSeverity debug_level = GetLogToDebug();
+
+  std::vector<std::string> tokens;
+  tokenize(params, ' ', &tokens);
+
+  for (const std::string& token : tokens) {
+    if (token.empty())
+      continue;
+
+    // Logging features
+    if (token == "tstamp") {
+      LogTimestamps();
+    } else if (token == "thread") {
+      LogThreads();
+
+    // Logging levels
+    } else if (token == "sensitive") {
+      current_level = LS_SENSITIVE;
+    } else if (token == "verbose") {
+      current_level = LS_VERBOSE;
+    } else if (token == "info") {
+      current_level = LS_INFO;
+    } else if (token == "warning") {
+      current_level = LS_WARNING;
+    } else if (token == "error") {
+      current_level = LS_ERROR;
+    } else if (token == "none") {
+      current_level = LS_NONE;
+
+    // Logging targets
+    } else if (token == "debug") {
+      debug_level = current_level;
+    }
+  }
+
+#if defined(WEBRTC_WIN)
+  if ((LS_NONE != debug_level) && !::IsDebuggerPresent()) {
+    // First, attempt to attach to our parent's console... so if you invoke
+    // from the command line, we'll see the output there.  Otherwise, create
+    // our own console window.
+    // Note: These methods fail if a console already exists, which is fine.
+    if (!AttachConsole(ATTACH_PARENT_PROCESS))
+      ::AllocConsole();
+  }
+#endif  // WEBRTC_WIN
+
+  LogToDebug(debug_level);
+}
+
+void LogMessage::UpdateMinLogSeverity()
+    RTC_EXCLUSIVE_LOCKS_REQUIRED(g_log_crit) {
+  LoggingSeverity min_sev = g_dbg_sev;
+  for (auto& kv : streams_) {
+    min_sev = std::min(g_dbg_sev, kv.second);
+  }
+  g_min_sev = min_sev;
+}
+
+#if defined(WEBRTC_ANDROID)
+void LogMessage::OutputToDebug(const std::string& str,
+                               LoggingSeverity severity,
+                               const char* tag) {
+#else
+void LogMessage::OutputToDebug(const std::string& str,
+                               LoggingSeverity severity) {
+#endif
+  bool log_to_stderr = log_to_stderr_;
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS) && defined(NDEBUG)
+  // On the Mac, all stderr output goes to the Console log and causes clutter.
+  // So in opt builds, don't log to stderr unless the user specifically sets
+  // a preference to do so.
+  CFStringRef key = CFStringCreateWithCString(kCFAllocatorDefault,
+                                              "logToStdErr",
+                                              kCFStringEncodingUTF8);
+  CFStringRef domain = CFBundleGetIdentifier(CFBundleGetMainBundle());
+  if (key != nullptr && domain != nullptr) {
+    Boolean exists_and_is_valid;
+    Boolean should_log =
+        CFPreferencesGetAppBooleanValue(key, domain, &exists_and_is_valid);
+    // If the key doesn't exist or is invalid or is false, we will not log to
+    // stderr.
+    log_to_stderr = exists_and_is_valid && should_log;
+  }
+  if (key != nullptr) {
+    CFRelease(key);
+  }
+#endif  // defined(WEBRTC_MAC) && !defined(WEBRTC_IOS) && defined(NDEBUG)
+
+#if defined(WEBRTC_WIN)
+  // Always log to the debugger.
+  // Perhaps stderr should be controlled by a preference, as on Mac?
+  OutputDebugStringA(str.c_str());
+  if (log_to_stderr) {
+    // This handles dynamically allocated consoles, too.
+    if (HANDLE error_handle = ::GetStdHandle(STD_ERROR_HANDLE)) {
+      log_to_stderr = false;
+      DWORD written = 0;
+      ::WriteFile(error_handle, str.data(), static_cast<DWORD>(str.size()),
+                  &written, 0);
+    }
+  }
+#endif  // WEBRTC_WIN
+
+#if defined(WEBRTC_ANDROID)
+  // Android's logging facility uses severity to log messages but we
+  // need to map libjingle's severity levels to Android ones first.
+  // Also write to stderr which maybe available to executable started
+  // from the shell.
+  int prio;
+  switch (severity) {
+    case LS_SENSITIVE:
+      __android_log_write(ANDROID_LOG_INFO, tag, "SENSITIVE");
+      if (log_to_stderr) {
+        fprintf(stderr, "SENSITIVE");
+        fflush(stderr);
+      }
+      return;
+    case LS_VERBOSE:
+      prio = ANDROID_LOG_VERBOSE;
+      break;
+    case LS_INFO:
+      prio = ANDROID_LOG_INFO;
+      break;
+    case LS_WARNING:
+      prio = ANDROID_LOG_WARN;
+      break;
+    case LS_ERROR:
+      prio = ANDROID_LOG_ERROR;
+      break;
+    default:
+      prio = ANDROID_LOG_UNKNOWN;
+  }
+
+  int size = str.size();
+  int line = 0;
+  int idx = 0;
+  const int max_lines = size / kMaxLogLineSize + 1;
+  if (max_lines == 1) {
+    __android_log_print(prio, tag, "%.*s", size, str.c_str());
+  } else {
+    while (size > 0) {
+      const int len = std::min(size, kMaxLogLineSize);
+      // Use the size of the string in the format (str may have \0 in the
+      // middle).
+      __android_log_print(prio, tag, "[%d/%d] %.*s", line + 1, max_lines, len,
+                          str.c_str() + idx);
+      idx += len;
+      size -= len;
+      ++line;
+    }
+  }
+#endif  // WEBRTC_ANDROID
+  if (log_to_stderr) {
+    fprintf(stderr, "%s", str.c_str());
+    fflush(stderr);
+  }
+}
+
+// static
+bool LogMessage::IsNoop(LoggingSeverity severity) {
+  if (severity >= g_dbg_sev)
+    return false;
+
+  // TODO(tommi): We're grabbing this lock for every LogMessage instance that
+  // is going to be logged. This introduces unnecessary synchronization for
+  // a feature that's mostly used for testing.
+  CritScope cs(&g_log_crit);
+  return streams_.size() == 0;
+}
+
+void LogMessage::FinishPrintStream() {
+  if (is_noop_)
+    return;
+  if (!extra_.empty())
+    print_stream_ << " : " << extra_;
+  print_stream_ << std::endl;
+}
+
+//////////////////////////////////////////////////////////////////////
+
+}  // namespace rtc
diff --git a/rtc_base/logging.h b/rtc_base/logging.h
new file mode 100644
index 0000000..efaadc5
--- /dev/null
+++ b/rtc_base/logging.h
@@ -0,0 +1,368 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// RTC_LOG(...) an ostream target that can be used to send formatted
+// output to a variety of logging targets, such as debugger console, stderr,
+// or any LogSink.
+// The severity level passed as the first argument to the logging
+// functions is used as a filter, to limit the verbosity of the logging.
+// Static members of LogMessage documented below are used to control the
+// verbosity and target of the output.
+// There are several variations on the RTC_LOG macro which facilitate logging
+// of common error conditions, detailed below.
+
+// RTC_LOG(sev) logs the given stream at severity "sev", which must be a
+//     compile-time constant of the LoggingSeverity type, without the namespace
+//     prefix.
+// RTC_LOG_V(sev) Like RTC_LOG(), but sev is a run-time variable of the
+//     LoggingSeverity type (basically, it just doesn't prepend the namespace).
+// RTC_LOG_F(sev) Like RTC_LOG(), but includes the name of the current function.
+// RTC_LOG_T(sev) Like RTC_LOG(), but includes the this pointer.
+// RTC_LOG_T_F(sev) Like RTC_LOG_F(), but includes the this pointer.
+// RTC_LOG_GLE(sev [, mod]) attempt to add a string description of the
+//     HRESULT returned by GetLastError.
+// RTC_LOG_ERRNO(sev) attempts to add a string description of an errno-derived
+//     error. errno and associated facilities exist on both Windows and POSIX,
+//     but on Windows they only apply to the C/C++ runtime.
+// RTC_LOG_ERR(sev) is an alias for the platform's normal error system, i.e.
+//     _GLE on Windows and _ERRNO on POSIX.
+// (The above three also all have _EX versions that let you specify the error
+// code, rather than using the last one.)
+// RTC_LOG_E(sev, ctx, err, ...) logs a detailed error interpreted using the
+//     specified context.
+// RTC_LOG_CHECK_LEVEL(sev) (and RTC_LOG_CHECK_LEVEL_V(sev)) can be used as a
+//     test before performing expensive or sensitive operations whose sole
+//     purpose is to output logging data at the desired level.
+
+#ifndef RTC_BASE_LOGGING_H_
+#define RTC_BASE_LOGGING_H_
+
+#include <errno.h>
+
+#include <list>
+#include <sstream>
+#include <string>
+#include <utility>
+
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+#include <CoreServices/CoreServices.h>
+#endif
+
+#include "rtc_base/basictypes.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/deprecation.h"
+#include "rtc_base/thread_annotations.h"
+
+#if !defined(NDEBUG) || defined(DLOG_ALWAYS_ON)
+#define RTC_DLOG_IS_ON 1
+#else
+#define RTC_DLOG_IS_ON 0
+#endif
+
+namespace rtc {
+
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+// Returns a UTF8 description from an OS X Status error.
+std::string DescriptionFromOSStatus(OSStatus err);
+#endif
+
+//////////////////////////////////////////////////////////////////////
+
+// Note that the non-standard LoggingSeverity aliases exist because they are
+// still in broad use.  The meanings of the levels are:
+//  LS_SENSITIVE: Information which should only be logged with the consent
+//   of the user, due to privacy concerns.
+//  LS_VERBOSE: This level is for data which we do not want to appear in the
+//   normal debug log, but should appear in diagnostic logs.
+//  LS_INFO: Chatty level used in debugging for all sorts of things, the default
+//   in debug builds.
+//  LS_WARNING: Something that may warrant investigation.
+//  LS_ERROR: Something that should not have occurred.
+//  LS_NONE: Don't log.
+enum LoggingSeverity {
+  LS_SENSITIVE,
+  LS_VERBOSE,
+  LS_INFO,
+  LS_WARNING,
+  LS_ERROR,
+  LS_NONE,
+  INFO = LS_INFO,
+  WARNING = LS_WARNING,
+  LERROR = LS_ERROR
+};
+
+// LogErrorContext assists in interpreting the meaning of an error value.
+enum LogErrorContext {
+  ERRCTX_NONE,
+  ERRCTX_ERRNO,     // System-local errno
+  ERRCTX_HRESULT,   // Windows HRESULT
+  ERRCTX_OSSTATUS,  // MacOS OSStatus
+
+  // Abbreviations for LOG_E macro
+  ERRCTX_EN = ERRCTX_ERRNO,     // LOG_E(sev, EN, x)
+  ERRCTX_HR = ERRCTX_HRESULT,   // LOG_E(sev, HR, x)
+  ERRCTX_OS = ERRCTX_OSSTATUS,  // LOG_E(sev, OS, x)
+};
+
+// Virtual sink interface that can receive log messages.
+class LogSink {
+ public:
+  LogSink() {}
+  virtual ~LogSink() {}
+  virtual void OnLogMessage(const std::string& message) = 0;
+};
+
+class LogMessage {
+ public:
+  LogMessage(const char* file,
+             int line,
+             LoggingSeverity sev,
+             LogErrorContext err_ctx = ERRCTX_NONE,
+             int err = 0);
+
+#if defined(WEBRTC_ANDROID)
+  LogMessage(const char* file, int line, LoggingSeverity sev, const char* tag);
+#endif
+
+  // DEPRECATED - DO NOT USE - PLEASE USE THE MACROS INSTEAD OF THE CLASS.
+  // Android code should use the 'const char*' version since tags are static
+  // and we want to avoid allocating a std::string copy per log line.
+  RTC_DEPRECATED
+  LogMessage(const char* file, int line, LoggingSeverity sev,
+             const std::string& tag);
+
+  ~LogMessage();
+
+  static bool Loggable(LoggingSeverity sev);
+  std::ostream& stream();
+
+  // Returns the time at which this function was called for the first time.
+  // The time will be used as the logging start time.
+  // If this is not called externally, the LogMessage ctor also calls it, in
+  // which case the logging start time will be the time of the first LogMessage
+  // instance is created.
+  static int64_t LogStartTime();
+
+  // Returns the wall clock equivalent of |LogStartTime|, in seconds from the
+  // epoch.
+  static uint32_t WallClockStartTime();
+
+  //  LogThreads: Display the thread identifier of the current thread
+  static void LogThreads(bool on = true);
+
+  //  LogTimestamps: Display the elapsed time of the program
+  static void LogTimestamps(bool on = true);
+
+  // These are the available logging channels
+  //  Debug: Debug console on Windows, otherwise stderr
+  static void LogToDebug(LoggingSeverity min_sev);
+  static LoggingSeverity GetLogToDebug();
+
+  // Sets whether logs will be directed to stderr in debug mode.
+  static void SetLogToStderr(bool log_to_stderr);
+
+  //  Stream: Any non-blocking stream interface.  LogMessage takes ownership of
+  //   the stream. Multiple streams may be specified by using AddLogToStream.
+  //   LogToStream is retained for backwards compatibility; when invoked, it
+  //   will discard any previously set streams and install the specified stream.
+  //   GetLogToStream gets the severity for the specified stream, of if none
+  //   is specified, the minimum stream severity.
+  //   RemoveLogToStream removes the specified stream, without destroying it.
+  static int GetLogToStream(LogSink* stream = nullptr);
+  static void AddLogToStream(LogSink* stream, LoggingSeverity min_sev);
+  static void RemoveLogToStream(LogSink* stream);
+
+  // Testing against MinLogSeverity allows code to avoid potentially expensive
+  // logging operations by pre-checking the logging level.
+  static int GetMinLogSeverity();
+
+  // Parses the provided parameter stream to configure the options above.
+  // Useful for configuring logging from the command line.
+  static void ConfigureLogging(const char* params);
+
+ private:
+  friend class LogMessageForTesting;
+  typedef std::pair<LogSink*, LoggingSeverity> StreamAndSeverity;
+  typedef std::list<StreamAndSeverity> StreamList;
+
+  // Updates min_sev_ appropriately when debug sinks change.
+  static void UpdateMinLogSeverity();
+
+  // These write out the actual log messages.
+#if defined(WEBRTC_ANDROID)
+  static void OutputToDebug(const std::string& msg,
+                            LoggingSeverity severity,
+                            const char* tag);
+#else
+  static void OutputToDebug(const std::string& msg, LoggingSeverity severity);
+#endif
+
+  // Checks the current global debug severity and if the |streams_| collection
+  // is empty. If |severity| is smaller than the global severity and if the
+  // |streams_| collection is empty, the LogMessage will be considered a noop
+  // LogMessage.
+  static bool IsNoop(LoggingSeverity severity);
+
+  // Called from the dtor (or from a test) to append optional extra error
+  // information to the log stream and a newline character.
+  void FinishPrintStream();
+
+  // The ostream that buffers the formatted message before output
+  std::ostringstream print_stream_;
+
+  // The severity level of this message
+  LoggingSeverity severity_;
+
+#if defined(WEBRTC_ANDROID)
+  // The Android debug output tag.
+  const char* tag_ = "libjingle";
+#endif
+
+  // String data generated in the constructor, that should be appended to
+  // the message before output.
+  std::string extra_;
+
+  const bool is_noop_;
+
+  // The output streams and their associated severities
+  static StreamList streams_;
+
+  // Flags for formatting options
+  static bool thread_, timestamp_;
+
+  // Determines if logs will be directed to stderr in debug mode.
+  static bool log_to_stderr_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(LogMessage);
+};
+
+//////////////////////////////////////////////////////////////////////
+// Logging Helpers
+//////////////////////////////////////////////////////////////////////
+
+// The following non-obvious technique for implementation of a
+// conditional log stream was stolen from google3/base/logging.h.
+
+// This class is used to explicitly ignore values in the conditional
+// logging macros.  This avoids compiler warnings like "value computed
+// is not used" and "statement has no effect".
+
+class LogMessageVoidify {
+ public:
+  LogMessageVoidify() { }
+  // This has to be an operator with a precedence lower than << but
+  // higher than ?:
+  void operator&(std::ostream&) { }
+};
+
+#define RTC_LOG_SEVERITY_PRECONDITION(sev) \
+  !(rtc::LogMessage::Loggable(sev)) \
+    ? (void) 0 \
+    : rtc::LogMessageVoidify() &
+
+#define RTC_LOG(sev) \
+  RTC_LOG_SEVERITY_PRECONDITION(rtc::sev) \
+    rtc::LogMessage(__FILE__, __LINE__, rtc::sev).stream()
+
+// The _V version is for when a variable is passed in.  It doesn't do the
+// namespace concatenation.
+#define RTC_LOG_V(sev) \
+  RTC_LOG_SEVERITY_PRECONDITION(sev) \
+    rtc::LogMessage(__FILE__, __LINE__, sev).stream()
+
+// The _F version prefixes the message with the current function name.
+#if (defined(__GNUC__) && !defined(NDEBUG)) || defined(WANT_PRETTY_LOG_F)
+#define RTC_LOG_F(sev) RTC_LOG(sev) << __PRETTY_FUNCTION__ << ": "
+#define RTC_LOG_T_F(sev) RTC_LOG(sev) << this << ": " \
+  << __PRETTY_FUNCTION__ << ": "
+#else
+#define RTC_LOG_F(sev) RTC_LOG(sev) << __FUNCTION__ << ": "
+#define RTC_LOG_T_F(sev) RTC_LOG(sev) << this << ": " << __FUNCTION__ << ": "
+#endif
+
+#define RTC_LOG_CHECK_LEVEL(sev) \
+  rtc::LogCheckLevel(rtc::sev)
+#define RTC_LOG_CHECK_LEVEL_V(sev) \
+  rtc::LogCheckLevel(sev)
+
+inline bool LogCheckLevel(LoggingSeverity sev) {
+  return (LogMessage::GetMinLogSeverity() <= sev);
+}
+
+#define RTC_LOG_E(sev, ctx, err, ...) \
+  RTC_LOG_SEVERITY_PRECONDITION(rtc::sev) \
+    rtc::LogMessage(__FILE__, __LINE__, rtc::sev, \
+                    rtc::ERRCTX_ ## ctx, err , ##__VA_ARGS__)   \
+        .stream()
+
+#define RTC_LOG_T(sev) RTC_LOG(sev) << this << ": "
+
+#define RTC_LOG_ERRNO_EX(sev, err) \
+  RTC_LOG_E(sev, ERRNO, err)
+#define RTC_LOG_ERRNO(sev) \
+  RTC_LOG_ERRNO_EX(sev, errno)
+
+#if defined(WEBRTC_WIN)
+#define RTC_LOG_GLE_EX(sev, err) \
+  RTC_LOG_E(sev, HRESULT, err)
+#define RTC_LOG_GLE(sev) \
+  RTC_LOG_GLE_EX(sev, GetLastError())
+#define RTC_LOG_ERR_EX(sev, err) \
+  RTC_LOG_GLE_EX(sev, err)
+#define RTC_LOG_ERR(sev) \
+  RTC_LOG_GLE(sev)
+#elif defined(__native_client__) && __native_client__
+#define RTC_LOG_ERR_EX(sev, err) \
+  RTC_LOG(sev)
+#define RTC_LOG_ERR(sev) \
+  RTC_LOG(sev)
+#elif defined(WEBRTC_POSIX)
+#define RTC_LOG_ERR_EX(sev, err) \
+  RTC_LOG_ERRNO_EX(sev, err)
+#define RTC_LOG_ERR(sev) \
+  RTC_LOG_ERRNO(sev)
+#endif  // WEBRTC_WIN
+
+#if defined(WEBRTC_ANDROID)
+namespace internal {
+// Inline adapters provided for backwards compatibility for downstream projects.
+inline const char* AdaptString(const char* str) { return str; }
+inline const char* AdaptString(const std::string& str) { return str.c_str(); }
+}  // namespace internal
+#define RTC_LOG_TAG(sev, tag)        \
+  RTC_LOG_SEVERITY_PRECONDITION(sev) \
+  rtc::LogMessage(nullptr, 0, sev, rtc::internal::AdaptString(tag)).stream()
+#else
+// DEPRECATED. This macro is only intended for Android.
+#define RTC_LOG_TAG(sev, tag)        \
+  RTC_LOG_SEVERITY_PRECONDITION(sev) \
+  rtc::LogMessage(nullptr, 0, sev).stream()
+#endif
+
+// The RTC_DLOG macros are equivalent to their RTC_LOG counterparts except that
+// they only generate code in debug builds.
+#if RTC_DLOG_IS_ON
+#define RTC_DLOG(sev) RTC_LOG(sev)
+#define RTC_DLOG_V(sev) RTC_LOG_V(sev)
+#define RTC_DLOG_F(sev) RTC_LOG_F(sev)
+#else
+#define RTC_DLOG_EAT_STREAM_PARAMS(sev) \
+  (true ? true : ((void)(sev), true))   \
+      ? static_cast<void>(0)            \
+      : rtc::LogMessageVoidify() &      \
+            rtc::LogMessage(__FILE__, __LINE__, sev).stream()
+#define RTC_DLOG(sev) RTC_DLOG_EAT_STREAM_PARAMS(rtc::sev)
+#define RTC_DLOG_V(sev) RTC_DLOG_EAT_STREAM_PARAMS(sev)
+#define RTC_DLOG_F(sev) RTC_DLOG_EAT_STREAM_PARAMS(rtc::sev)
+#endif
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_LOGGING_H_
diff --git a/rtc_base/logging_mac.mm b/rtc_base/logging_mac.mm
new file mode 100644
index 0000000..378cfbf
--- /dev/null
+++ b/rtc_base/logging_mac.mm
@@ -0,0 +1,22 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/logging.h"
+
+#import <Foundation/Foundation.h>
+
+
+namespace rtc {
+std::string DescriptionFromOSStatus(OSStatus err) {
+  NSError* error =
+      [NSError errorWithDomain:NSOSStatusErrorDomain code:err userInfo:nil];
+  return error.description.UTF8String;
+}
+}  // namespace rtc
diff --git a/rtc_base/logging_unittest.cc b/rtc_base/logging_unittest.cc
new file mode 100644
index 0000000..1be0b24
--- /dev/null
+++ b/rtc_base/logging_unittest.cc
@@ -0,0 +1,242 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/logging.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/event.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/stream.h"
+#include "test/testsupport/fileutils.h"
+
+namespace rtc {
+
+template <typename Base>
+class LogSinkImpl
+    : public LogSink,
+      public Base {
+ public:
+  LogSinkImpl() {}
+
+  template<typename P>
+  explicit LogSinkImpl(P* p) : Base(p) {}
+
+ private:
+  void OnLogMessage(const std::string& message) override {
+    static_cast<Base*>(this)->WriteAll(
+        message.data(), message.size(), nullptr, nullptr);
+  }
+};
+
+class LogMessageForTesting : public LogMessage {
+ public:
+  LogMessageForTesting(const char* file,
+                       int line,
+                       LoggingSeverity sev,
+                       LogErrorContext err_ctx = ERRCTX_NONE,
+                       int err = 0)
+      : LogMessage(file, line, sev, err_ctx, err) {}
+
+  const std::string& get_extra() const { return extra_; }
+  bool is_noop() const { return is_noop_; }
+
+  // Returns the contents of the internal log stream.
+  // Note that parts of the stream won't (as is) be available until *after* the
+  // dtor of the parent class has run. So, as is, this only represents a
+  // partially built stream.
+  std::string GetPrintStream() {
+    RTC_DCHECK(!is_finished_);
+    is_finished_ = true;
+    FinishPrintStream();
+    std::string ret = print_stream_.str();
+    // Just to make an error even more clear if the stream gets used after this.
+    print_stream_.clear();
+    return ret;
+  }
+
+ private:
+  bool is_finished_ = false;
+};
+
+// Test basic logging operation. We should get the INFO log but not the VERBOSE.
+// We should restore the correct global state at the end.
+TEST(LogTest, SingleStream) {
+  int sev = LogMessage::GetLogToStream(nullptr);
+
+  std::string str;
+  LogSinkImpl<StringStream> stream(&str);
+  LogMessage::AddLogToStream(&stream, LS_INFO);
+  EXPECT_EQ(LS_INFO, LogMessage::GetLogToStream(&stream));
+
+  RTC_LOG(LS_INFO) << "INFO";
+  RTC_LOG(LS_VERBOSE) << "VERBOSE";
+  EXPECT_NE(std::string::npos, str.find("INFO"));
+  EXPECT_EQ(std::string::npos, str.find("VERBOSE"));
+
+  LogMessage::RemoveLogToStream(&stream);
+  EXPECT_EQ(LS_NONE, LogMessage::GetLogToStream(&stream));
+
+  EXPECT_EQ(sev, LogMessage::GetLogToStream(nullptr));
+}
+
+// Test using multiple log streams. The INFO stream should get the INFO message,
+// the VERBOSE stream should get the INFO and the VERBOSE.
+// We should restore the correct global state at the end.
+TEST(LogTest, MultipleStreams) {
+  int sev = LogMessage::GetLogToStream(nullptr);
+
+  std::string str1, str2;
+  LogSinkImpl<StringStream> stream1(&str1), stream2(&str2);
+  LogMessage::AddLogToStream(&stream1, LS_INFO);
+  LogMessage::AddLogToStream(&stream2, LS_VERBOSE);
+  EXPECT_EQ(LS_INFO, LogMessage::GetLogToStream(&stream1));
+  EXPECT_EQ(LS_VERBOSE, LogMessage::GetLogToStream(&stream2));
+
+  RTC_LOG(LS_INFO) << "INFO";
+  RTC_LOG(LS_VERBOSE) << "VERBOSE";
+
+  EXPECT_NE(std::string::npos, str1.find("INFO"));
+  EXPECT_EQ(std::string::npos, str1.find("VERBOSE"));
+  EXPECT_NE(std::string::npos, str2.find("INFO"));
+  EXPECT_NE(std::string::npos, str2.find("VERBOSE"));
+
+  LogMessage::RemoveLogToStream(&stream2);
+  LogMessage::RemoveLogToStream(&stream1);
+  EXPECT_EQ(LS_NONE, LogMessage::GetLogToStream(&stream2));
+  EXPECT_EQ(LS_NONE, LogMessage::GetLogToStream(&stream1));
+
+  EXPECT_EQ(sev, LogMessage::GetLogToStream(nullptr));
+}
+
+class LogThread {
+ public:
+  LogThread() : thread_(&ThreadEntry, this, "LogThread") {}
+  ~LogThread() { thread_.Stop(); }
+
+  void Start() { thread_.Start(); }
+
+ private:
+  void Run() {
+    // LS_SENSITIVE by default to avoid cluttering up any real logging going on.
+    RTC_LOG(LS_SENSITIVE) << "RTC_LOG";
+  }
+
+  static void ThreadEntry(void* p) { static_cast<LogThread*>(p)->Run(); }
+
+  PlatformThread thread_;
+  Event event_{false, false};
+};
+
+// Ensure we don't crash when adding/removing streams while threads are going.
+// We should restore the correct global state at the end.
+// This test also makes sure that the 'noop' stream() singleton object, can be
+// safely used from mutiple threads since the threads log at LS_SENSITIVE
+// (by default 'noop' entries).
+TEST(LogTest, MultipleThreads) {
+  int sev = LogMessage::GetLogToStream(nullptr);
+
+  LogThread thread1, thread2, thread3;
+  thread1.Start();
+  thread2.Start();
+  thread3.Start();
+
+  LogSinkImpl<NullStream> stream1, stream2, stream3;
+  for (int i = 0; i < 1000; ++i) {
+    LogMessage::AddLogToStream(&stream1, LS_INFO);
+    LogMessage::AddLogToStream(&stream2, LS_VERBOSE);
+    LogMessage::AddLogToStream(&stream3, LS_SENSITIVE);
+    LogMessage::RemoveLogToStream(&stream1);
+    LogMessage::RemoveLogToStream(&stream2);
+    LogMessage::RemoveLogToStream(&stream3);
+  }
+
+  EXPECT_EQ(sev, LogMessage::GetLogToStream(nullptr));
+}
+
+
+TEST(LogTest, WallClockStartTime) {
+  uint32_t time = LogMessage::WallClockStartTime();
+  // Expect the time to be in a sensible range, e.g. > 2012-01-01.
+  EXPECT_GT(time, 1325376000u);
+}
+
+TEST(LogTest, CheckExtraErrorField) {
+  LogMessageForTesting log_msg("some/path/myfile.cc", 100, LS_WARNING,
+                               ERRCTX_ERRNO, 0xD);
+  ASSERT_FALSE(log_msg.is_noop());
+  log_msg.stream() << "This gets added at dtor time";
+
+  const std::string& extra = log_msg.get_extra();
+  const size_t length_to_check = arraysize("[0x12345678]") - 1;
+  ASSERT_GE(extra.length(), length_to_check);
+  EXPECT_EQ(std::string("[0x0000000D]"), extra.substr(0, length_to_check));
+}
+
+TEST(LogTest, CheckFilePathParsed) {
+  LogMessageForTesting log_msg("some/path/myfile.cc", 100, LS_INFO);
+  ASSERT_FALSE(log_msg.is_noop());
+  log_msg.stream() << "<- Does this look right?";
+
+  const std::string stream = log_msg.GetPrintStream();
+  EXPECT_NE(std::string::npos, stream.find("(myfile.cc:100)"));
+}
+
+TEST(LogTest, CheckNoopLogEntry) {
+  if (LogMessage::GetLogToDebug() <= LS_SENSITIVE) {
+    printf("CheckNoopLogEntry: skipping. Global severity is being overridden.");
+    return;
+  }
+
+  // Logging at LS_SENSITIVE severity, is by default turned off, so this should
+  // be treated as a noop message.
+  LogMessageForTesting log_msg("some/path/myfile.cc", 100, LS_SENSITIVE);
+  log_msg.stream() << "Should be logged to nowhere.";
+  EXPECT_TRUE(log_msg.is_noop());
+  const std::string stream = log_msg.GetPrintStream();
+  EXPECT_TRUE(stream.empty());
+}
+
+// Test the time required to write 1000 80-character logs to a string.
+TEST(LogTest, Perf) {
+  std::string str;
+  LogSinkImpl<StringStream> stream(&str);
+  LogMessage::AddLogToStream(&stream, LS_SENSITIVE);
+
+  const std::string message(80, 'X');
+  {
+    // Just to be sure that we're not measuring the performance of logging
+    // noop log messages.
+    LogMessageForTesting sanity_check_msg(__FILE__, __LINE__, LS_SENSITIVE);
+    ASSERT_FALSE(sanity_check_msg.is_noop());
+  }
+
+  // We now know how many bytes the logging framework will tag onto every msg.
+  const size_t logging_overhead = str.size();
+  // Reset the stream to 0 size.
+  str.clear();
+  str.reserve(120000);
+  static const int kRepetitions = 1000;
+
+  int64_t start = TimeMillis(), finish;
+  for (int i = 0; i < kRepetitions; ++i) {
+    LogMessageForTesting(__FILE__, __LINE__, LS_SENSITIVE).stream() << message;
+  }
+  finish = TimeMillis();
+
+  LogMessage::RemoveLogToStream(&stream);
+  stream.Close();
+
+  EXPECT_EQ(str.size(), (message.size() + logging_overhead) * kRepetitions);
+  RTC_LOG(LS_INFO) << "Total log time: " << TimeDiff(finish, start) << " ms "
+                   << " total bytes logged: " << str.size();
+}
+
+}  // namespace rtc
diff --git a/rtc_base/logsinks.cc b/rtc_base/logsinks.cc
new file mode 100644
index 0000000..5f64f62
--- /dev/null
+++ b/rtc_base/logsinks.cc
@@ -0,0 +1,64 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/logsinks.h"
+
+#include <iostream>
+#include <string>
+
+#include "rtc_base/checks.h"
+
+namespace rtc {
+
+FileRotatingLogSink::FileRotatingLogSink(const std::string& log_dir_path,
+                                         const std::string& log_prefix,
+                                         size_t max_log_size,
+                                         size_t num_log_files)
+    : FileRotatingLogSink(new FileRotatingStream(log_dir_path,
+                                                 log_prefix,
+                                                 max_log_size,
+                                                 num_log_files)) {
+}
+
+FileRotatingLogSink::FileRotatingLogSink(FileRotatingStream* stream)
+    : stream_(stream) {
+  RTC_DCHECK(stream);
+}
+
+FileRotatingLogSink::~FileRotatingLogSink() {
+}
+
+void FileRotatingLogSink::OnLogMessage(const std::string& message) {
+  if (stream_->GetState() != SS_OPEN) {
+    std::cerr << "Init() must be called before adding this sink." << std::endl;
+    return;
+  }
+  stream_->WriteAll(message.c_str(), message.size(), nullptr, nullptr);
+}
+
+bool FileRotatingLogSink::Init() {
+  return stream_->Open();
+}
+
+bool FileRotatingLogSink::DisableBuffering() {
+  return stream_->DisableBuffering();
+}
+
+CallSessionFileRotatingLogSink::CallSessionFileRotatingLogSink(
+    const std::string& log_dir_path,
+    size_t max_total_log_size)
+    : FileRotatingLogSink(
+          new CallSessionFileRotatingStream(log_dir_path, max_total_log_size)) {
+}
+
+CallSessionFileRotatingLogSink::~CallSessionFileRotatingLogSink() {
+}
+
+}  // namespace rtc
diff --git a/rtc_base/logsinks.h b/rtc_base/logsinks.h
new file mode 100644
index 0000000..315ef96
--- /dev/null
+++ b/rtc_base/logsinks.h
@@ -0,0 +1,68 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_LOGSINKS_H_
+#define RTC_BASE_LOGSINKS_H_
+
+#include <memory>
+#include <string>
+
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/filerotatingstream.h"
+#include "rtc_base/logging.h"
+
+namespace rtc {
+
+// Log sink that uses a FileRotatingStream to write to disk.
+// Init() must be called before adding this sink.
+class FileRotatingLogSink : public LogSink {
+ public:
+  // |num_log_files| must be greater than 1 and |max_log_size| must be greater
+  // than 0.
+  FileRotatingLogSink(const std::string& log_dir_path,
+                      const std::string& log_prefix,
+                      size_t max_log_size,
+                      size_t num_log_files);
+  ~FileRotatingLogSink() override;
+
+  // Writes the message to the current file. It will spill over to the next
+  // file if needed.
+  void OnLogMessage(const std::string& message) override;
+
+  // Deletes any existing files in the directory and creates a new log file.
+  virtual bool Init();
+
+  // Disables buffering on the underlying stream.
+  bool DisableBuffering();
+
+ protected:
+  explicit FileRotatingLogSink(FileRotatingStream* stream);
+
+ private:
+  std::unique_ptr<FileRotatingStream> stream_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(FileRotatingLogSink);
+};
+
+// Log sink that uses a CallSessionFileRotatingStream to write to disk.
+// Init() must be called before adding this sink.
+class CallSessionFileRotatingLogSink : public FileRotatingLogSink {
+ public:
+  CallSessionFileRotatingLogSink(const std::string& log_dir_path,
+                                 size_t max_total_log_size);
+  ~CallSessionFileRotatingLogSink() override;
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(CallSessionFileRotatingLogSink);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_LOGSINKS_H_
diff --git a/rtc_base/macifaddrs_converter.cc b/rtc_base/macifaddrs_converter.cc
new file mode 100644
index 0000000..254be9b
--- /dev/null
+++ b/rtc_base/macifaddrs_converter.cc
@@ -0,0 +1,282 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include <net/if.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/ifaddrs_converter.h"
+#include "rtc_base/logging.h"
+
+#if !defined(WEBRTC_IOS)
+#include <net/if_media.h>
+#include <netinet/in_var.h>
+#else  // WEBRTC_IOS
+#define SCOPE6_ID_MAX 16
+
+struct in6_addrlifetime {
+  time_t ia6t_expire;    /* valid lifetime expiration time */
+  time_t ia6t_preferred; /* preferred lifetime expiration time */
+  u_int32_t ia6t_vltime; /* valid lifetime */
+  u_int32_t ia6t_pltime; /* prefix lifetime */
+};
+
+struct in6_ifstat {
+  u_quad_t ifs6_in_receive;      /* # of total input datagram */
+  u_quad_t ifs6_in_hdrerr;       /* # of datagrams with invalid hdr */
+  u_quad_t ifs6_in_toobig;       /* # of datagrams exceeded MTU */
+  u_quad_t ifs6_in_noroute;      /* # of datagrams with no route */
+  u_quad_t ifs6_in_addrerr;      /* # of datagrams with invalid dst */
+  u_quad_t ifs6_in_protounknown; /* # of datagrams with unknown proto */
+                                 /* NOTE: increment on final dst if */
+  u_quad_t ifs6_in_truncated;    /* # of truncated datagrams */
+  u_quad_t ifs6_in_discard;      /* # of discarded datagrams */
+                                 /* NOTE: fragment timeout is not here */
+  u_quad_t ifs6_in_deliver;      /* # of datagrams delivered to ULP */
+                                 /* NOTE: increment on final dst if */
+  u_quad_t ifs6_out_forward;     /* # of datagrams forwarded */
+                                 /* NOTE: increment on outgoing if */
+  u_quad_t ifs6_out_request;     /* # of outgoing datagrams from ULP */
+                                 /* NOTE: does not include forwrads */
+  u_quad_t ifs6_out_discard;     /* # of discarded datagrams */
+  u_quad_t ifs6_out_fragok;      /* # of datagrams fragmented */
+  u_quad_t ifs6_out_fragfail;    /* # of datagrams failed on fragment */
+  u_quad_t ifs6_out_fragcreat;   /* # of fragment datagrams */
+                                 /* NOTE: this is # after fragment */
+  u_quad_t ifs6_reass_reqd;      /* # of incoming fragmented packets */
+                                 /* NOTE: increment on final dst if */
+  u_quad_t ifs6_reass_ok;        /* # of reassembled packets */
+                                 /* NOTE: this is # after reass */
+                                 /* NOTE: increment on final dst if */
+  u_quad_t ifs6_reass_fail;      /* # of reass failures */
+                                 /* NOTE: may not be packet count */
+                                 /* NOTE: increment on final dst if */
+  u_quad_t ifs6_in_mcast;        /* # of inbound multicast datagrams */
+  u_quad_t ifs6_out_mcast;       /* # of outbound multicast datagrams */
+};
+struct icmp6_ifstat {
+  /*
+   * Input statistics
+   */
+  /* ipv6IfIcmpInMsgs, total # of input messages */
+  u_quad_t ifs6_in_msg;
+  /* ipv6IfIcmpInErrors, # of input error messages */
+  u_quad_t ifs6_in_error;
+  /* ipv6IfIcmpInDestUnreachs, # of input dest unreach errors */
+  u_quad_t ifs6_in_dstunreach;
+  /* ipv6IfIcmpInAdminProhibs, # of input admin. prohibited errs */
+  u_quad_t ifs6_in_adminprohib;
+  /* ipv6IfIcmpInTimeExcds, # of input time exceeded errors */
+  u_quad_t ifs6_in_timeexceed;
+  /* ipv6IfIcmpInParmProblems, # of input parameter problem errors */
+  u_quad_t ifs6_in_paramprob;
+  /* ipv6IfIcmpInPktTooBigs, # of input packet too big errors */
+  u_quad_t ifs6_in_pkttoobig;
+  /* ipv6IfIcmpInEchos, # of input echo requests */
+  u_quad_t ifs6_in_echo;
+  /* ipv6IfIcmpInEchoReplies, # of input echo replies */
+  u_quad_t ifs6_in_echoreply;
+  /* ipv6IfIcmpInRouterSolicits, # of input router solicitations */
+  u_quad_t ifs6_in_routersolicit;
+  /* ipv6IfIcmpInRouterAdvertisements, # of input router advertisements */
+  u_quad_t ifs6_in_routeradvert;
+  /* ipv6IfIcmpInNeighborSolicits, # of input neighbor solicitations */
+  u_quad_t ifs6_in_neighborsolicit;
+  /* ipv6IfIcmpInNeighborAdvertisements, # of input neighbor advs. */
+  u_quad_t ifs6_in_neighboradvert;
+  /* ipv6IfIcmpInRedirects, # of input redirects */
+  u_quad_t ifs6_in_redirect;
+  /* ipv6IfIcmpInGroupMembQueries, # of input MLD queries */
+  u_quad_t ifs6_in_mldquery;
+  /* ipv6IfIcmpInGroupMembResponses, # of input MLD reports */
+  u_quad_t ifs6_in_mldreport;
+  /* ipv6IfIcmpInGroupMembReductions, # of input MLD done */
+  u_quad_t ifs6_in_mlddone;
+
+  /*
+   * Output statistics. We should solve unresolved routing problem...
+   */
+  /* ipv6IfIcmpOutMsgs, total # of output messages */
+  u_quad_t ifs6_out_msg;
+  /* ipv6IfIcmpOutErrors, # of output error messages */
+  u_quad_t ifs6_out_error;
+  /* ipv6IfIcmpOutDestUnreachs, # of output dest unreach errors */
+  u_quad_t ifs6_out_dstunreach;
+  /* ipv6IfIcmpOutAdminProhibs, # of output admin. prohibited errs */
+  u_quad_t ifs6_out_adminprohib;
+  /* ipv6IfIcmpOutTimeExcds, # of output time exceeded errors */
+  u_quad_t ifs6_out_timeexceed;
+  /* ipv6IfIcmpOutParmProblems, # of output parameter problem errors */
+  u_quad_t ifs6_out_paramprob;
+  /* ipv6IfIcmpOutPktTooBigs, # of output packet too big errors */
+  u_quad_t ifs6_out_pkttoobig;
+  /* ipv6IfIcmpOutEchos, # of output echo requests */
+  u_quad_t ifs6_out_echo;
+  /* ipv6IfIcmpOutEchoReplies, # of output echo replies */
+  u_quad_t ifs6_out_echoreply;
+  /* ipv6IfIcmpOutRouterSolicits, # of output router solicitations */
+  u_quad_t ifs6_out_routersolicit;
+  /* ipv6IfIcmpOutRouterAdvertisements, # of output router advs. */
+  u_quad_t ifs6_out_routeradvert;
+  /* ipv6IfIcmpOutNeighborSolicits, # of output neighbor solicitations */
+  u_quad_t ifs6_out_neighborsolicit;
+  /* ipv6IfIcmpOutNeighborAdvertisements, # of output neighbor advs. */
+  u_quad_t ifs6_out_neighboradvert;
+  /* ipv6IfIcmpOutRedirects, # of output redirects */
+  u_quad_t ifs6_out_redirect;
+  /* ipv6IfIcmpOutGroupMembQueries, # of output MLD queries */
+  u_quad_t ifs6_out_mldquery;
+  /* ipv6IfIcmpOutGroupMembResponses, # of output MLD reports */
+  u_quad_t ifs6_out_mldreport;
+  /* ipv6IfIcmpOutGroupMembReductions, # of output MLD done */
+  u_quad_t ifs6_out_mlddone;
+};
+
+struct in6_ifreq {
+  char ifr_name[IFNAMSIZ];
+  union {
+    struct sockaddr_in6 ifru_addr;
+    struct sockaddr_in6 ifru_dstaddr;
+    int ifru_flags;
+    int ifru_flags6;
+    int ifru_metric;
+    int ifru_intval;
+    caddr_t ifru_data;
+    struct in6_addrlifetime ifru_lifetime;
+    struct in6_ifstat ifru_stat;
+    struct icmp6_ifstat ifru_icmp6stat;
+    u_int32_t ifru_scope_id[SCOPE6_ID_MAX];
+  } ifr_ifru;
+};
+
+#define SIOCGIFAFLAG_IN6 _IOWR('i', 73, struct in6_ifreq)
+
+#define IN6_IFF_ANYCAST 0x0001    /* anycast address */
+#define IN6_IFF_TENTATIVE 0x0002  /* tentative address */
+#define IN6_IFF_DUPLICATED 0x0004 /* DAD detected duplicate */
+#define IN6_IFF_DETACHED 0x0008   /* may be detached from the link */
+#define IN6_IFF_DEPRECATED 0x0010 /* deprecated address */
+#define IN6_IFF_TEMPORARY 0x0080  /* temporary (anonymous) address. */
+
+#endif  // WEBRTC_IOS
+
+namespace rtc {
+
+namespace {
+
+class IPv6AttributesGetter {
+ public:
+  IPv6AttributesGetter();
+  virtual ~IPv6AttributesGetter();
+  bool IsInitialized() const;
+  bool GetIPAttributes(const char* ifname,
+                       const sockaddr* sock_addr,
+                       int* native_attributes);
+
+ private:
+  // on MAC or IOS, we have to use ioctl with a socket to query an IPv6
+  // interface's attribute.
+  int ioctl_socket_;
+};
+
+IPv6AttributesGetter::IPv6AttributesGetter()
+    : ioctl_socket_(
+          socket(AF_INET6, SOCK_DGRAM, 0 /* unspecified protocol */)) {
+  RTC_DCHECK_GE(ioctl_socket_, 0);
+}
+
+bool IPv6AttributesGetter::IsInitialized() const {
+  return ioctl_socket_ >= 0;
+}
+
+IPv6AttributesGetter::~IPv6AttributesGetter() {
+  if (!IsInitialized()) {
+    return;
+  }
+  close(ioctl_socket_);
+}
+
+bool IPv6AttributesGetter::GetIPAttributes(const char* ifname,
+                                           const sockaddr* sock_addr,
+                                           int* native_attributes) {
+  if (!IsInitialized()) {
+    return false;
+  }
+
+  struct in6_ifreq ifr = {};
+  strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);
+  memcpy(&ifr.ifr_ifru.ifru_addr, sock_addr, sock_addr->sa_len);
+  int rv = ioctl(ioctl_socket_, SIOCGIFAFLAG_IN6, &ifr);
+  if (rv >= 0) {
+    *native_attributes = ifr.ifr_ifru.ifru_flags;
+  } else {
+    RTC_LOG(LS_ERROR) << "ioctl returns " << errno;
+  }
+  return (rv >= 0);
+}
+
+// Converts native IPv6 address attributes to net IPv6 address attributes.  If
+// it returns false, the IP address isn't suitable for one-to-one communications
+// applications and should be ignored.
+bool ConvertNativeToIPAttributes(int native_attributes, int* net_attributes) {
+  // For MacOSX, we disallow addresses with attributes IN6_IFF_ANYCASE,
+  // IN6_IFF_DUPLICATED, IN6_IFF_TENTATIVE, and IN6_IFF_DETACHED as these are
+  // still progressing through duplicated address detection (DAD) or are not
+  // suitable for one-to-one communication applications.
+  if (native_attributes & (IN6_IFF_ANYCAST | IN6_IFF_DUPLICATED |
+                           IN6_IFF_TENTATIVE | IN6_IFF_DETACHED)) {
+    return false;
+  }
+
+  if (native_attributes & IN6_IFF_TEMPORARY) {
+    *net_attributes |= IPV6_ADDRESS_FLAG_TEMPORARY;
+  }
+
+  if (native_attributes & IN6_IFF_DEPRECATED) {
+    *net_attributes |= IPV6_ADDRESS_FLAG_DEPRECATED;
+  }
+
+  return true;
+}
+
+class MacIfAddrsConverter : public IfAddrsConverter {
+ public:
+  MacIfAddrsConverter() : ip_attribute_getter_(new IPv6AttributesGetter()) {}
+  ~MacIfAddrsConverter() override {}
+
+  bool ConvertNativeAttributesToIPAttributes(const struct ifaddrs* interface,
+                                             int* ip_attributes) override {
+    int native_attributes;
+    if (!ip_attribute_getter_->GetIPAttributes(
+            interface->ifa_name, interface->ifa_addr, &native_attributes)) {
+      return false;
+    }
+
+    if (!ConvertNativeToIPAttributes(native_attributes, ip_attributes)) {
+      return false;
+    }
+
+    return true;
+  }
+
+ private:
+  std::unique_ptr<IPv6AttributesGetter> ip_attribute_getter_;
+};
+
+}  // namespace
+
+IfAddrsConverter* CreateIfAddrsConverter() {
+  return new MacIfAddrsConverter();
+}
+
+}  // namespace rtc
diff --git a/rtc_base/macutils.cc b/rtc_base/macutils.cc
new file mode 100644
index 0000000..d3f2919
--- /dev/null
+++ b/rtc_base/macutils.cc
@@ -0,0 +1,49 @@
+/*
+ *  Copyright 2007 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cstring>
+#include <memory>
+#include <sstream>
+
+#include <sys/utsname.h>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/macutils.h"
+#include "rtc_base/stringutils.h"
+
+namespace rtc {
+
+bool ToUtf8(const CFStringRef str16, std::string* str8) {
+  if ((nullptr == str16) || (nullptr == str8)) {
+    return false;
+  }
+  size_t maxlen = CFStringGetMaximumSizeForEncoding(CFStringGetLength(str16),
+                                                    kCFStringEncodingUTF8) + 1;
+  std::unique_ptr<char[]> buffer(new char[maxlen]);
+  if (!buffer || !CFStringGetCString(str16, buffer.get(), maxlen,
+                                     kCFStringEncodingUTF8)) {
+    return false;
+  }
+  str8->assign(buffer.get());
+  return true;
+}
+
+bool ToUtf16(const std::string& str8, CFStringRef* str16) {
+  if (nullptr == str16) {
+    return false;
+  }
+  *str16 = CFStringCreateWithBytes(kCFAllocatorDefault,
+                                   reinterpret_cast<const UInt8*>(str8.data()),
+                                   str8.length(), kCFStringEncodingUTF8,
+                                   false);
+  return nullptr != *str16;
+}
+}  // namespace rtc
diff --git a/rtc_base/macutils.h b/rtc_base/macutils.h
new file mode 100644
index 0000000..b22e5f9
--- /dev/null
+++ b/rtc_base/macutils.h
@@ -0,0 +1,22 @@
+/*
+ *  Copyright 2007 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_MACUTILS_H_
+#define RTC_BASE_MACUTILS_H_
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <string>
+
+namespace rtc {
+bool ToUtf8(const CFStringRef str16, std::string* str8);
+bool ToUtf16(const std::string& str8, CFStringRef* str16);
+}  // namespace rtc
+
+#endif  // RTC_BASE_MACUTILS_H_
diff --git a/rtc_base/memory_usage.cc b/rtc_base/memory_usage.cc
new file mode 100644
index 0000000..41b27ed
--- /dev/null
+++ b/rtc_base/memory_usage.cc
@@ -0,0 +1,69 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/memory_usage.h"
+
+#if defined(WEBRTC_LINUX)
+#include <unistd.h>
+#include <cstdlib>
+#include <cstdio>
+#include <cstring>
+#elif defined(WEBRTC_MAC)
+#include <mach/mach.h>
+#elif defined(WEBRTC_WIN)
+#include <windows.h>
+#include <psapi.h>
+#endif
+
+#include "rtc_base/logging.h"
+
+namespace rtc {
+
+int64_t GetProcessResidentSizeBytes() {
+#if defined(WEBRTC_LINUX)
+  FILE* file = fopen("/proc/self/statm", "r");
+  if (file == nullptr) {
+    RTC_LOG(LS_ERROR) << "Failed to open /proc/self/statm";
+    return -1;
+  }
+  int result = -1;
+  if (fscanf(file, "%*s%d", &result) != 1) {
+    fclose(file);
+    RTC_LOG(LS_ERROR) << "Failed to parse /proc/self/statm";
+    return -1;
+  }
+  fclose(file);
+  return static_cast<int64_t>(result) * sysconf(_SC_PAGESIZE);
+#elif defined(WEBRTC_MAC)
+  task_basic_info_64 info;
+  mach_msg_type_number_t info_count = TASK_BASIC_INFO_64_COUNT;
+  if (task_info(mach_task_self(), TASK_BASIC_INFO_64,
+                reinterpret_cast<task_info_t>(&info),
+                &info_count) != KERN_SUCCESS) {
+    RTC_LOG_ERR(LS_ERROR) << "task_info() failed";
+    return -1;
+  }
+  return info.resident_size;
+#elif defined(WEBRTC_WIN)
+  PROCESS_MEMORY_COUNTERS pmc;
+  if (GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)) == 0) {
+    RTC_LOG_ERR(LS_ERROR) << "GetProcessMemoryInfo() failed";
+    return -1;
+  }
+  return pmc.WorkingSetSize;
+#else
+  // Not implemented yet.
+  static_assert(false,
+                "GetProcessVirtualMemoryUsageBytes() platform support not yet "
+                "implemented.");
+#endif
+}
+
+}  // namespace rtc
diff --git a/rtc_base/memory_usage.h b/rtc_base/memory_usage.h
new file mode 100644
index 0000000..c49323c
--- /dev/null
+++ b/rtc_base/memory_usage.h
@@ -0,0 +1,24 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef RTC_BASE_MEMORY_USAGE_H_
+#define RTC_BASE_MEMORY_USAGE_H_
+
+#include <stdint.h>
+
+namespace rtc {
+
+// Returns current memory used by the process in bytes (working set size on
+// Windows and resident set size on other platforms).
+// Returns -1 on failure.
+int64_t GetProcessResidentSizeBytes();
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_MEMORY_USAGE_H_
diff --git a/rtc_base/memory_usage_unittest.cc b/rtc_base/memory_usage_unittest.cc
new file mode 100644
index 0000000..87cdcef
--- /dev/null
+++ b/rtc_base/memory_usage_unittest.cc
@@ -0,0 +1,23 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/memory_usage.h"
+#include <cstdio>
+#include "test/gtest.h"
+
+namespace rtc {
+
+TEST(GetMemoryUsage, SimpleTest) {
+  int64_t used_bytes = GetProcessResidentSizeBytes();
+  EXPECT_GE(used_bytes, 0);
+}
+
+}  // namespace rtc
+
diff --git a/rtc_base/messagedigest.cc b/rtc_base/messagedigest.cc
new file mode 100644
index 0000000..bf8d25d
--- /dev/null
+++ b/rtc_base/messagedigest.cc
@@ -0,0 +1,167 @@
+/*
+ *  Copyright 2011 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/messagedigest.h"
+
+#include <memory>
+
+#include <string.h>
+
+#include "rtc_base/basictypes.h"
+#include "rtc_base/openssldigest.h"
+#include "rtc_base/stringencode.h"
+
+namespace rtc {
+
+// From RFC 4572.
+const char DIGEST_MD5[]     = "md5";
+const char DIGEST_SHA_1[]   = "sha-1";
+const char DIGEST_SHA_224[] = "sha-224";
+const char DIGEST_SHA_256[] = "sha-256";
+const char DIGEST_SHA_384[] = "sha-384";
+const char DIGEST_SHA_512[] = "sha-512";
+
+static const size_t kBlockSize = 64;  // valid for SHA-256 and down
+
+MessageDigest* MessageDigestFactory::Create(const std::string& alg) {
+  MessageDigest* digest = new OpenSSLDigest(alg);
+  if (digest->Size() == 0) {  // invalid algorithm
+    delete digest;
+    digest = nullptr;
+  }
+  return digest;
+}
+
+bool IsFips180DigestAlgorithm(const std::string& alg) {
+  // These are the FIPS 180 algorithms.  According to RFC 4572 Section 5,
+  // "Self-signed certificates (for which legacy certificates are not a
+  // consideration) MUST use one of the FIPS 180 algorithms (SHA-1,
+  // SHA-224, SHA-256, SHA-384, or SHA-512) as their signature algorithm,
+  // and thus also MUST use it to calculate certificate fingerprints."
+  return alg == DIGEST_SHA_1 ||
+         alg == DIGEST_SHA_224 ||
+         alg == DIGEST_SHA_256 ||
+         alg == DIGEST_SHA_384 ||
+         alg == DIGEST_SHA_512;
+}
+
+size_t ComputeDigest(MessageDigest* digest, const void* input, size_t in_len,
+                     void* output, size_t out_len) {
+  digest->Update(input, in_len);
+  return digest->Finish(output, out_len);
+}
+
+size_t ComputeDigest(const std::string& alg, const void* input, size_t in_len,
+                     void* output, size_t out_len) {
+  std::unique_ptr<MessageDigest> digest(MessageDigestFactory::Create(alg));
+  return (digest) ?
+      ComputeDigest(digest.get(), input, in_len, output, out_len) :
+      0;
+}
+
+std::string ComputeDigest(MessageDigest* digest, const std::string& input) {
+  std::unique_ptr<char[]> output(new char[digest->Size()]);
+  ComputeDigest(digest, input.data(), input.size(),
+                output.get(), digest->Size());
+  return hex_encode(output.get(), digest->Size());
+}
+
+bool ComputeDigest(const std::string& alg, const std::string& input,
+                   std::string* output) {
+  std::unique_ptr<MessageDigest> digest(MessageDigestFactory::Create(alg));
+  if (!digest) {
+    return false;
+  }
+  *output = ComputeDigest(digest.get(), input);
+  return true;
+}
+
+std::string ComputeDigest(const std::string& alg, const std::string& input) {
+  std::string output;
+  ComputeDigest(alg, input, &output);
+  return output;
+}
+
+// Compute a RFC 2104 HMAC: H(K XOR opad, H(K XOR ipad, text))
+size_t ComputeHmac(MessageDigest* digest,
+                   const void* key, size_t key_len,
+                   const void* input, size_t in_len,
+                   void* output, size_t out_len) {
+  // We only handle algorithms with a 64-byte blocksize.
+  // TODO: Add BlockSize() method to MessageDigest.
+  size_t block_len = kBlockSize;
+  if (digest->Size() > 32) {
+    return 0;
+  }
+  // Copy the key to a block-sized buffer to simplify padding.
+  // If the key is longer than a block, hash it and use the result instead.
+  std::unique_ptr<uint8_t[]> new_key(new uint8_t[block_len]);
+  if (key_len > block_len) {
+    ComputeDigest(digest, key, key_len, new_key.get(), block_len);
+    memset(new_key.get() + digest->Size(), 0, block_len - digest->Size());
+  } else {
+    memcpy(new_key.get(), key, key_len);
+    memset(new_key.get() + key_len, 0, block_len - key_len);
+  }
+  // Set up the padding from the key, salting appropriately for each padding.
+  std::unique_ptr<uint8_t[]> o_pad(new uint8_t[block_len]);
+  std::unique_ptr<uint8_t[]> i_pad(new uint8_t[block_len]);
+  for (size_t i = 0; i < block_len; ++i) {
+    o_pad[i] = 0x5c ^ new_key[i];
+    i_pad[i] = 0x36 ^ new_key[i];
+  }
+  // Inner hash; hash the inner padding, and then the input buffer.
+  std::unique_ptr<uint8_t[]> inner(new uint8_t[digest->Size()]);
+  digest->Update(i_pad.get(), block_len);
+  digest->Update(input, in_len);
+  digest->Finish(inner.get(), digest->Size());
+  // Outer hash; hash the outer padding, and then the result of the inner hash.
+  digest->Update(o_pad.get(), block_len);
+  digest->Update(inner.get(), digest->Size());
+  return digest->Finish(output, out_len);
+}
+
+size_t ComputeHmac(const std::string& alg, const void* key, size_t key_len,
+                   const void* input, size_t in_len,
+                   void* output, size_t out_len) {
+  std::unique_ptr<MessageDigest> digest(MessageDigestFactory::Create(alg));
+  if (!digest) {
+    return 0;
+  }
+  return ComputeHmac(digest.get(), key, key_len,
+                     input, in_len, output, out_len);
+}
+
+std::string ComputeHmac(MessageDigest* digest, const std::string& key,
+                        const std::string& input) {
+  std::unique_ptr<char[]> output(new char[digest->Size()]);
+  ComputeHmac(digest, key.data(), key.size(),
+              input.data(), input.size(), output.get(), digest->Size());
+  return hex_encode(output.get(), digest->Size());
+}
+
+bool ComputeHmac(const std::string& alg, const std::string& key,
+                 const std::string& input, std::string* output) {
+  std::unique_ptr<MessageDigest> digest(MessageDigestFactory::Create(alg));
+  if (!digest) {
+    return false;
+  }
+  *output = ComputeHmac(digest.get(), key, input);
+  return true;
+}
+
+std::string ComputeHmac(const std::string& alg, const std::string& key,
+                        const std::string& input) {
+  std::string output;
+  ComputeHmac(alg, key, input, &output);
+  return output;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/messagedigest.h b/rtc_base/messagedigest.h
new file mode 100644
index 0000000..f80dd7a
--- /dev/null
+++ b/rtc_base/messagedigest.h
@@ -0,0 +1,109 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_MESSAGEDIGEST_H_
+#define RTC_BASE_MESSAGEDIGEST_H_
+
+#include <string>
+
+namespace rtc {
+
+// Definitions for the digest algorithms.
+extern const char DIGEST_MD5[];
+extern const char DIGEST_SHA_1[];
+extern const char DIGEST_SHA_224[];
+extern const char DIGEST_SHA_256[];
+extern const char DIGEST_SHA_384[];
+extern const char DIGEST_SHA_512[];
+
+// A general class for computing hashes.
+class MessageDigest {
+ public:
+  enum { kMaxSize = 64 };  // Maximum known size (SHA-512)
+  virtual ~MessageDigest() {}
+  // Returns the digest output size (e.g. 16 bytes for MD5).
+  virtual size_t Size() const = 0;
+  // Updates the digest with |len| bytes from |buf|.
+  virtual void Update(const void* buf, size_t len) = 0;
+  // Outputs the digest value to |buf| with length |len|.
+  // Returns the number of bytes written, i.e., Size().
+  virtual size_t Finish(void* buf, size_t len) = 0;
+};
+
+// A factory class for creating digest objects.
+class MessageDigestFactory {
+ public:
+  static MessageDigest* Create(const std::string& alg);
+};
+
+// A whitelist of approved digest algorithms from RFC 4572 (FIPS 180).
+bool IsFips180DigestAlgorithm(const std::string& alg);
+
+// Functions to create hashes.
+
+// Computes the hash of |in_len| bytes of |input|, using the |digest| hash
+// implementation, and outputs the hash to the buffer |output|, which is
+// |out_len| bytes long. Returns the number of bytes written to |output| if
+// successful, or 0 if |out_len| was too small.
+size_t ComputeDigest(MessageDigest* digest, const void* input, size_t in_len,
+                     void* output, size_t out_len);
+// Like the previous function, but creates a digest implementation based on
+// the desired digest name |alg|, e.g. DIGEST_SHA_1. Returns 0 if there is no
+// digest with the given name.
+size_t ComputeDigest(const std::string& alg, const void* input, size_t in_len,
+                     void* output, size_t out_len);
+// Computes the hash of |input| using the |digest| hash implementation, and
+// returns it as a hex-encoded string.
+std::string ComputeDigest(MessageDigest* digest, const std::string& input);
+// Like the previous function, but creates a digest implementation based on
+// the desired digest name |alg|, e.g. DIGEST_SHA_1. Returns empty string if
+// there is no digest with the given name.
+std::string ComputeDigest(const std::string& alg, const std::string& input);
+// Like the previous function, but returns an explicit result code.
+bool ComputeDigest(const std::string& alg, const std::string& input,
+                   std::string* output);
+
+// Shorthand way to compute a hex-encoded hash using MD5.
+inline std::string MD5(const std::string& input) {
+  return ComputeDigest(DIGEST_MD5, input);
+}
+
+// Functions to compute RFC 2104 HMACs.
+
+// Computes the HMAC of |in_len| bytes of |input|, using the |digest| hash
+// implementation and |key_len| bytes of |key| to key the HMAC, and outputs
+// the HMAC to the buffer |output|, which is |out_len| bytes long. Returns the
+// number of bytes written to |output| if successful, or 0 if |out_len| was too
+// small.
+size_t ComputeHmac(MessageDigest* digest, const void* key, size_t key_len,
+                   const void* input, size_t in_len,
+                   void* output, size_t out_len);
+// Like the previous function, but creates a digest implementation based on
+// the desired digest name |alg|, e.g. DIGEST_SHA_1. Returns 0 if there is no
+// digest with the given name.
+size_t ComputeHmac(const std::string& alg, const void* key, size_t key_len,
+                   const void* input, size_t in_len,
+                   void* output, size_t out_len);
+// Computes the HMAC of |input| using the |digest| hash implementation and |key|
+// to key the HMAC, and returns it as a hex-encoded string.
+std::string ComputeHmac(MessageDigest* digest, const std::string& key,
+                        const std::string& input);
+// Like the previous function, but creates a digest implementation based on
+// the desired digest name |alg|, e.g. DIGEST_SHA_1. Returns empty string if
+// there is no digest with the given name.
+std::string ComputeHmac(const std::string& alg, const std::string& key,
+                        const std::string& input);
+// Like the previous function, but returns an explicit result code.
+bool ComputeHmac(const std::string& alg, const std::string& key,
+                 const std::string& input, std::string* output);
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_MESSAGEDIGEST_H_
diff --git a/rtc_base/messagedigest_unittest.cc b/rtc_base/messagedigest_unittest.cc
new file mode 100644
index 0000000..4d7c338
--- /dev/null
+++ b/rtc_base/messagedigest_unittest.cc
@@ -0,0 +1,151 @@
+/*
+ *  Copyright 2012 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/messagedigest.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/stringencode.h"
+
+namespace rtc {
+
+// Test vectors from RFC 1321.
+TEST(MessageDigestTest, TestMd5Digest) {
+  // Test the string versions of the APIs.
+  EXPECT_EQ("d41d8cd98f00b204e9800998ecf8427e",
+      ComputeDigest(DIGEST_MD5, ""));
+  EXPECT_EQ("900150983cd24fb0d6963f7d28e17f72",
+      ComputeDigest(DIGEST_MD5, "abc"));
+  EXPECT_EQ("c3fcd3d76192e4007dfb496cca67e13b",
+      ComputeDigest(DIGEST_MD5, "abcdefghijklmnopqrstuvwxyz"));
+
+  // Test the raw buffer versions of the APIs; also check output buffer size.
+  char output[16];
+  EXPECT_EQ(sizeof(output),
+      ComputeDigest(DIGEST_MD5, "abc", 3, output, sizeof(output)));
+  EXPECT_EQ("900150983cd24fb0d6963f7d28e17f72",
+      hex_encode(output, sizeof(output)));
+  EXPECT_EQ(0U,
+      ComputeDigest(DIGEST_MD5, "abc", 3, output, sizeof(output) - 1));
+}
+
+// Test vectors from RFC 3174.
+TEST(MessageDigestTest, TestSha1Digest) {
+  // Test the string versions of the APIs.
+  EXPECT_EQ("da39a3ee5e6b4b0d3255bfef95601890afd80709",
+      ComputeDigest(DIGEST_SHA_1, ""));
+  EXPECT_EQ("a9993e364706816aba3e25717850c26c9cd0d89d",
+      ComputeDigest(DIGEST_SHA_1, "abc"));
+  EXPECT_EQ("84983e441c3bd26ebaae4aa1f95129e5e54670f1",
+      ComputeDigest(DIGEST_SHA_1,
+          "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"));
+
+  // Test the raw buffer versions of the APIs; also check output buffer size.
+  char output[20];
+  EXPECT_EQ(sizeof(output),
+      ComputeDigest(DIGEST_SHA_1, "abc", 3, output, sizeof(output)));
+  EXPECT_EQ("a9993e364706816aba3e25717850c26c9cd0d89d",
+      hex_encode(output, sizeof(output)));
+  EXPECT_EQ(0U,
+      ComputeDigest(DIGEST_SHA_1, "abc", 3, output, sizeof(output) - 1));
+}
+
+// Test that we fail properly if a bad digest algorithm is specified.
+TEST(MessageDigestTest, TestBadDigest) {
+  std::string output;
+  EXPECT_FALSE(ComputeDigest("sha-9000", "abc", &output));
+  EXPECT_EQ("", ComputeDigest("sha-9000", "abc"));
+}
+
+// Test vectors from RFC 2202.
+TEST(MessageDigestTest, TestMd5Hmac) {
+  // Test the string versions of the APIs.
+  EXPECT_EQ("9294727a3638bb1c13f48ef8158bfc9d",
+      ComputeHmac(DIGEST_MD5, std::string(16, '\x0b'), "Hi There"));
+  EXPECT_EQ("750c783e6ab0b503eaa86e310a5db738",
+      ComputeHmac(DIGEST_MD5, "Jefe", "what do ya want for nothing?"));
+  EXPECT_EQ("56be34521d144c88dbb8c733f0e8b3f6",
+      ComputeHmac(DIGEST_MD5, std::string(16, '\xaa'),
+          std::string(50, '\xdd')));
+  EXPECT_EQ("697eaf0aca3a3aea3a75164746ffaa79",
+      ComputeHmac(DIGEST_MD5,
+          "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+          "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19",
+          std::string(50, '\xcd')));
+  EXPECT_EQ("56461ef2342edc00f9bab995690efd4c",
+      ComputeHmac(DIGEST_MD5, std::string(16, '\x0c'),
+          "Test With Truncation"));
+  EXPECT_EQ("6b1ab7fe4bd7bf8f0b62e6ce61b9d0cd",
+      ComputeHmac(DIGEST_MD5, std::string(80, '\xaa'),
+          "Test Using Larger Than Block-Size Key - Hash Key First"));
+  EXPECT_EQ("6f630fad67cda0ee1fb1f562db3aa53e",
+      ComputeHmac(DIGEST_MD5, std::string(80, '\xaa'),
+          "Test Using Larger Than Block-Size Key and Larger "
+          "Than One Block-Size Data"));
+
+  // Test the raw buffer versions of the APIs; also check output buffer size.
+  std::string key(16, '\x0b');
+  std::string input("Hi There");
+  char output[16];
+  EXPECT_EQ(sizeof(output),
+      ComputeHmac(DIGEST_MD5, key.c_str(), key.size(),
+          input.c_str(), input.size(), output, sizeof(output)));
+  EXPECT_EQ("9294727a3638bb1c13f48ef8158bfc9d",
+      hex_encode(output, sizeof(output)));
+  EXPECT_EQ(0U,
+      ComputeHmac(DIGEST_MD5, key.c_str(), key.size(),
+          input.c_str(), input.size(), output, sizeof(output) - 1));
+}
+
+// Test vectors from RFC 2202.
+TEST(MessageDigestTest, TestSha1Hmac) {
+  // Test the string versions of the APIs.
+  EXPECT_EQ("b617318655057264e28bc0b6fb378c8ef146be00",
+      ComputeHmac(DIGEST_SHA_1, std::string(20, '\x0b'), "Hi There"));
+  EXPECT_EQ("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79",
+      ComputeHmac(DIGEST_SHA_1, "Jefe", "what do ya want for nothing?"));
+  EXPECT_EQ("125d7342b9ac11cd91a39af48aa17b4f63f175d3",
+      ComputeHmac(DIGEST_SHA_1, std::string(20, '\xaa'),
+          std::string(50, '\xdd')));
+  EXPECT_EQ("4c9007f4026250c6bc8414f9bf50c86c2d7235da",
+      ComputeHmac(DIGEST_SHA_1,
+          "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+          "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19",
+          std::string(50, '\xcd')));
+  EXPECT_EQ("4c1a03424b55e07fe7f27be1d58bb9324a9a5a04",
+      ComputeHmac(DIGEST_SHA_1, std::string(20, '\x0c'),
+          "Test With Truncation"));
+  EXPECT_EQ("aa4ae5e15272d00e95705637ce8a3b55ed402112",
+      ComputeHmac(DIGEST_SHA_1, std::string(80, '\xaa'),
+          "Test Using Larger Than Block-Size Key - Hash Key First"));
+  EXPECT_EQ("e8e99d0f45237d786d6bbaa7965c7808bbff1a91",
+      ComputeHmac(DIGEST_SHA_1, std::string(80, '\xaa'),
+          "Test Using Larger Than Block-Size Key and Larger "
+          "Than One Block-Size Data"));
+
+  // Test the raw buffer versions of the APIs; also check output buffer size.
+  std::string key(20, '\x0b');
+  std::string input("Hi There");
+  char output[20];
+  EXPECT_EQ(sizeof(output),
+      ComputeHmac(DIGEST_SHA_1, key.c_str(), key.size(),
+          input.c_str(), input.size(), output, sizeof(output)));
+  EXPECT_EQ("b617318655057264e28bc0b6fb378c8ef146be00",
+      hex_encode(output, sizeof(output)));
+  EXPECT_EQ(0U,
+      ComputeHmac(DIGEST_SHA_1, key.c_str(), key.size(),
+          input.c_str(), input.size(), output, sizeof(output) - 1));
+}
+
+TEST(MessageDigestTest, TestBadHmac) {
+  std::string output;
+  EXPECT_FALSE(ComputeHmac("sha-9000", "key", "abc", &output));
+  EXPECT_EQ("", ComputeHmac("sha-9000", "key", "abc"));
+}
+
+}  // namespace rtc
diff --git a/rtc_base/messagehandler.cc b/rtc_base/messagehandler.cc
new file mode 100644
index 0000000..2f580cc
--- /dev/null
+++ b/rtc_base/messagehandler.cc
@@ -0,0 +1,20 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/messagehandler.h"
+#include "rtc_base/messagequeue.h"
+
+namespace rtc {
+
+MessageHandler::~MessageHandler() {
+  MessageQueueManager::Clear(this);
+}
+
+} // namespace rtc
diff --git a/rtc_base/messagehandler.h b/rtc_base/messagehandler.h
new file mode 100644
index 0000000..ff953f7
--- /dev/null
+++ b/rtc_base/messagehandler.h
@@ -0,0 +1,75 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_MESSAGEHANDLER_H_
+#define RTC_BASE_MESSAGEHANDLER_H_
+
+#include <memory>
+#include <utility>
+
+#include "rtc_base/constructormagic.h"
+
+namespace rtc {
+
+struct Message;
+
+// Messages get dispatched to a MessageHandler
+
+class MessageHandler {
+ public:
+  virtual ~MessageHandler();
+  virtual void OnMessage(Message* msg) = 0;
+
+ protected:
+  MessageHandler() {}
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(MessageHandler);
+};
+
+// Helper class to facilitate executing a functor on a thread.
+template <class ReturnT, class FunctorT>
+class FunctorMessageHandler : public MessageHandler {
+ public:
+  explicit FunctorMessageHandler(FunctorT&& functor)
+      : functor_(std::forward<FunctorT>(functor)) {}
+  virtual void OnMessage(Message* msg) {
+    result_ = functor_();
+  }
+  const ReturnT& result() const { return result_; }
+
+  // Returns moved result. Should not call result() or MoveResult() again
+  // after this.
+  ReturnT MoveResult() { return std::move(result_); }
+
+ private:
+  FunctorT functor_;
+  ReturnT result_;
+};
+
+// Specialization for ReturnT of void.
+template <class FunctorT>
+class FunctorMessageHandler<void, FunctorT> : public MessageHandler {
+ public:
+  explicit FunctorMessageHandler(const FunctorT& functor)
+      : functor_(functor) {}
+  virtual void OnMessage(Message* msg) {
+    functor_();
+  }
+  void result() const {}
+  void MoveResult() {}
+
+ private:
+  FunctorT functor_;
+};
+
+} // namespace rtc
+
+#endif // RTC_BASE_MESSAGEHANDLER_H_
diff --git a/rtc_base/messagequeue.cc b/rtc_base/messagequeue.cc
new file mode 100644
index 0000000..001d3ed
--- /dev/null
+++ b/rtc_base/messagequeue.cc
@@ -0,0 +1,541 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include <algorithm>
+
+#include "rtc_base/atomicops.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/messagequeue.h"
+#include "rtc_base/stringencode.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/trace_event.h"
+
+namespace rtc {
+namespace {
+
+const int kMaxMsgLatency = 150;  // 150 ms
+const int kSlowDispatchLoggingThreshold = 50;  // 50 ms
+
+class RTC_SCOPED_LOCKABLE MarkProcessingCritScope {
+ public:
+  MarkProcessingCritScope(const CriticalSection* cs, size_t* processing)
+      RTC_EXCLUSIVE_LOCK_FUNCTION(cs)
+      : cs_(cs), processing_(processing) {
+    cs_->Enter();
+    *processing_ += 1;
+  }
+
+  ~MarkProcessingCritScope() RTC_UNLOCK_FUNCTION() {
+    *processing_ -= 1;
+    cs_->Leave();
+  }
+
+ private:
+  const CriticalSection* const cs_;
+  size_t* processing_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(MarkProcessingCritScope);
+};
+}  // namespace
+
+//------------------------------------------------------------------
+// MessageQueueManager
+
+MessageQueueManager* MessageQueueManager::instance_ = nullptr;
+
+MessageQueueManager* MessageQueueManager::Instance() {
+  // Note: This is not thread safe, but it is first called before threads are
+  // spawned.
+  if (!instance_)
+    instance_ = new MessageQueueManager;
+  return instance_;
+}
+
+bool MessageQueueManager::IsInitialized() {
+  return instance_ != nullptr;
+}
+
+MessageQueueManager::MessageQueueManager() : processing_(0) {}
+
+MessageQueueManager::~MessageQueueManager() {
+}
+
+void MessageQueueManager::Add(MessageQueue *message_queue) {
+  return Instance()->AddInternal(message_queue);
+}
+void MessageQueueManager::AddInternal(MessageQueue *message_queue) {
+  CritScope cs(&crit_);
+  // Prevent changes while the list of message queues is processed.
+  RTC_DCHECK_EQ(processing_, 0);
+  message_queues_.push_back(message_queue);
+}
+
+void MessageQueueManager::Remove(MessageQueue *message_queue) {
+  // If there isn't a message queue manager instance, then there isn't a queue
+  // to remove.
+  if (!instance_) return;
+  return Instance()->RemoveInternal(message_queue);
+}
+void MessageQueueManager::RemoveInternal(MessageQueue *message_queue) {
+  // If this is the last MessageQueue, destroy the manager as well so that
+  // we don't leak this object at program shutdown. As mentioned above, this is
+  // not thread-safe, but this should only happen at program termination (when
+  // the ThreadManager is destroyed, and threads are no longer active).
+  bool destroy = false;
+  {
+    CritScope cs(&crit_);
+    // Prevent changes while the list of message queues is processed.
+    RTC_DCHECK_EQ(processing_, 0);
+    std::vector<MessageQueue *>::iterator iter;
+    iter = std::find(message_queues_.begin(), message_queues_.end(),
+                     message_queue);
+    if (iter != message_queues_.end()) {
+      message_queues_.erase(iter);
+    }
+    destroy = message_queues_.empty();
+  }
+  if (destroy) {
+    instance_ = nullptr;
+    delete this;
+  }
+}
+
+void MessageQueueManager::Clear(MessageHandler *handler) {
+  // If there isn't a message queue manager instance, then there aren't any
+  // queues to remove this handler from.
+  if (!instance_) return;
+  return Instance()->ClearInternal(handler);
+}
+void MessageQueueManager::ClearInternal(MessageHandler *handler) {
+  // Deleted objects may cause re-entrant calls to ClearInternal. This is
+  // allowed as the list of message queues does not change while queues are
+  // cleared.
+  MarkProcessingCritScope cs(&crit_, &processing_);
+  std::vector<MessageQueue *>::iterator iter;
+  for (MessageQueue* queue : message_queues_) {
+    queue->Clear(handler);
+  }
+}
+
+void MessageQueueManager::ProcessAllMessageQueues() {
+  if (!instance_) {
+    return;
+  }
+  return Instance()->ProcessAllMessageQueuesInternal();
+}
+
+void MessageQueueManager::ProcessAllMessageQueuesInternal() {
+  // This works by posting a delayed message at the current time and waiting
+  // for it to be dispatched on all queues, which will ensure that all messages
+  // that came before it were also dispatched.
+  volatile int queues_not_done = 0;
+
+  // This class is used so that whether the posted message is processed, or the
+  // message queue is simply cleared, queues_not_done gets decremented.
+  class ScopedIncrement : public MessageData {
+   public:
+    ScopedIncrement(volatile int* value) : value_(value) {
+      AtomicOps::Increment(value_);
+    }
+    ~ScopedIncrement() override { AtomicOps::Decrement(value_); }
+
+   private:
+    volatile int* value_;
+  };
+
+  {
+    MarkProcessingCritScope cs(&crit_, &processing_);
+    for (MessageQueue* queue : message_queues_) {
+      if (!queue->IsProcessingMessages()) {
+        // If the queue is not processing messages, it can
+        // be ignored. If we tried to post a message to it, it would be dropped
+        // or ignored.
+        continue;
+      }
+      queue->PostDelayed(RTC_FROM_HERE, 0, nullptr, MQID_DISPOSE,
+                         new ScopedIncrement(&queues_not_done));
+    }
+  }
+  // Note: One of the message queues may have been on this thread, which is why
+  // we can't synchronously wait for queues_not_done to go to 0; we need to
+  // process messages as well.
+  while (AtomicOps::AcquireLoad(&queues_not_done) > 0) {
+    rtc::Thread::Current()->ProcessMessages(0);
+  }
+}
+
+//------------------------------------------------------------------
+// MessageQueue
+MessageQueue::MessageQueue(SocketServer* ss, bool init_queue)
+    : fPeekKeep_(false),
+      dmsgq_next_num_(0),
+      fInitialized_(false),
+      fDestroyed_(false),
+      stop_(0),
+      ss_(ss) {
+  RTC_DCHECK(ss);
+  // Currently, MessageQueue holds a socket server, and is the base class for
+  // Thread.  It seems like it makes more sense for Thread to hold the socket
+  // server, and provide it to the MessageQueue, since the Thread controls
+  // the I/O model, and MQ is agnostic to those details.  Anyway, this causes
+  // messagequeue_unittest to depend on network libraries... yuck.
+  ss_->SetMessageQueue(this);
+  if (init_queue) {
+    DoInit();
+  }
+}
+
+MessageQueue::MessageQueue(std::unique_ptr<SocketServer> ss, bool init_queue)
+    : MessageQueue(ss.get(), init_queue) {
+  own_ss_ = std::move(ss);
+}
+
+MessageQueue::~MessageQueue() {
+  DoDestroy();
+}
+
+void MessageQueue::DoInit() {
+  if (fInitialized_) {
+    return;
+  }
+
+  fInitialized_ = true;
+  MessageQueueManager::Add(this);
+}
+
+void MessageQueue::DoDestroy() {
+  if (fDestroyed_) {
+    return;
+  }
+
+  fDestroyed_ = true;
+  // The signal is done from here to ensure
+  // that it always gets called when the queue
+  // is going away.
+  SignalQueueDestroyed();
+  MessageQueueManager::Remove(this);
+  Clear(nullptr);
+
+  if (ss_) {
+    ss_->SetMessageQueue(nullptr);
+  }
+}
+
+SocketServer* MessageQueue::socketserver() {
+  return ss_;
+}
+
+void MessageQueue::WakeUpSocketServer() {
+  ss_->WakeUp();
+}
+
+void MessageQueue::Quit() {
+  AtomicOps::ReleaseStore(&stop_, 1);
+  WakeUpSocketServer();
+}
+
+bool MessageQueue::IsQuitting() {
+  return AtomicOps::AcquireLoad(&stop_) != 0;
+}
+
+bool MessageQueue::IsProcessingMessages() {
+  return !IsQuitting();
+}
+
+void MessageQueue::Restart() {
+  AtomicOps::ReleaseStore(&stop_, 0);
+}
+
+bool MessageQueue::Peek(Message *pmsg, int cmsWait) {
+  if (fPeekKeep_) {
+    *pmsg = msgPeek_;
+    return true;
+  }
+  if (!Get(pmsg, cmsWait))
+    return false;
+  msgPeek_ = *pmsg;
+  fPeekKeep_ = true;
+  return true;
+}
+
+bool MessageQueue::Get(Message *pmsg, int cmsWait, bool process_io) {
+  // Return and clear peek if present
+  // Always return the peek if it exists so there is Peek/Get symmetry
+
+  if (fPeekKeep_) {
+    *pmsg = msgPeek_;
+    fPeekKeep_ = false;
+    return true;
+  }
+
+  // Get w/wait + timer scan / dispatch + socket / event multiplexer dispatch
+
+  int64_t cmsTotal = cmsWait;
+  int64_t cmsElapsed = 0;
+  int64_t msStart = TimeMillis();
+  int64_t msCurrent = msStart;
+  while (true) {
+    // Check for sent messages
+    ReceiveSends();
+
+    // Check for posted events
+    int64_t cmsDelayNext = kForever;
+    bool first_pass = true;
+    while (true) {
+      // All queue operations need to be locked, but nothing else in this loop
+      // (specifically handling disposed message) can happen inside the crit.
+      // Otherwise, disposed MessageHandlers will cause deadlocks.
+      {
+        CritScope cs(&crit_);
+        // On the first pass, check for delayed messages that have been
+        // triggered and calculate the next trigger time.
+        if (first_pass) {
+          first_pass = false;
+          while (!dmsgq_.empty()) {
+            if (msCurrent < dmsgq_.top().msTrigger_) {
+              cmsDelayNext = TimeDiff(dmsgq_.top().msTrigger_, msCurrent);
+              break;
+            }
+            msgq_.push_back(dmsgq_.top().msg_);
+            dmsgq_.pop();
+          }
+        }
+        // Pull a message off the message queue, if available.
+        if (msgq_.empty()) {
+          break;
+        } else {
+          *pmsg = msgq_.front();
+          msgq_.pop_front();
+        }
+      }  // crit_ is released here.
+
+      // Log a warning for time-sensitive messages that we're late to deliver.
+      if (pmsg->ts_sensitive) {
+        int64_t delay = TimeDiff(msCurrent, pmsg->ts_sensitive);
+        if (delay > 0) {
+          RTC_LOG_F(LS_WARNING)
+              << "id: " << pmsg->message_id
+              << "  delay: " << (delay + kMaxMsgLatency) << "ms";
+        }
+      }
+      // If this was a dispose message, delete it and skip it.
+      if (MQID_DISPOSE == pmsg->message_id) {
+        RTC_DCHECK(nullptr == pmsg->phandler);
+        delete pmsg->pdata;
+        *pmsg = Message();
+        continue;
+      }
+      return true;
+    }
+
+    if (IsQuitting())
+      break;
+
+    // Which is shorter, the delay wait or the asked wait?
+
+    int64_t cmsNext;
+    if (cmsWait == kForever) {
+      cmsNext = cmsDelayNext;
+    } else {
+      cmsNext = std::max<int64_t>(0, cmsTotal - cmsElapsed);
+      if ((cmsDelayNext != kForever) && (cmsDelayNext < cmsNext))
+        cmsNext = cmsDelayNext;
+    }
+
+    {
+      // Wait and multiplex in the meantime
+      if (!ss_->Wait(static_cast<int>(cmsNext), process_io))
+        return false;
+    }
+
+    // If the specified timeout expired, return
+
+    msCurrent = TimeMillis();
+    cmsElapsed = TimeDiff(msCurrent, msStart);
+    if (cmsWait != kForever) {
+      if (cmsElapsed >= cmsWait)
+        return false;
+    }
+  }
+  return false;
+}
+
+void MessageQueue::ReceiveSends() {
+}
+
+void MessageQueue::Post(const Location& posted_from,
+                        MessageHandler* phandler,
+                        uint32_t id,
+                        MessageData* pdata,
+                        bool time_sensitive) {
+  if (IsQuitting())
+    return;
+
+  // Keep thread safe
+  // Add the message to the end of the queue
+  // Signal for the multiplexer to return
+
+  {
+    CritScope cs(&crit_);
+    Message msg;
+    msg.posted_from = posted_from;
+    msg.phandler = phandler;
+    msg.message_id = id;
+    msg.pdata = pdata;
+    if (time_sensitive) {
+      msg.ts_sensitive = TimeMillis() + kMaxMsgLatency;
+    }
+    msgq_.push_back(msg);
+  }
+  WakeUpSocketServer();
+}
+
+void MessageQueue::PostDelayed(const Location& posted_from,
+                               int cmsDelay,
+                               MessageHandler* phandler,
+                               uint32_t id,
+                               MessageData* pdata) {
+  return DoDelayPost(posted_from, cmsDelay, TimeAfter(cmsDelay), phandler, id,
+                     pdata);
+}
+
+void MessageQueue::PostAt(const Location& posted_from,
+                          uint32_t tstamp,
+                          MessageHandler* phandler,
+                          uint32_t id,
+                          MessageData* pdata) {
+  // This should work even if it is used (unexpectedly).
+  int64_t delay = static_cast<uint32_t>(TimeMillis()) - tstamp;
+  return DoDelayPost(posted_from, delay, tstamp, phandler, id, pdata);
+}
+
+void MessageQueue::PostAt(const Location& posted_from,
+                          int64_t tstamp,
+                          MessageHandler* phandler,
+                          uint32_t id,
+                          MessageData* pdata) {
+  return DoDelayPost(posted_from, TimeUntil(tstamp), tstamp, phandler, id,
+                     pdata);
+}
+
+void MessageQueue::DoDelayPost(const Location& posted_from,
+                               int64_t cmsDelay,
+                               int64_t tstamp,
+                               MessageHandler* phandler,
+                               uint32_t id,
+                               MessageData* pdata) {
+  if (IsQuitting()) {
+    return;
+  }
+
+  // Keep thread safe
+  // Add to the priority queue. Gets sorted soonest first.
+  // Signal for the multiplexer to return.
+
+  {
+    CritScope cs(&crit_);
+    Message msg;
+    msg.posted_from = posted_from;
+    msg.phandler = phandler;
+    msg.message_id = id;
+    msg.pdata = pdata;
+    DelayedMessage dmsg(cmsDelay, tstamp, dmsgq_next_num_, msg);
+    dmsgq_.push(dmsg);
+    // If this message queue processes 1 message every millisecond for 50 days,
+    // we will wrap this number.  Even then, only messages with identical times
+    // will be misordered, and then only briefly.  This is probably ok.
+    ++dmsgq_next_num_;
+    RTC_DCHECK_NE(0, dmsgq_next_num_);
+  }
+  WakeUpSocketServer();
+}
+
+int MessageQueue::GetDelay() {
+  CritScope cs(&crit_);
+
+  if (!msgq_.empty())
+    return 0;
+
+  if (!dmsgq_.empty()) {
+    int delay = TimeUntil(dmsgq_.top().msTrigger_);
+    if (delay < 0)
+      delay = 0;
+    return delay;
+  }
+
+  return kForever;
+}
+
+void MessageQueue::Clear(MessageHandler* phandler,
+                         uint32_t id,
+                         MessageList* removed) {
+  CritScope cs(&crit_);
+
+  // Remove messages with phandler
+
+  if (fPeekKeep_ && msgPeek_.Match(phandler, id)) {
+    if (removed) {
+      removed->push_back(msgPeek_);
+    } else {
+      delete msgPeek_.pdata;
+    }
+    fPeekKeep_ = false;
+  }
+
+  // Remove from ordered message queue
+
+  for (MessageList::iterator it = msgq_.begin(); it != msgq_.end();) {
+    if (it->Match(phandler, id)) {
+      if (removed) {
+        removed->push_back(*it);
+      } else {
+        delete it->pdata;
+      }
+      it = msgq_.erase(it);
+    } else {
+      ++it;
+    }
+  }
+
+  // Remove from priority queue. Not directly iterable, so use this approach
+
+  PriorityQueue::container_type::iterator new_end = dmsgq_.container().begin();
+  for (PriorityQueue::container_type::iterator it = new_end;
+       it != dmsgq_.container().end(); ++it) {
+    if (it->msg_.Match(phandler, id)) {
+      if (removed) {
+        removed->push_back(it->msg_);
+      } else {
+        delete it->msg_.pdata;
+      }
+    } else {
+      *new_end++ = *it;
+    }
+  }
+  dmsgq_.container().erase(new_end, dmsgq_.container().end());
+  dmsgq_.reheap();
+}
+
+void MessageQueue::Dispatch(Message *pmsg) {
+  TRACE_EVENT2("webrtc", "MessageQueue::Dispatch", "src_file_and_line",
+               pmsg->posted_from.file_and_line(), "src_func",
+               pmsg->posted_from.function_name());
+  int64_t start_time = TimeMillis();
+  pmsg->phandler->OnMessage(pmsg);
+  int64_t end_time = TimeMillis();
+  int64_t diff = TimeDiff(end_time, start_time);
+  if (diff >= kSlowDispatchLoggingThreshold) {
+    RTC_LOG(LS_INFO) << "Message took " << diff
+                     << "ms to dispatch. Posted from: "
+                     << pmsg->posted_from.ToString();
+  }
+}
+
+}  // namespace rtc
diff --git a/rtc_base/messagequeue.h b/rtc_base/messagequeue.h
new file mode 100644
index 0000000..7055643
--- /dev/null
+++ b/rtc_base/messagequeue.h
@@ -0,0 +1,329 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_MESSAGEQUEUE_H_
+#define RTC_BASE_MESSAGEQUEUE_H_
+
+#include <string.h>
+
+#include <algorithm>
+#include <list>
+#include <memory>
+#include <queue>
+#include <utility>
+#include <vector>
+
+#include "rtc_base/basictypes.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/location.h"
+#include "rtc_base/messagehandler.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "rtc_base/sigslot.h"
+#include "rtc_base/socketserver.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/timeutils.h"
+
+namespace rtc {
+
+struct Message;
+class MessageQueue;
+
+// MessageQueueManager does cleanup of of message queues
+
+class MessageQueueManager {
+ public:
+  static void Add(MessageQueue *message_queue);
+  static void Remove(MessageQueue *message_queue);
+  static void Clear(MessageHandler *handler);
+
+  // For testing purposes, we expose whether or not the MessageQueueManager
+  // instance has been initialized. It has no other use relative to the rest of
+  // the functions of this class, which auto-initialize the underlying
+  // MessageQueueManager instance when necessary.
+  static bool IsInitialized();
+
+  // Mainly for testing purposes, for use with a simulated clock.
+  // Ensures that all message queues have processed delayed messages
+  // up until the current point in time.
+  static void ProcessAllMessageQueues();
+
+ private:
+  static MessageQueueManager* Instance();
+
+  MessageQueueManager();
+  ~MessageQueueManager();
+
+  void AddInternal(MessageQueue *message_queue);
+  void RemoveInternal(MessageQueue *message_queue);
+  void ClearInternal(MessageHandler *handler);
+  void ProcessAllMessageQueuesInternal();
+
+  static MessageQueueManager* instance_;
+  // This list contains all live MessageQueues.
+  std::vector<MessageQueue*> message_queues_ RTC_GUARDED_BY(crit_);
+
+  // Methods that don't modify the list of message queues may be called in a
+  // re-entrant fashion. "processing_" keeps track of the depth of re-entrant
+  // calls.
+  CriticalSection crit_;
+  size_t processing_ RTC_GUARDED_BY(crit_);
+};
+
+// Derive from this for specialized data
+// App manages lifetime, except when messages are purged
+
+class MessageData {
+ public:
+  MessageData() {}
+  virtual ~MessageData() {}
+};
+
+template <class T>
+class TypedMessageData : public MessageData {
+ public:
+  explicit TypedMessageData(const T& data) : data_(data) { }
+  const T& data() const { return data_; }
+  T& data() { return data_; }
+ private:
+  T data_;
+};
+
+// Like TypedMessageData, but for pointers that require a delete.
+template <class T>
+class ScopedMessageData : public MessageData {
+ public:
+  explicit ScopedMessageData(std::unique_ptr<T> data)
+      : data_(std::move(data)) {}
+  // Deprecated.
+  // TODO(deadbeef): Remove this once downstream applications stop using it.
+  explicit ScopedMessageData(T* data) : data_(data) {}
+  // Deprecated.
+  // TODO(deadbeef): Returning a reference to a unique ptr? Why. Get rid of
+  // this once downstream applications stop using it, then rename inner_data to
+  // just data.
+  const std::unique_ptr<T>& data() const { return data_; }
+  std::unique_ptr<T>& data() { return data_; }
+
+  const T& inner_data() const { return *data_; }
+  T& inner_data() { return *data_; }
+
+ private:
+  std::unique_ptr<T> data_;
+};
+
+// Like ScopedMessageData, but for reference counted pointers.
+template <class T>
+class ScopedRefMessageData : public MessageData {
+ public:
+  explicit ScopedRefMessageData(T* data) : data_(data) { }
+  const scoped_refptr<T>& data() const { return data_; }
+  scoped_refptr<T>& data() { return data_; }
+ private:
+  scoped_refptr<T> data_;
+};
+
+template<class T>
+inline MessageData* WrapMessageData(const T& data) {
+  return new TypedMessageData<T>(data);
+}
+
+template<class T>
+inline const T& UseMessageData(MessageData* data) {
+  return static_cast< TypedMessageData<T>* >(data)->data();
+}
+
+template<class T>
+class DisposeData : public MessageData {
+ public:
+  explicit DisposeData(T* data) : data_(data) { }
+  virtual ~DisposeData() { delete data_; }
+ private:
+  T* data_;
+};
+
+const uint32_t MQID_ANY = static_cast<uint32_t>(-1);
+const uint32_t MQID_DISPOSE = static_cast<uint32_t>(-2);
+
+// No destructor
+
+struct Message {
+  Message()
+      : phandler(nullptr), message_id(0), pdata(nullptr), ts_sensitive(0) {}
+  inline bool Match(MessageHandler* handler, uint32_t id) const {
+    return (handler == nullptr || handler == phandler) &&
+           (id == MQID_ANY || id == message_id);
+  }
+  Location posted_from;
+  MessageHandler *phandler;
+  uint32_t message_id;
+  MessageData *pdata;
+  int64_t ts_sensitive;
+};
+
+typedef std::list<Message> MessageList;
+
+// DelayedMessage goes into a priority queue, sorted by trigger time.  Messages
+// with the same trigger time are processed in num_ (FIFO) order.
+
+class DelayedMessage {
+ public:
+  DelayedMessage(int64_t delay,
+                 int64_t trigger,
+                 uint32_t num,
+                 const Message& msg)
+      : cmsDelay_(delay), msTrigger_(trigger), num_(num), msg_(msg) {}
+
+  bool operator< (const DelayedMessage& dmsg) const {
+    return (dmsg.msTrigger_ < msTrigger_)
+           || ((dmsg.msTrigger_ == msTrigger_) && (dmsg.num_ < num_));
+  }
+
+  int64_t cmsDelay_;  // for debugging
+  int64_t msTrigger_;
+  uint32_t num_;
+  Message msg_;
+};
+
+class MessageQueue {
+ public:
+  static const int kForever = -1;
+
+  // Create a new MessageQueue and optionally assign it to the passed
+  // SocketServer. Subclasses that override Clear should pass false for
+  // init_queue and call DoInit() from their constructor to prevent races
+  // with the MessageQueueManager using the object while the vtable is still
+  // being created.
+  MessageQueue(SocketServer* ss, bool init_queue);
+  MessageQueue(std::unique_ptr<SocketServer> ss, bool init_queue);
+
+  // NOTE: SUBCLASSES OF MessageQueue THAT OVERRIDE Clear MUST CALL
+  // DoDestroy() IN THEIR DESTRUCTORS! This is required to avoid a data race
+  // between the destructor modifying the vtable, and the MessageQueueManager
+  // calling Clear on the object from a different thread.
+  virtual ~MessageQueue();
+
+  SocketServer* socketserver();
+
+  // Note: The behavior of MessageQueue has changed.  When a MQ is stopped,
+  // futher Posts and Sends will fail.  However, any pending Sends and *ready*
+  // Posts (as opposed to unexpired delayed Posts) will be delivered before
+  // Get (or Peek) returns false.  By guaranteeing delivery of those messages,
+  // we eliminate the race condition when an MessageHandler and MessageQueue
+  // may be destroyed independently of each other.
+  virtual void Quit();
+  virtual bool IsQuitting();
+  virtual void Restart();
+  // Not all message queues actually process messages (such as SignalThread).
+  // In those cases, it's important to know, before posting, that it won't be
+  // Processed.  Normally, this would be true until IsQuitting() is true.
+  virtual bool IsProcessingMessages();
+
+  // Get() will process I/O until:
+  //  1) A message is available (returns true)
+  //  2) cmsWait seconds have elapsed (returns false)
+  //  3) Stop() is called (returns false)
+  virtual bool Get(Message *pmsg, int cmsWait = kForever,
+                   bool process_io = true);
+  virtual bool Peek(Message *pmsg, int cmsWait = 0);
+  virtual void Post(const Location& posted_from,
+                    MessageHandler* phandler,
+                    uint32_t id = 0,
+                    MessageData* pdata = nullptr,
+                    bool time_sensitive = false);
+  virtual void PostDelayed(const Location& posted_from,
+                           int cmsDelay,
+                           MessageHandler* phandler,
+                           uint32_t id = 0,
+                           MessageData* pdata = nullptr);
+  virtual void PostAt(const Location& posted_from,
+                      int64_t tstamp,
+                      MessageHandler* phandler,
+                      uint32_t id = 0,
+                      MessageData* pdata = nullptr);
+  // TODO(honghaiz): Remove this when all the dependencies are removed.
+  virtual void PostAt(const Location& posted_from,
+                      uint32_t tstamp,
+                      MessageHandler* phandler,
+                      uint32_t id = 0,
+                      MessageData* pdata = nullptr);
+  virtual void Clear(MessageHandler* phandler,
+                     uint32_t id = MQID_ANY,
+                     MessageList* removed = nullptr);
+  virtual void Dispatch(Message *pmsg);
+  virtual void ReceiveSends();
+
+  // Amount of time until the next message can be retrieved
+  virtual int GetDelay();
+
+  bool empty() const { return size() == 0u; }
+  size_t size() const {
+    CritScope cs(&crit_);  // msgq_.size() is not thread safe.
+    return msgq_.size() + dmsgq_.size() + (fPeekKeep_ ? 1u : 0u);
+  }
+
+  // Internally posts a message which causes the doomed object to be deleted
+  template<class T> void Dispose(T* doomed) {
+    if (doomed) {
+      Post(RTC_FROM_HERE, nullptr, MQID_DISPOSE, new DisposeData<T>(doomed));
+    }
+  }
+
+  // When this signal is sent out, any references to this queue should
+  // no longer be used.
+  sigslot::signal0<> SignalQueueDestroyed;
+
+ protected:
+  class PriorityQueue : public std::priority_queue<DelayedMessage> {
+   public:
+    container_type& container() { return c; }
+    void reheap() { make_heap(c.begin(), c.end(), comp); }
+  };
+
+  void DoDelayPost(const Location& posted_from,
+                   int64_t cmsDelay,
+                   int64_t tstamp,
+                   MessageHandler* phandler,
+                   uint32_t id,
+                   MessageData* pdata);
+
+  // Perform initialization, subclasses must call this from their constructor
+  // if false was passed as init_queue to the MessageQueue constructor.
+  void DoInit();
+
+  // Perform cleanup, subclasses that override Clear must call this from the
+  // destructor.
+  void DoDestroy();
+
+  void WakeUpSocketServer();
+
+  bool fPeekKeep_;
+  Message msgPeek_;
+  MessageList msgq_ RTC_GUARDED_BY(crit_);
+  PriorityQueue dmsgq_ RTC_GUARDED_BY(crit_);
+  uint32_t dmsgq_next_num_ RTC_GUARDED_BY(crit_);
+  CriticalSection crit_;
+  bool fInitialized_;
+  bool fDestroyed_;
+
+ private:
+  volatile int stop_;
+
+  // The SocketServer might not be owned by MessageQueue.
+  SocketServer* const ss_;
+  // Used if SocketServer ownership lies with |this|.
+  std::unique_ptr<SocketServer> own_ss_;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(MessageQueue);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_MESSAGEQUEUE_H_
diff --git a/rtc_base/messagequeue_unittest.cc b/rtc_base/messagequeue_unittest.cc
new file mode 100644
index 0000000..9e1ba63
--- /dev/null
+++ b/rtc_base/messagequeue_unittest.cc
@@ -0,0 +1,248 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/messagequeue.h"
+
+#include <functional>
+
+#include "rtc_base/atomicops.h"
+#include "rtc_base/bind.h"
+#include "rtc_base/event.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/nullsocketserver.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/refcountedobject.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/timeutils.h"
+
+using namespace rtc;
+
+class MessageQueueTest: public testing::Test, public MessageQueue {
+ public:
+  MessageQueueTest() : MessageQueue(SocketServer::CreateDefault(), true) {}
+  bool IsLocked_Worker() {
+    if (!crit_.TryEnter()) {
+      return true;
+    }
+    crit_.Leave();
+    return false;
+  }
+  bool IsLocked() {
+    // We have to do this on a worker thread, or else the TryEnter will
+    // succeed, since our critical sections are reentrant.
+    std::unique_ptr<Thread> worker(Thread::CreateWithSocketServer());
+    worker->Start();
+    return worker->Invoke<bool>(
+        RTC_FROM_HERE, rtc::Bind(&MessageQueueTest::IsLocked_Worker, this));
+  }
+};
+
+struct DeletedLockChecker {
+  DeletedLockChecker(MessageQueueTest* test, bool* was_locked, bool* deleted)
+      : test(test), was_locked(was_locked), deleted(deleted) { }
+  ~DeletedLockChecker() {
+    *deleted = true;
+    *was_locked = test->IsLocked();
+  }
+  MessageQueueTest* test;
+  bool* was_locked;
+  bool* deleted;
+};
+
+static void DelayedPostsWithIdenticalTimesAreProcessedInFifoOrder(
+    MessageQueue* q) {
+  EXPECT_TRUE(q != nullptr);
+  int64_t now = TimeMillis();
+  q->PostAt(RTC_FROM_HERE, now, nullptr, 3);
+  q->PostAt(RTC_FROM_HERE, now - 2, nullptr, 0);
+  q->PostAt(RTC_FROM_HERE, now - 1, nullptr, 1);
+  q->PostAt(RTC_FROM_HERE, now, nullptr, 4);
+  q->PostAt(RTC_FROM_HERE, now - 1, nullptr, 2);
+
+  Message msg;
+  for (size_t i=0; i<5; ++i) {
+    memset(&msg, 0, sizeof(msg));
+    EXPECT_TRUE(q->Get(&msg, 0));
+    EXPECT_EQ(i, msg.message_id);
+  }
+
+  EXPECT_FALSE(q->Get(&msg, 0));  // No more messages
+}
+
+TEST_F(MessageQueueTest,
+       DelayedPostsWithIdenticalTimesAreProcessedInFifoOrder) {
+  MessageQueue q(SocketServer::CreateDefault(), true);
+  DelayedPostsWithIdenticalTimesAreProcessedInFifoOrder(&q);
+
+  NullSocketServer nullss;
+  MessageQueue q_nullss(&nullss, true);
+  DelayedPostsWithIdenticalTimesAreProcessedInFifoOrder(&q_nullss);
+}
+
+TEST_F(MessageQueueTest, DisposeNotLocked) {
+  bool was_locked = true;
+  bool deleted = false;
+  DeletedLockChecker* d = new DeletedLockChecker(this, &was_locked, &deleted);
+  Dispose(d);
+  Message msg;
+  EXPECT_FALSE(Get(&msg, 0));
+  EXPECT_TRUE(deleted);
+  EXPECT_FALSE(was_locked);
+}
+
+class DeletedMessageHandler : public MessageHandler {
+ public:
+  explicit DeletedMessageHandler(bool* deleted) : deleted_(deleted) { }
+  ~DeletedMessageHandler() override { *deleted_ = true; }
+  void OnMessage(Message* msg) override {}
+
+ private:
+  bool* deleted_;
+};
+
+TEST_F(MessageQueueTest, DiposeHandlerWithPostedMessagePending) {
+  bool deleted = false;
+  DeletedMessageHandler *handler = new DeletedMessageHandler(&deleted);
+  // First, post a dispose.
+  Dispose(handler);
+  // Now, post a message, which should *not* be returned by Get().
+  Post(RTC_FROM_HERE, handler, 1);
+  Message msg;
+  EXPECT_FALSE(Get(&msg, 0));
+  EXPECT_TRUE(deleted);
+}
+
+struct UnwrapMainThreadScope {
+  UnwrapMainThreadScope() : rewrap_(Thread::Current() != nullptr) {
+    if (rewrap_) ThreadManager::Instance()->UnwrapCurrentThread();
+  }
+  ~UnwrapMainThreadScope() {
+    if (rewrap_) ThreadManager::Instance()->WrapCurrentThread();
+  }
+ private:
+  bool rewrap_;
+};
+
+TEST(MessageQueueManager, Clear) {
+  UnwrapMainThreadScope s;
+  if (MessageQueueManager::IsInitialized()) {
+    RTC_LOG(LS_INFO)
+        << "Unable to run MessageQueueManager::Clear test, since the "
+        << "MessageQueueManager was already initialized by some "
+        << "other test in this run.";
+    return;
+  }
+  bool deleted = false;
+  DeletedMessageHandler* handler = new DeletedMessageHandler(&deleted);
+  delete handler;
+  EXPECT_TRUE(deleted);
+  EXPECT_FALSE(MessageQueueManager::IsInitialized());
+}
+
+// Ensure that ProcessAllMessageQueues does its essential function; process
+// all messages (both delayed and non delayed) up until the current time, on
+// all registered message queues.
+TEST(MessageQueueManager, ProcessAllMessageQueues) {
+  Event entered_process_all_message_queues(true, false);
+  auto a = Thread::CreateWithSocketServer();
+  auto b = Thread::CreateWithSocketServer();
+  a->Start();
+  b->Start();
+
+  volatile int messages_processed = 0;
+  FunctorMessageHandler<void, std::function<void()>> incrementer(
+      [&messages_processed, &entered_process_all_message_queues] {
+        // Wait for event as a means to ensure Increment doesn't occur outside
+        // of ProcessAllMessageQueues. The event is set by a message posted to
+        // the main thread, which is guaranteed to be handled inside
+        // ProcessAllMessageQueues.
+        entered_process_all_message_queues.Wait(Event::kForever);
+        AtomicOps::Increment(&messages_processed);
+      });
+  FunctorMessageHandler<void, std::function<void()>> event_signaler(
+      [&entered_process_all_message_queues] {
+        entered_process_all_message_queues.Set();
+      });
+
+  // Post messages (both delayed and non delayed) to both threads.
+  a->Post(RTC_FROM_HERE, &incrementer);
+  b->Post(RTC_FROM_HERE, &incrementer);
+  a->PostDelayed(RTC_FROM_HERE, 0, &incrementer);
+  b->PostDelayed(RTC_FROM_HERE, 0, &incrementer);
+  rtc::Thread::Current()->Post(RTC_FROM_HERE, &event_signaler);
+
+  MessageQueueManager::ProcessAllMessageQueues();
+  EXPECT_EQ(4, AtomicOps::AcquireLoad(&messages_processed));
+}
+
+// Test that ProcessAllMessageQueues doesn't hang if a thread is quitting.
+TEST(MessageQueueManager, ProcessAllMessageQueuesWithQuittingThread) {
+  auto t = Thread::CreateWithSocketServer();
+  t->Start();
+  t->Quit();
+  MessageQueueManager::ProcessAllMessageQueues();
+}
+
+// Test that ProcessAllMessageQueues doesn't hang if a queue clears its
+// messages.
+TEST(MessageQueueManager, ProcessAllMessageQueuesWithClearedQueue) {
+  Event entered_process_all_message_queues(true, false);
+  auto t = Thread::CreateWithSocketServer();
+  t->Start();
+
+  FunctorMessageHandler<void, std::function<void()>> clearer(
+      [&entered_process_all_message_queues] {
+        // Wait for event as a means to ensure Clear doesn't occur outside of
+        // ProcessAllMessageQueues. The event is set by a message posted to the
+        // main thread, which is guaranteed to be handled inside
+        // ProcessAllMessageQueues.
+        entered_process_all_message_queues.Wait(Event::kForever);
+        rtc::Thread::Current()->Clear(nullptr);
+      });
+  FunctorMessageHandler<void, std::function<void()>> event_signaler(
+      [&entered_process_all_message_queues] {
+        entered_process_all_message_queues.Set();
+      });
+
+  // Post messages (both delayed and non delayed) to both threads.
+  t->Post(RTC_FROM_HERE, &clearer);
+  rtc::Thread::Current()->Post(RTC_FROM_HERE, &event_signaler);
+  MessageQueueManager::ProcessAllMessageQueues();
+}
+
+class RefCountedHandler
+  : public MessageHandler,
+    public rtc::RefCountInterface {
+ public:
+  void OnMessage(Message* msg) override {}
+};
+
+class EmptyHandler : public MessageHandler {
+ public:
+  void OnMessage(Message* msg) override {}
+};
+
+TEST(MessageQueueManager, ClearReentrant) {
+  std::unique_ptr<Thread> t(Thread::Create());
+  EmptyHandler handler;
+  RefCountedHandler* inner_handler(
+      new rtc::RefCountedObject<RefCountedHandler>());
+  // When the empty handler is destroyed, it will clear messages queued for
+  // itself. The message to be cleared itself wraps a MessageHandler object
+  // (RefCountedHandler) so this will cause the message queue to be cleared
+  // again in a re-entrant fashion, which previously triggered a DCHECK.
+  // The inner handler will be removed in a re-entrant fashion from the
+  // message queue of the thread while the outer handler is removed, verifying
+  // that the iterator is not invalidated in "MessageQueue::Clear".
+  t->Post(RTC_FROM_HERE, inner_handler, 0);
+  t->Post(RTC_FROM_HERE, &handler, 0,
+          new ScopedRefMessageData<RefCountedHandler>(inner_handler));
+}
diff --git a/rtc_base/nat_unittest.cc b/rtc_base/nat_unittest.cc
new file mode 100644
index 0000000..d68df1d
--- /dev/null
+++ b/rtc_base/nat_unittest.cc
@@ -0,0 +1,389 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+#include <memory>
+#include <string>
+
+#include "rtc_base/asynctcpsocket.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/natserver.h"
+#include "rtc_base/natsocketfactory.h"
+#include "rtc_base/nethelpers.h"
+#include "rtc_base/network.h"
+#include "rtc_base/physicalsocketserver.h"
+#include "rtc_base/ptr_util.h"
+#include "rtc_base/testclient.h"
+#include "rtc_base/virtualsocketserver.h"
+
+using namespace rtc;
+
+bool CheckReceive(
+    TestClient* client, bool should_receive, const char* buf, size_t size) {
+  return (should_receive) ?
+      client->CheckNextPacket(buf, size, 0) :
+      client->CheckNoPacket();
+}
+
+TestClient* CreateTestClient(
+      SocketFactory* factory, const SocketAddress& local_addr) {
+  return new TestClient(
+      WrapUnique(AsyncUDPSocket::Create(factory, local_addr)));
+}
+
+TestClient* CreateTCPTestClient(AsyncSocket* socket) {
+  return new TestClient(MakeUnique<AsyncTCPSocket>(socket, false));
+}
+
+// Tests that when sending from internal_addr to external_addrs through the
+// NAT type specified by nat_type, all external addrs receive the sent packet
+// and, if exp_same is true, all use the same mapped-address on the NAT.
+void TestSend(
+      SocketServer* internal, const SocketAddress& internal_addr,
+      SocketServer* external, const SocketAddress external_addrs[4],
+      NATType nat_type, bool exp_same) {
+  Thread th_int(internal);
+  Thread th_ext(external);
+
+  SocketAddress server_addr = internal_addr;
+  server_addr.SetPort(0);  // Auto-select a port
+  NATServer* nat = new NATServer(nat_type, internal, server_addr, server_addr,
+                                 external, external_addrs[0]);
+  NATSocketFactory* natsf = new NATSocketFactory(internal,
+                                                 nat->internal_udp_address(),
+                                                 nat->internal_tcp_address());
+
+  TestClient* in = CreateTestClient(natsf, internal_addr);
+  TestClient* out[4];
+  for (int i = 0; i < 4; i++)
+    out[i] = CreateTestClient(external, external_addrs[i]);
+
+  th_int.Start();
+  th_ext.Start();
+
+  const char* buf = "filter_test";
+  size_t len = strlen(buf);
+
+  in->SendTo(buf, len, out[0]->address());
+  SocketAddress trans_addr;
+  EXPECT_TRUE(out[0]->CheckNextPacket(buf, len, &trans_addr));
+
+  for (int i = 1; i < 4; i++) {
+    in->SendTo(buf, len, out[i]->address());
+    SocketAddress trans_addr2;
+    EXPECT_TRUE(out[i]->CheckNextPacket(buf, len, &trans_addr2));
+    bool are_same = (trans_addr == trans_addr2);
+    ASSERT_EQ(are_same, exp_same) << "same translated address";
+    ASSERT_NE(AF_UNSPEC, trans_addr.family());
+    ASSERT_NE(AF_UNSPEC, trans_addr2.family());
+  }
+
+  th_int.Stop();
+  th_ext.Stop();
+
+  delete nat;
+  delete natsf;
+  delete in;
+  for (int i = 0; i < 4; i++)
+    delete out[i];
+}
+
+// Tests that when sending from external_addrs to internal_addr, the packet
+// is delivered according to the specified filter_ip and filter_port rules.
+void TestRecv(
+      SocketServer* internal, const SocketAddress& internal_addr,
+      SocketServer* external, const SocketAddress external_addrs[4],
+      NATType nat_type, bool filter_ip, bool filter_port) {
+  Thread th_int(internal);
+  Thread th_ext(external);
+
+  SocketAddress server_addr = internal_addr;
+  server_addr.SetPort(0);  // Auto-select a port
+  NATServer* nat = new NATServer(nat_type, internal, server_addr, server_addr,
+                                 external, external_addrs[0]);
+  NATSocketFactory* natsf = new NATSocketFactory(internal,
+                                                 nat->internal_udp_address(),
+                                                 nat->internal_tcp_address());
+
+  TestClient* in = CreateTestClient(natsf, internal_addr);
+  TestClient* out[4];
+  for (int i = 0; i < 4; i++)
+    out[i] = CreateTestClient(external, external_addrs[i]);
+
+  th_int.Start();
+  th_ext.Start();
+
+  const char* buf = "filter_test";
+  size_t len = strlen(buf);
+
+  in->SendTo(buf, len, out[0]->address());
+  SocketAddress trans_addr;
+  EXPECT_TRUE(out[0]->CheckNextPacket(buf, len, &trans_addr));
+
+  out[1]->SendTo(buf, len, trans_addr);
+  EXPECT_TRUE(CheckReceive(in, !filter_ip, buf, len));
+
+  out[2]->SendTo(buf, len, trans_addr);
+  EXPECT_TRUE(CheckReceive(in, !filter_port, buf, len));
+
+  out[3]->SendTo(buf, len, trans_addr);
+  EXPECT_TRUE(CheckReceive(in, !filter_ip && !filter_port, buf, len));
+
+  th_int.Stop();
+  th_ext.Stop();
+
+  delete nat;
+  delete natsf;
+  delete in;
+  for (int i = 0; i < 4; i++)
+    delete out[i];
+}
+
+// Tests that NATServer allocates bindings properly.
+void TestBindings(
+    SocketServer* internal, const SocketAddress& internal_addr,
+    SocketServer* external, const SocketAddress external_addrs[4]) {
+  TestSend(internal, internal_addr, external, external_addrs,
+           NAT_OPEN_CONE, true);
+  TestSend(internal, internal_addr, external, external_addrs,
+           NAT_ADDR_RESTRICTED, true);
+  TestSend(internal, internal_addr, external, external_addrs,
+           NAT_PORT_RESTRICTED, true);
+  TestSend(internal, internal_addr, external, external_addrs,
+           NAT_SYMMETRIC, false);
+}
+
+// Tests that NATServer filters packets properly.
+void TestFilters(
+    SocketServer* internal, const SocketAddress& internal_addr,
+    SocketServer* external, const SocketAddress external_addrs[4]) {
+  TestRecv(internal, internal_addr, external, external_addrs,
+           NAT_OPEN_CONE, false, false);
+  TestRecv(internal, internal_addr, external, external_addrs,
+           NAT_ADDR_RESTRICTED, true, false);
+  TestRecv(internal, internal_addr, external, external_addrs,
+           NAT_PORT_RESTRICTED, true, true);
+  TestRecv(internal, internal_addr, external, external_addrs,
+           NAT_SYMMETRIC, true, true);
+}
+
+bool TestConnectivity(const SocketAddress& src, const IPAddress& dst) {
+  // The physical NAT tests require connectivity to the selected ip from the
+  // internal address used for the NAT. Things like firewalls can break that, so
+  // check to see if it's worth even trying with this ip.
+  std::unique_ptr<PhysicalSocketServer> pss(new PhysicalSocketServer());
+  std::unique_ptr<AsyncSocket> client(
+      pss->CreateAsyncSocket(src.family(), SOCK_DGRAM));
+  std::unique_ptr<AsyncSocket> server(
+      pss->CreateAsyncSocket(src.family(), SOCK_DGRAM));
+  if (client->Bind(SocketAddress(src.ipaddr(), 0)) != 0 ||
+      server->Bind(SocketAddress(dst, 0)) != 0) {
+    return false;
+  }
+  const char* buf = "hello other socket";
+  size_t len = strlen(buf);
+  int sent = client->SendTo(buf, len, server->GetLocalAddress());
+  SocketAddress addr;
+  const size_t kRecvBufSize = 64;
+  char recvbuf[kRecvBufSize];
+  Thread::Current()->SleepMs(100);
+  int received = server->RecvFrom(recvbuf, kRecvBufSize, &addr, nullptr);
+  return received == sent && ::memcmp(buf, recvbuf, len) == 0;
+}
+
+void TestPhysicalInternal(const SocketAddress& int_addr) {
+  BasicNetworkManager network_manager;
+  network_manager.set_ipv6_enabled(true);
+  network_manager.StartUpdating();
+  // Process pending messages so the network list is updated.
+  Thread::Current()->ProcessMessages(0);
+
+  std::vector<Network*> networks;
+  network_manager.GetNetworks(&networks);
+  networks.erase(std::remove_if(networks.begin(), networks.end(),
+                                [](rtc::Network* network) {
+                                  return rtc::kDefaultNetworkIgnoreMask &
+                                         network->type();
+                                }),
+                 networks.end());
+  if (networks.empty()) {
+    RTC_LOG(LS_WARNING) << "Not enough network adapters for test.";
+    return;
+  }
+
+  SocketAddress ext_addr1(int_addr);
+  SocketAddress ext_addr2;
+  // Find an available IP with matching family. The test breaks if int_addr
+  // can't talk to ip, so check for connectivity as well.
+  for (std::vector<Network*>::iterator it = networks.begin();
+      it != networks.end(); ++it) {
+    const IPAddress& ip = (*it)->GetBestIP();
+    if (ip.family() == int_addr.family() && TestConnectivity(int_addr, ip)) {
+      ext_addr2.SetIP(ip);
+      break;
+    }
+  }
+  if (ext_addr2.IsNil()) {
+    RTC_LOG(LS_WARNING) << "No available IP of same family as " << int_addr;
+    return;
+  }
+
+  RTC_LOG(LS_INFO) << "selected ip " << ext_addr2.ipaddr();
+
+  SocketAddress ext_addrs[4] = {
+      SocketAddress(ext_addr1),
+      SocketAddress(ext_addr2),
+      SocketAddress(ext_addr1),
+      SocketAddress(ext_addr2)
+  };
+
+  std::unique_ptr<PhysicalSocketServer> int_pss(new PhysicalSocketServer());
+  std::unique_ptr<PhysicalSocketServer> ext_pss(new PhysicalSocketServer());
+
+  TestBindings(int_pss.get(), int_addr, ext_pss.get(), ext_addrs);
+  TestFilters(int_pss.get(), int_addr, ext_pss.get(), ext_addrs);
+}
+
+TEST(NatTest, TestPhysicalIPv4) {
+  TestPhysicalInternal(SocketAddress("127.0.0.1", 0));
+}
+
+TEST(NatTest, TestPhysicalIPv6) {
+  if (HasIPv6Enabled()) {
+    TestPhysicalInternal(SocketAddress("::1", 0));
+  } else {
+    RTC_LOG(LS_WARNING) << "No IPv6, skipping";
+  }
+}
+
+namespace {
+
+class TestVirtualSocketServer : public VirtualSocketServer {
+ public:
+  // Expose this publicly
+  IPAddress GetNextIP(int af) { return VirtualSocketServer::GetNextIP(af); }
+};
+
+}  // namespace
+
+void TestVirtualInternal(int family) {
+  std::unique_ptr<TestVirtualSocketServer> int_vss(
+      new TestVirtualSocketServer());
+  std::unique_ptr<TestVirtualSocketServer> ext_vss(
+      new TestVirtualSocketServer());
+
+  SocketAddress int_addr;
+  SocketAddress ext_addrs[4];
+  int_addr.SetIP(int_vss->GetNextIP(family));
+  ext_addrs[0].SetIP(ext_vss->GetNextIP(int_addr.family()));
+  ext_addrs[1].SetIP(ext_vss->GetNextIP(int_addr.family()));
+  ext_addrs[2].SetIP(ext_addrs[0].ipaddr());
+  ext_addrs[3].SetIP(ext_addrs[1].ipaddr());
+
+  TestBindings(int_vss.get(), int_addr, ext_vss.get(), ext_addrs);
+  TestFilters(int_vss.get(), int_addr, ext_vss.get(), ext_addrs);
+}
+
+TEST(NatTest, TestVirtualIPv4) {
+  TestVirtualInternal(AF_INET);
+}
+
+TEST(NatTest, TestVirtualIPv6) {
+  if (HasIPv6Enabled()) {
+    TestVirtualInternal(AF_INET6);
+  } else {
+    RTC_LOG(LS_WARNING) << "No IPv6, skipping";
+  }
+}
+
+class NatTcpTest : public testing::Test, public sigslot::has_slots<> {
+ public:
+  NatTcpTest()
+      : int_addr_("192.168.0.1", 0),
+        ext_addr_("10.0.0.1", 0),
+        connected_(false),
+        int_vss_(new TestVirtualSocketServer()),
+        ext_vss_(new TestVirtualSocketServer()),
+        int_thread_(new Thread(int_vss_.get())),
+        ext_thread_(new Thread(ext_vss_.get())),
+        nat_(new NATServer(NAT_OPEN_CONE,
+                           int_vss_.get(),
+                           int_addr_,
+                           int_addr_,
+                           ext_vss_.get(),
+                           ext_addr_)),
+        natsf_(new NATSocketFactory(int_vss_.get(),
+                                    nat_->internal_udp_address(),
+                                    nat_->internal_tcp_address())) {
+    int_thread_->Start();
+    ext_thread_->Start();
+  }
+
+  void OnConnectEvent(AsyncSocket* socket) {
+    connected_ = true;
+  }
+
+  void OnAcceptEvent(AsyncSocket* socket) {
+    accepted_.reset(server_->Accept(nullptr));
+  }
+
+  void OnCloseEvent(AsyncSocket* socket, int error) {
+  }
+
+  void ConnectEvents() {
+    server_->SignalReadEvent.connect(this, &NatTcpTest::OnAcceptEvent);
+    client_->SignalConnectEvent.connect(this, &NatTcpTest::OnConnectEvent);
+  }
+
+  SocketAddress int_addr_;
+  SocketAddress ext_addr_;
+  bool connected_;
+  std::unique_ptr<TestVirtualSocketServer> int_vss_;
+  std::unique_ptr<TestVirtualSocketServer> ext_vss_;
+  std::unique_ptr<Thread> int_thread_;
+  std::unique_ptr<Thread> ext_thread_;
+  std::unique_ptr<NATServer> nat_;
+  std::unique_ptr<NATSocketFactory> natsf_;
+  std::unique_ptr<AsyncSocket> client_;
+  std::unique_ptr<AsyncSocket> server_;
+  std::unique_ptr<AsyncSocket> accepted_;
+};
+
+TEST_F(NatTcpTest, DISABLED_TestConnectOut) {
+  server_.reset(ext_vss_->CreateAsyncSocket(SOCK_STREAM));
+  server_->Bind(ext_addr_);
+  server_->Listen(5);
+
+  client_.reset(natsf_->CreateAsyncSocket(SOCK_STREAM));
+  EXPECT_GE(0, client_->Bind(int_addr_));
+  EXPECT_GE(0, client_->Connect(server_->GetLocalAddress()));
+
+  ConnectEvents();
+
+  EXPECT_TRUE_WAIT(connected_, 1000);
+  EXPECT_EQ(client_->GetRemoteAddress(), server_->GetLocalAddress());
+  EXPECT_EQ(accepted_->GetRemoteAddress().ipaddr(), ext_addr_.ipaddr());
+
+  std::unique_ptr<rtc::TestClient> in(CreateTCPTestClient(client_.release()));
+  std::unique_ptr<rtc::TestClient> out(
+      CreateTCPTestClient(accepted_.release()));
+
+  const char* buf = "test_packet";
+  size_t len = strlen(buf);
+
+  in->Send(buf, len);
+  SocketAddress trans_addr;
+  EXPECT_TRUE(out->CheckNextPacket(buf, len, &trans_addr));
+
+  out->Send(buf, len);
+  EXPECT_TRUE(in->CheckNextPacket(buf, len, &trans_addr));
+}
+// #endif
diff --git a/rtc_base/natserver.cc b/rtc_base/natserver.cc
new file mode 100644
index 0000000..bf983fe
--- /dev/null
+++ b/rtc_base/natserver.cc
@@ -0,0 +1,252 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/natserver.h"
+#include "rtc_base/natsocketfactory.h"
+#include "rtc_base/socketadapters.h"
+
+namespace rtc {
+
+RouteCmp::RouteCmp(NAT* nat) : symmetric(nat->IsSymmetric()) {
+}
+
+size_t RouteCmp::operator()(const SocketAddressPair& r) const {
+  size_t h = r.source().Hash();
+  if (symmetric)
+    h ^= r.destination().Hash();
+  return h;
+}
+
+bool RouteCmp::operator()(
+      const SocketAddressPair& r1, const SocketAddressPair& r2) const {
+  if (r1.source() < r2.source())
+    return true;
+  if (r2.source() < r1.source())
+    return false;
+  if (symmetric && (r1.destination() < r2.destination()))
+    return true;
+  if (symmetric && (r2.destination() < r1.destination()))
+    return false;
+  return false;
+}
+
+AddrCmp::AddrCmp(NAT* nat)
+    : use_ip(nat->FiltersIP()), use_port(nat->FiltersPort()) {
+}
+
+size_t AddrCmp::operator()(const SocketAddress& a) const {
+  size_t h = 0;
+  if (use_ip)
+    h ^= HashIP(a.ipaddr());
+  if (use_port)
+    h ^= a.port() | (a.port() << 16);
+  return h;
+}
+
+bool AddrCmp::operator()(
+      const SocketAddress& a1, const SocketAddress& a2) const {
+  if (use_ip && (a1.ipaddr() < a2.ipaddr()))
+    return true;
+  if (use_ip && (a2.ipaddr() < a1.ipaddr()))
+    return false;
+  if (use_port && (a1.port() < a2.port()))
+    return true;
+  if (use_port && (a2.port() < a1.port()))
+    return false;
+  return false;
+}
+
+// Proxy socket that will capture the external destination address intended for
+// a TCP connection to the NAT server.
+class NATProxyServerSocket : public AsyncProxyServerSocket {
+ public:
+  NATProxyServerSocket(AsyncSocket* socket)
+      : AsyncProxyServerSocket(socket, kNATEncodedIPv6AddressSize) {
+    BufferInput(true);
+  }
+
+  void SendConnectResult(int err, const SocketAddress& addr) override {
+    char code = err ? 1 : 0;
+    BufferedReadAdapter::DirectSend(&code, sizeof(char));
+  }
+
+ protected:
+  void ProcessInput(char* data, size_t* len) override {
+    if (*len < 2) {
+      return;
+    }
+
+    int family = data[1];
+    RTC_DCHECK(family == AF_INET || family == AF_INET6);
+    if ((family == AF_INET && *len < kNATEncodedIPv4AddressSize) ||
+        (family == AF_INET6 && *len < kNATEncodedIPv6AddressSize)) {
+      return;
+    }
+
+    SocketAddress dest_addr;
+    size_t address_length = UnpackAddressFromNAT(data, *len, &dest_addr);
+
+    *len -= address_length;
+    if (*len > 0) {
+      memmove(data, data + address_length, *len);
+    }
+
+    bool remainder = (*len > 0);
+    BufferInput(false);
+    SignalConnectRequest(this, dest_addr);
+    if (remainder) {
+      SignalReadEvent(this);
+    }
+  }
+
+};
+
+class NATProxyServer : public ProxyServer {
+ public:
+  NATProxyServer(SocketFactory* int_factory, const SocketAddress& int_addr,
+                 SocketFactory* ext_factory, const SocketAddress& ext_ip)
+      : ProxyServer(int_factory, int_addr, ext_factory, ext_ip) {
+  }
+
+ protected:
+  AsyncProxyServerSocket* WrapSocket(AsyncSocket* socket) override {
+    return new NATProxyServerSocket(socket);
+  }
+};
+
+NATServer::NATServer(
+    NATType type, SocketFactory* internal,
+    const SocketAddress& internal_udp_addr,
+    const SocketAddress& internal_tcp_addr,
+    SocketFactory* external, const SocketAddress& external_ip)
+    : external_(external), external_ip_(external_ip.ipaddr(), 0) {
+  nat_ = NAT::Create(type);
+
+  udp_server_socket_ = AsyncUDPSocket::Create(internal, internal_udp_addr);
+  udp_server_socket_->SignalReadPacket.connect(this,
+                                               &NATServer::OnInternalUDPPacket);
+  tcp_proxy_server_ = new NATProxyServer(internal, internal_tcp_addr, external,
+                                         external_ip);
+
+  int_map_ = new InternalMap(RouteCmp(nat_));
+  ext_map_ = new ExternalMap();
+}
+
+NATServer::~NATServer() {
+  for (InternalMap::iterator iter = int_map_->begin();
+       iter != int_map_->end();
+       iter++)
+    delete iter->second;
+
+  delete nat_;
+  delete udp_server_socket_;
+  delete tcp_proxy_server_;
+  delete int_map_;
+  delete ext_map_;
+}
+
+void NATServer::OnInternalUDPPacket(
+    AsyncPacketSocket* socket, const char* buf, size_t size,
+    const SocketAddress& addr, const PacketTime& packet_time) {
+  // Read the intended destination from the wire.
+  SocketAddress dest_addr;
+  size_t length = UnpackAddressFromNAT(buf, size, &dest_addr);
+
+  // Find the translation for these addresses (allocating one if necessary).
+  SocketAddressPair route(addr, dest_addr);
+  InternalMap::iterator iter = int_map_->find(route);
+  if (iter == int_map_->end()) {
+    Translate(route);
+    iter = int_map_->find(route);
+  }
+  RTC_DCHECK(iter != int_map_->end());
+
+  // Allow the destination to send packets back to the source.
+  iter->second->WhitelistInsert(dest_addr);
+
+  // Send the packet to its intended destination.
+  rtc::PacketOptions options;
+  iter->second->socket->SendTo(buf + length, size - length, dest_addr, options);
+}
+
+void NATServer::OnExternalUDPPacket(
+    AsyncPacketSocket* socket, const char* buf, size_t size,
+    const SocketAddress& remote_addr, const PacketTime& packet_time) {
+  SocketAddress local_addr = socket->GetLocalAddress();
+
+  // Find the translation for this addresses.
+  ExternalMap::iterator iter = ext_map_->find(local_addr);
+  RTC_DCHECK(iter != ext_map_->end());
+
+  // Allow the NAT to reject this packet.
+  if (ShouldFilterOut(iter->second, remote_addr)) {
+    RTC_LOG(LS_INFO) << "Packet from " << remote_addr.ToSensitiveString()
+                     << " was filtered out by the NAT.";
+    return;
+  }
+
+  // Forward this packet to the internal address.
+  // First prepend the address in a quasi-STUN format.
+  std::unique_ptr<char[]> real_buf(new char[size + kNATEncodedIPv6AddressSize]);
+  size_t addrlength = PackAddressForNAT(real_buf.get(),
+                                        size + kNATEncodedIPv6AddressSize,
+                                        remote_addr);
+  // Copy the data part after the address.
+  rtc::PacketOptions options;
+  memcpy(real_buf.get() + addrlength, buf, size);
+  udp_server_socket_->SendTo(real_buf.get(), size + addrlength,
+                             iter->second->route.source(), options);
+}
+
+void NATServer::Translate(const SocketAddressPair& route) {
+  AsyncUDPSocket* socket = AsyncUDPSocket::Create(external_, external_ip_);
+
+  if (!socket) {
+    RTC_LOG(LS_ERROR) << "Couldn't find a free port!";
+    return;
+  }
+
+  TransEntry* entry = new TransEntry(route, socket, nat_);
+  (*int_map_)[route] = entry;
+  (*ext_map_)[socket->GetLocalAddress()] = entry;
+  socket->SignalReadPacket.connect(this, &NATServer::OnExternalUDPPacket);
+}
+
+bool NATServer::ShouldFilterOut(TransEntry* entry,
+                                const SocketAddress& ext_addr) {
+  return entry->WhitelistContains(ext_addr);
+}
+
+NATServer::TransEntry::TransEntry(
+    const SocketAddressPair& r, AsyncUDPSocket* s, NAT* nat)
+    : route(r), socket(s) {
+  whitelist = new AddressSet(AddrCmp(nat));
+}
+
+NATServer::TransEntry::~TransEntry() {
+  delete whitelist;
+  delete socket;
+}
+
+void NATServer::TransEntry::WhitelistInsert(const SocketAddress& addr) {
+  CritScope cs(&crit_);
+  whitelist->insert(addr);
+}
+
+bool NATServer::TransEntry::WhitelistContains(const SocketAddress& ext_addr) {
+  CritScope cs(&crit_);
+  return whitelist->find(ext_addr) == whitelist->end();
+}
+
+}  // namespace rtc
diff --git a/rtc_base/natserver.h b/rtc_base/natserver.h
new file mode 100644
index 0000000..a7b4d62
--- /dev/null
+++ b/rtc_base/natserver.h
@@ -0,0 +1,124 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_NATSERVER_H_
+#define RTC_BASE_NATSERVER_H_
+
+#include <map>
+#include <set>
+
+#include "rtc_base/asyncudpsocket.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/nattypes.h"
+#include "rtc_base/proxyserver.h"
+#include "rtc_base/socketaddresspair.h"
+#include "rtc_base/socketfactory.h"
+#include "rtc_base/thread.h"
+
+namespace rtc {
+
+// Change how routes (socketaddress pairs) are compared based on the type of
+// NAT.  The NAT server maintains a hashtable of the routes that it knows
+// about.  So these affect which routes are treated the same.
+struct RouteCmp {
+  explicit RouteCmp(NAT* nat);
+  size_t operator()(const SocketAddressPair& r) const;
+  bool operator()(
+      const SocketAddressPair& r1, const SocketAddressPair& r2) const;
+
+  bool symmetric;
+};
+
+// Changes how addresses are compared based on the filtering rules of the NAT.
+struct AddrCmp {
+  explicit AddrCmp(NAT* nat);
+  size_t operator()(const SocketAddress& r) const;
+  bool operator()(const SocketAddress& r1, const SocketAddress& r2) const;
+
+  bool use_ip;
+  bool use_port;
+};
+
+// Implements the NAT device.  It listens for packets on the internal network,
+// translates them, and sends them out over the external network.
+//
+// TCP connections initiated from the internal side of the NAT server are
+// also supported, by making a connection to the NAT server's TCP address and
+// then sending the remote address in quasi-STUN format. The connection status
+// will be indicated back to the client as a 1 byte status code, where '0'
+// indicates success.
+
+const int NAT_SERVER_UDP_PORT = 4237;
+const int NAT_SERVER_TCP_PORT = 4238;
+
+class NATServer : public sigslot::has_slots<> {
+ public:
+  NATServer(
+      NATType type, SocketFactory* internal,
+      const SocketAddress& internal_udp_addr,
+      const SocketAddress& internal_tcp_addr,
+      SocketFactory* external, const SocketAddress& external_ip);
+  ~NATServer() override;
+
+  SocketAddress internal_udp_address() const {
+    return udp_server_socket_->GetLocalAddress();
+  }
+
+  SocketAddress internal_tcp_address() const {
+    return tcp_proxy_server_->GetServerAddress();
+  }
+
+  // Packets received on one of the networks.
+  void OnInternalUDPPacket(AsyncPacketSocket* socket, const char* buf,
+                           size_t size, const SocketAddress& addr,
+                           const PacketTime& packet_time);
+  void OnExternalUDPPacket(AsyncPacketSocket* socket, const char* buf,
+                           size_t size, const SocketAddress& remote_addr,
+                           const PacketTime& packet_time);
+
+ private:
+  typedef std::set<SocketAddress, AddrCmp> AddressSet;
+
+  /* Records a translation and the associated external socket. */
+  struct TransEntry {
+    TransEntry(const SocketAddressPair& r, AsyncUDPSocket* s, NAT* nat);
+    ~TransEntry();
+
+    void WhitelistInsert(const SocketAddress& addr);
+    bool WhitelistContains(const SocketAddress& ext_addr);
+
+    SocketAddressPair route;
+    AsyncUDPSocket* socket;
+    AddressSet* whitelist;
+    CriticalSection crit_;
+  };
+
+  typedef std::map<SocketAddressPair, TransEntry*, RouteCmp> InternalMap;
+  typedef std::map<SocketAddress, TransEntry*> ExternalMap;
+
+  /* Creates a new entry that translates the given route. */
+  void Translate(const SocketAddressPair& route);
+
+  /* Determines whether the NAT would filter out a packet from this address. */
+  bool ShouldFilterOut(TransEntry* entry, const SocketAddress& ext_addr);
+
+  NAT* nat_;
+  SocketFactory* external_;
+  SocketAddress external_ip_;
+  AsyncUDPSocket* udp_server_socket_;
+  ProxyServer* tcp_proxy_server_;
+  InternalMap* int_map_;
+  ExternalMap* ext_map_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(NATServer);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_NATSERVER_H_
diff --git a/rtc_base/natsocketfactory.cc b/rtc_base/natsocketfactory.cc
new file mode 100644
index 0000000..dd4c030
--- /dev/null
+++ b/rtc_base/natsocketfactory.cc
@@ -0,0 +1,530 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/natsocketfactory.h"
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/natserver.h"
+#include "rtc_base/virtualsocketserver.h"
+
+namespace rtc {
+
+// Packs the given socketaddress into the buffer in buf, in the quasi-STUN
+// format that the natserver uses.
+// Returns 0 if an invalid address is passed.
+size_t PackAddressForNAT(char* buf, size_t buf_size,
+                         const SocketAddress& remote_addr) {
+  const IPAddress& ip = remote_addr.ipaddr();
+  int family = ip.family();
+  buf[0] = 0;
+  buf[1] = family;
+  // Writes the port.
+  *(reinterpret_cast<uint16_t*>(&buf[2])) = HostToNetwork16(remote_addr.port());
+  if (family == AF_INET) {
+    RTC_DCHECK(buf_size >= kNATEncodedIPv4AddressSize);
+    in_addr v4addr = ip.ipv4_address();
+    memcpy(&buf[4], &v4addr, kNATEncodedIPv4AddressSize - 4);
+    return kNATEncodedIPv4AddressSize;
+  } else if (family == AF_INET6) {
+    RTC_DCHECK(buf_size >= kNATEncodedIPv6AddressSize);
+    in6_addr v6addr = ip.ipv6_address();
+    memcpy(&buf[4], &v6addr, kNATEncodedIPv6AddressSize - 4);
+    return kNATEncodedIPv6AddressSize;
+  }
+  return 0U;
+}
+
+// Decodes the remote address from a packet that has been encoded with the nat's
+// quasi-STUN format. Returns the length of the address (i.e., the offset into
+// data where the original packet starts).
+size_t UnpackAddressFromNAT(const char* buf, size_t buf_size,
+                            SocketAddress* remote_addr) {
+  RTC_DCHECK(buf_size >= 8);
+  RTC_DCHECK(buf[0] == 0);
+  int family = buf[1];
+  uint16_t port =
+      NetworkToHost16(*(reinterpret_cast<const uint16_t*>(&buf[2])));
+  if (family == AF_INET) {
+    const in_addr* v4addr = reinterpret_cast<const in_addr*>(&buf[4]);
+    *remote_addr = SocketAddress(IPAddress(*v4addr), port);
+    return kNATEncodedIPv4AddressSize;
+  } else if (family == AF_INET6) {
+    RTC_DCHECK(buf_size >= 20);
+    const in6_addr* v6addr = reinterpret_cast<const in6_addr*>(&buf[4]);
+    *remote_addr = SocketAddress(IPAddress(*v6addr), port);
+    return kNATEncodedIPv6AddressSize;
+  }
+  return 0U;
+}
+
+
+// NATSocket
+class NATSocket : public AsyncSocket, public sigslot::has_slots<> {
+ public:
+  explicit NATSocket(NATInternalSocketFactory* sf, int family, int type)
+      : sf_(sf),
+        family_(family),
+        type_(type),
+        connected_(false),
+        socket_(nullptr),
+        buf_(nullptr),
+        size_(0) {}
+
+  ~NATSocket() override {
+    delete socket_;
+    delete[] buf_;
+  }
+
+  SocketAddress GetLocalAddress() const override {
+    return (socket_) ? socket_->GetLocalAddress() : SocketAddress();
+  }
+
+  SocketAddress GetRemoteAddress() const override {
+    return remote_addr_;  // will be NIL if not connected
+  }
+
+  int Bind(const SocketAddress& addr) override {
+    if (socket_) {  // already bound, bubble up error
+      return -1;
+    }
+
+    return BindInternal(addr);
+  }
+
+  int Connect(const SocketAddress& addr) override {
+    int result = 0;
+    // If we're not already bound (meaning |socket_| is null), bind to ANY
+    // address.
+    if (!socket_) {
+      result = BindInternal(SocketAddress(GetAnyIP(family_), 0));
+      if (result < 0) {
+        return result;
+      }
+    }
+
+    if (type_ == SOCK_STREAM) {
+      result = socket_->Connect(server_addr_.IsNil() ? addr : server_addr_);
+    } else {
+      connected_ = true;
+    }
+
+    if (result >= 0) {
+      remote_addr_ = addr;
+    }
+
+    return result;
+  }
+
+  int Send(const void* data, size_t size) override {
+    RTC_DCHECK(connected_);
+    return SendTo(data, size, remote_addr_);
+  }
+
+  int SendTo(const void* data,
+             size_t size,
+             const SocketAddress& addr) override {
+    RTC_DCHECK(!connected_ || addr == remote_addr_);
+    if (server_addr_.IsNil() || type_ == SOCK_STREAM) {
+      return socket_->SendTo(data, size, addr);
+    }
+    // This array will be too large for IPv4 packets, but only by 12 bytes.
+    std::unique_ptr<char[]> buf(new char[size + kNATEncodedIPv6AddressSize]);
+    size_t addrlength = PackAddressForNAT(buf.get(),
+                                          size + kNATEncodedIPv6AddressSize,
+                                          addr);
+    size_t encoded_size = size + addrlength;
+    memcpy(buf.get() + addrlength, data, size);
+    int result = socket_->SendTo(buf.get(), encoded_size, server_addr_);
+    if (result >= 0) {
+      RTC_DCHECK(result == static_cast<int>(encoded_size));
+      result = result - static_cast<int>(addrlength);
+    }
+    return result;
+  }
+
+  int Recv(void* data, size_t size, int64_t* timestamp) override {
+    SocketAddress addr;
+    return RecvFrom(data, size, &addr, timestamp);
+  }
+
+  int RecvFrom(void* data,
+               size_t size,
+               SocketAddress* out_addr,
+               int64_t* timestamp) override {
+    if (server_addr_.IsNil() || type_ == SOCK_STREAM) {
+      return socket_->RecvFrom(data, size, out_addr, timestamp);
+    }
+    // Make sure we have enough room to read the requested amount plus the
+    // largest possible header address.
+    SocketAddress remote_addr;
+    Grow(size + kNATEncodedIPv6AddressSize);
+
+    // Read the packet from the socket.
+    int result = socket_->RecvFrom(buf_, size_, &remote_addr, timestamp);
+    if (result >= 0) {
+      RTC_DCHECK(remote_addr == server_addr_);
+
+      // TODO: we need better framing so we know how many bytes we can
+      // return before we need to read the next address. For UDP, this will be
+      // fine as long as the reader always reads everything in the packet.
+      RTC_DCHECK((size_t)result < size_);
+
+      // Decode the wire packet into the actual results.
+      SocketAddress real_remote_addr;
+      size_t addrlength = UnpackAddressFromNAT(buf_, result, &real_remote_addr);
+      memcpy(data, buf_ + addrlength, result - addrlength);
+
+      // Make sure this packet should be delivered before returning it.
+      if (!connected_ || (real_remote_addr == remote_addr_)) {
+        if (out_addr)
+          *out_addr = real_remote_addr;
+        result = result - static_cast<int>(addrlength);
+      } else {
+        RTC_LOG(LS_ERROR) << "Dropping packet from unknown remote address: "
+                          << real_remote_addr.ToString();
+        result = 0;  // Tell the caller we didn't read anything
+      }
+    }
+
+    return result;
+  }
+
+  int Close() override {
+    int result = 0;
+    if (socket_) {
+      result = socket_->Close();
+      if (result >= 0) {
+        connected_ = false;
+        remote_addr_ = SocketAddress();
+        delete socket_;
+        socket_ = nullptr;
+      }
+    }
+    return result;
+  }
+
+  int Listen(int backlog) override { return socket_->Listen(backlog); }
+  AsyncSocket* Accept(SocketAddress* paddr) override {
+    return socket_->Accept(paddr);
+  }
+  int GetError() const override {
+    return socket_ ? socket_->GetError() : error_;
+  }
+  void SetError(int error) override {
+    if (socket_) {
+      socket_->SetError(error);
+    } else {
+      error_ = error;
+    }
+  }
+  ConnState GetState() const override {
+    return connected_ ? CS_CONNECTED : CS_CLOSED;
+  }
+  int GetOption(Option opt, int* value) override {
+    return socket_->GetOption(opt, value);
+  }
+  int SetOption(Option opt, int value) override {
+    return socket_->SetOption(opt, value);
+  }
+
+  void OnConnectEvent(AsyncSocket* socket) {
+    // If we're NATed, we need to send a message with the real addr to use.
+    RTC_DCHECK(socket == socket_);
+    if (server_addr_.IsNil()) {
+      connected_ = true;
+      SignalConnectEvent(this);
+    } else {
+      SendConnectRequest();
+    }
+  }
+  void OnReadEvent(AsyncSocket* socket) {
+    // If we're NATed, we need to process the connect reply.
+    RTC_DCHECK(socket == socket_);
+    if (type_ == SOCK_STREAM && !server_addr_.IsNil() && !connected_) {
+      HandleConnectReply();
+    } else {
+      SignalReadEvent(this);
+    }
+  }
+  void OnWriteEvent(AsyncSocket* socket) {
+    RTC_DCHECK(socket == socket_);
+    SignalWriteEvent(this);
+  }
+  void OnCloseEvent(AsyncSocket* socket, int error) {
+    RTC_DCHECK(socket == socket_);
+    SignalCloseEvent(this, error);
+  }
+
+ private:
+  int BindInternal(const SocketAddress& addr) {
+    RTC_DCHECK(!socket_);
+
+    int result;
+    socket_ = sf_->CreateInternalSocket(family_, type_, addr, &server_addr_);
+    result = (socket_) ? socket_->Bind(addr) : -1;
+    if (result >= 0) {
+      socket_->SignalConnectEvent.connect(this, &NATSocket::OnConnectEvent);
+      socket_->SignalReadEvent.connect(this, &NATSocket::OnReadEvent);
+      socket_->SignalWriteEvent.connect(this, &NATSocket::OnWriteEvent);
+      socket_->SignalCloseEvent.connect(this, &NATSocket::OnCloseEvent);
+    } else {
+      server_addr_.Clear();
+      delete socket_;
+      socket_ = nullptr;
+    }
+
+    return result;
+  }
+
+  // Makes sure the buffer is at least the given size.
+  void Grow(size_t new_size) {
+    if (size_ < new_size) {
+      delete[] buf_;
+      size_ = new_size;
+      buf_ = new char[size_];
+    }
+  }
+
+  // Sends the destination address to the server to tell it to connect.
+  void SendConnectRequest() {
+    char buf[kNATEncodedIPv6AddressSize];
+    size_t length = PackAddressForNAT(buf, arraysize(buf), remote_addr_);
+    socket_->Send(buf, length);
+  }
+
+  // Handles the byte sent back from the server and fires the appropriate event.
+  void HandleConnectReply() {
+    char code;
+    socket_->Recv(&code, sizeof(code), nullptr);
+    if (code == 0) {
+      connected_ = true;
+      SignalConnectEvent(this);
+    } else {
+      Close();
+      SignalCloseEvent(this, code);
+    }
+  }
+
+  NATInternalSocketFactory* sf_;
+  int family_;
+  int type_;
+  bool connected_;
+  SocketAddress remote_addr_;
+  SocketAddress server_addr_;  // address of the NAT server
+  AsyncSocket* socket_;
+  // Need to hold error in case it occurs before the socket is created.
+  int error_ = 0;
+  char* buf_;
+  size_t size_;
+};
+
+// NATSocketFactory
+NATSocketFactory::NATSocketFactory(SocketFactory* factory,
+                                   const SocketAddress& nat_udp_addr,
+                                   const SocketAddress& nat_tcp_addr)
+    : factory_(factory), nat_udp_addr_(nat_udp_addr),
+      nat_tcp_addr_(nat_tcp_addr) {
+}
+
+Socket* NATSocketFactory::CreateSocket(int type) {
+  return CreateSocket(AF_INET, type);
+}
+
+Socket* NATSocketFactory::CreateSocket(int family, int type) {
+  return new NATSocket(this, family, type);
+}
+
+AsyncSocket* NATSocketFactory::CreateAsyncSocket(int type) {
+  return CreateAsyncSocket(AF_INET, type);
+}
+
+AsyncSocket* NATSocketFactory::CreateAsyncSocket(int family, int type) {
+  return new NATSocket(this, family, type);
+}
+
+AsyncSocket* NATSocketFactory::CreateInternalSocket(int family, int type,
+    const SocketAddress& local_addr, SocketAddress* nat_addr) {
+  if (type == SOCK_STREAM) {
+    *nat_addr = nat_tcp_addr_;
+  } else {
+    *nat_addr = nat_udp_addr_;
+  }
+  return factory_->CreateAsyncSocket(family, type);
+}
+
+// NATSocketServer
+NATSocketServer::NATSocketServer(SocketServer* server)
+    : server_(server), msg_queue_(nullptr) {}
+
+NATSocketServer::Translator* NATSocketServer::GetTranslator(
+    const SocketAddress& ext_ip) {
+  return nats_.Get(ext_ip);
+}
+
+NATSocketServer::Translator* NATSocketServer::AddTranslator(
+    const SocketAddress& ext_ip, const SocketAddress& int_ip, NATType type) {
+  // Fail if a translator already exists with this extternal address.
+  if (nats_.Get(ext_ip))
+    return nullptr;
+
+  return nats_.Add(ext_ip, new Translator(this, type, int_ip, server_, ext_ip));
+}
+
+void NATSocketServer::RemoveTranslator(
+    const SocketAddress& ext_ip) {
+  nats_.Remove(ext_ip);
+}
+
+Socket* NATSocketServer::CreateSocket(int type) {
+  return CreateSocket(AF_INET, type);
+}
+
+Socket* NATSocketServer::CreateSocket(int family, int type) {
+  return new NATSocket(this, family, type);
+}
+
+AsyncSocket* NATSocketServer::CreateAsyncSocket(int type) {
+  return CreateAsyncSocket(AF_INET, type);
+}
+
+AsyncSocket* NATSocketServer::CreateAsyncSocket(int family, int type) {
+  return new NATSocket(this, family, type);
+}
+
+void NATSocketServer::SetMessageQueue(MessageQueue* queue) {
+  msg_queue_ = queue;
+  server_->SetMessageQueue(queue);
+}
+
+bool NATSocketServer::Wait(int cms, bool process_io) {
+  return server_->Wait(cms, process_io);
+}
+
+void NATSocketServer::WakeUp() {
+  server_->WakeUp();
+}
+
+AsyncSocket* NATSocketServer::CreateInternalSocket(int family, int type,
+    const SocketAddress& local_addr, SocketAddress* nat_addr) {
+  AsyncSocket* socket = nullptr;
+  Translator* nat = nats_.FindClient(local_addr);
+  if (nat) {
+    socket = nat->internal_factory()->CreateAsyncSocket(family, type);
+    *nat_addr = (type == SOCK_STREAM) ?
+        nat->internal_tcp_address() : nat->internal_udp_address();
+  } else {
+    socket = server_->CreateAsyncSocket(family, type);
+  }
+  return socket;
+}
+
+// NATSocketServer::Translator
+NATSocketServer::Translator::Translator(
+    NATSocketServer* server, NATType type, const SocketAddress& int_ip,
+    SocketFactory* ext_factory, const SocketAddress& ext_ip)
+    : server_(server) {
+  // Create a new private network, and a NATServer running on the private
+  // network that bridges to the external network. Also tell the private
+  // network to use the same message queue as us.
+  VirtualSocketServer* internal_server = new VirtualSocketServer();
+  internal_server->SetMessageQueue(server_->queue());
+  internal_factory_.reset(internal_server);
+  nat_server_.reset(new NATServer(type, internal_server, int_ip, int_ip,
+                                  ext_factory, ext_ip));
+}
+
+NATSocketServer::Translator::~Translator() = default;
+
+NATSocketServer::Translator* NATSocketServer::Translator::GetTranslator(
+    const SocketAddress& ext_ip) {
+  return nats_.Get(ext_ip);
+}
+
+NATSocketServer::Translator* NATSocketServer::Translator::AddTranslator(
+    const SocketAddress& ext_ip, const SocketAddress& int_ip, NATType type) {
+  // Fail if a translator already exists with this extternal address.
+  if (nats_.Get(ext_ip))
+    return nullptr;
+
+  AddClient(ext_ip);
+  return nats_.Add(ext_ip,
+                   new Translator(server_, type, int_ip, server_, ext_ip));
+}
+void NATSocketServer::Translator::RemoveTranslator(
+    const SocketAddress& ext_ip) {
+  nats_.Remove(ext_ip);
+  RemoveClient(ext_ip);
+}
+
+bool NATSocketServer::Translator::AddClient(
+    const SocketAddress& int_ip) {
+  // Fail if a client already exists with this internal address.
+  if (clients_.find(int_ip) != clients_.end())
+    return false;
+
+  clients_.insert(int_ip);
+  return true;
+}
+
+void NATSocketServer::Translator::RemoveClient(
+    const SocketAddress& int_ip) {
+  std::set<SocketAddress>::iterator it = clients_.find(int_ip);
+  if (it != clients_.end()) {
+    clients_.erase(it);
+  }
+}
+
+NATSocketServer::Translator* NATSocketServer::Translator::FindClient(
+    const SocketAddress& int_ip) {
+  // See if we have the requested IP, or any of our children do.
+  return (clients_.find(int_ip) != clients_.end()) ?
+      this : nats_.FindClient(int_ip);
+}
+
+// NATSocketServer::TranslatorMap
+NATSocketServer::TranslatorMap::~TranslatorMap() {
+  for (TranslatorMap::iterator it = begin(); it != end(); ++it) {
+    delete it->second;
+  }
+}
+
+NATSocketServer::Translator* NATSocketServer::TranslatorMap::Get(
+    const SocketAddress& ext_ip) {
+  TranslatorMap::iterator it = find(ext_ip);
+  return (it != end()) ? it->second : nullptr;
+}
+
+NATSocketServer::Translator* NATSocketServer::TranslatorMap::Add(
+    const SocketAddress& ext_ip, Translator* nat) {
+  (*this)[ext_ip] = nat;
+  return nat;
+}
+
+void NATSocketServer::TranslatorMap::Remove(
+    const SocketAddress& ext_ip) {
+  TranslatorMap::iterator it = find(ext_ip);
+  if (it != end()) {
+    delete it->second;
+    erase(it);
+  }
+}
+
+NATSocketServer::Translator* NATSocketServer::TranslatorMap::FindClient(
+    const SocketAddress& int_ip) {
+  Translator* nat = nullptr;
+  for (TranslatorMap::iterator it = begin(); it != end() && !nat; ++it) {
+    nat = it->second->FindClient(int_ip);
+  }
+  return nat;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/natsocketfactory.h b/rtc_base/natsocketfactory.h
new file mode 100644
index 0000000..319545c
--- /dev/null
+++ b/rtc_base/natsocketfactory.h
@@ -0,0 +1,168 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_NATSOCKETFACTORY_H_
+#define RTC_BASE_NATSOCKETFACTORY_H_
+
+#include <string>
+#include <map>
+#include <memory>
+#include <set>
+
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/natserver.h"
+#include "rtc_base/socketaddress.h"
+#include "rtc_base/socketserver.h"
+
+namespace rtc {
+
+const size_t kNATEncodedIPv4AddressSize = 8U;
+const size_t kNATEncodedIPv6AddressSize = 20U;
+
+// Used by the NAT socket implementation.
+class NATInternalSocketFactory {
+ public:
+  virtual ~NATInternalSocketFactory() {}
+  virtual AsyncSocket* CreateInternalSocket(int family, int type,
+      const SocketAddress& local_addr, SocketAddress* nat_addr) = 0;
+};
+
+// Creates sockets that will send all traffic through a NAT, using an existing
+// NATServer instance running at nat_addr. The actual data is sent using sockets
+// from a socket factory, given to the constructor.
+class NATSocketFactory : public SocketFactory, public NATInternalSocketFactory {
+ public:
+  NATSocketFactory(SocketFactory* factory, const SocketAddress& nat_udp_addr,
+                   const SocketAddress& nat_tcp_addr);
+
+  // SocketFactory implementation
+  Socket* CreateSocket(int type) override;
+  Socket* CreateSocket(int family, int type) override;
+  AsyncSocket* CreateAsyncSocket(int type) override;
+  AsyncSocket* CreateAsyncSocket(int family, int type) override;
+
+  // NATInternalSocketFactory implementation
+  AsyncSocket* CreateInternalSocket(int family,
+                                    int type,
+                                    const SocketAddress& local_addr,
+                                    SocketAddress* nat_addr) override;
+
+ private:
+  SocketFactory* factory_;
+  SocketAddress nat_udp_addr_;
+  SocketAddress nat_tcp_addr_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(NATSocketFactory);
+};
+
+// Creates sockets that will send traffic through a NAT depending on what
+// address they bind to. This can be used to simulate a client on a NAT sending
+// to a client that is not behind a NAT.
+// Note that the internal addresses of clients must be unique. This is because
+// there is only one socketserver per thread, and the Bind() address is used to
+// figure out which NAT (if any) the socket should talk to.
+//
+// Example with 3 NATs (2 cascaded), and 3 clients.
+// ss->AddTranslator("1.2.3.4", "192.168.0.1", NAT_ADDR_RESTRICTED);
+// ss->AddTranslator("99.99.99.99", "10.0.0.1", NAT_SYMMETRIC)->
+//     AddTranslator("10.0.0.2", "192.168.1.1", NAT_OPEN_CONE);
+// ss->GetTranslator("1.2.3.4")->AddClient("1.2.3.4", "192.168.0.2");
+// ss->GetTranslator("99.99.99.99")->AddClient("10.0.0.3");
+// ss->GetTranslator("99.99.99.99")->GetTranslator("10.0.0.2")->
+//     AddClient("192.168.1.2");
+class NATSocketServer : public SocketServer, public NATInternalSocketFactory {
+ public:
+  class Translator;
+  // holds a list of NATs
+  class TranslatorMap : private std::map<SocketAddress, Translator*> {
+   public:
+    ~TranslatorMap();
+    Translator* Get(const SocketAddress& ext_ip);
+    Translator* Add(const SocketAddress& ext_ip, Translator*);
+    void Remove(const SocketAddress& ext_ip);
+    Translator* FindClient(const SocketAddress& int_ip);
+  };
+
+  // a specific NAT
+  class Translator {
+   public:
+    Translator(NATSocketServer* server, NATType type,
+               const SocketAddress& int_addr, SocketFactory* ext_factory,
+               const SocketAddress& ext_addr);
+    ~Translator();
+
+    SocketFactory* internal_factory() { return internal_factory_.get(); }
+    SocketAddress internal_udp_address() const {
+      return nat_server_->internal_udp_address();
+    }
+    SocketAddress internal_tcp_address() const {
+      return SocketAddress();  // nat_server_->internal_tcp_address();
+    }
+
+    Translator* GetTranslator(const SocketAddress& ext_ip);
+    Translator* AddTranslator(const SocketAddress& ext_ip,
+                              const SocketAddress& int_ip, NATType type);
+    void RemoveTranslator(const SocketAddress& ext_ip);
+
+    bool AddClient(const SocketAddress& int_ip);
+    void RemoveClient(const SocketAddress& int_ip);
+
+    // Looks for the specified client in this or a child NAT.
+    Translator* FindClient(const SocketAddress& int_ip);
+
+   private:
+    NATSocketServer* server_;
+    std::unique_ptr<SocketFactory> internal_factory_;
+    std::unique_ptr<NATServer> nat_server_;
+    TranslatorMap nats_;
+    std::set<SocketAddress> clients_;
+  };
+
+  explicit NATSocketServer(SocketServer* ss);
+
+  SocketServer* socketserver() { return server_; }
+  MessageQueue* queue() { return msg_queue_; }
+
+  Translator* GetTranslator(const SocketAddress& ext_ip);
+  Translator* AddTranslator(const SocketAddress& ext_ip,
+                            const SocketAddress& int_ip, NATType type);
+  void RemoveTranslator(const SocketAddress& ext_ip);
+
+  // SocketServer implementation
+  Socket* CreateSocket(int type) override;
+  Socket* CreateSocket(int family, int type) override;
+
+  AsyncSocket* CreateAsyncSocket(int type) override;
+  AsyncSocket* CreateAsyncSocket(int family, int type) override;
+
+  void SetMessageQueue(MessageQueue* queue) override;
+  bool Wait(int cms, bool process_io) override;
+  void WakeUp() override;
+
+  // NATInternalSocketFactory implementation
+  AsyncSocket* CreateInternalSocket(int family,
+                                    int type,
+                                    const SocketAddress& local_addr,
+                                    SocketAddress* nat_addr) override;
+
+ private:
+  SocketServer* server_;
+  MessageQueue* msg_queue_;
+  TranslatorMap nats_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(NATSocketServer);
+};
+
+// Free-standing NAT helper functions.
+size_t PackAddressForNAT(char* buf, size_t buf_size,
+                         const SocketAddress& remote_addr);
+size_t UnpackAddressFromNAT(const char* buf, size_t buf_size,
+                            SocketAddress* remote_addr);
+}  // namespace rtc
+
+#endif  // RTC_BASE_NATSOCKETFACTORY_H_
diff --git a/rtc_base/nattypes.cc b/rtc_base/nattypes.cc
new file mode 100644
index 0000000..29936ad
--- /dev/null
+++ b/rtc_base/nattypes.cc
@@ -0,0 +1,61 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/nattypes.h"
+
+#include "rtc_base/checks.h"
+
+namespace rtc {
+
+class SymmetricNAT : public NAT {
+public:
+ bool IsSymmetric() override { return true; }
+ bool FiltersIP() override { return true; }
+ bool FiltersPort() override { return true; }
+};
+
+class OpenConeNAT : public NAT {
+public:
+ bool IsSymmetric() override { return false; }
+ bool FiltersIP() override { return false; }
+ bool FiltersPort() override { return false; }
+};
+
+class AddressRestrictedNAT : public NAT {
+public:
+ bool IsSymmetric() override { return false; }
+ bool FiltersIP() override { return true; }
+ bool FiltersPort() override { return false; }
+};
+
+class PortRestrictedNAT : public NAT {
+public:
+ bool IsSymmetric() override { return false; }
+ bool FiltersIP() override { return true; }
+ bool FiltersPort() override { return true; }
+};
+
+NAT* NAT::Create(NATType type) {
+  switch (type) {
+    case NAT_OPEN_CONE:
+      return new OpenConeNAT();
+    case NAT_ADDR_RESTRICTED:
+      return new AddressRestrictedNAT();
+    case NAT_PORT_RESTRICTED:
+      return new PortRestrictedNAT();
+    case NAT_SYMMETRIC:
+      return new SymmetricNAT();
+    default:
+      RTC_NOTREACHED();
+      return 0;
+  }
+}
+
+} // namespace rtc
diff --git a/rtc_base/nattypes.h b/rtc_base/nattypes.h
new file mode 100644
index 0000000..64b36d3
--- /dev/null
+++ b/rtc_base/nattypes.h
@@ -0,0 +1,47 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_NATTYPES_H_
+#define RTC_BASE_NATTYPES_H_
+
+namespace rtc {
+
+/* Identifies each type of NAT that can be simulated. */
+enum NATType {
+  NAT_OPEN_CONE,
+  NAT_ADDR_RESTRICTED,
+  NAT_PORT_RESTRICTED,
+  NAT_SYMMETRIC
+};
+
+// Implements the rules for each specific type of NAT.
+class NAT {
+public:
+  virtual ~NAT() { }
+
+  // Determines whether this NAT uses both source and destination address when
+  // checking whether a mapping already exists.
+  virtual bool IsSymmetric() = 0;
+
+  // Determines whether this NAT drops packets received from a different IP
+  // the one last sent to.
+  virtual bool FiltersIP() = 0;
+
+  // Determines whether this NAT drops packets received from a different port
+  // the one last sent to.
+  virtual bool FiltersPort() = 0;
+
+  // Returns an implementation of the given type of NAT.
+  static NAT* Create(NATType type);
+};
+
+} // namespace rtc
+
+#endif // RTC_BASE_NATTYPES_H_
diff --git a/rtc_base/nethelper.cc b/rtc_base/nethelper.cc
new file mode 100644
index 0000000..e654fe3
--- /dev/null
+++ b/rtc_base/nethelper.cc
@@ -0,0 +1,42 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/nethelper.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/ipaddress.h"
+
+namespace cricket {
+
+const char UDP_PROTOCOL_NAME[] = "udp";
+const char TCP_PROTOCOL_NAME[] = "tcp";
+const char SSLTCP_PROTOCOL_NAME[] = "ssltcp";
+const char TLS_PROTOCOL_NAME[] = "tls";
+
+int GetIpOverhead(int addr_family) {
+  switch (addr_family) {
+    case AF_INET:  // IPv4
+      return 20;
+    case AF_INET6:  // IPv6
+      return 40;
+    default:
+      RTC_NOTREACHED() << "Invaild address family.";
+      return 0;
+  }
+}
+
+int GetProtocolOverhead(const std::string& protocol) {
+  if (protocol == TCP_PROTOCOL_NAME || protocol == SSLTCP_PROTOCOL_NAME) {
+    return 20;
+  }
+  return 8;
+}
+
+}  // namespace cricket
diff --git a/rtc_base/nethelper.h b/rtc_base/nethelper.h
new file mode 100644
index 0000000..e86d126
--- /dev/null
+++ b/rtc_base/nethelper.h
@@ -0,0 +1,33 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef RTC_BASE_NETHELPER_H_
+#define RTC_BASE_NETHELPER_H_
+
+#include <cstdlib>
+#include <string>
+
+// This header contains helper functions and constants used by different types
+// of transports.
+namespace cricket {
+
+extern const char UDP_PROTOCOL_NAME[];
+extern const char TCP_PROTOCOL_NAME[];
+extern const char SSLTCP_PROTOCOL_NAME[];
+extern const char TLS_PROTOCOL_NAME[];
+
+// Get the network layer overhead per packet based on the IP address family.
+int GetIpOverhead(int addr_family);
+
+// Get the transport layer overhead per packet based on the protocol.
+int GetProtocolOverhead(const std::string& protocol);
+
+}  // namespace cricket
+
+#endif  // RTC_BASE_NETHELPER_H_
diff --git a/rtc_base/nethelpers.cc b/rtc_base/nethelpers.cc
new file mode 100644
index 0000000..c41e124
--- /dev/null
+++ b/rtc_base/nethelpers.cc
@@ -0,0 +1,219 @@
+/*
+ *  Copyright 2008 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/nethelpers.h"
+
+#include <memory>
+
+#if defined(WEBRTC_WIN)
+#include <ws2spi.h>
+#include <ws2tcpip.h>
+#include "rtc_base/win32.h"
+#endif
+#if defined(WEBRTC_POSIX) && !defined(__native_client__)
+#if defined(WEBRTC_ANDROID)
+#include "rtc_base/ifaddrs-android.h"
+#else
+#include <ifaddrs.h>
+#endif
+#endif  // defined(WEBRTC_POSIX) && !defined(__native_client__)
+
+#include "rtc_base/byteorder.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/signalthread.h"
+
+namespace rtc {
+
+int ResolveHostname(const std::string& hostname, int family,
+                    std::vector<IPAddress>* addresses) {
+#ifdef __native_client__
+  RTC_NOTREACHED();
+  RTC_LOG(LS_WARNING) << "ResolveHostname() is not implemented for NaCl";
+  return -1;
+#else  // __native_client__
+  if (!addresses) {
+    return -1;
+  }
+  addresses->clear();
+  struct addrinfo* result = nullptr;
+  struct addrinfo hints = {0};
+  hints.ai_family = family;
+  // |family| here will almost always be AF_UNSPEC, because |family| comes from
+  // AsyncResolver::addr_.family(), which comes from a SocketAddress constructed
+  // with a hostname. When a SocketAddress is constructed with a hostname, its
+  // family is AF_UNSPEC. However, if someday in the future we construct
+  // a SocketAddress with both a hostname and a family other than AF_UNSPEC,
+  // then it would be possible to get a specific family value here.
+
+  // The behavior of AF_UNSPEC is roughly "get both ipv4 and ipv6", as
+  // documented by the various operating systems:
+  // Linux: http://man7.org/linux/man-pages/man3/getaddrinfo.3.html
+  // Windows: https://msdn.microsoft.com/en-us/library/windows/desktop/
+  // ms738520(v=vs.85).aspx
+  // Mac: https://developer.apple.com/legacy/library/documentation/Darwin/
+  // Reference/ManPages/man3/getaddrinfo.3.html
+  // Android (source code, not documentation):
+  // https://android.googlesource.com/platform/bionic/+/
+  // 7e0bfb511e85834d7c6cb9631206b62f82701d60/libc/netbsd/net/getaddrinfo.c#1657
+  hints.ai_flags = AI_ADDRCONFIG;
+  int ret = getaddrinfo(hostname.c_str(), nullptr, &hints, &result);
+  if (ret != 0) {
+    return ret;
+  }
+  struct addrinfo* cursor = result;
+  for (; cursor; cursor = cursor->ai_next) {
+    if (family == AF_UNSPEC || cursor->ai_family == family) {
+      IPAddress ip;
+      if (IPFromAddrInfo(cursor, &ip)) {
+        addresses->push_back(ip);
+      }
+    }
+  }
+  freeaddrinfo(result);
+  return 0;
+#endif  // !__native_client__
+}
+
+// AsyncResolver
+AsyncResolver::AsyncResolver()
+    : SignalThread(), error_(-1) {}
+
+AsyncResolver::~AsyncResolver() = default;
+
+void AsyncResolver::Start(const SocketAddress& addr) {
+  addr_ = addr;
+  // SignalThred Start will kickoff the resolve process.
+  SignalThread::Start();
+}
+
+bool AsyncResolver::GetResolvedAddress(int family, SocketAddress* addr) const {
+  if (error_ != 0 || addresses_.empty())
+    return false;
+
+  *addr = addr_;
+  for (size_t i = 0; i < addresses_.size(); ++i) {
+    if (family == addresses_[i].family()) {
+      addr->SetResolvedIP(addresses_[i]);
+      return true;
+    }
+  }
+  return false;
+}
+
+int AsyncResolver::GetError() const {
+  return error_;
+}
+
+void AsyncResolver::Destroy(bool wait) {
+  SignalThread::Destroy(wait);
+}
+
+void AsyncResolver::DoWork() {
+  error_ = ResolveHostname(addr_.hostname().c_str(), addr_.family(),
+                           &addresses_);
+}
+
+void AsyncResolver::OnWorkDone() {
+  SignalDone(this);
+}
+
+const char* inet_ntop(int af, const void *src, char* dst, socklen_t size) {
+#if defined(WEBRTC_WIN)
+  return win32_inet_ntop(af, src, dst, size);
+#else
+  return ::inet_ntop(af, src, dst, size);
+#endif
+}
+
+int inet_pton(int af, const char* src, void *dst) {
+#if defined(WEBRTC_WIN)
+  return win32_inet_pton(af, src, dst);
+#else
+  return ::inet_pton(af, src, dst);
+#endif
+}
+
+bool HasIPv4Enabled() {
+#if defined(WEBRTC_POSIX) && !defined(__native_client__)
+  bool has_ipv4 = false;
+  struct ifaddrs* ifa;
+  if (getifaddrs(&ifa) < 0) {
+    return false;
+  }
+  for (struct ifaddrs* cur = ifa; cur != nullptr; cur = cur->ifa_next) {
+    if (cur->ifa_addr->sa_family == AF_INET) {
+      has_ipv4 = true;
+      break;
+    }
+  }
+  freeifaddrs(ifa);
+  return has_ipv4;
+#else
+  return true;
+#endif
+}
+
+bool HasIPv6Enabled() {
+#if defined(WEBRTC_WIN)
+  if (IsWindowsVistaOrLater()) {
+    return true;
+  }
+  if (!IsWindowsXpOrLater()) {
+    return false;
+  }
+  DWORD protbuff_size = 4096;
+  std::unique_ptr<char[]> protocols;
+  LPWSAPROTOCOL_INFOW protocol_infos = nullptr;
+  int requested_protocols[2] = {AF_INET6, 0};
+
+  int err = 0;
+  int ret = 0;
+  // Check for protocols in a do-while loop until we provide a buffer large
+  // enough. (WSCEnumProtocols sets protbuff_size to its desired value).
+  // It is extremely unlikely that this will loop more than once.
+  do {
+    protocols.reset(new char[protbuff_size]);
+    protocol_infos = reinterpret_cast<LPWSAPROTOCOL_INFOW>(protocols.get());
+    ret = WSCEnumProtocols(requested_protocols, protocol_infos,
+                           &protbuff_size, &err);
+  } while (ret == SOCKET_ERROR && err == WSAENOBUFS);
+
+  if (ret == SOCKET_ERROR) {
+    return false;
+  }
+
+  // Even if ret is positive, check specifically for IPv6.
+  // Non-IPv6 enabled WinXP will still return a RAW protocol.
+  for (int i = 0; i < ret; ++i) {
+    if (protocol_infos[i].iAddressFamily == AF_INET6) {
+      return true;
+    }
+  }
+  return false;
+#elif defined(WEBRTC_POSIX) && !defined(__native_client__)
+  bool has_ipv6 = false;
+  struct ifaddrs* ifa;
+  if (getifaddrs(&ifa) < 0) {
+    return false;
+  }
+  for (struct ifaddrs* cur = ifa; cur != nullptr; cur = cur->ifa_next) {
+    if (cur->ifa_addr->sa_family == AF_INET6) {
+      has_ipv6 = true;
+      break;
+    }
+  }
+  freeifaddrs(ifa);
+  return has_ipv6;
+#else
+  return true;
+#endif
+}
+}  // namespace rtc
diff --git a/rtc_base/nethelpers.h b/rtc_base/nethelpers.h
new file mode 100644
index 0000000..e118b27
--- /dev/null
+++ b/rtc_base/nethelpers.h
@@ -0,0 +1,66 @@
+/*
+ *  Copyright 2008 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_NETHELPERS_H_
+#define RTC_BASE_NETHELPERS_H_
+
+#if defined(WEBRTC_POSIX)
+#include <netdb.h>
+#include <stddef.h>
+#elif WEBRTC_WIN
+#include <winsock2.h>  // NOLINT
+#endif
+
+#include <list>
+
+#include "rtc_base/asyncresolverinterface.h"
+#include "rtc_base/signalthread.h"
+#include "rtc_base/sigslot.h"
+#include "rtc_base/socketaddress.h"
+
+namespace rtc {
+
+class AsyncResolverTest;
+
+// AsyncResolver will perform async DNS resolution, signaling the result on
+// the SignalDone from AsyncResolverInterface when the operation completes.
+class AsyncResolver : public SignalThread, public AsyncResolverInterface {
+ public:
+  AsyncResolver();
+  ~AsyncResolver() override;
+
+  void Start(const SocketAddress& addr) override;
+  bool GetResolvedAddress(int family, SocketAddress* addr) const override;
+  int GetError() const override;
+  void Destroy(bool wait) override;
+
+  const std::vector<IPAddress>& addresses() const { return addresses_; }
+  void set_error(int error) { error_ = error; }
+
+ protected:
+  void DoWork() override;
+  void OnWorkDone() override;
+
+ private:
+  SocketAddress addr_;
+  std::vector<IPAddress> addresses_;
+  int error_;
+};
+
+// rtc namespaced wrappers for inet_ntop and inet_pton so we can avoid
+// the windows-native versions of these.
+const char* inet_ntop(int af, const void *src, char* dst, socklen_t size);
+int inet_pton(int af, const char* src, void *dst);
+
+bool HasIPv4Enabled();
+bool HasIPv6Enabled();
+}  // namespace rtc
+
+#endif  // RTC_BASE_NETHELPERS_H_
diff --git a/rtc_base/network.cc b/rtc_base/network.cc
new file mode 100644
index 0000000..6d59888
--- /dev/null
+++ b/rtc_base/network.cc
@@ -0,0 +1,984 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/network.h"
+
+#if defined(WEBRTC_POSIX)
+// linux/if.h can't be included at the same time as the posix sys/if.h, and
+// it's transitively required by linux/route.h, so include that version on
+// linux instead of the standard posix one.
+#if defined(WEBRTC_LINUX)
+#include <linux/if.h>
+#include <linux/route.h>
+#elif !defined(__native_client__)
+#include <net/if.h>
+#endif
+#endif  // WEBRTC_POSIX
+
+#if defined(WEBRTC_WIN)
+#include "rtc_base/win32.h"
+#include <Iphlpapi.h>
+#elif !defined(__native_client__)
+#include "rtc_base/ifaddrs_converter.h"
+#endif
+
+#include <stdio.h>
+
+#include <algorithm>
+#include <memory>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/networkmonitor.h"
+#include "rtc_base/socket.h"  // includes something that makes windows happy
+#include "rtc_base/stream.h"
+#include "rtc_base/stringencode.h"
+#include "rtc_base/thread.h"
+
+namespace rtc {
+namespace {
+
+const uint32_t kUpdateNetworksMessage = 1;
+const uint32_t kSignalNetworksMessage = 2;
+
+// Fetch list of networks every two seconds.
+const int kNetworksUpdateIntervalMs = 2000;
+
+const int kHighestNetworkPreference = 127;
+
+typedef struct {
+  Network* net;
+  std::vector<InterfaceAddress> ips;
+} AddressList;
+
+bool CompareNetworks(const Network* a, const Network* b) {
+  if (a->prefix_length() == b->prefix_length()) {
+    if (a->name() == b->name()) {
+      return a->prefix() < b->prefix();
+    }
+  }
+  return a->name() < b->name();
+}
+
+bool SortNetworks(const Network* a, const Network* b) {
+  // Network types will be preferred above everything else while sorting
+  // Networks.
+
+  // Networks are sorted first by type.
+  if (a->type() != b->type()) {
+    return a->type() < b->type();
+  }
+
+  IPAddress ip_a = a->GetBestIP();
+  IPAddress ip_b = b->GetBestIP();
+
+  // After type, networks are sorted by IP address precedence values
+  // from RFC 3484-bis
+  if (IPAddressPrecedence(ip_a) != IPAddressPrecedence(ip_b)) {
+    return IPAddressPrecedence(ip_a) > IPAddressPrecedence(ip_b);
+  }
+
+  // TODO(mallinath) - Add VPN and Link speed conditions while sorting.
+
+  // Networks are sorted last by key.
+  return a->key() < b->key();
+}
+
+std::string AdapterTypeToString(AdapterType type) {
+  switch (type) {
+    case ADAPTER_TYPE_UNKNOWN:
+      return "Unknown";
+    case ADAPTER_TYPE_ETHERNET:
+      return "Ethernet";
+    case ADAPTER_TYPE_WIFI:
+      return "Wifi";
+    case ADAPTER_TYPE_CELLULAR:
+      return "Cellular";
+    case ADAPTER_TYPE_VPN:
+      return "VPN";
+    case ADAPTER_TYPE_LOOPBACK:
+      return "Loopback";
+    default:
+      RTC_NOTREACHED() << "Invalid type " << type;
+      return std::string();
+  }
+}
+
+#if !defined(__native_client__)
+bool IsIgnoredIPv6(const InterfaceAddress& ip) {
+  if (ip.family() != AF_INET6) {
+    return false;
+  }
+
+  // Link-local addresses require scope id to be bound successfully.
+  // However, our IPAddress structure doesn't carry that so the
+  // information is lost and causes binding failure.
+  if (IPIsLinkLocal(ip)) {
+    return true;
+  }
+
+  // Any MAC based IPv6 should be avoided to prevent the MAC tracking.
+  if (IPIsMacBased(ip)) {
+    return true;
+  }
+
+  // Ignore deprecated IPv6.
+  if (ip.ipv6_flags() & IPV6_ADDRESS_FLAG_DEPRECATED) {
+    return true;
+  }
+
+  return false;
+}
+#endif  // !defined(__native_client__)
+
+}  // namespace
+
+// These addresses are used as the targets to find out the default local address
+// on a multi-homed endpoint. They are actually DNS servers.
+const char kPublicIPv4Host[] = "8.8.8.8";
+const char kPublicIPv6Host[] = "2001:4860:4860::8888";
+const int kPublicPort = 53;  // DNS port.
+
+std::string MakeNetworkKey(const std::string& name, const IPAddress& prefix,
+                           int prefix_length) {
+  std::ostringstream ost;
+  ost << name << "%" << prefix.ToString() << "/" << prefix_length;
+  return ost.str();
+}
+
+AdapterType GetAdapterTypeFromName(const char* network_name) {
+  if (strncmp(network_name, "ipsec", 5) == 0 ||
+      strncmp(network_name, "tun", 3) == 0 ||
+      strncmp(network_name, "utun", 4) == 0 ||
+      strncmp(network_name, "tap", 3) == 0) {
+    return ADAPTER_TYPE_VPN;
+  }
+#if defined(WEBRTC_IOS)
+  // Cell networks are pdp_ipN on iOS.
+  if (strncmp(network_name, "pdp_ip", 6) == 0) {
+    return ADAPTER_TYPE_CELLULAR;
+  }
+  if (strncmp(network_name, "en", 2) == 0) {
+    // This may not be most accurate because sometimes Ethernet interface
+    // name also starts with "en" but it is better than showing it as
+    // "unknown" type.
+    // TODO(honghaiz): Write a proper IOS network manager.
+    return ADAPTER_TYPE_WIFI;
+  }
+#elif defined(WEBRTC_ANDROID)
+  if (strncmp(network_name, "rmnet", 5) == 0 ||
+      strncmp(network_name, "v4-rmnet", 8) == 0) {
+    return ADAPTER_TYPE_CELLULAR;
+  }
+  if (strncmp(network_name, "wlan", 4) == 0) {
+    return ADAPTER_TYPE_WIFI;
+  }
+#endif
+
+  return ADAPTER_TYPE_UNKNOWN;
+}
+
+NetworkManager::NetworkManager() {
+}
+
+NetworkManager::~NetworkManager() {
+}
+
+NetworkManager::EnumerationPermission NetworkManager::enumeration_permission()
+    const {
+  return ENUMERATION_ALLOWED;
+}
+
+bool NetworkManager::GetDefaultLocalAddress(int family, IPAddress* addr) const {
+  return false;
+}
+
+NetworkManagerBase::NetworkManagerBase()
+    : enumeration_permission_(NetworkManager::ENUMERATION_ALLOWED),
+      ipv6_enabled_(true) {
+}
+
+NetworkManagerBase::~NetworkManagerBase() {
+  for (const auto& kv : networks_map_) {
+    delete kv.second;
+  }
+}
+
+NetworkManager::EnumerationPermission
+NetworkManagerBase::enumeration_permission() const {
+  return enumeration_permission_;
+}
+
+void NetworkManagerBase::GetAnyAddressNetworks(NetworkList* networks) {
+  if (!ipv4_any_address_network_) {
+    const rtc::IPAddress ipv4_any_address(INADDR_ANY);
+    ipv4_any_address_network_.reset(
+        new rtc::Network("any", "any", ipv4_any_address, 0));
+    ipv4_any_address_network_->set_default_local_address_provider(this);
+    ipv4_any_address_network_->AddIP(ipv4_any_address);
+  }
+  networks->push_back(ipv4_any_address_network_.get());
+
+  if (ipv6_enabled()) {
+    if (!ipv6_any_address_network_) {
+      const rtc::IPAddress ipv6_any_address(in6addr_any);
+      ipv6_any_address_network_.reset(
+          new rtc::Network("any", "any", ipv6_any_address, 0));
+      ipv6_any_address_network_->set_default_local_address_provider(this);
+      ipv6_any_address_network_->AddIP(ipv6_any_address);
+    }
+    networks->push_back(ipv6_any_address_network_.get());
+  }
+}
+
+void NetworkManagerBase::GetNetworks(NetworkList* result) const {
+  result->clear();
+  result->insert(result->begin(), networks_.begin(), networks_.end());
+}
+
+void NetworkManagerBase::MergeNetworkList(const NetworkList& new_networks,
+                                          bool* changed) {
+  NetworkManager::Stats stats;
+  MergeNetworkList(new_networks, changed, &stats);
+}
+
+void NetworkManagerBase::MergeNetworkList(const NetworkList& new_networks,
+                                          bool* changed,
+                                          NetworkManager::Stats* stats) {
+  *changed = false;
+  // AddressList in this map will track IP addresses for all Networks
+  // with the same key.
+  std::map<std::string, AddressList> consolidated_address_list;
+  NetworkList list(new_networks);
+  std::sort(list.begin(), list.end(), CompareNetworks);
+  // First, build a set of network-keys to the ipaddresses.
+  for (Network* network : list) {
+    bool might_add_to_merged_list = false;
+    std::string key = MakeNetworkKey(network->name(),
+                                     network->prefix(),
+                                     network->prefix_length());
+    if (consolidated_address_list.find(key) ==
+        consolidated_address_list.end()) {
+      AddressList addrlist;
+      addrlist.net = network;
+      consolidated_address_list[key] = addrlist;
+      might_add_to_merged_list = true;
+    }
+    const std::vector<InterfaceAddress>& addresses = network->GetIPs();
+    AddressList& current_list = consolidated_address_list[key];
+    for (const InterfaceAddress& address : addresses) {
+      current_list.ips.push_back(address);
+    }
+    if (!might_add_to_merged_list) {
+      delete network;
+    } else {
+      if (current_list.ips[0].family() == AF_INET) {
+        stats->ipv4_network_count++;
+      } else {
+        RTC_DCHECK(current_list.ips[0].family() == AF_INET6);
+        stats->ipv6_network_count++;
+      }
+    }
+  }
+
+  // Next, look for existing network objects to re-use.
+  // Result of Network merge. Element in this list should have unique key.
+  NetworkList merged_list;
+  for (const auto& kv : consolidated_address_list) {
+    const std::string& key = kv.first;
+    Network* net = kv.second.net;
+    auto existing = networks_map_.find(key);
+    if (existing == networks_map_.end()) {
+      // This network is new. Place it in the network map.
+      merged_list.push_back(net);
+      networks_map_[key] = net;
+      net->set_id(next_available_network_id_++);
+      // Also, we might have accumulated IPAddresses from the first
+      // step, set it here.
+      net->SetIPs(kv.second.ips, true);
+      *changed = true;
+    } else {
+      // This network exists in the map already. Reset its IP addresses.
+      Network* existing_net = existing->second;
+      *changed = existing_net->SetIPs(kv.second.ips, *changed);
+      merged_list.push_back(existing_net);
+      if (net->type() != ADAPTER_TYPE_UNKNOWN &&
+          net->type() != existing_net->type()) {
+        existing_net->set_type(net->type());
+        *changed = true;
+      }
+      // If the existing network was not active, networks have changed.
+      if (!existing_net->active()) {
+        *changed = true;
+      }
+      RTC_DCHECK(net->active());
+      if (existing_net != net) {
+        delete net;
+      }
+    }
+  }
+  // It may still happen that the merged list is a subset of |networks_|.
+  // To detect this change, we compare their sizes.
+  if (merged_list.size() != networks_.size()) {
+    *changed = true;
+  }
+
+  // If the network list changes, we re-assign |networks_| to the merged list
+  // and re-sort it.
+  if (*changed) {
+    networks_ = merged_list;
+    // Reset the active states of all networks.
+    for (const auto& kv : networks_map_) {
+      Network* network = kv.second;
+      // If |network| is in the newly generated |networks_|, it is active.
+      bool found = std::find(networks_.begin(), networks_.end(), network) !=
+                   networks_.end();
+      network->set_active(found);
+    }
+    std::sort(networks_.begin(), networks_.end(), SortNetworks);
+    // Now network interfaces are sorted, we should set the preference value
+    // for each of the interfaces we are planning to use.
+    // Preference order of network interfaces might have changed from previous
+    // sorting due to addition of higher preference network interface.
+    // Since we have already sorted the network interfaces based on our
+    // requirements, we will just assign a preference value starting with 127,
+    // in decreasing order.
+    int pref = kHighestNetworkPreference;
+    for (Network* network : networks_) {
+      network->set_preference(pref);
+      if (pref > 0) {
+        --pref;
+      } else {
+        RTC_LOG(LS_ERROR) << "Too many network interfaces to handle!";
+        break;
+      }
+    }
+  }
+}
+
+void NetworkManagerBase::set_default_local_addresses(const IPAddress& ipv4,
+                                                     const IPAddress& ipv6) {
+  if (ipv4.family() == AF_INET) {
+    default_local_ipv4_address_ = ipv4;
+  }
+  if (ipv6.family() == AF_INET6) {
+    default_local_ipv6_address_ = ipv6;
+  }
+}
+
+bool NetworkManagerBase::GetDefaultLocalAddress(int family,
+                                                IPAddress* ipaddr) const {
+  if (family == AF_INET && !default_local_ipv4_address_.IsNil()) {
+    *ipaddr = default_local_ipv4_address_;
+    return true;
+  } else if (family == AF_INET6 && !default_local_ipv6_address_.IsNil()) {
+    Network* ipv6_network = GetNetworkFromAddress(default_local_ipv6_address_);
+    if (ipv6_network) {
+      // If the default ipv6 network's BestIP is different than
+      // default_local_ipv6_address_, use it instead.
+      // This is to prevent potential IP address leakage. See WebRTC bug 5376.
+      *ipaddr = ipv6_network->GetBestIP();
+    } else {
+      *ipaddr = default_local_ipv6_address_;
+    }
+    return true;
+  }
+  return false;
+}
+
+Network* NetworkManagerBase::GetNetworkFromAddress(
+    const rtc::IPAddress& ip) const {
+  for (Network* network : networks_) {
+    const auto& ips = network->GetIPs();
+    if (std::find_if(ips.begin(), ips.end(),
+                     [ip](const InterfaceAddress& existing_ip) {
+                       return ip == static_cast<rtc::IPAddress>(existing_ip);
+                     }) != ips.end()) {
+      return network;
+    }
+  }
+  return nullptr;
+}
+
+BasicNetworkManager::BasicNetworkManager()
+    : thread_(nullptr),
+      sent_first_update_(false),
+      start_count_(0),
+      ignore_non_default_routes_(false) {}
+
+BasicNetworkManager::~BasicNetworkManager() {
+}
+
+void BasicNetworkManager::OnNetworksChanged() {
+  RTC_LOG(LS_INFO) << "Network change was observed";
+  UpdateNetworksOnce();
+}
+
+#if defined(__native_client__)
+
+bool BasicNetworkManager::CreateNetworks(bool include_ignored,
+                                         NetworkList* networks) const {
+  RTC_NOTREACHED();
+  RTC_LOG(LS_WARNING) << "BasicNetworkManager doesn't work on NaCl yet";
+  return false;
+}
+
+#elif defined(WEBRTC_POSIX)
+void BasicNetworkManager::ConvertIfAddrs(struct ifaddrs* interfaces,
+                                         IfAddrsConverter* ifaddrs_converter,
+                                         bool include_ignored,
+                                         NetworkList* networks) const {
+  NetworkMap current_networks;
+
+  for (struct ifaddrs* cursor = interfaces; cursor != nullptr;
+       cursor = cursor->ifa_next) {
+    IPAddress prefix;
+    IPAddress mask;
+    InterfaceAddress ip;
+    int scope_id = 0;
+
+    // Some interfaces may not have address assigned.
+    if (!cursor->ifa_addr || !cursor->ifa_netmask) {
+      continue;
+    }
+    // Skip ones which are down.
+    if (!(cursor->ifa_flags & IFF_RUNNING)) {
+      continue;
+    }
+    // Skip unknown family.
+    if (cursor->ifa_addr->sa_family != AF_INET &&
+        cursor->ifa_addr->sa_family != AF_INET6) {
+      continue;
+    }
+    // Skip IPv6 if not enabled.
+    if (cursor->ifa_addr->sa_family == AF_INET6 && !ipv6_enabled()) {
+      continue;
+    }
+    // Convert to InterfaceAddress.
+    if (!ifaddrs_converter->ConvertIfAddrsToIPAddress(cursor, &ip, &mask)) {
+      continue;
+    }
+
+    // Special case for IPv6 address.
+    if (cursor->ifa_addr->sa_family == AF_INET6) {
+      if (IsIgnoredIPv6(ip)) {
+        continue;
+      }
+      scope_id =
+          reinterpret_cast<sockaddr_in6*>(cursor->ifa_addr)->sin6_scope_id;
+    }
+
+    AdapterType adapter_type = ADAPTER_TYPE_UNKNOWN;
+    if (cursor->ifa_flags & IFF_LOOPBACK) {
+      adapter_type = ADAPTER_TYPE_LOOPBACK;
+    } else {
+      // If there is a network_monitor, use it to get the adapter type.
+      // Otherwise, get the adapter type based on a few name matching rules.
+      if (network_monitor_) {
+        adapter_type = network_monitor_->GetAdapterType(cursor->ifa_name);
+      }
+      if (adapter_type == ADAPTER_TYPE_UNKNOWN) {
+        adapter_type = GetAdapterTypeFromName(cursor->ifa_name);
+      }
+    }
+    int prefix_length = CountIPMaskBits(mask);
+    prefix = TruncateIP(ip, prefix_length);
+    std::string key = MakeNetworkKey(std::string(cursor->ifa_name),
+                                     prefix, prefix_length);
+    auto iter = current_networks.find(key);
+    if (iter == current_networks.end()) {
+      // TODO(phoglund): Need to recognize other types as well.
+      std::unique_ptr<Network> network(
+          new Network(cursor->ifa_name, cursor->ifa_name, prefix, prefix_length,
+                      adapter_type));
+      network->set_default_local_address_provider(this);
+      network->set_scope_id(scope_id);
+      network->AddIP(ip);
+      network->set_ignored(IsIgnoredNetwork(*network));
+      if (include_ignored || !network->ignored()) {
+        current_networks[key] = network.get();
+        networks->push_back(network.release());
+      }
+    } else {
+      Network* existing_network = iter->second;
+      existing_network->AddIP(ip);
+      if (adapter_type != ADAPTER_TYPE_UNKNOWN) {
+        existing_network->set_type(adapter_type);
+      }
+    }
+  }
+}
+
+bool BasicNetworkManager::CreateNetworks(bool include_ignored,
+                                         NetworkList* networks) const {
+  struct ifaddrs* interfaces;
+  int error = getifaddrs(&interfaces);
+  if (error != 0) {
+    RTC_LOG_ERR(LERROR) << "getifaddrs failed to gather interface data: "
+                        << error;
+    return false;
+  }
+
+  std::unique_ptr<IfAddrsConverter> ifaddrs_converter(CreateIfAddrsConverter());
+  ConvertIfAddrs(interfaces, ifaddrs_converter.get(), include_ignored,
+                 networks);
+
+  freeifaddrs(interfaces);
+  return true;
+}
+
+#elif defined(WEBRTC_WIN)
+
+unsigned int GetPrefix(PIP_ADAPTER_PREFIX prefixlist,
+              const IPAddress& ip, IPAddress* prefix) {
+  IPAddress current_prefix;
+  IPAddress best_prefix;
+  unsigned int best_length = 0;
+  while (prefixlist) {
+    // Look for the longest matching prefix in the prefixlist.
+    if (prefixlist->Address.lpSockaddr == nullptr ||
+        prefixlist->Address.lpSockaddr->sa_family != ip.family()) {
+      prefixlist = prefixlist->Next;
+      continue;
+    }
+    switch (prefixlist->Address.lpSockaddr->sa_family) {
+      case AF_INET: {
+        sockaddr_in* v4_addr =
+            reinterpret_cast<sockaddr_in*>(prefixlist->Address.lpSockaddr);
+        current_prefix = IPAddress(v4_addr->sin_addr);
+        break;
+      }
+      case AF_INET6: {
+          sockaddr_in6* v6_addr =
+              reinterpret_cast<sockaddr_in6*>(prefixlist->Address.lpSockaddr);
+          current_prefix = IPAddress(v6_addr->sin6_addr);
+          break;
+      }
+      default: {
+        prefixlist = prefixlist->Next;
+        continue;
+      }
+    }
+    if (TruncateIP(ip, prefixlist->PrefixLength) == current_prefix &&
+        prefixlist->PrefixLength > best_length) {
+      best_prefix = current_prefix;
+      best_length = prefixlist->PrefixLength;
+    }
+    prefixlist = prefixlist->Next;
+  }
+  *prefix = best_prefix;
+  return best_length;
+}
+
+bool BasicNetworkManager::CreateNetworks(bool include_ignored,
+                                         NetworkList* networks) const {
+  NetworkMap current_networks;
+  // MSDN recommends a 15KB buffer for the first try at GetAdaptersAddresses.
+  size_t buffer_size = 16384;
+  std::unique_ptr<char[]> adapter_info(new char[buffer_size]);
+  PIP_ADAPTER_ADDRESSES adapter_addrs =
+      reinterpret_cast<PIP_ADAPTER_ADDRESSES>(adapter_info.get());
+  int adapter_flags = (GAA_FLAG_SKIP_DNS_SERVER | GAA_FLAG_SKIP_ANYCAST |
+                       GAA_FLAG_SKIP_MULTICAST | GAA_FLAG_INCLUDE_PREFIX);
+  int ret = 0;
+  do {
+    adapter_info.reset(new char[buffer_size]);
+    adapter_addrs = reinterpret_cast<PIP_ADAPTER_ADDRESSES>(adapter_info.get());
+    ret = GetAdaptersAddresses(AF_UNSPEC, adapter_flags,
+                               0, adapter_addrs,
+                               reinterpret_cast<PULONG>(&buffer_size));
+  } while (ret == ERROR_BUFFER_OVERFLOW);
+  if (ret != ERROR_SUCCESS) {
+    return false;
+  }
+  int count = 0;
+  while (adapter_addrs) {
+    if (adapter_addrs->OperStatus == IfOperStatusUp) {
+      PIP_ADAPTER_UNICAST_ADDRESS address = adapter_addrs->FirstUnicastAddress;
+      PIP_ADAPTER_PREFIX prefixlist = adapter_addrs->FirstPrefix;
+      std::string name;
+      std::string description;
+#if !defined(NDEBUG)
+      name = ToUtf8(adapter_addrs->FriendlyName,
+                    wcslen(adapter_addrs->FriendlyName));
+#endif
+      description = ToUtf8(adapter_addrs->Description,
+                           wcslen(adapter_addrs->Description));
+      for (; address; address = address->Next) {
+#if defined(NDEBUG)
+        name = rtc::ToString(count);
+#endif
+
+        IPAddress ip;
+        int scope_id = 0;
+        std::unique_ptr<Network> network;
+        switch (address->Address.lpSockaddr->sa_family) {
+          case AF_INET: {
+            sockaddr_in* v4_addr =
+                reinterpret_cast<sockaddr_in*>(address->Address.lpSockaddr);
+            ip = IPAddress(v4_addr->sin_addr);
+            break;
+          }
+          case AF_INET6: {
+            if (ipv6_enabled()) {
+              sockaddr_in6* v6_addr =
+                  reinterpret_cast<sockaddr_in6*>(address->Address.lpSockaddr);
+              scope_id = v6_addr->sin6_scope_id;
+              ip = IPAddress(v6_addr->sin6_addr);
+
+              if (IsIgnoredIPv6(ip)) {
+                continue;
+              }
+
+              break;
+            } else {
+              continue;
+            }
+          }
+          default: {
+            continue;
+          }
+        }
+
+        IPAddress prefix;
+        int prefix_length = GetPrefix(prefixlist, ip, &prefix);
+        std::string key = MakeNetworkKey(name, prefix, prefix_length);
+        auto existing_network = current_networks.find(key);
+        if (existing_network == current_networks.end()) {
+          AdapterType adapter_type = ADAPTER_TYPE_UNKNOWN;
+          if (adapter_addrs->IfType == IF_TYPE_SOFTWARE_LOOPBACK) {
+            // TODO(phoglund): Need to recognize other types as well.
+            adapter_type = ADAPTER_TYPE_LOOPBACK;
+          }
+          std::unique_ptr<Network> network(new Network(
+              name, description, prefix, prefix_length, adapter_type));
+          network->set_default_local_address_provider(this);
+          network->set_scope_id(scope_id);
+          network->AddIP(ip);
+          bool ignored = IsIgnoredNetwork(*network);
+          network->set_ignored(ignored);
+          if (include_ignored || !network->ignored()) {
+            current_networks[key] = network.get();
+            networks->push_back(network.release());
+          }
+        } else {
+          (*existing_network).second->AddIP(ip);
+        }
+      }
+      // Count is per-adapter - all 'Networks' created from the same
+      // adapter need to have the same name.
+      ++count;
+    }
+    adapter_addrs = adapter_addrs->Next;
+  }
+  return true;
+}
+#endif  // WEBRTC_WIN
+
+#if defined(WEBRTC_LINUX)
+bool IsDefaultRoute(const std::string& network_name) {
+  FileStream fs;
+  if (!fs.Open("/proc/net/route", "r", nullptr)) {
+    RTC_LOG(LS_WARNING)
+        << "Couldn't read /proc/net/route, skipping default "
+        << "route check (assuming everything is a default route).";
+    return true;
+  } else {
+    std::string line;
+    while (fs.ReadLine(&line) == SR_SUCCESS) {
+      char iface_name[256];
+      unsigned int iface_ip, iface_gw, iface_mask, iface_flags;
+      if (sscanf(line.c_str(),
+                 "%255s %8X %8X %4X %*d %*u %*d %8X",
+                 iface_name, &iface_ip, &iface_gw,
+                 &iface_flags, &iface_mask) == 5 &&
+          network_name == iface_name &&
+          iface_mask == 0 &&
+          (iface_flags & (RTF_UP | RTF_HOST)) == RTF_UP) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+#endif
+
+bool BasicNetworkManager::IsIgnoredNetwork(const Network& network) const {
+  // Ignore networks on the explicit ignore list.
+  for (const std::string& ignored_name : network_ignore_list_) {
+    if (network.name() == ignored_name) {
+      return true;
+    }
+  }
+
+#if defined(WEBRTC_POSIX)
+  // Filter out VMware/VirtualBox interfaces, typically named vmnet1,
+  // vmnet8, or vboxnet0.
+  if (strncmp(network.name().c_str(), "vmnet", 5) == 0 ||
+      strncmp(network.name().c_str(), "vnic", 4) == 0 ||
+      strncmp(network.name().c_str(), "vboxnet", 7) == 0) {
+    return true;
+  }
+#if defined(WEBRTC_LINUX)
+  // Make sure this is a default route, if we're ignoring non-defaults.
+  if (ignore_non_default_routes_ && !IsDefaultRoute(network.name())) {
+    return true;
+  }
+#endif
+#elif defined(WEBRTC_WIN)
+  // Ignore any HOST side vmware adapters with a description like:
+  // VMware Virtual Ethernet Adapter for VMnet1
+  // but don't ignore any GUEST side adapters with a description like:
+  // VMware Accelerated AMD PCNet Adapter #2
+  if (strstr(network.description().c_str(), "VMnet") != nullptr) {
+    return true;
+  }
+#endif
+
+  // Ignore any networks with a 0.x.y.z IP
+  if (network.prefix().family() == AF_INET) {
+    return (network.prefix().v4AddressAsHostOrderInteger() < 0x01000000);
+  }
+
+  return false;
+}
+
+void BasicNetworkManager::StartUpdating() {
+  thread_ = Thread::Current();
+  if (start_count_) {
+    // If network interfaces are already discovered and signal is sent,
+    // we should trigger network signal immediately for the new clients
+    // to start allocating ports.
+    if (sent_first_update_)
+      thread_->Post(RTC_FROM_HERE, this, kSignalNetworksMessage);
+  } else {
+    thread_->Post(RTC_FROM_HERE, this, kUpdateNetworksMessage);
+    StartNetworkMonitor();
+  }
+  ++start_count_;
+}
+
+void BasicNetworkManager::StopUpdating() {
+  RTC_DCHECK(Thread::Current() == thread_);
+  if (!start_count_)
+    return;
+
+  --start_count_;
+  if (!start_count_) {
+    thread_->Clear(this);
+    sent_first_update_ = false;
+    StopNetworkMonitor();
+  }
+}
+
+void BasicNetworkManager::StartNetworkMonitor() {
+  NetworkMonitorFactory* factory = NetworkMonitorFactory::GetFactory();
+  if (factory == nullptr) {
+    return;
+  }
+  if (!network_monitor_) {
+    network_monitor_.reset(factory->CreateNetworkMonitor());
+    if (!network_monitor_) {
+      return;
+    }
+    network_monitor_->SignalNetworksChanged.connect(
+        this, &BasicNetworkManager::OnNetworksChanged);
+  }
+  network_monitor_->Start();
+}
+
+void BasicNetworkManager::StopNetworkMonitor() {
+  if (!network_monitor_) {
+    return;
+  }
+  network_monitor_->Stop();
+}
+
+void BasicNetworkManager::OnMessage(Message* msg) {
+  switch (msg->message_id) {
+    case kUpdateNetworksMessage: {
+      UpdateNetworksContinually();
+      break;
+    }
+    case kSignalNetworksMessage:  {
+      SignalNetworksChanged();
+      break;
+    }
+    default:
+      RTC_NOTREACHED();
+  }
+}
+
+IPAddress BasicNetworkManager::QueryDefaultLocalAddress(int family) const {
+  RTC_DCHECK(thread_ == Thread::Current());
+  RTC_DCHECK(thread_->socketserver() != nullptr);
+  RTC_DCHECK(family == AF_INET || family == AF_INET6);
+
+  std::unique_ptr<AsyncSocket> socket(
+      thread_->socketserver()->CreateAsyncSocket(family, SOCK_DGRAM));
+  if (!socket) {
+    RTC_LOG_ERR(LERROR) << "Socket creation failed";
+    return IPAddress();
+  }
+
+  if (socket->Connect(SocketAddress(
+          family == AF_INET ? kPublicIPv4Host : kPublicIPv6Host, kPublicPort)) <
+      0) {
+    if (socket->GetError() != ENETUNREACH
+        && socket->GetError() != EHOSTUNREACH) {
+      // Ignore the expected case of "host/net unreachable" - which happens if
+      // the network is V4- or V6-only.
+      RTC_LOG(LS_INFO) << "Connect failed with " << socket->GetError();
+    }
+    return IPAddress();
+  }
+  return socket->GetLocalAddress().ipaddr();
+}
+
+void BasicNetworkManager::UpdateNetworksOnce() {
+  if (!start_count_)
+    return;
+
+  RTC_DCHECK(Thread::Current() == thread_);
+
+  NetworkList list;
+  if (!CreateNetworks(false, &list)) {
+    SignalError();
+  } else {
+    bool changed;
+    NetworkManager::Stats stats;
+    MergeNetworkList(list, &changed, &stats);
+    set_default_local_addresses(QueryDefaultLocalAddress(AF_INET),
+                                QueryDefaultLocalAddress(AF_INET6));
+    if (changed || !sent_first_update_) {
+      SignalNetworksChanged();
+      sent_first_update_ = true;
+    }
+  }
+}
+
+void BasicNetworkManager::UpdateNetworksContinually() {
+  UpdateNetworksOnce();
+  thread_->PostDelayed(RTC_FROM_HERE, kNetworksUpdateIntervalMs, this,
+                       kUpdateNetworksMessage);
+}
+
+void BasicNetworkManager::DumpNetworks() {
+  NetworkList list;
+  GetNetworks(&list);
+  RTC_LOG(LS_INFO) << "NetworkManager detected " << list.size() << " networks:";
+  for (const Network* network : list) {
+    RTC_LOG(LS_INFO) << network->ToString() << ": " << network->description()
+                     << ", active ? " << network->active()
+                     << ((network->ignored()) ? ", Ignored" : "");
+  }
+}
+
+Network::Network(const std::string& name,
+                 const std::string& desc,
+                 const IPAddress& prefix,
+                 int prefix_length)
+    : name_(name),
+      description_(desc),
+      prefix_(prefix),
+      prefix_length_(prefix_length),
+      key_(MakeNetworkKey(name, prefix, prefix_length)),
+      scope_id_(0),
+      ignored_(false),
+      type_(ADAPTER_TYPE_UNKNOWN),
+      preference_(0) {}
+
+Network::Network(const std::string& name,
+                 const std::string& desc,
+                 const IPAddress& prefix,
+                 int prefix_length,
+                 AdapterType type)
+    : name_(name),
+      description_(desc),
+      prefix_(prefix),
+      prefix_length_(prefix_length),
+      key_(MakeNetworkKey(name, prefix, prefix_length)),
+      scope_id_(0),
+      ignored_(false),
+      type_(type),
+      preference_(0) {}
+
+Network::Network(const Network&) = default;
+
+Network::~Network() = default;
+
+// Sets the addresses of this network. Returns true if the address set changed.
+// Change detection is short circuited if the changed argument is true.
+bool Network::SetIPs(const std::vector<InterfaceAddress>& ips, bool changed) {
+  // Detect changes with a nested loop; n-squared but we expect on the order
+  // of 2-3 addresses per network.
+  changed = changed || ips.size() != ips_.size();
+  if (!changed) {
+    for (const InterfaceAddress& ip : ips) {
+      if (std::find(ips_.begin(), ips_.end(), ip) == ips_.end()) {
+        changed = true;
+        break;
+      }
+    }
+  }
+
+  ips_ = ips;
+  return changed;
+}
+
+// Select the best IP address to use from this Network.
+IPAddress Network::GetBestIP() const {
+  if (ips_.size() == 0) {
+    return IPAddress();
+  }
+
+  if (prefix_.family() == AF_INET) {
+    return static_cast<IPAddress>(ips_.at(0));
+  }
+
+  InterfaceAddress selected_ip, ula_ip;
+
+  for (const InterfaceAddress& ip : ips_) {
+    // Ignore any address which has been deprecated already.
+    if (ip.ipv6_flags() & IPV6_ADDRESS_FLAG_DEPRECATED)
+      continue;
+
+    // ULA address should only be returned when we have no other
+    // global IP.
+    if (IPIsULA(static_cast<const IPAddress&>(ip))) {
+      ula_ip = ip;
+      continue;
+    }
+    selected_ip = ip;
+
+    // Search could stop once a temporary non-deprecated one is found.
+    if (ip.ipv6_flags() & IPV6_ADDRESS_FLAG_TEMPORARY)
+      break;
+  }
+
+  // No proper global IPv6 address found, use ULA instead.
+  if (IPIsUnspec(selected_ip) && !IPIsUnspec(ula_ip)) {
+    selected_ip = ula_ip;
+  }
+
+  return static_cast<IPAddress>(selected_ip);
+}
+
+std::string Network::ToString() const {
+  std::stringstream ss;
+  // Print out the first space-terminated token of the network desc, plus
+  // the IP address.
+  ss << "Net[" << description_.substr(0, description_.find(' '))
+     << ":" << prefix_.ToSensitiveString() << "/" << prefix_length_
+     << ":" << AdapterTypeToString(type_) << "]";
+  return ss.str();
+}
+
+}  // namespace rtc
diff --git a/rtc_base/network.h b/rtc_base/network.h
new file mode 100644
index 0000000..49934cc
--- /dev/null
+++ b/rtc_base/network.h
@@ -0,0 +1,432 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_NETWORK_H_
+#define RTC_BASE_NETWORK_H_
+
+#include <stdint.h>
+
+#include <deque>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "rtc_base/ipaddress.h"
+#include "rtc_base/messagehandler.h"
+#include "rtc_base/networkmonitor.h"
+#include "rtc_base/sigslot.h"
+
+#if defined(WEBRTC_POSIX)
+struct ifaddrs;
+#endif  // defined(WEBRTC_POSIX)
+
+namespace rtc {
+
+extern const char kPublicIPv4Host[];
+extern const char kPublicIPv6Host[];
+
+class IfAddrsConverter;
+class Network;
+class NetworkMonitorInterface;
+class Thread;
+
+// By default, ignore loopback interfaces on the host.
+const int kDefaultNetworkIgnoreMask = ADAPTER_TYPE_LOOPBACK;
+
+// Makes a string key for this network. Used in the network manager's maps.
+// Network objects are keyed on interface name, network prefix and the
+// length of that prefix.
+std::string MakeNetworkKey(const std::string& name, const IPAddress& prefix,
+                           int prefix_length);
+
+// Utility function that attempts to determine an adapter type by an interface
+// name (e.g., "wlan0"). Can be used by NetworkManager subclasses when other
+// mechanisms fail to determine the type.
+AdapterType GetAdapterTypeFromName(const char* network_name);
+
+class DefaultLocalAddressProvider {
+ public:
+  virtual ~DefaultLocalAddressProvider() = default;
+  // The default local address is the local address used in multi-homed endpoint
+  // when the any address (0.0.0.0 or ::) is used as the local address. It's
+  // important to check the return value as a IP family may not be enabled.
+  virtual bool GetDefaultLocalAddress(int family, IPAddress* ipaddr) const = 0;
+};
+
+// Generic network manager interface. It provides list of local
+// networks.
+//
+// Every method of NetworkManager (including the destructor) must be called on
+// the same thread, except for the constructor which may be called on any
+// thread.
+//
+// This allows constructing a NetworkManager subclass on one thread and
+// passing it into an object that uses it on a different thread.
+class NetworkManager : public DefaultLocalAddressProvider {
+ public:
+  typedef std::vector<Network*> NetworkList;
+
+  // This enum indicates whether adapter enumeration is allowed.
+  enum EnumerationPermission {
+    ENUMERATION_ALLOWED,  // Adapter enumeration is allowed. Getting 0 network
+                          // from GetNetworks means that there is no network
+                          // available.
+    ENUMERATION_BLOCKED,  // Adapter enumeration is disabled.
+                          // GetAnyAddressNetworks() should be used instead.
+  };
+
+  NetworkManager();
+  ~NetworkManager() override;
+
+  // Called when network list is updated.
+  sigslot::signal0<> SignalNetworksChanged;
+
+  // Indicates a failure when getting list of network interfaces.
+  sigslot::signal0<> SignalError;
+
+  // This should be called on the NetworkManager's thread before the
+  // NetworkManager is used. Subclasses may override this if necessary.
+  virtual void Initialize() {}
+
+  // Start/Stop monitoring of network interfaces
+  // list. SignalNetworksChanged or SignalError is emitted immediately
+  // after StartUpdating() is called. After that SignalNetworksChanged
+  // is emitted whenever list of networks changes.
+  virtual void StartUpdating() = 0;
+  virtual void StopUpdating() = 0;
+
+  // Returns the current list of networks available on this machine.
+  // StartUpdating() must be called before this method is called.
+  // It makes sure that repeated calls return the same object for a
+  // given network, so that quality is tracked appropriately. Does not
+  // include ignored networks.
+  virtual void GetNetworks(NetworkList* networks) const = 0;
+
+  // return the current permission state of GetNetworks()
+  virtual EnumerationPermission enumeration_permission() const;
+
+  // "AnyAddressNetwork" is a network which only contains single "any address"
+  // IP address.  (i.e. INADDR_ANY for IPv4 or in6addr_any for IPv6). This is
+  // useful as binding to such interfaces allow default routing behavior like
+  // http traffic.
+  //
+  // This method appends the "any address" networks to the list, such that this
+  // can optionally be called after GetNetworks.
+  //
+  // TODO(guoweis): remove this body when chromium implements this.
+  virtual void GetAnyAddressNetworks(NetworkList* networks) {}
+
+  // Dumps the current list of networks in the network manager.
+  virtual void DumpNetworks() {}
+  bool GetDefaultLocalAddress(int family, IPAddress* ipaddr) const override;
+
+  struct Stats {
+    int ipv4_network_count;
+    int ipv6_network_count;
+    Stats() {
+      ipv4_network_count = 0;
+      ipv6_network_count = 0;
+    }
+  };
+};
+
+// Base class for NetworkManager implementations.
+class NetworkManagerBase : public NetworkManager {
+ public:
+  NetworkManagerBase();
+  ~NetworkManagerBase() override;
+
+  void GetNetworks(NetworkList* networks) const override;
+  void GetAnyAddressNetworks(NetworkList* networks) override;
+
+  // Defaults to true.
+  // TODO(deadbeef): Remove this. Nothing but tests use this; IPv6 is enabled
+  // by default everywhere else.
+  bool ipv6_enabled() const { return ipv6_enabled_; }
+  void set_ipv6_enabled(bool enabled) { ipv6_enabled_ = enabled; }
+
+  EnumerationPermission enumeration_permission() const override;
+
+  bool GetDefaultLocalAddress(int family, IPAddress* ipaddr) const override;
+
+ protected:
+  typedef std::map<std::string, Network*> NetworkMap;
+  // Updates |networks_| with the networks listed in |list|. If
+  // |network_map_| already has a Network object for a network listed
+  // in the |list| then it is reused. Accept ownership of the Network
+  // objects in the |list|. |changed| will be set to true if there is
+  // any change in the network list.
+  void MergeNetworkList(const NetworkList& list, bool* changed);
+
+  // |stats| will be populated even if |*changed| is false.
+  void MergeNetworkList(const NetworkList& list,
+                        bool* changed,
+                        NetworkManager::Stats* stats);
+
+  void set_enumeration_permission(EnumerationPermission state) {
+    enumeration_permission_ = state;
+  }
+
+  void set_default_local_addresses(const IPAddress& ipv4,
+                                   const IPAddress& ipv6);
+
+ private:
+  friend class NetworkTest;
+
+  Network* GetNetworkFromAddress(const rtc::IPAddress& ip) const;
+
+  EnumerationPermission enumeration_permission_;
+
+  NetworkList networks_;
+
+  NetworkMap networks_map_;
+  bool ipv6_enabled_;
+
+  std::unique_ptr<rtc::Network> ipv4_any_address_network_;
+  std::unique_ptr<rtc::Network> ipv6_any_address_network_;
+
+  IPAddress default_local_ipv4_address_;
+  IPAddress default_local_ipv6_address_;
+  // We use 16 bits to save the bandwidth consumption when sending the network
+  // id over the Internet. It is OK that the 16-bit integer overflows to get a
+  // network id 0 because we only compare the network ids in the old and the new
+  // best connections in the transport channel.
+  uint16_t next_available_network_id_ = 1;
+};
+
+// Basic implementation of the NetworkManager interface that gets list
+// of networks using OS APIs.
+class BasicNetworkManager : public NetworkManagerBase,
+                            public MessageHandler,
+                            public sigslot::has_slots<> {
+ public:
+  BasicNetworkManager();
+  ~BasicNetworkManager() override;
+
+  void StartUpdating() override;
+  void StopUpdating() override;
+
+  void DumpNetworks() override;
+
+  // MessageHandler interface.
+  void OnMessage(Message* msg) override;
+  bool started() { return start_count_ > 0; }
+
+  // Sets the network ignore list, which is empty by default. Any network on the
+  // ignore list will be filtered from network enumeration results.
+  void set_network_ignore_list(const std::vector<std::string>& list) {
+    network_ignore_list_ = list;
+  }
+
+#if defined(WEBRTC_LINUX)
+  // Sets the flag for ignoring non-default routes.
+  // Defaults to false.
+  void set_ignore_non_default_routes(bool value) {
+    ignore_non_default_routes_ = value;
+  }
+#endif
+
+ protected:
+#if defined(WEBRTC_POSIX)
+  // Separated from CreateNetworks for tests.
+  void ConvertIfAddrs(ifaddrs* interfaces,
+                      IfAddrsConverter* converter,
+                      bool include_ignored,
+                      NetworkList* networks) const;
+#endif  // defined(WEBRTC_POSIX)
+
+  // Creates a network object for each network available on the machine.
+  bool CreateNetworks(bool include_ignored, NetworkList* networks) const;
+
+  // Determines if a network should be ignored. This should only be determined
+  // based on the network's property instead of any individual IP.
+  bool IsIgnoredNetwork(const Network& network) const;
+
+  // This function connects a UDP socket to a public address and returns the
+  // local address associated it. Since it binds to the "any" address
+  // internally, it returns the default local address on a multi-homed endpoint.
+  IPAddress QueryDefaultLocalAddress(int family) const;
+
+ private:
+  friend class NetworkTest;
+
+  // Creates a network monitor and listens for network updates.
+  void StartNetworkMonitor();
+  // Stops and removes the network monitor.
+  void StopNetworkMonitor();
+  // Called when it receives updates from the network monitor.
+  void OnNetworksChanged();
+
+  // Updates the networks and reschedules the next update.
+  void UpdateNetworksContinually();
+  // Only updates the networks; does not reschedule the next update.
+  void UpdateNetworksOnce();
+
+  Thread* thread_;
+  bool sent_first_update_;
+  int start_count_;
+  std::vector<std::string> network_ignore_list_;
+  bool ignore_non_default_routes_;
+  std::unique_ptr<NetworkMonitorInterface> network_monitor_;
+};
+
+// Represents a Unix-type network interface, with a name and single address.
+class Network {
+ public:
+  Network(const std::string& name,
+          const std::string& description,
+          const IPAddress& prefix,
+          int prefix_length);
+
+  Network(const std::string& name,
+          const std::string& description,
+          const IPAddress& prefix,
+          int prefix_length,
+          AdapterType type);
+  Network(const Network&);
+  ~Network();
+
+  sigslot::signal1<const Network*> SignalTypeChanged;
+
+  const DefaultLocalAddressProvider* default_local_address_provider() {
+    return default_local_address_provider_;
+  }
+  void set_default_local_address_provider(
+      const DefaultLocalAddressProvider* provider) {
+    default_local_address_provider_ = provider;
+  }
+
+  // Returns the name of the interface this network is associated wtih.
+  const std::string& name() const { return name_; }
+
+  // Returns the OS-assigned name for this network. This is useful for
+  // debugging but should not be sent over the wire (for privacy reasons).
+  const std::string& description() const { return description_; }
+
+  // Returns the prefix for this network.
+  const IPAddress& prefix() const { return prefix_; }
+  // Returns the length, in bits, of this network's prefix.
+  int prefix_length() const { return prefix_length_; }
+
+  // |key_| has unique value per network interface. Used in sorting network
+  // interfaces. Key is derived from interface name and it's prefix.
+  std::string key() const { return key_; }
+
+  // Returns the Network's current idea of the 'best' IP it has.
+  // Or return an unset IP if this network has no active addresses.
+  // Here is the rule on how we mark the IPv6 address as ignorable for WebRTC.
+  // 1) return all global temporary dynamic and non-deprecrated ones.
+  // 2) if #1 not available, return global ones.
+  // 3) if #2 not available, use ULA ipv6 as last resort. (ULA stands
+  // for unique local address, which is not route-able in open
+  // internet but might be useful for a close WebRTC deployment.
+
+  // TODO(guoweis): rule #3 actually won't happen at current
+  // implementation. The reason being that ULA address starting with
+  // 0xfc 0r 0xfd will be grouped into its own Network. The result of
+  // that is WebRTC will have one extra Network to generate candidates
+  // but the lack of rule #3 shouldn't prevent turning on IPv6 since
+  // ULA should only be tried in a close deployment anyway.
+
+  // Note that when not specifying any flag, it's treated as case global
+  // IPv6 address
+  IPAddress GetBestIP() const;
+
+  // Keep the original function here for now.
+  // TODO(guoweis): Remove this when all callers are migrated to GetBestIP().
+  IPAddress ip() const { return GetBestIP(); }
+
+  // Adds an active IP address to this network. Does not check for duplicates.
+  void AddIP(const InterfaceAddress& ip) { ips_.push_back(ip); }
+
+  // Sets the network's IP address list. Returns true if new IP addresses were
+  // detected. Passing true to already_changed skips this check.
+  bool SetIPs(const std::vector<InterfaceAddress>& ips, bool already_changed);
+  // Get the list of IP Addresses associated with this network.
+  const std::vector<InterfaceAddress>& GetIPs() const { return ips_;}
+  // Clear the network's list of addresses.
+  void ClearIPs() { ips_.clear(); }
+
+  // Returns the scope-id of the network's address.
+  // Should only be relevant for link-local IPv6 addresses.
+  int scope_id() const { return scope_id_; }
+  void set_scope_id(int id) { scope_id_ = id; }
+
+  // Indicates whether this network should be ignored, perhaps because
+  // the IP is 0, or the interface is one we know is invalid.
+  bool ignored() const { return ignored_; }
+  void set_ignored(bool ignored) { ignored_ = ignored; }
+
+  AdapterType type() const { return type_; }
+  void set_type(AdapterType type) {
+    if (type_ == type) {
+      return;
+    }
+    type_ = type;
+    SignalTypeChanged(this);
+  }
+
+  uint16_t GetCost() const {
+    switch (type_) {
+      case rtc::ADAPTER_TYPE_ETHERNET:
+      case rtc::ADAPTER_TYPE_LOOPBACK:
+        return kNetworkCostMin;
+      case rtc::ADAPTER_TYPE_WIFI:
+      case rtc::ADAPTER_TYPE_VPN:
+        return kNetworkCostLow;
+      case rtc::ADAPTER_TYPE_CELLULAR:
+        return kNetworkCostHigh;
+      default:
+        return kNetworkCostUnknown;
+    }
+  }
+  // A unique id assigned by the network manager, which may be signaled
+  // to the remote side in the candidate.
+  uint16_t id() const { return id_; }
+  void set_id(uint16_t id) { id_ = id; }
+
+  int preference() const { return preference_; }
+  void set_preference(int preference) { preference_ = preference; }
+
+  // When we enumerate networks and find a previously-seen network is missing,
+  // we do not remove it (because it may be used elsewhere). Instead, we mark
+  // it inactive, so that we can detect network changes properly.
+  bool active() const { return active_; }
+  void set_active(bool active) {
+    if (active_ != active) {
+      active_ = active;
+    }
+  }
+
+  // Debugging description of this network
+  std::string ToString() const;
+
+ private:
+  const DefaultLocalAddressProvider* default_local_address_provider_ = nullptr;
+  std::string name_;
+  std::string description_;
+  IPAddress prefix_;
+  int prefix_length_;
+  std::string key_;
+  std::vector<InterfaceAddress> ips_;
+  int scope_id_;
+  bool ignored_;
+  AdapterType type_;
+  int preference_;
+  bool active_ = true;
+  uint16_t id_ = 0;
+
+  friend class NetworkManager;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_NETWORK_H_
diff --git a/rtc_base/network_constants.h b/rtc_base/network_constants.h
new file mode 100644
index 0000000..b4c8bea
--- /dev/null
+++ b/rtc_base/network_constants.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_NETWORK_CONSTANTS_H_
+#define RTC_BASE_NETWORK_CONSTANTS_H_
+
+#include <stdint.h>
+
+namespace rtc {
+
+static const uint16_t kNetworkCostMax = 999;
+static const uint16_t kNetworkCostHigh = 900;
+static const uint16_t kNetworkCostUnknown = 50;
+static const uint16_t kNetworkCostLow = 10;
+static const uint16_t kNetworkCostMin = 0;
+
+enum AdapterType {
+  // This enum resembles the one in Chromium net::ConnectionType.
+  ADAPTER_TYPE_UNKNOWN = 0,
+  ADAPTER_TYPE_ETHERNET = 1 << 0,
+  ADAPTER_TYPE_WIFI = 1 << 1,
+  ADAPTER_TYPE_CELLULAR = 1 << 2,
+  ADAPTER_TYPE_VPN = 1 << 3,
+  ADAPTER_TYPE_LOOPBACK = 1 << 4
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_NETWORK_CONSTANTS_H_
diff --git a/rtc_base/network_unittest.cc b/rtc_base/network_unittest.cc
new file mode 100644
index 0000000..bf09c24
--- /dev/null
+++ b/rtc_base/network_unittest.cc
@@ -0,0 +1,1150 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/network.h"
+
+#include <stdlib.h>
+
+#include <memory>
+#include <vector>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/nethelpers.h"
+#include "rtc_base/networkmonitor.h"
+#if defined(WEBRTC_POSIX)
+#include <sys/types.h>
+#include <net/if.h>
+#include "rtc_base/ifaddrs_converter.h"
+#endif  // defined(WEBRTC_POSIX)
+#include "rtc_base/gunit.h"
+#if defined(WEBRTC_WIN)
+#include "rtc_base/logging.h"  // For RTC_LOG_GLE
+#endif
+
+namespace rtc {
+
+namespace {
+
+class FakeNetworkMonitor : public NetworkMonitorBase {
+ public:
+  void Start() override { started_ = true; }
+  void Stop() override { started_ = false; }
+  bool started() { return started_; }
+  AdapterType GetAdapterType(const std::string& if_name) override {
+    // Note that the name matching rules are different from the
+    // GetAdapterTypeFromName in NetworkManager.
+    if (if_name.find("wifi") == 0) {
+      return ADAPTER_TYPE_WIFI;
+    }
+    if (if_name.find("cellular") == 0) {
+      return ADAPTER_TYPE_CELLULAR;
+    }
+    return ADAPTER_TYPE_UNKNOWN;
+  }
+
+ private:
+  bool started_ = false;
+};
+
+class FakeNetworkMonitorFactory : public NetworkMonitorFactory {
+ public:
+  FakeNetworkMonitorFactory() {}
+  NetworkMonitorInterface* CreateNetworkMonitor() override {
+    return new FakeNetworkMonitor();
+  }
+};
+
+}  // namespace
+
+class NetworkTest : public testing::Test, public sigslot::has_slots<>  {
+ public:
+  NetworkTest() : callback_called_(false) {}
+
+  void OnNetworksChanged() {
+    callback_called_ = true;
+  }
+
+  NetworkManager::Stats MergeNetworkList(
+      BasicNetworkManager& network_manager,
+      const NetworkManager::NetworkList& list,
+      bool* changed) {
+    NetworkManager::Stats stats;
+    network_manager.MergeNetworkList(list, changed, &stats);
+    return stats;
+  }
+
+  bool IsIgnoredNetwork(BasicNetworkManager& network_manager,
+                        const Network& network) {
+    return network_manager.IsIgnoredNetwork(network);
+  }
+
+  NetworkManager::NetworkList GetNetworks(
+      const BasicNetworkManager& network_manager, bool include_ignored) {
+    NetworkManager::NetworkList list;
+    network_manager.CreateNetworks(include_ignored, &list);
+    return list;
+  }
+
+  FakeNetworkMonitor* GetNetworkMonitor(BasicNetworkManager& network_manager) {
+    return static_cast<FakeNetworkMonitor*>(
+        network_manager.network_monitor_.get());
+  }
+  void ClearNetworks(BasicNetworkManager& network_manager) {
+    for (const auto& kv : network_manager.networks_map_) {
+      delete kv.second;
+    }
+    network_manager.networks_.clear();
+    network_manager.networks_map_.clear();
+  }
+
+  AdapterType GetAdapterType(BasicNetworkManager& network_manager) {
+    BasicNetworkManager::NetworkList list;
+    network_manager.GetNetworks(&list);
+    RTC_CHECK_EQ(1, list.size());
+    return list[0]->type();
+  }
+
+#if defined(WEBRTC_POSIX)
+  // Separated from CreateNetworks for tests.
+  static void CallConvertIfAddrs(const BasicNetworkManager& network_manager,
+                                 struct ifaddrs* interfaces,
+                                 bool include_ignored,
+                                 NetworkManager::NetworkList* networks) {
+    // Use the base IfAddrsConverter for test cases.
+    std::unique_ptr<IfAddrsConverter> ifaddrs_converter(new IfAddrsConverter());
+    network_manager.ConvertIfAddrs(interfaces, ifaddrs_converter.get(),
+                                   include_ignored, networks);
+  }
+
+  struct sockaddr_in6* CreateIpv6Addr(const std::string& ip_string,
+                                      uint32_t scope_id) {
+    struct sockaddr_in6* ipv6_addr = static_cast<struct sockaddr_in6*>(
+        malloc(sizeof(struct sockaddr_in6)));
+    memset(ipv6_addr, 0, sizeof(struct sockaddr_in6));
+    ipv6_addr->sin6_family = AF_INET6;
+    ipv6_addr->sin6_scope_id = scope_id;
+    IPAddress ip;
+    IPFromString(ip_string, &ip);
+    ipv6_addr->sin6_addr = ip.ipv6_address();
+    return ipv6_addr;
+  }
+
+  // Pointers created here need to be released via ReleaseIfAddrs.
+  struct ifaddrs* AddIpv6Address(struct ifaddrs* list,
+                                 char* if_name,
+                                 const std::string& ipv6_address,
+                                 const std::string& ipv6_netmask,
+                                 uint32_t scope_id) {
+    struct ifaddrs* if_addr = new struct ifaddrs;
+    memset(if_addr, 0, sizeof(struct ifaddrs));
+    if_addr->ifa_name = if_name;
+    if_addr->ifa_addr = reinterpret_cast<struct sockaddr*>(
+        CreateIpv6Addr(ipv6_address, scope_id));
+    if_addr->ifa_netmask =
+        reinterpret_cast<struct sockaddr*>(CreateIpv6Addr(ipv6_netmask, 0));
+    if_addr->ifa_next = list;
+    if_addr->ifa_flags = IFF_RUNNING;
+    return if_addr;
+  }
+
+  struct ifaddrs* InstallIpv6Network(char* if_name,
+                                     const std::string& ipv6_address,
+                                     const std::string& ipv6_mask,
+                                     BasicNetworkManager& network_manager) {
+    ifaddrs* addr_list = nullptr;
+    addr_list = AddIpv6Address(addr_list, if_name, ipv6_address, ipv6_mask, 0);
+    NetworkManager::NetworkList result;
+    bool changed;
+    NetworkManager::Stats stats;
+    CallConvertIfAddrs(network_manager, addr_list, true, &result);
+    network_manager.MergeNetworkList(result, &changed, &stats);
+    return addr_list;
+  }
+
+  void ReleaseIfAddrs(struct ifaddrs* list) {
+    struct ifaddrs* if_addr = list;
+    while (if_addr != nullptr) {
+      struct ifaddrs* next_addr = if_addr->ifa_next;
+      free(if_addr->ifa_addr);
+      free(if_addr->ifa_netmask);
+      delete if_addr;
+      if_addr = next_addr;
+    }
+  }
+#endif  // defined(WEBRTC_POSIX)
+
+ protected:
+  bool callback_called_;
+};
+
+class TestBasicNetworkManager : public BasicNetworkManager {
+ public:
+  using BasicNetworkManager::QueryDefaultLocalAddress;
+  using BasicNetworkManager::set_default_local_addresses;
+};
+
+// Test that the Network ctor works properly.
+TEST_F(NetworkTest, TestNetworkConstruct) {
+  Network ipv4_network1("test_eth0", "Test Network Adapter 1",
+                        IPAddress(0x12345600U), 24);
+  EXPECT_EQ("test_eth0", ipv4_network1.name());
+  EXPECT_EQ("Test Network Adapter 1", ipv4_network1.description());
+  EXPECT_EQ(IPAddress(0x12345600U), ipv4_network1.prefix());
+  EXPECT_EQ(24, ipv4_network1.prefix_length());
+  EXPECT_FALSE(ipv4_network1.ignored());
+}
+
+TEST_F(NetworkTest, TestIsIgnoredNetworkIgnoresIPsStartingWith0) {
+  Network ipv4_network1("test_eth0", "Test Network Adapter 1",
+                        IPAddress(0x12345600U), 24, ADAPTER_TYPE_ETHERNET);
+  Network ipv4_network2("test_eth1", "Test Network Adapter 2",
+                        IPAddress(0x010000U), 24, ADAPTER_TYPE_ETHERNET);
+  BasicNetworkManager network_manager;
+  EXPECT_FALSE(IsIgnoredNetwork(network_manager, ipv4_network1));
+  EXPECT_TRUE(IsIgnoredNetwork(network_manager, ipv4_network2));
+}
+
+// TODO(phoglund): Remove when ignore list goes away.
+TEST_F(NetworkTest, TestIgnoreList) {
+  Network ignore_me("ignore_me", "Ignore me please!",
+                    IPAddress(0x12345600U), 24);
+  Network include_me("include_me", "Include me please!",
+                     IPAddress(0x12345600U), 24);
+  BasicNetworkManager network_manager;
+  EXPECT_FALSE(IsIgnoredNetwork(network_manager, ignore_me));
+  EXPECT_FALSE(IsIgnoredNetwork(network_manager, include_me));
+  std::vector<std::string> ignore_list;
+  ignore_list.push_back("ignore_me");
+  network_manager.set_network_ignore_list(ignore_list);
+  EXPECT_TRUE(IsIgnoredNetwork(network_manager, ignore_me));
+  EXPECT_FALSE(IsIgnoredNetwork(network_manager, include_me));
+}
+
+// Test is failing on Windows opt: b/11288214
+TEST_F(NetworkTest, DISABLED_TestCreateNetworks) {
+  BasicNetworkManager manager;
+  NetworkManager::NetworkList result = GetNetworks(manager, true);
+  // We should be able to bind to any addresses we find.
+  NetworkManager::NetworkList::iterator it;
+  for (it = result.begin();
+       it != result.end();
+       ++it) {
+    sockaddr_storage storage;
+    memset(&storage, 0, sizeof(storage));
+    IPAddress ip = (*it)->GetBestIP();
+    SocketAddress bindaddress(ip, 0);
+    bindaddress.SetScopeID((*it)->scope_id());
+    // TODO(thaloun): Use rtc::AsyncSocket once it supports IPv6.
+    int fd = static_cast<int>(socket(ip.family(), SOCK_STREAM, IPPROTO_TCP));
+    if (fd > 0) {
+      size_t ipsize = bindaddress.ToSockAddrStorage(&storage);
+      EXPECT_GE(ipsize, 0U);
+      int success = ::bind(fd,
+                           reinterpret_cast<sockaddr*>(&storage),
+                           static_cast<int>(ipsize));
+#if defined(WEBRTC_WIN)
+      if (success)
+        RTC_LOG_GLE(LS_ERROR) << "Socket bind failed.";
+#endif
+      EXPECT_EQ(0, success);
+#if defined(WEBRTC_WIN)
+      closesocket(fd);
+#else
+      close(fd);
+#endif
+    }
+    delete (*it);
+  }
+}
+
+// Test StartUpdating() and StopUpdating(). network_permission_state starts with
+// ALLOWED.
+TEST_F(NetworkTest, TestUpdateNetworks) {
+  BasicNetworkManager manager;
+  manager.SignalNetworksChanged.connect(
+      static_cast<NetworkTest*>(this), &NetworkTest::OnNetworksChanged);
+  EXPECT_EQ(NetworkManager::ENUMERATION_ALLOWED,
+            manager.enumeration_permission());
+  manager.StartUpdating();
+  Thread::Current()->ProcessMessages(0);
+  EXPECT_TRUE(callback_called_);
+  callback_called_ = false;
+  // Callback should be triggered immediately when StartUpdating
+  // is called, after network update signal is already sent.
+  manager.StartUpdating();
+  EXPECT_TRUE(manager.started());
+  Thread::Current()->ProcessMessages(0);
+  EXPECT_TRUE(callback_called_);
+  manager.StopUpdating();
+  EXPECT_TRUE(manager.started());
+  manager.StopUpdating();
+  EXPECT_EQ(NetworkManager::ENUMERATION_ALLOWED,
+            manager.enumeration_permission());
+  EXPECT_FALSE(manager.started());
+  manager.StopUpdating();
+  EXPECT_FALSE(manager.started());
+  callback_called_ = false;
+  // Callback should be triggered immediately after StartUpdating is called
+  // when start_count_ is reset to 0.
+  manager.StartUpdating();
+  Thread::Current()->ProcessMessages(0);
+  EXPECT_TRUE(callback_called_);
+}
+
+// Verify that MergeNetworkList() merges network lists properly.
+TEST_F(NetworkTest, TestBasicMergeNetworkList) {
+  Network ipv4_network1("test_eth0", "Test Network Adapter 1",
+                        IPAddress(0x12345600U), 24);
+  Network ipv4_network2("test_eth1", "Test Network Adapter 2",
+                        IPAddress(0x00010000U), 16);
+  ipv4_network1.AddIP(IPAddress(0x12345678));
+  ipv4_network2.AddIP(IPAddress(0x00010004));
+  BasicNetworkManager manager;
+
+  // Add ipv4_network1 to the list of networks.
+  NetworkManager::NetworkList list;
+  list.push_back(new Network(ipv4_network1));
+  bool changed;
+  NetworkManager::Stats stats = MergeNetworkList(manager, list, &changed);
+  EXPECT_TRUE(changed);
+  EXPECT_EQ(stats.ipv6_network_count, 0);
+  EXPECT_EQ(stats.ipv4_network_count, 1);
+  list.clear();
+
+  manager.GetNetworks(&list);
+  EXPECT_EQ(1U, list.size());
+  EXPECT_EQ(ipv4_network1.ToString(), list[0]->ToString());
+  Network* net1 = list[0];
+  uint16_t net_id1 = net1->id();
+  EXPECT_EQ(1, net_id1);
+  list.clear();
+
+  // Replace ipv4_network1 with ipv4_network2.
+  list.push_back(new Network(ipv4_network2));
+  stats = MergeNetworkList(manager, list, &changed);
+  EXPECT_TRUE(changed);
+  EXPECT_EQ(stats.ipv6_network_count, 0);
+  EXPECT_EQ(stats.ipv4_network_count, 1);
+  list.clear();
+
+  manager.GetNetworks(&list);
+  EXPECT_EQ(1U, list.size());
+  EXPECT_EQ(ipv4_network2.ToString(), list[0]->ToString());
+  Network* net2 = list[0];
+  uint16_t net_id2 = net2->id();
+  // Network id will increase.
+  EXPECT_LT(net_id1, net_id2);
+  list.clear();
+
+  // Add Network2 back.
+  list.push_back(new Network(ipv4_network1));
+  list.push_back(new Network(ipv4_network2));
+  stats = MergeNetworkList(manager, list, &changed);
+  EXPECT_TRUE(changed);
+  EXPECT_EQ(stats.ipv6_network_count, 0);
+  EXPECT_EQ(stats.ipv4_network_count, 2);
+  list.clear();
+
+  // Verify that we get previous instances of Network objects.
+  manager.GetNetworks(&list);
+  EXPECT_EQ(2U, list.size());
+  EXPECT_TRUE((net1 == list[0] && net2 == list[1]) ||
+              (net1 == list[1] && net2 == list[0]));
+  EXPECT_TRUE((net_id1 == list[0]->id() && net_id2 == list[1]->id()) ||
+              (net_id1 == list[1]->id() && net_id2 == list[0]->id()));
+  list.clear();
+
+  // Call MergeNetworkList() again and verify that we don't get update
+  // notification.
+  list.push_back(new Network(ipv4_network2));
+  list.push_back(new Network(ipv4_network1));
+  stats = MergeNetworkList(manager, list, &changed);
+  EXPECT_FALSE(changed);
+  EXPECT_EQ(stats.ipv6_network_count, 0);
+  EXPECT_EQ(stats.ipv4_network_count, 2);
+  list.clear();
+
+  // Verify that we get previous instances of Network objects.
+  manager.GetNetworks(&list);
+  EXPECT_EQ(2U, list.size());
+  EXPECT_TRUE((net1 == list[0] && net2 == list[1]) ||
+              (net1 == list[1] && net2 == list[0]));
+  EXPECT_TRUE((net_id1 == list[0]->id() && net_id2 == list[1]->id()) ||
+              (net_id1 == list[1]->id() && net_id2 == list[0]->id()));
+  list.clear();
+}
+
+// Sets up some test IPv6 networks and appends them to list.
+// Four networks are added - public and link local, for two interfaces.
+void SetupNetworks(NetworkManager::NetworkList* list) {
+  IPAddress ip;
+  IPAddress prefix;
+  EXPECT_TRUE(IPFromString("abcd::1234:5678:abcd:ef12", &ip));
+  EXPECT_TRUE(IPFromString("abcd::", &prefix));
+  // First, fake link-locals.
+  Network ipv6_eth0_linklocalnetwork("test_eth0", "Test NetworkAdapter 1",
+                                     prefix, 64);
+  ipv6_eth0_linklocalnetwork.AddIP(ip);
+  EXPECT_TRUE(IPFromString("abcd::5678:abcd:ef12:3456", &ip));
+  Network ipv6_eth1_linklocalnetwork("test_eth1", "Test NetworkAdapter 2",
+                                     prefix, 64);
+  ipv6_eth1_linklocalnetwork.AddIP(ip);
+  // Public networks:
+  EXPECT_TRUE(IPFromString("2401:fa00:4:1000:be30:5bff:fee5:c3", &ip));
+  prefix = TruncateIP(ip, 64);
+  Network ipv6_eth0_publicnetwork1_ip1("test_eth0", "Test NetworkAdapter 1",
+                                       prefix, 64);
+  ipv6_eth0_publicnetwork1_ip1.AddIP(ip);
+  EXPECT_TRUE(IPFromString("2400:4030:1:2c00:be30:abcd:efab:cdef", &ip));
+  prefix = TruncateIP(ip, 64);
+  Network ipv6_eth1_publicnetwork1_ip1("test_eth1", "Test NetworkAdapter 1",
+                                       prefix, 64);
+  ipv6_eth1_publicnetwork1_ip1.AddIP(ip);
+  list->push_back(new Network(ipv6_eth0_linklocalnetwork));
+  list->push_back(new Network(ipv6_eth1_linklocalnetwork));
+  list->push_back(new Network(ipv6_eth0_publicnetwork1_ip1));
+  list->push_back(new Network(ipv6_eth1_publicnetwork1_ip1));
+}
+
+// Test that the basic network merging case works.
+TEST_F(NetworkTest, TestIPv6MergeNetworkList) {
+  BasicNetworkManager manager;
+  manager.SignalNetworksChanged.connect(
+      static_cast<NetworkTest*>(this), &NetworkTest::OnNetworksChanged);
+  NetworkManager::NetworkList original_list;
+  SetupNetworks(&original_list);
+  bool changed = false;
+  NetworkManager::Stats stats =
+      MergeNetworkList(manager, original_list, &changed);
+  EXPECT_TRUE(changed);
+  EXPECT_EQ(stats.ipv6_network_count, 4);
+  EXPECT_EQ(stats.ipv4_network_count, 0);
+  NetworkManager::NetworkList list;
+  manager.GetNetworks(&list);
+  EXPECT_EQ(original_list.size(), list.size());
+  // Verify that the original members are in the merged list.
+  for (NetworkManager::NetworkList::iterator it = original_list.begin();
+       it != original_list.end(); ++it) {
+    EXPECT_NE(list.end(), std::find(list.begin(), list.end(), *it));
+  }
+}
+
+// Tests that when two network lists that describe the same set of networks are
+// merged, that the changed callback is not called, and that the original
+// objects remain in the result list.
+TEST_F(NetworkTest, TestNoChangeMerge) {
+  BasicNetworkManager manager;
+  manager.SignalNetworksChanged.connect(
+      static_cast<NetworkTest*>(this), &NetworkTest::OnNetworksChanged);
+  NetworkManager::NetworkList original_list;
+  SetupNetworks(&original_list);
+  bool changed = false;
+  MergeNetworkList(manager, original_list, &changed);
+  EXPECT_TRUE(changed);
+  // Second list that describes the same networks but with new objects.
+  NetworkManager::NetworkList second_list;
+  SetupNetworks(&second_list);
+  changed = false;
+  MergeNetworkList(manager, second_list, &changed);
+  EXPECT_FALSE(changed);
+  NetworkManager::NetworkList resulting_list;
+  manager.GetNetworks(&resulting_list);
+  EXPECT_EQ(original_list.size(), resulting_list.size());
+  // Verify that the original members are in the merged list.
+  for (NetworkManager::NetworkList::iterator it = original_list.begin();
+       it != original_list.end(); ++it) {
+    EXPECT_NE(resulting_list.end(),
+              std::find(resulting_list.begin(), resulting_list.end(), *it));
+  }
+  // Doublecheck that the new networks aren't in the list.
+  for (NetworkManager::NetworkList::iterator it = second_list.begin();
+       it != second_list.end(); ++it) {
+    EXPECT_EQ(resulting_list.end(),
+              std::find(resulting_list.begin(), resulting_list.end(), *it));
+  }
+}
+
+// Test that we can merge a network that is the same as another network but with
+// a different IP. The original network should remain in the list, but have its
+// IP changed.
+TEST_F(NetworkTest, MergeWithChangedIP) {
+  BasicNetworkManager manager;
+  manager.SignalNetworksChanged.connect(
+      static_cast<NetworkTest*>(this), &NetworkTest::OnNetworksChanged);
+  NetworkManager::NetworkList original_list;
+  SetupNetworks(&original_list);
+  // Make a network that we're going to change.
+  IPAddress ip;
+  EXPECT_TRUE(IPFromString("2401:fa01:4:1000:be30:faa:fee:faa", &ip));
+  IPAddress prefix = TruncateIP(ip, 64);
+  Network* network_to_change = new Network("test_eth0",
+                                          "Test Network Adapter 1",
+                                          prefix, 64);
+  Network* changed_network = new Network(*network_to_change);
+  network_to_change->AddIP(ip);
+  IPAddress changed_ip;
+  EXPECT_TRUE(IPFromString("2401:fa01:4:1000:be30:f00:f00:f00", &changed_ip));
+  changed_network->AddIP(changed_ip);
+  original_list.push_back(network_to_change);
+  bool changed = false;
+  MergeNetworkList(manager, original_list, &changed);
+  NetworkManager::NetworkList second_list;
+  SetupNetworks(&second_list);
+  second_list.push_back(changed_network);
+  changed = false;
+  MergeNetworkList(manager, second_list, &changed);
+  EXPECT_TRUE(changed);
+  NetworkManager::NetworkList list;
+  manager.GetNetworks(&list);
+  EXPECT_EQ(original_list.size(), list.size());
+  // Make sure the original network is still in the merged list.
+  EXPECT_NE(list.end(),
+            std::find(list.begin(), list.end(), network_to_change));
+  EXPECT_EQ(changed_ip, network_to_change->GetIPs().at(0));
+}
+
+// Testing a similar case to above, but checking that a network can be updated
+// with additional IPs (not just a replacement).
+TEST_F(NetworkTest, TestMultipleIPMergeNetworkList) {
+  BasicNetworkManager manager;
+  manager.SignalNetworksChanged.connect(
+      static_cast<NetworkTest*>(this), &NetworkTest::OnNetworksChanged);
+  NetworkManager::NetworkList original_list;
+  SetupNetworks(&original_list);
+  bool changed = false;
+  MergeNetworkList(manager, original_list, &changed);
+  EXPECT_TRUE(changed);
+  IPAddress ip;
+  IPAddress check_ip;
+  IPAddress prefix;
+  // Add a second IP to the public network on eth0 (2401:fa00:4:1000/64).
+  EXPECT_TRUE(IPFromString("2401:fa00:4:1000:be30:5bff:fee5:c6", &ip));
+  prefix = TruncateIP(ip, 64);
+  Network ipv6_eth0_publicnetwork1_ip2("test_eth0", "Test NetworkAdapter 1",
+                                       prefix, 64);
+  // This is the IP that already existed in the public network on eth0.
+  EXPECT_TRUE(IPFromString("2401:fa00:4:1000:be30:5bff:fee5:c3", &check_ip));
+  ipv6_eth0_publicnetwork1_ip2.AddIP(ip);
+  original_list.push_back(new Network(ipv6_eth0_publicnetwork1_ip2));
+  changed = false;
+  MergeNetworkList(manager, original_list, &changed);
+  EXPECT_TRUE(changed);
+  // There should still be four networks.
+  NetworkManager::NetworkList list;
+  manager.GetNetworks(&list);
+  EXPECT_EQ(4U, list.size());
+  // Check the gathered IPs.
+  int matchcount = 0;
+  for (NetworkManager::NetworkList::iterator it = list.begin();
+       it != list.end(); ++it) {
+    if ((*it)->ToString() == original_list[2]->ToString()) {
+      ++matchcount;
+      EXPECT_EQ(1, matchcount);
+      // This should be the same network object as before.
+      EXPECT_EQ((*it), original_list[2]);
+      // But with two addresses now.
+      EXPECT_EQ(2U, (*it)->GetIPs().size());
+      EXPECT_NE((*it)->GetIPs().end(),
+                std::find((*it)->GetIPs().begin(),
+                          (*it)->GetIPs().end(),
+                          check_ip));
+      EXPECT_NE((*it)->GetIPs().end(),
+                std::find((*it)->GetIPs().begin(),
+                          (*it)->GetIPs().end(),
+                          ip));
+    } else {
+      // Check the IP didn't get added anywhere it wasn't supposed to.
+      EXPECT_EQ((*it)->GetIPs().end(),
+                std::find((*it)->GetIPs().begin(),
+                          (*it)->GetIPs().end(),
+                          ip));
+    }
+  }
+}
+
+// Test that merge correctly distinguishes multiple networks on an interface.
+TEST_F(NetworkTest, TestMultiplePublicNetworksOnOneInterfaceMerge) {
+  BasicNetworkManager manager;
+  manager.SignalNetworksChanged.connect(
+      static_cast<NetworkTest*>(this), &NetworkTest::OnNetworksChanged);
+  NetworkManager::NetworkList original_list;
+  SetupNetworks(&original_list);
+  bool changed = false;
+  MergeNetworkList(manager, original_list, &changed);
+  EXPECT_TRUE(changed);
+  IPAddress ip;
+  IPAddress prefix;
+  // A second network for eth0.
+  EXPECT_TRUE(IPFromString("2400:4030:1:2c00:be30:5bff:fee5:c3", &ip));
+  prefix = TruncateIP(ip, 64);
+  Network ipv6_eth0_publicnetwork2_ip1("test_eth0", "Test NetworkAdapter 1",
+                                       prefix, 64);
+  ipv6_eth0_publicnetwork2_ip1.AddIP(ip);
+  original_list.push_back(new Network(ipv6_eth0_publicnetwork2_ip1));
+  changed = false;
+  MergeNetworkList(manager, original_list, &changed);
+  EXPECT_TRUE(changed);
+  // There should be five networks now.
+  NetworkManager::NetworkList list;
+  manager.GetNetworks(&list);
+  EXPECT_EQ(5U, list.size());
+  // Check the resulting addresses.
+  for (NetworkManager::NetworkList::iterator it = list.begin();
+       it != list.end(); ++it) {
+    if ((*it)->prefix() == ipv6_eth0_publicnetwork2_ip1.prefix() &&
+        (*it)->name() == ipv6_eth0_publicnetwork2_ip1.name()) {
+      // Check the new network has 1 IP and that it's the correct one.
+      EXPECT_EQ(1U, (*it)->GetIPs().size());
+      EXPECT_EQ(ip, (*it)->GetIPs().at(0));
+    } else {
+      // Check the IP didn't get added anywhere it wasn't supposed to.
+      EXPECT_EQ((*it)->GetIPs().end(),
+                std::find((*it)->GetIPs().begin(),
+                          (*it)->GetIPs().end(),
+                          ip));
+    }
+  }
+}
+
+// Test that DumpNetworks does not crash.
+TEST_F(NetworkTest, TestCreateAndDumpNetworks) {
+  BasicNetworkManager manager;
+  NetworkManager::NetworkList list = GetNetworks(manager, true);
+  bool changed;
+  MergeNetworkList(manager, list, &changed);
+  manager.DumpNetworks();
+}
+
+// Test that we can toggle IPv6 on and off.
+// Crashes on Linux. See webrtc:4923.
+#if defined(WEBRTC_LINUX)
+#define MAYBE_TestIPv6Toggle DISABLED_TestIPv6Toggle
+#else
+#define MAYBE_TestIPv6Toggle TestIPv6Toggle
+#endif
+TEST_F(NetworkTest, MAYBE_TestIPv6Toggle) {
+  BasicNetworkManager manager;
+  bool ipv6_found = false;
+  NetworkManager::NetworkList list;
+#if !defined(WEBRTC_WIN)
+  // There should be at least one IPv6 network (fe80::/64 should be in there).
+  // TODO(thaloun): Disabling this test on windows for the moment as the test
+  // machines don't seem to have IPv6 installed on them at all.
+  manager.set_ipv6_enabled(true);
+  list = GetNetworks(manager, true);
+  for (NetworkManager::NetworkList::iterator it = list.begin();
+       it != list.end(); ++it) {
+    if ((*it)->prefix().family() == AF_INET6) {
+      ipv6_found = true;
+      break;
+    }
+  }
+  EXPECT_TRUE(ipv6_found);
+  for (NetworkManager::NetworkList::iterator it = list.begin();
+       it != list.end(); ++it) {
+    delete (*it);
+  }
+#endif
+  ipv6_found = false;
+  manager.set_ipv6_enabled(false);
+  list = GetNetworks(manager, true);
+  for (NetworkManager::NetworkList::iterator it = list.begin();
+       it != list.end(); ++it) {
+    if ((*it)->prefix().family() == AF_INET6) {
+      ipv6_found = true;
+      break;
+    }
+  }
+  EXPECT_FALSE(ipv6_found);
+  for (NetworkManager::NetworkList::iterator it = list.begin();
+       it != list.end(); ++it) {
+    delete (*it);
+  }
+}
+
+// Test that when network interfaces are sorted and given preference values,
+// IPv6 comes first.
+TEST_F(NetworkTest, IPv6NetworksPreferredOverIPv4) {
+  BasicNetworkManager manager;
+  Network ipv4_network1("test_eth0", "Test Network Adapter 1",
+                        IPAddress(0x12345600U), 24);
+  ipv4_network1.AddIP(IPAddress(0x12345600U));
+
+  IPAddress ip;
+  IPAddress prefix;
+  EXPECT_TRUE(IPFromString("2400:4030:1:2c00:be30:abcd:efab:cdef", &ip));
+  prefix = TruncateIP(ip, 64);
+  Network ipv6_eth1_publicnetwork1_ip1("test_eth1", "Test NetworkAdapter 2",
+                                       prefix, 64);
+  ipv6_eth1_publicnetwork1_ip1.AddIP(ip);
+
+  NetworkManager::NetworkList list;
+  list.push_back(new Network(ipv4_network1));
+  list.push_back(new Network(ipv6_eth1_publicnetwork1_ip1));
+  Network* net1 = list[0];
+  Network* net2 = list[1];
+
+  bool changed = false;
+  MergeNetworkList(manager, list, &changed);
+  ASSERT_TRUE(changed);
+  // After sorting IPv6 network should be higher order than IPv4 networks.
+  EXPECT_TRUE(net1->preference() < net2->preference());
+}
+
+// When two interfaces are equivalent in everything but name, they're expected
+// to be preference-ordered by name. For example, "eth0" before "eth1".
+TEST_F(NetworkTest, NetworksSortedByInterfaceName) {
+  BasicNetworkManager manager;
+  Network* eth0 = new Network("test_eth0", "Test Network Adapter 1",
+                              IPAddress(0x65432100U), 24);
+  eth0->AddIP(IPAddress(0x65432100U));
+  Network* eth1 = new Network("test_eth1", "Test Network Adapter 2",
+                              IPAddress(0x12345600U), 24);
+  eth1->AddIP(IPAddress(0x12345600U));
+  NetworkManager::NetworkList list;
+  // Add them to the list in the opposite of the expected sorted order, to
+  // ensure sorting actually occurs.
+  list.push_back(eth1);
+  list.push_back(eth0);
+
+  bool changed = false;
+  MergeNetworkList(manager, list, &changed);
+  ASSERT_TRUE(changed);
+  // "test_eth0" should be preferred over "test_eth1".
+  EXPECT_TRUE(eth0->preference() > eth1->preference());
+}
+
+TEST_F(NetworkTest, TestNetworkAdapterTypes) {
+  Network wifi("wlan0", "Wireless Adapter", IPAddress(0x12345600U), 24,
+               ADAPTER_TYPE_WIFI);
+  EXPECT_EQ(ADAPTER_TYPE_WIFI, wifi.type());
+  Network ethernet("eth0", "Ethernet", IPAddress(0x12345600U), 24,
+                   ADAPTER_TYPE_ETHERNET);
+  EXPECT_EQ(ADAPTER_TYPE_ETHERNET, ethernet.type());
+  Network cellular("test_cell", "Cellular Adapter", IPAddress(0x12345600U), 24,
+                   ADAPTER_TYPE_CELLULAR);
+  EXPECT_EQ(ADAPTER_TYPE_CELLULAR, cellular.type());
+  Network vpn("bridge_test", "VPN Adapter", IPAddress(0x12345600U), 24,
+              ADAPTER_TYPE_VPN);
+  EXPECT_EQ(ADAPTER_TYPE_VPN, vpn.type());
+  Network unknown("test", "Test Adapter", IPAddress(0x12345600U), 24,
+                  ADAPTER_TYPE_UNKNOWN);
+  EXPECT_EQ(ADAPTER_TYPE_UNKNOWN, unknown.type());
+}
+
+#if defined(WEBRTC_POSIX)
+// Verify that we correctly handle interfaces with no address.
+TEST_F(NetworkTest, TestConvertIfAddrsNoAddress) {
+  ifaddrs list;
+  memset(&list, 0, sizeof(list));
+  list.ifa_name = const_cast<char*>("test_iface");
+
+  NetworkManager::NetworkList result;
+  BasicNetworkManager manager;
+  CallConvertIfAddrs(manager, &list, true, &result);
+  EXPECT_TRUE(result.empty());
+}
+
+// Verify that if there are two addresses on one interface, only one network
+// is generated.
+TEST_F(NetworkTest, TestConvertIfAddrsMultiAddressesOnOneInterface) {
+  char if_name[20] = "rmnet0";
+  ifaddrs* list = nullptr;
+  list = AddIpv6Address(list, if_name, "1000:2000:3000:4000:0:0:0:1",
+                        "FFFF:FFFF:FFFF:FFFF::", 0);
+  list = AddIpv6Address(list, if_name, "1000:2000:3000:4000:0:0:0:2",
+                        "FFFF:FFFF:FFFF:FFFF::", 0);
+  NetworkManager::NetworkList result;
+  BasicNetworkManager manager;
+  CallConvertIfAddrs(manager, list, true, &result);
+  EXPECT_EQ(1U, result.size());
+  bool changed;
+  // This ensures we release the objects created in CallConvertIfAddrs.
+  MergeNetworkList(manager, result, &changed);
+  ReleaseIfAddrs(list);
+}
+
+TEST_F(NetworkTest, TestConvertIfAddrsNotRunning) {
+  ifaddrs list;
+  memset(&list, 0, sizeof(list));
+  list.ifa_name = const_cast<char*>("test_iface");
+  sockaddr ifa_addr;
+  sockaddr ifa_netmask;
+  list.ifa_addr = &ifa_addr;
+  list.ifa_netmask = &ifa_netmask;
+
+  NetworkManager::NetworkList result;
+  BasicNetworkManager manager;
+  CallConvertIfAddrs(manager, &list, true, &result);
+  EXPECT_TRUE(result.empty());
+}
+
+// Tests that the network type can be updated after the network monitor is
+// started.
+TEST_F(NetworkTest, TestGetAdapterTypeFromNetworkMonitor) {
+  char if_name1[20] = "wifi0";
+  std::string ipv6_address1 = "1000:2000:3000:4000:0:0:0:1";
+  std::string ipv6_address2 = "1000:2000:3000:8000:0:0:0:1";
+  std::string ipv6_mask = "FFFF:FFFF:FFFF:FFFF::";
+  BasicNetworkManager manager;
+  // A network created before the network monitor is started will get
+  // UNKNOWN type.
+  ifaddrs* addr_list =
+      InstallIpv6Network(if_name1, ipv6_address1, ipv6_mask, manager);
+  EXPECT_EQ(ADAPTER_TYPE_UNKNOWN, GetAdapterType(manager));
+  ReleaseIfAddrs(addr_list);
+  // Note: Do not call ClearNetworks here in order to test that the type
+  // of an existing network can be changed after the network monitor starts
+  // and detects the network type correctly.
+
+  // After the network monitor starts, the type will be updated.
+  FakeNetworkMonitorFactory* factory = new FakeNetworkMonitorFactory();
+  NetworkMonitorFactory::SetFactory(factory);
+  // This brings up the hook with the network monitor.
+  manager.StartUpdating();
+  // Add the same ipv6 address as before but it has the right network type
+  // detected by the network monitor now.
+  addr_list = InstallIpv6Network(if_name1, ipv6_address1, ipv6_mask, manager);
+  EXPECT_EQ(ADAPTER_TYPE_WIFI, GetAdapterType(manager));
+  ReleaseIfAddrs(addr_list);
+  ClearNetworks(manager);
+
+  // Add another network with the type inferred from the network monitor.
+  char if_name2[20] = "cellular0";
+  addr_list = InstallIpv6Network(if_name2, ipv6_address2, ipv6_mask, manager);
+  EXPECT_EQ(ADAPTER_TYPE_CELLULAR, GetAdapterType(manager));
+  ReleaseIfAddrs(addr_list);
+  ClearNetworks(manager);
+}
+
+// Test that the network type can be determined based on name matching in
+// a few cases. Note that UNKNOWN type for non-matching strings has been tested
+// in the above test.
+TEST_F(NetworkTest, TestGetAdapterTypeFromNameMatching) {
+  std::string ipv6_address1 = "1000:2000:3000:4000:0:0:0:1";
+  std::string ipv6_address2 = "1000:2000:3000:8000:0:0:0:1";
+  std::string ipv6_mask = "FFFF:FFFF:FFFF:FFFF::";
+  BasicNetworkManager manager;
+
+  // IPSec interface; name is in form "ipsec<index>".
+  char if_name[20] = "ipsec11";
+  ifaddrs* addr_list =
+      InstallIpv6Network(if_name, ipv6_address1, ipv6_mask, manager);
+  EXPECT_EQ(ADAPTER_TYPE_VPN, GetAdapterType(manager));
+  ClearNetworks(manager);
+  ReleaseIfAddrs(addr_list);
+
+#if defined(WEBRTC_IOS)
+  strcpy(if_name, "pdp_ip0");
+  addr_list = InstallIpv6Network(if_name, ipv6_address1, ipv6_mask, manager);
+  EXPECT_EQ(ADAPTER_TYPE_CELLULAR, GetAdapterType(manager));
+  ClearNetworks(manager);
+  ReleaseIfAddrs(addr_list);
+
+  strcpy(if_name, "en0");
+  addr_list = InstallIpv6Network(if_name, ipv6_address1, ipv6_mask, manager);
+  EXPECT_EQ(ADAPTER_TYPE_WIFI, GetAdapterType(manager));
+  ClearNetworks(manager);
+  ReleaseIfAddrs(addr_list);
+
+#elif defined(WEBRTC_ANDROID)
+  strcpy(if_name, "rmnet0");
+  addr_list = InstallIpv6Network(if_name, ipv6_address1, ipv6_mask, manager);
+  EXPECT_EQ(ADAPTER_TYPE_CELLULAR, GetAdapterType(manager));
+  ClearNetworks(manager);
+  ReleaseIfAddrs(addr_list);
+
+  strcpy(if_name, "wlan1");
+  addr_list = InstallIpv6Network(if_name, ipv6_address2, ipv6_mask, manager);
+  EXPECT_EQ(ADAPTER_TYPE_WIFI, GetAdapterType(manager));
+  ClearNetworks(manager);
+  ReleaseIfAddrs(addr_list);
+
+  strcpy(if_name, "v4-rmnet_data0");
+  addr_list = InstallIpv6Network(if_name, ipv6_address2, ipv6_mask, manager);
+  EXPECT_EQ(ADAPTER_TYPE_CELLULAR, GetAdapterType(manager));
+  ClearNetworks(manager);
+  ReleaseIfAddrs(addr_list);
+#else
+  // TODO(deadbeef): If not iOS or Android, "wlan0" should be treated as
+  // "unknown"? Why? This should be fixed if there's no good reason.
+  strcpy(if_name, "wlan0");
+  addr_list = InstallIpv6Network(if_name, ipv6_address1, ipv6_mask, manager);
+
+  EXPECT_EQ(ADAPTER_TYPE_UNKNOWN, GetAdapterType(manager));
+  ClearNetworks(manager);
+  ReleaseIfAddrs(addr_list);
+#endif
+}
+#endif  // defined(WEBRTC_POSIX)
+
+#if defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID)
+// If you want to test non-default routes, you can do the following on a linux
+// machine:
+// 1) Load the dummy network driver:
+// sudo modprobe dummy
+// sudo ifconfig dummy0 127.0.0.1
+// 2) Run this test and confirm the output says it found a dummy route (and
+// passes).
+// 3) When done:
+// sudo rmmmod dummy
+TEST_F(NetworkTest, TestIgnoreNonDefaultRoutes) {
+  BasicNetworkManager manager;
+  NetworkManager::NetworkList list;
+  list = GetNetworks(manager, false);
+  bool found_dummy = false;
+  RTC_LOG(LS_INFO) << "Looking for dummy network: ";
+  for (NetworkManager::NetworkList::iterator it = list.begin();
+       it != list.end(); ++it) {
+    RTC_LOG(LS_INFO) << "  Network name: " << (*it)->name();
+    found_dummy |= (*it)->name().find("dummy0") != std::string::npos;
+  }
+  for (NetworkManager::NetworkList::iterator it = list.begin();
+       it != list.end(); ++it) {
+    delete (*it);
+  }
+  if (!found_dummy) {
+    RTC_LOG(LS_INFO) << "No dummy found, quitting.";
+    return;
+  }
+  RTC_LOG(LS_INFO) << "Found dummy, running again while ignoring non-default "
+                   << "routes.";
+  manager.set_ignore_non_default_routes(true);
+  list = GetNetworks(manager, false);
+  for (NetworkManager::NetworkList::iterator it = list.begin();
+       it != list.end(); ++it) {
+    RTC_LOG(LS_INFO) << "  Network name: " << (*it)->name();
+    EXPECT_TRUE((*it)->name().find("dummy0") == std::string::npos);
+  }
+  for (NetworkManager::NetworkList::iterator it = list.begin();
+       it != list.end(); ++it) {
+    delete (*it);
+  }
+}
+#endif
+
+// Test MergeNetworkList successfully combines all IPs for the same
+// prefix/length into a single Network.
+TEST_F(NetworkTest, TestMergeNetworkList) {
+  BasicNetworkManager manager;
+  NetworkManager::NetworkList list;
+
+  // Create 2 IPAddress classes with only last digit different.
+  IPAddress ip1, ip2;
+  EXPECT_TRUE(IPFromString("2400:4030:1:2c00:be30:0:0:1", &ip1));
+  EXPECT_TRUE(IPFromString("2400:4030:1:2c00:be30:0:0:2", &ip2));
+
+  // Create 2 networks with the same prefix and length.
+  Network* net1 = new Network("em1", "em1", TruncateIP(ip1, 64), 64);
+  Network* net2 = new Network("em1", "em1", TruncateIP(ip1, 64), 64);
+
+  // Add different IP into each.
+  net1->AddIP(ip1);
+  net2->AddIP(ip2);
+
+  list.push_back(net1);
+  list.push_back(net2);
+  bool changed;
+  MergeNetworkList(manager, list, &changed);
+  EXPECT_TRUE(changed);
+
+  NetworkManager::NetworkList list2;
+  manager.GetNetworks(&list2);
+
+  // Make sure the resulted networklist has only 1 element and 2
+  // IPAddresses.
+  EXPECT_EQ(list2.size(), 1uL);
+  EXPECT_EQ(list2[0]->GetIPs().size(), 2uL);
+  EXPECT_EQ(list2[0]->GetIPs()[0], ip1);
+  EXPECT_EQ(list2[0]->GetIPs()[1], ip2);
+}
+
+// Test that MergeNetworkList successfully detects the change if
+// a network becomes inactive and then active again.
+TEST_F(NetworkTest, TestMergeNetworkListWithInactiveNetworks) {
+  BasicNetworkManager manager;
+  Network network1("test_wifi", "Test Network Adapter 1",
+                   IPAddress(0x12345600U), 24);
+  Network network2("test_eth0", "Test Network Adapter 2",
+                   IPAddress(0x00010000U), 16);
+  network1.AddIP(IPAddress(0x12345678));
+  network2.AddIP(IPAddress(0x00010004));
+  NetworkManager::NetworkList list;
+  Network* net1 = new Network(network1);
+  list.push_back(net1);
+  bool changed;
+  MergeNetworkList(manager, list, &changed);
+  EXPECT_TRUE(changed);
+  list.clear();
+  manager.GetNetworks(&list);
+  ASSERT_EQ(1U, list.size());
+  EXPECT_EQ(net1, list[0]);
+
+  list.clear();
+  Network* net2 = new Network(network2);
+  list.push_back(net2);
+  MergeNetworkList(manager, list, &changed);
+  EXPECT_TRUE(changed);
+  list.clear();
+  manager.GetNetworks(&list);
+  ASSERT_EQ(1U, list.size());
+  EXPECT_EQ(net2, list[0]);
+
+  // Now network1 is inactive. Try to merge it again.
+  list.clear();
+  list.push_back(new Network(network1));
+  MergeNetworkList(manager, list, &changed);
+  EXPECT_TRUE(changed);
+  list.clear();
+  manager.GetNetworks(&list);
+  ASSERT_EQ(1U, list.size());
+  EXPECT_TRUE(list[0]->active());
+  EXPECT_EQ(net1, list[0]);
+}
+
+// Test that the filtering logic follows the defined ruleset in network.h.
+TEST_F(NetworkTest, TestIPv6Selection) {
+  InterfaceAddress ip;
+  std::string ipstr;
+
+  ipstr = "2401:fa00:4:1000:be30:5bff:fee5:c3";
+  ASSERT_TRUE(IPFromString(ipstr, IPV6_ADDRESS_FLAG_DEPRECATED, &ip));
+
+  // Create a network with this prefix.
+  Network ipv6_network(
+      "test_eth0", "Test NetworkAdapter", TruncateIP(ip, 64), 64);
+
+  // When there is no address added, it should return an unspecified
+  // address.
+  EXPECT_EQ(ipv6_network.GetBestIP(), IPAddress());
+  EXPECT_TRUE(IPIsUnspec(ipv6_network.GetBestIP()));
+
+  // Deprecated one should not be returned.
+  ipv6_network.AddIP(ip);
+  EXPECT_EQ(ipv6_network.GetBestIP(), IPAddress());
+
+  // Add ULA one. ULA is unique local address which is starting either
+  // with 0xfc or 0xfd.
+  ipstr = "fd00:fa00:4:1000:be30:5bff:fee5:c4";
+  ASSERT_TRUE(IPFromString(ipstr, IPV6_ADDRESS_FLAG_NONE, &ip));
+  ipv6_network.AddIP(ip);
+  EXPECT_EQ(ipv6_network.GetBestIP(), static_cast<IPAddress>(ip));
+
+  // Add global one.
+  ipstr = "2401:fa00:4:1000:be30:5bff:fee5:c5";
+  ASSERT_TRUE(IPFromString(ipstr, IPV6_ADDRESS_FLAG_NONE, &ip));
+  ipv6_network.AddIP(ip);
+  EXPECT_EQ(ipv6_network.GetBestIP(), static_cast<IPAddress>(ip));
+
+  // Add global dynamic temporary one.
+  ipstr = "2401:fa00:4:1000:be30:5bff:fee5:c6";
+  ASSERT_TRUE(IPFromString(ipstr, IPV6_ADDRESS_FLAG_TEMPORARY, &ip));
+  ipv6_network.AddIP(ip);
+  EXPECT_EQ(ipv6_network.GetBestIP(), static_cast<IPAddress>(ip));
+}
+
+TEST_F(NetworkTest, TestNetworkMonitoring) {
+  BasicNetworkManager manager;
+  manager.SignalNetworksChanged.connect(static_cast<NetworkTest*>(this),
+                                        &NetworkTest::OnNetworksChanged);
+  FakeNetworkMonitorFactory* factory = new FakeNetworkMonitorFactory();
+  NetworkMonitorFactory::SetFactory(factory);
+  manager.StartUpdating();
+  FakeNetworkMonitor* network_monitor = GetNetworkMonitor(manager);
+  EXPECT_TRUE(network_monitor && network_monitor->started());
+  EXPECT_TRUE_WAIT(callback_called_, 1000);
+  callback_called_ = false;
+
+  // Clear the networks so that there will be network changes below.
+  ClearNetworks(manager);
+  // Network manager is started, so the callback is called when the network
+  // monitor fires the network-change event.
+  network_monitor->OnNetworksChanged();
+  EXPECT_TRUE_WAIT(callback_called_, 1000);
+
+  // Network manager is stopped.
+  manager.StopUpdating();
+  EXPECT_FALSE(GetNetworkMonitor(manager)->started());
+
+  NetworkMonitorFactory::ReleaseFactory(factory);
+}
+
+// Fails on Android: https://bugs.chromium.org/p/webrtc/issues/detail?id=4364.
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_DefaultLocalAddress DISABLED_DefaultLocalAddress
+#else
+#define MAYBE_DefaultLocalAddress DefaultLocalAddress
+#endif
+TEST_F(NetworkTest, MAYBE_DefaultLocalAddress) {
+  IPAddress ip;
+  TestBasicNetworkManager manager;
+  manager.SignalNetworksChanged.connect(static_cast<NetworkTest*>(this),
+                                        &NetworkTest::OnNetworksChanged);
+  FakeNetworkMonitorFactory* factory = new FakeNetworkMonitorFactory();
+  NetworkMonitorFactory::SetFactory(factory);
+  manager.StartUpdating();
+  EXPECT_TRUE_WAIT(callback_called_, 1000);
+
+  // Make sure we can query default local address when an address for such
+  // address family exists.
+  std::vector<Network*> networks;
+  manager.GetNetworks(&networks);
+  EXPECT_TRUE(!networks.empty());
+  for (const auto* network : networks) {
+    if (network->GetBestIP().family() == AF_INET) {
+      EXPECT_TRUE(manager.QueryDefaultLocalAddress(AF_INET) != IPAddress());
+    } else if (network->GetBestIP().family() == AF_INET6 &&
+               !IPIsLoopback(network->GetBestIP())) {
+      // Existence of an IPv6 loopback address doesn't mean it has IPv6 network
+      // enabled.
+      EXPECT_TRUE(manager.QueryDefaultLocalAddress(AF_INET6) != IPAddress());
+    }
+  }
+
+  // GetDefaultLocalAddress should return the valid default address after set.
+  manager.set_default_local_addresses(GetLoopbackIP(AF_INET),
+                                      GetLoopbackIP(AF_INET6));
+  EXPECT_TRUE(manager.GetDefaultLocalAddress(AF_INET, &ip));
+  EXPECT_EQ(ip, GetLoopbackIP(AF_INET));
+  EXPECT_TRUE(manager.GetDefaultLocalAddress(AF_INET6, &ip));
+  EXPECT_EQ(ip, GetLoopbackIP(AF_INET6));
+
+  // More tests on GetDefaultLocalAddress with ipv6 addresses where the set
+  // default address may be different from the best IP address of any network.
+  InterfaceAddress ip1;
+  EXPECT_TRUE(IPFromString("abcd::1234:5678:abcd:1111",
+                           IPV6_ADDRESS_FLAG_TEMPORARY, &ip1));
+  // Create a network with a prefix of ip1.
+  Network ipv6_network("test_eth0", "Test NetworkAdapter", TruncateIP(ip1, 64),
+                       64);
+  IPAddress ip2;
+  EXPECT_TRUE(IPFromString("abcd::1234:5678:abcd:2222", &ip2));
+  ipv6_network.AddIP(ip1);
+  ipv6_network.AddIP(ip2);
+  BasicNetworkManager::NetworkList list(1, new Network(ipv6_network));
+  bool changed;
+  MergeNetworkList(manager, list, &changed);
+  // If the set default address is not in any network, GetDefaultLocalAddress
+  // should return it.
+  IPAddress ip3;
+  EXPECT_TRUE(IPFromString("abcd::1234:5678:abcd:3333", &ip3));
+  manager.set_default_local_addresses(GetLoopbackIP(AF_INET), ip3);
+  EXPECT_TRUE(manager.GetDefaultLocalAddress(AF_INET6, &ip));
+  EXPECT_EQ(ip3, ip);
+  // If the set default address is in a network, GetDefaultLocalAddress will
+  // return the best IP in that network.
+  manager.set_default_local_addresses(GetLoopbackIP(AF_INET), ip2);
+  EXPECT_TRUE(manager.GetDefaultLocalAddress(AF_INET6, &ip));
+  EXPECT_EQ(static_cast<IPAddress>(ip1), ip);
+
+  manager.StopUpdating();
+}
+
+}  // namespace rtc
diff --git a/rtc_base/networkmonitor.cc b/rtc_base/networkmonitor.cc
new file mode 100644
index 0000000..0272951
--- /dev/null
+++ b/rtc_base/networkmonitor.cc
@@ -0,0 +1,62 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/networkmonitor.h"
+
+#include "rtc_base/checks.h"
+
+namespace {
+const uint32_t UPDATE_NETWORKS_MESSAGE = 1;
+
+// This is set by NetworkMonitorFactory::SetFactory and the caller of
+// NetworkMonitorFactory::SetFactory must be responsible for calling
+// ReleaseFactory to destroy the factory.
+rtc::NetworkMonitorFactory* network_monitor_factory = nullptr;
+}  // namespace
+
+namespace rtc {
+NetworkMonitorInterface::NetworkMonitorInterface() {}
+
+NetworkMonitorInterface::~NetworkMonitorInterface() {}
+
+NetworkMonitorBase::NetworkMonitorBase() : worker_thread_(Thread::Current()) {}
+NetworkMonitorBase::~NetworkMonitorBase() {}
+
+void NetworkMonitorBase::OnNetworksChanged() {
+  RTC_LOG(LS_VERBOSE) << "Network change is received at the network monitor";
+  worker_thread_->Post(RTC_FROM_HERE, this, UPDATE_NETWORKS_MESSAGE);
+}
+
+void NetworkMonitorBase::OnMessage(Message* msg) {
+  RTC_DCHECK(msg->message_id == UPDATE_NETWORKS_MESSAGE);
+  SignalNetworksChanged();
+}
+
+NetworkMonitorFactory::NetworkMonitorFactory() {}
+NetworkMonitorFactory::~NetworkMonitorFactory() {}
+
+void NetworkMonitorFactory::SetFactory(NetworkMonitorFactory* factory) {
+  if (network_monitor_factory != nullptr) {
+    delete network_monitor_factory;
+  }
+  network_monitor_factory = factory;
+}
+
+void NetworkMonitorFactory::ReleaseFactory(NetworkMonitorFactory* factory) {
+  if (factory == network_monitor_factory) {
+    SetFactory(nullptr);
+  }
+}
+
+NetworkMonitorFactory* NetworkMonitorFactory::GetFactory() {
+  return network_monitor_factory;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/networkmonitor.h b/rtc_base/networkmonitor.h
new file mode 100644
index 0000000..254b225
--- /dev/null
+++ b/rtc_base/networkmonitor.h
@@ -0,0 +1,119 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_NETWORKMONITOR_H_
+#define RTC_BASE_NETWORKMONITOR_H_
+
+#include "rtc_base/logging.h"
+#include "rtc_base/network_constants.h"
+#include "rtc_base/sigslot.h"
+#include "rtc_base/thread.h"
+
+namespace rtc {
+
+class IPAddress;
+
+enum class NetworkBindingResult {
+  SUCCESS = 0,   // No error
+  FAILURE = -1,  // Generic error
+  NOT_IMPLEMENTED = -2,
+  ADDRESS_NOT_FOUND = -3,
+  NETWORK_CHANGED = -4
+};
+
+class NetworkBinderInterface {
+ public:
+  // Binds a socket to the network that is attached to |address| so that all
+  // packets on the socket |socket_fd| will be sent via that network.
+  // This is needed because some operating systems (like Android) require a
+  // special bind call to put packets on a non-default network interface.
+  virtual NetworkBindingResult BindSocketToNetwork(
+      int socket_fd,
+      const IPAddress& address) = 0;
+  virtual ~NetworkBinderInterface() {}
+};
+
+/*
+ * Receives network-change events via |OnNetworksChanged| and signals the
+ * networks changed event.
+ *
+ * Threading consideration:
+ * It is expected that all upstream operations (from native to Java) are
+ * performed from the worker thread. This includes creating, starting and
+ * stopping the monitor. This avoids the potential race condition when creating
+ * the singleton Java NetworkMonitor class. Downstream operations can be from
+ * any thread, but this class will forward all the downstream operations onto
+ * the worker thread.
+ *
+ * Memory consideration:
+ * NetworkMonitor is owned by the caller (NetworkManager). The global network
+ * monitor factory is owned by the factory itself but needs to be released from
+ * the factory creator.
+ */
+// Generic network monitor interface. It starts and stops monitoring network
+// changes, and fires the SignalNetworksChanged event when networks change.
+class NetworkMonitorInterface {
+ public:
+  NetworkMonitorInterface();
+  virtual ~NetworkMonitorInterface();
+
+  sigslot::signal0<> SignalNetworksChanged;
+
+  virtual void Start() = 0;
+  virtual void Stop() = 0;
+
+  // Implementations should call this method on the base when networks change,
+  // and the base will fire SignalNetworksChanged on the right thread.
+  virtual void OnNetworksChanged() = 0;
+
+  virtual AdapterType GetAdapterType(const std::string& interface_name) = 0;
+};
+
+class NetworkMonitorBase : public NetworkMonitorInterface,
+                           public MessageHandler,
+                           public sigslot::has_slots<> {
+ public:
+  NetworkMonitorBase();
+  ~NetworkMonitorBase() override;
+
+  void OnNetworksChanged() override;
+
+  void OnMessage(Message* msg) override;
+
+ protected:
+  Thread* worker_thread() { return worker_thread_; }
+
+ private:
+  Thread* worker_thread_;
+};
+
+/*
+ * NetworkMonitorFactory creates NetworkMonitors.
+ */
+class NetworkMonitorFactory {
+ public:
+  // This is not thread-safe; it should be called once (or once per audio/video
+  // call) during the call initialization.
+  static void SetFactory(NetworkMonitorFactory* factory);
+
+  static void ReleaseFactory(NetworkMonitorFactory* factory);
+  static NetworkMonitorFactory* GetFactory();
+
+  virtual NetworkMonitorInterface* CreateNetworkMonitor() = 0;
+
+  virtual ~NetworkMonitorFactory();
+
+ protected:
+  NetworkMonitorFactory();
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_NETWORKMONITOR_H_
diff --git a/rtc_base/networkroute.h b/rtc_base/networkroute.h
new file mode 100644
index 0000000..5800ef8
--- /dev/null
+++ b/rtc_base/networkroute.h
@@ -0,0 +1,60 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_NETWORKROUTE_H_
+#define RTC_BASE_NETWORKROUTE_H_
+
+#include <stdint.h>
+
+// TODO(honghaiz): Make a directory that describes the interfaces and structs
+// the media code can rely on and the network code can implement, and both can
+// depend on that, but not depend on each other. Then, move this file to that
+// directory.
+namespace rtc {
+
+struct NetworkRoute {
+  bool connected;
+  uint16_t local_network_id;
+  uint16_t remote_network_id;
+  int last_sent_packet_id;  // Last packet id sent on the PREVIOUS route.
+  int packet_overhead;      // The overhead in bytes from IP layer and above.
+
+  NetworkRoute()
+      : connected(false),
+        local_network_id(0),
+        remote_network_id(0),
+        last_sent_packet_id(-1),
+        packet_overhead(0) {}
+
+  // The route is connected if the local and remote network ids are provided.
+  // TODO(zhihuang): Remove this and let the caller set the fields explicitly.
+  NetworkRoute(bool connected,
+               uint16_t local_net_id,
+               uint16_t remote_net_id,
+               int last_packet_id)
+      : connected(connected),
+        local_network_id(local_net_id),
+        remote_network_id(remote_net_id),
+        last_sent_packet_id(last_packet_id),
+        packet_overhead(0) {}
+
+  // |last_sent_packet_id| and |packet_overhead| do not affect the NetworkRoute
+  // comparison.
+  bool operator==(const NetworkRoute& nr) const {
+    return connected == nr.connected &&
+           local_network_id == nr.local_network_id &&
+           remote_network_id == nr.remote_network_id;
+  }
+
+  bool operator!=(const NetworkRoute& nr) const { return !(*this == nr); }
+};
+}  // namespace rtc
+
+#endif  // RTC_BASE_NETWORKROUTE_H_
diff --git a/rtc_base/noop.cc b/rtc_base/noop.cc
new file mode 100644
index 0000000..16a8e6d
--- /dev/null
+++ b/rtc_base/noop.cc
@@ -0,0 +1,13 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file is only needed to make ninja happy on some platforms.
+// On some platforms it is not possible to link an rtc_static_library
+// without any source file listed in the GN target.
diff --git a/rtc_base/noop.mm b/rtc_base/noop.mm
new file mode 100644
index 0000000..16a8e6d
--- /dev/null
+++ b/rtc_base/noop.mm
@@ -0,0 +1,13 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file is only needed to make ninja happy on some platforms.
+// On some platforms it is not possible to link an rtc_static_library
+// without any source file listed in the GN target.
diff --git a/rtc_base/nullsocketserver.cc b/rtc_base/nullsocketserver.cc
new file mode 100644
index 0000000..68b67fa
--- /dev/null
+++ b/rtc_base/nullsocketserver.cc
@@ -0,0 +1,49 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/nullsocketserver.h"
+#include "rtc_base/checks.h"
+
+namespace rtc {
+
+NullSocketServer::NullSocketServer() : event_(false, false) {}
+NullSocketServer::~NullSocketServer() {}
+
+bool NullSocketServer::Wait(int cms, bool process_io) {
+  event_.Wait(cms);
+  return true;
+}
+
+void NullSocketServer::WakeUp() {
+  event_.Set();
+}
+
+rtc::Socket* NullSocketServer::CreateSocket(int /* type */) {
+  RTC_NOTREACHED();
+  return nullptr;
+}
+
+rtc::Socket* NullSocketServer::CreateSocket(int /* family */, int /* type */) {
+  RTC_NOTREACHED();
+  return nullptr;
+}
+
+rtc::AsyncSocket* NullSocketServer::CreateAsyncSocket(int /* type */) {
+  RTC_NOTREACHED();
+  return nullptr;
+}
+
+rtc::AsyncSocket* NullSocketServer::CreateAsyncSocket(int /* family */,
+                                                      int /* type */) {
+  RTC_NOTREACHED();
+  return nullptr;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/nullsocketserver.h b/rtc_base/nullsocketserver.h
new file mode 100644
index 0000000..7715c5c
--- /dev/null
+++ b/rtc_base/nullsocketserver.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright 2012 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_NULLSOCKETSERVER_H_
+#define RTC_BASE_NULLSOCKETSERVER_H_
+
+#include "rtc_base/event.h"
+#include "rtc_base/socketserver.h"
+
+namespace rtc {
+
+class NullSocketServer : public SocketServer {
+ public:
+  NullSocketServer();
+  ~NullSocketServer() override;
+
+  bool Wait(int cms, bool process_io) override;
+  void WakeUp() override;
+
+  Socket* CreateSocket(int type) override;
+  Socket* CreateSocket(int family, int type) override;
+  AsyncSocket* CreateAsyncSocket(int type) override;
+  AsyncSocket* CreateAsyncSocket(int family, int type) override;
+
+ private:
+  Event event_;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_NULLSOCKETSERVER_H_
diff --git a/rtc_base/nullsocketserver_unittest.cc b/rtc_base/nullsocketserver_unittest.cc
new file mode 100644
index 0000000..e3d9952
--- /dev/null
+++ b/rtc_base/nullsocketserver_unittest.cc
@@ -0,0 +1,44 @@
+/*
+ *  Copyright 2012 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/nullsocketserver.h"
+#include "rtc_base/gunit.h"
+
+namespace rtc {
+
+static const uint32_t kTimeout = 5000U;
+
+class NullSocketServerTest
+    : public testing::Test,
+      public MessageHandler {
+ protected:
+  void OnMessage(Message* message) override { ss_.WakeUp(); }
+
+  NullSocketServer ss_;
+};
+
+TEST_F(NullSocketServerTest, WaitAndSet) {
+  auto thread = Thread::Create();
+  EXPECT_TRUE(thread->Start());
+  thread->Post(RTC_FROM_HERE, this, 0);
+  // The process_io will be ignored.
+  const bool process_io = true;
+  EXPECT_TRUE_WAIT(ss_.Wait(SocketServer::kForever, process_io), kTimeout);
+}
+
+TEST_F(NullSocketServerTest, TestWait) {
+  int64_t start = TimeMillis();
+  ss_.Wait(200, true);
+  // The actual wait time is dependent on the resolution of the timer used by
+  // the Event class. Allow for the event to signal ~20ms early.
+  EXPECT_GE(TimeSince(start), 180);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/numerics/exp_filter.cc b/rtc_base/numerics/exp_filter.cc
new file mode 100644
index 0000000..0c6fb00
--- /dev/null
+++ b/rtc_base/numerics/exp_filter.cc
@@ -0,0 +1,43 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/numerics/exp_filter.h"
+
+#include <math.h>
+
+namespace rtc {
+
+const float ExpFilter::kValueUndefined = -1.0f;
+
+void ExpFilter::Reset(float alpha) {
+  alpha_ = alpha;
+  filtered_ = kValueUndefined;
+}
+
+float ExpFilter::Apply(float exp, float sample) {
+  if (filtered_ == kValueUndefined) {
+    // Initialize filtered value.
+    filtered_ = sample;
+  } else if (exp == 1.0) {
+    filtered_ = alpha_ * filtered_ + (1 - alpha_) * sample;
+  } else {
+    float alpha = pow(alpha_, exp);
+    filtered_ = alpha * filtered_ + (1 - alpha) * sample;
+  }
+  if (max_ != kValueUndefined && filtered_ > max_) {
+    filtered_ = max_;
+  }
+  return filtered_;
+}
+
+void ExpFilter::UpdateBase(float alpha) {
+  alpha_ = alpha;
+}
+}  // namespace rtc
diff --git a/rtc_base/numerics/exp_filter.h b/rtc_base/numerics/exp_filter.h
new file mode 100644
index 0000000..4be9a0a
--- /dev/null
+++ b/rtc_base/numerics/exp_filter.h
@@ -0,0 +1,48 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_NUMERICS_EXP_FILTER_H_
+#define RTC_BASE_NUMERICS_EXP_FILTER_H_
+
+namespace rtc {
+
+// This class can be used, for example, for smoothing the result of bandwidth
+// estimation and packet loss estimation.
+
+class ExpFilter {
+ public:
+  static const float kValueUndefined;
+
+  explicit ExpFilter(float alpha, float max = kValueUndefined) : max_(max) {
+    Reset(alpha);
+  }
+
+  // Resets the filter to its initial state, and resets filter factor base to
+  // the given value |alpha|.
+  void Reset(float alpha);
+
+  // Applies the filter with a given exponent on the provided sample:
+  // y(k) = min(alpha_^ exp * y(k-1) + (1 - alpha_^ exp) * sample, max_).
+  float Apply(float exp, float sample);
+
+  // Returns current filtered value.
+  float filtered() const { return filtered_; }
+
+  // Changes the filter factor base to the given value |alpha|.
+  void UpdateBase(float alpha);
+
+ private:
+  float alpha_;     // Filter factor base.
+  float filtered_;  // Current filter output.
+  const float max_;
+};
+}  // namespace rtc
+
+#endif  // RTC_BASE_NUMERICS_EXP_FILTER_H_
diff --git a/rtc_base/numerics/exp_filter_unittest.cc b/rtc_base/numerics/exp_filter_unittest.cc
new file mode 100644
index 0000000..412dc77
--- /dev/null
+++ b/rtc_base/numerics/exp_filter_unittest.cc
@@ -0,0 +1,71 @@
+/*
+ *  Copyright 2014 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+
+#include "rtc_base/numerics/exp_filter.h"
+#include "test/gtest.h"
+
+namespace rtc {
+
+TEST(ExpFilterTest, FirstTimeOutputEqualInput) {
+  // No max value defined.
+  ExpFilter filter = ExpFilter(0.9f);
+  filter.Apply(100.0f, 10.0f);
+
+  // First time, first argument no effect.
+  double value = 10.0f;
+  EXPECT_FLOAT_EQ(value, filter.filtered());
+}
+
+TEST(ExpFilterTest, SecondTime) {
+  double value;
+
+  ExpFilter filter = ExpFilter(0.9f);
+  filter.Apply(100.0f, 10.0f);
+
+  // First time, first argument no effect.
+  value = 10.0f;
+
+  filter.Apply(10.0f, 20.0f);
+  double alpha = pow(0.9f, 10.0f);
+  value = alpha * value + (1.0f - alpha) * 20.0f;
+  EXPECT_FLOAT_EQ(value, filter.filtered());
+}
+
+TEST(ExpFilterTest, Reset) {
+  ExpFilter filter = ExpFilter(0.9f);
+  filter.Apply(100.0f, 10.0f);
+
+  filter.Reset(0.8f);
+  filter.Apply(100.0f, 1.0f);
+
+  // Become first time after a reset.
+  double value = 1.0f;
+  EXPECT_FLOAT_EQ(value, filter.filtered());
+}
+
+TEST(ExpfilterTest, OutputLimitedByMax) {
+  double value;
+
+  // Max value defined.
+  ExpFilter filter = ExpFilter(0.9f, 1.0f);
+  filter.Apply(100.0f, 10.0f);
+
+  // Limited to max value.
+  value = 1.0f;
+  EXPECT_EQ(value, filter.filtered());
+
+  filter.Apply(1.0f, 0.0f);
+  value = 0.9f * value;
+  EXPECT_FLOAT_EQ(value, filter.filtered());
+}
+
+}  // namespace rtc
diff --git a/rtc_base/numerics/histogram_percentile_counter.cc b/rtc_base/numerics/histogram_percentile_counter.cc
new file mode 100644
index 0000000..87ebd53
--- /dev/null
+++ b/rtc_base/numerics/histogram_percentile_counter.cc
@@ -0,0 +1,79 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/numerics/histogram_percentile_counter.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "rtc_base/checks.h"
+
+namespace rtc {
+HistogramPercentileCounter::HistogramPercentileCounter(
+    uint32_t long_tail_boundary)
+    : histogram_low_(size_t{long_tail_boundary}),
+      long_tail_boundary_(long_tail_boundary),
+      total_elements_(0),
+      total_elements_low_(0) {}
+
+HistogramPercentileCounter::~HistogramPercentileCounter() = default;
+
+void HistogramPercentileCounter::Add(const HistogramPercentileCounter& other) {
+  for (uint32_t value = 0; value < other.long_tail_boundary_; ++value) {
+    Add(value, other.histogram_low_[value]);
+  }
+  for (const auto& it : histogram_high_) {
+    Add(it.first, it.second);
+  }
+}
+
+void HistogramPercentileCounter::Add(uint32_t value, size_t count) {
+  if (value < long_tail_boundary_) {
+    histogram_low_[value] += count;
+    total_elements_low_ += count;
+  } else {
+    histogram_high_[value] += count;
+  }
+  total_elements_ += count;
+}
+
+void HistogramPercentileCounter::Add(uint32_t value) {
+  Add(value, 1);
+}
+
+rtc::Optional<uint32_t> HistogramPercentileCounter::GetPercentile(
+    float fraction) {
+  RTC_CHECK_LE(fraction, 1.0);
+  RTC_CHECK_GE(fraction, 0.0);
+  if (total_elements_ == 0)
+    return rtc::nullopt;
+  size_t elements_to_skip = static_cast<size_t>(
+      std::max(0.0f, std::ceil(total_elements_ * fraction) - 1));
+  if (elements_to_skip >= total_elements_)
+    elements_to_skip = total_elements_ - 1;
+  if (elements_to_skip < total_elements_low_) {
+    for (uint32_t value = 0; value < long_tail_boundary_; ++value) {
+      if (elements_to_skip < histogram_low_[value])
+        return value;
+      elements_to_skip -= histogram_low_[value];
+    }
+  } else {
+    elements_to_skip -= total_elements_low_;
+    for (const auto& it : histogram_high_) {
+      if (elements_to_skip < it.second)
+        return it.first;
+      elements_to_skip -= it.second;
+    }
+  }
+  RTC_NOTREACHED();
+  return rtc::nullopt;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/numerics/histogram_percentile_counter.h b/rtc_base/numerics/histogram_percentile_counter.h
new file mode 100644
index 0000000..4ad2e53
--- /dev/null
+++ b/rtc_base/numerics/histogram_percentile_counter.h
@@ -0,0 +1,43 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_NUMERICS_HISTOGRAM_PERCENTILE_COUNTER_H_
+#define RTC_BASE_NUMERICS_HISTOGRAM_PERCENTILE_COUNTER_H_
+
+#include <stdint.h>
+#include <map>
+#include <vector>
+
+#include "api/optional.h"
+
+namespace rtc {
+// Calculates percentiles on the stream of data. Use |Add| methods to add new
+// values. Use |GetPercentile| to get percentile of the currently added values.
+class HistogramPercentileCounter {
+ public:
+  // Values below |long_tail_boundary| are stored as the histogram in an array.
+  // Values above - in a map.
+  explicit HistogramPercentileCounter(uint32_t long_tail_boundary);
+  ~HistogramPercentileCounter();
+  void Add(uint32_t value);
+  void Add(uint32_t value, size_t count);
+  void Add(const HistogramPercentileCounter& other);
+  // Argument should be from 0 to 1.
+  rtc::Optional<uint32_t> GetPercentile(float fraction);
+
+ private:
+  std::vector<size_t> histogram_low_;
+  std::map<uint32_t, size_t> histogram_high_;
+  const uint32_t long_tail_boundary_;
+  size_t total_elements_;
+  size_t total_elements_low_;
+};
+}  // namespace rtc
+#endif  // RTC_BASE_NUMERICS_HISTOGRAM_PERCENTILE_COUNTER_H_
diff --git a/rtc_base/numerics/histogram_percentile_counter_unittest.cc b/rtc_base/numerics/histogram_percentile_counter_unittest.cc
new file mode 100644
index 0000000..a004dba
--- /dev/null
+++ b/rtc_base/numerics/histogram_percentile_counter_unittest.cc
@@ -0,0 +1,43 @@
+/*
+ *  Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/numerics/histogram_percentile_counter.h"
+
+#include <utility>
+#include <vector>
+
+#include "test/gtest.h"
+
+TEST(HistogramPercentileCounterTest, ReturnsCorrectPercentiles) {
+  rtc::HistogramPercentileCounter counter(10);
+  const std::vector<int> kTestValues = {1,  2,  3,  4,  5,  6,  7,  8,  9,  10,
+                                        11, 12, 13, 14, 15, 16, 17, 18, 19, 20};
+
+  EXPECT_FALSE(counter.GetPercentile(0.5f));
+  // Pairs of {fraction, percentile value} computed by hand
+  // for |kTestValues|.
+  const std::vector<std::pair<float, uint32_t>> kTestPercentiles = {
+      {0.0f, 1},   {0.01f, 1},  {0.5f, 10}, {0.9f, 18},
+      {0.95f, 19}, {0.99f, 20}, {1.0f, 20}};
+  for (int value : kTestValues) {
+    counter.Add(value);
+  }
+  for (const auto& test_percentile : kTestPercentiles) {
+    EXPECT_EQ(test_percentile.second,
+              counter.GetPercentile(test_percentile.first).value_or(0));
+  }
+}
+
+TEST(HistogramPercentileCounterTest, HandlesEmptySequence) {
+  rtc::HistogramPercentileCounter counter(10);
+  EXPECT_FALSE(counter.GetPercentile(0.5f));
+  counter.Add(1u);
+  EXPECT_TRUE(counter.GetPercentile(0.5f));
+}
diff --git a/rtc_base/numerics/mathutils.h b/rtc_base/numerics/mathutils.h
new file mode 100644
index 0000000..5036c8f
--- /dev/null
+++ b/rtc_base/numerics/mathutils.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright 2005 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_NUMERICS_MATHUTILS_H_
+#define RTC_BASE_NUMERICS_MATHUTILS_H_
+
+#include <math.h>
+#include <type_traits>
+
+#include "rtc_base/checks.h"
+
+#ifndef M_PI
+#define M_PI 3.14159265359f
+#endif
+
+// Given two numbers |x| and |y| such that x >= y, computes the difference
+// x - y without causing undefined behavior due to signed overflow.
+template <typename T>
+typename std::make_unsigned<T>::type unsigned_difference(T x, T y) {
+  static_assert(
+      std::is_signed<T>::value,
+      "Function unsigned_difference is only meaningful for signed types.");
+  RTC_DCHECK_GE(x, y);
+  typedef typename std::make_unsigned<T>::type unsigned_type;
+  // int -> unsigned conversion repeatedly adds UINT_MAX + 1 until the number
+  // can be represented as an unsigned. Since we know that the actual
+  // difference x - y can be represented as an unsigned, it is sufficient to
+  // compute the difference modulo UINT_MAX + 1, i.e using unsigned arithmetic.
+  return static_cast<unsigned_type>(x) - static_cast<unsigned_type>(y);
+}
+
+#endif  // RTC_BASE_NUMERICS_MATHUTILS_H_
diff --git a/rtc_base/numerics/mod_ops.h b/rtc_base/numerics/mod_ops.h
new file mode 100644
index 0000000..90d3ed8
--- /dev/null
+++ b/rtc_base/numerics/mod_ops.h
@@ -0,0 +1,143 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_NUMERICS_MOD_OPS_H_
+#define RTC_BASE_NUMERICS_MOD_OPS_H_
+
+#include <algorithm>
+#include <limits>
+#include <type_traits>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+template <unsigned long M>                                    // NOLINT
+inline unsigned long Add(unsigned long a, unsigned long b) {  // NOLINT
+  RTC_DCHECK_LT(a, M);
+  unsigned long t = M - b % M;  // NOLINT
+  unsigned long res = a - t;    // NOLINT
+  if (t > a)
+    return res + M;
+  return res;
+}
+
+template <unsigned long M>                                         // NOLINT
+inline unsigned long Subtract(unsigned long a, unsigned long b) {  // NOLINT
+  RTC_DCHECK_LT(a, M);
+  unsigned long sub = b % M;  // NOLINT
+  if (a < sub)
+    return M - (sub - a);
+  return a - sub;
+}
+
+// Calculates the forward difference between two wrapping numbers.
+//
+// Example:
+// uint8_t x = 253;
+// uint8_t y = 2;
+//
+// ForwardDiff(x, y) == 5
+//
+//   252   253   254   255    0     1     2     3
+// #################################################
+// |     |  x  |     |     |     |     |  y  |     |
+// #################################################
+//          |----->----->----->----->----->
+//
+// ForwardDiff(y, x) == 251
+//
+//   252   253   254   255    0     1     2     3
+// #################################################
+// |     |  x  |     |     |     |     |  y  |     |
+// #################################################
+// -->----->                              |----->---
+//
+// If M > 0 then wrapping occurs at M, if M == 0 then wrapping occurs at the
+// largest value representable by T.
+template <typename T, T M>
+inline typename std::enable_if<(M > 0), T>::type ForwardDiff(T a, T b) {
+  static_assert(std::is_unsigned<T>::value,
+                "Type must be an unsigned integer.");
+  RTC_DCHECK_LT(a, M);
+  RTC_DCHECK_LT(b, M);
+  return a <= b ? b - a : M - (a - b);
+}
+
+template <typename T, T M>
+inline typename std::enable_if<(M == 0), T>::type ForwardDiff(T a, T b) {
+  static_assert(std::is_unsigned<T>::value,
+                "Type must be an unsigned integer.");
+  return b - a;
+}
+
+template <typename T>
+inline T ForwardDiff(T a, T b) {
+  return ForwardDiff<T, 0>(a, b);
+}
+
+// Calculates the reverse difference between two wrapping numbers.
+//
+// Example:
+// uint8_t x = 253;
+// uint8_t y = 2;
+//
+// ReverseDiff(y, x) == 5
+//
+//   252   253   254   255    0     1     2     3
+// #################################################
+// |     |  x  |     |     |     |     |  y  |     |
+// #################################################
+//          <-----<-----<-----<-----<-----|
+//
+// ReverseDiff(x, y) == 251
+//
+//   252   253   254   255    0     1     2     3
+// #################################################
+// |     |  x  |     |     |     |     |  y  |     |
+// #################################################
+// ---<-----|                             |<-----<--
+//
+// If M > 0 then wrapping occurs at M, if M == 0 then wrapping occurs at the
+// largest value representable by T.
+template <typename T, T M>
+inline typename std::enable_if<(M > 0), T>::type ReverseDiff(T a, T b) {
+  static_assert(std::is_unsigned<T>::value,
+                "Type must be an unsigned integer.");
+  RTC_DCHECK_LT(a, M);
+  RTC_DCHECK_LT(b, M);
+  return b <= a ? a - b : M - (b - a);
+}
+
+template <typename T, T M>
+inline typename std::enable_if<(M == 0), T>::type ReverseDiff(T a, T b) {
+  static_assert(std::is_unsigned<T>::value,
+                "Type must be an unsigned integer.");
+  return a - b;
+}
+
+template <typename T>
+inline T ReverseDiff(T a, T b) {
+  return ReverseDiff<T, 0>(a, b);
+}
+
+// Calculates the minimum distance between to wrapping numbers.
+//
+// The minimum distance is defined as min(ForwardDiff(a, b), ReverseDiff(a, b))
+template <typename T, T M = 0>
+inline T MinDiff(T a, T b) {
+  static_assert(std::is_unsigned<T>::value,
+                "Type must be an unsigned integer.");
+  return std::min(ForwardDiff<T, M>(a, b), ReverseDiff<T, M>(a, b));
+}
+
+}  // namespace webrtc
+
+#endif  // RTC_BASE_NUMERICS_MOD_OPS_H_
diff --git a/rtc_base/numerics/mod_ops_unittest.cc b/rtc_base/numerics/mod_ops_unittest.cc
new file mode 100644
index 0000000..7b03e65
--- /dev/null
+++ b/rtc_base/numerics/mod_ops_unittest.cc
@@ -0,0 +1,156 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/numerics/mod_ops.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+class TestModOps : public ::testing::Test {
+ protected:
+  // Can't use std::numeric_limits<unsigned long>::max() since
+  // MSVC doesn't support constexpr.
+  static const unsigned long ulmax = ~0ul;  // NOLINT
+};
+
+TEST_F(TestModOps, Add) {
+  const int D = 100;
+  ASSERT_EQ(1u, Add<D>(0, 1));
+  ASSERT_EQ(0u, Add<D>(0, D));
+  for (int i = 0; i < D; ++i)
+    ASSERT_EQ(0u, Add<D>(i, D - i));
+
+  int t = 37;
+  uint8_t a = t;
+  for (int i = 0; i < 256; ++i) {
+    ASSERT_EQ(a, static_cast<uint8_t>(t));
+    t = Add<256>(t, 1);
+    ++a;
+  }
+}
+
+TEST_F(TestModOps, AddLarge) {
+  const unsigned long D = ulmax - 10ul;  // NOLINT
+  unsigned long l = D - 1ul;             // NOLINT
+  ASSERT_EQ(D - 2ul, Add<D>(l, l));
+  ASSERT_EQ(9ul, Add<D>(l, ulmax));
+  ASSERT_EQ(10ul, Add<D>(0ul, ulmax));
+}
+
+TEST_F(TestModOps, Subtract) {
+  const int D = 100;
+  ASSERT_EQ(99u, Subtract<D>(0, 1));
+  ASSERT_EQ(0u, Subtract<D>(0, D));
+  for (int i = 0; i < D; ++i)
+    ASSERT_EQ(0u, Subtract<D>(i, D + i));
+
+  int t = 37;
+  uint8_t a = t;
+  for (int i = 0; i < 256; ++i) {
+    ASSERT_EQ(a, static_cast<uint8_t>(t));
+    t = Subtract<256>(t, 1);
+    --a;
+  }
+}
+
+TEST_F(TestModOps, SubtractLarge) {
+  // NOLINTNEXTLINE
+  const unsigned long D = ulmax - 10ul;  // NOLINT
+  unsigned long l = D - 1ul;             // NOLINT
+  ASSERT_EQ(0ul, Subtract<D>(l, l));
+  ASSERT_EQ(D - 11ul, Subtract<D>(l, ulmax));
+  ASSERT_EQ(D - 10ul, Subtract<D>(0ul, ulmax));
+}
+
+TEST_F(TestModOps, ForwardDiff) {
+  ASSERT_EQ(0u, ForwardDiff(4711u, 4711u));
+
+  uint8_t x = 0;
+  uint8_t y = 255;
+  for (int i = 0; i < 256; ++i) {
+    ASSERT_EQ(255u, ForwardDiff(x, y));
+    ++x;
+    ++y;
+  }
+
+  int yi = 255;
+  for (int i = 0; i < 256; ++i) {
+    ASSERT_EQ(255u, ForwardDiff<uint8_t>(x, yi));
+    ++x;
+    ++yi;
+  }
+}
+
+TEST_F(TestModOps, ForwardDiffWithDivisor) {
+  ASSERT_EQ(122, (ForwardDiff<uint8_t, 123>(0, 122)));
+  ASSERT_EQ(0, (ForwardDiff<uint8_t, 123>(122, 122)));
+  ASSERT_EQ(122, (ForwardDiff<uint8_t, 123>(1, 0)));
+  ASSERT_EQ(0, (ForwardDiff<uint8_t, 123>(0, 0)));
+  ASSERT_EQ(1, (ForwardDiff<uint8_t, 123>(122, 0)));
+}
+
+TEST_F(TestModOps, ReverseDiff) {
+  ASSERT_EQ(0u, ReverseDiff(4711u, 4711u));
+
+  uint8_t x = 0;
+  uint8_t y = 255;
+  for (int i = 0; i < 256; ++i) {
+    ASSERT_EQ(1u, ReverseDiff(x, y));
+    ++x;
+    ++y;
+  }
+
+  int yi = 255;
+  for (int i = 0; i < 256; ++i) {
+    ASSERT_EQ(1u, ReverseDiff<uint8_t>(x, yi));
+    ++x;
+    ++yi;
+  }
+}
+
+TEST_F(TestModOps, ReverseDiffWithDivisor) {
+  ASSERT_EQ(1, (ReverseDiff<uint8_t, 123>(0, 122)));
+  ASSERT_EQ(0, (ReverseDiff<uint8_t, 123>(122, 122)));
+  ASSERT_EQ(1, (ReverseDiff<uint8_t, 123>(1, 0)));
+  ASSERT_EQ(0, (ReverseDiff<uint8_t, 123>(0, 0)));
+  ASSERT_EQ(122, (ReverseDiff<uint8_t, 123>(122, 0)));
+}
+
+TEST_F(TestModOps, MinDiff) {
+  for (uint16_t i = 0; i < 256; ++i) {
+    ASSERT_EQ(0, MinDiff<uint8_t>(i, i));
+    ASSERT_EQ(1, MinDiff<uint8_t>(i - 1, i));
+    ASSERT_EQ(1, MinDiff<uint8_t>(i + 1, i));
+  }
+
+  for (uint8_t i = 0; i < 128; ++i)
+    ASSERT_EQ(i, MinDiff<uint8_t>(0, i));
+
+  for (uint8_t i = 0; i < 128; ++i)
+    ASSERT_EQ(128 - i, MinDiff<uint8_t>(0, 128 + i));
+}
+
+TEST_F(TestModOps, MinDiffWitDivisor) {
+  ASSERT_EQ(5u, (MinDiff<uint8_t, 11>(0, 5)));
+  ASSERT_EQ(5u, (MinDiff<uint8_t, 11>(0, 6)));
+  ASSERT_EQ(5u, (MinDiff<uint8_t, 11>(5, 0)));
+  ASSERT_EQ(5u, (MinDiff<uint8_t, 11>(6, 0)));
+
+  const uint16_t D = 4711;
+
+  for (uint16_t i = 0; i < D / 2; ++i)
+    ASSERT_EQ(i, (MinDiff<uint16_t, D>(0, i)));
+
+  ASSERT_EQ(D / 2, (MinDiff<uint16_t, D>(0, D / 2)));
+
+  for (uint16_t i = 0; i < D / 2; ++i)
+    ASSERT_EQ(D / 2 - i, (MinDiff<uint16_t, D>(0, D / 2 - i)));
+}
+
+}  // namespace webrtc
diff --git a/rtc_base/numerics/moving_max_counter.h b/rtc_base/numerics/moving_max_counter.h
new file mode 100644
index 0000000..4595cf3
--- /dev/null
+++ b/rtc_base/numerics/moving_max_counter.h
@@ -0,0 +1,116 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_NUMERICS_MOVING_MAX_COUNTER_H_
+#define RTC_BASE_NUMERICS_MOVING_MAX_COUNTER_H_
+
+#include <stdint.h>
+
+#include <deque>
+#include <limits>
+#include <utility>
+
+#include "api/optional.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+
+namespace rtc {
+
+// Implements moving max: can add samples to it and calculate maximum over some
+// fixed moving window.
+//
+// Window size is configured at constructor.
+// Samples can be added with |Add()| and max over current window is returned by
+// |MovingMax|. |current_time_ms| in successive calls to Add and MovingMax
+// should never decrease as if it's a wallclock time.
+template <class T>
+class MovingMaxCounter {
+ public:
+  explicit MovingMaxCounter(int64_t window_length_ms);
+  // Advances the current time, and adds a new sample. The new current time must
+  // be at least as large as the old current time.
+  void Add(const T& sample, int64_t current_time_ms);
+  // Advances the current time, and returns the maximum sample in the time
+  // window ending at the current time. The new current time must be at least as
+  // large as the old current time.
+  rtc::Optional<T> Max(int64_t current_time_ms);
+  void Reset();
+
+ private:
+  // Throws out obsolete samples.
+  void RollWindow(int64_t new_time_ms);
+  const int64_t window_length_ms_;
+  // This deque stores (timestamp, sample) pairs in chronological order; new
+  // pairs are only ever added at the end. However, because they can't affect
+  // the Max() calculation, pairs older than window_length_ms_ are discarded,
+  // and if an older pair has a sample that's smaller than that of a younger
+  // pair, the older pair is discarded. As a result, the sequence of timestamps
+  // is strictly increasing, and the sequence of samples is strictly decreasing.
+  std::deque<std::pair<int64_t, T>> samples_;
+#if RTC_DCHECK_IS_ON
+  int64_t last_call_time_ms_ = std::numeric_limits<int64_t>::min();
+#endif
+  RTC_DISALLOW_COPY_AND_ASSIGN(MovingMaxCounter);
+};
+
+template <class T>
+MovingMaxCounter<T>::MovingMaxCounter(int64_t window_length_ms)
+    : window_length_ms_(window_length_ms) {}
+
+template <class T>
+void MovingMaxCounter<T>::Add(const T& sample, int64_t current_time_ms) {
+  RollWindow(current_time_ms);
+  // Remove samples that will never be maximum in any window: newly added sample
+  // will always be in all windows the previous samples are. Thus, smaller or
+  // equal samples could be removed. This will maintain the invariant - deque
+  // contains strictly decreasing sequence of values.
+  while (!samples_.empty() && samples_.back().second <= sample) {
+    samples_.pop_back();
+  }
+  // Add the new sample but only if there's no existing sample at the same time.
+  // Due to checks above, the already existing element will be larger, so the
+  // new sample will never be the maximum in any window.
+  if (samples_.empty() || samples_.back().first < current_time_ms) {
+    samples_.emplace_back(std::make_pair(current_time_ms, sample));
+  }
+}
+
+template <class T>
+rtc::Optional<T> MovingMaxCounter<T>::Max(int64_t current_time_ms) {
+  RollWindow(current_time_ms);
+  rtc::Optional<T> res;
+  if (!samples_.empty()) {
+    res.emplace(samples_.front().second);
+  }
+  return res;
+}
+
+template <class T>
+void MovingMaxCounter<T>::Reset() {
+  samples_.clear();
+}
+
+template <class T>
+void MovingMaxCounter<T>::RollWindow(int64_t new_time_ms) {
+#if RTC_DCHECK_IS_ON
+  RTC_DCHECK_GE(new_time_ms, last_call_time_ms_);
+  last_call_time_ms_ = new_time_ms;
+#endif
+  const int64_t window_begin_ms = new_time_ms - window_length_ms_;
+  auto it = samples_.begin();
+  while (it != samples_.end() && it->first < window_begin_ms) {
+    ++it;
+  }
+  samples_.erase(samples_.begin(), it);
+}
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_NUMERICS_MOVING_MAX_COUNTER_H_
diff --git a/rtc_base/numerics/moving_max_counter_unittest.cc b/rtc_base/numerics/moving_max_counter_unittest.cc
new file mode 100644
index 0000000..4e74d6d
--- /dev/null
+++ b/rtc_base/numerics/moving_max_counter_unittest.cc
@@ -0,0 +1,57 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/numerics/moving_max_counter.h"
+#include "test/gtest.h"
+
+TEST(MovingMaxCounter, ReportsMaximumInTheWindow) {
+  rtc::MovingMaxCounter<int> counter(100);
+  counter.Add(1, 1);
+  EXPECT_EQ(counter.Max(1), 1);
+  counter.Add(2, 30);
+  EXPECT_EQ(counter.Max(30), 2);
+  counter.Add(100, 60);
+  EXPECT_EQ(counter.Max(60), 100);
+  counter.Add(4, 70);
+  EXPECT_EQ(counter.Max(70), 100);
+  counter.Add(5, 90);
+  EXPECT_EQ(counter.Max(90), 100);
+}
+
+TEST(MovingMaxCounter, IgnoresOldElements) {
+  rtc::MovingMaxCounter<int> counter(100);
+  counter.Add(1, 1);
+  counter.Add(2, 30);
+  counter.Add(100, 60);
+  counter.Add(4, 70);
+  counter.Add(5, 90);
+  EXPECT_EQ(counter.Max(160), 100);
+  // 100 is now out of the window. Next maximum is 5.
+  EXPECT_EQ(counter.Max(161), 5);
+}
+
+TEST(MovingMaxCounter, HandlesEmptyWindow) {
+  rtc::MovingMaxCounter<int> counter(100);
+  counter.Add(123, 1);
+  EXPECT_TRUE(counter.Max(101).has_value());
+  EXPECT_FALSE(counter.Max(102).has_value());
+}
+
+TEST(MovingMaxCounter, HandlesSamplesWithEqualTimestamps) {
+  rtc::MovingMaxCounter<int> counter(100);
+  counter.Add(2, 30);
+  EXPECT_EQ(counter.Max(30), 2);
+  counter.Add(5, 30);
+  EXPECT_EQ(counter.Max(30), 5);
+  counter.Add(4, 30);
+  EXPECT_EQ(counter.Max(30), 5);
+  counter.Add(1, 90);
+  EXPECT_EQ(counter.Max(150), 1);
+}
diff --git a/rtc_base/numerics/moving_median_filter.h b/rtc_base/numerics/moving_median_filter.h
new file mode 100644
index 0000000..b5c5fce
--- /dev/null
+++ b/rtc_base/numerics/moving_median_filter.h
@@ -0,0 +1,79 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_NUMERICS_MOVING_MEDIAN_FILTER_H_
+#define RTC_BASE_NUMERICS_MOVING_MEDIAN_FILTER_H_
+
+#include <list>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/numerics/percentile_filter.h"
+
+namespace webrtc {
+
+// Class to efficiently get moving median filter from a stream of samples.
+template <typename T>
+class MovingMedianFilter {
+ public:
+  // Construct filter. |window_size| is how many latest samples are stored and
+  // used to take median. |window_size| must be positive.
+  explicit MovingMedianFilter(size_t window_size);
+
+  // Insert a new sample.
+  void Insert(const T& value);
+
+  // Removes all samples;
+  void Reset();
+
+  // Get median over the latest window.
+  T GetFilteredValue() const;
+
+ private:
+  PercentileFilter<T> percentile_filter_;
+  std::list<T> samples_;
+  size_t samples_stored_;
+  const size_t window_size_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(MovingMedianFilter);
+};
+
+template <typename T>
+MovingMedianFilter<T>::MovingMedianFilter(size_t window_size)
+    : percentile_filter_(0.5f), samples_stored_(0), window_size_(window_size) {
+  RTC_CHECK_GT(window_size, 0);
+}
+
+template <typename T>
+void MovingMedianFilter<T>::Insert(const T& value) {
+  percentile_filter_.Insert(value);
+  samples_.emplace_back(value);
+  ++samples_stored_;
+  if (samples_stored_ > window_size_) {
+    percentile_filter_.Erase(samples_.front());
+    samples_.pop_front();
+    --samples_stored_;
+  }
+}
+
+template <typename T>
+T MovingMedianFilter<T>::GetFilteredValue() const {
+  return percentile_filter_.GetPercentileValue();
+}
+
+template <typename T>
+void MovingMedianFilter<T>::Reset() {
+  percentile_filter_.Reset();
+  samples_.clear();
+  samples_stored_ = 0;
+}
+
+}  // namespace webrtc
+#endif  // RTC_BASE_NUMERICS_MOVING_MEDIAN_FILTER_H_
diff --git a/rtc_base/numerics/moving_median_filter_unittest.cc b/rtc_base/numerics/moving_median_filter_unittest.cc
new file mode 100644
index 0000000..5a6eb3d
--- /dev/null
+++ b/rtc_base/numerics/moving_median_filter_unittest.cc
@@ -0,0 +1,51 @@
+/*
+ *  Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/numerics/moving_median_filter.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(MovingMedianFilterTest, ProcessesNoSamples) {
+  MovingMedianFilter<int> filter(2);
+  EXPECT_EQ(0, filter.GetFilteredValue());
+}
+
+TEST(MovingMedianFilterTest, ReturnsMovingMedianWindow5) {
+  MovingMedianFilter<int> filter(5);
+  const int64_t kSamples[5] = {1, 5, 2, 3, 4};
+  const int64_t kExpectedFilteredValues[5] = {1, 1, 2, 2, 3};
+  for (int i = 0; i < 5; ++i) {
+    filter.Insert(kSamples[i]);
+    EXPECT_EQ(kExpectedFilteredValues[i], filter.GetFilteredValue());
+  }
+}
+
+TEST(MovingMedianFilterTest, ReturnsMovingMedianWindow3) {
+  MovingMedianFilter<int> filter(3);
+  const int64_t kSamples[5] = {1, 5, 2, 3, 4};
+  const int64_t kExpectedFilteredValues[5] = {1, 1, 2, 3, 3};
+  for (int i = 0; i < 5; ++i) {
+    filter.Insert(kSamples[i]);
+    EXPECT_EQ(kExpectedFilteredValues[i], filter.GetFilteredValue());
+  }
+}
+
+TEST(MovingMedianFilterTest, ReturnsMovingMedianWindow1) {
+  MovingMedianFilter<int> filter(1);
+  const int64_t kSamples[5] = {1, 5, 2, 3, 4};
+  const int64_t kExpectedFilteredValues[5] = {1, 5, 2, 3, 4};
+  for (int i = 0; i < 5; ++i) {
+    filter.Insert(kSamples[i]);
+    EXPECT_EQ(kExpectedFilteredValues[i], filter.GetFilteredValue());
+  }
+}
+
+}  // namespace webrtc
diff --git a/rtc_base/numerics/percentile_filter.h b/rtc_base/numerics/percentile_filter.h
new file mode 100644
index 0000000..cba4463
--- /dev/null
+++ b/rtc_base/numerics/percentile_filter.h
@@ -0,0 +1,124 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_NUMERICS_PERCENTILE_FILTER_H_
+#define RTC_BASE_NUMERICS_PERCENTILE_FILTER_H_
+
+#include <stdint.h>
+
+#include <iterator>
+#include <set>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+// Class to efficiently get the percentile value from a group of observations.
+// The percentile is the value below which a given percentage of the
+// observations fall.
+template <typename T>
+class PercentileFilter {
+ public:
+  // Construct filter. |percentile| should be between 0 and 1.
+  explicit PercentileFilter(float percentile);
+
+  // Insert one observation. The complexity of this operation is logarithmic in
+  // the size of the container.
+  void Insert(const T& value);
+
+  // Remove one observation or return false if |value| doesn't exist in the
+  // container. The complexity of this operation is logarithmic in the size of
+  // the container.
+  bool Erase(const T& value);
+
+  // Get the percentile value. The complexity of this operation is constant.
+  T GetPercentileValue() const;
+
+  // Removes all the stored observations.
+  void Reset();
+
+ private:
+  // Update iterator and index to point at target percentile value.
+  void UpdatePercentileIterator();
+
+  const float percentile_;
+  std::multiset<T> set_;
+  // Maintain iterator and index of current target percentile value.
+  typename std::multiset<T>::iterator percentile_it_;
+  int64_t percentile_index_;
+};
+
+template <typename T>
+PercentileFilter<T>::PercentileFilter(float percentile)
+    : percentile_(percentile),
+      percentile_it_(set_.begin()),
+      percentile_index_(0) {
+  RTC_CHECK_GE(percentile, 0.0f);
+  RTC_CHECK_LE(percentile, 1.0f);
+}
+
+template <typename T>
+void PercentileFilter<T>::Insert(const T& value) {
+  // Insert element at the upper bound.
+  set_.insert(value);
+  if (set_.size() == 1u) {
+    // First element inserted - initialize percentile iterator and index.
+    percentile_it_ = set_.begin();
+    percentile_index_ = 0;
+  } else if (value < *percentile_it_) {
+    // If new element is before us, increment |percentile_index_|.
+    ++percentile_index_;
+  }
+  UpdatePercentileIterator();
+}
+
+template <typename T>
+bool PercentileFilter<T>::Erase(const T& value) {
+  typename std::multiset<T>::const_iterator it = set_.lower_bound(value);
+  // Ignore erase operation if the element is not present in the current set.
+  if (it == set_.end() || *it != value)
+    return false;
+  if (it == percentile_it_) {
+    // If same iterator, update to the following element. Index is not
+    // affected.
+    percentile_it_ = set_.erase(it);
+  } else {
+    set_.erase(it);
+    // If erased element was before us, decrement |percentile_index_|.
+    if (value <= *percentile_it_)
+      --percentile_index_;
+  }
+  UpdatePercentileIterator();
+  return true;
+}
+
+template <typename T>
+void PercentileFilter<T>::UpdatePercentileIterator() {
+  if (set_.empty())
+    return;
+  const int64_t index = static_cast<int64_t>(percentile_ * (set_.size() - 1));
+  std::advance(percentile_it_, index - percentile_index_);
+  percentile_index_ = index;
+}
+
+template <typename T>
+T PercentileFilter<T>::GetPercentileValue() const {
+  return set_.empty() ? 0 : *percentile_it_;
+}
+
+template <typename T>
+void PercentileFilter<T>::Reset() {
+  set_.clear();
+  percentile_it_ = set_.begin();
+  percentile_index_ = 0;
+}
+}  // namespace webrtc
+
+#endif  // RTC_BASE_NUMERICS_PERCENTILE_FILTER_H_
diff --git a/rtc_base/numerics/percentile_filter_unittest.cc b/rtc_base/numerics/percentile_filter_unittest.cc
new file mode 100644
index 0000000..11fb4a5
--- /dev/null
+++ b/rtc_base/numerics/percentile_filter_unittest.cc
@@ -0,0 +1,141 @@
+/*
+ *  Copyright 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+#include <climits>
+#include <random>
+
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/numerics/percentile_filter.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class PercentileFilterTest : public ::testing::TestWithParam<float> {
+ public:
+  PercentileFilterTest() : filter_(GetParam()) {
+    // Make sure the tests are deterministic by seeding with a constant.
+    srand(42);
+  }
+
+ protected:
+  PercentileFilter<int64_t> filter_;
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(PercentileFilterTest);
+};
+
+INSTANTIATE_TEST_CASE_P(PercentileFilterTests,
+                        PercentileFilterTest,
+                        ::testing::Values(0.0f, 0.1f, 0.5f, 0.9f, 1.0f));
+
+TEST(PercentileFilterTest, MinFilter) {
+  PercentileFilter<int64_t> filter(0.0f);
+  filter.Insert(4);
+  EXPECT_EQ(4, filter.GetPercentileValue());
+  filter.Insert(3);
+  EXPECT_EQ(3, filter.GetPercentileValue());
+}
+
+TEST(PercentileFilterTest, MaxFilter) {
+  PercentileFilter<int64_t> filter(1.0f);
+  filter.Insert(3);
+  EXPECT_EQ(3, filter.GetPercentileValue());
+  filter.Insert(4);
+  EXPECT_EQ(4, filter.GetPercentileValue());
+}
+
+TEST(PercentileFilterTest, MedianFilterDouble) {
+  PercentileFilter<double> filter(0.5f);
+  filter.Insert(2.71828);
+  filter.Insert(3.14159);
+  filter.Insert(1.41421);
+  EXPECT_EQ(2.71828, filter.GetPercentileValue());
+}
+
+TEST(PercentileFilterTest, MedianFilterInt) {
+  PercentileFilter<int> filter(0.5f);
+  filter.Insert(INT_MIN);
+  filter.Insert(1);
+  filter.Insert(2);
+  EXPECT_EQ(1, filter.GetPercentileValue());
+  filter.Insert(INT_MAX);
+  filter.Erase(INT_MIN);
+  EXPECT_EQ(2, filter.GetPercentileValue());
+}
+
+TEST(PercentileFilterTest, MedianFilterUnsigned) {
+  PercentileFilter<unsigned> filter(0.5f);
+  filter.Insert(UINT_MAX);
+  filter.Insert(2u);
+  filter.Insert(1u);
+  EXPECT_EQ(2u, filter.GetPercentileValue());
+  filter.Insert(0u);
+  filter.Erase(UINT_MAX);
+  EXPECT_EQ(1u, filter.GetPercentileValue());
+}
+
+TEST_P(PercentileFilterTest, EmptyFilter) {
+  EXPECT_EQ(0, filter_.GetPercentileValue());
+  filter_.Insert(3);
+  bool success = filter_.Erase(3);
+  EXPECT_TRUE(success);
+  EXPECT_EQ(0, filter_.GetPercentileValue());
+}
+
+TEST_P(PercentileFilterTest, EraseNonExistingElement) {
+  bool success = filter_.Erase(3);
+  EXPECT_FALSE(success);
+  EXPECT_EQ(0, filter_.GetPercentileValue());
+  filter_.Insert(4);
+  success = filter_.Erase(3);
+  EXPECT_FALSE(success);
+  EXPECT_EQ(4, filter_.GetPercentileValue());
+}
+
+TEST_P(PercentileFilterTest, DuplicateElements) {
+  filter_.Insert(3);
+  filter_.Insert(3);
+  filter_.Erase(3);
+  EXPECT_EQ(3, filter_.GetPercentileValue());
+}
+
+TEST_P(PercentileFilterTest, InsertAndEraseTenValuesInRandomOrder) {
+  int64_t zero_to_nine[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+  // The percentile value of the ten values above.
+  const int64_t expected_value = static_cast<int64_t>(GetParam() * 9);
+
+  // Insert two sets of |zero_to_nine| in random order.
+  for (int i = 0; i < 2; ++i) {
+    std::shuffle(zero_to_nine, zero_to_nine + 10,
+                 std::mt19937(std::random_device()()));
+    for (int64_t value : zero_to_nine)
+      filter_.Insert(value);
+    // After inserting a full set of |zero_to_nine|, the percentile should
+    // stay constant.
+    EXPECT_EQ(expected_value, filter_.GetPercentileValue());
+  }
+
+  // Insert and erase sets of |zero_to_nine| in random order a few times.
+  for (int i = 0; i < 3; ++i) {
+    std::shuffle(zero_to_nine, zero_to_nine + 10,
+                 std::mt19937(std::random_device()()));
+    for (int64_t value : zero_to_nine)
+      filter_.Erase(value);
+    EXPECT_EQ(expected_value, filter_.GetPercentileValue());
+    std::shuffle(zero_to_nine, zero_to_nine + 10,
+                 std::mt19937(std::random_device()()));
+    for (int64_t value : zero_to_nine)
+      filter_.Insert(value);
+    EXPECT_EQ(expected_value, filter_.GetPercentileValue());
+  }
+}
+
+}  // namespace webrtc
diff --git a/rtc_base/numerics/safe_compare.h b/rtc_base/numerics/safe_compare.h
new file mode 100644
index 0000000..85f0a30
--- /dev/null
+++ b/rtc_base/numerics/safe_compare.h
@@ -0,0 +1,176 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file defines six constexpr functions:
+//
+//   rtc::SafeEq  // ==
+//   rtc::SafeNe  // !=
+//   rtc::SafeLt  // <
+//   rtc::SafeLe  // <=
+//   rtc::SafeGt  // >
+//   rtc::SafeGe  // >=
+//
+// They each accept two arguments of arbitrary types, and in almost all cases,
+// they simply call the appropriate comparison operator. However, if both
+// arguments are integers, they don't compare them using C++'s quirky rules,
+// but instead adhere to the true mathematical definitions. It is as if the
+// arguments were first converted to infinite-range signed integers, and then
+// compared, although of course nothing expensive like that actually takes
+// place. In practice, for signed/signed and unsigned/unsigned comparisons and
+// some mixed-signed comparisons with a compile-time constant, the overhead is
+// zero; in the remaining cases, it is just a few machine instructions (no
+// branches).
+
+#ifndef RTC_BASE_NUMERICS_SAFE_COMPARE_H_
+#define RTC_BASE_NUMERICS_SAFE_COMPARE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <type_traits>
+#include <utility>
+
+#include "rtc_base/type_traits.h"
+
+namespace rtc {
+
+namespace safe_cmp_impl {
+
+template <size_t N>
+struct LargerIntImpl : std::false_type {};
+template <>
+struct LargerIntImpl<sizeof(int8_t)> : std::true_type {
+  using type = int16_t;
+};
+template <>
+struct LargerIntImpl<sizeof(int16_t)> : std::true_type {
+  using type = int32_t;
+};
+template <>
+struct LargerIntImpl<sizeof(int32_t)> : std::true_type {
+  using type = int64_t;
+};
+
+// LargerInt<T1, T2>::value is true iff there's a signed type that's larger
+// than T1 (and no larger than the larger of T2 and int*, for performance
+// reasons); and if there is such a type, LargerInt<T1, T2>::type is an alias
+// for it.
+template <typename T1, typename T2>
+struct LargerInt
+    : LargerIntImpl<sizeof(T1) < sizeof(T2) || sizeof(T1) < sizeof(int*)
+                        ? sizeof(T1)
+                        : 0> {};
+
+template <typename T>
+constexpr typename std::make_unsigned<T>::type MakeUnsigned(T a) {
+  return static_cast<typename std::make_unsigned<T>::type>(a);
+}
+
+// Overload for when both T1 and T2 have the same signedness.
+template <typename Op,
+          typename T1,
+          typename T2,
+          typename std::enable_if<std::is_signed<T1>::value ==
+                                  std::is_signed<T2>::value>::type* = nullptr>
+constexpr bool Cmp(T1 a, T2 b) {
+  return Op::Op(a, b);
+}
+
+// Overload for signed - unsigned comparison that can be promoted to a bigger
+// signed type.
+template <typename Op,
+          typename T1,
+          typename T2,
+          typename std::enable_if<std::is_signed<T1>::value &&
+                                  std::is_unsigned<T2>::value &&
+                                  LargerInt<T2, T1>::value>::type* = nullptr>
+constexpr bool Cmp(T1 a, T2 b) {
+  return Op::Op(a, static_cast<typename LargerInt<T2, T1>::type>(b));
+}
+
+// Overload for unsigned - signed comparison that can be promoted to a bigger
+// signed type.
+template <typename Op,
+          typename T1,
+          typename T2,
+          typename std::enable_if<std::is_unsigned<T1>::value &&
+                                  std::is_signed<T2>::value &&
+                                  LargerInt<T1, T2>::value>::type* = nullptr>
+constexpr bool Cmp(T1 a, T2 b) {
+  return Op::Op(static_cast<typename LargerInt<T1, T2>::type>(a), b);
+}
+
+// Overload for signed - unsigned comparison that can't be promoted to a bigger
+// signed type.
+template <typename Op,
+          typename T1,
+          typename T2,
+          typename std::enable_if<std::is_signed<T1>::value &&
+                                  std::is_unsigned<T2>::value &&
+                                  !LargerInt<T2, T1>::value>::type* = nullptr>
+constexpr bool Cmp(T1 a, T2 b) {
+  return a < 0 ? Op::Op(-1, 0) : Op::Op(safe_cmp_impl::MakeUnsigned(a), b);
+}
+
+// Overload for unsigned - signed comparison that can't be promoted to a bigger
+// signed type.
+template <typename Op,
+          typename T1,
+          typename T2,
+          typename std::enable_if<std::is_unsigned<T1>::value &&
+                                  std::is_signed<T2>::value &&
+                                  !LargerInt<T1, T2>::value>::type* = nullptr>
+constexpr bool Cmp(T1 a, T2 b) {
+  return b < 0 ? Op::Op(0, -1) : Op::Op(a, safe_cmp_impl::MakeUnsigned(b));
+}
+
+#define RTC_SAFECMP_MAKE_OP(name, op)      \
+  struct name {                            \
+    template <typename T1, typename T2>    \
+    static constexpr bool Op(T1 a, T2 b) { \
+      return a op b;                       \
+    }                                      \
+  };
+RTC_SAFECMP_MAKE_OP(EqOp, ==)
+RTC_SAFECMP_MAKE_OP(NeOp, !=)
+RTC_SAFECMP_MAKE_OP(LtOp, <)
+RTC_SAFECMP_MAKE_OP(LeOp, <=)
+RTC_SAFECMP_MAKE_OP(GtOp, >)
+RTC_SAFECMP_MAKE_OP(GeOp, >=)
+#undef RTC_SAFECMP_MAKE_OP
+
+}  // namespace safe_cmp_impl
+
+#define RTC_SAFECMP_MAKE_FUN(name)                                            \
+  template <typename T1, typename T2>                                         \
+  constexpr                                                                   \
+      typename std::enable_if<IsIntlike<T1>::value && IsIntlike<T2>::value,   \
+                              bool>::type Safe##name(T1 a, T2 b) {            \
+    /* Unary plus here turns enums into real integral types. */               \
+    return safe_cmp_impl::Cmp<safe_cmp_impl::name##Op>(+a, +b);               \
+  }                                                                           \
+  template <typename T1, typename T2>                                         \
+  constexpr                                                                   \
+      typename std::enable_if<!IsIntlike<T1>::value || !IsIntlike<T2>::value, \
+                              bool>::type Safe##name(const T1& a,             \
+                                                     const T2& b) {           \
+    return safe_cmp_impl::name##Op::Op(a, b);                                 \
+  }
+RTC_SAFECMP_MAKE_FUN(Eq)
+RTC_SAFECMP_MAKE_FUN(Ne)
+RTC_SAFECMP_MAKE_FUN(Lt)
+RTC_SAFECMP_MAKE_FUN(Le)
+RTC_SAFECMP_MAKE_FUN(Gt)
+RTC_SAFECMP_MAKE_FUN(Ge)
+#undef RTC_SAFECMP_MAKE_FUN
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_NUMERICS_SAFE_COMPARE_H_
diff --git a/rtc_base/numerics/safe_compare_unittest.cc b/rtc_base/numerics/safe_compare_unittest.cc
new file mode 100644
index 0000000..e7a251f
--- /dev/null
+++ b/rtc_base/numerics/safe_compare_unittest.cc
@@ -0,0 +1,394 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits>
+
+#include "rtc_base/numerics/safe_compare.h"
+#include "test/gtest.h"
+
+namespace rtc {
+
+namespace {
+
+constexpr std::uintmax_t umax = std::numeric_limits<std::uintmax_t>::max();
+constexpr std::intmax_t imin = std::numeric_limits<std::intmax_t>::min();
+constexpr std::intmax_t m1 = -1;
+
+// m1 and umax have the same representation because we use 2's complement
+// arithmetic, so naive casting will confuse them.
+static_assert(static_cast<std::uintmax_t>(m1) == umax, "");
+static_assert(m1 == static_cast<std::intmax_t>(umax), "");
+
+static const std::pair<int, int> p1(1, 1);
+static const std::pair<int, int> p2(1, 2);
+
+}  // namespace
+
+// clang-format off
+
+// These functions aren't used in the tests, but it's useful to look at the
+// compiler output for them, and verify that (1) the same-signedness *Safe
+// functions result in exactly the same code as their *Ref counterparts, and
+// that (2) the mixed-signedness *Safe functions have just a few extra
+// arithmetic and logic instructions (but no extra control flow instructions).
+bool TestLessThanRef(      int a,      int b) { return a < b; }
+bool TestLessThanRef( unsigned a, unsigned b) { return a < b; }
+bool TestLessThanSafe(     int a,      int b) { return SafeLt(a, b); }
+bool TestLessThanSafe(unsigned a, unsigned b) { return SafeLt(a, b); }
+bool TestLessThanSafe(unsigned a,      int b) { return SafeLt(a, b); }
+bool TestLessThanSafe(     int a, unsigned b) { return SafeLt(a, b); }
+
+// For these, we expect the *Ref and *Safe functions to result in identical
+// code, except for the ones that compare a signed variable with an unsigned
+// constant; in that case, the *Ref function does an unsigned comparison (fast
+// but incorrect) and the *Safe function spends a few extra instructions on
+// doing it right.
+bool TestLessThan17Ref(       int a) { return a < 17; }
+bool TestLessThan17Ref(  unsigned a) { return a < 17; }
+bool TestLessThan17uRef(      int a) { return static_cast<unsigned>(a) < 17u; }
+bool TestLessThan17uRef( unsigned a) { return a < 17u; }
+bool TestLessThan17Safe(      int a) { return SafeLt(a, 17); }
+bool TestLessThan17Safe( unsigned a) { return SafeLt(a, 17); }
+bool TestLessThan17uSafe(     int a) { return SafeLt(a, 17u); }
+bool TestLessThan17uSafe(unsigned a) { return SafeLt(a, 17u); }
+
+// Cases where we can't convert to a larger signed type.
+bool TestLessThanMax( intmax_t a, uintmax_t b) { return SafeLt(a, b); }
+bool TestLessThanMax(uintmax_t a,  intmax_t b) { return SafeLt(a, b); }
+bool TestLessThanMax17u( intmax_t a) { return SafeLt(a, uintmax_t{17}); }
+bool TestLessThanMax17( uintmax_t a) { return SafeLt(a,  intmax_t{17}); }
+
+// Cases where the compiler should be able to compute the result at compile
+// time.
+bool TestLessThanConst1() { return SafeLt(  -1,    1); }
+bool TestLessThanConst2() { return SafeLt(  m1, umax); }
+bool TestLessThanConst3() { return SafeLt(umax, imin); }
+bool TestLessThanConst4(unsigned a) { return SafeLt( a, -1); }
+bool TestLessThanConst5(unsigned a) { return SafeLt(-1,  a); }
+bool TestLessThanConst6(unsigned a) { return SafeLt( a,  a); }
+
+// clang-format on
+
+TEST(SafeCmpTest, Eq) {
+  static_assert(!SafeEq(-1, 2), "");
+  static_assert(!SafeEq(-1, 2u), "");
+  static_assert(!SafeEq(2, -1), "");
+  static_assert(!SafeEq(2u, -1), "");
+
+  static_assert(!SafeEq(1, 2), "");
+  static_assert(!SafeEq(1, 2u), "");
+  static_assert(!SafeEq(1u, 2), "");
+  static_assert(!SafeEq(1u, 2u), "");
+  static_assert(!SafeEq(2, 1), "");
+  static_assert(!SafeEq(2, 1u), "");
+  static_assert(!SafeEq(2u, 1), "");
+  static_assert(!SafeEq(2u, 1u), "");
+
+  static_assert(SafeEq(2, 2), "");
+  static_assert(SafeEq(2, 2u), "");
+  static_assert(SafeEq(2u, 2), "");
+  static_assert(SafeEq(2u, 2u), "");
+
+  static_assert(SafeEq(imin, imin), "");
+  static_assert(!SafeEq(imin, umax), "");
+  static_assert(!SafeEq(umax, imin), "");
+  static_assert(SafeEq(umax, umax), "");
+
+  static_assert(SafeEq(m1, m1), "");
+  static_assert(!SafeEq(m1, umax), "");
+  static_assert(!SafeEq(umax, m1), "");
+  static_assert(SafeEq(umax, umax), "");
+
+  static_assert(!SafeEq(1, 2), "");
+  static_assert(!SafeEq(1, 2.0), "");
+  static_assert(!SafeEq(1.0, 2), "");
+  static_assert(!SafeEq(1.0, 2.0), "");
+  static_assert(!SafeEq(2, 1), "");
+  static_assert(!SafeEq(2, 1.0), "");
+  static_assert(!SafeEq(2.0, 1), "");
+  static_assert(!SafeEq(2.0, 1.0), "");
+
+  static_assert(SafeEq(2, 2), "");
+  static_assert(SafeEq(2, 2.0), "");
+  static_assert(SafeEq(2.0, 2), "");
+  static_assert(SafeEq(2.0, 2.0), "");
+
+  EXPECT_TRUE(SafeEq(p1, p1));
+  EXPECT_FALSE(SafeEq(p1, p2));
+  EXPECT_FALSE(SafeEq(p2, p1));
+  EXPECT_TRUE(SafeEq(p2, p2));
+}
+
+TEST(SafeCmpTest, Ne) {
+  static_assert(SafeNe(-1, 2), "");
+  static_assert(SafeNe(-1, 2u), "");
+  static_assert(SafeNe(2, -1), "");
+  static_assert(SafeNe(2u, -1), "");
+
+  static_assert(SafeNe(1, 2), "");
+  static_assert(SafeNe(1, 2u), "");
+  static_assert(SafeNe(1u, 2), "");
+  static_assert(SafeNe(1u, 2u), "");
+  static_assert(SafeNe(2, 1), "");
+  static_assert(SafeNe(2, 1u), "");
+  static_assert(SafeNe(2u, 1), "");
+  static_assert(SafeNe(2u, 1u), "");
+
+  static_assert(!SafeNe(2, 2), "");
+  static_assert(!SafeNe(2, 2u), "");
+  static_assert(!SafeNe(2u, 2), "");
+  static_assert(!SafeNe(2u, 2u), "");
+
+  static_assert(!SafeNe(imin, imin), "");
+  static_assert(SafeNe(imin, umax), "");
+  static_assert(SafeNe(umax, imin), "");
+  static_assert(!SafeNe(umax, umax), "");
+
+  static_assert(!SafeNe(m1, m1), "");
+  static_assert(SafeNe(m1, umax), "");
+  static_assert(SafeNe(umax, m1), "");
+  static_assert(!SafeNe(umax, umax), "");
+
+  static_assert(SafeNe(1, 2), "");
+  static_assert(SafeNe(1, 2.0), "");
+  static_assert(SafeNe(1.0, 2), "");
+  static_assert(SafeNe(1.0, 2.0), "");
+  static_assert(SafeNe(2, 1), "");
+  static_assert(SafeNe(2, 1.0), "");
+  static_assert(SafeNe(2.0, 1), "");
+  static_assert(SafeNe(2.0, 1.0), "");
+
+  static_assert(!SafeNe(2, 2), "");
+  static_assert(!SafeNe(2, 2.0), "");
+  static_assert(!SafeNe(2.0, 2), "");
+  static_assert(!SafeNe(2.0, 2.0), "");
+
+  EXPECT_FALSE(SafeNe(p1, p1));
+  EXPECT_TRUE(SafeNe(p1, p2));
+  EXPECT_TRUE(SafeNe(p2, p1));
+  EXPECT_FALSE(SafeNe(p2, p2));
+}
+
+TEST(SafeCmpTest, Lt) {
+  static_assert(SafeLt(-1, 2), "");
+  static_assert(SafeLt(-1, 2u), "");
+  static_assert(!SafeLt(2, -1), "");
+  static_assert(!SafeLt(2u, -1), "");
+
+  static_assert(SafeLt(1, 2), "");
+  static_assert(SafeLt(1, 2u), "");
+  static_assert(SafeLt(1u, 2), "");
+  static_assert(SafeLt(1u, 2u), "");
+  static_assert(!SafeLt(2, 1), "");
+  static_assert(!SafeLt(2, 1u), "");
+  static_assert(!SafeLt(2u, 1), "");
+  static_assert(!SafeLt(2u, 1u), "");
+
+  static_assert(!SafeLt(2, 2), "");
+  static_assert(!SafeLt(2, 2u), "");
+  static_assert(!SafeLt(2u, 2), "");
+  static_assert(!SafeLt(2u, 2u), "");
+
+  static_assert(!SafeLt(imin, imin), "");
+  static_assert(SafeLt(imin, umax), "");
+  static_assert(!SafeLt(umax, imin), "");
+  static_assert(!SafeLt(umax, umax), "");
+
+  static_assert(!SafeLt(m1, m1), "");
+  static_assert(SafeLt(m1, umax), "");
+  static_assert(!SafeLt(umax, m1), "");
+  static_assert(!SafeLt(umax, umax), "");
+
+  static_assert(SafeLt(1, 2), "");
+  static_assert(SafeLt(1, 2.0), "");
+  static_assert(SafeLt(1.0, 2), "");
+  static_assert(SafeLt(1.0, 2.0), "");
+  static_assert(!SafeLt(2, 1), "");
+  static_assert(!SafeLt(2, 1.0), "");
+  static_assert(!SafeLt(2.0, 1), "");
+  static_assert(!SafeLt(2.0, 1.0), "");
+
+  static_assert(!SafeLt(2, 2), "");
+  static_assert(!SafeLt(2, 2.0), "");
+  static_assert(!SafeLt(2.0, 2), "");
+  static_assert(!SafeLt(2.0, 2.0), "");
+
+  EXPECT_FALSE(SafeLt(p1, p1));
+  EXPECT_TRUE(SafeLt(p1, p2));
+  EXPECT_FALSE(SafeLt(p2, p1));
+  EXPECT_FALSE(SafeLt(p2, p2));
+}
+
+TEST(SafeCmpTest, Le) {
+  static_assert(SafeLe(-1, 2), "");
+  static_assert(SafeLe(-1, 2u), "");
+  static_assert(!SafeLe(2, -1), "");
+  static_assert(!SafeLe(2u, -1), "");
+
+  static_assert(SafeLe(1, 2), "");
+  static_assert(SafeLe(1, 2u), "");
+  static_assert(SafeLe(1u, 2), "");
+  static_assert(SafeLe(1u, 2u), "");
+  static_assert(!SafeLe(2, 1), "");
+  static_assert(!SafeLe(2, 1u), "");
+  static_assert(!SafeLe(2u, 1), "");
+  static_assert(!SafeLe(2u, 1u), "");
+
+  static_assert(SafeLe(2, 2), "");
+  static_assert(SafeLe(2, 2u), "");
+  static_assert(SafeLe(2u, 2), "");
+  static_assert(SafeLe(2u, 2u), "");
+
+  static_assert(SafeLe(imin, imin), "");
+  static_assert(SafeLe(imin, umax), "");
+  static_assert(!SafeLe(umax, imin), "");
+  static_assert(SafeLe(umax, umax), "");
+
+  static_assert(SafeLe(m1, m1), "");
+  static_assert(SafeLe(m1, umax), "");
+  static_assert(!SafeLe(umax, m1), "");
+  static_assert(SafeLe(umax, umax), "");
+
+  static_assert(SafeLe(1, 2), "");
+  static_assert(SafeLe(1, 2.0), "");
+  static_assert(SafeLe(1.0, 2), "");
+  static_assert(SafeLe(1.0, 2.0), "");
+  static_assert(!SafeLe(2, 1), "");
+  static_assert(!SafeLe(2, 1.0), "");
+  static_assert(!SafeLe(2.0, 1), "");
+  static_assert(!SafeLe(2.0, 1.0), "");
+
+  static_assert(SafeLe(2, 2), "");
+  static_assert(SafeLe(2, 2.0), "");
+  static_assert(SafeLe(2.0, 2), "");
+  static_assert(SafeLe(2.0, 2.0), "");
+
+  EXPECT_TRUE(SafeLe(p1, p1));
+  EXPECT_TRUE(SafeLe(p1, p2));
+  EXPECT_FALSE(SafeLe(p2, p1));
+  EXPECT_TRUE(SafeLe(p2, p2));
+}
+
+TEST(SafeCmpTest, Gt) {
+  static_assert(!SafeGt(-1, 2), "");
+  static_assert(!SafeGt(-1, 2u), "");
+  static_assert(SafeGt(2, -1), "");
+  static_assert(SafeGt(2u, -1), "");
+
+  static_assert(!SafeGt(1, 2), "");
+  static_assert(!SafeGt(1, 2u), "");
+  static_assert(!SafeGt(1u, 2), "");
+  static_assert(!SafeGt(1u, 2u), "");
+  static_assert(SafeGt(2, 1), "");
+  static_assert(SafeGt(2, 1u), "");
+  static_assert(SafeGt(2u, 1), "");
+  static_assert(SafeGt(2u, 1u), "");
+
+  static_assert(!SafeGt(2, 2), "");
+  static_assert(!SafeGt(2, 2u), "");
+  static_assert(!SafeGt(2u, 2), "");
+  static_assert(!SafeGt(2u, 2u), "");
+
+  static_assert(!SafeGt(imin, imin), "");
+  static_assert(!SafeGt(imin, umax), "");
+  static_assert(SafeGt(umax, imin), "");
+  static_assert(!SafeGt(umax, umax), "");
+
+  static_assert(!SafeGt(m1, m1), "");
+  static_assert(!SafeGt(m1, umax), "");
+  static_assert(SafeGt(umax, m1), "");
+  static_assert(!SafeGt(umax, umax), "");
+
+  static_assert(!SafeGt(1, 2), "");
+  static_assert(!SafeGt(1, 2.0), "");
+  static_assert(!SafeGt(1.0, 2), "");
+  static_assert(!SafeGt(1.0, 2.0), "");
+  static_assert(SafeGt(2, 1), "");
+  static_assert(SafeGt(2, 1.0), "");
+  static_assert(SafeGt(2.0, 1), "");
+  static_assert(SafeGt(2.0, 1.0), "");
+
+  static_assert(!SafeGt(2, 2), "");
+  static_assert(!SafeGt(2, 2.0), "");
+  static_assert(!SafeGt(2.0, 2), "");
+  static_assert(!SafeGt(2.0, 2.0), "");
+
+  EXPECT_FALSE(SafeGt(p1, p1));
+  EXPECT_FALSE(SafeGt(p1, p2));
+  EXPECT_TRUE(SafeGt(p2, p1));
+  EXPECT_FALSE(SafeGt(p2, p2));
+}
+
+TEST(SafeCmpTest, Ge) {
+  static_assert(!SafeGe(-1, 2), "");
+  static_assert(!SafeGe(-1, 2u), "");
+  static_assert(SafeGe(2, -1), "");
+  static_assert(SafeGe(2u, -1), "");
+
+  static_assert(!SafeGe(1, 2), "");
+  static_assert(!SafeGe(1, 2u), "");
+  static_assert(!SafeGe(1u, 2), "");
+  static_assert(!SafeGe(1u, 2u), "");
+  static_assert(SafeGe(2, 1), "");
+  static_assert(SafeGe(2, 1u), "");
+  static_assert(SafeGe(2u, 1), "");
+  static_assert(SafeGe(2u, 1u), "");
+
+  static_assert(SafeGe(2, 2), "");
+  static_assert(SafeGe(2, 2u), "");
+  static_assert(SafeGe(2u, 2), "");
+  static_assert(SafeGe(2u, 2u), "");
+
+  static_assert(SafeGe(imin, imin), "");
+  static_assert(!SafeGe(imin, umax), "");
+  static_assert(SafeGe(umax, imin), "");
+  static_assert(SafeGe(umax, umax), "");
+
+  static_assert(SafeGe(m1, m1), "");
+  static_assert(!SafeGe(m1, umax), "");
+  static_assert(SafeGe(umax, m1), "");
+  static_assert(SafeGe(umax, umax), "");
+
+  static_assert(!SafeGe(1, 2), "");
+  static_assert(!SafeGe(1, 2.0), "");
+  static_assert(!SafeGe(1.0, 2), "");
+  static_assert(!SafeGe(1.0, 2.0), "");
+  static_assert(SafeGe(2, 1), "");
+  static_assert(SafeGe(2, 1.0), "");
+  static_assert(SafeGe(2.0, 1), "");
+  static_assert(SafeGe(2.0, 1.0), "");
+
+  static_assert(SafeGe(2, 2), "");
+  static_assert(SafeGe(2, 2.0), "");
+  static_assert(SafeGe(2.0, 2), "");
+  static_assert(SafeGe(2.0, 2.0), "");
+
+  EXPECT_TRUE(SafeGe(p1, p1));
+  EXPECT_FALSE(SafeGe(p1, p2));
+  EXPECT_TRUE(SafeGe(p2, p1));
+  EXPECT_TRUE(SafeGe(p2, p2));
+}
+
+TEST(SafeCmpTest, Enum) {
+  enum E1 { e1 = 13 };
+  enum { e2 = 13 };
+  enum E3 : unsigned { e3 = 13 };
+  enum : unsigned { e4 = 13 };
+  static_assert(SafeEq(13, e1), "");
+  static_assert(SafeEq(13u, e1), "");
+  static_assert(SafeEq(13, e2), "");
+  static_assert(SafeEq(13u, e2), "");
+  static_assert(SafeEq(13, e3), "");
+  static_assert(SafeEq(13u, e3), "");
+  static_assert(SafeEq(13, e4), "");
+  static_assert(SafeEq(13u, e4), "");
+}
+
+}  // namespace rtc
diff --git a/rtc_base/numerics/safe_conversions.h b/rtc_base/numerics/safe_conversions.h
new file mode 100644
index 0000000..58efcaa
--- /dev/null
+++ b/rtc_base/numerics/safe_conversions.h
@@ -0,0 +1,76 @@
+/*
+ *  Copyright 2014 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Borrowed from Chromium's src/base/numerics/safe_conversions.h.
+
+#ifndef RTC_BASE_NUMERICS_SAFE_CONVERSIONS_H_
+#define RTC_BASE_NUMERICS_SAFE_CONVERSIONS_H_
+
+#include <limits>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions_impl.h"
+
+namespace rtc {
+
+// Convenience function that returns true if the supplied value is in range
+// for the destination type.
+template <typename Dst, typename Src>
+inline bool IsValueInRangeForNumericType(Src value) {
+  return internal::RangeCheck<Dst>(value) == internal::TYPE_VALID;
+}
+
+// checked_cast<> and dchecked_cast<> are analogous to static_cast<> for
+// numeric types, except that they [D]CHECK that the specified numeric
+// conversion will not overflow or underflow. NaN source will always trigger
+// the [D]CHECK.
+template <typename Dst, typename Src>
+inline Dst checked_cast(Src value) {
+  RTC_CHECK(IsValueInRangeForNumericType<Dst>(value));
+  return static_cast<Dst>(value);
+}
+template <typename Dst, typename Src>
+inline Dst dchecked_cast(Src value) {
+  RTC_DCHECK(IsValueInRangeForNumericType<Dst>(value));
+  return static_cast<Dst>(value);
+}
+
+// saturated_cast<> is analogous to static_cast<> for numeric types, except
+// that the specified numeric conversion will saturate rather than overflow or
+// underflow. NaN assignment to an integral will trigger a RTC_CHECK condition.
+template <typename Dst, typename Src>
+inline Dst saturated_cast(Src value) {
+  // Optimization for floating point values, which already saturate.
+  if (std::numeric_limits<Dst>::is_iec559)
+    return static_cast<Dst>(value);
+
+  switch (internal::RangeCheck<Dst>(value)) {
+    case internal::TYPE_VALID:
+      return static_cast<Dst>(value);
+
+    case internal::TYPE_UNDERFLOW:
+      return std::numeric_limits<Dst>::min();
+
+    case internal::TYPE_OVERFLOW:
+      return std::numeric_limits<Dst>::max();
+
+    // Should fail only on attempting to assign NaN to a saturated integer.
+    case internal::TYPE_INVALID:
+      FATAL();
+      return std::numeric_limits<Dst>::max();
+  }
+
+  FATAL();
+  return static_cast<Dst>(value);
+}
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_NUMERICS_SAFE_CONVERSIONS_H_
diff --git a/rtc_base/numerics/safe_conversions_impl.h b/rtc_base/numerics/safe_conversions_impl.h
new file mode 100644
index 0000000..9b4f1c6
--- /dev/null
+++ b/rtc_base/numerics/safe_conversions_impl.h
@@ -0,0 +1,175 @@
+/*
+ *  Copyright 2014 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Borrowed from Chromium's src/base/numerics/safe_conversions_impl.h.
+
+#ifndef RTC_BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
+#define RTC_BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
+
+#include <limits>
+
+namespace rtc {
+namespace internal {
+
+enum DstSign { DST_UNSIGNED, DST_SIGNED };
+
+enum SrcSign { SRC_UNSIGNED, SRC_SIGNED };
+
+enum DstRange { OVERLAPS_RANGE, CONTAINS_RANGE };
+
+// Helper templates to statically determine if our destination type can contain
+// all values represented by the source type.
+
+template <typename Dst,
+          typename Src,
+          DstSign IsDstSigned =
+              std::numeric_limits<Dst>::is_signed ? DST_SIGNED : DST_UNSIGNED,
+          SrcSign IsSrcSigned =
+              std::numeric_limits<Src>::is_signed ? SRC_SIGNED : SRC_UNSIGNED>
+struct StaticRangeCheck {};
+
+template <typename Dst, typename Src>
+struct StaticRangeCheck<Dst, Src, DST_SIGNED, SRC_SIGNED> {
+  typedef std::numeric_limits<Dst> DstLimits;
+  typedef std::numeric_limits<Src> SrcLimits;
+  // Compare based on max_exponent, which we must compute for integrals.
+  static const size_t kDstMaxExponent =
+      DstLimits::is_iec559 ? DstLimits::max_exponent : (sizeof(Dst) * 8 - 1);
+  static const size_t kSrcMaxExponent =
+      SrcLimits::is_iec559 ? SrcLimits::max_exponent : (sizeof(Src) * 8 - 1);
+  static const DstRange value =
+      kDstMaxExponent >= kSrcMaxExponent ? CONTAINS_RANGE : OVERLAPS_RANGE;
+};
+
+template <typename Dst, typename Src>
+struct StaticRangeCheck<Dst, Src, DST_UNSIGNED, SRC_UNSIGNED> {
+  static const DstRange value =
+      sizeof(Dst) >= sizeof(Src) ? CONTAINS_RANGE : OVERLAPS_RANGE;
+};
+
+template <typename Dst, typename Src>
+struct StaticRangeCheck<Dst, Src, DST_SIGNED, SRC_UNSIGNED> {
+  typedef std::numeric_limits<Dst> DstLimits;
+  typedef std::numeric_limits<Src> SrcLimits;
+  // Compare based on max_exponent, which we must compute for integrals.
+  static const size_t kDstMaxExponent =
+      DstLimits::is_iec559 ? DstLimits::max_exponent : (sizeof(Dst) * 8 - 1);
+  static const size_t kSrcMaxExponent = sizeof(Src) * 8;
+  static const DstRange value =
+      kDstMaxExponent >= kSrcMaxExponent ? CONTAINS_RANGE : OVERLAPS_RANGE;
+};
+
+template <typename Dst, typename Src>
+struct StaticRangeCheck<Dst, Src, DST_UNSIGNED, SRC_SIGNED> {
+  static const DstRange value = OVERLAPS_RANGE;
+};
+
+enum RangeCheckResult {
+  TYPE_VALID = 0,      // Value can be represented by the destination type.
+  TYPE_UNDERFLOW = 1,  // Value would overflow.
+  TYPE_OVERFLOW = 2,   // Value would underflow.
+  TYPE_INVALID = 3     // Source value is invalid (i.e. NaN).
+};
+
+// This macro creates a RangeCheckResult from an upper and lower bound
+// check by taking advantage of the fact that only NaN can be out of range in
+// both directions at once.
+#define BASE_NUMERIC_RANGE_CHECK_RESULT(is_in_upper_bound, is_in_lower_bound) \
+  RangeCheckResult(((is_in_upper_bound) ? 0 : TYPE_OVERFLOW) |                \
+                   ((is_in_lower_bound) ? 0 : TYPE_UNDERFLOW))
+
+template <typename Dst,
+          typename Src,
+          DstSign IsDstSigned =
+              std::numeric_limits<Dst>::is_signed ? DST_SIGNED : DST_UNSIGNED,
+          SrcSign IsSrcSigned =
+              std::numeric_limits<Src>::is_signed ? SRC_SIGNED : SRC_UNSIGNED,
+          DstRange IsSrcRangeContained = StaticRangeCheck<Dst, Src>::value>
+struct RangeCheckImpl {};
+
+// The following templates are for ranges that must be verified at runtime. We
+// split it into checks based on signedness to avoid confusing casts and
+// compiler warnings on signed an unsigned comparisons.
+
+// Dst range always contains the result: nothing to check.
+template <typename Dst, typename Src, DstSign IsDstSigned, SrcSign IsSrcSigned>
+struct RangeCheckImpl<Dst, Src, IsDstSigned, IsSrcSigned, CONTAINS_RANGE> {
+  static RangeCheckResult Check(Src value) { return TYPE_VALID; }
+};
+
+// Signed to signed narrowing.
+template <typename Dst, typename Src>
+struct RangeCheckImpl<Dst, Src, DST_SIGNED, SRC_SIGNED, OVERLAPS_RANGE> {
+  static RangeCheckResult Check(Src value) {
+    typedef std::numeric_limits<Dst> DstLimits;
+    return DstLimits::is_iec559
+               ? BASE_NUMERIC_RANGE_CHECK_RESULT(
+                     value <= static_cast<Src>(DstLimits::max()),
+                     value >= static_cast<Src>(DstLimits::max() * -1))
+               : BASE_NUMERIC_RANGE_CHECK_RESULT(
+                     value <= static_cast<Src>(DstLimits::max()),
+                     value >= static_cast<Src>(DstLimits::min()));
+  }
+};
+
+// Unsigned to unsigned narrowing.
+template <typename Dst, typename Src>
+struct RangeCheckImpl<Dst, Src, DST_UNSIGNED, SRC_UNSIGNED, OVERLAPS_RANGE> {
+  static RangeCheckResult Check(Src value) {
+    typedef std::numeric_limits<Dst> DstLimits;
+    return BASE_NUMERIC_RANGE_CHECK_RESULT(
+        value <= static_cast<Src>(DstLimits::max()), true);
+  }
+};
+
+// Unsigned to signed.
+template <typename Dst, typename Src>
+struct RangeCheckImpl<Dst, Src, DST_SIGNED, SRC_UNSIGNED, OVERLAPS_RANGE> {
+  static RangeCheckResult Check(Src value) {
+    typedef std::numeric_limits<Dst> DstLimits;
+    return sizeof(Dst) > sizeof(Src)
+               ? TYPE_VALID
+               : BASE_NUMERIC_RANGE_CHECK_RESULT(
+                     value <= static_cast<Src>(DstLimits::max()), true);
+  }
+};
+
+// Signed to unsigned.
+template <typename Dst, typename Src>
+struct RangeCheckImpl<Dst, Src, DST_UNSIGNED, SRC_SIGNED, OVERLAPS_RANGE> {
+  static RangeCheckResult Check(Src value) {
+    typedef std::numeric_limits<Dst> DstLimits;
+    typedef std::numeric_limits<Src> SrcLimits;
+    // Compare based on max_exponent, which we must compute for integrals.
+    static const size_t kDstMaxExponent = sizeof(Dst) * 8;
+    static const size_t kSrcMaxExponent =
+        SrcLimits::is_iec559 ? SrcLimits::max_exponent : (sizeof(Src) * 8 - 1);
+    return (kDstMaxExponent >= kSrcMaxExponent)
+               ? BASE_NUMERIC_RANGE_CHECK_RESULT(true,
+                                                 value >= static_cast<Src>(0))
+               : BASE_NUMERIC_RANGE_CHECK_RESULT(
+                     value <= static_cast<Src>(DstLimits::max()),
+                     value >= static_cast<Src>(0));
+  }
+};
+
+template <typename Dst, typename Src>
+inline RangeCheckResult RangeCheck(Src value) {
+  static_assert(std::numeric_limits<Src>::is_specialized,
+                "argument must be numeric");
+  static_assert(std::numeric_limits<Dst>::is_specialized,
+                "result must be numeric");
+  return RangeCheckImpl<Dst, Src>::Check(value);
+}
+
+}  // namespace internal
+}  // namespace rtc
+
+#endif  // RTC_BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
diff --git a/rtc_base/numerics/safe_minmax.h b/rtc_base/numerics/safe_minmax.h
new file mode 100644
index 0000000..8d00afb
--- /dev/null
+++ b/rtc_base/numerics/safe_minmax.h
@@ -0,0 +1,335 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Minimum and maximum
+// ===================
+//
+//   rtc::SafeMin(x, y)
+//   rtc::SafeMax(x, y)
+//
+// (These are both constexpr.)
+//
+// Accept two arguments of either any two integral or any two floating-point
+// types, and return the smaller and larger value, respectively, with no
+// truncation or wrap-around. If only one of the input types is statically
+// guaranteed to be able to represent the result, the return type is that type;
+// if either one would do, the result type is the smaller type. (One of these
+// two cases always applies.)
+//
+//   * The case with one floating-point and one integral type is not allowed,
+//     because the floating-point type will have greater range, but may not
+//     have sufficient precision to represent the integer value exactly.)
+//
+// Clamp (a.k.a. constrain to a given interval)
+// ============================================
+//
+//   rtc::SafeClamp(x, a, b)
+//
+// Accepts three arguments of any mix of integral types or any mix of
+// floating-point types, and returns the value in the closed interval [a, b]
+// that is closest to x (that is, if x < a it returns a; if x > b it returns b;
+// and if a <= x <= b it returns x). As for SafeMin() and SafeMax(), there is
+// no truncation or wrap-around. The result type
+//
+//   1. is statically guaranteed to be able to represent the result;
+//
+//   2. is no larger than the largest of the three argument types; and
+//
+//   3. has the same signedness as the type of the third argument, if this is
+//      possible without violating the First or Second Law.
+//
+// There is always at least one type that meets criteria 1 and 2. If more than
+// one type meets these criteria equally well, the result type is one of the
+// types that is smallest. Note that unlike SafeMin() and SafeMax(),
+// SafeClamp() will sometimes pick a return type that isn't the type of any of
+// its arguments.
+//
+//   * In this context, a type A is smaller than a type B if it has a smaller
+//     range; that is, if A::max() - A::min() < B::max() - B::min(). For
+//     example, int8_t < int16_t == uint16_t < int32_t, and all integral types
+//     are smaller than all floating-point types.)
+//
+//   * As for SafeMin and SafeMax, mixing integer and floating-point arguments
+//     is not allowed, because floating-point types have greater range than
+//     integer types, but do not have sufficient precision to represent the
+//     values of most integer types exactly.
+//
+// Requesting a specific return type
+// =================================
+//
+// All three functions allow callers to explicitly specify the return type as a
+// template parameter, overriding the default return type. E.g.
+//
+//   rtc::SafeMin<int>(x, y)  // returns an int
+//
+// If the requested type is statically guaranteed to be able to represent the
+// result, then everything's fine, and the return type is as requested. But if
+// the requested type is too small, a static_assert is triggered.
+
+#ifndef RTC_BASE_NUMERICS_SAFE_MINMAX_H_
+#define RTC_BASE_NUMERICS_SAFE_MINMAX_H_
+
+#include <limits>
+#include <type_traits>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_compare.h"
+#include "rtc_base/type_traits.h"
+
+namespace rtc {
+
+namespace safe_minmax_impl {
+
+// Make the range of a type available via something other than a constexpr
+// function, to work around MSVC limitations. See
+// https://blogs.msdn.microsoft.com/vcblog/2015/12/02/partial-support-for-expression-sfinae-in-vs-2015-update-1/
+template <typename T>
+struct Limits {
+  static constexpr T lowest = std::numeric_limits<T>::lowest();
+  static constexpr T max = std::numeric_limits<T>::max();
+};
+
+template <typename T, bool is_enum = std::is_enum<T>::value>
+struct UnderlyingType;
+
+template <typename T>
+struct UnderlyingType<T, false> {
+  using type = T;
+};
+
+template <typename T>
+struct UnderlyingType<T, true> {
+  using type = typename std::underlying_type<T>::type;
+};
+
+// Given two types T1 and T2, find types that can hold the smallest (in
+// ::min_t) and the largest (in ::max_t) of the two values.
+template <typename T1,
+          typename T2,
+          bool int1 = IsIntlike<T1>::value,
+          bool int2 = IsIntlike<T2>::value>
+struct MType {
+  static_assert(int1 == int2,
+                "You may not mix integral and floating-point arguments");
+};
+
+// Specialization for when neither type is integral (and therefore presumably
+// floating-point).
+template <typename T1, typename T2>
+struct MType<T1, T2, false, false> {
+  using min_t = typename std::common_type<T1, T2>::type;
+  static_assert(std::is_same<min_t, T1>::value ||
+                    std::is_same<min_t, T2>::value,
+                "");
+
+  using max_t = typename std::common_type<T1, T2>::type;
+  static_assert(std::is_same<max_t, T1>::value ||
+                    std::is_same<max_t, T2>::value,
+                "");
+};
+
+// Specialization for when both types are integral.
+template <typename T1, typename T2>
+struct MType<T1, T2, true, true> {
+  // The type with the lowest minimum value. In case of a tie, the type with
+  // the lowest maximum value. In case that too is a tie, the types have the
+  // same range, and we arbitrarily pick T1.
+  using min_t = typename std::conditional<
+      SafeLt(Limits<T1>::lowest, Limits<T2>::lowest),
+      T1,
+      typename std::conditional<
+          SafeGt(Limits<T1>::lowest, Limits<T2>::lowest),
+          T2,
+          typename std::conditional<SafeLe(Limits<T1>::max, Limits<T2>::max),
+                                    T1,
+                                    T2>::type>::type>::type;
+  static_assert(std::is_same<min_t, T1>::value ||
+                    std::is_same<min_t, T2>::value,
+                "");
+
+  // The type with the highest maximum value. In case of a tie, the types have
+  // the same range (because in C++, integer types with the same maximum also
+  // have the same minimum).
+  static_assert(SafeNe(Limits<T1>::max, Limits<T2>::max) ||
+                    SafeEq(Limits<T1>::lowest, Limits<T2>::lowest),
+                "integer types with the same max should have the same min");
+  using max_t = typename std::
+      conditional<SafeGe(Limits<T1>::max, Limits<T2>::max), T1, T2>::type;
+  static_assert(std::is_same<max_t, T1>::value ||
+                    std::is_same<max_t, T2>::value,
+                "");
+};
+
+// A dummy type that we pass around at compile time but never actually use.
+// Declared but not defined.
+struct DefaultType;
+
+// ::type is A, except we fall back to B if A is DefaultType. We static_assert
+// that the chosen type can hold all values that B can hold.
+template <typename A, typename B>
+struct TypeOr {
+  using type = typename std::
+      conditional<std::is_same<A, DefaultType>::value, B, A>::type;
+  static_assert(SafeLe(Limits<type>::lowest, Limits<B>::lowest) &&
+                    SafeGe(Limits<type>::max, Limits<B>::max),
+                "The specified type isn't large enough");
+  static_assert(IsIntlike<type>::value == IsIntlike<B>::value &&
+                    std::is_floating_point<type>::value ==
+                        std::is_floating_point<type>::value,
+                "float<->int conversions not allowed");
+};
+
+}  // namespace safe_minmax_impl
+
+template <
+    typename R = safe_minmax_impl::DefaultType,
+    typename T1 = safe_minmax_impl::DefaultType,
+    typename T2 = safe_minmax_impl::DefaultType,
+    typename R2 = typename safe_minmax_impl::TypeOr<
+        R,
+        typename safe_minmax_impl::MType<
+            typename safe_minmax_impl::UnderlyingType<T1>::type,
+            typename safe_minmax_impl::UnderlyingType<T2>::type>::min_t>::type>
+constexpr R2 SafeMin(T1 a, T2 b) {
+  static_assert(IsIntlike<T1>::value || std::is_floating_point<T1>::value,
+                "The first argument must be integral or floating-point");
+  static_assert(IsIntlike<T2>::value || std::is_floating_point<T2>::value,
+                "The second argument must be integral or floating-point");
+  return SafeLt(a, b) ? static_cast<R2>(a) : static_cast<R2>(b);
+}
+
+template <
+    typename R = safe_minmax_impl::DefaultType,
+    typename T1 = safe_minmax_impl::DefaultType,
+    typename T2 = safe_minmax_impl::DefaultType,
+    typename R2 = typename safe_minmax_impl::TypeOr<
+        R,
+        typename safe_minmax_impl::MType<
+            typename safe_minmax_impl::UnderlyingType<T1>::type,
+            typename safe_minmax_impl::UnderlyingType<T2>::type>::max_t>::type>
+constexpr R2 SafeMax(T1 a, T2 b) {
+  static_assert(IsIntlike<T1>::value || std::is_floating_point<T1>::value,
+                "The first argument must be integral or floating-point");
+  static_assert(IsIntlike<T2>::value || std::is_floating_point<T2>::value,
+                "The second argument must be integral or floating-point");
+  return SafeGt(a, b) ? static_cast<R2>(a) : static_cast<R2>(b);
+}
+
+namespace safe_minmax_impl {
+
+// Given three types T, L, and H, let ::type be a suitable return value for
+// SafeClamp(T, L, H). See the docs at the top of this file for details.
+template <typename T,
+          typename L,
+          typename H,
+          bool int1 = IsIntlike<T>::value,
+          bool int2 = IsIntlike<L>::value,
+          bool int3 = IsIntlike<H>::value>
+struct ClampType {
+  static_assert(int1 == int2 && int1 == int3,
+                "You may not mix integral and floating-point arguments");
+};
+
+// Specialization for when all three types are floating-point.
+template <typename T, typename L, typename H>
+struct ClampType<T, L, H, false, false, false> {
+  using type = typename std::common_type<T, L, H>::type;
+};
+
+// Specialization for when all three types are integral.
+template <typename T, typename L, typename H>
+struct ClampType<T, L, H, true, true, true> {
+ private:
+  // Range of the return value. The return type must be able to represent this
+  // full range.
+  static constexpr auto r_min =
+      SafeMax(Limits<L>::lowest, SafeMin(Limits<H>::lowest, Limits<T>::lowest));
+  static constexpr auto r_max =
+      SafeMin(Limits<H>::max, SafeMax(Limits<L>::max, Limits<T>::max));
+
+  // Is the given type an acceptable return type? (That is, can it represent
+  // all possible return values, and is it no larger than the largest of the
+  // input types?)
+  template <typename A>
+  struct AcceptableType {
+   private:
+    static constexpr bool not_too_large = sizeof(A) <= sizeof(L) ||
+                                          sizeof(A) <= sizeof(H) ||
+                                          sizeof(A) <= sizeof(T);
+    static constexpr bool range_contained =
+        SafeLe(Limits<A>::lowest, r_min) && SafeLe(r_max, Limits<A>::max);
+
+   public:
+    static constexpr bool value = not_too_large && range_contained;
+  };
+
+  using best_signed_type = typename std::conditional<
+      AcceptableType<int8_t>::value,
+      int8_t,
+      typename std::conditional<
+          AcceptableType<int16_t>::value,
+          int16_t,
+          typename std::conditional<AcceptableType<int32_t>::value,
+                                    int32_t,
+                                    int64_t>::type>::type>::type;
+
+  using best_unsigned_type = typename std::conditional<
+      AcceptableType<uint8_t>::value,
+      uint8_t,
+      typename std::conditional<
+          AcceptableType<uint16_t>::value,
+          uint16_t,
+          typename std::conditional<AcceptableType<uint32_t>::value,
+                                    uint32_t,
+                                    uint64_t>::type>::type>::type;
+
+ public:
+  // Pick the best type, preferring the same signedness as T but falling back
+  // to the other one if necessary.
+  using type = typename std::conditional<
+      std::is_signed<T>::value,
+      typename std::conditional<AcceptableType<best_signed_type>::value,
+                                best_signed_type,
+                                best_unsigned_type>::type,
+      typename std::conditional<AcceptableType<best_unsigned_type>::value,
+                                best_unsigned_type,
+                                best_signed_type>::type>::type;
+  static_assert(AcceptableType<type>::value, "");
+};
+
+}  // namespace safe_minmax_impl
+
+template <
+    typename R = safe_minmax_impl::DefaultType,
+    typename T = safe_minmax_impl::DefaultType,
+    typename L = safe_minmax_impl::DefaultType,
+    typename H = safe_minmax_impl::DefaultType,
+    typename R2 = typename safe_minmax_impl::TypeOr<
+        R,
+        typename safe_minmax_impl::ClampType<
+            typename safe_minmax_impl::UnderlyingType<T>::type,
+            typename safe_minmax_impl::UnderlyingType<L>::type,
+            typename safe_minmax_impl::UnderlyingType<H>::type>::type>::type>
+R2 SafeClamp(T x, L min, H max) {
+  static_assert(IsIntlike<H>::value || std::is_floating_point<H>::value,
+                "The first argument must be integral or floating-point");
+  static_assert(IsIntlike<T>::value || std::is_floating_point<T>::value,
+                "The second argument must be integral or floating-point");
+  static_assert(IsIntlike<L>::value || std::is_floating_point<L>::value,
+                "The third argument must be integral or floating-point");
+  RTC_DCHECK_LE(min, max);
+  return SafeLe(x, min)
+             ? static_cast<R2>(min)
+             : SafeGe(x, max) ? static_cast<R2>(max) : static_cast<R2>(x);
+}
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_NUMERICS_SAFE_MINMAX_H_
diff --git a/rtc_base/numerics/safe_minmax_unittest.cc b/rtc_base/numerics/safe_minmax_unittest.cc
new file mode 100644
index 0000000..72d23b6
--- /dev/null
+++ b/rtc_base/numerics/safe_minmax_unittest.cc
@@ -0,0 +1,344 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+#include <limits>
+
+#include "rtc_base/numerics/safe_minmax.h"
+#include "test/gtest.h"
+
+namespace rtc {
+
+namespace {
+
+// Functions that check that SafeMin(), SafeMax(), and SafeClamp() return the
+// specified type. The functions that end in "R" use an explicitly given return
+// type.
+
+template <typename T1, typename T2, typename Tmin, typename Tmax>
+constexpr bool TypeCheckMinMax() {
+  return std::is_same<decltype(SafeMin(std::declval<T1>(), std::declval<T2>())),
+                      Tmin>::value &&
+         std::is_same<decltype(SafeMax(std::declval<T1>(), std::declval<T2>())),
+                      Tmax>::value;
+}
+
+template <typename T1, typename T2, typename R>
+constexpr bool TypeCheckMinR() {
+  return std::is_same<
+      decltype(SafeMin<R>(std::declval<T1>(), std::declval<T2>())), R>::value;
+}
+
+template <typename T1, typename T2, typename R>
+constexpr bool TypeCheckMaxR() {
+  return std::is_same<
+      decltype(SafeMax<R>(std::declval<T1>(), std::declval<T2>())), R>::value;
+}
+
+template <typename T, typename L, typename H, typename R>
+constexpr bool TypeCheckClamp() {
+  return std::is_same<decltype(SafeClamp(std::declval<T>(), std::declval<L>(),
+                                         std::declval<H>())),
+                      R>::value;
+}
+
+template <typename T, typename L, typename H, typename R>
+constexpr bool TypeCheckClampR() {
+  return std::is_same<decltype(SafeClamp<R>(std::declval<T>(),
+                                            std::declval<L>(),
+                                            std::declval<H>())),
+                      R>::value;
+}
+
+// clang-format off
+
+// SafeMin/SafeMax: Check that all combinations of signed/unsigned 8/64 bits
+// give the correct default result type.
+static_assert(TypeCheckMinMax<  int8_t,   int8_t,   int8_t,   int8_t>(), "");
+static_assert(TypeCheckMinMax<  int8_t,  uint8_t,   int8_t,  uint8_t>(), "");
+static_assert(TypeCheckMinMax<  int8_t,  int64_t,  int64_t,  int64_t>(), "");
+static_assert(TypeCheckMinMax<  int8_t, uint64_t,   int8_t, uint64_t>(), "");
+static_assert(TypeCheckMinMax< uint8_t,   int8_t,   int8_t,  uint8_t>(), "");
+static_assert(TypeCheckMinMax< uint8_t,  uint8_t,  uint8_t,  uint8_t>(), "");
+static_assert(TypeCheckMinMax< uint8_t,  int64_t,  int64_t,  int64_t>(), "");
+static_assert(TypeCheckMinMax< uint8_t, uint64_t,  uint8_t, uint64_t>(), "");
+static_assert(TypeCheckMinMax< int64_t,   int8_t,  int64_t,  int64_t>(), "");
+static_assert(TypeCheckMinMax< int64_t,  uint8_t,  int64_t,  int64_t>(), "");
+static_assert(TypeCheckMinMax< int64_t,  int64_t,  int64_t,  int64_t>(), "");
+static_assert(TypeCheckMinMax< int64_t, uint64_t,  int64_t, uint64_t>(), "");
+static_assert(TypeCheckMinMax<uint64_t,   int8_t,   int8_t, uint64_t>(), "");
+static_assert(TypeCheckMinMax<uint64_t,  uint8_t,  uint8_t, uint64_t>(), "");
+static_assert(TypeCheckMinMax<uint64_t,  int64_t,  int64_t, uint64_t>(), "");
+static_assert(TypeCheckMinMax<uint64_t, uint64_t, uint64_t, uint64_t>(), "");
+
+// SafeClamp: Check that all combinations of signed/unsigned 8/64 bits give the
+// correct result type.
+static_assert(TypeCheckClamp<  int8_t,   int8_t,   int8_t,   int8_t>(), "");
+static_assert(TypeCheckClamp<  int8_t,   int8_t,  uint8_t,   int8_t>(), "");
+static_assert(TypeCheckClamp<  int8_t,   int8_t,  int64_t,   int8_t>(), "");
+static_assert(TypeCheckClamp<  int8_t,   int8_t, uint64_t,   int8_t>(), "");
+static_assert(TypeCheckClamp<  int8_t,  uint8_t,   int8_t,   int8_t>(), "");
+static_assert(TypeCheckClamp<  int8_t,  uint8_t,  uint8_t,  uint8_t>(), "");
+static_assert(TypeCheckClamp<  int8_t,  uint8_t,  int64_t,  int16_t>(), "");
+static_assert(TypeCheckClamp<  int8_t,  uint8_t, uint64_t,  int16_t>(), "");
+static_assert(TypeCheckClamp<  int8_t,  int64_t,   int8_t,   int8_t>(), "");
+static_assert(TypeCheckClamp<  int8_t,  int64_t,  uint8_t,  int16_t>(), "");
+static_assert(TypeCheckClamp<  int8_t,  int64_t,  int64_t,  int64_t>(), "");
+static_assert(TypeCheckClamp<  int8_t,  int64_t, uint64_t,  int64_t>(), "");
+static_assert(TypeCheckClamp<  int8_t, uint64_t,   int8_t,   int8_t>(), "");
+static_assert(TypeCheckClamp<  int8_t, uint64_t,  uint8_t,  int16_t>(), "");
+static_assert(TypeCheckClamp<  int8_t, uint64_t,  int64_t,  int64_t>(), "");
+static_assert(TypeCheckClamp<  int8_t, uint64_t, uint64_t, uint64_t>(), "");
+static_assert(TypeCheckClamp< uint8_t,   int8_t,   int8_t,   int8_t>(), "");
+static_assert(TypeCheckClamp< uint8_t,   int8_t,  uint8_t,  uint8_t>(), "");
+static_assert(TypeCheckClamp< uint8_t,   int8_t,  int64_t,  int16_t>(), "");
+static_assert(TypeCheckClamp< uint8_t,   int8_t, uint64_t,  uint8_t>(), "");
+static_assert(TypeCheckClamp< uint8_t,  uint8_t,   int8_t,  uint8_t>(), "");
+static_assert(TypeCheckClamp< uint8_t,  uint8_t,  uint8_t,  uint8_t>(), "");
+static_assert(TypeCheckClamp< uint8_t,  uint8_t,  int64_t,  uint8_t>(), "");
+static_assert(TypeCheckClamp< uint8_t,  uint8_t, uint64_t,  uint8_t>(), "");
+static_assert(TypeCheckClamp< uint8_t,  int64_t,   int8_t,   int8_t>(), "");
+static_assert(TypeCheckClamp< uint8_t,  int64_t,  uint8_t,  uint8_t>(), "");
+static_assert(TypeCheckClamp< uint8_t,  int64_t,  int64_t,  int64_t>(), "");
+static_assert(TypeCheckClamp< uint8_t,  int64_t, uint64_t, uint64_t>(), "");
+static_assert(TypeCheckClamp< uint8_t, uint64_t,   int8_t,  uint8_t>(), "");
+static_assert(TypeCheckClamp< uint8_t, uint64_t,  uint8_t,  uint8_t>(), "");
+static_assert(TypeCheckClamp< uint8_t, uint64_t,  int64_t, uint64_t>(), "");
+static_assert(TypeCheckClamp< uint8_t, uint64_t, uint64_t, uint64_t>(), "");
+static_assert(TypeCheckClamp< int64_t,   int8_t,   int8_t,   int8_t>(), "");
+static_assert(TypeCheckClamp< int64_t,   int8_t,  uint8_t,  int16_t>(), "");
+static_assert(TypeCheckClamp< int64_t,   int8_t,  int64_t,  int64_t>(), "");
+static_assert(TypeCheckClamp< int64_t,   int8_t, uint64_t,  int64_t>(), "");
+static_assert(TypeCheckClamp< int64_t,  uint8_t,   int8_t,   int8_t>(), "");
+static_assert(TypeCheckClamp< int64_t,  uint8_t,  uint8_t,  int16_t>(), "");
+static_assert(TypeCheckClamp< int64_t,  uint8_t,  int64_t,  int64_t>(), "");
+static_assert(TypeCheckClamp< int64_t,  uint8_t, uint64_t,  int64_t>(), "");
+static_assert(TypeCheckClamp< int64_t,  int64_t,   int8_t,  int64_t>(), "");
+static_assert(TypeCheckClamp< int64_t,  int64_t,  uint8_t,  int64_t>(), "");
+static_assert(TypeCheckClamp< int64_t,  int64_t,  int64_t,  int64_t>(), "");
+static_assert(TypeCheckClamp< int64_t,  int64_t, uint64_t,  int64_t>(), "");
+static_assert(TypeCheckClamp< int64_t, uint64_t,   int8_t,   int8_t>(), "");
+static_assert(TypeCheckClamp< int64_t, uint64_t,  uint8_t,  int16_t>(), "");
+static_assert(TypeCheckClamp< int64_t, uint64_t,  int64_t,  int64_t>(), "");
+static_assert(TypeCheckClamp< int64_t, uint64_t, uint64_t, uint64_t>(), "");
+static_assert(TypeCheckClamp<uint64_t,   int8_t,   int8_t,   int8_t>(), "");
+static_assert(TypeCheckClamp<uint64_t,   int8_t,  uint8_t,  uint8_t>(), "");
+static_assert(TypeCheckClamp<uint64_t,   int8_t,  int64_t,  int64_t>(), "");
+static_assert(TypeCheckClamp<uint64_t,   int8_t, uint64_t, uint64_t>(), "");
+static_assert(TypeCheckClamp<uint64_t,  uint8_t,   int8_t,  uint8_t>(), "");
+static_assert(TypeCheckClamp<uint64_t,  uint8_t,  uint8_t,  uint8_t>(), "");
+static_assert(TypeCheckClamp<uint64_t,  uint8_t,  int64_t, uint64_t>(), "");
+static_assert(TypeCheckClamp<uint64_t,  uint8_t, uint64_t, uint64_t>(), "");
+static_assert(TypeCheckClamp<uint64_t,  int64_t,   int8_t,   int8_t>(), "");
+static_assert(TypeCheckClamp<uint64_t,  int64_t,  uint8_t,  uint8_t>(), "");
+static_assert(TypeCheckClamp<uint64_t,  int64_t,  int64_t,  int64_t>(), "");
+static_assert(TypeCheckClamp<uint64_t,  int64_t, uint64_t, uint64_t>(), "");
+static_assert(TypeCheckClamp<uint64_t, uint64_t,   int8_t,  uint8_t>(), "");
+static_assert(TypeCheckClamp<uint64_t, uint64_t,  uint8_t,  uint8_t>(), "");
+static_assert(TypeCheckClamp<uint64_t, uint64_t,  int64_t, uint64_t>(), "");
+static_assert(TypeCheckClamp<uint64_t, uint64_t, uint64_t, uint64_t>(), "");
+
+enum DefaultE { kFoo = -17 };
+enum UInt8E : uint8_t { kBar = 17 };
+
+// SafeMin/SafeMax: Check that we can use enum types.
+static_assert(TypeCheckMinMax<unsigned, unsigned, unsigned, unsigned>(), "");
+static_assert(TypeCheckMinMax<unsigned, DefaultE,      int, unsigned>(), "");
+static_assert(TypeCheckMinMax<unsigned,   UInt8E,  uint8_t, unsigned>(), "");
+static_assert(TypeCheckMinMax<DefaultE, unsigned,      int, unsigned>(), "");
+static_assert(TypeCheckMinMax<DefaultE, DefaultE,      int,      int>(), "");
+static_assert(TypeCheckMinMax<DefaultE,   UInt8E,      int,      int>(), "");
+static_assert(TypeCheckMinMax<  UInt8E, unsigned,  uint8_t, unsigned>(), "");
+static_assert(TypeCheckMinMax<  UInt8E, DefaultE,     int,       int>(), "");
+static_assert(TypeCheckMinMax<  UInt8E,   UInt8E,  uint8_t,  uint8_t>(), "");
+
+// SafeClamp: Check that we can use enum types.
+static_assert(TypeCheckClamp<unsigned, unsigned, unsigned, unsigned>(), "");
+static_assert(TypeCheckClamp<unsigned, unsigned, DefaultE, unsigned>(), "");
+static_assert(TypeCheckClamp<unsigned, unsigned,   UInt8E,  uint8_t>(), "");
+static_assert(TypeCheckClamp<unsigned, DefaultE, unsigned, unsigned>(), "");
+static_assert(TypeCheckClamp<unsigned, DefaultE, DefaultE,      int>(), "");
+static_assert(TypeCheckClamp<unsigned, DefaultE,   UInt8E,  uint8_t>(), "");
+static_assert(TypeCheckClamp<unsigned,   UInt8E, unsigned, unsigned>(), "");
+static_assert(TypeCheckClamp<unsigned,   UInt8E, DefaultE, unsigned>(), "");
+static_assert(TypeCheckClamp<unsigned,   UInt8E,   UInt8E,  uint8_t>(), "");
+static_assert(TypeCheckClamp<DefaultE, unsigned, unsigned, unsigned>(), "");
+static_assert(TypeCheckClamp<DefaultE, unsigned, DefaultE,      int>(), "");
+static_assert(TypeCheckClamp<DefaultE, unsigned,   UInt8E,  int16_t>(), "");
+static_assert(TypeCheckClamp<DefaultE, DefaultE, unsigned,      int>(), "");
+static_assert(TypeCheckClamp<DefaultE, DefaultE, DefaultE,      int>(), "");
+static_assert(TypeCheckClamp<DefaultE, DefaultE,   UInt8E,      int>(), "");
+static_assert(TypeCheckClamp<DefaultE,   UInt8E, unsigned,      int>(), "");
+static_assert(TypeCheckClamp<DefaultE,   UInt8E, DefaultE,      int>(), "");
+static_assert(TypeCheckClamp<DefaultE,   UInt8E,   UInt8E,  int16_t>(), "");
+static_assert(TypeCheckClamp<  UInt8E, unsigned, unsigned, unsigned>(), "");
+static_assert(TypeCheckClamp<  UInt8E, unsigned, DefaultE, unsigned>(), "");
+static_assert(TypeCheckClamp<  UInt8E, unsigned,   UInt8E,  uint8_t>(), "");
+static_assert(TypeCheckClamp<  UInt8E, DefaultE, unsigned, unsigned>(), "");
+static_assert(TypeCheckClamp<  UInt8E, DefaultE, DefaultE,      int>(), "");
+static_assert(TypeCheckClamp<  UInt8E, DefaultE,   UInt8E,  uint8_t>(), "");
+static_assert(TypeCheckClamp<  UInt8E,   UInt8E, unsigned,  uint8_t>(), "");
+static_assert(TypeCheckClamp<  UInt8E,   UInt8E, DefaultE,  uint8_t>(), "");
+static_assert(TypeCheckClamp<  UInt8E,   UInt8E,   UInt8E,  uint8_t>(), "");
+
+using ld = long double;
+
+// SafeMin/SafeMax: Check that all floating-point combinations give the
+// correct result type.
+static_assert(TypeCheckMinMax< float,  float,  float,  float>(), "");
+static_assert(TypeCheckMinMax< float, double, double, double>(), "");
+static_assert(TypeCheckMinMax< float,     ld,     ld,     ld>(), "");
+static_assert(TypeCheckMinMax<double,  float, double, double>(), "");
+static_assert(TypeCheckMinMax<double, double, double, double>(), "");
+static_assert(TypeCheckMinMax<double,     ld,     ld,     ld>(), "");
+static_assert(TypeCheckMinMax<    ld,  float,     ld,     ld>(), "");
+static_assert(TypeCheckMinMax<    ld, double,     ld,     ld>(), "");
+static_assert(TypeCheckMinMax<    ld,     ld,     ld,     ld>(), "");
+
+// SafeClamp: Check that all floating-point combinations give the correct
+// result type.
+static_assert(TypeCheckClamp< float,  float,  float,  float>(), "");
+static_assert(TypeCheckClamp< float,  float, double, double>(), "");
+static_assert(TypeCheckClamp< float,  float,     ld,     ld>(), "");
+static_assert(TypeCheckClamp< float, double,  float, double>(), "");
+static_assert(TypeCheckClamp< float, double, double, double>(), "");
+static_assert(TypeCheckClamp< float, double,     ld,     ld>(), "");
+static_assert(TypeCheckClamp< float,     ld,  float,     ld>(), "");
+static_assert(TypeCheckClamp< float,     ld, double,     ld>(), "");
+static_assert(TypeCheckClamp< float,     ld,     ld,     ld>(), "");
+static_assert(TypeCheckClamp<double,  float,  float, double>(), "");
+static_assert(TypeCheckClamp<double,  float, double, double>(), "");
+static_assert(TypeCheckClamp<double,  float,     ld,     ld>(), "");
+static_assert(TypeCheckClamp<double, double,  float, double>(), "");
+static_assert(TypeCheckClamp<double, double, double, double>(), "");
+static_assert(TypeCheckClamp<double, double,     ld,     ld>(), "");
+static_assert(TypeCheckClamp<double,     ld,  float,     ld>(), "");
+static_assert(TypeCheckClamp<double,     ld, double,     ld>(), "");
+static_assert(TypeCheckClamp<double,     ld,     ld,     ld>(), "");
+static_assert(TypeCheckClamp<    ld,  float,  float,     ld>(), "");
+static_assert(TypeCheckClamp<    ld,  float, double,     ld>(), "");
+static_assert(TypeCheckClamp<    ld,  float,     ld,     ld>(), "");
+static_assert(TypeCheckClamp<    ld, double,  float,     ld>(), "");
+static_assert(TypeCheckClamp<    ld, double, double,     ld>(), "");
+static_assert(TypeCheckClamp<    ld, double,     ld,     ld>(), "");
+static_assert(TypeCheckClamp<    ld,     ld,  float,     ld>(), "");
+static_assert(TypeCheckClamp<    ld,     ld, double,     ld>(), "");
+static_assert(TypeCheckClamp<    ld,     ld,     ld,     ld>(), "");
+
+// clang-format on
+
+// SafeMin/SafeMax: Check some cases of explicitly specified return type. The
+// commented-out lines give compilation errors due to the requested return type
+// being too small or requiring an int<->float conversion.
+static_assert(TypeCheckMinR<int8_t, int8_t, int16_t>(), "");
+// static_assert(TypeCheckMinR<int8_t, int8_t, float>(), "");
+static_assert(TypeCheckMinR<uint32_t, uint64_t, uint32_t>(), "");
+// static_assert(TypeCheckMaxR<uint64_t, float, float>(), "");
+// static_assert(TypeCheckMaxR<uint64_t, double, float>(), "");
+static_assert(TypeCheckMaxR<uint32_t, int32_t, uint32_t>(), "");
+// static_assert(TypeCheckMaxR<uint32_t, int32_t, int32_t>(), "");
+
+// SafeClamp: Check some cases of explicitly specified return type. The
+// commented-out lines give compilation errors due to the requested return type
+// being too small.
+static_assert(TypeCheckClampR<int16_t, int8_t, uint8_t, int16_t>(), "");
+static_assert(TypeCheckClampR<int16_t, int8_t, uint8_t, int32_t>(), "");
+// static_assert(TypeCheckClampR<int16_t, int8_t, uint8_t, uint32_t>(), "");
+
+template <typename T1, typename T2, typename Tmin, typename Tmax>
+constexpr bool CheckMinMax(T1 a, T2 b, Tmin min, Tmax max) {
+  return TypeCheckMinMax<T1, T2, Tmin, Tmax>() && SafeMin(a, b) == min &&
+         SafeMax(a, b) == max;
+}
+
+template <typename T, typename L, typename H, typename R>
+bool CheckClamp(T x, L min, H max, R clamped) {
+  return TypeCheckClamp<T, L, H, R>() && SafeClamp(x, min, max) == clamped;
+}
+
+// SafeMin/SafeMax: Check a few values.
+static_assert(CheckMinMax(int8_t{1}, int8_t{-1}, int8_t{-1}, int8_t{1}), "");
+static_assert(CheckMinMax(uint8_t{1}, int8_t{-1}, int8_t{-1}, uint8_t{1}), "");
+static_assert(CheckMinMax(uint8_t{5}, uint64_t{2}, uint8_t{2}, uint64_t{5}),
+              "");
+static_assert(CheckMinMax(std::numeric_limits<int32_t>::min(),
+                          std::numeric_limits<uint32_t>::max(),
+                          std::numeric_limits<int32_t>::min(),
+                          std::numeric_limits<uint32_t>::max()),
+              "");
+static_assert(CheckMinMax(std::numeric_limits<int32_t>::min(),
+                          std::numeric_limits<uint16_t>::max(),
+                          std::numeric_limits<int32_t>::min(),
+                          int32_t{std::numeric_limits<uint16_t>::max()}),
+              "");
+// static_assert(CheckMinMax(1.f, 2, 1.f, 2.f), "");
+static_assert(CheckMinMax(1.f, 0.0, 0.0, 1.0), "");
+
+// SafeClamp: Check a few values.
+TEST(SafeMinmaxTest, Clamp) {
+  EXPECT_TRUE(CheckClamp(int32_t{-1000000}, std::numeric_limits<int16_t>::min(),
+                         std::numeric_limits<int16_t>::max(),
+                         std::numeric_limits<int16_t>::min()));
+  EXPECT_TRUE(CheckClamp(uint32_t{1000000}, std::numeric_limits<int16_t>::min(),
+                         std::numeric_limits<int16_t>::max(),
+                         std::numeric_limits<int16_t>::max()));
+  EXPECT_TRUE(CheckClamp(3.f, -1.0, 1.f, 1.0));
+  EXPECT_TRUE(CheckClamp(3.0, -1.f, 1.f, 1.0));
+}
+
+}  // namespace
+
+// These functions aren't used in the tests, but it's useful to look at the
+// compiler output for them, and verify that (1) the same-signedness Test*Safe
+// functions result in exactly the same code as their Test*Ref counterparts,
+// and that (2) the mixed-signedness Test*Safe functions have just a few extra
+// arithmetic and logic instructions (but no extra control flow instructions).
+
+// clang-format off
+int32_t  TestMinRef(  int32_t a,  int32_t b) { return std::min(a, b); }
+uint32_t TestMinRef( uint32_t a, uint32_t b) { return std::min(a, b); }
+int32_t  TestMinSafe( int32_t a,  int32_t b) { return SafeMin(a, b); }
+int32_t  TestMinSafe( int32_t a, uint32_t b) { return SafeMin(a, b); }
+int32_t  TestMinSafe(uint32_t a,  int32_t b) { return SafeMin(a, b); }
+uint32_t TestMinSafe(uint32_t a, uint32_t b) { return SafeMin(a, b); }
+// clang-format on
+
+int32_t TestClampRef(int32_t x, int32_t a, int32_t b) {
+  return std::max(a, std::min(x, b));
+}
+uint32_t TestClampRef(uint32_t x, uint32_t a, uint32_t b) {
+  return std::max(a, std::min(x, b));
+}
+int32_t TestClampSafe(int32_t x, int32_t a, int32_t b) {
+  return SafeClamp(x, a, b);
+}
+int32_t TestClampSafe(int32_t x, int32_t a, uint32_t b) {
+  return SafeClamp(x, a, b);
+}
+int32_t TestClampSafe(int32_t x, uint32_t a, int32_t b) {
+  return SafeClamp(x, a, b);
+}
+uint32_t TestClampSafe(int32_t x, uint32_t a, uint32_t b) {
+  return SafeClamp(x, a, b);
+}
+int32_t TestClampSafe(uint32_t x, int32_t a, int32_t b) {
+  return SafeClamp(x, a, b);
+}
+uint32_t TestClampSafe(uint32_t x, int32_t a, uint32_t b) {
+  return SafeClamp(x, a, b);
+}
+int32_t TestClampSafe(uint32_t x, uint32_t a, int32_t b) {
+  return SafeClamp(x, a, b);
+}
+uint32_t TestClampSafe(uint32_t x, uint32_t a, uint32_t b) {
+  return SafeClamp(x, a, b);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/numerics/sequence_number_util.h b/rtc_base/numerics/sequence_number_util.h
new file mode 100644
index 0000000..9e4b844
--- /dev/null
+++ b/rtc_base/numerics/sequence_number_util.h
@@ -0,0 +1,128 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_NUMERICS_SEQUENCE_NUMBER_UTIL_H_
+#define RTC_BASE_NUMERICS_SEQUENCE_NUMBER_UTIL_H_
+
+#include <limits>
+#include <type_traits>
+
+#include "api/optional.h"
+#include "rtc_base/numerics/mod_ops.h"
+#include "rtc_base/numerics/safe_compare.h"
+
+namespace webrtc {
+
+// Test if the sequence number |a| is ahead or at sequence number |b|.
+//
+// If |M| is an even number and the two sequence numbers are at max distance
+// from each other, then the sequence number with the highest value is
+// considered to be ahead.
+template <typename T, T M>
+inline typename std::enable_if<(M > 0), bool>::type AheadOrAt(T a, T b) {
+  static_assert(std::is_unsigned<T>::value,
+                "Type must be an unsigned integer.");
+  const T maxDist = M / 2;
+  if (!(M & 1) && MinDiff<T, M>(a, b) == maxDist)
+    return b < a;
+  return ForwardDiff<T, M>(b, a) <= maxDist;
+}
+
+template <typename T, T M>
+inline typename std::enable_if<(M == 0), bool>::type AheadOrAt(T a, T b) {
+  static_assert(std::is_unsigned<T>::value,
+                "Type must be an unsigned integer.");
+  const T maxDist = std::numeric_limits<T>::max() / 2 + T(1);
+  if (a - b == maxDist)
+    return b < a;
+  return ForwardDiff(b, a) < maxDist;
+}
+
+template <typename T>
+inline bool AheadOrAt(T a, T b) {
+  return AheadOrAt<T, 0>(a, b);
+}
+
+// Test if the sequence number |a| is ahead of sequence number |b|.
+//
+// If |M| is an even number and the two sequence numbers are at max distance
+// from each other, then the sequence number with the highest value is
+// considered to be ahead.
+template <typename T, T M = 0>
+inline bool AheadOf(T a, T b) {
+  static_assert(std::is_unsigned<T>::value,
+                "Type must be an unsigned integer.");
+  return a != b && AheadOrAt<T, M>(a, b);
+}
+
+// Comparator used to compare sequence numbers in a continuous fashion.
+//
+// WARNING! If used to sort sequence numbers of length M then the interval
+//          covered by the sequence numbers may not be larger than floor(M/2).
+template <typename T, T M = 0>
+struct AscendingSeqNumComp {
+  bool operator()(T a, T b) const { return AheadOf<T, M>(a, b); }
+};
+
+// Comparator used to compare sequence numbers in a continuous fashion.
+//
+// WARNING! If used to sort sequence numbers of length M then the interval
+//          covered by the sequence numbers may not be larger than floor(M/2).
+template <typename T, T M = 0>
+struct DescendingSeqNumComp {
+  bool operator()(T a, T b) const { return AheadOf<T, M>(b, a); }
+};
+
+// A sequencer number unwrapper where the start value of the unwrapped sequence
+// can be set. The unwrapped value is not allowed to wrap.
+template <typename T, T M = 0>
+class SeqNumUnwrapper {
+  // Use '<' instead of rtc::SafeLt to avoid crbug.com/753488
+  static_assert(
+      std::is_unsigned<T>::value &&
+          std::numeric_limits<T>::max() < std::numeric_limits<uint64_t>::max(),
+      "Type unwrapped must be an unsigned integer smaller than uint64_t.");
+
+ public:
+  // We want a default value that is close to 2^62 for a two reasons. Firstly,
+  // we can unwrap wrapping numbers in either direction, and secondly, the
+  // unwrapped numbers can be stored in either int64_t or uint64_t. We also want
+  // the default value to be human readable, which makes a power of 10 suitable.
+  static constexpr uint64_t kDefaultStartValue = 1000000000000000000UL;
+
+  SeqNumUnwrapper() : last_unwrapped_(kDefaultStartValue) {}
+  explicit SeqNumUnwrapper(uint64_t start_at) : last_unwrapped_(start_at) {}
+
+  uint64_t Unwrap(T value) {
+    if (!last_value_)
+      last_value_.emplace(value);
+
+    uint64_t unwrapped = 0;
+    if (AheadOrAt<T, M>(value, *last_value_)) {
+      unwrapped = last_unwrapped_ + ForwardDiff<T, M>(*last_value_, value);
+      RTC_CHECK_GE(unwrapped, last_unwrapped_);
+    } else {
+      unwrapped = last_unwrapped_ - ReverseDiff<T, M>(*last_value_, value);
+      RTC_CHECK_LT(unwrapped, last_unwrapped_);
+    }
+
+    *last_value_ = value;
+    last_unwrapped_ = unwrapped;
+    return last_unwrapped_;
+  }
+
+ private:
+  uint64_t last_unwrapped_;
+  rtc::Optional<T> last_value_;
+};
+
+}  // namespace webrtc
+
+#endif  // RTC_BASE_NUMERICS_SEQUENCE_NUMBER_UTIL_H_
diff --git a/rtc_base/numerics/sequence_number_util_unittest.cc b/rtc_base/numerics/sequence_number_util_unittest.cc
new file mode 100644
index 0000000..beb2b52
--- /dev/null
+++ b/rtc_base/numerics/sequence_number_util_unittest.cc
@@ -0,0 +1,320 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <set>
+
+#include "rtc_base/numerics/sequence_number_util.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+class TestSeqNumUtil : public ::testing::Test {
+ protected:
+  // Can't use std::numeric_limits<unsigned long>::max() since
+  // MSVC doesn't support constexpr.
+  static const unsigned long ulmax = ~0ul;  // NOLINT
+};
+
+TEST_F(TestSeqNumUtil, AheadOrAt) {
+  uint8_t x = 0;
+  uint8_t y = 0;
+  ASSERT_TRUE(AheadOrAt(x, y));
+  ++x;
+  ASSERT_TRUE(AheadOrAt(x, y));
+  ASSERT_FALSE(AheadOrAt(y, x));
+  for (int i = 0; i < 256; ++i) {
+    ASSERT_TRUE(AheadOrAt(x, y));
+    ++x;
+    ++y;
+  }
+
+  x = 128;
+  y = 0;
+  ASSERT_TRUE(AheadOrAt(x, y));
+  ASSERT_FALSE(AheadOrAt(y, x));
+
+  x = 129;
+  ASSERT_FALSE(AheadOrAt(x, y));
+  ASSERT_TRUE(AheadOrAt(y, x));
+  ASSERT_TRUE(AheadOrAt<uint16_t>(x, y));
+  ASSERT_FALSE(AheadOrAt<uint16_t>(y, x));
+}
+
+TEST_F(TestSeqNumUtil, AheadOrAtWithDivisor) {
+  ASSERT_TRUE((AheadOrAt<uint8_t, 11>(5, 0)));
+  ASSERT_FALSE((AheadOrAt<uint8_t, 11>(6, 0)));
+  ASSERT_FALSE((AheadOrAt<uint8_t, 11>(0, 5)));
+  ASSERT_TRUE((AheadOrAt<uint8_t, 11>(0, 6)));
+
+  ASSERT_TRUE((AheadOrAt<uint8_t, 10>(5, 0)));
+  ASSERT_FALSE((AheadOrAt<uint8_t, 10>(6, 0)));
+  ASSERT_FALSE((AheadOrAt<uint8_t, 10>(0, 5)));
+  ASSERT_TRUE((AheadOrAt<uint8_t, 10>(0, 6)));
+
+  const uint8_t D = 211;
+  uint8_t x = 0;
+  for (int i = 0; i < D; ++i) {
+    uint8_t next_x = Add<D>(x, 1);
+    ASSERT_TRUE((AheadOrAt<uint8_t, D>(i, i)));
+    ASSERT_TRUE((AheadOrAt<uint8_t, D>(next_x, i)));
+    ASSERT_FALSE((AheadOrAt<uint8_t, D>(i, next_x)));
+    x = next_x;
+  }
+}
+
+TEST_F(TestSeqNumUtil, AheadOf) {
+  uint8_t x = 0;
+  uint8_t y = 0;
+  ASSERT_FALSE(AheadOf(x, y));
+  ++x;
+  ASSERT_TRUE(AheadOf(x, y));
+  ASSERT_FALSE(AheadOf(y, x));
+  for (int i = 0; i < 256; ++i) {
+    ASSERT_TRUE(AheadOf(x, y));
+    ++x;
+    ++y;
+  }
+
+  x = 128;
+  y = 0;
+  for (int i = 0; i < 128; ++i) {
+    ASSERT_TRUE(AheadOf(x, y));
+    ASSERT_FALSE(AheadOf(y, x));
+    x++;
+    y++;
+  }
+
+  for (int i = 0; i < 128; ++i) {
+    ASSERT_FALSE(AheadOf(x, y));
+    ASSERT_TRUE(AheadOf(y, x));
+    x++;
+    y++;
+  }
+
+  x = 129;
+  y = 0;
+  ASSERT_FALSE(AheadOf(x, y));
+  ASSERT_TRUE(AheadOf(y, x));
+  ASSERT_TRUE(AheadOf<uint16_t>(x, y));
+  ASSERT_FALSE(AheadOf<uint16_t>(y, x));
+}
+
+TEST_F(TestSeqNumUtil, AheadOfWithDivisor) {
+  ASSERT_TRUE((AheadOf<uint8_t, 11>(5, 0)));
+  ASSERT_FALSE((AheadOf<uint8_t, 11>(6, 0)));
+  ASSERT_FALSE((AheadOf<uint8_t, 11>(0, 5)));
+  ASSERT_TRUE((AheadOf<uint8_t, 11>(0, 6)));
+
+  ASSERT_TRUE((AheadOf<uint8_t, 10>(5, 0)));
+  ASSERT_FALSE((AheadOf<uint8_t, 10>(6, 0)));
+  ASSERT_FALSE((AheadOf<uint8_t, 10>(0, 5)));
+  ASSERT_TRUE((AheadOf<uint8_t, 10>(0, 6)));
+
+  const uint8_t D = 211;
+  uint8_t x = 0;
+  for (int i = 0; i < D; ++i) {
+    uint8_t next_x = Add<D>(x, 1);
+    ASSERT_FALSE((AheadOf<uint8_t, D>(i, i)));
+    ASSERT_TRUE((AheadOf<uint8_t, D>(next_x, i)));
+    ASSERT_FALSE((AheadOf<uint8_t, D>(i, next_x)));
+    x = next_x;
+  }
+}
+
+TEST_F(TestSeqNumUtil, ForwardDiffWithDivisor) {
+  const uint8_t kDivisor = 211;
+
+  for (uint8_t i = 0; i < kDivisor - 1; ++i) {
+    ASSERT_EQ(0, (ForwardDiff<uint8_t, kDivisor>(i, i)));
+    ASSERT_EQ(1, (ForwardDiff<uint8_t, kDivisor>(i, i + 1)));
+    ASSERT_EQ(kDivisor - 1, (ForwardDiff<uint8_t, kDivisor>(i + 1, i)));
+  }
+
+  for (uint8_t i = 1; i < kDivisor; ++i) {
+    ASSERT_EQ(i, (ForwardDiff<uint8_t, kDivisor>(0, i)));
+    ASSERT_EQ(kDivisor - i, (ForwardDiff<uint8_t, kDivisor>(i, 0)));
+  }
+}
+
+TEST_F(TestSeqNumUtil, ReverseDiffWithDivisor) {
+  const uint8_t kDivisor = 241;
+
+  for (uint8_t i = 0; i < kDivisor - 1; ++i) {
+    ASSERT_EQ(0, (ReverseDiff<uint8_t, kDivisor>(i, i)));
+    ASSERT_EQ(kDivisor - 1, (ReverseDiff<uint8_t, kDivisor>(i, i + 1)));
+    ASSERT_EQ(1, (ReverseDiff<uint8_t, kDivisor>(i + 1, i)));
+  }
+
+  for (uint8_t i = 1; i < kDivisor; ++i) {
+    ASSERT_EQ(kDivisor - i, (ReverseDiff<uint8_t, kDivisor>(0, i)));
+    ASSERT_EQ(i, (ReverseDiff<uint8_t, kDivisor>(i, 0)));
+  }
+}
+
+TEST_F(TestSeqNumUtil, SeqNumComparator) {
+  std::set<uint8_t, AscendingSeqNumComp<uint8_t>> seq_nums_asc;
+  std::set<uint8_t, DescendingSeqNumComp<uint8_t>> seq_nums_desc;
+
+  uint8_t x = 0;
+  for (int i = 0; i < 128; ++i) {
+    seq_nums_asc.insert(x);
+    seq_nums_desc.insert(x);
+    ASSERT_EQ(x, *seq_nums_asc.begin());
+    ASSERT_EQ(x, *seq_nums_desc.rbegin());
+    ++x;
+  }
+
+  seq_nums_asc.clear();
+  seq_nums_desc.clear();
+  x = 199;
+  for (int i = 0; i < 128; ++i) {
+    seq_nums_asc.insert(x);
+    seq_nums_desc.insert(x);
+    ASSERT_EQ(x, *seq_nums_asc.begin());
+    ASSERT_EQ(x, *seq_nums_desc.rbegin());
+    ++x;
+  }
+}
+
+TEST_F(TestSeqNumUtil, SeqNumComparatorWithDivisor) {
+  const uint8_t D = 223;
+
+  std::set<uint8_t, AscendingSeqNumComp<uint8_t, D>> seq_nums_asc;
+  std::set<uint8_t, DescendingSeqNumComp<uint8_t, D>> seq_nums_desc;
+
+  uint8_t x = 0;
+  for (int i = 0; i < D / 2; ++i) {
+    seq_nums_asc.insert(x);
+    seq_nums_desc.insert(x);
+    ASSERT_EQ(x, *seq_nums_asc.begin());
+    ASSERT_EQ(x, *seq_nums_desc.rbegin());
+    x = Add<D>(x, 1);
+  }
+
+  seq_nums_asc.clear();
+  seq_nums_desc.clear();
+  x = 200;
+  for (int i = 0; i < D / 2; ++i) {
+    seq_nums_asc.insert(x);
+    seq_nums_desc.insert(x);
+    ASSERT_EQ(x, *seq_nums_asc.begin());
+    ASSERT_EQ(x, *seq_nums_desc.rbegin());
+    x = Add<D>(x, 1);
+  }
+}
+
+#if GTEST_HAS_DEATH_TEST
+#if !defined(WEBRTC_ANDROID)
+TEST(SeqNumUnwrapper, NoBackWardWrap) {
+  SeqNumUnwrapper<uint8_t> unwrapper(0);
+  EXPECT_EQ(0U, unwrapper.Unwrap(0));
+
+  // The unwrapped sequence is not allowed to wrap, if that happens the
+  // SeqNumUnwrapper should have been constructed with a higher start value.
+  EXPECT_DEATH(unwrapper.Unwrap(255), "");
+}
+
+TEST(SeqNumUnwrapper, NoForwardWrap) {
+  SeqNumUnwrapper<uint32_t> unwrapper(std::numeric_limits<uint64_t>::max());
+  EXPECT_EQ(std::numeric_limits<uint64_t>::max(), unwrapper.Unwrap(0));
+
+  // The unwrapped sequence is not allowed to wrap, if that happens the
+  // SeqNumUnwrapper should have been constructed with a lower start value.
+  EXPECT_DEATH(unwrapper.Unwrap(1), "");
+}
+#endif
+#endif
+
+TEST(SeqNumUnwrapper, ForwardWrap) {
+  SeqNumUnwrapper<uint8_t> unwrapper(0);
+  EXPECT_EQ(0U, unwrapper.Unwrap(255));
+  EXPECT_EQ(1U, unwrapper.Unwrap(0));
+}
+
+TEST(SeqNumUnwrapper, ForwardWrapWithDivisor) {
+  SeqNumUnwrapper<uint8_t, 33> unwrapper(0);
+  EXPECT_EQ(0U, unwrapper.Unwrap(30));
+  EXPECT_EQ(6U, unwrapper.Unwrap(3));
+}
+
+TEST(SeqNumUnwrapper, BackWardWrap) {
+  SeqNumUnwrapper<uint8_t> unwrapper(10);
+  EXPECT_EQ(10U, unwrapper.Unwrap(0));
+  EXPECT_EQ(8U, unwrapper.Unwrap(254));
+}
+
+TEST(SeqNumUnwrapper, BackWardWrapWithDivisor) {
+  SeqNumUnwrapper<uint8_t, 33> unwrapper(10);
+  EXPECT_EQ(10U, unwrapper.Unwrap(0));
+  EXPECT_EQ(8U, unwrapper.Unwrap(31));
+}
+
+TEST(SeqNumUnwrapper, Unwrap) {
+  SeqNumUnwrapper<uint16_t> unwrapper(0);
+  const uint16_t kMax = std::numeric_limits<uint16_t>::max();
+  const uint16_t kMaxDist = kMax / 2 + 1;
+
+  EXPECT_EQ(0U, unwrapper.Unwrap(0));
+  EXPECT_EQ(kMaxDist, unwrapper.Unwrap(kMaxDist));
+  EXPECT_EQ(0U, unwrapper.Unwrap(0));
+
+  EXPECT_EQ(kMaxDist, unwrapper.Unwrap(kMaxDist));
+  EXPECT_EQ(kMax, unwrapper.Unwrap(kMax));
+  EXPECT_EQ(kMax + 1U, unwrapper.Unwrap(0));
+  EXPECT_EQ(kMax, unwrapper.Unwrap(kMax));
+  EXPECT_EQ(kMaxDist, unwrapper.Unwrap(kMaxDist));
+  EXPECT_EQ(0U, unwrapper.Unwrap(0));
+}
+
+TEST(SeqNumUnwrapper, UnwrapOddDivisor) {
+  SeqNumUnwrapper<uint8_t, 11> unwrapper(10);
+
+  EXPECT_EQ(10U, unwrapper.Unwrap(10));
+  EXPECT_EQ(11U, unwrapper.Unwrap(0));
+  EXPECT_EQ(16U, unwrapper.Unwrap(5));
+  EXPECT_EQ(21U, unwrapper.Unwrap(10));
+  EXPECT_EQ(22U, unwrapper.Unwrap(0));
+  EXPECT_EQ(17U, unwrapper.Unwrap(6));
+  EXPECT_EQ(12U, unwrapper.Unwrap(1));
+  EXPECT_EQ(7U, unwrapper.Unwrap(7));
+  EXPECT_EQ(2U, unwrapper.Unwrap(2));
+  EXPECT_EQ(0U, unwrapper.Unwrap(0));
+}
+
+TEST(SeqNumUnwrapper, ManyForwardWraps) {
+  const int kLargeNumber = 4711;
+  const int kMaxStep = kLargeNumber / 2;
+  const int kNumWraps = 100;
+  SeqNumUnwrapper<uint16_t, kLargeNumber> unwrapper;
+
+  uint16_t next_unwrap = 0;
+  uint64_t expected = decltype(unwrapper)::kDefaultStartValue;
+  for (int i = 0; i < kNumWraps * 2 + 1; ++i) {
+    EXPECT_EQ(expected, unwrapper.Unwrap(next_unwrap));
+    expected += kMaxStep;
+    next_unwrap = (next_unwrap + kMaxStep) % kLargeNumber;
+  }
+}
+
+TEST(SeqNumUnwrapper, ManyBackwardWraps) {
+  const int kLargeNumber = 4711;
+  const int kMaxStep = kLargeNumber / 2;
+  const int kNumWraps = 100;
+  SeqNumUnwrapper<uint16_t, kLargeNumber> unwrapper(kLargeNumber * kNumWraps);
+
+  uint16_t next_unwrap = 0;
+  uint64_t expected = kLargeNumber * kNumWraps;
+  for (uint16_t i = 0; i < kNumWraps * 2 + 1; ++i) {
+    EXPECT_EQ(expected, unwrapper.Unwrap(next_unwrap));
+    expected -= kMaxStep;
+    next_unwrap = (next_unwrap + kMaxStep + 1) % kLargeNumber;
+  }
+}
+
+}  // namespace webrtc
diff --git a/rtc_base/onetimeevent.h b/rtc_base/onetimeevent.h
new file mode 100644
index 0000000..8c55e26
--- /dev/null
+++ b/rtc_base/onetimeevent.h
@@ -0,0 +1,61 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_ONETIMEEVENT_H_
+#define RTC_BASE_ONETIMEEVENT_H_
+
+#include "rtc_base/criticalsection.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+// Provides a simple way to perform an operation (such as logging) one
+// time in a certain scope.
+// Example:
+//   OneTimeEvent firstFrame;
+//   ...
+//   if (firstFrame()) {
+//     RTC_LOG(LS_INFO) << "This is the first frame".
+//   }
+class OneTimeEvent {
+ public:
+  OneTimeEvent() {}
+  bool operator()() {
+    rtc::CritScope cs(&critsect_);
+    if (happened_) {
+      return false;
+    }
+    happened_ = true;
+    return true;
+  }
+
+ private:
+  bool happened_ = false;
+  rtc::CriticalSection critsect_;
+};
+
+// A non-thread-safe, ligher-weight version of the OneTimeEvent class.
+class ThreadUnsafeOneTimeEvent {
+ public:
+  ThreadUnsafeOneTimeEvent() {}
+  bool operator()() {
+    if (happened_) {
+      return false;
+    }
+    happened_ = true;
+    return true;
+  }
+
+ private:
+  bool happened_ = false;
+};
+
+}  // namespace webrtc
+
+#endif  // RTC_BASE_ONETIMEEVENT_H_
diff --git a/rtc_base/onetimeevent_unittest.cc b/rtc_base/onetimeevent_unittest.cc
new file mode 100644
index 0000000..49cae07
--- /dev/null
+++ b/rtc_base/onetimeevent_unittest.cc
@@ -0,0 +1,33 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/onetimeevent.h"
+#include "rtc_base/gunit.h"
+
+namespace webrtc {
+
+TEST(OneTimeEventTest, ThreadSafe) {
+  OneTimeEvent ot;
+
+  // The one time event is expected to evaluate to true only the first time.
+  EXPECT_TRUE(ot());
+  EXPECT_FALSE(ot());
+  EXPECT_FALSE(ot());
+}
+
+TEST(OneTimeEventTest, ThreadUnsafe) {
+  ThreadUnsafeOneTimeEvent ot;
+
+  EXPECT_TRUE(ot());
+  EXPECT_FALSE(ot());
+  EXPECT_FALSE(ot());
+}
+
+}  // namespace webrtc
diff --git a/rtc_base/openssl.h b/rtc_base/openssl.h
new file mode 100644
index 0000000..dbbae05
--- /dev/null
+++ b/rtc_base/openssl.h
@@ -0,0 +1,20 @@
+/*
+ *  Copyright 2013 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_OPENSSL_H_
+#define RTC_BASE_OPENSSL_H_
+
+#include <openssl/ssl.h>
+
+#if (OPENSSL_VERSION_NUMBER < 0x10100000L)
+#error OpenSSL is older than 1.1.0, which is the minimum supported version.
+#endif
+
+#endif  // RTC_BASE_OPENSSL_H_
diff --git a/rtc_base/openssladapter.cc b/rtc_base/openssladapter.cc
new file mode 100644
index 0000000..ce00469
--- /dev/null
+++ b/rtc_base/openssladapter.cc
@@ -0,0 +1,1103 @@
+/*
+ *  Copyright 2008 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/openssladapter.h"
+
+#if defined(WEBRTC_POSIX)
+#include <unistd.h>
+#endif
+
+#if defined(WEBRTC_WIN)
+// Must be included first before openssl headers.
+#include "rtc_base/win32.h"  // NOLINT
+#endif  // WEBRTC_WIN
+
+#include <openssl/bio.h>
+#include <openssl/crypto.h>
+#include <openssl/err.h>
+#include <openssl/opensslv.h>
+#include <openssl/rand.h>
+#include <openssl/x509.h>
+#include <openssl/x509v3.h>
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/openssl.h"
+#include "rtc_base/sslroots.h"
+#include "rtc_base/stringencode.h"
+#include "rtc_base/stringutils.h"
+#include "rtc_base/thread.h"
+
+#ifndef OPENSSL_IS_BORINGSSL
+
+// TODO: Use a nicer abstraction for mutex.
+
+#if defined(WEBRTC_WIN)
+  #define MUTEX_TYPE HANDLE
+#define MUTEX_SETUP(x) (x) = CreateMutex(nullptr, FALSE, nullptr)
+#define MUTEX_CLEANUP(x) CloseHandle(x)
+#define MUTEX_LOCK(x) WaitForSingleObject((x), INFINITE)
+#define MUTEX_UNLOCK(x) ReleaseMutex(x)
+#define THREAD_ID GetCurrentThreadId()
+#elif defined(WEBRTC_POSIX)
+  #define MUTEX_TYPE pthread_mutex_t
+  #define MUTEX_SETUP(x) pthread_mutex_init(&(x), nullptr)
+  #define MUTEX_CLEANUP(x) pthread_mutex_destroy(&(x))
+  #define MUTEX_LOCK(x) pthread_mutex_lock(&(x))
+  #define MUTEX_UNLOCK(x) pthread_mutex_unlock(&(x))
+  #define THREAD_ID pthread_self()
+#else
+  #error You must define mutex operations appropriate for your platform!
+#endif
+
+struct CRYPTO_dynlock_value {
+  MUTEX_TYPE mutex;
+};
+
+#endif  // #ifndef OPENSSL_IS_BORINGSSL
+
+//////////////////////////////////////////////////////////////////////
+// SocketBIO
+//////////////////////////////////////////////////////////////////////
+
+static int socket_write(BIO* h, const char* buf, int num);
+static int socket_read(BIO* h, char* buf, int size);
+static int socket_puts(BIO* h, const char* str);
+static long socket_ctrl(BIO* h, int cmd, long arg1, void* arg2);
+static int socket_new(BIO* h);
+static int socket_free(BIO* data);
+
+static BIO_METHOD* BIO_socket_method() {
+  static BIO_METHOD* methods = [] {
+    BIO_METHOD* methods = BIO_meth_new(BIO_TYPE_BIO, "socket");
+    BIO_meth_set_write(methods, socket_write);
+    BIO_meth_set_read(methods, socket_read);
+    BIO_meth_set_puts(methods, socket_puts);
+    BIO_meth_set_ctrl(methods, socket_ctrl);
+    BIO_meth_set_create(methods, socket_new);
+    BIO_meth_set_destroy(methods, socket_free);
+    return methods;
+  }();
+  return methods;
+}
+
+static BIO* BIO_new_socket(rtc::AsyncSocket* socket) {
+  BIO* ret = BIO_new(BIO_socket_method());
+  if (ret == nullptr) {
+    return nullptr;
+  }
+  BIO_set_data(ret, socket);
+  return ret;
+}
+
+static int socket_new(BIO* b) {
+  BIO_set_shutdown(b, 0);
+  BIO_set_init(b, 1);
+  BIO_set_data(b, 0);
+  return 1;
+}
+
+static int socket_free(BIO* b) {
+  if (b == nullptr)
+    return 0;
+  return 1;
+}
+
+static int socket_read(BIO* b, char* out, int outl) {
+  if (!out)
+    return -1;
+  rtc::AsyncSocket* socket = static_cast<rtc::AsyncSocket*>(BIO_get_data(b));
+  BIO_clear_retry_flags(b);
+  int result = socket->Recv(out, outl, nullptr);
+  if (result > 0) {
+    return result;
+  } else if (socket->IsBlocking()) {
+    BIO_set_retry_read(b);
+  }
+  return -1;
+}
+
+static int socket_write(BIO* b, const char* in, int inl) {
+  if (!in)
+    return -1;
+  rtc::AsyncSocket* socket = static_cast<rtc::AsyncSocket*>(BIO_get_data(b));
+  BIO_clear_retry_flags(b);
+  int result = socket->Send(in, inl);
+  if (result > 0) {
+    return result;
+  } else if (socket->IsBlocking()) {
+    BIO_set_retry_write(b);
+  }
+  return -1;
+}
+
+static int socket_puts(BIO* b, const char* str) {
+  return socket_write(b, str, rtc::checked_cast<int>(strlen(str)));
+}
+
+static long socket_ctrl(BIO* b, int cmd, long num, void* ptr) {
+  switch (cmd) {
+  case BIO_CTRL_RESET:
+    return 0;
+  case BIO_CTRL_EOF: {
+    rtc::AsyncSocket* socket = static_cast<rtc::AsyncSocket*>(ptr);
+    // 1 means socket closed.
+    return (socket->GetState() == rtc::AsyncSocket::CS_CLOSED) ? 1 : 0;
+  }
+  case BIO_CTRL_WPENDING:
+  case BIO_CTRL_PENDING:
+    return 0;
+  case BIO_CTRL_FLUSH:
+    return 1;
+  default:
+    return 0;
+  }
+}
+
+static void LogSslError() {
+  // Walk down the error stack to find the SSL error.
+  uint32_t error_code;
+  const char* file;
+  int line;
+  do {
+    error_code = ERR_get_error_line(&file, &line);
+    if (ERR_GET_LIB(error_code) == ERR_LIB_SSL) {
+      RTC_LOG(LS_ERROR) << "ERR_LIB_SSL: " << error_code << ", " << file << ":"
+                        << line;
+      break;
+    }
+  } while (error_code != 0);
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// OpenSSLAdapter
+/////////////////////////////////////////////////////////////////////////////
+
+namespace rtc {
+
+VerificationCallback OpenSSLAdapter::custom_verify_callback_ = nullptr;
+
+bool OpenSSLAdapter::InitializeSSL(VerificationCallback callback) {
+  if (!SSL_library_init())
+    return false;
+#if !defined(ADDRESS_SANITIZER) || !defined(WEBRTC_MAC) || defined(WEBRTC_IOS)
+  // Loading the error strings crashes mac_asan.  Omit this debugging aid there.
+  SSL_load_error_strings();
+#endif
+  ERR_load_BIO_strings();
+  OpenSSL_add_all_algorithms();
+  RAND_poll();
+  custom_verify_callback_ = callback;
+  return true;
+}
+
+bool OpenSSLAdapter::CleanupSSL() {
+  return true;
+}
+
+OpenSSLAdapter::OpenSSLAdapter(AsyncSocket* socket,
+                               OpenSSLAdapterFactory* factory)
+    : SSLAdapter(socket),
+      factory_(factory),
+      state_(SSL_NONE),
+      role_(SSL_CLIENT),
+      ssl_read_needs_write_(false),
+      ssl_write_needs_read_(false),
+      restartable_(false),
+      ssl_(nullptr),
+      ssl_ctx_(nullptr),
+      ssl_mode_(SSL_MODE_TLS),
+      ignore_bad_cert_(false),
+      custom_verification_succeeded_(false) {
+  // If a factory is used, take a reference on the factory's SSL_CTX.
+  // Otherwise, we'll create our own later.
+  // Either way, we'll release our reference via SSL_CTX_free() in Cleanup().
+  if (factory_) {
+    ssl_ctx_ = factory_->ssl_ctx();
+    RTC_DCHECK(ssl_ctx_);
+    // Note: if using OpenSSL, requires version 1.1.0 or later.
+    SSL_CTX_up_ref(ssl_ctx_);
+  }
+}
+
+OpenSSLAdapter::~OpenSSLAdapter() {
+  Cleanup();
+}
+
+void OpenSSLAdapter::SetIgnoreBadCert(bool ignore) {
+  ignore_bad_cert_ = ignore;
+}
+
+void OpenSSLAdapter::SetAlpnProtocols(const std::vector<std::string>& protos) {
+  alpn_protocols_ = protos;
+}
+
+void OpenSSLAdapter::SetEllipticCurves(const std::vector<std::string>& curves) {
+  elliptic_curves_ = curves;
+}
+
+void OpenSSLAdapter::SetMode(SSLMode mode) {
+  RTC_DCHECK(!ssl_ctx_);
+  RTC_DCHECK(state_ == SSL_NONE);
+  ssl_mode_ = mode;
+}
+
+void OpenSSLAdapter::SetIdentity(SSLIdentity* identity) {
+  RTC_DCHECK(!identity_);
+  identity_.reset(static_cast<OpenSSLIdentity*>(identity));
+}
+
+void OpenSSLAdapter::SetRole(SSLRole role) {
+  role_ = role;
+}
+
+AsyncSocket* OpenSSLAdapter::Accept(SocketAddress* paddr) {
+  RTC_DCHECK(role_ == SSL_SERVER);
+  AsyncSocket* socket = SSLAdapter::Accept(paddr);
+  if (!socket) {
+    return nullptr;
+  }
+
+  SSLAdapter* adapter = SSLAdapter::Create(socket);
+  adapter->SetIdentity(identity_->GetReference());
+  adapter->SetRole(rtc::SSL_SERVER);
+  adapter->SetIgnoreBadCert(ignore_bad_cert_);
+  adapter->StartSSL("", false);
+  return adapter;
+}
+
+int OpenSSLAdapter::StartSSL(const char* hostname, bool restartable) {
+  if (state_ != SSL_NONE)
+    return -1;
+
+  ssl_host_name_ = hostname;
+  restartable_ = restartable;
+
+  if (socket_->GetState() != Socket::CS_CONNECTED) {
+    state_ = SSL_WAIT;
+    return 0;
+  }
+
+  state_ = SSL_CONNECTING;
+  if (int err = BeginSSL()) {
+    Error("BeginSSL", err, false);
+    return err;
+  }
+
+  return 0;
+}
+
+int OpenSSLAdapter::BeginSSL() {
+  RTC_LOG(LS_INFO) << "OpenSSLAdapter::BeginSSL: " << ssl_host_name_;
+  RTC_DCHECK(state_ == SSL_CONNECTING);
+
+  int err = 0;
+  BIO* bio = nullptr;
+
+  // First set up the context. We should either have a factory, with its own
+  // pre-existing context, or be running standalone, in which case we will
+  // need to create one, and specify |false| to disable session caching.
+  if (!factory_) {
+    RTC_DCHECK(!ssl_ctx_);
+    ssl_ctx_ = CreateContext(ssl_mode_, false);
+  }
+  if (!ssl_ctx_) {
+    err = -1;
+    goto ssl_error;
+  }
+
+  if (identity_ && !identity_->ConfigureIdentity(ssl_ctx_)) {
+    SSL_CTX_free(ssl_ctx_);
+    err = -1;
+    goto ssl_error;
+  }
+
+  bio = BIO_new_socket(socket_);
+  if (!bio) {
+    err = -1;
+    goto ssl_error;
+  }
+
+  ssl_ = SSL_new(ssl_ctx_);
+  if (!ssl_) {
+    err = -1;
+    goto ssl_error;
+  }
+
+  SSL_set_app_data(ssl_, this);
+
+  // SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER allows different buffers to be passed
+  // into SSL_write when a record could only be partially transmitted (and thus
+  // requires another call to SSL_write to finish transmission). This allows us
+  // to copy the data into our own buffer when this occurs, since the original
+  // buffer can't safely be accessed after control exits Send.
+  // TODO(deadbeef): Do we want SSL_MODE_ENABLE_PARTIAL_WRITE? It doesn't
+  // appear Send handles partial writes properly, though maybe we never notice
+  // since we never send more than 16KB at once..
+  SSL_set_mode(ssl_, SSL_MODE_ENABLE_PARTIAL_WRITE |
+                     SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER);
+
+  // Enable SNI, if a hostname is supplied.
+  if (!ssl_host_name_.empty()) {
+    SSL_set_tlsext_host_name(ssl_, ssl_host_name_.c_str());
+
+    // Enable session caching, if configured and a hostname is supplied.
+    if (factory_) {
+      SSL_SESSION* cached = factory_->LookupSession(ssl_host_name_);
+      if (cached) {
+        if (SSL_set_session(ssl_, cached) == 0) {
+          RTC_LOG(LS_WARNING) << "Failed to apply SSL session from cache";
+          err = -1;
+          goto ssl_error;
+        }
+
+        RTC_LOG(LS_INFO) << "Attempting to resume SSL session to "
+                         << ssl_host_name_;
+      }
+    }
+  }
+
+#ifdef OPENSSL_IS_BORINGSSL
+  // Set a couple common TLS extensions; even though we don't use them yet.
+  SSL_enable_ocsp_stapling(ssl_);
+  SSL_enable_signed_cert_timestamps(ssl_);
+#endif
+
+  if (!alpn_protocols_.empty()) {
+    std::string tls_alpn_string = TransformAlpnProtocols(alpn_protocols_);
+    if (!tls_alpn_string.empty()) {
+      SSL_set_alpn_protos(
+          ssl_, reinterpret_cast<const unsigned char*>(tls_alpn_string.data()),
+          tls_alpn_string.size());
+    }
+  }
+
+  if (!elliptic_curves_.empty()) {
+    SSL_set1_curves_list(ssl_, rtc::join(elliptic_curves_, ':').c_str());
+  }
+
+  // Now that the initial config is done, transfer ownership of |bio| to the
+  // SSL object. If ContinueSSL() fails, the bio will be freed in Cleanup().
+  SSL_set_bio(ssl_, bio, bio);
+  bio = nullptr;
+
+  // Do the connect.
+  err = ContinueSSL();
+  if (err != 0)
+    goto ssl_error;
+
+  return err;
+
+ssl_error:
+  Cleanup();
+  if (bio)
+    BIO_free(bio);
+
+  return err;
+}
+
+int OpenSSLAdapter::ContinueSSL() {
+  RTC_DCHECK(state_ == SSL_CONNECTING);
+
+  // Clear the DTLS timer
+  Thread::Current()->Clear(this, MSG_TIMEOUT);
+
+  int code = (role_ == SSL_CLIENT) ? SSL_connect(ssl_) : SSL_accept(ssl_);
+  switch (SSL_get_error(ssl_, code)) {
+  case SSL_ERROR_NONE:
+    if (!SSLPostConnectionCheck(ssl_, ssl_host_name_.c_str())) {
+      RTC_LOG(LS_ERROR) << "TLS post connection check failed";
+      // make sure we close the socket
+      Cleanup();
+      // The connect failed so return -1 to shut down the socket
+      return -1;
+    }
+
+    state_ = SSL_CONNECTED;
+    AsyncSocketAdapter::OnConnectEvent(this);
+#if 0  // TODO: worry about this
+    // Don't let ourselves go away during the callbacks
+    PRefPtr<OpenSSLAdapter> lock(this);
+    RTC_LOG(LS_INFO) << " -- onStreamReadable";
+    AsyncSocketAdapter::OnReadEvent(this);
+    RTC_LOG(LS_INFO) << " -- onStreamWriteable";
+    AsyncSocketAdapter::OnWriteEvent(this);
+#endif
+    break;
+
+  case SSL_ERROR_WANT_READ:
+    RTC_LOG(LS_VERBOSE) << " -- error want read";
+    struct timeval timeout;
+    if (DTLSv1_get_timeout(ssl_, &timeout)) {
+      int delay = timeout.tv_sec * 1000 + timeout.tv_usec/1000;
+
+      Thread::Current()->PostDelayed(RTC_FROM_HERE, delay, this, MSG_TIMEOUT,
+                                     0);
+    }
+    break;
+
+  case SSL_ERROR_WANT_WRITE:
+    break;
+
+  case SSL_ERROR_ZERO_RETURN:
+  default:
+    RTC_LOG(LS_WARNING) << "ContinueSSL -- error " << code;
+    return (code != 0) ? code : -1;
+  }
+
+  return 0;
+}
+
+void OpenSSLAdapter::Error(const char* context, int err, bool signal) {
+  RTC_LOG(LS_WARNING) << "OpenSSLAdapter::Error(" << context << ", " << err
+                      << ")";
+  state_ = SSL_ERROR;
+  SetError(err);
+  if (signal)
+    AsyncSocketAdapter::OnCloseEvent(this, err);
+}
+
+void OpenSSLAdapter::Cleanup() {
+  RTC_LOG(LS_INFO) << "OpenSSLAdapter::Cleanup";
+
+  state_ = SSL_NONE;
+  ssl_read_needs_write_ = false;
+  ssl_write_needs_read_ = false;
+  custom_verification_succeeded_ = false;
+  pending_data_.Clear();
+
+  if (ssl_) {
+    SSL_free(ssl_);
+    ssl_ = nullptr;
+  }
+
+  if (ssl_ctx_) {
+    SSL_CTX_free(ssl_ctx_);
+    ssl_ctx_ = nullptr;
+  }
+  identity_.reset();
+
+  // Clear the DTLS timer
+  Thread::Current()->Clear(this, MSG_TIMEOUT);
+}
+
+int OpenSSLAdapter::DoSslWrite(const void* pv, size_t cb, int* error) {
+  // If we have pending data (that was previously only partially written by
+  // SSL_write), we shouldn't be attempting to write anything else.
+  RTC_DCHECK(pending_data_.empty() || pv == pending_data_.data());
+  RTC_DCHECK(error != nullptr);
+
+  ssl_write_needs_read_ = false;
+  int ret = SSL_write(ssl_, pv, checked_cast<int>(cb));
+  *error = SSL_get_error(ssl_, ret);
+  switch (*error) {
+    case SSL_ERROR_NONE:
+      // Success!
+      return ret;
+    case SSL_ERROR_WANT_READ:
+      RTC_LOG(LS_INFO) << " -- error want read";
+      ssl_write_needs_read_ = true;
+      SetError(EWOULDBLOCK);
+      break;
+    case SSL_ERROR_WANT_WRITE:
+      RTC_LOG(LS_INFO) << " -- error want write";
+      SetError(EWOULDBLOCK);
+      break;
+    case SSL_ERROR_ZERO_RETURN:
+      SetError(EWOULDBLOCK);
+      // do we need to signal closure?
+      break;
+    case SSL_ERROR_SSL:
+      LogSslError();
+      Error("SSL_write", ret ? ret : -1, false);
+      break;
+    default:
+      Error("SSL_write", ret ? ret : -1, false);
+      break;
+  }
+
+  return SOCKET_ERROR;
+}
+
+//
+// AsyncSocket Implementation
+//
+
+int OpenSSLAdapter::Send(const void* pv, size_t cb) {
+  switch (state_) {
+  case SSL_NONE:
+    return AsyncSocketAdapter::Send(pv, cb);
+
+  case SSL_WAIT:
+  case SSL_CONNECTING:
+    SetError(ENOTCONN);
+    return SOCKET_ERROR;
+
+  case SSL_CONNECTED:
+    break;
+
+  case SSL_ERROR:
+  default:
+    return SOCKET_ERROR;
+  }
+
+  int ret;
+  int error;
+
+  if (!pending_data_.empty()) {
+    ret = DoSslWrite(pending_data_.data(), pending_data_.size(), &error);
+    if (ret != static_cast<int>(pending_data_.size())) {
+      // We couldn't finish sending the pending data, so we definitely can't
+      // send any more data. Return with an EWOULDBLOCK error.
+      SetError(EWOULDBLOCK);
+      return SOCKET_ERROR;
+    }
+    // We completed sending the data previously passed into SSL_write! Now
+    // we're allowed to send more data.
+    pending_data_.Clear();
+  }
+
+  // OpenSSL will return an error if we try to write zero bytes
+  if (cb == 0)
+    return 0;
+
+  ret = DoSslWrite(pv, cb, &error);
+
+  // If SSL_write fails with SSL_ERROR_WANT_READ or SSL_ERROR_WANT_WRITE, this
+  // means the underlying socket is blocked on reading or (more typically)
+  // writing. When this happens, OpenSSL requires that the next call to
+  // SSL_write uses the same arguments (though, with
+  // SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, the actual buffer pointer may be
+  // different).
+  //
+  // However, after Send exits, we will have lost access to data the user of
+  // this class is trying to send, and there's no guarantee that the user of
+  // this class will call Send with the same arguements when it fails. So, we
+  // buffer the data ourselves. When we know the underlying socket is writable
+  // again from OnWriteEvent (or if Send is called again before that happens),
+  // we'll retry sending this buffered data.
+  if (error == SSL_ERROR_WANT_READ || error == SSL_ERROR_WANT_WRITE) {
+    // Shouldn't be able to get to this point if we already have pending data.
+    RTC_DCHECK(pending_data_.empty());
+    RTC_LOG(LS_WARNING)
+        << "SSL_write couldn't write to the underlying socket; buffering data.";
+    pending_data_.SetData(static_cast<const uint8_t*>(pv), cb);
+    // Since we're taking responsibility for sending this data, return its full
+    // size. The user of this class can consider it sent.
+    return cb;
+  }
+
+  return ret;
+}
+
+int OpenSSLAdapter::SendTo(const void* pv,
+                           size_t cb,
+                           const SocketAddress& addr) {
+  if (socket_->GetState() == Socket::CS_CONNECTED &&
+      addr == socket_->GetRemoteAddress()) {
+    return Send(pv, cb);
+  }
+
+  SetError(ENOTCONN);
+
+  return SOCKET_ERROR;
+}
+
+int OpenSSLAdapter::Recv(void* pv, size_t cb, int64_t* timestamp) {
+  switch (state_) {
+
+  case SSL_NONE:
+    return AsyncSocketAdapter::Recv(pv, cb, timestamp);
+
+  case SSL_WAIT:
+  case SSL_CONNECTING:
+    SetError(ENOTCONN);
+    return SOCKET_ERROR;
+
+  case SSL_CONNECTED:
+    break;
+
+  case SSL_ERROR:
+  default:
+    return SOCKET_ERROR;
+  }
+
+  // Don't trust OpenSSL with zero byte reads
+  if (cb == 0)
+    return 0;
+
+  ssl_read_needs_write_ = false;
+
+  int code = SSL_read(ssl_, pv, checked_cast<int>(cb));
+  int error = SSL_get_error(ssl_, code);
+  switch (error) {
+    case SSL_ERROR_NONE:
+      return code;
+    case SSL_ERROR_WANT_READ:
+      SetError(EWOULDBLOCK);
+      break;
+    case SSL_ERROR_WANT_WRITE:
+      ssl_read_needs_write_ = true;
+      SetError(EWOULDBLOCK);
+      break;
+    case SSL_ERROR_ZERO_RETURN:
+      SetError(EWOULDBLOCK);
+      // do we need to signal closure?
+      break;
+    case SSL_ERROR_SSL:
+      LogSslError();
+      Error("SSL_read", (code ? code : -1), false);
+      break;
+    default:
+      Error("SSL_read", (code ? code : -1), false);
+      break;
+  }
+
+  return SOCKET_ERROR;
+}
+
+int OpenSSLAdapter::RecvFrom(void* pv,
+                             size_t cb,
+                             SocketAddress* paddr,
+                             int64_t* timestamp) {
+  if (socket_->GetState() == Socket::CS_CONNECTED) {
+    int ret = Recv(pv, cb, timestamp);
+
+    *paddr = GetRemoteAddress();
+
+    return ret;
+  }
+
+  SetError(ENOTCONN);
+
+  return SOCKET_ERROR;
+}
+
+int OpenSSLAdapter::Close() {
+  Cleanup();
+  state_ = restartable_ ? SSL_WAIT : SSL_NONE;
+  return AsyncSocketAdapter::Close();
+}
+
+Socket::ConnState OpenSSLAdapter::GetState() const {
+  //if (signal_close_)
+  //  return CS_CONNECTED;
+  ConnState state = socket_->GetState();
+  if ((state == CS_CONNECTED)
+      && ((state_ == SSL_WAIT) || (state_ == SSL_CONNECTING)))
+    state = CS_CONNECTING;
+  return state;
+}
+
+bool OpenSSLAdapter::IsResumedSession() {
+  return (ssl_ && SSL_session_reused(ssl_) == 1);
+}
+
+void OpenSSLAdapter::OnMessage(Message* msg) {
+  if (MSG_TIMEOUT == msg->message_id) {
+    RTC_LOG(LS_INFO) << "DTLS timeout expired";
+    DTLSv1_handle_timeout(ssl_);
+    ContinueSSL();
+  }
+}
+
+void OpenSSLAdapter::OnConnectEvent(AsyncSocket* socket) {
+  RTC_LOG(LS_INFO) << "OpenSSLAdapter::OnConnectEvent";
+  if (state_ != SSL_WAIT) {
+    RTC_DCHECK(state_ == SSL_NONE);
+    AsyncSocketAdapter::OnConnectEvent(socket);
+    return;
+  }
+
+  state_ = SSL_CONNECTING;
+  if (int err = BeginSSL()) {
+    AsyncSocketAdapter::OnCloseEvent(socket, err);
+  }
+}
+
+void OpenSSLAdapter::OnReadEvent(AsyncSocket* socket) {
+  if (state_ == SSL_NONE) {
+    AsyncSocketAdapter::OnReadEvent(socket);
+    return;
+  }
+
+  if (state_ == SSL_CONNECTING) {
+    if (int err = ContinueSSL()) {
+      Error("ContinueSSL", err);
+    }
+    return;
+  }
+
+  if (state_ != SSL_CONNECTED)
+    return;
+
+  // Don't let ourselves go away during the callbacks
+  //PRefPtr<OpenSSLAdapter> lock(this); // TODO: fix this
+  if (ssl_write_needs_read_)  {
+    AsyncSocketAdapter::OnWriteEvent(socket);
+  }
+
+  AsyncSocketAdapter::OnReadEvent(socket);
+}
+
+void OpenSSLAdapter::OnWriteEvent(AsyncSocket* socket) {
+  if (state_ == SSL_NONE) {
+    AsyncSocketAdapter::OnWriteEvent(socket);
+    return;
+  }
+
+  if (state_ == SSL_CONNECTING) {
+    if (int err = ContinueSSL()) {
+      Error("ContinueSSL", err);
+    }
+    return;
+  }
+
+  if (state_ != SSL_CONNECTED)
+    return;
+
+  // Don't let ourselves go away during the callbacks
+  //PRefPtr<OpenSSLAdapter> lock(this); // TODO: fix this
+
+  if (ssl_read_needs_write_)  {
+    AsyncSocketAdapter::OnReadEvent(socket);
+  }
+
+  // If a previous SSL_write failed due to the underlying socket being blocked,
+  // this will attempt finishing the write operation.
+  if (!pending_data_.empty()) {
+    int error;
+    if (DoSslWrite(pending_data_.data(), pending_data_.size(), &error) ==
+        static_cast<int>(pending_data_.size())) {
+      pending_data_.Clear();
+    }
+  }
+
+  AsyncSocketAdapter::OnWriteEvent(socket);
+}
+
+void OpenSSLAdapter::OnCloseEvent(AsyncSocket* socket, int err) {
+  RTC_LOG(LS_INFO) << "OpenSSLAdapter::OnCloseEvent(" << err << ")";
+  AsyncSocketAdapter::OnCloseEvent(socket, err);
+}
+
+bool OpenSSLAdapter::VerifyServerName(SSL* ssl, const char* host,
+                                      bool ignore_bad_cert) {
+  if (!host)
+    return false;
+
+  // Checking the return from SSL_get_peer_certificate here is not strictly
+  // necessary.  With our setup, it is not possible for it to return
+  // null.  However, it is good form to check the return.
+  X509* certificate = SSL_get_peer_certificate(ssl);
+  if (!certificate)
+    return false;
+
+  // Logging certificates is extremely verbose. So it is disabled by default.
+#ifdef LOG_CERTIFICATES
+  {
+    RTC_DLOG(LS_INFO) << "Certificate from server:";
+    BIO* mem = BIO_new(BIO_s_mem());
+    X509_print_ex(mem, certificate, XN_FLAG_SEP_CPLUS_SPC, X509_FLAG_NO_HEADER);
+    BIO_write(mem, "\0", 1);
+    char* buffer;
+    BIO_get_mem_data(mem, &buffer);
+    RTC_DLOG(LS_INFO) << buffer;
+    BIO_free(mem);
+
+    char* cipher_description =
+        SSL_CIPHER_description(SSL_get_current_cipher(ssl), nullptr, 128);
+    RTC_DLOG(LS_INFO) << "Cipher: " << cipher_description;
+    OPENSSL_free(cipher_description);
+  }
+#endif
+
+  bool ok = false;
+  GENERAL_NAMES* names = reinterpret_cast<GENERAL_NAMES*>(
+      X509_get_ext_d2i(certificate, NID_subject_alt_name, nullptr, nullptr));
+  if (names) {
+    for (size_t i = 0; i < static_cast<size_t>(sk_GENERAL_NAME_num(names));
+         i++) {
+      const GENERAL_NAME* name = sk_GENERAL_NAME_value(names, i);
+      if (name->type != GEN_DNS)
+        continue;
+      std::string value(
+          reinterpret_cast<const char*>(ASN1_STRING_data(name->d.dNSName)),
+          ASN1_STRING_length(name->d.dNSName));
+      // string_match takes NUL-terminated strings, so check for embedded NULs.
+      if (value.find('\0') != std::string::npos)
+        continue;
+      if (string_match(host, value.c_str())) {
+        ok = true;
+        break;
+      }
+    }
+    GENERAL_NAMES_free(names);
+  }
+
+  char data[256];
+  X509_NAME* subject;
+  if (!ok && ((subject = X509_get_subject_name(certificate)) != nullptr) &&
+      (X509_NAME_get_text_by_NID(subject, NID_commonName, data, sizeof(data)) >
+       0)) {
+    data[sizeof(data)-1] = 0;
+    if (_stricmp(data, host) == 0)
+      ok = true;
+  }
+
+  X509_free(certificate);
+
+  // This should only ever be turned on for debugging and development.
+  if (!ok && ignore_bad_cert) {
+    RTC_DLOG(LS_WARNING) << "TLS certificate check FAILED.  "
+                         << "Allowing connection anyway.";
+    ok = true;
+  }
+
+  return ok;
+}
+
+bool OpenSSLAdapter::SSLPostConnectionCheck(SSL* ssl, const char* host) {
+  bool ok = VerifyServerName(ssl, host, ignore_bad_cert_);
+
+  if (ok) {
+    ok = (SSL_get_verify_result(ssl) == X509_V_OK ||
+          custom_verification_succeeded_);
+  }
+
+  if (!ok && ignore_bad_cert_) {
+    RTC_DLOG(LS_INFO) << "Other TLS post connection checks failed.";
+    ok = true;
+  }
+
+  return ok;
+}
+
+#if !defined(NDEBUG)
+
+// We only use this for tracing and so it is only needed in debug mode
+
+void OpenSSLAdapter::SSLInfoCallback(const SSL* s, int where, int ret) {
+  const char* str = "undefined";
+  int w = where & ~SSL_ST_MASK;
+  if (w & SSL_ST_CONNECT) {
+    str = "SSL_connect";
+  } else if (w & SSL_ST_ACCEPT) {
+    str = "SSL_accept";
+  }
+  if (where & SSL_CB_LOOP) {
+    RTC_DLOG(LS_INFO) << str << ":" << SSL_state_string_long(s);
+  } else if (where & SSL_CB_ALERT) {
+    str = (where & SSL_CB_READ) ? "read" : "write";
+    RTC_DLOG(LS_INFO) << "SSL3 alert " << str << ":"
+                      << SSL_alert_type_string_long(ret) << ":"
+                      << SSL_alert_desc_string_long(ret);
+  } else if (where & SSL_CB_EXIT) {
+    if (ret == 0) {
+      RTC_DLOG(LS_INFO) << str << ":failed in " << SSL_state_string_long(s);
+    } else if (ret < 0) {
+      RTC_DLOG(LS_INFO) << str << ":error in " << SSL_state_string_long(s);
+    }
+  }
+}
+
+#endif
+
+int OpenSSLAdapter::SSLVerifyCallback(int ok, X509_STORE_CTX* store) {
+#if !defined(NDEBUG)
+  if (!ok) {
+    char data[256];
+    X509* cert = X509_STORE_CTX_get_current_cert(store);
+    int depth = X509_STORE_CTX_get_error_depth(store);
+    int err = X509_STORE_CTX_get_error(store);
+
+    RTC_DLOG(LS_INFO) << "Error with certificate at depth: " << depth;
+    X509_NAME_oneline(X509_get_issuer_name(cert), data, sizeof(data));
+    RTC_DLOG(LS_INFO) << "  issuer  = " << data;
+    X509_NAME_oneline(X509_get_subject_name(cert), data, sizeof(data));
+    RTC_DLOG(LS_INFO) << "  subject = " << data;
+    RTC_DLOG(LS_INFO) << "  err     = " << err << ":"
+                      << X509_verify_cert_error_string(err);
+  }
+#endif
+
+  // Get our stream pointer from the store
+  SSL* ssl = reinterpret_cast<SSL*>(
+                X509_STORE_CTX_get_ex_data(store,
+                  SSL_get_ex_data_X509_STORE_CTX_idx()));
+
+  OpenSSLAdapter* stream =
+    reinterpret_cast<OpenSSLAdapter*>(SSL_get_app_data(ssl));
+
+  if (!ok && custom_verify_callback_) {
+    void* cert =
+        reinterpret_cast<void*>(X509_STORE_CTX_get_current_cert(store));
+    if (custom_verify_callback_(cert)) {
+      stream->custom_verification_succeeded_ = true;
+      RTC_LOG(LS_INFO) << "validated certificate using custom callback";
+      ok = true;
+    }
+  }
+
+  // Should only be used for debugging and development.
+  if (!ok && stream->ignore_bad_cert_) {
+    RTC_DLOG(LS_WARNING) << "Ignoring cert error while verifying cert chain";
+    ok = 1;
+  }
+
+  return ok;
+}
+
+int OpenSSLAdapter::NewSSLSessionCallback(SSL* ssl, SSL_SESSION* session) {
+  OpenSSLAdapter* stream =
+      reinterpret_cast<OpenSSLAdapter*>(SSL_get_app_data(ssl));
+  RTC_DCHECK(stream->factory_);
+  RTC_LOG(LS_INFO) << "Caching SSL session for " << stream->ssl_host_name_;
+  stream->factory_->AddSession(stream->ssl_host_name_, session);
+  return 1;  // We've taken ownership of the session; OpenSSL shouldn't free it.
+}
+
+bool OpenSSLAdapter::ConfigureTrustedRootCertificates(SSL_CTX* ctx) {
+  // Add the root cert that we care about to the SSL context
+  int count_of_added_certs = 0;
+  for (size_t i = 0; i < arraysize(kSSLCertCertificateList); i++) {
+    const unsigned char* cert_buffer = kSSLCertCertificateList[i];
+    size_t cert_buffer_len = kSSLCertCertificateSizeList[i];
+    X509* cert =
+        d2i_X509(nullptr, &cert_buffer, checked_cast<long>(cert_buffer_len));
+    if (cert) {
+      int return_value = X509_STORE_add_cert(SSL_CTX_get_cert_store(ctx), cert);
+      if (return_value == 0) {
+        RTC_LOG(LS_WARNING) << "Unable to add certificate.";
+      } else {
+        count_of_added_certs++;
+      }
+      X509_free(cert);
+    }
+  }
+  return count_of_added_certs > 0;
+}
+
+SSL_CTX* OpenSSLAdapter::CreateContext(SSLMode mode, bool enable_cache) {
+  // Use (D)TLS 1.2.
+  // Note: BoringSSL supports a range of versions by setting max/min version
+  // (Default V1.0 to V1.2). However (D)TLSv1_2_client_method functions used
+  // below in OpenSSL only support V1.2.
+  SSL_CTX* ctx = nullptr;
+#ifdef OPENSSL_IS_BORINGSSL
+  ctx = SSL_CTX_new(mode == SSL_MODE_DTLS ? DTLS_method() : TLS_method());
+#else
+  ctx = SSL_CTX_new(mode == SSL_MODE_DTLS ? DTLSv1_2_client_method()
+                                          : TLSv1_2_client_method());
+#endif  // OPENSSL_IS_BORINGSSL
+  if (ctx == nullptr) {
+    unsigned long error = ERR_get_error();  // NOLINT: type used by OpenSSL.
+    RTC_LOG(LS_WARNING) << "SSL_CTX creation failed: " << '"'
+                        << ERR_reason_error_string(error) << "\" "
+                        << "(error=" << error << ')';
+    return nullptr;
+  }
+  if (!ConfigureTrustedRootCertificates(ctx)) {
+    SSL_CTX_free(ctx);
+    return nullptr;
+  }
+
+#if !defined(NDEBUG)
+  SSL_CTX_set_info_callback(ctx, SSLInfoCallback);
+#endif
+
+  SSL_CTX_set_verify(ctx, SSL_VERIFY_PEER, SSLVerifyCallback);
+  SSL_CTX_set_verify_depth(ctx, 4);
+  // Use defaults, but disable HMAC-SHA256 and HMAC-SHA384 ciphers
+  // (note that SHA256 and SHA384 only select legacy CBC ciphers).
+  // Additionally disable HMAC-SHA1 ciphers in ECDSA. These are the remaining
+  // CBC-mode ECDSA ciphers.
+  SSL_CTX_set_cipher_list(
+      ctx, "ALL:!SHA256:!SHA384:!aPSK:!ECDSA+SHA1:!ADH:!LOW:!EXP:!MD5");
+
+  if (mode == SSL_MODE_DTLS) {
+    SSL_CTX_set_read_ahead(ctx, 1);
+  }
+
+  if (enable_cache) {
+    SSL_CTX_set_session_cache_mode(ctx, SSL_SESS_CACHE_CLIENT);
+    SSL_CTX_sess_set_new_cb(ctx, &OpenSSLAdapter::NewSSLSessionCallback);
+  }
+
+  return ctx;
+}
+
+std::string TransformAlpnProtocols(
+    const std::vector<std::string>& alpn_protocols) {
+  // Transforms the alpn_protocols list to the format expected by
+  // Open/BoringSSL. This requires joining the protocols into a single string
+  // and prepending a character with the size of the protocol string before
+  // each protocol.
+  std::string transformed_alpn;
+  for (const std::string& proto : alpn_protocols) {
+    if (proto.size() == 0 || proto.size() > 0xFF) {
+      RTC_LOG(LS_ERROR) << "OpenSSLAdapter::Error("
+                        << "TransformAlpnProtocols received proto with size "
+                        << proto.size() << ")";
+      return "";
+    }
+    transformed_alpn += static_cast<char>(proto.size());
+    transformed_alpn += proto;
+    RTC_LOG(LS_VERBOSE) << "TransformAlpnProtocols: Adding proto: " << proto;
+  }
+  return transformed_alpn;
+}
+
+//////////////////////////////////////////////////////////////////////
+// OpenSSLAdapterFactory
+//////////////////////////////////////////////////////////////////////
+
+OpenSSLAdapterFactory::OpenSSLAdapterFactory()
+    : ssl_mode_(SSL_MODE_TLS), ssl_ctx_(nullptr) {}
+
+OpenSSLAdapterFactory::~OpenSSLAdapterFactory() {
+  for (auto it : sessions_) {
+    SSL_SESSION_free(it.second);
+  }
+  SSL_CTX_free(ssl_ctx_);
+}
+
+void OpenSSLAdapterFactory::SetMode(SSLMode mode) {
+  RTC_DCHECK(!ssl_ctx_);
+  ssl_mode_ = mode;
+}
+
+OpenSSLAdapter* OpenSSLAdapterFactory::CreateAdapter(AsyncSocket* socket) {
+  if (!ssl_ctx_) {
+    bool enable_cache = true;
+    ssl_ctx_ = OpenSSLAdapter::CreateContext(ssl_mode_, enable_cache);
+    if (!ssl_ctx_) {
+      return nullptr;
+    }
+  }
+
+  return new OpenSSLAdapter(socket, this);
+}
+
+SSL_SESSION* OpenSSLAdapterFactory::LookupSession(const std::string& hostname) {
+  auto it = sessions_.find(hostname);
+  return (it != sessions_.end()) ? it->second : nullptr;
+}
+
+void OpenSSLAdapterFactory::AddSession(const std::string& hostname,
+                                       SSL_SESSION* new_session) {
+  SSL_SESSION* old_session = LookupSession(hostname);
+  SSL_SESSION_free(old_session);
+  sessions_[hostname] = new_session;
+}
+
+} // namespace rtc
diff --git a/rtc_base/openssladapter.h b/rtc_base/openssladapter.h
new file mode 100644
index 0000000..2d0474e
--- /dev/null
+++ b/rtc_base/openssladapter.h
@@ -0,0 +1,178 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_OPENSSLADAPTER_H_
+#define RTC_BASE_OPENSSLADAPTER_H_
+
+#include <map>
+#include <string>
+#include "rtc_base/buffer.h"
+#include "rtc_base/messagehandler.h"
+#include "rtc_base/messagequeue.h"
+#include "rtc_base/opensslidentity.h"
+#include "rtc_base/ssladapter.h"
+
+typedef struct ssl_st SSL;
+typedef struct ssl_ctx_st SSL_CTX;
+typedef struct x509_store_ctx_st X509_STORE_CTX;
+typedef struct ssl_session_st SSL_SESSION;
+
+namespace rtc {
+
+class OpenSSLAdapterFactory;
+
+class OpenSSLAdapter : public SSLAdapter, public MessageHandler {
+ public:
+  static bool InitializeSSL(VerificationCallback callback);
+  static bool CleanupSSL();
+
+  explicit OpenSSLAdapter(AsyncSocket* socket,
+                          OpenSSLAdapterFactory* factory = nullptr);
+  ~OpenSSLAdapter() override;
+
+  void SetIgnoreBadCert(bool ignore) override;
+  void SetAlpnProtocols(const std::vector<std::string>& protos) override;
+  void SetEllipticCurves(const std::vector<std::string>& curves) override;
+
+  void SetMode(SSLMode mode) override;
+  void SetIdentity(SSLIdentity* identity) override;
+  void SetRole(SSLRole role) override;
+  AsyncSocket* Accept(SocketAddress* paddr) override;
+  int StartSSL(const char* hostname, bool restartable) override;
+  int Send(const void* pv, size_t cb) override;
+  int SendTo(const void* pv, size_t cb, const SocketAddress& addr) override;
+  int Recv(void* pv, size_t cb, int64_t* timestamp) override;
+  int RecvFrom(void* pv,
+               size_t cb,
+               SocketAddress* paddr,
+               int64_t* timestamp) override;
+  int Close() override;
+
+  // Note that the socket returns ST_CONNECTING while SSL is being negotiated.
+  ConnState GetState() const override;
+  bool IsResumedSession() override;
+
+  // Creates a new SSL_CTX object, configured for client-to-server usage
+  // with SSLMode |mode|, and if |enable_cache| is true, with support for
+  // storing successful sessions so that they can be later resumed.
+  // OpenSSLAdapterFactory will call this method to create its own internal
+  // SSL_CTX, and OpenSSLAdapter will also call this when used without a
+  // factory.
+  static SSL_CTX* CreateContext(SSLMode mode, bool enable_cache);
+
+ protected:
+  void OnConnectEvent(AsyncSocket* socket) override;
+  void OnReadEvent(AsyncSocket* socket) override;
+  void OnWriteEvent(AsyncSocket* socket) override;
+  void OnCloseEvent(AsyncSocket* socket, int err) override;
+
+ private:
+  enum SSLState {
+    SSL_NONE, SSL_WAIT, SSL_CONNECTING, SSL_CONNECTED, SSL_ERROR
+  };
+
+  enum { MSG_TIMEOUT };
+
+  int BeginSSL();
+  int ContinueSSL();
+  void Error(const char* context, int err, bool signal = true);
+  void Cleanup();
+
+  // Return value and arguments have the same meanings as for Send; |error| is
+  // an output parameter filled with the result of SSL_get_error.
+  int DoSslWrite(const void* pv, size_t cb, int* error);
+
+  void OnMessage(Message* msg) override;
+
+  static bool VerifyServerName(SSL* ssl, const char* host,
+                               bool ignore_bad_cert);
+  bool SSLPostConnectionCheck(SSL* ssl, const char* host);
+#if !defined(NDEBUG)
+  // In debug builds, logs info about the state of the SSL connection.
+  static void SSLInfoCallback(const SSL* ssl, int where, int ret);
+#endif
+  static int SSLVerifyCallback(int ok, X509_STORE_CTX* store);
+  static VerificationCallback custom_verify_callback_;
+  friend class OpenSSLStreamAdapter;  // for custom_verify_callback_;
+
+  // If the SSL_CTX was created with |enable_cache| set to true, this callback
+  // will be called when a SSL session has been successfully established,
+  // to allow its SSL_SESSION* to be cached for later resumption.
+  static int NewSSLSessionCallback(SSL* ssl, SSL_SESSION* session);
+
+  static bool ConfigureTrustedRootCertificates(SSL_CTX* ctx);
+
+  // Parent object that maintains shared state.
+  // Can be null if state sharing is not needed.
+  OpenSSLAdapterFactory* factory_;
+
+  SSLState state_;
+  std::unique_ptr<OpenSSLIdentity> identity_;
+  SSLRole role_;
+  bool ssl_read_needs_write_;
+  bool ssl_write_needs_read_;
+  // If true, socket will retain SSL configuration after Close.
+  // TODO(juberti): Remove this unused flag.
+  bool restartable_;
+
+  // This buffer is used if SSL_write fails with SSL_ERROR_WANT_WRITE, which
+  // means we need to keep retrying with *the same exact data* until it
+  // succeeds. Afterwards it will be cleared.
+  Buffer pending_data_;
+
+  SSL* ssl_;
+  SSL_CTX* ssl_ctx_;
+  std::string ssl_host_name_;
+  // Do DTLS or not
+  SSLMode ssl_mode_;
+  // If true, the server certificate need not match the configured hostname.
+  bool ignore_bad_cert_;
+  // List of protocols to be used in the TLS ALPN extension.
+  std::vector<std::string> alpn_protocols_;
+  // List of elliptic curves to be used in the TLS elliptic curves extension.
+  std::vector<std::string> elliptic_curves_;
+
+  bool custom_verification_succeeded_;
+};
+
+std::string TransformAlpnProtocols(const std::vector<std::string>& protos);
+
+/////////////////////////////////////////////////////////////////////////////
+class OpenSSLAdapterFactory : public SSLAdapterFactory {
+ public:
+  OpenSSLAdapterFactory();
+  ~OpenSSLAdapterFactory() override;
+
+  void SetMode(SSLMode mode) override;
+  OpenSSLAdapter* CreateAdapter(AsyncSocket* socket) override;
+
+  static OpenSSLAdapterFactory* Create();
+
+ private:
+  SSL_CTX* ssl_ctx() { return ssl_ctx_; }
+  // Looks up a session by hostname. The returned SSL_SESSION is not up_refed.
+  SSL_SESSION* LookupSession(const std::string& hostname);
+  // Adds a session to the cache, and up_refs it. Any existing session with the
+  // same hostname is replaced.
+  void AddSession(const std::string& hostname, SSL_SESSION* session);
+  friend class OpenSSLAdapter;
+
+  SSLMode ssl_mode_;
+  // Holds the shared SSL_CTX for all created adapters.
+  SSL_CTX* ssl_ctx_;
+  // Map of hostnames to SSL_SESSIONs; holds references to the SSL_SESSIONs,
+  // which are cleaned up when the factory is destroyed.
+  // TODO(juberti): Add LRU eviction to keep the cache from growing forever.
+  std::map<std::string, SSL_SESSION*> sessions_;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_OPENSSLADAPTER_H_
diff --git a/rtc_base/openssladapter_unittest.cc b/rtc_base/openssladapter_unittest.cc
new file mode 100644
index 0000000..d043353
--- /dev/null
+++ b/rtc_base/openssladapter_unittest.cc
@@ -0,0 +1,41 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "rtc_base/gunit.h"
+#include "rtc_base/openssladapter.h"
+
+namespace rtc {
+
+TEST(OpenSSLAdapterTest, TestTransformAlpnProtocols) {
+  EXPECT_EQ("", TransformAlpnProtocols(std::vector<std::string>()));
+
+  // Protocols larger than 255 characters (whose size can't be fit in a byte),
+  // can't be converted, and an empty string will be returned.
+  std::string large_protocol(256, 'a');
+  EXPECT_EQ("",
+            TransformAlpnProtocols(std::vector<std::string>{large_protocol}));
+
+  // One protocol test.
+  std::vector<std::string> alpn_protos{"h2"};
+  std::stringstream expected_response;
+  expected_response << static_cast<char>(2) << "h2";
+  EXPECT_EQ(expected_response.str(), TransformAlpnProtocols(alpn_protos));
+
+  // Standard protocols test (h2,http/1.1).
+  alpn_protos.push_back("http/1.1");
+  expected_response << static_cast<char>(8) << "http/1.1";
+  EXPECT_EQ(expected_response.str(), TransformAlpnProtocols(alpn_protos));
+}
+
+}  // namespace rtc
diff --git a/rtc_base/openssldigest.cc b/rtc_base/openssldigest.cc
new file mode 100644
index 0000000..32cd4af
--- /dev/null
+++ b/rtc_base/openssldigest.cc
@@ -0,0 +1,119 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/openssldigest.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/openssl.h"
+
+namespace rtc {
+
+OpenSSLDigest::OpenSSLDigest(const std::string& algorithm) {
+  ctx_ = EVP_MD_CTX_new();
+  RTC_CHECK(ctx_ != nullptr);
+  EVP_MD_CTX_init(ctx_);
+  if (GetDigestEVP(algorithm, &md_)) {
+    EVP_DigestInit_ex(ctx_, md_, nullptr);
+  } else {
+    md_ = nullptr;
+  }
+}
+
+OpenSSLDigest::~OpenSSLDigest() {
+  EVP_MD_CTX_destroy(ctx_);
+}
+
+size_t OpenSSLDigest::Size() const {
+  if (!md_) {
+    return 0;
+  }
+  return EVP_MD_size(md_);
+}
+
+void OpenSSLDigest::Update(const void* buf, size_t len) {
+  if (!md_) {
+    return;
+  }
+  EVP_DigestUpdate(ctx_, buf, len);
+}
+
+size_t OpenSSLDigest::Finish(void* buf, size_t len) {
+  if (!md_ || len < Size()) {
+    return 0;
+  }
+  unsigned int md_len;
+  EVP_DigestFinal_ex(ctx_, static_cast<unsigned char*>(buf), &md_len);
+  EVP_DigestInit_ex(ctx_, md_, nullptr);  // prepare for future Update()s
+  RTC_DCHECK(md_len == Size());
+  return md_len;
+}
+
+bool OpenSSLDigest::GetDigestEVP(const std::string& algorithm,
+                                 const EVP_MD** mdp) {
+  const EVP_MD* md;
+  if (algorithm == DIGEST_MD5) {
+    md = EVP_md5();
+  } else if (algorithm == DIGEST_SHA_1) {
+    md = EVP_sha1();
+  } else if (algorithm == DIGEST_SHA_224) {
+    md = EVP_sha224();
+  } else if (algorithm == DIGEST_SHA_256) {
+    md = EVP_sha256();
+  } else if (algorithm == DIGEST_SHA_384) {
+    md = EVP_sha384();
+  } else if (algorithm == DIGEST_SHA_512) {
+    md = EVP_sha512();
+  } else {
+    return false;
+  }
+
+  // Can't happen
+  RTC_DCHECK(EVP_MD_size(md) >= 16);
+  *mdp = md;
+  return true;
+}
+
+bool OpenSSLDigest::GetDigestName(const EVP_MD* md,
+                                  std::string* algorithm) {
+  RTC_DCHECK(md != nullptr);
+  RTC_DCHECK(algorithm != nullptr);
+
+  int md_type = EVP_MD_type(md);
+  if (md_type == NID_md5) {
+    *algorithm = DIGEST_MD5;
+  } else if (md_type == NID_sha1) {
+    *algorithm = DIGEST_SHA_1;
+  } else if (md_type == NID_sha224) {
+    *algorithm = DIGEST_SHA_224;
+  } else if (md_type == NID_sha256) {
+    *algorithm = DIGEST_SHA_256;
+  } else if (md_type == NID_sha384) {
+    *algorithm = DIGEST_SHA_384;
+  } else if (md_type == NID_sha512) {
+    *algorithm = DIGEST_SHA_512;
+  } else {
+    algorithm->clear();
+    return false;
+  }
+
+  return true;
+}
+
+bool OpenSSLDigest::GetDigestSize(const std::string& algorithm,
+                                  size_t* length) {
+  const EVP_MD *md;
+  if (!GetDigestEVP(algorithm, &md))
+    return false;
+
+  *length = EVP_MD_size(md);
+  return true;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/openssldigest.h b/rtc_base/openssldigest.h
new file mode 100644
index 0000000..2b65867
--- /dev/null
+++ b/rtc_base/openssldigest.h
@@ -0,0 +1,50 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_OPENSSLDIGEST_H_
+#define RTC_BASE_OPENSSLDIGEST_H_
+
+#include <openssl/evp.h>
+
+#include "rtc_base/messagedigest.h"
+
+namespace rtc {
+
+// An implementation of the digest class that uses OpenSSL.
+class OpenSSLDigest : public MessageDigest {
+ public:
+  // Creates an OpenSSLDigest with |algorithm| as the hash algorithm.
+  explicit OpenSSLDigest(const std::string& algorithm);
+  ~OpenSSLDigest() override;
+  // Returns the digest output size (e.g. 16 bytes for MD5).
+  size_t Size() const override;
+  // Updates the digest with |len| bytes from |buf|.
+  void Update(const void* buf, size_t len) override;
+  // Outputs the digest value to |buf| with length |len|.
+  size_t Finish(void* buf, size_t len) override;
+
+  // Helper function to look up a digest's EVP by name.
+  static bool GetDigestEVP(const std::string &algorithm,
+                           const EVP_MD** md);
+  // Helper function to look up a digest's name by EVP.
+  static bool GetDigestName(const EVP_MD* md,
+                            std::string* algorithm);
+  // Helper function to get the length of a digest.
+  static bool GetDigestSize(const std::string &algorithm,
+                            size_t* len);
+
+ private:
+  EVP_MD_CTX* ctx_ = nullptr;
+  const EVP_MD* md_;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_OPENSSLDIGEST_H_
diff --git a/rtc_base/opensslidentity.cc b/rtc_base/opensslidentity.cc
new file mode 100644
index 0000000..9f7c63b
--- /dev/null
+++ b/rtc_base/opensslidentity.cc
@@ -0,0 +1,635 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/opensslidentity.h"
+
+#include <memory>
+
+#if defined(WEBRTC_WIN)
+// Must be included first before openssl headers.
+#include "rtc_base/win32.h"  // NOLINT
+#endif  // WEBRTC_WIN
+
+#include <openssl/bio.h>
+#include <openssl/bn.h>
+#include <openssl/crypto.h>
+#include <openssl/err.h>
+#include <openssl/pem.h>
+#include <openssl/rsa.h>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/helpers.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/openssl.h"
+#include "rtc_base/openssldigest.h"
+#include "rtc_base/ptr_util.h"
+
+namespace rtc {
+
+// We could have exposed a myriad of parameters for the crypto stuff,
+// but keeping it simple seems best.
+
+// Random bits for certificate serial number
+static const int SERIAL_RAND_BITS = 64;
+
+// Generate a key pair. Caller is responsible for freeing the returned object.
+static EVP_PKEY* MakeKey(const KeyParams& key_params) {
+  RTC_LOG(LS_INFO) << "Making key pair";
+  EVP_PKEY* pkey = EVP_PKEY_new();
+  if (key_params.type() == KT_RSA) {
+    int key_length = key_params.rsa_params().mod_size;
+    BIGNUM* exponent = BN_new();
+    RSA* rsa = RSA_new();
+    if (!pkey || !exponent || !rsa ||
+        !BN_set_word(exponent, key_params.rsa_params().pub_exp) ||
+        !RSA_generate_key_ex(rsa, key_length, exponent, nullptr) ||
+        !EVP_PKEY_assign_RSA(pkey, rsa)) {
+      EVP_PKEY_free(pkey);
+      BN_free(exponent);
+      RSA_free(rsa);
+      RTC_LOG(LS_ERROR) << "Failed to make RSA key pair";
+      return nullptr;
+    }
+    // ownership of rsa struct was assigned, don't free it.
+    BN_free(exponent);
+  } else if (key_params.type() == KT_ECDSA) {
+    if (key_params.ec_curve() == EC_NIST_P256) {
+      EC_KEY* ec_key = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
+
+      // Ensure curve name is included when EC key is serialized.
+      // Without this call, OpenSSL versions before 1.1.0 will create
+      // certificates that don't work for TLS.
+      // This is a no-op for BoringSSL and OpenSSL 1.1.0+
+      EC_KEY_set_asn1_flag(ec_key, OPENSSL_EC_NAMED_CURVE);
+
+      if (!pkey || !ec_key || !EC_KEY_generate_key(ec_key) ||
+          !EVP_PKEY_assign_EC_KEY(pkey, ec_key)) {
+        EVP_PKEY_free(pkey);
+        EC_KEY_free(ec_key);
+        RTC_LOG(LS_ERROR) << "Failed to make EC key pair";
+        return nullptr;
+      }
+      // ownership of ec_key struct was assigned, don't free it.
+    } else {
+      // Add generation of any other curves here.
+      EVP_PKEY_free(pkey);
+      RTC_LOG(LS_ERROR) << "ECDSA key requested for unknown curve";
+      return nullptr;
+    }
+  } else {
+    EVP_PKEY_free(pkey);
+    RTC_LOG(LS_ERROR) << "Key type requested not understood";
+    return nullptr;
+  }
+
+  RTC_LOG(LS_INFO) << "Returning key pair";
+  return pkey;
+}
+
+// Generate a self-signed certificate, with the public key from the
+// given key pair. Caller is responsible for freeing the returned object.
+static X509* MakeCertificate(EVP_PKEY* pkey, const SSLIdentityParams& params) {
+  RTC_LOG(LS_INFO) << "Making certificate for " << params.common_name;
+  X509* x509 = nullptr;
+  BIGNUM* serial_number = nullptr;
+  X509_NAME* name = nullptr;
+  time_t epoch_off = 0;  // Time offset since epoch.
+
+  if ((x509 = X509_new()) == nullptr)
+    goto error;
+
+  if (!X509_set_pubkey(x509, pkey))
+    goto error;
+
+  // serial number
+  // temporary reference to serial number inside x509 struct
+  ASN1_INTEGER* asn1_serial_number;
+  if ((serial_number = BN_new()) == nullptr ||
+      !BN_pseudo_rand(serial_number, SERIAL_RAND_BITS, 0, 0) ||
+      (asn1_serial_number = X509_get_serialNumber(x509)) == nullptr ||
+      !BN_to_ASN1_INTEGER(serial_number, asn1_serial_number))
+    goto error;
+
+  if (!X509_set_version(x509, 2L))  // version 3
+    goto error;
+
+  // There are a lot of possible components for the name entries. In
+  // our P2P SSL mode however, the certificates are pre-exchanged
+  // (through the secure XMPP channel), and so the certificate
+  // identification is arbitrary. It can't be empty, so we set some
+  // arbitrary common_name. Note that this certificate goes out in
+  // clear during SSL negotiation, so there may be a privacy issue in
+  // putting anything recognizable here.
+  if ((name = X509_NAME_new()) == nullptr ||
+      !X509_NAME_add_entry_by_NID(name, NID_commonName, MBSTRING_UTF8,
+                                  (unsigned char*)params.common_name.c_str(),
+                                  -1, -1, 0) ||
+      !X509_set_subject_name(x509, name) || !X509_set_issuer_name(x509, name))
+    goto error;
+
+  if (!X509_time_adj(X509_get_notBefore(x509), params.not_before, &epoch_off) ||
+      !X509_time_adj(X509_get_notAfter(x509), params.not_after, &epoch_off))
+    goto error;
+
+  if (!X509_sign(x509, pkey, EVP_sha256()))
+    goto error;
+
+  BN_free(serial_number);
+  X509_NAME_free(name);
+  RTC_LOG(LS_INFO) << "Returning certificate";
+  return x509;
+
+error:
+  BN_free(serial_number);
+  X509_NAME_free(name);
+  X509_free(x509);
+  return nullptr;
+}
+
+// This dumps the SSL error stack to the log.
+static void LogSSLErrors(const std::string& prefix) {
+  char error_buf[200];
+  unsigned long err;
+
+  while ((err = ERR_get_error()) != 0) {
+    ERR_error_string_n(err, error_buf, sizeof(error_buf));
+    RTC_LOG(LS_ERROR) << prefix << ": " << error_buf << "\n";
+  }
+}
+
+OpenSSLKeyPair* OpenSSLKeyPair::Generate(const KeyParams& key_params) {
+  EVP_PKEY* pkey = MakeKey(key_params);
+  if (!pkey) {
+    LogSSLErrors("Generating key pair");
+    return nullptr;
+  }
+  return new OpenSSLKeyPair(pkey);
+}
+
+OpenSSLKeyPair* OpenSSLKeyPair::FromPrivateKeyPEMString(
+    const std::string& pem_string) {
+  BIO* bio = BIO_new_mem_buf(const_cast<char*>(pem_string.c_str()), -1);
+  if (!bio) {
+    RTC_LOG(LS_ERROR) << "Failed to create a new BIO buffer.";
+    return nullptr;
+  }
+  BIO_set_mem_eof_return(bio, 0);
+  EVP_PKEY* pkey =
+      PEM_read_bio_PrivateKey(bio, nullptr, nullptr, const_cast<char*>("\0"));
+  BIO_free(bio);  // Frees the BIO, but not the pointed-to string.
+  if (!pkey) {
+    RTC_LOG(LS_ERROR) << "Failed to create the private key from PEM string.";
+    return nullptr;
+  }
+  if (EVP_PKEY_missing_parameters(pkey) != 0) {
+    RTC_LOG(LS_ERROR)
+        << "The resulting key pair is missing public key parameters.";
+    EVP_PKEY_free(pkey);
+    return nullptr;
+  }
+  return new OpenSSLKeyPair(pkey);
+}
+
+OpenSSLKeyPair::~OpenSSLKeyPair() {
+  EVP_PKEY_free(pkey_);
+}
+
+OpenSSLKeyPair* OpenSSLKeyPair::GetReference() {
+  AddReference();
+  return new OpenSSLKeyPair(pkey_);
+}
+
+void OpenSSLKeyPair::AddReference() {
+  EVP_PKEY_up_ref(pkey_);
+}
+
+std::string OpenSSLKeyPair::PrivateKeyToPEMString() const {
+  BIO* temp_memory_bio = BIO_new(BIO_s_mem());
+  if (!temp_memory_bio) {
+    RTC_LOG_F(LS_ERROR) << "Failed to allocate temporary memory bio";
+    RTC_NOTREACHED();
+    return "";
+  }
+  if (!PEM_write_bio_PrivateKey(temp_memory_bio, pkey_, nullptr, nullptr, 0,
+                                nullptr, nullptr)) {
+    RTC_LOG_F(LS_ERROR) << "Failed to write private key";
+    BIO_free(temp_memory_bio);
+    RTC_NOTREACHED();
+    return "";
+  }
+  BIO_write(temp_memory_bio, "\0", 1);
+  char* buffer;
+  BIO_get_mem_data(temp_memory_bio, &buffer);
+  std::string priv_key_str = buffer;
+  BIO_free(temp_memory_bio);
+  return priv_key_str;
+}
+
+std::string OpenSSLKeyPair::PublicKeyToPEMString() const {
+  BIO* temp_memory_bio = BIO_new(BIO_s_mem());
+  if (!temp_memory_bio) {
+    RTC_LOG_F(LS_ERROR) << "Failed to allocate temporary memory bio";
+    RTC_NOTREACHED();
+    return "";
+  }
+  if (!PEM_write_bio_PUBKEY(temp_memory_bio, pkey_)) {
+    RTC_LOG_F(LS_ERROR) << "Failed to write public key";
+    BIO_free(temp_memory_bio);
+    RTC_NOTREACHED();
+    return "";
+  }
+  BIO_write(temp_memory_bio, "\0", 1);
+  char* buffer;
+  BIO_get_mem_data(temp_memory_bio, &buffer);
+  std::string pub_key_str = buffer;
+  BIO_free(temp_memory_bio);
+  return pub_key_str;
+}
+
+bool OpenSSLKeyPair::operator==(const OpenSSLKeyPair& other) const {
+  return EVP_PKEY_cmp(this->pkey_, other.pkey_) == 1;
+}
+
+bool OpenSSLKeyPair::operator!=(const OpenSSLKeyPair& other) const {
+  return !(*this == other);
+}
+
+#if !defined(NDEBUG)
+// Print a certificate to the log, for debugging.
+static void PrintCert(X509* x509) {
+  BIO* temp_memory_bio = BIO_new(BIO_s_mem());
+  if (!temp_memory_bio) {
+    RTC_DLOG_F(LS_ERROR) << "Failed to allocate temporary memory bio";
+    return;
+  }
+  X509_print_ex(temp_memory_bio, x509, XN_FLAG_SEP_CPLUS_SPC, 0);
+  BIO_write(temp_memory_bio, "\0", 1);
+  char* buffer;
+  BIO_get_mem_data(temp_memory_bio, &buffer);
+  RTC_DLOG(LS_VERBOSE) << buffer;
+  BIO_free(temp_memory_bio);
+}
+#endif
+
+OpenSSLCertificate::OpenSSLCertificate(X509* x509) : x509_(x509) {
+  AddReference();
+}
+
+OpenSSLCertificate* OpenSSLCertificate::Generate(
+    OpenSSLKeyPair* key_pair,
+    const SSLIdentityParams& params) {
+  SSLIdentityParams actual_params(params);
+  if (actual_params.common_name.empty()) {
+    // Use a random string, arbitrarily 8chars long.
+    actual_params.common_name = CreateRandomString(8);
+  }
+  X509* x509 = MakeCertificate(key_pair->pkey(), actual_params);
+  if (!x509) {
+    LogSSLErrors("Generating certificate");
+    return nullptr;
+  }
+#if !defined(NDEBUG)
+  PrintCert(x509);
+#endif
+  OpenSSLCertificate* ret = new OpenSSLCertificate(x509);
+  X509_free(x509);
+  return ret;
+}
+
+OpenSSLCertificate* OpenSSLCertificate::FromPEMString(
+    const std::string& pem_string) {
+  BIO* bio = BIO_new_mem_buf(const_cast<char*>(pem_string.c_str()), -1);
+  if (!bio)
+    return nullptr;
+  BIO_set_mem_eof_return(bio, 0);
+  X509* x509 =
+      PEM_read_bio_X509(bio, nullptr, nullptr, const_cast<char*>("\0"));
+  BIO_free(bio);  // Frees the BIO, but not the pointed-to string.
+
+  if (!x509)
+    return nullptr;
+
+  OpenSSLCertificate* ret = new OpenSSLCertificate(x509);
+  X509_free(x509);
+  return ret;
+}
+
+// NOTE: This implementation only functions correctly after InitializeSSL
+// and before CleanupSSL.
+bool OpenSSLCertificate::GetSignatureDigestAlgorithm(
+    std::string* algorithm) const {
+  int nid = X509_get_signature_nid(x509_);
+  switch (nid) {
+    case NID_md5WithRSA:
+    case NID_md5WithRSAEncryption:
+      *algorithm = DIGEST_MD5;
+      break;
+    case NID_ecdsa_with_SHA1:
+    case NID_dsaWithSHA1:
+    case NID_dsaWithSHA1_2:
+    case NID_sha1WithRSA:
+    case NID_sha1WithRSAEncryption:
+      *algorithm = DIGEST_SHA_1;
+      break;
+    case NID_ecdsa_with_SHA224:
+    case NID_sha224WithRSAEncryption:
+    case NID_dsa_with_SHA224:
+      *algorithm = DIGEST_SHA_224;
+      break;
+    case NID_ecdsa_with_SHA256:
+    case NID_sha256WithRSAEncryption:
+    case NID_dsa_with_SHA256:
+      *algorithm = DIGEST_SHA_256;
+      break;
+    case NID_ecdsa_with_SHA384:
+    case NID_sha384WithRSAEncryption:
+      *algorithm = DIGEST_SHA_384;
+      break;
+    case NID_ecdsa_with_SHA512:
+    case NID_sha512WithRSAEncryption:
+      *algorithm = DIGEST_SHA_512;
+      break;
+    default:
+      // Unknown algorithm.  There are several unhandled options that are less
+      // common and more complex.
+      RTC_LOG(LS_ERROR) << "Unknown signature algorithm NID: " << nid;
+      algorithm->clear();
+      return false;
+  }
+  return true;
+}
+
+bool OpenSSLCertificate::ComputeDigest(const std::string& algorithm,
+                                       unsigned char* digest,
+                                       size_t size,
+                                       size_t* length) const {
+  return ComputeDigest(x509_, algorithm, digest, size, length);
+}
+
+bool OpenSSLCertificate::ComputeDigest(const X509* x509,
+                                       const std::string& algorithm,
+                                       unsigned char* digest,
+                                       size_t size,
+                                       size_t* length) {
+  const EVP_MD* md;
+  unsigned int n;
+
+  if (!OpenSSLDigest::GetDigestEVP(algorithm, &md))
+    return false;
+
+  if (size < static_cast<size_t>(EVP_MD_size(md)))
+    return false;
+
+  X509_digest(x509, md, digest, &n);
+
+  *length = n;
+
+  return true;
+}
+
+OpenSSLCertificate::~OpenSSLCertificate() {
+  X509_free(x509_);
+}
+
+OpenSSLCertificate* OpenSSLCertificate::GetReference() const {
+  return new OpenSSLCertificate(x509_);
+}
+
+std::string OpenSSLCertificate::ToPEMString() const {
+  BIO* bio = BIO_new(BIO_s_mem());
+  if (!bio) {
+    FATAL() << "unreachable code";
+  }
+  if (!PEM_write_bio_X509(bio, x509_)) {
+    BIO_free(bio);
+    FATAL() << "unreachable code";
+  }
+  BIO_write(bio, "\0", 1);
+  char* buffer;
+  BIO_get_mem_data(bio, &buffer);
+  std::string ret(buffer);
+  BIO_free(bio);
+  return ret;
+}
+
+void OpenSSLCertificate::ToDER(Buffer* der_buffer) const {
+  // In case of failure, make sure to leave the buffer empty.
+  der_buffer->SetSize(0);
+
+  // Calculates the DER representation of the certificate, from scratch.
+  BIO* bio = BIO_new(BIO_s_mem());
+  if (!bio) {
+    FATAL() << "unreachable code";
+  }
+  if (!i2d_X509_bio(bio, x509_)) {
+    BIO_free(bio);
+    FATAL() << "unreachable code";
+  }
+  char* data;
+  size_t length = BIO_get_mem_data(bio, &data);
+  der_buffer->SetData(data, length);
+  BIO_free(bio);
+}
+
+void OpenSSLCertificate::AddReference() const {
+  RTC_DCHECK(x509_ != nullptr);
+  X509_up_ref(x509_);
+}
+
+bool OpenSSLCertificate::operator==(const OpenSSLCertificate& other) const {
+  return X509_cmp(x509_, other.x509_) == 0;
+}
+
+bool OpenSSLCertificate::operator!=(const OpenSSLCertificate& other) const {
+  return !(*this == other);
+}
+
+// Documented in sslidentity.h.
+int64_t OpenSSLCertificate::CertificateExpirationTime() const {
+  ASN1_TIME* expire_time = X509_get_notAfter(x509_);
+  bool long_format;
+
+  if (expire_time->type == V_ASN1_UTCTIME) {
+    long_format = false;
+  } else if (expire_time->type == V_ASN1_GENERALIZEDTIME) {
+    long_format = true;
+  } else {
+    return -1;
+  }
+
+  return ASN1TimeToSec(expire_time->data, expire_time->length, long_format);
+}
+
+OpenSSLIdentity::OpenSSLIdentity(
+    std::unique_ptr<OpenSSLKeyPair> key_pair,
+    std::unique_ptr<OpenSSLCertificate> certificate)
+    : key_pair_(std::move(key_pair)) {
+  RTC_DCHECK(key_pair_ != nullptr);
+  RTC_DCHECK(certificate != nullptr);
+  std::vector<std::unique_ptr<SSLCertificate>> certs;
+  certs.push_back(std::move(certificate));
+  cert_chain_.reset(new SSLCertChain(std::move(certs)));
+}
+
+OpenSSLIdentity::OpenSSLIdentity(std::unique_ptr<OpenSSLKeyPair> key_pair,
+                                 std::unique_ptr<SSLCertChain> cert_chain)
+    : key_pair_(std::move(key_pair)), cert_chain_(std::move(cert_chain)) {
+  RTC_DCHECK(key_pair_ != nullptr);
+  RTC_DCHECK(cert_chain_ != nullptr);
+}
+
+OpenSSLIdentity::~OpenSSLIdentity() = default;
+
+OpenSSLIdentity* OpenSSLIdentity::GenerateInternal(
+    const SSLIdentityParams& params) {
+  std::unique_ptr<OpenSSLKeyPair> key_pair(
+      OpenSSLKeyPair::Generate(params.key_params));
+  if (key_pair) {
+    std::unique_ptr<OpenSSLCertificate> certificate(
+        OpenSSLCertificate::Generate(key_pair.get(), params));
+    if (certificate != nullptr)
+      return new OpenSSLIdentity(std::move(key_pair), std::move(certificate));
+  }
+  RTC_LOG(LS_INFO) << "Identity generation failed";
+  return nullptr;
+}
+
+OpenSSLIdentity* OpenSSLIdentity::GenerateWithExpiration(
+    const std::string& common_name,
+    const KeyParams& key_params,
+    time_t certificate_lifetime) {
+  SSLIdentityParams params;
+  params.key_params = key_params;
+  params.common_name = common_name;
+  time_t now = time(nullptr);
+  params.not_before = now + kCertificateWindowInSeconds;
+  params.not_after = now + certificate_lifetime;
+  if (params.not_before > params.not_after)
+    return nullptr;
+  return GenerateInternal(params);
+}
+
+OpenSSLIdentity* OpenSSLIdentity::GenerateForTest(
+    const SSLIdentityParams& params) {
+  return GenerateInternal(params);
+}
+
+SSLIdentity* OpenSSLIdentity::FromPEMStrings(const std::string& private_key,
+                                             const std::string& certificate) {
+  std::unique_ptr<OpenSSLCertificate> cert(
+      OpenSSLCertificate::FromPEMString(certificate));
+  if (!cert) {
+    RTC_LOG(LS_ERROR) << "Failed to create OpenSSLCertificate from PEM string.";
+    return nullptr;
+  }
+
+  std::unique_ptr<OpenSSLKeyPair> key_pair(
+      OpenSSLKeyPair::FromPrivateKeyPEMString(private_key));
+  if (!key_pair) {
+    RTC_LOG(LS_ERROR) << "Failed to create key pair from PEM string.";
+    return nullptr;
+  }
+
+  return new OpenSSLIdentity(std::move(key_pair), std::move(cert));
+}
+
+SSLIdentity* OpenSSLIdentity::FromPEMChainStrings(
+    const std::string& private_key,
+    const std::string& certificate_chain) {
+  BIO* bio =
+      BIO_new_mem_buf(certificate_chain.data(), certificate_chain.size());
+  if (!bio)
+    return nullptr;
+  BIO_set_mem_eof_return(bio, 0);
+  std::vector<std::unique_ptr<SSLCertificate>> certs;
+  while (true) {
+    X509* x509 =
+        PEM_read_bio_X509(bio, nullptr, nullptr, const_cast<char*>("\0"));
+    if (x509 == nullptr) {
+      uint32_t err = ERR_peek_error();
+      if (ERR_GET_LIB(err) == ERR_LIB_PEM &&
+          ERR_GET_REASON(err) == PEM_R_NO_START_LINE) {
+        break;
+      }
+      RTC_LOG(LS_ERROR) << "Failed to parse certificate from PEM string.";
+      BIO_free(bio);
+      return nullptr;
+    }
+    certs.emplace_back(new OpenSSLCertificate(x509));
+    X509_free(x509);
+  }
+  BIO_free(bio);
+  if (certs.empty()) {
+    RTC_LOG(LS_ERROR) << "Found no certificates in PEM string.";
+    return nullptr;
+  }
+
+  std::unique_ptr<OpenSSLKeyPair> key_pair(
+      OpenSSLKeyPair::FromPrivateKeyPEMString(private_key));
+  if (!key_pair) {
+    RTC_LOG(LS_ERROR) << "Failed to create key pair from PEM string.";
+    return nullptr;
+  }
+
+  return new OpenSSLIdentity(std::move(key_pair),
+                             MakeUnique<SSLCertChain>(std::move(certs)));
+}
+
+const OpenSSLCertificate& OpenSSLIdentity::certificate() const {
+  return *static_cast<const OpenSSLCertificate*>(&cert_chain_->Get(0));
+}
+
+const SSLCertChain& OpenSSLIdentity::cert_chain() const {
+  return *cert_chain_.get();
+}
+
+OpenSSLIdentity* OpenSSLIdentity::GetReference() const {
+  return new OpenSSLIdentity(WrapUnique(key_pair_->GetReference()),
+                             WrapUnique(cert_chain_->Copy()));
+}
+
+bool OpenSSLIdentity::ConfigureIdentity(SSL_CTX* ctx) {
+  // 1 is the documented success return code.
+  const OpenSSLCertificate* cert = &certificate();
+  if (SSL_CTX_use_certificate(ctx, cert->x509()) != 1 ||
+      SSL_CTX_use_PrivateKey(ctx, key_pair_->pkey()) != 1) {
+    LogSSLErrors("Configuring key and certificate");
+    return false;
+  }
+  // If a chain is available, use it.
+  for (size_t i = 1; i < cert_chain_->GetSize(); ++i) {
+    cert = static_cast<const OpenSSLCertificate*>(&cert_chain_->Get(i));
+    if (SSL_CTX_add1_chain_cert(ctx, cert->x509()) != 1) {
+      LogSSLErrors("Configuring intermediate certificate");
+      return false;
+    }
+  }
+
+  return true;
+}
+
+std::string OpenSSLIdentity::PrivateKeyToPEMString() const {
+  return key_pair_->PrivateKeyToPEMString();
+}
+
+std::string OpenSSLIdentity::PublicKeyToPEMString() const {
+  return key_pair_->PublicKeyToPEMString();
+}
+
+bool OpenSSLIdentity::operator==(const OpenSSLIdentity& other) const {
+  return *this->key_pair_ == *other.key_pair_ &&
+         this->certificate() == other.certificate();
+}
+
+bool OpenSSLIdentity::operator!=(const OpenSSLIdentity& other) const {
+  return !(*this == other);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/opensslidentity.h b/rtc_base/opensslidentity.h
new file mode 100644
index 0000000..c1dc49f
--- /dev/null
+++ b/rtc_base/opensslidentity.h
@@ -0,0 +1,147 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_OPENSSLIDENTITY_H_
+#define RTC_BASE_OPENSSLIDENTITY_H_
+
+#include <openssl/evp.h>
+#include <openssl/x509.h>
+
+#include <memory>
+#include <string>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/sslidentity.h"
+
+typedef struct ssl_ctx_st SSL_CTX;
+
+namespace rtc {
+
+// OpenSSLKeyPair encapsulates an OpenSSL EVP_PKEY* keypair object,
+// which is reference counted inside the OpenSSL library.
+class OpenSSLKeyPair {
+ public:
+  explicit OpenSSLKeyPair(EVP_PKEY* pkey) : pkey_(pkey) {
+    RTC_DCHECK(pkey_ != nullptr);
+  }
+
+  static OpenSSLKeyPair* Generate(const KeyParams& key_params);
+  // Constructs a key pair from the private key PEM string. This must not result
+  // in missing public key parameters. Returns null on error.
+  static OpenSSLKeyPair* FromPrivateKeyPEMString(const std::string& pem_string);
+
+  virtual ~OpenSSLKeyPair();
+
+  virtual OpenSSLKeyPair* GetReference();
+
+  EVP_PKEY* pkey() const { return pkey_; }
+  std::string PrivateKeyToPEMString() const;
+  std::string PublicKeyToPEMString() const;
+  bool operator==(const OpenSSLKeyPair& other) const;
+  bool operator!=(const OpenSSLKeyPair& other) const;
+
+ private:
+  void AddReference();
+
+  EVP_PKEY* pkey_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(OpenSSLKeyPair);
+};
+
+// OpenSSLCertificate encapsulates an OpenSSL X509* certificate object,
+// which is also reference counted inside the OpenSSL library.
+class OpenSSLCertificate : public SSLCertificate {
+ public:
+  // Caller retains ownership of the X509 object.
+  explicit OpenSSLCertificate(X509* x509);
+
+  static OpenSSLCertificate* Generate(OpenSSLKeyPair* key_pair,
+                                      const SSLIdentityParams& params);
+  static OpenSSLCertificate* FromPEMString(const std::string& pem_string);
+
+  ~OpenSSLCertificate() override;
+
+  OpenSSLCertificate* GetReference() const override;
+
+  X509* x509() const { return x509_; }
+
+  std::string ToPEMString() const override;
+  void ToDER(Buffer* der_buffer) const override;
+  bool operator==(const OpenSSLCertificate& other) const;
+  bool operator!=(const OpenSSLCertificate& other) const;
+
+  // Compute the digest of the certificate given algorithm
+  bool ComputeDigest(const std::string& algorithm,
+                     unsigned char* digest,
+                     size_t size,
+                     size_t* length) const override;
+
+  // Compute the digest of a certificate as an X509 *
+  static bool ComputeDigest(const X509* x509,
+                            const std::string& algorithm,
+                            unsigned char* digest,
+                            size_t size,
+                            size_t* length);
+
+  bool GetSignatureDigestAlgorithm(std::string* algorithm) const override;
+
+  int64_t CertificateExpirationTime() const override;
+
+ private:
+  void AddReference() const;
+
+  X509* x509_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(OpenSSLCertificate);
+};
+
+// Holds a keypair and certificate together, and a method to generate
+// them consistently.
+class OpenSSLIdentity : public SSLIdentity {
+ public:
+  static OpenSSLIdentity* GenerateWithExpiration(const std::string& common_name,
+                                                 const KeyParams& key_params,
+                                                 time_t certificate_lifetime);
+  static OpenSSLIdentity* GenerateForTest(const SSLIdentityParams& params);
+  static SSLIdentity* FromPEMStrings(const std::string& private_key,
+                                     const std::string& certificate);
+  static SSLIdentity* FromPEMChainStrings(const std::string& private_key,
+                                          const std::string& certificate_chain);
+  ~OpenSSLIdentity() override;
+
+  const OpenSSLCertificate& certificate() const override;
+  const SSLCertChain& cert_chain() const override;
+  OpenSSLIdentity* GetReference() const override;
+
+  // Configure an SSL context object to use our key and certificate.
+  bool ConfigureIdentity(SSL_CTX* ctx);
+
+  std::string PrivateKeyToPEMString() const override;
+  std::string PublicKeyToPEMString() const override;
+  bool operator==(const OpenSSLIdentity& other) const;
+  bool operator!=(const OpenSSLIdentity& other) const;
+
+ private:
+  OpenSSLIdentity(std::unique_ptr<OpenSSLKeyPair> key_pair,
+                  std::unique_ptr<OpenSSLCertificate> certificate);
+  OpenSSLIdentity(std::unique_ptr<OpenSSLKeyPair> key_pair,
+                  std::unique_ptr<SSLCertChain> cert_chain);
+
+  static OpenSSLIdentity* GenerateInternal(const SSLIdentityParams& params);
+
+  std::unique_ptr<OpenSSLKeyPair> key_pair_;
+  std::unique_ptr<SSLCertChain> cert_chain_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(OpenSSLIdentity);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_OPENSSLIDENTITY_H_
diff --git a/rtc_base/opensslstreamadapter.cc b/rtc_base/opensslstreamadapter.cc
new file mode 100644
index 0000000..c0fb108
--- /dev/null
+++ b/rtc_base/opensslstreamadapter.cc
@@ -0,0 +1,1215 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/opensslstreamadapter.h"
+
+#include <openssl/bio.h>
+#include <openssl/crypto.h>
+#include <openssl/err.h>
+#include <openssl/rand.h>
+#include <openssl/tls1.h>
+#include <openssl/x509v3.h>
+#ifndef OPENSSL_IS_BORINGSSL
+#include <openssl/dtls1.h>
+#include <openssl/ssl.h>
+#endif
+
+#include <memory>
+#include <vector>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/openssl.h"
+#include "rtc_base/openssladapter.h"
+#include "rtc_base/openssldigest.h"
+#include "rtc_base/opensslidentity.h"
+#include "rtc_base/stream.h"
+#include "rtc_base/stringutils.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/timeutils.h"
+
+namespace {
+  bool g_use_time_callback_for_testing = false;
+}
+
+namespace rtc {
+
+#if (OPENSSL_VERSION_NUMBER < 0x10100000L)
+#error "webrtc requires at least OpenSSL version 1.1.0, to support DTLS-SRTP"
+#endif
+
+// SRTP cipher suite table. |internal_name| is used to construct a
+// colon-separated profile strings which is needed by
+// SSL_CTX_set_tlsext_use_srtp().
+struct SrtpCipherMapEntry {
+  const char* internal_name;
+  const int id;
+};
+
+// This isn't elegant, but it's better than an external reference
+static SrtpCipherMapEntry SrtpCipherMap[] = {
+    {"SRTP_AES128_CM_SHA1_80", SRTP_AES128_CM_SHA1_80},
+    {"SRTP_AES128_CM_SHA1_32", SRTP_AES128_CM_SHA1_32},
+    {"SRTP_AEAD_AES_128_GCM", SRTP_AEAD_AES_128_GCM},
+    {"SRTP_AEAD_AES_256_GCM", SRTP_AEAD_AES_256_GCM},
+    {nullptr, 0}};
+
+#ifdef OPENSSL_IS_BORINGSSL
+// Not used in production code. Actual time should be relative to Jan 1, 1970.
+static void TimeCallbackForTesting(const SSL* ssl, struct timeval* out_clock) {
+  int64_t time = TimeNanos();
+  out_clock->tv_sec = time / kNumNanosecsPerSec;
+  out_clock->tv_usec = (time % kNumNanosecsPerSec) / kNumNanosecsPerMicrosec;
+}
+#else  // #ifdef OPENSSL_IS_BORINGSSL
+
+// Cipher name table. Maps internal OpenSSL cipher ids to the RFC name.
+struct SslCipherMapEntry {
+  uint32_t openssl_id;
+  const char* rfc_name;
+};
+
+#define DEFINE_CIPHER_ENTRY_SSL3(name)  {SSL3_CK_##name, "TLS_"#name}
+#define DEFINE_CIPHER_ENTRY_TLS1(name)  {TLS1_CK_##name, "TLS_"#name}
+
+// The "SSL_CIPHER_standard_name" function is only available in OpenSSL when
+// compiled with tracing, so we need to define the mapping manually here.
+static const SslCipherMapEntry kSslCipherMap[] = {
+    // TLS v1.0 ciphersuites from RFC2246.
+    DEFINE_CIPHER_ENTRY_SSL3(RSA_RC4_128_SHA),
+    {SSL3_CK_RSA_DES_192_CBC3_SHA, "TLS_RSA_WITH_3DES_EDE_CBC_SHA"},
+
+    // AES ciphersuites from RFC3268.
+    {TLS1_CK_RSA_WITH_AES_128_SHA, "TLS_RSA_WITH_AES_128_CBC_SHA"},
+    {TLS1_CK_DHE_RSA_WITH_AES_128_SHA, "TLS_DHE_RSA_WITH_AES_128_CBC_SHA"},
+    {TLS1_CK_RSA_WITH_AES_256_SHA, "TLS_RSA_WITH_AES_256_CBC_SHA"},
+    {TLS1_CK_DHE_RSA_WITH_AES_256_SHA, "TLS_DHE_RSA_WITH_AES_256_CBC_SHA"},
+
+    // ECC ciphersuites from RFC4492.
+    DEFINE_CIPHER_ENTRY_TLS1(ECDHE_ECDSA_WITH_RC4_128_SHA),
+    {TLS1_CK_ECDHE_ECDSA_WITH_DES_192_CBC3_SHA,
+     "TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA"},
+    DEFINE_CIPHER_ENTRY_TLS1(ECDHE_ECDSA_WITH_AES_128_CBC_SHA),
+    DEFINE_CIPHER_ENTRY_TLS1(ECDHE_ECDSA_WITH_AES_256_CBC_SHA),
+
+    DEFINE_CIPHER_ENTRY_TLS1(ECDHE_RSA_WITH_RC4_128_SHA),
+    {TLS1_CK_ECDHE_RSA_WITH_DES_192_CBC3_SHA,
+     "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA"},
+    DEFINE_CIPHER_ENTRY_TLS1(ECDHE_RSA_WITH_AES_128_CBC_SHA),
+    DEFINE_CIPHER_ENTRY_TLS1(ECDHE_RSA_WITH_AES_256_CBC_SHA),
+
+    // TLS v1.2 ciphersuites.
+    {TLS1_CK_RSA_WITH_AES_128_SHA256, "TLS_RSA_WITH_AES_128_CBC_SHA256"},
+    {TLS1_CK_RSA_WITH_AES_256_SHA256, "TLS_RSA_WITH_AES_256_CBC_SHA256"},
+    {TLS1_CK_DHE_RSA_WITH_AES_128_SHA256,
+     "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256"},
+    {TLS1_CK_DHE_RSA_WITH_AES_256_SHA256,
+     "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256"},
+
+    // TLS v1.2 GCM ciphersuites from RFC5288.
+    DEFINE_CIPHER_ENTRY_TLS1(RSA_WITH_AES_128_GCM_SHA256),
+    DEFINE_CIPHER_ENTRY_TLS1(RSA_WITH_AES_256_GCM_SHA384),
+    DEFINE_CIPHER_ENTRY_TLS1(DHE_RSA_WITH_AES_128_GCM_SHA256),
+    DEFINE_CIPHER_ENTRY_TLS1(DHE_RSA_WITH_AES_256_GCM_SHA384),
+    DEFINE_CIPHER_ENTRY_TLS1(DH_RSA_WITH_AES_128_GCM_SHA256),
+    DEFINE_CIPHER_ENTRY_TLS1(DH_RSA_WITH_AES_256_GCM_SHA384),
+
+    // ECDH HMAC based ciphersuites from RFC5289.
+    {TLS1_CK_ECDHE_ECDSA_WITH_AES_128_SHA256,
+     "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"},
+    {TLS1_CK_ECDHE_ECDSA_WITH_AES_256_SHA384,
+     "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384"},
+    {TLS1_CK_ECDHE_RSA_WITH_AES_128_SHA256,
+     "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"},
+    {TLS1_CK_ECDHE_RSA_WITH_AES_256_SHA384,
+     "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384"},
+
+    // ECDH GCM based ciphersuites from RFC5289.
+    DEFINE_CIPHER_ENTRY_TLS1(ECDHE_ECDSA_WITH_AES_128_GCM_SHA256),
+    DEFINE_CIPHER_ENTRY_TLS1(ECDHE_ECDSA_WITH_AES_256_GCM_SHA384),
+    DEFINE_CIPHER_ENTRY_TLS1(ECDHE_RSA_WITH_AES_128_GCM_SHA256),
+    DEFINE_CIPHER_ENTRY_TLS1(ECDHE_RSA_WITH_AES_256_GCM_SHA384),
+
+    {0, nullptr}};
+#endif  // #ifndef OPENSSL_IS_BORINGSSL
+
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4309)
+#pragma warning(disable : 4310)
+#endif  // defined(_MSC_VER)
+
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif  // defined(_MSC_VER)
+
+//////////////////////////////////////////////////////////////////////
+// StreamBIO
+//////////////////////////////////////////////////////////////////////
+
+static int stream_write(BIO* h, const char* buf, int num);
+static int stream_read(BIO* h, char* buf, int size);
+static int stream_puts(BIO* h, const char* str);
+static long stream_ctrl(BIO* h, int cmd, long arg1, void* arg2);
+static int stream_new(BIO* h);
+static int stream_free(BIO* data);
+
+static BIO_METHOD* BIO_stream_method() {
+  static BIO_METHOD* method = [] {
+    BIO_METHOD* method = BIO_meth_new(BIO_TYPE_BIO, "stream");
+    BIO_meth_set_write(method, stream_write);
+    BIO_meth_set_read(method, stream_read);
+    BIO_meth_set_puts(method, stream_puts);
+    BIO_meth_set_ctrl(method, stream_ctrl);
+    BIO_meth_set_create(method, stream_new);
+    BIO_meth_set_destroy(method, stream_free);
+    return method;
+  }();
+  return method;
+}
+
+static BIO* BIO_new_stream(StreamInterface* stream) {
+  BIO* ret = BIO_new(BIO_stream_method());
+  if (ret == nullptr)
+    return nullptr;
+  BIO_set_data(ret, stream);
+  return ret;
+}
+
+// bio methods return 1 (or at least non-zero) on success and 0 on failure.
+
+static int stream_new(BIO* b) {
+  BIO_set_shutdown(b, 0);
+  BIO_set_init(b, 1);
+  BIO_set_data(b, 0);
+  return 1;
+}
+
+static int stream_free(BIO* b) {
+  if (b == nullptr)
+    return 0;
+  return 1;
+}
+
+static int stream_read(BIO* b, char* out, int outl) {
+  if (!out)
+    return -1;
+  StreamInterface* stream = static_cast<StreamInterface*>(BIO_get_data(b));
+  BIO_clear_retry_flags(b);
+  size_t read;
+  int error;
+  StreamResult result = stream->Read(out, outl, &read, &error);
+  if (result == SR_SUCCESS) {
+    return checked_cast<int>(read);
+  } else if (result == SR_BLOCK) {
+    BIO_set_retry_read(b);
+  }
+  return -1;
+}
+
+static int stream_write(BIO* b, const char* in, int inl) {
+  if (!in)
+    return -1;
+  StreamInterface* stream = static_cast<StreamInterface*>(BIO_get_data(b));
+  BIO_clear_retry_flags(b);
+  size_t written;
+  int error;
+  StreamResult result = stream->Write(in, inl, &written, &error);
+  if (result == SR_SUCCESS) {
+    return checked_cast<int>(written);
+  } else if (result == SR_BLOCK) {
+    BIO_set_retry_write(b);
+  }
+  return -1;
+}
+
+static int stream_puts(BIO* b, const char* str) {
+  return stream_write(b, str, checked_cast<int>(strlen(str)));
+}
+
+static long stream_ctrl(BIO* b, int cmd, long num, void* ptr) {
+  switch (cmd) {
+    case BIO_CTRL_RESET:
+      return 0;
+    case BIO_CTRL_EOF: {
+      StreamInterface* stream = static_cast<StreamInterface*>(ptr);
+      // 1 means end-of-stream.
+      return (stream->GetState() == SS_CLOSED) ? 1 : 0;
+    }
+    case BIO_CTRL_WPENDING:
+    case BIO_CTRL_PENDING:
+      return 0;
+    case BIO_CTRL_FLUSH:
+      return 1;
+    case BIO_CTRL_DGRAM_QUERY_MTU:
+      // openssl defaults to mtu=256 unless we return something here.
+      // The handshake doesn't actually need to send packets above 1k,
+      // so this seems like a sensible value that should work in most cases.
+      // Webrtc uses the same value for video packets.
+      return 1200;
+    default:
+      return 0;
+  }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// OpenSSLStreamAdapter
+/////////////////////////////////////////////////////////////////////////////
+
+OpenSSLStreamAdapter::OpenSSLStreamAdapter(StreamInterface* stream)
+    : SSLStreamAdapter(stream),
+      state_(SSL_NONE),
+      role_(SSL_CLIENT),
+      ssl_read_needs_write_(false),
+      ssl_write_needs_read_(false),
+      ssl_(nullptr),
+      ssl_ctx_(nullptr),
+      ssl_mode_(SSL_MODE_TLS),
+      ssl_max_version_(SSL_PROTOCOL_TLS_12) {}
+
+OpenSSLStreamAdapter::~OpenSSLStreamAdapter() {
+  Cleanup(0);
+}
+
+void OpenSSLStreamAdapter::SetIdentity(SSLIdentity* identity) {
+  RTC_DCHECK(!identity_);
+  identity_.reset(static_cast<OpenSSLIdentity*>(identity));
+}
+
+void OpenSSLStreamAdapter::SetServerRole(SSLRole role) {
+  role_ = role;
+}
+
+bool OpenSSLStreamAdapter::SetPeerCertificateDigest(
+    const std::string& digest_alg,
+    const unsigned char* digest_val,
+    size_t digest_len,
+    SSLPeerCertificateDigestError* error) {
+  RTC_DCHECK(!peer_certificate_verified_);
+  RTC_DCHECK(!has_peer_certificate_digest());
+  size_t expected_len;
+  if (error) {
+    *error = SSLPeerCertificateDigestError::NONE;
+  }
+
+  if (!OpenSSLDigest::GetDigestSize(digest_alg, &expected_len)) {
+    RTC_LOG(LS_WARNING) << "Unknown digest algorithm: " << digest_alg;
+    if (error) {
+      *error = SSLPeerCertificateDigestError::UNKNOWN_ALGORITHM;
+    }
+    return false;
+  }
+  if (expected_len != digest_len) {
+    if (error) {
+      *error = SSLPeerCertificateDigestError::INVALID_LENGTH;
+    }
+    return false;
+  }
+
+  peer_certificate_digest_value_.SetData(digest_val, digest_len);
+  peer_certificate_digest_algorithm_ = digest_alg;
+
+  if (!peer_cert_chain_) {
+    // Normal case, where the digest is set before we obtain the certificate
+    // from the handshake.
+    return true;
+  }
+
+  if (!VerifyPeerCertificate()) {
+    Error("SetPeerCertificateDigest", -1, SSL_AD_BAD_CERTIFICATE, false);
+    if (error) {
+      *error = SSLPeerCertificateDigestError::VERIFICATION_FAILED;
+    }
+    return false;
+  }
+
+  if (state_ == SSL_CONNECTED) {
+    // Post the event asynchronously to unwind the stack. The caller
+    // of ContinueSSL may be the same object listening for these
+    // events and may not be prepared for reentrancy.
+    PostEvent(SE_OPEN | SE_READ | SE_WRITE, 0);
+  }
+
+  return true;
+}
+
+std::string OpenSSLStreamAdapter::SslCipherSuiteToName(int cipher_suite) {
+#ifdef OPENSSL_IS_BORINGSSL
+  const SSL_CIPHER* ssl_cipher = SSL_get_cipher_by_value(cipher_suite);
+  if (!ssl_cipher) {
+    return std::string();
+  }
+  return SSL_CIPHER_standard_name(ssl_cipher);
+#else
+  for (const SslCipherMapEntry* entry = kSslCipherMap; entry->rfc_name;
+       ++entry) {
+    if (cipher_suite == static_cast<int>(entry->openssl_id)) {
+      return entry->rfc_name;
+    }
+  }
+  return std::string();
+#endif
+}
+
+bool OpenSSLStreamAdapter::GetSslCipherSuite(int* cipher_suite) {
+  if (state_ != SSL_CONNECTED)
+    return false;
+
+  const SSL_CIPHER* current_cipher = SSL_get_current_cipher(ssl_);
+  if (current_cipher == nullptr) {
+    return false;
+  }
+
+  *cipher_suite = static_cast<uint16_t>(SSL_CIPHER_get_id(current_cipher));
+  return true;
+}
+
+int OpenSSLStreamAdapter::GetSslVersion() const {
+  if (state_ != SSL_CONNECTED)
+    return -1;
+
+  int ssl_version = SSL_version(ssl_);
+  if (ssl_mode_ == SSL_MODE_DTLS) {
+    if (ssl_version == DTLS1_VERSION)
+      return SSL_PROTOCOL_DTLS_10;
+    else if (ssl_version == DTLS1_2_VERSION)
+      return SSL_PROTOCOL_DTLS_12;
+  } else {
+    if (ssl_version == TLS1_VERSION)
+      return SSL_PROTOCOL_TLS_10;
+    else if (ssl_version == TLS1_1_VERSION)
+      return SSL_PROTOCOL_TLS_11;
+    else if (ssl_version == TLS1_2_VERSION)
+      return SSL_PROTOCOL_TLS_12;
+  }
+
+  return -1;
+}
+
+// Key Extractor interface
+bool OpenSSLStreamAdapter::ExportKeyingMaterial(const std::string& label,
+                                                const uint8_t* context,
+                                                size_t context_len,
+                                                bool use_context,
+                                                uint8_t* result,
+                                                size_t result_len) {
+  int i;
+
+  i = SSL_export_keying_material(ssl_, result, result_len, label.c_str(),
+                                 label.length(), const_cast<uint8_t*>(context),
+                                 context_len, use_context);
+
+  if (i != 1)
+    return false;
+
+  return true;
+}
+
+bool OpenSSLStreamAdapter::SetDtlsSrtpCryptoSuites(
+    const std::vector<int>& ciphers) {
+  std::string internal_ciphers;
+
+  if (state_ != SSL_NONE)
+    return false;
+
+  for (std::vector<int>::const_iterator cipher = ciphers.begin();
+       cipher != ciphers.end(); ++cipher) {
+    bool found = false;
+    for (SrtpCipherMapEntry* entry = SrtpCipherMap; entry->internal_name;
+         ++entry) {
+      if (*cipher == entry->id) {
+        found = true;
+        if (!internal_ciphers.empty())
+          internal_ciphers += ":";
+        internal_ciphers += entry->internal_name;
+        break;
+      }
+    }
+
+    if (!found) {
+      RTC_LOG(LS_ERROR) << "Could not find cipher: " << *cipher;
+      return false;
+    }
+  }
+
+  if (internal_ciphers.empty())
+    return false;
+
+  srtp_ciphers_ = internal_ciphers;
+  return true;
+}
+
+bool OpenSSLStreamAdapter::GetDtlsSrtpCryptoSuite(int* crypto_suite) {
+  RTC_DCHECK(state_ == SSL_CONNECTED);
+  if (state_ != SSL_CONNECTED)
+    return false;
+
+  const SRTP_PROTECTION_PROFILE *srtp_profile =
+      SSL_get_selected_srtp_profile(ssl_);
+
+  if (!srtp_profile)
+    return false;
+
+  *crypto_suite = srtp_profile->id;
+  RTC_DCHECK(!SrtpCryptoSuiteToName(*crypto_suite).empty());
+  return true;
+}
+
+bool OpenSSLStreamAdapter::IsTlsConnected() {
+  return state_ == SSL_CONNECTED;
+}
+
+int OpenSSLStreamAdapter::StartSSL() {
+  if (state_ != SSL_NONE) {
+    // Don't allow StartSSL to be called twice.
+    return -1;
+  }
+
+  if (StreamAdapterInterface::GetState() != SS_OPEN) {
+    state_ = SSL_WAIT;
+    return 0;
+  }
+
+  state_ = SSL_CONNECTING;
+  if (int err = BeginSSL()) {
+    Error("BeginSSL", err, 0, false);
+    return err;
+  }
+
+  return 0;
+}
+
+void OpenSSLStreamAdapter::SetMode(SSLMode mode) {
+  RTC_DCHECK(state_ == SSL_NONE);
+  ssl_mode_ = mode;
+}
+
+void OpenSSLStreamAdapter::SetMaxProtocolVersion(SSLProtocolVersion version) {
+  RTC_DCHECK(ssl_ctx_ == nullptr);
+  ssl_max_version_ = version;
+}
+
+void OpenSSLStreamAdapter::SetInitialRetransmissionTimeout(
+    int timeout_ms) {
+  RTC_DCHECK(ssl_ctx_ == nullptr);
+  dtls_handshake_timeout_ms_ = timeout_ms;
+}
+
+//
+// StreamInterface Implementation
+//
+
+StreamResult OpenSSLStreamAdapter::Write(const void* data, size_t data_len,
+                                         size_t* written, int* error) {
+  RTC_LOG(LS_VERBOSE) << "OpenSSLStreamAdapter::Write(" << data_len << ")";
+
+  switch (state_) {
+  case SSL_NONE:
+    // pass-through in clear text
+    return StreamAdapterInterface::Write(data, data_len, written, error);
+
+  case SSL_WAIT:
+  case SSL_CONNECTING:
+    return SR_BLOCK;
+
+  case SSL_CONNECTED:
+    if (waiting_to_verify_peer_certificate()) {
+      return SR_BLOCK;
+    }
+    break;
+
+  case SSL_ERROR:
+  case SSL_CLOSED:
+  default:
+    if (error)
+      *error = ssl_error_code_;
+    return SR_ERROR;
+  }
+
+  // OpenSSL will return an error if we try to write zero bytes
+  if (data_len == 0) {
+    if (written)
+      *written = 0;
+    return SR_SUCCESS;
+  }
+
+  ssl_write_needs_read_ = false;
+
+  int code = SSL_write(ssl_, data, checked_cast<int>(data_len));
+  int ssl_error = SSL_get_error(ssl_, code);
+  switch (ssl_error) {
+  case SSL_ERROR_NONE:
+    RTC_LOG(LS_VERBOSE) << " -- success";
+    RTC_DCHECK_GT(code, 0);
+    RTC_DCHECK_LE(code, data_len);
+    if (written)
+      *written = code;
+    return SR_SUCCESS;
+  case SSL_ERROR_WANT_READ:
+    RTC_LOG(LS_VERBOSE) << " -- error want read";
+    ssl_write_needs_read_ = true;
+    return SR_BLOCK;
+  case SSL_ERROR_WANT_WRITE:
+    RTC_LOG(LS_VERBOSE) << " -- error want write";
+    return SR_BLOCK;
+
+  case SSL_ERROR_ZERO_RETURN:
+  default:
+    Error("SSL_write", (ssl_error ? ssl_error : -1), 0, false);
+    if (error)
+      *error = ssl_error_code_;
+    return SR_ERROR;
+  }
+  // not reached
+}
+
+StreamResult OpenSSLStreamAdapter::Read(void* data, size_t data_len,
+                                        size_t* read, int* error) {
+  RTC_LOG(LS_VERBOSE) << "OpenSSLStreamAdapter::Read(" << data_len << ")";
+  switch (state_) {
+    case SSL_NONE:
+      // pass-through in clear text
+      return StreamAdapterInterface::Read(data, data_len, read, error);
+
+    case SSL_WAIT:
+    case SSL_CONNECTING:
+      return SR_BLOCK;
+
+    case SSL_CONNECTED:
+      if (waiting_to_verify_peer_certificate()) {
+        return SR_BLOCK;
+      }
+      break;
+
+    case SSL_CLOSED:
+      return SR_EOS;
+
+    case SSL_ERROR:
+    default:
+      if (error)
+        *error = ssl_error_code_;
+      return SR_ERROR;
+  }
+
+  // Don't trust OpenSSL with zero byte reads
+  if (data_len == 0) {
+    if (read)
+      *read = 0;
+    return SR_SUCCESS;
+  }
+
+  ssl_read_needs_write_ = false;
+
+  int code = SSL_read(ssl_, data, checked_cast<int>(data_len));
+  int ssl_error = SSL_get_error(ssl_, code);
+  switch (ssl_error) {
+    case SSL_ERROR_NONE:
+      RTC_LOG(LS_VERBOSE) << " -- success";
+      RTC_DCHECK_GT(code, 0);
+      RTC_DCHECK_LE(code, data_len);
+      if (read)
+        *read = code;
+
+      if (ssl_mode_ == SSL_MODE_DTLS) {
+        // Enforce atomic reads -- this is a short read
+        unsigned int pending = SSL_pending(ssl_);
+
+        if (pending) {
+          RTC_LOG(LS_INFO) << " -- short DTLS read. flushing";
+          FlushInput(pending);
+          if (error)
+            *error = SSE_MSG_TRUNC;
+          return SR_ERROR;
+        }
+      }
+      return SR_SUCCESS;
+    case SSL_ERROR_WANT_READ:
+      RTC_LOG(LS_VERBOSE) << " -- error want read";
+      return SR_BLOCK;
+    case SSL_ERROR_WANT_WRITE:
+      RTC_LOG(LS_VERBOSE) << " -- error want write";
+      ssl_read_needs_write_ = true;
+      return SR_BLOCK;
+    case SSL_ERROR_ZERO_RETURN:
+      RTC_LOG(LS_VERBOSE) << " -- remote side closed";
+      Close();
+      return SR_EOS;
+      break;
+    default:
+      Error("SSL_read", (ssl_error ? ssl_error : -1), 0, false);
+      if (error)
+        *error = ssl_error_code_;
+      return SR_ERROR;
+  }
+  // not reached
+}
+
+void OpenSSLStreamAdapter::FlushInput(unsigned int left) {
+  unsigned char buf[2048];
+
+  while (left) {
+    // This should always succeed
+    int toread = (sizeof(buf) < left) ? sizeof(buf) : left;
+    int code = SSL_read(ssl_, buf, toread);
+
+    int ssl_error = SSL_get_error(ssl_, code);
+    RTC_DCHECK(ssl_error == SSL_ERROR_NONE);
+
+    if (ssl_error != SSL_ERROR_NONE) {
+      RTC_DLOG(LS_VERBOSE) << " -- error " << code;
+      Error("SSL_read", (ssl_error ? ssl_error : -1), 0, false);
+      return;
+    }
+
+    RTC_LOG(LS_VERBOSE) << " -- flushed " << code << " bytes";
+    left -= code;
+  }
+}
+
+void OpenSSLStreamAdapter::Close() {
+  Cleanup(0);
+  RTC_DCHECK(state_ == SSL_CLOSED || state_ == SSL_ERROR);
+  // When we're closed at SSL layer, also close the stream level which
+  // performs necessary clean up. Otherwise, a new incoming packet after
+  // this could overflow the stream buffer.
+  StreamAdapterInterface::Close();
+}
+
+StreamState OpenSSLStreamAdapter::GetState() const {
+  switch (state_) {
+    case SSL_WAIT:
+    case SSL_CONNECTING:
+      return SS_OPENING;
+    case SSL_CONNECTED:
+      if (waiting_to_verify_peer_certificate()) {
+        return SS_OPENING;
+      }
+      return SS_OPEN;
+    default:
+      return SS_CLOSED;
+  };
+  // not reached
+}
+
+void OpenSSLStreamAdapter::OnEvent(StreamInterface* stream, int events,
+                                   int err) {
+  int events_to_signal = 0;
+  int signal_error = 0;
+  RTC_DCHECK(stream == this->stream());
+  if ((events & SE_OPEN)) {
+    RTC_LOG(LS_VERBOSE) << "OpenSSLStreamAdapter::OnEvent SE_OPEN";
+    if (state_ != SSL_WAIT) {
+      RTC_DCHECK(state_ == SSL_NONE);
+      events_to_signal |= SE_OPEN;
+    } else {
+      state_ = SSL_CONNECTING;
+      if (int err = BeginSSL()) {
+        Error("BeginSSL", err, 0, true);
+        return;
+      }
+    }
+  }
+  if ((events & (SE_READ|SE_WRITE))) {
+    RTC_LOG(LS_VERBOSE) << "OpenSSLStreamAdapter::OnEvent"
+                        << ((events & SE_READ) ? " SE_READ" : "")
+                        << ((events & SE_WRITE) ? " SE_WRITE" : "");
+    if (state_ == SSL_NONE) {
+      events_to_signal |= events & (SE_READ|SE_WRITE);
+    } else if (state_ == SSL_CONNECTING) {
+      if (int err = ContinueSSL()) {
+        Error("ContinueSSL", err, 0, true);
+        return;
+      }
+    } else if (state_ == SSL_CONNECTED) {
+      if (((events & SE_READ) && ssl_write_needs_read_) ||
+          (events & SE_WRITE)) {
+        RTC_LOG(LS_VERBOSE) << " -- onStreamWriteable";
+        events_to_signal |= SE_WRITE;
+      }
+      if (((events & SE_WRITE) && ssl_read_needs_write_) ||
+          (events & SE_READ)) {
+        RTC_LOG(LS_VERBOSE) << " -- onStreamReadable";
+        events_to_signal |= SE_READ;
+      }
+    }
+  }
+  if ((events & SE_CLOSE)) {
+    RTC_LOG(LS_VERBOSE) << "OpenSSLStreamAdapter::OnEvent(SE_CLOSE, " << err
+                        << ")";
+    Cleanup(0);
+    events_to_signal |= SE_CLOSE;
+    // SE_CLOSE is the only event that uses the final parameter to OnEvent().
+    RTC_DCHECK(signal_error == 0);
+    signal_error = err;
+  }
+  if (events_to_signal)
+    StreamAdapterInterface::OnEvent(stream, events_to_signal, signal_error);
+}
+
+int OpenSSLStreamAdapter::BeginSSL() {
+  RTC_DCHECK(state_ == SSL_CONNECTING);
+  // The underlying stream has opened.
+  RTC_LOG(LS_INFO) << "BeginSSL with peer.";
+
+  BIO* bio = nullptr;
+
+  // First set up the context.
+  RTC_DCHECK(ssl_ctx_ == nullptr);
+  ssl_ctx_ = SetupSSLContext();
+  if (!ssl_ctx_)
+    return -1;
+
+  bio = BIO_new_stream(static_cast<StreamInterface*>(stream()));
+  if (!bio)
+    return -1;
+
+  ssl_ = SSL_new(ssl_ctx_);
+  if (!ssl_) {
+    BIO_free(bio);
+    return -1;
+  }
+
+  SSL_set_app_data(ssl_, this);
+
+  SSL_set_bio(ssl_, bio, bio);  // the SSL object owns the bio now.
+  if (ssl_mode_ == SSL_MODE_DTLS) {
+#ifdef OPENSSL_IS_BORINGSSL
+    DTLSv1_set_initial_timeout_duration(ssl_, dtls_handshake_timeout_ms_);
+#else
+    // Enable read-ahead for DTLS so whole packets are read from internal BIO
+    // before parsing. This is done internally by BoringSSL for DTLS.
+    SSL_set_read_ahead(ssl_, 1);
+#endif
+  }
+
+  SSL_set_mode(ssl_, SSL_MODE_ENABLE_PARTIAL_WRITE |
+               SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER);
+
+#if !defined(OPENSSL_IS_BORINGSSL)
+  // Specify an ECDH group for ECDHE ciphers, otherwise OpenSSL cannot
+  // negotiate them when acting as the server. Use NIST's P-256 which is
+  // commonly supported. BoringSSL doesn't need explicit configuration and has
+  // a reasonable default set.
+  EC_KEY* ecdh = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
+  if (ecdh == nullptr)
+    return -1;
+  SSL_set_options(ssl_, SSL_OP_SINGLE_ECDH_USE);
+  SSL_set_tmp_ecdh(ssl_, ecdh);
+  EC_KEY_free(ecdh);
+#endif
+
+  // Do the connect
+  return ContinueSSL();
+}
+
+int OpenSSLStreamAdapter::ContinueSSL() {
+  RTC_LOG(LS_VERBOSE) << "ContinueSSL";
+  RTC_DCHECK(state_ == SSL_CONNECTING);
+
+  // Clear the DTLS timer
+  Thread::Current()->Clear(this, MSG_TIMEOUT);
+
+  int code = (role_ == SSL_CLIENT) ? SSL_connect(ssl_) : SSL_accept(ssl_);
+  int ssl_error;
+  switch (ssl_error = SSL_get_error(ssl_, code)) {
+    case SSL_ERROR_NONE:
+      RTC_LOG(LS_VERBOSE) << " -- success";
+      // By this point, OpenSSL should have given us a certificate, or errored
+      // out if one was missing.
+      RTC_DCHECK(peer_cert_chain_ || !client_auth_enabled());
+
+      state_ = SSL_CONNECTED;
+      if (!waiting_to_verify_peer_certificate()) {
+        // We have everything we need to start the connection, so signal
+        // SE_OPEN. If we need a client certificate fingerprint and don't have
+        // it yet, we'll instead signal SE_OPEN in SetPeerCertificateDigest.
+        //
+        // TODO(deadbeef): Post this event asynchronously to unwind the stack.
+        // The caller of ContinueSSL may be the same object listening for these
+        // events and may not be prepared for reentrancy.
+        // PostEvent(SE_OPEN | SE_READ | SE_WRITE, 0);
+        StreamAdapterInterface::OnEvent(stream(), SE_OPEN | SE_READ | SE_WRITE,
+                                        0);
+      }
+      break;
+
+    case SSL_ERROR_WANT_READ: {
+      RTC_LOG(LS_VERBOSE) << " -- error want read";
+      struct timeval timeout;
+      if (DTLSv1_get_timeout(ssl_, &timeout)) {
+        int delay = timeout.tv_sec * 1000 + timeout.tv_usec / 1000;
+
+        Thread::Current()->PostDelayed(RTC_FROM_HERE, delay, this, MSG_TIMEOUT,
+                                       0);
+        }
+      }
+      break;
+
+    case SSL_ERROR_WANT_WRITE:
+      RTC_LOG(LS_VERBOSE) << " -- error want write";
+      break;
+
+    case SSL_ERROR_ZERO_RETURN:
+    default:
+      RTC_LOG(LS_VERBOSE) << " -- error " << code;
+      SSLHandshakeError ssl_handshake_err = SSLHandshakeError::UNKNOWN;
+      int err_code = ERR_peek_last_error();
+      if (err_code != 0 && ERR_GET_REASON(err_code) == SSL_R_NO_SHARED_CIPHER) {
+        ssl_handshake_err = SSLHandshakeError::INCOMPATIBLE_CIPHERSUITE;
+      }
+      SignalSSLHandshakeError(ssl_handshake_err);
+      return (ssl_error != 0) ? ssl_error : -1;
+  }
+
+  return 0;
+}
+
+void OpenSSLStreamAdapter::Error(const char* context,
+                                 int err,
+                                 uint8_t alert,
+                                 bool signal) {
+  RTC_LOG(LS_WARNING) << "OpenSSLStreamAdapter::Error(" << context << ", "
+                      << err << ", " << static_cast<int>(alert) << ")";
+  state_ = SSL_ERROR;
+  ssl_error_code_ = err;
+  Cleanup(alert);
+  if (signal)
+    StreamAdapterInterface::OnEvent(stream(), SE_CLOSE, err);
+}
+
+void OpenSSLStreamAdapter::Cleanup(uint8_t alert) {
+  RTC_LOG(LS_INFO) << "Cleanup";
+
+  if (state_ != SSL_ERROR) {
+    state_ = SSL_CLOSED;
+    ssl_error_code_ = 0;
+  }
+
+  if (ssl_) {
+    int ret;
+// SSL_send_fatal_alert is only available in BoringSSL.
+#ifdef OPENSSL_IS_BORINGSSL
+    if (alert) {
+      ret = SSL_send_fatal_alert(ssl_, alert);
+      if (ret < 0) {
+        RTC_LOG(LS_WARNING) << "SSL_send_fatal_alert failed, error = "
+                            << SSL_get_error(ssl_, ret);
+      }
+    } else {
+#endif
+      ret = SSL_shutdown(ssl_);
+      if (ret < 0) {
+        RTC_LOG(LS_WARNING)
+            << "SSL_shutdown failed, error = " << SSL_get_error(ssl_, ret);
+      }
+#ifdef OPENSSL_IS_BORINGSSL
+    }
+#endif
+    SSL_free(ssl_);
+    ssl_ = nullptr;
+  }
+  if (ssl_ctx_) {
+    SSL_CTX_free(ssl_ctx_);
+    ssl_ctx_ = nullptr;
+  }
+  identity_.reset();
+  peer_cert_chain_.reset();
+
+  // Clear the DTLS timer
+  Thread::Current()->Clear(this, MSG_TIMEOUT);
+}
+
+
+void OpenSSLStreamAdapter::OnMessage(Message* msg) {
+  // Process our own messages and then pass others to the superclass
+  if (MSG_TIMEOUT == msg->message_id) {
+    RTC_LOG(LS_INFO) << "DTLS timeout expired";
+    DTLSv1_handle_timeout(ssl_);
+    ContinueSSL();
+  } else {
+    StreamInterface::OnMessage(msg);
+  }
+}
+
+SSL_CTX* OpenSSLStreamAdapter::SetupSSLContext() {
+  SSL_CTX* ctx = nullptr;
+
+#ifdef OPENSSL_IS_BORINGSSL
+    ctx = SSL_CTX_new(ssl_mode_ == SSL_MODE_DTLS ?
+        DTLS_method() : TLS_method());
+    // Version limiting for BoringSSL will be done below.
+#else
+  const SSL_METHOD* method;
+  switch (ssl_max_version_) {
+    case SSL_PROTOCOL_TLS_10:
+    case SSL_PROTOCOL_TLS_11:
+      // OpenSSL doesn't support setting min/max versions, so we always use
+      // (D)TLS 1.0 if a max. version below the max. available is requested.
+      if (ssl_mode_ == SSL_MODE_DTLS) {
+        if (role_ == SSL_CLIENT) {
+          method = DTLSv1_client_method();
+        } else {
+          method = DTLSv1_server_method();
+        }
+      } else {
+        if (role_ == SSL_CLIENT) {
+          method = TLSv1_client_method();
+        } else {
+          method = TLSv1_server_method();
+        }
+      }
+      break;
+    case SSL_PROTOCOL_TLS_12:
+    default:
+      if (ssl_mode_ == SSL_MODE_DTLS) {
+        if (role_ == SSL_CLIENT) {
+          method = DTLS_client_method();
+        } else {
+          method = DTLS_server_method();
+        }
+      } else {
+        if (role_ == SSL_CLIENT) {
+          method = TLS_client_method();
+        } else {
+          method = TLS_server_method();
+        }
+      }
+      break;
+  }
+  ctx = SSL_CTX_new(method);
+#endif  // OPENSSL_IS_BORINGSSL
+
+  if (ctx == nullptr)
+    return nullptr;
+
+#ifdef OPENSSL_IS_BORINGSSL
+  SSL_CTX_set_min_proto_version(ctx, ssl_mode_ == SSL_MODE_DTLS ?
+      DTLS1_VERSION : TLS1_VERSION);
+  switch (ssl_max_version_) {
+    case SSL_PROTOCOL_TLS_10:
+      SSL_CTX_set_max_proto_version(ctx, ssl_mode_ == SSL_MODE_DTLS ?
+          DTLS1_VERSION : TLS1_VERSION);
+      break;
+    case SSL_PROTOCOL_TLS_11:
+      SSL_CTX_set_max_proto_version(ctx, ssl_mode_ == SSL_MODE_DTLS ?
+          DTLS1_VERSION : TLS1_1_VERSION);
+      break;
+    case SSL_PROTOCOL_TLS_12:
+    default:
+      SSL_CTX_set_max_proto_version(ctx, ssl_mode_ == SSL_MODE_DTLS ?
+          DTLS1_2_VERSION : TLS1_2_VERSION);
+      break;
+  }
+  if (g_use_time_callback_for_testing) {
+    SSL_CTX_set_current_time_cb(ctx, &TimeCallbackForTesting);
+  }
+#endif
+
+  if (identity_ && !identity_->ConfigureIdentity(ctx)) {
+    SSL_CTX_free(ctx);
+    return nullptr;
+  }
+
+#if !defined(NDEBUG)
+  SSL_CTX_set_info_callback(ctx, OpenSSLAdapter::SSLInfoCallback);
+#endif
+
+  int mode = SSL_VERIFY_PEER;
+  if (client_auth_enabled()) {
+    // Require a certificate from the client.
+    // Note: Normally this is always true in production, but it may be disabled
+    // for testing purposes (e.g. SSLAdapter unit tests).
+    mode |= SSL_VERIFY_FAIL_IF_NO_PEER_CERT;
+  }
+
+  // Configure a custom certificate verification callback to check the peer
+  // certificate digest. Note the second argument to SSL_CTX_set_verify is to
+  // override individual errors in the default verification logic, which is not
+  // what we want here.
+  SSL_CTX_set_verify(ctx, mode, nullptr);
+  SSL_CTX_set_cert_verify_callback(ctx, SSLVerifyCallback, nullptr);
+
+  // Select list of available ciphers. Note that !SHA256 and !SHA384 only
+  // remove HMAC-SHA256 and HMAC-SHA384 cipher suites, not GCM cipher suites
+  // with SHA256 or SHA384 as the handshake hash.
+  // This matches the list of SSLClientSocketOpenSSL in Chromium.
+  SSL_CTX_set_cipher_list(
+      ctx, "DEFAULT:!NULL:!aNULL:!SHA256:!SHA384:!aECDH:!AESGCM+AES256:!aPSK");
+
+  if (!srtp_ciphers_.empty()) {
+    if (SSL_CTX_set_tlsext_use_srtp(ctx, srtp_ciphers_.c_str())) {
+      SSL_CTX_free(ctx);
+      return nullptr;
+    }
+  }
+
+  return ctx;
+}
+
+bool OpenSSLStreamAdapter::VerifyPeerCertificate() {
+  if (!has_peer_certificate_digest() || !peer_cert_chain_ ||
+      !peer_cert_chain_->GetSize()) {
+    RTC_LOG(LS_WARNING) << "Missing digest or peer certificate.";
+    return false;
+  }
+  const OpenSSLCertificate* leaf_cert =
+      static_cast<const OpenSSLCertificate*>(&peer_cert_chain_->Get(0));
+
+  unsigned char digest[EVP_MAX_MD_SIZE];
+  size_t digest_length;
+  if (!OpenSSLCertificate::ComputeDigest(
+          leaf_cert->x509(), peer_certificate_digest_algorithm_, digest,
+          sizeof(digest), &digest_length)) {
+    RTC_LOG(LS_WARNING) << "Failed to compute peer cert digest.";
+    return false;
+  }
+
+  Buffer computed_digest(digest, digest_length);
+  if (computed_digest != peer_certificate_digest_value_) {
+    RTC_LOG(LS_WARNING)
+        << "Rejected peer certificate due to mismatched digest.";
+    return false;
+  }
+  // Ignore any verification error if the digest matches, since there is no
+  // value in checking the validity of a self-signed cert issued by untrusted
+  // sources.
+  RTC_LOG(LS_INFO) << "Accepted peer certificate.";
+  peer_certificate_verified_ = true;
+  return true;
+}
+
+std::unique_ptr<SSLCertChain> OpenSSLStreamAdapter::GetPeerSSLCertChain()
+    const {
+  return peer_cert_chain_ ? peer_cert_chain_->UniqueCopy() : nullptr;
+}
+
+int OpenSSLStreamAdapter::SSLVerifyCallback(X509_STORE_CTX* store, void* arg) {
+  // Get our SSL structure and OpenSSLStreamAdapter from the store.
+  SSL* ssl = reinterpret_cast<SSL*>(
+      X509_STORE_CTX_get_ex_data(store, SSL_get_ex_data_X509_STORE_CTX_idx()));
+  OpenSSLStreamAdapter* stream =
+      reinterpret_cast<OpenSSLStreamAdapter*>(SSL_get_app_data(ssl));
+
+#if defined(OPENSSL_IS_BORINGSSL)
+  STACK_OF(X509)* chain = SSL_get_peer_full_cert_chain(ssl);
+  // Creates certificate chain.
+  std::vector<std::unique_ptr<SSLCertificate>> cert_chain;
+  for (X509* cert : chain) {
+    cert_chain.emplace_back(new OpenSSLCertificate(cert));
+  }
+  stream->peer_cert_chain_.reset(new SSLCertChain(std::move(cert_chain)));
+#else
+  // Record the peer's certificate.
+  X509* cert = SSL_get_peer_certificate(ssl);
+  stream->peer_cert_chain_.reset(
+      new SSLCertChain(new OpenSSLCertificate(cert)));
+  X509_free(cert);
+#endif
+
+  // If the peer certificate digest isn't known yet, we'll wait to verify
+  // until it's known, and for now just return a success status.
+  if (stream->peer_certificate_digest_algorithm_.empty()) {
+    RTC_LOG(LS_INFO) << "Waiting to verify certificate until digest is known.";
+    return 1;
+  }
+
+  if (!stream->VerifyPeerCertificate()) {
+    X509_STORE_CTX_set_error(store, X509_V_ERR_CERT_REJECTED);
+    return 0;
+  }
+
+  return 1;
+}
+
+bool OpenSSLStreamAdapter::IsBoringSsl() {
+#ifdef OPENSSL_IS_BORINGSSL
+  return true;
+#else
+  return false;
+#endif
+}
+
+#define CDEF(X) \
+  { static_cast<uint16_t>(TLS1_CK_##X & 0xffff), "TLS_" #X }
+
+struct cipher_list {
+  uint16_t cipher;
+  const char* cipher_str;
+};
+
+// TODO(torbjorng): Perhaps add more cipher suites to these lists.
+static const cipher_list OK_RSA_ciphers[] = {
+  CDEF(ECDHE_RSA_WITH_AES_128_CBC_SHA),
+  CDEF(ECDHE_RSA_WITH_AES_256_CBC_SHA),
+  CDEF(ECDHE_RSA_WITH_AES_128_GCM_SHA256),
+#ifdef TLS1_CK_ECDHE_RSA_WITH_AES_256_GCM_SHA256
+  CDEF(ECDHE_RSA_WITH_AES_256_GCM_SHA256),
+#endif
+#ifdef TLS1_CK_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
+  CDEF(ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256),
+#endif
+};
+
+static const cipher_list OK_ECDSA_ciphers[] = {
+  CDEF(ECDHE_ECDSA_WITH_AES_128_CBC_SHA),
+  CDEF(ECDHE_ECDSA_WITH_AES_256_CBC_SHA),
+  CDEF(ECDHE_ECDSA_WITH_AES_128_GCM_SHA256),
+#ifdef TLS1_CK_ECDHE_ECDSA_WITH_AES_256_GCM_SHA256
+  CDEF(ECDHE_ECDSA_WITH_AES_256_GCM_SHA256),
+#endif
+#ifdef TLS1_CK_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
+  CDEF(ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256),
+#endif
+};
+#undef CDEF
+
+bool OpenSSLStreamAdapter::IsAcceptableCipher(int cipher, KeyType key_type) {
+  if (key_type == KT_RSA) {
+    for (const cipher_list& c : OK_RSA_ciphers) {
+      if (cipher == c.cipher)
+        return true;
+    }
+  }
+
+  if (key_type == KT_ECDSA) {
+    for (const cipher_list& c : OK_ECDSA_ciphers) {
+      if (cipher == c.cipher)
+        return true;
+    }
+  }
+
+  return false;
+}
+
+bool OpenSSLStreamAdapter::IsAcceptableCipher(const std::string& cipher,
+                                              KeyType key_type) {
+  if (key_type == KT_RSA) {
+    for (const cipher_list& c : OK_RSA_ciphers) {
+      if (cipher == c.cipher_str)
+        return true;
+    }
+  }
+
+  if (key_type == KT_ECDSA) {
+    for (const cipher_list& c : OK_ECDSA_ciphers) {
+      if (cipher == c.cipher_str)
+        return true;
+    }
+  }
+
+  return false;
+}
+
+void OpenSSLStreamAdapter::enable_time_callback_for_testing() {
+  g_use_time_callback_for_testing = true;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/opensslstreamadapter.h b/rtc_base/opensslstreamadapter.h
new file mode 100644
index 0000000..97ab557
--- /dev/null
+++ b/rtc_base/opensslstreamadapter.h
@@ -0,0 +1,224 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_OPENSSLSTREAMADAPTER_H_
+#define RTC_BASE_OPENSSLSTREAMADAPTER_H_
+
+#include <string>
+#include <memory>
+#include <vector>
+
+#include "rtc_base/buffer.h"
+#include "rtc_base/opensslidentity.h"
+#include "rtc_base/sslstreamadapter.h"
+
+typedef struct ssl_st SSL;
+typedef struct ssl_ctx_st SSL_CTX;
+typedef struct ssl_cipher_st SSL_CIPHER;
+typedef struct x509_store_ctx_st X509_STORE_CTX;
+
+namespace rtc {
+
+// This class was written with OpenSSLAdapter (a socket adapter) as a
+// starting point. It has similar structure and functionality, but uses a
+// "peer-to-peer" mode, verifying the peer's certificate using a digest
+// sent over a secure signaling channel.
+//
+// Static methods to initialize and deinit the SSL library are in
+// OpenSSLAdapter. These should probably be moved out to a neutral class.
+//
+// In a few cases I have factored out some OpenSSLAdapter code into static
+// methods so it can be reused from this class. Eventually that code should
+// probably be moved to a common support class. Unfortunately there remain a
+// few duplicated sections of code. I have not done more restructuring because
+// I did not want to affect existing code that uses OpenSSLAdapter.
+//
+// This class does not support the SSL connection restart feature present in
+// OpenSSLAdapter. I am not entirely sure how the feature is useful and I am
+// not convinced that it works properly.
+//
+// This implementation is careful to disallow data exchange after an SSL error,
+// and it has an explicit SSL_CLOSED state. It should not be possible to send
+// any data in clear after one of the StartSSL methods has been called.
+
+// Look in sslstreamadapter.h for documentation of the methods.
+
+class OpenSSLIdentity;
+
+///////////////////////////////////////////////////////////////////////////////
+
+class OpenSSLStreamAdapter : public SSLStreamAdapter {
+ public:
+  explicit OpenSSLStreamAdapter(StreamInterface* stream);
+  ~OpenSSLStreamAdapter() override;
+
+  void SetIdentity(SSLIdentity* identity) override;
+
+  // Default argument is for compatibility
+  void SetServerRole(SSLRole role = SSL_SERVER) override;
+  bool SetPeerCertificateDigest(
+      const std::string& digest_alg,
+      const unsigned char* digest_val,
+      size_t digest_len,
+      SSLPeerCertificateDigestError* error = nullptr) override;
+
+  std::unique_ptr<SSLCertChain> GetPeerSSLCertChain() const override;
+
+  // Goes from state SSL_NONE to either SSL_CONNECTING or SSL_WAIT, depending
+  // on whether the underlying stream is already open or not.
+  int StartSSL() override;
+  void SetMode(SSLMode mode) override;
+  void SetMaxProtocolVersion(SSLProtocolVersion version) override;
+  void SetInitialRetransmissionTimeout(int timeout_ms) override;
+
+  StreamResult Read(void* data,
+                    size_t data_len,
+                    size_t* read,
+                    int* error) override;
+  StreamResult Write(const void* data,
+                     size_t data_len,
+                     size_t* written,
+                     int* error) override;
+  void Close() override;
+  StreamState GetState() const override;
+
+  // TODO(guoweis): Move this away from a static class method.
+  static std::string SslCipherSuiteToName(int crypto_suite);
+
+  bool GetSslCipherSuite(int* cipher) override;
+
+  int GetSslVersion() const override;
+
+  // Key Extractor interface
+  bool ExportKeyingMaterial(const std::string& label,
+                            const uint8_t* context,
+                            size_t context_len,
+                            bool use_context,
+                            uint8_t* result,
+                            size_t result_len) override;
+
+  // DTLS-SRTP interface
+  bool SetDtlsSrtpCryptoSuites(const std::vector<int>& crypto_suites) override;
+  bool GetDtlsSrtpCryptoSuite(int* crypto_suite) override;
+
+  bool IsTlsConnected() override;
+
+  // Capabilities interfaces.
+  static bool IsBoringSsl();
+
+  static bool IsAcceptableCipher(int cipher, KeyType key_type);
+  static bool IsAcceptableCipher(const std::string& cipher, KeyType key_type);
+
+  // Use our timeutils.h source of timing in BoringSSL, allowing us to test
+  // using a fake clock.
+  static void enable_time_callback_for_testing();
+
+ protected:
+  void OnEvent(StreamInterface* stream, int events, int err) override;
+
+ private:
+  enum SSLState {
+    // Before calling one of the StartSSL methods, data flows
+    // in clear text.
+    SSL_NONE,
+    SSL_WAIT,  // waiting for the stream to open to start SSL negotiation
+    SSL_CONNECTING,  // SSL negotiation in progress
+    SSL_CONNECTED,  // SSL stream successfully established
+    SSL_ERROR,  // some SSL error occurred, stream is closed
+    SSL_CLOSED  // Clean close
+  };
+
+  enum { MSG_TIMEOUT = MSG_MAX+1};
+
+  // The following three methods return 0 on success and a negative
+  // error code on failure. The error code may be from OpenSSL or -1
+  // on some other error cases, so it can't really be interpreted
+  // unfortunately.
+
+  // Prepare SSL library, state is SSL_CONNECTING.
+  int BeginSSL();
+  // Perform SSL negotiation steps.
+  int ContinueSSL();
+
+  // Error handler helper. signal is given as true for errors in
+  // asynchronous contexts (when an error method was not returned
+  // through some other method), and in that case an SE_CLOSE event is
+  // raised on the stream with the specified error.
+  // A 0 error means a graceful close, otherwise there is not really enough
+  // context to interpret the error code.
+  // |alert| indicates an alert description (one of the SSL_AD constants) to
+  // send to the remote endpoint when closing the association. If 0, a normal
+  // shutdown will be performed.
+  void Error(const char* context, int err, uint8_t alert, bool signal);
+  void Cleanup(uint8_t alert);
+
+  // Override MessageHandler
+  void OnMessage(Message* msg) override;
+
+  // Flush the input buffers by reading left bytes (for DTLS)
+  void FlushInput(unsigned int left);
+
+  // SSL library configuration
+  SSL_CTX* SetupSSLContext();
+  // Verify the peer certificate matches the signaled digest.
+  bool VerifyPeerCertificate();
+  // SSL certificate verification callback. See
+  // SSL_CTX_set_cert_verify_callback.
+  static int SSLVerifyCallback(X509_STORE_CTX* store, void* arg);
+
+  bool waiting_to_verify_peer_certificate() const {
+    return client_auth_enabled() && !peer_certificate_verified_;
+  }
+
+  bool has_peer_certificate_digest() const {
+    return !peer_certificate_digest_algorithm_.empty() &&
+           !peer_certificate_digest_value_.empty();
+  }
+
+  SSLState state_;
+  SSLRole role_;
+  int ssl_error_code_;  // valid when state_ == SSL_ERROR or SSL_CLOSED
+  // Whether the SSL negotiation is blocked on needing to read or
+  // write to the wrapped stream.
+  bool ssl_read_needs_write_;
+  bool ssl_write_needs_read_;
+
+  SSL* ssl_;
+  SSL_CTX* ssl_ctx_;
+
+  // Our key and certificate.
+  std::unique_ptr<OpenSSLIdentity> identity_;
+  // The certificate chain that the peer presented. Initially null, until the
+  // connection is established.
+  std::unique_ptr<SSLCertChain> peer_cert_chain_;
+  bool peer_certificate_verified_ = false;
+  // The digest of the certificate that the peer must present.
+  Buffer peer_certificate_digest_value_;
+  std::string peer_certificate_digest_algorithm_;
+
+  // The DtlsSrtp ciphers
+  std::string srtp_ciphers_;
+
+  // Do DTLS or not
+  SSLMode ssl_mode_;
+
+  // Max. allowed protocol version
+  SSLProtocolVersion ssl_max_version_;
+
+  // A 50-ms initial timeout ensures rapid setup on fast connections, but may
+  // be too aggressive for low bandwidth links.
+  int dtls_handshake_timeout_ms_ = 50;
+};
+
+/////////////////////////////////////////////////////////////////////////////
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_OPENSSLSTREAMADAPTER_H_
diff --git a/rtc_base/optionsfile.cc b/rtc_base/optionsfile.cc
new file mode 100644
index 0000000..c3b6a6a
--- /dev/null
+++ b/rtc_base/optionsfile.cc
@@ -0,0 +1,183 @@
+/*
+ *  Copyright 2008 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/optionsfile.h"
+
+#include <ctype.h>
+
+#include "rtc_base/logging.h"
+#include "rtc_base/stream.h"
+#include "rtc_base/stringencode.h"
+
+namespace rtc {
+
+OptionsFile::OptionsFile(const std::string &path) : path_(path) {
+}
+
+OptionsFile::~OptionsFile() = default;
+
+bool OptionsFile::Load() {
+  options_.clear();
+  // Open file.
+  FileStream stream;
+  int err;
+  if (!stream.Open(path_, "r", &err)) {
+    RTC_LOG_F(LS_WARNING) << "Could not open file, err=" << err;
+    // We do not consider this an error because we expect there to be no file
+    // until the user saves a setting.
+    return true;
+  }
+  // Read in all its data.
+  std::string line;
+  StreamResult res;
+  for (;;) {
+    res = stream.ReadLine(&line);
+    if (res != SR_SUCCESS) {
+      break;
+    }
+    size_t equals_pos = line.find('=');
+    if (equals_pos == std::string::npos) {
+      // We do not consider this an error. Instead we ignore the line and
+      // keep going.
+      RTC_LOG_F(LS_WARNING) << "Ignoring malformed line in " << path_;
+      continue;
+    }
+    std::string key(line, 0, equals_pos);
+    std::string value(line, equals_pos + 1, line.length() - (equals_pos + 1));
+    options_[key] = value;
+  }
+  if (res != SR_EOS) {
+    RTC_LOG_F(LS_ERROR) << "Error when reading from file";
+    return false;
+  } else {
+    return true;
+  }
+}
+
+bool OptionsFile::Save() {
+  // Open file.
+  FileStream stream;
+  int err;
+  if (!stream.Open(path_, "w", &err)) {
+    RTC_LOG_F(LS_ERROR) << "Could not open file, err=" << err;
+    return false;
+  }
+  // Write out all the data.
+  StreamResult res = SR_SUCCESS;
+  size_t written;
+  int error;
+  for (OptionsMap::const_iterator i = options_.begin(); i != options_.end();
+       ++i) {
+    res = stream.WriteAll(i->first.c_str(), i->first.length(), &written,
+        &error);
+    if (res != SR_SUCCESS) {
+      break;
+    }
+    res = stream.WriteAll("=", 1, &written, &error);
+    if (res != SR_SUCCESS) {
+      break;
+    }
+    res = stream.WriteAll(i->second.c_str(), i->second.length(), &written,
+        &error);
+    if (res != SR_SUCCESS) {
+      break;
+    }
+    res = stream.WriteAll("\n", 1, &written, &error);
+    if (res != SR_SUCCESS) {
+      break;
+    }
+  }
+  if (res != SR_SUCCESS) {
+    RTC_LOG_F(LS_ERROR) << "Unable to write to file";
+    return false;
+  } else {
+    return true;
+  }
+}
+
+bool OptionsFile::IsLegalName(const std::string &name) {
+  for (size_t pos = 0; pos < name.length(); ++pos) {
+    if (name[pos] == '\n' || name[pos] == '\\' || name[pos] == '=') {
+      // Illegal character.
+      RTC_LOG(LS_WARNING) << "Ignoring operation for illegal option " << name;
+      return false;
+    }
+  }
+  return true;
+}
+
+bool OptionsFile::IsLegalValue(const std::string &value) {
+  for (size_t pos = 0; pos < value.length(); ++pos) {
+    if (value[pos] == '\n' || value[pos] == '\\') {
+      // Illegal character.
+      RTC_LOG(LS_WARNING) << "Ignoring operation for illegal value " << value;
+      return false;
+    }
+  }
+  return true;
+}
+
+bool OptionsFile::GetStringValue(const std::string& option,
+                                 std::string *out_val) const {
+  RTC_LOG(LS_VERBOSE) << "OptionsFile::GetStringValue " << option;
+  if (!IsLegalName(option)) {
+    return false;
+  }
+  OptionsMap::const_iterator i = options_.find(option);
+  if (i == options_.end()) {
+    return false;
+  }
+  *out_val = i->second;
+  return true;
+}
+
+bool OptionsFile::GetIntValue(const std::string& option,
+                              int *out_val) const {
+  RTC_LOG(LS_VERBOSE) << "OptionsFile::GetIntValue " << option;
+  if (!IsLegalName(option)) {
+    return false;
+  }
+  OptionsMap::const_iterator i = options_.find(option);
+  if (i == options_.end()) {
+    return false;
+  }
+  return FromString(i->second, out_val);
+}
+
+bool OptionsFile::SetStringValue(const std::string& option,
+                                 const std::string& value) {
+  RTC_LOG(LS_VERBOSE) << "OptionsFile::SetStringValue " << option << ":"
+                      << value;
+  if (!IsLegalName(option) || !IsLegalValue(value)) {
+    return false;
+  }
+  options_[option] = value;
+  return true;
+}
+
+bool OptionsFile::SetIntValue(const std::string& option,
+                              int value) {
+  RTC_LOG(LS_VERBOSE) << "OptionsFile::SetIntValue " << option << ":" << value;
+  if (!IsLegalName(option)) {
+    return false;
+  }
+  return ToString(value, &options_[option]);
+}
+
+bool OptionsFile::RemoveValue(const std::string& option) {
+  RTC_LOG(LS_VERBOSE) << "OptionsFile::RemoveValue " << option;
+  if (!IsLegalName(option)) {
+    return false;
+  }
+  options_.erase(option);
+  return true;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/optionsfile.h b/rtc_base/optionsfile.h
new file mode 100644
index 0000000..90976ac
--- /dev/null
+++ b/rtc_base/optionsfile.h
@@ -0,0 +1,50 @@
+/*
+ *  Copyright 2008 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_OPTIONSFILE_H_
+#define RTC_BASE_OPTIONSFILE_H_
+
+#include <map>
+#include <string>
+
+namespace rtc {
+
+// Implements storage of simple options in a text file on disk. This is
+// cross-platform, but it is intended mostly for Linux where there is no
+// first-class options storage system.
+class OptionsFile {
+ public:
+  OptionsFile(const std::string &path);
+  ~OptionsFile();
+
+  // Loads the file from disk, overwriting the in-memory values.
+  bool Load();
+  // Saves the contents in memory, overwriting the on-disk values.
+  bool Save();
+
+  bool GetStringValue(const std::string& option, std::string* out_val) const;
+  bool GetIntValue(const std::string& option, int* out_val) const;
+  bool SetStringValue(const std::string& option, const std::string& val);
+  bool SetIntValue(const std::string& option, int val);
+  bool RemoveValue(const std::string& option);
+
+ private:
+  typedef std::map<std::string, std::string> OptionsMap;
+
+  static bool IsLegalName(const std::string &name);
+  static bool IsLegalValue(const std::string &value);
+
+  std::string path_;
+  OptionsMap options_;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_OPTIONSFILE_H_
diff --git a/rtc_base/optionsfile_unittest.cc b/rtc_base/optionsfile_unittest.cc
new file mode 100644
index 0000000..d2e3cbd
--- /dev/null
+++ b/rtc_base/optionsfile_unittest.cc
@@ -0,0 +1,180 @@
+/*
+ *  Copyright 2008 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/optionsfile.h"
+#include "test/testsupport/fileutils.h"
+
+namespace rtc {
+
+static const std::string kTestOptionA = "test-option-a";
+static const std::string kTestOptionB = "test-option-b";
+static const std::string kTestString1 = "a string";
+static const std::string kTestString2 = "different string";
+static const std::string kOptionWithEquals = "foo=bar";
+static const std::string kOptionWithNewline = "foo\nbar";
+static const std::string kValueWithEquals = "baz=quux";
+static const std::string kValueWithNewline = "baz\nquux";
+static const std::string kEmptyString = "";
+static const char kOptionWithUtf8[] = {'O', 'p', 't', '\302', '\256', 'i', 'o',
+    'n', '\342', '\204', '\242', '\0'};  // Opt(R)io(TM).
+static const char kValueWithUtf8[] = {'V', 'a', 'l', '\302', '\256', 'v', 'e',
+    '\342', '\204', '\242', '\0'};  // Val(R)ue(TM).
+static int kTestInt1 = 12345;
+static int kTestInt2 = 67890;
+static int kNegInt = -634;
+static int kZero = 0;
+
+#if defined (WEBRTC_ANDROID)
+// Fails on Android: https://bugs.chromium.org/p/webrtc/issues/detail?id=4364.
+#define MAYBE_OptionsFileTest DISABLED_OptionsFileTest
+#else
+#define MAYBE_OptionsFileTest OptionsFileTest
+#endif
+
+class MAYBE_OptionsFileTest : public testing::Test {
+ public:
+  MAYBE_OptionsFileTest() {
+    test_file_ =
+        webrtc::test::TempFilename(webrtc::test::OutputPath(), ".testfile");
+    OpenStore();
+  }
+
+  ~MAYBE_OptionsFileTest() override {
+    webrtc::test::RemoveFile(test_file_);
+  }
+
+ protected:
+  void OpenStore() {
+    store_.reset(new OptionsFile(test_file_));
+  }
+
+  std::unique_ptr<OptionsFile> store_;
+
+ private:
+  std::string test_file_;
+};
+
+TEST_F(MAYBE_OptionsFileTest, GetSetString) {
+  // Clear contents of the file on disk.
+  EXPECT_TRUE(store_->Save());
+  std::string out1, out2;
+  EXPECT_FALSE(store_->GetStringValue(kTestOptionA, &out1));
+  EXPECT_FALSE(store_->GetStringValue(kTestOptionB, &out2));
+  EXPECT_TRUE(store_->SetStringValue(kTestOptionA, kTestString1));
+  EXPECT_TRUE(store_->Save());
+  EXPECT_TRUE(store_->Load());
+  EXPECT_TRUE(store_->SetStringValue(kTestOptionB, kTestString2));
+  EXPECT_TRUE(store_->Save());
+  EXPECT_TRUE(store_->Load());
+  EXPECT_TRUE(store_->GetStringValue(kTestOptionA, &out1));
+  EXPECT_TRUE(store_->GetStringValue(kTestOptionB, &out2));
+  EXPECT_EQ(kTestString1, out1);
+  EXPECT_EQ(kTestString2, out2);
+  EXPECT_TRUE(store_->RemoveValue(kTestOptionA));
+  EXPECT_TRUE(store_->Save());
+  EXPECT_TRUE(store_->Load());
+  EXPECT_TRUE(store_->RemoveValue(kTestOptionB));
+  EXPECT_TRUE(store_->Save());
+  EXPECT_TRUE(store_->Load());
+  EXPECT_FALSE(store_->GetStringValue(kTestOptionA, &out1));
+  EXPECT_FALSE(store_->GetStringValue(kTestOptionB, &out2));
+}
+
+TEST_F(MAYBE_OptionsFileTest, GetSetInt) {
+  // Clear contents of the file on disk.
+  EXPECT_TRUE(store_->Save());
+  int out1, out2;
+  EXPECT_FALSE(store_->GetIntValue(kTestOptionA, &out1));
+  EXPECT_FALSE(store_->GetIntValue(kTestOptionB, &out2));
+  EXPECT_TRUE(store_->SetIntValue(kTestOptionA, kTestInt1));
+  EXPECT_TRUE(store_->Save());
+  EXPECT_TRUE(store_->Load());
+  EXPECT_TRUE(store_->SetIntValue(kTestOptionB, kTestInt2));
+  EXPECT_TRUE(store_->Save());
+  EXPECT_TRUE(store_->Load());
+  EXPECT_TRUE(store_->GetIntValue(kTestOptionA, &out1));
+  EXPECT_TRUE(store_->GetIntValue(kTestOptionB, &out2));
+  EXPECT_EQ(kTestInt1, out1);
+  EXPECT_EQ(kTestInt2, out2);
+  EXPECT_TRUE(store_->RemoveValue(kTestOptionA));
+  EXPECT_TRUE(store_->Save());
+  EXPECT_TRUE(store_->Load());
+  EXPECT_TRUE(store_->RemoveValue(kTestOptionB));
+  EXPECT_TRUE(store_->Save());
+  EXPECT_TRUE(store_->Load());
+  EXPECT_FALSE(store_->GetIntValue(kTestOptionA, &out1));
+  EXPECT_FALSE(store_->GetIntValue(kTestOptionB, &out2));
+  EXPECT_TRUE(store_->SetIntValue(kTestOptionA, kNegInt));
+  EXPECT_TRUE(store_->GetIntValue(kTestOptionA, &out1));
+  EXPECT_EQ(kNegInt, out1);
+  EXPECT_TRUE(store_->SetIntValue(kTestOptionA, kZero));
+  EXPECT_TRUE(store_->GetIntValue(kTestOptionA, &out1));
+  EXPECT_EQ(kZero, out1);
+}
+
+TEST_F(MAYBE_OptionsFileTest, Persist) {
+  // Clear contents of the file on disk.
+  EXPECT_TRUE(store_->Save());
+  EXPECT_TRUE(store_->SetStringValue(kTestOptionA, kTestString1));
+  EXPECT_TRUE(store_->SetIntValue(kTestOptionB, kNegInt));
+  EXPECT_TRUE(store_->Save());
+
+  // Load the saved contents from above.
+  OpenStore();
+  EXPECT_TRUE(store_->Load());
+  std::string out1;
+  int out2;
+  EXPECT_TRUE(store_->GetStringValue(kTestOptionA, &out1));
+  EXPECT_TRUE(store_->GetIntValue(kTestOptionB, &out2));
+  EXPECT_EQ(kTestString1, out1);
+  EXPECT_EQ(kNegInt, out2);
+}
+
+TEST_F(MAYBE_OptionsFileTest, SpecialCharacters) {
+  // Clear contents of the file on disk.
+  EXPECT_TRUE(store_->Save());
+  std::string out;
+  EXPECT_FALSE(store_->SetStringValue(kOptionWithEquals, kTestString1));
+  EXPECT_FALSE(store_->GetStringValue(kOptionWithEquals, &out));
+  EXPECT_FALSE(store_->SetStringValue(kOptionWithNewline, kTestString1));
+  EXPECT_FALSE(store_->GetStringValue(kOptionWithNewline, &out));
+  EXPECT_TRUE(store_->SetStringValue(kOptionWithUtf8, kValueWithUtf8));
+  EXPECT_TRUE(store_->SetStringValue(kTestOptionA, kTestString1));
+  EXPECT_TRUE(store_->Save());
+  EXPECT_TRUE(store_->Load());
+  EXPECT_TRUE(store_->GetStringValue(kTestOptionA, &out));
+  EXPECT_EQ(kTestString1, out);
+  EXPECT_TRUE(store_->GetStringValue(kOptionWithUtf8, &out));
+  EXPECT_EQ(kValueWithUtf8, out);
+  EXPECT_FALSE(store_->SetStringValue(kTestOptionA, kValueWithNewline));
+  EXPECT_TRUE(store_->GetStringValue(kTestOptionA, &out));
+  EXPECT_EQ(kTestString1, out);
+  EXPECT_TRUE(store_->SetStringValue(kTestOptionA, kValueWithEquals));
+  EXPECT_TRUE(store_->Save());
+  EXPECT_TRUE(store_->Load());
+  EXPECT_TRUE(store_->GetStringValue(kTestOptionA, &out));
+  EXPECT_EQ(kValueWithEquals, out);
+  EXPECT_TRUE(store_->SetStringValue(kEmptyString, kTestString2));
+  EXPECT_TRUE(store_->Save());
+  EXPECT_TRUE(store_->Load());
+  EXPECT_TRUE(store_->GetStringValue(kEmptyString, &out));
+  EXPECT_EQ(kTestString2, out);
+  EXPECT_TRUE(store_->SetStringValue(kTestOptionB, kEmptyString));
+  EXPECT_TRUE(store_->Save());
+  EXPECT_TRUE(store_->Load());
+  EXPECT_TRUE(store_->GetStringValue(kTestOptionB, &out));
+  EXPECT_EQ(kEmptyString, out);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/pathutils.cc b/rtc_base/pathutils.cc
new file mode 100644
index 0000000..b85d14f
--- /dev/null
+++ b/rtc_base/pathutils.cc
@@ -0,0 +1,189 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#if defined(WEBRTC_WIN)
+#include <windows.h>
+#include <shellapi.h>
+#include <shlobj.h>
+#include <tchar.h>
+#endif  // WEBRTC_WIN
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/pathutils.h"
+#include "rtc_base/stringutils.h"
+
+namespace rtc {
+
+static const char EMPTY_STR[] = "";
+
+// EXT_DELIM separates a file basename from extension
+const char EXT_DELIM = '.';
+
+// FOLDER_DELIMS separate folder segments and the filename
+const char* const FOLDER_DELIMS = "/\\";
+
+// DEFAULT_FOLDER_DELIM is the preferred delimiter for this platform
+#ifdef WEBRTC_WIN
+const char DEFAULT_FOLDER_DELIM = '\\';
+#else  // !WEBRTC_WIN
+const char DEFAULT_FOLDER_DELIM = '/';
+#endif  // !WEBRTC_WIN
+
+///////////////////////////////////////////////////////////////////////////////
+// Pathname - parsing of pathnames into components, and vice versa
+///////////////////////////////////////////////////////////////////////////////
+
+bool Pathname::IsFolderDelimiter(char ch) {
+  return (nullptr != ::strchr(FOLDER_DELIMS, ch));
+}
+
+char Pathname::DefaultFolderDelimiter() {
+  return DEFAULT_FOLDER_DELIM;
+}
+
+Pathname::Pathname()
+    : folder_delimiter_(DEFAULT_FOLDER_DELIM) {
+}
+
+Pathname::Pathname(const Pathname&) = default;
+Pathname::Pathname(Pathname&&) = default;
+
+Pathname::Pathname(const std::string& pathname)
+    : folder_delimiter_(DEFAULT_FOLDER_DELIM) {
+  SetPathname(pathname);
+}
+
+Pathname::Pathname(const std::string& folder, const std::string& filename)
+    : folder_delimiter_(DEFAULT_FOLDER_DELIM) {
+  SetPathname(folder, filename);
+}
+
+Pathname& Pathname::operator=(const Pathname&) = default;
+Pathname& Pathname::operator=(Pathname&&) = default;
+
+void Pathname::Normalize() {
+  for (size_t i=0; i<folder_.length(); ++i) {
+    if (IsFolderDelimiter(folder_[i])) {
+      folder_[i] = folder_delimiter_;
+    }
+  }
+}
+
+void Pathname::clear() {
+  folder_.clear();
+  basename_.clear();
+  extension_.clear();
+}
+
+bool Pathname::empty() const {
+  return folder_.empty() && basename_.empty() && extension_.empty();
+}
+
+std::string Pathname::pathname() const {
+  std::string pathname(folder_);
+  pathname.append(basename_);
+  pathname.append(extension_);
+  if (pathname.empty()) {
+    // Instead of the empty pathname, return the current working directory.
+    pathname.push_back('.');
+    pathname.push_back(folder_delimiter_);
+  }
+  return pathname;
+}
+
+void Pathname::SetPathname(const std::string& pathname) {
+  std::string::size_type pos = pathname.find_last_of(FOLDER_DELIMS);
+  if (pos != std::string::npos) {
+    SetFolder(pathname.substr(0, pos + 1));
+    SetFilename(pathname.substr(pos + 1));
+  } else {
+    SetFolder(EMPTY_STR);
+    SetFilename(pathname);
+  }
+}
+
+void Pathname::SetPathname(const std::string& folder,
+                           const std::string& filename) {
+  SetFolder(folder);
+  SetFilename(filename);
+}
+
+std::string Pathname::folder() const {
+  return folder_;
+}
+
+std::string Pathname::parent_folder() const {
+  std::string::size_type pos = std::string::npos;
+  if (folder_.size() >= 2) {
+    pos = folder_.find_last_of(FOLDER_DELIMS, folder_.length() - 2);
+  }
+  if (pos != std::string::npos) {
+    return folder_.substr(0, pos + 1);
+  } else {
+    return EMPTY_STR;
+  }
+}
+
+void Pathname::SetFolder(const std::string& folder) {
+  folder_.assign(folder);
+  // Ensure folder ends in a path delimiter
+  if (!folder_.empty() && !IsFolderDelimiter(folder_[folder_.length()-1])) {
+    folder_.push_back(folder_delimiter_);
+  }
+}
+
+void Pathname::AppendFolder(const std::string& folder) {
+  folder_.append(folder);
+  // Ensure folder ends in a path delimiter
+  if (!folder_.empty() && !IsFolderDelimiter(folder_[folder_.length()-1])) {
+    folder_.push_back(folder_delimiter_);
+  }
+}
+
+bool Pathname::SetBasename(const std::string& basename) {
+  if(basename.find_first_of(FOLDER_DELIMS) != std::string::npos) {
+    return false;
+  }
+  basename_.assign(basename);
+  return true;
+}
+
+bool Pathname::SetExtension(const std::string& extension) {
+  if (extension.find_first_of(FOLDER_DELIMS) != std::string::npos ||
+    extension.find_first_of(EXT_DELIM, 1) != std::string::npos) {
+      return false;
+  }
+  extension_.assign(extension);
+  // Ensure extension begins with the extension delimiter
+  if (!extension_.empty() && (extension_[0] != EXT_DELIM)) {
+    extension_.insert(extension_.begin(), EXT_DELIM);
+  }
+  return true;
+}
+
+std::string Pathname::filename() const {
+  std::string filename(basename_);
+  filename.append(extension_);
+  return filename;
+}
+
+bool Pathname::SetFilename(const std::string& filename) {
+  std::string::size_type pos = filename.rfind(EXT_DELIM);
+  if ((pos == std::string::npos) || (pos == 0)) {
+    return SetExtension(EMPTY_STR) && SetBasename(filename);
+  } else {
+    return SetExtension(filename.substr(pos)) && SetBasename(filename.substr(0, pos));
+  }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+} // namespace rtc
diff --git a/rtc_base/pathutils.h b/rtc_base/pathutils.h
new file mode 100644
index 0000000..b66cf18
--- /dev/null
+++ b/rtc_base/pathutils.h
@@ -0,0 +1,93 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_PATHUTILS_H_
+#define RTC_BASE_PATHUTILS_H_
+
+#include <string>
+
+#include "rtc_base/checks.h"
+
+namespace rtc {
+
+///////////////////////////////////////////////////////////////////////////////
+// Pathname - parsing of pathnames into components, and vice versa.
+//
+// To establish consistent terminology, a filename never contains a folder
+// component.  A folder never contains a filename.  A pathname may include
+// a folder and/or filename component.  Here are some examples:
+//
+//   pathname()      /home/john/example.txt
+//   folder()        /home/john/
+//   filename()                 example.txt
+//   parent_folder() /home/
+//   folder_name()         john/
+//   basename()                 example
+//   extension()                       .txt
+//
+// Basename may begin, end, and/or include periods, but no folder delimiters.
+// If extension exists, it consists of a period followed by zero or more
+// non-period/non-delimiter characters, and basename is non-empty.
+///////////////////////////////////////////////////////////////////////////////
+
+class Pathname {
+public:
+  // Folder delimiters are slash and backslash
+  static bool IsFolderDelimiter(char ch);
+  static char DefaultFolderDelimiter();
+
+  Pathname();
+  Pathname(const Pathname&);
+  Pathname(Pathname&&);
+  Pathname(const std::string& pathname);
+  Pathname(const std::string& folder, const std::string& filename);
+
+  Pathname& operator=(const Pathname&);
+  Pathname& operator=(Pathname&&);
+
+  // Normalize changes all folder delimiters to folder_delimiter()
+  void Normalize();
+
+  // Reset to the empty pathname
+  void clear();
+
+  // Returns true if the pathname is empty.  Note: this->pathname().empty()
+  // is always false.
+  bool empty() const;
+
+  // Returns the folder and filename components.  If the pathname is empty,
+  // returns a string representing the current directory (as a relative path,
+  // i.e., ".").
+  std::string pathname() const;
+  void SetPathname(const std::string& pathname);
+  void SetPathname(const std::string& folder, const std::string& filename);
+
+  std::string folder() const;
+  std::string parent_folder() const;
+  // SetFolder and AppendFolder will append a folder delimiter, if needed.
+  void SetFolder(const std::string& folder);
+  void AppendFolder(const std::string& folder);
+
+  bool SetBasename(const std::string& basename);
+
+  // SetExtension will prefix a period, if needed.
+  bool SetExtension(const std::string& extension);
+
+  std::string filename() const;
+  bool SetFilename(const std::string& filename);
+
+private:
+  std::string folder_, basename_, extension_;
+  char folder_delimiter_;
+};
+
+}  // namespace rtc
+
+#endif // RTC_BASE_PATHUTILS_H_
diff --git a/rtc_base/pathutils_unittest.cc b/rtc_base/pathutils_unittest.cc
new file mode 100644
index 0000000..cbd33ed
--- /dev/null
+++ b/rtc_base/pathutils_unittest.cc
@@ -0,0 +1,45 @@
+/*
+ *  Copyright 2007 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/pathutils.h"
+#include "rtc_base/gunit.h"
+
+TEST(Pathname, ReturnsDotForEmptyPathname) {
+  const std::string kCWD =
+      std::string(".") + rtc::Pathname::DefaultFolderDelimiter();
+
+  rtc::Pathname path("/", "");
+  EXPECT_FALSE(path.empty());
+  EXPECT_FALSE(path.folder().empty());
+  EXPECT_TRUE (path.filename().empty());
+  EXPECT_FALSE(path.pathname().empty());
+  EXPECT_EQ(std::string("/"), path.pathname());
+
+  path.SetPathname("", "foo");
+  EXPECT_FALSE(path.empty());
+  EXPECT_TRUE (path.folder().empty());
+  EXPECT_FALSE(path.filename().empty());
+  EXPECT_FALSE(path.pathname().empty());
+  EXPECT_EQ(std::string("foo"), path.pathname());
+
+  path.SetPathname("", "");
+  EXPECT_TRUE (path.empty());
+  EXPECT_TRUE (path.folder().empty());
+  EXPECT_TRUE (path.filename().empty());
+  EXPECT_FALSE(path.pathname().empty());
+  EXPECT_EQ(kCWD, path.pathname());
+
+  path.SetPathname(kCWD, "");
+  EXPECT_FALSE(path.empty());
+  EXPECT_FALSE(path.folder().empty());
+  EXPECT_TRUE (path.filename().empty());
+  EXPECT_FALSE(path.pathname().empty());
+  EXPECT_EQ(kCWD, path.pathname());
+}
diff --git a/rtc_base/physicalsocketserver.cc b/rtc_base/physicalsocketserver.cc
new file mode 100644
index 0000000..0d5fc52
--- /dev/null
+++ b/rtc_base/physicalsocketserver.cc
@@ -0,0 +1,2007 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "rtc_base/physicalsocketserver.h"
+
+#if defined(_MSC_VER) && _MSC_VER < 1300
+#pragma warning(disable:4786)
+#endif
+
+#ifdef MEMORY_SANITIZER
+#include <sanitizer/msan_interface.h>
+#endif
+
+#if defined(WEBRTC_POSIX)
+#include <string.h>
+#include <fcntl.h>
+#if defined(WEBRTC_USE_EPOLL)
+// "poll" will be used to wait for the signal dispatcher.
+#include <poll.h>
+#endif
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <sys/select.h>
+#include <unistd.h>
+#include <signal.h>
+#endif
+
+#if defined(WEBRTC_WIN)
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#undef SetPort
+#endif
+
+#include <errno.h>
+
+#include <algorithm>
+#include <map>
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/basictypes.h"
+#include "rtc_base/byteorder.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/networkmonitor.h"
+#include "rtc_base/nullsocketserver.h"
+#include "rtc_base/timeutils.h"
+#include "rtc_base/win32socketinit.h"
+
+#if defined(WEBRTC_WIN)
+#define LAST_SYSTEM_ERROR (::GetLastError())
+#elif defined(__native_client__) && __native_client__
+#define LAST_SYSTEM_ERROR (0)
+#elif defined(WEBRTC_POSIX)
+#define LAST_SYSTEM_ERROR (errno)
+#endif  // WEBRTC_WIN
+
+#if defined(WEBRTC_POSIX)
+#include <netinet/tcp.h>  // for TCP_NODELAY
+#define IP_MTU 14 // Until this is integrated from linux/in.h to netinet/in.h
+typedef void* SockOptArg;
+
+#endif  // WEBRTC_POSIX
+
+#if defined(WEBRTC_POSIX) && !defined(WEBRTC_MAC) && !defined(__native_client__)
+
+int64_t GetSocketRecvTimestamp(int socket) {
+  struct timeval tv_ioctl;
+  int ret = ioctl(socket, SIOCGSTAMP, &tv_ioctl);
+  if (ret != 0)
+    return -1;
+  int64_t timestamp =
+      rtc::kNumMicrosecsPerSec * static_cast<int64_t>(tv_ioctl.tv_sec) +
+      static_cast<int64_t>(tv_ioctl.tv_usec);
+  return timestamp;
+}
+
+#else
+
+int64_t GetSocketRecvTimestamp(int socket) {
+  return -1;
+}
+#endif
+
+#if defined(WEBRTC_WIN)
+typedef char* SockOptArg;
+#endif
+
+#if defined(WEBRTC_USE_EPOLL)
+// POLLRDHUP / EPOLLRDHUP are only defined starting with Linux 2.6.17.
+#if !defined(POLLRDHUP)
+#define POLLRDHUP 0x2000
+#endif
+#if !defined(EPOLLRDHUP)
+#define EPOLLRDHUP 0x2000
+#endif
+#endif
+
+namespace rtc {
+
+std::unique_ptr<SocketServer> SocketServer::CreateDefault() {
+#if defined(__native_client__)
+  return std::unique_ptr<SocketServer>(new rtc::NullSocketServer);
+#else
+  return std::unique_ptr<SocketServer>(new rtc::PhysicalSocketServer);
+#endif
+}
+
+#if defined(WEBRTC_WIN)
+// Standard MTUs, from RFC 1191
+const uint16_t PACKET_MAXIMUMS[] = {
+    65535,  // Theoretical maximum, Hyperchannel
+    32000,  // Nothing
+    17914,  // 16Mb IBM Token Ring
+    8166,   // IEEE 802.4
+    // 4464,   // IEEE 802.5 (4Mb max)
+    4352,   // FDDI
+    // 2048,   // Wideband Network
+    2002,   // IEEE 802.5 (4Mb recommended)
+    // 1536,   // Expermental Ethernet Networks
+    // 1500,   // Ethernet, Point-to-Point (default)
+    1492,   // IEEE 802.3
+    1006,   // SLIP, ARPANET
+    // 576,    // X.25 Networks
+    // 544,    // DEC IP Portal
+    // 512,    // NETBIOS
+    508,    // IEEE 802/Source-Rt Bridge, ARCNET
+    296,    // Point-to-Point (low delay)
+    68,     // Official minimum
+    0,      // End of list marker
+};
+
+static const int IP_HEADER_SIZE = 20u;
+static const int IPV6_HEADER_SIZE = 40u;
+static const int ICMP_HEADER_SIZE = 8u;
+static const int ICMP_PING_TIMEOUT_MILLIS = 10000u;
+#endif
+
+PhysicalSocket::PhysicalSocket(PhysicalSocketServer* ss, SOCKET s)
+  : ss_(ss), s_(s), error_(0),
+    state_((s == INVALID_SOCKET) ? CS_CLOSED : CS_CONNECTED),
+    resolver_(nullptr) {
+#if defined(WEBRTC_WIN)
+  // EnsureWinsockInit() ensures that winsock is initialized. The default
+  // version of this function doesn't do anything because winsock is
+  // initialized by constructor of a static object. If neccessary libjingle
+  // users can link it with a different version of this function by replacing
+  // win32socketinit.cc. See win32socketinit.cc for more details.
+  EnsureWinsockInit();
+#endif
+  if (s_ != INVALID_SOCKET) {
+    SetEnabledEvents(DE_READ | DE_WRITE);
+
+    int type = SOCK_STREAM;
+    socklen_t len = sizeof(type);
+    const int res =
+        getsockopt(s_, SOL_SOCKET, SO_TYPE, (SockOptArg)&type, &len);
+    RTC_DCHECK_EQ(0, res);
+    udp_ = (SOCK_DGRAM == type);
+  }
+}
+
+PhysicalSocket::~PhysicalSocket() {
+  Close();
+}
+
+bool PhysicalSocket::Create(int family, int type) {
+  Close();
+  s_ = ::socket(family, type, 0);
+  udp_ = (SOCK_DGRAM == type);
+  UpdateLastError();
+  if (udp_) {
+    SetEnabledEvents(DE_READ | DE_WRITE);
+  }
+  return s_ != INVALID_SOCKET;
+}
+
+SocketAddress PhysicalSocket::GetLocalAddress() const {
+  sockaddr_storage addr_storage = {0};
+  socklen_t addrlen = sizeof(addr_storage);
+  sockaddr* addr = reinterpret_cast<sockaddr*>(&addr_storage);
+  int result = ::getsockname(s_, addr, &addrlen);
+  SocketAddress address;
+  if (result >= 0) {
+    SocketAddressFromSockAddrStorage(addr_storage, &address);
+  } else {
+    RTC_LOG(LS_WARNING) << "GetLocalAddress: unable to get local addr, socket="
+                        << s_;
+  }
+  return address;
+}
+
+SocketAddress PhysicalSocket::GetRemoteAddress() const {
+  sockaddr_storage addr_storage = {0};
+  socklen_t addrlen = sizeof(addr_storage);
+  sockaddr* addr = reinterpret_cast<sockaddr*>(&addr_storage);
+  int result = ::getpeername(s_, addr, &addrlen);
+  SocketAddress address;
+  if (result >= 0) {
+    SocketAddressFromSockAddrStorage(addr_storage, &address);
+  } else {
+    RTC_LOG(LS_WARNING)
+        << "GetRemoteAddress: unable to get remote addr, socket=" << s_;
+  }
+  return address;
+}
+
+int PhysicalSocket::Bind(const SocketAddress& bind_addr) {
+  SocketAddress copied_bind_addr = bind_addr;
+  // If a network binder is available, use it to bind a socket to an interface
+  // instead of bind(), since this is more reliable on an OS with a weak host
+  // model.
+  if (ss_->network_binder() && !bind_addr.IsAnyIP()) {
+    NetworkBindingResult result =
+        ss_->network_binder()->BindSocketToNetwork(s_, bind_addr.ipaddr());
+    if (result == NetworkBindingResult::SUCCESS) {
+      // Since the network binder handled binding the socket to the desired
+      // network interface, we don't need to (and shouldn't) include an IP in
+      // the bind() call; bind() just needs to assign a port.
+      copied_bind_addr.SetIP(GetAnyIP(copied_bind_addr.ipaddr().family()));
+    } else if (result == NetworkBindingResult::NOT_IMPLEMENTED) {
+      RTC_LOG(LS_INFO) << "Can't bind socket to network because "
+                          "network binding is not implemented for this OS.";
+    } else {
+      if (bind_addr.IsLoopbackIP()) {
+        // If we couldn't bind to a loopback IP (which should only happen in
+        // test scenarios), continue on. This may be expected behavior.
+        RTC_LOG(LS_VERBOSE) << "Binding socket to loopback address "
+                            << bind_addr.ipaddr().ToString()
+                            << " failed; result: " << static_cast<int>(result);
+      } else {
+        RTC_LOG(LS_WARNING) << "Binding socket to network address "
+                            << bind_addr.ipaddr().ToString()
+                            << " failed; result: " << static_cast<int>(result);
+        // If a network binding was attempted and failed, we should stop here
+        // and not try to use the socket. Otherwise, we may end up sending
+        // packets with an invalid source address.
+        // See: https://bugs.chromium.org/p/webrtc/issues/detail?id=7026
+        return -1;
+      }
+    }
+  }
+  sockaddr_storage addr_storage;
+  size_t len = copied_bind_addr.ToSockAddrStorage(&addr_storage);
+  sockaddr* addr = reinterpret_cast<sockaddr*>(&addr_storage);
+  int err = ::bind(s_, addr, static_cast<int>(len));
+  UpdateLastError();
+#if !defined(NDEBUG)
+  if (0 == err) {
+    dbg_addr_ = "Bound @ ";
+    dbg_addr_.append(GetLocalAddress().ToString());
+  }
+#endif
+  return err;
+}
+
+int PhysicalSocket::Connect(const SocketAddress& addr) {
+  // TODO(pthatcher): Implicit creation is required to reconnect...
+  // ...but should we make it more explicit?
+  if (state_ != CS_CLOSED) {
+    SetError(EALREADY);
+    return SOCKET_ERROR;
+  }
+  if (addr.IsUnresolvedIP()) {
+    RTC_LOG(LS_VERBOSE) << "Resolving addr in PhysicalSocket::Connect";
+    resolver_ = new AsyncResolver();
+    resolver_->SignalDone.connect(this, &PhysicalSocket::OnResolveResult);
+    resolver_->Start(addr);
+    state_ = CS_CONNECTING;
+    return 0;
+  }
+
+  return DoConnect(addr);
+}
+
+int PhysicalSocket::DoConnect(const SocketAddress& connect_addr) {
+  if ((s_ == INVALID_SOCKET) &&
+      !Create(connect_addr.family(), SOCK_STREAM)) {
+    return SOCKET_ERROR;
+  }
+  sockaddr_storage addr_storage;
+  size_t len = connect_addr.ToSockAddrStorage(&addr_storage);
+  sockaddr* addr = reinterpret_cast<sockaddr*>(&addr_storage);
+  int err = ::connect(s_, addr, static_cast<int>(len));
+  UpdateLastError();
+  uint8_t events = DE_READ | DE_WRITE;
+  if (err == 0) {
+    state_ = CS_CONNECTED;
+  } else if (IsBlockingError(GetError())) {
+    state_ = CS_CONNECTING;
+    events |= DE_CONNECT;
+  } else {
+    return SOCKET_ERROR;
+  }
+
+  EnableEvents(events);
+  return 0;
+}
+
+int PhysicalSocket::GetError() const {
+  CritScope cs(&crit_);
+  return error_;
+}
+
+void PhysicalSocket::SetError(int error) {
+  CritScope cs(&crit_);
+  error_ = error;
+}
+
+AsyncSocket::ConnState PhysicalSocket::GetState() const {
+  return state_;
+}
+
+int PhysicalSocket::GetOption(Option opt, int* value) {
+  int slevel;
+  int sopt;
+  if (TranslateOption(opt, &slevel, &sopt) == -1)
+    return -1;
+  socklen_t optlen = sizeof(*value);
+  int ret = ::getsockopt(s_, slevel, sopt, (SockOptArg)value, &optlen);
+  if (ret != -1 && opt == OPT_DONTFRAGMENT) {
+#if defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID)
+    *value = (*value != IP_PMTUDISC_DONT) ? 1 : 0;
+#endif
+  }
+  return ret;
+}
+
+int PhysicalSocket::SetOption(Option opt, int value) {
+  int slevel;
+  int sopt;
+  if (TranslateOption(opt, &slevel, &sopt) == -1)
+    return -1;
+  if (opt == OPT_DONTFRAGMENT) {
+#if defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID)
+    value = (value) ? IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
+#endif
+  }
+  return ::setsockopt(s_, slevel, sopt, (SockOptArg)&value, sizeof(value));
+}
+
+int PhysicalSocket::Send(const void* pv, size_t cb) {
+  int sent = DoSend(s_, reinterpret_cast<const char *>(pv),
+      static_cast<int>(cb),
+#if defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID)
+      // Suppress SIGPIPE. Without this, attempting to send on a socket whose
+      // other end is closed will result in a SIGPIPE signal being raised to
+      // our process, which by default will terminate the process, which we
+      // don't want. By specifying this flag, we'll just get the error EPIPE
+      // instead and can handle the error gracefully.
+      MSG_NOSIGNAL
+#else
+      0
+#endif
+      );
+  UpdateLastError();
+  MaybeRemapSendError();
+  // We have seen minidumps where this may be false.
+  RTC_DCHECK(sent <= static_cast<int>(cb));
+  if ((sent > 0 && sent < static_cast<int>(cb)) ||
+      (sent < 0 && IsBlockingError(GetError()))) {
+    EnableEvents(DE_WRITE);
+  }
+  return sent;
+}
+
+int PhysicalSocket::SendTo(const void* buffer,
+                           size_t length,
+                           const SocketAddress& addr) {
+  sockaddr_storage saddr;
+  size_t len = addr.ToSockAddrStorage(&saddr);
+  int sent = DoSendTo(
+      s_, static_cast<const char *>(buffer), static_cast<int>(length),
+#if defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID)
+      // Suppress SIGPIPE. See above for explanation.
+      MSG_NOSIGNAL,
+#else
+      0,
+#endif
+      reinterpret_cast<sockaddr*>(&saddr), static_cast<int>(len));
+  UpdateLastError();
+  MaybeRemapSendError();
+  // We have seen minidumps where this may be false.
+  RTC_DCHECK(sent <= static_cast<int>(length));
+  if ((sent > 0 && sent < static_cast<int>(length)) ||
+      (sent < 0 && IsBlockingError(GetError()))) {
+    EnableEvents(DE_WRITE);
+  }
+  return sent;
+}
+
+int PhysicalSocket::Recv(void* buffer, size_t length, int64_t* timestamp) {
+  int received = ::recv(s_, static_cast<char*>(buffer),
+                        static_cast<int>(length), 0);
+  if ((received == 0) && (length != 0)) {
+    // Note: on graceful shutdown, recv can return 0.  In this case, we
+    // pretend it is blocking, and then signal close, so that simplifying
+    // assumptions can be made about Recv.
+    RTC_LOG(LS_WARNING) << "EOF from socket; deferring close event";
+    // Must turn this back on so that the select() loop will notice the close
+    // event.
+    EnableEvents(DE_READ);
+    SetError(EWOULDBLOCK);
+    return SOCKET_ERROR;
+  }
+  if (timestamp) {
+    *timestamp = GetSocketRecvTimestamp(s_);
+  }
+  UpdateLastError();
+  int error = GetError();
+  bool success = (received >= 0) || IsBlockingError(error);
+  if (udp_ || success) {
+    EnableEvents(DE_READ);
+  }
+  if (!success) {
+    RTC_LOG_F(LS_VERBOSE) << "Error = " << error;
+  }
+  return received;
+}
+
+int PhysicalSocket::RecvFrom(void* buffer,
+                             size_t length,
+                             SocketAddress* out_addr,
+                             int64_t* timestamp) {
+  sockaddr_storage addr_storage;
+  socklen_t addr_len = sizeof(addr_storage);
+  sockaddr* addr = reinterpret_cast<sockaddr*>(&addr_storage);
+  int received = ::recvfrom(s_, static_cast<char*>(buffer),
+                            static_cast<int>(length), 0, addr, &addr_len);
+  if (timestamp) {
+    *timestamp = GetSocketRecvTimestamp(s_);
+  }
+  UpdateLastError();
+  if ((received >= 0) && (out_addr != nullptr))
+    SocketAddressFromSockAddrStorage(addr_storage, out_addr);
+  int error = GetError();
+  bool success = (received >= 0) || IsBlockingError(error);
+  if (udp_ || success) {
+    EnableEvents(DE_READ);
+  }
+  if (!success) {
+    RTC_LOG_F(LS_VERBOSE) << "Error = " << error;
+  }
+  return received;
+}
+
+int PhysicalSocket::Listen(int backlog) {
+  int err = ::listen(s_, backlog);
+  UpdateLastError();
+  if (err == 0) {
+    state_ = CS_CONNECTING;
+    EnableEvents(DE_ACCEPT);
+#if !defined(NDEBUG)
+    dbg_addr_ = "Listening @ ";
+    dbg_addr_.append(GetLocalAddress().ToString());
+#endif
+  }
+  return err;
+}
+
+AsyncSocket* PhysicalSocket::Accept(SocketAddress* out_addr) {
+  // Always re-subscribe DE_ACCEPT to make sure new incoming connections will
+  // trigger an event even if DoAccept returns an error here.
+  EnableEvents(DE_ACCEPT);
+  sockaddr_storage addr_storage;
+  socklen_t addr_len = sizeof(addr_storage);
+  sockaddr* addr = reinterpret_cast<sockaddr*>(&addr_storage);
+  SOCKET s = DoAccept(s_, addr, &addr_len);
+  UpdateLastError();
+  if (s == INVALID_SOCKET)
+    return nullptr;
+  if (out_addr != nullptr)
+    SocketAddressFromSockAddrStorage(addr_storage, out_addr);
+  return ss_->WrapSocket(s);
+}
+
+int PhysicalSocket::Close() {
+  if (s_ == INVALID_SOCKET)
+    return 0;
+  int err = ::closesocket(s_);
+  UpdateLastError();
+  s_ = INVALID_SOCKET;
+  state_ = CS_CLOSED;
+  SetEnabledEvents(0);
+  if (resolver_) {
+    resolver_->Destroy(false);
+    resolver_ = nullptr;
+  }
+  return err;
+}
+
+SOCKET PhysicalSocket::DoAccept(SOCKET socket,
+                                sockaddr* addr,
+                                socklen_t* addrlen) {
+  return ::accept(socket, addr, addrlen);
+}
+
+int PhysicalSocket::DoSend(SOCKET socket, const char* buf, int len, int flags) {
+  return ::send(socket, buf, len, flags);
+}
+
+int PhysicalSocket::DoSendTo(SOCKET socket,
+                             const char* buf,
+                             int len,
+                             int flags,
+                             const struct sockaddr* dest_addr,
+                             socklen_t addrlen) {
+  return ::sendto(socket, buf, len, flags, dest_addr, addrlen);
+}
+
+void PhysicalSocket::OnResolveResult(AsyncResolverInterface* resolver) {
+  if (resolver != resolver_) {
+    return;
+  }
+
+  int error = resolver_->GetError();
+  if (error == 0) {
+    error = DoConnect(resolver_->address());
+  } else {
+    Close();
+  }
+
+  if (error) {
+    SetError(error);
+    SignalCloseEvent(this, error);
+  }
+}
+
+void PhysicalSocket::UpdateLastError() {
+  SetError(LAST_SYSTEM_ERROR);
+}
+
+void PhysicalSocket::MaybeRemapSendError() {
+#if defined(WEBRTC_MAC)
+  // https://developer.apple.com/library/mac/documentation/Darwin/
+  // Reference/ManPages/man2/sendto.2.html
+  // ENOBUFS - The output queue for a network interface is full.
+  // This generally indicates that the interface has stopped sending,
+  // but may be caused by transient congestion.
+  if (GetError() == ENOBUFS) {
+    SetError(EWOULDBLOCK);
+  }
+#endif
+}
+
+void PhysicalSocket::SetEnabledEvents(uint8_t events) {
+  enabled_events_ = events;
+}
+
+void PhysicalSocket::EnableEvents(uint8_t events) {
+  enabled_events_ |= events;
+}
+
+void PhysicalSocket::DisableEvents(uint8_t events) {
+  enabled_events_ &= ~events;
+}
+
+int PhysicalSocket::TranslateOption(Option opt, int* slevel, int* sopt) {
+  switch (opt) {
+    case OPT_DONTFRAGMENT:
+#if defined(WEBRTC_WIN)
+      *slevel = IPPROTO_IP;
+      *sopt = IP_DONTFRAGMENT;
+      break;
+#elif defined(WEBRTC_MAC) || defined(BSD) || defined(__native_client__)
+      RTC_LOG(LS_WARNING) << "Socket::OPT_DONTFRAGMENT not supported.";
+      return -1;
+#elif defined(WEBRTC_POSIX)
+      *slevel = IPPROTO_IP;
+      *sopt = IP_MTU_DISCOVER;
+      break;
+#endif
+    case OPT_RCVBUF:
+      *slevel = SOL_SOCKET;
+      *sopt = SO_RCVBUF;
+      break;
+    case OPT_SNDBUF:
+      *slevel = SOL_SOCKET;
+      *sopt = SO_SNDBUF;
+      break;
+    case OPT_NODELAY:
+      *slevel = IPPROTO_TCP;
+      *sopt = TCP_NODELAY;
+      break;
+    case OPT_DSCP:
+      RTC_LOG(LS_WARNING) << "Socket::OPT_DSCP not supported.";
+      return -1;
+    case OPT_RTP_SENDTIME_EXTN_ID:
+      return -1;  // No logging is necessary as this not a OS socket option.
+    default:
+      RTC_NOTREACHED();
+      return -1;
+  }
+  return 0;
+}
+
+SocketDispatcher::SocketDispatcher(PhysicalSocketServer *ss)
+#if defined(WEBRTC_WIN)
+  : PhysicalSocket(ss), id_(0), signal_close_(false)
+#else
+  : PhysicalSocket(ss)
+#endif
+{
+}
+
+SocketDispatcher::SocketDispatcher(SOCKET s, PhysicalSocketServer *ss)
+#if defined(WEBRTC_WIN)
+  : PhysicalSocket(ss, s), id_(0), signal_close_(false)
+#else
+  : PhysicalSocket(ss, s)
+#endif
+{
+}
+
+SocketDispatcher::~SocketDispatcher() {
+  Close();
+}
+
+bool SocketDispatcher::Initialize() {
+  RTC_DCHECK(s_ != INVALID_SOCKET);
+  // Must be a non-blocking
+#if defined(WEBRTC_WIN)
+  u_long argp = 1;
+  ioctlsocket(s_, FIONBIO, &argp);
+#elif defined(WEBRTC_POSIX)
+  fcntl(s_, F_SETFL, fcntl(s_, F_GETFL, 0) | O_NONBLOCK);
+#endif
+#if defined(WEBRTC_IOS)
+  // iOS may kill sockets when the app is moved to the background
+  // (specifically, if the app doesn't use the "voip" UIBackgroundMode). When
+  // we attempt to write to such a socket, SIGPIPE will be raised, which by
+  // default will terminate the process, which we don't want. By specifying
+  // this socket option, SIGPIPE will be disabled for the socket.
+  int value = 1;
+  ::setsockopt(s_, SOL_SOCKET, SO_NOSIGPIPE, &value, sizeof(value));
+#endif
+  ss_->Add(this);
+  return true;
+}
+
+bool SocketDispatcher::Create(int type) {
+  return Create(AF_INET, type);
+}
+
+bool SocketDispatcher::Create(int family, int type) {
+  // Change the socket to be non-blocking.
+  if (!PhysicalSocket::Create(family, type))
+    return false;
+
+  if (!Initialize())
+    return false;
+
+#if defined(WEBRTC_WIN)
+  do { id_ = ++next_id_; } while (id_ == 0);
+#endif
+  return true;
+}
+
+#if defined(WEBRTC_WIN)
+
+WSAEVENT SocketDispatcher::GetWSAEvent() {
+  return WSA_INVALID_EVENT;
+}
+
+SOCKET SocketDispatcher::GetSocket() {
+  return s_;
+}
+
+bool SocketDispatcher::CheckSignalClose() {
+  if (!signal_close_)
+    return false;
+
+  char ch;
+  if (recv(s_, &ch, 1, MSG_PEEK) > 0)
+    return false;
+
+  state_ = CS_CLOSED;
+  signal_close_ = false;
+  SignalCloseEvent(this, signal_err_);
+  return true;
+}
+
+int SocketDispatcher::next_id_ = 0;
+
+#elif defined(WEBRTC_POSIX)
+
+int SocketDispatcher::GetDescriptor() {
+  return s_;
+}
+
+bool SocketDispatcher::IsDescriptorClosed() {
+  if (udp_) {
+    // The MSG_PEEK trick doesn't work for UDP, since (at least in some
+    // circumstances) it requires reading an entire UDP packet, which would be
+    // bad for performance here. So, just check whether |s_| has been closed,
+    // which should be sufficient.
+    return s_ == INVALID_SOCKET;
+  }
+  // We don't have a reliable way of distinguishing end-of-stream
+  // from readability.  So test on each readable call.  Is this
+  // inefficient?  Probably.
+  char ch;
+  ssize_t res = ::recv(s_, &ch, 1, MSG_PEEK);
+  if (res > 0) {
+    // Data available, so not closed.
+    return false;
+  } else if (res == 0) {
+    // EOF, so closed.
+    return true;
+  } else {  // error
+    switch (errno) {
+      // Returned if we've already closed s_.
+      case EBADF:
+      // Returned during ungraceful peer shutdown.
+      case ECONNRESET:
+        return true;
+      // The normal blocking error; don't log anything.
+      case EWOULDBLOCK:
+      // Interrupted system call.
+      case EINTR:
+        return false;
+      default:
+        // Assume that all other errors are just blocking errors, meaning the
+        // connection is still good but we just can't read from it right now.
+        // This should only happen when connecting (and at most once), because
+        // in all other cases this function is only called if the file
+        // descriptor is already known to be in the readable state. However,
+        // it's not necessary a problem if we spuriously interpret a
+        // "connection lost"-type error as a blocking error, because typically
+        // the next recv() will get EOF, so we'll still eventually notice that
+        // the socket is closed.
+        RTC_LOG_ERR(LS_WARNING) << "Assuming benign blocking error";
+        return false;
+    }
+  }
+}
+
+#endif // WEBRTC_POSIX
+
+uint32_t SocketDispatcher::GetRequestedEvents() {
+  return enabled_events();
+}
+
+void SocketDispatcher::OnPreEvent(uint32_t ff) {
+  if ((ff & DE_CONNECT) != 0)
+    state_ = CS_CONNECTED;
+
+#if defined(WEBRTC_WIN)
+  // We set CS_CLOSED from CheckSignalClose.
+#elif defined(WEBRTC_POSIX)
+  if ((ff & DE_CLOSE) != 0)
+    state_ = CS_CLOSED;
+#endif
+}
+
+#if defined(WEBRTC_WIN)
+
+void SocketDispatcher::OnEvent(uint32_t ff, int err) {
+  int cache_id = id_;
+  // Make sure we deliver connect/accept first. Otherwise, consumers may see
+  // something like a READ followed by a CONNECT, which would be odd.
+  if (((ff & DE_CONNECT) != 0) && (id_ == cache_id)) {
+    if (ff != DE_CONNECT)
+      RTC_LOG(LS_VERBOSE) << "Signalled with DE_CONNECT: " << ff;
+    DisableEvents(DE_CONNECT);
+#if !defined(NDEBUG)
+    dbg_addr_ = "Connected @ ";
+    dbg_addr_.append(GetRemoteAddress().ToString());
+#endif
+    SignalConnectEvent(this);
+  }
+  if (((ff & DE_ACCEPT) != 0) && (id_ == cache_id)) {
+    DisableEvents(DE_ACCEPT);
+    SignalReadEvent(this);
+  }
+  if ((ff & DE_READ) != 0) {
+    DisableEvents(DE_READ);
+    SignalReadEvent(this);
+  }
+  if (((ff & DE_WRITE) != 0) && (id_ == cache_id)) {
+    DisableEvents(DE_WRITE);
+    SignalWriteEvent(this);
+  }
+  if (((ff & DE_CLOSE) != 0) && (id_ == cache_id)) {
+    signal_close_ = true;
+    signal_err_ = err;
+  }
+}
+
+#elif defined(WEBRTC_POSIX)
+
+void SocketDispatcher::OnEvent(uint32_t ff, int err) {
+#if defined(WEBRTC_USE_EPOLL)
+  // Remember currently enabled events so we can combine multiple changes
+  // into one update call later.
+  // The signal handlers might re-enable events disabled here, so we can't
+  // keep a list of events to disable at the end of the method. This list
+  // would not be updated with the events enabled by the signal handlers.
+  StartBatchedEventUpdates();
+#endif
+  // Make sure we deliver connect/accept first. Otherwise, consumers may see
+  // something like a READ followed by a CONNECT, which would be odd.
+  if ((ff & DE_CONNECT) != 0) {
+    DisableEvents(DE_CONNECT);
+    SignalConnectEvent(this);
+  }
+  if ((ff & DE_ACCEPT) != 0) {
+    DisableEvents(DE_ACCEPT);
+    SignalReadEvent(this);
+  }
+  if ((ff & DE_READ) != 0) {
+    DisableEvents(DE_READ);
+    SignalReadEvent(this);
+  }
+  if ((ff & DE_WRITE) != 0) {
+    DisableEvents(DE_WRITE);
+    SignalWriteEvent(this);
+  }
+  if ((ff & DE_CLOSE) != 0) {
+    // The socket is now dead to us, so stop checking it.
+    SetEnabledEvents(0);
+    SignalCloseEvent(this, err);
+  }
+#if defined(WEBRTC_USE_EPOLL)
+  FinishBatchedEventUpdates();
+#endif
+}
+
+#endif // WEBRTC_POSIX
+
+#if defined(WEBRTC_USE_EPOLL)
+
+static int GetEpollEvents(uint32_t ff) {
+  int events = 0;
+  if (ff & (DE_READ | DE_ACCEPT)) {
+    events |= EPOLLIN;
+  }
+  if (ff & (DE_WRITE | DE_CONNECT)) {
+    events |= EPOLLOUT;
+  }
+  return events;
+}
+
+void SocketDispatcher::StartBatchedEventUpdates() {
+  RTC_DCHECK_EQ(saved_enabled_events_, -1);
+  saved_enabled_events_ = enabled_events();
+}
+
+void SocketDispatcher::FinishBatchedEventUpdates() {
+  RTC_DCHECK_NE(saved_enabled_events_, -1);
+  uint8_t old_events = static_cast<uint8_t>(saved_enabled_events_);
+  saved_enabled_events_ = -1;
+  MaybeUpdateDispatcher(old_events);
+}
+
+void SocketDispatcher::MaybeUpdateDispatcher(uint8_t old_events) {
+  if (GetEpollEvents(enabled_events()) != GetEpollEvents(old_events) &&
+      saved_enabled_events_ == -1) {
+    ss_->Update(this);
+  }
+}
+
+void SocketDispatcher::SetEnabledEvents(uint8_t events) {
+  uint8_t old_events = enabled_events();
+  PhysicalSocket::SetEnabledEvents(events);
+  MaybeUpdateDispatcher(old_events);
+}
+
+void SocketDispatcher::EnableEvents(uint8_t events) {
+  uint8_t old_events = enabled_events();
+  PhysicalSocket::EnableEvents(events);
+  MaybeUpdateDispatcher(old_events);
+}
+
+void SocketDispatcher::DisableEvents(uint8_t events) {
+  uint8_t old_events = enabled_events();
+  PhysicalSocket::DisableEvents(events);
+  MaybeUpdateDispatcher(old_events);
+}
+
+#endif  // WEBRTC_USE_EPOLL
+
+int SocketDispatcher::Close() {
+  if (s_ == INVALID_SOCKET)
+    return 0;
+
+#if defined(WEBRTC_WIN)
+  id_ = 0;
+  signal_close_ = false;
+#endif
+  ss_->Remove(this);
+  return PhysicalSocket::Close();
+}
+
+#if defined(WEBRTC_POSIX)
+class EventDispatcher : public Dispatcher {
+ public:
+  EventDispatcher(PhysicalSocketServer* ss) : ss_(ss), fSignaled_(false) {
+    if (pipe(afd_) < 0)
+      RTC_LOG(LERROR) << "pipe failed";
+    ss_->Add(this);
+  }
+
+  ~EventDispatcher() override {
+    ss_->Remove(this);
+    close(afd_[0]);
+    close(afd_[1]);
+  }
+
+  virtual void Signal() {
+    CritScope cs(&crit_);
+    if (!fSignaled_) {
+      const uint8_t b[1] = {0};
+      const ssize_t res = write(afd_[1], b, sizeof(b));
+      RTC_DCHECK_EQ(1, res);
+      fSignaled_ = true;
+    }
+  }
+
+  uint32_t GetRequestedEvents() override { return DE_READ; }
+
+  void OnPreEvent(uint32_t ff) override {
+    // It is not possible to perfectly emulate an auto-resetting event with
+    // pipes.  This simulates it by resetting before the event is handled.
+
+    CritScope cs(&crit_);
+    if (fSignaled_) {
+      uint8_t b[4];  // Allow for reading more than 1 byte, but expect 1.
+      const ssize_t res = read(afd_[0], b, sizeof(b));
+      RTC_DCHECK_EQ(1, res);
+      fSignaled_ = false;
+    }
+  }
+
+  void OnEvent(uint32_t ff, int err) override { RTC_NOTREACHED(); }
+
+  int GetDescriptor() override { return afd_[0]; }
+
+  bool IsDescriptorClosed() override { return false; }
+
+ private:
+  PhysicalSocketServer *ss_;
+  int afd_[2];
+  bool fSignaled_;
+  CriticalSection crit_;
+};
+
+// These two classes use the self-pipe trick to deliver POSIX signals to our
+// select loop. This is the only safe, reliable, cross-platform way to do
+// non-trivial things with a POSIX signal in an event-driven program (until
+// proper pselect() implementations become ubiquitous).
+
+class PosixSignalHandler {
+ public:
+  // POSIX only specifies 32 signals, but in principle the system might have
+  // more and the programmer might choose to use them, so we size our array
+  // for 128.
+  static const int kNumPosixSignals = 128;
+
+  // There is just a single global instance. (Signal handlers do not get any
+  // sort of user-defined void * parameter, so they can't access anything that
+  // isn't global.)
+  static PosixSignalHandler* Instance() {
+    RTC_DEFINE_STATIC_LOCAL(PosixSignalHandler, instance, ());
+    return &instance;
+  }
+
+  // Returns true if the given signal number is set.
+  bool IsSignalSet(int signum) const {
+    RTC_DCHECK(signum < static_cast<int>(arraysize(received_signal_)));
+    if (signum < static_cast<int>(arraysize(received_signal_))) {
+      return received_signal_[signum];
+    } else {
+      return false;
+    }
+  }
+
+  // Clears the given signal number.
+  void ClearSignal(int signum) {
+    RTC_DCHECK(signum < static_cast<int>(arraysize(received_signal_)));
+    if (signum < static_cast<int>(arraysize(received_signal_))) {
+      received_signal_[signum] = false;
+    }
+  }
+
+  // Returns the file descriptor to monitor for signal events.
+  int GetDescriptor() const {
+    return afd_[0];
+  }
+
+  // This is called directly from our real signal handler, so it must be
+  // signal-handler-safe. That means it cannot assume anything about the
+  // user-level state of the process, since the handler could be executed at any
+  // time on any thread.
+  void OnPosixSignalReceived(int signum) {
+    if (signum >= static_cast<int>(arraysize(received_signal_))) {
+      // We don't have space in our array for this.
+      return;
+    }
+    // Set a flag saying we've seen this signal.
+    received_signal_[signum] = true;
+    // Notify application code that we got a signal.
+    const uint8_t b[1] = {0};
+    if (-1 == write(afd_[1], b, sizeof(b))) {
+      // Nothing we can do here. If there's an error somehow then there's
+      // nothing we can safely do from a signal handler.
+      // No, we can't even safely log it.
+      // But, we still have to check the return value here. Otherwise,
+      // GCC 4.4.1 complains ignoring return value. Even (void) doesn't help.
+      return;
+    }
+  }
+
+ private:
+  PosixSignalHandler() {
+    if (pipe(afd_) < 0) {
+      RTC_LOG_ERR(LS_ERROR) << "pipe failed";
+      return;
+    }
+    if (fcntl(afd_[0], F_SETFL, O_NONBLOCK) < 0) {
+      RTC_LOG_ERR(LS_WARNING) << "fcntl #1 failed";
+    }
+    if (fcntl(afd_[1], F_SETFL, O_NONBLOCK) < 0) {
+      RTC_LOG_ERR(LS_WARNING) << "fcntl #2 failed";
+    }
+    memset(const_cast<void *>(static_cast<volatile void *>(received_signal_)),
+           0,
+           sizeof(received_signal_));
+  }
+
+  ~PosixSignalHandler() {
+    int fd1 = afd_[0];
+    int fd2 = afd_[1];
+    // We clobber the stored file descriptor numbers here or else in principle
+    // a signal that happens to be delivered during application termination
+    // could erroneously write a zero byte to an unrelated file handle in
+    // OnPosixSignalReceived() if some other file happens to be opened later
+    // during shutdown and happens to be given the same file descriptor number
+    // as our pipe had. Unfortunately even with this precaution there is still a
+    // race where that could occur if said signal happens to be handled
+    // concurrently with this code and happens to have already read the value of
+    // afd_[1] from memory before we clobber it, but that's unlikely.
+    afd_[0] = -1;
+    afd_[1] = -1;
+    close(fd1);
+    close(fd2);
+  }
+
+  int afd_[2];
+  // These are boolean flags that will be set in our signal handler and read
+  // and cleared from Wait(). There is a race involved in this, but it is
+  // benign. The signal handler sets the flag before signaling the pipe, so
+  // we'll never end up blocking in select() while a flag is still true.
+  // However, if two of the same signal arrive close to each other then it's
+  // possible that the second time the handler may set the flag while it's still
+  // true, meaning that signal will be missed. But the first occurrence of it
+  // will still be handled, so this isn't a problem.
+  // Volatile is not necessary here for correctness, but this data _is_ volatile
+  // so I've marked it as such.
+  volatile uint8_t received_signal_[kNumPosixSignals];
+};
+
+class PosixSignalDispatcher : public Dispatcher {
+ public:
+  PosixSignalDispatcher(PhysicalSocketServer *owner) : owner_(owner) {
+    owner_->Add(this);
+  }
+
+  ~PosixSignalDispatcher() override {
+    owner_->Remove(this);
+  }
+
+  uint32_t GetRequestedEvents() override { return DE_READ; }
+
+  void OnPreEvent(uint32_t ff) override {
+    // Events might get grouped if signals come very fast, so we read out up to
+    // 16 bytes to make sure we keep the pipe empty.
+    uint8_t b[16];
+    ssize_t ret = read(GetDescriptor(), b, sizeof(b));
+    if (ret < 0) {
+      RTC_LOG_ERR(LS_WARNING) << "Error in read()";
+    } else if (ret == 0) {
+      RTC_LOG(LS_WARNING) << "Should have read at least one byte";
+    }
+  }
+
+  void OnEvent(uint32_t ff, int err) override {
+    for (int signum = 0; signum < PosixSignalHandler::kNumPosixSignals;
+         ++signum) {
+      if (PosixSignalHandler::Instance()->IsSignalSet(signum)) {
+        PosixSignalHandler::Instance()->ClearSignal(signum);
+        HandlerMap::iterator i = handlers_.find(signum);
+        if (i == handlers_.end()) {
+          // This can happen if a signal is delivered to our process at around
+          // the same time as we unset our handler for it. It is not an error
+          // condition, but it's unusual enough to be worth logging.
+          RTC_LOG(LS_INFO) << "Received signal with no handler: " << signum;
+        } else {
+          // Otherwise, execute our handler.
+          (*i->second)(signum);
+        }
+      }
+    }
+  }
+
+  int GetDescriptor() override {
+    return PosixSignalHandler::Instance()->GetDescriptor();
+  }
+
+  bool IsDescriptorClosed() override { return false; }
+
+  void SetHandler(int signum, void (*handler)(int)) {
+    handlers_[signum] = handler;
+  }
+
+  void ClearHandler(int signum) {
+    handlers_.erase(signum);
+  }
+
+  bool HasHandlers() {
+    return !handlers_.empty();
+  }
+
+ private:
+  typedef std::map<int, void (*)(int)> HandlerMap;
+
+  HandlerMap handlers_;
+  // Our owner.
+  PhysicalSocketServer *owner_;
+};
+
+#endif // WEBRTC_POSIX
+
+#if defined(WEBRTC_WIN)
+static uint32_t FlagsToEvents(uint32_t events) {
+  uint32_t ffFD = FD_CLOSE;
+  if (events & DE_READ)
+    ffFD |= FD_READ;
+  if (events & DE_WRITE)
+    ffFD |= FD_WRITE;
+  if (events & DE_CONNECT)
+    ffFD |= FD_CONNECT;
+  if (events & DE_ACCEPT)
+    ffFD |= FD_ACCEPT;
+  return ffFD;
+}
+
+class EventDispatcher : public Dispatcher {
+ public:
+  EventDispatcher(PhysicalSocketServer *ss) : ss_(ss) {
+    hev_ = WSACreateEvent();
+    if (hev_) {
+      ss_->Add(this);
+    }
+  }
+
+  ~EventDispatcher() override {
+    if (hev_ != nullptr) {
+      ss_->Remove(this);
+      WSACloseEvent(hev_);
+      hev_ = nullptr;
+    }
+  }
+
+  virtual void Signal() {
+    if (hev_ != nullptr)
+      WSASetEvent(hev_);
+  }
+
+  uint32_t GetRequestedEvents() override { return 0; }
+
+  void OnPreEvent(uint32_t ff) override { WSAResetEvent(hev_); }
+
+  void OnEvent(uint32_t ff, int err) override {}
+
+  WSAEVENT GetWSAEvent() override { return hev_; }
+
+  SOCKET GetSocket() override { return INVALID_SOCKET; }
+
+  bool CheckSignalClose() override { return false; }
+
+ private:
+  PhysicalSocketServer* ss_;
+  WSAEVENT hev_;
+};
+#endif  // WEBRTC_WIN
+
+// Sets the value of a boolean value to false when signaled.
+class Signaler : public EventDispatcher {
+ public:
+  Signaler(PhysicalSocketServer* ss, bool* pf)
+      : EventDispatcher(ss), pf_(pf) {
+  }
+  ~Signaler() override { }
+
+  void OnEvent(uint32_t ff, int err) override {
+    if (pf_)
+      *pf_ = false;
+  }
+
+ private:
+  bool *pf_;
+};
+
+PhysicalSocketServer::PhysicalSocketServer()
+    : fWait_(false) {
+#if defined(WEBRTC_USE_EPOLL)
+  // Since Linux 2.6.8, the size argument is ignored, but must be greater than
+  // zero. Before that the size served as hint to the kernel for the amount of
+  // space to initially allocate in internal data structures.
+  epoll_fd_ = epoll_create(FD_SETSIZE);
+  if (epoll_fd_ == -1) {
+    // Not an error, will fall back to "select" below.
+    RTC_LOG_E(LS_WARNING, EN, errno) << "epoll_create";
+    epoll_fd_ = INVALID_SOCKET;
+  }
+#endif
+  signal_wakeup_ = new Signaler(this, &fWait_);
+#if defined(WEBRTC_WIN)
+  socket_ev_ = WSACreateEvent();
+#endif
+}
+
+PhysicalSocketServer::~PhysicalSocketServer() {
+#if defined(WEBRTC_WIN)
+  WSACloseEvent(socket_ev_);
+#endif
+#if defined(WEBRTC_POSIX)
+  signal_dispatcher_.reset();
+#endif
+  delete signal_wakeup_;
+#if defined(WEBRTC_USE_EPOLL)
+  if (epoll_fd_ != INVALID_SOCKET) {
+    close(epoll_fd_);
+  }
+#endif
+  RTC_DCHECK(dispatchers_.empty());
+}
+
+void PhysicalSocketServer::WakeUp() {
+  signal_wakeup_->Signal();
+}
+
+Socket* PhysicalSocketServer::CreateSocket(int type) {
+  return CreateSocket(AF_INET, type);
+}
+
+Socket* PhysicalSocketServer::CreateSocket(int family, int type) {
+  PhysicalSocket* socket = new PhysicalSocket(this);
+  if (socket->Create(family, type)) {
+    return socket;
+  } else {
+    delete socket;
+    return nullptr;
+  }
+}
+
+AsyncSocket* PhysicalSocketServer::CreateAsyncSocket(int type) {
+  return CreateAsyncSocket(AF_INET, type);
+}
+
+AsyncSocket* PhysicalSocketServer::CreateAsyncSocket(int family, int type) {
+  SocketDispatcher* dispatcher = new SocketDispatcher(this);
+  if (dispatcher->Create(family, type)) {
+    return dispatcher;
+  } else {
+    delete dispatcher;
+    return nullptr;
+  }
+}
+
+AsyncSocket* PhysicalSocketServer::WrapSocket(SOCKET s) {
+  SocketDispatcher* dispatcher = new SocketDispatcher(s, this);
+  if (dispatcher->Initialize()) {
+    return dispatcher;
+  } else {
+    delete dispatcher;
+    return nullptr;
+  }
+}
+
+void PhysicalSocketServer::Add(Dispatcher *pdispatcher) {
+  CritScope cs(&crit_);
+  if (processing_dispatchers_) {
+    // A dispatcher is being added while a "Wait" call is processing the
+    // list of socket events.
+    // Defer adding to "dispatchers_" set until processing is done to avoid
+    // invalidating the iterator in "Wait".
+    pending_remove_dispatchers_.erase(pdispatcher);
+    pending_add_dispatchers_.insert(pdispatcher);
+  } else {
+    dispatchers_.insert(pdispatcher);
+  }
+#if defined(WEBRTC_USE_EPOLL)
+  if (epoll_fd_ != INVALID_SOCKET) {
+    AddEpoll(pdispatcher);
+  }
+#endif  // WEBRTC_USE_EPOLL
+}
+
+void PhysicalSocketServer::Remove(Dispatcher *pdispatcher) {
+  CritScope cs(&crit_);
+  if (processing_dispatchers_) {
+    // A dispatcher is being removed while a "Wait" call is processing the
+    // list of socket events.
+    // Defer removal from "dispatchers_" set until processing is done to avoid
+    // invalidating the iterator in "Wait".
+    if (!pending_add_dispatchers_.erase(pdispatcher) &&
+        dispatchers_.find(pdispatcher) == dispatchers_.end()) {
+      RTC_LOG(LS_WARNING) << "PhysicalSocketServer asked to remove a unknown "
+                          << "dispatcher, potentially from a duplicate call to "
+                          << "Add.";
+      return;
+    }
+
+    pending_remove_dispatchers_.insert(pdispatcher);
+  } else if (!dispatchers_.erase(pdispatcher)) {
+    RTC_LOG(LS_WARNING)
+        << "PhysicalSocketServer asked to remove a unknown "
+        << "dispatcher, potentially from a duplicate call to Add.";
+    return;
+  }
+#if defined(WEBRTC_USE_EPOLL)
+  if (epoll_fd_ != INVALID_SOCKET) {
+    RemoveEpoll(pdispatcher);
+  }
+#endif  // WEBRTC_USE_EPOLL
+}
+
+void PhysicalSocketServer::Update(Dispatcher* pdispatcher) {
+#if defined(WEBRTC_USE_EPOLL)
+  if (epoll_fd_ == INVALID_SOCKET) {
+    return;
+  }
+
+  CritScope cs(&crit_);
+  if (dispatchers_.find(pdispatcher) == dispatchers_.end()) {
+    return;
+  }
+
+  UpdateEpoll(pdispatcher);
+#endif
+}
+
+void PhysicalSocketServer::AddRemovePendingDispatchers() {
+  if (!pending_add_dispatchers_.empty()) {
+    for (Dispatcher* pdispatcher : pending_add_dispatchers_) {
+      dispatchers_.insert(pdispatcher);
+    }
+    pending_add_dispatchers_.clear();
+  }
+
+  if (!pending_remove_dispatchers_.empty()) {
+    for (Dispatcher* pdispatcher : pending_remove_dispatchers_) {
+      dispatchers_.erase(pdispatcher);
+    }
+    pending_remove_dispatchers_.clear();
+  }
+}
+
+#if defined(WEBRTC_POSIX)
+
+bool PhysicalSocketServer::Wait(int cmsWait, bool process_io) {
+#if defined(WEBRTC_USE_EPOLL)
+  // We don't keep a dedicated "epoll" descriptor containing only the non-IO
+  // (i.e. signaling) dispatcher, so "poll" will be used instead of the default
+  // "select" to support sockets larger than FD_SETSIZE.
+  if (!process_io) {
+    return WaitPoll(cmsWait, signal_wakeup_);
+  } else if (epoll_fd_ != INVALID_SOCKET) {
+    return WaitEpoll(cmsWait);
+  }
+#endif
+  return WaitSelect(cmsWait, process_io);
+}
+
+static void ProcessEvents(Dispatcher* dispatcher,
+                          bool readable,
+                          bool writable,
+                          bool check_error) {
+  int errcode = 0;
+  // TODO(pthatcher): Should we set errcode if getsockopt fails?
+  if (check_error) {
+    socklen_t len = sizeof(errcode);
+    ::getsockopt(dispatcher->GetDescriptor(), SOL_SOCKET, SO_ERROR, &errcode,
+                 &len);
+  }
+
+  uint32_t ff = 0;
+
+  // Check readable descriptors. If we're waiting on an accept, signal
+  // that. Otherwise we're waiting for data, check to see if we're
+  // readable or really closed.
+  // TODO(pthatcher): Only peek at TCP descriptors.
+  if (readable) {
+    if (dispatcher->GetRequestedEvents() & DE_ACCEPT) {
+      ff |= DE_ACCEPT;
+    } else if (errcode || dispatcher->IsDescriptorClosed()) {
+      ff |= DE_CLOSE;
+    } else {
+      ff |= DE_READ;
+    }
+  }
+
+  // Check writable descriptors. If we're waiting on a connect, detect
+  // success versus failure by the reaped error code.
+  if (writable) {
+    if (dispatcher->GetRequestedEvents() & DE_CONNECT) {
+      if (!errcode) {
+        ff |= DE_CONNECT;
+      } else {
+        ff |= DE_CLOSE;
+      }
+    } else {
+      ff |= DE_WRITE;
+    }
+  }
+
+  // Tell the descriptor about the event.
+  if (ff != 0) {
+    dispatcher->OnPreEvent(ff);
+    dispatcher->OnEvent(ff, errcode);
+  }
+}
+
+bool PhysicalSocketServer::WaitSelect(int cmsWait, bool process_io) {
+  // Calculate timing information
+
+  struct timeval* ptvWait = nullptr;
+  struct timeval tvWait;
+  struct timeval tvStop;
+  if (cmsWait != kForever) {
+    // Calculate wait timeval
+    tvWait.tv_sec = cmsWait / 1000;
+    tvWait.tv_usec = (cmsWait % 1000) * 1000;
+    ptvWait = &tvWait;
+
+    // Calculate when to return in a timeval
+    gettimeofday(&tvStop, nullptr);
+    tvStop.tv_sec += tvWait.tv_sec;
+    tvStop.tv_usec += tvWait.tv_usec;
+    if (tvStop.tv_usec >= 1000000) {
+      tvStop.tv_usec -= 1000000;
+      tvStop.tv_sec += 1;
+    }
+  }
+
+  // Zero all fd_sets. Don't need to do this inside the loop since
+  // select() zeros the descriptors not signaled
+
+  fd_set fdsRead;
+  FD_ZERO(&fdsRead);
+  fd_set fdsWrite;
+  FD_ZERO(&fdsWrite);
+  // Explicitly unpoison these FDs on MemorySanitizer which doesn't handle the
+  // inline assembly in FD_ZERO.
+  // http://crbug.com/344505
+#ifdef MEMORY_SANITIZER
+  __msan_unpoison(&fdsRead, sizeof(fdsRead));
+  __msan_unpoison(&fdsWrite, sizeof(fdsWrite));
+#endif
+
+  fWait_ = true;
+
+  while (fWait_) {
+    int fdmax = -1;
+    {
+      CritScope cr(&crit_);
+      // TODO(jbauch): Support re-entrant waiting.
+      RTC_DCHECK(!processing_dispatchers_);
+      for (Dispatcher* pdispatcher : dispatchers_) {
+        // Query dispatchers for read and write wait state
+        RTC_DCHECK(pdispatcher);
+        if (!process_io && (pdispatcher != signal_wakeup_))
+          continue;
+        int fd = pdispatcher->GetDescriptor();
+        // "select"ing a file descriptor that is equal to or larger than
+        // FD_SETSIZE will result in undefined behavior.
+        RTC_DCHECK_LT(fd, FD_SETSIZE);
+        if (fd > fdmax)
+          fdmax = fd;
+
+        uint32_t ff = pdispatcher->GetRequestedEvents();
+        if (ff & (DE_READ | DE_ACCEPT))
+          FD_SET(fd, &fdsRead);
+        if (ff & (DE_WRITE | DE_CONNECT))
+          FD_SET(fd, &fdsWrite);
+      }
+    }
+
+    // Wait then call handlers as appropriate
+    // < 0 means error
+    // 0 means timeout
+    // > 0 means count of descriptors ready
+    int n = select(fdmax + 1, &fdsRead, &fdsWrite, nullptr, ptvWait);
+
+    // If error, return error.
+    if (n < 0) {
+      if (errno != EINTR) {
+        RTC_LOG_E(LS_ERROR, EN, errno) << "select";
+        return false;
+      }
+      // Else ignore the error and keep going. If this EINTR was for one of the
+      // signals managed by this PhysicalSocketServer, the
+      // PosixSignalDeliveryDispatcher will be in the signaled state in the next
+      // iteration.
+    } else if (n == 0) {
+      // If timeout, return success
+      return true;
+    } else {
+      // We have signaled descriptors
+      CritScope cr(&crit_);
+      processing_dispatchers_ = true;
+      for (Dispatcher* pdispatcher : dispatchers_) {
+        int fd = pdispatcher->GetDescriptor();
+
+        bool readable = FD_ISSET(fd, &fdsRead);
+        if (readable) {
+          FD_CLR(fd, &fdsRead);
+        }
+
+        bool writable = FD_ISSET(fd, &fdsWrite);
+        if (writable) {
+          FD_CLR(fd, &fdsWrite);
+        }
+
+        // The error code can be signaled through reads or writes.
+        ProcessEvents(pdispatcher, readable, writable, readable || writable);
+      }
+
+      processing_dispatchers_ = false;
+      // Process deferred dispatchers that have been added/removed while the
+      // events were handled above.
+      AddRemovePendingDispatchers();
+    }
+
+    // Recalc the time remaining to wait. Doing it here means it doesn't get
+    // calced twice the first time through the loop
+    if (ptvWait) {
+      ptvWait->tv_sec = 0;
+      ptvWait->tv_usec = 0;
+      struct timeval tvT;
+      gettimeofday(&tvT, nullptr);
+      if ((tvStop.tv_sec > tvT.tv_sec)
+          || ((tvStop.tv_sec == tvT.tv_sec)
+              && (tvStop.tv_usec > tvT.tv_usec))) {
+        ptvWait->tv_sec = tvStop.tv_sec - tvT.tv_sec;
+        ptvWait->tv_usec = tvStop.tv_usec - tvT.tv_usec;
+        if (ptvWait->tv_usec < 0) {
+          RTC_DCHECK(ptvWait->tv_sec > 0);
+          ptvWait->tv_usec += 1000000;
+          ptvWait->tv_sec -= 1;
+        }
+      }
+    }
+  }
+
+  return true;
+}
+
+#if defined(WEBRTC_USE_EPOLL)
+
+// Initial number of events to process with one call to "epoll_wait".
+static const size_t kInitialEpollEvents = 128;
+
+// Maximum number of events to process with one call to "epoll_wait".
+static const size_t kMaxEpollEvents = 8192;
+
+void PhysicalSocketServer::AddEpoll(Dispatcher* pdispatcher) {
+  RTC_DCHECK(epoll_fd_ != INVALID_SOCKET);
+  int fd = pdispatcher->GetDescriptor();
+  RTC_DCHECK(fd != INVALID_SOCKET);
+  if (fd == INVALID_SOCKET) {
+    return;
+  }
+
+  struct epoll_event event = {0};
+  event.events = GetEpollEvents(pdispatcher->GetRequestedEvents());
+  event.data.ptr = pdispatcher;
+  int err = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, fd, &event);
+  RTC_DCHECK_EQ(err, 0);
+  if (err == -1) {
+    RTC_LOG_E(LS_ERROR, EN, errno) << "epoll_ctl EPOLL_CTL_ADD";
+  }
+}
+
+void PhysicalSocketServer::RemoveEpoll(Dispatcher* pdispatcher) {
+  RTC_DCHECK(epoll_fd_ != INVALID_SOCKET);
+  int fd = pdispatcher->GetDescriptor();
+  RTC_DCHECK(fd != INVALID_SOCKET);
+  if (fd == INVALID_SOCKET) {
+    return;
+  }
+
+  struct epoll_event event = {0};
+  int err = epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, fd, &event);
+  RTC_DCHECK(err == 0 || errno == ENOENT);
+  if (err == -1) {
+    if (errno == ENOENT) {
+      // Socket has already been closed.
+      RTC_LOG_E(LS_VERBOSE, EN, errno) << "epoll_ctl EPOLL_CTL_DEL";
+    } else {
+      RTC_LOG_E(LS_ERROR, EN, errno) << "epoll_ctl EPOLL_CTL_DEL";
+    }
+  }
+}
+
+void PhysicalSocketServer::UpdateEpoll(Dispatcher* pdispatcher) {
+  RTC_DCHECK(epoll_fd_ != INVALID_SOCKET);
+  int fd = pdispatcher->GetDescriptor();
+  RTC_DCHECK(fd != INVALID_SOCKET);
+  if (fd == INVALID_SOCKET) {
+    return;
+  }
+
+  struct epoll_event event = {0};
+  event.events = GetEpollEvents(pdispatcher->GetRequestedEvents());
+  event.data.ptr = pdispatcher;
+  int err = epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, fd, &event);
+  RTC_DCHECK_EQ(err, 0);
+  if (err == -1) {
+    RTC_LOG_E(LS_ERROR, EN, errno) << "epoll_ctl EPOLL_CTL_MOD";
+  }
+}
+
+bool PhysicalSocketServer::WaitEpoll(int cmsWait) {
+  RTC_DCHECK(epoll_fd_ != INVALID_SOCKET);
+  int64_t tvWait = -1;
+  int64_t tvStop = -1;
+  if (cmsWait != kForever) {
+    tvWait = cmsWait;
+    tvStop = TimeAfter(cmsWait);
+  }
+
+  if (epoll_events_.empty()) {
+    // The initial space to receive events is created only if epoll is used.
+    epoll_events_.resize(kInitialEpollEvents);
+  }
+
+  fWait_ = true;
+
+  while (fWait_) {
+    // Wait then call handlers as appropriate
+    // < 0 means error
+    // 0 means timeout
+    // > 0 means count of descriptors ready
+    int n = epoll_wait(epoll_fd_, &epoll_events_[0],
+                       static_cast<int>(epoll_events_.size()),
+                       static_cast<int>(tvWait));
+    if (n < 0) {
+      if (errno != EINTR) {
+        RTC_LOG_E(LS_ERROR, EN, errno) << "epoll";
+        return false;
+      }
+      // Else ignore the error and keep going. If this EINTR was for one of the
+      // signals managed by this PhysicalSocketServer, the
+      // PosixSignalDeliveryDispatcher will be in the signaled state in the next
+      // iteration.
+    } else if (n == 0) {
+      // If timeout, return success
+      return true;
+    } else {
+      // We have signaled descriptors
+      CritScope cr(&crit_);
+      for (int i = 0; i < n; ++i) {
+        const epoll_event& event = epoll_events_[i];
+        Dispatcher* pdispatcher = static_cast<Dispatcher*>(event.data.ptr);
+        if (dispatchers_.find(pdispatcher) == dispatchers_.end()) {
+          // The dispatcher for this socket no longer exists.
+          continue;
+        }
+
+        bool readable = (event.events & (EPOLLIN | EPOLLPRI));
+        bool writable = (event.events & EPOLLOUT);
+        bool check_error = (event.events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP));
+
+        ProcessEvents(pdispatcher, readable, writable, check_error);
+      }
+    }
+
+    if (static_cast<size_t>(n) == epoll_events_.size() &&
+        epoll_events_.size() < kMaxEpollEvents) {
+      // We used the complete space to receive events, increase size for future
+      // iterations.
+      epoll_events_.resize(std::max(epoll_events_.size() * 2, kMaxEpollEvents));
+    }
+
+    if (cmsWait != kForever) {
+      tvWait = TimeDiff(tvStop, TimeMillis());
+      if (tvWait < 0) {
+        // Return success on timeout.
+        return true;
+      }
+    }
+  }
+
+  return true;
+}
+
+bool PhysicalSocketServer::WaitPoll(int cmsWait, Dispatcher* dispatcher) {
+  RTC_DCHECK(dispatcher);
+  int64_t tvWait = -1;
+  int64_t tvStop = -1;
+  if (cmsWait != kForever) {
+    tvWait = cmsWait;
+    tvStop = TimeAfter(cmsWait);
+  }
+
+  fWait_ = true;
+
+  struct pollfd fds = {0};
+  int fd = dispatcher->GetDescriptor();
+  fds.fd = fd;
+
+  while (fWait_) {
+    uint32_t ff = dispatcher->GetRequestedEvents();
+    fds.events = 0;
+    if (ff & (DE_READ | DE_ACCEPT)) {
+      fds.events |= POLLIN;
+    }
+    if (ff & (DE_WRITE | DE_CONNECT)) {
+      fds.events |= POLLOUT;
+    }
+    fds.revents = 0;
+
+    // Wait then call handlers as appropriate
+    // < 0 means error
+    // 0 means timeout
+    // > 0 means count of descriptors ready
+    int n = poll(&fds, 1, static_cast<int>(tvWait));
+    if (n < 0) {
+      if (errno != EINTR) {
+        RTC_LOG_E(LS_ERROR, EN, errno) << "poll";
+        return false;
+      }
+      // Else ignore the error and keep going. If this EINTR was for one of the
+      // signals managed by this PhysicalSocketServer, the
+      // PosixSignalDeliveryDispatcher will be in the signaled state in the next
+      // iteration.
+    } else if (n == 0) {
+      // If timeout, return success
+      return true;
+    } else {
+      // We have signaled descriptors (should only be the passed dispatcher).
+      RTC_DCHECK_EQ(n, 1);
+      RTC_DCHECK_EQ(fds.fd, fd);
+
+      bool readable = (fds.revents & (POLLIN | POLLPRI));
+      bool writable = (fds.revents & POLLOUT);
+      bool check_error = (fds.revents & (POLLRDHUP | POLLERR | POLLHUP));
+
+      ProcessEvents(dispatcher, readable, writable, check_error);
+    }
+
+    if (cmsWait != kForever) {
+      tvWait = TimeDiff(tvStop, TimeMillis());
+      if (tvWait < 0) {
+        // Return success on timeout.
+        return true;
+      }
+    }
+  }
+
+  return true;
+}
+
+#endif  // WEBRTC_USE_EPOLL
+
+static void GlobalSignalHandler(int signum) {
+  PosixSignalHandler::Instance()->OnPosixSignalReceived(signum);
+}
+
+bool PhysicalSocketServer::SetPosixSignalHandler(int signum,
+                                                 void (*handler)(int)) {
+  // If handler is SIG_IGN or SIG_DFL then clear our user-level handler,
+  // otherwise set one.
+  if (handler == SIG_IGN || handler == SIG_DFL) {
+    if (!InstallSignal(signum, handler)) {
+      return false;
+    }
+    if (signal_dispatcher_) {
+      signal_dispatcher_->ClearHandler(signum);
+      if (!signal_dispatcher_->HasHandlers()) {
+        signal_dispatcher_.reset();
+      }
+    }
+  } else {
+    if (!signal_dispatcher_) {
+      signal_dispatcher_.reset(new PosixSignalDispatcher(this));
+    }
+    signal_dispatcher_->SetHandler(signum, handler);
+    if (!InstallSignal(signum, &GlobalSignalHandler)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+Dispatcher* PhysicalSocketServer::signal_dispatcher() {
+  return signal_dispatcher_.get();
+}
+
+bool PhysicalSocketServer::InstallSignal(int signum, void (*handler)(int)) {
+  struct sigaction act;
+  // It doesn't really matter what we set this mask to.
+  if (sigemptyset(&act.sa_mask) != 0) {
+    RTC_LOG_ERR(LS_ERROR) << "Couldn't set mask";
+    return false;
+  }
+  act.sa_handler = handler;
+#if !defined(__native_client__)
+  // Use SA_RESTART so that our syscalls don't get EINTR, since we don't need it
+  // and it's a nuisance. Though some syscalls still return EINTR and there's no
+  // real standard for which ones. :(
+  act.sa_flags = SA_RESTART;
+#else
+  act.sa_flags = 0;
+#endif
+  if (sigaction(signum, &act, nullptr) != 0) {
+    RTC_LOG_ERR(LS_ERROR) << "Couldn't set sigaction";
+    return false;
+  }
+  return true;
+}
+#endif  // WEBRTC_POSIX
+
+#if defined(WEBRTC_WIN)
+bool PhysicalSocketServer::Wait(int cmsWait, bool process_io) {
+  int64_t cmsTotal = cmsWait;
+  int64_t cmsElapsed = 0;
+  int64_t msStart = Time();
+
+  fWait_ = true;
+  while (fWait_) {
+    std::vector<WSAEVENT> events;
+    std::vector<Dispatcher *> event_owners;
+
+    events.push_back(socket_ev_);
+
+    {
+      CritScope cr(&crit_);
+      // TODO(jbauch): Support re-entrant waiting.
+      RTC_DCHECK(!processing_dispatchers_);
+
+      // Calling "CheckSignalClose" might remove a closed dispatcher from the
+      // set. This must be deferred to prevent invalidating the iterator.
+      processing_dispatchers_ = true;
+      for (Dispatcher* disp : dispatchers_) {
+        if (!process_io && (disp != signal_wakeup_))
+          continue;
+        SOCKET s = disp->GetSocket();
+        if (disp->CheckSignalClose()) {
+          // We just signalled close, don't poll this socket
+        } else if (s != INVALID_SOCKET) {
+          WSAEventSelect(s,
+                         events[0],
+                         FlagsToEvents(disp->GetRequestedEvents()));
+        } else {
+          events.push_back(disp->GetWSAEvent());
+          event_owners.push_back(disp);
+        }
+      }
+
+      processing_dispatchers_ = false;
+      // Process deferred dispatchers that have been added/removed while the
+      // events were handled above.
+      AddRemovePendingDispatchers();
+    }
+
+    // Which is shorter, the delay wait or the asked wait?
+
+    int64_t cmsNext;
+    if (cmsWait == kForever) {
+      cmsNext = cmsWait;
+    } else {
+      cmsNext = std::max<int64_t>(0, cmsTotal - cmsElapsed);
+    }
+
+    // Wait for one of the events to signal
+    DWORD dw = WSAWaitForMultipleEvents(static_cast<DWORD>(events.size()),
+                                        &events[0],
+                                        false,
+                                        static_cast<DWORD>(cmsNext),
+                                        false);
+
+    if (dw == WSA_WAIT_FAILED) {
+      // Failed?
+      // TODO(pthatcher): need a better strategy than this!
+      WSAGetLastError();
+      RTC_NOTREACHED();
+      return false;
+    } else if (dw == WSA_WAIT_TIMEOUT) {
+      // Timeout?
+      return true;
+    } else {
+      // Figure out which one it is and call it
+      CritScope cr(&crit_);
+      int index = dw - WSA_WAIT_EVENT_0;
+      if (index > 0) {
+        --index; // The first event is the socket event
+        Dispatcher* disp = event_owners[index];
+        // The dispatcher could have been removed while waiting for events.
+        if (dispatchers_.find(disp) != dispatchers_.end()) {
+          disp->OnPreEvent(0);
+          disp->OnEvent(0, 0);
+        }
+      } else if (process_io) {
+        processing_dispatchers_ = true;
+        for (Dispatcher* disp : dispatchers_) {
+          SOCKET s = disp->GetSocket();
+          if (s == INVALID_SOCKET)
+            continue;
+
+          WSANETWORKEVENTS wsaEvents;
+          int err = WSAEnumNetworkEvents(s, events[0], &wsaEvents);
+          if (err == 0) {
+            {
+              if ((wsaEvents.lNetworkEvents & FD_READ) &&
+                  wsaEvents.iErrorCode[FD_READ_BIT] != 0) {
+                RTC_LOG(WARNING)
+                    << "PhysicalSocketServer got FD_READ_BIT error "
+                    << wsaEvents.iErrorCode[FD_READ_BIT];
+              }
+              if ((wsaEvents.lNetworkEvents & FD_WRITE) &&
+                  wsaEvents.iErrorCode[FD_WRITE_BIT] != 0) {
+                RTC_LOG(WARNING)
+                    << "PhysicalSocketServer got FD_WRITE_BIT error "
+                    << wsaEvents.iErrorCode[FD_WRITE_BIT];
+              }
+              if ((wsaEvents.lNetworkEvents & FD_CONNECT) &&
+                  wsaEvents.iErrorCode[FD_CONNECT_BIT] != 0) {
+                RTC_LOG(WARNING)
+                    << "PhysicalSocketServer got FD_CONNECT_BIT error "
+                    << wsaEvents.iErrorCode[FD_CONNECT_BIT];
+              }
+              if ((wsaEvents.lNetworkEvents & FD_ACCEPT) &&
+                  wsaEvents.iErrorCode[FD_ACCEPT_BIT] != 0) {
+                RTC_LOG(WARNING)
+                    << "PhysicalSocketServer got FD_ACCEPT_BIT error "
+                    << wsaEvents.iErrorCode[FD_ACCEPT_BIT];
+              }
+              if ((wsaEvents.lNetworkEvents & FD_CLOSE) &&
+                  wsaEvents.iErrorCode[FD_CLOSE_BIT] != 0) {
+                RTC_LOG(WARNING)
+                    << "PhysicalSocketServer got FD_CLOSE_BIT error "
+                    << wsaEvents.iErrorCode[FD_CLOSE_BIT];
+              }
+            }
+            uint32_t ff = 0;
+            int errcode = 0;
+            if (wsaEvents.lNetworkEvents & FD_READ)
+              ff |= DE_READ;
+            if (wsaEvents.lNetworkEvents & FD_WRITE)
+              ff |= DE_WRITE;
+            if (wsaEvents.lNetworkEvents & FD_CONNECT) {
+              if (wsaEvents.iErrorCode[FD_CONNECT_BIT] == 0) {
+                ff |= DE_CONNECT;
+              } else {
+                ff |= DE_CLOSE;
+                errcode = wsaEvents.iErrorCode[FD_CONNECT_BIT];
+              }
+            }
+            if (wsaEvents.lNetworkEvents & FD_ACCEPT)
+              ff |= DE_ACCEPT;
+            if (wsaEvents.lNetworkEvents & FD_CLOSE) {
+              ff |= DE_CLOSE;
+              errcode = wsaEvents.iErrorCode[FD_CLOSE_BIT];
+            }
+            if (ff != 0) {
+              disp->OnPreEvent(ff);
+              disp->OnEvent(ff, errcode);
+            }
+          }
+        }
+
+        processing_dispatchers_ = false;
+        // Process deferred dispatchers that have been added/removed while the
+        // events were handled above.
+        AddRemovePendingDispatchers();
+      }
+
+      // Reset the network event until new activity occurs
+      WSAResetEvent(socket_ev_);
+    }
+
+    // Break?
+    if (!fWait_)
+      break;
+    cmsElapsed = TimeSince(msStart);
+    if ((cmsWait != kForever) && (cmsElapsed >= cmsWait)) {
+       break;
+    }
+  }
+
+  // Done
+  return true;
+}
+#endif  // WEBRTC_WIN
+
+}  // namespace rtc
diff --git a/rtc_base/physicalsocketserver.h b/rtc_base/physicalsocketserver.h
new file mode 100644
index 0000000..f816774
--- /dev/null
+++ b/rtc_base/physicalsocketserver.h
@@ -0,0 +1,270 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_PHYSICALSOCKETSERVER_H_
+#define RTC_BASE_PHYSICALSOCKETSERVER_H_
+
+#if defined(WEBRTC_POSIX) && defined(WEBRTC_LINUX)
+#include <sys/epoll.h>
+#define WEBRTC_USE_EPOLL 1
+#endif
+
+#include <memory>
+#include <set>
+#include <vector>
+
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/nethelpers.h"
+#include "rtc_base/socketserver.h"
+
+#if defined(WEBRTC_POSIX)
+typedef int SOCKET;
+#endif // WEBRTC_POSIX
+
+namespace rtc {
+
+// Event constants for the Dispatcher class.
+enum DispatcherEvent {
+  DE_READ    = 0x0001,
+  DE_WRITE   = 0x0002,
+  DE_CONNECT = 0x0004,
+  DE_CLOSE   = 0x0008,
+  DE_ACCEPT  = 0x0010,
+};
+
+class Signaler;
+#if defined(WEBRTC_POSIX)
+class PosixSignalDispatcher;
+#endif
+
+class Dispatcher {
+ public:
+  virtual ~Dispatcher() {}
+  virtual uint32_t GetRequestedEvents() = 0;
+  virtual void OnPreEvent(uint32_t ff) = 0;
+  virtual void OnEvent(uint32_t ff, int err) = 0;
+#if defined(WEBRTC_WIN)
+  virtual WSAEVENT GetWSAEvent() = 0;
+  virtual SOCKET GetSocket() = 0;
+  virtual bool CheckSignalClose() = 0;
+#elif defined(WEBRTC_POSIX)
+  virtual int GetDescriptor() = 0;
+  virtual bool IsDescriptorClosed() = 0;
+#endif
+};
+
+// A socket server that provides the real sockets of the underlying OS.
+class PhysicalSocketServer : public SocketServer {
+ public:
+  PhysicalSocketServer();
+  ~PhysicalSocketServer() override;
+
+  // SocketFactory:
+  Socket* CreateSocket(int type) override;
+  Socket* CreateSocket(int family, int type) override;
+
+  AsyncSocket* CreateAsyncSocket(int type) override;
+  AsyncSocket* CreateAsyncSocket(int family, int type) override;
+
+  // Internal Factory for Accept (virtual so it can be overwritten in tests).
+  virtual AsyncSocket* WrapSocket(SOCKET s);
+
+  // SocketServer:
+  bool Wait(int cms, bool process_io) override;
+  void WakeUp() override;
+
+  void Add(Dispatcher* dispatcher);
+  void Remove(Dispatcher* dispatcher);
+  void Update(Dispatcher* dispatcher);
+
+#if defined(WEBRTC_POSIX)
+  // Sets the function to be executed in response to the specified POSIX signal.
+  // The function is executed from inside Wait() using the "self-pipe trick"--
+  // regardless of which thread receives the signal--and hence can safely
+  // manipulate user-level data structures.
+  // "handler" may be SIG_IGN, SIG_DFL, or a user-specified function, just like
+  // with signal(2).
+  // Only one PhysicalSocketServer should have user-level signal handlers.
+  // Dispatching signals on multiple PhysicalSocketServers is not reliable.
+  // The signal mask is not modified. It is the caller's responsibily to
+  // maintain it as desired.
+  virtual bool SetPosixSignalHandler(int signum, void (*handler)(int));
+
+ protected:
+  Dispatcher* signal_dispatcher();
+#endif
+
+ private:
+  typedef std::set<Dispatcher*> DispatcherSet;
+
+  void AddRemovePendingDispatchers();
+
+#if defined(WEBRTC_POSIX)
+  bool WaitSelect(int cms, bool process_io);
+  static bool InstallSignal(int signum, void (*handler)(int));
+
+  std::unique_ptr<PosixSignalDispatcher> signal_dispatcher_;
+#endif  // WEBRTC_POSIX
+#if defined(WEBRTC_USE_EPOLL)
+  void AddEpoll(Dispatcher* dispatcher);
+  void RemoveEpoll(Dispatcher* dispatcher);
+  void UpdateEpoll(Dispatcher* dispatcher);
+  bool WaitEpoll(int cms);
+  bool WaitPoll(int cms, Dispatcher* dispatcher);
+
+  int epoll_fd_ = INVALID_SOCKET;
+  std::vector<struct epoll_event> epoll_events_;
+#endif  // WEBRTC_USE_EPOLL
+  DispatcherSet dispatchers_;
+  DispatcherSet pending_add_dispatchers_;
+  DispatcherSet pending_remove_dispatchers_;
+  bool processing_dispatchers_ = false;
+  Signaler* signal_wakeup_;
+  CriticalSection crit_;
+  bool fWait_;
+#if defined(WEBRTC_WIN)
+  WSAEVENT socket_ev_;
+#endif
+};
+
+class PhysicalSocket : public AsyncSocket, public sigslot::has_slots<> {
+ public:
+  PhysicalSocket(PhysicalSocketServer* ss, SOCKET s = INVALID_SOCKET);
+  ~PhysicalSocket() override;
+
+  // Creates the underlying OS socket (same as the "socket" function).
+  virtual bool Create(int family, int type);
+
+  SocketAddress GetLocalAddress() const override;
+  SocketAddress GetRemoteAddress() const override;
+
+  int Bind(const SocketAddress& bind_addr) override;
+  int Connect(const SocketAddress& addr) override;
+
+  int GetError() const override;
+  void SetError(int error) override;
+
+  ConnState GetState() const override;
+
+  int GetOption(Option opt, int* value) override;
+  int SetOption(Option opt, int value) override;
+
+  int Send(const void* pv, size_t cb) override;
+  int SendTo(const void* buffer,
+             size_t length,
+             const SocketAddress& addr) override;
+
+  int Recv(void* buffer, size_t length, int64_t* timestamp) override;
+  int RecvFrom(void* buffer,
+               size_t length,
+               SocketAddress* out_addr,
+               int64_t* timestamp) override;
+
+  int Listen(int backlog) override;
+  AsyncSocket* Accept(SocketAddress* out_addr) override;
+
+  int Close() override;
+
+  SocketServer* socketserver() { return ss_; }
+
+ protected:
+  int DoConnect(const SocketAddress& connect_addr);
+
+  // Make virtual so ::accept can be overwritten in tests.
+  virtual SOCKET DoAccept(SOCKET socket, sockaddr* addr, socklen_t* addrlen);
+
+  // Make virtual so ::send can be overwritten in tests.
+  virtual int DoSend(SOCKET socket, const char* buf, int len, int flags);
+
+  // Make virtual so ::sendto can be overwritten in tests.
+  virtual int DoSendTo(SOCKET socket, const char* buf, int len, int flags,
+                       const struct sockaddr* dest_addr, socklen_t addrlen);
+
+  void OnResolveResult(AsyncResolverInterface* resolver);
+
+  void UpdateLastError();
+  void MaybeRemapSendError();
+
+  uint8_t enabled_events() const { return enabled_events_; }
+  virtual void SetEnabledEvents(uint8_t events);
+  virtual void EnableEvents(uint8_t events);
+  virtual void DisableEvents(uint8_t events);
+
+  static int TranslateOption(Option opt, int* slevel, int* sopt);
+
+  PhysicalSocketServer* ss_;
+  SOCKET s_;
+  bool udp_;
+  CriticalSection crit_;
+  int error_ RTC_GUARDED_BY(crit_);
+  ConnState state_;
+  AsyncResolver* resolver_;
+
+#if !defined(NDEBUG)
+  std::string dbg_addr_;
+#endif
+
+ private:
+  uint8_t enabled_events_ = 0;
+};
+
+class SocketDispatcher : public Dispatcher, public PhysicalSocket {
+ public:
+  explicit SocketDispatcher(PhysicalSocketServer *ss);
+  SocketDispatcher(SOCKET s, PhysicalSocketServer *ss);
+  ~SocketDispatcher() override;
+
+  bool Initialize();
+
+  virtual bool Create(int type);
+  bool Create(int family, int type) override;
+
+#if defined(WEBRTC_WIN)
+  WSAEVENT GetWSAEvent() override;
+  SOCKET GetSocket() override;
+  bool CheckSignalClose() override;
+#elif defined(WEBRTC_POSIX)
+  int GetDescriptor() override;
+  bool IsDescriptorClosed() override;
+#endif
+
+  uint32_t GetRequestedEvents() override;
+  void OnPreEvent(uint32_t ff) override;
+  void OnEvent(uint32_t ff, int err) override;
+
+  int Close() override;
+
+#if defined(WEBRTC_USE_EPOLL)
+ protected:
+  void StartBatchedEventUpdates();
+  void FinishBatchedEventUpdates();
+
+  void SetEnabledEvents(uint8_t events) override;
+  void EnableEvents(uint8_t events) override;
+  void DisableEvents(uint8_t events) override;
+#endif
+
+ private:
+#if defined(WEBRTC_WIN)
+  static int next_id_;
+  int id_;
+  bool signal_close_;
+  int signal_err_;
+#endif // WEBRTC_WIN
+#if defined(WEBRTC_USE_EPOLL)
+  void MaybeUpdateDispatcher(uint8_t old_events);
+
+  int saved_enabled_events_ = -1;
+#endif
+};
+
+} // namespace rtc
+
+#endif // RTC_BASE_PHYSICALSOCKETSERVER_H_
diff --git a/rtc_base/physicalsocketserver_unittest.cc b/rtc_base/physicalsocketserver_unittest.cc
new file mode 100644
index 0000000..d09385b
--- /dev/null
+++ b/rtc_base/physicalsocketserver_unittest.cc
@@ -0,0 +1,622 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <signal.h>
+#include <stdarg.h>
+
+#include "rtc_base/gunit.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/networkmonitor.h"
+#include "rtc_base/physicalsocketserver.h"
+#include "rtc_base/socket_unittest.h"
+#include "rtc_base/testutils.h"
+#include "rtc_base/thread.h"
+
+namespace rtc {
+
+#define MAYBE_SKIP_IPV4                        \
+  if (!HasIPv4Enabled()) {                     \
+    RTC_LOG(LS_INFO) << "No IPv4... skipping"; \
+    return;                                    \
+  }
+
+#define MAYBE_SKIP_IPV6                        \
+  if (!HasIPv6Enabled()) {                     \
+    RTC_LOG(LS_INFO) << "No IPv6... skipping"; \
+    return;                                    \
+  }
+
+class PhysicalSocketTest;
+
+class FakeSocketDispatcher : public SocketDispatcher {
+ public:
+  explicit FakeSocketDispatcher(PhysicalSocketServer* ss)
+    : SocketDispatcher(ss) {
+  }
+
+  FakeSocketDispatcher(SOCKET s, PhysicalSocketServer* ss)
+    : SocketDispatcher(s, ss) {
+  }
+
+ protected:
+  SOCKET DoAccept(SOCKET socket, sockaddr* addr, socklen_t* addrlen) override;
+  int DoSend(SOCKET socket, const char* buf, int len, int flags) override;
+  int DoSendTo(SOCKET socket, const char* buf, int len, int flags,
+               const struct sockaddr* dest_addr, socklen_t addrlen) override;
+};
+
+class FakePhysicalSocketServer : public PhysicalSocketServer {
+ public:
+  explicit FakePhysicalSocketServer(PhysicalSocketTest* test)
+    : test_(test) {
+  }
+
+  AsyncSocket* CreateAsyncSocket(int type) override {
+    SocketDispatcher* dispatcher = new FakeSocketDispatcher(this);
+    if (!dispatcher->Create(type)) {
+      delete dispatcher;
+      return nullptr;
+    }
+    return dispatcher;
+  }
+
+  AsyncSocket* CreateAsyncSocket(int family, int type) override {
+    SocketDispatcher* dispatcher = new FakeSocketDispatcher(this);
+    if (!dispatcher->Create(family, type)) {
+      delete dispatcher;
+      return nullptr;
+    }
+    return dispatcher;
+  }
+
+  AsyncSocket* WrapSocket(SOCKET s) override {
+    SocketDispatcher* dispatcher = new FakeSocketDispatcher(s, this);
+    if (!dispatcher->Initialize()) {
+      delete dispatcher;
+      return nullptr;
+    }
+    return dispatcher;
+  }
+
+  PhysicalSocketTest* GetTest() const { return test_; }
+
+ private:
+  PhysicalSocketTest* test_;
+};
+
+class FakeNetworkBinder : public NetworkBinderInterface {
+ public:
+  NetworkBindingResult BindSocketToNetwork(int, const IPAddress&) override {
+    ++num_binds_;
+    return result_;
+  }
+
+  void set_result(NetworkBindingResult result) { result_ = result; }
+
+  int num_binds() { return num_binds_; }
+
+ private:
+  NetworkBindingResult result_ = NetworkBindingResult::SUCCESS;
+  int num_binds_ = 0;
+};
+
+class PhysicalSocketTest : public SocketTest {
+ public:
+  // Set flag to simluate failures when calling "::accept" on a AsyncSocket.
+  void SetFailAccept(bool fail) { fail_accept_ = fail; }
+  bool FailAccept() const { return fail_accept_; }
+
+  // Maximum size to ::send to a socket. Set to < 0 to disable limiting.
+  void SetMaxSendSize(int max_size) { max_send_size_ = max_size; }
+  int MaxSendSize() const { return max_send_size_; }
+
+ protected:
+  PhysicalSocketTest()
+    : server_(new FakePhysicalSocketServer(this)),
+      thread_(server_.get()),
+      fail_accept_(false),
+      max_send_size_(-1) {}
+
+  void ConnectInternalAcceptError(const IPAddress& loopback);
+  void WritableAfterPartialWrite(const IPAddress& loopback);
+
+  std::unique_ptr<FakePhysicalSocketServer> server_;
+  rtc::AutoSocketServerThread thread_;
+  bool fail_accept_;
+  int max_send_size_;
+};
+
+SOCKET FakeSocketDispatcher::DoAccept(SOCKET socket,
+                                      sockaddr* addr,
+                                      socklen_t* addrlen) {
+  FakePhysicalSocketServer* ss =
+      static_cast<FakePhysicalSocketServer*>(socketserver());
+  if (ss->GetTest()->FailAccept()) {
+    return INVALID_SOCKET;
+  }
+
+  return SocketDispatcher::DoAccept(socket, addr, addrlen);
+}
+
+int FakeSocketDispatcher::DoSend(SOCKET socket, const char* buf, int len,
+    int flags) {
+  FakePhysicalSocketServer* ss =
+      static_cast<FakePhysicalSocketServer*>(socketserver());
+  if (ss->GetTest()->MaxSendSize() >= 0) {
+    len = std::min(len, ss->GetTest()->MaxSendSize());
+  }
+
+  return SocketDispatcher::DoSend(socket, buf, len, flags);
+}
+
+int FakeSocketDispatcher::DoSendTo(SOCKET socket, const char* buf, int len,
+    int flags, const struct sockaddr* dest_addr, socklen_t addrlen) {
+  FakePhysicalSocketServer* ss =
+      static_cast<FakePhysicalSocketServer*>(socketserver());
+  if (ss->GetTest()->MaxSendSize() >= 0) {
+    len = std::min(len, ss->GetTest()->MaxSendSize());
+  }
+
+  return SocketDispatcher::DoSendTo(socket, buf, len, flags, dest_addr,
+      addrlen);
+}
+
+TEST_F(PhysicalSocketTest, TestConnectIPv4) {
+  MAYBE_SKIP_IPV4;
+  SocketTest::TestConnectIPv4();
+}
+
+TEST_F(PhysicalSocketTest, TestConnectIPv6) {
+  SocketTest::TestConnectIPv6();
+}
+
+TEST_F(PhysicalSocketTest, TestConnectWithDnsLookupIPv4) {
+  MAYBE_SKIP_IPV4;
+  SocketTest::TestConnectWithDnsLookupIPv4();
+}
+
+TEST_F(PhysicalSocketTest, TestConnectWithDnsLookupIPv6) {
+  SocketTest::TestConnectWithDnsLookupIPv6();
+}
+
+TEST_F(PhysicalSocketTest, TestConnectFailIPv4) {
+  MAYBE_SKIP_IPV4;
+  SocketTest::TestConnectFailIPv4();
+}
+
+void PhysicalSocketTest::ConnectInternalAcceptError(const IPAddress& loopback) {
+  webrtc::testing::StreamSink sink;
+  SocketAddress accept_addr;
+
+  // Create two clients.
+  std::unique_ptr<AsyncSocket> client1(
+      server_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(client1.get());
+  EXPECT_EQ(AsyncSocket::CS_CLOSED, client1->GetState());
+  EXPECT_PRED1(IsUnspecOrEmptyIP, client1->GetLocalAddress().ipaddr());
+
+  std::unique_ptr<AsyncSocket> client2(
+      server_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(client2.get());
+  EXPECT_EQ(AsyncSocket::CS_CLOSED, client2->GetState());
+  EXPECT_PRED1(IsUnspecOrEmptyIP, client2->GetLocalAddress().ipaddr());
+
+  // Create server and listen.
+  std::unique_ptr<AsyncSocket> server(
+      server_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(server.get());
+  EXPECT_EQ(0, server->Bind(SocketAddress(loopback, 0)));
+  EXPECT_EQ(0, server->Listen(5));
+  EXPECT_EQ(AsyncSocket::CS_CONNECTING, server->GetState());
+
+  // Ensure no pending server connections, since we haven't done anything yet.
+  EXPECT_FALSE(sink.Check(server.get(), webrtc::testing::SSE_READ));
+  EXPECT_TRUE(nullptr == server->Accept(&accept_addr));
+  EXPECT_TRUE(accept_addr.IsNil());
+
+  // Attempt first connect to listening socket.
+  EXPECT_EQ(0, client1->Connect(server->GetLocalAddress()));
+  EXPECT_FALSE(client1->GetLocalAddress().IsNil());
+  EXPECT_NE(server->GetLocalAddress(), client1->GetLocalAddress());
+
+  // Client is connecting, outcome not yet determined.
+  EXPECT_EQ(AsyncSocket::CS_CONNECTING, client1->GetState());
+  EXPECT_FALSE(sink.Check(client1.get(), webrtc::testing::SSE_OPEN));
+  EXPECT_FALSE(sink.Check(client1.get(), webrtc::testing::SSE_CLOSE));
+
+  // Server has pending connection, try to accept it (will fail).
+  EXPECT_TRUE_WAIT((sink.Check(server.get(), webrtc::testing::SSE_READ)),
+                   kTimeout);
+  // Simulate "::accept" returning an error.
+  SetFailAccept(true);
+  std::unique_ptr<AsyncSocket> accepted(server->Accept(&accept_addr));
+  EXPECT_FALSE(accepted);
+  ASSERT_TRUE(accept_addr.IsNil());
+
+  // Ensure no more pending server connections.
+  EXPECT_FALSE(sink.Check(server.get(), webrtc::testing::SSE_READ));
+  EXPECT_TRUE(nullptr == server->Accept(&accept_addr));
+  EXPECT_TRUE(accept_addr.IsNil());
+
+  // Attempt second connect to listening socket.
+  EXPECT_EQ(0, client2->Connect(server->GetLocalAddress()));
+  EXPECT_FALSE(client2->GetLocalAddress().IsNil());
+  EXPECT_NE(server->GetLocalAddress(), client2->GetLocalAddress());
+
+  // Client is connecting, outcome not yet determined.
+  EXPECT_EQ(AsyncSocket::CS_CONNECTING, client2->GetState());
+  EXPECT_FALSE(sink.Check(client2.get(), webrtc::testing::SSE_OPEN));
+  EXPECT_FALSE(sink.Check(client2.get(), webrtc::testing::SSE_CLOSE));
+
+  // Server has pending connection, try to accept it (will succeed).
+  EXPECT_TRUE_WAIT((sink.Check(server.get(), webrtc::testing::SSE_READ)),
+                   kTimeout);
+  SetFailAccept(false);
+  std::unique_ptr<AsyncSocket> accepted2(server->Accept(&accept_addr));
+  ASSERT_TRUE(accepted2);
+  EXPECT_FALSE(accept_addr.IsNil());
+  EXPECT_EQ(accepted2->GetRemoteAddress(), accept_addr);
+}
+
+TEST_F(PhysicalSocketTest, TestConnectAcceptErrorIPv4) {
+  MAYBE_SKIP_IPV4;
+  ConnectInternalAcceptError(kIPv4Loopback);
+}
+
+TEST_F(PhysicalSocketTest, TestConnectAcceptErrorIPv6) {
+  MAYBE_SKIP_IPV6;
+  ConnectInternalAcceptError(kIPv6Loopback);
+}
+
+void PhysicalSocketTest::WritableAfterPartialWrite(const IPAddress& loopback) {
+  // Simulate a really small maximum send size.
+  const int kMaxSendSize = 128;
+  SetMaxSendSize(kMaxSendSize);
+
+  // Run the default send/receive socket tests with a smaller amount of data
+  // to avoid long running times due to the small maximum send size.
+  const size_t kDataSize = 128 * 1024;
+  TcpInternal(loopback, kDataSize, kMaxSendSize);
+}
+
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=6167
+#if defined(WEBRTC_WIN)
+#define MAYBE_TestWritableAfterPartialWriteIPv4 DISABLED_TestWritableAfterPartialWriteIPv4
+#else
+#define MAYBE_TestWritableAfterPartialWriteIPv4 TestWritableAfterPartialWriteIPv4
+#endif
+TEST_F(PhysicalSocketTest, MAYBE_TestWritableAfterPartialWriteIPv4) {
+  MAYBE_SKIP_IPV4;
+  WritableAfterPartialWrite(kIPv4Loopback);
+}
+
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=6167
+#if defined(WEBRTC_WIN)
+#define MAYBE_TestWritableAfterPartialWriteIPv6 DISABLED_TestWritableAfterPartialWriteIPv6
+#else
+#define MAYBE_TestWritableAfterPartialWriteIPv6 TestWritableAfterPartialWriteIPv6
+#endif
+TEST_F(PhysicalSocketTest, MAYBE_TestWritableAfterPartialWriteIPv6) {
+  MAYBE_SKIP_IPV6;
+  WritableAfterPartialWrite(kIPv6Loopback);
+}
+
+TEST_F(PhysicalSocketTest, TestConnectFailIPv6) {
+  SocketTest::TestConnectFailIPv6();
+}
+
+TEST_F(PhysicalSocketTest, TestConnectWithDnsLookupFailIPv4) {
+  MAYBE_SKIP_IPV4;
+  SocketTest::TestConnectWithDnsLookupFailIPv4();
+}
+
+TEST_F(PhysicalSocketTest, TestConnectWithDnsLookupFailIPv6) {
+  SocketTest::TestConnectWithDnsLookupFailIPv6();
+}
+
+
+TEST_F(PhysicalSocketTest, TestConnectWithClosedSocketIPv4) {
+  MAYBE_SKIP_IPV4;
+  SocketTest::TestConnectWithClosedSocketIPv4();
+}
+
+TEST_F(PhysicalSocketTest, TestConnectWithClosedSocketIPv6) {
+  SocketTest::TestConnectWithClosedSocketIPv6();
+}
+
+TEST_F(PhysicalSocketTest, TestConnectWhileNotClosedIPv4) {
+  MAYBE_SKIP_IPV4;
+  SocketTest::TestConnectWhileNotClosedIPv4();
+}
+
+TEST_F(PhysicalSocketTest, TestConnectWhileNotClosedIPv6) {
+  SocketTest::TestConnectWhileNotClosedIPv6();
+}
+
+TEST_F(PhysicalSocketTest, TestServerCloseDuringConnectIPv4) {
+  MAYBE_SKIP_IPV4;
+  SocketTest::TestServerCloseDuringConnectIPv4();
+}
+
+TEST_F(PhysicalSocketTest, TestServerCloseDuringConnectIPv6) {
+  SocketTest::TestServerCloseDuringConnectIPv6();
+}
+
+TEST_F(PhysicalSocketTest, TestClientCloseDuringConnectIPv4) {
+  MAYBE_SKIP_IPV4;
+  SocketTest::TestClientCloseDuringConnectIPv4();
+}
+
+TEST_F(PhysicalSocketTest, TestClientCloseDuringConnectIPv6) {
+  SocketTest::TestClientCloseDuringConnectIPv6();
+}
+
+TEST_F(PhysicalSocketTest, TestServerCloseIPv4) {
+  MAYBE_SKIP_IPV4;
+  SocketTest::TestServerCloseIPv4();
+}
+
+TEST_F(PhysicalSocketTest, TestServerCloseIPv6) {
+  SocketTest::TestServerCloseIPv6();
+}
+
+TEST_F(PhysicalSocketTest, TestCloseInClosedCallbackIPv4) {
+  MAYBE_SKIP_IPV4;
+  SocketTest::TestCloseInClosedCallbackIPv4();
+}
+
+TEST_F(PhysicalSocketTest, TestCloseInClosedCallbackIPv6) {
+  SocketTest::TestCloseInClosedCallbackIPv6();
+}
+
+TEST_F(PhysicalSocketTest, TestSocketServerWaitIPv4) {
+  MAYBE_SKIP_IPV4;
+  SocketTest::TestSocketServerWaitIPv4();
+}
+
+TEST_F(PhysicalSocketTest, TestSocketServerWaitIPv6) {
+  SocketTest::TestSocketServerWaitIPv6();
+}
+
+TEST_F(PhysicalSocketTest, TestTcpIPv4) {
+  MAYBE_SKIP_IPV4;
+  SocketTest::TestTcpIPv4();
+}
+
+TEST_F(PhysicalSocketTest, TestTcpIPv6) {
+  SocketTest::TestTcpIPv6();
+}
+
+TEST_F(PhysicalSocketTest, TestUdpIPv4) {
+  MAYBE_SKIP_IPV4;
+  SocketTest::TestUdpIPv4();
+}
+
+TEST_F(PhysicalSocketTest, TestUdpIPv6) {
+  SocketTest::TestUdpIPv6();
+}
+
+// Disable for TSan v2, see
+// https://code.google.com/p/webrtc/issues/detail?id=3498 for details.
+// Also disable for MSan, see:
+// https://code.google.com/p/webrtc/issues/detail?id=4958
+// TODO(deadbeef): Enable again once test is reimplemented to be unflaky.
+// Also disable for ASan.
+// Disabled on Android: https://code.google.com/p/webrtc/issues/detail?id=4364
+// Disabled on Linux: https://bugs.chromium.org/p/webrtc/issues/detail?id=5233
+#if defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER) || \
+    defined(ADDRESS_SANITIZER) || defined(WEBRTC_ANDROID) ||  \
+    defined(WEBRTC_LINUX)
+#define MAYBE_TestUdpReadyToSendIPv4 DISABLED_TestUdpReadyToSendIPv4
+#else
+#define MAYBE_TestUdpReadyToSendIPv4 TestUdpReadyToSendIPv4
+#endif
+TEST_F(PhysicalSocketTest, MAYBE_TestUdpReadyToSendIPv4) {
+  MAYBE_SKIP_IPV4;
+  SocketTest::TestUdpReadyToSendIPv4();
+}
+
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=6167
+#if defined(WEBRTC_WIN)
+#define MAYBE_TestUdpReadyToSendIPv6 DISABLED_TestUdpReadyToSendIPv6
+#else
+#define MAYBE_TestUdpReadyToSendIPv6 TestUdpReadyToSendIPv6
+#endif
+TEST_F(PhysicalSocketTest, MAYBE_TestUdpReadyToSendIPv6) {
+  SocketTest::TestUdpReadyToSendIPv6();
+}
+
+TEST_F(PhysicalSocketTest, TestGetSetOptionsIPv4) {
+  MAYBE_SKIP_IPV4;
+  SocketTest::TestGetSetOptionsIPv4();
+}
+
+TEST_F(PhysicalSocketTest, TestGetSetOptionsIPv6) {
+  SocketTest::TestGetSetOptionsIPv6();
+}
+
+#if defined(WEBRTC_POSIX)
+
+// We don't get recv timestamps on Mac.
+#if !defined(WEBRTC_MAC)
+TEST_F(PhysicalSocketTest, TestSocketRecvTimestampIPv4) {
+  MAYBE_SKIP_IPV4;
+  SocketTest::TestSocketRecvTimestampIPv4();
+}
+
+TEST_F(PhysicalSocketTest, TestSocketRecvTimestampIPv6) {
+  SocketTest::TestSocketRecvTimestampIPv6();
+}
+#endif
+
+// Verify that if the socket was unable to be bound to a real network interface
+// (not loopback), Bind will return an error.
+TEST_F(PhysicalSocketTest,
+       BindFailsIfNetworkBinderFailsForNonLoopbackInterface) {
+  MAYBE_SKIP_IPV4;
+  FakeNetworkBinder fake_network_binder;
+  server_->set_network_binder(&fake_network_binder);
+  std::unique_ptr<AsyncSocket> socket(
+      server_->CreateAsyncSocket(AF_INET, SOCK_DGRAM));
+  fake_network_binder.set_result(NetworkBindingResult::FAILURE);
+  EXPECT_EQ(-1, socket->Bind(SocketAddress("192.168.0.1", 0)));
+  server_->set_network_binder(nullptr);
+}
+
+// Network binder shouldn't be used if the socket is bound to the "any" IP.
+TEST_F(PhysicalSocketTest,
+       NetworkBinderIsNotUsedForAnyIp) {
+  MAYBE_SKIP_IPV4;
+  FakeNetworkBinder fake_network_binder;
+  server_->set_network_binder(&fake_network_binder);
+  std::unique_ptr<AsyncSocket> socket(
+      server_->CreateAsyncSocket(AF_INET, SOCK_DGRAM));
+  EXPECT_EQ(0, socket->Bind(SocketAddress("0.0.0.0", 0)));
+  EXPECT_EQ(0, fake_network_binder.num_binds());
+  server_->set_network_binder(nullptr);
+}
+
+// For a loopback interface, failures to bind to the interface should be
+// tolerated.
+TEST_F(PhysicalSocketTest,
+       BindSucceedsIfNetworkBinderFailsForLoopbackInterface) {
+  MAYBE_SKIP_IPV4;
+  FakeNetworkBinder fake_network_binder;
+  server_->set_network_binder(&fake_network_binder);
+  std::unique_ptr<AsyncSocket> socket(
+      server_->CreateAsyncSocket(AF_INET, SOCK_DGRAM));
+  fake_network_binder.set_result(NetworkBindingResult::FAILURE);
+  EXPECT_EQ(0, socket->Bind(SocketAddress(kIPv4Loopback, 0)));
+  server_->set_network_binder(nullptr);
+}
+
+class PosixSignalDeliveryTest : public testing::Test {
+ public:
+  static void RecordSignal(int signum) {
+    signals_received_.push_back(signum);
+    signaled_thread_ = Thread::Current();
+  }
+
+ protected:
+  void SetUp() override { ss_.reset(new PhysicalSocketServer()); }
+
+  void TearDown() override {
+    ss_.reset(nullptr);
+    signals_received_.clear();
+    signaled_thread_ = nullptr;
+  }
+
+  bool ExpectSignal(int signum) {
+    if (signals_received_.empty()) {
+      RTC_LOG(LS_ERROR) << "ExpectSignal(): No signal received";
+      return false;
+    }
+    if (signals_received_[0] != signum) {
+      RTC_LOG(LS_ERROR) << "ExpectSignal(): Received signal "
+                        << signals_received_[0] << ", expected " << signum;
+      return false;
+    }
+    signals_received_.erase(signals_received_.begin());
+    return true;
+  }
+
+  bool ExpectNone() {
+    bool ret = signals_received_.empty();
+    if (!ret) {
+      RTC_LOG(LS_ERROR) << "ExpectNone(): Received signal "
+                        << signals_received_[0] << ", expected none";
+    }
+    return ret;
+  }
+
+  static std::vector<int> signals_received_;
+  static Thread *signaled_thread_;
+
+  std::unique_ptr<PhysicalSocketServer> ss_;
+};
+
+std::vector<int> PosixSignalDeliveryTest::signals_received_;
+Thread* PosixSignalDeliveryTest::signaled_thread_ = nullptr;
+
+// Test receiving a synchronous signal while not in Wait() and then entering
+// Wait() afterwards.
+TEST_F(PosixSignalDeliveryTest, RaiseThenWait) {
+  ASSERT_TRUE(ss_->SetPosixSignalHandler(SIGTERM, &RecordSignal));
+  raise(SIGTERM);
+  EXPECT_TRUE(ss_->Wait(0, true));
+  EXPECT_TRUE(ExpectSignal(SIGTERM));
+  EXPECT_TRUE(ExpectNone());
+}
+
+// Test that we can handle getting tons of repeated signals and that we see all
+// the different ones.
+TEST_F(PosixSignalDeliveryTest, InsanelyManySignals) {
+  ss_->SetPosixSignalHandler(SIGTERM, &RecordSignal);
+  ss_->SetPosixSignalHandler(SIGINT, &RecordSignal);
+  for (int i = 0; i < 10000; ++i) {
+    raise(SIGTERM);
+  }
+  raise(SIGINT);
+  EXPECT_TRUE(ss_->Wait(0, true));
+  // Order will be lowest signal numbers first.
+  EXPECT_TRUE(ExpectSignal(SIGINT));
+  EXPECT_TRUE(ExpectSignal(SIGTERM));
+  EXPECT_TRUE(ExpectNone());
+}
+
+// Test that a signal during a Wait() call is detected.
+TEST_F(PosixSignalDeliveryTest, SignalDuringWait) {
+  ss_->SetPosixSignalHandler(SIGALRM, &RecordSignal);
+  alarm(1);
+  EXPECT_TRUE(ss_->Wait(1500, true));
+  EXPECT_TRUE(ExpectSignal(SIGALRM));
+  EXPECT_TRUE(ExpectNone());
+}
+
+class RaiseSigTermRunnable : public Runnable {
+  void Run(Thread* thread) override {
+    thread->socketserver()->Wait(1000, false);
+
+    // Allow SIGTERM. This will be the only thread with it not masked so it will
+    // be delivered to us.
+    sigset_t mask;
+    sigemptyset(&mask);
+    pthread_sigmask(SIG_SETMASK, &mask, nullptr);
+
+    // Raise it.
+    raise(SIGTERM);
+  }
+};
+
+// Test that it works no matter what thread the kernel chooses to give the
+// signal to (since it's not guaranteed to be the one that Wait() runs on).
+TEST_F(PosixSignalDeliveryTest, SignalOnDifferentThread) {
+  ss_->SetPosixSignalHandler(SIGTERM, &RecordSignal);
+  // Mask out SIGTERM so that it can't be delivered to this thread.
+  sigset_t mask;
+  sigemptyset(&mask);
+  sigaddset(&mask, SIGTERM);
+  EXPECT_EQ(0, pthread_sigmask(SIG_SETMASK, &mask, nullptr));
+  // Start a new thread that raises it. It will have to be delivered to that
+  // thread. Our implementation should safely handle it and dispatch
+  // RecordSignal() on this thread.
+  std::unique_ptr<Thread> thread(Thread::CreateWithSocketServer());
+  std::unique_ptr<RaiseSigTermRunnable> runnable(new RaiseSigTermRunnable());
+  thread->Start(runnable.get());
+  EXPECT_TRUE(ss_->Wait(1500, true));
+  EXPECT_TRUE(ExpectSignal(SIGTERM));
+  EXPECT_EQ(Thread::Current(), signaled_thread_);
+  EXPECT_TRUE(ExpectNone());
+}
+
+#endif
+
+}  // namespace rtc
diff --git a/rtc_base/platform_file.cc b/rtc_base/platform_file.cc
new file mode 100644
index 0000000..35a2622
--- /dev/null
+++ b/rtc_base/platform_file.cc
@@ -0,0 +1,83 @@
+/*
+ *  Copyright 2014 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/platform_file.h"
+
+#include "rtc_base/stringutils.h"
+
+#if defined(WEBRTC_WIN)
+#include <io.h>
+#else
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+namespace rtc {
+
+#if defined(WEBRTC_WIN)
+const PlatformFile kInvalidPlatformFileValue = INVALID_HANDLE_VALUE;
+
+FILE* FdopenPlatformFileForWriting(PlatformFile file) {
+  if (file == kInvalidPlatformFileValue)
+    return nullptr;
+  int fd = _open_osfhandle(reinterpret_cast<intptr_t>(file), 0);
+  if (fd < 0)
+    return nullptr;
+
+  return _fdopen(fd, "w");
+}
+
+bool ClosePlatformFile(PlatformFile file) {
+  return CloseHandle(file) != 0;
+}
+
+bool RemoveFile(const std::string& path) {
+  return ::DeleteFile(ToUtf16(path).c_str()) != 0;
+}
+
+PlatformFile OpenPlatformFile(const std::string& path) {
+  return ::CreateFile(ToUtf16(path).c_str(), GENERIC_READ | GENERIC_WRITE, 0,
+                      nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
+}
+
+PlatformFile CreatePlatformFile(const std::string& path) {
+  return ::CreateFile(ToUtf16(path).c_str(), GENERIC_READ | GENERIC_WRITE, 0,
+                      nullptr, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr);
+}
+
+#else  // defined(WEBRTC_WIN)
+
+const PlatformFile kInvalidPlatformFileValue = -1;
+
+FILE* FdopenPlatformFileForWriting(PlatformFile file) {
+  return fdopen(file, "w");
+}
+
+bool ClosePlatformFile(PlatformFile file) {
+  return close(file) == 0;
+}
+
+bool RemoveFile(const std::string& path) {
+  return ::unlink(path.c_str()) == 0;
+}
+
+PlatformFile OpenPlatformFile(const std::string& path) {
+  return ::open(path.c_str(), O_RDWR);
+}
+
+PlatformFile CreatePlatformFile(const std::string& path) {
+  return ::open(path.c_str(), O_CREAT | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
+}
+
+#endif
+
+}  // namespace rtc
diff --git a/rtc_base/platform_file.h b/rtc_base/platform_file.h
new file mode 100644
index 0000000..8e911be
--- /dev/null
+++ b/rtc_base/platform_file.h
@@ -0,0 +1,56 @@
+/*
+ *  Copyright 2014 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_PLATFORM_FILE_H_
+#define RTC_BASE_PLATFORM_FILE_H_
+
+#include <stdio.h>
+#include <string>
+
+#if defined(WEBRTC_WIN)
+#include <windows.h>
+#endif
+
+namespace rtc {
+
+#if defined(WEBRTC_WIN)
+typedef HANDLE PlatformFile;
+#elif defined(WEBRTC_POSIX)
+typedef int PlatformFile;
+#else
+#error Unsupported platform
+#endif
+
+extern const PlatformFile kInvalidPlatformFileValue;
+
+// Associates a standard FILE stream with an existing PlatformFile.
+// Note that after this function has returned a valid FILE stream,
+// the PlatformFile should no longer be used.
+FILE* FdopenPlatformFileForWriting(PlatformFile file);
+
+// Closes a PlatformFile. Returns true on success, false on failure.
+// Don't use ClosePlatformFile to close a file opened with FdopenPlatformFile.
+// Use fclose instead.
+bool ClosePlatformFile(PlatformFile file);
+
+// Removes a file in the filesystem.
+bool RemoveFile(const std::string& path);
+
+// Opens a file for reading and writing. You might want to use base/file.h
+// instead.
+PlatformFile OpenPlatformFile(const std::string& path);
+
+// Creates a new file for reading and writing. If the file already exists it
+// will be overwritten. You might want to use base/file.h instead.
+PlatformFile CreatePlatformFile(const std::string& path);
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_PLATFORM_FILE_H_
diff --git a/rtc_base/platform_thread.cc b/rtc_base/platform_thread.cc
new file mode 100644
index 0000000..ca2ce13
--- /dev/null
+++ b/rtc_base/platform_thread.cc
@@ -0,0 +1,294 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/platform_thread.h"
+
+#include "rtc_base/atomicops.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/timeutils.h"
+#include "rtc_base/trace_event.h"
+
+#if defined(WEBRTC_LINUX)
+#include <sys/prctl.h>
+#include <sys/syscall.h>
+#endif
+
+namespace rtc {
+namespace {
+#if defined(WEBRTC_WIN)
+void CALLBACK RaiseFlag(ULONG_PTR param) {
+  *reinterpret_cast<bool*>(param) = true;
+}
+#else
+struct ThreadAttributes {
+  ThreadAttributes() { pthread_attr_init(&attr); }
+  ~ThreadAttributes() { pthread_attr_destroy(&attr); }
+  pthread_attr_t* operator&() { return &attr; }
+  pthread_attr_t attr;
+};
+#endif  // defined(WEBRTC_WIN)
+}
+
+PlatformThread::PlatformThread(ThreadRunFunctionDeprecated func,
+                               void* obj,
+                               const char* thread_name)
+    : run_function_deprecated_(func),
+      obj_(obj),
+      name_(thread_name ? thread_name : "webrtc") {
+  RTC_DCHECK(func);
+  RTC_DCHECK(name_.length() < 64);
+  spawned_thread_checker_.DetachFromThread();
+}
+
+PlatformThread::PlatformThread(ThreadRunFunction func,
+                               void* obj,
+                               const char* thread_name,
+                               ThreadPriority priority /*= kNormalPriority*/)
+    : run_function_(func), priority_(priority), obj_(obj), name_(thread_name) {
+  RTC_DCHECK(func);
+  RTC_DCHECK(!name_.empty());
+  // TODO(tommi): Consider lowering the limit to 15 (limit on Linux).
+  RTC_DCHECK(name_.length() < 64);
+  spawned_thread_checker_.DetachFromThread();
+}
+
+PlatformThread::~PlatformThread() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+#if defined(WEBRTC_WIN)
+  RTC_DCHECK(!thread_);
+  RTC_DCHECK(!thread_id_);
+#endif  // defined(WEBRTC_WIN)
+}
+
+#if defined(WEBRTC_WIN)
+DWORD WINAPI PlatformThread::StartThread(void* param) {
+  // The GetLastError() function only returns valid results when it is called
+  // after a Win32 API function that returns a "failed" result. A crash dump
+  // contains the result from GetLastError() and to make sure it does not
+  // falsely report a Windows error we call SetLastError here.
+  ::SetLastError(ERROR_SUCCESS);
+  static_cast<PlatformThread*>(param)->Run();
+  return 0;
+}
+#else
+void* PlatformThread::StartThread(void* param) {
+  static_cast<PlatformThread*>(param)->Run();
+  return 0;
+}
+#endif  // defined(WEBRTC_WIN)
+
+void PlatformThread::Start() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(!thread_) << "Thread already started?";
+#if defined(WEBRTC_WIN)
+  stop_ = false;
+
+  // See bug 2902 for background on STACK_SIZE_PARAM_IS_A_RESERVATION.
+  // Set the reserved stack stack size to 1M, which is the default on Windows
+  // and Linux.
+  thread_ = ::CreateThread(nullptr, 1024 * 1024, &StartThread, this,
+                           STACK_SIZE_PARAM_IS_A_RESERVATION, &thread_id_);
+  RTC_CHECK(thread_) << "CreateThread failed";
+  RTC_DCHECK(thread_id_);
+#else
+  ThreadAttributes attr;
+  // Set the stack stack size to 1M.
+  pthread_attr_setstacksize(&attr, 1024 * 1024);
+  RTC_CHECK_EQ(0, pthread_create(&thread_, &attr, &StartThread, this));
+#endif  // defined(WEBRTC_WIN)
+}
+
+bool PlatformThread::IsRunning() const {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+#if defined(WEBRTC_WIN)
+  return thread_ != nullptr;
+#else
+  return thread_ != 0;
+#endif  // defined(WEBRTC_WIN)
+}
+
+PlatformThreadRef PlatformThread::GetThreadRef() const {
+#if defined(WEBRTC_WIN)
+  return thread_id_;
+#else
+  return thread_;
+#endif  // defined(WEBRTC_WIN)
+}
+
+void PlatformThread::Stop() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  if (!IsRunning())
+    return;
+
+#if defined(WEBRTC_WIN)
+  // Set stop_ to |true| on the worker thread.
+  bool queued = QueueAPC(&RaiseFlag, reinterpret_cast<ULONG_PTR>(&stop_));
+  // Queuing the APC can fail if the thread is being terminated.
+  RTC_CHECK(queued || GetLastError() == ERROR_GEN_FAILURE);
+  WaitForSingleObject(thread_, INFINITE);
+  CloseHandle(thread_);
+  thread_ = nullptr;
+  thread_id_ = 0;
+#else
+  if (!run_function_)
+    RTC_CHECK_EQ(1, AtomicOps::Increment(&stop_flag_));
+  RTC_CHECK_EQ(0, pthread_join(thread_, nullptr));
+  if (!run_function_)
+    AtomicOps::ReleaseStore(&stop_flag_, 0);
+  thread_ = 0;
+#endif  // defined(WEBRTC_WIN)
+  spawned_thread_checker_.DetachFromThread();
+}
+
+// TODO(tommi): Deprecate the loop behavior in PlatformThread.
+// * Introduce a new callback type that returns void.
+// * Remove potential for a busy loop in PlatformThread.
+// * Delegate the responsibility for how to stop the thread, to the
+//   implementation that actually uses the thread.
+// All implementations will need to be aware of how the thread should be stopped
+// and encouraging a busy polling loop, can be costly in terms of power and cpu.
+void PlatformThread::Run() {
+  // Attach the worker thread checker to this thread.
+  RTC_DCHECK(spawned_thread_checker_.CalledOnValidThread());
+  rtc::SetCurrentThreadName(name_.c_str());
+
+  if (run_function_) {
+    SetPriority(priority_);
+    run_function_(obj_);
+    return;
+  }
+
+// TODO(tommi): Delete the rest of this function when looping isn't supported.
+#if RTC_DCHECK_IS_ON
+  // These constants control the busy loop detection algorithm below.
+  // |kMaxLoopCount| controls the limit for how many times we allow the loop
+  // to run within a period, before DCHECKing.
+  // |kPeriodToMeasureMs| controls how long that period is.
+  static const int kMaxLoopCount = 1000;
+  static const int kPeriodToMeasureMs = 100;
+  int64_t loop_stamps[kMaxLoopCount] = {};
+  int64_t sequence_nr = 0;
+#endif
+
+  do {
+    TRACE_EVENT1("webrtc", "PlatformThread::Run", "name", name_.c_str());
+
+    // The interface contract of Start/Stop is that for a successful call to
+    // Start, there should be at least one call to the run function.  So we
+    // call the function before checking |stop_|.
+    if (!run_function_deprecated_(obj_))
+      break;
+#if RTC_DCHECK_IS_ON
+    auto id = sequence_nr % kMaxLoopCount;
+    loop_stamps[id] = rtc::TimeMillis();
+    if (sequence_nr > kMaxLoopCount) {
+      auto compare_id = (id + 1) % kMaxLoopCount;
+      auto diff = loop_stamps[id] - loop_stamps[compare_id];
+      RTC_DCHECK_GE(diff, 0);
+      if (diff < kPeriodToMeasureMs) {
+        RTC_NOTREACHED() << "This thread is too busy: " << name_ << " " << diff
+                         << "ms sequence=" << sequence_nr << " "
+                         << loop_stamps[id] << " vs " << loop_stamps[compare_id]
+                         << ", " << id << " vs " << compare_id;
+      }
+    }
+    ++sequence_nr;
+#endif
+#if defined(WEBRTC_WIN)
+    // Alertable sleep to permit RaiseFlag to run and update |stop_|.
+    SleepEx(0, true);
+  } while (!stop_);
+#else
+#if defined(WEBRTC_MAC) || defined(WEBRTC_ANDROID)
+    sched_yield();
+#else
+    static const struct timespec ts_null = {0};
+    nanosleep(&ts_null, nullptr);
+#endif
+  } while (!AtomicOps::AcquireLoad(&stop_flag_));
+#endif  // defined(WEBRTC_WIN)
+}
+
+bool PlatformThread::SetPriority(ThreadPriority priority) {
+#if RTC_DCHECK_IS_ON
+  if (run_function_) {
+    // The non-deprecated way of how this function gets called, is that it must
+    // be called on the worker thread itself.
+    RTC_DCHECK(!thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(spawned_thread_checker_.CalledOnValidThread());
+  } else {
+    // In the case of deprecated use of this method, it must be called on the
+    // same thread as the PlatformThread object is constructed on.
+    RTC_DCHECK(thread_checker_.CalledOnValidThread());
+    RTC_DCHECK(IsRunning());
+  }
+#endif
+
+#if defined(WEBRTC_WIN)
+  return SetThreadPriority(thread_, priority) != FALSE;
+#elif defined(__native_client__) || defined(WEBRTC_FUCHSIA)
+  // Setting thread priorities is not supported in NaCl or Fuchsia.
+  return true;
+#elif defined(WEBRTC_CHROMIUM_BUILD) && defined(WEBRTC_LINUX)
+  // TODO(tommi): Switch to the same mechanism as Chromium uses for changing
+  // thread priorities.
+  return true;
+#else
+#ifdef WEBRTC_THREAD_RR
+  const int policy = SCHED_RR;
+#else
+  const int policy = SCHED_FIFO;
+#endif
+  const int min_prio = sched_get_priority_min(policy);
+  const int max_prio = sched_get_priority_max(policy);
+  if (min_prio == -1 || max_prio == -1) {
+    return false;
+  }
+
+  if (max_prio - min_prio <= 2)
+    return false;
+
+  // Convert webrtc priority to system priorities:
+  sched_param param;
+  const int top_prio = max_prio - 1;
+  const int low_prio = min_prio + 1;
+  switch (priority) {
+    case kLowPriority:
+      param.sched_priority = low_prio;
+      break;
+    case kNormalPriority:
+      // The -1 ensures that the kHighPriority is always greater or equal to
+      // kNormalPriority.
+      param.sched_priority = (low_prio + top_prio - 1) / 2;
+      break;
+    case kHighPriority:
+      param.sched_priority = std::max(top_prio - 2, low_prio);
+      break;
+    case kHighestPriority:
+      param.sched_priority = std::max(top_prio - 1, low_prio);
+      break;
+    case kRealtimePriority:
+      param.sched_priority = top_prio;
+      break;
+  }
+  return pthread_setschedparam(thread_, policy, &param) == 0;
+#endif  // defined(WEBRTC_WIN)
+}
+
+#if defined(WEBRTC_WIN)
+bool PlatformThread::QueueAPC(PAPCFUNC function, ULONG_PTR data) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTC_DCHECK(IsRunning());
+
+  return QueueUserAPC(function, thread_, data) != FALSE;
+}
+#endif
+
+}  // namespace rtc
diff --git a/rtc_base/platform_thread.h b/rtc_base/platform_thread.h
new file mode 100644
index 0000000..33921c2
--- /dev/null
+++ b/rtc_base/platform_thread.h
@@ -0,0 +1,115 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_PLATFORM_THREAD_H_
+#define RTC_BASE_PLATFORM_THREAD_H_
+
+#include <string>
+
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/event.h"
+#include "rtc_base/platform_thread_types.h"
+#include "rtc_base/thread_checker.h"
+
+namespace rtc {
+
+// Callback function that the spawned thread will enter once spawned.
+// A return value of false is interpreted as that the function has no
+// more work to do and that the thread can be released.
+typedef bool (*ThreadRunFunctionDeprecated)(void*);
+typedef void (*ThreadRunFunction)(void*);
+
+enum ThreadPriority {
+#ifdef WEBRTC_WIN
+  kLowPriority = THREAD_PRIORITY_BELOW_NORMAL,
+  kNormalPriority = THREAD_PRIORITY_NORMAL,
+  kHighPriority = THREAD_PRIORITY_ABOVE_NORMAL,
+  kHighestPriority = THREAD_PRIORITY_HIGHEST,
+  kRealtimePriority = THREAD_PRIORITY_TIME_CRITICAL
+#else
+  kLowPriority = 1,
+  kNormalPriority = 2,
+  kHighPriority = 3,
+  kHighestPriority = 4,
+  kRealtimePriority = 5
+#endif
+};
+
+// Represents a simple worker thread.  The implementation must be assumed
+// to be single threaded, meaning that all methods of the class, must be
+// called from the same thread, including instantiation.
+class PlatformThread {
+ public:
+  PlatformThread(ThreadRunFunctionDeprecated func,
+                 void* obj,
+                 const char* thread_name);
+  PlatformThread(ThreadRunFunction func,
+                 void* obj,
+                 const char* thread_name,
+                 ThreadPriority priority = kNormalPriority);
+  virtual ~PlatformThread();
+
+  const std::string& name() const { return name_; }
+
+  // Spawns a thread and tries to set thread priority according to the priority
+  // from when CreateThread was called.
+  void Start();
+
+  bool IsRunning() const;
+
+  // Returns an identifier for the worker thread that can be used to do
+  // thread checks.
+  PlatformThreadRef GetThreadRef() const;
+
+  // Stops (joins) the spawned thread.
+  void Stop();
+
+  // Set the priority of the thread. Must be called when thread is running.
+  // TODO(tommi): Make private and only allow public support via ctor.
+  bool SetPriority(ThreadPriority priority);
+
+ protected:
+#if defined(WEBRTC_WIN)
+  // Exposed to derived classes to allow for special cases specific to Windows.
+  bool QueueAPC(PAPCFUNC apc_function, ULONG_PTR data);
+#endif
+
+ private:
+  void Run();
+
+  ThreadRunFunctionDeprecated const run_function_deprecated_ = nullptr;
+  ThreadRunFunction const run_function_ = nullptr;
+  const ThreadPriority priority_ = kNormalPriority;
+  void* const obj_;
+  // TODO(pbos): Make sure call sites use string literals and update to a const
+  // char* instead of a std::string.
+  const std::string name_;
+  rtc::ThreadChecker thread_checker_;
+  rtc::ThreadChecker spawned_thread_checker_;
+#if defined(WEBRTC_WIN)
+  static DWORD WINAPI StartThread(void* param);
+
+  bool stop_ = false;
+  HANDLE thread_ = nullptr;
+  DWORD thread_id_ = 0;
+#else
+  static void* StartThread(void* param);
+
+  // An atomic flag that we use to stop the thread. Only modified on the
+  // controlling thread and checked on the worker thread.
+  volatile int stop_flag_ = 0;
+  pthread_t thread_ = 0;
+#endif  // defined(WEBRTC_WIN)
+  RTC_DISALLOW_COPY_AND_ASSIGN(PlatformThread);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_PLATFORM_THREAD_H_
diff --git a/rtc_base/platform_thread_types.cc b/rtc_base/platform_thread_types.cc
new file mode 100644
index 0000000..15d4861
--- /dev/null
+++ b/rtc_base/platform_thread_types.cc
@@ -0,0 +1,78 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/platform_thread_types.h"
+
+#if defined(WEBRTC_LINUX)
+#include <sys/prctl.h>
+#include <sys/syscall.h>
+#endif
+
+namespace rtc {
+
+PlatformThreadId CurrentThreadId() {
+#if defined(WEBRTC_WIN)
+  return GetCurrentThreadId();
+#elif defined(WEBRTC_POSIX)
+#if defined(WEBRTC_MAC) || defined(WEBRTC_IOS)
+  return pthread_mach_thread_np(pthread_self());
+#elif defined(WEBRTC_ANDROID)
+  return gettid();
+#elif defined(WEBRTC_FUCHSIA)
+  return zx_thread_self();
+#elif defined(WEBRTC_LINUX)
+  return syscall(__NR_gettid);
+#else
+  // Default implementation for nacl and solaris.
+  return reinterpret_cast<pid_t>(pthread_self());
+#endif
+#endif  // defined(WEBRTC_POSIX)
+}
+
+PlatformThreadRef CurrentThreadRef() {
+#if defined(WEBRTC_WIN)
+  return GetCurrentThreadId();
+#elif defined(WEBRTC_FUCHSIA)
+  return zx_thread_self();
+#elif defined(WEBRTC_POSIX)
+  return pthread_self();
+#endif
+}
+
+bool IsThreadRefEqual(const PlatformThreadRef& a, const PlatformThreadRef& b) {
+#if defined(WEBRTC_WIN) || defined(WEBRTC_FUCHSIA)
+  return a == b;
+#elif defined(WEBRTC_POSIX)
+  return pthread_equal(a, b);
+#endif
+}
+
+void SetCurrentThreadName(const char* name) {
+#if defined(WEBRTC_WIN)
+  struct {
+    DWORD dwType;
+    LPCSTR szName;
+    DWORD dwThreadID;
+    DWORD dwFlags;
+  } threadname_info = {0x1000, name, static_cast<DWORD>(-1), 0};
+
+  __try {
+    ::RaiseException(0x406D1388, 0, sizeof(threadname_info) / sizeof(DWORD),
+                     reinterpret_cast<ULONG_PTR*>(&threadname_info));
+  } __except (EXCEPTION_EXECUTE_HANDLER) {  // NOLINT
+  }
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_ANDROID)
+  prctl(PR_SET_NAME, reinterpret_cast<unsigned long>(name));  // NOLINT
+#elif defined(WEBRTC_MAC) || defined(WEBRTC_IOS)
+  pthread_setname_np(name);
+#endif
+}
+
+}  // namespace rtc
diff --git a/rtc_base/platform_thread_types.h b/rtc_base/platform_thread_types.h
new file mode 100644
index 0000000..72aaa4b
--- /dev/null
+++ b/rtc_base/platform_thread_types.h
@@ -0,0 +1,53 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_PLATFORM_THREAD_TYPES_H_
+#define RTC_BASE_PLATFORM_THREAD_TYPES_H_
+
+#if defined(WEBRTC_WIN)
+#include <winsock2.h>
+#include <windows.h>
+#elif defined(WEBRTC_FUCHSIA)
+#include <zircon/types.h>
+#include <zircon/process.h>
+#elif defined(WEBRTC_POSIX)
+#include <pthread.h>
+#include <unistd.h>
+#endif
+
+namespace rtc {
+#if defined(WEBRTC_WIN)
+typedef DWORD PlatformThreadId;
+typedef DWORD PlatformThreadRef;
+#elif defined(WEBRTC_FUCHSIA)
+typedef zx_handle_t PlatformThreadId;
+typedef zx_handle_t PlatformThreadRef;
+#elif defined(WEBRTC_POSIX)
+typedef pid_t PlatformThreadId;
+typedef pthread_t PlatformThreadRef;
+#endif
+
+// Retrieve the ID of the current thread.
+PlatformThreadId CurrentThreadId();
+
+// Retrieves a reference to the current thread. On Windows, this is the same
+// as CurrentThreadId. On other platforms it's the pthread_t returned by
+// pthread_self().
+PlatformThreadRef CurrentThreadRef();
+
+// Compares two thread identifiers for equality.
+bool IsThreadRefEqual(const PlatformThreadRef& a, const PlatformThreadRef& b);
+
+// Sets the current thread name.
+void SetCurrentThreadName(const char* name);
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_PLATFORM_THREAD_TYPES_H_
diff --git a/rtc_base/platform_thread_unittest.cc b/rtc_base/platform_thread_unittest.cc
new file mode 100644
index 0000000..d8c8995
--- /dev/null
+++ b/rtc_base/platform_thread_unittest.cc
@@ -0,0 +1,128 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/platform_thread.h"
+
+#include "system_wrappers/include/sleep.h"
+#include "test/gtest.h"
+
+namespace rtc {
+namespace {
+// Function that does nothing, and reports success.
+bool NullRunFunctionDeprecated(void* obj) {
+  webrtc::SleepMs(2);  // Hand over timeslice, prevents busy looping.
+  return true;
+}
+
+bool TooBusyRunFunction(void* obj) {
+  // Indentionally busy looping.
+  return true;
+}
+
+void NullRunFunction(void* obj) {}
+
+// Function that sets a boolean.
+bool SetFlagRunFunctionDeprecated(void* obj) {
+  bool* obj_as_bool = static_cast<bool*>(obj);
+  *obj_as_bool = true;
+  webrtc::SleepMs(0);  // Hand over timeslice, prevents busy looping.
+  return true;
+}
+
+void SetFlagRunFunction(void* obj) {
+  bool* obj_as_bool = static_cast<bool*>(obj);
+  *obj_as_bool = true;
+}
+
+}  // namespace
+
+TEST(PlatformThreadTest, StartStopDeprecated) {
+  PlatformThread thread(&NullRunFunctionDeprecated, nullptr,
+                        "PlatformThreadTest");
+  EXPECT_TRUE(thread.name() == "PlatformThreadTest");
+  EXPECT_TRUE(thread.GetThreadRef() == 0);
+  thread.Start();
+  EXPECT_TRUE(thread.GetThreadRef() != 0);
+  thread.Stop();
+  EXPECT_TRUE(thread.GetThreadRef() == 0);
+}
+
+TEST(PlatformThreadTest, StartStop2Deprecated) {
+  PlatformThread thread1(&NullRunFunctionDeprecated, nullptr,
+                         "PlatformThreadTest1");
+  PlatformThread thread2(&NullRunFunctionDeprecated, nullptr,
+                         "PlatformThreadTest2");
+  EXPECT_TRUE(thread1.GetThreadRef() == thread2.GetThreadRef());
+  thread1.Start();
+  thread2.Start();
+  EXPECT_TRUE(thread1.GetThreadRef() != thread2.GetThreadRef());
+  thread2.Stop();
+  thread1.Stop();
+}
+
+TEST(PlatformThreadTest, RunFunctionIsCalledDeprecated) {
+  bool flag = false;
+  PlatformThread thread(&SetFlagRunFunctionDeprecated, &flag,
+                        "RunFunctionIsCalled");
+  thread.Start();
+
+  // At this point, the flag may be either true or false.
+  thread.Stop();
+
+  // We expect the thread to have run at least once.
+  EXPECT_TRUE(flag);
+}
+
+TEST(PlatformThreadTest, StartStop) {
+  PlatformThread thread(&NullRunFunction, nullptr, "PlatformThreadTest");
+  EXPECT_TRUE(thread.name() == "PlatformThreadTest");
+  EXPECT_TRUE(thread.GetThreadRef() == 0);
+  thread.Start();
+  EXPECT_TRUE(thread.GetThreadRef() != 0);
+  thread.Stop();
+  EXPECT_TRUE(thread.GetThreadRef() == 0);
+}
+
+TEST(PlatformThreadTest, StartStop2) {
+  PlatformThread thread1(&NullRunFunction, nullptr, "PlatformThreadTest1");
+  PlatformThread thread2(&NullRunFunction, nullptr, "PlatformThreadTest2");
+  EXPECT_TRUE(thread1.GetThreadRef() == thread2.GetThreadRef());
+  thread1.Start();
+  thread2.Start();
+  EXPECT_TRUE(thread1.GetThreadRef() != thread2.GetThreadRef());
+  thread2.Stop();
+  thread1.Stop();
+}
+
+TEST(PlatformThreadTest, RunFunctionIsCalled) {
+  bool flag = false;
+  PlatformThread thread(&SetFlagRunFunction, &flag, "RunFunctionIsCalled");
+  thread.Start();
+
+  // At this point, the flag may be either true or false.
+  thread.Stop();
+
+  // We expect the thread to have run at least once.
+  EXPECT_TRUE(flag);
+}
+
+// This test is disabled since it will cause a crash.
+// There might be a way to implement this as a death test, but it looks like
+// a death test requires an expression to be checked but does not allow a
+// flag to be raised that says "some thread will crash after this point".
+// TODO(tommi): Look into ways to enable the test by default.
+TEST(PlatformThreadTest, DISABLED_TooBusyDeprecated) {
+  PlatformThread thread(&TooBusyRunFunction, nullptr, "BusyThread");
+  thread.Start();
+  webrtc::SleepMs(1000);
+  thread.Stop();
+}
+
+}  // rtc
diff --git a/rtc_base/protobuf_utils.h b/rtc_base/protobuf_utils.h
new file mode 100644
index 0000000..8fbc060
--- /dev/null
+++ b/rtc_base/protobuf_utils.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string>
+
+#ifndef RTC_BASE_PROTOBUF_UTILS_H_
+#define RTC_BASE_PROTOBUF_UTILS_H_
+
+namespace webrtc {
+
+using ProtoString = std::string;
+
+}  // namespace webrtc
+
+#if WEBRTC_ENABLE_PROTOBUF
+
+#include "third_party/protobuf/src/google/protobuf/message_lite.h"
+#include "third_party/protobuf/src/google/protobuf/repeated_field.h"
+
+namespace webrtc {
+
+using google::protobuf::MessageLite;
+using google::protobuf::RepeatedPtrField;
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_ENABLE_PROTOBUF
+
+#endif  // RTC_BASE_PROTOBUF_UTILS_H_
diff --git a/rtc_base/proxy_unittest.cc b/rtc_base/proxy_unittest.cc
new file mode 100644
index 0000000..7d7b6f8
--- /dev/null
+++ b/rtc_base/proxy_unittest.cc
@@ -0,0 +1,76 @@
+/*
+ *  Copyright 2009 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <string>
+#include "rtc_base/gunit.h"
+#include "rtc_base/httpserver.h"
+#include "rtc_base/proxyserver.h"
+#include "rtc_base/socketadapters.h"
+#include "rtc_base/testclient.h"
+#include "rtc_base/testechoserver.h"
+#include "rtc_base/virtualsocketserver.h"
+
+using rtc::Socket;
+using rtc::Thread;
+using rtc::SocketAddress;
+
+static const SocketAddress kSocksProxyIntAddr("1.2.3.4", 1080);
+static const SocketAddress kSocksProxyExtAddr("1.2.3.5", 0);
+static const SocketAddress kHttpsProxyIntAddr("1.2.3.4", 443);
+static const SocketAddress kHttpsProxyExtAddr("1.2.3.5", 0);
+static const SocketAddress kBogusProxyIntAddr("1.2.3.4", 999);
+
+// Sets up a virtual socket server and HTTPS/SOCKS5 proxy servers.
+class ProxyTest : public testing::Test {
+ public:
+  ProxyTest() : ss_(new rtc::VirtualSocketServer()), thread_(ss_.get()) {
+    socks_.reset(new rtc::SocksProxyServer(
+        ss_.get(), kSocksProxyIntAddr, ss_.get(), kSocksProxyExtAddr));
+    https_.reset(new rtc::HttpListenServer());
+    https_->Listen(kHttpsProxyIntAddr);
+  }
+
+  rtc::SocketServer* ss() { return ss_.get(); }
+
+ private:
+  std::unique_ptr<rtc::SocketServer> ss_;
+  rtc::AutoSocketServerThread thread_;
+  std::unique_ptr<rtc::SocksProxyServer> socks_;
+  // TODO: Make this a real HTTPS proxy server.
+  std::unique_ptr<rtc::HttpListenServer> https_;
+};
+
+// Tests whether we can use a SOCKS5 proxy to connect to a server.
+TEST_F(ProxyTest, TestSocks5Connect) {
+  rtc::AsyncSocket* socket =
+      ss()->CreateAsyncSocket(kSocksProxyIntAddr.family(), SOCK_STREAM);
+  rtc::AsyncSocksProxySocket* proxy_socket =
+      new rtc::AsyncSocksProxySocket(socket, kSocksProxyIntAddr,
+                                           "", rtc::CryptString());
+  // TODO: IPv6-ize these tests when proxy supports IPv6.
+
+  rtc::TestEchoServer server(Thread::Current(),
+                                   SocketAddress(INADDR_ANY, 0));
+
+  std::unique_ptr<rtc::AsyncTCPSocket> packet_socket(
+      rtc::AsyncTCPSocket::Create(proxy_socket, SocketAddress(INADDR_ANY, 0),
+                                  server.address()));
+  EXPECT_TRUE(packet_socket != nullptr);
+  rtc::TestClient client(std::move(packet_socket));
+
+  EXPECT_EQ(Socket::CS_CONNECTING, proxy_socket->GetState());
+  EXPECT_TRUE(client.CheckConnected());
+  EXPECT_EQ(Socket::CS_CONNECTED, proxy_socket->GetState());
+  EXPECT_EQ(server.address(), client.remote_address());
+  client.Send("foo", 3);
+  EXPECT_TRUE(client.CheckNextPacket("foo", 3, nullptr));
+  EXPECT_TRUE(client.CheckNoPacket());
+}
diff --git a/rtc_base/proxyinfo.cc b/rtc_base/proxyinfo.cc
new file mode 100644
index 0000000..a165dca
--- /dev/null
+++ b/rtc_base/proxyinfo.cc
@@ -0,0 +1,24 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/proxyinfo.h"
+
+namespace rtc {
+
+const char * ProxyToString(ProxyType proxy) {
+  const char * const PROXY_NAMES[] = { "none", "https", "socks5", "unknown" };
+  return PROXY_NAMES[proxy];
+}
+
+ProxyInfo::ProxyInfo() : type(PROXY_NONE), autodetect(false) {
+}
+ProxyInfo::~ProxyInfo() = default;
+
+} // namespace rtc
diff --git a/rtc_base/proxyinfo.h b/rtc_base/proxyinfo.h
new file mode 100644
index 0000000..5affcd8
--- /dev/null
+++ b/rtc_base/proxyinfo.h
@@ -0,0 +1,43 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_PROXYINFO_H_
+#define RTC_BASE_PROXYINFO_H_
+
+#include <string>
+#include "rtc_base/cryptstring.h"
+#include "rtc_base/socketaddress.h"
+
+namespace rtc {
+
+enum ProxyType {
+  PROXY_NONE,
+  PROXY_HTTPS,
+  PROXY_SOCKS5,
+  PROXY_UNKNOWN
+};
+const char * ProxyToString(ProxyType proxy);
+
+struct ProxyInfo {
+  ProxyType type;
+  SocketAddress address;
+  std::string autoconfig_url;
+  bool autodetect;
+  std::string bypass_list;
+  std::string username;
+  CryptString password;
+
+  ProxyInfo();
+  ~ProxyInfo();
+};
+
+} // namespace rtc
+
+#endif // RTC_BASE_PROXYINFO_H_
diff --git a/rtc_base/proxyserver.cc b/rtc_base/proxyserver.cc
new file mode 100644
index 0000000..5ab7943
--- /dev/null
+++ b/rtc_base/proxyserver.cc
@@ -0,0 +1,159 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/proxyserver.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/socketfactory.h"
+
+namespace rtc {
+
+// ProxyServer
+ProxyServer::ProxyServer(
+    SocketFactory* int_factory, const SocketAddress& int_addr,
+    SocketFactory* ext_factory, const SocketAddress& ext_ip)
+    : ext_factory_(ext_factory), ext_ip_(ext_ip.ipaddr(), 0),  // strip off port
+      server_socket_(int_factory->CreateAsyncSocket(int_addr.family(),
+                                                    SOCK_STREAM)) {
+  RTC_DCHECK(server_socket_.get() != nullptr);
+  RTC_DCHECK(int_addr.family() == AF_INET || int_addr.family() == AF_INET6);
+  server_socket_->Bind(int_addr);
+  server_socket_->Listen(5);
+  server_socket_->SignalReadEvent.connect(this, &ProxyServer::OnAcceptEvent);
+}
+
+ProxyServer::~ProxyServer() {
+  for (BindingList::iterator it = bindings_.begin();
+       it != bindings_.end(); ++it) {
+    delete (*it);
+  }
+}
+
+SocketAddress ProxyServer::GetServerAddress() {
+  return server_socket_->GetLocalAddress();
+}
+
+void ProxyServer::OnAcceptEvent(AsyncSocket* socket) {
+  RTC_DCHECK(socket);
+  RTC_DCHECK_EQ(socket, server_socket_.get());
+  AsyncSocket* int_socket = socket->Accept(nullptr);
+  AsyncProxyServerSocket* wrapped_socket = WrapSocket(int_socket);
+  AsyncSocket* ext_socket = ext_factory_->CreateAsyncSocket(ext_ip_.family(),
+                                                            SOCK_STREAM);
+  if (ext_socket) {
+    ext_socket->Bind(ext_ip_);
+    bindings_.push_back(new ProxyBinding(wrapped_socket, ext_socket));
+  } else {
+    RTC_LOG(LS_ERROR)
+        << "Unable to create external socket on proxy accept event";
+  }
+}
+
+void ProxyServer::OnBindingDestroyed(ProxyBinding* binding) {
+  BindingList::iterator it =
+      std::find(bindings_.begin(), bindings_.end(), binding);
+  delete (*it);
+  bindings_.erase(it);
+}
+
+// ProxyBinding
+ProxyBinding::ProxyBinding(AsyncProxyServerSocket* int_socket,
+                           AsyncSocket* ext_socket)
+    : int_socket_(int_socket), ext_socket_(ext_socket), connected_(false),
+      out_buffer_(kBufferSize), in_buffer_(kBufferSize) {
+  int_socket_->SignalConnectRequest.connect(this,
+                                            &ProxyBinding::OnConnectRequest);
+  int_socket_->SignalReadEvent.connect(this, &ProxyBinding::OnInternalRead);
+  int_socket_->SignalWriteEvent.connect(this, &ProxyBinding::OnInternalWrite);
+  int_socket_->SignalCloseEvent.connect(this, &ProxyBinding::OnInternalClose);
+  ext_socket_->SignalConnectEvent.connect(this,
+                                          &ProxyBinding::OnExternalConnect);
+  ext_socket_->SignalReadEvent.connect(this, &ProxyBinding::OnExternalRead);
+  ext_socket_->SignalWriteEvent.connect(this, &ProxyBinding::OnExternalWrite);
+  ext_socket_->SignalCloseEvent.connect(this, &ProxyBinding::OnExternalClose);
+}
+
+ProxyBinding::~ProxyBinding() = default;
+
+void ProxyBinding::OnConnectRequest(AsyncProxyServerSocket* socket,
+                                   const SocketAddress& addr) {
+  RTC_DCHECK(!connected_);
+  RTC_DCHECK(ext_socket_);
+  ext_socket_->Connect(addr);
+  // TODO: handle errors here
+}
+
+void ProxyBinding::OnInternalRead(AsyncSocket* socket) {
+  Read(int_socket_.get(), &out_buffer_);
+  Write(ext_socket_.get(), &out_buffer_);
+}
+
+void ProxyBinding::OnInternalWrite(AsyncSocket* socket) {
+  Write(int_socket_.get(), &in_buffer_);
+}
+
+void ProxyBinding::OnInternalClose(AsyncSocket* socket, int err) {
+  Destroy();
+}
+
+void ProxyBinding::OnExternalConnect(AsyncSocket* socket) {
+  RTC_DCHECK(socket != nullptr);
+  connected_ = true;
+  int_socket_->SendConnectResult(0, socket->GetRemoteAddress());
+}
+
+void ProxyBinding::OnExternalRead(AsyncSocket* socket) {
+  Read(ext_socket_.get(), &in_buffer_);
+  Write(int_socket_.get(), &in_buffer_);
+}
+
+void ProxyBinding::OnExternalWrite(AsyncSocket* socket) {
+  Write(ext_socket_.get(), &out_buffer_);
+}
+
+void ProxyBinding::OnExternalClose(AsyncSocket* socket, int err) {
+  if (!connected_) {
+    int_socket_->SendConnectResult(err, SocketAddress());
+  }
+  Destroy();
+}
+
+void ProxyBinding::Read(AsyncSocket* socket, FifoBuffer* buffer) {
+  // Only read if the buffer is empty.
+  RTC_DCHECK(socket != nullptr);
+  size_t size;
+  int read;
+  if (buffer->GetBuffered(&size) && size == 0) {
+    void* p = buffer->GetWriteBuffer(&size);
+    read = socket->Recv(p, size, nullptr);
+    buffer->ConsumeWriteBuffer(std::max(read, 0));
+  }
+}
+
+void ProxyBinding::Write(AsyncSocket* socket, FifoBuffer* buffer) {
+  RTC_DCHECK(socket != nullptr);
+  size_t size;
+  int written;
+  const void* p = buffer->GetReadData(&size);
+  written = socket->Send(p, size);
+  buffer->ConsumeReadData(std::max(written, 0));
+}
+
+void ProxyBinding::Destroy() {
+  SignalDestroyed(this);
+}
+
+AsyncProxyServerSocket* SocksProxyServer::WrapSocket(AsyncSocket* socket) {
+  return new AsyncSocksProxyServerSocket(socket);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/proxyserver.h b/rtc_base/proxyserver.h
new file mode 100644
index 0000000..d90b091
--- /dev/null
+++ b/rtc_base/proxyserver.h
@@ -0,0 +1,100 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_PROXYSERVER_H_
+#define RTC_BASE_PROXYSERVER_H_
+
+#include <list>
+#include <memory>
+#include "rtc_base/asyncsocket.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/socketadapters.h"
+#include "rtc_base/socketaddress.h"
+#include "rtc_base/stream.h"
+
+namespace rtc {
+
+class SocketFactory;
+
+// ProxyServer is a base class that allows for easy construction of proxy
+// servers. With its helper class ProxyBinding, it contains all the necessary
+// logic for receiving and bridging connections. The specific client-server
+// proxy protocol is implemented by an instance of the AsyncProxyServerSocket
+// class; children of ProxyServer implement WrapSocket appropriately to return
+// the correct protocol handler.
+
+class ProxyBinding : public sigslot::has_slots<> {
+ public:
+  ProxyBinding(AsyncProxyServerSocket* in_socket, AsyncSocket* out_socket);
+  ~ProxyBinding() override;
+  sigslot::signal1<ProxyBinding*> SignalDestroyed;
+
+ private:
+  void OnConnectRequest(AsyncProxyServerSocket* socket,
+                        const SocketAddress& addr);
+  void OnInternalRead(AsyncSocket* socket);
+  void OnInternalWrite(AsyncSocket* socket);
+  void OnInternalClose(AsyncSocket* socket, int err);
+  void OnExternalConnect(AsyncSocket* socket);
+  void OnExternalRead(AsyncSocket* socket);
+  void OnExternalWrite(AsyncSocket* socket);
+  void OnExternalClose(AsyncSocket* socket, int err);
+
+  static void Read(AsyncSocket* socket, FifoBuffer* buffer);
+  static void Write(AsyncSocket* socket, FifoBuffer* buffer);
+  void Destroy();
+
+  static const int kBufferSize = 4096;
+  std::unique_ptr<AsyncProxyServerSocket> int_socket_;
+  std::unique_ptr<AsyncSocket> ext_socket_;
+  bool connected_;
+  FifoBuffer out_buffer_;
+  FifoBuffer in_buffer_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(ProxyBinding);
+};
+
+class ProxyServer : public sigslot::has_slots<> {
+ public:
+  ProxyServer(SocketFactory* int_factory, const SocketAddress& int_addr,
+              SocketFactory* ext_factory, const SocketAddress& ext_ip);
+  ~ProxyServer() override;
+
+  // Returns the address to which the proxy server is bound
+  SocketAddress GetServerAddress();
+
+ protected:
+  void OnAcceptEvent(AsyncSocket* socket);
+  virtual AsyncProxyServerSocket* WrapSocket(AsyncSocket* socket) = 0;
+  void OnBindingDestroyed(ProxyBinding* binding);
+
+ private:
+  typedef std::list<ProxyBinding*> BindingList;
+  SocketFactory* ext_factory_;
+  SocketAddress ext_ip_;
+  std::unique_ptr<AsyncSocket> server_socket_;
+  BindingList bindings_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(ProxyServer);
+};
+
+// SocksProxyServer is a simple extension of ProxyServer to implement SOCKS.
+class SocksProxyServer : public ProxyServer {
+ public:
+  SocksProxyServer(SocketFactory* int_factory, const SocketAddress& int_addr,
+                   SocketFactory* ext_factory, const SocketAddress& ext_ip)
+      : ProxyServer(int_factory, int_addr, ext_factory, ext_ip) {
+  }
+ protected:
+  AsyncProxyServerSocket* WrapSocket(AsyncSocket* socket) override;
+  RTC_DISALLOW_COPY_AND_ASSIGN(SocksProxyServer);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_PROXYSERVER_H_
diff --git a/rtc_base/ptr_util.h b/rtc_base/ptr_util.h
new file mode 100644
index 0000000..156df84
--- /dev/null
+++ b/rtc_base/ptr_util.h
@@ -0,0 +1,82 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This implementation is borrowed from chromium.
+
+#ifndef RTC_BASE_PTR_UTIL_H_
+#define RTC_BASE_PTR_UTIL_H_
+
+#include <memory>
+#include <utility>
+
+namespace rtc {
+
+// Helper to transfer ownership of a raw pointer to a std::unique_ptr<T>.
+// Note that std::unique_ptr<T> has very different semantics from
+// std::unique_ptr<T[]>: do not use this helper for array allocations.
+template <typename T>
+std::unique_ptr<T> WrapUnique(T* ptr) {
+  return std::unique_ptr<T>(ptr);
+}
+
+namespace internal {
+
+template <typename T>
+struct MakeUniqueResult {
+  using Scalar = std::unique_ptr<T>;
+};
+
+template <typename T>
+struct MakeUniqueResult<T[]> {
+  using Array = std::unique_ptr<T[]>;
+};
+
+template <typename T, size_t N>
+struct MakeUniqueResult<T[N]> {
+  using Invalid = void;
+};
+
+}  // namespace internal
+
+// Helper to construct an object wrapped in a std::unique_ptr. This is an
+// implementation of C++14's std::make_unique that can be used in Chrome.
+//
+// MakeUnique<T>(args) should be preferred over WrapUnique(new T(args)): bare
+// calls to `new` should be treated with scrutiny.
+//
+// Usage:
+//   // ptr is a std::unique_ptr<std::string>
+//   auto ptr = MakeUnique<std::string>("hello world!");
+//
+//   // arr is a std::unique_ptr<int[]>
+//   auto arr = MakeUnique<int[]>(5);
+
+// Overload for non-array types. Arguments are forwarded to T's constructor.
+template <typename T, typename... Args>
+typename internal::MakeUniqueResult<T>::Scalar MakeUnique(Args&&... args) {
+  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
+}
+
+// Overload for array types of unknown bound, e.g. T[]. The array is allocated
+// with `new T[n]()` and value-initialized: note that this is distinct from
+// `new T[n]`, which default-initializes.
+template <typename T>
+typename internal::MakeUniqueResult<T>::Array MakeUnique(size_t size) {
+  return std::unique_ptr<T>(new typename std::remove_extent<T>::type[size]());
+}
+
+// Overload to reject array types of known bound, e.g. T[n].
+template <typename T, typename... Args>
+typename internal::MakeUniqueResult<T>::Invalid MakeUnique(Args&&... args) =
+    delete;
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_PTR_UTIL_H_
diff --git a/rtc_base/ptr_util_unittest.cc b/rtc_base/ptr_util_unittest.cc
new file mode 100644
index 0000000..6497fda
--- /dev/null
+++ b/rtc_base/ptr_util_unittest.cc
@@ -0,0 +1,69 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/ptr_util.h"
+
+#include <stddef.h>
+#include <string>
+
+#include "rtc_base/gunit.h"
+
+namespace rtc {
+
+namespace {
+
+class DeleteCounter {
+ public:
+  DeleteCounter() { ++count_; }
+  ~DeleteCounter() { --count_; }
+
+  static size_t count() { return count_; }
+
+ private:
+  static size_t count_;
+};
+
+size_t DeleteCounter::count_ = 0;
+
+}  // namespace
+
+TEST(PtrUtilTest, WrapUnique) {
+  EXPECT_EQ(0u, DeleteCounter::count());
+  DeleteCounter* counter = new DeleteCounter;
+  EXPECT_EQ(1u, DeleteCounter::count());
+  std::unique_ptr<DeleteCounter> owned_counter = WrapUnique(counter);
+  EXPECT_EQ(1u, DeleteCounter::count());
+  owned_counter.reset();
+  EXPECT_EQ(0u, DeleteCounter::count());
+}
+
+TEST(PtrUtilTest, MakeUniqueScalar) {
+  auto s = MakeUnique<std::string>();
+  EXPECT_EQ("", *s);
+
+  auto s2 = MakeUnique<std::string>("test");
+  EXPECT_EQ("test", *s2);
+}
+
+TEST(PtrUtilTest, MakeUniqueScalarWithMoveOnlyType) {
+  using MoveOnly = std::unique_ptr<std::string>;
+  auto p = MakeUnique<MoveOnly>(MakeUnique<std::string>("test"));
+  EXPECT_EQ("test", **p);
+}
+
+TEST(PtrUtilTest, MakeUniqueArray) {
+  EXPECT_EQ(0u, DeleteCounter::count());
+  auto a = MakeUnique<DeleteCounter[]>(5);
+  EXPECT_EQ(5u, DeleteCounter::count());
+  a.reset();
+  EXPECT_EQ(0u, DeleteCounter::count());
+}
+
+}  // namespace rtc
diff --git a/rtc_base/race_checker.cc b/rtc_base/race_checker.cc
new file mode 100644
index 0000000..bf9dfdc
--- /dev/null
+++ b/rtc_base/race_checker.cc
@@ -0,0 +1,54 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/race_checker.h"
+
+namespace rtc {
+
+RaceChecker::RaceChecker() {}
+
+// Note that the implementation here is in itself racy, but we pretend it does
+// not matter because we want this useful in release builds without having to
+// pay the cost of using atomics. A race hitting the race checker is likely to
+// cause access_count_ to diverge from zero and therefore cause the ThreadRef
+// comparison to fail, signaling a race, although it may not be in the exact
+// spot where a race *first* appeared in the code we're trying to protect. There
+// is also a chance that an actual race is missed, however the probability of
+// that has been considered small enough to be an acceptable trade off.
+bool RaceChecker::Acquire() const {
+  const PlatformThreadRef current_thread = CurrentThreadRef();
+  // Set new accessing thread if this is a new use.
+  if (access_count_++ == 0)
+    accessing_thread_ = current_thread;
+  // If this is being used concurrently this check will fail for the second
+  // thread entering since it won't set the thread. Recursive use of checked
+  // methods are OK since the accessing thread remains the same.
+  const PlatformThreadRef accessing_thread = accessing_thread_;
+  return IsThreadRefEqual(accessing_thread, current_thread);
+}
+
+void RaceChecker::Release() const {
+  --access_count_;
+}
+
+namespace internal {
+RaceCheckerScope::RaceCheckerScope(const RaceChecker* race_checker)
+    : race_checker_(race_checker), race_check_ok_(race_checker->Acquire()) {}
+
+bool RaceCheckerScope::RaceDetected() const {
+  return !race_check_ok_;
+}
+
+RaceCheckerScope::~RaceCheckerScope() {
+  race_checker_->Release();
+}
+
+}  // namespace internal
+}  // namespace rtc
diff --git a/rtc_base/race_checker.h b/rtc_base/race_checker.h
new file mode 100644
index 0000000..73567e9
--- /dev/null
+++ b/rtc_base/race_checker.h
@@ -0,0 +1,78 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_RACE_CHECKER_H_
+#define RTC_BASE_RACE_CHECKER_H_
+
+#include "rtc_base/checks.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace rtc {
+
+namespace internal {
+class RaceCheckerScope;
+}  // namespace internal
+
+// Best-effort race-checking implementation. This primitive uses no
+// synchronization at all to be as-fast-as-possible in the non-racy case.
+class RTC_LOCKABLE RaceChecker {
+ public:
+  friend class internal::RaceCheckerScope;
+  RaceChecker();
+
+ private:
+  bool Acquire() const RTC_EXCLUSIVE_LOCK_FUNCTION();
+  void Release() const RTC_UNLOCK_FUNCTION();
+
+  // Volatile to prevent code being optimized away in Acquire()/Release().
+  mutable volatile int access_count_ = 0;
+  mutable volatile PlatformThreadRef accessing_thread_;
+};
+
+namespace internal {
+class RTC_SCOPED_LOCKABLE RaceCheckerScope {
+ public:
+  explicit RaceCheckerScope(const RaceChecker* race_checker)
+      RTC_EXCLUSIVE_LOCK_FUNCTION(race_checker);
+
+  bool RaceDetected() const;
+  ~RaceCheckerScope() RTC_UNLOCK_FUNCTION();
+
+ private:
+  const RaceChecker* const race_checker_;
+  const bool race_check_ok_;
+};
+
+class RTC_SCOPED_LOCKABLE RaceCheckerScopeDoNothing {
+ public:
+  explicit RaceCheckerScopeDoNothing(const RaceChecker* race_checker)
+      RTC_EXCLUSIVE_LOCK_FUNCTION(race_checker) {}
+
+  ~RaceCheckerScopeDoNothing() RTC_UNLOCK_FUNCTION() {}
+};
+
+}  // namespace internal
+}  // namespace rtc
+
+#define RTC_CHECK_RUNS_SERIALIZED(x)               \
+  rtc::internal::RaceCheckerScope race_checker(x); \
+  RTC_CHECK(!race_checker.RaceDetected())
+
+#if RTC_DCHECK_IS_ON
+#define RTC_DCHECK_RUNS_SERIALIZED(x)              \
+  rtc::internal::RaceCheckerScope race_checker(x); \
+  RTC_DCHECK(!race_checker.RaceDetected())
+#else
+#define RTC_DCHECK_RUNS_SERIALIZED(x) \
+  rtc::internal::RaceCheckerScopeDoNothing race_checker(x)
+#endif
+
+#endif  // RTC_BASE_RACE_CHECKER_H_
diff --git a/rtc_base/random.cc b/rtc_base/random.cc
new file mode 100644
index 0000000..5deb621
--- /dev/null
+++ b/rtc_base/random.cc
@@ -0,0 +1,85 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "rtc_base/random.h"
+
+#include <math.h>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+Random::Random(uint64_t seed) {
+  RTC_DCHECK(seed != 0x0ull);
+  state_ = seed;
+}
+
+uint32_t Random::Rand(uint32_t t) {
+  // Casting the output to 32 bits will give an almost uniform number.
+  // Pr[x=0] = (2^32-1) / (2^64-1)
+  // Pr[x=k] = 2^32 / (2^64-1) for k!=0
+  // Uniform would be Pr[x=k] = 2^32 / 2^64 for all 32-bit integers k.
+  uint32_t x = NextOutput();
+  // If x / 2^32 is uniform on [0,1), then x / 2^32 * (t+1) is uniform on
+  // the interval [0,t+1), so the integer part is uniform on [0,t].
+  uint64_t result = x * (static_cast<uint64_t>(t) + 1);
+  result >>= 32;
+  return result;
+}
+
+uint32_t Random::Rand(uint32_t low, uint32_t high) {
+  RTC_DCHECK(low <= high);
+  return Rand(high - low) + low;
+}
+
+int32_t Random::Rand(int32_t low, int32_t high) {
+  RTC_DCHECK(low <= high);
+  const int64_t low_i64{low};
+  return rtc::dchecked_cast<int32_t>(
+      Rand(rtc::dchecked_cast<uint32_t>(high - low_i64)) + low_i64);
+}
+
+template <>
+float Random::Rand<float>() {
+  double result = NextOutput() - 1;
+  result = result / 0xFFFFFFFFFFFFFFFEull;
+  return static_cast<float>(result);
+}
+
+template <>
+double Random::Rand<double>() {
+  double result = NextOutput() - 1;
+  result = result / 0xFFFFFFFFFFFFFFFEull;
+  return result;
+}
+
+template <>
+bool Random::Rand<bool>() {
+  return Rand(0, 1) == 1;
+}
+
+double Random::Gaussian(double mean, double standard_deviation) {
+  // Creating a Normal distribution variable from two independent uniform
+  // variables based on the Box-Muller transform, which is defined on the
+  // interval (0, 1]. Note that we rely on NextOutput to generate integers
+  // in the range [1, 2^64-1]. Normally this behavior is a bit frustrating,
+  // but here it is exactly what we need.
+  const double kPi = 3.14159265358979323846;
+  double u1 = static_cast<double>(NextOutput()) / 0xFFFFFFFFFFFFFFFFull;
+  double u2 = static_cast<double>(NextOutput()) / 0xFFFFFFFFFFFFFFFFull;
+  return mean + standard_deviation * sqrt(-2 * log(u1)) * cos(2 * kPi * u2);
+}
+
+double Random::Exponential(double lambda) {
+  double uniform = Rand<double>();
+  return -log(uniform) / lambda;
+}
+
+}  // namespace webrtc
diff --git a/rtc_base/random.h b/rtc_base/random.h
new file mode 100644
index 0000000..7c103cc
--- /dev/null
+++ b/rtc_base/random.h
@@ -0,0 +1,93 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_RANDOM_H_
+#define RTC_BASE_RANDOM_H_
+
+#include <limits>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class Random {
+ public:
+  // TODO(tommi): Change this so that the seed can be initialized internally,
+  // e.g. by offering two ways of constructing or offer a static method that
+  // returns a seed that's suitable for initialization.
+  // The problem now is that callers are calling clock_->TimeInMicroseconds()
+  // which calls TickTime::Now().Ticks(), which can return a very low value on
+  // Mac and can result in a seed of 0 after conversion to microseconds.
+  // Besides the quality of the random seed being poor, this also requires
+  // the client to take on extra dependencies to generate a seed.
+  // If we go for a static seed generator in Random, we can use something from
+  // webrtc/rtc_base and make sure that it works the same way across platforms.
+  // See also discussion here: https://codereview.webrtc.org/1623543002/
+  explicit Random(uint64_t seed);
+
+  // Return pseudo-random integer of the specified type.
+  // We need to limit the size to 32 bits to keep the output close to uniform.
+  template <typename T>
+  T Rand() {
+    static_assert(std::numeric_limits<T>::is_integer &&
+                      std::numeric_limits<T>::radix == 2 &&
+                      std::numeric_limits<T>::digits <= 32,
+                  "Rand is only supported for built-in integer types that are "
+                  "32 bits or smaller.");
+    return static_cast<T>(NextOutput());
+  }
+
+  // Uniformly distributed pseudo-random number in the interval [0, t].
+  uint32_t Rand(uint32_t t);
+
+  // Uniformly distributed pseudo-random number in the interval [low, high].
+  uint32_t Rand(uint32_t low, uint32_t high);
+
+  // Uniformly distributed pseudo-random number in the interval [low, high].
+  int32_t Rand(int32_t low, int32_t high);
+
+  // Normal Distribution.
+  double Gaussian(double mean, double standard_deviation);
+
+  // Exponential Distribution.
+  double Exponential(double lambda);
+
+ private:
+  // Outputs a nonzero 64-bit random number.
+  uint64_t NextOutput() {
+    state_ ^= state_ >> 12;
+    state_ ^= state_ << 25;
+    state_ ^= state_ >> 27;
+    RTC_DCHECK(state_ != 0x0ULL);
+    return state_ * 2685821657736338717ull;
+  }
+
+  uint64_t state_;
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(Random);
+};
+
+// Return pseudo-random number in the interval [0.0, 1.0).
+template <>
+float Random::Rand<float>();
+
+// Return pseudo-random number in the interval [0.0, 1.0).
+template <>
+double Random::Rand<double>();
+
+// Return pseudo-random boolean value.
+template <>
+bool Random::Rand<bool>();
+
+}  // namespace webrtc
+
+#endif  // RTC_BASE_RANDOM_H_
diff --git a/rtc_base/random_unittest.cc b/rtc_base/random_unittest.cc
new file mode 100644
index 0000000..d05a16b
--- /dev/null
+++ b/rtc_base/random_unittest.cc
@@ -0,0 +1,303 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+
+#include <limits>
+#include <vector>
+
+#include "rtc_base/numerics/mathutils.h"  // unsigned difference
+#include "rtc_base/random.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+// Computes the positive remainder of x/n.
+template <typename T>
+T fdiv_remainder(T x, T n) {
+  RTC_CHECK_GE(n, 0);
+  T remainder = x % n;
+  if (remainder < 0)
+    remainder += n;
+  return remainder;
+}
+}  // namespace
+
+// Sample a number of random integers of type T. Divide them into buckets
+// based on the remainder when dividing by bucket_count and check that each
+// bucket gets roughly the expected number of elements.
+template <typename T>
+void UniformBucketTest(T bucket_count, int samples, Random* prng) {
+  std::vector<int> buckets(bucket_count, 0);
+
+  uint64_t total_values = 1ull << (std::numeric_limits<T>::digits +
+                                   std::numeric_limits<T>::is_signed);
+  T upper_limit =
+      std::numeric_limits<T>::max() -
+      static_cast<T>(total_values % static_cast<uint64_t>(bucket_count));
+  ASSERT_GT(upper_limit, std::numeric_limits<T>::max() / 2);
+
+  for (int i = 0; i < samples; i++) {
+    T sample;
+    do {
+      // We exclude a few numbers from the range so that it is divisible by
+      // the number of buckets. If we are unlucky and hit one of the excluded
+      // numbers we just resample. Note that if the number of buckets is a
+      // power of 2, then we don't have to exclude anything.
+      sample = prng->Rand<T>();
+    } while (sample > upper_limit);
+    buckets[fdiv_remainder(sample, bucket_count)]++;
+  }
+
+  for (T i = 0; i < bucket_count; i++) {
+    // Expect the result to be within 3 standard deviations of the mean.
+    EXPECT_NEAR(buckets[i], samples / bucket_count,
+                3 * sqrt(samples / bucket_count));
+  }
+}
+
+TEST(RandomNumberGeneratorTest, BucketTestSignedChar) {
+  Random prng(7297352569824ull);
+  UniformBucketTest<signed char>(64, 640000, &prng);
+  UniformBucketTest<signed char>(11, 440000, &prng);
+  UniformBucketTest<signed char>(3, 270000, &prng);
+}
+
+TEST(RandomNumberGeneratorTest, BucketTestUnsignedChar) {
+  Random prng(7297352569824ull);
+  UniformBucketTest<unsigned char>(64, 640000, &prng);
+  UniformBucketTest<unsigned char>(11, 440000, &prng);
+  UniformBucketTest<unsigned char>(3, 270000, &prng);
+}
+
+TEST(RandomNumberGeneratorTest, BucketTestSignedShort) {
+  Random prng(7297352569824ull);
+  UniformBucketTest<int16_t>(64, 640000, &prng);
+  UniformBucketTest<int16_t>(11, 440000, &prng);
+  UniformBucketTest<int16_t>(3, 270000, &prng);
+}
+
+TEST(RandomNumberGeneratorTest, BucketTestUnsignedShort) {
+  Random prng(7297352569824ull);
+  UniformBucketTest<uint16_t>(64, 640000, &prng);
+  UniformBucketTest<uint16_t>(11, 440000, &prng);
+  UniformBucketTest<uint16_t>(3, 270000, &prng);
+}
+
+TEST(RandomNumberGeneratorTest, BucketTestSignedInt) {
+  Random prng(7297352569824ull);
+  UniformBucketTest<signed int>(64, 640000, &prng);
+  UniformBucketTest<signed int>(11, 440000, &prng);
+  UniformBucketTest<signed int>(3, 270000, &prng);
+}
+
+TEST(RandomNumberGeneratorTest, BucketTestUnsignedInt) {
+  Random prng(7297352569824ull);
+  UniformBucketTest<unsigned int>(64, 640000, &prng);
+  UniformBucketTest<unsigned int>(11, 440000, &prng);
+  UniformBucketTest<unsigned int>(3, 270000, &prng);
+}
+
+// The range of the random numbers is divided into bucket_count intervals
+// of consecutive numbers. Check that approximately equally many numbers
+// from each inteval are generated.
+void BucketTestSignedInterval(unsigned int bucket_count,
+                              unsigned int samples,
+                              int32_t low,
+                              int32_t high,
+                              int sigma_level,
+                              Random* prng) {
+  std::vector<unsigned int> buckets(bucket_count, 0);
+
+  ASSERT_GE(high, low);
+  ASSERT_GE(bucket_count, 2u);
+  uint32_t interval = unsigned_difference<int32_t>(high, low) + 1;
+  uint32_t numbers_per_bucket;
+  if (interval == 0) {
+    // The computation high - low + 1 should be 2^32 but overflowed
+    // Hence, bucket_count must be a power of 2
+    ASSERT_EQ(bucket_count & (bucket_count - 1), 0u);
+    numbers_per_bucket = (0x80000000u / bucket_count) * 2;
+  } else {
+    ASSERT_EQ(interval % bucket_count, 0u);
+    numbers_per_bucket = interval / bucket_count;
+  }
+
+  for (unsigned int i = 0; i < samples; i++) {
+    int32_t sample = prng->Rand(low, high);
+    EXPECT_LE(low, sample);
+    EXPECT_GE(high, sample);
+    buckets[unsigned_difference<int32_t>(sample, low) / numbers_per_bucket]++;
+  }
+
+  for (unsigned int i = 0; i < bucket_count; i++) {
+    // Expect the result to be within 3 standard deviations of the mean,
+    // or more generally, within sigma_level standard deviations of the mean.
+    double mean = static_cast<double>(samples) / bucket_count;
+    EXPECT_NEAR(buckets[i], mean, sigma_level * sqrt(mean));
+  }
+}
+
+// The range of the random numbers is divided into bucket_count intervals
+// of consecutive numbers. Check that approximately equally many numbers
+// from each inteval are generated.
+void BucketTestUnsignedInterval(unsigned int bucket_count,
+                                unsigned int samples,
+                                uint32_t low,
+                                uint32_t high,
+                                int sigma_level,
+                                Random* prng) {
+  std::vector<unsigned int> buckets(bucket_count, 0);
+
+  ASSERT_GE(high, low);
+  ASSERT_GE(bucket_count, 2u);
+  uint32_t interval = high - low + 1;
+  uint32_t numbers_per_bucket;
+  if (interval == 0) {
+    // The computation high - low + 1 should be 2^32 but overflowed
+    // Hence, bucket_count must be a power of 2
+    ASSERT_EQ(bucket_count & (bucket_count - 1), 0u);
+    numbers_per_bucket = (0x80000000u / bucket_count) * 2;
+  } else {
+    ASSERT_EQ(interval % bucket_count, 0u);
+    numbers_per_bucket = interval / bucket_count;
+  }
+
+  for (unsigned int i = 0; i < samples; i++) {
+    uint32_t sample = prng->Rand(low, high);
+    EXPECT_LE(low, sample);
+    EXPECT_GE(high, sample);
+    buckets[(sample - low) / numbers_per_bucket]++;
+  }
+
+  for (unsigned int i = 0; i < bucket_count; i++) {
+    // Expect the result to be within 3 standard deviations of the mean,
+    // or more generally, within sigma_level standard deviations of the mean.
+    double mean = static_cast<double>(samples) / bucket_count;
+    EXPECT_NEAR(buckets[i], mean, sigma_level * sqrt(mean));
+  }
+}
+
+TEST(RandomNumberGeneratorTest, UniformUnsignedInterval) {
+  Random prng(299792458ull);
+  BucketTestUnsignedInterval(2, 100000, 0, 1, 3, &prng);
+  BucketTestUnsignedInterval(7, 100000, 1, 14, 3, &prng);
+  BucketTestUnsignedInterval(11, 100000, 1000, 1010, 3, &prng);
+  BucketTestUnsignedInterval(100, 100000, 0, 99, 3, &prng);
+  BucketTestUnsignedInterval(2, 100000, 0, 4294967295, 3, &prng);
+  BucketTestUnsignedInterval(17, 100000, 455, 2147484110, 3, &prng);
+  // 99.7% of all samples will be within 3 standard deviations of the mean,
+  // but since we test 1000 buckets we allow an interval of 4 sigma.
+  BucketTestUnsignedInterval(1000, 1000000, 0, 2147483999, 4, &prng);
+}
+
+TEST(RandomNumberGeneratorTest, UniformSignedInterval) {
+  Random prng(66260695729ull);
+  BucketTestSignedInterval(2, 100000, 0, 1, 3, &prng);
+  BucketTestSignedInterval(7, 100000, -2, 4, 3, &prng);
+  BucketTestSignedInterval(11, 100000, 1000, 1010, 3, &prng);
+  BucketTestSignedInterval(100, 100000, 0, 99, 3, &prng);
+  BucketTestSignedInterval(2, 100000, std::numeric_limits<int32_t>::min(),
+                           std::numeric_limits<int32_t>::max(), 3, &prng);
+  BucketTestSignedInterval(17, 100000, -1073741826, 1073741829, 3, &prng);
+  // 99.7% of all samples will be within 3 standard deviations of the mean,
+  // but since we test 1000 buckets we allow an interval of 4 sigma.
+  BucketTestSignedInterval(1000, 1000000, -352, 2147483647, 4, &prng);
+}
+
+// The range of the random numbers is divided into bucket_count intervals
+// of consecutive numbers. Check that approximately equally many numbers
+// from each inteval are generated.
+void BucketTestFloat(unsigned int bucket_count,
+                     unsigned int samples,
+                     int sigma_level,
+                     Random* prng) {
+  ASSERT_GE(bucket_count, 2u);
+  std::vector<unsigned int> buckets(bucket_count, 0);
+
+  for (unsigned int i = 0; i < samples; i++) {
+    uint32_t sample = bucket_count * prng->Rand<float>();
+    EXPECT_LE(0u, sample);
+    EXPECT_GE(bucket_count - 1, sample);
+    buckets[sample]++;
+  }
+
+  for (unsigned int i = 0; i < bucket_count; i++) {
+    // Expect the result to be within 3 standard deviations of the mean,
+    // or more generally, within sigma_level standard deviations of the mean.
+    double mean = static_cast<double>(samples) / bucket_count;
+    EXPECT_NEAR(buckets[i], mean, sigma_level * sqrt(mean));
+  }
+}
+
+TEST(RandomNumberGeneratorTest, UniformFloatInterval) {
+  Random prng(1380648813ull);
+  BucketTestFloat(100, 100000, 3, &prng);
+  // 99.7% of all samples will be within 3 standard deviations of the mean,
+  // but since we test 1000 buckets we allow an interval of 4 sigma.
+  // BucketTestSignedInterval(1000, 1000000, -352, 2147483647, 4, &prng);
+}
+
+TEST(RandomNumberGeneratorTest, SignedHasSameBitPattern) {
+  Random prng_signed(66738480ull), prng_unsigned(66738480ull);
+
+  for (int i = 0; i < 1000; i++) {
+    signed int s = prng_signed.Rand<signed int>();
+    unsigned int u = prng_unsigned.Rand<unsigned int>();
+    EXPECT_EQ(u, static_cast<unsigned int>(s));
+  }
+
+  for (int i = 0; i < 1000; i++) {
+    int16_t s = prng_signed.Rand<int16_t>();
+    uint16_t u = prng_unsigned.Rand<uint16_t>();
+    EXPECT_EQ(u, static_cast<uint16_t>(s));
+  }
+
+  for (int i = 0; i < 1000; i++) {
+    signed char s = prng_signed.Rand<signed char>();
+    unsigned char u = prng_unsigned.Rand<unsigned char>();
+    EXPECT_EQ(u, static_cast<unsigned char>(s));
+  }
+}
+
+TEST(RandomNumberGeneratorTest, Gaussian) {
+  const int kN = 100000;
+  const int kBuckets = 100;
+  const double kMean = 49;
+  const double kStddev = 10;
+
+  Random prng(1256637061);
+
+  std::vector<unsigned int> buckets(kBuckets, 0);
+  for (int i = 0; i < kN; i++) {
+    int index = prng.Gaussian(kMean, kStddev) + 0.5;
+    if (index >= 0 && index < kBuckets) {
+      buckets[index]++;
+    }
+  }
+
+  const double kPi = 3.14159265358979323846;
+  const double kScale = 1 / (kStddev * sqrt(2.0 * kPi));
+  const double kDiv = -2.0 * kStddev * kStddev;
+  for (int n = 0; n < kBuckets; ++n) {
+    // Use Simpsons rule to estimate the probability that a random gaussian
+    // sample is in the interval [n-0.5, n+0.5].
+    double f_left = kScale * exp((n - kMean - 0.5) * (n - kMean - 0.5) / kDiv);
+    double f_mid = kScale * exp((n - kMean) * (n - kMean) / kDiv);
+    double f_right = kScale * exp((n - kMean + 0.5) * (n - kMean + 0.5) / kDiv);
+    double normal_dist = (f_left + 4 * f_mid + f_right) / 6;
+    // Expect the number of samples to be within 3 standard deviations
+    // (rounded up) of the expected number of samples in the bucket.
+    EXPECT_NEAR(buckets[n], kN * normal_dist, 3 * sqrt(kN * normal_dist) + 1);
+  }
+}
+
+}  // namespace webrtc
diff --git a/rtc_base/rate_limiter.cc b/rtc_base/rate_limiter.cc
new file mode 100644
index 0000000..34e0a40
--- /dev/null
+++ b/rtc_base/rate_limiter.cc
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/rate_limiter.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+RateLimiter::RateLimiter(const Clock* clock, int64_t max_window_ms)
+    : clock_(clock),
+      current_rate_(max_window_ms, RateStatistics::kBpsScale),
+      window_size_ms_(max_window_ms),
+      max_rate_bps_(std::numeric_limits<uint32_t>::max()) {}
+
+RateLimiter::~RateLimiter() {}
+
+// Usage note: This class is intended be usable in a scenario where different
+// threads may call each of the the different method. For instance, a network
+// thread trying to send data calling TryUseRate(), the bandwidth estimator
+// calling SetMaxRate() and a timed maintenance thread periodically updating
+// the RTT.
+bool RateLimiter::TryUseRate(size_t packet_size_bytes) {
+  rtc::CritScope cs(&lock_);
+  int64_t now_ms = clock_->TimeInMilliseconds();
+  rtc::Optional<uint32_t> current_rate = current_rate_.Rate(now_ms);
+  if (current_rate) {
+    // If there is a current rate, check if adding bytes would cause maximum
+    // bitrate target to be exceeded. If there is NOT a valid current rate,
+    // allow allocating rate even if target is exceeded. This prevents
+    // problems
+    // at very low rates, where for instance retransmissions would never be
+    // allowed due to too high bitrate caused by a single packet.
+
+    size_t bitrate_addition_bps =
+        (packet_size_bytes * 8 * 1000) / window_size_ms_;
+    if (*current_rate + bitrate_addition_bps > max_rate_bps_)
+      return false;
+  }
+
+  current_rate_.Update(packet_size_bytes, now_ms);
+  return true;
+}
+
+void RateLimiter::SetMaxRate(uint32_t max_rate_bps) {
+  rtc::CritScope cs(&lock_);
+  max_rate_bps_ = max_rate_bps;
+}
+
+// Set the window size over which to measure the current bitrate.
+// For retransmissions, this is typically the RTT.
+bool RateLimiter::SetWindowSize(int64_t window_size_ms) {
+  rtc::CritScope cs(&lock_);
+  window_size_ms_ = window_size_ms;
+  return current_rate_.SetWindowSize(window_size_ms,
+                                     clock_->TimeInMilliseconds());
+}
+
+}  // namespace webrtc
diff --git a/rtc_base/rate_limiter.h b/rtc_base/rate_limiter.h
new file mode 100644
index 0000000..0bfde0d
--- /dev/null
+++ b/rtc_base/rate_limiter.h
@@ -0,0 +1,56 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_RATE_LIMITER_H_
+#define RTC_BASE_RATE_LIMITER_H_
+
+#include <limits>
+
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/rate_statistics.h"
+
+namespace webrtc {
+
+class Clock;
+
+// Class used to limit a bitrate, making sure the average does not exceed a
+// maximum as measured over a sliding window. This class is thread safe; all
+// methods will acquire (the same) lock befeore executing.
+class RateLimiter {
+ public:
+  RateLimiter(const Clock* clock, int64_t max_window_ms);
+  ~RateLimiter();
+
+  // Try to use rate to send bytes. Returns true on success and if so updates
+  // current rate.
+  bool TryUseRate(size_t packet_size_bytes);
+
+  // Set the maximum bitrate, in bps, that this limiter allows to send.
+  void SetMaxRate(uint32_t max_rate_bps);
+
+  // Set the window size over which to measure the current bitrate.
+  // For example, irt retransmissions, this is typically the RTT.
+  // Returns true on success and false if window_size_ms is out of range.
+  bool SetWindowSize(int64_t window_size_ms);
+
+ private:
+  const Clock* const clock_;
+  rtc::CriticalSection lock_;
+  RateStatistics current_rate_ RTC_GUARDED_BY(lock_);
+  int64_t window_size_ms_ RTC_GUARDED_BY(lock_);
+  uint32_t max_rate_bps_ RTC_GUARDED_BY(lock_);
+
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RateLimiter);
+};
+
+}  // namespace webrtc
+
+#endif  // RTC_BASE_RATE_LIMITER_H_
diff --git a/rtc_base/rate_limiter_unittest.cc b/rtc_base/rate_limiter_unittest.cc
new file mode 100644
index 0000000..6efea54
--- /dev/null
+++ b/rtc_base/rate_limiter_unittest.cc
@@ -0,0 +1,203 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+#include <memory>
+
+#include "rtc_base/event.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/rate_limiter.h"
+#include "rtc_base/task_queue.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+class RateLimitTest : public ::testing::Test {
+ public:
+  RateLimitTest()
+      : clock_(0), rate_limiter(new RateLimiter(&clock_, kWindowSizeMs)) {}
+  ~RateLimitTest() override {}
+
+  void SetUp() override { rate_limiter->SetMaxRate(kMaxRateBps); }
+
+ protected:
+  static constexpr int64_t kWindowSizeMs = 1000;
+  static constexpr uint32_t kMaxRateBps = 100000;
+  // Bytes needed to completely saturate the rate limiter.
+  static constexpr size_t kRateFillingBytes =
+      (kMaxRateBps * kWindowSizeMs) / (8 * 1000);
+  SimulatedClock clock_;
+  std::unique_ptr<RateLimiter> rate_limiter;
+};
+
+TEST_F(RateLimitTest, IncreasingMaxRate) {
+  // Fill rate, extend window to full size.
+  EXPECT_TRUE(rate_limiter->TryUseRate(kRateFillingBytes / 2));
+  clock_.AdvanceTimeMilliseconds(kWindowSizeMs - 1);
+  EXPECT_TRUE(rate_limiter->TryUseRate(kRateFillingBytes / 2));
+
+  // All rate consumed.
+  EXPECT_FALSE(rate_limiter->TryUseRate(1));
+
+  // Double the available rate and fill that too.
+  rate_limiter->SetMaxRate(kMaxRateBps * 2);
+  EXPECT_TRUE(rate_limiter->TryUseRate(kRateFillingBytes));
+
+  // All rate consumed again.
+  EXPECT_FALSE(rate_limiter->TryUseRate(1));
+}
+
+TEST_F(RateLimitTest, DecreasingMaxRate) {
+  // Fill rate, extend window to full size.
+  EXPECT_TRUE(rate_limiter->TryUseRate(kRateFillingBytes / 2));
+  clock_.AdvanceTimeMilliseconds(kWindowSizeMs - 1);
+  EXPECT_TRUE(rate_limiter->TryUseRate(kRateFillingBytes / 2));
+
+  // All rate consumed.
+  EXPECT_FALSE(rate_limiter->TryUseRate(1));
+
+  // Halve the available rate and move window so half of the data falls out.
+  rate_limiter->SetMaxRate(kMaxRateBps / 2);
+  clock_.AdvanceTimeMilliseconds(1);
+
+  // All rate still consumed.
+  EXPECT_FALSE(rate_limiter->TryUseRate(1));
+}
+
+TEST_F(RateLimitTest, ChangingWindowSize) {
+  // Fill rate, extend window to full size.
+  EXPECT_TRUE(rate_limiter->TryUseRate(kRateFillingBytes / 2));
+  clock_.AdvanceTimeMilliseconds(kWindowSizeMs - 1);
+  EXPECT_TRUE(rate_limiter->TryUseRate(kRateFillingBytes / 2));
+
+  // All rate consumed.
+  EXPECT_FALSE(rate_limiter->TryUseRate(1));
+
+  // Decrease window size so half of the data falls out.
+  rate_limiter->SetWindowSize(kWindowSizeMs / 2);
+  // Average rate should still be the same, so rate is still all consumed.
+  EXPECT_FALSE(rate_limiter->TryUseRate(1));
+
+  // Increase window size again. Now the rate is only half used (removed data
+  // points don't come back to life).
+  rate_limiter->SetWindowSize(kWindowSizeMs);
+  EXPECT_TRUE(rate_limiter->TryUseRate(kRateFillingBytes / 2));
+
+  // All rate consumed again.
+  EXPECT_FALSE(rate_limiter->TryUseRate(1));
+}
+
+TEST_F(RateLimitTest, SingleUsageAlwaysOk) {
+  // Using more bytes than can fit in a window is OK for a single packet.
+  EXPECT_TRUE(rate_limiter->TryUseRate(kRateFillingBytes + 1));
+}
+
+TEST_F(RateLimitTest, WindowSizeLimits) {
+  EXPECT_TRUE(rate_limiter->SetWindowSize(1));
+  EXPECT_FALSE(rate_limiter->SetWindowSize(0));
+  EXPECT_TRUE(rate_limiter->SetWindowSize(kWindowSizeMs));
+  EXPECT_FALSE(rate_limiter->SetWindowSize(kWindowSizeMs + 1));
+}
+
+static const int64_t kMaxTimeoutMs = 30000;
+
+class ThreadTask {
+ public:
+  explicit ThreadTask(RateLimiter* rate_limiter)
+      : rate_limiter_(rate_limiter),
+        start_signal_(false, false),
+        end_signal_(false, false) {}
+  virtual ~ThreadTask() {}
+
+  void Run() {
+    start_signal_.Wait(kMaxTimeoutMs);
+    DoRun();
+    end_signal_.Set();
+  }
+
+  virtual void DoRun() = 0;
+
+  RateLimiter* const rate_limiter_;
+  rtc::Event start_signal_;
+  rtc::Event end_signal_;
+};
+
+void RunTask(void* thread_task) {
+  reinterpret_cast<ThreadTask*>(thread_task)->Run();
+}
+
+TEST_F(RateLimitTest, MultiThreadedUsage) {
+  // Simple sanity test, with different threads calling the various methods.
+  // Runs a few simple tasks, each on its own thread, but coordinated with
+  // events so that they run in a serialized order. Intended to catch data
+  // races when run with tsan et al.
+
+  // Half window size, double rate -> same amount of bytes needed to fill rate.
+
+  class SetWindowSizeTask : public ThreadTask {
+   public:
+    explicit SetWindowSizeTask(RateLimiter* rate_limiter)
+        : ThreadTask(rate_limiter) {}
+    ~SetWindowSizeTask() override {}
+
+    void DoRun() override {
+      EXPECT_TRUE(rate_limiter_->SetWindowSize(kWindowSizeMs / 2));
+    }
+  } set_window_size_task(rate_limiter.get());
+  rtc::PlatformThread thread1(RunTask, &set_window_size_task, "Thread1");
+  thread1.Start();
+
+  class SetMaxRateTask : public ThreadTask {
+   public:
+    explicit SetMaxRateTask(RateLimiter* rate_limiter)
+        : ThreadTask(rate_limiter) {}
+    ~SetMaxRateTask() override {}
+
+    void DoRun() override { rate_limiter_->SetMaxRate(kMaxRateBps * 2); }
+  } set_max_rate_task(rate_limiter.get());
+  rtc::PlatformThread thread2(RunTask, &set_max_rate_task, "Thread2");
+  thread2.Start();
+
+  class UseRateTask : public ThreadTask {
+   public:
+    UseRateTask(RateLimiter* rate_limiter, SimulatedClock* clock)
+        : ThreadTask(rate_limiter), clock_(clock) {}
+    ~UseRateTask() override {}
+
+    void DoRun() override {
+      EXPECT_TRUE(rate_limiter_->TryUseRate(kRateFillingBytes / 2));
+      clock_->AdvanceTimeMilliseconds((kWindowSizeMs / 2) - 1);
+      EXPECT_TRUE(rate_limiter_->TryUseRate(kRateFillingBytes / 2));
+    }
+
+    SimulatedClock* const clock_;
+  } use_rate_task(rate_limiter.get(), &clock_);
+  rtc::PlatformThread thread3(RunTask, &use_rate_task, "Thread3");
+  thread3.Start();
+
+  set_window_size_task.start_signal_.Set();
+  EXPECT_TRUE(set_window_size_task.end_signal_.Wait(kMaxTimeoutMs));
+
+  set_max_rate_task.start_signal_.Set();
+  EXPECT_TRUE(set_max_rate_task.end_signal_.Wait(kMaxTimeoutMs));
+
+  use_rate_task.start_signal_.Set();
+  EXPECT_TRUE(use_rate_task.end_signal_.Wait(kMaxTimeoutMs));
+
+  // All rate consumed.
+  EXPECT_FALSE(rate_limiter->TryUseRate(1));
+
+  thread1.Stop();
+  thread2.Stop();
+  thread3.Stop();
+}
+
+}  // namespace webrtc
diff --git a/rtc_base/rate_statistics.cc b/rtc_base/rate_statistics.cc
new file mode 100644
index 0000000..e2d6b11
--- /dev/null
+++ b/rtc_base/rate_statistics.cc
@@ -0,0 +1,120 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/rate_statistics.h"
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+RateStatistics::RateStatistics(int64_t window_size_ms, float scale)
+    : buckets_(new Bucket[window_size_ms]()),
+      accumulated_count_(0),
+      num_samples_(0),
+      oldest_time_(-window_size_ms),
+      oldest_index_(0),
+      scale_(scale),
+      max_window_size_ms_(window_size_ms),
+      current_window_size_ms_(max_window_size_ms_) {}
+
+RateStatistics::~RateStatistics() {}
+
+void RateStatistics::Reset() {
+  accumulated_count_ = 0;
+  num_samples_ = 0;
+  oldest_time_ = -max_window_size_ms_;
+  oldest_index_ = 0;
+  current_window_size_ms_ = max_window_size_ms_;
+  for (int64_t i = 0; i < max_window_size_ms_; i++)
+    buckets_[i] = Bucket();
+}
+
+void RateStatistics::Update(size_t count, int64_t now_ms) {
+  if (now_ms < oldest_time_) {
+    // Too old data is ignored.
+    return;
+  }
+
+  EraseOld(now_ms);
+
+  // First ever sample, reset window to start now.
+  if (!IsInitialized())
+    oldest_time_ = now_ms;
+
+  uint32_t now_offset = static_cast<uint32_t>(now_ms - oldest_time_);
+  RTC_DCHECK_LT(now_offset, max_window_size_ms_);
+  uint32_t index = oldest_index_ + now_offset;
+  if (index >= max_window_size_ms_)
+    index -= max_window_size_ms_;
+  buckets_[index].sum += count;
+  ++buckets_[index].samples;
+  accumulated_count_ += count;
+  ++num_samples_;
+}
+
+rtc::Optional<uint32_t> RateStatistics::Rate(int64_t now_ms) const {
+  // Yeah, this const_cast ain't pretty, but the alternative is to declare most
+  // of the members as mutable...
+  const_cast<RateStatistics*>(this)->EraseOld(now_ms);
+
+  // If window is a single bucket or there is only one sample in a data set that
+  // has not grown to the full window size, treat this as rate unavailable.
+  int64_t active_window_size = now_ms - oldest_time_ + 1;
+  if (num_samples_ == 0 || active_window_size <= 1 ||
+      (num_samples_ <= 1 && active_window_size < current_window_size_ms_)) {
+    return rtc::nullopt;
+  }
+
+  float scale = scale_ / active_window_size;
+  return static_cast<uint32_t>(accumulated_count_ * scale + 0.5f);
+}
+
+void RateStatistics::EraseOld(int64_t now_ms) {
+  if (!IsInitialized())
+    return;
+
+  // New oldest time that is included in data set.
+  int64_t new_oldest_time = now_ms - current_window_size_ms_ + 1;
+
+  // New oldest time is older than the current one, no need to cull data.
+  if (new_oldest_time <= oldest_time_)
+    return;
+
+  // Loop over buckets and remove too old data points.
+  while (num_samples_ > 0 && oldest_time_ < new_oldest_time) {
+    const Bucket& oldest_bucket = buckets_[oldest_index_];
+    RTC_DCHECK_GE(accumulated_count_, oldest_bucket.sum);
+    RTC_DCHECK_GE(num_samples_, oldest_bucket.samples);
+    accumulated_count_ -= oldest_bucket.sum;
+    num_samples_ -= oldest_bucket.samples;
+    buckets_[oldest_index_] = Bucket();
+    if (++oldest_index_ >= max_window_size_ms_)
+      oldest_index_ = 0;
+    ++oldest_time_;
+  }
+  oldest_time_ = new_oldest_time;
+}
+
+bool RateStatistics::SetWindowSize(int64_t window_size_ms, int64_t now_ms) {
+  if (window_size_ms <= 0 || window_size_ms > max_window_size_ms_)
+    return false;
+
+  current_window_size_ms_ = window_size_ms;
+  EraseOld(now_ms);
+  return true;
+}
+
+bool RateStatistics::IsInitialized() const {
+  return oldest_time_ != -max_window_size_ms_;
+}
+
+}  // namespace webrtc
diff --git a/rtc_base/rate_statistics.h b/rtc_base/rate_statistics.h
new file mode 100644
index 0000000..aa28529
--- /dev/null
+++ b/rtc_base/rate_statistics.h
@@ -0,0 +1,84 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_RATE_STATISTICS_H_
+#define RTC_BASE_RATE_STATISTICS_H_
+
+#include <memory>
+
+#include "api/optional.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class RateStatistics {
+ public:
+  static constexpr float kBpsScale = 8000.0f;
+
+  // max_window_size_ms = Maximum window size in ms for the rate estimation.
+  //                      Initial window size is set to this, but may be changed
+  //                      to something lower by calling SetWindowSize().
+  // scale = coefficient to convert counts/ms to desired unit
+  //         ex: kBpsScale (8000) for bits/s if count represents bytes.
+  RateStatistics(int64_t max_window_size_ms, float scale);
+  ~RateStatistics();
+
+  // Reset instance to original state.
+  void Reset();
+
+  // Update rate with a new data point, moving averaging window as needed.
+  void Update(size_t count, int64_t now_ms);
+
+  // Note that despite this being a const method, it still updates the internal
+  // state (moves averaging window), but it doesn't make any alterations that
+  // are observable from the other methods, as long as supplied timestamps are
+  // from a monotonic clock. Ie, it doesn't matter if this call moves the
+  // window, since any subsequent call to Update or Rate would still have moved
+  // the window as much or more.
+  rtc::Optional<uint32_t> Rate(int64_t now_ms) const;
+
+  // Update the size of the averaging window. The maximum allowed value for
+  // window_size_ms is max_window_size_ms as supplied in the constructor.
+  bool SetWindowSize(int64_t window_size_ms, int64_t now_ms);
+
+ private:
+  void EraseOld(int64_t now_ms);
+  bool IsInitialized() const;
+
+  // Counters are kept in buckets (circular buffer), with one bucket
+  // per millisecond.
+  struct Bucket {
+    size_t sum;      // Sum of all samples in this bucket.
+    size_t samples;  // Number of samples in this bucket.
+  };
+  std::unique_ptr<Bucket[]> buckets_;
+
+  // Total count recorded in buckets.
+  size_t accumulated_count_;
+
+  // The total number of samples in the buckets.
+  size_t num_samples_;
+
+  // Oldest time recorded in buckets.
+  int64_t oldest_time_;
+
+  // Bucket index of oldest counter recorded in buckets.
+  uint32_t oldest_index_;
+
+  // To convert counts/ms to desired units
+  const float scale_;
+
+  // The window sizes, in ms, over which the rate is calculated.
+  const int64_t max_window_size_ms_;
+  int64_t current_window_size_ms_;
+};
+}  // namespace webrtc
+
+#endif  // RTC_BASE_RATE_STATISTICS_H_
diff --git a/rtc_base/rate_statistics_unittest.cc b/rtc_base/rate_statistics_unittest.cc
new file mode 100644
index 0000000..51249b1
--- /dev/null
+++ b/rtc_base/rate_statistics_unittest.cc
@@ -0,0 +1,280 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+
+#include "rtc_base/rate_statistics.h"
+#include "test/gtest.h"
+
+namespace {
+
+using webrtc::RateStatistics;
+
+const int64_t kWindowMs = 500;
+
+class RateStatisticsTest : public ::testing::Test {
+ protected:
+  RateStatisticsTest() : stats_(kWindowMs, 8000) {}
+  RateStatistics stats_;
+};
+
+TEST_F(RateStatisticsTest, TestStrictMode) {
+  int64_t now_ms = 0;
+  EXPECT_FALSE(static_cast<bool>(stats_.Rate(now_ms)));
+
+  const uint32_t kPacketSize = 1500u;
+  const uint32_t kExpectedRateBps = kPacketSize * 1000 * 8;
+
+  // Single data point is not enough for valid estimate.
+  stats_.Update(kPacketSize, now_ms++);
+  EXPECT_FALSE(static_cast<bool>(stats_.Rate(now_ms)));
+
+  // Expecting 1200 kbps since the window is initially kept small and grows as
+  // we have more data.
+  stats_.Update(kPacketSize, now_ms);
+  EXPECT_EQ(kExpectedRateBps, *stats_.Rate(now_ms));
+
+  stats_.Reset();
+  // Expecting 0 after init.
+  EXPECT_FALSE(static_cast<bool>(stats_.Rate(now_ms)));
+
+  const int kInterval = 10;
+  for (int i = 0; i < 100000; ++i) {
+    if (i % kInterval == 0)
+      stats_.Update(kPacketSize, now_ms);
+
+    // Approximately 1200 kbps expected. Not exact since when packets
+    // are removed we will jump 10 ms to the next packet.
+    if (i > kInterval) {
+      rtc::Optional<uint32_t> rate = stats_.Rate(now_ms);
+      EXPECT_TRUE(static_cast<bool>(rate));
+      uint32_t samples = i / kInterval + 1;
+      uint64_t total_bits = samples * kPacketSize * 8;
+      uint32_t rate_bps = static_cast<uint32_t>((1000 * total_bits) / (i + 1));
+      EXPECT_NEAR(rate_bps, *rate, 22000u);
+    }
+    now_ms += 1;
+  }
+  now_ms += kWindowMs;
+  // The window is 2 seconds. If nothing has been received for that time
+  // the estimate should be 0.
+  EXPECT_FALSE(static_cast<bool>(stats_.Rate(now_ms)));
+}
+
+TEST_F(RateStatisticsTest, IncreasingThenDecreasingBitrate) {
+  int64_t now_ms = 0;
+  stats_.Reset();
+  // Expecting 0 after init.
+  EXPECT_FALSE(static_cast<bool>(stats_.Rate(now_ms)));
+
+  stats_.Update(1000, ++now_ms);
+  const uint32_t kExpectedBitrate = 8000000;
+  // 1000 bytes per millisecond until plateau is reached.
+  int prev_error = kExpectedBitrate;
+  rtc::Optional<uint32_t> bitrate;
+  while (++now_ms < 10000) {
+    stats_.Update(1000, now_ms);
+    bitrate = stats_.Rate(now_ms);
+    EXPECT_TRUE(static_cast<bool>(bitrate));
+    int error = kExpectedBitrate - *bitrate;
+    error = std::abs(error);
+    // Expect the estimation error to decrease as the window is extended.
+    EXPECT_LE(error, prev_error + 1);
+    prev_error = error;
+  }
+  // Window filled, expect to be close to 8000000.
+  EXPECT_EQ(kExpectedBitrate, *bitrate);
+
+  // 1000 bytes per millisecond until 10-second mark, 8000 kbps expected.
+  while (++now_ms < 10000) {
+    stats_.Update(1000, now_ms);
+    bitrate = stats_.Rate(now_ms);
+    EXPECT_EQ(kExpectedBitrate, *bitrate);
+  }
+
+  // Zero bytes per millisecond until 0 is reached.
+  while (++now_ms < 20000) {
+    stats_.Update(0, now_ms);
+    rtc::Optional<uint32_t> new_bitrate = stats_.Rate(now_ms);
+    if (static_cast<bool>(new_bitrate) && *new_bitrate != *bitrate) {
+      // New bitrate must be lower than previous one.
+      EXPECT_LT(*new_bitrate, *bitrate);
+    } else {
+      // 0 kbps expected.
+      EXPECT_EQ(0u, *new_bitrate);
+      break;
+    }
+    bitrate = new_bitrate;
+  }
+
+  // Zero bytes per millisecond until 20-second mark, 0 kbps expected.
+  while (++now_ms < 20000) {
+    stats_.Update(0, now_ms);
+    EXPECT_EQ(0u, *stats_.Rate(now_ms));
+  }
+}
+
+TEST_F(RateStatisticsTest, ResetAfterSilence) {
+  int64_t now_ms = 0;
+  stats_.Reset();
+  // Expecting 0 after init.
+  EXPECT_FALSE(static_cast<bool>(stats_.Rate(now_ms)));
+
+  const uint32_t kExpectedBitrate = 8000000;
+  // 1000 bytes per millisecond until the window has been filled.
+  int prev_error = kExpectedBitrate;
+  rtc::Optional<uint32_t> bitrate;
+  while (++now_ms < 10000) {
+    stats_.Update(1000, now_ms);
+    bitrate = stats_.Rate(now_ms);
+    if (bitrate) {
+      int error = kExpectedBitrate - *bitrate;
+      error = std::abs(error);
+      // Expect the estimation error to decrease as the window is extended.
+      EXPECT_LE(error, prev_error + 1);
+      prev_error = error;
+    }
+  }
+  // Window filled, expect to be close to 8000000.
+  EXPECT_EQ(kExpectedBitrate, *bitrate);
+
+  now_ms += kWindowMs + 1;
+  EXPECT_FALSE(static_cast<bool>(stats_.Rate(now_ms)));
+  stats_.Update(1000, now_ms);
+  ++now_ms;
+  stats_.Update(1000, now_ms);
+  // We expect two samples of 1000 bytes, and that the bitrate is measured over
+  // 500 ms, i.e. 2 * 8 * 1000 / 0.500 = 32000.
+  EXPECT_EQ(32000u, *stats_.Rate(now_ms));
+
+  // Reset, add the same samples again.
+  stats_.Reset();
+  EXPECT_FALSE(static_cast<bool>(stats_.Rate(now_ms)));
+  stats_.Update(1000, now_ms);
+  ++now_ms;
+  stats_.Update(1000, now_ms);
+  // We expect two samples of 1000 bytes, and that the bitrate is measured over
+  // 2 ms (window size has been reset) i.e. 2 * 8 * 1000 / 0.002 = 8000000.
+  EXPECT_EQ(kExpectedBitrate, *stats_.Rate(now_ms));
+}
+
+TEST_F(RateStatisticsTest, HandlesChangingWindowSize) {
+  int64_t now_ms = 0;
+  stats_.Reset();
+
+  // Sanity test window size.
+  EXPECT_TRUE(stats_.SetWindowSize(kWindowMs, now_ms));
+  EXPECT_FALSE(stats_.SetWindowSize(kWindowMs + 1, now_ms));
+  EXPECT_FALSE(stats_.SetWindowSize(0, now_ms));
+  EXPECT_TRUE(stats_.SetWindowSize(1, now_ms));
+  EXPECT_TRUE(stats_.SetWindowSize(kWindowMs, now_ms));
+
+  // Fill the buffer at a rate of 1 byte / millisecond (8 kbps).
+  const int kBatchSize = 10;
+  for (int i = 0; i <= kWindowMs; i += kBatchSize)
+    stats_.Update(kBatchSize, now_ms += kBatchSize);
+  EXPECT_EQ(static_cast<uint32_t>(8000), *stats_.Rate(now_ms));
+
+  // Halve the window size, rate should stay the same.
+  EXPECT_TRUE(stats_.SetWindowSize(kWindowMs / 2, now_ms));
+  EXPECT_EQ(static_cast<uint32_t>(8000), *stats_.Rate(now_ms));
+
+  // Double the window size again, rate should stay the same. (As the window
+  // won't actually expand until new bit and bobs fall into it.
+  EXPECT_TRUE(stats_.SetWindowSize(kWindowMs, now_ms));
+  EXPECT_EQ(static_cast<uint32_t>(8000), *stats_.Rate(now_ms));
+
+  // Fill the now empty half with bits it twice the rate.
+  for (int i = 0; i < kWindowMs / 2; i += kBatchSize)
+    stats_.Update(kBatchSize * 2, now_ms += kBatchSize);
+
+  // Rate should have increase be 50%.
+  EXPECT_EQ(static_cast<uint32_t>((8000 * 3) / 2), *stats_.Rate(now_ms));
+}
+
+TEST_F(RateStatisticsTest, RespectsWindowSizeEdges) {
+  int64_t now_ms = 0;
+  stats_.Reset();
+  // Expecting 0 after init.
+  EXPECT_FALSE(static_cast<bool>(stats_.Rate(now_ms)));
+
+  // One byte per ms, using one big sample.
+  stats_.Update(kWindowMs, now_ms);
+  now_ms += kWindowMs - 2;
+  // Shouldn't work! (Only one sample, not full window size.)
+  EXPECT_FALSE(static_cast<bool>(stats_.Rate(now_ms)));
+
+  // Window size should be full, and the single data point should be accepted.
+  ++now_ms;
+  rtc::Optional<uint32_t> bitrate = stats_.Rate(now_ms);
+  EXPECT_TRUE(static_cast<bool>(bitrate));
+  EXPECT_EQ(1000 * 8u, *bitrate);
+
+  // Add another, now we have twice the bitrate.
+  stats_.Update(kWindowMs, now_ms);
+  bitrate = stats_.Rate(now_ms);
+  EXPECT_TRUE(static_cast<bool>(bitrate));
+  EXPECT_EQ(2 * 1000 * 8u, *bitrate);
+
+  // Now that first sample should drop out...
+  now_ms += 1;
+  bitrate = stats_.Rate(now_ms);
+  EXPECT_TRUE(static_cast<bool>(bitrate));
+  EXPECT_EQ(1000 * 8u, *bitrate);
+}
+
+TEST_F(RateStatisticsTest, HandlesZeroCounts) {
+  int64_t now_ms = 0;
+  stats_.Reset();
+  // Expecting 0 after init.
+  EXPECT_FALSE(static_cast<bool>(stats_.Rate(now_ms)));
+
+  stats_.Update(kWindowMs, now_ms);
+  now_ms += kWindowMs - 1;
+  stats_.Update(0, now_ms);
+  rtc::Optional<uint32_t> bitrate = stats_.Rate(now_ms);
+  EXPECT_TRUE(static_cast<bool>(bitrate));
+  EXPECT_EQ(1000 * 8u, *bitrate);
+
+  // Move window along so first data point falls out.
+  ++now_ms;
+  bitrate = stats_.Rate(now_ms);
+  EXPECT_TRUE(static_cast<bool>(bitrate));
+  EXPECT_EQ(0u, *bitrate);
+
+  // Move window so last data point falls out.
+  now_ms += kWindowMs;
+  EXPECT_FALSE(static_cast<bool>(stats_.Rate(now_ms)));
+}
+
+TEST_F(RateStatisticsTest, HandlesQuietPeriods) {
+  int64_t now_ms = 0;
+  stats_.Reset();
+  // Expecting 0 after init.
+  EXPECT_FALSE(static_cast<bool>(stats_.Rate(now_ms)));
+
+  stats_.Update(0, now_ms);
+  now_ms += kWindowMs - 1;
+  rtc::Optional<uint32_t> bitrate = stats_.Rate(now_ms);
+  EXPECT_TRUE(static_cast<bool>(bitrate));
+  EXPECT_EQ(0u, *bitrate);
+
+  // Move window along so first data point falls out.
+  ++now_ms;
+  EXPECT_FALSE(static_cast<bool>(stats_.Rate(now_ms)));
+
+  // Move window a long way out.
+  now_ms += 2 * kWindowMs;
+  stats_.Update(0, now_ms);
+  bitrate = stats_.Rate(now_ms);
+  EXPECT_TRUE(static_cast<bool>(bitrate));
+  EXPECT_EQ(0u, *bitrate);
+}
+}  // namespace
diff --git a/rtc_base/ratelimiter.h b/rtc_base/ratelimiter.h
new file mode 100644
index 0000000..8aa84aa
--- /dev/null
+++ b/rtc_base/ratelimiter.h
@@ -0,0 +1,24 @@
+/*
+ *  Copyright 2018 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_RATELIMITER_H_
+#define RTC_BASE_RATELIMITER_H_
+
+#include "rtc_base/data_rate_limiter.h"
+
+namespace rtc {
+// Deprecated, use DataRateLimiter instead
+class RateLimiter : public DataRateLimiter {
+ public:
+  using DataRateLimiter::DataRateLimiter;
+};
+}  // namespace rtc
+
+#endif  // RTC_BASE_RATELIMITER_H_
diff --git a/rtc_base/ratetracker.cc b/rtc_base/ratetracker.cc
new file mode 100644
index 0000000..68bd8a1
--- /dev/null
+++ b/rtc_base/ratetracker.cc
@@ -0,0 +1,154 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/ratetracker.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/timeutils.h"
+
+namespace rtc {
+
+static const int64_t kTimeUnset = -1;
+
+RateTracker::RateTracker(int64_t bucket_milliseconds, size_t bucket_count)
+    : bucket_milliseconds_(bucket_milliseconds),
+      bucket_count_(bucket_count),
+      sample_buckets_(new size_t[bucket_count + 1]),
+      total_sample_count_(0u),
+      bucket_start_time_milliseconds_(kTimeUnset) {
+  RTC_CHECK(bucket_milliseconds > 0);
+  RTC_CHECK(bucket_count > 0);
+}
+
+RateTracker::~RateTracker() {
+  delete[] sample_buckets_;
+}
+
+double RateTracker::ComputeRateForInterval(
+    int64_t interval_milliseconds) const {
+  if (bucket_start_time_milliseconds_ == kTimeUnset) {
+    return 0.0;
+  }
+  int64_t current_time = Time();
+  // Calculate which buckets to sum up given the current time.  If the time
+  // has passed to a new bucket then we have to skip some of the oldest buckets.
+  int64_t available_interval_milliseconds =
+      std::min(interval_milliseconds,
+               bucket_milliseconds_ * static_cast<int64_t>(bucket_count_));
+  // number of old buckets (i.e. after the current bucket in the ring buffer)
+  // that are expired given our current time interval.
+  size_t buckets_to_skip;
+  // Number of milliseconds of the first bucket that are not a portion of the
+  // current interval.
+  int64_t milliseconds_to_skip;
+  if (current_time >
+      initialization_time_milliseconds_ + available_interval_milliseconds) {
+    int64_t time_to_skip =
+        current_time - bucket_start_time_milliseconds_ +
+        static_cast<int64_t>(bucket_count_) * bucket_milliseconds_ -
+        available_interval_milliseconds;
+    buckets_to_skip = time_to_skip / bucket_milliseconds_;
+    milliseconds_to_skip = time_to_skip % bucket_milliseconds_;
+  } else {
+    buckets_to_skip = bucket_count_ - current_bucket_;
+    milliseconds_to_skip = 0;
+    available_interval_milliseconds =
+        TimeDiff(current_time, initialization_time_milliseconds_);
+    // Let one bucket interval pass after initialization before reporting.
+    if (available_interval_milliseconds < bucket_milliseconds_) {
+      return 0.0;
+    }
+  }
+  // If we're skipping all buckets that means that there have been no samples
+  // within the sampling interval so report 0.
+  if (buckets_to_skip > bucket_count_ || available_interval_milliseconds == 0) {
+    return 0.0;
+  }
+  size_t start_bucket = NextBucketIndex(current_bucket_ + buckets_to_skip);
+  // Only count a portion of the first bucket according to how much of the
+  // first bucket is within the current interval.
+  size_t total_samples = ((sample_buckets_[start_bucket] *
+      (bucket_milliseconds_ - milliseconds_to_skip)) +
+      (bucket_milliseconds_ >> 1)) /
+      bucket_milliseconds_;
+  // All other buckets in the interval are counted in their entirety.
+  for (size_t i = NextBucketIndex(start_bucket);
+      i != NextBucketIndex(current_bucket_);
+      i = NextBucketIndex(i)) {
+    total_samples += sample_buckets_[i];
+  }
+  // Convert to samples per second.
+  return static_cast<double>(total_samples * 1000) /
+         static_cast<double>(available_interval_milliseconds);
+}
+
+double RateTracker::ComputeTotalRate() const {
+  if (bucket_start_time_milliseconds_ == kTimeUnset) {
+    return 0.0;
+  }
+  int64_t current_time = Time();
+  if (current_time <= initialization_time_milliseconds_) {
+    return 0.0;
+  }
+  return static_cast<double>(total_sample_count_ * 1000) /
+         static_cast<double>(
+             TimeDiff(current_time, initialization_time_milliseconds_));
+}
+
+size_t RateTracker::TotalSampleCount() const {
+  return total_sample_count_;
+}
+
+void RateTracker::AddSamples(size_t sample_count) {
+  EnsureInitialized();
+  int64_t current_time = Time();
+  // Advance the current bucket as needed for the current time, and reset
+  // bucket counts as we advance.
+  for (size_t i = 0;
+       i <= bucket_count_ &&
+       current_time >= bucket_start_time_milliseconds_ + bucket_milliseconds_;
+       ++i) {
+    bucket_start_time_milliseconds_ += bucket_milliseconds_;
+    current_bucket_ = NextBucketIndex(current_bucket_);
+    sample_buckets_[current_bucket_] = 0;
+  }
+  // Ensure that bucket_start_time_milliseconds_ is updated appropriately if
+  // the entire buffer of samples has been expired.
+  bucket_start_time_milliseconds_ += bucket_milliseconds_ *
+      ((current_time - bucket_start_time_milliseconds_) / bucket_milliseconds_);
+  // Add all samples in the bucket that includes the current time.
+  sample_buckets_[current_bucket_] += sample_count;
+  total_sample_count_ += sample_count;
+}
+
+int64_t RateTracker::Time() const {
+  return rtc::TimeMillis();
+}
+
+void RateTracker::EnsureInitialized() {
+  if (bucket_start_time_milliseconds_ == kTimeUnset) {
+    initialization_time_milliseconds_ = Time();
+    bucket_start_time_milliseconds_ = initialization_time_milliseconds_;
+    current_bucket_ = 0;
+    // We only need to initialize the first bucket because we reset buckets when
+    // current_bucket_ increments.
+    sample_buckets_[current_bucket_] = 0;
+  }
+}
+
+size_t RateTracker::NextBucketIndex(size_t bucket_index) const {
+  return (bucket_index + 1u) % (bucket_count_ + 1u);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/ratetracker.h b/rtc_base/ratetracker.h
new file mode 100644
index 0000000..05eccff
--- /dev/null
+++ b/rtc_base/ratetracker.h
@@ -0,0 +1,69 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_RATETRACKER_H_
+#define RTC_BASE_RATETRACKER_H_
+
+#include <stdlib.h>
+#include "rtc_base/basictypes.h"
+
+namespace rtc {
+
+// Computes units per second over a given interval by tracking the units over
+// each bucket of a given size and calculating the instantaneous rate assuming
+// that over each bucket the rate was constant.
+class RateTracker {
+ public:
+  RateTracker(int64_t bucket_milliseconds, size_t bucket_count);
+  virtual ~RateTracker();
+
+  // Computes the average rate over the most recent interval_milliseconds,
+  // or if the first sample was added within this period, computes the rate
+  // since the first sample was added.
+  double ComputeRateForInterval(int64_t interval_milliseconds) const;
+
+  // Computes the average rate over the rate tracker's recording interval
+  // of bucket_milliseconds * bucket_count.
+  double ComputeRate() const {
+    return ComputeRateForInterval(bucket_milliseconds_ *
+                                  static_cast<int64_t>(bucket_count_));
+  }
+
+  // Computes the average rate since the first sample was added to the
+  // rate tracker.
+  double ComputeTotalRate() const;
+
+  // The total number of samples added.
+  size_t TotalSampleCount() const;
+
+  // Reads the current time in order to determine the appropriate bucket for
+  // these samples, and increments the count for that bucket by sample_count.
+  void AddSamples(size_t sample_count);
+
+ protected:
+  // overrideable for tests
+  virtual int64_t Time() const;
+
+ private:
+  void EnsureInitialized();
+  size_t NextBucketIndex(size_t bucket_index) const;
+
+  const int64_t bucket_milliseconds_;
+  const size_t bucket_count_;
+  size_t* sample_buckets_;
+  size_t total_sample_count_;
+  size_t current_bucket_;
+  int64_t bucket_start_time_milliseconds_;
+  int64_t initialization_time_milliseconds_;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_RATETRACKER_H_
diff --git a/rtc_base/ratetracker_unittest.cc b/rtc_base/ratetracker_unittest.cc
new file mode 100644
index 0000000..cd288a3
--- /dev/null
+++ b/rtc_base/ratetracker_unittest.cc
@@ -0,0 +1,168 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/ratetracker.h"
+#include "rtc_base/gunit.h"
+
+namespace rtc {
+namespace {
+  const uint32_t kBucketIntervalMs = 100;
+}  // namespace
+
+class RateTrackerForTest : public RateTracker {
+ public:
+  RateTrackerForTest() : RateTracker(kBucketIntervalMs, 10u), time_(0) {}
+  int64_t Time() const override { return time_; }
+  void AdvanceTime(int delta) { time_ += delta; }
+
+ private:
+  int64_t time_;
+};
+
+TEST(RateTrackerTest, Test30FPS) {
+  RateTrackerForTest tracker;
+
+  for (int i = 0; i < 300; ++i) {
+    tracker.AddSamples(1);
+    tracker.AdvanceTime(33);
+    if (i % 3 == 0) {
+      tracker.AdvanceTime(1);
+    }
+  }
+  EXPECT_DOUBLE_EQ(30.0, tracker.ComputeRateForInterval(50000));
+}
+
+TEST(RateTrackerTest, Test60FPS) {
+  RateTrackerForTest tracker;
+
+  for (int i = 0; i < 300; ++i) {
+    tracker.AddSamples(1);
+    tracker.AdvanceTime(16);
+    if (i % 3 != 0) {
+      tracker.AdvanceTime(1);
+    }
+  }
+  EXPECT_DOUBLE_EQ(60.0, tracker.ComputeRateForInterval(1000));
+}
+
+TEST(RateTrackerTest, TestRateTrackerBasics) {
+  RateTrackerForTest tracker;
+  EXPECT_DOUBLE_EQ(0.0, tracker.ComputeRateForInterval(1000));
+
+  // Add a sample.
+  tracker.AddSamples(1234);
+  // Advance the clock by less than one bucket interval (no rate returned).
+  tracker.AdvanceTime(kBucketIntervalMs - 1);
+  EXPECT_DOUBLE_EQ(0.0, tracker.ComputeRate());
+  // Advance the clock by 100 ms (one bucket interval).
+  tracker.AdvanceTime(1);
+  EXPECT_DOUBLE_EQ(12340.0, tracker.ComputeRateForInterval(1000));
+  EXPECT_DOUBLE_EQ(12340.0, tracker.ComputeRate());
+  EXPECT_EQ(1234U, tracker.TotalSampleCount());
+  EXPECT_DOUBLE_EQ(12340.0, tracker.ComputeTotalRate());
+
+  // Repeat.
+  tracker.AddSamples(1234);
+  tracker.AdvanceTime(100);
+  EXPECT_DOUBLE_EQ(12340.0, tracker.ComputeRateForInterval(1000));
+  EXPECT_DOUBLE_EQ(12340.0, tracker.ComputeRate());
+  EXPECT_EQ(1234U * 2, tracker.TotalSampleCount());
+  EXPECT_DOUBLE_EQ(12340.0, tracker.ComputeTotalRate());
+
+  // Advance the clock by 800 ms, so we've elapsed a full second.
+  // units_second should now be filled in properly.
+  tracker.AdvanceTime(800);
+  EXPECT_DOUBLE_EQ(1234.0 * 2.0, tracker.ComputeRateForInterval(1000));
+  EXPECT_DOUBLE_EQ(1234.0 * 2.0, tracker.ComputeRate());
+  EXPECT_EQ(1234U * 2, tracker.TotalSampleCount());
+  EXPECT_DOUBLE_EQ(1234.0 * 2.0, tracker.ComputeTotalRate());
+
+  // Poll the tracker again immediately. The reported rate should stay the same.
+  EXPECT_DOUBLE_EQ(1234.0 * 2.0, tracker.ComputeRateForInterval(1000));
+  EXPECT_DOUBLE_EQ(1234.0 * 2.0, tracker.ComputeRate());
+  EXPECT_EQ(1234U * 2, tracker.TotalSampleCount());
+  EXPECT_DOUBLE_EQ(1234.0 * 2.0, tracker.ComputeTotalRate());
+
+  // Do nothing and advance by a second. We should drop down to zero.
+  tracker.AdvanceTime(1000);
+  EXPECT_DOUBLE_EQ(0.0, tracker.ComputeRateForInterval(1000));
+  EXPECT_DOUBLE_EQ(0.0, tracker.ComputeRate());
+  EXPECT_EQ(1234U * 2, tracker.TotalSampleCount());
+  EXPECT_DOUBLE_EQ(1234.0, tracker.ComputeTotalRate());
+
+  // Send a bunch of data at a constant rate for 5.5 "seconds".
+  // We should report the rate properly.
+  for (int i = 0; i < 5500; i += 100) {
+    tracker.AddSamples(9876U);
+    tracker.AdvanceTime(100);
+  }
+  EXPECT_DOUBLE_EQ(9876.0 * 10.0, tracker.ComputeRateForInterval(1000));
+  EXPECT_DOUBLE_EQ(9876.0 * 10.0, tracker.ComputeRate());
+  EXPECT_EQ(1234U * 2 + 9876U * 55, tracker.TotalSampleCount());
+  EXPECT_DOUBLE_EQ((1234.0 * 2.0 + 9876.0 * 55.0) / 7.5,
+      tracker.ComputeTotalRate());
+
+  // Advance the clock by 500 ms. Since we sent nothing over this half-second,
+  // the reported rate should be reduced by half.
+  tracker.AdvanceTime(500);
+  EXPECT_DOUBLE_EQ(9876.0 * 5.0, tracker.ComputeRateForInterval(1000));
+  EXPECT_DOUBLE_EQ(9876.0 * 5.0, tracker.ComputeRate());
+  EXPECT_EQ(1234U * 2 + 9876U * 55, tracker.TotalSampleCount());
+  EXPECT_DOUBLE_EQ((1234.0 * 2.0 + 9876.0 * 55.0) / 8.0,
+      tracker.ComputeTotalRate());
+
+  // Rate over the last half second should be zero.
+  EXPECT_DOUBLE_EQ(0.0, tracker.ComputeRateForInterval(500));
+}
+
+TEST(RateTrackerTest, TestLongPeriodBetweenSamples) {
+  RateTrackerForTest tracker;
+  tracker.AddSamples(1);
+  tracker.AdvanceTime(1000);
+  EXPECT_DOUBLE_EQ(1.0, tracker.ComputeRate());
+
+  tracker.AdvanceTime(2000);
+  EXPECT_DOUBLE_EQ(0.0, tracker.ComputeRate());
+
+  tracker.AdvanceTime(2000);
+  tracker.AddSamples(1);
+  EXPECT_DOUBLE_EQ(1.0, tracker.ComputeRate());
+}
+
+TEST(RateTrackerTest, TestRolloff) {
+  RateTrackerForTest tracker;
+  for (int i = 0; i < 10; ++i) {
+    tracker.AddSamples(1U);
+    tracker.AdvanceTime(100);
+  }
+  EXPECT_DOUBLE_EQ(10.0, tracker.ComputeRate());
+
+  for (int i = 0; i < 10; ++i) {
+    tracker.AddSamples(1U);
+    tracker.AdvanceTime(50);
+  }
+  EXPECT_DOUBLE_EQ(15.0, tracker.ComputeRate());
+  EXPECT_DOUBLE_EQ(20.0, tracker.ComputeRateForInterval(500));
+
+  for (int i = 0; i < 10; ++i) {
+    tracker.AddSamples(1U);
+    tracker.AdvanceTime(50);
+  }
+  EXPECT_DOUBLE_EQ(20.0, tracker.ComputeRate());
+}
+
+TEST(RateTrackerTest, TestGetUnitSecondsAfterInitialValue) {
+  RateTrackerForTest tracker;
+  tracker.AddSamples(1234);
+  tracker.AdvanceTime(1000);
+  EXPECT_DOUBLE_EQ(1234.0, tracker.ComputeRateForInterval(1000));
+}
+
+}  // namespace rtc
diff --git a/rtc_base/refcount.h b/rtc_base/refcount.h
new file mode 100644
index 0000000..fb0971c
--- /dev/null
+++ b/rtc_base/refcount.h
@@ -0,0 +1,67 @@
+/*
+ *  Copyright 2011 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef RTC_BASE_REFCOUNT_H_
+#define RTC_BASE_REFCOUNT_H_
+
+namespace rtc {
+
+// Refcounted objects should implement the following informal interface:
+//
+// void AddRef() const ;
+// RefCountReleaseStatus Release() const;
+//
+// You may access members of a reference-counted object, including the AddRef()
+// and Release() methods, only if you already own a reference to it, or if
+// you're borrowing someone else's reference. (A newly created object is a
+// special case: the reference count is zero on construction, and the code that
+// creates the object should immediately call AddRef(), bringing the reference
+// count from zero to one, e.g., by constructing an rtc::scoped_refptr).
+//
+// AddRef() creates a new reference to the object.
+//
+// Release() releases a reference to the object; the caller now has one less
+// reference than before the call. Returns kDroppedLastRef if the number of
+// references dropped to zero because of this (in which case the object destroys
+// itself). Otherwise, returns kOtherRefsRemained, to signal that at the precise
+// time the caller's reference was dropped, other references still remained (but
+// if other threads own references, this may of course have changed by the time
+// Release() returns).
+//
+// The caller of Release() must treat it in the same way as a delete operation:
+// Regardless of the return value from Release(), the caller mustn't access the
+// object. The object might still be alive, due to references held by other
+// users of the object, but the object can go away at any time, e.g., as the
+// result of another thread calling Release().
+//
+// Calling AddRef() and Release() manually is discouraged. It's recommended to
+// use rtc::scoped_refptr to manage all pointers to reference counted objects.
+// Note that rtc::scoped_refptr depends on compile-time duck-typing; formally
+// implementing the below RefCountInterface is not required.
+
+enum class RefCountReleaseStatus { kDroppedLastRef, kOtherRefsRemained };
+
+// Interfaces where refcounting is part of the public api should
+// inherit this abstract interface. The implementation of these
+// methods is usually provided by the RefCountedObject template class,
+// applied as a leaf in the inheritance tree.
+class RefCountInterface {
+ public:
+  virtual void AddRef() const = 0;
+  virtual RefCountReleaseStatus Release() const = 0;
+
+  // Non-public destructor, because Release() has exclusive responsibility for
+  // destroying the object.
+ protected:
+  virtual ~RefCountInterface() {}
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_REFCOUNT_H_
diff --git a/rtc_base/refcountedobject.h b/rtc_base/refcountedobject.h
new file mode 100644
index 0000000..da3ed9f
--- /dev/null
+++ b/rtc_base/refcountedobject.h
@@ -0,0 +1,63 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef RTC_BASE_REFCOUNTEDOBJECT_H_
+#define RTC_BASE_REFCOUNTEDOBJECT_H_
+
+#include <utility>
+
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/refcounter.h"
+
+namespace rtc {
+
+template <class T>
+class RefCountedObject : public T {
+ public:
+  RefCountedObject() {}
+
+  template <class P0>
+  explicit RefCountedObject(P0&& p0) : T(std::forward<P0>(p0)) {}
+
+  template <class P0, class P1, class... Args>
+  RefCountedObject(P0&& p0, P1&& p1, Args&&... args)
+      : T(std::forward<P0>(p0),
+          std::forward<P1>(p1),
+          std::forward<Args>(args)...) {}
+
+  virtual void AddRef() const { ref_count_.IncRef(); }
+
+  virtual RefCountReleaseStatus Release() const {
+    const auto status = ref_count_.DecRef();
+    if (status == RefCountReleaseStatus::kDroppedLastRef) {
+      delete this;
+    }
+    return status;
+  }
+
+  // Return whether the reference count is one. If the reference count is used
+  // in the conventional way, a reference count of 1 implies that the current
+  // thread owns the reference and no other thread shares it. This call
+  // performs the test for a reference count of one, and performs the memory
+  // barrier needed for the owning thread to act on the object, knowing that it
+  // has exclusive access to the object.
+  virtual bool HasOneRef() const { return ref_count_.HasOneRef(); }
+
+ protected:
+  virtual ~RefCountedObject() {}
+
+  mutable webrtc::webrtc_impl::RefCounter ref_count_{0};
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(RefCountedObject);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_REFCOUNTEDOBJECT_H_
diff --git a/rtc_base/refcountedobject_unittest.cc b/rtc_base/refcountedobject_unittest.cc
new file mode 100644
index 0000000..4744525
--- /dev/null
+++ b/rtc_base/refcountedobject_unittest.cc
@@ -0,0 +1,94 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string>
+
+#include "rtc_base/gunit.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/refcountedobject.h"
+
+namespace rtc {
+
+namespace {
+
+class A {
+ public:
+  A() {}
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(A);
+};
+
+class RefClass : public RefCountInterface {
+ public:
+  RefClass() {}
+
+ protected:
+  ~RefClass() override {}
+};
+
+class RefClassWithRvalue : public RefCountInterface {
+ public:
+  explicit RefClassWithRvalue(std::unique_ptr<A> a) : a_(std::move(a)) {}
+
+ protected:
+  ~RefClassWithRvalue() override {}
+
+ public:
+  std::unique_ptr<A> a_;
+};
+
+class RefClassWithMixedValues : public RefCountInterface {
+ public:
+  RefClassWithMixedValues(std::unique_ptr<A> a, int b, const std::string& c)
+      : a_(std::move(a)), b_(b), c_(c) {}
+
+ protected:
+  ~RefClassWithMixedValues() override {}
+
+ public:
+  std::unique_ptr<A> a_;
+  int b_;
+  std::string c_;
+};
+
+}  // namespace
+
+TEST(RefCountedObject, HasOneRef) {
+  scoped_refptr<RefCountedObject<RefClass>> aref(
+      new RefCountedObject<RefClass>());
+  EXPECT_TRUE(aref->HasOneRef());
+  aref->AddRef();
+  EXPECT_FALSE(aref->HasOneRef());
+  EXPECT_EQ(aref->Release(), RefCountReleaseStatus::kOtherRefsRemained);
+  EXPECT_TRUE(aref->HasOneRef());
+}
+
+TEST(RefCountedObject, SupportRValuesInCtor) {
+  std::unique_ptr<A> a(new A());
+  scoped_refptr<RefClassWithRvalue> ref(
+      new RefCountedObject<RefClassWithRvalue>(std::move(a)));
+  EXPECT_TRUE(ref->a_.get() != nullptr);
+  EXPECT_TRUE(a.get() == nullptr);
+}
+
+TEST(RefCountedObject, SupportMixedTypesInCtor) {
+  std::unique_ptr<A> a(new A());
+  int b = 9;
+  std::string c = "hello";
+  scoped_refptr<RefClassWithMixedValues> ref(
+      new RefCountedObject<RefClassWithMixedValues>(std::move(a), b, c));
+  EXPECT_TRUE(ref->a_.get() != nullptr);
+  EXPECT_TRUE(a.get() == nullptr);
+  EXPECT_EQ(b, ref->b_);
+  EXPECT_EQ(c, ref->c_);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/refcounter.h b/rtc_base/refcounter.h
new file mode 100644
index 0000000..baf72ae
--- /dev/null
+++ b/rtc_base/refcounter.h
@@ -0,0 +1,52 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef RTC_BASE_REFCOUNTER_H_
+#define RTC_BASE_REFCOUNTER_H_
+
+#include "rtc_base/atomicops.h"
+#include "rtc_base/refcount.h"
+
+namespace webrtc {
+namespace webrtc_impl {
+
+class RefCounter {
+ public:
+  explicit RefCounter(int ref_count) : ref_count_(ref_count) {}
+  RefCounter() = delete;
+
+  void IncRef() { rtc::AtomicOps::Increment(&ref_count_); }
+
+  // TODO(nisse): Switch return type to RefCountReleaseStatus?
+  // Returns true if this was the last reference, and the resource protected by
+  // the reference counter can be deleted.
+  rtc::RefCountReleaseStatus DecRef() {
+    return (rtc::AtomicOps::Decrement(&ref_count_) == 0)
+        ? rtc::RefCountReleaseStatus::kDroppedLastRef
+        : rtc::RefCountReleaseStatus::kOtherRefsRemained;
+  }
+
+  // Return whether the reference count is one. If the reference count is used
+  // in the conventional way, a reference count of 1 implies that the current
+  // thread owns the reference and no other thread shares it. This call performs
+  // the test for a reference count of one, and performs the memory barrier
+  // needed for the owning thread to act on the resource protected by the
+  // reference counter, knowing that it has exclusive access.
+  bool HasOneRef() const {
+    return rtc::AtomicOps::AcquireLoad(&ref_count_) == 1;
+  }
+
+ private:
+  volatile int ref_count_;
+};
+
+}  // namespace webrtc_impl
+}  // namespace webrtc
+
+#endif  // RTC_BASE_REFCOUNTER_H_
diff --git a/rtc_base/rollingaccumulator.h b/rtc_base/rollingaccumulator.h
new file mode 100644
index 0000000..e7d5b06
--- /dev/null
+++ b/rtc_base/rollingaccumulator.h
@@ -0,0 +1,174 @@
+/*
+ *  Copyright 2011 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_ROLLINGACCUMULATOR_H_
+#define RTC_BASE_ROLLINGACCUMULATOR_H_
+
+#include <algorithm>
+#include <vector>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+
+namespace rtc {
+
+// RollingAccumulator stores and reports statistics
+// over N most recent samples.
+//
+// T is assumed to be an int, long, double or float.
+template<typename T>
+class RollingAccumulator {
+ public:
+  explicit RollingAccumulator(size_t max_count)
+    : samples_(max_count) {
+    Reset();
+  }
+  ~RollingAccumulator() {
+  }
+
+  size_t max_count() const {
+    return samples_.size();
+  }
+
+  size_t count() const {
+    return count_;
+  }
+
+  void Reset() {
+    count_ = 0U;
+    next_index_ = 0U;
+    sum_ = 0.0;
+    sum_2_ = 0.0;
+    max_ = T();
+    max_stale_ = false;
+    min_ = T();
+    min_stale_ = false;
+  }
+
+  void AddSample(T sample) {
+    if (count_ == max_count()) {
+      // Remove oldest sample.
+      T sample_to_remove = samples_[next_index_];
+      sum_ -= sample_to_remove;
+      sum_2_ -= static_cast<double>(sample_to_remove) * sample_to_remove;
+      if (sample_to_remove >= max_) {
+        max_stale_ = true;
+      }
+      if (sample_to_remove <= min_) {
+        min_stale_ = true;
+      }
+    } else {
+      // Increase count of samples.
+      ++count_;
+    }
+    // Add new sample.
+    samples_[next_index_] = sample;
+    sum_ += sample;
+    sum_2_ += static_cast<double>(sample) * sample;
+    if (count_ == 1 || sample >= max_) {
+      max_ = sample;
+      max_stale_ = false;
+    }
+    if (count_ == 1 || sample <= min_) {
+      min_ = sample;
+      min_stale_ = false;
+    }
+    // Update next_index_.
+    next_index_ = (next_index_ + 1) % max_count();
+  }
+
+  T ComputeSum() const {
+    return static_cast<T>(sum_);
+  }
+
+  double ComputeMean() const {
+    if (count_ == 0) {
+      return 0.0;
+    }
+    return sum_ / count_;
+  }
+
+  T ComputeMax() const {
+    if (max_stale_) {
+      RTC_DCHECK(count_ > 0) <<
+                 "It shouldn't be possible for max_stale_ && count_ == 0";
+      max_ = samples_[next_index_];
+      for (size_t i = 1u; i < count_; i++) {
+        max_ = std::max(max_, samples_[(next_index_ + i) % max_count()]);
+      }
+      max_stale_ = false;
+    }
+    return max_;
+  }
+
+  T ComputeMin() const {
+    if (min_stale_) {
+      RTC_DCHECK(count_ > 0) <<
+                 "It shouldn't be possible for min_stale_ && count_ == 0";
+      min_ = samples_[next_index_];
+      for (size_t i = 1u; i < count_; i++) {
+        min_ = std::min(min_, samples_[(next_index_ + i) % max_count()]);
+      }
+      min_stale_ = false;
+    }
+    return min_;
+  }
+
+  // O(n) time complexity.
+  // Weights nth sample with weight (learning_rate)^n. Learning_rate should be
+  // between (0.0, 1.0], otherwise the non-weighted mean is returned.
+  double ComputeWeightedMean(double learning_rate) const {
+    if (count_ < 1 || learning_rate <= 0.0 || learning_rate >= 1.0) {
+      return ComputeMean();
+    }
+    double weighted_mean = 0.0;
+    double current_weight = 1.0;
+    double weight_sum = 0.0;
+    const size_t max_size = max_count();
+    for (size_t i = 0; i < count_; ++i) {
+      current_weight *= learning_rate;
+      weight_sum += current_weight;
+      // Add max_size to prevent underflow.
+      size_t index = (next_index_ + max_size - i - 1) % max_size;
+      weighted_mean += current_weight * samples_[index];
+    }
+    return weighted_mean / weight_sum;
+  }
+
+  // Compute estimated variance.  Estimation is more accurate
+  // as the number of samples grows.
+  double ComputeVariance() const {
+    if (count_ == 0) {
+      return 0.0;
+    }
+    // Var = E[x^2] - (E[x])^2
+    double count_inv = 1.0 / count_;
+    double mean_2 = sum_2_ * count_inv;
+    double mean = sum_ * count_inv;
+    return mean_2 - (mean * mean);
+  }
+
+ private:
+  size_t count_;
+  size_t next_index_;
+  double sum_;    // Sum(x) - double to avoid overflow
+  double sum_2_;  // Sum(x*x) - double to avoid overflow
+  mutable T max_;
+  mutable bool max_stale_;
+  mutable T min_;
+  mutable bool min_stale_;
+  std::vector<T> samples_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(RollingAccumulator);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_ROLLINGACCUMULATOR_H_
diff --git a/rtc_base/rollingaccumulator_unittest.cc b/rtc_base/rollingaccumulator_unittest.cc
new file mode 100644
index 0000000..91ed853
--- /dev/null
+++ b/rtc_base/rollingaccumulator_unittest.cc
@@ -0,0 +1,118 @@
+/*
+ *  Copyright 2011 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/rollingaccumulator.h"
+#include "rtc_base/gunit.h"
+
+namespace rtc {
+
+namespace {
+
+const double kLearningRate = 0.5;
+
+}  // namespace
+
+TEST(RollingAccumulatorTest, ZeroSamples) {
+  RollingAccumulator<int> accum(10);
+
+  EXPECT_EQ(0U, accum.count());
+  EXPECT_DOUBLE_EQ(0.0, accum.ComputeMean());
+  EXPECT_DOUBLE_EQ(0.0, accum.ComputeVariance());
+  EXPECT_EQ(0, accum.ComputeMin());
+  EXPECT_EQ(0, accum.ComputeMax());
+}
+
+TEST(RollingAccumulatorTest, SomeSamples) {
+  RollingAccumulator<int> accum(10);
+  for (int i = 0; i < 4; ++i) {
+    accum.AddSample(i);
+  }
+
+  EXPECT_EQ(4U, accum.count());
+  EXPECT_EQ(6, accum.ComputeSum());
+  EXPECT_DOUBLE_EQ(1.5, accum.ComputeMean());
+  EXPECT_NEAR(2.26666, accum.ComputeWeightedMean(kLearningRate), 0.01);
+  EXPECT_DOUBLE_EQ(1.25, accum.ComputeVariance());
+  EXPECT_EQ(0, accum.ComputeMin());
+  EXPECT_EQ(3, accum.ComputeMax());
+}
+
+TEST(RollingAccumulatorTest, RollingSamples) {
+  RollingAccumulator<int> accum(10);
+  for (int i = 0; i < 12; ++i) {
+    accum.AddSample(i);
+  }
+
+  EXPECT_EQ(10U, accum.count());
+  EXPECT_EQ(65, accum.ComputeSum());
+  EXPECT_DOUBLE_EQ(6.5, accum.ComputeMean());
+  EXPECT_NEAR(10.0, accum.ComputeWeightedMean(kLearningRate), 0.01);
+  EXPECT_NEAR(9.0, accum.ComputeVariance(), 1.0);
+  EXPECT_EQ(2, accum.ComputeMin());
+  EXPECT_EQ(11, accum.ComputeMax());
+}
+
+TEST(RollingAccumulatorTest, ResetSamples) {
+  RollingAccumulator<int> accum(10);
+
+  for (int i = 0; i < 10; ++i) {
+    accum.AddSample(100);
+  }
+  EXPECT_EQ(10U, accum.count());
+  EXPECT_DOUBLE_EQ(100.0, accum.ComputeMean());
+  EXPECT_EQ(100, accum.ComputeMin());
+  EXPECT_EQ(100, accum.ComputeMax());
+
+  accum.Reset();
+  EXPECT_EQ(0U, accum.count());
+
+  for (int i = 0; i < 5; ++i) {
+    accum.AddSample(i);
+  }
+
+  EXPECT_EQ(5U, accum.count());
+  EXPECT_EQ(10, accum.ComputeSum());
+  EXPECT_DOUBLE_EQ(2.0, accum.ComputeMean());
+  EXPECT_EQ(0, accum.ComputeMin());
+  EXPECT_EQ(4, accum.ComputeMax());
+}
+
+TEST(RollingAccumulatorTest, RollingSamplesDouble) {
+  RollingAccumulator<double> accum(10);
+  for (int i = 0; i < 23; ++i) {
+    accum.AddSample(5 * i);
+  }
+
+  EXPECT_EQ(10u, accum.count());
+  EXPECT_DOUBLE_EQ(875.0, accum.ComputeSum());
+  EXPECT_DOUBLE_EQ(87.5, accum.ComputeMean());
+  EXPECT_NEAR(105.049, accum.ComputeWeightedMean(kLearningRate), 0.1);
+  EXPECT_NEAR(229.166667, accum.ComputeVariance(), 25);
+  EXPECT_DOUBLE_EQ(65.0, accum.ComputeMin());
+  EXPECT_DOUBLE_EQ(110.0, accum.ComputeMax());
+}
+
+TEST(RollingAccumulatorTest, ComputeWeightedMeanCornerCases) {
+  RollingAccumulator<int> accum(10);
+  EXPECT_DOUBLE_EQ(0.0, accum.ComputeWeightedMean(kLearningRate));
+  EXPECT_DOUBLE_EQ(0.0, accum.ComputeWeightedMean(0.0));
+  EXPECT_DOUBLE_EQ(0.0, accum.ComputeWeightedMean(1.1));
+
+  for (int i = 0; i < 8; ++i) {
+    accum.AddSample(i);
+  }
+
+  EXPECT_DOUBLE_EQ(3.5, accum.ComputeMean());
+  EXPECT_DOUBLE_EQ(3.5, accum.ComputeWeightedMean(0));
+  EXPECT_DOUBLE_EQ(3.5, accum.ComputeWeightedMean(1.1));
+  EXPECT_NEAR(6.0, accum.ComputeWeightedMean(kLearningRate), 0.1);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/rtccertificate.cc b/rtc_base/rtccertificate.cc
new file mode 100644
index 0000000..2887895
--- /dev/null
+++ b/rtc_base/rtccertificate.cc
@@ -0,0 +1,75 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "rtc_base/rtccertificate.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/refcountedobject.h"
+
+namespace rtc {
+
+scoped_refptr<RTCCertificate> RTCCertificate::Create(
+    std::unique_ptr<SSLIdentity> identity) {
+  return new RefCountedObject<RTCCertificate>(identity.release());
+}
+
+RTCCertificate::RTCCertificate(SSLIdentity* identity)
+    : identity_(identity) {
+  RTC_DCHECK(identity_);
+}
+
+RTCCertificate::~RTCCertificate() {
+}
+
+uint64_t RTCCertificate::Expires() const {
+  int64_t expires = ssl_certificate().CertificateExpirationTime();
+  if (expires != -1)
+    return static_cast<uint64_t>(expires) * kNumMillisecsPerSec;
+  // If the expiration time could not be retrieved return an expired timestamp.
+  return 0;  // = 1970-01-01
+}
+
+bool RTCCertificate::HasExpired(uint64_t now) const {
+  return Expires() <= now;
+}
+
+const SSLCertificate& RTCCertificate::ssl_certificate() const {
+  return identity_->certificate();
+}
+
+const SSLCertChain& RTCCertificate::ssl_cert_chain() const {
+  return identity_->cert_chain();
+}
+
+RTCCertificatePEM RTCCertificate::ToPEM() const {
+  return RTCCertificatePEM(identity_->PrivateKeyToPEMString(),
+                           ssl_certificate().ToPEMString());
+}
+
+scoped_refptr<RTCCertificate> RTCCertificate::FromPEM(
+    const RTCCertificatePEM& pem) {
+  std::unique_ptr<SSLIdentity> identity(SSLIdentity::FromPEMStrings(
+      pem.private_key(), pem.certificate()));
+  if (!identity)
+    return nullptr;
+  return new RefCountedObject<RTCCertificate>(identity.release());
+}
+
+bool RTCCertificate::operator==(const RTCCertificate& certificate) const {
+  return *this->identity_ == *certificate.identity_;
+}
+
+bool RTCCertificate::operator!=(const RTCCertificate& certificate) const {
+  return !(*this == certificate);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/rtccertificate.h b/rtc_base/rtccertificate.h
new file mode 100644
index 0000000..f13caba
--- /dev/null
+++ b/rtc_base/rtccertificate.h
@@ -0,0 +1,88 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_RTCCERTIFICATE_H_
+#define RTC_BASE_RTCCERTIFICATE_H_
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "rtc_base/refcount.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "rtc_base/sslidentity.h"
+
+namespace rtc {
+
+// This class contains PEM strings of an RTCCertificate's private key and
+// certificate and acts as a text representation of RTCCertificate. Certificates
+// can be serialized and deserialized to and from this format, which allows for
+// cloning and storing of certificates to disk. The PEM format is that of
+// |SSLIdentity::PrivateKeyToPEMString| and |SSLCertificate::ToPEMString|, e.g.
+// the string representations used by OpenSSL.
+class RTCCertificatePEM {
+ public:
+  RTCCertificatePEM(
+      const std::string& private_key,
+      const std::string& certificate)
+      : private_key_(private_key),
+        certificate_(certificate) {}
+
+  const std::string& private_key() const { return private_key_; }
+  const std::string& certificate() const { return certificate_; }
+
+ private:
+  std::string private_key_;
+  std::string certificate_;
+};
+
+// A thin abstraction layer between "lower level crypto stuff" like
+// SSLCertificate and WebRTC usage. Takes ownership of some lower level objects,
+// reference counting protects these from premature destruction.
+class RTCCertificate : public RefCountInterface {
+ public:
+  // Takes ownership of |identity|.
+  static scoped_refptr<RTCCertificate> Create(
+      std::unique_ptr<SSLIdentity> identity);
+
+  // Returns the expiration time in ms relative to epoch, 1970-01-01T00:00:00Z.
+  uint64_t Expires() const;
+  // Checks if the certificate has expired, where |now| is expressed in ms
+  // relative to epoch, 1970-01-01T00:00:00Z.
+  bool HasExpired(uint64_t now) const;
+  const SSLCertificate& ssl_certificate() const;
+  const SSLCertChain& ssl_cert_chain() const;
+
+  // TODO(hbos): If possible, remove once RTCCertificate and its
+  // ssl_certificate() is used in all relevant places. Should not pass around
+  // raw SSLIdentity* for the sake of accessing SSLIdentity::certificate().
+  // However, some places might need SSLIdentity* for its public/private key...
+  SSLIdentity* identity() const { return identity_.get(); }
+
+  // To/from PEM, a text representation of the RTCCertificate.
+  RTCCertificatePEM ToPEM() const;
+  // Can return nullptr if the certificate is invalid.
+  static scoped_refptr<RTCCertificate> FromPEM(const RTCCertificatePEM& pem);
+  bool operator==(const RTCCertificate& certificate) const;
+  bool operator!=(const RTCCertificate& certificate) const;
+
+ protected:
+  explicit RTCCertificate(SSLIdentity* identity);
+  ~RTCCertificate() override;
+
+ private:
+  // The SSLIdentity is the owner of the SSLCertificate. To protect our
+  // ssl_certificate() we take ownership of |identity_|.
+  std::unique_ptr<SSLIdentity> identity_;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_RTCCERTIFICATE_H_
diff --git a/rtc_base/rtccertificate_unittest.cc b/rtc_base/rtccertificate_unittest.cc
new file mode 100644
index 0000000..7252a04
--- /dev/null
+++ b/rtc_base/rtccertificate_unittest.cc
@@ -0,0 +1,142 @@
+/*
+ *  Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <utility>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/fakesslidentity.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/rtccertificate.h"
+#include "rtc_base/sslidentity.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/timeutils.h"
+
+namespace rtc {
+
+namespace {
+
+static const char* kTestCertCommonName = "RTCCertificateTest's certificate";
+
+}  // namespace
+
+class RTCCertificateTest : public testing::Test {
+ protected:
+  scoped_refptr<RTCCertificate> GenerateECDSA() {
+    std::unique_ptr<SSLIdentity> identity(
+        SSLIdentity::Generate(kTestCertCommonName, KeyParams::ECDSA()));
+    RTC_CHECK(identity);
+    return RTCCertificate::Create(std::move(identity));
+  }
+
+  // Timestamp note:
+  //   All timestamps in this unittest are expressed in number of seconds since
+  // epoch, 1970-01-01T00:00:00Z (UTC). The RTCCertificate interface uses ms,
+  // but only seconds-precision is supported by SSLCertificate. To make the
+  // tests clearer we convert everything to seconds since the precision matters
+  // when generating certificates or comparing timestamps.
+  //   As a result, ExpiresSeconds and HasExpiredSeconds are used instead of
+  // RTCCertificate::Expires and ::HasExpired for ms -> s conversion.
+
+  uint64_t NowSeconds() const {
+    return TimeNanos() / kNumNanosecsPerSec;
+  }
+
+  uint64_t ExpiresSeconds(const scoped_refptr<RTCCertificate>& cert) const {
+    uint64_t exp_ms = cert->Expires();
+    uint64_t exp_s = exp_ms / kNumMillisecsPerSec;
+    // Make sure this did not result in loss of precision.
+    RTC_CHECK_EQ(exp_s * kNumMillisecsPerSec, exp_ms);
+    return exp_s;
+  }
+
+  bool HasExpiredSeconds(const scoped_refptr<RTCCertificate>& cert,
+                         uint64_t now_s) const {
+    return cert->HasExpired(now_s * kNumMillisecsPerSec);
+  }
+
+  // An RTC_CHECK ensures that |expires_s| this is in valid range of time_t as
+  // is required by SSLIdentityParams. On some 32-bit systems time_t is limited
+  // to < 2^31. On such systems this will fail for expiration times of year 2038
+  // or later.
+  scoped_refptr<RTCCertificate> GenerateCertificateWithExpires(
+      uint64_t expires_s) const {
+    RTC_CHECK(IsValueInRangeForNumericType<time_t>(expires_s));
+
+    SSLIdentityParams params;
+    params.common_name = kTestCertCommonName;
+    params.not_before = 0;
+    params.not_after = static_cast<time_t>(expires_s);
+    // Certificate type does not matter for our purposes, using ECDSA because it
+    // is fast to generate.
+    params.key_params = KeyParams::ECDSA();
+
+    std::unique_ptr<SSLIdentity> identity(SSLIdentity::GenerateForTest(params));
+    return RTCCertificate::Create(std::move(identity));
+  }
+};
+
+TEST_F(RTCCertificateTest, NewCertificateNotExpired) {
+  // Generate a real certificate without specifying the expiration time.
+  // Certificate type doesn't matter, using ECDSA because it's fast to generate.
+  scoped_refptr<RTCCertificate> certificate = GenerateECDSA();
+
+  uint64_t now = NowSeconds();
+  EXPECT_FALSE(HasExpiredSeconds(certificate, now));
+  // Even without specifying the expiration time we would expect it to be valid
+  // for at least half an hour.
+  EXPECT_FALSE(HasExpiredSeconds(certificate, now + 30*60));
+}
+
+TEST_F(RTCCertificateTest, UsesExpiresAskedFor) {
+  uint64_t now = NowSeconds();
+  scoped_refptr<RTCCertificate> certificate =
+      GenerateCertificateWithExpires(now);
+  EXPECT_EQ(now, ExpiresSeconds(certificate));
+}
+
+TEST_F(RTCCertificateTest, ExpiresInOneSecond) {
+  // Generate a certificate that expires in 1s.
+  uint64_t now = NowSeconds();
+  scoped_refptr<RTCCertificate> certificate =
+      GenerateCertificateWithExpires(now + 1);
+  // Now it should not have expired.
+  EXPECT_FALSE(HasExpiredSeconds(certificate, now));
+  // In 2s it should have expired.
+  EXPECT_TRUE(HasExpiredSeconds(certificate, now + 2));
+}
+
+TEST_F(RTCCertificateTest, DifferentCertificatesNotEqual) {
+  scoped_refptr<RTCCertificate> a = GenerateECDSA();
+  scoped_refptr<RTCCertificate> b = GenerateECDSA();
+  EXPECT_TRUE(*a != *b);
+}
+
+TEST_F(RTCCertificateTest, CloneWithPEMSerialization) {
+  scoped_refptr<RTCCertificate> orig = GenerateECDSA();
+
+  // To PEM.
+  RTCCertificatePEM orig_pem = orig->ToPEM();
+  // Clone from PEM.
+  scoped_refptr<RTCCertificate> clone = RTCCertificate::FromPEM(orig_pem);
+  EXPECT_TRUE(clone);
+  EXPECT_TRUE(*orig == *clone);
+  EXPECT_EQ(orig->Expires(), clone->Expires());
+}
+
+TEST_F(RTCCertificateTest, FromPEMWithInvalidPEM) {
+  RTCCertificatePEM pem("not a valid PEM", "not a valid PEM");
+  scoped_refptr<RTCCertificate> certificate = RTCCertificate::FromPEM(pem);
+  EXPECT_FALSE(certificate);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/rtccertificategenerator.cc b/rtc_base/rtccertificategenerator.cc
new file mode 100644
index 0000000..cacff61
--- /dev/null
+++ b/rtc_base/rtccertificategenerator.cc
@@ -0,0 +1,161 @@
+/*
+ *  Copyright 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/rtccertificategenerator.h"
+
+#include <algorithm>
+#include <memory>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/refcountedobject.h"
+#include "rtc_base/sslidentity.h"
+
+namespace rtc {
+
+namespace {
+
+// A certificates' subject and issuer name.
+const char kIdentityName[] = "WebRTC";
+
+const uint64_t kYearInSeconds = 365 * 24 * 60 * 60;
+
+enum {
+  MSG_GENERATE,
+  MSG_GENERATE_DONE,
+};
+
+// Helper class for generating certificates asynchronously; a single task
+// instance is responsible for a single asynchronous certificate generation
+// request. We are using a separate helper class so that a generation request
+// can outlive the |RTCCertificateGenerator| that spawned it.
+class RTCCertificateGenerationTask : public RefCountInterface,
+                                     public MessageHandler {
+ public:
+  RTCCertificateGenerationTask(
+      Thread* signaling_thread,
+      Thread* worker_thread,
+      const KeyParams& key_params,
+      const Optional<uint64_t>& expires_ms,
+      const scoped_refptr<RTCCertificateGeneratorCallback>& callback)
+      : signaling_thread_(signaling_thread),
+        worker_thread_(worker_thread),
+        key_params_(key_params),
+        expires_ms_(expires_ms),
+        callback_(callback) {
+    RTC_DCHECK(signaling_thread_);
+    RTC_DCHECK(worker_thread_);
+    RTC_DCHECK(callback_);
+  }
+  ~RTCCertificateGenerationTask() override {}
+
+  // Handles |MSG_GENERATE| and its follow-up |MSG_GENERATE_DONE|.
+  void OnMessage(Message* msg) override {
+    switch (msg->message_id) {
+      case MSG_GENERATE:
+        RTC_DCHECK(worker_thread_->IsCurrent());
+
+        // Perform the certificate generation work here on the worker thread.
+        certificate_ = RTCCertificateGenerator::GenerateCertificate(
+            key_params_, expires_ms_);
+
+        // Handle callbacks on signaling thread. Pass on the |msg->pdata|
+        // (which references |this| with ref counting) to that thread.
+        signaling_thread_->Post(RTC_FROM_HERE, this, MSG_GENERATE_DONE,
+                                msg->pdata);
+        break;
+      case MSG_GENERATE_DONE:
+        RTC_DCHECK(signaling_thread_->IsCurrent());
+
+        // Perform callback with result here on the signaling thread.
+        if (certificate_) {
+          callback_->OnSuccess(certificate_);
+        } else {
+          callback_->OnFailure();
+        }
+
+        // Destroy |msg->pdata| which references |this| with ref counting. This
+        // may result in |this| being deleted - do not touch member variables
+        // after this line.
+        delete msg->pdata;
+        return;
+      default:
+        RTC_NOTREACHED();
+    }
+  }
+
+ private:
+  Thread* const signaling_thread_;
+  Thread* const worker_thread_;
+  const KeyParams key_params_;
+  const Optional<uint64_t> expires_ms_;
+  const scoped_refptr<RTCCertificateGeneratorCallback> callback_;
+  scoped_refptr<RTCCertificate> certificate_;
+};
+
+}  // namespace
+
+// static
+scoped_refptr<RTCCertificate>
+RTCCertificateGenerator::GenerateCertificate(
+    const KeyParams& key_params,
+    const Optional<uint64_t>& expires_ms) {
+  if (!key_params.IsValid())
+    return nullptr;
+  SSLIdentity* identity;
+  if (!expires_ms) {
+    identity = SSLIdentity::Generate(kIdentityName, key_params);
+  } else {
+    uint64_t expires_s = *expires_ms / 1000;
+    // Limit the expiration time to something reasonable (a year). This was
+    // somewhat arbitrarily chosen. It also ensures that the value is not too
+    // large for the unspecified |time_t|.
+    expires_s = std::min(expires_s, kYearInSeconds);
+    // TODO(torbjorng): Stop using |time_t|, its type is unspecified. It it safe
+    // to assume it can hold up to a year's worth of seconds (and more), but
+    // |SSLIdentity::Generate| should stop relying on |time_t|.
+    // See bugs.webrtc.org/5720.
+    time_t cert_lifetime_s = static_cast<time_t>(expires_s);
+    identity = SSLIdentity::GenerateWithExpiration(
+        kIdentityName, key_params, cert_lifetime_s);
+  }
+  if (!identity)
+    return nullptr;
+  std::unique_ptr<SSLIdentity> identity_sptr(identity);
+  return RTCCertificate::Create(std::move(identity_sptr));
+}
+
+RTCCertificateGenerator::RTCCertificateGenerator(
+    Thread* signaling_thread, Thread* worker_thread)
+    : signaling_thread_(signaling_thread),
+      worker_thread_(worker_thread) {
+  RTC_DCHECK(signaling_thread_);
+  RTC_DCHECK(worker_thread_);
+}
+
+void RTCCertificateGenerator::GenerateCertificateAsync(
+    const KeyParams& key_params,
+    const Optional<uint64_t>& expires_ms,
+    const scoped_refptr<RTCCertificateGeneratorCallback>& callback) {
+  RTC_DCHECK(signaling_thread_->IsCurrent());
+  RTC_DCHECK(callback);
+
+  // Create a new |RTCCertificateGenerationTask| for this generation request. It
+  // is reference counted and referenced by the message data, ensuring it lives
+  // until the task has completed (independent of |RTCCertificateGenerator|).
+  ScopedRefMessageData<RTCCertificateGenerationTask>* msg_data =
+      new ScopedRefMessageData<RTCCertificateGenerationTask>(
+          new RefCountedObject<RTCCertificateGenerationTask>(
+              signaling_thread_, worker_thread_, key_params, expires_ms,
+              callback));
+  worker_thread_->Post(RTC_FROM_HERE, msg_data->data().get(), MSG_GENERATE,
+                       msg_data);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/rtccertificategenerator.h b/rtc_base/rtccertificategenerator.h
new file mode 100644
index 0000000..1f85466
--- /dev/null
+++ b/rtc_base/rtccertificategenerator.h
@@ -0,0 +1,86 @@
+/*
+ *  Copyright 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_RTCCERTIFICATEGENERATOR_H_
+#define RTC_BASE_RTCCERTIFICATEGENERATOR_H_
+
+#include "api/optional.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/rtccertificate.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "rtc_base/sslidentity.h"
+#include "rtc_base/thread.h"
+
+namespace rtc {
+
+// See |RTCCertificateGeneratorInterface::GenerateCertificateAsync|.
+class RTCCertificateGeneratorCallback : public RefCountInterface {
+ public:
+  virtual void OnSuccess(
+    const scoped_refptr<RTCCertificate>& certificate) = 0;
+  virtual void OnFailure() = 0;
+
+ protected:
+  ~RTCCertificateGeneratorCallback() override {}
+};
+
+// Generates |RTCCertificate|s.
+// See |RTCCertificateGenerator| for the WebRTC repo's implementation.
+class RTCCertificateGeneratorInterface {
+ public:
+  virtual ~RTCCertificateGeneratorInterface() {}
+
+  // Generates a certificate asynchronously on the worker thread.
+  // Must be called on the signaling thread. The |callback| is invoked with the
+  // result on the signaling thread. |exipres_ms| optionally specifies for how
+  // long we want the certificate to be valid, but the implementation may choose
+  // its own restrictions on the expiration time.
+  virtual void GenerateCertificateAsync(
+      const KeyParams& key_params,
+      const Optional<uint64_t>& expires_ms,
+      const scoped_refptr<RTCCertificateGeneratorCallback>& callback) = 0;
+};
+
+// Standard implementation of |RTCCertificateGeneratorInterface|.
+// The static function |GenerateCertificate| generates a certificate on the
+// current thread. The |RTCCertificateGenerator| instance generates certificates
+// asynchronously on the worker thread with |GenerateCertificateAsync|.
+class RTCCertificateGenerator : public RTCCertificateGeneratorInterface {
+ public:
+  // Generates a certificate on the current thread. Returns null on failure.
+  // If |expires_ms| is specified, the certificate will expire in approximately
+  // that many milliseconds from now. |expires_ms| is limited to a year, a
+  // larger value than that is clamped down to a year. If |expires_ms| is not
+  // specified, a default expiration time is used.
+  static scoped_refptr<RTCCertificate> GenerateCertificate(
+      const KeyParams& key_params,
+      const Optional<uint64_t>& expires_ms);
+
+  RTCCertificateGenerator(Thread* signaling_thread, Thread* worker_thread);
+  ~RTCCertificateGenerator() override {}
+
+  // |RTCCertificateGeneratorInterface| overrides.
+  // If |expires_ms| is specified, the certificate will expire in approximately
+  // that many milliseconds from now. |expires_ms| is limited to a year, a
+  // larger value than that is clamped down to a year. If |expires_ms| is not
+  // specified, a default expiration time is used.
+  void GenerateCertificateAsync(
+      const KeyParams& key_params,
+      const Optional<uint64_t>& expires_ms,
+      const scoped_refptr<RTCCertificateGeneratorCallback>& callback) override;
+
+ private:
+  Thread* const signaling_thread_;
+  Thread* const worker_thread_;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_RTCCERTIFICATEGENERATOR_H_
diff --git a/rtc_base/rtccertificategenerator_unittest.cc b/rtc_base/rtccertificategenerator_unittest.cc
new file mode 100644
index 0000000..9a0ad0c
--- /dev/null
+++ b/rtc_base/rtccertificategenerator_unittest.cc
@@ -0,0 +1,152 @@
+/*
+ *  Copyright 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/rtccertificategenerator.h"
+
+#include <memory>
+
+#include "api/optional.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/refcountedobject.h"
+#include "rtc_base/thread.h"
+
+namespace rtc {
+
+class RTCCertificateGeneratorFixture : public RTCCertificateGeneratorCallback {
+ public:
+  RTCCertificateGeneratorFixture()
+      : signaling_thread_(Thread::Current()),
+        worker_thread_(Thread::Create()),
+        generate_async_completed_(false) {
+    RTC_CHECK(signaling_thread_);
+    RTC_CHECK(worker_thread_->Start());
+    generator_.reset(
+      new RTCCertificateGenerator(signaling_thread_, worker_thread_.get()));
+  }
+  ~RTCCertificateGeneratorFixture() override {}
+
+  RTCCertificateGenerator* generator() const { return generator_.get(); }
+  RTCCertificate* certificate() const { return certificate_.get(); }
+
+  void OnSuccess(const scoped_refptr<RTCCertificate>& certificate) override {
+    RTC_CHECK(signaling_thread_->IsCurrent());
+    RTC_CHECK(certificate);
+    certificate_ = certificate;
+    generate_async_completed_ = true;
+  }
+  void OnFailure() override {
+    RTC_CHECK(signaling_thread_->IsCurrent());
+    certificate_ = nullptr;
+    generate_async_completed_ = true;
+  }
+
+  bool GenerateAsyncCompleted() {
+    RTC_CHECK(signaling_thread_->IsCurrent());
+    if (generate_async_completed_) {
+      // Reset flag so that future generation requests are not considered done.
+      generate_async_completed_ = false;
+      return true;
+    }
+    return false;
+  }
+
+ protected:
+  Thread* const signaling_thread_;
+  std::unique_ptr<Thread> worker_thread_;
+  std::unique_ptr<RTCCertificateGenerator> generator_;
+  scoped_refptr<RTCCertificate> certificate_;
+  bool generate_async_completed_;
+};
+
+class RTCCertificateGeneratorTest
+    : public testing::Test {
+ public:
+  RTCCertificateGeneratorTest()
+      : fixture_(new RefCountedObject<RTCCertificateGeneratorFixture>()) {}
+
+ protected:
+  static const int kGenerationTimeoutMs = 10000;
+
+  scoped_refptr<RTCCertificateGeneratorFixture> fixture_;
+};
+
+TEST_F(RTCCertificateGeneratorTest, GenerateECDSA) {
+  EXPECT_TRUE(RTCCertificateGenerator::GenerateCertificate(
+      KeyParams::ECDSA(),
+      Optional<uint64_t>()));
+}
+
+TEST_F(RTCCertificateGeneratorTest, GenerateRSA) {
+  EXPECT_TRUE(RTCCertificateGenerator::GenerateCertificate(
+      KeyParams::RSA(),
+      Optional<uint64_t>()));
+}
+
+TEST_F(RTCCertificateGeneratorTest, GenerateAsyncECDSA) {
+  EXPECT_FALSE(fixture_->certificate());
+  fixture_->generator()->GenerateCertificateAsync(
+      KeyParams::ECDSA(),
+      Optional<uint64_t>(),
+      fixture_);
+  // Until generation has completed, the certificate is null. Since this is an
+  // async call, generation must not have completed until we process messages
+  // posted to this thread (which is done by |EXPECT_TRUE_WAIT|).
+  EXPECT_FALSE(fixture_->GenerateAsyncCompleted());
+  EXPECT_FALSE(fixture_->certificate());
+  EXPECT_TRUE_WAIT(fixture_->GenerateAsyncCompleted(), kGenerationTimeoutMs);
+  EXPECT_TRUE(fixture_->certificate());
+}
+
+TEST_F(RTCCertificateGeneratorTest, GenerateWithExpires) {
+  // By generating two certificates with different expiration we can compare the
+  // two expiration times relative to each other without knowing the current
+  // time relative to epoch, 1970-01-01T00:00:00Z. This verifies that the
+  // expiration parameter is correctly used relative to the generator's clock,
+  // but does not verify that this clock is relative to epoch.
+
+  // Generate a certificate that expires immediately.
+  scoped_refptr<RTCCertificate> cert_a =
+      RTCCertificateGenerator::GenerateCertificate(
+          KeyParams::ECDSA(), Optional<uint64_t>(0));
+  EXPECT_TRUE(cert_a);
+
+  // Generate a certificate that expires in one minute.
+  const uint64_t kExpiresMs = 60000;
+  scoped_refptr<RTCCertificate> cert_b =
+      RTCCertificateGenerator::GenerateCertificate(
+          KeyParams::ECDSA(), Optional<uint64_t>(kExpiresMs));
+  EXPECT_TRUE(cert_b);
+
+  // Verify that |cert_b| expires approximately |kExpiresMs| after |cert_a|
+  // (allowing a +/- 1 second plus maximum generation time difference).
+  EXPECT_GT(cert_b->Expires(), cert_a->Expires());
+  uint64_t expires_diff = cert_b->Expires() - cert_a->Expires();
+  EXPECT_GE(expires_diff, kExpiresMs);
+  EXPECT_LE(expires_diff, kExpiresMs + 2*kGenerationTimeoutMs + 1000);
+}
+
+TEST_F(RTCCertificateGeneratorTest, GenerateWithInvalidParamsShouldFail) {
+  KeyParams invalid_params = KeyParams::RSA(0, 0);
+  EXPECT_FALSE(invalid_params.IsValid());
+
+  EXPECT_FALSE(RTCCertificateGenerator::GenerateCertificate(
+      invalid_params, Optional<uint64_t>()));
+
+  fixture_->generator()->GenerateCertificateAsync(
+      invalid_params,
+      Optional<uint64_t>(),
+      fixture_);
+  EXPECT_TRUE_WAIT(fixture_->GenerateAsyncCompleted(), kGenerationTimeoutMs);
+  EXPECT_FALSE(fixture_->certificate());
+}
+
+}  // namespace rtc
diff --git a/rtc_base/sanitizer.h b/rtc_base/sanitizer.h
new file mode 100644
index 0000000..1b94e1e
--- /dev/null
+++ b/rtc_base/sanitizer.h
@@ -0,0 +1,118 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SANITIZER_H_
+#define RTC_BASE_SANITIZER_H_
+
+#include <stddef.h>  // for size_t
+
+#if defined(__has_feature)
+#if __has_feature(address_sanitizer)
+#define RTC_HAS_ASAN 1
+#endif
+#if __has_feature(memory_sanitizer)
+#define RTC_HAS_MSAN 1
+#endif
+#endif
+#ifndef RTC_HAS_ASAN
+#define RTC_HAS_ASAN 0
+#endif
+#ifndef RTC_HAS_MSAN
+#define RTC_HAS_MSAN 0
+#endif
+
+#if RTC_HAS_ASAN
+#include <sanitizer/asan_interface.h>
+#endif
+#if RTC_HAS_MSAN
+#include <sanitizer/msan_interface.h>
+#endif
+
+#ifdef __has_attribute
+#if __has_attribute(no_sanitize)
+#define RTC_NO_SANITIZE(what) __attribute__((no_sanitize(what)))
+#endif
+#endif
+#ifndef RTC_NO_SANITIZE
+#define RTC_NO_SANITIZE(what)
+#endif
+
+// Ask ASan to mark the memory range [ptr, ptr + element_size * num_elements)
+// as being unaddressable, so that reads and writes are not allowed. ASan may
+// narrow the range to the nearest alignment boundaries.
+static inline void rtc_AsanPoison(const volatile void* ptr,
+                                  size_t element_size,
+                                  size_t num_elements) {
+#if RTC_HAS_ASAN
+  ASAN_POISON_MEMORY_REGION(ptr, element_size * num_elements);
+#endif
+}
+
+// Ask ASan to mark the memory range [ptr, ptr + element_size * num_elements)
+// as being addressable, so that reads and writes are allowed. ASan may widen
+// the range to the nearest alignment boundaries.
+static inline void rtc_AsanUnpoison(const volatile void* ptr,
+                                    size_t element_size,
+                                    size_t num_elements) {
+#if RTC_HAS_ASAN
+  ASAN_UNPOISON_MEMORY_REGION(ptr, element_size * num_elements);
+#endif
+}
+
+// Ask MSan to mark the memory range [ptr, ptr + element_size * num_elements)
+// as being uninitialized.
+static inline void rtc_MsanMarkUninitialized(const volatile void* ptr,
+                                             size_t element_size,
+                                             size_t num_elements) {
+#if RTC_HAS_MSAN
+  __msan_poison(ptr, element_size * num_elements);
+#endif
+}
+
+// Force an MSan check (if any bits in the memory range [ptr, ptr +
+// element_size * num_elements) are uninitialized the call will crash with an
+// MSan report).
+static inline void rtc_MsanCheckInitialized(const volatile void* ptr,
+                                            size_t element_size,
+                                            size_t num_elements) {
+#if RTC_HAS_MSAN
+  __msan_check_mem_is_initialized(ptr, element_size * num_elements);
+#endif
+}
+
+#ifdef __cplusplus
+
+namespace rtc {
+
+template <typename T>
+inline void AsanPoison(const T& mem) {
+  rtc_AsanPoison(mem.data(), sizeof(mem.data()[0]), mem.size());
+}
+
+template <typename T>
+inline void AsanUnpoison(const T& mem) {
+  rtc_AsanUnpoison(mem.data(), sizeof(mem.data()[0]), mem.size());
+}
+
+template <typename T>
+inline void MsanMarkUninitialized(const T& mem) {
+  rtc_MsanMarkUninitialized(mem.data(), sizeof(mem.data()[0]), mem.size());
+}
+
+template <typename T>
+inline void MsanCheckInitialized(const T& mem) {
+  rtc_MsanCheckInitialized(mem.data(), sizeof(mem.data()[0]), mem.size());
+}
+
+}  // namespace rtc
+
+#endif  // __cplusplus
+
+#endif  // RTC_BASE_SANITIZER_H_
diff --git a/rtc_base/scoped_ref_ptr.h b/rtc_base/scoped_ref_ptr.h
new file mode 100644
index 0000000..0f4698a
--- /dev/null
+++ b/rtc_base/scoped_ref_ptr.h
@@ -0,0 +1,163 @@
+/*
+ *  Copyright 2011 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Originally these classes are from Chromium.
+// http://src.chromium.org/viewvc/chrome/trunk/src/base/memory/ref_counted.h?view=markup
+
+//
+// A smart pointer class for reference counted objects.  Use this class instead
+// of calling AddRef and Release manually on a reference counted object to
+// avoid common memory leaks caused by forgetting to Release an object
+// reference.  Sample usage:
+//
+//   class MyFoo : public RefCounted<MyFoo> {
+//    ...
+//   };
+//
+//   void some_function() {
+//     scoped_refptr<MyFoo> foo = new MyFoo();
+//     foo->Method(param);
+//     // |foo| is released when this function returns
+//   }
+//
+//   void some_other_function() {
+//     scoped_refptr<MyFoo> foo = new MyFoo();
+//     ...
+//     foo = nullptr;  // explicitly releases |foo|
+//     ...
+//     if (foo)
+//       foo->Method(param);
+//   }
+//
+// The above examples show how scoped_refptr<T> acts like a pointer to T.
+// Given two scoped_refptr<T> classes, it is also possible to exchange
+// references between the two objects, like so:
+//
+//   {
+//     scoped_refptr<MyFoo> a = new MyFoo();
+//     scoped_refptr<MyFoo> b;
+//
+//     b.swap(a);
+//     // now, |b| references the MyFoo object, and |a| references null.
+//   }
+//
+// To make both |a| and |b| in the above example reference the same MyFoo
+// object, simply use the assignment operator:
+//
+//   {
+//     scoped_refptr<MyFoo> a = new MyFoo();
+//     scoped_refptr<MyFoo> b;
+//
+//     b = a;
+//     // now, |a| and |b| each own a reference to the same MyFoo object.
+//   }
+//
+
+#ifndef RTC_BASE_SCOPED_REF_PTR_H_
+#define RTC_BASE_SCOPED_REF_PTR_H_
+
+#include <memory>
+
+namespace rtc {
+
+template <class T>
+class scoped_refptr {
+ public:
+  scoped_refptr() : ptr_(nullptr) {}
+
+  scoped_refptr(T* p) : ptr_(p) {
+    if (ptr_)
+      ptr_->AddRef();
+  }
+
+  scoped_refptr(const scoped_refptr<T>& r) : ptr_(r.ptr_) {
+    if (ptr_)
+      ptr_->AddRef();
+  }
+
+  template <typename U>
+  scoped_refptr(const scoped_refptr<U>& r) : ptr_(r.get()) {
+    if (ptr_)
+      ptr_->AddRef();
+  }
+
+  // Move constructors.
+  scoped_refptr(scoped_refptr<T>&& r) : ptr_(r.release()) {}
+
+  template <typename U>
+  scoped_refptr(scoped_refptr<U>&& r) : ptr_(r.release()) {}
+
+  ~scoped_refptr() {
+    if (ptr_)
+      ptr_->Release();
+  }
+
+  T* get() const { return ptr_; }
+  operator T*() const { return ptr_; }
+  T* operator->() const { return ptr_; }
+
+  // Release a pointer.
+  // The return value is the current pointer held by this object.
+  // If this object holds a null pointer, the return value is null.
+  // After this operation, this object will hold a null pointer,
+  // and will not own the object any more.
+  T* release() {
+    T* retVal = ptr_;
+    ptr_ = nullptr;
+    return retVal;
+  }
+
+  scoped_refptr<T>& operator=(T* p) {
+    // AddRef first so that self assignment should work
+    if (p)
+      p->AddRef();
+    if (ptr_ )
+      ptr_ ->Release();
+    ptr_ = p;
+    return *this;
+  }
+
+  scoped_refptr<T>& operator=(const scoped_refptr<T>& r) {
+    return *this = r.ptr_;
+  }
+
+  template <typename U>
+  scoped_refptr<T>& operator=(const scoped_refptr<U>& r) {
+    return *this = r.get();
+  }
+
+  scoped_refptr<T>& operator=(scoped_refptr<T>&& r) {
+    scoped_refptr<T>(std::move(r)).swap(*this);
+    return *this;
+  }
+
+  template <typename U>
+  scoped_refptr<T>& operator=(scoped_refptr<U>&& r) {
+    scoped_refptr<T>(std::move(r)).swap(*this);
+    return *this;
+  }
+
+  void swap(T** pp) {
+    T* p = ptr_;
+    ptr_ = *pp;
+    *pp = p;
+  }
+
+  void swap(scoped_refptr<T>& r) {
+    swap(&r.ptr_);
+  }
+
+ protected:
+  T* ptr_;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_SCOPED_REF_PTR_H_
diff --git a/rtc_base/sequenced_task_checker.h b/rtc_base/sequenced_task_checker.h
new file mode 100644
index 0000000..eb15198
--- /dev/null
+++ b/rtc_base/sequenced_task_checker.h
@@ -0,0 +1,88 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SEQUENCED_TASK_CHECKER_H_
+#define RTC_BASE_SEQUENCED_TASK_CHECKER_H_
+
+// Apart from debug builds, we also enable the sequence checker in
+// builds with RTC_DCHECK_IS_ON so that trybots and waterfall bots
+// with this define will get the same level of checking as debug bots.
+#define ENABLE_SEQUENCED_TASK_CHECKER RTC_DCHECK_IS_ON
+
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/sequenced_task_checker_impl.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace rtc {
+namespace internal {
+// Forward declaration of the internal implementation of RTC_GUARDED_BY().
+// SequencedTaskChecker grants this class access to call its IsCurrent() method.
+// See thread_checker.h for more details.
+class AnnounceOnThread;
+}  // namespace internal
+
+// Do nothing implementation, for use in release mode.
+//
+// Note: You should almost always use the SequencedTaskChecker class to get the
+// right version for your build configuration.
+class SequencedTaskCheckerDoNothing {
+ public:
+  bool CalledSequentially() const { return true; }
+  void Detach() {}
+
+ private:
+  friend class internal::AnnounceOnThread;
+  bool IsCurrent() const { return CalledSequentially(); }
+};
+
+// SequencedTaskChecker is a helper class used to help verify that some methods
+// of a class are called on the same task queue or thread. A
+// SequencedTaskChecker is bound to a a task queue if the object is
+// created on a task queue, or a thread otherwise.
+//
+//
+// Example:
+// class MyClass {
+//  public:
+//   void Foo() {
+//     RTC_DCHECK(sequence_checker_.CalledSequentially());
+//     ... (do stuff) ...
+//   }
+//
+//  private:
+//   SequencedTaskChecker sequence_checker_;
+// }
+//
+// In Release mode, CalledOnValidThread will always return true.
+#if ENABLE_SEQUENCED_TASK_CHECKER
+class RTC_LOCKABLE SequencedTaskChecker : public SequencedTaskCheckerImpl {};
+#else
+class RTC_LOCKABLE SequencedTaskChecker : public SequencedTaskCheckerDoNothing {
+};
+#endif  // ENABLE_SEQUENCED_TASK_CHECKER_H_
+
+namespace internal {
+class RTC_SCOPED_LOCKABLE SequencedTaskCheckerScope {
+ public:
+  explicit SequencedTaskCheckerScope(const SequencedTaskChecker* checker)
+      RTC_EXCLUSIVE_LOCK_FUNCTION(checker);
+  ~SequencedTaskCheckerScope() RTC_UNLOCK_FUNCTION();
+};
+
+}  // namespace internal
+
+#define RTC_DCHECK_CALLED_SEQUENTIALLY(x) \
+  rtc::internal::SequencedTaskCheckerScope checker(x)
+
+#undef ENABLE_SEQUENCED_TASK_CHECKER
+
+}  // namespace rtc
+#endif  // RTC_BASE_SEQUENCED_TASK_CHECKER_H_
diff --git a/rtc_base/sequenced_task_checker_impl.cc b/rtc_base/sequenced_task_checker_impl.cc
new file mode 100644
index 0000000..d7f46ea
--- /dev/null
+++ b/rtc_base/sequenced_task_checker_impl.cc
@@ -0,0 +1,63 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/sequenced_task_checker_impl.h"
+
+#if defined(WEBRTC_MAC)
+#include <dispatch/dispatch.h>
+#endif
+
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/sequenced_task_checker.h"
+#include "rtc_base/task_queue.h"
+
+namespace rtc {
+
+SequencedTaskCheckerImpl::SequencedTaskCheckerImpl()
+    : attached_(true), valid_queue_(TaskQueue::Current()) {}
+
+SequencedTaskCheckerImpl::~SequencedTaskCheckerImpl() {}
+
+bool SequencedTaskCheckerImpl::CalledSequentially() const {
+  QueueId current_queue = TaskQueue::Current();
+#if defined(WEBRTC_MAC)
+  // If we're not running on a TaskQueue, use the system dispatch queue
+  // label as an identifier.
+  if (current_queue == nullptr)
+    current_queue = dispatch_queue_get_label(DISPATCH_CURRENT_QUEUE_LABEL);
+#endif
+  CritScope scoped_lock(&lock_);
+  if (!attached_) {  // true if previously detached.
+    valid_queue_ = current_queue;
+    attached_ = true;
+  }
+  if (!valid_queue_)
+    return thread_checker_.CalledOnValidThread();
+  return valid_queue_ == current_queue;
+}
+
+void SequencedTaskCheckerImpl::Detach() {
+  CritScope scoped_lock(&lock_);
+  attached_ = false;
+  valid_queue_ = nullptr;
+  thread_checker_.DetachFromThread();
+}
+
+namespace internal {
+
+SequencedTaskCheckerScope::SequencedTaskCheckerScope(
+    const SequencedTaskChecker* checker) {
+  RTC_DCHECK(checker->CalledSequentially());
+}
+
+SequencedTaskCheckerScope::~SequencedTaskCheckerScope() {}
+
+}  // namespace internal
+}  // namespace rtc
diff --git a/rtc_base/sequenced_task_checker_impl.h b/rtc_base/sequenced_task_checker_impl.h
new file mode 100644
index 0000000..86d5ef0
--- /dev/null
+++ b/rtc_base/sequenced_task_checker_impl.h
@@ -0,0 +1,48 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SEQUENCED_TASK_CHECKER_IMPL_H_
+#define RTC_BASE_SEQUENCED_TASK_CHECKER_IMPL_H_
+
+#include "rtc_base/thread_checker.h"
+
+namespace rtc {
+
+class TaskQueue;
+// Real implementation of SequencedTaskChecker, for use in debug mode, or
+// for temporary use in release mode.
+//
+// Note: You should almost always use the SequencedTaskChecker class to get the
+// right version for your build configuration.
+class SequencedTaskCheckerImpl {
+ public:
+  SequencedTaskCheckerImpl();
+  ~SequencedTaskCheckerImpl();
+
+  bool CalledSequentially() const;
+
+  // Changes the task queue or thread that is checked for in IsCurrent.  This
+  // may be useful when an object may be created on one task queue / thread and
+  // then used exclusively on another thread.
+  void Detach();
+
+ private:
+  friend class internal::AnnounceOnThread;
+  bool IsCurrent() const { return CalledSequentially(); }
+
+  typedef const void* QueueId;
+  CriticalSection lock_;
+  ThreadChecker thread_checker_;
+  mutable bool attached_;
+  mutable QueueId valid_queue_;
+};
+
+}  // namespace rtc
+#endif  // RTC_BASE_SEQUENCED_TASK_CHECKER_IMPL_H_
diff --git a/rtc_base/sequenced_task_checker_unittest.cc b/rtc_base/sequenced_task_checker_unittest.cc
new file mode 100644
index 0000000..9199eb7
--- /dev/null
+++ b/rtc_base/sequenced_task_checker_unittest.cc
@@ -0,0 +1,296 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/sequenced_task_checker.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/thread_checker.h"
+#include "test/gtest.h"
+
+namespace rtc {
+
+namespace {
+
+// This class is dead code, but its purpose is to make sure that
+// SequencedTaskChecker is compatible with the RTC_GUARDED_BY and RTC_RUN_ON
+// attributes that are checked at compile-time.
+class CompileTimeTestForGuardedBy {
+ public:
+  int CalledOnSequence() RTC_RUN_ON(sequence_checker_) {
+    return guarded_;
+  }
+
+  void CallMeFromSequence() {
+    RTC_DCHECK_RUN_ON(&sequence_checker_)
+        << "Should be called on sequence";
+  }
+
+ private:
+  int guarded_ RTC_GUARDED_BY(sequence_checker_);
+  rtc::SequencedTaskChecker sequence_checker_;
+};
+
+
+// Calls SequencedTaskChecker::CalledSequentially on another thread.
+class CallCalledSequentiallyOnThread {
+ public:
+  CallCalledSequentiallyOnThread(bool expect_true,
+                                 SequencedTaskChecker* sequenced_task_checker)
+      : expect_true_(expect_true),
+        thread_has_run_event_(false, false),
+        thread_(&Run, this, "call_do_stuff_on_thread"),
+        sequenced_task_checker_(sequenced_task_checker) {
+    thread_.Start();
+  }
+  ~CallCalledSequentiallyOnThread() {
+    EXPECT_TRUE(thread_has_run_event_.Wait(1000));
+    thread_.Stop();
+  }
+
+ private:
+  static void Run(void* obj) {
+    CallCalledSequentiallyOnThread* call_stuff_on_thread =
+        static_cast<CallCalledSequentiallyOnThread*>(obj);
+    EXPECT_EQ(
+        call_stuff_on_thread->expect_true_,
+        call_stuff_on_thread->sequenced_task_checker_->CalledSequentially());
+    call_stuff_on_thread->thread_has_run_event_.Set();
+  }
+
+  const bool expect_true_;
+  Event thread_has_run_event_;
+  PlatformThread thread_;
+  SequencedTaskChecker* const sequenced_task_checker_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(CallCalledSequentiallyOnThread);
+};
+
+// Deletes SequencedTaskChecker on a different thread.
+class DeleteSequencedCheckerOnThread {
+ public:
+  explicit DeleteSequencedCheckerOnThread(
+      std::unique_ptr<SequencedTaskChecker> sequenced_task_checker)
+      : thread_(&Run, this, "delete_sequenced_task_checker_on_thread"),
+        thread_has_run_event_(false, false),
+        sequenced_task_checker_(std::move(sequenced_task_checker)) {
+    thread_.Start();
+  }
+
+  ~DeleteSequencedCheckerOnThread() {
+    EXPECT_TRUE(thread_has_run_event_.Wait(1000));
+    thread_.Stop();
+  }
+
+ private:
+  static bool Run(void* obj) {
+    DeleteSequencedCheckerOnThread* instance =
+        static_cast<DeleteSequencedCheckerOnThread*>(obj);
+    instance->sequenced_task_checker_.reset();
+    instance->thread_has_run_event_.Set();
+    return false;
+  }
+
+ private:
+  PlatformThread thread_;
+  Event thread_has_run_event_;
+  std::unique_ptr<SequencedTaskChecker> sequenced_task_checker_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(DeleteSequencedCheckerOnThread);
+};
+
+void RunMethodOnDifferentThread(bool expect_true) {
+  std::unique_ptr<SequencedTaskChecker> sequenced_task_checker(
+      new SequencedTaskChecker());
+
+  CallCalledSequentiallyOnThread call_on_thread(expect_true,
+                                                sequenced_task_checker.get());
+}
+
+void RunMethodOnDifferentTaskQueue(bool expect_true) {
+  std::unique_ptr<SequencedTaskChecker> sequenced_task_checker(
+      new SequencedTaskChecker());
+
+  static const char kQueueName[] = "MethodNotAllowedOnDifferentTq";
+  TaskQueue queue(kQueueName);
+  Event done_event(false, false);
+  queue.PostTask([&sequenced_task_checker, &done_event, expect_true] {
+    if (expect_true)
+      EXPECT_TRUE(sequenced_task_checker->CalledSequentially());
+    else
+      EXPECT_FALSE(sequenced_task_checker->CalledSequentially());
+    done_event.Set();
+  });
+  EXPECT_TRUE(done_event.Wait(1000));
+}
+
+void DetachThenCallFromDifferentTaskQueue(bool expect_true) {
+  std::unique_ptr<SequencedTaskChecker> sequenced_task_checker(
+      new SequencedTaskChecker());
+
+  sequenced_task_checker->Detach();
+
+  Event done_event(false, false);
+  TaskQueue queue1("DetachThenCallFromDifferentTaskQueueImpl1");
+  queue1.PostTask([&sequenced_task_checker, &done_event] {
+    EXPECT_TRUE(sequenced_task_checker->CalledSequentially());
+    done_event.Set();
+  });
+  EXPECT_TRUE(done_event.Wait(1000));
+
+  // CalledSequentially should return false in debug builds after moving to
+  // another task queue.
+  TaskQueue queue2("DetachThenCallFromDifferentTaskQueueImpl2");
+  queue2.PostTask([&sequenced_task_checker, &done_event, expect_true] {
+    if (expect_true)
+      EXPECT_TRUE(sequenced_task_checker->CalledSequentially());
+    else
+      EXPECT_FALSE(sequenced_task_checker->CalledSequentially());
+    done_event.Set();
+  });
+  EXPECT_TRUE(done_event.Wait(1000));
+}
+}  // namespace
+
+TEST(SequencedTaskCheckerTest, CallsAllowedOnSameThread) {
+  std::unique_ptr<SequencedTaskChecker> sequenced_task_checker(
+      new SequencedTaskChecker());
+
+  EXPECT_TRUE(sequenced_task_checker->CalledSequentially());
+
+  // Verify that the destructor doesn't assert.
+  sequenced_task_checker.reset();
+}
+
+TEST(SequencedTaskCheckerTest, DestructorAllowedOnDifferentThread) {
+  std::unique_ptr<SequencedTaskChecker> sequenced_task_checker(
+      new SequencedTaskChecker());
+
+  // Verify that the destructor doesn't assert when called on a different
+  // thread.
+  DeleteSequencedCheckerOnThread delete_on_thread(
+      std::move(sequenced_task_checker));
+}
+
+TEST(SequencedTaskCheckerTest, DetachFromThread) {
+  std::unique_ptr<SequencedTaskChecker> sequenced_task_checker(
+      new SequencedTaskChecker());
+
+  sequenced_task_checker->Detach();
+  CallCalledSequentiallyOnThread call_on_thread(true,
+                                                sequenced_task_checker.get());
+}
+
+TEST(SequencedTaskCheckerTest, DetachFromThreadAndUseOnTaskQueue) {
+  std::unique_ptr<SequencedTaskChecker> sequenced_task_checker(
+      new SequencedTaskChecker());
+
+  sequenced_task_checker->Detach();
+  static const char kQueueName[] = "DetachFromThreadAndUseOnTaskQueue";
+  TaskQueue queue(kQueueName);
+  Event done_event(false, false);
+  queue.PostTask([&sequenced_task_checker, &done_event] {
+    EXPECT_TRUE(sequenced_task_checker->CalledSequentially());
+    done_event.Set();
+  });
+  EXPECT_TRUE(done_event.Wait(1000));
+}
+
+TEST(SequencedTaskCheckerTest, DetachFromTaskQueueAndUseOnThread) {
+  TaskQueue queue("DetachFromTaskQueueAndUseOnThread");
+  Event done_event(false, false);
+  queue.PostTask([&done_event] {
+    std::unique_ptr<SequencedTaskChecker> sequenced_task_checker(
+        new SequencedTaskChecker());
+
+    sequenced_task_checker->Detach();
+    CallCalledSequentiallyOnThread call_on_thread(true,
+                                                  sequenced_task_checker.get());
+    done_event.Set();
+  });
+  EXPECT_TRUE(done_event.Wait(1000));
+}
+
+#if RTC_DCHECK_IS_ON
+TEST(SequencedTaskCheckerTest, MethodNotAllowedOnDifferentThreadInDebug) {
+  RunMethodOnDifferentThread(false);
+}
+#else
+TEST(SequencedTaskCheckerTest, MethodAllowedOnDifferentThreadInRelease) {
+  RunMethodOnDifferentThread(true);
+}
+#endif
+
+#if RTC_DCHECK_IS_ON
+TEST(SequencedTaskCheckerTest, MethodNotAllowedOnDifferentTaskQueueInDebug) {
+  RunMethodOnDifferentTaskQueue(false);
+}
+#else
+TEST(SequencedTaskCheckerTest, MethodAllowedOnDifferentTaskQueueInRelease) {
+  RunMethodOnDifferentTaskQueue(true);
+}
+#endif
+
+#if RTC_DCHECK_IS_ON
+TEST(SequencedTaskCheckerTest, DetachFromTaskQueueInDebug) {
+  DetachThenCallFromDifferentTaskQueue(false);
+}
+#else
+TEST(SequencedTaskCheckerTest, DetachFromTaskQueueInRelease) {
+  DetachThenCallFromDifferentTaskQueue(true);
+}
+#endif
+
+class TestAnnotations {
+ public:
+  TestAnnotations() : test_var_(false) {}
+
+  void ModifyTestVar() {
+    RTC_DCHECK_CALLED_SEQUENTIALLY(&checker_);
+    test_var_ = true;
+  }
+
+ private:
+  bool test_var_ RTC_GUARDED_BY(&checker_);
+  SequencedTaskChecker checker_;
+};
+
+TEST(SequencedTaskCheckerTest, TestAnnotations) {
+  TestAnnotations annotations;
+  annotations.ModifyTestVar();
+}
+
+#if GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+
+void TestAnnotationsOnWrongQueue() {
+  TestAnnotations annotations;
+  static const char kQueueName[] = "TestAnnotationsOnWrongQueueDebug";
+  TaskQueue queue(kQueueName);
+  Event done_event(false, false);
+  queue.PostTask([&annotations, &done_event] {
+    annotations.ModifyTestVar();
+    done_event.Set();
+  });
+  EXPECT_TRUE(done_event.Wait(1000));
+}
+
+#if RTC_DCHECK_IS_ON
+TEST(SequencedTaskCheckerTest, TestAnnotationsOnWrongQueueDebug) {
+  ASSERT_DEATH({ TestAnnotationsOnWrongQueue(); }, "");
+}
+#else
+TEST(SequencedTaskCheckerTest, TestAnnotationsOnWrongQueueRelease) {
+  TestAnnotationsOnWrongQueue();
+}
+#endif
+#endif  // GTEST_HAS_DEATH_TEST
+}  // namespace rtc
diff --git a/rtc_base/signalthread.cc b/rtc_base/signalthread.cc
new file mode 100644
index 0000000..48a677e
--- /dev/null
+++ b/rtc_base/signalthread.cc
@@ -0,0 +1,161 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/signalthread.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/ptr_util.h"
+
+namespace rtc {
+
+///////////////////////////////////////////////////////////////////////////////
+// SignalThread
+///////////////////////////////////////////////////////////////////////////////
+
+SignalThread::SignalThread()
+    : main_(Thread::Current()),
+      worker_(this),
+      state_(kInit),
+      refcount_(1) {
+  main_->SignalQueueDestroyed.connect(this,
+                                      &SignalThread::OnMainThreadDestroyed);
+  worker_.SetName("SignalThread", this);
+}
+
+SignalThread::~SignalThread() {
+  RTC_DCHECK(refcount_ == 0);
+}
+
+bool SignalThread::SetName(const std::string& name, const void* obj) {
+  EnterExit ee(this);
+  RTC_DCHECK(main_->IsCurrent());
+  RTC_DCHECK(kInit == state_);
+  return worker_.SetName(name, obj);
+}
+
+void SignalThread::Start() {
+  EnterExit ee(this);
+  RTC_DCHECK(main_->IsCurrent());
+  if (kInit == state_ || kComplete == state_) {
+    state_ = kRunning;
+    OnWorkStart();
+    worker_.Start();
+  } else {
+    RTC_NOTREACHED();
+  }
+}
+
+void SignalThread::Destroy(bool wait) {
+  EnterExit ee(this);
+  RTC_DCHECK(main_->IsCurrent());
+  if ((kInit == state_) || (kComplete == state_)) {
+    refcount_--;
+  } else if (kRunning == state_ || kReleasing == state_) {
+    state_ = kStopping;
+    // OnWorkStop() must follow Quit(), so that when the thread wakes up due to
+    // OWS(), ContinueWork() will return false.
+    worker_.Quit();
+    OnWorkStop();
+    if (wait) {
+      // Release the thread's lock so that it can return from ::Run.
+      cs_.Leave();
+      worker_.Stop();
+      cs_.Enter();
+      refcount_--;
+    }
+  } else {
+    RTC_NOTREACHED();
+  }
+}
+
+void SignalThread::Release() {
+  EnterExit ee(this);
+  RTC_DCHECK(main_->IsCurrent());
+  if (kComplete == state_) {
+    refcount_--;
+  } else if (kRunning == state_) {
+    state_ = kReleasing;
+  } else {
+    // if (kInit == state_) use Destroy()
+    RTC_NOTREACHED();
+  }
+}
+
+bool SignalThread::ContinueWork() {
+  EnterExit ee(this);
+  RTC_DCHECK(worker_.IsCurrent());
+  return worker_.ProcessMessages(0);
+}
+
+void SignalThread::OnMessage(Message *msg) {
+  EnterExit ee(this);
+  if (ST_MSG_WORKER_DONE == msg->message_id) {
+    RTC_DCHECK(main_->IsCurrent());
+    OnWorkDone();
+    bool do_delete = false;
+    if (kRunning == state_) {
+      state_ = kComplete;
+    } else {
+      do_delete = true;
+    }
+    if (kStopping != state_) {
+      // Before signaling that the work is done, make sure that the worker
+      // thread actually is done. We got here because DoWork() finished and
+      // Run() posted the ST_MSG_WORKER_DONE message. This means the worker
+      // thread is about to go away anyway, but sometimes it doesn't actually
+      // finish before SignalWorkDone is processed, and for a reusable
+      // SignalThread this makes an assert in thread.cc fire.
+      //
+      // Calling Stop() on the worker ensures that the OS thread that underlies
+      // the worker will finish, and will be set to null, enabling us to call
+      // Start() again.
+      worker_.Stop();
+      SignalWorkDone(this);
+    }
+    if (do_delete) {
+      refcount_--;
+    }
+  }
+}
+
+SignalThread::Worker::Worker(SignalThread* parent)
+    : Thread(MakeUnique<NullSocketServer>(), /*do_init=*/false),
+      parent_(parent) {
+  DoInit();
+}
+
+SignalThread::Worker::~Worker() {
+  Stop();
+}
+
+void SignalThread::Worker::Run() {
+  parent_->Run();
+}
+
+void SignalThread::Run() {
+  DoWork();
+  {
+    EnterExit ee(this);
+    if (main_) {
+      main_->Post(RTC_FROM_HERE, this, ST_MSG_WORKER_DONE);
+    }
+  }
+}
+
+void SignalThread::OnMainThreadDestroyed() {
+  EnterExit ee(this);
+  main_ = nullptr;
+}
+
+bool SignalThread::Worker::IsProcessingMessages() {
+  return false;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/signalthread.h b/rtc_base/signalthread.h
new file mode 100644
index 0000000..8daaa08
--- /dev/null
+++ b/rtc_base/signalthread.h
@@ -0,0 +1,157 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SIGNALTHREAD_H_
+#define RTC_BASE_SIGNALTHREAD_H_
+
+#include <string>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/nullsocketserver.h"
+#include "rtc_base/sigslot.h"
+#include "rtc_base/thread.h"
+
+namespace rtc {
+
+///////////////////////////////////////////////////////////////////////////////
+// SignalThread - Base class for worker threads.  The main thread should call
+//  Start() to begin work, and then follow one of these models:
+//   Normal: Wait for SignalWorkDone, and then call Release to destroy.
+//   Cancellation: Call Release(true), to abort the worker thread.
+//   Fire-and-forget: Call Release(false), which allows the thread to run to
+//    completion, and then self-destruct without further notification.
+//   Periodic tasks: Wait for SignalWorkDone, then eventually call Start()
+//    again to repeat the task. When the instance isn't needed anymore,
+//    call Release. DoWork, OnWorkStart and OnWorkStop are called again,
+//    on a new thread.
+//  The subclass should override DoWork() to perform the background task.  By
+//   periodically calling ContinueWork(), it can check for cancellation.
+//   OnWorkStart and OnWorkDone can be overridden to do pre- or post-work
+//   tasks in the context of the main thread.
+///////////////////////////////////////////////////////////////////////////////
+
+class SignalThread
+    : public sigslot::has_slots<>,
+      protected MessageHandler {
+ public:
+  SignalThread();
+
+  // Context: Main Thread.  Call before Start to change the worker's name.
+  bool SetName(const std::string& name, const void* obj);
+
+  // Context: Main Thread.  Call to begin the worker thread.
+  void Start();
+
+  // Context: Main Thread.  If the worker thread is not running, deletes the
+  // object immediately.  Otherwise, asks the worker thread to abort processing,
+  // and schedules the object to be deleted once the worker exits.
+  // SignalWorkDone will not be signalled.  If wait is true, does not return
+  // until the thread is deleted.
+  void Destroy(bool wait);
+
+  // Context: Main Thread.  If the worker thread is complete, deletes the
+  // object immediately.  Otherwise, schedules the object to be deleted once
+  // the worker thread completes.  SignalWorkDone will be signalled.
+  void Release();
+
+  // Context: Main Thread.  Signalled when work is complete.
+  sigslot::signal1<SignalThread *> SignalWorkDone;
+
+  enum { ST_MSG_WORKER_DONE, ST_MSG_FIRST_AVAILABLE };
+
+ protected:
+  ~SignalThread() override;
+
+  Thread* worker() { return &worker_; }
+
+  // Context: Main Thread.  Subclass should override to do pre-work setup.
+  virtual void OnWorkStart() { }
+
+  // Context: Worker Thread.  Subclass should override to do work.
+  virtual void DoWork() = 0;
+
+  // Context: Worker Thread.  Subclass should call periodically to
+  // dispatch messages and determine if the thread should terminate.
+  bool ContinueWork();
+
+  // Context: Worker Thread.  Subclass should override when extra work is
+  // needed to abort the worker thread.
+  virtual void OnWorkStop() { }
+
+  // Context: Main Thread.  Subclass should override to do post-work cleanup.
+  virtual void OnWorkDone() { }
+
+  // Context: Any Thread.  If subclass overrides, be sure to call the base
+  // implementation.  Do not use (message_id < ST_MSG_FIRST_AVAILABLE)
+  void OnMessage(Message* msg) override;
+
+ private:
+  enum State {
+    kInit,            // Initialized, but not started
+    kRunning,         // Started and doing work
+    kReleasing,       // Same as running, but to be deleted when work is done
+    kComplete,        // Work is done
+    kStopping,        // Work is being interrupted
+  };
+
+  class Worker : public Thread {
+   public:
+    explicit Worker(SignalThread* parent);
+    ~Worker() override;
+    void Run() override;
+    bool IsProcessingMessages() override;
+
+   private:
+    SignalThread* parent_;
+
+    RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(Worker);
+  };
+
+  class RTC_SCOPED_LOCKABLE EnterExit {
+   public:
+    explicit EnterExit(SignalThread* t) RTC_EXCLUSIVE_LOCK_FUNCTION(t->cs_)
+        : t_(t) {
+      t_->cs_.Enter();
+      // If refcount_ is zero then the object has already been deleted and we
+      // will be double-deleting it in ~EnterExit()! (shouldn't happen)
+      RTC_DCHECK_NE(0, t_->refcount_);
+      ++t_->refcount_;
+    }
+    ~EnterExit() RTC_UNLOCK_FUNCTION() {
+      bool d = (0 == --t_->refcount_);
+      t_->cs_.Leave();
+      if (d)
+        delete t_;
+    }
+
+   private:
+    SignalThread* t_;
+
+    RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(EnterExit);
+  };
+
+  void Run();
+  void OnMainThreadDestroyed();
+
+  Thread* main_;
+  Worker worker_;
+  CriticalSection cs_;
+  State state_;
+  int refcount_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(SignalThread);
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_SIGNALTHREAD_H_
diff --git a/rtc_base/signalthread_unittest.cc b/rtc_base/signalthread_unittest.cc
new file mode 100644
index 0000000..078710b
--- /dev/null
+++ b/rtc_base/signalthread_unittest.cc
@@ -0,0 +1,208 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/signalthread.h"
+#include "rtc_base/thread.h"
+
+using namespace rtc;
+
+// 10 seconds.
+static const int kTimeout = 10000;
+
+class SignalThreadTest : public testing::Test, public sigslot::has_slots<> {
+ public:
+  class SlowSignalThread : public SignalThread {
+   public:
+    SlowSignalThread(SignalThreadTest* harness) : harness_(harness) {}
+
+    ~SlowSignalThread() override {
+      EXPECT_EQ(harness_->main_thread_, Thread::Current());
+      ++harness_->thread_deleted_;
+    }
+
+    const SignalThreadTest* harness() { return harness_; }
+
+   protected:
+    void OnWorkStart() override {
+      ASSERT_TRUE(harness_ != nullptr);
+      ++harness_->thread_started_;
+      EXPECT_EQ(harness_->main_thread_, Thread::Current());
+      EXPECT_FALSE(worker()->RunningForTest());  // not started yet
+    }
+
+    void OnWorkStop() override {
+      ++harness_->thread_stopped_;
+      EXPECT_EQ(harness_->main_thread_, Thread::Current());
+      EXPECT_TRUE(worker()->RunningForTest());  // not stopped yet
+    }
+
+    void OnWorkDone() override {
+      ++harness_->thread_done_;
+      EXPECT_EQ(harness_->main_thread_, Thread::Current());
+      EXPECT_TRUE(worker()->RunningForTest());  // not stopped yet
+    }
+
+    void DoWork() override {
+      EXPECT_NE(harness_->main_thread_, Thread::Current());
+      EXPECT_EQ(worker(), Thread::Current());
+      Thread::Current()->socketserver()->Wait(250, false);
+    }
+
+   private:
+    SignalThreadTest* harness_;
+    RTC_DISALLOW_COPY_AND_ASSIGN(SlowSignalThread);
+  };
+
+  void OnWorkComplete(rtc::SignalThread* thread) {
+    SlowSignalThread* t = static_cast<SlowSignalThread*>(thread);
+    EXPECT_EQ(t->harness(), this);
+    EXPECT_EQ(main_thread_, Thread::Current());
+
+    ++thread_completed_;
+    if (!called_release_) {
+      thread->Release();
+    }
+  }
+
+  void SetUp() override {
+    main_thread_ = Thread::Current();
+    thread_ = new SlowSignalThread(this);
+    thread_->SignalWorkDone.connect(this, &SignalThreadTest::OnWorkComplete);
+    called_release_ = false;
+    thread_started_ = 0;
+    thread_done_ = 0;
+    thread_completed_ = 0;
+    thread_stopped_ = 0;
+    thread_deleted_ = 0;
+  }
+
+  void ExpectState(int started,
+                   int done,
+                   int completed,
+                   int stopped,
+                   int deleted) {
+    EXPECT_EQ(started, thread_started_);
+    EXPECT_EQ(done, thread_done_);
+    EXPECT_EQ(completed, thread_completed_);
+    EXPECT_EQ(stopped, thread_stopped_);
+    EXPECT_EQ(deleted, thread_deleted_);
+  }
+
+  void ExpectStateWait(int started,
+                       int done,
+                       int completed,
+                       int stopped,
+                       int deleted,
+                       int timeout) {
+    EXPECT_EQ_WAIT(started, thread_started_, timeout);
+    EXPECT_EQ_WAIT(done, thread_done_, timeout);
+    EXPECT_EQ_WAIT(completed, thread_completed_, timeout);
+    EXPECT_EQ_WAIT(stopped, thread_stopped_, timeout);
+    EXPECT_EQ_WAIT(deleted, thread_deleted_, timeout);
+  }
+
+  Thread* main_thread_;
+  SlowSignalThread* thread_;
+  bool called_release_;
+
+  int thread_started_;
+  int thread_done_;
+  int thread_completed_;
+  int thread_stopped_;
+  int thread_deleted_;
+};
+
+class OwnerThread : public Thread, public sigslot::has_slots<> {
+ public:
+  explicit OwnerThread(SignalThreadTest* harness)
+      : harness_(harness), has_run_(false) {}
+
+  ~OwnerThread() override { Stop(); }
+
+  void Run() override {
+    SignalThreadTest::SlowSignalThread* signal_thread =
+        new SignalThreadTest::SlowSignalThread(harness_);
+    signal_thread->SignalWorkDone.connect(this, &OwnerThread::OnWorkDone);
+    signal_thread->Start();
+    Thread::Current()->socketserver()->Wait(100, false);
+    signal_thread->Release();
+    // Delete |signal_thread|.
+    signal_thread->Destroy(true);
+    has_run_ = true;
+  }
+
+  bool has_run() { return has_run_; }
+  void OnWorkDone(SignalThread* signal_thread) {
+    FAIL() << " This shouldn't get called.";
+  }
+
+ private:
+  SignalThreadTest* harness_;
+  bool has_run_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(OwnerThread);
+};
+
+// Test for when the main thread goes away while the
+// signal thread is still working.  This may happen
+// when shutting down the process.
+TEST_F(SignalThreadTest, OwnerThreadGoesAway) {
+  // We don't use |thread_| for this test, so destroy it.
+  thread_->Destroy(true);
+
+  {
+    std::unique_ptr<OwnerThread> owner(new OwnerThread(this));
+    main_thread_ = owner.get();
+    owner->Start();
+    while (!owner->has_run()) {
+      Thread::Current()->socketserver()->Wait(10, false);
+    }
+  }
+  // At this point the main thread has gone away.
+  // Give the SignalThread a little time to do its callback,
+  // which will crash if the signal thread doesn't handle
+  // this situation well.
+  Thread::Current()->socketserver()->Wait(500, false);
+}
+
+TEST_F(SignalThreadTest, ThreadFinishes) {
+  thread_->Start();
+  ExpectState(1, 0, 0, 0, 0);
+  ExpectStateWait(1, 1, 1, 0, 1, kTimeout);
+}
+
+TEST_F(SignalThreadTest, ReleasedThreadFinishes) {
+  thread_->Start();
+  ExpectState(1, 0, 0, 0, 0);
+  thread_->Release();
+  called_release_ = true;
+  ExpectState(1, 0, 0, 0, 0);
+  ExpectStateWait(1, 1, 1, 0, 1, kTimeout);
+}
+
+TEST_F(SignalThreadTest, DestroyedThreadCleansUp) {
+  thread_->Start();
+  ExpectState(1, 0, 0, 0, 0);
+  thread_->Destroy(true);
+  ExpectState(1, 0, 0, 1, 1);
+  Thread::Current()->ProcessMessages(0);
+  ExpectState(1, 0, 0, 1, 1);
+}
+
+TEST_F(SignalThreadTest, DeferredDestroyedThreadCleansUp) {
+  thread_->Start();
+  ExpectState(1, 0, 0, 0, 0);
+  thread_->Destroy(false);
+  ExpectState(1, 0, 0, 1, 0);
+  ExpectStateWait(1, 1, 0, 1, 1, kTimeout);
+}
diff --git a/rtc_base/sigslot.cc b/rtc_base/sigslot.cc
new file mode 100644
index 0000000..9d792cb
--- /dev/null
+++ b/rtc_base/sigslot.cc
@@ -0,0 +1,22 @@
+// sigslot.h: Signal/Slot classes
+//
+// Written by Sarah Thompson (sarah@telergy.com) 2002.
+//
+// License: Public domain. You are free to use this code however you like, with
+// the proviso that the author takes on no responsibility or liability for any
+// use.
+
+#include "rtc_base/sigslot.h"
+
+namespace sigslot {
+
+#ifdef _SIGSLOT_HAS_POSIX_THREADS
+
+pthread_mutex_t* multi_threaded_global::get_mutex() {
+  static pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER;
+  return &g_mutex;
+}
+
+#endif  // _SIGSLOT_HAS_POSIX_THREADS
+
+}  // namespace sigslot
diff --git a/rtc_base/sigslot.h b/rtc_base/sigslot.h
new file mode 100644
index 0000000..318aca3
--- /dev/null
+++ b/rtc_base/sigslot.h
@@ -0,0 +1,647 @@
+// sigslot.h: Signal/Slot classes
+//
+// Written by Sarah Thompson (sarah@telergy.com) 2002.
+//
+// License: Public domain. You are free to use this code however you like, with
+// the proviso that the author takes on no responsibility or liability for any
+// use.
+//
+// QUICK DOCUMENTATION
+//
+//        (see also the full documentation at http://sigslot.sourceforge.net/)
+//
+//    #define switches
+//      SIGSLOT_PURE_ISO:
+//        Define this to force ISO C++ compliance. This also disables all of
+//        the thread safety support on platforms where it is available.
+//
+//      SIGSLOT_USE_POSIX_THREADS:
+//        Force use of Posix threads when using a C++ compiler other than gcc
+//        on a platform that supports Posix threads. (When using gcc, this is
+//        the default - use SIGSLOT_PURE_ISO to disable this if necessary)
+//
+//      SIGSLOT_DEFAULT_MT_POLICY:
+//        Where thread support is enabled, this defaults to
+//        multi_threaded_global. Otherwise, the default is single_threaded.
+//        #define this yourself to override the default. In pure ISO mode,
+//        anything other than single_threaded will cause a compiler error.
+//
+//    PLATFORM NOTES
+//
+//      Win32:
+//        On Win32, the WEBRTC_WIN symbol must be #defined. Most mainstream
+//        compilers do this by default, but you may need to define it yourself
+//        if your build environment is less standard. This causes the Win32
+//        thread support to be compiled in and used automatically.
+//
+//      Unix/Linux/BSD, etc.:
+//        If you're using gcc, it is assumed that you have Posix threads
+//        available, so they are used automatically. You can override this (as
+//        under Windows) with the SIGSLOT_PURE_ISO switch. If you're using
+//        something other than gcc but still want to use Posix threads, you
+//        need to #define SIGSLOT_USE_POSIX_THREADS.
+//
+//      ISO C++:
+//        If none of the supported platforms are detected, or if
+//        SIGSLOT_PURE_ISO is defined, all multithreading support is turned
+//        off, along with any code that might cause a pure ISO C++ environment
+//        to complain. Before you ask, gcc -ansi -pedantic won't compile this
+//        library, but gcc -ansi is fine. Pedantic mode seems to throw a lot of
+//        errors that aren't really there. If you feel like investigating this,
+//        please contact the author.
+//
+//
+//    THREADING MODES
+//
+//      single_threaded:
+//        Your program is assumed to be single threaded from the point of view
+//        of signal/slot usage (i.e. all objects using signals and slots are
+//        created and destroyed from a single thread). Behaviour if objects are
+//        destroyed concurrently is undefined (i.e. you'll get the occasional
+//        segmentation fault/memory exception).
+//
+//      multi_threaded_global:
+//        Your program is assumed to be multi threaded. Objects using signals
+//        and slots can be safely created and destroyed from any thread, even
+//        when connections exist. In multi_threaded_global mode, this is
+//        achieved by a single global mutex (actually a critical section on
+//        Windows because they are faster). This option uses less OS resources,
+//        but results in more opportunities for contention, possibly resulting
+//        in more context switches than are strictly necessary.
+//
+//      multi_threaded_local:
+//        Behaviour in this mode is essentially the same as
+//        multi_threaded_global, except that each signal, and each object that
+//        inherits has_slots, all have their own mutex/critical section. In
+//        practice, this means that mutex collisions (and hence context
+//        switches) only happen if they are absolutely essential. However, on
+//        some platforms, creating a lot of mutexes can slow down the whole OS,
+//        so use this option with care.
+//
+//    USING THE LIBRARY
+//
+//      See the full documentation at http://sigslot.sourceforge.net/
+//
+// Libjingle specific:
+//
+// This file has been modified such that has_slots and signalx do not have to be
+// using the same threading requirements. E.g. it is possible to connect a
+// has_slots<single_threaded> and signal0<multi_threaded_local> or
+// has_slots<multi_threaded_local> and signal0<single_threaded>.
+// If has_slots is single threaded the user must ensure that it is not trying
+// to connect or disconnect to signalx concurrently or data race may occur.
+// If signalx is single threaded the user must ensure that disconnect, connect
+// or signal is not happening concurrently or data race may occur.
+
+#ifndef RTC_BASE_SIGSLOT_H_
+#define RTC_BASE_SIGSLOT_H_
+
+#include <stdlib.h>
+#include <cstring>
+#include <list>
+#include <set>
+
+// On our copy of sigslot.h, we set single threading as default.
+#define SIGSLOT_DEFAULT_MT_POLICY single_threaded
+
+#if defined(SIGSLOT_PURE_ISO) ||                   \
+    (!defined(WEBRTC_WIN) && !defined(__GNUG__) && \
+     !defined(SIGSLOT_USE_POSIX_THREADS))
+#define _SIGSLOT_SINGLE_THREADED
+#elif defined(WEBRTC_WIN)
+#define _SIGSLOT_HAS_WIN32_THREADS
+#if !defined(WIN32_LEAN_AND_MEAN)
+#define WIN32_LEAN_AND_MEAN
+#endif
+#include "rtc_base/win32.h"
+#elif defined(__GNUG__) || defined(SIGSLOT_USE_POSIX_THREADS)
+#define _SIGSLOT_HAS_POSIX_THREADS
+#include <pthread.h>
+#else
+#define _SIGSLOT_SINGLE_THREADED
+#endif
+
+#ifndef SIGSLOT_DEFAULT_MT_POLICY
+#ifdef _SIGSLOT_SINGLE_THREADED
+#define SIGSLOT_DEFAULT_MT_POLICY single_threaded
+#else
+#define SIGSLOT_DEFAULT_MT_POLICY multi_threaded_local
+#endif
+#endif
+
+// TODO: change this namespace to rtc?
+namespace sigslot {
+
+class single_threaded {
+ public:
+  void lock() {}
+  void unlock() {}
+};
+
+#ifdef _SIGSLOT_HAS_WIN32_THREADS
+// The multi threading policies only get compiled in if they are enabled.
+class multi_threaded_global {
+ public:
+  multi_threaded_global() {
+    static bool isinitialised = false;
+
+    if (!isinitialised) {
+      InitializeCriticalSection(get_critsec());
+      isinitialised = true;
+    }
+  }
+
+  void lock() { EnterCriticalSection(get_critsec()); }
+
+  void unlock() { LeaveCriticalSection(get_critsec()); }
+
+ private:
+  CRITICAL_SECTION* get_critsec() {
+    static CRITICAL_SECTION g_critsec;
+    return &g_critsec;
+  }
+};
+
+class multi_threaded_local {
+ public:
+  multi_threaded_local() { InitializeCriticalSection(&m_critsec); }
+
+  multi_threaded_local(const multi_threaded_local&) {
+    InitializeCriticalSection(&m_critsec);
+  }
+
+  ~multi_threaded_local() { DeleteCriticalSection(&m_critsec); }
+
+  void lock() { EnterCriticalSection(&m_critsec); }
+
+  void unlock() { LeaveCriticalSection(&m_critsec); }
+
+ private:
+  CRITICAL_SECTION m_critsec;
+};
+#endif  // _SIGSLOT_HAS_WIN32_THREADS
+
+#ifdef _SIGSLOT_HAS_POSIX_THREADS
+// The multi threading policies only get compiled in if they are enabled.
+class multi_threaded_global {
+ public:
+  void lock() { pthread_mutex_lock(get_mutex()); }
+  void unlock() { pthread_mutex_unlock(get_mutex()); }
+
+ private:
+  static pthread_mutex_t* get_mutex();
+};
+
+class multi_threaded_local {
+ public:
+  multi_threaded_local() { pthread_mutex_init(&m_mutex, nullptr); }
+  multi_threaded_local(const multi_threaded_local&) {
+    pthread_mutex_init(&m_mutex, nullptr);
+  }
+  ~multi_threaded_local() { pthread_mutex_destroy(&m_mutex); }
+  void lock() { pthread_mutex_lock(&m_mutex); }
+  void unlock() { pthread_mutex_unlock(&m_mutex); }
+
+ private:
+  pthread_mutex_t m_mutex;
+};
+#endif  // _SIGSLOT_HAS_POSIX_THREADS
+
+template <class mt_policy>
+class lock_block {
+ public:
+  mt_policy* m_mutex;
+
+  lock_block(mt_policy* mtx) : m_mutex(mtx) { m_mutex->lock(); }
+
+  ~lock_block() { m_mutex->unlock(); }
+};
+
+class _signal_base_interface;
+
+class has_slots_interface {
+ private:
+  typedef void (*signal_connect_t)(has_slots_interface* self,
+                                   _signal_base_interface* sender);
+  typedef void (*signal_disconnect_t)(has_slots_interface* self,
+                                      _signal_base_interface* sender);
+  typedef void (*disconnect_all_t)(has_slots_interface* self);
+
+  const signal_connect_t m_signal_connect;
+  const signal_disconnect_t m_signal_disconnect;
+  const disconnect_all_t m_disconnect_all;
+
+ protected:
+  has_slots_interface(signal_connect_t conn,
+                      signal_disconnect_t disc,
+                      disconnect_all_t disc_all)
+      : m_signal_connect(conn),
+        m_signal_disconnect(disc),
+        m_disconnect_all(disc_all) {}
+
+  // Doesn't really need to be virtual, but is for backwards compatibility
+  // (it was virtual in a previous version of sigslot).
+  virtual ~has_slots_interface() {}
+
+ public:
+  void signal_connect(_signal_base_interface* sender) {
+    m_signal_connect(this, sender);
+  }
+
+  void signal_disconnect(_signal_base_interface* sender) {
+    m_signal_disconnect(this, sender);
+  }
+
+  void disconnect_all() { m_disconnect_all(this); }
+};
+
+class _signal_base_interface {
+ private:
+  typedef void (*slot_disconnect_t)(_signal_base_interface* self,
+                                    has_slots_interface* pslot);
+  typedef void (*slot_duplicate_t)(_signal_base_interface* self,
+                                   const has_slots_interface* poldslot,
+                                   has_slots_interface* pnewslot);
+
+  const slot_disconnect_t m_slot_disconnect;
+  const slot_duplicate_t m_slot_duplicate;
+
+ protected:
+  _signal_base_interface(slot_disconnect_t disc, slot_duplicate_t dupl)
+      : m_slot_disconnect(disc), m_slot_duplicate(dupl) {}
+
+  ~_signal_base_interface() {}
+
+ public:
+  void slot_disconnect(has_slots_interface* pslot) {
+    m_slot_disconnect(this, pslot);
+  }
+
+  void slot_duplicate(const has_slots_interface* poldslot,
+                      has_slots_interface* pnewslot) {
+    m_slot_duplicate(this, poldslot, pnewslot);
+  }
+};
+
+class _opaque_connection {
+ private:
+  typedef void (*emit_t)(const _opaque_connection*);
+  template <typename FromT, typename ToT>
+  union union_caster {
+    FromT from;
+    ToT to;
+  };
+
+  emit_t pemit;
+  has_slots_interface* pdest;
+  // Pointers to member functions may be up to 16 bytes for virtual classes,
+  // so make sure we have enough space to store it.
+  unsigned char pmethod[16];
+
+ public:
+  template <typename DestT, typename... Args>
+  _opaque_connection(DestT* pd, void (DestT::*pm)(Args...)) : pdest(pd) {
+    typedef void (DestT::*pm_t)(Args...);
+    static_assert(sizeof(pm_t) <= sizeof(pmethod),
+                  "Size of slot function pointer too large.");
+
+    std::memcpy(pmethod, &pm, sizeof(pm_t));
+
+    typedef void (*em_t)(const _opaque_connection* self, Args...);
+    union_caster<em_t, emit_t> caster2;
+    caster2.from = &_opaque_connection::emitter<DestT, Args...>;
+    pemit = caster2.to;
+  }
+
+  has_slots_interface* getdest() const { return pdest; }
+
+  _opaque_connection duplicate(has_slots_interface* newtarget) const {
+    _opaque_connection res = *this;
+    res.pdest = newtarget;
+    return res;
+  }
+
+  // Just calls the stored "emitter" function pointer stored at construction
+  // time.
+  template <typename... Args>
+  void emit(Args... args) const {
+    typedef void (*em_t)(const _opaque_connection*, Args...);
+    union_caster<emit_t, em_t> caster;
+    caster.from = pemit;
+    (caster.to)(this, args...);
+  }
+
+ private:
+  template <typename DestT, typename... Args>
+  static void emitter(const _opaque_connection* self, Args... args) {
+    typedef void (DestT::*pm_t)(Args...);
+    pm_t pm;
+    std::memcpy(&pm, self->pmethod, sizeof(pm_t));
+    (static_cast<DestT*>(self->pdest)->*(pm))(args...);
+  }
+};
+
+template <class mt_policy>
+class _signal_base : public _signal_base_interface, public mt_policy {
+ protected:
+  typedef std::list<_opaque_connection> connections_list;
+
+  _signal_base()
+      : _signal_base_interface(&_signal_base::do_slot_disconnect,
+                               &_signal_base::do_slot_duplicate),
+        m_current_iterator(m_connected_slots.end()) {}
+
+  ~_signal_base() { disconnect_all(); }
+
+ private:
+  _signal_base& operator=(_signal_base const& that);
+
+ public:
+  _signal_base(const _signal_base& o)
+      : _signal_base_interface(&_signal_base::do_slot_disconnect,
+                               &_signal_base::do_slot_duplicate),
+        m_current_iterator(m_connected_slots.end()) {
+    lock_block<mt_policy> lock(this);
+    for (const auto& connection : o.m_connected_slots) {
+      connection.getdest()->signal_connect(this);
+      m_connected_slots.push_back(connection);
+    }
+  }
+
+  bool is_empty() {
+    lock_block<mt_policy> lock(this);
+    return m_connected_slots.empty();
+  }
+
+  void disconnect_all() {
+    lock_block<mt_policy> lock(this);
+
+    while (!m_connected_slots.empty()) {
+      has_slots_interface* pdest = m_connected_slots.front().getdest();
+      m_connected_slots.pop_front();
+      pdest->signal_disconnect(static_cast<_signal_base_interface*>(this));
+    }
+    // If disconnect_all is called while the signal is firing, advance the
+    // current slot iterator to the end to avoid an invalidated iterator from
+    // being dereferenced.
+    m_current_iterator = m_connected_slots.end();
+  }
+
+#if !defined(NDEBUG)
+  bool connected(has_slots_interface* pclass) {
+    lock_block<mt_policy> lock(this);
+    connections_list::const_iterator it = m_connected_slots.begin();
+    connections_list::const_iterator itEnd = m_connected_slots.end();
+    while (it != itEnd) {
+      if (it->getdest() == pclass)
+        return true;
+      ++it;
+    }
+    return false;
+  }
+#endif
+
+  void disconnect(has_slots_interface* pclass) {
+    lock_block<mt_policy> lock(this);
+    connections_list::iterator it = m_connected_slots.begin();
+    connections_list::iterator itEnd = m_connected_slots.end();
+
+    while (it != itEnd) {
+      if (it->getdest() == pclass) {
+        // If we're currently using this iterator because the signal is firing,
+        // advance it to avoid it being invalidated.
+        if (m_current_iterator == it) {
+          m_current_iterator = m_connected_slots.erase(it);
+        } else {
+          m_connected_slots.erase(it);
+        }
+        pclass->signal_disconnect(static_cast<_signal_base_interface*>(this));
+        return;
+      }
+      ++it;
+    }
+  }
+
+ private:
+  static void do_slot_disconnect(_signal_base_interface* p,
+                                 has_slots_interface* pslot) {
+    _signal_base* const self = static_cast<_signal_base*>(p);
+    lock_block<mt_policy> lock(self);
+    connections_list::iterator it = self->m_connected_slots.begin();
+    connections_list::iterator itEnd = self->m_connected_slots.end();
+
+    while (it != itEnd) {
+      connections_list::iterator itNext = it;
+      ++itNext;
+
+      if (it->getdest() == pslot) {
+        // If we're currently using this iterator because the signal is firing,
+        // advance it to avoid it being invalidated.
+        if (self->m_current_iterator == it) {
+          self->m_current_iterator = self->m_connected_slots.erase(it);
+        } else {
+          self->m_connected_slots.erase(it);
+        }
+      }
+
+      it = itNext;
+    }
+  }
+
+  static void do_slot_duplicate(_signal_base_interface* p,
+                                const has_slots_interface* oldtarget,
+                                has_slots_interface* newtarget) {
+    _signal_base* const self = static_cast<_signal_base*>(p);
+    lock_block<mt_policy> lock(self);
+    connections_list::iterator it = self->m_connected_slots.begin();
+    connections_list::iterator itEnd = self->m_connected_slots.end();
+
+    while (it != itEnd) {
+      if (it->getdest() == oldtarget) {
+        self->m_connected_slots.push_back(it->duplicate(newtarget));
+      }
+
+      ++it;
+    }
+  }
+
+ protected:
+  connections_list m_connected_slots;
+
+  // Used to handle a slot being disconnected while a signal is
+  // firing (iterating m_connected_slots).
+  connections_list::iterator m_current_iterator;
+  bool m_erase_current_iterator = false;
+};
+
+template <class mt_policy = SIGSLOT_DEFAULT_MT_POLICY>
+class has_slots : public has_slots_interface, public mt_policy {
+ private:
+  typedef std::set<_signal_base_interface*> sender_set;
+  typedef sender_set::const_iterator const_iterator;
+
+ public:
+  has_slots()
+      : has_slots_interface(&has_slots::do_signal_connect,
+                            &has_slots::do_signal_disconnect,
+                            &has_slots::do_disconnect_all) {}
+
+  has_slots(has_slots const& o)
+      : has_slots_interface(&has_slots::do_signal_connect,
+                            &has_slots::do_signal_disconnect,
+                            &has_slots::do_disconnect_all) {
+    lock_block<mt_policy> lock(this);
+    for (auto* sender : o.m_senders) {
+      sender->slot_duplicate(&o, this);
+      m_senders.insert(sender);
+    }
+  }
+
+  ~has_slots() { this->disconnect_all(); }
+
+ private:
+  has_slots& operator=(has_slots const&);
+
+  static void do_signal_connect(has_slots_interface* p,
+                                _signal_base_interface* sender) {
+    has_slots* const self = static_cast<has_slots*>(p);
+    lock_block<mt_policy> lock(self);
+    self->m_senders.insert(sender);
+  }
+
+  static void do_signal_disconnect(has_slots_interface* p,
+                                   _signal_base_interface* sender) {
+    has_slots* const self = static_cast<has_slots*>(p);
+    lock_block<mt_policy> lock(self);
+    self->m_senders.erase(sender);
+  }
+
+  static void do_disconnect_all(has_slots_interface* p) {
+    has_slots* const self = static_cast<has_slots*>(p);
+    lock_block<mt_policy> lock(self);
+    while (!self->m_senders.empty()) {
+      std::set<_signal_base_interface*> senders;
+      senders.swap(self->m_senders);
+      const_iterator it = senders.begin();
+      const_iterator itEnd = senders.end();
+
+      while (it != itEnd) {
+        _signal_base_interface* s = *it;
+        ++it;
+        s->slot_disconnect(p);
+      }
+    }
+  }
+
+ private:
+  sender_set m_senders;
+};
+
+template <class mt_policy, typename... Args>
+class signal_with_thread_policy : public _signal_base<mt_policy> {
+ private:
+  typedef _signal_base<mt_policy> base;
+
+ protected:
+  typedef typename base::connections_list connections_list;
+
+ public:
+  signal_with_thread_policy() {}
+
+  template <class desttype>
+  void connect(desttype* pclass, void (desttype::*pmemfun)(Args...)) {
+    lock_block<mt_policy> lock(this);
+    this->m_connected_slots.push_back(_opaque_connection(pclass, pmemfun));
+    pclass->signal_connect(static_cast<_signal_base_interface*>(this));
+  }
+
+  void emit(Args... args) {
+    lock_block<mt_policy> lock(this);
+    this->m_current_iterator = this->m_connected_slots.begin();
+    while (this->m_current_iterator != this->m_connected_slots.end()) {
+      _opaque_connection const& conn = *this->m_current_iterator;
+      ++(this->m_current_iterator);
+      conn.emit<Args...>(args...);
+    }
+  }
+
+  void operator()(Args... args) { emit(args...); }
+};
+
+// Alias with default thread policy. Needed because both default arguments
+// and variadic template arguments must go at the end of the list, so we
+// can't have both at once.
+template <typename... Args>
+using signal = signal_with_thread_policy<SIGSLOT_DEFAULT_MT_POLICY, Args...>;
+
+// The previous verion of sigslot didn't use variadic templates, so you would
+// need to write "sigslot::signal2<Arg1, Arg2>", for example.
+// Now you can just write "sigslot::signal<Arg1, Arg2>", but these aliases
+// exist for backwards compatibility.
+template <typename mt_policy = SIGSLOT_DEFAULT_MT_POLICY>
+using signal0 = signal_with_thread_policy<mt_policy>;
+
+template <typename A1, typename mt_policy = SIGSLOT_DEFAULT_MT_POLICY>
+using signal1 = signal_with_thread_policy<mt_policy, A1>;
+
+template <typename A1,
+          typename A2,
+          typename mt_policy = SIGSLOT_DEFAULT_MT_POLICY>
+using signal2 = signal_with_thread_policy<mt_policy, A1, A2>;
+
+template <typename A1,
+          typename A2,
+          typename A3,
+          typename mt_policy = SIGSLOT_DEFAULT_MT_POLICY>
+using signal3 = signal_with_thread_policy<mt_policy, A1, A2, A3>;
+
+template <typename A1,
+          typename A2,
+          typename A3,
+          typename A4,
+          typename mt_policy = SIGSLOT_DEFAULT_MT_POLICY>
+using signal4 = signal_with_thread_policy<mt_policy, A1, A2, A3, A4>;
+
+template <typename A1,
+          typename A2,
+          typename A3,
+          typename A4,
+          typename A5,
+          typename mt_policy = SIGSLOT_DEFAULT_MT_POLICY>
+using signal5 = signal_with_thread_policy<mt_policy, A1, A2, A3, A4, A5>;
+
+template <typename A1,
+          typename A2,
+          typename A3,
+          typename A4,
+          typename A5,
+          typename A6,
+          typename mt_policy = SIGSLOT_DEFAULT_MT_POLICY>
+using signal6 = signal_with_thread_policy<mt_policy, A1, A2, A3, A4, A5, A6>;
+
+template <typename A1,
+          typename A2,
+          typename A3,
+          typename A4,
+          typename A5,
+          typename A6,
+          typename A7,
+          typename mt_policy = SIGSLOT_DEFAULT_MT_POLICY>
+using signal7 =
+    signal_with_thread_policy<mt_policy, A1, A2, A3, A4, A5, A6, A7>;
+
+template <typename A1,
+          typename A2,
+          typename A3,
+          typename A4,
+          typename A5,
+          typename A6,
+          typename A7,
+          typename A8,
+          typename mt_policy = SIGSLOT_DEFAULT_MT_POLICY>
+using signal8 =
+    signal_with_thread_policy<mt_policy, A1, A2, A3, A4, A5, A6, A7, A8>;
+
+}  // namespace sigslot
+
+#endif  // RTC_BASE_SIGSLOT_H_
diff --git a/rtc_base/sigslot_unittest.cc b/rtc_base/sigslot_unittest.cc
new file mode 100644
index 0000000..234bf45
--- /dev/null
+++ b/rtc_base/sigslot_unittest.cc
@@ -0,0 +1,390 @@
+/*
+ *  Copyright 2012 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/sigslot.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/sigslotrepeater.h"
+
+// This function, when passed a has_slots or signalx, will break the build if
+// its threading requirement is not single threaded
+static bool TemplateIsST(const sigslot::single_threaded* p) {
+  return true;
+}
+// This function, when passed a has_slots or signalx, will break the build if
+// its threading requirement is not multi threaded
+static bool TemplateIsMT(const sigslot::multi_threaded_local* p) {
+  return true;
+}
+
+class SigslotDefault : public testing::Test, public sigslot::has_slots<> {
+ protected:
+  sigslot::signal0<> signal_;
+};
+
+template<class slot_policy = sigslot::single_threaded,
+         class signal_policy = sigslot::single_threaded>
+class SigslotReceiver : public sigslot::has_slots<slot_policy> {
+ public:
+  SigslotReceiver() : signal_(nullptr), signal_count_(0) {}
+  ~SigslotReceiver() {
+  }
+
+  // Provide copy constructor so that tests can exercise the has_slots copy
+  // constructor.
+  SigslotReceiver(const SigslotReceiver&) = default;
+
+  void Connect(sigslot::signal0<signal_policy>* signal) {
+    if (!signal) return;
+    Disconnect();
+    signal_ = signal;
+    signal->connect(this,
+                    &SigslotReceiver<slot_policy, signal_policy>::OnSignal);
+  }
+  void Disconnect() {
+    if (!signal_) return;
+    signal_->disconnect(this);
+    signal_ = nullptr;
+  }
+  void OnSignal() {
+    ++signal_count_;
+  }
+  int signal_count() { return signal_count_; }
+
+ private:
+  sigslot::signal0<signal_policy>* signal_;
+  int signal_count_;
+};
+
+template<class slot_policy = sigslot::single_threaded,
+         class mt_signal_policy = sigslot::multi_threaded_local>
+class SigslotSlotTest : public testing::Test {
+ protected:
+  SigslotSlotTest() {
+    mt_signal_policy mt_policy;
+    TemplateIsMT(&mt_policy);
+  }
+
+  virtual void SetUp() {
+    Connect();
+  }
+  virtual void TearDown() {
+    Disconnect();
+  }
+
+  void Disconnect() {
+    st_receiver_.Disconnect();
+    mt_receiver_.Disconnect();
+  }
+
+  void Connect() {
+    st_receiver_.Connect(&SignalSTLoopback);
+    mt_receiver_.Connect(&SignalMTLoopback);
+  }
+
+  int st_loop_back_count() { return st_receiver_.signal_count(); }
+  int mt_loop_back_count() { return mt_receiver_.signal_count(); }
+
+  sigslot::signal0<> SignalSTLoopback;
+  SigslotReceiver<slot_policy, sigslot::single_threaded> st_receiver_;
+  sigslot::signal0<mt_signal_policy> SignalMTLoopback;
+  SigslotReceiver<slot_policy, mt_signal_policy> mt_receiver_;
+};
+
+typedef SigslotSlotTest<> SigslotSTSlotTest;
+typedef SigslotSlotTest<sigslot::multi_threaded_local,
+                        sigslot::multi_threaded_local> SigslotMTSlotTest;
+
+class multi_threaded_local_fake : public sigslot::multi_threaded_local {
+ public:
+  multi_threaded_local_fake() : lock_count_(0), unlock_count_(0) {
+  }
+
+  void lock() { ++lock_count_; }
+  void unlock() { ++unlock_count_; }
+
+  int lock_count() { return lock_count_; }
+
+  bool InCriticalSection() { return lock_count_ != unlock_count_; }
+
+ protected:
+  int lock_count_;
+  int unlock_count_;
+};
+
+typedef SigslotSlotTest<multi_threaded_local_fake,
+                        multi_threaded_local_fake> SigslotMTLockBase;
+
+class SigslotMTLockTest : public SigslotMTLockBase {
+ protected:
+  SigslotMTLockTest() {}
+
+  void SetUp() override {
+    EXPECT_EQ(0, SlotLockCount());
+    SigslotMTLockBase::SetUp();
+    // Connects to two signals (ST and MT). However,
+    // SlotLockCount() only gets the count for the
+    // MT signal (there are two separate SigslotReceiver which
+    // keep track of their own count).
+    EXPECT_EQ(1, SlotLockCount());
+  }
+  void TearDown() override {
+    const int previous_lock_count = SlotLockCount();
+    SigslotMTLockBase::TearDown();
+    // Disconnects from two signals. Note analogous to SetUp().
+    EXPECT_EQ(previous_lock_count + 1, SlotLockCount());
+  }
+
+  int SlotLockCount() { return mt_receiver_.lock_count(); }
+  void Signal() { SignalMTLoopback(); }
+  int SignalLockCount() { return SignalMTLoopback.lock_count(); }
+  int signal_count() { return mt_loop_back_count(); }
+  bool InCriticalSection() { return SignalMTLoopback.InCriticalSection(); }
+};
+
+// This test will always succeed. However, if the default template instantiation
+// changes from single threaded to multi threaded it will break the build here.
+TEST_F(SigslotDefault, DefaultIsST) {
+  EXPECT_TRUE(TemplateIsST(this));
+  EXPECT_TRUE(TemplateIsST(&signal_));
+}
+
+// ST slot, ST signal
+TEST_F(SigslotSTSlotTest, STLoopbackTest) {
+  SignalSTLoopback();
+  EXPECT_EQ(1, st_loop_back_count());
+  EXPECT_EQ(0, mt_loop_back_count());
+}
+
+// ST slot, MT signal
+TEST_F(SigslotSTSlotTest, MTLoopbackTest) {
+  SignalMTLoopback();
+  EXPECT_EQ(1, mt_loop_back_count());
+  EXPECT_EQ(0, st_loop_back_count());
+}
+
+// ST slot, both ST and MT (separate) signal
+TEST_F(SigslotSTSlotTest, AllLoopbackTest) {
+  SignalSTLoopback();
+  SignalMTLoopback();
+  EXPECT_EQ(1, mt_loop_back_count());
+  EXPECT_EQ(1, st_loop_back_count());
+}
+
+TEST_F(SigslotSTSlotTest, Reconnect) {
+  SignalSTLoopback();
+  SignalMTLoopback();
+  EXPECT_EQ(1, mt_loop_back_count());
+  EXPECT_EQ(1, st_loop_back_count());
+  Disconnect();
+  SignalSTLoopback();
+  SignalMTLoopback();
+  EXPECT_EQ(1, mt_loop_back_count());
+  EXPECT_EQ(1, st_loop_back_count());
+  Connect();
+  SignalSTLoopback();
+  SignalMTLoopback();
+  EXPECT_EQ(2, mt_loop_back_count());
+  EXPECT_EQ(2, st_loop_back_count());
+}
+
+// MT slot, ST signal
+TEST_F(SigslotMTSlotTest, STLoopbackTest) {
+  SignalSTLoopback();
+  EXPECT_EQ(1, st_loop_back_count());
+  EXPECT_EQ(0, mt_loop_back_count());
+}
+
+// MT slot, MT signal
+TEST_F(SigslotMTSlotTest, MTLoopbackTest) {
+  SignalMTLoopback();
+  EXPECT_EQ(1, mt_loop_back_count());
+  EXPECT_EQ(0, st_loop_back_count());
+}
+
+// MT slot, both ST and MT (separate) signal
+TEST_F(SigslotMTSlotTest, AllLoopbackTest) {
+  SignalMTLoopback();
+  SignalSTLoopback();
+  EXPECT_EQ(1, st_loop_back_count());
+  EXPECT_EQ(1, mt_loop_back_count());
+}
+
+// Test that locks are acquired and released correctly.
+TEST_F(SigslotMTLockTest, LockSanity) {
+  const int lock_count = SignalLockCount();
+  Signal();
+  EXPECT_FALSE(InCriticalSection());
+  EXPECT_EQ(lock_count + 1, SignalLockCount());
+  EXPECT_EQ(1, signal_count());
+}
+
+// Destroy signal and slot in different orders.
+TEST(SigslotDestructionOrder, SignalFirst) {
+  sigslot::signal0<>* signal = new sigslot::signal0<>;
+  SigslotReceiver<>* receiver = new SigslotReceiver<>();
+  receiver->Connect(signal);
+  (*signal)();
+  EXPECT_EQ(1, receiver->signal_count());
+  delete signal;
+  delete receiver;
+}
+
+TEST(SigslotDestructionOrder, SlotFirst) {
+  sigslot::signal0<>* signal = new sigslot::signal0<>;
+  SigslotReceiver<>* receiver = new SigslotReceiver<>();
+  receiver->Connect(signal);
+  (*signal)();
+  EXPECT_EQ(1, receiver->signal_count());
+
+  delete receiver;
+  (*signal)();
+  delete signal;
+}
+
+// Test that if a signal is copied, its slot connections are copied as well.
+TEST(SigslotTest, CopyConnectedSignal) {
+  sigslot::signal<> signal;
+  SigslotReceiver<> receiver;
+  receiver.Connect(&signal);
+
+  // Fire the copied signal, expecting the receiver to be notified.
+  sigslot::signal<> copied_signal(signal);
+  copied_signal();
+  EXPECT_EQ(1, receiver.signal_count());
+}
+
+// Test that if a slot is copied, its signal connections are copied as well.
+TEST(SigslotTest, CopyConnectedSlot) {
+  sigslot::signal<> signal;
+  SigslotReceiver<> receiver;
+  receiver.Connect(&signal);
+
+  // Fire the signal after copying the receiver, expecting the copied receiver
+  // to be notified.
+  SigslotReceiver<> copied_receiver(receiver);
+  signal();
+  EXPECT_EQ(1, copied_receiver.signal_count());
+}
+
+// Just used for the test below.
+class Disconnector : public sigslot::has_slots<> {
+ public:
+  Disconnector(SigslotReceiver<>* receiver1, SigslotReceiver<>* receiver2)
+      : receiver1_(receiver1), receiver2_(receiver2) {}
+
+  void Connect(sigslot::signal<>* signal) {
+    signal_ = signal;
+    signal->connect(this, &Disconnector::Disconnect);
+  }
+
+ private:
+  void Disconnect() {
+    receiver1_->Disconnect();
+    receiver2_->Disconnect();
+    signal_->disconnect(this);
+  }
+
+  sigslot::signal<>* signal_;
+  SigslotReceiver<>* receiver1_;
+  SigslotReceiver<>* receiver2_;
+};
+
+// Test that things work as expected if a signal is disconnected from a slot
+// while it's firing.
+TEST(SigslotTest, DisconnectFromSignalWhileFiring) {
+  sigslot::signal<> signal;
+  SigslotReceiver<> receiver1;
+  SigslotReceiver<> receiver2;
+  SigslotReceiver<> receiver3;
+  Disconnector disconnector(&receiver1, &receiver2);
+
+  // From this ordering, receiver1 should receive the signal, then the
+  // disconnector will be invoked, causing receiver2 to be disconnected before
+  // it receives the signal. And receiver3 should also receive the signal,
+  // since it was never disconnected.
+  receiver1.Connect(&signal);
+  disconnector.Connect(&signal);
+  receiver2.Connect(&signal);
+  receiver3.Connect(&signal);
+  signal();
+
+  EXPECT_EQ(1, receiver1.signal_count());
+  EXPECT_EQ(0, receiver2.signal_count());
+  EXPECT_EQ(1, receiver3.signal_count());
+}
+
+// Uses disconnect_all instead of disconnect.
+class Disconnector2 : public sigslot::has_slots<> {
+ public:
+  void Connect(sigslot::signal<>* signal) {
+    signal_ = signal;
+    signal->connect(this, &Disconnector2::Disconnect);
+  }
+
+ private:
+  void Disconnect() {
+    signal_->disconnect_all();
+  }
+
+  sigslot::signal<>* signal_;
+};
+
+// Test that things work as expected if a signal is disconnected from a slot
+// while it's firing using disconnect_all.
+TEST(SigslotTest, CallDisconnectAllWhileSignalFiring) {
+  sigslot::signal<> signal;
+  SigslotReceiver<> receiver1;
+  SigslotReceiver<> receiver2;
+  Disconnector2 disconnector;
+
+  // From this ordering, receiver1 should receive the signal, then the
+  // disconnector will be invoked, causing receiver2 to be disconnected before
+  // it receives the signal.
+  receiver1.Connect(&signal);
+  disconnector.Connect(&signal);
+  receiver2.Connect(&signal);
+  signal();
+
+  EXPECT_EQ(1, receiver1.signal_count());
+  EXPECT_EQ(0, receiver2.signal_count());
+}
+
+// Basic test that a sigslot repeater works.
+TEST(SigslotRepeaterTest, RepeatsSignalsAfterRepeatCalled) {
+  sigslot::signal<> signal;
+  sigslot::repeater<> repeater;
+  repeater.repeat(signal);
+  // Note that receiver is connected to the repeater, not directly to the
+  // source signal.
+  SigslotReceiver<> receiver;
+  receiver.Connect(&repeater);
+  // The repeater should repeat the signal, causing the receiver to see it.
+  signal();
+  EXPECT_EQ(1, receiver.signal_count());
+  // Repeat another signal for good measure.
+  signal();
+  EXPECT_EQ(2, receiver.signal_count());
+}
+
+// After calling "stop", a repeater should stop repeating signals.
+TEST(SigslotRepeaterTest, StopsRepeatingSignalsAfterStopCalled) {
+  // Same setup as above test.
+  sigslot::signal<> signal;
+  sigslot::repeater<> repeater;
+  repeater.repeat(signal);
+  SigslotReceiver<> receiver;
+  receiver.Connect(&repeater);
+  signal();
+  ASSERT_EQ(1, receiver.signal_count());
+  // Now call stop. The next signal should NOT propagate to the receiver.
+  repeater.stop(signal);
+  signal();
+  EXPECT_EQ(1, receiver.signal_count());
+}
diff --git a/rtc_base/sigslotrepeater.h b/rtc_base/sigslotrepeater.h
new file mode 100644
index 0000000..ca44854
--- /dev/null
+++ b/rtc_base/sigslotrepeater.h
@@ -0,0 +1,56 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SIGSLOTREPEATER_H__
+#define RTC_BASE_SIGSLOTREPEATER_H__
+
+// repeaters are both signals and slots, which are designed as intermediate
+// pass-throughs for signals and slots which don't know about each other (for
+// modularity or encapsulation).  This eliminates the need to declare a signal
+// handler whose sole purpose is to fire another signal.  The repeater connects
+// to the originating signal using the 'repeat' method.  When the repeated
+// signal fires, the repeater will also fire.
+//
+// TODO(deadbeef): Actually use this, after we decide on some style points on
+// using signals, so it doesn't get deleted again.
+
+#include "rtc_base/sigslot.h"
+
+namespace sigslot {
+
+template <class mt_policy, typename... Args>
+class repeater_with_thread_policy
+    : public signal_with_thread_policy<mt_policy, Args...>,
+      public has_slots<mt_policy> {
+ private:
+  // These typedefs are just to make the code below more readable. Code using
+  // repeaters shouldn't need to reference these types directly.
+  typedef signal_with_thread_policy<mt_policy, Args...> base_type;
+  typedef repeater_with_thread_policy<mt_policy, Args...> this_type;
+
+ public:
+  repeater_with_thread_policy() {}
+  repeater_with_thread_policy(const this_type& s) : base_type(s) {}
+
+  void reemit(Args... args) { base_type::emit(args...); }
+  void repeat(base_type& s) { s.connect(this, &this_type::reemit); }
+  void stop(base_type& s) { s.disconnect(this); }
+};
+
+// Alias with default thread policy. Needed because both default arguments
+// and variadic template arguments must go at the end of the list, so we
+// can't have both at once.
+template <typename... Args>
+using repeater =
+    repeater_with_thread_policy<SIGSLOT_DEFAULT_MT_POLICY, Args...>;
+
+}  // namespace sigslot
+
+#endif  // RTC_BASE_SIGSLOTREPEATER_H__
diff --git a/rtc_base/sigslottester.h b/rtc_base/sigslottester.h
new file mode 100755
index 0000000..04c6302
--- /dev/null
+++ b/rtc_base/sigslottester.h
@@ -0,0 +1,216 @@
+// This file was GENERATED by command:
+//     pump.py sigslottester.h.pump
+// DO NOT EDIT BY HAND!!!
+
+/*
+ *  Copyright 2014 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SIGSLOTTESTER_H_
+#define RTC_BASE_SIGSLOTTESTER_H_
+
+// To generate sigslottester.h from sigslottester.h.pump, execute:
+// /home/build/google3/third_party/gtest/scripts/pump.py sigslottester.h.pump
+
+
+// SigslotTester(s) are utility classes to check if signals owned by an
+// object are being invoked at the right time and with the right arguments.
+// They are meant to be used in tests. Tests must provide "capture" pointers
+// (i.e. address of variables) where the arguments from the signal callback
+// can be stored.
+//
+// Example:
+//   /* Some signal */
+//   sigslot::signal1<const std::string&> foo;
+//
+//   /* We want to monitor foo in some test. Note how signal argument is
+//      const std::string&, but capture-type is std::string. Capture type
+//      must be type that can be assigned to. */
+//   std::string capture;
+//   SigslotTester1<const std::string&, std::string> slot(&foo, &capture);
+//   foo.emit("hello");
+//   EXPECT_EQ(1, slot.callback_count());
+//   EXPECT_EQ("hello", capture);
+//   /* See unit-tests for more examples */
+
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/sigslot.h"
+
+namespace rtc {
+
+// Base version for testing signals that passes no arguments.
+class SigslotTester0 : public sigslot::has_slots<> {
+ public:
+  explicit SigslotTester0(sigslot::signal0<>* signal) : callback_count_(0) {
+    signal->connect(this, &SigslotTester0::OnSignalCallback);
+  }
+
+  int callback_count() const { return callback_count_; }
+
+ private:
+  void OnSignalCallback() { callback_count_++; }
+  int callback_count_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(SigslotTester0);
+};
+
+// Versions below are for testing signals that pass arguments. For all the
+// templates below:
+// - A1-A5 is the type of the argument i in the callback. Signals may and often
+//   do use const-references here for efficiency.
+// - C1-C5 is the type of the variable to capture argument i. These should be
+//   non-const value types suitable for use as lvalues.
+
+template <class A1, class C1>
+class SigslotTester1 : public sigslot::has_slots<> {
+ public:
+  SigslotTester1(sigslot::signal1<A1>* signal,
+                C1* capture1)
+      : callback_count_(0),
+      capture1_(capture1) {
+    signal->connect(this, &SigslotTester1::OnSignalCallback);
+  }
+
+  int callback_count() const { return callback_count_; }
+
+ private:
+  void OnSignalCallback(A1 arg1) {
+    callback_count_++;
+    *capture1_ = arg1;
+  }
+
+  int callback_count_;
+  C1* capture1_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(SigslotTester1);
+};
+
+template <class A1, class A2, class C1, class C2>
+class SigslotTester2 : public sigslot::has_slots<> {
+ public:
+  SigslotTester2(sigslot::signal2<A1, A2>* signal,
+                C1* capture1, C2* capture2)
+      : callback_count_(0),
+      capture1_(capture1), capture2_(capture2) {
+    signal->connect(this, &SigslotTester2::OnSignalCallback);
+  }
+
+  int callback_count() const { return callback_count_; }
+
+ private:
+  void OnSignalCallback(A1 arg1, A2 arg2) {
+    callback_count_++;
+    *capture1_ = arg1;
+    *capture2_ = arg2;
+  }
+
+  int callback_count_;
+  C1* capture1_;
+  C2* capture2_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(SigslotTester2);
+};
+
+template <class A1, class A2, class A3, class C1, class C2, class C3>
+class SigslotTester3 : public sigslot::has_slots<> {
+ public:
+  SigslotTester3(sigslot::signal3<A1, A2, A3>* signal,
+                C1* capture1, C2* capture2, C3* capture3)
+      : callback_count_(0),
+      capture1_(capture1), capture2_(capture2), capture3_(capture3) {
+    signal->connect(this, &SigslotTester3::OnSignalCallback);
+  }
+
+  int callback_count() const { return callback_count_; }
+
+ private:
+  void OnSignalCallback(A1 arg1, A2 arg2, A3 arg3) {
+    callback_count_++;
+    *capture1_ = arg1;
+    *capture2_ = arg2;
+    *capture3_ = arg3;
+  }
+
+  int callback_count_;
+  C1* capture1_;
+  C2* capture2_;
+  C3* capture3_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(SigslotTester3);
+};
+
+template <class A1, class A2, class A3, class A4, class C1, class C2, class C3,
+    class C4>
+class SigslotTester4 : public sigslot::has_slots<> {
+ public:
+  SigslotTester4(sigslot::signal4<A1, A2, A3, A4>* signal,
+                C1* capture1, C2* capture2, C3* capture3, C4* capture4)
+      : callback_count_(0),
+      capture1_(capture1), capture2_(capture2), capture3_(capture3),
+          capture4_(capture4) {
+    signal->connect(this, &SigslotTester4::OnSignalCallback);
+  }
+
+  int callback_count() const { return callback_count_; }
+
+ private:
+  void OnSignalCallback(A1 arg1, A2 arg2, A3 arg3, A4 arg4) {
+    callback_count_++;
+    *capture1_ = arg1;
+    *capture2_ = arg2;
+    *capture3_ = arg3;
+    *capture4_ = arg4;
+  }
+
+  int callback_count_;
+  C1* capture1_;
+  C2* capture2_;
+  C3* capture3_;
+  C4* capture4_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(SigslotTester4);
+};
+
+template <class A1, class A2, class A3, class A4, class A5, class C1, class C2,
+    class C3, class C4, class C5>
+class SigslotTester5 : public sigslot::has_slots<> {
+ public:
+  SigslotTester5(sigslot::signal5<A1, A2, A3, A4, A5>* signal,
+                C1* capture1, C2* capture2, C3* capture3, C4* capture4,
+                    C5* capture5)
+      : callback_count_(0),
+      capture1_(capture1), capture2_(capture2), capture3_(capture3),
+          capture4_(capture4), capture5_(capture5) {
+    signal->connect(this, &SigslotTester5::OnSignalCallback);
+  }
+
+  int callback_count() const { return callback_count_; }
+
+ private:
+  void OnSignalCallback(A1 arg1, A2 arg2, A3 arg3, A4 arg4, A5 arg5) {
+    callback_count_++;
+    *capture1_ = arg1;
+    *capture2_ = arg2;
+    *capture3_ = arg3;
+    *capture4_ = arg4;
+    *capture5_ = arg5;
+  }
+
+  int callback_count_;
+  C1* capture1_;
+  C2* capture2_;
+  C3* capture3_;
+  C4* capture4_;
+  C5* capture5_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(SigslotTester5);
+};
+}  // namespace rtc
+
+#endif  // RTC_BASE_SIGSLOTTESTER_H_
diff --git a/rtc_base/sigslottester.h.pump b/rtc_base/sigslottester.h.pump
new file mode 100755
index 0000000..1029a0f
--- /dev/null
+++ b/rtc_base/sigslottester.h.pump
@@ -0,0 +1,102 @@
+/*
+ *  Copyright 2014 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SIGSLOTTESTER_H_
+#define RTC_BASE_SIGSLOTTESTER_H_
+
+// To generate sigslottester.h from sigslottester.h.pump, execute:
+// /home/build/google3/third_party/gtest/scripts/pump.py sigslottester.h.pump
+
+
+// SigslotTester(s) are utility classes to check if signals owned by an
+// object are being invoked at the right time and with the right arguments.
+// They are meant to be used in tests. Tests must provide "capture" pointers
+// (i.e. address of variables) where the arguments from the signal callback
+// can be stored.
+//
+// Example:
+//   /* Some signal */
+//   sigslot::signal1<const std::string&> foo;
+//
+//   /* We want to monitor foo in some test. Note how signal argument is
+//      const std::string&, but capture-type is std::string. Capture type
+//      must be type that can be assigned to. */
+//   std::string capture;
+//   SigslotTester1<const std::string&, std::string> slot(&foo, &capture);
+//   foo.emit("hello");
+//   EXPECT_EQ(1, slot.callback_count());
+//   EXPECT_EQ("hello", capture);
+//   /* See unit-tests for more examples */
+
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/sigslot.h"
+
+namespace rtc {
+
+// Base version for testing signals that passes no arguments.
+class SigslotTester0 : public sigslot::has_slots<> {
+ public:
+  explicit SigslotTester0(sigslot::signal0<>* signal) : callback_count_(0) {
+    signal->connect(this, &SigslotTester0::OnSignalCallback);
+  }
+
+  int callback_count() const { return callback_count_; }
+
+ private:
+  void OnSignalCallback() { callback_count_++; }
+  int callback_count_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(SigslotTester0);
+};
+
+// Versions below are for testing signals that pass arguments. For all the
+// templates below:
+// - A1-A5 is the type of the argument i in the callback. Signals may and often
+//   do use const-references here for efficiency.
+// - C1-C5 is the type of the variable to capture argument i. These should be
+//   non-const value types suitable for use as lvalues.
+
+$var n = 5
+$range i 1..n
+$for i [[
+$range j 1..i
+
+template <$for j , [[class A$j]], $for j , [[class C$j]]>
+class SigslotTester$i : public sigslot::has_slots<> {
+ public:
+  SigslotTester$i(sigslot::signal$i<$for j , [[A$j]]>* signal,
+                $for j , [[C$j* capture$j]])
+      : callback_count_(0),
+      $for j , [[capture$j[[]]_(capture$j)]] {
+    signal->connect(this, &SigslotTester$i::OnSignalCallback);
+  }
+
+  int callback_count() const { return callback_count_; }
+
+ private:
+  void OnSignalCallback($for j , [[A$j arg$j]]) {
+    callback_count_++;$for j [[
+
+    *capture$j[[]]_ = arg$j;]]
+
+  }
+
+  int callback_count_;$for j [[
+
+  C$j* capture$j[[]]_;]]
+
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(SigslotTester$i);
+};
+
+]]
+}  // namespace rtc
+
+#endif  // RTC_BASE_SIGSLOTTESTER_H_
diff --git a/rtc_base/sigslottester_unittest.cc b/rtc_base/sigslottester_unittest.cc
new file mode 100755
index 0000000..c8e87e5
--- /dev/null
+++ b/rtc_base/sigslottester_unittest.cc
@@ -0,0 +1,86 @@
+/*
+ *  Copyright 2014 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/sigslottester.h"
+
+#include <string>
+
+#include "rtc_base/gunit.h"
+#include "rtc_base/sigslot.h"
+
+namespace rtc {
+
+TEST(SigslotTester, TestSignal1Arg) {
+  sigslot::signal1<int> source1;
+  int capture1;
+  SigslotTester1<int, int> slot1(&source1, &capture1);
+  EXPECT_EQ(0, slot1.callback_count());
+
+  source1.emit(10);
+  EXPECT_EQ(1, slot1.callback_count());
+  EXPECT_EQ(10, capture1);
+
+  source1.emit(20);
+  EXPECT_EQ(2, slot1.callback_count());
+  EXPECT_EQ(20, capture1);
+}
+
+TEST(SigslotTester, TestSignal2Args) {
+  sigslot::signal2<int, char> source2;
+  int capture1;
+  char capture2;
+  SigslotTester2<int, char, int, char> slot2(&source2, &capture1, &capture2);
+  EXPECT_EQ(0, slot2.callback_count());
+
+  source2.emit(10, 'x');
+  EXPECT_EQ(1, slot2.callback_count());
+  EXPECT_EQ(10, capture1);
+  EXPECT_EQ('x', capture2);
+
+  source2.emit(20, 'y');
+  EXPECT_EQ(2, slot2.callback_count());
+  EXPECT_EQ(20, capture1);
+  EXPECT_EQ('y', capture2);
+}
+
+// Since it applies for 1 and 2 args, we assume it will work for up to 5 args.
+
+TEST(SigslotTester, TestSignalWithConstReferenceArgs) {
+  sigslot::signal1<const std::string&> source1;
+  std::string capture1;
+  SigslotTester1<const std::string&, std::string> slot1(&source1, &capture1);
+  EXPECT_EQ(0, slot1.callback_count());
+  source1.emit("hello");
+  EXPECT_EQ(1, slot1.callback_count());
+  EXPECT_EQ("hello", capture1);
+}
+
+TEST(SigslotTester, TestSignalWithPointerToConstArgs) {
+  sigslot::signal1<const std::string*> source1;
+  const std::string* capture1;
+  SigslotTester1<const std::string*, const std::string*> slot1(&source1,
+                                                               &capture1);
+  EXPECT_EQ(0, slot1.callback_count());
+  source1.emit(nullptr);
+  EXPECT_EQ(1, slot1.callback_count());
+  EXPECT_EQ(nullptr, capture1);
+}
+
+TEST(SigslotTester, TestSignalWithConstPointerArgs) {
+  sigslot::signal1<std::string* const> source1;
+  std::string* capture1;
+  SigslotTester1<std::string* const, std::string*> slot1(&source1, &capture1);
+  EXPECT_EQ(0, slot1.callback_count());
+  source1.emit(nullptr);
+  EXPECT_EQ(1, slot1.callback_count());
+  EXPECT_EQ(nullptr, capture1);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/socket.h b/rtc_base/socket.h
new file mode 100644
index 0000000..ca1a302
--- /dev/null
+++ b/rtc_base/socket.h
@@ -0,0 +1,196 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SOCKET_H_
+#define RTC_BASE_SOCKET_H_
+
+#include <errno.h>
+
+#if defined(WEBRTC_POSIX)
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#define SOCKET_EACCES EACCES
+#endif
+
+#if defined(WEBRTC_WIN)
+#include "rtc_base/win32.h"
+#endif
+
+#include "rtc_base/basictypes.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/socketaddress.h"
+
+// Rather than converting errors into a private namespace,
+// Reuse the POSIX socket api errors. Note this depends on
+// Win32 compatibility.
+
+#if defined(WEBRTC_WIN)
+#undef EWOULDBLOCK  // Remove errno.h's definition for each macro below.
+#define EWOULDBLOCK WSAEWOULDBLOCK
+#undef EINPROGRESS
+#define EINPROGRESS WSAEINPROGRESS
+#undef EALREADY
+#define EALREADY WSAEALREADY
+#undef ENOTSOCK
+#define ENOTSOCK WSAENOTSOCK
+#undef EDESTADDRREQ
+#define EDESTADDRREQ WSAEDESTADDRREQ
+#undef EMSGSIZE
+#define EMSGSIZE WSAEMSGSIZE
+#undef EPROTOTYPE
+#define EPROTOTYPE WSAEPROTOTYPE
+#undef ENOPROTOOPT
+#define ENOPROTOOPT WSAENOPROTOOPT
+#undef EPROTONOSUPPORT
+#define EPROTONOSUPPORT WSAEPROTONOSUPPORT
+#undef ESOCKTNOSUPPORT
+#define ESOCKTNOSUPPORT WSAESOCKTNOSUPPORT
+#undef EOPNOTSUPP
+#define EOPNOTSUPP WSAEOPNOTSUPP
+#undef EPFNOSUPPORT
+#define EPFNOSUPPORT WSAEPFNOSUPPORT
+#undef EAFNOSUPPORT
+#define EAFNOSUPPORT WSAEAFNOSUPPORT
+#undef EADDRINUSE
+#define EADDRINUSE WSAEADDRINUSE
+#undef EADDRNOTAVAIL
+#define EADDRNOTAVAIL WSAEADDRNOTAVAIL
+#undef ENETDOWN
+#define ENETDOWN WSAENETDOWN
+#undef ENETUNREACH
+#define ENETUNREACH WSAENETUNREACH
+#undef ENETRESET
+#define ENETRESET WSAENETRESET
+#undef ECONNABORTED
+#define ECONNABORTED WSAECONNABORTED
+#undef ECONNRESET
+#define ECONNRESET WSAECONNRESET
+#undef ENOBUFS
+#define ENOBUFS WSAENOBUFS
+#undef EISCONN
+#define EISCONN WSAEISCONN
+#undef ENOTCONN
+#define ENOTCONN WSAENOTCONN
+#undef ESHUTDOWN
+#define ESHUTDOWN WSAESHUTDOWN
+#undef ETOOMANYREFS
+#define ETOOMANYREFS WSAETOOMANYREFS
+#undef ETIMEDOUT
+#define ETIMEDOUT WSAETIMEDOUT
+#undef ECONNREFUSED
+#define ECONNREFUSED WSAECONNREFUSED
+#undef ELOOP
+#define ELOOP WSAELOOP
+#undef ENAMETOOLONG
+#define ENAMETOOLONG WSAENAMETOOLONG
+#undef EHOSTDOWN
+#define EHOSTDOWN WSAEHOSTDOWN
+#undef EHOSTUNREACH
+#define EHOSTUNREACH WSAEHOSTUNREACH
+#undef ENOTEMPTY
+#define ENOTEMPTY WSAENOTEMPTY
+#undef EPROCLIM
+#define EPROCLIM WSAEPROCLIM
+#undef EUSERS
+#define EUSERS WSAEUSERS
+#undef EDQUOT
+#define EDQUOT WSAEDQUOT
+#undef ESTALE
+#define ESTALE WSAESTALE
+#undef EREMOTE
+#define EREMOTE WSAEREMOTE
+#define SOCKET_EACCES WSAEACCES
+#endif  // WEBRTC_WIN
+
+#if defined(WEBRTC_POSIX)
+#define INVALID_SOCKET (-1)
+#define SOCKET_ERROR (-1)
+#define closesocket(s) close(s)
+#endif  // WEBRTC_POSIX
+
+namespace rtc {
+
+inline bool IsBlockingError(int e) {
+  return (e == EWOULDBLOCK) || (e == EAGAIN) || (e == EINPROGRESS);
+}
+
+struct SentPacket {
+  SentPacket() : packet_id(-1), send_time_ms(-1) {}
+  SentPacket(int packet_id, int64_t send_time_ms)
+      : packet_id(packet_id), send_time_ms(send_time_ms) {}
+
+  int packet_id;
+  int64_t send_time_ms;
+};
+
+// General interface for the socket implementations of various networks.  The
+// methods match those of normal UNIX sockets very closely.
+class Socket {
+ public:
+  virtual ~Socket() {}
+
+  // Returns the address to which the socket is bound.  If the socket is not
+  // bound, then the any-address is returned.
+  virtual SocketAddress GetLocalAddress() const = 0;
+
+  // Returns the address to which the socket is connected.  If the socket is
+  // not connected, then the any-address is returned.
+  virtual SocketAddress GetRemoteAddress() const = 0;
+
+  virtual int Bind(const SocketAddress& addr) = 0;
+  virtual int Connect(const SocketAddress& addr) = 0;
+  virtual int Send(const void *pv, size_t cb) = 0;
+  virtual int SendTo(const void *pv, size_t cb, const SocketAddress& addr) = 0;
+  // |timestamp| is in units of microseconds.
+  virtual int Recv(void* pv, size_t cb, int64_t* timestamp) = 0;
+  virtual int RecvFrom(void* pv,
+                       size_t cb,
+                       SocketAddress* paddr,
+                       int64_t* timestamp) = 0;
+  virtual int Listen(int backlog) = 0;
+  virtual Socket *Accept(SocketAddress *paddr) = 0;
+  virtual int Close() = 0;
+  virtual int GetError() const = 0;
+  virtual void SetError(int error) = 0;
+  inline bool IsBlocking() const { return IsBlockingError(GetError()); }
+
+  enum ConnState {
+    CS_CLOSED,
+    CS_CONNECTING,
+    CS_CONNECTED
+  };
+  virtual ConnState GetState() const = 0;
+
+  enum Option {
+    OPT_DONTFRAGMENT,
+    OPT_RCVBUF,      // receive buffer size
+    OPT_SNDBUF,      // send buffer size
+    OPT_NODELAY,     // whether Nagle algorithm is enabled
+    OPT_IPV6_V6ONLY, // Whether the socket is IPv6 only.
+    OPT_DSCP,        // DSCP code
+    OPT_RTP_SENDTIME_EXTN_ID,  // This is a non-traditional socket option param.
+                               // This is specific to libjingle and will be used
+                               // if SendTime option is needed at socket level.
+  };
+  virtual int GetOption(Option opt, int* value) = 0;
+  virtual int SetOption(Option opt, int value) = 0;
+
+ protected:
+  Socket() {}
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(Socket);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_SOCKET_H_
diff --git a/rtc_base/socket_unittest.cc b/rtc_base/socket_unittest.cc
new file mode 100644
index 0000000..a31cc02
--- /dev/null
+++ b/rtc_base/socket_unittest.cc
@@ -0,0 +1,1047 @@
+/*
+ *  Copyright 2007 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "rtc_base/socket_unittest.h"
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/asyncudpsocket.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/nethelpers.h"
+#include "rtc_base/ptr_util.h"
+#include "rtc_base/socketserver.h"
+#include "rtc_base/testclient.h"
+#include "rtc_base/testutils.h"
+#include "rtc_base/thread.h"
+
+namespace rtc {
+
+using webrtc::testing::SSE_CLOSE;
+using webrtc::testing::SSE_ERROR;
+using webrtc::testing::SSE_OPEN;
+using webrtc::testing::SSE_READ;
+using webrtc::testing::SSE_WRITE;
+using webrtc::testing::StreamSink;
+
+#define MAYBE_SKIP_IPV6                        \
+  if (!HasIPv6Enabled()) {                     \
+    RTC_LOG(LS_INFO) << "No IPv6... skipping"; \
+    return;                                    \
+  }
+
+// Data size to be used in TcpInternal tests.
+static const size_t kTcpInternalDataSize = 1024 * 1024;  // bytes
+
+void SocketTest::SetUp() {
+  ss_ = Thread::Current()->socketserver();
+}
+
+void SocketTest::TestConnectIPv4() {
+  ConnectInternal(kIPv4Loopback);
+}
+
+void SocketTest::TestConnectIPv6() {
+  MAYBE_SKIP_IPV6;
+  ConnectInternal(kIPv6Loopback);
+}
+
+void SocketTest::TestConnectWithDnsLookupIPv4() {
+  ConnectWithDnsLookupInternal(kIPv4Loopback, "localhost");
+}
+
+void SocketTest::TestConnectWithDnsLookupIPv6() {
+  // TODO: Enable this when DNS resolution supports IPv6.
+  RTC_LOG(LS_INFO) << "Skipping IPv6 DNS test";
+  // ConnectWithDnsLookupInternal(kIPv6Loopback, "localhost6");
+}
+
+void SocketTest::TestConnectFailIPv4() {
+  ConnectFailInternal(kIPv4Loopback);
+}
+
+void SocketTest::TestConnectFailIPv6() {
+  MAYBE_SKIP_IPV6;
+  ConnectFailInternal(kIPv6Loopback);
+}
+
+void SocketTest::TestConnectWithDnsLookupFailIPv4() {
+  ConnectWithDnsLookupFailInternal(kIPv4Loopback);
+}
+
+void SocketTest::TestConnectWithDnsLookupFailIPv6() {
+  MAYBE_SKIP_IPV6;
+  ConnectWithDnsLookupFailInternal(kIPv6Loopback);
+}
+
+void SocketTest::TestConnectWithClosedSocketIPv4() {
+  ConnectWithClosedSocketInternal(kIPv4Loopback);
+}
+
+void SocketTest::TestConnectWithClosedSocketIPv6() {
+  MAYBE_SKIP_IPV6;
+  ConnectWithClosedSocketInternal(kIPv6Loopback);
+}
+
+void SocketTest::TestConnectWhileNotClosedIPv4() {
+  ConnectWhileNotClosedInternal(kIPv4Loopback);
+}
+
+void SocketTest::TestConnectWhileNotClosedIPv6() {
+  MAYBE_SKIP_IPV6;
+  ConnectWhileNotClosedInternal(kIPv6Loopback);
+}
+
+void SocketTest::TestServerCloseDuringConnectIPv4() {
+  ServerCloseDuringConnectInternal(kIPv4Loopback);
+}
+
+void SocketTest::TestServerCloseDuringConnectIPv6() {
+  MAYBE_SKIP_IPV6;
+  ServerCloseDuringConnectInternal(kIPv6Loopback);
+}
+
+void SocketTest::TestClientCloseDuringConnectIPv4() {
+  ClientCloseDuringConnectInternal(kIPv4Loopback);
+}
+
+void SocketTest::TestClientCloseDuringConnectIPv6() {
+  MAYBE_SKIP_IPV6;
+  ClientCloseDuringConnectInternal(kIPv6Loopback);
+}
+
+void SocketTest::TestServerCloseIPv4() {
+  ServerCloseInternal(kIPv4Loopback);
+}
+
+void SocketTest::TestServerCloseIPv6() {
+  MAYBE_SKIP_IPV6;
+  ServerCloseInternal(kIPv6Loopback);
+}
+
+void SocketTest::TestCloseInClosedCallbackIPv4() {
+  CloseInClosedCallbackInternal(kIPv4Loopback);
+}
+
+void SocketTest::TestCloseInClosedCallbackIPv6() {
+  MAYBE_SKIP_IPV6;
+  CloseInClosedCallbackInternal(kIPv6Loopback);
+}
+
+void SocketTest::TestSocketServerWaitIPv4() {
+  SocketServerWaitInternal(kIPv4Loopback);
+}
+
+void SocketTest::TestSocketServerWaitIPv6() {
+  MAYBE_SKIP_IPV6;
+  SocketServerWaitInternal(kIPv6Loopback);
+}
+
+void SocketTest::TestTcpIPv4() {
+  TcpInternal(kIPv4Loopback, kTcpInternalDataSize, -1);
+}
+
+void SocketTest::TestTcpIPv6() {
+  MAYBE_SKIP_IPV6;
+  TcpInternal(kIPv6Loopback, kTcpInternalDataSize, -1);
+}
+
+void SocketTest::TestSingleFlowControlCallbackIPv4() {
+  SingleFlowControlCallbackInternal(kIPv4Loopback);
+}
+
+void SocketTest::TestSingleFlowControlCallbackIPv6() {
+  MAYBE_SKIP_IPV6;
+  SingleFlowControlCallbackInternal(kIPv6Loopback);
+}
+
+void SocketTest::TestUdpIPv4() {
+  UdpInternal(kIPv4Loopback);
+}
+
+void SocketTest::TestUdpIPv6() {
+  MAYBE_SKIP_IPV6;
+  UdpInternal(kIPv6Loopback);
+}
+
+void SocketTest::TestUdpReadyToSendIPv4() {
+#if !defined(WEBRTC_MAC)
+  // TODO(ronghuawu): Enable this test on mac/ios.
+  UdpReadyToSend(kIPv4Loopback);
+#endif
+}
+
+void SocketTest::TestUdpReadyToSendIPv6() {
+#if defined(WEBRTC_WIN)
+  // TODO(ronghuawu): Enable this test (currently flakey) on mac and linux.
+  MAYBE_SKIP_IPV6;
+  UdpReadyToSend(kIPv6Loopback);
+#endif
+}
+
+void SocketTest::TestGetSetOptionsIPv4() {
+  GetSetOptionsInternal(kIPv4Loopback);
+}
+
+void SocketTest::TestGetSetOptionsIPv6() {
+  MAYBE_SKIP_IPV6;
+  GetSetOptionsInternal(kIPv6Loopback);
+}
+
+void SocketTest::TestSocketRecvTimestampIPv4() {
+  SocketRecvTimestamp(kIPv4Loopback);
+}
+
+void SocketTest::TestSocketRecvTimestampIPv6() {
+  MAYBE_SKIP_IPV6;
+  SocketRecvTimestamp(kIPv6Loopback);
+}
+
+// For unbound sockets, GetLocalAddress / GetRemoteAddress return AF_UNSPEC
+// values on Windows, but an empty address of the same family on Linux/MacOS X.
+bool IsUnspecOrEmptyIP(const IPAddress& address) {
+#if !defined(WEBRTC_WIN)
+  return IPIsAny(address);
+#else
+  return address.family() == AF_UNSPEC;
+#endif
+}
+
+void SocketTest::ConnectInternal(const IPAddress& loopback) {
+  StreamSink sink;
+  SocketAddress accept_addr;
+
+  // Create client.
+  std::unique_ptr<AsyncSocket> client(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(client.get());
+  EXPECT_EQ(AsyncSocket::CS_CLOSED, client->GetState());
+  EXPECT_PRED1(IsUnspecOrEmptyIP, client->GetLocalAddress().ipaddr());
+
+  // Create server and listen.
+  std::unique_ptr<AsyncSocket> server(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(server.get());
+  EXPECT_EQ(0, server->Bind(SocketAddress(loopback, 0)));
+  EXPECT_EQ(0, server->Listen(5));
+  EXPECT_EQ(AsyncSocket::CS_CONNECTING, server->GetState());
+
+  // Ensure no pending server connections, since we haven't done anything yet.
+  EXPECT_FALSE(sink.Check(server.get(), SSE_READ));
+  EXPECT_TRUE(nullptr == server->Accept(&accept_addr));
+  EXPECT_TRUE(accept_addr.IsNil());
+
+  // Attempt connect to listening socket.
+  EXPECT_EQ(0, client->Connect(server->GetLocalAddress()));
+  EXPECT_FALSE(client->GetLocalAddress().IsNil());
+  EXPECT_NE(server->GetLocalAddress(), client->GetLocalAddress());
+
+  // Client is connecting, outcome not yet determined.
+  EXPECT_EQ(AsyncSocket::CS_CONNECTING, client->GetState());
+  EXPECT_FALSE(sink.Check(client.get(), SSE_OPEN));
+  EXPECT_FALSE(sink.Check(client.get(), SSE_CLOSE));
+
+  // Server has pending connection, accept it.
+  EXPECT_TRUE_WAIT((sink.Check(server.get(), SSE_READ)), kTimeout);
+  std::unique_ptr<AsyncSocket> accepted(server->Accept(&accept_addr));
+  ASSERT_TRUE(accepted);
+  EXPECT_FALSE(accept_addr.IsNil());
+  EXPECT_EQ(accepted->GetRemoteAddress(), accept_addr);
+
+  // Connected from server perspective, check the addresses are correct.
+  EXPECT_EQ(AsyncSocket::CS_CONNECTED, accepted->GetState());
+  EXPECT_EQ(server->GetLocalAddress(), accepted->GetLocalAddress());
+  EXPECT_EQ(client->GetLocalAddress(), accepted->GetRemoteAddress());
+
+  // Connected from client perspective, check the addresses are correct.
+  EXPECT_EQ_WAIT(AsyncSocket::CS_CONNECTED, client->GetState(), kTimeout);
+  EXPECT_TRUE(sink.Check(client.get(), SSE_OPEN));
+  EXPECT_FALSE(sink.Check(client.get(), SSE_CLOSE));
+  EXPECT_EQ(client->GetRemoteAddress(), server->GetLocalAddress());
+  EXPECT_EQ(client->GetRemoteAddress(), accepted->GetLocalAddress());
+}
+
+void SocketTest::ConnectWithDnsLookupInternal(const IPAddress& loopback,
+                                              const std::string& host) {
+  StreamSink sink;
+  SocketAddress accept_addr;
+
+  // Create client.
+  std::unique_ptr<AsyncSocket> client(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(client.get());
+
+  // Create server and listen.
+  std::unique_ptr<AsyncSocket> server(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(server.get());
+  EXPECT_EQ(0, server->Bind(SocketAddress(loopback, 0)));
+  EXPECT_EQ(0, server->Listen(5));
+
+  // Attempt connect to listening socket.
+  SocketAddress dns_addr(server->GetLocalAddress());
+  dns_addr.SetIP(host);
+  EXPECT_EQ(0, client->Connect(dns_addr));
+  // TODO: Bind when doing DNS lookup.
+  //EXPECT_NE(kEmptyAddr, client->GetLocalAddress());  // Implicit Bind
+
+  // Client is connecting, outcome not yet determined.
+  EXPECT_EQ(AsyncSocket::CS_CONNECTING, client->GetState());
+  EXPECT_FALSE(sink.Check(client.get(), SSE_OPEN));
+  EXPECT_FALSE(sink.Check(client.get(), SSE_CLOSE));
+
+  // Server has pending connection, accept it.
+  EXPECT_TRUE_WAIT((sink.Check(server.get(), SSE_READ)), kTimeout);
+  std::unique_ptr<AsyncSocket> accepted(server->Accept(&accept_addr));
+  ASSERT_TRUE(accepted);
+  EXPECT_FALSE(accept_addr.IsNil());
+  EXPECT_EQ(accepted->GetRemoteAddress(), accept_addr);
+
+  // Connected from server perspective, check the addresses are correct.
+  EXPECT_EQ(AsyncSocket::CS_CONNECTED, accepted->GetState());
+  EXPECT_EQ(server->GetLocalAddress(), accepted->GetLocalAddress());
+  EXPECT_EQ(client->GetLocalAddress(), accepted->GetRemoteAddress());
+
+  // Connected from client perspective, check the addresses are correct.
+  EXPECT_EQ_WAIT(AsyncSocket::CS_CONNECTED, client->GetState(), kTimeout);
+  EXPECT_TRUE(sink.Check(client.get(), SSE_OPEN));
+  EXPECT_FALSE(sink.Check(client.get(), SSE_CLOSE));
+  EXPECT_EQ(client->GetRemoteAddress(), server->GetLocalAddress());
+  EXPECT_EQ(client->GetRemoteAddress(), accepted->GetLocalAddress());
+}
+
+void SocketTest::ConnectFailInternal(const IPAddress& loopback) {
+  StreamSink sink;
+  SocketAddress accept_addr;
+
+  // Create client.
+  std::unique_ptr<AsyncSocket> client(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(client.get());
+
+  // Create server, but don't listen yet.
+  std::unique_ptr<AsyncSocket> server(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(server.get());
+  EXPECT_EQ(0, server->Bind(SocketAddress(loopback, 0)));
+
+  // Attempt connect to a non-existent socket.
+  // We don't connect to the server socket created above, since on
+  // MacOS it takes about 75 seconds to get back an error!
+  SocketAddress bogus_addr(loopback, 65535);
+  EXPECT_EQ(0, client->Connect(bogus_addr));
+
+  // Wait for connection to fail (ECONNREFUSED).
+  EXPECT_EQ_WAIT(AsyncSocket::CS_CLOSED, client->GetState(), kTimeout);
+  EXPECT_FALSE(sink.Check(client.get(), SSE_OPEN));
+  EXPECT_TRUE(sink.Check(client.get(), SSE_ERROR));
+  EXPECT_TRUE(client->GetRemoteAddress().IsNil());
+
+  // Should be no pending server connections.
+  EXPECT_FALSE(sink.Check(server.get(), SSE_READ));
+  EXPECT_TRUE(nullptr == server->Accept(&accept_addr));
+  EXPECT_EQ(IPAddress(), accept_addr.ipaddr());
+}
+
+void SocketTest::ConnectWithDnsLookupFailInternal(const IPAddress& loopback) {
+  StreamSink sink;
+  SocketAddress accept_addr;
+
+  // Create client.
+  std::unique_ptr<AsyncSocket> client(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(client.get());
+
+  // Create server, but don't listen yet.
+  std::unique_ptr<AsyncSocket> server(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(server.get());
+  EXPECT_EQ(0, server->Bind(SocketAddress(loopback, 0)));
+
+  // Attempt connect to a non-existent host.
+  // We don't connect to the server socket created above, since on
+  // MacOS it takes about 75 seconds to get back an error!
+  SocketAddress bogus_dns_addr("not-a-real-hostname", 65535);
+  EXPECT_EQ(0, client->Connect(bogus_dns_addr));
+
+  // Wait for connection to fail (EHOSTNOTFOUND).
+  bool dns_lookup_finished = false;
+  WAIT_(client->GetState() == AsyncSocket::CS_CLOSED, kTimeout,
+        dns_lookup_finished);
+  if (!dns_lookup_finished) {
+    RTC_LOG(LS_WARNING) << "Skipping test; DNS resolution took longer than 5 "
+                        << "seconds.";
+    return;
+  }
+
+  EXPECT_EQ_WAIT(AsyncSocket::CS_CLOSED, client->GetState(), kTimeout);
+  EXPECT_FALSE(sink.Check(client.get(), SSE_OPEN));
+  EXPECT_TRUE(sink.Check(client.get(), SSE_ERROR));
+  EXPECT_TRUE(client->GetRemoteAddress().IsNil());
+  // Should be no pending server connections.
+  EXPECT_FALSE(sink.Check(server.get(), SSE_READ));
+  EXPECT_TRUE(nullptr == server->Accept(&accept_addr));
+  EXPECT_TRUE(accept_addr.IsNil());
+}
+
+void SocketTest::ConnectWithClosedSocketInternal(const IPAddress& loopback) {
+  // Create server and listen.
+  std::unique_ptr<AsyncSocket> server(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  EXPECT_EQ(0, server->Bind(SocketAddress(loopback, 0)));
+  EXPECT_EQ(0, server->Listen(5));
+
+  // Create a client and put in to CS_CLOSED state.
+  std::unique_ptr<AsyncSocket> client(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  EXPECT_EQ(0, client->Close());
+  EXPECT_EQ(AsyncSocket::CS_CLOSED, client->GetState());
+
+  // Connect() should reinitialize the socket, and put it in to CS_CONNECTING.
+  EXPECT_EQ(0, client->Connect(SocketAddress(server->GetLocalAddress())));
+  EXPECT_EQ(AsyncSocket::CS_CONNECTING, client->GetState());
+}
+
+void SocketTest::ConnectWhileNotClosedInternal(const IPAddress& loopback) {
+  // Create server and listen.
+  StreamSink sink;
+  std::unique_ptr<AsyncSocket> server(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(server.get());
+  EXPECT_EQ(0, server->Bind(SocketAddress(loopback, 0)));
+  EXPECT_EQ(0, server->Listen(5));
+  // Create client, connect.
+  std::unique_ptr<AsyncSocket> client(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  EXPECT_EQ(0, client->Connect(SocketAddress(server->GetLocalAddress())));
+  EXPECT_EQ(AsyncSocket::CS_CONNECTING, client->GetState());
+  // Try to connect again. Should fail, but not interfere with original attempt.
+  EXPECT_EQ(SOCKET_ERROR,
+            client->Connect(SocketAddress(server->GetLocalAddress())));
+
+  // Accept the original connection.
+  SocketAddress accept_addr;
+  EXPECT_TRUE_WAIT((sink.Check(server.get(), SSE_READ)), kTimeout);
+  std::unique_ptr<AsyncSocket> accepted(server->Accept(&accept_addr));
+  ASSERT_TRUE(accepted);
+  EXPECT_FALSE(accept_addr.IsNil());
+
+  // Check the states and addresses.
+  EXPECT_EQ(AsyncSocket::CS_CONNECTED, accepted->GetState());
+  EXPECT_EQ(server->GetLocalAddress(), accepted->GetLocalAddress());
+  EXPECT_EQ(client->GetLocalAddress(), accepted->GetRemoteAddress());
+  EXPECT_EQ_WAIT(AsyncSocket::CS_CONNECTED, client->GetState(), kTimeout);
+  EXPECT_EQ(client->GetRemoteAddress(), server->GetLocalAddress());
+  EXPECT_EQ(client->GetRemoteAddress(), accepted->GetLocalAddress());
+
+  // Try to connect again, to an unresolved hostname.
+  // Shouldn't break anything.
+  EXPECT_EQ(SOCKET_ERROR,
+            client->Connect(SocketAddress("localhost",
+                                          server->GetLocalAddress().port())));
+  EXPECT_EQ(AsyncSocket::CS_CONNECTED, accepted->GetState());
+  EXPECT_EQ(AsyncSocket::CS_CONNECTED, client->GetState());
+  EXPECT_EQ(client->GetRemoteAddress(), server->GetLocalAddress());
+  EXPECT_EQ(client->GetRemoteAddress(), accepted->GetLocalAddress());
+}
+
+void SocketTest::ServerCloseDuringConnectInternal(const IPAddress& loopback) {
+  StreamSink sink;
+
+  // Create client.
+  std::unique_ptr<AsyncSocket> client(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(client.get());
+
+  // Create server and listen.
+  std::unique_ptr<AsyncSocket> server(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(server.get());
+  EXPECT_EQ(0, server->Bind(SocketAddress(loopback, 0)));
+  EXPECT_EQ(0, server->Listen(5));
+
+  // Attempt connect to listening socket.
+  EXPECT_EQ(0, client->Connect(server->GetLocalAddress()));
+
+  // Close down the server while the socket is in the accept queue.
+  EXPECT_TRUE_WAIT(sink.Check(server.get(), SSE_READ), kTimeout);
+  server->Close();
+
+  // This should fail the connection for the client. Clean up.
+  EXPECT_EQ_WAIT(AsyncSocket::CS_CLOSED, client->GetState(), kTimeout);
+  EXPECT_TRUE(sink.Check(client.get(), SSE_ERROR));
+  client->Close();
+}
+
+void SocketTest::ClientCloseDuringConnectInternal(const IPAddress& loopback) {
+  StreamSink sink;
+  SocketAddress accept_addr;
+
+  // Create client.
+  std::unique_ptr<AsyncSocket> client(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(client.get());
+
+  // Create server and listen.
+  std::unique_ptr<AsyncSocket> server(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(server.get());
+  EXPECT_EQ(0, server->Bind(SocketAddress(loopback, 0)));
+  EXPECT_EQ(0, server->Listen(5));
+
+  // Attempt connect to listening socket.
+  EXPECT_EQ(0, client->Connect(server->GetLocalAddress()));
+
+  // Close down the client while the socket is in the accept queue.
+  EXPECT_TRUE_WAIT(sink.Check(server.get(), SSE_READ), kTimeout);
+  client->Close();
+
+  // The connection should still be able to be accepted.
+  std::unique_ptr<AsyncSocket> accepted(server->Accept(&accept_addr));
+  ASSERT_TRUE(accepted);
+  sink.Monitor(accepted.get());
+  EXPECT_EQ(AsyncSocket::CS_CONNECTED, accepted->GetState());
+
+  // The accepted socket should then close (possibly with err, timing-related)
+  EXPECT_EQ_WAIT(AsyncSocket::CS_CLOSED, accepted->GetState(), kTimeout);
+  EXPECT_TRUE(sink.Check(accepted.get(), SSE_CLOSE) ||
+              sink.Check(accepted.get(), SSE_ERROR));
+
+  // The client should not get a close event.
+  EXPECT_FALSE(sink.Check(client.get(), SSE_CLOSE));
+}
+
+void SocketTest::ServerCloseInternal(const IPAddress& loopback) {
+  StreamSink sink;
+  SocketAddress accept_addr;
+
+  // Create client.
+  std::unique_ptr<AsyncSocket> client(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(client.get());
+
+  // Create server and listen.
+  std::unique_ptr<AsyncSocket> server(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(server.get());
+  EXPECT_EQ(0, server->Bind(SocketAddress(loopback, 0)));
+  EXPECT_EQ(0, server->Listen(5));
+
+  // Attempt connection.
+  EXPECT_EQ(0, client->Connect(server->GetLocalAddress()));
+
+  // Accept connection.
+  EXPECT_TRUE_WAIT((sink.Check(server.get(), SSE_READ)), kTimeout);
+  std::unique_ptr<AsyncSocket> accepted(server->Accept(&accept_addr));
+  ASSERT_TRUE(accepted);
+  sink.Monitor(accepted.get());
+
+  // Both sides are now connected.
+  EXPECT_EQ_WAIT(AsyncSocket::CS_CONNECTED, client->GetState(), kTimeout);
+  EXPECT_TRUE(sink.Check(client.get(), SSE_OPEN));
+  EXPECT_EQ(client->GetRemoteAddress(), accepted->GetLocalAddress());
+  EXPECT_EQ(accepted->GetRemoteAddress(), client->GetLocalAddress());
+
+  // Send data to the client, and then close the connection.
+  EXPECT_EQ(1, accepted->Send("a", 1));
+  accepted->Close();
+  EXPECT_EQ(AsyncSocket::CS_CLOSED, accepted->GetState());
+
+  // Expect that the client is notified, and has not yet closed.
+  EXPECT_TRUE_WAIT(sink.Check(client.get(), SSE_READ), kTimeout);
+  EXPECT_FALSE(sink.Check(client.get(), SSE_CLOSE));
+  EXPECT_EQ(AsyncSocket::CS_CONNECTED, client->GetState());
+
+  // Ensure the data can be read.
+  char buffer[10];
+  EXPECT_EQ(1, client->Recv(buffer, sizeof(buffer), nullptr));
+  EXPECT_EQ('a', buffer[0]);
+
+  // Now we should close, but the remote address will remain.
+  EXPECT_EQ_WAIT(AsyncSocket::CS_CLOSED, client->GetState(), kTimeout);
+  EXPECT_TRUE(sink.Check(client.get(), SSE_CLOSE));
+  EXPECT_FALSE(client->GetRemoteAddress().IsAnyIP());
+
+  // The closer should not get a close signal.
+  EXPECT_FALSE(sink.Check(accepted.get(), SSE_CLOSE));
+  EXPECT_TRUE(accepted->GetRemoteAddress().IsNil());
+
+  // And the closee should only get a single signal.
+  Thread::Current()->ProcessMessages(0);
+  EXPECT_FALSE(sink.Check(client.get(), SSE_CLOSE));
+
+  // Close down the client and ensure all is good.
+  client->Close();
+  EXPECT_FALSE(sink.Check(client.get(), SSE_CLOSE));
+  EXPECT_TRUE(client->GetRemoteAddress().IsNil());
+}
+
+class SocketCloser : public sigslot::has_slots<> {
+ public:
+  void OnClose(AsyncSocket* socket, int error) {
+    socket->Close();  // Deleting here would blow up the vector of handlers
+                      // for the socket's signal.
+  }
+};
+
+void SocketTest::CloseInClosedCallbackInternal(const IPAddress& loopback) {
+  StreamSink sink;
+  SocketCloser closer;
+  SocketAddress accept_addr;
+
+  // Create client.
+  std::unique_ptr<AsyncSocket> client(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(client.get());
+  client->SignalCloseEvent.connect(&closer, &SocketCloser::OnClose);
+
+  // Create server and listen.
+  std::unique_ptr<AsyncSocket> server(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(server.get());
+  EXPECT_EQ(0, server->Bind(SocketAddress(loopback, 0)));
+  EXPECT_EQ(0, server->Listen(5));
+
+  // Attempt connection.
+  EXPECT_EQ(0, client->Connect(server->GetLocalAddress()));
+
+  // Accept connection.
+  EXPECT_TRUE_WAIT((sink.Check(server.get(), SSE_READ)), kTimeout);
+  std::unique_ptr<AsyncSocket> accepted(server->Accept(&accept_addr));
+  ASSERT_TRUE(accepted);
+  sink.Monitor(accepted.get());
+
+  // Both sides are now connected.
+  EXPECT_EQ_WAIT(AsyncSocket::CS_CONNECTED, client->GetState(), kTimeout);
+  EXPECT_TRUE(sink.Check(client.get(), SSE_OPEN));
+  EXPECT_EQ(client->GetRemoteAddress(), accepted->GetLocalAddress());
+  EXPECT_EQ(accepted->GetRemoteAddress(), client->GetLocalAddress());
+
+  // Send data to the client, and then close the connection.
+  accepted->Close();
+  EXPECT_EQ(AsyncSocket::CS_CLOSED, accepted->GetState());
+
+  // Expect that the client is notified, and has not yet closed.
+  EXPECT_FALSE(sink.Check(client.get(), SSE_CLOSE));
+  EXPECT_EQ(AsyncSocket::CS_CONNECTED, client->GetState());
+
+  // Now we should be closed and invalidated
+  EXPECT_EQ_WAIT(AsyncSocket::CS_CLOSED, client->GetState(), kTimeout);
+  EXPECT_TRUE(sink.Check(client.get(), SSE_CLOSE));
+  EXPECT_TRUE(Socket::CS_CLOSED == client->GetState());
+}
+
+class Sleeper : public MessageHandler {
+ public:
+  void OnMessage(Message* msg) override { Thread::Current()->SleepMs(500); }
+};
+
+void SocketTest::SocketServerWaitInternal(const IPAddress& loopback) {
+  StreamSink sink;
+  SocketAddress accept_addr;
+
+  // Create & connect server and client sockets.
+  std::unique_ptr<AsyncSocket> client(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  std::unique_ptr<AsyncSocket> server(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(client.get());
+  sink.Monitor(server.get());
+  EXPECT_EQ(0, server->Bind(SocketAddress(loopback, 0)));
+  EXPECT_EQ(0, server->Listen(5));
+
+  EXPECT_EQ(0, client->Connect(server->GetLocalAddress()));
+  EXPECT_TRUE_WAIT((sink.Check(server.get(), SSE_READ)), kTimeout);
+
+  std::unique_ptr<AsyncSocket> accepted(server->Accept(&accept_addr));
+  ASSERT_TRUE(accepted);
+  sink.Monitor(accepted.get());
+  EXPECT_EQ(AsyncSocket::CS_CONNECTED, accepted->GetState());
+  EXPECT_EQ(server->GetLocalAddress(), accepted->GetLocalAddress());
+  EXPECT_EQ(client->GetLocalAddress(), accepted->GetRemoteAddress());
+
+  EXPECT_EQ_WAIT(AsyncSocket::CS_CONNECTED, client->GetState(), kTimeout);
+  EXPECT_TRUE(sink.Check(client.get(), SSE_OPEN));
+  EXPECT_FALSE(sink.Check(client.get(), SSE_CLOSE));
+  EXPECT_EQ(client->GetRemoteAddress(), server->GetLocalAddress());
+  EXPECT_EQ(client->GetRemoteAddress(), accepted->GetLocalAddress());
+
+  // Do an i/o operation, triggering an eventual callback.
+  EXPECT_FALSE(sink.Check(accepted.get(), SSE_READ));
+  char buf[1024] = {0};
+
+  EXPECT_EQ(1024, client->Send(buf, 1024));
+  EXPECT_FALSE(sink.Check(accepted.get(), SSE_READ));
+
+  // Shouldn't signal when blocked in a thread Send, where process_io is false.
+  std::unique_ptr<Thread> thread(Thread::CreateWithSocketServer());
+  thread->Start();
+  Sleeper sleeper;
+  TypedMessageData<AsyncSocket*> data(client.get());
+  thread->Send(RTC_FROM_HERE, &sleeper, 0, &data);
+  EXPECT_FALSE(sink.Check(accepted.get(), SSE_READ));
+
+  // But should signal when process_io is true.
+  EXPECT_TRUE_WAIT((sink.Check(accepted.get(), SSE_READ)), kTimeout);
+  EXPECT_LT(0, accepted->Recv(buf, 1024, nullptr));
+}
+
+void SocketTest::TcpInternal(const IPAddress& loopback, size_t data_size,
+    ptrdiff_t max_send_size) {
+  StreamSink sink;
+  SocketAddress accept_addr;
+
+  // Create receiving client.
+  std::unique_ptr<AsyncSocket> receiver(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(receiver.get());
+
+  // Create server and listen.
+  std::unique_ptr<AsyncSocket> server(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(server.get());
+  EXPECT_EQ(0, server->Bind(SocketAddress(loopback, 0)));
+  EXPECT_EQ(0, server->Listen(5));
+
+  // Attempt connection.
+  EXPECT_EQ(0, receiver->Connect(server->GetLocalAddress()));
+
+  // Accept connection which will be used for sending.
+  EXPECT_TRUE_WAIT((sink.Check(server.get(), SSE_READ)), kTimeout);
+  std::unique_ptr<AsyncSocket> sender(server->Accept(&accept_addr));
+  ASSERT_TRUE(sender);
+  sink.Monitor(sender.get());
+
+  // Both sides are now connected.
+  EXPECT_EQ_WAIT(AsyncSocket::CS_CONNECTED, receiver->GetState(), kTimeout);
+  EXPECT_TRUE(sink.Check(receiver.get(), SSE_OPEN));
+  EXPECT_EQ(receiver->GetRemoteAddress(), sender->GetLocalAddress());
+  EXPECT_EQ(sender->GetRemoteAddress(), receiver->GetLocalAddress());
+
+  // Create test data.
+  rtc::Buffer send_buffer(0, data_size);
+  rtc::Buffer recv_buffer(0, data_size);
+  for (size_t i = 0; i < data_size; ++i) {
+    char ch = static_cast<char>(i % 256);
+    send_buffer.AppendData(&ch, sizeof(ch));
+  }
+  rtc::Buffer recved_data(0, data_size);
+
+  // Send and receive a bunch of data.
+  size_t sent_size = 0;
+  bool writable = true;
+  bool send_called = false;
+  bool readable = false;
+  bool recv_called = false;
+  while (recv_buffer.size() < send_buffer.size()) {
+    // Send as much as we can while we're cleared to send.
+    while (writable && sent_size < send_buffer.size()) {
+      int unsent_size = static_cast<int>(send_buffer.size() - sent_size);
+      int sent = sender->Send(send_buffer.data() + sent_size, unsent_size);
+      if (!send_called) {
+        // The first Send() after connecting or getting writability should
+        // succeed and send some data.
+        EXPECT_GT(sent, 0);
+        send_called = true;
+      }
+      if (sent >= 0) {
+        EXPECT_LE(sent, unsent_size);
+        sent_size += sent;
+        if (max_send_size >= 0) {
+          EXPECT_LE(static_cast<ptrdiff_t>(sent), max_send_size);
+          if (sent < unsent_size) {
+            // If max_send_size is limiting the amount to send per call such
+            // that the sent amount is less than the unsent amount, we simulate
+            // that the socket is no longer writable.
+            writable = false;
+          }
+        }
+      } else {
+        ASSERT_TRUE(sender->IsBlocking());
+        writable = false;
+      }
+    }
+
+    // Read all the sent data.
+    while (recv_buffer.size() < sent_size) {
+      if (!readable) {
+        // Wait until data is available.
+        EXPECT_TRUE_WAIT(sink.Check(receiver.get(), SSE_READ), kTimeout);
+        readable = true;
+        recv_called = false;
+      }
+
+      // Receive as much as we can get in a single recv call.
+      int recved_size = receiver->Recv(recved_data.data(), data_size, nullptr);
+
+      if (!recv_called) {
+        // The first Recv() after getting readability should succeed and receive
+        // some data.
+        // TODO: The following line is disabled due to flakey pulse
+        //     builds.  Re-enable if/when possible.
+        // EXPECT_GT(recved_size, 0);
+        recv_called = true;
+      }
+      if (recved_size >= 0) {
+        EXPECT_LE(static_cast<size_t>(recved_size),
+            sent_size - recv_buffer.size());
+        recv_buffer.AppendData(recved_data.data(), recved_size);
+      } else {
+        ASSERT_TRUE(receiver->IsBlocking());
+        readable = false;
+      }
+    }
+
+    // Once all that we've sent has been received, expect to be able to send
+    // again.
+    if (!writable) {
+      ASSERT_TRUE_WAIT(sink.Check(sender.get(), SSE_WRITE), kTimeout);
+      writable = true;
+      send_called = false;
+    }
+  }
+
+  // The received data matches the sent data.
+  EXPECT_EQ(data_size, sent_size);
+  EXPECT_EQ(data_size, recv_buffer.size());
+  EXPECT_EQ(recv_buffer, send_buffer);
+
+  // Close down.
+  sender->Close();
+  EXPECT_EQ_WAIT(AsyncSocket::CS_CLOSED, receiver->GetState(), kTimeout);
+  EXPECT_TRUE(sink.Check(receiver.get(), SSE_CLOSE));
+  receiver->Close();
+}
+
+void SocketTest::SingleFlowControlCallbackInternal(const IPAddress& loopback) {
+  StreamSink sink;
+  SocketAddress accept_addr;
+
+  // Create client.
+  std::unique_ptr<AsyncSocket> client(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(client.get());
+
+  // Create server and listen.
+  std::unique_ptr<AsyncSocket> server(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_STREAM));
+  sink.Monitor(server.get());
+  EXPECT_EQ(0, server->Bind(SocketAddress(loopback, 0)));
+  EXPECT_EQ(0, server->Listen(5));
+
+  // Attempt connection.
+  EXPECT_EQ(0, client->Connect(server->GetLocalAddress()));
+
+  // Accept connection.
+  EXPECT_TRUE_WAIT((sink.Check(server.get(), SSE_READ)), kTimeout);
+  std::unique_ptr<AsyncSocket> accepted(server->Accept(&accept_addr));
+  ASSERT_TRUE(accepted);
+  sink.Monitor(accepted.get());
+
+  // Both sides are now connected.
+  EXPECT_EQ_WAIT(AsyncSocket::CS_CONNECTED, client->GetState(), kTimeout);
+  EXPECT_TRUE(sink.Check(client.get(), SSE_OPEN));
+  EXPECT_EQ(client->GetRemoteAddress(), accepted->GetLocalAddress());
+  EXPECT_EQ(accepted->GetRemoteAddress(), client->GetLocalAddress());
+
+  // Expect a writable callback from the connect.
+  EXPECT_TRUE_WAIT(sink.Check(accepted.get(), SSE_WRITE), kTimeout);
+
+  // Fill the socket buffer.
+  char buf[1024 * 16] = {0};
+  int sends = 0;
+  while (++sends && accepted->Send(&buf, arraysize(buf)) != -1) {}
+  EXPECT_TRUE(accepted->IsBlocking());
+
+  // Wait until data is available.
+  EXPECT_TRUE_WAIT(sink.Check(client.get(), SSE_READ), kTimeout);
+
+  // Pull data.
+  for (int i = 0; i < sends; ++i) {
+    client->Recv(buf, arraysize(buf), nullptr);
+  }
+
+  // Expect at least one additional writable callback.
+  EXPECT_TRUE_WAIT(sink.Check(accepted.get(), SSE_WRITE), kTimeout);
+
+  // Adding data in response to the writeable callback shouldn't cause infinite
+  // callbacks.
+  int extras = 0;
+  for (int i = 0; i < 100; ++i) {
+    accepted->Send(&buf, arraysize(buf));
+    rtc::Thread::Current()->ProcessMessages(1);
+    if (sink.Check(accepted.get(), SSE_WRITE)) {
+      extras++;
+    }
+  }
+  EXPECT_LT(extras, 2);
+
+  // Close down.
+  accepted->Close();
+  client->Close();
+}
+
+void SocketTest::UdpInternal(const IPAddress& loopback) {
+  SocketAddress empty = EmptySocketAddressWithFamily(loopback.family());
+  // Test basic bind and connect behavior.
+  AsyncSocket* socket =
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_DGRAM);
+  EXPECT_EQ(AsyncSocket::CS_CLOSED, socket->GetState());
+  EXPECT_EQ(0, socket->Bind(SocketAddress(loopback, 0)));
+  SocketAddress addr1 = socket->GetLocalAddress();
+  EXPECT_EQ(0, socket->Connect(addr1));
+  EXPECT_EQ(AsyncSocket::CS_CONNECTED, socket->GetState());
+  socket->Close();
+  EXPECT_EQ(AsyncSocket::CS_CLOSED, socket->GetState());
+  delete socket;
+
+  // Test send/receive behavior.
+  std::unique_ptr<TestClient> client1(
+      new TestClient(WrapUnique(AsyncUDPSocket::Create(ss_, addr1))));
+  std::unique_ptr<TestClient> client2(
+      new TestClient(WrapUnique(AsyncUDPSocket::Create(ss_, empty))));
+
+  SocketAddress addr2;
+  EXPECT_EQ(3, client2->SendTo("foo", 3, addr1));
+  EXPECT_TRUE(client1->CheckNextPacket("foo", 3, &addr2));
+
+  SocketAddress addr3;
+  EXPECT_EQ(6, client1->SendTo("bizbaz", 6, addr2));
+  EXPECT_TRUE(client2->CheckNextPacket("bizbaz", 6, &addr3));
+  EXPECT_EQ(addr3, addr1);
+  // TODO: figure out what the intent is here
+  for (int i = 0; i < 10; ++i) {
+    client2.reset(
+        new TestClient(WrapUnique(AsyncUDPSocket::Create(ss_, empty))));
+
+    SocketAddress addr4;
+    EXPECT_EQ(3, client2->SendTo("foo", 3, addr1));
+    EXPECT_TRUE(client1->CheckNextPacket("foo", 3, &addr4));
+    EXPECT_EQ(addr4.ipaddr(), addr2.ipaddr());
+
+    SocketAddress addr5;
+    EXPECT_EQ(6, client1->SendTo("bizbaz", 6, addr4));
+    EXPECT_TRUE(client2->CheckNextPacket("bizbaz", 6, &addr5));
+    EXPECT_EQ(addr5, addr1);
+
+    addr2 = addr4;
+  }
+}
+
+void SocketTest::UdpReadyToSend(const IPAddress& loopback) {
+  SocketAddress empty = EmptySocketAddressWithFamily(loopback.family());
+  // RFC 5737 - The blocks 192.0.2.0/24 (TEST-NET-1) ... are provided for use in
+  // documentation.
+  // RFC 3849 - 2001:DB8::/32 as a documentation-only prefix.
+  std::string dest = (loopback.family() == AF_INET6) ?
+      "2001:db8::1" : "192.0.2.0";
+  SocketAddress test_addr(dest, 2345);
+
+  // Test send
+  std::unique_ptr<TestClient> client(
+      new TestClient(WrapUnique(AsyncUDPSocket::Create(ss_, empty))));
+  int test_packet_size = 1200;
+  std::unique_ptr<char[]> test_packet(new char[test_packet_size]);
+  // Init the test packet just to avoid memcheck warning.
+  memset(test_packet.get(), 0, test_packet_size);
+  // Set the send buffer size to the same size as the test packet to have a
+  // better chance to get EWOULDBLOCK.
+  int send_buffer_size = test_packet_size;
+#if defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID)
+  send_buffer_size /= 2;
+#endif
+  client->SetOption(rtc::Socket::OPT_SNDBUF, send_buffer_size);
+
+  int error = 0;
+  uint32_t start_ms = Time();
+  int sent_packet_num = 0;
+  int expected_error = EWOULDBLOCK;
+  while (start_ms + kTimeout > Time()) {
+    int ret = client->SendTo(test_packet.get(), test_packet_size, test_addr);
+    ++sent_packet_num;
+    if (ret != test_packet_size) {
+      error = client->GetError();
+      if (error == expected_error) {
+        RTC_LOG(LS_INFO) << "Got expected error code after sending "
+                         << sent_packet_num << " packets.";
+        break;
+      }
+    }
+  }
+  EXPECT_EQ(expected_error, error);
+  EXPECT_FALSE(client->ready_to_send());
+  EXPECT_TRUE_WAIT(client->ready_to_send(), kTimeout);
+  RTC_LOG(LS_INFO) << "Got SignalReadyToSend";
+}
+
+void SocketTest::GetSetOptionsInternal(const IPAddress& loopback) {
+  std::unique_ptr<AsyncSocket> socket(
+      ss_->CreateAsyncSocket(loopback.family(), SOCK_DGRAM));
+  socket->Bind(SocketAddress(loopback, 0));
+
+  // Check SNDBUF/RCVBUF.
+  const int desired_size = 12345;
+#if defined(WEBRTC_LINUX)
+  // Yes, really.  It's in the kernel source.
+  const int expected_size = desired_size * 2;
+#else   // !WEBRTC_LINUX
+  const int expected_size = desired_size;
+#endif  // !WEBRTC_LINUX
+  int recv_size = 0;
+  int send_size = 0;
+  // get the initial sizes
+  ASSERT_NE(-1, socket->GetOption(Socket::OPT_RCVBUF, &recv_size));
+  ASSERT_NE(-1, socket->GetOption(Socket::OPT_SNDBUF, &send_size));
+  // set our desired sizes
+  ASSERT_NE(-1, socket->SetOption(Socket::OPT_RCVBUF, desired_size));
+  ASSERT_NE(-1, socket->SetOption(Socket::OPT_SNDBUF, desired_size));
+  // get the sizes again
+  ASSERT_NE(-1, socket->GetOption(Socket::OPT_RCVBUF, &recv_size));
+  ASSERT_NE(-1, socket->GetOption(Socket::OPT_SNDBUF, &send_size));
+  // make sure they are right
+  ASSERT_EQ(expected_size, recv_size);
+  ASSERT_EQ(expected_size, send_size);
+
+  // Check that we can't set NODELAY on a UDP socket.
+  int current_nd, desired_nd = 1;
+  ASSERT_EQ(-1, socket->GetOption(Socket::OPT_NODELAY, &current_nd));
+  ASSERT_EQ(-1, socket->SetOption(Socket::OPT_NODELAY, desired_nd));
+}
+
+void SocketTest::SocketRecvTimestamp(const IPAddress& loopback) {
+  std::unique_ptr<Socket> socket(
+      ss_->CreateSocket(loopback.family(), SOCK_DGRAM));
+  EXPECT_EQ(0, socket->Bind(SocketAddress(loopback, 0)));
+  SocketAddress address = socket->GetLocalAddress();
+
+  int64_t send_time_1 = TimeMicros();
+  socket->SendTo("foo", 3, address);
+  int64_t recv_timestamp_1;
+  char buffer[3];
+  socket->RecvFrom(buffer, 3, nullptr, &recv_timestamp_1);
+  EXPECT_GT(recv_timestamp_1, -1);
+
+  const int64_t kTimeBetweenPacketsMs = 100;
+  Thread::SleepMs(kTimeBetweenPacketsMs);
+
+  int64_t send_time_2 = TimeMicros();
+  socket->SendTo("bar", 3, address);
+  int64_t recv_timestamp_2;
+  socket->RecvFrom(buffer, 3, nullptr, &recv_timestamp_2);
+
+  int64_t system_time_diff = send_time_2 - send_time_1;
+  int64_t recv_timestamp_diff = recv_timestamp_2 - recv_timestamp_1;
+  // Compare against the system time at the point of sending, because
+  // SleepMs may not sleep for exactly the requested time.
+  EXPECT_NEAR(system_time_diff, recv_timestamp_diff, 10000);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/socket_unittest.h b/rtc_base/socket_unittest.h
new file mode 100644
index 0000000..55df219
--- /dev/null
+++ b/rtc_base/socket_unittest.h
@@ -0,0 +1,100 @@
+/*
+ *  Copyright 2009 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SOCKET_UNITTEST_H_
+#define RTC_BASE_SOCKET_UNITTEST_H_
+
+#include "rtc_base/gunit.h"
+#include "rtc_base/thread.h"
+
+namespace rtc {
+
+// Generic socket tests, to be used when testing individual socketservers.
+// Derive your specific test class from SocketTest, install your
+// socketserver, and call the SocketTest test methods.
+class SocketTest : public testing::Test {
+ protected:
+  SocketTest() : kIPv4Loopback(INADDR_LOOPBACK),
+                 kIPv6Loopback(in6addr_loopback),
+                 ss_(nullptr) {}
+  void SetUp() override;
+  void TestConnectIPv4();
+  void TestConnectIPv6();
+  void TestConnectWithDnsLookupIPv4();
+  void TestConnectWithDnsLookupIPv6();
+  void TestConnectFailIPv4();
+  void TestConnectFailIPv6();
+  void TestConnectWithDnsLookupFailIPv4();
+  void TestConnectWithDnsLookupFailIPv6();
+  void TestConnectWithClosedSocketIPv4();
+  void TestConnectWithClosedSocketIPv6();
+  void TestConnectWhileNotClosedIPv4();
+  void TestConnectWhileNotClosedIPv6();
+  void TestServerCloseDuringConnectIPv4();
+  void TestServerCloseDuringConnectIPv6();
+  void TestClientCloseDuringConnectIPv4();
+  void TestClientCloseDuringConnectIPv6();
+  void TestServerCloseIPv4();
+  void TestServerCloseIPv6();
+  void TestCloseInClosedCallbackIPv4();
+  void TestCloseInClosedCallbackIPv6();
+  void TestSocketServerWaitIPv4();
+  void TestSocketServerWaitIPv6();
+  void TestTcpIPv4();
+  void TestTcpIPv6();
+  void TestSingleFlowControlCallbackIPv4();
+  void TestSingleFlowControlCallbackIPv6();
+  void TestUdpIPv4();
+  void TestUdpIPv6();
+  void TestUdpReadyToSendIPv4();
+  void TestUdpReadyToSendIPv6();
+  void TestGetSetOptionsIPv4();
+  void TestGetSetOptionsIPv6();
+  void TestSocketRecvTimestampIPv4();
+  void TestSocketRecvTimestampIPv6();
+
+  static const int kTimeout = 5000;  // ms
+  const IPAddress kIPv4Loopback;
+  const IPAddress kIPv6Loopback;
+
+ protected:
+  void TcpInternal(const IPAddress& loopback, size_t data_size,
+      ptrdiff_t max_send_size);
+
+ private:
+  void ConnectInternal(const IPAddress& loopback);
+  void ConnectWithDnsLookupInternal(const IPAddress& loopback,
+                                    const std::string& host);
+  void ConnectFailInternal(const IPAddress& loopback);
+
+  void ConnectWithDnsLookupFailInternal(const IPAddress& loopback);
+  void ConnectWithClosedSocketInternal(const IPAddress& loopback);
+  void ConnectWhileNotClosedInternal(const IPAddress& loopback);
+  void ServerCloseDuringConnectInternal(const IPAddress& loopback);
+  void ClientCloseDuringConnectInternal(const IPAddress& loopback);
+  void ServerCloseInternal(const IPAddress& loopback);
+  void CloseInClosedCallbackInternal(const IPAddress& loopback);
+  void SocketServerWaitInternal(const IPAddress& loopback);
+  void SingleFlowControlCallbackInternal(const IPAddress& loopback);
+  void UdpInternal(const IPAddress& loopback);
+  void UdpReadyToSend(const IPAddress& loopback);
+  void GetSetOptionsInternal(const IPAddress& loopback);
+  void SocketRecvTimestamp(const IPAddress& loopback);
+
+  SocketServer* ss_;
+};
+
+// For unbound sockets, GetLocalAddress / GetRemoteAddress return AF_UNSPEC
+// values on Windows, but an empty address of the same family on Linux/MacOS X.
+bool IsUnspecOrEmptyIP(const IPAddress& address);
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_SOCKET_UNITTEST_H_
diff --git a/rtc_base/socketadapters.cc b/rtc_base/socketadapters.cc
new file mode 100644
index 0000000..a300495
--- /dev/null
+++ b/rtc_base/socketadapters.cc
@@ -0,0 +1,848 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#if defined(_MSC_VER) && _MSC_VER < 1300
+#pragma warning(disable:4786)
+#endif
+
+#include <time.h>
+#include <errno.h>
+
+#if defined(WEBRTC_WIN)
+#include <windows.h>
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#define SECURITY_WIN32
+#include <security.h>
+#endif
+
+#include <algorithm>
+
+#include "rtc_base/bytebuffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/httpcommon.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/socketadapters.h"
+#include "rtc_base/stringencode.h"
+#include "rtc_base/stringutils.h"
+#include "rtc_base/zero_memory.h"
+
+namespace rtc {
+
+BufferedReadAdapter::BufferedReadAdapter(AsyncSocket* socket, size_t size)
+    : AsyncSocketAdapter(socket), buffer_size_(size),
+      data_len_(0), buffering_(false) {
+  buffer_ = new char[buffer_size_];
+}
+
+BufferedReadAdapter::~BufferedReadAdapter() {
+  delete [] buffer_;
+}
+
+int BufferedReadAdapter::Send(const void *pv, size_t cb) {
+  if (buffering_) {
+    // TODO: Spoof error better; Signal Writeable
+    socket_->SetError(EWOULDBLOCK);
+    return -1;
+  }
+  return AsyncSocketAdapter::Send(pv, cb);
+}
+
+int BufferedReadAdapter::Recv(void* pv, size_t cb, int64_t* timestamp) {
+  if (buffering_) {
+    socket_->SetError(EWOULDBLOCK);
+    return -1;
+  }
+
+  size_t read = 0;
+
+  if (data_len_) {
+    read = std::min(cb, data_len_);
+    memcpy(pv, buffer_, read);
+    data_len_ -= read;
+    if (data_len_ > 0) {
+      memmove(buffer_, buffer_ + read, data_len_);
+    }
+    pv = static_cast<char *>(pv) + read;
+    cb -= read;
+  }
+
+  // FIX: If cb == 0, we won't generate another read event
+
+  int res = AsyncSocketAdapter::Recv(pv, cb, timestamp);
+  if (res >= 0) {
+    // Read from socket and possibly buffer; return combined length
+    return res + static_cast<int>(read);
+  }
+
+  if (read > 0) {
+    // Failed to read from socket, but still read something from buffer
+    return static_cast<int>(read);
+  }
+
+  // Didn't read anything; return error from socket
+  return res;
+}
+
+void BufferedReadAdapter::BufferInput(bool on) {
+  buffering_ = on;
+}
+
+void BufferedReadAdapter::OnReadEvent(AsyncSocket * socket) {
+  RTC_DCHECK(socket == socket_);
+
+  if (!buffering_) {
+    AsyncSocketAdapter::OnReadEvent(socket);
+    return;
+  }
+
+  if (data_len_ >= buffer_size_) {
+    RTC_LOG(LS_ERROR) << "Input buffer overflow";
+    RTC_NOTREACHED();
+    data_len_ = 0;
+  }
+
+  int len =
+      socket_->Recv(buffer_ + data_len_, buffer_size_ - data_len_, nullptr);
+  if (len < 0) {
+    // TODO: Do something better like forwarding the error to the user.
+    RTC_LOG_ERR(INFO) << "Recv";
+    return;
+  }
+
+  data_len_ += len;
+
+  ProcessInput(buffer_, &data_len_);
+}
+
+AsyncProxyServerSocket::AsyncProxyServerSocket(AsyncSocket* socket,
+                                               size_t buffer_size)
+    : BufferedReadAdapter(socket, buffer_size) {
+}
+
+AsyncProxyServerSocket::~AsyncProxyServerSocket() = default;
+
+///////////////////////////////////////////////////////////////////////////////
+
+// This is a SSL v2 CLIENT_HELLO message.
+// TODO: Should this have a session id? The response doesn't have a
+// certificate, so the hello should have a session id.
+static const uint8_t kSslClientHello[] = {
+    0x80, 0x46,                                            // msg len
+    0x01,                                                  // CLIENT_HELLO
+    0x03, 0x01,                                            // SSL 3.1
+    0x00, 0x2d,                                            // ciphersuite len
+    0x00, 0x00,                                            // session id len
+    0x00, 0x10,                                            // challenge len
+    0x01, 0x00, 0x80, 0x03, 0x00, 0x80, 0x07, 0x00, 0xc0,  // ciphersuites
+    0x06, 0x00, 0x40, 0x02, 0x00, 0x80, 0x04, 0x00, 0x80,  //
+    0x00, 0x00, 0x04, 0x00, 0xfe, 0xff, 0x00, 0x00, 0x0a,  //
+    0x00, 0xfe, 0xfe, 0x00, 0x00, 0x09, 0x00, 0x00, 0x64,  //
+    0x00, 0x00, 0x62, 0x00, 0x00, 0x03, 0x00, 0x00, 0x06,  //
+    0x1f, 0x17, 0x0c, 0xa6, 0x2f, 0x00, 0x78, 0xfc,        // challenge
+    0x46, 0x55, 0x2e, 0xb1, 0x83, 0x39, 0xf1, 0xea         //
+};
+
+// This is a TLSv1 SERVER_HELLO message.
+static const uint8_t kSslServerHello[] = {
+    0x16,                                            // handshake message
+    0x03, 0x01,                                      // SSL 3.1
+    0x00, 0x4a,                                      // message len
+    0x02,                                            // SERVER_HELLO
+    0x00, 0x00, 0x46,                                // handshake len
+    0x03, 0x01,                                      // SSL 3.1
+    0x42, 0x85, 0x45, 0xa7, 0x27, 0xa9, 0x5d, 0xa0,  // server random
+    0xb3, 0xc5, 0xe7, 0x53, 0xda, 0x48, 0x2b, 0x3f,  //
+    0xc6, 0x5a, 0xca, 0x89, 0xc1, 0x58, 0x52, 0xa1,  //
+    0x78, 0x3c, 0x5b, 0x17, 0x46, 0x00, 0x85, 0x3f,  //
+    0x20,                                            // session id len
+    0x0e, 0xd3, 0x06, 0x72, 0x5b, 0x5b, 0x1b, 0x5f,  // session id
+    0x15, 0xac, 0x13, 0xf9, 0x88, 0x53, 0x9d, 0x9b,  //
+    0xe8, 0x3d, 0x7b, 0x0c, 0x30, 0x32, 0x6e, 0x38,  //
+    0x4d, 0xa2, 0x75, 0x57, 0x41, 0x6c, 0x34, 0x5c,  //
+    0x00, 0x04,                                      // RSA/RC4-128/MD5
+    0x00                                             // null compression
+};
+
+AsyncSSLSocket::AsyncSSLSocket(AsyncSocket* socket)
+    : BufferedReadAdapter(socket, 1024) {
+}
+
+int AsyncSSLSocket::Connect(const SocketAddress& addr) {
+  // Begin buffering before we connect, so that there isn't a race condition
+  // between potential senders and receiving the OnConnectEvent signal
+  BufferInput(true);
+  return BufferedReadAdapter::Connect(addr);
+}
+
+void AsyncSSLSocket::OnConnectEvent(AsyncSocket * socket) {
+  RTC_DCHECK(socket == socket_);
+  // TODO: we could buffer output too...
+  const int res = DirectSend(kSslClientHello, sizeof(kSslClientHello));
+  RTC_DCHECK_EQ(sizeof(kSslClientHello), res);
+}
+
+void AsyncSSLSocket::ProcessInput(char* data, size_t* len) {
+  if (*len < sizeof(kSslServerHello))
+    return;
+
+  if (memcmp(kSslServerHello, data, sizeof(kSslServerHello)) != 0) {
+    Close();
+    SignalCloseEvent(this, 0);  // TODO: error code?
+    return;
+  }
+
+  *len -= sizeof(kSslServerHello);
+  if (*len > 0) {
+    memmove(data, data + sizeof(kSslServerHello), *len);
+  }
+
+  bool remainder = (*len > 0);
+  BufferInput(false);
+  SignalConnectEvent(this);
+
+  // FIX: if SignalConnect causes the socket to be destroyed, we are in trouble
+  if (remainder)
+    SignalReadEvent(this);
+}
+
+AsyncSSLServerSocket::AsyncSSLServerSocket(AsyncSocket* socket)
+     : BufferedReadAdapter(socket, 1024) {
+  BufferInput(true);
+}
+
+void AsyncSSLServerSocket::ProcessInput(char* data, size_t* len) {
+  // We only accept client hello messages.
+  if (*len < sizeof(kSslClientHello)) {
+    return;
+  }
+
+  if (memcmp(kSslClientHello, data, sizeof(kSslClientHello)) != 0) {
+    Close();
+    SignalCloseEvent(this, 0);
+    return;
+  }
+
+  *len -= sizeof(kSslClientHello);
+
+  // Clients should not send more data until the handshake is completed.
+  RTC_DCHECK(*len == 0);
+
+  // Send a server hello back to the client.
+  DirectSend(kSslServerHello, sizeof(kSslServerHello));
+
+  // Handshake completed for us, redirect input to our parent.
+  BufferInput(false);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+AsyncHttpsProxySocket::AsyncHttpsProxySocket(AsyncSocket* socket,
+                                             const std::string& user_agent,
+                                             const SocketAddress& proxy,
+                                             const std::string& username,
+                                             const CryptString& password)
+  : BufferedReadAdapter(socket, 1024), proxy_(proxy), agent_(user_agent),
+    user_(username), pass_(password), force_connect_(false), state_(PS_ERROR),
+    context_(0) {
+}
+
+AsyncHttpsProxySocket::~AsyncHttpsProxySocket() {
+  delete context_;
+}
+
+int AsyncHttpsProxySocket::Connect(const SocketAddress& addr) {
+  int ret;
+  RTC_LOG(LS_VERBOSE) << "AsyncHttpsProxySocket::Connect("
+                      << proxy_.ToSensitiveString() << ")";
+  dest_ = addr;
+  state_ = PS_INIT;
+  if (ShouldIssueConnect()) {
+    BufferInput(true);
+  }
+  ret = BufferedReadAdapter::Connect(proxy_);
+  // TODO: Set state_ appropriately if Connect fails.
+  return ret;
+}
+
+SocketAddress AsyncHttpsProxySocket::GetRemoteAddress() const {
+  return dest_;
+}
+
+int AsyncHttpsProxySocket::Close() {
+  headers_.clear();
+  state_ = PS_ERROR;
+  dest_.Clear();
+  delete context_;
+  context_ = nullptr;
+  return BufferedReadAdapter::Close();
+}
+
+Socket::ConnState AsyncHttpsProxySocket::GetState() const {
+  if (state_ < PS_TUNNEL) {
+    return CS_CONNECTING;
+  } else if (state_ == PS_TUNNEL) {
+    return CS_CONNECTED;
+  } else {
+    return CS_CLOSED;
+  }
+}
+
+void AsyncHttpsProxySocket::OnConnectEvent(AsyncSocket * socket) {
+  RTC_LOG(LS_VERBOSE) << "AsyncHttpsProxySocket::OnConnectEvent";
+  if (!ShouldIssueConnect()) {
+    state_ = PS_TUNNEL;
+    BufferedReadAdapter::OnConnectEvent(socket);
+    return;
+  }
+  SendRequest();
+}
+
+void AsyncHttpsProxySocket::OnCloseEvent(AsyncSocket * socket, int err) {
+  RTC_LOG(LS_VERBOSE) << "AsyncHttpsProxySocket::OnCloseEvent(" << err << ")";
+  if ((state_ == PS_WAIT_CLOSE) && (err == 0)) {
+    state_ = PS_ERROR;
+    Connect(dest_);
+  } else {
+    BufferedReadAdapter::OnCloseEvent(socket, err);
+  }
+}
+
+void AsyncHttpsProxySocket::ProcessInput(char* data, size_t* len) {
+  size_t start = 0;
+  for (size_t pos = start; state_ < PS_TUNNEL && pos < *len;) {
+    if (state_ == PS_SKIP_BODY) {
+      size_t consume = std::min(*len - pos, content_length_);
+      pos += consume;
+      start = pos;
+      content_length_ -= consume;
+      if (content_length_ == 0) {
+        EndResponse();
+      }
+      continue;
+    }
+
+    if (data[pos++] != '\n')
+      continue;
+
+    size_t len = pos - start - 1;
+    if ((len > 0) && (data[start + len - 1] == '\r'))
+      --len;
+
+    data[start + len] = 0;
+    ProcessLine(data + start, len);
+    start = pos;
+  }
+
+  *len -= start;
+  if (*len > 0) {
+    memmove(data, data + start, *len);
+  }
+
+  if (state_ != PS_TUNNEL)
+    return;
+
+  bool remainder = (*len > 0);
+  BufferInput(false);
+  SignalConnectEvent(this);
+
+  // FIX: if SignalConnect causes the socket to be destroyed, we are in trouble
+  if (remainder)
+    SignalReadEvent(this);  // TODO: signal this??
+}
+
+bool AsyncHttpsProxySocket::ShouldIssueConnect() const {
+  // TODO: Think about whether a more sophisticated test
+  // than dest port == 80 is needed.
+  return force_connect_ || (dest_.port() != 80);
+}
+
+void AsyncHttpsProxySocket::SendRequest() {
+  std::stringstream ss;
+  ss << "CONNECT " << dest_.ToString() << " HTTP/1.0\r\n";
+  ss << "User-Agent: " << agent_ << "\r\n";
+  ss << "Host: " << dest_.HostAsURIString() << "\r\n";
+  ss << "Content-Length: 0\r\n";
+  ss << "Proxy-Connection: Keep-Alive\r\n";
+  ss << headers_;
+  ss << "\r\n";
+  std::string str = ss.str();
+  DirectSend(str.c_str(), str.size());
+  state_ = PS_LEADER;
+  expect_close_ = true;
+  content_length_ = 0;
+  headers_.clear();
+
+  RTC_LOG(LS_VERBOSE) << "AsyncHttpsProxySocket >> " << str;
+}
+
+void AsyncHttpsProxySocket::ProcessLine(char * data, size_t len) {
+  RTC_LOG(LS_VERBOSE) << "AsyncHttpsProxySocket << " << data;
+
+  if (len == 0) {
+    if (state_ == PS_TUNNEL_HEADERS) {
+      state_ = PS_TUNNEL;
+    } else if (state_ == PS_ERROR_HEADERS) {
+      Error(defer_error_);
+      return;
+    } else if (state_ == PS_SKIP_HEADERS) {
+      if (content_length_) {
+        state_ = PS_SKIP_BODY;
+      } else {
+        EndResponse();
+        return;
+      }
+    } else {
+      static bool report = false;
+      if (!unknown_mechanisms_.empty() && !report) {
+        report = true;
+        std::string msg(
+          "Unable to connect to the Google Talk service due to an incompatibility "
+          "with your proxy.\r\nPlease help us resolve this issue by submitting the "
+          "following information to us using our technical issue submission form "
+          "at:\r\n\r\n"
+          "http://www.google.com/support/talk/bin/request.py\r\n\r\n"
+          "We apologize for the inconvenience.\r\n\r\n"
+          "Information to submit to Google: "
+          );
+        //std::string msg("Please report the following information to foo@bar.com:\r\nUnknown methods: ");
+        msg.append(unknown_mechanisms_);
+#if defined(WEBRTC_WIN)
+        MessageBoxA(0, msg.c_str(), "Oops!", MB_OK);
+#endif
+#if defined(WEBRTC_POSIX)
+        // TODO: Raise a signal so the UI can be separated.
+        RTC_LOG(LS_ERROR) << "Oops!\n\n" << msg;
+#endif
+      }
+      // Unexpected end of headers
+      Error(0);
+      return;
+    }
+  } else if (state_ == PS_LEADER) {
+    unsigned int code;
+    if (sscanf(data, "HTTP/%*u.%*u %u", &code) != 1) {
+      Error(0);
+      return;
+    }
+    switch (code) {
+    case 200:
+      // connection good!
+      state_ = PS_TUNNEL_HEADERS;
+      return;
+#if defined(HTTP_STATUS_PROXY_AUTH_REQ) && (HTTP_STATUS_PROXY_AUTH_REQ != 407)
+#error Wrong code for HTTP_STATUS_PROXY_AUTH_REQ
+#endif
+    case 407:  // HTTP_STATUS_PROXY_AUTH_REQ
+      state_ = PS_AUTHENTICATE;
+      return;
+    default:
+      defer_error_ = 0;
+      state_ = PS_ERROR_HEADERS;
+      return;
+    }
+  } else if ((state_ == PS_AUTHENTICATE)
+             && (_strnicmp(data, "Proxy-Authenticate:", 19) == 0)) {
+    std::string response, auth_method;
+    switch (HttpAuthenticate(data + 19, len - 19,
+                             proxy_, "CONNECT", "/",
+                             user_, pass_, context_, response, auth_method)) {
+    case HAR_IGNORE:
+      RTC_LOG(LS_VERBOSE) << "Ignoring Proxy-Authenticate: " << auth_method;
+      if (!unknown_mechanisms_.empty())
+        unknown_mechanisms_.append(", ");
+      unknown_mechanisms_.append(auth_method);
+      break;
+    case HAR_RESPONSE:
+      headers_ = "Proxy-Authorization: ";
+      headers_.append(response);
+      headers_.append("\r\n");
+      state_ = PS_SKIP_HEADERS;
+      unknown_mechanisms_.clear();
+      break;
+    case HAR_CREDENTIALS:
+      defer_error_ = SOCKET_EACCES;
+      state_ = PS_ERROR_HEADERS;
+      unknown_mechanisms_.clear();
+      break;
+    case HAR_ERROR:
+      defer_error_ = 0;
+      state_ = PS_ERROR_HEADERS;
+      unknown_mechanisms_.clear();
+      break;
+    }
+  } else if (_strnicmp(data, "Content-Length:", 15) == 0) {
+    content_length_ = strtoul(data + 15, 0, 0);
+  } else if (_strnicmp(data, "Proxy-Connection: Keep-Alive", 28) == 0) {
+    expect_close_ = false;
+    /*
+  } else if (_strnicmp(data, "Connection: close", 17) == 0) {
+    expect_close_ = true;
+    */
+  }
+}
+
+void AsyncHttpsProxySocket::EndResponse() {
+  if (!expect_close_) {
+    SendRequest();
+    return;
+  }
+
+  // No point in waiting for the server to close... let's close now
+  // TODO: Refactor out PS_WAIT_CLOSE
+  state_ = PS_WAIT_CLOSE;
+  BufferedReadAdapter::Close();
+  OnCloseEvent(this, 0);
+}
+
+void AsyncHttpsProxySocket::Error(int error) {
+  BufferInput(false);
+  Close();
+  SetError(error);
+  SignalCloseEvent(this, error);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+AsyncSocksProxySocket::AsyncSocksProxySocket(AsyncSocket* socket,
+                                             const SocketAddress& proxy,
+                                             const std::string& username,
+                                             const CryptString& password)
+    : BufferedReadAdapter(socket, 1024), state_(SS_ERROR), proxy_(proxy),
+      user_(username), pass_(password) {
+}
+
+AsyncSocksProxySocket::~AsyncSocksProxySocket() = default;
+
+int AsyncSocksProxySocket::Connect(const SocketAddress& addr) {
+  int ret;
+  dest_ = addr;
+  state_ = SS_INIT;
+  BufferInput(true);
+  ret = BufferedReadAdapter::Connect(proxy_);
+  // TODO: Set state_ appropriately if Connect fails.
+  return ret;
+}
+
+SocketAddress AsyncSocksProxySocket::GetRemoteAddress() const {
+  return dest_;
+}
+
+int AsyncSocksProxySocket::Close() {
+  state_ = SS_ERROR;
+  dest_.Clear();
+  return BufferedReadAdapter::Close();
+}
+
+Socket::ConnState AsyncSocksProxySocket::GetState() const {
+  if (state_ < SS_TUNNEL) {
+    return CS_CONNECTING;
+  } else if (state_ == SS_TUNNEL) {
+    return CS_CONNECTED;
+  } else {
+    return CS_CLOSED;
+  }
+}
+
+void AsyncSocksProxySocket::OnConnectEvent(AsyncSocket* socket) {
+  SendHello();
+}
+
+void AsyncSocksProxySocket::ProcessInput(char* data, size_t* len) {
+  RTC_DCHECK(state_ < SS_TUNNEL);
+
+  ByteBufferReader response(data, *len);
+
+  if (state_ == SS_HELLO) {
+    uint8_t ver, method;
+    if (!response.ReadUInt8(&ver) ||
+        !response.ReadUInt8(&method))
+      return;
+
+    if (ver != 5) {
+      Error(0);
+      return;
+    }
+
+    if (method == 0) {
+      SendConnect();
+    } else if (method == 2) {
+      SendAuth();
+    } else {
+      Error(0);
+      return;
+    }
+  } else if (state_ == SS_AUTH) {
+    uint8_t ver, status;
+    if (!response.ReadUInt8(&ver) ||
+        !response.ReadUInt8(&status))
+      return;
+
+    if ((ver != 1) || (status != 0)) {
+      Error(SOCKET_EACCES);
+      return;
+    }
+
+    SendConnect();
+  } else if (state_ == SS_CONNECT) {
+    uint8_t ver, rep, rsv, atyp;
+    if (!response.ReadUInt8(&ver) ||
+        !response.ReadUInt8(&rep) ||
+        !response.ReadUInt8(&rsv) ||
+        !response.ReadUInt8(&atyp))
+      return;
+
+    if ((ver != 5) || (rep != 0)) {
+      Error(0);
+      return;
+    }
+
+    uint16_t port;
+    if (atyp == 1) {
+      uint32_t addr;
+      if (!response.ReadUInt32(&addr) ||
+          !response.ReadUInt16(&port))
+        return;
+      RTC_LOG(LS_VERBOSE) << "Bound on " << addr << ":" << port;
+    } else if (atyp == 3) {
+      uint8_t len;
+      std::string addr;
+      if (!response.ReadUInt8(&len) ||
+          !response.ReadString(&addr, len) ||
+          !response.ReadUInt16(&port))
+        return;
+      RTC_LOG(LS_VERBOSE) << "Bound on " << addr << ":" << port;
+    } else if (atyp == 4) {
+      std::string addr;
+      if (!response.ReadString(&addr, 16) ||
+          !response.ReadUInt16(&port))
+        return;
+      RTC_LOG(LS_VERBOSE) << "Bound on <IPV6>:" << port;
+    } else {
+      Error(0);
+      return;
+    }
+
+    state_ = SS_TUNNEL;
+  }
+
+  // Consume parsed data
+  *len = response.Length();
+  memmove(data, response.Data(), *len);
+
+  if (state_ != SS_TUNNEL)
+    return;
+
+  bool remainder = (*len > 0);
+  BufferInput(false);
+  SignalConnectEvent(this);
+
+  // FIX: if SignalConnect causes the socket to be destroyed, we are in trouble
+  if (remainder)
+    SignalReadEvent(this);  // TODO: signal this??
+}
+
+void AsyncSocksProxySocket::SendHello() {
+  ByteBufferWriter request;
+  request.WriteUInt8(5);    // Socks Version
+  if (user_.empty()) {
+    request.WriteUInt8(1);  // Authentication Mechanisms
+    request.WriteUInt8(0);  // No authentication
+  } else {
+    request.WriteUInt8(2);  // Authentication Mechanisms
+    request.WriteUInt8(0);  // No authentication
+    request.WriteUInt8(2);  // Username/Password
+  }
+  DirectSend(request.Data(), request.Length());
+  state_ = SS_HELLO;
+}
+
+void AsyncSocksProxySocket::SendAuth() {
+  ByteBufferWriterT<ZeroOnFreeBuffer<char>> request;
+  request.WriteUInt8(1);           // Negotiation Version
+  request.WriteUInt8(static_cast<uint8_t>(user_.size()));
+  request.WriteString(user_);      // Username
+  request.WriteUInt8(static_cast<uint8_t>(pass_.GetLength()));
+  size_t len = pass_.GetLength() + 1;
+  char * sensitive = new char[len];
+  pass_.CopyTo(sensitive, true);
+  request.WriteBytes(sensitive, pass_.GetLength());  // Password
+  ExplicitZeroMemory(sensitive, len);
+  delete [] sensitive;
+  DirectSend(request.Data(), request.Length());
+  state_ = SS_AUTH;
+}
+
+void AsyncSocksProxySocket::SendConnect() {
+  ByteBufferWriter request;
+  request.WriteUInt8(5);              // Socks Version
+  request.WriteUInt8(1);              // CONNECT
+  request.WriteUInt8(0);              // Reserved
+  if (dest_.IsUnresolvedIP()) {
+    std::string hostname = dest_.hostname();
+    request.WriteUInt8(3);            // DOMAINNAME
+    request.WriteUInt8(static_cast<uint8_t>(hostname.size()));
+    request.WriteString(hostname);    // Destination Hostname
+  } else {
+    request.WriteUInt8(1);            // IPV4
+    request.WriteUInt32(dest_.ip());  // Destination IP
+  }
+  request.WriteUInt16(dest_.port());  // Destination Port
+  DirectSend(request.Data(), request.Length());
+  state_ = SS_CONNECT;
+}
+
+void AsyncSocksProxySocket::Error(int error) {
+  state_ = SS_ERROR;
+  BufferInput(false);
+  Close();
+  SetError(SOCKET_EACCES);
+  SignalCloseEvent(this, error);
+}
+
+AsyncSocksProxyServerSocket::AsyncSocksProxyServerSocket(AsyncSocket* socket)
+    : AsyncProxyServerSocket(socket, kBufferSize), state_(SS_HELLO) {
+  BufferInput(true);
+}
+
+void AsyncSocksProxyServerSocket::ProcessInput(char* data, size_t* len) {
+  // TODO: See if the whole message has arrived
+  RTC_DCHECK(state_ < SS_CONNECT_PENDING);
+
+  ByteBufferReader response(data, *len);
+  if (state_ == SS_HELLO) {
+    HandleHello(&response);
+  } else if (state_ == SS_AUTH) {
+    HandleAuth(&response);
+  } else if (state_ == SS_CONNECT) {
+    HandleConnect(&response);
+  }
+
+  // Consume parsed data
+  *len = response.Length();
+  memmove(data, response.Data(), *len);
+}
+
+void AsyncSocksProxyServerSocket::DirectSend(const ByteBufferWriter& buf) {
+  BufferedReadAdapter::DirectSend(buf.Data(), buf.Length());
+}
+
+void AsyncSocksProxyServerSocket::HandleHello(ByteBufferReader* request) {
+  uint8_t ver, num_methods;
+  if (!request->ReadUInt8(&ver) ||
+      !request->ReadUInt8(&num_methods)) {
+    Error(0);
+    return;
+  }
+
+  if (ver != 5) {
+    Error(0);
+    return;
+  }
+
+  // Handle either no-auth (0) or user/pass auth (2)
+  uint8_t method = 0xFF;
+  if (num_methods > 0 && !request->ReadUInt8(&method)) {
+    Error(0);
+    return;
+  }
+
+  // TODO: Ask the server which method to use.
+  SendHelloReply(method);
+  if (method == 0) {
+    state_ = SS_CONNECT;
+  } else if (method == 2) {
+    state_ = SS_AUTH;
+  } else {
+    state_ = SS_ERROR;
+  }
+}
+
+void AsyncSocksProxyServerSocket::SendHelloReply(uint8_t method) {
+  ByteBufferWriter response;
+  response.WriteUInt8(5);  // Socks Version
+  response.WriteUInt8(method);  // Auth method
+  DirectSend(response);
+}
+
+void AsyncSocksProxyServerSocket::HandleAuth(ByteBufferReader* request) {
+  uint8_t ver, user_len, pass_len;
+  std::string user, pass;
+  if (!request->ReadUInt8(&ver) ||
+      !request->ReadUInt8(&user_len) ||
+      !request->ReadString(&user, user_len) ||
+      !request->ReadUInt8(&pass_len) ||
+      !request->ReadString(&pass, pass_len)) {
+    Error(0);
+    return;
+  }
+
+  // TODO: Allow for checking of credentials.
+  SendAuthReply(0);
+  state_ = SS_CONNECT;
+}
+
+void AsyncSocksProxyServerSocket::SendAuthReply(uint8_t result) {
+  ByteBufferWriter response;
+  response.WriteUInt8(1);  // Negotiation Version
+  response.WriteUInt8(result);
+  DirectSend(response);
+}
+
+void AsyncSocksProxyServerSocket::HandleConnect(ByteBufferReader* request) {
+  uint8_t ver, command, reserved, addr_type;
+  uint32_t ip;
+  uint16_t port;
+  if (!request->ReadUInt8(&ver) ||
+      !request->ReadUInt8(&command) ||
+      !request->ReadUInt8(&reserved) ||
+      !request->ReadUInt8(&addr_type) ||
+      !request->ReadUInt32(&ip) ||
+      !request->ReadUInt16(&port)) {
+      Error(0);
+      return;
+  }
+
+  if (ver != 5 || command != 1 ||
+      reserved != 0 || addr_type != 1) {
+      Error(0);
+      return;
+  }
+
+  SignalConnectRequest(this, SocketAddress(ip, port));
+  state_ = SS_CONNECT_PENDING;
+}
+
+void AsyncSocksProxyServerSocket::SendConnectResult(int result,
+                                                    const SocketAddress& addr) {
+  if (state_ != SS_CONNECT_PENDING)
+    return;
+
+  ByteBufferWriter response;
+  response.WriteUInt8(5);  // Socks version
+  response.WriteUInt8((result != 0));  // 0x01 is generic error
+  response.WriteUInt8(0);  // reserved
+  response.WriteUInt8(1);  // IPv4 address
+  response.WriteUInt32(addr.ip());
+  response.WriteUInt16(addr.port());
+  DirectSend(response);
+  BufferInput(false);
+  state_ = SS_TUNNEL;
+}
+
+void AsyncSocksProxyServerSocket::Error(int error) {
+  state_ = SS_ERROR;
+  BufferInput(false);
+  Close();
+  SetError(SOCKET_EACCES);
+  SignalCloseEvent(this, error);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/socketadapters.h b/rtc_base/socketadapters.h
new file mode 100644
index 0000000..c5d1bfc
--- /dev/null
+++ b/rtc_base/socketadapters.h
@@ -0,0 +1,207 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SOCKETADAPTERS_H_
+#define RTC_BASE_SOCKETADAPTERS_H_
+
+#include <map>
+#include <string>
+
+#include "rtc_base/asyncsocket.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/cryptstring.h"
+#include "rtc_base/logging.h"
+
+namespace rtc {
+
+struct HttpAuthContext;
+class ByteBufferReader;
+class ByteBufferWriter;
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Implements a socket adapter that can buffer and process data internally,
+// as in the case of connecting to a proxy, where you must speak the proxy
+// protocol before commencing normal socket behavior.
+class BufferedReadAdapter : public AsyncSocketAdapter {
+ public:
+  BufferedReadAdapter(AsyncSocket* socket, size_t buffer_size);
+  ~BufferedReadAdapter() override;
+
+  int Send(const void* pv, size_t cb) override;
+  int Recv(void* pv, size_t cb, int64_t* timestamp) override;
+
+ protected:
+  int DirectSend(const void* pv, size_t cb) {
+    return AsyncSocketAdapter::Send(pv, cb);
+  }
+
+  void BufferInput(bool on = true);
+  virtual void ProcessInput(char* data, size_t* len) = 0;
+
+  void OnReadEvent(AsyncSocket* socket) override;
+
+ private:
+  char * buffer_;
+  size_t buffer_size_, data_len_;
+  bool buffering_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(BufferedReadAdapter);
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Interface for implementing proxy server sockets.
+class AsyncProxyServerSocket : public BufferedReadAdapter {
+ public:
+  AsyncProxyServerSocket(AsyncSocket* socket, size_t buffer_size);
+  ~AsyncProxyServerSocket() override;
+  sigslot::signal2<AsyncProxyServerSocket*,
+                   const SocketAddress&>  SignalConnectRequest;
+  virtual void SendConnectResult(int err, const SocketAddress& addr) = 0;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Implements a socket adapter that performs the client side of a
+// fake SSL handshake. Used for "ssltcp" P2P functionality.
+class AsyncSSLSocket : public BufferedReadAdapter {
+ public:
+  explicit AsyncSSLSocket(AsyncSocket* socket);
+
+  int Connect(const SocketAddress& addr) override;
+
+ protected:
+  void OnConnectEvent(AsyncSocket* socket) override;
+  void ProcessInput(char* data, size_t* len) override;
+  RTC_DISALLOW_COPY_AND_ASSIGN(AsyncSSLSocket);
+};
+
+// Implements a socket adapter that performs the server side of a
+// fake SSL handshake. Used when implementing a relay server that does "ssltcp".
+class AsyncSSLServerSocket : public BufferedReadAdapter {
+ public:
+  explicit AsyncSSLServerSocket(AsyncSocket* socket);
+
+ protected:
+  void ProcessInput(char* data, size_t* len) override;
+  RTC_DISALLOW_COPY_AND_ASSIGN(AsyncSSLServerSocket);
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Implements a socket adapter that speaks the HTTP/S proxy protocol.
+class AsyncHttpsProxySocket : public BufferedReadAdapter {
+ public:
+  AsyncHttpsProxySocket(AsyncSocket* socket, const std::string& user_agent,
+    const SocketAddress& proxy,
+    const std::string& username, const CryptString& password);
+  ~AsyncHttpsProxySocket() override;
+
+  // If connect is forced, the adapter will always issue an HTTP CONNECT to the
+  // target address.  Otherwise, it will connect only if the destination port
+  // is not port 80.
+  void SetForceConnect(bool force) { force_connect_ = force; }
+
+  int Connect(const SocketAddress& addr) override;
+  SocketAddress GetRemoteAddress() const override;
+  int Close() override;
+  ConnState GetState() const override;
+
+ protected:
+  void OnConnectEvent(AsyncSocket* socket) override;
+  void OnCloseEvent(AsyncSocket* socket, int err) override;
+  void ProcessInput(char* data, size_t* len) override;
+
+  bool ShouldIssueConnect() const;
+  void SendRequest();
+  void ProcessLine(char* data, size_t len);
+  void EndResponse();
+  void Error(int error);
+
+ private:
+  SocketAddress proxy_, dest_;
+  std::string agent_, user_, headers_;
+  CryptString pass_;
+  bool force_connect_;
+  size_t content_length_;
+  int defer_error_;
+  bool expect_close_;
+  enum ProxyState {
+    PS_INIT, PS_LEADER, PS_AUTHENTICATE, PS_SKIP_HEADERS, PS_ERROR_HEADERS,
+    PS_TUNNEL_HEADERS, PS_SKIP_BODY, PS_TUNNEL, PS_WAIT_CLOSE, PS_ERROR
+  } state_;
+  HttpAuthContext * context_;
+  std::string unknown_mechanisms_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(AsyncHttpsProxySocket);
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Implements a socket adapter that speaks the SOCKS proxy protocol.
+class AsyncSocksProxySocket : public BufferedReadAdapter {
+ public:
+  AsyncSocksProxySocket(AsyncSocket* socket, const SocketAddress& proxy,
+    const std::string& username, const CryptString& password);
+  ~AsyncSocksProxySocket() override;
+
+  int Connect(const SocketAddress& addr) override;
+  SocketAddress GetRemoteAddress() const override;
+  int Close() override;
+  ConnState GetState() const override;
+
+ protected:
+  void OnConnectEvent(AsyncSocket* socket) override;
+  void ProcessInput(char* data, size_t* len) override;
+
+  void SendHello();
+  void SendConnect();
+  void SendAuth();
+  void Error(int error);
+
+ private:
+  enum State {
+    SS_INIT, SS_HELLO, SS_AUTH, SS_CONNECT, SS_TUNNEL, SS_ERROR
+  };
+  State state_;
+  SocketAddress proxy_, dest_;
+  std::string user_;
+  CryptString pass_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(AsyncSocksProxySocket);
+};
+
+// Implements a proxy server socket for the SOCKS protocol.
+class AsyncSocksProxyServerSocket : public AsyncProxyServerSocket {
+ public:
+  explicit AsyncSocksProxyServerSocket(AsyncSocket* socket);
+
+ private:
+  void ProcessInput(char* data, size_t* len) override;
+  void DirectSend(const ByteBufferWriter& buf);
+
+  void HandleHello(ByteBufferReader* request);
+  void SendHelloReply(uint8_t method);
+  void HandleAuth(ByteBufferReader* request);
+  void SendAuthReply(uint8_t result);
+  void HandleConnect(ByteBufferReader* request);
+  void SendConnectResult(int result, const SocketAddress& addr) override;
+
+  void Error(int error);
+
+  static const int kBufferSize = 1024;
+  enum State {
+    SS_HELLO, SS_AUTH, SS_CONNECT, SS_CONNECT_PENDING, SS_TUNNEL, SS_ERROR
+  };
+  State state_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(AsyncSocksProxyServerSocket);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_SOCKETADAPTERS_H_
diff --git a/rtc_base/socketaddress.cc b/rtc_base/socketaddress.cc
new file mode 100644
index 0000000..54a41d4
--- /dev/null
+++ b/rtc_base/socketaddress.cc
@@ -0,0 +1,339 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/socketaddress.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+#if defined(WEBRTC_POSIX)
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#if defined(OPENBSD)
+#include <netinet/in_systm.h>
+#endif
+#if !defined(__native_client__)
+#include <netinet/ip.h>
+#endif
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <unistd.h>
+#endif
+
+#include <sstream>
+
+#include "rtc_base/byteorder.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/nethelpers.h"
+
+#if defined(WEBRTC_WIN)
+#include "rtc_base/win32.h"
+#endif
+
+namespace rtc {
+
+SocketAddress::SocketAddress() {
+  Clear();
+}
+
+SocketAddress::SocketAddress(const std::string& hostname, int port) {
+  SetIP(hostname);
+  SetPort(port);
+}
+
+SocketAddress::SocketAddress(uint32_t ip_as_host_order_integer, int port) {
+  SetIP(IPAddress(ip_as_host_order_integer));
+  SetPort(port);
+}
+
+SocketAddress::SocketAddress(const IPAddress& ip, int port) {
+  SetIP(ip);
+  SetPort(port);
+}
+
+SocketAddress::SocketAddress(const SocketAddress& addr) {
+  this->operator=(addr);
+}
+
+void SocketAddress::Clear() {
+  hostname_.clear();
+  literal_ = false;
+  ip_ = IPAddress();
+  port_ = 0;
+  scope_id_ = 0;
+}
+
+bool SocketAddress::IsNil() const {
+  return hostname_.empty() && IPIsUnspec(ip_) && 0 == port_;
+}
+
+bool SocketAddress::IsComplete() const {
+  return (!IPIsAny(ip_)) && (0 != port_);
+}
+
+SocketAddress& SocketAddress::operator=(const SocketAddress& addr) {
+  hostname_ = addr.hostname_;
+  ip_ = addr.ip_;
+  port_ = addr.port_;
+  literal_ = addr.literal_;
+  scope_id_ = addr.scope_id_;
+  return *this;
+}
+
+void SocketAddress::SetIP(uint32_t ip_as_host_order_integer) {
+  hostname_.clear();
+  literal_ = false;
+  ip_ = IPAddress(ip_as_host_order_integer);
+  scope_id_ = 0;
+}
+
+void SocketAddress::SetIP(const IPAddress& ip) {
+  hostname_.clear();
+  literal_ = false;
+  ip_ = ip;
+  scope_id_ = 0;
+}
+
+void SocketAddress::SetIP(const std::string& hostname) {
+  hostname_ = hostname;
+  literal_ = IPFromString(hostname, &ip_);
+  if (!literal_) {
+    ip_ = IPAddress();
+  }
+  scope_id_ = 0;
+}
+
+void SocketAddress::SetResolvedIP(uint32_t ip_as_host_order_integer) {
+  ip_ = IPAddress(ip_as_host_order_integer);
+  scope_id_ = 0;
+}
+
+void SocketAddress::SetResolvedIP(const IPAddress& ip) {
+  ip_ = ip;
+  scope_id_ = 0;
+}
+
+void SocketAddress::SetPort(int port) {
+  port_ = rtc::dchecked_cast<uint16_t>(port);
+}
+
+uint32_t SocketAddress::ip() const {
+  return ip_.v4AddressAsHostOrderInteger();
+}
+
+const IPAddress& SocketAddress::ipaddr() const {
+  return ip_;
+}
+
+uint16_t SocketAddress::port() const {
+  return port_;
+}
+
+std::string SocketAddress::HostAsURIString() const {
+  // If the hostname was a literal IP string, it may need to have square
+  // brackets added (for SocketAddress::ToString()).
+  if (!literal_ && !hostname_.empty())
+    return hostname_;
+  if (ip_.family() == AF_INET6) {
+    return "[" + ip_.ToString() + "]";
+  } else {
+    return ip_.ToString();
+  }
+}
+
+std::string SocketAddress::HostAsSensitiveURIString() const {
+  // If the hostname was a literal IP string, it may need to have square
+  // brackets added (for SocketAddress::ToString()).
+  if (!literal_ && !hostname_.empty())
+    return hostname_;
+  if (ip_.family() == AF_INET6) {
+    return "[" + ip_.ToSensitiveString() + "]";
+  } else {
+    return ip_.ToSensitiveString();
+  }
+}
+
+std::string SocketAddress::PortAsString() const {
+  std::ostringstream ost;
+  ost << port_;
+  return ost.str();
+}
+
+std::string SocketAddress::ToString() const {
+  std::ostringstream ost;
+  ost << *this;
+  return ost.str();
+}
+
+std::string SocketAddress::ToSensitiveString() const {
+  std::ostringstream ost;
+  ost << HostAsSensitiveURIString() << ":" << port();
+  return ost.str();
+}
+
+bool SocketAddress::FromString(const std::string& str) {
+  if (str.at(0) == '[') {
+    std::string::size_type closebracket = str.rfind(']');
+    if (closebracket != std::string::npos) {
+      std::string::size_type colon = str.find(':', closebracket);
+      if (colon != std::string::npos && colon > closebracket) {
+        SetPort(strtoul(str.substr(colon + 1).c_str(), nullptr, 10));
+        SetIP(str.substr(1, closebracket - 1));
+      } else {
+        return false;
+      }
+    }
+  } else {
+    std::string::size_type pos = str.find(':');
+    if (std::string::npos == pos)
+      return false;
+    SetPort(strtoul(str.substr(pos + 1).c_str(), nullptr, 10));
+    SetIP(str.substr(0, pos));
+  }
+  return true;
+}
+
+std::ostream& operator<<(std::ostream& os, const SocketAddress& addr) {
+  os << addr.HostAsURIString() << ":" << addr.port();
+  return os;
+}
+
+bool SocketAddress::IsAnyIP() const {
+  return IPIsAny(ip_);
+}
+
+bool SocketAddress::IsLoopbackIP() const {
+  return IPIsLoopback(ip_) || (IPIsAny(ip_) &&
+                               0 == strcmp(hostname_.c_str(), "localhost"));
+}
+
+bool SocketAddress::IsPrivateIP() const {
+  return IPIsPrivate(ip_);
+}
+
+bool SocketAddress::IsUnresolvedIP() const {
+  return IPIsUnspec(ip_) && !literal_ && !hostname_.empty();
+}
+
+bool SocketAddress::operator==(const SocketAddress& addr) const {
+  return EqualIPs(addr) && EqualPorts(addr);
+}
+
+bool SocketAddress::operator<(const SocketAddress& addr) const {
+  if (ip_ != addr.ip_)
+    return ip_ < addr.ip_;
+
+  // We only check hostnames if both IPs are ANY or unspecified.  This matches
+  // EqualIPs().
+  if ((IPIsAny(ip_) || IPIsUnspec(ip_)) && hostname_ != addr.hostname_)
+    return hostname_ < addr.hostname_;
+
+  return port_ < addr.port_;
+}
+
+bool SocketAddress::EqualIPs(const SocketAddress& addr) const {
+  return (ip_ == addr.ip_) &&
+      ((!IPIsAny(ip_) && !IPIsUnspec(ip_)) || (hostname_ == addr.hostname_));
+}
+
+bool SocketAddress::EqualPorts(const SocketAddress& addr) const {
+  return (port_ == addr.port_);
+}
+
+size_t SocketAddress::Hash() const {
+  size_t h = 0;
+  h ^= HashIP(ip_);
+  h ^= port_ | (port_ << 16);
+  return h;
+}
+
+void SocketAddress::ToSockAddr(sockaddr_in* saddr) const {
+  memset(saddr, 0, sizeof(*saddr));
+  if (ip_.family() != AF_INET) {
+    saddr->sin_family = AF_UNSPEC;
+    return;
+  }
+  saddr->sin_family = AF_INET;
+  saddr->sin_port = HostToNetwork16(port_);
+  if (IPIsAny(ip_)) {
+    saddr->sin_addr.s_addr = INADDR_ANY;
+  } else {
+    saddr->sin_addr = ip_.ipv4_address();
+  }
+}
+
+bool SocketAddress::FromSockAddr(const sockaddr_in& saddr) {
+  if (saddr.sin_family != AF_INET)
+    return false;
+  SetIP(NetworkToHost32(saddr.sin_addr.s_addr));
+  SetPort(NetworkToHost16(saddr.sin_port));
+  literal_ = false;
+  return true;
+}
+
+static size_t ToSockAddrStorageHelper(sockaddr_storage* addr,
+                                      IPAddress ip,
+                                      uint16_t port,
+                                      int scope_id) {
+  memset(addr, 0, sizeof(sockaddr_storage));
+  addr->ss_family = static_cast<unsigned short>(ip.family());
+  if (addr->ss_family == AF_INET6) {
+    sockaddr_in6* saddr = reinterpret_cast<sockaddr_in6*>(addr);
+    saddr->sin6_addr = ip.ipv6_address();
+    saddr->sin6_port = HostToNetwork16(port);
+    saddr->sin6_scope_id = scope_id;
+    return sizeof(sockaddr_in6);
+  } else if (addr->ss_family == AF_INET) {
+    sockaddr_in* saddr = reinterpret_cast<sockaddr_in*>(addr);
+    saddr->sin_addr = ip.ipv4_address();
+    saddr->sin_port = HostToNetwork16(port);
+    return sizeof(sockaddr_in);
+  }
+  return 0;
+}
+
+size_t SocketAddress::ToDualStackSockAddrStorage(sockaddr_storage *addr) const {
+  return ToSockAddrStorageHelper(addr, ip_.AsIPv6Address(), port_, scope_id_);
+}
+
+size_t SocketAddress::ToSockAddrStorage(sockaddr_storage* addr) const {
+  return ToSockAddrStorageHelper(addr, ip_, port_, scope_id_);
+}
+
+bool SocketAddressFromSockAddrStorage(const sockaddr_storage& addr,
+                                      SocketAddress* out) {
+  if (!out) {
+    return false;
+  }
+  if (addr.ss_family == AF_INET) {
+    const sockaddr_in* saddr = reinterpret_cast<const sockaddr_in*>(&addr);
+    *out = SocketAddress(IPAddress(saddr->sin_addr),
+                         NetworkToHost16(saddr->sin_port));
+    return true;
+  } else if (addr.ss_family == AF_INET6) {
+    const sockaddr_in6* saddr = reinterpret_cast<const sockaddr_in6*>(&addr);
+    *out = SocketAddress(IPAddress(saddr->sin6_addr),
+                         NetworkToHost16(saddr->sin6_port));
+    out->SetScopeID(saddr->sin6_scope_id);
+    return true;
+  }
+  return false;
+}
+
+SocketAddress EmptySocketAddressWithFamily(int family) {
+  if (family == AF_INET) {
+    return SocketAddress(IPAddress(INADDR_ANY), 0);
+  } else if (family == AF_INET6) {
+    return SocketAddress(IPAddress(in6addr_any), 0);
+  }
+  return SocketAddress();
+}
+
+}  // namespace rtc
diff --git a/rtc_base/socketaddress.h b/rtc_base/socketaddress.h
new file mode 100644
index 0000000..d58eed8
--- /dev/null
+++ b/rtc_base/socketaddress.h
@@ -0,0 +1,197 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SOCKETADDRESS_H_
+#define RTC_BASE_SOCKETADDRESS_H_
+
+#include <iosfwd>
+#include <string>
+#include <vector>
+#include "rtc_base/basictypes.h"
+#include "rtc_base/ipaddress.h"
+
+#undef SetPort
+
+struct sockaddr_in;
+struct sockaddr_storage;
+
+namespace rtc {
+
+// Records an IP address and port.
+class SocketAddress {
+ public:
+  // Creates a nil address.
+  SocketAddress();
+
+  // Creates the address with the given host and port. Host may be a
+  // literal IP string or a hostname to be resolved later.
+  // DCHECKs that port is in valid range (0 to 2^16-1).
+  SocketAddress(const std::string& hostname, int port);
+
+  // Creates the address with the given IP and port.
+  // IP is given as an integer in host byte order. V4 only, to be deprecated.
+  // DCHECKs that port is in valid range (0 to 2^16-1).
+  SocketAddress(uint32_t ip_as_host_order_integer, int port);
+
+  // Creates the address with the given IP and port.
+  // DCHECKs that port is in valid range (0 to 2^16-1).
+  SocketAddress(const IPAddress& ip, int port);
+
+  // Creates a copy of the given address.
+  SocketAddress(const SocketAddress& addr);
+
+  // Resets to the nil address.
+  void Clear();
+
+  // Determines if this is a nil address (empty hostname, any IP, null port)
+  bool IsNil() const;
+
+  // Returns true if ip and port are set.
+  bool IsComplete() const;
+
+  // Replaces our address with the given one.
+  SocketAddress& operator=(const SocketAddress& addr);
+
+  // Changes the IP of this address to the given one, and clears the hostname
+  // IP is given as an integer in host byte order. V4 only, to be deprecated..
+  void SetIP(uint32_t ip_as_host_order_integer);
+
+  // Changes the IP of this address to the given one, and clears the hostname.
+  void SetIP(const IPAddress& ip);
+
+  // Changes the hostname of this address to the given one.
+  // Does not resolve the address; use Resolve to do so.
+  void SetIP(const std::string& hostname);
+
+  // Sets the IP address while retaining the hostname.  Useful for bypassing
+  // DNS for a pre-resolved IP.
+  // IP is given as an integer in host byte order. V4 only, to be deprecated.
+  void SetResolvedIP(uint32_t ip_as_host_order_integer);
+
+  // Sets the IP address while retaining the hostname.  Useful for bypassing
+  // DNS for a pre-resolved IP.
+  void SetResolvedIP(const IPAddress& ip);
+
+  // Changes the port of this address to the given one.
+  // DCHECKs that port is in valid range (0 to 2^16-1).
+  void SetPort(int port);
+
+  // Returns the hostname.
+  const std::string& hostname() const { return hostname_; }
+
+  // Returns the IP address as a host byte order integer.
+  // Returns 0 for non-v4 addresses.
+  uint32_t ip() const;
+
+  const IPAddress& ipaddr() const;
+
+  int family() const {return ip_.family(); }
+
+  // Returns the port part of this address.
+  uint16_t port() const;
+
+  // Returns the scope ID associated with this address. Scope IDs are a
+  // necessary addition to IPv6 link-local addresses, with different network
+  // interfaces having different scope-ids for their link-local addresses.
+  // IPv4 address do not have scope_ids and sockaddr_in structures do not have
+  // a field for them.
+  int scope_id() const {return scope_id_; }
+  void SetScopeID(int id) { scope_id_ = id; }
+
+  // Returns the 'host' portion of the address (hostname or IP) in a form
+  // suitable for use in a URI. If both IP and hostname are present, hostname
+  // is preferred. IPv6 addresses are enclosed in square brackets ('[' and ']').
+  std::string HostAsURIString() const;
+
+  // Same as HostAsURIString but anonymizes IP addresses by hiding the last
+  // part.
+  std::string HostAsSensitiveURIString() const;
+
+  // Returns the port as a string.
+  std::string PortAsString() const;
+
+  // Returns hostname:port or [hostname]:port.
+  std::string ToString() const;
+
+  // Same as ToString but anonymizes it by hiding the last part.
+  std::string ToSensitiveString() const;
+
+  // Parses hostname:port and [hostname]:port.
+  bool FromString(const std::string& str);
+
+  friend std::ostream& operator<<(std::ostream& os, const SocketAddress& addr);
+
+  // Determines whether this represents a missing / any IP address.
+  // That is, 0.0.0.0 or ::.
+  // Hostname and/or port may be set.
+  bool IsAnyIP() const;
+
+  // Determines whether the IP address refers to a loopback address.
+  // For v4 addresses this means the address is in the range 127.0.0.0/8.
+  // For v6 addresses this means the address is ::1.
+  bool IsLoopbackIP() const;
+
+  // Determines whether the IP address is in one of the private ranges:
+  // For v4: 127.0.0.0/8 10.0.0.0/8 192.168.0.0/16 172.16.0.0/12.
+  // For v6: FE80::/16 and ::1.
+  bool IsPrivateIP() const;
+
+  // Determines whether the hostname has been resolved to an IP.
+  bool IsUnresolvedIP() const;
+
+  // Determines whether this address is identical to the given one.
+  bool operator ==(const SocketAddress& addr) const;
+  inline bool operator !=(const SocketAddress& addr) const {
+    return !this->operator ==(addr);
+  }
+
+  // Compares based on IP and then port.
+  bool operator <(const SocketAddress& addr) const;
+
+  // Determines whether this address has the same IP as the one given.
+  bool EqualIPs(const SocketAddress& addr) const;
+
+  // Determines whether this address has the same port as the one given.
+  bool EqualPorts(const SocketAddress& addr) const;
+
+  // Hashes this address into a small number.
+  size_t Hash() const;
+
+  // Write this address to a sockaddr_in.
+  // If IPv6, will zero out the sockaddr_in and sets family to AF_UNSPEC.
+  void ToSockAddr(sockaddr_in* saddr) const;
+
+  // Read this address from a sockaddr_in.
+  bool FromSockAddr(const sockaddr_in& saddr);
+
+  // Read and write the address to/from a sockaddr_storage.
+  // Dual stack version always sets family to AF_INET6, and maps v4 addresses.
+  // The other version doesn't map, and outputs an AF_INET address for
+  // v4 or mapped addresses, and AF_INET6 addresses for others.
+  // Returns the size of the sockaddr_in or sockaddr_in6 structure that is
+  // written to the sockaddr_storage, or zero on failure.
+  size_t ToDualStackSockAddrStorage(sockaddr_storage* saddr) const;
+  size_t ToSockAddrStorage(sockaddr_storage* saddr) const;
+
+ private:
+  std::string hostname_;
+  IPAddress ip_;
+  uint16_t port_;
+  int scope_id_;
+  bool literal_;  // Indicates that 'hostname_' contains a literal IP string.
+};
+
+bool SocketAddressFromSockAddrStorage(const sockaddr_storage& saddr,
+                                      SocketAddress* out);
+SocketAddress EmptySocketAddressWithFamily(int family);
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_SOCKETADDRESS_H_
diff --git a/rtc_base/socketaddress_unittest.cc b/rtc_base/socketaddress_unittest.cc
new file mode 100644
index 0000000..0d168df
--- /dev/null
+++ b/rtc_base/socketaddress_unittest.cc
@@ -0,0 +1,351 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#if defined(WEBRTC_POSIX)
+#include <netinet/in.h>  // for sockaddr_in
+#endif
+
+#include "rtc_base/gunit.h"
+#include "rtc_base/ipaddress.h"
+#include "rtc_base/socketaddress.h"
+
+namespace rtc {
+
+const in6_addr kTestV6Addr =  { { {0x20, 0x01, 0x0d, 0xb8,
+                                   0x10, 0x20, 0x30, 0x40,
+                                   0x50, 0x60, 0x70, 0x80,
+                                   0x90, 0xA0, 0xB0, 0xC0} } };
+const in6_addr kMappedV4Addr = { { {0x00, 0x00, 0x00, 0x00,
+                                    0x00, 0x00, 0x00, 0x00,
+                                    0x00, 0x00, 0xFF, 0xFF,
+                                    0x01, 0x02, 0x03, 0x04} } };
+const std::string kTestV6AddrString = "2001:db8:1020:3040:5060:7080:90a0:b0c0";
+const std::string kTestV6AddrAnonymizedString = "2001:db8:1020:x:x:x:x:x";
+const std::string kTestV6AddrFullString =
+    "[2001:db8:1020:3040:5060:7080:90a0:b0c0]:5678";
+const std::string kTestV6AddrFullAnonymizedString =
+    "[2001:db8:1020:x:x:x:x:x]:5678";
+
+TEST(SocketAddressTest, TestDefaultCtor) {
+  SocketAddress addr;
+  EXPECT_FALSE(addr.IsUnresolvedIP());
+  EXPECT_EQ(IPAddress(), addr.ipaddr());
+  EXPECT_EQ(0, addr.port());
+  EXPECT_EQ("", addr.hostname());
+}
+
+TEST(SocketAddressTest, TestIPPortCtor) {
+  SocketAddress addr(IPAddress(0x01020304), 5678);
+  EXPECT_FALSE(addr.IsUnresolvedIP());
+  EXPECT_EQ(IPAddress(0x01020304U), addr.ipaddr());
+  EXPECT_EQ(5678, addr.port());
+  EXPECT_EQ("", addr.hostname());
+  EXPECT_EQ("1.2.3.4:5678", addr.ToString());
+}
+
+TEST(SocketAddressTest, TestIPv4StringPortCtor) {
+  SocketAddress addr("1.2.3.4", 5678);
+  EXPECT_FALSE(addr.IsUnresolvedIP());
+  EXPECT_EQ(IPAddress(0x01020304U), addr.ipaddr());
+  EXPECT_EQ(5678, addr.port());
+  EXPECT_EQ("1.2.3.4", addr.hostname());
+  EXPECT_EQ("1.2.3.4:5678", addr.ToString());
+}
+
+TEST(SocketAddressTest, TestIPv6StringPortCtor) {
+  SocketAddress addr2(kTestV6AddrString, 1234);
+  IPAddress tocheck(kTestV6Addr);
+
+  EXPECT_FALSE(addr2.IsUnresolvedIP());
+  EXPECT_EQ(tocheck, addr2.ipaddr());
+  EXPECT_EQ(1234, addr2.port());
+  EXPECT_EQ(kTestV6AddrString, addr2.hostname());
+  EXPECT_EQ("[" + kTestV6AddrString + "]:1234", addr2.ToString());
+}
+
+TEST(SocketAddressTest, TestSpecialStringPortCtor) {
+  // inet_addr doesn't handle this address properly.
+  SocketAddress addr("255.255.255.255", 5678);
+  EXPECT_FALSE(addr.IsUnresolvedIP());
+  EXPECT_EQ(IPAddress(0xFFFFFFFFU), addr.ipaddr());
+  EXPECT_EQ(5678, addr.port());
+  EXPECT_EQ("255.255.255.255", addr.hostname());
+  EXPECT_EQ("255.255.255.255:5678", addr.ToString());
+}
+
+TEST(SocketAddressTest, TestHostnamePortCtor) {
+  SocketAddress addr("a.b.com", 5678);
+  EXPECT_TRUE(addr.IsUnresolvedIP());
+  EXPECT_EQ(IPAddress(), addr.ipaddr());
+  EXPECT_EQ(5678, addr.port());
+  EXPECT_EQ("a.b.com", addr.hostname());
+  EXPECT_EQ("a.b.com:5678", addr.ToString());
+}
+
+TEST(SocketAddressTest, TestCopyCtor) {
+  SocketAddress from("1.2.3.4", 5678);
+  SocketAddress addr(from);
+  EXPECT_FALSE(addr.IsUnresolvedIP());
+  EXPECT_EQ(IPAddress(0x01020304U), addr.ipaddr());
+  EXPECT_EQ(5678, addr.port());
+  EXPECT_EQ("1.2.3.4", addr.hostname());
+  EXPECT_EQ("1.2.3.4:5678", addr.ToString());
+}
+
+TEST(SocketAddressTest, TestAssign) {
+  SocketAddress from("1.2.3.4", 5678);
+  SocketAddress addr(IPAddress(0x88888888), 9999);
+  addr = from;
+  EXPECT_FALSE(addr.IsUnresolvedIP());
+  EXPECT_EQ(IPAddress(0x01020304U), addr.ipaddr());
+  EXPECT_EQ(5678, addr.port());
+  EXPECT_EQ("1.2.3.4", addr.hostname());
+  EXPECT_EQ("1.2.3.4:5678", addr.ToString());
+}
+
+TEST(SocketAddressTest, TestSetIPPort) {
+  SocketAddress addr(IPAddress(0x88888888), 9999);
+  addr.SetIP(IPAddress(0x01020304));
+  addr.SetPort(5678);
+  EXPECT_FALSE(addr.IsUnresolvedIP());
+  EXPECT_EQ(IPAddress(0x01020304U), addr.ipaddr());
+  EXPECT_EQ(5678, addr.port());
+  EXPECT_EQ("", addr.hostname());
+  EXPECT_EQ("1.2.3.4:5678", addr.ToString());
+}
+
+TEST(SocketAddressTest, TestSetIPFromString) {
+  SocketAddress addr(IPAddress(0x88888888), 9999);
+  addr.SetIP("1.2.3.4");
+  addr.SetPort(5678);
+  EXPECT_FALSE(addr.IsUnresolvedIP());
+  EXPECT_EQ(IPAddress(0x01020304U), addr.ipaddr());
+  EXPECT_EQ(5678, addr.port());
+  EXPECT_EQ("1.2.3.4", addr.hostname());
+  EXPECT_EQ("1.2.3.4:5678", addr.ToString());
+}
+
+TEST(SocketAddressTest, TestSetIPFromHostname) {
+  SocketAddress addr(IPAddress(0x88888888), 9999);
+  addr.SetIP("a.b.com");
+  addr.SetPort(5678);
+  EXPECT_TRUE(addr.IsUnresolvedIP());
+  EXPECT_EQ(IPAddress(), addr.ipaddr());
+  EXPECT_EQ(5678, addr.port());
+  EXPECT_EQ("a.b.com", addr.hostname());
+  EXPECT_EQ("a.b.com:5678", addr.ToString());
+  addr.SetResolvedIP(IPAddress(0x01020304));
+  EXPECT_FALSE(addr.IsUnresolvedIP());
+  EXPECT_EQ(IPAddress(0x01020304U), addr.ipaddr());
+  EXPECT_EQ("a.b.com", addr.hostname());
+  EXPECT_EQ("a.b.com:5678", addr.ToString());
+}
+
+TEST(SocketAddressTest, TestFromIPv4String) {
+  SocketAddress addr;
+  EXPECT_TRUE(addr.FromString("1.2.3.4:5678"));
+  EXPECT_FALSE(addr.IsUnresolvedIP());
+  EXPECT_EQ(IPAddress(0x01020304U), addr.ipaddr());
+  EXPECT_EQ(5678, addr.port());
+  EXPECT_EQ("1.2.3.4", addr.hostname());
+  EXPECT_EQ("1.2.3.4:5678", addr.ToString());
+}
+
+TEST(SocketAddressTest, TestFromIPv6String) {
+  SocketAddress addr;
+  EXPECT_TRUE(addr.FromString(kTestV6AddrFullString));
+  EXPECT_FALSE(addr.IsUnresolvedIP());
+  EXPECT_EQ(5678, addr.port());
+  EXPECT_EQ(kTestV6AddrString, addr.hostname());
+  EXPECT_EQ(kTestV6AddrFullString, addr.ToString());
+}
+
+TEST(SocketAddressTest, TestFromHostname) {
+  SocketAddress addr;
+  EXPECT_TRUE(addr.FromString("a.b.com:5678"));
+  EXPECT_TRUE(addr.IsUnresolvedIP());
+  EXPECT_EQ(IPAddress(), addr.ipaddr());
+  EXPECT_EQ(5678, addr.port());
+  EXPECT_EQ("a.b.com", addr.hostname());
+  EXPECT_EQ("a.b.com:5678", addr.ToString());
+}
+
+TEST(SocketAddressTest, TestToFromSockAddr) {
+  SocketAddress from("1.2.3.4", 5678), addr;
+  sockaddr_in addr_in;
+  from.ToSockAddr(&addr_in);
+  EXPECT_TRUE(addr.FromSockAddr(addr_in));
+  EXPECT_FALSE(addr.IsUnresolvedIP());
+  EXPECT_EQ(IPAddress(0x01020304U), addr.ipaddr());
+  EXPECT_EQ(5678, addr.port());
+  EXPECT_EQ("", addr.hostname());
+  EXPECT_EQ("1.2.3.4:5678", addr.ToString());
+}
+
+TEST(SocketAddressTest, TestToFromSockAddrStorage) {
+  SocketAddress from("1.2.3.4", 5678), addr;
+  sockaddr_storage addr_storage;
+  from.ToSockAddrStorage(&addr_storage);
+  EXPECT_TRUE(SocketAddressFromSockAddrStorage(addr_storage, &addr));
+  EXPECT_FALSE(addr.IsUnresolvedIP());
+  EXPECT_EQ(IPAddress(0x01020304U), addr.ipaddr());
+  EXPECT_EQ(5678, addr.port());
+  EXPECT_EQ("", addr.hostname());
+  EXPECT_EQ("1.2.3.4:5678", addr.ToString());
+
+  addr.Clear();
+  from.ToDualStackSockAddrStorage(&addr_storage);
+  EXPECT_TRUE(SocketAddressFromSockAddrStorage(addr_storage, &addr));
+  EXPECT_FALSE(addr.IsUnresolvedIP());
+  EXPECT_EQ(IPAddress(kMappedV4Addr), addr.ipaddr());
+  EXPECT_EQ(5678, addr.port());
+  EXPECT_EQ("", addr.hostname());
+  EXPECT_EQ("[::ffff:1.2.3.4]:5678", addr.ToString());
+
+  addr.Clear();
+  memset(&addr_storage, 0, sizeof(sockaddr_storage));
+  from = SocketAddress(kTestV6AddrString, 5678);
+  from.SetScopeID(6);
+  from.ToSockAddrStorage(&addr_storage);
+  EXPECT_TRUE(SocketAddressFromSockAddrStorage(addr_storage, &addr));
+  EXPECT_FALSE(addr.IsUnresolvedIP());
+  EXPECT_EQ(IPAddress(kTestV6Addr), addr.ipaddr());
+  EXPECT_EQ(5678, addr.port());
+  EXPECT_EQ("", addr.hostname());
+  EXPECT_EQ(kTestV6AddrFullString, addr.ToString());
+  EXPECT_EQ(6, addr.scope_id());
+
+  addr.Clear();
+  from.ToDualStackSockAddrStorage(&addr_storage);
+  EXPECT_TRUE(SocketAddressFromSockAddrStorage(addr_storage, &addr));
+  EXPECT_FALSE(addr.IsUnresolvedIP());
+  EXPECT_EQ(IPAddress(kTestV6Addr), addr.ipaddr());
+  EXPECT_EQ(5678, addr.port());
+  EXPECT_EQ("", addr.hostname());
+  EXPECT_EQ(kTestV6AddrFullString, addr.ToString());
+  EXPECT_EQ(6, addr.scope_id());
+
+  addr = from;
+  addr_storage.ss_family = AF_UNSPEC;
+  EXPECT_FALSE(SocketAddressFromSockAddrStorage(addr_storage, &addr));
+  EXPECT_EQ(from, addr);
+
+  EXPECT_FALSE(SocketAddressFromSockAddrStorage(addr_storage, nullptr));
+}
+
+bool AreEqual(const SocketAddress& addr1,
+              const SocketAddress& addr2) {
+  return addr1 == addr2 && addr2 == addr1 &&
+      !(addr1 != addr2) && !(addr2 != addr1);
+}
+
+bool AreUnequal(const SocketAddress& addr1,
+                const SocketAddress& addr2) {
+  return !(addr1 == addr2) && !(addr2 == addr1) &&
+      addr1 != addr2 && addr2 != addr1;
+}
+
+TEST(SocketAddressTest, TestEqualityOperators) {
+  SocketAddress addr1("1.2.3.4", 5678);
+  SocketAddress addr2("1.2.3.4", 5678);
+  EXPECT_PRED2(AreEqual, addr1, addr2);
+
+  addr2 = SocketAddress("0.0.0.1", 5678);
+  EXPECT_PRED2(AreUnequal, addr1, addr2);
+
+  addr2 = SocketAddress("1.2.3.4", 1234);
+  EXPECT_PRED2(AreUnequal, addr1, addr2);
+
+  addr2 = SocketAddress(kTestV6AddrString, 5678);
+  EXPECT_PRED2(AreUnequal, addr1, addr2);
+
+  addr1 = SocketAddress(kTestV6AddrString, 5678);
+  EXPECT_PRED2(AreEqual, addr1, addr2);
+
+  addr2 = SocketAddress(kTestV6AddrString, 1234);
+  EXPECT_PRED2(AreUnequal, addr1, addr2);
+
+  addr2 = SocketAddress("fe80::1", 5678);
+  EXPECT_PRED2(AreUnequal, addr1, addr2);
+
+  SocketAddress addr3("a.b.c.d", 1);
+  SocketAddress addr4("b.b.c.d", 1);
+  EXPECT_PRED2(AreUnequal, addr3, addr4);
+  EXPECT_PRED2(AreEqual, addr3, addr3);
+
+  addr3.SetIP(addr1.ip());
+  addr4.SetIP(addr1.ip());
+  EXPECT_PRED2(AreEqual,addr3, addr4);
+}
+
+bool IsLessThan(const SocketAddress& addr1, const SocketAddress& addr2) {
+  return addr1 < addr2 &&
+      !(addr2 < addr1) &&
+      !(addr1 == addr2);
+}
+
+TEST(SocketAddressTest, TestComparisonOperator) {
+  SocketAddress addr1("1.2.3.4", 5678);
+  SocketAddress addr2("1.2.3.4", 5678);
+
+  EXPECT_FALSE(addr1 < addr2);
+  EXPECT_FALSE(addr2 < addr1);
+
+  addr2 = SocketAddress("1.2.3.4", 5679);
+  EXPECT_PRED2(IsLessThan, addr1, addr2);
+
+  addr2 = SocketAddress("2.2.3.4", 49152);
+  EXPECT_PRED2(IsLessThan, addr1, addr2);
+
+  addr2 = SocketAddress(kTestV6AddrString, 5678);
+  EXPECT_PRED2(IsLessThan, addr1, addr2);
+
+  addr1 = SocketAddress("fe80::1", 5678);
+  EXPECT_PRED2(IsLessThan, addr2, addr1);
+
+  addr2 = SocketAddress("fe80::1", 5679);
+  EXPECT_PRED2(IsLessThan, addr1, addr2);
+
+  addr2 = SocketAddress("fe80::1", 5678);
+  EXPECT_FALSE(addr1 < addr2);
+  EXPECT_FALSE(addr2 < addr1);
+
+  SocketAddress addr3("a.b.c.d", 1);
+  SocketAddress addr4("b.b.c.d", 1);
+  EXPECT_PRED2(IsLessThan, addr3, addr4);
+}
+
+TEST(SocketAddressTest, TestToSensitiveString) {
+  SocketAddress addr_v4("1.2.3.4", 5678);
+  EXPECT_EQ("1.2.3.4", addr_v4.HostAsURIString());
+  EXPECT_EQ("1.2.3.4:5678", addr_v4.ToString());
+
+#if defined(NDEBUG)
+  EXPECT_EQ("1.2.3.x", addr_v4.HostAsSensitiveURIString());
+  EXPECT_EQ("1.2.3.x:5678", addr_v4.ToSensitiveString());
+#else
+  EXPECT_EQ("1.2.3.4", addr_v4.HostAsSensitiveURIString());
+  EXPECT_EQ("1.2.3.4:5678", addr_v4.ToSensitiveString());
+#endif  // defined(NDEBUG)
+
+  SocketAddress addr_v6(kTestV6AddrString, 5678);
+  EXPECT_EQ("[" + kTestV6AddrString + "]", addr_v6.HostAsURIString());
+  EXPECT_EQ(kTestV6AddrFullString, addr_v6.ToString());
+#if defined(NDEBUG)
+  EXPECT_EQ("[" + kTestV6AddrAnonymizedString + "]",
+            addr_v6.HostAsSensitiveURIString());
+  EXPECT_EQ(kTestV6AddrFullAnonymizedString, addr_v6.ToSensitiveString());
+#else
+  EXPECT_EQ("[" + kTestV6AddrString + "]", addr_v6.HostAsSensitiveURIString());
+  EXPECT_EQ(kTestV6AddrFullString, addr_v6.ToSensitiveString());
+#endif  // defined(NDEBUG)
+}
+
+}  // namespace rtc
diff --git a/rtc_base/socketaddresspair.cc b/rtc_base/socketaddresspair.cc
new file mode 100644
index 0000000..3e4748f
--- /dev/null
+++ b/rtc_base/socketaddresspair.cc
@@ -0,0 +1,41 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/socketaddresspair.h"
+
+namespace rtc {
+
+SocketAddressPair::SocketAddressPair(
+    const SocketAddress& src, const SocketAddress& dest)
+    : src_(src), dest_(dest) {
+}
+
+
+bool SocketAddressPair::operator ==(const SocketAddressPair& p) const {
+  return (src_ == p.src_) && (dest_ == p.dest_);
+}
+
+bool SocketAddressPair::operator <(const SocketAddressPair& p) const {
+  if (src_ < p.src_)
+    return true;
+  if (p.src_ < src_)
+    return false;
+  if (dest_ < p.dest_)
+    return true;
+  if (p.dest_ < dest_)
+    return false;
+  return false;
+}
+
+size_t SocketAddressPair::Hash() const {
+  return src_.Hash() ^ dest_.Hash();
+}
+
+} // namespace rtc
diff --git a/rtc_base/socketaddresspair.h b/rtc_base/socketaddresspair.h
new file mode 100644
index 0000000..8ff0ee6
--- /dev/null
+++ b/rtc_base/socketaddresspair.h
@@ -0,0 +1,41 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SOCKETADDRESSPAIR_H_
+#define RTC_BASE_SOCKETADDRESSPAIR_H_
+
+#include "rtc_base/socketaddress.h"
+
+namespace rtc {
+
+// Records a pair (source,destination) of socket addresses.  The two addresses
+// identify a connection between two machines.  (For UDP, this "connection" is
+// not maintained explicitly in a socket.)
+class SocketAddressPair {
+public:
+  SocketAddressPair() {}
+  SocketAddressPair(const SocketAddress& srs, const SocketAddress& dest);
+
+  const SocketAddress& source() const { return src_; }
+  const SocketAddress& destination() const { return dest_; }
+
+  bool operator ==(const SocketAddressPair& r) const;
+  bool operator <(const SocketAddressPair& r) const;
+
+  size_t Hash() const;
+
+private:
+  SocketAddress src_;
+  SocketAddress dest_;
+};
+
+} // namespace rtc
+
+#endif // RTC_BASE_SOCKETADDRESSPAIR_H_
diff --git a/rtc_base/socketfactory.h b/rtc_base/socketfactory.h
new file mode 100644
index 0000000..58bc0de
--- /dev/null
+++ b/rtc_base/socketfactory.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SOCKETFACTORY_H_
+#define RTC_BASE_SOCKETFACTORY_H_
+
+#include "rtc_base/asyncsocket.h"
+#include "rtc_base/socket.h"
+
+namespace rtc {
+
+class SocketFactory {
+public:
+  virtual ~SocketFactory() {}
+
+  // Returns a new socket for blocking communication.  The type can be
+  // SOCK_DGRAM and SOCK_STREAM.
+  // TODO: C++ inheritance rules mean that all users must have both
+  // CreateSocket(int) and CreateSocket(int,int). Will remove CreateSocket(int)
+  // (and CreateAsyncSocket(int) when all callers are changed.
+  virtual Socket* CreateSocket(int type) = 0;
+  virtual Socket* CreateSocket(int family, int type) = 0;
+  // Returns a new socket for nonblocking communication.  The type can be
+  // SOCK_DGRAM and SOCK_STREAM.
+  virtual AsyncSocket* CreateAsyncSocket(int type) = 0;
+  virtual AsyncSocket* CreateAsyncSocket(int family, int type) = 0;
+};
+
+} // namespace rtc
+
+#endif // RTC_BASE_SOCKETFACTORY_H_
diff --git a/rtc_base/socketserver.h b/rtc_base/socketserver.h
new file mode 100644
index 0000000..e29884e
--- /dev/null
+++ b/rtc_base/socketserver.h
@@ -0,0 +1,62 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SOCKETSERVER_H_
+#define RTC_BASE_SOCKETSERVER_H_
+
+#include <memory>
+#include "rtc_base/socketfactory.h"
+
+namespace rtc {
+
+class MessageQueue;
+// Needs to be forward declared because there's a circular dependency between
+// NetworkMonitor and Thread.
+// TODO(deadbeef): Fix this.
+class NetworkBinderInterface;
+
+// Provides the ability to wait for activity on a set of sockets.  The Thread
+// class provides a nice wrapper on a socket server.
+//
+// The server is also a socket factory.  The sockets it creates will be
+// notified of asynchronous I/O from this server's Wait method.
+class SocketServer : public SocketFactory {
+ public:
+  static const int kForever = -1;
+
+  static std::unique_ptr<SocketServer> CreateDefault();
+  // When the socket server is installed into a Thread, this function is
+  // called to allow the socket server to use the thread's message queue for
+  // any messaging that it might need to perform.
+  virtual void SetMessageQueue(MessageQueue* queue) {}
+
+  // Sleeps until:
+  //  1) cms milliseconds have elapsed (unless cms == kForever)
+  //  2) WakeUp() is called
+  // While sleeping, I/O is performed if process_io is true.
+  virtual bool Wait(int cms, bool process_io) = 0;
+
+  // Causes the current wait (if one is in progress) to wake up.
+  virtual void WakeUp() = 0;
+
+  // A network binder will bind the created sockets to a network.
+  // It is only used in PhysicalSocketServer.
+  void set_network_binder(NetworkBinderInterface* binder) {
+    network_binder_ = binder;
+  }
+  NetworkBinderInterface* network_binder() const { return network_binder_; }
+
+ private:
+  NetworkBinderInterface* network_binder_ = nullptr;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_SOCKETSERVER_H_
diff --git a/rtc_base/socketstream.cc b/rtc_base/socketstream.cc
new file mode 100644
index 0000000..8b4c513
--- /dev/null
+++ b/rtc_base/socketstream.cc
@@ -0,0 +1,123 @@
+/*
+ *  Copyright 2010 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/socketstream.h"
+
+#include "rtc_base/checks.h"
+
+namespace rtc {
+
+SocketStream::SocketStream(AsyncSocket* socket) : socket_(nullptr) {
+  Attach(socket);
+}
+
+SocketStream::~SocketStream() {
+  delete socket_;
+}
+
+void SocketStream::Attach(AsyncSocket* socket) {
+  if (socket_)
+    delete socket_;
+  socket_ = socket;
+  if (socket_) {
+    socket_->SignalConnectEvent.connect(this, &SocketStream::OnConnectEvent);
+    socket_->SignalReadEvent.connect(this,    &SocketStream::OnReadEvent);
+    socket_->SignalWriteEvent.connect(this,   &SocketStream::OnWriteEvent);
+    socket_->SignalCloseEvent.connect(this,   &SocketStream::OnCloseEvent);
+  }
+}
+
+AsyncSocket* SocketStream::Detach() {
+  AsyncSocket* socket = socket_;
+  if (socket_) {
+    socket_->SignalConnectEvent.disconnect(this);
+    socket_->SignalReadEvent.disconnect(this);
+    socket_->SignalWriteEvent.disconnect(this);
+    socket_->SignalCloseEvent.disconnect(this);
+    socket_ = nullptr;
+  }
+  return socket;
+}
+
+StreamState SocketStream::GetState() const {
+  RTC_DCHECK(socket_ != nullptr);
+  switch (socket_->GetState()) {
+    case Socket::CS_CONNECTED:
+      return SS_OPEN;
+    case Socket::CS_CONNECTING:
+      return SS_OPENING;
+    case Socket::CS_CLOSED:
+    default:
+      return SS_CLOSED;
+  }
+}
+
+StreamResult SocketStream::Read(void* buffer, size_t buffer_len,
+                                size_t* read, int* error) {
+  RTC_DCHECK(socket_ != nullptr);
+  int result = socket_->Recv(buffer, buffer_len, nullptr);
+  if (result < 0) {
+    if (socket_->IsBlocking())
+      return SR_BLOCK;
+    if (error)
+      *error = socket_->GetError();
+    return SR_ERROR;
+  }
+  if ((result > 0) || (buffer_len == 0)) {
+    if (read)
+      *read = result;
+    return SR_SUCCESS;
+  }
+  return SR_EOS;
+}
+
+StreamResult SocketStream::Write(const void* data, size_t data_len,
+                                 size_t* written, int* error) {
+  RTC_DCHECK(socket_ != nullptr);
+  int result = socket_->Send(data, data_len);
+  if (result < 0) {
+    if (socket_->IsBlocking())
+      return SR_BLOCK;
+    if (error)
+      *error = socket_->GetError();
+    return SR_ERROR;
+  }
+  if (written)
+    *written = result;
+  return SR_SUCCESS;
+}
+
+void SocketStream::Close() {
+  RTC_DCHECK(socket_ != nullptr);
+  socket_->Close();
+}
+
+void SocketStream::OnConnectEvent(AsyncSocket* socket) {
+  RTC_DCHECK(socket == socket_);
+  SignalEvent(this, SE_OPEN | SE_READ | SE_WRITE, 0);
+}
+
+void SocketStream::OnReadEvent(AsyncSocket* socket) {
+  RTC_DCHECK(socket == socket_);
+  SignalEvent(this, SE_READ, 0);
+}
+
+void SocketStream::OnWriteEvent(AsyncSocket* socket) {
+  RTC_DCHECK(socket == socket_);
+  SignalEvent(this, SE_WRITE, 0);
+}
+
+void SocketStream::OnCloseEvent(AsyncSocket* socket, int err) {
+  RTC_DCHECK(socket == socket_);
+  SignalEvent(this, SE_CLOSE, err);
+}
+
+
+}  // namespace rtc
diff --git a/rtc_base/socketstream.h b/rtc_base/socketstream.h
new file mode 100644
index 0000000..f781af1
--- /dev/null
+++ b/rtc_base/socketstream.h
@@ -0,0 +1,61 @@
+/*
+ *  Copyright 2005 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SOCKETSTREAM_H_
+#define RTC_BASE_SOCKETSTREAM_H_
+
+#include "rtc_base/asyncsocket.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/stream.h"
+
+namespace rtc {
+
+///////////////////////////////////////////////////////////////////////////////
+
+class SocketStream : public StreamInterface, public sigslot::has_slots<> {
+ public:
+  explicit SocketStream(AsyncSocket* socket);
+  ~SocketStream() override;
+
+  void Attach(AsyncSocket* socket);
+  AsyncSocket* Detach();
+
+  AsyncSocket* GetSocket() { return socket_; }
+
+  StreamState GetState() const override;
+
+  StreamResult Read(void* buffer,
+                    size_t buffer_len,
+                    size_t* read,
+                    int* error) override;
+
+  StreamResult Write(const void* data,
+                     size_t data_len,
+                     size_t* written,
+                     int* error) override;
+
+  void Close() override;
+
+ private:
+  void OnConnectEvent(AsyncSocket* socket);
+  void OnReadEvent(AsyncSocket* socket);
+  void OnWriteEvent(AsyncSocket* socket);
+  void OnCloseEvent(AsyncSocket* socket, int err);
+
+  AsyncSocket* socket_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(SocketStream);
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_SOCKETSTREAM_H_
diff --git a/rtc_base/ssladapter.cc b/rtc_base/ssladapter.cc
new file mode 100644
index 0000000..8c62d3b
--- /dev/null
+++ b/rtc_base/ssladapter.cc
@@ -0,0 +1,39 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/ssladapter.h"
+
+#include "rtc_base/openssladapter.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace rtc {
+
+SSLAdapterFactory* SSLAdapterFactory::Create() {
+  return new OpenSSLAdapterFactory();
+}
+
+SSLAdapter* SSLAdapter::Create(AsyncSocket* socket) {
+  return new OpenSSLAdapter(socket);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool InitializeSSL(VerificationCallback callback) {
+  return OpenSSLAdapter::InitializeSSL(callback);
+}
+
+bool CleanupSSL() {
+  return OpenSSLAdapter::CleanupSSL();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+}  // namespace rtc
diff --git a/rtc_base/ssladapter.h b/rtc_base/ssladapter.h
new file mode 100644
index 0000000..6b84154
--- /dev/null
+++ b/rtc_base/ssladapter.h
@@ -0,0 +1,96 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SSLADAPTER_H_
+#define RTC_BASE_SSLADAPTER_H_
+
+#include "rtc_base/asyncsocket.h"
+#include "rtc_base/sslstreamadapter.h"
+
+namespace rtc {
+
+class SSLAdapter;
+
+// Class for creating SSL adapters with shared state, e.g., a session cache,
+// which allows clients to resume SSL sessions to previously-contacted hosts.
+// Clients should create the factory using Create(), set up the factory as
+// needed using SetMode, and then call CreateAdapter to create adapters when
+// needed.
+class SSLAdapterFactory {
+ public:
+  virtual ~SSLAdapterFactory() {}
+  // Specifies whether TLS or DTLS is to be used for the SSL adapters.
+  virtual void SetMode(SSLMode mode) = 0;
+  // Creates a new SSL adapter, but from a shared context.
+  virtual SSLAdapter* CreateAdapter(AsyncSocket* socket) = 0;
+
+  static SSLAdapterFactory* Create();
+};
+
+// Class that abstracts a client-to-server SSL session. It can be created
+// standalone, via SSLAdapter::Create, or through a factory as described above,
+// in which case it will share state with other SSLAdapters created from the
+// same factory.
+// After creation, call StartSSL to initiate the SSL handshake to the server.
+class SSLAdapter : public AsyncSocketAdapter {
+ public:
+  explicit SSLAdapter(AsyncSocket* socket) : AsyncSocketAdapter(socket) {}
+
+  // Methods that control server certificate verification, used in unit tests.
+  // Do not call these methods in production code.
+  // TODO(juberti): Remove the opportunistic encryption mechanism in
+  // BasicPacketSocketFactory that uses this function.
+  virtual void SetIgnoreBadCert(bool ignore) = 0;
+
+  virtual void SetAlpnProtocols(const std::vector<std::string>& protos) = 0;
+  virtual void SetEllipticCurves(const std::vector<std::string>& curves) = 0;
+
+  // Do DTLS or TLS (default is TLS, if unspecified)
+  virtual void SetMode(SSLMode mode) = 0;
+
+  // Set the certificate this socket will present to incoming clients.
+  virtual void SetIdentity(SSLIdentity* identity) = 0;
+
+  // Choose whether the socket acts as a server socket or client socket.
+  virtual void SetRole(SSLRole role) = 0;
+
+  // StartSSL returns 0 if successful.
+  // If StartSSL is called while the socket is closed or connecting, the SSL
+  // negotiation will begin as soon as the socket connects.
+  // TODO(juberti): Remove |restartable|.
+  virtual int StartSSL(const char* hostname, bool restartable = false) = 0;
+
+  // When an SSLAdapterFactory is used, an SSLAdapter may be used to resume
+  // a previous SSL session, which results in an abbreviated handshake.
+  // This method, if called after SSL has been established for this adapter,
+  // indicates whether the current session is a resumption of a previous
+  // session.
+  virtual bool IsResumedSession() = 0;
+
+  // Create the default SSL adapter for this platform. On failure, returns null
+  // and deletes |socket|. Otherwise, the returned SSLAdapter takes ownership
+  // of |socket|.
+  static SSLAdapter* Create(AsyncSocket* socket);
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef bool (*VerificationCallback)(void* cert);
+
+// Call this on the main thread, before using SSL.
+// Call CleanupSSL when finished with SSL.
+bool InitializeSSL(VerificationCallback callback = nullptr);
+
+// Call to cleanup additional threads, and also the main thread.
+bool CleanupSSL();
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_SSLADAPTER_H_
diff --git a/rtc_base/ssladapter_unittest.cc b/rtc_base/ssladapter_unittest.cc
new file mode 100644
index 0000000..0996b01
--- /dev/null
+++ b/rtc_base/ssladapter_unittest.cc
@@ -0,0 +1,493 @@
+/*
+ *  Copyright 2014 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <string>
+
+#include "rtc_base/gunit.h"
+#include "rtc_base/ipaddress.h"
+#include "rtc_base/socketstream.h"
+#include "rtc_base/ssladapter.h"
+#include "rtc_base/sslidentity.h"
+#include "rtc_base/sslstreamadapter.h"
+#include "rtc_base/stream.h"
+#include "rtc_base/stringencode.h"
+#include "rtc_base/virtualsocketserver.h"
+
+static const int kTimeout = 5000;
+
+static rtc::AsyncSocket* CreateSocket(const rtc::SSLMode& ssl_mode) {
+  rtc::SocketAddress address(rtc::IPAddress(INADDR_ANY), 0);
+
+  rtc::AsyncSocket* socket = rtc::Thread::Current()->
+      socketserver()->CreateAsyncSocket(
+      address.family(), (ssl_mode == rtc::SSL_MODE_DTLS) ?
+      SOCK_DGRAM : SOCK_STREAM);
+  socket->Bind(address);
+
+  return socket;
+}
+
+static std::string GetSSLProtocolName(const rtc::SSLMode& ssl_mode) {
+  return (ssl_mode == rtc::SSL_MODE_DTLS) ? "DTLS" : "TLS";
+}
+
+class SSLAdapterTestDummyClient : public sigslot::has_slots<> {
+ public:
+  explicit SSLAdapterTestDummyClient(const rtc::SSLMode& ssl_mode)
+      : ssl_mode_(ssl_mode) {
+    rtc::AsyncSocket* socket = CreateSocket(ssl_mode_);
+
+    ssl_adapter_.reset(rtc::SSLAdapter::Create(socket));
+
+    ssl_adapter_->SetMode(ssl_mode_);
+
+    // Ignore any certificate errors for the purpose of testing.
+    // Note: We do this only because we don't have a real certificate.
+    // NEVER USE THIS IN PRODUCTION CODE!
+    ssl_adapter_->SetIgnoreBadCert(true);
+
+    ssl_adapter_->SignalReadEvent.connect(this,
+        &SSLAdapterTestDummyClient::OnSSLAdapterReadEvent);
+    ssl_adapter_->SignalCloseEvent.connect(this,
+        &SSLAdapterTestDummyClient::OnSSLAdapterCloseEvent);
+  }
+
+  void SetAlpnProtocols(const std::vector<std::string>& protos) {
+    ssl_adapter_->SetAlpnProtocols(protos);
+  }
+
+  void SetEllipticCurves(const std::vector<std::string>& curves) {
+    ssl_adapter_->SetEllipticCurves(curves);
+  }
+
+  rtc::SocketAddress GetAddress() const {
+    return ssl_adapter_->GetLocalAddress();
+  }
+
+  rtc::AsyncSocket::ConnState GetState() const {
+    return ssl_adapter_->GetState();
+  }
+
+  const std::string& GetReceivedData() const {
+    return data_;
+  }
+
+  int Connect(const std::string& hostname, const rtc::SocketAddress& address) {
+    RTC_LOG(LS_INFO) << "Initiating connection with " << address;
+
+    int rv = ssl_adapter_->Connect(address);
+
+    if (rv == 0) {
+      RTC_LOG(LS_INFO) << "Starting " << GetSSLProtocolName(ssl_mode_)
+                       << " handshake with " << hostname;
+
+      if (ssl_adapter_->StartSSL(hostname.c_str(), false) != 0) {
+        return -1;
+      }
+    }
+
+    return rv;
+  }
+
+  int Close() {
+    return ssl_adapter_->Close();
+  }
+
+  int Send(const std::string& message) {
+    RTC_LOG(LS_INFO) << "Client sending '" << message << "'";
+
+    return ssl_adapter_->Send(message.data(), message.length());
+  }
+
+  void OnSSLAdapterReadEvent(rtc::AsyncSocket* socket) {
+    char buffer[4096] = "";
+
+    // Read data received from the server and store it in our internal buffer.
+    int read = socket->Recv(buffer, sizeof(buffer) - 1, nullptr);
+    if (read != -1) {
+      buffer[read] = '\0';
+
+      RTC_LOG(LS_INFO) << "Client received '" << buffer << "'";
+
+      data_ += buffer;
+    }
+  }
+
+  void OnSSLAdapterCloseEvent(rtc::AsyncSocket* socket, int error) {
+    // OpenSSLAdapter signals handshake failure with a close event, but without
+    // closing the socket! Let's close the socket here. This way GetState() can
+    // return CS_CLOSED after failure.
+    if (socket->GetState() != rtc::AsyncSocket::CS_CLOSED) {
+      socket->Close();
+    }
+  }
+
+ private:
+  const rtc::SSLMode ssl_mode_;
+
+  std::unique_ptr<rtc::SSLAdapter> ssl_adapter_;
+
+  std::string data_;
+};
+
+class SSLAdapterTestDummyServer : public sigslot::has_slots<> {
+ public:
+  explicit SSLAdapterTestDummyServer(const rtc::SSLMode& ssl_mode,
+                                     const rtc::KeyParams& key_params)
+      : ssl_mode_(ssl_mode) {
+    // Generate a key pair and a certificate for this host.
+    ssl_identity_.reset(rtc::SSLIdentity::Generate(GetHostname(), key_params));
+
+    server_socket_.reset(CreateSocket(ssl_mode_));
+
+    if (ssl_mode_ == rtc::SSL_MODE_TLS) {
+      server_socket_->SignalReadEvent.connect(this,
+          &SSLAdapterTestDummyServer::OnServerSocketReadEvent);
+
+      server_socket_->Listen(1);
+    }
+
+    RTC_LOG(LS_INFO) << ((ssl_mode_ == rtc::SSL_MODE_DTLS) ? "UDP" : "TCP")
+                     << " server listening on "
+                     << server_socket_->GetLocalAddress();
+  }
+
+  rtc::SocketAddress GetAddress() const {
+    return server_socket_->GetLocalAddress();
+  }
+
+  std::string GetHostname() const {
+    // Since we don't have a real certificate anyway, the value here doesn't
+    // really matter.
+    return "example.com";
+  }
+
+  const std::string& GetReceivedData() const {
+    return data_;
+  }
+
+  int Send(const std::string& message) {
+    if (ssl_stream_adapter_ == nullptr ||
+        ssl_stream_adapter_->GetState() != rtc::SS_OPEN) {
+      // No connection yet.
+      return -1;
+    }
+
+    RTC_LOG(LS_INFO) << "Server sending '" << message << "'";
+
+    size_t written;
+    int error;
+
+    rtc::StreamResult r = ssl_stream_adapter_->Write(message.data(),
+        message.length(), &written, &error);
+    if (r == rtc::SR_SUCCESS) {
+      return written;
+    } else {
+      return -1;
+    }
+  }
+
+  void AcceptConnection(const rtc::SocketAddress& address) {
+    // Only a single connection is supported.
+    ASSERT_TRUE(ssl_stream_adapter_ == nullptr);
+
+    // This is only for DTLS.
+    ASSERT_EQ(rtc::SSL_MODE_DTLS, ssl_mode_);
+
+    // Transfer ownership of the socket to the SSLStreamAdapter object.
+    rtc::AsyncSocket* socket = server_socket_.release();
+
+    socket->Connect(address);
+
+    DoHandshake(socket);
+  }
+
+  void OnServerSocketReadEvent(rtc::AsyncSocket* socket) {
+    // Only a single connection is supported.
+    ASSERT_TRUE(ssl_stream_adapter_ == nullptr);
+
+    DoHandshake(server_socket_->Accept(nullptr));
+  }
+
+  void OnSSLStreamAdapterEvent(rtc::StreamInterface* stream, int sig, int err) {
+    if (sig & rtc::SE_READ) {
+      char buffer[4096] = "";
+      size_t read;
+      int error;
+
+      // Read data received from the client and store it in our internal
+      // buffer.
+      rtc::StreamResult r =
+          stream->Read(buffer, sizeof(buffer) - 1, &read, &error);
+      if (r == rtc::SR_SUCCESS) {
+        buffer[read] = '\0';
+        RTC_LOG(LS_INFO) << "Server received '" << buffer << "'";
+        data_ += buffer;
+      }
+    }
+  }
+
+ private:
+  void DoHandshake(rtc::AsyncSocket* socket) {
+    rtc::SocketStream* stream = new rtc::SocketStream(socket);
+
+    ssl_stream_adapter_.reset(rtc::SSLStreamAdapter::Create(stream));
+
+    ssl_stream_adapter_->SetMode(ssl_mode_);
+    ssl_stream_adapter_->SetServerRole();
+
+    // SSLStreamAdapter is normally used for peer-to-peer communication, but
+    // here we're testing communication between a client and a server
+    // (e.g. a WebRTC-based application and an RFC 5766 TURN server), where
+    // clients are not required to provide a certificate during handshake.
+    // Accordingly, we must disable client authentication here.
+    ssl_stream_adapter_->set_client_auth_enabled(false);
+
+    ssl_stream_adapter_->SetIdentity(ssl_identity_->GetReference());
+
+    // Set a bogus peer certificate digest.
+    unsigned char digest[20];
+    size_t digest_len = sizeof(digest);
+    ssl_stream_adapter_->SetPeerCertificateDigest(rtc::DIGEST_SHA_1, digest,
+        digest_len);
+
+    ssl_stream_adapter_->StartSSL();
+
+    ssl_stream_adapter_->SignalEvent.connect(this,
+        &SSLAdapterTestDummyServer::OnSSLStreamAdapterEvent);
+  }
+
+  const rtc::SSLMode ssl_mode_;
+
+  std::unique_ptr<rtc::AsyncSocket> server_socket_;
+  std::unique_ptr<rtc::SSLStreamAdapter> ssl_stream_adapter_;
+
+  std::unique_ptr<rtc::SSLIdentity> ssl_identity_;
+
+  std::string data_;
+};
+
+class SSLAdapterTestBase : public testing::Test,
+                           public sigslot::has_slots<> {
+ public:
+  explicit SSLAdapterTestBase(const rtc::SSLMode& ssl_mode,
+                              const rtc::KeyParams& key_params)
+      : ssl_mode_(ssl_mode),
+        vss_(new rtc::VirtualSocketServer()),
+        thread_(vss_.get()),
+        server_(new SSLAdapterTestDummyServer(ssl_mode_, key_params)),
+        client_(new SSLAdapterTestDummyClient(ssl_mode_)),
+        handshake_wait_(kTimeout) {}
+
+  void SetHandshakeWait(int wait) {
+    handshake_wait_ = wait;
+  }
+
+  void SetAlpnProtocols(const std::vector<std::string>& protos) {
+    client_->SetAlpnProtocols(protos);
+  }
+
+  void SetEllipticCurves(const std::vector<std::string>& curves) {
+    client_->SetEllipticCurves(curves);
+  }
+
+  void TestHandshake(bool expect_success) {
+    int rv;
+
+    // The initial state is CS_CLOSED
+    ASSERT_EQ(rtc::AsyncSocket::CS_CLOSED, client_->GetState());
+
+    rv = client_->Connect(server_->GetHostname(), server_->GetAddress());
+    ASSERT_EQ(0, rv);
+
+    // Now the state should be CS_CONNECTING
+    ASSERT_EQ(rtc::AsyncSocket::CS_CONNECTING, client_->GetState());
+
+    if (ssl_mode_ == rtc::SSL_MODE_DTLS) {
+      // For DTLS, call AcceptConnection() with the client's address.
+      server_->AcceptConnection(client_->GetAddress());
+    }
+
+    if (expect_success) {
+      // If expecting success, the client should end up in the CS_CONNECTED
+      // state after handshake.
+      EXPECT_EQ_WAIT(rtc::AsyncSocket::CS_CONNECTED, client_->GetState(),
+          handshake_wait_);
+
+      RTC_LOG(LS_INFO) << GetSSLProtocolName(ssl_mode_)
+                       << " handshake complete.";
+
+    } else {
+      // On handshake failure the client should end up in the CS_CLOSED state.
+      EXPECT_EQ_WAIT(rtc::AsyncSocket::CS_CLOSED, client_->GetState(),
+          handshake_wait_);
+
+      RTC_LOG(LS_INFO) << GetSSLProtocolName(ssl_mode_) << " handshake failed.";
+    }
+  }
+
+  void TestTransfer(const std::string& message) {
+    int rv;
+
+    rv = client_->Send(message);
+    ASSERT_EQ(static_cast<int>(message.length()), rv);
+
+    // The server should have received the client's message.
+    EXPECT_EQ_WAIT(message, server_->GetReceivedData(), kTimeout);
+
+    rv = server_->Send(message);
+    ASSERT_EQ(static_cast<int>(message.length()), rv);
+
+    // The client should have received the server's message.
+    EXPECT_EQ_WAIT(message, client_->GetReceivedData(), kTimeout);
+
+    RTC_LOG(LS_INFO) << "Transfer complete.";
+  }
+
+ protected:
+  const rtc::SSLMode ssl_mode_;
+
+  std::unique_ptr<rtc::VirtualSocketServer> vss_;
+  rtc::AutoSocketServerThread thread_;
+  std::unique_ptr<SSLAdapterTestDummyServer> server_;
+  std::unique_ptr<SSLAdapterTestDummyClient> client_;
+
+  int handshake_wait_;
+};
+
+class SSLAdapterTestTLS_RSA : public SSLAdapterTestBase {
+ public:
+  SSLAdapterTestTLS_RSA()
+      : SSLAdapterTestBase(rtc::SSL_MODE_TLS, rtc::KeyParams::RSA()) {}
+};
+
+class SSLAdapterTestTLS_ECDSA : public SSLAdapterTestBase {
+ public:
+  SSLAdapterTestTLS_ECDSA()
+      : SSLAdapterTestBase(rtc::SSL_MODE_TLS, rtc::KeyParams::ECDSA()) {}
+};
+
+class SSLAdapterTestDTLS_RSA : public SSLAdapterTestBase {
+ public:
+  SSLAdapterTestDTLS_RSA()
+      : SSLAdapterTestBase(rtc::SSL_MODE_DTLS, rtc::KeyParams::RSA()) {}
+};
+
+class SSLAdapterTestDTLS_ECDSA : public SSLAdapterTestBase {
+ public:
+  SSLAdapterTestDTLS_ECDSA()
+      : SSLAdapterTestBase(rtc::SSL_MODE_DTLS, rtc::KeyParams::ECDSA()) {}
+};
+
+// Basic tests: TLS
+
+// Test that handshake works, using RSA
+TEST_F(SSLAdapterTestTLS_RSA, TestTLSConnect) {
+  TestHandshake(true);
+}
+
+// Test that handshake works, using ECDSA
+TEST_F(SSLAdapterTestTLS_ECDSA, TestTLSConnect) {
+  TestHandshake(true);
+}
+
+// Test transfer between client and server, using RSA
+TEST_F(SSLAdapterTestTLS_RSA, TestTLSTransfer) {
+  TestHandshake(true);
+  TestTransfer("Hello, world!");
+}
+
+TEST_F(SSLAdapterTestTLS_RSA, TestTLSTransferWithBlockedSocket) {
+  TestHandshake(true);
+
+  // Tell the underlying socket to simulate being blocked.
+  vss_->SetSendingBlocked(true);
+
+  std::string expected;
+  int rv;
+  // Send messages until the SSL socket adapter starts applying backpressure.
+  // Note that this may not occur immediately since there may be some amount of
+  // intermediate buffering (either in our code or in BoringSSL).
+  for (int i = 0; i < 1024; ++i) {
+    std::string message = "Hello, world: " + rtc::ToString(i);
+    rv = client_->Send(message);
+    if (rv != static_cast<int>(message.size())) {
+      // This test assumes either the whole message or none of it is sent.
+      ASSERT_EQ(-1, rv);
+      break;
+    }
+    expected += message;
+  }
+  // Assert that the loop above exited due to Send returning -1.
+  ASSERT_EQ(-1, rv);
+
+  // Try sending another message while blocked. -1 should be returned again and
+  // it shouldn't end up received by the server later.
+  EXPECT_EQ(-1, client_->Send("Never sent"));
+
+  // Unblock the underlying socket. All of the buffered messages should be sent
+  // without any further action.
+  vss_->SetSendingBlocked(false);
+  EXPECT_EQ_WAIT(expected, server_->GetReceivedData(), kTimeout);
+
+  // Send another message. This previously wasn't working
+  std::string final_message = "Fin.";
+  expected += final_message;
+  EXPECT_EQ(static_cast<int>(final_message.size()),
+            client_->Send(final_message));
+  EXPECT_EQ_WAIT(expected, server_->GetReceivedData(), kTimeout);
+}
+
+// Test transfer between client and server, using ECDSA
+TEST_F(SSLAdapterTestTLS_ECDSA, TestTLSTransfer) {
+  TestHandshake(true);
+  TestTransfer("Hello, world!");
+}
+
+// Test transfer using ALPN with protos as h2 and http/1.1
+TEST_F(SSLAdapterTestTLS_ECDSA, TestTLSALPN) {
+  std::vector<std::string> alpn_protos{"h2", "http/1.1"};
+  SetAlpnProtocols(alpn_protos);
+  TestHandshake(true);
+  TestTransfer("Hello, world!");
+}
+
+// Test transfer with TLS Elliptic curves set to "X25519:P-256:P-384:P-521"
+TEST_F(SSLAdapterTestTLS_ECDSA, TestTLSEllipticCurves) {
+  std::vector<std::string> elliptic_curves{"X25519", "P-256", "P-384", "P-521"};
+  SetEllipticCurves(elliptic_curves);
+  TestHandshake(true);
+  TestTransfer("Hello, world!");
+}
+
+// Basic tests: DTLS
+
+// Test that handshake works, using RSA
+TEST_F(SSLAdapterTestDTLS_RSA, TestDTLSConnect) {
+  TestHandshake(true);
+}
+
+// Test that handshake works, using ECDSA
+TEST_F(SSLAdapterTestDTLS_ECDSA, TestDTLSConnect) {
+  TestHandshake(true);
+}
+
+// Test transfer between client and server, using RSA
+TEST_F(SSLAdapterTestDTLS_RSA, TestDTLSTransfer) {
+  TestHandshake(true);
+  TestTransfer("Hello, world!");
+}
+
+// Test transfer between client and server, using ECDSA
+TEST_F(SSLAdapterTestDTLS_ECDSA, TestDTLSTransfer) {
+  TestHandshake(true);
+  TestTransfer("Hello, world!");
+}
diff --git a/rtc_base/sslfingerprint.cc b/rtc_base/sslfingerprint.cc
new file mode 100644
index 0000000..dda46f1
--- /dev/null
+++ b/rtc_base/sslfingerprint.cc
@@ -0,0 +1,113 @@
+/*
+ *  Copyright 2012 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/sslfingerprint.h"
+
+#include <ctype.h>
+#include <string>
+
+#include "rtc_base/helpers.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/messagedigest.h"
+#include "rtc_base/stringencode.h"
+
+namespace rtc {
+
+SSLFingerprint* SSLFingerprint::Create(
+    const std::string& algorithm, const rtc::SSLIdentity* identity) {
+  if (!identity) {
+    return nullptr;
+  }
+
+  return Create(algorithm, &(identity->certificate()));
+}
+
+SSLFingerprint* SSLFingerprint::Create(
+    const std::string& algorithm, const rtc::SSLCertificate* cert) {
+  uint8_t digest_val[64];
+  size_t digest_len;
+  bool ret = cert->ComputeDigest(
+      algorithm, digest_val, sizeof(digest_val), &digest_len);
+  if (!ret) {
+    return nullptr;
+  }
+
+  return new SSLFingerprint(algorithm, digest_val, digest_len);
+}
+
+SSLFingerprint* SSLFingerprint::CreateFromRfc4572(
+    const std::string& algorithm, const std::string& fingerprint) {
+  if (algorithm.empty() || !rtc::IsFips180DigestAlgorithm(algorithm))
+    return nullptr;
+
+  if (fingerprint.empty())
+    return nullptr;
+
+  size_t value_len;
+  char value[rtc::MessageDigest::kMaxSize];
+  value_len = rtc::hex_decode_with_delimiter(value, sizeof(value),
+                                                   fingerprint.c_str(),
+                                                   fingerprint.length(),
+                                                   ':');
+  if (!value_len)
+    return nullptr;
+
+  return new SSLFingerprint(algorithm, reinterpret_cast<uint8_t*>(value),
+                            value_len);
+}
+
+SSLFingerprint* SSLFingerprint::CreateFromCertificate(
+    const RTCCertificate* cert) {
+  std::string digest_alg;
+  if (!cert->ssl_certificate().GetSignatureDigestAlgorithm(&digest_alg)) {
+    RTC_LOG(LS_ERROR)
+        << "Failed to retrieve the certificate's digest algorithm";
+    return nullptr;
+  }
+
+  SSLFingerprint* fingerprint = Create(digest_alg, cert->identity());
+  if (!fingerprint) {
+    RTC_LOG(LS_ERROR) << "Failed to create identity fingerprint, alg="
+                      << digest_alg;
+  }
+  return fingerprint;
+}
+
+SSLFingerprint::SSLFingerprint(const std::string& algorithm,
+                               const uint8_t* digest_in,
+                               size_t digest_len)
+    : algorithm(algorithm) {
+  digest.SetData(digest_in, digest_len);
+}
+
+SSLFingerprint::SSLFingerprint(const SSLFingerprint& from)
+    : algorithm(from.algorithm), digest(from.digest) {}
+
+bool SSLFingerprint::operator==(const SSLFingerprint& other) const {
+  return algorithm == other.algorithm &&
+         digest == other.digest;
+}
+
+std::string SSLFingerprint::GetRfc4572Fingerprint() const {
+  std::string fingerprint =
+      rtc::hex_encode_with_delimiter(digest.data<char>(), digest.size(), ':');
+  std::transform(fingerprint.begin(), fingerprint.end(),
+                 fingerprint.begin(), ::toupper);
+  return fingerprint;
+}
+
+std::string SSLFingerprint::ToString() const {
+  std::string fp_str = algorithm;
+  fp_str.append(" ");
+  fp_str.append(GetRfc4572Fingerprint());
+  return fp_str;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/sslfingerprint.h b/rtc_base/sslfingerprint.h
new file mode 100644
index 0000000..b5e9b72
--- /dev/null
+++ b/rtc_base/sslfingerprint.h
@@ -0,0 +1,57 @@
+/*
+ *  Copyright 2012 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SSLFINGERPRINT_H_
+#define RTC_BASE_SSLFINGERPRINT_H_
+
+#include <string>
+
+#include "rtc_base/basictypes.h"
+#include "rtc_base/copyonwritebuffer.h"
+#include "rtc_base/rtccertificate.h"
+#include "rtc_base/sslidentity.h"
+
+namespace rtc {
+
+class SSLCertificate;
+
+struct SSLFingerprint {
+  static SSLFingerprint* Create(const std::string& algorithm,
+                                const rtc::SSLIdentity* identity);
+
+  static SSLFingerprint* Create(const std::string& algorithm,
+                                const rtc::SSLCertificate* cert);
+
+  static SSLFingerprint* CreateFromRfc4572(const std::string& algorithm,
+                                           const std::string& fingerprint);
+
+  // Creates a fingerprint from a certificate, using the same digest algorithm
+  // as the certificate's signature.
+  static SSLFingerprint* CreateFromCertificate(const RTCCertificate* cert);
+
+  SSLFingerprint(const std::string& algorithm,
+                 const uint8_t* digest_in,
+                 size_t digest_len);
+
+  SSLFingerprint(const SSLFingerprint& from);
+
+  bool operator==(const SSLFingerprint& other) const;
+
+  std::string GetRfc4572Fingerprint() const;
+
+  std::string ToString() const;
+
+  std::string algorithm;
+  rtc::CopyOnWriteBuffer digest;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_SSLFINGERPRINT_H_
diff --git a/rtc_base/sslidentity.cc b/rtc_base/sslidentity.cc
new file mode 100644
index 0000000..1514e52
--- /dev/null
+++ b/rtc_base/sslidentity.cc
@@ -0,0 +1,353 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Handling of certificates and keypairs for SSLStreamAdapter's peer mode.
+#include "rtc_base/sslidentity.h"
+
+#include <ctime>
+#include <string>
+#include <utility>
+
+#include "rtc_base/base64.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/opensslidentity.h"
+#include "rtc_base/ptr_util.h"
+#include "rtc_base/sslfingerprint.h"
+
+namespace rtc {
+
+const char kPemTypeCertificate[] = "CERTIFICATE";
+const char kPemTypeRsaPrivateKey[] = "RSA PRIVATE KEY";
+const char kPemTypeEcPrivateKey[] = "EC PRIVATE KEY";
+
+SSLCertificateStats::SSLCertificateStats(
+    std::string&& fingerprint,
+    std::string&& fingerprint_algorithm,
+    std::string&& base64_certificate,
+    std::unique_ptr<SSLCertificateStats>&& issuer)
+    : fingerprint(std::move(fingerprint)),
+      fingerprint_algorithm(std::move(fingerprint_algorithm)),
+      base64_certificate(std::move(base64_certificate)),
+      issuer(std::move(issuer)) {
+}
+
+SSLCertificateStats::~SSLCertificateStats() {
+}
+
+std::unique_ptr<SSLCertificateStats> SSLCertificate::GetStats() const {
+  // TODO(bemasc): Move this computation to a helper class that caches these
+  // values to reduce CPU use in |StatsCollector::GetStats|. This will require
+  // adding a fast |SSLCertificate::Equals| to detect certificate changes.
+  std::string digest_algorithm;
+  if (!GetSignatureDigestAlgorithm(&digest_algorithm))
+    return nullptr;
+
+  // |SSLFingerprint::Create| can fail if the algorithm returned by
+  // |SSLCertificate::GetSignatureDigestAlgorithm| is not supported by the
+  // implementation of |SSLCertificate::ComputeDigest|. This currently happens
+  // with MD5- and SHA-224-signed certificates when linked to libNSS.
+  std::unique_ptr<SSLFingerprint> ssl_fingerprint(
+      SSLFingerprint::Create(digest_algorithm, this));
+  if (!ssl_fingerprint)
+    return nullptr;
+  std::string fingerprint = ssl_fingerprint->GetRfc4572Fingerprint();
+
+  Buffer der_buffer;
+  ToDER(&der_buffer);
+  std::string der_base64;
+  Base64::EncodeFromArray(der_buffer.data(), der_buffer.size(), &der_base64);
+
+  return rtc::MakeUnique<SSLCertificateStats>(std::move(fingerprint),
+                                              std::move(digest_algorithm),
+                                              std::move(der_base64), nullptr);
+}
+
+std::unique_ptr<SSLCertificate> SSLCertificate::GetUniqueReference() const {
+  return WrapUnique(GetReference());
+}
+
+KeyParams::KeyParams(KeyType key_type) {
+  if (key_type == KT_ECDSA) {
+    type_ = KT_ECDSA;
+    params_.curve = EC_NIST_P256;
+  } else if (key_type == KT_RSA) {
+    type_ = KT_RSA;
+    params_.rsa.mod_size = kRsaDefaultModSize;
+    params_.rsa.pub_exp = kRsaDefaultExponent;
+  } else {
+    RTC_NOTREACHED();
+  }
+}
+
+// static
+KeyParams KeyParams::RSA(int mod_size, int pub_exp) {
+  KeyParams kt(KT_RSA);
+  kt.params_.rsa.mod_size = mod_size;
+  kt.params_.rsa.pub_exp = pub_exp;
+  return kt;
+}
+
+// static
+KeyParams KeyParams::ECDSA(ECCurve curve) {
+  KeyParams kt(KT_ECDSA);
+  kt.params_.curve = curve;
+  return kt;
+}
+
+bool KeyParams::IsValid() const {
+  if (type_ == KT_RSA) {
+    return (params_.rsa.mod_size >= kRsaMinModSize &&
+            params_.rsa.mod_size <= kRsaMaxModSize &&
+            params_.rsa.pub_exp > params_.rsa.mod_size);
+  } else if (type_ == KT_ECDSA) {
+    return (params_.curve == EC_NIST_P256);
+  }
+  return false;
+}
+
+RSAParams KeyParams::rsa_params() const {
+  RTC_DCHECK(type_ == KT_RSA);
+  return params_.rsa;
+}
+
+ECCurve KeyParams::ec_curve() const {
+  RTC_DCHECK(type_ == KT_ECDSA);
+  return params_.curve;
+}
+
+KeyType IntKeyTypeFamilyToKeyType(int key_type_family) {
+  return static_cast<KeyType>(key_type_family);
+}
+
+bool SSLIdentity::PemToDer(const std::string& pem_type,
+                           const std::string& pem_string,
+                           std::string* der) {
+  // Find the inner body. We need this to fulfill the contract of
+  // returning pem_length.
+  size_t header = pem_string.find("-----BEGIN " + pem_type + "-----");
+  if (header == std::string::npos)
+    return false;
+
+  size_t body = pem_string.find("\n", header);
+  if (body == std::string::npos)
+    return false;
+
+  size_t trailer = pem_string.find("-----END " + pem_type + "-----");
+  if (trailer == std::string::npos)
+    return false;
+
+  std::string inner = pem_string.substr(body + 1, trailer - (body + 1));
+
+  *der = Base64::Decode(inner, Base64::DO_PARSE_WHITE |
+                        Base64::DO_PAD_ANY |
+                        Base64::DO_TERM_BUFFER);
+  return true;
+}
+
+std::string SSLIdentity::DerToPem(const std::string& pem_type,
+                                  const unsigned char* data,
+                                  size_t length) {
+  std::stringstream result;
+
+  result << "-----BEGIN " << pem_type << "-----\n";
+
+  std::string b64_encoded;
+  Base64::EncodeFromArray(data, length, &b64_encoded);
+
+  // Divide the Base-64 encoded data into 64-character chunks, as per
+  // 4.3.2.4 of RFC 1421.
+  static const size_t kChunkSize = 64;
+  size_t chunks = (b64_encoded.size() + (kChunkSize - 1)) / kChunkSize;
+  for (size_t i = 0, chunk_offset = 0; i < chunks;
+       ++i, chunk_offset += kChunkSize) {
+    result << b64_encoded.substr(chunk_offset, kChunkSize);
+    result << "\n";
+  }
+
+  result << "-----END " << pem_type << "-----\n";
+
+  return result.str();
+}
+
+SSLCertChain::SSLCertChain(std::vector<std::unique_ptr<SSLCertificate>> certs)
+    : certs_(std::move(certs)) {}
+
+SSLCertChain::SSLCertChain(const std::vector<SSLCertificate*>& certs) {
+  RTC_DCHECK(!certs.empty());
+  certs_.resize(certs.size());
+  std::transform(
+      certs.begin(), certs.end(), certs_.begin(),
+      [](const SSLCertificate* cert) -> std::unique_ptr<SSLCertificate> {
+        return cert->GetUniqueReference();
+      });
+}
+
+SSLCertChain::SSLCertChain(const SSLCertificate* cert) {
+  certs_.push_back(cert->GetUniqueReference());
+}
+
+SSLCertChain::~SSLCertChain() {}
+
+SSLCertChain* SSLCertChain::Copy() const {
+  std::vector<std::unique_ptr<SSLCertificate>> new_certs(certs_.size());
+  std::transform(certs_.begin(), certs_.end(), new_certs.begin(),
+                 [](const std::unique_ptr<SSLCertificate>& cert)
+                     -> std::unique_ptr<SSLCertificate> {
+                   return cert->GetUniqueReference();
+                 });
+  return new SSLCertChain(std::move(new_certs));
+}
+
+std::unique_ptr<SSLCertChain> SSLCertChain::UniqueCopy() const {
+  return WrapUnique(Copy());
+}
+
+std::unique_ptr<SSLCertificateStats> SSLCertChain::GetStats() const {
+  // We have a linked list of certificates, starting with the first element of
+  // |certs_| and ending with the last element of |certs_|. The "issuer" of a
+  // certificate is the next certificate in the chain. Stats are produced for
+  // each certificate in the list. Here, the "issuer" is the issuer's stats.
+  std::unique_ptr<SSLCertificateStats> issuer;
+  // The loop runs in reverse so that the |issuer| is known before the
+  // certificate issued by |issuer|.
+  for (ptrdiff_t i = certs_.size() - 1; i >= 0; --i) {
+    std::unique_ptr<SSLCertificateStats> new_stats = certs_[i]->GetStats();
+    if (new_stats) {
+      new_stats->issuer = std::move(issuer);
+    }
+    issuer = std::move(new_stats);
+  }
+  return issuer;
+}
+
+// static
+SSLCertificate* SSLCertificate::FromPEMString(const std::string& pem_string) {
+  return OpenSSLCertificate::FromPEMString(pem_string);
+}
+
+// static
+SSLIdentity* SSLIdentity::GenerateWithExpiration(const std::string& common_name,
+                                                 const KeyParams& key_params,
+                                                 time_t certificate_lifetime) {
+  return OpenSSLIdentity::GenerateWithExpiration(common_name, key_params,
+                                                 certificate_lifetime);
+}
+
+// static
+SSLIdentity* SSLIdentity::Generate(const std::string& common_name,
+                                   const KeyParams& key_params) {
+  return OpenSSLIdentity::GenerateWithExpiration(
+      common_name, key_params, kDefaultCertificateLifetimeInSeconds);
+}
+
+// static
+SSLIdentity* SSLIdentity::Generate(const std::string& common_name,
+                                   KeyType key_type) {
+  return OpenSSLIdentity::GenerateWithExpiration(
+      common_name, KeyParams(key_type), kDefaultCertificateLifetimeInSeconds);
+}
+
+SSLIdentity* SSLIdentity::GenerateForTest(const SSLIdentityParams& params) {
+  return OpenSSLIdentity::GenerateForTest(params);
+}
+
+// static
+SSLIdentity* SSLIdentity::FromPEMStrings(const std::string& private_key,
+                                         const std::string& certificate) {
+  return OpenSSLIdentity::FromPEMStrings(private_key, certificate);
+}
+
+// static
+SSLIdentity* SSLIdentity::FromPEMChainStrings(
+    const std::string& private_key,
+    const std::string& certificate_chain) {
+  return OpenSSLIdentity::FromPEMChainStrings(private_key, certificate_chain);
+}
+
+bool operator==(const SSLIdentity& a, const SSLIdentity& b) {
+  return static_cast<const OpenSSLIdentity&>(a) ==
+         static_cast<const OpenSSLIdentity&>(b);
+}
+bool operator!=(const SSLIdentity& a, const SSLIdentity& b) {
+  return !(a == b);
+}
+
+// Read |n| bytes from ASN1 number string at *|pp| and return the numeric value.
+// Update *|pp| and *|np| to reflect number of read bytes.
+static inline int ASN1ReadInt(const unsigned char** pp, size_t* np, size_t n) {
+  const unsigned char* p = *pp;
+  int x = 0;
+  for (size_t i = 0; i < n; i++)
+    x = 10 * x + p[i] - '0';
+  *pp = p + n;
+  *np = *np - n;
+  return x;
+}
+
+int64_t ASN1TimeToSec(const unsigned char* s, size_t length, bool long_format) {
+  size_t bytes_left = length;
+
+  // Make sure the string ends with Z.  Doing it here protects the strspn call
+  // from running off the end of the string in Z's absense.
+  if (length == 0 || s[length - 1] != 'Z')
+    return -1;
+
+  // Make sure we only have ASCII digits so that we don't need to clutter the
+  // code below and ASN1ReadInt with error checking.
+  size_t n = strspn(reinterpret_cast<const char*>(s), "0123456789");
+  if (n + 1 != length)
+    return -1;
+
+  int year;
+
+  // Read out ASN1 year, in either 2-char "UTCTIME" or 4-char "GENERALIZEDTIME"
+  // format.  Both format use UTC in this context.
+  if (long_format) {
+    // ASN1 format: yyyymmddhh[mm[ss[.fff]]]Z where the Z is literal, but
+    // RFC 5280 requires us to only support exactly yyyymmddhhmmssZ.
+
+    if (bytes_left < 11)
+      return -1;
+
+    year = ASN1ReadInt(&s, &bytes_left, 4);
+    year -= 1900;
+  } else {
+    // ASN1 format: yymmddhhmm[ss]Z where the Z is literal, but RFC 5280
+    // requires us to only support exactly yymmddhhmmssZ.
+
+    if (bytes_left < 9)
+      return -1;
+
+    year = ASN1ReadInt(&s, &bytes_left, 2);
+    if (year < 50)  // Per RFC 5280 4.1.2.5.1
+      year += 100;
+  }
+
+  std::tm tm;
+  tm.tm_year = year;
+
+  // Read out remaining ASN1 time data and store it in |tm| in documented
+  // std::tm format.
+  tm.tm_mon = ASN1ReadInt(&s, &bytes_left, 2) - 1;
+  tm.tm_mday = ASN1ReadInt(&s, &bytes_left, 2);
+  tm.tm_hour = ASN1ReadInt(&s, &bytes_left, 2);
+  tm.tm_min = ASN1ReadInt(&s, &bytes_left, 2);
+  tm.tm_sec = ASN1ReadInt(&s, &bytes_left, 2);
+
+  if (bytes_left != 1) {
+    // Now just Z should remain.  Its existence was asserted above.
+    return -1;
+  }
+
+  return TmToSeconds(tm);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/sslidentity.h b/rtc_base/sslidentity.h
new file mode 100644
index 0000000..d14610b
--- /dev/null
+++ b/rtc_base/sslidentity.h
@@ -0,0 +1,273 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Handling of certificates and keypairs for SSLStreamAdapter's peer mode.
+
+#ifndef RTC_BASE_SSLIDENTITY_H_
+#define RTC_BASE_SSLIDENTITY_H_
+
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "rtc_base/buffer.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/messagedigest.h"
+#include "rtc_base/timeutils.h"
+
+namespace rtc {
+
+// Forward declaration due to circular dependency with SSLCertificate.
+class SSLCertChain;
+
+struct SSLCertificateStats {
+  SSLCertificateStats(std::string&& fingerprint,
+                      std::string&& fingerprint_algorithm,
+                      std::string&& base64_certificate,
+                      std::unique_ptr<SSLCertificateStats>&& issuer);
+  ~SSLCertificateStats();
+  std::string fingerprint;
+  std::string fingerprint_algorithm;
+  std::string base64_certificate;
+  std::unique_ptr<SSLCertificateStats> issuer;
+};
+
+// Abstract interface overridden by SSL library specific
+// implementations.
+
+// A somewhat opaque type used to encapsulate a certificate.
+// Wraps the SSL library's notion of a certificate, with reference counting.
+// The SSLCertificate object is pretty much immutable once created.
+// (The OpenSSL implementation only does reference counting and
+// possibly caching of intermediate results.)
+class SSLCertificate {
+ public:
+  // Parses and builds a certificate from a PEM encoded string.
+  // Returns null on failure.
+  // The length of the string representation of the certificate is
+  // stored in *pem_length if it is non-null, and only if
+  // parsing was successful.
+  // Caller is responsible for freeing the returned object.
+  static SSLCertificate* FromPEMString(const std::string& pem_string);
+  virtual ~SSLCertificate() {}
+
+  // Returns a new SSLCertificate object instance wrapping the same
+  // underlying certificate, including its chain if present.  Caller is
+  // responsible for freeing the returned object. Use GetUniqueReference
+  // instead.
+  virtual SSLCertificate* GetReference() const = 0;
+
+  std::unique_ptr<SSLCertificate> GetUniqueReference() const;
+
+  // Returns a PEM encoded string representation of the certificate.
+  virtual std::string ToPEMString() const = 0;
+
+  // Provides a DER encoded binary representation of the certificate.
+  virtual void ToDER(Buffer* der_buffer) const = 0;
+
+  // Gets the name of the digest algorithm that was used to compute this
+  // certificate's signature.
+  virtual bool GetSignatureDigestAlgorithm(std::string* algorithm) const = 0;
+
+  // Compute the digest of the certificate given algorithm
+  virtual bool ComputeDigest(const std::string& algorithm,
+                             unsigned char* digest,
+                             size_t size,
+                             size_t* length) const = 0;
+
+  // Returns the time in seconds relative to epoch, 1970-01-01T00:00:00Z (UTC),
+  // or -1 if an expiration time could not be retrieved.
+  virtual int64_t CertificateExpirationTime() const = 0;
+
+  // Gets information (fingerprint, etc.) about this certificate. This is used
+  // for certificate stats, see
+  // https://w3c.github.io/webrtc-stats/#certificatestats-dict*.
+  std::unique_ptr<SSLCertificateStats> GetStats() const;
+};
+
+// SSLCertChain is a simple wrapper for a vector of SSLCertificates. It serves
+// primarily to ensure proper memory management (especially deletion) of the
+// SSLCertificate pointers.
+class SSLCertChain {
+ public:
+  explicit SSLCertChain(std::vector<std::unique_ptr<SSLCertificate>> certs);
+  // These constructors copy the provided SSLCertificate(s), so the caller
+  // retains ownership.
+  explicit SSLCertChain(const std::vector<SSLCertificate*>& certs);
+  explicit SSLCertChain(const SSLCertificate* cert);
+  ~SSLCertChain();
+
+  // Vector access methods.
+  size_t GetSize() const { return certs_.size(); }
+
+  // Returns a temporary reference, only valid until the chain is destroyed.
+  const SSLCertificate& Get(size_t pos) const { return *(certs_[pos]); }
+
+  // Returns a new SSLCertChain object instance wrapping the same underlying
+  // certificate chain.  Caller is responsible for freeing the returned object.
+  SSLCertChain* Copy() const;
+  // Same as above, but returning a unique_ptr for convenience.
+  std::unique_ptr<SSLCertChain> UniqueCopy() const;
+
+  // Gets information (fingerprint, etc.) about this certificate chain. This is
+  // used for certificate stats, see
+  // https://w3c.github.io/webrtc-stats/#certificatestats-dict*.
+  std::unique_ptr<SSLCertificateStats> GetStats() const;
+
+ private:
+  std::vector<std::unique_ptr<SSLCertificate>> certs_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(SSLCertChain);
+};
+
+// KT_LAST is intended for vector declarations and loops over all key types;
+// it does not represent any key type in itself.
+// KT_DEFAULT is used as the default KeyType for KeyParams.
+enum KeyType { KT_RSA, KT_ECDSA, KT_LAST, KT_DEFAULT = KT_ECDSA };
+
+static const int kRsaDefaultModSize = 1024;
+static const int kRsaDefaultExponent = 0x10001;  // = 2^16+1 = 65537
+static const int kRsaMinModSize = 1024;
+static const int kRsaMaxModSize = 8192;
+
+// Certificate default validity lifetime.
+static const int kDefaultCertificateLifetimeInSeconds =
+    60 * 60 * 24 * 30;  // 30 days
+// Certificate validity window.
+// This is to compensate for slightly incorrect system clocks.
+static const int kCertificateWindowInSeconds = -60 * 60 * 24;
+
+struct RSAParams {
+  unsigned int mod_size;
+  unsigned int pub_exp;
+};
+
+enum ECCurve { EC_NIST_P256, /* EC_FANCY, */ EC_LAST };
+
+class KeyParams {
+ public:
+  // Generate a KeyParams object from a simple KeyType, using default params.
+  explicit KeyParams(KeyType key_type = KT_DEFAULT);
+
+  // Generate a a KeyParams for RSA with explicit parameters.
+  static KeyParams RSA(int mod_size = kRsaDefaultModSize,
+                       int pub_exp = kRsaDefaultExponent);
+
+  // Generate a a KeyParams for ECDSA specifying the curve.
+  static KeyParams ECDSA(ECCurve curve = EC_NIST_P256);
+
+  // Check validity of a KeyParams object. Since the factory functions have
+  // no way of returning errors, this function can be called after creation
+  // to make sure the parameters are OK.
+  bool IsValid() const;
+
+  RSAParams rsa_params() const;
+
+  ECCurve ec_curve() const;
+
+  KeyType type() const { return type_; }
+
+ private:
+  KeyType type_;
+  union {
+    RSAParams rsa;
+    ECCurve curve;
+  } params_;
+};
+
+// TODO(hbos): Remove once rtc::KeyType (to be modified) and
+// blink::WebRTCKeyType (to be landed) match. By using this function in Chromium
+// appropriately we can change KeyType enum -> class without breaking Chromium.
+KeyType IntKeyTypeFamilyToKeyType(int key_type_family);
+
+// Parameters for generating a certificate. If |common_name| is non-empty, it
+// will be used for the certificate's subject and issuer name, otherwise a
+// random string will be used.
+struct SSLIdentityParams {
+  std::string common_name;
+  time_t not_before;  // Absolute time since epoch in seconds.
+  time_t not_after;   // Absolute time since epoch in seconds.
+  KeyParams key_params;
+};
+
+// Our identity in an SSL negotiation: a keypair and certificate (both
+// with the same public key).
+// This too is pretty much immutable once created.
+class SSLIdentity {
+ public:
+  // Generates an identity (keypair and self-signed certificate). If
+  // |common_name| is non-empty, it will be used for the certificate's subject
+  // and issuer name, otherwise a random string will be used. The key type and
+  // parameters are defined in |key_param|. The certificate's lifetime in
+  // seconds from the current time is defined in |certificate_lifetime|; it
+  // should be a non-negative number.
+  // Returns null on failure.
+  // Caller is responsible for freeing the returned object.
+  static SSLIdentity* GenerateWithExpiration(const std::string& common_name,
+                                             const KeyParams& key_param,
+                                             time_t certificate_lifetime);
+  static SSLIdentity* Generate(const std::string& common_name,
+                               const KeyParams& key_param);
+  static SSLIdentity* Generate(const std::string& common_name,
+                               KeyType key_type);
+
+  // Generates an identity with the specified validity period.
+  // TODO(torbjorng): Now that Generate() accepts relevant params, make tests
+  // use that instead of this function.
+  static SSLIdentity* GenerateForTest(const SSLIdentityParams& params);
+
+  // Construct an identity from a private key and a certificate.
+  static SSLIdentity* FromPEMStrings(const std::string& private_key,
+                                     const std::string& certificate);
+
+  // Construct an identity from a private key and a certificate chain.
+  static SSLIdentity* FromPEMChainStrings(const std::string& private_key,
+                                          const std::string& certificate_chain);
+
+  virtual ~SSLIdentity() {}
+
+  // Returns a new SSLIdentity object instance wrapping the same
+  // identity information.
+  // Caller is responsible for freeing the returned object.
+  // TODO(hbos,torbjorng): Rename to a less confusing name.
+  virtual SSLIdentity* GetReference() const = 0;
+
+  // Returns a temporary reference to the end-entity (leaf) certificate.
+  virtual const SSLCertificate& certificate() const = 0;
+  // Returns a temporary reference to the entire certificate chain.
+  virtual const SSLCertChain& cert_chain() const = 0;
+  virtual std::string PrivateKeyToPEMString() const = 0;
+  virtual std::string PublicKeyToPEMString() const = 0;
+
+  // Helpers for parsing converting between PEM and DER format.
+  static bool PemToDer(const std::string& pem_type,
+                       const std::string& pem_string,
+                       std::string* der);
+  static std::string DerToPem(const std::string& pem_type,
+                              const unsigned char* data,
+                              size_t length);
+};
+
+bool operator==(const SSLIdentity& a, const SSLIdentity& b);
+bool operator!=(const SSLIdentity& a, const SSLIdentity& b);
+
+// Convert from ASN1 time as restricted by RFC 5280 to seconds from 1970-01-01
+// 00.00 ("epoch").  If the ASN1 time cannot be read, return -1.  The data at
+// |s| is not 0-terminated; its char count is defined by |length|.
+int64_t ASN1TimeToSec(const unsigned char* s, size_t length, bool long_format);
+
+extern const char kPemTypeCertificate[];
+extern const char kPemTypeRsaPrivateKey[];
+extern const char kPemTypeEcPrivateKey[];
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_SSLIDENTITY_H_
diff --git a/rtc_base/sslidentity_unittest.cc b/rtc_base/sslidentity_unittest.cc
new file mode 100644
index 0000000..e1dbe05
--- /dev/null
+++ b/rtc_base/sslidentity_unittest.cc
@@ -0,0 +1,601 @@
+/*
+ *  Copyright 2011 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+#include <string>
+
+#include "rtc_base/fakesslidentity.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/helpers.h"
+#include "rtc_base/ssladapter.h"
+#include "rtc_base/sslfingerprint.h"
+#include "rtc_base/sslidentity.h"
+#include "rtc_base/stringutils.h"
+
+using rtc::SSLIdentity;
+
+const char kTestCertificate[] = "-----BEGIN CERTIFICATE-----\n"
+    "MIIB6TCCAVICAQYwDQYJKoZIhvcNAQEEBQAwWzELMAkGA1UEBhMCQVUxEzARBgNV\n"
+    "BAgTClF1ZWVuc2xhbmQxGjAYBgNVBAoTEUNyeXB0U29mdCBQdHkgTHRkMRswGQYD\n"
+    "VQQDExJUZXN0IENBICgxMDI0IGJpdCkwHhcNMDAxMDE2MjIzMTAzWhcNMDMwMTE0\n"
+    "MjIzMTAzWjBjMQswCQYDVQQGEwJBVTETMBEGA1UECBMKUXVlZW5zbGFuZDEaMBgG\n"
+    "A1UEChMRQ3J5cHRTb2Z0IFB0eSBMdGQxIzAhBgNVBAMTGlNlcnZlciB0ZXN0IGNl\n"
+    "cnQgKDUxMiBiaXQpMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAJ+zw4Qnlf8SMVIP\n"
+    "Fe9GEcStgOY2Ww/dgNdhjeD8ckUJNP5VZkVDTGiXav6ooKXfX3j/7tdkuD8Ey2//\n"
+    "Kv7+ue0CAwEAATANBgkqhkiG9w0BAQQFAAOBgQCT0grFQeZaqYb5EYfk20XixZV4\n"
+    "GmyAbXMftG1Eo7qGiMhYzRwGNWxEYojf5PZkYZXvSqZ/ZXHXa4g59jK/rJNnaVGM\n"
+    "k+xIX8mxQvlV0n5O9PIha5BX5teZnkHKgL8aKKLKW1BK7YTngsfSzzaeame5iKfz\n"
+    "itAE+OjGF+PFKbwX8Q==\n"
+    "-----END CERTIFICATE-----\n";
+
+const unsigned char kTestCertSha1[] = {
+    0xA6, 0xC8, 0x59, 0xEA, 0xC3, 0x7E, 0x6D, 0x33,
+    0xCF, 0xE2, 0x69, 0x9D, 0x74, 0xE6, 0xF6, 0x8A,
+    0x9E, 0x47, 0xA7, 0xCA};
+const unsigned char kTestCertSha224[] = {
+    0xd4, 0xce, 0xc6, 0xcf, 0x28, 0xcb, 0xe9, 0x77,
+    0x38, 0x36, 0xcf, 0xb1, 0x3b, 0x4a, 0xd7, 0xbd,
+    0xae, 0x24, 0x21, 0x08, 0xcf, 0x6a, 0x44, 0x0d,
+    0x3f, 0x94, 0x2a, 0x5b};
+const unsigned char kTestCertSha256[] = {
+    0x41, 0x6b, 0xb4, 0x93, 0x47, 0x79, 0x77, 0x24,
+    0x77, 0x0b, 0x8b, 0x2e, 0xa6, 0x2b, 0xe0, 0xf9,
+    0x0a, 0xed, 0x1f, 0x31, 0xa6, 0xf7, 0x5c, 0xa1,
+    0x5a, 0xc4, 0xb0, 0xa2, 0xa4, 0x78, 0xb9, 0x76};
+const unsigned char kTestCertSha384[] = {
+    0x42, 0x31, 0x9a, 0x79, 0x1d, 0xd6, 0x08, 0xbf,
+    0x3b, 0xba, 0x36, 0xd8, 0x37, 0x4a, 0x9a, 0x75,
+    0xd3, 0x25, 0x6e, 0x28, 0x92, 0xbe, 0x06, 0xb7,
+    0xc5, 0xa0, 0x83, 0xe3, 0x86, 0xb1, 0x03, 0xfc,
+    0x64, 0x47, 0xd6, 0xd8, 0xaa, 0xd9, 0x36, 0x60,
+    0x04, 0xcc, 0xbe, 0x7d, 0x6a, 0xe8, 0x34, 0x49};
+const unsigned char kTestCertSha512[] = {
+    0x51, 0x1d, 0xec, 0x02, 0x3d, 0x51, 0x45, 0xd3,
+    0xd8, 0x1d, 0xa4, 0x9d, 0x43, 0xc9, 0xee, 0x32,
+    0x6f, 0x4f, 0x37, 0xee, 0xab, 0x3f, 0x25, 0xdf,
+    0x72, 0xfc, 0x61, 0x1a, 0xd5, 0x92, 0xff, 0x6b,
+    0x28, 0x71, 0x58, 0xb3, 0xe1, 0x8a, 0x18, 0xcf,
+    0x61, 0x33, 0x0e, 0x14, 0xc3, 0x04, 0xaa, 0x07,
+    0xf6, 0xa5, 0xda, 0xdc, 0x42, 0x42, 0x22, 0x35,
+    0xce, 0x26, 0x58, 0x4a, 0x33, 0x6d, 0xbc, 0xb6};
+
+// These PEM strings were created by generating an identity with
+// |SSLIdentity::Generate| and invoking |identity->PrivateKeyToPEMString()|,
+// |identity->PublicKeyToPEMString()| and
+// |identity->certificate().ToPEMString()|. If the crypto library is updated,
+// and the update changes the string form of the keys, these will have to be
+// updated too.  The fingerprint, fingerprint algorithm and base64 certificate
+// were created by calling |identity->certificate().GetStats()|.
+static const char kRSA_PRIVATE_KEY_PEM[] =
+    "-----BEGIN PRIVATE KEY-----\n"
+    "MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAMQPqDStRlYeDpkX\n"
+    "erRmv+a1naM8vSVSY0gG2plnrnofViWRW3MRqWC+020MsIj3hPZeSAnt/y/FL/nr\n"
+    "4Ea7NXcwdRo1/1xEK7U/f/cjSg1aunyvHCHwcFcMr31HLFvHr0ZgcFwbgIuFLNEl\n"
+    "7kK5HMO9APz1ntUjek8BmBj8yMl9AgMBAAECgYA8FWBC5GcNtSBcIinkZyigF0A7\n"
+    "6j081sa+J/uNz4xUuI257ZXM6biygUhhvuXK06/XoIULJfhyN0fAm1yb0HtNhiUs\n"
+    "kMOYeon6b8FqFaPjrQf7Gr9FMiIHXNK19uegTMKztXyPZoUWlX84X0iawY95x0Y3\n"
+    "73f6P2rN2UOjlVVjAQJBAOKy3l2w3Zj2w0oAJox0eMwl+RxBNt1C42SHrob2mFUT\n"
+    "rytpVVYOasr8CoDI0kjacjI94sLum+buJoXXX6YTGO0CQQDdZwlYIEkoS3ftfxPa\n"
+    "Ai0YTBzAWvHJg0r8Gk/TkHo6IM+LSsZ9ZYUv/vBe4BKLw1I4hZ+bQvBiq+f8ROtk\n"
+    "+TDRAkAPL3ghwoU1h+IRBO2QHwUwd6K2N9AbBi4BP+168O3HVSg4ujeTKigRLMzv\n"
+    "T4R2iNt5bhfQgvdCgtVlxcWMdF8JAkBwDCg3eEdt5BuyjwBt8XH+/O4ED0KUWCTH\n"
+    "x00k5dZlupsuhE5Fwe4QpzXg3gekwdnHjyCCQ/NCDHvgOMTkmhQxAkA9V03KRX9b\n"
+    "bhvEzY/fu8gEp+EzsER96/D79az5z1BaMGL5OPM2xHBPJATKlswnAa7Lp3QKGZGk\n"
+    "TxslfL18J71s\n"
+    "-----END PRIVATE KEY-----\n";
+static const char kRSA_PUBLIC_KEY_PEM[] =
+    "-----BEGIN PUBLIC KEY-----\n"
+    "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDED6g0rUZWHg6ZF3q0Zr/mtZ2j\n"
+    "PL0lUmNIBtqZZ656H1YlkVtzEalgvtNtDLCI94T2XkgJ7f8vxS/56+BGuzV3MHUa\n"
+    "Nf9cRCu1P3/3I0oNWrp8rxwh8HBXDK99Ryxbx69GYHBcG4CLhSzRJe5CuRzDvQD8\n"
+    "9Z7VI3pPAZgY/MjJfQIDAQAB\n"
+    "-----END PUBLIC KEY-----\n";
+static const char kRSA_CERT_PEM[] =
+    "-----BEGIN CERTIFICATE-----\n"
+    "MIIBnDCCAQWgAwIBAgIJAOEHLgeWYwrpMA0GCSqGSIb3DQEBCwUAMBAxDjAMBgNV\n"
+    "BAMMBXRlc3QxMB4XDTE2MDQyNDE4MTAyMloXDTE2MDUyNTE4MTAyMlowEDEOMAwG\n"
+    "A1UEAwwFdGVzdDEwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMQPqDStRlYe\n"
+    "DpkXerRmv+a1naM8vSVSY0gG2plnrnofViWRW3MRqWC+020MsIj3hPZeSAnt/y/F\n"
+    "L/nr4Ea7NXcwdRo1/1xEK7U/f/cjSg1aunyvHCHwcFcMr31HLFvHr0ZgcFwbgIuF\n"
+    "LNEl7kK5HMO9APz1ntUjek8BmBj8yMl9AgMBAAEwDQYJKoZIhvcNAQELBQADgYEA\n"
+    "C3ehaZFl+oEYN069C2ht/gMzuC77L854RF/x7xRtNZzkcg9TVgXXdM3auUvJi8dx\n"
+    "yTpU3ixErjQvoZew5ngXTEvTY8BSQUijJEaLWh8n6NDKRbEGTdAk8nPAmq9hdCFq\n"
+    "e3UkexqNHm3g/VxG4NUC1Y+w29ai0/Rgh+VvgbDwK+Q=\n"
+    "-----END CERTIFICATE-----\n";
+static const char kRSA_FINGERPRINT[] =
+    "3C:E8:B2:70:09:CF:A9:09:5A:F4:EF:8F:8D:8A:32:FF:EA:04:91:BA:6E:D4:17:78:16"
+    ":2A:EE:F9:9A:DD:E2:2B";
+static const char kRSA_FINGERPRINT_ALGORITHM[] =
+    "sha-256";
+static const char kRSA_BASE64_CERTIFICATE[] =
+    "MIIBnDCCAQWgAwIBAgIJAOEHLgeWYwrpMA0GCSqGSIb3DQEBCwUAMBAxDjAMBgNVBAMMBXRlc3"
+    "QxMB4XDTE2MDQyNDE4MTAyMloXDTE2MDUyNTE4MTAyMlowEDEOMAwGA1UEAwwFdGVzdDEwgZ8w"
+    "DQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMQPqDStRlYeDpkXerRmv+a1naM8vSVSY0gG2plnrn"
+    "ofViWRW3MRqWC+020MsIj3hPZeSAnt/y/FL/nr4Ea7NXcwdRo1/1xEK7U/f/cjSg1aunyvHCHw"
+    "cFcMr31HLFvHr0ZgcFwbgIuFLNEl7kK5HMO9APz1ntUjek8BmBj8yMl9AgMBAAEwDQYJKoZIhv"
+    "cNAQELBQADgYEAC3ehaZFl+oEYN069C2ht/gMzuC77L854RF/x7xRtNZzkcg9TVgXXdM3auUvJ"
+    "i8dxyTpU3ixErjQvoZew5ngXTEvTY8BSQUijJEaLWh8n6NDKRbEGTdAk8nPAmq9hdCFqe3Ukex"
+    "qNHm3g/VxG4NUC1Y+w29ai0/Rgh+VvgbDwK+Q=";
+
+static const char kECDSA_PRIVATE_KEY_PEM[] =
+    "-----BEGIN PRIVATE KEY-----\n"
+    "MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQg/AkEA2hklq7dQ2rN\n"
+    "ZxYL6hOUACL4pn7P4FYlA3ZQhIChRANCAAR7YgdO3utP/8IqVRq8G4VZKreMAxeN\n"
+    "rUa12twthv4uFjuHAHa9D9oyAjncmn+xvZZRyVmKrA56jRzENcEEHoAg\n"
+    "-----END PRIVATE KEY-----\n";
+static const char kECDSA_PUBLIC_KEY_PEM[] =
+    "-----BEGIN PUBLIC KEY-----\n"
+    "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEe2IHTt7rT//CKlUavBuFWSq3jAMX\n"
+    "ja1GtdrcLYb+LhY7hwB2vQ/aMgI53Jp/sb2WUclZiqwOeo0cxDXBBB6AIA==\n"
+    "-----END PUBLIC KEY-----\n";
+static const char kECDSA_CERT_PEM[] =
+    "-----BEGIN CERTIFICATE-----\n"
+    "MIIBFDCBu6ADAgECAgkArpkxjw62sW4wCgYIKoZIzj0EAwIwEDEOMAwGA1UEAwwF\n"
+    "dGVzdDMwHhcNMTYwNDI0MTgxNDM4WhcNMTYwNTI1MTgxNDM4WjAQMQ4wDAYDVQQD\n"
+    "DAV0ZXN0MzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABHtiB07e60//wipVGrwb\n"
+    "hVkqt4wDF42tRrXa3C2G/i4WO4cAdr0P2jICOdyaf7G9llHJWYqsDnqNHMQ1wQQe\n"
+    "gCAwCgYIKoZIzj0EAwIDSAAwRQIhANyreQ/K5yuPPpirsd0e/4WGLHou6bIOSQks\n"
+    "DYzo56NmAiAKOr3u8ol3LmygbUCwEvtWrS8QcJDygxHPACo99hkekw==\n"
+    "-----END CERTIFICATE-----\n";
+static const char kECDSA_FINGERPRINT[] =
+    "9F:47:FA:88:76:3D:18:B8:00:A0:59:9D:C3:5D:34:0B:1F:B8:99:9E:68:DA:F3:A5:DA"
+    ":50:33:A9:FF:4D:31:89";
+static const char kECDSA_FINGERPRINT_ALGORITHM[] =
+    "sha-256";
+static const char kECDSA_BASE64_CERTIFICATE[] =
+    "MIIBFDCBu6ADAgECAgkArpkxjw62sW4wCgYIKoZIzj0EAwIwEDEOMAwGA1UEAwwFdGVzdDMwHh"
+    "cNMTYwNDI0MTgxNDM4WhcNMTYwNTI1MTgxNDM4WjAQMQ4wDAYDVQQDDAV0ZXN0MzBZMBMGByqG"
+    "SM49AgEGCCqGSM49AwEHA0IABHtiB07e60//wipVGrwbhVkqt4wDF42tRrXa3C2G/i4WO4cAdr"
+    "0P2jICOdyaf7G9llHJWYqsDnqNHMQ1wQQegCAwCgYIKoZIzj0EAwIDSAAwRQIhANyreQ/K5yuP"
+    "Ppirsd0e/4WGLHou6bIOSQksDYzo56NmAiAKOr3u8ol3LmygbUCwEvtWrS8QcJDygxHPACo99h"
+    "kekw==";
+
+struct IdentityAndInfo {
+  std::unique_ptr<rtc::SSLIdentity> identity;
+  std::vector<std::string> ders;
+  std::vector<std::string> pems;
+  std::vector<std::string> fingerprints;
+};
+
+IdentityAndInfo CreateFakeIdentityAndInfoFromDers(
+    const std::vector<std::string>& ders) {
+  RTC_CHECK(!ders.empty());
+  IdentityAndInfo info;
+  info.ders = ders;
+  for (const std::string& der : ders) {
+    info.pems.push_back(rtc::SSLIdentity::DerToPem(
+        "CERTIFICATE",
+        reinterpret_cast<const unsigned char*>(der.c_str()),
+        der.length()));
+  }
+  info.identity.reset(new rtc::FakeSSLIdentity(info.pems));
+  // Strip header/footer and newline characters of PEM strings.
+  for (size_t i = 0; i < info.pems.size(); ++i) {
+    rtc::replace_substrs("-----BEGIN CERTIFICATE-----", 27,
+                         "", 0, &info.pems[i]);
+    rtc::replace_substrs("-----END CERTIFICATE-----", 25,
+                         "", 0, &info.pems[i]);
+    rtc::replace_substrs("\n", 1,
+                         "", 0, &info.pems[i]);
+  }
+  // Fingerprints for the whole certificate chain, starting with leaf
+  // certificate.
+  const rtc::SSLCertChain& chain = info.identity->cert_chain();
+  std::unique_ptr<rtc::SSLFingerprint> fp;
+  for (size_t i = 0; i < chain.GetSize(); i++) {
+    fp.reset(rtc::SSLFingerprint::Create("sha-1", &chain.Get(i)));
+    EXPECT_TRUE(fp);
+    info.fingerprints.push_back(fp->GetRfc4572Fingerprint());
+  }
+  EXPECT_EQ(info.ders.size(), info.fingerprints.size());
+  return info;
+}
+
+class SSLIdentityTest : public testing::Test {
+ public:
+  void SetUp() override {
+    identity_rsa1_.reset(SSLIdentity::Generate("test1", rtc::KT_RSA));
+    identity_rsa2_.reset(SSLIdentity::Generate("test2", rtc::KT_RSA));
+    identity_ecdsa1_.reset(SSLIdentity::Generate("test3", rtc::KT_ECDSA));
+    identity_ecdsa2_.reset(SSLIdentity::Generate("test4", rtc::KT_ECDSA));
+
+    ASSERT_TRUE(identity_rsa1_);
+    ASSERT_TRUE(identity_rsa2_);
+    ASSERT_TRUE(identity_ecdsa1_);
+    ASSERT_TRUE(identity_ecdsa2_);
+
+    test_cert_.reset(rtc::SSLCertificate::FromPEMString(kTestCertificate));
+    ASSERT_TRUE(test_cert_);
+  }
+
+  void TestGetSignatureDigestAlgorithm() {
+    std::string digest_algorithm;
+
+    ASSERT_TRUE(identity_rsa1_->certificate().GetSignatureDigestAlgorithm(
+        &digest_algorithm));
+    ASSERT_EQ(rtc::DIGEST_SHA_256, digest_algorithm);
+
+    ASSERT_TRUE(identity_rsa2_->certificate().GetSignatureDigestAlgorithm(
+        &digest_algorithm));
+    ASSERT_EQ(rtc::DIGEST_SHA_256, digest_algorithm);
+
+    ASSERT_TRUE(identity_ecdsa1_->certificate().GetSignatureDigestAlgorithm(
+        &digest_algorithm));
+    ASSERT_EQ(rtc::DIGEST_SHA_256, digest_algorithm);
+
+    ASSERT_TRUE(identity_ecdsa2_->certificate().GetSignatureDigestAlgorithm(
+        &digest_algorithm));
+    ASSERT_EQ(rtc::DIGEST_SHA_256, digest_algorithm);
+
+    // The test certificate has an MD5-based signature.
+    ASSERT_TRUE(test_cert_->GetSignatureDigestAlgorithm(&digest_algorithm));
+    ASSERT_EQ(rtc::DIGEST_MD5, digest_algorithm);
+  }
+
+  typedef unsigned char DigestType[rtc::MessageDigest::kMaxSize];
+
+  void TestDigestHelper(DigestType digest,
+                        const SSLIdentity* identity,
+                        const std::string& algorithm,
+                        size_t expected_len) {
+    DigestType digest1;
+    size_t digest_len;
+    bool rv;
+
+    memset(digest, 0, expected_len);
+    rv = identity->certificate().ComputeDigest(algorithm, digest,
+                                               sizeof(DigestType), &digest_len);
+    EXPECT_TRUE(rv);
+    EXPECT_EQ(expected_len, digest_len);
+
+    // Repeat digest computation for the identity as a sanity check.
+    memset(digest1, 0xff, expected_len);
+    rv = identity->certificate().ComputeDigest(algorithm, digest1,
+                                               sizeof(DigestType), &digest_len);
+    EXPECT_TRUE(rv);
+    EXPECT_EQ(expected_len, digest_len);
+
+    EXPECT_EQ(0, memcmp(digest, digest1, expected_len));
+  }
+
+  void TestDigestForGeneratedCert(const std::string& algorithm,
+                                  size_t expected_len) {
+    DigestType digest[4];
+
+    ASSERT_TRUE(expected_len <= sizeof(DigestType));
+
+    TestDigestHelper(digest[0], identity_rsa1_.get(), algorithm, expected_len);
+    TestDigestHelper(digest[1], identity_rsa2_.get(), algorithm, expected_len);
+    TestDigestHelper(digest[2], identity_ecdsa1_.get(), algorithm,
+                     expected_len);
+    TestDigestHelper(digest[3], identity_ecdsa2_.get(), algorithm,
+                     expected_len);
+
+    // Sanity check that all four digests are unique.  This could theoretically
+    // fail, since cryptographic hash collisions have a non-zero probability.
+    for (int i = 0; i < 4; i++) {
+      for (int j = 0; j < 4; j++) {
+        if (i != j)
+          EXPECT_NE(0, memcmp(digest[i], digest[j], expected_len));
+      }
+    }
+  }
+
+  void TestDigestForFixedCert(const std::string& algorithm,
+                              size_t expected_len,
+                              const unsigned char* expected_digest) {
+    bool rv;
+    DigestType digest;
+    size_t digest_len;
+
+    ASSERT_TRUE(expected_len <= sizeof(DigestType));
+
+    rv = test_cert_->ComputeDigest(algorithm, digest, sizeof(digest),
+                                   &digest_len);
+    EXPECT_TRUE(rv);
+    EXPECT_EQ(expected_len, digest_len);
+    EXPECT_EQ(0, memcmp(digest, expected_digest, expected_len));
+  }
+
+  void TestCloningIdentity(const SSLIdentity& identity) {
+    // Convert |identity| to PEM strings and create a new identity by converting
+    // back from the string format.
+    std::string priv_pem = identity.PrivateKeyToPEMString();
+    std::string publ_pem = identity.PublicKeyToPEMString();
+    std::string cert_pem = identity.certificate().ToPEMString();
+    std::unique_ptr<SSLIdentity> clone(
+        SSLIdentity::FromPEMStrings(priv_pem, cert_pem));
+    EXPECT_TRUE(clone);
+
+    // Make sure the clone is identical to the original.
+    EXPECT_TRUE(identity == *clone);
+    ASSERT_EQ(identity.certificate().CertificateExpirationTime(),
+              clone->certificate().CertificateExpirationTime());
+
+    // At this point we are confident that the identities are identical. To be
+    // extra sure, we compare PEM strings of the clone with the original. Note
+    // that the PEM strings of two identities are not strictly guaranteed to be
+    // equal (they describe structs whose members could be listed in a different
+    // order, for example). But because the same function is used to produce
+    // both PEMs, its a good enough bet that this comparison will work. If the
+    // assumption stops holding in the future we can always remove this from the
+    // unittest.
+    std::string clone_priv_pem = clone->PrivateKeyToPEMString();
+    std::string clone_publ_pem = clone->PublicKeyToPEMString();
+    std::string clone_cert_pem = clone->certificate().ToPEMString();
+    ASSERT_EQ(priv_pem, clone_priv_pem);
+    ASSERT_EQ(publ_pem, clone_publ_pem);
+    ASSERT_EQ(cert_pem, clone_cert_pem);
+  }
+
+ protected:
+  std::unique_ptr<SSLIdentity> identity_rsa1_;
+  std::unique_ptr<SSLIdentity> identity_rsa2_;
+  std::unique_ptr<SSLIdentity> identity_ecdsa1_;
+  std::unique_ptr<SSLIdentity> identity_ecdsa2_;
+  std::unique_ptr<rtc::SSLCertificate> test_cert_;
+};
+
+TEST_F(SSLIdentityTest, FixedDigestSHA1) {
+  TestDigestForFixedCert(rtc::DIGEST_SHA_1, 20, kTestCertSha1);
+}
+
+// HASH_AlgSHA224 is not supported in the chromium linux build.
+TEST_F(SSLIdentityTest, FixedDigestSHA224) {
+  TestDigestForFixedCert(rtc::DIGEST_SHA_224, 28, kTestCertSha224);
+}
+
+TEST_F(SSLIdentityTest, FixedDigestSHA256) {
+  TestDigestForFixedCert(rtc::DIGEST_SHA_256, 32, kTestCertSha256);
+}
+
+TEST_F(SSLIdentityTest, FixedDigestSHA384) {
+  TestDigestForFixedCert(rtc::DIGEST_SHA_384, 48, kTestCertSha384);
+}
+
+TEST_F(SSLIdentityTest, FixedDigestSHA512) {
+  TestDigestForFixedCert(rtc::DIGEST_SHA_512, 64, kTestCertSha512);
+}
+
+// HASH_AlgSHA224 is not supported in the chromium linux build.
+TEST_F(SSLIdentityTest, DigestSHA224) {
+  TestDigestForGeneratedCert(rtc::DIGEST_SHA_224, 28);
+}
+
+TEST_F(SSLIdentityTest, DigestSHA256) {
+  TestDigestForGeneratedCert(rtc::DIGEST_SHA_256, 32);
+}
+
+TEST_F(SSLIdentityTest, DigestSHA384) {
+  TestDigestForGeneratedCert(rtc::DIGEST_SHA_384, 48);
+}
+
+TEST_F(SSLIdentityTest, DigestSHA512) {
+  TestDigestForGeneratedCert(rtc::DIGEST_SHA_512, 64);
+}
+
+TEST_F(SSLIdentityTest, IdentityComparison) {
+  EXPECT_TRUE(*identity_rsa1_ == *identity_rsa1_);
+  EXPECT_FALSE(*identity_rsa1_ == *identity_rsa2_);
+  EXPECT_FALSE(*identity_rsa1_ == *identity_ecdsa1_);
+  EXPECT_FALSE(*identity_rsa1_ == *identity_ecdsa2_);
+
+  EXPECT_TRUE(*identity_rsa2_ == *identity_rsa2_);
+  EXPECT_FALSE(*identity_rsa2_ == *identity_ecdsa1_);
+  EXPECT_FALSE(*identity_rsa2_ == *identity_ecdsa2_);
+
+  EXPECT_TRUE(*identity_ecdsa1_ == *identity_ecdsa1_);
+  EXPECT_FALSE(*identity_ecdsa1_ == *identity_ecdsa2_);
+}
+
+TEST_F(SSLIdentityTest, FromPEMStringsRSA) {
+  std::unique_ptr<SSLIdentity> identity(
+      SSLIdentity::FromPEMStrings(kRSA_PRIVATE_KEY_PEM, kRSA_CERT_PEM));
+  EXPECT_TRUE(identity);
+  EXPECT_EQ(kRSA_PRIVATE_KEY_PEM, identity->PrivateKeyToPEMString());
+  EXPECT_EQ(kRSA_PUBLIC_KEY_PEM, identity->PublicKeyToPEMString());
+  EXPECT_EQ(kRSA_CERT_PEM, identity->certificate().ToPEMString());
+}
+
+TEST_F(SSLIdentityTest, FromPEMStringsEC) {
+  std::unique_ptr<SSLIdentity> identity(
+      SSLIdentity::FromPEMStrings(kECDSA_PRIVATE_KEY_PEM, kECDSA_CERT_PEM));
+  EXPECT_TRUE(identity);
+  EXPECT_EQ(kECDSA_PRIVATE_KEY_PEM, identity->PrivateKeyToPEMString());
+  EXPECT_EQ(kECDSA_PUBLIC_KEY_PEM, identity->PublicKeyToPEMString());
+  EXPECT_EQ(kECDSA_CERT_PEM, identity->certificate().ToPEMString());
+}
+
+TEST_F(SSLIdentityTest, CloneIdentityRSA) {
+  TestCloningIdentity(*identity_rsa1_);
+  TestCloningIdentity(*identity_rsa2_);
+}
+
+TEST_F(SSLIdentityTest, CloneIdentityECDSA) {
+  TestCloningIdentity(*identity_ecdsa1_);
+  TestCloningIdentity(*identity_ecdsa2_);
+}
+
+TEST_F(SSLIdentityTest, PemDerConversion) {
+  std::string der;
+  EXPECT_TRUE(SSLIdentity::PemToDer("CERTIFICATE", kTestCertificate, &der));
+
+  EXPECT_EQ(kTestCertificate, SSLIdentity::DerToPem(
+      "CERTIFICATE",
+      reinterpret_cast<const unsigned char*>(der.data()), der.length()));
+}
+
+TEST_F(SSLIdentityTest, GetSignatureDigestAlgorithm) {
+  TestGetSignatureDigestAlgorithm();
+}
+
+TEST_F(SSLIdentityTest, SSLCertificateGetStatsRSA) {
+  std::unique_ptr<SSLIdentity> identity(
+      SSLIdentity::FromPEMStrings(kRSA_PRIVATE_KEY_PEM, kRSA_CERT_PEM));
+  std::unique_ptr<rtc::SSLCertificateStats> stats =
+      identity->certificate().GetStats();
+  EXPECT_EQ(stats->fingerprint, kRSA_FINGERPRINT);
+  EXPECT_EQ(stats->fingerprint_algorithm, kRSA_FINGERPRINT_ALGORITHM);
+  EXPECT_EQ(stats->base64_certificate, kRSA_BASE64_CERTIFICATE);
+  EXPECT_FALSE(stats->issuer);
+}
+
+TEST_F(SSLIdentityTest, SSLCertificateGetStatsECDSA) {
+  std::unique_ptr<SSLIdentity> identity(
+      SSLIdentity::FromPEMStrings(kECDSA_PRIVATE_KEY_PEM, kECDSA_CERT_PEM));
+  std::unique_ptr<rtc::SSLCertificateStats> stats =
+      identity->certificate().GetStats();
+  EXPECT_EQ(stats->fingerprint, kECDSA_FINGERPRINT);
+  EXPECT_EQ(stats->fingerprint_algorithm, kECDSA_FINGERPRINT_ALGORITHM);
+  EXPECT_EQ(stats->base64_certificate, kECDSA_BASE64_CERTIFICATE);
+  EXPECT_FALSE(stats->issuer);
+}
+
+TEST_F(SSLIdentityTest, SSLCertificateGetStatsWithChain) {
+  std::vector<std::string> ders;
+  ders.push_back("every der results in");
+  ders.push_back("an identity + certificate");
+  ders.push_back("in a certificate chain");
+  IdentityAndInfo info = CreateFakeIdentityAndInfoFromDers(ders);
+  EXPECT_TRUE(info.identity);
+  EXPECT_EQ(info.ders, ders);
+  EXPECT_EQ(info.pems.size(), info.ders.size());
+  EXPECT_EQ(info.fingerprints.size(), info.ders.size());
+
+  std::unique_ptr<rtc::SSLCertificateStats> first_stats =
+      info.identity->cert_chain().GetStats();
+  rtc::SSLCertificateStats* cert_stats = first_stats.get();
+  for (size_t i = 0; i < info.ders.size(); ++i) {
+    EXPECT_EQ(cert_stats->fingerprint, info.fingerprints[i]);
+    EXPECT_EQ(cert_stats->fingerprint_algorithm, "sha-1");
+    EXPECT_EQ(cert_stats->base64_certificate, info.pems[i]);
+    cert_stats = cert_stats->issuer.get();
+    EXPECT_EQ(static_cast<bool>(cert_stats), i + 1 < info.ders.size());
+  }
+}
+
+class SSLIdentityExpirationTest : public testing::Test {
+ public:
+  SSLIdentityExpirationTest() {
+    // Set use of the test RNG to get deterministic expiration timestamp.
+    rtc::SetRandomTestMode(true);
+  }
+  ~SSLIdentityExpirationTest() override {
+    // Put it back for the next test.
+    rtc::SetRandomTestMode(false);
+  }
+
+  void TestASN1TimeToSec() {
+    struct asn_example {
+      const char* string;
+      bool long_format;
+      int64_t want;
+    } static const data[] = {
+      // Valid examples.
+      {"19700101000000Z",  true,  0},
+      {"700101000000Z",    false, 0},
+      {"19700101000001Z",  true,  1},
+      {"700101000001Z",    false, 1},
+      {"19700101000100Z",  true,  60},
+      {"19700101000101Z",  true,  61},
+      {"19700101010000Z",  true,  3600},
+      {"19700101010001Z",  true,  3601},
+      {"19700101010100Z",  true,  3660},
+      {"19700101010101Z",  true,  3661},
+      {"710911012345Z",    false, 53400225},
+      {"20000101000000Z",  true,  946684800},
+      {"20000101000000Z",  true,  946684800},
+      {"20151130140156Z",  true,  1448892116},
+      {"151130140156Z",    false, 1448892116},
+      {"20491231235959Z",  true,  2524607999},
+      {"491231235959Z",    false, 2524607999},
+      {"20500101000000Z",  true,  2524607999+1},
+      {"20700101000000Z",  true,  3155760000},
+      {"21000101000000Z",  true,  4102444800},
+      {"24000101000000Z",  true,  13569465600},
+
+      // Invalid examples.
+      {"19700101000000",    true,  -1},  // missing Z long format
+      {"19700101000000X",   true,  -1},  // X instead of Z long format
+      {"197001010000000",   true,  -1},  // 0 instead of Z long format
+      {"1970010100000000Z", true,  -1},  // excess digits long format
+      {"700101000000",      false, -1},  // missing Z short format
+      {"700101000000X",     false, -1},  // X instead of Z short format
+      {"7001010000000",     false, -1},  // 0 instead of Z short format
+      {"70010100000000Z",   false, -1},  // excess digits short format
+      {":9700101000000Z",   true,  -1},  // invalid character
+      {"1:700101000001Z",   true,  -1},  // invalid character
+      {"19:00101000100Z",   true,  -1},  // invalid character
+      {"197:0101000101Z",   true,  -1},  // invalid character
+      {"1970:101010000Z",   true,  -1},  // invalid character
+      {"19700:01010001Z",   true,  -1},  // invalid character
+      {"197001:1010100Z",   true,  -1},  // invalid character
+      {"1970010:010101Z",   true,  -1},  // invalid character
+      {"70010100:000Z",     false, -1},  // invalid character
+      {"700101000:01Z",     false, -1},  // invalid character
+      {"2000010100:000Z",   true,  -1},  // invalid character
+      {"21000101000:00Z",   true,  -1},  // invalid character
+      {"240001010000:0Z",   true,  -1},  // invalid character
+      {"500101000000Z",     false, -1},  // but too old for epoch
+      {"691231235959Z",     false, -1},  // too old for epoch
+      {"19611118043000Z",   false, -1},  // way too old for epoch
+    };
+
+    unsigned char buf[20];
+
+    // Run all examples and check for the expected result.
+    for (const auto& entry : data) {
+      size_t length = strlen(entry.string);
+      memcpy(buf, entry.string, length);    // Copy the ASN1 string...
+      buf[length] = rtc::CreateRandomId();  // ...and terminate it with junk.
+      int64_t res = rtc::ASN1TimeToSec(buf, length, entry.long_format);
+      RTC_LOG(LS_VERBOSE) << entry.string;
+      ASSERT_EQ(entry.want, res);
+    }
+    // Run all examples again, but with an invalid length.
+    for (const auto& entry : data) {
+      size_t length = strlen(entry.string);
+      memcpy(buf, entry.string, length);    // Copy the ASN1 string...
+      buf[length] = rtc::CreateRandomId();  // ...and terminate it with junk.
+      int64_t res = rtc::ASN1TimeToSec(buf, length - 1, entry.long_format);
+      RTC_LOG(LS_VERBOSE) << entry.string;
+      ASSERT_EQ(-1, res);
+    }
+  }
+
+  void TestExpireTime(int times) {
+    // We test just ECDSA here since what we're out to exercise is the
+    // interfaces for expiration setting and reading.
+    for (int i = 0; i < times; i++) {
+      // We limit the time to < 2^31 here, i.e., we stay before 2038, since else
+      // we hit time offset limitations in OpenSSL on some 32-bit systems.
+      time_t time_before_generation = time(nullptr);
+      time_t lifetime =
+          rtc::CreateRandomId() % (0x80000000 - time_before_generation);
+      rtc::KeyParams key_params = rtc::KeyParams::ECDSA(rtc::EC_NIST_P256);
+      SSLIdentity* identity =
+          rtc::SSLIdentity::GenerateWithExpiration("", key_params, lifetime);
+      time_t time_after_generation = time(nullptr);
+      EXPECT_LE(time_before_generation + lifetime,
+                identity->certificate().CertificateExpirationTime());
+      EXPECT_GE(time_after_generation + lifetime,
+                identity->certificate().CertificateExpirationTime());
+      delete identity;
+    }
+  }
+};
+
+TEST_F(SSLIdentityExpirationTest, TestASN1TimeToSec) {
+  TestASN1TimeToSec();
+}
+
+TEST_F(SSLIdentityExpirationTest, TestExpireTime) {
+  TestExpireTime(500);
+}
diff --git a/rtc_base/sslroots.h b/rtc_base/sslroots.h
new file mode 100644
index 0000000..7309a05
--- /dev/null
+++ b/rtc_base/sslroots.h
@@ -0,0 +1,4280 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SSLROOTS_H_
+#define RTC_BASE_SSLROOTS_H_
+
+// This file is the root certificates in C form that are needed to connect to
+// Google.
+
+// It was generated with the following command line:
+// > python tools/sslroots/generate_sslroots.py
+//    https://pki.google.com/roots.pem
+
+/* subject:/C=BE/O=GlobalSign nv-sa/OU=Root CA/CN=GlobalSign Root CA */
+/* issuer :/C=BE/O=GlobalSign nv-sa/OU=Root CA/CN=GlobalSign Root CA */
+
+
+const unsigned char GlobalSign_Root_CA_certificate[889]={
+0x30,0x82,0x03,0x75,0x30,0x82,0x02,0x5D,0xA0,0x03,0x02,0x01,0x02,0x02,0x0B,0x04,
+0x00,0x00,0x00,0x00,0x01,0x15,0x4B,0x5A,0xC3,0x94,0x30,0x0D,0x06,0x09,0x2A,0x86,
+0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,0x57,0x31,0x0B,0x30,0x09,0x06,
+0x03,0x55,0x04,0x06,0x13,0x02,0x42,0x45,0x31,0x19,0x30,0x17,0x06,0x03,0x55,0x04,
+0x0A,0x13,0x10,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,0x20,0x6E,0x76,
+0x2D,0x73,0x61,0x31,0x10,0x30,0x0E,0x06,0x03,0x55,0x04,0x0B,0x13,0x07,0x52,0x6F,
+0x6F,0x74,0x20,0x43,0x41,0x31,0x1B,0x30,0x19,0x06,0x03,0x55,0x04,0x03,0x13,0x12,
+0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,0x20,0x52,0x6F,0x6F,0x74,0x20,
+0x43,0x41,0x30,0x1E,0x17,0x0D,0x39,0x38,0x30,0x39,0x30,0x31,0x31,0x32,0x30,0x30,
+0x30,0x30,0x5A,0x17,0x0D,0x32,0x38,0x30,0x31,0x32,0x38,0x31,0x32,0x30,0x30,0x30,
+0x30,0x5A,0x30,0x57,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x42,
+0x45,0x31,0x19,0x30,0x17,0x06,0x03,0x55,0x04,0x0A,0x13,0x10,0x47,0x6C,0x6F,0x62,
+0x61,0x6C,0x53,0x69,0x67,0x6E,0x20,0x6E,0x76,0x2D,0x73,0x61,0x31,0x10,0x30,0x0E,
+0x06,0x03,0x55,0x04,0x0B,0x13,0x07,0x52,0x6F,0x6F,0x74,0x20,0x43,0x41,0x31,0x1B,
+0x30,0x19,0x06,0x03,0x55,0x04,0x03,0x13,0x12,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,
+0x69,0x67,0x6E,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x41,0x30,0x82,0x01,0x22,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,
+0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01,0x01,0x00,0xDA,0x0E,0xE6,0x99,
+0x8D,0xCE,0xA3,0xE3,0x4F,0x8A,0x7E,0xFB,0xF1,0x8B,0x83,0x25,0x6B,0xEA,0x48,0x1F,
+0xF1,0x2A,0xB0,0xB9,0x95,0x11,0x04,0xBD,0xF0,0x63,0xD1,0xE2,0x67,0x66,0xCF,0x1C,
+0xDD,0xCF,0x1B,0x48,0x2B,0xEE,0x8D,0x89,0x8E,0x9A,0xAF,0x29,0x80,0x65,0xAB,0xE9,
+0xC7,0x2D,0x12,0xCB,0xAB,0x1C,0x4C,0x70,0x07,0xA1,0x3D,0x0A,0x30,0xCD,0x15,0x8D,
+0x4F,0xF8,0xDD,0xD4,0x8C,0x50,0x15,0x1C,0xEF,0x50,0xEE,0xC4,0x2E,0xF7,0xFC,0xE9,
+0x52,0xF2,0x91,0x7D,0xE0,0x6D,0xD5,0x35,0x30,0x8E,0x5E,0x43,0x73,0xF2,0x41,0xE9,
+0xD5,0x6A,0xE3,0xB2,0x89,0x3A,0x56,0x39,0x38,0x6F,0x06,0x3C,0x88,0x69,0x5B,0x2A,
+0x4D,0xC5,0xA7,0x54,0xB8,0x6C,0x89,0xCC,0x9B,0xF9,0x3C,0xCA,0xE5,0xFD,0x89,0xF5,
+0x12,0x3C,0x92,0x78,0x96,0xD6,0xDC,0x74,0x6E,0x93,0x44,0x61,0xD1,0x8D,0xC7,0x46,
+0xB2,0x75,0x0E,0x86,0xE8,0x19,0x8A,0xD5,0x6D,0x6C,0xD5,0x78,0x16,0x95,0xA2,0xE9,
+0xC8,0x0A,0x38,0xEB,0xF2,0x24,0x13,0x4F,0x73,0x54,0x93,0x13,0x85,0x3A,0x1B,0xBC,
+0x1E,0x34,0xB5,0x8B,0x05,0x8C,0xB9,0x77,0x8B,0xB1,0xDB,0x1F,0x20,0x91,0xAB,0x09,
+0x53,0x6E,0x90,0xCE,0x7B,0x37,0x74,0xB9,0x70,0x47,0x91,0x22,0x51,0x63,0x16,0x79,
+0xAE,0xB1,0xAE,0x41,0x26,0x08,0xC8,0x19,0x2B,0xD1,0x46,0xAA,0x48,0xD6,0x64,0x2A,
+0xD7,0x83,0x34,0xFF,0x2C,0x2A,0xC1,0x6C,0x19,0x43,0x4A,0x07,0x85,0xE7,0xD3,0x7C,
+0xF6,0x21,0x68,0xEF,0xEA,0xF2,0x52,0x9F,0x7F,0x93,0x90,0xCF,0x02,0x03,0x01,0x00,
+0x01,0xA3,0x42,0x30,0x40,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,
+0x04,0x03,0x02,0x01,0x06,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,
+0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,
+0x14,0x60,0x7B,0x66,0x1A,0x45,0x0D,0x97,0xCA,0x89,0x50,0x2F,0x7D,0x04,0xCD,0x34,
+0xA8,0xFF,0xFC,0xFD,0x4B,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,
+0x01,0x05,0x05,0x00,0x03,0x82,0x01,0x01,0x00,0xD6,0x73,0xE7,0x7C,0x4F,0x76,0xD0,
+0x8D,0xBF,0xEC,0xBA,0xA2,0xBE,0x34,0xC5,0x28,0x32,0xB5,0x7C,0xFC,0x6C,0x9C,0x2C,
+0x2B,0xBD,0x09,0x9E,0x53,0xBF,0x6B,0x5E,0xAA,0x11,0x48,0xB6,0xE5,0x08,0xA3,0xB3,
+0xCA,0x3D,0x61,0x4D,0xD3,0x46,0x09,0xB3,0x3E,0xC3,0xA0,0xE3,0x63,0x55,0x1B,0xF2,
+0xBA,0xEF,0xAD,0x39,0xE1,0x43,0xB9,0x38,0xA3,0xE6,0x2F,0x8A,0x26,0x3B,0xEF,0xA0,
+0x50,0x56,0xF9,0xC6,0x0A,0xFD,0x38,0xCD,0xC4,0x0B,0x70,0x51,0x94,0x97,0x98,0x04,
+0xDF,0xC3,0x5F,0x94,0xD5,0x15,0xC9,0x14,0x41,0x9C,0xC4,0x5D,0x75,0x64,0x15,0x0D,
+0xFF,0x55,0x30,0xEC,0x86,0x8F,0xFF,0x0D,0xEF,0x2C,0xB9,0x63,0x46,0xF6,0xAA,0xFC,
+0xDF,0xBC,0x69,0xFD,0x2E,0x12,0x48,0x64,0x9A,0xE0,0x95,0xF0,0xA6,0xEF,0x29,0x8F,
+0x01,0xB1,0x15,0xB5,0x0C,0x1D,0xA5,0xFE,0x69,0x2C,0x69,0x24,0x78,0x1E,0xB3,0xA7,
+0x1C,0x71,0x62,0xEE,0xCA,0xC8,0x97,0xAC,0x17,0x5D,0x8A,0xC2,0xF8,0x47,0x86,0x6E,
+0x2A,0xC4,0x56,0x31,0x95,0xD0,0x67,0x89,0x85,0x2B,0xF9,0x6C,0xA6,0x5D,0x46,0x9D,
+0x0C,0xAA,0x82,0xE4,0x99,0x51,0xDD,0x70,0xB7,0xDB,0x56,0x3D,0x61,0xE4,0x6A,0xE1,
+0x5C,0xD6,0xF6,0xFE,0x3D,0xDE,0x41,0xCC,0x07,0xAE,0x63,0x52,0xBF,0x53,0x53,0xF4,
+0x2B,0xE9,0xC7,0xFD,0xB6,0xF7,0x82,0x5F,0x85,0xD2,0x41,0x18,0xDB,0x81,0xB3,0x04,
+0x1C,0xC5,0x1F,0xA4,0x80,0x6F,0x15,0x20,0xC9,0xDE,0x0C,0x88,0x0A,0x1D,0xD6,0x66,
+0x55,0xE2,0xFC,0x48,0xC9,0x29,0x26,0x69,0xE0,
+};
+
+
+/* subject:/C=US/ST=New Jersey/L=Jersey City/O=The USERTRUST Network/CN=USERTrust RSA Certification Authority */
+/* issuer :/C=US/ST=New Jersey/L=Jersey City/O=The USERTRUST Network/CN=USERTrust RSA Certification Authority */
+
+
+const unsigned char USERTrust_RSA_Certification_Authority_certificate[1506]={
+0x30,0x82,0x05,0xDE,0x30,0x82,0x03,0xC6,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x01,
+0xFD,0x6D,0x30,0xFC,0xA3,0xCA,0x51,0xA8,0x1B,0xBC,0x64,0x0E,0x35,0x03,0x2D,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0C,0x05,0x00,0x30,0x81,
+0x88,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x13,
+0x30,0x11,0x06,0x03,0x55,0x04,0x08,0x13,0x0A,0x4E,0x65,0x77,0x20,0x4A,0x65,0x72,
+0x73,0x65,0x79,0x31,0x14,0x30,0x12,0x06,0x03,0x55,0x04,0x07,0x13,0x0B,0x4A,0x65,
+0x72,0x73,0x65,0x79,0x20,0x43,0x69,0x74,0x79,0x31,0x1E,0x30,0x1C,0x06,0x03,0x55,
+0x04,0x0A,0x13,0x15,0x54,0x68,0x65,0x20,0x55,0x53,0x45,0x52,0x54,0x52,0x55,0x53,
+0x54,0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,0x6B,0x31,0x2E,0x30,0x2C,0x06,0x03,0x55,
+0x04,0x03,0x13,0x25,0x55,0x53,0x45,0x52,0x54,0x72,0x75,0x73,0x74,0x20,0x52,0x53,
+0x41,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,
+0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x30,0x1E,0x17,0x0D,0x31,0x30,0x30,
+0x32,0x30,0x31,0x30,0x30,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x33,0x38,0x30,0x31,
+0x31,0x38,0x32,0x33,0x35,0x39,0x35,0x39,0x5A,0x30,0x81,0x88,0x31,0x0B,0x30,0x09,
+0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x13,0x30,0x11,0x06,0x03,0x55,
+0x04,0x08,0x13,0x0A,0x4E,0x65,0x77,0x20,0x4A,0x65,0x72,0x73,0x65,0x79,0x31,0x14,
+0x30,0x12,0x06,0x03,0x55,0x04,0x07,0x13,0x0B,0x4A,0x65,0x72,0x73,0x65,0x79,0x20,
+0x43,0x69,0x74,0x79,0x31,0x1E,0x30,0x1C,0x06,0x03,0x55,0x04,0x0A,0x13,0x15,0x54,
+0x68,0x65,0x20,0x55,0x53,0x45,0x52,0x54,0x52,0x55,0x53,0x54,0x20,0x4E,0x65,0x74,
+0x77,0x6F,0x72,0x6B,0x31,0x2E,0x30,0x2C,0x06,0x03,0x55,0x04,0x03,0x13,0x25,0x55,
+0x53,0x45,0x52,0x54,0x72,0x75,0x73,0x74,0x20,0x52,0x53,0x41,0x20,0x43,0x65,0x72,
+0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,
+0x72,0x69,0x74,0x79,0x30,0x82,0x02,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,
+0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x02,0x0F,0x00,0x30,0x82,0x02,0x0A,
+0x02,0x82,0x02,0x01,0x00,0x80,0x12,0x65,0x17,0x36,0x0E,0xC3,0xDB,0x08,0xB3,0xD0,
+0xAC,0x57,0x0D,0x76,0xED,0xCD,0x27,0xD3,0x4C,0xAD,0x50,0x83,0x61,0xE2,0xAA,0x20,
+0x4D,0x09,0x2D,0x64,0x09,0xDC,0xCE,0x89,0x9F,0xCC,0x3D,0xA9,0xEC,0xF6,0xCF,0xC1,
+0xDC,0xF1,0xD3,0xB1,0xD6,0x7B,0x37,0x28,0x11,0x2B,0x47,0xDA,0x39,0xC6,0xBC,0x3A,
+0x19,0xB4,0x5F,0xA6,0xBD,0x7D,0x9D,0xA3,0x63,0x42,0xB6,0x76,0xF2,0xA9,0x3B,0x2B,
+0x91,0xF8,0xE2,0x6F,0xD0,0xEC,0x16,0x20,0x90,0x09,0x3E,0xE2,0xE8,0x74,0xC9,0x18,
+0xB4,0x91,0xD4,0x62,0x64,0xDB,0x7F,0xA3,0x06,0xF1,0x88,0x18,0x6A,0x90,0x22,0x3C,
+0xBC,0xFE,0x13,0xF0,0x87,0x14,0x7B,0xF6,0xE4,0x1F,0x8E,0xD4,0xE4,0x51,0xC6,0x11,
+0x67,0x46,0x08,0x51,0xCB,0x86,0x14,0x54,0x3F,0xBC,0x33,0xFE,0x7E,0x6C,0x9C,0xFF,
+0x16,0x9D,0x18,0xBD,0x51,0x8E,0x35,0xA6,0xA7,0x66,0xC8,0x72,0x67,0xDB,0x21,0x66,
+0xB1,0xD4,0x9B,0x78,0x03,0xC0,0x50,0x3A,0xE8,0xCC,0xF0,0xDC,0xBC,0x9E,0x4C,0xFE,
+0xAF,0x05,0x96,0x35,0x1F,0x57,0x5A,0xB7,0xFF,0xCE,0xF9,0x3D,0xB7,0x2C,0xB6,0xF6,
+0x54,0xDD,0xC8,0xE7,0x12,0x3A,0x4D,0xAE,0x4C,0x8A,0xB7,0x5C,0x9A,0xB4,0xB7,0x20,
+0x3D,0xCA,0x7F,0x22,0x34,0xAE,0x7E,0x3B,0x68,0x66,0x01,0x44,0xE7,0x01,0x4E,0x46,
+0x53,0x9B,0x33,0x60,0xF7,0x94,0xBE,0x53,0x37,0x90,0x73,0x43,0xF3,0x32,0xC3,0x53,
+0xEF,0xDB,0xAA,0xFE,0x74,0x4E,0x69,0xC7,0x6B,0x8C,0x60,0x93,0xDE,0xC4,0xC7,0x0C,
+0xDF,0xE1,0x32,0xAE,0xCC,0x93,0x3B,0x51,0x78,0x95,0x67,0x8B,0xEE,0x3D,0x56,0xFE,
+0x0C,0xD0,0x69,0x0F,0x1B,0x0F,0xF3,0x25,0x26,0x6B,0x33,0x6D,0xF7,0x6E,0x47,0xFA,
+0x73,0x43,0xE5,0x7E,0x0E,0xA5,0x66,0xB1,0x29,0x7C,0x32,0x84,0x63,0x55,0x89,0xC4,
+0x0D,0xC1,0x93,0x54,0x30,0x19,0x13,0xAC,0xD3,0x7D,0x37,0xA7,0xEB,0x5D,0x3A,0x6C,
+0x35,0x5C,0xDB,0x41,0xD7,0x12,0xDA,0xA9,0x49,0x0B,0xDF,0xD8,0x80,0x8A,0x09,0x93,
+0x62,0x8E,0xB5,0x66,0xCF,0x25,0x88,0xCD,0x84,0xB8,0xB1,0x3F,0xA4,0x39,0x0F,0xD9,
+0x02,0x9E,0xEB,0x12,0x4C,0x95,0x7C,0xF3,0x6B,0x05,0xA9,0x5E,0x16,0x83,0xCC,0xB8,
+0x67,0xE2,0xE8,0x13,0x9D,0xCC,0x5B,0x82,0xD3,0x4C,0xB3,0xED,0x5B,0xFF,0xDE,0xE5,
+0x73,0xAC,0x23,0x3B,0x2D,0x00,0xBF,0x35,0x55,0x74,0x09,0x49,0xD8,0x49,0x58,0x1A,
+0x7F,0x92,0x36,0xE6,0x51,0x92,0x0E,0xF3,0x26,0x7D,0x1C,0x4D,0x17,0xBC,0xC9,0xEC,
+0x43,0x26,0xD0,0xBF,0x41,0x5F,0x40,0xA9,0x44,0x44,0xF4,0x99,0xE7,0x57,0x87,0x9E,
+0x50,0x1F,0x57,0x54,0xA8,0x3E,0xFD,0x74,0x63,0x2F,0xB1,0x50,0x65,0x09,0xE6,0x58,
+0x42,0x2E,0x43,0x1A,0x4C,0xB4,0xF0,0x25,0x47,0x59,0xFA,0x04,0x1E,0x93,0xD4,0x26,
+0x46,0x4A,0x50,0x81,0xB2,0xDE,0xBE,0x78,0xB7,0xFC,0x67,0x15,0xE1,0xC9,0x57,0x84,
+0x1E,0x0F,0x63,0xD6,0xE9,0x62,0xBA,0xD6,0x5F,0x55,0x2E,0xEA,0x5C,0xC6,0x28,0x08,
+0x04,0x25,0x39,0xB8,0x0E,0x2B,0xA9,0xF2,0x4C,0x97,0x1C,0x07,0x3F,0x0D,0x52,0xF5,
+0xED,0xEF,0x2F,0x82,0x0F,0x02,0x03,0x01,0x00,0x01,0xA3,0x42,0x30,0x40,0x30,0x1D,
+0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x53,0x79,0xBF,0x5A,0xAA,0x2B,0x4A,
+0xCF,0x54,0x80,0xE1,0xD8,0x9B,0xC0,0x9D,0xF2,0xB2,0x03,0x66,0xCB,0x30,0x0E,0x06,
+0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x0F,0x06,
+0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x0D,
+0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0C,0x05,0x00,0x03,0x82,0x02,
+0x01,0x00,0x5C,0xD4,0x7C,0x0D,0xCF,0xF7,0x01,0x7D,0x41,0x99,0x65,0x0C,0x73,0xC5,
+0x52,0x9F,0xCB,0xF8,0xCF,0x99,0x06,0x7F,0x1B,0xDA,0x43,0x15,0x9F,0x9E,0x02,0x55,
+0x57,0x96,0x14,0xF1,0x52,0x3C,0x27,0x87,0x94,0x28,0xED,0x1F,0x3A,0x01,0x37,0xA2,
+0x76,0xFC,0x53,0x50,0xC0,0x84,0x9B,0xC6,0x6B,0x4E,0xBA,0x8C,0x21,0x4F,0xA2,0x8E,
+0x55,0x62,0x91,0xF3,0x69,0x15,0xD8,0xBC,0x88,0xE3,0xC4,0xAA,0x0B,0xFD,0xEF,0xA8,
+0xE9,0x4B,0x55,0x2A,0x06,0x20,0x6D,0x55,0x78,0x29,0x19,0xEE,0x5F,0x30,0x5C,0x4B,
+0x24,0x11,0x55,0xFF,0x24,0x9A,0x6E,0x5E,0x2A,0x2B,0xEE,0x0B,0x4D,0x9F,0x7F,0xF7,
+0x01,0x38,0x94,0x14,0x95,0x43,0x07,0x09,0xFB,0x60,0xA9,0xEE,0x1C,0xAB,0x12,0x8C,
+0xA0,0x9A,0x5E,0xA7,0x98,0x6A,0x59,0x6D,0x8B,0x3F,0x08,0xFB,0xC8,0xD1,0x45,0xAF,
+0x18,0x15,0x64,0x90,0x12,0x0F,0x73,0x28,0x2E,0xC5,0xE2,0x24,0x4E,0xFC,0x58,0xEC,
+0xF0,0xF4,0x45,0xFE,0x22,0xB3,0xEB,0x2F,0x8E,0xD2,0xD9,0x45,0x61,0x05,0xC1,0x97,
+0x6F,0xA8,0x76,0x72,0x8F,0x8B,0x8C,0x36,0xAF,0xBF,0x0D,0x05,0xCE,0x71,0x8D,0xE6,
+0xA6,0x6F,0x1F,0x6C,0xA6,0x71,0x62,0xC5,0xD8,0xD0,0x83,0x72,0x0C,0xF1,0x67,0x11,
+0x89,0x0C,0x9C,0x13,0x4C,0x72,0x34,0xDF,0xBC,0xD5,0x71,0xDF,0xAA,0x71,0xDD,0xE1,
+0xB9,0x6C,0x8C,0x3C,0x12,0x5D,0x65,0xDA,0xBD,0x57,0x12,0xB6,0x43,0x6B,0xFF,0xE5,
+0xDE,0x4D,0x66,0x11,0x51,0xCF,0x99,0xAE,0xEC,0x17,0xB6,0xE8,0x71,0x91,0x8C,0xDE,
+0x49,0xFE,0xDD,0x35,0x71,0xA2,0x15,0x27,0x94,0x1C,0xCF,0x61,0xE3,0x26,0xBB,0x6F,
+0xA3,0x67,0x25,0x21,0x5D,0xE6,0xDD,0x1D,0x0B,0x2E,0x68,0x1B,0x3B,0x82,0xAF,0xEC,
+0x83,0x67,0x85,0xD4,0x98,0x51,0x74,0xB1,0xB9,0x99,0x80,0x89,0xFF,0x7F,0x78,0x19,
+0x5C,0x79,0x4A,0x60,0x2E,0x92,0x40,0xAE,0x4C,0x37,0x2A,0x2C,0xC9,0xC7,0x62,0xC8,
+0x0E,0x5D,0xF7,0x36,0x5B,0xCA,0xE0,0x25,0x25,0x01,0xB4,0xDD,0x1A,0x07,0x9C,0x77,
+0x00,0x3F,0xD0,0xDC,0xD5,0xEC,0x3D,0xD4,0xFA,0xBB,0x3F,0xCC,0x85,0xD6,0x6F,0x7F,
+0xA9,0x2D,0xDF,0xB9,0x02,0xF7,0xF5,0x97,0x9A,0xB5,0x35,0xDA,0xC3,0x67,0xB0,0x87,
+0x4A,0xA9,0x28,0x9E,0x23,0x8E,0xFF,0x5C,0x27,0x6B,0xE1,0xB0,0x4F,0xF3,0x07,0xEE,
+0x00,0x2E,0xD4,0x59,0x87,0xCB,0x52,0x41,0x95,0xEA,0xF4,0x47,0xD7,0xEE,0x64,0x41,
+0x55,0x7C,0x8D,0x59,0x02,0x95,0xDD,0x62,0x9D,0xC2,0xB9,0xEE,0x5A,0x28,0x74,0x84,
+0xA5,0x9B,0xB7,0x90,0xC7,0x0C,0x07,0xDF,0xF5,0x89,0x36,0x74,0x32,0xD6,0x28,0xC1,
+0xB0,0xB0,0x0B,0xE0,0x9C,0x4C,0xC3,0x1C,0xD6,0xFC,0xE3,0x69,0xB5,0x47,0x46,0x81,
+0x2F,0xA2,0x82,0xAB,0xD3,0x63,0x44,0x70,0xC4,0x8D,0xFF,0x2D,0x33,0xBA,0xAD,0x8F,
+0x7B,0xB5,0x70,0x88,0xAE,0x3E,0x19,0xCF,0x40,0x28,0xD8,0xFC,0xC8,0x90,0xBB,0x5D,
+0x99,0x22,0xF5,0x52,0xE6,0x58,0xC5,0x1F,0x88,0x31,0x43,0xEE,0x88,0x1D,0xD7,0xC6,
+0x8E,0x3C,0x43,0x6A,0x1D,0xA7,0x18,0xDE,0x7D,0x3D,0x16,0xF1,0x62,0xF9,0xCA,0x90,
+0xA8,0xFD,
+};
+
+
+/* subject:/C=US/O=Starfield Technologies, Inc./OU=Starfield Class 2 Certification Authority */
+/* issuer :/C=US/O=Starfield Technologies, Inc./OU=Starfield Class 2 Certification Authority */
+
+
+const unsigned char Starfield_Class_2_CA_certificate[1043]={
+0x30,0x82,0x04,0x0F,0x30,0x82,0x02,0xF7,0xA0,0x03,0x02,0x01,0x02,0x02,0x01,0x00,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,
+0x68,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x25,
+0x30,0x23,0x06,0x03,0x55,0x04,0x0A,0x13,0x1C,0x53,0x74,0x61,0x72,0x66,0x69,0x65,
+0x6C,0x64,0x20,0x54,0x65,0x63,0x68,0x6E,0x6F,0x6C,0x6F,0x67,0x69,0x65,0x73,0x2C,
+0x20,0x49,0x6E,0x63,0x2E,0x31,0x32,0x30,0x30,0x06,0x03,0x55,0x04,0x0B,0x13,0x29,
+0x53,0x74,0x61,0x72,0x66,0x69,0x65,0x6C,0x64,0x20,0x43,0x6C,0x61,0x73,0x73,0x20,
+0x32,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,
+0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x30,0x1E,0x17,0x0D,0x30,0x34,0x30,
+0x36,0x32,0x39,0x31,0x37,0x33,0x39,0x31,0x36,0x5A,0x17,0x0D,0x33,0x34,0x30,0x36,
+0x32,0x39,0x31,0x37,0x33,0x39,0x31,0x36,0x5A,0x30,0x68,0x31,0x0B,0x30,0x09,0x06,
+0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x25,0x30,0x23,0x06,0x03,0x55,0x04,
+0x0A,0x13,0x1C,0x53,0x74,0x61,0x72,0x66,0x69,0x65,0x6C,0x64,0x20,0x54,0x65,0x63,
+0x68,0x6E,0x6F,0x6C,0x6F,0x67,0x69,0x65,0x73,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x31,
+0x32,0x30,0x30,0x06,0x03,0x55,0x04,0x0B,0x13,0x29,0x53,0x74,0x61,0x72,0x66,0x69,
+0x65,0x6C,0x64,0x20,0x43,0x6C,0x61,0x73,0x73,0x20,0x32,0x20,0x43,0x65,0x72,0x74,
+0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,
+0x69,0x74,0x79,0x30,0x82,0x01,0x20,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,
+0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0D,0x00,0x30,0x82,0x01,0x08,0x02,
+0x82,0x01,0x01,0x00,0xB7,0x32,0xC8,0xFE,0xE9,0x71,0xA6,0x04,0x85,0xAD,0x0C,0x11,
+0x64,0xDF,0xCE,0x4D,0xEF,0xC8,0x03,0x18,0x87,0x3F,0xA1,0xAB,0xFB,0x3C,0xA6,0x9F,
+0xF0,0xC3,0xA1,0xDA,0xD4,0xD8,0x6E,0x2B,0x53,0x90,0xFB,0x24,0xA4,0x3E,0x84,0xF0,
+0x9E,0xE8,0x5F,0xEC,0xE5,0x27,0x44,0xF5,0x28,0xA6,0x3F,0x7B,0xDE,0xE0,0x2A,0xF0,
+0xC8,0xAF,0x53,0x2F,0x9E,0xCA,0x05,0x01,0x93,0x1E,0x8F,0x66,0x1C,0x39,0xA7,0x4D,
+0xFA,0x5A,0xB6,0x73,0x04,0x25,0x66,0xEB,0x77,0x7F,0xE7,0x59,0xC6,0x4A,0x99,0x25,
+0x14,0x54,0xEB,0x26,0xC7,0xF3,0x7F,0x19,0xD5,0x30,0x70,0x8F,0xAF,0xB0,0x46,0x2A,
+0xFF,0xAD,0xEB,0x29,0xED,0xD7,0x9F,0xAA,0x04,0x87,0xA3,0xD4,0xF9,0x89,0xA5,0x34,
+0x5F,0xDB,0x43,0x91,0x82,0x36,0xD9,0x66,0x3C,0xB1,0xB8,0xB9,0x82,0xFD,0x9C,0x3A,
+0x3E,0x10,0xC8,0x3B,0xEF,0x06,0x65,0x66,0x7A,0x9B,0x19,0x18,0x3D,0xFF,0x71,0x51,
+0x3C,0x30,0x2E,0x5F,0xBE,0x3D,0x77,0x73,0xB2,0x5D,0x06,0x6C,0xC3,0x23,0x56,0x9A,
+0x2B,0x85,0x26,0x92,0x1C,0xA7,0x02,0xB3,0xE4,0x3F,0x0D,0xAF,0x08,0x79,0x82,0xB8,
+0x36,0x3D,0xEA,0x9C,0xD3,0x35,0xB3,0xBC,0x69,0xCA,0xF5,0xCC,0x9D,0xE8,0xFD,0x64,
+0x8D,0x17,0x80,0x33,0x6E,0x5E,0x4A,0x5D,0x99,0xC9,0x1E,0x87,0xB4,0x9D,0x1A,0xC0,
+0xD5,0x6E,0x13,0x35,0x23,0x5E,0xDF,0x9B,0x5F,0x3D,0xEF,0xD6,0xF7,0x76,0xC2,0xEA,
+0x3E,0xBB,0x78,0x0D,0x1C,0x42,0x67,0x6B,0x04,0xD8,0xF8,0xD6,0xDA,0x6F,0x8B,0xF2,
+0x44,0xA0,0x01,0xAB,0x02,0x01,0x03,0xA3,0x81,0xC5,0x30,0x81,0xC2,0x30,0x1D,0x06,
+0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0xBF,0x5F,0xB7,0xD1,0xCE,0xDD,0x1F,0x86,
+0xF4,0x5B,0x55,0xAC,0xDC,0xD7,0x10,0xC2,0x0E,0xA9,0x88,0xE7,0x30,0x81,0x92,0x06,
+0x03,0x55,0x1D,0x23,0x04,0x81,0x8A,0x30,0x81,0x87,0x80,0x14,0xBF,0x5F,0xB7,0xD1,
+0xCE,0xDD,0x1F,0x86,0xF4,0x5B,0x55,0xAC,0xDC,0xD7,0x10,0xC2,0x0E,0xA9,0x88,0xE7,
+0xA1,0x6C,0xA4,0x6A,0x30,0x68,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,
+0x02,0x55,0x53,0x31,0x25,0x30,0x23,0x06,0x03,0x55,0x04,0x0A,0x13,0x1C,0x53,0x74,
+0x61,0x72,0x66,0x69,0x65,0x6C,0x64,0x20,0x54,0x65,0x63,0x68,0x6E,0x6F,0x6C,0x6F,
+0x67,0x69,0x65,0x73,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x31,0x32,0x30,0x30,0x06,0x03,
+0x55,0x04,0x0B,0x13,0x29,0x53,0x74,0x61,0x72,0x66,0x69,0x65,0x6C,0x64,0x20,0x43,
+0x6C,0x61,0x73,0x73,0x20,0x32,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,
+0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x82,0x01,
+0x00,0x30,0x0C,0x06,0x03,0x55,0x1D,0x13,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x03,0x82,
+0x01,0x01,0x00,0x05,0x9D,0x3F,0x88,0x9D,0xD1,0xC9,0x1A,0x55,0xA1,0xAC,0x69,0xF3,
+0xF3,0x59,0xDA,0x9B,0x01,0x87,0x1A,0x4F,0x57,0xA9,0xA1,0x79,0x09,0x2A,0xDB,0xF7,
+0x2F,0xB2,0x1E,0xCC,0xC7,0x5E,0x6A,0xD8,0x83,0x87,0xA1,0x97,0xEF,0x49,0x35,0x3E,
+0x77,0x06,0x41,0x58,0x62,0xBF,0x8E,0x58,0xB8,0x0A,0x67,0x3F,0xEC,0xB3,0xDD,0x21,
+0x66,0x1F,0xC9,0x54,0xFA,0x72,0xCC,0x3D,0x4C,0x40,0xD8,0x81,0xAF,0x77,0x9E,0x83,
+0x7A,0xBB,0xA2,0xC7,0xF5,0x34,0x17,0x8E,0xD9,0x11,0x40,0xF4,0xFC,0x2C,0x2A,0x4D,
+0x15,0x7F,0xA7,0x62,0x5D,0x2E,0x25,0xD3,0x00,0x0B,0x20,0x1A,0x1D,0x68,0xF9,0x17,
+0xB8,0xF4,0xBD,0x8B,0xED,0x28,0x59,0xDD,0x4D,0x16,0x8B,0x17,0x83,0xC8,0xB2,0x65,
+0xC7,0x2D,0x7A,0xA5,0xAA,0xBC,0x53,0x86,0x6D,0xDD,0x57,0xA4,0xCA,0xF8,0x20,0x41,
+0x0B,0x68,0xF0,0xF4,0xFB,0x74,0xBE,0x56,0x5D,0x7A,0x79,0xF5,0xF9,0x1D,0x85,0xE3,
+0x2D,0x95,0xBE,0xF5,0x71,0x90,0x43,0xCC,0x8D,0x1F,0x9A,0x00,0x0A,0x87,0x29,0xE9,
+0x55,0x22,0x58,0x00,0x23,0xEA,0xE3,0x12,0x43,0x29,0x5B,0x47,0x08,0xDD,0x8C,0x41,
+0x6A,0x65,0x06,0xA8,0xE5,0x21,0xAA,0x41,0xB4,0x95,0x21,0x95,0xB9,0x7D,0xD1,0x34,
+0xAB,0x13,0xD6,0xAD,0xBC,0xDC,0xE2,0x3D,0x39,0xCD,0xBD,0x3E,0x75,0x70,0xA1,0x18,
+0x59,0x03,0xC9,0x22,0xB4,0x8F,0x9C,0xD5,0x5E,0x2A,0xD7,0xA5,0xB6,0xD4,0x0A,0x6D,
+0xF8,0xB7,0x40,0x11,0x46,0x9A,0x1F,0x79,0x0E,0x62,0xBF,0x0F,0x97,0xEC,0xE0,0x2F,
+0x1F,0x17,0x94,
+};
+
+
+/* subject:/C=US/O=VeriSign, Inc./OU=VeriSign Trust Network/OU=(c) 1999 VeriSign, Inc. - For authorized use only/CN=VeriSign Class 3 Public Primary Certification Authority - G3 */
+/* issuer :/C=US/O=VeriSign, Inc./OU=VeriSign Trust Network/OU=(c) 1999 VeriSign, Inc. - For authorized use only/CN=VeriSign Class 3 Public Primary Certification Authority - G3 */
+
+
+const unsigned char Verisign_Class_3_Public_Primary_Certification_Authority___G3_certificate[1054]={
+0x30,0x82,0x04,0x1A,0x30,0x82,0x03,0x02,0x02,0x11,0x00,0x9B,0x7E,0x06,0x49,0xA3,
+0x3E,0x62,0xB9,0xD5,0xEE,0x90,0x48,0x71,0x29,0xEF,0x57,0x30,0x0D,0x06,0x09,0x2A,
+0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,0x81,0xCA,0x31,0x0B,0x30,
+0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x17,0x30,0x15,0x06,0x03,
+0x55,0x04,0x0A,0x13,0x0E,0x56,0x65,0x72,0x69,0x53,0x69,0x67,0x6E,0x2C,0x20,0x49,
+0x6E,0x63,0x2E,0x31,0x1F,0x30,0x1D,0x06,0x03,0x55,0x04,0x0B,0x13,0x16,0x56,0x65,
+0x72,0x69,0x53,0x69,0x67,0x6E,0x20,0x54,0x72,0x75,0x73,0x74,0x20,0x4E,0x65,0x74,
+0x77,0x6F,0x72,0x6B,0x31,0x3A,0x30,0x38,0x06,0x03,0x55,0x04,0x0B,0x13,0x31,0x28,
+0x63,0x29,0x20,0x31,0x39,0x39,0x39,0x20,0x56,0x65,0x72,0x69,0x53,0x69,0x67,0x6E,
+0x2C,0x20,0x49,0x6E,0x63,0x2E,0x20,0x2D,0x20,0x46,0x6F,0x72,0x20,0x61,0x75,0x74,
+0x68,0x6F,0x72,0x69,0x7A,0x65,0x64,0x20,0x75,0x73,0x65,0x20,0x6F,0x6E,0x6C,0x79,
+0x31,0x45,0x30,0x43,0x06,0x03,0x55,0x04,0x03,0x13,0x3C,0x56,0x65,0x72,0x69,0x53,
+0x69,0x67,0x6E,0x20,0x43,0x6C,0x61,0x73,0x73,0x20,0x33,0x20,0x50,0x75,0x62,0x6C,
+0x69,0x63,0x20,0x50,0x72,0x69,0x6D,0x61,0x72,0x79,0x20,0x43,0x65,0x72,0x74,0x69,
+0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,
+0x74,0x79,0x20,0x2D,0x20,0x47,0x33,0x30,0x1E,0x17,0x0D,0x39,0x39,0x31,0x30,0x30,
+0x31,0x30,0x30,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x33,0x36,0x30,0x37,0x31,0x36,
+0x32,0x33,0x35,0x39,0x35,0x39,0x5A,0x30,0x81,0xCA,0x31,0x0B,0x30,0x09,0x06,0x03,
+0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x17,0x30,0x15,0x06,0x03,0x55,0x04,0x0A,
+0x13,0x0E,0x56,0x65,0x72,0x69,0x53,0x69,0x67,0x6E,0x2C,0x20,0x49,0x6E,0x63,0x2E,
+0x31,0x1F,0x30,0x1D,0x06,0x03,0x55,0x04,0x0B,0x13,0x16,0x56,0x65,0x72,0x69,0x53,
+0x69,0x67,0x6E,0x20,0x54,0x72,0x75,0x73,0x74,0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,
+0x6B,0x31,0x3A,0x30,0x38,0x06,0x03,0x55,0x04,0x0B,0x13,0x31,0x28,0x63,0x29,0x20,
+0x31,0x39,0x39,0x39,0x20,0x56,0x65,0x72,0x69,0x53,0x69,0x67,0x6E,0x2C,0x20,0x49,
+0x6E,0x63,0x2E,0x20,0x2D,0x20,0x46,0x6F,0x72,0x20,0x61,0x75,0x74,0x68,0x6F,0x72,
+0x69,0x7A,0x65,0x64,0x20,0x75,0x73,0x65,0x20,0x6F,0x6E,0x6C,0x79,0x31,0x45,0x30,
+0x43,0x06,0x03,0x55,0x04,0x03,0x13,0x3C,0x56,0x65,0x72,0x69,0x53,0x69,0x67,0x6E,
+0x20,0x43,0x6C,0x61,0x73,0x73,0x20,0x33,0x20,0x50,0x75,0x62,0x6C,0x69,0x63,0x20,
+0x50,0x72,0x69,0x6D,0x61,0x72,0x79,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,
+0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x20,
+0x2D,0x20,0x47,0x33,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,
+0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,
+0x02,0x82,0x01,0x01,0x00,0xCB,0xBA,0x9C,0x52,0xFC,0x78,0x1F,0x1A,0x1E,0x6F,0x1B,
+0x37,0x73,0xBD,0xF8,0xC9,0x6B,0x94,0x12,0x30,0x4F,0xF0,0x36,0x47,0xF5,0xD0,0x91,
+0x0A,0xF5,0x17,0xC8,0xA5,0x61,0xC1,0x16,0x40,0x4D,0xFB,0x8A,0x61,0x90,0xE5,0x76,
+0x20,0xC1,0x11,0x06,0x7D,0xAB,0x2C,0x6E,0xA6,0xF5,0x11,0x41,0x8E,0xFA,0x2D,0xAD,
+0x2A,0x61,0x59,0xA4,0x67,0x26,0x4C,0xD0,0xE8,0xBC,0x52,0x5B,0x70,0x20,0x04,0x58,
+0xD1,0x7A,0xC9,0xA4,0x69,0xBC,0x83,0x17,0x64,0xAD,0x05,0x8B,0xBC,0xD0,0x58,0xCE,
+0x8D,0x8C,0xF5,0xEB,0xF0,0x42,0x49,0x0B,0x9D,0x97,0x27,0x67,0x32,0x6E,0xE1,0xAE,
+0x93,0x15,0x1C,0x70,0xBC,0x20,0x4D,0x2F,0x18,0xDE,0x92,0x88,0xE8,0x6C,0x85,0x57,
+0x11,0x1A,0xE9,0x7E,0xE3,0x26,0x11,0x54,0xA2,0x45,0x96,0x55,0x83,0xCA,0x30,0x89,
+0xE8,0xDC,0xD8,0xA3,0xED,0x2A,0x80,0x3F,0x7F,0x79,0x65,0x57,0x3E,0x15,0x20,0x66,
+0x08,0x2F,0x95,0x93,0xBF,0xAA,0x47,0x2F,0xA8,0x46,0x97,0xF0,0x12,0xE2,0xFE,0xC2,
+0x0A,0x2B,0x51,0xE6,0x76,0xE6,0xB7,0x46,0xB7,0xE2,0x0D,0xA6,0xCC,0xA8,0xC3,0x4C,
+0x59,0x55,0x89,0xE6,0xE8,0x53,0x5C,0x1C,0xEA,0x9D,0xF0,0x62,0x16,0x0B,0xA7,0xC9,
+0x5F,0x0C,0xF0,0xDE,0xC2,0x76,0xCE,0xAF,0xF7,0x6A,0xF2,0xFA,0x41,0xA6,0xA2,0x33,
+0x14,0xC9,0xE5,0x7A,0x63,0xD3,0x9E,0x62,0x37,0xD5,0x85,0x65,0x9E,0x0E,0xE6,0x53,
+0x24,0x74,0x1B,0x5E,0x1D,0x12,0x53,0x5B,0xC7,0x2C,0xE7,0x83,0x49,0x3B,0x15,0xAE,
+0x8A,0x68,0xB9,0x57,0x97,0x02,0x03,0x01,0x00,0x01,0x30,0x0D,0x06,0x09,0x2A,0x86,
+0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x03,0x82,0x01,0x01,0x00,0x11,0x14,
+0x96,0xC1,0xAB,0x92,0x08,0xF7,0x3F,0x2F,0xC9,0xB2,0xFE,0xE4,0x5A,0x9F,0x64,0xDE,
+0xDB,0x21,0x4F,0x86,0x99,0x34,0x76,0x36,0x57,0xDD,0xD0,0x15,0x2F,0xC5,0xAD,0x7F,
+0x15,0x1F,0x37,0x62,0x73,0x3E,0xD4,0xE7,0x5F,0xCE,0x17,0x03,0xDB,0x35,0xFA,0x2B,
+0xDB,0xAE,0x60,0x09,0x5F,0x1E,0x5F,0x8F,0x6E,0xBB,0x0B,0x3D,0xEA,0x5A,0x13,0x1E,
+0x0C,0x60,0x6F,0xB5,0xC0,0xB5,0x23,0x22,0x2E,0x07,0x0B,0xCB,0xA9,0x74,0xCB,0x47,
+0xBB,0x1D,0xC1,0xD7,0xA5,0x6B,0xCC,0x2F,0xD2,0x42,0xFD,0x49,0xDD,0xA7,0x89,0xCF,
+0x53,0xBA,0xDA,0x00,0x5A,0x28,0xBF,0x82,0xDF,0xF8,0xBA,0x13,0x1D,0x50,0x86,0x82,
+0xFD,0x8E,0x30,0x8F,0x29,0x46,0xB0,0x1E,0x3D,0x35,0xDA,0x38,0x62,0x16,0x18,0x4A,
+0xAD,0xE6,0xB6,0x51,0x6C,0xDE,0xAF,0x62,0xEB,0x01,0xD0,0x1E,0x24,0xFE,0x7A,0x8F,
+0x12,0x1A,0x12,0x68,0xB8,0xFB,0x66,0x99,0x14,0x14,0x45,0x5C,0xAE,0xE7,0xAE,0x69,
+0x17,0x81,0x2B,0x5A,0x37,0xC9,0x5E,0x2A,0xF4,0xC6,0xE2,0xA1,0x5C,0x54,0x9B,0xA6,
+0x54,0x00,0xCF,0xF0,0xF1,0xC1,0xC7,0x98,0x30,0x1A,0x3B,0x36,0x16,0xDB,0xA3,0x6E,
+0xEA,0xFD,0xAD,0xB2,0xC2,0xDA,0xEF,0x02,0x47,0x13,0x8A,0xC0,0xF1,0xB3,0x31,0xAD,
+0x4F,0x1C,0xE1,0x4F,0x9C,0xAF,0x0F,0x0C,0x9D,0xF7,0x78,0x0D,0xD8,0xF4,0x35,0x56,
+0x80,0xDA,0xB7,0x6D,0x17,0x8F,0x9D,0x1E,0x81,0x64,0xE1,0xFE,0xC5,0x45,0xBA,0xAD,
+0x6B,0xB9,0x0A,0x7A,0x4E,0x4F,0x4B,0x84,0xEE,0x4B,0xF1,0x7D,0xDD,0x11,
+};
+
+
+/* subject:/C=US/ST=New Jersey/L=Jersey City/O=The USERTRUST Network/CN=USERTrust ECC Certification Authority */
+/* issuer :/C=US/ST=New Jersey/L=Jersey City/O=The USERTRUST Network/CN=USERTrust ECC Certification Authority */
+
+
+const unsigned char USERTrust_ECC_Certification_Authority_certificate[659]={
+0x30,0x82,0x02,0x8F,0x30,0x82,0x02,0x15,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x5C,
+0x8B,0x99,0xC5,0x5A,0x94,0xC5,0xD2,0x71,0x56,0xDE,0xCD,0x89,0x80,0xCC,0x26,0x30,
+0x0A,0x06,0x08,0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x03,0x30,0x81,0x88,0x31,0x0B,
+0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x13,0x30,0x11,0x06,
+0x03,0x55,0x04,0x08,0x13,0x0A,0x4E,0x65,0x77,0x20,0x4A,0x65,0x72,0x73,0x65,0x79,
+0x31,0x14,0x30,0x12,0x06,0x03,0x55,0x04,0x07,0x13,0x0B,0x4A,0x65,0x72,0x73,0x65,
+0x79,0x20,0x43,0x69,0x74,0x79,0x31,0x1E,0x30,0x1C,0x06,0x03,0x55,0x04,0x0A,0x13,
+0x15,0x54,0x68,0x65,0x20,0x55,0x53,0x45,0x52,0x54,0x52,0x55,0x53,0x54,0x20,0x4E,
+0x65,0x74,0x77,0x6F,0x72,0x6B,0x31,0x2E,0x30,0x2C,0x06,0x03,0x55,0x04,0x03,0x13,
+0x25,0x55,0x53,0x45,0x52,0x54,0x72,0x75,0x73,0x74,0x20,0x45,0x43,0x43,0x20,0x43,
+0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,
+0x68,0x6F,0x72,0x69,0x74,0x79,0x30,0x1E,0x17,0x0D,0x31,0x30,0x30,0x32,0x30,0x31,
+0x30,0x30,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x33,0x38,0x30,0x31,0x31,0x38,0x32,
+0x33,0x35,0x39,0x35,0x39,0x5A,0x30,0x81,0x88,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,
+0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x13,0x30,0x11,0x06,0x03,0x55,0x04,0x08,0x13,
+0x0A,0x4E,0x65,0x77,0x20,0x4A,0x65,0x72,0x73,0x65,0x79,0x31,0x14,0x30,0x12,0x06,
+0x03,0x55,0x04,0x07,0x13,0x0B,0x4A,0x65,0x72,0x73,0x65,0x79,0x20,0x43,0x69,0x74,
+0x79,0x31,0x1E,0x30,0x1C,0x06,0x03,0x55,0x04,0x0A,0x13,0x15,0x54,0x68,0x65,0x20,
+0x55,0x53,0x45,0x52,0x54,0x52,0x55,0x53,0x54,0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,
+0x6B,0x31,0x2E,0x30,0x2C,0x06,0x03,0x55,0x04,0x03,0x13,0x25,0x55,0x53,0x45,0x52,
+0x54,0x72,0x75,0x73,0x74,0x20,0x45,0x43,0x43,0x20,0x43,0x65,0x72,0x74,0x69,0x66,
+0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,
+0x79,0x30,0x76,0x30,0x10,0x06,0x07,0x2A,0x86,0x48,0xCE,0x3D,0x02,0x01,0x06,0x05,
+0x2B,0x81,0x04,0x00,0x22,0x03,0x62,0x00,0x04,0x1A,0xAC,0x54,0x5A,0xA9,0xF9,0x68,
+0x23,0xE7,0x7A,0xD5,0x24,0x6F,0x53,0xC6,0x5A,0xD8,0x4B,0xAB,0xC6,0xD5,0xB6,0xD1,
+0xE6,0x73,0x71,0xAE,0xDD,0x9C,0xD6,0x0C,0x61,0xFD,0xDB,0xA0,0x89,0x03,0xB8,0x05,
+0x14,0xEC,0x57,0xCE,0xEE,0x5D,0x3F,0xE2,0x21,0xB3,0xCE,0xF7,0xD4,0x8A,0x79,0xE0,
+0xA3,0x83,0x7E,0x2D,0x97,0xD0,0x61,0xC4,0xF1,0x99,0xDC,0x25,0x91,0x63,0xAB,0x7F,
+0x30,0xA3,0xB4,0x70,0xE2,0xC7,0xA1,0x33,0x9C,0xF3,0xBF,0x2E,0x5C,0x53,0xB1,0x5F,
+0xB3,0x7D,0x32,0x7F,0x8A,0x34,0xE3,0x79,0x79,0xA3,0x42,0x30,0x40,0x30,0x1D,0x06,
+0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x3A,0xE1,0x09,0x86,0xD4,0xCF,0x19,0xC2,
+0x96,0x76,0x74,0x49,0x76,0xDC,0xE0,0x35,0xC6,0x63,0x63,0x9A,0x30,0x0E,0x06,0x03,
+0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x0F,0x06,0x03,
+0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x0A,0x06,
+0x08,0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x03,0x03,0x68,0x00,0x30,0x65,0x02,0x30,
+0x36,0x67,0xA1,0x16,0x08,0xDC,0xE4,0x97,0x00,0x41,0x1D,0x4E,0xBE,0xE1,0x63,0x01,
+0xCF,0x3B,0xAA,0x42,0x11,0x64,0xA0,0x9D,0x94,0x39,0x02,0x11,0x79,0x5C,0x7B,0x1D,
+0xFA,0x64,0xB9,0xEE,0x16,0x42,0xB3,0xBF,0x8A,0xC2,0x09,0xC4,0xEC,0xE4,0xB1,0x4D,
+0x02,0x31,0x00,0xE9,0x2A,0x61,0x47,0x8C,0x52,0x4A,0x4B,0x4E,0x18,0x70,0xF6,0xD6,
+0x44,0xD6,0x6E,0xF5,0x83,0xBA,0x6D,0x58,0xBD,0x24,0xD9,0x56,0x48,0xEA,0xEF,0xC4,
+0xA2,0x46,0x81,0x88,0x6A,0x3A,0x46,0xD1,0xA9,0x9B,0x4D,0xC9,0x61,0xDA,0xD1,0x5D,
+0x57,0x6A,0x18,
+};
+
+
+/* subject:/C=US/O=GeoTrust Inc./CN=GeoTrust Global CA */
+/* issuer :/C=US/O=GeoTrust Inc./CN=GeoTrust Global CA */
+
+
+const unsigned char GeoTrust_Global_CA_certificate[856]={
+0x30,0x82,0x03,0x54,0x30,0x82,0x02,0x3C,0xA0,0x03,0x02,0x01,0x02,0x02,0x03,0x02,
+0x34,0x56,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,
+0x00,0x30,0x42,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,
+0x31,0x16,0x30,0x14,0x06,0x03,0x55,0x04,0x0A,0x13,0x0D,0x47,0x65,0x6F,0x54,0x72,
+0x75,0x73,0x74,0x20,0x49,0x6E,0x63,0x2E,0x31,0x1B,0x30,0x19,0x06,0x03,0x55,0x04,
+0x03,0x13,0x12,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,0x74,0x20,0x47,0x6C,0x6F,0x62,
+0x61,0x6C,0x20,0x43,0x41,0x30,0x1E,0x17,0x0D,0x30,0x32,0x30,0x35,0x32,0x31,0x30,
+0x34,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x32,0x32,0x30,0x35,0x32,0x31,0x30,0x34,
+0x30,0x30,0x30,0x30,0x5A,0x30,0x42,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,
+0x13,0x02,0x55,0x53,0x31,0x16,0x30,0x14,0x06,0x03,0x55,0x04,0x0A,0x13,0x0D,0x47,
+0x65,0x6F,0x54,0x72,0x75,0x73,0x74,0x20,0x49,0x6E,0x63,0x2E,0x31,0x1B,0x30,0x19,
+0x06,0x03,0x55,0x04,0x03,0x13,0x12,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,0x74,0x20,
+0x47,0x6C,0x6F,0x62,0x61,0x6C,0x20,0x43,0x41,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,
+0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,
+0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01,0x01,0x00,0xDA,0xCC,0x18,0x63,0x30,0xFD,
+0xF4,0x17,0x23,0x1A,0x56,0x7E,0x5B,0xDF,0x3C,0x6C,0x38,0xE4,0x71,0xB7,0x78,0x91,
+0xD4,0xBC,0xA1,0xD8,0x4C,0xF8,0xA8,0x43,0xB6,0x03,0xE9,0x4D,0x21,0x07,0x08,0x88,
+0xDA,0x58,0x2F,0x66,0x39,0x29,0xBD,0x05,0x78,0x8B,0x9D,0x38,0xE8,0x05,0xB7,0x6A,
+0x7E,0x71,0xA4,0xE6,0xC4,0x60,0xA6,0xB0,0xEF,0x80,0xE4,0x89,0x28,0x0F,0x9E,0x25,
+0xD6,0xED,0x83,0xF3,0xAD,0xA6,0x91,0xC7,0x98,0xC9,0x42,0x18,0x35,0x14,0x9D,0xAD,
+0x98,0x46,0x92,0x2E,0x4F,0xCA,0xF1,0x87,0x43,0xC1,0x16,0x95,0x57,0x2D,0x50,0xEF,
+0x89,0x2D,0x80,0x7A,0x57,0xAD,0xF2,0xEE,0x5F,0x6B,0xD2,0x00,0x8D,0xB9,0x14,0xF8,
+0x14,0x15,0x35,0xD9,0xC0,0x46,0xA3,0x7B,0x72,0xC8,0x91,0xBF,0xC9,0x55,0x2B,0xCD,
+0xD0,0x97,0x3E,0x9C,0x26,0x64,0xCC,0xDF,0xCE,0x83,0x19,0x71,0xCA,0x4E,0xE6,0xD4,
+0xD5,0x7B,0xA9,0x19,0xCD,0x55,0xDE,0xC8,0xEC,0xD2,0x5E,0x38,0x53,0xE5,0x5C,0x4F,
+0x8C,0x2D,0xFE,0x50,0x23,0x36,0xFC,0x66,0xE6,0xCB,0x8E,0xA4,0x39,0x19,0x00,0xB7,
+0x95,0x02,0x39,0x91,0x0B,0x0E,0xFE,0x38,0x2E,0xD1,0x1D,0x05,0x9A,0xF6,0x4D,0x3E,
+0x6F,0x0F,0x07,0x1D,0xAF,0x2C,0x1E,0x8F,0x60,0x39,0xE2,0xFA,0x36,0x53,0x13,0x39,
+0xD4,0x5E,0x26,0x2B,0xDB,0x3D,0xA8,0x14,0xBD,0x32,0xEB,0x18,0x03,0x28,0x52,0x04,
+0x71,0xE5,0xAB,0x33,0x3D,0xE1,0x38,0xBB,0x07,0x36,0x84,0x62,0x9C,0x79,0xEA,0x16,
+0x30,0xF4,0x5F,0xC0,0x2B,0xE8,0x71,0x6B,0xE4,0xF9,0x02,0x03,0x01,0x00,0x01,0xA3,
+0x53,0x30,0x51,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,
+0x03,0x01,0x01,0xFF,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0xC0,
+0x7A,0x98,0x68,0x8D,0x89,0xFB,0xAB,0x05,0x64,0x0C,0x11,0x7D,0xAA,0x7D,0x65,0xB8,
+0xCA,0xCC,0x4E,0x30,0x1F,0x06,0x03,0x55,0x1D,0x23,0x04,0x18,0x30,0x16,0x80,0x14,
+0xC0,0x7A,0x98,0x68,0x8D,0x89,0xFB,0xAB,0x05,0x64,0x0C,0x11,0x7D,0xAA,0x7D,0x65,
+0xB8,0xCA,0xCC,0x4E,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,
+0x05,0x05,0x00,0x03,0x82,0x01,0x01,0x00,0x35,0xE3,0x29,0x6A,0xE5,0x2F,0x5D,0x54,
+0x8E,0x29,0x50,0x94,0x9F,0x99,0x1A,0x14,0xE4,0x8F,0x78,0x2A,0x62,0x94,0xA2,0x27,
+0x67,0x9E,0xD0,0xCF,0x1A,0x5E,0x47,0xE9,0xC1,0xB2,0xA4,0xCF,0xDD,0x41,0x1A,0x05,
+0x4E,0x9B,0x4B,0xEE,0x4A,0x6F,0x55,0x52,0xB3,0x24,0xA1,0x37,0x0A,0xEB,0x64,0x76,
+0x2A,0x2E,0x2C,0xF3,0xFD,0x3B,0x75,0x90,0xBF,0xFA,0x71,0xD8,0xC7,0x3D,0x37,0xD2,
+0xB5,0x05,0x95,0x62,0xB9,0xA6,0xDE,0x89,0x3D,0x36,0x7B,0x38,0x77,0x48,0x97,0xAC,
+0xA6,0x20,0x8F,0x2E,0xA6,0xC9,0x0C,0xC2,0xB2,0x99,0x45,0x00,0xC7,0xCE,0x11,0x51,
+0x22,0x22,0xE0,0xA5,0xEA,0xB6,0x15,0x48,0x09,0x64,0xEA,0x5E,0x4F,0x74,0xF7,0x05,
+0x3E,0xC7,0x8A,0x52,0x0C,0xDB,0x15,0xB4,0xBD,0x6D,0x9B,0xE5,0xC6,0xB1,0x54,0x68,
+0xA9,0xE3,0x69,0x90,0xB6,0x9A,0xA5,0x0F,0xB8,0xB9,0x3F,0x20,0x7D,0xAE,0x4A,0xB5,
+0xB8,0x9C,0xE4,0x1D,0xB6,0xAB,0xE6,0x94,0xA5,0xC1,0xC7,0x83,0xAD,0xDB,0xF5,0x27,
+0x87,0x0E,0x04,0x6C,0xD5,0xFF,0xDD,0xA0,0x5D,0xED,0x87,0x52,0xB7,0x2B,0x15,0x02,
+0xAE,0x39,0xA6,0x6A,0x74,0xE9,0xDA,0xC4,0xE7,0xBC,0x4D,0x34,0x1E,0xA9,0x5C,0x4D,
+0x33,0x5F,0x92,0x09,0x2F,0x88,0x66,0x5D,0x77,0x97,0xC7,0x1D,0x76,0x13,0xA9,0xD5,
+0xE5,0xF1,0x16,0x09,0x11,0x35,0xD5,0xAC,0xDB,0x24,0x71,0x70,0x2C,0x98,0x56,0x0B,
+0xD9,0x17,0xB4,0xD1,0xE3,0x51,0x2B,0x5E,0x75,0xE8,0xD5,0xD0,0xDC,0x4F,0x34,0xED,
+0xC2,0x05,0x66,0x80,0xA1,0xCB,0xE6,0x33,
+};
+
+
+/* subject:/C=US/ST=Arizona/L=Scottsdale/O=Starfield Technologies, Inc./CN=Starfield Root Certificate Authority - G2 */
+/* issuer :/C=US/ST=Arizona/L=Scottsdale/O=Starfield Technologies, Inc./CN=Starfield Root Certificate Authority - G2 */
+
+
+const unsigned char Starfield_Root_Certificate_Authority___G2_certificate[993]={
+0x30,0x82,0x03,0xDD,0x30,0x82,0x02,0xC5,0xA0,0x03,0x02,0x01,0x02,0x02,0x01,0x00,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0B,0x05,0x00,0x30,
+0x81,0x8F,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,
+0x10,0x30,0x0E,0x06,0x03,0x55,0x04,0x08,0x13,0x07,0x41,0x72,0x69,0x7A,0x6F,0x6E,
+0x61,0x31,0x13,0x30,0x11,0x06,0x03,0x55,0x04,0x07,0x13,0x0A,0x53,0x63,0x6F,0x74,
+0x74,0x73,0x64,0x61,0x6C,0x65,0x31,0x25,0x30,0x23,0x06,0x03,0x55,0x04,0x0A,0x13,
+0x1C,0x53,0x74,0x61,0x72,0x66,0x69,0x65,0x6C,0x64,0x20,0x54,0x65,0x63,0x68,0x6E,
+0x6F,0x6C,0x6F,0x67,0x69,0x65,0x73,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x31,0x32,0x30,
+0x30,0x06,0x03,0x55,0x04,0x03,0x13,0x29,0x53,0x74,0x61,0x72,0x66,0x69,0x65,0x6C,
+0x64,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,
+0x74,0x65,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x20,0x2D,0x20,0x47,
+0x32,0x30,0x1E,0x17,0x0D,0x30,0x39,0x30,0x39,0x30,0x31,0x30,0x30,0x30,0x30,0x30,
+0x30,0x5A,0x17,0x0D,0x33,0x37,0x31,0x32,0x33,0x31,0x32,0x33,0x35,0x39,0x35,0x39,
+0x5A,0x30,0x81,0x8F,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,
+0x53,0x31,0x10,0x30,0x0E,0x06,0x03,0x55,0x04,0x08,0x13,0x07,0x41,0x72,0x69,0x7A,
+0x6F,0x6E,0x61,0x31,0x13,0x30,0x11,0x06,0x03,0x55,0x04,0x07,0x13,0x0A,0x53,0x63,
+0x6F,0x74,0x74,0x73,0x64,0x61,0x6C,0x65,0x31,0x25,0x30,0x23,0x06,0x03,0x55,0x04,
+0x0A,0x13,0x1C,0x53,0x74,0x61,0x72,0x66,0x69,0x65,0x6C,0x64,0x20,0x54,0x65,0x63,
+0x68,0x6E,0x6F,0x6C,0x6F,0x67,0x69,0x65,0x73,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x31,
+0x32,0x30,0x30,0x06,0x03,0x55,0x04,0x03,0x13,0x29,0x53,0x74,0x61,0x72,0x66,0x69,
+0x65,0x6C,0x64,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,
+0x63,0x61,0x74,0x65,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x20,0x2D,
+0x20,0x47,0x32,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,
+0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,
+0x82,0x01,0x01,0x00,0xBD,0xED,0xC1,0x03,0xFC,0xF6,0x8F,0xFC,0x02,0xB1,0x6F,0x5B,
+0x9F,0x48,0xD9,0x9D,0x79,0xE2,0xA2,0xB7,0x03,0x61,0x56,0x18,0xC3,0x47,0xB6,0xD7,
+0xCA,0x3D,0x35,0x2E,0x89,0x43,0xF7,0xA1,0x69,0x9B,0xDE,0x8A,0x1A,0xFD,0x13,0x20,
+0x9C,0xB4,0x49,0x77,0x32,0x29,0x56,0xFD,0xB9,0xEC,0x8C,0xDD,0x22,0xFA,0x72,0xDC,
+0x27,0x61,0x97,0xEE,0xF6,0x5A,0x84,0xEC,0x6E,0x19,0xB9,0x89,0x2C,0xDC,0x84,0x5B,
+0xD5,0x74,0xFB,0x6B,0x5F,0xC5,0x89,0xA5,0x10,0x52,0x89,0x46,0x55,0xF4,0xB8,0x75,
+0x1C,0xE6,0x7F,0xE4,0x54,0xAE,0x4B,0xF8,0x55,0x72,0x57,0x02,0x19,0xF8,0x17,0x71,
+0x59,0xEB,0x1E,0x28,0x07,0x74,0xC5,0x9D,0x48,0xBE,0x6C,0xB4,0xF4,0xA4,0xB0,0xF3,
+0x64,0x37,0x79,0x92,0xC0,0xEC,0x46,0x5E,0x7F,0xE1,0x6D,0x53,0x4C,0x62,0xAF,0xCD,
+0x1F,0x0B,0x63,0xBB,0x3A,0x9D,0xFB,0xFC,0x79,0x00,0x98,0x61,0x74,0xCF,0x26,0x82,
+0x40,0x63,0xF3,0xB2,0x72,0x6A,0x19,0x0D,0x99,0xCA,0xD4,0x0E,0x75,0xCC,0x37,0xFB,
+0x8B,0x89,0xC1,0x59,0xF1,0x62,0x7F,0x5F,0xB3,0x5F,0x65,0x30,0xF8,0xA7,0xB7,0x4D,
+0x76,0x5A,0x1E,0x76,0x5E,0x34,0xC0,0xE8,0x96,0x56,0x99,0x8A,0xB3,0xF0,0x7F,0xA4,
+0xCD,0xBD,0xDC,0x32,0x31,0x7C,0x91,0xCF,0xE0,0x5F,0x11,0xF8,0x6B,0xAA,0x49,0x5C,
+0xD1,0x99,0x94,0xD1,0xA2,0xE3,0x63,0x5B,0x09,0x76,0xB5,0x56,0x62,0xE1,0x4B,0x74,
+0x1D,0x96,0xD4,0x26,0xD4,0x08,0x04,0x59,0xD0,0x98,0x0E,0x0E,0xE6,0xDE,0xFC,0xC3,
+0xEC,0x1F,0x90,0xF1,0x02,0x03,0x01,0x00,0x01,0xA3,0x42,0x30,0x40,0x30,0x0F,0x06,
+0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x0E,
+0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x1D,
+0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x7C,0x0C,0x32,0x1F,0xA7,0xD9,0x30,
+0x7F,0xC4,0x7D,0x68,0xA3,0x62,0xA8,0xA1,0xCE,0xAB,0x07,0x5B,0x27,0x30,0x0D,0x06,
+0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0B,0x05,0x00,0x03,0x82,0x01,0x01,
+0x00,0x11,0x59,0xFA,0x25,0x4F,0x03,0x6F,0x94,0x99,0x3B,0x9A,0x1F,0x82,0x85,0x39,
+0xD4,0x76,0x05,0x94,0x5E,0xE1,0x28,0x93,0x6D,0x62,0x5D,0x09,0xC2,0xA0,0xA8,0xD4,
+0xB0,0x75,0x38,0xF1,0x34,0x6A,0x9D,0xE4,0x9F,0x8A,0x86,0x26,0x51,0xE6,0x2C,0xD1,
+0xC6,0x2D,0x6E,0x95,0x20,0x4A,0x92,0x01,0xEC,0xB8,0x8A,0x67,0x7B,0x31,0xE2,0x67,
+0x2E,0x8C,0x95,0x03,0x26,0x2E,0x43,0x9D,0x4A,0x31,0xF6,0x0E,0xB5,0x0C,0xBB,0xB7,
+0xE2,0x37,0x7F,0x22,0xBA,0x00,0xA3,0x0E,0x7B,0x52,0xFB,0x6B,0xBB,0x3B,0xC4,0xD3,
+0x79,0x51,0x4E,0xCD,0x90,0xF4,0x67,0x07,0x19,0xC8,0x3C,0x46,0x7A,0x0D,0x01,0x7D,
+0xC5,0x58,0xE7,0x6D,0xE6,0x85,0x30,0x17,0x9A,0x24,0xC4,0x10,0xE0,0x04,0xF7,0xE0,
+0xF2,0x7F,0xD4,0xAA,0x0A,0xFF,0x42,0x1D,0x37,0xED,0x94,0xE5,0x64,0x59,0x12,0x20,
+0x77,0x38,0xD3,0x32,0x3E,0x38,0x81,0x75,0x96,0x73,0xFA,0x68,0x8F,0xB1,0xCB,0xCE,
+0x1F,0xC5,0xEC,0xFA,0x9C,0x7E,0xCF,0x7E,0xB1,0xF1,0x07,0x2D,0xB6,0xFC,0xBF,0xCA,
+0xA4,0xBF,0xD0,0x97,0x05,0x4A,0xBC,0xEA,0x18,0x28,0x02,0x90,0xBD,0x54,0x78,0x09,
+0x21,0x71,0xD3,0xD1,0x7D,0x1D,0xD9,0x16,0xB0,0xA9,0x61,0x3D,0xD0,0x0A,0x00,0x22,
+0xFC,0xC7,0x7B,0xCB,0x09,0x64,0x45,0x0B,0x3B,0x40,0x81,0xF7,0x7D,0x7C,0x32,0xF5,
+0x98,0xCA,0x58,0x8E,0x7D,0x2A,0xEE,0x90,0x59,0x73,0x64,0xF9,0x36,0x74,0x5E,0x25,
+0xA1,0xF5,0x66,0x05,0x2E,0x7F,0x39,0x15,0xA9,0x2A,0xFB,0x50,0x8B,0x8E,0x85,0x69,
+0xF4,
+};
+
+
+/* subject:/C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert Global Root G3 */
+/* issuer :/C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert Global Root G3 */
+
+
+const unsigned char DigiCert_Global_Root_G3_certificate[579]={
+0x30,0x82,0x02,0x3F,0x30,0x82,0x01,0xC5,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x05,
+0x55,0x56,0xBC,0xF2,0x5E,0xA4,0x35,0x35,0xC3,0xA4,0x0F,0xD5,0xAB,0x45,0x72,0x30,
+0x0A,0x06,0x08,0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x03,0x30,0x61,0x31,0x0B,0x30,
+0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x15,0x30,0x13,0x06,0x03,
+0x55,0x04,0x0A,0x13,0x0C,0x44,0x69,0x67,0x69,0x43,0x65,0x72,0x74,0x20,0x49,0x6E,
+0x63,0x31,0x19,0x30,0x17,0x06,0x03,0x55,0x04,0x0B,0x13,0x10,0x77,0x77,0x77,0x2E,
+0x64,0x69,0x67,0x69,0x63,0x65,0x72,0x74,0x2E,0x63,0x6F,0x6D,0x31,0x20,0x30,0x1E,
+0x06,0x03,0x55,0x04,0x03,0x13,0x17,0x44,0x69,0x67,0x69,0x43,0x65,0x72,0x74,0x20,
+0x47,0x6C,0x6F,0x62,0x61,0x6C,0x20,0x52,0x6F,0x6F,0x74,0x20,0x47,0x33,0x30,0x1E,
+0x17,0x0D,0x31,0x33,0x30,0x38,0x30,0x31,0x31,0x32,0x30,0x30,0x30,0x30,0x5A,0x17,
+0x0D,0x33,0x38,0x30,0x31,0x31,0x35,0x31,0x32,0x30,0x30,0x30,0x30,0x5A,0x30,0x61,
+0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x15,0x30,
+0x13,0x06,0x03,0x55,0x04,0x0A,0x13,0x0C,0x44,0x69,0x67,0x69,0x43,0x65,0x72,0x74,
+0x20,0x49,0x6E,0x63,0x31,0x19,0x30,0x17,0x06,0x03,0x55,0x04,0x0B,0x13,0x10,0x77,
+0x77,0x77,0x2E,0x64,0x69,0x67,0x69,0x63,0x65,0x72,0x74,0x2E,0x63,0x6F,0x6D,0x31,
+0x20,0x30,0x1E,0x06,0x03,0x55,0x04,0x03,0x13,0x17,0x44,0x69,0x67,0x69,0x43,0x65,
+0x72,0x74,0x20,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x20,0x52,0x6F,0x6F,0x74,0x20,0x47,
+0x33,0x30,0x76,0x30,0x10,0x06,0x07,0x2A,0x86,0x48,0xCE,0x3D,0x02,0x01,0x06,0x05,
+0x2B,0x81,0x04,0x00,0x22,0x03,0x62,0x00,0x04,0xDD,0xA7,0xD9,0xBB,0x8A,0xB8,0x0B,
+0xFB,0x0B,0x7F,0x21,0xD2,0xF0,0xBE,0xBE,0x73,0xF3,0x33,0x5D,0x1A,0xBC,0x34,0xEA,
+0xDE,0xC6,0x9B,0xBC,0xD0,0x95,0xF6,0xF0,0xCC,0xD0,0x0B,0xBA,0x61,0x5B,0x51,0x46,
+0x7E,0x9E,0x2D,0x9F,0xEE,0x8E,0x63,0x0C,0x17,0xEC,0x07,0x70,0xF5,0xCF,0x84,0x2E,
+0x40,0x83,0x9C,0xE8,0x3F,0x41,0x6D,0x3B,0xAD,0xD3,0xA4,0x14,0x59,0x36,0x78,0x9D,
+0x03,0x43,0xEE,0x10,0x13,0x6C,0x72,0xDE,0xAE,0x88,0xA7,0xA1,0x6B,0xB5,0x43,0xCE,
+0x67,0xDC,0x23,0xFF,0x03,0x1C,0xA3,0xE2,0x3E,0xA3,0x42,0x30,0x40,0x30,0x0F,0x06,
+0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x0E,
+0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x86,0x30,0x1D,
+0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0xB3,0xDB,0x48,0xA4,0xF9,0xA1,0xC5,
+0xD8,0xAE,0x36,0x41,0xCC,0x11,0x63,0x69,0x62,0x29,0xBC,0x4B,0xC6,0x30,0x0A,0x06,
+0x08,0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x03,0x03,0x68,0x00,0x30,0x65,0x02,0x31,
+0x00,0xAD,0xBC,0xF2,0x6C,0x3F,0x12,0x4A,0xD1,0x2D,0x39,0xC3,0x0A,0x09,0x97,0x73,
+0xF4,0x88,0x36,0x8C,0x88,0x27,0xBB,0xE6,0x88,0x8D,0x50,0x85,0xA7,0x63,0xF9,0x9E,
+0x32,0xDE,0x66,0x93,0x0F,0xF1,0xCC,0xB1,0x09,0x8F,0xDD,0x6C,0xAB,0xFA,0x6B,0x7F,
+0xA0,0x02,0x30,0x39,0x66,0x5B,0xC2,0x64,0x8D,0xB8,0x9E,0x50,0xDC,0xA8,0xD5,0x49,
+0xA2,0xED,0xC7,0xDC,0xD1,0x49,0x7F,0x17,0x01,0xB8,0xC8,0x86,0x8F,0x4E,0x8C,0x88,
+0x2B,0xA8,0x9A,0xA9,0x8A,0xC5,0xD1,0x00,0xBD,0xF8,0x54,0xE2,0x9A,0xE5,0x5B,0x7C,
+0xB3,0x27,0x17,
+};
+
+
+/* subject:/C=US/O=thawte, Inc./OU=(c) 2007 thawte, Inc. - For authorized use only/CN=thawte Primary Root CA - G2 */
+/* issuer :/C=US/O=thawte, Inc./OU=(c) 2007 thawte, Inc. - For authorized use only/CN=thawte Primary Root CA - G2 */
+
+
+const unsigned char thawte_Primary_Root_CA___G2_certificate[652]={
+0x30,0x82,0x02,0x88,0x30,0x82,0x02,0x0D,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x35,
+0xFC,0x26,0x5C,0xD9,0x84,0x4F,0xC9,0x3D,0x26,0x3D,0x57,0x9B,0xAE,0xD7,0x56,0x30,
+0x0A,0x06,0x08,0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x03,0x30,0x81,0x84,0x31,0x0B,
+0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x15,0x30,0x13,0x06,
+0x03,0x55,0x04,0x0A,0x13,0x0C,0x74,0x68,0x61,0x77,0x74,0x65,0x2C,0x20,0x49,0x6E,
+0x63,0x2E,0x31,0x38,0x30,0x36,0x06,0x03,0x55,0x04,0x0B,0x13,0x2F,0x28,0x63,0x29,
+0x20,0x32,0x30,0x30,0x37,0x20,0x74,0x68,0x61,0x77,0x74,0x65,0x2C,0x20,0x49,0x6E,
+0x63,0x2E,0x20,0x2D,0x20,0x46,0x6F,0x72,0x20,0x61,0x75,0x74,0x68,0x6F,0x72,0x69,
+0x7A,0x65,0x64,0x20,0x75,0x73,0x65,0x20,0x6F,0x6E,0x6C,0x79,0x31,0x24,0x30,0x22,
+0x06,0x03,0x55,0x04,0x03,0x13,0x1B,0x74,0x68,0x61,0x77,0x74,0x65,0x20,0x50,0x72,
+0x69,0x6D,0x61,0x72,0x79,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x41,0x20,0x2D,0x20,
+0x47,0x32,0x30,0x1E,0x17,0x0D,0x30,0x37,0x31,0x31,0x30,0x35,0x30,0x30,0x30,0x30,
+0x30,0x30,0x5A,0x17,0x0D,0x33,0x38,0x30,0x31,0x31,0x38,0x32,0x33,0x35,0x39,0x35,
+0x39,0x5A,0x30,0x81,0x84,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,
+0x55,0x53,0x31,0x15,0x30,0x13,0x06,0x03,0x55,0x04,0x0A,0x13,0x0C,0x74,0x68,0x61,
+0x77,0x74,0x65,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x31,0x38,0x30,0x36,0x06,0x03,0x55,
+0x04,0x0B,0x13,0x2F,0x28,0x63,0x29,0x20,0x32,0x30,0x30,0x37,0x20,0x74,0x68,0x61,
+0x77,0x74,0x65,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x20,0x2D,0x20,0x46,0x6F,0x72,0x20,
+0x61,0x75,0x74,0x68,0x6F,0x72,0x69,0x7A,0x65,0x64,0x20,0x75,0x73,0x65,0x20,0x6F,
+0x6E,0x6C,0x79,0x31,0x24,0x30,0x22,0x06,0x03,0x55,0x04,0x03,0x13,0x1B,0x74,0x68,
+0x61,0x77,0x74,0x65,0x20,0x50,0x72,0x69,0x6D,0x61,0x72,0x79,0x20,0x52,0x6F,0x6F,
+0x74,0x20,0x43,0x41,0x20,0x2D,0x20,0x47,0x32,0x30,0x76,0x30,0x10,0x06,0x07,0x2A,
+0x86,0x48,0xCE,0x3D,0x02,0x01,0x06,0x05,0x2B,0x81,0x04,0x00,0x22,0x03,0x62,0x00,
+0x04,0xA2,0xD5,0x9C,0x82,0x7B,0x95,0x9D,0xF1,0x52,0x78,0x87,0xFE,0x8A,0x16,0xBF,
+0x05,0xE6,0xDF,0xA3,0x02,0x4F,0x0D,0x07,0xC6,0x00,0x51,0xBA,0x0C,0x02,0x52,0x2D,
+0x22,0xA4,0x42,0x39,0xC4,0xFE,0x8F,0xEA,0xC9,0xC1,0xBE,0xD4,0x4D,0xFF,0x9F,0x7A,
+0x9E,0xE2,0xB1,0x7C,0x9A,0xAD,0xA7,0x86,0x09,0x73,0x87,0xD1,0xE7,0x9A,0xE3,0x7A,
+0xA5,0xAA,0x6E,0xFB,0xBA,0xB3,0x70,0xC0,0x67,0x88,0xA2,0x35,0xD4,0xA3,0x9A,0xB1,
+0xFD,0xAD,0xC2,0xEF,0x31,0xFA,0xA8,0xB9,0xF3,0xFB,0x08,0xC6,0x91,0xD1,0xFB,0x29,
+0x95,0xA3,0x42,0x30,0x40,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,
+0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,
+0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,
+0x14,0x9A,0xD8,0x00,0x30,0x00,0xE7,0x6B,0x7F,0x85,0x18,0xEE,0x8B,0xB6,0xCE,0x8A,
+0x0C,0xF8,0x11,0xE1,0xBB,0x30,0x0A,0x06,0x08,0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,
+0x03,0x03,0x69,0x00,0x30,0x66,0x02,0x31,0x00,0xDD,0xF8,0xE0,0x57,0x47,0x5B,0xA7,
+0xE6,0x0A,0xC3,0xBD,0xF5,0x80,0x8A,0x97,0x35,0x0D,0x1B,0x89,0x3C,0x54,0x86,0x77,
+0x28,0xCA,0xA1,0xF4,0x79,0xDE,0xB5,0xE6,0x38,0xB0,0xF0,0x65,0x70,0x8C,0x7F,0x02,
+0x54,0xC2,0xBF,0xFF,0xD8,0xA1,0x3E,0xD9,0xCF,0x02,0x31,0x00,0xC4,0x8D,0x94,0xFC,
+0xDC,0x53,0xD2,0xDC,0x9D,0x78,0x16,0x1F,0x15,0x33,0x23,0x53,0x52,0xE3,0x5A,0x31,
+0x5D,0x9D,0xCA,0xAE,0xBD,0x13,0x29,0x44,0x0D,0x27,0x5B,0xA8,0xE7,0x68,0x9C,0x12,
+0xF7,0x58,0x3F,0x2E,0x72,0x02,0x57,0xA3,0x8F,0xA1,0x14,0x2E,
+};
+
+
+/* subject:/C=US/O=VeriSign, Inc./OU=VeriSign Trust Network/OU=(c) 2008 VeriSign, Inc. - For authorized use only/CN=VeriSign Universal Root Certification Authority */
+/* issuer :/C=US/O=VeriSign, Inc./OU=VeriSign Trust Network/OU=(c) 2008 VeriSign, Inc. - For authorized use only/CN=VeriSign Universal Root Certification Authority */
+
+
+const unsigned char VeriSign_Universal_Root_Certification_Authority_certificate[1213]={
+0x30,0x82,0x04,0xB9,0x30,0x82,0x03,0xA1,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x40,
+0x1A,0xC4,0x64,0x21,0xB3,0x13,0x21,0x03,0x0E,0xBB,0xE4,0x12,0x1A,0xC5,0x1D,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0B,0x05,0x00,0x30,0x81,
+0xBD,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x17,
+0x30,0x15,0x06,0x03,0x55,0x04,0x0A,0x13,0x0E,0x56,0x65,0x72,0x69,0x53,0x69,0x67,
+0x6E,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x31,0x1F,0x30,0x1D,0x06,0x03,0x55,0x04,0x0B,
+0x13,0x16,0x56,0x65,0x72,0x69,0x53,0x69,0x67,0x6E,0x20,0x54,0x72,0x75,0x73,0x74,
+0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,0x6B,0x31,0x3A,0x30,0x38,0x06,0x03,0x55,0x04,
+0x0B,0x13,0x31,0x28,0x63,0x29,0x20,0x32,0x30,0x30,0x38,0x20,0x56,0x65,0x72,0x69,
+0x53,0x69,0x67,0x6E,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x20,0x2D,0x20,0x46,0x6F,0x72,
+0x20,0x61,0x75,0x74,0x68,0x6F,0x72,0x69,0x7A,0x65,0x64,0x20,0x75,0x73,0x65,0x20,
+0x6F,0x6E,0x6C,0x79,0x31,0x38,0x30,0x36,0x06,0x03,0x55,0x04,0x03,0x13,0x2F,0x56,
+0x65,0x72,0x69,0x53,0x69,0x67,0x6E,0x20,0x55,0x6E,0x69,0x76,0x65,0x72,0x73,0x61,
+0x6C,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,
+0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x30,0x1E,
+0x17,0x0D,0x30,0x38,0x30,0x34,0x30,0x32,0x30,0x30,0x30,0x30,0x30,0x30,0x5A,0x17,
+0x0D,0x33,0x37,0x31,0x32,0x30,0x31,0x32,0x33,0x35,0x39,0x35,0x39,0x5A,0x30,0x81,
+0xBD,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x17,
+0x30,0x15,0x06,0x03,0x55,0x04,0x0A,0x13,0x0E,0x56,0x65,0x72,0x69,0x53,0x69,0x67,
+0x6E,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x31,0x1F,0x30,0x1D,0x06,0x03,0x55,0x04,0x0B,
+0x13,0x16,0x56,0x65,0x72,0x69,0x53,0x69,0x67,0x6E,0x20,0x54,0x72,0x75,0x73,0x74,
+0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,0x6B,0x31,0x3A,0x30,0x38,0x06,0x03,0x55,0x04,
+0x0B,0x13,0x31,0x28,0x63,0x29,0x20,0x32,0x30,0x30,0x38,0x20,0x56,0x65,0x72,0x69,
+0x53,0x69,0x67,0x6E,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x20,0x2D,0x20,0x46,0x6F,0x72,
+0x20,0x61,0x75,0x74,0x68,0x6F,0x72,0x69,0x7A,0x65,0x64,0x20,0x75,0x73,0x65,0x20,
+0x6F,0x6E,0x6C,0x79,0x31,0x38,0x30,0x36,0x06,0x03,0x55,0x04,0x03,0x13,0x2F,0x56,
+0x65,0x72,0x69,0x53,0x69,0x67,0x6E,0x20,0x55,0x6E,0x69,0x76,0x65,0x72,0x73,0x61,
+0x6C,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,
+0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x30,0x82,
+0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,
+0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01,0x01,0x00,0xC7,
+0x61,0x37,0x5E,0xB1,0x01,0x34,0xDB,0x62,0xD7,0x15,0x9B,0xFF,0x58,0x5A,0x8C,0x23,
+0x23,0xD6,0x60,0x8E,0x91,0xD7,0x90,0x98,0x83,0x7A,0xE6,0x58,0x19,0x38,0x8C,0xC5,
+0xF6,0xE5,0x64,0x85,0xB4,0xA2,0x71,0xFB,0xED,0xBD,0xB9,0xDA,0xCD,0x4D,0x00,0xB4,
+0xC8,0x2D,0x73,0xA5,0xC7,0x69,0x71,0x95,0x1F,0x39,0x3C,0xB2,0x44,0x07,0x9C,0xE8,
+0x0E,0xFA,0x4D,0x4A,0xC4,0x21,0xDF,0x29,0x61,0x8F,0x32,0x22,0x61,0x82,0xC5,0x87,
+0x1F,0x6E,0x8C,0x7C,0x5F,0x16,0x20,0x51,0x44,0xD1,0x70,0x4F,0x57,0xEA,0xE3,0x1C,
+0xE3,0xCC,0x79,0xEE,0x58,0xD8,0x0E,0xC2,0xB3,0x45,0x93,0xC0,0x2C,0xE7,0x9A,0x17,
+0x2B,0x7B,0x00,0x37,0x7A,0x41,0x33,0x78,0xE1,0x33,0xE2,0xF3,0x10,0x1A,0x7F,0x87,
+0x2C,0xBE,0xF6,0xF5,0xF7,0x42,0xE2,0xE5,0xBF,0x87,0x62,0x89,0x5F,0x00,0x4B,0xDF,
+0xC5,0xDD,0xE4,0x75,0x44,0x32,0x41,0x3A,0x1E,0x71,0x6E,0x69,0xCB,0x0B,0x75,0x46,
+0x08,0xD1,0xCA,0xD2,0x2B,0x95,0xD0,0xCF,0xFB,0xB9,0x40,0x6B,0x64,0x8C,0x57,0x4D,
+0xFC,0x13,0x11,0x79,0x84,0xED,0x5E,0x54,0xF6,0x34,0x9F,0x08,0x01,0xF3,0x10,0x25,
+0x06,0x17,0x4A,0xDA,0xF1,0x1D,0x7A,0x66,0x6B,0x98,0x60,0x66,0xA4,0xD9,0xEF,0xD2,
+0x2E,0x82,0xF1,0xF0,0xEF,0x09,0xEA,0x44,0xC9,0x15,0x6A,0xE2,0x03,0x6E,0x33,0xD3,
+0xAC,0x9F,0x55,0x00,0xC7,0xF6,0x08,0x6A,0x94,0xB9,0x5F,0xDC,0xE0,0x33,0xF1,0x84,
+0x60,0xF9,0x5B,0x27,0x11,0xB4,0xFC,0x16,0xF2,0xBB,0x56,0x6A,0x80,0x25,0x8D,0x02,
+0x03,0x01,0x00,0x01,0xA3,0x81,0xB2,0x30,0x81,0xAF,0x30,0x0F,0x06,0x03,0x55,0x1D,
+0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x0E,0x06,0x03,0x55,
+0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x6D,0x06,0x08,0x2B,
+0x06,0x01,0x05,0x05,0x07,0x01,0x0C,0x04,0x61,0x30,0x5F,0xA1,0x5D,0xA0,0x5B,0x30,
+0x59,0x30,0x57,0x30,0x55,0x16,0x09,0x69,0x6D,0x61,0x67,0x65,0x2F,0x67,0x69,0x66,
+0x30,0x21,0x30,0x1F,0x30,0x07,0x06,0x05,0x2B,0x0E,0x03,0x02,0x1A,0x04,0x14,0x8F,
+0xE5,0xD3,0x1A,0x86,0xAC,0x8D,0x8E,0x6B,0xC3,0xCF,0x80,0x6A,0xD4,0x48,0x18,0x2C,
+0x7B,0x19,0x2E,0x30,0x25,0x16,0x23,0x68,0x74,0x74,0x70,0x3A,0x2F,0x2F,0x6C,0x6F,
+0x67,0x6F,0x2E,0x76,0x65,0x72,0x69,0x73,0x69,0x67,0x6E,0x2E,0x63,0x6F,0x6D,0x2F,
+0x76,0x73,0x6C,0x6F,0x67,0x6F,0x2E,0x67,0x69,0x66,0x30,0x1D,0x06,0x03,0x55,0x1D,
+0x0E,0x04,0x16,0x04,0x14,0xB6,0x77,0xFA,0x69,0x48,0x47,0x9F,0x53,0x12,0xD5,0xC2,
+0xEA,0x07,0x32,0x76,0x07,0xD1,0x97,0x07,0x19,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,
+0x86,0xF7,0x0D,0x01,0x01,0x0B,0x05,0x00,0x03,0x82,0x01,0x01,0x00,0x4A,0xF8,0xF8,
+0xB0,0x03,0xE6,0x2C,0x67,0x7B,0xE4,0x94,0x77,0x63,0xCC,0x6E,0x4C,0xF9,0x7D,0x0E,
+0x0D,0xDC,0xC8,0xB9,0x35,0xB9,0x70,0x4F,0x63,0xFA,0x24,0xFA,0x6C,0x83,0x8C,0x47,
+0x9D,0x3B,0x63,0xF3,0x9A,0xF9,0x76,0x32,0x95,0x91,0xB1,0x77,0xBC,0xAC,0x9A,0xBE,
+0xB1,0xE4,0x31,0x21,0xC6,0x81,0x95,0x56,0x5A,0x0E,0xB1,0xC2,0xD4,0xB1,0xA6,0x59,
+0xAC,0xF1,0x63,0xCB,0xB8,0x4C,0x1D,0x59,0x90,0x4A,0xEF,0x90,0x16,0x28,0x1F,0x5A,
+0xAE,0x10,0xFB,0x81,0x50,0x38,0x0C,0x6C,0xCC,0xF1,0x3D,0xC3,0xF5,0x63,0xE3,0xB3,
+0xE3,0x21,0xC9,0x24,0x39,0xE9,0xFD,0x15,0x66,0x46,0xF4,0x1B,0x11,0xD0,0x4D,0x73,
+0xA3,0x7D,0x46,0xF9,0x3D,0xED,0xA8,0x5F,0x62,0xD4,0xF1,0x3F,0xF8,0xE0,0x74,0x57,
+0x2B,0x18,0x9D,0x81,0xB4,0xC4,0x28,0xDA,0x94,0x97,0xA5,0x70,0xEB,0xAC,0x1D,0xBE,
+0x07,0x11,0xF0,0xD5,0xDB,0xDD,0xE5,0x8C,0xF0,0xD5,0x32,0xB0,0x83,0xE6,0x57,0xE2,
+0x8F,0xBF,0xBE,0xA1,0xAA,0xBF,0x3D,0x1D,0xB5,0xD4,0x38,0xEA,0xD7,0xB0,0x5C,0x3A,
+0x4F,0x6A,0x3F,0x8F,0xC0,0x66,0x6C,0x63,0xAA,0xE9,0xD9,0xA4,0x16,0xF4,0x81,0xD1,
+0x95,0x14,0x0E,0x7D,0xCD,0x95,0x34,0xD9,0xD2,0x8F,0x70,0x73,0x81,0x7B,0x9C,0x7E,
+0xBD,0x98,0x61,0xD8,0x45,0x87,0x98,0x90,0xC5,0xEB,0x86,0x30,0xC6,0x35,0xBF,0xF0,
+0xFF,0xC3,0x55,0x88,0x83,0x4B,0xEF,0x05,0x92,0x06,0x71,0xF2,0xB8,0x98,0x93,0xB7,
+0xEC,0xCD,0x82,0x61,0xF1,0x38,0xE6,0x4F,0x97,0x98,0x2A,0x5A,0x8D,
+};
+
+
+/* subject:/C=US/O=VeriSign, Inc./OU=VeriSign Trust Network/OU=(c) 2007 VeriSign, Inc. - For authorized use only/CN=VeriSign Class 3 Public Primary Certification Authority - G4 */
+/* issuer :/C=US/O=VeriSign, Inc./OU=VeriSign Trust Network/OU=(c) 2007 VeriSign, Inc. - For authorized use only/CN=VeriSign Class 3 Public Primary Certification Authority - G4 */
+
+
+const unsigned char VeriSign_Class_3_Public_Primary_Certification_Authority___G4_certificate[904]={
+0x30,0x82,0x03,0x84,0x30,0x82,0x03,0x0A,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x2F,
+0x80,0xFE,0x23,0x8C,0x0E,0x22,0x0F,0x48,0x67,0x12,0x28,0x91,0x87,0xAC,0xB3,0x30,
+0x0A,0x06,0x08,0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x03,0x30,0x81,0xCA,0x31,0x0B,
+0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x17,0x30,0x15,0x06,
+0x03,0x55,0x04,0x0A,0x13,0x0E,0x56,0x65,0x72,0x69,0x53,0x69,0x67,0x6E,0x2C,0x20,
+0x49,0x6E,0x63,0x2E,0x31,0x1F,0x30,0x1D,0x06,0x03,0x55,0x04,0x0B,0x13,0x16,0x56,
+0x65,0x72,0x69,0x53,0x69,0x67,0x6E,0x20,0x54,0x72,0x75,0x73,0x74,0x20,0x4E,0x65,
+0x74,0x77,0x6F,0x72,0x6B,0x31,0x3A,0x30,0x38,0x06,0x03,0x55,0x04,0x0B,0x13,0x31,
+0x28,0x63,0x29,0x20,0x32,0x30,0x30,0x37,0x20,0x56,0x65,0x72,0x69,0x53,0x69,0x67,
+0x6E,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x20,0x2D,0x20,0x46,0x6F,0x72,0x20,0x61,0x75,
+0x74,0x68,0x6F,0x72,0x69,0x7A,0x65,0x64,0x20,0x75,0x73,0x65,0x20,0x6F,0x6E,0x6C,
+0x79,0x31,0x45,0x30,0x43,0x06,0x03,0x55,0x04,0x03,0x13,0x3C,0x56,0x65,0x72,0x69,
+0x53,0x69,0x67,0x6E,0x20,0x43,0x6C,0x61,0x73,0x73,0x20,0x33,0x20,0x50,0x75,0x62,
+0x6C,0x69,0x63,0x20,0x50,0x72,0x69,0x6D,0x61,0x72,0x79,0x20,0x43,0x65,0x72,0x74,
+0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,
+0x69,0x74,0x79,0x20,0x2D,0x20,0x47,0x34,0x30,0x1E,0x17,0x0D,0x30,0x37,0x31,0x31,
+0x30,0x35,0x30,0x30,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x33,0x38,0x30,0x31,0x31,
+0x38,0x32,0x33,0x35,0x39,0x35,0x39,0x5A,0x30,0x81,0xCA,0x31,0x0B,0x30,0x09,0x06,
+0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x17,0x30,0x15,0x06,0x03,0x55,0x04,
+0x0A,0x13,0x0E,0x56,0x65,0x72,0x69,0x53,0x69,0x67,0x6E,0x2C,0x20,0x49,0x6E,0x63,
+0x2E,0x31,0x1F,0x30,0x1D,0x06,0x03,0x55,0x04,0x0B,0x13,0x16,0x56,0x65,0x72,0x69,
+0x53,0x69,0x67,0x6E,0x20,0x54,0x72,0x75,0x73,0x74,0x20,0x4E,0x65,0x74,0x77,0x6F,
+0x72,0x6B,0x31,0x3A,0x30,0x38,0x06,0x03,0x55,0x04,0x0B,0x13,0x31,0x28,0x63,0x29,
+0x20,0x32,0x30,0x30,0x37,0x20,0x56,0x65,0x72,0x69,0x53,0x69,0x67,0x6E,0x2C,0x20,
+0x49,0x6E,0x63,0x2E,0x20,0x2D,0x20,0x46,0x6F,0x72,0x20,0x61,0x75,0x74,0x68,0x6F,
+0x72,0x69,0x7A,0x65,0x64,0x20,0x75,0x73,0x65,0x20,0x6F,0x6E,0x6C,0x79,0x31,0x45,
+0x30,0x43,0x06,0x03,0x55,0x04,0x03,0x13,0x3C,0x56,0x65,0x72,0x69,0x53,0x69,0x67,
+0x6E,0x20,0x43,0x6C,0x61,0x73,0x73,0x20,0x33,0x20,0x50,0x75,0x62,0x6C,0x69,0x63,
+0x20,0x50,0x72,0x69,0x6D,0x61,0x72,0x79,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,
+0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,
+0x20,0x2D,0x20,0x47,0x34,0x30,0x76,0x30,0x10,0x06,0x07,0x2A,0x86,0x48,0xCE,0x3D,
+0x02,0x01,0x06,0x05,0x2B,0x81,0x04,0x00,0x22,0x03,0x62,0x00,0x04,0xA7,0x56,0x7A,
+0x7C,0x52,0xDA,0x64,0x9B,0x0E,0x2D,0x5C,0xD8,0x5E,0xAC,0x92,0x3D,0xFE,0x01,0xE6,
+0x19,0x4A,0x3D,0x14,0x03,0x4B,0xFA,0x60,0x27,0x20,0xD9,0x83,0x89,0x69,0xFA,0x54,
+0xC6,0x9A,0x18,0x5E,0x55,0x2A,0x64,0xDE,0x06,0xF6,0x8D,0x4A,0x3B,0xAD,0x10,0x3C,
+0x65,0x3D,0x90,0x88,0x04,0x89,0xE0,0x30,0x61,0xB3,0xAE,0x5D,0x01,0xA7,0x7B,0xDE,
+0x7C,0xB2,0xBE,0xCA,0x65,0x61,0x00,0x86,0xAE,0xDA,0x8F,0x7B,0xD0,0x89,0xAD,0x4D,
+0x1D,0x59,0x9A,0x41,0xB1,0xBC,0x47,0x80,0xDC,0x9E,0x62,0xC3,0xF9,0xA3,0x81,0xB2,
+0x30,0x81,0xAF,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,
+0x03,0x01,0x01,0xFF,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,
+0x03,0x02,0x01,0x06,0x30,0x6D,0x06,0x08,0x2B,0x06,0x01,0x05,0x05,0x07,0x01,0x0C,
+0x04,0x61,0x30,0x5F,0xA1,0x5D,0xA0,0x5B,0x30,0x59,0x30,0x57,0x30,0x55,0x16,0x09,
+0x69,0x6D,0x61,0x67,0x65,0x2F,0x67,0x69,0x66,0x30,0x21,0x30,0x1F,0x30,0x07,0x06,
+0x05,0x2B,0x0E,0x03,0x02,0x1A,0x04,0x14,0x8F,0xE5,0xD3,0x1A,0x86,0xAC,0x8D,0x8E,
+0x6B,0xC3,0xCF,0x80,0x6A,0xD4,0x48,0x18,0x2C,0x7B,0x19,0x2E,0x30,0x25,0x16,0x23,
+0x68,0x74,0x74,0x70,0x3A,0x2F,0x2F,0x6C,0x6F,0x67,0x6F,0x2E,0x76,0x65,0x72,0x69,
+0x73,0x69,0x67,0x6E,0x2E,0x63,0x6F,0x6D,0x2F,0x76,0x73,0x6C,0x6F,0x67,0x6F,0x2E,
+0x67,0x69,0x66,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0xB3,0x16,
+0x91,0xFD,0xEE,0xA6,0x6E,0xE4,0xB5,0x2E,0x49,0x8F,0x87,0x78,0x81,0x80,0xEC,0xE5,
+0xB1,0xB5,0x30,0x0A,0x06,0x08,0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x03,0x03,0x68,
+0x00,0x30,0x65,0x02,0x30,0x66,0x21,0x0C,0x18,0x26,0x60,0x5A,0x38,0x7B,0x56,0x42,
+0xE0,0xA7,0xFC,0x36,0x84,0x51,0x91,0x20,0x2C,0x76,0x4D,0x43,0x3D,0xC4,0x1D,0x84,
+0x23,0xD0,0xAC,0xD6,0x7C,0x35,0x06,0xCE,0xCD,0x69,0xBD,0x90,0x0D,0xDB,0x6C,0x48,
+0x42,0x1D,0x0E,0xAA,0x42,0x02,0x31,0x00,0x9C,0x3D,0x48,0x39,0x23,0x39,0x58,0x1A,
+0x15,0x12,0x59,0x6A,0x9E,0xEF,0xD5,0x59,0xB2,0x1D,0x52,0x2C,0x99,0x71,0xCD,0xC7,
+0x29,0xDF,0x1B,0x2A,0x61,0x7B,0x71,0xD1,0xDE,0xF3,0xC0,0xE5,0x0D,0x3A,0x4A,0xAA,
+0x2D,0xA7,0xD8,0x86,0x2A,0xDD,0x2E,0x10,
+};
+
+
+/* subject:/C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert Global Root G2 */
+/* issuer :/C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert Global Root G2 */
+
+
+const unsigned char DigiCert_Global_Root_G2_certificate[914]={
+0x30,0x82,0x03,0x8E,0x30,0x82,0x02,0x76,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x03,
+0x3A,0xF1,0xE6,0xA7,0x11,0xA9,0xA0,0xBB,0x28,0x64,0xB1,0x1D,0x09,0xFA,0xE5,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0B,0x05,0x00,0x30,0x61,
+0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x15,0x30,
+0x13,0x06,0x03,0x55,0x04,0x0A,0x13,0x0C,0x44,0x69,0x67,0x69,0x43,0x65,0x72,0x74,
+0x20,0x49,0x6E,0x63,0x31,0x19,0x30,0x17,0x06,0x03,0x55,0x04,0x0B,0x13,0x10,0x77,
+0x77,0x77,0x2E,0x64,0x69,0x67,0x69,0x63,0x65,0x72,0x74,0x2E,0x63,0x6F,0x6D,0x31,
+0x20,0x30,0x1E,0x06,0x03,0x55,0x04,0x03,0x13,0x17,0x44,0x69,0x67,0x69,0x43,0x65,
+0x72,0x74,0x20,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x20,0x52,0x6F,0x6F,0x74,0x20,0x47,
+0x32,0x30,0x1E,0x17,0x0D,0x31,0x33,0x30,0x38,0x30,0x31,0x31,0x32,0x30,0x30,0x30,
+0x30,0x5A,0x17,0x0D,0x33,0x38,0x30,0x31,0x31,0x35,0x31,0x32,0x30,0x30,0x30,0x30,
+0x5A,0x30,0x61,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,
+0x31,0x15,0x30,0x13,0x06,0x03,0x55,0x04,0x0A,0x13,0x0C,0x44,0x69,0x67,0x69,0x43,
+0x65,0x72,0x74,0x20,0x49,0x6E,0x63,0x31,0x19,0x30,0x17,0x06,0x03,0x55,0x04,0x0B,
+0x13,0x10,0x77,0x77,0x77,0x2E,0x64,0x69,0x67,0x69,0x63,0x65,0x72,0x74,0x2E,0x63,
+0x6F,0x6D,0x31,0x20,0x30,0x1E,0x06,0x03,0x55,0x04,0x03,0x13,0x17,0x44,0x69,0x67,
+0x69,0x43,0x65,0x72,0x74,0x20,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x20,0x52,0x6F,0x6F,
+0x74,0x20,0x47,0x32,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,
+0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,
+0x02,0x82,0x01,0x01,0x00,0xBB,0x37,0xCD,0x34,0xDC,0x7B,0x6B,0xC9,0xB2,0x68,0x90,
+0xAD,0x4A,0x75,0xFF,0x46,0xBA,0x21,0x0A,0x08,0x8D,0xF5,0x19,0x54,0xC9,0xFB,0x88,
+0xDB,0xF3,0xAE,0xF2,0x3A,0x89,0x91,0x3C,0x7A,0xE6,0xAB,0x06,0x1A,0x6B,0xCF,0xAC,
+0x2D,0xE8,0x5E,0x09,0x24,0x44,0xBA,0x62,0x9A,0x7E,0xD6,0xA3,0xA8,0x7E,0xE0,0x54,
+0x75,0x20,0x05,0xAC,0x50,0xB7,0x9C,0x63,0x1A,0x6C,0x30,0xDC,0xDA,0x1F,0x19,0xB1,
+0xD7,0x1E,0xDE,0xFD,0xD7,0xE0,0xCB,0x94,0x83,0x37,0xAE,0xEC,0x1F,0x43,0x4E,0xDD,
+0x7B,0x2C,0xD2,0xBD,0x2E,0xA5,0x2F,0xE4,0xA9,0xB8,0xAD,0x3A,0xD4,0x99,0xA4,0xB6,
+0x25,0xE9,0x9B,0x6B,0x00,0x60,0x92,0x60,0xFF,0x4F,0x21,0x49,0x18,0xF7,0x67,0x90,
+0xAB,0x61,0x06,0x9C,0x8F,0xF2,0xBA,0xE9,0xB4,0xE9,0x92,0x32,0x6B,0xB5,0xF3,0x57,
+0xE8,0x5D,0x1B,0xCD,0x8C,0x1D,0xAB,0x95,0x04,0x95,0x49,0xF3,0x35,0x2D,0x96,0xE3,
+0x49,0x6D,0xDD,0x77,0xE3,0xFB,0x49,0x4B,0xB4,0xAC,0x55,0x07,0xA9,0x8F,0x95,0xB3,
+0xB4,0x23,0xBB,0x4C,0x6D,0x45,0xF0,0xF6,0xA9,0xB2,0x95,0x30,0xB4,0xFD,0x4C,0x55,
+0x8C,0x27,0x4A,0x57,0x14,0x7C,0x82,0x9D,0xCD,0x73,0x92,0xD3,0x16,0x4A,0x06,0x0C,
+0x8C,0x50,0xD1,0x8F,0x1E,0x09,0xBE,0x17,0xA1,0xE6,0x21,0xCA,0xFD,0x83,0xE5,0x10,
+0xBC,0x83,0xA5,0x0A,0xC4,0x67,0x28,0xF6,0x73,0x14,0x14,0x3D,0x46,0x76,0xC3,0x87,
+0x14,0x89,0x21,0x34,0x4D,0xAF,0x0F,0x45,0x0C,0xA6,0x49,0xA1,0xBA,0xBB,0x9C,0xC5,
+0xB1,0x33,0x83,0x29,0x85,0x02,0x03,0x01,0x00,0x01,0xA3,0x42,0x30,0x40,0x30,0x0F,
+0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,
+0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x86,0x30,
+0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x4E,0x22,0x54,0x20,0x18,0x95,
+0xE6,0xE3,0x6E,0xE6,0x0F,0xFA,0xFA,0xB9,0x12,0xED,0x06,0x17,0x8F,0x39,0x30,0x0D,
+0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0B,0x05,0x00,0x03,0x82,0x01,
+0x01,0x00,0x60,0x67,0x28,0x94,0x6F,0x0E,0x48,0x63,0xEB,0x31,0xDD,0xEA,0x67,0x18,
+0xD5,0x89,0x7D,0x3C,0xC5,0x8B,0x4A,0x7F,0xE9,0xBE,0xDB,0x2B,0x17,0xDF,0xB0,0x5F,
+0x73,0x77,0x2A,0x32,0x13,0x39,0x81,0x67,0x42,0x84,0x23,0xF2,0x45,0x67,0x35,0xEC,
+0x88,0xBF,0xF8,0x8F,0xB0,0x61,0x0C,0x34,0xA4,0xAE,0x20,0x4C,0x84,0xC6,0xDB,0xF8,
+0x35,0xE1,0x76,0xD9,0xDF,0xA6,0x42,0xBB,0xC7,0x44,0x08,0x86,0x7F,0x36,0x74,0x24,
+0x5A,0xDA,0x6C,0x0D,0x14,0x59,0x35,0xBD,0xF2,0x49,0xDD,0xB6,0x1F,0xC9,0xB3,0x0D,
+0x47,0x2A,0x3D,0x99,0x2F,0xBB,0x5C,0xBB,0xB5,0xD4,0x20,0xE1,0x99,0x5F,0x53,0x46,
+0x15,0xDB,0x68,0x9B,0xF0,0xF3,0x30,0xD5,0x3E,0x31,0xE2,0x8D,0x84,0x9E,0xE3,0x8A,
+0xDA,0xDA,0x96,0x3E,0x35,0x13,0xA5,0x5F,0xF0,0xF9,0x70,0x50,0x70,0x47,0x41,0x11,
+0x57,0x19,0x4E,0xC0,0x8F,0xAE,0x06,0xC4,0x95,0x13,0x17,0x2F,0x1B,0x25,0x9F,0x75,
+0xF2,0xB1,0x8E,0x99,0xA1,0x6F,0x13,0xB1,0x41,0x71,0xFE,0x88,0x2A,0xC8,0x4F,0x10,
+0x20,0x55,0xD7,0xF3,0x14,0x45,0xE5,0xE0,0x44,0xF4,0xEA,0x87,0x95,0x32,0x93,0x0E,
+0xFE,0x53,0x46,0xFA,0x2C,0x9D,0xFF,0x8B,0x22,0xB9,0x4B,0xD9,0x09,0x45,0xA4,0xDE,
+0xA4,0xB8,0x9A,0x58,0xDD,0x1B,0x7D,0x52,0x9F,0x8E,0x59,0x43,0x88,0x81,0xA4,0x9E,
+0x26,0xD5,0x6F,0xAD,0xDD,0x0D,0xC6,0x37,0x7D,0xED,0x03,0x92,0x1B,0xE5,0x77,0x5F,
+0x76,0xEE,0x3C,0x8D,0xC4,0x5D,0x56,0x5B,0xA2,0xD9,0x66,0x6E,0xB3,0x35,0x37,0xE5,
+0x32,0xB6,
+};
+
+
+/* subject:/C=SE/O=AddTrust AB/OU=AddTrust TTP Network/CN=AddTrust Class 1 CA Root */
+/* issuer :/C=SE/O=AddTrust AB/OU=AddTrust TTP Network/CN=AddTrust Class 1 CA Root */
+
+
+const unsigned char AddTrust_Low_Value_Services_Root_certificate[1052]={
+0x30,0x82,0x04,0x18,0x30,0x82,0x03,0x00,0xA0,0x03,0x02,0x01,0x02,0x02,0x01,0x01,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,
+0x65,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x53,0x45,0x31,0x14,
+0x30,0x12,0x06,0x03,0x55,0x04,0x0A,0x13,0x0B,0x41,0x64,0x64,0x54,0x72,0x75,0x73,
+0x74,0x20,0x41,0x42,0x31,0x1D,0x30,0x1B,0x06,0x03,0x55,0x04,0x0B,0x13,0x14,0x41,
+0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x54,0x54,0x50,0x20,0x4E,0x65,0x74,0x77,
+0x6F,0x72,0x6B,0x31,0x21,0x30,0x1F,0x06,0x03,0x55,0x04,0x03,0x13,0x18,0x41,0x64,
+0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x43,0x6C,0x61,0x73,0x73,0x20,0x31,0x20,0x43,
+0x41,0x20,0x52,0x6F,0x6F,0x74,0x30,0x1E,0x17,0x0D,0x30,0x30,0x30,0x35,0x33,0x30,
+0x31,0x30,0x33,0x38,0x33,0x31,0x5A,0x17,0x0D,0x32,0x30,0x30,0x35,0x33,0x30,0x31,
+0x30,0x33,0x38,0x33,0x31,0x5A,0x30,0x65,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,
+0x06,0x13,0x02,0x53,0x45,0x31,0x14,0x30,0x12,0x06,0x03,0x55,0x04,0x0A,0x13,0x0B,
+0x41,0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x41,0x42,0x31,0x1D,0x30,0x1B,0x06,
+0x03,0x55,0x04,0x0B,0x13,0x14,0x41,0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x54,
+0x54,0x50,0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,0x6B,0x31,0x21,0x30,0x1F,0x06,0x03,
+0x55,0x04,0x03,0x13,0x18,0x41,0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x43,0x6C,
+0x61,0x73,0x73,0x20,0x31,0x20,0x43,0x41,0x20,0x52,0x6F,0x6F,0x74,0x30,0x82,0x01,
+0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,
+0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01,0x01,0x00,0x96,0x96,
+0xD4,0x21,0x49,0x60,0xE2,0x6B,0xE8,0x41,0x07,0x0C,0xDE,0xC4,0xE0,0xDC,0x13,0x23,
+0xCD,0xC1,0x35,0xC7,0xFB,0xD6,0x4E,0x11,0x0A,0x67,0x5E,0xF5,0x06,0x5B,0x6B,0xA5,
+0x08,0x3B,0x5B,0x29,0x16,0x3A,0xE7,0x87,0xB2,0x34,0x06,0xC5,0xBC,0x05,0xA5,0x03,
+0x7C,0x82,0xCB,0x29,0x10,0xAE,0xE1,0x88,0x81,0xBD,0xD6,0x9E,0xD3,0xFE,0x2D,0x56,
+0xC1,0x15,0xCE,0xE3,0x26,0x9D,0x15,0x2E,0x10,0xFB,0x06,0x8F,0x30,0x04,0xDE,0xA7,
+0xB4,0x63,0xB4,0xFF,0xB1,0x9C,0xAE,0x3C,0xAF,0x77,0xB6,0x56,0xC5,0xB5,0xAB,0xA2,
+0xE9,0x69,0x3A,0x3D,0x0E,0x33,0x79,0x32,0x3F,0x70,0x82,0x92,0x99,0x61,0x6D,0x8D,
+0x30,0x08,0x8F,0x71,0x3F,0xA6,0x48,0x57,0x19,0xF8,0x25,0xDC,0x4B,0x66,0x5C,0xA5,
+0x74,0x8F,0x98,0xAE,0xC8,0xF9,0xC0,0x06,0x22,0xE7,0xAC,0x73,0xDF,0xA5,0x2E,0xFB,
+0x52,0xDC,0xB1,0x15,0x65,0x20,0xFA,0x35,0x66,0x69,0xDE,0xDF,0x2C,0xF1,0x6E,0xBC,
+0x30,0xDB,0x2C,0x24,0x12,0xDB,0xEB,0x35,0x35,0x68,0x90,0xCB,0x00,0xB0,0x97,0x21,
+0x3D,0x74,0x21,0x23,0x65,0x34,0x2B,0xBB,0x78,0x59,0xA3,0xD6,0xE1,0x76,0x39,0x9A,
+0xA4,0x49,0x8E,0x8C,0x74,0xAF,0x6E,0xA4,0x9A,0xA3,0xD9,0x9B,0xD2,0x38,0x5C,0x9B,
+0xA2,0x18,0xCC,0x75,0x23,0x84,0xBE,0xEB,0xE2,0x4D,0x33,0x71,0x8E,0x1A,0xF0,0xC2,
+0xF8,0xC7,0x1D,0xA2,0xAD,0x03,0x97,0x2C,0xF8,0xCF,0x25,0xC6,0xF6,0xB8,0x24,0x31,
+0xB1,0x63,0x5D,0x92,0x7F,0x63,0xF0,0x25,0xC9,0x53,0x2E,0x1F,0xBF,0x4D,0x02,0x03,
+0x01,0x00,0x01,0xA3,0x81,0xD2,0x30,0x81,0xCF,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,
+0x04,0x16,0x04,0x14,0x95,0xB1,0xB4,0xF0,0x94,0xB6,0xBD,0xC7,0xDA,0xD1,0x11,0x09,
+0x21,0xBE,0xC1,0xAF,0x49,0xFD,0x10,0x7B,0x30,0x0B,0x06,0x03,0x55,0x1D,0x0F,0x04,
+0x04,0x03,0x02,0x01,0x06,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,
+0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x81,0x8F,0x06,0x03,0x55,0x1D,0x23,0x04,0x81,
+0x87,0x30,0x81,0x84,0x80,0x14,0x95,0xB1,0xB4,0xF0,0x94,0xB6,0xBD,0xC7,0xDA,0xD1,
+0x11,0x09,0x21,0xBE,0xC1,0xAF,0x49,0xFD,0x10,0x7B,0xA1,0x69,0xA4,0x67,0x30,0x65,
+0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x53,0x45,0x31,0x14,0x30,
+0x12,0x06,0x03,0x55,0x04,0x0A,0x13,0x0B,0x41,0x64,0x64,0x54,0x72,0x75,0x73,0x74,
+0x20,0x41,0x42,0x31,0x1D,0x30,0x1B,0x06,0x03,0x55,0x04,0x0B,0x13,0x14,0x41,0x64,
+0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x54,0x54,0x50,0x20,0x4E,0x65,0x74,0x77,0x6F,
+0x72,0x6B,0x31,0x21,0x30,0x1F,0x06,0x03,0x55,0x04,0x03,0x13,0x18,0x41,0x64,0x64,
+0x54,0x72,0x75,0x73,0x74,0x20,0x43,0x6C,0x61,0x73,0x73,0x20,0x31,0x20,0x43,0x41,
+0x20,0x52,0x6F,0x6F,0x74,0x82,0x01,0x01,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,
+0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x03,0x82,0x01,0x01,0x00,0x2C,0x6D,0x64,0x1B,
+0x1F,0xCD,0x0D,0xDD,0xB9,0x01,0xFA,0x96,0x63,0x34,0x32,0x48,0x47,0x99,0xAE,0x97,
+0xED,0xFD,0x72,0x16,0xA6,0x73,0x47,0x5A,0xF4,0xEB,0xDD,0xE9,0xF5,0xD6,0xFB,0x45,
+0xCC,0x29,0x89,0x44,0x5D,0xBF,0x46,0x39,0x3D,0xE8,0xEE,0xBC,0x4D,0x54,0x86,0x1E,
+0x1D,0x6C,0xE3,0x17,0x27,0x43,0xE1,0x89,0x56,0x2B,0xA9,0x6F,0x72,0x4E,0x49,0x33,
+0xE3,0x72,0x7C,0x2A,0x23,0x9A,0xBC,0x3E,0xFF,0x28,0x2A,0xED,0xA3,0xFF,0x1C,0x23,
+0xBA,0x43,0x57,0x09,0x67,0x4D,0x4B,0x62,0x06,0x2D,0xF8,0xFF,0x6C,0x9D,0x60,0x1E,
+0xD8,0x1C,0x4B,0x7D,0xB5,0x31,0x2F,0xD9,0xD0,0x7C,0x5D,0xF8,0xDE,0x6B,0x83,0x18,
+0x78,0x37,0x57,0x2F,0xE8,0x33,0x07,0x67,0xDF,0x1E,0xC7,0x6B,0x2A,0x95,0x76,0xAE,
+0x8F,0x57,0xA3,0xF0,0xF4,0x52,0xB4,0xA9,0x53,0x08,0xCF,0xE0,0x4F,0xD3,0x7A,0x53,
+0x8B,0xFD,0xBB,0x1C,0x56,0x36,0xF2,0xFE,0xB2,0xB6,0xE5,0x76,0xBB,0xD5,0x22,0x65,
+0xA7,0x3F,0xFE,0xD1,0x66,0xAD,0x0B,0xBC,0x6B,0x99,0x86,0xEF,0x3F,0x7D,0xF3,0x18,
+0x32,0xCA,0x7B,0xC6,0xE3,0xAB,0x64,0x46,0x95,0xF8,0x26,0x69,0xD9,0x55,0x83,0x7B,
+0x2C,0x96,0x07,0xFF,0x59,0x2C,0x44,0xA3,0xC6,0xE5,0xE9,0xA9,0xDC,0xA1,0x63,0x80,
+0x5A,0x21,0x5E,0x21,0xCF,0x53,0x54,0xF0,0xBA,0x6F,0x89,0xDB,0xA8,0xAA,0x95,0xCF,
+0x8B,0xE3,0x71,0xCC,0x1E,0x1B,0x20,0x44,0x08,0xC0,0x7A,0xB6,0x40,0xFD,0xC4,0xE4,
+0x35,0xE1,0x1D,0x16,0x1C,0xD0,0xBC,0x2B,0x8E,0xD6,0x71,0xD9,
+};
+
+
+/* subject:/C=US/O=AffirmTrust/CN=AffirmTrust Premium ECC */
+/* issuer :/C=US/O=AffirmTrust/CN=AffirmTrust Premium ECC */
+
+
+const unsigned char AffirmTrust_Premium_ECC_certificate[514]={
+0x30,0x82,0x01,0xFE,0x30,0x82,0x01,0x85,0xA0,0x03,0x02,0x01,0x02,0x02,0x08,0x74,
+0x97,0x25,0x8A,0xC7,0x3F,0x7A,0x54,0x30,0x0A,0x06,0x08,0x2A,0x86,0x48,0xCE,0x3D,
+0x04,0x03,0x03,0x30,0x45,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,
+0x55,0x53,0x31,0x14,0x30,0x12,0x06,0x03,0x55,0x04,0x0A,0x0C,0x0B,0x41,0x66,0x66,
+0x69,0x72,0x6D,0x54,0x72,0x75,0x73,0x74,0x31,0x20,0x30,0x1E,0x06,0x03,0x55,0x04,
+0x03,0x0C,0x17,0x41,0x66,0x66,0x69,0x72,0x6D,0x54,0x72,0x75,0x73,0x74,0x20,0x50,
+0x72,0x65,0x6D,0x69,0x75,0x6D,0x20,0x45,0x43,0x43,0x30,0x1E,0x17,0x0D,0x31,0x30,
+0x30,0x31,0x32,0x39,0x31,0x34,0x32,0x30,0x32,0x34,0x5A,0x17,0x0D,0x34,0x30,0x31,
+0x32,0x33,0x31,0x31,0x34,0x32,0x30,0x32,0x34,0x5A,0x30,0x45,0x31,0x0B,0x30,0x09,
+0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x14,0x30,0x12,0x06,0x03,0x55,
+0x04,0x0A,0x0C,0x0B,0x41,0x66,0x66,0x69,0x72,0x6D,0x54,0x72,0x75,0x73,0x74,0x31,
+0x20,0x30,0x1E,0x06,0x03,0x55,0x04,0x03,0x0C,0x17,0x41,0x66,0x66,0x69,0x72,0x6D,
+0x54,0x72,0x75,0x73,0x74,0x20,0x50,0x72,0x65,0x6D,0x69,0x75,0x6D,0x20,0x45,0x43,
+0x43,0x30,0x76,0x30,0x10,0x06,0x07,0x2A,0x86,0x48,0xCE,0x3D,0x02,0x01,0x06,0x05,
+0x2B,0x81,0x04,0x00,0x22,0x03,0x62,0x00,0x04,0x0D,0x30,0x5E,0x1B,0x15,0x9D,0x03,
+0xD0,0xA1,0x79,0x35,0xB7,0x3A,0x3C,0x92,0x7A,0xCA,0x15,0x1C,0xCD,0x62,0xF3,0x9C,
+0x26,0x5C,0x07,0x3D,0xE5,0x54,0xFA,0xA3,0xD6,0xCC,0x12,0xEA,0xF4,0x14,0x5F,0xE8,
+0x8E,0x19,0xAB,0x2F,0x2E,0x48,0xE6,0xAC,0x18,0x43,0x78,0xAC,0xD0,0x37,0xC3,0xBD,
+0xB2,0xCD,0x2C,0xE6,0x47,0xE2,0x1A,0xE6,0x63,0xB8,0x3D,0x2E,0x2F,0x78,0xC4,0x4F,
+0xDB,0xF4,0x0F,0xA4,0x68,0x4C,0x55,0x72,0x6B,0x95,0x1D,0x4E,0x18,0x42,0x95,0x78,
+0xCC,0x37,0x3C,0x91,0xE2,0x9B,0x65,0x2B,0x29,0xA3,0x42,0x30,0x40,0x30,0x1D,0x06,
+0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x9A,0xAF,0x29,0x7A,0xC0,0x11,0x35,0x35,
+0x26,0x51,0x30,0x00,0xC3,0x6A,0xFE,0x40,0xD5,0xAE,0xD6,0x3C,0x30,0x0F,0x06,0x03,
+0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x0E,0x06,
+0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x0A,0x06,
+0x08,0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x03,0x03,0x67,0x00,0x30,0x64,0x02,0x30,
+0x17,0x09,0xF3,0x87,0x88,0x50,0x5A,0xAF,0xC8,0xC0,0x42,0xBF,0x47,0x5F,0xF5,0x6C,
+0x6A,0x86,0xE0,0xC4,0x27,0x74,0xE4,0x38,0x53,0xD7,0x05,0x7F,0x1B,0x34,0xE3,0xC6,
+0x2F,0xB3,0xCA,0x09,0x3C,0x37,0x9D,0xD7,0xE7,0xB8,0x46,0xF1,0xFD,0xA1,0xE2,0x71,
+0x02,0x30,0x42,0x59,0x87,0x43,0xD4,0x51,0xDF,0xBA,0xD3,0x09,0x32,0x5A,0xCE,0x88,
+0x7E,0x57,0x3D,0x9C,0x5F,0x42,0x6B,0xF5,0x07,0x2D,0xB5,0xF0,0x82,0x93,0xF9,0x59,
+0x6F,0xAE,0x64,0xFA,0x58,0xE5,0x8B,0x1E,0xE3,0x63,0xBE,0xB5,0x81,0xCD,0x6F,0x02,
+0x8C,0x79,
+};
+
+
+/* subject:/C=US/O=VeriSign, Inc./OU=VeriSign Trust Network/OU=(c) 1999 VeriSign, Inc. - For authorized use only/CN=VeriSign Class 4 Public Primary Certification Authority - G3 */
+/* issuer :/C=US/O=VeriSign, Inc./OU=VeriSign Trust Network/OU=(c) 1999 VeriSign, Inc. - For authorized use only/CN=VeriSign Class 4 Public Primary Certification Authority - G3 */
+
+
+const unsigned char Verisign_Class_4_Public_Primary_Certification_Authority___G3_certificate[1054]={
+0x30,0x82,0x04,0x1A,0x30,0x82,0x03,0x02,0x02,0x11,0x00,0xEC,0xA0,0xA7,0x8B,0x6E,
+0x75,0x6A,0x01,0xCF,0xC4,0x7C,0xCC,0x2F,0x94,0x5E,0xD7,0x30,0x0D,0x06,0x09,0x2A,
+0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,0x81,0xCA,0x31,0x0B,0x30,
+0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x17,0x30,0x15,0x06,0x03,
+0x55,0x04,0x0A,0x13,0x0E,0x56,0x65,0x72,0x69,0x53,0x69,0x67,0x6E,0x2C,0x20,0x49,
+0x6E,0x63,0x2E,0x31,0x1F,0x30,0x1D,0x06,0x03,0x55,0x04,0x0B,0x13,0x16,0x56,0x65,
+0x72,0x69,0x53,0x69,0x67,0x6E,0x20,0x54,0x72,0x75,0x73,0x74,0x20,0x4E,0x65,0x74,
+0x77,0x6F,0x72,0x6B,0x31,0x3A,0x30,0x38,0x06,0x03,0x55,0x04,0x0B,0x13,0x31,0x28,
+0x63,0x29,0x20,0x31,0x39,0x39,0x39,0x20,0x56,0x65,0x72,0x69,0x53,0x69,0x67,0x6E,
+0x2C,0x20,0x49,0x6E,0x63,0x2E,0x20,0x2D,0x20,0x46,0x6F,0x72,0x20,0x61,0x75,0x74,
+0x68,0x6F,0x72,0x69,0x7A,0x65,0x64,0x20,0x75,0x73,0x65,0x20,0x6F,0x6E,0x6C,0x79,
+0x31,0x45,0x30,0x43,0x06,0x03,0x55,0x04,0x03,0x13,0x3C,0x56,0x65,0x72,0x69,0x53,
+0x69,0x67,0x6E,0x20,0x43,0x6C,0x61,0x73,0x73,0x20,0x34,0x20,0x50,0x75,0x62,0x6C,
+0x69,0x63,0x20,0x50,0x72,0x69,0x6D,0x61,0x72,0x79,0x20,0x43,0x65,0x72,0x74,0x69,
+0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,
+0x74,0x79,0x20,0x2D,0x20,0x47,0x33,0x30,0x1E,0x17,0x0D,0x39,0x39,0x31,0x30,0x30,
+0x31,0x30,0x30,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x33,0x36,0x30,0x37,0x31,0x36,
+0x32,0x33,0x35,0x39,0x35,0x39,0x5A,0x30,0x81,0xCA,0x31,0x0B,0x30,0x09,0x06,0x03,
+0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x17,0x30,0x15,0x06,0x03,0x55,0x04,0x0A,
+0x13,0x0E,0x56,0x65,0x72,0x69,0x53,0x69,0x67,0x6E,0x2C,0x20,0x49,0x6E,0x63,0x2E,
+0x31,0x1F,0x30,0x1D,0x06,0x03,0x55,0x04,0x0B,0x13,0x16,0x56,0x65,0x72,0x69,0x53,
+0x69,0x67,0x6E,0x20,0x54,0x72,0x75,0x73,0x74,0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,
+0x6B,0x31,0x3A,0x30,0x38,0x06,0x03,0x55,0x04,0x0B,0x13,0x31,0x28,0x63,0x29,0x20,
+0x31,0x39,0x39,0x39,0x20,0x56,0x65,0x72,0x69,0x53,0x69,0x67,0x6E,0x2C,0x20,0x49,
+0x6E,0x63,0x2E,0x20,0x2D,0x20,0x46,0x6F,0x72,0x20,0x61,0x75,0x74,0x68,0x6F,0x72,
+0x69,0x7A,0x65,0x64,0x20,0x75,0x73,0x65,0x20,0x6F,0x6E,0x6C,0x79,0x31,0x45,0x30,
+0x43,0x06,0x03,0x55,0x04,0x03,0x13,0x3C,0x56,0x65,0x72,0x69,0x53,0x69,0x67,0x6E,
+0x20,0x43,0x6C,0x61,0x73,0x73,0x20,0x34,0x20,0x50,0x75,0x62,0x6C,0x69,0x63,0x20,
+0x50,0x72,0x69,0x6D,0x61,0x72,0x79,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,
+0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x20,
+0x2D,0x20,0x47,0x33,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,
+0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,
+0x02,0x82,0x01,0x01,0x00,0xAD,0xCB,0xA5,0x11,0x69,0xC6,0x59,0xAB,0xF1,0x8F,0xB5,
+0x19,0x0F,0x56,0xCE,0xCC,0xB5,0x1F,0x20,0xE4,0x9E,0x26,0x25,0x4B,0xE0,0x73,0x65,
+0x89,0x59,0xDE,0xD0,0x83,0xE4,0xF5,0x0F,0xB5,0xBB,0xAD,0xF1,0x7C,0xE8,0x21,0xFC,
+0xE4,0xE8,0x0C,0xEE,0x7C,0x45,0x22,0x19,0x76,0x92,0xB4,0x13,0xB7,0x20,0x5B,0x09,
+0xFA,0x61,0xAE,0xA8,0xF2,0xA5,0x8D,0x85,0xC2,0x2A,0xD6,0xDE,0x66,0x36,0xD2,0x9B,
+0x02,0xF4,0xA8,0x92,0x60,0x7C,0x9C,0x69,0xB4,0x8F,0x24,0x1E,0xD0,0x86,0x52,0xF6,
+0x32,0x9C,0x41,0x58,0x1E,0x22,0xBD,0xCD,0x45,0x62,0x95,0x08,0x6E,0xD0,0x66,0xDD,
+0x53,0xA2,0xCC,0xF0,0x10,0xDC,0x54,0x73,0x8B,0x04,0xA1,0x46,0x33,0x33,0x5C,0x17,
+0x40,0xB9,0x9E,0x4D,0xD3,0xF3,0xBE,0x55,0x83,0xE8,0xB1,0x89,0x8E,0x5A,0x7C,0x9A,
+0x96,0x22,0x90,0x3B,0x88,0x25,0xF2,0xD2,0x53,0x88,0x02,0x0C,0x0B,0x78,0xF2,0xE6,
+0x37,0x17,0x4B,0x30,0x46,0x07,0xE4,0x80,0x6D,0xA6,0xD8,0x96,0x2E,0xE8,0x2C,0xF8,
+0x11,0xB3,0x38,0x0D,0x66,0xA6,0x9B,0xEA,0xC9,0x23,0x5B,0xDB,0x8E,0xE2,0xF3,0x13,
+0x8E,0x1A,0x59,0x2D,0xAA,0x02,0xF0,0xEC,0xA4,0x87,0x66,0xDC,0xC1,0x3F,0xF5,0xD8,
+0xB9,0xF4,0xEC,0x82,0xC6,0xD2,0x3D,0x95,0x1D,0xE5,0xC0,0x4F,0x84,0xC9,0xD9,0xA3,
+0x44,0x28,0x06,0x6A,0xD7,0x45,0xAC,0xF0,0x6B,0x6A,0xEF,0x4E,0x5F,0xF8,0x11,0x82,
+0x1E,0x38,0x63,0x34,0x66,0x50,0xD4,0x3E,0x93,0x73,0xFA,0x30,0xC3,0x66,0xAD,0xFF,
+0x93,0x2D,0x97,0xEF,0x03,0x02,0x03,0x01,0x00,0x01,0x30,0x0D,0x06,0x09,0x2A,0x86,
+0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x03,0x82,0x01,0x01,0x00,0x8F,0xFA,
+0x25,0x6B,0x4F,0x5B,0xE4,0xA4,0x4E,0x27,0x55,0xAB,0x22,0x15,0x59,0x3C,0xCA,0xB5,
+0x0A,0xD4,0x4A,0xDB,0xAB,0xDD,0xA1,0x5F,0x53,0xC5,0xA0,0x57,0x39,0xC2,0xCE,0x47,
+0x2B,0xBE,0x3A,0xC8,0x56,0xBF,0xC2,0xD9,0x27,0x10,0x3A,0xB1,0x05,0x3C,0xC0,0x77,
+0x31,0xBB,0x3A,0xD3,0x05,0x7B,0x6D,0x9A,0x1C,0x30,0x8C,0x80,0xCB,0x93,0x93,0x2A,
+0x83,0xAB,0x05,0x51,0x82,0x02,0x00,0x11,0x67,0x6B,0xF3,0x88,0x61,0x47,0x5F,0x03,
+0x93,0xD5,0x5B,0x0D,0xE0,0xF1,0xD4,0xA1,0x32,0x35,0x85,0xB2,0x3A,0xDB,0xB0,0x82,
+0xAB,0xD1,0xCB,0x0A,0xBC,0x4F,0x8C,0x5B,0xC5,0x4B,0x00,0x3B,0x1F,0x2A,0x82,0xA6,
+0x7E,0x36,0x85,0xDC,0x7E,0x3C,0x67,0x00,0xB5,0xE4,0x3B,0x52,0xE0,0xA8,0xEB,0x5D,
+0x15,0xF9,0xC6,0x6D,0xF0,0xAD,0x1D,0x0E,0x85,0xB7,0xA9,0x9A,0x73,0x14,0x5A,0x5B,
+0x8F,0x41,0x28,0xC0,0xD5,0xE8,0x2D,0x4D,0xA4,0x5E,0xCD,0xAA,0xD9,0xED,0xCE,0xDC,
+0xD8,0xD5,0x3C,0x42,0x1D,0x17,0xC1,0x12,0x5D,0x45,0x38,0xC3,0x38,0xF3,0xFC,0x85,
+0x2E,0x83,0x46,0x48,0xB2,0xD7,0x20,0x5F,0x92,0x36,0x8F,0xE7,0x79,0x0F,0x98,0x5E,
+0x99,0xE8,0xF0,0xD0,0xA4,0xBB,0xF5,0x53,0xBD,0x2A,0xCE,0x59,0xB0,0xAF,0x6E,0x7F,
+0x6C,0xBB,0xD2,0x1E,0x00,0xB0,0x21,0xED,0xF8,0x41,0x62,0x82,0xB9,0xD8,0xB2,0xC4,
+0xBB,0x46,0x50,0xF3,0x31,0xC5,0x8F,0x01,0xA8,0x74,0xEB,0xF5,0x78,0x27,0xDA,0xE7,
+0xF7,0x66,0x43,0xF3,0x9E,0x83,0x3E,0x20,0xAA,0xC3,0x35,0x60,0x91,0xCE,
+};
+
+
+/* subject:/C=US/O=thawte, Inc./OU=Certification Services Division/OU=(c) 2006 thawte, Inc. - For authorized use only/CN=thawte Primary Root CA */
+/* issuer :/C=US/O=thawte, Inc./OU=Certification Services Division/OU=(c) 2006 thawte, Inc. - For authorized use only/CN=thawte Primary Root CA */
+
+
+const unsigned char thawte_Primary_Root_CA_certificate[1060]={
+0x30,0x82,0x04,0x20,0x30,0x82,0x03,0x08,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x34,
+0x4E,0xD5,0x57,0x20,0xD5,0xED,0xEC,0x49,0xF4,0x2F,0xCE,0x37,0xDB,0x2B,0x6D,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,0x81,
+0xA9,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x15,
+0x30,0x13,0x06,0x03,0x55,0x04,0x0A,0x13,0x0C,0x74,0x68,0x61,0x77,0x74,0x65,0x2C,
+0x20,0x49,0x6E,0x63,0x2E,0x31,0x28,0x30,0x26,0x06,0x03,0x55,0x04,0x0B,0x13,0x1F,
+0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x53,0x65,
+0x72,0x76,0x69,0x63,0x65,0x73,0x20,0x44,0x69,0x76,0x69,0x73,0x69,0x6F,0x6E,0x31,
+0x38,0x30,0x36,0x06,0x03,0x55,0x04,0x0B,0x13,0x2F,0x28,0x63,0x29,0x20,0x32,0x30,
+0x30,0x36,0x20,0x74,0x68,0x61,0x77,0x74,0x65,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x20,
+0x2D,0x20,0x46,0x6F,0x72,0x20,0x61,0x75,0x74,0x68,0x6F,0x72,0x69,0x7A,0x65,0x64,
+0x20,0x75,0x73,0x65,0x20,0x6F,0x6E,0x6C,0x79,0x31,0x1F,0x30,0x1D,0x06,0x03,0x55,
+0x04,0x03,0x13,0x16,0x74,0x68,0x61,0x77,0x74,0x65,0x20,0x50,0x72,0x69,0x6D,0x61,
+0x72,0x79,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x41,0x30,0x1E,0x17,0x0D,0x30,0x36,
+0x31,0x31,0x31,0x37,0x30,0x30,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x33,0x36,0x30,
+0x37,0x31,0x36,0x32,0x33,0x35,0x39,0x35,0x39,0x5A,0x30,0x81,0xA9,0x31,0x0B,0x30,
+0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x15,0x30,0x13,0x06,0x03,
+0x55,0x04,0x0A,0x13,0x0C,0x74,0x68,0x61,0x77,0x74,0x65,0x2C,0x20,0x49,0x6E,0x63,
+0x2E,0x31,0x28,0x30,0x26,0x06,0x03,0x55,0x04,0x0B,0x13,0x1F,0x43,0x65,0x72,0x74,
+0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x53,0x65,0x72,0x76,0x69,0x63,
+0x65,0x73,0x20,0x44,0x69,0x76,0x69,0x73,0x69,0x6F,0x6E,0x31,0x38,0x30,0x36,0x06,
+0x03,0x55,0x04,0x0B,0x13,0x2F,0x28,0x63,0x29,0x20,0x32,0x30,0x30,0x36,0x20,0x74,
+0x68,0x61,0x77,0x74,0x65,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x20,0x2D,0x20,0x46,0x6F,
+0x72,0x20,0x61,0x75,0x74,0x68,0x6F,0x72,0x69,0x7A,0x65,0x64,0x20,0x75,0x73,0x65,
+0x20,0x6F,0x6E,0x6C,0x79,0x31,0x1F,0x30,0x1D,0x06,0x03,0x55,0x04,0x03,0x13,0x16,
+0x74,0x68,0x61,0x77,0x74,0x65,0x20,0x50,0x72,0x69,0x6D,0x61,0x72,0x79,0x20,0x52,
+0x6F,0x6F,0x74,0x20,0x43,0x41,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,
+0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,
+0x01,0x0A,0x02,0x82,0x01,0x01,0x00,0xAC,0xA0,0xF0,0xFB,0x80,0x59,0xD4,0x9C,0xC7,
+0xA4,0xCF,0x9D,0xA1,0x59,0x73,0x09,0x10,0x45,0x0C,0x0D,0x2C,0x6E,0x68,0xF1,0x6C,
+0x5B,0x48,0x68,0x49,0x59,0x37,0xFC,0x0B,0x33,0x19,0xC2,0x77,0x7F,0xCC,0x10,0x2D,
+0x95,0x34,0x1C,0xE6,0xEB,0x4D,0x09,0xA7,0x1C,0xD2,0xB8,0xC9,0x97,0x36,0x02,0xB7,
+0x89,0xD4,0x24,0x5F,0x06,0xC0,0xCC,0x44,0x94,0x94,0x8D,0x02,0x62,0x6F,0xEB,0x5A,
+0xDD,0x11,0x8D,0x28,0x9A,0x5C,0x84,0x90,0x10,0x7A,0x0D,0xBD,0x74,0x66,0x2F,0x6A,
+0x38,0xA0,0xE2,0xD5,0x54,0x44,0xEB,0x1D,0x07,0x9F,0x07,0xBA,0x6F,0xEE,0xE9,0xFD,
+0x4E,0x0B,0x29,0xF5,0x3E,0x84,0xA0,0x01,0xF1,0x9C,0xAB,0xF8,0x1C,0x7E,0x89,0xA4,
+0xE8,0xA1,0xD8,0x71,0x65,0x0D,0xA3,0x51,0x7B,0xEE,0xBC,0xD2,0x22,0x60,0x0D,0xB9,
+0x5B,0x9D,0xDF,0xBA,0xFC,0x51,0x5B,0x0B,0xAF,0x98,0xB2,0xE9,0x2E,0xE9,0x04,0xE8,
+0x62,0x87,0xDE,0x2B,0xC8,0xD7,0x4E,0xC1,0x4C,0x64,0x1E,0xDD,0xCF,0x87,0x58,0xBA,
+0x4A,0x4F,0xCA,0x68,0x07,0x1D,0x1C,0x9D,0x4A,0xC6,0xD5,0x2F,0x91,0xCC,0x7C,0x71,
+0x72,0x1C,0xC5,0xC0,0x67,0xEB,0x32,0xFD,0xC9,0x92,0x5C,0x94,0xDA,0x85,0xC0,0x9B,
+0xBF,0x53,0x7D,0x2B,0x09,0xF4,0x8C,0x9D,0x91,0x1F,0x97,0x6A,0x52,0xCB,0xDE,0x09,
+0x36,0xA4,0x77,0xD8,0x7B,0x87,0x50,0x44,0xD5,0x3E,0x6E,0x29,0x69,0xFB,0x39,0x49,
+0x26,0x1E,0x09,0xA5,0x80,0x7B,0x40,0x2D,0xEB,0xE8,0x27,0x85,0xC9,0xFE,0x61,0xFD,
+0x7E,0xE6,0x7C,0x97,0x1D,0xD5,0x9D,0x02,0x03,0x01,0x00,0x01,0xA3,0x42,0x30,0x40,
+0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,
+0xFF,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,
+0x06,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x7B,0x5B,0x45,0xCF,
+0xAF,0xCE,0xCB,0x7A,0xFD,0x31,0x92,0x1A,0x6A,0xB6,0xF3,0x46,0xEB,0x57,0x48,0x50,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x03,
+0x82,0x01,0x01,0x00,0x79,0x11,0xC0,0x4B,0xB3,0x91,0xB6,0xFC,0xF0,0xE9,0x67,0xD4,
+0x0D,0x6E,0x45,0xBE,0x55,0xE8,0x93,0xD2,0xCE,0x03,0x3F,0xED,0xDA,0x25,0xB0,0x1D,
+0x57,0xCB,0x1E,0x3A,0x76,0xA0,0x4C,0xEC,0x50,0x76,0xE8,0x64,0x72,0x0C,0xA4,0xA9,
+0xF1,0xB8,0x8B,0xD6,0xD6,0x87,0x84,0xBB,0x32,0xE5,0x41,0x11,0xC0,0x77,0xD9,0xB3,
+0x60,0x9D,0xEB,0x1B,0xD5,0xD1,0x6E,0x44,0x44,0xA9,0xA6,0x01,0xEC,0x55,0x62,0x1D,
+0x77,0xB8,0x5C,0x8E,0x48,0x49,0x7C,0x9C,0x3B,0x57,0x11,0xAC,0xAD,0x73,0x37,0x8E,
+0x2F,0x78,0x5C,0x90,0x68,0x47,0xD9,0x60,0x60,0xE6,0xFC,0x07,0x3D,0x22,0x20,0x17,
+0xC4,0xF7,0x16,0xE9,0xC4,0xD8,0x72,0xF9,0xC8,0x73,0x7C,0xDF,0x16,0x2F,0x15,0xA9,
+0x3E,0xFD,0x6A,0x27,0xB6,0xA1,0xEB,0x5A,0xBA,0x98,0x1F,0xD5,0xE3,0x4D,0x64,0x0A,
+0x9D,0x13,0xC8,0x61,0xBA,0xF5,0x39,0x1C,0x87,0xBA,0xB8,0xBD,0x7B,0x22,0x7F,0xF6,
+0xFE,0xAC,0x40,0x79,0xE5,0xAC,0x10,0x6F,0x3D,0x8F,0x1B,0x79,0x76,0x8B,0xC4,0x37,
+0xB3,0x21,0x18,0x84,0xE5,0x36,0x00,0xEB,0x63,0x20,0x99,0xB9,0xE9,0xFE,0x33,0x04,
+0xBB,0x41,0xC8,0xC1,0x02,0xF9,0x44,0x63,0x20,0x9E,0x81,0xCE,0x42,0xD3,0xD6,0x3F,
+0x2C,0x76,0xD3,0x63,0x9C,0x59,0xDD,0x8F,0xA6,0xE1,0x0E,0xA0,0x2E,0x41,0xF7,0x2E,
+0x95,0x47,0xCF,0xBC,0xFD,0x33,0xF3,0xF6,0x0B,0x61,0x7E,0x7E,0x91,0x2B,0x81,0x47,
+0xC2,0x27,0x30,0xEE,0xA7,0x10,0x5D,0x37,0x8F,0x5C,0x39,0x2B,0xE4,0x04,0xF0,0x7B,
+0x8D,0x56,0x8C,0x68,
+};
+
+
+/* subject:/C=SE/O=AddTrust AB/OU=AddTrust TTP Network/CN=AddTrust Public CA Root */
+/* issuer :/C=SE/O=AddTrust AB/OU=AddTrust TTP Network/CN=AddTrust Public CA Root */
+
+
+const unsigned char AddTrust_Public_Services_Root_certificate[1049]={
+0x30,0x82,0x04,0x15,0x30,0x82,0x02,0xFD,0xA0,0x03,0x02,0x01,0x02,0x02,0x01,0x01,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,
+0x64,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x53,0x45,0x31,0x14,
+0x30,0x12,0x06,0x03,0x55,0x04,0x0A,0x13,0x0B,0x41,0x64,0x64,0x54,0x72,0x75,0x73,
+0x74,0x20,0x41,0x42,0x31,0x1D,0x30,0x1B,0x06,0x03,0x55,0x04,0x0B,0x13,0x14,0x41,
+0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x54,0x54,0x50,0x20,0x4E,0x65,0x74,0x77,
+0x6F,0x72,0x6B,0x31,0x20,0x30,0x1E,0x06,0x03,0x55,0x04,0x03,0x13,0x17,0x41,0x64,
+0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x50,0x75,0x62,0x6C,0x69,0x63,0x20,0x43,0x41,
+0x20,0x52,0x6F,0x6F,0x74,0x30,0x1E,0x17,0x0D,0x30,0x30,0x30,0x35,0x33,0x30,0x31,
+0x30,0x34,0x31,0x35,0x30,0x5A,0x17,0x0D,0x32,0x30,0x30,0x35,0x33,0x30,0x31,0x30,
+0x34,0x31,0x35,0x30,0x5A,0x30,0x64,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,
+0x13,0x02,0x53,0x45,0x31,0x14,0x30,0x12,0x06,0x03,0x55,0x04,0x0A,0x13,0x0B,0x41,
+0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x41,0x42,0x31,0x1D,0x30,0x1B,0x06,0x03,
+0x55,0x04,0x0B,0x13,0x14,0x41,0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x54,0x54,
+0x50,0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,0x6B,0x31,0x20,0x30,0x1E,0x06,0x03,0x55,
+0x04,0x03,0x13,0x17,0x41,0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x50,0x75,0x62,
+0x6C,0x69,0x63,0x20,0x43,0x41,0x20,0x52,0x6F,0x6F,0x74,0x30,0x82,0x01,0x22,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,
+0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01,0x01,0x00,0xE9,0x1A,0x30,0x8F,
+0x83,0x88,0x14,0xC1,0x20,0xD8,0x3C,0x9B,0x8F,0x1B,0x7E,0x03,0x74,0xBB,0xDA,0x69,
+0xD3,0x46,0xA5,0xF8,0x8E,0xC2,0x0C,0x11,0x90,0x51,0xA5,0x2F,0x66,0x54,0x40,0x55,
+0xEA,0xDB,0x1F,0x4A,0x56,0xEE,0x9F,0x23,0x6E,0xF4,0x39,0xCB,0xA1,0xB9,0x6F,0xF2,
+0x7E,0xF9,0x5D,0x87,0x26,0x61,0x9E,0x1C,0xF8,0xE2,0xEC,0xA6,0x81,0xF8,0x21,0xC5,
+0x24,0xCC,0x11,0x0C,0x3F,0xDB,0x26,0x72,0x7A,0xC7,0x01,0x97,0x07,0x17,0xF9,0xD7,
+0x18,0x2C,0x30,0x7D,0x0E,0x7A,0x1E,0x62,0x1E,0xC6,0x4B,0xC0,0xFD,0x7D,0x62,0x77,
+0xD3,0x44,0x1E,0x27,0xF6,0x3F,0x4B,0x44,0xB3,0xB7,0x38,0xD9,0x39,0x1F,0x60,0xD5,
+0x51,0x92,0x73,0x03,0xB4,0x00,0x69,0xE3,0xF3,0x14,0x4E,0xEE,0xD1,0xDC,0x09,0xCF,
+0x77,0x34,0x46,0x50,0xB0,0xF8,0x11,0xF2,0xFE,0x38,0x79,0xF7,0x07,0x39,0xFE,0x51,
+0x92,0x97,0x0B,0x5B,0x08,0x5F,0x34,0x86,0x01,0xAD,0x88,0x97,0xEB,0x66,0xCD,0x5E,
+0xD1,0xFF,0xDC,0x7D,0xF2,0x84,0xDA,0xBA,0x77,0xAD,0xDC,0x80,0x08,0xC7,0xA7,0x87,
+0xD6,0x55,0x9F,0x97,0x6A,0xE8,0xC8,0x11,0x64,0xBA,0xE7,0x19,0x29,0x3F,0x11,0xB3,
+0x78,0x90,0x84,0x20,0x52,0x5B,0x11,0xEF,0x78,0xD0,0x83,0xF6,0xD5,0x48,0x90,0xD0,
+0x30,0x1C,0xCF,0x80,0xF9,0x60,0xFE,0x79,0xE4,0x88,0xF2,0xDD,0x00,0xEB,0x94,0x45,
+0xEB,0x65,0x94,0x69,0x40,0xBA,0xC0,0xD5,0xB4,0xB8,0xBA,0x7D,0x04,0x11,0xA8,0xEB,
+0x31,0x05,0x96,0x94,0x4E,0x58,0x21,0x8E,0x9F,0xD0,0x60,0xFD,0x02,0x03,0x01,0x00,
+0x01,0xA3,0x81,0xD1,0x30,0x81,0xCE,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,
+0x04,0x14,0x81,0x3E,0x37,0xD8,0x92,0xB0,0x1F,0x77,0x9F,0x5C,0xB4,0xAB,0x73,0xAA,
+0xE7,0xF6,0x34,0x60,0x2F,0xFA,0x30,0x0B,0x06,0x03,0x55,0x1D,0x0F,0x04,0x04,0x03,
+0x02,0x01,0x06,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,
+0x03,0x01,0x01,0xFF,0x30,0x81,0x8E,0x06,0x03,0x55,0x1D,0x23,0x04,0x81,0x86,0x30,
+0x81,0x83,0x80,0x14,0x81,0x3E,0x37,0xD8,0x92,0xB0,0x1F,0x77,0x9F,0x5C,0xB4,0xAB,
+0x73,0xAA,0xE7,0xF6,0x34,0x60,0x2F,0xFA,0xA1,0x68,0xA4,0x66,0x30,0x64,0x31,0x0B,
+0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x53,0x45,0x31,0x14,0x30,0x12,0x06,
+0x03,0x55,0x04,0x0A,0x13,0x0B,0x41,0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x41,
+0x42,0x31,0x1D,0x30,0x1B,0x06,0x03,0x55,0x04,0x0B,0x13,0x14,0x41,0x64,0x64,0x54,
+0x72,0x75,0x73,0x74,0x20,0x54,0x54,0x50,0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,0x6B,
+0x31,0x20,0x30,0x1E,0x06,0x03,0x55,0x04,0x03,0x13,0x17,0x41,0x64,0x64,0x54,0x72,
+0x75,0x73,0x74,0x20,0x50,0x75,0x62,0x6C,0x69,0x63,0x20,0x43,0x41,0x20,0x52,0x6F,
+0x6F,0x74,0x82,0x01,0x01,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,
+0x01,0x05,0x05,0x00,0x03,0x82,0x01,0x01,0x00,0x03,0xF7,0x15,0x4A,0xF8,0x24,0xDA,
+0x23,0x56,0x16,0x93,0x76,0xDD,0x36,0x28,0xB9,0xAE,0x1B,0xB8,0xC3,0xF1,0x64,0xBA,
+0x20,0x18,0x78,0x95,0x29,0x27,0x57,0x05,0xBC,0x7C,0x2A,0xF4,0xB9,0x51,0x55,0xDA,
+0x87,0x02,0xDE,0x0F,0x16,0x17,0x31,0xF8,0xAA,0x79,0x2E,0x09,0x13,0xBB,0xAF,0xB2,
+0x20,0x19,0x12,0xE5,0x93,0xF9,0x4B,0xF9,0x83,0xE8,0x44,0xD5,0xB2,0x41,0x25,0xBF,
+0x88,0x75,0x6F,0xFF,0x10,0xFC,0x4A,0x54,0xD0,0x5F,0xF0,0xFA,0xEF,0x36,0x73,0x7D,
+0x1B,0x36,0x45,0xC6,0x21,0x6D,0xB4,0x15,0xB8,0x4E,0xCF,0x9C,0x5C,0xA5,0x3D,0x5A,
+0x00,0x8E,0x06,0xE3,0x3C,0x6B,0x32,0x7B,0xF2,0x9F,0xF0,0xB6,0xFD,0xDF,0xF0,0x28,
+0x18,0x48,0xF0,0xC6,0xBC,0xD0,0xBF,0x34,0x80,0x96,0xC2,0x4A,0xB1,0x6D,0x8E,0xC7,
+0x90,0x45,0xDE,0x2F,0x67,0xAC,0x45,0x04,0xA3,0x7A,0xDC,0x55,0x92,0xC9,0x47,0x66,
+0xD8,0x1A,0x8C,0xC7,0xED,0x9C,0x4E,0x9A,0xE0,0x12,0xBB,0xB5,0x6A,0x4C,0x84,0xE1,
+0xE1,0x22,0x0D,0x87,0x00,0x64,0xFE,0x8C,0x7D,0x62,0x39,0x65,0xA6,0xEF,0x42,0xB6,
+0x80,0x25,0x12,0x61,0x01,0xA8,0x24,0x13,0x70,0x00,0x11,0x26,0x5F,0xFA,0x35,0x50,
+0xC5,0x48,0xCC,0x06,0x47,0xE8,0x27,0xD8,0x70,0x8D,0x5F,0x64,0xE6,0xA1,0x44,0x26,
+0x5E,0x22,0xEC,0x92,0xCD,0xFF,0x42,0x9A,0x44,0x21,0x6D,0x5C,0xC5,0xE3,0x22,0x1D,
+0x5F,0x47,0x12,0xE7,0xCE,0x5F,0x5D,0xFA,0xD8,0xAA,0xB1,0x33,0x2D,0xD9,0x76,0xF2,
+0x4E,0x3A,0x33,0x0C,0x2B,0xB3,0x2D,0x90,0x06,
+};
+
+
+/* subject:/C=SE/O=AddTrust AB/OU=AddTrust TTP Network/CN=AddTrust Qualified CA Root */
+/* issuer :/C=SE/O=AddTrust AB/OU=AddTrust TTP Network/CN=AddTrust Qualified CA Root */
+
+
+const unsigned char AddTrust_Qualified_Certificates_Root_certificate[1058]={
+0x30,0x82,0x04,0x1E,0x30,0x82,0x03,0x06,0xA0,0x03,0x02,0x01,0x02,0x02,0x01,0x01,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,
+0x67,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x53,0x45,0x31,0x14,
+0x30,0x12,0x06,0x03,0x55,0x04,0x0A,0x13,0x0B,0x41,0x64,0x64,0x54,0x72,0x75,0x73,
+0x74,0x20,0x41,0x42,0x31,0x1D,0x30,0x1B,0x06,0x03,0x55,0x04,0x0B,0x13,0x14,0x41,
+0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x54,0x54,0x50,0x20,0x4E,0x65,0x74,0x77,
+0x6F,0x72,0x6B,0x31,0x23,0x30,0x21,0x06,0x03,0x55,0x04,0x03,0x13,0x1A,0x41,0x64,
+0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x51,0x75,0x61,0x6C,0x69,0x66,0x69,0x65,0x64,
+0x20,0x43,0x41,0x20,0x52,0x6F,0x6F,0x74,0x30,0x1E,0x17,0x0D,0x30,0x30,0x30,0x35,
+0x33,0x30,0x31,0x30,0x34,0x34,0x35,0x30,0x5A,0x17,0x0D,0x32,0x30,0x30,0x35,0x33,
+0x30,0x31,0x30,0x34,0x34,0x35,0x30,0x5A,0x30,0x67,0x31,0x0B,0x30,0x09,0x06,0x03,
+0x55,0x04,0x06,0x13,0x02,0x53,0x45,0x31,0x14,0x30,0x12,0x06,0x03,0x55,0x04,0x0A,
+0x13,0x0B,0x41,0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x41,0x42,0x31,0x1D,0x30,
+0x1B,0x06,0x03,0x55,0x04,0x0B,0x13,0x14,0x41,0x64,0x64,0x54,0x72,0x75,0x73,0x74,
+0x20,0x54,0x54,0x50,0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,0x6B,0x31,0x23,0x30,0x21,
+0x06,0x03,0x55,0x04,0x03,0x13,0x1A,0x41,0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,
+0x51,0x75,0x61,0x6C,0x69,0x66,0x69,0x65,0x64,0x20,0x43,0x41,0x20,0x52,0x6F,0x6F,
+0x74,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,
+0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01,
+0x01,0x00,0xE4,0x1E,0x9A,0xFE,0xDC,0x09,0x5A,0x87,0xA4,0x9F,0x47,0xBE,0x11,0x5F,
+0xAF,0x84,0x34,0xDB,0x62,0x3C,0x79,0x78,0xB7,0xE9,0x30,0xB5,0xEC,0x0C,0x1C,0x2A,
+0xC4,0x16,0xFF,0xE0,0xEC,0x71,0xEB,0x8A,0xF5,0x11,0x6E,0xED,0x4F,0x0D,0x91,0xD2,
+0x12,0x18,0x2D,0x49,0x15,0x01,0xC2,0xA4,0x22,0x13,0xC7,0x11,0x64,0xFF,0x22,0x12,
+0x9A,0xB9,0x8E,0x5C,0x2F,0x08,0xCF,0x71,0x6A,0xB3,0x67,0x01,0x59,0xF1,0x5D,0x46,
+0xF3,0xB0,0x78,0xA5,0xF6,0x0E,0x42,0x7A,0xE3,0x7F,0x1B,0xCC,0xD0,0xF0,0xB7,0x28,
+0xFD,0x2A,0xEA,0x9E,0xB3,0xB0,0xB9,0x04,0xAA,0xFD,0xF6,0xC7,0xB4,0xB1,0xB8,0x2A,
+0xA0,0xFB,0x58,0xF1,0x19,0xA0,0x6F,0x70,0x25,0x7E,0x3E,0x69,0x4A,0x7F,0x0F,0x22,
+0xD8,0xEF,0xAD,0x08,0x11,0x9A,0x29,0x99,0xE1,0xAA,0x44,0x45,0x9A,0x12,0x5E,0x3E,
+0x9D,0x6D,0x52,0xFC,0xE7,0xA0,0x3D,0x68,0x2F,0xF0,0x4B,0x70,0x7C,0x13,0x38,0xAD,
+0xBC,0x15,0x25,0xF1,0xD6,0xCE,0xAB,0xA2,0xC0,0x31,0xD6,0x2F,0x9F,0xE0,0xFF,0x14,
+0x59,0xFC,0x84,0x93,0xD9,0x87,0x7C,0x4C,0x54,0x13,0xEB,0x9F,0xD1,0x2D,0x11,0xF8,
+0x18,0x3A,0x3A,0xDE,0x25,0xD9,0xF7,0xD3,0x40,0xED,0xA4,0x06,0x12,0xC4,0x3B,0xE1,
+0x91,0xC1,0x56,0x35,0xF0,0x14,0xDC,0x65,0x36,0x09,0x6E,0xAB,0xA4,0x07,0xC7,0x35,
+0xD1,0xC2,0x03,0x33,0x36,0x5B,0x75,0x26,0x6D,0x42,0xF1,0x12,0x6B,0x43,0x6F,0x4B,
+0x71,0x94,0xFA,0x34,0x1D,0xED,0x13,0x6E,0xCA,0x80,0x7F,0x98,0x2F,0x6C,0xB9,0x65,
+0xD8,0xE9,0x02,0x03,0x01,0x00,0x01,0xA3,0x81,0xD4,0x30,0x81,0xD1,0x30,0x1D,0x06,
+0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x39,0x95,0x8B,0x62,0x8B,0x5C,0xC9,0xD4,
+0x80,0xBA,0x58,0x0F,0x97,0x3F,0x15,0x08,0x43,0xCC,0x98,0xA7,0x30,0x0B,0x06,0x03,
+0x55,0x1D,0x0F,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,
+0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x81,0x91,0x06,0x03,0x55,
+0x1D,0x23,0x04,0x81,0x89,0x30,0x81,0x86,0x80,0x14,0x39,0x95,0x8B,0x62,0x8B,0x5C,
+0xC9,0xD4,0x80,0xBA,0x58,0x0F,0x97,0x3F,0x15,0x08,0x43,0xCC,0x98,0xA7,0xA1,0x6B,
+0xA4,0x69,0x30,0x67,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x53,
+0x45,0x31,0x14,0x30,0x12,0x06,0x03,0x55,0x04,0x0A,0x13,0x0B,0x41,0x64,0x64,0x54,
+0x72,0x75,0x73,0x74,0x20,0x41,0x42,0x31,0x1D,0x30,0x1B,0x06,0x03,0x55,0x04,0x0B,
+0x13,0x14,0x41,0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x54,0x54,0x50,0x20,0x4E,
+0x65,0x74,0x77,0x6F,0x72,0x6B,0x31,0x23,0x30,0x21,0x06,0x03,0x55,0x04,0x03,0x13,
+0x1A,0x41,0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x51,0x75,0x61,0x6C,0x69,0x66,
+0x69,0x65,0x64,0x20,0x43,0x41,0x20,0x52,0x6F,0x6F,0x74,0x82,0x01,0x01,0x30,0x0D,
+0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x03,0x82,0x01,
+0x01,0x00,0x19,0xAB,0x75,0xEA,0xF8,0x8B,0x65,0x61,0x95,0x13,0xBA,0x69,0x04,0xEF,
+0x86,0xCA,0x13,0xA0,0xC7,0xAA,0x4F,0x64,0x1B,0x3F,0x18,0xF6,0xA8,0x2D,0x2C,0x55,
+0x8F,0x05,0xB7,0x30,0xEA,0x42,0x6A,0x1D,0xC0,0x25,0x51,0x2D,0xA7,0xBF,0x0C,0xB3,
+0xED,0xEF,0x08,0x7F,0x6C,0x3C,0x46,0x1A,0xEA,0x18,0x43,0xDF,0x76,0xCC,0xF9,0x66,
+0x86,0x9C,0x2C,0x68,0xF5,0xE9,0x17,0xF8,0x31,0xB3,0x18,0xC4,0xD6,0x48,0x7D,0x23,
+0x4C,0x68,0xC1,0x7E,0xBB,0x01,0x14,0x6F,0xC5,0xD9,0x6E,0xDE,0xBB,0x04,0x42,0x6A,
+0xF8,0xF6,0x5C,0x7D,0xE5,0xDA,0xFA,0x87,0xEB,0x0D,0x35,0x52,0x67,0xD0,0x9E,0x97,
+0x76,0x05,0x93,0x3F,0x95,0xC7,0x01,0xE6,0x69,0x55,0x38,0x7F,0x10,0x61,0x99,0xC9,
+0xE3,0x5F,0xA6,0xCA,0x3E,0x82,0x63,0x48,0xAA,0xE2,0x08,0x48,0x3E,0xAA,0xF2,0xB2,
+0x85,0x62,0xA6,0xB4,0xA7,0xD9,0xBD,0x37,0x9C,0x68,0xB5,0x2D,0x56,0x7D,0xB0,0xB7,
+0x3F,0xA0,0xB1,0x07,0xD6,0xE9,0x4F,0xDC,0xDE,0x45,0x71,0x30,0x32,0x7F,0x1B,0x2E,
+0x09,0xF9,0xBF,0x52,0xA1,0xEE,0xC2,0x80,0x3E,0x06,0x5C,0x2E,0x55,0x40,0xC1,0x1B,
+0xF5,0x70,0x45,0xB0,0xDC,0x5D,0xFA,0xF6,0x72,0x5A,0x77,0xD2,0x63,0xCD,0xCF,0x58,
+0x89,0x00,0x42,0x63,0x3F,0x79,0x39,0xD0,0x44,0xB0,0x82,0x6E,0x41,0x19,0xE8,0xDD,
+0xE0,0xC1,0x88,0x5A,0xD1,0x1E,0x71,0x93,0x1F,0x24,0x30,0x74,0xE5,0x1E,0xA8,0xDE,
+0x3C,0x27,0x37,0x7F,0x83,0xAE,0x9E,0x77,0xCF,0xF0,0x30,0xB1,0xFF,0x4B,0x99,0xE8,
+0xC6,0xA1,
+};
+
+
+/* subject:/C=US/O=GeoTrust Inc./OU=(c) 2008 GeoTrust Inc. - For authorized use only/CN=GeoTrust Primary Certification Authority - G3 */
+/* issuer :/C=US/O=GeoTrust Inc./OU=(c) 2008 GeoTrust Inc. - For authorized use only/CN=GeoTrust Primary Certification Authority - G3 */
+
+
+const unsigned char GeoTrust_Primary_Certification_Authority___G3_certificate[1026]={
+0x30,0x82,0x03,0xFE,0x30,0x82,0x02,0xE6,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x15,
+0xAC,0x6E,0x94,0x19,0xB2,0x79,0x4B,0x41,0xF6,0x27,0xA9,0xC3,0x18,0x0F,0x1F,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0B,0x05,0x00,0x30,0x81,
+0x98,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x16,
+0x30,0x14,0x06,0x03,0x55,0x04,0x0A,0x13,0x0D,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,
+0x74,0x20,0x49,0x6E,0x63,0x2E,0x31,0x39,0x30,0x37,0x06,0x03,0x55,0x04,0x0B,0x13,
+0x30,0x28,0x63,0x29,0x20,0x32,0x30,0x30,0x38,0x20,0x47,0x65,0x6F,0x54,0x72,0x75,
+0x73,0x74,0x20,0x49,0x6E,0x63,0x2E,0x20,0x2D,0x20,0x46,0x6F,0x72,0x20,0x61,0x75,
+0x74,0x68,0x6F,0x72,0x69,0x7A,0x65,0x64,0x20,0x75,0x73,0x65,0x20,0x6F,0x6E,0x6C,
+0x79,0x31,0x36,0x30,0x34,0x06,0x03,0x55,0x04,0x03,0x13,0x2D,0x47,0x65,0x6F,0x54,
+0x72,0x75,0x73,0x74,0x20,0x50,0x72,0x69,0x6D,0x61,0x72,0x79,0x20,0x43,0x65,0x72,
+0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,
+0x72,0x69,0x74,0x79,0x20,0x2D,0x20,0x47,0x33,0x30,0x1E,0x17,0x0D,0x30,0x38,0x30,
+0x34,0x30,0x32,0x30,0x30,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x33,0x37,0x31,0x32,
+0x30,0x31,0x32,0x33,0x35,0x39,0x35,0x39,0x5A,0x30,0x81,0x98,0x31,0x0B,0x30,0x09,
+0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x16,0x30,0x14,0x06,0x03,0x55,
+0x04,0x0A,0x13,0x0D,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,0x74,0x20,0x49,0x6E,0x63,
+0x2E,0x31,0x39,0x30,0x37,0x06,0x03,0x55,0x04,0x0B,0x13,0x30,0x28,0x63,0x29,0x20,
+0x32,0x30,0x30,0x38,0x20,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,0x74,0x20,0x49,0x6E,
+0x63,0x2E,0x20,0x2D,0x20,0x46,0x6F,0x72,0x20,0x61,0x75,0x74,0x68,0x6F,0x72,0x69,
+0x7A,0x65,0x64,0x20,0x75,0x73,0x65,0x20,0x6F,0x6E,0x6C,0x79,0x31,0x36,0x30,0x34,
+0x06,0x03,0x55,0x04,0x03,0x13,0x2D,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,0x74,0x20,
+0x50,0x72,0x69,0x6D,0x61,0x72,0x79,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,
+0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x20,
+0x2D,0x20,0x47,0x33,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,
+0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,
+0x02,0x82,0x01,0x01,0x00,0xDC,0xE2,0x5E,0x62,0x58,0x1D,0x33,0x57,0x39,0x32,0x33,
+0xFA,0xEB,0xCB,0x87,0x8C,0xA7,0xD4,0x4A,0xDD,0x06,0x88,0xEA,0x64,0x8E,0x31,0x98,
+0xA5,0x38,0x90,0x1E,0x98,0xCF,0x2E,0x63,0x2B,0xF0,0x46,0xBC,0x44,0xB2,0x89,0xA1,
+0xC0,0x28,0x0C,0x49,0x70,0x21,0x95,0x9F,0x64,0xC0,0xA6,0x93,0x12,0x02,0x65,0x26,
+0x86,0xC6,0xA5,0x89,0xF0,0xFA,0xD7,0x84,0xA0,0x70,0xAF,0x4F,0x1A,0x97,0x3F,0x06,
+0x44,0xD5,0xC9,0xEB,0x72,0x10,0x7D,0xE4,0x31,0x28,0xFB,0x1C,0x61,0xE6,0x28,0x07,
+0x44,0x73,0x92,0x22,0x69,0xA7,0x03,0x88,0x6C,0x9D,0x63,0xC8,0x52,0xDA,0x98,0x27,
+0xE7,0x08,0x4C,0x70,0x3E,0xB4,0xC9,0x12,0xC1,0xC5,0x67,0x83,0x5D,0x33,0xF3,0x03,
+0x11,0xEC,0x6A,0xD0,0x53,0xE2,0xD1,0xBA,0x36,0x60,0x94,0x80,0xBB,0x61,0x63,0x6C,
+0x5B,0x17,0x7E,0xDF,0x40,0x94,0x1E,0xAB,0x0D,0xC2,0x21,0x28,0x70,0x88,0xFF,0xD6,
+0x26,0x6C,0x6C,0x60,0x04,0x25,0x4E,0x55,0x7E,0x7D,0xEF,0xBF,0x94,0x48,0xDE,0xB7,
+0x1D,0xDD,0x70,0x8D,0x05,0x5F,0x88,0xA5,0x9B,0xF2,0xC2,0xEE,0xEA,0xD1,0x40,0x41,
+0x6D,0x62,0x38,0x1D,0x56,0x06,0xC5,0x03,0x47,0x51,0x20,0x19,0xFC,0x7B,0x10,0x0B,
+0x0E,0x62,0xAE,0x76,0x55,0xBF,0x5F,0x77,0xBE,0x3E,0x49,0x01,0x53,0x3D,0x98,0x25,
+0x03,0x76,0x24,0x5A,0x1D,0xB4,0xDB,0x89,0xEA,0x79,0xE5,0xB6,0xB3,0x3B,0x3F,0xBA,
+0x4C,0x28,0x41,0x7F,0x06,0xAC,0x6A,0x8E,0xC1,0xD0,0xF6,0x05,0x1D,0x7D,0xE6,0x42,
+0x86,0xE3,0xA5,0xD5,0x47,0x02,0x03,0x01,0x00,0x01,0xA3,0x42,0x30,0x40,0x30,0x0F,
+0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,
+0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,
+0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0xC4,0x79,0xCA,0x8E,0xA1,0x4E,
+0x03,0x1D,0x1C,0xDC,0x6B,0xDB,0x31,0x5B,0x94,0x3E,0x3F,0x30,0x7F,0x2D,0x30,0x0D,
+0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0B,0x05,0x00,0x03,0x82,0x01,
+0x01,0x00,0x2D,0xC5,0x13,0xCF,0x56,0x80,0x7B,0x7A,0x78,0xBD,0x9F,0xAE,0x2C,0x99,
+0xE7,0xEF,0xDA,0xDF,0x94,0x5E,0x09,0x69,0xA7,0xE7,0x6E,0x68,0x8C,0xBD,0x72,0xBE,
+0x47,0xA9,0x0E,0x97,0x12,0xB8,0x4A,0xF1,0x64,0xD3,0x39,0xDF,0x25,0x34,0xD4,0xC1,
+0xCD,0x4E,0x81,0xF0,0x0F,0x04,0xC4,0x24,0xB3,0x34,0x96,0xC6,0xA6,0xAA,0x30,0xDF,
+0x68,0x61,0x73,0xD7,0xF9,0x8E,0x85,0x89,0xEF,0x0E,0x5E,0x95,0x28,0x4A,0x2A,0x27,
+0x8F,0x10,0x8E,0x2E,0x7C,0x86,0xC4,0x02,0x9E,0xDA,0x0C,0x77,0x65,0x0E,0x44,0x0D,
+0x92,0xFD,0xFD,0xB3,0x16,0x36,0xFA,0x11,0x0D,0x1D,0x8C,0x0E,0x07,0x89,0x6A,0x29,
+0x56,0xF7,0x72,0xF4,0xDD,0x15,0x9C,0x77,0x35,0x66,0x57,0xAB,0x13,0x53,0xD8,0x8E,
+0xC1,0x40,0xC5,0xD7,0x13,0x16,0x5A,0x72,0xC7,0xB7,0x69,0x01,0xC4,0x7A,0xB1,0x83,
+0x01,0x68,0x7D,0x8D,0x41,0xA1,0x94,0x18,0xC1,0x25,0x5C,0xFC,0xF0,0xFE,0x83,0x02,
+0x87,0x7C,0x0D,0x0D,0xCF,0x2E,0x08,0x5C,0x4A,0x40,0x0D,0x3E,0xEC,0x81,0x61,0xE6,
+0x24,0xDB,0xCA,0xE0,0x0E,0x2D,0x07,0xB2,0x3E,0x56,0xDC,0x8D,0xF5,0x41,0x85,0x07,
+0x48,0x9B,0x0C,0x0B,0xCB,0x49,0x3F,0x7D,0xEC,0xB7,0xFD,0xCB,0x8D,0x67,0x89,0x1A,
+0xAB,0xED,0xBB,0x1E,0xA3,0x00,0x08,0x08,0x17,0x2A,0x82,0x5C,0x31,0x5D,0x46,0x8A,
+0x2D,0x0F,0x86,0x9B,0x74,0xD9,0x45,0xFB,0xD4,0x40,0xB1,0x7A,0xAA,0x68,0x2D,0x86,
+0xB2,0x99,0x22,0xE1,0xC1,0x2B,0xC7,0x9C,0xF8,0xF3,0x5F,0xA8,0x82,0x12,0xEB,0x19,
+0x11,0x2D,
+};
+
+
+/* subject:/C=US/O=GeoTrust Inc./CN=GeoTrust Universal CA 2 */
+/* issuer :/C=US/O=GeoTrust Inc./CN=GeoTrust Universal CA 2 */
+
+
+const unsigned char GeoTrust_Universal_CA_2_certificate[1392]={
+0x30,0x82,0x05,0x6C,0x30,0x82,0x03,0x54,0xA0,0x03,0x02,0x01,0x02,0x02,0x01,0x01,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,
+0x47,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x16,
+0x30,0x14,0x06,0x03,0x55,0x04,0x0A,0x13,0x0D,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,
+0x74,0x20,0x49,0x6E,0x63,0x2E,0x31,0x20,0x30,0x1E,0x06,0x03,0x55,0x04,0x03,0x13,
+0x17,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,0x74,0x20,0x55,0x6E,0x69,0x76,0x65,0x72,
+0x73,0x61,0x6C,0x20,0x43,0x41,0x20,0x32,0x30,0x1E,0x17,0x0D,0x30,0x34,0x30,0x33,
+0x30,0x34,0x30,0x35,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x32,0x39,0x30,0x33,0x30,
+0x34,0x30,0x35,0x30,0x30,0x30,0x30,0x5A,0x30,0x47,0x31,0x0B,0x30,0x09,0x06,0x03,
+0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x16,0x30,0x14,0x06,0x03,0x55,0x04,0x0A,
+0x13,0x0D,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,0x74,0x20,0x49,0x6E,0x63,0x2E,0x31,
+0x20,0x30,0x1E,0x06,0x03,0x55,0x04,0x03,0x13,0x17,0x47,0x65,0x6F,0x54,0x72,0x75,
+0x73,0x74,0x20,0x55,0x6E,0x69,0x76,0x65,0x72,0x73,0x61,0x6C,0x20,0x43,0x41,0x20,
+0x32,0x30,0x82,0x02,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,
+0x01,0x01,0x05,0x00,0x03,0x82,0x02,0x0F,0x00,0x30,0x82,0x02,0x0A,0x02,0x82,0x02,
+0x01,0x00,0xB3,0x54,0x52,0xC1,0xC9,0x3E,0xF2,0xD9,0xDC,0xB1,0x53,0x1A,0x59,0x29,
+0xE7,0xB1,0xC3,0x45,0x28,0xE5,0xD7,0xD1,0xED,0xC5,0xC5,0x4B,0xA1,0xAA,0x74,0x7B,
+0x57,0xAF,0x4A,0x26,0xFC,0xD8,0xF5,0x5E,0xA7,0x6E,0x19,0xDB,0x74,0x0C,0x4F,0x35,
+0x5B,0x32,0x0B,0x01,0xE3,0xDB,0xEB,0x7A,0x77,0x35,0xEA,0xAA,0x5A,0xE0,0xD6,0xE8,
+0xA1,0x57,0x94,0xF0,0x90,0xA3,0x74,0x56,0x94,0x44,0x30,0x03,0x1E,0x5C,0x4E,0x2B,
+0x85,0x26,0x74,0x82,0x7A,0x0C,0x76,0xA0,0x6F,0x4D,0xCE,0x41,0x2D,0xA0,0x15,0x06,
+0x14,0x5F,0xB7,0x42,0xCD,0x7B,0x8F,0x58,0x61,0x34,0xDC,0x2A,0x08,0xF9,0x2E,0xC3,
+0x01,0xA6,0x22,0x44,0x1C,0x4C,0x07,0x82,0xE6,0x5B,0xCE,0xD0,0x4A,0x7C,0x04,0xD3,
+0x19,0x73,0x27,0xF0,0xAA,0x98,0x7F,0x2E,0xAF,0x4E,0xEB,0x87,0x1E,0x24,0x77,0x6A,
+0x5D,0xB6,0xE8,0x5B,0x45,0xBA,0xDC,0xC3,0xA1,0x05,0x6F,0x56,0x8E,0x8F,0x10,0x26,
+0xA5,0x49,0xC3,0x2E,0xD7,0x41,0x87,0x22,0xE0,0x4F,0x86,0xCA,0x60,0xB5,0xEA,0xA1,
+0x63,0xC0,0x01,0x97,0x10,0x79,0xBD,0x00,0x3C,0x12,0x6D,0x2B,0x15,0xB1,0xAC,0x4B,
+0xB1,0xEE,0x18,0xB9,0x4E,0x96,0xDC,0xDC,0x76,0xFF,0x3B,0xBE,0xCF,0x5F,0x03,0xC0,
+0xFC,0x3B,0xE8,0xBE,0x46,0x1B,0xFF,0xDA,0x40,0xC2,0x52,0xF7,0xFE,0xE3,0x3A,0xF7,
+0x6A,0x77,0x35,0xD0,0xDA,0x8D,0xEB,0x5E,0x18,0x6A,0x31,0xC7,0x1E,0xBA,0x3C,0x1B,
+0x28,0xD6,0x6B,0x54,0xC6,0xAA,0x5B,0xD7,0xA2,0x2C,0x1B,0x19,0xCC,0xA2,0x02,0xF6,
+0x9B,0x59,0xBD,0x37,0x6B,0x86,0xB5,0x6D,0x82,0xBA,0xD8,0xEA,0xC9,0x56,0xBC,0xA9,
+0x36,0x58,0xFD,0x3E,0x19,0xF3,0xED,0x0C,0x26,0xA9,0x93,0x38,0xF8,0x4F,0xC1,0x5D,
+0x22,0x06,0xD0,0x97,0xEA,0xE1,0xAD,0xC6,0x55,0xE0,0x81,0x2B,0x28,0x83,0x3A,0xFA,
+0xF4,0x7B,0x21,0x51,0x00,0xBE,0x52,0x38,0xCE,0xCD,0x66,0x79,0xA8,0xF4,0x81,0x56,
+0xE2,0xD0,0x83,0x09,0x47,0x51,0x5B,0x50,0x6A,0xCF,0xDB,0x48,0x1A,0x5D,0x3E,0xF7,
+0xCB,0xF6,0x65,0xF7,0x6C,0xF1,0x95,0xF8,0x02,0x3B,0x32,0x56,0x82,0x39,0x7A,0x5B,
+0xBD,0x2F,0x89,0x1B,0xBF,0xA1,0xB4,0xE8,0xFF,0x7F,0x8D,0x8C,0xDF,0x03,0xF1,0x60,
+0x4E,0x58,0x11,0x4C,0xEB,0xA3,0x3F,0x10,0x2B,0x83,0x9A,0x01,0x73,0xD9,0x94,0x6D,
+0x84,0x00,0x27,0x66,0xAC,0xF0,0x70,0x40,0x09,0x42,0x92,0xAD,0x4F,0x93,0x0D,0x61,
+0x09,0x51,0x24,0xD8,0x92,0xD5,0x0B,0x94,0x61,0xB2,0x87,0xB2,0xED,0xFF,0x9A,0x35,
+0xFF,0x85,0x54,0xCA,0xED,0x44,0x43,0xAC,0x1B,0x3C,0x16,0x6B,0x48,0x4A,0x0A,0x1C,
+0x40,0x88,0x1F,0x92,0xC2,0x0B,0x00,0x05,0xFF,0xF2,0xC8,0x02,0x4A,0xA4,0xAA,0xA9,
+0xCC,0x99,0x96,0x9C,0x2F,0x58,0xE0,0x7D,0xE1,0xBE,0xBB,0x07,0xDC,0x5F,0x04,0x72,
+0x5C,0x31,0x34,0xC3,0xEC,0x5F,0x2D,0xE0,0x3D,0x64,0x90,0x22,0xE6,0xD1,0xEC,0xB8,
+0x2E,0xDD,0x59,0xAE,0xD9,0xA1,0x37,0xBF,0x54,0x35,0xDC,0x73,0x32,0x4F,0x8C,0x04,
+0x1E,0x33,0xB2,0xC9,0x46,0xF1,0xD8,0x5C,0xC8,0x55,0x50,0xC9,0x68,0xBD,0xA8,0xBA,
+0x36,0x09,0x02,0x03,0x01,0x00,0x01,0xA3,0x63,0x30,0x61,0x30,0x0F,0x06,0x03,0x55,
+0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x1D,0x06,0x03,
+0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x76,0xF3,0x55,0xE1,0xFA,0xA4,0x36,0xFB,0xF0,
+0x9F,0x5C,0x62,0x71,0xED,0x3C,0xF4,0x47,0x38,0x10,0x2B,0x30,0x1F,0x06,0x03,0x55,
+0x1D,0x23,0x04,0x18,0x30,0x16,0x80,0x14,0x76,0xF3,0x55,0xE1,0xFA,0xA4,0x36,0xFB,
+0xF0,0x9F,0x5C,0x62,0x71,0xED,0x3C,0xF4,0x47,0x38,0x10,0x2B,0x30,0x0E,0x06,0x03,
+0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x86,0x30,0x0D,0x06,0x09,
+0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x03,0x82,0x02,0x01,0x00,
+0x66,0xC1,0xC6,0x23,0xF3,0xD9,0xE0,0x2E,0x6E,0x5F,0xE8,0xCF,0xAE,0xB0,0xB0,0x25,
+0x4D,0x2B,0xF8,0x3B,0x58,0x9B,0x40,0x24,0x37,0x5A,0xCB,0xAB,0x16,0x49,0xFF,0xB3,
+0x75,0x79,0x33,0xA1,0x2F,0x6D,0x70,0x17,0x34,0x91,0xFE,0x67,0x7E,0x8F,0xEC,0x9B,
+0xE5,0x5E,0x82,0xA9,0x55,0x1F,0x2F,0xDC,0xD4,0x51,0x07,0x12,0xFE,0xAC,0x16,0x3E,
+0x2C,0x35,0xC6,0x63,0xFC,0xDC,0x10,0xEB,0x0D,0xA3,0xAA,0xD0,0x7C,0xCC,0xD1,0xD0,
+0x2F,0x51,0x2E,0xC4,0x14,0x5A,0xDE,0xE8,0x19,0xE1,0x3E,0xC6,0xCC,0xA4,0x29,0xE7,
+0x2E,0x84,0xAA,0x06,0x30,0x78,0x76,0x54,0x73,0x28,0x98,0x59,0x38,0xE0,0x00,0x0D,
+0x62,0xD3,0x42,0x7D,0x21,0x9F,0xAE,0x3D,0x3A,0x8C,0xD5,0xFA,0x77,0x0D,0x18,0x2B,
+0x16,0x0E,0x5F,0x36,0xE1,0xFC,0x2A,0xB5,0x30,0x24,0xCF,0xE0,0x63,0x0C,0x7B,0x58,
+0x1A,0xFE,0x99,0xBA,0x42,0x12,0xB1,0x91,0xF4,0x7C,0x68,0xE2,0xC8,0xE8,0xAF,0x2C,
+0xEA,0xC9,0x7E,0xAE,0xBB,0x2A,0x3D,0x0D,0x15,0xDC,0x34,0x95,0xB6,0x18,0x74,0xA8,
+0x6A,0x0F,0xC7,0xB4,0xF4,0x13,0xC4,0xE4,0x5B,0xED,0x0A,0xD2,0xA4,0x97,0x4C,0x2A,
+0xED,0x2F,0x6C,0x12,0x89,0x3D,0xF1,0x27,0x70,0xAA,0x6A,0x03,0x52,0x21,0x9F,0x40,
+0xA8,0x67,0x50,0xF2,0xF3,0x5A,0x1F,0xDF,0xDF,0x23,0xF6,0xDC,0x78,0x4E,0xE6,0x98,
+0x4F,0x55,0x3A,0x53,0xE3,0xEF,0xF2,0xF4,0x9F,0xC7,0x7C,0xD8,0x58,0xAF,0x29,0x22,
+0x97,0xB8,0xE0,0xBD,0x91,0x2E,0xB0,0x76,0xEC,0x57,0x11,0xCF,0xEF,0x29,0x44,0xF3,
+0xE9,0x85,0x7A,0x60,0x63,0xE4,0x5D,0x33,0x89,0x17,0xD9,0x31,0xAA,0xDA,0xD6,0xF3,
+0x18,0x35,0x72,0xCF,0x87,0x2B,0x2F,0x63,0x23,0x84,0x5D,0x84,0x8C,0x3F,0x57,0xA0,
+0x88,0xFC,0x99,0x91,0x28,0x26,0x69,0x99,0xD4,0x8F,0x97,0x44,0xBE,0x8E,0xD5,0x48,
+0xB1,0xA4,0x28,0x29,0xF1,0x15,0xB4,0xE1,0xE5,0x9E,0xDD,0xF8,0x8F,0xA6,0x6F,0x26,
+0xD7,0x09,0x3C,0x3A,0x1C,0x11,0x0E,0xA6,0x6C,0x37,0xF7,0xAD,0x44,0x87,0x2C,0x28,
+0xC7,0xD8,0x74,0x82,0xB3,0xD0,0x6F,0x4A,0x57,0xBB,0x35,0x29,0x27,0xA0,0x8B,0xE8,
+0x21,0xA7,0x87,0x64,0x36,0x5D,0xCC,0xD8,0x16,0xAC,0xC7,0xB2,0x27,0x40,0x92,0x55,
+0x38,0x28,0x8D,0x51,0x6E,0xDD,0x14,0x67,0x53,0x6C,0x71,0x5C,0x26,0x84,0x4D,0x75,
+0x5A,0xB6,0x7E,0x60,0x56,0xA9,0x4D,0xAD,0xFB,0x9B,0x1E,0x97,0xF3,0x0D,0xD9,0xD2,
+0x97,0x54,0x77,0xDA,0x3D,0x12,0xB7,0xE0,0x1E,0xEF,0x08,0x06,0xAC,0xF9,0x85,0x87,
+0xE9,0xA2,0xDC,0xAF,0x7E,0x18,0x12,0x83,0xFD,0x56,0x17,0x41,0x2E,0xD5,0x29,0x82,
+0x7D,0x99,0xF4,0x31,0xF6,0x71,0xA9,0xCF,0x2C,0x01,0x27,0xA5,0x05,0xB9,0xAA,0xB2,
+0x48,0x4E,0x2A,0xEF,0x9F,0x93,0x52,0x51,0x95,0x3C,0x52,0x73,0x8E,0x56,0x4C,0x17,
+0x40,0xC0,0x09,0x28,0xE4,0x8B,0x6A,0x48,0x53,0xDB,0xEC,0xCD,0x55,0x55,0xF1,0xC6,
+0xF8,0xE9,0xA2,0x2C,0x4C,0xA6,0xD1,0x26,0x5F,0x7E,0xAF,0x5A,0x4C,0xDA,0x1F,0xA6,
+0xF2,0x1C,0x2C,0x7E,0xAE,0x02,0x16,0xD2,0x56,0xD0,0x2F,0x57,0x53,0x47,0xE8,0x92,
+};
+
+
+/* subject:/C=IE/O=Baltimore/OU=CyberTrust/CN=Baltimore CyberTrust Root */
+/* issuer :/C=IE/O=Baltimore/OU=CyberTrust/CN=Baltimore CyberTrust Root */
+
+
+const unsigned char Baltimore_CyberTrust_Root_certificate[891]={
+0x30,0x82,0x03,0x77,0x30,0x82,0x02,0x5F,0xA0,0x03,0x02,0x01,0x02,0x02,0x04,0x02,
+0x00,0x00,0xB9,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,
+0x05,0x00,0x30,0x5A,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x49,
+0x45,0x31,0x12,0x30,0x10,0x06,0x03,0x55,0x04,0x0A,0x13,0x09,0x42,0x61,0x6C,0x74,
+0x69,0x6D,0x6F,0x72,0x65,0x31,0x13,0x30,0x11,0x06,0x03,0x55,0x04,0x0B,0x13,0x0A,
+0x43,0x79,0x62,0x65,0x72,0x54,0x72,0x75,0x73,0x74,0x31,0x22,0x30,0x20,0x06,0x03,
+0x55,0x04,0x03,0x13,0x19,0x42,0x61,0x6C,0x74,0x69,0x6D,0x6F,0x72,0x65,0x20,0x43,
+0x79,0x62,0x65,0x72,0x54,0x72,0x75,0x73,0x74,0x20,0x52,0x6F,0x6F,0x74,0x30,0x1E,
+0x17,0x0D,0x30,0x30,0x30,0x35,0x31,0x32,0x31,0x38,0x34,0x36,0x30,0x30,0x5A,0x17,
+0x0D,0x32,0x35,0x30,0x35,0x31,0x32,0x32,0x33,0x35,0x39,0x30,0x30,0x5A,0x30,0x5A,
+0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x49,0x45,0x31,0x12,0x30,
+0x10,0x06,0x03,0x55,0x04,0x0A,0x13,0x09,0x42,0x61,0x6C,0x74,0x69,0x6D,0x6F,0x72,
+0x65,0x31,0x13,0x30,0x11,0x06,0x03,0x55,0x04,0x0B,0x13,0x0A,0x43,0x79,0x62,0x65,
+0x72,0x54,0x72,0x75,0x73,0x74,0x31,0x22,0x30,0x20,0x06,0x03,0x55,0x04,0x03,0x13,
+0x19,0x42,0x61,0x6C,0x74,0x69,0x6D,0x6F,0x72,0x65,0x20,0x43,0x79,0x62,0x65,0x72,
+0x54,0x72,0x75,0x73,0x74,0x20,0x52,0x6F,0x6F,0x74,0x30,0x82,0x01,0x22,0x30,0x0D,
+0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,
+0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01,0x01,0x00,0xA3,0x04,0xBB,0x22,0xAB,
+0x98,0x3D,0x57,0xE8,0x26,0x72,0x9A,0xB5,0x79,0xD4,0x29,0xE2,0xE1,0xE8,0x95,0x80,
+0xB1,0xB0,0xE3,0x5B,0x8E,0x2B,0x29,0x9A,0x64,0xDF,0xA1,0x5D,0xED,0xB0,0x09,0x05,
+0x6D,0xDB,0x28,0x2E,0xCE,0x62,0xA2,0x62,0xFE,0xB4,0x88,0xDA,0x12,0xEB,0x38,0xEB,
+0x21,0x9D,0xC0,0x41,0x2B,0x01,0x52,0x7B,0x88,0x77,0xD3,0x1C,0x8F,0xC7,0xBA,0xB9,
+0x88,0xB5,0x6A,0x09,0xE7,0x73,0xE8,0x11,0x40,0xA7,0xD1,0xCC,0xCA,0x62,0x8D,0x2D,
+0xE5,0x8F,0x0B,0xA6,0x50,0xD2,0xA8,0x50,0xC3,0x28,0xEA,0xF5,0xAB,0x25,0x87,0x8A,
+0x9A,0x96,0x1C,0xA9,0x67,0xB8,0x3F,0x0C,0xD5,0xF7,0xF9,0x52,0x13,0x2F,0xC2,0x1B,
+0xD5,0x70,0x70,0xF0,0x8F,0xC0,0x12,0xCA,0x06,0xCB,0x9A,0xE1,0xD9,0xCA,0x33,0x7A,
+0x77,0xD6,0xF8,0xEC,0xB9,0xF1,0x68,0x44,0x42,0x48,0x13,0xD2,0xC0,0xC2,0xA4,0xAE,
+0x5E,0x60,0xFE,0xB6,0xA6,0x05,0xFC,0xB4,0xDD,0x07,0x59,0x02,0xD4,0x59,0x18,0x98,
+0x63,0xF5,0xA5,0x63,0xE0,0x90,0x0C,0x7D,0x5D,0xB2,0x06,0x7A,0xF3,0x85,0xEA,0xEB,
+0xD4,0x03,0xAE,0x5E,0x84,0x3E,0x5F,0xFF,0x15,0xED,0x69,0xBC,0xF9,0x39,0x36,0x72,
+0x75,0xCF,0x77,0x52,0x4D,0xF3,0xC9,0x90,0x2C,0xB9,0x3D,0xE5,0xC9,0x23,0x53,0x3F,
+0x1F,0x24,0x98,0x21,0x5C,0x07,0x99,0x29,0xBD,0xC6,0x3A,0xEC,0xE7,0x6E,0x86,0x3A,
+0x6B,0x97,0x74,0x63,0x33,0xBD,0x68,0x18,0x31,0xF0,0x78,0x8D,0x76,0xBF,0xFC,0x9E,
+0x8E,0x5D,0x2A,0x86,0xA7,0x4D,0x90,0xDC,0x27,0x1A,0x39,0x02,0x03,0x01,0x00,0x01,
+0xA3,0x45,0x30,0x43,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0xE5,
+0x9D,0x59,0x30,0x82,0x47,0x58,0xCC,0xAC,0xFA,0x08,0x54,0x36,0x86,0x7B,0x3A,0xB5,
+0x04,0x4D,0xF0,0x30,0x12,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x08,0x30,
+0x06,0x01,0x01,0xFF,0x02,0x01,0x03,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,
+0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,
+0x0D,0x01,0x01,0x05,0x05,0x00,0x03,0x82,0x01,0x01,0x00,0x85,0x0C,0x5D,0x8E,0xE4,
+0x6F,0x51,0x68,0x42,0x05,0xA0,0xDD,0xBB,0x4F,0x27,0x25,0x84,0x03,0xBD,0xF7,0x64,
+0xFD,0x2D,0xD7,0x30,0xE3,0xA4,0x10,0x17,0xEB,0xDA,0x29,0x29,0xB6,0x79,0x3F,0x76,
+0xF6,0x19,0x13,0x23,0xB8,0x10,0x0A,0xF9,0x58,0xA4,0xD4,0x61,0x70,0xBD,0x04,0x61,
+0x6A,0x12,0x8A,0x17,0xD5,0x0A,0xBD,0xC5,0xBC,0x30,0x7C,0xD6,0xE9,0x0C,0x25,0x8D,
+0x86,0x40,0x4F,0xEC,0xCC,0xA3,0x7E,0x38,0xC6,0x37,0x11,0x4F,0xED,0xDD,0x68,0x31,
+0x8E,0x4C,0xD2,0xB3,0x01,0x74,0xEE,0xBE,0x75,0x5E,0x07,0x48,0x1A,0x7F,0x70,0xFF,
+0x16,0x5C,0x84,0xC0,0x79,0x85,0xB8,0x05,0xFD,0x7F,0xBE,0x65,0x11,0xA3,0x0F,0xC0,
+0x02,0xB4,0xF8,0x52,0x37,0x39,0x04,0xD5,0xA9,0x31,0x7A,0x18,0xBF,0xA0,0x2A,0xF4,
+0x12,0x99,0xF7,0xA3,0x45,0x82,0xE3,0x3C,0x5E,0xF5,0x9D,0x9E,0xB5,0xC8,0x9E,0x7C,
+0x2E,0xC8,0xA4,0x9E,0x4E,0x08,0x14,0x4B,0x6D,0xFD,0x70,0x6D,0x6B,0x1A,0x63,0xBD,
+0x64,0xE6,0x1F,0xB7,0xCE,0xF0,0xF2,0x9F,0x2E,0xBB,0x1B,0xB7,0xF2,0x50,0x88,0x73,
+0x92,0xC2,0xE2,0xE3,0x16,0x8D,0x9A,0x32,0x02,0xAB,0x8E,0x18,0xDD,0xE9,0x10,0x11,
+0xEE,0x7E,0x35,0xAB,0x90,0xAF,0x3E,0x30,0x94,0x7A,0xD0,0x33,0x3D,0xA7,0x65,0x0F,
+0xF5,0xFC,0x8E,0x9E,0x62,0xCF,0x47,0x44,0x2C,0x01,0x5D,0xBB,0x1D,0xB5,0x32,0xD2,
+0x47,0xD2,0x38,0x2E,0xD0,0xFE,0x81,0xDC,0x32,0x6A,0x1E,0xB5,0xEE,0x3C,0xD5,0xFC,
+0xE7,0x81,0x1D,0x19,0xC3,0x24,0x42,0xEA,0x63,0x39,0xA9,
+};
+
+
+/* subject:/OU=GlobalSign Root CA - R2/O=GlobalSign/CN=GlobalSign */
+/* issuer :/OU=GlobalSign Root CA - R2/O=GlobalSign/CN=GlobalSign */
+
+
+const unsigned char GlobalSign_Root_CA___R2_certificate[958]={
+0x30,0x82,0x03,0xBA,0x30,0x82,0x02,0xA2,0xA0,0x03,0x02,0x01,0x02,0x02,0x0B,0x04,
+0x00,0x00,0x00,0x00,0x01,0x0F,0x86,0x26,0xE6,0x0D,0x30,0x0D,0x06,0x09,0x2A,0x86,
+0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,0x4C,0x31,0x20,0x30,0x1E,0x06,
+0x03,0x55,0x04,0x0B,0x13,0x17,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,
+0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x41,0x20,0x2D,0x20,0x52,0x32,0x31,0x13,0x30,
+0x11,0x06,0x03,0x55,0x04,0x0A,0x13,0x0A,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,
+0x67,0x6E,0x31,0x13,0x30,0x11,0x06,0x03,0x55,0x04,0x03,0x13,0x0A,0x47,0x6C,0x6F,
+0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,0x30,0x1E,0x17,0x0D,0x30,0x36,0x31,0x32,0x31,
+0x35,0x30,0x38,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x32,0x31,0x31,0x32,0x31,0x35,
+0x30,0x38,0x30,0x30,0x30,0x30,0x5A,0x30,0x4C,0x31,0x20,0x30,0x1E,0x06,0x03,0x55,
+0x04,0x0B,0x13,0x17,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,0x20,0x52,
+0x6F,0x6F,0x74,0x20,0x43,0x41,0x20,0x2D,0x20,0x52,0x32,0x31,0x13,0x30,0x11,0x06,
+0x03,0x55,0x04,0x0A,0x13,0x0A,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,
+0x31,0x13,0x30,0x11,0x06,0x03,0x55,0x04,0x03,0x13,0x0A,0x47,0x6C,0x6F,0x62,0x61,
+0x6C,0x53,0x69,0x67,0x6E,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,
+0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,
+0x0A,0x02,0x82,0x01,0x01,0x00,0xA6,0xCF,0x24,0x0E,0xBE,0x2E,0x6F,0x28,0x99,0x45,
+0x42,0xC4,0xAB,0x3E,0x21,0x54,0x9B,0x0B,0xD3,0x7F,0x84,0x70,0xFA,0x12,0xB3,0xCB,
+0xBF,0x87,0x5F,0xC6,0x7F,0x86,0xD3,0xB2,0x30,0x5C,0xD6,0xFD,0xAD,0xF1,0x7B,0xDC,
+0xE5,0xF8,0x60,0x96,0x09,0x92,0x10,0xF5,0xD0,0x53,0xDE,0xFB,0x7B,0x7E,0x73,0x88,
+0xAC,0x52,0x88,0x7B,0x4A,0xA6,0xCA,0x49,0xA6,0x5E,0xA8,0xA7,0x8C,0x5A,0x11,0xBC,
+0x7A,0x82,0xEB,0xBE,0x8C,0xE9,0xB3,0xAC,0x96,0x25,0x07,0x97,0x4A,0x99,0x2A,0x07,
+0x2F,0xB4,0x1E,0x77,0xBF,0x8A,0x0F,0xB5,0x02,0x7C,0x1B,0x96,0xB8,0xC5,0xB9,0x3A,
+0x2C,0xBC,0xD6,0x12,0xB9,0xEB,0x59,0x7D,0xE2,0xD0,0x06,0x86,0x5F,0x5E,0x49,0x6A,
+0xB5,0x39,0x5E,0x88,0x34,0xEC,0xBC,0x78,0x0C,0x08,0x98,0x84,0x6C,0xA8,0xCD,0x4B,
+0xB4,0xA0,0x7D,0x0C,0x79,0x4D,0xF0,0xB8,0x2D,0xCB,0x21,0xCA,0xD5,0x6C,0x5B,0x7D,
+0xE1,0xA0,0x29,0x84,0xA1,0xF9,0xD3,0x94,0x49,0xCB,0x24,0x62,0x91,0x20,0xBC,0xDD,
+0x0B,0xD5,0xD9,0xCC,0xF9,0xEA,0x27,0x0A,0x2B,0x73,0x91,0xC6,0x9D,0x1B,0xAC,0xC8,
+0xCB,0xE8,0xE0,0xA0,0xF4,0x2F,0x90,0x8B,0x4D,0xFB,0xB0,0x36,0x1B,0xF6,0x19,0x7A,
+0x85,0xE0,0x6D,0xF2,0x61,0x13,0x88,0x5C,0x9F,0xE0,0x93,0x0A,0x51,0x97,0x8A,0x5A,
+0xCE,0xAF,0xAB,0xD5,0xF7,0xAA,0x09,0xAA,0x60,0xBD,0xDC,0xD9,0x5F,0xDF,0x72,0xA9,
+0x60,0x13,0x5E,0x00,0x01,0xC9,0x4A,0xFA,0x3F,0xA4,0xEA,0x07,0x03,0x21,0x02,0x8E,
+0x82,0xCA,0x03,0xC2,0x9B,0x8F,0x02,0x03,0x01,0x00,0x01,0xA3,0x81,0x9C,0x30,0x81,
+0x99,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,
+0x06,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,
+0x01,0xFF,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x9B,0xE2,0x07,
+0x57,0x67,0x1C,0x1E,0xC0,0x6A,0x06,0xDE,0x59,0xB4,0x9A,0x2D,0xDF,0xDC,0x19,0x86,
+0x2E,0x30,0x36,0x06,0x03,0x55,0x1D,0x1F,0x04,0x2F,0x30,0x2D,0x30,0x2B,0xA0,0x29,
+0xA0,0x27,0x86,0x25,0x68,0x74,0x74,0x70,0x3A,0x2F,0x2F,0x63,0x72,0x6C,0x2E,0x67,
+0x6C,0x6F,0x62,0x61,0x6C,0x73,0x69,0x67,0x6E,0x2E,0x6E,0x65,0x74,0x2F,0x72,0x6F,
+0x6F,0x74,0x2D,0x72,0x32,0x2E,0x63,0x72,0x6C,0x30,0x1F,0x06,0x03,0x55,0x1D,0x23,
+0x04,0x18,0x30,0x16,0x80,0x14,0x9B,0xE2,0x07,0x57,0x67,0x1C,0x1E,0xC0,0x6A,0x06,
+0xDE,0x59,0xB4,0x9A,0x2D,0xDF,0xDC,0x19,0x86,0x2E,0x30,0x0D,0x06,0x09,0x2A,0x86,
+0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x03,0x82,0x01,0x01,0x00,0x99,0x81,
+0x53,0x87,0x1C,0x68,0x97,0x86,0x91,0xEC,0xE0,0x4A,0xB8,0x44,0x0B,0xAB,0x81,0xAC,
+0x27,0x4F,0xD6,0xC1,0xB8,0x1C,0x43,0x78,0xB3,0x0C,0x9A,0xFC,0xEA,0x2C,0x3C,0x6E,
+0x61,0x1B,0x4D,0x4B,0x29,0xF5,0x9F,0x05,0x1D,0x26,0xC1,0xB8,0xE9,0x83,0x00,0x62,
+0x45,0xB6,0xA9,0x08,0x93,0xB9,0xA9,0x33,0x4B,0x18,0x9A,0xC2,0xF8,0x87,0x88,0x4E,
+0xDB,0xDD,0x71,0x34,0x1A,0xC1,0x54,0xDA,0x46,0x3F,0xE0,0xD3,0x2A,0xAB,0x6D,0x54,
+0x22,0xF5,0x3A,0x62,0xCD,0x20,0x6F,0xBA,0x29,0x89,0xD7,0xDD,0x91,0xEE,0xD3,0x5C,
+0xA2,0x3E,0xA1,0x5B,0x41,0xF5,0xDF,0xE5,0x64,0x43,0x2D,0xE9,0xD5,0x39,0xAB,0xD2,
+0xA2,0xDF,0xB7,0x8B,0xD0,0xC0,0x80,0x19,0x1C,0x45,0xC0,0x2D,0x8C,0xE8,0xF8,0x2D,
+0xA4,0x74,0x56,0x49,0xC5,0x05,0xB5,0x4F,0x15,0xDE,0x6E,0x44,0x78,0x39,0x87,0xA8,
+0x7E,0xBB,0xF3,0x79,0x18,0x91,0xBB,0xF4,0x6F,0x9D,0xC1,0xF0,0x8C,0x35,0x8C,0x5D,
+0x01,0xFB,0xC3,0x6D,0xB9,0xEF,0x44,0x6D,0x79,0x46,0x31,0x7E,0x0A,0xFE,0xA9,0x82,
+0xC1,0xFF,0xEF,0xAB,0x6E,0x20,0xC4,0x50,0xC9,0x5F,0x9D,0x4D,0x9B,0x17,0x8C,0x0C,
+0xE5,0x01,0xC9,0xA0,0x41,0x6A,0x73,0x53,0xFA,0xA5,0x50,0xB4,0x6E,0x25,0x0F,0xFB,
+0x4C,0x18,0xF4,0xFD,0x52,0xD9,0x8E,0x69,0xB1,0xE8,0x11,0x0F,0xDE,0x88,0xD8,0xFB,
+0x1D,0x49,0xF7,0xAA,0xDE,0x95,0xCF,0x20,0x78,0xC2,0x60,0x12,0xDB,0x25,0x40,0x8C,
+0x6A,0xFC,0x7E,0x42,0x38,0x40,0x64,0x12,0xF7,0x9E,0x81,0xE1,0x93,0x2E,
+};
+
+
+/* subject:/OU=GlobalSign Root CA - R3/O=GlobalSign/CN=GlobalSign */
+/* issuer :/OU=GlobalSign Root CA - R3/O=GlobalSign/CN=GlobalSign */
+
+
+const unsigned char GlobalSign_Root_CA___R3_certificate[867]={
+0x30,0x82,0x03,0x5F,0x30,0x82,0x02,0x47,0xA0,0x03,0x02,0x01,0x02,0x02,0x0B,0x04,
+0x00,0x00,0x00,0x00,0x01,0x21,0x58,0x53,0x08,0xA2,0x30,0x0D,0x06,0x09,0x2A,0x86,
+0x48,0x86,0xF7,0x0D,0x01,0x01,0x0B,0x05,0x00,0x30,0x4C,0x31,0x20,0x30,0x1E,0x06,
+0x03,0x55,0x04,0x0B,0x13,0x17,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,
+0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x41,0x20,0x2D,0x20,0x52,0x33,0x31,0x13,0x30,
+0x11,0x06,0x03,0x55,0x04,0x0A,0x13,0x0A,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,
+0x67,0x6E,0x31,0x13,0x30,0x11,0x06,0x03,0x55,0x04,0x03,0x13,0x0A,0x47,0x6C,0x6F,
+0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,0x30,0x1E,0x17,0x0D,0x30,0x39,0x30,0x33,0x31,
+0x38,0x31,0x30,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x32,0x39,0x30,0x33,0x31,0x38,
+0x31,0x30,0x30,0x30,0x30,0x30,0x5A,0x30,0x4C,0x31,0x20,0x30,0x1E,0x06,0x03,0x55,
+0x04,0x0B,0x13,0x17,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,0x20,0x52,
+0x6F,0x6F,0x74,0x20,0x43,0x41,0x20,0x2D,0x20,0x52,0x33,0x31,0x13,0x30,0x11,0x06,
+0x03,0x55,0x04,0x0A,0x13,0x0A,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,
+0x31,0x13,0x30,0x11,0x06,0x03,0x55,0x04,0x03,0x13,0x0A,0x47,0x6C,0x6F,0x62,0x61,
+0x6C,0x53,0x69,0x67,0x6E,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,
+0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,
+0x0A,0x02,0x82,0x01,0x01,0x00,0xCC,0x25,0x76,0x90,0x79,0x06,0x78,0x22,0x16,0xF5,
+0xC0,0x83,0xB6,0x84,0xCA,0x28,0x9E,0xFD,0x05,0x76,0x11,0xC5,0xAD,0x88,0x72,0xFC,
+0x46,0x02,0x43,0xC7,0xB2,0x8A,0x9D,0x04,0x5F,0x24,0xCB,0x2E,0x4B,0xE1,0x60,0x82,
+0x46,0xE1,0x52,0xAB,0x0C,0x81,0x47,0x70,0x6C,0xDD,0x64,0xD1,0xEB,0xF5,0x2C,0xA3,
+0x0F,0x82,0x3D,0x0C,0x2B,0xAE,0x97,0xD7,0xB6,0x14,0x86,0x10,0x79,0xBB,0x3B,0x13,
+0x80,0x77,0x8C,0x08,0xE1,0x49,0xD2,0x6A,0x62,0x2F,0x1F,0x5E,0xFA,0x96,0x68,0xDF,
+0x89,0x27,0x95,0x38,0x9F,0x06,0xD7,0x3E,0xC9,0xCB,0x26,0x59,0x0D,0x73,0xDE,0xB0,
+0xC8,0xE9,0x26,0x0E,0x83,0x15,0xC6,0xEF,0x5B,0x8B,0xD2,0x04,0x60,0xCA,0x49,0xA6,
+0x28,0xF6,0x69,0x3B,0xF6,0xCB,0xC8,0x28,0x91,0xE5,0x9D,0x8A,0x61,0x57,0x37,0xAC,
+0x74,0x14,0xDC,0x74,0xE0,0x3A,0xEE,0x72,0x2F,0x2E,0x9C,0xFB,0xD0,0xBB,0xBF,0xF5,
+0x3D,0x00,0xE1,0x06,0x33,0xE8,0x82,0x2B,0xAE,0x53,0xA6,0x3A,0x16,0x73,0x8C,0xDD,
+0x41,0x0E,0x20,0x3A,0xC0,0xB4,0xA7,0xA1,0xE9,0xB2,0x4F,0x90,0x2E,0x32,0x60,0xE9,
+0x57,0xCB,0xB9,0x04,0x92,0x68,0x68,0xE5,0x38,0x26,0x60,0x75,0xB2,0x9F,0x77,0xFF,
+0x91,0x14,0xEF,0xAE,0x20,0x49,0xFC,0xAD,0x40,0x15,0x48,0xD1,0x02,0x31,0x61,0x19,
+0x5E,0xB8,0x97,0xEF,0xAD,0x77,0xB7,0x64,0x9A,0x7A,0xBF,0x5F,0xC1,0x13,0xEF,0x9B,
+0x62,0xFB,0x0D,0x6C,0xE0,0x54,0x69,0x16,0xA9,0x03,0xDA,0x6E,0xE9,0x83,0x93,0x71,
+0x76,0xC6,0x69,0x85,0x82,0x17,0x02,0x03,0x01,0x00,0x01,0xA3,0x42,0x30,0x40,0x30,
+0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,
+0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,
+0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x8F,0xF0,0x4B,0x7F,0xA8,
+0x2E,0x45,0x24,0xAE,0x4D,0x50,0xFA,0x63,0x9A,0x8B,0xDE,0xE2,0xDD,0x1B,0xBC,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0B,0x05,0x00,0x03,0x82,
+0x01,0x01,0x00,0x4B,0x40,0xDB,0xC0,0x50,0xAA,0xFE,0xC8,0x0C,0xEF,0xF7,0x96,0x54,
+0x45,0x49,0xBB,0x96,0x00,0x09,0x41,0xAC,0xB3,0x13,0x86,0x86,0x28,0x07,0x33,0xCA,
+0x6B,0xE6,0x74,0xB9,0xBA,0x00,0x2D,0xAE,0xA4,0x0A,0xD3,0xF5,0xF1,0xF1,0x0F,0x8A,
+0xBF,0x73,0x67,0x4A,0x83,0xC7,0x44,0x7B,0x78,0xE0,0xAF,0x6E,0x6C,0x6F,0x03,0x29,
+0x8E,0x33,0x39,0x45,0xC3,0x8E,0xE4,0xB9,0x57,0x6C,0xAA,0xFC,0x12,0x96,0xEC,0x53,
+0xC6,0x2D,0xE4,0x24,0x6C,0xB9,0x94,0x63,0xFB,0xDC,0x53,0x68,0x67,0x56,0x3E,0x83,
+0xB8,0xCF,0x35,0x21,0xC3,0xC9,0x68,0xFE,0xCE,0xDA,0xC2,0x53,0xAA,0xCC,0x90,0x8A,
+0xE9,0xF0,0x5D,0x46,0x8C,0x95,0xDD,0x7A,0x58,0x28,0x1A,0x2F,0x1D,0xDE,0xCD,0x00,
+0x37,0x41,0x8F,0xED,0x44,0x6D,0xD7,0x53,0x28,0x97,0x7E,0xF3,0x67,0x04,0x1E,0x15,
+0xD7,0x8A,0x96,0xB4,0xD3,0xDE,0x4C,0x27,0xA4,0x4C,0x1B,0x73,0x73,0x76,0xF4,0x17,
+0x99,0xC2,0x1F,0x7A,0x0E,0xE3,0x2D,0x08,0xAD,0x0A,0x1C,0x2C,0xFF,0x3C,0xAB,0x55,
+0x0E,0x0F,0x91,0x7E,0x36,0xEB,0xC3,0x57,0x49,0xBE,0xE1,0x2E,0x2D,0x7C,0x60,0x8B,
+0xC3,0x41,0x51,0x13,0x23,0x9D,0xCE,0xF7,0x32,0x6B,0x94,0x01,0xA8,0x99,0xE7,0x2C,
+0x33,0x1F,0x3A,0x3B,0x25,0xD2,0x86,0x40,0xCE,0x3B,0x2C,0x86,0x78,0xC9,0x61,0x2F,
+0x14,0xBA,0xEE,0xDB,0x55,0x6F,0xDF,0x84,0xEE,0x05,0x09,0x4D,0xBD,0x28,0xD8,0x72,
+0xCE,0xD3,0x62,0x50,0x65,0x1E,0xEB,0x92,0x97,0x83,0x31,0xD9,0xB3,0xB5,0xCA,0x47,
+0x58,0x3F,0x5F,
+};
+
+
+/* subject:/C=US/O=AffirmTrust/CN=AffirmTrust Networking */
+/* issuer :/C=US/O=AffirmTrust/CN=AffirmTrust Networking */
+
+
+const unsigned char AffirmTrust_Networking_certificate[848]={
+0x30,0x82,0x03,0x4C,0x30,0x82,0x02,0x34,0xA0,0x03,0x02,0x01,0x02,0x02,0x08,0x7C,
+0x4F,0x04,0x39,0x1C,0xD4,0x99,0x2D,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,
+0x0D,0x01,0x01,0x05,0x05,0x00,0x30,0x44,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,
+0x06,0x13,0x02,0x55,0x53,0x31,0x14,0x30,0x12,0x06,0x03,0x55,0x04,0x0A,0x0C,0x0B,
+0x41,0x66,0x66,0x69,0x72,0x6D,0x54,0x72,0x75,0x73,0x74,0x31,0x1F,0x30,0x1D,0x06,
+0x03,0x55,0x04,0x03,0x0C,0x16,0x41,0x66,0x66,0x69,0x72,0x6D,0x54,0x72,0x75,0x73,
+0x74,0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,0x6B,0x69,0x6E,0x67,0x30,0x1E,0x17,0x0D,
+0x31,0x30,0x30,0x31,0x32,0x39,0x31,0x34,0x30,0x38,0x32,0x34,0x5A,0x17,0x0D,0x33,
+0x30,0x31,0x32,0x33,0x31,0x31,0x34,0x30,0x38,0x32,0x34,0x5A,0x30,0x44,0x31,0x0B,
+0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x14,0x30,0x12,0x06,
+0x03,0x55,0x04,0x0A,0x0C,0x0B,0x41,0x66,0x66,0x69,0x72,0x6D,0x54,0x72,0x75,0x73,
+0x74,0x31,0x1F,0x30,0x1D,0x06,0x03,0x55,0x04,0x03,0x0C,0x16,0x41,0x66,0x66,0x69,
+0x72,0x6D,0x54,0x72,0x75,0x73,0x74,0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,0x6B,0x69,
+0x6E,0x67,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,
+0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,
+0x01,0x01,0x00,0xB4,0x84,0xCC,0x33,0x17,0x2E,0x6B,0x94,0x6C,0x6B,0x61,0x52,0xA0,
+0xEB,0xA3,0xCF,0x79,0x94,0x4C,0xE5,0x94,0x80,0x99,0xCB,0x55,0x64,0x44,0x65,0x8F,
+0x67,0x64,0xE2,0x06,0xE3,0x5C,0x37,0x49,0xF6,0x2F,0x9B,0x84,0x84,0x1E,0x2D,0xF2,
+0x60,0x9D,0x30,0x4E,0xCC,0x84,0x85,0xE2,0x2C,0xCF,0x1E,0x9E,0xFE,0x36,0xAB,0x33,
+0x77,0x35,0x44,0xD8,0x35,0x96,0x1A,0x3D,0x36,0xE8,0x7A,0x0E,0xD8,0xD5,0x47,0xA1,
+0x6A,0x69,0x8B,0xD9,0xFC,0xBB,0x3A,0xAE,0x79,0x5A,0xD5,0xF4,0xD6,0x71,0xBB,0x9A,
+0x90,0x23,0x6B,0x9A,0xB7,0x88,0x74,0x87,0x0C,0x1E,0x5F,0xB9,0x9E,0x2D,0xFA,0xAB,
+0x53,0x2B,0xDC,0xBB,0x76,0x3E,0x93,0x4C,0x08,0x08,0x8C,0x1E,0xA2,0x23,0x1C,0xD4,
+0x6A,0xAD,0x22,0xBA,0x99,0x01,0x2E,0x6D,0x65,0xCB,0xBE,0x24,0x66,0x55,0x24,0x4B,
+0x40,0x44,0xB1,0x1B,0xD7,0xE1,0xC2,0x85,0xC0,0xDE,0x10,0x3F,0x3D,0xED,0xB8,0xFC,
+0xF1,0xF1,0x23,0x53,0xDC,0xBF,0x65,0x97,0x6F,0xD9,0xF9,0x40,0x71,0x8D,0x7D,0xBD,
+0x95,0xD4,0xCE,0xBE,0xA0,0x5E,0x27,0x23,0xDE,0xFD,0xA6,0xD0,0x26,0x0E,0x00,0x29,
+0xEB,0x3C,0x46,0xF0,0x3D,0x60,0xBF,0x3F,0x50,0xD2,0xDC,0x26,0x41,0x51,0x9E,0x14,
+0x37,0x42,0x04,0xA3,0x70,0x57,0xA8,0x1B,0x87,0xED,0x2D,0xFA,0x7B,0xEE,0x8C,0x0A,
+0xE3,0xA9,0x66,0x89,0x19,0xCB,0x41,0xF9,0xDD,0x44,0x36,0x61,0xCF,0xE2,0x77,0x46,
+0xC8,0x7D,0xF6,0xF4,0x92,0x81,0x36,0xFD,0xDB,0x34,0xF1,0x72,0x7E,0xF3,0x0C,0x16,
+0xBD,0xB4,0x15,0x02,0x03,0x01,0x00,0x01,0xA3,0x42,0x30,0x40,0x30,0x1D,0x06,0x03,
+0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x07,0x1F,0xD2,0xE7,0x9C,0xDA,0xC2,0x6E,0xA2,
+0x40,0xB4,0xB0,0x7A,0x50,0x10,0x50,0x74,0xC4,0xC8,0xBD,0x30,0x0F,0x06,0x03,0x55,
+0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x0E,0x06,0x03,
+0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x0D,0x06,0x09,
+0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x03,0x82,0x01,0x01,0x00,
+0x89,0x57,0xB2,0x16,0x7A,0xA8,0xC2,0xFD,0xD6,0xD9,0x9B,0x9B,0x34,0xC2,0x9C,0xB4,
+0x32,0x14,0x4D,0xA7,0xA4,0xDF,0xEC,0xBE,0xA7,0xBE,0xF8,0x43,0xDB,0x91,0x37,0xCE,
+0xB4,0x32,0x2E,0x50,0x55,0x1A,0x35,0x4E,0x76,0x43,0x71,0x20,0xEF,0x93,0x77,0x4E,
+0x15,0x70,0x2E,0x87,0xC3,0xC1,0x1D,0x6D,0xDC,0xCB,0xB5,0x27,0xD4,0x2C,0x56,0xD1,
+0x52,0x53,0x3A,0x44,0xD2,0x73,0xC8,0xC4,0x1B,0x05,0x65,0x5A,0x62,0x92,0x9C,0xEE,
+0x41,0x8D,0x31,0xDB,0xE7,0x34,0xEA,0x59,0x21,0xD5,0x01,0x7A,0xD7,0x64,0xB8,0x64,
+0x39,0xCD,0xC9,0xED,0xAF,0xED,0x4B,0x03,0x48,0xA7,0xA0,0x99,0x01,0x80,0xDC,0x65,
+0xA3,0x36,0xAE,0x65,0x59,0x48,0x4F,0x82,0x4B,0xC8,0x65,0xF1,0x57,0x1D,0xE5,0x59,
+0x2E,0x0A,0x3F,0x6C,0xD8,0xD1,0xF5,0xE5,0x09,0xB4,0x6C,0x54,0x00,0x0A,0xE0,0x15,
+0x4D,0x87,0x75,0x6D,0xB7,0x58,0x96,0x5A,0xDD,0x6D,0xD2,0x00,0xA0,0xF4,0x9B,0x48,
+0xBE,0xC3,0x37,0xA4,0xBA,0x36,0xE0,0x7C,0x87,0x85,0x97,0x1A,0x15,0xA2,0xDE,0x2E,
+0xA2,0x5B,0xBD,0xAF,0x18,0xF9,0x90,0x50,0xCD,0x70,0x59,0xF8,0x27,0x67,0x47,0xCB,
+0xC7,0xA0,0x07,0x3A,0x7D,0xD1,0x2C,0x5D,0x6C,0x19,0x3A,0x66,0xB5,0x7D,0xFD,0x91,
+0x6F,0x82,0xB1,0xBE,0x08,0x93,0xDB,0x14,0x47,0xF1,0xA2,0x37,0xC7,0x45,0x9E,0x3C,
+0xC7,0x77,0xAF,0x64,0xA8,0x93,0xDF,0xF6,0x69,0x83,0x82,0x60,0xF2,0x49,0x42,0x34,
+0xED,0x5A,0x00,0x54,0x85,0x1C,0x16,0x36,0x92,0x0C,0x5C,0xFA,0xA6,0xAD,0xBF,0xDB,
+};
+
+
+/* subject:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root */
+/* issuer :/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root */
+
+
+const unsigned char AddTrust_External_Root_certificate[1082]={
+0x30,0x82,0x04,0x36,0x30,0x82,0x03,0x1E,0xA0,0x03,0x02,0x01,0x02,0x02,0x01,0x01,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,
+0x6F,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x53,0x45,0x31,0x14,
+0x30,0x12,0x06,0x03,0x55,0x04,0x0A,0x13,0x0B,0x41,0x64,0x64,0x54,0x72,0x75,0x73,
+0x74,0x20,0x41,0x42,0x31,0x26,0x30,0x24,0x06,0x03,0x55,0x04,0x0B,0x13,0x1D,0x41,
+0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x45,0x78,0x74,0x65,0x72,0x6E,0x61,0x6C,
+0x20,0x54,0x54,0x50,0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,0x6B,0x31,0x22,0x30,0x20,
+0x06,0x03,0x55,0x04,0x03,0x13,0x19,0x41,0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,
+0x45,0x78,0x74,0x65,0x72,0x6E,0x61,0x6C,0x20,0x43,0x41,0x20,0x52,0x6F,0x6F,0x74,
+0x30,0x1E,0x17,0x0D,0x30,0x30,0x30,0x35,0x33,0x30,0x31,0x30,0x34,0x38,0x33,0x38,
+0x5A,0x17,0x0D,0x32,0x30,0x30,0x35,0x33,0x30,0x31,0x30,0x34,0x38,0x33,0x38,0x5A,
+0x30,0x6F,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x53,0x45,0x31,
+0x14,0x30,0x12,0x06,0x03,0x55,0x04,0x0A,0x13,0x0B,0x41,0x64,0x64,0x54,0x72,0x75,
+0x73,0x74,0x20,0x41,0x42,0x31,0x26,0x30,0x24,0x06,0x03,0x55,0x04,0x0B,0x13,0x1D,
+0x41,0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x45,0x78,0x74,0x65,0x72,0x6E,0x61,
+0x6C,0x20,0x54,0x54,0x50,0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,0x6B,0x31,0x22,0x30,
+0x20,0x06,0x03,0x55,0x04,0x03,0x13,0x19,0x41,0x64,0x64,0x54,0x72,0x75,0x73,0x74,
+0x20,0x45,0x78,0x74,0x65,0x72,0x6E,0x61,0x6C,0x20,0x43,0x41,0x20,0x52,0x6F,0x6F,
+0x74,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,
+0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01,
+0x01,0x00,0xB7,0xF7,0x1A,0x33,0xE6,0xF2,0x00,0x04,0x2D,0x39,0xE0,0x4E,0x5B,0xED,
+0x1F,0xBC,0x6C,0x0F,0xCD,0xB5,0xFA,0x23,0xB6,0xCE,0xDE,0x9B,0x11,0x33,0x97,0xA4,
+0x29,0x4C,0x7D,0x93,0x9F,0xBD,0x4A,0xBC,0x93,0xED,0x03,0x1A,0xE3,0x8F,0xCF,0xE5,
+0x6D,0x50,0x5A,0xD6,0x97,0x29,0x94,0x5A,0x80,0xB0,0x49,0x7A,0xDB,0x2E,0x95,0xFD,
+0xB8,0xCA,0xBF,0x37,0x38,0x2D,0x1E,0x3E,0x91,0x41,0xAD,0x70,0x56,0xC7,0xF0,0x4F,
+0x3F,0xE8,0x32,0x9E,0x74,0xCA,0xC8,0x90,0x54,0xE9,0xC6,0x5F,0x0F,0x78,0x9D,0x9A,
+0x40,0x3C,0x0E,0xAC,0x61,0xAA,0x5E,0x14,0x8F,0x9E,0x87,0xA1,0x6A,0x50,0xDC,0xD7,
+0x9A,0x4E,0xAF,0x05,0xB3,0xA6,0x71,0x94,0x9C,0x71,0xB3,0x50,0x60,0x0A,0xC7,0x13,
+0x9D,0x38,0x07,0x86,0x02,0xA8,0xE9,0xA8,0x69,0x26,0x18,0x90,0xAB,0x4C,0xB0,0x4F,
+0x23,0xAB,0x3A,0x4F,0x84,0xD8,0xDF,0xCE,0x9F,0xE1,0x69,0x6F,0xBB,0xD7,0x42,0xD7,
+0x6B,0x44,0xE4,0xC7,0xAD,0xEE,0x6D,0x41,0x5F,0x72,0x5A,0x71,0x08,0x37,0xB3,0x79,
+0x65,0xA4,0x59,0xA0,0x94,0x37,0xF7,0x00,0x2F,0x0D,0xC2,0x92,0x72,0xDA,0xD0,0x38,
+0x72,0xDB,0x14,0xA8,0x45,0xC4,0x5D,0x2A,0x7D,0xB7,0xB4,0xD6,0xC4,0xEE,0xAC,0xCD,
+0x13,0x44,0xB7,0xC9,0x2B,0xDD,0x43,0x00,0x25,0xFA,0x61,0xB9,0x69,0x6A,0x58,0x23,
+0x11,0xB7,0xA7,0x33,0x8F,0x56,0x75,0x59,0xF5,0xCD,0x29,0xD7,0x46,0xB7,0x0A,0x2B,
+0x65,0xB6,0xD3,0x42,0x6F,0x15,0xB2,0xB8,0x7B,0xFB,0xEF,0xE9,0x5D,0x53,0xD5,0x34,
+0x5A,0x27,0x02,0x03,0x01,0x00,0x01,0xA3,0x81,0xDC,0x30,0x81,0xD9,0x30,0x1D,0x06,
+0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0xAD,0xBD,0x98,0x7A,0x34,0xB4,0x26,0xF7,
+0xFA,0xC4,0x26,0x54,0xEF,0x03,0xBD,0xE0,0x24,0xCB,0x54,0x1A,0x30,0x0B,0x06,0x03,
+0x55,0x1D,0x0F,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,
+0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x81,0x99,0x06,0x03,0x55,
+0x1D,0x23,0x04,0x81,0x91,0x30,0x81,0x8E,0x80,0x14,0xAD,0xBD,0x98,0x7A,0x34,0xB4,
+0x26,0xF7,0xFA,0xC4,0x26,0x54,0xEF,0x03,0xBD,0xE0,0x24,0xCB,0x54,0x1A,0xA1,0x73,
+0xA4,0x71,0x30,0x6F,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x53,
+0x45,0x31,0x14,0x30,0x12,0x06,0x03,0x55,0x04,0x0A,0x13,0x0B,0x41,0x64,0x64,0x54,
+0x72,0x75,0x73,0x74,0x20,0x41,0x42,0x31,0x26,0x30,0x24,0x06,0x03,0x55,0x04,0x0B,
+0x13,0x1D,0x41,0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x45,0x78,0x74,0x65,0x72,
+0x6E,0x61,0x6C,0x20,0x54,0x54,0x50,0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,0x6B,0x31,
+0x22,0x30,0x20,0x06,0x03,0x55,0x04,0x03,0x13,0x19,0x41,0x64,0x64,0x54,0x72,0x75,
+0x73,0x74,0x20,0x45,0x78,0x74,0x65,0x72,0x6E,0x61,0x6C,0x20,0x43,0x41,0x20,0x52,
+0x6F,0x6F,0x74,0x82,0x01,0x01,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,
+0x01,0x01,0x05,0x05,0x00,0x03,0x82,0x01,0x01,0x00,0xB0,0x9B,0xE0,0x85,0x25,0xC2,
+0xD6,0x23,0xE2,0x0F,0x96,0x06,0x92,0x9D,0x41,0x98,0x9C,0xD9,0x84,0x79,0x81,0xD9,
+0x1E,0x5B,0x14,0x07,0x23,0x36,0x65,0x8F,0xB0,0xD8,0x77,0xBB,0xAC,0x41,0x6C,0x47,
+0x60,0x83,0x51,0xB0,0xF9,0x32,0x3D,0xE7,0xFC,0xF6,0x26,0x13,0xC7,0x80,0x16,0xA5,
+0xBF,0x5A,0xFC,0x87,0xCF,0x78,0x79,0x89,0x21,0x9A,0xE2,0x4C,0x07,0x0A,0x86,0x35,
+0xBC,0xF2,0xDE,0x51,0xC4,0xD2,0x96,0xB7,0xDC,0x7E,0x4E,0xEE,0x70,0xFD,0x1C,0x39,
+0xEB,0x0C,0x02,0x51,0x14,0x2D,0x8E,0xBD,0x16,0xE0,0xC1,0xDF,0x46,0x75,0xE7,0x24,
+0xAD,0xEC,0xF4,0x42,0xB4,0x85,0x93,0x70,0x10,0x67,0xBA,0x9D,0x06,0x35,0x4A,0x18,
+0xD3,0x2B,0x7A,0xCC,0x51,0x42,0xA1,0x7A,0x63,0xD1,0xE6,0xBB,0xA1,0xC5,0x2B,0xC2,
+0x36,0xBE,0x13,0x0D,0xE6,0xBD,0x63,0x7E,0x79,0x7B,0xA7,0x09,0x0D,0x40,0xAB,0x6A,
+0xDD,0x8F,0x8A,0xC3,0xF6,0xF6,0x8C,0x1A,0x42,0x05,0x51,0xD4,0x45,0xF5,0x9F,0xA7,
+0x62,0x21,0x68,0x15,0x20,0x43,0x3C,0x99,0xE7,0x7C,0xBD,0x24,0xD8,0xA9,0x91,0x17,
+0x73,0x88,0x3F,0x56,0x1B,0x31,0x38,0x18,0xB4,0x71,0x0F,0x9A,0xCD,0xC8,0x0E,0x9E,
+0x8E,0x2E,0x1B,0xE1,0x8C,0x98,0x83,0xCB,0x1F,0x31,0xF1,0x44,0x4C,0xC6,0x04,0x73,
+0x49,0x76,0x60,0x0F,0xC7,0xF8,0xBD,0x17,0x80,0x6B,0x2E,0xE9,0xCC,0x4C,0x0E,0x5A,
+0x9A,0x79,0x0F,0x20,0x0A,0x2E,0xD5,0x9E,0x63,0x26,0x1E,0x55,0x92,0x94,0xD8,0x82,
+0x17,0x5A,0x7B,0xD0,0xBC,0xC7,0x8F,0x4E,0x86,0x04,
+};
+
+
+/* subject:/C=US/O=thawte, Inc./OU=Certification Services Division/OU=(c) 2008 thawte, Inc. - For authorized use only/CN=thawte Primary Root CA - G3 */
+/* issuer :/C=US/O=thawte, Inc./OU=Certification Services Division/OU=(c) 2008 thawte, Inc. - For authorized use only/CN=thawte Primary Root CA - G3 */
+
+
+const unsigned char thawte_Primary_Root_CA___G3_certificate[1070]={
+0x30,0x82,0x04,0x2A,0x30,0x82,0x03,0x12,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x60,
+0x01,0x97,0xB7,0x46,0xA7,0xEA,0xB4,0xB4,0x9A,0xD6,0x4B,0x2F,0xF7,0x90,0xFB,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0B,0x05,0x00,0x30,0x81,
+0xAE,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x15,
+0x30,0x13,0x06,0x03,0x55,0x04,0x0A,0x13,0x0C,0x74,0x68,0x61,0x77,0x74,0x65,0x2C,
+0x20,0x49,0x6E,0x63,0x2E,0x31,0x28,0x30,0x26,0x06,0x03,0x55,0x04,0x0B,0x13,0x1F,
+0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x53,0x65,
+0x72,0x76,0x69,0x63,0x65,0x73,0x20,0x44,0x69,0x76,0x69,0x73,0x69,0x6F,0x6E,0x31,
+0x38,0x30,0x36,0x06,0x03,0x55,0x04,0x0B,0x13,0x2F,0x28,0x63,0x29,0x20,0x32,0x30,
+0x30,0x38,0x20,0x74,0x68,0x61,0x77,0x74,0x65,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x20,
+0x2D,0x20,0x46,0x6F,0x72,0x20,0x61,0x75,0x74,0x68,0x6F,0x72,0x69,0x7A,0x65,0x64,
+0x20,0x75,0x73,0x65,0x20,0x6F,0x6E,0x6C,0x79,0x31,0x24,0x30,0x22,0x06,0x03,0x55,
+0x04,0x03,0x13,0x1B,0x74,0x68,0x61,0x77,0x74,0x65,0x20,0x50,0x72,0x69,0x6D,0x61,
+0x72,0x79,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x41,0x20,0x2D,0x20,0x47,0x33,0x30,
+0x1E,0x17,0x0D,0x30,0x38,0x30,0x34,0x30,0x32,0x30,0x30,0x30,0x30,0x30,0x30,0x5A,
+0x17,0x0D,0x33,0x37,0x31,0x32,0x30,0x31,0x32,0x33,0x35,0x39,0x35,0x39,0x5A,0x30,
+0x81,0xAE,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,
+0x15,0x30,0x13,0x06,0x03,0x55,0x04,0x0A,0x13,0x0C,0x74,0x68,0x61,0x77,0x74,0x65,
+0x2C,0x20,0x49,0x6E,0x63,0x2E,0x31,0x28,0x30,0x26,0x06,0x03,0x55,0x04,0x0B,0x13,
+0x1F,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x53,
+0x65,0x72,0x76,0x69,0x63,0x65,0x73,0x20,0x44,0x69,0x76,0x69,0x73,0x69,0x6F,0x6E,
+0x31,0x38,0x30,0x36,0x06,0x03,0x55,0x04,0x0B,0x13,0x2F,0x28,0x63,0x29,0x20,0x32,
+0x30,0x30,0x38,0x20,0x74,0x68,0x61,0x77,0x74,0x65,0x2C,0x20,0x49,0x6E,0x63,0x2E,
+0x20,0x2D,0x20,0x46,0x6F,0x72,0x20,0x61,0x75,0x74,0x68,0x6F,0x72,0x69,0x7A,0x65,
+0x64,0x20,0x75,0x73,0x65,0x20,0x6F,0x6E,0x6C,0x79,0x31,0x24,0x30,0x22,0x06,0x03,
+0x55,0x04,0x03,0x13,0x1B,0x74,0x68,0x61,0x77,0x74,0x65,0x20,0x50,0x72,0x69,0x6D,
+0x61,0x72,0x79,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x41,0x20,0x2D,0x20,0x47,0x33,
+0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,
+0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01,0x01,
+0x00,0xB2,0xBF,0x27,0x2C,0xFB,0xDB,0xD8,0x5B,0xDD,0x78,0x7B,0x1B,0x9E,0x77,0x66,
+0x81,0xCB,0x3E,0xBC,0x7C,0xAE,0xF3,0xA6,0x27,0x9A,0x34,0xA3,0x68,0x31,0x71,0x38,
+0x33,0x62,0xE4,0xF3,0x71,0x66,0x79,0xB1,0xA9,0x65,0xA3,0xA5,0x8B,0xD5,0x8F,0x60,
+0x2D,0x3F,0x42,0xCC,0xAA,0x6B,0x32,0xC0,0x23,0xCB,0x2C,0x41,0xDD,0xE4,0xDF,0xFC,
+0x61,0x9C,0xE2,0x73,0xB2,0x22,0x95,0x11,0x43,0x18,0x5F,0xC4,0xB6,0x1F,0x57,0x6C,
+0x0A,0x05,0x58,0x22,0xC8,0x36,0x4C,0x3A,0x7C,0xA5,0xD1,0xCF,0x86,0xAF,0x88,0xA7,
+0x44,0x02,0x13,0x74,0x71,0x73,0x0A,0x42,0x59,0x02,0xF8,0x1B,0x14,0x6B,0x42,0xDF,
+0x6F,0x5F,0xBA,0x6B,0x82,0xA2,0x9D,0x5B,0xE7,0x4A,0xBD,0x1E,0x01,0x72,0xDB,0x4B,
+0x74,0xE8,0x3B,0x7F,0x7F,0x7D,0x1F,0x04,0xB4,0x26,0x9B,0xE0,0xB4,0x5A,0xAC,0x47,
+0x3D,0x55,0xB8,0xD7,0xB0,0x26,0x52,0x28,0x01,0x31,0x40,0x66,0xD8,0xD9,0x24,0xBD,
+0xF6,0x2A,0xD8,0xEC,0x21,0x49,0x5C,0x9B,0xF6,0x7A,0xE9,0x7F,0x55,0x35,0x7E,0x96,
+0x6B,0x8D,0x93,0x93,0x27,0xCB,0x92,0xBB,0xEA,0xAC,0x40,0xC0,0x9F,0xC2,0xF8,0x80,
+0xCF,0x5D,0xF4,0x5A,0xDC,0xCE,0x74,0x86,0xA6,0x3E,0x6C,0x0B,0x53,0xCA,0xBD,0x92,
+0xCE,0x19,0x06,0x72,0xE6,0x0C,0x5C,0x38,0x69,0xC7,0x04,0xD6,0xBC,0x6C,0xCE,0x5B,
+0xF6,0xF7,0x68,0x9C,0xDC,0x25,0x15,0x48,0x88,0xA1,0xE9,0xA9,0xF8,0x98,0x9C,0xE0,
+0xF3,0xD5,0x31,0x28,0x61,0x11,0x6C,0x67,0x96,0x8D,0x39,0x99,0xCB,0xC2,0x45,0x24,
+0x39,0x02,0x03,0x01,0x00,0x01,0xA3,0x42,0x30,0x40,0x30,0x0F,0x06,0x03,0x55,0x1D,
+0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x0E,0x06,0x03,0x55,
+0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x1D,0x06,0x03,0x55,
+0x1D,0x0E,0x04,0x16,0x04,0x14,0xAD,0x6C,0xAA,0x94,0x60,0x9C,0xED,0xE4,0xFF,0xFA,
+0x3E,0x0A,0x74,0x2B,0x63,0x03,0xF7,0xB6,0x59,0xBF,0x30,0x0D,0x06,0x09,0x2A,0x86,
+0x48,0x86,0xF7,0x0D,0x01,0x01,0x0B,0x05,0x00,0x03,0x82,0x01,0x01,0x00,0x1A,0x40,
+0xD8,0x95,0x65,0xAC,0x09,0x92,0x89,0xC6,0x39,0xF4,0x10,0xE5,0xA9,0x0E,0x66,0x53,
+0x5D,0x78,0xDE,0xFA,0x24,0x91,0xBB,0xE7,0x44,0x51,0xDF,0xC6,0x16,0x34,0x0A,0xEF,
+0x6A,0x44,0x51,0xEA,0x2B,0x07,0x8A,0x03,0x7A,0xC3,0xEB,0x3F,0x0A,0x2C,0x52,0x16,
+0xA0,0x2B,0x43,0xB9,0x25,0x90,0x3F,0x70,0xA9,0x33,0x25,0x6D,0x45,0x1A,0x28,0x3B,
+0x27,0xCF,0xAA,0xC3,0x29,0x42,0x1B,0xDF,0x3B,0x4C,0xC0,0x33,0x34,0x5B,0x41,0x88,
+0xBF,0x6B,0x2B,0x65,0xAF,0x28,0xEF,0xB2,0xF5,0xC3,0xAA,0x66,0xCE,0x7B,0x56,0xEE,
+0xB7,0xC8,0xCB,0x67,0xC1,0xC9,0x9C,0x1A,0x18,0xB8,0xC4,0xC3,0x49,0x03,0xF1,0x60,
+0x0E,0x50,0xCD,0x46,0xC5,0xF3,0x77,0x79,0xF7,0xB6,0x15,0xE0,0x38,0xDB,0xC7,0x2F,
+0x28,0xA0,0x0C,0x3F,0x77,0x26,0x74,0xD9,0x25,0x12,0xDA,0x31,0xDA,0x1A,0x1E,0xDC,
+0x29,0x41,0x91,0x22,0x3C,0x69,0xA7,0xBB,0x02,0xF2,0xB6,0x5C,0x27,0x03,0x89,0xF4,
+0x06,0xEA,0x9B,0xE4,0x72,0x82,0xE3,0xA1,0x09,0xC1,0xE9,0x00,0x19,0xD3,0x3E,0xD4,
+0x70,0x6B,0xBA,0x71,0xA6,0xAA,0x58,0xAE,0xF4,0xBB,0xE9,0x6C,0xB6,0xEF,0x87,0xCC,
+0x9B,0xBB,0xFF,0x39,0xE6,0x56,0x61,0xD3,0x0A,0xA7,0xC4,0x5C,0x4C,0x60,0x7B,0x05,
+0x77,0x26,0x7A,0xBF,0xD8,0x07,0x52,0x2C,0x62,0xF7,0x70,0x63,0xD9,0x39,0xBC,0x6F,
+0x1C,0xC2,0x79,0xDC,0x76,0x29,0xAF,0xCE,0xC5,0x2C,0x64,0x04,0x5E,0x88,0x36,0x6E,
+0x31,0xD4,0x40,0x1A,0x62,0x34,0x36,0x3F,0x35,0x01,0xAE,0xAC,0x63,0xA0,
+};
+
+
+/* subject:/C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert Assured ID Root CA */
+/* issuer :/C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert Assured ID Root CA */
+
+
+const unsigned char DigiCert_Assured_ID_Root_CA_certificate[955]={
+0x30,0x82,0x03,0xB7,0x30,0x82,0x02,0x9F,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x0C,
+0xE7,0xE0,0xE5,0x17,0xD8,0x46,0xFE,0x8F,0xE5,0x60,0xFC,0x1B,0xF0,0x30,0x39,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,0x65,
+0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x15,0x30,
+0x13,0x06,0x03,0x55,0x04,0x0A,0x13,0x0C,0x44,0x69,0x67,0x69,0x43,0x65,0x72,0x74,
+0x20,0x49,0x6E,0x63,0x31,0x19,0x30,0x17,0x06,0x03,0x55,0x04,0x0B,0x13,0x10,0x77,
+0x77,0x77,0x2E,0x64,0x69,0x67,0x69,0x63,0x65,0x72,0x74,0x2E,0x63,0x6F,0x6D,0x31,
+0x24,0x30,0x22,0x06,0x03,0x55,0x04,0x03,0x13,0x1B,0x44,0x69,0x67,0x69,0x43,0x65,
+0x72,0x74,0x20,0x41,0x73,0x73,0x75,0x72,0x65,0x64,0x20,0x49,0x44,0x20,0x52,0x6F,
+0x6F,0x74,0x20,0x43,0x41,0x30,0x1E,0x17,0x0D,0x30,0x36,0x31,0x31,0x31,0x30,0x30,
+0x30,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x33,0x31,0x31,0x31,0x31,0x30,0x30,0x30,
+0x30,0x30,0x30,0x30,0x5A,0x30,0x65,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,
+0x13,0x02,0x55,0x53,0x31,0x15,0x30,0x13,0x06,0x03,0x55,0x04,0x0A,0x13,0x0C,0x44,
+0x69,0x67,0x69,0x43,0x65,0x72,0x74,0x20,0x49,0x6E,0x63,0x31,0x19,0x30,0x17,0x06,
+0x03,0x55,0x04,0x0B,0x13,0x10,0x77,0x77,0x77,0x2E,0x64,0x69,0x67,0x69,0x63,0x65,
+0x72,0x74,0x2E,0x63,0x6F,0x6D,0x31,0x24,0x30,0x22,0x06,0x03,0x55,0x04,0x03,0x13,
+0x1B,0x44,0x69,0x67,0x69,0x43,0x65,0x72,0x74,0x20,0x41,0x73,0x73,0x75,0x72,0x65,
+0x64,0x20,0x49,0x44,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x41,0x30,0x82,0x01,0x22,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,
+0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01,0x01,0x00,0xAD,0x0E,0x15,
+0xCE,0xE4,0x43,0x80,0x5C,0xB1,0x87,0xF3,0xB7,0x60,0xF9,0x71,0x12,0xA5,0xAE,0xDC,
+0x26,0x94,0x88,0xAA,0xF4,0xCE,0xF5,0x20,0x39,0x28,0x58,0x60,0x0C,0xF8,0x80,0xDA,
+0xA9,0x15,0x95,0x32,0x61,0x3C,0xB5,0xB1,0x28,0x84,0x8A,0x8A,0xDC,0x9F,0x0A,0x0C,
+0x83,0x17,0x7A,0x8F,0x90,0xAC,0x8A,0xE7,0x79,0x53,0x5C,0x31,0x84,0x2A,0xF6,0x0F,
+0x98,0x32,0x36,0x76,0xCC,0xDE,0xDD,0x3C,0xA8,0xA2,0xEF,0x6A,0xFB,0x21,0xF2,0x52,
+0x61,0xDF,0x9F,0x20,0xD7,0x1F,0xE2,0xB1,0xD9,0xFE,0x18,0x64,0xD2,0x12,0x5B,0x5F,
+0xF9,0x58,0x18,0x35,0xBC,0x47,0xCD,0xA1,0x36,0xF9,0x6B,0x7F,0xD4,0xB0,0x38,0x3E,
+0xC1,0x1B,0xC3,0x8C,0x33,0xD9,0xD8,0x2F,0x18,0xFE,0x28,0x0F,0xB3,0xA7,0x83,0xD6,
+0xC3,0x6E,0x44,0xC0,0x61,0x35,0x96,0x16,0xFE,0x59,0x9C,0x8B,0x76,0x6D,0xD7,0xF1,
+0xA2,0x4B,0x0D,0x2B,0xFF,0x0B,0x72,0xDA,0x9E,0x60,0xD0,0x8E,0x90,0x35,0xC6,0x78,
+0x55,0x87,0x20,0xA1,0xCF,0xE5,0x6D,0x0A,0xC8,0x49,0x7C,0x31,0x98,0x33,0x6C,0x22,
+0xE9,0x87,0xD0,0x32,0x5A,0xA2,0xBA,0x13,0x82,0x11,0xED,0x39,0x17,0x9D,0x99,0x3A,
+0x72,0xA1,0xE6,0xFA,0xA4,0xD9,0xD5,0x17,0x31,0x75,0xAE,0x85,0x7D,0x22,0xAE,0x3F,
+0x01,0x46,0x86,0xF6,0x28,0x79,0xC8,0xB1,0xDA,0xE4,0x57,0x17,0xC4,0x7E,0x1C,0x0E,
+0xB0,0xB4,0x92,0xA6,0x56,0xB3,0xBD,0xB2,0x97,0xED,0xAA,0xA7,0xF0,0xB7,0xC5,0xA8,
+0x3F,0x95,0x16,0xD0,0xFF,0xA1,0x96,0xEB,0x08,0x5F,0x18,0x77,0x4F,0x02,0x03,0x01,
+0x00,0x01,0xA3,0x63,0x30,0x61,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,
+0x04,0x04,0x03,0x02,0x01,0x86,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,
+0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,
+0x04,0x14,0x45,0xEB,0xA2,0xAF,0xF4,0x92,0xCB,0x82,0x31,0x2D,0x51,0x8B,0xA7,0xA7,
+0x21,0x9D,0xF3,0x6D,0xC8,0x0F,0x30,0x1F,0x06,0x03,0x55,0x1D,0x23,0x04,0x18,0x30,
+0x16,0x80,0x14,0x45,0xEB,0xA2,0xAF,0xF4,0x92,0xCB,0x82,0x31,0x2D,0x51,0x8B,0xA7,
+0xA7,0x21,0x9D,0xF3,0x6D,0xC8,0x0F,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,
+0x0D,0x01,0x01,0x05,0x05,0x00,0x03,0x82,0x01,0x01,0x00,0xA2,0x0E,0xBC,0xDF,0xE2,
+0xED,0xF0,0xE3,0x72,0x73,0x7A,0x64,0x94,0xBF,0xF7,0x72,0x66,0xD8,0x32,0xE4,0x42,
+0x75,0x62,0xAE,0x87,0xEB,0xF2,0xD5,0xD9,0xDE,0x56,0xB3,0x9F,0xCC,0xCE,0x14,0x28,
+0xB9,0x0D,0x97,0x60,0x5C,0x12,0x4C,0x58,0xE4,0xD3,0x3D,0x83,0x49,0x45,0x58,0x97,
+0x35,0x69,0x1A,0xA8,0x47,0xEA,0x56,0xC6,0x79,0xAB,0x12,0xD8,0x67,0x81,0x84,0xDF,
+0x7F,0x09,0x3C,0x94,0xE6,0xB8,0x26,0x2C,0x20,0xBD,0x3D,0xB3,0x28,0x89,0xF7,0x5F,
+0xFF,0x22,0xE2,0x97,0x84,0x1F,0xE9,0x65,0xEF,0x87,0xE0,0xDF,0xC1,0x67,0x49,0xB3,
+0x5D,0xEB,0xB2,0x09,0x2A,0xEB,0x26,0xED,0x78,0xBE,0x7D,0x3F,0x2B,0xF3,0xB7,0x26,
+0x35,0x6D,0x5F,0x89,0x01,0xB6,0x49,0x5B,0x9F,0x01,0x05,0x9B,0xAB,0x3D,0x25,0xC1,
+0xCC,0xB6,0x7F,0xC2,0xF1,0x6F,0x86,0xC6,0xFA,0x64,0x68,0xEB,0x81,0x2D,0x94,0xEB,
+0x42,0xB7,0xFA,0x8C,0x1E,0xDD,0x62,0xF1,0xBE,0x50,0x67,0xB7,0x6C,0xBD,0xF3,0xF1,
+0x1F,0x6B,0x0C,0x36,0x07,0x16,0x7F,0x37,0x7C,0xA9,0x5B,0x6D,0x7A,0xF1,0x12,0x46,
+0x60,0x83,0xD7,0x27,0x04,0xBE,0x4B,0xCE,0x97,0xBE,0xC3,0x67,0x2A,0x68,0x11,0xDF,
+0x80,0xE7,0x0C,0x33,0x66,0xBF,0x13,0x0D,0x14,0x6E,0xF3,0x7F,0x1F,0x63,0x10,0x1E,
+0xFA,0x8D,0x1B,0x25,0x6D,0x6C,0x8F,0xA5,0xB7,0x61,0x01,0xB1,0xD2,0xA3,0x26,0xA1,
+0x10,0x71,0x9D,0xAD,0xE2,0xC3,0xF9,0xC3,0x99,0x51,0xB7,0x2B,0x07,0x08,0xCE,0x2E,
+0xE6,0x50,0xB2,0xA7,0xFA,0x0A,0x45,0x2F,0xA2,0xF0,0xF2,
+};
+
+
+/* subject:/C=US/O=The Go Daddy Group, Inc./OU=Go Daddy Class 2 Certification Authority */
+/* issuer :/C=US/O=The Go Daddy Group, Inc./OU=Go Daddy Class 2 Certification Authority */
+
+
+const unsigned char Go_Daddy_Class_2_CA_certificate[1028]={
+0x30,0x82,0x04,0x00,0x30,0x82,0x02,0xE8,0xA0,0x03,0x02,0x01,0x02,0x02,0x01,0x00,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,
+0x63,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x21,
+0x30,0x1F,0x06,0x03,0x55,0x04,0x0A,0x13,0x18,0x54,0x68,0x65,0x20,0x47,0x6F,0x20,
+0x44,0x61,0x64,0x64,0x79,0x20,0x47,0x72,0x6F,0x75,0x70,0x2C,0x20,0x49,0x6E,0x63,
+0x2E,0x31,0x31,0x30,0x2F,0x06,0x03,0x55,0x04,0x0B,0x13,0x28,0x47,0x6F,0x20,0x44,
+0x61,0x64,0x64,0x79,0x20,0x43,0x6C,0x61,0x73,0x73,0x20,0x32,0x20,0x43,0x65,0x72,
+0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,
+0x72,0x69,0x74,0x79,0x30,0x1E,0x17,0x0D,0x30,0x34,0x30,0x36,0x32,0x39,0x31,0x37,
+0x30,0x36,0x32,0x30,0x5A,0x17,0x0D,0x33,0x34,0x30,0x36,0x32,0x39,0x31,0x37,0x30,
+0x36,0x32,0x30,0x5A,0x30,0x63,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,
+0x02,0x55,0x53,0x31,0x21,0x30,0x1F,0x06,0x03,0x55,0x04,0x0A,0x13,0x18,0x54,0x68,
+0x65,0x20,0x47,0x6F,0x20,0x44,0x61,0x64,0x64,0x79,0x20,0x47,0x72,0x6F,0x75,0x70,
+0x2C,0x20,0x49,0x6E,0x63,0x2E,0x31,0x31,0x30,0x2F,0x06,0x03,0x55,0x04,0x0B,0x13,
+0x28,0x47,0x6F,0x20,0x44,0x61,0x64,0x64,0x79,0x20,0x43,0x6C,0x61,0x73,0x73,0x20,
+0x32,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,
+0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x30,0x82,0x01,0x20,0x30,0x0D,0x06,
+0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0D,
+0x00,0x30,0x82,0x01,0x08,0x02,0x82,0x01,0x01,0x00,0xDE,0x9D,0xD7,0xEA,0x57,0x18,
+0x49,0xA1,0x5B,0xEB,0xD7,0x5F,0x48,0x86,0xEA,0xBE,0xDD,0xFF,0xE4,0xEF,0x67,0x1C,
+0xF4,0x65,0x68,0xB3,0x57,0x71,0xA0,0x5E,0x77,0xBB,0xED,0x9B,0x49,0xE9,0x70,0x80,
+0x3D,0x56,0x18,0x63,0x08,0x6F,0xDA,0xF2,0xCC,0xD0,0x3F,0x7F,0x02,0x54,0x22,0x54,
+0x10,0xD8,0xB2,0x81,0xD4,0xC0,0x75,0x3D,0x4B,0x7F,0xC7,0x77,0xC3,0x3E,0x78,0xAB,
+0x1A,0x03,0xB5,0x20,0x6B,0x2F,0x6A,0x2B,0xB1,0xC5,0x88,0x7E,0xC4,0xBB,0x1E,0xB0,
+0xC1,0xD8,0x45,0x27,0x6F,0xAA,0x37,0x58,0xF7,0x87,0x26,0xD7,0xD8,0x2D,0xF6,0xA9,
+0x17,0xB7,0x1F,0x72,0x36,0x4E,0xA6,0x17,0x3F,0x65,0x98,0x92,0xDB,0x2A,0x6E,0x5D,
+0xA2,0xFE,0x88,0xE0,0x0B,0xDE,0x7F,0xE5,0x8D,0x15,0xE1,0xEB,0xCB,0x3A,0xD5,0xE2,
+0x12,0xA2,0x13,0x2D,0xD8,0x8E,0xAF,0x5F,0x12,0x3D,0xA0,0x08,0x05,0x08,0xB6,0x5C,
+0xA5,0x65,0x38,0x04,0x45,0x99,0x1E,0xA3,0x60,0x60,0x74,0xC5,0x41,0xA5,0x72,0x62,
+0x1B,0x62,0xC5,0x1F,0x6F,0x5F,0x1A,0x42,0xBE,0x02,0x51,0x65,0xA8,0xAE,0x23,0x18,
+0x6A,0xFC,0x78,0x03,0xA9,0x4D,0x7F,0x80,0xC3,0xFA,0xAB,0x5A,0xFC,0xA1,0x40,0xA4,
+0xCA,0x19,0x16,0xFE,0xB2,0xC8,0xEF,0x5E,0x73,0x0D,0xEE,0x77,0xBD,0x9A,0xF6,0x79,
+0x98,0xBC,0xB1,0x07,0x67,0xA2,0x15,0x0D,0xDD,0xA0,0x58,0xC6,0x44,0x7B,0x0A,0x3E,
+0x62,0x28,0x5F,0xBA,0x41,0x07,0x53,0x58,0xCF,0x11,0x7E,0x38,0x74,0xC5,0xF8,0xFF,
+0xB5,0x69,0x90,0x8F,0x84,0x74,0xEA,0x97,0x1B,0xAF,0x02,0x01,0x03,0xA3,0x81,0xC0,
+0x30,0x81,0xBD,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0xD2,0xC4,
+0xB0,0xD2,0x91,0xD4,0x4C,0x11,0x71,0xB3,0x61,0xCB,0x3D,0xA1,0xFE,0xDD,0xA8,0x6A,
+0xD4,0xE3,0x30,0x81,0x8D,0x06,0x03,0x55,0x1D,0x23,0x04,0x81,0x85,0x30,0x81,0x82,
+0x80,0x14,0xD2,0xC4,0xB0,0xD2,0x91,0xD4,0x4C,0x11,0x71,0xB3,0x61,0xCB,0x3D,0xA1,
+0xFE,0xDD,0xA8,0x6A,0xD4,0xE3,0xA1,0x67,0xA4,0x65,0x30,0x63,0x31,0x0B,0x30,0x09,
+0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x21,0x30,0x1F,0x06,0x03,0x55,
+0x04,0x0A,0x13,0x18,0x54,0x68,0x65,0x20,0x47,0x6F,0x20,0x44,0x61,0x64,0x64,0x79,
+0x20,0x47,0x72,0x6F,0x75,0x70,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x31,0x31,0x30,0x2F,
+0x06,0x03,0x55,0x04,0x0B,0x13,0x28,0x47,0x6F,0x20,0x44,0x61,0x64,0x64,0x79,0x20,
+0x43,0x6C,0x61,0x73,0x73,0x20,0x32,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,
+0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x82,
+0x01,0x00,0x30,0x0C,0x06,0x03,0x55,0x1D,0x13,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x03,
+0x82,0x01,0x01,0x00,0x32,0x4B,0xF3,0xB2,0xCA,0x3E,0x91,0xFC,0x12,0xC6,0xA1,0x07,
+0x8C,0x8E,0x77,0xA0,0x33,0x06,0x14,0x5C,0x90,0x1E,0x18,0xF7,0x08,0xA6,0x3D,0x0A,
+0x19,0xF9,0x87,0x80,0x11,0x6E,0x69,0xE4,0x96,0x17,0x30,0xFF,0x34,0x91,0x63,0x72,
+0x38,0xEE,0xCC,0x1C,0x01,0xA3,0x1D,0x94,0x28,0xA4,0x31,0xF6,0x7A,0xC4,0x54,0xD7,
+0xF6,0xE5,0x31,0x58,0x03,0xA2,0xCC,0xCE,0x62,0xDB,0x94,0x45,0x73,0xB5,0xBF,0x45,
+0xC9,0x24,0xB5,0xD5,0x82,0x02,0xAD,0x23,0x79,0x69,0x8D,0xB8,0xB6,0x4D,0xCE,0xCF,
+0x4C,0xCA,0x33,0x23,0xE8,0x1C,0x88,0xAA,0x9D,0x8B,0x41,0x6E,0x16,0xC9,0x20,0xE5,
+0x89,0x9E,0xCD,0x3B,0xDA,0x70,0xF7,0x7E,0x99,0x26,0x20,0x14,0x54,0x25,0xAB,0x6E,
+0x73,0x85,0xE6,0x9B,0x21,0x9D,0x0A,0x6C,0x82,0x0E,0xA8,0xF8,0xC2,0x0C,0xFA,0x10,
+0x1E,0x6C,0x96,0xEF,0x87,0x0D,0xC4,0x0F,0x61,0x8B,0xAD,0xEE,0x83,0x2B,0x95,0xF8,
+0x8E,0x92,0x84,0x72,0x39,0xEB,0x20,0xEA,0x83,0xED,0x83,0xCD,0x97,0x6E,0x08,0xBC,
+0xEB,0x4E,0x26,0xB6,0x73,0x2B,0xE4,0xD3,0xF6,0x4C,0xFE,0x26,0x71,0xE2,0x61,0x11,
+0x74,0x4A,0xFF,0x57,0x1A,0x87,0x0F,0x75,0x48,0x2E,0xCF,0x51,0x69,0x17,0xA0,0x02,
+0x12,0x61,0x95,0xD5,0xD1,0x40,0xB2,0x10,0x4C,0xEE,0xC4,0xAC,0x10,0x43,0xA6,0xA5,
+0x9E,0x0A,0xD5,0x95,0x62,0x9A,0x0D,0xCF,0x88,0x82,0xC5,0x32,0x0C,0xE4,0x2B,0x9F,
+0x45,0xE6,0x0D,0x9F,0x28,0x9C,0xB1,0xB9,0x2A,0x5A,0x57,0xAD,0x37,0x0F,0xAF,0x1D,
+0x7F,0xDB,0xBD,0x9F,
+};
+
+
+/* subject:/C=US/O=GeoTrust Inc./CN=GeoTrust Primary Certification Authority */
+/* issuer :/C=US/O=GeoTrust Inc./CN=GeoTrust Primary Certification Authority */
+
+
+const unsigned char GeoTrust_Primary_Certification_Authority_certificate[896]={
+0x30,0x82,0x03,0x7C,0x30,0x82,0x02,0x64,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x18,
+0xAC,0xB5,0x6A,0xFD,0x69,0xB6,0x15,0x3A,0x63,0x6C,0xAF,0xDA,0xFA,0xC4,0xA1,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,0x58,
+0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x16,0x30,
+0x14,0x06,0x03,0x55,0x04,0x0A,0x13,0x0D,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,0x74,
+0x20,0x49,0x6E,0x63,0x2E,0x31,0x31,0x30,0x2F,0x06,0x03,0x55,0x04,0x03,0x13,0x28,
+0x47,0x65,0x6F,0x54,0x72,0x75,0x73,0x74,0x20,0x50,0x72,0x69,0x6D,0x61,0x72,0x79,
+0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,
+0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x30,0x1E,0x17,0x0D,0x30,0x36,0x31,0x31,
+0x32,0x37,0x30,0x30,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x33,0x36,0x30,0x37,0x31,
+0x36,0x32,0x33,0x35,0x39,0x35,0x39,0x5A,0x30,0x58,0x31,0x0B,0x30,0x09,0x06,0x03,
+0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x16,0x30,0x14,0x06,0x03,0x55,0x04,0x0A,
+0x13,0x0D,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,0x74,0x20,0x49,0x6E,0x63,0x2E,0x31,
+0x31,0x30,0x2F,0x06,0x03,0x55,0x04,0x03,0x13,0x28,0x47,0x65,0x6F,0x54,0x72,0x75,
+0x73,0x74,0x20,0x50,0x72,0x69,0x6D,0x61,0x72,0x79,0x20,0x43,0x65,0x72,0x74,0x69,
+0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,
+0x74,0x79,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,
+0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,
+0x01,0x01,0x00,0xBE,0xB8,0x15,0x7B,0xFF,0xD4,0x7C,0x7D,0x67,0xAD,0x83,0x64,0x7B,
+0xC8,0x42,0x53,0x2D,0xDF,0xF6,0x84,0x08,0x20,0x61,0xD6,0x01,0x59,0x6A,0x9C,0x44,
+0x11,0xAF,0xEF,0x76,0xFD,0x95,0x7E,0xCE,0x61,0x30,0xBB,0x7A,0x83,0x5F,0x02,0xBD,
+0x01,0x66,0xCA,0xEE,0x15,0x8D,0x6F,0xA1,0x30,0x9C,0xBD,0xA1,0x85,0x9E,0x94,0x3A,
+0xF3,0x56,0x88,0x00,0x31,0xCF,0xD8,0xEE,0x6A,0x96,0x02,0xD9,0xED,0x03,0x8C,0xFB,
+0x75,0x6D,0xE7,0xEA,0xB8,0x55,0x16,0x05,0x16,0x9A,0xF4,0xE0,0x5E,0xB1,0x88,0xC0,
+0x64,0x85,0x5C,0x15,0x4D,0x88,0xC7,0xB7,0xBA,0xE0,0x75,0xE9,0xAD,0x05,0x3D,0x9D,
+0xC7,0x89,0x48,0xE0,0xBB,0x28,0xC8,0x03,0xE1,0x30,0x93,0x64,0x5E,0x52,0xC0,0x59,
+0x70,0x22,0x35,0x57,0x88,0x8A,0xF1,0x95,0x0A,0x83,0xD7,0xBC,0x31,0x73,0x01,0x34,
+0xED,0xEF,0x46,0x71,0xE0,0x6B,0x02,0xA8,0x35,0x72,0x6B,0x97,0x9B,0x66,0xE0,0xCB,
+0x1C,0x79,0x5F,0xD8,0x1A,0x04,0x68,0x1E,0x47,0x02,0xE6,0x9D,0x60,0xE2,0x36,0x97,
+0x01,0xDF,0xCE,0x35,0x92,0xDF,0xBE,0x67,0xC7,0x6D,0x77,0x59,0x3B,0x8F,0x9D,0xD6,
+0x90,0x15,0x94,0xBC,0x42,0x34,0x10,0xC1,0x39,0xF9,0xB1,0x27,0x3E,0x7E,0xD6,0x8A,
+0x75,0xC5,0xB2,0xAF,0x96,0xD3,0xA2,0xDE,0x9B,0xE4,0x98,0xBE,0x7D,0xE1,0xE9,0x81,
+0xAD,0xB6,0x6F,0xFC,0xD7,0x0E,0xDA,0xE0,0x34,0xB0,0x0D,0x1A,0x77,0xE7,0xE3,0x08,
+0x98,0xEF,0x58,0xFA,0x9C,0x84,0xB7,0x36,0xAF,0xC2,0xDF,0xAC,0xD2,0xF4,0x10,0x06,
+0x70,0x71,0x35,0x02,0x03,0x01,0x00,0x01,0xA3,0x42,0x30,0x40,0x30,0x0F,0x06,0x03,
+0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x0E,0x06,
+0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x1D,0x06,
+0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x2C,0xD5,0x50,0x41,0x97,0x15,0x8B,0xF0,
+0x8F,0x36,0x61,0x5B,0x4A,0xFB,0x6B,0xD9,0x99,0xC9,0x33,0x92,0x30,0x0D,0x06,0x09,
+0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x03,0x82,0x01,0x01,0x00,
+0x5A,0x70,0x7F,0x2C,0xDD,0xB7,0x34,0x4F,0xF5,0x86,0x51,0xA9,0x26,0xBE,0x4B,0xB8,
+0xAA,0xF1,0x71,0x0D,0xDC,0x61,0xC7,0xA0,0xEA,0x34,0x1E,0x7A,0x77,0x0F,0x04,0x35,
+0xE8,0x27,0x8F,0x6C,0x90,0xBF,0x91,0x16,0x24,0x46,0x3E,0x4A,0x4E,0xCE,0x2B,0x16,
+0xD5,0x0B,0x52,0x1D,0xFC,0x1F,0x67,0xA2,0x02,0x45,0x31,0x4F,0xCE,0xF3,0xFA,0x03,
+0xA7,0x79,0x9D,0x53,0x6A,0xD9,0xDA,0x63,0x3A,0xF8,0x80,0xD7,0xD3,0x99,0xE1,0xA5,
+0xE1,0xBE,0xD4,0x55,0x71,0x98,0x35,0x3A,0xBE,0x93,0xEA,0xAE,0xAD,0x42,0xB2,0x90,
+0x6F,0xE0,0xFC,0x21,0x4D,0x35,0x63,0x33,0x89,0x49,0xD6,0x9B,0x4E,0xCA,0xC7,0xE7,
+0x4E,0x09,0x00,0xF7,0xDA,0xC7,0xEF,0x99,0x62,0x99,0x77,0xB6,0x95,0x22,0x5E,0x8A,
+0xA0,0xAB,0xF4,0xB8,0x78,0x98,0xCA,0x38,0x19,0x99,0xC9,0x72,0x9E,0x78,0xCD,0x4B,
+0xAC,0xAF,0x19,0xA0,0x73,0x12,0x2D,0xFC,0xC2,0x41,0xBA,0x81,0x91,0xDA,0x16,0x5A,
+0x31,0xB7,0xF9,0xB4,0x71,0x80,0x12,0x48,0x99,0x72,0x73,0x5A,0x59,0x53,0xC1,0x63,
+0x52,0x33,0xED,0xA7,0xC9,0xD2,0x39,0x02,0x70,0xFA,0xE0,0xB1,0x42,0x66,0x29,0xAA,
+0x9B,0x51,0xED,0x30,0x54,0x22,0x14,0x5F,0xD9,0xAB,0x1D,0xC1,0xE4,0x94,0xF0,0xF8,
+0xF5,0x2B,0xF7,0xEA,0xCA,0x78,0x46,0xD6,0xB8,0x91,0xFD,0xA6,0x0D,0x2B,0x1A,0x14,
+0x01,0x3E,0x80,0xF0,0x42,0xA0,0x95,0x07,0x5E,0x6D,0xCD,0xCC,0x4B,0xA4,0x45,0x8D,
+0xAB,0x12,0xE8,0xB3,0xDE,0x5A,0xE5,0xA0,0x7C,0xE8,0x0F,0x22,0x1D,0x5A,0xE9,0x59,
+};
+
+
+/* subject:/C=US/O=VeriSign, Inc./OU=VeriSign Trust Network/OU=(c) 2006 VeriSign, Inc. - For authorized use only/CN=VeriSign Class 3 Public Primary Certification Authority - G5 */
+/* issuer :/C=US/O=VeriSign, Inc./OU=VeriSign Trust Network/OU=(c) 2006 VeriSign, Inc. - For authorized use only/CN=VeriSign Class 3 Public Primary Certification Authority - G5 */
+
+
+const unsigned char VeriSign_Class_3_Public_Primary_Certification_Authority___G5_certificate[1239]={
+0x30,0x82,0x04,0xD3,0x30,0x82,0x03,0xBB,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x18,
+0xDA,0xD1,0x9E,0x26,0x7D,0xE8,0xBB,0x4A,0x21,0x58,0xCD,0xCC,0x6B,0x3B,0x4A,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,0x81,
+0xCA,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x17,
+0x30,0x15,0x06,0x03,0x55,0x04,0x0A,0x13,0x0E,0x56,0x65,0x72,0x69,0x53,0x69,0x67,
+0x6E,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x31,0x1F,0x30,0x1D,0x06,0x03,0x55,0x04,0x0B,
+0x13,0x16,0x56,0x65,0x72,0x69,0x53,0x69,0x67,0x6E,0x20,0x54,0x72,0x75,0x73,0x74,
+0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,0x6B,0x31,0x3A,0x30,0x38,0x06,0x03,0x55,0x04,
+0x0B,0x13,0x31,0x28,0x63,0x29,0x20,0x32,0x30,0x30,0x36,0x20,0x56,0x65,0x72,0x69,
+0x53,0x69,0x67,0x6E,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x20,0x2D,0x20,0x46,0x6F,0x72,
+0x20,0x61,0x75,0x74,0x68,0x6F,0x72,0x69,0x7A,0x65,0x64,0x20,0x75,0x73,0x65,0x20,
+0x6F,0x6E,0x6C,0x79,0x31,0x45,0x30,0x43,0x06,0x03,0x55,0x04,0x03,0x13,0x3C,0x56,
+0x65,0x72,0x69,0x53,0x69,0x67,0x6E,0x20,0x43,0x6C,0x61,0x73,0x73,0x20,0x33,0x20,
+0x50,0x75,0x62,0x6C,0x69,0x63,0x20,0x50,0x72,0x69,0x6D,0x61,0x72,0x79,0x20,0x43,
+0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,
+0x68,0x6F,0x72,0x69,0x74,0x79,0x20,0x2D,0x20,0x47,0x35,0x30,0x1E,0x17,0x0D,0x30,
+0x36,0x31,0x31,0x30,0x38,0x30,0x30,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x33,0x36,
+0x30,0x37,0x31,0x36,0x32,0x33,0x35,0x39,0x35,0x39,0x5A,0x30,0x81,0xCA,0x31,0x0B,
+0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x17,0x30,0x15,0x06,
+0x03,0x55,0x04,0x0A,0x13,0x0E,0x56,0x65,0x72,0x69,0x53,0x69,0x67,0x6E,0x2C,0x20,
+0x49,0x6E,0x63,0x2E,0x31,0x1F,0x30,0x1D,0x06,0x03,0x55,0x04,0x0B,0x13,0x16,0x56,
+0x65,0x72,0x69,0x53,0x69,0x67,0x6E,0x20,0x54,0x72,0x75,0x73,0x74,0x20,0x4E,0x65,
+0x74,0x77,0x6F,0x72,0x6B,0x31,0x3A,0x30,0x38,0x06,0x03,0x55,0x04,0x0B,0x13,0x31,
+0x28,0x63,0x29,0x20,0x32,0x30,0x30,0x36,0x20,0x56,0x65,0x72,0x69,0x53,0x69,0x67,
+0x6E,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x20,0x2D,0x20,0x46,0x6F,0x72,0x20,0x61,0x75,
+0x74,0x68,0x6F,0x72,0x69,0x7A,0x65,0x64,0x20,0x75,0x73,0x65,0x20,0x6F,0x6E,0x6C,
+0x79,0x31,0x45,0x30,0x43,0x06,0x03,0x55,0x04,0x03,0x13,0x3C,0x56,0x65,0x72,0x69,
+0x53,0x69,0x67,0x6E,0x20,0x43,0x6C,0x61,0x73,0x73,0x20,0x33,0x20,0x50,0x75,0x62,
+0x6C,0x69,0x63,0x20,0x50,0x72,0x69,0x6D,0x61,0x72,0x79,0x20,0x43,0x65,0x72,0x74,
+0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,
+0x69,0x74,0x79,0x20,0x2D,0x20,0x47,0x35,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,
+0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,
+0x30,0x82,0x01,0x0A,0x02,0x82,0x01,0x01,0x00,0xAF,0x24,0x08,0x08,0x29,0x7A,0x35,
+0x9E,0x60,0x0C,0xAA,0xE7,0x4B,0x3B,0x4E,0xDC,0x7C,0xBC,0x3C,0x45,0x1C,0xBB,0x2B,
+0xE0,0xFE,0x29,0x02,0xF9,0x57,0x08,0xA3,0x64,0x85,0x15,0x27,0xF5,0xF1,0xAD,0xC8,
+0x31,0x89,0x5D,0x22,0xE8,0x2A,0xAA,0xA6,0x42,0xB3,0x8F,0xF8,0xB9,0x55,0xB7,0xB1,
+0xB7,0x4B,0xB3,0xFE,0x8F,0x7E,0x07,0x57,0xEC,0xEF,0x43,0xDB,0x66,0x62,0x15,0x61,
+0xCF,0x60,0x0D,0xA4,0xD8,0xDE,0xF8,0xE0,0xC3,0x62,0x08,0x3D,0x54,0x13,0xEB,0x49,
+0xCA,0x59,0x54,0x85,0x26,0xE5,0x2B,0x8F,0x1B,0x9F,0xEB,0xF5,0xA1,0x91,0xC2,0x33,
+0x49,0xD8,0x43,0x63,0x6A,0x52,0x4B,0xD2,0x8F,0xE8,0x70,0x51,0x4D,0xD1,0x89,0x69,
+0x7B,0xC7,0x70,0xF6,0xB3,0xDC,0x12,0x74,0xDB,0x7B,0x5D,0x4B,0x56,0xD3,0x96,0xBF,
+0x15,0x77,0xA1,0xB0,0xF4,0xA2,0x25,0xF2,0xAF,0x1C,0x92,0x67,0x18,0xE5,0xF4,0x06,
+0x04,0xEF,0x90,0xB9,0xE4,0x00,0xE4,0xDD,0x3A,0xB5,0x19,0xFF,0x02,0xBA,0xF4,0x3C,
+0xEE,0xE0,0x8B,0xEB,0x37,0x8B,0xEC,0xF4,0xD7,0xAC,0xF2,0xF6,0xF0,0x3D,0xAF,0xDD,
+0x75,0x91,0x33,0x19,0x1D,0x1C,0x40,0xCB,0x74,0x24,0x19,0x21,0x93,0xD9,0x14,0xFE,
+0xAC,0x2A,0x52,0xC7,0x8F,0xD5,0x04,0x49,0xE4,0x8D,0x63,0x47,0x88,0x3C,0x69,0x83,
+0xCB,0xFE,0x47,0xBD,0x2B,0x7E,0x4F,0xC5,0x95,0xAE,0x0E,0x9D,0xD4,0xD1,0x43,0xC0,
+0x67,0x73,0xE3,0x14,0x08,0x7E,0xE5,0x3F,0x9F,0x73,0xB8,0x33,0x0A,0xCF,0x5D,0x3F,
+0x34,0x87,0x96,0x8A,0xEE,0x53,0xE8,0x25,0x15,0x02,0x03,0x01,0x00,0x01,0xA3,0x81,
+0xB2,0x30,0x81,0xAF,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,
+0x30,0x03,0x01,0x01,0xFF,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,
+0x04,0x03,0x02,0x01,0x06,0x30,0x6D,0x06,0x08,0x2B,0x06,0x01,0x05,0x05,0x07,0x01,
+0x0C,0x04,0x61,0x30,0x5F,0xA1,0x5D,0xA0,0x5B,0x30,0x59,0x30,0x57,0x30,0x55,0x16,
+0x09,0x69,0x6D,0x61,0x67,0x65,0x2F,0x67,0x69,0x66,0x30,0x21,0x30,0x1F,0x30,0x07,
+0x06,0x05,0x2B,0x0E,0x03,0x02,0x1A,0x04,0x14,0x8F,0xE5,0xD3,0x1A,0x86,0xAC,0x8D,
+0x8E,0x6B,0xC3,0xCF,0x80,0x6A,0xD4,0x48,0x18,0x2C,0x7B,0x19,0x2E,0x30,0x25,0x16,
+0x23,0x68,0x74,0x74,0x70,0x3A,0x2F,0x2F,0x6C,0x6F,0x67,0x6F,0x2E,0x76,0x65,0x72,
+0x69,0x73,0x69,0x67,0x6E,0x2E,0x63,0x6F,0x6D,0x2F,0x76,0x73,0x6C,0x6F,0x67,0x6F,
+0x2E,0x67,0x69,0x66,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x7F,
+0xD3,0x65,0xA7,0xC2,0xDD,0xEC,0xBB,0xF0,0x30,0x09,0xF3,0x43,0x39,0xFA,0x02,0xAF,
+0x33,0x31,0x33,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,
+0x05,0x00,0x03,0x82,0x01,0x01,0x00,0x93,0x24,0x4A,0x30,0x5F,0x62,0xCF,0xD8,0x1A,
+0x98,0x2F,0x3D,0xEA,0xDC,0x99,0x2D,0xBD,0x77,0xF6,0xA5,0x79,0x22,0x38,0xEC,0xC4,
+0xA7,0xA0,0x78,0x12,0xAD,0x62,0x0E,0x45,0x70,0x64,0xC5,0xE7,0x97,0x66,0x2D,0x98,
+0x09,0x7E,0x5F,0xAF,0xD6,0xCC,0x28,0x65,0xF2,0x01,0xAA,0x08,0x1A,0x47,0xDE,0xF9,
+0xF9,0x7C,0x92,0x5A,0x08,0x69,0x20,0x0D,0xD9,0x3E,0x6D,0x6E,0x3C,0x0D,0x6E,0xD8,
+0xE6,0x06,0x91,0x40,0x18,0xB9,0xF8,0xC1,0xED,0xDF,0xDB,0x41,0xAA,0xE0,0x96,0x20,
+0xC9,0xCD,0x64,0x15,0x38,0x81,0xC9,0x94,0xEE,0xA2,0x84,0x29,0x0B,0x13,0x6F,0x8E,
+0xDB,0x0C,0xDD,0x25,0x02,0xDB,0xA4,0x8B,0x19,0x44,0xD2,0x41,0x7A,0x05,0x69,0x4A,
+0x58,0x4F,0x60,0xCA,0x7E,0x82,0x6A,0x0B,0x02,0xAA,0x25,0x17,0x39,0xB5,0xDB,0x7F,
+0xE7,0x84,0x65,0x2A,0x95,0x8A,0xBD,0x86,0xDE,0x5E,0x81,0x16,0x83,0x2D,0x10,0xCC,
+0xDE,0xFD,0xA8,0x82,0x2A,0x6D,0x28,0x1F,0x0D,0x0B,0xC4,0xE5,0xE7,0x1A,0x26,0x19,
+0xE1,0xF4,0x11,0x6F,0x10,0xB5,0x95,0xFC,0xE7,0x42,0x05,0x32,0xDB,0xCE,0x9D,0x51,
+0x5E,0x28,0xB6,0x9E,0x85,0xD3,0x5B,0xEF,0xA5,0x7D,0x45,0x40,0x72,0x8E,0xB7,0x0E,
+0x6B,0x0E,0x06,0xFB,0x33,0x35,0x48,0x71,0xB8,0x9D,0x27,0x8B,0xC4,0x65,0x5F,0x0D,
+0x86,0x76,0x9C,0x44,0x7A,0xF6,0x95,0x5C,0xF6,0x5D,0x32,0x08,0x33,0xA4,0x54,0xB6,
+0x18,0x3F,0x68,0x5C,0xF2,0x42,0x4A,0x85,0x38,0x54,0x83,0x5F,0xD1,0xE8,0x2C,0xF2,
+0xAC,0x11,0xD6,0xA8,0xED,0x63,0x6A,
+};
+
+
+/* subject:/C=US/O=Equifax/OU=Equifax Secure Certificate Authority */
+/* issuer :/C=US/O=Equifax/OU=Equifax Secure Certificate Authority */
+
+
+const unsigned char Equifax_Secure_CA_certificate[804]={
+0x30,0x82,0x03,0x20,0x30,0x82,0x02,0x89,0xA0,0x03,0x02,0x01,0x02,0x02,0x04,0x35,
+0xDE,0xF4,0xCF,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,
+0x05,0x00,0x30,0x4E,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,
+0x53,0x31,0x10,0x30,0x0E,0x06,0x03,0x55,0x04,0x0A,0x13,0x07,0x45,0x71,0x75,0x69,
+0x66,0x61,0x78,0x31,0x2D,0x30,0x2B,0x06,0x03,0x55,0x04,0x0B,0x13,0x24,0x45,0x71,
+0x75,0x69,0x66,0x61,0x78,0x20,0x53,0x65,0x63,0x75,0x72,0x65,0x20,0x43,0x65,0x72,
+0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x65,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,
+0x74,0x79,0x30,0x1E,0x17,0x0D,0x39,0x38,0x30,0x38,0x32,0x32,0x31,0x36,0x34,0x31,
+0x35,0x31,0x5A,0x17,0x0D,0x31,0x38,0x30,0x38,0x32,0x32,0x31,0x36,0x34,0x31,0x35,
+0x31,0x5A,0x30,0x4E,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,
+0x53,0x31,0x10,0x30,0x0E,0x06,0x03,0x55,0x04,0x0A,0x13,0x07,0x45,0x71,0x75,0x69,
+0x66,0x61,0x78,0x31,0x2D,0x30,0x2B,0x06,0x03,0x55,0x04,0x0B,0x13,0x24,0x45,0x71,
+0x75,0x69,0x66,0x61,0x78,0x20,0x53,0x65,0x63,0x75,0x72,0x65,0x20,0x43,0x65,0x72,
+0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x65,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,
+0x74,0x79,0x30,0x81,0x9F,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,
+0x01,0x01,0x05,0x00,0x03,0x81,0x8D,0x00,0x30,0x81,0x89,0x02,0x81,0x81,0x00,0xC1,
+0x5D,0xB1,0x58,0x67,0x08,0x62,0xEE,0xA0,0x9A,0x2D,0x1F,0x08,0x6D,0x91,0x14,0x68,
+0x98,0x0A,0x1E,0xFE,0xDA,0x04,0x6F,0x13,0x84,0x62,0x21,0xC3,0xD1,0x7C,0xCE,0x9F,
+0x05,0xE0,0xB8,0x01,0xF0,0x4E,0x34,0xEC,0xE2,0x8A,0x95,0x04,0x64,0xAC,0xF1,0x6B,
+0x53,0x5F,0x05,0xB3,0xCB,0x67,0x80,0xBF,0x42,0x02,0x8E,0xFE,0xDD,0x01,0x09,0xEC,
+0xE1,0x00,0x14,0x4F,0xFC,0xFB,0xF0,0x0C,0xDD,0x43,0xBA,0x5B,0x2B,0xE1,0x1F,0x80,
+0x70,0x99,0x15,0x57,0x93,0x16,0xF1,0x0F,0x97,0x6A,0xB7,0xC2,0x68,0x23,0x1C,0xCC,
+0x4D,0x59,0x30,0xAC,0x51,0x1E,0x3B,0xAF,0x2B,0xD6,0xEE,0x63,0x45,0x7B,0xC5,0xD9,
+0x5F,0x50,0xD2,0xE3,0x50,0x0F,0x3A,0x88,0xE7,0xBF,0x14,0xFD,0xE0,0xC7,0xB9,0x02,
+0x03,0x01,0x00,0x01,0xA3,0x82,0x01,0x09,0x30,0x82,0x01,0x05,0x30,0x70,0x06,0x03,
+0x55,0x1D,0x1F,0x04,0x69,0x30,0x67,0x30,0x65,0xA0,0x63,0xA0,0x61,0xA4,0x5F,0x30,
+0x5D,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x10,
+0x30,0x0E,0x06,0x03,0x55,0x04,0x0A,0x13,0x07,0x45,0x71,0x75,0x69,0x66,0x61,0x78,
+0x31,0x2D,0x30,0x2B,0x06,0x03,0x55,0x04,0x0B,0x13,0x24,0x45,0x71,0x75,0x69,0x66,
+0x61,0x78,0x20,0x53,0x65,0x63,0x75,0x72,0x65,0x20,0x43,0x65,0x72,0x74,0x69,0x66,
+0x69,0x63,0x61,0x74,0x65,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x31,
+0x0D,0x30,0x0B,0x06,0x03,0x55,0x04,0x03,0x13,0x04,0x43,0x52,0x4C,0x31,0x30,0x1A,
+0x06,0x03,0x55,0x1D,0x10,0x04,0x13,0x30,0x11,0x81,0x0F,0x32,0x30,0x31,0x38,0x30,
+0x38,0x32,0x32,0x31,0x36,0x34,0x31,0x35,0x31,0x5A,0x30,0x0B,0x06,0x03,0x55,0x1D,
+0x0F,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x1F,0x06,0x03,0x55,0x1D,0x23,0x04,0x18,
+0x30,0x16,0x80,0x14,0x48,0xE6,0x68,0xF9,0x2B,0xD2,0xB2,0x95,0xD7,0x47,0xD8,0x23,
+0x20,0x10,0x4F,0x33,0x98,0x90,0x9F,0xD4,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,
+0x16,0x04,0x14,0x48,0xE6,0x68,0xF9,0x2B,0xD2,0xB2,0x95,0xD7,0x47,0xD8,0x23,0x20,
+0x10,0x4F,0x33,0x98,0x90,0x9F,0xD4,0x30,0x0C,0x06,0x03,0x55,0x1D,0x13,0x04,0x05,
+0x30,0x03,0x01,0x01,0xFF,0x30,0x1A,0x06,0x09,0x2A,0x86,0x48,0x86,0xF6,0x7D,0x07,
+0x41,0x00,0x04,0x0D,0x30,0x0B,0x1B,0x05,0x56,0x33,0x2E,0x30,0x63,0x03,0x02,0x06,
+0xC0,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,
+0x03,0x81,0x81,0x00,0x58,0xCE,0x29,0xEA,0xFC,0xF7,0xDE,0xB5,0xCE,0x02,0xB9,0x17,
+0xB5,0x85,0xD1,0xB9,0xE3,0xE0,0x95,0xCC,0x25,0x31,0x0D,0x00,0xA6,0x92,0x6E,0x7F,
+0xB6,0x92,0x63,0x9E,0x50,0x95,0xD1,0x9A,0x6F,0xE4,0x11,0xDE,0x63,0x85,0x6E,0x98,
+0xEE,0xA8,0xFF,0x5A,0xC8,0xD3,0x55,0xB2,0x66,0x71,0x57,0xDE,0xC0,0x21,0xEB,0x3D,
+0x2A,0xA7,0x23,0x49,0x01,0x04,0x86,0x42,0x7B,0xFC,0xEE,0x7F,0xA2,0x16,0x52,0xB5,
+0x67,0x67,0xD3,0x40,0xDB,0x3B,0x26,0x58,0xB2,0x28,0x77,0x3D,0xAE,0x14,0x77,0x61,
+0xD6,0xFA,0x2A,0x66,0x27,0xA0,0x0D,0xFA,0xA7,0x73,0x5C,0xEA,0x70,0xF1,0x94,0x21,
+0x65,0x44,0x5F,0xFA,0xFC,0xEF,0x29,0x68,0xA9,0xA2,0x87,0x79,0xEF,0x79,0xEF,0x4F,
+0xAC,0x07,0x77,0x38,
+};
+
+
+/* subject:/O=Entrust.net/OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/OU=(c) 1999 Entrust.net Limited/CN=Entrust.net Certification Authority (2048) */
+/* issuer :/O=Entrust.net/OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/OU=(c) 1999 Entrust.net Limited/CN=Entrust.net Certification Authority (2048) */
+
+
+const unsigned char Entrust_net_Premium_2048_Secure_Server_CA_certificate[1120]={
+0x30,0x82,0x04,0x5C,0x30,0x82,0x03,0x44,0xA0,0x03,0x02,0x01,0x02,0x02,0x04,0x38,
+0x63,0xB9,0x66,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,
+0x05,0x00,0x30,0x81,0xB4,0x31,0x14,0x30,0x12,0x06,0x03,0x55,0x04,0x0A,0x13,0x0B,
+0x45,0x6E,0x74,0x72,0x75,0x73,0x74,0x2E,0x6E,0x65,0x74,0x31,0x40,0x30,0x3E,0x06,
+0x03,0x55,0x04,0x0B,0x14,0x37,0x77,0x77,0x77,0x2E,0x65,0x6E,0x74,0x72,0x75,0x73,
+0x74,0x2E,0x6E,0x65,0x74,0x2F,0x43,0x50,0x53,0x5F,0x32,0x30,0x34,0x38,0x20,0x69,
+0x6E,0x63,0x6F,0x72,0x70,0x2E,0x20,0x62,0x79,0x20,0x72,0x65,0x66,0x2E,0x20,0x28,
+0x6C,0x69,0x6D,0x69,0x74,0x73,0x20,0x6C,0x69,0x61,0x62,0x2E,0x29,0x31,0x25,0x30,
+0x23,0x06,0x03,0x55,0x04,0x0B,0x13,0x1C,0x28,0x63,0x29,0x20,0x31,0x39,0x39,0x39,
+0x20,0x45,0x6E,0x74,0x72,0x75,0x73,0x74,0x2E,0x6E,0x65,0x74,0x20,0x4C,0x69,0x6D,
+0x69,0x74,0x65,0x64,0x31,0x33,0x30,0x31,0x06,0x03,0x55,0x04,0x03,0x13,0x2A,0x45,
+0x6E,0x74,0x72,0x75,0x73,0x74,0x2E,0x6E,0x65,0x74,0x20,0x43,0x65,0x72,0x74,0x69,
+0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,
+0x74,0x79,0x20,0x28,0x32,0x30,0x34,0x38,0x29,0x30,0x1E,0x17,0x0D,0x39,0x39,0x31,
+0x32,0x32,0x34,0x31,0x37,0x35,0x30,0x35,0x31,0x5A,0x17,0x0D,0x31,0x39,0x31,0x32,
+0x32,0x34,0x31,0x38,0x32,0x30,0x35,0x31,0x5A,0x30,0x81,0xB4,0x31,0x14,0x30,0x12,
+0x06,0x03,0x55,0x04,0x0A,0x13,0x0B,0x45,0x6E,0x74,0x72,0x75,0x73,0x74,0x2E,0x6E,
+0x65,0x74,0x31,0x40,0x30,0x3E,0x06,0x03,0x55,0x04,0x0B,0x14,0x37,0x77,0x77,0x77,
+0x2E,0x65,0x6E,0x74,0x72,0x75,0x73,0x74,0x2E,0x6E,0x65,0x74,0x2F,0x43,0x50,0x53,
+0x5F,0x32,0x30,0x34,0x38,0x20,0x69,0x6E,0x63,0x6F,0x72,0x70,0x2E,0x20,0x62,0x79,
+0x20,0x72,0x65,0x66,0x2E,0x20,0x28,0x6C,0x69,0x6D,0x69,0x74,0x73,0x20,0x6C,0x69,
+0x61,0x62,0x2E,0x29,0x31,0x25,0x30,0x23,0x06,0x03,0x55,0x04,0x0B,0x13,0x1C,0x28,
+0x63,0x29,0x20,0x31,0x39,0x39,0x39,0x20,0x45,0x6E,0x74,0x72,0x75,0x73,0x74,0x2E,
+0x6E,0x65,0x74,0x20,0x4C,0x69,0x6D,0x69,0x74,0x65,0x64,0x31,0x33,0x30,0x31,0x06,
+0x03,0x55,0x04,0x03,0x13,0x2A,0x45,0x6E,0x74,0x72,0x75,0x73,0x74,0x2E,0x6E,0x65,
+0x74,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,
+0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x20,0x28,0x32,0x30,0x34,0x38,0x29,
+0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,
+0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01,0x01,
+0x00,0xAD,0x4D,0x4B,0xA9,0x12,0x86,0xB2,0xEA,0xA3,0x20,0x07,0x15,0x16,0x64,0x2A,
+0x2B,0x4B,0xD1,0xBF,0x0B,0x4A,0x4D,0x8E,0xED,0x80,0x76,0xA5,0x67,0xB7,0x78,0x40,
+0xC0,0x73,0x42,0xC8,0x68,0xC0,0xDB,0x53,0x2B,0xDD,0x5E,0xB8,0x76,0x98,0x35,0x93,
+0x8B,0x1A,0x9D,0x7C,0x13,0x3A,0x0E,0x1F,0x5B,0xB7,0x1E,0xCF,0xE5,0x24,0x14,0x1E,
+0xB1,0x81,0xA9,0x8D,0x7D,0xB8,0xCC,0x6B,0x4B,0x03,0xF1,0x02,0x0C,0xDC,0xAB,0xA5,
+0x40,0x24,0x00,0x7F,0x74,0x94,0xA1,0x9D,0x08,0x29,0xB3,0x88,0x0B,0xF5,0x87,0x77,
+0x9D,0x55,0xCD,0xE4,0xC3,0x7E,0xD7,0x6A,0x64,0xAB,0x85,0x14,0x86,0x95,0x5B,0x97,
+0x32,0x50,0x6F,0x3D,0xC8,0xBA,0x66,0x0C,0xE3,0xFC,0xBD,0xB8,0x49,0xC1,0x76,0x89,
+0x49,0x19,0xFD,0xC0,0xA8,0xBD,0x89,0xA3,0x67,0x2F,0xC6,0x9F,0xBC,0x71,0x19,0x60,
+0xB8,0x2D,0xE9,0x2C,0xC9,0x90,0x76,0x66,0x7B,0x94,0xE2,0xAF,0x78,0xD6,0x65,0x53,
+0x5D,0x3C,0xD6,0x9C,0xB2,0xCF,0x29,0x03,0xF9,0x2F,0xA4,0x50,0xB2,0xD4,0x48,0xCE,
+0x05,0x32,0x55,0x8A,0xFD,0xB2,0x64,0x4C,0x0E,0xE4,0x98,0x07,0x75,0xDB,0x7F,0xDF,
+0xB9,0x08,0x55,0x60,0x85,0x30,0x29,0xF9,0x7B,0x48,0xA4,0x69,0x86,0xE3,0x35,0x3F,
+0x1E,0x86,0x5D,0x7A,0x7A,0x15,0xBD,0xEF,0x00,0x8E,0x15,0x22,0x54,0x17,0x00,0x90,
+0x26,0x93,0xBC,0x0E,0x49,0x68,0x91,0xBF,0xF8,0x47,0xD3,0x9D,0x95,0x42,0xC1,0x0E,
+0x4D,0xDF,0x6F,0x26,0xCF,0xC3,0x18,0x21,0x62,0x66,0x43,0x70,0xD6,0xD5,0xC0,0x07,
+0xE1,0x02,0x03,0x01,0x00,0x01,0xA3,0x74,0x30,0x72,0x30,0x11,0x06,0x09,0x60,0x86,
+0x48,0x01,0x86,0xF8,0x42,0x01,0x01,0x04,0x04,0x03,0x02,0x00,0x07,0x30,0x1F,0x06,
+0x03,0x55,0x1D,0x23,0x04,0x18,0x30,0x16,0x80,0x14,0x55,0xE4,0x81,0xD1,0x11,0x80,
+0xBE,0xD8,0x89,0xB9,0x08,0xA3,0x31,0xF9,0xA1,0x24,0x09,0x16,0xB9,0x70,0x30,0x1D,
+0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x55,0xE4,0x81,0xD1,0x11,0x80,0xBE,
+0xD8,0x89,0xB9,0x08,0xA3,0x31,0xF9,0xA1,0x24,0x09,0x16,0xB9,0x70,0x30,0x1D,0x06,
+0x09,0x2A,0x86,0x48,0x86,0xF6,0x7D,0x07,0x41,0x00,0x04,0x10,0x30,0x0E,0x1B,0x08,
+0x56,0x35,0x2E,0x30,0x3A,0x34,0x2E,0x30,0x03,0x02,0x04,0x90,0x30,0x0D,0x06,0x09,
+0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x03,0x82,0x01,0x01,0x00,
+0x59,0x47,0xAC,0x21,0x84,0x8A,0x17,0xC9,0x9C,0x89,0x53,0x1E,0xBA,0x80,0x85,0x1A,
+0xC6,0x3C,0x4E,0x3E,0xB1,0x9C,0xB6,0x7C,0xC6,0x92,0x5D,0x18,0x64,0x02,0xE3,0xD3,
+0x06,0x08,0x11,0x61,0x7C,0x63,0xE3,0x2B,0x9D,0x31,0x03,0x70,0x76,0xD2,0xA3,0x28,
+0xA0,0xF4,0xBB,0x9A,0x63,0x73,0xED,0x6D,0xE5,0x2A,0xDB,0xED,0x14,0xA9,0x2B,0xC6,
+0x36,0x11,0xD0,0x2B,0xEB,0x07,0x8B,0xA5,0xDA,0x9E,0x5C,0x19,0x9D,0x56,0x12,0xF5,
+0x54,0x29,0xC8,0x05,0xED,0xB2,0x12,0x2A,0x8D,0xF4,0x03,0x1B,0xFF,0xE7,0x92,0x10,
+0x87,0xB0,0x3A,0xB5,0xC3,0x9D,0x05,0x37,0x12,0xA3,0xC7,0xF4,0x15,0xB9,0xD5,0xA4,
+0x39,0x16,0x9B,0x53,0x3A,0x23,0x91,0xF1,0xA8,0x82,0xA2,0x6A,0x88,0x68,0xC1,0x79,
+0x02,0x22,0xBC,0xAA,0xA6,0xD6,0xAE,0xDF,0xB0,0x14,0x5F,0xB8,0x87,0xD0,0xDD,0x7C,
+0x7F,0x7B,0xFF,0xAF,0x1C,0xCF,0xE6,0xDB,0x07,0xAD,0x5E,0xDB,0x85,0x9D,0xD0,0x2B,
+0x0D,0x33,0xDB,0x04,0xD1,0xE6,0x49,0x40,0x13,0x2B,0x76,0xFB,0x3E,0xE9,0x9C,0x89,
+0x0F,0x15,0xCE,0x18,0xB0,0x85,0x78,0x21,0x4F,0x6B,0x4F,0x0E,0xFA,0x36,0x67,0xCD,
+0x07,0xF2,0xFF,0x08,0xD0,0xE2,0xDE,0xD9,0xBF,0x2A,0xAF,0xB8,0x87,0x86,0x21,0x3C,
+0x04,0xCA,0xB7,0x94,0x68,0x7F,0xCF,0x3C,0xE9,0x98,0xD7,0x38,0xFF,0xEC,0xC0,0xD9,
+0x50,0xF0,0x2E,0x4B,0x58,0xAE,0x46,0x6F,0xD0,0x2E,0xC3,0x60,0xDA,0x72,0x55,0x72,
+0xBD,0x4C,0x45,0x9E,0x61,0xBA,0xBF,0x84,0x81,0x92,0x03,0xD1,0xD2,0x69,0x7C,0xC5,
+};
+
+
+/* subject:/C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert Assured ID Root G3 */
+/* issuer :/C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert Assured ID Root G3 */
+
+
+const unsigned char DigiCert_Assured_ID_Root_G3_certificate[586]={
+0x30,0x82,0x02,0x46,0x30,0x82,0x01,0xCD,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x0B,
+0xA1,0x5A,0xFA,0x1D,0xDF,0xA0,0xB5,0x49,0x44,0xAF,0xCD,0x24,0xA0,0x6C,0xEC,0x30,
+0x0A,0x06,0x08,0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x03,0x30,0x65,0x31,0x0B,0x30,
+0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x15,0x30,0x13,0x06,0x03,
+0x55,0x04,0x0A,0x13,0x0C,0x44,0x69,0x67,0x69,0x43,0x65,0x72,0x74,0x20,0x49,0x6E,
+0x63,0x31,0x19,0x30,0x17,0x06,0x03,0x55,0x04,0x0B,0x13,0x10,0x77,0x77,0x77,0x2E,
+0x64,0x69,0x67,0x69,0x63,0x65,0x72,0x74,0x2E,0x63,0x6F,0x6D,0x31,0x24,0x30,0x22,
+0x06,0x03,0x55,0x04,0x03,0x13,0x1B,0x44,0x69,0x67,0x69,0x43,0x65,0x72,0x74,0x20,
+0x41,0x73,0x73,0x75,0x72,0x65,0x64,0x20,0x49,0x44,0x20,0x52,0x6F,0x6F,0x74,0x20,
+0x47,0x33,0x30,0x1E,0x17,0x0D,0x31,0x33,0x30,0x38,0x30,0x31,0x31,0x32,0x30,0x30,
+0x30,0x30,0x5A,0x17,0x0D,0x33,0x38,0x30,0x31,0x31,0x35,0x31,0x32,0x30,0x30,0x30,
+0x30,0x5A,0x30,0x65,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,
+0x53,0x31,0x15,0x30,0x13,0x06,0x03,0x55,0x04,0x0A,0x13,0x0C,0x44,0x69,0x67,0x69,
+0x43,0x65,0x72,0x74,0x20,0x49,0x6E,0x63,0x31,0x19,0x30,0x17,0x06,0x03,0x55,0x04,
+0x0B,0x13,0x10,0x77,0x77,0x77,0x2E,0x64,0x69,0x67,0x69,0x63,0x65,0x72,0x74,0x2E,
+0x63,0x6F,0x6D,0x31,0x24,0x30,0x22,0x06,0x03,0x55,0x04,0x03,0x13,0x1B,0x44,0x69,
+0x67,0x69,0x43,0x65,0x72,0x74,0x20,0x41,0x73,0x73,0x75,0x72,0x65,0x64,0x20,0x49,
+0x44,0x20,0x52,0x6F,0x6F,0x74,0x20,0x47,0x33,0x30,0x76,0x30,0x10,0x06,0x07,0x2A,
+0x86,0x48,0xCE,0x3D,0x02,0x01,0x06,0x05,0x2B,0x81,0x04,0x00,0x22,0x03,0x62,0x00,
+0x04,0x19,0xE7,0xBC,0xAC,0x44,0x65,0xED,0xCD,0xB8,0x3F,0x58,0xFB,0x8D,0xB1,0x57,
+0xA9,0x44,0x2D,0x05,0x15,0xF2,0xEF,0x0B,0xFF,0x10,0x74,0x9F,0xB5,0x62,0x52,0x5F,
+0x66,0x7E,0x1F,0xE5,0xDC,0x1B,0x45,0x79,0x0B,0xCC,0xC6,0x53,0x0A,0x9D,0x8D,0x5D,
+0x02,0xD9,0xA9,0x59,0xDE,0x02,0x5A,0xF6,0x95,0x2A,0x0E,0x8D,0x38,0x4A,0x8A,0x49,
+0xC6,0xBC,0xC6,0x03,0x38,0x07,0x5F,0x55,0xDA,0x7E,0x09,0x6E,0xE2,0x7F,0x5E,0xD0,
+0x45,0x20,0x0F,0x59,0x76,0x10,0xD6,0xA0,0x24,0xF0,0x2D,0xDE,0x36,0xF2,0x6C,0x29,
+0x39,0xA3,0x42,0x30,0x40,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,
+0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,
+0x04,0x04,0x03,0x02,0x01,0x86,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,
+0x14,0xCB,0xD0,0xBD,0xA9,0xE1,0x98,0x05,0x51,0xA1,0x4D,0x37,0xA2,0x83,0x79,0xCE,
+0x8D,0x1D,0x2A,0xE4,0x84,0x30,0x0A,0x06,0x08,0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,
+0x03,0x03,0x67,0x00,0x30,0x64,0x02,0x30,0x25,0xA4,0x81,0x45,0x02,0x6B,0x12,0x4B,
+0x75,0x74,0x4F,0xC8,0x23,0xE3,0x70,0xF2,0x75,0x72,0xDE,0x7C,0x89,0xF0,0xCF,0x91,
+0x72,0x61,0x9E,0x5E,0x10,0x92,0x59,0x56,0xB9,0x83,0xC7,0x10,0xE7,0x38,0xE9,0x58,
+0x26,0x36,0x7D,0xD5,0xE4,0x34,0x86,0x39,0x02,0x30,0x7C,0x36,0x53,0xF0,0x30,0xE5,
+0x62,0x63,0x3A,0x99,0xE2,0xB6,0xA3,0x3B,0x9B,0x34,0xFA,0x1E,0xDA,0x10,0x92,0x71,
+0x5E,0x91,0x13,0xA7,0xDD,0xA4,0x6E,0x92,0xCC,0x32,0xD6,0xF5,0x21,0x66,0xC7,0x2F,
+0xEA,0x96,0x63,0x6A,0x65,0x45,0x92,0x95,0x01,0xB4,
+};
+
+
+/* subject:/C=GB/ST=Greater Manchester/L=Salford/O=COMODO CA Limited/CN=COMODO Certification Authority */
+/* issuer :/C=GB/ST=Greater Manchester/L=Salford/O=COMODO CA Limited/CN=COMODO Certification Authority */
+
+
+const unsigned char COMODO_Certification_Authority_certificate[1057]={
+0x30,0x82,0x04,0x1D,0x30,0x82,0x03,0x05,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x4E,
+0x81,0x2D,0x8A,0x82,0x65,0xE0,0x0B,0x02,0xEE,0x3E,0x35,0x02,0x46,0xE5,0x3D,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,0x81,
+0x81,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x47,0x42,0x31,0x1B,
+0x30,0x19,0x06,0x03,0x55,0x04,0x08,0x13,0x12,0x47,0x72,0x65,0x61,0x74,0x65,0x72,
+0x20,0x4D,0x61,0x6E,0x63,0x68,0x65,0x73,0x74,0x65,0x72,0x31,0x10,0x30,0x0E,0x06,
+0x03,0x55,0x04,0x07,0x13,0x07,0x53,0x61,0x6C,0x66,0x6F,0x72,0x64,0x31,0x1A,0x30,
+0x18,0x06,0x03,0x55,0x04,0x0A,0x13,0x11,0x43,0x4F,0x4D,0x4F,0x44,0x4F,0x20,0x43,
+0x41,0x20,0x4C,0x69,0x6D,0x69,0x74,0x65,0x64,0x31,0x27,0x30,0x25,0x06,0x03,0x55,
+0x04,0x03,0x13,0x1E,0x43,0x4F,0x4D,0x4F,0x44,0x4F,0x20,0x43,0x65,0x72,0x74,0x69,
+0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,
+0x74,0x79,0x30,0x1E,0x17,0x0D,0x30,0x36,0x31,0x32,0x30,0x31,0x30,0x30,0x30,0x30,
+0x30,0x30,0x5A,0x17,0x0D,0x32,0x39,0x31,0x32,0x33,0x31,0x32,0x33,0x35,0x39,0x35,
+0x39,0x5A,0x30,0x81,0x81,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,
+0x47,0x42,0x31,0x1B,0x30,0x19,0x06,0x03,0x55,0x04,0x08,0x13,0x12,0x47,0x72,0x65,
+0x61,0x74,0x65,0x72,0x20,0x4D,0x61,0x6E,0x63,0x68,0x65,0x73,0x74,0x65,0x72,0x31,
+0x10,0x30,0x0E,0x06,0x03,0x55,0x04,0x07,0x13,0x07,0x53,0x61,0x6C,0x66,0x6F,0x72,
+0x64,0x31,0x1A,0x30,0x18,0x06,0x03,0x55,0x04,0x0A,0x13,0x11,0x43,0x4F,0x4D,0x4F,
+0x44,0x4F,0x20,0x43,0x41,0x20,0x4C,0x69,0x6D,0x69,0x74,0x65,0x64,0x31,0x27,0x30,
+0x25,0x06,0x03,0x55,0x04,0x03,0x13,0x1E,0x43,0x4F,0x4D,0x4F,0x44,0x4F,0x20,0x43,
+0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,
+0x68,0x6F,0x72,0x69,0x74,0x79,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,
+0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,
+0x01,0x0A,0x02,0x82,0x01,0x01,0x00,0xD0,0x40,0x8B,0x8B,0x72,0xE3,0x91,0x1B,0xF7,
+0x51,0xC1,0x1B,0x54,0x04,0x98,0xD3,0xA9,0xBF,0xC1,0xE6,0x8A,0x5D,0x3B,0x87,0xFB,
+0xBB,0x88,0xCE,0x0D,0xE3,0x2F,0x3F,0x06,0x96,0xF0,0xA2,0x29,0x50,0x99,0xAE,0xDB,
+0x3B,0xA1,0x57,0xB0,0x74,0x51,0x71,0xCD,0xED,0x42,0x91,0x4D,0x41,0xFE,0xA9,0xC8,
+0xD8,0x6A,0x86,0x77,0x44,0xBB,0x59,0x66,0x97,0x50,0x5E,0xB4,0xD4,0x2C,0x70,0x44,
+0xCF,0xDA,0x37,0x95,0x42,0x69,0x3C,0x30,0xC4,0x71,0xB3,0x52,0xF0,0x21,0x4D,0xA1,
+0xD8,0xBA,0x39,0x7C,0x1C,0x9E,0xA3,0x24,0x9D,0xF2,0x83,0x16,0x98,0xAA,0x16,0x7C,
+0x43,0x9B,0x15,0x5B,0xB7,0xAE,0x34,0x91,0xFE,0xD4,0x62,0x26,0x18,0x46,0x9A,0x3F,
+0xEB,0xC1,0xF9,0xF1,0x90,0x57,0xEB,0xAC,0x7A,0x0D,0x8B,0xDB,0x72,0x30,0x6A,0x66,
+0xD5,0xE0,0x46,0xA3,0x70,0xDC,0x68,0xD9,0xFF,0x04,0x48,0x89,0x77,0xDE,0xB5,0xE9,
+0xFB,0x67,0x6D,0x41,0xE9,0xBC,0x39,0xBD,0x32,0xD9,0x62,0x02,0xF1,0xB1,0xA8,0x3D,
+0x6E,0x37,0x9C,0xE2,0x2F,0xE2,0xD3,0xA2,0x26,0x8B,0xC6,0xB8,0x55,0x43,0x88,0xE1,
+0x23,0x3E,0xA5,0xD2,0x24,0x39,0x6A,0x47,0xAB,0x00,0xD4,0xA1,0xB3,0xA9,0x25,0xFE,
+0x0D,0x3F,0xA7,0x1D,0xBA,0xD3,0x51,0xC1,0x0B,0xA4,0xDA,0xAC,0x38,0xEF,0x55,0x50,
+0x24,0x05,0x65,0x46,0x93,0x34,0x4F,0x2D,0x8D,0xAD,0xC6,0xD4,0x21,0x19,0xD2,0x8E,
+0xCA,0x05,0x61,0x71,0x07,0x73,0x47,0xE5,0x8A,0x19,0x12,0xBD,0x04,0x4D,0xCE,0x4E,
+0x9C,0xA5,0x48,0xAC,0xBB,0x26,0xF7,0x02,0x03,0x01,0x00,0x01,0xA3,0x81,0x8E,0x30,
+0x81,0x8B,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x0B,0x58,0xE5,
+0x8B,0xC6,0x4C,0x15,0x37,0xA4,0x40,0xA9,0x30,0xA9,0x21,0xBE,0x47,0x36,0x5A,0x56,
+0xFF,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,
+0x06,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,
+0x01,0xFF,0x30,0x49,0x06,0x03,0x55,0x1D,0x1F,0x04,0x42,0x30,0x40,0x30,0x3E,0xA0,
+0x3C,0xA0,0x3A,0x86,0x38,0x68,0x74,0x74,0x70,0x3A,0x2F,0x2F,0x63,0x72,0x6C,0x2E,
+0x63,0x6F,0x6D,0x6F,0x64,0x6F,0x63,0x61,0x2E,0x63,0x6F,0x6D,0x2F,0x43,0x4F,0x4D,
+0x4F,0x44,0x4F,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,
+0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x2E,0x63,0x72,0x6C,0x30,0x0D,0x06,
+0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x03,0x82,0x01,0x01,
+0x00,0x3E,0x98,0x9E,0x9B,0xF6,0x1B,0xE9,0xD7,0x39,0xB7,0x78,0xAE,0x1D,0x72,0x18,
+0x49,0xD3,0x87,0xE4,0x43,0x82,0xEB,0x3F,0xC9,0xAA,0xF5,0xA8,0xB5,0xEF,0x55,0x7C,
+0x21,0x52,0x65,0xF9,0xD5,0x0D,0xE1,0x6C,0xF4,0x3E,0x8C,0x93,0x73,0x91,0x2E,0x02,
+0xC4,0x4E,0x07,0x71,0x6F,0xC0,0x8F,0x38,0x61,0x08,0xA8,0x1E,0x81,0x0A,0xC0,0x2F,
+0x20,0x2F,0x41,0x8B,0x91,0xDC,0x48,0x45,0xBC,0xF1,0xC6,0xDE,0xBA,0x76,0x6B,0x33,
+0xC8,0x00,0x2D,0x31,0x46,0x4C,0xED,0xE7,0x9D,0xCF,0x88,0x94,0xFF,0x33,0xC0,0x56,
+0xE8,0x24,0x86,0x26,0xB8,0xD8,0x38,0x38,0xDF,0x2A,0x6B,0xDD,0x12,0xCC,0xC7,0x3F,
+0x47,0x17,0x4C,0xA2,0xC2,0x06,0x96,0x09,0xD6,0xDB,0xFE,0x3F,0x3C,0x46,0x41,0xDF,
+0x58,0xE2,0x56,0x0F,0x3C,0x3B,0xC1,0x1C,0x93,0x35,0xD9,0x38,0x52,0xAC,0xEE,0xC8,
+0xEC,0x2E,0x30,0x4E,0x94,0x35,0xB4,0x24,0x1F,0x4B,0x78,0x69,0xDA,0xF2,0x02,0x38,
+0xCC,0x95,0x52,0x93,0xF0,0x70,0x25,0x59,0x9C,0x20,0x67,0xC4,0xEE,0xF9,0x8B,0x57,
+0x61,0xF4,0x92,0x76,0x7D,0x3F,0x84,0x8D,0x55,0xB7,0xE8,0xE5,0xAC,0xD5,0xF1,0xF5,
+0x19,0x56,0xA6,0x5A,0xFB,0x90,0x1C,0xAF,0x93,0xEB,0xE5,0x1C,0xD4,0x67,0x97,0x5D,
+0x04,0x0E,0xBE,0x0B,0x83,0xA6,0x17,0x83,0xB9,0x30,0x12,0xA0,0xC5,0x33,0x15,0x05,
+0xB9,0x0D,0xFB,0xC7,0x05,0x76,0xE3,0xD8,0x4A,0x8D,0xFC,0x34,0x17,0xA3,0xC6,0x21,
+0x28,0xBE,0x30,0x45,0x31,0x1E,0xC7,0x78,0xBE,0x58,0x61,0x38,0xAC,0x3B,0xE2,0x01,
+0x65,
+};
+
+
+/* subject:/C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert Global Root CA */
+/* issuer :/C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert Global Root CA */
+
+
+const unsigned char DigiCert_Global_Root_CA_certificate[947]={
+0x30,0x82,0x03,0xAF,0x30,0x82,0x02,0x97,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x08,
+0x3B,0xE0,0x56,0x90,0x42,0x46,0xB1,0xA1,0x75,0x6A,0xC9,0x59,0x91,0xC7,0x4A,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,0x61,
+0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x15,0x30,
+0x13,0x06,0x03,0x55,0x04,0x0A,0x13,0x0C,0x44,0x69,0x67,0x69,0x43,0x65,0x72,0x74,
+0x20,0x49,0x6E,0x63,0x31,0x19,0x30,0x17,0x06,0x03,0x55,0x04,0x0B,0x13,0x10,0x77,
+0x77,0x77,0x2E,0x64,0x69,0x67,0x69,0x63,0x65,0x72,0x74,0x2E,0x63,0x6F,0x6D,0x31,
+0x20,0x30,0x1E,0x06,0x03,0x55,0x04,0x03,0x13,0x17,0x44,0x69,0x67,0x69,0x43,0x65,
+0x72,0x74,0x20,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,
+0x41,0x30,0x1E,0x17,0x0D,0x30,0x36,0x31,0x31,0x31,0x30,0x30,0x30,0x30,0x30,0x30,
+0x30,0x5A,0x17,0x0D,0x33,0x31,0x31,0x31,0x31,0x30,0x30,0x30,0x30,0x30,0x30,0x30,
+0x5A,0x30,0x61,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,
+0x31,0x15,0x30,0x13,0x06,0x03,0x55,0x04,0x0A,0x13,0x0C,0x44,0x69,0x67,0x69,0x43,
+0x65,0x72,0x74,0x20,0x49,0x6E,0x63,0x31,0x19,0x30,0x17,0x06,0x03,0x55,0x04,0x0B,
+0x13,0x10,0x77,0x77,0x77,0x2E,0x64,0x69,0x67,0x69,0x63,0x65,0x72,0x74,0x2E,0x63,
+0x6F,0x6D,0x31,0x20,0x30,0x1E,0x06,0x03,0x55,0x04,0x03,0x13,0x17,0x44,0x69,0x67,
+0x69,0x43,0x65,0x72,0x74,0x20,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x20,0x52,0x6F,0x6F,
+0x74,0x20,0x43,0x41,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,
+0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,
+0x02,0x82,0x01,0x01,0x00,0xE2,0x3B,0xE1,0x11,0x72,0xDE,0xA8,0xA4,0xD3,0xA3,0x57,
+0xAA,0x50,0xA2,0x8F,0x0B,0x77,0x90,0xC9,0xA2,0xA5,0xEE,0x12,0xCE,0x96,0x5B,0x01,
+0x09,0x20,0xCC,0x01,0x93,0xA7,0x4E,0x30,0xB7,0x53,0xF7,0x43,0xC4,0x69,0x00,0x57,
+0x9D,0xE2,0x8D,0x22,0xDD,0x87,0x06,0x40,0x00,0x81,0x09,0xCE,0xCE,0x1B,0x83,0xBF,
+0xDF,0xCD,0x3B,0x71,0x46,0xE2,0xD6,0x66,0xC7,0x05,0xB3,0x76,0x27,0x16,0x8F,0x7B,
+0x9E,0x1E,0x95,0x7D,0xEE,0xB7,0x48,0xA3,0x08,0xDA,0xD6,0xAF,0x7A,0x0C,0x39,0x06,
+0x65,0x7F,0x4A,0x5D,0x1F,0xBC,0x17,0xF8,0xAB,0xBE,0xEE,0x28,0xD7,0x74,0x7F,0x7A,
+0x78,0x99,0x59,0x85,0x68,0x6E,0x5C,0x23,0x32,0x4B,0xBF,0x4E,0xC0,0xE8,0x5A,0x6D,
+0xE3,0x70,0xBF,0x77,0x10,0xBF,0xFC,0x01,0xF6,0x85,0xD9,0xA8,0x44,0x10,0x58,0x32,
+0xA9,0x75,0x18,0xD5,0xD1,0xA2,0xBE,0x47,0xE2,0x27,0x6A,0xF4,0x9A,0x33,0xF8,0x49,
+0x08,0x60,0x8B,0xD4,0x5F,0xB4,0x3A,0x84,0xBF,0xA1,0xAA,0x4A,0x4C,0x7D,0x3E,0xCF,
+0x4F,0x5F,0x6C,0x76,0x5E,0xA0,0x4B,0x37,0x91,0x9E,0xDC,0x22,0xE6,0x6D,0xCE,0x14,
+0x1A,0x8E,0x6A,0xCB,0xFE,0xCD,0xB3,0x14,0x64,0x17,0xC7,0x5B,0x29,0x9E,0x32,0xBF,
+0xF2,0xEE,0xFA,0xD3,0x0B,0x42,0xD4,0xAB,0xB7,0x41,0x32,0xDA,0x0C,0xD4,0xEF,0xF8,
+0x81,0xD5,0xBB,0x8D,0x58,0x3F,0xB5,0x1B,0xE8,0x49,0x28,0xA2,0x70,0xDA,0x31,0x04,
+0xDD,0xF7,0xB2,0x16,0xF2,0x4C,0x0A,0x4E,0x07,0xA8,0xED,0x4A,0x3D,0x5E,0xB5,0x7F,
+0xA3,0x90,0xC3,0xAF,0x27,0x02,0x03,0x01,0x00,0x01,0xA3,0x63,0x30,0x61,0x30,0x0E,
+0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x86,0x30,0x0F,
+0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,
+0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x03,0xDE,0x50,0x35,0x56,0xD1,
+0x4C,0xBB,0x66,0xF0,0xA3,0xE2,0x1B,0x1B,0xC3,0x97,0xB2,0x3D,0xD1,0x55,0x30,0x1F,
+0x06,0x03,0x55,0x1D,0x23,0x04,0x18,0x30,0x16,0x80,0x14,0x03,0xDE,0x50,0x35,0x56,
+0xD1,0x4C,0xBB,0x66,0xF0,0xA3,0xE2,0x1B,0x1B,0xC3,0x97,0xB2,0x3D,0xD1,0x55,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x03,0x82,
+0x01,0x01,0x00,0xCB,0x9C,0x37,0xAA,0x48,0x13,0x12,0x0A,0xFA,0xDD,0x44,0x9C,0x4F,
+0x52,0xB0,0xF4,0xDF,0xAE,0x04,0xF5,0x79,0x79,0x08,0xA3,0x24,0x18,0xFC,0x4B,0x2B,
+0x84,0xC0,0x2D,0xB9,0xD5,0xC7,0xFE,0xF4,0xC1,0x1F,0x58,0xCB,0xB8,0x6D,0x9C,0x7A,
+0x74,0xE7,0x98,0x29,0xAB,0x11,0xB5,0xE3,0x70,0xA0,0xA1,0xCD,0x4C,0x88,0x99,0x93,
+0x8C,0x91,0x70,0xE2,0xAB,0x0F,0x1C,0xBE,0x93,0xA9,0xFF,0x63,0xD5,0xE4,0x07,0x60,
+0xD3,0xA3,0xBF,0x9D,0x5B,0x09,0xF1,0xD5,0x8E,0xE3,0x53,0xF4,0x8E,0x63,0xFA,0x3F,
+0xA7,0xDB,0xB4,0x66,0xDF,0x62,0x66,0xD6,0xD1,0x6E,0x41,0x8D,0xF2,0x2D,0xB5,0xEA,
+0x77,0x4A,0x9F,0x9D,0x58,0xE2,0x2B,0x59,0xC0,0x40,0x23,0xED,0x2D,0x28,0x82,0x45,
+0x3E,0x79,0x54,0x92,0x26,0x98,0xE0,0x80,0x48,0xA8,0x37,0xEF,0xF0,0xD6,0x79,0x60,
+0x16,0xDE,0xAC,0xE8,0x0E,0xCD,0x6E,0xAC,0x44,0x17,0x38,0x2F,0x49,0xDA,0xE1,0x45,
+0x3E,0x2A,0xB9,0x36,0x53,0xCF,0x3A,0x50,0x06,0xF7,0x2E,0xE8,0xC4,0x57,0x49,0x6C,
+0x61,0x21,0x18,0xD5,0x04,0xAD,0x78,0x3C,0x2C,0x3A,0x80,0x6B,0xA7,0xEB,0xAF,0x15,
+0x14,0xE9,0xD8,0x89,0xC1,0xB9,0x38,0x6C,0xE2,0x91,0x6C,0x8A,0xFF,0x64,0xB9,0x77,
+0x25,0x57,0x30,0xC0,0x1B,0x24,0xA3,0xE1,0xDC,0xE9,0xDF,0x47,0x7C,0xB5,0xB4,0x24,
+0x08,0x05,0x30,0xEC,0x2D,0xBD,0x0B,0xBF,0x45,0xBF,0x50,0xB9,0xA9,0xF3,0xEB,0x98,
+0x01,0x12,0xAD,0xC8,0x88,0xC6,0x98,0x34,0x5F,0x8D,0x0A,0x3C,0xC6,0xE9,0xD5,0x95,
+0x95,0x6D,0xDE,
+};
+
+
+/* subject:/C=GB/ST=Greater Manchester/L=Salford/O=Comodo CA Limited/CN=AAA Certificate Services */
+/* issuer :/C=GB/ST=Greater Manchester/L=Salford/O=Comodo CA Limited/CN=AAA Certificate Services */
+
+
+const unsigned char Comodo_AAA_Services_root_certificate[1078]={
+0x30,0x82,0x04,0x32,0x30,0x82,0x03,0x1A,0xA0,0x03,0x02,0x01,0x02,0x02,0x01,0x01,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,
+0x7B,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x47,0x42,0x31,0x1B,
+0x30,0x19,0x06,0x03,0x55,0x04,0x08,0x0C,0x12,0x47,0x72,0x65,0x61,0x74,0x65,0x72,
+0x20,0x4D,0x61,0x6E,0x63,0x68,0x65,0x73,0x74,0x65,0x72,0x31,0x10,0x30,0x0E,0x06,
+0x03,0x55,0x04,0x07,0x0C,0x07,0x53,0x61,0x6C,0x66,0x6F,0x72,0x64,0x31,0x1A,0x30,
+0x18,0x06,0x03,0x55,0x04,0x0A,0x0C,0x11,0x43,0x6F,0x6D,0x6F,0x64,0x6F,0x20,0x43,
+0x41,0x20,0x4C,0x69,0x6D,0x69,0x74,0x65,0x64,0x31,0x21,0x30,0x1F,0x06,0x03,0x55,
+0x04,0x03,0x0C,0x18,0x41,0x41,0x41,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,
+0x61,0x74,0x65,0x20,0x53,0x65,0x72,0x76,0x69,0x63,0x65,0x73,0x30,0x1E,0x17,0x0D,
+0x30,0x34,0x30,0x31,0x30,0x31,0x30,0x30,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x32,
+0x38,0x31,0x32,0x33,0x31,0x32,0x33,0x35,0x39,0x35,0x39,0x5A,0x30,0x7B,0x31,0x0B,
+0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x47,0x42,0x31,0x1B,0x30,0x19,0x06,
+0x03,0x55,0x04,0x08,0x0C,0x12,0x47,0x72,0x65,0x61,0x74,0x65,0x72,0x20,0x4D,0x61,
+0x6E,0x63,0x68,0x65,0x73,0x74,0x65,0x72,0x31,0x10,0x30,0x0E,0x06,0x03,0x55,0x04,
+0x07,0x0C,0x07,0x53,0x61,0x6C,0x66,0x6F,0x72,0x64,0x31,0x1A,0x30,0x18,0x06,0x03,
+0x55,0x04,0x0A,0x0C,0x11,0x43,0x6F,0x6D,0x6F,0x64,0x6F,0x20,0x43,0x41,0x20,0x4C,
+0x69,0x6D,0x69,0x74,0x65,0x64,0x31,0x21,0x30,0x1F,0x06,0x03,0x55,0x04,0x03,0x0C,
+0x18,0x41,0x41,0x41,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x65,
+0x20,0x53,0x65,0x72,0x76,0x69,0x63,0x65,0x73,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,
+0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,
+0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01,0x01,0x00,0xBE,0x40,0x9D,0xF4,0x6E,0xE1,
+0xEA,0x76,0x87,0x1C,0x4D,0x45,0x44,0x8E,0xBE,0x46,0xC8,0x83,0x06,0x9D,0xC1,0x2A,
+0xFE,0x18,0x1F,0x8E,0xE4,0x02,0xFA,0xF3,0xAB,0x5D,0x50,0x8A,0x16,0x31,0x0B,0x9A,
+0x06,0xD0,0xC5,0x70,0x22,0xCD,0x49,0x2D,0x54,0x63,0xCC,0xB6,0x6E,0x68,0x46,0x0B,
+0x53,0xEA,0xCB,0x4C,0x24,0xC0,0xBC,0x72,0x4E,0xEA,0xF1,0x15,0xAE,0xF4,0x54,0x9A,
+0x12,0x0A,0xC3,0x7A,0xB2,0x33,0x60,0xE2,0xDA,0x89,0x55,0xF3,0x22,0x58,0xF3,0xDE,
+0xDC,0xCF,0xEF,0x83,0x86,0xA2,0x8C,0x94,0x4F,0x9F,0x68,0xF2,0x98,0x90,0x46,0x84,
+0x27,0xC7,0x76,0xBF,0xE3,0xCC,0x35,0x2C,0x8B,0x5E,0x07,0x64,0x65,0x82,0xC0,0x48,
+0xB0,0xA8,0x91,0xF9,0x61,0x9F,0x76,0x20,0x50,0xA8,0x91,0xC7,0x66,0xB5,0xEB,0x78,
+0x62,0x03,0x56,0xF0,0x8A,0x1A,0x13,0xEA,0x31,0xA3,0x1E,0xA0,0x99,0xFD,0x38,0xF6,
+0xF6,0x27,0x32,0x58,0x6F,0x07,0xF5,0x6B,0xB8,0xFB,0x14,0x2B,0xAF,0xB7,0xAA,0xCC,
+0xD6,0x63,0x5F,0x73,0x8C,0xDA,0x05,0x99,0xA8,0x38,0xA8,0xCB,0x17,0x78,0x36,0x51,
+0xAC,0xE9,0x9E,0xF4,0x78,0x3A,0x8D,0xCF,0x0F,0xD9,0x42,0xE2,0x98,0x0C,0xAB,0x2F,
+0x9F,0x0E,0x01,0xDE,0xEF,0x9F,0x99,0x49,0xF1,0x2D,0xDF,0xAC,0x74,0x4D,0x1B,0x98,
+0xB5,0x47,0xC5,0xE5,0x29,0xD1,0xF9,0x90,0x18,0xC7,0x62,0x9C,0xBE,0x83,0xC7,0x26,
+0x7B,0x3E,0x8A,0x25,0xC7,0xC0,0xDD,0x9D,0xE6,0x35,0x68,0x10,0x20,0x9D,0x8F,0xD8,
+0xDE,0xD2,0xC3,0x84,0x9C,0x0D,0x5E,0xE8,0x2F,0xC9,0x02,0x03,0x01,0x00,0x01,0xA3,
+0x81,0xC0,0x30,0x81,0xBD,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,
+0xA0,0x11,0x0A,0x23,0x3E,0x96,0xF1,0x07,0xEC,0xE2,0xAF,0x29,0xEF,0x82,0xA5,0x7F,
+0xD0,0x30,0xA4,0xB4,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,
+0x03,0x02,0x01,0x06,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,
+0x30,0x03,0x01,0x01,0xFF,0x30,0x7B,0x06,0x03,0x55,0x1D,0x1F,0x04,0x74,0x30,0x72,
+0x30,0x38,0xA0,0x36,0xA0,0x34,0x86,0x32,0x68,0x74,0x74,0x70,0x3A,0x2F,0x2F,0x63,
+0x72,0x6C,0x2E,0x63,0x6F,0x6D,0x6F,0x64,0x6F,0x63,0x61,0x2E,0x63,0x6F,0x6D,0x2F,
+0x41,0x41,0x41,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x65,0x53,0x65,
+0x72,0x76,0x69,0x63,0x65,0x73,0x2E,0x63,0x72,0x6C,0x30,0x36,0xA0,0x34,0xA0,0x32,
+0x86,0x30,0x68,0x74,0x74,0x70,0x3A,0x2F,0x2F,0x63,0x72,0x6C,0x2E,0x63,0x6F,0x6D,
+0x6F,0x64,0x6F,0x2E,0x6E,0x65,0x74,0x2F,0x41,0x41,0x41,0x43,0x65,0x72,0x74,0x69,
+0x66,0x69,0x63,0x61,0x74,0x65,0x53,0x65,0x72,0x76,0x69,0x63,0x65,0x73,0x2E,0x63,
+0x72,0x6C,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,
+0x00,0x03,0x82,0x01,0x01,0x00,0x08,0x56,0xFC,0x02,0xF0,0x9B,0xE8,0xFF,0xA4,0xFA,
+0xD6,0x7B,0xC6,0x44,0x80,0xCE,0x4F,0xC4,0xC5,0xF6,0x00,0x58,0xCC,0xA6,0xB6,0xBC,
+0x14,0x49,0x68,0x04,0x76,0xE8,0xE6,0xEE,0x5D,0xEC,0x02,0x0F,0x60,0xD6,0x8D,0x50,
+0x18,0x4F,0x26,0x4E,0x01,0xE3,0xE6,0xB0,0xA5,0xEE,0xBF,0xBC,0x74,0x54,0x41,0xBF,
+0xFD,0xFC,0x12,0xB8,0xC7,0x4F,0x5A,0xF4,0x89,0x60,0x05,0x7F,0x60,0xB7,0x05,0x4A,
+0xF3,0xF6,0xF1,0xC2,0xBF,0xC4,0xB9,0x74,0x86,0xB6,0x2D,0x7D,0x6B,0xCC,0xD2,0xF3,
+0x46,0xDD,0x2F,0xC6,0xE0,0x6A,0xC3,0xC3,0x34,0x03,0x2C,0x7D,0x96,0xDD,0x5A,0xC2,
+0x0E,0xA7,0x0A,0x99,0xC1,0x05,0x8B,0xAB,0x0C,0x2F,0xF3,0x5C,0x3A,0xCF,0x6C,0x37,
+0x55,0x09,0x87,0xDE,0x53,0x40,0x6C,0x58,0xEF,0xFC,0xB6,0xAB,0x65,0x6E,0x04,0xF6,
+0x1B,0xDC,0x3C,0xE0,0x5A,0x15,0xC6,0x9E,0xD9,0xF1,0x59,0x48,0x30,0x21,0x65,0x03,
+0x6C,0xEC,0xE9,0x21,0x73,0xEC,0x9B,0x03,0xA1,0xE0,0x37,0xAD,0xA0,0x15,0x18,0x8F,
+0xFA,0xBA,0x02,0xCE,0xA7,0x2C,0xA9,0x10,0x13,0x2C,0xD4,0xE5,0x08,0x26,0xAB,0x22,
+0x97,0x60,0xF8,0x90,0x5E,0x74,0xD4,0xA2,0x9A,0x53,0xBD,0xF2,0xA9,0x68,0xE0,0xA2,
+0x6E,0xC2,0xD7,0x6C,0xB1,0xA3,0x0F,0x9E,0xBF,0xEB,0x68,0xE7,0x56,0xF2,0xAE,0xF2,
+0xE3,0x2B,0x38,0x3A,0x09,0x81,0xB5,0x6B,0x85,0xD7,0xBE,0x2D,0xED,0x3F,0x1A,0xB7,
+0xB2,0x63,0xE2,0xF5,0x62,0x2C,0x82,0xD4,0x6A,0x00,0x41,0x50,0xF1,0x39,0x83,0x9F,
+0x95,0xE9,0x36,0x96,0x98,0x6E,
+};
+
+
+/* subject:/C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert High Assurance EV Root CA */
+/* issuer :/C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert High Assurance EV Root CA */
+
+
+const unsigned char DigiCert_High_Assurance_EV_Root_CA_certificate[969]={
+0x30,0x82,0x03,0xC5,0x30,0x82,0x02,0xAD,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x02,
+0xAC,0x5C,0x26,0x6A,0x0B,0x40,0x9B,0x8F,0x0B,0x79,0xF2,0xAE,0x46,0x25,0x77,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,0x6C,
+0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x15,0x30,
+0x13,0x06,0x03,0x55,0x04,0x0A,0x13,0x0C,0x44,0x69,0x67,0x69,0x43,0x65,0x72,0x74,
+0x20,0x49,0x6E,0x63,0x31,0x19,0x30,0x17,0x06,0x03,0x55,0x04,0x0B,0x13,0x10,0x77,
+0x77,0x77,0x2E,0x64,0x69,0x67,0x69,0x63,0x65,0x72,0x74,0x2E,0x63,0x6F,0x6D,0x31,
+0x2B,0x30,0x29,0x06,0x03,0x55,0x04,0x03,0x13,0x22,0x44,0x69,0x67,0x69,0x43,0x65,
+0x72,0x74,0x20,0x48,0x69,0x67,0x68,0x20,0x41,0x73,0x73,0x75,0x72,0x61,0x6E,0x63,
+0x65,0x20,0x45,0x56,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x41,0x30,0x1E,0x17,0x0D,
+0x30,0x36,0x31,0x31,0x31,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x33,
+0x31,0x31,0x31,0x31,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x5A,0x30,0x6C,0x31,0x0B,
+0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x15,0x30,0x13,0x06,
+0x03,0x55,0x04,0x0A,0x13,0x0C,0x44,0x69,0x67,0x69,0x43,0x65,0x72,0x74,0x20,0x49,
+0x6E,0x63,0x31,0x19,0x30,0x17,0x06,0x03,0x55,0x04,0x0B,0x13,0x10,0x77,0x77,0x77,
+0x2E,0x64,0x69,0x67,0x69,0x63,0x65,0x72,0x74,0x2E,0x63,0x6F,0x6D,0x31,0x2B,0x30,
+0x29,0x06,0x03,0x55,0x04,0x03,0x13,0x22,0x44,0x69,0x67,0x69,0x43,0x65,0x72,0x74,
+0x20,0x48,0x69,0x67,0x68,0x20,0x41,0x73,0x73,0x75,0x72,0x61,0x6E,0x63,0x65,0x20,
+0x45,0x56,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x41,0x30,0x82,0x01,0x22,0x30,0x0D,
+0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,
+0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01,0x01,0x00,0xC6,0xCC,0xE5,0x73,0xE6,
+0xFB,0xD4,0xBB,0xE5,0x2D,0x2D,0x32,0xA6,0xDF,0xE5,0x81,0x3F,0xC9,0xCD,0x25,0x49,
+0xB6,0x71,0x2A,0xC3,0xD5,0x94,0x34,0x67,0xA2,0x0A,0x1C,0xB0,0x5F,0x69,0xA6,0x40,
+0xB1,0xC4,0xB7,0xB2,0x8F,0xD0,0x98,0xA4,0xA9,0x41,0x59,0x3A,0xD3,0xDC,0x94,0xD6,
+0x3C,0xDB,0x74,0x38,0xA4,0x4A,0xCC,0x4D,0x25,0x82,0xF7,0x4A,0xA5,0x53,0x12,0x38,
+0xEE,0xF3,0x49,0x6D,0x71,0x91,0x7E,0x63,0xB6,0xAB,0xA6,0x5F,0xC3,0xA4,0x84,0xF8,
+0x4F,0x62,0x51,0xBE,0xF8,0xC5,0xEC,0xDB,0x38,0x92,0xE3,0x06,0xE5,0x08,0x91,0x0C,
+0xC4,0x28,0x41,0x55,0xFB,0xCB,0x5A,0x89,0x15,0x7E,0x71,0xE8,0x35,0xBF,0x4D,0x72,
+0x09,0x3D,0xBE,0x3A,0x38,0x50,0x5B,0x77,0x31,0x1B,0x8D,0xB3,0xC7,0x24,0x45,0x9A,
+0xA7,0xAC,0x6D,0x00,0x14,0x5A,0x04,0xB7,0xBA,0x13,0xEB,0x51,0x0A,0x98,0x41,0x41,
+0x22,0x4E,0x65,0x61,0x87,0x81,0x41,0x50,0xA6,0x79,0x5C,0x89,0xDE,0x19,0x4A,0x57,
+0xD5,0x2E,0xE6,0x5D,0x1C,0x53,0x2C,0x7E,0x98,0xCD,0x1A,0x06,0x16,0xA4,0x68,0x73,
+0xD0,0x34,0x04,0x13,0x5C,0xA1,0x71,0xD3,0x5A,0x7C,0x55,0xDB,0x5E,0x64,0xE1,0x37,
+0x87,0x30,0x56,0x04,0xE5,0x11,0xB4,0x29,0x80,0x12,0xF1,0x79,0x39,0x88,0xA2,0x02,
+0x11,0x7C,0x27,0x66,0xB7,0x88,0xB7,0x78,0xF2,0xCA,0x0A,0xA8,0x38,0xAB,0x0A,0x64,
+0xC2,0xBF,0x66,0x5D,0x95,0x84,0xC1,0xA1,0x25,0x1E,0x87,0x5D,0x1A,0x50,0x0B,0x20,
+0x12,0xCC,0x41,0xBB,0x6E,0x0B,0x51,0x38,0xB8,0x4B,0xCB,0x02,0x03,0x01,0x00,0x01,
+0xA3,0x63,0x30,0x61,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,
+0x03,0x02,0x01,0x86,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,
+0x30,0x03,0x01,0x01,0xFF,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,
+0xB1,0x3E,0xC3,0x69,0x03,0xF8,0xBF,0x47,0x01,0xD4,0x98,0x26,0x1A,0x08,0x02,0xEF,
+0x63,0x64,0x2B,0xC3,0x30,0x1F,0x06,0x03,0x55,0x1D,0x23,0x04,0x18,0x30,0x16,0x80,
+0x14,0xB1,0x3E,0xC3,0x69,0x03,0xF8,0xBF,0x47,0x01,0xD4,0x98,0x26,0x1A,0x08,0x02,
+0xEF,0x63,0x64,0x2B,0xC3,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,
+0x01,0x05,0x05,0x00,0x03,0x82,0x01,0x01,0x00,0x1C,0x1A,0x06,0x97,0xDC,0xD7,0x9C,
+0x9F,0x3C,0x88,0x66,0x06,0x08,0x57,0x21,0xDB,0x21,0x47,0xF8,0x2A,0x67,0xAA,0xBF,
+0x18,0x32,0x76,0x40,0x10,0x57,0xC1,0x8A,0xF3,0x7A,0xD9,0x11,0x65,0x8E,0x35,0xFA,
+0x9E,0xFC,0x45,0xB5,0x9E,0xD9,0x4C,0x31,0x4B,0xB8,0x91,0xE8,0x43,0x2C,0x8E,0xB3,
+0x78,0xCE,0xDB,0xE3,0x53,0x79,0x71,0xD6,0xE5,0x21,0x94,0x01,0xDA,0x55,0x87,0x9A,
+0x24,0x64,0xF6,0x8A,0x66,0xCC,0xDE,0x9C,0x37,0xCD,0xA8,0x34,0xB1,0x69,0x9B,0x23,
+0xC8,0x9E,0x78,0x22,0x2B,0x70,0x43,0xE3,0x55,0x47,0x31,0x61,0x19,0xEF,0x58,0xC5,
+0x85,0x2F,0x4E,0x30,0xF6,0xA0,0x31,0x16,0x23,0xC8,0xE7,0xE2,0x65,0x16,0x33,0xCB,
+0xBF,0x1A,0x1B,0xA0,0x3D,0xF8,0xCA,0x5E,0x8B,0x31,0x8B,0x60,0x08,0x89,0x2D,0x0C,
+0x06,0x5C,0x52,0xB7,0xC4,0xF9,0x0A,0x98,0xD1,0x15,0x5F,0x9F,0x12,0xBE,0x7C,0x36,
+0x63,0x38,0xBD,0x44,0xA4,0x7F,0xE4,0x26,0x2B,0x0A,0xC4,0x97,0x69,0x0D,0xE9,0x8C,
+0xE2,0xC0,0x10,0x57,0xB8,0xC8,0x76,0x12,0x91,0x55,0xF2,0x48,0x69,0xD8,0xBC,0x2A,
+0x02,0x5B,0x0F,0x44,0xD4,0x20,0x31,0xDB,0xF4,0xBA,0x70,0x26,0x5D,0x90,0x60,0x9E,
+0xBC,0x4B,0x17,0x09,0x2F,0xB4,0xCB,0x1E,0x43,0x68,0xC9,0x07,0x27,0xC1,0xD2,0x5C,
+0xF7,0xEA,0x21,0xB9,0x68,0x12,0x9C,0x3C,0x9C,0xBF,0x9E,0xFC,0x80,0x5C,0x9B,0x63,
+0xCD,0xEC,0x47,0xAA,0x25,0x27,0x67,0xA0,0x37,0xF3,0x00,0x82,0x7D,0x54,0xD7,0xA9,
+0xF8,0xE9,0x2E,0x13,0xA3,0x77,0xE8,0x1F,0x4A,
+};
+
+
+/* subject:/C=US/O=GeoTrust Inc./CN=GeoTrust Universal CA */
+/* issuer :/C=US/O=GeoTrust Inc./CN=GeoTrust Universal CA */
+
+
+const unsigned char GeoTrust_Universal_CA_certificate[1388]={
+0x30,0x82,0x05,0x68,0x30,0x82,0x03,0x50,0xA0,0x03,0x02,0x01,0x02,0x02,0x01,0x01,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,
+0x45,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x16,
+0x30,0x14,0x06,0x03,0x55,0x04,0x0A,0x13,0x0D,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,
+0x74,0x20,0x49,0x6E,0x63,0x2E,0x31,0x1E,0x30,0x1C,0x06,0x03,0x55,0x04,0x03,0x13,
+0x15,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,0x74,0x20,0x55,0x6E,0x69,0x76,0x65,0x72,
+0x73,0x61,0x6C,0x20,0x43,0x41,0x30,0x1E,0x17,0x0D,0x30,0x34,0x30,0x33,0x30,0x34,
+0x30,0x35,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x32,0x39,0x30,0x33,0x30,0x34,0x30,
+0x35,0x30,0x30,0x30,0x30,0x5A,0x30,0x45,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,
+0x06,0x13,0x02,0x55,0x53,0x31,0x16,0x30,0x14,0x06,0x03,0x55,0x04,0x0A,0x13,0x0D,
+0x47,0x65,0x6F,0x54,0x72,0x75,0x73,0x74,0x20,0x49,0x6E,0x63,0x2E,0x31,0x1E,0x30,
+0x1C,0x06,0x03,0x55,0x04,0x03,0x13,0x15,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,0x74,
+0x20,0x55,0x6E,0x69,0x76,0x65,0x72,0x73,0x61,0x6C,0x20,0x43,0x41,0x30,0x82,0x02,
+0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,
+0x03,0x82,0x02,0x0F,0x00,0x30,0x82,0x02,0x0A,0x02,0x82,0x02,0x01,0x00,0xA6,0x15,
+0x55,0xA0,0xA3,0xC6,0xE0,0x1F,0x8C,0x9D,0x21,0x50,0xD7,0xC1,0xBE,0x2B,0x5B,0xB5,
+0xA4,0x9E,0xA1,0xD9,0x72,0x58,0xBD,0x00,0x1B,0x4C,0xBF,0x61,0xC9,0x14,0x1D,0x45,
+0x82,0xAB,0xC6,0x1D,0x80,0xD6,0x3D,0xEB,0x10,0x9C,0x3A,0xAF,0x6D,0x24,0xF8,0xBC,
+0x71,0x01,0x9E,0x06,0xF5,0x7C,0x5F,0x1E,0xC1,0x0E,0x55,0xCA,0x83,0x9A,0x59,0x30,
+0xAE,0x19,0xCB,0x30,0x48,0x95,0xED,0x22,0x37,0x8D,0xF4,0x4A,0x9A,0x72,0x66,0x3E,
+0xAD,0x95,0xC0,0xE0,0x16,0x00,0xE0,0x10,0x1F,0x2B,0x31,0x0E,0xD7,0x94,0x54,0xD3,
+0x42,0x33,0xA0,0x34,0x1D,0x1E,0x45,0x76,0xDD,0x4F,0xCA,0x18,0x37,0xEC,0x85,0x15,
+0x7A,0x19,0x08,0xFC,0xD5,0xC7,0x9C,0xF0,0xF2,0xA9,0x2E,0x10,0xA9,0x92,0xE6,0x3D,
+0x58,0x3D,0xA9,0x16,0x68,0x3C,0x2F,0x75,0x21,0x18,0x7F,0x28,0x77,0xA5,0xE1,0x61,
+0x17,0xB7,0xA6,0xE9,0xF8,0x1E,0x99,0xDB,0x73,0x6E,0xF4,0x0A,0xA2,0x21,0x6C,0xEE,
+0xDA,0xAA,0x85,0x92,0x66,0xAF,0xF6,0x7A,0x6B,0x82,0xDA,0xBA,0x22,0x08,0x35,0x0F,
+0xCF,0x42,0xF1,0x35,0xFA,0x6A,0xEE,0x7E,0x2B,0x25,0xCC,0x3A,0x11,0xE4,0x6D,0xAF,
+0x73,0xB2,0x76,0x1D,0xAD,0xD0,0xB2,0x78,0x67,0x1A,0xA4,0x39,0x1C,0x51,0x0B,0x67,
+0x56,0x83,0xFD,0x38,0x5D,0x0D,0xCE,0xDD,0xF0,0xBB,0x2B,0x96,0x1F,0xDE,0x7B,0x32,
+0x52,0xFD,0x1D,0xBB,0xB5,0x06,0xA1,0xB2,0x21,0x5E,0xA5,0xD6,0x95,0x68,0x7F,0xF0,
+0x99,0x9E,0xDC,0x45,0x08,0x3E,0xE7,0xD2,0x09,0x0D,0x35,0x94,0xDD,0x80,0x4E,0x53,
+0x97,0xD7,0xB5,0x09,0x44,0x20,0x64,0x16,0x17,0x03,0x02,0x4C,0x53,0x0D,0x68,0xDE,
+0xD5,0xAA,0x72,0x4D,0x93,0x6D,0x82,0x0E,0xDB,0x9C,0xBD,0xCF,0xB4,0xF3,0x5C,0x5D,
+0x54,0x7A,0x69,0x09,0x96,0xD6,0xDB,0x11,0xC1,0x8D,0x75,0xA8,0xB4,0xCF,0x39,0xC8,
+0xCE,0x3C,0xBC,0x24,0x7C,0xE6,0x62,0xCA,0xE1,0xBD,0x7D,0xA7,0xBD,0x57,0x65,0x0B,
+0xE4,0xFE,0x25,0xED,0xB6,0x69,0x10,0xDC,0x28,0x1A,0x46,0xBD,0x01,0x1D,0xD0,0x97,
+0xB5,0xE1,0x98,0x3B,0xC0,0x37,0x64,0xD6,0x3D,0x94,0xEE,0x0B,0xE1,0xF5,0x28,0xAE,
+0x0B,0x56,0xBF,0x71,0x8B,0x23,0x29,0x41,0x8E,0x86,0xC5,0x4B,0x52,0x7B,0xD8,0x71,
+0xAB,0x1F,0x8A,0x15,0xA6,0x3B,0x83,0x5A,0xD7,0x58,0x01,0x51,0xC6,0x4C,0x41,0xD9,
+0x7F,0xD8,0x41,0x67,0x72,0xA2,0x28,0xDF,0x60,0x83,0xA9,0x9E,0xC8,0x7B,0xFC,0x53,
+0x73,0x72,0x59,0xF5,0x93,0x7A,0x17,0x76,0x0E,0xCE,0xF7,0xE5,0x5C,0xD9,0x0B,0x55,
+0x34,0xA2,0xAA,0x5B,0xB5,0x6A,0x54,0xE7,0x13,0xCA,0x57,0xEC,0x97,0x6D,0xF4,0x5E,
+0x06,0x2F,0x45,0x8B,0x58,0xD4,0x23,0x16,0x92,0xE4,0x16,0x6E,0x28,0x63,0x59,0x30,
+0xDF,0x50,0x01,0x9C,0x63,0x89,0x1A,0x9F,0xDB,0x17,0x94,0x82,0x70,0x37,0xC3,0x24,
+0x9E,0x9A,0x47,0xD6,0x5A,0xCA,0x4E,0xA8,0x69,0x89,0x72,0x1F,0x91,0x6C,0xDB,0x7E,
+0x9E,0x1B,0xAD,0xC7,0x1F,0x73,0xDD,0x2C,0x4F,0x19,0x65,0xFD,0x7F,0x93,0x40,0x10,
+0x2E,0xD2,0xF0,0xED,0x3C,0x9E,0x2E,0x28,0x3E,0x69,0x26,0x33,0xC5,0x7B,0x02,0x03,
+0x01,0x00,0x01,0xA3,0x63,0x30,0x61,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,
+0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,
+0x16,0x04,0x14,0xDA,0xBB,0x2E,0xAA,0xB0,0x0C,0xB8,0x88,0x26,0x51,0x74,0x5C,0x6D,
+0x03,0xD3,0xC0,0xD8,0x8F,0x7A,0xD6,0x30,0x1F,0x06,0x03,0x55,0x1D,0x23,0x04,0x18,
+0x30,0x16,0x80,0x14,0xDA,0xBB,0x2E,0xAA,0xB0,0x0C,0xB8,0x88,0x26,0x51,0x74,0x5C,
+0x6D,0x03,0xD3,0xC0,0xD8,0x8F,0x7A,0xD6,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,
+0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x86,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,
+0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x03,0x82,0x02,0x01,0x00,0x31,0x78,0xE6,0xC7,
+0xB5,0xDF,0xB8,0x94,0x40,0xC9,0x71,0xC4,0xA8,0x35,0xEC,0x46,0x1D,0xC2,0x85,0xF3,
+0x28,0x58,0x86,0xB0,0x0B,0xFC,0x8E,0xB2,0x39,0x8F,0x44,0x55,0xAB,0x64,0x84,0x5C,
+0x69,0xA9,0xD0,0x9A,0x38,0x3C,0xFA,0xE5,0x1F,0x35,0xE5,0x44,0xE3,0x80,0x79,0x94,
+0x68,0xA4,0xBB,0xC4,0x9F,0x3D,0xE1,0x34,0xCD,0x30,0x46,0x8B,0x54,0x2B,0x95,0xA5,
+0xEF,0xF7,0x3F,0x99,0x84,0xFD,0x35,0xE6,0xCF,0x31,0xC6,0xDC,0x6A,0xBF,0xA7,0xD7,
+0x23,0x08,0xE1,0x98,0x5E,0xC3,0x5A,0x08,0x76,0xA9,0xA6,0xAF,0x77,0x2F,0xB7,0x60,
+0xBD,0x44,0x46,0x6A,0xEF,0x97,0xFF,0x73,0x95,0xC1,0x8E,0xE8,0x93,0xFB,0xFD,0x31,
+0xB7,0xEC,0x57,0x11,0x11,0x45,0x9B,0x30,0xF1,0x1A,0x88,0x39,0xC1,0x4F,0x3C,0xA7,
+0x00,0xD5,0xC7,0xFC,0xAB,0x6D,0x80,0x22,0x70,0xA5,0x0C,0xE0,0x5D,0x04,0x29,0x02,
+0xFB,0xCB,0xA0,0x91,0xD1,0x7C,0xD6,0xC3,0x7E,0x50,0xD5,0x9D,0x58,0xBE,0x41,0x38,
+0xEB,0xB9,0x75,0x3C,0x15,0xD9,0x9B,0xC9,0x4A,0x83,0x59,0xC0,0xDA,0x53,0xFD,0x33,
+0xBB,0x36,0x18,0x9B,0x85,0x0F,0x15,0xDD,0xEE,0x2D,0xAC,0x76,0x93,0xB9,0xD9,0x01,
+0x8D,0x48,0x10,0xA8,0xFB,0xF5,0x38,0x86,0xF1,0xDB,0x0A,0xC6,0xBD,0x84,0xA3,0x23,
+0x41,0xDE,0xD6,0x77,0x6F,0x85,0xD4,0x85,0x1C,0x50,0xE0,0xAE,0x51,0x8A,0xBA,0x8D,
+0x3E,0x76,0xE2,0xB9,0xCA,0x27,0xF2,0x5F,0x9F,0xEF,0x6E,0x59,0x0D,0x06,0xD8,0x2B,
+0x17,0xA4,0xD2,0x7C,0x6B,0xBB,0x5F,0x14,0x1A,0x48,0x8F,0x1A,0x4C,0xE7,0xB3,0x47,
+0x1C,0x8E,0x4C,0x45,0x2B,0x20,0xEE,0x48,0xDF,0xE7,0xDD,0x09,0x8E,0x18,0xA8,0xDA,
+0x40,0x8D,0x92,0x26,0x11,0x53,0x61,0x73,0x5D,0xEB,0xBD,0xE7,0xC4,0x4D,0x29,0x37,
+0x61,0xEB,0xAC,0x39,0x2D,0x67,0x2E,0x16,0xD6,0xF5,0x00,0x83,0x85,0xA1,0xCC,0x7F,
+0x76,0xC4,0x7D,0xE4,0xB7,0x4B,0x66,0xEF,0x03,0x45,0x60,0x69,0xB6,0x0C,0x52,0x96,
+0x92,0x84,0x5E,0xA6,0xA3,0xB5,0xA4,0x3E,0x2B,0xD9,0xCC,0xD8,0x1B,0x47,0xAA,0xF2,
+0x44,0xDA,0x4F,0xF9,0x03,0xE8,0xF0,0x14,0xCB,0x3F,0xF3,0x83,0xDE,0xD0,0xC1,0x54,
+0xE3,0xB7,0xE8,0x0A,0x37,0x4D,0x8B,0x20,0x59,0x03,0x30,0x19,0xA1,0x2C,0xC8,0xBD,
+0x11,0x1F,0xDF,0xAE,0xC9,0x4A,0xC5,0xF3,0x27,0x66,0x66,0x86,0xAC,0x68,0x91,0xFF,
+0xD9,0xE6,0x53,0x1C,0x0F,0x8B,0x5C,0x69,0x65,0x0A,0x26,0xC8,0x1E,0x34,0xC3,0x5D,
+0x51,0x7B,0xD7,0xA9,0x9C,0x06,0xA1,0x36,0xDD,0xD5,0x89,0x94,0xBC,0xD9,0xE4,0x2D,
+0x0C,0x5E,0x09,0x6C,0x08,0x97,0x7C,0xA3,0x3D,0x7C,0x93,0xFF,0x3F,0xA1,0x14,0xA7,
+0xCF,0xB5,0x5D,0xEB,0xDB,0xDB,0x1C,0xC4,0x76,0xDF,0x88,0xB9,0xBD,0x45,0x05,0x95,
+0x1B,0xAE,0xFC,0x46,0x6A,0x4C,0xAF,0x48,0xE3,0xCE,0xAE,0x0F,0xD2,0x7E,0xEB,0xE6,
+0x6C,0x9C,0x4F,0x81,0x6A,0x7A,0x64,0xAC,0xBB,0x3E,0xD5,0xE7,0xCB,0x76,0x2E,0xC5,
+0xA7,0x48,0xC1,0x5C,0x90,0x0F,0xCB,0xC8,0x3F,0xFA,0xE6,0x32,0xE1,0x8D,0x1B,0x6F,
+0xA4,0xE6,0x8E,0xD8,0xF9,0x29,0x48,0x8A,0xCE,0x73,0xFE,0x2C,
+};
+
+
+/* subject:/C=GB/ST=Greater Manchester/L=Salford/O=COMODO CA Limited/CN=COMODO ECC Certification Authority */
+/* issuer :/C=GB/ST=Greater Manchester/L=Salford/O=COMODO CA Limited/CN=COMODO ECC Certification Authority */
+
+
+const unsigned char COMODO_ECC_Certification_Authority_certificate[653]={
+0x30,0x82,0x02,0x89,0x30,0x82,0x02,0x0F,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x1F,
+0x47,0xAF,0xAA,0x62,0x00,0x70,0x50,0x54,0x4C,0x01,0x9E,0x9B,0x63,0x99,0x2A,0x30,
+0x0A,0x06,0x08,0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x03,0x30,0x81,0x85,0x31,0x0B,
+0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x47,0x42,0x31,0x1B,0x30,0x19,0x06,
+0x03,0x55,0x04,0x08,0x13,0x12,0x47,0x72,0x65,0x61,0x74,0x65,0x72,0x20,0x4D,0x61,
+0x6E,0x63,0x68,0x65,0x73,0x74,0x65,0x72,0x31,0x10,0x30,0x0E,0x06,0x03,0x55,0x04,
+0x07,0x13,0x07,0x53,0x61,0x6C,0x66,0x6F,0x72,0x64,0x31,0x1A,0x30,0x18,0x06,0x03,
+0x55,0x04,0x0A,0x13,0x11,0x43,0x4F,0x4D,0x4F,0x44,0x4F,0x20,0x43,0x41,0x20,0x4C,
+0x69,0x6D,0x69,0x74,0x65,0x64,0x31,0x2B,0x30,0x29,0x06,0x03,0x55,0x04,0x03,0x13,
+0x22,0x43,0x4F,0x4D,0x4F,0x44,0x4F,0x20,0x45,0x43,0x43,0x20,0x43,0x65,0x72,0x74,
+0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,
+0x69,0x74,0x79,0x30,0x1E,0x17,0x0D,0x30,0x38,0x30,0x33,0x30,0x36,0x30,0x30,0x30,
+0x30,0x30,0x30,0x5A,0x17,0x0D,0x33,0x38,0x30,0x31,0x31,0x38,0x32,0x33,0x35,0x39,
+0x35,0x39,0x5A,0x30,0x81,0x85,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,
+0x02,0x47,0x42,0x31,0x1B,0x30,0x19,0x06,0x03,0x55,0x04,0x08,0x13,0x12,0x47,0x72,
+0x65,0x61,0x74,0x65,0x72,0x20,0x4D,0x61,0x6E,0x63,0x68,0x65,0x73,0x74,0x65,0x72,
+0x31,0x10,0x30,0x0E,0x06,0x03,0x55,0x04,0x07,0x13,0x07,0x53,0x61,0x6C,0x66,0x6F,
+0x72,0x64,0x31,0x1A,0x30,0x18,0x06,0x03,0x55,0x04,0x0A,0x13,0x11,0x43,0x4F,0x4D,
+0x4F,0x44,0x4F,0x20,0x43,0x41,0x20,0x4C,0x69,0x6D,0x69,0x74,0x65,0x64,0x31,0x2B,
+0x30,0x29,0x06,0x03,0x55,0x04,0x03,0x13,0x22,0x43,0x4F,0x4D,0x4F,0x44,0x4F,0x20,
+0x45,0x43,0x43,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,
+0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x30,0x76,0x30,0x10,0x06,
+0x07,0x2A,0x86,0x48,0xCE,0x3D,0x02,0x01,0x06,0x05,0x2B,0x81,0x04,0x00,0x22,0x03,
+0x62,0x00,0x04,0x03,0x47,0x7B,0x2F,0x75,0xC9,0x82,0x15,0x85,0xFB,0x75,0xE4,0x91,
+0x16,0xD4,0xAB,0x62,0x99,0xF5,0x3E,0x52,0x0B,0x06,0xCE,0x41,0x00,0x7F,0x97,0xE1,
+0x0A,0x24,0x3C,0x1D,0x01,0x04,0xEE,0x3D,0xD2,0x8D,0x09,0x97,0x0C,0xE0,0x75,0xE4,
+0xFA,0xFB,0x77,0x8A,0x2A,0xF5,0x03,0x60,0x4B,0x36,0x8B,0x16,0x23,0x16,0xAD,0x09,
+0x71,0xF4,0x4A,0xF4,0x28,0x50,0xB4,0xFE,0x88,0x1C,0x6E,0x3F,0x6C,0x2F,0x2F,0x09,
+0x59,0x5B,0xA5,0x5B,0x0B,0x33,0x99,0xE2,0xC3,0x3D,0x89,0xF9,0x6A,0x2C,0xEF,0xB2,
+0xD3,0x06,0xE9,0xA3,0x42,0x30,0x40,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,
+0x04,0x14,0x75,0x71,0xA7,0x19,0x48,0x19,0xBC,0x9D,0x9D,0xEA,0x41,0x47,0xDF,0x94,
+0xC4,0x48,0x77,0x99,0xD3,0x79,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,
+0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,
+0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x0A,0x06,0x08,0x2A,0x86,0x48,0xCE,0x3D,
+0x04,0x03,0x03,0x03,0x68,0x00,0x30,0x65,0x02,0x31,0x00,0xEF,0x03,0x5B,0x7A,0xAC,
+0xB7,0x78,0x0A,0x72,0xB7,0x88,0xDF,0xFF,0xB5,0x46,0x14,0x09,0x0A,0xFA,0xA0,0xE6,
+0x7D,0x08,0xC6,0x1A,0x87,0xBD,0x18,0xA8,0x73,0xBD,0x26,0xCA,0x60,0x0C,0x9D,0xCE,
+0x99,0x9F,0xCF,0x5C,0x0F,0x30,0xE1,0xBE,0x14,0x31,0xEA,0x02,0x30,0x14,0xF4,0x93,
+0x3C,0x49,0xA7,0x33,0x7A,0x90,0x46,0x47,0xB3,0x63,0x7D,0x13,0x9B,0x4E,0xB7,0x6F,
+0x18,0x37,0x80,0x53,0xFE,0xDD,0x20,0xE0,0x35,0x9A,0x36,0xD1,0xC7,0x01,0xB9,0xE6,
+0xDC,0xDD,0xF3,0xFF,0x1D,0x2C,0x3A,0x16,0x57,0xD9,0x92,0x39,0xD6,
+};
+
+
+/* subject:/C=US/O=Entrust, Inc./OU=See www.entrust.net/legal-terms/OU=(c) 2009 Entrust, Inc. - for authorized use only/CN=Entrust Root Certification Authority - G2 */
+/* issuer :/C=US/O=Entrust, Inc./OU=See www.entrust.net/legal-terms/OU=(c) 2009 Entrust, Inc. - for authorized use only/CN=Entrust Root Certification Authority - G2 */
+
+
+const unsigned char Entrust_Root_Certification_Authority___G2_certificate[1090]={
+0x30,0x82,0x04,0x3E,0x30,0x82,0x03,0x26,0xA0,0x03,0x02,0x01,0x02,0x02,0x04,0x4A,
+0x53,0x8C,0x28,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0B,
+0x05,0x00,0x30,0x81,0xBE,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,
+0x55,0x53,0x31,0x16,0x30,0x14,0x06,0x03,0x55,0x04,0x0A,0x13,0x0D,0x45,0x6E,0x74,
+0x72,0x75,0x73,0x74,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x31,0x28,0x30,0x26,0x06,0x03,
+0x55,0x04,0x0B,0x13,0x1F,0x53,0x65,0x65,0x20,0x77,0x77,0x77,0x2E,0x65,0x6E,0x74,
+0x72,0x75,0x73,0x74,0x2E,0x6E,0x65,0x74,0x2F,0x6C,0x65,0x67,0x61,0x6C,0x2D,0x74,
+0x65,0x72,0x6D,0x73,0x31,0x39,0x30,0x37,0x06,0x03,0x55,0x04,0x0B,0x13,0x30,0x28,
+0x63,0x29,0x20,0x32,0x30,0x30,0x39,0x20,0x45,0x6E,0x74,0x72,0x75,0x73,0x74,0x2C,
+0x20,0x49,0x6E,0x63,0x2E,0x20,0x2D,0x20,0x66,0x6F,0x72,0x20,0x61,0x75,0x74,0x68,
+0x6F,0x72,0x69,0x7A,0x65,0x64,0x20,0x75,0x73,0x65,0x20,0x6F,0x6E,0x6C,0x79,0x31,
+0x32,0x30,0x30,0x06,0x03,0x55,0x04,0x03,0x13,0x29,0x45,0x6E,0x74,0x72,0x75,0x73,
+0x74,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,
+0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x20,0x2D,
+0x20,0x47,0x32,0x30,0x1E,0x17,0x0D,0x30,0x39,0x30,0x37,0x30,0x37,0x31,0x37,0x32,
+0x35,0x35,0x34,0x5A,0x17,0x0D,0x33,0x30,0x31,0x32,0x30,0x37,0x31,0x37,0x35,0x35,
+0x35,0x34,0x5A,0x30,0x81,0xBE,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,
+0x02,0x55,0x53,0x31,0x16,0x30,0x14,0x06,0x03,0x55,0x04,0x0A,0x13,0x0D,0x45,0x6E,
+0x74,0x72,0x75,0x73,0x74,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x31,0x28,0x30,0x26,0x06,
+0x03,0x55,0x04,0x0B,0x13,0x1F,0x53,0x65,0x65,0x20,0x77,0x77,0x77,0x2E,0x65,0x6E,
+0x74,0x72,0x75,0x73,0x74,0x2E,0x6E,0x65,0x74,0x2F,0x6C,0x65,0x67,0x61,0x6C,0x2D,
+0x74,0x65,0x72,0x6D,0x73,0x31,0x39,0x30,0x37,0x06,0x03,0x55,0x04,0x0B,0x13,0x30,
+0x28,0x63,0x29,0x20,0x32,0x30,0x30,0x39,0x20,0x45,0x6E,0x74,0x72,0x75,0x73,0x74,
+0x2C,0x20,0x49,0x6E,0x63,0x2E,0x20,0x2D,0x20,0x66,0x6F,0x72,0x20,0x61,0x75,0x74,
+0x68,0x6F,0x72,0x69,0x7A,0x65,0x64,0x20,0x75,0x73,0x65,0x20,0x6F,0x6E,0x6C,0x79,
+0x31,0x32,0x30,0x30,0x06,0x03,0x55,0x04,0x03,0x13,0x29,0x45,0x6E,0x74,0x72,0x75,
+0x73,0x74,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,
+0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x20,
+0x2D,0x20,0x47,0x32,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,
+0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,
+0x02,0x82,0x01,0x01,0x00,0xBA,0x84,0xB6,0x72,0xDB,0x9E,0x0C,0x6B,0xE2,0x99,0xE9,
+0x30,0x01,0xA7,0x76,0xEA,0x32,0xB8,0x95,0x41,0x1A,0xC9,0xDA,0x61,0x4E,0x58,0x72,
+0xCF,0xFE,0xF6,0x82,0x79,0xBF,0x73,0x61,0x06,0x0A,0xA5,0x27,0xD8,0xB3,0x5F,0xD3,
+0x45,0x4E,0x1C,0x72,0xD6,0x4E,0x32,0xF2,0x72,0x8A,0x0F,0xF7,0x83,0x19,0xD0,0x6A,
+0x80,0x80,0x00,0x45,0x1E,0xB0,0xC7,0xE7,0x9A,0xBF,0x12,0x57,0x27,0x1C,0xA3,0x68,
+0x2F,0x0A,0x87,0xBD,0x6A,0x6B,0x0E,0x5E,0x65,0xF3,0x1C,0x77,0xD5,0xD4,0x85,0x8D,
+0x70,0x21,0xB4,0xB3,0x32,0xE7,0x8B,0xA2,0xD5,0x86,0x39,0x02,0xB1,0xB8,0xD2,0x47,
+0xCE,0xE4,0xC9,0x49,0xC4,0x3B,0xA7,0xDE,0xFB,0x54,0x7D,0x57,0xBE,0xF0,0xE8,0x6E,
+0xC2,0x79,0xB2,0x3A,0x0B,0x55,0xE2,0x50,0x98,0x16,0x32,0x13,0x5C,0x2F,0x78,0x56,
+0xC1,0xC2,0x94,0xB3,0xF2,0x5A,0xE4,0x27,0x9A,0x9F,0x24,0xD7,0xC6,0xEC,0xD0,0x9B,
+0x25,0x82,0xE3,0xCC,0xC2,0xC4,0x45,0xC5,0x8C,0x97,0x7A,0x06,0x6B,0x2A,0x11,0x9F,
+0xA9,0x0A,0x6E,0x48,0x3B,0x6F,0xDB,0xD4,0x11,0x19,0x42,0xF7,0x8F,0x07,0xBF,0xF5,
+0x53,0x5F,0x9C,0x3E,0xF4,0x17,0x2C,0xE6,0x69,0xAC,0x4E,0x32,0x4C,0x62,0x77,0xEA,
+0xB7,0xE8,0xE5,0xBB,0x34,0xBC,0x19,0x8B,0xAE,0x9C,0x51,0xE7,0xB7,0x7E,0xB5,0x53,
+0xB1,0x33,0x22,0xE5,0x6D,0xCF,0x70,0x3C,0x1A,0xFA,0xE2,0x9B,0x67,0xB6,0x83,0xF4,
+0x8D,0xA5,0xAF,0x62,0x4C,0x4D,0xE0,0x58,0xAC,0x64,0x34,0x12,0x03,0xF8,0xB6,0x8D,
+0x94,0x63,0x24,0xA4,0x71,0x02,0x03,0x01,0x00,0x01,0xA3,0x42,0x30,0x40,0x30,0x0E,
+0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x0F,
+0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,
+0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x6A,0x72,0x26,0x7A,0xD0,0x1E,
+0xEF,0x7D,0xE7,0x3B,0x69,0x51,0xD4,0x6C,0x8D,0x9F,0x90,0x12,0x66,0xAB,0x30,0x0D,
+0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0B,0x05,0x00,0x03,0x82,0x01,
+0x01,0x00,0x79,0x9F,0x1D,0x96,0xC6,0xB6,0x79,0x3F,0x22,0x8D,0x87,0xD3,0x87,0x03,
+0x04,0x60,0x6A,0x6B,0x9A,0x2E,0x59,0x89,0x73,0x11,0xAC,0x43,0xD1,0xF5,0x13,0xFF,
+0x8D,0x39,0x2B,0xC0,0xF2,0xBD,0x4F,0x70,0x8C,0xA9,0x2F,0xEA,0x17,0xC4,0x0B,0x54,
+0x9E,0xD4,0x1B,0x96,0x98,0x33,0x3C,0xA8,0xAD,0x62,0xA2,0x00,0x76,0xAB,0x59,0x69,
+0x6E,0x06,0x1D,0x7E,0xC4,0xB9,0x44,0x8D,0x98,0xAF,0x12,0xD4,0x61,0xDB,0x0A,0x19,
+0x46,0x47,0xF3,0xEB,0xF7,0x63,0xC1,0x40,0x05,0x40,0xA5,0xD2,0xB7,0xF4,0xB5,0x9A,
+0x36,0xBF,0xA9,0x88,0x76,0x88,0x04,0x55,0x04,0x2B,0x9C,0x87,0x7F,0x1A,0x37,0x3C,
+0x7E,0x2D,0xA5,0x1A,0xD8,0xD4,0x89,0x5E,0xCA,0xBD,0xAC,0x3D,0x6C,0xD8,0x6D,0xAF,
+0xD5,0xF3,0x76,0x0F,0xCD,0x3B,0x88,0x38,0x22,0x9D,0x6C,0x93,0x9A,0xC4,0x3D,0xBF,
+0x82,0x1B,0x65,0x3F,0xA6,0x0F,0x5D,0xAA,0xFC,0xE5,0xB2,0x15,0xCA,0xB5,0xAD,0xC6,
+0xBC,0x3D,0xD0,0x84,0xE8,0xEA,0x06,0x72,0xB0,0x4D,0x39,0x32,0x78,0xBF,0x3E,0x11,
+0x9C,0x0B,0xA4,0x9D,0x9A,0x21,0xF3,0xF0,0x9B,0x0B,0x30,0x78,0xDB,0xC1,0xDC,0x87,
+0x43,0xFE,0xBC,0x63,0x9A,0xCA,0xC5,0xC2,0x1C,0xC9,0xC7,0x8D,0xFF,0x3B,0x12,0x58,
+0x08,0xE6,0xB6,0x3D,0xEC,0x7A,0x2C,0x4E,0xFB,0x83,0x96,0xCE,0x0C,0x3C,0x69,0x87,
+0x54,0x73,0xA4,0x73,0xC2,0x93,0xFF,0x51,0x10,0xAC,0x15,0x54,0x01,0xD8,0xFC,0x05,
+0xB1,0x89,0xA1,0x7F,0x74,0x83,0x9A,0x49,0xD7,0xDC,0x4E,0x7B,0x8A,0x48,0x6F,0x8B,
+0x45,0xF6,
+};
+
+
+/* subject:/C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert Assured ID Root G2 */
+/* issuer :/C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert Assured ID Root G2 */
+
+
+const unsigned char DigiCert_Assured_ID_Root_G2_certificate[922]={
+0x30,0x82,0x03,0x96,0x30,0x82,0x02,0x7E,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x0B,
+0x93,0x1C,0x3A,0xD6,0x39,0x67,0xEA,0x67,0x23,0xBF,0xC3,0xAF,0x9A,0xF4,0x4B,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0B,0x05,0x00,0x30,0x65,
+0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x15,0x30,
+0x13,0x06,0x03,0x55,0x04,0x0A,0x13,0x0C,0x44,0x69,0x67,0x69,0x43,0x65,0x72,0x74,
+0x20,0x49,0x6E,0x63,0x31,0x19,0x30,0x17,0x06,0x03,0x55,0x04,0x0B,0x13,0x10,0x77,
+0x77,0x77,0x2E,0x64,0x69,0x67,0x69,0x63,0x65,0x72,0x74,0x2E,0x63,0x6F,0x6D,0x31,
+0x24,0x30,0x22,0x06,0x03,0x55,0x04,0x03,0x13,0x1B,0x44,0x69,0x67,0x69,0x43,0x65,
+0x72,0x74,0x20,0x41,0x73,0x73,0x75,0x72,0x65,0x64,0x20,0x49,0x44,0x20,0x52,0x6F,
+0x6F,0x74,0x20,0x47,0x32,0x30,0x1E,0x17,0x0D,0x31,0x33,0x30,0x38,0x30,0x31,0x31,
+0x32,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x33,0x38,0x30,0x31,0x31,0x35,0x31,0x32,
+0x30,0x30,0x30,0x30,0x5A,0x30,0x65,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,
+0x13,0x02,0x55,0x53,0x31,0x15,0x30,0x13,0x06,0x03,0x55,0x04,0x0A,0x13,0x0C,0x44,
+0x69,0x67,0x69,0x43,0x65,0x72,0x74,0x20,0x49,0x6E,0x63,0x31,0x19,0x30,0x17,0x06,
+0x03,0x55,0x04,0x0B,0x13,0x10,0x77,0x77,0x77,0x2E,0x64,0x69,0x67,0x69,0x63,0x65,
+0x72,0x74,0x2E,0x63,0x6F,0x6D,0x31,0x24,0x30,0x22,0x06,0x03,0x55,0x04,0x03,0x13,
+0x1B,0x44,0x69,0x67,0x69,0x43,0x65,0x72,0x74,0x20,0x41,0x73,0x73,0x75,0x72,0x65,
+0x64,0x20,0x49,0x44,0x20,0x52,0x6F,0x6F,0x74,0x20,0x47,0x32,0x30,0x82,0x01,0x22,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,
+0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01,0x01,0x00,0xD9,0xE7,0x28,
+0x2F,0x52,0x3F,0x36,0x72,0x49,0x88,0x93,0x34,0xF3,0xF8,0x6A,0x1E,0x31,0x54,0x80,
+0x9F,0xAD,0x54,0x41,0xB5,0x47,0xDF,0x96,0xA8,0xD4,0xAF,0x80,0x2D,0xB9,0x0A,0xCF,
+0x75,0xFD,0x89,0xA5,0x7D,0x24,0xFA,0xE3,0x22,0x0C,0x2B,0xBC,0x95,0x17,0x0B,0x33,
+0xBF,0x19,0x4D,0x41,0x06,0x90,0x00,0xBD,0x0C,0x4D,0x10,0xFE,0x07,0xB5,0xE7,0x1C,
+0x6E,0x22,0x55,0x31,0x65,0x97,0xBD,0xD3,0x17,0xD2,0x1E,0x62,0xF3,0xDB,0xEA,0x6C,
+0x50,0x8C,0x3F,0x84,0x0C,0x96,0xCF,0xB7,0xCB,0x03,0xE0,0xCA,0x6D,0xA1,0x14,0x4C,
+0x1B,0x89,0xDD,0xED,0x00,0xB0,0x52,0x7C,0xAF,0x91,0x6C,0xB1,0x38,0x13,0xD1,0xE9,
+0x12,0x08,0xC0,0x00,0xB0,0x1C,0x2B,0x11,0xDA,0x77,0x70,0x36,0x9B,0xAE,0xCE,0x79,
+0x87,0xDC,0x82,0x70,0xE6,0x09,0x74,0x70,0x55,0x69,0xAF,0xA3,0x68,0x9F,0xBF,0xDD,
+0xB6,0x79,0xB3,0xF2,0x9D,0x70,0x29,0x55,0xF4,0xAB,0xFF,0x95,0x61,0xF3,0xC9,0x40,
+0x6F,0x1D,0xD1,0xBE,0x93,0xBB,0xD3,0x88,0x2A,0xBB,0x9D,0xBF,0x72,0x5A,0x56,0x71,
+0x3B,0x3F,0xD4,0xF3,0xD1,0x0A,0xFE,0x28,0xEF,0xA3,0xEE,0xD9,0x99,0xAF,0x03,0xD3,
+0x8F,0x60,0xB7,0xF2,0x92,0xA1,0xB1,0xBD,0x89,0x89,0x1F,0x30,0xCD,0xC3,0xA6,0x2E,
+0x62,0x33,0xAE,0x16,0x02,0x77,0x44,0x5A,0xE7,0x81,0x0A,0x3C,0xA7,0x44,0x2E,0x79,
+0xB8,0x3F,0x04,0xBC,0x5C,0xA0,0x87,0xE1,0x1B,0xAF,0x51,0x8E,0xCD,0xEC,0x2C,0xFA,
+0xF8,0xFE,0x6D,0xF0,0x3A,0x7C,0xAA,0x8B,0xE4,0x67,0x95,0x31,0x8D,0x02,0x03,0x01,
+0x00,0x01,0xA3,0x42,0x30,0x40,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,
+0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,
+0xFF,0x04,0x04,0x03,0x02,0x01,0x86,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,
+0x04,0x14,0xCE,0xC3,0x4A,0xB9,0x99,0x55,0xF2,0xB8,0xDB,0x60,0xBF,0xA9,0x7E,0xBD,
+0x56,0xB5,0x97,0x36,0xA7,0xD6,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,
+0x01,0x01,0x0B,0x05,0x00,0x03,0x82,0x01,0x01,0x00,0xCA,0xA5,0x55,0x8C,0xE3,0xC8,
+0x41,0x6E,0x69,0x27,0xA7,0x75,0x11,0xEF,0x3C,0x86,0x36,0x6F,0xD2,0x9D,0xC6,0x78,
+0x38,0x1D,0x69,0x96,0xA2,0x92,0x69,0x2E,0x38,0x6C,0x9B,0x7D,0x04,0xD4,0x89,0xA5,
+0xB1,0x31,0x37,0x8A,0xC9,0x21,0xCC,0xAB,0x6C,0xCD,0x8B,0x1C,0x9A,0xD6,0xBF,0x48,
+0xD2,0x32,0x66,0xC1,0x8A,0xC0,0xF3,0x2F,0x3A,0xEF,0xC0,0xE3,0xD4,0x91,0x86,0xD1,
+0x50,0xE3,0x03,0xDB,0x73,0x77,0x6F,0x4A,0x39,0x53,0xED,0xDE,0x26,0xC7,0xB5,0x7D,
+0xAF,0x2B,0x42,0xD1,0x75,0x62,0xE3,0x4A,0x2B,0x02,0xC7,0x50,0x4B,0xE0,0x69,0xE2,
+0x96,0x6C,0x0E,0x44,0x66,0x10,0x44,0x8F,0xAD,0x05,0xEB,0xF8,0x79,0xAC,0xA6,0x1B,
+0xE8,0x37,0x34,0x9D,0x53,0xC9,0x61,0xAA,0xA2,0x52,0xAF,0x4A,0x70,0x16,0x86,0xC2,
+0x3A,0xC8,0xB1,0x13,0x70,0x36,0xD8,0xCF,0xEE,0xF4,0x0A,0x34,0xD5,0x5B,0x4C,0xFD,
+0x07,0x9C,0xA2,0xBA,0xD9,0x01,0x72,0x5C,0xF3,0x4D,0xC1,0xDD,0x0E,0xB1,0x1C,0x0D,
+0xC4,0x63,0xBE,0xAD,0xF4,0x14,0xFB,0x89,0xEC,0xA2,0x41,0x0E,0x4C,0xCC,0xC8,0x57,
+0x40,0xD0,0x6E,0x03,0xAA,0xCD,0x0C,0x8E,0x89,0x99,0x99,0x6C,0xF0,0x3C,0x30,0xAF,
+0x38,0xDF,0x6F,0xBC,0xA3,0xBE,0x29,0x20,0x27,0xAB,0x74,0xFF,0x13,0x22,0x78,0xDE,
+0x97,0x52,0x55,0x1E,0x83,0xB5,0x54,0x20,0x03,0xEE,0xAE,0xC0,0x4F,0x56,0xDE,0x37,
+0xCC,0xC3,0x7F,0xAA,0x04,0x27,0xBB,0xD3,0x77,0xB8,0x62,0xDB,0x17,0x7C,0x9C,0x28,
+0x22,0x13,0x73,0x6C,0xCF,0x26,0xF5,0x8A,0x29,0xE7,
+};
+
+
+/* subject:/C=US/O=AffirmTrust/CN=AffirmTrust Commercial */
+/* issuer :/C=US/O=AffirmTrust/CN=AffirmTrust Commercial */
+
+
+const unsigned char AffirmTrust_Commercial_certificate[848]={
+0x30,0x82,0x03,0x4C,0x30,0x82,0x02,0x34,0xA0,0x03,0x02,0x01,0x02,0x02,0x08,0x77,
+0x77,0x06,0x27,0x26,0xA9,0xB1,0x7C,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,
+0x0D,0x01,0x01,0x0B,0x05,0x00,0x30,0x44,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,
+0x06,0x13,0x02,0x55,0x53,0x31,0x14,0x30,0x12,0x06,0x03,0x55,0x04,0x0A,0x0C,0x0B,
+0x41,0x66,0x66,0x69,0x72,0x6D,0x54,0x72,0x75,0x73,0x74,0x31,0x1F,0x30,0x1D,0x06,
+0x03,0x55,0x04,0x03,0x0C,0x16,0x41,0x66,0x66,0x69,0x72,0x6D,0x54,0x72,0x75,0x73,
+0x74,0x20,0x43,0x6F,0x6D,0x6D,0x65,0x72,0x63,0x69,0x61,0x6C,0x30,0x1E,0x17,0x0D,
+0x31,0x30,0x30,0x31,0x32,0x39,0x31,0x34,0x30,0x36,0x30,0x36,0x5A,0x17,0x0D,0x33,
+0x30,0x31,0x32,0x33,0x31,0x31,0x34,0x30,0x36,0x30,0x36,0x5A,0x30,0x44,0x31,0x0B,
+0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x14,0x30,0x12,0x06,
+0x03,0x55,0x04,0x0A,0x0C,0x0B,0x41,0x66,0x66,0x69,0x72,0x6D,0x54,0x72,0x75,0x73,
+0x74,0x31,0x1F,0x30,0x1D,0x06,0x03,0x55,0x04,0x03,0x0C,0x16,0x41,0x66,0x66,0x69,
+0x72,0x6D,0x54,0x72,0x75,0x73,0x74,0x20,0x43,0x6F,0x6D,0x6D,0x65,0x72,0x63,0x69,
+0x61,0x6C,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,
+0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,
+0x01,0x01,0x00,0xF6,0x1B,0x4F,0x67,0x07,0x2B,0xA1,0x15,0xF5,0x06,0x22,0xCB,0x1F,
+0x01,0xB2,0xE3,0x73,0x45,0x06,0x44,0x49,0x2C,0xBB,0x49,0x25,0x14,0xD6,0xCE,0xC3,
+0xB7,0xAB,0x2C,0x4F,0xC6,0x41,0x32,0x94,0x57,0xFA,0x12,0xA7,0x5B,0x0E,0xE2,0x8F,
+0x1F,0x1E,0x86,0x19,0xA7,0xAA,0xB5,0x2D,0xB9,0x5F,0x0D,0x8A,0xC2,0xAF,0x85,0x35,
+0x79,0x32,0x2D,0xBB,0x1C,0x62,0x37,0xF2,0xB1,0x5B,0x4A,0x3D,0xCA,0xCD,0x71,0x5F,
+0xE9,0x42,0xBE,0x94,0xE8,0xC8,0xDE,0xF9,0x22,0x48,0x64,0xC6,0xE5,0xAB,0xC6,0x2B,
+0x6D,0xAD,0x05,0xF0,0xFA,0xD5,0x0B,0xCF,0x9A,0xE5,0xF0,0x50,0xA4,0x8B,0x3B,0x47,
+0xA5,0x23,0x5B,0x7A,0x7A,0xF8,0x33,0x3F,0xB8,0xEF,0x99,0x97,0xE3,0x20,0xC1,0xD6,
+0x28,0x89,0xCF,0x94,0xFB,0xB9,0x45,0xED,0xE3,0x40,0x17,0x11,0xD4,0x74,0xF0,0x0B,
+0x31,0xE2,0x2B,0x26,0x6A,0x9B,0x4C,0x57,0xAE,0xAC,0x20,0x3E,0xBA,0x45,0x7A,0x05,
+0xF3,0xBD,0x9B,0x69,0x15,0xAE,0x7D,0x4E,0x20,0x63,0xC4,0x35,0x76,0x3A,0x07,0x02,
+0xC9,0x37,0xFD,0xC7,0x47,0xEE,0xE8,0xF1,0x76,0x1D,0x73,0x15,0xF2,0x97,0xA4,0xB5,
+0xC8,0x7A,0x79,0xD9,0x42,0xAA,0x2B,0x7F,0x5C,0xFE,0xCE,0x26,0x4F,0xA3,0x66,0x81,
+0x35,0xAF,0x44,0xBA,0x54,0x1E,0x1C,0x30,0x32,0x65,0x9D,0xE6,0x3C,0x93,0x5E,0x50,
+0x4E,0x7A,0xE3,0x3A,0xD4,0x6E,0xCC,0x1A,0xFB,0xF9,0xD2,0x37,0xAE,0x24,0x2A,0xAB,
+0x57,0x03,0x22,0x28,0x0D,0x49,0x75,0x7F,0xB7,0x28,0xDA,0x75,0xBF,0x8E,0xE3,0xDC,
+0x0E,0x79,0x31,0x02,0x03,0x01,0x00,0x01,0xA3,0x42,0x30,0x40,0x30,0x1D,0x06,0x03,
+0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x9D,0x93,0xC6,0x53,0x8B,0x5E,0xCA,0xAF,0x3F,
+0x9F,0x1E,0x0F,0xE5,0x99,0x95,0xBC,0x24,0xF6,0x94,0x8F,0x30,0x0F,0x06,0x03,0x55,
+0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x0E,0x06,0x03,
+0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x0D,0x06,0x09,
+0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0B,0x05,0x00,0x03,0x82,0x01,0x01,0x00,
+0x58,0xAC,0xF4,0x04,0x0E,0xCD,0xC0,0x0D,0xFF,0x0A,0xFD,0xD4,0xBA,0x16,0x5F,0x29,
+0xBD,0x7B,0x68,0x99,0x58,0x49,0xD2,0xB4,0x1D,0x37,0x4D,0x7F,0x27,0x7D,0x46,0x06,
+0x5D,0x43,0xC6,0x86,0x2E,0x3E,0x73,0xB2,0x26,0x7D,0x4F,0x93,0xA9,0xB6,0xC4,0x2A,
+0x9A,0xAB,0x21,0x97,0x14,0xB1,0xDE,0x8C,0xD3,0xAB,0x89,0x15,0xD8,0x6B,0x24,0xD4,
+0xF1,0x16,0xAE,0xD8,0xA4,0x5C,0xD4,0x7F,0x51,0x8E,0xED,0x18,0x01,0xB1,0x93,0x63,
+0xBD,0xBC,0xF8,0x61,0x80,0x9A,0x9E,0xB1,0xCE,0x42,0x70,0xE2,0xA9,0x7D,0x06,0x25,
+0x7D,0x27,0xA1,0xFE,0x6F,0xEC,0xB3,0x1E,0x24,0xDA,0xE3,0x4B,0x55,0x1A,0x00,0x3B,
+0x35,0xB4,0x3B,0xD9,0xD7,0x5D,0x30,0xFD,0x81,0x13,0x89,0xF2,0xC2,0x06,0x2B,0xED,
+0x67,0xC4,0x8E,0xC9,0x43,0xB2,0x5C,0x6B,0x15,0x89,0x02,0xBC,0x62,0xFC,0x4E,0xF2,
+0xB5,0x33,0xAA,0xB2,0x6F,0xD3,0x0A,0xA2,0x50,0xE3,0xF6,0x3B,0xE8,0x2E,0x44,0xC2,
+0xDB,0x66,0x38,0xA9,0x33,0x56,0x48,0xF1,0x6D,0x1B,0x33,0x8D,0x0D,0x8C,0x3F,0x60,
+0x37,0x9D,0xD3,0xCA,0x6D,0x7E,0x34,0x7E,0x0D,0x9F,0x72,0x76,0x8B,0x1B,0x9F,0x72,
+0xFD,0x52,0x35,0x41,0x45,0x02,0x96,0x2F,0x1C,0xB2,0x9A,0x73,0x49,0x21,0xB1,0x49,
+0x47,0x45,0x47,0xB4,0xEF,0x6A,0x34,0x11,0xC9,0x4D,0x9A,0xCC,0x59,0xB7,0xD6,0x02,
+0x9E,0x5A,0x4E,0x65,0xB5,0x94,0xAE,0x1B,0xDF,0x29,0xB0,0x16,0xF1,0xBF,0x00,0x9E,
+0x07,0x3A,0x17,0x64,0xB5,0x04,0xB5,0x23,0x21,0x99,0x0A,0x95,0x3B,0x97,0x7C,0xEF,
+};
+
+
+/* subject:/C=US/O=AffirmTrust/CN=AffirmTrust Premium */
+/* issuer :/C=US/O=AffirmTrust/CN=AffirmTrust Premium */
+
+
+const unsigned char AffirmTrust_Premium_certificate[1354]={
+0x30,0x82,0x05,0x46,0x30,0x82,0x03,0x2E,0xA0,0x03,0x02,0x01,0x02,0x02,0x08,0x6D,
+0x8C,0x14,0x46,0xB1,0xA6,0x0A,0xEE,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,
+0x0D,0x01,0x01,0x0C,0x05,0x00,0x30,0x41,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,
+0x06,0x13,0x02,0x55,0x53,0x31,0x14,0x30,0x12,0x06,0x03,0x55,0x04,0x0A,0x0C,0x0B,
+0x41,0x66,0x66,0x69,0x72,0x6D,0x54,0x72,0x75,0x73,0x74,0x31,0x1C,0x30,0x1A,0x06,
+0x03,0x55,0x04,0x03,0x0C,0x13,0x41,0x66,0x66,0x69,0x72,0x6D,0x54,0x72,0x75,0x73,
+0x74,0x20,0x50,0x72,0x65,0x6D,0x69,0x75,0x6D,0x30,0x1E,0x17,0x0D,0x31,0x30,0x30,
+0x31,0x32,0x39,0x31,0x34,0x31,0x30,0x33,0x36,0x5A,0x17,0x0D,0x34,0x30,0x31,0x32,
+0x33,0x31,0x31,0x34,0x31,0x30,0x33,0x36,0x5A,0x30,0x41,0x31,0x0B,0x30,0x09,0x06,
+0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x14,0x30,0x12,0x06,0x03,0x55,0x04,
+0x0A,0x0C,0x0B,0x41,0x66,0x66,0x69,0x72,0x6D,0x54,0x72,0x75,0x73,0x74,0x31,0x1C,
+0x30,0x1A,0x06,0x03,0x55,0x04,0x03,0x0C,0x13,0x41,0x66,0x66,0x69,0x72,0x6D,0x54,
+0x72,0x75,0x73,0x74,0x20,0x50,0x72,0x65,0x6D,0x69,0x75,0x6D,0x30,0x82,0x02,0x22,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,
+0x82,0x02,0x0F,0x00,0x30,0x82,0x02,0x0A,0x02,0x82,0x02,0x01,0x00,0xC4,0x12,0xDF,
+0xA9,0x5F,0xFE,0x41,0xDD,0xDD,0xF5,0x9F,0x8A,0xE3,0xF6,0xAC,0xE1,0x3C,0x78,0x9A,
+0xBC,0xD8,0xF0,0x7F,0x7A,0xA0,0x33,0x2A,0xDC,0x8D,0x20,0x5B,0xAE,0x2D,0x6F,0xE7,
+0x93,0xD9,0x36,0x70,0x6A,0x68,0xCF,0x8E,0x51,0xA3,0x85,0x5B,0x67,0x04,0xA0,0x10,
+0x24,0x6F,0x5D,0x28,0x82,0xC1,0x97,0x57,0xD8,0x48,0x29,0x13,0xB6,0xE1,0xBE,0x91,
+0x4D,0xDF,0x85,0x0C,0x53,0x18,0x9A,0x1E,0x24,0xA2,0x4F,0x8F,0xF0,0xA2,0x85,0x0B,
+0xCB,0xF4,0x29,0x7F,0xD2,0xA4,0x58,0xEE,0x26,0x4D,0xC9,0xAA,0xA8,0x7B,0x9A,0xD9,
+0xFA,0x38,0xDE,0x44,0x57,0x15,0xE5,0xF8,0x8C,0xC8,0xD9,0x48,0xE2,0x0D,0x16,0x27,
+0x1D,0x1E,0xC8,0x83,0x85,0x25,0xB7,0xBA,0xAA,0x55,0x41,0xCC,0x03,0x22,0x4B,0x2D,
+0x91,0x8D,0x8B,0xE6,0x89,0xAF,0x66,0xC7,0xE9,0xFF,0x2B,0xE9,0x3C,0xAC,0xDA,0xD2,
+0xB3,0xC3,0xE1,0x68,0x9C,0x89,0xF8,0x7A,0x00,0x56,0xDE,0xF4,0x55,0x95,0x6C,0xFB,
+0xBA,0x64,0xDD,0x62,0x8B,0xDF,0x0B,0x77,0x32,0xEB,0x62,0xCC,0x26,0x9A,0x9B,0xBB,
+0xAA,0x62,0x83,0x4C,0xB4,0x06,0x7A,0x30,0xC8,0x29,0xBF,0xED,0x06,0x4D,0x97,0xB9,
+0x1C,0xC4,0x31,0x2B,0xD5,0x5F,0xBC,0x53,0x12,0x17,0x9C,0x99,0x57,0x29,0x66,0x77,
+0x61,0x21,0x31,0x07,0x2E,0x25,0x49,0x9D,0x18,0xF2,0xEE,0xF3,0x2B,0x71,0x8C,0xB5,
+0xBA,0x39,0x07,0x49,0x77,0xFC,0xEF,0x2E,0x92,0x90,0x05,0x8D,0x2D,0x2F,0x77,0x7B,
+0xEF,0x43,0xBF,0x35,0xBB,0x9A,0xD8,0xF9,0x73,0xA7,0x2C,0xF2,0xD0,0x57,0xEE,0x28,
+0x4E,0x26,0x5F,0x8F,0x90,0x68,0x09,0x2F,0xB8,0xF8,0xDC,0x06,0xE9,0x2E,0x9A,0x3E,
+0x51,0xA7,0xD1,0x22,0xC4,0x0A,0xA7,0x38,0x48,0x6C,0xB3,0xF9,0xFF,0x7D,0xAB,0x86,
+0x57,0xE3,0xBA,0xD6,0x85,0x78,0x77,0xBA,0x43,0xEA,0x48,0x7F,0xF6,0xD8,0xBE,0x23,
+0x6D,0x1E,0xBF,0xD1,0x36,0x6C,0x58,0x5C,0xF1,0xEE,0xA4,0x19,0x54,0x1A,0xF5,0x03,
+0xD2,0x76,0xE6,0xE1,0x8C,0xBD,0x3C,0xB3,0xD3,0x48,0x4B,0xE2,0xC8,0xF8,0x7F,0x92,
+0xA8,0x76,0x46,0x9C,0x42,0x65,0x3E,0xA4,0x1E,0xC1,0x07,0x03,0x5A,0x46,0x2D,0xB8,
+0x97,0xF3,0xB7,0xD5,0xB2,0x55,0x21,0xEF,0xBA,0xDC,0x4C,0x00,0x97,0xFB,0x14,0x95,
+0x27,0x33,0xBF,0xE8,0x43,0x47,0x46,0xD2,0x08,0x99,0x16,0x60,0x3B,0x9A,0x7E,0xD2,
+0xE6,0xED,0x38,0xEA,0xEC,0x01,0x1E,0x3C,0x48,0x56,0x49,0x09,0xC7,0x4C,0x37,0x00,
+0x9E,0x88,0x0E,0xC0,0x73,0xE1,0x6F,0x66,0xE9,0x72,0x47,0x30,0x3E,0x10,0xE5,0x0B,
+0x03,0xC9,0x9A,0x42,0x00,0x6C,0xC5,0x94,0x7E,0x61,0xC4,0x8A,0xDF,0x7F,0x82,0x1A,
+0x0B,0x59,0xC4,0x59,0x32,0x77,0xB3,0xBC,0x60,0x69,0x56,0x39,0xFD,0xB4,0x06,0x7B,
+0x2C,0xD6,0x64,0x36,0xD9,0xBD,0x48,0xED,0x84,0x1F,0x7E,0xA5,0x22,0x8F,0x2A,0xB8,
+0x42,0xF4,0x82,0xB7,0xD4,0x53,0x90,0x78,0x4E,0x2D,0x1A,0xFD,0x81,0x6F,0x44,0xD7,
+0x3B,0x01,0x74,0x96,0x42,0xE0,0x00,0xE2,0x2E,0x6B,0xEA,0xC5,0xEE,0x72,0xAC,0xBB,
+0xBF,0xFE,0xEA,0xAA,0xA8,0xF8,0xDC,0xF6,0xB2,0x79,0x8A,0xB6,0x67,0x02,0x03,0x01,
+0x00,0x01,0xA3,0x42,0x30,0x40,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,
+0x14,0x9D,0xC0,0x67,0xA6,0x0C,0x22,0xD9,0x26,0xF5,0x45,0xAB,0xA6,0x65,0x52,0x11,
+0x27,0xD8,0x45,0xAC,0x63,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,
+0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,
+0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,
+0x01,0x01,0x0C,0x05,0x00,0x03,0x82,0x02,0x01,0x00,0xB3,0x57,0x4D,0x10,0x62,0x4E,
+0x3A,0xE4,0xAC,0xEA,0xB8,0x1C,0xAF,0x32,0x23,0xC8,0xB3,0x49,0x5A,0x51,0x9C,0x76,
+0x28,0x8D,0x79,0xAA,0x57,0x46,0x17,0xD5,0xF5,0x52,0xF6,0xB7,0x44,0xE8,0x08,0x44,
+0xBF,0x18,0x84,0xD2,0x0B,0x80,0xCD,0xC5,0x12,0xFD,0x00,0x55,0x05,0x61,0x87,0x41,
+0xDC,0xB5,0x24,0x9E,0x3C,0xC4,0xD8,0xC8,0xFB,0x70,0x9E,0x2F,0x78,0x96,0x83,0x20,
+0x36,0xDE,0x7C,0x0F,0x69,0x13,0x88,0xA5,0x75,0x36,0x98,0x08,0xA6,0xC6,0xDF,0xAC,
+0xCE,0xE3,0x58,0xD6,0xB7,0x3E,0xDE,0xBA,0xF3,0xEB,0x34,0x40,0xD8,0xA2,0x81,0xF5,
+0x78,0x3F,0x2F,0xD5,0xA5,0xFC,0xD9,0xA2,0xD4,0x5E,0x04,0x0E,0x17,0xAD,0xFE,0x41,
+0xF0,0xE5,0xB2,0x72,0xFA,0x44,0x82,0x33,0x42,0xE8,0x2D,0x58,0xF7,0x56,0x8C,0x62,
+0x3F,0xBA,0x42,0xB0,0x9C,0x0C,0x5C,0x7E,0x2E,0x65,0x26,0x5C,0x53,0x4F,0x00,0xB2,
+0x78,0x7E,0xA1,0x0D,0x99,0x2D,0x8D,0xB8,0x1D,0x8E,0xA2,0xC4,0xB0,0xFD,0x60,0xD0,
+0x30,0xA4,0x8E,0xC8,0x04,0x62,0xA9,0xC4,0xED,0x35,0xDE,0x7A,0x97,0xED,0x0E,0x38,
+0x5E,0x92,0x2F,0x93,0x70,0xA5,0xA9,0x9C,0x6F,0xA7,0x7D,0x13,0x1D,0x7E,0xC6,0x08,
+0x48,0xB1,0x5E,0x67,0xEB,0x51,0x08,0x25,0xE9,0xE6,0x25,0x6B,0x52,0x29,0x91,0x9C,
+0xD2,0x39,0x73,0x08,0x57,0xDE,0x99,0x06,0xB4,0x5B,0x9D,0x10,0x06,0xE1,0xC2,0x00,
+0xA8,0xB8,0x1C,0x4A,0x02,0x0A,0x14,0xD0,0xC1,0x41,0xCA,0xFB,0x8C,0x35,0x21,0x7D,
+0x82,0x38,0xF2,0xA9,0x54,0x91,0x19,0x35,0x93,0x94,0x6D,0x6A,0x3A,0xC5,0xB2,0xD0,
+0xBB,0x89,0x86,0x93,0xE8,0x9B,0xC9,0x0F,0x3A,0xA7,0x7A,0xB8,0xA1,0xF0,0x78,0x46,
+0xFA,0xFC,0x37,0x2F,0xE5,0x8A,0x84,0xF3,0xDF,0xFE,0x04,0xD9,0xA1,0x68,0xA0,0x2F,
+0x24,0xE2,0x09,0x95,0x06,0xD5,0x95,0xCA,0xE1,0x24,0x96,0xEB,0x7C,0xF6,0x93,0x05,
+0xBB,0xED,0x73,0xE9,0x2D,0xD1,0x75,0x39,0xD7,0xE7,0x24,0xDB,0xD8,0x4E,0x5F,0x43,
+0x8F,0x9E,0xD0,0x14,0x39,0xBF,0x55,0x70,0x48,0x99,0x57,0x31,0xB4,0x9C,0xEE,0x4A,
+0x98,0x03,0x96,0x30,0x1F,0x60,0x06,0xEE,0x1B,0x23,0xFE,0x81,0x60,0x23,0x1A,0x47,
+0x62,0x85,0xA5,0xCC,0x19,0x34,0x80,0x6F,0xB3,0xAC,0x1A,0xE3,0x9F,0xF0,0x7B,0x48,
+0xAD,0xD5,0x01,0xD9,0x67,0xB6,0xA9,0x72,0x93,0xEA,0x2D,0x66,0xB5,0xB2,0xB8,0xE4,
+0x3D,0x3C,0xB2,0xEF,0x4C,0x8C,0xEA,0xEB,0x07,0xBF,0xAB,0x35,0x9A,0x55,0x86,0xBC,
+0x18,0xA6,0xB5,0xA8,0x5E,0xB4,0x83,0x6C,0x6B,0x69,0x40,0xD3,0x9F,0xDC,0xF1,0xC3,
+0x69,0x6B,0xB9,0xE1,0x6D,0x09,0xF4,0xF1,0xAA,0x50,0x76,0x0A,0x7A,0x7D,0x7A,0x17,
+0xA1,0x55,0x96,0x42,0x99,0x31,0x09,0xDD,0x60,0x11,0x8D,0x05,0x30,0x7E,0xE6,0x8E,
+0x46,0xD1,0x9D,0x14,0xDA,0xC7,0x17,0xE4,0x05,0x96,0x8C,0xC4,0x24,0xB5,0x1B,0xCF,
+0x14,0x07,0xB2,0x40,0xF8,0xA3,0x9E,0x41,0x86,0xBC,0x04,0xD0,0x6B,0x96,0xC8,0x2A,
+0x80,0x34,0xFD,0xBF,0xEF,0x06,0xA3,0xDD,0x58,0xC5,0x85,0x3D,0x3E,0x8F,0xFE,0x9E,
+0x29,0xE0,0xB6,0xB8,0x09,0x68,0x19,0x1C,0x18,0x43,
+};
+
+
+/* subject:/C=US/ST=Arizona/L=Scottsdale/O=GoDaddy.com, Inc./CN=Go Daddy Root Certificate Authority - G2 */
+/* issuer :/C=US/ST=Arizona/L=Scottsdale/O=GoDaddy.com, Inc./CN=Go Daddy Root Certificate Authority - G2 */
+
+
+const unsigned char Go_Daddy_Root_Certificate_Authority___G2_certificate[969]={
+0x30,0x82,0x03,0xC5,0x30,0x82,0x02,0xAD,0xA0,0x03,0x02,0x01,0x02,0x02,0x01,0x00,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0B,0x05,0x00,0x30,
+0x81,0x83,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,
+0x10,0x30,0x0E,0x06,0x03,0x55,0x04,0x08,0x13,0x07,0x41,0x72,0x69,0x7A,0x6F,0x6E,
+0x61,0x31,0x13,0x30,0x11,0x06,0x03,0x55,0x04,0x07,0x13,0x0A,0x53,0x63,0x6F,0x74,
+0x74,0x73,0x64,0x61,0x6C,0x65,0x31,0x1A,0x30,0x18,0x06,0x03,0x55,0x04,0x0A,0x13,
+0x11,0x47,0x6F,0x44,0x61,0x64,0x64,0x79,0x2E,0x63,0x6F,0x6D,0x2C,0x20,0x49,0x6E,
+0x63,0x2E,0x31,0x31,0x30,0x2F,0x06,0x03,0x55,0x04,0x03,0x13,0x28,0x47,0x6F,0x20,
+0x44,0x61,0x64,0x64,0x79,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x65,0x72,0x74,0x69,
+0x66,0x69,0x63,0x61,0x74,0x65,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,
+0x20,0x2D,0x20,0x47,0x32,0x30,0x1E,0x17,0x0D,0x30,0x39,0x30,0x39,0x30,0x31,0x30,
+0x30,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x33,0x37,0x31,0x32,0x33,0x31,0x32,0x33,
+0x35,0x39,0x35,0x39,0x5A,0x30,0x81,0x83,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,
+0x06,0x13,0x02,0x55,0x53,0x31,0x10,0x30,0x0E,0x06,0x03,0x55,0x04,0x08,0x13,0x07,
+0x41,0x72,0x69,0x7A,0x6F,0x6E,0x61,0x31,0x13,0x30,0x11,0x06,0x03,0x55,0x04,0x07,
+0x13,0x0A,0x53,0x63,0x6F,0x74,0x74,0x73,0x64,0x61,0x6C,0x65,0x31,0x1A,0x30,0x18,
+0x06,0x03,0x55,0x04,0x0A,0x13,0x11,0x47,0x6F,0x44,0x61,0x64,0x64,0x79,0x2E,0x63,
+0x6F,0x6D,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x31,0x31,0x30,0x2F,0x06,0x03,0x55,0x04,
+0x03,0x13,0x28,0x47,0x6F,0x20,0x44,0x61,0x64,0x64,0x79,0x20,0x52,0x6F,0x6F,0x74,
+0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x65,0x20,0x41,0x75,0x74,
+0x68,0x6F,0x72,0x69,0x74,0x79,0x20,0x2D,0x20,0x47,0x32,0x30,0x82,0x01,0x22,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,
+0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01,0x01,0x00,0xBF,0x71,0x62,0x08,
+0xF1,0xFA,0x59,0x34,0xF7,0x1B,0xC9,0x18,0xA3,0xF7,0x80,0x49,0x58,0xE9,0x22,0x83,
+0x13,0xA6,0xC5,0x20,0x43,0x01,0x3B,0x84,0xF1,0xE6,0x85,0x49,0x9F,0x27,0xEA,0xF6,
+0x84,0x1B,0x4E,0xA0,0xB4,0xDB,0x70,0x98,0xC7,0x32,0x01,0xB1,0x05,0x3E,0x07,0x4E,
+0xEE,0xF4,0xFA,0x4F,0x2F,0x59,0x30,0x22,0xE7,0xAB,0x19,0x56,0x6B,0xE2,0x80,0x07,
+0xFC,0xF3,0x16,0x75,0x80,0x39,0x51,0x7B,0xE5,0xF9,0x35,0xB6,0x74,0x4E,0xA9,0x8D,
+0x82,0x13,0xE4,0xB6,0x3F,0xA9,0x03,0x83,0xFA,0xA2,0xBE,0x8A,0x15,0x6A,0x7F,0xDE,
+0x0B,0xC3,0xB6,0x19,0x14,0x05,0xCA,0xEA,0xC3,0xA8,0x04,0x94,0x3B,0x46,0x7C,0x32,
+0x0D,0xF3,0x00,0x66,0x22,0xC8,0x8D,0x69,0x6D,0x36,0x8C,0x11,0x18,0xB7,0xD3,0xB2,
+0x1C,0x60,0xB4,0x38,0xFA,0x02,0x8C,0xCE,0xD3,0xDD,0x46,0x07,0xDE,0x0A,0x3E,0xEB,
+0x5D,0x7C,0xC8,0x7C,0xFB,0xB0,0x2B,0x53,0xA4,0x92,0x62,0x69,0x51,0x25,0x05,0x61,
+0x1A,0x44,0x81,0x8C,0x2C,0xA9,0x43,0x96,0x23,0xDF,0xAC,0x3A,0x81,0x9A,0x0E,0x29,
+0xC5,0x1C,0xA9,0xE9,0x5D,0x1E,0xB6,0x9E,0x9E,0x30,0x0A,0x39,0xCE,0xF1,0x88,0x80,
+0xFB,0x4B,0x5D,0xCC,0x32,0xEC,0x85,0x62,0x43,0x25,0x34,0x02,0x56,0x27,0x01,0x91,
+0xB4,0x3B,0x70,0x2A,0x3F,0x6E,0xB1,0xE8,0x9C,0x88,0x01,0x7D,0x9F,0xD4,0xF9,0xDB,
+0x53,0x6D,0x60,0x9D,0xBF,0x2C,0xE7,0x58,0xAB,0xB8,0x5F,0x46,0xFC,0xCE,0xC4,0x1B,
+0x03,0x3C,0x09,0xEB,0x49,0x31,0x5C,0x69,0x46,0xB3,0xE0,0x47,0x02,0x03,0x01,0x00,
+0x01,0xA3,0x42,0x30,0x40,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,
+0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,
+0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,
+0x14,0x3A,0x9A,0x85,0x07,0x10,0x67,0x28,0xB6,0xEF,0xF6,0xBD,0x05,0x41,0x6E,0x20,
+0xC1,0x94,0xDA,0x0F,0xDE,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,
+0x01,0x0B,0x05,0x00,0x03,0x82,0x01,0x01,0x00,0x99,0xDB,0x5D,0x79,0xD5,0xF9,0x97,
+0x59,0x67,0x03,0x61,0xF1,0x7E,0x3B,0x06,0x31,0x75,0x2D,0xA1,0x20,0x8E,0x4F,0x65,
+0x87,0xB4,0xF7,0xA6,0x9C,0xBC,0xD8,0xE9,0x2F,0xD0,0xDB,0x5A,0xEE,0xCF,0x74,0x8C,
+0x73,0xB4,0x38,0x42,0xDA,0x05,0x7B,0xF8,0x02,0x75,0xB8,0xFD,0xA5,0xB1,0xD7,0xAE,
+0xF6,0xD7,0xDE,0x13,0xCB,0x53,0x10,0x7E,0x8A,0x46,0xD1,0x97,0xFA,0xB7,0x2E,0x2B,
+0x11,0xAB,0x90,0xB0,0x27,0x80,0xF9,0xE8,0x9F,0x5A,0xE9,0x37,0x9F,0xAB,0xE4,0xDF,
+0x6C,0xB3,0x85,0x17,0x9D,0x3D,0xD9,0x24,0x4F,0x79,0x91,0x35,0xD6,0x5F,0x04,0xEB,
+0x80,0x83,0xAB,0x9A,0x02,0x2D,0xB5,0x10,0xF4,0xD8,0x90,0xC7,0x04,0x73,0x40,0xED,
+0x72,0x25,0xA0,0xA9,0x9F,0xEC,0x9E,0xAB,0x68,0x12,0x99,0x57,0xC6,0x8F,0x12,0x3A,
+0x09,0xA4,0xBD,0x44,0xFD,0x06,0x15,0x37,0xC1,0x9B,0xE4,0x32,0xA3,0xED,0x38,0xE8,
+0xD8,0x64,0xF3,0x2C,0x7E,0x14,0xFC,0x02,0xEA,0x9F,0xCD,0xFF,0x07,0x68,0x17,0xDB,
+0x22,0x90,0x38,0x2D,0x7A,0x8D,0xD1,0x54,0xF1,0x69,0xE3,0x5F,0x33,0xCA,0x7A,0x3D,
+0x7B,0x0A,0xE3,0xCA,0x7F,0x5F,0x39,0xE5,0xE2,0x75,0xBA,0xC5,0x76,0x18,0x33,0xCE,
+0x2C,0xF0,0x2F,0x4C,0xAD,0xF7,0xB1,0xE7,0xCE,0x4F,0xA8,0xC4,0x9B,0x4A,0x54,0x06,
+0xC5,0x7F,0x7D,0xD5,0x08,0x0F,0xE2,0x1C,0xFE,0x7E,0x17,0xB8,0xAC,0x5E,0xF6,0xD4,
+0x16,0xB2,0x43,0x09,0x0C,0x4D,0xF6,0xA7,0x6B,0xB4,0x99,0x84,0x65,0xCA,0x7A,0x88,
+0xE2,0xE2,0x44,0xBE,0x5C,0xF7,0xEA,0x1C,0xF5,
+};
+
+
+/* subject:/C=GB/ST=Greater Manchester/L=Salford/O=Comodo CA Limited/CN=Secure Certificate Services */
+/* issuer :/C=GB/ST=Greater Manchester/L=Salford/O=Comodo CA Limited/CN=Secure Certificate Services */
+
+
+const unsigned char Comodo_Secure_Services_root_certificate[1091]={
+0x30,0x82,0x04,0x3F,0x30,0x82,0x03,0x27,0xA0,0x03,0x02,0x01,0x02,0x02,0x01,0x01,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,
+0x7E,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x47,0x42,0x31,0x1B,
+0x30,0x19,0x06,0x03,0x55,0x04,0x08,0x0C,0x12,0x47,0x72,0x65,0x61,0x74,0x65,0x72,
+0x20,0x4D,0x61,0x6E,0x63,0x68,0x65,0x73,0x74,0x65,0x72,0x31,0x10,0x30,0x0E,0x06,
+0x03,0x55,0x04,0x07,0x0C,0x07,0x53,0x61,0x6C,0x66,0x6F,0x72,0x64,0x31,0x1A,0x30,
+0x18,0x06,0x03,0x55,0x04,0x0A,0x0C,0x11,0x43,0x6F,0x6D,0x6F,0x64,0x6F,0x20,0x43,
+0x41,0x20,0x4C,0x69,0x6D,0x69,0x74,0x65,0x64,0x31,0x24,0x30,0x22,0x06,0x03,0x55,
+0x04,0x03,0x0C,0x1B,0x53,0x65,0x63,0x75,0x72,0x65,0x20,0x43,0x65,0x72,0x74,0x69,
+0x66,0x69,0x63,0x61,0x74,0x65,0x20,0x53,0x65,0x72,0x76,0x69,0x63,0x65,0x73,0x30,
+0x1E,0x17,0x0D,0x30,0x34,0x30,0x31,0x30,0x31,0x30,0x30,0x30,0x30,0x30,0x30,0x5A,
+0x17,0x0D,0x32,0x38,0x31,0x32,0x33,0x31,0x32,0x33,0x35,0x39,0x35,0x39,0x5A,0x30,
+0x7E,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x47,0x42,0x31,0x1B,
+0x30,0x19,0x06,0x03,0x55,0x04,0x08,0x0C,0x12,0x47,0x72,0x65,0x61,0x74,0x65,0x72,
+0x20,0x4D,0x61,0x6E,0x63,0x68,0x65,0x73,0x74,0x65,0x72,0x31,0x10,0x30,0x0E,0x06,
+0x03,0x55,0x04,0x07,0x0C,0x07,0x53,0x61,0x6C,0x66,0x6F,0x72,0x64,0x31,0x1A,0x30,
+0x18,0x06,0x03,0x55,0x04,0x0A,0x0C,0x11,0x43,0x6F,0x6D,0x6F,0x64,0x6F,0x20,0x43,
+0x41,0x20,0x4C,0x69,0x6D,0x69,0x74,0x65,0x64,0x31,0x24,0x30,0x22,0x06,0x03,0x55,
+0x04,0x03,0x0C,0x1B,0x53,0x65,0x63,0x75,0x72,0x65,0x20,0x43,0x65,0x72,0x74,0x69,
+0x66,0x69,0x63,0x61,0x74,0x65,0x20,0x53,0x65,0x72,0x76,0x69,0x63,0x65,0x73,0x30,
+0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,
+0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01,0x01,0x00,
+0xC0,0x71,0x33,0x82,0x8A,0xD0,0x70,0xEB,0x73,0x87,0x82,0x40,0xD5,0x1D,0xE4,0xCB,
+0xC9,0x0E,0x42,0x90,0xF9,0xDE,0x34,0xB9,0xA1,0xBA,0x11,0xF4,0x25,0x85,0xF3,0xCC,
+0x72,0x6D,0xF2,0x7B,0x97,0x6B,0xB3,0x07,0xF1,0x77,0x24,0x91,0x5F,0x25,0x8F,0xF6,
+0x74,0x3D,0xE4,0x80,0xC2,0xF8,0x3C,0x0D,0xF3,0xBF,0x40,0xEA,0xF7,0xC8,0x52,0xD1,
+0x72,0x6F,0xEF,0xC8,0xAB,0x41,0xB8,0x6E,0x2E,0x17,0x2A,0x95,0x69,0x0C,0xCD,0xD2,
+0x1E,0x94,0x7B,0x2D,0x94,0x1D,0xAA,0x75,0xD7,0xB3,0x98,0xCB,0xAC,0xBC,0x64,0x53,
+0x40,0xBC,0x8F,0xAC,0xAC,0x36,0xCB,0x5C,0xAD,0xBB,0xDD,0xE0,0x94,0x17,0xEC,0xD1,
+0x5C,0xD0,0xBF,0xEF,0xA5,0x95,0xC9,0x90,0xC5,0xB0,0xAC,0xFB,0x1B,0x43,0xDF,0x7A,
+0x08,0x5D,0xB7,0xB8,0xF2,0x40,0x1B,0x2B,0x27,0x9E,0x50,0xCE,0x5E,0x65,0x82,0x88,
+0x8C,0x5E,0xD3,0x4E,0x0C,0x7A,0xEA,0x08,0x91,0xB6,0x36,0xAA,0x2B,0x42,0xFB,0xEA,
+0xC2,0xA3,0x39,0xE5,0xDB,0x26,0x38,0xAD,0x8B,0x0A,0xEE,0x19,0x63,0xC7,0x1C,0x24,
+0xDF,0x03,0x78,0xDA,0xE6,0xEA,0xC1,0x47,0x1A,0x0B,0x0B,0x46,0x09,0xDD,0x02,0xFC,
+0xDE,0xCB,0x87,0x5F,0xD7,0x30,0x63,0x68,0xA1,0xAE,0xDC,0x32,0xA1,0xBA,0xBE,0xFE,
+0x44,0xAB,0x68,0xB6,0xA5,0x17,0x15,0xFD,0xBD,0xD5,0xA7,0xA7,0x9A,0xE4,0x44,0x33,
+0xE9,0x88,0x8E,0xFC,0xED,0x51,0xEB,0x93,0x71,0x4E,0xAD,0x01,0xE7,0x44,0x8E,0xAB,
+0x2D,0xCB,0xA8,0xFE,0x01,0x49,0x48,0xF0,0xC0,0xDD,0xC7,0x68,0xD8,0x92,0xFE,0x3D,
+0x02,0x03,0x01,0x00,0x01,0xA3,0x81,0xC7,0x30,0x81,0xC4,0x30,0x1D,0x06,0x03,0x55,
+0x1D,0x0E,0x04,0x16,0x04,0x14,0x3C,0xD8,0x93,0x88,0xC2,0xC0,0x82,0x09,0xCC,0x01,
+0x99,0x06,0x93,0x20,0xE9,0x9E,0x70,0x09,0x63,0x4F,0x30,0x0E,0x06,0x03,0x55,0x1D,
+0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x0F,0x06,0x03,0x55,0x1D,
+0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x81,0x81,0x06,0x03,
+0x55,0x1D,0x1F,0x04,0x7A,0x30,0x78,0x30,0x3B,0xA0,0x39,0xA0,0x37,0x86,0x35,0x68,
+0x74,0x74,0x70,0x3A,0x2F,0x2F,0x63,0x72,0x6C,0x2E,0x63,0x6F,0x6D,0x6F,0x64,0x6F,
+0x63,0x61,0x2E,0x63,0x6F,0x6D,0x2F,0x53,0x65,0x63,0x75,0x72,0x65,0x43,0x65,0x72,
+0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x65,0x53,0x65,0x72,0x76,0x69,0x63,0x65,0x73,
+0x2E,0x63,0x72,0x6C,0x30,0x39,0xA0,0x37,0xA0,0x35,0x86,0x33,0x68,0x74,0x74,0x70,
+0x3A,0x2F,0x2F,0x63,0x72,0x6C,0x2E,0x63,0x6F,0x6D,0x6F,0x64,0x6F,0x2E,0x6E,0x65,
+0x74,0x2F,0x53,0x65,0x63,0x75,0x72,0x65,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,
+0x61,0x74,0x65,0x53,0x65,0x72,0x76,0x69,0x63,0x65,0x73,0x2E,0x63,0x72,0x6C,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x03,0x82,
+0x01,0x01,0x00,0x87,0x01,0x6D,0x23,0x1D,0x7E,0x5B,0x17,0x7D,0xC1,0x61,0x32,0xCF,
+0x8F,0xE7,0xF3,0x8A,0x94,0x59,0x66,0xE0,0x9E,0x28,0xA8,0x5E,0xD3,0xB7,0xF4,0x34,
+0xE6,0xAA,0x39,0xB2,0x97,0x16,0xC5,0x82,0x6F,0x32,0xA4,0xE9,0x8C,0xE7,0xAF,0xFD,
+0xEF,0xC2,0xE8,0xB9,0x4B,0xAA,0xA3,0xF4,0xE6,0xDA,0x8D,0x65,0x21,0xFB,0xBA,0x80,
+0xEB,0x26,0x28,0x85,0x1A,0xFE,0x39,0x8C,0xDE,0x5B,0x04,0x04,0xB4,0x54,0xF9,0xA3,
+0x67,0x9E,0x41,0xFA,0x09,0x52,0xCC,0x05,0x48,0xA8,0xC9,0x3F,0x21,0x04,0x1E,0xCE,
+0x48,0x6B,0xFC,0x85,0xE8,0xC2,0x7B,0xAF,0x7F,0xB7,0xCC,0xF8,0x5F,0x3A,0xFD,0x35,
+0xC6,0x0D,0xEF,0x97,0xDC,0x4C,0xAB,0x11,0xE1,0x6B,0xCB,0x31,0xD1,0x6C,0xFB,0x48,
+0x80,0xAB,0xDC,0x9C,0x37,0xB8,0x21,0x14,0x4B,0x0D,0x71,0x3D,0xEC,0x83,0x33,0x6E,
+0xD1,0x6E,0x32,0x16,0xEC,0x98,0xC7,0x16,0x8B,0x59,0xA6,0x34,0xAB,0x05,0x57,0x2D,
+0x93,0xF7,0xAA,0x13,0xCB,0xD2,0x13,0xE2,0xB7,0x2E,0x3B,0xCD,0x6B,0x50,0x17,0x09,
+0x68,0x3E,0xB5,0x26,0x57,0xEE,0xB6,0xE0,0xB6,0xDD,0xB9,0x29,0x80,0x79,0x7D,0x8F,
+0xA3,0xF0,0xA4,0x28,0xA4,0x15,0xC4,0x85,0xF4,0x27,0xD4,0x6B,0xBF,0xE5,0x5C,0xE4,
+0x65,0x02,0x76,0x54,0xB4,0xE3,0x37,0x66,0x24,0xD3,0x19,0x61,0xC8,0x52,0x10,0xE5,
+0x8B,0x37,0x9A,0xB9,0xA9,0xF9,0x1D,0xBF,0xEA,0x99,0x92,0x61,0x96,0xFF,0x01,0xCD,
+0xA1,0x5F,0x0D,0xBC,0x71,0xBC,0x0E,0xAC,0x0B,0x1D,0x47,0x45,0x1D,0xC1,0xEC,0x7C,
+0xEC,0xFD,0x29,
+};
+
+
+/* subject:/C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert Trusted Root G4 */
+/* issuer :/C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert Trusted Root G4 */
+
+
+const unsigned char DigiCert_Trusted_Root_G4_certificate[1428]={
+0x30,0x82,0x05,0x90,0x30,0x82,0x03,0x78,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x05,
+0x9B,0x1B,0x57,0x9E,0x8E,0x21,0x32,0xE2,0x39,0x07,0xBD,0xA7,0x77,0x75,0x5C,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0C,0x05,0x00,0x30,0x62,
+0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x15,0x30,
+0x13,0x06,0x03,0x55,0x04,0x0A,0x13,0x0C,0x44,0x69,0x67,0x69,0x43,0x65,0x72,0x74,
+0x20,0x49,0x6E,0x63,0x31,0x19,0x30,0x17,0x06,0x03,0x55,0x04,0x0B,0x13,0x10,0x77,
+0x77,0x77,0x2E,0x64,0x69,0x67,0x69,0x63,0x65,0x72,0x74,0x2E,0x63,0x6F,0x6D,0x31,
+0x21,0x30,0x1F,0x06,0x03,0x55,0x04,0x03,0x13,0x18,0x44,0x69,0x67,0x69,0x43,0x65,
+0x72,0x74,0x20,0x54,0x72,0x75,0x73,0x74,0x65,0x64,0x20,0x52,0x6F,0x6F,0x74,0x20,
+0x47,0x34,0x30,0x1E,0x17,0x0D,0x31,0x33,0x30,0x38,0x30,0x31,0x31,0x32,0x30,0x30,
+0x30,0x30,0x5A,0x17,0x0D,0x33,0x38,0x30,0x31,0x31,0x35,0x31,0x32,0x30,0x30,0x30,
+0x30,0x5A,0x30,0x62,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,
+0x53,0x31,0x15,0x30,0x13,0x06,0x03,0x55,0x04,0x0A,0x13,0x0C,0x44,0x69,0x67,0x69,
+0x43,0x65,0x72,0x74,0x20,0x49,0x6E,0x63,0x31,0x19,0x30,0x17,0x06,0x03,0x55,0x04,
+0x0B,0x13,0x10,0x77,0x77,0x77,0x2E,0x64,0x69,0x67,0x69,0x63,0x65,0x72,0x74,0x2E,
+0x63,0x6F,0x6D,0x31,0x21,0x30,0x1F,0x06,0x03,0x55,0x04,0x03,0x13,0x18,0x44,0x69,
+0x67,0x69,0x43,0x65,0x72,0x74,0x20,0x54,0x72,0x75,0x73,0x74,0x65,0x64,0x20,0x52,
+0x6F,0x6F,0x74,0x20,0x47,0x34,0x30,0x82,0x02,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,
+0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x02,0x0F,0x00,0x30,0x82,
+0x02,0x0A,0x02,0x82,0x02,0x01,0x00,0xBF,0xE6,0x90,0x73,0x68,0xDE,0xBB,0xE4,0x5D,
+0x4A,0x3C,0x30,0x22,0x30,0x69,0x33,0xEC,0xC2,0xA7,0x25,0x2E,0xC9,0x21,0x3D,0xF2,
+0x8A,0xD8,0x59,0xC2,0xE1,0x29,0xA7,0x3D,0x58,0xAB,0x76,0x9A,0xCD,0xAE,0x7B,0x1B,
+0x84,0x0D,0xC4,0x30,0x1F,0xF3,0x1B,0xA4,0x38,0x16,0xEB,0x56,0xC6,0x97,0x6D,0x1D,
+0xAB,0xB2,0x79,0xF2,0xCA,0x11,0xD2,0xE4,0x5F,0xD6,0x05,0x3C,0x52,0x0F,0x52,0x1F,
+0xC6,0x9E,0x15,0xA5,0x7E,0xBE,0x9F,0xA9,0x57,0x16,0x59,0x55,0x72,0xAF,0x68,0x93,
+0x70,0xC2,0xB2,0xBA,0x75,0x99,0x6A,0x73,0x32,0x94,0xD1,0x10,0x44,0x10,0x2E,0xDF,
+0x82,0xF3,0x07,0x84,0xE6,0x74,0x3B,0x6D,0x71,0xE2,0x2D,0x0C,0x1B,0xEE,0x20,0xD5,
+0xC9,0x20,0x1D,0x63,0x29,0x2D,0xCE,0xEC,0x5E,0x4E,0xC8,0x93,0xF8,0x21,0x61,0x9B,
+0x34,0xEB,0x05,0xC6,0x5E,0xEC,0x5B,0x1A,0xBC,0xEB,0xC9,0xCF,0xCD,0xAC,0x34,0x40,
+0x5F,0xB1,0x7A,0x66,0xEE,0x77,0xC8,0x48,0xA8,0x66,0x57,0x57,0x9F,0x54,0x58,0x8E,
+0x0C,0x2B,0xB7,0x4F,0xA7,0x30,0xD9,0x56,0xEE,0xCA,0x7B,0x5D,0xE3,0xAD,0xC9,0x4F,
+0x5E,0xE5,0x35,0xE7,0x31,0xCB,0xDA,0x93,0x5E,0xDC,0x8E,0x8F,0x80,0xDA,0xB6,0x91,
+0x98,0x40,0x90,0x79,0xC3,0x78,0xC7,0xB6,0xB1,0xC4,0xB5,0x6A,0x18,0x38,0x03,0x10,
+0x8D,0xD8,0xD4,0x37,0xA4,0x2E,0x05,0x7D,0x88,0xF5,0x82,0x3E,0x10,0x91,0x70,0xAB,
+0x55,0x82,0x41,0x32,0xD7,0xDB,0x04,0x73,0x2A,0x6E,0x91,0x01,0x7C,0x21,0x4C,0xD4,
+0xBC,0xAE,0x1B,0x03,0x75,0x5D,0x78,0x66,0xD9,0x3A,0x31,0x44,0x9A,0x33,0x40,0xBF,
+0x08,0xD7,0x5A,0x49,0xA4,0xC2,0xE6,0xA9,0xA0,0x67,0xDD,0xA4,0x27,0xBC,0xA1,0x4F,
+0x39,0xB5,0x11,0x58,0x17,0xF7,0x24,0x5C,0x46,0x8F,0x64,0xF7,0xC1,0x69,0x88,0x76,
+0x98,0x76,0x3D,0x59,0x5D,0x42,0x76,0x87,0x89,0x97,0x69,0x7A,0x48,0xF0,0xE0,0xA2,
+0x12,0x1B,0x66,0x9A,0x74,0xCA,0xDE,0x4B,0x1E,0xE7,0x0E,0x63,0xAE,0xE6,0xD4,0xEF,
+0x92,0x92,0x3A,0x9E,0x3D,0xDC,0x00,0xE4,0x45,0x25,0x89,0xB6,0x9A,0x44,0x19,0x2B,
+0x7E,0xC0,0x94,0xB4,0xD2,0x61,0x6D,0xEB,0x33,0xD9,0xC5,0xDF,0x4B,0x04,0x00,0xCC,
+0x7D,0x1C,0x95,0xC3,0x8F,0xF7,0x21,0xB2,0xB2,0x11,0xB7,0xBB,0x7F,0xF2,0xD5,0x8C,
+0x70,0x2C,0x41,0x60,0xAA,0xB1,0x63,0x18,0x44,0x95,0x1A,0x76,0x62,0x7E,0xF6,0x80,
+0xB0,0xFB,0xE8,0x64,0xA6,0x33,0xD1,0x89,0x07,0xE1,0xBD,0xB7,0xE6,0x43,0xA4,0x18,
+0xB8,0xA6,0x77,0x01,0xE1,0x0F,0x94,0x0C,0x21,0x1D,0xB2,0x54,0x29,0x25,0x89,0x6C,
+0xE5,0x0E,0x52,0x51,0x47,0x74,0xBE,0x26,0xAC,0xB6,0x41,0x75,0xDE,0x7A,0xAC,0x5F,
+0x8D,0x3F,0xC9,0xBC,0xD3,0x41,0x11,0x12,0x5B,0xE5,0x10,0x50,0xEB,0x31,0xC5,0xCA,
+0x72,0x16,0x22,0x09,0xDF,0x7C,0x4C,0x75,0x3F,0x63,0xEC,0x21,0x5F,0xC4,0x20,0x51,
+0x6B,0x6F,0xB1,0xAB,0x86,0x8B,0x4F,0xC2,0xD6,0x45,0x5F,0x9D,0x20,0xFC,0xA1,0x1E,
+0xC5,0xC0,0x8F,0xA2,0xB1,0x7E,0x0A,0x26,0x99,0xF5,0xE4,0x69,0x2F,0x98,0x1D,0x2D,
+0xF5,0xD9,0xA9,0xB2,0x1D,0xE5,0x1B,0x02,0x03,0x01,0x00,0x01,0xA3,0x42,0x30,0x40,
+0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,
+0xFF,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,
+0x86,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0xEC,0xD7,0xE3,0x82,
+0xD2,0x71,0x5D,0x64,0x4C,0xDF,0x2E,0x67,0x3F,0xE7,0xBA,0x98,0xAE,0x1C,0x0F,0x4F,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0C,0x05,0x00,0x03,
+0x82,0x02,0x01,0x00,0xBB,0x61,0xD9,0x7D,0xA9,0x6C,0xBE,0x17,0xC4,0x91,0x1B,0xC3,
+0xA1,0xA2,0x00,0x8D,0xE3,0x64,0x68,0x0F,0x56,0xCF,0x77,0xAE,0x70,0xF9,0xFD,0x9A,
+0x4A,0x99,0xB9,0xC9,0x78,0x5C,0x0C,0x0C,0x5F,0xE4,0xE6,0x14,0x29,0x56,0x0B,0x36,
+0x49,0x5D,0x44,0x63,0xE0,0xAD,0x9C,0x96,0x18,0x66,0x1B,0x23,0x0D,0x3D,0x79,0xE9,
+0x6D,0x6B,0xD6,0x54,0xF8,0xD2,0x3C,0xC1,0x43,0x40,0xAE,0x1D,0x50,0xF5,0x52,0xFC,
+0x90,0x3B,0xBB,0x98,0x99,0x69,0x6B,0xC7,0xC1,0xA7,0xA8,0x68,0xA4,0x27,0xDC,0x9D,
+0xF9,0x27,0xAE,0x30,0x85,0xB9,0xF6,0x67,0x4D,0x3A,0x3E,0x8F,0x59,0x39,0x22,0x53,
+0x44,0xEB,0xC8,0x5D,0x03,0xCA,0xED,0x50,0x7A,0x7D,0x62,0x21,0x0A,0x80,0xC8,0x73,
+0x66,0xD1,0xA0,0x05,0x60,0x5F,0xE8,0xA5,0xB4,0xA7,0xAF,0xA8,0xF7,0x6D,0x35,0x9C,
+0x7C,0x5A,0x8A,0xD6,0xA2,0x38,0x99,0xF3,0x78,0x8B,0xF4,0x4D,0xD2,0x20,0x0B,0xDE,
+0x04,0xEE,0x8C,0x9B,0x47,0x81,0x72,0x0D,0xC0,0x14,0x32,0xEF,0x30,0x59,0x2E,0xAE,
+0xE0,0x71,0xF2,0x56,0xE4,0x6A,0x97,0x6F,0x92,0x50,0x6D,0x96,0x8D,0x68,0x7A,0x9A,
+0xB2,0x36,0x14,0x7A,0x06,0xF2,0x24,0xB9,0x09,0x11,0x50,0xD7,0x08,0xB1,0xB8,0x89,
+0x7A,0x84,0x23,0x61,0x42,0x29,0xE5,0xA3,0xCD,0xA2,0x20,0x41,0xD7,0xD1,0x9C,0x64,
+0xD9,0xEA,0x26,0xA1,0x8B,0x14,0xD7,0x4C,0x19,0xB2,0x50,0x41,0x71,0x3D,0x3F,0x4D,
+0x70,0x23,0x86,0x0C,0x4A,0xDC,0x81,0xD2,0xCC,0x32,0x94,0x84,0x0D,0x08,0x09,0x97,
+0x1C,0x4F,0xC0,0xEE,0x6B,0x20,0x74,0x30,0xD2,0xE0,0x39,0x34,0x10,0x85,0x21,0x15,
+0x01,0x08,0xE8,0x55,0x32,0xDE,0x71,0x49,0xD9,0x28,0x17,0x50,0x4D,0xE6,0xBE,0x4D,
+0xD1,0x75,0xAC,0xD0,0xCA,0xFB,0x41,0xB8,0x43,0xA5,0xAA,0xD3,0xC3,0x05,0x44,0x4F,
+0x2C,0x36,0x9B,0xE2,0xFA,0xE2,0x45,0xB8,0x23,0x53,0x6C,0x06,0x6F,0x67,0x55,0x7F,
+0x46,0xB5,0x4C,0x3F,0x6E,0x28,0x5A,0x79,0x26,0xD2,0xA4,0xA8,0x62,0x97,0xD2,0x1E,
+0xE2,0xED,0x4A,0x8B,0xBC,0x1B,0xFD,0x47,0x4A,0x0D,0xDF,0x67,0x66,0x7E,0xB2,0x5B,
+0x41,0xD0,0x3B,0xE4,0xF4,0x3B,0xF4,0x04,0x63,0xE9,0xEF,0xC2,0x54,0x00,0x51,0xA0,
+0x8A,0x2A,0xC9,0xCE,0x78,0xCC,0xD5,0xEA,0x87,0x04,0x18,0xB3,0xCE,0xAF,0x49,0x88,
+0xAF,0xF3,0x92,0x99,0xB6,0xB3,0xE6,0x61,0x0F,0xD2,0x85,0x00,0xE7,0x50,0x1A,0xE4,
+0x1B,0x95,0x9D,0x19,0xA1,0xB9,0x9C,0xB1,0x9B,0xB1,0x00,0x1E,0xEF,0xD0,0x0F,0x4F,
+0x42,0x6C,0xC9,0x0A,0xBC,0xEE,0x43,0xFA,0x3A,0x71,0xA5,0xC8,0x4D,0x26,0xA5,0x35,
+0xFD,0x89,0x5D,0xBC,0x85,0x62,0x1D,0x32,0xD2,0xA0,0x2B,0x54,0xED,0x9A,0x57,0xC1,
+0xDB,0xFA,0x10,0xCF,0x19,0xB7,0x8B,0x4A,0x1B,0x8F,0x01,0xB6,0x27,0x95,0x53,0xE8,
+0xB6,0x89,0x6D,0x5B,0xBC,0x68,0xD4,0x23,0xE8,0x8B,0x51,0xA2,0x56,0xF9,0xF0,0xA6,
+0x80,0xA0,0xD6,0x1E,0xB3,0xBC,0x0F,0x0F,0x53,0x75,0x29,0xAA,0xEA,0x13,0x77,0xE4,
+0xDE,0x8C,0x81,0x21,0xAD,0x07,0x10,0x47,0x11,0xAD,0x87,0x3D,0x07,0xD1,0x75,0xBC,
+0xCF,0xF3,0x66,0x7E,
+};
+
+
+/* subject:/OU=GlobalSign ECC Root CA - R5/O=GlobalSign/CN=GlobalSign */
+/* issuer :/OU=GlobalSign ECC Root CA - R5/O=GlobalSign/CN=GlobalSign */
+
+
+const unsigned char GlobalSign_ECC_Root_CA___R5_certificate[546]={
+0x30,0x82,0x02,0x1E,0x30,0x82,0x01,0xA4,0xA0,0x03,0x02,0x01,0x02,0x02,0x11,0x60,
+0x59,0x49,0xE0,0x26,0x2E,0xBB,0x55,0xF9,0x0A,0x77,0x8A,0x71,0xF9,0x4A,0xD8,0x6C,
+0x30,0x0A,0x06,0x08,0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x03,0x30,0x50,0x31,0x24,
+0x30,0x22,0x06,0x03,0x55,0x04,0x0B,0x13,0x1B,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,
+0x69,0x67,0x6E,0x20,0x45,0x43,0x43,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x41,0x20,
+0x2D,0x20,0x52,0x35,0x31,0x13,0x30,0x11,0x06,0x03,0x55,0x04,0x0A,0x13,0x0A,0x47,
+0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,0x31,0x13,0x30,0x11,0x06,0x03,0x55,
+0x04,0x03,0x13,0x0A,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,0x30,0x1E,
+0x17,0x0D,0x31,0x32,0x31,0x31,0x31,0x33,0x30,0x30,0x30,0x30,0x30,0x30,0x5A,0x17,
+0x0D,0x33,0x38,0x30,0x31,0x31,0x39,0x30,0x33,0x31,0x34,0x30,0x37,0x5A,0x30,0x50,
+0x31,0x24,0x30,0x22,0x06,0x03,0x55,0x04,0x0B,0x13,0x1B,0x47,0x6C,0x6F,0x62,0x61,
+0x6C,0x53,0x69,0x67,0x6E,0x20,0x45,0x43,0x43,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,
+0x41,0x20,0x2D,0x20,0x52,0x35,0x31,0x13,0x30,0x11,0x06,0x03,0x55,0x04,0x0A,0x13,
+0x0A,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,0x31,0x13,0x30,0x11,0x06,
+0x03,0x55,0x04,0x03,0x13,0x0A,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,
+0x30,0x76,0x30,0x10,0x06,0x07,0x2A,0x86,0x48,0xCE,0x3D,0x02,0x01,0x06,0x05,0x2B,
+0x81,0x04,0x00,0x22,0x03,0x62,0x00,0x04,0x47,0x45,0x0E,0x96,0xFB,0x7D,0x5D,0xBF,
+0xE9,0x39,0xD1,0x21,0xF8,0x9F,0x0B,0xB6,0xD5,0x7B,0x1E,0x92,0x3A,0x48,0x59,0x1C,
+0xF0,0x62,0x31,0x2D,0xC0,0x7A,0x28,0xFE,0x1A,0xA7,0x5C,0xB3,0xB6,0xCC,0x97,0xE7,
+0x45,0xD4,0x58,0xFA,0xD1,0x77,0x6D,0x43,0xA2,0xC0,0x87,0x65,0x34,0x0A,0x1F,0x7A,
+0xDD,0xEB,0x3C,0x33,0xA1,0xC5,0x9D,0x4D,0xA4,0x6F,0x41,0x95,0x38,0x7F,0xC9,0x1E,
+0x84,0xEB,0xD1,0x9E,0x49,0x92,0x87,0x94,0x87,0x0C,0x3A,0x85,0x4A,0x66,0x9F,0x9D,
+0x59,0x93,0x4D,0x97,0x61,0x06,0x86,0x4A,0xA3,0x42,0x30,0x40,0x30,0x0E,0x06,0x03,
+0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x0F,0x06,0x03,
+0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x1D,0x06,
+0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x3D,0xE6,0x29,0x48,0x9B,0xEA,0x07,0xCA,
+0x21,0x44,0x4A,0x26,0xDE,0x6E,0xDE,0xD2,0x83,0xD0,0x9F,0x59,0x30,0x0A,0x06,0x08,
+0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x03,0x03,0x68,0x00,0x30,0x65,0x02,0x31,0x00,
+0xE5,0x69,0x12,0xC9,0x6E,0xDB,0xC6,0x31,0xBA,0x09,0x41,0xE1,0x97,0xF8,0xFB,0xFD,
+0x9A,0xE2,0x7D,0x12,0xC9,0xED,0x7C,0x64,0xD3,0xCB,0x05,0x25,0x8B,0x56,0xD9,0xA0,
+0xE7,0x5E,0x5D,0x4E,0x0B,0x83,0x9C,0x5B,0x76,0x29,0xA0,0x09,0x26,0x21,0x6A,0x62,
+0x02,0x30,0x71,0xD2,0xB5,0x8F,0x5C,0xEA,0x3B,0xE1,0x78,0x09,0x85,0xA8,0x75,0x92,
+0x3B,0xC8,0x5C,0xFD,0x48,0xEF,0x0D,0x74,0x22,0xA8,0x08,0xE2,0x6E,0xC5,0x49,0xCE,
+0xC7,0x0C,0xBC,0xA7,0x61,0x69,0xF1,0xF7,0x3B,0xE1,0x2A,0xCB,0xF9,0x2B,0xF3,0x66,
+0x90,0x37,
+};
+
+
+/* subject:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware */
+/* issuer :/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware */
+
+
+const unsigned char UTN_USERFirst_Hardware_Root_CA_certificate[1144]={
+0x30,0x82,0x04,0x74,0x30,0x82,0x03,0x5C,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x44,
+0xBE,0x0C,0x8B,0x50,0x00,0x24,0xB4,0x11,0xD3,0x36,0x2A,0xFE,0x65,0x0A,0xFD,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,0x81,
+0x97,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x0B,
+0x30,0x09,0x06,0x03,0x55,0x04,0x08,0x13,0x02,0x55,0x54,0x31,0x17,0x30,0x15,0x06,
+0x03,0x55,0x04,0x07,0x13,0x0E,0x53,0x61,0x6C,0x74,0x20,0x4C,0x61,0x6B,0x65,0x20,
+0x43,0x69,0x74,0x79,0x31,0x1E,0x30,0x1C,0x06,0x03,0x55,0x04,0x0A,0x13,0x15,0x54,
+0x68,0x65,0x20,0x55,0x53,0x45,0x52,0x54,0x52,0x55,0x53,0x54,0x20,0x4E,0x65,0x74,
+0x77,0x6F,0x72,0x6B,0x31,0x21,0x30,0x1F,0x06,0x03,0x55,0x04,0x0B,0x13,0x18,0x68,
+0x74,0x74,0x70,0x3A,0x2F,0x2F,0x77,0x77,0x77,0x2E,0x75,0x73,0x65,0x72,0x74,0x72,
+0x75,0x73,0x74,0x2E,0x63,0x6F,0x6D,0x31,0x1F,0x30,0x1D,0x06,0x03,0x55,0x04,0x03,
+0x13,0x16,0x55,0x54,0x4E,0x2D,0x55,0x53,0x45,0x52,0x46,0x69,0x72,0x73,0x74,0x2D,
+0x48,0x61,0x72,0x64,0x77,0x61,0x72,0x65,0x30,0x1E,0x17,0x0D,0x39,0x39,0x30,0x37,
+0x30,0x39,0x31,0x38,0x31,0x30,0x34,0x32,0x5A,0x17,0x0D,0x31,0x39,0x30,0x37,0x30,
+0x39,0x31,0x38,0x31,0x39,0x32,0x32,0x5A,0x30,0x81,0x97,0x31,0x0B,0x30,0x09,0x06,
+0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,
+0x08,0x13,0x02,0x55,0x54,0x31,0x17,0x30,0x15,0x06,0x03,0x55,0x04,0x07,0x13,0x0E,
+0x53,0x61,0x6C,0x74,0x20,0x4C,0x61,0x6B,0x65,0x20,0x43,0x69,0x74,0x79,0x31,0x1E,
+0x30,0x1C,0x06,0x03,0x55,0x04,0x0A,0x13,0x15,0x54,0x68,0x65,0x20,0x55,0x53,0x45,
+0x52,0x54,0x52,0x55,0x53,0x54,0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,0x6B,0x31,0x21,
+0x30,0x1F,0x06,0x03,0x55,0x04,0x0B,0x13,0x18,0x68,0x74,0x74,0x70,0x3A,0x2F,0x2F,
+0x77,0x77,0x77,0x2E,0x75,0x73,0x65,0x72,0x74,0x72,0x75,0x73,0x74,0x2E,0x63,0x6F,
+0x6D,0x31,0x1F,0x30,0x1D,0x06,0x03,0x55,0x04,0x03,0x13,0x16,0x55,0x54,0x4E,0x2D,
+0x55,0x53,0x45,0x52,0x46,0x69,0x72,0x73,0x74,0x2D,0x48,0x61,0x72,0x64,0x77,0x61,
+0x72,0x65,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,
+0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,
+0x01,0x01,0x00,0xB1,0xF7,0xC3,0x38,0x3F,0xB4,0xA8,0x7F,0xCF,0x39,0x82,0x51,0x67,
+0xD0,0x6D,0x9F,0xD2,0xFF,0x58,0xF3,0xE7,0x9F,0x2B,0xEC,0x0D,0x89,0x54,0x99,0xB9,
+0x38,0x99,0x16,0xF7,0xE0,0x21,0x79,0x48,0xC2,0xBB,0x61,0x74,0x12,0x96,0x1D,0x3C,
+0x6A,0x72,0xD5,0x3C,0x10,0x67,0x3A,0x39,0xED,0x2B,0x13,0xCD,0x66,0xEB,0x95,0x09,
+0x33,0xA4,0x6C,0x97,0xB1,0xE8,0xC6,0xEC,0xC1,0x75,0x79,0x9C,0x46,0x5E,0x8D,0xAB,
+0xD0,0x6A,0xFD,0xB9,0x2A,0x55,0x17,0x10,0x54,0xB3,0x19,0xF0,0x9A,0xF6,0xF1,0xB1,
+0x5D,0xB6,0xA7,0x6D,0xFB,0xE0,0x71,0x17,0x6B,0xA2,0x88,0xFB,0x00,0xDF,0xFE,0x1A,
+0x31,0x77,0x0C,0x9A,0x01,0x7A,0xB1,0x32,0xE3,0x2B,0x01,0x07,0x38,0x6E,0xC3,0xA5,
+0x5E,0x23,0xBC,0x45,0x9B,0x7B,0x50,0xC1,0xC9,0x30,0x8F,0xDB,0xE5,0x2B,0x7A,0xD3,
+0x5B,0xFB,0x33,0x40,0x1E,0xA0,0xD5,0x98,0x17,0xBC,0x8B,0x87,0xC3,0x89,0xD3,0x5D,
+0xA0,0x8E,0xB2,0xAA,0xAA,0xF6,0x8E,0x69,0x88,0x06,0xC5,0xFA,0x89,0x21,0xF3,0x08,
+0x9D,0x69,0x2E,0x09,0x33,0x9B,0x29,0x0D,0x46,0x0F,0x8C,0xCC,0x49,0x34,0xB0,0x69,
+0x51,0xBD,0xF9,0x06,0xCD,0x68,0xAD,0x66,0x4C,0xBC,0x3E,0xAC,0x61,0xBD,0x0A,0x88,
+0x0E,0xC8,0xDF,0x3D,0xEE,0x7C,0x04,0x4C,0x9D,0x0A,0x5E,0x6B,0x91,0xD6,0xEE,0xC7,
+0xED,0x28,0x8D,0xAB,0x4D,0x87,0x89,0x73,0xD0,0x6E,0xA4,0xD0,0x1E,0x16,0x8B,0x14,
+0xE1,0x76,0x44,0x03,0x7F,0x63,0xAC,0xE4,0xCD,0x49,0x9C,0xC5,0x92,0xF4,0xAB,0x32,
+0xA1,0x48,0x5B,0x02,0x03,0x01,0x00,0x01,0xA3,0x81,0xB9,0x30,0x81,0xB6,0x30,0x0B,
+0x06,0x03,0x55,0x1D,0x0F,0x04,0x04,0x03,0x02,0x01,0xC6,0x30,0x0F,0x06,0x03,0x55,
+0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x1D,0x06,0x03,
+0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0xA1,0x72,0x5F,0x26,0x1B,0x28,0x98,0x43,0x95,
+0x5D,0x07,0x37,0xD5,0x85,0x96,0x9D,0x4B,0xD2,0xC3,0x45,0x30,0x44,0x06,0x03,0x55,
+0x1D,0x1F,0x04,0x3D,0x30,0x3B,0x30,0x39,0xA0,0x37,0xA0,0x35,0x86,0x33,0x68,0x74,
+0x74,0x70,0x3A,0x2F,0x2F,0x63,0x72,0x6C,0x2E,0x75,0x73,0x65,0x72,0x74,0x72,0x75,
+0x73,0x74,0x2E,0x63,0x6F,0x6D,0x2F,0x55,0x54,0x4E,0x2D,0x55,0x53,0x45,0x52,0x46,
+0x69,0x72,0x73,0x74,0x2D,0x48,0x61,0x72,0x64,0x77,0x61,0x72,0x65,0x2E,0x63,0x72,
+0x6C,0x30,0x31,0x06,0x03,0x55,0x1D,0x25,0x04,0x2A,0x30,0x28,0x06,0x08,0x2B,0x06,
+0x01,0x05,0x05,0x07,0x03,0x01,0x06,0x08,0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x05,
+0x06,0x08,0x2B,0x06,0x01,0x05,0x05,0x07,0x03,0x06,0x06,0x08,0x2B,0x06,0x01,0x05,
+0x05,0x07,0x03,0x07,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,
+0x05,0x05,0x00,0x03,0x82,0x01,0x01,0x00,0x47,0x19,0x0F,0xDE,0x74,0xC6,0x99,0x97,
+0xAF,0xFC,0xAD,0x28,0x5E,0x75,0x8E,0xEB,0x2D,0x67,0xEE,0x4E,0x7B,0x2B,0xD7,0x0C,
+0xFF,0xF6,0xDE,0xCB,0x55,0xA2,0x0A,0xE1,0x4C,0x54,0x65,0x93,0x60,0x6B,0x9F,0x12,
+0x9C,0xAD,0x5E,0x83,0x2C,0xEB,0x5A,0xAE,0xC0,0xE4,0x2D,0xF4,0x00,0x63,0x1D,0xB8,
+0xC0,0x6C,0xF2,0xCF,0x49,0xBB,0x4D,0x93,0x6F,0x06,0xA6,0x0A,0x22,0xB2,0x49,0x62,
+0x08,0x4E,0xFF,0xC8,0xC8,0x14,0xB2,0x88,0x16,0x5D,0xE7,0x01,0xE4,0x12,0x95,0xE5,
+0x45,0x34,0xB3,0x8B,0x69,0xBD,0xCF,0xB4,0x85,0x8F,0x75,0x51,0x9E,0x7D,0x3A,0x38,
+0x3A,0x14,0x48,0x12,0xC6,0xFB,0xA7,0x3B,0x1A,0x8D,0x0D,0x82,0x40,0x07,0xE8,0x04,
+0x08,0x90,0xA1,0x89,0xCB,0x19,0x50,0xDF,0xCA,0x1C,0x01,0xBC,0x1D,0x04,0x19,0x7B,
+0x10,0x76,0x97,0x3B,0xEE,0x90,0x90,0xCA,0xC4,0x0E,0x1F,0x16,0x6E,0x75,0xEF,0x33,
+0xF8,0xD3,0x6F,0x5B,0x1E,0x96,0xE3,0xE0,0x74,0x77,0x74,0x7B,0x8A,0xA2,0x6E,0x2D,
+0xDD,0x76,0xD6,0x39,0x30,0x82,0xF0,0xAB,0x9C,0x52,0xF2,0x2A,0xC7,0xAF,0x49,0x5E,
+0x7E,0xC7,0x68,0xE5,0x82,0x81,0xC8,0x6A,0x27,0xF9,0x27,0x88,0x2A,0xD5,0x58,0x50,
+0x95,0x1F,0xF0,0x3B,0x1C,0x57,0xBB,0x7D,0x14,0x39,0x62,0x2B,0x9A,0xC9,0x94,0x92,
+0x2A,0xA3,0x22,0x0C,0xFF,0x89,0x26,0x7D,0x5F,0x23,0x2B,0x47,0xD7,0x15,0x1D,0xA9,
+0x6A,0x9E,0x51,0x0D,0x2A,0x51,0x9E,0x81,0xF9,0xD4,0x3B,0x5E,0x70,0x12,0x7F,0x10,
+0x32,0x9C,0x1E,0xBB,0x9D,0xF8,0x66,0xA8,
+};
+
+
+/* subject:/OU=GlobalSign ECC Root CA - R4/O=GlobalSign/CN=GlobalSign */
+/* issuer :/OU=GlobalSign ECC Root CA - R4/O=GlobalSign/CN=GlobalSign */
+
+
+const unsigned char GlobalSign_ECC_Root_CA___R4_certificate[485]={
+0x30,0x82,0x01,0xE1,0x30,0x82,0x01,0x87,0xA0,0x03,0x02,0x01,0x02,0x02,0x11,0x2A,
+0x38,0xA4,0x1C,0x96,0x0A,0x04,0xDE,0x42,0xB2,0x28,0xA5,0x0B,0xE8,0x34,0x98,0x02,
+0x30,0x0A,0x06,0x08,0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x02,0x30,0x50,0x31,0x24,
+0x30,0x22,0x06,0x03,0x55,0x04,0x0B,0x13,0x1B,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,
+0x69,0x67,0x6E,0x20,0x45,0x43,0x43,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x41,0x20,
+0x2D,0x20,0x52,0x34,0x31,0x13,0x30,0x11,0x06,0x03,0x55,0x04,0x0A,0x13,0x0A,0x47,
+0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,0x31,0x13,0x30,0x11,0x06,0x03,0x55,
+0x04,0x03,0x13,0x0A,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,0x30,0x1E,
+0x17,0x0D,0x31,0x32,0x31,0x31,0x31,0x33,0x30,0x30,0x30,0x30,0x30,0x30,0x5A,0x17,
+0x0D,0x33,0x38,0x30,0x31,0x31,0x39,0x30,0x33,0x31,0x34,0x30,0x37,0x5A,0x30,0x50,
+0x31,0x24,0x30,0x22,0x06,0x03,0x55,0x04,0x0B,0x13,0x1B,0x47,0x6C,0x6F,0x62,0x61,
+0x6C,0x53,0x69,0x67,0x6E,0x20,0x45,0x43,0x43,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,
+0x41,0x20,0x2D,0x20,0x52,0x34,0x31,0x13,0x30,0x11,0x06,0x03,0x55,0x04,0x0A,0x13,
+0x0A,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,0x31,0x13,0x30,0x11,0x06,
+0x03,0x55,0x04,0x03,0x13,0x0A,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,
+0x30,0x59,0x30,0x13,0x06,0x07,0x2A,0x86,0x48,0xCE,0x3D,0x02,0x01,0x06,0x08,0x2A,
+0x86,0x48,0xCE,0x3D,0x03,0x01,0x07,0x03,0x42,0x00,0x04,0xB8,0xC6,0x79,0xD3,0x8F,
+0x6C,0x25,0x0E,0x9F,0x2E,0x39,0x19,0x1C,0x03,0xA4,0xAE,0x9A,0xE5,0x39,0x07,0x09,
+0x16,0xCA,0x63,0xB1,0xB9,0x86,0xF8,0x8A,0x57,0xC1,0x57,0xCE,0x42,0xFA,0x73,0xA1,
+0xF7,0x65,0x42,0xFF,0x1E,0xC1,0x00,0xB2,0x6E,0x73,0x0E,0xFF,0xC7,0x21,0xE5,0x18,
+0xA4,0xAA,0xD9,0x71,0x3F,0xA8,0xD4,0xB9,0xCE,0x8C,0x1D,0xA3,0x42,0x30,0x40,0x30,
+0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,
+0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,
+0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x54,0xB0,0x7B,0xAD,0x45,
+0xB8,0xE2,0x40,0x7F,0xFB,0x0A,0x6E,0xFB,0xBE,0x33,0xC9,0x3C,0xA3,0x84,0xD5,0x30,
+0x0A,0x06,0x08,0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x02,0x03,0x48,0x00,0x30,0x45,
+0x02,0x21,0x00,0xDC,0x92,0xA1,0xA0,0x13,0xA6,0xCF,0x03,0xB0,0xE6,0xC4,0x21,0x97,
+0x90,0xFA,0x14,0x57,0x2D,0x03,0xEC,0xEE,0x3C,0xD3,0x6E,0xCA,0xA8,0x6C,0x76,0xBC,
+0xA2,0xDE,0xBB,0x02,0x20,0x27,0xA8,0x85,0x27,0x35,0x9B,0x56,0xC6,0xA3,0xF2,0x47,
+0xD2,0xB7,0x6E,0x1B,0x02,0x00,0x17,0xAA,0x67,0xA6,0x15,0x91,0xDE,0xFA,0x94,0xEC,
+0x7B,0x0B,0xF8,0x9F,0x84,
+};
+
+
+/* subject:/C=DE/O=TC TrustCenter GmbH/OU=TC TrustCenter Universal CA/CN=TC TrustCenter Universal CA I */
+/* issuer :/C=DE/O=TC TrustCenter GmbH/OU=TC TrustCenter Universal CA/CN=TC TrustCenter Universal CA I */
+
+
+const unsigned char TC_TrustCenter_Universal_CA_I_certificate[993]={
+0x30,0x82,0x03,0xDD,0x30,0x82,0x02,0xC5,0xA0,0x03,0x02,0x01,0x02,0x02,0x0E,0x1D,
+0xA2,0x00,0x01,0x00,0x02,0xEC,0xB7,0x60,0x80,0x78,0x8D,0xB6,0x06,0x30,0x0D,0x06,
+0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,0x79,0x31,0x0B,
+0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x44,0x45,0x31,0x1C,0x30,0x1A,0x06,
+0x03,0x55,0x04,0x0A,0x13,0x13,0x54,0x43,0x20,0x54,0x72,0x75,0x73,0x74,0x43,0x65,
+0x6E,0x74,0x65,0x72,0x20,0x47,0x6D,0x62,0x48,0x31,0x24,0x30,0x22,0x06,0x03,0x55,
+0x04,0x0B,0x13,0x1B,0x54,0x43,0x20,0x54,0x72,0x75,0x73,0x74,0x43,0x65,0x6E,0x74,
+0x65,0x72,0x20,0x55,0x6E,0x69,0x76,0x65,0x72,0x73,0x61,0x6C,0x20,0x43,0x41,0x31,
+0x26,0x30,0x24,0x06,0x03,0x55,0x04,0x03,0x13,0x1D,0x54,0x43,0x20,0x54,0x72,0x75,
+0x73,0x74,0x43,0x65,0x6E,0x74,0x65,0x72,0x20,0x55,0x6E,0x69,0x76,0x65,0x72,0x73,
+0x61,0x6C,0x20,0x43,0x41,0x20,0x49,0x30,0x1E,0x17,0x0D,0x30,0x36,0x30,0x33,0x32,
+0x32,0x31,0x35,0x35,0x34,0x32,0x38,0x5A,0x17,0x0D,0x32,0x35,0x31,0x32,0x33,0x31,
+0x32,0x32,0x35,0x39,0x35,0x39,0x5A,0x30,0x79,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,
+0x04,0x06,0x13,0x02,0x44,0x45,0x31,0x1C,0x30,0x1A,0x06,0x03,0x55,0x04,0x0A,0x13,
+0x13,0x54,0x43,0x20,0x54,0x72,0x75,0x73,0x74,0x43,0x65,0x6E,0x74,0x65,0x72,0x20,
+0x47,0x6D,0x62,0x48,0x31,0x24,0x30,0x22,0x06,0x03,0x55,0x04,0x0B,0x13,0x1B,0x54,
+0x43,0x20,0x54,0x72,0x75,0x73,0x74,0x43,0x65,0x6E,0x74,0x65,0x72,0x20,0x55,0x6E,
+0x69,0x76,0x65,0x72,0x73,0x61,0x6C,0x20,0x43,0x41,0x31,0x26,0x30,0x24,0x06,0x03,
+0x55,0x04,0x03,0x13,0x1D,0x54,0x43,0x20,0x54,0x72,0x75,0x73,0x74,0x43,0x65,0x6E,
+0x74,0x65,0x72,0x20,0x55,0x6E,0x69,0x76,0x65,0x72,0x73,0x61,0x6C,0x20,0x43,0x41,
+0x20,0x49,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,
+0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,
+0x01,0x01,0x00,0xA4,0x77,0x23,0x96,0x44,0xAF,0x90,0xF4,0x31,0xA7,0x10,0xF4,0x26,
+0x87,0x9C,0xF3,0x38,0xD9,0x0F,0x5E,0xDE,0xCF,0x41,0xE8,0x31,0xAD,0xC6,0x74,0x91,
+0x24,0x96,0x78,0x1E,0x09,0xA0,0x9B,0x9A,0x95,0x4A,0x4A,0xF5,0x62,0x7C,0x02,0xA8,
+0xCA,0xAC,0xFB,0x5A,0x04,0x76,0x39,0xDE,0x5F,0xF1,0xF9,0xB3,0xBF,0xF3,0x03,0x58,
+0x55,0xD2,0xAA,0xB7,0xE3,0x04,0x22,0xD1,0xF8,0x94,0xDA,0x22,0x08,0x00,0x8D,0xD3,
+0x7C,0x26,0x5D,0xCC,0x77,0x79,0xE7,0x2C,0x78,0x39,0xA8,0x26,0x73,0x0E,0xA2,0x5D,
+0x25,0x69,0x85,0x4F,0x55,0x0E,0x9A,0xEF,0xC6,0xB9,0x44,0xE1,0x57,0x3D,0xDF,0x1F,
+0x54,0x22,0xE5,0x6F,0x65,0xAA,0x33,0x84,0x3A,0xF3,0xCE,0x7A,0xBE,0x55,0x97,0xAE,
+0x8D,0x12,0x0F,0x14,0x33,0xE2,0x50,0x70,0xC3,0x49,0x87,0x13,0xBC,0x51,0xDE,0xD7,
+0x98,0x12,0x5A,0xEF,0x3A,0x83,0x33,0x92,0x06,0x75,0x8B,0x92,0x7C,0x12,0x68,0x7B,
+0x70,0x6A,0x0F,0xB5,0x9B,0xB6,0x77,0x5B,0x48,0x59,0x9D,0xE4,0xEF,0x5A,0xAD,0xF3,
+0xC1,0x9E,0xD4,0xD7,0x45,0x4E,0xCA,0x56,0x34,0x21,0xBC,0x3E,0x17,0x5B,0x6F,0x77,
+0x0C,0x48,0x01,0x43,0x29,0xB0,0xDD,0x3F,0x96,0x6E,0xE6,0x95,0xAA,0x0C,0xC0,0x20,
+0xB6,0xFD,0x3E,0x36,0x27,0x9C,0xE3,0x5C,0xCF,0x4E,0x81,0xDC,0x19,0xBB,0x91,0x90,
+0x7D,0xEC,0xE6,0x97,0x04,0x1E,0x93,0xCC,0x22,0x49,0xD7,0x97,0x86,0xB6,0x13,0x0A,
+0x3C,0x43,0x23,0x77,0x7E,0xF0,0xDC,0xE6,0xCD,0x24,0x1F,0x3B,0x83,0x9B,0x34,0x3A,
+0x83,0x34,0xE3,0x02,0x03,0x01,0x00,0x01,0xA3,0x63,0x30,0x61,0x30,0x1F,0x06,0x03,
+0x55,0x1D,0x23,0x04,0x18,0x30,0x16,0x80,0x14,0x92,0xA4,0x75,0x2C,0xA4,0x9E,0xBE,
+0x81,0x44,0xEB,0x79,0xFC,0x8A,0xC5,0x95,0xA5,0xEB,0x10,0x75,0x73,0x30,0x0F,0x06,
+0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x0E,
+0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x86,0x30,0x1D,
+0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x92,0xA4,0x75,0x2C,0xA4,0x9E,0xBE,
+0x81,0x44,0xEB,0x79,0xFC,0x8A,0xC5,0x95,0xA5,0xEB,0x10,0x75,0x73,0x30,0x0D,0x06,
+0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x03,0x82,0x01,0x01,
+0x00,0x28,0xD2,0xE0,0x86,0xD5,0xE6,0xF8,0x7B,0xF0,0x97,0xDC,0x22,0x6B,0x3B,0x95,
+0x14,0x56,0x0F,0x11,0x30,0xA5,0x9A,0x4F,0x3A,0xB0,0x3A,0xE0,0x06,0xCB,0x65,0xF5,
+0xED,0xC6,0x97,0x27,0xFE,0x25,0xF2,0x57,0xE6,0x5E,0x95,0x8C,0x3E,0x64,0x60,0x15,
+0x5A,0x7F,0x2F,0x0D,0x01,0xC5,0xB1,0x60,0xFD,0x45,0x35,0xCF,0xF0,0xB2,0xBF,0x06,
+0xD9,0xEF,0x5A,0xBE,0xB3,0x62,0x21,0xB4,0xD7,0xAB,0x35,0x7C,0x53,0x3E,0xA6,0x27,
+0xF1,0xA1,0x2D,0xDA,0x1A,0x23,0x9D,0xCC,0xDD,0xEC,0x3C,0x2D,0x9E,0x27,0x34,0x5D,
+0x0F,0xC2,0x36,0x79,0xBC,0xC9,0x4A,0x62,0x2D,0xED,0x6B,0xD9,0x7D,0x41,0x43,0x7C,
+0xB6,0xAA,0xCA,0xED,0x61,0xB1,0x37,0x82,0x15,0x09,0x1A,0x8A,0x16,0x30,0xD8,0xEC,
+0xC9,0xD6,0x47,0x72,0x78,0x4B,0x10,0x46,0x14,0x8E,0x5F,0x0E,0xAF,0xEC,0xC7,0x2F,
+0xAB,0x10,0xD7,0xB6,0xF1,0x6E,0xEC,0x86,0xB2,0xC2,0xE8,0x0D,0x92,0x73,0xDC,0xA2,
+0xF4,0x0F,0x3A,0xBF,0x61,0x23,0x10,0x89,0x9C,0x48,0x40,0x6E,0x70,0x00,0xB3,0xD3,
+0xBA,0x37,0x44,0x58,0x11,0x7A,0x02,0x6A,0x88,0xF0,0x37,0x34,0xF0,0x19,0xE9,0xAC,
+0xD4,0x65,0x73,0xF6,0x69,0x8C,0x64,0x94,0x3A,0x79,0x85,0x29,0xB0,0x16,0x2B,0x0C,
+0x82,0x3F,0x06,0x9C,0xC7,0xFD,0x10,0x2B,0x9E,0x0F,0x2C,0xB6,0x9E,0xE3,0x15,0xBF,
+0xD9,0x36,0x1C,0xBA,0x25,0x1A,0x52,0x3D,0x1A,0xEC,0x22,0x0C,0x1C,0xE0,0xA4,0xA2,
+0x3D,0xF0,0xE8,0x39,0xCF,0x81,0xC0,0x7B,0xED,0x5D,0x1F,0x6F,0xC5,0xD0,0x0B,0xD7,
+0x98,
+};
+
+
+/* subject:/C=GB/ST=Greater Manchester/L=Salford/O=Comodo CA Limited/CN=Trusted Certificate Services */
+/* issuer :/C=GB/ST=Greater Manchester/L=Salford/O=Comodo CA Limited/CN=Trusted Certificate Services */
+
+
+const unsigned char Comodo_Trusted_Services_root_certificate[1095]={
+0x30,0x82,0x04,0x43,0x30,0x82,0x03,0x2B,0xA0,0x03,0x02,0x01,0x02,0x02,0x01,0x01,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,
+0x7F,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x47,0x42,0x31,0x1B,
+0x30,0x19,0x06,0x03,0x55,0x04,0x08,0x0C,0x12,0x47,0x72,0x65,0x61,0x74,0x65,0x72,
+0x20,0x4D,0x61,0x6E,0x63,0x68,0x65,0x73,0x74,0x65,0x72,0x31,0x10,0x30,0x0E,0x06,
+0x03,0x55,0x04,0x07,0x0C,0x07,0x53,0x61,0x6C,0x66,0x6F,0x72,0x64,0x31,0x1A,0x30,
+0x18,0x06,0x03,0x55,0x04,0x0A,0x0C,0x11,0x43,0x6F,0x6D,0x6F,0x64,0x6F,0x20,0x43,
+0x41,0x20,0x4C,0x69,0x6D,0x69,0x74,0x65,0x64,0x31,0x25,0x30,0x23,0x06,0x03,0x55,
+0x04,0x03,0x0C,0x1C,0x54,0x72,0x75,0x73,0x74,0x65,0x64,0x20,0x43,0x65,0x72,0x74,
+0x69,0x66,0x69,0x63,0x61,0x74,0x65,0x20,0x53,0x65,0x72,0x76,0x69,0x63,0x65,0x73,
+0x30,0x1E,0x17,0x0D,0x30,0x34,0x30,0x31,0x30,0x31,0x30,0x30,0x30,0x30,0x30,0x30,
+0x5A,0x17,0x0D,0x32,0x38,0x31,0x32,0x33,0x31,0x32,0x33,0x35,0x39,0x35,0x39,0x5A,
+0x30,0x7F,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x47,0x42,0x31,
+0x1B,0x30,0x19,0x06,0x03,0x55,0x04,0x08,0x0C,0x12,0x47,0x72,0x65,0x61,0x74,0x65,
+0x72,0x20,0x4D,0x61,0x6E,0x63,0x68,0x65,0x73,0x74,0x65,0x72,0x31,0x10,0x30,0x0E,
+0x06,0x03,0x55,0x04,0x07,0x0C,0x07,0x53,0x61,0x6C,0x66,0x6F,0x72,0x64,0x31,0x1A,
+0x30,0x18,0x06,0x03,0x55,0x04,0x0A,0x0C,0x11,0x43,0x6F,0x6D,0x6F,0x64,0x6F,0x20,
+0x43,0x41,0x20,0x4C,0x69,0x6D,0x69,0x74,0x65,0x64,0x31,0x25,0x30,0x23,0x06,0x03,
+0x55,0x04,0x03,0x0C,0x1C,0x54,0x72,0x75,0x73,0x74,0x65,0x64,0x20,0x43,0x65,0x72,
+0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x65,0x20,0x53,0x65,0x72,0x76,0x69,0x63,0x65,
+0x73,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,
+0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01,
+0x01,0x00,0xDF,0x71,0x6F,0x36,0x58,0x53,0x5A,0xF2,0x36,0x54,0x57,0x80,0xC4,0x74,
+0x08,0x20,0xED,0x18,0x7F,0x2A,0x1D,0xE6,0x35,0x9A,0x1E,0x25,0xAC,0x9C,0xE5,0x96,
+0x7E,0x72,0x52,0xA0,0x15,0x42,0xDB,0x59,0xDD,0x64,0x7A,0x1A,0xD0,0xB8,0x7B,0xDD,
+0x39,0x15,0xBC,0x55,0x48,0xC4,0xED,0x3A,0x00,0xEA,0x31,0x11,0xBA,0xF2,0x71,0x74,
+0x1A,0x67,0xB8,0xCF,0x33,0xCC,0xA8,0x31,0xAF,0xA3,0xE3,0xD7,0x7F,0xBF,0x33,0x2D,
+0x4C,0x6A,0x3C,0xEC,0x8B,0xC3,0x92,0xD2,0x53,0x77,0x24,0x74,0x9C,0x07,0x6E,0x70,
+0xFC,0xBD,0x0B,0x5B,0x76,0xBA,0x5F,0xF2,0xFF,0xD7,0x37,0x4B,0x4A,0x60,0x78,0xF7,
+0xF0,0xFA,0xCA,0x70,0xB4,0xEA,0x59,0xAA,0xA3,0xCE,0x48,0x2F,0xA9,0xC3,0xB2,0x0B,
+0x7E,0x17,0x72,0x16,0x0C,0xA6,0x07,0x0C,0x1B,0x38,0xCF,0xC9,0x62,0xB7,0x3F,0xA0,
+0x93,0xA5,0x87,0x41,0xF2,0xB7,0x70,0x40,0x77,0xD8,0xBE,0x14,0x7C,0xE3,0xA8,0xC0,
+0x7A,0x8E,0xE9,0x63,0x6A,0xD1,0x0F,0x9A,0xC6,0xD2,0xF4,0x8B,0x3A,0x14,0x04,0x56,
+0xD4,0xED,0xB8,0xCC,0x6E,0xF5,0xFB,0xE2,0x2C,0x58,0xBD,0x7F,0x4F,0x6B,0x2B,0xF7,
+0x60,0x24,0x58,0x24,0xCE,0x26,0xEF,0x34,0x91,0x3A,0xD5,0xE3,0x81,0xD0,0xB2,0xF0,
+0x04,0x02,0xD7,0x5B,0xB7,0x3E,0x92,0xAC,0x6B,0x12,0x8A,0xF9,0xE4,0x05,0xB0,0x3B,
+0x91,0x49,0x5C,0xB2,0xEB,0x53,0xEA,0xF8,0x9F,0x47,0x86,0xEE,0xBF,0x95,0xC0,0xC0,
+0x06,0x9F,0xD2,0x5B,0x5E,0x11,0x1B,0xF4,0xC7,0x04,0x35,0x29,0xD2,0x55,0x5C,0xE4,
+0xED,0xEB,0x02,0x03,0x01,0x00,0x01,0xA3,0x81,0xC9,0x30,0x81,0xC6,0x30,0x1D,0x06,
+0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0xC5,0x7B,0x58,0xBD,0xED,0xDA,0x25,0x69,
+0xD2,0xF7,0x59,0x16,0xA8,0xB3,0x32,0xC0,0x7B,0x27,0x5B,0xF4,0x30,0x0E,0x06,0x03,
+0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x0F,0x06,0x03,
+0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x81,0x83,
+0x06,0x03,0x55,0x1D,0x1F,0x04,0x7C,0x30,0x7A,0x30,0x3C,0xA0,0x3A,0xA0,0x38,0x86,
+0x36,0x68,0x74,0x74,0x70,0x3A,0x2F,0x2F,0x63,0x72,0x6C,0x2E,0x63,0x6F,0x6D,0x6F,
+0x64,0x6F,0x63,0x61,0x2E,0x63,0x6F,0x6D,0x2F,0x54,0x72,0x75,0x73,0x74,0x65,0x64,
+0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x65,0x53,0x65,0x72,0x76,0x69,
+0x63,0x65,0x73,0x2E,0x63,0x72,0x6C,0x30,0x3A,0xA0,0x38,0xA0,0x36,0x86,0x34,0x68,
+0x74,0x74,0x70,0x3A,0x2F,0x2F,0x63,0x72,0x6C,0x2E,0x63,0x6F,0x6D,0x6F,0x64,0x6F,
+0x2E,0x6E,0x65,0x74,0x2F,0x54,0x72,0x75,0x73,0x74,0x65,0x64,0x43,0x65,0x72,0x74,
+0x69,0x66,0x69,0x63,0x61,0x74,0x65,0x53,0x65,0x72,0x76,0x69,0x63,0x65,0x73,0x2E,
+0x63,0x72,0x6C,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,
+0x05,0x00,0x03,0x82,0x01,0x01,0x00,0xC8,0x93,0x81,0x3B,0x89,0xB4,0xAF,0xB8,0x84,
+0x12,0x4C,0x8D,0xD2,0xF0,0xDB,0x70,0xBA,0x57,0x86,0x15,0x34,0x10,0xB9,0x2F,0x7F,
+0x1E,0xB0,0xA8,0x89,0x60,0xA1,0x8A,0xC2,0x77,0x0C,0x50,0x4A,0x9B,0x00,0x8B,0xD8,
+0x8B,0xF4,0x41,0xE2,0xD0,0x83,0x8A,0x4A,0x1C,0x14,0x06,0xB0,0xA3,0x68,0x05,0x70,
+0x31,0x30,0xA7,0x53,0x9B,0x0E,0xE9,0x4A,0xA0,0x58,0x69,0x67,0x0E,0xAE,0x9D,0xF6,
+0xA5,0x2C,0x41,0xBF,0x3C,0x06,0x6B,0xE4,0x59,0xCC,0x6D,0x10,0xF1,0x96,0x6F,0x1F,
+0xDF,0xF4,0x04,0x02,0xA4,0x9F,0x45,0x3E,0xC8,0xD8,0xFA,0x36,0x46,0x44,0x50,0x3F,
+0x82,0x97,0x91,0x1F,0x28,0xDB,0x18,0x11,0x8C,0x2A,0xE4,0x65,0x83,0x57,0x12,0x12,
+0x8C,0x17,0x3F,0x94,0x36,0xFE,0x5D,0xB0,0xC0,0x04,0x77,0x13,0xB8,0xF4,0x15,0xD5,
+0x3F,0x38,0xCC,0x94,0x3A,0x55,0xD0,0xAC,0x98,0xF5,0xBA,0x00,0x5F,0xE0,0x86,0x19,
+0x81,0x78,0x2F,0x28,0xC0,0x7E,0xD3,0xCC,0x42,0x0A,0xF5,0xAE,0x50,0xA0,0xD1,0x3E,
+0xC6,0xA1,0x71,0xEC,0x3F,0xA0,0x20,0x8C,0x66,0x3A,0x89,0xB4,0x8E,0xD4,0xD8,0xB1,
+0x4D,0x25,0x47,0xEE,0x2F,0x88,0xC8,0xB5,0xE1,0x05,0x45,0xC0,0xBE,0x14,0x71,0xDE,
+0x7A,0xFD,0x8E,0x7B,0x7D,0x4D,0x08,0x96,0xA5,0x12,0x73,0xF0,0x2D,0xCA,0x37,0x27,
+0x74,0x12,0x27,0x4C,0xCB,0xB6,0x97,0xE9,0xD9,0xAE,0x08,0x6D,0x5A,0x39,0x40,0xDD,
+0x05,0x47,0x75,0x6A,0x5A,0x21,0xB3,0xA3,0x18,0xCF,0x4E,0xF7,0x2E,0x57,0xB7,0x98,
+0x70,0x5E,0xC8,0xC4,0x78,0xB0,0x62,
+};
+
+
+/* subject:/C=US/O=Entrust, Inc./OU=www.entrust.net/CPS is incorporated by reference/OU=(c) 2006 Entrust, Inc./CN=Entrust Root Certification Authority */
+/* issuer :/C=US/O=Entrust, Inc./OU=www.entrust.net/CPS is incorporated by reference/OU=(c) 2006 Entrust, Inc./CN=Entrust Root Certification Authority */
+
+
+const unsigned char Entrust_Root_Certification_Authority_certificate[1173]={
+0x30,0x82,0x04,0x91,0x30,0x82,0x03,0x79,0xA0,0x03,0x02,0x01,0x02,0x02,0x04,0x45,
+0x6B,0x50,0x54,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,
+0x05,0x00,0x30,0x81,0xB0,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,
+0x55,0x53,0x31,0x16,0x30,0x14,0x06,0x03,0x55,0x04,0x0A,0x13,0x0D,0x45,0x6E,0x74,
+0x72,0x75,0x73,0x74,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x31,0x39,0x30,0x37,0x06,0x03,
+0x55,0x04,0x0B,0x13,0x30,0x77,0x77,0x77,0x2E,0x65,0x6E,0x74,0x72,0x75,0x73,0x74,
+0x2E,0x6E,0x65,0x74,0x2F,0x43,0x50,0x53,0x20,0x69,0x73,0x20,0x69,0x6E,0x63,0x6F,
+0x72,0x70,0x6F,0x72,0x61,0x74,0x65,0x64,0x20,0x62,0x79,0x20,0x72,0x65,0x66,0x65,
+0x72,0x65,0x6E,0x63,0x65,0x31,0x1F,0x30,0x1D,0x06,0x03,0x55,0x04,0x0B,0x13,0x16,
+0x28,0x63,0x29,0x20,0x32,0x30,0x30,0x36,0x20,0x45,0x6E,0x74,0x72,0x75,0x73,0x74,
+0x2C,0x20,0x49,0x6E,0x63,0x2E,0x31,0x2D,0x30,0x2B,0x06,0x03,0x55,0x04,0x03,0x13,
+0x24,0x45,0x6E,0x74,0x72,0x75,0x73,0x74,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x65,
+0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,
+0x6F,0x72,0x69,0x74,0x79,0x30,0x1E,0x17,0x0D,0x30,0x36,0x31,0x31,0x32,0x37,0x32,
+0x30,0x32,0x33,0x34,0x32,0x5A,0x17,0x0D,0x32,0x36,0x31,0x31,0x32,0x37,0x32,0x30,
+0x35,0x33,0x34,0x32,0x5A,0x30,0x81,0xB0,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,
+0x06,0x13,0x02,0x55,0x53,0x31,0x16,0x30,0x14,0x06,0x03,0x55,0x04,0x0A,0x13,0x0D,
+0x45,0x6E,0x74,0x72,0x75,0x73,0x74,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x31,0x39,0x30,
+0x37,0x06,0x03,0x55,0x04,0x0B,0x13,0x30,0x77,0x77,0x77,0x2E,0x65,0x6E,0x74,0x72,
+0x75,0x73,0x74,0x2E,0x6E,0x65,0x74,0x2F,0x43,0x50,0x53,0x20,0x69,0x73,0x20,0x69,
+0x6E,0x63,0x6F,0x72,0x70,0x6F,0x72,0x61,0x74,0x65,0x64,0x20,0x62,0x79,0x20,0x72,
+0x65,0x66,0x65,0x72,0x65,0x6E,0x63,0x65,0x31,0x1F,0x30,0x1D,0x06,0x03,0x55,0x04,
+0x0B,0x13,0x16,0x28,0x63,0x29,0x20,0x32,0x30,0x30,0x36,0x20,0x45,0x6E,0x74,0x72,
+0x75,0x73,0x74,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x31,0x2D,0x30,0x2B,0x06,0x03,0x55,
+0x04,0x03,0x13,0x24,0x45,0x6E,0x74,0x72,0x75,0x73,0x74,0x20,0x52,0x6F,0x6F,0x74,
+0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,
+0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,
+0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,
+0x30,0x82,0x01,0x0A,0x02,0x82,0x01,0x01,0x00,0xB6,0x95,0xB6,0x43,0x42,0xFA,0xC6,
+0x6D,0x2A,0x6F,0x48,0xDF,0x94,0x4C,0x39,0x57,0x05,0xEE,0xC3,0x79,0x11,0x41,0x68,
+0x36,0xED,0xEC,0xFE,0x9A,0x01,0x8F,0xA1,0x38,0x28,0xFC,0xF7,0x10,0x46,0x66,0x2E,
+0x4D,0x1E,0x1A,0xB1,0x1A,0x4E,0xC6,0xD1,0xC0,0x95,0x88,0xB0,0xC9,0xFF,0x31,0x8B,
+0x33,0x03,0xDB,0xB7,0x83,0x7B,0x3E,0x20,0x84,0x5E,0xED,0xB2,0x56,0x28,0xA7,0xF8,
+0xE0,0xB9,0x40,0x71,0x37,0xC5,0xCB,0x47,0x0E,0x97,0x2A,0x68,0xC0,0x22,0x95,0x62,
+0x15,0xDB,0x47,0xD9,0xF5,0xD0,0x2B,0xFF,0x82,0x4B,0xC9,0xAD,0x3E,0xDE,0x4C,0xDB,
+0x90,0x80,0x50,0x3F,0x09,0x8A,0x84,0x00,0xEC,0x30,0x0A,0x3D,0x18,0xCD,0xFB,0xFD,
+0x2A,0x59,0x9A,0x23,0x95,0x17,0x2C,0x45,0x9E,0x1F,0x6E,0x43,0x79,0x6D,0x0C,0x5C,
+0x98,0xFE,0x48,0xA7,0xC5,0x23,0x47,0x5C,0x5E,0xFD,0x6E,0xE7,0x1E,0xB4,0xF6,0x68,
+0x45,0xD1,0x86,0x83,0x5B,0xA2,0x8A,0x8D,0xB1,0xE3,0x29,0x80,0xFE,0x25,0x71,0x88,
+0xAD,0xBE,0xBC,0x8F,0xAC,0x52,0x96,0x4B,0xAA,0x51,0x8D,0xE4,0x13,0x31,0x19,0xE8,
+0x4E,0x4D,0x9F,0xDB,0xAC,0xB3,0x6A,0xD5,0xBC,0x39,0x54,0x71,0xCA,0x7A,0x7A,0x7F,
+0x90,0xDD,0x7D,0x1D,0x80,0xD9,0x81,0xBB,0x59,0x26,0xC2,0x11,0xFE,0xE6,0x93,0xE2,
+0xF7,0x80,0xE4,0x65,0xFB,0x34,0x37,0x0E,0x29,0x80,0x70,0x4D,0xAF,0x38,0x86,0x2E,
+0x9E,0x7F,0x57,0xAF,0x9E,0x17,0xAE,0xEB,0x1C,0xCB,0x28,0x21,0x5F,0xB6,0x1C,0xD8,
+0xE7,0xA2,0x04,0x22,0xF9,0xD3,0xDA,0xD8,0xCB,0x02,0x03,0x01,0x00,0x01,0xA3,0x81,
+0xB0,0x30,0x81,0xAD,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,
+0x03,0x02,0x01,0x06,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,
+0x30,0x03,0x01,0x01,0xFF,0x30,0x2B,0x06,0x03,0x55,0x1D,0x10,0x04,0x24,0x30,0x22,
+0x80,0x0F,0x32,0x30,0x30,0x36,0x31,0x31,0x32,0x37,0x32,0x30,0x32,0x33,0x34,0x32,
+0x5A,0x81,0x0F,0x32,0x30,0x32,0x36,0x31,0x31,0x32,0x37,0x32,0x30,0x35,0x33,0x34,
+0x32,0x5A,0x30,0x1F,0x06,0x03,0x55,0x1D,0x23,0x04,0x18,0x30,0x16,0x80,0x14,0x68,
+0x90,0xE4,0x67,0xA4,0xA6,0x53,0x80,0xC7,0x86,0x66,0xA4,0xF1,0xF7,0x4B,0x43,0xFB,
+0x84,0xBD,0x6D,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x68,0x90,
+0xE4,0x67,0xA4,0xA6,0x53,0x80,0xC7,0x86,0x66,0xA4,0xF1,0xF7,0x4B,0x43,0xFB,0x84,
+0xBD,0x6D,0x30,0x1D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF6,0x7D,0x07,0x41,0x00,0x04,
+0x10,0x30,0x0E,0x1B,0x08,0x56,0x37,0x2E,0x31,0x3A,0x34,0x2E,0x30,0x03,0x02,0x04,
+0x90,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,
+0x03,0x82,0x01,0x01,0x00,0x93,0xD4,0x30,0xB0,0xD7,0x03,0x20,0x2A,0xD0,0xF9,0x63,
+0xE8,0x91,0x0C,0x05,0x20,0xA9,0x5F,0x19,0xCA,0x7B,0x72,0x4E,0xD4,0xB1,0xDB,0xD0,
+0x96,0xFB,0x54,0x5A,0x19,0x2C,0x0C,0x08,0xF7,0xB2,0xBC,0x85,0xA8,0x9D,0x7F,0x6D,
+0x3B,0x52,0xB3,0x2A,0xDB,0xE7,0xD4,0x84,0x8C,0x63,0xF6,0x0F,0xCB,0x26,0x01,0x91,
+0x50,0x6C,0xF4,0x5F,0x14,0xE2,0x93,0x74,0xC0,0x13,0x9E,0x30,0x3A,0x50,0xE3,0xB4,
+0x60,0xC5,0x1C,0xF0,0x22,0x44,0x8D,0x71,0x47,0xAC,0xC8,0x1A,0xC9,0xE9,0x9B,0x9A,
+0x00,0x60,0x13,0xFF,0x70,0x7E,0x5F,0x11,0x4D,0x49,0x1B,0xB3,0x15,0x52,0x7B,0xC9,
+0x54,0xDA,0xBF,0x9D,0x95,0xAF,0x6B,0x9A,0xD8,0x9E,0xE9,0xF1,0xE4,0x43,0x8D,0xE2,
+0x11,0x44,0x3A,0xBF,0xAF,0xBD,0x83,0x42,0x73,0x52,0x8B,0xAA,0xBB,0xA7,0x29,0xCF,
+0xF5,0x64,0x1C,0x0A,0x4D,0xD1,0xBC,0xAA,0xAC,0x9F,0x2A,0xD0,0xFF,0x7F,0x7F,0xDA,
+0x7D,0xEA,0xB1,0xED,0x30,0x25,0xC1,0x84,0xDA,0x34,0xD2,0x5B,0x78,0x83,0x56,0xEC,
+0x9C,0x36,0xC3,0x26,0xE2,0x11,0xF6,0x67,0x49,0x1D,0x92,0xAB,0x8C,0xFB,0xEB,0xFF,
+0x7A,0xEE,0x85,0x4A,0xA7,0x50,0x80,0xF0,0xA7,0x5C,0x4A,0x94,0x2E,0x5F,0x05,0x99,
+0x3C,0x52,0x41,0xE0,0xCD,0xB4,0x63,0xCF,0x01,0x43,0xBA,0x9C,0x83,0xDC,0x8F,0x60,
+0x3B,0xF3,0x5A,0xB4,0xB4,0x7B,0xAE,0xDA,0x0B,0x90,0x38,0x75,0xEF,0x81,0x1D,0x66,
+0xD2,0xF7,0x57,0x70,0x36,0xB3,0xBF,0xFC,0x28,0xAF,0x71,0x25,0x85,0x5B,0x13,0xFE,
+0x1E,0x7F,0x5A,0xB4,0x3C,
+};
+
+
+/* subject:/C=DE/O=TC TrustCenter GmbH/OU=TC TrustCenter Class 2 CA/CN=TC TrustCenter Class 2 CA II */
+/* issuer :/C=DE/O=TC TrustCenter GmbH/OU=TC TrustCenter Class 2 CA/CN=TC TrustCenter Class 2 CA II */
+
+
+const unsigned char TC_TrustCenter_Class_2_CA_II_certificate[1198]={
+0x30,0x82,0x04,0xAA,0x30,0x82,0x03,0x92,0xA0,0x03,0x02,0x01,0x02,0x02,0x0E,0x2E,
+0x6A,0x00,0x01,0x00,0x02,0x1F,0xD7,0x52,0x21,0x2C,0x11,0x5C,0x3B,0x30,0x0D,0x06,
+0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,0x76,0x31,0x0B,
+0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x44,0x45,0x31,0x1C,0x30,0x1A,0x06,
+0x03,0x55,0x04,0x0A,0x13,0x13,0x54,0x43,0x20,0x54,0x72,0x75,0x73,0x74,0x43,0x65,
+0x6E,0x74,0x65,0x72,0x20,0x47,0x6D,0x62,0x48,0x31,0x22,0x30,0x20,0x06,0x03,0x55,
+0x04,0x0B,0x13,0x19,0x54,0x43,0x20,0x54,0x72,0x75,0x73,0x74,0x43,0x65,0x6E,0x74,
+0x65,0x72,0x20,0x43,0x6C,0x61,0x73,0x73,0x20,0x32,0x20,0x43,0x41,0x31,0x25,0x30,
+0x23,0x06,0x03,0x55,0x04,0x03,0x13,0x1C,0x54,0x43,0x20,0x54,0x72,0x75,0x73,0x74,
+0x43,0x65,0x6E,0x74,0x65,0x72,0x20,0x43,0x6C,0x61,0x73,0x73,0x20,0x32,0x20,0x43,
+0x41,0x20,0x49,0x49,0x30,0x1E,0x17,0x0D,0x30,0x36,0x30,0x31,0x31,0x32,0x31,0x34,
+0x33,0x38,0x34,0x33,0x5A,0x17,0x0D,0x32,0x35,0x31,0x32,0x33,0x31,0x32,0x32,0x35,
+0x39,0x35,0x39,0x5A,0x30,0x76,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,
+0x02,0x44,0x45,0x31,0x1C,0x30,0x1A,0x06,0x03,0x55,0x04,0x0A,0x13,0x13,0x54,0x43,
+0x20,0x54,0x72,0x75,0x73,0x74,0x43,0x65,0x6E,0x74,0x65,0x72,0x20,0x47,0x6D,0x62,
+0x48,0x31,0x22,0x30,0x20,0x06,0x03,0x55,0x04,0x0B,0x13,0x19,0x54,0x43,0x20,0x54,
+0x72,0x75,0x73,0x74,0x43,0x65,0x6E,0x74,0x65,0x72,0x20,0x43,0x6C,0x61,0x73,0x73,
+0x20,0x32,0x20,0x43,0x41,0x31,0x25,0x30,0x23,0x06,0x03,0x55,0x04,0x03,0x13,0x1C,
+0x54,0x43,0x20,0x54,0x72,0x75,0x73,0x74,0x43,0x65,0x6E,0x74,0x65,0x72,0x20,0x43,
+0x6C,0x61,0x73,0x73,0x20,0x32,0x20,0x43,0x41,0x20,0x49,0x49,0x30,0x82,0x01,0x22,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,
+0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01,0x01,0x00,0xAB,0x80,0x87,
+0x9B,0x8E,0xF0,0xC3,0x7C,0x87,0xD7,0xE8,0x24,0x82,0x11,0xB3,0x3C,0xDD,0x43,0x62,
+0xEE,0xF8,0xC3,0x45,0xDA,0xE8,0xE1,0xA0,0x5F,0xD1,0x2A,0xB2,0xEA,0x93,0x68,0xDF,
+0xB4,0xC8,0xD6,0x43,0xE9,0xC4,0x75,0x59,0x7F,0xFC,0xE1,0x1D,0xF8,0x31,0x70,0x23,
+0x1B,0x88,0x9E,0x27,0xB9,0x7B,0xFD,0x3A,0xD2,0xC9,0xA9,0xE9,0x14,0x2F,0x90,0xBE,
+0x03,0x52,0xC1,0x49,0xCD,0xF6,0xFD,0xE4,0x08,0x66,0x0B,0x57,0x8A,0xA2,0x42,0xA0,
+0xB8,0xD5,0x7F,0x69,0x5C,0x90,0x32,0xB2,0x97,0x0D,0xCA,0x4A,0xDC,0x46,0x3E,0x02,
+0x55,0x89,0x53,0xE3,0x1A,0x5A,0xCB,0x36,0xC6,0x07,0x56,0xF7,0x8C,0xCF,0x11,0xF4,
+0x4C,0xBB,0x30,0x70,0x04,0x95,0xA5,0xF6,0x39,0x8C,0xFD,0x73,0x81,0x08,0x7D,0x89,
+0x5E,0x32,0x1E,0x22,0xA9,0x22,0x45,0x4B,0xB0,0x66,0x2E,0x30,0xCC,0x9F,0x65,0xFD,
+0xFC,0xCB,0x81,0xA9,0xF1,0xE0,0x3B,0xAF,0xA3,0x86,0xD1,0x89,0xEA,0xC4,0x45,0x79,
+0x50,0x5D,0xAE,0xE9,0x21,0x74,0x92,0x4D,0x8B,0x59,0x82,0x8F,0x94,0xE3,0xE9,0x4A,
+0xF1,0xE7,0x49,0xB0,0x14,0xE3,0xF5,0x62,0xCB,0xD5,0x72,0xBD,0x1F,0xB9,0xD2,0x9F,
+0xA0,0xCD,0xA8,0xFA,0x01,0xC8,0xD9,0x0D,0xDF,0xDA,0xFC,0x47,0x9D,0xB3,0xC8,0x54,
+0xDF,0x49,0x4A,0xF1,0x21,0xA9,0xFE,0x18,0x4E,0xEE,0x48,0xD4,0x19,0xBB,0xEF,0x7D,
+0xE4,0xE2,0x9D,0xCB,0x5B,0xB6,0x6E,0xFF,0xE3,0xCD,0x5A,0xE7,0x74,0x82,0x05,0xBA,
+0x80,0x25,0x38,0xCB,0xE4,0x69,0x9E,0xAF,0x41,0xAA,0x1A,0x84,0xF5,0x02,0x03,0x01,
+0x00,0x01,0xA3,0x82,0x01,0x34,0x30,0x82,0x01,0x30,0x30,0x0F,0x06,0x03,0x55,0x1D,
+0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x0E,0x06,0x03,0x55,
+0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x1D,0x06,0x03,0x55,
+0x1D,0x0E,0x04,0x16,0x04,0x14,0xE3,0xAB,0x54,0x4C,0x80,0xA1,0xDB,0x56,0x43,0xB7,
+0x91,0x4A,0xCB,0xF3,0x82,0x7A,0x13,0x5C,0x08,0xAB,0x30,0x81,0xED,0x06,0x03,0x55,
+0x1D,0x1F,0x04,0x81,0xE5,0x30,0x81,0xE2,0x30,0x81,0xDF,0xA0,0x81,0xDC,0xA0,0x81,
+0xD9,0x86,0x35,0x68,0x74,0x74,0x70,0x3A,0x2F,0x2F,0x77,0x77,0x77,0x2E,0x74,0x72,
+0x75,0x73,0x74,0x63,0x65,0x6E,0x74,0x65,0x72,0x2E,0x64,0x65,0x2F,0x63,0x72,0x6C,
+0x2F,0x76,0x32,0x2F,0x74,0x63,0x5F,0x63,0x6C,0x61,0x73,0x73,0x5F,0x32,0x5F,0x63,
+0x61,0x5F,0x49,0x49,0x2E,0x63,0x72,0x6C,0x86,0x81,0x9F,0x6C,0x64,0x61,0x70,0x3A,
+0x2F,0x2F,0x77,0x77,0x77,0x2E,0x74,0x72,0x75,0x73,0x74,0x63,0x65,0x6E,0x74,0x65,
+0x72,0x2E,0x64,0x65,0x2F,0x43,0x4E,0x3D,0x54,0x43,0x25,0x32,0x30,0x54,0x72,0x75,
+0x73,0x74,0x43,0x65,0x6E,0x74,0x65,0x72,0x25,0x32,0x30,0x43,0x6C,0x61,0x73,0x73,
+0x25,0x32,0x30,0x32,0x25,0x32,0x30,0x43,0x41,0x25,0x32,0x30,0x49,0x49,0x2C,0x4F,
+0x3D,0x54,0x43,0x25,0x32,0x30,0x54,0x72,0x75,0x73,0x74,0x43,0x65,0x6E,0x74,0x65,
+0x72,0x25,0x32,0x30,0x47,0x6D,0x62,0x48,0x2C,0x4F,0x55,0x3D,0x72,0x6F,0x6F,0x74,
+0x63,0x65,0x72,0x74,0x73,0x2C,0x44,0x43,0x3D,0x74,0x72,0x75,0x73,0x74,0x63,0x65,
+0x6E,0x74,0x65,0x72,0x2C,0x44,0x43,0x3D,0x64,0x65,0x3F,0x63,0x65,0x72,0x74,0x69,
+0x66,0x69,0x63,0x61,0x74,0x65,0x52,0x65,0x76,0x6F,0x63,0x61,0x74,0x69,0x6F,0x6E,
+0x4C,0x69,0x73,0x74,0x3F,0x62,0x61,0x73,0x65,0x3F,0x30,0x0D,0x06,0x09,0x2A,0x86,
+0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x03,0x82,0x01,0x01,0x00,0x8C,0xD7,
+0xDF,0x7E,0xEE,0x1B,0x80,0x10,0xB3,0x83,0xF5,0xDB,0x11,0xEA,0x6B,0x4B,0xA8,0x92,
+0x18,0xD9,0xF7,0x07,0x39,0xF5,0x2C,0xBE,0x06,0x75,0x7A,0x68,0x53,0x15,0x1C,0xEA,
+0x4A,0xED,0x5E,0xFC,0x23,0xB2,0x13,0xA0,0xD3,0x09,0xFF,0xF6,0xF6,0x2E,0x6B,0x41,
+0x71,0x79,0xCD,0xE2,0x6D,0xFD,0xAE,0x59,0x6B,0x85,0x1D,0xB8,0x4E,0x22,0x9A,0xED,
+0x66,0x39,0x6E,0x4B,0x94,0xE6,0x55,0xFC,0x0B,0x1B,0x8B,0x77,0xC1,0x53,0x13,0x66,
+0x89,0xD9,0x28,0xD6,0x8B,0xF3,0x45,0x4A,0x63,0xB7,0xFD,0x7B,0x0B,0x61,0x5D,0xB8,
+0x6D,0xBE,0xC3,0xDC,0x5B,0x79,0xD2,0xED,0x86,0xE5,0xA2,0x4D,0xBE,0x5E,0x74,0x7C,
+0x6A,0xED,0x16,0x38,0x1F,0x7F,0x58,0x81,0x5A,0x1A,0xEB,0x32,0x88,0x2D,0xB2,0xF3,
+0x39,0x77,0x80,0xAF,0x5E,0xB6,0x61,0x75,0x29,0xDB,0x23,0x4D,0x88,0xCA,0x50,0x28,
+0xCB,0x85,0xD2,0xD3,0x10,0xA2,0x59,0x6E,0xD3,0x93,0x54,0x00,0x7A,0xA2,0x46,0x95,
+0x86,0x05,0x9C,0xA9,0x19,0x98,0xE5,0x31,0x72,0x0C,0x00,0xE2,0x67,0xD9,0x40,0xE0,
+0x24,0x33,0x7B,0x6F,0x2C,0xB9,0x5C,0xAB,0x65,0x9D,0x2C,0xAC,0x76,0xEA,0x35,0x99,
+0xF5,0x97,0xB9,0x0F,0x24,0xEC,0xC7,0x76,0x21,0x28,0x65,0xAE,0x57,0xE8,0x07,0x88,
+0x75,0x4A,0x56,0xA0,0xD2,0x05,0x3A,0xA4,0xE6,0x8D,0x92,0x88,0x2C,0xF3,0xF2,0xE1,
+0xC1,0xC6,0x61,0xDB,0x41,0xC5,0xC7,0x9B,0xF7,0x0E,0x1A,0x51,0x45,0xC2,0x61,0x6B,
+0xDC,0x64,0x27,0x17,0x8C,0x5A,0xB7,0xDA,0x74,0x28,0xCD,0x97,0xE4,0xBD,
+};
+
+
+/* subject:/O=Cybertrust, Inc/CN=Cybertrust Global Root */
+/* issuer :/O=Cybertrust, Inc/CN=Cybertrust Global Root */
+
+
+const unsigned char Cybertrust_Global_Root_certificate[933]={
+0x30,0x82,0x03,0xA1,0x30,0x82,0x02,0x89,0xA0,0x03,0x02,0x01,0x02,0x02,0x0B,0x04,
+0x00,0x00,0x00,0x00,0x01,0x0F,0x85,0xAA,0x2D,0x48,0x30,0x0D,0x06,0x09,0x2A,0x86,
+0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,0x3B,0x31,0x18,0x30,0x16,0x06,
+0x03,0x55,0x04,0x0A,0x13,0x0F,0x43,0x79,0x62,0x65,0x72,0x74,0x72,0x75,0x73,0x74,
+0x2C,0x20,0x49,0x6E,0x63,0x31,0x1F,0x30,0x1D,0x06,0x03,0x55,0x04,0x03,0x13,0x16,
+0x43,0x79,0x62,0x65,0x72,0x74,0x72,0x75,0x73,0x74,0x20,0x47,0x6C,0x6F,0x62,0x61,
+0x6C,0x20,0x52,0x6F,0x6F,0x74,0x30,0x1E,0x17,0x0D,0x30,0x36,0x31,0x32,0x31,0x35,
+0x30,0x38,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x32,0x31,0x31,0x32,0x31,0x35,0x30,
+0x38,0x30,0x30,0x30,0x30,0x5A,0x30,0x3B,0x31,0x18,0x30,0x16,0x06,0x03,0x55,0x04,
+0x0A,0x13,0x0F,0x43,0x79,0x62,0x65,0x72,0x74,0x72,0x75,0x73,0x74,0x2C,0x20,0x49,
+0x6E,0x63,0x31,0x1F,0x30,0x1D,0x06,0x03,0x55,0x04,0x03,0x13,0x16,0x43,0x79,0x62,
+0x65,0x72,0x74,0x72,0x75,0x73,0x74,0x20,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x20,0x52,
+0x6F,0x6F,0x74,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,
+0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,
+0x82,0x01,0x01,0x00,0xF8,0xC8,0xBC,0xBD,0x14,0x50,0x66,0x13,0xFF,0xF0,0xD3,0x79,
+0xEC,0x23,0xF2,0xB7,0x1A,0xC7,0x8E,0x85,0xF1,0x12,0x73,0xA6,0x19,0xAA,0x10,0xDB,
+0x9C,0xA2,0x65,0x74,0x5A,0x77,0x3E,0x51,0x7D,0x56,0xF6,0xDC,0x23,0xB6,0xD4,0xED,
+0x5F,0x58,0xB1,0x37,0x4D,0xD5,0x49,0x0E,0x6E,0xF5,0x6A,0x87,0xD6,0xD2,0x8C,0xD2,
+0x27,0xC6,0xE2,0xFF,0x36,0x9F,0x98,0x65,0xA0,0x13,0x4E,0xC6,0x2A,0x64,0x9B,0xD5,
+0x90,0x12,0xCF,0x14,0x06,0xF4,0x3B,0xE3,0xD4,0x28,0xBE,0xE8,0x0E,0xF8,0xAB,0x4E,
+0x48,0x94,0x6D,0x8E,0x95,0x31,0x10,0x5C,0xED,0xA2,0x2D,0xBD,0xD5,0x3A,0x6D,0xB2,
+0x1C,0xBB,0x60,0xC0,0x46,0x4B,0x01,0xF5,0x49,0xAE,0x7E,0x46,0x8A,0xD0,0x74,0x8D,
+0xA1,0x0C,0x02,0xCE,0xEE,0xFC,0xE7,0x8F,0xB8,0x6B,0x66,0xF3,0x7F,0x44,0x00,0xBF,
+0x66,0x25,0x14,0x2B,0xDD,0x10,0x30,0x1D,0x07,0x96,0x3F,0x4D,0xF6,0x6B,0xB8,0x8F,
+0xB7,0x7B,0x0C,0xA5,0x38,0xEB,0xDE,0x47,0xDB,0xD5,0x5D,0x39,0xFC,0x88,0xA7,0xF3,
+0xD7,0x2A,0x74,0xF1,0xE8,0x5A,0xA2,0x3B,0x9F,0x50,0xBA,0xA6,0x8C,0x45,0x35,0xC2,
+0x50,0x65,0x95,0xDC,0x63,0x82,0xEF,0xDD,0xBF,0x77,0x4D,0x9C,0x62,0xC9,0x63,0x73,
+0x16,0xD0,0x29,0x0F,0x49,0xA9,0x48,0xF0,0xB3,0xAA,0xB7,0x6C,0xC5,0xA7,0x30,0x39,
+0x40,0x5D,0xAE,0xC4,0xE2,0x5D,0x26,0x53,0xF0,0xCE,0x1C,0x23,0x08,0x61,0xA8,0x94,
+0x19,0xBA,0x04,0x62,0x40,0xEC,0x1F,0x38,0x70,0x77,0x12,0x06,0x71,0xA7,0x30,0x18,
+0x5D,0x25,0x27,0xA5,0x02,0x03,0x01,0x00,0x01,0xA3,0x81,0xA5,0x30,0x81,0xA2,0x30,
+0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,
+0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,
+0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0xB6,0x08,0x7B,0x0D,0x7A,
+0xCC,0xAC,0x20,0x4C,0x86,0x56,0x32,0x5E,0xCF,0xAB,0x6E,0x85,0x2D,0x70,0x57,0x30,
+0x3F,0x06,0x03,0x55,0x1D,0x1F,0x04,0x38,0x30,0x36,0x30,0x34,0xA0,0x32,0xA0,0x30,
+0x86,0x2E,0x68,0x74,0x74,0x70,0x3A,0x2F,0x2F,0x77,0x77,0x77,0x32,0x2E,0x70,0x75,
+0x62,0x6C,0x69,0x63,0x2D,0x74,0x72,0x75,0x73,0x74,0x2E,0x63,0x6F,0x6D,0x2F,0x63,
+0x72,0x6C,0x2F,0x63,0x74,0x2F,0x63,0x74,0x72,0x6F,0x6F,0x74,0x2E,0x63,0x72,0x6C,
+0x30,0x1F,0x06,0x03,0x55,0x1D,0x23,0x04,0x18,0x30,0x16,0x80,0x14,0xB6,0x08,0x7B,
+0x0D,0x7A,0xCC,0xAC,0x20,0x4C,0x86,0x56,0x32,0x5E,0xCF,0xAB,0x6E,0x85,0x2D,0x70,
+0x57,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,
+0x03,0x82,0x01,0x01,0x00,0x56,0xEF,0x0A,0x23,0xA0,0x54,0x4E,0x95,0x97,0xC9,0xF8,
+0x89,0xDA,0x45,0xC1,0xD4,0xA3,0x00,0x25,0xF4,0x1F,0x13,0xAB,0xB7,0xA3,0x85,0x58,
+0x69,0xC2,0x30,0xAD,0xD8,0x15,0x8A,0x2D,0xE3,0xC9,0xCD,0x81,0x5A,0xF8,0x73,0x23,
+0x5A,0xA7,0x7C,0x05,0xF3,0xFD,0x22,0x3B,0x0E,0xD1,0x06,0xC4,0xDB,0x36,0x4C,0x73,
+0x04,0x8E,0xE5,0xB0,0x22,0xE4,0xC5,0xF3,0x2E,0xA5,0xD9,0x23,0xE3,0xB8,0x4E,0x4A,
+0x20,0xA7,0x6E,0x02,0x24,0x9F,0x22,0x60,0x67,0x7B,0x8B,0x1D,0x72,0x09,0xC5,0x31,
+0x5C,0xE9,0x79,0x9F,0x80,0x47,0x3D,0xAD,0xA1,0x0B,0x07,0x14,0x3D,0x47,0xFF,0x03,
+0x69,0x1A,0x0C,0x0B,0x44,0xE7,0x63,0x25,0xA7,0x7F,0xB2,0xC9,0xB8,0x76,0x84,0xED,
+0x23,0xF6,0x7D,0x07,0xAB,0x45,0x7E,0xD3,0xDF,0xB3,0xBF,0xE9,0x8A,0xB6,0xCD,0xA8,
+0xA2,0x67,0x2B,0x52,0xD5,0xB7,0x65,0xF0,0x39,0x4C,0x63,0xA0,0x91,0x79,0x93,0x52,
+0x0F,0x54,0xDD,0x83,0xBB,0x9F,0xD1,0x8F,0xA7,0x53,0x73,0xC3,0xCB,0xFF,0x30,0xEC,
+0x7C,0x04,0xB8,0xD8,0x44,0x1F,0x93,0x5F,0x71,0x09,0x22,0xB7,0x6E,0x3E,0xEA,0x1C,
+0x03,0x4E,0x9D,0x1A,0x20,0x61,0xFB,0x81,0x37,0xEC,0x5E,0xFC,0x0A,0x45,0xAB,0xD7,
+0xE7,0x17,0x55,0xD0,0xA0,0xEA,0x60,0x9B,0xA6,0xF6,0xE3,0x8C,0x5B,0x29,0xC2,0x06,
+0x60,0x14,0x9D,0x2D,0x97,0x4C,0xA9,0x93,0x15,0x9D,0x61,0xC4,0x01,0x5F,0x48,0xD6,
+0x58,0xBD,0x56,0x31,0x12,0x4E,0x11,0xC8,0x21,0xE0,0xB3,0x11,0x91,0x65,0xDB,0xB4,
+0xA6,0x88,0x38,0xCE,0x55,
+};
+
+
+/* subject:/C=US/O=Entrust, Inc./OU=See www.entrust.net/legal-terms/OU=(c) 2012 Entrust, Inc. - for authorized use only/CN=Entrust Root Certification Authority - EC1 */
+/* issuer :/C=US/O=Entrust, Inc./OU=See www.entrust.net/legal-terms/OU=(c) 2012 Entrust, Inc. - for authorized use only/CN=Entrust Root Certification Authority - EC1 */
+
+
+const unsigned char Entrust_Root_Certification_Authority___EC1_certificate[765]={
+0x30,0x82,0x02,0xF9,0x30,0x82,0x02,0x80,0xA0,0x03,0x02,0x01,0x02,0x02,0x0D,0x00,
+0xA6,0x8B,0x79,0x29,0x00,0x00,0x00,0x00,0x50,0xD0,0x91,0xF9,0x30,0x0A,0x06,0x08,
+0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x03,0x30,0x81,0xBF,0x31,0x0B,0x30,0x09,0x06,
+0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x16,0x30,0x14,0x06,0x03,0x55,0x04,
+0x0A,0x13,0x0D,0x45,0x6E,0x74,0x72,0x75,0x73,0x74,0x2C,0x20,0x49,0x6E,0x63,0x2E,
+0x31,0x28,0x30,0x26,0x06,0x03,0x55,0x04,0x0B,0x13,0x1F,0x53,0x65,0x65,0x20,0x77,
+0x77,0x77,0x2E,0x65,0x6E,0x74,0x72,0x75,0x73,0x74,0x2E,0x6E,0x65,0x74,0x2F,0x6C,
+0x65,0x67,0x61,0x6C,0x2D,0x74,0x65,0x72,0x6D,0x73,0x31,0x39,0x30,0x37,0x06,0x03,
+0x55,0x04,0x0B,0x13,0x30,0x28,0x63,0x29,0x20,0x32,0x30,0x31,0x32,0x20,0x45,0x6E,
+0x74,0x72,0x75,0x73,0x74,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x20,0x2D,0x20,0x66,0x6F,
+0x72,0x20,0x61,0x75,0x74,0x68,0x6F,0x72,0x69,0x7A,0x65,0x64,0x20,0x75,0x73,0x65,
+0x20,0x6F,0x6E,0x6C,0x79,0x31,0x33,0x30,0x31,0x06,0x03,0x55,0x04,0x03,0x13,0x2A,
+0x45,0x6E,0x74,0x72,0x75,0x73,0x74,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x65,0x72,
+0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,
+0x72,0x69,0x74,0x79,0x20,0x2D,0x20,0x45,0x43,0x31,0x30,0x1E,0x17,0x0D,0x31,0x32,
+0x31,0x32,0x31,0x38,0x31,0x35,0x32,0x35,0x33,0x36,0x5A,0x17,0x0D,0x33,0x37,0x31,
+0x32,0x31,0x38,0x31,0x35,0x35,0x35,0x33,0x36,0x5A,0x30,0x81,0xBF,0x31,0x0B,0x30,
+0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x16,0x30,0x14,0x06,0x03,
+0x55,0x04,0x0A,0x13,0x0D,0x45,0x6E,0x74,0x72,0x75,0x73,0x74,0x2C,0x20,0x49,0x6E,
+0x63,0x2E,0x31,0x28,0x30,0x26,0x06,0x03,0x55,0x04,0x0B,0x13,0x1F,0x53,0x65,0x65,
+0x20,0x77,0x77,0x77,0x2E,0x65,0x6E,0x74,0x72,0x75,0x73,0x74,0x2E,0x6E,0x65,0x74,
+0x2F,0x6C,0x65,0x67,0x61,0x6C,0x2D,0x74,0x65,0x72,0x6D,0x73,0x31,0x39,0x30,0x37,
+0x06,0x03,0x55,0x04,0x0B,0x13,0x30,0x28,0x63,0x29,0x20,0x32,0x30,0x31,0x32,0x20,
+0x45,0x6E,0x74,0x72,0x75,0x73,0x74,0x2C,0x20,0x49,0x6E,0x63,0x2E,0x20,0x2D,0x20,
+0x66,0x6F,0x72,0x20,0x61,0x75,0x74,0x68,0x6F,0x72,0x69,0x7A,0x65,0x64,0x20,0x75,
+0x73,0x65,0x20,0x6F,0x6E,0x6C,0x79,0x31,0x33,0x30,0x31,0x06,0x03,0x55,0x04,0x03,
+0x13,0x2A,0x45,0x6E,0x74,0x72,0x75,0x73,0x74,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,
+0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,
+0x68,0x6F,0x72,0x69,0x74,0x79,0x20,0x2D,0x20,0x45,0x43,0x31,0x30,0x76,0x30,0x10,
+0x06,0x07,0x2A,0x86,0x48,0xCE,0x3D,0x02,0x01,0x06,0x05,0x2B,0x81,0x04,0x00,0x22,
+0x03,0x62,0x00,0x04,0x84,0x13,0xC9,0xD0,0xBA,0x6D,0x41,0x7B,0xE2,0x6C,0xD0,0xEB,
+0x55,0x5F,0x66,0x02,0x1A,0x24,0xF4,0x5B,0x89,0x69,0x47,0xE3,0xB8,0xC2,0x7D,0xF1,
+0xF2,0x02,0xC5,0x9F,0xA0,0xF6,0x5B,0xD5,0x8B,0x06,0x19,0x86,0x4F,0x53,0x10,0x6D,
+0x07,0x24,0x27,0xA1,0xA0,0xF8,0xD5,0x47,0x19,0x61,0x4C,0x7D,0xCA,0x93,0x27,0xEA,
+0x74,0x0C,0xEF,0x6F,0x96,0x09,0xFE,0x63,0xEC,0x70,0x5D,0x36,0xAD,0x67,0x77,0xAE,
+0xC9,0x9D,0x7C,0x55,0x44,0x3A,0xA2,0x63,0x51,0x1F,0xF5,0xE3,0x62,0xD4,0xA9,0x47,
+0x07,0x3E,0xCC,0x20,0xA3,0x42,0x30,0x40,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,
+0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,
+0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,
+0x04,0x16,0x04,0x14,0xB7,0x63,0xE7,0x1A,0xDD,0x8D,0xE9,0x08,0xA6,0x55,0x83,0xA4,
+0xE0,0x6A,0x50,0x41,0x65,0x11,0x42,0x49,0x30,0x0A,0x06,0x08,0x2A,0x86,0x48,0xCE,
+0x3D,0x04,0x03,0x03,0x03,0x67,0x00,0x30,0x64,0x02,0x30,0x61,0x79,0xD8,0xE5,0x42,
+0x47,0xDF,0x1C,0xAE,0x53,0x99,0x17,0xB6,0x6F,0x1C,0x7D,0xE1,0xBF,0x11,0x94,0xD1,
+0x03,0x88,0x75,0xE4,0x8D,0x89,0xA4,0x8A,0x77,0x46,0xDE,0x6D,0x61,0xEF,0x02,0xF5,
+0xFB,0xB5,0xDF,0xCC,0xFE,0x4E,0xFF,0xFE,0xA9,0xE6,0xA7,0x02,0x30,0x5B,0x99,0xD7,
+0x85,0x37,0x06,0xB5,0x7B,0x08,0xFD,0xEB,0x27,0x8B,0x4A,0x94,0xF9,0xE1,0xFA,0xA7,
+0x8E,0x26,0x08,0xE8,0x7C,0x92,0x68,0x6D,0x73,0xD8,0x6F,0x26,0xAC,0x21,0x02,0xB8,
+0x99,0xB7,0x26,0x41,0x5B,0x25,0x60,0xAE,0xD0,0x48,0x1A,0xEE,0x06,
+};
+
+
+/* subject:/C=US/O=GeoTrust Inc./OU=(c) 2007 GeoTrust Inc. - For authorized use only/CN=GeoTrust Primary Certification Authority - G2 */
+/* issuer :/C=US/O=GeoTrust Inc./OU=(c) 2007 GeoTrust Inc. - For authorized use only/CN=GeoTrust Primary Certification Authority - G2 */
+
+
+const unsigned char GeoTrust_Primary_Certification_Authority___G2_certificate[690]={
+0x30,0x82,0x02,0xAE,0x30,0x82,0x02,0x35,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x3C,
+0xB2,0xF4,0x48,0x0A,0x00,0xE2,0xFE,0xEB,0x24,0x3B,0x5E,0x60,0x3E,0xC3,0x6B,0x30,
+0x0A,0x06,0x08,0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x03,0x30,0x81,0x98,0x31,0x0B,
+0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x16,0x30,0x14,0x06,
+0x03,0x55,0x04,0x0A,0x13,0x0D,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,0x74,0x20,0x49,
+0x6E,0x63,0x2E,0x31,0x39,0x30,0x37,0x06,0x03,0x55,0x04,0x0B,0x13,0x30,0x28,0x63,
+0x29,0x20,0x32,0x30,0x30,0x37,0x20,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,0x74,0x20,
+0x49,0x6E,0x63,0x2E,0x20,0x2D,0x20,0x46,0x6F,0x72,0x20,0x61,0x75,0x74,0x68,0x6F,
+0x72,0x69,0x7A,0x65,0x64,0x20,0x75,0x73,0x65,0x20,0x6F,0x6E,0x6C,0x79,0x31,0x36,
+0x30,0x34,0x06,0x03,0x55,0x04,0x03,0x13,0x2D,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,
+0x74,0x20,0x50,0x72,0x69,0x6D,0x61,0x72,0x79,0x20,0x43,0x65,0x72,0x74,0x69,0x66,
+0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,
+0x79,0x20,0x2D,0x20,0x47,0x32,0x30,0x1E,0x17,0x0D,0x30,0x37,0x31,0x31,0x30,0x35,
+0x30,0x30,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x33,0x38,0x30,0x31,0x31,0x38,0x32,
+0x33,0x35,0x39,0x35,0x39,0x5A,0x30,0x81,0x98,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,
+0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x16,0x30,0x14,0x06,0x03,0x55,0x04,0x0A,0x13,
+0x0D,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,0x74,0x20,0x49,0x6E,0x63,0x2E,0x31,0x39,
+0x30,0x37,0x06,0x03,0x55,0x04,0x0B,0x13,0x30,0x28,0x63,0x29,0x20,0x32,0x30,0x30,
+0x37,0x20,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,0x74,0x20,0x49,0x6E,0x63,0x2E,0x20,
+0x2D,0x20,0x46,0x6F,0x72,0x20,0x61,0x75,0x74,0x68,0x6F,0x72,0x69,0x7A,0x65,0x64,
+0x20,0x75,0x73,0x65,0x20,0x6F,0x6E,0x6C,0x79,0x31,0x36,0x30,0x34,0x06,0x03,0x55,
+0x04,0x03,0x13,0x2D,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,0x74,0x20,0x50,0x72,0x69,
+0x6D,0x61,0x72,0x79,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,
+0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x20,0x2D,0x20,0x47,
+0x32,0x30,0x76,0x30,0x10,0x06,0x07,0x2A,0x86,0x48,0xCE,0x3D,0x02,0x01,0x06,0x05,
+0x2B,0x81,0x04,0x00,0x22,0x03,0x62,0x00,0x04,0x15,0xB1,0xE8,0xFD,0x03,0x15,0x43,
+0xE5,0xAC,0xEB,0x87,0x37,0x11,0x62,0xEF,0xD2,0x83,0x36,0x52,0x7D,0x45,0x57,0x0B,
+0x4A,0x8D,0x7B,0x54,0x3B,0x3A,0x6E,0x5F,0x15,0x02,0xC0,0x50,0xA6,0xCF,0x25,0x2F,
+0x7D,0xCA,0x48,0xB8,0xC7,0x50,0x63,0x1C,0x2A,0x21,0x08,0x7C,0x9A,0x36,0xD8,0x0B,
+0xFE,0xD1,0x26,0xC5,0x58,0x31,0x30,0x28,0x25,0xF3,0x5D,0x5D,0xA3,0xB8,0xB6,0xA5,
+0xB4,0x92,0xED,0x6C,0x2C,0x9F,0xEB,0xDD,0x43,0x89,0xA2,0x3C,0x4B,0x48,0x91,0x1D,
+0x50,0xEC,0x26,0xDF,0xD6,0x60,0x2E,0xBD,0x21,0xA3,0x42,0x30,0x40,0x30,0x0F,0x06,
+0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x0E,
+0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x1D,
+0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x15,0x5F,0x35,0x57,0x51,0x55,0xFB,
+0x25,0xB2,0xAD,0x03,0x69,0xFC,0x01,0xA3,0xFA,0xBE,0x11,0x55,0xD5,0x30,0x0A,0x06,
+0x08,0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x03,0x03,0x67,0x00,0x30,0x64,0x02,0x30,
+0x64,0x96,0x59,0xA6,0xE8,0x09,0xDE,0x8B,0xBA,0xFA,0x5A,0x88,0x88,0xF0,0x1F,0x91,
+0xD3,0x46,0xA8,0xF2,0x4A,0x4C,0x02,0x63,0xFB,0x6C,0x5F,0x38,0xDB,0x2E,0x41,0x93,
+0xA9,0x0E,0xE6,0x9D,0xDC,0x31,0x1C,0xB2,0xA0,0xA7,0x18,0x1C,0x79,0xE1,0xC7,0x36,
+0x02,0x30,0x3A,0x56,0xAF,0x9A,0x74,0x6C,0xF6,0xFB,0x83,0xE0,0x33,0xD3,0x08,0x5F,
+0xA1,0x9C,0xC2,0x5B,0x9F,0x46,0xD6,0xB6,0xCB,0x91,0x06,0x63,0xA2,0x06,0xE7,0x33,
+0xAC,0x3E,0xA8,0x81,0x12,0xD0,0xCB,0xBA,0xD0,0x92,0x0B,0xB6,0x9E,0x96,0xAA,0x04,
+0x0F,0x8A,
+};
+
+
+/* subject:/C=US/O=GeoTrust Inc./CN=GeoTrust Global CA 2 */
+/* issuer :/C=US/O=GeoTrust Inc./CN=GeoTrust Global CA 2 */
+
+
+const unsigned char GeoTrust_Global_CA_2_certificate[874]={
+0x30,0x82,0x03,0x66,0x30,0x82,0x02,0x4E,0xA0,0x03,0x02,0x01,0x02,0x02,0x01,0x01,
+0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,
+0x44,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x16,
+0x30,0x14,0x06,0x03,0x55,0x04,0x0A,0x13,0x0D,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,
+0x74,0x20,0x49,0x6E,0x63,0x2E,0x31,0x1D,0x30,0x1B,0x06,0x03,0x55,0x04,0x03,0x13,
+0x14,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,0x74,0x20,0x47,0x6C,0x6F,0x62,0x61,0x6C,
+0x20,0x43,0x41,0x20,0x32,0x30,0x1E,0x17,0x0D,0x30,0x34,0x30,0x33,0x30,0x34,0x30,
+0x35,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x31,0x39,0x30,0x33,0x30,0x34,0x30,0x35,
+0x30,0x30,0x30,0x30,0x5A,0x30,0x44,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,
+0x13,0x02,0x55,0x53,0x31,0x16,0x30,0x14,0x06,0x03,0x55,0x04,0x0A,0x13,0x0D,0x47,
+0x65,0x6F,0x54,0x72,0x75,0x73,0x74,0x20,0x49,0x6E,0x63,0x2E,0x31,0x1D,0x30,0x1B,
+0x06,0x03,0x55,0x04,0x03,0x13,0x14,0x47,0x65,0x6F,0x54,0x72,0x75,0x73,0x74,0x20,
+0x47,0x6C,0x6F,0x62,0x61,0x6C,0x20,0x43,0x41,0x20,0x32,0x30,0x82,0x01,0x22,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,
+0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01,0x01,0x00,0xEF,0x3C,0x4D,0x40,
+0x3D,0x10,0xDF,0x3B,0x53,0x00,0xE1,0x67,0xFE,0x94,0x60,0x15,0x3E,0x85,0x88,0xF1,
+0x89,0x0D,0x90,0xC8,0x28,0x23,0x99,0x05,0xE8,0x2B,0x20,0x9D,0xC6,0xF3,0x60,0x46,
+0xD8,0xC1,0xB2,0xD5,0x8C,0x31,0xD9,0xDC,0x20,0x79,0x24,0x81,0xBF,0x35,0x32,0xFC,
+0x63,0x69,0xDB,0xB1,0x2A,0x6B,0xEE,0x21,0x58,0xF2,0x08,0xE9,0x78,0xCB,0x6F,0xCB,
+0xFC,0x16,0x52,0xC8,0x91,0xC4,0xFF,0x3D,0x73,0xDE,0xB1,0x3E,0xA7,0xC2,0x7D,0x66,
+0xC1,0xF5,0x7E,0x52,0x24,0x1A,0xE2,0xD5,0x67,0x91,0xD0,0x82,0x10,0xD7,0x78,0x4B,
+0x4F,0x2B,0x42,0x39,0xBD,0x64,0x2D,0x40,0xA0,0xB0,0x10,0xD3,0x38,0x48,0x46,0x88,
+0xA1,0x0C,0xBB,0x3A,0x33,0x2A,0x62,0x98,0xFB,0x00,0x9D,0x13,0x59,0x7F,0x6F,0x3B,
+0x72,0xAA,0xEE,0xA6,0x0F,0x86,0xF9,0x05,0x61,0xEA,0x67,0x7F,0x0C,0x37,0x96,0x8B,
+0xE6,0x69,0x16,0x47,0x11,0xC2,0x27,0x59,0x03,0xB3,0xA6,0x60,0xC2,0x21,0x40,0x56,
+0xFA,0xA0,0xC7,0x7D,0x3A,0x13,0xE3,0xEC,0x57,0xC7,0xB3,0xD6,0xAE,0x9D,0x89,0x80,
+0xF7,0x01,0xE7,0x2C,0xF6,0x96,0x2B,0x13,0x0D,0x79,0x2C,0xD9,0xC0,0xE4,0x86,0x7B,
+0x4B,0x8C,0x0C,0x72,0x82,0x8A,0xFB,0x17,0xCD,0x00,0x6C,0x3A,0x13,0x3C,0xB0,0x84,
+0x87,0x4B,0x16,0x7A,0x29,0xB2,0x4F,0xDB,0x1D,0xD4,0x0B,0xF3,0x66,0x37,0xBD,0xD8,
+0xF6,0x57,0xBB,0x5E,0x24,0x7A,0xB8,0x3C,0x8B,0xB9,0xFA,0x92,0x1A,0x1A,0x84,0x9E,
+0xD8,0x74,0x8F,0xAA,0x1B,0x7F,0x5E,0xF4,0xFE,0x45,0x22,0x21,0x02,0x03,0x01,0x00,
+0x01,0xA3,0x63,0x30,0x61,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,
+0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,
+0x14,0x71,0x38,0x36,0xF2,0x02,0x31,0x53,0x47,0x2B,0x6E,0xBA,0x65,0x46,0xA9,0x10,
+0x15,0x58,0x20,0x05,0x09,0x30,0x1F,0x06,0x03,0x55,0x1D,0x23,0x04,0x18,0x30,0x16,
+0x80,0x14,0x71,0x38,0x36,0xF2,0x02,0x31,0x53,0x47,0x2B,0x6E,0xBA,0x65,0x46,0xA9,
+0x10,0x15,0x58,0x20,0x05,0x09,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,
+0x04,0x04,0x03,0x02,0x01,0x86,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,
+0x01,0x01,0x05,0x05,0x00,0x03,0x82,0x01,0x01,0x00,0x03,0xF7,0xB5,0x2B,0xAB,0x5D,
+0x10,0xFC,0x7B,0xB2,0xB2,0x5E,0xAC,0x9B,0x0E,0x7E,0x53,0x78,0x59,0x3E,0x42,0x04,
+0xFE,0x75,0xA3,0xAD,0xAC,0x81,0x4E,0xD7,0x02,0x8B,0x5E,0xC4,0x2D,0xC8,0x52,0x76,
+0xC7,0x2C,0x1F,0xFC,0x81,0x32,0x98,0xD1,0x4B,0xC6,0x92,0x93,0x33,0x35,0x31,0x2F,
+0xFC,0xD8,0x1D,0x44,0xDD,0xE0,0x81,0x7F,0x9D,0xE9,0x8B,0xE1,0x64,0x91,0x62,0x0B,
+0x39,0x08,0x8C,0xAC,0x74,0x9D,0x59,0xD9,0x7A,0x59,0x52,0x97,0x11,0xB9,0x16,0x7B,
+0x6F,0x45,0xD3,0x96,0xD9,0x31,0x7D,0x02,0x36,0x0F,0x9C,0x3B,0x6E,0xCF,0x2C,0x0D,
+0x03,0x46,0x45,0xEB,0xA0,0xF4,0x7F,0x48,0x44,0xC6,0x08,0x40,0xCC,0xDE,0x1B,0x70,
+0xB5,0x29,0xAD,0xBA,0x8B,0x3B,0x34,0x65,0x75,0x1B,0x71,0x21,0x1D,0x2C,0x14,0x0A,
+0xB0,0x96,0x95,0xB8,0xD6,0xEA,0xF2,0x65,0xFB,0x29,0xBA,0x4F,0xEA,0x91,0x93,0x74,
+0x69,0xB6,0xF2,0xFF,0xE1,0x1A,0xD0,0x0C,0xD1,0x76,0x85,0xCB,0x8A,0x25,0xBD,0x97,
+0x5E,0x2C,0x6F,0x15,0x99,0x26,0xE7,0xB6,0x29,0xFF,0x22,0xEC,0xC9,0x02,0xC7,0x56,
+0x00,0xCD,0x49,0xB9,0xB3,0x6C,0x7B,0x53,0x04,0x1A,0xE2,0xA8,0xC9,0xAA,0x12,0x05,
+0x23,0xC2,0xCE,0xE7,0xBB,0x04,0x02,0xCC,0xC0,0x47,0xA2,0xE4,0xC4,0x29,0x2F,0x5B,
+0x45,0x57,0x89,0x51,0xEE,0x3C,0xEB,0x52,0x08,0xFF,0x07,0x35,0x1E,0x9F,0x35,0x6A,
+0x47,0x4A,0x56,0x98,0xD1,0x5A,0x85,0x1F,0x8C,0xF5,0x22,0xBF,0xAB,0xCE,0x83,0xF3,
+0xE2,0x22,0x29,0xAE,0x7D,0x83,0x40,0xA8,0xBA,0x6C,
+};
+
+
+/* subject:/C=GB/ST=Greater Manchester/L=Salford/O=COMODO CA Limited/CN=COMODO RSA Certification Authority */
+/* issuer :/C=GB/ST=Greater Manchester/L=Salford/O=COMODO CA Limited/CN=COMODO RSA Certification Authority */
+
+
+const unsigned char COMODO_RSA_Certification_Authority_certificate[1500]={
+0x30,0x82,0x05,0xD8,0x30,0x82,0x03,0xC0,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x4C,
+0xAA,0xF9,0xCA,0xDB,0x63,0x6F,0xE0,0x1F,0xF7,0x4E,0xD8,0x5B,0x03,0x86,0x9D,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x0C,0x05,0x00,0x30,0x81,
+0x85,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x47,0x42,0x31,0x1B,
+0x30,0x19,0x06,0x03,0x55,0x04,0x08,0x13,0x12,0x47,0x72,0x65,0x61,0x74,0x65,0x72,
+0x20,0x4D,0x61,0x6E,0x63,0x68,0x65,0x73,0x74,0x65,0x72,0x31,0x10,0x30,0x0E,0x06,
+0x03,0x55,0x04,0x07,0x13,0x07,0x53,0x61,0x6C,0x66,0x6F,0x72,0x64,0x31,0x1A,0x30,
+0x18,0x06,0x03,0x55,0x04,0x0A,0x13,0x11,0x43,0x4F,0x4D,0x4F,0x44,0x4F,0x20,0x43,
+0x41,0x20,0x4C,0x69,0x6D,0x69,0x74,0x65,0x64,0x31,0x2B,0x30,0x29,0x06,0x03,0x55,
+0x04,0x03,0x13,0x22,0x43,0x4F,0x4D,0x4F,0x44,0x4F,0x20,0x52,0x53,0x41,0x20,0x43,
+0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,
+0x68,0x6F,0x72,0x69,0x74,0x79,0x30,0x1E,0x17,0x0D,0x31,0x30,0x30,0x31,0x31,0x39,
+0x30,0x30,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x33,0x38,0x30,0x31,0x31,0x38,0x32,
+0x33,0x35,0x39,0x35,0x39,0x5A,0x30,0x81,0x85,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,
+0x04,0x06,0x13,0x02,0x47,0x42,0x31,0x1B,0x30,0x19,0x06,0x03,0x55,0x04,0x08,0x13,
+0x12,0x47,0x72,0x65,0x61,0x74,0x65,0x72,0x20,0x4D,0x61,0x6E,0x63,0x68,0x65,0x73,
+0x74,0x65,0x72,0x31,0x10,0x30,0x0E,0x06,0x03,0x55,0x04,0x07,0x13,0x07,0x53,0x61,
+0x6C,0x66,0x6F,0x72,0x64,0x31,0x1A,0x30,0x18,0x06,0x03,0x55,0x04,0x0A,0x13,0x11,
+0x43,0x4F,0x4D,0x4F,0x44,0x4F,0x20,0x43,0x41,0x20,0x4C,0x69,0x6D,0x69,0x74,0x65,
+0x64,0x31,0x2B,0x30,0x29,0x06,0x03,0x55,0x04,0x03,0x13,0x22,0x43,0x4F,0x4D,0x4F,
+0x44,0x4F,0x20,0x52,0x53,0x41,0x20,0x43,0x65,0x72,0x74,0x69,0x66,0x69,0x63,0x61,
+0x74,0x69,0x6F,0x6E,0x20,0x41,0x75,0x74,0x68,0x6F,0x72,0x69,0x74,0x79,0x30,0x82,
+0x02,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,
+0x00,0x03,0x82,0x02,0x0F,0x00,0x30,0x82,0x02,0x0A,0x02,0x82,0x02,0x01,0x00,0x91,
+0xE8,0x54,0x92,0xD2,0x0A,0x56,0xB1,0xAC,0x0D,0x24,0xDD,0xC5,0xCF,0x44,0x67,0x74,
+0x99,0x2B,0x37,0xA3,0x7D,0x23,0x70,0x00,0x71,0xBC,0x53,0xDF,0xC4,0xFA,0x2A,0x12,
+0x8F,0x4B,0x7F,0x10,0x56,0xBD,0x9F,0x70,0x72,0xB7,0x61,0x7F,0xC9,0x4B,0x0F,0x17,
+0xA7,0x3D,0xE3,0xB0,0x04,0x61,0xEE,0xFF,0x11,0x97,0xC7,0xF4,0x86,0x3E,0x0A,0xFA,
+0x3E,0x5C,0xF9,0x93,0xE6,0x34,0x7A,0xD9,0x14,0x6B,0xE7,0x9C,0xB3,0x85,0xA0,0x82,
+0x7A,0x76,0xAF,0x71,0x90,0xD7,0xEC,0xFD,0x0D,0xFA,0x9C,0x6C,0xFA,0xDF,0xB0,0x82,
+0xF4,0x14,0x7E,0xF9,0xBE,0xC4,0xA6,0x2F,0x4F,0x7F,0x99,0x7F,0xB5,0xFC,0x67,0x43,
+0x72,0xBD,0x0C,0x00,0xD6,0x89,0xEB,0x6B,0x2C,0xD3,0xED,0x8F,0x98,0x1C,0x14,0xAB,
+0x7E,0xE5,0xE3,0x6E,0xFC,0xD8,0xA8,0xE4,0x92,0x24,0xDA,0x43,0x6B,0x62,0xB8,0x55,
+0xFD,0xEA,0xC1,0xBC,0x6C,0xB6,0x8B,0xF3,0x0E,0x8D,0x9A,0xE4,0x9B,0x6C,0x69,0x99,
+0xF8,0x78,0x48,0x30,0x45,0xD5,0xAD,0xE1,0x0D,0x3C,0x45,0x60,0xFC,0x32,0x96,0x51,
+0x27,0xBC,0x67,0xC3,0xCA,0x2E,0xB6,0x6B,0xEA,0x46,0xC7,0xC7,0x20,0xA0,0xB1,0x1F,
+0x65,0xDE,0x48,0x08,0xBA,0xA4,0x4E,0xA9,0xF2,0x83,0x46,0x37,0x84,0xEB,0xE8,0xCC,
+0x81,0x48,0x43,0x67,0x4E,0x72,0x2A,0x9B,0x5C,0xBD,0x4C,0x1B,0x28,0x8A,0x5C,0x22,
+0x7B,0xB4,0xAB,0x98,0xD9,0xEE,0xE0,0x51,0x83,0xC3,0x09,0x46,0x4E,0x6D,0x3E,0x99,
+0xFA,0x95,0x17,0xDA,0x7C,0x33,0x57,0x41,0x3C,0x8D,0x51,0xED,0x0B,0xB6,0x5C,0xAF,
+0x2C,0x63,0x1A,0xDF,0x57,0xC8,0x3F,0xBC,0xE9,0x5D,0xC4,0x9B,0xAF,0x45,0x99,0xE2,
+0xA3,0x5A,0x24,0xB4,0xBA,0xA9,0x56,0x3D,0xCF,0x6F,0xAA,0xFF,0x49,0x58,0xBE,0xF0,
+0xA8,0xFF,0xF4,0xB8,0xAD,0xE9,0x37,0xFB,0xBA,0xB8,0xF4,0x0B,0x3A,0xF9,0xE8,0x43,
+0x42,0x1E,0x89,0xD8,0x84,0xCB,0x13,0xF1,0xD9,0xBB,0xE1,0x89,0x60,0xB8,0x8C,0x28,
+0x56,0xAC,0x14,0x1D,0x9C,0x0A,0xE7,0x71,0xEB,0xCF,0x0E,0xDD,0x3D,0xA9,0x96,0xA1,
+0x48,0xBD,0x3C,0xF7,0xAF,0xB5,0x0D,0x22,0x4C,0xC0,0x11,0x81,0xEC,0x56,0x3B,0xF6,
+0xD3,0xA2,0xE2,0x5B,0xB7,0xB2,0x04,0x22,0x52,0x95,0x80,0x93,0x69,0xE8,0x8E,0x4C,
+0x65,0xF1,0x91,0x03,0x2D,0x70,0x74,0x02,0xEA,0x8B,0x67,0x15,0x29,0x69,0x52,0x02,
+0xBB,0xD7,0xDF,0x50,0x6A,0x55,0x46,0xBF,0xA0,0xA3,0x28,0x61,0x7F,0x70,0xD0,0xC3,
+0xA2,0xAA,0x2C,0x21,0xAA,0x47,0xCE,0x28,0x9C,0x06,0x45,0x76,0xBF,0x82,0x18,0x27,
+0xB4,0xD5,0xAE,0xB4,0xCB,0x50,0xE6,0x6B,0xF4,0x4C,0x86,0x71,0x30,0xE9,0xA6,0xDF,
+0x16,0x86,0xE0,0xD8,0xFF,0x40,0xDD,0xFB,0xD0,0x42,0x88,0x7F,0xA3,0x33,0x3A,0x2E,
+0x5C,0x1E,0x41,0x11,0x81,0x63,0xCE,0x18,0x71,0x6B,0x2B,0xEC,0xA6,0x8A,0xB7,0x31,
+0x5C,0x3A,0x6A,0x47,0xE0,0xC3,0x79,0x59,0xD6,0x20,0x1A,0xAF,0xF2,0x6A,0x98,0xAA,
+0x72,0xBC,0x57,0x4A,0xD2,0x4B,0x9D,0xBB,0x10,0xFC,0xB0,0x4C,0x41,0xE5,0xED,0x1D,
+0x3D,0x5E,0x28,0x9D,0x9C,0xCC,0xBF,0xB3,0x51,0xDA,0xA7,0x47,0xE5,0x84,0x53,0x02,
+0x03,0x01,0x00,0x01,0xA3,0x42,0x30,0x40,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,
+0x16,0x04,0x14,0xBB,0xAF,0x7E,0x02,0x3D,0xFA,0xA6,0xF1,0x3C,0x84,0x8E,0xAD,0xEE,
+0x38,0x98,0xEC,0xD9,0x32,0x32,0xD4,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,
+0xFF,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,
+0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,
+0xF7,0x0D,0x01,0x01,0x0C,0x05,0x00,0x03,0x82,0x02,0x01,0x00,0x0A,0xF1,0xD5,0x46,
+0x84,0xB7,0xAE,0x51,0xBB,0x6C,0xB2,0x4D,0x41,0x14,0x00,0x93,0x4C,0x9C,0xCB,0xE5,
+0xC0,0x54,0xCF,0xA0,0x25,0x8E,0x02,0xF9,0xFD,0xB0,0xA2,0x0D,0xF5,0x20,0x98,0x3C,
+0x13,0x2D,0xAC,0x56,0xA2,0xB0,0xD6,0x7E,0x11,0x92,0xE9,0x2E,0xBA,0x9E,0x2E,0x9A,
+0x72,0xB1,0xBD,0x19,0x44,0x6C,0x61,0x35,0xA2,0x9A,0xB4,0x16,0x12,0x69,0x5A,0x8C,
+0xE1,0xD7,0x3E,0xA4,0x1A,0xE8,0x2F,0x03,0xF4,0xAE,0x61,0x1D,0x10,0x1B,0x2A,0xA4,
+0x8B,0x7A,0xC5,0xFE,0x05,0xA6,0xE1,0xC0,0xD6,0xC8,0xFE,0x9E,0xAE,0x8F,0x2B,0xBA,
+0x3D,0x99,0xF8,0xD8,0x73,0x09,0x58,0x46,0x6E,0xA6,0x9C,0xF4,0xD7,0x27,0xD3,0x95,
+0xDA,0x37,0x83,0x72,0x1C,0xD3,0x73,0xE0,0xA2,0x47,0x99,0x03,0x38,0x5D,0xD5,0x49,
+0x79,0x00,0x29,0x1C,0xC7,0xEC,0x9B,0x20,0x1C,0x07,0x24,0x69,0x57,0x78,0xB2,0x39,
+0xFC,0x3A,0x84,0xA0,0xB5,0x9C,0x7C,0x8D,0xBF,0x2E,0x93,0x62,0x27,0xB7,0x39,0xDA,
+0x17,0x18,0xAE,0xBD,0x3C,0x09,0x68,0xFF,0x84,0x9B,0x3C,0xD5,0xD6,0x0B,0x03,0xE3,
+0x57,0x9E,0x14,0xF7,0xD1,0xEB,0x4F,0xC8,0xBD,0x87,0x23,0xB7,0xB6,0x49,0x43,0x79,
+0x85,0x5C,0xBA,0xEB,0x92,0x0B,0xA1,0xC6,0xE8,0x68,0xA8,0x4C,0x16,0xB1,0x1A,0x99,
+0x0A,0xE8,0x53,0x2C,0x92,0xBB,0xA1,0x09,0x18,0x75,0x0C,0x65,0xA8,0x7B,0xCB,0x23,
+0xB7,0x1A,0xC2,0x28,0x85,0xC3,0x1B,0xFF,0xD0,0x2B,0x62,0xEF,0xA4,0x7B,0x09,0x91,
+0x98,0x67,0x8C,0x14,0x01,0xCD,0x68,0x06,0x6A,0x63,0x21,0x75,0x03,0x80,0x88,0x8A,
+0x6E,0x81,0xC6,0x85,0xF2,0xA9,0xA4,0x2D,0xE7,0xF4,0xA5,0x24,0x10,0x47,0x83,0xCA,
+0xCD,0xF4,0x8D,0x79,0x58,0xB1,0x06,0x9B,0xE7,0x1A,0x2A,0xD9,0x9D,0x01,0xD7,0x94,
+0x7D,0xED,0x03,0x4A,0xCA,0xF0,0xDB,0xE8,0xA9,0x01,0x3E,0xF5,0x56,0x99,0xC9,0x1E,
+0x8E,0x49,0x3D,0xBB,0xE5,0x09,0xB9,0xE0,0x4F,0x49,0x92,0x3D,0x16,0x82,0x40,0xCC,
+0xCC,0x59,0xC6,0xE6,0x3A,0xED,0x12,0x2E,0x69,0x3C,0x6C,0x95,0xB1,0xFD,0xAA,0x1D,
+0x7B,0x7F,0x86,0xBE,0x1E,0x0E,0x32,0x46,0xFB,0xFB,0x13,0x8F,0x75,0x7F,0x4C,0x8B,
+0x4B,0x46,0x63,0xFE,0x00,0x34,0x40,0x70,0xC1,0xC3,0xB9,0xA1,0xDD,0xA6,0x70,0xE2,
+0x04,0xB3,0x41,0xBC,0xE9,0x80,0x91,0xEA,0x64,0x9C,0x7A,0xE1,0x22,0x03,0xA9,0x9C,
+0x6E,0x6F,0x0E,0x65,0x4F,0x6C,0x87,0x87,0x5E,0xF3,0x6E,0xA0,0xF9,0x75,0xA5,0x9B,
+0x40,0xE8,0x53,0xB2,0x27,0x9D,0x4A,0xB9,0xC0,0x77,0x21,0x8D,0xFF,0x87,0xF2,0xDE,
+0xBC,0x8C,0xEF,0x17,0xDF,0xB7,0x49,0x0B,0xD1,0xF2,0x6E,0x30,0x0B,0x1A,0x0E,0x4E,
+0x76,0xED,0x11,0xFC,0xF5,0xE9,0x56,0xB2,0x7D,0xBF,0xC7,0x6D,0x0A,0x93,0x8C,0xA5,
+0xD0,0xC0,0xB6,0x1D,0xBE,0x3A,0x4E,0x94,0xA2,0xD7,0x6E,0x6C,0x0B,0xC2,0x8A,0x7C,
+0xFA,0x20,0xF3,0xC4,0xE4,0xE5,0xCD,0x0D,0xA8,0xCB,0x91,0x92,0xB1,0x7C,0x85,0xEC,
+0xB5,0x14,0x69,0x66,0x0E,0x82,0xE7,0xCD,0xCE,0xC8,0x2D,0xA6,0x51,0x7F,0x21,0xC1,
+0x35,0x53,0x85,0x06,0x4A,0x5D,0x9F,0xAD,0xBB,0x1B,0x5F,0x74,
+};
+
+
+/* subject:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC */
+/* issuer :/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC */
+
+
+const unsigned char UTN_DATACorp_SGC_Root_CA_certificate[1122]={
+0x30,0x82,0x04,0x5E,0x30,0x82,0x03,0x46,0xA0,0x03,0x02,0x01,0x02,0x02,0x10,0x44,
+0xBE,0x0C,0x8B,0x50,0x00,0x21,0xB4,0x11,0xD3,0x2A,0x68,0x06,0xA9,0xAD,0x69,0x30,
+0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30,0x81,
+0x93,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x55,0x53,0x31,0x0B,
+0x30,0x09,0x06,0x03,0x55,0x04,0x08,0x13,0x02,0x55,0x54,0x31,0x17,0x30,0x15,0x06,
+0x03,0x55,0x04,0x07,0x13,0x0E,0x53,0x61,0x6C,0x74,0x20,0x4C,0x61,0x6B,0x65,0x20,
+0x43,0x69,0x74,0x79,0x31,0x1E,0x30,0x1C,0x06,0x03,0x55,0x04,0x0A,0x13,0x15,0x54,
+0x68,0x65,0x20,0x55,0x53,0x45,0x52,0x54,0x52,0x55,0x53,0x54,0x20,0x4E,0x65,0x74,
+0x77,0x6F,0x72,0x6B,0x31,0x21,0x30,0x1F,0x06,0x03,0x55,0x04,0x0B,0x13,0x18,0x68,
+0x74,0x74,0x70,0x3A,0x2F,0x2F,0x77,0x77,0x77,0x2E,0x75,0x73,0x65,0x72,0x74,0x72,
+0x75,0x73,0x74,0x2E,0x63,0x6F,0x6D,0x31,0x1B,0x30,0x19,0x06,0x03,0x55,0x04,0x03,
+0x13,0x12,0x55,0x54,0x4E,0x20,0x2D,0x20,0x44,0x41,0x54,0x41,0x43,0x6F,0x72,0x70,
+0x20,0x53,0x47,0x43,0x30,0x1E,0x17,0x0D,0x39,0x39,0x30,0x36,0x32,0x34,0x31,0x38,
+0x35,0x37,0x32,0x31,0x5A,0x17,0x0D,0x31,0x39,0x30,0x36,0x32,0x34,0x31,0x39,0x30,
+0x36,0x33,0x30,0x5A,0x30,0x81,0x93,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,
+0x13,0x02,0x55,0x53,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x08,0x13,0x02,0x55,
+0x54,0x31,0x17,0x30,0x15,0x06,0x03,0x55,0x04,0x07,0x13,0x0E,0x53,0x61,0x6C,0x74,
+0x20,0x4C,0x61,0x6B,0x65,0x20,0x43,0x69,0x74,0x79,0x31,0x1E,0x30,0x1C,0x06,0x03,
+0x55,0x04,0x0A,0x13,0x15,0x54,0x68,0x65,0x20,0x55,0x53,0x45,0x52,0x54,0x52,0x55,
+0x53,0x54,0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,0x6B,0x31,0x21,0x30,0x1F,0x06,0x03,
+0x55,0x04,0x0B,0x13,0x18,0x68,0x74,0x74,0x70,0x3A,0x2F,0x2F,0x77,0x77,0x77,0x2E,
+0x75,0x73,0x65,0x72,0x74,0x72,0x75,0x73,0x74,0x2E,0x63,0x6F,0x6D,0x31,0x1B,0x30,
+0x19,0x06,0x03,0x55,0x04,0x03,0x13,0x12,0x55,0x54,0x4E,0x20,0x2D,0x20,0x44,0x41,
+0x54,0x41,0x43,0x6F,0x72,0x70,0x20,0x53,0x47,0x43,0x30,0x82,0x01,0x22,0x30,0x0D,
+0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x01,0x05,0x00,0x03,0x82,0x01,
+0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01,0x01,0x00,0xDF,0xEE,0x58,0x10,0xA2,
+0x2B,0x6E,0x55,0xC4,0x8E,0xBF,0x2E,0x46,0x09,0xE7,0xE0,0x08,0x0F,0x2E,0x2B,0x7A,
+0x13,0x94,0x1B,0xBD,0xF6,0xB6,0x80,0x8E,0x65,0x05,0x93,0x00,0x1E,0xBC,0xAF,0xE2,
+0x0F,0x8E,0x19,0x0D,0x12,0x47,0xEC,0xAC,0xAD,0xA3,0xFA,0x2E,0x70,0xF8,0xDE,0x6E,
+0xFB,0x56,0x42,0x15,0x9E,0x2E,0x5C,0xEF,0x23,0xDE,0x21,0xB9,0x05,0x76,0x27,0x19,
+0x0F,0x4F,0xD6,0xC3,0x9C,0xB4,0xBE,0x94,0x19,0x63,0xF2,0xA6,0x11,0x0A,0xEB,0x53,
+0x48,0x9C,0xBE,0xF2,0x29,0x3B,0x16,0xE8,0x1A,0xA0,0x4C,0xA6,0xC9,0xF4,0x18,0x59,
+0x68,0xC0,0x70,0xF2,0x53,0x00,0xC0,0x5E,0x50,0x82,0xA5,0x56,0x6F,0x36,0xF9,0x4A,
+0xE0,0x44,0x86,0xA0,0x4D,0x4E,0xD6,0x47,0x6E,0x49,0x4A,0xCB,0x67,0xD7,0xA6,0xC4,
+0x05,0xB9,0x8E,0x1E,0xF4,0xFC,0xFF,0xCD,0xE7,0x36,0xE0,0x9C,0x05,0x6C,0xB2,0x33,
+0x22,0x15,0xD0,0xB4,0xE0,0xCC,0x17,0xC0,0xB2,0xC0,0xF4,0xFE,0x32,0x3F,0x29,0x2A,
+0x95,0x7B,0xD8,0xF2,0xA7,0x4E,0x0F,0x54,0x7C,0xA1,0x0D,0x80,0xB3,0x09,0x03,0xC1,
+0xFF,0x5C,0xDD,0x5E,0x9A,0x3E,0xBC,0xAE,0xBC,0x47,0x8A,0x6A,0xAE,0x71,0xCA,0x1F,
+0xB1,0x2A,0xB8,0x5F,0x42,0x05,0x0B,0xEC,0x46,0x30,0xD1,0x72,0x0B,0xCA,0xE9,0x56,
+0x6D,0xF5,0xEF,0xDF,0x78,0xBE,0x61,0xBA,0xB2,0xA5,0xAE,0x04,0x4C,0xBC,0xA8,0xAC,
+0x69,0x15,0x97,0xBD,0xEF,0xEB,0xB4,0x8C,0xBF,0x35,0xF8,0xD4,0xC3,0xD1,0x28,0x0E,
+0x5C,0x3A,0x9F,0x70,0x18,0x33,0x20,0x77,0xC4,0xA2,0xAF,0x02,0x03,0x01,0x00,0x01,
+0xA3,0x81,0xAB,0x30,0x81,0xA8,0x30,0x0B,0x06,0x03,0x55,0x1D,0x0F,0x04,0x04,0x03,
+0x02,0x01,0xC6,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,
+0x03,0x01,0x01,0xFF,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x53,
+0x32,0xD1,0xB3,0xCF,0x7F,0xFA,0xE0,0xF1,0xA0,0x5D,0x85,0x4E,0x92,0xD2,0x9E,0x45,
+0x1D,0xB4,0x4F,0x30,0x3D,0x06,0x03,0x55,0x1D,0x1F,0x04,0x36,0x30,0x34,0x30,0x32,
+0xA0,0x30,0xA0,0x2E,0x86,0x2C,0x68,0x74,0x74,0x70,0x3A,0x2F,0x2F,0x63,0x72,0x6C,
+0x2E,0x75,0x73,0x65,0x72,0x74,0x72,0x75,0x73,0x74,0x2E,0x63,0x6F,0x6D,0x2F,0x55,
+0x54,0x4E,0x2D,0x44,0x41,0x54,0x41,0x43,0x6F,0x72,0x70,0x53,0x47,0x43,0x2E,0x63,
+0x72,0x6C,0x30,0x2A,0x06,0x03,0x55,0x1D,0x25,0x04,0x23,0x30,0x21,0x06,0x08,0x2B,
+0x06,0x01,0x05,0x05,0x07,0x03,0x01,0x06,0x0A,0x2B,0x06,0x01,0x04,0x01,0x82,0x37,
+0x0A,0x03,0x03,0x06,0x09,0x60,0x86,0x48,0x01,0x86,0xF8,0x42,0x04,0x01,0x30,0x0D,
+0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x03,0x82,0x01,
+0x01,0x00,0x27,0x35,0x97,0x00,0x8A,0x8B,0x28,0xBD,0xC6,0x33,0x30,0x1E,0x29,0xFC,
+0xE2,0xF7,0xD5,0x98,0xD4,0x40,0xBB,0x60,0xCA,0xBF,0xAB,0x17,0x2C,0x09,0x36,0x7F,
+0x50,0xFA,0x41,0xDC,0xAE,0x96,0x3A,0x0A,0x23,0x3E,0x89,0x59,0xC9,0xA3,0x07,0xED,
+0x1B,0x37,0xAD,0xFC,0x7C,0xBE,0x51,0x49,0x5A,0xDE,0x3A,0x0A,0x54,0x08,0x16,0x45,
+0xC2,0x99,0xB1,0x87,0xCD,0x8C,0x68,0xE0,0x69,0x03,0xE9,0xC4,0x4E,0x98,0xB2,0x3B,
+0x8C,0x16,0xB3,0x0E,0xA0,0x0C,0x98,0x50,0x9B,0x93,0xA9,0x70,0x09,0xC8,0x2C,0xA3,
+0x8F,0xDF,0x02,0xE4,0xE0,0x71,0x3A,0xF1,0xB4,0x23,0x72,0xA0,0xAA,0x01,0xDF,0xDF,
+0x98,0x3E,0x14,0x50,0xA0,0x31,0x26,0xBD,0x28,0xE9,0x5A,0x30,0x26,0x75,0xF9,0x7B,
+0x60,0x1C,0x8D,0xF3,0xCD,0x50,0x26,0x6D,0x04,0x27,0x9A,0xDF,0xD5,0x0D,0x45,0x47,
+0x29,0x6B,0x2C,0xE6,0x76,0xD9,0xA9,0x29,0x7D,0x32,0xDD,0xC9,0x36,0x3C,0xBD,0xAE,
+0x35,0xF1,0x11,0x9E,0x1D,0xBB,0x90,0x3F,0x12,0x47,0x4E,0x8E,0xD7,0x7E,0x0F,0x62,
+0x73,0x1D,0x52,0x26,0x38,0x1C,0x18,0x49,0xFD,0x30,0x74,0x9A,0xC4,0xE5,0x22,0x2F,
+0xD8,0xC0,0x8D,0xED,0x91,0x7A,0x4C,0x00,0x8F,0x72,0x7F,0x5D,0xDA,0xDD,0x1B,0x8B,
+0x45,0x6B,0xE7,0xDD,0x69,0x97,0xA8,0xC5,0x56,0x4C,0x0F,0x0C,0xF6,0x9F,0x7A,0x91,
+0x37,0xF6,0x97,0x82,0xE0,0xDD,0x71,0x69,0xFF,0x76,0x3F,0x60,0x4D,0x3C,0xCF,0xF7,
+0x99,0xF9,0xC6,0x57,0xF4,0xC9,0x55,0x39,0x78,0xBA,0x2C,0x79,0xC9,0xA6,0x88,0x2B,
+0xF4,0x08,
+};
+
+
+const unsigned char* const kSSLCertCertificateList[] = {
+ GlobalSign_Root_CA_certificate,
+ USERTrust_RSA_Certification_Authority_certificate,
+ Starfield_Class_2_CA_certificate,
+ Verisign_Class_3_Public_Primary_Certification_Authority___G3_certificate,
+ USERTrust_ECC_Certification_Authority_certificate,
+ GeoTrust_Global_CA_certificate,
+ Starfield_Root_Certificate_Authority___G2_certificate,
+ DigiCert_Global_Root_G3_certificate,
+ thawte_Primary_Root_CA___G2_certificate,
+ VeriSign_Universal_Root_Certification_Authority_certificate,
+ VeriSign_Class_3_Public_Primary_Certification_Authority___G4_certificate,
+ DigiCert_Global_Root_G2_certificate,
+ AddTrust_Low_Value_Services_Root_certificate,
+ AffirmTrust_Premium_ECC_certificate,
+ Verisign_Class_4_Public_Primary_Certification_Authority___G3_certificate,
+ thawte_Primary_Root_CA_certificate,
+ AddTrust_Public_Services_Root_certificate,
+ AddTrust_Qualified_Certificates_Root_certificate,
+ GeoTrust_Primary_Certification_Authority___G3_certificate,
+ GeoTrust_Universal_CA_2_certificate,
+ Baltimore_CyberTrust_Root_certificate,
+ GlobalSign_Root_CA___R2_certificate,
+ GlobalSign_Root_CA___R3_certificate,
+ AffirmTrust_Networking_certificate,
+ AddTrust_External_Root_certificate,
+ thawte_Primary_Root_CA___G3_certificate,
+ DigiCert_Assured_ID_Root_CA_certificate,
+ Go_Daddy_Class_2_CA_certificate,
+ GeoTrust_Primary_Certification_Authority_certificate,
+ VeriSign_Class_3_Public_Primary_Certification_Authority___G5_certificate,
+ Equifax_Secure_CA_certificate,
+ Entrust_net_Premium_2048_Secure_Server_CA_certificate,
+ DigiCert_Assured_ID_Root_G3_certificate,
+ COMODO_Certification_Authority_certificate,
+ DigiCert_Global_Root_CA_certificate,
+ Comodo_AAA_Services_root_certificate,
+ DigiCert_High_Assurance_EV_Root_CA_certificate,
+ GeoTrust_Universal_CA_certificate,
+ COMODO_ECC_Certification_Authority_certificate,
+ Entrust_Root_Certification_Authority___G2_certificate,
+ DigiCert_Assured_ID_Root_G2_certificate,
+ AffirmTrust_Commercial_certificate,
+ AffirmTrust_Premium_certificate,
+ Go_Daddy_Root_Certificate_Authority___G2_certificate,
+ Comodo_Secure_Services_root_certificate,
+ DigiCert_Trusted_Root_G4_certificate,
+ GlobalSign_ECC_Root_CA___R5_certificate,
+ UTN_USERFirst_Hardware_Root_CA_certificate,
+ GlobalSign_ECC_Root_CA___R4_certificate,
+ TC_TrustCenter_Universal_CA_I_certificate,
+ Comodo_Trusted_Services_root_certificate,
+ Entrust_Root_Certification_Authority_certificate,
+ TC_TrustCenter_Class_2_CA_II_certificate,
+ Cybertrust_Global_Root_certificate,
+ Entrust_Root_Certification_Authority___EC1_certificate,
+ GeoTrust_Primary_Certification_Authority___G2_certificate,
+ GeoTrust_Global_CA_2_certificate,
+ COMODO_RSA_Certification_Authority_certificate,
+ UTN_DATACorp_SGC_Root_CA_certificate,
+};
+
+const size_t kSSLCertCertificateSizeList[] = {
+  889,
+  1506,
+  1043,
+  1054,
+  659,
+  856,
+  993,
+  579,
+  652,
+  1213,
+  904,
+  914,
+  1052,
+  514,
+  1054,
+  1060,
+  1049,
+  1058,
+  1026,
+  1392,
+  891,
+  958,
+  867,
+  848,
+  1082,
+  1070,
+  955,
+  1028,
+  896,
+  1239,
+  804,
+  1120,
+  586,
+  1057,
+  947,
+  1078,
+  969,
+  1388,
+  653,
+  1090,
+  922,
+  848,
+  1354,
+  969,
+  1091,
+  1428,
+  546,
+  1144,
+  485,
+  993,
+  1095,
+  1173,
+  1198,
+  933,
+  765,
+  690,
+  874,
+  1500,
+  1122,
+};
+
+#endif  // RTC_BASE_SSLROOTS_H_
diff --git a/rtc_base/sslstreamadapter.cc b/rtc_base/sslstreamadapter.cc
new file mode 100644
index 0000000..b09c144
--- /dev/null
+++ b/rtc_base/sslstreamadapter.cc
@@ -0,0 +1,165 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/sslstreamadapter.h"
+
+#include "rtc_base/opensslstreamadapter.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+namespace rtc {
+
+// TODO(guoweis): Move this to SDP layer and use int form internally.
+// webrtc:5043.
+const char CS_AES_CM_128_HMAC_SHA1_80[] = "AES_CM_128_HMAC_SHA1_80";
+const char CS_AES_CM_128_HMAC_SHA1_32[] = "AES_CM_128_HMAC_SHA1_32";
+const char CS_AEAD_AES_128_GCM[] = "AEAD_AES_128_GCM";
+const char CS_AEAD_AES_256_GCM[] = "AEAD_AES_256_GCM";
+
+std::string SrtpCryptoSuiteToName(int crypto_suite) {
+  switch (crypto_suite) {
+  case SRTP_AES128_CM_SHA1_32:
+    return CS_AES_CM_128_HMAC_SHA1_32;
+  case SRTP_AES128_CM_SHA1_80:
+    return CS_AES_CM_128_HMAC_SHA1_80;
+  case SRTP_AEAD_AES_128_GCM:
+    return CS_AEAD_AES_128_GCM;
+  case SRTP_AEAD_AES_256_GCM:
+    return CS_AEAD_AES_256_GCM;
+  default:
+    return std::string();
+  }
+}
+
+int SrtpCryptoSuiteFromName(const std::string& crypto_suite) {
+  if (crypto_suite == CS_AES_CM_128_HMAC_SHA1_32)
+    return SRTP_AES128_CM_SHA1_32;
+  if (crypto_suite == CS_AES_CM_128_HMAC_SHA1_80)
+    return SRTP_AES128_CM_SHA1_80;
+  if (crypto_suite == CS_AEAD_AES_128_GCM)
+    return SRTP_AEAD_AES_128_GCM;
+  if (crypto_suite == CS_AEAD_AES_256_GCM)
+    return SRTP_AEAD_AES_256_GCM;
+  return SRTP_INVALID_CRYPTO_SUITE;
+}
+
+bool GetSrtpKeyAndSaltLengths(int crypto_suite, int *key_length,
+    int *salt_length) {
+  switch (crypto_suite) {
+  case SRTP_AES128_CM_SHA1_32:
+  case SRTP_AES128_CM_SHA1_80:
+    // SRTP_AES128_CM_HMAC_SHA1_32 and SRTP_AES128_CM_HMAC_SHA1_80 are defined
+    // in RFC 5764 to use a 128 bits key and 112 bits salt for the cipher.
+    *key_length = 16;
+    *salt_length = 14;
+    break;
+  case SRTP_AEAD_AES_128_GCM:
+    // SRTP_AEAD_AES_128_GCM is defined in RFC 7714 to use a 128 bits key and
+    // a 96 bits salt for the cipher.
+    *key_length = 16;
+    *salt_length = 12;
+    break;
+  case SRTP_AEAD_AES_256_GCM:
+    // SRTP_AEAD_AES_256_GCM is defined in RFC 7714 to use a 256 bits key and
+    // a 96 bits salt for the cipher.
+    *key_length = 32;
+    *salt_length = 12;
+    break;
+  default:
+    return false;
+  }
+  return true;
+}
+
+bool IsGcmCryptoSuite(int crypto_suite) {
+  return (crypto_suite == SRTP_AEAD_AES_256_GCM ||
+          crypto_suite == SRTP_AEAD_AES_128_GCM);
+}
+
+bool IsGcmCryptoSuiteName(const std::string& crypto_suite) {
+  return (crypto_suite == CS_AEAD_AES_256_GCM ||
+          crypto_suite == CS_AEAD_AES_128_GCM);
+}
+
+// static
+CryptoOptions CryptoOptions::NoGcm() {
+  CryptoOptions options;
+  options.enable_gcm_crypto_suites = false;
+  return options;
+}
+
+std::vector<int> GetSupportedDtlsSrtpCryptoSuites(
+    const rtc::CryptoOptions& crypto_options) {
+  std::vector<int> crypto_suites;
+  if (crypto_options.enable_gcm_crypto_suites) {
+    crypto_suites.push_back(rtc::SRTP_AEAD_AES_256_GCM);
+    crypto_suites.push_back(rtc::SRTP_AEAD_AES_128_GCM);
+  }
+  // Note: SRTP_AES128_CM_SHA1_80 is what is required to be supported (by
+  // draft-ietf-rtcweb-security-arch), but SRTP_AES128_CM_SHA1_32 is allowed as
+  // well, and saves a few bytes per packet if it ends up selected.
+  crypto_suites.push_back(rtc::SRTP_AES128_CM_SHA1_32);
+  crypto_suites.push_back(rtc::SRTP_AES128_CM_SHA1_80);
+  return crypto_suites;
+}
+
+SSLStreamAdapter* SSLStreamAdapter::Create(StreamInterface* stream) {
+  return new OpenSSLStreamAdapter(stream);
+}
+
+SSLStreamAdapter::SSLStreamAdapter(StreamInterface* stream)
+    : StreamAdapterInterface(stream),
+      ignore_bad_cert_(false),
+      client_auth_enabled_(true) {}
+
+SSLStreamAdapter::~SSLStreamAdapter() {}
+
+bool SSLStreamAdapter::GetSslCipherSuite(int* cipher_suite) {
+  return false;
+}
+
+bool SSLStreamAdapter::ExportKeyingMaterial(const std::string& label,
+                                            const uint8_t* context,
+                                            size_t context_len,
+                                            bool use_context,
+                                            uint8_t* result,
+                                            size_t result_len) {
+  return false;  // Default is unsupported
+}
+
+bool SSLStreamAdapter::SetDtlsSrtpCryptoSuites(
+    const std::vector<int>& crypto_suites) {
+  return false;
+}
+
+bool SSLStreamAdapter::GetDtlsSrtpCryptoSuite(int* crypto_suite) {
+  return false;
+}
+
+bool SSLStreamAdapter::IsBoringSsl() {
+  return OpenSSLStreamAdapter::IsBoringSsl();
+}
+bool SSLStreamAdapter::IsAcceptableCipher(int cipher, KeyType key_type) {
+  return OpenSSLStreamAdapter::IsAcceptableCipher(cipher, key_type);
+}
+bool SSLStreamAdapter::IsAcceptableCipher(const std::string& cipher,
+                                          KeyType key_type) {
+  return OpenSSLStreamAdapter::IsAcceptableCipher(cipher, key_type);
+}
+std::string SSLStreamAdapter::SslCipherSuiteToName(int cipher_suite) {
+  return OpenSSLStreamAdapter::SslCipherSuiteToName(cipher_suite);
+}
+void SSLStreamAdapter::enable_time_callback_for_testing() {
+  OpenSSLStreamAdapter::enable_time_callback_for_testing();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+}  // namespace rtc
diff --git a/rtc_base/sslstreamadapter.h b/rtc_base/sslstreamadapter.h
new file mode 100644
index 0000000..c04fb34
--- /dev/null
+++ b/rtc_base/sslstreamadapter.h
@@ -0,0 +1,278 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SSLSTREAMADAPTER_H_
+#define RTC_BASE_SSLSTREAMADAPTER_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "rtc_base/sslidentity.h"
+#include "rtc_base/stream.h"
+
+namespace rtc {
+
+// Constants for SSL profile.
+const int TLS_NULL_WITH_NULL_NULL = 0;
+
+// Constants for SRTP profiles.
+const int SRTP_INVALID_CRYPTO_SUITE = 0;
+#ifndef SRTP_AES128_CM_SHA1_80
+const int SRTP_AES128_CM_SHA1_80 = 0x0001;
+#endif
+#ifndef SRTP_AES128_CM_SHA1_32
+const int SRTP_AES128_CM_SHA1_32 = 0x0002;
+#endif
+#ifndef SRTP_AEAD_AES_128_GCM
+const int SRTP_AEAD_AES_128_GCM = 0x0007;
+#endif
+#ifndef SRTP_AEAD_AES_256_GCM
+const int SRTP_AEAD_AES_256_GCM = 0x0008;
+#endif
+
+// Names of SRTP profiles listed above.
+// 128-bit AES with 80-bit SHA-1 HMAC.
+extern const char CS_AES_CM_128_HMAC_SHA1_80[];
+// 128-bit AES with 32-bit SHA-1 HMAC.
+extern const char CS_AES_CM_128_HMAC_SHA1_32[];
+// 128-bit AES GCM with 16 byte AEAD auth tag.
+extern const char CS_AEAD_AES_128_GCM[];
+// 256-bit AES GCM with 16 byte AEAD auth tag.
+extern const char CS_AEAD_AES_256_GCM[];
+
+// Given the DTLS-SRTP protection profile ID, as defined in
+// https://tools.ietf.org/html/rfc4568#section-6.2 , return the SRTP profile
+// name, as defined in https://tools.ietf.org/html/rfc5764#section-4.1.2.
+std::string SrtpCryptoSuiteToName(int crypto_suite);
+
+// The reverse of above conversion.
+int SrtpCryptoSuiteFromName(const std::string& crypto_suite);
+
+// Get key length and salt length for given crypto suite. Returns true for
+// valid suites, otherwise false.
+bool GetSrtpKeyAndSaltLengths(int crypto_suite,
+                              int* key_length,
+                              int* salt_length);
+
+// Returns true if the given crypto suite id uses a GCM cipher.
+bool IsGcmCryptoSuite(int crypto_suite);
+
+// Returns true if the given crypto suite name uses a GCM cipher.
+bool IsGcmCryptoSuiteName(const std::string& crypto_suite);
+
+struct CryptoOptions {
+  CryptoOptions() {}
+
+  // Helper method to return an instance of the CryptoOptions with GCM crypto
+  // suites disabled. This method should be used instead of depending on current
+  // default values set by the constructor.
+  static CryptoOptions NoGcm();
+
+  // Enable GCM crypto suites from RFC 7714 for SRTP. GCM will only be used
+  // if both sides enable it.
+  bool enable_gcm_crypto_suites = false;
+
+  // If set to true, encrypted RTP header extensions as defined in RFC 6904
+  // will be negotiated. They will only be used if both peers support them.
+  bool enable_encrypted_rtp_header_extensions = false;
+};
+
+// Returns supported crypto suites, given |crypto_options|.
+// CS_AES_CM_128_HMAC_SHA1_32 will be preferred by default.
+std::vector<int> GetSupportedDtlsSrtpCryptoSuites(
+    const rtc::CryptoOptions& crypto_options);
+
+// SSLStreamAdapter : A StreamInterfaceAdapter that does SSL/TLS.
+// After SSL has been started, the stream will only open on successful
+// SSL verification of certificates, and the communication is
+// encrypted of course.
+//
+// This class was written with SSLAdapter as a starting point. It
+// offers a similar interface, with two differences: there is no
+// support for a restartable SSL connection, and this class has a
+// peer-to-peer mode.
+//
+// The SSL library requires initialization and cleanup. Static method
+// for doing this are in SSLAdapter. They should possibly be moved out
+// to a neutral class.
+
+enum SSLRole { SSL_CLIENT, SSL_SERVER };
+enum SSLMode { SSL_MODE_TLS, SSL_MODE_DTLS };
+enum SSLProtocolVersion {
+  SSL_PROTOCOL_TLS_10,
+  SSL_PROTOCOL_TLS_11,
+  SSL_PROTOCOL_TLS_12,
+  SSL_PROTOCOL_DTLS_10 = SSL_PROTOCOL_TLS_11,
+  SSL_PROTOCOL_DTLS_12 = SSL_PROTOCOL_TLS_12,
+};
+enum class SSLPeerCertificateDigestError {
+  NONE,
+  UNKNOWN_ALGORITHM,
+  INVALID_LENGTH,
+  VERIFICATION_FAILED,
+};
+
+// Errors for Read -- in the high range so no conflict with OpenSSL.
+enum { SSE_MSG_TRUNC = 0xff0001 };
+
+// Used to send back UMA histogram value. Logged when Dtls handshake fails.
+enum class SSLHandshakeError { UNKNOWN, INCOMPATIBLE_CIPHERSUITE, MAX_VALUE };
+
+class SSLStreamAdapter : public StreamAdapterInterface {
+ public:
+  // Instantiate an SSLStreamAdapter wrapping the given stream,
+  // (using the selected implementation for the platform).
+  // Caller is responsible for freeing the returned object.
+  static SSLStreamAdapter* Create(StreamInterface* stream);
+
+  explicit SSLStreamAdapter(StreamInterface* stream);
+  ~SSLStreamAdapter() override;
+
+  void set_ignore_bad_cert(bool ignore) { ignore_bad_cert_ = ignore; }
+  bool ignore_bad_cert() const { return ignore_bad_cert_; }
+
+  void set_client_auth_enabled(bool enabled) { client_auth_enabled_ = enabled; }
+  bool client_auth_enabled() const { return client_auth_enabled_; }
+
+  // Specify our SSL identity: key and certificate. SSLStream takes ownership
+  // of the SSLIdentity object and will free it when appropriate. Should be
+  // called no more than once on a given SSLStream instance.
+  virtual void SetIdentity(SSLIdentity* identity) = 0;
+
+  // Call this to indicate that we are to play the server role (or client role,
+  // if the default argument is replaced by SSL_CLIENT).
+  // The default argument is for backward compatibility.
+  // TODO(ekr@rtfm.com): rename this SetRole to reflect its new function
+  virtual void SetServerRole(SSLRole role = SSL_SERVER) = 0;
+
+  // Do DTLS or TLS.
+  virtual void SetMode(SSLMode mode) = 0;
+
+  // Set maximum supported protocol version. The highest version supported by
+  // both ends will be used for the connection, i.e. if one party supports
+  // DTLS 1.0 and the other DTLS 1.2, DTLS 1.0 will be used.
+  // If requested version is not supported by underlying crypto library, the
+  // next lower will be used.
+  virtual void SetMaxProtocolVersion(SSLProtocolVersion version) = 0;
+
+  // Set the initial retransmission timeout for DTLS messages. When the timeout
+  // expires, the message gets retransmitted and the timeout is exponentially
+  // increased.
+  // This should only be called before StartSSL().
+  virtual void SetInitialRetransmissionTimeout(int timeout_ms) = 0;
+
+  // StartSSL starts negotiation with a peer, whose certificate is verified
+  // using the certificate digest. Generally, SetIdentity() and possibly
+  // SetServerRole() should have been called before this.
+  // SetPeerCertificateDigest() must also be called. It may be called after
+  // StartSSLWithPeer() but must be called before the underlying stream opens.
+  //
+  // Use of the stream prior to calling StartSSL will pass data in clear text.
+  // Calling StartSSL causes SSL negotiation to begin as soon as possible: right
+  // away if the underlying wrapped stream is already opened, or else as soon as
+  // it opens.
+  //
+  // StartSSL returns a negative error code on failure. Returning 0 means
+  // success so far, but negotiation is probably not complete and will continue
+  // asynchronously. In that case, the exposed stream will open after
+  // successful negotiation and verification, or an SE_CLOSE event will be
+  // raised if negotiation fails.
+  virtual int StartSSL() = 0;
+
+  // Specify the digest of the certificate that our peer is expected to use.
+  // Only this certificate will be accepted during SSL verification. The
+  // certificate is assumed to have been obtained through some other secure
+  // channel (such as the signaling channel). This must specify the terminal
+  // certificate, not just a CA. SSLStream makes a copy of the digest value.
+  //
+  // Returns true if successful.
+  // |error| is optional and provides more information about the failure.
+  virtual bool SetPeerCertificateDigest(
+      const std::string& digest_alg,
+      const unsigned char* digest_val,
+      size_t digest_len,
+      SSLPeerCertificateDigestError* error = nullptr) = 0;
+
+  // Retrieves the peer's certificate chain including leaf certificate, if a
+  // connection has been established.
+  virtual std::unique_ptr<SSLCertChain> GetPeerSSLCertChain() const = 0;
+
+  // Retrieves the IANA registration id of the cipher suite used for the
+  // connection (e.g. 0x2F for "TLS_RSA_WITH_AES_128_CBC_SHA").
+  virtual bool GetSslCipherSuite(int* cipher_suite);
+
+  virtual int GetSslVersion() const = 0;
+
+  // Key Exporter interface from RFC 5705
+  // Arguments are:
+  // label               -- the exporter label.
+  //                        part of the RFC defining each exporter
+  //                        usage (IN)
+  // context/context_len -- a context to bind to for this connection;
+  //                        optional, can be null, 0 (IN)
+  // use_context         -- whether to use the context value
+  //                        (needed to distinguish no context from
+  //                        zero-length ones).
+  // result              -- where to put the computed value
+  // result_len          -- the length of the computed value
+  virtual bool ExportKeyingMaterial(const std::string& label,
+                                    const uint8_t* context,
+                                    size_t context_len,
+                                    bool use_context,
+                                    uint8_t* result,
+                                    size_t result_len);
+
+  // DTLS-SRTP interface
+  virtual bool SetDtlsSrtpCryptoSuites(const std::vector<int>& crypto_suites);
+  virtual bool GetDtlsSrtpCryptoSuite(int* crypto_suite);
+
+  // Returns true if a TLS connection has been established.
+  // The only difference between this and "GetState() == SE_OPEN" is that if
+  // the peer certificate digest hasn't been verified, the state will still be
+  // SS_OPENING but IsTlsConnected should return true.
+  virtual bool IsTlsConnected() = 0;
+
+  // Capabilities testing.
+  // Used to have "DTLS supported", "DTLS-SRTP supported" etc. methods, but now
+  // that's assumed.
+  static bool IsBoringSsl();
+
+  // Returns true iff the supplied cipher is deemed to be strong.
+  // TODO(torbjorng): Consider removing the KeyType argument.
+  static bool IsAcceptableCipher(int cipher, KeyType key_type);
+  static bool IsAcceptableCipher(const std::string& cipher, KeyType key_type);
+
+  // TODO(guoweis): Move this away from a static class method. Currently this is
+  // introduced such that any caller could depend on sslstreamadapter.h without
+  // depending on specific SSL implementation.
+  static std::string SslCipherSuiteToName(int cipher_suite);
+
+  // Use our timeutils.h source of timing in BoringSSL, allowing us to test
+  // using a fake clock.
+  static void enable_time_callback_for_testing();
+
+  sigslot::signal1<SSLHandshakeError> SignalSSLHandshakeError;
+
+ private:
+  // If true, the server certificate need not match the configured
+  // server_name, and in fact missing certificate authority and other
+  // verification errors are ignored.
+  bool ignore_bad_cert_;
+
+  // If true (default), the client is required to provide a certificate during
+  // handshake. If no certificate is given, handshake fails. This applies to
+  // server mode only.
+  bool client_auth_enabled_;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_SSLSTREAMADAPTER_H_
diff --git a/rtc_base/sslstreamadapter_unittest.cc b/rtc_base/sslstreamadapter_unittest.cc
new file mode 100644
index 0000000..ce96274
--- /dev/null
+++ b/rtc_base/sslstreamadapter_unittest.cc
@@ -0,0 +1,1496 @@
+/*
+ *  Copyright 2011 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <algorithm>
+#include <memory>
+#include <set>
+#include <string>
+
+#include "rtc_base/bufferqueue.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/helpers.h"
+#include "rtc_base/ssladapter.h"
+#include "rtc_base/sslidentity.h"
+#include "rtc_base/sslstreamadapter.h"
+#include "rtc_base/stream.h"
+
+using ::testing::WithParamInterface;
+using ::testing::Values;
+using ::testing::Combine;
+using ::testing::tuple;
+
+static const int kBlockSize = 4096;
+static const char kExporterLabel[] = "label";
+static const unsigned char kExporterContext[] = "context";
+static int kExporterContextLen = sizeof(kExporterContext);
+
+static const char kRSA_PRIVATE_KEY_PEM[] =
+    "-----BEGIN RSA PRIVATE KEY-----\n"
+    "MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBAMYRkbhmI7kVA/rM\n"
+    "czsZ+6JDhDvnkF+vn6yCAGuRPV03zuRqZtDy4N4to7PZu9PjqrRl7nDMXrG3YG9y\n"
+    "rlIAZ72KjcKKFAJxQyAKLCIdawKRyp8RdK3LEySWEZb0AV58IadqPZDTNHHRX8dz\n"
+    "5aTSMsbbkZ+C/OzTnbiMqLL/vg6jAgMBAAECgYAvgOs4FJcgvp+TuREx7YtiYVsH\n"
+    "mwQPTum2z/8VzWGwR8BBHBvIpVe1MbD/Y4seyI2aco/7UaisatSgJhsU46/9Y4fq\n"
+    "2TwXH9QANf4at4d9n/R6rzwpAJOpgwZgKvdQjkfrKTtgLV+/dawvpxUYkRH4JZM1\n"
+    "CVGukMfKNrSVH4Ap4QJBAOJmGV1ASPnB4r4nc99at7JuIJmd7fmuVUwUgYi4XgaR\n"
+    "WhScBsgYwZ/JoywdyZJgnbcrTDuVcWG56B3vXbhdpMsCQQDf9zeJrjnPZ3Cqm79y\n"
+    "kdqANep0uwZciiNiWxsQrCHztywOvbFhdp8iYVFG9EK8DMY41Y5TxUwsHD+67zao\n"
+    "ZNqJAkEA1suLUP/GvL8IwuRneQd2tWDqqRQ/Td3qq03hP7e77XtF/buya3Ghclo5\n"
+    "54czUR89QyVfJEC6278nzA7n2h1uVQJAcG6mztNL6ja/dKZjYZye2CY44QjSlLo0\n"
+    "MTgTSjdfg/28fFn2Jjtqf9Pi/X+50LWI/RcYMC2no606wRk9kyOuIQJBAK6VSAim\n"
+    "1pOEjsYQn0X5KEIrz1G3bfCbB848Ime3U2/FWlCHMr6ch8kCZ5d1WUeJD3LbwMNG\n"
+    "UCXiYxSsu20QNVw=\n"
+    "-----END RSA PRIVATE KEY-----\n";
+
+static const char kCERT_PEM[] =
+    "-----BEGIN CERTIFICATE-----\n"
+    "MIIBmTCCAQKgAwIBAgIEbzBSAjANBgkqhkiG9w0BAQsFADARMQ8wDQYDVQQDEwZX\n"
+    "ZWJSVEMwHhcNMTQwMTAyMTgyNDQ3WhcNMTQwMjAxMTgyNDQ3WjARMQ8wDQYDVQQD\n"
+    "EwZXZWJSVEMwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMYRkbhmI7kVA/rM\n"
+    "czsZ+6JDhDvnkF+vn6yCAGuRPV03zuRqZtDy4N4to7PZu9PjqrRl7nDMXrG3YG9y\n"
+    "rlIAZ72KjcKKFAJxQyAKLCIdawKRyp8RdK3LEySWEZb0AV58IadqPZDTNHHRX8dz\n"
+    "5aTSMsbbkZ+C/OzTnbiMqLL/vg6jAgMBAAEwDQYJKoZIhvcNAQELBQADgYEAUflI\n"
+    "VUe5Krqf5RVa5C3u/UTAOAUJBiDS3VANTCLBxjuMsvqOG0WvaYWP3HYPgrz0jXK2\n"
+    "LJE/mGw3MyFHEqi81jh95J+ypl6xKW6Rm8jKLR87gUvCaVYn/Z4/P3AqcQTB7wOv\n"
+    "UD0A8qfhfDM+LK6rPAnCsVN0NRDY3jvd6rzix9M=\n"
+    "-----END CERTIFICATE-----\n";
+
+static const char kIntCert1[] =
+    "-----BEGIN CERTIFICATE-----\n"
+    "MIIEUjCCAjqgAwIBAgIBAjANBgkqhkiG9w0BAQsFADCBljELMAkGA1UEBhMCVVMx\n"
+    "EzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDU1vdW50YWluIFZpZXcxFDAS\n"
+    "BgNVBAoMC0dvb2dsZSwgSW5jMQwwCgYDVQQLDANHVFAxFzAVBgNVBAMMDnRlbGVw\n"
+    "aG9ueS5nb29nMR0wGwYJKoZIhvcNAQkBFg5ndHBAZ29vZ2xlLmNvbTAeFw0xNzA5\n"
+    "MjYwNDA5MDNaFw0yMDA2MjIwNDA5MDNaMGQxCzAJBgNVBAYTAlVTMQswCQYDVQQI\n"
+    "DAJDQTEWMBQGA1UEBwwNTW91bnRhaW4gVmlldzEXMBUGA1UECgwOdGVsZXBob255\n"
+    "Lmdvb2cxFzAVBgNVBAMMDnRlbGVwaG9ueS5nb29nMIGfMA0GCSqGSIb3DQEBAQUA\n"
+    "A4GNADCBiQKBgQDJXWeeU1v1+wlqkVobzI3aN7Uh2iVQA9YCdq5suuabtiD/qoOD\n"
+    "NKpmQqsx7WZGGWSZTDFEBaUpvIK7Hb+nzRqk6iioPCFOFuarm6GxO1xVneImMuE6\n"
+    "tuWb3YZPr+ikChJbl11y5UcSbg0QsbeUc+jHl5umNvrL85Y+z8SP0rxbBwIDAQAB\n"
+    "o2AwXjAdBgNVHQ4EFgQU7tdZobqlN8R8V72FQnRxmqq8tKswHwYDVR0jBBgwFoAU\n"
+    "5GgKMUtcxkQ2dJrtNR5YOlIAPDswDwYDVR0TAQH/BAUwAwEB/zALBgNVHQ8EBAMC\n"
+    "AQYwDQYJKoZIhvcNAQELBQADggIBADObh9Z+z14FmP9zSenhFtq7hFnmNrSkklk8\n"
+    "eyYWXKfOuIriEQQBZsz76ZcnzStih8Rj+yQ0AXydk4fJ5LOwC2cUqQBar17g6Pd2\n"
+    "8g4SIL4azR9WvtiSvpuGlwp25b+yunaacDne6ebnf/MUiiKT5w61Xo3cEPVfl38e\n"
+    "/Up2l0bioid5enUTmg6LY6RxDO6tnZQkz3XD+nNSwT4ehtkqFpHYWjErj0BbkDM2\n"
+    "hiVc/JsYOZn3DmuOlHVHU6sKwqh3JEyvHO/d7DGzMGWHpHwv2mCTJq6l/sR95Tc2\n"
+    "GaQZgGDVNs9pdEouJCDm9e/PbQWRYhnat82PTkXx/6mDAAwdZlIi/pACzq8K4p7e\n"
+    "6hF0t8uKGnXJubHPXxlnJU6yxZ0yWmivAGjwWK4ur832gKlho4jeMDhiI/T3QPpl\n"
+    "iMNsIvxRhdD+GxJkQP1ezayw8s+Uc9KwKglrkBSRRDLCJUfPOvMmXLUDSTMX7kp4\n"
+    "/Ak1CA8dVLJIlfEjLBUuvAttlP7+7lsKNgxAjCxZkWLXIyGULzNPQwVWkGfCbrQs\n"
+    "XyMvSbFsSIb7blV7eLlmf9a+2RprUUkc2ALXLLCI9YQXmxm2beBfMyNmmebwBJzT\n"
+    "B0OR+5pFFNTJPoNlqpdrDsGrDu7JlUtk0ZLZzYyKXbgy2qXxfd4OWzXXjxpLMszZ\n"
+    "LDIpOAkj\n"
+    "-----END CERTIFICATE-----\n";
+
+static const char kCACert[] =
+    "-----BEGIN CERTIFICATE-----\n"
+    "MIIGETCCA/mgAwIBAgIJAKN9r/BdbGUJMA0GCSqGSIb3DQEBCwUAMIGWMQswCQYD\n"
+    "VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4g\n"
+    "VmlldzEUMBIGA1UECgwLR29vZ2xlLCBJbmMxDDAKBgNVBAsMA0dUUDEXMBUGA1UE\n"
+    "AwwOdGVsZXBob255Lmdvb2cxHTAbBgkqhkiG9w0BCQEWDmd0cEBnb29nbGUuY29t\n"
+    "MB4XDTE3MDcyNzIzMDE0NVoXDTE3MDgyNjIzMDE0NVowgZYxCzAJBgNVBAYTAlVT\n"
+    "MRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1Nb3VudGFpbiBWaWV3MRQw\n"
+    "EgYDVQQKDAtHb29nbGUsIEluYzEMMAoGA1UECwwDR1RQMRcwFQYDVQQDDA50ZWxl\n"
+    "cGhvbnkuZ29vZzEdMBsGCSqGSIb3DQEJARYOZ3RwQGdvb2dsZS5jb20wggIiMA0G\n"
+    "CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCfvpF7aBV5Hp1EHsWoIlL3GeHwh8dS\n"
+    "lv9VQCegN9rD06Ny7MgcED5AiK2vqXmUmOVS+7NbATkdVYN/eozDhKtN3Q3n87kJ\n"
+    "Nt/TD/TcZZHOZIGsRPbrf2URK26E/5KzTzbzXVBOA1e+gSj+EBbltGqb01ZO5ErF\n"
+    "iPGViPM/HpYKdq6mfz2bS5PhU67XZMM2zvToyReQ/Fjm/6PJhwKSRXSgZF5djPhk\n"
+    "2LfOKMLS0AeZtd2C4DFsCU41lfLUkybioDgFuzTQ3TFi1K8A07KYTMmLY/yQppnf\n"
+    "SpNX58shlVhM+Ed37K1Z0rU0OfVCZ5P+KKaSSfMranjlU7zeUIhZYjqq/EYrEhbS\n"
+    "dLnNHwgJrqxzId3kq8uuLM6+VB7JZKnZLfT90GdAbX4+tutNe21smmogF9f80vEy\n"
+    "gM4tOp9rXrvz9vCwWHXVY9kdKemdLAsREoO6MS9k2ctK4jj80o2dROuFC6Q3e7mz\n"
+    "RjvZr5Tvi464c2o9o/jNlJ0O6q7V2eQzohD+7VnV5QPpRGXxlIeqpR2zoAg+WtRS\n"
+    "4OgHOVYiD3M6uAlggJA5pcDjMfkEZ+pkhtVcT4qMCEoruk6GbyPxS565oSHu16bH\n"
+    "EjeCqbZOVND5T3oA7nz6aQSs8sJabt0jmxUkGVnE+4ZDIuuRtkRma+0P/96Mtqor\n"
+    "OlpNWY1OBDY64QIDAQABo2AwXjAdBgNVHQ4EFgQU5GgKMUtcxkQ2dJrtNR5YOlIA\n"
+    "PDswHwYDVR0jBBgwFoAU5GgKMUtcxkQ2dJrtNR5YOlIAPDswDwYDVR0TAQH/BAUw\n"
+    "AwEB/zALBgNVHQ8EBAMCAQYwDQYJKoZIhvcNAQELBQADggIBAARQly5/bB6VUL2C\n"
+    "ykDYgWt48go407pAra6tL2kjpdfxV5PdL7iMZRkeht00vj+BVahIqZKrNOa/f5Fx\n"
+    "vlpahZFu0PDN436aQwRZ9qWut2qDOK0/z9Hhj6NWybquRFwMwqkPG/ivLMDU8Dmj\n"
+    "CIplpngPYNwXCs0KzdjSXYxqxJbwMjQXELD+/RcurY0oTtJMM1/2vKQMzw24UJqe\n"
+    "XLJAlsnd2AnWzWNUEviDZY89j9NdkHerBmV2gGzcU+X5lgOO5M8odBv0ZC9D+a6Z\n"
+    "QPZAOfdGVw60hhGvTW5s/s0dHwCpegRidhs0MD0fTmwwjYFBSmUx3Gztr4JTzOOr\n"
+    "7e5daJuak2ujQ5DqcGBvt1gePjSudb5brS7JQtN8tI/FyrnR4q/OuOwv1EvlC5RG\n"
+    "hLX+TXaWqFxB1Hd8ebKRR40mboFG6KcUI3lLBthDvQE7jnq48QfZMjlMQK0ZF1l7\n"
+    "SrlwRXWA74bU8CLJvnZKKo9p4TsTiDYGSYC6tNHKj5s3TGWL46oqGyZ0KdGNhrtC\n"
+    "rIGenMhth1vPYjyy0XuGBndXT85yi+IM2l8g8oU845+plxIhgpSI8bbC0oLwnhQ5\n"
+    "ARfsiYLkXDE7imSS0CSUmye76372mlzAIB1is4bBB/SzpPQtBuB9LDKtONgpSGHn\n"
+    "dGaXBy+qbVXVyGXaeEbIRjtJ6m92\n"
+    "-----END CERTIFICATE-----\n";
+
+class SSLStreamAdapterTestBase;
+
+class SSLDummyStreamBase : public rtc::StreamInterface,
+                           public sigslot::has_slots<> {
+ public:
+  SSLDummyStreamBase(SSLStreamAdapterTestBase* test,
+                     const std::string &side,
+                     rtc::StreamInterface* in,
+                     rtc::StreamInterface* out) :
+      test_base_(test),
+      side_(side),
+      in_(in),
+      out_(out),
+      first_packet_(true) {
+    in_->SignalEvent.connect(this, &SSLDummyStreamBase::OnEventIn);
+    out_->SignalEvent.connect(this, &SSLDummyStreamBase::OnEventOut);
+  }
+
+  rtc::StreamState GetState() const override { return rtc::SS_OPEN; }
+
+  rtc::StreamResult Read(void* buffer, size_t buffer_len,
+                         size_t* read, int* error) override {
+    rtc::StreamResult r;
+
+    r = in_->Read(buffer, buffer_len, read, error);
+    if (r == rtc::SR_BLOCK)
+      return rtc::SR_BLOCK;
+    if (r == rtc::SR_EOS)
+      return rtc::SR_EOS;
+
+    if (r != rtc::SR_SUCCESS) {
+      ADD_FAILURE();
+      return rtc::SR_ERROR;
+    }
+
+    return rtc::SR_SUCCESS;
+  }
+
+  // Catch readability events on in and pass them up.
+  void OnEventIn(rtc::StreamInterface* stream, int sig, int err) {
+    int mask = (rtc::SE_READ | rtc::SE_CLOSE);
+
+    if (sig & mask) {
+      RTC_LOG(LS_VERBOSE) << "SSLDummyStreamBase::OnEvent side=" << side_
+                          << " sig=" << sig << " forwarding upward";
+      PostEvent(sig & mask, 0);
+    }
+  }
+
+  // Catch writeability events on out and pass them up.
+  void OnEventOut(rtc::StreamInterface* stream, int sig, int err) {
+    if (sig & rtc::SE_WRITE) {
+      RTC_LOG(LS_VERBOSE) << "SSLDummyStreamBase::OnEvent side=" << side_
+                          << " sig=" << sig << " forwarding upward";
+
+      PostEvent(sig & rtc::SE_WRITE, 0);
+    }
+  }
+
+  // Write to the outgoing FifoBuffer
+  rtc::StreamResult WriteData(const void* data, size_t data_len,
+                              size_t* written, int* error) {
+    return out_->Write(data, data_len, written, error);
+  }
+
+  rtc::StreamResult Write(const void* data, size_t data_len,
+                          size_t* written, int* error) override;
+
+  void Close() override {
+    RTC_LOG(LS_INFO) << "Closing outbound stream";
+    out_->Close();
+  }
+
+ protected:
+  SSLStreamAdapterTestBase* test_base_;
+  const std::string side_;
+  rtc::StreamInterface* in_;
+  rtc::StreamInterface* out_;
+  bool first_packet_;
+};
+
+class SSLDummyStreamTLS : public SSLDummyStreamBase {
+ public:
+  SSLDummyStreamTLS(SSLStreamAdapterTestBase* test,
+                    const std::string& side,
+                    rtc::FifoBuffer* in,
+                    rtc::FifoBuffer* out) :
+      SSLDummyStreamBase(test, side, in, out) {
+  }
+};
+
+class BufferQueueStream : public rtc::BufferQueue,
+                          public rtc::StreamInterface {
+ public:
+  BufferQueueStream(size_t capacity, size_t default_size)
+      : rtc::BufferQueue(capacity, default_size) {
+  }
+
+  // Implementation of abstract StreamInterface methods.
+
+  // A buffer queue stream is always "open".
+  rtc::StreamState GetState() const override { return rtc::SS_OPEN; }
+
+  // Reading a buffer queue stream will either succeed or block.
+  rtc::StreamResult Read(void* buffer, size_t buffer_len,
+                         size_t* read, int* error) override {
+    if (!ReadFront(buffer, buffer_len, read)) {
+      return rtc::SR_BLOCK;
+    }
+    return rtc::SR_SUCCESS;
+  }
+
+  // Writing to a buffer queue stream will either succeed or block.
+  rtc::StreamResult Write(const void* data, size_t data_len,
+                          size_t* written, int* error) override {
+    if (!WriteBack(data, data_len, written)) {
+      return rtc::SR_BLOCK;
+    }
+    return rtc::SR_SUCCESS;
+  }
+
+  // A buffer queue stream can not be closed.
+  void Close() override {}
+
+ protected:
+  void NotifyReadableForTest() override {
+    PostEvent(rtc::SE_READ, 0);
+  }
+
+  void NotifyWritableForTest() override {
+    PostEvent(rtc::SE_WRITE, 0);
+  }
+};
+
+class SSLDummyStreamDTLS : public SSLDummyStreamBase {
+ public:
+  SSLDummyStreamDTLS(SSLStreamAdapterTestBase* test,
+                     const std::string& side,
+                     BufferQueueStream* in,
+                     BufferQueueStream* out) :
+      SSLDummyStreamBase(test, side, in, out) {
+  }
+};
+
+static const int kFifoBufferSize = 4096;
+static const int kBufferCapacity = 1;
+static const size_t kDefaultBufferSize = 2048;
+
+class SSLStreamAdapterTestBase : public testing::Test,
+                                 public sigslot::has_slots<> {
+ public:
+  SSLStreamAdapterTestBase(
+      const std::string& client_cert_pem,
+      const std::string& client_private_key_pem,
+      bool dtls,
+      rtc::KeyParams client_key_type = rtc::KeyParams(rtc::KT_DEFAULT),
+      rtc::KeyParams server_key_type = rtc::KeyParams(rtc::KT_DEFAULT))
+      : client_cert_pem_(client_cert_pem),
+        client_private_key_pem_(client_private_key_pem),
+        client_key_type_(client_key_type),
+        server_key_type_(server_key_type),
+        client_stream_(nullptr),
+        server_stream_(nullptr),
+        client_identity_(nullptr),
+        server_identity_(nullptr),
+        delay_(0),
+        mtu_(1460),
+        loss_(0),
+        lose_first_packet_(false),
+        damage_(false),
+        dtls_(dtls),
+        handshake_wait_(5000),
+        identities_set_(false) {
+    // Set use of the test RNG to get predictable loss patterns.
+    rtc::SetRandomTestMode(true);
+  }
+
+  ~SSLStreamAdapterTestBase() override {
+    // Put it back for the next test.
+    rtc::SetRandomTestMode(false);
+  }
+
+  void SetUp() override {
+    CreateStreams();
+
+    client_ssl_.reset(rtc::SSLStreamAdapter::Create(client_stream_));
+    server_ssl_.reset(rtc::SSLStreamAdapter::Create(server_stream_));
+
+    // Set up the slots
+    client_ssl_->SignalEvent.connect(this, &SSLStreamAdapterTestBase::OnEvent);
+    server_ssl_->SignalEvent.connect(this, &SSLStreamAdapterTestBase::OnEvent);
+
+    if (!client_cert_pem_.empty() && !client_private_key_pem_.empty()) {
+      client_identity_ = rtc::SSLIdentity::FromPEMStrings(
+          client_private_key_pem_, client_cert_pem_);
+    } else {
+      client_identity_ = rtc::SSLIdentity::Generate("client", client_key_type_);
+    }
+    server_identity_ = rtc::SSLIdentity::Generate("server", server_key_type_);
+
+    client_ssl_->SetIdentity(client_identity_);
+    server_ssl_->SetIdentity(server_identity_);
+  }
+
+  void TearDown() override {
+    client_ssl_.reset(nullptr);
+    server_ssl_.reset(nullptr);
+  }
+
+  virtual void CreateStreams() = 0;
+
+  // Recreate the client/server identities with the specified validity period.
+  // |not_before| and |not_after| are offsets from the current time in number
+  // of seconds.
+  void ResetIdentitiesWithValidity(int not_before, int not_after) {
+    CreateStreams();
+
+    client_ssl_.reset(rtc::SSLStreamAdapter::Create(client_stream_));
+    server_ssl_.reset(rtc::SSLStreamAdapter::Create(server_stream_));
+
+    client_ssl_->SignalEvent.connect(this, &SSLStreamAdapterTestBase::OnEvent);
+    server_ssl_->SignalEvent.connect(this, &SSLStreamAdapterTestBase::OnEvent);
+
+    time_t now = time(nullptr);
+
+    rtc::SSLIdentityParams client_params;
+    client_params.key_params = rtc::KeyParams(rtc::KT_DEFAULT);
+    client_params.common_name = "client";
+    client_params.not_before = now + not_before;
+    client_params.not_after = now + not_after;
+    client_identity_ = rtc::SSLIdentity::GenerateForTest(client_params);
+
+    rtc::SSLIdentityParams server_params;
+    server_params.key_params = rtc::KeyParams(rtc::KT_DEFAULT);
+    server_params.common_name = "server";
+    server_params.not_before = now + not_before;
+    server_params.not_after = now + not_after;
+    server_identity_ = rtc::SSLIdentity::GenerateForTest(server_params);
+
+    client_ssl_->SetIdentity(client_identity_);
+    server_ssl_->SetIdentity(server_identity_);
+  }
+
+  virtual void OnEvent(rtc::StreamInterface *stream, int sig, int err) {
+    RTC_LOG(LS_VERBOSE) << "SSLStreamAdapterTestBase::OnEvent sig=" << sig;
+
+    if (sig & rtc::SE_READ) {
+      ReadData(stream);
+    }
+
+    if ((stream == client_ssl_.get()) && (sig & rtc::SE_WRITE)) {
+      WriteData();
+    }
+  }
+
+  void SetPeerIdentitiesByDigest(bool correct, bool expect_success) {
+    unsigned char server_digest[20];
+    size_t server_digest_len;
+    unsigned char client_digest[20];
+    size_t client_digest_len;
+    bool rv;
+    rtc::SSLPeerCertificateDigestError err;
+    rtc::SSLPeerCertificateDigestError expected_err =
+        expect_success
+            ? rtc::SSLPeerCertificateDigestError::NONE
+            : rtc::SSLPeerCertificateDigestError::VERIFICATION_FAILED;
+
+    RTC_LOG(LS_INFO) << "Setting peer identities by digest";
+
+    rv = server_identity_->certificate().ComputeDigest(
+        rtc::DIGEST_SHA_1, server_digest, 20, &server_digest_len);
+    ASSERT_TRUE(rv);
+    rv = client_identity_->certificate().ComputeDigest(
+        rtc::DIGEST_SHA_1, client_digest, 20, &client_digest_len);
+    ASSERT_TRUE(rv);
+
+    if (!correct) {
+      RTC_LOG(LS_INFO) << "Setting bogus digest for server cert";
+      server_digest[0]++;
+    }
+    rv = client_ssl_->SetPeerCertificateDigest(rtc::DIGEST_SHA_1, server_digest,
+                                               server_digest_len, &err);
+    EXPECT_EQ(expected_err, err);
+    EXPECT_EQ(expect_success, rv);
+
+    if (!correct) {
+      RTC_LOG(LS_INFO) << "Setting bogus digest for client cert";
+      client_digest[0]++;
+    }
+    rv = server_ssl_->SetPeerCertificateDigest(rtc::DIGEST_SHA_1, client_digest,
+                                               client_digest_len, &err);
+    EXPECT_EQ(expected_err, err);
+    EXPECT_EQ(expect_success, rv);
+
+    identities_set_ = true;
+  }
+
+  void SetupProtocolVersions(rtc::SSLProtocolVersion server_version,
+                             rtc::SSLProtocolVersion client_version) {
+    server_ssl_->SetMaxProtocolVersion(server_version);
+    client_ssl_->SetMaxProtocolVersion(client_version);
+  }
+
+  void TestHandshake(bool expect_success = true) {
+    server_ssl_->SetMode(dtls_ ? rtc::SSL_MODE_DTLS :
+                         rtc::SSL_MODE_TLS);
+    client_ssl_->SetMode(dtls_ ? rtc::SSL_MODE_DTLS :
+                         rtc::SSL_MODE_TLS);
+
+    if (!dtls_) {
+      // Make sure we simulate a reliable network for TLS.
+      // This is just a check to make sure that people don't write wrong
+      // tests.
+      RTC_CHECK_EQ(1460, mtu_);
+      RTC_CHECK(!loss_);
+      RTC_CHECK(!lose_first_packet_);
+    }
+
+    if (!identities_set_)
+      SetPeerIdentitiesByDigest(true, true);
+
+    // Start the handshake
+    int rv;
+
+    server_ssl_->SetServerRole();
+    rv = server_ssl_->StartSSL();
+    ASSERT_EQ(0, rv);
+
+    rv = client_ssl_->StartSSL();
+    ASSERT_EQ(0, rv);
+
+    // Now run the handshake
+    if (expect_success) {
+      EXPECT_TRUE_WAIT((client_ssl_->GetState() == rtc::SS_OPEN)
+                       && (server_ssl_->GetState() == rtc::SS_OPEN),
+                       handshake_wait_);
+    } else {
+      EXPECT_TRUE_WAIT(client_ssl_->GetState() == rtc::SS_CLOSED,
+                       handshake_wait_);
+    }
+  }
+
+  // This tests that the handshake can complete before the identity is
+  // verified, and the identity will be verified after the fact.
+  void TestHandshakeWithDelayedIdentity(bool valid_identity) {
+    server_ssl_->SetMode(dtls_ ? rtc::SSL_MODE_DTLS : rtc::SSL_MODE_TLS);
+    client_ssl_->SetMode(dtls_ ? rtc::SSL_MODE_DTLS : rtc::SSL_MODE_TLS);
+
+    if (!dtls_) {
+      // Make sure we simulate a reliable network for TLS.
+      // This is just a check to make sure that people don't write wrong
+      // tests.
+      RTC_CHECK_EQ(1460, mtu_);
+      RTC_CHECK(!loss_);
+      RTC_CHECK(!lose_first_packet_);
+    }
+
+    // Start the handshake
+    int rv;
+
+    server_ssl_->SetServerRole();
+    rv = server_ssl_->StartSSL();
+    ASSERT_EQ(0, rv);
+
+    rv = client_ssl_->StartSSL();
+    ASSERT_EQ(0, rv);
+
+    // Now run the handshake.
+    EXPECT_TRUE_WAIT(
+        client_ssl_->IsTlsConnected() && server_ssl_->IsTlsConnected(),
+        handshake_wait_);
+
+    // Until the identity has been verified, the state should still be
+    // SS_OPENING and writes should return SR_BLOCK.
+    EXPECT_EQ(rtc::SS_OPENING, client_ssl_->GetState());
+    EXPECT_EQ(rtc::SS_OPENING, server_ssl_->GetState());
+    unsigned char packet[1];
+    size_t sent;
+    EXPECT_EQ(rtc::SR_BLOCK, client_ssl_->Write(&packet, 1, &sent, 0));
+    EXPECT_EQ(rtc::SR_BLOCK, server_ssl_->Write(&packet, 1, &sent, 0));
+
+    // If we set an invalid identity at this point, SetPeerCertificateDigest
+    // should return false.
+    SetPeerIdentitiesByDigest(valid_identity, valid_identity);
+    // State should then transition to SS_OPEN or SS_CLOSED based on validation
+    // of the identity.
+    if (valid_identity) {
+      EXPECT_EQ(rtc::SS_OPEN, client_ssl_->GetState());
+      EXPECT_EQ(rtc::SS_OPEN, server_ssl_->GetState());
+    } else {
+      EXPECT_EQ(rtc::SS_CLOSED, client_ssl_->GetState());
+      EXPECT_EQ(rtc::SS_CLOSED, server_ssl_->GetState());
+    }
+  }
+
+  rtc::StreamResult DataWritten(SSLDummyStreamBase *from, const void *data,
+                                size_t data_len, size_t *written,
+                                int *error) {
+    // Randomly drop loss_ percent of packets
+    if (rtc::CreateRandomId() % 100 < static_cast<uint32_t>(loss_)) {
+      RTC_LOG(LS_VERBOSE) << "Randomly dropping packet, size=" << data_len;
+      *written = data_len;
+      return rtc::SR_SUCCESS;
+    }
+    if (dtls_ && (data_len > mtu_)) {
+      RTC_LOG(LS_VERBOSE) << "Dropping packet > mtu, size=" << data_len;
+      *written = data_len;
+      return rtc::SR_SUCCESS;
+    }
+
+    // Optionally damage application data (type 23). Note that we don't damage
+    // handshake packets and we damage the last byte to keep the header
+    // intact but break the MAC.
+    if (damage_ && (*static_cast<const unsigned char *>(data) == 23)) {
+      std::vector<char> buf(data_len);
+
+      RTC_LOG(LS_VERBOSE) << "Damaging packet";
+
+      memcpy(&buf[0], data, data_len);
+      buf[data_len - 1]++;
+
+      return from->WriteData(&buf[0], data_len, written, error);
+    }
+
+    return from->WriteData(data, data_len, written, error);
+  }
+
+  void SetDelay(int delay) {
+    delay_ = delay;
+  }
+  int GetDelay() { return delay_; }
+
+  void SetLoseFirstPacket(bool lose) {
+    lose_first_packet_ = lose;
+  }
+  bool GetLoseFirstPacket() { return lose_first_packet_; }
+
+  void SetLoss(int percent) {
+    loss_ = percent;
+  }
+
+  void SetDamage() {
+    damage_ = true;
+  }
+
+  void SetMtu(size_t mtu) {
+    mtu_ = mtu;
+  }
+
+  void SetHandshakeWait(int wait) {
+    handshake_wait_ = wait;
+  }
+
+  void SetDtlsSrtpCryptoSuites(const std::vector<int>& ciphers, bool client) {
+    if (client)
+      client_ssl_->SetDtlsSrtpCryptoSuites(ciphers);
+    else
+      server_ssl_->SetDtlsSrtpCryptoSuites(ciphers);
+  }
+
+  bool GetDtlsSrtpCryptoSuite(bool client, int* retval) {
+    if (client)
+      return client_ssl_->GetDtlsSrtpCryptoSuite(retval);
+    else
+      return server_ssl_->GetDtlsSrtpCryptoSuite(retval);
+  }
+
+  std::unique_ptr<rtc::SSLCertificate> GetPeerCertificate(bool client) {
+    std::unique_ptr<rtc::SSLCertChain> chain;
+    if (client)
+      chain = client_ssl_->GetPeerSSLCertChain();
+    else
+      chain = server_ssl_->GetPeerSSLCertChain();
+    return (chain && chain->GetSize()) ? chain->Get(0).GetUniqueReference()
+                                       : nullptr;
+  }
+
+  bool GetSslCipherSuite(bool client, int* retval) {
+    if (client)
+      return client_ssl_->GetSslCipherSuite(retval);
+    else
+      return server_ssl_->GetSslCipherSuite(retval);
+  }
+
+  int GetSslVersion(bool client) {
+    if (client)
+      return client_ssl_->GetSslVersion();
+    else
+      return server_ssl_->GetSslVersion();
+  }
+
+  bool ExportKeyingMaterial(const char *label,
+                            const unsigned char *context,
+                            size_t context_len,
+                            bool use_context,
+                            bool client,
+                            unsigned char *result,
+                            size_t result_len) {
+    if (client)
+      return client_ssl_->ExportKeyingMaterial(label,
+                                               context, context_len,
+                                               use_context,
+                                               result, result_len);
+    else
+      return server_ssl_->ExportKeyingMaterial(label,
+                                               context, context_len,
+                                               use_context,
+                                               result, result_len);
+  }
+
+  // To be implemented by subclasses.
+  virtual void WriteData() = 0;
+  virtual void ReadData(rtc::StreamInterface *stream) = 0;
+  virtual void TestTransfer(int size) = 0;
+
+ protected:
+  std::string client_cert_pem_;
+  std::string client_private_key_pem_;
+  rtc::KeyParams client_key_type_;
+  rtc::KeyParams server_key_type_;
+  SSLDummyStreamBase *client_stream_;  // freed by client_ssl_ destructor
+  SSLDummyStreamBase *server_stream_;  // freed by server_ssl_ destructor
+  std::unique_ptr<rtc::SSLStreamAdapter> client_ssl_;
+  std::unique_ptr<rtc::SSLStreamAdapter> server_ssl_;
+  rtc::SSLIdentity *client_identity_;  // freed by client_ssl_ destructor
+  rtc::SSLIdentity *server_identity_;  // freed by server_ssl_ destructor
+  int delay_;
+  size_t mtu_;
+  int loss_;
+  bool lose_first_packet_;
+  bool damage_;
+  bool dtls_;
+  int handshake_wait_;
+  bool identities_set_;
+};
+
+class SSLStreamAdapterTestTLS
+    : public SSLStreamAdapterTestBase,
+      public WithParamInterface<tuple<rtc::KeyParams, rtc::KeyParams>> {
+ public:
+  SSLStreamAdapterTestTLS()
+      : SSLStreamAdapterTestBase("",
+                                 "",
+                                 false,
+                                 ::testing::get<0>(GetParam()),
+                                 ::testing::get<1>(GetParam())),
+        client_buffer_(kFifoBufferSize),
+        server_buffer_(kFifoBufferSize) {
+  }
+
+  void CreateStreams() override {
+    client_stream_ =
+        new SSLDummyStreamTLS(this, "c2s", &client_buffer_, &server_buffer_);
+    server_stream_ =
+        new SSLDummyStreamTLS(this, "s2c", &server_buffer_, &client_buffer_);
+  }
+
+  // Test data transfer for TLS
+  void TestTransfer(int size) override {
+    RTC_LOG(LS_INFO) << "Starting transfer test with " << size << " bytes";
+    // Create some dummy data to send.
+    size_t received;
+
+    send_stream_.ReserveSize(size);
+    for (int i = 0; i < size; ++i) {
+      char ch = static_cast<char>(i);
+      send_stream_.Write(&ch, 1, nullptr, nullptr);
+    }
+    send_stream_.Rewind();
+
+    // Prepare the receive stream.
+    recv_stream_.ReserveSize(size);
+
+    // Start sending
+    WriteData();
+
+    // Wait for the client to close
+    EXPECT_TRUE_WAIT(server_ssl_->GetState() == rtc::SS_CLOSED, 10000);
+
+    // Now check the data
+    recv_stream_.GetSize(&received);
+
+    EXPECT_EQ(static_cast<size_t>(size), received);
+    EXPECT_EQ(0, memcmp(send_stream_.GetBuffer(),
+                        recv_stream_.GetBuffer(), size));
+  }
+
+  void WriteData() override {
+    size_t position, tosend, size;
+    rtc::StreamResult rv;
+    size_t sent;
+    char block[kBlockSize];
+
+    send_stream_.GetSize(&size);
+    if (!size)
+      return;
+
+    for (;;) {
+      send_stream_.GetPosition(&position);
+      if (send_stream_.Read(block, sizeof(block), &tosend, nullptr) !=
+          rtc::SR_EOS) {
+        rv = client_ssl_->Write(block, tosend, &sent, 0);
+
+        if (rv == rtc::SR_SUCCESS) {
+          send_stream_.SetPosition(position + sent);
+          RTC_LOG(LS_VERBOSE) << "Sent: " << position + sent;
+        } else if (rv == rtc::SR_BLOCK) {
+          RTC_LOG(LS_VERBOSE) << "Blocked...";
+          send_stream_.SetPosition(position);
+          break;
+        } else {
+          ADD_FAILURE();
+          break;
+        }
+      } else {
+        // Now close
+        RTC_LOG(LS_INFO) << "Wrote " << position << " bytes. Closing";
+        client_ssl_->Close();
+        break;
+      }
+    }
+  };
+
+  void ReadData(rtc::StreamInterface *stream) override {
+    char buffer[1600];
+    size_t bread;
+    int err2;
+    rtc::StreamResult r;
+
+    for (;;) {
+      r = stream->Read(buffer, sizeof(buffer), &bread, &err2);
+
+      if (r == rtc::SR_ERROR || r == rtc::SR_EOS) {
+        // Unfortunately, errors are the way that the stream adapter
+        // signals close in OpenSSL.
+        stream->Close();
+        return;
+      }
+
+      if (r == rtc::SR_BLOCK)
+        break;
+
+      ASSERT_EQ(rtc::SR_SUCCESS, r);
+      RTC_LOG(LS_VERBOSE) << "Read " << bread;
+
+      recv_stream_.Write(buffer, bread, nullptr, nullptr);
+    }
+  }
+
+ private:
+  rtc::FifoBuffer client_buffer_;
+  rtc::FifoBuffer server_buffer_;
+  rtc::MemoryStream send_stream_;
+  rtc::MemoryStream recv_stream_;
+};
+
+class SSLStreamAdapterTestDTLS
+    : public SSLStreamAdapterTestBase,
+      public WithParamInterface<tuple<rtc::KeyParams, rtc::KeyParams>> {
+ public:
+  SSLStreamAdapterTestDTLS()
+      : SSLStreamAdapterTestBase("",
+                                 "",
+                                 true,
+                                 ::testing::get<0>(GetParam()),
+                                 ::testing::get<1>(GetParam())),
+        client_buffer_(kBufferCapacity, kDefaultBufferSize),
+        server_buffer_(kBufferCapacity, kDefaultBufferSize),
+        packet_size_(1000),
+        count_(0),
+        sent_(0) {}
+
+  SSLStreamAdapterTestDTLS(const std::string& cert_pem,
+                           const std::string& private_key_pem) :
+      SSLStreamAdapterTestBase(cert_pem, private_key_pem, true),
+      client_buffer_(kBufferCapacity, kDefaultBufferSize),
+      server_buffer_(kBufferCapacity, kDefaultBufferSize),
+      packet_size_(1000), count_(0), sent_(0) {
+  }
+
+  void CreateStreams() override {
+    client_stream_ =
+        new SSLDummyStreamDTLS(this, "c2s", &client_buffer_, &server_buffer_);
+    server_stream_ =
+        new SSLDummyStreamDTLS(this, "s2c", &server_buffer_, &client_buffer_);
+  }
+
+  void WriteData() override {
+    unsigned char *packet = new unsigned char[1600];
+
+    while (sent_ < count_) {
+      unsigned int rand_state = sent_;
+      packet[0] = sent_;
+      for (size_t i = 1; i < packet_size_; i++) {
+        // This is a simple LC PRNG.  Keep in synch with identical code below.
+        rand_state = (rand_state * 251 + 19937) >> 7;
+        packet[i] = rand_state & 0xff;
+      }
+
+      size_t sent;
+      rtc::StreamResult rv = client_ssl_->Write(packet, packet_size_, &sent, 0);
+      if (rv == rtc::SR_SUCCESS) {
+        RTC_LOG(LS_VERBOSE) << "Sent: " << sent_;
+        sent_++;
+      } else if (rv == rtc::SR_BLOCK) {
+        RTC_LOG(LS_VERBOSE) << "Blocked...";
+        break;
+      } else {
+        ADD_FAILURE();
+        break;
+      }
+    }
+
+    delete [] packet;
+  }
+
+  void ReadData(rtc::StreamInterface *stream) override {
+    unsigned char buffer[2000];
+    size_t bread;
+    int err2;
+    rtc::StreamResult r;
+
+    for (;;) {
+      r = stream->Read(buffer, 2000, &bread, &err2);
+
+      if (r == rtc::SR_ERROR) {
+        // Unfortunately, errors are the way that the stream adapter
+        // signals close right now
+        stream->Close();
+        return;
+      }
+
+      if (r == rtc::SR_BLOCK)
+        break;
+
+      ASSERT_EQ(rtc::SR_SUCCESS, r);
+      RTC_LOG(LS_VERBOSE) << "Read " << bread;
+
+      // Now parse the datagram
+      ASSERT_EQ(packet_size_, bread);
+      unsigned char packet_num = buffer[0];
+
+      unsigned int rand_state = packet_num;
+      for (size_t i = 1; i < packet_size_; i++) {
+        // This is a simple LC PRNG.  Keep in synch with identical code above.
+        rand_state = (rand_state * 251 + 19937) >> 7;
+        ASSERT_EQ(rand_state & 0xff, buffer[i]);
+      }
+      received_.insert(packet_num);
+    }
+  }
+
+  void TestTransfer(int count) override {
+    count_ = count;
+
+    WriteData();
+
+    EXPECT_TRUE_WAIT(sent_ == count_, 10000);
+    RTC_LOG(LS_INFO) << "sent_ == " << sent_;
+
+    if (damage_) {
+      WAIT(false, 2000);
+      EXPECT_EQ(0U, received_.size());
+    } else if (loss_ == 0) {
+        EXPECT_EQ_WAIT(static_cast<size_t>(sent_), received_.size(), 1000);
+    } else {
+      RTC_LOG(LS_INFO) << "Sent " << sent_ << " packets; received "
+                       << received_.size();
+    }
+  };
+
+ private:
+  BufferQueueStream client_buffer_;
+  BufferQueueStream server_buffer_;
+  size_t packet_size_;
+  int count_;
+  int sent_;
+  std::set<int> received_;
+};
+
+
+rtc::StreamResult SSLDummyStreamBase::Write(const void* data, size_t data_len,
+                                              size_t* written, int* error) {
+  RTC_LOG(LS_VERBOSE) << "Writing to loopback " << data_len;
+
+  if (first_packet_) {
+    first_packet_ = false;
+    if (test_base_->GetLoseFirstPacket()) {
+      RTC_LOG(LS_INFO) << "Losing initial packet of length " << data_len;
+      *written = data_len;  // Fake successful writing also to writer.
+      return rtc::SR_SUCCESS;
+    }
+  }
+
+  return test_base_->DataWritten(this, data, data_len, written, error);
+};
+
+class SSLStreamAdapterTestDTLSFromPEMStrings : public SSLStreamAdapterTestDTLS {
+ public:
+  SSLStreamAdapterTestDTLSFromPEMStrings() :
+      SSLStreamAdapterTestDTLS(kCERT_PEM, kRSA_PRIVATE_KEY_PEM) {
+  }
+};
+
+// Test fixture for certificate chaining. Server will push more than one
+// certificate.
+class SSLStreamAdapterTestDTLSCertChain : public SSLStreamAdapterTestDTLS {
+ public:
+  SSLStreamAdapterTestDTLSCertChain() : SSLStreamAdapterTestDTLS("", ""){};
+  void SetUp() override {
+    CreateStreams();
+
+    client_ssl_.reset(rtc::SSLStreamAdapter::Create(client_stream_));
+    server_ssl_.reset(rtc::SSLStreamAdapter::Create(server_stream_));
+
+    // Set up the slots
+    client_ssl_->SignalEvent.connect(
+        reinterpret_cast<SSLStreamAdapterTestBase*>(this),
+        &SSLStreamAdapterTestBase::OnEvent);
+    server_ssl_->SignalEvent.connect(
+        reinterpret_cast<SSLStreamAdapterTestBase*>(this),
+        &SSLStreamAdapterTestBase::OnEvent);
+
+    if (!client_cert_pem_.empty() && !client_private_key_pem_.empty()) {
+      client_identity_ = rtc::SSLIdentity::FromPEMStrings(
+          client_private_key_pem_, client_cert_pem_);
+    } else {
+      client_identity_ = rtc::SSLIdentity::Generate("client", client_key_type_);
+    }
+
+    client_ssl_->SetIdentity(client_identity_);
+  }
+};
+
+// Basic tests: TLS
+
+// Test that we can make a handshake work
+TEST_P(SSLStreamAdapterTestTLS, TestTLSConnect) {
+  TestHandshake();
+};
+
+TEST_P(SSLStreamAdapterTestTLS, GetPeerCertChainWithOneCertificate) {
+  TestHandshake();
+  std::unique_ptr<rtc::SSLCertChain> cert_chain =
+      client_ssl_->GetPeerSSLCertChain();
+  ASSERT_NE(nullptr, cert_chain);
+  EXPECT_EQ(1u, cert_chain->GetSize());
+  EXPECT_EQ(cert_chain->Get(0).ToPEMString(),
+            server_identity_->certificate().ToPEMString());
+}
+
+TEST_F(SSLStreamAdapterTestDTLSCertChain, TwoCertHandshake) {
+  server_identity_ = rtc::SSLIdentity::FromPEMChainStrings(
+      kRSA_PRIVATE_KEY_PEM, std::string(kCERT_PEM) + kCACert);
+  server_ssl_->SetIdentity(server_identity_);
+  TestHandshake();
+  std::unique_ptr<rtc::SSLCertChain> peer_cert_chain =
+      client_ssl_->GetPeerSSLCertChain();
+  ASSERT_NE(nullptr, peer_cert_chain);
+  ASSERT_EQ(2u, peer_cert_chain->GetSize());
+  EXPECT_EQ(kCERT_PEM, peer_cert_chain->Get(0).ToPEMString());
+  EXPECT_EQ(kCACert, peer_cert_chain->Get(1).ToPEMString());
+}
+
+TEST_F(SSLStreamAdapterTestDTLSCertChain, TwoCertHandshakeWithCopy) {
+  std::unique_ptr<rtc::SSLIdentity> identity(
+      rtc::SSLIdentity::FromPEMChainStrings(kRSA_PRIVATE_KEY_PEM,
+                                            std::string(kCERT_PEM) + kCACert));
+  server_identity_ = identity->GetReference();
+  server_ssl_->SetIdentity(server_identity_);
+  TestHandshake();
+  std::unique_ptr<rtc::SSLCertChain> peer_cert_chain =
+      client_ssl_->GetPeerSSLCertChain();
+  ASSERT_NE(nullptr, peer_cert_chain);
+  ASSERT_EQ(2u, peer_cert_chain->GetSize());
+  EXPECT_EQ(kCERT_PEM, peer_cert_chain->Get(0).ToPEMString());
+  EXPECT_EQ(kCACert, peer_cert_chain->Get(1).ToPEMString());
+}
+
+TEST_F(SSLStreamAdapterTestDTLSCertChain, ThreeCertHandshake) {
+  server_identity_ = rtc::SSLIdentity::FromPEMChainStrings(
+      kRSA_PRIVATE_KEY_PEM, std::string(kCERT_PEM) + kIntCert1 + kCACert);
+  server_ssl_->SetIdentity(server_identity_);
+  TestHandshake();
+  std::unique_ptr<rtc::SSLCertChain> peer_cert_chain =
+      client_ssl_->GetPeerSSLCertChain();
+  ASSERT_NE(nullptr, peer_cert_chain);
+  ASSERT_EQ(3u, peer_cert_chain->GetSize());
+  EXPECT_EQ(kCERT_PEM, peer_cert_chain->Get(0).ToPEMString());
+  EXPECT_EQ(kIntCert1, peer_cert_chain->Get(1).ToPEMString());
+  EXPECT_EQ(kCACert, peer_cert_chain->Get(2).ToPEMString());
+}
+
+// Test that closing the connection on one side updates the other side.
+TEST_P(SSLStreamAdapterTestTLS, TestTLSClose) {
+  TestHandshake();
+  client_ssl_->Close();
+  EXPECT_EQ_WAIT(rtc::SS_CLOSED, server_ssl_->GetState(), handshake_wait_);
+};
+
+// Test transfer -- trivial
+TEST_P(SSLStreamAdapterTestTLS, TestTLSTransfer) {
+  TestHandshake();
+  TestTransfer(100000);
+};
+
+// Test read-write after close.
+TEST_P(SSLStreamAdapterTestTLS, ReadWriteAfterClose) {
+  TestHandshake();
+  TestTransfer(100000);
+  client_ssl_->Close();
+
+  rtc::StreamResult rv;
+  char block[kBlockSize];
+  size_t dummy;
+
+  // It's an error to write after closed.
+  rv = client_ssl_->Write(block, sizeof(block), &dummy, nullptr);
+  ASSERT_EQ(rtc::SR_ERROR, rv);
+
+  // But after closed read gives you EOS.
+  rv = client_ssl_->Read(block, sizeof(block), &dummy, nullptr);
+  ASSERT_EQ(rtc::SR_EOS, rv);
+};
+
+// Test a handshake with a bogus peer digest
+TEST_P(SSLStreamAdapterTestTLS, TestTLSBogusDigest) {
+  SetPeerIdentitiesByDigest(false, true);
+  TestHandshake(false);
+};
+
+TEST_P(SSLStreamAdapterTestTLS, TestTLSDelayedIdentity) {
+  TestHandshakeWithDelayedIdentity(true);
+};
+
+TEST_P(SSLStreamAdapterTestTLS, TestTLSDelayedIdentityWithBogusDigest) {
+  TestHandshakeWithDelayedIdentity(false);
+};
+
+// Test that the correct error is returned when SetPeerCertificateDigest is
+// called with an unknown algorithm.
+TEST_P(SSLStreamAdapterTestTLS,
+       TestSetPeerCertificateDigestWithUnknownAlgorithm) {
+  unsigned char server_digest[20];
+  size_t server_digest_len;
+  bool rv;
+  rtc::SSLPeerCertificateDigestError err;
+
+  rv = server_identity_->certificate().ComputeDigest(
+      rtc::DIGEST_SHA_1, server_digest, 20, &server_digest_len);
+  ASSERT_TRUE(rv);
+
+  rv = client_ssl_->SetPeerCertificateDigest("unknown algorithm", server_digest,
+                                             server_digest_len, &err);
+  EXPECT_EQ(rtc::SSLPeerCertificateDigestError::UNKNOWN_ALGORITHM, err);
+  EXPECT_FALSE(rv);
+}
+
+// Test that the correct error is returned when SetPeerCertificateDigest is
+// called with an invalid digest length.
+TEST_P(SSLStreamAdapterTestTLS, TestSetPeerCertificateDigestWithInvalidLength) {
+  unsigned char server_digest[20];
+  size_t server_digest_len;
+  bool rv;
+  rtc::SSLPeerCertificateDigestError err;
+
+  rv = server_identity_->certificate().ComputeDigest(
+      rtc::DIGEST_SHA_1, server_digest, 20, &server_digest_len);
+  ASSERT_TRUE(rv);
+
+  rv = client_ssl_->SetPeerCertificateDigest(rtc::DIGEST_SHA_1, server_digest,
+                                             server_digest_len - 1, &err);
+  EXPECT_EQ(rtc::SSLPeerCertificateDigestError::INVALID_LENGTH, err);
+  EXPECT_FALSE(rv);
+}
+
+// Test moving a bunch of data
+
+// Basic tests: DTLS
+// Test that we can make a handshake work
+TEST_P(SSLStreamAdapterTestDTLS, TestDTLSConnect) {
+  TestHandshake();
+};
+
+// Test that we can make a handshake work if the first packet in
+// each direction is lost. This gives us predictable loss
+// rather than having to tune random
+TEST_P(SSLStreamAdapterTestDTLS, TestDTLSConnectWithLostFirstPacket) {
+  SetLoseFirstPacket(true);
+  TestHandshake();
+};
+
+// Test a handshake with loss and delay
+TEST_P(SSLStreamAdapterTestDTLS, TestDTLSConnectWithLostFirstPacketDelay2s) {
+  SetLoseFirstPacket(true);
+  SetDelay(2000);
+  SetHandshakeWait(20000);
+  TestHandshake();
+};
+
+// Test a handshake with small MTU
+// Disabled due to https://code.google.com/p/webrtc/issues/detail?id=3910
+TEST_P(SSLStreamAdapterTestDTLS, DISABLED_TestDTLSConnectWithSmallMtu) {
+  SetMtu(700);
+  SetHandshakeWait(20000);
+  TestHandshake();
+};
+
+// Test transfer -- trivial
+TEST_P(SSLStreamAdapterTestDTLS, TestDTLSTransfer) {
+  TestHandshake();
+  TestTransfer(100);
+};
+
+TEST_P(SSLStreamAdapterTestDTLS, TestDTLSTransferWithLoss) {
+  TestHandshake();
+  SetLoss(10);
+  TestTransfer(100);
+};
+
+TEST_P(SSLStreamAdapterTestDTLS, TestDTLSTransferWithDamage) {
+  SetDamage();  // Must be called first because first packet
+                // write happens at end of handshake.
+  TestHandshake();
+  TestTransfer(100);
+};
+
+TEST_P(SSLStreamAdapterTestDTLS, TestDTLSDelayedIdentity) {
+  TestHandshakeWithDelayedIdentity(true);
+};
+
+TEST_P(SSLStreamAdapterTestDTLS, TestDTLSDelayedIdentityWithBogusDigest) {
+  TestHandshakeWithDelayedIdentity(false);
+};
+
+// Test DTLS-SRTP with all high ciphers
+TEST_P(SSLStreamAdapterTestDTLS, TestDTLSSrtpHigh) {
+  std::vector<int> high;
+  high.push_back(rtc::SRTP_AES128_CM_SHA1_80);
+  SetDtlsSrtpCryptoSuites(high, true);
+  SetDtlsSrtpCryptoSuites(high, false);
+  TestHandshake();
+
+  int client_cipher;
+  ASSERT_TRUE(GetDtlsSrtpCryptoSuite(true, &client_cipher));
+  int server_cipher;
+  ASSERT_TRUE(GetDtlsSrtpCryptoSuite(false, &server_cipher));
+
+  ASSERT_EQ(client_cipher, server_cipher);
+  ASSERT_EQ(client_cipher, rtc::SRTP_AES128_CM_SHA1_80);
+};
+
+// Test DTLS-SRTP with all low ciphers
+TEST_P(SSLStreamAdapterTestDTLS, TestDTLSSrtpLow) {
+  std::vector<int> low;
+  low.push_back(rtc::SRTP_AES128_CM_SHA1_32);
+  SetDtlsSrtpCryptoSuites(low, true);
+  SetDtlsSrtpCryptoSuites(low, false);
+  TestHandshake();
+
+  int client_cipher;
+  ASSERT_TRUE(GetDtlsSrtpCryptoSuite(true, &client_cipher));
+  int server_cipher;
+  ASSERT_TRUE(GetDtlsSrtpCryptoSuite(false, &server_cipher));
+
+  ASSERT_EQ(client_cipher, server_cipher);
+  ASSERT_EQ(client_cipher, rtc::SRTP_AES128_CM_SHA1_32);
+};
+
+// Test DTLS-SRTP with a mismatch -- should not converge
+TEST_P(SSLStreamAdapterTestDTLS, TestDTLSSrtpHighLow) {
+  std::vector<int> high;
+  high.push_back(rtc::SRTP_AES128_CM_SHA1_80);
+  std::vector<int> low;
+  low.push_back(rtc::SRTP_AES128_CM_SHA1_32);
+  SetDtlsSrtpCryptoSuites(high, true);
+  SetDtlsSrtpCryptoSuites(low, false);
+  TestHandshake();
+
+  int client_cipher;
+  ASSERT_FALSE(GetDtlsSrtpCryptoSuite(true, &client_cipher));
+  int server_cipher;
+  ASSERT_FALSE(GetDtlsSrtpCryptoSuite(false, &server_cipher));
+};
+
+// Test DTLS-SRTP with each side being mixed -- should select high
+TEST_P(SSLStreamAdapterTestDTLS, TestDTLSSrtpMixed) {
+  std::vector<int> mixed;
+  mixed.push_back(rtc::SRTP_AES128_CM_SHA1_80);
+  mixed.push_back(rtc::SRTP_AES128_CM_SHA1_32);
+  SetDtlsSrtpCryptoSuites(mixed, true);
+  SetDtlsSrtpCryptoSuites(mixed, false);
+  TestHandshake();
+
+  int client_cipher;
+  ASSERT_TRUE(GetDtlsSrtpCryptoSuite(true, &client_cipher));
+  int server_cipher;
+  ASSERT_TRUE(GetDtlsSrtpCryptoSuite(false, &server_cipher));
+
+  ASSERT_EQ(client_cipher, server_cipher);
+  ASSERT_EQ(client_cipher, rtc::SRTP_AES128_CM_SHA1_80);
+};
+
+// Test DTLS-SRTP with all GCM-128 ciphers.
+TEST_P(SSLStreamAdapterTestDTLS, TestDTLSSrtpGCM128) {
+  std::vector<int> gcm128;
+  gcm128.push_back(rtc::SRTP_AEAD_AES_128_GCM);
+  SetDtlsSrtpCryptoSuites(gcm128, true);
+  SetDtlsSrtpCryptoSuites(gcm128, false);
+  TestHandshake();
+
+  int client_cipher;
+  ASSERT_TRUE(GetDtlsSrtpCryptoSuite(true, &client_cipher));
+  int server_cipher;
+  ASSERT_TRUE(GetDtlsSrtpCryptoSuite(false, &server_cipher));
+
+  ASSERT_EQ(client_cipher, server_cipher);
+  ASSERT_EQ(client_cipher, rtc::SRTP_AEAD_AES_128_GCM);
+};
+
+// Test DTLS-SRTP with all GCM-256 ciphers.
+TEST_P(SSLStreamAdapterTestDTLS, TestDTLSSrtpGCM256) {
+  std::vector<int> gcm256;
+  gcm256.push_back(rtc::SRTP_AEAD_AES_256_GCM);
+  SetDtlsSrtpCryptoSuites(gcm256, true);
+  SetDtlsSrtpCryptoSuites(gcm256, false);
+  TestHandshake();
+
+  int client_cipher;
+  ASSERT_TRUE(GetDtlsSrtpCryptoSuite(true, &client_cipher));
+  int server_cipher;
+  ASSERT_TRUE(GetDtlsSrtpCryptoSuite(false, &server_cipher));
+
+  ASSERT_EQ(client_cipher, server_cipher);
+  ASSERT_EQ(client_cipher, rtc::SRTP_AEAD_AES_256_GCM);
+};
+
+// Test DTLS-SRTP with mixed GCM-128/-256 ciphers -- should not converge.
+TEST_P(SSLStreamAdapterTestDTLS, TestDTLSSrtpGCMMismatch) {
+  std::vector<int> gcm128;
+  gcm128.push_back(rtc::SRTP_AEAD_AES_128_GCM);
+  std::vector<int> gcm256;
+  gcm256.push_back(rtc::SRTP_AEAD_AES_256_GCM);
+  SetDtlsSrtpCryptoSuites(gcm128, true);
+  SetDtlsSrtpCryptoSuites(gcm256, false);
+  TestHandshake();
+
+  int client_cipher;
+  ASSERT_FALSE(GetDtlsSrtpCryptoSuite(true, &client_cipher));
+  int server_cipher;
+  ASSERT_FALSE(GetDtlsSrtpCryptoSuite(false, &server_cipher));
+};
+
+// Test DTLS-SRTP with both GCM-128/-256 ciphers -- should select GCM-256.
+TEST_P(SSLStreamAdapterTestDTLS, TestDTLSSrtpGCMMixed) {
+  std::vector<int> gcmBoth;
+  gcmBoth.push_back(rtc::SRTP_AEAD_AES_256_GCM);
+  gcmBoth.push_back(rtc::SRTP_AEAD_AES_128_GCM);
+  SetDtlsSrtpCryptoSuites(gcmBoth, true);
+  SetDtlsSrtpCryptoSuites(gcmBoth, false);
+  TestHandshake();
+
+  int client_cipher;
+  ASSERT_TRUE(GetDtlsSrtpCryptoSuite(true, &client_cipher));
+  int server_cipher;
+  ASSERT_TRUE(GetDtlsSrtpCryptoSuite(false, &server_cipher));
+
+  ASSERT_EQ(client_cipher, server_cipher);
+  ASSERT_EQ(client_cipher, rtc::SRTP_AEAD_AES_256_GCM);
+};
+
+// Test SRTP cipher suite lengths.
+TEST_P(SSLStreamAdapterTestDTLS, TestDTLSSrtpKeyAndSaltLengths) {
+  int key_len;
+  int salt_len;
+
+  ASSERT_FALSE(rtc::GetSrtpKeyAndSaltLengths(
+      rtc::SRTP_INVALID_CRYPTO_SUITE, &key_len, &salt_len));
+
+  ASSERT_TRUE(rtc::GetSrtpKeyAndSaltLengths(
+      rtc::SRTP_AES128_CM_SHA1_32, &key_len, &salt_len));
+  ASSERT_EQ(128/8, key_len);
+  ASSERT_EQ(112/8, salt_len);
+
+  ASSERT_TRUE(rtc::GetSrtpKeyAndSaltLengths(
+      rtc::SRTP_AES128_CM_SHA1_80, &key_len, &salt_len));
+  ASSERT_EQ(128/8, key_len);
+  ASSERT_EQ(112/8, salt_len);
+
+  ASSERT_TRUE(rtc::GetSrtpKeyAndSaltLengths(
+      rtc::SRTP_AEAD_AES_128_GCM, &key_len, &salt_len));
+  ASSERT_EQ(128/8, key_len);
+  ASSERT_EQ(96/8, salt_len);
+
+  ASSERT_TRUE(rtc::GetSrtpKeyAndSaltLengths(
+      rtc::SRTP_AEAD_AES_256_GCM, &key_len, &salt_len));
+  ASSERT_EQ(256/8, key_len);
+  ASSERT_EQ(96/8, salt_len);
+};
+
+// Test an exporter
+TEST_P(SSLStreamAdapterTestDTLS, TestDTLSExporter) {
+  TestHandshake();
+  unsigned char client_out[20];
+  unsigned char server_out[20];
+
+  bool result;
+  result = ExportKeyingMaterial(kExporterLabel,
+                                kExporterContext, kExporterContextLen,
+                                true, true,
+                                client_out, sizeof(client_out));
+  ASSERT_TRUE(result);
+
+  result = ExportKeyingMaterial(kExporterLabel,
+                                kExporterContext, kExporterContextLen,
+                                true, false,
+                                server_out, sizeof(server_out));
+  ASSERT_TRUE(result);
+
+  ASSERT_TRUE(!memcmp(client_out, server_out, sizeof(client_out)));
+}
+
+// Test not yet valid certificates are not rejected.
+TEST_P(SSLStreamAdapterTestDTLS, TestCertNotYetValid) {
+  long one_day = 60 * 60 * 24;
+  // Make the certificates not valid until one day later.
+  ResetIdentitiesWithValidity(one_day, one_day);
+  TestHandshake();
+}
+
+// Test expired certificates are not rejected.
+TEST_P(SSLStreamAdapterTestDTLS, TestCertExpired) {
+  long one_day = 60 * 60 * 24;
+  // Make the certificates already expired.
+  ResetIdentitiesWithValidity(-one_day, -one_day);
+  TestHandshake();
+}
+
+// Test data transfer using certs created from strings.
+TEST_F(SSLStreamAdapterTestDTLSFromPEMStrings, TestTransfer) {
+  TestHandshake();
+  TestTransfer(100);
+}
+
+// Test getting the remote certificate.
+TEST_F(SSLStreamAdapterTestDTLSFromPEMStrings, TestDTLSGetPeerCertificate) {
+  // Peer certificates haven't been received yet.
+  ASSERT_FALSE(GetPeerCertificate(true));
+  ASSERT_FALSE(GetPeerCertificate(false));
+
+  TestHandshake();
+
+  // The client should have a peer certificate after the handshake.
+  std::unique_ptr<rtc::SSLCertificate> client_peer_cert =
+      GetPeerCertificate(true);
+  ASSERT_TRUE(client_peer_cert);
+
+  // It's not kCERT_PEM.
+  std::string client_peer_string = client_peer_cert->ToPEMString();
+  ASSERT_NE(kCERT_PEM, client_peer_string);
+
+  // The server should have a peer certificate after the handshake.
+  std::unique_ptr<rtc::SSLCertificate> server_peer_cert =
+      GetPeerCertificate(false);
+  ASSERT_TRUE(server_peer_cert);
+
+  // It's kCERT_PEM
+  ASSERT_EQ(kCERT_PEM, server_peer_cert->ToPEMString());
+}
+
+// Test getting the used DTLS ciphers.
+// DTLS 1.2 enabled for neither client nor server -> DTLS 1.0 will be used.
+TEST_P(SSLStreamAdapterTestDTLS, TestGetSslCipherSuite) {
+  SetupProtocolVersions(rtc::SSL_PROTOCOL_DTLS_10, rtc::SSL_PROTOCOL_DTLS_10);
+  TestHandshake();
+
+  int client_cipher;
+  ASSERT_TRUE(GetSslCipherSuite(true, &client_cipher));
+  int server_cipher;
+  ASSERT_TRUE(GetSslCipherSuite(false, &server_cipher));
+
+  ASSERT_EQ(rtc::SSL_PROTOCOL_DTLS_10, GetSslVersion(true));
+  ASSERT_EQ(rtc::SSL_PROTOCOL_DTLS_10, GetSslVersion(false));
+
+  ASSERT_EQ(client_cipher, server_cipher);
+  ASSERT_TRUE(rtc::SSLStreamAdapter::IsAcceptableCipher(
+      server_cipher, ::testing::get<1>(GetParam()).type()));
+}
+
+// Test getting the used DTLS 1.2 ciphers.
+// DTLS 1.2 enabled for client and server -> DTLS 1.2 will be used.
+TEST_P(SSLStreamAdapterTestDTLS, TestGetSslCipherSuiteDtls12Both) {
+  SetupProtocolVersions(rtc::SSL_PROTOCOL_DTLS_12, rtc::SSL_PROTOCOL_DTLS_12);
+  TestHandshake();
+
+  int client_cipher;
+  ASSERT_TRUE(GetSslCipherSuite(true, &client_cipher));
+  int server_cipher;
+  ASSERT_TRUE(GetSslCipherSuite(false, &server_cipher));
+
+  ASSERT_EQ(rtc::SSL_PROTOCOL_DTLS_12, GetSslVersion(true));
+  ASSERT_EQ(rtc::SSL_PROTOCOL_DTLS_12, GetSslVersion(false));
+
+  ASSERT_EQ(client_cipher, server_cipher);
+  ASSERT_TRUE(rtc::SSLStreamAdapter::IsAcceptableCipher(
+      server_cipher, ::testing::get<1>(GetParam()).type()));
+}
+
+// DTLS 1.2 enabled for client only -> DTLS 1.0 will be used.
+TEST_P(SSLStreamAdapterTestDTLS, TestGetSslCipherSuiteDtls12Client) {
+  SetupProtocolVersions(rtc::SSL_PROTOCOL_DTLS_10, rtc::SSL_PROTOCOL_DTLS_12);
+  TestHandshake();
+
+  int client_cipher;
+  ASSERT_TRUE(GetSslCipherSuite(true, &client_cipher));
+  int server_cipher;
+  ASSERT_TRUE(GetSslCipherSuite(false, &server_cipher));
+
+  ASSERT_EQ(rtc::SSL_PROTOCOL_DTLS_10, GetSslVersion(true));
+  ASSERT_EQ(rtc::SSL_PROTOCOL_DTLS_10, GetSslVersion(false));
+
+  ASSERT_EQ(client_cipher, server_cipher);
+  ASSERT_TRUE(rtc::SSLStreamAdapter::IsAcceptableCipher(
+      server_cipher, ::testing::get<1>(GetParam()).type()));
+}
+
+// DTLS 1.2 enabled for server only -> DTLS 1.0 will be used.
+TEST_P(SSLStreamAdapterTestDTLS, TestGetSslCipherSuiteDtls12Server) {
+  SetupProtocolVersions(rtc::SSL_PROTOCOL_DTLS_12, rtc::SSL_PROTOCOL_DTLS_10);
+  TestHandshake();
+
+  int client_cipher;
+  ASSERT_TRUE(GetSslCipherSuite(true, &client_cipher));
+  int server_cipher;
+  ASSERT_TRUE(GetSslCipherSuite(false, &server_cipher));
+
+  ASSERT_EQ(rtc::SSL_PROTOCOL_DTLS_10, GetSslVersion(true));
+  ASSERT_EQ(rtc::SSL_PROTOCOL_DTLS_10, GetSslVersion(false));
+
+  ASSERT_EQ(client_cipher, server_cipher);
+  ASSERT_TRUE(rtc::SSLStreamAdapter::IsAcceptableCipher(
+      server_cipher, ::testing::get<1>(GetParam()).type()));
+}
+
+// The RSA keysizes here might look strange, why not include the RFC's size
+// 2048?. The reason is test case slowness; testing two sizes to exercise
+// parametrization is sufficient.
+INSTANTIATE_TEST_CASE_P(
+    SSLStreamAdapterTestsTLS,
+    SSLStreamAdapterTestTLS,
+    Combine(Values(rtc::KeyParams::RSA(1024, 65537),
+                   rtc::KeyParams::RSA(1152, 65537),
+                   rtc::KeyParams::ECDSA(rtc::EC_NIST_P256)),
+            Values(rtc::KeyParams::RSA(1024, 65537),
+                   rtc::KeyParams::RSA(1152, 65537),
+                   rtc::KeyParams::ECDSA(rtc::EC_NIST_P256))));
+INSTANTIATE_TEST_CASE_P(
+    SSLStreamAdapterTestsDTLS,
+    SSLStreamAdapterTestDTLS,
+    Combine(Values(rtc::KeyParams::RSA(1024, 65537),
+                   rtc::KeyParams::RSA(1152, 65537),
+                   rtc::KeyParams::ECDSA(rtc::EC_NIST_P256)),
+            Values(rtc::KeyParams::RSA(1024, 65537),
+                   rtc::KeyParams::RSA(1152, 65537),
+                   rtc::KeyParams::ECDSA(rtc::EC_NIST_P256))));
diff --git a/rtc_base/stream.cc b/rtc_base/stream.cc
new file mode 100644
index 0000000..d937353
--- /dev/null
+++ b/rtc_base/stream.cc
@@ -0,0 +1,1032 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#if defined(WEBRTC_POSIX)
+#include <sys/file.h>
+#endif  // WEBRTC_POSIX
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <errno.h>
+
+#include <algorithm>
+#include <string>
+
+#include "rtc_base/basictypes.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/messagequeue.h"
+#include "rtc_base/stream.h"
+#include "rtc_base/stringencode.h"
+#include "rtc_base/stringutils.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/timeutils.h"
+
+#if defined(WEBRTC_WIN)
+#include <windows.h>
+#define fileno _fileno
+#endif
+
+namespace rtc {
+
+///////////////////////////////////////////////////////////////////////////////
+// StreamInterface
+///////////////////////////////////////////////////////////////////////////////
+StreamInterface::~StreamInterface() {
+}
+
+StreamResult StreamInterface::WriteAll(const void* data, size_t data_len,
+                                       size_t* written, int* error) {
+  StreamResult result = SR_SUCCESS;
+  size_t total_written = 0, current_written;
+  while (total_written < data_len) {
+    result = Write(static_cast<const char*>(data) + total_written,
+                   data_len - total_written, &current_written, error);
+    if (result != SR_SUCCESS)
+      break;
+    total_written += current_written;
+  }
+  if (written)
+    *written = total_written;
+  return result;
+}
+
+StreamResult StreamInterface::ReadAll(void* buffer, size_t buffer_len,
+                                      size_t* read, int* error) {
+  StreamResult result = SR_SUCCESS;
+  size_t total_read = 0, current_read;
+  while (total_read < buffer_len) {
+    result = Read(static_cast<char*>(buffer) + total_read,
+                  buffer_len - total_read, &current_read, error);
+    if (result != SR_SUCCESS)
+      break;
+    total_read += current_read;
+  }
+  if (read)
+    *read = total_read;
+  return result;
+}
+
+StreamResult StreamInterface::ReadLine(std::string* line) {
+  line->clear();
+  StreamResult result = SR_SUCCESS;
+  while (true) {
+    char ch;
+    result = Read(&ch, sizeof(ch), nullptr, nullptr);
+    if (result != SR_SUCCESS) {
+      break;
+    }
+    if (ch == '\n') {
+      break;
+    }
+    line->push_back(ch);
+  }
+  if (!line->empty()) {   // give back the line we've collected so far with
+    result = SR_SUCCESS;  // a success code.  Otherwise return the last code
+  }
+  return result;
+}
+
+void StreamInterface::PostEvent(Thread* t, int events, int err) {
+  t->Post(RTC_FROM_HERE, this, MSG_POST_EVENT,
+          new StreamEventData(events, err));
+}
+
+void StreamInterface::PostEvent(int events, int err) {
+  PostEvent(Thread::Current(), events, err);
+}
+
+const void* StreamInterface::GetReadData(size_t* data_len) {
+  return nullptr;
+}
+
+void* StreamInterface::GetWriteBuffer(size_t* buf_len) {
+  return nullptr;
+}
+
+bool StreamInterface::SetPosition(size_t position) {
+  return false;
+}
+
+bool StreamInterface::GetPosition(size_t* position) const {
+  return false;
+}
+
+bool StreamInterface::GetSize(size_t* size) const {
+  return false;
+}
+
+bool StreamInterface::GetAvailable(size_t* size) const {
+  return false;
+}
+
+bool StreamInterface::GetWriteRemaining(size_t* size) const {
+  return false;
+}
+
+bool StreamInterface::Flush() {
+  return false;
+}
+
+bool StreamInterface::ReserveSize(size_t size) {
+  return true;
+}
+
+StreamInterface::StreamInterface() {
+}
+
+void StreamInterface::OnMessage(Message* msg) {
+  if (MSG_POST_EVENT == msg->message_id) {
+    StreamEventData* pe = static_cast<StreamEventData*>(msg->pdata);
+    SignalEvent(this, pe->events, pe->error);
+    delete msg->pdata;
+  }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// StreamAdapterInterface
+///////////////////////////////////////////////////////////////////////////////
+
+StreamAdapterInterface::StreamAdapterInterface(StreamInterface* stream,
+                                               bool owned)
+    : stream_(stream), owned_(owned) {
+  if (nullptr != stream_)
+    stream_->SignalEvent.connect(this, &StreamAdapterInterface::OnEvent);
+}
+
+StreamState StreamAdapterInterface::GetState() const {
+  return stream_->GetState();
+}
+StreamResult StreamAdapterInterface::Read(void* buffer,
+                                          size_t buffer_len,
+                                          size_t* read,
+                                          int* error) {
+  return stream_->Read(buffer, buffer_len, read, error);
+}
+StreamResult StreamAdapterInterface::Write(const void* data,
+                                           size_t data_len,
+                                           size_t* written,
+                                           int* error) {
+  return stream_->Write(data, data_len, written, error);
+}
+void StreamAdapterInterface::Close() {
+  stream_->Close();
+}
+
+bool StreamAdapterInterface::SetPosition(size_t position) {
+  return stream_->SetPosition(position);
+}
+
+bool StreamAdapterInterface::GetPosition(size_t* position) const {
+  return stream_->GetPosition(position);
+}
+
+bool StreamAdapterInterface::GetSize(size_t* size) const {
+  return stream_->GetSize(size);
+}
+
+bool StreamAdapterInterface::GetAvailable(size_t* size) const {
+  return stream_->GetAvailable(size);
+}
+
+bool StreamAdapterInterface::GetWriteRemaining(size_t* size) const {
+  return stream_->GetWriteRemaining(size);
+}
+
+bool StreamAdapterInterface::ReserveSize(size_t size) {
+  return stream_->ReserveSize(size);
+}
+
+bool StreamAdapterInterface::Flush() {
+  return stream_->Flush();
+}
+
+void StreamAdapterInterface::Attach(StreamInterface* stream, bool owned) {
+  if (nullptr != stream_)
+    stream_->SignalEvent.disconnect(this);
+  if (owned_)
+    delete stream_;
+  stream_ = stream;
+  owned_ = owned;
+  if (nullptr != stream_)
+    stream_->SignalEvent.connect(this, &StreamAdapterInterface::OnEvent);
+}
+
+StreamInterface* StreamAdapterInterface::Detach() {
+  if (nullptr != stream_)
+    stream_->SignalEvent.disconnect(this);
+  StreamInterface* stream = stream_;
+  stream_ = nullptr;
+  return stream;
+}
+
+StreamAdapterInterface::~StreamAdapterInterface() {
+  if (owned_)
+    delete stream_;
+}
+
+void StreamAdapterInterface::OnEvent(StreamInterface* stream,
+                                     int events,
+                                     int err) {
+  SignalEvent(this, events, err);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// StreamTap
+///////////////////////////////////////////////////////////////////////////////
+
+StreamTap::StreamTap(StreamInterface* stream, StreamInterface* tap)
+    : StreamAdapterInterface(stream), tap_(), tap_result_(SR_SUCCESS),
+        tap_error_(0) {
+  AttachTap(tap);
+}
+
+StreamTap::~StreamTap() = default;
+
+void StreamTap::AttachTap(StreamInterface* tap) {
+  tap_.reset(tap);
+}
+
+StreamInterface* StreamTap::DetachTap() {
+  return tap_.release();
+}
+
+StreamResult StreamTap::GetTapResult(int* error) {
+  if (error) {
+    *error = tap_error_;
+  }
+  return tap_result_;
+}
+
+StreamResult StreamTap::Read(void* buffer, size_t buffer_len,
+                             size_t* read, int* error) {
+  size_t backup_read;
+  if (!read) {
+    read = &backup_read;
+  }
+  StreamResult res = StreamAdapterInterface::Read(buffer, buffer_len,
+                                                  read, error);
+  if ((res == SR_SUCCESS) && (tap_result_ == SR_SUCCESS)) {
+    tap_result_ = tap_->WriteAll(buffer, *read, nullptr, &tap_error_);
+  }
+  return res;
+}
+
+StreamResult StreamTap::Write(const void* data, size_t data_len,
+                              size_t* written, int* error) {
+  size_t backup_written;
+  if (!written) {
+    written = &backup_written;
+  }
+  StreamResult res = StreamAdapterInterface::Write(data, data_len,
+                                                   written, error);
+  if ((res == SR_SUCCESS) && (tap_result_ == SR_SUCCESS)) {
+    tap_result_ = tap_->WriteAll(data, *written, nullptr, &tap_error_);
+  }
+  return res;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// NullStream
+///////////////////////////////////////////////////////////////////////////////
+
+NullStream::NullStream() {
+}
+
+NullStream::~NullStream() {
+}
+
+StreamState NullStream::GetState() const {
+  return SS_OPEN;
+}
+
+StreamResult NullStream::Read(void* buffer, size_t buffer_len,
+                              size_t* read, int* error) {
+  if (error) *error = -1;
+  return SR_ERROR;
+}
+
+StreamResult NullStream::Write(const void* data, size_t data_len,
+                               size_t* written, int* error) {
+  if (written) *written = data_len;
+  return SR_SUCCESS;
+}
+
+void NullStream::Close() {
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// FileStream
+///////////////////////////////////////////////////////////////////////////////
+
+FileStream::FileStream() : file_(nullptr) {}
+
+FileStream::~FileStream() {
+  FileStream::Close();
+}
+
+bool FileStream::Open(const std::string& filename, const char* mode,
+                      int* error) {
+  Close();
+#if defined(WEBRTC_WIN)
+  std::wstring wfilename;
+  if (Utf8ToWindowsFilename(filename, &wfilename)) {
+    file_ = _wfopen(wfilename.c_str(), ToUtf16(mode).c_str());
+  } else {
+    if (error) {
+      *error = -1;
+      return false;
+    }
+  }
+#else
+  file_ = fopen(filename.c_str(), mode);
+#endif
+  if (!file_ && error) {
+    *error = errno;
+  }
+  return (file_ != nullptr);
+}
+
+bool FileStream::OpenShare(const std::string& filename, const char* mode,
+                           int shflag, int* error) {
+  Close();
+#if defined(WEBRTC_WIN)
+  std::wstring wfilename;
+  if (Utf8ToWindowsFilename(filename, &wfilename)) {
+    file_ = _wfsopen(wfilename.c_str(), ToUtf16(mode).c_str(), shflag);
+    if (!file_ && error) {
+      *error = errno;
+      return false;
+    }
+    return file_ != nullptr;
+  } else {
+    if (error) {
+      *error = -1;
+    }
+    return false;
+  }
+#else
+  return Open(filename, mode, error);
+#endif
+}
+
+bool FileStream::DisableBuffering() {
+  if (!file_)
+    return false;
+  return (setvbuf(file_, nullptr, _IONBF, 0) == 0);
+}
+
+StreamState FileStream::GetState() const {
+  return (file_ == nullptr) ? SS_CLOSED : SS_OPEN;
+}
+
+StreamResult FileStream::Read(void* buffer, size_t buffer_len,
+                              size_t* read, int* error) {
+  if (!file_)
+    return SR_EOS;
+  size_t result = fread(buffer, 1, buffer_len, file_);
+  if ((result == 0) && (buffer_len > 0)) {
+    if (feof(file_))
+      return SR_EOS;
+    if (error)
+      *error = errno;
+    return SR_ERROR;
+  }
+  if (read)
+    *read = result;
+  return SR_SUCCESS;
+}
+
+StreamResult FileStream::Write(const void* data, size_t data_len,
+                               size_t* written, int* error) {
+  if (!file_)
+    return SR_EOS;
+  size_t result = fwrite(data, 1, data_len, file_);
+  if ((result == 0) && (data_len > 0)) {
+    if (error)
+      *error = errno;
+    return SR_ERROR;
+  }
+  if (written)
+    *written = result;
+  return SR_SUCCESS;
+}
+
+void FileStream::Close() {
+  if (file_) {
+    DoClose();
+    file_ = nullptr;
+  }
+}
+
+bool FileStream::SetPosition(size_t position) {
+  if (!file_)
+    return false;
+  return (fseek(file_, static_cast<int>(position), SEEK_SET) == 0);
+}
+
+bool FileStream::GetPosition(size_t* position) const {
+  RTC_DCHECK(nullptr != position);
+  if (!file_)
+    return false;
+  long result = ftell(file_);
+  if (result < 0)
+    return false;
+  if (position)
+    *position = result;
+  return true;
+}
+
+bool FileStream::GetSize(size_t* size) const {
+  RTC_DCHECK(nullptr != size);
+  if (!file_)
+    return false;
+  struct stat file_stats;
+  if (fstat(fileno(file_), &file_stats) != 0)
+    return false;
+  if (size)
+    *size = file_stats.st_size;
+  return true;
+}
+
+bool FileStream::GetAvailable(size_t* size) const {
+  RTC_DCHECK(nullptr != size);
+  if (!GetSize(size))
+    return false;
+  long result = ftell(file_);
+  if (result < 0)
+    return false;
+  if (size)
+    *size -= result;
+  return true;
+}
+
+bool FileStream::ReserveSize(size_t size) {
+  // TODO: extend the file to the proper length
+  return true;
+}
+
+bool FileStream::Flush() {
+  if (file_) {
+    return (0 == fflush(file_));
+  }
+  // try to flush empty file?
+  RTC_NOTREACHED();
+  return false;
+}
+
+void FileStream::DoClose() {
+  fclose(file_);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// MemoryStream
+///////////////////////////////////////////////////////////////////////////////
+
+MemoryStreamBase::MemoryStreamBase()
+    : buffer_(nullptr), buffer_length_(0), data_length_(0), seek_position_(0) {}
+
+StreamState MemoryStreamBase::GetState() const {
+  return SS_OPEN;
+}
+
+StreamResult MemoryStreamBase::Read(void* buffer, size_t bytes,
+                                    size_t* bytes_read, int* error) {
+  if (seek_position_ >= data_length_) {
+    return SR_EOS;
+  }
+  size_t available = data_length_ - seek_position_;
+  if (bytes > available) {
+    // Read partial buffer
+    bytes = available;
+  }
+  memcpy(buffer, &buffer_[seek_position_], bytes);
+  seek_position_ += bytes;
+  if (bytes_read) {
+    *bytes_read = bytes;
+  }
+  return SR_SUCCESS;
+}
+
+StreamResult MemoryStreamBase::Write(const void* buffer, size_t bytes,
+                                     size_t* bytes_written, int* error) {
+  size_t available = buffer_length_ - seek_position_;
+  if (0 == available) {
+    // Increase buffer size to the larger of:
+    // a) new position rounded up to next 256 bytes
+    // b) double the previous length
+    size_t new_buffer_length =
+        std::max(((seek_position_ + bytes) | 0xFF) + 1, buffer_length_ * 2);
+    StreamResult result = DoReserve(new_buffer_length, error);
+    if (SR_SUCCESS != result) {
+      return result;
+    }
+    RTC_DCHECK(buffer_length_ >= new_buffer_length);
+    available = buffer_length_ - seek_position_;
+  }
+
+  if (bytes > available) {
+    bytes = available;
+  }
+  memcpy(&buffer_[seek_position_], buffer, bytes);
+  seek_position_ += bytes;
+  if (data_length_ < seek_position_) {
+    data_length_ = seek_position_;
+  }
+  if (bytes_written) {
+    *bytes_written = bytes;
+  }
+  return SR_SUCCESS;
+}
+
+void MemoryStreamBase::Close() {
+  // nothing to do
+}
+
+bool MemoryStreamBase::SetPosition(size_t position) {
+  if (position > data_length_)
+    return false;
+  seek_position_ = position;
+  return true;
+}
+
+bool MemoryStreamBase::GetPosition(size_t* position) const {
+  if (position)
+    *position = seek_position_;
+  return true;
+}
+
+bool MemoryStreamBase::GetSize(size_t* size) const {
+  if (size)
+    *size = data_length_;
+  return true;
+}
+
+bool MemoryStreamBase::GetAvailable(size_t* size) const {
+  if (size)
+    *size = data_length_ - seek_position_;
+  return true;
+}
+
+bool MemoryStreamBase::ReserveSize(size_t size) {
+  return (SR_SUCCESS == DoReserve(size, nullptr));
+}
+
+StreamResult MemoryStreamBase::DoReserve(size_t size, int* error) {
+  return (buffer_length_ >= size) ? SR_SUCCESS : SR_EOS;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+MemoryStream::MemoryStream() : buffer_alloc_(nullptr) {}
+
+MemoryStream::MemoryStream(const char* data) : buffer_alloc_(nullptr) {
+  SetData(data, strlen(data));
+}
+
+MemoryStream::MemoryStream(const void* data, size_t length)
+    : buffer_alloc_(nullptr) {
+  SetData(data, length);
+}
+
+MemoryStream::~MemoryStream() {
+  delete [] buffer_alloc_;
+}
+
+void MemoryStream::SetData(const void* data, size_t length) {
+  data_length_ = buffer_length_ = length;
+  delete [] buffer_alloc_;
+  buffer_alloc_ = new char[buffer_length_ + kAlignment];
+  buffer_ = reinterpret_cast<char*>(ALIGNP(buffer_alloc_, kAlignment));
+  memcpy(buffer_, data, data_length_);
+  seek_position_ = 0;
+}
+
+StreamResult MemoryStream::DoReserve(size_t size, int* error) {
+  if (buffer_length_ >= size)
+    return SR_SUCCESS;
+
+  if (char* new_buffer_alloc = new char[size + kAlignment]) {
+    char* new_buffer = reinterpret_cast<char*>(
+        ALIGNP(new_buffer_alloc, kAlignment));
+    memcpy(new_buffer, buffer_, data_length_);
+    delete [] buffer_alloc_;
+    buffer_alloc_ = new_buffer_alloc;
+    buffer_ = new_buffer;
+    buffer_length_ = size;
+    return SR_SUCCESS;
+  }
+
+  if (error) {
+    *error = ENOMEM;
+  }
+  return SR_ERROR;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+ExternalMemoryStream::ExternalMemoryStream() {
+}
+
+ExternalMemoryStream::ExternalMemoryStream(void* data, size_t length) {
+  SetData(data, length);
+}
+
+ExternalMemoryStream::~ExternalMemoryStream() {
+}
+
+void ExternalMemoryStream::SetData(void* data, size_t length) {
+  data_length_ = buffer_length_ = length;
+  buffer_ = static_cast<char*>(data);
+  seek_position_ = 0;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// FifoBuffer
+///////////////////////////////////////////////////////////////////////////////
+
+FifoBuffer::FifoBuffer(size_t size)
+    : state_(SS_OPEN), buffer_(new char[size]), buffer_length_(size),
+      data_length_(0), read_position_(0), owner_(Thread::Current()) {
+  // all events are done on the owner_ thread
+}
+
+FifoBuffer::FifoBuffer(size_t size, Thread* owner)
+    : state_(SS_OPEN), buffer_(new char[size]), buffer_length_(size),
+      data_length_(0), read_position_(0), owner_(owner) {
+  // all events are done on the owner_ thread
+}
+
+FifoBuffer::~FifoBuffer() {
+}
+
+bool FifoBuffer::GetBuffered(size_t* size) const {
+  CritScope cs(&crit_);
+  *size = data_length_;
+  return true;
+}
+
+bool FifoBuffer::SetCapacity(size_t size) {
+  CritScope cs(&crit_);
+  if (data_length_ > size) {
+    return false;
+  }
+
+  if (size != buffer_length_) {
+    char* buffer = new char[size];
+    const size_t copy = data_length_;
+    const size_t tail_copy = std::min(copy, buffer_length_ - read_position_);
+    memcpy(buffer, &buffer_[read_position_], tail_copy);
+    memcpy(buffer + tail_copy, &buffer_[0], copy - tail_copy);
+    buffer_.reset(buffer);
+    read_position_ = 0;
+    buffer_length_ = size;
+  }
+  return true;
+}
+
+StreamResult FifoBuffer::ReadOffset(void* buffer, size_t bytes,
+                                    size_t offset, size_t* bytes_read) {
+  CritScope cs(&crit_);
+  return ReadOffsetLocked(buffer, bytes, offset, bytes_read);
+}
+
+StreamResult FifoBuffer::WriteOffset(const void* buffer, size_t bytes,
+                                     size_t offset, size_t* bytes_written) {
+  CritScope cs(&crit_);
+  return WriteOffsetLocked(buffer, bytes, offset, bytes_written);
+}
+
+StreamState FifoBuffer::GetState() const {
+  CritScope cs(&crit_);
+  return state_;
+}
+
+StreamResult FifoBuffer::Read(void* buffer, size_t bytes,
+                              size_t* bytes_read, int* error) {
+  CritScope cs(&crit_);
+  const bool was_writable = data_length_ < buffer_length_;
+  size_t copy = 0;
+  StreamResult result = ReadOffsetLocked(buffer, bytes, 0, &copy);
+
+  if (result == SR_SUCCESS) {
+    // If read was successful then adjust the read position and number of
+    // bytes buffered.
+    read_position_ = (read_position_ + copy) % buffer_length_;
+    data_length_ -= copy;
+    if (bytes_read) {
+      *bytes_read = copy;
+    }
+
+    // if we were full before, and now we're not, post an event
+    if (!was_writable && copy > 0) {
+      PostEvent(owner_, SE_WRITE, 0);
+    }
+  }
+  return result;
+}
+
+StreamResult FifoBuffer::Write(const void* buffer, size_t bytes,
+                               size_t* bytes_written, int* error) {
+  CritScope cs(&crit_);
+
+  const bool was_readable = (data_length_ > 0);
+  size_t copy = 0;
+  StreamResult result = WriteOffsetLocked(buffer, bytes, 0, &copy);
+
+  if (result == SR_SUCCESS) {
+    // If write was successful then adjust the number of readable bytes.
+    data_length_ += copy;
+    if (bytes_written) {
+      *bytes_written = copy;
+    }
+
+    // if we didn't have any data to read before, and now we do, post an event
+    if (!was_readable && copy > 0) {
+      PostEvent(owner_, SE_READ, 0);
+    }
+  }
+  return result;
+}
+
+void FifoBuffer::Close() {
+  CritScope cs(&crit_);
+  state_ = SS_CLOSED;
+}
+
+const void* FifoBuffer::GetReadData(size_t* size) {
+  CritScope cs(&crit_);
+  *size = (read_position_ + data_length_ <= buffer_length_) ?
+      data_length_ : buffer_length_ - read_position_;
+  return &buffer_[read_position_];
+}
+
+void FifoBuffer::ConsumeReadData(size_t size) {
+  CritScope cs(&crit_);
+  RTC_DCHECK(size <= data_length_);
+  const bool was_writable = data_length_ < buffer_length_;
+  read_position_ = (read_position_ + size) % buffer_length_;
+  data_length_ -= size;
+  if (!was_writable && size > 0) {
+    PostEvent(owner_, SE_WRITE, 0);
+  }
+}
+
+void* FifoBuffer::GetWriteBuffer(size_t* size) {
+  CritScope cs(&crit_);
+  if (state_ == SS_CLOSED) {
+    return nullptr;
+  }
+
+  // if empty, reset the write position to the beginning, so we can get
+  // the biggest possible block
+  if (data_length_ == 0) {
+    read_position_ = 0;
+  }
+
+  const size_t write_position = (read_position_ + data_length_)
+      % buffer_length_;
+  *size = (write_position > read_position_ || data_length_ == 0) ?
+      buffer_length_ - write_position : read_position_ - write_position;
+  return &buffer_[write_position];
+}
+
+void FifoBuffer::ConsumeWriteBuffer(size_t size) {
+  CritScope cs(&crit_);
+  RTC_DCHECK(size <= buffer_length_ - data_length_);
+  const bool was_readable = (data_length_ > 0);
+  data_length_ += size;
+  if (!was_readable && size > 0) {
+    PostEvent(owner_, SE_READ, 0);
+  }
+}
+
+bool FifoBuffer::GetWriteRemaining(size_t* size) const {
+  CritScope cs(&crit_);
+  *size = buffer_length_ - data_length_;
+  return true;
+}
+
+StreamResult FifoBuffer::ReadOffsetLocked(void* buffer,
+                                          size_t bytes,
+                                          size_t offset,
+                                          size_t* bytes_read) {
+  if (offset >= data_length_) {
+    return (state_ != SS_CLOSED) ? SR_BLOCK : SR_EOS;
+  }
+
+  const size_t available = data_length_ - offset;
+  const size_t read_position = (read_position_ + offset) % buffer_length_;
+  const size_t copy = std::min(bytes, available);
+  const size_t tail_copy = std::min(copy, buffer_length_ - read_position);
+  char* const p = static_cast<char*>(buffer);
+  memcpy(p, &buffer_[read_position], tail_copy);
+  memcpy(p + tail_copy, &buffer_[0], copy - tail_copy);
+
+  if (bytes_read) {
+    *bytes_read = copy;
+  }
+  return SR_SUCCESS;
+}
+
+StreamResult FifoBuffer::WriteOffsetLocked(const void* buffer,
+                                           size_t bytes,
+                                           size_t offset,
+                                           size_t* bytes_written) {
+  if (state_ == SS_CLOSED) {
+    return SR_EOS;
+  }
+
+  if (data_length_ + offset >= buffer_length_) {
+    return SR_BLOCK;
+  }
+
+  const size_t available = buffer_length_ - data_length_ - offset;
+  const size_t write_position = (read_position_ + data_length_ + offset)
+      % buffer_length_;
+  const size_t copy = std::min(bytes, available);
+  const size_t tail_copy = std::min(copy, buffer_length_ - write_position);
+  const char* const p = static_cast<const char*>(buffer);
+  memcpy(&buffer_[write_position], p, tail_copy);
+  memcpy(&buffer_[0], p + tail_copy, copy - tail_copy);
+
+  if (bytes_written) {
+    *bytes_written = copy;
+  }
+  return SR_SUCCESS;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// StringStream - Reads/Writes to an external std::string
+///////////////////////////////////////////////////////////////////////////////
+
+StringStream::StringStream(std::string* str)
+    : str_(*str), read_pos_(0), read_only_(false) {
+}
+
+StringStream::StringStream(const std::string& str)
+    : str_(const_cast<std::string&>(str)), read_pos_(0), read_only_(true) {
+}
+
+StreamState StringStream::GetState() const {
+  return SS_OPEN;
+}
+
+StreamResult StringStream::Read(void* buffer, size_t buffer_len,
+                                      size_t* read, int* error) {
+  size_t available = std::min(buffer_len, str_.size() - read_pos_);
+  if (!available)
+    return SR_EOS;
+  memcpy(buffer, str_.data() + read_pos_, available);
+  read_pos_ += available;
+  if (read)
+    *read = available;
+  return SR_SUCCESS;
+}
+
+StreamResult StringStream::Write(const void* data, size_t data_len,
+                                      size_t* written, int* error) {
+  if (read_only_) {
+    if (error) {
+      *error = -1;
+    }
+    return SR_ERROR;
+  }
+  str_.append(static_cast<const char*>(data),
+              static_cast<const char*>(data) + data_len);
+  if (written)
+    *written = data_len;
+  return SR_SUCCESS;
+}
+
+void StringStream::Close() {
+}
+
+bool StringStream::SetPosition(size_t position) {
+  if (position > str_.size())
+    return false;
+  read_pos_ = position;
+  return true;
+}
+
+bool StringStream::GetPosition(size_t* position) const {
+  if (position)
+    *position = read_pos_;
+  return true;
+}
+
+bool StringStream::GetSize(size_t* size) const {
+  if (size)
+    *size = str_.size();
+  return true;
+}
+
+bool StringStream::GetAvailable(size_t* size) const {
+  if (size)
+    *size = str_.size() - read_pos_;
+  return true;
+}
+
+bool StringStream::ReserveSize(size_t size) {
+  if (read_only_)
+    return false;
+  str_.reserve(size);
+  return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// StreamReference
+///////////////////////////////////////////////////////////////////////////////
+
+StreamReference::StreamReference(StreamInterface* stream)
+    : StreamAdapterInterface(stream, false) {
+  // owner set to false so the destructor does not free the stream.
+  stream_ref_count_ = new StreamRefCount(stream);
+}
+
+StreamInterface* StreamReference::NewReference() {
+  stream_ref_count_->AddReference();
+  return new StreamReference(stream_ref_count_, stream());
+}
+
+StreamReference::~StreamReference() {
+  stream_ref_count_->Release();
+}
+
+StreamReference::StreamReference(StreamRefCount* stream_ref_count,
+                                 StreamInterface* stream)
+    : StreamAdapterInterface(stream, false),
+      stream_ref_count_(stream_ref_count) {
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+StreamResult Flow(StreamInterface* source,
+                  char* buffer,
+                  size_t buffer_len,
+                  StreamInterface* sink,
+                  size_t* data_len /* = nullptr */) {
+  RTC_DCHECK(buffer_len > 0);
+
+  StreamResult result;
+  size_t count, read_pos, write_pos;
+  if (data_len) {
+    read_pos = *data_len;
+  } else {
+    read_pos = 0;
+  }
+
+  bool end_of_stream = false;
+  do {
+    // Read until buffer is full, end of stream, or error
+    while (!end_of_stream && (read_pos < buffer_len)) {
+      result = source->Read(buffer + read_pos, buffer_len - read_pos, &count,
+                            nullptr);
+      if (result == SR_EOS) {
+        end_of_stream = true;
+      } else if (result != SR_SUCCESS) {
+        if (data_len) {
+          *data_len = read_pos;
+        }
+        return result;
+      } else {
+        read_pos += count;
+      }
+    }
+
+    // Write until buffer is empty, or error (including end of stream)
+    write_pos = 0;
+    while (write_pos < read_pos) {
+      result = sink->Write(buffer + write_pos, read_pos - write_pos, &count,
+                           nullptr);
+      if (result != SR_SUCCESS) {
+        if (data_len) {
+          *data_len = read_pos - write_pos;
+          if (write_pos > 0) {
+            memmove(buffer, buffer + write_pos, *data_len);
+          }
+        }
+        return result;
+      }
+      write_pos += count;
+    }
+
+    read_pos = 0;
+  } while (!end_of_stream);
+
+  if (data_len) {
+    *data_len = 0;
+  }
+  return SR_SUCCESS;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+}  // namespace rtc
diff --git a/rtc_base/stream.h b/rtc_base/stream.h
new file mode 100644
index 0000000..77e4bd8
--- /dev/null
+++ b/rtc_base/stream.h
@@ -0,0 +1,678 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_STREAM_H_
+#define RTC_BASE_STREAM_H_
+
+#include <stdio.h>
+
+#include <memory>
+
+#include "rtc_base/buffer.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/messagehandler.h"
+#include "rtc_base/messagequeue.h"
+#include "rtc_base/sigslot.h"
+
+namespace rtc {
+
+///////////////////////////////////////////////////////////////////////////////
+// StreamInterface is a generic asynchronous stream interface, supporting read,
+// write, and close operations, and asynchronous signalling of state changes.
+// The interface is designed with file, memory, and socket implementations in
+// mind.  Some implementations offer extended operations, such as seeking.
+///////////////////////////////////////////////////////////////////////////////
+
+// The following enumerations are declared outside of the StreamInterface
+// class for brevity in use.
+
+// The SS_OPENING state indicates that the stream will signal open or closed
+// in the future.
+enum StreamState { SS_CLOSED, SS_OPENING, SS_OPEN };
+
+// Stream read/write methods return this value to indicate various success
+// and failure conditions described below.
+enum StreamResult { SR_ERROR, SR_SUCCESS, SR_BLOCK, SR_EOS };
+
+// StreamEvents are used to asynchronously signal state transitionss.  The flags
+// may be combined.
+//  SE_OPEN: The stream has transitioned to the SS_OPEN state
+//  SE_CLOSE: The stream has transitioned to the SS_CLOSED state
+//  SE_READ: Data is available, so Read is likely to not return SR_BLOCK
+//  SE_WRITE: Data can be written, so Write is likely to not return SR_BLOCK
+enum StreamEvent { SE_OPEN = 1, SE_READ = 2, SE_WRITE = 4, SE_CLOSE = 8 };
+
+class Thread;
+
+struct StreamEventData : public MessageData {
+  int events, error;
+  StreamEventData(int ev, int er) : events(ev), error(er) { }
+};
+
+class StreamInterface : public MessageHandler {
+ public:
+  enum {
+    MSG_POST_EVENT = 0xF1F1, MSG_MAX = MSG_POST_EVENT
+  };
+
+  ~StreamInterface() override;
+
+  virtual StreamState GetState() const = 0;
+
+  // Read attempts to fill buffer of size buffer_len.  Write attempts to send
+  // data_len bytes stored in data.  The variables read and write are set only
+  // on SR_SUCCESS (see below).  Likewise, error is only set on SR_ERROR.
+  // Read and Write return a value indicating:
+  //  SR_ERROR: an error occurred, which is returned in a non-null error
+  //    argument.  Interpretation of the error requires knowledge of the
+  //    stream's concrete type, which limits its usefulness.
+  //  SR_SUCCESS: some number of bytes were successfully written, which is
+  //    returned in a non-null read/write argument.
+  //  SR_BLOCK: the stream is in non-blocking mode, and the operation would
+  //    block, or the stream is in SS_OPENING state.
+  //  SR_EOS: the end-of-stream has been reached, or the stream is in the
+  //    SS_CLOSED state.
+  virtual StreamResult Read(void* buffer, size_t buffer_len,
+                            size_t* read, int* error) = 0;
+  virtual StreamResult Write(const void* data, size_t data_len,
+                             size_t* written, int* error) = 0;
+  // Attempt to transition to the SS_CLOSED state.  SE_CLOSE will not be
+  // signalled as a result of this call.
+  virtual void Close() = 0;
+
+  // Streams may signal one or more StreamEvents to indicate state changes.
+  // The first argument identifies the stream on which the state change occured.
+  // The second argument is a bit-wise combination of StreamEvents.
+  // If SE_CLOSE is signalled, then the third argument is the associated error
+  // code.  Otherwise, the value is undefined.
+  // Note: Not all streams will support asynchronous event signalling.  However,
+  // SS_OPENING and SR_BLOCK returned from stream member functions imply that
+  // certain events will be raised in the future.
+  sigslot::signal3<StreamInterface*, int, int> SignalEvent;
+
+  // Like calling SignalEvent, but posts a message to the specified thread,
+  // which will call SignalEvent.  This helps unroll the stack and prevent
+  // re-entrancy.
+  void PostEvent(Thread* t, int events, int err);
+  // Like the aforementioned method, but posts to the current thread.
+  void PostEvent(int events, int err);
+
+  //
+  // OPTIONAL OPERATIONS
+  //
+  // Not all implementations will support the following operations.  In general,
+  // a stream will only support an operation if it reasonably efficient to do
+  // so.  For example, while a socket could buffer incoming data to support
+  // seeking, it will not do so.  Instead, a buffering stream adapter should
+  // be used.
+  //
+  // Even though several of these operations are related, you should
+  // always use whichever operation is most relevant.  For example, you may
+  // be tempted to use GetSize() and GetPosition() to deduce the result of
+  // GetAvailable().  However, a stream which is read-once may support the
+  // latter operation but not the former.
+  //
+
+  // The following four methods are used to avoid copying data multiple times.
+
+  // GetReadData returns a pointer to a buffer which is owned by the stream.
+  // The buffer contains data_len bytes.  null is returned if no data is
+  // available, or if the method fails.  If the caller processes the data, it
+  // must call ConsumeReadData with the number of processed bytes.  GetReadData
+  // does not require a matching call to ConsumeReadData if the data is not
+  // processed.  Read and ConsumeReadData invalidate the buffer returned by
+  // GetReadData.
+  virtual const void* GetReadData(size_t* data_len);
+  virtual void ConsumeReadData(size_t used) {}
+
+  // GetWriteBuffer returns a pointer to a buffer which is owned by the stream.
+  // The buffer has a capacity of buf_len bytes.  null is returned if there is
+  // no buffer available, or if the method fails.  The call may write data to
+  // the buffer, and then call ConsumeWriteBuffer with the number of bytes
+  // written.  GetWriteBuffer does not require a matching call to
+  // ConsumeWriteData if no data is written.  Write, ForceWrite, and
+  // ConsumeWriteData invalidate the buffer returned by GetWriteBuffer.
+  // TODO: Allow the caller to specify a minimum buffer size.  If the specified
+  // amount of buffer is not yet available, return null and Signal SE_WRITE
+  // when it is available.  If the requested amount is too large, return an
+  // error.
+  virtual void* GetWriteBuffer(size_t* buf_len);
+  virtual void ConsumeWriteBuffer(size_t used) {}
+
+  // Write data_len bytes found in data, circumventing any throttling which
+  // would could cause SR_BLOCK to be returned.  Returns true if all the data
+  // was written.  Otherwise, the method is unsupported, or an unrecoverable
+  // error occurred, and the error value is set.  This method should be used
+  // sparingly to write critical data which should not be throttled.  A stream
+  // which cannot circumvent its blocking constraints should not implement this
+  // method.
+  // NOTE: This interface is being considered experimentally at the moment.  It
+  // would be used by JUDP and BandwidthStream as a way to circumvent certain
+  // soft limits in writing.
+  //virtual bool ForceWrite(const void* data, size_t data_len, int* error) {
+  //  if (error) *error = -1;
+  //  return false;
+  //}
+
+  // Seek to a byte offset from the beginning of the stream.  Returns false if
+  // the stream does not support seeking, or cannot seek to the specified
+  // position.
+  virtual bool SetPosition(size_t position);
+
+  // Get the byte offset of the current position from the start of the stream.
+  // Returns false if the position is not known.
+  virtual bool GetPosition(size_t* position) const;
+
+  // Get the byte length of the entire stream.  Returns false if the length
+  // is not known.
+  virtual bool GetSize(size_t* size) const;
+
+  // Return the number of Read()-able bytes remaining before end-of-stream.
+  // Returns false if not known.
+  virtual bool GetAvailable(size_t* size) const;
+
+  // Return the number of Write()-able bytes remaining before end-of-stream.
+  // Returns false if not known.
+  virtual bool GetWriteRemaining(size_t* size) const;
+
+  // Return true if flush is successful.
+  virtual bool Flush();
+
+  // Communicates the amount of data which will be written to the stream.  The
+  // stream may choose to preallocate memory to accomodate this data.  The
+  // stream may return false to indicate that there is not enough room (ie,
+  // Write will return SR_EOS/SR_ERROR at some point).  Note that calling this
+  // function should not affect the existing state of data in the stream.
+  virtual bool ReserveSize(size_t size);
+
+  //
+  // CONVENIENCE METHODS
+  //
+  // These methods are implemented in terms of other methods, for convenience.
+  //
+
+  // Seek to the start of the stream.
+  inline bool Rewind() { return SetPosition(0); }
+
+  // WriteAll is a helper function which repeatedly calls Write until all the
+  // data is written, or something other than SR_SUCCESS is returned.  Note that
+  // unlike Write, the argument 'written' is always set, and may be non-zero
+  // on results other than SR_SUCCESS.  The remaining arguments have the
+  // same semantics as Write.
+  StreamResult WriteAll(const void* data, size_t data_len,
+                        size_t* written, int* error);
+
+  // Similar to ReadAll.  Calls Read until buffer_len bytes have been read, or
+  // until a non-SR_SUCCESS result is returned.  'read' is always set.
+  StreamResult ReadAll(void* buffer, size_t buffer_len,
+                       size_t* read, int* error);
+
+  // ReadLine is a helper function which repeatedly calls Read until it hits
+  // the end-of-line character, or something other than SR_SUCCESS.
+  // TODO: this is too inefficient to keep here.  Break this out into a buffered
+  // readline object or adapter
+  StreamResult ReadLine(std::string* line);
+
+ protected:
+  StreamInterface();
+
+  // MessageHandler Interface
+  void OnMessage(Message* msg) override;
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(StreamInterface);
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// StreamAdapterInterface is a convenient base-class for adapting a stream.
+// By default, all operations are pass-through.  Override the methods that you
+// require adaptation.  Streams should really be upgraded to reference-counted.
+// In the meantime, use the owned flag to indicate whether the adapter should
+// own the adapted stream.
+///////////////////////////////////////////////////////////////////////////////
+
+class StreamAdapterInterface : public StreamInterface,
+                               public sigslot::has_slots<> {
+ public:
+  explicit StreamAdapterInterface(StreamInterface* stream, bool owned = true);
+
+  // Core Stream Interface
+  StreamState GetState() const override;
+  StreamResult Read(void* buffer,
+                    size_t buffer_len,
+                    size_t* read,
+                    int* error) override;
+  StreamResult Write(const void* data,
+                     size_t data_len,
+                     size_t* written,
+                     int* error) override;
+  void Close() override;
+
+  // Optional Stream Interface
+  /*  Note: Many stream adapters were implemented prior to this Read/Write
+      interface.  Therefore, a simple pass through of data in those cases may
+      be broken.  At a later time, we should do a once-over pass of all
+      adapters, and make them compliant with these interfaces, after which this
+      code can be uncommented.
+  virtual const void* GetReadData(size_t* data_len) {
+    return stream_->GetReadData(data_len);
+  }
+  virtual void ConsumeReadData(size_t used) {
+    stream_->ConsumeReadData(used);
+  }
+
+  virtual void* GetWriteBuffer(size_t* buf_len) {
+    return stream_->GetWriteBuffer(buf_len);
+  }
+  virtual void ConsumeWriteBuffer(size_t used) {
+    stream_->ConsumeWriteBuffer(used);
+  }
+  */
+
+  /*  Note: This interface is currently undergoing evaluation.
+  virtual bool ForceWrite(const void* data, size_t data_len, int* error) {
+    return stream_->ForceWrite(data, data_len, error);
+  }
+  */
+
+  bool SetPosition(size_t position) override;
+  bool GetPosition(size_t* position) const override;
+  bool GetSize(size_t* size) const override;
+  bool GetAvailable(size_t* size) const override;
+  bool GetWriteRemaining(size_t* size) const override;
+  bool ReserveSize(size_t size) override;
+  bool Flush() override;
+
+  void Attach(StreamInterface* stream, bool owned = true);
+  StreamInterface* Detach();
+
+ protected:
+  ~StreamAdapterInterface() override;
+
+  // Note that the adapter presents itself as the origin of the stream events,
+  // since users of the adapter may not recognize the adapted object.
+  virtual void OnEvent(StreamInterface* stream, int events, int err);
+  StreamInterface* stream() { return stream_; }
+
+ private:
+  StreamInterface* stream_;
+  bool owned_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(StreamAdapterInterface);
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// StreamTap is a non-modifying, pass-through adapter, which copies all data
+// in either direction to the tap.  Note that errors or blocking on writing to
+// the tap will prevent further tap writes from occurring.
+///////////////////////////////////////////////////////////////////////////////
+
+class StreamTap : public StreamAdapterInterface {
+ public:
+  explicit StreamTap(StreamInterface* stream, StreamInterface* tap);
+  ~StreamTap() override;
+
+  void AttachTap(StreamInterface* tap);
+  StreamInterface* DetachTap();
+  StreamResult GetTapResult(int* error);
+
+  // StreamAdapterInterface Interface
+  StreamResult Read(void* buffer,
+                    size_t buffer_len,
+                    size_t* read,
+                    int* error) override;
+  StreamResult Write(const void* data,
+                     size_t data_len,
+                     size_t* written,
+                     int* error) override;
+
+ private:
+  std::unique_ptr<StreamInterface> tap_;
+  StreamResult tap_result_;
+  int tap_error_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(StreamTap);
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// NullStream gives errors on read, and silently discards all written data.
+///////////////////////////////////////////////////////////////////////////////
+
+class NullStream : public StreamInterface {
+ public:
+  NullStream();
+  ~NullStream() override;
+
+  // StreamInterface Interface
+  StreamState GetState() const override;
+  StreamResult Read(void* buffer,
+                    size_t buffer_len,
+                    size_t* read,
+                    int* error) override;
+  StreamResult Write(const void* data,
+                     size_t data_len,
+                     size_t* written,
+                     int* error) override;
+  void Close() override;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// FileStream is a simple implementation of a StreamInterface, which does not
+// support asynchronous notification.
+///////////////////////////////////////////////////////////////////////////////
+
+class FileStream : public StreamInterface {
+ public:
+  FileStream();
+  ~FileStream() override;
+
+  // The semantics of filename and mode are the same as stdio's fopen
+  virtual bool Open(const std::string& filename, const char* mode, int* error);
+  virtual bool OpenShare(const std::string& filename, const char* mode,
+                         int shflag, int* error);
+
+  // By default, reads and writes are buffered for efficiency.  Disabling
+  // buffering causes writes to block until the bytes on disk are updated.
+  virtual bool DisableBuffering();
+
+  StreamState GetState() const override;
+  StreamResult Read(void* buffer,
+                    size_t buffer_len,
+                    size_t* read,
+                    int* error) override;
+  StreamResult Write(const void* data,
+                     size_t data_len,
+                     size_t* written,
+                     int* error) override;
+  void Close() override;
+  bool SetPosition(size_t position) override;
+  bool GetPosition(size_t* position) const override;
+  bool GetSize(size_t* size) const override;
+  bool GetAvailable(size_t* size) const override;
+  bool ReserveSize(size_t size) override;
+
+  bool Flush() override;
+
+ protected:
+  virtual void DoClose();
+
+  FILE* file_;
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(FileStream);
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// MemoryStream is a simple implementation of a StreamInterface over in-memory
+// data.  Data is read and written at the current seek position.  Reads return
+// end-of-stream when they reach the end of data.  Writes actually extend the
+// end of data mark.
+///////////////////////////////////////////////////////////////////////////////
+
+class MemoryStreamBase : public StreamInterface {
+ public:
+  StreamState GetState() const override;
+  StreamResult Read(void* buffer,
+                    size_t bytes,
+                    size_t* bytes_read,
+                    int* error) override;
+  StreamResult Write(const void* buffer,
+                     size_t bytes,
+                     size_t* bytes_written,
+                     int* error) override;
+  void Close() override;
+  bool SetPosition(size_t position) override;
+  bool GetPosition(size_t* position) const override;
+  bool GetSize(size_t* size) const override;
+  bool GetAvailable(size_t* size) const override;
+  bool ReserveSize(size_t size) override;
+
+  char* GetBuffer() { return buffer_; }
+  const char* GetBuffer() const { return buffer_; }
+
+ protected:
+  MemoryStreamBase();
+
+  virtual StreamResult DoReserve(size_t size, int* error);
+
+  // Invariant: 0 <= seek_position <= data_length_ <= buffer_length_
+  char* buffer_;
+  size_t buffer_length_;
+  size_t data_length_;
+  size_t seek_position_;
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(MemoryStreamBase);
+};
+
+// MemoryStream dynamically resizes to accomodate written data.
+
+class MemoryStream : public MemoryStreamBase {
+ public:
+  MemoryStream();
+  explicit MemoryStream(const char* data);  // Calls SetData(data, strlen(data))
+  MemoryStream(const void* data, size_t length);  // Calls SetData(data, length)
+  ~MemoryStream() override;
+
+  void SetData(const void* data, size_t length);
+
+ protected:
+  StreamResult DoReserve(size_t size, int* error) override;
+  // Memory Streams are aligned for efficiency.
+  static const int kAlignment = 16;
+  char* buffer_alloc_;
+};
+
+// ExternalMemoryStream adapts an external memory buffer, so writes which would
+// extend past the end of the buffer will return end-of-stream.
+
+class ExternalMemoryStream : public MemoryStreamBase {
+ public:
+  ExternalMemoryStream();
+  ExternalMemoryStream(void* data, size_t length);
+  ~ExternalMemoryStream() override;
+
+  void SetData(void* data, size_t length);
+};
+
+// FifoBuffer allows for efficient, thread-safe buffering of data between
+// writer and reader. As the data can wrap around the end of the buffer,
+// MemoryStreamBase can't help us here.
+
+class FifoBuffer : public StreamInterface {
+ public:
+  // Creates a FIFO buffer with the specified capacity.
+  explicit FifoBuffer(size_t length);
+  // Creates a FIFO buffer with the specified capacity and owner
+  FifoBuffer(size_t length, Thread* owner);
+  ~FifoBuffer() override;
+  // Gets the amount of data currently readable from the buffer.
+  bool GetBuffered(size_t* data_len) const;
+  // Resizes the buffer to the specified capacity. Fails if data_length_ > size
+  bool SetCapacity(size_t length);
+
+  // Read into |buffer| with an offset from the current read position, offset
+  // is specified in number of bytes.
+  // This method doesn't adjust read position nor the number of available
+  // bytes, user has to call ConsumeReadData() to do this.
+  StreamResult ReadOffset(void* buffer, size_t bytes, size_t offset,
+                          size_t* bytes_read);
+
+  // Write |buffer| with an offset from the current write position, offset is
+  // specified in number of bytes.
+  // This method doesn't adjust the number of buffered bytes, user has to call
+  // ConsumeWriteBuffer() to do this.
+  StreamResult WriteOffset(const void* buffer, size_t bytes, size_t offset,
+                           size_t* bytes_written);
+
+  // StreamInterface methods
+  StreamState GetState() const override;
+  StreamResult Read(void* buffer,
+                    size_t bytes,
+                    size_t* bytes_read,
+                    int* error) override;
+  StreamResult Write(const void* buffer,
+                     size_t bytes,
+                     size_t* bytes_written,
+                     int* error) override;
+  void Close() override;
+  const void* GetReadData(size_t* data_len) override;
+  void ConsumeReadData(size_t used) override;
+  void* GetWriteBuffer(size_t* buf_len) override;
+  void ConsumeWriteBuffer(size_t used) override;
+  bool GetWriteRemaining(size_t* size) const override;
+
+ private:
+  // Helper method that implements ReadOffset. Caller must acquire a lock
+  // when calling this method.
+  StreamResult ReadOffsetLocked(void* buffer,
+                                size_t bytes,
+                                size_t offset,
+                                size_t* bytes_read)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
+
+  // Helper method that implements WriteOffset. Caller must acquire a lock
+  // when calling this method.
+  StreamResult WriteOffsetLocked(const void* buffer,
+                                 size_t bytes,
+                                 size_t offset,
+                                 size_t* bytes_written)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
+
+  // keeps the opened/closed state of the stream
+  StreamState state_ RTC_GUARDED_BY(crit_);
+  // the allocated buffer
+  std::unique_ptr<char[]> buffer_ RTC_GUARDED_BY(crit_);
+  // size of the allocated buffer
+  size_t buffer_length_ RTC_GUARDED_BY(crit_);
+  // amount of readable data in the buffer
+  size_t data_length_ RTC_GUARDED_BY(crit_);
+  // offset to the readable data
+  size_t read_position_ RTC_GUARDED_BY(crit_);
+  // stream callbacks are dispatched on this thread
+  Thread* owner_;
+  // object lock
+  CriticalSection crit_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(FifoBuffer);
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// StringStream - Reads/Writes to an external std::string
+///////////////////////////////////////////////////////////////////////////////
+
+class StringStream : public StreamInterface {
+ public:
+  explicit StringStream(std::string* str);
+  explicit StringStream(const std::string& str);
+
+  StreamState GetState() const override;
+  StreamResult Read(void* buffer,
+                    size_t buffer_len,
+                    size_t* read,
+                    int* error) override;
+  StreamResult Write(const void* data,
+                     size_t data_len,
+                     size_t* written,
+                     int* error) override;
+  void Close() override;
+  bool SetPosition(size_t position) override;
+  bool GetPosition(size_t* position) const override;
+  bool GetSize(size_t* size) const override;
+  bool GetAvailable(size_t* size) const override;
+  bool ReserveSize(size_t size) override;
+
+ private:
+  std::string& str_;
+  size_t read_pos_;
+  bool read_only_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// StreamReference - A reference counting stream adapter
+///////////////////////////////////////////////////////////////////////////////
+
+// Keep in mind that the streams and adapters defined in this file are
+// not thread-safe, so this has limited uses.
+
+// A StreamRefCount holds the reference count and a pointer to the
+// wrapped stream. It deletes the wrapped stream when there are no
+// more references. We can then have multiple StreamReference
+// instances pointing to one StreamRefCount, all wrapping the same
+// stream.
+
+class StreamReference : public StreamAdapterInterface {
+  class StreamRefCount;
+ public:
+  // Constructor for the first reference to a stream
+  // Note: get more references through NewReference(). Use this
+  // constructor only once on a given stream.
+  explicit StreamReference(StreamInterface* stream);
+  StreamInterface* GetStream() { return stream(); }
+  StreamInterface* NewReference();
+  ~StreamReference() override;
+
+ private:
+  class StreamRefCount {
+   public:
+    explicit StreamRefCount(StreamInterface* stream)
+        : stream_(stream), ref_count_(1) {
+    }
+    void AddReference() {
+      CritScope lock(&cs_);
+      ++ref_count_;
+    }
+    void Release() {
+      int ref_count;
+      {  // Atomic ops would have been a better fit here.
+        CritScope lock(&cs_);
+        ref_count = --ref_count_;
+      }
+      if (ref_count == 0) {
+        delete stream_;
+        delete this;
+      }
+    }
+   private:
+    StreamInterface* stream_;
+    int ref_count_;
+    CriticalSection cs_;
+    RTC_DISALLOW_COPY_AND_ASSIGN(StreamRefCount);
+  };
+
+  // Constructor for adding references
+  explicit StreamReference(StreamRefCount* stream_ref_count,
+                           StreamInterface* stream);
+
+  StreamRefCount* stream_ref_count_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(StreamReference);
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Flow attempts to move bytes from source to sink via buffer of size
+// buffer_len.  The function returns SR_SUCCESS when source reaches
+// end-of-stream (returns SR_EOS), and all the data has been written successful
+// to sink.  Alternately, if source returns SR_BLOCK or SR_ERROR, or if sink
+// returns SR_BLOCK, SR_ERROR, or SR_EOS, then the function immediately returns
+// with the unexpected StreamResult value.
+// data_len is the length of the valid data in buffer. in case of error
+// this is the data that read from source but can't move to destination.
+// as a pass in parameter, it indicates data in buffer that should move to sink
+StreamResult Flow(StreamInterface* source,
+                  char* buffer,
+                  size_t buffer_len,
+                  StreamInterface* sink,
+                  size_t* data_len = nullptr);
+
+///////////////////////////////////////////////////////////////////////////////
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_STREAM_H_
diff --git a/rtc_base/stream_unittest.cc b/rtc_base/stream_unittest.cc
new file mode 100644
index 0000000..8c305c5
--- /dev/null
+++ b/rtc_base/stream_unittest.cc
@@ -0,0 +1,379 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/stream.h"
+#include "rtc_base/gunit.h"
+
+namespace rtc {
+
+///////////////////////////////////////////////////////////////////////////////
+// TestStream
+///////////////////////////////////////////////////////////////////////////////
+
+class TestStream : public StreamInterface {
+ public:
+  TestStream() : pos_(0) { }
+
+  StreamState GetState() const override { return SS_OPEN; }
+
+  StreamResult Read(void* buffer,
+                    size_t buffer_len,
+                    size_t* read,
+                    int* error) override {
+    unsigned char* uc_buffer = static_cast<unsigned char*>(buffer);
+    for (size_t i = 0; i < buffer_len; ++i) {
+      uc_buffer[i] = static_cast<unsigned char>(pos_++);
+    }
+    if (read)
+      *read = buffer_len;
+    return SR_SUCCESS;
+  }
+
+  StreamResult Write(const void* data,
+                     size_t data_len,
+                     size_t* written,
+                     int* error) override {
+    if (error)
+      *error = -1;
+    return SR_ERROR;
+  }
+
+  void Close() override {}
+
+  bool SetPosition(size_t position) override {
+    pos_ = position;
+    return true;
+  }
+
+  bool GetPosition(size_t* position) const override {
+    if (position) *position = pos_;
+    return true;
+  }
+
+  bool GetSize(size_t* size) const override { return false; }
+
+  bool GetAvailable(size_t* size) const override { return false; }
+
+ private:
+  size_t pos_;
+};
+
+bool VerifyTestBuffer(unsigned char* buffer, size_t len,
+                      unsigned char value) {
+  bool passed = true;
+  for (size_t i = 0; i < len; ++i) {
+    if (buffer[i] != value++) {
+      passed = false;
+      break;
+    }
+  }
+  // Ensure that we don't pass again without re-writing
+  memset(buffer, 0, len);
+  return passed;
+}
+
+void SeekTest(StreamInterface* stream, const unsigned char value) {
+  size_t bytes;
+  unsigned char buffer[13] = { 0 };
+  const size_t kBufSize = sizeof(buffer);
+
+  EXPECT_EQ(stream->Read(buffer, kBufSize, &bytes, nullptr), SR_SUCCESS);
+  EXPECT_EQ(bytes, kBufSize);
+  EXPECT_TRUE(VerifyTestBuffer(buffer, kBufSize, value));
+  EXPECT_TRUE(stream->GetPosition(&bytes));
+  EXPECT_EQ(13U, bytes);
+
+  EXPECT_TRUE(stream->SetPosition(7));
+
+  EXPECT_EQ(stream->Read(buffer, kBufSize, &bytes, nullptr), SR_SUCCESS);
+  EXPECT_EQ(bytes, kBufSize);
+  EXPECT_TRUE(VerifyTestBuffer(buffer, kBufSize, value + 7));
+  EXPECT_TRUE(stream->GetPosition(&bytes));
+  EXPECT_EQ(20U, bytes);
+}
+
+TEST(FifoBufferTest, TestAll) {
+  const size_t kSize = 16;
+  const char in[kSize * 2 + 1] = "0123456789ABCDEFGHIJKLMNOPQRSTUV";
+  char out[kSize * 2];
+  void* p;
+  const void* q;
+  size_t bytes;
+  FifoBuffer buf(kSize);
+  StreamInterface* stream = &buf;
+
+  // Test assumptions about base state
+  EXPECT_EQ(SS_OPEN, stream->GetState());
+  EXPECT_EQ(SR_BLOCK, stream->Read(out, kSize, &bytes, nullptr));
+  EXPECT_TRUE(nullptr != stream->GetReadData(&bytes));
+  EXPECT_EQ((size_t)0, bytes);
+  stream->ConsumeReadData(0);
+  EXPECT_TRUE(nullptr != stream->GetWriteBuffer(&bytes));
+  EXPECT_EQ(kSize, bytes);
+  stream->ConsumeWriteBuffer(0);
+
+  // Try a full write
+  EXPECT_EQ(SR_SUCCESS, stream->Write(in, kSize, &bytes, nullptr));
+  EXPECT_EQ(kSize, bytes);
+
+  // Try a write that should block
+  EXPECT_EQ(SR_BLOCK, stream->Write(in, kSize, &bytes, nullptr));
+
+  // Try a full read
+  EXPECT_EQ(SR_SUCCESS, stream->Read(out, kSize, &bytes, nullptr));
+  EXPECT_EQ(kSize, bytes);
+  EXPECT_EQ(0, memcmp(in, out, kSize));
+
+  // Try a read that should block
+  EXPECT_EQ(SR_BLOCK, stream->Read(out, kSize, &bytes, nullptr));
+
+  // Try a too-big write
+  EXPECT_EQ(SR_SUCCESS, stream->Write(in, kSize * 2, &bytes, nullptr));
+  EXPECT_EQ(bytes, kSize);
+
+  // Try a too-big read
+  EXPECT_EQ(SR_SUCCESS, stream->Read(out, kSize * 2, &bytes, nullptr));
+  EXPECT_EQ(kSize, bytes);
+  EXPECT_EQ(0, memcmp(in, out, kSize));
+
+  // Try some small writes and reads
+  EXPECT_EQ(SR_SUCCESS, stream->Write(in, kSize / 2, &bytes, nullptr));
+  EXPECT_EQ(kSize / 2, bytes);
+  EXPECT_EQ(SR_SUCCESS, stream->Read(out, kSize / 2, &bytes, nullptr));
+  EXPECT_EQ(kSize / 2, bytes);
+  EXPECT_EQ(0, memcmp(in, out, kSize / 2));
+  EXPECT_EQ(SR_SUCCESS, stream->Write(in, kSize / 2, &bytes, nullptr));
+  EXPECT_EQ(kSize / 2, bytes);
+  EXPECT_EQ(SR_SUCCESS, stream->Write(in, kSize / 2, &bytes, nullptr));
+  EXPECT_EQ(kSize / 2, bytes);
+  EXPECT_EQ(SR_SUCCESS, stream->Read(out, kSize / 2, &bytes, nullptr));
+  EXPECT_EQ(kSize / 2, bytes);
+  EXPECT_EQ(0, memcmp(in, out, kSize / 2));
+  EXPECT_EQ(SR_SUCCESS, stream->Read(out, kSize / 2, &bytes, nullptr));
+  EXPECT_EQ(kSize / 2, bytes);
+  EXPECT_EQ(0, memcmp(in, out, kSize / 2));
+
+  // Try wraparound reads and writes in the following pattern
+  // WWWWWWWWWWWW.... 0123456789AB....
+  // RRRRRRRRXXXX.... ........89AB....
+  // WWWW....XXXXWWWW 4567....89AB0123
+  // XXXX....RRRRXXXX 4567........0123
+  // XXXXWWWWWWWWXXXX 4567012345670123
+  // RRRRXXXXXXXXRRRR ....01234567....
+  // ....RRRRRRRR.... ................
+  EXPECT_EQ(SR_SUCCESS, stream->Write(in, kSize * 3 / 4, &bytes, nullptr));
+  EXPECT_EQ(kSize * 3 / 4, bytes);
+  EXPECT_EQ(SR_SUCCESS, stream->Read(out, kSize / 2, &bytes, nullptr));
+  EXPECT_EQ(kSize / 2, bytes);
+  EXPECT_EQ(0, memcmp(in, out, kSize / 2));
+  EXPECT_EQ(SR_SUCCESS, stream->Write(in, kSize / 2, &bytes, nullptr));
+  EXPECT_EQ(kSize / 2, bytes);
+  EXPECT_EQ(SR_SUCCESS, stream->Read(out, kSize / 4, &bytes, nullptr));
+  EXPECT_EQ(kSize / 4 , bytes);
+  EXPECT_EQ(0, memcmp(in + kSize / 2, out, kSize / 4));
+  EXPECT_EQ(SR_SUCCESS, stream->Write(in, kSize / 2, &bytes, nullptr));
+  EXPECT_EQ(kSize / 2, bytes);
+  EXPECT_EQ(SR_SUCCESS, stream->Read(out, kSize / 2, &bytes, nullptr));
+  EXPECT_EQ(kSize / 2 , bytes);
+  EXPECT_EQ(0, memcmp(in, out, kSize / 2));
+  EXPECT_EQ(SR_SUCCESS, stream->Read(out, kSize / 2, &bytes, nullptr));
+  EXPECT_EQ(kSize / 2 , bytes);
+  EXPECT_EQ(0, memcmp(in, out, kSize / 2));
+
+  // Use GetWriteBuffer to reset the read_position for the next tests
+  stream->GetWriteBuffer(&bytes);
+  stream->ConsumeWriteBuffer(0);
+
+  // Try using GetReadData to do a full read
+  EXPECT_EQ(SR_SUCCESS, stream->Write(in, kSize, &bytes, nullptr));
+  q = stream->GetReadData(&bytes);
+  EXPECT_TRUE(nullptr != q);
+  EXPECT_EQ(kSize, bytes);
+  EXPECT_EQ(0, memcmp(q, in, kSize));
+  stream->ConsumeReadData(kSize);
+  EXPECT_EQ(SR_BLOCK, stream->Read(out, kSize, &bytes, nullptr));
+
+  // Try using GetReadData to do some small reads
+  EXPECT_EQ(SR_SUCCESS, stream->Write(in, kSize, &bytes, nullptr));
+  q = stream->GetReadData(&bytes);
+  EXPECT_TRUE(nullptr != q);
+  EXPECT_EQ(kSize, bytes);
+  EXPECT_EQ(0, memcmp(q, in, kSize / 2));
+  stream->ConsumeReadData(kSize / 2);
+  q = stream->GetReadData(&bytes);
+  EXPECT_TRUE(nullptr != q);
+  EXPECT_EQ(kSize / 2, bytes);
+  EXPECT_EQ(0, memcmp(q, in + kSize / 2, kSize / 2));
+  stream->ConsumeReadData(kSize / 2);
+  EXPECT_EQ(SR_BLOCK, stream->Read(out, kSize, &bytes, nullptr));
+
+  // Try using GetReadData in a wraparound case
+  // WWWWWWWWWWWWWWWW 0123456789ABCDEF
+  // RRRRRRRRRRRRXXXX ............CDEF
+  // WWWWWWWW....XXXX 01234567....CDEF
+  // ............RRRR 01234567........
+  // RRRRRRRR........ ................
+  EXPECT_EQ(SR_SUCCESS, stream->Write(in, kSize, &bytes, nullptr));
+  EXPECT_EQ(SR_SUCCESS, stream->Read(out, kSize * 3 / 4, &bytes, nullptr));
+  EXPECT_EQ(SR_SUCCESS, stream->Write(in, kSize / 2, &bytes, nullptr));
+  q = stream->GetReadData(&bytes);
+  EXPECT_TRUE(nullptr != q);
+  EXPECT_EQ(kSize / 4, bytes);
+  EXPECT_EQ(0, memcmp(q, in + kSize * 3 / 4, kSize / 4));
+  stream->ConsumeReadData(kSize / 4);
+  q = stream->GetReadData(&bytes);
+  EXPECT_TRUE(nullptr != q);
+  EXPECT_EQ(kSize / 2, bytes);
+  EXPECT_EQ(0, memcmp(q, in, kSize / 2));
+  stream->ConsumeReadData(kSize / 2);
+
+  // Use GetWriteBuffer to reset the read_position for the next tests
+  stream->GetWriteBuffer(&bytes);
+  stream->ConsumeWriteBuffer(0);
+
+  // Try using GetWriteBuffer to do a full write
+  p = stream->GetWriteBuffer(&bytes);
+  EXPECT_TRUE(nullptr != p);
+  EXPECT_EQ(kSize, bytes);
+  memcpy(p, in, kSize);
+  stream->ConsumeWriteBuffer(kSize);
+  EXPECT_EQ(SR_SUCCESS, stream->Read(out, kSize, &bytes, nullptr));
+  EXPECT_EQ(kSize, bytes);
+  EXPECT_EQ(0, memcmp(in, out, kSize));
+
+  // Try using GetWriteBuffer to do some small writes
+  p = stream->GetWriteBuffer(&bytes);
+  EXPECT_TRUE(nullptr != p);
+  EXPECT_EQ(kSize, bytes);
+  memcpy(p, in, kSize / 2);
+  stream->ConsumeWriteBuffer(kSize / 2);
+  p = stream->GetWriteBuffer(&bytes);
+  EXPECT_TRUE(nullptr != p);
+  EXPECT_EQ(kSize / 2, bytes);
+  memcpy(p, in + kSize / 2, kSize / 2);
+  stream->ConsumeWriteBuffer(kSize / 2);
+  EXPECT_EQ(SR_SUCCESS, stream->Read(out, kSize, &bytes, nullptr));
+  EXPECT_EQ(kSize, bytes);
+  EXPECT_EQ(0, memcmp(in, out, kSize));
+
+  // Try using GetWriteBuffer in a wraparound case
+  // WWWWWWWWWWWW.... 0123456789AB....
+  // RRRRRRRRXXXX.... ........89AB....
+  // ........XXXXWWWW ........89AB0123
+  // WWWW....XXXXXXXX 4567....89AB0123
+  // RRRR....RRRRRRRR ................
+  EXPECT_EQ(SR_SUCCESS, stream->Write(in, kSize * 3 / 4, &bytes, nullptr));
+  EXPECT_EQ(SR_SUCCESS, stream->Read(out, kSize / 2, &bytes, nullptr));
+  p = stream->GetWriteBuffer(&bytes);
+  EXPECT_TRUE(nullptr != p);
+  EXPECT_EQ(kSize / 4, bytes);
+  memcpy(p, in, kSize / 4);
+  stream->ConsumeWriteBuffer(kSize / 4);
+  p = stream->GetWriteBuffer(&bytes);
+  EXPECT_TRUE(nullptr != p);
+  EXPECT_EQ(kSize / 2, bytes);
+  memcpy(p, in + kSize / 4, kSize / 4);
+  stream->ConsumeWriteBuffer(kSize / 4);
+  EXPECT_EQ(SR_SUCCESS, stream->Read(out, kSize * 3 / 4, &bytes, nullptr));
+  EXPECT_EQ(kSize * 3 / 4, bytes);
+  EXPECT_EQ(0, memcmp(in + kSize / 2, out, kSize / 4));
+  EXPECT_EQ(0, memcmp(in, out + kSize / 4, kSize / 4));
+
+  // Check that the stream is now empty
+  EXPECT_EQ(SR_BLOCK, stream->Read(out, kSize, &bytes, nullptr));
+
+  // Try growing the buffer
+  EXPECT_EQ(SR_SUCCESS, stream->Write(in, kSize, &bytes, nullptr));
+  EXPECT_EQ(kSize, bytes);
+  EXPECT_TRUE(buf.SetCapacity(kSize * 2));
+  EXPECT_EQ(SR_SUCCESS, stream->Write(in + kSize, kSize, &bytes, nullptr));
+  EXPECT_EQ(kSize, bytes);
+  EXPECT_EQ(SR_SUCCESS, stream->Read(out, kSize * 2, &bytes, nullptr));
+  EXPECT_EQ(kSize * 2, bytes);
+  EXPECT_EQ(0, memcmp(in, out, kSize * 2));
+
+  // Try shrinking the buffer
+  EXPECT_EQ(SR_SUCCESS, stream->Write(in, kSize, &bytes, nullptr));
+  EXPECT_EQ(kSize, bytes);
+  EXPECT_TRUE(buf.SetCapacity(kSize));
+  EXPECT_EQ(SR_BLOCK, stream->Write(in, kSize, &bytes, nullptr));
+  EXPECT_EQ(SR_SUCCESS, stream->Read(out, kSize, &bytes, nullptr));
+  EXPECT_EQ(kSize, bytes);
+  EXPECT_EQ(0, memcmp(in, out, kSize));
+
+  // Write to the stream, close it, read the remaining bytes
+  EXPECT_EQ(SR_SUCCESS, stream->Write(in, kSize / 2, &bytes, nullptr));
+  stream->Close();
+  EXPECT_EQ(SS_CLOSED, stream->GetState());
+  EXPECT_EQ(SR_EOS, stream->Write(in, kSize / 2, &bytes, nullptr));
+  EXPECT_EQ(SR_SUCCESS, stream->Read(out, kSize / 2, &bytes, nullptr));
+  EXPECT_EQ(0, memcmp(in, out, kSize / 2));
+  EXPECT_EQ(SR_EOS, stream->Read(out, kSize / 2, &bytes, nullptr));
+}
+
+TEST(FifoBufferTest, FullBufferCheck) {
+  FifoBuffer buff(10);
+  buff.ConsumeWriteBuffer(10);
+
+  size_t free;
+  EXPECT_TRUE(buff.GetWriteBuffer(&free) != nullptr);
+  EXPECT_EQ(0U, free);
+}
+
+TEST(FifoBufferTest, WriteOffsetAndReadOffset) {
+  const size_t kSize = 16;
+  const char in[kSize * 2 + 1] = "0123456789ABCDEFGHIJKLMNOPQRSTUV";
+  char out[kSize * 2];
+  FifoBuffer buf(kSize);
+
+  // Write 14 bytes.
+  EXPECT_EQ(SR_SUCCESS, buf.Write(in, 14, nullptr, nullptr));
+
+  // Make sure data is in |buf|.
+  size_t buffered;
+  EXPECT_TRUE(buf.GetBuffered(&buffered));
+  EXPECT_EQ(14u, buffered);
+
+  // Read 10 bytes.
+  buf.ConsumeReadData(10);
+
+  // There should be now 12 bytes of available space.
+  size_t remaining;
+  EXPECT_TRUE(buf.GetWriteRemaining(&remaining));
+  EXPECT_EQ(12u, remaining);
+
+  // Write at offset 12, this should fail.
+  EXPECT_EQ(SR_BLOCK, buf.WriteOffset(in, 10, 12, nullptr));
+
+  // Write 8 bytes at offset 4, this wraps around the buffer.
+  EXPECT_EQ(SR_SUCCESS, buf.WriteOffset(in, 8, 4, nullptr));
+
+  // Number of available space remains the same until we call
+  // ConsumeWriteBuffer().
+  EXPECT_TRUE(buf.GetWriteRemaining(&remaining));
+  EXPECT_EQ(12u, remaining);
+  buf.ConsumeWriteBuffer(12);
+
+  // There's 4 bytes bypassed and 4 bytes no read so skip them and verify the
+  // 8 bytes written.
+  size_t read;
+  EXPECT_EQ(SR_SUCCESS, buf.ReadOffset(out, 8, 8, &read));
+  EXPECT_EQ(8u, read);
+  EXPECT_EQ(0, memcmp(out, in, 8));
+
+  // There should still be 16 bytes available for reading.
+  EXPECT_TRUE(buf.GetBuffered(&buffered));
+  EXPECT_EQ(16u, buffered);
+
+  // Read at offset 16, this should fail since we don't have that much data.
+  EXPECT_EQ(SR_BLOCK, buf.ReadOffset(out, 10, 16, nullptr));
+}
+
+}  // namespace rtc
diff --git a/rtc_base/string_to_number.cc b/rtc_base/string_to_number.cc
new file mode 100644
index 0000000..ad49d64
--- /dev/null
+++ b/rtc_base/string_to_number.cc
@@ -0,0 +1,50 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cerrno>
+#include <cstdlib>
+
+#include "rtc_base/string_to_number.h"
+
+namespace rtc {
+namespace string_to_number_internal {
+
+rtc::Optional<signed_type> ParseSigned(const char* str, int base) {
+  RTC_DCHECK(str);
+  if (isdigit(str[0]) || str[0] == '-') {
+    char* end = nullptr;
+    errno = 0;
+    const signed_type value = std::strtoll(str, &end, base);
+    if (end && *end == '\0' && errno == 0) {
+      return value;
+    }
+  }
+  return rtc::nullopt;
+}
+
+rtc::Optional<unsigned_type> ParseUnsigned(const char* str, int base) {
+  RTC_DCHECK(str);
+  if (isdigit(str[0]) || str[0] == '-') {
+    // Explicitly discard negative values. std::strtoull parsing causes unsigned
+    // wraparound. We cannot just reject values that start with -, though, since
+    // -0 is perfectly fine, as is -0000000000000000000000000000000.
+    const bool is_negative = str[0] == '-';
+    char* end = nullptr;
+    errno = 0;
+    const unsigned_type value = std::strtoull(str, &end, base);
+    if (end && *end == '\0' && errno == 0 && (value == 0 || !is_negative)) {
+      return value;
+    }
+  }
+  return rtc::nullopt;
+}
+
+}  // namespace string_to_number_internal
+}  // namespace rtc
diff --git a/rtc_base/string_to_number.h b/rtc_base/string_to_number.h
new file mode 100644
index 0000000..4a8fb8f
--- /dev/null
+++ b/rtc_base/string_to_number.h
@@ -0,0 +1,101 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_STRING_TO_NUMBER_H_
+#define RTC_BASE_STRING_TO_NUMBER_H_
+
+#include <string>
+#include <limits>
+
+#include "api/optional.h"
+
+namespace rtc {
+
+// This file declares a family of functions to parse integers from strings.
+// The standard C library functions either fail to indicate errors (atoi, etc.)
+// or are a hassle to work with (strtol, sscanf, etc.). The standard C++ library
+// functions (std::stoi, etc.) indicate errors by throwing exceptions, which
+// are disabled in WebRTC.
+//
+// Integers are parsed using one of the following functions:
+//   rtc::Optional<int-type> StringToNumber(const char* str, int base = 10);
+//   rtc::Optional<int-type> StringToNumber(const std::string& str,
+//                                          int base = 10);
+//
+// These functions parse a value from the beginning of a string into one of the
+// fundamental integer types, or returns an empty Optional if parsing
+// failed. Values outside of the range supported by the type will be
+// rejected. The strings must begin with a digit or a minus sign. No leading
+// space nor trailing contents are allowed.
+// By setting base to 0, one of octal, decimal or hexadecimal will be
+// detected from the string's prefix (0, nothing or 0x, respectively).
+// If non-zero, base can be set to a value between 2 and 36 inclusively.
+//
+// If desired, this interface could be extended with support for floating-point
+// types.
+
+namespace string_to_number_internal {
+// These must be (unsigned) long long, to match the signature of strto(u)ll.
+using unsigned_type = unsigned long long;  // NOLINT(runtime/int)
+using signed_type = long long;  // NOLINT(runtime/int)
+
+rtc::Optional<signed_type> ParseSigned(const char* str, int base);
+rtc::Optional<unsigned_type> ParseUnsigned(const char* str, int base);
+}  // namespace string_to_number_internal
+
+template <typename T>
+typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value,
+                        rtc::Optional<T>>::type
+StringToNumber(const char* str, int base = 10) {
+  using string_to_number_internal::signed_type;
+  static_assert(
+      std::numeric_limits<T>::max() <=
+              std::numeric_limits<signed_type>::max() &&
+          std::numeric_limits<T>::lowest() >=
+              std::numeric_limits<signed_type>::lowest(),
+      "StringToNumber only supports signed integers as large as long long int");
+  rtc::Optional<signed_type> value =
+      string_to_number_internal::ParseSigned(str, base);
+  if (value && *value >= std::numeric_limits<T>::lowest() &&
+      *value <= std::numeric_limits<T>::max()) {
+    return static_cast<T>(*value);
+  }
+  return rtc::nullopt;
+}
+
+template <typename T>
+typename std::enable_if<std::is_integral<T>::value &&
+                            std::is_unsigned<T>::value,
+                        rtc::Optional<T>>::type
+StringToNumber(const char* str, int base = 10) {
+  using string_to_number_internal::unsigned_type;
+  static_assert(std::numeric_limits<T>::max() <=
+                    std::numeric_limits<unsigned_type>::max(),
+                "StringToNumber only supports unsigned integers as large as "
+                "unsigned long long int");
+  rtc::Optional<unsigned_type> value =
+      string_to_number_internal::ParseUnsigned(str, base);
+  if (value && *value <= std::numeric_limits<T>::max()) {
+    return static_cast<T>(*value);
+  }
+  return rtc::nullopt;
+}
+
+// The std::string overloads only exists if there is a matching const char*
+// version.
+template <typename T>
+auto StringToNumber(const std::string& str, int base = 10)
+    -> decltype(StringToNumber<T>(str.c_str(), base)) {
+  return StringToNumber<T>(str.c_str(), base);
+}
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_STRING_TO_NUMBER_H_
diff --git a/rtc_base/string_to_number_unittest.cc b/rtc_base/string_to_number_unittest.cc
new file mode 100644
index 0000000..f5e5b57
--- /dev/null
+++ b/rtc_base/string_to_number_unittest.cc
@@ -0,0 +1,113 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/string_to_number.h"
+
+#include <string>
+#include <type_traits>
+#include <limits>
+
+#include "rtc_base/gunit.h"
+
+namespace rtc {
+
+namespace {
+// clang-format off
+using IntegerTypes =
+    ::testing::Types<char,
+                     signed char, unsigned char,       // NOLINT(runtime/int)
+                     short,       unsigned short,      // NOLINT(runtime/int)
+                     int,         unsigned int,        // NOLINT(runtime/int)
+                     long,        unsigned long,       // NOLINT(runtime/int)
+                     long long,   unsigned long long,  // NOLINT(runtime/int)
+                     int8_t,      uint8_t,
+                     int16_t,     uint16_t,
+                     int32_t,     uint32_t,
+                     int64_t,     uint64_t>;
+// clang-format on
+
+template <typename T>
+class BasicNumberTest : public ::testing::Test {};
+
+TYPED_TEST_CASE_P(BasicNumberTest);
+
+TYPED_TEST_P(BasicNumberTest, TestValidNumbers) {
+  using T = TypeParam;
+  constexpr T min_value = std::numeric_limits<T>::lowest();
+  constexpr T max_value = std::numeric_limits<T>::max();
+  const std::string min_string = std::to_string(min_value);
+  const std::string max_string = std::to_string(max_value);
+  EXPECT_EQ(min_value, StringToNumber<T>(min_string));
+  EXPECT_EQ(min_value, StringToNumber<T>(min_string.c_str()));
+  EXPECT_EQ(max_value, StringToNumber<T>(max_string));
+  EXPECT_EQ(max_value, StringToNumber<T>(max_string.c_str()));
+  EXPECT_EQ(0, StringToNumber<T>("0"));
+  EXPECT_EQ(0, StringToNumber<T>("-0"));
+  EXPECT_EQ(0, StringToNumber<T>(std::string("-0000000000000")));
+}
+
+TYPED_TEST_P(BasicNumberTest, TestInvalidNumbers) {
+  using T = TypeParam;
+  // Value ranges aren't strictly enforced in this test, since that would either
+  // require doctoring specific strings for each data type, which is a hassle
+  // across platforms, or to be able to do addition of values larger than the
+  // largest type, which is another hassle.
+  constexpr T min_value = std::numeric_limits<T>::lowest();
+  constexpr T max_value = std::numeric_limits<T>::max();
+  // If the type supports negative values, make the large negative value
+  // approximately ten times larger. If the type is unsigned, just use -2.
+  const std::string too_low_string =
+      (min_value == 0) ? "-2" : (std::to_string(min_value) + "1");
+  // Make the large value approximately ten times larger than the maximum.
+  const std::string too_large_string = std::to_string(max_value) + "1";
+  EXPECT_EQ(rtc::nullopt, StringToNumber<T>(too_low_string));
+  EXPECT_EQ(rtc::nullopt, StringToNumber<T>(too_low_string.c_str()));
+  EXPECT_EQ(rtc::nullopt, StringToNumber<T>(too_large_string));
+  EXPECT_EQ(rtc::nullopt, StringToNumber<T>(too_large_string.c_str()));
+}
+
+TYPED_TEST_P(BasicNumberTest, TestInvalidInputs) {
+  using T = TypeParam;
+  const char kInvalidCharArray[] = "Invalid string containing 47";
+  const char kPlusMinusCharArray[] = "+-100";
+  const char kNumberFollowedByCruft[] = "640x480";
+  EXPECT_EQ(rtc::nullopt, StringToNumber<T>(kInvalidCharArray));
+  EXPECT_EQ(rtc::nullopt, StringToNumber<T>(std::string(kInvalidCharArray)));
+  EXPECT_EQ(rtc::nullopt, StringToNumber<T>(kPlusMinusCharArray));
+  EXPECT_EQ(rtc::nullopt, StringToNumber<T>(std::string(kPlusMinusCharArray)));
+  EXPECT_EQ(rtc::nullopt, StringToNumber<T>(kNumberFollowedByCruft));
+  EXPECT_EQ(rtc::nullopt,
+            StringToNumber<T>(std::string(kNumberFollowedByCruft)));
+  EXPECT_EQ(rtc::nullopt, StringToNumber<T>(" 5"));
+  EXPECT_EQ(rtc::nullopt, StringToNumber<T>(" - 5"));
+  EXPECT_EQ(rtc::nullopt, StringToNumber<T>("- 5"));
+  EXPECT_EQ(rtc::nullopt, StringToNumber<T>(" -5"));
+  EXPECT_EQ(rtc::nullopt, StringToNumber<T>("5 "));
+}
+
+REGISTER_TYPED_TEST_CASE_P(BasicNumberTest,
+                           TestValidNumbers,
+                           TestInvalidNumbers,
+                           TestInvalidInputs);
+
+}  // namespace
+
+INSTANTIATE_TYPED_TEST_CASE_P(StringToNumberTest_Integers,
+                              BasicNumberTest,
+                              IntegerTypes);
+
+TEST(StringToNumberTest, TestSpecificValues) {
+  EXPECT_EQ(rtc::nullopt, StringToNumber<uint8_t>("256"));
+  EXPECT_EQ(rtc::nullopt, StringToNumber<uint8_t>("-256"));
+  EXPECT_EQ(rtc::nullopt, StringToNumber<int8_t>("256"));
+  EXPECT_EQ(rtc::nullopt, StringToNumber<int8_t>("-256"));
+}
+
+}  // namespace rtc
diff --git a/rtc_base/stringencode.cc b/rtc_base/stringencode.cc
new file mode 100644
index 0000000..755cb2c
--- /dev/null
+++ b/rtc_base/stringencode.cc
@@ -0,0 +1,389 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/stringencode.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/stringutils.h"
+
+namespace rtc {
+
+/////////////////////////////////////////////////////////////////////////////
+// String Encoding Utilities
+/////////////////////////////////////////////////////////////////////////////
+
+size_t url_decode(char * buffer, size_t buflen,
+                  const char * source, size_t srclen) {
+  if (nullptr == buffer)
+    return srclen + 1;
+  if (buflen <= 0)
+    return 0;
+
+  unsigned char h1, h2;
+  size_t srcpos = 0, bufpos = 0;
+  while ((srcpos < srclen) && (bufpos + 1 < buflen)) {
+    unsigned char ch = source[srcpos++];
+    if (ch == '+') {
+      buffer[bufpos++] = ' ';
+    } else if ((ch == '%')
+               && (srcpos + 1 < srclen)
+               && hex_decode(source[srcpos], &h1)
+               && hex_decode(source[srcpos+1], &h2))
+    {
+      buffer[bufpos++] = (h1 << 4) | h2;
+      srcpos += 2;
+    } else {
+      buffer[bufpos++] = ch;
+    }
+  }
+  buffer[bufpos] = '\0';
+  return bufpos;
+}
+
+size_t utf8_decode(const char* source, size_t srclen, unsigned long* value) {
+  const unsigned char* s = reinterpret_cast<const unsigned char*>(source);
+  if ((s[0] & 0x80) == 0x00) {                    // Check s[0] == 0xxxxxxx
+    *value = s[0];
+    return 1;
+  }
+  if ((srclen < 2) || ((s[1] & 0xC0) != 0x80)) {  // Check s[1] != 10xxxxxx
+    return 0;
+  }
+  // Accumulate the trailer byte values in value16, and combine it with the
+  // relevant bits from s[0], once we've determined the sequence length.
+  unsigned long value16 = (s[1] & 0x3F);
+  if ((s[0] & 0xE0) == 0xC0) {                    // Check s[0] == 110xxxxx
+    *value = ((s[0] & 0x1F) << 6) | value16;
+    return 2;
+  }
+  if ((srclen < 3) || ((s[2] & 0xC0) != 0x80)) {  // Check s[2] != 10xxxxxx
+    return 0;
+  }
+  value16 = (value16 << 6) | (s[2] & 0x3F);
+  if ((s[0] & 0xF0) == 0xE0) {                    // Check s[0] == 1110xxxx
+    *value = ((s[0] & 0x0F) << 12) | value16;
+    return 3;
+  }
+  if ((srclen < 4) || ((s[3] & 0xC0) != 0x80)) {  // Check s[3] != 10xxxxxx
+    return 0;
+  }
+  value16 = (value16 << 6) | (s[3] & 0x3F);
+  if ((s[0] & 0xF8) == 0xF0) {                    // Check s[0] == 11110xxx
+    *value = ((s[0] & 0x07) << 18) | value16;
+    return 4;
+  }
+  return 0;
+}
+
+size_t utf8_encode(char* buffer, size_t buflen, unsigned long value) {
+  if ((value <= 0x7F) && (buflen >= 1)) {
+    buffer[0] = static_cast<unsigned char>(value);
+    return 1;
+  }
+  if ((value <= 0x7FF) && (buflen >= 2)) {
+    buffer[0] = 0xC0 | static_cast<unsigned char>(value >> 6);
+    buffer[1] = 0x80 | static_cast<unsigned char>(value & 0x3F);
+    return 2;
+  }
+  if ((value <= 0xFFFF) && (buflen >= 3)) {
+    buffer[0] = 0xE0 | static_cast<unsigned char>(value >> 12);
+    buffer[1] = 0x80 | static_cast<unsigned char>((value >> 6) & 0x3F);
+    buffer[2] = 0x80 | static_cast<unsigned char>(value & 0x3F);
+    return 3;
+  }
+  if ((value <= 0x1FFFFF) && (buflen >= 4)) {
+    buffer[0] = 0xF0 | static_cast<unsigned char>(value >> 18);
+    buffer[1] = 0x80 | static_cast<unsigned char>((value >> 12) & 0x3F);
+    buffer[2] = 0x80 | static_cast<unsigned char>((value >> 6) & 0x3F);
+    buffer[3] = 0x80 | static_cast<unsigned char>(value & 0x3F);
+    return 4;
+  }
+  return 0;
+}
+
+static const char HEX[] = "0123456789abcdef";
+
+char hex_encode(unsigned char val) {
+  RTC_DCHECK_LT(val, 16);
+  return (val < 16) ? HEX[val] : '!';
+}
+
+bool hex_decode(char ch, unsigned char* val) {
+  if ((ch >= '0') && (ch <= '9')) {
+    *val = ch - '0';
+  } else if ((ch >= 'A') && (ch <= 'Z')) {
+    *val = (ch - 'A') + 10;
+  } else if ((ch >= 'a') && (ch <= 'z')) {
+    *val = (ch - 'a') + 10;
+  } else {
+    return false;
+  }
+  return true;
+}
+
+size_t hex_encode(char* buffer, size_t buflen,
+                  const char* csource, size_t srclen) {
+  return hex_encode_with_delimiter(buffer, buflen, csource, srclen, 0);
+}
+
+size_t hex_encode_with_delimiter(char* buffer, size_t buflen,
+                                 const char* csource, size_t srclen,
+                                 char delimiter) {
+  RTC_DCHECK(buffer);  // TODO(grunell): estimate output size
+  if (buflen == 0)
+    return 0;
+
+  // Init and check bounds.
+  const unsigned char* bsource =
+      reinterpret_cast<const unsigned char*>(csource);
+  size_t srcpos = 0, bufpos = 0;
+  size_t needed = delimiter ? (srclen * 3) : (srclen * 2 + 1);
+  if (buflen < needed)
+    return 0;
+
+  while (srcpos < srclen) {
+    unsigned char ch = bsource[srcpos++];
+    buffer[bufpos  ] = hex_encode((ch >> 4) & 0xF);
+    buffer[bufpos+1] = hex_encode((ch     ) & 0xF);
+    bufpos += 2;
+
+    // Don't write a delimiter after the last byte.
+    if (delimiter && (srcpos < srclen)) {
+      buffer[bufpos] = delimiter;
+      ++bufpos;
+    }
+  }
+
+  // Null terminate.
+  buffer[bufpos] = '\0';
+  return bufpos;
+}
+
+std::string hex_encode(const std::string& str) {
+  return hex_encode(str.c_str(), str.size());
+}
+
+std::string hex_encode(const char* source, size_t srclen) {
+  return hex_encode_with_delimiter(source, srclen, 0);
+}
+
+std::string hex_encode_with_delimiter(const char* source, size_t srclen,
+                                      char delimiter) {
+  const size_t kBufferSize = srclen * 3;
+  char* buffer = STACK_ARRAY(char, kBufferSize);
+  size_t length = hex_encode_with_delimiter(buffer, kBufferSize,
+                                            source, srclen, delimiter);
+  RTC_DCHECK(srclen == 0 || length > 0);
+  return std::string(buffer, length);
+}
+
+size_t hex_decode(char * cbuffer, size_t buflen,
+                  const char * source, size_t srclen) {
+  return hex_decode_with_delimiter(cbuffer, buflen, source, srclen, 0);
+}
+
+size_t hex_decode_with_delimiter(char* cbuffer, size_t buflen,
+                                 const char* source, size_t srclen,
+                                 char delimiter) {
+  RTC_DCHECK(cbuffer);  // TODO(grunell): estimate output size
+  if (buflen == 0)
+    return 0;
+
+  // Init and bounds check.
+  unsigned char* bbuffer = reinterpret_cast<unsigned char*>(cbuffer);
+  size_t srcpos = 0, bufpos = 0;
+  size_t needed = (delimiter) ? (srclen + 1) / 3 : srclen / 2;
+  if (buflen < needed)
+    return 0;
+
+  while (srcpos < srclen) {
+    if ((srclen - srcpos) < 2) {
+      // This means we have an odd number of bytes.
+      return 0;
+    }
+
+    unsigned char h1, h2;
+    if (!hex_decode(source[srcpos], &h1) ||
+        !hex_decode(source[srcpos + 1], &h2))
+      return 0;
+
+    bbuffer[bufpos++] = (h1 << 4) | h2;
+    srcpos += 2;
+
+    // Remove the delimiter if needed.
+    if (delimiter && (srclen - srcpos) > 1) {
+      if (source[srcpos] != delimiter)
+        return 0;
+      ++srcpos;
+    }
+  }
+
+  return bufpos;
+}
+
+size_t hex_decode(char* buffer, size_t buflen, const std::string& source) {
+  return hex_decode_with_delimiter(buffer, buflen, source, 0);
+}
+size_t hex_decode_with_delimiter(char* buffer, size_t buflen,
+                                 const std::string& source, char delimiter) {
+  return hex_decode_with_delimiter(buffer, buflen,
+                                   source.c_str(), source.length(), delimiter);
+}
+
+size_t transform(std::string& value, size_t maxlen, const std::string& source,
+                 Transform t) {
+  char* buffer = STACK_ARRAY(char, maxlen + 1);
+  size_t length = t(buffer, maxlen + 1, source.data(), source.length());
+  value.assign(buffer, length);
+  return length;
+}
+
+std::string s_transform(const std::string& source, Transform t) {
+  // Ask transformation function to approximate the destination size (returns upper bound)
+  size_t maxlen = t(nullptr, 0, source.data(), source.length());
+  char * buffer = STACK_ARRAY(char, maxlen);
+  size_t len = t(buffer, maxlen, source.data(), source.length());
+  std::string result(buffer, len);
+  return result;
+}
+
+size_t tokenize(const std::string& source, char delimiter,
+                std::vector<std::string>* fields) {
+  fields->clear();
+  size_t last = 0;
+  for (size_t i = 0; i < source.length(); ++i) {
+    if (source[i] == delimiter) {
+      if (i != last) {
+        fields->push_back(source.substr(last, i - last));
+      }
+      last = i + 1;
+    }
+  }
+  if (last != source.length()) {
+    fields->push_back(source.substr(last, source.length() - last));
+  }
+  return fields->size();
+}
+
+size_t tokenize_with_empty_tokens(const std::string& source,
+                                  char delimiter,
+                                  std::vector<std::string>* fields) {
+  fields->clear();
+  size_t last = 0;
+  for (size_t i = 0; i < source.length(); ++i) {
+    if (source[i] == delimiter) {
+      fields->push_back(source.substr(last, i - last));
+      last = i + 1;
+    }
+  }
+  fields->push_back(source.substr(last, source.length() - last));
+  return fields->size();
+}
+
+size_t tokenize_append(const std::string& source, char delimiter,
+                       std::vector<std::string>* fields) {
+  if (!fields) return 0;
+
+  std::vector<std::string> new_fields;
+  tokenize(source, delimiter, &new_fields);
+  fields->insert(fields->end(), new_fields.begin(), new_fields.end());
+  return fields->size();
+}
+
+size_t tokenize(const std::string& source, char delimiter, char start_mark,
+                char end_mark, std::vector<std::string>* fields) {
+  if (!fields) return 0;
+  fields->clear();
+
+  std::string remain_source = source;
+  while (!remain_source.empty()) {
+    size_t start_pos = remain_source.find(start_mark);
+    if (std::string::npos == start_pos) break;
+    std::string pre_mark;
+    if (start_pos > 0) {
+      pre_mark = remain_source.substr(0, start_pos - 1);
+    }
+
+    ++start_pos;
+    size_t end_pos = remain_source.find(end_mark, start_pos);
+    if (std::string::npos == end_pos) break;
+
+    // We have found the matching marks. First tokenize the pre-mask. Then add
+    // the marked part as a single field. Finally, loop back for the post-mark.
+    tokenize_append(pre_mark, delimiter, fields);
+    fields->push_back(remain_source.substr(start_pos, end_pos - start_pos));
+    remain_source = remain_source.substr(end_pos + 1);
+  }
+
+  return tokenize_append(remain_source, delimiter, fields);
+}
+
+bool tokenize_first(const std::string& source,
+                    const char delimiter,
+                    std::string* token,
+                    std::string* rest) {
+  // Find the first delimiter
+  size_t left_pos = source.find(delimiter);
+  if (left_pos == std::string::npos) {
+    return false;
+  }
+
+  // Look for additional occurrances of delimiter.
+  size_t right_pos = left_pos + 1;
+  while (source[right_pos] == delimiter) {
+    right_pos++;
+  }
+
+  *token = source.substr(0, left_pos);
+  *rest = source.substr(right_pos);
+  return true;
+}
+
+std::string join(const std::vector<std::string>& source, char delimiter) {
+  if (source.size() == 0) {
+    return std::string();
+  }
+  // Find length of the string to be returned to pre-allocate memory.
+  size_t source_string_length = 0;
+  for (size_t i = 0; i < source.size(); ++i) {
+    source_string_length += source[i].length();
+  }
+
+  // Build the joined string.
+  std::string joined_string;
+  joined_string.reserve(source_string_length + source.size() - 1);
+  for (size_t i = 0; i < source.size(); ++i) {
+    if (i != 0) {
+      joined_string += delimiter;
+    }
+    joined_string += source[i];
+  }
+  return joined_string;
+}
+
+size_t split(const std::string& source, char delimiter,
+             std::vector<std::string>* fields) {
+  RTC_DCHECK(fields);
+  fields->clear();
+  size_t last = 0;
+  for (size_t i = 0; i < source.length(); ++i) {
+    if (source[i] == delimiter) {
+      fields->push_back(source.substr(last, i - last));
+      last = i + 1;
+    }
+  }
+  fields->push_back(source.substr(last, source.length() - last));
+  return fields->size();
+}
+
+}  // namespace rtc
diff --git a/rtc_base/stringencode.h b/rtc_base/stringencode.h
new file mode 100644
index 0000000..e5395b7
--- /dev/null
+++ b/rtc_base/stringencode.h
@@ -0,0 +1,166 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_STRINGENCODE_H_
+#define RTC_BASE_STRINGENCODE_H_
+
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "rtc_base/checks.h"
+
+namespace rtc {
+
+//////////////////////////////////////////////////////////////////////
+// String Encoding Utilities
+//////////////////////////////////////////////////////////////////////
+
+// Note: in-place decoding (buffer == source) is allowed.
+size_t url_decode(char * buffer, size_t buflen,
+                  const char * source, size_t srclen);
+
+// Convert an unsigned value from 0 to 15 to the hex character equivalent...
+char hex_encode(unsigned char val);
+// ...and vice-versa.
+bool hex_decode(char ch, unsigned char* val);
+
+// hex_encode shows the hex representation of binary data in ascii.
+size_t hex_encode(char* buffer, size_t buflen,
+                  const char* source, size_t srclen);
+
+// hex_encode, but separate each byte representation with a delimiter.
+// |delimiter| == 0 means no delimiter
+// If the buffer is too short, we return 0
+size_t hex_encode_with_delimiter(char* buffer, size_t buflen,
+                                 const char* source, size_t srclen,
+                                 char delimiter);
+
+// Helper functions for hex_encode.
+std::string hex_encode(const std::string& str);
+std::string hex_encode(const char* source, size_t srclen);
+std::string hex_encode_with_delimiter(const char* source, size_t srclen,
+                                      char delimiter);
+
+// hex_decode converts ascii hex to binary.
+size_t hex_decode(char* buffer, size_t buflen,
+                  const char* source, size_t srclen);
+
+// hex_decode, assuming that there is a delimiter between every byte
+// pair.
+// |delimiter| == 0 means no delimiter
+// If the buffer is too short or the data is invalid, we return 0.
+size_t hex_decode_with_delimiter(char* buffer, size_t buflen,
+                                 const char* source, size_t srclen,
+                                 char delimiter);
+
+// Helper functions for hex_decode.
+size_t hex_decode(char* buffer, size_t buflen, const std::string& source);
+size_t hex_decode_with_delimiter(char* buffer, size_t buflen,
+                                 const std::string& source, char delimiter);
+
+// Apply any suitable string transform (including the ones above) to an STL
+// string.  Stack-allocated temporary space is used for the transformation,
+// so value and source may refer to the same string.
+typedef size_t (*Transform)(char * buffer, size_t buflen,
+                            const char * source, size_t srclen);
+size_t transform(std::string& value, size_t maxlen, const std::string& source,
+                 Transform t);
+
+// Return the result of applying transform t to source.
+std::string s_transform(const std::string& source, Transform t);
+
+// Convenience wrappers.
+inline std::string s_url_decode(const std::string& source) {
+  return s_transform(source, url_decode);
+}
+
+// Joins the source vector of strings into a single string, with each
+// field in source being separated by delimiter. No trailing delimiter is added.
+std::string join(const std::vector<std::string>& source, char delimiter);
+
+// Splits the source string into multiple fields separated by delimiter,
+// with duplicates of delimiter creating empty fields.
+size_t split(const std::string& source, char delimiter,
+             std::vector<std::string>* fields);
+
+// Splits the source string into multiple fields separated by delimiter,
+// with duplicates of delimiter ignored.  Trailing delimiter ignored.
+size_t tokenize(const std::string& source, char delimiter,
+                std::vector<std::string>* fields);
+
+// Tokenize, including the empty tokens.
+size_t tokenize_with_empty_tokens(const std::string& source,
+                                  char delimiter,
+                                  std::vector<std::string>* fields);
+
+// Tokenize and append the tokens to fields. Return the new size of fields.
+size_t tokenize_append(const std::string& source, char delimiter,
+                       std::vector<std::string>* fields);
+
+// Splits the source string into multiple fields separated by delimiter, with
+// duplicates of delimiter ignored. Trailing delimiter ignored. A substring in
+// between the start_mark and the end_mark is treated as a single field. Return
+// the size of fields. For example, if source is "filename
+// \"/Library/Application Support/media content.txt\"", delimiter is ' ', and
+// the start_mark and end_mark are '"', this method returns two fields:
+// "filename" and "/Library/Application Support/media content.txt".
+size_t tokenize(const std::string& source, char delimiter, char start_mark,
+                char end_mark, std::vector<std::string>* fields);
+
+// Extract the first token from source as separated by delimiter, with
+// duplicates of delimiter ignored. Return false if the delimiter could not be
+// found, otherwise return true.
+bool tokenize_first(const std::string& source,
+                    const char delimiter,
+                    std::string* token,
+                    std::string* rest);
+
+// Convert arbitrary values to/from a string.
+
+template <class T>
+static bool ToString(const T &t, std::string* s) {
+  RTC_DCHECK(s);
+  std::ostringstream oss;
+  oss << std::boolalpha << t;
+  *s = oss.str();
+  return !oss.fail();
+}
+
+template <class T>
+static bool FromString(const std::string& s, T* t) {
+  RTC_DCHECK(t);
+  std::istringstream iss(s);
+  iss >> std::boolalpha >> *t;
+  return !iss.fail();
+}
+
+// Inline versions of the string conversion routines.
+
+template<typename T>
+static inline std::string ToString(const T& val) {
+  std::string str; ToString(val, &str); return str;
+}
+
+template<typename T>
+static inline T FromString(const std::string& str) {
+  T val; FromString(str, &val); return val;
+}
+
+template<typename T>
+static inline T FromString(const T& defaultValue, const std::string& str) {
+  T val(defaultValue); FromString(str, &val); return val;
+}
+
+//////////////////////////////////////////////////////////////////////
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_STRINGENCODE_H__
diff --git a/rtc_base/stringencode_unittest.cc b/rtc_base/stringencode_unittest.cc
new file mode 100644
index 0000000..63d8290
--- /dev/null
+++ b/rtc_base/stringencode_unittest.cc
@@ -0,0 +1,403 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/stringencode.h"
+#include "rtc_base/arraysize.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/stringutils.h"
+
+namespace rtc {
+
+class HexEncodeTest : public testing::Test {
+ public:
+  HexEncodeTest() : enc_res_(0), dec_res_(0) {
+    for (size_t i = 0; i < sizeof(data_); ++i) {
+      data_[i] = (i + 128) & 0xff;
+    }
+    memset(decoded_, 0x7f, sizeof(decoded_));
+  }
+
+  char data_[10];
+  char encoded_[31];
+  char decoded_[11];
+  size_t enc_res_;
+  size_t dec_res_;
+};
+
+// Test that we can convert to/from hex with no delimiter.
+TEST_F(HexEncodeTest, TestWithNoDelimiter) {
+  enc_res_ = hex_encode(encoded_, sizeof(encoded_), data_, sizeof(data_));
+  ASSERT_EQ(sizeof(data_) * 2, enc_res_);
+  ASSERT_STREQ("80818283848586878889", encoded_);
+  dec_res_ = hex_decode(decoded_, sizeof(decoded_), encoded_, enc_res_);
+  ASSERT_EQ(sizeof(data_), dec_res_);
+  ASSERT_EQ(0, memcmp(data_, decoded_, dec_res_));
+}
+
+// Test that we can convert to/from hex with a colon delimiter.
+TEST_F(HexEncodeTest, TestWithDelimiter) {
+  enc_res_ = hex_encode_with_delimiter(encoded_, sizeof(encoded_),
+                                       data_, sizeof(data_), ':');
+  ASSERT_EQ(sizeof(data_) * 3 - 1, enc_res_);
+  ASSERT_STREQ("80:81:82:83:84:85:86:87:88:89", encoded_);
+  dec_res_ = hex_decode_with_delimiter(decoded_, sizeof(decoded_),
+                                       encoded_, enc_res_, ':');
+  ASSERT_EQ(sizeof(data_), dec_res_);
+  ASSERT_EQ(0, memcmp(data_, decoded_, dec_res_));
+}
+
+// Test that encoding with one delimiter and decoding with another fails.
+TEST_F(HexEncodeTest, TestWithWrongDelimiter) {
+  enc_res_ = hex_encode_with_delimiter(encoded_, sizeof(encoded_),
+                                       data_, sizeof(data_), ':');
+  ASSERT_EQ(sizeof(data_) * 3 - 1, enc_res_);
+  dec_res_ = hex_decode_with_delimiter(decoded_, sizeof(decoded_),
+                                       encoded_, enc_res_, '/');
+  ASSERT_EQ(0U, dec_res_);
+}
+
+// Test that encoding without a delimiter and decoding with one fails.
+TEST_F(HexEncodeTest, TestExpectedDelimiter) {
+  enc_res_ = hex_encode(encoded_, sizeof(encoded_), data_, sizeof(data_));
+  ASSERT_EQ(sizeof(data_) * 2, enc_res_);
+  dec_res_ = hex_decode_with_delimiter(decoded_, sizeof(decoded_),
+                                       encoded_, enc_res_, ':');
+  ASSERT_EQ(0U, dec_res_);
+}
+
+// Test that encoding with a delimiter and decoding without one fails.
+TEST_F(HexEncodeTest, TestExpectedNoDelimiter) {
+  enc_res_ = hex_encode_with_delimiter(encoded_, sizeof(encoded_),
+                                       data_, sizeof(data_), ':');
+  ASSERT_EQ(sizeof(data_) * 3 - 1, enc_res_);
+  dec_res_ = hex_decode(decoded_, sizeof(decoded_), encoded_, enc_res_);
+  ASSERT_EQ(0U, dec_res_);
+}
+
+// Test that we handle a zero-length buffer with no delimiter.
+TEST_F(HexEncodeTest, TestZeroLengthNoDelimiter) {
+  enc_res_ = hex_encode(encoded_, sizeof(encoded_), "", 0);
+  ASSERT_EQ(0U, enc_res_);
+  dec_res_ = hex_decode(decoded_, sizeof(decoded_), encoded_, enc_res_);
+  ASSERT_EQ(0U, dec_res_);
+}
+
+// Test that we handle a zero-length buffer with a delimiter.
+TEST_F(HexEncodeTest, TestZeroLengthWithDelimiter) {
+  enc_res_ = hex_encode_with_delimiter(encoded_, sizeof(encoded_), "", 0, ':');
+  ASSERT_EQ(0U, enc_res_);
+  dec_res_ = hex_decode_with_delimiter(decoded_, sizeof(decoded_),
+                                       encoded_, enc_res_, ':');
+  ASSERT_EQ(0U, dec_res_);
+}
+
+// Test the std::string variants that take no delimiter.
+TEST_F(HexEncodeTest, TestHelpersNoDelimiter) {
+  std::string result = hex_encode(data_, sizeof(data_));
+  ASSERT_EQ("80818283848586878889", result);
+  dec_res_ = hex_decode(decoded_, sizeof(decoded_), result);
+  ASSERT_EQ(sizeof(data_), dec_res_);
+  ASSERT_EQ(0, memcmp(data_, decoded_, dec_res_));
+}
+
+// Test the std::string variants that use a delimiter.
+TEST_F(HexEncodeTest, TestHelpersWithDelimiter) {
+  std::string result = hex_encode_with_delimiter(data_, sizeof(data_), ':');
+  ASSERT_EQ("80:81:82:83:84:85:86:87:88:89", result);
+  dec_res_ = hex_decode_with_delimiter(decoded_, sizeof(decoded_), result, ':');
+  ASSERT_EQ(sizeof(data_), dec_res_);
+  ASSERT_EQ(0, memcmp(data_, decoded_, dec_res_));
+}
+
+// Test that encoding into a too-small output buffer (without delimiter) fails.
+TEST_F(HexEncodeTest, TestEncodeTooShort) {
+  enc_res_ = hex_encode_with_delimiter(encoded_, sizeof(data_) * 2,
+                                       data_, sizeof(data_), 0);
+  ASSERT_EQ(0U, enc_res_);
+}
+
+// Test that encoding into a too-small output buffer (with delimiter) fails.
+TEST_F(HexEncodeTest, TestEncodeWithDelimiterTooShort) {
+  enc_res_ = hex_encode_with_delimiter(encoded_, sizeof(data_) * 3 - 1,
+                                       data_, sizeof(data_), ':');
+  ASSERT_EQ(0U, enc_res_);
+}
+
+// Test that decoding into a too-small output buffer fails.
+TEST_F(HexEncodeTest, TestDecodeTooShort) {
+  dec_res_ = hex_decode_with_delimiter(decoded_, 4, "0123456789", 10, 0);
+  ASSERT_EQ(0U, dec_res_);
+  ASSERT_EQ(0x7f, decoded_[4]);
+}
+
+// Test that decoding non-hex data fails.
+TEST_F(HexEncodeTest, TestDecodeBogusData) {
+  dec_res_ = hex_decode_with_delimiter(decoded_, sizeof(decoded_), "xyz", 3, 0);
+  ASSERT_EQ(0U, dec_res_);
+}
+
+// Test that decoding an odd number of hex characters fails.
+TEST_F(HexEncodeTest, TestDecodeOddHexDigits) {
+  dec_res_ = hex_decode_with_delimiter(decoded_, sizeof(decoded_), "012", 3, 0);
+  ASSERT_EQ(0U, dec_res_);
+}
+
+// Test that decoding a string with too many delimiters fails.
+TEST_F(HexEncodeTest, TestDecodeWithDelimiterTooManyDelimiters) {
+  dec_res_ = hex_decode_with_delimiter(decoded_, 4, "01::23::45::67", 14, ':');
+  ASSERT_EQ(0U, dec_res_);
+}
+
+// Test that decoding a string with a leading delimiter fails.
+TEST_F(HexEncodeTest, TestDecodeWithDelimiterLeadingDelimiter) {
+  dec_res_ = hex_decode_with_delimiter(decoded_, 4, ":01:23:45:67", 12, ':');
+  ASSERT_EQ(0U, dec_res_);
+}
+
+// Test that decoding a string with a trailing delimiter fails.
+TEST_F(HexEncodeTest, TestDecodeWithDelimiterTrailingDelimiter) {
+  dec_res_ = hex_decode_with_delimiter(decoded_, 4, "01:23:45:67:", 12, ':');
+  ASSERT_EQ(0U, dec_res_);
+}
+
+// Tests counting substrings.
+TEST(TokenizeTest, CountSubstrings) {
+  std::vector<std::string> fields;
+
+  EXPECT_EQ(5ul, tokenize("one two three four five", ' ', &fields));
+  fields.clear();
+  EXPECT_EQ(1ul, tokenize("one", ' ', &fields));
+
+  // Extra spaces should be ignored.
+  fields.clear();
+  EXPECT_EQ(5ul, tokenize("  one    two  three    four five  ", ' ', &fields));
+  fields.clear();
+  EXPECT_EQ(1ul, tokenize("  one  ", ' ', &fields));
+  fields.clear();
+  EXPECT_EQ(0ul, tokenize(" ", ' ', &fields));
+}
+
+// Tests comparing substrings.
+TEST(TokenizeTest, CompareSubstrings) {
+  std::vector<std::string> fields;
+
+  tokenize("find middle one", ' ', &fields);
+  ASSERT_EQ(3ul, fields.size());
+  ASSERT_STREQ("middle", fields.at(1).c_str());
+  fields.clear();
+
+  // Extra spaces should be ignored.
+  tokenize("  find   middle  one    ", ' ', &fields);
+  ASSERT_EQ(3ul, fields.size());
+  ASSERT_STREQ("middle", fields.at(1).c_str());
+  fields.clear();
+  tokenize(" ", ' ', &fields);
+  ASSERT_EQ(0ul, fields.size());
+}
+
+TEST(TokenizeTest, TokenizeAppend) {
+  ASSERT_EQ(0ul, tokenize_append("A B C", ' ', nullptr));
+
+  std::vector<std::string> fields;
+
+  tokenize_append("A B C", ' ', &fields);
+  ASSERT_EQ(3ul, fields.size());
+  ASSERT_STREQ("B", fields.at(1).c_str());
+
+  tokenize_append("D E", ' ', &fields);
+  ASSERT_EQ(5ul, fields.size());
+  ASSERT_STREQ("B", fields.at(1).c_str());
+  ASSERT_STREQ("E", fields.at(4).c_str());
+}
+
+TEST(TokenizeTest, TokenizeWithMarks) {
+  ASSERT_EQ(0ul, tokenize("D \"A B", ' ', '(', ')', nullptr));
+
+  std::vector<std::string> fields;
+  tokenize("A B C", ' ', '"', '"', &fields);
+  ASSERT_EQ(3ul, fields.size());
+  ASSERT_STREQ("C", fields.at(2).c_str());
+
+  tokenize("\"A B\" C", ' ', '"', '"', &fields);
+  ASSERT_EQ(2ul, fields.size());
+  ASSERT_STREQ("A B", fields.at(0).c_str());
+
+  tokenize("D \"A B\" C", ' ', '"', '"', &fields);
+  ASSERT_EQ(3ul, fields.size());
+  ASSERT_STREQ("D", fields.at(0).c_str());
+  ASSERT_STREQ("A B", fields.at(1).c_str());
+
+  tokenize("D \"A B\" C \"E F\"", ' ', '"', '"', &fields);
+  ASSERT_EQ(4ul, fields.size());
+  ASSERT_STREQ("D", fields.at(0).c_str());
+  ASSERT_STREQ("A B", fields.at(1).c_str());
+  ASSERT_STREQ("E F", fields.at(3).c_str());
+
+  // No matching marks.
+  tokenize("D \"A B", ' ', '"', '"', &fields);
+  ASSERT_EQ(3ul, fields.size());
+  ASSERT_STREQ("D", fields.at(0).c_str());
+  ASSERT_STREQ("\"A", fields.at(1).c_str());
+
+  tokenize("D (A B) C (E F) G", ' ', '(', ')', &fields);
+  ASSERT_EQ(5ul, fields.size());
+  ASSERT_STREQ("D", fields.at(0).c_str());
+  ASSERT_STREQ("A B", fields.at(1).c_str());
+  ASSERT_STREQ("E F", fields.at(3).c_str());
+}
+
+TEST(TokenizeTest, TokenizeWithEmptyTokens) {
+  std::vector<std::string> fields;
+  EXPECT_EQ(3ul, tokenize_with_empty_tokens("a.b.c", '.', &fields));
+  EXPECT_EQ("a", fields[0]);
+  EXPECT_EQ("b", fields[1]);
+  EXPECT_EQ("c", fields[2]);
+
+  EXPECT_EQ(3ul, tokenize_with_empty_tokens("..c", '.', &fields));
+  EXPECT_TRUE(fields[0].empty());
+  EXPECT_TRUE(fields[1].empty());
+  EXPECT_EQ("c", fields[2]);
+
+  EXPECT_EQ(1ul, tokenize_with_empty_tokens("", '.', &fields));
+  EXPECT_TRUE(fields[0].empty());
+}
+
+TEST(TokenizeFirstTest, NoLeadingSpaces) {
+  std::string token;
+  std::string rest;
+
+  ASSERT_TRUE(tokenize_first("A &*${}", ' ', &token, &rest));
+  ASSERT_STREQ("A", token.c_str());
+  ASSERT_STREQ("&*${}", rest.c_str());
+
+  ASSERT_TRUE(tokenize_first("A B& *${}", ' ', &token, &rest));
+  ASSERT_STREQ("A", token.c_str());
+  ASSERT_STREQ("B& *${}", rest.c_str());
+
+  ASSERT_TRUE(tokenize_first("A    B& *${}    ", ' ', &token, &rest));
+  ASSERT_STREQ("A", token.c_str());
+  ASSERT_STREQ("B& *${}    ", rest.c_str());
+}
+
+TEST(TokenizeFirstTest, LeadingSpaces) {
+  std::string token;
+  std::string rest;
+
+  ASSERT_TRUE(tokenize_first("     A B C", ' ', &token, &rest));
+  ASSERT_STREQ("", token.c_str());
+  ASSERT_STREQ("A B C", rest.c_str());
+
+  ASSERT_TRUE(tokenize_first("     A    B   C    ", ' ', &token, &rest));
+  ASSERT_STREQ("", token.c_str());
+  ASSERT_STREQ("A    B   C    ", rest.c_str());
+}
+
+TEST(TokenizeFirstTest, SingleToken) {
+  std::string token;
+  std::string rest;
+
+  // In the case where we cannot find delimiter the whole string is a token.
+  ASSERT_FALSE(tokenize_first("ABC", ' ', &token, &rest));
+
+  ASSERT_TRUE(tokenize_first("ABC    ", ' ', &token, &rest));
+  ASSERT_STREQ("ABC", token.c_str());
+  ASSERT_STREQ("", rest.c_str());
+
+  ASSERT_TRUE(tokenize_first("    ABC    ", ' ', &token, &rest));
+  ASSERT_STREQ("", token.c_str());
+  ASSERT_STREQ("ABC    ", rest.c_str());
+}
+
+// Tests counting substrings.
+TEST(SplitTest, CountSubstrings) {
+  std::vector<std::string> fields;
+
+  EXPECT_EQ(5ul, split("one,two,three,four,five", ',', &fields));
+  fields.clear();
+  EXPECT_EQ(1ul, split("one", ',', &fields));
+
+  // Empty fields between commas count.
+  fields.clear();
+  EXPECT_EQ(5ul, split("one,,three,four,five", ',', &fields));
+  fields.clear();
+  EXPECT_EQ(3ul, split(",three,", ',', &fields));
+  fields.clear();
+  EXPECT_EQ(1ul, split("", ',', &fields));
+}
+
+// Tests comparing substrings.
+TEST(SplitTest, CompareSubstrings) {
+  std::vector<std::string> fields;
+
+  split("find,middle,one", ',', &fields);
+  ASSERT_EQ(3ul, fields.size());
+  ASSERT_STREQ("middle", fields.at(1).c_str());
+  fields.clear();
+
+  // Empty fields between commas count.
+  split("find,,middle,one", ',', &fields);
+  ASSERT_EQ(4ul, fields.size());
+  ASSERT_STREQ("middle", fields.at(2).c_str());
+  fields.clear();
+  split("", ',', &fields);
+  ASSERT_EQ(1ul, fields.size());
+  ASSERT_STREQ("", fields.at(0).c_str());
+}
+
+TEST(BoolTest, DecodeValid) {
+  bool value;
+  EXPECT_TRUE(FromString("true", &value));
+  EXPECT_TRUE(value);
+  EXPECT_TRUE(FromString("true,", &value));
+  EXPECT_TRUE(value);
+  EXPECT_TRUE(FromString("true , true", &value));
+  EXPECT_TRUE(value);
+  EXPECT_TRUE(FromString("true ,\n false", &value));
+  EXPECT_TRUE(value);
+  EXPECT_TRUE(FromString("  true  \n", &value));
+  EXPECT_TRUE(value);
+
+  EXPECT_TRUE(FromString("false", &value));
+  EXPECT_FALSE(value);
+  EXPECT_TRUE(FromString("  false ", &value));
+  EXPECT_FALSE(value);
+  EXPECT_TRUE(FromString("  false, ", &value));
+  EXPECT_FALSE(value);
+
+  EXPECT_TRUE(FromString<bool>("true\n"));
+  EXPECT_FALSE(FromString<bool>("false\n"));
+}
+
+TEST(BoolTest, DecodeInvalid) {
+  bool value;
+  EXPECT_FALSE(FromString("True", &value));
+  EXPECT_FALSE(FromString("TRUE", &value));
+  EXPECT_FALSE(FromString("False", &value));
+  EXPECT_FALSE(FromString("FALSE", &value));
+  EXPECT_FALSE(FromString("0", &value));
+  EXPECT_FALSE(FromString("1", &value));
+  EXPECT_FALSE(FromString("0,", &value));
+  EXPECT_FALSE(FromString("1,", &value));
+  EXPECT_FALSE(FromString("1,0", &value));
+  EXPECT_FALSE(FromString("1.", &value));
+  EXPECT_FALSE(FromString("1.0", &value));
+  EXPECT_FALSE(FromString("", &value));
+  EXPECT_FALSE(FromString<bool>("false\nfalse"));
+}
+
+TEST(BoolTest, RoundTrip) {
+  bool value;
+  EXPECT_TRUE(FromString(ToString(true), &value));
+  EXPECT_TRUE(value);
+  EXPECT_TRUE(FromString(ToString(false), &value));
+  EXPECT_FALSE(value);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/stringize_macros.h b/rtc_base/stringize_macros.h
new file mode 100644
index 0000000..aee8d14
--- /dev/null
+++ b/rtc_base/stringize_macros.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Modified from the Chromium original:
+// src/base/strings/stringize_macros.h
+
+// This file defines preprocessor macros for stringizing preprocessor
+// symbols (or their output) and manipulating preprocessor symbols
+// that define strings.
+
+#ifndef RTC_BASE_STRINGIZE_MACROS_H_
+#define RTC_BASE_STRINGIZE_MACROS_H_
+
+// This is not very useful as it does not expand defined symbols if
+// called directly. Use its counterpart without the _NO_EXPANSION
+// suffix, below.
+#define STRINGIZE_NO_EXPANSION(x) #x
+
+// Use this to quote the provided parameter, first expanding it if it
+// is a preprocessor symbol.
+//
+// For example, if:
+//   #define A FOO
+//   #define B(x) myobj->FunctionCall(x)
+//
+// Then:
+//   STRINGIZE(A) produces "FOO"
+//   STRINGIZE(B(y)) produces "myobj->FunctionCall(y)"
+#define STRINGIZE(x) STRINGIZE_NO_EXPANSION(x)
+
+#endif  // RTC_BASE_STRINGIZE_MACROS_H_
diff --git a/rtc_base/stringize_macros_unittest.cc b/rtc_base/stringize_macros_unittest.cc
new file mode 100644
index 0000000..d1dea5e
--- /dev/null
+++ b/rtc_base/stringize_macros_unittest.cc
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/stringize_macros.h"
+
+#include "test/gtest.h"
+
+// Macros as per documentation in header file.
+#define PREPROCESSOR_UTIL_UNITTEST_A FOO
+#define PREPROCESSOR_UTIL_UNITTEST_B(x) myobj->FunctionCall(x)
+#define PREPROCESSOR_UTIL_UNITTEST_C "foo"
+
+TEST(StringizeTest, Ansi) {
+  EXPECT_STREQ(
+      "PREPROCESSOR_UTIL_UNITTEST_A",
+      STRINGIZE_NO_EXPANSION(PREPROCESSOR_UTIL_UNITTEST_A));
+  EXPECT_STREQ(
+      "PREPROCESSOR_UTIL_UNITTEST_B(y)",
+      STRINGIZE_NO_EXPANSION(PREPROCESSOR_UTIL_UNITTEST_B(y)));
+  EXPECT_STREQ(
+      "PREPROCESSOR_UTIL_UNITTEST_C",
+      STRINGIZE_NO_EXPANSION(PREPROCESSOR_UTIL_UNITTEST_C));
+
+  EXPECT_STREQ("FOO", STRINGIZE(PREPROCESSOR_UTIL_UNITTEST_A));
+  EXPECT_STREQ("myobj->FunctionCall(y)",
+               STRINGIZE(PREPROCESSOR_UTIL_UNITTEST_B(y)));
+  EXPECT_STREQ("\"foo\"", STRINGIZE(PREPROCESSOR_UTIL_UNITTEST_C));
+}
diff --git a/rtc_base/strings/string_builder.cc b/rtc_base/strings/string_builder.cc
new file mode 100644
index 0000000..528f099
--- /dev/null
+++ b/rtc_base/strings/string_builder.cc
@@ -0,0 +1,112 @@
+/*
+ *  Copyright 2018 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/strings/string_builder.h"
+
+namespace rtc {
+
+SimpleStringBuilder::SimpleStringBuilder(rtc::ArrayView<char> buffer)
+    : buffer_(buffer) {
+  buffer_[0] = '\0';
+  RTC_DCHECK(IsConsistent());
+}
+
+SimpleStringBuilder& SimpleStringBuilder::operator<<(const char* str) {
+  return Append(str);
+}
+
+SimpleStringBuilder& SimpleStringBuilder::operator<<(char ch) {
+  return Append(&ch, 1);
+}
+
+SimpleStringBuilder& SimpleStringBuilder::operator<<(const std::string& str) {
+  return Append(str.c_str(), str.length());
+}
+
+// Numeric conversion routines.
+//
+// We use std::[v]snprintf instead of std::to_string because:
+// * std::to_string relies on the current locale for formatting purposes,
+//   and therefore concurrent calls to std::to_string from multiple threads
+//   may result in partial serialization of calls
+// * snprintf allows us to print the number directly into our buffer.
+// * avoid allocating a std::string (potential heap alloc).
+// TODO(tommi): Switch to std::to_chars in C++17.
+
+SimpleStringBuilder& SimpleStringBuilder::operator<<(int i) {
+  return AppendFormat("%d", i);
+}
+
+SimpleStringBuilder& SimpleStringBuilder::operator<<(unsigned i) {
+  return AppendFormat("%u", i);
+}
+
+SimpleStringBuilder& SimpleStringBuilder::operator<<(long i) {  // NOLINT
+  return AppendFormat("%ld", i);
+}
+
+SimpleStringBuilder& SimpleStringBuilder::operator<<(long long i) {  // NOLINT
+  return AppendFormat("%lld", i);
+}
+
+SimpleStringBuilder& SimpleStringBuilder::operator<<(
+    unsigned long i) {  // NOLINT
+  return AppendFormat("%lu", i);
+}
+
+SimpleStringBuilder& SimpleStringBuilder::operator<<(
+    unsigned long long i) {  // NOLINT
+  return AppendFormat("%llu", i);
+}
+
+SimpleStringBuilder& SimpleStringBuilder::operator<<(float f) {
+  return AppendFormat("%f", f);
+}
+
+SimpleStringBuilder& SimpleStringBuilder::operator<<(double f) {
+  return AppendFormat("%f", f);
+}
+
+SimpleStringBuilder& SimpleStringBuilder::operator<<(long double f) {
+  return AppendFormat("%Lf", f);
+}
+
+SimpleStringBuilder& SimpleStringBuilder::AppendFormat(const char* fmt, ...) {
+  va_list args;
+  va_start(args, fmt);
+  const int len =
+      std::vsnprintf(&buffer_[size_], buffer_.size() - size_, fmt, args);
+  if (len >= 0) {
+    const size_t chars_added = rtc::SafeMin(len, buffer_.size() - 1 - size_);
+    size_ += chars_added;
+    RTC_DCHECK_EQ(len, chars_added) << "Buffer size was insufficient";
+  } else {
+    // This should never happen, but we're paranoid, so re-write the
+    // terminator in case vsnprintf() overwrote it.
+    RTC_NOTREACHED();
+    buffer_[size_] = '\0';
+  }
+  va_end(args);
+  RTC_DCHECK(IsConsistent());
+  return *this;
+}
+
+SimpleStringBuilder& SimpleStringBuilder::Append(const char* str,
+                                                 size_t length) {
+  const size_t chars_added =
+      rtc::strcpyn(&buffer_[size_], buffer_.size() - size_, str, length);
+  size_ += chars_added;
+  RTC_DCHECK_EQ(chars_added, length == SIZE_UNKNOWN ? std::strlen(str) : length)
+      << "Buffer size was insufficient";
+  RTC_DCHECK(IsConsistent());
+  return *this;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/strings/string_builder.h b/rtc_base/strings/string_builder.h
new file mode 100644
index 0000000..d095385
--- /dev/null
+++ b/rtc_base/strings/string_builder.h
@@ -0,0 +1,87 @@
+/*
+ *  Copyright 2018 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_STRINGS_STRING_BUILDER_H_
+#define RTC_BASE_STRINGS_STRING_BUILDER_H_
+
+#include <cstdio>
+#include <cstring>
+#include <string>
+
+#include "api/array_view.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "rtc_base/stringutils.h"
+
+namespace rtc {
+
+// This is a minimalistic string builder class meant to cover the most cases of
+// when you might otherwise be tempted to use a stringstream (discouraged for
+// anything except logging). It uses a fixed-size buffer provided by the caller
+// and concatenates strings and numbers into it, allowing the results to be
+// read via |str()|.
+class SimpleStringBuilder {
+ public:
+  explicit SimpleStringBuilder(rtc::ArrayView<char> buffer);
+  SimpleStringBuilder(const SimpleStringBuilder&) = delete;
+  SimpleStringBuilder& operator=(const SimpleStringBuilder&) = delete;
+
+  SimpleStringBuilder& operator<<(const char* str);
+  SimpleStringBuilder& operator<<(char ch);
+  SimpleStringBuilder& operator<<(const std::string& str);
+  SimpleStringBuilder& operator<<(int i);
+  SimpleStringBuilder& operator<<(unsigned i);
+  SimpleStringBuilder& operator<<(long i);                // NOLINT
+  SimpleStringBuilder& operator<<(long long i);           // NOLINT
+  SimpleStringBuilder& operator<<(unsigned long i);       // NOLINT
+  SimpleStringBuilder& operator<<(unsigned long long i);  // NOLINT
+  SimpleStringBuilder& operator<<(float f);
+  SimpleStringBuilder& operator<<(double f);
+  SimpleStringBuilder& operator<<(long double f);
+
+  // Returns a pointer to the built string. The name |str()| is borrowed for
+  // compatibility reasons as we replace usage of stringstream throughout the
+  // code base.
+  const char* str() const { return buffer_.data(); }
+
+  // Returns the length of the string. The name |size()| is picked for STL
+  // compatibility reasons.
+  size_t size() const { return size_; }
+
+  // Allows appending a printf style formatted string.
+#if defined(__GNUC__)
+  __attribute__((__format__(__printf__, 2, 3)))
+#endif
+  SimpleStringBuilder&
+  AppendFormat(const char* fmt, ...);
+
+  // An alternate way from operator<<() to append a string. This variant is
+  // slightly more efficient when the length of the string to append, is known.
+  SimpleStringBuilder& Append(const char* str, size_t length = SIZE_UNKNOWN);
+
+ private:
+  bool IsConsistent() const {
+    return size_ <= buffer_.size() - 1 && buffer_[size_] == '\0';
+  }
+
+  // An always-zero-terminated fixed-size buffer that we write to. The fixed
+  // size allows the buffer to be stack allocated, which helps performance.
+  // Having a fixed size is furthermore useful to avoid unnecessary resizing
+  // while building it.
+  const rtc::ArrayView<char> buffer_;
+
+  // Represents the number of characters written to the buffer.
+  // This does not include the terminating '\0'.
+  size_t size_ = 0;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_STRINGS_STRING_BUILDER_H_
diff --git a/rtc_base/strings/string_builder_unittest.cc b/rtc_base/strings/string_builder_unittest.cc
new file mode 100644
index 0000000..8d6312f
--- /dev/null
+++ b/rtc_base/strings/string_builder_unittest.cc
@@ -0,0 +1,143 @@
+/*
+ *  Copyright 2018 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/strings/string_builder.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/stringutils.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+namespace rtc {
+
+TEST(SimpleStringBuilder, Limit) {
+  char sb_buf[10];
+  SimpleStringBuilder sb(sb_buf);
+  EXPECT_EQ(0u, strlen(sb.str()));
+
+  // Test that for a SSB with a buffer size of 10, that we can write 9 chars
+  // into it.
+  sb << "012345678";  // 9 characters + '\0'.
+  EXPECT_EQ(0, strcmp(sb.str(), "012345678"));
+}
+
+TEST(SimpleStringBuilder, NumbersAndChars) {
+  char sb_buf[100];
+  SimpleStringBuilder sb(sb_buf);
+  sb << 1 << ':' << 2.1 << ":" << 2.2f << ':' << 78187493520ll << ':'
+     << 78187493520ul;
+  EXPECT_EQ(0, strcmp(sb.str(), "1:2.100000:2.200000:78187493520:78187493520"));
+}
+
+TEST(SimpleStringBuilder, Format) {
+  char sb_buf[100];
+  SimpleStringBuilder sb(sb_buf);
+  sb << "Here we go - ";
+  sb.AppendFormat("This is a hex formatted value: 0x%08llx", 3735928559ULL);
+  EXPECT_EQ(0,
+            strcmp(sb.str(),
+                   "Here we go - This is a hex formatted value: 0xdeadbeef"));
+}
+
+TEST(SimpleStringBuilder, StdString) {
+  char sb_buf[100];
+  SimpleStringBuilder sb(sb_buf);
+  std::string str = "does this work?";
+  sb << str;
+  EXPECT_EQ(str, sb.str());
+}
+
+// These tests are safe to run if we have death test support or if DCHECKs are
+// off.
+#if (GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)) || !RTC_DCHECK_IS_ON
+
+TEST(SimpleStringBuilder, BufferOverrunConstCharP) {
+  char sb_buf[4];
+  SimpleStringBuilder sb(sb_buf);
+  const char* const msg = "This is just too much";
+#if RTC_DCHECK_IS_ON
+  EXPECT_DEATH(sb << msg, "");
+#else
+  sb << msg;
+  EXPECT_THAT(sb.str(), testing::StrEq("Thi"));
+#endif
+}
+
+TEST(SimpleStringBuilder, BufferOverrunStdString) {
+  char sb_buf[4];
+  SimpleStringBuilder sb(sb_buf);
+  sb << 12;
+  const std::string msg = "Aw, come on!";
+#if RTC_DCHECK_IS_ON
+  EXPECT_DEATH(sb << msg, "");
+#else
+  sb << msg;
+  EXPECT_THAT(sb.str(), testing::StrEq("12A"));
+#endif
+}
+
+TEST(SimpleStringBuilder, BufferOverrunInt) {
+  char sb_buf[4];
+  SimpleStringBuilder sb(sb_buf);
+  constexpr int num = -12345;
+#if RTC_DCHECK_IS_ON
+  EXPECT_DEATH(sb << num, "");
+#else
+  sb << num;
+  // If we run into the end of the buffer, resonable results are either that
+  // the append has no effect or that it's truncated at the point where the
+  // buffer ends.
+  EXPECT_THAT(sb.str(),
+              testing::AnyOf(testing::StrEq(""), testing::StrEq("-12")));
+#endif
+}
+
+TEST(SimpleStringBuilder, BufferOverrunDouble) {
+  char sb_buf[5];
+  SimpleStringBuilder sb(sb_buf);
+  constexpr double num = 123.456;
+#if RTC_DCHECK_IS_ON
+  EXPECT_DEATH(sb << num, "");
+#else
+  sb << num;
+  EXPECT_THAT(sb.str(),
+              testing::AnyOf(testing::StrEq(""), testing::StrEq("123.")));
+#endif
+}
+
+TEST(SimpleStringBuilder, BufferOverrunConstCharPAlreadyFull) {
+  char sb_buf[4];
+  SimpleStringBuilder sb(sb_buf);
+  sb << 123;
+  const char* const msg = "This is just too much";
+#if RTC_DCHECK_IS_ON
+  EXPECT_DEATH(sb << msg, "");
+#else
+  sb << msg;
+  EXPECT_THAT(sb.str(), testing::StrEq("123"));
+#endif
+}
+
+TEST(SimpleStringBuilder, BufferOverrunIntAlreadyFull) {
+  char sb_buf[4];
+  SimpleStringBuilder sb(sb_buf);
+  sb << "xyz";
+  constexpr int num = -12345;
+#if RTC_DCHECK_IS_ON
+  EXPECT_DEATH(sb << num, "");
+#else
+  sb << num;
+  EXPECT_THAT(sb.str(), testing::StrEq("xyz"));
+#endif
+}
+
+#endif
+
+}  // namespace rtc
diff --git a/rtc_base/stringutils.cc b/rtc_base/stringutils.cc
new file mode 100644
index 0000000..8671b52
--- /dev/null
+++ b/rtc_base/stringutils.cc
@@ -0,0 +1,133 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/stringutils.h"
+#include "rtc_base/checks.h"
+
+namespace rtc {
+
+bool memory_check(const void* memory, int c, size_t count) {
+  const char* char_memory = static_cast<const char*>(memory);
+  char char_c = static_cast<char>(c);
+  for (size_t i = 0; i < count; ++i) {
+    if (char_memory[i] != char_c) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool string_match(const char* target, const char* pattern) {
+  while (*pattern) {
+    if (*pattern == '*') {
+      if (!*++pattern) {
+        return true;
+      }
+      while (*target) {
+        if ((toupper(*pattern) == toupper(*target))
+            && string_match(target + 1, pattern + 1)) {
+          return true;
+        }
+        ++target;
+      }
+      return false;
+    } else {
+      if (toupper(*pattern) != toupper(*target)) {
+        return false;
+      }
+      ++target;
+      ++pattern;
+    }
+  }
+  return !*target;
+}
+
+#if defined(WEBRTC_WIN)
+int ascii_string_compare(const wchar_t* s1, const char* s2, size_t n,
+                         CharacterTransformation transformation) {
+  wchar_t c1, c2;
+  while (true) {
+    if (n-- == 0) return 0;
+    c1 = transformation(*s1);
+    // Double check that characters are not UTF-8
+    RTC_DCHECK_LT(*s2, 128);
+    // Note: *s2 gets implicitly promoted to wchar_t
+    c2 = transformation(*s2);
+    if (c1 != c2) return (c1 < c2) ? -1 : 1;
+    if (!c1) return 0;
+    ++s1;
+    ++s2;
+  }
+}
+
+size_t asccpyn(wchar_t* buffer, size_t buflen,
+               const char* source, size_t srclen) {
+  if (buflen <= 0)
+    return 0;
+
+  if (srclen == SIZE_UNKNOWN) {
+    srclen = strlenn(source, buflen - 1);
+  } else if (srclen >= buflen) {
+    srclen = buflen - 1;
+  }
+#if RTC_DCHECK_IS_ON
+  // Double check that characters are not UTF-8
+  for (size_t pos = 0; pos < srclen; ++pos)
+    RTC_DCHECK_LT(source[pos], 128);
+#endif
+  std::copy(source, source + srclen, buffer);
+  buffer[srclen] = 0;
+  return srclen;
+}
+
+#endif  // WEBRTC_WIN
+
+void replace_substrs(const char *search,
+                     size_t search_len,
+                     const char *replace,
+                     size_t replace_len,
+                     std::string *s) {
+  size_t pos = 0;
+  while ((pos = s->find(search, pos, search_len)) != std::string::npos) {
+    s->replace(pos, search_len, replace, replace_len);
+    pos += replace_len;
+  }
+}
+
+bool starts_with(const char *s1, const char *s2) {
+  return strncmp(s1, s2, strlen(s2)) == 0;
+}
+
+bool ends_with(const char *s1, const char *s2) {
+  size_t s1_length = strlen(s1);
+  size_t s2_length = strlen(s2);
+
+  if (s2_length > s1_length) {
+    return false;
+  }
+
+  const char* start = s1 + (s1_length - s2_length);
+  return strncmp(start, s2, s2_length) == 0;
+}
+
+static const char kWhitespace[] = " \n\r\t";
+
+std::string string_trim(const std::string& s) {
+  std::string::size_type first = s.find_first_not_of(kWhitespace);
+  std::string::size_type last  = s.find_last_not_of(kWhitespace);
+
+  if (first == std::string::npos || last == std::string::npos) {
+    return std::string("");
+  }
+
+  return s.substr(first, last - first + 1);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/stringutils.h b/rtc_base/stringutils.h
new file mode 100644
index 0000000..686402c
--- /dev/null
+++ b/rtc_base/stringutils.h
@@ -0,0 +1,317 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_STRINGUTILS_H_
+#define RTC_BASE_STRINGUTILS_H_
+
+#include <ctype.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+#if defined(WEBRTC_WIN)
+#include <malloc.h>
+#include <wchar.h>
+#include <windows.h>
+#define alloca _alloca
+#endif  // WEBRTC_WIN
+
+#if defined(WEBRTC_POSIX)
+#ifdef BSD
+#include <stdlib.h>
+#else  // BSD
+#include <alloca.h>
+#endif  // !BSD
+#endif  // WEBRTC_POSIX
+
+#include <string>
+
+///////////////////////////////////////////////////////////////////////////////
+// Generic string/memory utilities
+///////////////////////////////////////////////////////////////////////////////
+
+#define STACK_ARRAY(TYPE, LEN) static_cast<TYPE*>(::alloca((LEN)*sizeof(TYPE)))
+
+namespace rtc {
+
+// Determines whether the simple wildcard pattern matches target.
+// Alpha characters in pattern match case-insensitively.
+// Asterisks in pattern match 0 or more characters.
+// Ex: string_match("www.TEST.GOOGLE.COM", "www.*.com") -> true
+bool string_match(const char* target, const char* pattern);
+
+}  // namespace rtc
+
+///////////////////////////////////////////////////////////////////////////////
+// Rename a few common string functions so they are consistent across platforms.
+// tolowercase is like tolower, but not compatible with end-of-file value
+//
+// It's not clear if we will ever use wchar_t strings on unix.  In theory,
+// all strings should be Utf8 all the time, except when interfacing with Win32
+// APIs that require Utf16.
+///////////////////////////////////////////////////////////////////////////////
+inline char tolowercase(char c) {
+  return static_cast<char>(tolower(c));
+}
+
+#if defined(WEBRTC_WIN)
+
+inline wchar_t tolowercase(wchar_t c) {
+  return static_cast<wchar_t>(towlower(c));
+}
+
+#endif  // WEBRTC_WIN
+
+#if defined(WEBRTC_POSIX)
+
+inline int _stricmp(const char* s1, const char* s2) {
+  return strcasecmp(s1, s2);
+}
+inline int _strnicmp(const char* s1, const char* s2, size_t n) {
+  return strncasecmp(s1, s2, n);
+}
+
+#endif // WEBRTC_POSIX
+
+///////////////////////////////////////////////////////////////////////////////
+// Traits simplifies porting string functions to be CTYPE-agnostic
+///////////////////////////////////////////////////////////////////////////////
+
+namespace rtc {
+
+const size_t SIZE_UNKNOWN = static_cast<size_t>(-1);
+
+template<class CTYPE>
+struct Traits {
+  // STL string type
+  //typedef XXX string;
+  // Null-terminated string
+  //inline static const CTYPE* empty_str();
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// String utilities which work with char or wchar_t
+///////////////////////////////////////////////////////////////////////////////
+
+template <class CTYPE>
+inline const CTYPE* nonnull(const CTYPE* str, const CTYPE* def_str = nullptr) {
+  return str ? str : (def_str ? def_str : Traits<CTYPE>::empty_str());
+}
+
+template<class CTYPE>
+const CTYPE* strchr(const CTYPE* str, const CTYPE* chs) {
+  for (size_t i=0; str[i]; ++i) {
+    for (size_t j=0; chs[j]; ++j) {
+      if (str[i] == chs[j]) {
+        return str + i;
+      }
+    }
+  }
+  return 0;
+}
+
+template<class CTYPE>
+const CTYPE* strchrn(const CTYPE* str, size_t slen, CTYPE ch) {
+  for (size_t i=0; i<slen && str[i]; ++i) {
+    if (str[i] == ch) {
+      return str + i;
+    }
+  }
+  return 0;
+}
+
+template<class CTYPE>
+size_t strlenn(const CTYPE* buffer, size_t buflen) {
+  size_t bufpos = 0;
+  while (buffer[bufpos] && (bufpos < buflen)) {
+    ++bufpos;
+  }
+  return bufpos;
+}
+
+// Safe versions of strncpy, strncat, snprintf and vsnprintf that always
+// null-terminate.
+
+template<class CTYPE>
+size_t strcpyn(CTYPE* buffer, size_t buflen,
+               const CTYPE* source, size_t srclen = SIZE_UNKNOWN) {
+  if (buflen <= 0)
+    return 0;
+
+  if (srclen == SIZE_UNKNOWN) {
+    srclen = strlenn(source, buflen - 1);
+  } else if (srclen >= buflen) {
+    srclen = buflen - 1;
+  }
+  memcpy(buffer, source, srclen * sizeof(CTYPE));
+  buffer[srclen] = 0;
+  return srclen;
+}
+
+template<class CTYPE>
+size_t strcatn(CTYPE* buffer, size_t buflen,
+               const CTYPE* source, size_t srclen = SIZE_UNKNOWN) {
+  if (buflen <= 0)
+    return 0;
+
+  size_t bufpos = strlenn(buffer, buflen - 1);
+  return bufpos + strcpyn(buffer + bufpos, buflen - bufpos, source, srclen);
+}
+
+// Some compilers (clang specifically) require vsprintfn be defined before
+// sprintfn.
+template<class CTYPE>
+size_t vsprintfn(CTYPE* buffer, size_t buflen, const CTYPE* format,
+                 va_list args) {
+  int len = vsnprintf(buffer, buflen, format, args);
+  if ((len < 0) || (static_cast<size_t>(len) >= buflen)) {
+    len = static_cast<int>(buflen - 1);
+    buffer[len] = 0;
+  }
+  return len;
+}
+
+template<class CTYPE>
+size_t sprintfn(CTYPE* buffer, size_t buflen, const CTYPE* format, ...);
+template<class CTYPE>
+size_t sprintfn(CTYPE* buffer, size_t buflen, const CTYPE* format, ...) {
+  va_list args;
+  va_start(args, format);
+  size_t len = vsprintfn(buffer, buflen, format, args);
+  va_end(args);
+  return len;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Allow safe comparing and copying ascii (not UTF-8) with both wide and
+// non-wide character strings.
+///////////////////////////////////////////////////////////////////////////////
+
+inline int asccmp(const char* s1, const char* s2) {
+  return strcmp(s1, s2);
+}
+inline int ascicmp(const char* s1, const char* s2) {
+  return _stricmp(s1, s2);
+}
+inline int ascncmp(const char* s1, const char* s2, size_t n) {
+  return strncmp(s1, s2, n);
+}
+inline int ascnicmp(const char* s1, const char* s2, size_t n) {
+  return _strnicmp(s1, s2, n);
+}
+inline size_t asccpyn(char* buffer, size_t buflen,
+                      const char* source, size_t srclen = SIZE_UNKNOWN) {
+  return strcpyn(buffer, buflen, source, srclen);
+}
+
+#if defined(WEBRTC_WIN)
+
+typedef wchar_t(*CharacterTransformation)(wchar_t);
+inline wchar_t identity(wchar_t c) { return c; }
+int ascii_string_compare(const wchar_t* s1, const char* s2, size_t n,
+                         CharacterTransformation transformation);
+
+inline int asccmp(const wchar_t* s1, const char* s2) {
+  return ascii_string_compare(s1, s2, static_cast<size_t>(-1), identity);
+}
+inline int ascicmp(const wchar_t* s1, const char* s2) {
+  return ascii_string_compare(s1, s2, static_cast<size_t>(-1), tolowercase);
+}
+inline int ascncmp(const wchar_t* s1, const char* s2, size_t n) {
+  return ascii_string_compare(s1, s2, n, identity);
+}
+inline int ascnicmp(const wchar_t* s1, const char* s2, size_t n) {
+  return ascii_string_compare(s1, s2, n, tolowercase);
+}
+size_t asccpyn(wchar_t* buffer, size_t buflen,
+               const char* source, size_t srclen = SIZE_UNKNOWN);
+
+#endif  // WEBRTC_WIN
+
+///////////////////////////////////////////////////////////////////////////////
+// Traits<char> specializations
+///////////////////////////////////////////////////////////////////////////////
+
+template<>
+struct Traits<char> {
+  typedef std::string string;
+  inline static const char* empty_str() { return ""; }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// Traits<wchar_t> specializations (Windows only, currently)
+///////////////////////////////////////////////////////////////////////////////
+
+#if defined(WEBRTC_WIN)
+
+template<>
+struct Traits<wchar_t> {
+  typedef std::wstring string;
+  inline static const wchar_t* empty_str() { return L""; }
+};
+
+#endif  // WEBRTC_WIN
+
+///////////////////////////////////////////////////////////////////////////////
+// UTF helpers (Windows only)
+///////////////////////////////////////////////////////////////////////////////
+
+#if defined(WEBRTC_WIN)
+
+inline std::wstring ToUtf16(const char* utf8, size_t len) {
+  int len16 = ::MultiByteToWideChar(CP_UTF8, 0, utf8, static_cast<int>(len),
+                                    nullptr, 0);
+  wchar_t* ws = STACK_ARRAY(wchar_t, len16);
+  ::MultiByteToWideChar(CP_UTF8, 0, utf8, static_cast<int>(len), ws, len16);
+  return std::wstring(ws, len16);
+}
+
+inline std::wstring ToUtf16(const std::string& str) {
+  return ToUtf16(str.data(), str.length());
+}
+
+inline std::string ToUtf8(const wchar_t* wide, size_t len) {
+  int len8 = ::WideCharToMultiByte(CP_UTF8, 0, wide, static_cast<int>(len),
+                                   nullptr, 0, nullptr, nullptr);
+  char* ns = STACK_ARRAY(char, len8);
+  ::WideCharToMultiByte(CP_UTF8, 0, wide, static_cast<int>(len), ns, len8,
+                        nullptr, nullptr);
+  return std::string(ns, len8);
+}
+
+inline std::string ToUtf8(const wchar_t* wide) {
+  return ToUtf8(wide, wcslen(wide));
+}
+
+inline std::string ToUtf8(const std::wstring& wstr) {
+  return ToUtf8(wstr.data(), wstr.length());
+}
+
+#endif  // WEBRTC_WIN
+
+// Replaces all occurrences of "search" with "replace".
+void replace_substrs(const char *search,
+                     size_t search_len,
+                     const char *replace,
+                     size_t replace_len,
+                     std::string *s);
+
+// True iff s1 starts with s2.
+bool starts_with(const char *s1, const char *s2);
+
+// True iff s1 ends with s2.
+bool ends_with(const char *s1, const char *s2);
+
+// Remove leading and trailing whitespaces.
+std::string string_trim(const std::string& s);
+
+}  // namespace rtc
+
+#endif // RTC_BASE_STRINGUTILS_H_
diff --git a/rtc_base/stringutils_unittest.cc b/rtc_base/stringutils_unittest.cc
new file mode 100644
index 0000000..85d0c3c
--- /dev/null
+++ b/rtc_base/stringutils_unittest.cc
@@ -0,0 +1,108 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/stringutils.h"
+#include "rtc_base/gunit.h"
+
+namespace rtc {
+
+// Tests for string_match().
+
+TEST(string_matchTest, Matches) {
+  EXPECT_TRUE( string_match("A.B.C.D", "a.b.c.d"));
+  EXPECT_TRUE( string_match("www.TEST.GOOGLE.COM", "www.*.com"));
+  EXPECT_TRUE( string_match("127.0.0.1",  "12*.0.*1"));
+  EXPECT_TRUE( string_match("127.1.0.21", "12*.0.*1"));
+  EXPECT_FALSE(string_match("127.0.0.0",  "12*.0.*1"));
+  EXPECT_FALSE(string_match("127.0.0.0",  "12*.0.*1"));
+  EXPECT_FALSE(string_match("127.1.1.21", "12*.0.*1"));
+}
+
+// It's not clear if we will ever use wchar_t strings on unix.  In theory,
+// all strings should be Utf8 all the time, except when interfacing with Win32
+// APIs that require Utf16.
+
+#if defined(WEBRTC_WIN)
+
+// Tests for ascii_string_compare().
+
+// Tests null input.
+TEST(ascii_string_compareTest, NullInput) {
+  // The following results in an access violation in
+  // ascii_string_compare.  Is this a bug or by design?  stringutils.h
+  // should document the expected behavior in this case.
+
+  // EXPECT_EQ(0, ascii_string_compare(nullptr, nullptr, 1, identity));
+}
+
+// Tests comparing two strings of different lengths.
+TEST(ascii_string_compareTest, DifferentLengths) {
+  EXPECT_EQ(-1, ascii_string_compare(L"Test", "Test1", 5, identity));
+}
+
+// Tests the case where the buffer size is smaller than the string
+// lengths.
+TEST(ascii_string_compareTest, SmallBuffer) {
+  EXPECT_EQ(0, ascii_string_compare(L"Test", "Test1", 3, identity));
+}
+
+// Tests the case where the buffer is not full.
+TEST(ascii_string_compareTest, LargeBuffer) {
+  EXPECT_EQ(0, ascii_string_compare(L"Test", "Test", 10, identity));
+}
+
+// Tests comparing two eqaul strings.
+TEST(ascii_string_compareTest, Equal) {
+  EXPECT_EQ(0, ascii_string_compare(L"Test", "Test", 5, identity));
+  EXPECT_EQ(0, ascii_string_compare(L"TeSt", "tEsT", 5, tolowercase));
+}
+
+// Tests comparing a smller string to a larger one.
+TEST(ascii_string_compareTest, LessThan) {
+  EXPECT_EQ(-1, ascii_string_compare(L"abc", "abd", 4, identity));
+  EXPECT_EQ(-1, ascii_string_compare(L"ABC", "abD", 5, tolowercase));
+}
+
+// Tests comparing a larger string to a smaller one.
+TEST(ascii_string_compareTest, GreaterThan) {
+  EXPECT_EQ(1, ascii_string_compare(L"xyz", "xy", 5, identity));
+  EXPECT_EQ(1, ascii_string_compare(L"abc", "ABB", 5, tolowercase));
+}
+#endif  // WEBRTC_WIN 
+
+TEST(string_trim_Test, Trimming) {
+  EXPECT_EQ("temp", string_trim("\n\r\t temp \n\r\t"));
+  EXPECT_EQ("temp\n\r\t temp", string_trim(" temp\n\r\t temp "));
+  EXPECT_EQ("temp temp", string_trim("temp temp"));
+  EXPECT_EQ("", string_trim(" \r\n\t"));
+  EXPECT_EQ("", string_trim(""));
+}
+
+TEST(string_startsTest, StartsWith) {
+  EXPECT_TRUE(starts_with("foobar", "foo"));
+  EXPECT_TRUE(starts_with("foobar", "foobar"));
+  EXPECT_TRUE(starts_with("foobar", ""));
+  EXPECT_TRUE(starts_with("", ""));
+  EXPECT_FALSE(starts_with("foobar", "bar"));
+  EXPECT_FALSE(starts_with("foobar", "foobarbaz"));
+  EXPECT_FALSE(starts_with("", "f"));
+}
+
+TEST(string_endsTest, EndsWith) {
+  EXPECT_TRUE(ends_with("foobar", "bar"));
+  EXPECT_TRUE(ends_with("foobar", "foobar"));
+  EXPECT_TRUE(ends_with("foobar", ""));
+  EXPECT_TRUE(ends_with("", ""));
+  EXPECT_FALSE(ends_with("foobar", "foo"));
+  EXPECT_FALSE(ends_with("foobar", "foobarbaz"));
+  EXPECT_FALSE(ends_with("", "f"));
+}
+
+} // namespace rtc
diff --git a/rtc_base/swap_queue.h b/rtc_base/swap_queue.h
new file mode 100644
index 0000000..f794ad9
--- /dev/null
+++ b/rtc_base/swap_queue.h
@@ -0,0 +1,211 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SWAP_QUEUE_H_
+#define RTC_BASE_SWAP_QUEUE_H_
+
+#include <algorithm>
+#include <utility>
+#include <vector>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+
+namespace webrtc {
+
+namespace internal {
+
+// (Internal; please don't use outside this file.)
+template <typename T>
+bool NoopSwapQueueItemVerifierFunction(const T&) {
+  return true;
+}
+
+}  // namespace internal
+
+// Functor to use when supplying a verifier function for the queue.
+template <typename T,
+          bool (*QueueItemVerifierFunction)(const T&) =
+              internal::NoopSwapQueueItemVerifierFunction>
+class SwapQueueItemVerifier {
+ public:
+  bool operator()(const T& t) const { return QueueItemVerifierFunction(t); }
+};
+
+// This class is a fixed-size queue. A producer calls Insert() to insert
+// an element of type T at the back of the queue, and a consumer calls
+// Remove() to remove an element from the front of the queue. It's safe
+// for the producer(s) and the consumer(s) to access the queue
+// concurrently, from different threads.
+//
+// To avoid the construction, copying, and destruction of Ts that a naive
+// queue implementation would require, for each "full" T passed from
+// producer to consumer, SwapQueue<T> passes an "empty" T in the other
+// direction (an "empty" T is one that contains nothing of value for the
+// consumer). This bidirectional movement is implemented with swap().
+//
+// // Create queue:
+// Bottle proto(568);  // Prepare an empty Bottle. Heap allocates space for
+//                     // 568 ml.
+// SwapQueue<Bottle> q(N, proto);  // Init queue with N copies of proto.
+//                                 // Each copy allocates on the heap.
+// // Producer pseudo-code:
+// Bottle b(568); // Prepare an empty Bottle. Heap allocates space for 568 ml.
+// loop {
+//   b.Fill(amount);  // Where amount <= 568 ml.
+//   q.Insert(&b);    // Swap our full Bottle for an empty one from q.
+// }
+//
+// // Consumer pseudo-code:
+// Bottle b(568);  // Prepare an empty Bottle. Heap allocates space for 568 ml.
+// loop {
+//   q.Remove(&b); // Swap our empty Bottle for the next-in-line full Bottle.
+//   Drink(&b);
+// }
+//
+// For a well-behaved Bottle class, there are no allocations in the
+// producer, since it just fills an empty Bottle that's already large
+// enough; no deallocations in the consumer, since it returns each empty
+// Bottle to the queue after having drunk it; and no copies along the
+// way, since the queue uses swap() everywhere to move full Bottles in
+// one direction and empty ones in the other.
+template <typename T, typename QueueItemVerifier = SwapQueueItemVerifier<T>>
+class SwapQueue {
+ public:
+  // Creates a queue of size size and fills it with default constructed Ts.
+  explicit SwapQueue(size_t size) : queue_(size) {
+    RTC_DCHECK(VerifyQueueSlots());
+  }
+
+  // Same as above and accepts an item verification functor.
+  SwapQueue(size_t size, const QueueItemVerifier& queue_item_verifier)
+      : queue_item_verifier_(queue_item_verifier), queue_(size) {
+    RTC_DCHECK(VerifyQueueSlots());
+  }
+
+  // Creates a queue of size size and fills it with copies of prototype.
+  SwapQueue(size_t size, const T& prototype) : queue_(size, prototype) {
+    RTC_DCHECK(VerifyQueueSlots());
+  }
+
+  // Same as above and accepts an item verification functor.
+  SwapQueue(size_t size,
+            const T& prototype,
+            const QueueItemVerifier& queue_item_verifier)
+      : queue_item_verifier_(queue_item_verifier), queue_(size, prototype) {
+    RTC_DCHECK(VerifyQueueSlots());
+  }
+
+  // Resets the queue to have zero content wile maintaining the queue size.
+  void Clear() {
+    rtc::CritScope cs(&crit_queue_);
+    next_write_index_ = 0;
+    next_read_index_ = 0;
+    num_elements_ = 0;
+  }
+
+  // Inserts a "full" T at the back of the queue by swapping *input with an
+  // "empty" T from the queue.
+  // Returns true if the item was inserted or false if not (the queue was full).
+  // When specified, the T given in *input must pass the ItemVerifier() test.
+  // The contents of *input after the call are then also guaranteed to pass the
+  // ItemVerifier() test.
+  bool Insert(T* input) RTC_WARN_UNUSED_RESULT {
+    RTC_DCHECK(input);
+
+    rtc::CritScope cs(&crit_queue_);
+
+    RTC_DCHECK(queue_item_verifier_(*input));
+
+    if (num_elements_ == queue_.size()) {
+      return false;
+    }
+
+    using std::swap;
+    swap(*input, queue_[next_write_index_]);
+
+    ++next_write_index_;
+    if (next_write_index_ == queue_.size()) {
+      next_write_index_ = 0;
+    }
+
+    ++num_elements_;
+
+    RTC_DCHECK_LT(next_write_index_, queue_.size());
+    RTC_DCHECK_LE(num_elements_, queue_.size());
+
+    return true;
+  }
+
+  // Removes the frontmost "full" T from the queue by swapping it with
+  // the "empty" T in *output.
+  // Returns true if an item could be removed or false if not (the queue was
+  // empty). When specified, The T given in *output must pass the ItemVerifier()
+  // test and the contents of *output after the call are then also guaranteed to
+  // pass the ItemVerifier() test.
+  bool Remove(T* output) RTC_WARN_UNUSED_RESULT {
+    RTC_DCHECK(output);
+
+    rtc::CritScope cs(&crit_queue_);
+
+    RTC_DCHECK(queue_item_verifier_(*output));
+
+    if (num_elements_ == 0) {
+      return false;
+    }
+
+    using std::swap;
+    swap(*output, queue_[next_read_index_]);
+
+    ++next_read_index_;
+    if (next_read_index_ == queue_.size()) {
+      next_read_index_ = 0;
+    }
+
+    --num_elements_;
+
+    RTC_DCHECK_LT(next_read_index_, queue_.size());
+    RTC_DCHECK_LE(num_elements_, queue_.size());
+
+    return true;
+  }
+
+ private:
+  // Verify that the queue slots complies with the ItemVerifier test.
+  bool VerifyQueueSlots() {
+    rtc::CritScope cs(&crit_queue_);
+    for (const auto& v : queue_) {
+      RTC_DCHECK(queue_item_verifier_(v));
+    }
+    return true;
+  }
+
+  rtc::CriticalSection crit_queue_;
+
+  // TODO(peah): Change this to use std::function() once we can use C++11 std
+  // lib.
+  QueueItemVerifier queue_item_verifier_ RTC_GUARDED_BY(crit_queue_);
+
+  // (next_read_index_ + num_elements_) % queue_.size() =
+  //  next_write_index_
+  size_t next_write_index_ RTC_GUARDED_BY(crit_queue_) = 0;
+  size_t next_read_index_ RTC_GUARDED_BY(crit_queue_) = 0;
+  size_t num_elements_ RTC_GUARDED_BY(crit_queue_) = 0;
+
+  // queue_.size() is constant.
+  std::vector<T> queue_ RTC_GUARDED_BY(crit_queue_);
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(SwapQueue);
+};
+
+}  // namespace webrtc
+
+#endif  // RTC_BASE_SWAP_QUEUE_H_
diff --git a/rtc_base/swap_queue_unittest.cc b/rtc_base/swap_queue_unittest.cc
new file mode 100644
index 0000000..28f870b
--- /dev/null
+++ b/rtc_base/swap_queue_unittest.cc
@@ -0,0 +1,225 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/swap_queue.h"
+
+#include <vector>
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+// Test parameter for the basic sample based SwapQueue Tests.
+const size_t kChunkSize = 3;
+
+// Queue item verification function for the vector test.
+bool LengthVerifierFunction(const std::vector<int>& v) {
+  return v.size() == kChunkSize;
+}
+
+// Queue item verifier for the vector test.
+class LengthVerifierFunctor {
+ public:
+  explicit LengthVerifierFunctor(size_t length) : length_(length) {}
+
+  bool operator()(const std::vector<int>& v) const {
+    return v.size() == length_;
+  }
+
+ private:
+  size_t length_;
+};
+
+}  // anonymous namespace
+
+TEST(SwapQueueTest, BasicOperation) {
+  std::vector<int> i(kChunkSize, 0);
+  SwapQueue<std::vector<int>> queue(2, i);
+
+  EXPECT_TRUE(queue.Insert(&i));
+  EXPECT_EQ(i.size(), kChunkSize);
+  EXPECT_TRUE(queue.Insert(&i));
+  EXPECT_EQ(i.size(), kChunkSize);
+  EXPECT_TRUE(queue.Remove(&i));
+  EXPECT_EQ(i.size(), kChunkSize);
+  EXPECT_TRUE(queue.Remove(&i));
+  EXPECT_EQ(i.size(), kChunkSize);
+}
+
+TEST(SwapQueueTest, FullQueue) {
+  SwapQueue<int> queue(2);
+
+  // Fill the queue.
+  int i = 0;
+  EXPECT_TRUE(queue.Insert(&i));
+  i = 1;
+  EXPECT_TRUE(queue.Insert(&i));
+
+  // Ensure that the value is not swapped when doing an Insert
+  // on a full queue.
+  i = 2;
+  EXPECT_FALSE(queue.Insert(&i));
+  EXPECT_EQ(i, 2);
+
+  // Ensure that the Insert didn't overwrite anything in the queue.
+  EXPECT_TRUE(queue.Remove(&i));
+  EXPECT_EQ(i, 0);
+  EXPECT_TRUE(queue.Remove(&i));
+  EXPECT_EQ(i, 1);
+}
+
+TEST(SwapQueueTest, EmptyQueue) {
+  SwapQueue<int> queue(2);
+  int i = 0;
+  EXPECT_FALSE(queue.Remove(&i));
+  EXPECT_TRUE(queue.Insert(&i));
+  EXPECT_TRUE(queue.Remove(&i));
+  EXPECT_FALSE(queue.Remove(&i));
+}
+
+TEST(SwapQueueTest, Clear) {
+  SwapQueue<int> queue(2);
+  int i = 0;
+
+  // Fill the queue.
+  EXPECT_TRUE(queue.Insert(&i));
+  EXPECT_TRUE(queue.Insert(&i));
+
+  // Ensure full queue.
+  EXPECT_FALSE(queue.Insert(&i));
+
+  // Empty the queue.
+  queue.Clear();
+
+  // Ensure that the queue is empty
+  EXPECT_FALSE(queue.Remove(&i));
+
+  // Ensure that the queue is no longer full.
+  EXPECT_TRUE(queue.Insert(&i));
+}
+
+TEST(SwapQueueTest, SuccessfulItemVerifyFunction) {
+  std::vector<int> template_element(kChunkSize);
+  SwapQueue<std::vector<int>,
+            SwapQueueItemVerifier<std::vector<int>, LengthVerifierFunction>>
+      queue(2, template_element);
+  std::vector<int> valid_chunk(kChunkSize, 0);
+
+  EXPECT_TRUE(queue.Insert(&valid_chunk));
+  EXPECT_EQ(valid_chunk.size(), kChunkSize);
+  EXPECT_TRUE(queue.Remove(&valid_chunk));
+  EXPECT_EQ(valid_chunk.size(), kChunkSize);
+}
+
+TEST(SwapQueueTest, SuccessfulItemVerifyFunctor) {
+  std::vector<int> template_element(kChunkSize);
+  LengthVerifierFunctor verifier(kChunkSize);
+  SwapQueue<std::vector<int>, LengthVerifierFunctor> queue(2, template_element,
+                                                           verifier);
+  std::vector<int> valid_chunk(kChunkSize, 0);
+
+  EXPECT_TRUE(queue.Insert(&valid_chunk));
+  EXPECT_EQ(valid_chunk.size(), kChunkSize);
+  EXPECT_TRUE(queue.Remove(&valid_chunk));
+  EXPECT_EQ(valid_chunk.size(), kChunkSize);
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST(SwapQueueTest, UnsuccessfulItemVerifyFunctor) {
+  // Queue item verifier for the test.
+  auto minus_2_verifier = [](const int& i) { return i > -2; };
+  SwapQueue<int, decltype(minus_2_verifier)> queue(2, minus_2_verifier);
+
+  int valid_value = 1;
+  int invalid_value = -4;
+  EXPECT_TRUE(queue.Insert(&valid_value));
+  EXPECT_TRUE(queue.Remove(&valid_value));
+  bool result;
+  EXPECT_DEATH(result = queue.Insert(&invalid_value), "");
+}
+
+TEST(SwapQueueTest, UnSuccessfulItemVerifyInsert) {
+  std::vector<int> template_element(kChunkSize);
+  SwapQueue<std::vector<int>,
+            SwapQueueItemVerifier<std::vector<int>, &LengthVerifierFunction>>
+      queue(2, template_element);
+  std::vector<int> invalid_chunk(kChunkSize - 1, 0);
+  bool result;
+  EXPECT_DEATH(result = queue.Insert(&invalid_chunk), "");
+}
+
+TEST(SwapQueueTest, UnSuccessfulItemVerifyRemove) {
+  std::vector<int> template_element(kChunkSize);
+  SwapQueue<std::vector<int>,
+            SwapQueueItemVerifier<std::vector<int>, &LengthVerifierFunction>>
+      queue(2, template_element);
+  std::vector<int> invalid_chunk(kChunkSize - 1, 0);
+  std::vector<int> valid_chunk(kChunkSize, 0);
+  EXPECT_TRUE(queue.Insert(&valid_chunk));
+  EXPECT_EQ(valid_chunk.size(), kChunkSize);
+  bool result;
+  EXPECT_DEATH(result = queue.Remove(&invalid_chunk), "");
+}
+#endif
+
+TEST(SwapQueueTest, VectorContentTest) {
+  const size_t kQueueSize = 10;
+  const size_t kFrameLength = 160;
+  const size_t kDataLength = kQueueSize * kFrameLength;
+  std::vector<int16_t> buffer_reader(kFrameLength, 0);
+  std::vector<int16_t> buffer_writer(kFrameLength, 0);
+  SwapQueue<std::vector<int16_t>> queue(kQueueSize,
+                                        std::vector<int16_t>(kFrameLength));
+  std::vector<int16_t> samples(kDataLength);
+
+  for (size_t k = 0; k < kDataLength; k++) {
+    samples[k] = k % 9;
+  }
+
+  for (size_t k = 0; k < kQueueSize; k++) {
+    buffer_writer.clear();
+    buffer_writer.insert(buffer_writer.end(), &samples[0] + k * kFrameLength,
+                         &samples[0] + (k + 1) * kFrameLength);
+
+    EXPECT_TRUE(queue.Insert(&buffer_writer));
+  }
+
+  for (size_t k = 0; k < kQueueSize; k++) {
+    EXPECT_TRUE(queue.Remove(&buffer_reader));
+
+    for (size_t j = 0; j < buffer_reader.size(); j++) {
+      EXPECT_EQ(buffer_reader[j], samples[k * kFrameLength + j]);
+    }
+  }
+}
+
+TEST(SwapQueueTest, ZeroSlotQueue) {
+  SwapQueue<int> queue(0);
+  int i = 42;
+  EXPECT_FALSE(queue.Insert(&i));
+  EXPECT_FALSE(queue.Remove(&i));
+  EXPECT_EQ(i, 42);
+}
+
+TEST(SwapQueueTest, OneSlotQueue) {
+  SwapQueue<int> queue(1);
+  int i = 42;
+  EXPECT_TRUE(queue.Insert(&i));
+  i = 43;
+  EXPECT_FALSE(queue.Insert(&i));
+  EXPECT_EQ(i, 43);
+  EXPECT_TRUE(queue.Remove(&i));
+  EXPECT_EQ(i, 42);
+  EXPECT_FALSE(queue.Remove(&i));
+}
+
+}  // namespace webrtc
diff --git a/rtc_base/system/BUILD.gn b/rtc_base/system/BUILD.gn
new file mode 100644
index 0000000..23d802a
--- /dev/null
+++ b/rtc_base/system/BUILD.gn
@@ -0,0 +1,19 @@
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+if (is_android) {
+  import("//build/config/android/config.gni")
+  import("//build/config/android/rules.gni")
+}
+
+rtc_source_set("fallthrough") {
+  sources = [
+    "fallthrough.h",
+  ]
+}
diff --git a/rtc_base/system/fallthrough.h b/rtc_base/system/fallthrough.h
new file mode 100644
index 0000000..2bf0fea
--- /dev/null
+++ b/rtc_base/system/fallthrough.h
@@ -0,0 +1,31 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_SYSTEM_FALLTHROUGH_H_
+#define RTC_BASE_SYSTEM_FALLTHROUGH_H_
+
+// Macro to be used for switch-case fallthrough (required for enabling
+// -Wimplicit-fallthrough warning on Clang).
+
+// This macro definition must not be included from public headers! Because
+// clang's diagnostic checks if there's a macro expanding to
+// [[clang::fallthrough]] defined, and if so it suggests the first macro
+// expanding to it. So if this macro is included in a public header, clang may
+// suggest it instead of the client's own macro, which can cause confusion.
+
+#ifdef __clang__
+#define RTC_FALLTHROUGH() [[clang::fallthrough]]
+#else
+#define RTC_FALLTHROUGH() \
+  do {                    \
+  } while (0)
+#endif
+
+#endif  // RTC_BASE_SYSTEM_FALLTHROUGH_H_
diff --git a/rtc_base/task_queue.h b/rtc_base/task_queue.h
new file mode 100644
index 0000000..41d6a10
--- /dev/null
+++ b/rtc_base/task_queue.h
@@ -0,0 +1,241 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_TASK_QUEUE_H_
+#define RTC_BASE_TASK_QUEUE_H_
+
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/ptr_util.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "rtc_base/thread_annotations.h"
+
+namespace rtc {
+
+// Base interface for asynchronously executed tasks.
+// The interface basically consists of a single function, Run(), that executes
+// on the target queue.  For more details see the Run() method and TaskQueue.
+class QueuedTask {
+ public:
+  QueuedTask() {}
+  virtual ~QueuedTask() {}
+
+  // Main routine that will run when the task is executed on the desired queue.
+  // The task should return |true| to indicate that it should be deleted or
+  // |false| to indicate that the queue should consider ownership of the task
+  // having been transferred.  Returning |false| can be useful if a task has
+  // re-posted itself to a different queue or is otherwise being re-used.
+  virtual bool Run() = 0;
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(QueuedTask);
+};
+
+// Simple implementation of QueuedTask for use with rtc::Bind and lambdas.
+template <class Closure>
+class ClosureTask : public QueuedTask {
+ public:
+  explicit ClosureTask(Closure&& closure)
+      : closure_(std::forward<Closure>(closure)) {}
+
+ private:
+  bool Run() override {
+    closure_();
+    return true;
+  }
+
+  typename std::remove_const<
+      typename std::remove_reference<Closure>::type>::type closure_;
+};
+
+// Extends ClosureTask to also allow specifying cleanup code.
+// This is useful when using lambdas if guaranteeing cleanup, even if a task
+// was dropped (queue is too full), is required.
+template <class Closure, class Cleanup>
+class ClosureTaskWithCleanup : public ClosureTask<Closure> {
+ public:
+  ClosureTaskWithCleanup(Closure&& closure, Cleanup&& cleanup)
+      : ClosureTask<Closure>(std::forward<Closure>(closure)),
+        cleanup_(std::forward<Cleanup>(cleanup)) {}
+  ~ClosureTaskWithCleanup() { cleanup_(); }
+
+ private:
+  typename std::remove_const<
+      typename std::remove_reference<Cleanup>::type>::type cleanup_;
+};
+
+// Convenience function to construct closures that can be passed directly
+// to methods that support std::unique_ptr<QueuedTask> but not template
+// based parameters.
+template <class Closure>
+static std::unique_ptr<QueuedTask> NewClosure(Closure&& closure) {
+  return rtc::MakeUnique<ClosureTask<Closure>>(std::forward<Closure>(closure));
+}
+
+template <class Closure, class Cleanup>
+static std::unique_ptr<QueuedTask> NewClosure(Closure&& closure,
+                                              Cleanup&& cleanup) {
+  return rtc::MakeUnique<ClosureTaskWithCleanup<Closure, Cleanup>>(
+      std::forward<Closure>(closure), std::forward<Cleanup>(cleanup));
+}
+
+// Implements a task queue that asynchronously executes tasks in a way that
+// guarantees that they're executed in FIFO order and that tasks never overlap.
+// Tasks may always execute on the same worker thread and they may not.
+// To DCHECK that tasks are executing on a known task queue, use IsCurrent().
+//
+// Here are some usage examples:
+//
+//   1) Asynchronously running a lambda:
+//
+//     class MyClass {
+//       ...
+//       TaskQueue queue_("MyQueue");
+//     };
+//
+//     void MyClass::StartWork() {
+//       queue_.PostTask([]() { Work(); });
+//     ...
+//
+//   2) Doing work asynchronously on a worker queue and providing a notification
+//      callback on the current queue, when the work has been done:
+//
+//     void MyClass::StartWorkAndLetMeKnowWhenDone(
+//         std::unique_ptr<QueuedTask> callback) {
+//       DCHECK(TaskQueue::Current()) << "Need to be running on a queue";
+//       queue_.PostTaskAndReply([]() { Work(); }, std::move(callback));
+//     }
+//     ...
+//     my_class->StartWorkAndLetMeKnowWhenDone(
+//         NewClosure([]() { RTC_LOG(INFO) << "The work is done!";}));
+//
+//   3) Posting a custom task on a timer.  The task posts itself again after
+//      every running:
+//
+//     class TimerTask : public QueuedTask {
+//      public:
+//       TimerTask() {}
+//      private:
+//       bool Run() override {
+//         ++count_;
+//         TaskQueue::Current()->PostDelayedTask(
+//             std::unique_ptr<QueuedTask>(this), 1000);
+//         // Ownership has been transferred to the next occurance,
+//         // so return false to prevent from being deleted now.
+//         return false;
+//       }
+//       int count_ = 0;
+//     };
+//     ...
+//     queue_.PostDelayedTask(
+//         std::unique_ptr<QueuedTask>(new TimerTask()), 1000);
+//
+// For more examples, see task_queue_unittests.cc.
+//
+// A note on destruction:
+//
+// When a TaskQueue is deleted, pending tasks will not be executed but they will
+// be deleted.  The deletion of tasks may happen asynchronously after the
+// TaskQueue itself has been deleted or it may happen synchronously while the
+// TaskQueue instance is being deleted.  This may vary from one OS to the next
+// so assumptions about lifetimes of pending tasks should not be made.
+class RTC_LOCKABLE TaskQueue {
+ public:
+  // TaskQueue priority levels. On some platforms these will map to thread
+  // priorities, on others such as Mac and iOS, GCD queue priorities.
+  enum class Priority {
+    NORMAL = 0,
+    HIGH,
+    LOW,
+  };
+
+  explicit TaskQueue(const char* queue_name,
+                     Priority priority = Priority::NORMAL);
+  ~TaskQueue();
+
+  static TaskQueue* Current();
+
+  // Used for DCHECKing the current queue.
+  bool IsCurrent() const;
+
+  // TODO(tommi): For better debuggability, implement RTC_FROM_HERE.
+
+  // Ownership of the task is passed to PostTask.
+  void PostTask(std::unique_ptr<QueuedTask> task);
+  void PostTaskAndReply(std::unique_ptr<QueuedTask> task,
+                        std::unique_ptr<QueuedTask> reply,
+                        TaskQueue* reply_queue);
+  void PostTaskAndReply(std::unique_ptr<QueuedTask> task,
+                        std::unique_ptr<QueuedTask> reply);
+
+  // Schedules a task to execute a specified number of milliseconds from when
+  // the call is made. The precision should be considered as "best effort"
+  // and in some cases, such as on Windows when all high precision timers have
+  // been used up, can be off by as much as 15 millseconds (although 8 would be
+  // more likely). This can be mitigated by limiting the use of delayed tasks.
+  void PostDelayedTask(std::unique_ptr<QueuedTask> task, uint32_t milliseconds);
+
+  // std::enable_if is used here to make sure that calls to PostTask() with
+  // std::unique_ptr<SomeClassDerivedFromQueuedTask> would not end up being
+  // caught by this template.
+  template <class Closure,
+            typename std::enable_if<!std::is_convertible<
+                Closure,
+                std::unique_ptr<QueuedTask>>::value>::type* = nullptr>
+  void PostTask(Closure&& closure) {
+    PostTask(NewClosure(std::forward<Closure>(closure)));
+  }
+
+  // See documentation above for performance expectations.
+  template <class Closure,
+            typename std::enable_if<!std::is_convertible<
+                Closure,
+                std::unique_ptr<QueuedTask>>::value>::type* = nullptr>
+  void PostDelayedTask(Closure&& closure, uint32_t milliseconds) {
+    PostDelayedTask(NewClosure(std::forward<Closure>(closure)), milliseconds);
+  }
+
+  template <class Closure1, class Closure2>
+  void PostTaskAndReply(Closure1&& task,
+                        Closure2&& reply,
+                        TaskQueue* reply_queue) {
+    PostTaskAndReply(NewClosure(std::forward<Closure1>(task)),
+                     NewClosure(std::forward<Closure2>(reply)), reply_queue);
+  }
+
+  template <class Closure>
+  void PostTaskAndReply(std::unique_ptr<QueuedTask> task, Closure&& reply) {
+    PostTaskAndReply(std::move(task), NewClosure(std::forward<Closure>(reply)));
+  }
+
+  template <class Closure>
+  void PostTaskAndReply(Closure&& task, std::unique_ptr<QueuedTask> reply) {
+    PostTaskAndReply(NewClosure(std::forward<Closure>(task)), std::move(reply));
+  }
+
+  template <class Closure1, class Closure2>
+  void PostTaskAndReply(Closure1&& task, Closure2&& reply) {
+    PostTaskAndReply(NewClosure(std::forward(task)),
+                     NewClosure(std::forward(reply)));
+  }
+
+ private:
+  class Impl;
+  const scoped_refptr<Impl> impl_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(TaskQueue);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_TASK_QUEUE_H_
diff --git a/rtc_base/task_queue_for_test.cc b/rtc_base/task_queue_for_test.cc
new file mode 100644
index 0000000..5d99df5
--- /dev/null
+++ b/rtc_base/task_queue_for_test.cc
@@ -0,0 +1,19 @@
+/*
+ *  Copyright 2018 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/task_queue_for_test.h"
+
+namespace rtc {
+namespace test {
+TaskQueueForTest::TaskQueueForTest(const char* queue_name, Priority priority)
+    : TaskQueue(queue_name, priority) {}
+TaskQueueForTest::~TaskQueueForTest() {}
+}  // namespace test
+}  // namespace rtc
diff --git a/rtc_base/task_queue_for_test.h b/rtc_base/task_queue_for_test.h
new file mode 100644
index 0000000..70c58fb
--- /dev/null
+++ b/rtc_base/task_queue_for_test.h
@@ -0,0 +1,61 @@
+/*
+ *  Copyright 2018 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_TASK_QUEUE_FOR_TEST_H_
+#define RTC_BASE_TASK_QUEUE_FOR_TEST_H_
+
+#include <utility>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/event.h"
+#include "rtc_base/task_queue.h"
+
+namespace rtc {
+namespace test {
+class RTC_LOCKABLE TaskQueueForTest : public TaskQueue {
+ public:
+  explicit TaskQueueForTest(const char* queue_name,
+                            Priority priority = Priority::NORMAL);
+  ~TaskQueueForTest();
+
+  // A convenience, test-only method that blocks the current thread while
+  // a task executes on the task queue.
+  // This variant is specifically for posting custom QueuedTask derived
+  // implementations that tests do not want to pass ownership of over to the
+  // task queue (i.e. the Run() method always returns |false|.).
+  template <class Closure>
+  void SendTask(Closure* task) {
+    RTC_DCHECK(!IsCurrent());
+    rtc::Event event(false, false);
+    PostTask(rtc::NewClosure(
+        [&task]() {
+          RTC_CHECK_EQ(false, static_cast<QueuedTask*>(task)->Run());
+        },
+        [&event]() { event.Set(); }));
+    event.Wait(rtc::Event::kForever);
+  }
+
+  // A convenience, test-only method that blocks the current thread while
+  // a task executes on the task queue.
+  template <class Closure>
+  void SendTask(Closure&& task) {
+    RTC_DCHECK(!IsCurrent());
+    rtc::Event event(false, false);
+    PostTask(rtc::NewClosure(std::move(task), [&event]() { event.Set(); }));
+    event.Wait(rtc::Event::kForever);
+  }
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(TaskQueueForTest);
+};
+}  // namespace test
+}  // namespace rtc
+
+#endif  // RTC_BASE_TASK_QUEUE_FOR_TEST_H_
diff --git a/rtc_base/task_queue_gcd.cc b/rtc_base/task_queue_gcd.cc
new file mode 100644
index 0000000..a13e088
--- /dev/null
+++ b/rtc_base/task_queue_gcd.cc
@@ -0,0 +1,242 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains the implementation of TaskQueue for Mac and iOS.
+// The implementation uses Grand Central Dispatch queues (GCD) to
+// do the actual task queuing.
+
+#include "rtc_base/task_queue.h"
+
+#include <string.h>
+
+#include <dispatch/dispatch.h>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/refcountedobject.h"
+#include "rtc_base/task_queue_posix.h"
+
+namespace rtc {
+namespace {
+
+using Priority = TaskQueue::Priority;
+
+int TaskQueuePriorityToGCD(Priority priority) {
+  switch (priority) {
+    case Priority::NORMAL:
+      return DISPATCH_QUEUE_PRIORITY_DEFAULT;
+    case Priority::HIGH:
+      return DISPATCH_QUEUE_PRIORITY_HIGH;
+    case Priority::LOW:
+      return DISPATCH_QUEUE_PRIORITY_LOW;
+  }
+}
+}
+
+using internal::GetQueuePtrTls;
+using internal::AutoSetCurrentQueuePtr;
+
+class TaskQueue::Impl : public RefCountInterface {
+ public:
+  Impl(const char* queue_name, TaskQueue* task_queue, Priority priority);
+  ~Impl() override;
+
+  static TaskQueue* Current();
+
+  // Used for DCHECKing the current queue.
+  bool IsCurrent() const;
+
+  void PostTask(std::unique_ptr<QueuedTask> task);
+  void PostTaskAndReply(std::unique_ptr<QueuedTask> task,
+                        std::unique_ptr<QueuedTask> reply,
+                        TaskQueue::Impl* reply_queue);
+
+  void PostDelayedTask(std::unique_ptr<QueuedTask> task, uint32_t milliseconds);
+
+ private:
+  struct QueueContext {
+    explicit QueueContext(TaskQueue* q) : queue(q), is_active(true) {}
+
+    static void SetNotActive(void* context) {
+      QueueContext* qc = static_cast<QueueContext*>(context);
+      qc->is_active = false;
+    }
+
+    static void DeleteContext(void* context) {
+      QueueContext* qc = static_cast<QueueContext*>(context);
+      delete qc;
+    }
+
+    TaskQueue* const queue;
+    bool is_active;
+  };
+
+  struct TaskContext {
+    TaskContext(QueueContext* queue_ctx, std::unique_ptr<QueuedTask> task)
+        : queue_ctx(queue_ctx), task(std::move(task)) {}
+    virtual ~TaskContext() {}
+
+    static void RunTask(void* context) {
+      std::unique_ptr<TaskContext> tc(static_cast<TaskContext*>(context));
+      if (tc->queue_ctx->is_active) {
+        AutoSetCurrentQueuePtr set_current(tc->queue_ctx->queue);
+        if (!tc->task->Run())
+          tc->task.release();
+      }
+    }
+
+    QueueContext* const queue_ctx;
+    std::unique_ptr<QueuedTask> task;
+  };
+
+  // Special case context for holding two tasks, a |first_task| + the task
+  // that's owned by the parent struct, TaskContext, that then becomes the
+  // second (i.e. 'reply') task.
+  struct PostTaskAndReplyContext : public TaskContext {
+    explicit PostTaskAndReplyContext(QueueContext* first_queue_ctx,
+                                     std::unique_ptr<QueuedTask> first_task,
+                                     QueueContext* second_queue_ctx,
+                                     std::unique_ptr<QueuedTask> second_task)
+        : TaskContext(second_queue_ctx, std::move(second_task)),
+          first_queue_ctx(first_queue_ctx),
+          first_task(std::move(first_task)),
+          reply_queue_(second_queue_ctx->queue->impl_->queue_) {
+      // Retain the reply queue for as long as this object lives.
+      // If we don't, we may have memory leaks and/or failures.
+      dispatch_retain(reply_queue_);
+    }
+    ~PostTaskAndReplyContext() override { dispatch_release(reply_queue_); }
+
+    static void RunTask(void* context) {
+      auto* rc = static_cast<PostTaskAndReplyContext*>(context);
+      if (rc->first_queue_ctx->is_active) {
+        AutoSetCurrentQueuePtr set_current(rc->first_queue_ctx->queue);
+        if (!rc->first_task->Run())
+          rc->first_task.release();
+      }
+      // Post the reply task.  This hands the work over to the parent struct.
+      // This task will eventually delete |this|.
+      dispatch_async_f(rc->reply_queue_, rc, &TaskContext::RunTask);
+    }
+
+    QueueContext* const first_queue_ctx;
+    std::unique_ptr<QueuedTask> first_task;
+    dispatch_queue_t reply_queue_;
+  };
+
+  dispatch_queue_t queue_;
+  QueueContext* const context_;
+};
+
+TaskQueue::Impl::Impl(const char* queue_name,
+                      TaskQueue* task_queue,
+                      Priority priority)
+    : queue_(dispatch_queue_create(queue_name, DISPATCH_QUEUE_SERIAL)),
+      context_(new QueueContext(task_queue)) {
+  RTC_DCHECK(queue_name);
+  RTC_CHECK(queue_);
+  dispatch_set_context(queue_, context_);
+  // Assign a finalizer that will delete the context when the last reference
+  // to the queue is released.  This may run after the TaskQueue object has
+  // been deleted.
+  dispatch_set_finalizer_f(queue_, &QueueContext::DeleteContext);
+
+  dispatch_set_target_queue(
+      queue_, dispatch_get_global_queue(TaskQueuePriorityToGCD(priority), 0));
+}
+
+TaskQueue::Impl::~Impl() {
+  RTC_DCHECK(!IsCurrent());
+  // Implementation/behavioral note:
+  // Dispatch queues are reference counted via calls to dispatch_retain and
+  // dispatch_release. Pending blocks submitted to a queue also hold a
+  // reference to the queue until they have finished. Once all references to a
+  // queue have been released, the queue will be deallocated by the system.
+  // This is why we check the context before running tasks.
+
+  // Use dispatch_sync to set the context to null to guarantee that there's not
+  // a race between checking the context and using it from a task.
+  dispatch_sync_f(queue_, context_, &QueueContext::SetNotActive);
+  dispatch_release(queue_);
+}
+
+// static
+TaskQueue* TaskQueue::Impl::Current() {
+  return static_cast<TaskQueue*>(pthread_getspecific(GetQueuePtrTls()));
+}
+
+bool TaskQueue::Impl::IsCurrent() const {
+  RTC_DCHECK(queue_);
+  const TaskQueue* current = Current();
+  return current && this == current->impl_.get();
+}
+
+void TaskQueue::Impl::PostTask(std::unique_ptr<QueuedTask> task) {
+  auto* context = new TaskContext(context_, std::move(task));
+  dispatch_async_f(queue_, context, &TaskContext::RunTask);
+}
+
+void TaskQueue::Impl::PostDelayedTask(std::unique_ptr<QueuedTask> task,
+                                      uint32_t milliseconds) {
+  auto* context = new TaskContext(context_, std::move(task));
+  dispatch_after_f(
+      dispatch_time(DISPATCH_TIME_NOW, milliseconds * NSEC_PER_MSEC), queue_,
+      context, &TaskContext::RunTask);
+}
+
+void TaskQueue::Impl::PostTaskAndReply(std::unique_ptr<QueuedTask> task,
+                                       std::unique_ptr<QueuedTask> reply,
+                                       TaskQueue::Impl* reply_queue) {
+  auto* context = new PostTaskAndReplyContext(
+      context_, std::move(task), reply_queue->context_, std::move(reply));
+  dispatch_async_f(queue_, context, &PostTaskAndReplyContext::RunTask);
+}
+
+// Boilerplate for the PIMPL pattern.
+TaskQueue::TaskQueue(const char* queue_name, Priority priority)
+    : impl_(new RefCountedObject<TaskQueue::Impl>(queue_name, this, priority)) {
+}
+
+TaskQueue::~TaskQueue() {}
+
+// static
+TaskQueue* TaskQueue::Current() {
+  return TaskQueue::Impl::Current();
+}
+
+// Used for DCHECKing the current queue.
+bool TaskQueue::IsCurrent() const {
+  return impl_->IsCurrent();
+}
+
+void TaskQueue::PostTask(std::unique_ptr<QueuedTask> task) {
+  return TaskQueue::impl_->PostTask(std::move(task));
+}
+
+void TaskQueue::PostTaskAndReply(std::unique_ptr<QueuedTask> task,
+                                 std::unique_ptr<QueuedTask> reply,
+                                 TaskQueue* reply_queue) {
+  return TaskQueue::impl_->PostTaskAndReply(std::move(task), std::move(reply),
+                                            reply_queue->impl_.get());
+}
+
+void TaskQueue::PostTaskAndReply(std::unique_ptr<QueuedTask> task,
+                                 std::unique_ptr<QueuedTask> reply) {
+  return TaskQueue::impl_->PostTaskAndReply(std::move(task), std::move(reply),
+                                            impl_.get());
+}
+
+void TaskQueue::PostDelayedTask(std::unique_ptr<QueuedTask> task,
+                                uint32_t milliseconds) {
+  return TaskQueue::impl_->PostDelayedTask(std::move(task), milliseconds);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/task_queue_libevent.cc b/rtc_base/task_queue_libevent.cc
new file mode 100644
index 0000000..4db4827
--- /dev/null
+++ b/rtc_base/task_queue_libevent.cc
@@ -0,0 +1,527 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/task_queue.h"
+
+#include <fcntl.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+#include <list>
+
+#include <event.h>
+#include "rtc_base/checks.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/refcountedobject.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/task_queue_posix.h"
+#include "rtc_base/timeutils.h"
+
+namespace rtc {
+using internal::GetQueuePtrTls;
+using internal::AutoSetCurrentQueuePtr;
+
+namespace {
+static const char kQuit = 1;
+static const char kRunTask = 2;
+static const char kRunReplyTask = 3;
+
+using Priority = TaskQueue::Priority;
+
+// This ignores the SIGPIPE signal on the calling thread.
+// This signal can be fired when trying to write() to a pipe that's being
+// closed or while closing a pipe that's being written to.
+// We can run into that situation (e.g. reply tasks that don't get a chance to
+// run because the task queue is being deleted) so we ignore this signal and
+// continue as normal.
+// As a side note for this implementation, it would be great if we could safely
+// restore the sigmask, but unfortunately the operation of restoring it, can
+// itself actually cause SIGPIPE to be signaled :-| (e.g. on MacOS)
+// The SIGPIPE signal by default causes the process to be terminated, so we
+// don't want to risk that.
+// An alternative to this approach is to ignore the signal for the whole
+// process:
+//   signal(SIGPIPE, SIG_IGN);
+void IgnoreSigPipeSignalOnCurrentThread() {
+  sigset_t sigpipe_mask;
+  sigemptyset(&sigpipe_mask);
+  sigaddset(&sigpipe_mask, SIGPIPE);
+  pthread_sigmask(SIG_BLOCK, &sigpipe_mask, nullptr);
+}
+
+struct TimerEvent {
+  explicit TimerEvent(std::unique_ptr<QueuedTask> task)
+      : task(std::move(task)) {}
+  ~TimerEvent() { event_del(&ev); }
+  event ev;
+  std::unique_ptr<QueuedTask> task;
+};
+
+bool SetNonBlocking(int fd) {
+  const int flags = fcntl(fd, F_GETFL);
+  RTC_CHECK(flags != -1);
+  return (flags & O_NONBLOCK) || fcntl(fd, F_SETFL, flags | O_NONBLOCK) != -1;
+}
+
+// TODO(tommi): This is a hack to support two versions of libevent that we're
+// compatible with.  The method we really want to call is event_assign(),
+// since event_set() has been marked as deprecated (and doesn't accept
+// passing event_base__ as a parameter).  However, the version of libevent
+// that we have in Chromium, doesn't have event_assign(), so we need to call
+// event_set() there.
+void EventAssign(struct event* ev,
+                 struct event_base* base,
+                 int fd,
+                 short events,
+                 void (*callback)(int, short, void*),
+                 void* arg) {
+#if defined(_EVENT2_EVENT_H_)
+  RTC_CHECK_EQ(0, event_assign(ev, base, fd, events, callback, arg));
+#else
+  event_set(ev, fd, events, callback, arg);
+  RTC_CHECK_EQ(0, event_base_set(base, ev));
+#endif
+}
+
+ThreadPriority TaskQueuePriorityToThreadPriority(Priority priority) {
+  switch (priority) {
+    case Priority::HIGH:
+      return kRealtimePriority;
+    case Priority::LOW:
+      return kLowPriority;
+    case Priority::NORMAL:
+      return kNormalPriority;
+    default:
+      RTC_NOTREACHED();
+      break;
+  }
+  return kNormalPriority;
+}
+}  // namespace
+
+class TaskQueue::Impl : public RefCountInterface {
+ public:
+  explicit Impl(const char* queue_name,
+                TaskQueue* queue,
+                Priority priority = Priority::NORMAL);
+  ~Impl() override;
+
+  static TaskQueue::Impl* Current();
+  static TaskQueue* CurrentQueue();
+
+  // Used for DCHECKing the current queue.
+  bool IsCurrent() const;
+
+  void PostTask(std::unique_ptr<QueuedTask> task);
+  void PostTaskAndReply(std::unique_ptr<QueuedTask> task,
+                        std::unique_ptr<QueuedTask> reply,
+                        TaskQueue::Impl* reply_queue);
+
+  void PostDelayedTask(std::unique_ptr<QueuedTask> task, uint32_t milliseconds);
+
+ private:
+  static void ThreadMain(void* context);
+  static void OnWakeup(int socket, short flags, void* context);  // NOLINT
+  static void RunTask(int fd, short flags, void* context);       // NOLINT
+  static void RunTimer(int fd, short flags, void* context);      // NOLINT
+
+  class ReplyTaskOwner;
+  class PostAndReplyTask;
+  class SetTimerTask;
+
+  typedef RefCountedObject<ReplyTaskOwner> ReplyTaskOwnerRef;
+
+  void PrepareReplyTask(scoped_refptr<ReplyTaskOwnerRef> reply_task);
+
+  struct QueueContext;
+  TaskQueue* const queue_;
+  int wakeup_pipe_in_ = -1;
+  int wakeup_pipe_out_ = -1;
+  event_base* event_base_;
+  std::unique_ptr<event> wakeup_event_;
+  PlatformThread thread_;
+  rtc::CriticalSection pending_lock_;
+  std::list<std::unique_ptr<QueuedTask>> pending_ RTC_GUARDED_BY(pending_lock_);
+  std::list<scoped_refptr<ReplyTaskOwnerRef>> pending_replies_
+      RTC_GUARDED_BY(pending_lock_);
+};
+
+struct TaskQueue::Impl::QueueContext {
+  explicit QueueContext(TaskQueue::Impl* q) : queue(q), is_active(true) {}
+  TaskQueue::Impl* queue;
+  bool is_active;
+  // Holds a list of events pending timers for cleanup when the loop exits.
+  std::list<TimerEvent*> pending_timers_;
+};
+
+// Posting a reply task is tricky business. This class owns the reply task
+// and a reference to it is held by both the reply queue and the first task.
+// Here's an outline of what happens when dealing with a reply task.
+// * The ReplyTaskOwner owns the |reply_| task.
+// * One ref owned by PostAndReplyTask
+// * One ref owned by the reply TaskQueue
+// * ReplyTaskOwner has a flag |run_task_| initially set to false.
+// * ReplyTaskOwner has a method: HasOneRef() (provided by RefCountedObject).
+// * After successfully running the original |task_|, PostAndReplyTask() calls
+//   set_should_run_task(). This sets |run_task_| to true.
+// * In PostAndReplyTask's dtor:
+//   * It releases its reference to ReplyTaskOwner (important to do this first).
+//   * Sends (write()) a kRunReplyTask message to the reply queue's pipe.
+// * PostAndReplyTask doesn't care if write() fails, but when it does:
+//   * The reply queue is gone.
+//   * ReplyTaskOwner has already been deleted and the reply task too.
+// * If write() succeeds:
+//   * ReplyQueue receives the kRunReplyTask message
+//   * Goes through all pending tasks, finding the first that HasOneRef()
+//   * Calls ReplyTaskOwner::Run()
+//     * if set_should_run_task() was called, the reply task will be run
+//   * Release the reference to ReplyTaskOwner
+//   * ReplyTaskOwner and associated |reply_| are deleted.
+class TaskQueue::Impl::ReplyTaskOwner {
+ public:
+  ReplyTaskOwner(std::unique_ptr<QueuedTask> reply)
+      : reply_(std::move(reply)) {}
+
+  void Run() {
+    RTC_DCHECK(reply_);
+    if (run_task_) {
+      if (!reply_->Run())
+        reply_.release();
+    }
+    reply_.reset();
+  }
+
+  void set_should_run_task() {
+    RTC_DCHECK(!run_task_);
+    run_task_ = true;
+  }
+
+ private:
+  std::unique_ptr<QueuedTask> reply_;
+  bool run_task_ = false;
+};
+
+class TaskQueue::Impl::PostAndReplyTask : public QueuedTask {
+ public:
+  PostAndReplyTask(std::unique_ptr<QueuedTask> task,
+                   std::unique_ptr<QueuedTask> reply,
+                   TaskQueue::Impl* reply_queue,
+                   int reply_pipe)
+      : task_(std::move(task)),
+        reply_pipe_(reply_pipe),
+        reply_task_owner_(
+            new RefCountedObject<ReplyTaskOwner>(std::move(reply))) {
+    reply_queue->PrepareReplyTask(reply_task_owner_);
+  }
+
+  ~PostAndReplyTask() override {
+    reply_task_owner_ = nullptr;
+    IgnoreSigPipeSignalOnCurrentThread();
+    // Send a signal to the reply queue that the reply task can run now.
+    // Depending on whether |set_should_run_task()| was called by the
+    // PostAndReplyTask(), the reply task may or may not actually run.
+    // In either case, it will be deleted.
+    char message = kRunReplyTask;
+    RTC_UNUSED(write(reply_pipe_, &message, sizeof(message)));
+  }
+
+ private:
+  bool Run() override {
+    if (!task_->Run())
+      task_.release();
+    reply_task_owner_->set_should_run_task();
+    return true;
+  }
+
+  std::unique_ptr<QueuedTask> task_;
+  int reply_pipe_;
+  scoped_refptr<RefCountedObject<ReplyTaskOwner>> reply_task_owner_;
+};
+
+class TaskQueue::Impl::SetTimerTask : public QueuedTask {
+ public:
+  SetTimerTask(std::unique_ptr<QueuedTask> task, uint32_t milliseconds)
+      : task_(std::move(task)),
+        milliseconds_(milliseconds),
+        posted_(Time32()) {}
+
+ private:
+  bool Run() override {
+    // Compensate for the time that has passed since construction
+    // and until we got here.
+    uint32_t post_time = Time32() - posted_;
+    TaskQueue::Impl::Current()->PostDelayedTask(
+        std::move(task_),
+        post_time > milliseconds_ ? 0 : milliseconds_ - post_time);
+    return true;
+  }
+
+  std::unique_ptr<QueuedTask> task_;
+  const uint32_t milliseconds_;
+  const uint32_t posted_;
+};
+
+TaskQueue::Impl::Impl(const char* queue_name,
+                      TaskQueue* queue,
+                      Priority priority /*= NORMAL*/)
+    : queue_(queue),
+      event_base_(event_base_new()),
+      wakeup_event_(new event()),
+      thread_(&TaskQueue::Impl::ThreadMain,
+              this,
+              queue_name,
+              TaskQueuePriorityToThreadPriority(priority)) {
+  RTC_DCHECK(queue_name);
+  int fds[2];
+  RTC_CHECK(pipe(fds) == 0);
+  SetNonBlocking(fds[0]);
+  SetNonBlocking(fds[1]);
+  wakeup_pipe_out_ = fds[0];
+  wakeup_pipe_in_ = fds[1];
+
+  EventAssign(wakeup_event_.get(), event_base_, wakeup_pipe_out_,
+              EV_READ | EV_PERSIST, OnWakeup, this);
+  event_add(wakeup_event_.get(), 0);
+  thread_.Start();
+}
+
+TaskQueue::Impl::~Impl() {
+  RTC_DCHECK(!IsCurrent());
+  struct timespec ts;
+  char message = kQuit;
+  while (write(wakeup_pipe_in_, &message, sizeof(message)) != sizeof(message)) {
+    // The queue is full, so we have no choice but to wait and retry.
+    RTC_CHECK_EQ(EAGAIN, errno);
+    ts.tv_sec = 0;
+    ts.tv_nsec = 1000000;
+    nanosleep(&ts, nullptr);
+  }
+
+  thread_.Stop();
+
+  event_del(wakeup_event_.get());
+
+  IgnoreSigPipeSignalOnCurrentThread();
+
+  close(wakeup_pipe_in_);
+  close(wakeup_pipe_out_);
+  wakeup_pipe_in_ = -1;
+  wakeup_pipe_out_ = -1;
+
+  event_base_free(event_base_);
+}
+
+// static
+TaskQueue::Impl* TaskQueue::Impl::Current() {
+  QueueContext* ctx =
+      static_cast<QueueContext*>(pthread_getspecific(GetQueuePtrTls()));
+  return ctx ? ctx->queue : nullptr;
+}
+
+// static
+TaskQueue* TaskQueue::Impl::CurrentQueue() {
+  TaskQueue::Impl* current = Current();
+  if (current) {
+    return current->queue_;
+  }
+  return nullptr;
+}
+
+bool TaskQueue::Impl::IsCurrent() const {
+  return IsThreadRefEqual(thread_.GetThreadRef(), CurrentThreadRef());
+}
+
+void TaskQueue::Impl::PostTask(std::unique_ptr<QueuedTask> task) {
+  RTC_DCHECK(task.get());
+  // libevent isn't thread safe.  This means that we can't use methods such
+  // as event_base_once to post tasks to the worker thread from a different
+  // thread.  However, we can use it when posting from the worker thread itself.
+  if (IsCurrent()) {
+    if (event_base_once(event_base_, -1, EV_TIMEOUT, &TaskQueue::Impl::RunTask,
+                        task.get(), nullptr) == 0) {
+      task.release();
+    }
+  } else {
+    QueuedTask* task_id = task.get();  // Only used for comparison.
+    {
+      CritScope lock(&pending_lock_);
+      pending_.push_back(std::move(task));
+    }
+    char message = kRunTask;
+    if (write(wakeup_pipe_in_, &message, sizeof(message)) != sizeof(message)) {
+      RTC_LOG(WARNING) << "Failed to queue task.";
+      CritScope lock(&pending_lock_);
+      pending_.remove_if([task_id](std::unique_ptr<QueuedTask>& t) {
+        return t.get() == task_id;
+      });
+    }
+  }
+}
+
+void TaskQueue::Impl::PostDelayedTask(std::unique_ptr<QueuedTask> task,
+                                      uint32_t milliseconds) {
+  if (IsCurrent()) {
+    TimerEvent* timer = new TimerEvent(std::move(task));
+    EventAssign(&timer->ev, event_base_, -1, 0, &TaskQueue::Impl::RunTimer,
+                timer);
+    QueueContext* ctx =
+        static_cast<QueueContext*>(pthread_getspecific(GetQueuePtrTls()));
+    ctx->pending_timers_.push_back(timer);
+    timeval tv = {rtc::dchecked_cast<int>(milliseconds / 1000),
+                  rtc::dchecked_cast<int>(milliseconds % 1000) * 1000};
+    event_add(&timer->ev, &tv);
+  } else {
+    PostTask(std::unique_ptr<QueuedTask>(
+        new SetTimerTask(std::move(task), milliseconds)));
+  }
+}
+
+void TaskQueue::Impl::PostTaskAndReply(std::unique_ptr<QueuedTask> task,
+                                       std::unique_ptr<QueuedTask> reply,
+                                       TaskQueue::Impl* reply_queue) {
+  std::unique_ptr<QueuedTask> wrapper_task(
+      new PostAndReplyTask(std::move(task), std::move(reply), reply_queue,
+                           reply_queue->wakeup_pipe_in_));
+  PostTask(std::move(wrapper_task));
+}
+
+// static
+void TaskQueue::Impl::ThreadMain(void* context) {
+  TaskQueue::Impl* me = static_cast<TaskQueue::Impl*>(context);
+
+  QueueContext queue_context(me);
+  pthread_setspecific(GetQueuePtrTls(), &queue_context);
+
+  while (queue_context.is_active)
+    event_base_loop(me->event_base_, 0);
+
+  pthread_setspecific(GetQueuePtrTls(), nullptr);
+
+  for (TimerEvent* timer : queue_context.pending_timers_)
+    delete timer;
+}
+
+// static
+void TaskQueue::Impl::OnWakeup(int socket,
+                               short flags,
+                               void* context) {  // NOLINT
+  QueueContext* ctx =
+      static_cast<QueueContext*>(pthread_getspecific(GetQueuePtrTls()));
+  RTC_DCHECK(ctx->queue->wakeup_pipe_out_ == socket);
+  char buf;
+  RTC_CHECK(sizeof(buf) == read(socket, &buf, sizeof(buf)));
+  switch (buf) {
+    case kQuit:
+      ctx->is_active = false;
+      event_base_loopbreak(ctx->queue->event_base_);
+      break;
+    case kRunTask: {
+      std::unique_ptr<QueuedTask> task;
+      {
+        CritScope lock(&ctx->queue->pending_lock_);
+        RTC_DCHECK(!ctx->queue->pending_.empty());
+        task = std::move(ctx->queue->pending_.front());
+        ctx->queue->pending_.pop_front();
+        RTC_DCHECK(task.get());
+      }
+      if (!task->Run())
+        task.release();
+      break;
+    }
+    case kRunReplyTask: {
+      scoped_refptr<ReplyTaskOwnerRef> reply_task;
+      {
+        CritScope lock(&ctx->queue->pending_lock_);
+        for (auto it = ctx->queue->pending_replies_.begin();
+             it != ctx->queue->pending_replies_.end(); ++it) {
+          if ((*it)->HasOneRef()) {
+            reply_task = std::move(*it);
+            ctx->queue->pending_replies_.erase(it);
+            break;
+          }
+        }
+      }
+      reply_task->Run();
+      break;
+    }
+    default:
+      RTC_NOTREACHED();
+      break;
+  }
+}
+
+// static
+void TaskQueue::Impl::RunTask(int fd, short flags, void* context) {  // NOLINT
+  auto* task = static_cast<QueuedTask*>(context);
+  if (task->Run())
+    delete task;
+}
+
+// static
+void TaskQueue::Impl::RunTimer(int fd, short flags, void* context) {  // NOLINT
+  TimerEvent* timer = static_cast<TimerEvent*>(context);
+  if (!timer->task->Run())
+    timer->task.release();
+  QueueContext* ctx =
+      static_cast<QueueContext*>(pthread_getspecific(GetQueuePtrTls()));
+  ctx->pending_timers_.remove(timer);
+  delete timer;
+}
+
+void TaskQueue::Impl::PrepareReplyTask(
+    scoped_refptr<ReplyTaskOwnerRef> reply_task) {
+  RTC_DCHECK(reply_task);
+  CritScope lock(&pending_lock_);
+  pending_replies_.push_back(std::move(reply_task));
+}
+
+TaskQueue::TaskQueue(const char* queue_name, Priority priority)
+    : impl_(new RefCountedObject<TaskQueue::Impl>(queue_name, this, priority)) {
+}
+
+TaskQueue::~TaskQueue() {}
+
+// static
+TaskQueue* TaskQueue::Current() {
+  return TaskQueue::Impl::CurrentQueue();
+}
+
+// Used for DCHECKing the current queue.
+bool TaskQueue::IsCurrent() const {
+  return impl_->IsCurrent();
+}
+
+void TaskQueue::PostTask(std::unique_ptr<QueuedTask> task) {
+  return TaskQueue::impl_->PostTask(std::move(task));
+}
+
+void TaskQueue::PostTaskAndReply(std::unique_ptr<QueuedTask> task,
+                                 std::unique_ptr<QueuedTask> reply,
+                                 TaskQueue* reply_queue) {
+  return TaskQueue::impl_->PostTaskAndReply(std::move(task), std::move(reply),
+                                            reply_queue->impl_.get());
+}
+
+void TaskQueue::PostTaskAndReply(std::unique_ptr<QueuedTask> task,
+                                 std::unique_ptr<QueuedTask> reply) {
+  return TaskQueue::impl_->PostTaskAndReply(std::move(task), std::move(reply),
+                                            impl_.get());
+}
+
+void TaskQueue::PostDelayedTask(std::unique_ptr<QueuedTask> task,
+                                uint32_t milliseconds) {
+  return TaskQueue::impl_->PostDelayedTask(std::move(task), milliseconds);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/task_queue_posix.cc b/rtc_base/task_queue_posix.cc
new file mode 100644
index 0000000..520b8e9
--- /dev/null
+++ b/rtc_base/task_queue_posix.cc
@@ -0,0 +1,40 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/task_queue_posix.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/task_queue.h"
+
+namespace rtc {
+namespace internal {
+pthread_key_t g_queue_ptr_tls = 0;
+
+void InitializeTls() {
+  RTC_CHECK(pthread_key_create(&g_queue_ptr_tls, nullptr) == 0);
+}
+
+pthread_key_t GetQueuePtrTls() {
+  static pthread_once_t init_once = PTHREAD_ONCE_INIT;
+  RTC_CHECK(pthread_once(&init_once, &InitializeTls) == 0);
+  return g_queue_ptr_tls;
+}
+
+AutoSetCurrentQueuePtr::AutoSetCurrentQueuePtr(TaskQueue* q)
+    : prev_(TaskQueue::Current()) {
+  pthread_setspecific(GetQueuePtrTls(), q);
+}
+
+AutoSetCurrentQueuePtr::~AutoSetCurrentQueuePtr() {
+  pthread_setspecific(GetQueuePtrTls(), prev_);
+}
+
+}  // namespace internal
+}  // namespace rtc
diff --git a/rtc_base/task_queue_posix.h b/rtc_base/task_queue_posix.h
new file mode 100644
index 0000000..3014e20
--- /dev/null
+++ b/rtc_base/task_queue_posix.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_TASK_QUEUE_POSIX_H_
+#define RTC_BASE_TASK_QUEUE_POSIX_H_
+
+#include <pthread.h>
+
+namespace rtc {
+
+class TaskQueue;
+
+namespace internal {
+
+class AutoSetCurrentQueuePtr {
+ public:
+  explicit AutoSetCurrentQueuePtr(TaskQueue* q);
+  ~AutoSetCurrentQueuePtr();
+
+ private:
+  TaskQueue* const prev_;
+};
+
+pthread_key_t GetQueuePtrTls();
+
+}  // namespace internal
+}  // namespace rtc
+
+#endif  // RTC_BASE_TASK_QUEUE_POSIX_H_
diff --git a/rtc_base/task_queue_unittest.cc b/rtc_base/task_queue_unittest.cc
new file mode 100644
index 0000000..51956d2
--- /dev/null
+++ b/rtc_base/task_queue_unittest.cc
@@ -0,0 +1,426 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#if defined(WEBRTC_WIN)
+// clang-format off
+#include <windows.h>  // Must come first.
+#include <mmsystem.h>
+// clang-format on
+#endif
+
+#include <memory>
+#include <vector>
+
+#include "rtc_base/bind.h"
+#include "rtc_base/event.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "rtc_base/timeutils.h"
+
+using rtc::test::TaskQueueForTest;
+
+namespace rtc {
+
+namespace {
+// Noop on all platforms except Windows, where it turns on high precision
+// multimedia timers which increases the precision of TimeMillis() while in
+// scope.
+class EnableHighResTimers {
+ public:
+#if !defined(WEBRTC_WIN)
+  EnableHighResTimers() {}
+#else
+  EnableHighResTimers() : enabled_(timeBeginPeriod(1) == TIMERR_NOERROR) {}
+  ~EnableHighResTimers() {
+    if (enabled_)
+      timeEndPeriod(1);
+  }
+
+ private:
+  const bool enabled_;
+#endif
+};
+
+void CheckCurrent(Event* signal, TaskQueue* queue) {
+  EXPECT_TRUE(queue->IsCurrent());
+  if (signal)
+    signal->Set();
+}
+
+}  // namespace
+
+TEST(TaskQueueTest, Construct) {
+  static const char kQueueName[] = "Construct";
+  TaskQueue queue(kQueueName);
+  EXPECT_FALSE(queue.IsCurrent());
+}
+
+TEST(TaskQueueTest, PostAndCheckCurrent) {
+  static const char kQueueName[] = "PostAndCheckCurrent";
+  Event event(false, false);
+  TaskQueue queue(kQueueName);
+
+  // We're not running a task, so there shouldn't be a current queue.
+  EXPECT_FALSE(queue.IsCurrent());
+  EXPECT_FALSE(TaskQueue::Current());
+
+  queue.PostTask(Bind(&CheckCurrent, &event, &queue));
+  EXPECT_TRUE(event.Wait(1000));
+}
+
+TEST(TaskQueueTest, PostCustomTask) {
+  static const char kQueueName[] = "PostCustomImplementation";
+  TaskQueueForTest queue(kQueueName);
+
+  class CustomTask : public QueuedTask {
+   public:
+    CustomTask() {}
+    bool ran() const { return ran_; }
+
+   private:
+    bool Run() override {
+      ran_ = true;
+      return false;  // Never allow the task to be deleted by the queue.
+    }
+
+    bool ran_ = false;
+  } my_task;
+
+  queue.SendTask(&my_task);
+  EXPECT_TRUE(my_task.ran());
+}
+
+TEST(TaskQueueTest, PostLambda) {
+  TaskQueueForTest queue("PostLambda");
+  bool ran = false;
+  queue.SendTask([&ran]() { ran = true; });
+  EXPECT_TRUE(ran);
+}
+
+TEST(TaskQueueTest, PostDelayedZero) {
+  static const char kQueueName[] = "PostDelayedZero";
+  Event event(false, false);
+  TaskQueue queue(kQueueName);
+
+  queue.PostDelayedTask([&event]() { event.Set(); }, 0);
+  EXPECT_TRUE(event.Wait(1000));
+}
+
+TEST(TaskQueueTest, PostFromQueue) {
+  static const char kQueueName[] = "PostFromQueue";
+  Event event(false, false);
+  TaskQueue queue(kQueueName);
+
+  queue.PostTask(
+      [&event, &queue]() { queue.PostTask([&event]() { event.Set(); }); });
+  EXPECT_TRUE(event.Wait(1000));
+}
+
+TEST(TaskQueueTest, PostDelayed) {
+  static const char kQueueName[] = "PostDelayed";
+  Event event(false, false);
+  TaskQueue queue(kQueueName, TaskQueue::Priority::HIGH);
+
+  uint32_t start = Time();
+  queue.PostDelayedTask(Bind(&CheckCurrent, &event, &queue), 100);
+  EXPECT_TRUE(event.Wait(1000));
+  uint32_t end = Time();
+  // These tests are a little relaxed due to how "powerful" our test bots can
+  // be.  Most recently we've seen windows bots fire the callback after 94-99ms,
+  // which is why we have a little bit of leeway backwards as well.
+  EXPECT_GE(end - start, 90u);
+  EXPECT_NEAR(end - start, 190u, 100u);  // Accept 90-290.
+}
+
+// This task needs to be run manually due to the slowness of some of our bots.
+// TODO(tommi): Can we run this on the perf bots?
+TEST(TaskQueueTest, DISABLED_PostDelayedHighRes) {
+  EnableHighResTimers high_res_scope;
+
+  static const char kQueueName[] = "PostDelayedHighRes";
+  Event event(false, false);
+  TaskQueue queue(kQueueName, TaskQueue::Priority::HIGH);
+
+  uint32_t start = Time();
+  queue.PostDelayedTask(Bind(&CheckCurrent, &event, &queue), 3);
+  EXPECT_TRUE(event.Wait(1000));
+  uint32_t end = TimeMillis();
+  // These tests are a little relaxed due to how "powerful" our test bots can
+  // be.  Most recently we've seen windows bots fire the callback after 94-99ms,
+  // which is why we have a little bit of leeway backwards as well.
+  EXPECT_GE(end - start, 3u);
+  EXPECT_NEAR(end - start, 3, 3u);
+}
+
+TEST(TaskQueueTest, PostMultipleDelayed) {
+  static const char kQueueName[] = "PostMultipleDelayed";
+  TaskQueue queue(kQueueName);
+
+  std::vector<std::unique_ptr<Event>> events;
+  for (int i = 0; i < 100; ++i) {
+    events.push_back(std::unique_ptr<Event>(new Event(false, false)));
+    queue.PostDelayedTask(
+        Bind(&CheckCurrent, events.back().get(), &queue), i);
+  }
+
+  for (const auto& e : events)
+    EXPECT_TRUE(e->Wait(1000));
+}
+
+TEST(TaskQueueTest, PostDelayedAfterDestruct) {
+  static const char kQueueName[] = "PostDelayedAfterDestruct";
+  Event event(false, false);
+  {
+    TaskQueue queue(kQueueName);
+    queue.PostDelayedTask(Bind(&CheckCurrent, &event, &queue), 100);
+  }
+  EXPECT_FALSE(event.Wait(200));  // Task should not run.
+}
+
+TEST(TaskQueueTest, PostAndReply) {
+  static const char kPostQueue[] = "PostQueue";
+  static const char kReplyQueue[] = "ReplyQueue";
+  Event event(false, false);
+  TaskQueue post_queue(kPostQueue);
+  TaskQueue reply_queue(kReplyQueue);
+
+  post_queue.PostTaskAndReply(
+      Bind(&CheckCurrent, nullptr, &post_queue),
+      Bind(&CheckCurrent, &event, &reply_queue), &reply_queue);
+  EXPECT_TRUE(event.Wait(1000));
+}
+
+TEST(TaskQueueTest, PostAndReuse) {
+  static const char kPostQueue[] = "PostQueue";
+  static const char kReplyQueue[] = "ReplyQueue";
+  Event event(false, false);
+  TaskQueue post_queue(kPostQueue);
+  TaskQueue reply_queue(kReplyQueue);
+
+  int call_count = 0;
+
+  class ReusedTask : public QueuedTask {
+   public:
+    ReusedTask(int* counter, TaskQueue* reply_queue, Event* event)
+        : counter_(counter), reply_queue_(reply_queue), event_(event) {
+      EXPECT_EQ(0, *counter_);
+    }
+
+   private:
+    bool Run() override {
+      if (++(*counter_) == 1) {
+        std::unique_ptr<QueuedTask> myself(this);
+        reply_queue_->PostTask(std::move(myself));
+        // At this point, the object is owned by reply_queue_ and it's
+        // theoratically possible that the object has been deleted (e.g. if
+        // posting wasn't possible).  So, don't touch any member variables here.
+
+        // Indicate to the current queue that ownership has been transferred.
+        return false;
+      } else {
+        EXPECT_EQ(2, *counter_);
+        EXPECT_TRUE(reply_queue_->IsCurrent());
+        event_->Set();
+        return true;  // Indicate that the object should be deleted.
+      }
+    }
+
+    int* const counter_;
+    TaskQueue* const reply_queue_;
+    Event* const event_;
+  };
+
+  std::unique_ptr<ReusedTask> task(
+      new ReusedTask(&call_count, &reply_queue, &event));
+
+  post_queue.PostTask(std::move(task));
+  EXPECT_TRUE(event.Wait(1000));
+}
+
+TEST(TaskQueueTest, PostAndReplyLambda) {
+  static const char kPostQueue[] = "PostQueue";
+  static const char kReplyQueue[] = "ReplyQueue";
+  Event event(false, false);
+  TaskQueue post_queue(kPostQueue);
+  TaskQueue reply_queue(kReplyQueue);
+
+  bool my_flag = false;
+  post_queue.PostTaskAndReply([&my_flag]() { my_flag = true; },
+                              [&event]() { event.Set(); }, &reply_queue);
+  EXPECT_TRUE(event.Wait(1000));
+  EXPECT_TRUE(my_flag);
+}
+
+TEST(TaskQueueTest, PostCopyableClosure) {
+  struct CopyableClosure {
+    CopyableClosure(int* num_copies, int* num_moves, Event* event)
+        : num_copies(num_copies), num_moves(num_moves), event(event) {}
+    CopyableClosure(const CopyableClosure& other)
+        : num_copies(other.num_copies),
+          num_moves(other.num_moves),
+          event(other.event) {
+      ++*num_copies;
+    }
+    CopyableClosure(CopyableClosure&& other)
+        : num_copies(other.num_copies),
+          num_moves(other.num_moves),
+          event(other.event) {
+      ++*num_moves;
+    }
+    void operator()() { event->Set(); }
+
+    int* num_copies;
+    int* num_moves;
+    Event* event;
+  };
+
+  int num_copies = 0;
+  int num_moves = 0;
+  Event event(false, false);
+
+  static const char kPostQueue[] = "PostCopyableClosure";
+  TaskQueue post_queue(kPostQueue);
+  {
+    CopyableClosure closure(&num_copies, &num_moves, &event);
+    post_queue.PostTask(closure);
+    // Destroy closure to check with msan and tsan posted task has own copy.
+  }
+
+  EXPECT_TRUE(event.Wait(1000));
+  EXPECT_EQ(num_copies, 1);
+  EXPECT_EQ(num_moves, 0);
+}
+
+TEST(TaskQueueTest, PostMoveOnlyClosure) {
+  struct SomeState {
+    explicit SomeState(Event* event) : event(event) {}
+    ~SomeState() { event->Set(); }
+    Event* event;
+  };
+  struct MoveOnlyClosure {
+    MoveOnlyClosure(int* num_moves, std::unique_ptr<SomeState> state)
+        : num_moves(num_moves), state(std::move(state)) {}
+    MoveOnlyClosure(const MoveOnlyClosure&) = delete;
+    MoveOnlyClosure(MoveOnlyClosure&& other)
+        : num_moves(other.num_moves), state(std::move(other.state)) {
+      ++*num_moves;
+    }
+    void operator()() { state.reset(); }
+
+    int* num_moves;
+    std::unique_ptr<SomeState> state;
+  };
+
+  int num_moves = 0;
+  Event event(false, false);
+  std::unique_ptr<SomeState> state(new SomeState(&event));
+
+  static const char kPostQueue[] = "PostMoveOnlyClosure";
+  TaskQueue post_queue(kPostQueue);
+  post_queue.PostTask(MoveOnlyClosure(&num_moves, std::move(state)));
+
+  EXPECT_TRUE(event.Wait(1000));
+  EXPECT_EQ(num_moves, 1);
+}
+
+TEST(TaskQueueTest, PostMoveOnlyCleanup) {
+  struct SomeState {
+    explicit SomeState(Event* event) : event(event) {}
+    ~SomeState() { event->Set(); }
+    Event* event;
+  };
+  struct MoveOnlyClosure {
+    void operator()() { state.reset(); }
+
+    std::unique_ptr<SomeState> state;
+  };
+
+  Event event_run(false, false);
+  Event event_cleanup(false, false);
+  std::unique_ptr<SomeState> state_run(new SomeState(&event_run));
+  std::unique_ptr<SomeState> state_cleanup(new SomeState(&event_cleanup));
+
+  static const char kPostQueue[] = "PostMoveOnlyCleanup";
+  TaskQueue post_queue(kPostQueue);
+  post_queue.PostTask(NewClosure(MoveOnlyClosure{std::move(state_run)},
+                                 MoveOnlyClosure{std::move(state_cleanup)}));
+
+  EXPECT_TRUE(event_cleanup.Wait(1000));
+  // Expect run closure to complete before cleanup closure.
+  EXPECT_TRUE(event_run.Wait(0));
+}
+
+// This test covers a particular bug that we had in the libevent implementation
+// where we could hit a deadlock while trying to post a reply task to a queue
+// that was being deleted.  The test isn't guaranteed to hit that case but it's
+// written in a way that makes it likely and by running with --gtest_repeat=1000
+// the bug would occur. Alas, now it should be fixed.
+TEST(TaskQueueTest, PostAndReplyDeadlock) {
+  Event event(false, false);
+  TaskQueue post_queue("PostQueue");
+  TaskQueue reply_queue("ReplyQueue");
+
+  post_queue.PostTaskAndReply([&event]() { event.Set(); }, []() {},
+                              &reply_queue);
+  EXPECT_TRUE(event.Wait(1000));
+}
+
+void TestPostTaskAndReply(TaskQueue* work_queue,
+                          Event* event) {
+  ASSERT_FALSE(work_queue->IsCurrent());
+  work_queue->PostTaskAndReply(
+      Bind(&CheckCurrent, nullptr, work_queue),
+      NewClosure([event]() { event->Set(); }));
+}
+
+// Does a PostTaskAndReply from within a task to post and reply to the current
+// queue.  All in all there will be 3 tasks posted and run.
+TEST(TaskQueueTest, PostAndReply2) {
+  static const char kQueueName[] = "PostAndReply2";
+  static const char kWorkQueueName[] = "PostAndReply2_Worker";
+  Event event(false, false);
+  TaskQueue queue(kQueueName);
+  TaskQueue work_queue(kWorkQueueName);
+
+  queue.PostTask(
+      Bind(&TestPostTaskAndReply, &work_queue, &event));
+  EXPECT_TRUE(event.Wait(1000));
+}
+
+// Tests posting more messages than a queue can queue up.
+// In situations like that, tasks will get dropped.
+TEST(TaskQueueTest, PostALot) {
+  // To destruct the event after the queue has gone out of scope.
+  Event event(false, false);
+
+  int tasks_executed = 0;
+  int tasks_cleaned_up = 0;
+  static const int kTaskCount = 0xffff;
+
+  {
+    static const char kQueueName[] = "PostALot";
+    TaskQueue queue(kQueueName);
+
+    // On linux, the limit of pending bytes in the pipe buffer is 0xffff.
+    // So here we post a total of 0xffff+1 messages, which triggers a failure
+    // case inside of the libevent queue implementation.
+
+    queue.PostTask([&event]() { event.Wait(Event::kForever); });
+    for (int i = 0; i < kTaskCount; ++i)
+      queue.PostTask(NewClosure([&tasks_executed]() { ++tasks_executed; },
+                                [&tasks_cleaned_up]() { ++tasks_cleaned_up; }));
+    event.Set();  // Unblock the first task.
+  }
+
+  EXPECT_GE(tasks_cleaned_up, tasks_executed);
+  EXPECT_EQ(kTaskCount, tasks_cleaned_up);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/task_queue_win.cc b/rtc_base/task_queue_win.cc
new file mode 100644
index 0000000..cbf86a6
--- /dev/null
+++ b/rtc_base/task_queue_win.cc
@@ -0,0 +1,517 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/task_queue.h"
+
+// Include winsock2.h before including <windows.h> to maintain consistency with
+// win32.h.  We can't include win32.h directly here since it pulls in
+// headers such as basictypes.h which causes problems in Chromium where webrtc
+// exists as two separate projects, webrtc and libjingle.
+#include <winsock2.h>
+#include <windows.h>
+#include <sal.h>  // Must come after windows headers.
+#include <mmsystem.h>  // Must come after windows headers.
+#include <string.h>
+
+#include <algorithm>
+#include <queue>
+#include <utility>
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/event.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/refcountedobject.h"
+#include "rtc_base/timeutils.h"
+
+namespace rtc {
+namespace {
+#define WM_RUN_TASK WM_USER + 1
+#define WM_QUEUE_DELAYED_TASK WM_USER + 2
+
+using Priority = TaskQueue::Priority;
+
+DWORD g_queue_ptr_tls = 0;
+
+BOOL CALLBACK InitializeTls(PINIT_ONCE init_once, void* param, void** context) {
+  g_queue_ptr_tls = TlsAlloc();
+  return TRUE;
+}
+
+DWORD GetQueuePtrTls() {
+  static INIT_ONCE init_once = INIT_ONCE_STATIC_INIT;
+  ::InitOnceExecuteOnce(&init_once, InitializeTls, nullptr, nullptr);
+  return g_queue_ptr_tls;
+}
+
+struct ThreadStartupData {
+  Event* started;
+  void* thread_context;
+};
+
+void CALLBACK InitializeQueueThread(ULONG_PTR param) {
+  MSG msg;
+  ::PeekMessage(&msg, nullptr, WM_USER, WM_USER, PM_NOREMOVE);
+  ThreadStartupData* data = reinterpret_cast<ThreadStartupData*>(param);
+  ::TlsSetValue(GetQueuePtrTls(), data->thread_context);
+  data->started->Set();
+}
+
+ThreadPriority TaskQueuePriorityToThreadPriority(Priority priority) {
+  switch (priority) {
+    case Priority::HIGH:
+      return kRealtimePriority;
+    case Priority::LOW:
+      return kLowPriority;
+    case Priority::NORMAL:
+      return kNormalPriority;
+    default:
+      RTC_NOTREACHED();
+      break;
+  }
+  return kNormalPriority;
+}
+
+int64_t GetTick() {
+  static const UINT kPeriod = 1;
+  bool high_res = (timeBeginPeriod(kPeriod) == TIMERR_NOERROR);
+  int64_t ret = TimeMillis();
+  if (high_res)
+    timeEndPeriod(kPeriod);
+  return ret;
+}
+
+class DelayedTaskInfo {
+ public:
+  // Default ctor needed to support priority_queue::pop().
+  DelayedTaskInfo() {}
+  DelayedTaskInfo(uint32_t milliseconds, std::unique_ptr<QueuedTask> task)
+      : due_time_(GetTick() + milliseconds), task_(std::move(task)) {}
+  DelayedTaskInfo(DelayedTaskInfo&&) = default;
+
+  // Implement for priority_queue.
+  bool operator>(const DelayedTaskInfo& other) const {
+    return due_time_ > other.due_time_;
+  }
+
+  // Required by priority_queue::pop().
+  DelayedTaskInfo& operator=(DelayedTaskInfo&& other) = default;
+
+  // See below for why this method is const.
+  void Run() const {
+    RTC_DCHECK(due_time_);
+    task_->Run() ? task_.reset() : static_cast<void>(task_.release());
+  }
+
+  int64_t due_time() const { return due_time_; }
+
+ private:
+  int64_t due_time_ = 0;  // Absolute timestamp in milliseconds.
+
+  // |task| needs to be mutable because std::priority_queue::top() returns
+  // a const reference and a key in an ordered queue must not be changed.
+  // There are two basic workarounds, one using const_cast, which would also
+  // make the key (|due_time|), non-const and the other is to make the non-key
+  // (|task|), mutable.
+  // Because of this, the |task| variable is made private and can only be
+  // mutated by calling the |Run()| method.
+  mutable std::unique_ptr<QueuedTask> task_;
+};
+
+class MultimediaTimer {
+ public:
+  // Note: We create an event that requires manual reset.
+  MultimediaTimer() : event_(::CreateEvent(nullptr, true, false, nullptr)) {}
+
+  ~MultimediaTimer() {
+    Cancel();
+    ::CloseHandle(event_);
+  }
+
+  bool StartOneShotTimer(UINT delay_ms) {
+    RTC_DCHECK_EQ(0, timer_id_);
+    RTC_DCHECK(event_ != nullptr);
+    timer_id_ =
+        ::timeSetEvent(delay_ms, 0, reinterpret_cast<LPTIMECALLBACK>(event_), 0,
+                       TIME_ONESHOT | TIME_CALLBACK_EVENT_SET);
+    return timer_id_ != 0;
+  }
+
+  void Cancel() {
+    ::ResetEvent(event_);
+    if (timer_id_) {
+      ::timeKillEvent(timer_id_);
+      timer_id_ = 0;
+    }
+  }
+
+  HANDLE* event_for_wait() { return &event_; }
+
+ private:
+  HANDLE event_ = nullptr;
+  MMRESULT timer_id_ = 0;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(MultimediaTimer);
+};
+
+}  // namespace
+
+class TaskQueue::Impl : public RefCountInterface {
+ public:
+  Impl(const char* queue_name, TaskQueue* queue, Priority priority);
+  ~Impl() override;
+
+  static TaskQueue::Impl* Current();
+  static TaskQueue* CurrentQueue();
+
+  // Used for DCHECKing the current queue.
+  bool IsCurrent() const;
+
+  template <class Closure,
+            typename std::enable_if<!std::is_convertible<
+                Closure,
+                std::unique_ptr<QueuedTask>>::value>::type* = nullptr>
+  void PostTask(Closure&& closure) {
+    PostTask(NewClosure(std::forward<Closure>(closure)));
+  }
+
+  void PostTask(std::unique_ptr<QueuedTask> task);
+  void PostTaskAndReply(std::unique_ptr<QueuedTask> task,
+                        std::unique_ptr<QueuedTask> reply,
+                        TaskQueue::Impl* reply_queue);
+
+  void PostDelayedTask(std::unique_ptr<QueuedTask> task, uint32_t milliseconds);
+
+  void RunPendingTasks();
+
+ private:
+  static void ThreadMain(void* context);
+
+  class WorkerThread : public PlatformThread {
+   public:
+    WorkerThread(ThreadRunFunction func,
+                 void* obj,
+                 const char* thread_name,
+                 ThreadPriority priority)
+        : PlatformThread(func, obj, thread_name, priority) {}
+
+    bool QueueAPC(PAPCFUNC apc_function, ULONG_PTR data) {
+      return PlatformThread::QueueAPC(apc_function, data);
+    }
+  };
+
+  class ThreadState {
+   public:
+    explicit ThreadState(HANDLE in_queue) : in_queue_(in_queue) {}
+    ~ThreadState() {}
+
+    void RunThreadMain();
+
+   private:
+    bool ProcessQueuedMessages();
+    void RunDueTasks();
+    void ScheduleNextTimer();
+    void CancelTimers();
+
+    // Since priority_queue<> by defult orders items in terms of
+    // largest->smallest, using std::less<>, and we want smallest->largest,
+    // we would like to use std::greater<> here. Alas it's only available in
+    // C++14 and later, so we roll our own compare template that that relies on
+    // operator<().
+    template <typename T>
+    struct greater {
+      bool operator()(const T& l, const T& r) { return l > r; }
+    };
+
+    MultimediaTimer timer_;
+    std::priority_queue<DelayedTaskInfo,
+                        std::vector<DelayedTaskInfo>,
+                        greater<DelayedTaskInfo>>
+        timer_tasks_;
+    UINT_PTR timer_id_ = 0;
+    HANDLE in_queue_;
+  };
+
+  TaskQueue* const queue_;
+  WorkerThread thread_;
+  rtc::CriticalSection pending_lock_;
+  std::queue<std::unique_ptr<QueuedTask>> pending_
+      RTC_GUARDED_BY(pending_lock_);
+  HANDLE in_queue_;
+};
+
+TaskQueue::Impl::Impl(const char* queue_name,
+                      TaskQueue* queue,
+                      Priority priority)
+    : queue_(queue),
+      thread_(&TaskQueue::Impl::ThreadMain,
+              this,
+              queue_name,
+              TaskQueuePriorityToThreadPriority(priority)),
+      in_queue_(::CreateEvent(nullptr, true, false, nullptr)) {
+  RTC_DCHECK(queue_name);
+  RTC_DCHECK(in_queue_);
+  thread_.Start();
+  Event event(false, false);
+  ThreadStartupData startup = {&event, this};
+  RTC_CHECK(thread_.QueueAPC(&InitializeQueueThread,
+                             reinterpret_cast<ULONG_PTR>(&startup)));
+  event.Wait(Event::kForever);
+}
+
+TaskQueue::Impl::~Impl() {
+  RTC_DCHECK(!IsCurrent());
+  while (!::PostThreadMessage(thread_.GetThreadRef(), WM_QUIT, 0, 0)) {
+    RTC_CHECK_EQ(ERROR_NOT_ENOUGH_QUOTA, ::GetLastError());
+    Sleep(1);
+  }
+  thread_.Stop();
+  ::CloseHandle(in_queue_);
+}
+
+// static
+TaskQueue::Impl* TaskQueue::Impl::Current() {
+  return static_cast<TaskQueue::Impl*>(::TlsGetValue(GetQueuePtrTls()));
+}
+
+// static
+TaskQueue* TaskQueue::Impl::CurrentQueue() {
+  TaskQueue::Impl* current = Current();
+  return current ? current->queue_ : nullptr;
+}
+
+bool TaskQueue::Impl::IsCurrent() const {
+  return IsThreadRefEqual(thread_.GetThreadRef(), CurrentThreadRef());
+}
+
+void TaskQueue::Impl::PostTask(std::unique_ptr<QueuedTask> task) {
+  rtc::CritScope lock(&pending_lock_);
+  pending_.push(std::move(task));
+  ::SetEvent(in_queue_);
+}
+
+void TaskQueue::Impl::PostDelayedTask(std::unique_ptr<QueuedTask> task,
+                                      uint32_t milliseconds) {
+  if (!milliseconds) {
+    PostTask(std::move(task));
+    return;
+  }
+
+  // TODO(tommi): Avoid this allocation.  It is currently here since
+  // the timestamp stored in the task info object, is a 64bit timestamp
+  // and WPARAM is 32bits in 32bit builds.  Otherwise, we could pass the
+  // task pointer and timestamp as LPARAM and WPARAM.
+  auto* task_info = new DelayedTaskInfo(milliseconds, std::move(task));
+  if (!::PostThreadMessage(thread_.GetThreadRef(), WM_QUEUE_DELAYED_TASK, 0,
+                           reinterpret_cast<LPARAM>(task_info))) {
+    delete task_info;
+  }
+}
+
+void TaskQueue::Impl::PostTaskAndReply(std::unique_ptr<QueuedTask> task,
+                                       std::unique_ptr<QueuedTask> reply,
+                                       TaskQueue::Impl* reply_queue) {
+  QueuedTask* task_ptr = task.release();
+  QueuedTask* reply_task_ptr = reply.release();
+  DWORD reply_thread_id = reply_queue->thread_.GetThreadRef();
+  PostTask([task_ptr, reply_task_ptr, reply_thread_id]() {
+    if (task_ptr->Run())
+      delete task_ptr;
+    // If the thread's message queue is full, we can't queue the task and will
+    // have to drop it (i.e. delete).
+    if (!::PostThreadMessage(reply_thread_id, WM_RUN_TASK, 0,
+                             reinterpret_cast<LPARAM>(reply_task_ptr))) {
+      delete reply_task_ptr;
+    }
+  });
+}
+
+void TaskQueue::Impl::RunPendingTasks() {
+  while (true) {
+    std::unique_ptr<QueuedTask> task;
+    {
+      rtc::CritScope lock(&pending_lock_);
+      if (pending_.empty())
+        break;
+      task = std::move(pending_.front());
+      pending_.pop();
+    }
+
+    if (!task->Run())
+      task.release();
+  }
+}
+
+// static
+void TaskQueue::Impl::ThreadMain(void* context) {
+  ThreadState state(static_cast<TaskQueue::Impl*>(context)->in_queue_);
+  state.RunThreadMain();
+}
+
+void TaskQueue::Impl::ThreadState::RunThreadMain() {
+  HANDLE handles[2] = { *timer_.event_for_wait(), in_queue_ };
+  while (true) {
+    // Make sure we do an alertable wait as that's required to allow APCs to run
+    // (e.g. required for InitializeQueueThread and stopping the thread in
+    // PlatformThread).
+    DWORD result = ::MsgWaitForMultipleObjectsEx(
+        arraysize(handles), handles, INFINITE, QS_ALLEVENTS, MWMO_ALERTABLE);
+    RTC_CHECK_NE(WAIT_FAILED, result);
+    if (result == (WAIT_OBJECT_0 + 2)) {
+      // There are messages in the message queue that need to be handled.
+      if (!ProcessQueuedMessages())
+        break;
+    }
+
+    if (result == WAIT_OBJECT_0 || (!timer_tasks_.empty() &&
+        ::WaitForSingleObject(*timer_.event_for_wait(), 0) == WAIT_OBJECT_0)) {
+      // The multimedia timer was signaled.
+      timer_.Cancel();
+      RunDueTasks();
+      ScheduleNextTimer();
+    }
+
+    if (result == (WAIT_OBJECT_0 + 1)) {
+      ::ResetEvent(in_queue_);
+      TaskQueue::Impl::Current()->RunPendingTasks();
+    }
+  }
+}
+
+bool TaskQueue::Impl::ThreadState::ProcessQueuedMessages() {
+  MSG msg = {};
+  // To protect against overly busy message queues, we limit the time
+  // we process tasks to a few milliseconds. If we don't do that, there's
+  // a chance that timer tasks won't ever run.
+  static const int kMaxTaskProcessingTimeMs = 500;
+  auto start = GetTick();
+  while (::PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE) &&
+         msg.message != WM_QUIT) {
+    if (!msg.hwnd) {
+      switch (msg.message) {
+        // TODO(tommi): Stop using this way of queueing tasks.
+        case WM_RUN_TASK: {
+          QueuedTask* task = reinterpret_cast<QueuedTask*>(msg.lParam);
+          if (task->Run())
+            delete task;
+          break;
+        }
+        case WM_QUEUE_DELAYED_TASK: {
+          std::unique_ptr<DelayedTaskInfo> info(
+              reinterpret_cast<DelayedTaskInfo*>(msg.lParam));
+          bool need_to_schedule_timers =
+              timer_tasks_.empty() ||
+              timer_tasks_.top().due_time() > info->due_time();
+          timer_tasks_.emplace(std::move(*info.get()));
+          if (need_to_schedule_timers) {
+            CancelTimers();
+            ScheduleNextTimer();
+          }
+          break;
+        }
+        case WM_TIMER: {
+          RTC_DCHECK_EQ(timer_id_, msg.wParam);
+          ::KillTimer(nullptr, msg.wParam);
+          timer_id_ = 0;
+          RunDueTasks();
+          ScheduleNextTimer();
+          break;
+        }
+        default:
+          RTC_NOTREACHED();
+          break;
+      }
+    } else {
+      ::TranslateMessage(&msg);
+      ::DispatchMessage(&msg);
+    }
+
+    if (GetTick() > start + kMaxTaskProcessingTimeMs)
+      break;
+  }
+  return msg.message != WM_QUIT;
+}
+
+void TaskQueue::Impl::ThreadState::RunDueTasks() {
+  RTC_DCHECK(!timer_tasks_.empty());
+  auto now = GetTick();
+  do {
+    const auto& top = timer_tasks_.top();
+    if (top.due_time() > now)
+      break;
+    top.Run();
+    timer_tasks_.pop();
+  } while (!timer_tasks_.empty());
+}
+
+void TaskQueue::Impl::ThreadState::ScheduleNextTimer() {
+  RTC_DCHECK_EQ(timer_id_, 0);
+  if (timer_tasks_.empty())
+    return;
+
+  const auto& next_task = timer_tasks_.top();
+  int64_t delay_ms = std::max(0ll, next_task.due_time() - GetTick());
+  uint32_t milliseconds = rtc::dchecked_cast<uint32_t>(delay_ms);
+  if (!timer_.StartOneShotTimer(milliseconds))
+    timer_id_ = ::SetTimer(nullptr, 0, milliseconds, nullptr);
+}
+
+void TaskQueue::Impl::ThreadState::CancelTimers() {
+  timer_.Cancel();
+  if (timer_id_) {
+    ::KillTimer(nullptr, timer_id_);
+    timer_id_ = 0;
+  }
+}
+
+// Boilerplate for the PIMPL pattern.
+TaskQueue::TaskQueue(const char* queue_name, Priority priority)
+    : impl_(new RefCountedObject<TaskQueue::Impl>(queue_name, this, priority)) {
+}
+
+TaskQueue::~TaskQueue() {}
+
+// static
+TaskQueue* TaskQueue::Current() {
+  return TaskQueue::Impl::CurrentQueue();
+}
+
+// Used for DCHECKing the current queue.
+bool TaskQueue::IsCurrent() const {
+  return impl_->IsCurrent();
+}
+
+void TaskQueue::PostTask(std::unique_ptr<QueuedTask> task) {
+  return TaskQueue::impl_->PostTask(std::move(task));
+}
+
+void TaskQueue::PostTaskAndReply(std::unique_ptr<QueuedTask> task,
+                                 std::unique_ptr<QueuedTask> reply,
+                                 TaskQueue* reply_queue) {
+  return TaskQueue::impl_->PostTaskAndReply(std::move(task), std::move(reply),
+                                            reply_queue->impl_.get());
+}
+
+void TaskQueue::PostTaskAndReply(std::unique_ptr<QueuedTask> task,
+                                 std::unique_ptr<QueuedTask> reply) {
+  return TaskQueue::impl_->PostTaskAndReply(std::move(task), std::move(reply),
+                                            impl_.get());
+}
+
+void TaskQueue::PostDelayedTask(std::unique_ptr<QueuedTask> task,
+                                uint32_t milliseconds) {
+  return TaskQueue::impl_->PostDelayedTask(std::move(task), milliseconds);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/template_util.h b/rtc_base/template_util.h
new file mode 100644
index 0000000..04e5e37
--- /dev/null
+++ b/rtc_base/template_util.h
@@ -0,0 +1,127 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Borrowed from Chromium's src/base/template_util.h.
+
+#ifndef RTC_BASE_TEMPLATE_UTIL_H_
+#define RTC_BASE_TEMPLATE_UTIL_H_
+
+#include <stddef.h>  // For size_t.
+
+namespace rtc {
+
+// Template definitions from tr1.
+
+template<class T, T v>
+struct integral_constant {
+  static const T value = v;
+  typedef T value_type;
+  typedef integral_constant<T, v> type;
+};
+
+template <class T, T v> const T integral_constant<T, v>::value;
+
+typedef integral_constant<bool, true> true_type;
+typedef integral_constant<bool, false> false_type;
+
+template <class T> struct is_pointer : false_type {};
+template <class T> struct is_pointer<T*> : true_type {};
+
+template <class T, class U> struct is_same : public false_type {};
+template <class T> struct is_same<T, T> : true_type {};
+
+template<class> struct is_array : public false_type {};
+template<class T, size_t n> struct is_array<T[n]> : public true_type {};
+template<class T> struct is_array<T[]> : public true_type {};
+
+template <class T> struct is_non_const_reference : false_type {};
+template <class T> struct is_non_const_reference<T&> : true_type {};
+template <class T> struct is_non_const_reference<const T&> : false_type {};
+
+template <class T> struct is_void : false_type {};
+template <> struct is_void<void> : true_type {};
+
+// Helper useful for converting a tuple to variadic template function
+// arguments.
+//
+// sequence_generator<3>::type will be sequence<0, 1, 2>.
+template <int...>
+struct sequence {};
+template <int N, int... S>
+struct sequence_generator : sequence_generator<N - 1, N - 1, S...> {};
+template <int... S>
+struct sequence_generator<0, S...> {
+  typedef sequence<S...> type;
+};
+
+namespace internal {
+
+// Types YesType and NoType are guaranteed such that sizeof(YesType) <
+// sizeof(NoType).
+typedef char YesType;
+
+struct NoType {
+  YesType dummy[2];
+};
+
+// This class is an implementation detail for is_convertible, and you
+// don't need to know how it works to use is_convertible. For those
+// who care: we declare two different functions, one whose argument is
+// of type To and one with a variadic argument list. We give them
+// return types of different size, so we can use sizeof to trick the
+// compiler into telling us which function it would have chosen if we
+// had called it with an argument of type From.  See Alexandrescu's
+// _Modern C++ Design_ for more details on this sort of trick.
+
+struct ConvertHelper {
+  template <typename To>
+  static YesType Test(To);
+
+  template <typename To>
+  static NoType Test(...);
+
+  template <typename From>
+  static From& Create();
+};
+
+// Used to determine if a type is a struct/union/class. Inspired by Boost's
+// is_class type_trait implementation.
+struct IsClassHelper {
+  template <typename C>
+  static YesType Test(void(C::*)(void));
+
+  template <typename C>
+  static NoType Test(...);
+};
+
+}  // namespace internal
+
+// Inherits from true_type if From is convertible to To, false_type otherwise.
+//
+// Note that if the type is convertible, this will be a true_type REGARDLESS
+// of whether or not the conversion would emit a warning.
+template <typename From, typename To>
+struct is_convertible
+    : integral_constant<bool,
+                        sizeof(internal::ConvertHelper::Test<To>(
+                                   internal::ConvertHelper::Create<From>())) ==
+                        sizeof(internal::YesType)> {
+};
+
+template <typename T>
+struct is_class
+    : integral_constant<bool,
+                        sizeof(internal::IsClassHelper::Test<T>(0)) ==
+                            sizeof(internal::YesType)> {
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_TEMPLATE_UTIL_H_
diff --git a/rtc_base/testbase64.h b/rtc_base/testbase64.h
new file mode 100644
index 0000000..dbbdec9
--- /dev/null
+++ b/rtc_base/testbase64.h
@@ -0,0 +1,20 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_TESTBASE64_H_
+#define RTC_BASE_TESTBASE64_H_
+
+/* This file was generated by googleclient/talk/binary2header.sh */
+
+static unsigned char testbase64[] = {
+0xff, 0xd8, 0xff, 0xe0, 0x00, 0x10, 0x4a, 0x46, 0x49, 0x46, 0x00, 0x01, 0x02, 0x01, 0x00, 0x48, 0x00, 0x48, 0x00, 0x00, 0xff, 0xe1, 0x0d, 0x07, 0x45, 0x78, 0x69, 0x66, 0x00, 0x00, 0x4d, 0x4d, 0x00, 0x2a, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0c, 0x01, 0x0e, 0x00, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x9e, 0x01, 0x0f, 0x00, 0x02, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0xbe, 0x01, 0x10, 0x00, 0x02, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0xc3, 0x01, 0x12, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x01, 0x1a, 0x00, 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xcc, 0x01, 0x1b, 0x00, 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xd4, 0x01, 0x28, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x00, 0x01, 0x31, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0xdc, 0x01, 0x32, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0xf0, 0x01, 0x3c, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x01, 0x04, 0x02, 0x13, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x00, 0x87, 0x69, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x01, 0x14, 0x00, 0x00, 0x02, 0xc4, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x53, 0x4f, 0x4e, 0x59, 0x00, 0x44, 0x53, 0x43, 0x2d, 0x50, 0x32, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x01, 0x41, 0x64, 0x6f, 0x62, 0x65, 0x20, 0x50, 0x68, 0x6f, 0x74, 0x6f, 0x73, 0x68, 0x6f, 0x70, 0x20, 0x37, 0x2e, 0x30, 0x00, 0x32, 0x30, 0x30, 0x37, 0x3a, 0x30, 0x31, 0x3a, 0x33, 0x30, 0x20, 0x32, 0x33, 0x3a, 0x31, 0x30, 0x3a, 0x30, 0x34, 0x00, 0x4d, 0x61, 0x63, 0x20, 0x4f, 0x53, 0x20, 0x58, 0x20, 0x31, 0x30, 0x2e, 0x34, 0x2e, 0x38, 0x00, 0x00, 0x1c, 0x82, 0x9a, 0x00, 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x02, 0x6a, 0x82, 0x9d, 0x00, 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x02, 0x72, 0x88, 0x22, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x00, 0x88, 0x27, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x64, 0x00, 0x00, 0x90, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x04, 0x30, 0x32, 0x32, 0x30, 0x90, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x02, 0x7a, 0x90, 0x04, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x02, 0x8e, 0x91, 0x01, 0x00, 0x07, 0x00, 0x00, 0x00, 0x04, 0x01, 0x02, 0x03, 0x00, 0x91, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x02, 0xa2, 0x92, 0x04, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x02, 0xaa, 0x92, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x02, 0xb2, 0x92, 0x07, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x05, 0x00, 0x00, 0x92, 0x08, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x92, 0x09, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x0f, 0x00, 0x00, 0x92, 0x0a, 0x00, 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x02, 0xba, 0xa0, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x04, 0x30, 0x31, 0x30, 0x30, 0xa0, 0x01, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0xff, 0xff, 0x00, 0x00, 0xa0, 0x02, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x64, 0xa0, 0x03, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x64, 0xa3, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x01, 0x03, 0x00, 0x00, 0x00, 0xa3, 0x01, 0x00, 0x07, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0xa4, 0x01, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x02, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x06, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x08, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x09, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x0a, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x01, 0x90, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x0a, 0x32, 0x30, 0x30, 0x37, 0x3a, 0x30, 0x31, 0x3a, 0x32, 0x30, 0x20, 0x32, 0x33, 0x3a, 0x30, 0x35, 0x3a, 0x35, 0x32, 0x00, 0x32, 0x30, 0x30, 0x37, 0x3a, 0x30, 0x31, 0x3a, 0x32, 0x30, 0x20, 0x32, 0x33, 0x3a, 0x30, 0x35, 0x3a, 0x35, 0x32, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x4f, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x06, 0x01, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x06, 0x00, 0x00, 0x01, 0x1a, 0x00, 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x03, 0x12, 0x01, 0x1b, 0x00, 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x03, 0x1a, 0x01, 0x28, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x00, 0x02, 0x01, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x03, 0x22, 0x02, 0x02, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x09, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x01, 0xff, 0xd8, 0xff, 0xe0, 0x00, 0x10, 0x4a, 0x46, 0x49, 0x46, 0x00, 0x01, 0x02, 0x01, 0x00, 0x48, 0x00, 0x48, 0x00, 0x00, 0xff, 0xed, 0x00, 0x0c, 0x41, 0x64, 0x6f, 0x62, 0x65, 0x5f, 0x43, 0x4d, 0x00, 0x02, 0xff, 0xee, 0x00, 0x0e, 0x41, 0x64, 0x6f, 0x62, 0x65, 0x00, 0x64, 0x80, 0x00, 0x00, 0x00, 0x01, 0xff, 0xdb, 0x00, 0x84, 0x00, 0x0c, 0x08, 0x08, 0x08, 0x09, 0x08, 0x0c, 0x09, 0x09, 0x0c, 0x11, 0x0b, 0x0a, 0x0b, 0x11, 0x15, 0x0f, 0x0c, 0x0c, 0x0f, 0x15, 0x18, 0x13, 0x13, 0x15, 0x13, 0x13, 0x18, 0x11, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x11, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x01, 0x0d, 0x0b, 0x0b, 0x0d, 0x0e, 0x0d, 0x10, 0x0e, 0x0e, 0x10, 0x14, 0x0e, 0x0e, 0x0e, 0x14, 0x14, 0x0e, 0x0e, 0x0e, 0x0e, 0x14, 0x11, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x11, 0x11, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x11, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0xff, 0xc0, 0x00, 0x11, 0x08, 0x00, 0x64, 0x00, 0x64, 0x03, 0x01, 0x22, 0x00, 0x02, 0x11, 0x01, 0x03, 0x11, 0x01, 0xff, 0xdd, 0x00, 0x04, 0x00, 0x07, 0xff, 0xc4, 0x01, 0x3f, 0x00, 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x01, 0x02, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x01, 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x10, 0x00, 0x01, 0x04, 0x01, 0x03, 0x02, 0x04, 0x02, 0x05, 0x07, 0x06, 0x08, 0x05, 0x03, 0x0c, 0x33, 0x01, 0x00, 0x02, 0x11, 0x03, 0x04, 0x21, 0x12, 0x31, 0x05, 0x41, 0x51, 0x61, 0x13, 0x22, 0x71, 0x81, 0x32, 0x06, 0x14, 0x91, 0xa1, 0xb1, 0x42, 0x23, 0x24, 0x15, 0x52, 0xc1, 0x62, 0x33, 0x34, 0x72, 0x82, 0xd1, 0x43, 0x07, 0x25, 0x92, 0x53, 0xf0, 0xe1, 0xf1, 0x63, 0x73, 0x35, 0x16, 0xa2, 0xb2, 0x83, 0x26, 0x44, 0x93, 0x54, 0x64, 0x45, 0xc2, 0xa3, 0x74, 0x36, 0x17, 0xd2, 0x55, 0xe2, 0x65, 0xf2, 0xb3, 0x84, 0xc3, 0xd3, 0x75, 0xe3, 0xf3, 0x46, 0x27, 0x94, 0xa4, 0x85, 0xb4, 0x95, 0xc4, 0xd4, 0xe4, 0xf4, 0xa5, 0xb5, 0xc5, 0xd5, 0xe5, 0xf5, 0x56, 0x66, 0x76, 0x86, 0x96, 0xa6, 0xb6, 0xc6, 0xd6, 0xe6, 0xf6, 0x37, 0x47, 0x57, 0x67, 0x77, 0x87, 0x97, 0xa7, 0xb7, 0xc7, 0xd7, 0xe7, 0xf7, 0x11, 0x00, 0x02, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x05, 0x06, 0x07, 0x07, 0x06, 0x05, 0x35, 0x01, 0x00, 0x02, 0x11, 0x03, 0x21, 0x31, 0x12, 0x04, 0x41, 0x51, 0x61, 0x71, 0x22, 0x13, 0x05, 0x32, 0x81, 0x91, 0x14, 0xa1, 0xb1, 0x42, 0x23, 0xc1, 0x52, 0xd1, 0xf0, 0x33, 0x24, 0x62, 0xe1, 0x72, 0x82, 0x92, 0x43, 0x53, 0x15, 0x63, 0x73, 0x34, 0xf1, 0x25, 0x06, 0x16, 0xa2, 0xb2, 0x83, 0x07, 0x26, 0x35, 0xc2, 0xd2, 0x44, 0x93, 0x54, 0xa3, 0x17, 0x64, 0x45, 0x55, 0x36, 0x74, 0x65, 0xe2, 0xf2, 0xb3, 0x84, 0xc3, 0xd3, 0x75, 0xe3, 0xf3, 0x46, 0x94, 0xa4, 0x85, 0xb4, 0x95, 0xc4, 0xd4, 0xe4, 0xf4, 0xa5, 0xb5, 0xc5, 0xd5, 0xe5, 0xf5, 0x56, 0x66, 0x76, 0x86, 0x96, 0xa6, 0xb6, 0xc6, 0xd6, 0xe6, 0xf6, 0x27, 0x37, 0x47, 0x57, 0x67, 0x77, 0x87, 0x97, 0xa7, 0xb7, 0xc7, 0xff, 0xda, 0x00, 0x0c, 0x03, 0x01, 0x00, 0x02, 0x11, 0x03, 0x11, 0x00, 0x3f, 0x00, 0xf2, 0xed, 0xb2, 0x8d, 0x4d, 0x45, 0xcd, 0x2f, 0x3f, 0x44, 0x68, 0x93, 0xc3, 0x58, 0xc8, 0xf1, 0x1f, 0x8a, 0x33, 0x86, 0xda, 0x58, 0xc1, 0xa0, 0x02, 0x4f, 0xc4, 0xa1, 0x69, 0xa5, 0x9b, 0x5b, 0x4b, 0x84, 0x73, 0xdf, 0xc9, 0x15, 0xf8, 0xe3, 0xd1, 0x0e, 0x07, 0x93, 0xf3, 0xd1, 0x0f, 0x1c, 0x17, 0xef, 0x2e, 0x3b, 0x5b, 0xdc, 0xff, 0x00, 0xdf, 0x42, 0xbf, 0x8f, 0x8e, 0xdc, 0x82, 0xca, 0xd8, 0x37, 0x11, 0xa9, 0x3d, 0x82, 0x69, 0x2b, 0xc4, 0x6d, 0xc9, 0x75, 0x25, 0xbc, 0xf7, 0xec, 0xa1, 0xb5, 0x74, 0x19, 0x5d, 0x2e, 0x8a, 0x9a, 0x4b, 0x89, 0x7d, 0xc4, 0x68, 0xc6, 0xf6, 0xfe, 0xb2, 0xa0, 0x30, 0x1d, 0x60, 0x86, 0x88, 0x8d, 0x49, 0x3e, 0x01, 0x11, 0x20, 0xa3, 0x8c, 0xb9, 0xb1, 0xaa, 0x62, 0xad, 0xbf, 0x18, 0x97, 0x43, 0x47, 0x1d, 0xd2, 0xaf, 0x04, 0xd9, 0xb8, 0xc8, 0x0d, 0x68, 0xe4, 0xf7, 0x3e, 0x48, 0xf1, 0x05, 0xbc, 0x25, 0xaa, 0x07, 0x71, 0xd9, 0x14, 0x78, 0xf6, 0x49, 0xb5, 0x90, 0xfd, 0xa7, 0xc6, 0x14, 0xfd, 0x1b, 0x1c, 0xff, 0x00, 0x4d, 0x8d, 0x2e, 0x73, 0x8c, 0x35, 0xa3, 0x52, 0x4f, 0x92, 0x48, 0xa6, 0x1a, 0x24, 0xb6, 0x2a, 0xfa, 0xa5, 0x9e, 0x60, 0x64, 0x39, 0x94, 0x13, 0xcb, 0x27, 0x73, 0x80, 0xf3, 0x0c, 0xf6, 0xff, 0x00, 0xd2, 0x5a, 0x78, 0xbf, 0x53, 0x71, 0xf6, 0x01, 0x75, 0xb6, 0x97, 0x6a, 0x25, 0xa1, 0xad, 0x1f, 0xf4, 0xb7, 0x23, 0x48, 0xb7, 0x94, 0x84, 0x97, 0x5b, 0xff, 0x00, 0x32, 0xa9, 0xdd, 0xfc, 0xed, 0x9b, 0x7e, 0x0d, 0x9e, 0x52, 0x4a, 0x95, 0x61, 0xff, 0xd0, 0xf3, 0x3b, 0xa7, 0x70, 0xee, 0x01, 0x8f, 0xb9, 0x59, 0xfa, 0x7e, 0xdf, 0xe4, 0xc8, 0xf9, 0x2a, 0xc2, 0x5c, 0x63, 0xc3, 0x54, 0x67, 0x87, 0x6e, 0x10, 0x35, 0x68, 0xd4, 0x79, 0x1e, 0x53, 0x4a, 0xe0, 0xdc, 0xe9, 0xb8, 0x1f, 0x6a, 0xda, 0x6c, 0x25, 0x94, 0x37, 0xb0, 0xd0, 0xb8, 0xad, 0x67, 0xe4, 0x55, 0x8a, 0x5b, 0x8b, 0x82, 0xc0, 0x6f, 0x76, 0x80, 0x34, 0x49, 0x05, 0x2e, 0x9e, 0xc6, 0x1c, 0x66, 0x31, 0xba, 0x10, 0x23, 0xe0, 0xaf, 0xe1, 0x61, 0x53, 0x43, 0x8d, 0x81, 0xb3, 0x67, 0xef, 0x9e, 0x49, 0x2a, 0x12, 0x6c, 0xb6, 0x63, 0x1a, 0x0c, 0x31, 0xba, 0x55, 0xcd, 0xac, 0xfa, 0x8e, 0xdf, 0x91, 0x6e, 0x91, 0xd9, 0xb3, 0xc9, 0x73, 0x90, 0x7a, 0xab, 0x6a, 0xc2, 0xa4, 0x60, 0xe2, 0x8f, 0xd2, 0x38, 0x03, 0x7d, 0x9e, 0x0d, 0xff, 0x00, 0xcc, 0xd6, 0xd3, 0x6b, 0x71, 0x67, 0xd2, 0x3e, 0x64, 0x72, 0xab, 0xdb, 0x8d, 0x54, 0x39, 0xc5, 0x83, 0x6b, 0x3d, 0xee, 0x2e, 0xd4, 0x92, 0x3c, 0x4a, 0x56, 0xba, 0xb4, 0x79, 0x5c, 0xf7, 0xb2, 0x96, 0x6c, 0x8d, 0xaf, 0x80, 0x48, 0x3c, 0xf0, 0xb2, 0x1f, 0x63, 0x9c, 0xe9, 0x3f, 0x24, 0x5c, 0xdb, 0xdd, 0x76, 0x43, 0xde, 0xfd, 0x5c, 0xe3, 0x24, 0xfc, 0x50, 0x00, 0x93, 0x0a, 0x78, 0x8a, 0x0d, 0x49, 0xca, 0xcf, 0x93, 0x63, 0x1b, 0x7d, 0xd7, 0x57, 0x50, 0xd5, 0xef, 0x70, 0x6b, 0x4f, 0xc7, 0x45, 0xdb, 0x74, 0x9e, 0x8d, 0x5e, 0x33, 0x83, 0xd8, 0x37, 0xdd, 0xc3, 0xac, 0x3d, 0xbf, 0x92, 0xc5, 0x5b, 0xea, 0xbf, 0xd5, 0x62, 0xc0, 0xdc, 0xbc, 0xbd, 0x2d, 0x22, 0x5a, 0xcf, 0xdd, 0x69, 0xff, 0x00, 0xd1, 0x8e, 0x5d, 0xa5, 0x38, 0xb5, 0xb0, 0x00, 0xc6, 0xc4, 0x24, 0x4a, 0xd6, 0x8d, 0x18, 0x04, 0x49, 0x88, 0x9e, 0x55, 0xd6, 0x61, 0xb0, 0xc1, 0x70, 0x32, 0xdd, 0x3c, 0x95, 0xda, 0xf1, 0xfe, 0xf5, 0x62, 0xbc, 0x76, 0x8e, 0x75, 0x28, 0x02, 0xa2, 0xe7, 0x7d, 0x92, 0xb9, 0x84, 0x96, 0x96, 0xda, 0xf7, 0x70, 0x12, 0x4e, 0x5a, 0xff, 0x00, 0xff, 0xd1, 0xf3, 0x7a, 0x21, 0xaf, 0xde, 0xef, 0xa2, 0x22, 0x55, 0xfc, 0x5a, 0xbd, 0x42, 0xfb, 0x08, 0xfa, 0x67, 0x4f, 0x82, 0xcd, 0x6d, 0x85, 0xc0, 0x56, 0x3b, 0x90, 0xb7, 0xf0, 0x2a, 0x0e, 0x63, 0x58, 0x3b, 0xf2, 0xa3, 0x9e, 0x8c, 0xb8, 0x86, 0xbe, 0x49, 0xf1, 0x2c, 0x0c, 0x86, 0xb4, 0x4c, 0x69, 0xe4, 0xaf, 0x6e, 0xcc, 0x6b, 0x7d, 0x46, 0xb3, 0x70, 0xec, 0x38, 0x51, 0x7d, 0x02, 0x8a, 0xc7, 0xa6, 0xd9, 0x20, 0x68, 0x0f, 0x8f, 0x8a, 0xcf, 0xc9, 0xc2, 0xea, 0x59, 0x5b, 0x48, 0xb0, 0x91, 0xae, 0xe6, 0xc9, 0x03, 0xc9, 0x30, 0x51, 0x66, 0xd4, 0x0d, 0xad, 0xbd, 0x5f, 0x53, 0xcc, 0x6b, 0xb6, 0x90, 0x5a, 0x3b, 0x83, 0x0b, 0x43, 0x17, 0x31, 0xd6, 0xc3, 0x6e, 0x12, 0x3b, 0x79, 0xac, 0xc1, 0x89, 0x47, 0xd9, 0xe8, 0x63, 0x98, 0x45, 0xed, 0x6c, 0x5a, 0xf1, 0xa0, 0x27, 0xc5, 0x5b, 0xc3, 0x6f, 0xa6, 0xe0, 0x1c, 0x7d, 0xb3, 0xa2, 0x69, 0x34, 0x7b, 0xae, 0x1a, 0x8d, 0x45, 0x17, 0x9d, 0xeb, 0xfd, 0x21, 0xd8, 0xb9, 0xae, 0xb5, 0x80, 0xbb, 0x1e, 0xd2, 0x5c, 0xd7, 0x78, 0x13, 0xf9, 0xae, 0x4b, 0xea, 0xc7, 0x4a, 0x39, 0xbd, 0x55, 0xb3, 0xed, 0x66, 0x38, 0xf5, 0x09, 0x22, 0x41, 0x23, 0xe8, 0x37, 0xfb, 0x4b, 0xa1, 0xeb, 0xd6, 0xfe, 0x88, 0x31, 0xbf, 0x41, 0xc0, 0xee, 0xd2, 0x74, 0x02, 0x78, 0x53, 0xfa, 0x97, 0x43, 0x19, 0x85, 0x65, 0xff, 0x00, 0x9d, 0x71, 0x33, 0xe4, 0x1a, 0x7d, 0x8d, 0x53, 0x42, 0x56, 0x35, 0x6b, 0xe5, 0x80, 0x06, 0xc7, 0x57, 0xa7, 0xc4, 0xa9, 0xdb, 0xb6, 0x81, 0x1f, 0xeb, 0xd9, 0x69, 0x56, 0xc2, 0xd0, 0x00, 0xe5, 0x55, 0xc0, 0x12, 0xc2, 0xd7, 0x4e, 0xa2, 0x5a, 0x7c, 0x0a, 0xd0, 0x63, 0x9a, 0xd1, 0xaf, 0xd2, 0xe2, 0x3c, 0x12, 0x62, 0x66, 0xc6, 0x42, 0x23, 0x5a, 0x49, 0x8f, 0x10, 0xa2, 0xd2, 0x3e, 0x28, 0x9d, 0xc4, 0x88, 0x09, 0x29, 0x16, 0xc3, 0x3c, 0x24, 0x8d, 0xe6, 0x92, 0x72, 0x1f, 0xff, 0xd2, 0xf3, 0xbb, 0xb0, 0xfe, 0xcb, 0x99, 0xe9, 0xce, 0xf6, 0x88, 0x2d, 0x77, 0x91, 0x5b, 0x3d, 0x3d, 0xd0, 0xe6, 0x90, 0xa9, 0x65, 0x57, 0x38, 0x95, 0xdd, 0xcb, 0x9a, 0x7d, 0xce, 0xf2, 0x3f, 0x44, 0x23, 0x60, 0x58, 0x76, 0xe9, 0xca, 0x8c, 0xea, 0x1b, 0x31, 0x02, 0x32, 0x23, 0xea, 0xee, 0xb1, 0xcd, 0xb0, 0xc7, 0x87, 0x74, 0x7a, 0xeb, 0x70, 0x1a, 0x71, 0xe1, 0xfe, 0xe4, 0x1c, 0x1d, 0xae, 0xe5, 0x69, 0xd8, 0xfa, 0x99, 0x50, 0x0d, 0x1a, 0xf7, 0x2a, 0x3a, 0x0c, 0xf4, 0x1a, 0x8e, 0xc7, 0x27, 0x5d, 0xbf, 0x18, 0x41, 0xdc, 0xc2, 0xf0, 0x7f, 0x74, 0xf6, 0x3a, 0x22, 0x66, 0xdb, 0x68, 0xc6, 0x80, 0x48, 0x6b, 0x88, 0x06, 0x39, 0x0d, 0xee, 0xaa, 0x1f, 0xb3, 0xd5, 0x1b, 0x83, 0xd8, 0x3b, 0x38, 0x8f, 0x69, 0xfe, 0xdf, 0xd1, 0x4d, 0x29, 0xa1, 0x4c, 0x7a, 0xf4, 0xbf, 0xa7, 0x92, 0xcf, 0xa5, 0x20, 0x08, 0xf3, 0xf6, 0xff, 0x00, 0x15, 0xbb, 0xd1, 0x31, 0xd9, 0x5e, 0x3d, 0x75, 0x56, 0x36, 0x88, 0x00, 0x81, 0xe0, 0x16, 0x5e, 0x55, 0x74, 0x3f, 0x00, 0x9d, 0xe0, 0xcc, 0x69, 0xe7, 0x3a, 0x2d, 0xbe, 0x90, 0x00, 0xa9, 0xae, 0xef, 0x1f, 0x95, 0x4b, 0x0d, 0x9a, 0xdc, 0xc7, 0x45, 0xfe, 0xb1, 0x7d, 0x60, 0xa7, 0xa1, 0xe0, 0x1f, 0x4e, 0x1d, 0x99, 0x69, 0x02, 0x9a, 0xcf, 0x1f, 0xca, 0x7b, 0xbf, 0x90, 0xc5, 0xc2, 0xb3, 0xeb, 0x57, 0xd6, 0x03, 0x6b, 0xae, 0x39, 0xb6, 0x82, 0xe3, 0x31, 0xa1, 0x68, 0xf2, 0x6b, 0x5c, 0x12, 0xfa, 0xe1, 0x91, 0x66, 0x47, 0x5d, 0xb8, 0x3b, 0x4f, 0x44, 0x36, 0xb6, 0x8f, 0x28, 0xdd, 0xff, 0x00, 0x7e, 0x46, 0xab, 0x12, 0x2b, 0x65, 0x55, 0x32, 0xa7, 0x62, 0xb6, 0xbd, 0xf7, 0x64, 0x10, 0xdb, 0x03, 0x9f, 0x1b, 0x9e, 0xc7, 0xd9, 0xb8, 0x3b, 0x1f, 0x67, 0xf3, 0x6c, 0x52, 0x80, 0xd7, 0x7d, 0x0f, 0xea, 0x7f, 0x5d, 0x1d, 0x67, 0xa6, 0x0b, 0x1e, 0x47, 0xda, 0x69, 0x3b, 0x2e, 0x03, 0xc7, 0xf3, 0x5f, 0x1f, 0xf0, 0x8b, 0xa1, 0x02, 0x46, 0xba, 0x79, 0xaf, 0x32, 0xff, 0x00, 0x16, 0xad, 0xca, 0x1d, 0x57, 0x2a, 0xdc, 0x79, 0x18, 0x41, 0xb0, 0xf6, 0x9e, 0xe4, 0x9f, 0xd0, 0x8f, 0xeb, 0x31, 0xab, 0xd2, 0x83, 0xa4, 0xcb, 0x8c, 0xb8, 0xa0, 0x42, 0x12, 0x7b, 0x67, 0x9f, 0x2f, 0xf5, 0x09, 0x26, 0x96, 0xc4, 0xce, 0xa9, 0x20, 0xa7, 0xff, 0xd3, 0xf3, 0x2f, 0xb4, 0x5d, 0xe9, 0x0a, 0xb7, 0x9f, 0x4c, 0x19, 0xdb, 0x3a, 0x2d, 0x5e, 0x94, 0xfd, 0xc4, 0xb7, 0xc5, 0x62, 0xf9, 0x2b, 0xfd, 0x2e, 0xe3, 0x5d, 0xe0, 0x7c, 0x13, 0x48, 0xd1, 0x92, 0x12, 0xa9, 0x0b, 0x7a, 0xbc, 0x2d, 0xc2, 0x7f, 0x92, 0x60, 0xab, 0x4e, 0x79, 0x2e, 0x00, 0xf0, 0xaa, 0xe1, 0xda, 0x3d, 0x43, 0xfc, 0xad, 0x55, 0xbb, 0x80, 0x79, 0x81, 0xa0, 0xe6, 0x54, 0x32, 0x6d, 0x02, 0xbe, 0xf3, 0x61, 0x81, 0xa8, 0x44, 0x14, 0x03, 0x59, 0x0e, 0x1c, 0xf6, 0x1f, 0xdc, 0xb2, 0xec, 0xa3, 0x23, 0x77, 0xe8, 0x6e, 0x70, 0xf2, 0x25, 0x1f, 0x1f, 0x17, 0xa9, 0x6d, 0x71, 0x36, 0x97, 0x47, 0x00, 0xa4, 0x02, 0xe0, 0x2c, 0x7c, 0xc1, 0xab, 0xd5, 0x31, 0x85, 0x35, 0xd4, 0xe6, 0x13, 0x02, 0xd6, 0x4b, 0x67, 0x48, 0x2b, 0xa9, 0xe9, 0x2e, 0x02, 0xb6, 0x4f, 0x82, 0xe5, 0x7a, 0x95, 0x19, 0xc6, 0x87, 0x3d, 0xfb, 0xa2, 0xb8, 0x79, 0x1e, 0x4d, 0x3b, 0x96, 0xcf, 0x4f, 0xbd, 0xcd, 0xa2, 0xa2, 0x1f, 0xa0, 0x82, 0xd3, 0xfc, 0x97, 0x05, 0x24, 0x36, 0x6b, 0xf3, 0x31, 0xa2, 0x35, 0x79, 0xef, 0xad, 0xf8, 0xae, 0xaf, 0xaf, 0xd8, 0xf2, 0xd8, 0x6d, 0xed, 0x6b, 0xda, 0x7b, 0x18, 0x1b, 0x5d, 0xff, 0x00, 0x52, 0xb1, 0x6d, 0xf0, 0x81, 0x31, 0xca, 0xf4, 0x6e, 0xb1, 0x80, 0xce, 0xb1, 0x84, 0xc0, 0x21, 0xb7, 0xd6, 0x77, 0x31, 0xd1, 0x27, 0xc1, 0xcd, 0xfe, 0xd2, 0xe3, 0xec, 0xe8, 0x1d, 0x45, 0x96, 0xb0, 0x9a, 0xb7, 0x87, 0x3f, 0x68, 0x2d, 0xf7, 0x01, 0x1f, 0xbe, 0xd1, 0xf4, 0x7f, 0xb4, 0xa4, 0x0d, 0x77, 0xbb, 0xfa, 0x8f, 0x80, 0x3a, 0x7f, 0x43, 0xaa, 0xe2, 0xdf, 0xd2, 0x65, 0x7e, 0x95, 0xe4, 0x0f, 0x1f, 0xa1, 0xfe, 0x6b, 0x16, 0x9f, 0x52, 0xfa, 0xc1, 0xd3, 0xba, 0x6d, 0x26, 0xdc, 0xac, 0x86, 0xd4, 0xd9, 0x0d, 0x31, 0x2e, 0x74, 0x9e, 0xdb, 0x59, 0x2e, 0x55, 0xe8, 0xc9, 0xb2, 0x96, 0xd5, 0x4b, 0x9f, 0xb8, 0x6d, 0xda, 0x1c, 0x04, 0x09, 0x03, 0xfe, 0x8a, 0xc6, 0xfa, 0xd3, 0xf5, 0x6a, 0xbe, 0xbb, 0x5b, 0x2e, 0xc6, 0xb5, 0x94, 0xe6, 0xd5, 0x20, 0x97, 0x7d, 0x1b, 0x1b, 0xf9, 0xad, 0x7c, 0x7d, 0x17, 0xb7, 0xf3, 0x1e, 0x92, 0x1b, 0x7f, 0xf8, 0xe0, 0x7d, 0x59, 0xdd, 0xfd, 0x32, 0xd8, 0x8f, 0xa5, 0xe8, 0x3a, 0x12, 0x5c, 0x3f, 0xfc, 0xc4, 0xfa, 0xc3, 0xb3, 0x77, 0xa7, 0x56, 0xed, 0xdb, 0x76, 0x7a, 0x8d, 0xdd, 0x1f, 0xbf, 0xfd, 0x44, 0x92, 0x56, 0x8f, 0xff, 0xd4, 0xf2, 0xe8, 0x86, 0x17, 0x1e, 0xfa, 0x04, 0x56, 0x4b, 0x43, 0x6c, 0x6f, 0x2d, 0xe5, 0x46, 0x01, 0x64, 0x2b, 0x14, 0x32, 0x5b, 0xb4, 0xa0, 0x52, 0x1d, 0xde, 0x9b, 0x94, 0xdb, 0xab, 0x6b, 0x81, 0xf7, 0x05, 0xb0, 0xd7, 0x07, 0xb2, 0x27, 0x55, 0xc6, 0x57, 0x65, 0xd8, 0x76, 0x6e, 0x64, 0xed, 0xee, 0x16, 0xce, 0x27, 0x57, 0x63, 0xda, 0x0c, 0xc2, 0x8e, 0x51, 0x67, 0x84, 0xfa, 0x1d, 0xdd, 0x62, 0xc7, 0x07, 0xe9, 0xf7, 0xa3, 0xd6, 0x6c, 0x02, 0x41, 0x55, 0x31, 0xf3, 0x2b, 0xb3, 0xba, 0x2b, 0x2e, 0x68, 0x24, 0x1d, 0x47, 0x64, 0xca, 0xa6, 0x50, 0x41, 0x65, 0x90, 0x6c, 0xb1, 0xa5, 0xae, 0x33, 0x23, 0x51, 0xe4, 0xab, 0x7d, 0x5d, 0xcb, 0xb6, 0xcc, 0x37, 0xd0, 0x40, 0x73, 0x71, 0xde, 0x58, 0x09, 0xe7, 0x6f, 0x2c, 0x44, 0xc9, 0xc9, 0xae, 0xba, 0x9d, 0x63, 0x88, 0x01, 0xa0, 0x95, 0x9d, 0xf5, 0x3f, 0x2a, 0xe6, 0x67, 0xdb, 0x50, 0x83, 0x55, 0xad, 0x36, 0x3e, 0x78, 0x10, 0x74, 0x77, 0xfd, 0x2d, 0xaa, 0x4c, 0x7d, 0x58, 0x73, 0x91, 0xa0, 0x0f, 0x51, 0x45, 0xb7, 0x33, 0xdd, 0x58, 0x69, 0x1d, 0xd8, 0x0c, 0x9f, 0x96, 0x88, 0x19, 0x99, 0x19, 0xac, 0xcf, 0xa3, 0xd2, 0xad, 0xb5, 0xdb, 0x76, 0x8f, 0xad, 0xc4, 0xea, 0xcf, 0xdf, 0x7e, 0xdf, 0xdd, 0xfc, 0xd5, 0xa3, 0x5e, 0x43, 0x2b, 0x6b, 0xb2, 0xad, 0x3b, 0x6a, 0xa4, 0x13, 0xa7, 0x04, 0xac, 0x7a, 0x6f, 0xb3, 0x23, 0x26, 0xcc, 0xfb, 0xb4, 0x75, 0x8e, 0x01, 0x83, 0xf7, 0x58, 0x3e, 0x8b, 0x53, 0xa7, 0x2a, 0x1a, 0x31, 0x42, 0x36, 0x5d, 0x4c, 0x9a, 0xf2, 0xdc, 0xc6, 0xfe, 0x98, 0xb4, 0x34, 0xcb, 0x48, 0x0a, 0x8f, 0xdb, 0xb2, 0xeb, 0x76, 0xd6, 0x07, 0x5c, 0x59, 0xc9, 0x64, 0x8f, 0x93, 0xa7, 0x73, 0x16, 0x83, 0xaf, 0x0e, 0xa4, 0x33, 0xef, 0x50, 0xc5, 0x0c, 0xda, 0x59, 0x10, 0x06, 0x8a, 0x2e, 0x29, 0x0e, 0xac, 0xc2, 0x31, 0x3d, 0x36, 0x69, 0x7e, 0xd6, 0xcc, 0xf5, 0x3d, 0x6f, 0xb3, 0xeb, 0x1b, 0x76, 0xef, 0x3b, 0xa3, 0xfa, 0xc9, 0x2b, 0x5f, 0x66, 0x6f, 0xa9, 0x1e, 0x73, 0xf2, 0x49, 0x2e, 0x39, 0xf7, 0x4f, 0xb7, 0x8d, 0xff, 0xd5, 0xf3, 0x26, 0xfe, 0x0a, 0xc5, 0x1b, 0xa7, 0xcb, 0xb2, 0xcf, 0x49, 0x03, 0xb2, 0x46, 0xee, 0xd9, 0xd9, 0xb3, 0xf4, 0x9f, 0x25, 0x4a, 0xdf, 0x4b, 0x77, 0xe8, 0x27, 0xd4, 0xef, 0x1c, 0x2a, 0x29, 0x26, 0xc5, 0x7c, 0x9d, 0x6c, 0x7f, 0xb7, 0x6e, 0x1b, 0x26, 0x7f, 0x05, 0xa3, 0xfe, 0x53, 0x8d, 0x62, 0x57, 0x30, 0x92, 0x12, 0xfa, 0x2f, 0x86, 0xdf, 0xa4, 0xec, 0x67, 0xfe, 0xd0, 0xf4, 0xff, 0x00, 0x4d, 0xfc, 0xdf, 0x78, 0xe1, 0x68, 0x7d, 0x54, 0x99, 0xbf, 0x6f, 0xf3, 0xbe, 0xdf, 0x8e, 0xdd, 0x7f, 0xef, 0xeb, 0x97, 0x49, 0x3e, 0x3b, 0x7f, 0x06, 0x2c, 0x9f, 0x37, 0x5f, 0xf0, 0x9f, 0x4c, 0xeb, 0x7b, 0xbf, 0x67, 0x55, 0xe8, 0xff, 0x00, 0x31, 0xbc, 0x7a, 0x9e, 0x31, 0xdb, 0xfe, 0x92, 0xae, 0x37, 0x7a, 0x4d, 0xdb, 0xe2, 0x17, 0x9d, 0xa4, 0xa3, 0xc9, 0xba, 0xfc, 0x7b, 0x7d, 0x5f, 0x52, 0xa7, 0x7e, 0xd1, 0x28, 0xf8, 0xf3, 0xb0, 0xc7, 0x32, 0xbc, 0x99, 0x24, 0xc5, 0xe3, 0xab, 0xeb, 0x1f, 0xa4, 0xf5, 0xfc, 0xe1, 0x25, 0xe4, 0xe9, 0x24, 0x97, 0xff, 0xd9, 0xff, 0xed, 0x2e, 0x1c, 0x50, 0x68, 0x6f, 0x74, 0x6f, 0x73, 0x68, 0x6f, 0x70, 0x20, 0x33, 0x2e, 0x30, 0x00, 0x38, 0x42, 0x49, 0x4d, 0x04, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2b, 0x1c, 0x02, 0x00, 0x00, 0x02, 0x00, 0x02, 0x1c, 0x02, 0x78, 0x00, 0x1f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x38, 0x42, 0x49, 0x4d, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0xfb, 0x09, 0xa6, 0xbd, 0x07, 0x4c, 0x2a, 0x36, 0x9d, 0x8f, 0xe2, 0xcc, 0x57, 0xa9, 0xac, 0x85, 0x38, 0x42, 0x49, 0x4d, 0x03, 0xea, 0x00, 0x00, 0x00, 0x00, 0x1d, 0xb0, 0x3c, 0x3f, 0x78, 0x6d, 0x6c, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3d, 0x22, 0x31, 0x2e, 0x30, 0x22, 0x20, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x3d, 0x22, 0x55, 0x54, 0x46, 0x2d, 0x38, 0x22, 0x3f, 0x3e, 0x0a, 0x3c, 0x21, 0x44, 0x4f, 0x43, 0x54, 0x59, 0x50, 0x45, 0x20, 0x70, 0x6c, 0x69, 0x73, 0x74, 0x20, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x43, 0x20, 0x22, 0x2d, 0x2f, 0x2f, 0x41, 0x70, 0x70, 0x6c, 0x65, 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x72, 0x2f, 0x2f, 0x44, 0x54, 0x44, 0x20, 0x50, 0x4c, 0x49, 0x53, 0x54, 0x20, 0x31, 0x2e, 0x30, 0x2f, 0x2f, 0x45, 0x4e, 0x22, 0x20, 0x22, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x54, 0x44, 0x73, 0x2f, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x4c, 0x69, 0x73, 0x74, 0x2d, 0x31, 0x2e, 0x30, 0x2e, 0x64, 0x74, 0x64, 0x22, 0x3e, 0x0a, 0x3c, 0x70, 0x6c, 0x69, 0x73, 0x74, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3d, 0x22, 0x31, 0x2e, 0x30, 0x22, 0x3e, 0x0a, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x4d, 0x48, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x74, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x69, 0x74, 0x65, 0x6d, 0x41, 0x72, 0x72, 0x61, 0x79, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x4d, 0x48, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x74, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x37, 0x32, 0x3c, 0x2f, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6d, 0x6f, 0x64, 0x44, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x32, 0x30, 0x30, 0x37, 0x2d, 0x30, 0x31, 0x2d, 0x33, 0x30, 0x54, 0x32, 0x32, 0x3a, 0x30, 0x38, 0x3a, 0x34, 0x31, 0x5a, 0x3c, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x30, 0x3c, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x2f, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x4d, 0x4f, 0x72, 0x69, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x69, 0x74, 0x65, 0x6d, 0x41, 0x72, 0x72, 0x61, 0x79, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x4d, 0x4f, 0x72, 0x69, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x31, 0x3c, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6d, 0x6f, 0x64, 0x44, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x32, 0x30, 0x30, 0x37, 0x2d, 0x30, 0x31, 0x2d, 0x33, 0x30, 0x54, 0x32, 0x32, 0x3a, 0x30, 0x38, 0x3a, 0x34, 0x31, 0x5a, 0x3c, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x30, 0x3c, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x2f, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x4d, 0x53, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x69, 0x74, 0x65, 0x6d, 0x41, 0x72, 0x72, 0x61, 0x79, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x4d, 0x53, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x31, 0x3c, 0x2f, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6d, 0x6f, 0x64, 0x44, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x32, 0x30, 0x30, 0x37, 0x2d, 0x30, 0x31, 0x2d, 0x33, 0x30, 0x54, 0x32, 0x32, 0x3a, 0x30, 0x38, 0x3a, 0x34, 0x31, 0x5a, 0x3c, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x30, 0x3c, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x2f, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x4d, 0x56, 0x65, 0x72, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x69, 0x74, 0x65, 0x6d, 0x41, 0x72, 0x72, 0x61, 0x79, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x4d, 0x56, 0x65, 0x72, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x37, 0x32, 0x3c, 0x2f, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6d, 0x6f, 0x64, 0x44, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x32, 0x30, 0x30, 0x37, 0x2d, 0x30, 0x31, 0x2d, 0x33, 0x30, 0x54, 0x32, 0x32, 0x3a, 0x30, 0x38, 0x3a, 0x34, 0x31, 0x5a, 0x3c, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x30, 0x3c, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x2f, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x4d, 0x56, 0x65, 0x72, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x69, 0x74, 0x65, 0x6d, 0x41, 0x72, 0x72, 0x61, 0x79, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x4d, 0x56, 0x65, 0x72, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x31, 0x3c, 0x2f, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6d, 0x6f, 0x64, 0x44, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x32, 0x30, 0x30, 0x37, 0x2d, 0x30, 0x31, 0x2d, 0x33, 0x30, 0x54, 0x32, 0x32, 0x3a, 0x30, 0x38, 0x3a, 0x34, 0x31, 0x5a, 0x3c, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x30, 0x3c, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x2f, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x73, 0x75, 0x62, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x61, 0x70, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x4d, 0x41, 0x64, 0x6a, 0x75, 0x73, 0x74, 0x65, 0x64, 0x50, 0x61, 0x67, 0x65, 0x52, 0x65, 0x63, 0x74, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x69, 0x74, 0x65, 0x6d, 0x41, 0x72, 0x72, 0x61, 0x79, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x4d, 0x41, 0x64, 0x6a, 0x75, 0x73, 0x74, 0x65, 0x64, 0x50, 0x61, 0x67, 0x65, 0x52, 0x65, 0x63, 0x74, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x30, 0x2e, 0x30, 0x3c, 0x2f, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x30, 0x2e, 0x30, 0x3c, 0x2f, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x37, 0x33, 0x34, 0x3c, 0x2f, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x35, 0x37, 0x36, 0x3c, 0x2f, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6d, 0x6f, 0x64, 0x44, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x32, 0x30, 0x30, 0x37, 0x2d, 0x30, 0x31, 0x2d, 0x33, 0x30, 0x54, 0x32, 0x32, 0x3a, 0x30, 0x38, 0x3a, 0x34, 0x31, 0x5a, 0x3c, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x30, 0x3c, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x4d, 0x41, 0x64, 0x6a, 0x75, 0x73, 0x74, 0x65, 0x64, 0x50, 0x61, 0x70, 0x65, 0x72, 0x52, 0x65, 0x63, 0x74, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x69, 0x74, 0x65, 0x6d, 0x41, 0x72, 0x72, 0x61, 0x79, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x4d, 0x41, 0x64, 0x6a, 0x75, 0x73, 0x74, 0x65, 0x64, 0x50, 0x61, 0x70, 0x65, 0x72, 0x52, 0x65, 0x63, 0x74, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x2d, 0x31, 0x38, 0x3c, 0x2f, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x2d, 0x31, 0x38, 0x3c, 0x2f, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x37, 0x37, 0x34, 0x3c, 0x2f, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x35, 0x39, 0x34, 0x3c, 0x2f, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6d, 0x6f, 0x64, 0x44, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x32, 0x30, 0x30, 0x37, 0x2d, 0x30, 0x31, 0x2d, 0x33, 0x30, 0x54, 0x32, 0x32, 0x3a, 0x30, 0x38, 0x3a, 0x34, 0x31, 0x5a, 0x3c, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x30, 0x3c, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x70, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x4d, 0x50, 0x61, 0x70, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x70, 0x6d, 0x2e, 0x50, 0x6f, 0x73, 0x74, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x69, 0x74, 0x65, 0x6d, 0x41, 0x72, 0x72, 0x61, 0x79, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x70, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x4d, 0x50, 0x61, 0x70, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x6e, 0x61, 0x2d, 0x6c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x70, 0x6d, 0x2e, 0x50, 0x6f, 0x73, 0x74, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6d, 0x6f, 0x64, 0x44, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x32, 0x30, 0x30, 0x33, 0x2d, 0x30, 0x37, 0x2d, 0x30, 0x31, 0x54, 0x31, 0x37, 0x3a, 0x34, 0x39, 0x3a, 0x33, 0x36, 0x5a, 0x3c, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x31, 0x3c, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x70, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x4d, 0x55, 0x6e, 0x61, 0x64, 0x6a, 0x75, 0x73, 0x74, 0x65, 0x64, 0x50, 0x61, 0x67, 0x65, 0x52, 0x65, 0x63, 0x74, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x70, 0x6d, 0x2e, 0x50, 0x6f, 0x73, 0x74, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x69, 0x74, 0x65, 0x6d, 0x41, 0x72, 0x72, 0x61, 0x79, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x70, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x4d, 0x55, 0x6e, 0x61, 0x64, 0x6a, 0x75, 0x73, 0x74, 0x65, 0x64, 0x50, 0x61, 0x67, 0x65, 0x52, 0x65, 0x63, 0x74, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x30, 0x2e, 0x30, 0x3c, 0x2f, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x30, 0x2e, 0x30, 0x3c, 0x2f, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x37, 0x33, 0x34, 0x3c, 0x2f, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x35, 0x37, 0x36, 0x3c, 0x2f, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6d, 0x6f, 0x64, 0x44, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x32, 0x30, 0x30, 0x37, 0x2d, 0x30, 0x31, 0x2d, 0x33, 0x30, 0x54, 0x32, 0x32, 0x3a, 0x30, 0x38, 0x3a, 0x34, 0x31, 0x5a, 0x3c, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x30, 0x3c, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x70, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x4d, 0x55, 0x6e, 0x61, 0x64, 0x6a, 0x75, 0x73, 0x74, 0x65, 0x64, 0x50, 0x61, 0x70, 0x65, 0x72, 0x52, 0x65, 0x63, 0x74, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x70, 0x6d, 0x2e, 0x50, 0x6f, 0x73, 0x74, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x69, 0x74, 0x65, 0x6d, 0x41, 0x72, 0x72, 0x61, 0x79, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x70, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x4d, 0x55, 0x6e, 0x61, 0x64, 0x6a, 0x75, 0x73, 0x74, 0x65, 0x64, 0x50, 0x61, 0x70, 0x65, 0x72, 0x52, 0x65, 0x63, 0x74, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x2d, 0x31, 0x38, 0x3c, 0x2f, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x2d, 0x31, 0x38, 0x3c, 0x2f, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x37, 0x37, 0x34, 0x3c, 0x2f, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x35, 0x39, 0x34, 0x3c, 0x2f, 0x72, 0x65, 0x61, 0x6c, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6d, 0x6f, 0x64, 0x44, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x32, 0x30, 0x30, 0x37, 0x2d, 0x30, 0x31, 0x2d, 0x33, 0x30, 0x54, 0x32, 0x32, 0x3a, 0x30, 0x38, 0x3a, 0x34, 0x31, 0x5a, 0x3c, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x30, 0x3c, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x70, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x70, 0x64, 0x2e, 0x50, 0x4d, 0x50, 0x61, 0x70, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x70, 0x6d, 0x2e, 0x50, 0x6f, 0x73, 0x74, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x69, 0x74, 0x65, 0x6d, 0x41, 0x72, 0x72, 0x61, 0x79, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x70, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x70, 0x64, 0x2e, 0x50, 0x4d, 0x50, 0x61, 0x70, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x55, 0x53, 0x20, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x70, 0x6d, 0x2e, 0x50, 0x6f, 0x73, 0x74, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6d, 0x6f, 0x64, 0x44, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x32, 0x30, 0x30, 0x33, 0x2d, 0x30, 0x37, 0x2d, 0x30, 0x31, 0x54, 0x31, 0x37, 0x3a, 0x34, 0x39, 0x3a, 0x33, 0x36, 0x5a, 0x3c, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x31, 0x3c, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x09, 0x3c, 0x2f, 0x61, 0x72, 0x72, 0x61, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x30, 0x30, 0x2e, 0x32, 0x30, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x63, 0x6b, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x2f, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x70, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x30, 0x30, 0x2e, 0x32, 0x30, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x63, 0x6b, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x3c, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x2f, 0x3e, 0x0a, 0x09, 0x3c, 0x6b, 0x65, 0x79, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x3c, 0x2f, 0x6b, 0x65, 0x79, 0x3e, 0x0a, 0x09, 0x3c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x70, 0x70, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x3e, 0x0a, 0x3c, 0x2f, 0x64, 0x69, 0x63, 0x74, 0x3e, 0x0a, 0x3c, 0x2f, 0x70, 0x6c, 0x69, 0x73, 0x74, 0x3e, 0x0a, 0x38, 0x42, 0x49, 0x4d, 0x03, 0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x00, 0x00, 0x48, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x02, 0xde, 0x02, 0x40, 0xff, 0xee, 0xff, 0xee, 0x03, 0x06, 0x02, 0x52, 0x03, 0x67, 0x05, 0x28, 0x03, 0xfc, 0x00, 0x02, 0x00, 0x00, 0x00, 0x48, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x02, 0xd8, 0x02, 0x28, 0x00, 0x01, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 0x03, 0x03, 0x00, 0x00, 0x00, 0x01, 0x7f, 0xff, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0x08, 0x00, 0x19, 0x01, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x42, 0x49, 0x4d, 0x03, 0xed, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x48, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x48, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x38, 0x42, 0x49, 0x4d, 0x04, 0x26, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x80, 0x00, 0x00, 0x38, 0x42, 0x49, 0x4d, 0x04, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x1e, 0x38, 0x42, 0x49, 0x4d, 0x04, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x1e, 0x38, 0x42, 0x49, 0x4d, 0x03, 0xf3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x38, 0x42, 0x49, 0x4d, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x38, 0x42, 0x49, 0x4d, 0x27, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x38, 0x42, 0x49, 0x4d, 0x03, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x00, 0x2f, 0x66, 0x66, 0x00, 0x01, 0x00, 0x6c, 0x66, 0x66, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x2f, 0x66, 0x66, 0x00, 0x01, 0x00, 0xa1, 0x99, 0x9a, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x32, 0x00, 0x00, 0x00, 0x01, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x35, 0x00, 0x00, 0x00, 0x01, 0x00, 0x2d, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x38, 0x42, 0x49, 0x4d, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0xe8, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0xe8, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0xe8, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0xe8, 0x00, 0x00, 0x38, 0x42, 0x49, 0x4d, 0x04, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x02, 0x40, 0x00, 0x00, 0x02, 0x40, 0x00, 0x00, 0x00, 0x00, 0x38, 0x42, 0x49, 0x4d, 0x04, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x38, 0x42, 0x49, 0x4d, 0x04, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x03, 0x45, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x08, 0x00, 0x44, 0x00, 0x53, 0x00, 0x43, 0x00, 0x30, 0x00, 0x32, 0x00, 0x33, 0x00, 0x32, 0x00, 0x35, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x75, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x06, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x73, 0x4f, 0x62, 0x6a, 0x63, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x63, 0x74, 0x31, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x54, 0x6f, 0x70, 0x20, 0x6c, 0x6f, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x65, 0x66, 0x74, 0x6c, 0x6f, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x74, 0x6f, 0x6d, 0x6c, 0x6f, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x00, 0x52, 0x67, 0x68, 0x74, 0x6c, 0x6f, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x06, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x73, 0x56, 0x6c, 0x4c, 0x73, 0x00, 0x00, 0x00, 0x01, 0x4f, 0x62, 0x6a, 0x63, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x07, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x49, 0x44, 0x6c, 0x6f, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x6c, 0x6f, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x65, 0x6e, 0x75, 0x6d, 0x00, 0x00, 0x00, 0x0c, 0x45, 0x53, 0x6c, 0x69, 0x63, 0x65, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x6f, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x00, 0x00, 0x00, 0x00, 0x54, 0x79, 0x70, 0x65, 0x65, 0x6e, 0x75, 0x6d, 0x00, 0x00, 0x00, 0x0a, 0x45, 0x53, 0x6c, 0x69, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x00, 0x00, 0x00, 0x00, 0x49, 0x6d, 0x67, 0x20, 0x00, 0x00, 0x00, 0x06, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x73, 0x4f, 0x62, 0x6a, 0x63, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x63, 0x74, 0x31, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x54, 0x6f, 0x70, 0x20, 0x6c, 0x6f, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x65, 0x66, 0x74, 0x6c, 0x6f, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x74, 0x6f, 0x6d, 0x6c, 0x6f, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x00, 0x52, 0x67, 0x68, 0x74, 0x6c, 0x6f, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72, 0x6c, 0x54, 0x45, 0x58, 0x54, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x75, 0x6c, 0x6c, 0x54, 0x45, 0x58, 0x54, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4d, 0x73, 0x67, 0x65, 0x54, 0x45, 0x58, 0x54, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x61, 0x6c, 0x74, 0x54, 0x61, 0x67, 0x54, 0x45, 0x58, 0x54, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x63, 0x65, 0x6c, 0x6c, 0x54, 0x65, 0x78, 0x74, 0x49, 0x73, 0x48, 0x54, 0x4d, 0x4c, 0x62, 0x6f, 0x6f, 0x6c, 0x01, 0x00, 0x00, 0x00, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x54, 0x65, 0x78, 0x74, 0x54, 0x45, 0x58, 0x54, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x68, 0x6f, 0x72, 0x7a, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x6e, 0x75, 0x6d, 0x00, 0x00, 0x00, 0x0f, 0x45, 0x53, 0x6c, 0x69, 0x63, 0x65, 0x48, 0x6f, 0x72, 0x7a, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x00, 0x00, 0x00, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x00, 0x00, 0x00, 0x09, 0x76, 0x65, 0x72, 0x74, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x6e, 0x75, 0x6d, 0x00, 0x00, 0x00, 0x0f, 0x45, 0x53, 0x6c, 0x69, 0x63, 0x65, 0x56, 0x65, 0x72, 0x74, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x00, 0x00, 0x00, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x00, 0x00, 0x00, 0x0b, 0x62, 0x67, 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x54, 0x79, 0x70, 0x65, 0x65, 0x6e, 0x75, 0x6d, 0x00, 0x00, 0x00, 0x11, 0x45, 0x53, 0x6c, 0x69, 0x63, 0x65, 0x42, 0x47, 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x54, 0x79, 0x70, 0x65, 0x00, 0x00, 0x00, 0x00, 0x4e, 0x6f, 0x6e, 0x65, 0x00, 0x00, 0x00, 0x09, 0x74, 0x6f, 0x70, 0x4f, 0x75, 0x74, 0x73, 0x65, 0x74, 0x6c, 0x6f, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x6c, 0x65, 0x66, 0x74, 0x4f, 0x75, 0x74, 0x73, 0x65, 0x74, 0x6c, 0x6f, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x62, 0x6f, 0x74, 0x74, 0x6f, 0x6d, 0x4f, 0x75, 0x74, 0x73, 0x65, 0x74, 0x6c, 0x6f, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x72, 0x69, 0x67, 0x68, 0x74, 0x4f, 0x75, 0x74, 0x73, 0x65, 0x74, 0x6c, 0x6f, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x42, 0x49, 0x4d, 0x04, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x38, 0x42, 0x49, 0x4d, 0x04, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x38, 0x42, 0x49, 0x4d, 0x04, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x09, 0xf9, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x01, 0x2c, 0x00, 0x00, 0x75, 0x30, 0x00, 0x00, 0x09, 0xdd, 0x00, 0x18, 0x00, 0x01, 0xff, 0xd8, 0xff, 0xe0, 0x00, 0x10, 0x4a, 0x46, 0x49, 0x46, 0x00, 0x01, 0x02, 0x01, 0x00, 0x48, 0x00, 0x48, 0x00, 0x00, 0xff, 0xed, 0x00, 0x0c, 0x41, 0x64, 0x6f, 0x62, 0x65, 0x5f, 0x43, 0x4d, 0x00, 0x02, 0xff, 0xee, 0x00, 0x0e, 0x41, 0x64, 0x6f, 0x62, 0x65, 0x00, 0x64, 0x80, 0x00, 0x00, 0x00, 0x01, 0xff, 0xdb, 0x00, 0x84, 0x00, 0x0c, 0x08, 0x08, 0x08, 0x09, 0x08, 0x0c, 0x09, 0x09, 0x0c, 0x11, 0x0b, 0x0a, 0x0b, 0x11, 0x15, 0x0f, 0x0c, 0x0c, 0x0f, 0x15, 0x18, 0x13, 0x13, 0x15, 0x13, 0x13, 0x18, 0x11, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x11, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x01, 0x0d, 0x0b, 0x0b, 0x0d, 0x0e, 0x0d, 0x10, 0x0e, 0x0e, 0x10, 0x14, 0x0e, 0x0e, 0x0e, 0x14, 0x14, 0x0e, 0x0e, 0x0e, 0x0e, 0x14, 0x11, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x11, 0x11, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x11, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0xff, 0xc0, 0x00, 0x11, 0x08, 0x00, 0x64, 0x00, 0x64, 0x03, 0x01, 0x22, 0x00, 0x02, 0x11, 0x01, 0x03, 0x11, 0x01, 0xff, 0xdd, 0x00, 0x04, 0x00, 0x07, 0xff, 0xc4, 0x01, 0x3f, 0x00, 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x01, 0x02, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x01, 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x10, 0x00, 0x01, 0x04, 0x01, 0x03, 0x02, 0x04, 0x02, 0x05, 0x07, 0x06, 0x08, 0x05, 0x03, 0x0c, 0x33, 0x01, 0x00, 0x02, 0x11, 0x03, 0x04, 0x21, 0x12, 0x31, 0x05, 0x41, 0x51, 0x61, 0x13, 0x22, 0x71, 0x81, 0x32, 0x06, 0x14, 0x91, 0xa1, 0xb1, 0x42, 0x23, 0x24, 0x15, 0x52, 0xc1, 0x62, 0x33, 0x34, 0x72, 0x82, 0xd1, 0x43, 0x07, 0x25, 0x92, 0x53, 0xf0, 0xe1, 0xf1, 0x63, 0x73, 0x35, 0x16, 0xa2, 0xb2, 0x83, 0x26, 0x44, 0x93, 0x54, 0x64, 0x45, 0xc2, 0xa3, 0x74, 0x36, 0x17, 0xd2, 0x55, 0xe2, 0x65, 0xf2, 0xb3, 0x84, 0xc3, 0xd3, 0x75, 0xe3, 0xf3, 0x46, 0x27, 0x94, 0xa4, 0x85, 0xb4, 0x95, 0xc4, 0xd4, 0xe4, 0xf4, 0xa5, 0xb5, 0xc5, 0xd5, 0xe5, 0xf5, 0x56, 0x66, 0x76, 0x86, 0x96, 0xa6, 0xb6, 0xc6, 0xd6, 0xe6, 0xf6, 0x37, 0x47, 0x57, 0x67, 0x77, 0x87, 0x97, 0xa7, 0xb7, 0xc7, 0xd7, 0xe7, 0xf7, 0x11, 0x00, 0x02, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x05, 0x06, 0x07, 0x07, 0x06, 0x05, 0x35, 0x01, 0x00, 0x02, 0x11, 0x03, 0x21, 0x31, 0x12, 0x04, 0x41, 0x51, 0x61, 0x71, 0x22, 0x13, 0x05, 0x32, 0x81, 0x91, 0x14, 0xa1, 0xb1, 0x42, 0x23, 0xc1, 0x52, 0xd1, 0xf0, 0x33, 0x24, 0x62, 0xe1, 0x72, 0x82, 0x92, 0x43, 0x53, 0x15, 0x63, 0x73, 0x34, 0xf1, 0x25, 0x06, 0x16, 0xa2, 0xb2, 0x83, 0x07, 0x26, 0x35, 0xc2, 0xd2, 0x44, 0x93, 0x54, 0xa3, 0x17, 0x64, 0x45, 0x55, 0x36, 0x74, 0x65, 0xe2, 0xf2, 0xb3, 0x84, 0xc3, 0xd3, 0x75, 0xe3, 0xf3, 0x46, 0x94, 0xa4, 0x85, 0xb4, 0x95, 0xc4, 0xd4, 0xe4, 0xf4, 0xa5, 0xb5, 0xc5, 0xd5, 0xe5, 0xf5, 0x56, 0x66, 0x76, 0x86, 0x96, 0xa6, 0xb6, 0xc6, 0xd6, 0xe6, 0xf6, 0x27, 0x37, 0x47, 0x57, 0x67, 0x77, 0x87, 0x97, 0xa7, 0xb7, 0xc7, 0xff, 0xda, 0x00, 0x0c, 0x03, 0x01, 0x00, 0x02, 0x11, 0x03, 0x11, 0x00, 0x3f, 0x00, 0xf2, 0xed, 0xb2, 0x8d, 0x4d, 0x45, 0xcd, 0x2f, 0x3f, 0x44, 0x68, 0x93, 0xc3, 0x58, 0xc8, 0xf1, 0x1f, 0x8a, 0x33, 0x86, 0xda, 0x58, 0xc1, 0xa0, 0x02, 0x4f, 0xc4, 0xa1, 0x69, 0xa5, 0x9b, 0x5b, 0x4b, 0x84, 0x73, 0xdf, 0xc9, 0x15, 0xf8, 0xe3, 0xd1, 0x0e, 0x07, 0x93, 0xf3, 0xd1, 0x0f, 0x1c, 0x17, 0xef, 0x2e, 0x3b, 0x5b, 0xdc, 0xff, 0x00, 0xdf, 0x42, 0xbf, 0x8f, 0x8e, 0xdc, 0x82, 0xca, 0xd8, 0x37, 0x11, 0xa9, 0x3d, 0x82, 0x69, 0x2b, 0xc4, 0x6d, 0xc9, 0x75, 0x25, 0xbc, 0xf7, 0xec, 0xa1, 0xb5, 0x74, 0x19, 0x5d, 0x2e, 0x8a, 0x9a, 0x4b, 0x89, 0x7d, 0xc4, 0x68, 0xc6, 0xf6, 0xfe, 0xb2, 0xa0, 0x30, 0x1d, 0x60, 0x86, 0x88, 0x8d, 0x49, 0x3e, 0x01, 0x11, 0x20, 0xa3, 0x8c, 0xb9, 0xb1, 0xaa, 0x62, 0xad, 0xbf, 0x18, 0x97, 0x43, 0x47, 0x1d, 0xd2, 0xaf, 0x04, 0xd9, 0xb8, 0xc8, 0x0d, 0x68, 0xe4, 0xf7, 0x3e, 0x48, 0xf1, 0x05, 0xbc, 0x25, 0xaa, 0x07, 0x71, 0xd9, 0x14, 0x78, 0xf6, 0x49, 0xb5, 0x90, 0xfd, 0xa7, 0xc6, 0x14, 0xfd, 0x1b, 0x1c, 0xff, 0x00, 0x4d, 0x8d, 0x2e, 0x73, 0x8c, 0x35, 0xa3, 0x52, 0x4f, 0x92, 0x48, 0xa6, 0x1a, 0x24, 0xb6, 0x2a, 0xfa, 0xa5, 0x9e, 0x60, 0x64, 0x39, 0x94, 0x13, 0xcb, 0x27, 0x73, 0x80, 0xf3, 0x0c, 0xf6, 0xff, 0x00, 0xd2, 0x5a, 0x78, 0xbf, 0x53, 0x71, 0xf6, 0x01, 0x75, 0xb6, 0x97, 0x6a, 0x25, 0xa1, 0xad, 0x1f, 0xf4, 0xb7, 0x23, 0x48, 0xb7, 0x94, 0x84, 0x97, 0x5b, 0xff, 0x00, 0x32, 0xa9, 0xdd, 0xfc, 0xed, 0x9b, 0x7e, 0x0d, 0x9e, 0x52, 0x4a, 0x95, 0x61, 0xff, 0xd0, 0xf3, 0x3b, 0xa7, 0x70, 0xee, 0x01, 0x8f, 0xb9, 0x59, 0xfa, 0x7e, 0xdf, 0xe4, 0xc8, 0xf9, 0x2a, 0xc2, 0x5c, 0x63, 0xc3, 0x54, 0x67, 0x87, 0x6e, 0x10, 0x35, 0x68, 0xd4, 0x79, 0x1e, 0x53, 0x4a, 0xe0, 0xdc, 0xe9, 0xb8, 0x1f, 0x6a, 0xda, 0x6c, 0x25, 0x94, 0x37, 0xb0, 0xd0, 0xb8, 0xad, 0x67, 0xe4, 0x55, 0x8a, 0x5b, 0x8b, 0x82, 0xc0, 0x6f, 0x76, 0x80, 0x34, 0x49, 0x05, 0x2e, 0x9e, 0xc6, 0x1c, 0x66, 0x31, 0xba, 0x10, 0x23, 0xe0, 0xaf, 0xe1, 0x61, 0x53, 0x43, 0x8d, 0x81, 0xb3, 0x67, 0xef, 0x9e, 0x49, 0x2a, 0x12, 0x6c, 0xb6, 0x63, 0x1a, 0x0c, 0x31, 0xba, 0x55, 0xcd, 0xac, 0xfa, 0x8e, 0xdf, 0x91, 0x6e, 0x91, 0xd9, 0xb3, 0xc9, 0x73, 0x90, 0x7a, 0xab, 0x6a, 0xc2, 0xa4, 0x60, 0xe2, 0x8f, 0xd2, 0x38, 0x03, 0x7d, 0x9e, 0x0d, 0xff, 0x00, 0xcc, 0xd6, 0xd3, 0x6b, 0x71, 0x67, 0xd2, 0x3e, 0x64, 0x72, 0xab, 0xdb, 0x8d, 0x54, 0x39, 0xc5, 0x83, 0x6b, 0x3d, 0xee, 0x2e, 0xd4, 0x92, 0x3c, 0x4a, 0x56, 0xba, 0xb4, 0x79, 0x5c, 0xf7, 0xb2, 0x96, 0x6c, 0x8d, 0xaf, 0x80, 0x48, 0x3c, 0xf0, 0xb2, 0x1f, 0x63, 0x9c, 0xe9, 0x3f, 0x24, 0x5c, 0xdb, 0xdd, 0x76, 0x43, 0xde, 0xfd, 0x5c, 0xe3, 0x24, 0xfc, 0x50, 0x00, 0x93, 0x0a, 0x78, 0x8a, 0x0d, 0x49, 0xca, 0xcf, 0x93, 0x63, 0x1b, 0x7d, 0xd7, 0x57, 0x50, 0xd5, 0xef, 0x70, 0x6b, 0x4f, 0xc7, 0x45, 0xdb, 0x74, 0x9e, 0x8d, 0x5e, 0x33, 0x83, 0xd8, 0x37, 0xdd, 0xc3, 0xac, 0x3d, 0xbf, 0x92, 0xc5, 0x5b, 0xea, 0xbf, 0xd5, 0x62, 0xc0, 0xdc, 0xbc, 0xbd, 0x2d, 0x22, 0x5a, 0xcf, 0xdd, 0x69, 0xff, 0x00, 0xd1, 0x8e, 0x5d, 0xa5, 0x38, 0xb5, 0xb0, 0x00, 0xc6, 0xc4, 0x24, 0x4a, 0xd6, 0x8d, 0x18, 0x04, 0x49, 0x88, 0x9e, 0x55, 0xd6, 0x61, 0xb0, 0xc1, 0x70, 0x32, 0xdd, 0x3c, 0x95, 0xda, 0xf1, 0xfe, 0xf5, 0x62, 0xbc, 0x76, 0x8e, 0x75, 0x28, 0x02, 0xa2, 0xe7, 0x7d, 0x92, 0xb9, 0x84, 0x96, 0x96, 0xda, 0xf7, 0x70, 0x12, 0x4e, 0x5a, 0xff, 0x00, 0xff, 0xd1, 0xf3, 0x7a, 0x21, 0xaf, 0xde, 0xef, 0xa2, 0x22, 0x55, 0xfc, 0x5a, 0xbd, 0x42, 0xfb, 0x08, 0xfa, 0x67, 0x4f, 0x82, 0xcd, 0x6d, 0x85, 0xc0, 0x56, 0x3b, 0x90, 0xb7, 0xf0, 0x2a, 0x0e, 0x63, 0x58, 0x3b, 0xf2, 0xa3, 0x9e, 0x8c, 0xb8, 0x86, 0xbe, 0x49, 0xf1, 0x2c, 0x0c, 0x86, 0xb4, 0x4c, 0x69, 0xe4, 0xaf, 0x6e, 0xcc, 0x6b, 0x7d, 0x46, 0xb3, 0x70, 0xec, 0x38, 0x51, 0x7d, 0x02, 0x8a, 0xc7, 0xa6, 0xd9, 0x20, 0x68, 0x0f, 0x8f, 0x8a, 0xcf, 0xc9, 0xc2, 0xea, 0x59, 0x5b, 0x48, 0xb0, 0x91, 0xae, 0xe6, 0xc9, 0x03, 0xc9, 0x30, 0x51, 0x66, 0xd4, 0x0d, 0xad, 0xbd, 0x5f, 0x53, 0xcc, 0x6b, 0xb6, 0x90, 0x5a, 0x3b, 0x83, 0x0b, 0x43, 0x17, 0x31, 0xd6, 0xc3, 0x6e, 0x12, 0x3b, 0x79, 0xac, 0xc1, 0x89, 0x47, 0xd9, 0xe8, 0x63, 0x98, 0x45, 0xed, 0x6c, 0x5a, 0xf1, 0xa0, 0x27, 0xc5, 0x5b, 0xc3, 0x6f, 0xa6, 0xe0, 0x1c, 0x7d, 0xb3, 0xa2, 0x69, 0x34, 0x7b, 0xae, 0x1a, 0x8d, 0x45, 0x17, 0x9d, 0xeb, 0xfd, 0x21, 0xd8, 0xb9, 0xae, 0xb5, 0x80, 0xbb, 0x1e, 0xd2, 0x5c, 0xd7, 0x78, 0x13, 0xf9, 0xae, 0x4b, 0xea, 0xc7, 0x4a, 0x39, 0xbd, 0x55, 0xb3, 0xed, 0x66, 0x38, 0xf5, 0x09, 0x22, 0x41, 0x23, 0xe8, 0x37, 0xfb, 0x4b, 0xa1, 0xeb, 0xd6, 0xfe, 0x88, 0x31, 0xbf, 0x41, 0xc0, 0xee, 0xd2, 0x74, 0x02, 0x78, 0x53, 0xfa, 0x97, 0x43, 0x19, 0x85, 0x65, 0xff, 0x00, 0x9d, 0x71, 0x33, 0xe4, 0x1a, 0x7d, 0x8d, 0x53, 0x42, 0x56, 0x35, 0x6b, 0xe5, 0x80, 0x06, 0xc7, 0x57, 0xa7, 0xc4, 0xa9, 0xdb, 0xb6, 0x81, 0x1f, 0xeb, 0xd9, 0x69, 0x56, 0xc2, 0xd0, 0x00, 0xe5, 0x55, 0xc0, 0x12, 0xc2, 0xd7, 0x4e, 0xa2, 0x5a, 0x7c, 0x0a, 0xd0, 0x63, 0x9a, 0xd1, 0xaf, 0xd2, 0xe2, 0x3c, 0x12, 0x62, 0x66, 0xc6, 0x42, 0x23, 0x5a, 0x49, 0x8f, 0x10, 0xa2, 0xd2, 0x3e, 0x28, 0x9d, 0xc4, 0x88, 0x09, 0x29, 0x16, 0xc3, 0x3c, 0x24, 0x8d, 0xe6, 0x92, 0x72, 0x1f, 0xff, 0xd2, 0xf3, 0xbb, 0xb0, 0xfe, 0xcb, 0x99, 0xe9, 0xce, 0xf6, 0x88, 0x2d, 0x77, 0x91, 0x5b, 0x3d, 0x3d, 0xd0, 0xe6, 0x90, 0xa9, 0x65, 0x57, 0x38, 0x95, 0xdd, 0xcb, 0x9a, 0x7d, 0xce, 0xf2, 0x3f, 0x44, 0x23, 0x60, 0x58, 0x76, 0xe9, 0xca, 0x8c, 0xea, 0x1b, 0x31, 0x02, 0x32, 0x23, 0xea, 0xee, 0xb1, 0xcd, 0xb0, 0xc7, 0x87, 0x74, 0x7a, 0xeb, 0x70, 0x1a, 0x71, 0xe1, 0xfe, 0xe4, 0x1c, 0x1d, 0xae, 0xe5, 0x69, 0xd8, 0xfa, 0x99, 0x50, 0x0d, 0x1a, 0xf7, 0x2a, 0x3a, 0x0c, 0xf4, 0x1a, 0x8e, 0xc7, 0x27, 0x5d, 0xbf, 0x18, 0x41, 0xdc, 0xc2, 0xf0, 0x7f, 0x74, 0xf6, 0x3a, 0x22, 0x66, 0xdb, 0x68, 0xc6, 0x80, 0x48, 0x6b, 0x88, 0x06, 0x39, 0x0d, 0xee, 0xaa, 0x1f, 0xb3, 0xd5, 0x1b, 0x83, 0xd8, 0x3b, 0x38, 0x8f, 0x69, 0xfe, 0xdf, 0xd1, 0x4d, 0x29, 0xa1, 0x4c, 0x7a, 0xf4, 0xbf, 0xa7, 0x92, 0xcf, 0xa5, 0x20, 0x08, 0xf3, 0xf6, 0xff, 0x00, 0x15, 0xbb, 0xd1, 0x31, 0xd9, 0x5e, 0x3d, 0x75, 0x56, 0x36, 0x88, 0x00, 0x81, 0xe0, 0x16, 0x5e, 0x55, 0x74, 0x3f, 0x00, 0x9d, 0xe0, 0xcc, 0x69, 0xe7, 0x3a, 0x2d, 0xbe, 0x90, 0x00, 0xa9, 0xae, 0xef, 0x1f, 0x95, 0x4b, 0x0d, 0x9a, 0xdc, 0xc7, 0x45, 0xfe, 0xb1, 0x7d, 0x60, 0xa7, 0xa1, 0xe0, 0x1f, 0x4e, 0x1d, 0x99, 0x69, 0x02, 0x9a, 0xcf, 0x1f, 0xca, 0x7b, 0xbf, 0x90, 0xc5, 0xc2, 0xb3, 0xeb, 0x57, 0xd6, 0x03, 0x6b, 0xae, 0x39, 0xb6, 0x82, 0xe3, 0x31, 0xa1, 0x68, 0xf2, 0x6b, 0x5c, 0x12, 0xfa, 0xe1, 0x91, 0x66, 0x47, 0x5d, 0xb8, 0x3b, 0x4f, 0x44, 0x36, 0xb6, 0x8f, 0x28, 0xdd, 0xff, 0x00, 0x7e, 0x46, 0xab, 0x12, 0x2b, 0x65, 0x55, 0x32, 0xa7, 0x62, 0xb6, 0xbd, 0xf7, 0x64, 0x10, 0xdb, 0x03, 0x9f, 0x1b, 0x9e, 0xc7, 0xd9, 0xb8, 0x3b, 0x1f, 0x67, 0xf3, 0x6c, 0x52, 0x80, 0xd7, 0x7d, 0x0f, 0xea, 0x7f, 0x5d, 0x1d, 0x67, 0xa6, 0x0b, 0x1e, 0x47, 0xda, 0x69, 0x3b, 0x2e, 0x03, 0xc7, 0xf3, 0x5f, 0x1f, 0xf0, 0x8b, 0xa1, 0x02, 0x46, 0xba, 0x79, 0xaf, 0x32, 0xff, 0x00, 0x16, 0xad, 0xca, 0x1d, 0x57, 0x2a, 0xdc, 0x79, 0x18, 0x41, 0xb0, 0xf6, 0x9e, 0xe4, 0x9f, 0xd0, 0x8f, 0xeb, 0x31, 0xab, 0xd2, 0x83, 0xa4, 0xcb, 0x8c, 0xb8, 0xa0, 0x42, 0x12, 0x7b, 0x67, 0x9f, 0x2f, 0xf5, 0x09, 0x26, 0x96, 0xc4, 0xce, 0xa9, 0x20, 0xa7, 0xff, 0xd3, 0xf3, 0x2f, 0xb4, 0x5d, 0xe9, 0x0a, 0xb7, 0x9f, 0x4c, 0x19, 0xdb, 0x3a, 0x2d, 0x5e, 0x94, 0xfd, 0xc4, 0xb7, 0xc5, 0x62, 0xf9, 0x2b, 0xfd, 0x2e, 0xe3, 0x5d, 0xe0, 0x7c, 0x13, 0x48, 0xd1, 0x92, 0x12, 0xa9, 0x0b, 0x7a, 0xbc, 0x2d, 0xc2, 0x7f, 0x92, 0x60, 0xab, 0x4e, 0x79, 0x2e, 0x00, 0xf0, 0xaa, 0xe1, 0xda, 0x3d, 0x43, 0xfc, 0xad, 0x55, 0xbb, 0x80, 0x79, 0x81, 0xa0, 0xe6, 0x54, 0x32, 0x6d, 0x02, 0xbe, 0xf3, 0x61, 0x81, 0xa8, 0x44, 0x14, 0x03, 0x59, 0x0e, 0x1c, 0xf6, 0x1f, 0xdc, 0xb2, 0xec, 0xa3, 0x23, 0x77, 0xe8, 0x6e, 0x70, 0xf2, 0x25, 0x1f, 0x1f, 0x17, 0xa9, 0x6d, 0x71, 0x36, 0x97, 0x47, 0x00, 0xa4, 0x02, 0xe0, 0x2c, 0x7c, 0xc1, 0xab, 0xd5, 0x31, 0x85, 0x35, 0xd4, 0xe6, 0x13, 0x02, 0xd6, 0x4b, 0x67, 0x48, 0x2b, 0xa9, 0xe9, 0x2e, 0x02, 0xb6, 0x4f, 0x82, 0xe5, 0x7a, 0x95, 0x19, 0xc6, 0x87, 0x3d, 0xfb, 0xa2, 0xb8, 0x79, 0x1e, 0x4d, 0x3b, 0x96, 0xcf, 0x4f, 0xbd, 0xcd, 0xa2, 0xa2, 0x1f, 0xa0, 0x82, 0xd3, 0xfc, 0x97, 0x05, 0x24, 0x36, 0x6b, 0xf3, 0x31, 0xa2, 0x35, 0x79, 0xef, 0xad, 0xf8, 0xae, 0xaf, 0xaf, 0xd8, 0xf2, 0xd8, 0x6d, 0xed, 0x6b, 0xda, 0x7b, 0x18, 0x1b, 0x5d, 0xff, 0x00, 0x52, 0xb1, 0x6d, 0xf0, 0x81, 0x31, 0xca, 0xf4, 0x6e, 0xb1, 0x80, 0xce, 0xb1, 0x84, 0xc0, 0x21, 0xb7, 0xd6, 0x77, 0x31, 0xd1, 0x27, 0xc1, 0xcd, 0xfe, 0xd2, 0xe3, 0xec, 0xe8, 0x1d, 0x45, 0x96, 0xb0, 0x9a, 0xb7, 0x87, 0x3f, 0x68, 0x2d, 0xf7, 0x01, 0x1f, 0xbe, 0xd1, 0xf4, 0x7f, 0xb4, 0xa4, 0x0d, 0x77, 0xbb, 0xfa, 0x8f, 0x80, 0x3a, 0x7f, 0x43, 0xaa, 0xe2, 0xdf, 0xd2, 0x65, 0x7e, 0x95, 0xe4, 0x0f, 0x1f, 0xa1, 0xfe, 0x6b, 0x16, 0x9f, 0x52, 0xfa, 0xc1, 0xd3, 0xba, 0x6d, 0x26, 0xdc, 0xac, 0x86, 0xd4, 0xd9, 0x0d, 0x31, 0x2e, 0x74, 0x9e, 0xdb, 0x59, 0x2e, 0x55, 0xe8, 0xc9, 0xb2, 0x96, 0xd5, 0x4b, 0x9f, 0xb8, 0x6d, 0xda, 0x1c, 0x04, 0x09, 0x03, 0xfe, 0x8a, 0xc6, 0xfa, 0xd3, 0xf5, 0x6a, 0xbe, 0xbb, 0x5b, 0x2e, 0xc6, 0xb5, 0x94, 0xe6, 0xd5, 0x20, 0x97, 0x7d, 0x1b, 0x1b, 0xf9, 0xad, 0x7c, 0x7d, 0x17, 0xb7, 0xf3, 0x1e, 0x92, 0x1b, 0x7f, 0xf8, 0xe0, 0x7d, 0x59, 0xdd, 0xfd, 0x32, 0xd8, 0x8f, 0xa5, 0xe8, 0x3a, 0x12, 0x5c, 0x3f, 0xfc, 0xc4, 0xfa, 0xc3, 0xb3, 0x77, 0xa7, 0x56, 0xed, 0xdb, 0x76, 0x7a, 0x8d, 0xdd, 0x1f, 0xbf, 0xfd, 0x44, 0x92, 0x56, 0x8f, 0xff, 0xd4, 0xf2, 0xe8, 0x86, 0x17, 0x1e, 0xfa, 0x04, 0x56, 0x4b, 0x43, 0x6c, 0x6f, 0x2d, 0xe5, 0x46, 0x01, 0x64, 0x2b, 0x14, 0x32, 0x5b, 0xb4, 0xa0, 0x52, 0x1d, 0xde, 0x9b, 0x94, 0xdb, 0xab, 0x6b, 0x81, 0xf7, 0x05, 0xb0, 0xd7, 0x07, 0xb2, 0x27, 0x55, 0xc6, 0x57, 0x65, 0xd8, 0x76, 0x6e, 0x64, 0xed, 0xee, 0x16, 0xce, 0x27, 0x57, 0x63, 0xda, 0x0c, 0xc2, 0x8e, 0x51, 0x67, 0x84, 0xfa, 0x1d, 0xdd, 0x62, 0xc7, 0x07, 0xe9, 0xf7, 0xa3, 0xd6, 0x6c, 0x02, 0x41, 0x55, 0x31, 0xf3, 0x2b, 0xb3, 0xba, 0x2b, 0x2e, 0x68, 0x24, 0x1d, 0x47, 0x64, 0xca, 0xa6, 0x50, 0x41, 0x65, 0x90, 0x6c, 0xb1, 0xa5, 0xae, 0x33, 0x23, 0x51, 0xe4, 0xab, 0x7d, 0x5d, 0xcb, 0xb6, 0xcc, 0x37, 0xd0, 0x40, 0x73, 0x71, 0xde, 0x58, 0x09, 0xe7, 0x6f, 0x2c, 0x44, 0xc9, 0xc9, 0xae, 0xba, 0x9d, 0x63, 0x88, 0x01, 0xa0, 0x95, 0x9d, 0xf5, 0x3f, 0x2a, 0xe6, 0x67, 0xdb, 0x50, 0x83, 0x55, 0xad, 0x36, 0x3e, 0x78, 0x10, 0x74, 0x77, 0xfd, 0x2d, 0xaa, 0x4c, 0x7d, 0x58, 0x73, 0x91, 0xa0, 0x0f, 0x51, 0x45, 0xb7, 0x33, 0xdd, 0x58, 0x69, 0x1d, 0xd8, 0x0c, 0x9f, 0x96, 0x88, 0x19, 0x99, 0x19, 0xac, 0xcf, 0xa3, 0xd2, 0xad, 0xb5, 0xdb, 0x76, 0x8f, 0xad, 0xc4, 0xea, 0xcf, 0xdf, 0x7e, 0xdf, 0xdd, 0xfc, 0xd5, 0xa3, 0x5e, 0x43, 0x2b, 0x6b, 0xb2, 0xad, 0x3b, 0x6a, 0xa4, 0x13, 0xa7, 0x04, 0xac, 0x7a, 0x6f, 0xb3, 0x23, 0x26, 0xcc, 0xfb, 0xb4, 0x75, 0x8e, 0x01, 0x83, 0xf7, 0x58, 0x3e, 0x8b, 0x53, 0xa7, 0x2a, 0x1a, 0x31, 0x42, 0x36, 0x5d, 0x4c, 0x9a, 0xf2, 0xdc, 0xc6, 0xfe, 0x98, 0xb4, 0x34, 0xcb, 0x48, 0x0a, 0x8f, 0xdb, 0xb2, 0xeb, 0x76, 0xd6, 0x07, 0x5c, 0x59, 0xc9, 0x64, 0x8f, 0x93, 0xa7, 0x73, 0x16, 0x83, 0xaf, 0x0e, 0xa4, 0x33, 0xef, 0x50, 0xc5, 0x0c, 0xda, 0x59, 0x10, 0x06, 0x8a, 0x2e, 0x29, 0x0e, 0xac, 0xc2, 0x31, 0x3d, 0x36, 0x69, 0x7e, 0xd6, 0xcc, 0xf5, 0x3d, 0x6f, 0xb3, 0xeb, 0x1b, 0x76, 0xef, 0x3b, 0xa3, 0xfa, 0xc9, 0x2b, 0x5f, 0x66, 0x6f, 0xa9, 0x1e, 0x73, 0xf2, 0x49, 0x2e, 0x39, 0xf7, 0x4f, 0xb7, 0x8d, 0xff, 0xd5, 0xf3, 0x26, 0xfe, 0x0a, 0xc5, 0x1b, 0xa7, 0xcb, 0xb2, 0xcf, 0x49, 0x03, 0xb2, 0x46, 0xee, 0xd9, 0xd9, 0xb3, 0xf4, 0x9f, 0x25, 0x4a, 0xdf, 0x4b, 0x77, 0xe8, 0x27, 0xd4, 0xef, 0x1c, 0x2a, 0x29, 0x26, 0xc5, 0x7c, 0x9d, 0x6c, 0x7f, 0xb7, 0x6e, 0x1b, 0x26, 0x7f, 0x05, 0xa3, 0xfe, 0x53, 0x8d, 0x62, 0x57, 0x30, 0x92, 0x12, 0xfa, 0x2f, 0x86, 0xdf, 0xa4, 0xec, 0x67, 0xfe, 0xd0, 0xf4, 0xff, 0x00, 0x4d, 0xfc, 0xdf, 0x78, 0xe1, 0x68, 0x7d, 0x54, 0x99, 0xbf, 0x6f, 0xf3, 0xbe, 0xdf, 0x8e, 0xdd, 0x7f, 0xef, 0xeb, 0x97, 0x49, 0x3e, 0x3b, 0x7f, 0x06, 0x2c, 0x9f, 0x37, 0x5f, 0xf0, 0x9f, 0x4c, 0xeb, 0x7b, 0xbf, 0x67, 0x55, 0xe8, 0xff, 0x00, 0x31, 0xbc, 0x7a, 0x9e, 0x31, 0xdb, 0xfe, 0x92, 0xae, 0x37, 0x7a, 0x4d, 0xdb, 0xe2, 0x17, 0x9d, 0xa4, 0xa3, 0xc9, 0xba, 0xfc, 0x7b, 0x7d, 0x5f, 0x52, 0xa7, 0x7e, 0xd1, 0x28, 0xf8, 0xf3, 0xb0, 0xc7, 0x32, 0xbc, 0x99, 0x24, 0xc5, 0xe3, 0xab, 0xeb, 0x1f, 0xa4, 0xf5, 0xfc, 0xe1, 0x25, 0xe4, 0xe9, 0x24, 0x97, 0xff, 0xd9, 0x00, 0x38, 0x42, 0x49, 0x4d, 0x04, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x41, 0x00, 0x64, 0x00, 0x6f, 0x00, 0x62, 0x00, 0x65, 0x00, 0x20, 0x00, 0x50, 0x00, 0x68, 0x00, 0x6f, 0x00, 0x74, 0x00, 0x6f, 0x00, 0x73, 0x00, 0x68, 0x00, 0x6f, 0x00, 0x70, 0x00, 0x00, 0x00, 0x13, 0x00, 0x41, 0x00, 0x64, 0x00, 0x6f, 0x00, 0x62, 0x00, 0x65, 0x00, 0x20, 0x00, 0x50, 0x00, 0x68, 0x00, 0x6f, 0x00, 0x74, 0x00, 0x6f, 0x00, 0x73, 0x00, 0x68, 0x00, 0x6f, 0x00, 0x70, 0x00, 0x20, 0x00, 0x37, 0x00, 0x2e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x01, 0x00, 0x38, 0x42, 0x49, 0x4d, 0x04, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x05, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0xff, 0xe1, 0x15, 0x67, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x6e, 0x73, 0x2e, 0x61, 0x64, 0x6f, 0x62, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x78, 0x61, 0x70, 0x2f, 0x31, 0x2e, 0x30, 0x2f, 0x00, 0x3c, 0x3f, 0x78, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x20, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x3d, 0x27, 0xef, 0xbb, 0xbf, 0x27, 0x20, 0x69, 0x64, 0x3d, 0x27, 0x57, 0x35, 0x4d, 0x30, 0x4d, 0x70, 0x43, 0x65, 0x68, 0x69, 0x48, 0x7a, 0x72, 0x65, 0x53, 0x7a, 0x4e, 0x54, 0x63, 0x7a, 0x6b, 0x63, 0x39, 0x64, 0x27, 0x3f, 0x3e, 0x0a, 0x3c, 0x3f, 0x61, 0x64, 0x6f, 0x62, 0x65, 0x2d, 0x78, 0x61, 0x70, 0x2d, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x20, 0x65, 0x73, 0x63, 0x3d, 0x22, 0x43, 0x52, 0x22, 0x3f, 0x3e, 0x0a, 0x3c, 0x78, 0x3a, 0x78, 0x61, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x20, 0x78, 0x6d, 0x6c, 0x6e, 0x73, 0x3a, 0x78, 0x3d, 0x27, 0x61, 0x64, 0x6f, 0x62, 0x65, 0x3a, 0x6e, 0x73, 0x3a, 0x6d, 0x65, 0x74, 0x61, 0x2f, 0x27, 0x20, 0x78, 0x3a, 0x78, 0x61, 0x70, 0x74, 0x6b, 0x3d, 0x27, 0x58, 0x4d, 0x50, 0x20, 0x74, 0x6f, 0x6f, 0x6c, 0x6b, 0x69, 0x74, 0x20, 0x32, 0x2e, 0x38, 0x2e, 0x32, 0x2d, 0x33, 0x33, 0x2c, 0x20, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x77, 0x6f, 0x72, 0x6b, 0x20, 0x31, 0x2e, 0x35, 0x27, 0x3e, 0x0a, 0x3c, 0x72, 0x64, 0x66, 0x3a, 0x52, 0x44, 0x46, 0x20, 0x78, 0x6d, 0x6c, 0x6e, 0x73, 0x3a, 0x72, 0x64, 0x66, 0x3d, 0x27, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x77, 0x33, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x31, 0x39, 0x39, 0x39, 0x2f, 0x30, 0x32, 0x2f, 0x32, 0x32, 0x2d, 0x72, 0x64, 0x66, 0x2d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x2d, 0x6e, 0x73, 0x23, 0x27, 0x20, 0x78, 0x6d, 0x6c, 0x6e, 0x73, 0x3a, 0x69, 0x58, 0x3d, 0x27, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x6e, 0x73, 0x2e, 0x61, 0x64, 0x6f, 0x62, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x58, 0x2f, 0x31, 0x2e, 0x30, 0x2f, 0x27, 0x3e, 0x0a, 0x0a, 0x20, 0x3c, 0x72, 0x64, 0x66, 0x3a, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x61, 0x62, 0x6f, 0x75, 0x74, 0x3d, 0x27, 0x75, 0x75, 0x69, 0x64, 0x3a, 0x32, 0x32, 0x64, 0x30, 0x32, 0x62, 0x30, 0x61, 0x2d, 0x62, 0x32, 0x34, 0x39, 0x2d, 0x31, 0x31, 0x64, 0x62, 0x2d, 0x38, 0x61, 0x66, 0x38, 0x2d, 0x39, 0x31, 0x64, 0x35, 0x34, 0x30, 0x33, 0x66, 0x39, 0x32, 0x66, 0x39, 0x27, 0x0a, 0x20, 0x20, 0x78, 0x6d, 0x6c, 0x6e, 0x73, 0x3a, 0x70, 0x64, 0x66, 0x3d, 0x27, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x6e, 0x73, 0x2e, 0x61, 0x64, 0x6f, 0x62, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x64, 0x66, 0x2f, 0x31, 0x2e, 0x33, 0x2f, 0x27, 0x3e, 0x0a, 0x20, 0x20, 0x3c, 0x21, 0x2d, 0x2d, 0x20, 0x70, 0x64, 0x66, 0x3a, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x69, 0x73, 0x20, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x64, 0x20, 0x2d, 0x2d, 0x3e, 0x0a, 0x20, 0x3c, 0x2f, 0x72, 0x64, 0x66, 0x3a, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3e, 0x0a, 0x0a, 0x20, 0x3c, 0x72, 0x64, 0x66, 0x3a, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x61, 0x62, 0x6f, 0x75, 0x74, 0x3d, 0x27, 0x75, 0x75, 0x69, 0x64, 0x3a, 0x32, 0x32, 0x64, 0x30, 0x32, 0x62, 0x30, 0x61, 0x2d, 0x62, 0x32, 0x34, 0x39, 0x2d, 0x31, 0x31, 0x64, 0x62, 0x2d, 0x38, 0x61, 0x66, 0x38, 0x2d, 0x39, 0x31, 0x64, 0x35, 0x34, 0x30, 0x33, 0x66, 0x39, 0x32, 0x66, 0x39, 0x27, 0x0a, 0x20, 0x20, 0x78, 0x6d, 0x6c, 0x6e, 0x73, 0x3a, 0x70, 0x68, 0x6f, 0x74, 0x6f, 0x73, 0x68, 0x6f, 0x70, 0x3d, 0x27, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x6e, 0x73, 0x2e, 0x61, 0x64, 0x6f, 0x62, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x68, 0x6f, 0x74, 0x6f, 0x73, 0x68, 0x6f, 0x70, 0x2f, 0x31, 0x2e, 0x30, 0x2f, 0x27, 0x3e, 0x0a, 0x20, 0x20, 0x3c, 0x21, 0x2d, 0x2d, 0x20, 0x70, 0x68, 0x6f, 0x74, 0x6f, 0x73, 0x68, 0x6f, 0x70, 0x3a, 0x43, 0x61, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x73, 0x20, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x64, 0x20, 0x2d, 0x2d, 0x3e, 0x0a, 0x20, 0x3c, 0x2f, 0x72, 0x64, 0x66, 0x3a, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3e, 0x0a, 0x0a, 0x20, 0x3c, 0x72, 0x64, 0x66, 0x3a, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x61, 0x62, 0x6f, 0x75, 0x74, 0x3d, 0x27, 0x75, 0x75, 0x69, 0x64, 0x3a, 0x32, 0x32, 0x64, 0x30, 0x32, 0x62, 0x30, 0x61, 0x2d, 0x62, 0x32, 0x34, 0x39, 0x2d, 0x31, 0x31, 0x64, 0x62, 0x2d, 0x38, 0x61, 0x66, 0x38, 0x2d, 0x39, 0x31, 0x64, 0x35, 0x34, 0x30, 0x33, 0x66, 0x39, 0x32, 0x66, 0x39, 0x27, 0x0a, 0x20, 0x20, 0x78, 0x6d, 0x6c, 0x6e, 0x73, 0x3a, 0x78, 0x61, 0x70, 0x3d, 0x27, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x6e, 0x73, 0x2e, 0x61, 0x64, 0x6f, 0x62, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x78, 0x61, 0x70, 0x2f, 0x31, 0x2e, 0x30, 0x2f, 0x27, 0x3e, 0x0a, 0x20, 0x20, 0x3c, 0x21, 0x2d, 0x2d, 0x20, 0x78, 0x61, 0x70, 0x3a, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x73, 0x20, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x64, 0x20, 0x2d, 0x2d, 0x3e, 0x0a, 0x20, 0x3c, 0x2f, 0x72, 0x64, 0x66, 0x3a, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3e, 0x0a, 0x0a, 0x20, 0x3c, 0x72, 0x64, 0x66, 0x3a, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x61, 0x62, 0x6f, 0x75, 0x74, 0x3d, 0x27, 0x75, 0x75, 0x69, 0x64, 0x3a, 0x32, 0x32, 0x64, 0x30, 0x32, 0x62, 0x30, 0x61, 0x2d, 0x62, 0x32, 0x34, 0x39, 0x2d, 0x31, 0x31, 0x64, 0x62, 0x2d, 0x38, 0x61, 0x66, 0x38, 0x2d, 0x39, 0x31, 0x64, 0x35, 0x34, 0x30, 0x33, 0x66, 0x39, 0x32, 0x66, 0x39, 0x27, 0x0a, 0x20, 0x20, 0x78, 0x6d, 0x6c, 0x6e, 0x73, 0x3a, 0x78, 0x61, 0x70, 0x4d, 0x4d, 0x3d, 0x27, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x6e, 0x73, 0x2e, 0x61, 0x64, 0x6f, 0x62, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x78, 0x61, 0x70, 0x2f, 0x31, 0x2e, 0x30, 0x2f, 0x6d, 0x6d, 0x2f, 0x27, 0x3e, 0x0a, 0x20, 0x20, 0x3c, 0x78, 0x61, 0x70, 0x4d, 0x4d, 0x3a, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x3e, 0x61, 0x64, 0x6f, 0x62, 0x65, 0x3a, 0x64, 0x6f, 0x63, 0x69, 0x64, 0x3a, 0x70, 0x68, 0x6f, 0x74, 0x6f, 0x73, 0x68, 0x6f, 0x70, 0x3a, 0x32, 0x32, 0x64, 0x30, 0x32, 0x62, 0x30, 0x36, 0x2d, 0x62, 0x32, 0x34, 0x39, 0x2d, 0x31, 0x31, 0x64, 0x62, 0x2d, 0x38, 0x61, 0x66, 0x38, 0x2d, 0x39, 0x31, 0x64, 0x35, 0x34, 0x30, 0x33, 0x66, 0x39, 0x32, 0x66, 0x39, 0x3c, 0x2f, 0x78, 0x61, 0x70, 0x4d, 0x4d, 0x3a, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x3e, 0x0a, 0x20, 0x3c, 0x2f, 0x72, 0x64, 0x66, 0x3a, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3e, 0x0a, 0x0a, 0x20, 0x3c, 0x72, 0x64, 0x66, 0x3a, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x61, 0x62, 0x6f, 0x75, 0x74, 0x3d, 0x27, 0x75, 0x75, 0x69, 0x64, 0x3a, 0x32, 0x32, 0x64, 0x30, 0x32, 0x62, 0x30, 0x61, 0x2d, 0x62, 0x32, 0x34, 0x39, 0x2d, 0x31, 0x31, 0x64, 0x62, 0x2d, 0x38, 0x61, 0x66, 0x38, 0x2d, 0x39, 0x31, 0x64, 0x35, 0x34, 0x30, 0x33, 0x66, 0x39, 0x32, 0x66, 0x39, 0x27, 0x0a, 0x20, 0x20, 0x78, 0x6d, 0x6c, 0x6e, 0x73, 0x3a, 0x64, 0x63, 0x3d, 0x27, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x70, 0x75, 0x72, 0x6c, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x64, 0x63, 0x2f, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x31, 0x2e, 0x31, 0x2f, 0x27, 0x3e, 0x0a, 0x20, 0x20, 0x3c, 0x64, 0x63, 0x3a, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x3c, 0x72, 0x64, 0x66, 0x3a, 0x41, 0x6c, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x72, 0x64, 0x66, 0x3a, 0x6c, 0x69, 0x20, 0x78, 0x6d, 0x6c, 0x3a, 0x6c, 0x61, 0x6e, 0x67, 0x3d, 0x27, 0x78, 0x2d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x27, 0x3e, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x72, 0x64, 0x66, 0x3a, 0x6c, 0x69, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x72, 0x64, 0x66, 0x3a, 0x41, 0x6c, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x63, 0x3a, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x3c, 0x2f, 0x72, 0x64, 0x66, 0x3a, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3e, 0x0a, 0x0a, 0x3c, 0x2f, 0x72, 0x64, 0x66, 0x3a, 0x52, 0x44, 0x46, 0x3e, 0x0a, 0x3c, 0x2f, 0x78, 0x3a, 0x78, 0x61, 0x70, 0x6d, 0x65, 0x74, 0x61, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x0a, 0x3c, 0x3f, 0x78, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x20, 0x65, 0x6e, 0x64, 0x3d, 0x27, 0x77, 0x27, 0x3f, 0x3e, 0xff, 0xee, 0x00, 0x0e, 0x41, 0x64, 0x6f, 0x62, 0x65, 0x00, 0x64, 0x40, 0x00, 0x00, 0x00, 0x01, 0xff, 0xdb, 0x00, 0x84, 0x00, 0x04, 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, 0x03, 0x03, 0x04, 0x06, 0x04, 0x03, 0x04, 0x06, 0x07, 0x05, 0x04, 0x04, 0x05, 0x07, 0x08, 0x06, 0x06, 0x07, 0x06, 0x06, 0x08, 0x0a, 0x08, 0x09, 0x09, 0x09, 0x09, 0x08, 0x0a, 0x0a, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0a, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x01, 0x04, 0x05, 0x05, 0x08, 0x07, 0x08, 0x0f, 0x0a, 0x0a, 0x0f, 0x14, 0x0e, 0x0e, 0x0e, 0x14, 0x14, 0x0e, 0x0e, 0x0e, 0x0e, 0x14, 0x11, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x11, 0x11, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x11, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0xff, 0xc0, 0x00, 0x11, 0x08, 0x00, 0x64, 0x00, 0x64, 0x03, 0x01, 0x11, 0x00, 0x02, 0x11, 0x01, 0x03, 0x11, 0x01, 0xff, 0xdd, 0x00, 0x04, 0x00, 0x0d, 0xff, 0xc4, 0x01, 0xa2, 0x00, 0x00, 0x00, 0x07, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x05, 0x03, 0x02, 0x06, 0x01, 0x00, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x01, 0x00, 0x02, 0x02, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x10, 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x02, 0x06, 0x07, 0x03, 0x04, 0x02, 0x06, 0x02, 0x73, 0x01, 0x02, 0x03, 0x11, 0x04, 0x00, 0x05, 0x21, 0x12, 0x31, 0x41, 0x51, 0x06, 0x13, 0x61, 0x22, 0x71, 0x81, 0x14, 0x32, 0x91, 0xa1, 0x07, 0x15, 0xb1, 0x42, 0x23, 0xc1, 0x52, 0xd1, 0xe1, 0x33, 0x16, 0x62, 0xf0, 0x24, 0x72, 0x82, 0xf1, 0x25, 0x43, 0x34, 0x53, 0x92, 0xa2, 0xb2, 0x63, 0x73, 0xc2, 0x35, 0x44, 0x27, 0x93, 0xa3, 0xb3, 0x36, 0x17, 0x54, 0x64, 0x74, 0xc3, 0xd2, 0xe2, 0x08, 0x26, 0x83, 0x09, 0x0a, 0x18, 0x19, 0x84, 0x94, 0x45, 0x46, 0xa4, 0xb4, 0x56, 0xd3, 0x55, 0x28, 0x1a, 0xf2, 0xe3, 0xf3, 0xc4, 0xd4, 0xe4, 0xf4, 0x65, 0x75, 0x85, 0x95, 0xa5, 0xb5, 0xc5, 0xd5, 0xe5, 0xf5, 0x66, 0x76, 0x86, 0x96, 0xa6, 0xb6, 0xc6, 0xd6, 0xe6, 0xf6, 0x37, 0x47, 0x57, 0x67, 0x77, 0x87, 0x97, 0xa7, 0xb7, 0xc7, 0xd7, 0xe7, 0xf7, 0x38, 0x48, 0x58, 0x68, 0x78, 0x88, 0x98, 0xa8, 0xb8, 0xc8, 0xd8, 0xe8, 0xf8, 0x29, 0x39, 0x49, 0x59, 0x69, 0x79, 0x89, 0x99, 0xa9, 0xb9, 0xc9, 0xd9, 0xe9, 0xf9, 0x2a, 0x3a, 0x4a, 0x5a, 0x6a, 0x7a, 0x8a, 0x9a, 0xaa, 0xba, 0xca, 0xda, 0xea, 0xfa, 0x11, 0x00, 0x02, 0x02, 0x01, 0x02, 0x03, 0x05, 0x05, 0x04, 0x05, 0x06, 0x04, 0x08, 0x03, 0x03, 0x6d, 0x01, 0x00, 0x02, 0x11, 0x03, 0x04, 0x21, 0x12, 0x31, 0x41, 0x05, 0x51, 0x13, 0x61, 0x22, 0x06, 0x71, 0x81, 0x91, 0x32, 0xa1, 0xb1, 0xf0, 0x14, 0xc1, 0xd1, 0xe1, 0x23, 0x42, 0x15, 0x52, 0x62, 0x72, 0xf1, 0x33, 0x24, 0x34, 0x43, 0x82, 0x16, 0x92, 0x53, 0x25, 0xa2, 0x63, 0xb2, 0xc2, 0x07, 0x73, 0xd2, 0x35, 0xe2, 0x44, 0x83, 0x17, 0x54, 0x93, 0x08, 0x09, 0x0a, 0x18, 0x19, 0x26, 0x36, 0x45, 0x1a, 0x27, 0x64, 0x74, 0x55, 0x37, 0xf2, 0xa3, 0xb3, 0xc3, 0x28, 0x29, 0xd3, 0xe3, 0xf3, 0x84, 0x94, 0xa4, 0xb4, 0xc4, 0xd4, 0xe4, 0xf4, 0x65, 0x75, 0x85, 0x95, 0xa5, 0xb5, 0xc5, 0xd5, 0xe5, 0xf5, 0x46, 0x56, 0x66, 0x76, 0x86, 0x96, 0xa6, 0xb6, 0xc6, 0xd6, 0xe6, 0xf6, 0x47, 0x57, 0x67, 0x77, 0x87, 0x97, 0xa7, 0xb7, 0xc7, 0xd7, 0xe7, 0xf7, 0x38, 0x48, 0x58, 0x68, 0x78, 0x88, 0x98, 0xa8, 0xb8, 0xc8, 0xd8, 0xe8, 0xf8, 0x39, 0x49, 0x59, 0x69, 0x79, 0x89, 0x99, 0xa9, 0xb9, 0xc9, 0xd9, 0xe9, 0xf9, 0x2a, 0x3a, 0x4a, 0x5a, 0x6a, 0x7a, 0x8a, 0x9a, 0xaa, 0xba, 0xca, 0xda, 0xea, 0xfa, 0xff, 0xda, 0x00, 0x0c, 0x03, 0x01, 0x00, 0x02, 0x11, 0x03, 0x11, 0x00, 0x3f, 0x00, 0xf0, 0x67, 0xa6, 0x5c, 0x0f, 0x01, 0xd4, 0x7e, 0x18, 0x12, 0x98, 0xe9, 0xd6, 0x2d, 0x34, 0x6d, 0x70, 0xdf, 0xdc, 0xa1, 0xe3, 0xec, 0x5b, 0xfb, 0x32, 0x24, 0xb2, 0x01, 0x1f, 0x15, 0xa4, 0x52, 0x4a, 0x82, 0x31, 0xf1, 0xfe, 0xd1, 0x3d, 0x14, 0x64, 0x49, 0x64, 0x22, 0x98, 0xcf, 0xa5, 0x46, 0x6c, 0x16, 0x55, 0x71, 0x56, 0x62, 0x28, 0x07, 0xc5, 0x45, 0x15, 0xa0, 0xc8, 0x89, 0x33, 0xe1, 0x63, 0xd2, 0xd8, 0x34, 0x44, 0x17, 0xa0, 0x2c, 0x4d, 0x16, 0xbb, 0xed, 0xdc, 0xf8, 0x64, 0xc1, 0x6b, 0x31, 0x42, 0x18, 0x8e, 0xc7, 0xb5, 0x2a, 0x7d, 0xb2, 0x56, 0xc5, 0x61, 0x8c, 0xf2, 0xa0, 0x1b, 0x1e, 0x83, 0x0d, 0xa1, 0x63, 0x50, 0x1f, 0x97, 0x7c, 0x2a, 0xa9, 0x1a, 0x9a, 0x86, 0x4f, 0xb4, 0xb4, 0x38, 0x0a, 0xa6, 0x0b, 0xb8, 0x0c, 0x05, 0x14, 0xf8, 0x76, 0x3e, 0x19, 0x14, 0xb6, 0x78, 0xf8, 0x8c, 0x2a, 0xd5, 0x01, 0xdc, 0x6f, 0x8a, 0x1a, 0xe3, 0x8d, 0xab, 0xff, 0xd0, 0xf0, 0xec, 0xe9, 0x15, 0xb5, 0xb9, 0x5a, 0x7c, 0x4c, 0xa2, 0x9e, 0x24, 0xf5, 0xca, 0xc6, 0xe5, 0x99, 0xd9, 0x34, 0x99, 0x04, 0x3a, 0x7d, 0xb5, 0xba, 0xd5, 0x51, 0x63, 0x0e, 0xc7, 0xc5, 0x9b, 0x73, 0xf8, 0xe4, 0x6f, 0x76, 0xca, 0xd9, 0xda, 0x54, 0x6d, 0x72, 0x2e, 0x1a, 0x57, 0x11, 0x44, 0x40, 0x0d, 0x27, 0x7a, 0x0f, 0xd9, 0x5f, 0x12, 0x69, 0x4c, 0x84, 0xcd, 0x36, 0xe3, 0x85, 0xb2, 0xcd, 0x2f, 0x4a, 0x8b, 0x58, 0x36, 0xf6, 0x76, 0xa8, 0x64, 0x64, 0x3c, 0xa4, 0x93, 0xaa, 0x25, 0x3c, 0x49, 0xda, 0xa4, 0xe5, 0x26, 0x54, 0xe4, 0x8c, 0x7c, 0x5c, 0x93, 0x4d, 0x67, 0xc9, 0x3a, 0x6e, 0x9f, 0x13, 0xb4, 0xce, 0xf7, 0x3a, 0x9b, 0xad, 0x52, 0xd6, 0x2a, 0xd1, 0x49, 0xee, 0xc7, 0xf8, 0x64, 0x46, 0x42, 0x4e, 0xcd, 0x92, 0xc2, 0x00, 0xdd, 0x8a, 0x47, 0xe5, 0x69, 0x6e, 0xd4, 0xa4, 0x08, 0x16, 0x83, 0x9c, 0x8c, 0xdd, 0x95, 0x6b, 0xb9, 0xf6, 0xef, 0x97, 0x78, 0x94, 0xe3, 0x78, 0x04, 0xa4, 0xf3, 0xe8, 0xee, 0x64, 0xe1, 0x12, 0x10, 0x05, 0x6a, 0xc7, 0xc0, 0x6f, 0x53, 0xf3, 0xc9, 0x89, 0xb4, 0x9c, 0x4e, 0xb4, 0xf2, 0xd3, 0xde, 0x7a, 0xd2, 0x19, 0x16, 0x38, 0x61, 0x5d, 0xd9, 0x88, 0x05, 0x9c, 0xf4, 0x0a, 0x0f, 0x5f, 0x73, 0x84, 0xe4, 0xa4, 0xc7, 0x0d, 0xa5, 0xf1, 0x59, 0xba, 0x5c, 0x08, 0x98, 0x6f, 0xc8, 0x20, 0xfa, 0x4e, 0x4e, 0xf6, 0x69, 0xe1, 0xa2, 0x89, 0xfd, 0x1f, 0x77, 0x2c, 0xe6, 0xce, 0xd6, 0x17, 0x9a, 0x69, 0xdb, 0xd3, 0x86, 0x18, 0xc1, 0x67, 0x77, 0x26, 0x80, 0x28, 0x1b, 0x93, 0x88, 0x41, 0x0f, 0x40, 0xb0, 0xfc, 0x87, 0xf3, 0x43, 0x98, 0xd7, 0x58, 0x96, 0xdb, 0x4d, 0x91, 0x88, 0xe5, 0x6c, 0x58, 0xdc, 0x5c, 0x2a, 0xf7, 0x2c, 0xb1, 0xfc, 0x20, 0x8f, 0x02, 0xd9, 0x65, 0x06, 0xbe, 0x26, 0x6f, 0xa2, 0x7f, 0xce, 0x3d, 0x69, 0x26, 0xdd, 0x13, 0x52, 0xbf, 0xbd, 0x92, 0x62, 0x59, 0x4c, 0x90, 0xac, 0x50, 0x45, 0x5e, 0xbb, 0x09, 0x03, 0x12, 0x29, 0x84, 0x00, 0xc4, 0xc9, 0x11, 0xff, 0x00, 0x42, 0xe7, 0xa7, 0x7a, 0xd4, 0xfd, 0x21, 0x79, 0xe9, 0x78, 0x71, 0x8b, 0x95, 0x39, 0x75, 0xaf, 0x4e, 0x98, 0x78, 0x42, 0x38, 0xdf, 0xff, 0xd1, 0xf0, 0xe6, 0xa0, 0x58, 0xc8, 0x84, 0x9a, 0xaa, 0x30, 0x55, 0xf9, 0x0a, 0x6f, 0x90, 0x0c, 0xca, 0x72, 0x48, 0xb8, 0x1e, 0x89, 0xa7, 0x23, 0x17, 0x24, 0xff, 0x00, 0x61, 0xb6, 0x54, 0x76, 0x6e, 0x1b, 0xa7, 0xbe, 0x50, 0xf2, 0xc1, 0xd7, 0x4c, 0x52, 0x5e, 0x33, 0x5b, 0xe9, 0x10, 0xf4, 0x54, 0x3c, 0x5e, 0x77, 0xee, 0x49, 0xec, 0x2b, 0xb6, 0x63, 0xe4, 0xc9, 0xc3, 0xef, 0x73, 0xf0, 0xe1, 0x32, 0x1b, 0xf2, 0x7a, 0x05, 0xce, 0xad, 0x65, 0xa1, 0x98, 0xb4, 0x0f, 0x2a, 0x5b, 0x23, 0xeb, 0x12, 0x00, 0x88, 0xb0, 0xa8, 0x66, 0x46, 0x3d, 0xea, 0x7b, 0xfb, 0x9e, 0x99, 0x89, 0xbc, 0x8d, 0x97, 0x3a, 0x34, 0x05, 0x32, 0x5d, 0x1f, 0xc9, 0x1a, 0x8c, 0x36, 0x8c, 0x6f, 0x66, 0xfa, 0xc6, 0xb7, 0x7d, 0xf0, 0x94, 0x04, 0xf0, 0x88, 0xc9, 0xd5, 0x9d, 0x8d, 0x4b, 0x11, 0xd4, 0x9f, 0xbb, 0x25, 0xc5, 0xdc, 0xa2, 0x03, 0x99, 0x4b, 0xbc, 0xf3, 0x0d, 0x97, 0x96, 0x74, 0xe5, 0xf2, 0xb6, 0x80, 0x95, 0xbd, 0x99, 0x15, 0xf5, 0x4b, 0xd2, 0x37, 0x58, 0x46, 0xd4, 0x27, 0xc5, 0xce, 0xc1, 0x7c, 0x30, 0x8e, 0x68, 0x94, 0x7b, 0x9e, 0x6d, 0xe6, 0x7b, 0x9b, 0x5d, 0x3a, 0xd8, 0xdb, 0x32, 0xfa, 0x77, 0x65, 0x15, 0xe4, 0x57, 0xa7, 0x21, 0x55, 0x04, 0x57, 0xef, 0xd8, 0x66, 0x56, 0x38, 0x19, 0x1b, 0xe8, 0xe0, 0x67, 0x98, 0xc7, 0x1a, 0x1c, 0xde, 0x71, 0x71, 0x79, 0x2c, 0xf2, 0xfa, 0x8c, 0x48, 0xec, 0xb5, 0x24, 0x9a, 0x0c, 0xce, 0x75, 0x29, 0xae, 0x8c, 0x67, 0xd4, 0xb5, 0x0b, 0x4b, 0x04, 0x05, 0xef, 0x2e, 0x66, 0x8e, 0x18, 0x08, 0x15, 0xdd, 0x8f, 0x11, 0xb0, 0xeb, 0x4c, 0x04, 0x5b, 0x21, 0x2a, 0x7d, 0x41, 0xe4, 0x4f, 0xcb, 0xcb, 0x5d, 0x12, 0x45, 0xb8, 0xb7, 0x53, 0x71, 0xaa, 0x9f, 0x86, 0x5b, 0xd6, 0x50, 0x4a, 0xed, 0xba, 0x46, 0x77, 0x00, 0x13, 0xd4, 0x8c, 0x85, 0xd3, 0x12, 0x6d, 0xeb, 0x1a, 0x67, 0x95, 0xd9, 0x39, 0x39, 0x50, 0xac, 0xff, 0x00, 0x6f, 0xc4, 0xff, 0x00, 0x1c, 0x81, 0x92, 0xb2, 0x6b, 0x6d, 0x02, 0xdd, 0xbd, 0x36, 0x92, 0x36, 0x2d, 0x1f, 0xc0, 0x2a, 0x0b, 0x28, 0x1b, 0x91, 0x41, 0xf4, 0x9c, 0xb6, 0x25, 0x81, 0x46, 0xfe, 0x81, 0xb5, 0xad, 0x3d, 0xba, 0x57, 0xb7, 0xf9, 0xf6, 0xc9, 0xb0, 0x7f, 0xff, 0xd2, 0xf0, 0xe2, 0x86, 0x95, 0xc4, 0x67, 0x7e, 0x3f, 0x11, 0xf7, 0xa8, 0x19, 0x06, 0x69, 0x8d, 0xca, 0xca, 0x24, 0x8f, 0xd3, 0x52, 0x24, 0x89, 0x47, 0x25, 0x1f, 0xcb, 0x20, 0xf8, 0xb2, 0xb2, 0x76, 0x6e, 0x88, 0x36, 0xf6, 0x6f, 0x2a, 0xc1, 0x6e, 0xfa, 0x45, 0xad, 0xbc, 0x3f, 0x0b, 0x46, 0x81, 0x4d, 0x46, 0xea, 0x7a, 0x9a, 0x83, 0x9a, 0xa9, 0xdd, 0xbb, 0xec, 0x7b, 0x06, 0x5b, 0xe5, 0xcf, 0x2e, 0x69, 0xfa, 0x5c, 0xcd, 0x7b, 0x14, 0x5e, 0xa5, 0xee, 0xf5, 0xb8, 0x7d, 0xdd, 0x99, 0xba, 0xef, 0x91, 0x16, 0x5b, 0x36, 0xb6, 0x65, 0x0d, 0xac, 0xb2, 0x5b, 0xed, 0x34, 0x81, 0x7a, 0xbb, 0x46, 0x40, 0x6a, 0x9e, 0xb4, 0x39, 0x31, 0x13, 0x49, 0xda, 0xd2, 0x9b, 0xed, 0x1e, 0xc4, 0x24, 0xb3, 0x35, 0xb2, 0x88, 0x60, 0x06, 0xe6, 0x56, 0x98, 0x96, 0x79, 0x1e, 0x31, 0x51, 0xc9, 0x8f, 0xcb, 0x00, 0xe6, 0xb3, 0xe4, 0xf9, 0x2b, 0xcc, 0x7a, 0x94, 0xda, 0x96, 0xa9, 0x71, 0x77, 0x70, 0x79, 0xcd, 0x33, 0x97, 0x76, 0x3f, 0xcc, 0xc6, 0xa6, 0x9f, 0x2e, 0x99, 0xb9, 0xc6, 0x2a, 0x21, 0xe6, 0x73, 0xca, 0xe6, 0x4a, 0x51, 0x1a, 0x99, 0x1c, 0x28, 0x04, 0x93, 0xd0, 0x0e, 0xa4, 0xe4, 0xda, 0x5f, 0x50, 0xfe, 0x4a, 0xfe, 0x48, 0xb5, 0xb2, 0xc1, 0xe6, 0x1f, 0x31, 0x7e, 0xef, 0x52, 0x91, 0x43, 0xc3, 0x6e, 0x77, 0xf4, 0x22, 0x6d, 0xbf, 0xe4, 0x63, 0x0e, 0xbf, 0xca, 0x36, 0xeb, 0x5c, 0x84, 0xa5, 0x48, 0x7d, 0x3b, 0x61, 0xa1, 0xdb, 0x5b, 0x2c, 0x71, 0xda, 0x45, 0xc4, 0x28, 0x00, 0x81, 0xdb, 0x31, 0xc9, 0xb4, 0xb2, 0x3b, 0x5d, 0x27, 0xa5, 0x05, 0x1b, 0xc7, 0xdb, 0x10, 0xa9, 0xbd, 0xa6, 0x93, 0x0c, 0x75, 0xe4, 0x39, 0x35, 0x41, 0x3d, 0xc5, 0x06, 0xdb, 0x8e, 0xfd, 0x46, 0x5b, 0x1d, 0x98, 0x95, 0x4f, 0x46, 0xdb, 0xd5, 0xfb, 0x29, 0x5e, 0x9d, 0x0d, 0x32, 0xeb, 0x61, 0x4f, 0xff, 0xd3, 0xf1, 0x46, 0x9a, 0x16, 0x1b, 0x91, 0x71, 0x28, 0xac, 0x4a, 0x14, 0x30, 0x3e, 0x19, 0x54, 0xb9, 0x36, 0xc7, 0x9b, 0x2d, 0xd1, 0x6c, 0x45, 0xe3, 0xdc, 0xde, 0xc8, 0x95, 0x5b, 0x87, 0xf8, 0x41, 0x1d, 0x10, 0x54, 0x01, 0x98, 0x79, 0x25, 0xd1, 0xda, 0xe9, 0xe1, 0xb5, 0x9e, 0xac, 0xeb, 0x42, 0xba, 0x8e, 0xdf, 0x8c, 0x31, 0x21, 0x70, 0xb4, 0x5d, 0xbe, 0xc5, 0x7c, 0x2b, 0xed, 0xe1, 0x94, 0x18, 0xb9, 0x51, 0x3d, 0x03, 0x2c, 0x13, 0x6b, 0xf1, 0x42, 0x6e, 0xe2, 0xb7, 0x12, 0xa0, 0xdd, 0x50, 0x9f, 0x4f, 0x6f, 0xa7, 0x6f, 0xc7, 0x03, 0x61, 0xa0, 0x83, 0xb5, 0xf3, 0x97, 0x98, 0x20, 0x9c, 0x44, 0xea, 0xd0, 0xad, 0x48, 0x64, 0x90, 0x21, 0xd8, 0x9f, 0xa7, 0xa6, 0x44, 0xca, 0x99, 0xc6, 0x36, 0xcb, 0x74, 0x5d, 0x7e, 0x5b, 0xfe, 0x31, 0x6a, 0x31, 0xf3, 0x8c, 0xd0, 0xad, 0x40, 0xa3, 0x1f, 0x7c, 0x44, 0xd6, 0x51, 0xd9, 0xe0, 0x5f, 0x9a, 0x7e, 0x41, 0x9f, 0x40, 0xf3, 0x14, 0xba, 0x85, 0xba, 0x34, 0xba, 0x2d, 0xfb, 0x34, 0xd0, 0xcf, 0x4f, 0xb0, 0xce, 0x6a, 0x51, 0xe9, 0xb0, 0x20, 0xf4, 0xf1, 0x19, 0xb2, 0xc3, 0x90, 0x11, 0x4e, 0x97, 0x55, 0x80, 0x83, 0xc4, 0x17, 0x7e, 0x4c, 0x79, 0x19, 0xfc, 0xd1, 0xe7, 0x78, 0x4b, 0x91, 0x1d, 0xae, 0x92, 0xa6, 0xf6, 0x46, 0x75, 0xe4, 0xad, 0x22, 0x1f, 0xdd, 0xa1, 0x07, 0xb3, 0x1e, 0xfe, 0xd9, 0x92, 0xeb, 0x4b, 0xed, 0xfd, 0x0a, 0xc2, 0x63, 0x27, 0xa4, 0x88, 0x17, 0x60, 0x49, 0x35, 0xdc, 0x8e, 0xa5, 0x7d, 0xab, 0xd3, 0x28, 0x90, 0x50, 0xcd, 0xed, 0x2d, 0xda, 0x15, 0x55, 0x51, 0xf1, 0x1a, 0x0a, 0xf7, 0x39, 0x5d, 0xaa, 0x77, 0x6f, 0x01, 0x8e, 0xa7, 0x7d, 0xfa, 0xff, 0x00, 0x66, 0x10, 0xa8, 0xb8, 0x63, 0x76, 0x90, 0xa8, 0x20, 0x06, 0x56, 0xdb, 0x61, 0xda, 0xbd, 0x4f, 0xcb, 0x24, 0x15, 0x0f, 0xf5, 0x66, 0xe5, 0x5f, 0x4c, 0x53, 0xc3, 0xb7, 0xce, 0x99, 0x6b, 0x17, 0xff, 0xd4, 0xf0, 0xec, 0x57, 0x6f, 0x32, 0xa5, 0xa4, 0x43, 0x76, 0x75, 0xa9, 0xf1, 0x03, 0xfa, 0x64, 0x08, 0x6c, 0x8e, 0xfb, 0x3d, 0x7f, 0xcb, 0x16, 0x2b, 0x3d, 0xbc, 0x16, 0xa3, 0x66, 0x6d, 0x98, 0xfb, 0x1e, 0xb9, 0xac, 0xc8, 0x77, 0xb7, 0x7d, 0x01, 0xb3, 0x37, 0xb8, 0xd3, 0x46, 0x95, 0x68, 0x86, 0xd2, 0x2e, 0x4e, 0xab, 0xf0, 0x23, 0x11, 0x4e, 0x5f, 0xcd, 0x98, 0xe7, 0x25, 0x96, 0x71, 0x83, 0x0f, 0xd6, 0x3c, 0xb9, 0xe7, 0x0d, 0x7c, 0x41, 0x22, 0x5e, 0xb3, 0x20, 0x0c, 0x65, 0x80, 0xc8, 0x63, 0x8e, 0xbb, 0x95, 0xa5, 0x07, 0xeb, 0xcc, 0xac, 0x73, 0x83, 0x4e, 0x5c, 0x59, 0x09, 0xd8, 0xec, 0xc8, 0x57, 0x41, 0xd3, 0x4e, 0x95, 0xa5, 0x5b, 0x4b, 0x6a, 0xcb, 0xab, 0x43, 0x10, 0x4b, 0xeb, 0x85, 0xa2, 0x2c, 0x8e, 0x3f, 0x68, 0x54, 0xf5, 0x00, 0xd3, 0x97, 0x7a, 0x65, 0x79, 0xa6, 0x24, 0x76, 0x6f, 0xd3, 0x62, 0x96, 0x30, 0x78, 0xcb, 0x21, 0xf2, 0xf4, 0x22, 0xce, 0x54, 0x8e, 0x46, 0x26, 0x10, 0x7e, 0x0a, 0xf5, 0xd8, 0xf5, 0x1f, 0x31, 0x98, 0x83, 0x73, 0xb3, 0x91, 0xcd, 0x67, 0xe6, 0x7d, 0xe8, 0x16, 0x69, 0x6f, 0x10, 0x1f, 0x54, 0x9a, 0x37, 0xf5, 0x41, 0x5e, 0x7f, 0x0a, 0x29, 0x62, 0x02, 0xf8, 0x9c, 0xc8, 0x8c, 0x77, 0x6a, 0x99, 0xa0, 0x89, 0xff, 0x00, 0x9c, 0x74, 0xd2, 0xed, 0xed, 0xfc, 0xbb, 0x7b, 0xaa, 0x9a, 0x7d, 0x62, 0xfe, 0x46, 0x2d, 0xfe, 0x4c, 0x51, 0x31, 0x11, 0xa9, 0xf6, 0xef, 0x9b, 0x30, 0x5e, 0x7b, 0x38, 0xdd, 0xf4, 0x7f, 0x95, 0x94, 0xbc, 0x12, 0x43, 0x30, 0x6a, 0xb2, 0xf3, 0x86, 0x40, 0x3e, 0xcb, 0xd7, 0x6a, 0xd7, 0xb1, 0xe9, 0x8f, 0x37, 0x19, 0x97, 0x41, 0x2c, 0x71, 0x20, 0xf5, 0x36, 0x9c, 0x55, 0x78, 0x1d, 0x8a, 0x91, 0xd7, 0x11, 0x14, 0x5a, 0x3e, 0x19, 0x03, 0x10, 0x6b, 0xca, 0xbd, 0x86, 0xf8, 0x9d, 0x95, 0x18, 0x36, 0x65, 0x2e, 0xbc, 0x54, 0x1f, 0xa2, 0x99, 0x00, 0x59, 0x2a, 0x6f, 0x5e, 0x55, 0x15, 0xe9, 0x5f, 0xc3, 0x2f, 0xb6, 0x14, 0xff, 0x00, 0xff, 0xd5, 0xf1, 0x95, 0xfe, 0x80, 0x74, 0x0d, 0x7c, 0xd9, 0x89, 0x3d, 0x78, 0x57, 0x8b, 0xc5, 0x28, 0xe8, 0x55, 0xf7, 0x1f, 0x48, 0xca, 0x38, 0xb8, 0x83, 0x9f, 0x93, 0x07, 0x85, 0x3a, 0x7a, 0x6f, 0x95, 0x66, 0x2b, 0x2c, 0x4c, 0x0d, 0x14, 0x00, 0x3e, 0x9c, 0xc3, 0x98, 0x76, 0xb8, 0x45, 0xbd, 0x02, 0xde, 0x48, 0xee, 0xdc, 0xa0, 0x15, 0xe2, 0x2b, 0xc8, 0x8a, 0x8a, 0xfd, 0x3b, 0x66, 0x3f, 0x00, 0x73, 0x84, 0x2d, 0x36, 0xb5, 0xb5, 0x9e, 0x35, 0x1c, 0x29, 0xc4, 0xfe, 0xc8, 0x04, 0x7f, 0xc4, 0x69, 0x91, 0xe1, 0x67, 0x2c, 0x4a, 0xd2, 0xe9, 0x4e, 0xe3, 0xd4, 0xf4, 0x81, 0x5a, 0x12, 0xc5, 0x41, 0x3f, 0x79, 0x38, 0x9b, 0x60, 0x20, 0x07, 0x34, 0xb0, 0xc9, 0x03, 0x5c, 0x23, 0x03, 0x53, 0x13, 0x56, 0x88, 0xdf, 0x09, 0xda, 0x9b, 0xd3, 0xb6, 0x52, 0x0e, 0xec, 0xe4, 0x29, 0x24, 0xfc, 0xd0, 0xe7, 0x75, 0xe5, 0x57, 0x6b, 0x61, 0xfb, 0xf0, 0xca, 0xaa, 0x57, 0xa8, 0xe6, 0x78, 0x1a, 0x7d, 0xf9, 0x95, 0x8a, 0x5e, 0xa0, 0xe3, 0x67, 0x8f, 0xa0, 0xbd, 0x5b, 0xf2, 0xdf, 0x4a, 0x82, 0xcb, 0x4a, 0xb3, 0xb0, 0xb4, 0x41, 0x0a, 0x70, 0x48, 0xd9, 0x57, 0x60, 0x51, 0x3a, 0x8f, 0xbc, 0xe6, 0x7b, 0xcb, 0xe4, 0x3b, 0xa7, 0x3f, 0x9b, 0x9f, 0x9a, 0xba, 0x77, 0xe5, 0x5f, 0x95, 0x9c, 0x59, 0x94, 0x9f, 0xcd, 0x37, 0x8c, 0xa9, 0xa6, 0xd9, 0x39, 0xaa, 0xd0, 0x7d, 0xa9, 0x1c, 0x03, 0x5e, 0x09, 0xff, 0x00, 0x0c, 0x76, 0xcb, 0x62, 0x2d, 0xa5, 0xf2, 0x85, 0xbf, 0xe7, 0x87, 0xe6, 0xa3, 0x5e, 0x4d, 0xa8, 0xc9, 0xe6, 0x8b, 0xd5, 0x69, 0x5c, 0xb0, 0x4a, 0xab, 0xc4, 0xb5, 0x35, 0x0a, 0xaa, 0xea, 0x40, 0x03, 0xa0, 0xf6, 0xcb, 0x40, 0x4d, 0x3e, 0xdb, 0xff, 0x00, 0x9c, 0x7f, 0xfc, 0xce, 0x4f, 0xcc, 0xbf, 0x26, 0x25, 0xe5, 0xd3, 0x2f, 0xe9, 0xdd, 0x3d, 0xfe, 0xab, 0xa9, 0xaa, 0xd2, 0xa6, 0x40, 0x2a, 0xb2, 0x71, 0x00, 0x01, 0xea, 0x0d, 0xe8, 0x3a, 0x64, 0x25, 0x16, 0x1c, 0x8b, 0xd9, 0x51, 0x39, 0x28, 0x12, 0x51, 0x41, 0xfd, 0xa3, 0xd2, 0xb9, 0x4f, 0x0d, 0x33, 0xb5, 0xf4, 0x87, 0x9d, 0x79, 0x0e, 0xb4, 0xaf, 0x6a, 0xf8, 0xf1, 0xf0, 0xc9, 0xda, 0xbf, 0xff, 0xd6, 0xf2, 0xc6, 0xb5, 0x68, 0x64, 0xd0, 0x6d, 0x35, 0x20, 0x39, 0xcd, 0x13, 0x0f, 0x5e, 0x61, 0xfc, 0x8f, 0x40, 0x8b, 0x5e, 0xe0, 0x66, 0x1c, 0x4f, 0xaa, 0x9d, 0xe6, 0xa6, 0x1e, 0x91, 0x2e, 0xa9, 0x87, 0x95, 0xee, 0x9c, 0xc5, 0x55, 0x34, 0x60, 0x40, 0xae, 0x57, 0x30, 0xd9, 0xa7, 0x95, 0xbd, 0x6f, 0xcb, 0x26, 0x39, 0x40, 0x0d, 0x4e, 0xc0, 0x9f, 0x9e, 0x50, 0x5d, 0xac, 0x79, 0x33, 0x8b, 0xbb, 0x9b, 0x3b, 0x6b, 0x35, 0x48, 0x54, 0x09, 0x29, 0x56, 0x7f, 0xe1, 0x86, 0x72, 0x00, 0x2c, 0x6e, 0xf7, 0x63, 0x3e, 0x63, 0xbd, 0xbd, 0x5d, 0x20, 0x2a, 0xb3, 0xa4, 0x33, 0x48, 0xab, 0x21, 0x43, 0xf1, 0x2c, 0x47, 0xed, 0x1d, 0xbc, 0x73, 0x18, 0x9b, 0x64, 0x28, 0x96, 0x3a, 0xc7, 0x49, 0xb0, 0xf4, 0xcc, 0xe9, 0x73, 0x6c, 0xb4, 0xf8, 0x67, 0x92, 0x32, 0x21, 0x70, 0x7b, 0x89, 0x05, 0x57, 0xef, 0x38, 0x28, 0x94, 0x4a, 0x7d, 0x13, 0x7d, 0x6a, 0xd3, 0x4c, 0xb8, 0xf2, 0xc3, 0xc8, 0x2e, 0x03, 0xf3, 0xe2, 0x7d, 0x33, 0xb7, 0xc5, 0xcc, 0x71, 0x03, 0xc6, 0xb9, 0x64, 0x06, 0xe2, 0x9a, 0xf2, 0x4f, 0xd2, 0x6d, 0xe9, 0xfe, 0x41, 0x45, 0x5b, 0x18, 0x66, 0xa5, 0x64, 0x09, 0xf4, 0xd5, 0xb7, 0xcd, 0x93, 0xc7, 0xcf, 0x9b, 0xe5, 0x6f, 0xf9, 0xc8, 0x0d, 0x56, 0xeb, 0x59, 0xfc, 0xce, 0xd5, 0x12, 0x61, 0xc4, 0x69, 0xe9, 0x0d, 0xa4, 0x4b, 0xfe, 0x48, 0x40, 0xd5, 0x3e, 0xe4, 0xb6, 0x64, 0x8e, 0x4c, 0x02, 0x61, 0x65, 0xa0, 0x14, 0xb4, 0xb6, 0xb0, 0xb1, 0xb6, 0xb2, 0x97, 0xcb, 0xf1, 0x5a, 0x2d, 0xc6, 0xa5, 0xac, 0xb4, 0x70, 0x5d, 0xc7, 0x3d, 0xc1, 0x51, 0x24, 0x91, 0xc9, 0x31, 0x75, 0x6b, 0x70, 0x9f, 0x14, 0x68, 0x01, 0x46, 0xe4, 0xb5, 0xa3, 0x17, 0xcb, 0x40, 0x61, 0x6f, 0x47, 0xff, 0x00, 0x9c, 0x3a, 0x8f, 0x5b, 0x4f, 0x3c, 0x6b, 0xb7, 0xfa, 0x30, 0x91, 0x3c, 0xa4, 0xb1, 0x95, 0xb9, 0x82, 0x42, 0x0a, 0xbc, 0x8e, 0xe4, 0xdb, 0xa9, 0xef, 0xc9, 0x17, 0x91, 0x24, 0x7c, 0xb2, 0x05, 0x64, 0xfb, 0x75, 0x64, 0x32, 0x39, 0x69, 0x5b, 0x9c, 0xad, 0xb9, 0xdb, 0xa7, 0xb5, 0x3b, 0x53, 0x2a, 0x21, 0x41, 0x44, 0xf3, 0x8b, 0x8f, 0x2e, 0x43, 0x9d, 0x2b, 0xd4, 0x57, 0x23, 0x41, 0x36, 0xff, 0x00, 0xff, 0xd7, 0xf0, 0xc0, 0xd5, 0xb5, 0x11, 0x64, 0xb6, 0x3f, 0x59, 0x90, 0xd9, 0xab, 0x06, 0xf4, 0x79, 0x7c, 0x3b, 0x74, 0xc8, 0x08, 0x8b, 0xb6, 0xe3, 0x96, 0x55, 0x57, 0xb3, 0x3e, 0xf2, 0x35, 0xc7, 0xd6, 0x0b, 0x45, 0x5d, 0xdc, 0x8a, 0x7d, 0xd9, 0x8d, 0x94, 0x3b, 0x3d, 0x1c, 0x9e, 0xc3, 0xe5, 0xc3, 0x2c, 0x7c, 0xc5, 0x0f, 0xee, 0xdb, 0x8b, 0x0c, 0xc4, 0x26, 0x9d, 0xa0, 0x9a, 0x7d, 0x2c, 0xe5, 0xe4, 0x55, 0x7f, 0xee, 0xc1, 0x15, 0x04, 0xd0, 0x12, 0x3c, 0x72, 0x89, 0x1b, 0x2c, 0xcc, 0xa8, 0x2a, 0x8b, 0x87, 0xbb, 0x63, 0x1a, 0x28, 0x65, 0xf0, 0xed, 0xf2, 0xc3, 0xc2, 0x0a, 0x06, 0x4a, 0x46, 0xc7, 0xa5, 0xa3, 0x59, 0xc8, 0xb2, 0xc7, 0x45, 0x22, 0x9c, 0x14, 0x54, 0x10, 0x46, 0xf5, 0x1d, 0x32, 0x5c, 0x14, 0x14, 0xe4, 0x32, 0x2f, 0x3a, 0xf3, 0xb6, 0x90, 0x9a, 0x6d, 0xae, 0x9f, 0x3d, 0xab, 0xb8, 0x8a, 0x3b, 0xf8, 0x39, 0x44, 0x58, 0xf0, 0x08, 0xd5, 0x14, 0xa5, 0x7b, 0x65, 0x98, 0x8e, 0xfb, 0xb5, 0x67, 0x87, 0xa5, 0xef, 0x5e, 0x44, 0x96, 0x35, 0xb5, 0xb6, 0x59, 0x36, 0xfd, 0xd8, 0xa0, 0xf1, 0x20, 0x53, 0x33, 0xc0, 0x79, 0x59, 0x73, 0x7c, 0xd7, 0xf9, 0xfb, 0xa2, 0xcd, 0x67, 0xf9, 0xa7, 0x7b, 0x72, 0xf1, 0x71, 0x83, 0x53, 0x86, 0x0b, 0x98, 0x24, 0x22, 0x8a, 0xcc, 0x88, 0x23, 0x7f, 0xb8, 0xae, 0xf9, 0x7c, 0x50, 0x1e, 0x5f, 0x7c, 0x48, 0x21, 0x44, 0x6b, 0xce, 0x9b, 0xb0, 0x1b, 0x9e, 0xf5, 0xaf, 0x8e, 0x4d, 0x5f, 0x7a, 0x7f, 0xce, 0x34, 0xf9, 0x5d, 0x3c, 0xa3, 0xf9, 0x69, 0x63, 0xa9, 0x3c, 0x27, 0xeb, 0xda, 0xe1, 0x37, 0xd7, 0x2e, 0xaa, 0xdb, 0x06, 0xda, 0x30, 0x49, 0xfe, 0x54, 0x03, 0x03, 0x49, 0xdc, 0xb3, 0xaf, 0x38, 0xfe, 0x6a, 0xf9, 0x47, 0xc9, 0x3a, 0x74, 0x97, 0xfa, 0xf6, 0xaf, 0x15, 0x85, 0xb8, 0x75, 0x89, 0xb8, 0x87, 0x9a, 0x72, 0xee, 0x2a, 0x14, 0x24, 0x60, 0xb1, 0xa8, 0xdf, 0x07, 0x0b, 0x2d, 0xcb, 0xcf, 0x7f, 0xe8, 0x6a, 0xff, 0x00, 0x26, 0xbd, 0x6a, 0x7f, 0x89, 0x2f, 0xf8, 0x52, 0x9e, 0xb7, 0xe8, 0xb9, 0xb8, 0x57, 0xc2, 0x95, 0xe9, 0x8f, 0x08, 0x5a, 0x2f, 0xff, 0xd0, 0xf0, 0x4d, 0x40, 0xaa, 0xd7, 0x00, 0x64, 0xcb, 0x3c, 0x97, 0xa8, 0xb5, 0x9e, 0xa3, 0x1a, 0xd6, 0x84, 0x95, 0x3f, 0x45, 0x72, 0x9c, 0xa2, 0xc3, 0x99, 0xa5, 0x9d, 0x49, 0xf4, 0x17, 0x97, 0xaf, 0x63, 0x17, 0x52, 0x6f, 0xf0, 0xc8, 0x43, 0x6f, 0x9a, 0xe9, 0x07, 0x70, 0x0e, 0xec, 0x83, 0x51, 0x44, 0xb8, 0x61, 0x1a, 0x9e, 0x11, 0xd3, 0x91, 0x60, 0x68, 0x6b, 0xd3, 0x31, 0x4f, 0x36, 0xd3, 0x4c, 0x52, 0xef, 0x4c, 0xd5, 0x0c, 0xc4, 0x69, 0xda, 0x94, 0xc8, 0x3a, 0xf0, 0x66, 0x07, 0x73, 0xe0, 0x40, 0xfd, 0x79, 0x93, 0x12, 0x1c, 0x9c, 0x32, 0xc7, 0xfc, 0x41, 0x33, 0xd2, 0xb4, 0x6f, 0x38, 0x98, 0x65, 0x76, 0xbf, 0x69, 0x42, 0xd0, 0xaa, 0xc9, 0xde, 0x95, 0xad, 0x28, 0x46, 0x4e, 0xac, 0x39, 0x77, 0x80, 0x11, 0xbf, 0xd8, 0xc7, 0x7c, 0xe1, 0xa5, 0xf9, 0x92, 0x4d, 0x32, 0x5b, 0x8b, 0x93, 0x27, 0xa7, 0x68, 0x56, 0xe2, 0x45, 0xda, 0x85, 0x61, 0x6e, 0x67, 0xad, 0x6b, 0xb0, 0x38, 0xc2, 0x81, 0xe4, 0xc7, 0x52, 0x31, 0x1c, 0x67, 0x86, 0x5b, 0xbd, 0x37, 0xca, 0x7a, 0x94, 0xb1, 0x69, 0xb6, 0x2e, 0xb7, 0x15, 0x48, 0xc2, 0xb4, 0x52, 0x53, 0xac, 0x32, 0xaf, 0xb1, 0xed, 0x9b, 0x10, 0x36, 0x78, 0x5c, 0x9f, 0x51, 0x64, 0x1f, 0x98, 0x3e, 0x58, 0xb6, 0xfc, 0xc8, 0xf2, 0xe5, 0xbc, 0x68, 0x52, 0x2d, 0x5a, 0xd1, 0x84, 0xb6, 0xf3, 0x95, 0x0e, 0xc0, 0x85, 0xe2, 0xcb, 0xd8, 0xd1, 0xbb, 0xe4, 0xc1, 0xa6, 0x97, 0xce, 0x17, 0x5f, 0x95, 0xde, 0x6d, 0xb6, 0xbe, 0xb7, 0x69, 0x34, 0xf3, 0x3c, 0x72, 0xcf, 0xe8, 0xa3, 0x45, 0x49, 0x95, 0x4a, 0x90, 0x3e, 0x35, 0x5a, 0x95, 0x1d, 0xfe, 0x21, 0x93, 0x4d, 0xbe, 0xd2, 0xd2, 0xf5, 0x8b, 0xbd, 0x32, 0x2d, 0x3f, 0x4c, 0x9a, 0xe4, 0xca, 0x9e, 0x90, 0x85, 0x65, 0x55, 0x08, 0x85, 0x91, 0x01, 0x3b, 0x0a, 0x05, 0xe9, 0xb0, 0xc0, 0x5a, 0xc3, 0xcd, 0x3f, 0x3b, 0x7f, 0x26, 0xec, 0xff, 0x00, 0x35, 0x6d, 0x6d, 0xb5, 0x3d, 0x16, 0xfe, 0x0d, 0x3b, 0xcd, 0x96, 0x01, 0x92, 0x46, 0x9e, 0xa2, 0x0b, 0xc8, 0xb7, 0x28, 0x92, 0x71, 0xfb, 0x2e, 0xa7, 0xec, 0x3d, 0x0f, 0xc2, 0x68, 0x71, 0x05, 0x95, 0xd3, 0xe7, 0x9f, 0xfa, 0x16, 0x2f, 0xcd, 0x7f, 0x43, 0xd6, 0xfa, 0xa5, 0x97, 0xab, 0xeb, 0x7a, 0x5f, 0x55, 0xfa, 0xec, 0x5e, 0xaf, 0x0f, 0xf7, 0xed, 0x2b, 0x4e, 0x15, 0xff, 0x00, 0x65, 0xdf, 0x8e, 0x14, 0xf1, 0xbf, 0xff, 0xd1, 0xf0, 0x5a, 0xa7, 0x18, 0x5e, 0x56, 0x1f, 0x68, 0x71, 0x5f, 0xa7, 0xbe, 0x2a, 0x98, 0xdb, 0xfa, 0x90, 0x24, 0x37, 0xb0, 0xfd, 0xb8, 0xa8, 0x58, 0x78, 0xae, 0x43, 0xc9, 0xb4, 0x6d, 0xbb, 0xda, 0x3c, 0xa1, 0xad, 0x43, 0xa8, 0xda, 0xc5, 0x2a, 0x3d, 0x26, 0x5a, 0x02, 0x2b, 0xbe, 0x60, 0x64, 0x8d, 0x17, 0x6f, 0x8b, 0x20, 0x90, 0x7a, 0x3c, 0x32, 0x8b, 0xa8, 0x02, 0xf3, 0xfd, 0xe0, 0x1b, 0x11, 0x98, 0x66, 0x3b, 0xb9, 0x62, 0x54, 0x83, 0x36, 0xf2, 0xa4, 0xe4, 0x29, 0x34, 0xeb, 0xc8, 0x74, 0xae, 0x0d, 0xc3, 0x65, 0x82, 0x13, 0x6b, 0x57, 0xba, 0x54, 0xe4, 0x8c, 0x41, 0x1b, 0x75, 0xa7, 0xe0, 0x72, 0x5c, 0x4c, 0x84, 0x50, 0x5a, 0xb3, 0xdd, 0xdd, 0xc3, 0x24, 0x33, 0xb1, 0x60, 0xe0, 0x86, 0x52, 0x45, 0x38, 0xd2, 0x87, 0x24, 0x26, 0x6d, 0x8c, 0xe1, 0x41, 0x25, 0xfc, 0xa3, 0xd7, 0x2f, 0x6f, 0x3c, 0xbf, 0x73, 0xa5, 0xb2, 0x2c, 0xd1, 0x69, 0x17, 0x2f, 0x6b, 0x14, 0x8c, 0x0f, 0x21, 0x0d, 0x79, 0x46, 0x09, 0x15, 0xed, 0xb7, 0x4e, 0xd9, 0xb9, 0x8b, 0xcb, 0xe4, 0xa2, 0x5e, 0xa3, 0xa6, 0xdf, 0x6a, 0x36, 0xe4, 0xcd, 0x69, 0x1c, 0x4e, 0x84, 0x7c, 0x76, 0xab, 0x21, 0x67, 0xa8, 0xa7, 0xd9, 0xf8, 0x4d, 0x2b, 0xf3, 0xc3, 0x4d, 0x49, 0x57, 0x98, 0x75, 0x6f, 0x31, 0xda, 0xf9, 0xa3, 0x4b, 0xfd, 0x1f, 0x69, 0x1d, 0xae, 0xa1, 0xa9, 0x7e, 0xee, 0xe6, 0xd2, 0x79, 0x18, 0xf3, 0xb5, 0x1f, 0xee, 0xd9, 0x0a, 0x01, 0x4e, 0x3f, 0xb3, 0x4d, 0xf2, 0x9c, 0xb9, 0x04, 0x05, 0xb7, 0xe2, 0x87, 0x1e, 0xdd, 0x19, 0x3e, 0xaf, 0x6b, 0xae, 0xcb, 0x6d, 0x13, 0x0d, 0x45, 0xa2, 0x8e, 0x06, 0xe5, 0x13, 0x2a, 0x02, 0x01, 0x5e, 0x82, 0xb5, 0x04, 0xe6, 0x11, 0xd4, 0xcd, 0xda, 0x43, 0x49, 0x8e, 0xb7, 0xdc, 0xb1, 0x51, 0xe6, 0x4d, 0x76, 0xd2, 0x61, 0x15, 0xaa, 0x4b, 0xa8, 0xc9, 0x6e, 0x49, 0x79, 0x20, 0xe6, 0x8c, 0x49, 0xad, 0x43, 0x16, 0xe4, 0xa7, 0xaf, 0x43, 0xd3, 0x26, 0x35, 0x75, 0xcd, 0xa8, 0xe8, 0x87, 0x46, 0xbf, 0xc7, 0x9a, 0xff, 0x00, 0xd6, 0xbf, 0x48, 0xfe, 0x88, 0xfd, 0xe7, 0x0f, 0xab, 0xfa, 0x3f, 0x58, 0x7f, 0x5f, 0x8d, 0x3f, 0x9f, 0xa7, 0x5e, 0xd4, 0xc3, 0xf9, 0xd1, 0x7c, 0xb6, 0x47, 0xe4, 0x3a, 0x5b, 0xff, 0xd2, 0xf0, 0xb7, 0xa6, 0x1e, 0xdf, 0xd3, 0xf6, 0xa5, 0x71, 0x54, 0xdb, 0x4b, 0x80, 0x3c, 0x42, 0x26, 0xee, 0x29, 0xbe, 0x51, 0x23, 0x4e, 0x44, 0x05, 0x84, 0x45, 0xa5, 0xd5, 0xf7, 0x97, 0x2e, 0xfd, 0x6b, 0x6a, 0x98, 0x09, 0xab, 0xc7, 0xfc, 0x46, 0x3b, 0x4c, 0x26, 0x32, 0x30, 0x3e, 0x4f, 0x49, 0xd0, 0xfc, 0xfb, 0x05, 0xd4, 0x4a, 0x7d, 0x40, 0xac, 0x3a, 0x8e, 0x84, 0x1c, 0xc5, 0x96, 0x2a, 0x73, 0xe1, 0x9c, 0x16, 0x6d, 0xa5, 0x79, 0x86, 0xd6, 0xec, 0x80, 0x5a, 0xa0, 0xf5, 0xca, 0xcc, 0x5c, 0xa1, 0x2b, 0x1b, 0x26, 0x30, 0x6a, 0x31, 0x46, 0xcf, 0x1c, 0x87, 0x94, 0x64, 0x9e, 0x3d, 0xb6, 0xf0, 0xca, 0xa8, 0x39, 0x51, 0x99, 0x42, 0x6b, 0x1a, 0xc5, 0xa5, 0xa5, 0x94, 0xf7, 0x92, 0xc8, 0xaa, 0xb1, 0x23, 0x30, 0x04, 0xf8, 0x0e, 0x9f, 0x4e, 0x4a, 0x11, 0xb2, 0xd5, 0x9b, 0x25, 0x06, 0x1b, 0xff, 0x00, 0x38, 0xfd, 0xad, 0xdf, 0xda, 0xf9, 0xa2, 0xfe, 0xc5, 0x42, 0xbe, 0x9b, 0x7f, 0x0b, 0xdd, 0xdd, 0x07, 0xaf, 0x14, 0x68, 0xd8, 0x71, 0x6d, 0xbb, 0x90, 0xfc, 0x73, 0x6e, 0xf2, 0xf2, 0xdd, 0xf4, 0xad, 0xa6, 0xab, 0x6d, 0x69, 0x14, 0xfa, 0xee, 0xa0, 0xe2, 0x0b, 0x0d, 0x39, 0x19, 0xfe, 0x11, 0xc5, 0x1a, 0x4a, 0x1d, 0x8f, 0x73, 0x4f, 0xf8, 0x96, 0x0b, 0x40, 0x8d, 0xec, 0xf3, 0x6d, 0x3f, 0x52, 0xba, 0xd6, 0x35, 0x8b, 0xbf, 0x36, 0x6a, 0x5f, 0x0d, 0xc5, 0xdc, 0xa8, 0xb6, 0xa8, 0x7a, 0xc5, 0x6c, 0x9b, 0x22, 0x0f, 0xa3, 0x73, 0x9a, 0xbc, 0xb3, 0xe2, 0x36, 0xed, 0xb1, 0x43, 0x80, 0x53, 0xd0, 0xa7, 0xd4, 0x44, 0xfa, 0x7a, 0xda, 0x83, 0xbd, 0x3e, 0x2f, 0xa7, 0x2b, 0xad, 0x9b, 0xb8, 0x8d, 0xa8, 0xe8, 0x91, 0xdb, 0xfa, 0x2d, 0x6f, 0xc3, 0x8a, 0x2d, 0x56, 0xa3, 0xad, 0x4f, 0x5c, 0xa4, 0x0d, 0xdc, 0xa3, 0xca, 0xd0, 0xbf, 0xa1, 0xe3, 0xfa, 0xe7, 0x0f, 0xf2, 0xb9, 0x57, 0xbf, 0x1a, 0xe4, 0xb8, 0x57, 0xc5, 0xdd, 0xff, 0xd3, 0xf0, 0xcc, 0x5d, 0x7b, 0x70, 0xc5, 0x53, 0x6d, 0x2f, 0xd5, 0xe4, 0x69, 0xfd, 0xdf, 0xec, 0xd7, 0xad, 0x7d, 0xb2, 0x8c, 0x8d, 0xd8, 0xed, 0x91, 0x9f, 0x43, 0xea, 0xe7, 0xeb, 0x94, 0xad, 0x3e, 0x1e, 0x95, 0xfc, 0x72, 0x81, 0x7d, 0x1c, 0x9d, 0xba, 0xb1, 0x7b, 0xdf, 0xa9, 0x7a, 0xdf, 0xee, 0x2f, 0xd4, 0xfa, 0xe7, 0xed, 0x7a, 0x7f, 0xdd, 0xff, 0x00, 0xb2, 0xae, 0x64, 0x0b, 0xea, 0xe3, 0x9a, 0xbf, 0x4a, 0x6f, 0xa4, 0xff, 0x00, 0x89, 0xbd, 0x45, 0xfa, 0xb5, 0x79, 0xf7, 0xeb, 0xc7, 0xe9, 0xae, 0x57, 0x2e, 0x17, 0x23, 0x1f, 0x89, 0xd1, 0x99, 0x8f, 0xf1, 0xa7, 0x11, 0xcf, 0xd3, 0xf5, 0x29, 0xb5, 0x6b, 0xd3, 0xe8, 0xcc, 0x7f, 0x45, 0xb9, 0xa3, 0xc5, 0x62, 0xbe, 0x68, 0xff, 0x00, 0x15, 0xfd, 0x4c, 0xfe, 0x90, 0xaf, 0xd4, 0xab, 0xf1, 0x7a, 0x7f, 0x62, 0x9d, 0xab, 0xdf, 0x32, 0xb1, 0x70, 0x5e, 0xdc, 0xdc, 0x2d, 0x47, 0x8b, 0x5e, 0xae, 0x4c, 0xbf, 0xf2, 0x37, 0x9f, 0x3d, 0x5b, 0xd2, 0xff, 0x00, 0x8e, 0x87, 0xee, 0x29, 0x5a, 0xf2, 0xf4, 0xaa, 0xd4, 0xa5, 0x36, 0xa7, 0x3a, 0x57, 0xfd, 0x8e, 0x64, 0x3a, 0xf2, 0xf6, 0xbf, 0xcc, 0x7f, 0x5b, 0xfc, 0x23, 0xa7, 0xfe, 0x8e, 0xff, 0x00, 0x8e, 0x37, 0xd6, 0x63, 0xfa, 0xe5, 0x2b, 0xcb, 0x87, 0xec, 0xd6, 0xbd, 0xb9, 0x7d, 0xac, 0xc7, 0xcd, 0x7c, 0x2d, 0xf8, 0x2b, 0x89, 0x26, 0x8f, 0xd4, 0xfa, 0x94, 0x3e, 0x85, 0x29, 0xc9, 0x69, 0xfc, 0x33, 0x58, 0x5d, 0x9c, 0x79, 0xb2, 0xbb, 0x0f, 0xac, 0x7a, 0x2b, 0xea, 0x75, 0xef, 0x92, 0x0c, 0x53, 0x3d, 0x2f, 0xd4, 0xfa, 0xbb, 0xfa, 0x74, 0xf5, 0x39, 0x9a, 0xd7, 0xe7, 0x80, 0x53, 0x79, 0xba, 0x5b, 0xfe, 0x97, 0xfa, 0x4b, 0xfc, 0xba, 0x7f, 0xb1, 0xc7, 0xab, 0x1e, 0x8f, 0xff, 0xd9
+};
+
+#endif  // RTC_BASE_TESTBASE64_H_
diff --git a/rtc_base/testclient.cc b/rtc_base/testclient.cc
new file mode 100644
index 0000000..585db77
--- /dev/null
+++ b/rtc_base/testclient.cc
@@ -0,0 +1,171 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/testclient.h"
+
+#include "rtc_base/gunit.h"
+#include "rtc_base/ptr_util.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/timeutils.h"
+
+namespace rtc {
+
+// DESIGN: Each packet received is put it into a list of packets.
+//         Callers can retrieve received packets from any thread by calling
+//         NextPacket.
+
+TestClient::TestClient(std::unique_ptr<AsyncPacketSocket> socket)
+    : TestClient(std::move(socket), nullptr) {}
+
+TestClient::TestClient(std::unique_ptr<AsyncPacketSocket> socket,
+                       FakeClock* fake_clock)
+    : fake_clock_(fake_clock),
+      socket_(std::move(socket)),
+      prev_packet_timestamp_(-1) {
+  socket_->SignalReadPacket.connect(this, &TestClient::OnPacket);
+  socket_->SignalReadyToSend.connect(this, &TestClient::OnReadyToSend);
+}
+
+TestClient::~TestClient() {}
+
+bool TestClient::CheckConnState(AsyncPacketSocket::State state) {
+  // Wait for our timeout value until the socket reaches the desired state.
+  int64_t end = TimeAfter(kTimeoutMs);
+  while (socket_->GetState() != state && TimeUntil(end) > 0) {
+    AdvanceTime(1);
+  }
+  return (socket_->GetState() == state);
+}
+
+int TestClient::Send(const char* buf, size_t size) {
+  rtc::PacketOptions options;
+  return socket_->Send(buf, size, options);
+}
+
+int TestClient::SendTo(const char* buf, size_t size,
+                       const SocketAddress& dest) {
+  rtc::PacketOptions options;
+  return socket_->SendTo(buf, size, dest, options);
+}
+
+std::unique_ptr<TestClient::Packet> TestClient::NextPacket(int timeout_ms) {
+  // If no packets are currently available, we go into a get/dispatch loop for
+  // at most timeout_ms.  If, during the loop, a packet arrives, then we can
+  // stop early and return it.
+
+  // Note that the case where no packet arrives is important.  We often want to
+  // test that a packet does not arrive.
+
+  // Note also that we only try to pump our current thread's message queue.
+  // Pumping another thread's queue could lead to messages being dispatched from
+  // the wrong thread to non-thread-safe objects.
+
+  int64_t end = TimeAfter(timeout_ms);
+  while (TimeUntil(end) > 0) {
+    {
+      CritScope cs(&crit_);
+      if (packets_.size() != 0) {
+        break;
+      }
+    }
+    AdvanceTime(1);
+  }
+
+  // Return the first packet placed in the queue.
+  std::unique_ptr<Packet> packet;
+  CritScope cs(&crit_);
+  if (packets_.size() > 0) {
+    packet = std::move(packets_.front());
+    packets_.erase(packets_.begin());
+  }
+
+  return packet;
+}
+
+bool TestClient::CheckNextPacket(const char* buf, size_t size,
+                                 SocketAddress* addr) {
+  bool res = false;
+  std::unique_ptr<Packet> packet = NextPacket(kTimeoutMs);
+  if (packet) {
+    res = (packet->size == size && memcmp(packet->buf, buf, size) == 0 &&
+           CheckTimestamp(packet->packet_time.timestamp));
+    if (addr)
+      *addr = packet->addr;
+  }
+  return res;
+}
+
+bool TestClient::CheckTimestamp(int64_t packet_timestamp) {
+  bool res = true;
+  if (packet_timestamp == -1) {
+    res = false;
+  }
+  if (prev_packet_timestamp_ != -1) {
+    if (packet_timestamp < prev_packet_timestamp_) {
+      res = false;
+    }
+  }
+  prev_packet_timestamp_ = packet_timestamp;
+  return res;
+}
+
+void TestClient::AdvanceTime(int ms) {
+  // If the test is using a fake clock, we must advance the fake clock to
+  // advance time. Otherwise, ProcessMessages will work.
+  if (fake_clock_) {
+    SIMULATED_WAIT(false, ms, *fake_clock_);
+  } else {
+    Thread::Current()->ProcessMessages(1);
+  }
+}
+
+bool TestClient::CheckNoPacket() {
+  return NextPacket(kNoPacketTimeoutMs) == nullptr;
+}
+
+int TestClient::GetError() {
+  return socket_->GetError();
+}
+
+int TestClient::SetOption(Socket::Option opt, int value) {
+  return socket_->SetOption(opt, value);
+}
+
+void TestClient::OnPacket(AsyncPacketSocket* socket, const char* buf,
+                          size_t size, const SocketAddress& remote_addr,
+                          const PacketTime& packet_time) {
+  CritScope cs(&crit_);
+  packets_.push_back(MakeUnique<Packet>(remote_addr, buf, size, packet_time));
+}
+
+void TestClient::OnReadyToSend(AsyncPacketSocket* socket) {
+  ++ready_to_send_count_;
+}
+
+TestClient::Packet::Packet(const SocketAddress& a,
+                           const char* b,
+                           size_t s,
+                           const PacketTime& packet_time)
+    : addr(a), buf(0), size(s), packet_time(packet_time) {
+  buf = new char[size];
+  memcpy(buf, b, size);
+}
+
+TestClient::Packet::Packet(const Packet& p)
+    : addr(p.addr), buf(0), size(p.size), packet_time(p.packet_time) {
+  buf = new char[size];
+  memcpy(buf, p.buf, size);
+}
+
+TestClient::Packet::~Packet() {
+  delete[] buf;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/testclient.h b/rtc_base/testclient.h
new file mode 100644
index 0000000..c0dbe65
--- /dev/null
+++ b/rtc_base/testclient.h
@@ -0,0 +1,114 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_TESTCLIENT_H_
+#define RTC_BASE_TESTCLIENT_H_
+
+#include <memory>
+#include <vector>
+#include "rtc_base/asyncudpsocket.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/fakeclock.h"
+
+namespace rtc {
+
+// A simple client that can send TCP or UDP data and check that it receives
+// what it expects to receive. Useful for testing server functionality.
+class TestClient : public sigslot::has_slots<> {
+ public:
+  // Records the contents of a packet that was received.
+  struct Packet {
+    Packet(const SocketAddress& a,
+           const char* b,
+           size_t s,
+           const PacketTime& packet_time);
+    Packet(const Packet& p);
+    virtual ~Packet();
+
+    SocketAddress addr;
+    char*  buf;
+    size_t size;
+    PacketTime packet_time;
+  };
+
+  // Default timeout for NextPacket reads.
+  static const int kTimeoutMs = 5000;
+
+  // Creates a client that will send and receive with the given socket and
+  // will post itself messages with the given thread.
+  explicit TestClient(std::unique_ptr<AsyncPacketSocket> socket);
+  // Create a test client that will use a fake clock. NextPacket needs to wait
+  // for a packet to be received, and thus it needs to advance the fake clock
+  // if the test is using one, rather than just sleeping.
+  TestClient(std::unique_ptr<AsyncPacketSocket> socket, FakeClock* fake_clock);
+  ~TestClient() override;
+
+  SocketAddress address() const { return socket_->GetLocalAddress(); }
+  SocketAddress remote_address() const { return socket_->GetRemoteAddress(); }
+
+  // Checks that the socket moves to the specified connect state.
+  bool CheckConnState(AsyncPacketSocket::State state);
+
+  // Checks that the socket is connected to the remote side.
+  bool CheckConnected() {
+    return CheckConnState(AsyncPacketSocket::STATE_CONNECTED);
+  }
+
+  // Sends using the clients socket.
+  int Send(const char* buf, size_t size);
+
+  // Sends using the clients socket to the given destination.
+  int SendTo(const char* buf, size_t size, const SocketAddress& dest);
+
+  // Returns the next packet received by the client or null if none is received
+  // within the specified timeout.
+  std::unique_ptr<Packet> NextPacket(int timeout_ms);
+
+  // Checks that the next packet has the given contents. Returns the remote
+  // address that the packet was sent from.
+  bool CheckNextPacket(const char* buf, size_t len, SocketAddress* addr);
+
+  // Checks that no packets have arrived or will arrive in the next second.
+  bool CheckNoPacket();
+
+  int GetError();
+  int SetOption(Socket::Option opt, int value);
+
+  bool ready_to_send() const { return ready_to_send_count() > 0; }
+
+  // How many times SignalReadyToSend has been fired.
+  int ready_to_send_count() const { return ready_to_send_count_; }
+
+ private:
+  // Timeout for reads when no packet is expected.
+  static const int kNoPacketTimeoutMs = 1000;
+  // Workaround for the fact that AsyncPacketSocket::GetConnState doesn't exist.
+  Socket::ConnState GetState();
+  // Slot for packets read on the socket.
+  void OnPacket(AsyncPacketSocket* socket, const char* buf, size_t len,
+                const SocketAddress& remote_addr,
+                const PacketTime& packet_time);
+  void OnReadyToSend(AsyncPacketSocket* socket);
+  bool CheckTimestamp(int64_t packet_timestamp);
+  void AdvanceTime(int ms);
+
+  FakeClock* fake_clock_ = nullptr;
+  CriticalSection crit_;
+  std::unique_ptr<AsyncPacketSocket> socket_;
+  std::vector<std::unique_ptr<Packet>> packets_;
+  int ready_to_send_count_ = 0;
+  int64_t prev_packet_timestamp_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(TestClient);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_TESTCLIENT_H_
diff --git a/rtc_base/testclient_unittest.cc b/rtc_base/testclient_unittest.cc
new file mode 100644
index 0000000..1d1d3f2
--- /dev/null
+++ b/rtc_base/testclient_unittest.cc
@@ -0,0 +1,96 @@
+/*
+ *  Copyright 2006 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/testclient.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/nethelpers.h"
+#include "rtc_base/physicalsocketserver.h"
+#include "rtc_base/ptr_util.h"
+#include "rtc_base/testechoserver.h"
+#include "rtc_base/thread.h"
+
+using namespace rtc;
+
+#define MAYBE_SKIP_IPV4                        \
+  if (!HasIPv4Enabled()) {                     \
+    RTC_LOG(LS_INFO) << "No IPv4... skipping"; \
+    return;                                    \
+  }
+
+#define MAYBE_SKIP_IPV6                        \
+  if (!HasIPv6Enabled()) {                     \
+    RTC_LOG(LS_INFO) << "No IPv6... skipping"; \
+    return;                                    \
+  }
+
+void TestUdpInternal(const SocketAddress& loopback) {
+  Thread *main = Thread::Current();
+  AsyncSocket* socket = main->socketserver()
+      ->CreateAsyncSocket(loopback.family(), SOCK_DGRAM);
+  socket->Bind(loopback);
+
+  TestClient client(MakeUnique<AsyncUDPSocket>(socket));
+  SocketAddress addr = client.address(), from;
+  EXPECT_EQ(3, client.SendTo("foo", 3, addr));
+  EXPECT_TRUE(client.CheckNextPacket("foo", 3, &from));
+  EXPECT_EQ(from, addr);
+  EXPECT_TRUE(client.CheckNoPacket());
+}
+
+void TestTcpInternal(const SocketAddress& loopback) {
+  Thread *main = Thread::Current();
+  TestEchoServer server(main, loopback);
+
+  AsyncSocket* socket = main->socketserver()
+      ->CreateAsyncSocket(loopback.family(), SOCK_STREAM);
+  std::unique_ptr<AsyncTCPSocket> tcp_socket =
+      WrapUnique(AsyncTCPSocket::Create(socket, loopback, server.address()));
+  ASSERT_TRUE(tcp_socket != nullptr);
+
+  TestClient client(std::move(tcp_socket));
+  SocketAddress addr = client.address(), from;
+  EXPECT_TRUE(client.CheckConnected());
+  EXPECT_EQ(3, client.Send("foo", 3));
+  EXPECT_TRUE(client.CheckNextPacket("foo", 3, &from));
+  EXPECT_EQ(from, server.address());
+  EXPECT_TRUE(client.CheckNoPacket());
+}
+
+// Tests whether the TestClient can send UDP to itself.
+TEST(TestClientTest, TestUdpIPv4) {
+  MAYBE_SKIP_IPV4;
+  TestUdpInternal(SocketAddress("127.0.0.1", 0));
+}
+
+#if defined(WEBRTC_LINUX)
+#define MAYBE_TestUdpIPv6 DISABLED_TestUdpIPv6
+#else
+#define MAYBE_TestUdpIPv6 TestUdpIPv6
+#endif
+TEST(TestClientTest, MAYBE_TestUdpIPv6) {
+  MAYBE_SKIP_IPV6;
+  TestUdpInternal(SocketAddress("::1", 0));
+}
+
+// Tests whether the TestClient can connect to a server and exchange data.
+TEST(TestClientTest, TestTcpIPv4) {
+  MAYBE_SKIP_IPV4;
+  TestTcpInternal(SocketAddress("127.0.0.1", 0));
+}
+
+#if defined(WEBRTC_LINUX)
+#define MAYBE_TestTcpIPv6 DISABLED_TestTcpIPv6
+#else
+#define MAYBE_TestTcpIPv6 TestTcpIPv6
+#endif
+TEST(TestClientTest, MAYBE_TestTcpIPv6) {
+  MAYBE_SKIP_IPV6;
+  TestTcpInternal(SocketAddress("::1", 0));
+}
diff --git a/rtc_base/testechoserver.cc b/rtc_base/testechoserver.cc
new file mode 100644
index 0000000..a5eb7de
--- /dev/null
+++ b/rtc_base/testechoserver.cc
@@ -0,0 +1,30 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/testechoserver.h"
+
+namespace rtc {
+
+TestEchoServer::TestEchoServer(Thread* thread, const SocketAddress& addr)
+    : server_socket_(thread->socketserver()->CreateAsyncSocket(addr.family(),
+                                                               SOCK_STREAM)) {
+  server_socket_->Bind(addr);
+  server_socket_->Listen(5);
+  server_socket_->SignalReadEvent.connect(this, &TestEchoServer::OnAccept);
+}
+
+TestEchoServer::~TestEchoServer() {
+  for (ClientList::iterator it = client_sockets_.begin();
+       it != client_sockets_.end(); ++it) {
+    delete *it;
+  }
+}
+
+}  // namespace rtc
diff --git a/rtc_base/testechoserver.h b/rtc_base/testechoserver.h
new file mode 100644
index 0000000..672dda0
--- /dev/null
+++ b/rtc_base/testechoserver.h
@@ -0,0 +1,64 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_TESTECHOSERVER_H_
+#define RTC_BASE_TESTECHOSERVER_H_
+
+#include <list>
+#include <memory>
+
+#include "rtc_base/asynctcpsocket.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/socketaddress.h"
+#include "rtc_base/thread.h"
+
+namespace rtc {
+
+// A test echo server, echoes back any packets sent to it.
+// Useful for unit tests.
+class TestEchoServer : public sigslot::has_slots<> {
+ public:
+  TestEchoServer(Thread* thread, const SocketAddress& addr);
+  ~TestEchoServer() override;
+
+  SocketAddress address() const { return server_socket_->GetLocalAddress(); }
+
+ private:
+  void OnAccept(AsyncSocket* socket) {
+    AsyncSocket* raw_socket = socket->Accept(nullptr);
+    if (raw_socket) {
+      AsyncTCPSocket* packet_socket = new AsyncTCPSocket(raw_socket, false);
+      packet_socket->SignalReadPacket.connect(this, &TestEchoServer::OnPacket);
+      packet_socket->SignalClose.connect(this, &TestEchoServer::OnClose);
+      client_sockets_.push_back(packet_socket);
+    }
+  }
+  void OnPacket(AsyncPacketSocket* socket, const char* buf, size_t size,
+                const SocketAddress& remote_addr,
+                const PacketTime& packet_time) {
+    rtc::PacketOptions options;
+    socket->Send(buf, size, options);
+  }
+  void OnClose(AsyncPacketSocket* socket, int err) {
+    ClientList::iterator it =
+        std::find(client_sockets_.begin(), client_sockets_.end(), socket);
+    client_sockets_.erase(it);
+    Thread::Current()->Dispose(socket);
+  }
+
+  typedef std::list<AsyncTCPSocket*> ClientList;
+  std::unique_ptr<AsyncSocket> server_socket_;
+  ClientList client_sockets_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(TestEchoServer);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_TESTECHOSERVER_H_
diff --git a/rtc_base/testutils.cc b/rtc_base/testutils.cc
new file mode 100644
index 0000000..b4a7433
--- /dev/null
+++ b/rtc_base/testutils.cc
@@ -0,0 +1,88 @@
+/*
+ *  Copyright 2007 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/testutils.h"
+
+namespace webrtc {
+namespace testing {
+
+StreamSink::StreamSink() = default;
+
+StreamSink::~StreamSink() = default;
+
+StreamSource::StreamSource() {
+  Clear();
+}
+
+StreamSource::~StreamSource() = default;
+
+StreamState StreamSource::GetState() const {
+  return state_;
+}
+
+StreamResult StreamSource::Read(void* buffer,
+                                size_t buffer_len,
+                                size_t* read,
+                                int* error) {
+  if (SS_CLOSED == state_) {
+    if (error)
+      *error = -1;
+    return SR_ERROR;
+  }
+  if ((SS_OPENING == state_) || (readable_data_.size() <= read_block_)) {
+    return SR_BLOCK;
+  }
+  size_t count = std::min(buffer_len, readable_data_.size() - read_block_);
+  memcpy(buffer, &readable_data_[0], count);
+  size_t new_size = readable_data_.size() - count;
+  // Avoid undefined access beyond the last element of the vector.
+  // This only happens when new_size is 0.
+  if (count < readable_data_.size()) {
+    memmove(&readable_data_[0], &readable_data_[count], new_size);
+  }
+  readable_data_.resize(new_size);
+  if (read)
+    *read = count;
+  return SR_SUCCESS;
+}
+
+StreamResult StreamSource::Write(const void* data,
+                                 size_t data_len,
+                                 size_t* written,
+                                 int* error) {
+  if (SS_CLOSED == state_) {
+    if (error)
+      *error = -1;
+    return SR_ERROR;
+  }
+  if (SS_OPENING == state_) {
+    return SR_BLOCK;
+  }
+  if (SIZE_UNKNOWN != write_block_) {
+    if (written_data_.size() >= write_block_) {
+      return SR_BLOCK;
+    }
+    if (data_len > (write_block_ - written_data_.size())) {
+      data_len = write_block_ - written_data_.size();
+    }
+  }
+  if (written)
+    *written = data_len;
+  const char* cdata = static_cast<const char*>(data);
+  written_data_.insert(written_data_.end(), cdata, cdata + data_len);
+  return SR_SUCCESS;
+}
+
+void StreamSource::Close() {
+  state_ = SS_CLOSED;
+}
+
+}  // namespace testing
+}  // namespace webrtc
diff --git a/rtc_base/testutils.h b/rtc_base/testutils.h
new file mode 100644
index 0000000..b000384
--- /dev/null
+++ b/rtc_base/testutils.h
@@ -0,0 +1,233 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_TESTUTILS_H_
+#define RTC_BASE_TESTUTILS_H_
+
+// Utilities for testing rtc infrastructure in unittests
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <vector>
+#include "rtc_base/asyncsocket.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/stream.h"
+#include "rtc_base/stringutils.h"
+
+namespace webrtc {
+namespace testing {
+
+using namespace rtc;
+
+///////////////////////////////////////////////////////////////////////////////
+// StreamSink - Monitor asynchronously signalled events from StreamInterface
+// or AsyncSocket (which should probably be a StreamInterface.
+///////////////////////////////////////////////////////////////////////////////
+
+// Note: Any event that is an error is treaded as SSE_ERROR instead of that
+// event.
+
+enum StreamSinkEvent {
+  SSE_OPEN  = SE_OPEN,
+  SSE_READ  = SE_READ,
+  SSE_WRITE = SE_WRITE,
+  SSE_CLOSE = SE_CLOSE,
+  SSE_ERROR = 16
+};
+
+class StreamSink : public sigslot::has_slots<> {
+ public:
+  StreamSink();
+  ~StreamSink() override;
+
+  void Monitor(StreamInterface* stream) {
+   stream->SignalEvent.connect(this, &StreamSink::OnEvent);
+   events_.erase(stream);
+  }
+  void Unmonitor(StreamInterface* stream) {
+   stream->SignalEvent.disconnect(this);
+   // In case you forgot to unmonitor a previous object with this address
+   events_.erase(stream);
+  }
+  bool Check(StreamInterface* stream, StreamSinkEvent event, bool reset = true) {
+    return DoCheck(stream, event, reset);
+  }
+  int Events(StreamInterface* stream, bool reset = true) {
+    return DoEvents(stream, reset);
+  }
+
+  void Monitor(AsyncSocket* socket) {
+   socket->SignalConnectEvent.connect(this, &StreamSink::OnConnectEvent);
+   socket->SignalReadEvent.connect(this, &StreamSink::OnReadEvent);
+   socket->SignalWriteEvent.connect(this, &StreamSink::OnWriteEvent);
+   socket->SignalCloseEvent.connect(this, &StreamSink::OnCloseEvent);
+   // In case you forgot to unmonitor a previous object with this address
+   events_.erase(socket);
+  }
+  void Unmonitor(AsyncSocket* socket) {
+   socket->SignalConnectEvent.disconnect(this);
+   socket->SignalReadEvent.disconnect(this);
+   socket->SignalWriteEvent.disconnect(this);
+   socket->SignalCloseEvent.disconnect(this);
+   events_.erase(socket);
+  }
+  bool Check(AsyncSocket* socket, StreamSinkEvent event, bool reset = true) {
+    return DoCheck(socket, event, reset);
+  }
+  int Events(AsyncSocket* socket, bool reset = true) {
+    return DoEvents(socket, reset);
+  }
+
+ private:
+  typedef std::map<void*,int> EventMap;
+
+  void OnEvent(StreamInterface* stream, int events, int error) {
+    if (error) {
+      events = SSE_ERROR;
+    }
+    AddEvents(stream, events);
+  }
+  void OnConnectEvent(AsyncSocket* socket) {
+    AddEvents(socket, SSE_OPEN);
+  }
+  void OnReadEvent(AsyncSocket* socket) {
+    AddEvents(socket, SSE_READ);
+  }
+  void OnWriteEvent(AsyncSocket* socket) {
+    AddEvents(socket, SSE_WRITE);
+  }
+  void OnCloseEvent(AsyncSocket* socket, int error) {
+    AddEvents(socket, (0 == error) ? SSE_CLOSE : SSE_ERROR);
+  }
+
+  void AddEvents(void* obj, int events) {
+    EventMap::iterator it = events_.find(obj);
+    if (events_.end() == it) {
+      events_.insert(EventMap::value_type(obj, events));
+    } else {
+      it->second |= events;
+    }
+  }
+  bool DoCheck(void* obj, StreamSinkEvent event, bool reset) {
+    EventMap::iterator it = events_.find(obj);
+    if ((events_.end() == it) || (0 == (it->second & event))) {
+      return false;
+    }
+    if (reset) {
+      it->second &= ~event;
+    }
+    return true;
+  }
+  int DoEvents(void* obj, bool reset) {
+    EventMap::iterator it = events_.find(obj);
+    if (events_.end() == it)
+      return 0;
+    int events = it->second;
+    if (reset) {
+      it->second = 0;
+    }
+    return events;
+  }
+
+  EventMap events_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// StreamSource - Implements stream interface and simulates asynchronous
+// events on the stream, without a network.  Also buffers written data.
+///////////////////////////////////////////////////////////////////////////////
+
+class StreamSource : public StreamInterface {
+public:
+ StreamSource();
+ ~StreamSource() override;
+
+ void Clear() {
+   readable_data_.clear();
+   written_data_.clear();
+   state_ = SS_CLOSED;
+   read_block_ = 0;
+   write_block_ = SIZE_UNKNOWN;
+  }
+  void QueueString(const char* data) {
+    QueueData(data, strlen(data));
+  }
+#if defined(__GNUC__)
+  // Note: Implicit |this| argument counts as the first argument.
+  __attribute__((__format__(__printf__, 2, 3)))
+#endif
+  void QueueStringF(const char* format, ...) {
+    va_list args;
+    va_start(args, format);
+    char buffer[1024];
+    size_t len = vsprintfn(buffer, sizeof(buffer), format, args);
+    RTC_CHECK(len < sizeof(buffer) - 1);
+    va_end(args);
+    QueueData(buffer, len);
+  }
+  void QueueData(const char* data, size_t len) {
+    readable_data_.insert(readable_data_.end(), data, data + len);
+    if ((SS_OPEN == state_) && (readable_data_.size() == len)) {
+      SignalEvent(this, SE_READ, 0);
+    }
+  }
+  std::string ReadData() {
+    std::string data;
+    // avoid accessing written_data_[0] if it is undefined
+    if (written_data_.size() > 0) {
+      data.insert(0, &written_data_[0], written_data_.size());
+    }
+    written_data_.clear();
+    return data;
+  }
+  void SetState(StreamState state) {
+    int events = 0;
+    if ((SS_OPENING == state_) && (SS_OPEN == state)) {
+      events |= SE_OPEN;
+      if (!readable_data_.empty()) {
+        events |= SE_READ;
+      }
+    } else if ((SS_CLOSED != state_) && (SS_CLOSED == state)) {
+      events |= SE_CLOSE;
+    }
+    state_ = state;
+    if (events) {
+      SignalEvent(this, events, 0);
+    }
+  }
+  // Will cause Read to block when there are pos bytes in the read queue.
+  void SetReadBlock(size_t pos) { read_block_ = pos; }
+  // Will cause Write to block when there are pos bytes in the write queue.
+  void SetWriteBlock(size_t pos) { write_block_ = pos; }
+
+  StreamState GetState() const override;
+  StreamResult Read(void* buffer,
+                    size_t buffer_len,
+                    size_t* read,
+                    int* error) override;
+  StreamResult Write(const void* data,
+                     size_t data_len,
+                     size_t* written,
+                     int* error) override;
+  void Close() override;
+
+ private:
+  typedef std::vector<char> Buffer;
+  Buffer readable_data_, written_data_;
+  StreamState state_;
+  size_t read_block_, write_block_;
+};
+
+}  // namespace testing
+}  // namespace webrtc
+
+#endif  // RTC_BASE_TESTUTILS_H_
diff --git a/rtc_base/thread.cc b/rtc_base/thread.cc
new file mode 100644
index 0000000..bb7b591
--- /dev/null
+++ b/rtc_base/thread.cc
@@ -0,0 +1,586 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/thread.h"
+
+#if defined(WEBRTC_WIN)
+#include <comdef.h>
+#elif defined(WEBRTC_POSIX)
+#include <time.h>
+#else
+#error "Either WEBRTC_WIN or WEBRTC_POSIX needs to be defined."
+#endif
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/nullsocketserver.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/stringutils.h"
+#include "rtc_base/timeutils.h"
+#include "rtc_base/trace_event.h"
+
+namespace rtc {
+
+ThreadManager* ThreadManager::Instance() {
+  RTC_DEFINE_STATIC_LOCAL(ThreadManager, thread_manager, ());
+  return &thread_manager;
+}
+
+ThreadManager::~ThreadManager() {
+  // By above RTC_DEFINE_STATIC_LOCAL.
+  RTC_NOTREACHED() << "ThreadManager should never be destructed.";
+}
+
+// static
+Thread* Thread::Current() {
+  ThreadManager* manager = ThreadManager::Instance();
+  Thread* thread = manager->CurrentThread();
+
+#ifndef NO_MAIN_THREAD_WRAPPING
+  // Only autowrap the thread which instantiated the ThreadManager.
+  if (!thread && manager->IsMainThread()) {
+    thread = new Thread(SocketServer::CreateDefault());
+    thread->WrapCurrentWithThreadManager(manager, true);
+  }
+#endif
+
+  return thread;
+}
+
+#if defined(WEBRTC_POSIX)
+#if !defined(WEBRTC_MAC)
+ThreadManager::ThreadManager() : main_thread_ref_(CurrentThreadRef()) {
+  pthread_key_create(&key_, nullptr);
+}
+#endif
+
+Thread *ThreadManager::CurrentThread() {
+  return static_cast<Thread *>(pthread_getspecific(key_));
+}
+
+void ThreadManager::SetCurrentThread(Thread* thread) {
+#if RTC_DLOG_IS_ON
+  if (CurrentThread() && thread) {
+    RTC_DLOG(LS_ERROR) << "SetCurrentThread: Overwriting an existing value?";
+  }
+#endif  // RTC_DLOG_IS_ON
+  pthread_setspecific(key_, thread);
+}
+#endif
+
+#if defined(WEBRTC_WIN)
+ThreadManager::ThreadManager()
+    : key_(TlsAlloc()), main_thread_ref_(CurrentThreadRef()) {
+}
+
+Thread *ThreadManager::CurrentThread() {
+  return static_cast<Thread *>(TlsGetValue(key_));
+}
+
+void ThreadManager::SetCurrentThread(Thread *thread) {
+  RTC_DCHECK(!CurrentThread() || !thread);
+  TlsSetValue(key_, thread);
+}
+#endif
+
+Thread *ThreadManager::WrapCurrentThread() {
+  Thread* result = CurrentThread();
+  if (nullptr == result) {
+    result = new Thread(SocketServer::CreateDefault());
+    result->WrapCurrentWithThreadManager(this, true);
+  }
+  return result;
+}
+
+void ThreadManager::UnwrapCurrentThread() {
+  Thread* t = CurrentThread();
+  if (t && !(t->IsOwned())) {
+    t->UnwrapCurrent();
+    delete t;
+  }
+}
+
+bool ThreadManager::IsMainThread() {
+  return IsThreadRefEqual(CurrentThreadRef(), main_thread_ref_);
+}
+
+Thread::ScopedDisallowBlockingCalls::ScopedDisallowBlockingCalls()
+  : thread_(Thread::Current()),
+    previous_state_(thread_->SetAllowBlockingCalls(false)) {
+}
+
+Thread::ScopedDisallowBlockingCalls::~ScopedDisallowBlockingCalls() {
+  RTC_DCHECK(thread_->IsCurrent());
+  thread_->SetAllowBlockingCalls(previous_state_);
+}
+
+// DEPRECATED.
+Thread::Thread() : Thread(SocketServer::CreateDefault()) {}
+
+Thread::Thread(SocketServer* ss) : Thread(ss, /*do_init=*/true) {}
+
+Thread::Thread(std::unique_ptr<SocketServer> ss)
+    : Thread(std::move(ss), /*do_init=*/true) {}
+
+Thread::Thread(SocketServer* ss, bool do_init)
+    : MessageQueue(ss, /*do_init=*/false) {
+  SetName("Thread", this);  // default name
+  if (do_init) {
+    DoInit();
+  }
+}
+
+Thread::Thread(std::unique_ptr<SocketServer> ss, bool do_init)
+    : MessageQueue(std::move(ss), false) {
+  SetName("Thread", this);  // default name
+  if (do_init) {
+    DoInit();
+  }
+}
+
+Thread::~Thread() {
+  Stop();
+  DoDestroy();
+}
+
+bool Thread::IsCurrent() const {
+  return ThreadManager::Instance()->CurrentThread() == this;
+}
+
+std::unique_ptr<Thread> Thread::CreateWithSocketServer() {
+  return std::unique_ptr<Thread>(new Thread(SocketServer::CreateDefault()));
+}
+
+std::unique_ptr<Thread> Thread::Create() {
+  return std::unique_ptr<Thread>(
+      new Thread(std::unique_ptr<SocketServer>(new NullSocketServer())));
+}
+
+bool Thread::SleepMs(int milliseconds) {
+  AssertBlockingIsAllowedOnCurrentThread();
+
+#if defined(WEBRTC_WIN)
+  ::Sleep(milliseconds);
+  return true;
+#else
+  // POSIX has both a usleep() and a nanosleep(), but the former is deprecated,
+  // so we use nanosleep() even though it has greater precision than necessary.
+  struct timespec ts;
+  ts.tv_sec = milliseconds / 1000;
+  ts.tv_nsec = (milliseconds % 1000) * 1000000;
+  int ret = nanosleep(&ts, nullptr);
+  if (ret != 0) {
+    RTC_LOG_ERR(LS_WARNING) << "nanosleep() returning early";
+    return false;
+  }
+  return true;
+#endif
+}
+
+bool Thread::SetName(const std::string& name, const void* obj) {
+  RTC_DCHECK(!IsRunning());
+
+  name_ = name;
+  if (obj) {
+    char buf[16];
+    sprintfn(buf, sizeof(buf), " 0x%p", obj);
+    name_ += buf;
+  }
+  return true;
+}
+
+bool Thread::Start(Runnable* runnable) {
+  RTC_DCHECK(!IsRunning());
+
+  if (IsRunning())
+    return false;
+
+  Restart();  // reset IsQuitting() if the thread is being restarted
+
+  // Make sure that ThreadManager is created on the main thread before
+  // we start a new thread.
+  ThreadManager::Instance();
+
+  owned_ = true;
+
+  ThreadInit* init = new ThreadInit;
+  init->thread = this;
+  init->runnable = runnable;
+#if defined(WEBRTC_WIN)
+  thread_ = CreateThread(nullptr, 0, PreRun, init, 0, &thread_id_);
+  if (!thread_) {
+    return false;
+  }
+#elif defined(WEBRTC_POSIX)
+  pthread_attr_t attr;
+  pthread_attr_init(&attr);
+
+  int error_code = pthread_create(&thread_, &attr, PreRun, init);
+  if (0 != error_code) {
+    RTC_LOG(LS_ERROR) << "Unable to create pthread, error " << error_code;
+    thread_ = 0;
+    return false;
+  }
+  RTC_DCHECK(thread_);
+#endif
+  return true;
+}
+
+bool Thread::WrapCurrent() {
+  return WrapCurrentWithThreadManager(ThreadManager::Instance(), true);
+}
+
+void Thread::UnwrapCurrent() {
+  // Clears the platform-specific thread-specific storage.
+  ThreadManager::Instance()->SetCurrentThread(nullptr);
+#if defined(WEBRTC_WIN)
+  if (thread_ != nullptr) {
+    if (!CloseHandle(thread_)) {
+      RTC_LOG_GLE(LS_ERROR)
+          << "When unwrapping thread, failed to close handle.";
+    }
+    thread_ = nullptr;
+    thread_id_ = 0;
+  }
+#elif defined(WEBRTC_POSIX)
+  thread_ = 0;
+#endif
+}
+
+void Thread::SafeWrapCurrent() {
+  WrapCurrentWithThreadManager(ThreadManager::Instance(), false);
+}
+
+void Thread::Join() {
+  if (!IsRunning())
+    return;
+
+  RTC_DCHECK(!IsCurrent());
+  if (Current() && !Current()->blocking_calls_allowed_) {
+    RTC_LOG(LS_WARNING) << "Waiting for the thread to join, "
+                        << "but blocking calls have been disallowed";
+  }
+
+#if defined(WEBRTC_WIN)
+  RTC_DCHECK(thread_ != nullptr);
+  WaitForSingleObject(thread_, INFINITE);
+  CloseHandle(thread_);
+  thread_ = nullptr;
+  thread_id_ = 0;
+#elif defined(WEBRTC_POSIX)
+  pthread_join(thread_, nullptr);
+  thread_ = 0;
+#endif
+}
+
+bool Thread::SetAllowBlockingCalls(bool allow) {
+  RTC_DCHECK(IsCurrent());
+  bool previous = blocking_calls_allowed_;
+  blocking_calls_allowed_ = allow;
+  return previous;
+}
+
+// static
+void Thread::AssertBlockingIsAllowedOnCurrentThread() {
+#if !defined(NDEBUG)
+  Thread* current = Thread::Current();
+  RTC_DCHECK(!current || current->blocking_calls_allowed_);
+#endif
+}
+
+// static
+#if !defined(WEBRTC_MAC)
+#if defined(WEBRTC_WIN)
+DWORD WINAPI Thread::PreRun(LPVOID pv) {
+#else
+void* Thread::PreRun(void* pv) {
+#endif
+  ThreadInit* init = static_cast<ThreadInit*>(pv);
+  ThreadManager::Instance()->SetCurrentThread(init->thread);
+  rtc::SetCurrentThreadName(init->thread->name_.c_str());
+  if (init->runnable) {
+    init->runnable->Run(init->thread);
+  } else {
+    init->thread->Run();
+  }
+  ThreadManager::Instance()->SetCurrentThread(nullptr);
+  delete init;
+#ifdef WEBRTC_WIN
+  return 0;
+#else
+  return nullptr;
+#endif
+}
+#endif
+
+void Thread::Run() {
+  ProcessMessages(kForever);
+}
+
+bool Thread::IsOwned() {
+  RTC_DCHECK(IsRunning());
+  return owned_;
+}
+
+void Thread::Stop() {
+  MessageQueue::Quit();
+  Join();
+}
+
+void Thread::Send(const Location& posted_from,
+                  MessageHandler* phandler,
+                  uint32_t id,
+                  MessageData* pdata) {
+  if (IsQuitting())
+    return;
+
+  // Sent messages are sent to the MessageHandler directly, in the context
+  // of "thread", like Win32 SendMessage. If in the right context,
+  // call the handler directly.
+  Message msg;
+  msg.posted_from = posted_from;
+  msg.phandler = phandler;
+  msg.message_id = id;
+  msg.pdata = pdata;
+  if (IsCurrent()) {
+    phandler->OnMessage(&msg);
+    return;
+  }
+
+  AssertBlockingIsAllowedOnCurrentThread();
+
+  AutoThread thread;
+  Thread *current_thread = Thread::Current();
+  RTC_DCHECK(current_thread != nullptr);  // AutoThread ensures this
+
+  bool ready = false;
+  {
+    CritScope cs(&crit_);
+    _SendMessage smsg;
+    smsg.thread = current_thread;
+    smsg.msg = msg;
+    smsg.ready = &ready;
+    sendlist_.push_back(smsg);
+  }
+
+  // Wait for a reply
+  WakeUpSocketServer();
+
+  bool waited = false;
+  crit_.Enter();
+  while (!ready) {
+    crit_.Leave();
+    // We need to limit "ReceiveSends" to |this| thread to avoid an arbitrary
+    // thread invoking calls on the current thread.
+    current_thread->ReceiveSendsFromThread(this);
+    current_thread->socketserver()->Wait(kForever, false);
+    waited = true;
+    crit_.Enter();
+  }
+  crit_.Leave();
+
+  // Our Wait loop above may have consumed some WakeUp events for this
+  // MessageQueue, that weren't relevant to this Send.  Losing these WakeUps can
+  // cause problems for some SocketServers.
+  //
+  // Concrete example:
+  // Win32SocketServer on thread A calls Send on thread B.  While processing the
+  // message, thread B Posts a message to A.  We consume the wakeup for that
+  // Post while waiting for the Send to complete, which means that when we exit
+  // this loop, we need to issue another WakeUp, or else the Posted message
+  // won't be processed in a timely manner.
+
+  if (waited) {
+    current_thread->socketserver()->WakeUp();
+  }
+}
+
+void Thread::ReceiveSends() {
+  ReceiveSendsFromThread(nullptr);
+}
+
+void Thread::ReceiveSendsFromThread(const Thread* source) {
+  // Receive a sent message. Cleanup scenarios:
+  // - thread sending exits: We don't allow this, since thread can exit
+  //   only via Join, so Send must complete.
+  // - thread receiving exits: Wakeup/set ready in Thread::Clear()
+  // - object target cleared: Wakeup/set ready in Thread::Clear()
+  _SendMessage smsg;
+
+  crit_.Enter();
+  while (PopSendMessageFromThread(source, &smsg)) {
+    crit_.Leave();
+
+    smsg.msg.phandler->OnMessage(&smsg.msg);
+
+    crit_.Enter();
+    *smsg.ready = true;
+    smsg.thread->socketserver()->WakeUp();
+  }
+  crit_.Leave();
+}
+
+bool Thread::PopSendMessageFromThread(const Thread* source, _SendMessage* msg) {
+  for (std::list<_SendMessage>::iterator it = sendlist_.begin();
+       it != sendlist_.end(); ++it) {
+    if (it->thread == source || source == nullptr) {
+      *msg = *it;
+      sendlist_.erase(it);
+      return true;
+    }
+  }
+  return false;
+}
+
+void Thread::InvokeInternal(const Location& posted_from,
+                            MessageHandler* handler) {
+  TRACE_EVENT2("webrtc", "Thread::Invoke", "src_file_and_line",
+               posted_from.file_and_line(), "src_func",
+               posted_from.function_name());
+  Send(posted_from, handler);
+}
+
+void Thread::Clear(MessageHandler* phandler,
+                   uint32_t id,
+                   MessageList* removed) {
+  CritScope cs(&crit_);
+
+  // Remove messages on sendlist_ with phandler
+  // Object target cleared: remove from send list, wakeup/set ready
+  // if sender not null.
+
+  std::list<_SendMessage>::iterator iter = sendlist_.begin();
+  while (iter != sendlist_.end()) {
+    _SendMessage smsg = *iter;
+    if (smsg.msg.Match(phandler, id)) {
+      if (removed) {
+        removed->push_back(smsg.msg);
+      } else {
+        delete smsg.msg.pdata;
+      }
+      iter = sendlist_.erase(iter);
+      *smsg.ready = true;
+      smsg.thread->socketserver()->WakeUp();
+      continue;
+    }
+    ++iter;
+  }
+
+  MessageQueue::Clear(phandler, id, removed);
+}
+
+#if !defined(WEBRTC_MAC)
+// Note that these methods have a separate implementation for mac and ios
+// defined in webrtc/rtc_base/thread_darwin.mm.
+bool Thread::ProcessMessages(int cmsLoop) {
+  // Using ProcessMessages with a custom clock for testing and a time greater
+  // than 0 doesn't work, since it's not guaranteed to advance the custom
+  // clock's time, and may get stuck in an infinite loop.
+  RTC_DCHECK(GetClockForTesting() == nullptr || cmsLoop == 0 ||
+             cmsLoop == kForever);
+  int64_t msEnd = (kForever == cmsLoop) ? 0 : TimeAfter(cmsLoop);
+  int cmsNext = cmsLoop;
+
+  while (true) {
+    Message msg;
+    if (!Get(&msg, cmsNext))
+      return !IsQuitting();
+    Dispatch(&msg);
+
+    if (cmsLoop != kForever) {
+      cmsNext = static_cast<int>(TimeUntil(msEnd));
+      if (cmsNext < 0)
+        return true;
+    }
+  }
+}
+#endif
+
+bool Thread::WrapCurrentWithThreadManager(ThreadManager* thread_manager,
+                                          bool need_synchronize_access) {
+  RTC_DCHECK(!IsRunning());
+
+#if defined(WEBRTC_WIN)
+  if (need_synchronize_access) {
+    // We explicitly ask for no rights other than synchronization.
+    // This gives us the best chance of succeeding.
+    thread_ = OpenThread(SYNCHRONIZE, FALSE, GetCurrentThreadId());
+    if (!thread_) {
+      RTC_LOG_GLE(LS_ERROR) << "Unable to get handle to thread.";
+      return false;
+    }
+    thread_id_ = GetCurrentThreadId();
+  }
+#elif defined(WEBRTC_POSIX)
+  thread_ = pthread_self();
+#endif
+  owned_ = false;
+  thread_manager->SetCurrentThread(this);
+  return true;
+}
+
+bool Thread::IsRunning() {
+#if defined(WEBRTC_WIN)
+  return thread_ != nullptr;
+#elif defined(WEBRTC_POSIX)
+  return thread_ != 0;
+#endif
+}
+
+AutoThread::AutoThread()
+    : Thread(SocketServer::CreateDefault(), /*do_init=*/false) {
+  DoInit();
+  if (!ThreadManager::Instance()->CurrentThread()) {
+    ThreadManager::Instance()->SetCurrentThread(this);
+  }
+}
+
+AutoThread::~AutoThread() {
+  Stop();
+  DoDestroy();
+  if (ThreadManager::Instance()->CurrentThread() == this) {
+    ThreadManager::Instance()->SetCurrentThread(nullptr);
+  }
+}
+
+AutoSocketServerThread::AutoSocketServerThread(SocketServer* ss)
+    : Thread(ss, /*do_init=*/false) {
+  DoInit();
+  old_thread_ = ThreadManager::Instance()->CurrentThread();
+  // Temporarily set the current thread to nullptr so that we can keep checks
+  // around that catch unintentional pointer overwrites.
+  rtc::ThreadManager::Instance()->SetCurrentThread(nullptr);
+  rtc::ThreadManager::Instance()->SetCurrentThread(this);
+  if (old_thread_) {
+    MessageQueueManager::Remove(old_thread_);
+  }
+}
+
+AutoSocketServerThread::~AutoSocketServerThread() {
+  RTC_DCHECK(ThreadManager::Instance()->CurrentThread() == this);
+  // Some tests post destroy messages to this thread. To avoid memory
+  // leaks, we have to process those messages. In particular
+  // P2PTransportChannelPingTest, relying on the message posted in
+  // cricket::Connection::Destroy.
+  ProcessMessages(0);
+  // Stop and destroy the thread before clearing it as the current thread.
+  // Sometimes there are messages left in the MessageQueue that will be
+  // destroyed by DoDestroy, and sometimes the destructors of the message and/or
+  // its contents rely on this thread still being set as the current thread.
+  Stop();
+  DoDestroy();
+  rtc::ThreadManager::Instance()->SetCurrentThread(nullptr);
+  rtc::ThreadManager::Instance()->SetCurrentThread(old_thread_);
+  if (old_thread_) {
+    MessageQueueManager::Add(old_thread_);
+  }
+}
+
+}  // namespace rtc
diff --git a/rtc_base/thread.h b/rtc_base/thread.h
new file mode 100644
index 0000000..568764e
--- /dev/null
+++ b/rtc_base/thread.h
@@ -0,0 +1,337 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_THREAD_H_
+#define RTC_BASE_THREAD_H_
+
+#include <algorithm>
+#include <list>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#if defined(WEBRTC_POSIX)
+#include <pthread.h>
+#endif
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/messagequeue.h"
+#include "rtc_base/platform_thread_types.h"
+
+#if defined(WEBRTC_WIN)
+#include "rtc_base/win32.h"
+#endif
+
+namespace rtc {
+
+class Thread;
+
+class ThreadManager {
+ public:
+  static const int kForever = -1;
+
+  // Singleton, constructor and destructor are private.
+  static ThreadManager* Instance();
+
+  Thread* CurrentThread();
+  void SetCurrentThread(Thread* thread);
+
+  // Returns a thread object with its thread_ ivar set
+  // to whatever the OS uses to represent the thread.
+  // If there already *is* a Thread object corresponding to this thread,
+  // this method will return that.  Otherwise it creates a new Thread
+  // object whose wrapped() method will return true, and whose
+  // handle will, on Win32, be opened with only synchronization privileges -
+  // if you need more privilegs, rather than changing this method, please
+  // write additional code to adjust the privileges, or call a different
+  // factory method of your own devising, because this one gets used in
+  // unexpected contexts (like inside browser plugins) and it would be a
+  // shame to break it.  It is also conceivable on Win32 that we won't even
+  // be able to get synchronization privileges, in which case the result
+  // will have a null handle.
+  Thread *WrapCurrentThread();
+  void UnwrapCurrentThread();
+
+  bool IsMainThread();
+
+ private:
+  ThreadManager();
+  ~ThreadManager();
+
+#if defined(WEBRTC_POSIX)
+  pthread_key_t key_;
+#endif
+
+#if defined(WEBRTC_WIN)
+  const DWORD key_;
+#endif
+
+  // The thread to potentially autowrap.
+  const PlatformThreadRef main_thread_ref_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(ThreadManager);
+};
+
+struct _SendMessage {
+  _SendMessage() {}
+  Thread *thread;
+  Message msg;
+  bool *ready;
+};
+
+class Runnable {
+ public:
+  virtual ~Runnable() {}
+  virtual void Run(Thread* thread) = 0;
+
+ protected:
+  Runnable() {}
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(Runnable);
+};
+
+// WARNING! SUBCLASSES MUST CALL Stop() IN THEIR DESTRUCTORS!  See ~Thread().
+
+class RTC_LOCKABLE Thread : public MessageQueue {
+ public:
+  // DEPRECATED.
+  // The default constructor should not be used because it hides whether or
+  // not a socket server will be associated with the thread. Most instances
+  // of Thread do actually not need one, so please use either of the Create*
+  // methods to construct an instance of Thread.
+  Thread();
+
+  explicit Thread(SocketServer* ss);
+  explicit Thread(std::unique_ptr<SocketServer> ss);
+  // Constructors meant for subclasses; they should call DoInit themselves and
+  // pass false for |do_init|, so that DoInit is called only on the fully
+  // instantiated class, which avoids a vptr data race.
+  Thread(SocketServer* ss, bool do_init);
+  Thread(std::unique_ptr<SocketServer> ss, bool do_init);
+
+  // NOTE: ALL SUBCLASSES OF Thread MUST CALL Stop() IN THEIR DESTRUCTORS (or
+  // guarantee Stop() is explicitly called before the subclass is destroyed).
+  // This is required to avoid a data race between the destructor modifying the
+  // vtable, and the Thread::PreRun calling the virtual method Run().
+  ~Thread() override;
+
+  static std::unique_ptr<Thread> CreateWithSocketServer();
+  static std::unique_ptr<Thread> Create();
+  static Thread* Current();
+
+  // Used to catch performance regressions. Use this to disallow blocking calls
+  // (Invoke) for a given scope.  If a synchronous call is made while this is in
+  // effect, an assert will be triggered.
+  // Note that this is a single threaded class.
+  class ScopedDisallowBlockingCalls {
+   public:
+    ScopedDisallowBlockingCalls();
+    ~ScopedDisallowBlockingCalls();
+   private:
+    Thread* const thread_;
+    const bool previous_state_;
+  };
+
+  bool IsCurrent() const;
+
+  // Sleeps the calling thread for the specified number of milliseconds, during
+  // which time no processing is performed. Returns false if sleeping was
+  // interrupted by a signal (POSIX only).
+  static bool SleepMs(int millis);
+
+  // Sets the thread's name, for debugging. Must be called before Start().
+  // If |obj| is non-null, its value is appended to |name|.
+  const std::string& name() const { return name_; }
+  bool SetName(const std::string& name, const void* obj);
+
+  // Starts the execution of the thread.
+  bool Start(Runnable* runnable = nullptr);
+
+  // Tells the thread to stop and waits until it is joined.
+  // Never call Stop on the current thread.  Instead use the inherited Quit
+  // function which will exit the base MessageQueue without terminating the
+  // underlying OS thread.
+  virtual void Stop();
+
+  // By default, Thread::Run() calls ProcessMessages(kForever).  To do other
+  // work, override Run().  To receive and dispatch messages, call
+  // ProcessMessages occasionally.
+  virtual void Run();
+
+  virtual void Send(const Location& posted_from,
+                    MessageHandler* phandler,
+                    uint32_t id = 0,
+                    MessageData* pdata = nullptr);
+
+  // Convenience method to invoke a functor on another thread.  Caller must
+  // provide the |ReturnT| template argument, which cannot (easily) be deduced.
+  // Uses Send() internally, which blocks the current thread until execution
+  // is complete.
+  // Ex: bool result = thread.Invoke<bool>(RTC_FROM_HERE,
+  // &MyFunctionReturningBool);
+  // NOTE: This function can only be called when synchronous calls are allowed.
+  // See ScopedDisallowBlockingCalls for details.
+  template <class ReturnT, class FunctorT>
+  ReturnT Invoke(const Location& posted_from, FunctorT&& functor) {
+    FunctorMessageHandler<ReturnT, FunctorT> handler(
+        std::forward<FunctorT>(functor));
+    InvokeInternal(posted_from, &handler);
+    return handler.MoveResult();
+  }
+
+  // From MessageQueue
+  void Clear(MessageHandler* phandler,
+             uint32_t id = MQID_ANY,
+             MessageList* removed = nullptr) override;
+  void ReceiveSends() override;
+
+  // ProcessMessages will process I/O and dispatch messages until:
+  //  1) cms milliseconds have elapsed (returns true)
+  //  2) Stop() is called (returns false)
+  bool ProcessMessages(int cms);
+
+  // Returns true if this is a thread that we created using the standard
+  // constructor, false if it was created by a call to
+  // ThreadManager::WrapCurrentThread().  The main thread of an application
+  // is generally not owned, since the OS representation of the thread
+  // obviously exists before we can get to it.
+  // You cannot call Start on non-owned threads.
+  bool IsOwned();
+
+  // Expose private method IsRunning() for tests.
+  //
+  // DANGER: this is a terrible public API.  Most callers that might want to
+  // call this likely do not have enough control/knowledge of the Thread in
+  // question to guarantee that the returned value remains true for the duration
+  // of whatever code is conditionally executing because of the return value!
+  bool RunningForTest() { return IsRunning(); }
+
+  // Sets the per-thread allow-blocking-calls flag and returns the previous
+  // value. Must be called on this thread.
+  bool SetAllowBlockingCalls(bool allow);
+
+  // These functions are public to avoid injecting test hooks. Don't call them
+  // outside of tests.
+  // This method should be called when thread is created using non standard
+  // method, like derived implementation of rtc::Thread and it can not be
+  // started by calling Start(). This will set started flag to true and
+  // owned to false. This must be called from the current thread.
+  bool WrapCurrent();
+  void UnwrapCurrent();
+
+ protected:
+  // Same as WrapCurrent except that it never fails as it does not try to
+  // acquire the synchronization access of the thread. The caller should never
+  // call Stop() or Join() on this thread.
+  void SafeWrapCurrent();
+
+  // Blocks the calling thread until this thread has terminated.
+  void Join();
+
+  static void AssertBlockingIsAllowedOnCurrentThread();
+
+  friend class ScopedDisallowBlockingCalls;
+
+ private:
+  struct ThreadInit {
+    Thread* thread;
+    Runnable* runnable;
+  };
+
+#if defined(WEBRTC_WIN)
+  static DWORD WINAPI PreRun(LPVOID context);
+#else
+  static void *PreRun(void *pv);
+#endif
+
+  // ThreadManager calls this instead WrapCurrent() because
+  // ThreadManager::Instance() cannot be used while ThreadManager is
+  // being created.
+  // The method tries to get synchronization rights of the thread on Windows if
+  // |need_synchronize_access| is true.
+  bool WrapCurrentWithThreadManager(ThreadManager* thread_manager,
+                                    bool need_synchronize_access);
+
+  // Return true if the thread is currently running.
+  bool IsRunning();
+
+  // Processes received "Send" requests. If |source| is not null, only requests
+  // from |source| are processed, otherwise, all requests are processed.
+  void ReceiveSendsFromThread(const Thread* source);
+
+  // If |source| is not null, pops the first "Send" message from |source| in
+  // |sendlist_|, otherwise, pops the first "Send" message of |sendlist_|.
+  // The caller must lock |crit_| before calling.
+  // Returns true if there is such a message.
+  bool PopSendMessageFromThread(const Thread* source, _SendMessage* msg);
+
+  void InvokeInternal(const Location& posted_from, MessageHandler* handler);
+
+  std::list<_SendMessage> sendlist_;
+  std::string name_;
+
+  // TODO(tommi): Add thread checks for proper use of control methods.
+  // Ideally we should be able to just use PlatformThread.
+
+#if defined(WEBRTC_POSIX)
+  pthread_t thread_ = 0;
+#endif
+
+#if defined(WEBRTC_WIN)
+  HANDLE thread_ = nullptr;
+  DWORD thread_id_ = 0;
+#endif
+
+  // Indicates whether or not ownership of the worker thread lies with
+  // this instance or not. (i.e. owned_ == !wrapped).
+  // Must only be modified when the worker thread is not running.
+  bool owned_ = true;
+
+  // Only touched from the worker thread itself.
+  bool blocking_calls_allowed_ = true;
+
+  friend class ThreadManager;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(Thread);
+};
+
+// AutoThread automatically installs itself at construction
+// uninstalls at destruction, if a Thread object is
+// _not already_ associated with the current OS thread.
+
+class AutoThread : public Thread {
+ public:
+  AutoThread();
+  ~AutoThread() override;
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(AutoThread);
+};
+
+// AutoSocketServerThread automatically installs itself at
+// construction and uninstalls at destruction. If a Thread object is
+// already associated with the current OS thread, it is temporarily
+// disassociated and restored by the destructor.
+
+class AutoSocketServerThread : public Thread {
+ public:
+  explicit AutoSocketServerThread(SocketServer* ss);
+  ~AutoSocketServerThread() override;
+
+ private:
+  rtc::Thread* old_thread_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(AutoSocketServerThread);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_THREAD_H_
diff --git a/rtc_base/thread_annotations.h b/rtc_base/thread_annotations.h
new file mode 100644
index 0000000..8569fab
--- /dev/null
+++ b/rtc_base/thread_annotations.h
@@ -0,0 +1,95 @@
+//
+// Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS.  All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+//
+// Borrowed from
+// https://code.google.com/p/gperftools/source/browse/src/base/thread_annotations.h
+// but adapted for clang attributes instead of the gcc.
+//
+// This header file contains the macro definitions for thread safety
+// annotations that allow the developers to document the locking policies
+// of their multi-threaded code. The annotations can also help program
+// analysis tools to identify potential thread safety issues.
+
+#ifndef RTC_BASE_THREAD_ANNOTATIONS_H_
+#define RTC_BASE_THREAD_ANNOTATIONS_H_
+
+#if defined(__clang__) && (!defined(SWIG))
+#define RTC_THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#else
+#define RTC_THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op
+#endif
+
+// Document if a shared variable/field needs to be protected by a lock.
+// GUARDED_BY allows the user to specify a particular lock that should be
+// held when accessing the annotated variable.
+#define RTC_GUARDED_BY(x) RTC_THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+
+// Document if the memory location pointed to by a pointer should be guarded
+// by a lock when dereferencing the pointer. Note that a pointer variable to a
+// shared memory location could itself be a shared variable. For example, if a
+// shared global pointer q, which is guarded by mu1, points to a shared memory
+// location that is guarded by mu2, q should be annotated as follows:
+//     int *q GUARDED_BY(mu1) PT_GUARDED_BY(mu2);
+#define RTC_PT_GUARDED_BY(x) RTC_THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+
+// Document the acquisition order between locks that can be held
+// simultaneously by a thread. For any two locks that need to be annotated
+// to establish an acquisition order, only one of them needs the annotation.
+// (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER
+// and ACQUIRED_BEFORE.)
+#define RTC_ACQUIRED_AFTER(x) \
+  RTC_THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(x))
+#define RTC_ACQUIRED_BEFORE(x) \
+  RTC_THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(x))
+
+// The following three annotations document the lock requirements for
+// functions/methods.
+
+// Document if a function expects certain locks to be held before it is called
+#define RTC_EXCLUSIVE_LOCKS_REQUIRED(...) \
+  RTC_THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
+#define RTC_SHARED_LOCKS_REQUIRED(...) \
+  RTC_THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
+
+// Document the locks acquired in the body of the function. These locks
+// cannot be held when calling this function (as google3's Mutex locks are
+// non-reentrant).
+#define RTC_LOCKS_EXCLUDED(...) \
+  RTC_THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
+
+// Document the lock the annotated function returns without acquiring it.
+#define RTC_LOCK_RETURNED(x) RTC_THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
+
+// Document if a class/type is a lockable type (such as the Mutex class).
+#define RTC_LOCKABLE RTC_THREAD_ANNOTATION_ATTRIBUTE__(lockable)
+
+// Document if a class is a scoped lockable type (such as the MutexLock class).
+#define RTC_SCOPED_LOCKABLE RTC_THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+
+// The following annotations specify lock and unlock primitives.
+#define RTC_EXCLUSIVE_LOCK_FUNCTION(...) \
+  RTC_THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
+
+#define RTC_SHARED_LOCK_FUNCTION(...) \
+  RTC_THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
+
+#define RTC_EXCLUSIVE_TRYLOCK_FUNCTION(...) \
+  RTC_THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
+
+#define RTC_SHARED_TRYLOCK_FUNCTION(...) \
+  RTC_THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
+
+#define RTC_UNLOCK_FUNCTION(...) \
+  RTC_THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
+
+// An escape hatch for thread safety analysis to ignore the annotated function.
+#define RTC_NO_THREAD_SAFETY_ANALYSIS \
+  RTC_THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
+
+#endif  // RTC_BASE_THREAD_ANNOTATIONS_H_
diff --git a/rtc_base/thread_annotations_unittest.cc b/rtc_base/thread_annotations_unittest.cc
new file mode 100644
index 0000000..d8a4af1
--- /dev/null
+++ b/rtc_base/thread_annotations_unittest.cc
@@ -0,0 +1,134 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/thread_annotations.h"
+#include "test/gtest.h"
+
+namespace {
+
+class RTC_LOCKABLE Lock {
+ public:
+  void EnterWrite() const RTC_EXCLUSIVE_LOCK_FUNCTION() {}
+  void EnterRead() const RTC_SHARED_LOCK_FUNCTION() {}
+  bool TryEnterWrite() const RTC_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+    return true;
+  }
+  bool TryEnterRead() const RTC_SHARED_TRYLOCK_FUNCTION(true) { return true; }
+  void Leave() const RTC_UNLOCK_FUNCTION() {}
+};
+
+class RTC_SCOPED_LOCKABLE ScopeLock {
+ public:
+  explicit ScopeLock(const Lock& lock) RTC_EXCLUSIVE_LOCK_FUNCTION(lock) {}
+  ~ScopeLock() RTC_UNLOCK_FUNCTION() {}
+};
+
+class ThreadSafe {
+ public:
+  ThreadSafe() {
+    pt_protected_by_lock_ = new int;
+  }
+
+  ~ThreadSafe() {
+    delete pt_protected_by_lock_;
+  }
+
+  void LockInOrder() {
+    beforelock_.EnterWrite();
+    lock_.EnterWrite();
+    pt_lock_.EnterWrite();
+
+    pt_lock_.Leave();
+    lock_.Leave();
+    beforelock_.Leave();
+  }
+
+  void UnprotectedFunction() RTC_LOCKS_EXCLUDED(lock_, pt_lock_) {
+    // Can access unprotected Value.
+    unprotected_ = 15;
+    // Can access pointers themself, but not data they point to.
+    int* tmp = pt_protected_by_lock_;
+    pt_protected_by_lock_ = tmp;
+  }
+
+  void ReadProtected() {
+    lock_.EnterRead();
+    unprotected_ = protected_by_lock_;
+    lock_.Leave();
+
+    if (pt_lock_.TryEnterRead()) {
+      unprotected_ = *pt_protected_by_lock_;
+      pt_lock_.Leave();
+    }
+  }
+
+  void WriteProtected() {
+    lock_.EnterWrite();
+    protected_by_lock_ = unprotected_;
+    lock_.Leave();
+
+    if (pt_lock_.TryEnterWrite()) {
+      *pt_protected_by_lock_ = unprotected_;
+      pt_lock_.Leave();
+    }
+  }
+
+  void CallReadProtectedFunction() {
+    lock_.EnterRead();
+    pt_lock_.EnterRead();
+    ReadProtectedFunction();
+    pt_lock_.Leave();
+    lock_.Leave();
+  }
+
+  void CallWriteProtectedFunction() {
+    ScopeLock scope_lock(GetLock());
+    ScopeLock pt_scope_lock(pt_lock_);
+    WriteProtectedFunction();
+  }
+
+ private:
+  void ReadProtectedFunction() RTC_SHARED_LOCKS_REQUIRED(lock_, pt_lock_) {
+    unprotected_ = protected_by_lock_;
+    unprotected_ = *pt_protected_by_lock_;
+  }
+
+  void WriteProtectedFunction() RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_, pt_lock_) {
+    int x = protected_by_lock_;
+    *pt_protected_by_lock_ = x;
+    protected_by_lock_ = unprotected_;
+  }
+
+  const Lock& GetLock() RTC_LOCK_RETURNED(lock_) { return lock_; }
+
+  Lock beforelock_ RTC_ACQUIRED_BEFORE(lock_);
+  Lock lock_;
+  Lock pt_lock_ RTC_ACQUIRED_AFTER(lock_);
+
+  int unprotected_ = 0;
+
+  int protected_by_lock_ RTC_GUARDED_BY(lock_) = 0;
+
+  int* pt_protected_by_lock_ RTC_PT_GUARDED_BY(pt_lock_);
+};
+
+}  // namespace
+
+TEST(ThreadAnnotationsTest, Test) {
+  // This test ensure thread annotations doesn't break compilation.
+  // Thus no run-time expectations.
+  ThreadSafe t;
+  t.LockInOrder();
+  t.UnprotectedFunction();
+  t.ReadProtected();
+  t.WriteProtected();
+  t.CallReadProtectedFunction();
+  t.CallWriteProtectedFunction();
+}
diff --git a/rtc_base/thread_checker.h b/rtc_base/thread_checker.h
new file mode 100644
index 0000000..cc03b8d
--- /dev/null
+++ b/rtc_base/thread_checker.h
@@ -0,0 +1,172 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Borrowed from Chromium's src/base/threading/thread_checker.h.
+
+#ifndef RTC_BASE_THREAD_CHECKER_H_
+#define RTC_BASE_THREAD_CHECKER_H_
+
+// Apart from debug builds, we also enable the thread checker in
+// builds with RTC_DCHECK_IS_ON so that trybots and waterfall bots
+// with this define will get the same level of thread checking as
+// debug bots.
+#define RTC_ENABLE_THREAD_CHECKER RTC_DCHECK_IS_ON
+
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/thread_checker_impl.h"
+
+namespace rtc {
+
+// Do nothing implementation, for use in release mode.
+//
+// Note: You should almost always use the ThreadChecker class to get the
+// right version for your build configuration.
+class ThreadCheckerDoNothing {
+ public:
+  bool CalledOnValidThread() const {
+    return true;
+  }
+
+  void DetachFromThread() {}
+};
+
+// ThreadChecker is a helper class used to help verify that some methods of a
+// class are called from the same thread. It provides identical functionality to
+// base::NonThreadSafe, but it is meant to be held as a member variable, rather
+// than inherited from base::NonThreadSafe.
+//
+// While inheriting from base::NonThreadSafe may give a clear indication about
+// the thread-safety of a class, it may also lead to violations of the style
+// guide with regard to multiple inheritance. The choice between having a
+// ThreadChecker member and inheriting from base::NonThreadSafe should be based
+// on whether:
+//  - Derived classes need to know the thread they belong to, as opposed to
+//    having that functionality fully encapsulated in the base class.
+//  - Derived classes should be able to reassign the base class to another
+//    thread, via DetachFromThread.
+//
+// If neither of these are true, then having a ThreadChecker member and calling
+// CalledOnValidThread is the preferable solution.
+//
+// Example:
+// class MyClass {
+//  public:
+//   void Foo() {
+//     RTC_DCHECK(thread_checker_.CalledOnValidThread());
+//     ... (do stuff) ...
+//   }
+//
+//  private:
+//   ThreadChecker thread_checker_;
+// }
+//
+// In Release mode, CalledOnValidThread will always return true.
+#if RTC_ENABLE_THREAD_CHECKER
+class RTC_LOCKABLE ThreadChecker : public ThreadCheckerImpl {};
+#else
+class RTC_LOCKABLE ThreadChecker : public ThreadCheckerDoNothing {};
+#endif  // RTC_ENABLE_THREAD_CHECKER
+
+#undef RTC_ENABLE_THREAD_CHECKER
+
+namespace internal {
+class RTC_SCOPED_LOCKABLE AnnounceOnThread {
+ public:
+  template <typename ThreadLikeObject>
+  explicit AnnounceOnThread(const ThreadLikeObject* thread_like_object)
+      RTC_EXCLUSIVE_LOCK_FUNCTION(thread_like_object) {}
+  ~AnnounceOnThread() RTC_UNLOCK_FUNCTION() {}
+
+  template<typename ThreadLikeObject>
+  static bool IsCurrent(const ThreadLikeObject* thread_like_object) {
+    return thread_like_object->IsCurrent();
+  }
+  static bool IsCurrent(const rtc::ThreadChecker* checker) {
+    return checker->CalledOnValidThread();
+  }
+
+ private:
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AnnounceOnThread);
+};
+
+}  // namespace internal
+}  // namespace rtc
+
+// RTC_RUN_ON/RTC_GUARDED_BY/RTC_DCHECK_RUN_ON macros allows to annotate
+// variables are accessed from same thread/task queue.
+// Using tools designed to check mutexes, it checks at compile time everywhere
+// variable is access, there is a run-time dcheck thread/task queue is correct.
+//
+// class ExampleThread {
+//  public:
+//   void NeedVar1() {
+//     RTC_DCHECK_RUN_ON(network_thread_);
+//     transport_->Send();
+//   }
+//
+//  private:
+//   rtc::Thread* network_thread_;
+//   int transport_ RTC_GUARDED_BY(network_thread_);
+// };
+//
+// class ExampleThreadChecker {
+//  public:
+//   int CalledFromPacer() RTC_RUN_ON(pacer_thread_checker_) {
+//     return var2_;
+//   }
+//
+//   void CallMeFromPacer() {
+//     RTC_DCHECK_RUN_ON(&pacer_thread_checker_)
+//        << "Should be called from pacer";
+//     CalledFromPacer();
+//   }
+//
+//  private:
+//   int pacer_var_ RTC_GUARDED_BY(pacer_thread_checker_);
+//   rtc::ThreadChecker pacer_thread_checker_;
+// };
+//
+// class TaskQueueExample {
+//  public:
+//   class Encoder {
+//    public:
+//     rtc::TaskQueue* Queue() { return encoder_queue_; }
+//     void Encode() {
+//       RTC_DCHECK_RUN_ON(encoder_queue_);
+//       DoSomething(var_);
+//     }
+//
+//    private:
+//     rtc::TaskQueue* const encoder_queue_;
+//     Frame var_ RTC_GUARDED_BY(encoder_queue_);
+//   };
+//
+//   void Encode() {
+//     // Will fail at runtime when DCHECK is enabled:
+//     // encoder_->Encode();
+//     // Will work:
+//     rtc::scoped_ref_ptr<Encoder> encoder = encoder_;
+//     encoder_->Queue()->PostTask([encoder] { encoder->Encode(); });
+//   }
+//
+//  private:
+//   rtc::scoped_ref_ptr<Encoder> encoder_;
+// }
+
+// Document if a function expected to be called from same thread/task queue.
+#define RTC_RUN_ON(x) RTC_THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(x))
+
+#define RTC_DCHECK_RUN_ON(thread_like_object) \
+  rtc::internal::AnnounceOnThread thread_announcer(thread_like_object); \
+  RTC_DCHECK(rtc::internal::AnnounceOnThread::IsCurrent(thread_like_object))
+
+#endif  // RTC_BASE_THREAD_CHECKER_H_
diff --git a/rtc_base/thread_checker_impl.cc b/rtc_base/thread_checker_impl.cc
new file mode 100644
index 0000000..6ec5c91
--- /dev/null
+++ b/rtc_base/thread_checker_impl.cc
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Borrowed from Chromium's src/base/threading/thread_checker_impl.cc.
+
+#include "rtc_base/thread_checker_impl.h"
+
+#include "rtc_base/platform_thread.h"
+
+namespace rtc {
+
+ThreadCheckerImpl::ThreadCheckerImpl() : valid_thread_(CurrentThreadRef()) {
+}
+
+ThreadCheckerImpl::~ThreadCheckerImpl() {
+}
+
+bool ThreadCheckerImpl::CalledOnValidThread() const {
+  const PlatformThreadRef current_thread = CurrentThreadRef();
+  CritScope scoped_lock(&lock_);
+  if (!valid_thread_)  // Set if previously detached.
+    valid_thread_ = current_thread;
+  return IsThreadRefEqual(valid_thread_, current_thread);
+}
+
+void ThreadCheckerImpl::DetachFromThread() {
+  CritScope scoped_lock(&lock_);
+  valid_thread_ = 0;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/thread_checker_impl.h b/rtc_base/thread_checker_impl.h
new file mode 100644
index 0000000..c82fe1d
--- /dev/null
+++ b/rtc_base/thread_checker_impl.h
@@ -0,0 +1,48 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Borrowed from Chromium's src/base/threading/thread_checker_impl.h.
+
+#ifndef RTC_BASE_THREAD_CHECKER_IMPL_H_
+#define RTC_BASE_THREAD_CHECKER_IMPL_H_
+
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/platform_thread_types.h"
+
+namespace rtc {
+
+// Real implementation of ThreadChecker, for use in debug mode, or
+// for temporary use in release mode (e.g. to RTC_CHECK on a threading issue
+// seen only in the wild).
+//
+// Note: You should almost always use the ThreadChecker class to get the
+// right version for your build configuration.
+class ThreadCheckerImpl {
+ public:
+  ThreadCheckerImpl();
+  ~ThreadCheckerImpl();
+
+  bool CalledOnValidThread() const;
+
+  // Changes the thread that is checked for in CalledOnValidThread.  This may
+  // be useful when an object may be created on one thread and then used
+  // exclusively on another thread.
+  void DetachFromThread();
+
+ private:
+  CriticalSection lock_;
+  // This is mutable so that CalledOnValidThread can set it.
+  // It's guarded by |lock_|.
+  mutable PlatformThreadRef valid_thread_;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_THREAD_CHECKER_IMPL_H_
diff --git a/rtc_base/thread_checker_unittest.cc b/rtc_base/thread_checker_unittest.cc
new file mode 100644
index 0000000..ba5ac9e
--- /dev/null
+++ b/rtc_base/thread_checker_unittest.cc
@@ -0,0 +1,259 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Borrowed from Chromium's src/base/threading/thread_checker_unittest.cc.
+
+#include <memory>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/nullsocketserver.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/thread_checker.h"
+#include "test/gtest.h"
+
+// Duplicated from base/threading/thread_checker.h so that we can be
+// good citizens there and undef the macro.
+#define ENABLE_THREAD_CHECKER RTC_DCHECK_IS_ON
+
+namespace rtc {
+
+namespace {
+
+// Simple class to exercise the basics of ThreadChecker.
+// Both the destructor and DoStuff should verify that they were
+// called on the same thread as the constructor.
+class ThreadCheckerClass : public ThreadChecker {
+ public:
+  ThreadCheckerClass() {}
+
+  // Verifies that it was called on the same thread as the constructor.
+  void DoStuff() { RTC_DCHECK(CalledOnValidThread()); }
+
+  void DetachFromThread() {
+    ThreadChecker::DetachFromThread();
+  }
+
+  static void MethodOnDifferentThreadImpl();
+  static void DetachThenCallFromDifferentThreadImpl();
+
+ private:
+  RTC_DISALLOW_COPY_AND_ASSIGN(ThreadCheckerClass);
+};
+
+// Calls ThreadCheckerClass::DoStuff on another thread.
+class CallDoStuffOnThread : public Thread {
+ public:
+  explicit CallDoStuffOnThread(ThreadCheckerClass* thread_checker_class)
+      : Thread(std::unique_ptr<SocketServer>(new rtc::NullSocketServer())),
+        thread_checker_class_(thread_checker_class) {
+    SetName("call_do_stuff_on_thread", nullptr);
+  }
+
+  void Run() override { thread_checker_class_->DoStuff(); }
+
+  // New method. Needed since Thread::Join is protected, and it is called by
+  // the TEST.
+  void Join() {
+    Thread::Join();
+  }
+
+ private:
+  ThreadCheckerClass* thread_checker_class_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(CallDoStuffOnThread);
+};
+
+// Deletes ThreadCheckerClass on a different thread.
+class DeleteThreadCheckerClassOnThread : public Thread {
+ public:
+  explicit DeleteThreadCheckerClassOnThread(
+      std::unique_ptr<ThreadCheckerClass> thread_checker_class)
+      : Thread(std::unique_ptr<SocketServer>(new rtc::NullSocketServer())),
+        thread_checker_class_(std::move(thread_checker_class)) {
+    SetName("delete_thread_checker_class_on_thread", nullptr);
+  }
+
+  void Run() override { thread_checker_class_.reset(); }
+
+  // New method. Needed since Thread::Join is protected, and it is called by
+  // the TEST.
+  void Join() {
+    Thread::Join();
+  }
+
+  bool has_been_deleted() const { return !thread_checker_class_; }
+
+ private:
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(DeleteThreadCheckerClassOnThread);
+};
+
+}  // namespace
+
+TEST(ThreadCheckerTest, CallsAllowedOnSameThread) {
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class(
+      new ThreadCheckerClass);
+
+  // Verify that DoStuff doesn't assert.
+  thread_checker_class->DoStuff();
+
+  // Verify that the destructor doesn't assert.
+  thread_checker_class.reset();
+}
+
+TEST(ThreadCheckerTest, DestructorAllowedOnDifferentThread) {
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class(
+      new ThreadCheckerClass);
+
+  // Verify that the destructor doesn't assert
+  // when called on a different thread.
+  DeleteThreadCheckerClassOnThread delete_on_thread(
+      std::move(thread_checker_class));
+
+  EXPECT_FALSE(delete_on_thread.has_been_deleted());
+
+  delete_on_thread.Start();
+  delete_on_thread.Join();
+
+  EXPECT_TRUE(delete_on_thread.has_been_deleted());
+}
+
+TEST(ThreadCheckerTest, DetachFromThread) {
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class(
+      new ThreadCheckerClass);
+
+  // Verify that DoStuff doesn't assert when called on a different thread after
+  // a call to DetachFromThread.
+  thread_checker_class->DetachFromThread();
+  CallDoStuffOnThread call_on_thread(thread_checker_class.get());
+
+  call_on_thread.Start();
+  call_on_thread.Join();
+}
+
+#if GTEST_HAS_DEATH_TEST || !ENABLE_THREAD_CHECKER
+
+void ThreadCheckerClass::MethodOnDifferentThreadImpl() {
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class(
+      new ThreadCheckerClass);
+
+  // DoStuff should assert in debug builds only when called on a
+  // different thread.
+  CallDoStuffOnThread call_on_thread(thread_checker_class.get());
+
+  call_on_thread.Start();
+  call_on_thread.Join();
+}
+
+#if ENABLE_THREAD_CHECKER
+TEST(ThreadCheckerDeathTest, MethodNotAllowedOnDifferentThreadInDebug) {
+  ASSERT_DEATH({
+      ThreadCheckerClass::MethodOnDifferentThreadImpl();
+    }, "");
+}
+#else
+TEST(ThreadCheckerTest, MethodAllowedOnDifferentThreadInRelease) {
+  ThreadCheckerClass::MethodOnDifferentThreadImpl();
+}
+#endif  // ENABLE_THREAD_CHECKER
+
+void ThreadCheckerClass::DetachThenCallFromDifferentThreadImpl() {
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class(
+      new ThreadCheckerClass);
+
+  // DoStuff doesn't assert when called on a different thread
+  // after a call to DetachFromThread.
+  thread_checker_class->DetachFromThread();
+  CallDoStuffOnThread call_on_thread(thread_checker_class.get());
+
+  call_on_thread.Start();
+  call_on_thread.Join();
+
+  // DoStuff should assert in debug builds only after moving to
+  // another thread.
+  thread_checker_class->DoStuff();
+}
+
+#if ENABLE_THREAD_CHECKER
+TEST(ThreadCheckerDeathTest, DetachFromThreadInDebug) {
+  ASSERT_DEATH({
+    ThreadCheckerClass::DetachThenCallFromDifferentThreadImpl();
+    }, "");
+}
+#else
+TEST(ThreadCheckerTest, DetachFromThreadInRelease) {
+  ThreadCheckerClass::DetachThenCallFromDifferentThreadImpl();
+}
+#endif  // ENABLE_THREAD_CHECKER
+
+#endif  // GTEST_HAS_DEATH_TEST || !ENABLE_THREAD_CHECKER
+
+class ThreadAnnotateTest {
+ public:
+  // Next two function should create warnings when compile (e.g. if used with
+  // specific T).
+  // TODO(danilchap): Find a way to test they do not compile when thread
+  // annotation checks enabled.
+  template<typename T>
+  void access_var_no_annotate() {
+    var_thread_ = 42;
+  }
+
+  template<typename T>
+  void access_fun_no_annotate() {
+    function();
+  }
+
+  // Functions below should be able to compile.
+  void access_var_annotate_thread() {
+    RTC_DCHECK_RUN_ON(thread_);
+    var_thread_ = 42;
+  }
+
+  void access_var_annotate_checker() {
+    RTC_DCHECK_RUN_ON(&checker_);
+    var_checker_ = 44;
+  }
+
+  void access_var_annotate_queue() {
+    RTC_DCHECK_RUN_ON(queue_);
+    var_queue_ = 46;
+  }
+
+  void access_fun_annotate() {
+    RTC_DCHECK_RUN_ON(thread_);
+    function();
+  }
+
+  void access_fun_and_var() {
+    RTC_DCHECK_RUN_ON(thread_);
+    fun_acccess_var();
+  }
+
+ private:
+  void function() RTC_RUN_ON(thread_) {}
+  void fun_acccess_var() RTC_RUN_ON(thread_) { var_thread_ = 13; }
+
+  rtc::Thread* thread_;
+  rtc::ThreadChecker checker_;
+  rtc::TaskQueue* queue_;
+
+  int var_thread_ RTC_GUARDED_BY(thread_);
+  int var_checker_ RTC_GUARDED_BY(checker_);
+  int var_queue_ RTC_GUARDED_BY(queue_);
+};
+
+// Just in case we ever get lumped together with other compilation units.
+#undef ENABLE_THREAD_CHECKER
+
+}  // namespace rtc
diff --git a/rtc_base/thread_darwin.mm b/rtc_base/thread_darwin.mm
new file mode 100644
index 0000000..a404849
--- /dev/null
+++ b/rtc_base/thread_darwin.mm
@@ -0,0 +1,84 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/thread.h"
+
+#import <Foundation/Foundation.h>
+
+#include "rtc_base/platform_thread.h"
+
+/*
+ * This file contains platform-specific implementations for several
+ * methods in rtc::Thread.
+ */
+
+namespace {
+void InitCocoaMultiThreading() {
+  if ([NSThread isMultiThreaded] == NO) {
+    // The sole purpose of this autorelease pool is to avoid a console
+    // message on Leopard that tells us we're autoreleasing the thread
+    // with no autorelease pool in place.
+    @autoreleasepool {
+      [NSThread detachNewThreadSelector:@selector(class)
+                               toTarget:[NSObject class]
+                             withObject:nil];
+    }
+  }
+
+  RTC_DCHECK([NSThread isMultiThreaded]);
+}
+}
+
+namespace rtc {
+
+ThreadManager::ThreadManager() : main_thread_ref_(CurrentThreadRef()) {
+  pthread_key_create(&key_, nullptr);
+  // This is necessary to alert the cocoa runtime of the fact that
+  // we are running in a multithreaded environment.
+  InitCocoaMultiThreading();
+}
+
+// static
+void* Thread::PreRun(void* pv) {
+  ThreadInit* init = static_cast<ThreadInit*>(pv);
+  ThreadManager::Instance()->SetCurrentThread(init->thread);
+  rtc::SetCurrentThreadName(init->thread->name_.c_str());
+  @autoreleasepool {
+    if (init->runnable) {
+      init->runnable->Run(init->thread);
+    } else {
+      init->thread->Run();
+    }
+  }
+  ThreadManager::Instance()->SetCurrentThread(nullptr);
+  delete init;
+  return nullptr;
+}
+
+bool Thread::ProcessMessages(int cmsLoop) {
+  int64_t msEnd = (kForever == cmsLoop) ? 0 : TimeAfter(cmsLoop);
+  int cmsNext = cmsLoop;
+
+  while (true) {
+    @autoreleasepool {
+      Message msg;
+      if (!Get(&msg, cmsNext))
+        return !IsQuitting();
+      Dispatch(&msg);
+
+      if (cmsLoop != kForever) {
+        cmsNext = static_cast<int>(TimeUntil(msEnd));
+        if (cmsNext < 0)
+          return true;
+      }
+    }
+  }
+}
+}  // namespace rtc
diff --git a/rtc_base/thread_unittest.cc b/rtc_base/thread_unittest.cc
new file mode 100644
index 0000000..01022e9
--- /dev/null
+++ b/rtc_base/thread_unittest.cc
@@ -0,0 +1,670 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "rtc_base/asyncinvoker.h"
+#include "rtc_base/asyncudpsocket.h"
+#include "rtc_base/event.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/nullsocketserver.h"
+#include "rtc_base/physicalsocketserver.h"
+#include "rtc_base/sigslot.h"
+#include "rtc_base/socketaddress.h"
+#include "rtc_base/thread.h"
+
+#if defined(WEBRTC_WIN)
+#include <comdef.h>  // NOLINT
+#endif
+
+using namespace rtc;
+
+// Generates a sequence of numbers (collaboratively).
+class TestGenerator {
+ public:
+  TestGenerator() : last(0), count(0) {}
+
+  int Next(int prev) {
+    int result = prev + last;
+    last = result;
+    count += 1;
+    return result;
+  }
+
+  int last;
+  int count;
+};
+
+struct TestMessage : public MessageData {
+  explicit TestMessage(int v) : value(v) {}
+
+  int value;
+};
+
+// Receives on a socket and sends by posting messages.
+class SocketClient : public TestGenerator, public sigslot::has_slots<> {
+ public:
+  SocketClient(AsyncSocket* socket, const SocketAddress& addr,
+               Thread* post_thread, MessageHandler* phandler)
+      : socket_(AsyncUDPSocket::Create(socket, addr)),
+        post_thread_(post_thread),
+        post_handler_(phandler) {
+    socket_->SignalReadPacket.connect(this, &SocketClient::OnPacket);
+  }
+
+  ~SocketClient() override { delete socket_; }
+
+  SocketAddress address() const { return socket_->GetLocalAddress(); }
+
+  void OnPacket(AsyncPacketSocket* socket, const char* buf, size_t size,
+                const SocketAddress& remote_addr,
+                const PacketTime& packet_time) {
+    EXPECT_EQ(size, sizeof(uint32_t));
+    uint32_t prev = reinterpret_cast<const uint32_t*>(buf)[0];
+    uint32_t result = Next(prev);
+
+    post_thread_->PostDelayed(RTC_FROM_HERE, 200, post_handler_, 0,
+                              new TestMessage(result));
+  }
+
+ private:
+  AsyncUDPSocket* socket_;
+  Thread* post_thread_;
+  MessageHandler* post_handler_;
+};
+
+// Receives messages and sends on a socket.
+class MessageClient : public MessageHandler, public TestGenerator {
+ public:
+  MessageClient(Thread* pth, Socket* socket)
+      : socket_(socket) {
+  }
+
+  ~MessageClient() override { delete socket_; }
+
+  void OnMessage(Message* pmsg) override {
+    TestMessage* msg = static_cast<TestMessage*>(pmsg->pdata);
+    int result = Next(msg->value);
+    EXPECT_GE(socket_->Send(&result, sizeof(result)), 0);
+    delete msg;
+  }
+
+ private:
+  Socket* socket_;
+};
+
+class CustomThread : public rtc::Thread {
+ public:
+  CustomThread()
+      : Thread(std::unique_ptr<SocketServer>(new rtc::NullSocketServer())) {}
+  ~CustomThread() override { Stop(); }
+  bool Start() { return false; }
+
+  bool WrapCurrent() {
+    return Thread::WrapCurrent();
+  }
+  void UnwrapCurrent() {
+    Thread::UnwrapCurrent();
+  }
+};
+
+
+// A thread that does nothing when it runs and signals an event
+// when it is destroyed.
+class SignalWhenDestroyedThread : public Thread {
+ public:
+  SignalWhenDestroyedThread(Event* event)
+      : Thread(std::unique_ptr<SocketServer>(new NullSocketServer())),
+        event_(event) {}
+
+  ~SignalWhenDestroyedThread() override {
+    Stop();
+    event_->Set();
+  }
+
+  void Run() override {
+    // Do nothing.
+  }
+
+ private:
+  Event* event_;
+};
+
+// A bool wrapped in a mutex, to avoid data races. Using a volatile
+// bool should be sufficient for correct code ("eventual consistency"
+// between caches is sufficient), but we can't tell the compiler about
+// that, and then tsan complains about a data race.
+
+// See also discussion at
+// http://stackoverflow.com/questions/7223164/is-mutex-needed-to-synchronize-a-simple-flag-between-pthreads
+
+// Using std::atomic<bool> or std::atomic_flag in C++11 is probably
+// the right thing to do, but those features are not yet allowed. Or
+// rtc::AtomicInt, if/when that is added. Since the use isn't
+// performance critical, use a plain critical section for the time
+// being.
+
+class AtomicBool {
+ public:
+  explicit AtomicBool(bool value = false) : flag_(value) {}
+  AtomicBool& operator=(bool value) {
+    CritScope scoped_lock(&cs_);
+    flag_ = value;
+    return *this;
+  }
+  bool get() const {
+    CritScope scoped_lock(&cs_);
+    return flag_;
+  }
+
+ private:
+  CriticalSection cs_;
+  bool flag_;
+};
+
+// Function objects to test Thread::Invoke.
+struct FunctorA {
+  int operator()() { return 42; }
+};
+class FunctorB {
+ public:
+  explicit FunctorB(AtomicBool* flag) : flag_(flag) {}
+  void operator()() { if (flag_) *flag_ = true; }
+ private:
+  AtomicBool* flag_;
+};
+struct FunctorC {
+  int operator()() {
+    Thread::Current()->ProcessMessages(50);
+    return 24;
+  }
+};
+struct FunctorD {
+ public:
+  explicit FunctorD(AtomicBool* flag) : flag_(flag) {}
+  FunctorD(FunctorD&&) = default;
+  FunctorD& operator=(FunctorD&&) = default;
+  void operator()() { if (flag_) *flag_ = true; }
+ private:
+  AtomicBool* flag_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(FunctorD);
+};
+
+// See: https://code.google.com/p/webrtc/issues/detail?id=2409
+TEST(ThreadTest, DISABLED_Main) {
+  const SocketAddress addr("127.0.0.1", 0);
+
+  // Create the messaging client on its own thread.
+  auto th1 = Thread::CreateWithSocketServer();
+  Socket* socket =
+      th1->socketserver()->CreateAsyncSocket(addr.family(), SOCK_DGRAM);
+  MessageClient msg_client(th1.get(), socket);
+
+  // Create the socket client on its own thread.
+  auto th2 = Thread::CreateWithSocketServer();
+  AsyncSocket* asocket =
+      th2->socketserver()->CreateAsyncSocket(addr.family(), SOCK_DGRAM);
+  SocketClient sock_client(asocket, addr, th1.get(), &msg_client);
+
+  socket->Connect(sock_client.address());
+
+  th1->Start();
+  th2->Start();
+
+  // Get the messages started.
+  th1->PostDelayed(RTC_FROM_HERE, 100, &msg_client, 0, new TestMessage(1));
+
+  // Give the clients a little while to run.
+  // Messages will be processed at 100, 300, 500, 700, 900.
+  Thread* th_main = Thread::Current();
+  th_main->ProcessMessages(1000);
+
+  // Stop the sending client. Give the receiver a bit longer to run, in case
+  // it is running on a machine that is under load (e.g. the build machine).
+  th1->Stop();
+  th_main->ProcessMessages(200);
+  th2->Stop();
+
+  // Make sure the results were correct
+  EXPECT_EQ(5, msg_client.count);
+  EXPECT_EQ(34, msg_client.last);
+  EXPECT_EQ(5, sock_client.count);
+  EXPECT_EQ(55, sock_client.last);
+}
+
+// Test that setting thread names doesn't cause a malfunction.
+// There's no easy way to verify the name was set properly at this time.
+TEST(ThreadTest, Names) {
+  // Default name
+  auto thread = Thread::CreateWithSocketServer();
+  EXPECT_TRUE(thread->Start());
+  thread->Stop();
+  // Name with no object parameter
+  thread = Thread::CreateWithSocketServer();
+  EXPECT_TRUE(thread->SetName("No object", nullptr));
+  EXPECT_TRUE(thread->Start());
+  thread->Stop();
+  // Really long name
+  thread = Thread::CreateWithSocketServer();
+  EXPECT_TRUE(thread->SetName("Abcdefghijklmnopqrstuvwxyz1234567890", this));
+  EXPECT_TRUE(thread->Start());
+  thread->Stop();
+}
+
+TEST(ThreadTest, Wrap) {
+  Thread* current_thread = Thread::Current();
+  current_thread->UnwrapCurrent();
+  CustomThread* cthread = new CustomThread();
+  EXPECT_TRUE(cthread->WrapCurrent());
+  EXPECT_TRUE(cthread->RunningForTest());
+  EXPECT_FALSE(cthread->IsOwned());
+  cthread->UnwrapCurrent();
+  EXPECT_FALSE(cthread->RunningForTest());
+  delete cthread;
+  current_thread->WrapCurrent();
+}
+
+TEST(ThreadTest, Invoke) {
+  // Create and start the thread.
+  auto thread = Thread::CreateWithSocketServer();
+  thread->Start();
+  // Try calling functors.
+  EXPECT_EQ(42, thread->Invoke<int>(RTC_FROM_HERE, FunctorA()));
+  AtomicBool called;
+  FunctorB f2(&called);
+  thread->Invoke<void>(RTC_FROM_HERE, f2);
+  EXPECT_TRUE(called.get());
+  // Try calling bare functions.
+  struct LocalFuncs {
+    static int Func1() { return 999; }
+    static void Func2() {}
+  };
+  EXPECT_EQ(999, thread->Invoke<int>(RTC_FROM_HERE, &LocalFuncs::Func1));
+  thread->Invoke<void>(RTC_FROM_HERE, &LocalFuncs::Func2);
+}
+
+// Verifies that two threads calling Invoke on each other at the same time does
+// not deadlock.
+TEST(ThreadTest, TwoThreadsInvokeNoDeadlock) {
+  AutoThread thread;
+  Thread* current_thread = Thread::Current();
+  ASSERT_TRUE(current_thread != nullptr);
+
+  auto other_thread = Thread::CreateWithSocketServer();
+  other_thread->Start();
+
+  struct LocalFuncs {
+    static void Set(bool* out) { *out = true; }
+    static void InvokeSet(Thread* thread, bool* out) {
+      thread->Invoke<void>(RTC_FROM_HERE, Bind(&Set, out));
+    }
+  };
+
+  bool called = false;
+  other_thread->Invoke<void>(
+      RTC_FROM_HERE, Bind(&LocalFuncs::InvokeSet, current_thread, &called));
+
+  EXPECT_TRUE(called);
+}
+
+// Verifies that if thread A invokes a call on thread B and thread C is trying
+// to invoke A at the same time, thread A does not handle C's invoke while
+// invoking B.
+TEST(ThreadTest, ThreeThreadsInvoke) {
+  AutoThread thread;
+  Thread* thread_a = Thread::Current();
+  auto thread_b = Thread::CreateWithSocketServer();
+  auto thread_c = Thread::CreateWithSocketServer();
+  thread_b->Start();
+  thread_c->Start();
+
+  class LockedBool {
+   public:
+    explicit LockedBool(bool value) : value_(value) {}
+
+    void Set(bool value) {
+      CritScope lock(&crit_);
+      value_ = value;
+    }
+
+    bool Get() {
+      CritScope lock(&crit_);
+      return value_;
+    }
+
+   private:
+    CriticalSection crit_;
+    bool value_ RTC_GUARDED_BY(crit_);
+  };
+
+  struct LocalFuncs {
+    static void Set(LockedBool* out) { out->Set(true); }
+    static void InvokeSet(Thread* thread, LockedBool* out) {
+      thread->Invoke<void>(RTC_FROM_HERE, Bind(&Set, out));
+    }
+
+    // Set |out| true and call InvokeSet on |thread|.
+    static void SetAndInvokeSet(LockedBool* out,
+                                Thread* thread,
+                                LockedBool* out_inner) {
+      out->Set(true);
+      InvokeSet(thread, out_inner);
+    }
+
+    // Asynchronously invoke SetAndInvokeSet on |thread1| and wait until
+    // |thread1| starts the call.
+    static void AsyncInvokeSetAndWait(AsyncInvoker* invoker,
+                                      Thread* thread1,
+                                      Thread* thread2,
+                                      LockedBool* out) {
+      CriticalSection crit;
+      LockedBool async_invoked(false);
+
+      invoker->AsyncInvoke<void>(
+          RTC_FROM_HERE, thread1,
+          Bind(&SetAndInvokeSet, &async_invoked, thread2, out));
+
+      EXPECT_TRUE_WAIT(async_invoked.Get(), 2000);
+    }
+  };
+
+  AsyncInvoker invoker;
+  LockedBool thread_a_called(false);
+
+  // Start the sequence A --(invoke)--> B --(async invoke)--> C --(invoke)--> A.
+  // Thread B returns when C receives the call and C should be blocked until A
+  // starts to process messages.
+  thread_b->Invoke<void>(RTC_FROM_HERE,
+                         Bind(&LocalFuncs::AsyncInvokeSetAndWait, &invoker,
+                              thread_c.get(), thread_a, &thread_a_called));
+  EXPECT_FALSE(thread_a_called.Get());
+
+  EXPECT_TRUE_WAIT(thread_a_called.Get(), 2000);
+}
+
+// Set the name on a thread when the underlying QueueDestroyed signal is
+// triggered. This causes an error if the object is already partially
+// destroyed.
+class SetNameOnSignalQueueDestroyedTester : public sigslot::has_slots<> {
+ public:
+  SetNameOnSignalQueueDestroyedTester(Thread* thread) : thread_(thread) {
+    thread->SignalQueueDestroyed.connect(
+        this, &SetNameOnSignalQueueDestroyedTester::OnQueueDestroyed);
+  }
+
+  void OnQueueDestroyed() {
+    // Makes sure that if we access the Thread while it's being destroyed, that
+    // it doesn't cause a problem because the vtable has been modified.
+    thread_->SetName("foo", nullptr);
+  }
+
+ private:
+  Thread* thread_;
+};
+
+TEST(ThreadTest, SetNameOnSignalQueueDestroyed) {
+  auto thread1 = Thread::CreateWithSocketServer();
+  SetNameOnSignalQueueDestroyedTester tester1(thread1.get());
+  thread1.reset();
+
+  Thread* thread2 = new AutoThread();
+  SetNameOnSignalQueueDestroyedTester tester2(thread2);
+  delete thread2;
+}
+
+class AsyncInvokeTest : public testing::Test {
+ public:
+  void IntCallback(int value) {
+    EXPECT_EQ(expected_thread_, Thread::Current());
+    int_value_ = value;
+  }
+  void SetExpectedThreadForIntCallback(Thread* thread) {
+    expected_thread_ = thread;
+  }
+
+ protected:
+  enum { kWaitTimeout = 1000 };
+  AsyncInvokeTest()
+      : int_value_(0),
+        expected_thread_(nullptr) {}
+
+  int int_value_;
+  Thread* expected_thread_;
+};
+
+TEST_F(AsyncInvokeTest, FireAndForget) {
+  AsyncInvoker invoker;
+  // Create and start the thread.
+  auto thread = Thread::CreateWithSocketServer();
+  thread->Start();
+  // Try calling functor.
+  AtomicBool called;
+  invoker.AsyncInvoke<void>(RTC_FROM_HERE, thread.get(), FunctorB(&called));
+  EXPECT_TRUE_WAIT(called.get(), kWaitTimeout);
+  thread->Stop();
+}
+
+TEST_F(AsyncInvokeTest, NonCopyableFunctor) {
+  AsyncInvoker invoker;
+  // Create and start the thread.
+  auto thread = Thread::CreateWithSocketServer();
+  thread->Start();
+  // Try calling functor.
+  AtomicBool called;
+  invoker.AsyncInvoke<void>(RTC_FROM_HERE, thread.get(), FunctorD(&called));
+  EXPECT_TRUE_WAIT(called.get(), kWaitTimeout);
+  thread->Stop();
+}
+
+TEST_F(AsyncInvokeTest, KillInvokerDuringExecute) {
+  // Use these events to get in a state where the functor is in the middle of
+  // executing, and then to wait for it to finish, ensuring the "EXPECT_FALSE"
+  // is run.
+  Event functor_started(false, false);
+  Event functor_continue(false, false);
+  Event functor_finished(false, false);
+
+  auto thread = Thread::CreateWithSocketServer();
+  thread->Start();
+  volatile bool invoker_destroyed = false;
+  {
+    auto functor = [&functor_started, &functor_continue, &functor_finished,
+                    &invoker_destroyed] {
+      functor_started.Set();
+      functor_continue.Wait(Event::kForever);
+      rtc::Thread::Current()->SleepMs(kWaitTimeout);
+      EXPECT_FALSE(invoker_destroyed);
+      functor_finished.Set();
+    };
+    AsyncInvoker invoker;
+    invoker.AsyncInvoke<void>(RTC_FROM_HERE, thread.get(), functor);
+    functor_started.Wait(Event::kForever);
+
+    // Destroy the invoker while the functor is still executing (doing
+    // SleepMs).
+    functor_continue.Set();
+  }
+
+  // If the destructor DIDN'T wait for the functor to finish executing, it will
+  // hit the EXPECT_FALSE(invoker_destroyed) after it finishes sleeping for a
+  // second.
+  invoker_destroyed = true;
+  functor_finished.Wait(Event::kForever);
+}
+
+// Variant of the above test where the async-invoked task calls AsyncInvoke
+// *again*, for the thread on which the AsyncInvoker is currently being
+// destroyed. This shouldn't deadlock or crash; this second invocation should
+// just be ignored.
+TEST_F(AsyncInvokeTest, KillInvokerDuringExecuteWithReentrantInvoke) {
+  Event functor_started(false, false);
+  // Flag used to verify that the recursively invoked task never actually runs.
+  bool reentrant_functor_run = false;
+
+  Thread* main = Thread::Current();
+  Thread thread;
+  thread.Start();
+  {
+    AsyncInvoker invoker;
+    auto reentrant_functor = [&reentrant_functor_run] {
+      reentrant_functor_run = true;
+    };
+    auto functor = [&functor_started, &invoker, main, reentrant_functor] {
+      functor_started.Set();
+      Thread::Current()->SleepMs(kWaitTimeout);
+      invoker.AsyncInvoke<void>(RTC_FROM_HERE, main, reentrant_functor);
+    };
+    // This queues a task on |thread| to sleep for |kWaitTimeout| then queue a
+    // task on |main|. But this second queued task should never run, since the
+    // destructor will be entered before it's even invoked.
+    invoker.AsyncInvoke<void>(RTC_FROM_HERE, &thread, functor);
+    functor_started.Wait(Event::kForever);
+  }
+  EXPECT_FALSE(reentrant_functor_run);
+}
+
+TEST_F(AsyncInvokeTest, Flush) {
+  AsyncInvoker invoker;
+  AtomicBool flag1;
+  AtomicBool flag2;
+  // Queue two async calls to the current thread.
+  invoker.AsyncInvoke<void>(RTC_FROM_HERE, Thread::Current(), FunctorB(&flag1));
+  invoker.AsyncInvoke<void>(RTC_FROM_HERE, Thread::Current(), FunctorB(&flag2));
+  // Because we haven't pumped messages, these should not have run yet.
+  EXPECT_FALSE(flag1.get());
+  EXPECT_FALSE(flag2.get());
+  // Force them to run now.
+  invoker.Flush(Thread::Current());
+  EXPECT_TRUE(flag1.get());
+  EXPECT_TRUE(flag2.get());
+}
+
+TEST_F(AsyncInvokeTest, FlushWithIds) {
+  AsyncInvoker invoker;
+  AtomicBool flag1;
+  AtomicBool flag2;
+  // Queue two async calls to the current thread, one with a message id.
+  invoker.AsyncInvoke<void>(RTC_FROM_HERE, Thread::Current(), FunctorB(&flag1),
+                            5);
+  invoker.AsyncInvoke<void>(RTC_FROM_HERE, Thread::Current(), FunctorB(&flag2));
+  // Because we haven't pumped messages, these should not have run yet.
+  EXPECT_FALSE(flag1.get());
+  EXPECT_FALSE(flag2.get());
+  // Execute pending calls with id == 5.
+  invoker.Flush(Thread::Current(), 5);
+  EXPECT_TRUE(flag1.get());
+  EXPECT_FALSE(flag2.get());
+  flag1 = false;
+  // Execute all pending calls. The id == 5 call should not execute again.
+  invoker.Flush(Thread::Current());
+  EXPECT_FALSE(flag1.get());
+  EXPECT_TRUE(flag2.get());
+}
+
+class GuardedAsyncInvokeTest : public testing::Test {
+ public:
+  void IntCallback(int value) {
+    EXPECT_EQ(expected_thread_, Thread::Current());
+    int_value_ = value;
+  }
+  void SetExpectedThreadForIntCallback(Thread* thread) {
+    expected_thread_ = thread;
+  }
+
+ protected:
+  const static int kWaitTimeout = 1000;
+  GuardedAsyncInvokeTest()
+      : int_value_(0),
+        expected_thread_(nullptr) {}
+
+  int int_value_;
+  Thread* expected_thread_;
+};
+
+// Functor for creating an invoker.
+struct CreateInvoker {
+  CreateInvoker(std::unique_ptr<GuardedAsyncInvoker>* invoker)
+      : invoker_(invoker) {}
+  void operator()() { invoker_->reset(new GuardedAsyncInvoker()); }
+  std::unique_ptr<GuardedAsyncInvoker>* invoker_;
+};
+
+// Test that we can call AsyncInvoke<void>() after the thread died.
+TEST_F(GuardedAsyncInvokeTest, KillThreadFireAndForget) {
+  // Create and start the thread.
+  std::unique_ptr<Thread> thread(Thread::Create());
+  thread->Start();
+  std::unique_ptr<GuardedAsyncInvoker> invoker;
+  // Create the invoker on |thread|.
+  thread->Invoke<void>(RTC_FROM_HERE, CreateInvoker(&invoker));
+  // Kill |thread|.
+  thread = nullptr;
+  // Try calling functor.
+  AtomicBool called;
+  EXPECT_FALSE(invoker->AsyncInvoke<void>(RTC_FROM_HERE, FunctorB(&called)));
+  // With thread gone, nothing should happen.
+  WAIT(called.get(), kWaitTimeout);
+  EXPECT_FALSE(called.get());
+}
+
+// The remaining tests check that GuardedAsyncInvoker behaves as AsyncInvoker
+// when Thread is still alive.
+TEST_F(GuardedAsyncInvokeTest, FireAndForget) {
+  GuardedAsyncInvoker invoker;
+  // Try calling functor.
+  AtomicBool called;
+  EXPECT_TRUE(invoker.AsyncInvoke<void>(RTC_FROM_HERE, FunctorB(&called)));
+  EXPECT_TRUE_WAIT(called.get(), kWaitTimeout);
+}
+
+TEST_F(GuardedAsyncInvokeTest, NonCopyableFunctor) {
+  GuardedAsyncInvoker invoker;
+  // Try calling functor.
+  AtomicBool called;
+  EXPECT_TRUE(invoker.AsyncInvoke<void>(RTC_FROM_HERE, FunctorD(&called)));
+  EXPECT_TRUE_WAIT(called.get(), kWaitTimeout);
+}
+
+TEST_F(GuardedAsyncInvokeTest, Flush) {
+  GuardedAsyncInvoker invoker;
+  AtomicBool flag1;
+  AtomicBool flag2;
+  // Queue two async calls to the current thread.
+  EXPECT_TRUE(invoker.AsyncInvoke<void>(RTC_FROM_HERE, FunctorB(&flag1)));
+  EXPECT_TRUE(invoker.AsyncInvoke<void>(RTC_FROM_HERE, FunctorB(&flag2)));
+  // Because we haven't pumped messages, these should not have run yet.
+  EXPECT_FALSE(flag1.get());
+  EXPECT_FALSE(flag2.get());
+  // Force them to run now.
+  EXPECT_TRUE(invoker.Flush());
+  EXPECT_TRUE(flag1.get());
+  EXPECT_TRUE(flag2.get());
+}
+
+TEST_F(GuardedAsyncInvokeTest, FlushWithIds) {
+  GuardedAsyncInvoker invoker;
+  AtomicBool flag1;
+  AtomicBool flag2;
+  // Queue two async calls to the current thread, one with a message id.
+  EXPECT_TRUE(invoker.AsyncInvoke<void>(RTC_FROM_HERE, FunctorB(&flag1), 5));
+  EXPECT_TRUE(invoker.AsyncInvoke<void>(RTC_FROM_HERE, FunctorB(&flag2)));
+  // Because we haven't pumped messages, these should not have run yet.
+  EXPECT_FALSE(flag1.get());
+  EXPECT_FALSE(flag2.get());
+  // Execute pending calls with id == 5.
+  EXPECT_TRUE(invoker.Flush(5));
+  EXPECT_TRUE(flag1.get());
+  EXPECT_FALSE(flag2.get());
+  flag1 = false;
+  // Execute all pending calls. The id == 5 call should not execute again.
+  EXPECT_TRUE(invoker.Flush());
+  EXPECT_FALSE(flag1.get());
+  EXPECT_TRUE(flag2.get());
+}
diff --git a/rtc_base/timedelta.h b/rtc_base/timedelta.h
new file mode 100644
index 0000000..c8dcf03
--- /dev/null
+++ b/rtc_base/timedelta.h
@@ -0,0 +1,129 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_TIMEDELTA_H_
+#define RTC_BASE_TIMEDELTA_H_
+
+#include <stdint.h>
+
+#include "rtc_base/timeutils.h"
+
+// Convenience class to convert between different units of relative time.
+// Stores time to precision of nanoseconds, as int64_t internally.
+// Doesn't check for overflow/underflow.
+//
+// Based on TimeDelta in:
+// https://code.google.com/p/chromium/codesearch#chromium/src/base/time/time.h
+namespace rtc {
+
+class TimeDelta {
+ public:
+  TimeDelta() : delta_(0) {}
+
+  // Converts units of time to TimeDeltas.
+  static constexpr TimeDelta FromSeconds(int64_t secs) {
+    return TimeDelta(secs * kNumNanosecsPerSec);
+  }
+  static constexpr TimeDelta FromMilliseconds(int64_t ms) {
+    return TimeDelta(ms * kNumNanosecsPerMillisec);
+  }
+  static constexpr TimeDelta FromMicroseconds(int64_t us) {
+    return TimeDelta(us * kNumNanosecsPerMicrosec);
+  }
+  static constexpr TimeDelta FromNanoseconds(int64_t ns) {
+    return TimeDelta(ns);
+  }
+
+  // Returns true if the time delta is zero.
+  bool is_zero() const { return delta_ == 0; }
+
+  // Converts TimeDelta to units of time.
+  int64_t ToSeconds() const { return delta_ / kNumNanosecsPerSec; }
+  int64_t ToMilliseconds() const { return delta_ / kNumNanosecsPerMillisec; }
+  int64_t ToMicroseconds() const { return delta_ / kNumNanosecsPerMicrosec; }
+  int64_t ToNanoseconds() const { return delta_; }
+
+  TimeDelta& operator=(TimeDelta other) {
+    delta_ = other.delta_;
+    return *this;
+  }
+
+  // Computations with other deltas.
+  TimeDelta operator+(TimeDelta other) const {
+    return TimeDelta(delta_ + other.delta_);
+  }
+  TimeDelta operator-(TimeDelta other) const {
+    return TimeDelta(delta_ + other.delta_);
+  }
+
+  TimeDelta& operator+=(TimeDelta other) { return *this = (*this + other); }
+  TimeDelta& operator-=(TimeDelta other) { return *this = (*this - other); }
+  TimeDelta operator-() const { return TimeDelta(-delta_); }
+
+  // Computations with numeric types.
+  template <typename T>
+  TimeDelta operator*(T a) const {
+    return TimeDelta(delta_ * a);
+  }
+  template <typename T>
+  TimeDelta operator/(T a) const {
+    return TimeDelta(delta_ / a);
+  }
+  template <typename T>
+  TimeDelta& operator*=(T a) {
+    return *this = (*this * a);
+  }
+  template <typename T>
+  TimeDelta& operator/=(T a) {
+    return *this = (*this / a);
+  }
+
+  TimeDelta operator%(TimeDelta a) const {
+    return TimeDelta(delta_ % a.delta_);
+  }
+
+  // Comparison operators.
+  constexpr bool operator==(TimeDelta other) const {
+    return delta_ == other.delta_;
+  }
+  constexpr bool operator!=(TimeDelta other) const {
+    return delta_ != other.delta_;
+  }
+  constexpr bool operator<(TimeDelta other) const {
+    return delta_ < other.delta_;
+  }
+  constexpr bool operator<=(TimeDelta other) const {
+    return delta_ <= other.delta_;
+  }
+  constexpr bool operator>(TimeDelta other) const {
+    return delta_ > other.delta_;
+  }
+  constexpr bool operator>=(TimeDelta other) const {
+    return delta_ >= other.delta_;
+  }
+
+ private:
+  // Constructs a delta given the duration in nanoseconds. This is private
+  // to avoid confusion by callers with an integer constructor. Use
+  // FromSeconds, FromMilliseconds, etc. instead.
+  constexpr explicit TimeDelta(int64_t delta_ns) : delta_(delta_ns) {}
+
+  // Delta in nanoseconds.
+  int64_t delta_;
+};
+
+template <typename T>
+inline TimeDelta operator*(T a, TimeDelta td) {
+  return td * a;
+}
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_TIMEDELTA_H_
diff --git a/rtc_base/timestampaligner.cc b/rtc_base/timestampaligner.cc
new file mode 100644
index 0000000..a9bcafb
--- /dev/null
+++ b/rtc_base/timestampaligner.cc
@@ -0,0 +1,135 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/timestampaligner.h"
+#include "rtc_base/timeutils.h"
+
+namespace rtc {
+
+TimestampAligner::TimestampAligner()
+    : frames_seen_(0),
+      offset_us_(0),
+      clip_bias_us_(0),
+      prev_translated_time_us_(std::numeric_limits<int64_t>::min()) {}
+
+TimestampAligner::~TimestampAligner() {}
+
+int64_t TimestampAligner::TranslateTimestamp(int64_t camera_time_us,
+                                             int64_t system_time_us) {
+  return ClipTimestamp(
+      camera_time_us + UpdateOffset(camera_time_us, system_time_us),
+      system_time_us);
+}
+
+int64_t TimestampAligner::UpdateOffset(int64_t camera_time_us,
+                                       int64_t system_time_us) {
+  // Estimate the offset between system monotonic time and the capture
+  // time from the camera. The camera is assumed to provide more
+  // accurate timestamps than we get from the system time. But the
+  // camera may use its own free-running clock with a large offset and
+  // a small drift compared to the system clock. So the model is
+  // basically
+  //
+  //   y_k = c_0 + c_1 * x_k + v_k
+  //
+  // where x_k is the camera timestamp, believed to be accurate in its
+  // own scale. y_k is our reading of the system clock. v_k is the
+  // measurement noise, i.e., the delay from frame capture until the
+  // system clock was read.
+  //
+  // It's possible to do (weighted) least-squares estimation of both
+  // c_0 and c_1. Then we get the constants as c_1 = Cov(x,y) /
+  // Var(x), and c_0 = mean(y) - c_1 * mean(x). Substituting this c_0,
+  // we can rearrange the model as
+  //
+  //   y_k = mean(y) + (x_k - mean(x)) + (c_1 - 1) * (x_k - mean(x)) + v_k
+  //
+  // Now if we use a weighted average which gradually forgets old
+  // values, x_k - mean(x) is bounded, of the same order as the time
+  // constant (and close to constant for a steady frame rate). In
+  // addition, the frequency error |c_1 - 1| should be small. Cameras
+  // with a frequency error up to 3000 ppm (3 ms drift per second)
+  // have been observed, but frequency errors below 100 ppm could be
+  // expected of any cheap crystal.
+  //
+  // Bottom line is that we ignore the c_1 term, and use only the estimator
+  //
+  //    x_k + mean(y-x)
+  //
+  // where mean is plain averaging for initial samples, followed by
+  // exponential averaging.
+
+  // The input for averaging, y_k - x_k in the above notation.
+  int64_t diff_us = system_time_us - camera_time_us;
+  // The deviation from the current average.
+  int64_t error_us = diff_us - offset_us_;
+
+  // If the current difference is far from the currently estimated
+  // offset, the filter is reset. This could happen, e.g., if the
+  // camera clock is reset, or cameras are plugged in and out, or if
+  // the application process is temporarily suspended. Expected to
+  // happen for the very first timestamp (|frames_seen_| = 0). The
+  // threshold of 300 ms should make this unlikely in normal
+  // operation, and at the same time, converging gradually rather than
+  // resetting the filter should be tolerable for jumps in camera time
+  // below this threshold.
+  static const int64_t kResetThresholdUs = 300000;
+  if (std::abs(error_us) > kResetThresholdUs) {
+    RTC_LOG(LS_INFO) << "Resetting timestamp translation after averaging "
+                     << frames_seen_ << " frames. Old offset: " << offset_us_
+                     << ", new offset: " << diff_us;
+    frames_seen_ = 0;
+    clip_bias_us_ = 0;
+  }
+
+  static const int kWindowSize = 100;
+  if (frames_seen_ < kWindowSize) {
+    ++frames_seen_;
+  }
+  offset_us_ += error_us / frames_seen_;
+  return offset_us_;
+}
+
+int64_t TimestampAligner::ClipTimestamp(int64_t filtered_time_us,
+                                        int64_t system_time_us) {
+  const int64_t kMinFrameIntervalUs = rtc::kNumMicrosecsPerMillisec;
+  // Clip to make sure we don't produce timestamps in the future.
+  int64_t time_us = filtered_time_us - clip_bias_us_;
+  if (time_us > system_time_us) {
+    clip_bias_us_ += time_us - system_time_us;
+    time_us = system_time_us;
+  }
+  // Make timestamps monotonic, with a minimum inter-frame interval of 1 ms.
+  else if (time_us < prev_translated_time_us_ + kMinFrameIntervalUs) {
+    time_us = prev_translated_time_us_ + kMinFrameIntervalUs;
+    if (time_us > system_time_us) {
+      // In the anomalous case that this function is called with values of
+      // |system_time_us| less than |kMinFrameIntervalUs| apart, we may output
+      // timestamps with with too short inter-frame interval. We may even return
+      // duplicate timestamps in case this function is called several times with
+      // exactly the same |system_time_us|.
+      RTC_LOG(LS_WARNING) << "too short translated timestamp interval: "
+                          << "system time (us) = " << system_time_us
+                          << ", interval (us) = "
+                          << system_time_us - prev_translated_time_us_;
+      time_us = system_time_us;
+    }
+  }
+  RTC_DCHECK_GE(time_us, prev_translated_time_us_);
+  RTC_DCHECK_LE(time_us, system_time_us);
+  prev_translated_time_us_ = time_us;
+  return time_us;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/timestampaligner.h b/rtc_base/timestampaligner.h
new file mode 100644
index 0000000..6fa8d69
--- /dev/null
+++ b/rtc_base/timestampaligner.h
@@ -0,0 +1,74 @@
+/*
+ *  Copyright (c) 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_TIMESTAMPALIGNER_H_
+#define RTC_BASE_TIMESTAMPALIGNER_H_
+
+#include <stdint.h>
+
+#include "rtc_base/constructormagic.h"
+
+namespace rtc {
+
+// The TimestampAligner class helps translating camera timestamps into
+// the same timescale as is used by rtc::TimeMicros(). Some cameras
+// have built in timestamping which is more accurate than reading the
+// system clock, but using a different epoch and unknown clock drift.
+// Frame timestamps in webrtc should use rtc::TimeMicros (system monotonic
+// time), and this class provides a filter which lets us use the
+// rtc::TimeMicros timescale, and at the same time take advantage of
+// higher accuracy of the camera clock.
+
+// This class is not thread safe, so all calls to it must be synchronized
+// externally.
+class TimestampAligner {
+ public:
+  TimestampAligner();
+  ~TimestampAligner();
+
+ public:
+  // Translates camera timestamps to the same timescale as is used by
+  // rtc::TimeMicros(). |camera_time_us| is assumed to be accurate, but
+  // with an unknown epoch and clock drift. |system_time_us| is
+  // time according to rtc::TimeMicros(), preferably read as soon as
+  // possible when the frame is captured. It may have poor accuracy
+  // due to poor resolution or scheduling delays. Returns the
+  // translated timestamp.
+  int64_t TranslateTimestamp(int64_t camera_time_us, int64_t system_time_us);
+
+ protected:
+  // Update the estimated offset between camera time and system monotonic time.
+  int64_t UpdateOffset(int64_t camera_time_us, int64_t system_time_us);
+
+  // Clip timestamp, return value is always
+  //    <= |system_time_us|, and
+  //    >= min(|prev_translated_time_us_| + |kMinFrameIntervalUs|,
+  //           |system_time_us|).
+  int64_t ClipTimestamp(int64_t filtered_time_us, int64_t system_time_us);
+
+ private:
+  // State for the timestamp translation.
+  int frames_seen_;
+  // Estimated offset between camera time and system monotonic time.
+  int64_t offset_us_;
+
+  // State for the ClipTimestamp method, applied after the filter.
+  // A large negative camera clock drift tends to push translated
+  // timestamps into the future. |clip_bias_us_| is subtracted from the
+  // translated timestamps, to get them back from the future.
+  int64_t clip_bias_us_;
+  // Used to ensure that translated timestamps are monotonous.
+  int64_t prev_translated_time_us_;
+  RTC_DISALLOW_COPY_AND_ASSIGN(TimestampAligner);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_TIMESTAMPALIGNER_H_
diff --git a/rtc_base/timestampaligner_unittest.cc b/rtc_base/timestampaligner_unittest.cc
new file mode 100644
index 0000000..8ba5be9
--- /dev/null
+++ b/rtc_base/timestampaligner_unittest.cc
@@ -0,0 +1,187 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+
+#include <algorithm>
+#include <limits>
+
+#include "rtc_base/gunit.h"
+#include "rtc_base/random.h"
+#include "rtc_base/timestampaligner.h"
+
+namespace rtc {
+
+namespace {
+// Computes the difference x_k - mean(x), when x_k is the linear sequence x_k =
+// k, and the "mean" is plain mean for the first |window_size| samples, followed
+// by exponential averaging with weight 1 / |window_size| for each new sample.
+// This is needed to predict the effect of camera clock drift on the timestamp
+// translation. See the comment on TimestampAligner::UpdateOffset for more
+// context.
+double MeanTimeDifference(int nsamples, int window_size) {
+  if (nsamples <= window_size) {
+    // Plain averaging.
+    return nsamples / 2.0;
+  } else {
+    // Exponential convergence towards
+    // interval_error * (window_size - 1)
+    double alpha = 1.0 - 1.0 / window_size;
+
+    return ((window_size - 1) -
+            (window_size / 2.0 - 1) * pow(alpha, nsamples - window_size));
+  }
+}
+
+class TimestampAlignerForTest : public TimestampAligner {
+  // Make internal methods accessible to testing.
+ public:
+  using TimestampAligner::UpdateOffset;
+  using TimestampAligner::ClipTimestamp;
+};
+
+void TestTimestampFilter(double rel_freq_error) {
+  TimestampAlignerForTest timestamp_aligner_for_test;
+  TimestampAligner timestamp_aligner;
+  const int64_t kEpoch = 10000;
+  const int64_t kJitterUs = 5000;
+  const int64_t kIntervalUs = 33333;  // 30 FPS
+  const int kWindowSize = 100;
+  const int kNumFrames = 3 * kWindowSize;
+
+  int64_t interval_error_us = kIntervalUs * rel_freq_error;
+  int64_t system_start_us = rtc::TimeMicros();
+  webrtc::Random random(17);
+
+  int64_t prev_translated_time_us = system_start_us;
+
+  for (int i = 0; i < kNumFrames; i++) {
+    // Camera time subject to drift.
+    int64_t camera_time_us = kEpoch + i * (kIntervalUs + interval_error_us);
+    int64_t system_time_us = system_start_us + i * kIntervalUs;
+    // And system time readings are subject to jitter.
+    int64_t system_measured_us = system_time_us + random.Rand(kJitterUs);
+
+    int64_t offset_us = timestamp_aligner_for_test.UpdateOffset(
+        camera_time_us, system_measured_us);
+
+    int64_t filtered_time_us = camera_time_us + offset_us;
+    int64_t translated_time_us = timestamp_aligner_for_test.ClipTimestamp(
+        filtered_time_us, system_measured_us);
+
+    // Check that we get identical result from the all-in-one helper method.
+    ASSERT_EQ(translated_time_us, timestamp_aligner.TranslateTimestamp(
+                                      camera_time_us, system_measured_us));
+
+    EXPECT_LE(translated_time_us, system_measured_us);
+    EXPECT_GE(translated_time_us,
+              prev_translated_time_us + rtc::kNumMicrosecsPerMillisec);
+
+    // The relative frequency error contributes to the expected error
+    // by a factor which is the difference between the current time
+    // and the average of earlier sample times.
+    int64_t expected_error_us =
+        kJitterUs / 2 +
+        rel_freq_error * kIntervalUs * MeanTimeDifference(i, kWindowSize);
+
+    int64_t bias_us = filtered_time_us - translated_time_us;
+    EXPECT_GE(bias_us, 0);
+
+    if (i == 0) {
+      EXPECT_EQ(translated_time_us, system_measured_us);
+    } else {
+      EXPECT_NEAR(filtered_time_us, system_time_us + expected_error_us,
+                  2.0 * kJitterUs / sqrt(std::max(i, kWindowSize)));
+    }
+    // If the camera clock runs too fast (rel_freq_error > 0.0), The
+    // bias is expected to roughly cancel the expected error from the
+    // clock drift, as this grows. Otherwise, it reflects the
+    // measurement noise. The tolerances here were selected after some
+    // trial and error.
+    if (i < 10 || rel_freq_error <= 0.0) {
+      EXPECT_LE(bias_us, 3000);
+    } else {
+      EXPECT_NEAR(bias_us, expected_error_us, 1500);
+    }
+    prev_translated_time_us = translated_time_us;
+  }
+}
+
+}  // Anonymous namespace
+
+TEST(TimestampAlignerTest, AttenuateTimestampJitterNoDrift) {
+  TestTimestampFilter(0.0);
+}
+
+// 100 ppm is a worst case for a reasonable crystal.
+TEST(TimestampAlignerTest, AttenuateTimestampJitterSmallPosDrift) {
+  TestTimestampFilter(0.0001);
+}
+
+TEST(TimestampAlignerTest, AttenuateTimestampJitterSmallNegDrift) {
+  TestTimestampFilter(-0.0001);
+}
+
+// 3000 ppm, 3 ms / s, is the worst observed drift, see
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=5456
+TEST(TimestampAlignerTest, AttenuateTimestampJitterLargePosDrift) {
+  TestTimestampFilter(0.003);
+}
+
+TEST(TimestampAlignerTest, AttenuateTimestampJitterLargeNegDrift) {
+  TestTimestampFilter(-0.003);
+}
+
+// Exhibits a mostly hypothetical problem, where certain inputs to the
+// TimestampAligner.UpdateOffset filter result in non-monotonous
+// translated timestamps. This test verifies that the ClipTimestamp
+// logic handles this case correctly.
+TEST(TimestampAlignerTest, ClipToMonotonous) {
+  TimestampAlignerForTest timestamp_aligner;
+
+  // For system time stamps { 0, s1, s1 + s2 }, and camera timestamps
+  // {0, c1, c1 + c2}, we exhibit non-monotonous behaviour if and only
+  // if c1 > s1 + 2 s2 + 4 c2.
+  const int kNumSamples = 3;
+  const int64_t camera_time_us[kNumSamples] = {0, 80000, 90001};
+  const int64_t system_time_us[kNumSamples] = {0, 10000, 20000};
+  const int64_t expected_offset_us[kNumSamples] = {0, -35000, -46667};
+
+  // Non-monotonic translated timestamps can happen when only for
+  // translated timestamps in the future. Which is tolerated if
+  // |timestamp_aligner.clip_bias_us| is large enough. Instead of
+  // changing that private member for this test, just add the bias to
+  // |system_time_us| when calling ClipTimestamp.
+  const int64_t kClipBiasUs = 100000;
+
+  bool did_clip = false;
+  int64_t prev_timestamp_us = std::numeric_limits<int64_t>::min();
+  for (int i = 0; i < kNumSamples; i++) {
+    int64_t offset_us =
+        timestamp_aligner.UpdateOffset(camera_time_us[i], system_time_us[i]);
+    EXPECT_EQ(offset_us, expected_offset_us[i]);
+
+    int64_t translated_timestamp_us = camera_time_us[i] + offset_us;
+    int64_t clip_timestamp_us = timestamp_aligner.ClipTimestamp(
+        translated_timestamp_us, system_time_us[i] + kClipBiasUs);
+    if (translated_timestamp_us <= prev_timestamp_us) {
+      did_clip = true;
+      EXPECT_EQ(clip_timestamp_us,
+                prev_timestamp_us + rtc::kNumMicrosecsPerMillisec);
+    } else {
+      // No change from clipping.
+      EXPECT_EQ(clip_timestamp_us, translated_timestamp_us);
+    }
+    prev_timestamp_us = clip_timestamp_us;
+  }
+  EXPECT_TRUE(did_clip);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/timeutils.cc b/rtc_base/timeutils.cc
new file mode 100644
index 0000000..35c25c7
--- /dev/null
+++ b/rtc_base/timeutils.cc
@@ -0,0 +1,217 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdint.h>
+
+#if defined(WEBRTC_POSIX)
+#include <sys/time.h>
+#if defined(WEBRTC_MAC)
+#include <mach/mach_time.h>
+#endif
+#endif
+
+#if defined(WEBRTC_WIN)
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
+#include <windows.h>
+#include <mmsystem.h>
+#include <sys/timeb.h>
+#endif
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/timeutils.h"
+
+namespace rtc {
+
+ClockInterface* g_clock = nullptr;
+
+ClockInterface* SetClockForTesting(ClockInterface* clock) {
+  ClockInterface* prev = g_clock;
+  g_clock = clock;
+  return prev;
+}
+
+ClockInterface* GetClockForTesting() {
+  return g_clock;
+}
+
+int64_t SystemTimeNanos() {
+  int64_t ticks;
+#if defined(WEBRTC_MAC)
+  static mach_timebase_info_data_t timebase;
+  if (timebase.denom == 0) {
+    // Get the timebase if this is the first time we run.
+    // Recommended by Apple's QA1398.
+    if (mach_timebase_info(&timebase) != KERN_SUCCESS) {
+      RTC_NOTREACHED();
+    }
+  }
+  // Use timebase to convert absolute time tick units into nanoseconds.
+  const auto mul = [](uint64_t a, uint32_t b) -> int64_t {
+    RTC_DCHECK_NE(b, 0);
+    RTC_DCHECK_LE(a, std::numeric_limits<int64_t>::max() / b)
+        << "The multiplication " << a << " * " << b << " overflows";
+    return rtc::dchecked_cast<int64_t>(a * b);
+  };
+  ticks = mul(mach_absolute_time(), timebase.numer) / timebase.denom;
+#elif defined(WEBRTC_POSIX)
+  struct timespec ts;
+  // TODO(deadbeef): Do we need to handle the case when CLOCK_MONOTONIC is not
+  // supported?
+  clock_gettime(CLOCK_MONOTONIC, &ts);
+  ticks = kNumNanosecsPerSec * static_cast<int64_t>(ts.tv_sec) +
+          static_cast<int64_t>(ts.tv_nsec);
+#elif defined(WEBRTC_WIN)
+  static volatile LONG last_timegettime = 0;
+  static volatile int64_t num_wrap_timegettime = 0;
+  volatile LONG* last_timegettime_ptr = &last_timegettime;
+  DWORD now = timeGetTime();
+  // Atomically update the last gotten time
+  DWORD old = InterlockedExchange(last_timegettime_ptr, now);
+  if (now < old) {
+    // If now is earlier than old, there may have been a race between threads.
+    // 0x0fffffff ~3.1 days, the code will not take that long to execute
+    // so it must have been a wrap around.
+    if (old > 0xf0000000 && now < 0x0fffffff) {
+      num_wrap_timegettime++;
+    }
+  }
+  ticks = now + (num_wrap_timegettime << 32);
+  // TODO(deadbeef): Calculate with nanosecond precision. Otherwise, we're
+  // just wasting a multiply and divide when doing Time() on Windows.
+  ticks = ticks * kNumNanosecsPerMillisec;
+#else
+#error Unsupported platform.
+#endif
+  return ticks;
+}
+
+int64_t SystemTimeMillis() {
+  return static_cast<int64_t>(SystemTimeNanos() / kNumNanosecsPerMillisec);
+}
+
+int64_t TimeNanos() {
+  if (g_clock) {
+    return g_clock->TimeNanos();
+  }
+  return SystemTimeNanos();
+}
+
+uint32_t Time32() {
+  return static_cast<uint32_t>(TimeNanos() / kNumNanosecsPerMillisec);
+}
+
+int64_t TimeMillis() {
+  return TimeNanos() / kNumNanosecsPerMillisec;
+}
+
+int64_t TimeMicros() {
+  return TimeNanos() / kNumNanosecsPerMicrosec;
+}
+
+int64_t TimeAfter(int64_t elapsed) {
+  RTC_DCHECK_GE(elapsed, 0);
+  return TimeMillis() + elapsed;
+}
+
+int32_t TimeDiff32(uint32_t later, uint32_t earlier) {
+  return later - earlier;
+}
+
+int64_t TimeDiff(int64_t later, int64_t earlier) {
+  return later - earlier;
+}
+
+TimestampWrapAroundHandler::TimestampWrapAroundHandler()
+    : last_ts_(0), num_wrap_(-1) {}
+
+int64_t TimestampWrapAroundHandler::Unwrap(uint32_t ts) {
+  if (num_wrap_ == -1) {
+    last_ts_ = ts;
+    num_wrap_ = 0;
+    return ts;
+  }
+
+  if (ts < last_ts_) {
+    if (last_ts_ >= 0xf0000000 && ts < 0x0fffffff)
+      ++num_wrap_;
+  } else if ((ts - last_ts_) > 0xf0000000) {
+    // Backwards wrap. Unwrap with last wrap count and don't update last_ts_.
+    return ts + ((num_wrap_ - 1) << 32);
+  }
+
+  last_ts_ = ts;
+  return ts + (num_wrap_ << 32);
+}
+
+int64_t TmToSeconds(const std::tm& tm) {
+  static short int mdays[12] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+  static short int cumul_mdays[12] = {0,   31,  59,  90,  120, 151,
+                                      181, 212, 243, 273, 304, 334};
+  int year = tm.tm_year + 1900;
+  int month = tm.tm_mon;
+  int day = tm.tm_mday - 1;  // Make 0-based like the rest.
+  int hour = tm.tm_hour;
+  int min = tm.tm_min;
+  int sec = tm.tm_sec;
+
+  bool expiry_in_leap_year = (year % 4 == 0 &&
+                              (year % 100 != 0 || year % 400 == 0));
+
+  if (year < 1970)
+    return -1;
+  if (month < 0 || month > 11)
+    return -1;
+  if (day < 0 || day >= mdays[month] + (expiry_in_leap_year && month == 2 - 1))
+    return -1;
+  if (hour < 0 || hour > 23)
+    return -1;
+  if (min < 0 || min > 59)
+    return -1;
+  if (sec < 0 || sec > 59)
+    return -1;
+
+  day += cumul_mdays[month];
+
+  // Add number of leap days between 1970 and the expiration year, inclusive.
+  day += ((year / 4 - 1970 / 4) - (year / 100 - 1970 / 100) +
+          (year / 400 - 1970 / 400));
+
+  // We will have added one day too much above if expiration is during a leap
+  // year, and expiration is in January or February.
+  if (expiry_in_leap_year && month <= 2 - 1) // |month| is zero based.
+    day -= 1;
+
+  // Combine all variables into seconds from 1970-01-01 00:00 (except |month|
+  // which was accumulated into |day| above).
+  return (((static_cast<int64_t>
+            (year - 1970) * 365 + day) * 24 + hour) * 60 + min) * 60 + sec;
+}
+
+int64_t TimeUTCMicros() {
+#if defined(WEBRTC_POSIX)
+  struct timeval time;
+  gettimeofday(&time, nullptr);
+  // Convert from second (1.0) and microsecond (1e-6).
+  return (static_cast<int64_t>(time.tv_sec) * rtc::kNumMicrosecsPerSec +
+          time.tv_usec);
+
+#elif defined(WEBRTC_WIN)
+  struct _timeb time;
+  _ftime(&time);
+  // Convert from second (1.0) and milliseconds (1e-3).
+  return (static_cast<int64_t>(time.time) * rtc::kNumMicrosecsPerSec +
+          static_cast<int64_t>(time.millitm) * rtc::kNumMicrosecsPerMillisec);
+#endif
+}
+
+} // namespace rtc
diff --git a/rtc_base/timeutils.h b/rtc_base/timeutils.h
new file mode 100644
index 0000000..f602d48
--- /dev/null
+++ b/rtc_base/timeutils.h
@@ -0,0 +1,158 @@
+/*
+ *  Copyright 2005 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_TIMEUTILS_H_
+#define RTC_BASE_TIMEUTILS_H_
+
+#include <stdint.h>
+#include <time.h>
+
+#include <ctime>
+#include <string>
+
+#include "rtc_base/checks.h"
+
+namespace rtc {
+
+static const int64_t kNumMillisecsPerSec = INT64_C(1000);
+static const int64_t kNumMicrosecsPerSec = INT64_C(1000000);
+static const int64_t kNumNanosecsPerSec = INT64_C(1000000000);
+
+static const int64_t kNumMicrosecsPerMillisec =
+    kNumMicrosecsPerSec / kNumMillisecsPerSec;
+static const int64_t kNumNanosecsPerMillisec =
+    kNumNanosecsPerSec / kNumMillisecsPerSec;
+static const int64_t kNumNanosecsPerMicrosec =
+    kNumNanosecsPerSec / kNumMicrosecsPerSec;
+
+// TODO(honghaiz): Define a type for the time value specifically.
+
+class ClockInterface {
+ public:
+  virtual ~ClockInterface() {}
+  virtual int64_t TimeNanos() const = 0;
+};
+
+// Sets the global source of time. This is useful mainly for unit tests.
+//
+// Returns the previously set ClockInterface, or nullptr if none is set.
+//
+// Does not transfer ownership of the clock. SetClockForTesting(nullptr)
+// should be called before the ClockInterface is deleted.
+//
+// This method is not thread-safe; it should only be used when no other thread
+// is running (for example, at the start/end of a unit test, or start/end of
+// main()).
+//
+// TODO(deadbeef): Instead of having functions that access this global
+// ClockInterface, we may want to pass the ClockInterface into everything
+// that uses it, eliminating the need for a global variable and this function.
+ClockInterface* SetClockForTesting(ClockInterface* clock);
+
+// Returns previously set clock, or nullptr if no custom clock is being used.
+ClockInterface* GetClockForTesting();
+
+// Returns the actual system time, even if a clock is set for testing.
+// Useful for timeouts while using a test clock, or for logging.
+int64_t SystemTimeNanos();
+int64_t SystemTimeMillis();
+
+// Returns the current time in milliseconds in 32 bits.
+uint32_t Time32();
+
+// Returns the current time in milliseconds in 64 bits.
+int64_t TimeMillis();
+// Deprecated. Do not use this in any new code.
+inline int64_t Time() {
+  return TimeMillis();
+}
+
+// Returns the current time in microseconds.
+int64_t TimeMicros();
+
+// Returns the current time in nanoseconds.
+int64_t TimeNanos();
+
+
+// Returns a future timestamp, 'elapsed' milliseconds from now.
+int64_t TimeAfter(int64_t elapsed);
+
+// Number of milliseconds that would elapse between 'earlier' and 'later'
+// timestamps.  The value is negative if 'later' occurs before 'earlier'.
+int64_t TimeDiff(int64_t later, int64_t earlier);
+int32_t TimeDiff32(uint32_t later, uint32_t earlier);
+
+// The number of milliseconds that have elapsed since 'earlier'.
+inline int64_t TimeSince(int64_t earlier) {
+  return TimeMillis() - earlier;
+}
+
+// The number of milliseconds that will elapse between now and 'later'.
+inline int64_t TimeUntil(int64_t later) {
+  return later - TimeMillis();
+}
+
+class TimestampWrapAroundHandler {
+ public:
+  TimestampWrapAroundHandler();
+
+  int64_t Unwrap(uint32_t ts);
+
+ private:
+  uint32_t last_ts_;
+  int64_t num_wrap_;
+};
+
+// Convert from std::tm, which is relative to 1900-01-01 00:00 to number of
+// seconds from 1970-01-01 00:00 ("epoch").  Don't return time_t since that
+// is still 32 bits on many systems.
+int64_t TmToSeconds(const std::tm& tm);
+
+// Return the number of microseconds since January 1, 1970, UTC.
+// Useful mainly when producing logs to be correlated with other
+// devices, and when the devices in question all have properly
+// synchronized clocks.
+//
+// Note that this function obeys the system's idea about what the time
+// is. It is not guaranteed to be monotonic; it will jump in case the
+// system time is changed, e.g., by some other process calling
+// settimeofday. Always use rtc::TimeMicros(), not this function, for
+// measuring time intervals and timeouts.
+int64_t TimeUTCMicros();
+
+// Interval of time from the range [min, max] inclusive.
+class IntervalRange {
+ public:
+  IntervalRange() : min_(0), max_(0) {}
+  IntervalRange(int min, int max) : min_(min), max_(max) {
+    RTC_DCHECK_LE(min, max);
+  }
+
+  int min() const { return min_; }
+  int max() const { return max_; }
+
+  std::string ToString() const {
+    std::stringstream ss;
+    ss << "[" << min_ << "," << max_ << "]";
+    return ss.str();
+  }
+
+  bool operator==(const IntervalRange& o) const {
+    return min_ == o.min_ && max_ == o.max_;
+  }
+
+ private:
+  int min_;
+  int max_;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_TIMEUTILS_H_
diff --git a/rtc_base/timeutils_unittest.cc b/rtc_base/timeutils_unittest.cc
new file mode 100644
index 0000000..592b7f8
--- /dev/null
+++ b/rtc_base/timeutils_unittest.cc
@@ -0,0 +1,382 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/timeutils.h"
+#include "rtc_base/event.h"
+#include "rtc_base/fakeclock.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/helpers.h"
+#include "rtc_base/thread.h"
+
+namespace rtc {
+
+TEST(TimeTest, TimeInMs) {
+  int64_t ts_earlier = TimeMillis();
+  Thread::SleepMs(100);
+  int64_t ts_now = TimeMillis();
+  // Allow for the thread to wakeup ~20ms early.
+  EXPECT_GE(ts_now, ts_earlier + 80);
+  // Make sure the Time is not returning in smaller unit like microseconds.
+  EXPECT_LT(ts_now, ts_earlier + 1000);
+}
+
+TEST(TimeTest, Intervals) {
+  int64_t ts_earlier = TimeMillis();
+  int64_t ts_later = TimeAfter(500);
+
+  // We can't depend on ts_later and ts_earlier to be exactly 500 apart
+  // since time elapses between the calls to TimeMillis() and TimeAfter(500)
+  EXPECT_LE(500,  TimeDiff(ts_later, ts_earlier));
+  EXPECT_GE(-500, TimeDiff(ts_earlier, ts_later));
+
+  // Time has elapsed since ts_earlier
+  EXPECT_GE(TimeSince(ts_earlier), 0);
+
+  // ts_earlier is earlier than now, so TimeUntil ts_earlier is -ve
+  EXPECT_LE(TimeUntil(ts_earlier), 0);
+
+  // ts_later likely hasn't happened yet, so TimeSince could be -ve
+  // but within 500
+  EXPECT_GE(TimeSince(ts_later), -500);
+
+  // TimeUntil ts_later is at most 500
+  EXPECT_LE(TimeUntil(ts_later), 500);
+}
+
+TEST(TimeTest, TestTimeDiff64) {
+  int64_t ts_diff = 100;
+  int64_t ts_earlier = rtc::TimeMillis();
+  int64_t ts_later = ts_earlier + ts_diff;
+  EXPECT_EQ(ts_diff, rtc::TimeDiff(ts_later, ts_earlier));
+  EXPECT_EQ(-ts_diff, rtc::TimeDiff(ts_earlier, ts_later));
+}
+
+class TimestampWrapAroundHandlerTest : public testing::Test {
+ public:
+  TimestampWrapAroundHandlerTest() {}
+
+ protected:
+  TimestampWrapAroundHandler wraparound_handler_;
+};
+
+TEST_F(TimestampWrapAroundHandlerTest, Unwrap) {
+  // Start value.
+  int64_t ts = 2;
+  EXPECT_EQ(ts,
+            wraparound_handler_.Unwrap(static_cast<uint32_t>(ts & 0xffffffff)));
+
+  // Wrap backwards.
+  ts = -2;
+  EXPECT_EQ(ts,
+            wraparound_handler_.Unwrap(static_cast<uint32_t>(ts & 0xffffffff)));
+
+  // Forward to 2 again.
+  ts = 2;
+  EXPECT_EQ(ts,
+            wraparound_handler_.Unwrap(static_cast<uint32_t>(ts & 0xffffffff)));
+
+  // Max positive skip ahead, until max value (0xffffffff).
+  for (uint32_t i = 0; i <= 0xf; ++i) {
+    ts = (i << 28) + 0x0fffffff;
+    EXPECT_EQ(
+        ts, wraparound_handler_.Unwrap(static_cast<uint32_t>(ts & 0xffffffff)));
+  }
+
+  // Wrap around.
+  ts += 2;
+  EXPECT_EQ(ts,
+            wraparound_handler_.Unwrap(static_cast<uint32_t>(ts & 0xffffffff)));
+
+  // Max wrap backward...
+  ts -= 0x0fffffff;
+  EXPECT_EQ(ts,
+            wraparound_handler_.Unwrap(static_cast<uint32_t>(ts & 0xffffffff)));
+
+  // ...and back again.
+  ts += 0x0fffffff;
+  EXPECT_EQ(ts,
+            wraparound_handler_.Unwrap(static_cast<uint32_t>(ts & 0xffffffff)));
+}
+
+TEST_F(TimestampWrapAroundHandlerTest, NoNegativeStart) {
+  int64_t ts = 0xfffffff0;
+  EXPECT_EQ(ts,
+            wraparound_handler_.Unwrap(static_cast<uint32_t>(ts & 0xffffffff)));
+}
+
+class TmToSeconds : public testing::Test {
+ public:
+  TmToSeconds() {
+    // Set use of the test RNG to get deterministic expiration timestamp.
+    rtc::SetRandomTestMode(true);
+  }
+  ~TmToSeconds() override {
+    // Put it back for the next test.
+    rtc::SetRandomTestMode(false);
+  }
+
+  void TestTmToSeconds(int times) {
+    static char mdays[12] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+    for (int i = 0; i < times; i++) {
+
+      // First generate something correct and check that TmToSeconds is happy.
+      int year = rtc::CreateRandomId() % 400 + 1970;
+
+      bool leap_year = false;
+      if (year % 4 == 0)
+        leap_year = true;
+      if (year % 100 == 0)
+        leap_year = false;
+      if (year % 400 == 0)
+        leap_year = true;
+
+      std::tm tm;
+      tm.tm_year = year - 1900;  // std::tm is year 1900 based.
+      tm.tm_mon = rtc::CreateRandomId() % 12;
+      tm.tm_mday = rtc::CreateRandomId() % mdays[tm.tm_mon] + 1;
+      tm.tm_hour = rtc::CreateRandomId() % 24;
+      tm.tm_min = rtc::CreateRandomId() % 60;
+      tm.tm_sec = rtc::CreateRandomId() % 60;
+      int64_t t = rtc::TmToSeconds(tm);
+      EXPECT_TRUE(t >= 0);
+
+      // Now damage a random field and check that TmToSeconds is unhappy.
+      switch (rtc::CreateRandomId() % 11) {
+        case 0:
+          tm.tm_year = 1969 - 1900;
+          break;
+        case 1:
+          tm.tm_mon = -1;
+          break;
+        case 2:
+          tm.tm_mon = 12;
+          break;
+        case 3:
+          tm.tm_mday = 0;
+          break;
+        case 4:
+          tm.tm_mday = mdays[tm.tm_mon] + (leap_year && tm.tm_mon == 1) + 1;
+          break;
+        case 5:
+          tm.tm_hour = -1;
+          break;
+        case 6:
+          tm.tm_hour = 24;
+          break;
+        case 7:
+          tm.tm_min = -1;
+          break;
+        case 8:
+          tm.tm_min = 60;
+          break;
+        case 9:
+          tm.tm_sec = -1;
+          break;
+        case 10:
+          tm.tm_sec = 60;
+          break;
+      }
+      EXPECT_EQ(rtc::TmToSeconds(tm), -1);
+    }
+    // Check consistency with the system gmtime_r.  With time_t, we can only
+    // portably test dates until 2038, which is achieved by the % 0x80000000.
+    for (int i = 0; i < times; i++) {
+      time_t t = rtc::CreateRandomId() % 0x80000000;
+#if defined(WEBRTC_WIN)
+      std::tm* tm = std::gmtime(&t);
+      EXPECT_TRUE(tm);
+      EXPECT_TRUE(rtc::TmToSeconds(*tm) == t);
+#else
+      std::tm tm;
+      EXPECT_TRUE(gmtime_r(&t, &tm));
+      EXPECT_TRUE(rtc::TmToSeconds(tm) == t);
+#endif
+    }
+  }
+};
+
+TEST_F(TmToSeconds, TestTmToSeconds) {
+  TestTmToSeconds(100000);
+}
+
+TEST(TimeDelta, FromAndTo) {
+  EXPECT_TRUE(TimeDelta::FromSeconds(2) == TimeDelta::FromMilliseconds(2000));
+  EXPECT_TRUE(TimeDelta::FromMilliseconds(3) ==
+              TimeDelta::FromMicroseconds(3000));
+  EXPECT_TRUE(TimeDelta::FromMicroseconds(4) ==
+              TimeDelta::FromNanoseconds(4000));
+  EXPECT_EQ(13, TimeDelta::FromSeconds(13).ToSeconds());
+  EXPECT_EQ(13, TimeDelta::FromMilliseconds(13).ToMilliseconds());
+  EXPECT_EQ(13, TimeDelta::FromMicroseconds(13).ToMicroseconds());
+  EXPECT_EQ(13, TimeDelta::FromNanoseconds(13).ToNanoseconds());
+}
+
+TEST(TimeDelta, ComparisonOperators) {
+  EXPECT_LT(TimeDelta::FromSeconds(1), TimeDelta::FromSeconds(2));
+  EXPECT_EQ(TimeDelta::FromSeconds(3), TimeDelta::FromSeconds(3));
+  EXPECT_GT(TimeDelta::FromSeconds(5), TimeDelta::FromSeconds(4));
+}
+
+TEST(TimeDelta, NumericOperators) {
+  double d = 0.5;
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            TimeDelta::FromMilliseconds(1000) * d);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            TimeDelta::FromMilliseconds(1000) / d);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            TimeDelta::FromMilliseconds(1000) *= d);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            TimeDelta::FromMilliseconds(1000) /= d);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            d * TimeDelta::FromMilliseconds(1000));
+
+  float f = 0.5;
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            TimeDelta::FromMilliseconds(1000) * f);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            TimeDelta::FromMilliseconds(1000) / f);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            TimeDelta::FromMilliseconds(1000) *= f);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            TimeDelta::FromMilliseconds(1000) /= f);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            f * TimeDelta::FromMilliseconds(1000));
+
+  int i = 2;
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            TimeDelta::FromMilliseconds(1000) * i);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            TimeDelta::FromMilliseconds(1000) / i);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            TimeDelta::FromMilliseconds(1000) *= i);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            TimeDelta::FromMilliseconds(1000) /= i);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            i * TimeDelta::FromMilliseconds(1000));
+
+  int64_t i64 = 2;
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            TimeDelta::FromMilliseconds(1000) * i64);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            TimeDelta::FromMilliseconds(1000) / i64);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            TimeDelta::FromMilliseconds(1000) *= i64);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            TimeDelta::FromMilliseconds(1000) /= i64);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            i64 * TimeDelta::FromMilliseconds(1000));
+
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            TimeDelta::FromMilliseconds(1000) * 0.5);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            TimeDelta::FromMilliseconds(1000) / 0.5);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            TimeDelta::FromMilliseconds(1000) *= 0.5);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            TimeDelta::FromMilliseconds(1000) /= 0.5);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            0.5 * TimeDelta::FromMilliseconds(1000));
+
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            TimeDelta::FromMilliseconds(1000) * 2);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            TimeDelta::FromMilliseconds(1000) / 2);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            TimeDelta::FromMilliseconds(1000) *= 2);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+            TimeDelta::FromMilliseconds(1000) /= 2);
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+            2 * TimeDelta::FromMilliseconds(1000));
+}
+
+// Test that all the time functions exposed by TimeUtils get time from the
+// fake clock when it's set.
+TEST(FakeClock, TimeFunctionsUseFakeClock) {
+  FakeClock clock;
+  SetClockForTesting(&clock);
+
+  clock.SetTimeNanos(987654321);
+  EXPECT_EQ(987u, Time32());
+  EXPECT_EQ(987, TimeMillis());
+  EXPECT_EQ(987654, TimeMicros());
+  EXPECT_EQ(987654321, TimeNanos());
+  EXPECT_EQ(1000u, TimeAfter(13));
+
+  SetClockForTesting(nullptr);
+  // After it's unset, we should get a normal time.
+  EXPECT_NE(987, TimeMillis());
+}
+
+TEST(FakeClock, InitialTime) {
+  FakeClock clock;
+  EXPECT_EQ(0, clock.TimeNanos());
+}
+
+TEST(FakeClock, SetTimeNanos) {
+  FakeClock clock;
+  clock.SetTimeNanos(123);
+  EXPECT_EQ(123, clock.TimeNanos());
+  clock.SetTimeNanos(456);
+  EXPECT_EQ(456, clock.TimeNanos());
+}
+
+TEST(FakeClock, AdvanceTime) {
+  FakeClock clock;
+  clock.AdvanceTime(TimeDelta::FromNanoseconds(1111u));
+  EXPECT_EQ(1111, clock.TimeNanos());
+  clock.AdvanceTime(TimeDelta::FromMicroseconds(2222u));
+  EXPECT_EQ(2223111, clock.TimeNanos());
+  clock.AdvanceTime(TimeDelta::FromMilliseconds(3333u));
+  EXPECT_EQ(3335223111, clock.TimeNanos());
+  clock.AdvanceTime(TimeDelta::FromSeconds(4444u));
+  EXPECT_EQ(4447335223111, clock.TimeNanos());
+}
+
+// When the clock is advanced, threads that are waiting in a socket select
+// should wake up and look at the new time. This allows tests using the
+// fake clock to run much faster, if the test is bound by time constraints
+// (such as a test for a STUN ping timeout).
+TEST(FakeClock, SettingTimeWakesThreads) {
+  int64_t real_start_time_ms = TimeMillis();
+
+  FakeClock clock;
+  SetClockForTesting(&clock);
+
+  std::unique_ptr<Thread> worker(Thread::CreateWithSocketServer());
+  worker->Start();
+
+  // Post an event that won't be executed for 10 seconds.
+  Event message_handler_dispatched(false, false);
+  auto functor = [&message_handler_dispatched] {
+    message_handler_dispatched.Set();
+  };
+  FunctorMessageHandler<void, decltype(functor)> handler(functor);
+  worker->PostDelayed(RTC_FROM_HERE, 60000, &handler);
+
+  // Wait for a bit for the worker thread to be started and enter its socket
+  // select(). Otherwise this test would be trivial since the worker thread
+  // would process the event as soon as it was started.
+  Thread::Current()->SleepMs(1000);
+
+  // Advance the fake clock, expecting the worker thread to wake up
+  // and dispatch the message instantly.
+  clock.AdvanceTime(TimeDelta::FromSeconds(60u));
+  EXPECT_TRUE(message_handler_dispatched.Wait(0));
+  worker->Stop();
+
+  SetClockForTesting(nullptr);
+
+  // The message should have been dispatched long before the 60 seconds fully
+  // elapsed (just a sanity check).
+  int64_t real_end_time_ms = TimeMillis();
+  EXPECT_LT(real_end_time_ms - real_start_time_ms, 10000);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/trace_event.h b/rtc_base/trace_event.h
new file mode 100644
index 0000000..7a9f2dd
--- /dev/null
+++ b/rtc_base/trace_event.h
@@ -0,0 +1,915 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file under third_party_mods/chromium or at:
+// http://src.chromium.org/svn/trunk/src/LICENSE
+
+#ifndef RTC_BASE_TRACE_EVENT_H_
+#define RTC_BASE_TRACE_EVENT_H_
+
+#include <string>
+
+#include "rtc_base/event_tracer.h"
+
+#if defined(TRACE_EVENT0)
+#error "Another copy of trace_event.h has already been included."
+#endif
+
+// Extracted from Chromium's src/base/debug/trace_event.h.
+
+// This header is designed to give you trace_event macros without specifying
+// how the events actually get collected and stored. If you need to expose trace
+// event to some other universe, you can copy-and-paste this file,
+// implement the TRACE_EVENT_API macros, and do any other necessary fixup for
+// the target platform. The end result is that multiple libraries can funnel
+// events through to a shared trace event collector.
+
+// Trace events are for tracking application performance and resource usage.
+// Macros are provided to track:
+//    Begin and end of function calls
+//    Counters
+//
+// Events are issued against categories. Whereas RTC_LOG's
+// categories are statically defined, TRACE categories are created
+// implicitly with a string. For example:
+//   TRACE_EVENT_INSTANT0("MY_SUBSYSTEM", "SomeImportantEvent")
+//
+// Events can be INSTANT, or can be pairs of BEGIN and END in the same scope:
+//   TRACE_EVENT_BEGIN0("MY_SUBSYSTEM", "SomethingCostly")
+//   doSomethingCostly()
+//   TRACE_EVENT_END0("MY_SUBSYSTEM", "SomethingCostly")
+// Note: our tools can't always determine the correct BEGIN/END pairs unless
+// these are used in the same scope. Use ASYNC_BEGIN/ASYNC_END macros if you
+// need them to be in separate scopes.
+//
+// A common use case is to trace entire function scopes. This
+// issues a trace BEGIN and END automatically:
+//   void doSomethingCostly() {
+//     TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly");
+//     ...
+//   }
+//
+// Additional parameters can be associated with an event:
+//   void doSomethingCostly2(int howMuch) {
+//     TRACE_EVENT1("MY_SUBSYSTEM", "doSomethingCostly",
+//         "howMuch", howMuch);
+//     ...
+//   }
+//
+// The trace system will automatically add to this information the
+// current process id, thread id, and a timestamp in microseconds.
+//
+// To trace an asynchronous procedure such as an IPC send/receive, use
+// ASYNC_BEGIN and ASYNC_END:
+//   [single threaded sender code]
+//     static int send_count = 0;
+//     ++send_count;
+//     TRACE_EVENT_ASYNC_BEGIN0("ipc", "message", send_count);
+//     Send(new MyMessage(send_count));
+//   [receive code]
+//     void OnMyMessage(send_count) {
+//       TRACE_EVENT_ASYNC_END0("ipc", "message", send_count);
+//     }
+// The third parameter is a unique ID to match ASYNC_BEGIN/ASYNC_END pairs.
+// ASYNC_BEGIN and ASYNC_END can occur on any thread of any traced process.
+// Pointers can be used for the ID parameter, and they will be mangled
+// internally so that the same pointer on two different processes will not
+// match. For example:
+//   class MyTracedClass {
+//    public:
+//     MyTracedClass() {
+//       TRACE_EVENT_ASYNC_BEGIN0("category", "MyTracedClass", this);
+//     }
+//     ~MyTracedClass() {
+//       TRACE_EVENT_ASYNC_END0("category", "MyTracedClass", this);
+//     }
+//   }
+//
+// Trace event also supports counters, which is a way to track a quantity
+// as it varies over time. Counters are created with the following macro:
+//   TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter", g_myCounterValue);
+//
+// Counters are process-specific. The macro itself can be issued from any
+// thread, however.
+//
+// Sometimes, you want to track two counters at once. You can do this with two
+// counter macros:
+//   TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter0", g_myCounterValue[0]);
+//   TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter1", g_myCounterValue[1]);
+// Or you can do it with a combined macro:
+//   TRACE_COUNTER2("MY_SUBSYSTEM", "myCounter",
+//       "bytesPinned", g_myCounterValue[0],
+//       "bytesAllocated", g_myCounterValue[1]);
+// This indicates to the tracing UI that these counters should be displayed
+// in a single graph, as a summed area chart.
+//
+// Since counters are in a global namespace, you may want to disembiguate with a
+// unique ID, by using the TRACE_COUNTER_ID* variations.
+//
+// By default, trace collection is compiled in, but turned off at runtime.
+// Collecting trace data is the responsibility of the embedding
+// application. In Chrome's case, navigating to about:tracing will turn on
+// tracing and display data collected across all active processes.
+//
+//
+// Memory scoping note:
+// Tracing copies the pointers, not the string content, of the strings passed
+// in for category, name, and arg_names.  Thus, the following code will
+// cause problems:
+//     char* str = strdup("impprtantName");
+//     TRACE_EVENT_INSTANT0("SUBSYSTEM", str);  // BAD!
+//     free(str);                   // Trace system now has dangling pointer
+//
+// To avoid this issue with the |name| and |arg_name| parameters, use the
+// TRACE_EVENT_COPY_XXX overloads of the macros at additional runtime overhead.
+// Notes: The category must always be in a long-lived char* (i.e. static const).
+//        The |arg_values|, when used, are always deep copied with the _COPY
+//        macros.
+//
+// When are string argument values copied:
+// const char* arg_values are only referenced by default:
+//     TRACE_EVENT1("category", "name",
+//                  "arg1", "literal string is only referenced");
+// Use TRACE_STR_COPY to force copying of a const char*:
+//     TRACE_EVENT1("category", "name",
+//                  "arg1", TRACE_STR_COPY("string will be copied"));
+// std::string arg_values are always copied:
+//     TRACE_EVENT1("category", "name",
+//                  "arg1", std::string("string will be copied"));
+//
+//
+// Thread Safety:
+// Thread safety is provided by methods defined in event_tracer.h. See the file
+// for details.
+
+// By default, const char* argument values are assumed to have long-lived scope
+// and will not be copied. Use this macro to force a const char* to be copied.
+#define TRACE_STR_COPY(str) \
+    webrtc::trace_event_internal::TraceStringWithCopy(str)
+
+// This will mark the trace event as disabled by default. The user will need
+// to explicitly enable the event.
+#define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name
+
+// By default, uint64 ID argument values are not mangled with the Process ID in
+// TRACE_EVENT_ASYNC macros. Use this macro to force Process ID mangling.
+#define TRACE_ID_MANGLE(id) \
+    webrtc::trace_event_internal::TraceID::ForceMangle(id)
+
+// Records a pair of begin and end events called "name" for the current
+// scope, with 0, 1 or 2 associated arguments. If the category is not
+// enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_EVENT0(category, name) \
+    INTERNAL_TRACE_EVENT_ADD_SCOPED(category, name)
+#define TRACE_EVENT1(category, name, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_SCOPED(category, name, arg1_name, arg1_val)
+#define TRACE_EVENT2(category, name, arg1_name, arg1_val, arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_SCOPED(category, name, arg1_name, arg1_val, \
+        arg2_name, arg2_val)
+
+// Same as TRACE_EVENT except that they are not included in official builds.
+#ifdef OFFICIAL_BUILD
+#define UNSHIPPED_TRACE_EVENT0(category, name) (void)0
+#define UNSHIPPED_TRACE_EVENT1(category, name, arg1_name, arg1_val) (void)0
+#define UNSHIPPED_TRACE_EVENT2(category, name, arg1_name, arg1_val, \
+                               arg2_name, arg2_val) (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT0(category, name) (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT1(category, name, arg1_name, arg1_val) \
+    (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT2(category, name, arg1_name, arg1_val, \
+                                       arg2_name, arg2_val) (void)0
+#else
+#define UNSHIPPED_TRACE_EVENT0(category, name) \
+    TRACE_EVENT0(category, name)
+#define UNSHIPPED_TRACE_EVENT1(category, name, arg1_name, arg1_val) \
+    TRACE_EVENT1(category, name, arg1_name, arg1_val)
+#define UNSHIPPED_TRACE_EVENT2(category, name, arg1_name, arg1_val, \
+                               arg2_name, arg2_val) \
+    TRACE_EVENT2(category, name, arg1_name, arg1_val, arg2_name, arg2_val)
+#define UNSHIPPED_TRACE_EVENT_INSTANT0(category, name) \
+    TRACE_EVENT_INSTANT0(category, name)
+#define UNSHIPPED_TRACE_EVENT_INSTANT1(category, name, arg1_name, arg1_val) \
+    TRACE_EVENT_INSTANT1(category, name, arg1_name, arg1_val)
+#define UNSHIPPED_TRACE_EVENT_INSTANT2(category, name, arg1_name, arg1_val, \
+                                       arg2_name, arg2_val) \
+    TRACE_EVENT_INSTANT2(category, name, arg1_name, arg1_val, \
+                         arg2_name, arg2_val)
+#endif
+
+// Records a single event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_EVENT_INSTANT0(category, name) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
+        category, name, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_INSTANT1(category, name, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
+        category, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_INSTANT2(category, name, arg1_name, arg1_val, \
+        arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
+        category, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
+        arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_INSTANT0(category, name) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
+        category, name, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_INSTANT1(category, name, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
+        category, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_INSTANT2(category, name, arg1_name, arg1_val, \
+        arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
+        category, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+        arg2_name, arg2_val)
+
+// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_EVENT_BEGIN0(category, name) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
+        category, name, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_BEGIN1(category, name, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
+        category, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_BEGIN2(category, name, arg1_name, arg1_val, \
+        arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
+        category, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
+        arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_BEGIN0(category, name) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
+        category, name, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_BEGIN1(category, name, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
+        category, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN2(category, name, arg1_name, arg1_val, \
+        arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
+        category, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+        arg2_name, arg2_val)
+
+// Records a single END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_EVENT_END0(category, name) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
+        category, name, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_END1(category, name, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
+        category, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_END2(category, name, arg1_name, arg1_val, \
+        arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
+        category, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
+        arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_END0(category, name) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
+        category, name, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_END1(category, name, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
+        category, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END2(category, name, arg1_name, arg1_val, \
+        arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
+        category, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+        arg2_name, arg2_val)
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_COUNTER1(category, name, value) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
+        category, name, TRACE_EVENT_FLAG_NONE, \
+        "value", static_cast<int>(value))
+#define TRACE_COPY_COUNTER1(category, name, value) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
+        category, name, TRACE_EVENT_FLAG_COPY, \
+        "value", static_cast<int>(value))
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_COUNTER2(category, name, value1_name, value1_val, \
+        value2_name, value2_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
+        category, name, TRACE_EVENT_FLAG_NONE, \
+        value1_name, static_cast<int>(value1_val), \
+        value2_name, static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER2(category, name, value1_name, value1_val, \
+        value2_name, value2_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
+        category, name, TRACE_EVENT_FLAG_COPY, \
+        value1_name, static_cast<int>(value1_val), \
+        value2_name, static_cast<int>(value2_val))
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+//   be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+//   will be xored with a hash of the process ID so that the same pointer on
+//   two different processes will not collide.
+#define TRACE_COUNTER_ID1(category, name, id, value) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
+        category, name, id, TRACE_EVENT_FLAG_NONE, \
+        "value", static_cast<int>(value))
+#define TRACE_COPY_COUNTER_ID1(category, name, id, value) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
+        category, name, id, TRACE_EVENT_FLAG_COPY, \
+        "value", static_cast<int>(value))
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+//   be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+//   will be xored with a hash of the process ID so that the same pointer on
+//   two different processes will not collide.
+#define TRACE_COUNTER_ID2(category, name, id, value1_name, value1_val, \
+        value2_name, value2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
+        category, name, id, TRACE_EVENT_FLAG_NONE, \
+        value1_name, static_cast<int>(value1_val), \
+        value2_name, static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER_ID2(category, name, id, value1_name, value1_val, \
+        value2_name, value2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
+        category, name, id, TRACE_EVENT_FLAG_COPY, \
+        value1_name, static_cast<int>(value1_val), \
+        value2_name, static_cast<int>(value2_val))
+
+
+// Records a single ASYNC_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+// - |id| is used to match the ASYNC_BEGIN event with the ASYNC_END event. ASYNC
+//   events are considered to match if their category, name and id values all
+//   match. |id| must either be a pointer or an integer value up to 64 bits. If
+//   it's a pointer, the bits will be xored with a hash of the process ID so
+//   that the same pointer on two different processes will not collide.
+// An asynchronous operation can consist of multiple phases. The first phase is
+// defined by the ASYNC_BEGIN calls. Additional phases can be defined using the
+// ASYNC_STEP macros. When the operation completes, call ASYNC_END.
+// An ASYNC trace typically occur on a single thread (if not, they will only be
+// drawn on the thread defined in the ASYNC_BEGIN event), but all events in that
+// operation must use the same |name| and |id|. Each event can have its own
+// args.
+#define TRACE_EVENT_ASYNC_BEGIN0(category, name, id) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+        category, name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_BEGIN1(category, name, id, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+        category, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN2(category, name, id, arg1_name, arg1_val, \
+        arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+        category, name, id, TRACE_EVENT_FLAG_NONE, \
+        arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN0(category, name, id) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+        category, name, id, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN1(category, name, id, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+        category, name, id, TRACE_EVENT_FLAG_COPY, \
+        arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN2(category, name, id, arg1_name, arg1_val, \
+        arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+        category, name, id, TRACE_EVENT_FLAG_COPY, \
+        arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single ASYNC_STEP event for |step| immediately. If the category
+// is not enabled, then this does nothing. The |name| and |id| must match the
+// ASYNC_BEGIN event above. The |step| param identifies this step within the
+// async event. This should be called at the beginning of the next phase of an
+// asynchronous operation.
+#define TRACE_EVENT_ASYNC_STEP0(category, name, id, step) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP, \
+        category, name, id, TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP1(category, name, id, step, \
+                                      arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP, \
+        category, name, id, TRACE_EVENT_FLAG_NONE, "step", step, \
+        arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_STEP0(category, name, id, step) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP, \
+        category, name, id, TRACE_EVENT_FLAG_COPY, "step", step)
+#define TRACE_EVENT_COPY_ASYNC_STEP1(category, name, id, step, \
+        arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP, \
+        category, name, id, TRACE_EVENT_FLAG_COPY, "step", step, \
+        arg1_name, arg1_val)
+
+// Records a single ASYNC_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_ASYNC_END0(category, name, id) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+        category, name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_END1(category, name, id, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+        category, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END2(category, name, id, arg1_name, arg1_val, \
+        arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+        category, name, id, TRACE_EVENT_FLAG_NONE, \
+        arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_END0(category, name, id) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+        category, name, id, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_END1(category, name, id, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+        category, name, id, TRACE_EVENT_FLAG_COPY, \
+        arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_END2(category, name, id, arg1_name, arg1_val, \
+        arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+        category, name, id, TRACE_EVENT_FLAG_COPY, \
+        arg1_name, arg1_val, arg2_name, arg2_val)
+
+
+// Records a single FLOW_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+// - |id| is used to match the FLOW_BEGIN event with the FLOW_END event. FLOW
+//   events are considered to match if their category, name and id values all
+//   match. |id| must either be a pointer or an integer value up to 64 bits. If
+//   it's a pointer, the bits will be xored with a hash of the process ID so
+//   that the same pointer on two different processes will not collide.
+// FLOW events are different from ASYNC events in how they are drawn by the
+// tracing UI. A FLOW defines asynchronous data flow, such as posting a task
+// (FLOW_BEGIN) and later executing that task (FLOW_END). Expect FLOWs to be
+// drawn as lines or arrows from FLOW_BEGIN scopes to FLOW_END scopes. Similar
+// to ASYNC, a FLOW can consist of multiple phases. The first phase is defined
+// by the FLOW_BEGIN calls. Additional phases can be defined using the FLOW_STEP
+// macros. When the operation completes, call FLOW_END. An async operation can
+// span threads and processes, but all events in that operation must use the
+// same |name| and |id|. Each event can have its own args.
+#define TRACE_EVENT_FLOW_BEGIN0(category, name, id) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+        category, name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_FLOW_BEGIN1(category, name, id, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+        category, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_FLOW_BEGIN2(category, name, id, arg1_name, arg1_val, \
+        arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+        category, name, id, TRACE_EVENT_FLAG_NONE, \
+        arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN0(category, name, id) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+        category, name, id, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_FLOW_BEGIN1(category, name, id, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+        category, name, id, TRACE_EVENT_FLAG_COPY, \
+        arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN2(category, name, id, arg1_name, arg1_val, \
+        arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+        category, name, id, TRACE_EVENT_FLAG_COPY, \
+        arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single FLOW_STEP event for |step| immediately. If the category
+// is not enabled, then this does nothing. The |name| and |id| must match the
+// FLOW_BEGIN event above. The |step| param identifies this step within the
+// async event. This should be called at the beginning of the next phase of an
+// asynchronous operation.
+#define TRACE_EVENT_FLOW_STEP0(category, name, id, step) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+        category, name, id, TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_FLOW_STEP1(category, name, id, step, \
+        arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+        category, name, id, TRACE_EVENT_FLAG_NONE, "step", step, \
+        arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_STEP0(category, name, id, step) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+        category, name, id, TRACE_EVENT_FLAG_COPY, "step", step)
+#define TRACE_EVENT_COPY_FLOW_STEP1(category, name, id, step, \
+        arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+        category, name, id, TRACE_EVENT_FLAG_COPY, "step", step, \
+        arg1_name, arg1_val)
+
+// Records a single FLOW_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_FLOW_END0(category, name, id) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
+        category, name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_FLOW_END1(category, name, id, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
+        category, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_FLOW_END2(category, name, id, arg1_name, arg1_val, \
+        arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
+        category, name, id, TRACE_EVENT_FLAG_NONE, \
+        arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_FLOW_END0(category, name, id) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
+        category, name, id, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_FLOW_END1(category, name, id, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
+        category, name, id, TRACE_EVENT_FLAG_COPY, \
+        arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_END2(category, name, id, arg1_name, arg1_val, \
+        arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
+        category, name, id, TRACE_EVENT_FLAG_COPY, \
+        arg1_name, arg1_val, arg2_name, arg2_val)
+
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementation specific tracing API definitions.
+
+// Get a pointer to the enabled state of the given trace category. Only
+// long-lived literal strings should be given as the category name. The returned
+// pointer can be held permanently in a local static for example. If the
+// unsigned char is non-zero, tracing is enabled. If tracing is enabled,
+// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled
+// between the load of the tracing state and the call to
+// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out
+// for best performance when tracing is disabled.
+// const unsigned char*
+//     TRACE_EVENT_API_GET_CATEGORY_ENABLED(const char* category_name)
+#define TRACE_EVENT_API_GET_CATEGORY_ENABLED \
+    webrtc::EventTracer::GetCategoryEnabled
+
+// Add a trace event to the platform tracing system.
+// void TRACE_EVENT_API_ADD_TRACE_EVENT(
+//                    char phase,
+//                    const unsigned char* category_enabled,
+//                    const char* name,
+//                    unsigned long long id,
+//                    int num_args,
+//                    const char** arg_names,
+//                    const unsigned char* arg_types,
+//                    const unsigned long long* arg_values,
+//                    unsigned char flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT webrtc::EventTracer::AddTraceEvent
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Implementation detail: trace event macros create temporary variables
+// to keep instrumentation overhead low. These macros give each temporary
+// variable a unique name based on the line number to prevent name collissions.
+#define INTERNAL_TRACE_EVENT_UID3(a,b) \
+    trace_event_unique_##a##b
+#define INTERNAL_TRACE_EVENT_UID2(a,b) \
+    INTERNAL_TRACE_EVENT_UID3(a,b)
+#define INTERNAL_TRACE_EVENT_UID(name_prefix) \
+    INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__)
+
+#if WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS
+#define INTERNAL_TRACE_EVENT_INFO_TYPE const unsigned char*
+#else
+#define INTERNAL_TRACE_EVENT_INFO_TYPE static const unsigned char*
+#endif  // WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS
+
+// Implementation detail: internal macro to create static category.
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category) \
+    INTERNAL_TRACE_EVENT_INFO_TYPE INTERNAL_TRACE_EVENT_UID(catstatic) = \
+        TRACE_EVENT_API_GET_CATEGORY_ENABLED(category);
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD(phase, category, name, flags, ...) \
+    do { \
+      INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category); \
+      if (*INTERNAL_TRACE_EVENT_UID(catstatic)) { \
+        webrtc::trace_event_internal::AddTraceEvent(          \
+            phase, INTERNAL_TRACE_EVENT_UID(catstatic), name, \
+            webrtc::trace_event_internal::kNoEventId, flags, ##__VA_ARGS__); \
+      } \
+    } while (0)
+
+// Implementation detail: internal macro to create static category and add begin
+// event if the category is enabled. Also adds the end event when the scope
+// ends.
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category, name, ...) \
+    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category); \
+    webrtc::trace_event_internal::TraceEndOnScopeClose  \
+        INTERNAL_TRACE_EVENT_UID(profileScope); \
+    if (*INTERNAL_TRACE_EVENT_UID(catstatic)) { \
+      webrtc::trace_event_internal::AddTraceEvent(      \
+          TRACE_EVENT_PHASE_BEGIN, \
+          INTERNAL_TRACE_EVENT_UID(catstatic), \
+          name, webrtc::trace_event_internal::kNoEventId,       \
+          TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \
+      INTERNAL_TRACE_EVENT_UID(profileScope).Initialize( \
+          INTERNAL_TRACE_EVENT_UID(catstatic), name); \
+    }
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category, name, id, flags, \
+                                         ...) \
+    do { \
+      INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category); \
+      if (*INTERNAL_TRACE_EVENT_UID(catstatic)) { \
+        unsigned char trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
+        webrtc::trace_event_internal::TraceID trace_event_trace_id( \
+            id, &trace_event_flags); \
+        webrtc::trace_event_internal::AddTraceEvent( \
+            phase, INTERNAL_TRACE_EVENT_UID(catstatic), \
+            name, trace_event_trace_id.data(), trace_event_flags, \
+            ##__VA_ARGS__); \
+      } \
+    } while (0)
+
+// Notes regarding the following definitions:
+// New values can be added and propagated to third party libraries, but existing
+// definitions must never be changed, because third party libraries may use old
+// definitions.
+
+// Phase indicates the nature of an event entry. E.g. part of a begin/end pair.
+#define TRACE_EVENT_PHASE_BEGIN    ('B')
+#define TRACE_EVENT_PHASE_END      ('E')
+#define TRACE_EVENT_PHASE_INSTANT  ('I')
+#define TRACE_EVENT_PHASE_ASYNC_BEGIN ('S')
+#define TRACE_EVENT_PHASE_ASYNC_STEP  ('T')
+#define TRACE_EVENT_PHASE_ASYNC_END   ('F')
+#define TRACE_EVENT_PHASE_FLOW_BEGIN ('s')
+#define TRACE_EVENT_PHASE_FLOW_STEP  ('t')
+#define TRACE_EVENT_PHASE_FLOW_END   ('f')
+#define TRACE_EVENT_PHASE_METADATA ('M')
+#define TRACE_EVENT_PHASE_COUNTER  ('C')
+
+// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
+#define TRACE_EVENT_FLAG_NONE        (static_cast<unsigned char>(0))
+#define TRACE_EVENT_FLAG_COPY        (static_cast<unsigned char>(1 << 0))
+#define TRACE_EVENT_FLAG_HAS_ID      (static_cast<unsigned char>(1 << 1))
+#define TRACE_EVENT_FLAG_MANGLE_ID   (static_cast<unsigned char>(1 << 2))
+
+// Type values for identifying types in the TraceValue union.
+#define TRACE_VALUE_TYPE_BOOL         (static_cast<unsigned char>(1))
+#define TRACE_VALUE_TYPE_UINT         (static_cast<unsigned char>(2))
+#define TRACE_VALUE_TYPE_INT          (static_cast<unsigned char>(3))
+#define TRACE_VALUE_TYPE_DOUBLE       (static_cast<unsigned char>(4))
+#define TRACE_VALUE_TYPE_POINTER      (static_cast<unsigned char>(5))
+#define TRACE_VALUE_TYPE_STRING       (static_cast<unsigned char>(6))
+#define TRACE_VALUE_TYPE_COPY_STRING  (static_cast<unsigned char>(7))
+
+namespace webrtc {
+namespace trace_event_internal {
+
+// Specify these values when the corresponding argument of AddTraceEvent is not
+// used.
+const int kZeroNumArgs = 0;
+const unsigned long long kNoEventId = 0;
+
+// TraceID encapsulates an ID that can either be an integer or pointer. Pointers
+// are mangled with the Process ID so that they are unlikely to collide when the
+// same pointer is used on different processes.
+class TraceID {
+ public:
+  class ForceMangle {
+    public:
+     explicit ForceMangle(unsigned long long id) : data_(id) {}
+     explicit ForceMangle(unsigned long id) : data_(id) {}
+     explicit ForceMangle(unsigned int id) : data_(id) {}
+     explicit ForceMangle(unsigned short id) : data_(id) {}
+     explicit ForceMangle(unsigned char id) : data_(id) {}
+     explicit ForceMangle(long long id)
+         : data_(static_cast<unsigned long long>(id)) {}
+     explicit ForceMangle(long id)
+         : data_(static_cast<unsigned long long>(id)) {}
+     explicit ForceMangle(int id)
+         : data_(static_cast<unsigned long long>(id)) {}
+     explicit ForceMangle(short id)
+         : data_(static_cast<unsigned long long>(id)) {}
+     explicit ForceMangle(signed char id)
+         : data_(static_cast<unsigned long long>(id)) {}
+
+     unsigned long long data() const { return data_; }
+
+    private:
+     unsigned long long data_;
+  };
+
+  explicit TraceID(const void* id, unsigned char* flags)
+      : data_(static_cast<unsigned long long>(
+              reinterpret_cast<uintptr_t>(id))) {
+    *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+  }
+  explicit TraceID(ForceMangle id, unsigned char* flags) : data_(id.data()) {
+    *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+  }
+  explicit TraceID(unsigned long long id, unsigned char* flags)
+      : data_(id) { (void)flags; }
+  explicit TraceID(unsigned long id, unsigned char* flags)
+      : data_(id) { (void)flags; }
+  explicit TraceID(unsigned int id, unsigned char* flags)
+      : data_(id) { (void)flags; }
+  explicit TraceID(unsigned short id, unsigned char* flags)
+      : data_(id) { (void)flags; }
+  explicit TraceID(unsigned char id, unsigned char* flags)
+      : data_(id) { (void)flags; }
+  explicit TraceID(long long id, unsigned char* flags)
+      : data_(static_cast<unsigned long long>(id)) { (void)flags; }
+  explicit TraceID(long id, unsigned char* flags)
+      : data_(static_cast<unsigned long long>(id)) { (void)flags; }
+  explicit TraceID(int id, unsigned char* flags)
+      : data_(static_cast<unsigned long long>(id)) { (void)flags; }
+  explicit TraceID(short id, unsigned char* flags)
+      : data_(static_cast<unsigned long long>(id)) { (void)flags; }
+  explicit TraceID(signed char id, unsigned char* flags)
+      : data_(static_cast<unsigned long long>(id)) { (void)flags; }
+
+  unsigned long long data() const { return data_; }
+
+ private:
+  unsigned long long data_;
+};
+
+// Simple union to store various types as unsigned long long.
+union TraceValueUnion {
+  bool as_bool;
+  unsigned long long as_uint;
+  long long as_int;
+  double as_double;
+  const void* as_pointer;
+  const char* as_string;
+};
+
+// Simple container for const char* that should be copied instead of retained.
+class TraceStringWithCopy {
+ public:
+  explicit TraceStringWithCopy(const char* str) : str_(str) {}
+  operator const char* () const { return str_; }
+ private:
+  const char* str_;
+};
+
+// Define SetTraceValue for each allowed type. It stores the type and
+// value in the return arguments. This allows this API to avoid declaring any
+// structures so that it is portable to third_party libraries.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, \
+                                         union_member, \
+                                         value_type_id) \
+    static inline void SetTraceValue(actual_type arg, \
+                                     unsigned char* type, \
+                                     unsigned long long* value) { \
+      TraceValueUnion type_value; \
+      type_value.union_member = arg; \
+      *type = value_type_id; \
+      *value = type_value.as_uint; \
+    }
+// Simpler form for int types that can be safely casted.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, \
+                                             value_type_id) \
+    static inline void SetTraceValue(actual_type arg, \
+                                     unsigned char* type, \
+                                     unsigned long long* value) { \
+      *type = value_type_id; \
+      *value = static_cast<unsigned long long>(arg); \
+    }
+
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned long long, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned long, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned short, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long long, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(short, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE(bool, as_bool, TRACE_VALUE_TYPE_BOOL)
+INTERNAL_DECLARE_SET_TRACE_VALUE(double, as_double, TRACE_VALUE_TYPE_DOUBLE)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, as_pointer,
+                                 TRACE_VALUE_TYPE_POINTER)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, as_string,
+                                 TRACE_VALUE_TYPE_STRING)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, as_string,
+                                 TRACE_VALUE_TYPE_COPY_STRING)
+
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
+
+// std::string version of SetTraceValue so that trace arguments can be strings.
+static inline void SetTraceValue(const std::string& arg,
+                                 unsigned char* type,
+                                 unsigned long long* value) {
+  TraceValueUnion type_value;
+  type_value.as_string = arg.c_str();
+  *type = TRACE_VALUE_TYPE_COPY_STRING;
+  *value = type_value.as_uint;
+}
+
+// These AddTraceEvent template functions are defined here instead of in the
+// macro, because the arg_values could be temporary objects, such as
+// std::string. In order to store pointers to the internal c_str and pass
+// through to the tracing API, the arg_values must live throughout
+// these procedures.
+
+static inline void AddTraceEvent(char phase,
+                                const unsigned char* category_enabled,
+                                const char* name,
+                                unsigned long long id,
+                                unsigned char flags) {
+  TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_enabled, name, id,
+                                  kZeroNumArgs, nullptr, nullptr, nullptr,
+                                  flags);
+}
+
+template<class ARG1_TYPE>
+static inline void AddTraceEvent(char phase,
+                                const unsigned char* category_enabled,
+                                const char* name,
+                                unsigned long long id,
+                                unsigned char flags,
+                                const char* arg1_name,
+                                const ARG1_TYPE& arg1_val) {
+  const int num_args = 1;
+  unsigned char arg_types[1];
+  unsigned long long arg_values[1];
+  SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+  TRACE_EVENT_API_ADD_TRACE_EVENT(
+      phase, category_enabled, name, id,
+      num_args, &arg1_name, arg_types, arg_values,
+      flags);
+}
+
+template<class ARG1_TYPE, class ARG2_TYPE>
+static inline void AddTraceEvent(char phase,
+                                const unsigned char* category_enabled,
+                                const char* name,
+                                unsigned long long id,
+                                unsigned char flags,
+                                const char* arg1_name,
+                                const ARG1_TYPE& arg1_val,
+                                const char* arg2_name,
+                                const ARG2_TYPE& arg2_val) {
+  const int num_args = 2;
+  const char* arg_names[2] = { arg1_name, arg2_name };
+  unsigned char arg_types[2];
+  unsigned long long arg_values[2];
+  SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+  SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
+  TRACE_EVENT_API_ADD_TRACE_EVENT(
+      phase, category_enabled, name, id,
+      num_args, arg_names, arg_types, arg_values,
+      flags);
+}
+
+// Used by TRACE_EVENTx macro. Do not use directly.
+class TraceEndOnScopeClose {
+ public:
+  // Note: members of data_ intentionally left uninitialized. See Initialize.
+  TraceEndOnScopeClose() : p_data_(nullptr) {}
+  ~TraceEndOnScopeClose() {
+    if (p_data_)
+      AddEventIfEnabled();
+  }
+
+  void Initialize(const unsigned char* category_enabled,
+                  const char* name) {
+    data_.category_enabled = category_enabled;
+    data_.name = name;
+    p_data_ = &data_;
+  }
+
+ private:
+  // Add the end event if the category is still enabled.
+  void AddEventIfEnabled() {
+    // Only called when p_data_ is non-null.
+    if (*p_data_->category_enabled) {
+      TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_END,
+                                      p_data_->category_enabled, p_data_->name,
+                                      kNoEventId, kZeroNumArgs, nullptr,
+                                      nullptr, nullptr, TRACE_EVENT_FLAG_NONE);
+    }
+  }
+
+  // This Data struct workaround is to avoid initializing all the members
+  // in Data during construction of this object, since this object is always
+  // constructed, even when tracing is disabled. If the members of Data were
+  // members of this class instead, compiler warnings occur about potential
+  // uninitialized accesses.
+  struct Data {
+    const unsigned char* category_enabled;
+    const char* name;
+  };
+  Data* p_data_;
+  Data data_;
+};
+
+}  // namespace trace_event_internal
+}  // namespace webrtc
+
+#endif  // RTC_BASE_TRACE_EVENT_H_
diff --git a/rtc_base/transformadapter.cc b/rtc_base/transformadapter.cc
new file mode 100644
index 0000000..943a5b9
--- /dev/null
+++ b/rtc_base/transformadapter.cc
@@ -0,0 +1,197 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/transformadapter.h"
+
+#include <string.h>
+
+#include "rtc_base/checks.h"
+
+namespace rtc {
+
+///////////////////////////////////////////////////////////////////////////////
+
+TransformAdapter::TransformAdapter(StreamInterface * stream,
+                                   TransformInterface * transform,
+                                   bool direction_read)
+    : StreamAdapterInterface(stream), transform_(transform),
+      direction_read_(direction_read), state_(ST_PROCESSING), len_(0) {
+}
+
+TransformAdapter::~TransformAdapter() {
+  TransformAdapter::Close();
+  delete transform_;
+}
+
+StreamResult
+TransformAdapter::Read(void * buffer, size_t buffer_len,
+                       size_t * read, int * error) {
+  if (!direction_read_)
+    return SR_EOS;
+
+  while (state_ != ST_ERROR) {
+    if (state_ == ST_COMPLETE)
+      return SR_EOS;
+
+    // Buffer more data
+    if ((state_ == ST_PROCESSING) && (len_ < sizeof(buffer_))) {
+      size_t subread;
+      StreamResult result = StreamAdapterInterface::Read(
+                              buffer_ + len_,
+                              sizeof(buffer_) - len_,
+                              &subread,
+                              &error_);
+      if (result == SR_BLOCK) {
+        return SR_BLOCK;
+      } else if (result == SR_ERROR) {
+        state_ = ST_ERROR;
+        break;
+      } else if (result == SR_EOS) {
+        state_ = ST_FLUSHING;
+      } else {
+        len_ += subread;
+      }
+    }
+
+    // Process buffered data
+    size_t in_len = len_;
+    size_t out_len = buffer_len;
+    StreamResult result = transform_->Transform(buffer_, &in_len,
+                                                buffer, &out_len,
+                                                (state_ == ST_FLUSHING));
+    RTC_DCHECK(result != SR_BLOCK);
+    if (result == SR_EOS) {
+      // Note: Don't signal SR_EOS this iteration, unless out_len is zero
+      state_ = ST_COMPLETE;
+    } else if (result == SR_ERROR) {
+      state_ = ST_ERROR;
+      error_ = -1; // TODO: propagate error
+      break;
+    } else if ((out_len == 0) && (state_ == ST_FLUSHING)) {
+      // If there is no output AND no more input, then something is wrong
+      state_ = ST_ERROR;
+      error_ = -1; // TODO: better error code?
+      break;
+    }
+
+    len_ -= in_len;
+    if (len_ > 0)
+      memmove(buffer_, buffer_ + in_len, len_);
+
+    if (out_len == 0)
+      continue;
+
+    if (read)
+      *read = out_len;
+    return SR_SUCCESS;
+  }
+
+  if (error)
+    *error = error_;
+  return SR_ERROR;
+}
+
+StreamResult
+TransformAdapter::Write(const void * data, size_t data_len,
+                        size_t * written, int * error) {
+  if (direction_read_)
+    return SR_EOS;
+
+  size_t bytes_written = 0;
+  while (state_ != ST_ERROR) {
+    if (state_ == ST_COMPLETE)
+      return SR_EOS;
+
+    if (len_ < sizeof(buffer_)) {
+      // Process buffered data
+      size_t in_len = data_len;
+      size_t out_len = sizeof(buffer_) - len_;
+      StreamResult result = transform_->Transform(data, &in_len,
+                                                  buffer_ + len_, &out_len,
+                                                  (state_ == ST_FLUSHING));
+
+      RTC_DCHECK(result != SR_BLOCK);
+      if (result == SR_EOS) {
+        // Note: Don't signal SR_EOS this iteration, unless no data written
+        state_ = ST_COMPLETE;
+      } else if (result == SR_ERROR) {
+        RTC_NOTREACHED();  // When this happens, think about what should be done
+        state_ = ST_ERROR;
+        error_ = -1; // TODO: propagate error
+        break;
+      }
+
+      len_ = out_len;
+      bytes_written = in_len;
+    }
+
+    size_t pos = 0;
+    while (pos < len_) {
+      size_t subwritten;
+      StreamResult result = StreamAdapterInterface::Write(buffer_ + pos,
+                                                          len_ - pos,
+                                                          &subwritten,
+                                                          &error_);
+      if (result == SR_BLOCK) {
+        RTC_NOTREACHED();  // We should handle this
+        return SR_BLOCK;
+      } else if (result == SR_ERROR) {
+        state_ = ST_ERROR;
+        break;
+      } else if (result == SR_EOS) {
+        state_ = ST_COMPLETE;
+        break;
+      }
+
+      pos += subwritten;
+    }
+
+    len_ -= pos;
+    if (len_ > 0)
+      memmove(buffer_, buffer_ + pos, len_);
+
+    if (bytes_written == 0)
+      continue;
+
+    if (written)
+      *written = bytes_written;
+    return SR_SUCCESS;
+  }
+
+  if (error)
+    *error = error_;
+  return SR_ERROR;
+}
+
+void
+TransformAdapter::Close() {
+  if (!direction_read_ && (state_ == ST_PROCESSING)) {
+    state_ = ST_FLUSHING;
+    do {
+      Write(0, 0, nullptr, nullptr);
+    } while (state_ == ST_FLUSHING);
+  }
+  state_ = ST_COMPLETE;
+  StreamAdapterInterface::Close();
+}
+
+bool TransformAdapter::GetAvailable(size_t* size) const {
+  return false;
+}
+
+bool TransformAdapter::ReserveSize(size_t size) {
+  return true;
+}
+
+bool TransformAdapter::Rewind() {
+  return false;
+}
+
+} // namespace rtc
diff --git a/rtc_base/transformadapter.h b/rtc_base/transformadapter.h
new file mode 100644
index 0000000..5e8aa9a
--- /dev/null
+++ b/rtc_base/transformadapter.h
@@ -0,0 +1,84 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_TRANSFORMADAPTER_H_
+#define RTC_BASE_TRANSFORMADAPTER_H_
+
+#include "rtc_base/stream.h"
+
+namespace rtc {
+///////////////////////////////////////////////////////////////////////////////
+
+class TransformInterface {
+public:
+  virtual ~TransformInterface() { }
+
+  // Transform should convert the in_len bytes of input into the out_len-sized
+  // output buffer.  If flush is true, there will be no more data following
+  // input.
+  // After the transformation, in_len contains the number of bytes consumed, and
+  // out_len contains the number of bytes ready in output.
+  // Note: Transform should not return SR_BLOCK, as there is no asynchronous
+  // notification available.
+  virtual StreamResult Transform(const void * input, size_t * in_len,
+                                 void * output, size_t * out_len,
+                                 bool flush) = 0;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+// TransformAdapter causes all data passed through to be transformed by the
+// supplied TransformInterface object, which may apply compression, encryption,
+// etc.
+
+class TransformAdapter : public StreamAdapterInterface {
+public:
+  // Note that the transformation is unidirectional, in the direction specified
+  // by the constructor.  Operations in the opposite direction result in SR_EOS.
+  TransformAdapter(StreamInterface * stream,
+                   TransformInterface * transform,
+                   bool direction_read);
+  ~TransformAdapter() override;
+
+  StreamResult Read(void* buffer,
+                    size_t buffer_len,
+                    size_t* read,
+                    int* error) override;
+  StreamResult Write(const void* data,
+                     size_t data_len,
+                     size_t* written,
+                     int* error) override;
+  void Close() override;
+
+  // Apriori, we can't tell what the transformation does to the stream length.
+  bool GetAvailable(size_t* size) const override;
+  bool ReserveSize(size_t size) override;
+
+  // Transformations might not be restartable
+  virtual bool Rewind();
+
+private:
+  enum State { ST_PROCESSING, ST_FLUSHING, ST_COMPLETE, ST_ERROR };
+  enum { BUFFER_SIZE = 1024 };
+
+  TransformInterface * transform_;
+  bool direction_read_;
+  State state_;
+  int error_;
+
+  char buffer_[BUFFER_SIZE];
+  size_t len_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+} // namespace rtc
+
+#endif // RTC_BASE_TRANSFORMADAPTER_H_
diff --git a/rtc_base/type_traits.h b/rtc_base/type_traits.h
new file mode 100644
index 0000000..4f004cd
--- /dev/null
+++ b/rtc_base/type_traits.h
@@ -0,0 +1,140 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_TYPE_TRAITS_H_
+#define RTC_BASE_TYPE_TRAITS_H_
+
+#include <cstddef>
+#include <type_traits>
+
+namespace rtc {
+
+// Determines if the given class has zero-argument .data() and .size() methods
+// whose return values are convertible to T* and size_t, respectively.
+template <typename DS, typename T>
+class HasDataAndSize {
+ private:
+  template <
+      typename C,
+      typename std::enable_if<
+          std::is_convertible<decltype(std::declval<C>().data()), T*>::value &&
+          std::is_convertible<decltype(std::declval<C>().size()),
+                              std::size_t>::value>::type* = nullptr>
+  static int Test(int);
+
+  template <typename>
+  static char Test(...);
+
+ public:
+  static constexpr bool value = std::is_same<decltype(Test<DS>(0)), int>::value;
+};
+
+namespace test_has_data_and_size {
+
+template <typename DR, typename SR>
+struct Test1 {
+  DR data();
+  SR size();
+};
+static_assert(HasDataAndSize<Test1<int*, int>, int>::value, "");
+static_assert(HasDataAndSize<Test1<int*, int>, const int>::value, "");
+static_assert(HasDataAndSize<Test1<const int*, int>, const int>::value, "");
+static_assert(!HasDataAndSize<Test1<const int*, int>, int>::value,
+              "implicit cast of const int* to int*");
+static_assert(!HasDataAndSize<Test1<char*, size_t>, int>::value,
+              "implicit cast of char* to int*");
+
+struct Test2 {
+  int* data;
+  size_t size;
+};
+static_assert(!HasDataAndSize<Test2, int>::value,
+              ".data and .size aren't functions");
+
+struct Test3 {
+  int* data();
+};
+static_assert(!HasDataAndSize<Test3, int>::value, ".size() is missing");
+
+class Test4 {
+  int* data();
+  size_t size();
+};
+static_assert(!HasDataAndSize<Test4, int>::value,
+              ".data() and .size() are private");
+
+}  // namespace test_has_data_and_size
+
+namespace type_traits_impl {
+
+// Determines if the given type is an enum that converts implicitly to
+// an integral type.
+template <typename T>
+struct IsIntEnum {
+ private:
+  // This overload is used if the type is an enum, and unary plus
+  // compiles and turns it into an integral type.
+  template <typename X,
+            typename std::enable_if<
+                std::is_enum<X>::value &&
+                std::is_integral<decltype(+std::declval<X>())>::value>::type* =
+                nullptr>
+  static int Test(int);
+
+  // Otherwise, this overload is used.
+  template <typename>
+  static char Test(...);
+
+ public:
+  static constexpr bool value =
+      std::is_same<decltype(Test<typename std::remove_reference<T>::type>(0)),
+                   int>::value;
+};
+
+}  // namespace type_traits_impl
+
+// Determines if the given type is integral, or an enum that
+// converts implicitly to an integral type.
+template <typename T>
+struct IsIntlike {
+ private:
+  using X = typename std::remove_reference<T>::type;
+
+ public:
+  static constexpr bool value =
+      std::is_integral<X>::value || type_traits_impl::IsIntEnum<X>::value;
+};
+
+namespace test_enum_intlike {
+
+enum E1 { e1 };
+enum { e2 };
+enum class E3 { e3 };
+struct S {};
+
+static_assert(type_traits_impl::IsIntEnum<E1>::value, "");
+static_assert(type_traits_impl::IsIntEnum<decltype(e2)>::value, "");
+static_assert(!type_traits_impl::IsIntEnum<E3>::value, "");
+static_assert(!type_traits_impl::IsIntEnum<int>::value, "");
+static_assert(!type_traits_impl::IsIntEnum<float>::value, "");
+static_assert(!type_traits_impl::IsIntEnum<S>::value, "");
+
+static_assert(IsIntlike<E1>::value, "");
+static_assert(IsIntlike<decltype(e2)>::value, "");
+static_assert(!IsIntlike<E3>::value, "");
+static_assert(IsIntlike<int>::value, "");
+static_assert(!IsIntlike<float>::value, "");
+static_assert(!IsIntlike<S>::value, "");
+
+}  // test_enum_intlike
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_TYPE_TRAITS_H_
diff --git a/rtc_base/unittest_main.cc b/rtc_base/unittest_main.cc
new file mode 100644
index 0000000..aa8a11b
--- /dev/null
+++ b/rtc_base/unittest_main.cc
@@ -0,0 +1,134 @@
+/*
+ *  Copyright 2007 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+//
+// A reuseable entry point for gunit tests.
+
+#if defined(WEBRTC_WIN)
+#include <crtdbg.h>
+#endif
+
+#include "rtc_base/flags.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/ssladapter.h"
+#include "rtc_base/sslstreamadapter.h"
+#include "system_wrappers/include/field_trial_default.h"
+#include "test/field_trial.h"
+#include "test/testsupport/fileutils.h"
+
+#if defined(WEBRTC_IOS)
+#include "test/ios/test_support.h"
+#endif
+
+DEFINE_bool(help, false, "prints this message");
+DEFINE_string(log, "", "logging options to use");
+DEFINE_string(
+    force_fieldtrials,
+    "",
+    "Field trials control experimental feature code which can be forced. "
+    "E.g. running with --force_fieldtrials=WebRTC-FooFeature/Enable/"
+    " will assign the group Enable to field trial WebRTC-FooFeature.");
+#if defined(WEBRTC_WIN)
+DEFINE_int(crt_break_alloc, -1, "memory allocation to break on");
+DEFINE_bool(default_error_handlers, false,
+            "leave the default exception/dbg handler functions in place");
+
+void TestInvalidParameterHandler(const wchar_t* expression,
+                                 const wchar_t* function,
+                                 const wchar_t* file,
+                                 unsigned int line,
+                                 uintptr_t pReserved) {
+  RTC_LOG(LS_ERROR) << "InvalidParameter Handler called.  Exiting.";
+  RTC_LOG(LS_ERROR) << expression << std::endl
+                    << function << std::endl
+                    << file << std::endl
+                    << line;
+  exit(1);
+}
+void TestPureCallHandler() {
+  RTC_LOG(LS_ERROR) << "Purecall Handler called.  Exiting.";
+  exit(1);
+}
+int TestCrtReportHandler(int report_type, char* msg, int* retval) {
+  RTC_LOG(LS_ERROR) << "CrtReport Handler called...";
+  RTC_LOG(LS_ERROR) << msg;
+  if (report_type == _CRT_ASSERT) {
+    exit(1);
+  } else {
+    *retval = 0;
+    return TRUE;
+  }
+}
+#endif  // WEBRTC_WIN
+
+int main(int argc, char* argv[]) {
+  testing::InitGoogleTest(&argc, argv);
+  rtc::FlagList::SetFlagsFromCommandLine(&argc, argv, false);
+  if (FLAG_help) {
+    rtc::FlagList::Print(nullptr, false);
+    return 0;
+  }
+
+  webrtc::test::SetExecutablePath(argv[0]);
+  webrtc::test::ValidateFieldTrialsStringOrDie(FLAG_force_fieldtrials);
+  // InitFieldTrialsFromString stores the char*, so the char array must outlive
+  // the application.
+  webrtc::field_trial::InitFieldTrialsFromString(FLAG_force_fieldtrials);
+
+#if defined(WEBRTC_WIN)
+  if (!FLAG_default_error_handlers) {
+    // Make sure any errors don't throw dialogs hanging the test run.
+    _set_invalid_parameter_handler(TestInvalidParameterHandler);
+    _set_purecall_handler(TestPureCallHandler);
+    _CrtSetReportHook2(_CRT_RPTHOOK_INSTALL, TestCrtReportHandler);
+  }
+
+#if !defined(NDEBUG)  // Turn on memory leak checking on Windows.
+  _CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF |_CRTDBG_LEAK_CHECK_DF);
+  if (FLAG_crt_break_alloc >= 0) {
+    _crtBreakAlloc = FLAG_crt_break_alloc;
+  }
+#endif
+#endif  // WEBRTC_WIN
+
+  // By default, log timestamps. Allow overrides by used of a --log flag.
+  rtc::LogMessage::LogTimestamps();
+  if (*FLAG_log != '\0') {
+    rtc::LogMessage::ConfigureLogging(FLAG_log);
+  } else if (rtc::LogMessage::GetLogToDebug() > rtc::LS_INFO) {
+    // Default to LS_INFO, even for release builds to provide better test
+    // logging.
+    rtc::LogMessage::LogToDebug(rtc::LS_INFO);
+  }
+
+  // Initialize SSL which are used by several tests.
+  rtc::InitializeSSL();
+  rtc::SSLStreamAdapter::enable_time_callback_for_testing();
+
+#if defined(WEBRTC_IOS)
+  rtc::test::InitTestSuite(RUN_ALL_TESTS, argc, argv, false);
+  rtc::test::RunTestsFromIOSApp();
+#endif
+  const int res = RUN_ALL_TESTS();
+
+  rtc::CleanupSSL();
+
+  // clean up logging so we don't appear to leak memory.
+  rtc::LogMessage::ConfigureLogging("");
+
+#if defined(WEBRTC_WIN)
+  // Unhook crt function so that we don't ever log after statics have been
+  // uninitialized.
+  if (!FLAG_default_error_handlers)
+    _CrtSetReportHook2(_CRT_RPTHOOK_REMOVE, TestCrtReportHandler);
+#endif
+
+  return res;
+}
diff --git a/rtc_base/unixfilesystem.cc b/rtc_base/unixfilesystem.cc
new file mode 100644
index 0000000..a48aca1
--- /dev/null
+++ b/rtc_base/unixfilesystem.cc
@@ -0,0 +1,114 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/unixfilesystem.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+#include <CoreServices/CoreServices.h>
+#include <IOKit/IOCFBundle.h>
+#include <sys/statvfs.h>
+#include "rtc_base/macutils.h"
+#endif  // WEBRTC_MAC && !defined(WEBRTC_IOS)
+
+#if defined(WEBRTC_POSIX) && !defined(WEBRTC_MAC) || defined(WEBRTC_IOS)
+#include <sys/types.h>
+#if defined(WEBRTC_ANDROID)
+#include <sys/statfs.h>
+#elif !defined(__native_client__)
+#include <sys/statvfs.h>
+#endif  //  !defined(__native_client__)
+#include <limits.h>
+#include <pwd.h>
+#include <stdio.h>
+#endif  // WEBRTC_POSIX && !WEBRTC_MAC || WEBRTC_IOS
+
+#if defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID)
+#include <ctype.h>
+#include <algorithm>
+#endif
+
+#if defined(__native_client__) && !defined(__GLIBC__)
+#include <sys/syslimits.h>
+#endif
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/fileutils.h"
+#include "rtc_base/pathutils.h"
+#include "rtc_base/stream.h"
+#include "rtc_base/stringutils.h"
+
+namespace rtc {
+
+UnixFilesystem::UnixFilesystem() {}
+
+UnixFilesystem::~UnixFilesystem() {}
+
+bool UnixFilesystem::DeleteFile(const Pathname &filename) {
+  RTC_LOG(LS_INFO) << "Deleting file:" << filename.pathname();
+
+  if (!IsFile(filename)) {
+    RTC_DCHECK(IsFile(filename));
+    return false;
+  }
+  return ::unlink(filename.pathname().c_str()) == 0;
+}
+
+bool UnixFilesystem::MoveFile(const Pathname &old_path,
+                              const Pathname &new_path) {
+  if (!IsFile(old_path)) {
+    RTC_DCHECK(IsFile(old_path));
+    return false;
+  }
+  RTC_LOG(LS_VERBOSE) << "Moving " << old_path.pathname() << " to "
+                      << new_path.pathname();
+  if (rename(old_path.pathname().c_str(), new_path.pathname().c_str()) != 0) {
+    return false;
+  }
+  return true;
+}
+
+bool UnixFilesystem::IsFolder(const Pathname &path) {
+  struct stat st;
+  if (stat(path.pathname().c_str(), &st) < 0)
+    return false;
+  return S_ISDIR(st.st_mode);
+}
+
+bool UnixFilesystem::IsFile(const Pathname& pathname) {
+  struct stat st;
+  int res = ::stat(pathname.pathname().c_str(), &st);
+  // Treat symlinks, named pipes, etc. all as files.
+  return res == 0 && !S_ISDIR(st.st_mode);
+}
+
+bool UnixFilesystem::GetFileSize(const Pathname& pathname, size_t *size) {
+  struct stat st;
+  if (::stat(pathname.pathname().c_str(), &st) != 0)
+    return false;
+  *size = st.st_size;
+  return true;
+}
+
+}  // namespace rtc
+
+#if defined(__native_client__)
+extern "C" int __attribute__((weak))
+link(const char* oldpath, const char* newpath) {
+  errno = EACCES;
+  return -1;
+}
+#endif
diff --git a/rtc_base/unixfilesystem.h b/rtc_base/unixfilesystem.h
new file mode 100644
index 0000000..711d7b3
--- /dev/null
+++ b/rtc_base/unixfilesystem.h
@@ -0,0 +1,45 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_UNIXFILESYSTEM_H_
+#define RTC_BASE_UNIXFILESYSTEM_H_
+
+#include <sys/types.h>
+
+#include "rtc_base/fileutils.h"
+
+namespace rtc {
+
+class UnixFilesystem : public FilesystemInterface {
+ public:
+  UnixFilesystem();
+  ~UnixFilesystem() override;
+
+  // This will attempt to delete the file located at filename.
+  // It will fail with VERIY if you pass it a non-existant file, or a directory.
+  bool DeleteFile(const Pathname& filename) override;
+
+  // This moves a file from old_path to new_path, where "file" can be a plain
+  // file or directory, which will be moved recursively.
+  // Returns true if function succeeds.
+  bool MoveFile(const Pathname& old_path, const Pathname& new_path) override;
+
+  // Returns true if a pathname is a directory
+  bool IsFolder(const Pathname& pathname) override;
+
+  // Returns true of pathname represents an existing file
+  bool IsFile(const Pathname& pathname) override;
+
+  bool GetFileSize(const Pathname& path, size_t* size) override;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_UNIXFILESYSTEM_H_
diff --git a/rtc_base/virtualsocket_unittest.cc b/rtc_base/virtualsocket_unittest.cc
new file mode 100644
index 0000000..a6a4ee1
--- /dev/null
+++ b/rtc_base/virtualsocket_unittest.cc
@@ -0,0 +1,1145 @@
+/*
+ *  Copyright 2006 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <time.h>
+#if defined(WEBRTC_POSIX)
+#include <netinet/in.h>
+#endif
+
+#include <memory>
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/fakeclock.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/ptr_util.h"
+#include "rtc_base/testclient.h"
+#include "rtc_base/testutils.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/timeutils.h"
+#include "rtc_base/virtualsocketserver.h"
+
+using namespace rtc;
+
+using webrtc::testing::SSE_CLOSE;
+using webrtc::testing::SSE_ERROR;
+using webrtc::testing::SSE_OPEN;
+using webrtc::testing::SSE_READ;
+using webrtc::testing::SSE_WRITE;
+using webrtc::testing::StreamSink;
+
+// Sends at a constant rate but with random packet sizes.
+struct Sender : public MessageHandler {
+  Sender(Thread* th, AsyncSocket* s, uint32_t rt)
+      : thread(th),
+        socket(MakeUnique<AsyncUDPSocket>(s)),
+        done(false),
+        rate(rt),
+        count(0) {
+    last_send = rtc::TimeMillis();
+    thread->PostDelayed(RTC_FROM_HERE, NextDelay(), this, 1);
+  }
+
+  uint32_t NextDelay() {
+    uint32_t size = (rand() % 4096) + 1;
+    return 1000 * size / rate;
+  }
+
+  void OnMessage(Message* pmsg) override {
+    ASSERT_EQ(1u, pmsg->message_id);
+
+    if (done)
+      return;
+
+    int64_t cur_time = rtc::TimeMillis();
+    int64_t delay = cur_time - last_send;
+    uint32_t size = static_cast<uint32_t>(rate * delay / 1000);
+    size = std::min<uint32_t>(size, 4096);
+    size = std::max<uint32_t>(size, sizeof(uint32_t));
+
+    count += size;
+    memcpy(dummy, &cur_time, sizeof(cur_time));
+    socket->Send(dummy, size, options);
+
+    last_send = cur_time;
+    thread->PostDelayed(RTC_FROM_HERE, NextDelay(), this, 1);
+  }
+
+  Thread* thread;
+  std::unique_ptr<AsyncUDPSocket> socket;
+  rtc::PacketOptions options;
+  bool done;
+  uint32_t rate;  // bytes per second
+  uint32_t count;
+  int64_t last_send;
+  char dummy[4096];
+};
+
+struct Receiver : public MessageHandler, public sigslot::has_slots<> {
+  Receiver(Thread* th, AsyncSocket* s, uint32_t bw)
+      : thread(th),
+        socket(MakeUnique<AsyncUDPSocket>(s)),
+        bandwidth(bw),
+        done(false),
+        count(0),
+        sec_count(0),
+        sum(0),
+        sum_sq(0),
+        samples(0) {
+    socket->SignalReadPacket.connect(this, &Receiver::OnReadPacket);
+    thread->PostDelayed(RTC_FROM_HERE, 1000, this, 1);
+  }
+
+  ~Receiver() override { thread->Clear(this); }
+
+  void OnReadPacket(AsyncPacketSocket* s, const char* data, size_t size,
+                    const SocketAddress& remote_addr,
+                    const PacketTime& packet_time) {
+    ASSERT_EQ(socket.get(), s);
+    ASSERT_GE(size, 4U);
+
+    count += size;
+    sec_count += size;
+
+    uint32_t send_time = *reinterpret_cast<const uint32_t*>(data);
+    uint32_t recv_time = rtc::TimeMillis();
+    uint32_t delay = recv_time - send_time;
+    sum += delay;
+    sum_sq += delay * delay;
+    samples += 1;
+  }
+
+  void OnMessage(Message* pmsg) override {
+    ASSERT_EQ(1u, pmsg->message_id);
+
+    if (done)
+      return;
+
+    // It is always possible for us to receive more than expected because
+    // packets can be further delayed in delivery.
+    if (bandwidth > 0)
+      ASSERT_TRUE(sec_count <= 5 * bandwidth / 4);
+    sec_count = 0;
+    thread->PostDelayed(RTC_FROM_HERE, 1000, this, 1);
+  }
+
+  Thread* thread;
+  std::unique_ptr<AsyncUDPSocket> socket;
+  uint32_t bandwidth;
+  bool done;
+  size_t count;
+  size_t sec_count;
+  double sum;
+  double sum_sq;
+  uint32_t samples;
+};
+
+// Note: This test uses a fake clock in addition to a virtual network.
+class VirtualSocketServerTest : public testing::Test {
+ public:
+  VirtualSocketServerTest()
+      : ss_(&fake_clock_),
+        thread_(&ss_),
+        kIPv4AnyAddress(IPAddress(INADDR_ANY), 0),
+        kIPv6AnyAddress(IPAddress(in6addr_any), 0) {}
+
+  void CheckPortIncrementalization(const SocketAddress& post,
+                                   const SocketAddress& pre) {
+    EXPECT_EQ(post.port(), pre.port() + 1);
+    IPAddress post_ip = post.ipaddr();
+    IPAddress pre_ip = pre.ipaddr();
+    EXPECT_EQ(pre_ip.family(), post_ip.family());
+    if (post_ip.family() == AF_INET) {
+      in_addr pre_ipv4 = pre_ip.ipv4_address();
+      in_addr post_ipv4 = post_ip.ipv4_address();
+      EXPECT_EQ(post_ipv4.s_addr, pre_ipv4.s_addr);
+    } else if (post_ip.family() == AF_INET6) {
+      in6_addr post_ip6 = post_ip.ipv6_address();
+      in6_addr pre_ip6 = pre_ip.ipv6_address();
+      uint32_t* post_as_ints = reinterpret_cast<uint32_t*>(&post_ip6.s6_addr);
+      uint32_t* pre_as_ints = reinterpret_cast<uint32_t*>(&pre_ip6.s6_addr);
+      EXPECT_EQ(post_as_ints[3], pre_as_ints[3]);
+    }
+  }
+
+  // Test a client can bind to the any address, and all sent packets will have
+  // the default route as the source address. Also, it can receive packets sent
+  // to the default route.
+  void TestDefaultRoute(const IPAddress& default_route) {
+    ss_.SetDefaultRoute(default_route);
+
+    // Create client1 bound to the any address.
+    AsyncSocket* socket =
+        ss_.CreateAsyncSocket(default_route.family(), SOCK_DGRAM);
+    socket->Bind(EmptySocketAddressWithFamily(default_route.family()));
+    SocketAddress client1_any_addr = socket->GetLocalAddress();
+    EXPECT_TRUE(client1_any_addr.IsAnyIP());
+    auto client1 = MakeUnique<TestClient>(MakeUnique<AsyncUDPSocket>(socket),
+                                          &fake_clock_);
+
+    // Create client2 bound to the default route.
+    AsyncSocket* socket2 =
+        ss_.CreateAsyncSocket(default_route.family(), SOCK_DGRAM);
+    socket2->Bind(SocketAddress(default_route, 0));
+    SocketAddress client2_addr = socket2->GetLocalAddress();
+    EXPECT_FALSE(client2_addr.IsAnyIP());
+    auto client2 = MakeUnique<TestClient>(MakeUnique<AsyncUDPSocket>(socket2),
+                                          &fake_clock_);
+
+    // Client1 sends to client2, client2 should see the default route as
+    // client1's address.
+    SocketAddress client1_addr;
+    EXPECT_EQ(6, client1->SendTo("bizbaz", 6, client2_addr));
+    EXPECT_TRUE(client2->CheckNextPacket("bizbaz", 6, &client1_addr));
+    EXPECT_EQ(client1_addr,
+              SocketAddress(default_route, client1_any_addr.port()));
+
+    // Client2 can send back to client1's default route address.
+    EXPECT_EQ(3, client2->SendTo("foo", 3, client1_addr));
+    EXPECT_TRUE(client1->CheckNextPacket("foo", 3, &client2_addr));
+  }
+
+  void BasicTest(const SocketAddress& initial_addr) {
+    AsyncSocket* socket =
+        ss_.CreateAsyncSocket(initial_addr.family(), SOCK_DGRAM);
+    socket->Bind(initial_addr);
+    SocketAddress server_addr = socket->GetLocalAddress();
+    // Make sure VSS didn't switch families on us.
+    EXPECT_EQ(server_addr.family(), initial_addr.family());
+
+    auto client1 = MakeUnique<TestClient>(MakeUnique<AsyncUDPSocket>(socket),
+                                          &fake_clock_);
+    AsyncSocket* socket2 =
+        ss_.CreateAsyncSocket(initial_addr.family(), SOCK_DGRAM);
+    auto client2 = MakeUnique<TestClient>(MakeUnique<AsyncUDPSocket>(socket2),
+                                          &fake_clock_);
+
+    SocketAddress client2_addr;
+    EXPECT_EQ(3, client2->SendTo("foo", 3, server_addr));
+    EXPECT_TRUE(client1->CheckNextPacket("foo", 3, &client2_addr));
+
+    SocketAddress client1_addr;
+    EXPECT_EQ(6, client1->SendTo("bizbaz", 6, client2_addr));
+    EXPECT_TRUE(client2->CheckNextPacket("bizbaz", 6, &client1_addr));
+    EXPECT_EQ(client1_addr, server_addr);
+
+    SocketAddress empty = EmptySocketAddressWithFamily(initial_addr.family());
+    for (int i = 0; i < 10; i++) {
+      client2 = MakeUnique<TestClient>(
+          WrapUnique(AsyncUDPSocket::Create(&ss_, empty)), &fake_clock_);
+
+      SocketAddress next_client2_addr;
+      EXPECT_EQ(3, client2->SendTo("foo", 3, server_addr));
+      EXPECT_TRUE(client1->CheckNextPacket("foo", 3, &next_client2_addr));
+      CheckPortIncrementalization(next_client2_addr, client2_addr);
+      // EXPECT_EQ(next_client2_addr.port(), client2_addr.port() + 1);
+
+      SocketAddress server_addr2;
+      EXPECT_EQ(6, client1->SendTo("bizbaz", 6, next_client2_addr));
+      EXPECT_TRUE(client2->CheckNextPacket("bizbaz", 6, &server_addr2));
+      EXPECT_EQ(server_addr2, server_addr);
+
+      client2_addr = next_client2_addr;
+    }
+  }
+
+  // initial_addr should be made from either INADDR_ANY or in6addr_any.
+  void ConnectTest(const SocketAddress& initial_addr) {
+    StreamSink sink;
+    SocketAddress accept_addr;
+    const SocketAddress kEmptyAddr =
+        EmptySocketAddressWithFamily(initial_addr.family());
+
+    // Create client
+    std::unique_ptr<AsyncSocket> client =
+        WrapUnique(ss_.CreateAsyncSocket(initial_addr.family(), SOCK_STREAM));
+    sink.Monitor(client.get());
+    EXPECT_EQ(client->GetState(), AsyncSocket::CS_CLOSED);
+    EXPECT_TRUE(client->GetLocalAddress().IsNil());
+
+    // Create server
+    std::unique_ptr<AsyncSocket> server =
+        WrapUnique(ss_.CreateAsyncSocket(initial_addr.family(), SOCK_STREAM));
+    sink.Monitor(server.get());
+    EXPECT_NE(0, server->Listen(5));  // Bind required
+    EXPECT_EQ(0, server->Bind(initial_addr));
+    EXPECT_EQ(server->GetLocalAddress().family(), initial_addr.family());
+    EXPECT_EQ(0, server->Listen(5));
+    EXPECT_EQ(server->GetState(), AsyncSocket::CS_CONNECTING);
+
+    // No pending server connections
+    EXPECT_FALSE(sink.Check(server.get(), SSE_READ));
+    EXPECT_TRUE(nullptr == server->Accept(&accept_addr));
+    EXPECT_EQ(AF_UNSPEC, accept_addr.family());
+
+    // Attempt connect to listening socket
+    EXPECT_EQ(0, client->Connect(server->GetLocalAddress()));
+    EXPECT_NE(client->GetLocalAddress(), kEmptyAddr);  // Implicit Bind
+    EXPECT_NE(AF_UNSPEC, client->GetLocalAddress().family());  // Implicit Bind
+    EXPECT_NE(client->GetLocalAddress(), server->GetLocalAddress());
+
+    // Client is connecting
+    EXPECT_EQ(client->GetState(), AsyncSocket::CS_CONNECTING);
+    EXPECT_FALSE(sink.Check(client.get(), SSE_OPEN));
+    EXPECT_FALSE(sink.Check(client.get(), SSE_CLOSE));
+
+    ss_.ProcessMessagesUntilIdle();
+
+    // Client still connecting
+    EXPECT_EQ(client->GetState(), AsyncSocket::CS_CONNECTING);
+    EXPECT_FALSE(sink.Check(client.get(), SSE_OPEN));
+    EXPECT_FALSE(sink.Check(client.get(), SSE_CLOSE));
+
+    // Server has pending connection
+    EXPECT_TRUE(sink.Check(server.get(), SSE_READ));
+    std::unique_ptr<Socket> accepted = WrapUnique(server->Accept(&accept_addr));
+    EXPECT_TRUE(nullptr != accepted);
+    EXPECT_NE(accept_addr, kEmptyAddr);
+    EXPECT_EQ(accepted->GetRemoteAddress(), accept_addr);
+
+    EXPECT_EQ(accepted->GetState(), AsyncSocket::CS_CONNECTED);
+    EXPECT_EQ(accepted->GetLocalAddress(), server->GetLocalAddress());
+    EXPECT_EQ(accepted->GetRemoteAddress(), client->GetLocalAddress());
+
+    ss_.ProcessMessagesUntilIdle();
+
+    // Client has connected
+    EXPECT_EQ(client->GetState(), AsyncSocket::CS_CONNECTED);
+    EXPECT_TRUE(sink.Check(client.get(), SSE_OPEN));
+    EXPECT_FALSE(sink.Check(client.get(), SSE_CLOSE));
+    EXPECT_EQ(client->GetRemoteAddress(), server->GetLocalAddress());
+    EXPECT_EQ(client->GetRemoteAddress(), accepted->GetLocalAddress());
+  }
+
+  void ConnectToNonListenerTest(const SocketAddress& initial_addr) {
+    StreamSink sink;
+    SocketAddress accept_addr;
+    const SocketAddress nil_addr;
+    const SocketAddress empty_addr =
+        EmptySocketAddressWithFamily(initial_addr.family());
+
+    // Create client
+    std::unique_ptr<AsyncSocket> client =
+        WrapUnique(ss_.CreateAsyncSocket(initial_addr.family(), SOCK_STREAM));
+    sink.Monitor(client.get());
+
+    // Create server
+    std::unique_ptr<AsyncSocket> server =
+        WrapUnique(ss_.CreateAsyncSocket(initial_addr.family(), SOCK_STREAM));
+    sink.Monitor(server.get());
+    EXPECT_EQ(0, server->Bind(initial_addr));
+    EXPECT_EQ(server->GetLocalAddress().family(), initial_addr.family());
+    // Attempt connect to non-listening socket
+    EXPECT_EQ(0, client->Connect(server->GetLocalAddress()));
+
+    ss_.ProcessMessagesUntilIdle();
+
+    // No pending server connections
+    EXPECT_FALSE(sink.Check(server.get(), SSE_READ));
+    EXPECT_TRUE(nullptr == server->Accept(&accept_addr));
+    EXPECT_EQ(accept_addr, nil_addr);
+
+    // Connection failed
+    EXPECT_EQ(client->GetState(), AsyncSocket::CS_CLOSED);
+    EXPECT_FALSE(sink.Check(client.get(), SSE_OPEN));
+    EXPECT_TRUE(sink.Check(client.get(), SSE_ERROR));
+    EXPECT_EQ(client->GetRemoteAddress(), nil_addr);
+  }
+
+  void CloseDuringConnectTest(const SocketAddress& initial_addr) {
+    StreamSink sink;
+    SocketAddress accept_addr;
+    const SocketAddress empty_addr =
+        EmptySocketAddressWithFamily(initial_addr.family());
+
+    // Create client and server
+    std::unique_ptr<AsyncSocket> client(
+        ss_.CreateAsyncSocket(initial_addr.family(), SOCK_STREAM));
+    sink.Monitor(client.get());
+    std::unique_ptr<AsyncSocket> server(
+        ss_.CreateAsyncSocket(initial_addr.family(), SOCK_STREAM));
+    sink.Monitor(server.get());
+
+    // Initiate connect
+    EXPECT_EQ(0, server->Bind(initial_addr));
+    EXPECT_EQ(server->GetLocalAddress().family(), initial_addr.family());
+
+    EXPECT_EQ(0, server->Listen(5));
+    EXPECT_EQ(0, client->Connect(server->GetLocalAddress()));
+
+    // Server close before socket enters accept queue
+    EXPECT_FALSE(sink.Check(server.get(), SSE_READ));
+    server->Close();
+
+    ss_.ProcessMessagesUntilIdle();
+
+    // Result: connection failed
+    EXPECT_EQ(client->GetState(), AsyncSocket::CS_CLOSED);
+    EXPECT_TRUE(sink.Check(client.get(), SSE_ERROR));
+
+    server.reset(ss_.CreateAsyncSocket(initial_addr.family(), SOCK_STREAM));
+    sink.Monitor(server.get());
+
+    // Initiate connect
+    EXPECT_EQ(0, server->Bind(initial_addr));
+    EXPECT_EQ(server->GetLocalAddress().family(), initial_addr.family());
+
+    EXPECT_EQ(0, server->Listen(5));
+    EXPECT_EQ(0, client->Connect(server->GetLocalAddress()));
+
+    ss_.ProcessMessagesUntilIdle();
+
+    // Server close while socket is in accept queue
+    EXPECT_TRUE(sink.Check(server.get(), SSE_READ));
+    server->Close();
+
+    ss_.ProcessMessagesUntilIdle();
+
+    // Result: connection failed
+    EXPECT_EQ(client->GetState(), AsyncSocket::CS_CLOSED);
+    EXPECT_TRUE(sink.Check(client.get(), SSE_ERROR));
+
+    // New server
+    server.reset(ss_.CreateAsyncSocket(initial_addr.family(), SOCK_STREAM));
+    sink.Monitor(server.get());
+
+    // Initiate connect
+    EXPECT_EQ(0, server->Bind(initial_addr));
+    EXPECT_EQ(server->GetLocalAddress().family(), initial_addr.family());
+
+    EXPECT_EQ(0, server->Listen(5));
+    EXPECT_EQ(0, client->Connect(server->GetLocalAddress()));
+
+    ss_.ProcessMessagesUntilIdle();
+
+    // Server accepts connection
+    EXPECT_TRUE(sink.Check(server.get(), SSE_READ));
+    std::unique_ptr<AsyncSocket> accepted(server->Accept(&accept_addr));
+    ASSERT_TRUE(nullptr != accepted.get());
+    sink.Monitor(accepted.get());
+
+    // Client closes before connection complets
+    EXPECT_EQ(accepted->GetState(), AsyncSocket::CS_CONNECTED);
+
+    // Connected message has not been processed yet.
+    EXPECT_EQ(client->GetState(), AsyncSocket::CS_CONNECTING);
+    client->Close();
+
+    ss_.ProcessMessagesUntilIdle();
+
+    // Result: accepted socket closes
+    EXPECT_EQ(accepted->GetState(), AsyncSocket::CS_CLOSED);
+    EXPECT_TRUE(sink.Check(accepted.get(), SSE_CLOSE));
+    EXPECT_FALSE(sink.Check(client.get(), SSE_CLOSE));
+  }
+
+  void CloseTest(const SocketAddress& initial_addr) {
+    StreamSink sink;
+    const SocketAddress kEmptyAddr;
+
+    // Create clients
+    std::unique_ptr<AsyncSocket> a =
+        WrapUnique(ss_.CreateAsyncSocket(initial_addr.family(), SOCK_STREAM));
+    sink.Monitor(a.get());
+    a->Bind(initial_addr);
+    EXPECT_EQ(a->GetLocalAddress().family(), initial_addr.family());
+
+    std::unique_ptr<AsyncSocket> b =
+        WrapUnique(ss_.CreateAsyncSocket(initial_addr.family(), SOCK_STREAM));
+    sink.Monitor(b.get());
+    b->Bind(initial_addr);
+    EXPECT_EQ(b->GetLocalAddress().family(), initial_addr.family());
+
+    EXPECT_EQ(0, a->Connect(b->GetLocalAddress()));
+    EXPECT_EQ(0, b->Connect(a->GetLocalAddress()));
+
+    ss_.ProcessMessagesUntilIdle();
+
+    EXPECT_TRUE(sink.Check(a.get(), SSE_OPEN));
+    EXPECT_EQ(a->GetState(), AsyncSocket::CS_CONNECTED);
+    EXPECT_EQ(a->GetRemoteAddress(), b->GetLocalAddress());
+
+    EXPECT_TRUE(sink.Check(b.get(), SSE_OPEN));
+    EXPECT_EQ(b->GetState(), AsyncSocket::CS_CONNECTED);
+    EXPECT_EQ(b->GetRemoteAddress(), a->GetLocalAddress());
+
+    EXPECT_EQ(1, a->Send("a", 1));
+    b->Close();
+    EXPECT_EQ(1, a->Send("b", 1));
+
+    ss_.ProcessMessagesUntilIdle();
+
+    char buffer[10];
+    EXPECT_FALSE(sink.Check(b.get(), SSE_READ));
+    EXPECT_EQ(-1, b->Recv(buffer, 10, nullptr));
+
+    EXPECT_TRUE(sink.Check(a.get(), SSE_CLOSE));
+    EXPECT_EQ(a->GetState(), AsyncSocket::CS_CLOSED);
+    EXPECT_EQ(a->GetRemoteAddress(), kEmptyAddr);
+
+    // No signal for Closer
+    EXPECT_FALSE(sink.Check(b.get(), SSE_CLOSE));
+    EXPECT_EQ(b->GetState(), AsyncSocket::CS_CLOSED);
+    EXPECT_EQ(b->GetRemoteAddress(), kEmptyAddr);
+  }
+
+  void TcpSendTest(const SocketAddress& initial_addr) {
+    StreamSink sink;
+    const SocketAddress kEmptyAddr;
+
+    // Connect two sockets
+    std::unique_ptr<AsyncSocket> a =
+        WrapUnique(ss_.CreateAsyncSocket(initial_addr.family(), SOCK_STREAM));
+    sink.Monitor(a.get());
+    a->Bind(initial_addr);
+    EXPECT_EQ(a->GetLocalAddress().family(), initial_addr.family());
+
+    std::unique_ptr<AsyncSocket> b =
+        WrapUnique(ss_.CreateAsyncSocket(initial_addr.family(), SOCK_STREAM));
+    sink.Monitor(b.get());
+    b->Bind(initial_addr);
+    EXPECT_EQ(b->GetLocalAddress().family(), initial_addr.family());
+
+    EXPECT_EQ(0, a->Connect(b->GetLocalAddress()));
+    EXPECT_EQ(0, b->Connect(a->GetLocalAddress()));
+
+    ss_.ProcessMessagesUntilIdle();
+
+    const size_t kBufferSize = 2000;
+    ss_.set_send_buffer_capacity(kBufferSize);
+    ss_.set_recv_buffer_capacity(kBufferSize);
+
+    const size_t kDataSize = 5000;
+    char send_buffer[kDataSize], recv_buffer[kDataSize];
+    for (size_t i = 0; i < kDataSize; ++i)
+      send_buffer[i] = static_cast<char>(i % 256);
+    memset(recv_buffer, 0, sizeof(recv_buffer));
+    size_t send_pos = 0, recv_pos = 0;
+
+    // Can't send more than send buffer in one write
+    int result = a->Send(send_buffer + send_pos, kDataSize - send_pos);
+    EXPECT_EQ(static_cast<int>(kBufferSize), result);
+    send_pos += result;
+
+    ss_.ProcessMessagesUntilIdle();
+    EXPECT_FALSE(sink.Check(a.get(), SSE_WRITE));
+    EXPECT_TRUE(sink.Check(b.get(), SSE_READ));
+
+    // Receive buffer is already filled, fill send buffer again
+    result = a->Send(send_buffer + send_pos, kDataSize - send_pos);
+    EXPECT_EQ(static_cast<int>(kBufferSize), result);
+    send_pos += result;
+
+    ss_.ProcessMessagesUntilIdle();
+    EXPECT_FALSE(sink.Check(a.get(), SSE_WRITE));
+    EXPECT_FALSE(sink.Check(b.get(), SSE_READ));
+
+    // No more room in send or receive buffer
+    result = a->Send(send_buffer + send_pos, kDataSize - send_pos);
+    EXPECT_EQ(-1, result);
+    EXPECT_TRUE(a->IsBlocking());
+
+    // Read a subset of the data
+    result = b->Recv(recv_buffer + recv_pos, 500, nullptr);
+    EXPECT_EQ(500, result);
+    recv_pos += result;
+
+    ss_.ProcessMessagesUntilIdle();
+    EXPECT_TRUE(sink.Check(a.get(), SSE_WRITE));
+    EXPECT_TRUE(sink.Check(b.get(), SSE_READ));
+
+    // Room for more on the sending side
+    result = a->Send(send_buffer + send_pos, kDataSize - send_pos);
+    EXPECT_EQ(500, result);
+    send_pos += result;
+
+    // Empty the recv buffer
+    while (true) {
+      result = b->Recv(recv_buffer + recv_pos, kDataSize - recv_pos, nullptr);
+      if (result < 0) {
+        EXPECT_EQ(-1, result);
+        EXPECT_TRUE(b->IsBlocking());
+        break;
+      }
+      recv_pos += result;
+    }
+
+    ss_.ProcessMessagesUntilIdle();
+    EXPECT_TRUE(sink.Check(b.get(), SSE_READ));
+
+    // Continue to empty the recv buffer
+    while (true) {
+      result = b->Recv(recv_buffer + recv_pos, kDataSize - recv_pos, nullptr);
+      if (result < 0) {
+        EXPECT_EQ(-1, result);
+        EXPECT_TRUE(b->IsBlocking());
+        break;
+      }
+      recv_pos += result;
+    }
+
+    // Send last of the data
+    result = a->Send(send_buffer + send_pos, kDataSize - send_pos);
+    EXPECT_EQ(500, result);
+    send_pos += result;
+
+    ss_.ProcessMessagesUntilIdle();
+    EXPECT_TRUE(sink.Check(b.get(), SSE_READ));
+
+    // Receive the last of the data
+    while (true) {
+      result = b->Recv(recv_buffer + recv_pos, kDataSize - recv_pos, nullptr);
+      if (result < 0) {
+        EXPECT_EQ(-1, result);
+        EXPECT_TRUE(b->IsBlocking());
+        break;
+      }
+      recv_pos += result;
+    }
+
+    ss_.ProcessMessagesUntilIdle();
+    EXPECT_FALSE(sink.Check(b.get(), SSE_READ));
+
+    // The received data matches the sent data
+    EXPECT_EQ(kDataSize, send_pos);
+    EXPECT_EQ(kDataSize, recv_pos);
+    EXPECT_EQ(0, memcmp(recv_buffer, send_buffer, kDataSize));
+  }
+
+  void TcpSendsPacketsInOrderTest(const SocketAddress& initial_addr) {
+    const SocketAddress kEmptyAddr;
+
+    // Connect two sockets
+    std::unique_ptr<AsyncSocket> a =
+        WrapUnique(ss_.CreateAsyncSocket(initial_addr.family(), SOCK_STREAM));
+    std::unique_ptr<AsyncSocket> b =
+        WrapUnique(ss_.CreateAsyncSocket(initial_addr.family(), SOCK_STREAM));
+    a->Bind(initial_addr);
+    EXPECT_EQ(a->GetLocalAddress().family(), initial_addr.family());
+
+    b->Bind(initial_addr);
+    EXPECT_EQ(b->GetLocalAddress().family(), initial_addr.family());
+
+    EXPECT_EQ(0, a->Connect(b->GetLocalAddress()));
+    EXPECT_EQ(0, b->Connect(a->GetLocalAddress()));
+    ss_.ProcessMessagesUntilIdle();
+
+    // First, deliver all packets in 0 ms.
+    char buffer[2] = { 0, 0 };
+    const char cNumPackets = 10;
+    for (char i = 0; i < cNumPackets; ++i) {
+      buffer[0] = '0' + i;
+      EXPECT_EQ(1, a->Send(buffer, 1));
+    }
+
+    ss_.ProcessMessagesUntilIdle();
+
+    for (char i = 0; i < cNumPackets; ++i) {
+      EXPECT_EQ(1, b->Recv(buffer, sizeof(buffer), nullptr));
+      EXPECT_EQ(static_cast<char>('0' + i), buffer[0]);
+    }
+
+    // Next, deliver packets at random intervals
+    const uint32_t mean = 50;
+    const uint32_t stddev = 50;
+
+    ss_.set_delay_mean(mean);
+    ss_.set_delay_stddev(stddev);
+    ss_.UpdateDelayDistribution();
+
+    for (char i = 0; i < cNumPackets; ++i) {
+      buffer[0] = 'A' + i;
+      EXPECT_EQ(1, a->Send(buffer, 1));
+    }
+
+    ss_.ProcessMessagesUntilIdle();
+
+    for (char i = 0; i < cNumPackets; ++i) {
+      EXPECT_EQ(1, b->Recv(buffer, sizeof(buffer), nullptr));
+      EXPECT_EQ(static_cast<char>('A' + i), buffer[0]);
+    }
+  }
+
+  // It is important that initial_addr's port has to be 0 such that the
+  // incremental port behavior could ensure the 2 Binds result in different
+  // address.
+  void BandwidthTest(const SocketAddress& initial_addr) {
+    AsyncSocket* send_socket =
+        ss_.CreateAsyncSocket(initial_addr.family(), SOCK_DGRAM);
+    AsyncSocket* recv_socket =
+        ss_.CreateAsyncSocket(initial_addr.family(), SOCK_DGRAM);
+    ASSERT_EQ(0, send_socket->Bind(initial_addr));
+    ASSERT_EQ(0, recv_socket->Bind(initial_addr));
+    EXPECT_EQ(send_socket->GetLocalAddress().family(), initial_addr.family());
+    EXPECT_EQ(recv_socket->GetLocalAddress().family(), initial_addr.family());
+    ASSERT_EQ(0, send_socket->Connect(recv_socket->GetLocalAddress()));
+
+    uint32_t bandwidth = 64 * 1024;
+    ss_.set_bandwidth(bandwidth);
+
+    Thread* pthMain = Thread::Current();
+    Sender sender(pthMain, send_socket, 80 * 1024);
+    Receiver receiver(pthMain, recv_socket, bandwidth);
+
+    // Allow the sender to run for 5 (simulated) seconds, then be stopped for 5
+    // seconds.
+    SIMULATED_WAIT(false, 5000, fake_clock_);
+    sender.done = true;
+    SIMULATED_WAIT(false, 5000, fake_clock_);
+
+    // Ensure the observed bandwidth fell within a reasonable margin of error.
+    EXPECT_TRUE(receiver.count >= 5 * 3 * bandwidth / 4);
+    EXPECT_TRUE(receiver.count <= 6 * bandwidth);  // queue could drain for 1s
+
+    ss_.set_bandwidth(0);
+  }
+
+  // It is important that initial_addr's port has to be 0 such that the
+  // incremental port behavior could ensure the 2 Binds result in different
+  // address.
+  void DelayTest(const SocketAddress& initial_addr) {
+    time_t seed = ::time(nullptr);
+    RTC_LOG(LS_VERBOSE) << "seed = " << seed;
+    srand(static_cast<unsigned int>(seed));
+
+    const uint32_t mean = 2000;
+    const uint32_t stddev = 500;
+
+    ss_.set_delay_mean(mean);
+    ss_.set_delay_stddev(stddev);
+    ss_.UpdateDelayDistribution();
+
+    AsyncSocket* send_socket =
+        ss_.CreateAsyncSocket(initial_addr.family(), SOCK_DGRAM);
+    AsyncSocket* recv_socket =
+        ss_.CreateAsyncSocket(initial_addr.family(), SOCK_DGRAM);
+    ASSERT_EQ(0, send_socket->Bind(initial_addr));
+    ASSERT_EQ(0, recv_socket->Bind(initial_addr));
+    EXPECT_EQ(send_socket->GetLocalAddress().family(), initial_addr.family());
+    EXPECT_EQ(recv_socket->GetLocalAddress().family(), initial_addr.family());
+    ASSERT_EQ(0, send_socket->Connect(recv_socket->GetLocalAddress()));
+
+    Thread* pthMain = Thread::Current();
+    // Avg packet size is 2K, so at 200KB/s for 10s, we should see about
+    // 1000 packets, which is necessary to get a good distribution.
+    Sender sender(pthMain, send_socket, 100 * 2 * 1024);
+    Receiver receiver(pthMain, recv_socket, 0);
+
+    // Simulate 10 seconds of packets being sent, then check the observed delay
+    // distribution.
+    SIMULATED_WAIT(false, 10000, fake_clock_);
+    sender.done = receiver.done = true;
+    ss_.ProcessMessagesUntilIdle();
+
+    const double sample_mean = receiver.sum / receiver.samples;
+    double num =
+        receiver.samples * receiver.sum_sq - receiver.sum * receiver.sum;
+    double den = receiver.samples * (receiver.samples - 1);
+    const double sample_stddev = sqrt(num / den);
+    RTC_LOG(LS_VERBOSE) << "mean=" << sample_mean
+                        << " stddev=" << sample_stddev;
+
+    EXPECT_LE(500u, receiver.samples);
+    // We initially used a 0.1 fudge factor, but on the build machine, we
+    // have seen the value differ by as much as 0.13.
+    EXPECT_NEAR(mean, sample_mean, 0.15 * mean);
+    EXPECT_NEAR(stddev, sample_stddev, 0.15 * stddev);
+
+    ss_.set_delay_mean(0);
+    ss_.set_delay_stddev(0);
+    ss_.UpdateDelayDistribution();
+  }
+
+  // Test cross-family communication between a client bound to client_addr and a
+  // server bound to server_addr. shouldSucceed indicates if communication is
+  // expected to work or not.
+  void CrossFamilyConnectionTest(const SocketAddress& client_addr,
+                                 const SocketAddress& server_addr,
+                                 bool shouldSucceed) {
+    StreamSink sink;
+    SocketAddress accept_address;
+    const SocketAddress kEmptyAddr;
+
+    // Client gets a IPv4 address
+    std::unique_ptr<AsyncSocket> client =
+        WrapUnique(ss_.CreateAsyncSocket(client_addr.family(), SOCK_STREAM));
+    sink.Monitor(client.get());
+    EXPECT_EQ(client->GetState(), AsyncSocket::CS_CLOSED);
+    EXPECT_EQ(client->GetLocalAddress(), kEmptyAddr);
+    client->Bind(client_addr);
+
+    // Server gets a non-mapped non-any IPv6 address.
+    // IPv4 sockets should not be able to connect to this.
+    std::unique_ptr<AsyncSocket> server =
+        WrapUnique(ss_.CreateAsyncSocket(server_addr.family(), SOCK_STREAM));
+    sink.Monitor(server.get());
+    server->Bind(server_addr);
+    server->Listen(5);
+
+    if (shouldSucceed) {
+      EXPECT_EQ(0, client->Connect(server->GetLocalAddress()));
+      ss_.ProcessMessagesUntilIdle();
+      EXPECT_TRUE(sink.Check(server.get(), SSE_READ));
+      std::unique_ptr<Socket> accepted =
+          WrapUnique(server->Accept(&accept_address));
+      EXPECT_TRUE(nullptr != accepted);
+      EXPECT_NE(kEmptyAddr, accept_address);
+      ss_.ProcessMessagesUntilIdle();
+      EXPECT_TRUE(sink.Check(client.get(), SSE_OPEN));
+      EXPECT_EQ(client->GetRemoteAddress(), server->GetLocalAddress());
+    } else {
+      // Check that the connection failed.
+      EXPECT_EQ(-1, client->Connect(server->GetLocalAddress()));
+      ss_.ProcessMessagesUntilIdle();
+
+      EXPECT_FALSE(sink.Check(server.get(), SSE_READ));
+      EXPECT_TRUE(nullptr == server->Accept(&accept_address));
+      EXPECT_EQ(accept_address, kEmptyAddr);
+      EXPECT_EQ(client->GetState(), AsyncSocket::CS_CLOSED);
+      EXPECT_FALSE(sink.Check(client.get(), SSE_OPEN));
+      EXPECT_EQ(client->GetRemoteAddress(), kEmptyAddr);
+    }
+  }
+
+  // Test cross-family datagram sending between a client bound to client_addr
+  // and a server bound to server_addr. shouldSucceed indicates if sending is
+  // expected to succeed or not.
+  void CrossFamilyDatagramTest(const SocketAddress& client_addr,
+                               const SocketAddress& server_addr,
+                               bool shouldSucceed) {
+    AsyncSocket* socket = ss_.CreateAsyncSocket(SOCK_DGRAM);
+    socket->Bind(server_addr);
+    SocketAddress bound_server_addr = socket->GetLocalAddress();
+    auto client1 = MakeUnique<TestClient>(MakeUnique<AsyncUDPSocket>(socket),
+                                          &fake_clock_);
+
+    AsyncSocket* socket2 = ss_.CreateAsyncSocket(SOCK_DGRAM);
+    socket2->Bind(client_addr);
+    auto client2 = MakeUnique<TestClient>(MakeUnique<AsyncUDPSocket>(socket2),
+                                          &fake_clock_);
+    SocketAddress client2_addr;
+
+    if (shouldSucceed) {
+      EXPECT_EQ(3, client2->SendTo("foo", 3, bound_server_addr));
+      EXPECT_TRUE(client1->CheckNextPacket("foo", 3, &client2_addr));
+      SocketAddress client1_addr;
+      EXPECT_EQ(6, client1->SendTo("bizbaz", 6, client2_addr));
+      EXPECT_TRUE(client2->CheckNextPacket("bizbaz", 6, &client1_addr));
+      EXPECT_EQ(client1_addr, bound_server_addr);
+    } else {
+      EXPECT_EQ(-1, client2->SendTo("foo", 3, bound_server_addr));
+      EXPECT_TRUE(client1->CheckNoPacket());
+    }
+  }
+
+ protected:
+  rtc::ScopedFakeClock fake_clock_;
+  VirtualSocketServer ss_;
+  AutoSocketServerThread thread_;
+  const SocketAddress kIPv4AnyAddress;
+  const SocketAddress kIPv6AnyAddress;
+};
+
+TEST_F(VirtualSocketServerTest, basic_v4) {
+  SocketAddress ipv4_test_addr(IPAddress(INADDR_ANY), 5000);
+  BasicTest(ipv4_test_addr);
+}
+
+TEST_F(VirtualSocketServerTest, basic_v6) {
+  SocketAddress ipv6_test_addr(IPAddress(in6addr_any), 5000);
+  BasicTest(ipv6_test_addr);
+}
+
+TEST_F(VirtualSocketServerTest, TestDefaultRoute_v4) {
+  IPAddress ipv4_default_addr(0x01020304);
+  TestDefaultRoute(ipv4_default_addr);
+}
+
+TEST_F(VirtualSocketServerTest, TestDefaultRoute_v6) {
+  IPAddress ipv6_default_addr;
+  EXPECT_TRUE(
+      IPFromString("2401:fa00:4:1000:be30:5bff:fee5:c3", &ipv6_default_addr));
+  TestDefaultRoute(ipv6_default_addr);
+}
+
+TEST_F(VirtualSocketServerTest, connect_v4) {
+  ConnectTest(kIPv4AnyAddress);
+}
+
+TEST_F(VirtualSocketServerTest, connect_v6) {
+  ConnectTest(kIPv6AnyAddress);
+}
+
+TEST_F(VirtualSocketServerTest, connect_to_non_listener_v4) {
+  ConnectToNonListenerTest(kIPv4AnyAddress);
+}
+
+TEST_F(VirtualSocketServerTest, connect_to_non_listener_v6) {
+  ConnectToNonListenerTest(kIPv6AnyAddress);
+}
+
+TEST_F(VirtualSocketServerTest, close_during_connect_v4) {
+  CloseDuringConnectTest(kIPv4AnyAddress);
+}
+
+TEST_F(VirtualSocketServerTest, close_during_connect_v6) {
+  CloseDuringConnectTest(kIPv6AnyAddress);
+}
+
+TEST_F(VirtualSocketServerTest, close_v4) {
+  CloseTest(kIPv4AnyAddress);
+}
+
+TEST_F(VirtualSocketServerTest, close_v6) {
+  CloseTest(kIPv6AnyAddress);
+}
+
+TEST_F(VirtualSocketServerTest, tcp_send_v4) {
+  TcpSendTest(kIPv4AnyAddress);
+}
+
+TEST_F(VirtualSocketServerTest, tcp_send_v6) {
+  TcpSendTest(kIPv6AnyAddress);
+}
+
+TEST_F(VirtualSocketServerTest, TcpSendsPacketsInOrder_v4) {
+  TcpSendsPacketsInOrderTest(kIPv4AnyAddress);
+}
+
+TEST_F(VirtualSocketServerTest, TcpSendsPacketsInOrder_v6) {
+  TcpSendsPacketsInOrderTest(kIPv6AnyAddress);
+}
+
+TEST_F(VirtualSocketServerTest, bandwidth_v4) {
+  BandwidthTest(kIPv4AnyAddress);
+}
+
+TEST_F(VirtualSocketServerTest, bandwidth_v6) {
+  BandwidthTest(kIPv6AnyAddress);
+}
+
+TEST_F(VirtualSocketServerTest, delay_v4) {
+  DelayTest(kIPv4AnyAddress);
+}
+
+TEST_F(VirtualSocketServerTest, delay_v6) {
+  DelayTest(kIPv6AnyAddress);
+}
+
+// Works, receiving socket sees 127.0.0.2.
+TEST_F(VirtualSocketServerTest, CanConnectFromMappedIPv6ToIPv4Any) {
+  CrossFamilyConnectionTest(SocketAddress("::ffff:127.0.0.2", 0),
+                            SocketAddress("0.0.0.0", 5000),
+                            true);
+}
+
+// Fails.
+TEST_F(VirtualSocketServerTest, CantConnectFromUnMappedIPv6ToIPv4Any) {
+  CrossFamilyConnectionTest(SocketAddress("::2", 0),
+                            SocketAddress("0.0.0.0", 5000),
+                            false);
+}
+
+// Fails.
+TEST_F(VirtualSocketServerTest, CantConnectFromUnMappedIPv6ToMappedIPv6) {
+  CrossFamilyConnectionTest(SocketAddress("::2", 0),
+                            SocketAddress("::ffff:127.0.0.1", 5000),
+                            false);
+}
+
+// Works. receiving socket sees ::ffff:127.0.0.2.
+TEST_F(VirtualSocketServerTest, CanConnectFromIPv4ToIPv6Any) {
+  CrossFamilyConnectionTest(SocketAddress("127.0.0.2", 0),
+                            SocketAddress("::", 5000),
+                            true);
+}
+
+// Fails.
+TEST_F(VirtualSocketServerTest, CantConnectFromIPv4ToUnMappedIPv6) {
+  CrossFamilyConnectionTest(SocketAddress("127.0.0.2", 0),
+                            SocketAddress("::1", 5000),
+                            false);
+}
+
+// Works. Receiving socket sees ::ffff:127.0.0.1.
+TEST_F(VirtualSocketServerTest, CanConnectFromIPv4ToMappedIPv6) {
+  CrossFamilyConnectionTest(SocketAddress("127.0.0.1", 0),
+                            SocketAddress("::ffff:127.0.0.2", 5000),
+                            true);
+}
+
+// Works, receiving socket sees a result from GetNextIP.
+TEST_F(VirtualSocketServerTest, CanConnectFromUnboundIPv6ToIPv4Any) {
+  CrossFamilyConnectionTest(SocketAddress("::", 0),
+                            SocketAddress("0.0.0.0", 5000),
+                            true);
+}
+
+// Works, receiving socket sees whatever GetNextIP gave the client.
+TEST_F(VirtualSocketServerTest, CanConnectFromUnboundIPv4ToIPv6Any) {
+  CrossFamilyConnectionTest(SocketAddress("0.0.0.0", 0),
+                            SocketAddress("::", 5000),
+                            true);
+}
+
+TEST_F(VirtualSocketServerTest, CanSendDatagramFromUnboundIPv4ToIPv6Any) {
+  CrossFamilyDatagramTest(SocketAddress("0.0.0.0", 0),
+                          SocketAddress("::", 5000),
+                          true);
+}
+
+TEST_F(VirtualSocketServerTest, CanSendDatagramFromMappedIPv6ToIPv4Any) {
+  CrossFamilyDatagramTest(SocketAddress("::ffff:127.0.0.1", 0),
+                          SocketAddress("0.0.0.0", 5000),
+                          true);
+}
+
+TEST_F(VirtualSocketServerTest, CantSendDatagramFromUnMappedIPv6ToIPv4Any) {
+  CrossFamilyDatagramTest(SocketAddress("::2", 0),
+                          SocketAddress("0.0.0.0", 5000),
+                          false);
+}
+
+TEST_F(VirtualSocketServerTest, CantSendDatagramFromUnMappedIPv6ToMappedIPv6) {
+  CrossFamilyDatagramTest(SocketAddress("::2", 0),
+                          SocketAddress("::ffff:127.0.0.1", 5000),
+                          false);
+}
+
+TEST_F(VirtualSocketServerTest, CanSendDatagramFromIPv4ToIPv6Any) {
+  CrossFamilyDatagramTest(SocketAddress("127.0.0.2", 0),
+                          SocketAddress("::", 5000),
+                          true);
+}
+
+TEST_F(VirtualSocketServerTest, CantSendDatagramFromIPv4ToUnMappedIPv6) {
+  CrossFamilyDatagramTest(SocketAddress("127.0.0.2", 0),
+                          SocketAddress("::1", 5000),
+                          false);
+}
+
+TEST_F(VirtualSocketServerTest, CanSendDatagramFromIPv4ToMappedIPv6) {
+  CrossFamilyDatagramTest(SocketAddress("127.0.0.1", 0),
+                          SocketAddress("::ffff:127.0.0.2", 5000),
+                          true);
+}
+
+TEST_F(VirtualSocketServerTest, CanSendDatagramFromUnboundIPv6ToIPv4Any) {
+  CrossFamilyDatagramTest(SocketAddress("::", 0),
+                          SocketAddress("0.0.0.0", 5000),
+                          true);
+}
+
+TEST_F(VirtualSocketServerTest, SetSendingBlockedWithUdpSocket) {
+  AsyncSocket* socket1 =
+      ss_.CreateAsyncSocket(kIPv4AnyAddress.family(), SOCK_DGRAM);
+  std::unique_ptr<AsyncSocket> socket2 =
+      WrapUnique(ss_.CreateAsyncSocket(kIPv4AnyAddress.family(), SOCK_DGRAM));
+  socket1->Bind(kIPv4AnyAddress);
+  socket2->Bind(kIPv4AnyAddress);
+  auto client1 =
+      MakeUnique<TestClient>(MakeUnique<AsyncUDPSocket>(socket1), &fake_clock_);
+
+  ss_.SetSendingBlocked(true);
+  EXPECT_EQ(-1, client1->SendTo("foo", 3, socket2->GetLocalAddress()));
+  EXPECT_TRUE(socket1->IsBlocking());
+  EXPECT_EQ(0, client1->ready_to_send_count());
+
+  ss_.SetSendingBlocked(false);
+  EXPECT_EQ(1, client1->ready_to_send_count());
+  EXPECT_EQ(3, client1->SendTo("foo", 3, socket2->GetLocalAddress()));
+}
+
+TEST_F(VirtualSocketServerTest, SetSendingBlockedWithTcpSocket) {
+  constexpr size_t kBufferSize = 1024;
+  ss_.set_send_buffer_capacity(kBufferSize);
+  ss_.set_recv_buffer_capacity(kBufferSize);
+
+  StreamSink sink;
+  std::unique_ptr<AsyncSocket> socket1 =
+      WrapUnique(ss_.CreateAsyncSocket(kIPv4AnyAddress.family(), SOCK_STREAM));
+  std::unique_ptr<AsyncSocket> socket2 =
+      WrapUnique(ss_.CreateAsyncSocket(kIPv4AnyAddress.family(), SOCK_STREAM));
+  sink.Monitor(socket1.get());
+  sink.Monitor(socket2.get());
+  socket1->Bind(kIPv4AnyAddress);
+  socket2->Bind(kIPv4AnyAddress);
+
+  // Connect sockets.
+  EXPECT_EQ(0, socket1->Connect(socket2->GetLocalAddress()));
+  EXPECT_EQ(0, socket2->Connect(socket1->GetLocalAddress()));
+  ss_.ProcessMessagesUntilIdle();
+
+  char data[kBufferSize] = {};
+
+  // First Send call will fill the send buffer but not send anything.
+  ss_.SetSendingBlocked(true);
+  EXPECT_EQ(static_cast<int>(kBufferSize), socket1->Send(data, kBufferSize));
+  ss_.ProcessMessagesUntilIdle();
+  EXPECT_FALSE(sink.Check(socket1.get(), SSE_WRITE));
+  EXPECT_FALSE(sink.Check(socket2.get(), SSE_READ));
+  EXPECT_FALSE(socket1->IsBlocking());
+
+  // Since the send buffer is full, next Send will result in EWOULDBLOCK.
+  EXPECT_EQ(-1, socket1->Send(data, kBufferSize));
+  EXPECT_FALSE(sink.Check(socket1.get(), SSE_WRITE));
+  EXPECT_FALSE(sink.Check(socket2.get(), SSE_READ));
+  EXPECT_TRUE(socket1->IsBlocking());
+
+  // When sending is unblocked, the buffered data should be sent and
+  // SignalWriteEvent should fire.
+  ss_.SetSendingBlocked(false);
+  ss_.ProcessMessagesUntilIdle();
+  EXPECT_TRUE(sink.Check(socket1.get(), SSE_WRITE));
+  EXPECT_TRUE(sink.Check(socket2.get(), SSE_READ));
+}
+
+TEST_F(VirtualSocketServerTest, CreatesStandardDistribution) {
+  const uint32_t kTestMean[] = {10, 100, 333, 1000};
+  const double kTestDev[] = { 0.25, 0.1, 0.01 };
+  // TODO(deadbeef): The current code only works for 1000 data points or more.
+  const uint32_t kTestSamples[] = {/*10, 100,*/ 1000};
+  for (size_t midx = 0; midx < arraysize(kTestMean); ++midx) {
+    for (size_t didx = 0; didx < arraysize(kTestDev); ++didx) {
+      for (size_t sidx = 0; sidx < arraysize(kTestSamples); ++sidx) {
+        ASSERT_LT(0u, kTestSamples[sidx]);
+        const uint32_t kStdDev =
+            static_cast<uint32_t>(kTestDev[didx] * kTestMean[midx]);
+        VirtualSocketServer::Function* f =
+            VirtualSocketServer::CreateDistribution(kTestMean[midx],
+                                                    kStdDev,
+                                                    kTestSamples[sidx]);
+        ASSERT_TRUE(nullptr != f);
+        ASSERT_EQ(kTestSamples[sidx], f->size());
+        double sum = 0;
+        for (uint32_t i = 0; i < f->size(); ++i) {
+          sum += (*f)[i].second;
+        }
+        const double mean = sum / f->size();
+        double sum_sq_dev = 0;
+        for (uint32_t i = 0; i < f->size(); ++i) {
+          double dev = (*f)[i].second - mean;
+          sum_sq_dev += dev * dev;
+        }
+        const double stddev = sqrt(sum_sq_dev / f->size());
+        EXPECT_NEAR(kTestMean[midx], mean, 0.1 * kTestMean[midx])
+          << "M=" << kTestMean[midx]
+          << " SD=" << kStdDev
+          << " N=" << kTestSamples[sidx];
+        EXPECT_NEAR(kStdDev, stddev, 0.1 * kStdDev)
+          << "M=" << kTestMean[midx]
+          << " SD=" << kStdDev
+          << " N=" << kTestSamples[sidx];
+        delete f;
+      }
+    }
+  }
+}
diff --git a/rtc_base/virtualsocketserver.cc b/rtc_base/virtualsocketserver.cc
new file mode 100644
index 0000000..d8771e7
--- /dev/null
+++ b/rtc_base/virtualsocketserver.cc
@@ -0,0 +1,1226 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/virtualsocketserver.h"
+
+#include <errno.h>
+#include <math.h>
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/fakeclock.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/physicalsocketserver.h"
+#include "rtc_base/socketaddresspair.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/timeutils.h"
+
+namespace rtc {
+#if defined(WEBRTC_WIN)
+const in_addr kInitialNextIPv4 = { { { 0x01, 0, 0, 0 } } };
+#else
+// This value is entirely arbitrary, hence the lack of concern about endianness.
+const in_addr kInitialNextIPv4 = { 0x01000000 };
+#endif
+// Starts at ::2 so as to not cause confusion with ::1.
+const in6_addr kInitialNextIPv6 = { { {
+      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2
+    } } };
+
+const uint16_t kFirstEphemeralPort = 49152;
+const uint16_t kLastEphemeralPort = 65535;
+const uint16_t kEphemeralPortCount =
+    kLastEphemeralPort - kFirstEphemeralPort + 1;
+const uint32_t kDefaultNetworkCapacity = 64 * 1024;
+const uint32_t kDefaultTcpBufferSize = 32 * 1024;
+
+const uint32_t UDP_HEADER_SIZE = 28;  // IP + UDP headers
+const uint32_t TCP_HEADER_SIZE = 40;  // IP + TCP headers
+const uint32_t TCP_MSS = 1400;        // Maximum segment size
+
+// Note: The current algorithm doesn't work for sample sizes smaller than this.
+const int NUM_SAMPLES = 1000;
+
+enum {
+  MSG_ID_PACKET,
+  MSG_ID_ADDRESS_BOUND,
+  MSG_ID_CONNECT,
+  MSG_ID_DISCONNECT,
+  MSG_ID_SIGNALREADEVENT,
+};
+
+// Packets are passed between sockets as messages.  We copy the data just like
+// the kernel does.
+class Packet : public MessageData {
+ public:
+  Packet(const char* data, size_t size, const SocketAddress& from)
+        : size_(size), consumed_(0), from_(from) {
+    RTC_DCHECK(nullptr != data);
+    data_ = new char[size_];
+    memcpy(data_, data, size_);
+  }
+
+  ~Packet() override {
+    delete[] data_;
+  }
+
+  const char* data() const { return data_ + consumed_; }
+  size_t size() const { return size_ - consumed_; }
+  const SocketAddress& from() const { return from_; }
+
+  // Remove the first size bytes from the data.
+  void Consume(size_t size) {
+    RTC_DCHECK(size + consumed_ < size_);
+    consumed_ += size;
+  }
+
+ private:
+  char* data_;
+  size_t size_, consumed_;
+  SocketAddress from_;
+};
+
+struct MessageAddress : public MessageData {
+  explicit MessageAddress(const SocketAddress& a) : addr(a) { }
+  SocketAddress addr;
+};
+
+VirtualSocket::VirtualSocket(VirtualSocketServer* server,
+                             int family,
+                             int type,
+                             bool async)
+    : server_(server),
+      type_(type),
+      async_(async),
+      state_(CS_CLOSED),
+      error_(0),
+      listen_queue_(nullptr),
+      network_size_(0),
+      recv_buffer_size_(0),
+      bound_(false),
+      was_any_(false) {
+  RTC_DCHECK((type_ == SOCK_DGRAM) || (type_ == SOCK_STREAM));
+  RTC_DCHECK(async_ ||
+             (type_ != SOCK_STREAM));  // We only support async streams
+  server->SignalReadyToSend.connect(this,
+                                    &VirtualSocket::OnSocketServerReadyToSend);
+}
+
+VirtualSocket::~VirtualSocket() {
+  Close();
+
+  for (RecvBuffer::iterator it = recv_buffer_.begin(); it != recv_buffer_.end();
+       ++it) {
+    delete *it;
+  }
+}
+
+SocketAddress VirtualSocket::GetLocalAddress() const {
+  return local_addr_;
+}
+
+SocketAddress VirtualSocket::GetRemoteAddress() const {
+  return remote_addr_;
+}
+
+void VirtualSocket::SetLocalAddress(const SocketAddress& addr) {
+  local_addr_ = addr;
+}
+
+int VirtualSocket::Bind(const SocketAddress& addr) {
+  if (!local_addr_.IsNil()) {
+    error_ = EINVAL;
+    return -1;
+  }
+  local_addr_ = addr;
+  int result = server_->Bind(this, &local_addr_);
+  if (result != 0) {
+    local_addr_.Clear();
+    error_ = EADDRINUSE;
+  } else {
+    bound_ = true;
+    was_any_ = addr.IsAnyIP();
+    // Post a message here such that test case could have chance to
+    // process the local address. (i.e. SetAlternativeLocalAddress).
+    server_->msg_queue_->Post(RTC_FROM_HERE, this, MSG_ID_ADDRESS_BOUND);
+  }
+  return result;
+}
+
+int VirtualSocket::Connect(const SocketAddress& addr) {
+  return InitiateConnect(addr, true);
+}
+
+int VirtualSocket::Close() {
+  if (!local_addr_.IsNil() && bound_) {
+    // Remove from the binding table.
+    server_->Unbind(local_addr_, this);
+    bound_ = false;
+  }
+
+  if (SOCK_STREAM == type_) {
+    // Cancel pending sockets
+    if (listen_queue_) {
+      while (!listen_queue_->empty()) {
+        SocketAddress addr = listen_queue_->front();
+
+        // Disconnect listening socket.
+        server_->Disconnect(server_->LookupBinding(addr));
+        listen_queue_->pop_front();
+      }
+      delete listen_queue_;
+      listen_queue_ = nullptr;
+    }
+    // Disconnect stream sockets
+    if (CS_CONNECTED == state_) {
+      // Disconnect remote socket, check if it is a child of a server socket.
+      VirtualSocket* socket =
+          server_->LookupConnection(local_addr_, remote_addr_);
+      if (!socket) {
+        // Not a server socket child, then see if it is bound.
+        // TODO(tbd): If this is indeed a server socket that has no
+        // children this will cause the server socket to be
+        // closed. This might lead to unexpected results, how to fix this?
+        socket = server_->LookupBinding(remote_addr_);
+      }
+      server_->Disconnect(socket);
+
+      // Remove mapping for both directions.
+      server_->RemoveConnection(remote_addr_, local_addr_);
+      server_->RemoveConnection(local_addr_, remote_addr_);
+    }
+    // Cancel potential connects
+    MessageList msgs;
+    if (server_->msg_queue_) {
+      server_->msg_queue_->Clear(this, MSG_ID_CONNECT, &msgs);
+    }
+    for (MessageList::iterator it = msgs.begin(); it != msgs.end(); ++it) {
+      RTC_DCHECK(nullptr != it->pdata);
+      MessageAddress* data = static_cast<MessageAddress*>(it->pdata);
+
+      // Lookup remote side.
+      VirtualSocket* socket =
+          server_->LookupConnection(local_addr_, data->addr);
+      if (socket) {
+        // Server socket, remote side is a socket retreived by
+        // accept. Accepted sockets are not bound so we will not
+        // find it by looking in the bindings table.
+        server_->Disconnect(socket);
+        server_->RemoveConnection(local_addr_, data->addr);
+      } else {
+        server_->Disconnect(server_->LookupBinding(data->addr));
+      }
+      delete data;
+    }
+    // Clear incoming packets and disconnect messages
+    if (server_->msg_queue_) {
+      server_->msg_queue_->Clear(this);
+    }
+  }
+
+  state_ = CS_CLOSED;
+  local_addr_.Clear();
+  remote_addr_.Clear();
+  return 0;
+}
+
+int VirtualSocket::Send(const void* pv, size_t cb) {
+    if (CS_CONNECTED != state_) {
+      error_ = ENOTCONN;
+      return -1;
+    }
+    if (SOCK_DGRAM == type_) {
+      return SendUdp(pv, cb, remote_addr_);
+    } else {
+      return SendTcp(pv, cb);
+    }
+}
+
+int VirtualSocket::SendTo(const void* pv,
+                          size_t cb,
+                          const SocketAddress& addr) {
+  if (SOCK_DGRAM == type_) {
+    return SendUdp(pv, cb, addr);
+  } else {
+    if (CS_CONNECTED != state_) {
+      error_ = ENOTCONN;
+      return -1;
+    }
+    return SendTcp(pv, cb);
+  }
+}
+
+int VirtualSocket::Recv(void* pv, size_t cb, int64_t* timestamp) {
+  SocketAddress addr;
+  return RecvFrom(pv, cb, &addr, timestamp);
+}
+
+int VirtualSocket::RecvFrom(void* pv,
+                            size_t cb,
+                            SocketAddress* paddr,
+                            int64_t* timestamp) {
+  if (timestamp) {
+    *timestamp = -1;
+  }
+  // If we don't have a packet, then either error or wait for one to arrive.
+  if (recv_buffer_.empty()) {
+    if (async_) {
+      error_ = EAGAIN;
+      return -1;
+    }
+    while (recv_buffer_.empty()) {
+      Message msg;
+      server_->msg_queue_->Get(&msg);
+      server_->msg_queue_->Dispatch(&msg);
+    }
+  }
+
+  // Return the packet at the front of the queue.
+  Packet* packet = recv_buffer_.front();
+  size_t data_read = std::min(cb, packet->size());
+  memcpy(pv, packet->data(), data_read);
+  *paddr = packet->from();
+
+  if (data_read < packet->size()) {
+    packet->Consume(data_read);
+  } else {
+    recv_buffer_.pop_front();
+    delete packet;
+  }
+
+  // To behave like a real socket, SignalReadEvent should fire in the next
+  // message loop pass if there's still data buffered.
+  if (!recv_buffer_.empty()) {
+    // Clear the message so it doesn't end up posted multiple times.
+    server_->msg_queue_->Clear(this, MSG_ID_SIGNALREADEVENT);
+    server_->msg_queue_->Post(RTC_FROM_HERE, this, MSG_ID_SIGNALREADEVENT);
+  }
+
+  if (SOCK_STREAM == type_) {
+    bool was_full = (recv_buffer_size_ == server_->recv_buffer_capacity_);
+    recv_buffer_size_ -= data_read;
+    if (was_full) {
+      VirtualSocket* sender = server_->LookupBinding(remote_addr_);
+      RTC_DCHECK(nullptr != sender);
+      server_->SendTcp(sender);
+    }
+  }
+
+  return static_cast<int>(data_read);
+}
+
+int VirtualSocket::Listen(int backlog) {
+  RTC_DCHECK(SOCK_STREAM == type_);
+  RTC_DCHECK(CS_CLOSED == state_);
+  if (local_addr_.IsNil()) {
+    error_ = EINVAL;
+    return -1;
+  }
+  RTC_DCHECK(nullptr == listen_queue_);
+  listen_queue_ = new ListenQueue;
+  state_ = CS_CONNECTING;
+  return 0;
+}
+
+VirtualSocket* VirtualSocket::Accept(SocketAddress* paddr) {
+  if (nullptr == listen_queue_) {
+    error_ = EINVAL;
+    return nullptr;
+  }
+  while (!listen_queue_->empty()) {
+    VirtualSocket* socket = new VirtualSocket(server_, AF_INET, type_, async_);
+
+    // Set the new local address to the same as this server socket.
+    socket->SetLocalAddress(local_addr_);
+    // Sockets made from a socket that 'was Any' need to inherit that.
+    socket->set_was_any(was_any_);
+    SocketAddress remote_addr(listen_queue_->front());
+    int result = socket->InitiateConnect(remote_addr, false);
+    listen_queue_->pop_front();
+    if (result != 0) {
+      delete socket;
+      continue;
+    }
+    socket->CompleteConnect(remote_addr, false);
+    if (paddr) {
+      *paddr = remote_addr;
+    }
+    return socket;
+  }
+  error_ = EWOULDBLOCK;
+  return nullptr;
+}
+
+int VirtualSocket::GetError() const {
+  return error_;
+}
+
+void VirtualSocket::SetError(int error) {
+  error_ = error;
+}
+
+Socket::ConnState VirtualSocket::GetState() const {
+  return state_;
+}
+
+int VirtualSocket::GetOption(Option opt, int* value) {
+  OptionsMap::const_iterator it = options_map_.find(opt);
+  if (it == options_map_.end()) {
+    return -1;
+  }
+  *value = it->second;
+  return 0;  // 0 is success to emulate getsockopt()
+}
+
+int VirtualSocket::SetOption(Option opt, int value) {
+  options_map_[opt] = value;
+  return 0;  // 0 is success to emulate setsockopt()
+}
+
+void VirtualSocket::OnMessage(Message* pmsg) {
+  if (pmsg->message_id == MSG_ID_PACKET) {
+    RTC_DCHECK(nullptr != pmsg->pdata);
+    Packet* packet = static_cast<Packet*>(pmsg->pdata);
+
+    recv_buffer_.push_back(packet);
+
+    if (async_) {
+      SignalReadEvent(this);
+    }
+  } else if (pmsg->message_id == MSG_ID_CONNECT) {
+    RTC_DCHECK(nullptr != pmsg->pdata);
+    MessageAddress* data = static_cast<MessageAddress*>(pmsg->pdata);
+    if (listen_queue_ != nullptr) {
+      listen_queue_->push_back(data->addr);
+      if (async_) {
+        SignalReadEvent(this);
+      }
+    } else if ((SOCK_STREAM == type_) && (CS_CONNECTING == state_)) {
+      CompleteConnect(data->addr, true);
+    } else {
+      RTC_LOG(LS_VERBOSE) << "Socket at " << local_addr_ << " is not listening";
+      server_->Disconnect(server_->LookupBinding(data->addr));
+    }
+    delete data;
+  } else if (pmsg->message_id == MSG_ID_DISCONNECT) {
+    RTC_DCHECK(SOCK_STREAM == type_);
+    if (CS_CLOSED != state_) {
+      int error = (CS_CONNECTING == state_) ? ECONNREFUSED : 0;
+      state_ = CS_CLOSED;
+      remote_addr_.Clear();
+      if (async_) {
+        SignalCloseEvent(this, error);
+      }
+    }
+  } else if (pmsg->message_id == MSG_ID_ADDRESS_BOUND) {
+    SignalAddressReady(this, GetLocalAddress());
+  } else if (pmsg->message_id == MSG_ID_SIGNALREADEVENT) {
+    if (!recv_buffer_.empty()) {
+      SignalReadEvent(this);
+    }
+  } else {
+    RTC_NOTREACHED();
+  }
+}
+
+int VirtualSocket::InitiateConnect(const SocketAddress& addr, bool use_delay) {
+  if (!remote_addr_.IsNil()) {
+    error_ = (CS_CONNECTED == state_) ? EISCONN : EINPROGRESS;
+    return -1;
+  }
+  if (local_addr_.IsNil()) {
+    // If there's no local address set, grab a random one in the correct AF.
+    int result = 0;
+    if (addr.ipaddr().family() == AF_INET) {
+      result = Bind(SocketAddress("0.0.0.0", 0));
+    } else if (addr.ipaddr().family() == AF_INET6) {
+      result = Bind(SocketAddress("::", 0));
+    }
+    if (result != 0) {
+      return result;
+    }
+  }
+  if (type_ == SOCK_DGRAM) {
+    remote_addr_ = addr;
+    state_ = CS_CONNECTED;
+  } else {
+    int result = server_->Connect(this, addr, use_delay);
+    if (result != 0) {
+      error_ = EHOSTUNREACH;
+      return -1;
+    }
+    state_ = CS_CONNECTING;
+  }
+  return 0;
+}
+
+void VirtualSocket::CompleteConnect(const SocketAddress& addr, bool notify) {
+  RTC_DCHECK(CS_CONNECTING == state_);
+  remote_addr_ = addr;
+  state_ = CS_CONNECTED;
+  server_->AddConnection(remote_addr_, local_addr_, this);
+  if (async_ && notify) {
+    SignalConnectEvent(this);
+  }
+}
+
+int VirtualSocket::SendUdp(const void* pv,
+                           size_t cb,
+                           const SocketAddress& addr) {
+  // If we have not been assigned a local port, then get one.
+  if (local_addr_.IsNil()) {
+    local_addr_ = EmptySocketAddressWithFamily(addr.ipaddr().family());
+    int result = server_->Bind(this, &local_addr_);
+    if (result != 0) {
+      local_addr_.Clear();
+      error_ = EADDRINUSE;
+      return result;
+    }
+  }
+
+  // Send the data in a message to the appropriate socket.
+  return server_->SendUdp(this, static_cast<const char*>(pv), cb, addr);
+}
+
+int VirtualSocket::SendTcp(const void* pv, size_t cb) {
+  size_t capacity = server_->send_buffer_capacity_ - send_buffer_.size();
+  if (0 == capacity) {
+    ready_to_send_ = false;
+    error_ = EWOULDBLOCK;
+    return -1;
+  }
+  size_t consumed = std::min(cb, capacity);
+  const char* cpv = static_cast<const char*>(pv);
+  send_buffer_.insert(send_buffer_.end(), cpv, cpv + consumed);
+  server_->SendTcp(this);
+  return static_cast<int>(consumed);
+}
+
+void VirtualSocket::OnSocketServerReadyToSend() {
+  if (ready_to_send_) {
+    // This socket didn't encounter EWOULDBLOCK, so there's nothing to do.
+    return;
+  }
+  if (type_ == SOCK_DGRAM) {
+    ready_to_send_ = true;
+    SignalWriteEvent(this);
+  } else {
+    RTC_DCHECK(type_ == SOCK_STREAM);
+    // This will attempt to empty the full send buffer, and will fire
+    // SignalWriteEvent if successful.
+    server_->SendTcp(this);
+  }
+}
+
+VirtualSocketServer::VirtualSocketServer() : VirtualSocketServer(nullptr) {}
+
+VirtualSocketServer::VirtualSocketServer(FakeClock* fake_clock)
+    : fake_clock_(fake_clock),
+      wakeup_(/*manual_reset=*/false, /*initially_signaled=*/false),
+      msg_queue_(nullptr),
+      stop_on_idle_(false),
+      next_ipv4_(kInitialNextIPv4),
+      next_ipv6_(kInitialNextIPv6),
+      next_port_(kFirstEphemeralPort),
+      bindings_(new AddressMap()),
+      connections_(new ConnectionMap()),
+      bandwidth_(0),
+      network_capacity_(kDefaultNetworkCapacity),
+      send_buffer_capacity_(kDefaultTcpBufferSize),
+      recv_buffer_capacity_(kDefaultTcpBufferSize),
+      delay_mean_(0),
+      delay_stddev_(0),
+      delay_samples_(NUM_SAMPLES),
+      drop_prob_(0.0) {
+  UpdateDelayDistribution();
+}
+
+VirtualSocketServer::~VirtualSocketServer() {
+  delete bindings_;
+  delete connections_;
+}
+
+IPAddress VirtualSocketServer::GetNextIP(int family) {
+  if (family == AF_INET) {
+    IPAddress next_ip(next_ipv4_);
+    next_ipv4_.s_addr =
+        HostToNetwork32(NetworkToHost32(next_ipv4_.s_addr) + 1);
+    return next_ip;
+  } else if (family == AF_INET6) {
+    IPAddress next_ip(next_ipv6_);
+    uint32_t* as_ints = reinterpret_cast<uint32_t*>(&next_ipv6_.s6_addr);
+    as_ints[3] += 1;
+    return next_ip;
+  }
+  return IPAddress();
+}
+
+uint16_t VirtualSocketServer::GetNextPort() {
+  uint16_t port = next_port_;
+  if (next_port_ < kLastEphemeralPort) {
+    ++next_port_;
+  } else {
+    next_port_ = kFirstEphemeralPort;
+  }
+  return port;
+}
+
+void VirtualSocketServer::SetSendingBlocked(bool blocked) {
+  if (blocked == sending_blocked_) {
+    // Unchanged; nothing to do.
+    return;
+  }
+  sending_blocked_ = blocked;
+  if (!sending_blocked_) {
+    // Sending was blocked, but is now unblocked. This signal gives sockets a
+    // chance to fire SignalWriteEvent, and for TCP, send buffered data.
+    SignalReadyToSend();
+  }
+}
+
+Socket* VirtualSocketServer::CreateSocket(int type) {
+  return CreateSocket(AF_INET, type);
+}
+
+Socket* VirtualSocketServer::CreateSocket(int family, int type) {
+  return CreateSocketInternal(family, type);
+}
+
+AsyncSocket* VirtualSocketServer::CreateAsyncSocket(int type) {
+  return CreateAsyncSocket(AF_INET, type);
+}
+
+AsyncSocket* VirtualSocketServer::CreateAsyncSocket(int family, int type) {
+  return CreateSocketInternal(family, type);
+}
+
+VirtualSocket* VirtualSocketServer::CreateSocketInternal(int family, int type) {
+  VirtualSocket* socket = new VirtualSocket(this, family, type, true);
+  SignalSocketCreated(socket);
+  return socket;
+}
+
+void VirtualSocketServer::SetMessageQueue(MessageQueue* msg_queue) {
+  msg_queue_ = msg_queue;
+  if (msg_queue_) {
+    msg_queue_->SignalQueueDestroyed.connect(this,
+        &VirtualSocketServer::OnMessageQueueDestroyed);
+  }
+}
+
+bool VirtualSocketServer::Wait(int cmsWait, bool process_io) {
+  RTC_DCHECK(msg_queue_ == Thread::Current());
+  if (stop_on_idle_ && Thread::Current()->empty()) {
+    return false;
+  }
+  // Note: we don't need to do anything with |process_io| since we don't have
+  // any real I/O. Received packets come in the form of queued messages, so
+  // MessageQueue will ensure WakeUp is called if another thread sends a
+  // packet.
+  wakeup_.Wait(cmsWait);
+  return true;
+}
+
+void VirtualSocketServer::WakeUp() {
+  wakeup_.Set();
+}
+
+void VirtualSocketServer::SetAlternativeLocalAddress(
+    const rtc::IPAddress& address,
+    const rtc::IPAddress& alternative) {
+  alternative_address_mapping_[address] = alternative;
+}
+
+bool VirtualSocketServer::ProcessMessagesUntilIdle() {
+  RTC_DCHECK(msg_queue_ == Thread::Current());
+  stop_on_idle_ = true;
+  while (!msg_queue_->empty()) {
+    if (fake_clock_) {
+      // If using a fake clock, advance it in millisecond increments until the
+      // queue is empty.
+      fake_clock_->AdvanceTime(rtc::TimeDelta::FromMilliseconds(1));
+    } else {
+      // Otherwise, run a normal message loop.
+      Message msg;
+      if (msg_queue_->Get(&msg, Thread::kForever)) {
+        msg_queue_->Dispatch(&msg);
+      }
+    }
+  }
+  stop_on_idle_ = false;
+  return !msg_queue_->IsQuitting();
+}
+
+void VirtualSocketServer::SetNextPortForTesting(uint16_t port) {
+  next_port_ = port;
+}
+
+bool VirtualSocketServer::CloseTcpConnections(
+    const SocketAddress& addr_local,
+    const SocketAddress& addr_remote) {
+  VirtualSocket* socket = LookupConnection(addr_local, addr_remote);
+  if (!socket) {
+    return false;
+  }
+  // Signal the close event on the local connection first.
+  socket->SignalCloseEvent(socket, 0);
+
+  // Trigger the remote connection's close event.
+  socket->Close();
+
+  return true;
+}
+
+int VirtualSocketServer::Bind(VirtualSocket* socket,
+                              const SocketAddress& addr) {
+  RTC_DCHECK(nullptr != socket);
+  // Address must be completely specified at this point
+  RTC_DCHECK(!IPIsUnspec(addr.ipaddr()));
+  RTC_DCHECK(addr.port() != 0);
+
+  // Normalize the address (turns v6-mapped addresses into v4-addresses).
+  SocketAddress normalized(addr.ipaddr().Normalized(), addr.port());
+
+  AddressMap::value_type entry(normalized, socket);
+  return bindings_->insert(entry).second ? 0 : -1;
+}
+
+int VirtualSocketServer::Bind(VirtualSocket* socket, SocketAddress* addr) {
+  RTC_DCHECK(nullptr != socket);
+
+  // Normalize the IP.
+  if (!IPIsUnspec(addr->ipaddr())) {
+    addr->SetIP(addr->ipaddr().Normalized());
+  } else {
+    RTC_NOTREACHED();
+  }
+
+  // If the IP appears in |alternative_address_mapping_|, meaning the test has
+  // configured sockets bound to this IP to actually use another IP, replace
+  // the IP here.
+  auto alternative = alternative_address_mapping_.find(addr->ipaddr());
+  if (alternative != alternative_address_mapping_.end()) {
+    addr->SetIP(alternative->second);
+  }
+
+  // Assign a port if not assigned.
+  if (addr->port() == 0) {
+    for (int i = 0; i < kEphemeralPortCount; ++i) {
+      addr->SetPort(GetNextPort());
+      if (bindings_->find(*addr) == bindings_->end()) {
+        break;
+      }
+    }
+  }
+
+  return Bind(socket, *addr);
+}
+
+VirtualSocket* VirtualSocketServer::LookupBinding(const SocketAddress& addr) {
+  SocketAddress normalized(addr.ipaddr().Normalized(),
+                           addr.port());
+  AddressMap::iterator it = bindings_->find(normalized);
+  if (it != bindings_->end()) {
+    return it->second;
+  }
+
+  IPAddress default_ip = GetDefaultRoute(addr.ipaddr().family());
+  if (!IPIsUnspec(default_ip) && addr.ipaddr() == default_ip) {
+    // If we can't find a binding for the packet which is sent to the interface
+    // corresponding to the default route, it should match a binding with the
+    // correct port to the any address.
+    SocketAddress sock_addr =
+        EmptySocketAddressWithFamily(addr.ipaddr().family());
+    sock_addr.SetPort(addr.port());
+    return LookupBinding(sock_addr);
+  }
+
+  return nullptr;
+}
+
+int VirtualSocketServer::Unbind(const SocketAddress& addr,
+                                VirtualSocket* socket) {
+  SocketAddress normalized(addr.ipaddr().Normalized(),
+                           addr.port());
+  RTC_DCHECK((*bindings_)[normalized] == socket);
+  bindings_->erase(bindings_->find(normalized));
+  return 0;
+}
+
+void VirtualSocketServer::AddConnection(const SocketAddress& local,
+                                        const SocketAddress& remote,
+                                        VirtualSocket* remote_socket) {
+  // Add this socket pair to our routing table. This will allow
+  // multiple clients to connect to the same server address.
+  SocketAddress local_normalized(local.ipaddr().Normalized(),
+                                 local.port());
+  SocketAddress remote_normalized(remote.ipaddr().Normalized(),
+                                  remote.port());
+  SocketAddressPair address_pair(local_normalized, remote_normalized);
+  connections_->insert(std::pair<SocketAddressPair,
+                       VirtualSocket*>(address_pair, remote_socket));
+}
+
+VirtualSocket* VirtualSocketServer::LookupConnection(
+    const SocketAddress& local,
+    const SocketAddress& remote) {
+  SocketAddress local_normalized(local.ipaddr().Normalized(),
+                                 local.port());
+  SocketAddress remote_normalized(remote.ipaddr().Normalized(),
+                                  remote.port());
+  SocketAddressPair address_pair(local_normalized, remote_normalized);
+  ConnectionMap::iterator it = connections_->find(address_pair);
+  return (connections_->end() != it) ? it->second : nullptr;
+}
+
+void VirtualSocketServer::RemoveConnection(const SocketAddress& local,
+                                           const SocketAddress& remote) {
+  SocketAddress local_normalized(local.ipaddr().Normalized(),
+                                local.port());
+  SocketAddress remote_normalized(remote.ipaddr().Normalized(),
+                                 remote.port());
+  SocketAddressPair address_pair(local_normalized, remote_normalized);
+  connections_->erase(address_pair);
+}
+
+static double Random() {
+  return static_cast<double>(rand()) / RAND_MAX;
+}
+
+int VirtualSocketServer::Connect(VirtualSocket* socket,
+                                 const SocketAddress& remote_addr,
+                                 bool use_delay) {
+  uint32_t delay = use_delay ? GetTransitDelay(socket) : 0;
+  VirtualSocket* remote = LookupBinding(remote_addr);
+  if (!CanInteractWith(socket, remote)) {
+    RTC_LOG(LS_INFO) << "Address family mismatch between "
+                     << socket->GetLocalAddress() << " and " << remote_addr;
+    return -1;
+  }
+  if (remote != nullptr) {
+    SocketAddress addr = socket->GetLocalAddress();
+    msg_queue_->PostDelayed(RTC_FROM_HERE, delay, remote, MSG_ID_CONNECT,
+                            new MessageAddress(addr));
+  } else {
+    RTC_LOG(LS_INFO) << "No one listening at " << remote_addr;
+    msg_queue_->PostDelayed(RTC_FROM_HERE, delay, socket, MSG_ID_DISCONNECT);
+  }
+  return 0;
+}
+
+bool VirtualSocketServer::Disconnect(VirtualSocket* socket) {
+  if (socket) {
+    // If we simulate packets being delayed, we should simulate the
+    // equivalent of a FIN being delayed as well.
+    uint32_t delay = GetTransitDelay(socket);
+    // Remove the mapping.
+    msg_queue_->PostDelayed(RTC_FROM_HERE, delay, socket, MSG_ID_DISCONNECT);
+    return true;
+  }
+  return false;
+}
+
+int VirtualSocketServer::SendUdp(VirtualSocket* socket,
+                                 const char* data, size_t data_size,
+                                 const SocketAddress& remote_addr) {
+  ++sent_packets_;
+  if (sending_blocked_) {
+    CritScope cs(&socket->crit_);
+    socket->ready_to_send_ = false;
+    socket->error_ = EWOULDBLOCK;
+    return -1;
+  }
+
+  // See if we want to drop this packet.
+  if (Random() < drop_prob_) {
+    RTC_LOG(LS_VERBOSE) << "Dropping packet: bad luck";
+    return static_cast<int>(data_size);
+  }
+
+  VirtualSocket* recipient = LookupBinding(remote_addr);
+  if (!recipient) {
+    // Make a fake recipient for address family checking.
+    std::unique_ptr<VirtualSocket> dummy_socket(
+        CreateSocketInternal(AF_INET, SOCK_DGRAM));
+    dummy_socket->SetLocalAddress(remote_addr);
+    if (!CanInteractWith(socket, dummy_socket.get())) {
+      RTC_LOG(LS_VERBOSE) << "Incompatible address families: "
+                          << socket->GetLocalAddress() << " and "
+                          << remote_addr;
+      return -1;
+    }
+    RTC_LOG(LS_VERBOSE) << "No one listening at " << remote_addr;
+    return static_cast<int>(data_size);
+  }
+
+  if (!CanInteractWith(socket, recipient)) {
+    RTC_LOG(LS_VERBOSE) << "Incompatible address families: "
+                        << socket->GetLocalAddress() << " and " << remote_addr;
+    return -1;
+  }
+
+  {
+    CritScope cs(&socket->crit_);
+
+    int64_t cur_time = TimeMillis();
+    PurgeNetworkPackets(socket, cur_time);
+
+    // Determine whether we have enough bandwidth to accept this packet.  To do
+    // this, we need to update the send queue.  Once we know it's current size,
+    // we know whether we can fit this packet.
+    //
+    // NOTE: There are better algorithms for maintaining such a queue (such as
+    // "Derivative Random Drop"); however, this algorithm is a more accurate
+    // simulation of what a normal network would do.
+
+    size_t packet_size = data_size + UDP_HEADER_SIZE;
+    if (socket->network_size_ + packet_size > network_capacity_) {
+      RTC_LOG(LS_VERBOSE) << "Dropping packet: network capacity exceeded";
+      return static_cast<int>(data_size);
+    }
+
+    AddPacketToNetwork(socket, recipient, cur_time, data, data_size,
+                       UDP_HEADER_SIZE, false);
+
+    return static_cast<int>(data_size);
+  }
+}
+
+void VirtualSocketServer::SendTcp(VirtualSocket* socket) {
+  ++sent_packets_;
+  if (sending_blocked_) {
+    // Eventually the socket's buffer will fill and VirtualSocket::SendTcp will
+    // set EWOULDBLOCK.
+    return;
+  }
+
+  // TCP can't send more data than will fill up the receiver's buffer.
+  // We track the data that is in the buffer plus data in flight using the
+  // recipient's recv_buffer_size_.  Anything beyond that must be stored in the
+  // sender's buffer.  We will trigger the buffered data to be sent when data
+  // is read from the recv_buffer.
+
+  // Lookup the local/remote pair in the connections table.
+  VirtualSocket* recipient = LookupConnection(socket->local_addr_,
+                                              socket->remote_addr_);
+  if (!recipient) {
+    RTC_LOG(LS_VERBOSE) << "Sending data to no one.";
+    return;
+  }
+
+  CritScope cs(&socket->crit_);
+
+  int64_t cur_time = TimeMillis();
+  PurgeNetworkPackets(socket, cur_time);
+
+  while (true) {
+    size_t available = recv_buffer_capacity_ - recipient->recv_buffer_size_;
+    size_t max_data_size =
+        std::min<size_t>(available, TCP_MSS - TCP_HEADER_SIZE);
+    size_t data_size = std::min(socket->send_buffer_.size(), max_data_size);
+    if (0 == data_size)
+      break;
+
+    AddPacketToNetwork(socket, recipient, cur_time, &socket->send_buffer_[0],
+                       data_size, TCP_HEADER_SIZE, true);
+    recipient->recv_buffer_size_ += data_size;
+
+    size_t new_buffer_size = socket->send_buffer_.size() - data_size;
+    // Avoid undefined access beyond the last element of the vector.
+    // This only happens when new_buffer_size is 0.
+    if (data_size < socket->send_buffer_.size()) {
+      // memmove is required for potentially overlapping source/destination.
+      memmove(&socket->send_buffer_[0], &socket->send_buffer_[data_size],
+              new_buffer_size);
+    }
+    socket->send_buffer_.resize(new_buffer_size);
+  }
+
+  if (!socket->ready_to_send_ &&
+      (socket->send_buffer_.size() < send_buffer_capacity_)) {
+    socket->ready_to_send_ = true;
+    socket->SignalWriteEvent(socket);
+  }
+}
+
+void VirtualSocketServer::AddPacketToNetwork(VirtualSocket* sender,
+                                             VirtualSocket* recipient,
+                                             int64_t cur_time,
+                                             const char* data,
+                                             size_t data_size,
+                                             size_t header_size,
+                                             bool ordered) {
+  VirtualSocket::NetworkEntry entry;
+  entry.size = data_size + header_size;
+
+  sender->network_size_ += entry.size;
+  uint32_t send_delay = SendDelay(static_cast<uint32_t>(sender->network_size_));
+  entry.done_time = cur_time + send_delay;
+  sender->network_.push_back(entry);
+
+  // Find the delay for crossing the many virtual hops of the network.
+  uint32_t transit_delay = GetTransitDelay(sender);
+
+  // When the incoming packet is from a binding of the any address, translate it
+  // to the default route here such that the recipient will see the default
+  // route.
+  SocketAddress sender_addr = sender->local_addr_;
+  IPAddress default_ip = GetDefaultRoute(sender_addr.ipaddr().family());
+  if (sender_addr.IsAnyIP() && !IPIsUnspec(default_ip)) {
+    sender_addr.SetIP(default_ip);
+  }
+
+  // Post the packet as a message to be delivered (on our own thread)
+  Packet* p = new Packet(data, data_size, sender_addr);
+
+  int64_t ts = TimeAfter(send_delay + transit_delay);
+  if (ordered) {
+    // Ensure that new packets arrive after previous ones
+    ts = std::max(ts, sender->last_delivery_time_);
+    // A socket should not have both ordered and unordered delivery, so its last
+    // delivery time only needs to be updated when it has ordered delivery.
+    sender->last_delivery_time_ = ts;
+  }
+  msg_queue_->PostAt(RTC_FROM_HERE, ts, recipient, MSG_ID_PACKET, p);
+}
+
+void VirtualSocketServer::PurgeNetworkPackets(VirtualSocket* socket,
+                                              int64_t cur_time) {
+  while (!socket->network_.empty() &&
+         (socket->network_.front().done_time <= cur_time)) {
+    RTC_DCHECK(socket->network_size_ >= socket->network_.front().size);
+    socket->network_size_ -= socket->network_.front().size;
+    socket->network_.pop_front();
+  }
+}
+
+uint32_t VirtualSocketServer::SendDelay(uint32_t size) {
+  if (bandwidth_ == 0)
+    return 0;
+  else
+    return 1000 * size / bandwidth_;
+}
+
+#if 0
+void PrintFunction(std::vector<std::pair<double, double> >* f) {
+  return;
+  double sum = 0;
+  for (uint32_t i = 0; i < f->size(); ++i) {
+    std::cout << (*f)[i].first << '\t' << (*f)[i].second << std::endl;
+    sum += (*f)[i].second;
+  }
+  if (!f->empty()) {
+    const double mean = sum / f->size();
+    double sum_sq_dev = 0;
+    for (uint32_t i = 0; i < f->size(); ++i) {
+      double dev = (*f)[i].second - mean;
+      sum_sq_dev += dev * dev;
+    }
+    std::cout << "Mean = " << mean << " StdDev = "
+              << sqrt(sum_sq_dev / f->size()) << std::endl;
+  }
+}
+#endif  // <unused>
+
+void VirtualSocketServer::UpdateDelayDistribution() {
+  Function* dist = CreateDistribution(delay_mean_, delay_stddev_,
+                                      delay_samples_);
+  // We take a lock just to make sure we don't leak memory.
+  {
+    CritScope cs(&delay_crit_);
+    delay_dist_.reset(dist);
+  }
+}
+
+static double PI = 4 * atan(1.0);
+
+static double Normal(double x, double mean, double stddev) {
+  double a = (x - mean) * (x - mean) / (2 * stddev * stddev);
+  return exp(-a) / (stddev * sqrt(2 * PI));
+}
+
+#if 0  // static unused gives a warning
+static double Pareto(double x, double min, double k) {
+  if (x < min)
+    return 0;
+  else
+    return k * std::pow(min, k) / std::pow(x, k+1);
+}
+#endif
+
+VirtualSocketServer::Function* VirtualSocketServer::CreateDistribution(
+    uint32_t mean,
+    uint32_t stddev,
+    uint32_t samples) {
+  Function* f = new Function();
+
+  if (0 == stddev) {
+    f->push_back(Point(mean, 1.0));
+  } else {
+    double start = 0;
+    if (mean >= 4 * static_cast<double>(stddev))
+      start = mean - 4 * static_cast<double>(stddev);
+    double end = mean + 4 * static_cast<double>(stddev);
+
+    for (uint32_t i = 0; i < samples; i++) {
+      double x = start + (end - start) * i / (samples - 1);
+      double y = Normal(x, mean, stddev);
+      f->push_back(Point(x, y));
+    }
+  }
+  return Resample(Invert(Accumulate(f)), 0, 1, samples);
+}
+
+uint32_t VirtualSocketServer::GetTransitDelay(Socket* socket) {
+  // Use the delay based on the address if it is set.
+  auto iter = delay_by_ip_.find(socket->GetLocalAddress().ipaddr());
+  if (iter != delay_by_ip_.end()) {
+    return static_cast<uint32_t>(iter->second);
+  }
+  // Otherwise, use the delay from the distribution distribution.
+  size_t index = rand() % delay_dist_->size();
+  double delay = (*delay_dist_)[index].second;
+  // RTC_LOG_F(LS_INFO) << "random[" << index << "] = " << delay;
+  return static_cast<uint32_t>(delay);
+}
+
+struct FunctionDomainCmp {
+  bool operator()(const VirtualSocketServer::Point& p1,
+                   const VirtualSocketServer::Point& p2) {
+    return p1.first < p2.first;
+  }
+  bool operator()(double v1, const VirtualSocketServer::Point& p2) {
+    return v1 < p2.first;
+  }
+  bool operator()(const VirtualSocketServer::Point& p1, double v2) {
+    return p1.first < v2;
+  }
+};
+
+VirtualSocketServer::Function* VirtualSocketServer::Accumulate(Function* f) {
+  RTC_DCHECK(f->size() >= 1);
+  double v = 0;
+  for (Function::size_type i = 0; i < f->size() - 1; ++i) {
+    double dx = (*f)[i + 1].first - (*f)[i].first;
+    double avgy = ((*f)[i + 1].second + (*f)[i].second) / 2;
+    (*f)[i].second = v;
+    v = v + dx * avgy;
+  }
+  (*f)[f->size()-1].second = v;
+  return f;
+}
+
+VirtualSocketServer::Function* VirtualSocketServer::Invert(Function* f) {
+  for (Function::size_type i = 0; i < f->size(); ++i)
+    std::swap((*f)[i].first, (*f)[i].second);
+
+  std::sort(f->begin(), f->end(), FunctionDomainCmp());
+  return f;
+}
+
+VirtualSocketServer::Function* VirtualSocketServer::Resample(Function* f,
+                                                             double x1,
+                                                             double x2,
+                                                             uint32_t samples) {
+  Function* g = new Function();
+
+  for (size_t i = 0; i < samples; i++) {
+    double x = x1 + (x2 - x1) * i / (samples - 1);
+    double y = Evaluate(f, x);
+    g->push_back(Point(x, y));
+  }
+
+  delete f;
+  return g;
+}
+
+double VirtualSocketServer::Evaluate(Function* f, double x) {
+  Function::iterator iter =
+      std::lower_bound(f->begin(), f->end(), x, FunctionDomainCmp());
+  if (iter == f->begin()) {
+    return (*f)[0].second;
+  } else if (iter == f->end()) {
+    RTC_DCHECK(f->size() >= 1);
+    return (*f)[f->size() - 1].second;
+  } else if (iter->first == x) {
+    return iter->second;
+  } else {
+    double x1 = (iter - 1)->first;
+    double y1 = (iter - 1)->second;
+    double x2 = iter->first;
+    double y2 = iter->second;
+    return y1 + (y2 - y1) * (x - x1) / (x2 - x1);
+  }
+}
+
+bool VirtualSocketServer::CanInteractWith(VirtualSocket* local,
+                                          VirtualSocket* remote) {
+  if (!local || !remote) {
+    return false;
+  }
+  IPAddress local_ip = local->GetLocalAddress().ipaddr();
+  IPAddress remote_ip = remote->GetLocalAddress().ipaddr();
+  IPAddress local_normalized = local_ip.Normalized();
+  IPAddress remote_normalized = remote_ip.Normalized();
+  // Check if the addresses are the same family after Normalization (turns
+  // mapped IPv6 address into IPv4 addresses).
+  // This will stop unmapped V6 addresses from talking to mapped V6 addresses.
+  if (local_normalized.family() == remote_normalized.family()) {
+    return true;
+  }
+
+  // If ip1 is IPv4 and ip2 is :: and ip2 is not IPV6_V6ONLY.
+  int remote_v6_only = 0;
+  remote->GetOption(Socket::OPT_IPV6_V6ONLY, &remote_v6_only);
+  if (local_ip.family() == AF_INET && !remote_v6_only && IPIsAny(remote_ip)) {
+    return true;
+  }
+  // Same check, backwards.
+  int local_v6_only = 0;
+  local->GetOption(Socket::OPT_IPV6_V6ONLY, &local_v6_only);
+  if (remote_ip.family() == AF_INET && !local_v6_only && IPIsAny(local_ip)) {
+    return true;
+  }
+
+  // Check to see if either socket was explicitly bound to IPv6-any.
+  // These sockets can talk with anyone.
+  if (local_ip.family() == AF_INET6 && local->was_any()) {
+    return true;
+  }
+  if (remote_ip.family() == AF_INET6 && remote->was_any()) {
+    return true;
+  }
+
+  return false;
+}
+
+IPAddress VirtualSocketServer::GetDefaultRoute(int family) {
+  if (family == AF_INET) {
+    return default_route_v4_;
+  }
+  if (family == AF_INET6) {
+    return default_route_v6_;
+  }
+  return IPAddress();
+}
+void VirtualSocketServer::SetDefaultRoute(const IPAddress& from_addr) {
+  RTC_DCHECK(!IPIsAny(from_addr));
+  if (from_addr.family() == AF_INET) {
+    default_route_v4_ = from_addr;
+  } else if (from_addr.family() == AF_INET6) {
+    default_route_v6_ = from_addr;
+  }
+}
+
+}  // namespace rtc
diff --git a/rtc_base/virtualsocketserver.h b/rtc_base/virtualsocketserver.h
new file mode 100644
index 0000000..e17caae
--- /dev/null
+++ b/rtc_base/virtualsocketserver.h
@@ -0,0 +1,412 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_VIRTUALSOCKETSERVER_H_
+#define RTC_BASE_VIRTUALSOCKETSERVER_H_
+
+#include <deque>
+#include <map>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/constructormagic.h"
+#include "rtc_base/event.h"
+#include "rtc_base/fakeclock.h"
+#include "rtc_base/messagequeue.h"
+#include "rtc_base/socketserver.h"
+
+namespace rtc {
+
+class Packet;
+class VirtualSocket;
+class SocketAddressPair;
+
+// Simulates a network in the same manner as a loopback interface.  The
+// interface can create as many addresses as you want.  All of the sockets
+// created by this network will be able to communicate with one another, unless
+// they are bound to addresses from incompatible families.
+class VirtualSocketServer : public SocketServer, public sigslot::has_slots<> {
+ public:
+  VirtualSocketServer();
+  // This constructor needs to be used if the test uses a fake clock and
+  // ProcessMessagesUntilIdle, since ProcessMessagesUntilIdle needs a way of
+  // advancing time.
+  explicit VirtualSocketServer(FakeClock* fake_clock);
+  ~VirtualSocketServer() override;
+
+  // The default route indicates which local address to use when a socket is
+  // bound to the 'any' address, e.g. 0.0.0.0.
+  IPAddress GetDefaultRoute(int family);
+  void SetDefaultRoute(const IPAddress& from_addr);
+
+  // Limits the network bandwidth (maximum bytes per second).  Zero means that
+  // all sends occur instantly.  Defaults to 0.
+  uint32_t bandwidth() const { return bandwidth_; }
+  void set_bandwidth(uint32_t bandwidth) { bandwidth_ = bandwidth; }
+
+  // Limits the amount of data which can be in flight on the network without
+  // packet loss (on a per sender basis).  Defaults to 64 KB.
+  uint32_t network_capacity() const { return network_capacity_; }
+  void set_network_capacity(uint32_t capacity) { network_capacity_ = capacity; }
+
+  // The amount of data which can be buffered by tcp on the sender's side
+  uint32_t send_buffer_capacity() const { return send_buffer_capacity_; }
+  void set_send_buffer_capacity(uint32_t capacity) {
+    send_buffer_capacity_ = capacity;
+  }
+
+  // The amount of data which can be buffered by tcp on the receiver's side
+  uint32_t recv_buffer_capacity() const { return recv_buffer_capacity_; }
+  void set_recv_buffer_capacity(uint32_t capacity) {
+    recv_buffer_capacity_ = capacity;
+  }
+
+  // Controls the (transit) delay for packets sent in the network.  This does
+  // not inclue the time required to sit in the send queue.  Both of these
+  // values are measured in milliseconds.  Defaults to no delay.
+  uint32_t delay_mean() const { return delay_mean_; }
+  uint32_t delay_stddev() const { return delay_stddev_; }
+  uint32_t delay_samples() const { return delay_samples_; }
+  void set_delay_mean(uint32_t delay_mean) { delay_mean_ = delay_mean; }
+  void set_delay_stddev(uint32_t delay_stddev) { delay_stddev_ = delay_stddev; }
+  void set_delay_samples(uint32_t delay_samples) {
+    delay_samples_ = delay_samples;
+  }
+
+  // If the (transit) delay parameters are modified, this method should be
+  // called to recompute the new distribution.
+  void UpdateDelayDistribution();
+
+  // Controls the (uniform) probability that any sent packet is dropped.  This
+  // is separate from calculations to drop based on queue size.
+  double drop_probability() { return drop_prob_; }
+  void set_drop_probability(double drop_prob) {
+    RTC_DCHECK_GE(drop_prob, 0.0);
+    RTC_DCHECK_LE(drop_prob, 1.0);
+    drop_prob_ = drop_prob;
+  }
+
+  // If |blocked| is true, subsequent attempts to send will result in -1 being
+  // returned, with the socket error set to EWOULDBLOCK.
+  //
+  // If this method is later called with |blocked| set to false, any sockets
+  // that previously failed to send with EWOULDBLOCK will emit SignalWriteEvent.
+  //
+  // This can be used to simulate the send buffer on a network interface being
+  // full, and test functionality related to EWOULDBLOCK/SignalWriteEvent.
+  void SetSendingBlocked(bool blocked);
+
+  // SocketFactory:
+  Socket* CreateSocket(int type) override;
+  Socket* CreateSocket(int family, int type) override;
+
+  AsyncSocket* CreateAsyncSocket(int type) override;
+  AsyncSocket* CreateAsyncSocket(int family, int type) override;
+
+  // SocketServer:
+  void SetMessageQueue(MessageQueue* queue) override;
+  bool Wait(int cms, bool process_io) override;
+  void WakeUp() override;
+
+  void SetDelayOnAddress(const rtc::SocketAddress& address, int delay_ms) {
+    delay_by_ip_[address.ipaddr()] = delay_ms;
+  }
+
+  // Used by TurnPortTest and TcpPortTest (for example), to mimic a case where
+  // a proxy returns the local host address instead of the original one the
+  // port was bound against. Please see WebRTC issue 3927 for more detail.
+  //
+  // If SetAlternativeLocalAddress(A, B) is called, then when something
+  // attempts to bind a socket to address A, it will get a socket bound to
+  // address B instead.
+  void SetAlternativeLocalAddress(const rtc::IPAddress& address,
+                                  const rtc::IPAddress& alternative);
+
+  typedef std::pair<double, double> Point;
+  typedef std::vector<Point> Function;
+
+  static Function* CreateDistribution(uint32_t mean,
+                                      uint32_t stddev,
+                                      uint32_t samples);
+
+  // Similar to Thread::ProcessMessages, but it only processes messages until
+  // there are no immediate messages or pending network traffic.  Returns false
+  // if Thread::Stop() was called.
+  bool ProcessMessagesUntilIdle();
+
+  // Sets the next port number to use for testing.
+  void SetNextPortForTesting(uint16_t port);
+
+  // Close a pair of Tcp connections by addresses. Both connections will have
+  // its own OnClose invoked.
+  bool CloseTcpConnections(const SocketAddress& addr_local,
+                           const SocketAddress& addr_remote);
+
+  // Number of packets that clients have attempted to send through this virtual
+  // socket server. Intended to be used for test assertions.
+  uint32_t sent_packets() const { return sent_packets_; }
+
+  // For testing purpose only. Fired when a client socket is created.
+  sigslot::signal1<VirtualSocket*> SignalSocketCreated;
+
+ protected:
+  // Returns a new IP not used before in this network.
+  IPAddress GetNextIP(int family);
+  uint16_t GetNextPort();
+
+  VirtualSocket* CreateSocketInternal(int family, int type);
+
+  // Binds the given socket to addr, assigning and IP and Port if necessary
+  int Bind(VirtualSocket* socket, SocketAddress* addr);
+
+  // Binds the given socket to the given (fully-defined) address.
+  int Bind(VirtualSocket* socket, const SocketAddress& addr);
+
+  // Find the socket bound to the given address
+  VirtualSocket* LookupBinding(const SocketAddress& addr);
+
+  int Unbind(const SocketAddress& addr, VirtualSocket* socket);
+
+  // Adds a mapping between this socket pair and the socket.
+  void AddConnection(const SocketAddress& client,
+                     const SocketAddress& server,
+                     VirtualSocket* socket);
+
+  // Find the socket pair corresponding to this server address.
+  VirtualSocket* LookupConnection(const SocketAddress& client,
+                                  const SocketAddress& server);
+
+  void RemoveConnection(const SocketAddress& client,
+                        const SocketAddress& server);
+
+  // Connects the given socket to the socket at the given address
+  int Connect(VirtualSocket* socket, const SocketAddress& remote_addr,
+              bool use_delay);
+
+  // Sends a disconnect message to the socket at the given address
+  bool Disconnect(VirtualSocket* socket);
+
+  // Sends the given packet to the socket at the given address (if one exists).
+  int SendUdp(VirtualSocket* socket, const char* data, size_t data_size,
+              const SocketAddress& remote_addr);
+
+  // Moves as much data as possible from the sender's buffer to the network
+  void SendTcp(VirtualSocket* socket);
+
+  // Places a packet on the network.
+  void AddPacketToNetwork(VirtualSocket* socket,
+                          VirtualSocket* recipient,
+                          int64_t cur_time,
+                          const char* data,
+                          size_t data_size,
+                          size_t header_size,
+                          bool ordered);
+
+  // Removes stale packets from the network
+  void PurgeNetworkPackets(VirtualSocket* socket, int64_t cur_time);
+
+  // Computes the number of milliseconds required to send a packet of this size.
+  uint32_t SendDelay(uint32_t size);
+
+  // If the delay has been set for the address of the socket, returns the set
+  // delay. Otherwise, returns a random transit delay chosen from the
+  // appropriate distribution.
+  uint32_t GetTransitDelay(Socket* socket);
+
+  // Basic operations on functions.  Those that return a function also take
+  // ownership of the function given (and hence, may modify or delete it).
+  static Function* Accumulate(Function* f);
+  static Function* Invert(Function* f);
+  static Function* Resample(Function* f,
+                            double x1,
+                            double x2,
+                            uint32_t samples);
+  static double Evaluate(Function* f, double x);
+
+  // Null out our message queue if it goes away. Necessary in the case where
+  // our lifetime is greater than that of the thread we are using, since we
+  // try to send Close messages for all connected sockets when we shutdown.
+  void OnMessageQueueDestroyed() { msg_queue_ = nullptr; }
+
+  // Determine if two sockets should be able to communicate.
+  // We don't (currently) specify an address family for sockets; instead,
+  // the currently bound address is used to infer the address family.
+  // Any socket that is not explicitly bound to an IPv4 address is assumed to be
+  // dual-stack capable.
+  // This function tests if two addresses can communicate, as well as the
+  // sockets to which they may be bound (the addresses may or may not yet be
+  // bound to the sockets).
+  // First the addresses are tested (after normalization):
+  // If both have the same family, then communication is OK.
+  // If only one is IPv4 then false, unless the other is bound to ::.
+  // This applies even if the IPv4 address is 0.0.0.0.
+  // The socket arguments are optional; the sockets are checked to see if they
+  // were explicitly bound to IPv6-any ('::'), and if so communication is
+  // permitted.
+  // NB: This scheme doesn't permit non-dualstack IPv6 sockets.
+  static bool CanInteractWith(VirtualSocket* local, VirtualSocket* remote);
+
+ private:
+  friend class VirtualSocket;
+
+  // Sending was previously blocked, but now isn't.
+  sigslot::signal0<> SignalReadyToSend;
+
+  typedef std::map<SocketAddress, VirtualSocket*> AddressMap;
+  typedef std::map<SocketAddressPair, VirtualSocket*> ConnectionMap;
+
+  // May be null if the test doesn't use a fake clock, or it does but doesn't
+  // use ProcessMessagesUntilIdle.
+  FakeClock* fake_clock_ = nullptr;
+
+  // Used to implement Wait/WakeUp.
+  Event wakeup_;
+  MessageQueue* msg_queue_;
+  bool stop_on_idle_;
+  in_addr next_ipv4_;
+  in6_addr next_ipv6_;
+  uint16_t next_port_;
+  AddressMap* bindings_;
+  ConnectionMap* connections_;
+
+  IPAddress default_route_v4_;
+  IPAddress default_route_v6_;
+
+  uint32_t bandwidth_;
+  uint32_t network_capacity_;
+  uint32_t send_buffer_capacity_;
+  uint32_t recv_buffer_capacity_;
+  uint32_t delay_mean_;
+  uint32_t delay_stddev_;
+  uint32_t delay_samples_;
+
+  // Used for testing.
+  uint32_t sent_packets_ = 0;
+
+  std::map<rtc::IPAddress, int> delay_by_ip_;
+  std::map<rtc::IPAddress, rtc::IPAddress> alternative_address_mapping_;
+  std::unique_ptr<Function> delay_dist_;
+
+  CriticalSection delay_crit_;
+
+  double drop_prob_;
+  bool sending_blocked_ = false;
+  RTC_DISALLOW_COPY_AND_ASSIGN(VirtualSocketServer);
+};
+
+// Implements the socket interface using the virtual network.  Packets are
+// passed as messages using the message queue of the socket server.
+class VirtualSocket : public AsyncSocket,
+                      public MessageHandler,
+                      public sigslot::has_slots<> {
+ public:
+  VirtualSocket(VirtualSocketServer* server, int family, int type, bool async);
+  ~VirtualSocket() override;
+
+  SocketAddress GetLocalAddress() const override;
+  SocketAddress GetRemoteAddress() const override;
+
+  int Bind(const SocketAddress& addr) override;
+  int Connect(const SocketAddress& addr) override;
+  int Close() override;
+  int Send(const void* pv, size_t cb) override;
+  int SendTo(const void* pv, size_t cb, const SocketAddress& addr) override;
+  int Recv(void* pv, size_t cb, int64_t* timestamp) override;
+  int RecvFrom(void* pv,
+               size_t cb,
+               SocketAddress* paddr,
+               int64_t* timestamp) override;
+  int Listen(int backlog) override;
+  VirtualSocket* Accept(SocketAddress* paddr) override;
+
+  int GetError() const override;
+  void SetError(int error) override;
+  ConnState GetState() const override;
+  int GetOption(Option opt, int* value) override;
+  int SetOption(Option opt, int value) override;
+  void OnMessage(Message* pmsg) override;
+
+  bool was_any() { return was_any_; }
+  void set_was_any(bool was_any) { was_any_ = was_any; }
+
+  // For testing purpose only. Fired when client socket is bound to an address.
+  sigslot::signal2<VirtualSocket*, const SocketAddress&> SignalAddressReady;
+
+ private:
+  struct NetworkEntry {
+    size_t size;
+    int64_t done_time;
+  };
+
+  typedef std::deque<SocketAddress> ListenQueue;
+  typedef std::deque<NetworkEntry> NetworkQueue;
+  typedef std::vector<char> SendBuffer;
+  typedef std::list<Packet*> RecvBuffer;
+  typedef std::map<Option, int> OptionsMap;
+
+  int InitiateConnect(const SocketAddress& addr, bool use_delay);
+  void CompleteConnect(const SocketAddress& addr, bool notify);
+  int SendUdp(const void* pv, size_t cb, const SocketAddress& addr);
+  int SendTcp(const void* pv, size_t cb);
+
+  // Used by server sockets to set the local address without binding.
+  void SetLocalAddress(const SocketAddress& addr);
+
+  void OnSocketServerReadyToSend();
+
+  VirtualSocketServer* server_;
+  int type_;
+  bool async_;
+  ConnState state_;
+  int error_;
+  SocketAddress local_addr_;
+  SocketAddress remote_addr_;
+
+  // Pending sockets which can be Accepted
+  ListenQueue* listen_queue_;
+
+  // Data which tcp has buffered for sending
+  SendBuffer send_buffer_;
+  // Set to false if the last attempt to send resulted in EWOULDBLOCK.
+  // Set back to true when the socket can send again.
+  bool ready_to_send_ = true;
+
+  // Critical section to protect the recv_buffer and queue_
+  CriticalSection crit_;
+
+  // Network model that enforces bandwidth and capacity constraints
+  NetworkQueue network_;
+  size_t network_size_;
+  // The scheduled delivery time of the last packet sent on this socket.
+  // It is used to ensure ordered delivery of packets sent on this socket.
+  int64_t last_delivery_time_ = 0;
+
+  // Data which has been received from the network
+  RecvBuffer recv_buffer_;
+  // The amount of data which is in flight or in recv_buffer_
+  size_t recv_buffer_size_;
+
+  // Is this socket bound?
+  bool bound_;
+
+  // When we bind a socket to Any, VSS's Bind gives it another address. For
+  // dual-stack sockets, we want to distinguish between sockets that were
+  // explicitly given a particular address and sockets that had one picked
+  // for them by VSS.
+  bool was_any_;
+
+  // Store the options that are set
+  OptionsMap options_map_;
+
+  friend class VirtualSocketServer;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_VIRTUALSOCKETSERVER_H_
diff --git a/rtc_base/weak_ptr.cc b/rtc_base/weak_ptr.cc
new file mode 100644
index 0000000..c76256a
--- /dev/null
+++ b/rtc_base/weak_ptr.cc
@@ -0,0 +1,83 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/weak_ptr.h"
+
+// The implementation is borrowed from chromium except that it does not
+// implement SupportsWeakPtr.
+
+namespace rtc {
+namespace internal {
+
+WeakReference::Flag::Flag() : is_valid_(true) {
+  // Flags only become bound when checked for validity, or invalidated,
+  // so that we can check that later validity/invalidation operations on
+  // the same Flag take place on the same sequence.
+  checker_.Detach();
+}
+
+void WeakReference::Flag::Invalidate() {
+  RTC_DCHECK(checker_.CalledSequentially())
+      << "WeakPtrs must be invalidated on the same sequence.";
+  is_valid_ = false;
+}
+
+bool WeakReference::Flag::IsValid() const {
+  RTC_DCHECK(checker_.CalledSequentially())
+      << "WeakPtrs must be checked on the same sequence.";
+  return is_valid_;
+}
+
+WeakReference::Flag::~Flag() {}
+
+WeakReference::WeakReference() {}
+
+WeakReference::WeakReference(const Flag* flag) : flag_(flag) {}
+
+WeakReference::~WeakReference() {}
+
+WeakReference::WeakReference(WeakReference&& other) = default;
+
+WeakReference::WeakReference(const WeakReference& other) = default;
+
+bool WeakReference::is_valid() const {
+  return flag_.get() && flag_->IsValid();
+}
+
+WeakReferenceOwner::WeakReferenceOwner() {
+}
+
+WeakReferenceOwner::~WeakReferenceOwner() {
+  Invalidate();
+}
+
+WeakReference WeakReferenceOwner::GetRef() const {
+  // If we hold the last reference to the Flag then create a new one.
+  if (!HasRefs())
+    flag_ = new RefCountedObject<WeakReference::Flag>();
+
+  return WeakReference(flag_.get());
+}
+
+void WeakReferenceOwner::Invalidate() {
+  if (flag_.get()) {
+    flag_->Invalidate();
+    flag_ = nullptr;
+  }
+}
+
+WeakPtrBase::WeakPtrBase() {}
+
+WeakPtrBase::~WeakPtrBase() {}
+
+WeakPtrBase::WeakPtrBase(const WeakReference& ref) : ref_(ref) {}
+
+}  // namespace internal
+}  // namespace rtc
diff --git a/rtc_base/weak_ptr.h b/rtc_base/weak_ptr.h
new file mode 100644
index 0000000..8acfab0
--- /dev/null
+++ b/rtc_base/weak_ptr.h
@@ -0,0 +1,272 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_WEAK_PTR_H_
+#define RTC_BASE_WEAK_PTR_H_
+
+#include <memory>
+
+#include <utility>
+
+#include "rtc_base/refcount.h"
+#include "rtc_base/refcountedobject.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "rtc_base/sequenced_task_checker.h"
+
+// The implementation is borrowed from chromium except that it does not
+// implement SupportsWeakPtr.
+
+// Weak pointers are pointers to an object that do not affect its lifetime,
+// and which may be invalidated (i.e. reset to nullptr) by the object, or its
+// owner, at any time, most commonly when the object is about to be deleted.
+
+// Weak pointers are useful when an object needs to be accessed safely by one
+// or more objects other than its owner, and those callers can cope with the
+// object vanishing and e.g. tasks posted to it being silently dropped.
+// Reference-counting such an object would complicate the ownership graph and
+// make it harder to reason about the object's lifetime.
+
+// EXAMPLE:
+//
+//  class Controller {
+//   public:
+//    Controller() : weak_factory_(this) {}
+//    void SpawnWorker() { Worker::StartNew(weak_factory_.GetWeakPtr()); }
+//    void WorkComplete(const Result& result) { ... }
+//   private:
+//    // Member variables should appear before the WeakPtrFactory, to ensure
+//    // that any WeakPtrs to Controller are invalidated before its members
+//    // variable's destructors are executed, rendering them invalid.
+//    WeakPtrFactory<Controller> weak_factory_;
+//  };
+//
+//  class Worker {
+//   public:
+//    static void StartNew(const WeakPtr<Controller>& controller) {
+//      Worker* worker = new Worker(controller);
+//      // Kick off asynchronous processing...
+//    }
+//   private:
+//    Worker(const WeakPtr<Controller>& controller)
+//        : controller_(controller) {}
+//    void DidCompleteAsynchronousProcessing(const Result& result) {
+//      if (controller_)
+//        controller_->WorkComplete(result);
+//    }
+//    WeakPtr<Controller> controller_;
+//  };
+//
+// With this implementation a caller may use SpawnWorker() to dispatch multiple
+// Workers and subsequently delete the Controller, without waiting for all
+// Workers to have completed.
+
+// ------------------------- IMPORTANT: Thread-safety -------------------------
+
+// Weak pointers may be passed safely between threads, but must always be
+// dereferenced and invalidated on the same TaskQueue or thread, otherwise
+// checking the pointer would be racey.
+//
+// To ensure correct use, the first time a WeakPtr issued by a WeakPtrFactory
+// is dereferenced, the factory and its WeakPtrs become bound to the calling
+// TaskQueue/thread, and cannot be dereferenced or
+// invalidated on any other TaskQueue/thread. Bound WeakPtrs can still be handed
+// off to other TaskQueues, e.g. to use to post tasks back to object on the
+// bound sequence.
+//
+// Thus, at least one WeakPtr object must exist and have been dereferenced on
+// the correct thread to enforce that other WeakPtr objects will enforce they
+// are used on the desired thread.
+
+namespace rtc {
+
+namespace internal {
+
+class WeakReference {
+ public:
+  // Although Flag is bound to a specific sequence, it may be
+  // deleted from another via base::WeakPtr::~WeakPtr().
+  class Flag : public RefCountInterface {
+   public:
+    Flag();
+
+    void Invalidate();
+    bool IsValid() const;
+
+   private:
+    friend class RefCountedObject<Flag>;
+
+    ~Flag() override;
+
+    SequencedTaskChecker checker_;
+    bool is_valid_;
+  };
+
+  WeakReference();
+  explicit WeakReference(const Flag* flag);
+  ~WeakReference();
+
+  WeakReference(WeakReference&& other);
+  WeakReference(const WeakReference& other);
+  WeakReference& operator=(WeakReference&& other) = default;
+  WeakReference& operator=(const WeakReference& other) = default;
+
+  bool is_valid() const;
+
+ private:
+  scoped_refptr<const Flag> flag_;
+};
+
+class WeakReferenceOwner {
+ public:
+  WeakReferenceOwner();
+  ~WeakReferenceOwner();
+
+  WeakReference GetRef() const;
+
+  bool HasRefs() const { return flag_.get() && !flag_->HasOneRef(); }
+
+  void Invalidate();
+
+ private:
+  mutable scoped_refptr<RefCountedObject<WeakReference::Flag>> flag_;
+};
+
+// This class simplifies the implementation of WeakPtr's type conversion
+// constructor by avoiding the need for a public accessor for ref_.  A
+// WeakPtr<T> cannot access the private members of WeakPtr<U>, so this
+// base class gives us a way to access ref_ in a protected fashion.
+class WeakPtrBase {
+ public:
+  WeakPtrBase();
+  ~WeakPtrBase();
+
+  WeakPtrBase(const WeakPtrBase& other) = default;
+  WeakPtrBase(WeakPtrBase&& other) = default;
+  WeakPtrBase& operator=(const WeakPtrBase& other) = default;
+  WeakPtrBase& operator=(WeakPtrBase&& other) = default;
+
+ protected:
+  explicit WeakPtrBase(const WeakReference& ref);
+
+  WeakReference ref_;
+};
+
+}  // namespace internal
+
+template <typename T>
+class WeakPtrFactory;
+
+template <typename T>
+class WeakPtr : public internal::WeakPtrBase {
+ public:
+  WeakPtr() : ptr_(nullptr) {}
+
+  // Allow conversion from U to T provided U "is a" T. Note that this
+  // is separate from the (implicit) copy and move constructors.
+  template <typename U>
+  WeakPtr(const WeakPtr<U>& other)
+      : internal::WeakPtrBase(other), ptr_(other.ptr_) {}
+  template <typename U>
+  WeakPtr(WeakPtr<U>&& other)
+      : internal::WeakPtrBase(std::move(other)), ptr_(other.ptr_) {}
+
+  T* get() const { return ref_.is_valid() ? ptr_ : nullptr; }
+
+  T& operator*() const {
+    RTC_DCHECK(get() != nullptr);
+    return *get();
+  }
+  T* operator->() const {
+    RTC_DCHECK(get() != nullptr);
+    return get();
+  }
+
+  void reset() {
+    ref_ = internal::WeakReference();
+    ptr_ = nullptr;
+  }
+
+  // Allow conditionals to test validity, e.g. if (weak_ptr) {...};
+  explicit operator bool() const { return get() != nullptr; }
+
+ private:
+  template <typename U>
+  friend class WeakPtr;
+  friend class WeakPtrFactory<T>;
+
+  WeakPtr(const internal::WeakReference& ref, T* ptr)
+      : internal::WeakPtrBase(ref), ptr_(ptr) {}
+
+  // This pointer is only valid when ref_.is_valid() is true.  Otherwise, its
+  // value is undefined (as opposed to nullptr).
+  T* ptr_;
+};
+
+// Allow callers to compare WeakPtrs against nullptr to test validity.
+template <class T>
+bool operator!=(const WeakPtr<T>& weak_ptr, std::nullptr_t) {
+  return !(weak_ptr == nullptr);
+}
+template <class T>
+bool operator!=(std::nullptr_t, const WeakPtr<T>& weak_ptr) {
+  return weak_ptr != nullptr;
+}
+template <class T>
+bool operator==(const WeakPtr<T>& weak_ptr, std::nullptr_t) {
+  return weak_ptr.get() == nullptr;
+}
+template <class T>
+bool operator==(std::nullptr_t, const WeakPtr<T>& weak_ptr) {
+  return weak_ptr == nullptr;
+}
+
+// A class may be composed of a WeakPtrFactory and thereby
+// control how it exposes weak pointers to itself.  This is helpful if you only
+// need weak pointers within the implementation of a class.  This class is also
+// useful when working with primitive types.  For example, you could have a
+// WeakPtrFactory<bool> that is used to pass around a weak reference to a bool.
+
+// Note that GetWeakPtr must be called on one and only one TaskQueue or thread
+// and the WeakPtr must only be dereferenced and invalidated on that same
+// TaskQueue/thread. A WeakPtr instance can be copied and posted to other
+// sequences though as long as it is not dereferenced (WeakPtr<T>::get()).
+template <class T>
+class WeakPtrFactory {
+ public:
+  explicit WeakPtrFactory(T* ptr) : ptr_(ptr) {}
+
+  ~WeakPtrFactory() { ptr_ = nullptr; }
+
+  WeakPtr<T> GetWeakPtr() {
+    RTC_DCHECK(ptr_);
+    return WeakPtr<T>(weak_reference_owner_.GetRef(), ptr_);
+  }
+
+  // Call this method to invalidate all existing weak pointers.
+  void InvalidateWeakPtrs() {
+    RTC_DCHECK(ptr_);
+    weak_reference_owner_.Invalidate();
+  }
+
+  // Call this method to determine if any weak pointers exist.
+  bool HasWeakPtrs() const {
+    RTC_DCHECK(ptr_);
+    return weak_reference_owner_.HasRefs();
+  }
+
+ private:
+  internal::WeakReferenceOwner weak_reference_owner_;
+  T* ptr_;
+  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(WeakPtrFactory);
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_WEAK_PTR_H_
diff --git a/rtc_base/weak_ptr_unittest.cc b/rtc_base/weak_ptr_unittest.cc
new file mode 100644
index 0000000..6b0b452
--- /dev/null
+++ b/rtc_base/weak_ptr_unittest.cc
@@ -0,0 +1,242 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string>
+
+#include "rtc_base/event.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/task_queue.h"
+#include "rtc_base/weak_ptr.h"
+
+namespace rtc {
+
+namespace {
+
+struct Base {
+  std::string member;
+};
+struct Derived : public Base {};
+
+struct Target {};
+
+struct Arrow {
+  WeakPtr<Target> target;
+};
+
+struct TargetWithFactory : public Target {
+  TargetWithFactory() : factory(this) {}
+  WeakPtrFactory<Target> factory;
+};
+
+}  // namespace
+
+TEST(WeakPtrFactoryTest, Basic) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+  WeakPtr<int> ptr = factory.GetWeakPtr();
+  EXPECT_EQ(&data, ptr.get());
+}
+
+TEST(WeakPtrFactoryTest, Comparison) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+  WeakPtr<int> ptr = factory.GetWeakPtr();
+  WeakPtr<int> ptr2 = ptr;
+  EXPECT_EQ(ptr.get(), ptr2.get());
+}
+
+TEST(WeakPtrFactoryTest, Move) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+  WeakPtr<int> ptr = factory.GetWeakPtr();
+  WeakPtr<int> ptr2 = factory.GetWeakPtr();
+  WeakPtr<int> ptr3 = std::move(ptr2);
+  EXPECT_NE(ptr.get(), ptr2.get());
+  EXPECT_EQ(ptr.get(), ptr3.get());
+}
+
+TEST(WeakPtrFactoryTest, OutOfScope) {
+  WeakPtr<int> ptr;
+  EXPECT_EQ(nullptr, ptr.get());
+  {
+    int data;
+    WeakPtrFactory<int> factory(&data);
+    ptr = factory.GetWeakPtr();
+    EXPECT_EQ(&data, ptr.get());
+  }
+  EXPECT_EQ(nullptr, ptr.get());
+}
+
+TEST(WeakPtrFactoryTest, Multiple) {
+  WeakPtr<int> a, b;
+  {
+    int data;
+    WeakPtrFactory<int> factory(&data);
+    a = factory.GetWeakPtr();
+    b = factory.GetWeakPtr();
+    EXPECT_EQ(&data, a.get());
+    EXPECT_EQ(&data, b.get());
+  }
+  EXPECT_EQ(nullptr, a.get());
+  EXPECT_EQ(nullptr, b.get());
+}
+
+TEST(WeakPtrFactoryTest, MultipleStaged) {
+  WeakPtr<int> a;
+  {
+    int data;
+    WeakPtrFactory<int> factory(&data);
+    a = factory.GetWeakPtr();
+    { WeakPtr<int> b = factory.GetWeakPtr(); }
+    EXPECT_NE(nullptr, a.get());
+  }
+  EXPECT_EQ(nullptr, a.get());
+}
+
+TEST(WeakPtrFactoryTest, Dereference) {
+  Base data;
+  data.member = "123456";
+  WeakPtrFactory<Base> factory(&data);
+  WeakPtr<Base> ptr = factory.GetWeakPtr();
+  EXPECT_EQ(&data, ptr.get());
+  EXPECT_EQ(data.member, (*ptr).member);
+  EXPECT_EQ(data.member, ptr->member);
+}
+
+TEST(WeakPtrFactoryTest, UpCast) {
+  Derived data;
+  WeakPtrFactory<Derived> factory(&data);
+  WeakPtr<Base> ptr = factory.GetWeakPtr();
+  ptr = factory.GetWeakPtr();
+  EXPECT_EQ(ptr.get(), &data);
+}
+
+TEST(WeakPtrTest, DefaultConstructor) {
+  WeakPtr<int> ptr;
+  EXPECT_EQ(nullptr, ptr.get());
+}
+
+TEST(WeakPtrFactoryTest, BooleanTesting) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+
+  WeakPtr<int> ptr_to_an_instance = factory.GetWeakPtr();
+  EXPECT_TRUE(ptr_to_an_instance);
+  EXPECT_FALSE(!ptr_to_an_instance);
+
+  if (ptr_to_an_instance) {
+  } else {
+    ADD_FAILURE() << "Pointer to an instance should result in true.";
+  }
+
+  if (!ptr_to_an_instance) {  // check for operator!().
+    ADD_FAILURE() << "Pointer to an instance should result in !x being false.";
+  }
+
+  WeakPtr<int> null_ptr;
+  EXPECT_FALSE(null_ptr);
+  EXPECT_TRUE(!null_ptr);
+
+  if (null_ptr) {
+    ADD_FAILURE() << "Null pointer should result in false.";
+  }
+
+  if (!null_ptr) {  // check for operator!().
+  } else {
+    ADD_FAILURE() << "Null pointer should result in !x being true.";
+  }
+}
+
+TEST(WeakPtrFactoryTest, ComparisonToNull) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+
+  WeakPtr<int> ptr_to_an_instance = factory.GetWeakPtr();
+  EXPECT_NE(nullptr, ptr_to_an_instance);
+  EXPECT_NE(ptr_to_an_instance, nullptr);
+
+  WeakPtr<int> null_ptr;
+  EXPECT_EQ(null_ptr, nullptr);
+  EXPECT_EQ(nullptr, null_ptr);
+}
+
+TEST(WeakPtrTest, InvalidateWeakPtrs) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+  WeakPtr<int> ptr = factory.GetWeakPtr();
+  EXPECT_EQ(&data, ptr.get());
+  EXPECT_TRUE(factory.HasWeakPtrs());
+  factory.InvalidateWeakPtrs();
+  EXPECT_EQ(nullptr, ptr.get());
+  EXPECT_FALSE(factory.HasWeakPtrs());
+
+  // Test that the factory can create new weak pointers after a
+  // InvalidateWeakPtrs call, and they remain valid until the next
+  // InvalidateWeakPtrs call.
+  WeakPtr<int> ptr2 = factory.GetWeakPtr();
+  EXPECT_EQ(&data, ptr2.get());
+  EXPECT_TRUE(factory.HasWeakPtrs());
+  factory.InvalidateWeakPtrs();
+  EXPECT_EQ(nullptr, ptr2.get());
+  EXPECT_FALSE(factory.HasWeakPtrs());
+}
+
+TEST(WeakPtrTest, HasWeakPtrs) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+  {
+    WeakPtr<int> ptr = factory.GetWeakPtr();
+    EXPECT_TRUE(factory.HasWeakPtrs());
+  }
+  EXPECT_FALSE(factory.HasWeakPtrs());
+}
+
+template <class T>
+std::unique_ptr<T> NewObjectCreatedOnTaskQueue() {
+  std::unique_ptr<T> obj;
+  TaskQueue queue("NewObjectCreatedOnTaskQueue");
+  Event event(false, false);
+  queue.PostTask([&event, &obj] {
+    obj.reset(new T());
+    event.Set();
+  });
+  EXPECT_TRUE(event.Wait(1000));
+  return obj;
+}
+
+TEST(WeakPtrTest, ObjectAndWeakPtrOnDifferentThreads) {
+  // Test that it is OK to create an object with a WeakPtrFactory one thread,
+  // but use it on another.  This tests that we do not trip runtime checks that
+  // ensure that a WeakPtr is not used by multiple threads.
+  std::unique_ptr<TargetWithFactory> target(
+      NewObjectCreatedOnTaskQueue<TargetWithFactory>());
+  WeakPtr<Target> weak_ptr = target->factory.GetWeakPtr();
+  EXPECT_EQ(target.get(), weak_ptr.get());
+}
+
+TEST(WeakPtrTest, WeakPtrInitiateAndUseOnDifferentThreads) {
+  // Test that it is OK to create a WeakPtr on one thread, but use it on
+  // another. This tests that we do not trip runtime checks that ensure that a
+  // WeakPtr is not used by multiple threads.
+  auto target = rtc::MakeUnique<TargetWithFactory>();
+  // Create weak ptr on main thread
+  WeakPtr<Target> weak_ptr = target->factory.GetWeakPtr();
+  rtc::TaskQueue queue("queue");
+  rtc::Event done(false, false);
+  queue.PostTask([&] {
+    // Dereference and invalide weak_ptr on another thread.
+    EXPECT_EQ(weak_ptr.get(), target.get());
+    target.reset();
+    done.Set();
+  });
+  EXPECT_TRUE(done.Wait(1000));
+}
+
+}  // namespace rtc
diff --git a/rtc_base/win32.cc b/rtc_base/win32.cc
new file mode 100644
index 0000000..cbf6fbb
--- /dev/null
+++ b/rtc_base/win32.cc
@@ -0,0 +1,401 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/win32.h"
+
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#include <algorithm>
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/basictypes.h"
+#include "rtc_base/byteorder.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace rtc {
+
+// Helper function declarations for inet_ntop/inet_pton.
+static const char* inet_ntop_v4(const void* src, char* dst, socklen_t size);
+static const char* inet_ntop_v6(const void* src, char* dst, socklen_t size);
+static int inet_pton_v4(const char* src, void* dst);
+static int inet_pton_v6(const char* src, void* dst);
+
+// Implementation of inet_ntop (create a printable representation of an
+// ip address). XP doesn't have its own inet_ntop, and
+// WSAAddressToString requires both IPv6 to be  installed and for Winsock
+// to be initialized.
+const char* win32_inet_ntop(int af, const void *src,
+                            char* dst, socklen_t size) {
+  if (!src || !dst) {
+    return nullptr;
+  }
+  switch (af) {
+    case AF_INET: {
+      return inet_ntop_v4(src, dst, size);
+    }
+    case AF_INET6: {
+      return inet_ntop_v6(src, dst, size);
+    }
+  }
+  return nullptr;
+}
+
+// As above, but for inet_pton. Implements inet_pton for v4 and v6.
+// Note that our inet_ntop will output normal 'dotted' v4 addresses only.
+int win32_inet_pton(int af, const char* src, void* dst) {
+  if (!src || !dst) {
+    return 0;
+  }
+  if (af == AF_INET) {
+    return inet_pton_v4(src, dst);
+  } else if (af == AF_INET6) {
+    return inet_pton_v6(src, dst);
+  }
+  return -1;
+}
+
+// Helper function for inet_ntop for IPv4 addresses.
+// Outputs "dotted-quad" decimal notation.
+const char* inet_ntop_v4(const void* src, char* dst, socklen_t size) {
+  if (size < INET_ADDRSTRLEN) {
+    return nullptr;
+  }
+  const struct in_addr* as_in_addr =
+      reinterpret_cast<const struct in_addr*>(src);
+  rtc::sprintfn(dst, size, "%d.%d.%d.%d",
+                      as_in_addr->S_un.S_un_b.s_b1,
+                      as_in_addr->S_un.S_un_b.s_b2,
+                      as_in_addr->S_un.S_un_b.s_b3,
+                      as_in_addr->S_un.S_un_b.s_b4);
+  return dst;
+}
+
+// Helper function for inet_ntop for IPv6 addresses.
+const char* inet_ntop_v6(const void* src, char* dst, socklen_t size) {
+  if (size < INET6_ADDRSTRLEN) {
+    return nullptr;
+  }
+  const uint16_t* as_shorts = reinterpret_cast<const uint16_t*>(src);
+  int runpos[8];
+  int current = 1;
+  int max = 0;
+  int maxpos = -1;
+  int run_array_size = arraysize(runpos);
+  // Run over the address marking runs of 0s.
+  for (int i = 0; i < run_array_size; ++i) {
+    if (as_shorts[i] == 0) {
+      runpos[i] = current;
+      if (current > max) {
+        maxpos = i;
+        max = current;
+      }
+      ++current;
+    } else {
+      runpos[i] = -1;
+      current = 1;
+    }
+  }
+
+  if (max > 0) {
+    int tmpmax = maxpos;
+    // Run back through, setting -1 for all but the longest run.
+    for (int i = run_array_size - 1; i >= 0; i--) {
+      if (i > tmpmax) {
+        runpos[i] = -1;
+      } else if (runpos[i] == -1) {
+        // We're less than maxpos, we hit a -1, so the 'good' run is done.
+        // Setting tmpmax -1 means all remaining positions get set to -1.
+        tmpmax = -1;
+      }
+    }
+  }
+
+  char* cursor = dst;
+  // Print IPv4 compatible and IPv4 mapped addresses using the IPv4 helper.
+  // These addresses have an initial run of either eight zero-bytes followed
+  // by 0xFFFF, or an initial run of ten zero-bytes.
+  if (runpos[0] == 1 && (maxpos == 5 ||
+                         (maxpos == 4 && as_shorts[5] == 0xFFFF))) {
+    *cursor++ = ':';
+    *cursor++ = ':';
+    if (maxpos == 4) {
+      cursor += rtc::sprintfn(cursor, INET6_ADDRSTRLEN - 2, "ffff:");
+    }
+    const struct in_addr* as_v4 =
+        reinterpret_cast<const struct in_addr*>(&(as_shorts[6]));
+    inet_ntop_v4(as_v4, cursor,
+                 static_cast<socklen_t>(INET6_ADDRSTRLEN - (cursor - dst)));
+  } else {
+    for (int i = 0; i < run_array_size; ++i) {
+      if (runpos[i] == -1) {
+        cursor += rtc::sprintfn(cursor,
+                                      INET6_ADDRSTRLEN - (cursor - dst),
+                                      "%x", NetworkToHost16(as_shorts[i]));
+        if (i != 7 && runpos[i + 1] != 1) {
+          *cursor++ = ':';
+        }
+      } else if (runpos[i] == 1) {
+        // Entered the run; print the colons and skip the run.
+        *cursor++ = ':';
+        *cursor++ = ':';
+        i += (max - 1);
+      }
+    }
+  }
+  return dst;
+}
+
+// Helper function for inet_pton for IPv4 addresses.
+// |src| points to a character string containing an IPv4 network address in
+// dotted-decimal format, "ddd.ddd.ddd.ddd", where ddd is a decimal number
+// of up to three digits in the range 0 to 255.
+// The address is converted and copied to dst,
+// which must be sizeof(struct in_addr) (4) bytes (32 bits) long.
+int inet_pton_v4(const char* src, void* dst) {
+  const int kIpv4AddressSize = 4;
+  int found = 0;
+  const char* src_pos = src;
+  unsigned char result[kIpv4AddressSize] = {0};
+
+  while (*src_pos != '\0') {
+    // strtol won't treat whitespace characters in the begining as an error,
+    // so check to ensure this is started with digit before passing to strtol.
+    if (!isdigit(*src_pos)) {
+      return 0;
+    }
+    char* end_pos;
+    long value = strtol(src_pos, &end_pos, 10);
+    if (value < 0 || value > 255 || src_pos == end_pos) {
+      return 0;
+    }
+    ++found;
+    if (found > kIpv4AddressSize) {
+      return 0;
+    }
+    result[found - 1] = static_cast<unsigned char>(value);
+    src_pos = end_pos;
+    if (*src_pos == '.') {
+      // There's more.
+      ++src_pos;
+    } else if (*src_pos != '\0') {
+      // If it's neither '.' nor '\0' then return fail.
+      return 0;
+    }
+  }
+  if (found != kIpv4AddressSize) {
+    return 0;
+  }
+  memcpy(dst, result, sizeof(result));
+  return 1;
+}
+
+// Helper function for inet_pton for IPv6 addresses.
+int inet_pton_v6(const char* src, void* dst) {
+  // sscanf will pick any other invalid chars up, but it parses 0xnnnn as hex.
+  // Check for literal x in the input string.
+  const char* readcursor = src;
+  char c = *readcursor++;
+  while (c) {
+    if (c == 'x') {
+      return 0;
+    }
+    c = *readcursor++;
+  }
+  readcursor = src;
+
+  struct in6_addr an_addr;
+  memset(&an_addr, 0, sizeof(an_addr));
+
+  uint16_t* addr_cursor = reinterpret_cast<uint16_t*>(&an_addr.s6_addr[0]);
+  uint16_t* addr_end = reinterpret_cast<uint16_t*>(&an_addr.s6_addr[16]);
+  bool seencompressed = false;
+
+  // Addresses that start with "::" (i.e., a run of initial zeros) or
+  // "::ffff:" can potentially be IPv4 mapped or compatibility addresses.
+  // These have dotted-style IPv4 addresses on the end (e.g. "::192.168.7.1").
+  if (*readcursor == ':' && *(readcursor+1) == ':' &&
+      *(readcursor + 2) != 0) {
+    // Check for periods, which we'll take as a sign of v4 addresses.
+    const char* addrstart = readcursor + 2;
+    if (rtc::strchr(addrstart, ".")) {
+      const char* colon = rtc::strchr(addrstart, "::");
+      if (colon) {
+        uint16_t a_short;
+        int bytesread = 0;
+        if (sscanf(addrstart, "%hx%n", &a_short, &bytesread) != 1 ||
+            a_short != 0xFFFF || bytesread != 4) {
+          // Colons + periods means has to be ::ffff:a.b.c.d. But it wasn't.
+          return 0;
+        } else {
+          an_addr.s6_addr[10] = 0xFF;
+          an_addr.s6_addr[11] = 0xFF;
+          addrstart = colon + 1;
+        }
+      }
+      struct in_addr v4;
+      if (inet_pton_v4(addrstart, &v4.s_addr)) {
+        memcpy(&an_addr.s6_addr[12], &v4, sizeof(v4));
+        memcpy(dst, &an_addr, sizeof(an_addr));
+        return 1;
+      } else {
+        // Invalid v4 address.
+        return 0;
+      }
+    }
+  }
+
+  // For addresses without a trailing IPv4 component ('normal' IPv6 addresses).
+  while (*readcursor != 0 && addr_cursor < addr_end) {
+    if (*readcursor == ':') {
+      if (*(readcursor + 1) == ':') {
+        if (seencompressed) {
+          // Can only have one compressed run of zeroes ("::") per address.
+          return 0;
+        }
+        // Hit a compressed run. Count colons to figure out how much of the
+        // address is skipped.
+        readcursor += 2;
+        const char* coloncounter = readcursor;
+        int coloncount = 0;
+        if (*coloncounter == 0) {
+          // Special case - trailing ::.
+          addr_cursor = addr_end;
+        } else {
+          while (*coloncounter) {
+            if (*coloncounter == ':') {
+              ++coloncount;
+            }
+            ++coloncounter;
+          }
+          // (coloncount + 1) is the number of shorts left in the address.
+          // If this number is greater than the number of available shorts, the
+          // address is malformed.
+          if (coloncount + 1 > addr_end - addr_cursor) {
+            return 0;
+          }
+          addr_cursor = addr_end - (coloncount + 1);
+          seencompressed = true;
+        }
+      } else {
+        ++readcursor;
+      }
+    } else {
+      uint16_t word;
+      int bytesread = 0;
+      if (sscanf(readcursor, "%4hx%n", &word, &bytesread) != 1) {
+        return 0;
+      } else {
+        *addr_cursor = HostToNetwork16(word);
+        ++addr_cursor;
+        readcursor += bytesread;
+        if (*readcursor != ':' && *readcursor != '\0') {
+          return 0;
+        }
+      }
+    }
+  }
+
+  if (*readcursor != '\0' || addr_cursor < addr_end) {
+    // Catches addresses too short or too long.
+    return 0;
+  }
+  memcpy(dst, &an_addr, sizeof(an_addr));
+  return 1;
+}
+
+bool Utf8ToWindowsFilename(const std::string& utf8, std::wstring* filename) {
+  // TODO: Integrate into fileutils.h
+  // TODO: Handle wide and non-wide cases via TCHAR?
+  // TODO: Skip \\?\ processing if the length is not > MAX_PATH?
+  // TODO: Write unittests
+
+  // Convert to Utf16
+  int wlen =
+      ::MultiByteToWideChar(CP_UTF8, 0, utf8.c_str(),
+                            static_cast<int>(utf8.length() + 1), nullptr, 0);
+  if (0 == wlen) {
+    return false;
+  }
+  wchar_t* wfilename = STACK_ARRAY(wchar_t, wlen);
+  if (0 == ::MultiByteToWideChar(CP_UTF8, 0, utf8.c_str(),
+                                 static_cast<int>(utf8.length() + 1),
+                                 wfilename, wlen)) {
+    return false;
+  }
+  // Replace forward slashes with backslashes
+  std::replace(wfilename, wfilename + wlen, L'/', L'\\');
+  // Convert to complete filename
+  DWORD full_len = ::GetFullPathName(wfilename, 0, nullptr, nullptr);
+  if (0 == full_len) {
+    return false;
+  }
+  wchar_t* filepart = nullptr;
+  wchar_t* full_filename = STACK_ARRAY(wchar_t, full_len + 6);
+  wchar_t* start = full_filename + 6;
+  if (0 == ::GetFullPathName(wfilename, full_len, start, &filepart)) {
+    return false;
+  }
+  // Add long-path prefix
+  const wchar_t kLongPathPrefix[] = L"\\\\?\\UNC";
+  if ((start[0] != L'\\') || (start[1] != L'\\')) {
+    // Non-unc path:     <pathname>
+    //      Becomes: \\?\<pathname>
+    start -= 4;
+    RTC_DCHECK(start >= full_filename);
+    memcpy(start, kLongPathPrefix, 4 * sizeof(wchar_t));
+  } else if (start[2] != L'?') {
+    // Unc path:       \\<server>\<pathname>
+    //  Becomes: \\?\UNC\<server>\<pathname>
+    start -= 6;
+    RTC_DCHECK(start >= full_filename);
+    memcpy(start, kLongPathPrefix, 7 * sizeof(wchar_t));
+  } else {
+    // Already in long-path form.
+  }
+  filename->assign(start);
+  return true;
+}
+
+bool GetOsVersion(int* major, int* minor, int* build) {
+  OSVERSIONINFO info = {0};
+  info.dwOSVersionInfoSize = sizeof(info);
+  if (GetVersionEx(&info)) {
+    if (major) *major = info.dwMajorVersion;
+    if (minor) *minor = info.dwMinorVersion;
+    if (build) *build = info.dwBuildNumber;
+    return true;
+  }
+  return false;
+}
+
+bool GetCurrentProcessIntegrityLevel(int* level) {
+  bool ret = false;
+  HANDLE process = ::GetCurrentProcess(), token;
+  if (OpenProcessToken(process, TOKEN_QUERY | TOKEN_QUERY_SOURCE, &token)) {
+    DWORD size;
+    if (!GetTokenInformation(token, TokenIntegrityLevel, nullptr, 0, &size) &&
+        GetLastError() == ERROR_INSUFFICIENT_BUFFER) {
+      char* buf = STACK_ARRAY(char, size);
+      TOKEN_MANDATORY_LABEL* til =
+          reinterpret_cast<TOKEN_MANDATORY_LABEL*>(buf);
+      if (GetTokenInformation(token, TokenIntegrityLevel, til, size, &size)) {
+
+        DWORD count = *GetSidSubAuthorityCount(til->Label.Sid);
+        *level = *GetSidSubAuthority(til->Label.Sid, count - 1);
+        ret = true;
+      }
+    }
+    CloseHandle(token);
+  }
+  return ret;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/win32.h b/rtc_base/win32.h
new file mode 100644
index 0000000..78f66a7
--- /dev/null
+++ b/rtc_base/win32.h
@@ -0,0 +1,88 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_WIN32_H_
+#define RTC_BASE_WIN32_H_
+
+#ifndef WEBRTC_WIN
+#error "Only #include this header in Windows builds"
+#endif
+
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
+
+// Make sure we don't get min/max macros
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+
+#include <winsock2.h>
+#include <windows.h>
+
+#ifndef SECURITY_MANDATORY_LABEL_AUTHORITY
+// Add defines that we use if we are compiling against older sdks
+#define SECURITY_MANDATORY_MEDIUM_RID               (0x00002000L)
+#define TokenIntegrityLevel static_cast<TOKEN_INFORMATION_CLASS>(0x19)
+typedef struct _TOKEN_MANDATORY_LABEL {
+    SID_AND_ATTRIBUTES Label;
+} TOKEN_MANDATORY_LABEL, *PTOKEN_MANDATORY_LABEL;
+#endif  // SECURITY_MANDATORY_LABEL_AUTHORITY
+
+#undef SetPort
+
+#include <string>
+
+#include "rtc_base/basictypes.h"
+#include "rtc_base/stringutils.h"
+
+namespace rtc {
+
+const char* win32_inet_ntop(int af, const void *src, char* dst, socklen_t size);
+int win32_inet_pton(int af, const char* src, void *dst);
+
+// Convert a Utf8 path representation to a non-length-limited Unicode pathname.
+bool Utf8ToWindowsFilename(const std::string& utf8, std::wstring* filename);
+
+enum WindowsMajorVersions {
+  kWindows2000 = 5,
+  kWindowsVista = 6,
+};
+bool GetOsVersion(int* major, int* minor, int* build);
+
+inline bool IsWindowsVistaOrLater() {
+  int major;
+  return (GetOsVersion(&major, nullptr, nullptr) && major >= kWindowsVista);
+}
+
+inline bool IsWindowsXpOrLater() {
+  int major, minor;
+  return (GetOsVersion(&major, &minor, nullptr) &&
+          (major >= kWindowsVista || (major == kWindows2000 && minor >= 1)));
+}
+
+inline bool IsWindows8OrLater() {
+  int major, minor;
+  return (GetOsVersion(&major, &minor, nullptr) &&
+          (major > kWindowsVista || (major == kWindowsVista && minor >= 2)));
+}
+
+// Determine the current integrity level of the process.
+bool GetCurrentProcessIntegrityLevel(int* level);
+
+inline bool IsCurrentProcessLowIntegrity() {
+  int level;
+  return (GetCurrentProcessIntegrityLevel(&level) &&
+      level < SECURITY_MANDATORY_MEDIUM_RID);
+}
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_WIN32_H_
diff --git a/rtc_base/win32_unittest.cc b/rtc_base/win32_unittest.cc
new file mode 100644
index 0000000..5ee3ff4
--- /dev/null
+++ b/rtc_base/win32_unittest.cc
@@ -0,0 +1,89 @@
+/*
+ *  Copyright 2010 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string>
+
+#include "rtc_base/gunit.h"
+#include "rtc_base/nethelpers.h"
+#include "rtc_base/win32.h"
+
+#if !defined(WEBRTC_WIN)
+#error Only for Windows
+#endif
+
+namespace rtc {
+
+class Win32Test : public testing::Test {
+ public:
+  Win32Test() {
+  }
+};
+
+TEST_F(Win32Test, IPv6AddressCompression) {
+  IPAddress ipv6;
+
+  // Zero compression should be done on the leftmost 0s when there are
+  // multiple longest series.
+  ASSERT_TRUE(IPFromString("2a00:8a00:a000:1190:0000:0001:000:252", &ipv6));
+  EXPECT_EQ("2a00:8a00:a000:1190::1:0:252", ipv6.ToString());
+
+  // Ensure the zero compression could handle multiple octects.
+  ASSERT_TRUE(IPFromString("0:0:0:0:0:0:0:1", &ipv6));
+  EXPECT_EQ("::1", ipv6.ToString());
+
+  // Make sure multiple 0 octects compressed.
+  ASSERT_TRUE(IPFromString("fe80:0:0:0:2aa:ff:fe9a:4ca2", &ipv6));
+  EXPECT_EQ("fe80::2aa:ff:fe9a:4ca2", ipv6.ToString());
+
+  // Test zero compression at the end of string.
+  ASSERT_TRUE(IPFromString("2a00:8a00:a000:1190:0000:0001:000:00", &ipv6));
+  EXPECT_EQ("2a00:8a00:a000:1190:0:1::", ipv6.ToString());
+
+  // Test zero compression at the beginning of string.
+  ASSERT_TRUE(IPFromString("0:0:000:1190:0000:0001:000:00", &ipv6));
+  EXPECT_EQ("::1190:0:1:0:0", ipv6.ToString());
+
+  // Test zero compression only done once.
+  ASSERT_TRUE(IPFromString("0:1:000:1190:0000:0001:000:01", &ipv6));
+  EXPECT_EQ("::1:0:1190:0:1:0:1", ipv6.ToString());
+
+  // Make sure noncompressable IPv6 is the same.
+  ASSERT_TRUE(IPFromString("1234:5678:abcd:1234:5678:abcd:1234:5678", &ipv6));
+  EXPECT_EQ("1234:5678:abcd:1234:5678:abcd:1234:5678", ipv6.ToString());
+}
+
+// Test that invalid IPv6 addresses are recognized and false is returned.
+TEST_F(Win32Test, InvalidIPv6AddressParsing) {
+  IPAddress ipv6;
+
+  // More than 1 run of "::"s.
+  EXPECT_FALSE(IPFromString("1::2::3", &ipv6));
+
+  // More than 1 run of "::"s in a longer address.
+  // See: https://bugs.chromium.org/p/webrtc/issues/detail?id=7592
+  EXPECT_FALSE(IPFromString("1::2::3::4::5::6::7::8", &ipv6));
+
+  // Three ':'s in a row.
+  EXPECT_FALSE(IPFromString("1:::2", &ipv6));
+
+  // Non-hex character.
+  EXPECT_FALSE(IPFromString("test::1", &ipv6));
+
+  // More than 4 hex digits per group.
+  EXPECT_FALSE(IPFromString("abcde::1", &ipv6));
+
+  // More than 8 groups.
+  EXPECT_FALSE(IPFromString("1:2:3:4:5:6:7:8:9", &ipv6));
+
+  // Less than 8 groups.
+  EXPECT_FALSE(IPFromString("1:2:3:4:5:6:7", &ipv6));
+}
+
+}  // namespace rtc
diff --git a/rtc_base/win32filesystem.cc b/rtc_base/win32filesystem.cc
new file mode 100644
index 0000000..8ca84c3
--- /dev/null
+++ b/rtc_base/win32filesystem.cc
@@ -0,0 +1,83 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/win32filesystem.h"
+
+#include <shellapi.h>
+#include <shlobj.h>
+#include <tchar.h>
+#include "rtc_base/win32.h"
+
+#include <memory>
+
+#include "rtc_base/arraysize.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/fileutils.h"
+#include "rtc_base/pathutils.h"
+#include "rtc_base/stream.h"
+#include "rtc_base/stringutils.h"
+
+// In several places in this file, we test the integrity level of the process
+// before calling GetLongPathName. We do this because calling GetLongPathName
+// when running under protected mode IE (a low integrity process) can result in
+// a virtualized path being returned, which is wrong if you only plan to read.
+// TODO: Waiting to hear back from IE team on whether this is the
+// best approach; IEIsProtectedModeProcess is another possible solution.
+
+namespace rtc {
+
+bool Win32Filesystem::DeleteFile(const Pathname &filename) {
+  RTC_LOG(LS_INFO) << "Deleting file " << filename.pathname();
+  if (!IsFile(filename)) {
+    RTC_DCHECK(IsFile(filename));
+    return false;
+  }
+  return ::DeleteFile(ToUtf16(filename.pathname()).c_str()) != 0;
+}
+
+bool Win32Filesystem::MoveFile(const Pathname &old_path,
+                               const Pathname &new_path) {
+  if (!IsFile(old_path)) {
+    RTC_DCHECK(IsFile(old_path));
+    return false;
+  }
+  RTC_LOG(LS_INFO) << "Moving " << old_path.pathname() << " to "
+                   << new_path.pathname();
+  return ::MoveFile(ToUtf16(old_path.pathname()).c_str(),
+                    ToUtf16(new_path.pathname()).c_str()) != 0;
+}
+
+bool Win32Filesystem::IsFolder(const Pathname &path) {
+  WIN32_FILE_ATTRIBUTE_DATA data = {0};
+  if (0 == ::GetFileAttributesEx(ToUtf16(path.pathname()).c_str(),
+                                 GetFileExInfoStandard, &data))
+    return false;
+  return (data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) ==
+      FILE_ATTRIBUTE_DIRECTORY;
+}
+
+bool Win32Filesystem::IsFile(const Pathname &path) {
+  WIN32_FILE_ATTRIBUTE_DATA data = {0};
+  if (0 == ::GetFileAttributesEx(ToUtf16(path.pathname()).c_str(),
+                                 GetFileExInfoStandard, &data))
+    return false;
+  return (data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) == 0;
+}
+
+bool Win32Filesystem::GetFileSize(const Pathname &pathname, size_t *size) {
+  WIN32_FILE_ATTRIBUTE_DATA data = {0};
+  if (::GetFileAttributesEx(ToUtf16(pathname.pathname()).c_str(),
+                            GetFileExInfoStandard, &data) == 0)
+  return false;
+  *size = data.nFileSizeLow;
+  return true;
+}
+
+}  // namespace rtc
diff --git a/rtc_base/win32filesystem.h b/rtc_base/win32filesystem.h
new file mode 100644
index 0000000..d26741e
--- /dev/null
+++ b/rtc_base/win32filesystem.h
@@ -0,0 +1,41 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_WIN32FILESYSTEM_H_
+#define RTC_BASE_WIN32FILESYSTEM_H_
+
+#include "fileutils.h"
+
+namespace rtc {
+
+class Win32Filesystem : public FilesystemInterface {
+ public:
+  // This will attempt to delete the path located at filename.
+  // If the path points to a folder, it will fail with VERIFY
+  bool DeleteFile(const Pathname& filename) override;
+
+  // This moves a file from old_path to new_path. If the new path is on a
+  // different volume than the old, it will attempt to copy and then delete
+  // the folder
+  // Returns true if the file is successfully moved
+  bool MoveFile(const Pathname& old_path, const Pathname& new_path) override;
+
+  // Returns true if a pathname is a directory
+  bool IsFolder(const Pathname& pathname) override;
+
+  // Returns true if a file exists at path
+  bool IsFile(const Pathname& path) override;
+
+  bool GetFileSize(const Pathname& path, size_t* size) override;
+};
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_WIN32FILESYSTEM_H_
diff --git a/rtc_base/win32socketinit.cc b/rtc_base/win32socketinit.cc
new file mode 100644
index 0000000..5bf6546
--- /dev/null
+++ b/rtc_base/win32socketinit.cc
@@ -0,0 +1,46 @@
+/*
+ *  Copyright 2009 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/win32socketinit.h"
+
+#include "rtc_base/win32.h"
+
+namespace rtc {
+
+// Please don't remove this function.
+void EnsureWinsockInit() {
+  // The default implementation uses a global initializer, so WSAStartup
+  // happens at module load time.  Thus we don't need to do anything here.
+  // The hook is provided so that a client that statically links with
+  // libjingle can override it, to provide its own initialization.
+}
+
+#if defined(WEBRTC_WIN)
+class WinsockInitializer {
+ public:
+  WinsockInitializer() {
+    WSADATA wsaData;
+    WORD wVersionRequested = MAKEWORD(1, 0);
+    err_ = WSAStartup(wVersionRequested, &wsaData);
+  }
+  ~WinsockInitializer() {
+    if (!err_)
+      WSACleanup();
+  }
+  int error() {
+    return err_;
+  }
+ private:
+  int err_;
+};
+WinsockInitializer g_winsockinit;
+#endif
+
+}  // namespace rtc
diff --git a/rtc_base/win32socketinit.h b/rtc_base/win32socketinit.h
new file mode 100644
index 0000000..ea74809
--- /dev/null
+++ b/rtc_base/win32socketinit.h
@@ -0,0 +1,20 @@
+/*
+ *  Copyright 2009 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_WIN32SOCKETINIT_H_
+#define RTC_BASE_WIN32SOCKETINIT_H_
+
+namespace rtc {
+
+void EnsureWinsockInit();
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_WIN32SOCKETINIT_H_
diff --git a/rtc_base/win32socketserver.cc b/rtc_base/win32socketserver.cc
new file mode 100644
index 0000000..d79a1b3
--- /dev/null
+++ b/rtc_base/win32socketserver.cc
@@ -0,0 +1,851 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/win32socketserver.h"
+
+#include <algorithm>
+#include <ws2tcpip.h>  // NOLINT
+
+#include "rtc_base/byteorder.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/win32window.h"
+
+namespace rtc {
+
+///////////////////////////////////////////////////////////////////////////////
+// Win32Socket
+///////////////////////////////////////////////////////////////////////////////
+
+// TODO: Move this to a common place where PhysicalSocketServer can
+// share it.
+// Standard MTUs
+static const uint16_t PACKET_MAXIMUMS[] = {
+    65535,  // Theoretical maximum, Hyperchannel
+    32000,  // Nothing
+    17914,  // 16Mb IBM Token Ring
+    8166,   // IEEE 802.4
+    // 4464   // IEEE 802.5 (4Mb max)
+    4352,   // FDDI
+    // 2048,  // Wideband Network
+    2002,   // IEEE 802.5 (4Mb recommended)
+    // 1536,  // Expermental Ethernet Networks
+    // 1500,  // Ethernet, Point-to-Point (default)
+    1492,   // IEEE 802.3
+    1006,   // SLIP, ARPANET
+    // 576,   // X.25 Networks
+    // 544,   // DEC IP Portal
+    // 512,   // NETBIOS
+    508,    // IEEE 802/Source-Rt Bridge, ARCNET
+    296,    // Point-to-Point (low delay)
+    68,     // Official minimum
+    0,      // End of list marker
+};
+
+static const int IP_HEADER_SIZE = 20u;
+static const int ICMP_HEADER_SIZE = 8u;
+static const int ICMP_PING_TIMEOUT_MILLIS = 10000u;
+
+// TODO: Enable for production builds also? Use FormatMessage?
+#if !defined(NDEBUG)
+LPCSTR WSAErrorToString(int error, LPCSTR *description_result) {
+  LPCSTR string = "Unspecified";
+  LPCSTR description = "Unspecified description";
+  switch (error) {
+    case ERROR_SUCCESS:
+      string = "SUCCESS";
+      description = "Operation succeeded";
+      break;
+    case WSAEWOULDBLOCK:
+      string = "WSAEWOULDBLOCK";
+      description = "Using a non-blocking socket, will notify later";
+      break;
+    case WSAEACCES:
+      string = "WSAEACCES";
+      description = "Access denied, or sharing violation";
+      break;
+    case WSAEADDRNOTAVAIL:
+      string = "WSAEADDRNOTAVAIL";
+      description = "Address is not valid in this context";
+      break;
+    case WSAENETDOWN:
+      string = "WSAENETDOWN";
+      description = "Network is down";
+      break;
+    case WSAENETUNREACH:
+      string = "WSAENETUNREACH";
+      description = "Network is up, but unreachable";
+      break;
+    case WSAENETRESET:
+      string = "WSANETRESET";
+      description = "Connection has been reset due to keep-alive activity";
+      break;
+    case WSAECONNABORTED:
+      string = "WSAECONNABORTED";
+      description = "Aborted by host";
+      break;
+    case WSAECONNRESET:
+      string = "WSAECONNRESET";
+      description = "Connection reset by host";
+      break;
+    case WSAETIMEDOUT:
+      string = "WSAETIMEDOUT";
+      description = "Timed out, host failed to respond";
+      break;
+    case WSAECONNREFUSED:
+      string = "WSAECONNREFUSED";
+      description = "Host actively refused connection";
+      break;
+    case WSAEHOSTDOWN:
+      string = "WSAEHOSTDOWN";
+      description = "Host is down";
+      break;
+    case WSAEHOSTUNREACH:
+      string = "WSAEHOSTUNREACH";
+      description = "Host is unreachable";
+      break;
+    case WSAHOST_NOT_FOUND:
+      string = "WSAHOST_NOT_FOUND";
+      description = "No such host is known";
+      break;
+  }
+  if (description_result) {
+    *description_result = description;
+  }
+  return string;
+}
+
+void ReportWSAError(LPCSTR context, int error, const SocketAddress& address) {
+  LPCSTR description_string;
+  LPCSTR error_string = WSAErrorToString(error, &description_string);
+  RTC_LOG(LS_INFO) << context << " = " << error << " (" << error_string << ":"
+                   << description_string << ") [" << address.ToString() << "]";
+}
+#else
+void ReportWSAError(LPCSTR context, int error, const SocketAddress& address) {}
+#endif
+
+/////////////////////////////////////////////////////////////////////////////
+// Win32Socket::EventSink
+/////////////////////////////////////////////////////////////////////////////
+
+#define WM_SOCKETNOTIFY  (WM_USER + 50)
+#define WM_DNSNOTIFY     (WM_USER + 51)
+
+struct Win32Socket::DnsLookup {
+  HANDLE handle;
+  uint16_t port;
+  char buffer[MAXGETHOSTSTRUCT];
+};
+
+class Win32Socket::EventSink : public Win32Window {
+ public:
+  explicit EventSink(Win32Socket * parent) : parent_(parent) { }
+
+  void Dispose();
+
+  bool OnMessage(UINT uMsg,
+                 WPARAM wParam,
+                 LPARAM lParam,
+                 LRESULT& result) override;
+  void OnNcDestroy() override;
+
+ private:
+  bool OnSocketNotify(UINT uMsg, WPARAM wParam, LPARAM lParam, LRESULT& result);
+  bool OnDnsNotify(WPARAM wParam, LPARAM lParam, LRESULT& result);
+
+  Win32Socket * parent_;
+};
+
+void Win32Socket::EventSink::Dispose() {
+  parent_ = nullptr;
+  if (::IsWindow(handle())) {
+    ::DestroyWindow(handle());
+  } else {
+    delete this;
+  }
+}
+
+bool Win32Socket::EventSink::OnMessage(UINT uMsg, WPARAM wParam,
+                                       LPARAM lParam, LRESULT& result) {
+  switch (uMsg) {
+  case WM_SOCKETNOTIFY:
+  case WM_TIMER:
+    return OnSocketNotify(uMsg, wParam, lParam, result);
+  case WM_DNSNOTIFY:
+    return OnDnsNotify(wParam, lParam, result);
+  }
+  return false;
+}
+
+bool Win32Socket::EventSink::OnSocketNotify(UINT uMsg, WPARAM wParam,
+                                            LPARAM lParam, LRESULT& result) {
+  result = 0;
+
+  int wsa_event = WSAGETSELECTEVENT(lParam);
+  int wsa_error = WSAGETSELECTERROR(lParam);
+
+  // Treat connect timeouts as close notifications
+  if (uMsg == WM_TIMER) {
+    wsa_event = FD_CLOSE;
+    wsa_error = WSAETIMEDOUT;
+  }
+
+  if (parent_)
+    parent_->OnSocketNotify(static_cast<SOCKET>(wParam), wsa_event, wsa_error);
+  return true;
+}
+
+bool Win32Socket::EventSink::OnDnsNotify(WPARAM wParam, LPARAM lParam,
+                                         LRESULT& result) {
+  result = 0;
+
+  int error = WSAGETASYNCERROR(lParam);
+  if (parent_)
+    parent_->OnDnsNotify(reinterpret_cast<HANDLE>(wParam), error);
+  return true;
+}
+
+void Win32Socket::EventSink::OnNcDestroy() {
+  if (parent_) {
+    RTC_LOG(LS_ERROR) << "EventSink hwnd is being destroyed, but the event sink"
+                         " hasn't yet been disposed.";
+  } else {
+    delete this;
+  }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// Win32Socket
+/////////////////////////////////////////////////////////////////////////////
+
+Win32Socket::Win32Socket()
+    : socket_(INVALID_SOCKET),
+      error_(0),
+      state_(CS_CLOSED),
+      connect_time_(0),
+      closing_(false),
+      close_error_(0),
+      sink_(nullptr),
+      dns_(nullptr) {}
+
+Win32Socket::~Win32Socket() {
+  Close();
+}
+
+bool Win32Socket::CreateT(int family, int type) {
+  Close();
+  int proto = (SOCK_DGRAM == type) ? IPPROTO_UDP : IPPROTO_TCP;
+  socket_ = ::WSASocket(family, type, proto, nullptr, 0, 0);
+  if (socket_ == INVALID_SOCKET) {
+    UpdateLastError();
+    return false;
+  }
+  if ((SOCK_DGRAM == type) && !SetAsync(FD_READ | FD_WRITE)) {
+    return false;
+  }
+  return true;
+}
+
+int Win32Socket::Attach(SOCKET s) {
+  RTC_DCHECK(socket_ == INVALID_SOCKET);
+  if (socket_ != INVALID_SOCKET)
+    return SOCKET_ERROR;
+
+  RTC_DCHECK(s != INVALID_SOCKET);
+  if (s == INVALID_SOCKET)
+    return SOCKET_ERROR;
+
+  socket_ = s;
+  state_ = CS_CONNECTED;
+
+  if (!SetAsync(FD_READ | FD_WRITE | FD_CLOSE))
+    return SOCKET_ERROR;
+
+  return 0;
+}
+
+void Win32Socket::SetTimeout(int ms) {
+  if (sink_)
+    ::SetTimer(sink_->handle(), 1, ms, 0);
+}
+
+SocketAddress Win32Socket::GetLocalAddress() const {
+  sockaddr_storage addr = {0};
+  socklen_t addrlen = sizeof(addr);
+  int result = ::getsockname(socket_, reinterpret_cast<sockaddr*>(&addr),
+                             &addrlen);
+  SocketAddress address;
+  if (result >= 0) {
+    SocketAddressFromSockAddrStorage(addr, &address);
+  } else {
+    RTC_LOG(LS_WARNING) << "GetLocalAddress: unable to get local addr, socket="
+                        << socket_;
+  }
+  return address;
+}
+
+SocketAddress Win32Socket::GetRemoteAddress() const {
+  sockaddr_storage addr = {0};
+  socklen_t addrlen = sizeof(addr);
+  int result = ::getpeername(socket_, reinterpret_cast<sockaddr*>(&addr),
+                             &addrlen);
+  SocketAddress address;
+  if (result >= 0) {
+    SocketAddressFromSockAddrStorage(addr, &address);
+  } else {
+    RTC_LOG(LS_WARNING)
+        << "GetRemoteAddress: unable to get remote addr, socket=" << socket_;
+  }
+  return address;
+}
+
+int Win32Socket::Bind(const SocketAddress& addr) {
+  RTC_DCHECK(socket_ != INVALID_SOCKET);
+  if (socket_ == INVALID_SOCKET)
+    return SOCKET_ERROR;
+
+  sockaddr_storage saddr;
+  size_t len = addr.ToSockAddrStorage(&saddr);
+  int err = ::bind(socket_,
+                   reinterpret_cast<sockaddr*>(&saddr),
+                   static_cast<int>(len));
+  UpdateLastError();
+  return err;
+}
+
+int Win32Socket::Connect(const SocketAddress& addr) {
+  if (state_ != CS_CLOSED) {
+    SetError(EALREADY);
+    return SOCKET_ERROR;
+  }
+
+  if (!addr.IsUnresolvedIP()) {
+    return DoConnect(addr);
+  }
+
+  RTC_LOG_F(LS_INFO) << "async dns lookup (" << addr.hostname() << ")";
+  DnsLookup * dns = new DnsLookup;
+  if (!sink_) {
+    // Explicitly create the sink ourselves here; we can't rely on SetAsync
+    // because we don't have a socket_ yet.
+    CreateSink();
+  }
+  // TODO: Replace with IPv6 compatible lookup.
+  dns->handle = WSAAsyncGetHostByName(sink_->handle(), WM_DNSNOTIFY,
+                                      addr.hostname().c_str(), dns->buffer,
+                                      sizeof(dns->buffer));
+
+  if (!dns->handle) {
+    RTC_LOG_F(LS_ERROR) << "WSAAsyncGetHostByName error: " << WSAGetLastError();
+    delete dns;
+    UpdateLastError();
+    Close();
+    return SOCKET_ERROR;
+  }
+
+  dns->port = addr.port();
+  dns_ = dns;
+  state_ = CS_CONNECTING;
+  return 0;
+}
+
+int Win32Socket::DoConnect(const SocketAddress& addr) {
+  if ((socket_ == INVALID_SOCKET) && !CreateT(addr.family(), SOCK_STREAM)) {
+    return SOCKET_ERROR;
+  }
+  if (!SetAsync(FD_READ | FD_WRITE | FD_CONNECT | FD_CLOSE)) {
+    return SOCKET_ERROR;
+  }
+
+  sockaddr_storage saddr = {0};
+  size_t len = addr.ToSockAddrStorage(&saddr);
+  connect_time_ = Time();
+  int result = connect(socket_,
+                       reinterpret_cast<SOCKADDR*>(&saddr),
+                       static_cast<int>(len));
+  if (result != SOCKET_ERROR) {
+    state_ = CS_CONNECTED;
+  } else {
+    int code = WSAGetLastError();
+    if (code == WSAEWOULDBLOCK) {
+      state_ = CS_CONNECTING;
+    } else {
+      ReportWSAError("WSAAsync:connect", code, addr);
+      error_ = code;
+      Close();
+      return SOCKET_ERROR;
+    }
+  }
+  addr_ = addr;
+
+  return 0;
+}
+
+int Win32Socket::GetError() const {
+  return error_;
+}
+
+void Win32Socket::SetError(int error) {
+  error_ = error;
+}
+
+Socket::ConnState Win32Socket::GetState() const {
+  return state_;
+}
+
+int Win32Socket::GetOption(Option opt, int* value) {
+  int slevel;
+  int sopt;
+  if (TranslateOption(opt, &slevel, &sopt) == -1)
+    return -1;
+
+  char* p = reinterpret_cast<char*>(value);
+  int optlen = sizeof(value);
+  return ::getsockopt(socket_, slevel, sopt, p, &optlen);
+}
+
+int Win32Socket::SetOption(Option opt, int value) {
+  int slevel;
+  int sopt;
+  if (TranslateOption(opt, &slevel, &sopt) == -1)
+    return -1;
+
+  const char* p = reinterpret_cast<const char*>(&value);
+  return ::setsockopt(socket_, slevel, sopt, p, sizeof(value));
+}
+
+int Win32Socket::Send(const void* buffer, size_t length) {
+  int sent = ::send(socket_,
+                    reinterpret_cast<const char*>(buffer),
+                    static_cast<int>(length),
+                    0);
+  UpdateLastError();
+  return sent;
+}
+
+int Win32Socket::SendTo(const void* buffer, size_t length,
+                        const SocketAddress& addr) {
+  sockaddr_storage saddr;
+  size_t addr_len = addr.ToSockAddrStorage(&saddr);
+  int sent = ::sendto(socket_, reinterpret_cast<const char*>(buffer),
+                      static_cast<int>(length), 0,
+                      reinterpret_cast<sockaddr*>(&saddr),
+                      static_cast<int>(addr_len));
+  UpdateLastError();
+  return sent;
+}
+
+int Win32Socket::Recv(void* buffer, size_t length, int64_t* timestamp) {
+  if (timestamp) {
+    *timestamp = -1;
+  }
+  int received = ::recv(socket_, static_cast<char*>(buffer),
+                        static_cast<int>(length), 0);
+  UpdateLastError();
+  if (closing_ && received <= static_cast<int>(length))
+    PostClosed();
+  return received;
+}
+
+int Win32Socket::RecvFrom(void* buffer,
+                          size_t length,
+                          SocketAddress* out_addr,
+                          int64_t* timestamp) {
+  if (timestamp) {
+    *timestamp = -1;
+  }
+  sockaddr_storage saddr;
+  socklen_t addr_len = sizeof(saddr);
+  int received = ::recvfrom(socket_, static_cast<char*>(buffer),
+                            static_cast<int>(length), 0,
+                            reinterpret_cast<sockaddr*>(&saddr), &addr_len);
+  UpdateLastError();
+  if (received != SOCKET_ERROR)
+    SocketAddressFromSockAddrStorage(saddr, out_addr);
+  if (closing_ && received <= static_cast<int>(length))
+    PostClosed();
+  return received;
+}
+
+int Win32Socket::Listen(int backlog) {
+  int err = ::listen(socket_, backlog);
+  if (!SetAsync(FD_ACCEPT))
+    return SOCKET_ERROR;
+
+  UpdateLastError();
+  if (err == 0)
+    state_ = CS_CONNECTING;
+  return err;
+}
+
+Win32Socket* Win32Socket::Accept(SocketAddress* out_addr) {
+  sockaddr_storage saddr;
+  socklen_t addr_len = sizeof(saddr);
+  SOCKET s = ::accept(socket_, reinterpret_cast<sockaddr*>(&saddr), &addr_len);
+  UpdateLastError();
+  if (s == INVALID_SOCKET)
+    return nullptr;
+  if (out_addr)
+    SocketAddressFromSockAddrStorage(saddr, out_addr);
+  Win32Socket* socket = new Win32Socket;
+  if (0 == socket->Attach(s))
+    return socket;
+  delete socket;
+  return nullptr;
+}
+
+int Win32Socket::Close() {
+  int err = 0;
+  if (socket_ != INVALID_SOCKET) {
+    err = ::closesocket(socket_);
+    socket_ = INVALID_SOCKET;
+    closing_ = false;
+    close_error_ = 0;
+    UpdateLastError();
+  }
+  if (dns_) {
+    WSACancelAsyncRequest(dns_->handle);
+    delete dns_;
+    dns_ = nullptr;
+  }
+  if (sink_) {
+    sink_->Dispose();
+    sink_ = nullptr;
+  }
+  addr_.Clear();
+  state_ = CS_CLOSED;
+  return err;
+}
+
+void Win32Socket::CreateSink() {
+  RTC_DCHECK(nullptr == sink_);
+
+  // Create window
+  sink_ = new EventSink(this);
+  sink_->Create(nullptr, L"EventSink", 0, 0, 0, 0, 10, 10);
+}
+
+bool Win32Socket::SetAsync(int events) {
+  if (nullptr == sink_) {
+    CreateSink();
+    RTC_DCHECK(nullptr != sink_);
+  }
+
+  // start the async select
+  if (WSAAsyncSelect(socket_, sink_->handle(), WM_SOCKETNOTIFY, events)
+      == SOCKET_ERROR) {
+    UpdateLastError();
+    Close();
+    return false;
+  }
+
+  return true;
+}
+
+bool Win32Socket::HandleClosed(int close_error) {
+  // WM_CLOSE will be received before all data has been read, so we need to
+  // hold on to it until the read buffer has been drained.
+  char ch;
+  closing_ = true;
+  close_error_ = close_error;
+  return (::recv(socket_, &ch, 1, MSG_PEEK) <= 0);
+}
+
+void Win32Socket::PostClosed() {
+  // If we see that the buffer is indeed drained, then send the close.
+  closing_ = false;
+  ::PostMessage(sink_->handle(), WM_SOCKETNOTIFY,
+                socket_, WSAMAKESELECTREPLY(FD_CLOSE, close_error_));
+}
+
+void Win32Socket::UpdateLastError() {
+  error_ = WSAGetLastError();
+}
+
+int Win32Socket::TranslateOption(Option opt, int* slevel, int* sopt) {
+  switch (opt) {
+    case OPT_DONTFRAGMENT:
+      *slevel = IPPROTO_IP;
+      *sopt = IP_DONTFRAGMENT;
+      break;
+    case OPT_RCVBUF:
+      *slevel = SOL_SOCKET;
+      *sopt = SO_RCVBUF;
+      break;
+    case OPT_SNDBUF:
+      *slevel = SOL_SOCKET;
+      *sopt = SO_SNDBUF;
+      break;
+    case OPT_NODELAY:
+      *slevel = IPPROTO_TCP;
+      *sopt = TCP_NODELAY;
+      break;
+    case OPT_DSCP:
+      RTC_LOG(LS_WARNING) << "Socket::OPT_DSCP not supported.";
+      return -1;
+    default:
+      RTC_NOTREACHED();
+      return -1;
+  }
+  return 0;
+}
+
+void Win32Socket::OnSocketNotify(SOCKET socket, int event, int error) {
+  // Ignore events if we're already closed.
+  if (socket != socket_)
+    return;
+
+  error_ = error;
+  switch (event) {
+    case FD_CONNECT:
+      if (error != ERROR_SUCCESS) {
+        ReportWSAError("WSAAsync:connect notify", error, addr_);
+#if !defined(NDEBUG)
+        int64_t duration = TimeSince(connect_time_);
+        RTC_LOG(LS_INFO) << "WSAAsync:connect error (" << duration
+                         << " ms), faking close";
+#endif
+        state_ = CS_CLOSED;
+        // If you get an error connecting, close doesn't really do anything
+        // and it certainly doesn't send back any close notification, but
+        // we really only maintain a few states, so it is easiest to get
+        // back into a known state by pretending that a close happened, even
+        // though the connect event never did occur.
+        SignalCloseEvent(this, error);
+      } else {
+#if !defined(NDEBUG)
+        int64_t duration = TimeSince(connect_time_);
+        RTC_LOG(LS_INFO) << "WSAAsync:connect (" << duration << " ms)";
+#endif
+        state_ = CS_CONNECTED;
+        SignalConnectEvent(this);
+      }
+      break;
+
+    case FD_ACCEPT:
+    case FD_READ:
+      if (error != ERROR_SUCCESS) {
+        ReportWSAError("WSAAsync:read notify", error, addr_);
+      } else {
+        SignalReadEvent(this);
+      }
+      break;
+
+    case FD_WRITE:
+      if (error != ERROR_SUCCESS) {
+        ReportWSAError("WSAAsync:write notify", error, addr_);
+      } else {
+        SignalWriteEvent(this);
+      }
+      break;
+
+    case FD_CLOSE:
+      if (HandleClosed(error)) {
+        ReportWSAError("WSAAsync:close notify", error, addr_);
+        state_ = CS_CLOSED;
+        SignalCloseEvent(this, error);
+      }
+      break;
+  }
+}
+
+void Win32Socket::OnDnsNotify(HANDLE task, int error) {
+  if (!dns_ || dns_->handle != task)
+    return;
+
+  uint32_t ip = 0;
+  if (error == 0) {
+    hostent* pHost = reinterpret_cast<hostent*>(dns_->buffer);
+    uint32_t net_ip = *reinterpret_cast<uint32_t*>(pHost->h_addr_list[0]);
+    ip = NetworkToHost32(net_ip);
+  }
+
+  RTC_LOG_F(LS_INFO) << "(" << IPAddress(ip).ToSensitiveString() << ", "
+                     << error << ")";
+
+  if (error == 0) {
+    SocketAddress address(ip, dns_->port);
+    error = DoConnect(address);
+  } else {
+    Close();
+  }
+
+  if (error) {
+    error_ = error;
+    SignalCloseEvent(this, error_);
+  } else {
+    delete dns_;
+    dns_ = nullptr;
+  }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Win32SocketServer
+// Provides cricket base services on top of a win32 gui thread
+///////////////////////////////////////////////////////////////////////////////
+
+static UINT s_wm_wakeup_id = 0;
+const TCHAR Win32SocketServer::kWindowName[] = L"libjingle Message Window";
+
+Win32SocketServer::Win32SocketServer()
+    : wnd_(this),
+      posted_(false),
+      hdlg_(nullptr) {
+  if (s_wm_wakeup_id == 0)
+    s_wm_wakeup_id = RegisterWindowMessage(L"WM_WAKEUP");
+  if (!wnd_.Create(nullptr, kWindowName, 0, 0, 0, 0, 0, 0)) {
+    RTC_LOG_GLE(LS_ERROR) << "Failed to create message window.";
+  }
+}
+
+Win32SocketServer::~Win32SocketServer() {
+  if (wnd_.handle() != nullptr) {
+    KillTimer(wnd_.handle(), 1);
+    wnd_.Destroy();
+  }
+}
+
+Socket* Win32SocketServer::CreateSocket(int type) {
+  return CreateSocket(AF_INET, type);
+}
+
+Socket* Win32SocketServer::CreateSocket(int family, int type) {
+  return CreateAsyncSocket(family, type);
+}
+
+AsyncSocket* Win32SocketServer::CreateAsyncSocket(int type) {
+  return CreateAsyncSocket(AF_INET, type);
+}
+
+AsyncSocket* Win32SocketServer::CreateAsyncSocket(int family, int type) {
+  Win32Socket* socket = new Win32Socket;
+  if (socket->CreateT(family, type)) {
+    return socket;
+  }
+  delete socket;
+  return nullptr;
+}
+
+void Win32SocketServer::SetMessageQueue(MessageQueue* queue) {
+  message_queue_ = queue;
+}
+
+bool Win32SocketServer::Wait(int cms, bool process_io) {
+  BOOL b;
+  if (process_io) {
+    // Spin the Win32 message pump at least once, and as long as requested.
+    // This is the Thread::ProcessMessages case.
+    uint32_t start = Time();
+    do {
+      MSG msg;
+      SetTimer(wnd_.handle(), 0, cms, nullptr);
+      // Get the next available message. If we have a modeless dialog, give
+      // give the message to IsDialogMessage, which will return true if it
+      // was a message for the dialog that it handled internally.
+      // Otherwise, dispatch as usual via Translate/DispatchMessage.
+      b = GetMessage(&msg, nullptr, 0, 0);
+      if (b == -1) {
+        RTC_LOG_GLE(LS_ERROR) << "GetMessage failed.";
+        return false;
+      } else if(b) {
+        if (!hdlg_ || !IsDialogMessage(hdlg_, &msg)) {
+          TranslateMessage(&msg);
+          DispatchMessage(&msg);
+        }
+      }
+      KillTimer(wnd_.handle(), 0);
+    } while (b && TimeSince(start) < cms);
+  } else if (cms != 0) {
+    // Sit and wait forever for a WakeUp. This is the Thread::Send case.
+    RTC_DCHECK(cms == -1);
+    MSG msg;
+    b = GetMessage(&msg, nullptr, s_wm_wakeup_id, s_wm_wakeup_id);
+    {
+      CritScope scope(&cs_);
+      posted_ = false;
+    }
+  } else {
+    // No-op (cms == 0 && !process_io). This is the Pump case.
+    b = TRUE;
+  }
+  return (b != FALSE);
+}
+
+void Win32SocketServer::WakeUp() {
+  if (wnd_.handle()) {
+    // Set the "message pending" flag, if not already set.
+    {
+      CritScope scope(&cs_);
+      if (posted_)
+        return;
+      posted_ = true;
+    }
+
+    PostMessage(wnd_.handle(), s_wm_wakeup_id, 0, 0);
+  }
+}
+
+void Win32SocketServer::Pump() {
+  // Clear the "message pending" flag.
+  {
+    CritScope scope(&cs_);
+    posted_ = false;
+  }
+
+  // Dispatch all the messages that are currently in our queue. If new messages
+  // are posted during the dispatch, they will be handled in the next Pump.
+  // We use max(1, ...) to make sure we try to dispatch at least once, since
+  // this allow us to process "sent" messages, not included in the size() count.
+  Message msg;
+  for (size_t max_messages_to_process =
+           std::max<size_t>(1, message_queue_->size());
+       max_messages_to_process > 0 && message_queue_->Get(&msg, 0, false);
+       --max_messages_to_process) {
+    message_queue_->Dispatch(&msg);
+  }
+
+  // Anything remaining?
+  int delay = message_queue_->GetDelay();
+  if (delay == -1) {
+    KillTimer(wnd_.handle(), 1);
+  } else {
+    SetTimer(wnd_.handle(), 1, delay, nullptr);
+  }
+}
+
+bool Win32SocketServer::MessageWindow::OnMessage(UINT wm, WPARAM wp,
+                                                 LPARAM lp, LRESULT& lr) {
+  bool handled = false;
+  if (wm == s_wm_wakeup_id || (wm == WM_TIMER && wp == 1)) {
+    ss_->Pump();
+    lr = 0;
+    handled = true;
+  }
+  return handled;
+}
+
+Win32Thread::Win32Thread(SocketServer* ss) : Thread(ss), id_(0) {}
+
+Win32Thread::~Win32Thread() {
+  Stop();
+}
+
+void Win32Thread::Run() {
+  id_ = GetCurrentThreadId();
+  Thread::Run();
+  id_ = 0;
+}
+
+void Win32Thread::Quit() {
+  PostThreadMessage(id_, WM_QUIT, 0, 0);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/win32socketserver.h b/rtc_base/win32socketserver.h
new file mode 100644
index 0000000..ce29bc5
--- /dev/null
+++ b/rtc_base/win32socketserver.h
@@ -0,0 +1,158 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_WIN32SOCKETSERVER_H_
+#define RTC_BASE_WIN32SOCKETSERVER_H_
+
+#if defined(WEBRTC_WIN)
+#include "rtc_base/asyncsocket.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/messagequeue.h"
+#include "rtc_base/socket.h"
+#include "rtc_base/socketfactory.h"
+#include "rtc_base/socketserver.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/win32window.h"
+
+namespace rtc {
+
+///////////////////////////////////////////////////////////////////////////////
+// Win32Socket
+///////////////////////////////////////////////////////////////////////////////
+
+class Win32Socket : public AsyncSocket {
+ public:
+  Win32Socket();
+  ~Win32Socket() override;
+
+  bool CreateT(int family, int type);
+
+  int Attach(SOCKET s);
+  void SetTimeout(int ms);
+
+  // AsyncSocket Interface
+  SocketAddress GetLocalAddress() const override;
+  SocketAddress GetRemoteAddress() const override;
+  int Bind(const SocketAddress& addr) override;
+  int Connect(const SocketAddress& addr) override;
+  int Send(const void* buffer, size_t length) override;
+  int SendTo(const void* buffer,
+             size_t length,
+             const SocketAddress& addr) override;
+  int Recv(void* buffer, size_t length, int64_t* timestamp) override;
+  int RecvFrom(void* buffer,
+               size_t length,
+               SocketAddress* out_addr,
+               int64_t* timestamp) override;
+  int Listen(int backlog) override;
+  Win32Socket* Accept(SocketAddress* out_addr) override;
+  int Close() override;
+  int GetError() const override;
+  void SetError(int error) override;
+  ConnState GetState() const override;
+  int GetOption(Option opt, int* value) override;
+  int SetOption(Option opt, int value) override;
+
+ private:
+  void CreateSink();
+  bool SetAsync(int events);
+  int DoConnect(const SocketAddress& addr);
+  bool HandleClosed(int close_error);
+  void PostClosed();
+  void UpdateLastError();
+  static int TranslateOption(Option opt, int* slevel, int* sopt);
+
+  void OnSocketNotify(SOCKET socket, int event, int error);
+  void OnDnsNotify(HANDLE task, int error);
+
+  SOCKET socket_;
+  int error_;
+  ConnState state_;
+  SocketAddress addr_;         // address that we connected to (see DoConnect)
+  uint32_t connect_time_;
+  bool closing_;
+  int close_error_;
+
+  class EventSink;
+  friend class EventSink;
+  EventSink * sink_;
+
+  struct DnsLookup;
+  DnsLookup * dns_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// Win32SocketServer
+///////////////////////////////////////////////////////////////////////////////
+
+class Win32SocketServer : public SocketServer {
+ public:
+  Win32SocketServer();
+  ~Win32SocketServer() override;
+
+  void set_modeless_dialog(HWND hdlg) {
+    hdlg_ = hdlg;
+  }
+
+  // SocketServer Interface
+  Socket* CreateSocket(int type) override;
+  Socket* CreateSocket(int family, int type) override;
+
+  AsyncSocket* CreateAsyncSocket(int type) override;
+  AsyncSocket* CreateAsyncSocket(int family, int type) override;
+
+  void SetMessageQueue(MessageQueue* queue) override;
+  bool Wait(int cms, bool process_io) override;
+  void WakeUp() override;
+
+  void Pump();
+
+  HWND handle() { return wnd_.handle(); }
+
+ private:
+  class MessageWindow : public Win32Window {
+   public:
+    explicit MessageWindow(Win32SocketServer* ss) : ss_(ss) {}
+   private:
+    bool OnMessage(UINT msg, WPARAM wp, LPARAM lp, LRESULT& result) override;
+    Win32SocketServer* ss_;
+  };
+
+  static const TCHAR kWindowName[];
+  MessageQueue *message_queue_;
+  MessageWindow wnd_;
+  CriticalSection cs_;
+  bool posted_;
+  HWND hdlg_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// Win32Thread. Automatically pumps Windows messages.
+///////////////////////////////////////////////////////////////////////////////
+
+class Win32Thread : public Thread {
+ public:
+  explicit Win32Thread(SocketServer* ss);
+  ~Win32Thread() override;
+
+  void Run() override;
+  void Quit() override;
+
+ private:
+  DWORD id_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+}  // namespace rtc
+
+#endif  // WEBRTC_WIN
+
+#endif  // RTC_BASE_WIN32SOCKETSERVER_H_
diff --git a/rtc_base/win32socketserver_unittest.cc b/rtc_base/win32socketserver_unittest.cc
new file mode 100644
index 0000000..60295f1
--- /dev/null
+++ b/rtc_base/win32socketserver_unittest.cc
@@ -0,0 +1,161 @@
+/*
+ *  Copyright 2009 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "rtc_base/win32socketserver.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/socket_unittest.h"
+#include "rtc_base/thread.h"
+
+namespace rtc {
+
+// Test that Win32SocketServer::Wait works as expected.
+TEST(Win32SocketServerTest, TestWait) {
+  Win32SocketServer server;
+  uint32_t start = Time();
+  server.Wait(1000, true);
+  EXPECT_GE(TimeSince(start), 1000);
+}
+
+// Test that Win32Socket::Pump does not touch general Windows messages.
+TEST(Win32SocketServerTest, TestPump) {
+  Win32SocketServer server;
+  rtc::AutoSocketServerThread thread(&server);
+  EXPECT_EQ(TRUE, PostMessage(nullptr, WM_USER, 999, 0));
+  server.Pump();
+  MSG msg;
+  EXPECT_EQ(TRUE, PeekMessage(&msg, nullptr, WM_USER, 0, PM_REMOVE));
+  EXPECT_EQ(static_cast<UINT>(WM_USER), msg.message);
+  EXPECT_EQ(999u, msg.wParam);
+}
+
+// Test that Win32Socket passes all the generic Socket tests.
+class Win32SocketTest : public SocketTest {
+ protected:
+  Win32SocketTest() : thread_(&server_) {}
+  Win32SocketServer server_;
+  rtc::AutoSocketServerThread thread_;
+};
+
+TEST_F(Win32SocketTest, TestConnectIPv4) {
+  SocketTest::TestConnectIPv4();
+}
+
+TEST_F(Win32SocketTest, TestConnectIPv6) {
+  SocketTest::TestConnectIPv6();
+}
+
+TEST_F(Win32SocketTest, TestConnectWithDnsLookupIPv4) {
+  SocketTest::TestConnectWithDnsLookupIPv4();
+}
+
+TEST_F(Win32SocketTest, TestConnectWithDnsLookupIPv6) {
+  SocketTest::TestConnectWithDnsLookupIPv6();
+}
+
+TEST_F(Win32SocketTest, TestConnectFailIPv4) {
+  SocketTest::TestConnectFailIPv4();
+}
+
+TEST_F(Win32SocketTest, TestConnectFailIPv6) {
+  SocketTest::TestConnectFailIPv6();
+}
+
+TEST_F(Win32SocketTest, TestConnectWithDnsLookupFailIPv4) {
+  SocketTest::TestConnectWithDnsLookupFailIPv4();
+}
+
+TEST_F(Win32SocketTest, TestConnectWithDnsLookupFailIPv6) {
+  SocketTest::TestConnectWithDnsLookupFailIPv6();
+}
+
+TEST_F(Win32SocketTest, TestConnectWithClosedSocketIPv4) {
+  SocketTest::TestConnectWithClosedSocketIPv4();
+}
+
+TEST_F(Win32SocketTest, TestConnectWithClosedSocketIPv6) {
+  SocketTest::TestConnectWithClosedSocketIPv6();
+}
+
+TEST_F(Win32SocketTest, TestConnectWhileNotClosedIPv4) {
+  SocketTest::TestConnectWhileNotClosedIPv4();
+}
+
+TEST_F(Win32SocketTest, TestConnectWhileNotClosedIPv6) {
+  SocketTest::TestConnectWhileNotClosedIPv6();
+}
+
+TEST_F(Win32SocketTest, TestServerCloseDuringConnectIPv4) {
+  SocketTest::TestServerCloseDuringConnectIPv4();
+}
+
+TEST_F(Win32SocketTest, TestServerCloseDuringConnectIPv6) {
+  SocketTest::TestServerCloseDuringConnectIPv6();
+}
+
+TEST_F(Win32SocketTest, TestClientCloseDuringConnectIPv4) {
+  SocketTest::TestClientCloseDuringConnectIPv4();
+}
+
+TEST_F(Win32SocketTest, TestClientCloseDuringConnectIPv6) {
+  SocketTest::TestClientCloseDuringConnectIPv6();
+}
+
+TEST_F(Win32SocketTest, TestServerCloseIPv4) {
+  SocketTest::TestServerCloseIPv4();
+}
+
+TEST_F(Win32SocketTest, TestServerCloseIPv6) {
+  SocketTest::TestServerCloseIPv6();
+}
+
+TEST_F(Win32SocketTest, TestCloseInClosedCallbackIPv4) {
+  SocketTest::TestCloseInClosedCallbackIPv4();
+}
+
+TEST_F(Win32SocketTest, TestCloseInClosedCallbackIPv6) {
+  SocketTest::TestCloseInClosedCallbackIPv6();
+}
+
+TEST_F(Win32SocketTest, TestSocketServerWaitIPv4) {
+  SocketTest::TestSocketServerWaitIPv4();
+}
+
+TEST_F(Win32SocketTest, TestSocketServerWaitIPv6) {
+  SocketTest::TestSocketServerWaitIPv6();
+}
+
+TEST_F(Win32SocketTest, TestTcpIPv4) {
+  SocketTest::TestTcpIPv4();
+}
+
+TEST_F(Win32SocketTest, TestTcpIPv6) {
+  SocketTest::TestTcpIPv6();
+}
+
+TEST_F(Win32SocketTest, TestUdpIPv4) {
+  SocketTest::TestUdpIPv4();
+}
+
+TEST_F(Win32SocketTest, TestUdpIPv6) {
+  SocketTest::TestUdpIPv6();
+}
+
+// Breaks win_x64_dbg bot.
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=6178
+TEST_F(Win32SocketTest, DISABLED_TestGetSetOptionsIPv4) {
+  SocketTest::TestGetSetOptionsIPv4();
+}
+
+// Breaks win_x64_dbg bot.
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=6178
+TEST_F(Win32SocketTest, DISABLED_TestGetSetOptionsIPv6) {
+  SocketTest::TestGetSetOptionsIPv6();
+}
+
+}  // namespace rtc
diff --git a/rtc_base/win32window.cc b/rtc_base/win32window.cc
new file mode 100644
index 0000000..d71c68e
--- /dev/null
+++ b/rtc_base/win32window.cc
@@ -0,0 +1,131 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/win32window.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+
+namespace rtc {
+
+///////////////////////////////////////////////////////////////////////////////
+// Win32Window
+///////////////////////////////////////////////////////////////////////////////
+
+static const wchar_t kWindowBaseClassName[] = L"WindowBaseClass";
+HINSTANCE Win32Window::instance_ = nullptr;
+ATOM Win32Window::window_class_ = 0;
+
+Win32Window::Win32Window() : wnd_(nullptr) {}
+
+Win32Window::~Win32Window() {
+  RTC_DCHECK(nullptr == wnd_);
+}
+
+bool Win32Window::Create(HWND parent, const wchar_t* title, DWORD style,
+                         DWORD exstyle, int x, int y, int cx, int cy) {
+  if (wnd_) {
+    // Window already exists.
+    return false;
+  }
+
+  if (!window_class_) {
+    if (!GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
+                           GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+                           reinterpret_cast<LPCWSTR>(&Win32Window::WndProc),
+                           &instance_)) {
+      RTC_LOG_GLE(LS_ERROR) << "GetModuleHandleEx failed";
+      return false;
+    }
+
+    // Class not registered, register it.
+    WNDCLASSEX wcex;
+    memset(&wcex, 0, sizeof(wcex));
+    wcex.cbSize = sizeof(wcex);
+    wcex.hInstance = instance_;
+    wcex.lpfnWndProc = &Win32Window::WndProc;
+    wcex.lpszClassName = kWindowBaseClassName;
+    window_class_ = ::RegisterClassEx(&wcex);
+    if (!window_class_) {
+      RTC_LOG_GLE(LS_ERROR) << "RegisterClassEx failed";
+      return false;
+    }
+  }
+  wnd_ = ::CreateWindowEx(exstyle, kWindowBaseClassName, title, style, x, y, cx,
+                          cy, parent, nullptr, instance_, this);
+  return (nullptr != wnd_);
+}
+
+void Win32Window::Destroy() {
+  const bool success = ::DestroyWindow(wnd_);
+  RTC_DCHECK(success);
+}
+
+void Win32Window::Shutdown() {
+  if (window_class_) {
+    ::UnregisterClass(MAKEINTATOM(window_class_), instance_);
+    window_class_ = 0;
+  }
+}
+
+bool Win32Window::OnMessage(UINT uMsg, WPARAM wParam, LPARAM lParam,
+                            LRESULT& result) {
+  switch (uMsg) {
+  case WM_CLOSE:
+    if (!OnClose()) {
+      result = 0;
+      return true;
+    }
+    break;
+  }
+  return false;
+}
+
+bool Win32Window::OnClose() {
+  return true;
+}
+
+void Win32Window::OnNcDestroy() {
+  // Do nothing. }
+}
+
+LRESULT Win32Window::WndProc(HWND hwnd,
+                             UINT uMsg,
+                             WPARAM wParam,
+                             LPARAM lParam) {
+  Win32Window* that =
+      reinterpret_cast<Win32Window*>(::GetWindowLongPtr(hwnd, GWLP_USERDATA));
+  if (!that && (WM_CREATE == uMsg)) {
+    CREATESTRUCT* cs = reinterpret_cast<CREATESTRUCT*>(lParam);
+    that = static_cast<Win32Window*>(cs->lpCreateParams);
+    that->wnd_ = hwnd;
+    ::SetWindowLongPtr(hwnd, GWLP_USERDATA, reinterpret_cast<LONG_PTR>(that));
+  }
+  if (that) {
+    LRESULT result;
+    bool handled = that->OnMessage(uMsg, wParam, lParam, result);
+    if (WM_DESTROY == uMsg) {
+      for (HWND child = ::GetWindow(hwnd, GW_CHILD); child;
+           child = ::GetWindow(child, GW_HWNDNEXT)) {
+        RTC_LOG(LS_INFO) << "Child window: " << static_cast<void*>(child);
+      }
+    }
+    if (WM_NCDESTROY == uMsg) {
+      ::SetWindowLongPtr(hwnd, GWLP_USERDATA, NULL);
+      that->wnd_ = nullptr;
+      that->OnNcDestroy();
+    }
+    if (handled) {
+      return result;
+    }
+  }
+  return ::DefWindowProc(hwnd, uMsg, wParam, lParam);
+}
+
+}  // namespace rtc
diff --git a/rtc_base/win32window.h b/rtc_base/win32window.h
new file mode 100644
index 0000000..fa026d1
--- /dev/null
+++ b/rtc_base/win32window.h
@@ -0,0 +1,60 @@
+/*
+ *  Copyright 2004 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_WIN32WINDOW_H_
+#define RTC_BASE_WIN32WINDOW_H_
+
+#if defined(WEBRTC_WIN)
+
+#include "rtc_base/win32.h"
+
+namespace rtc {
+
+///////////////////////////////////////////////////////////////////////////////
+// Win32Window
+///////////////////////////////////////////////////////////////////////////////
+
+class Win32Window {
+ public:
+  Win32Window();
+  virtual ~Win32Window();
+
+  HWND handle() const { return wnd_; }
+
+  bool Create(HWND parent, const wchar_t* title, DWORD style, DWORD exstyle,
+              int x, int y, int cx, int cy);
+  void Destroy();
+
+  // Call this when your DLL unloads.
+  static void Shutdown();
+
+ protected:
+  virtual bool OnMessage(UINT uMsg, WPARAM wParam, LPARAM lParam,
+                         LRESULT& result);
+
+  virtual bool OnClose();
+  virtual void OnNcDestroy();
+
+ private:
+  static LRESULT CALLBACK WndProc(HWND hwnd, UINT uMsg, WPARAM wParam,
+                                  LPARAM lParam);
+
+  HWND wnd_;
+  static HINSTANCE instance_;
+  static ATOM window_class_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+}  // namespace rtc
+
+#endif  // WEBRTC_WIN
+
+#endif  // RTC_BASE_WIN32WINDOW_H_
diff --git a/rtc_base/win32window_unittest.cc b/rtc_base/win32window_unittest.cc
new file mode 100644
index 0000000..c659349
--- /dev/null
+++ b/rtc_base/win32window_unittest.cc
@@ -0,0 +1,65 @@
+/*
+ *  Copyright 2009 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/win32window.h"
+#include "rtc_base/gunit.h"
+#include "rtc_base/logging.h"
+
+static LRESULT kDummyResult = 0x1234ABCD;
+
+class TestWindow : public rtc::Win32Window {
+ public:
+  TestWindow() : destroyed_(false) { memset(&msg_, 0, sizeof(msg_)); }
+  const MSG& msg() const { return msg_; }
+  bool destroyed() const { return destroyed_; }
+
+  bool OnMessage(UINT uMsg,
+                 WPARAM wParam,
+                 LPARAM lParam,
+                 LRESULT& result) override {
+    msg_.message = uMsg;
+    msg_.wParam = wParam;
+    msg_.lParam = lParam;
+    result = kDummyResult;
+    return true;
+  }
+  void OnNcDestroy() override { destroyed_ = true; }
+
+ private:
+  MSG msg_;
+  bool destroyed_;
+};
+
+TEST(Win32WindowTest, Basics) {
+  TestWindow wnd;
+  EXPECT_TRUE(wnd.handle() == nullptr);
+  EXPECT_FALSE(wnd.destroyed());
+  EXPECT_TRUE(wnd.Create(0, L"Test", 0, 0, 0, 0, 100, 100));
+  EXPECT_TRUE(wnd.handle() != nullptr);
+  EXPECT_EQ(kDummyResult, ::SendMessage(wnd.handle(), WM_USER, 1, 2));
+  EXPECT_EQ(static_cast<UINT>(WM_USER), wnd.msg().message);
+  EXPECT_EQ(1u, wnd.msg().wParam);
+  EXPECT_EQ(2l, wnd.msg().lParam);
+  wnd.Destroy();
+  EXPECT_TRUE(wnd.handle() == nullptr);
+  EXPECT_TRUE(wnd.destroyed());
+}
+
+TEST(Win32WindowTest, MultipleWindows) {
+  TestWindow wnd1, wnd2;
+  EXPECT_TRUE(wnd1.Create(0, L"Test", 0, 0, 0, 0, 100, 100));
+  EXPECT_TRUE(wnd2.Create(0, L"Test", 0, 0, 0, 0, 100, 100));
+  EXPECT_TRUE(wnd1.handle() != nullptr);
+  EXPECT_TRUE(wnd2.handle() != nullptr);
+  wnd1.Destroy();
+  wnd2.Destroy();
+  EXPECT_TRUE(wnd2.handle() == nullptr);
+  EXPECT_TRUE(wnd1.handle() == nullptr);
+}
diff --git a/rtc_base/zero_memory.cc b/rtc_base/zero_memory.cc
new file mode 100644
index 0000000..b9c5b38
--- /dev/null
+++ b/rtc_base/zero_memory.cc
@@ -0,0 +1,38 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#if defined(WEBRTC_WIN)
+#include <windows.h>
+#else
+#include <string.h>
+#endif
+
+#include "rtc_base/checks.h"
+#include "rtc_base/zero_memory.h"
+
+namespace rtc {
+
+// Code and comment taken from "OPENSSL_cleanse" of BoringSSL.
+void ExplicitZeroMemory(void* ptr, size_t len) {
+  RTC_DCHECK(ptr || !len);
+#if defined(WEBRTC_WIN)
+  SecureZeroMemory(ptr, len);
+#else
+  memset(ptr, 0, len);
+#if !defined(__pnacl__)
+  /* As best as we can tell, this is sufficient to break any optimisations that
+     might try to eliminate "superfluous" memsets. If there's an easy way to
+     detect memset_s, it would be better to use that. */
+  __asm__ __volatile__("" : : "r"(ptr) : "memory");  // NOLINT
+#endif
+#endif  // !WEBRTC_WIN
+}
+
+}  // namespace rtc
diff --git a/rtc_base/zero_memory.h b/rtc_base/zero_memory.h
new file mode 100644
index 0000000..cb4646c
--- /dev/null
+++ b/rtc_base/zero_memory.h
@@ -0,0 +1,33 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTC_BASE_ZERO_MEMORY_H_
+#define RTC_BASE_ZERO_MEMORY_H_
+
+#include <type_traits>
+
+#include "api/array_view.h"
+
+namespace rtc {
+
+// Fill memory with zeros in a way that the compiler doesn't optimize it away
+// even if the pointer is not used afterwards.
+void ExplicitZeroMemory(void* ptr, size_t len);
+
+template <typename T,
+          typename std::enable_if<!std::is_const<T>::value &&
+                                  std::is_trivial<T>::value>::type* = nullptr>
+void ExplicitZeroMemory(rtc::ArrayView<T> a) {
+  ExplicitZeroMemory(a.data(), a.size());
+}
+
+}  // namespace rtc
+
+#endif  // RTC_BASE_ZERO_MEMORY_H_
diff --git a/rtc_base/zero_memory_unittest.cc b/rtc_base/zero_memory_unittest.cc
new file mode 100644
index 0000000..0f49670
--- /dev/null
+++ b/rtc_base/zero_memory_unittest.cc
@@ -0,0 +1,52 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/zero_memory.h"
+
+#include "api/array_view.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/gunit.h"
+
+namespace rtc {
+
+TEST(ZeroMemoryTest, TestZeroMemory) {
+  static const size_t kBufferSize = 32;
+  uint8_t buffer[kBufferSize];
+  for (size_t i = 0; i < kBufferSize; i++) {
+    buffer[i] = static_cast<uint8_t>(i + 1);
+  }
+  ExplicitZeroMemory(buffer, sizeof(buffer));
+  for (size_t i = 0; i < kBufferSize; i++) {
+    EXPECT_EQ(buffer[i], 0);
+  }
+}
+
+TEST(ZeroMemoryTest, TestZeroArrayView) {
+  static const size_t kBufferSize = 32;
+  uint8_t buffer[kBufferSize];
+  for (size_t i = 0; i < kBufferSize; i++) {
+    buffer[i] = static_cast<uint8_t>(i + 1);
+  }
+  ExplicitZeroMemory(rtc::ArrayView<uint8_t>(buffer, sizeof(buffer)));
+  for (size_t i = 0; i < kBufferSize; i++) {
+    EXPECT_EQ(buffer[i], 0);
+  }
+}
+
+// While this test doesn't actually test anything, it can be used to check
+// the compiler output to make sure the call to "ExplicitZeroMemory" is not
+// optimized away.
+TEST(ZeroMemoryTest, TestZeroMemoryUnused) {
+  static const size_t kBufferSize = 32;
+  uint8_t buffer[kBufferSize];
+  ExplicitZeroMemory(buffer, sizeof(buffer));
+}
+
+}  // namespace rtc
diff --git a/script/sync-apm.sh b/script/sync-apm.sh
index b16b8a6..de4f77f 100755
--- a/script/sync-apm.sh
+++ b/script/sync-apm.sh
@@ -10,7 +10,7 @@
 FROM=$1
 TO=$2
 
-OPTIONS=(-av --delete --filter="P module.mk")
+OPTIONS=(-av --delete --exclude="*/test" --filter="P module.mk")
 
 rsync "${OPTIONS[@]}" ${FROM}/rtc_base ${TO}
 rsync "${OPTIONS[@]}" ${FROM}/audio ${TO}
diff --git a/system_wrappers/BUILD.gn b/system_wrappers/BUILD.gn
new file mode 100644
index 0000000..93549fd
--- /dev/null
+++ b/system_wrappers/BUILD.gn
@@ -0,0 +1,261 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+if (is_android) {
+  import("//build/config/android/config.gni")
+  import("//build/config/android/rules.gni")
+}
+import("../webrtc.gni")
+
+rtc_static_library("system_wrappers") {
+  visibility = [ "*" ]
+  sources = [
+    "include/aligned_array.h",
+    "include/aligned_malloc.h",
+    "include/clock.h",
+    "include/cpu_info.h",
+    "include/event_wrapper.h",
+    "include/file_wrapper.h",
+    "include/ntp_time.h",
+    "include/rtp_to_ntp_estimator.h",
+    "include/rw_lock_wrapper.h",
+    "include/sleep.h",
+    "include/timestamp_extrapolator.h",
+    "source/aligned_malloc.cc",
+    "source/clock.cc",
+    "source/cpu_features.cc",
+    "source/cpu_info.cc",
+    "source/event.cc",
+    "source/event_timer_posix.cc",
+    "source/event_timer_posix.h",
+    "source/event_timer_win.cc",
+    "source/event_timer_win.h",
+    "source/file_impl.cc",
+    "source/rtp_to_ntp_estimator.cc",
+    "source/rw_lock.cc",
+    "source/rw_lock_posix.cc",
+    "source/rw_lock_posix.h",
+    "source/rw_lock_win.cc",
+    "source/rw_lock_win.h",
+    "source/sleep.cc",
+    "source/timestamp_extrapolator.cc",
+  ]
+
+  defines = []
+  libs = []
+  deps = [
+    ":cpu_features_api",
+    ":field_trial_api",
+    ":metrics_api",
+    ":runtime_enabled_features_api",
+    "..:webrtc_common",
+    "../:typedefs",
+    "../api:optional",
+    "../modules:module_api_public",
+    "../rtc_base:checks",
+  ]
+
+  if (is_android) {
+    defines += [ "WEBRTC_THREAD_RR" ]
+
+    if (build_with_mozilla) {
+      include_dirs = [
+        "/config/external/nspr",
+        "/nsprpub/lib/ds",
+        "/nsprpub/pr/include",
+      ]
+    } else {
+      deps += [ ":cpu_features_android" ]
+    }
+
+    libs += [ "log" ]
+  }
+
+  if (is_linux) {
+    defines += [ "WEBRTC_THREAD_RR" ]
+
+    if (!build_with_chromium) {
+      deps += [ ":cpu_features_linux" ]
+    }
+
+    libs += [ "rt" ]
+  }
+
+  if (is_ios || is_mac) {
+    defines += [ "WEBRTC_THREAD_RR" ]
+  }
+
+  # TODO(jschuh): Bug 1348: fix this warning.
+  configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+
+  if (is_win) {
+    libs += [ "winmm.lib" ]
+
+    cflags = [ "/wd4334" ]  # Ignore warning on shift operator promotion.
+
+    # Windows needs ../rtc_base:rtc_base due to include of
+    # webrtc/rtc_base/win32.h in source/clock.cc.
+    deps += [ "../rtc_base:rtc_base" ]
+  }
+
+  if (is_win && is_clang) {
+    # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+    suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+  }
+
+  deps += [
+    "../rtc_base:rtc_base_approved",
+    "../rtc_base:rtc_numerics",
+  ]
+}
+
+rtc_source_set("cpu_features_api") {
+  sources = [
+    "include/cpu_features_wrapper.h",
+  ]
+  deps = [
+    "..:webrtc_common",
+    "../:typedefs",
+  ]
+}
+
+rtc_source_set("asm_defines") {
+  sources = [
+    "include/asm_defines.h",
+  ]
+}
+
+rtc_source_set("field_trial_api") {
+  sources = [
+    "include/field_trial.h",
+  ]
+}
+
+rtc_source_set("runtime_enabled_features_api") {
+  visibility = [ "*" ]
+  sources = [
+    "include/runtime_enabled_features.h",
+  ]
+}
+
+rtc_source_set("metrics_api") {
+  visibility = [ "*" ]
+  sources = [
+    "include/metrics.h",
+  ]
+  deps = [
+    "..:webrtc_common",
+    "../rtc_base:checks",
+    "../rtc_base:rtc_base_approved",
+  ]
+}
+
+rtc_source_set("field_trial_default") {
+  visibility = [ "*" ]
+  sources = [
+    "include/field_trial_default.h",
+    "source/field_trial_default.cc",
+  ]
+  deps = [
+    ":field_trial_api",
+  ]
+}
+
+rtc_source_set("runtime_enabled_features_default") {
+  visibility = [ "*" ]
+  sources = [
+    "source/runtime_enabled_features_default.cc",
+  ]
+  deps = [
+    ":runtime_enabled_features_api",
+    "../rtc_base:rtc_base_approved",
+  ]
+}
+
+rtc_source_set("metrics_default") {
+  visibility = [ "*" ]
+  sources = [
+    "include/metrics_default.h",
+    "source/metrics_default.cc",
+  ]
+  deps = [
+    ":metrics_api",
+    "../rtc_base:rtc_base_approved",
+  ]
+}
+
+group("system_wrappers_default") {
+  deps = [
+    ":field_trial_default",
+    ":metrics_default",
+    ":runtime_enabled_features_default",
+    ":system_wrappers",
+  ]
+}
+
+if (is_android && !build_with_mozilla) {
+  rtc_static_library("cpu_features_android") {
+    sources = [
+      "source/cpu_features_android.c",
+    ]
+
+    deps = [
+      "//third_party/android_tools:cpu_features",
+    ]
+  }
+}
+
+if (is_linux) {
+  rtc_static_library("cpu_features_linux") {
+    sources = [
+      "source/cpu_features_linux.c",
+    ]
+    deps = [
+      ":cpu_features_api",
+    ]
+  }
+}
+
+if (rtc_include_tests) {
+  rtc_test("system_wrappers_unittests") {
+    testonly = true
+    sources = [
+      "source/aligned_array_unittest.cc",
+      "source/aligned_malloc_unittest.cc",
+      "source/clock_unittest.cc",
+      "source/event_timer_posix_unittest.cc",
+      "source/metrics_default_unittest.cc",
+      "source/metrics_unittest.cc",
+      "source/ntp_time_unittest.cc",
+      "source/rtp_to_ntp_estimator_unittest.cc",
+    ]
+    configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+
+    if (!build_with_chromium && is_clang) {
+      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
+      suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
+    }
+
+    deps = [
+      ":metrics_api",
+      ":metrics_default",
+      ":system_wrappers",
+      "..:webrtc_common",
+      "../:typedefs",
+      "../rtc_base:rtc_base_approved",
+      "../test:test_main",
+      "//testing/gtest",
+    ]
+
+    if (is_android) {
+      deps += [ "//testing/android/native_test:native_test_support" ]
+
+      shard_timeout = 900
+    }
+  }
+}
diff --git a/system_wrappers/DEPS b/system_wrappers/DEPS
new file mode 100644
index 0000000..f1bede5
--- /dev/null
+++ b/system_wrappers/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+]
+
diff --git a/system_wrappers/OWNERS b/system_wrappers/OWNERS
new file mode 100644
index 0000000..65b8dee
--- /dev/null
+++ b/system_wrappers/OWNERS
@@ -0,0 +1,11 @@
+perkj@webrtc.org
+henrika@webrtc.org
+henrikg@webrtc.org
+mflodman@webrtc.org
+niklas.enbom@webrtc.org
+nisse@webrtc.org
+
+# These are for the common case of adding or renaming files. If you're doing
+# structural changes, please get a review from a reviewer in this file.
+per-file *.gn=*
+per-file *.gni=*
diff --git a/system_wrappers/include/aligned_array.h b/system_wrappers/include/aligned_array.h
new file mode 100644
index 0000000..793c785
--- /dev/null
+++ b/system_wrappers/include/aligned_array.h
@@ -0,0 +1,78 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_SYSTEM_WRAPPERS_INCLUDE_ALIGNED_ARRAY_
+#define WEBRTC_SYSTEM_WRAPPERS_INCLUDE_ALIGNED_ARRAY_
+
+#include "rtc_base/checks.h"
+#include "system_wrappers/include/aligned_malloc.h"
+
+namespace webrtc {
+
+// Wrapper class for aligned arrays. Every row (and the first dimension) are
+// aligned to the given byte alignment.
+template <typename T>
+class AlignedArray {
+ public:
+  AlignedArray(size_t rows, size_t cols, size_t alignment)
+      : rows_(rows), cols_(cols) {
+    RTC_CHECK_GT(alignment, 0);
+    head_row_ =
+        static_cast<T**>(AlignedMalloc(rows_ * sizeof(*head_row_), alignment));
+    for (size_t i = 0; i < rows_; ++i) {
+      head_row_[i] = static_cast<T*>(
+          AlignedMalloc(cols_ * sizeof(**head_row_), alignment));
+    }
+  }
+
+  ~AlignedArray() {
+    for (size_t i = 0; i < rows_; ++i) {
+      AlignedFree(head_row_[i]);
+    }
+    AlignedFree(head_row_);
+  }
+
+  T* const* Array() { return head_row_; }
+
+  const T* const* Array() const { return head_row_; }
+
+  T* Row(size_t row) {
+    RTC_CHECK_LE(row, rows_);
+    return head_row_[row];
+  }
+
+  const T* Row(size_t row) const {
+    RTC_CHECK_LE(row, rows_);
+    return head_row_[row];
+  }
+
+  T& At(size_t row, size_t col) {
+    RTC_CHECK_LE(col, cols_);
+    return Row(row)[col];
+  }
+
+  const T& At(size_t row, size_t col) const {
+    RTC_CHECK_LE(col, cols_);
+    return Row(row)[col];
+  }
+
+  size_t rows() const { return rows_; }
+
+  size_t cols() const { return cols_; }
+
+ private:
+  size_t rows_;
+  size_t cols_;
+  T** head_row_;
+};
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_SYSTEM_WRAPPERS_INCLUDE_ALIGNED_ARRAY_
diff --git a/system_wrappers/include/aligned_malloc.h b/system_wrappers/include/aligned_malloc.h
new file mode 100644
index 0000000..33b23d2
--- /dev/null
+++ b/system_wrappers/include/aligned_malloc.h
@@ -0,0 +1,57 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SYSTEM_WRAPPERS_INCLUDE_ALIGNED_MALLOC_H_
+#define SYSTEM_WRAPPERS_INCLUDE_ALIGNED_MALLOC_H_
+
+// The functions declared here
+// 1) Allocates block of aligned memory.
+// 2) Re-calculates a pointer such that it is aligned to a higher or equal
+//    address.
+// Note: alignment must be a power of two. The alignment is in bytes.
+
+#include <stddef.h>
+
+namespace webrtc {
+
+// Returns a pointer to the first boundry of |alignment| bytes following the
+// address of |ptr|.
+// Note that there is no guarantee that the memory in question is available.
+// |ptr| has no requirements other than it can't be NULL.
+void* GetRightAlign(const void* ptr, size_t alignment);
+
+// Allocates memory of |size| bytes aligned on an |alignment| boundry.
+// The return value is a pointer to the memory. Note that the memory must
+// be de-allocated using AlignedFree.
+void* AlignedMalloc(size_t size, size_t alignment);
+// De-allocates memory created using the AlignedMalloc() API.
+void AlignedFree(void* mem_block);
+
+// Templated versions to facilitate usage of aligned malloc without casting
+// to and from void*.
+template <typename T>
+T* GetRightAlign(const T* ptr, size_t alignment) {
+  return reinterpret_cast<T*>(
+      GetRightAlign(reinterpret_cast<const void*>(ptr), alignment));
+}
+template <typename T>
+T* AlignedMalloc(size_t size, size_t alignment) {
+  return reinterpret_cast<T*>(AlignedMalloc(size, alignment));
+}
+
+// Deleter for use with unique_ptr. E.g., use as
+//   std::unique_ptr<Foo, AlignedFreeDeleter> foo;
+struct AlignedFreeDeleter {
+  inline void operator()(void* ptr) const { AlignedFree(ptr); }
+};
+
+}  // namespace webrtc
+
+#endif  // SYSTEM_WRAPPERS_INCLUDE_ALIGNED_MALLOC_H_
diff --git a/system_wrappers/include/asm_defines.h b/system_wrappers/include/asm_defines.h
new file mode 100644
index 0000000..7f4c80e
--- /dev/null
+++ b/system_wrappers/include/asm_defines.h
@@ -0,0 +1,66 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SYSTEM_WRAPPERS_INCLUDE_ASM_DEFINES_H_
+#define SYSTEM_WRAPPERS_INCLUDE_ASM_DEFINES_H_
+
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+// Define the macros used in ARM assembly code, so that for Mac or iOS builds
+// we add leading underscores for the function names.
+#ifdef __APPLE__
+.macro GLOBAL_FUNCTION name
+.global _\name
+.private_extern _\name
+.endm
+.macro DEFINE_FUNCTION name
+_\name:
+.endm
+.macro CALL_FUNCTION name
+bl _\name
+.endm
+.macro GLOBAL_LABEL name
+.global _\name
+.private_extern _\name
+.endm
+#else
+.macro GLOBAL_FUNCTION name
+.global \name
+.hidden \name
+.endm
+.macro DEFINE_FUNCTION name
+#if defined(__linux__) && defined(__ELF__)
+.type \name,%function
+#endif
+\name:
+.endm
+.macro CALL_FUNCTION name
+bl \name
+.endm
+.macro GLOBAL_LABEL name
+.global \name
+.hidden \name
+.endm
+#endif
+
+// With Apple's clang compiler, for instructions ldrb, strh, etc.,
+// the condition code is after the width specifier. Here we define
+// only the ones that are actually used in the assembly files.
+#if (defined __llvm__) && (defined __APPLE__)
+.macro streqh reg1, reg2, num
+strheq \reg1, \reg2, \num
+.endm
+#endif
+
+.text
+
+#endif  // SYSTEM_WRAPPERS_INCLUDE_ASM_DEFINES_H_
diff --git a/system_wrappers/include/clock.h b/system_wrappers/include/clock.h
new file mode 100644
index 0000000..aec5ca5
--- /dev/null
+++ b/system_wrappers/include/clock.h
@@ -0,0 +1,88 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SYSTEM_WRAPPERS_INCLUDE_CLOCK_H_
+#define SYSTEM_WRAPPERS_INCLUDE_CLOCK_H_
+
+#include <memory>
+
+#include "system_wrappers/include/ntp_time.h"
+#include "system_wrappers/include/rw_lock_wrapper.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// January 1970, in NTP seconds.
+const uint32_t kNtpJan1970 = 2208988800UL;
+
+// Magic NTP fractional unit.
+const double kMagicNtpFractionalUnit = 4.294967296E+9;
+
+// A clock interface that allows reading of absolute and relative timestamps.
+class Clock {
+ public:
+  virtual ~Clock() {}
+
+  // Return a timestamp in milliseconds relative to some arbitrary source; the
+  // source is fixed for this clock.
+  virtual int64_t TimeInMilliseconds() const = 0;
+
+  // Return a timestamp in microseconds relative to some arbitrary source; the
+  // source is fixed for this clock.
+  virtual int64_t TimeInMicroseconds() const = 0;
+
+  // Retrieve an NTP absolute timestamp.
+  virtual NtpTime CurrentNtpTime() const = 0;
+
+  // Retrieve an NTP absolute timestamp in milliseconds.
+  virtual int64_t CurrentNtpInMilliseconds() const = 0;
+
+  // Converts an NTP timestamp to a millisecond timestamp.
+  static int64_t NtpToMs(uint32_t seconds, uint32_t fractions) {
+    return NtpTime(seconds, fractions).ToMs();
+  }
+
+  // Returns an instance of the real-time system clock implementation.
+  static Clock* GetRealTimeClock();
+};
+
+class SimulatedClock : public Clock {
+ public:
+  explicit SimulatedClock(int64_t initial_time_us);
+
+  ~SimulatedClock() override;
+
+  // Return a timestamp in milliseconds relative to some arbitrary source; the
+  // source is fixed for this clock.
+  int64_t TimeInMilliseconds() const override;
+
+  // Return a timestamp in microseconds relative to some arbitrary source; the
+  // source is fixed for this clock.
+  int64_t TimeInMicroseconds() const override;
+
+  // Retrieve an NTP absolute timestamp.
+  NtpTime CurrentNtpTime() const override;
+
+  // Converts an NTP timestamp to a millisecond timestamp.
+  int64_t CurrentNtpInMilliseconds() const override;
+
+  // Advance the simulated clock with a given number of milliseconds or
+  // microseconds.
+  void AdvanceTimeMilliseconds(int64_t milliseconds);
+  void AdvanceTimeMicroseconds(int64_t microseconds);
+
+ private:
+  int64_t time_us_;
+  std::unique_ptr<RWLockWrapper> lock_;
+};
+
+};  // namespace webrtc
+
+#endif  // SYSTEM_WRAPPERS_INCLUDE_CLOCK_H_
diff --git a/system_wrappers/include/cpu_features_wrapper.h b/system_wrappers/include/cpu_features_wrapper.h
new file mode 100644
index 0000000..07ee912
--- /dev/null
+++ b/system_wrappers/include/cpu_features_wrapper.h
@@ -0,0 +1,48 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SYSTEM_WRAPPERS_INCLUDE_CPU_FEATURES_WRAPPER_H_
+#define SYSTEM_WRAPPERS_INCLUDE_CPU_FEATURES_WRAPPER_H_
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+// List of features in x86.
+typedef enum { kSSE2, kSSE3 } CPUFeature;
+
+// List of features in ARM.
+enum {
+  kCPUFeatureARMv7 = (1 << 0),
+  kCPUFeatureVFPv3 = (1 << 1),
+  kCPUFeatureNEON = (1 << 2),
+  kCPUFeatureLDREXSTREX = (1 << 3)
+};
+
+typedef int (*WebRtc_CPUInfo)(CPUFeature feature);
+
+// Returns true if the CPU supports the feature.
+extern WebRtc_CPUInfo WebRtc_GetCPUInfo;
+
+// No CPU feature is available => straight C path.
+extern WebRtc_CPUInfo WebRtc_GetCPUInfoNoASM;
+
+// Return the features in an ARM device.
+// It detects the features in the hardware platform, and returns supported
+// values in the above enum definition as a bitmask.
+extern uint64_t WebRtc_GetCPUFeaturesARM(void);
+
+#if defined(__cplusplus) || defined(c_plusplus)
+}  // extern "C"
+#endif
+
+#endif  // SYSTEM_WRAPPERS_INCLUDE_CPU_FEATURES_WRAPPER_H_
diff --git a/system_wrappers/include/cpu_info.h b/system_wrappers/include/cpu_info.h
new file mode 100644
index 0000000..dbd5d60
--- /dev/null
+++ b/system_wrappers/include/cpu_info.h
@@ -0,0 +1,28 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SYSTEM_WRAPPERS_INCLUDE_CPU_INFO_H_
+#define SYSTEM_WRAPPERS_INCLUDE_CPU_INFO_H_
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class CpuInfo {
+ public:
+  static uint32_t DetectNumberOfCores();
+
+ private:
+  CpuInfo() {}
+};
+
+}  // namespace webrtc
+
+#endif  // SYSTEM_WRAPPERS_INCLUDE_CPU_INFO_H_
diff --git a/system_wrappers/include/event_wrapper.h b/system_wrappers/include/event_wrapper.h
new file mode 100644
index 0000000..0c29138
--- /dev/null
+++ b/system_wrappers/include/event_wrapper.h
@@ -0,0 +1,69 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SYSTEM_WRAPPERS_INCLUDE_EVENT_WRAPPER_H_
+#define SYSTEM_WRAPPERS_INCLUDE_EVENT_WRAPPER_H_
+
+namespace webrtc {
+enum EventTypeWrapper {
+  kEventSignaled = 1,
+  kEventError = 2,
+  kEventTimeout = 3
+};
+
+#define WEBRTC_EVENT_INFINITE 0xffffffff
+
+class EventTimerWrapper;
+
+class EventWrapper {
+ public:
+  // Factory method. Constructor disabled.
+  static EventWrapper* Create();
+
+  virtual ~EventWrapper() {}
+
+  // Releases threads who are calling Wait() and has started waiting. Please
+  // note that a thread calling Wait() will not start waiting immediately.
+  // assumptions to the contrary is a very common source of issues in
+  // multithreaded programming.
+  // Set is sticky in the sense that it will release at least one thread
+  // either immediately or some time in the future.
+  virtual bool Set() = 0;
+
+  // Puts the calling thread into a wait state. The thread may be released
+  // by a Set() call depending on if other threads are waiting and if so on
+  // timing. The thread that was released will reset the event before leaving
+  // preventing more threads from being released. If multiple threads
+  // are waiting for the same Set(), only one (random) thread is guaranteed to
+  // be released. It is possible that multiple (random) threads are released
+  // Depending on timing.
+  //
+  // |max_time| is the maximum time to wait in milliseconds or
+  // WEBRTC_EVENT_INFINITE to wait infinitely.
+  virtual EventTypeWrapper Wait(unsigned long max_time) = 0;
+};
+
+class EventTimerWrapper : public EventWrapper {
+ public:
+  static EventTimerWrapper* Create();
+
+  // Starts a timer that will call a non-sticky version of Set() either once
+  // or periodically. If the timer is periodic it ensures that there is no
+  // drift over time relative to the system clock.
+  //
+  // |time| is in milliseconds.
+  virtual bool StartTimer(bool periodic, unsigned long time) = 0;
+
+  virtual bool StopTimer() = 0;
+};
+
+}  // namespace webrtc
+
+#endif  // SYSTEM_WRAPPERS_INCLUDE_EVENT_WRAPPER_H_
diff --git a/system_wrappers/include/field_trial.h b/system_wrappers/include/field_trial.h
new file mode 100644
index 0000000..c6a6223
--- /dev/null
+++ b/system_wrappers/include/field_trial.h
@@ -0,0 +1,81 @@
+//
+// Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS.  All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+//
+
+#ifndef SYSTEM_WRAPPERS_INCLUDE_FIELD_TRIAL_H_
+#define SYSTEM_WRAPPERS_INCLUDE_FIELD_TRIAL_H_
+
+#include <string>
+
+// Field trials allow webrtc clients (such as Chrome) to turn on feature code
+// in binaries out in the field and gather information with that.
+//
+// WebRTC clients MUST provide an implementation of:
+//
+//   std::string webrtc::field_trial::FindFullName(const std::string& trial).
+//
+// Or link with a default one provided in:
+//
+//   system_wrappers/system_wrappers.gyp:field_trial_default
+//
+//
+// They are designed to wire up directly to chrome field trials and to speed up
+// developers by reducing the need to wire APIs to control whether a feature is
+// on/off. E.g. to experiment with a new method that could lead to a different
+// trade-off between CPU/bandwidth:
+//
+// 1 - Develop the feature with default behaviour off:
+//
+//   if (FieldTrial::FindFullName("WebRTCExperimenMethod2") == "Enabled")
+//     method2();
+//   else
+//     method1();
+//
+// 2 - Once the changes are rolled to chrome, the new code path can be
+//     controlled as normal chrome field trials.
+//
+// 3 - Evaluate the new feature and clean the code paths.
+//
+// Notes:
+//   - NOT every feature is a candidate to be controlled by this mechanism as
+//     it may require negotation between involved parties (e.g. SDP).
+//
+// TODO(andresp): since chrome --force-fieldtrials does not marks the trial
+//     as active it does not gets propaged to renderer process. For now one
+//     needs to push a config with start_active:true or run a local finch
+//     server.
+//
+// TODO(andresp): find out how to get bots to run tests with trials enabled.
+
+namespace webrtc {
+namespace field_trial {
+
+// Returns the group name chosen for the named trial, or the empty string
+// if the trial does not exists.
+//
+// Note: To keep things tidy append all the trial names with WebRTC.
+std::string FindFullName(const std::string& name);
+
+// Convenience method, returns true iff FindFullName(name) return a string that
+// starts with "Enabled".
+// TODO(tommi): Make sure all implementations support this.
+inline bool IsEnabled(const char* name) {
+  return FindFullName(name).find("Enabled") == 0;
+}
+
+// Convenience method, returns true iff FindFullName(name) return a string that
+// starts with "Disabled".
+inline bool IsDisabled(const char* name) {
+  return FindFullName(name).find("Disabled") == 0;
+}
+
+}  // namespace field_trial
+}  // namespace webrtc
+
+#endif  // SYSTEM_WRAPPERS_INCLUDE_FIELD_TRIAL_H_
diff --git a/system_wrappers/include/field_trial_default.h b/system_wrappers/include/field_trial_default.h
new file mode 100644
index 0000000..1774c2d
--- /dev/null
+++ b/system_wrappers/include/field_trial_default.h
@@ -0,0 +1,28 @@
+//
+// Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS.  All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+//
+
+#ifndef SYSTEM_WRAPPERS_INCLUDE_FIELD_TRIAL_DEFAULT_H_
+#define SYSTEM_WRAPPERS_INCLUDE_FIELD_TRIAL_DEFAULT_H_
+
+namespace webrtc {
+namespace field_trial {
+
+// Optionally initialize field trial from a string.
+// This method can be called at most once before any other call into webrtc.
+// E.g. before the peer connection factory is constructed.
+// Note: trials_string must never be destroyed.
+void InitFieldTrialsFromString(const char* trials_string);
+
+const char* GetFieldTrialString();
+
+}  // namespace field_trial
+}  // namespace webrtc
+
+#endif  // SYSTEM_WRAPPERS_INCLUDE_FIELD_TRIAL_DEFAULT_H_
diff --git a/system_wrappers/include/file_wrapper.h b/system_wrappers/include/file_wrapper.h
new file mode 100644
index 0000000..143da13
--- /dev/null
+++ b/system_wrappers/include/file_wrapper.h
@@ -0,0 +1,88 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SYSTEM_WRAPPERS_INCLUDE_FILE_WRAPPER_H_
+#define SYSTEM_WRAPPERS_INCLUDE_FILE_WRAPPER_H_
+
+#include <stddef.h>
+#include <stdio.h>
+
+#include "common_types.h"  // NOLINT(build/include)
+#include "rtc_base/criticalsection.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+// Implementation of an InStream and OutStream that can read (exclusive) or
+// write from/to a file.
+
+namespace webrtc {
+
+// TODO(tommi): Remove the base classes, rename to rtc::File and move to base.
+class FileWrapper : public InStream, public OutStream {
+ public:
+  static const size_t kMaxFileNameSize = 1024;
+
+  // Factory methods.
+  // TODO(tommi): Remove Create().
+  static FileWrapper* Create();
+  static FileWrapper Open(const char* file_name_utf8, bool read_only);
+
+  FileWrapper(FILE* file, size_t max_size);
+  ~FileWrapper() override;
+
+  // Support for move semantics.
+  FileWrapper(FileWrapper&& other);
+  FileWrapper& operator=(FileWrapper&& other);
+
+  // Returns true if a file has been opened.
+  bool is_open() const { return file_ != nullptr; }
+
+  // Opens a file in read or write mode, decided by the read_only parameter.
+  bool OpenFile(const char* file_name_utf8, bool read_only);
+
+  // Initializes the wrapper from an existing handle.  The wrapper
+  // takes ownership of |handle| and closes it in CloseFile().
+  bool OpenFromFileHandle(FILE* handle);
+
+  void CloseFile();
+
+  // Limits the file size to |bytes|. Writing will fail after the cap
+  // is hit. Pass zero to use an unlimited size.
+  // TODO(tommi): Could we move this out into a separate class?
+  void SetMaxFileSize(size_t bytes);
+
+  // Flush any pending writes.  Note: Flushing when closing, is not required.
+  int Flush();
+
+  // Rewinds the file to the start.
+  int Rewind() override;
+  int Read(void* buf, size_t length) override;
+  bool Write(const void* buf, size_t length) override;
+
+ private:
+  FileWrapper();
+
+  void CloseFileImpl();
+  int FlushImpl();
+
+  // TODO(tommi): Remove the lock.
+  rtc::CriticalSection lock_;
+
+  FILE* file_ = nullptr;
+  size_t position_ = 0;
+  size_t max_size_in_bytes_ = 0;
+
+  // Copying is not supported.
+  FileWrapper(const FileWrapper&) = delete;
+  FileWrapper& operator=(const FileWrapper&) = delete;
+};
+
+}  // namespace webrtc
+
+#endif  // SYSTEM_WRAPPERS_INCLUDE_FILE_WRAPPER_H_
diff --git a/system_wrappers/include/metrics.h b/system_wrappers/include/metrics.h
new file mode 100644
index 0000000..13f2483
--- /dev/null
+++ b/system_wrappers/include/metrics.h
@@ -0,0 +1,272 @@
+//
+// Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS.  All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+//
+
+#ifndef SYSTEM_WRAPPERS_INCLUDE_METRICS_H_
+#define SYSTEM_WRAPPERS_INCLUDE_METRICS_H_
+
+#include <string>
+
+#include "common_types.h"  // NOLINT(build/include)
+#include "rtc_base/atomicops.h"
+#include "rtc_base/checks.h"
+
+// Macros for allowing WebRTC clients (e.g. Chrome) to gather and aggregate
+// statistics.
+//
+// Histogram for counters.
+// RTC_HISTOGRAM_COUNTS(name, sample, min, max, bucket_count);
+//
+// Histogram for enumerators.
+// The boundary should be above the max enumerator sample.
+// RTC_HISTOGRAM_ENUMERATION(name, sample, boundary);
+//
+//
+// The macros use the methods HistogramFactoryGetCounts,
+// HistogramFactoryGetEnumeration and HistogramAdd.
+//
+// Therefore, WebRTC clients must either:
+//
+// - provide implementations of
+//   Histogram* webrtc::metrics::HistogramFactoryGetCounts(
+//       const std::string& name, int sample, int min, int max,
+//       int bucket_count);
+//   Histogram* webrtc::metrics::HistogramFactoryGetEnumeration(
+//       const std::string& name, int sample, int boundary);
+//   void webrtc::metrics::HistogramAdd(
+//       Histogram* histogram_pointer, const std::string& name, int sample);
+//
+// - or link with the default implementations (i.e.
+//   system_wrappers:metrics_default).
+//
+//
+// Example usage:
+//
+// RTC_HISTOGRAM_COUNTS("WebRTC.Video.NacksSent", nacks_sent, 1, 100000, 100);
+//
+// enum Types {
+//   kTypeX,
+//   kTypeY,
+//   kBoundary,
+// };
+//
+// RTC_HISTOGRAM_ENUMERATION("WebRTC.Types", kTypeX, kBoundary);
+//
+// NOTE: It is recommended to do the Chromium review for modifications to
+// histograms.xml before new metrics are committed to WebRTC.
+
+// Macros for adding samples to a named histogram.
+
+// Histogram for counters (exponentially spaced buckets).
+#define RTC_HISTOGRAM_COUNTS_100(name, sample) \
+  RTC_HISTOGRAM_COUNTS(name, sample, 1, 100, 50)
+
+#define RTC_HISTOGRAM_COUNTS_200(name, sample) \
+  RTC_HISTOGRAM_COUNTS(name, sample, 1, 200, 50)
+
+#define RTC_HISTOGRAM_COUNTS_500(name, sample) \
+  RTC_HISTOGRAM_COUNTS(name, sample, 1, 500, 50)
+
+#define RTC_HISTOGRAM_COUNTS_1000(name, sample) \
+  RTC_HISTOGRAM_COUNTS(name, sample, 1, 1000, 50)
+
+#define RTC_HISTOGRAM_COUNTS_10000(name, sample) \
+  RTC_HISTOGRAM_COUNTS(name, sample, 1, 10000, 50)
+
+#define RTC_HISTOGRAM_COUNTS_100000(name, sample) \
+  RTC_HISTOGRAM_COUNTS(name, sample, 1, 100000, 50)
+
+#define RTC_HISTOGRAM_COUNTS(name, sample, min, max, bucket_count)       \
+  RTC_HISTOGRAM_COMMON_BLOCK(name, sample,                               \
+                             webrtc::metrics::HistogramFactoryGetCounts( \
+                                 name, min, max, bucket_count))
+
+#define RTC_HISTOGRAM_COUNTS_LINEAR(name, sample, min, max, bucket_count)      \
+  RTC_HISTOGRAM_COMMON_BLOCK(name, sample,                                     \
+                             webrtc::metrics::HistogramFactoryGetCountsLinear( \
+                                 name, min, max, bucket_count))
+
+// Slow metrics: pointer to metric is acquired at each call and is not cached.
+//
+#define RTC_HISTOGRAM_COUNTS_SPARSE_100(name, sample) \
+  RTC_HISTOGRAM_COUNTS_SPARSE(name, sample, 1, 100, 50)
+
+#define RTC_HISTOGRAM_COUNTS_SPARSE_200(name, sample) \
+  RTC_HISTOGRAM_COUNTS_SPARSE(name, sample, 1, 200, 50)
+
+#define RTC_HISTOGRAM_COUNTS_SPARSE_500(name, sample) \
+  RTC_HISTOGRAM_COUNTS_SPARSE(name, sample, 1, 500, 50)
+
+#define RTC_HISTOGRAM_COUNTS_SPARSE_1000(name, sample) \
+  RTC_HISTOGRAM_COUNTS_SPARSE(name, sample, 1, 1000, 50)
+
+#define RTC_HISTOGRAM_COUNTS_SPARSE_10000(name, sample) \
+  RTC_HISTOGRAM_COUNTS_SPARSE(name, sample, 1, 10000, 50)
+
+#define RTC_HISTOGRAM_COUNTS_SPARSE_100000(name, sample) \
+  RTC_HISTOGRAM_COUNTS_SPARSE(name, sample, 1, 100000, 50)
+
+#define RTC_HISTOGRAM_COUNTS_SPARSE(name, sample, min, max, bucket_count)     \
+  RTC_HISTOGRAM_COMMON_BLOCK_SLOW(name, sample,                               \
+                                  webrtc::metrics::HistogramFactoryGetCounts( \
+                                      name, min, max, bucket_count))
+
+// Histogram for percentage (evenly spaced buckets).
+#define RTC_HISTOGRAM_PERCENTAGE_SPARSE(name, sample) \
+  RTC_HISTOGRAM_ENUMERATION_SPARSE(name, sample, 101)
+
+// Histogram for booleans.
+#define RTC_HISTOGRAM_BOOLEAN_SPARSE(name, sample) \
+  RTC_HISTOGRAM_ENUMERATION_SPARSE(name, sample, 2)
+
+// Histogram for enumerators (evenly spaced buckets).
+// |boundary| should be above the max enumerator sample.
+#define RTC_HISTOGRAM_ENUMERATION_SPARSE(name, sample, boundary) \
+  RTC_HISTOGRAM_COMMON_BLOCK_SLOW(                               \
+      name, sample,                                              \
+      webrtc::metrics::HistogramFactoryGetEnumeration(name, boundary))
+
+// Histogram for percentage (evenly spaced buckets).
+#define RTC_HISTOGRAM_PERCENTAGE(name, sample) \
+  RTC_HISTOGRAM_ENUMERATION(name, sample, 101)
+
+// Histogram for booleans.
+#define RTC_HISTOGRAM_BOOLEAN(name, sample) \
+  RTC_HISTOGRAM_ENUMERATION(name, sample, 2)
+
+// Histogram for enumerators (evenly spaced buckets).
+// |boundary| should be above the max enumerator sample.
+#define RTC_HISTOGRAM_ENUMERATION(name, sample, boundary) \
+  RTC_HISTOGRAM_COMMON_BLOCK(                             \
+      name, sample,                                       \
+      webrtc::metrics::HistogramFactoryGetEnumeration(name, boundary))
+
+// The name of the histogram should not vary.
+// TODO(asapersson): Consider changing string to const char*.
+#define RTC_HISTOGRAM_COMMON_BLOCK(constant_name, sample,                  \
+                                   factory_get_invocation)                 \
+  do {                                                                     \
+    static webrtc::metrics::Histogram* atomic_histogram_pointer = nullptr; \
+    webrtc::metrics::Histogram* histogram_pointer =                        \
+        rtc::AtomicOps::AcquireLoadPtr(&atomic_histogram_pointer);         \
+    if (!histogram_pointer) {                                              \
+      histogram_pointer = factory_get_invocation;                          \
+      webrtc::metrics::Histogram* prev_pointer =                           \
+          rtc::AtomicOps::CompareAndSwapPtr(                               \
+              &atomic_histogram_pointer,                                   \
+              static_cast<webrtc::metrics::Histogram*>(nullptr),           \
+              histogram_pointer);                                          \
+      RTC_DCHECK(prev_pointer == nullptr ||                                \
+                 prev_pointer == histogram_pointer);                       \
+    }                                                                      \
+    if (histogram_pointer) {                                               \
+      webrtc::metrics::HistogramAdd(histogram_pointer, sample);            \
+    }                                                                      \
+  } while (0)
+
+// Deprecated.
+// The histogram is constructed/found for each call.
+// May be used for histograms with infrequent updates.`
+#define RTC_HISTOGRAM_COMMON_BLOCK_SLOW(name, sample, factory_get_invocation) \
+  do {                                                                        \
+    webrtc::metrics::Histogram* histogram_pointer = factory_get_invocation;   \
+    if (histogram_pointer) {                                                  \
+      webrtc::metrics::HistogramAdd(histogram_pointer, sample);               \
+    }                                                                         \
+  } while (0)
+
+// Helper macros.
+// Macros for calling a histogram with varying name (e.g. when using a metric
+// in different modes such as real-time vs screenshare). Fast, because pointer
+// is cached. |index| should be different for different names. Allowed |index|
+// values are 0, 1, and 2.
+#define RTC_HISTOGRAMS_COUNTS_100(index, name, sample) \
+  RTC_HISTOGRAMS_COMMON(index, name, sample,           \
+                        RTC_HISTOGRAM_COUNTS(name, sample, 1, 100, 50))
+
+#define RTC_HISTOGRAMS_COUNTS_200(index, name, sample) \
+  RTC_HISTOGRAMS_COMMON(index, name, sample,           \
+                        RTC_HISTOGRAM_COUNTS(name, sample, 1, 200, 50))
+
+#define RTC_HISTOGRAMS_COUNTS_500(index, name, sample) \
+  RTC_HISTOGRAMS_COMMON(index, name, sample,           \
+                        RTC_HISTOGRAM_COUNTS(name, sample, 1, 500, 50))
+
+#define RTC_HISTOGRAMS_COUNTS_1000(index, name, sample) \
+  RTC_HISTOGRAMS_COMMON(index, name, sample,            \
+                        RTC_HISTOGRAM_COUNTS(name, sample, 1, 1000, 50))
+
+#define RTC_HISTOGRAMS_COUNTS_10000(index, name, sample) \
+  RTC_HISTOGRAMS_COMMON(index, name, sample,             \
+                        RTC_HISTOGRAM_COUNTS(name, sample, 1, 10000, 50))
+
+#define RTC_HISTOGRAMS_COUNTS_100000(index, name, sample) \
+  RTC_HISTOGRAMS_COMMON(index, name, sample,              \
+                        RTC_HISTOGRAM_COUNTS(name, sample, 1, 100000, 50))
+
+#define RTC_HISTOGRAMS_ENUMERATION(index, name, sample, boundary) \
+  RTC_HISTOGRAMS_COMMON(index, name, sample,                      \
+                        RTC_HISTOGRAM_ENUMERATION(name, sample, boundary))
+
+#define RTC_HISTOGRAMS_PERCENTAGE(index, name, sample) \
+  RTC_HISTOGRAMS_COMMON(index, name, sample,           \
+                        RTC_HISTOGRAM_PERCENTAGE(name, sample))
+
+#define RTC_HISTOGRAMS_COMMON(index, name, sample, macro_invocation) \
+  do {                                                               \
+    switch (index) {                                                 \
+      case 0:                                                        \
+        macro_invocation;                                            \
+        break;                                                       \
+      case 1:                                                        \
+        macro_invocation;                                            \
+        break;                                                       \
+      case 2:                                                        \
+        macro_invocation;                                            \
+        break;                                                       \
+      default:                                                       \
+        RTC_NOTREACHED();                                            \
+    }                                                                \
+  } while (0)
+
+namespace webrtc {
+namespace metrics {
+
+// Time that should have elapsed for stats that are gathered once per call.
+enum { kMinRunTimeInSeconds = 10 };
+
+class Histogram;
+
+// Functions for getting pointer to histogram (constructs or finds the named
+// histogram).
+
+// Get histogram for counters.
+Histogram* HistogramFactoryGetCounts(const std::string& name,
+                                     int min,
+                                     int max,
+                                     int bucket_count);
+
+// Get histogram for counters with linear bucket spacing.
+Histogram* HistogramFactoryGetCountsLinear(const std::string& name,
+                                           int min,
+                                           int max,
+                                           int bucket_count);
+
+// Get histogram for enumerators.
+// |boundary| should be above the max enumerator sample.
+Histogram* HistogramFactoryGetEnumeration(const std::string& name,
+                                          int boundary);
+
+// Function for adding a |sample| to a histogram.
+void HistogramAdd(Histogram* histogram_pointer, int sample);
+
+}  // namespace metrics
+}  // namespace webrtc
+
+#endif  // SYSTEM_WRAPPERS_INCLUDE_METRICS_H_
diff --git a/system_wrappers/include/metrics_default.h b/system_wrappers/include/metrics_default.h
new file mode 100644
index 0000000..5ce3582
--- /dev/null
+++ b/system_wrappers/include/metrics_default.h
@@ -0,0 +1,61 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SYSTEM_WRAPPERS_INCLUDE_METRICS_DEFAULT_H_
+#define SYSTEM_WRAPPERS_INCLUDE_METRICS_DEFAULT_H_
+
+#include <map>
+#include <memory>
+#include <string>
+
+namespace webrtc {
+namespace metrics {
+
+// This class does not actually exist. It is casted to an implementation defined
+// pointer inside the functions.
+class Histogram;
+
+struct SampleInfo {
+  SampleInfo(const std::string& name, int min, int max, size_t bucket_count);
+  ~SampleInfo();
+
+  const std::string name;
+  const int min;
+  const int max;
+  const size_t bucket_count;
+  std::map<int, int> samples;  // <value, # of events>
+};
+
+// Enables collection of samples.
+// This method should be called before any other call into webrtc.
+void Enable();
+
+// Gets histograms and clears all samples.
+void GetAndReset(
+    std::map<std::string, std::unique_ptr<SampleInfo>>* histograms);
+
+// Functions below are mainly for testing.
+
+// Clears all samples.
+void Reset();
+
+// Returns the number of times the |sample| has been added to the histogram.
+int NumEvents(const std::string& name, int sample);
+
+// Returns the total number of added samples to the histogram.
+int NumSamples(const std::string& name);
+
+// Returns the minimum sample value (or -1 if the histogram has no samples).
+int MinSample(const std::string& name);
+
+}  // namespace metrics
+}  // namespace webrtc
+
+#endif  // SYSTEM_WRAPPERS_INCLUDE_METRICS_DEFAULT_H_
diff --git a/system_wrappers/include/ntp_time.h b/system_wrappers/include/ntp_time.h
new file mode 100644
index 0000000..1c32184
--- /dev/null
+++ b/system_wrappers/include/ntp_time.h
@@ -0,0 +1,64 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef SYSTEM_WRAPPERS_INCLUDE_NTP_TIME_H_
+#define SYSTEM_WRAPPERS_INCLUDE_NTP_TIME_H_
+
+#include <stdint.h>
+
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+
+class NtpTime {
+ public:
+  static constexpr uint64_t kFractionsPerSecond = 0x100000000;
+  NtpTime() : value_(0) {}
+  explicit NtpTime(uint64_t value) : value_(value) {}
+  NtpTime(uint32_t seconds, uint32_t fractions)
+      : value_(seconds * kFractionsPerSecond + fractions) {}
+
+  NtpTime(const NtpTime&) = default;
+  NtpTime& operator=(const NtpTime&) = default;
+  explicit operator uint64_t() const { return value_; }
+
+  void Set(uint32_t seconds, uint32_t fractions) {
+    value_ = seconds * kFractionsPerSecond + fractions;
+  }
+  void Reset() { value_ = 0; }
+
+  int64_t ToMs() const {
+    static constexpr double kNtpFracPerMs = 4.294967296E6;  // 2^32 / 1000.
+    const double frac_ms = static_cast<double>(fractions()) / kNtpFracPerMs;
+    return 1000 * static_cast<int64_t>(seconds()) +
+           static_cast<int64_t>(frac_ms + 0.5);
+  }
+  // NTP standard (RFC1305, section 3.1) explicitly state value 0 is invalid.
+  bool Valid() const { return value_ != 0; }
+
+  uint32_t seconds() const {
+    return rtc::dchecked_cast<uint32_t>(value_ / kFractionsPerSecond);
+  }
+  uint32_t fractions() const {
+    return rtc::dchecked_cast<uint32_t>(value_ % kFractionsPerSecond);
+  }
+
+ private:
+  uint64_t value_;
+};
+
+inline bool operator==(const NtpTime& n1, const NtpTime& n2) {
+  return static_cast<uint64_t>(n1) == static_cast<uint64_t>(n2);
+}
+inline bool operator!=(const NtpTime& n1, const NtpTime& n2) {
+  return !(n1 == n2);
+}
+
+}  // namespace webrtc
+#endif  // SYSTEM_WRAPPERS_INCLUDE_NTP_TIME_H_
diff --git a/system_wrappers/include/rtp_to_ntp_estimator.h b/system_wrappers/include/rtp_to_ntp_estimator.h
new file mode 100644
index 0000000..7c0757c
--- /dev/null
+++ b/system_wrappers/include/rtp_to_ntp_estimator.h
@@ -0,0 +1,90 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SYSTEM_WRAPPERS_INCLUDE_RTP_TO_NTP_ESTIMATOR_H_
+#define SYSTEM_WRAPPERS_INCLUDE_RTP_TO_NTP_ESTIMATOR_H_
+
+#include <list>
+
+#include "api/optional.h"
+#include "modules/include/module_common_types_public.h"
+#include "rtc_base/numerics/moving_median_filter.h"
+#include "system_wrappers/include/ntp_time.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+// Class for converting an RTP timestamp to the NTP domain in milliseconds.
+// The class needs to be trained with (at least 2) RTP/NTP timestamp pairs from
+// RTCP sender reports before the convertion can be done.
+class RtpToNtpEstimator {
+ public:
+  RtpToNtpEstimator();
+  ~RtpToNtpEstimator();
+
+  // RTP and NTP timestamp pair from a RTCP SR report.
+  struct RtcpMeasurement {
+    RtcpMeasurement(uint32_t ntp_secs,
+                    uint32_t ntp_frac,
+                    int64_t unwrapped_timestamp);
+    bool IsEqual(const RtcpMeasurement& other) const;
+
+    NtpTime ntp_time;
+    int64_t unwrapped_rtp_timestamp;
+  };
+
+  // Estimated parameters from RTP and NTP timestamp pairs in |measurements_|.
+  struct Parameters {
+    // Implicit conversion from int because MovingMedianFilter returns 0
+    // internally if no samples are present. However, it should never happen as
+    // we don't ask smoothing_filter_ to return anything if there were no
+    // samples.
+    Parameters(const int& value) {  // NOLINT
+      RTC_NOTREACHED();
+    }
+    Parameters() : frequency_khz(0.0), offset_ms(0.0) {}
+
+    double frequency_khz;
+    double offset_ms;
+
+    // Needed to make it work inside MovingMedianFilter
+    bool operator<(const Parameters& other) const;
+    bool operator==(const Parameters& other) const;
+    bool operator<=(const Parameters& other) const;
+    bool operator!=(const Parameters& other) const;
+  };
+
+  // Updates measurements with RTP/NTP timestamp pair from a RTCP sender report.
+  // |new_rtcp_sr| is set to true if a new report is added.
+  bool UpdateMeasurements(uint32_t ntp_secs,
+                          uint32_t ntp_frac,
+                          uint32_t rtp_timestamp,
+                          bool* new_rtcp_sr);
+
+  // Converts an RTP timestamp to the NTP domain in milliseconds.
+  // Returns true on success, false otherwise.
+  bool Estimate(int64_t rtp_timestamp, int64_t* rtp_timestamp_ms) const;
+
+  // Returns estimated rtp to ntp linear transform parameters.
+  const rtc::Optional<Parameters> params() const;
+
+  static const int kMaxInvalidSamples = 3;
+
+ private:
+  void UpdateParameters();
+
+  int consecutive_invalid_samples_;
+  std::list<RtcpMeasurement> measurements_;
+  MovingMedianFilter<Parameters> smoothing_filter_;
+  bool params_calculated_;
+  mutable TimestampUnwrapper unwrapper_;
+};
+}  // namespace webrtc
+
+#endif  // SYSTEM_WRAPPERS_INCLUDE_RTP_TO_NTP_ESTIMATOR_H_
diff --git a/system_wrappers/include/runtime_enabled_features.h b/system_wrappers/include/runtime_enabled_features.h
new file mode 100644
index 0000000..9ccbedc
--- /dev/null
+++ b/system_wrappers/include/runtime_enabled_features.h
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef SYSTEM_WRAPPERS_INCLUDE_RUNTIME_ENABLED_FEATURES_H_
+#define SYSTEM_WRAPPERS_INCLUDE_RUNTIME_ENABLED_FEATURES_H_
+
+#include <string>
+
+// These functions for querying enabled runtime features must be implemented
+// by all webrtc clients (such as Chrome).
+// Default implementation is provided in:
+//
+//    system_wrappers/system_wrappers:runtime_enabled_features_default
+
+// TODO(ilnik): Find a more flexible way to use Chrome features.
+// This interface requires manual translation from feature name to
+// Chrome feature class in third_party/webrtc_overrides.
+
+namespace webrtc {
+namespace runtime_enabled_features {
+
+const char kDualStreamModeFeatureName[] = "WebRtcDualStreamMode";
+
+bool IsFeatureEnabled(std::string feature_name);
+
+}  // namespace runtime_enabled_features
+}  // namespace webrtc
+
+#endif  // SYSTEM_WRAPPERS_INCLUDE_RUNTIME_ENABLED_FEATURES_H_
diff --git a/system_wrappers/include/rw_lock_wrapper.h b/system_wrappers/include/rw_lock_wrapper.h
new file mode 100644
index 0000000..a22b6ab
--- /dev/null
+++ b/system_wrappers/include/rw_lock_wrapper.h
@@ -0,0 +1,64 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SYSTEM_WRAPPERS_INCLUDE_RW_LOCK_WRAPPER_H_
+#define SYSTEM_WRAPPERS_INCLUDE_RW_LOCK_WRAPPER_H_
+
+#include "rtc_base/thread_annotations.h"
+
+// Note, Windows pre-Vista version of RW locks are not supported natively. For
+// these OSs regular critical sections have been used to approximate RW lock
+// functionality and will therefore have worse performance.
+
+namespace webrtc {
+
+class RTC_LOCKABLE RWLockWrapper {
+ public:
+  static RWLockWrapper* CreateRWLock();
+  virtual ~RWLockWrapper() {}
+
+  virtual void AcquireLockExclusive() RTC_EXCLUSIVE_LOCK_FUNCTION() = 0;
+  virtual void ReleaseLockExclusive() RTC_UNLOCK_FUNCTION() = 0;
+
+  virtual void AcquireLockShared() RTC_SHARED_LOCK_FUNCTION() = 0;
+  virtual void ReleaseLockShared() RTC_UNLOCK_FUNCTION() = 0;
+};
+
+// RAII extensions of the RW lock. Prevents Acquire/Release missmatches and
+// provides more compact locking syntax.
+class RTC_SCOPED_LOCKABLE ReadLockScoped {
+ public:
+  ReadLockScoped(RWLockWrapper& rw_lock) RTC_SHARED_LOCK_FUNCTION(rw_lock)
+      : rw_lock_(rw_lock) {
+    rw_lock_.AcquireLockShared();
+  }
+
+  ~ReadLockScoped() RTC_UNLOCK_FUNCTION() { rw_lock_.ReleaseLockShared(); }
+
+ private:
+  RWLockWrapper& rw_lock_;
+};
+
+class RTC_SCOPED_LOCKABLE WriteLockScoped {
+ public:
+  WriteLockScoped(RWLockWrapper& rw_lock) RTC_EXCLUSIVE_LOCK_FUNCTION(rw_lock)
+      : rw_lock_(rw_lock) {
+    rw_lock_.AcquireLockExclusive();
+  }
+
+  ~WriteLockScoped() RTC_UNLOCK_FUNCTION() { rw_lock_.ReleaseLockExclusive(); }
+
+ private:
+  RWLockWrapper& rw_lock_;
+};
+
+}  // namespace webrtc
+
+#endif  // SYSTEM_WRAPPERS_INCLUDE_RW_LOCK_WRAPPER_H_
diff --git a/system_wrappers/include/sleep.h b/system_wrappers/include/sleep.h
new file mode 100644
index 0000000..3bf8df2
--- /dev/null
+++ b/system_wrappers/include/sleep.h
@@ -0,0 +1,24 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+// An OS-independent sleep function.
+
+#ifndef SYSTEM_WRAPPERS_INCLUDE_SLEEP_H_
+#define SYSTEM_WRAPPERS_INCLUDE_SLEEP_H_
+
+namespace webrtc {
+
+// This function sleeps for the specified number of milliseconds.
+// It may return early if the thread is woken by some other event,
+// such as the delivery of a signal on Unix.
+void SleepMs(int msecs);
+
+}  // namespace webrtc
+
+#endif  // SYSTEM_WRAPPERS_INCLUDE_SLEEP_H_
diff --git a/system_wrappers/include/timestamp_extrapolator.h b/system_wrappers/include/timestamp_extrapolator.h
new file mode 100644
index 0000000..9418100
--- /dev/null
+++ b/system_wrappers/include/timestamp_extrapolator.h
@@ -0,0 +1,54 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SYSTEM_WRAPPERS_INCLUDE_TIMESTAMP_EXTRAPOLATOR_H_
+#define SYSTEM_WRAPPERS_INCLUDE_TIMESTAMP_EXTRAPOLATOR_H_
+
+#include "system_wrappers/include/rw_lock_wrapper.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class TimestampExtrapolator {
+ public:
+  explicit TimestampExtrapolator(int64_t start_ms);
+  ~TimestampExtrapolator();
+  void Update(int64_t tMs, uint32_t ts90khz);
+  int64_t ExtrapolateLocalTime(uint32_t timestamp90khz);
+  void Reset(int64_t start_ms);
+
+ private:
+  void CheckForWrapArounds(uint32_t ts90khz);
+  bool DelayChangeDetection(double error);
+  RWLockWrapper* _rwLock;
+  double _w[2];
+  double _pP[2][2];
+  int64_t _startMs;
+  int64_t _prevMs;
+  uint32_t _firstTimestamp;
+  int32_t _wrapArounds;
+  int64_t _prevUnwrappedTimestamp;
+  int64_t _prevWrapTimestamp;
+  const double _lambda;
+  bool _firstAfterReset;
+  uint32_t _packetCount;
+  const uint32_t _startUpFilterDelayInPackets;
+
+  double _detectorAccumulatorPos;
+  double _detectorAccumulatorNeg;
+  const double _alarmThreshold;
+  const double _accDrift;
+  const double _accMaxError;
+  const double _pP11;
+};
+
+}  // namespace webrtc
+
+#endif  // SYSTEM_WRAPPERS_INCLUDE_TIMESTAMP_EXTRAPOLATOR_H_
diff --git a/system_wrappers/source/aligned_array_unittest.cc b/system_wrappers/source/aligned_array_unittest.cc
new file mode 100644
index 0000000..e5a3c18
--- /dev/null
+++ b/system_wrappers/source/aligned_array_unittest.cc
@@ -0,0 +1,60 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/include/aligned_array.h"
+
+#include <stdint.h>
+
+#include "test/gtest.h"
+
+namespace {
+
+bool IsAligned(const void* ptr, size_t alignment) {
+  return reinterpret_cast<uintptr_t>(ptr) % alignment == 0;
+}
+
+}  // namespace
+
+namespace webrtc {
+
+TEST(AlignedArrayTest, CheckAlignment) {
+  AlignedArray<bool> arr(10, 7, 128);
+  ASSERT_TRUE(IsAligned(arr.Array(), 128));
+  for (size_t i = 0; i < 10; ++i) {
+    ASSERT_TRUE(IsAligned(arr.Row(i), 128));
+    ASSERT_EQ(arr.Row(i), arr.Array()[i]);
+  }
+}
+
+TEST(AlignedArrayTest, CheckOverlap) {
+  AlignedArray<size_t> arr(10, 7, 128);
+
+  for (size_t i = 0; i < 10; ++i) {
+    for (size_t j = 0; j < 7; ++j) {
+      arr.At(i, j) = 20 * i + j;
+    }
+  }
+
+  for (size_t i = 0; i < 10; ++i) {
+    for (size_t j = 0; j < 7; ++j) {
+      ASSERT_EQ(arr.At(i, j), 20 * i + j);
+      ASSERT_EQ(arr.Row(i)[j], 20 * i + j);
+      ASSERT_EQ(arr.Array()[i][j], 20 * i + j);
+    }
+  }
+}
+
+TEST(AlignedArrayTest, CheckRowsCols) {
+  AlignedArray<bool> arr(10, 7, 128);
+  ASSERT_EQ(arr.rows(), 10u);
+  ASSERT_EQ(arr.cols(), 7u);
+}
+
+}  // namespace webrtc
diff --git a/system_wrappers/source/aligned_malloc.cc b/system_wrappers/source/aligned_malloc.cc
new file mode 100644
index 0000000..43ece9e
--- /dev/null
+++ b/system_wrappers/source/aligned_malloc.cc
@@ -0,0 +1,100 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/include/aligned_malloc.h"
+
+#include <memory.h>
+#include <stdlib.h>
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <stdint.h>
+#endif
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+// Reference on memory alignment:
+// http://stackoverflow.com/questions/227897/solve-the-memory-alignment-in-c-interview-question-that-stumped-me
+namespace webrtc {
+
+uintptr_t GetRightAlign(uintptr_t start_pos, size_t alignment) {
+  // The pointer should be aligned with |alignment| bytes. The - 1 guarantees
+  // that it is aligned towards the closest higher (right) address.
+  return (start_pos + alignment - 1) & ~(alignment - 1);
+}
+
+// Alignment must be an integer power of two.
+bool ValidAlignment(size_t alignment) {
+  if (!alignment) {
+    return false;
+  }
+  return (alignment & (alignment - 1)) == 0;
+}
+
+void* GetRightAlign(const void* pointer, size_t alignment) {
+  if (!pointer) {
+    return NULL;
+  }
+  if (!ValidAlignment(alignment)) {
+    return NULL;
+  }
+  uintptr_t start_pos = reinterpret_cast<uintptr_t>(pointer);
+  return reinterpret_cast<void*>(GetRightAlign(start_pos, alignment));
+}
+
+void* AlignedMalloc(size_t size, size_t alignment) {
+  if (size == 0) {
+    return NULL;
+  }
+  if (!ValidAlignment(alignment)) {
+    return NULL;
+  }
+
+  // The memory is aligned towards the lowest address that so only
+  // alignment - 1 bytes needs to be allocated.
+  // A pointer to the start of the memory must be stored so that it can be
+  // retreived for deletion, ergo the sizeof(uintptr_t).
+  void* memory_pointer = malloc(size + sizeof(uintptr_t) + alignment - 1);
+  if (memory_pointer == NULL) {
+    return NULL;
+  }
+
+  // Aligning after the sizeof(uintptr_t) bytes will leave room for the header
+  // in the same memory block.
+  uintptr_t align_start_pos = reinterpret_cast<uintptr_t>(memory_pointer);
+  align_start_pos += sizeof(uintptr_t);
+  uintptr_t aligned_pos = GetRightAlign(align_start_pos, alignment);
+  void* aligned_pointer = reinterpret_cast<void*>(aligned_pos);
+
+  // Store the address to the beginning of the memory just before the aligned
+  // memory.
+  uintptr_t header_pos = aligned_pos - sizeof(uintptr_t);
+  void* header_pointer = reinterpret_cast<void*>(header_pos);
+  uintptr_t memory_start = reinterpret_cast<uintptr_t>(memory_pointer);
+  memcpy(header_pointer, &memory_start, sizeof(uintptr_t));
+
+  return aligned_pointer;
+}
+
+void AlignedFree(void* mem_block) {
+  if (mem_block == NULL) {
+    return;
+  }
+  uintptr_t aligned_pos = reinterpret_cast<uintptr_t>(mem_block);
+  uintptr_t header_pos = aligned_pos - sizeof(uintptr_t);
+
+  // Read out the address of the AlignedMemory struct from the header.
+  uintptr_t memory_start_pos = *reinterpret_cast<uintptr_t*>(header_pos);
+  void* memory_start = reinterpret_cast<void*>(memory_start_pos);
+  free(memory_start);
+}
+
+}  // namespace webrtc
diff --git a/system_wrappers/source/aligned_malloc_unittest.cc b/system_wrappers/source/aligned_malloc_unittest.cc
new file mode 100644
index 0000000..7afbf6d
--- /dev/null
+++ b/system_wrappers/source/aligned_malloc_unittest.cc
@@ -0,0 +1,82 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/include/aligned_malloc.h"
+
+#include <memory>
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <stdint.h>
+#endif
+
+#include "test/gtest.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+// Returns true if |size| and |alignment| are valid combinations.
+bool CorrectUsage(size_t size, size_t alignment) {
+  std::unique_ptr<char, AlignedFreeDeleter> scoped(
+      static_cast<char*>(AlignedMalloc(size, alignment)));
+  if (scoped.get() == NULL) {
+    return false;
+  }
+  const uintptr_t scoped_address = reinterpret_cast<uintptr_t>(scoped.get());
+  return 0u == scoped_address % alignment;
+}
+
+TEST(AlignedMalloc, GetRightAlign) {
+  const size_t size = 100;
+  const size_t alignment = 32;
+  const size_t left_misalignment = 1;
+  std::unique_ptr<char, AlignedFreeDeleter> scoped(
+      static_cast<char*>(AlignedMalloc(size, alignment)));
+  EXPECT_TRUE(scoped.get() != NULL);
+  const uintptr_t aligned_address = reinterpret_cast<uintptr_t>(scoped.get());
+  const uintptr_t misaligned_address = aligned_address - left_misalignment;
+  const char* misaligned_ptr =
+      reinterpret_cast<const char*>(misaligned_address);
+  const char* realigned_ptr = GetRightAlign(misaligned_ptr, alignment);
+  EXPECT_EQ(scoped.get(), realigned_ptr);
+}
+
+TEST(AlignedMalloc, IncorrectSize) {
+  const size_t incorrect_size = 0;
+  const size_t alignment = 64;
+  EXPECT_FALSE(CorrectUsage(incorrect_size, alignment));
+}
+
+TEST(AlignedMalloc, IncorrectAlignment) {
+  const size_t size = 100;
+  const size_t incorrect_alignment = 63;
+  EXPECT_FALSE(CorrectUsage(size, incorrect_alignment));
+}
+
+TEST(AlignedMalloc, AlignTo2Bytes) {
+  size_t size = 100;
+  size_t alignment = 2;
+  EXPECT_TRUE(CorrectUsage(size, alignment));
+}
+
+TEST(AlignedMalloc, AlignTo32Bytes) {
+  size_t size = 100;
+  size_t alignment = 32;
+  EXPECT_TRUE(CorrectUsage(size, alignment));
+}
+
+TEST(AlignedMalloc, AlignTo128Bytes) {
+  size_t size = 100;
+  size_t alignment = 128;
+  EXPECT_TRUE(CorrectUsage(size, alignment));
+}
+
+}  // namespace webrtc
diff --git a/system_wrappers/source/clock.cc b/system_wrappers/source/clock.cc
new file mode 100644
index 0000000..631974d
--- /dev/null
+++ b/system_wrappers/source/clock.cc
@@ -0,0 +1,267 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/include/clock.h"
+
+#if defined(WEBRTC_WIN)
+
+// Windows needs to be included before mmsystem.h
+#include "rtc_base/win32.h"
+
+#include <MMSystem.h>
+
+#elif defined(WEBRTC_POSIX)
+
+#include <sys/time.h>
+#include <time.h>
+
+#endif  // defined(WEBRTC_POSIX)
+
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/timeutils.h"
+#include "system_wrappers/include/rw_lock_wrapper.h"
+
+namespace webrtc {
+
+class RealTimeClock : public Clock {
+  // Return a timestamp in milliseconds relative to some arbitrary source; the
+  // source is fixed for this clock.
+  int64_t TimeInMilliseconds() const override { return rtc::TimeMillis(); }
+
+  // Return a timestamp in microseconds relative to some arbitrary source; the
+  // source is fixed for this clock.
+  int64_t TimeInMicroseconds() const override { return rtc::TimeMicros(); }
+
+  // Retrieve an NTP absolute timestamp.
+  NtpTime CurrentNtpTime() const override {
+    timeval tv = CurrentTimeVal();
+    double microseconds_in_seconds;
+    uint32_t seconds;
+    Adjust(tv, &seconds, &microseconds_in_seconds);
+    uint32_t fractions = static_cast<uint32_t>(
+        microseconds_in_seconds * kMagicNtpFractionalUnit + 0.5);
+    return NtpTime(seconds, fractions);
+  }
+
+  // Retrieve an NTP absolute timestamp in milliseconds.
+  int64_t CurrentNtpInMilliseconds() const override {
+    timeval tv = CurrentTimeVal();
+    uint32_t seconds;
+    double microseconds_in_seconds;
+    Adjust(tv, &seconds, &microseconds_in_seconds);
+    return 1000 * static_cast<int64_t>(seconds) +
+           static_cast<int64_t>(1000.0 * microseconds_in_seconds + 0.5);
+  }
+
+ protected:
+  virtual timeval CurrentTimeVal() const = 0;
+
+  static void Adjust(const timeval& tv,
+                     uint32_t* adjusted_s,
+                     double* adjusted_us_in_s) {
+    *adjusted_s = tv.tv_sec + kNtpJan1970;
+    *adjusted_us_in_s = tv.tv_usec / 1e6;
+
+    if (*adjusted_us_in_s >= 1) {
+      *adjusted_us_in_s -= 1;
+      ++*adjusted_s;
+    } else if (*adjusted_us_in_s < -1) {
+      *adjusted_us_in_s += 1;
+      --*adjusted_s;
+    }
+  }
+};
+
+#if defined(WEBRTC_WIN)
+// TODO(pbos): Consider modifying the implementation to synchronize itself
+// against system time (update ref_point_, make it non-const) periodically to
+// prevent clock drift.
+class WindowsRealTimeClock : public RealTimeClock {
+ public:
+  WindowsRealTimeClock()
+      : last_time_ms_(0),
+        num_timer_wraps_(0),
+        ref_point_(GetSystemReferencePoint()) {}
+
+  virtual ~WindowsRealTimeClock() {}
+
+ protected:
+  struct ReferencePoint {
+    FILETIME file_time;
+    LARGE_INTEGER counter_ms;
+  };
+
+  timeval CurrentTimeVal() const override {
+    const uint64_t FILETIME_1970 = 0x019db1ded53e8000;
+
+    FILETIME StartTime;
+    uint64_t Time;
+    struct timeval tv;
+
+    // We can't use query performance counter since they can change depending on
+    // speed stepping.
+    GetTime(&StartTime);
+
+    Time = (((uint64_t)StartTime.dwHighDateTime) << 32) +
+           (uint64_t)StartTime.dwLowDateTime;
+
+    // Convert the hecto-nano second time to tv format.
+    Time -= FILETIME_1970;
+
+    tv.tv_sec = (uint32_t)(Time / (uint64_t)10000000);
+    tv.tv_usec = (uint32_t)((Time % (uint64_t)10000000) / 10);
+    return tv;
+  }
+
+  void GetTime(FILETIME* current_time) const {
+    DWORD t;
+    LARGE_INTEGER elapsed_ms;
+    {
+      rtc::CritScope lock(&crit_);
+      // time MUST be fetched inside the critical section to avoid non-monotonic
+      // last_time_ms_ values that'll register as incorrect wraparounds due to
+      // concurrent calls to GetTime.
+      t = timeGetTime();
+      if (t < last_time_ms_)
+        num_timer_wraps_++;
+      last_time_ms_ = t;
+      elapsed_ms.HighPart = num_timer_wraps_;
+    }
+    elapsed_ms.LowPart = t;
+    elapsed_ms.QuadPart = elapsed_ms.QuadPart - ref_point_.counter_ms.QuadPart;
+
+    // Translate to 100-nanoseconds intervals (FILETIME resolution)
+    // and add to reference FILETIME to get current FILETIME.
+    ULARGE_INTEGER filetime_ref_as_ul;
+    filetime_ref_as_ul.HighPart = ref_point_.file_time.dwHighDateTime;
+    filetime_ref_as_ul.LowPart = ref_point_.file_time.dwLowDateTime;
+    filetime_ref_as_ul.QuadPart +=
+        static_cast<ULONGLONG>((elapsed_ms.QuadPart) * 1000 * 10);
+
+    // Copy to result
+    current_time->dwHighDateTime = filetime_ref_as_ul.HighPart;
+    current_time->dwLowDateTime = filetime_ref_as_ul.LowPart;
+  }
+
+  static ReferencePoint GetSystemReferencePoint() {
+    ReferencePoint ref = {};
+    FILETIME ft0 = {};
+    FILETIME ft1 = {};
+    // Spin waiting for a change in system time. As soon as this change happens,
+    // get the matching call for timeGetTime() as soon as possible. This is
+    // assumed to be the most accurate offset that we can get between
+    // timeGetTime() and system time.
+
+    // Set timer accuracy to 1 ms.
+    timeBeginPeriod(1);
+    GetSystemTimeAsFileTime(&ft0);
+    do {
+      GetSystemTimeAsFileTime(&ft1);
+
+      ref.counter_ms.QuadPart = timeGetTime();
+      Sleep(0);
+    } while ((ft0.dwHighDateTime == ft1.dwHighDateTime) &&
+             (ft0.dwLowDateTime == ft1.dwLowDateTime));
+    ref.file_time = ft1;
+    timeEndPeriod(1);
+    return ref;
+  }
+
+  // mutable as time-accessing functions are const.
+  rtc::CriticalSection crit_;
+  mutable DWORD last_time_ms_;
+  mutable LONG num_timer_wraps_;
+  const ReferencePoint ref_point_;
+};
+
+#elif defined(WEBRTC_POSIX)
+class UnixRealTimeClock : public RealTimeClock {
+ public:
+  UnixRealTimeClock() {}
+
+  ~UnixRealTimeClock() override {}
+
+ protected:
+  timeval CurrentTimeVal() const override {
+    struct timeval tv;
+    struct timezone tz;
+    tz.tz_minuteswest = 0;
+    tz.tz_dsttime = 0;
+    gettimeofday(&tv, &tz);
+    return tv;
+  }
+};
+#endif  // defined(WEBRTC_POSIX)
+
+#if defined(WEBRTC_WIN)
+static WindowsRealTimeClock* volatile g_shared_clock = nullptr;
+#endif  // defined(WEBRTC_WIN)
+
+Clock* Clock::GetRealTimeClock() {
+#if defined(WEBRTC_WIN)
+  // This read relies on volatile read being atomic-load-acquire. This is
+  // true in MSVC since at least 2005:
+  // "A read of a volatile object (volatile read) has Acquire semantics"
+  if (g_shared_clock != nullptr)
+    return g_shared_clock;
+  WindowsRealTimeClock* clock = new WindowsRealTimeClock;
+  if (InterlockedCompareExchangePointer(
+          reinterpret_cast<void* volatile*>(&g_shared_clock), clock, nullptr) !=
+      nullptr) {
+    // g_shared_clock was assigned while we constructed/tried to assign our
+    // instance, delete our instance and use the existing one.
+    delete clock;
+  }
+  return g_shared_clock;
+#elif defined(WEBRTC_POSIX)
+  static UnixRealTimeClock clock;
+  return &clock;
+#else  // defined(WEBRTC_POSIX)
+  return nullptr;
+#endif  // !defined(WEBRTC_WIN) || defined(WEBRTC_POSIX)
+}
+
+SimulatedClock::SimulatedClock(int64_t initial_time_us)
+    : time_us_(initial_time_us), lock_(RWLockWrapper::CreateRWLock()) {}
+
+SimulatedClock::~SimulatedClock() {}
+
+int64_t SimulatedClock::TimeInMilliseconds() const {
+  ReadLockScoped synchronize(*lock_);
+  return (time_us_ + 500) / 1000;
+}
+
+int64_t SimulatedClock::TimeInMicroseconds() const {
+  ReadLockScoped synchronize(*lock_);
+  return time_us_;
+}
+
+NtpTime SimulatedClock::CurrentNtpTime() const {
+  int64_t now_ms = TimeInMilliseconds();
+  uint32_t seconds = (now_ms / 1000) + kNtpJan1970;
+  uint32_t fractions =
+      static_cast<uint32_t>((now_ms % 1000) * kMagicNtpFractionalUnit / 1000);
+  return NtpTime(seconds, fractions);
+}
+
+int64_t SimulatedClock::CurrentNtpInMilliseconds() const {
+  return TimeInMilliseconds() + 1000 * static_cast<int64_t>(kNtpJan1970);
+}
+
+void SimulatedClock::AdvanceTimeMilliseconds(int64_t milliseconds) {
+  AdvanceTimeMicroseconds(1000 * milliseconds);
+}
+
+void SimulatedClock::AdvanceTimeMicroseconds(int64_t microseconds) {
+  WriteLockScoped synchronize(*lock_);
+  time_us_ += microseconds;
+}
+
+};  // namespace webrtc
diff --git a/system_wrappers/source/clock_unittest.cc b/system_wrappers/source/clock_unittest.cc
new file mode 100644
index 0000000..f7b0ed7
--- /dev/null
+++ b/system_wrappers/source/clock_unittest.cc
@@ -0,0 +1,34 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/include/clock.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+TEST(ClockTest, NtpTime) {
+  Clock* clock = Clock::GetRealTimeClock();
+
+  // To ensure the test runs correctly even on a heavily loaded system, do not
+  // compare the seconds/fractions and millisecond values directly. Instead,
+  // we check that the NTP time is between the "milliseconds" values returned
+  // right before and right after the call.
+  // The comparison includes 1 ms of margin to account for the rounding error in
+  // the conversion.
+  int64_t milliseconds_lower_bound = clock->CurrentNtpInMilliseconds();
+  NtpTime ntp_time = clock->CurrentNtpTime();
+  int64_t milliseconds_upper_bound = clock->CurrentNtpInMilliseconds();
+  EXPECT_GT(milliseconds_lower_bound / 1000, kNtpJan1970);
+  EXPECT_LE(milliseconds_lower_bound - 1, ntp_time.ToMs());
+  EXPECT_GE(milliseconds_upper_bound + 1, ntp_time.ToMs());
+}
+
+}  // namespace webrtc
diff --git a/system_wrappers/source/cpu_features.cc b/system_wrappers/source/cpu_features.cc
new file mode 100644
index 0000000..7417f53
--- /dev/null
+++ b/system_wrappers/source/cpu_features.cc
@@ -0,0 +1,73 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Parts of this file derived from Chromium's base/cpu.cc.
+
+#include "system_wrappers/include/cpu_features_wrapper.h"
+
+#if defined(WEBRTC_ARCH_X86_FAMILY) && defined(_MSC_VER)
+#include <intrin.h>
+#endif
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+// No CPU feature is available => straight C path.
+int GetCPUInfoNoASM(CPUFeature feature) {
+  (void)feature;
+  return 0;
+}
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+#ifndef _MSC_VER
+// Intrinsic for "cpuid".
+#if defined(__pic__) && defined(__i386__)
+static inline void __cpuid(int cpu_info[4], int info_type) {
+  __asm__ volatile(
+      "mov %%ebx, %%edi\n"
+      "cpuid\n"
+      "xchg %%edi, %%ebx\n"
+      : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]),
+        "=d"(cpu_info[3])
+      : "a"(info_type));
+}
+#else
+static inline void __cpuid(int cpu_info[4], int info_type) {
+  __asm__ volatile("cpuid\n"
+                   : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]),
+                     "=d"(cpu_info[3])
+                   : "a"(info_type));
+}
+#endif
+#endif  // _MSC_VER
+#endif  // WEBRTC_ARCH_X86_FAMILY
+
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+// Actual feature detection for x86.
+static int GetCPUInfo(CPUFeature feature) {
+  int cpu_info[4];
+  __cpuid(cpu_info, 1);
+  if (feature == kSSE2) {
+    return 0 != (cpu_info[3] & 0x04000000);
+  }
+  if (feature == kSSE3) {
+    return 0 != (cpu_info[2] & 0x00000001);
+  }
+  return 0;
+}
+#else
+// Default to straight C for other platforms.
+static int GetCPUInfo(CPUFeature feature) {
+  (void)feature;
+  return 0;
+}
+#endif
+
+WebRtc_CPUInfo WebRtc_GetCPUInfo = GetCPUInfo;
+WebRtc_CPUInfo WebRtc_GetCPUInfoNoASM = GetCPUInfoNoASM;
diff --git a/system_wrappers/source/cpu_features_android.c b/system_wrappers/source/cpu_features_android.c
new file mode 100644
index 0000000..0cb3a6c
--- /dev/null
+++ b/system_wrappers/source/cpu_features_android.c
@@ -0,0 +1,15 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cpu-features.h>
+
+uint64_t WebRtc_GetCPUFeaturesARM(void) {
+  return android_getCpuFeatures();
+}
diff --git a/system_wrappers/source/cpu_features_linux.c b/system_wrappers/source/cpu_features_linux.c
new file mode 100644
index 0000000..9c56450
--- /dev/null
+++ b/system_wrappers/source/cpu_features_linux.c
@@ -0,0 +1,86 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <features.h>
+#ifndef __GLIBC_PREREQ
+#define __GLIBC_PREREQ(a, b) 0
+#endif
+#if __GLIBC_PREREQ(2, 16)
+#include <sys/auxv.h>
+#else
+#include <fcntl.h>
+#include <unistd.h>
+#include <errno.h>
+#include <link.h>
+#endif
+#include "system_wrappers/include/cpu_features_wrapper.h"
+
+#if defined(WEBRTC_ARCH_ARM_FAMILY)
+#include <asm/hwcap.h>
+
+uint64_t WebRtc_GetCPUFeaturesARM(void) {
+  uint64_t result = 0;
+  int architecture = 0;
+  unsigned long hwcap = 0;
+  const char* platform = NULL;
+#if __GLIBC_PREREQ(2, 16)
+  hwcap = getauxval(AT_HWCAP);
+  platform = (const char*)getauxval(AT_PLATFORM);
+#else
+  ElfW(auxv_t) auxv;
+  int fd = open("/proc/self/auxv", O_RDONLY);
+  if (fd >= 0) {
+    while (hwcap == 0 || platform == NULL) {
+      if (read(fd, &auxv, sizeof(auxv)) < (ssize_t)sizeof(auxv)) {
+        if (errno == EINTR)
+          continue;
+        break;
+      }
+      switch (auxv.a_type) {
+        case AT_HWCAP:
+          hwcap = auxv.a_un.a_val;
+          break;
+        case AT_PLATFORM:
+          platform = (const char*)auxv.a_un.a_val;
+          break;
+      }
+    }
+    close(fd);
+  }
+#endif  // __GLIBC_PREREQ(2,16)
+#if defined(__aarch64__)
+  architecture = 8;
+  if ((hwcap & HWCAP_FP) != 0)
+    result |= kCPUFeatureVFPv3;
+  if ((hwcap & HWCAP_ASIMD) != 0)
+    result |= kCPUFeatureNEON;
+#else
+  if (platform != NULL) {
+    /* expect a string in the form "v6l" or "v7l", etc.
+     */
+    if (platform[0] == 'v' && '0' <= platform[1] && platform[1] <= '9' &&
+        (platform[2] == 'l' || platform[2] == 'b')) {
+      architecture = platform[1] - '0';
+    }
+  }
+  if ((hwcap & HWCAP_VFPv3) != 0)
+    result |= kCPUFeatureVFPv3;
+  if ((hwcap & HWCAP_NEON) != 0)
+    result |= kCPUFeatureNEON;
+#endif
+  if (architecture >= 7)
+    result |= kCPUFeatureARMv7;
+  if (architecture >= 6)
+    result |= kCPUFeatureLDREXSTREX;
+  return result;
+}
+#endif  // WEBRTC_ARCH_ARM_FAMILY
diff --git a/system_wrappers/source/cpu_info.cc b/system_wrappers/source/cpu_info.cc
new file mode 100644
index 0000000..0bfe015
--- /dev/null
+++ b/system_wrappers/source/cpu_info.cc
@@ -0,0 +1,69 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/include/cpu_info.h"
+
+#if defined(WEBRTC_WIN)
+#include <windows.h>
+#include <winsock2.h>
+#ifndef EXCLUDE_D3D9
+#include <d3d9.h>
+#endif
+#elif defined(WEBRTC_LINUX)
+#include <unistd.h>
+#endif
+#if defined(WEBRTC_MAC)
+#include <sys/sysctl.h>
+#endif
+
+#include "rtc_base/logging.h"
+
+namespace internal {
+static int DetectNumberOfCores() {
+  // We fall back on assuming a single core in case of errors.
+  int number_of_cores = 1;
+
+#if defined(WEBRTC_WIN)
+  SYSTEM_INFO si;
+  GetNativeSystemInfo(&si);
+  number_of_cores = static_cast<int>(si.dwNumberOfProcessors);
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_ANDROID)
+  number_of_cores = static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN));
+#elif defined(WEBRTC_MAC)
+  int name[] = {CTL_HW, HW_AVAILCPU};
+  size_t size = sizeof(number_of_cores);
+  if (0 != sysctl(name, 2, &number_of_cores, &size, NULL, 0)) {
+    RTC_LOG(LS_ERROR) << "Failed to get number of cores";
+    number_of_cores = 1;
+  }
+#else
+  RTC_LOG(LS_ERROR) << "No function to get number of cores";
+#endif
+
+  RTC_LOG(LS_INFO) << "Available number of cores: " << number_of_cores;
+
+  return number_of_cores;
+}
+}  // namespace internal
+
+namespace webrtc {
+
+uint32_t CpuInfo::DetectNumberOfCores() {
+  // Statically cache the number of system cores available since if the process
+  // is running in a sandbox, we may only be able to read the value once (before
+  // the sandbox is initialized) and not thereafter.
+  // For more information see crbug.com/176522.
+  static uint32_t logical_cpus = 0;
+  if (!logical_cpus)
+    logical_cpus = static_cast<uint32_t>(internal::DetectNumberOfCores());
+  return logical_cpus;
+}
+
+}  // namespace webrtc
diff --git a/system_wrappers/source/event.cc b/system_wrappers/source/event.cc
new file mode 100644
index 0000000..aac69f6
--- /dev/null
+++ b/system_wrappers/source/event.cc
@@ -0,0 +1,55 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/include/event_wrapper.h"
+
+#if defined(_WIN32)
+#include <windows.h>
+#include "system_wrappers/source/event_timer_win.h"
+#elif defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
+#include <ApplicationServices/ApplicationServices.h>
+#include <pthread.h>
+#include "system_wrappers/source/event_timer_posix.h"
+#else
+#include <pthread.h>
+#include "system_wrappers/source/event_timer_posix.h"
+#endif
+
+#include "rtc_base/event.h"
+
+namespace webrtc {
+
+class EventWrapperImpl : public EventWrapper {
+ public:
+  EventWrapperImpl() : event_(false, false) {}
+  ~EventWrapperImpl() override {}
+
+  bool Set() override {
+    event_.Set();
+    return true;
+  }
+
+  EventTypeWrapper Wait(unsigned long max_time) override {
+    int to_wait = max_time == WEBRTC_EVENT_INFINITE
+                      ? rtc::Event::kForever
+                      : static_cast<int>(max_time);
+    return event_.Wait(to_wait) ? kEventSignaled : kEventTimeout;
+  }
+
+ private:
+  rtc::Event event_;
+};
+
+// static
+EventWrapper* EventWrapper::Create() {
+  return new EventWrapperImpl();
+}
+
+}  // namespace webrtc
diff --git a/system_wrappers/source/event_timer_posix.cc b/system_wrappers/source/event_timer_posix.cc
new file mode 100644
index 0000000..e79aa99
--- /dev/null
+++ b/system_wrappers/source/event_timer_posix.cc
@@ -0,0 +1,275 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/source/event_timer_posix.h"
+
+#if defined(WEBRTC_ANDROID)
+#include <android/api-level.h>
+#endif
+
+#include <errno.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include "rtc_base/checks.h"
+
+#if defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC)
+// Chromium build is always defining this macro if __ANDROID_API__ < 20.
+#undef HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC
+#endif
+
+#if defined(WEBRTC_ANDROID) && defined(__ANDROID_API__)
+#define HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC (__ANDROID_API__ < 21)
+#else
+#define HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC 0
+#endif
+
+namespace webrtc {
+
+// static
+EventTimerWrapper* EventTimerWrapper::Create() {
+  return new EventTimerPosix();
+}
+
+const int64_t kNanosecondsPerMillisecond = 1000000;
+const int64_t kNanosecondsPerSecond = 1000000000;
+
+EventTimerPosix::EventTimerPosix()
+    : event_set_(false),
+      timer_thread_(nullptr),
+      created_at_(),
+      periodic_(false),
+      time_ms_(0),
+      count_(0),
+      is_stopping_(false) {
+  pthread_mutexattr_t attr;
+  pthread_mutexattr_init(&attr);
+  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+  pthread_mutex_init(&mutex_, &attr);
+  pthread_condattr_t cond_attr;
+  pthread_condattr_init(&cond_attr);
+// TODO(sprang): Remove HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC special case once
+// all supported Android platforms support pthread_condattr_setclock.
+// TODO(sprang): Add support for monotonic clock on Apple platforms.
+#if !(defined(WEBRTC_MAC) || defined(WEBRTC_IOS)) && \
+    !(defined(WEBRTC_ANDROID) && HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC)
+  pthread_condattr_setclock(&cond_attr, CLOCK_MONOTONIC);
+#endif
+  pthread_cond_init(&cond_, &cond_attr);
+  pthread_condattr_destroy(&cond_attr);
+}
+
+EventTimerPosix::~EventTimerPosix() {
+  StopTimer();
+  pthread_cond_destroy(&cond_);
+  pthread_mutex_destroy(&mutex_);
+}
+
+// TODO(pbos): Make this void.
+bool EventTimerPosix::Set() {
+  RTC_CHECK_EQ(0, pthread_mutex_lock(&mutex_));
+  event_set_ = true;
+  pthread_cond_signal(&cond_);
+  pthread_mutex_unlock(&mutex_);
+  return true;
+}
+
+EventTypeWrapper EventTimerPosix::Wait(unsigned long timeout_ms) {
+  int ret_val = 0;
+  RTC_CHECK_EQ(0, pthread_mutex_lock(&mutex_));
+
+  if (!event_set_) {
+    if (WEBRTC_EVENT_INFINITE != timeout_ms) {
+      timespec end_at;
+#ifndef WEBRTC_MAC
+      clock_gettime(CLOCK_MONOTONIC, &end_at);
+#else
+      timeval value;
+      struct timezone time_zone;
+      time_zone.tz_minuteswest = 0;
+      time_zone.tz_dsttime = 0;
+      gettimeofday(&value, &time_zone);
+      TIMEVAL_TO_TIMESPEC(&value, &end_at);
+#endif
+      end_at.tv_sec += timeout_ms / 1000;
+      end_at.tv_nsec += (timeout_ms % 1000) * kNanosecondsPerMillisecond;
+
+      if (end_at.tv_nsec >= kNanosecondsPerSecond) {
+        end_at.tv_sec++;
+        end_at.tv_nsec -= kNanosecondsPerSecond;
+      }
+      while (ret_val == 0 && !event_set_) {
+#if defined(WEBRTC_ANDROID) && HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC
+        ret_val = pthread_cond_timedwait_monotonic_np(&cond_, &mutex_, &end_at);
+#else
+        ret_val = pthread_cond_timedwait(&cond_, &mutex_, &end_at);
+#endif  // WEBRTC_ANDROID && HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC
+      }
+    } else {
+      while (ret_val == 0 && !event_set_)
+        ret_val = pthread_cond_wait(&cond_, &mutex_);
+    }
+  }
+
+  RTC_DCHECK(ret_val == 0 || ret_val == ETIMEDOUT);
+
+  // Reset and signal if set, regardless of why the thread woke up.
+  if (event_set_) {
+    ret_val = 0;
+    event_set_ = false;
+  }
+  pthread_mutex_unlock(&mutex_);
+
+  return ret_val == 0 ? kEventSignaled : kEventTimeout;
+}
+
+EventTypeWrapper EventTimerPosix::Wait(timespec* end_at, bool reset_event) {
+  int ret_val = 0;
+  RTC_CHECK_EQ(0, pthread_mutex_lock(&mutex_));
+  if (reset_event) {
+    // Only wake for new events or timeouts.
+    event_set_ = false;
+  }
+
+  while (ret_val == 0 && !event_set_) {
+#if defined(WEBRTC_ANDROID) && HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC
+    ret_val = pthread_cond_timedwait_monotonic_np(&cond_, &mutex_, end_at);
+#else
+    ret_val = pthread_cond_timedwait(&cond_, &mutex_, end_at);
+#endif  // WEBRTC_ANDROID && HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC
+  }
+
+  RTC_DCHECK(ret_val == 0 || ret_val == ETIMEDOUT);
+
+  // Reset and signal if set, regardless of why the thread woke up.
+  if (event_set_) {
+    ret_val = 0;
+    event_set_ = false;
+  }
+  pthread_mutex_unlock(&mutex_);
+
+  return ret_val == 0 ? kEventSignaled : kEventTimeout;
+}
+
+rtc::PlatformThread* EventTimerPosix::CreateThread() {
+  const char* kThreadName = "WebRtc_event_timer_thread";
+  return new rtc::PlatformThread(Run, this, kThreadName);
+}
+
+bool EventTimerPosix::StartTimer(bool periodic, unsigned long time_ms) {
+  pthread_mutex_lock(&mutex_);
+  if (timer_thread_) {
+    if (periodic_) {
+      // Timer already started.
+      pthread_mutex_unlock(&mutex_);
+      return false;
+    } else {
+      // New one shot timer.
+      time_ms_ = time_ms;
+      created_at_.tv_sec = 0;
+      timer_event_->Set();
+      pthread_mutex_unlock(&mutex_);
+      return true;
+    }
+  }
+
+  // Start the timer thread.
+  timer_event_.reset(new EventTimerPosix());
+  timer_thread_.reset(CreateThread());
+  periodic_ = periodic;
+  time_ms_ = time_ms;
+  timer_thread_->Start();
+  timer_thread_->SetPriority(rtc::kRealtimePriority);
+  pthread_mutex_unlock(&mutex_);
+
+  return true;
+}
+
+bool EventTimerPosix::Run(void* obj) {
+  return static_cast<EventTimerPosix*>(obj)->Process();
+}
+
+bool EventTimerPosix::Process() {
+  pthread_mutex_lock(&mutex_);
+  if (is_stopping_) {
+    pthread_mutex_unlock(&mutex_);
+    return false;
+  }
+  if (created_at_.tv_sec == 0) {
+#ifndef WEBRTC_MAC
+    RTC_CHECK_EQ(0, clock_gettime(CLOCK_MONOTONIC, &created_at_));
+#else
+    timeval value;
+    struct timezone time_zone;
+    time_zone.tz_minuteswest = 0;
+    time_zone.tz_dsttime = 0;
+    gettimeofday(&value, &time_zone);
+    TIMEVAL_TO_TIMESPEC(&value, &created_at_);
+#endif
+    count_ = 0;
+  }
+
+  timespec end_at;
+  unsigned long long total_delta_ms = time_ms_ * ++count_;
+  if (!periodic_ && count_ >= 1) {
+    // No need to wake up often if we're not going to signal waiting threads.
+    total_delta_ms =
+        std::min<uint64_t>(total_delta_ms, 60 * kNanosecondsPerSecond);
+  }
+
+  end_at.tv_sec = created_at_.tv_sec + total_delta_ms / 1000;
+  end_at.tv_nsec = created_at_.tv_nsec +
+                   (total_delta_ms % 1000) * kNanosecondsPerMillisecond;
+
+  if (end_at.tv_nsec >= kNanosecondsPerSecond) {
+    end_at.tv_sec++;
+    end_at.tv_nsec -= kNanosecondsPerSecond;
+  }
+
+  pthread_mutex_unlock(&mutex_);
+  // Reset event on first call so that we don't immediately return here if this
+  // thread was not blocked on timer_event_->Wait when the StartTimer() call
+  // was made.
+  if (timer_event_->Wait(&end_at, count_ == 1) == kEventSignaled)
+    return true;
+
+  pthread_mutex_lock(&mutex_);
+  if (periodic_ || count_ == 1)
+    Set();
+  pthread_mutex_unlock(&mutex_);
+
+  return true;
+}
+
+bool EventTimerPosix::StopTimer() {
+  pthread_mutex_lock(&mutex_);
+  is_stopping_ = true;
+  pthread_mutex_unlock(&mutex_);
+
+  if (timer_event_)
+    timer_event_->Set();
+
+  if (timer_thread_) {
+    timer_thread_->Stop();
+    timer_thread_.reset();
+  }
+  timer_event_.reset();
+
+  // Set time to zero to force new reference time for the timer.
+  memset(&created_at_, 0, sizeof(created_at_));
+  count_ = 0;
+  return true;
+}
+
+}  // namespace webrtc
diff --git a/system_wrappers/source/event_timer_posix.h b/system_wrappers/source/event_timer_posix.h
new file mode 100644
index 0000000..72d6753
--- /dev/null
+++ b/system_wrappers/source/event_timer_posix.h
@@ -0,0 +1,64 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SYSTEM_WRAPPERS_SOURCE_EVENT_POSIX_H_
+#define SYSTEM_WRAPPERS_SOURCE_EVENT_POSIX_H_
+
+#include "system_wrappers/include/event_wrapper.h"
+
+#include <memory>
+
+#include <pthread.h>
+#include <time.h>
+
+#include "rtc_base/platform_thread.h"
+
+namespace webrtc {
+
+enum State { kUp = 1, kDown = 2 };
+
+class EventTimerPosix : public EventTimerWrapper {
+ public:
+  EventTimerPosix();
+  ~EventTimerPosix() override;
+
+  EventTypeWrapper Wait(unsigned long max_time) override;
+  bool Set() override;
+
+  bool StartTimer(bool periodic, unsigned long time) override;
+  bool StopTimer() override;
+
+ private:
+  friend class EventTimerPosixTest;
+
+  static bool Run(void* obj);
+  bool Process();
+  EventTypeWrapper Wait(timespec* end_at, bool reset_state);
+
+  virtual rtc::PlatformThread* CreateThread();
+
+  pthread_cond_t cond_;
+  pthread_mutex_t mutex_;
+  bool event_set_;
+
+  // TODO(pbos): Remove unique_ptr and use PlatformThread directly.
+  std::unique_ptr<rtc::PlatformThread> timer_thread_;
+  std::unique_ptr<EventTimerPosix> timer_event_;
+  timespec created_at_;
+
+  bool periodic_;
+  unsigned long time_ms_;
+  unsigned long count_;
+  bool is_stopping_;
+};
+
+}  // namespace webrtc
+
+#endif  // SYSTEM_WRAPPERS_SOURCE_EVENT_POSIX_H_
diff --git a/system_wrappers/source/event_timer_posix_unittest.cc b/system_wrappers/source/event_timer_posix_unittest.cc
new file mode 100644
index 0000000..e0c5cbc
--- /dev/null
+++ b/system_wrappers/source/event_timer_posix_unittest.cc
@@ -0,0 +1,198 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/source/event_timer_posix.h"
+
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/event.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+enum class ThreadState {
+  kNotStarted,
+  kWaiting,
+  kRequestProcessCall,
+  kCallingProcess,
+  kProcessDone,
+  kContinue,
+  kExiting,
+  kDead
+};
+
+class EventTimerPosixTest : public testing::Test, public EventTimerPosix {
+ public:
+  EventTimerPosixTest()
+      : thread_state_(ThreadState::kNotStarted),
+        process_event_(false, true),
+        main_event_(false, true),
+        process_thread_id_(0),
+        process_thread_(nullptr) {}
+  virtual ~EventTimerPosixTest() {}
+
+  rtc::PlatformThread* CreateThread() override {
+    EXPECT_TRUE(process_thread_ == nullptr);
+    process_thread_ =
+        new rtc::PlatformThread(Run, this, "EventTimerPosixTestThread");
+    return process_thread_;
+  }
+
+  static bool Run(void* obj) {
+    return static_cast<EventTimerPosixTest*>(obj)->Process();
+  }
+
+  bool Process() {
+    bool res = ProcessInternal();
+    if (!res) {
+      rtc::CritScope cs(&lock_);
+      thread_state_ = ThreadState::kDead;
+      main_event_.Set();
+    }
+    return res;
+  }
+
+  bool ProcessInternal() {
+    {
+      rtc::CritScope cs(&lock_);
+      if (thread_state_ == ThreadState::kNotStarted) {
+        if (!ChangeThreadState(ThreadState::kNotStarted,
+                               ThreadState::kContinue)) {
+          ADD_FAILURE() << "Unable to start process thread";
+          return false;
+        }
+        process_thread_id_ = rtc::CurrentThreadId();
+      }
+    }
+
+    if (!ChangeThreadState(ThreadState::kContinue, ThreadState::kWaiting))
+      return false;
+
+    if (!AwaitThreadState(ThreadState::kRequestProcessCall,
+                          rtc::Event::kForever))
+      return false;
+
+    if (!ChangeThreadState(ThreadState::kRequestProcessCall,
+                           ThreadState::kCallingProcess))
+      return false;
+
+    EventTimerPosix::Process();
+
+    if (!ChangeThreadState(ThreadState::kCallingProcess,
+                           ThreadState::kProcessDone))
+      return false;
+
+    if (!AwaitThreadState(ThreadState::kContinue, rtc::Event::kForever))
+      return false;
+
+    return true;
+  }
+
+  bool IsProcessThread() {
+    rtc::CritScope cs(&lock_);
+    return process_thread_id_ == rtc::CurrentThreadId();
+  }
+
+  bool ChangeThreadState(ThreadState prev_state, ThreadState new_state) {
+    rtc::CritScope cs(&lock_);
+    if (thread_state_ != prev_state)
+      return false;
+    thread_state_ = new_state;
+    if (IsProcessThread()) {
+      main_event_.Set();
+    } else {
+      process_event_.Set();
+    }
+    return true;
+  }
+
+  bool AwaitThreadState(ThreadState state, int timeout) {
+    rtc::Event* event = IsProcessThread() ? &process_event_ : &main_event_;
+    do {
+      rtc::CritScope cs(&lock_);
+      if (state != ThreadState::kDead && thread_state_ == ThreadState::kExiting)
+        return false;
+      if (thread_state_ == state)
+        return true;
+    } while (event->Wait(timeout));
+    return false;
+  }
+
+  bool CallProcess(int timeout_ms) {
+    return AwaitThreadState(ThreadState::kWaiting, timeout_ms) &&
+           ChangeThreadState(ThreadState::kWaiting,
+                             ThreadState::kRequestProcessCall);
+  }
+
+  bool AwaitProcessDone(int timeout_ms) {
+    return AwaitThreadState(ThreadState::kProcessDone, timeout_ms) &&
+           ChangeThreadState(ThreadState::kProcessDone, ThreadState::kContinue);
+  }
+
+  void TearDown() override {
+    if (process_thread_) {
+      {
+        rtc::CritScope cs(&lock_);
+        if (thread_state_ != ThreadState::kDead) {
+          thread_state_ = ThreadState::kExiting;
+          process_event_.Set();
+        }
+      }
+      ASSERT_TRUE(AwaitThreadState(ThreadState::kDead, 5000));
+    }
+  }
+
+  ThreadState thread_state_;
+  rtc::CriticalSection lock_;
+  rtc::Event process_event_;
+  rtc::Event main_event_;
+  rtc::PlatformThreadId process_thread_id_;
+  rtc::PlatformThread* process_thread_;
+};
+
+TEST_F(EventTimerPosixTest, WaiterBlocksUntilTimeout) {
+  const int kTimerIntervalMs = 100;
+  const int kTimeoutMs = 5000;
+  ASSERT_TRUE(StartTimer(false, kTimerIntervalMs));
+  ASSERT_TRUE(CallProcess(kTimeoutMs));
+  EventTypeWrapper res = Wait(kTimeoutMs);
+  EXPECT_EQ(kEventSignaled, res);
+  ASSERT_TRUE(AwaitProcessDone(kTimeoutMs));
+}
+
+TEST_F(EventTimerPosixTest, WaiterWakesImmediatelyAfterTimeout) {
+  const int kTimerIntervalMs = 100;
+  const int kTimeoutMs = 5000;
+  ASSERT_TRUE(StartTimer(false, kTimerIntervalMs));
+  ASSERT_TRUE(CallProcess(kTimeoutMs));
+  ASSERT_TRUE(AwaitProcessDone(kTimeoutMs));
+  EventTypeWrapper res = Wait(0);
+  EXPECT_EQ(kEventSignaled, res);
+}
+
+TEST_F(EventTimerPosixTest, WaiterBlocksUntilTimeoutProcessInactiveOnStart) {
+  const int kTimerIntervalMs = 100;
+  const int kTimeoutMs = 5000;
+  // First call to StartTimer initializes thread.
+  ASSERT_TRUE(StartTimer(false, kTimerIntervalMs));
+
+  // Process thread currently _not_ blocking on Process() call.
+  ASSERT_TRUE(AwaitThreadState(ThreadState::kWaiting, kTimeoutMs));
+
+  // Start new one-off timer, then call Process().
+  ASSERT_TRUE(StartTimer(false, kTimerIntervalMs));
+  ASSERT_TRUE(CallProcess(kTimeoutMs));
+
+  EventTypeWrapper res = Wait(kTimeoutMs);
+  EXPECT_EQ(kEventSignaled, res);
+
+  ASSERT_TRUE(AwaitProcessDone(kTimeoutMs));
+}
+
+}  // namespace webrtc
diff --git a/system_wrappers/source/event_timer_win.cc b/system_wrappers/source/event_timer_win.cc
new file mode 100644
index 0000000..b6a93fe
--- /dev/null
+++ b/system_wrappers/source/event_timer_win.cc
@@ -0,0 +1,77 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/source/event_timer_win.h"
+
+#include "Mmsystem.h"
+
+namespace webrtc {
+
+// static
+EventTimerWrapper* EventTimerWrapper::Create() {
+  return new EventTimerWin();
+}
+
+EventTimerWin::EventTimerWin()
+    : event_(::CreateEvent(NULL,    // security attributes
+                           FALSE,   // manual reset
+                           FALSE,   // initial state
+                           NULL)),  // name of event
+      timerID_(NULL) {}
+
+EventTimerWin::~EventTimerWin() {
+  StopTimer();
+  CloseHandle(event_);
+}
+
+bool EventTimerWin::Set() {
+  // Note: setting an event that is already set has no effect.
+  return SetEvent(event_) == 1;
+}
+
+EventTypeWrapper EventTimerWin::Wait(unsigned long max_time) {
+  unsigned long res = WaitForSingleObject(event_, max_time);
+  switch (res) {
+    case WAIT_OBJECT_0:
+      return kEventSignaled;
+    case WAIT_TIMEOUT:
+      return kEventTimeout;
+    default:
+      return kEventError;
+  }
+}
+
+bool EventTimerWin::StartTimer(bool periodic, unsigned long time) {
+  if (timerID_ != NULL) {
+    timeKillEvent(timerID_);
+    timerID_ = NULL;
+  }
+
+  if (periodic) {
+    timerID_ = timeSetEvent(time, 0, (LPTIMECALLBACK)HANDLE(event_), 0,
+                            TIME_PERIODIC | TIME_CALLBACK_EVENT_PULSE);
+  } else {
+    timerID_ = timeSetEvent(time, 0, (LPTIMECALLBACK)HANDLE(event_), 0,
+                            TIME_ONESHOT | TIME_CALLBACK_EVENT_SET);
+  }
+
+  return timerID_ != NULL;
+}
+
+bool EventTimerWin::StopTimer() {
+  if (timerID_ != NULL) {
+    timeKillEvent(timerID_);
+    timerID_ = NULL;
+  }
+
+  return true;
+}
+
+}  // namespace webrtc
diff --git a/system_wrappers/source/event_timer_win.h b/system_wrappers/source/event_timer_win.h
new file mode 100644
index 0000000..5631a3f
--- /dev/null
+++ b/system_wrappers/source/event_timer_win.h
@@ -0,0 +1,40 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SYSTEM_WRAPPERS_SOURCE_EVENT_WIN_H_
+#define SYSTEM_WRAPPERS_SOURCE_EVENT_WIN_H_
+
+#include <windows.h>
+
+#include "system_wrappers/include/event_wrapper.h"
+
+#include "typedefs.h"  // NOLINT(build/include)
+
+namespace webrtc {
+
+class EventTimerWin : public EventTimerWrapper {
+ public:
+  EventTimerWin();
+  virtual ~EventTimerWin();
+
+  virtual EventTypeWrapper Wait(unsigned long max_time);
+  virtual bool Set();
+
+  virtual bool StartTimer(bool periodic, unsigned long time);
+  virtual bool StopTimer();
+
+ private:
+  HANDLE event_;
+  uint32_t timerID_;
+};
+
+}  // namespace webrtc
+
+#endif  // SYSTEM_WRAPPERS_SOURCE_EVENT_WIN_H_
diff --git a/system_wrappers/source/field_trial_default.cc b/system_wrappers/source/field_trial_default.cc
new file mode 100644
index 0000000..e8d8917
--- /dev/null
+++ b/system_wrappers/source/field_trial_default.cc
@@ -0,0 +1,65 @@
+// Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS.  All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+//
+
+#include "system_wrappers/include/field_trial_default.h"
+#include "system_wrappers/include/field_trial.h"
+
+#include <string>
+
+// Simple field trial implementation, which allows client to
+// specify desired flags in InitFieldTrialsFromString.
+namespace webrtc {
+namespace field_trial {
+
+static const char* trials_init_string = NULL;
+
+std::string FindFullName(const std::string& name) {
+  if (trials_init_string == NULL)
+    return std::string();
+
+  std::string trials_string(trials_init_string);
+  if (trials_string.empty())
+    return std::string();
+
+  static const char kPersistentStringSeparator = '/';
+  size_t next_item = 0;
+  while (next_item < trials_string.length()) {
+    // Find next name/value pair in field trial configuration string.
+    size_t field_name_end =
+        trials_string.find(kPersistentStringSeparator, next_item);
+    if (field_name_end == trials_string.npos || field_name_end == next_item)
+      break;
+    size_t field_value_end =
+        trials_string.find(kPersistentStringSeparator, field_name_end + 1);
+    if (field_value_end == trials_string.npos ||
+        field_value_end == field_name_end + 1)
+      break;
+    std::string field_name(trials_string, next_item,
+                           field_name_end - next_item);
+    std::string field_value(trials_string, field_name_end + 1,
+                            field_value_end - field_name_end - 1);
+    next_item = field_value_end + 1;
+
+    if (name == field_name)
+      return field_value;
+  }
+  return std::string();
+}
+
+// Optionally initialize field trial from a string.
+void InitFieldTrialsFromString(const char* trials_string) {
+  trials_init_string = trials_string;
+}
+
+const char* GetFieldTrialString() {
+  return trials_init_string;
+}
+
+}  // namespace field_trial
+}  // namespace webrtc
diff --git a/system_wrappers/source/file_impl.cc b/system_wrappers/source/file_impl.cc
new file mode 100644
index 0000000..350aaeb
--- /dev/null
+++ b/system_wrappers/source/file_impl.cc
@@ -0,0 +1,152 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/include/file_wrapper.h"
+
+#ifdef _WIN32
+#include <Windows.h>
+#else
+#include <stdarg.h>
+#include <string.h>
+#endif
+
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+namespace {
+FILE* FileOpen(const char* file_name_utf8, bool read_only) {
+#if defined(_WIN32)
+  int len = MultiByteToWideChar(CP_UTF8, 0, file_name_utf8, -1, nullptr, 0);
+  std::wstring wstr(len, 0);
+  MultiByteToWideChar(CP_UTF8, 0, file_name_utf8, -1, &wstr[0], len);
+  FILE* file = _wfopen(wstr.c_str(), read_only ? L"rb" : L"wb");
+#else
+  FILE* file = fopen(file_name_utf8, read_only ? "rb" : "wb");
+#endif
+  return file;
+}
+}  // namespace
+
+// static
+FileWrapper* FileWrapper::Create() {
+  return new FileWrapper();
+}
+
+// static
+FileWrapper FileWrapper::Open(const char* file_name_utf8, bool read_only) {
+  return FileWrapper(FileOpen(file_name_utf8, read_only), 0);
+}
+
+FileWrapper::FileWrapper() {}
+
+FileWrapper::FileWrapper(FILE* file, size_t max_size)
+    : file_(file), max_size_in_bytes_(max_size) {}
+
+FileWrapper::~FileWrapper() {
+  CloseFileImpl();
+}
+
+FileWrapper::FileWrapper(FileWrapper&& other) {
+  operator=(std::move(other));
+}
+
+FileWrapper& FileWrapper::operator=(FileWrapper&& other) {
+  file_ = other.file_;
+  max_size_in_bytes_ = other.max_size_in_bytes_;
+  position_ = other.position_;
+  other.file_ = nullptr;
+  return *this;
+}
+
+void FileWrapper::CloseFile() {
+  rtc::CritScope lock(&lock_);
+  CloseFileImpl();
+}
+
+int FileWrapper::Rewind() {
+  rtc::CritScope lock(&lock_);
+  if (file_ != nullptr) {
+    position_ = 0;
+    return fseek(file_, 0, SEEK_SET);
+  }
+  return -1;
+}
+
+void FileWrapper::SetMaxFileSize(size_t bytes) {
+  rtc::CritScope lock(&lock_);
+  max_size_in_bytes_ = bytes;
+}
+
+int FileWrapper::Flush() {
+  rtc::CritScope lock(&lock_);
+  return FlushImpl();
+}
+
+bool FileWrapper::OpenFile(const char* file_name_utf8, bool read_only) {
+  size_t length = strlen(file_name_utf8);
+  if (length > kMaxFileNameSize - 1)
+    return false;
+
+  rtc::CritScope lock(&lock_);
+  if (file_ != nullptr)
+    return false;
+
+  file_ = FileOpen(file_name_utf8, read_only);
+  return file_ != nullptr;
+}
+
+bool FileWrapper::OpenFromFileHandle(FILE* handle) {
+  if (!handle)
+    return false;
+  rtc::CritScope lock(&lock_);
+  CloseFileImpl();
+  file_ = handle;
+  return true;
+}
+
+int FileWrapper::Read(void* buf, size_t length) {
+  rtc::CritScope lock(&lock_);
+  if (file_ == nullptr)
+    return -1;
+
+  size_t bytes_read = fread(buf, 1, length, file_);
+  return static_cast<int>(bytes_read);
+}
+
+bool FileWrapper::Write(const void* buf, size_t length) {
+  if (buf == nullptr)
+    return false;
+
+  rtc::CritScope lock(&lock_);
+
+  if (file_ == nullptr)
+    return false;
+
+  // Check if it's time to stop writing.
+  if (max_size_in_bytes_ > 0 && (position_ + length) > max_size_in_bytes_)
+    return false;
+
+  size_t num_bytes = fwrite(buf, 1, length, file_);
+  position_ += num_bytes;
+
+  return num_bytes == length;
+}
+
+void FileWrapper::CloseFileImpl() {
+  if (file_ != nullptr)
+    fclose(file_);
+  file_ = nullptr;
+}
+
+int FileWrapper::FlushImpl() {
+  return (file_ != nullptr) ? fflush(file_) : -1;
+}
+
+}  // namespace webrtc
diff --git a/system_wrappers/source/metrics_default.cc b/system_wrappers/source/metrics_default.cc
new file mode 100644
index 0000000..fbb2956
--- /dev/null
+++ b/system_wrappers/source/metrics_default.cc
@@ -0,0 +1,303 @@
+// Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS.  All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+//
+
+#include "system_wrappers/include/metrics_default.h"
+
+#include <algorithm>
+
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/thread_annotations.h"
+#include "system_wrappers/include/metrics.h"
+
+// Default implementation of histogram methods for WebRTC clients that do not
+// want to provide their own implementation.
+
+namespace webrtc {
+namespace metrics {
+class Histogram;
+
+namespace {
+// Limit for the maximum number of sample values that can be stored.
+// TODO(asapersson): Consider using bucket count (and set up
+// linearly/exponentially spaced buckets) if samples are logged more frequently.
+const int kMaxSampleMapSize = 300;
+
+class RtcHistogram {
+ public:
+  RtcHistogram(const std::string& name, int min, int max, int bucket_count)
+      : min_(min), max_(max), info_(name, min, max, bucket_count) {
+    RTC_DCHECK_GT(bucket_count, 0);
+  }
+
+  void Add(int sample) {
+    sample = std::min(sample, max_);
+    sample = std::max(sample, min_ - 1);  // Underflow bucket.
+
+    rtc::CritScope cs(&crit_);
+    if (info_.samples.size() == kMaxSampleMapSize &&
+        info_.samples.find(sample) == info_.samples.end()) {
+      return;
+    }
+    ++info_.samples[sample];
+  }
+
+  // Returns a copy (or nullptr if there are no samples) and clears samples.
+  std::unique_ptr<SampleInfo> GetAndReset() {
+    rtc::CritScope cs(&crit_);
+    if (info_.samples.empty())
+      return nullptr;
+
+    SampleInfo* copy =
+        new SampleInfo(info_.name, info_.min, info_.max, info_.bucket_count);
+
+    std::swap(info_.samples, copy->samples);
+
+    return std::unique_ptr<SampleInfo>(copy);
+  }
+
+  const std::string& name() const { return info_.name; }
+
+  // Functions only for testing.
+  void Reset() {
+    rtc::CritScope cs(&crit_);
+    info_.samples.clear();
+  }
+
+  int NumEvents(int sample) const {
+    rtc::CritScope cs(&crit_);
+    const auto it = info_.samples.find(sample);
+    return (it == info_.samples.end()) ? 0 : it->second;
+  }
+
+  int NumSamples() const {
+    int num_samples = 0;
+    rtc::CritScope cs(&crit_);
+    for (const auto& sample : info_.samples) {
+      num_samples += sample.second;
+    }
+    return num_samples;
+  }
+
+  int MinSample() const {
+    rtc::CritScope cs(&crit_);
+    return (info_.samples.empty()) ? -1 : info_.samples.begin()->first;
+  }
+
+ private:
+  rtc::CriticalSection crit_;
+  const int min_;
+  const int max_;
+  SampleInfo info_ RTC_GUARDED_BY(crit_);
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(RtcHistogram);
+};
+
+class RtcHistogramMap {
+ public:
+  RtcHistogramMap() {}
+  ~RtcHistogramMap() {}
+
+  Histogram* GetCountsHistogram(const std::string& name,
+                                int min,
+                                int max,
+                                int bucket_count) {
+    rtc::CritScope cs(&crit_);
+    const auto& it = map_.find(name);
+    if (it != map_.end())
+      return reinterpret_cast<Histogram*>(it->second.get());
+
+    RtcHistogram* hist = new RtcHistogram(name, min, max, bucket_count);
+    map_[name].reset(hist);
+    return reinterpret_cast<Histogram*>(hist);
+  }
+
+  Histogram* GetEnumerationHistogram(const std::string& name, int boundary) {
+    rtc::CritScope cs(&crit_);
+    const auto& it = map_.find(name);
+    if (it != map_.end())
+      return reinterpret_cast<Histogram*>(it->second.get());
+
+    RtcHistogram* hist = new RtcHistogram(name, 1, boundary, boundary + 1);
+    map_[name].reset(hist);
+    return reinterpret_cast<Histogram*>(hist);
+  }
+
+  void GetAndReset(
+      std::map<std::string, std::unique_ptr<SampleInfo>>* histograms) {
+    rtc::CritScope cs(&crit_);
+    for (const auto& kv : map_) {
+      std::unique_ptr<SampleInfo> info = kv.second->GetAndReset();
+      if (info)
+        histograms->insert(std::make_pair(kv.first, std::move(info)));
+    }
+  }
+
+  // Functions only for testing.
+  void Reset() {
+    rtc::CritScope cs(&crit_);
+    for (const auto& kv : map_)
+      kv.second->Reset();
+  }
+
+  int NumEvents(const std::string& name, int sample) const {
+    rtc::CritScope cs(&crit_);
+    const auto& it = map_.find(name);
+    return (it == map_.end()) ? 0 : it->second->NumEvents(sample);
+  }
+
+  int NumSamples(const std::string& name) const {
+    rtc::CritScope cs(&crit_);
+    const auto& it = map_.find(name);
+    return (it == map_.end()) ? 0 : it->second->NumSamples();
+  }
+
+  int MinSample(const std::string& name) const {
+    rtc::CritScope cs(&crit_);
+    const auto& it = map_.find(name);
+    return (it == map_.end()) ? -1 : it->second->MinSample();
+  }
+
+ private:
+  rtc::CriticalSection crit_;
+  std::map<std::string, std::unique_ptr<RtcHistogram>> map_
+      RTC_GUARDED_BY(crit_);
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(RtcHistogramMap);
+};
+
+// RtcHistogramMap is allocated upon call to Enable().
+// The histogram getter functions, which return pointer values to the histograms
+// in the map, are cached in WebRTC. Therefore, this memory is not freed by the
+// application (the memory will be reclaimed by the OS).
+static RtcHistogramMap* volatile g_rtc_histogram_map = nullptr;
+
+void CreateMap() {
+  RtcHistogramMap* map = rtc::AtomicOps::AcquireLoadPtr(&g_rtc_histogram_map);
+  if (map == nullptr) {
+    RtcHistogramMap* new_map = new RtcHistogramMap();
+    RtcHistogramMap* old_map = rtc::AtomicOps::CompareAndSwapPtr(
+        &g_rtc_histogram_map, static_cast<RtcHistogramMap*>(nullptr), new_map);
+    if (old_map != nullptr)
+      delete new_map;
+  }
+}
+
+// Set the first time we start using histograms. Used to make sure Enable() is
+// not called thereafter.
+#if RTC_DCHECK_IS_ON
+static volatile int g_rtc_histogram_called = 0;
+#endif
+
+// Gets the map (or nullptr).
+RtcHistogramMap* GetMap() {
+#if RTC_DCHECK_IS_ON
+  rtc::AtomicOps::ReleaseStore(&g_rtc_histogram_called, 1);
+#endif
+  return g_rtc_histogram_map;
+}
+}  // namespace
+
+// Implementation of histogram methods in
+// webrtc/system_wrappers/interface/metrics.h.
+
+// Histogram with exponentially spaced buckets.
+// Creates (or finds) histogram.
+// The returned histogram pointer is cached (and used for adding samples in
+// subsequent calls).
+Histogram* HistogramFactoryGetCounts(const std::string& name,
+                                     int min,
+                                     int max,
+                                     int bucket_count) {
+  // TODO(asapersson): Alternative implementation will be needed if this
+  // histogram type should be truly exponential.
+  return HistogramFactoryGetCountsLinear(name, min, max, bucket_count);
+}
+
+// Histogram with linearly spaced buckets.
+// Creates (or finds) histogram.
+// The returned histogram pointer is cached (and used for adding samples in
+// subsequent calls).
+Histogram* HistogramFactoryGetCountsLinear(const std::string& name,
+                                           int min,
+                                           int max,
+                                           int bucket_count) {
+  RtcHistogramMap* map = GetMap();
+  if (!map)
+    return nullptr;
+
+  return map->GetCountsHistogram(name, min, max, bucket_count);
+}
+
+// Histogram with linearly spaced buckets.
+// Creates (or finds) histogram.
+// The returned histogram pointer is cached (and used for adding samples in
+// subsequent calls).
+Histogram* HistogramFactoryGetEnumeration(const std::string& name,
+                                          int boundary) {
+  RtcHistogramMap* map = GetMap();
+  if (!map)
+    return nullptr;
+
+  return map->GetEnumerationHistogram(name, boundary);
+}
+
+// Fast path. Adds |sample| to cached |histogram_pointer|.
+void HistogramAdd(Histogram* histogram_pointer, int sample) {
+  RtcHistogram* ptr = reinterpret_cast<RtcHistogram*>(histogram_pointer);
+  ptr->Add(sample);
+}
+
+SampleInfo::SampleInfo(const std::string& name,
+                       int min,
+                       int max,
+                       size_t bucket_count)
+    : name(name), min(min), max(max), bucket_count(bucket_count) {}
+
+SampleInfo::~SampleInfo() {}
+
+// Implementation of global functions in metrics_default.h.
+void Enable() {
+  RTC_DCHECK(g_rtc_histogram_map == nullptr);
+#if RTC_DCHECK_IS_ON
+  RTC_DCHECK_EQ(0, rtc::AtomicOps::AcquireLoad(&g_rtc_histogram_called));
+#endif
+  CreateMap();
+}
+
+void GetAndReset(
+    std::map<std::string, std::unique_ptr<SampleInfo>>* histograms) {
+  histograms->clear();
+  RtcHistogramMap* map = GetMap();
+  if (map)
+    map->GetAndReset(histograms);
+}
+
+void Reset() {
+  RtcHistogramMap* map = GetMap();
+  if (map)
+    map->Reset();
+}
+
+int NumEvents(const std::string& name, int sample) {
+  RtcHistogramMap* map = GetMap();
+  return map ? map->NumEvents(name, sample) : 0;
+}
+
+int NumSamples(const std::string& name) {
+  RtcHistogramMap* map = GetMap();
+  return map ? map->NumSamples(name) : 0;
+}
+
+int MinSample(const std::string& name) {
+  RtcHistogramMap* map = GetMap();
+  return map ? map->MinSample(name) : -1;
+}
+
+}  // namespace metrics
+}  // namespace webrtc
diff --git a/system_wrappers/source/metrics_default_unittest.cc b/system_wrappers/source/metrics_default_unittest.cc
new file mode 100644
index 0000000..fa253a9
--- /dev/null
+++ b/system_wrappers/source/metrics_default_unittest.cc
@@ -0,0 +1,161 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/include/metrics_default.h"
+#include "system_wrappers/include/metrics.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+const int kSample = 22;
+const char kName[] = "Name";
+
+int NumSamples(
+    const std::string& name,
+    const std::map<std::string, std::unique_ptr<metrics::SampleInfo>>&
+        histograms) {
+  const auto it = histograms.find(name);
+  if (it == histograms.end())
+    return 0;
+
+  int num_samples = 0;
+  for (const auto& sample : it->second->samples)
+    num_samples += sample.second;
+
+  return num_samples;
+}
+
+int NumEvents(const std::string& name,
+              int sample,
+              const std::map<std::string, std::unique_ptr<metrics::SampleInfo>>&
+                  histograms) {
+  const auto it = histograms.find(name);
+  if (it == histograms.end())
+    return 0;
+
+  const auto it_sample = it->second->samples.find(sample);
+  if (it_sample == it->second->samples.end())
+    return 0;
+
+  return it_sample->second;
+}
+}  // namespace
+
+class MetricsDefaultTest : public ::testing::Test {
+ public:
+  MetricsDefaultTest() {}
+
+ protected:
+  virtual void SetUp() { metrics::Reset(); }
+};
+
+TEST_F(MetricsDefaultTest, Reset) {
+  RTC_HISTOGRAM_PERCENTAGE(kName, kSample);
+  EXPECT_EQ(1, metrics::NumSamples(kName));
+  metrics::Reset();
+  EXPECT_EQ(0, metrics::NumSamples(kName));
+}
+
+TEST_F(MetricsDefaultTest, NumSamples) {
+  RTC_HISTOGRAM_PERCENTAGE(kName, 5);
+  RTC_HISTOGRAM_PERCENTAGE(kName, 5);
+  RTC_HISTOGRAM_PERCENTAGE(kName, 10);
+  EXPECT_EQ(3, metrics::NumSamples(kName));
+  EXPECT_EQ(0, metrics::NumSamples("NonExisting"));
+}
+
+TEST_F(MetricsDefaultTest, NumEvents) {
+  RTC_HISTOGRAM_PERCENTAGE(kName, 5);
+  RTC_HISTOGRAM_PERCENTAGE(kName, 5);
+  RTC_HISTOGRAM_PERCENTAGE(kName, 10);
+  EXPECT_EQ(2, metrics::NumEvents(kName, 5));
+  EXPECT_EQ(1, metrics::NumEvents(kName, 10));
+  EXPECT_EQ(0, metrics::NumEvents(kName, 11));
+  EXPECT_EQ(0, metrics::NumEvents("NonExisting", 5));
+}
+
+TEST_F(MetricsDefaultTest, MinSample) {
+  RTC_HISTOGRAM_PERCENTAGE(kName, kSample);
+  RTC_HISTOGRAM_PERCENTAGE(kName, kSample + 1);
+  EXPECT_EQ(kSample, metrics::MinSample(kName));
+  EXPECT_EQ(-1, metrics::MinSample("NonExisting"));
+}
+
+TEST_F(MetricsDefaultTest, Overflow) {
+  const std::string kName = "Overflow";
+  // Samples should end up in overflow bucket.
+  RTC_HISTOGRAM_PERCENTAGE(kName, 101);
+  EXPECT_EQ(1, metrics::NumSamples(kName));
+  EXPECT_EQ(1, metrics::NumEvents(kName, 101));
+  RTC_HISTOGRAM_PERCENTAGE(kName, 102);
+  EXPECT_EQ(2, metrics::NumSamples(kName));
+  EXPECT_EQ(2, metrics::NumEvents(kName, 101));
+}
+
+TEST_F(MetricsDefaultTest, Underflow) {
+  const std::string kName = "Underflow";
+  // Samples should end up in underflow bucket.
+  RTC_HISTOGRAM_COUNTS_10000(kName, 0);
+  EXPECT_EQ(1, metrics::NumSamples(kName));
+  EXPECT_EQ(1, metrics::NumEvents(kName, 0));
+  RTC_HISTOGRAM_COUNTS_10000(kName, -1);
+  EXPECT_EQ(2, metrics::NumSamples(kName));
+  EXPECT_EQ(2, metrics::NumEvents(kName, 0));
+}
+
+TEST_F(MetricsDefaultTest, GetAndReset) {
+  std::map<std::string, std::unique_ptr<metrics::SampleInfo>> histograms;
+  metrics::GetAndReset(&histograms);
+  EXPECT_EQ(0u, histograms.size());
+  RTC_HISTOGRAM_PERCENTAGE("Histogram1", 4);
+  RTC_HISTOGRAM_PERCENTAGE("Histogram1", 5);
+  RTC_HISTOGRAM_PERCENTAGE("Histogram1", 5);
+  RTC_HISTOGRAM_PERCENTAGE("Histogram2", 10);
+  EXPECT_EQ(3, metrics::NumSamples("Histogram1"));
+  EXPECT_EQ(1, metrics::NumSamples("Histogram2"));
+
+  metrics::GetAndReset(&histograms);
+  EXPECT_EQ(2u, histograms.size());
+  EXPECT_EQ(0, metrics::NumSamples("Histogram1"));
+  EXPECT_EQ(0, metrics::NumSamples("Histogram2"));
+
+  EXPECT_EQ(3, NumSamples("Histogram1", histograms));
+  EXPECT_EQ(1, NumSamples("Histogram2", histograms));
+  EXPECT_EQ(1, NumEvents("Histogram1", 4, histograms));
+  EXPECT_EQ(2, NumEvents("Histogram1", 5, histograms));
+  EXPECT_EQ(1, NumEvents("Histogram2", 10, histograms));
+
+  // Add samples after reset.
+  metrics::GetAndReset(&histograms);
+  EXPECT_EQ(0u, histograms.size());
+  RTC_HISTOGRAM_PERCENTAGE("Histogram1", 50);
+  RTC_HISTOGRAM_PERCENTAGE("Histogram2", 8);
+  EXPECT_EQ(1, metrics::NumSamples("Histogram1"));
+  EXPECT_EQ(1, metrics::NumSamples("Histogram2"));
+  EXPECT_EQ(1, metrics::NumEvents("Histogram1", 50));
+  EXPECT_EQ(1, metrics::NumEvents("Histogram2", 8));
+}
+
+TEST_F(MetricsDefaultTest, TestMinMaxBucket) {
+  const std::string kName = "MinMaxCounts100";
+  RTC_HISTOGRAM_COUNTS_100(kName, 4);
+
+  std::map<std::string, std::unique_ptr<metrics::SampleInfo>> histograms;
+  metrics::GetAndReset(&histograms);
+  EXPECT_EQ(1u, histograms.size());
+  EXPECT_EQ(kName, histograms.begin()->second->name);
+  EXPECT_EQ(1, histograms.begin()->second->min);
+  EXPECT_EQ(100, histograms.begin()->second->max);
+  EXPECT_EQ(50u, histograms.begin()->second->bucket_count);
+  EXPECT_EQ(1u, histograms.begin()->second->samples.size());
+}
+
+}  // namespace webrtc
diff --git a/system_wrappers/source/metrics_unittest.cc b/system_wrappers/source/metrics_unittest.cc
new file mode 100644
index 0000000..53d43cd
--- /dev/null
+++ b/system_wrappers/source/metrics_unittest.cc
@@ -0,0 +1,113 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/include/metrics.h"
+#include "system_wrappers/include/metrics_default.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+const int kSample = 22;
+
+void AddSparseSample(const std::string& name, int sample) {
+  RTC_HISTOGRAM_COUNTS_SPARSE_100(name, sample);
+}
+void AddSampleWithVaryingName(int index, const std::string& name, int sample) {
+  RTC_HISTOGRAMS_COUNTS_100(index, name, sample);
+}
+}  // namespace
+
+class MetricsTest : public ::testing::Test {
+ public:
+  MetricsTest() {}
+
+ protected:
+  virtual void SetUp() { metrics::Reset(); }
+};
+
+TEST_F(MetricsTest, InitiallyNoSamples) {
+  EXPECT_EQ(0, metrics::NumSamples("NonExisting"));
+  EXPECT_EQ(0, metrics::NumEvents("NonExisting", kSample));
+}
+
+TEST_F(MetricsTest, RtcHistogramPercent_AddSample) {
+  const std::string kName = "Percentage";
+  RTC_HISTOGRAM_PERCENTAGE(kName, kSample);
+  EXPECT_EQ(1, metrics::NumSamples(kName));
+  EXPECT_EQ(1, metrics::NumEvents(kName, kSample));
+}
+
+TEST_F(MetricsTest, RtcHistogramEnumeration_AddSample) {
+  const std::string kName = "Enumeration";
+  RTC_HISTOGRAM_ENUMERATION(kName, kSample, kSample + 1);
+  EXPECT_EQ(1, metrics::NumSamples(kName));
+  EXPECT_EQ(1, metrics::NumEvents(kName, kSample));
+}
+
+TEST_F(MetricsTest, RtcHistogramBoolean_AddSample) {
+  const std::string kName = "Boolean";
+  const int kSample = 0;
+  RTC_HISTOGRAM_BOOLEAN(kName, kSample);
+  EXPECT_EQ(1, metrics::NumSamples(kName));
+  EXPECT_EQ(1, metrics::NumEvents(kName, kSample));
+}
+
+TEST_F(MetricsTest, RtcHistogramCountsSparse_AddSample) {
+  const std::string kName = "CountsSparse100";
+  RTC_HISTOGRAM_COUNTS_SPARSE_100(kName, kSample);
+  EXPECT_EQ(1, metrics::NumSamples(kName));
+  EXPECT_EQ(1, metrics::NumEvents(kName, kSample));
+}
+
+TEST_F(MetricsTest, RtcHistogramCounts_AddSample) {
+  const std::string kName = "Counts100";
+  RTC_HISTOGRAM_COUNTS_100(kName, kSample);
+  EXPECT_EQ(1, metrics::NumSamples(kName));
+  EXPECT_EQ(1, metrics::NumEvents(kName, kSample));
+}
+
+TEST_F(MetricsTest, RtcHistogramCounts_AddMultipleSamples) {
+  const std::string kName = "Counts200";
+  const int kNumSamples = 10;
+  for (int i = 1; i <= kNumSamples; ++i) {
+    RTC_HISTOGRAM_COUNTS_200(kName, i);
+    EXPECT_EQ(1, metrics::NumEvents(kName, i));
+    EXPECT_EQ(i, metrics::NumSamples(kName));
+  }
+}
+
+TEST_F(MetricsTest, RtcHistogramsCounts_AddSample) {
+  AddSampleWithVaryingName(0, "Name1", kSample);
+  AddSampleWithVaryingName(1, "Name2", kSample + 1);
+  AddSampleWithVaryingName(2, "Name3", kSample + 2);
+  EXPECT_EQ(1, metrics::NumSamples("Name1"));
+  EXPECT_EQ(1, metrics::NumSamples("Name2"));
+  EXPECT_EQ(1, metrics::NumSamples("Name3"));
+  EXPECT_EQ(1, metrics::NumEvents("Name1", kSample + 0));
+  EXPECT_EQ(1, metrics::NumEvents("Name2", kSample + 1));
+  EXPECT_EQ(1, metrics::NumEvents("Name3", kSample + 2));
+}
+
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST_F(MetricsTest, RtcHistogramsCounts_InvalidIndex) {
+  EXPECT_DEATH(RTC_HISTOGRAMS_COUNTS_1000(-1, "Name", kSample), "");
+  EXPECT_DEATH(RTC_HISTOGRAMS_COUNTS_1000(3, "Name", kSample), "");
+  EXPECT_DEATH(RTC_HISTOGRAMS_COUNTS_1000(3u, "Name", kSample), "");
+}
+#endif
+
+TEST_F(MetricsTest, RtcHistogramSparse_NonConstantNameWorks) {
+  AddSparseSample("Sparse1", kSample);
+  AddSparseSample("Sparse2", kSample);
+  EXPECT_EQ(1, metrics::NumSamples("Sparse1"));
+  EXPECT_EQ(1, metrics::NumSamples("Sparse2"));
+}
+
+}  // namespace webrtc
diff --git a/system_wrappers/source/ntp_time_unittest.cc b/system_wrappers/source/ntp_time_unittest.cc
new file mode 100644
index 0000000..7be464d
--- /dev/null
+++ b/system_wrappers/source/ntp_time_unittest.cc
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/include/ntp_time.h"
+#include "system_wrappers/include/clock.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+
+const uint32_t kNtpSec = 0x12345678;
+const uint32_t kNtpFrac = 0x23456789;
+
+TEST(NtpTimeTest, NoValueMeansInvalid) {
+  NtpTime ntp;
+  EXPECT_FALSE(ntp.Valid());
+}
+
+TEST(NtpTimeTest, CanResetValue) {
+  NtpTime ntp(kNtpSec, kNtpFrac);
+  EXPECT_TRUE(ntp.Valid());
+  ntp.Reset();
+  EXPECT_FALSE(ntp.Valid());
+}
+
+TEST(NtpTimeTest, CanGetWhatIsSet) {
+  NtpTime ntp;
+  ntp.Set(kNtpSec, kNtpFrac);
+  EXPECT_EQ(kNtpSec, ntp.seconds());
+  EXPECT_EQ(kNtpFrac, ntp.fractions());
+}
+
+TEST(NtpTimeTest, SetIsSameAs2ParameterConstructor) {
+  NtpTime ntp1(kNtpSec, kNtpFrac);
+  NtpTime ntp2;
+  EXPECT_NE(ntp1, ntp2);
+
+  ntp2.Set(kNtpSec, kNtpFrac);
+  EXPECT_EQ(ntp1, ntp2);
+}
+
+TEST(NtpTimeTest, ToMsMeansToNtpMilliseconds) {
+  SimulatedClock clock(0x123456789abc);
+
+  NtpTime ntp = clock.CurrentNtpTime();
+  EXPECT_EQ(ntp.ToMs(), Clock::NtpToMs(ntp.seconds(), ntp.fractions()));
+  EXPECT_EQ(ntp.ToMs(), clock.CurrentNtpInMilliseconds());
+}
+
+TEST(NtpTimeTest, CanExplicitlyConvertToAndFromUint64) {
+  uint64_t untyped_time = 0x123456789;
+  NtpTime time(untyped_time);
+  EXPECT_EQ(untyped_time, static_cast<uint64_t>(time));
+  EXPECT_EQ(NtpTime(0x12345678, 0x90abcdef), NtpTime(0x1234567890abcdef));
+}
+
+}  // namespace
+}  // namespace webrtc
diff --git a/system_wrappers/source/rtp_to_ntp_estimator.cc b/system_wrappers/source/rtp_to_ntp_estimator.cc
new file mode 100644
index 0000000..21f64ec
--- /dev/null
+++ b/system_wrappers/source/rtp_to_ntp_estimator.cc
@@ -0,0 +1,205 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/include/rtp_to_ntp_estimator.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace {
+// Number of RTCP SR reports to use to map between RTP and NTP.
+const size_t kNumRtcpReportsToUse = 2;
+// Number of parameters samples used to smooth.
+const size_t kNumSamplesToSmooth = 20;
+
+
+// Calculates the RTP timestamp frequency from two pairs of NTP/RTP timestamps.
+bool CalculateFrequency(int64_t ntp_ms1,
+                        uint32_t rtp_timestamp1,
+                        int64_t ntp_ms2,
+                        uint32_t rtp_timestamp2,
+                        double* frequency_khz) {
+  if (ntp_ms1 <= ntp_ms2)
+    return false;
+
+  *frequency_khz = static_cast<double>(rtp_timestamp1 - rtp_timestamp2) /
+                   static_cast<double>(ntp_ms1 - ntp_ms2);
+  return true;
+}
+
+bool Contains(const std::list<RtpToNtpEstimator::RtcpMeasurement>& measurements,
+              const RtpToNtpEstimator::RtcpMeasurement& other) {
+  for (const auto& measurement : measurements) {
+    if (measurement.IsEqual(other))
+      return true;
+  }
+  return false;
+}
+}  // namespace
+
+bool RtpToNtpEstimator::Parameters::operator<(const Parameters& other) const {
+  if (frequency_khz < other.frequency_khz - 1e-6) {
+    return true;
+  } else if (frequency_khz > other.frequency_khz + 1e-6) {
+    return false;
+  } else {
+    return offset_ms < other.offset_ms - 1e-6;
+  }
+}
+
+bool RtpToNtpEstimator::Parameters::operator==(const Parameters& other) const {
+  return !(other < *this || *this < other);
+}
+
+bool RtpToNtpEstimator::Parameters::operator!=(const Parameters& other) const {
+  return other < *this || *this < other;
+}
+
+bool RtpToNtpEstimator::Parameters::operator<=(const Parameters& other) const {
+  return !(other < *this);
+}
+
+RtpToNtpEstimator::RtcpMeasurement::RtcpMeasurement(uint32_t ntp_secs,
+                                                    uint32_t ntp_frac,
+                                                    int64_t unwrapped_timestamp)
+    : ntp_time(ntp_secs, ntp_frac),
+      unwrapped_rtp_timestamp(unwrapped_timestamp) {}
+
+bool RtpToNtpEstimator::RtcpMeasurement::IsEqual(
+    const RtcpMeasurement& other) const {
+  // Use || since two equal timestamps will result in zero frequency and in
+  // RtpToNtpMs, |rtp_timestamp_ms| is estimated by dividing by the frequency.
+  return (ntp_time == other.ntp_time) ||
+         (unwrapped_rtp_timestamp == other.unwrapped_rtp_timestamp);
+}
+
+// Class for converting an RTP timestamp to the NTP domain.
+RtpToNtpEstimator::RtpToNtpEstimator()
+    : consecutive_invalid_samples_(0),
+      smoothing_filter_(kNumSamplesToSmooth),
+      params_calculated_(false) {}
+
+RtpToNtpEstimator::~RtpToNtpEstimator() {}
+
+void RtpToNtpEstimator::UpdateParameters() {
+  if (measurements_.size() != kNumRtcpReportsToUse)
+    return;
+
+  Parameters params;
+  int64_t timestamp_new = measurements_.front().unwrapped_rtp_timestamp;
+  int64_t timestamp_old = measurements_.back().unwrapped_rtp_timestamp;
+
+  int64_t ntp_ms_new = measurements_.front().ntp_time.ToMs();
+  int64_t ntp_ms_old = measurements_.back().ntp_time.ToMs();
+
+  if (!CalculateFrequency(ntp_ms_new, timestamp_new, ntp_ms_old, timestamp_old,
+                          &params.frequency_khz)) {
+    return;
+  }
+  params.offset_ms = timestamp_new - params.frequency_khz * ntp_ms_new;
+  params_calculated_ = true;
+  smoothing_filter_.Insert(params);
+}
+
+bool RtpToNtpEstimator::UpdateMeasurements(uint32_t ntp_secs,
+                                           uint32_t ntp_frac,
+                                           uint32_t rtp_timestamp,
+                                           bool* new_rtcp_sr) {
+  *new_rtcp_sr = false;
+
+  int64_t unwrapped_rtp_timestamp = unwrapper_.Unwrap(rtp_timestamp);
+
+  RtcpMeasurement new_measurement(ntp_secs, ntp_frac, unwrapped_rtp_timestamp);
+
+  if (Contains(measurements_, new_measurement)) {
+    // RTCP SR report already added.
+    return true;
+  }
+
+  if (!new_measurement.ntp_time.Valid())
+    return false;
+
+  int64_t ntp_ms_new = new_measurement.ntp_time.ToMs();
+  bool invalid_sample = false;
+  if (!measurements_.empty()) {
+    int64_t old_rtp_timestamp = measurements_.front().unwrapped_rtp_timestamp;
+    int64_t old_ntp_ms = measurements_.front().ntp_time.ToMs();
+    if (ntp_ms_new <= old_ntp_ms) {
+      invalid_sample = true;
+    } else if (unwrapped_rtp_timestamp <= old_rtp_timestamp) {
+      RTC_LOG(LS_WARNING)
+          << "Newer RTCP SR report with older RTP timestamp, dropping";
+      invalid_sample = true;
+    } else if (unwrapped_rtp_timestamp - old_rtp_timestamp > (1 << 25)) {
+      // Sanity check. No jumps too far into the future in rtp.
+      invalid_sample = true;
+    }
+  }
+
+  if (invalid_sample) {
+    ++consecutive_invalid_samples_;
+    if (consecutive_invalid_samples_ < kMaxInvalidSamples) {
+      return false;
+    }
+    RTC_LOG(LS_WARNING) << "Multiple consecutively invalid RTCP SR reports, "
+                           "clearing measurements.";
+    measurements_.clear();
+    smoothing_filter_.Reset();
+    params_calculated_ = false;
+  }
+  consecutive_invalid_samples_ = 0;
+
+  // Insert new RTCP SR report.
+  if (measurements_.size() == kNumRtcpReportsToUse)
+    measurements_.pop_back();
+
+  measurements_.push_front(new_measurement);
+  *new_rtcp_sr = true;
+
+  // List updated, calculate new parameters.
+  UpdateParameters();
+  return true;
+}
+
+bool RtpToNtpEstimator::Estimate(int64_t rtp_timestamp,
+                                 int64_t* rtp_timestamp_ms) const {
+  if (!params_calculated_)
+    return false;
+
+  int64_t rtp_timestamp_unwrapped = unwrapper_.Unwrap(rtp_timestamp);
+
+  Parameters params = smoothing_filter_.GetFilteredValue();
+
+  // params_calculated_ should not be true unless ms params.frequency_khz has
+  // been calculated to something non zero.
+  RTC_DCHECK_NE(params.frequency_khz, 0.0);
+  double rtp_ms =
+      (static_cast<double>(rtp_timestamp_unwrapped) - params.offset_ms) /
+          params.frequency_khz +
+      0.5f;
+
+  if (rtp_ms < 0)
+    return false;
+
+  *rtp_timestamp_ms = rtp_ms;
+  return true;
+}
+
+const rtc::Optional<RtpToNtpEstimator::Parameters> RtpToNtpEstimator::params()
+    const {
+  rtc::Optional<Parameters> res;
+  if (params_calculated_) {
+    res.emplace(smoothing_filter_.GetFilteredValue());
+  }
+  return res;
+}
+}  // namespace webrtc
diff --git a/system_wrappers/source/rtp_to_ntp_estimator_unittest.cc b/system_wrappers/source/rtp_to_ntp_estimator_unittest.cc
new file mode 100644
index 0000000..0647ec8
--- /dev/null
+++ b/system_wrappers/source/rtp_to_ntp_estimator_unittest.cc
@@ -0,0 +1,295 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/include/rtp_to_ntp_estimator.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+const uint32_t kOneMsInNtpFrac = 4294967;
+const uint32_t kTimestampTicksPerMs = 90;
+}  // namespace
+
+TEST(WrapAroundTests, OldRtcpWrapped_OldRtpTimestamp) {
+  RtpToNtpEstimator estimator;
+  bool new_sr;
+  uint32_t ntp_sec = 0;
+  uint32_t ntp_frac = 1;
+  uint32_t timestamp = 0;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  ntp_frac += kOneMsInNtpFrac;
+  timestamp -= kTimestampTicksPerMs;
+  // No wraparound will be detected, since we are not allowed to wrap below 0,
+  // but there will be huge rtp timestamp jump, e.g. old_timestamp = 0,
+  // new_timestamp = 4294967295, which should be detected.
+  EXPECT_FALSE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+}
+
+TEST(WrapAroundTests, OldRtcpWrapped_OldRtpTimestamp_Wraparound_Detected) {
+  RtpToNtpEstimator estimator;
+  bool new_sr;
+  uint32_t ntp_sec = 0;
+  uint32_t ntp_frac = 1;
+  uint32_t timestamp = 0xFFFFFFFE;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  ntp_frac += 2 * kOneMsInNtpFrac;
+  timestamp += 2 * kTimestampTicksPerMs;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  ntp_frac += kOneMsInNtpFrac;
+  timestamp -= kTimestampTicksPerMs;
+  // Expected to fail since the older RTCP has a smaller RTP timestamp than the
+  // newer (old:10, new:4294967206).
+  EXPECT_FALSE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+}
+
+TEST(WrapAroundTests, NewRtcpWrapped) {
+  RtpToNtpEstimator estimator;
+  bool new_sr;
+  uint32_t ntp_sec = 0;
+  uint32_t ntp_frac = 1;
+  uint32_t timestamp = 0xFFFFFFFF;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  ntp_frac += kOneMsInNtpFrac;
+  timestamp += kTimestampTicksPerMs;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  int64_t timestamp_ms = -1;
+  EXPECT_TRUE(estimator.Estimate(0xFFFFFFFF, &timestamp_ms));
+  // Since this RTP packet has the same timestamp as the RTCP packet constructed
+  // at time 0 it should be mapped to 0 as well.
+  EXPECT_EQ(0, timestamp_ms);
+}
+
+TEST(WrapAroundTests, RtpWrapped) {
+  RtpToNtpEstimator estimator;
+  bool new_sr;
+  uint32_t ntp_sec = 0;
+  uint32_t ntp_frac = 1;
+  uint32_t timestamp = 0xFFFFFFFF - 2 * kTimestampTicksPerMs;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  ntp_frac += kOneMsInNtpFrac;
+  timestamp += kTimestampTicksPerMs;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+
+  int64_t timestamp_ms = -1;
+  EXPECT_TRUE(
+      estimator.Estimate(0xFFFFFFFF - 2 * kTimestampTicksPerMs, &timestamp_ms));
+  // Since this RTP packet has the same timestamp as the RTCP packet constructed
+  // at time 0 it should be mapped to 0 as well.
+  EXPECT_EQ(0, timestamp_ms);
+  // Two kTimestampTicksPerMs advanced.
+  timestamp += kTimestampTicksPerMs;
+  EXPECT_TRUE(estimator.Estimate(timestamp, &timestamp_ms));
+  EXPECT_EQ(2, timestamp_ms);
+  // Wrapped rtp.
+  timestamp += kTimestampTicksPerMs;
+  EXPECT_TRUE(estimator.Estimate(timestamp, &timestamp_ms));
+  EXPECT_EQ(3, timestamp_ms);
+}
+
+TEST(WrapAroundTests, OldRtp_RtcpsWrapped) {
+  RtpToNtpEstimator estimator;
+  bool new_sr;
+  uint32_t ntp_sec = 0;
+  uint32_t ntp_frac = 1;
+  uint32_t timestamp = 0xFFFFFFFF;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  ntp_frac += kOneMsInNtpFrac;
+  timestamp += kTimestampTicksPerMs;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  timestamp -= 2 * kTimestampTicksPerMs;
+  int64_t timestamp_ms = 0xFFFFFFFF;
+  EXPECT_FALSE(estimator.Estimate(timestamp, &timestamp_ms));
+}
+
+TEST(WrapAroundTests, OldRtp_NewRtcpWrapped) {
+  RtpToNtpEstimator estimator;
+  bool new_sr;
+  uint32_t ntp_sec = 0;
+  uint32_t ntp_frac = 1;
+  uint32_t timestamp = 0xFFFFFFFF;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  ntp_frac += kOneMsInNtpFrac;
+  timestamp += kTimestampTicksPerMs;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  timestamp -= kTimestampTicksPerMs;
+  int64_t timestamp_ms = -1;
+  EXPECT_TRUE(estimator.Estimate(timestamp, &timestamp_ms));
+  // Constructed at the same time as the first RTCP and should therefore be
+  // mapped to zero.
+  EXPECT_EQ(0, timestamp_ms);
+}
+
+TEST(WrapAroundTests, GracefullyHandleRtpJump) {
+  RtpToNtpEstimator estimator;
+  bool new_sr;
+  uint32_t ntp_sec = 0;
+  uint32_t ntp_frac = 1;
+  uint32_t timestamp = 0;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  ntp_frac += kOneMsInNtpFrac;
+  timestamp += kTimestampTicksPerMs;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  ntp_frac += kOneMsInNtpFrac;
+  timestamp -= kTimestampTicksPerMs;
+  int64_t timestamp_ms = -1;
+  EXPECT_TRUE(estimator.Estimate(timestamp, &timestamp_ms));
+  // Constructed at the same time as the first RTCP and should therefore be
+  // mapped to zero.
+  EXPECT_EQ(0, timestamp_ms);
+
+  timestamp -= 0xFFFFF;
+  for (int i = 0; i < RtpToNtpEstimator::kMaxInvalidSamples - 1; ++i) {
+    EXPECT_FALSE(
+        estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+    ntp_frac += kOneMsInNtpFrac;
+    timestamp += kTimestampTicksPerMs;
+  }
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  ntp_frac += kOneMsInNtpFrac;
+  timestamp += kTimestampTicksPerMs;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  ntp_frac += kOneMsInNtpFrac;
+  timestamp += kTimestampTicksPerMs;
+
+  timestamp_ms = -1;
+  EXPECT_TRUE(estimator.Estimate(timestamp, &timestamp_ms));
+  // 6 milliseconds has passed since the start of the test.
+  EXPECT_EQ(6, timestamp_ms);
+}
+
+TEST(UpdateRtcpMeasurementTests, FailsForZeroNtp) {
+  RtpToNtpEstimator estimator;
+  uint32_t ntp_sec = 0;
+  uint32_t ntp_frac = 0;
+  uint32_t timestamp = 0x12345678;
+  bool new_sr;
+  EXPECT_FALSE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  EXPECT_FALSE(new_sr);
+}
+
+TEST(UpdateRtcpMeasurementTests, FailsForEqualNtp) {
+  RtpToNtpEstimator estimator;
+  uint32_t ntp_sec = 0;
+  uint32_t ntp_frac = 699925050;
+  uint32_t timestamp = 0x12345678;
+  bool new_sr;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  EXPECT_TRUE(new_sr);
+  // Ntp time already added, list not updated.
+  ++timestamp;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  EXPECT_FALSE(new_sr);
+}
+
+TEST(UpdateRtcpMeasurementTests, FailsForOldNtp) {
+  RtpToNtpEstimator estimator;
+  uint32_t ntp_sec = 1;
+  uint32_t ntp_frac = 699925050;
+  uint32_t timestamp = 0x12345678;
+  bool new_sr;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  EXPECT_TRUE(new_sr);
+  // Old ntp time, list not updated.
+  ntp_frac -= kOneMsInNtpFrac;
+  timestamp += kTimestampTicksPerMs;
+  EXPECT_FALSE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+}
+
+TEST(UpdateRtcpMeasurementTests, FailsForEqualTimestamp) {
+  RtpToNtpEstimator estimator;
+  uint32_t ntp_sec = 0;
+  uint32_t ntp_frac = 2;
+  uint32_t timestamp = 0x12345678;
+  bool new_sr;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  EXPECT_TRUE(new_sr);
+  // Timestamp already added, list not updated.
+  ++ntp_frac;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  EXPECT_FALSE(new_sr);
+}
+
+TEST(UpdateRtcpMeasurementTests, FailsForOldRtpTimestamp) {
+  RtpToNtpEstimator estimator;
+  uint32_t ntp_sec = 0;
+  uint32_t ntp_frac = 2;
+  uint32_t timestamp = 0x12345678;
+  bool new_sr;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  EXPECT_TRUE(new_sr);
+  // Old timestamp, list not updated.
+  ntp_frac += kOneMsInNtpFrac;
+  timestamp -= kTimestampTicksPerMs;
+  EXPECT_FALSE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  EXPECT_FALSE(new_sr);
+}
+
+TEST(UpdateRtcpMeasurementTests, VerifyParameters) {
+  RtpToNtpEstimator estimator;
+  uint32_t ntp_sec = 1;
+  uint32_t ntp_frac = 2;
+  uint32_t timestamp = 0x12345678;
+  bool new_sr;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  EXPECT_TRUE(new_sr);
+  EXPECT_FALSE(estimator.params());
+  // Add second report, parameters should be calculated.
+  ntp_frac += kOneMsInNtpFrac;
+  timestamp += kTimestampTicksPerMs;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  EXPECT_TRUE(estimator.params());
+  EXPECT_DOUBLE_EQ(90.0, estimator.params()->frequency_khz);
+  EXPECT_NE(0.0, estimator.params()->offset_ms);
+}
+
+TEST(RtpToNtpTests, FailsForNoParameters) {
+  RtpToNtpEstimator estimator;
+  uint32_t ntp_sec = 1;
+  uint32_t ntp_frac = 2;
+  uint32_t timestamp = 0x12345678;
+  bool new_sr;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  EXPECT_TRUE(new_sr);
+  // Parameters are not calculated, conversion of RTP to NTP time should fail.
+  EXPECT_FALSE(estimator.params());
+  int64_t timestamp_ms = -1;
+  EXPECT_FALSE(estimator.Estimate(timestamp, &timestamp_ms));
+}
+
+};  // namespace webrtc
diff --git a/system_wrappers/source/runtime_enabled_features_default.cc b/system_wrappers/source/runtime_enabled_features_default.cc
new file mode 100644
index 0000000..31a3ff7
--- /dev/null
+++ b/system_wrappers/source/runtime_enabled_features_default.cc
@@ -0,0 +1,31 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/include/runtime_enabled_features.h"
+
+#include "rtc_base/flags.h"
+
+namespace flags {
+DEFINE_bool(enable_dual_stream_mode,
+            false,
+            "Enables dual video stream mode.");
+}
+
+namespace webrtc {
+namespace runtime_enabled_features {
+
+bool IsFeatureEnabled(std::string feature_name) {
+  if (feature_name == kDualStreamModeFeatureName)
+    return flags::FLAG_enable_dual_stream_mode;
+  return false;
+}
+
+}  // namespace runtime_enabled_features
+}  // namespace webrtc
diff --git a/system_wrappers/source/rw_lock.cc b/system_wrappers/source/rw_lock.cc
new file mode 100644
index 0000000..c38c44a
--- /dev/null
+++ b/system_wrappers/source/rw_lock.cc
@@ -0,0 +1,31 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/include/rw_lock_wrapper.h"
+
+#include <assert.h>
+
+#if defined(_WIN32)
+#include "system_wrappers/source/rw_lock_win.h"
+#else
+#include "system_wrappers/source/rw_lock_posix.h"
+#endif
+
+namespace webrtc {
+
+RWLockWrapper* RWLockWrapper::CreateRWLock() {
+#ifdef _WIN32
+  return RWLockWin::Create();
+#else
+  return RWLockPosix::Create();
+#endif
+}
+
+}  // namespace webrtc
diff --git a/system_wrappers/source/rw_lock_posix.cc b/system_wrappers/source/rw_lock_posix.cc
new file mode 100644
index 0000000..412873c
--- /dev/null
+++ b/system_wrappers/source/rw_lock_posix.cc
@@ -0,0 +1,50 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/source/rw_lock_posix.h"
+
+namespace webrtc {
+
+RWLockPosix::RWLockPosix() : lock_() {}
+
+RWLockPosix::~RWLockPosix() {
+  pthread_rwlock_destroy(&lock_);
+}
+
+RWLockPosix* RWLockPosix::Create() {
+  RWLockPosix* ret_val = new RWLockPosix();
+  if (!ret_val->Init()) {
+    delete ret_val;
+    return NULL;
+  }
+  return ret_val;
+}
+
+bool RWLockPosix::Init() {
+  return pthread_rwlock_init(&lock_, 0) == 0;
+}
+
+void RWLockPosix::AcquireLockExclusive() {
+  pthread_rwlock_wrlock(&lock_);
+}
+
+void RWLockPosix::ReleaseLockExclusive() {
+  pthread_rwlock_unlock(&lock_);
+}
+
+void RWLockPosix::AcquireLockShared() {
+  pthread_rwlock_rdlock(&lock_);
+}
+
+void RWLockPosix::ReleaseLockShared() {
+  pthread_rwlock_unlock(&lock_);
+}
+
+}  // namespace webrtc
diff --git a/system_wrappers/source/rw_lock_posix.h b/system_wrappers/source/rw_lock_posix.h
new file mode 100644
index 0000000..d15682b
--- /dev/null
+++ b/system_wrappers/source/rw_lock_posix.h
@@ -0,0 +1,41 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SYSTEM_WRAPPERS_SOURCE_RW_LOCK_POSIX_H_
+#define SYSTEM_WRAPPERS_SOURCE_RW_LOCK_POSIX_H_
+
+#include "system_wrappers/include/rw_lock_wrapper.h"
+#include "typedefs.h"  // NOLINT(build/include)
+
+#include <pthread.h>
+
+namespace webrtc {
+
+class RWLockPosix : public RWLockWrapper {
+ public:
+  static RWLockPosix* Create();
+  ~RWLockPosix() override;
+
+  void AcquireLockExclusive() override;
+  void ReleaseLockExclusive() override;
+
+  void AcquireLockShared() override;
+  void ReleaseLockShared() override;
+
+ private:
+  RWLockPosix();
+  bool Init();
+
+  pthread_rwlock_t lock_;
+};
+
+}  // namespace webrtc
+
+#endif  // SYSTEM_WRAPPERS_SOURCE_RW_LOCK_POSIX_H_
diff --git a/system_wrappers/source/rw_lock_win.cc b/system_wrappers/source/rw_lock_win.cc
new file mode 100644
index 0000000..23df15a
--- /dev/null
+++ b/system_wrappers/source/rw_lock_win.cc
@@ -0,0 +1,95 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/source/rw_lock_win.h"
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+static bool native_rw_locks_supported = false;
+static bool module_load_attempted = false;
+static HMODULE library = NULL;
+
+typedef void(WINAPI* InitializeSRWLock)(PSRWLOCK);
+
+typedef void(WINAPI* AcquireSRWLockExclusive)(PSRWLOCK);
+typedef void(WINAPI* ReleaseSRWLockExclusive)(PSRWLOCK);
+
+typedef void(WINAPI* AcquireSRWLockShared)(PSRWLOCK);
+typedef void(WINAPI* ReleaseSRWLockShared)(PSRWLOCK);
+
+InitializeSRWLock initialize_srw_lock;
+AcquireSRWLockExclusive acquire_srw_lock_exclusive;
+AcquireSRWLockShared acquire_srw_lock_shared;
+ReleaseSRWLockShared release_srw_lock_shared;
+ReleaseSRWLockExclusive release_srw_lock_exclusive;
+
+RWLockWin::RWLockWin() {
+  initialize_srw_lock(&lock_);
+}
+
+RWLockWin* RWLockWin::Create() {
+  if (!LoadModule()) {
+    return NULL;
+  }
+  return new RWLockWin();
+}
+
+void RWLockWin::AcquireLockExclusive() {
+  acquire_srw_lock_exclusive(&lock_);
+}
+
+void RWLockWin::ReleaseLockExclusive() {
+  release_srw_lock_exclusive(&lock_);
+}
+
+void RWLockWin::AcquireLockShared() {
+  acquire_srw_lock_shared(&lock_);
+}
+
+void RWLockWin::ReleaseLockShared() {
+  release_srw_lock_shared(&lock_);
+}
+
+bool RWLockWin::LoadModule() {
+  if (module_load_attempted) {
+    return native_rw_locks_supported;
+  }
+  module_load_attempted = true;
+  // Use native implementation if supported (i.e Vista+)
+  library = LoadLibrary(TEXT("Kernel32.dll"));
+  if (!library) {
+    return false;
+  }
+  RTC_LOG(LS_VERBOSE) << "Loaded Kernel.dll";
+
+  initialize_srw_lock =
+      (InitializeSRWLock)GetProcAddress(library, "InitializeSRWLock");
+
+  acquire_srw_lock_exclusive = (AcquireSRWLockExclusive)GetProcAddress(
+      library, "AcquireSRWLockExclusive");
+  release_srw_lock_exclusive = (ReleaseSRWLockExclusive)GetProcAddress(
+      library, "ReleaseSRWLockExclusive");
+  acquire_srw_lock_shared =
+      (AcquireSRWLockShared)GetProcAddress(library, "AcquireSRWLockShared");
+  release_srw_lock_shared =
+      (ReleaseSRWLockShared)GetProcAddress(library, "ReleaseSRWLockShared");
+
+  if (initialize_srw_lock && acquire_srw_lock_exclusive &&
+      release_srw_lock_exclusive && acquire_srw_lock_shared &&
+      release_srw_lock_shared) {
+    RTC_LOG(LS_VERBOSE) << "Loaded Native RW Lock";
+    native_rw_locks_supported = true;
+  }
+  return native_rw_locks_supported;
+}
+
+}  // namespace webrtc
diff --git a/system_wrappers/source/rw_lock_win.h b/system_wrappers/source/rw_lock_win.h
new file mode 100644
index 0000000..41537ba
--- /dev/null
+++ b/system_wrappers/source/rw_lock_win.h
@@ -0,0 +1,40 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SYSTEM_WRAPPERS_SOURCE_RW_LOCK_WIN_H_
+#define SYSTEM_WRAPPERS_SOURCE_RW_LOCK_WIN_H_
+
+#include "system_wrappers/include/rw_lock_wrapper.h"
+
+#include <Windows.h>
+
+namespace webrtc {
+
+class RWLockWin : public RWLockWrapper {
+ public:
+  static RWLockWin* Create();
+  ~RWLockWin() {}
+
+  virtual void AcquireLockExclusive();
+  virtual void ReleaseLockExclusive();
+
+  virtual void AcquireLockShared();
+  virtual void ReleaseLockShared();
+
+ private:
+  RWLockWin();
+  static bool LoadModule();
+
+  SRWLOCK lock_;
+};
+
+}  // namespace webrtc
+
+#endif  // SYSTEM_WRAPPERS_SOURCE_RW_LOCK_WIN_H_
diff --git a/system_wrappers/source/sleep.cc b/system_wrappers/source/sleep.cc
new file mode 100644
index 0000000..e2fa486
--- /dev/null
+++ b/system_wrappers/source/sleep.cc
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+// An OS-independent sleep function.
+
+#include "system_wrappers/include/sleep.h"
+
+#ifdef _WIN32
+// For Sleep()
+#include <windows.h>
+#else
+// For nanosleep()
+#include <time.h>
+#endif
+
+namespace webrtc {
+
+void SleepMs(int msecs) {
+#ifdef _WIN32
+  Sleep(msecs);
+#else
+  struct timespec short_wait;
+  struct timespec remainder;
+  short_wait.tv_sec = msecs / 1000;
+  short_wait.tv_nsec = (msecs % 1000) * 1000 * 1000;
+  nanosleep(&short_wait, &remainder);
+#endif
+}
+
+}  // namespace webrtc
diff --git a/system_wrappers/source/timestamp_extrapolator.cc b/system_wrappers/source/timestamp_extrapolator.cc
new file mode 100644
index 0000000..b8c6ba0
--- /dev/null
+++ b/system_wrappers/source/timestamp_extrapolator.cc
@@ -0,0 +1,208 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/include/timestamp_extrapolator.h"
+
+#include <algorithm>
+
+namespace webrtc {
+
+TimestampExtrapolator::TimestampExtrapolator(int64_t start_ms)
+    : _rwLock(RWLockWrapper::CreateRWLock()),
+      _startMs(0),
+      _firstTimestamp(0),
+      _wrapArounds(0),
+      _prevUnwrappedTimestamp(-1),
+      _prevWrapTimestamp(-1),
+      _lambda(1),
+      _firstAfterReset(true),
+      _packetCount(0),
+      _startUpFilterDelayInPackets(2),
+      _detectorAccumulatorPos(0),
+      _detectorAccumulatorNeg(0),
+      _alarmThreshold(60e3),
+      _accDrift(6600),  // in timestamp ticks, i.e. 15 ms
+      _accMaxError(7000),
+      _pP11(1e10) {
+  Reset(start_ms);
+}
+
+TimestampExtrapolator::~TimestampExtrapolator() {
+  delete _rwLock;
+}
+
+void TimestampExtrapolator::Reset(int64_t start_ms) {
+  WriteLockScoped wl(*_rwLock);
+  _startMs = start_ms;
+  _prevMs = _startMs;
+  _firstTimestamp = 0;
+  _w[0] = 90.0;
+  _w[1] = 0;
+  _pP[0][0] = 1;
+  _pP[1][1] = _pP11;
+  _pP[0][1] = _pP[1][0] = 0;
+  _firstAfterReset = true;
+  _prevUnwrappedTimestamp = -1;
+  _prevWrapTimestamp = -1;
+  _wrapArounds = 0;
+  _packetCount = 0;
+  _detectorAccumulatorPos = 0;
+  _detectorAccumulatorNeg = 0;
+}
+
+void TimestampExtrapolator::Update(int64_t tMs, uint32_t ts90khz) {
+  _rwLock->AcquireLockExclusive();
+  if (tMs - _prevMs > 10e3) {
+    // Ten seconds without a complete frame.
+    // Reset the extrapolator
+    _rwLock->ReleaseLockExclusive();
+    Reset(tMs);
+    _rwLock->AcquireLockExclusive();
+  } else {
+    _prevMs = tMs;
+  }
+
+  // Remove offset to prevent badly scaled matrices
+  tMs -= _startMs;
+
+  CheckForWrapArounds(ts90khz);
+
+  int64_t unwrapped_ts90khz =
+      static_cast<int64_t>(ts90khz) +
+      _wrapArounds * ((static_cast<int64_t>(1) << 32) - 1);
+
+  if (_firstAfterReset) {
+    // Make an initial guess of the offset,
+    // should be almost correct since tMs - _startMs
+    // should about zero at this time.
+    _w[1] = -_w[0] * tMs;
+    _firstTimestamp = unwrapped_ts90khz;
+    _firstAfterReset = false;
+  }
+
+  double residual = (static_cast<double>(unwrapped_ts90khz) - _firstTimestamp) -
+                    static_cast<double>(tMs) * _w[0] - _w[1];
+  if (DelayChangeDetection(residual) &&
+      _packetCount >= _startUpFilterDelayInPackets) {
+    // A sudden change of average network delay has been detected.
+    // Force the filter to adjust its offset parameter by changing
+    // the offset uncertainty. Don't do this during startup.
+    _pP[1][1] = _pP11;
+  }
+
+  if (_prevUnwrappedTimestamp >= 0 &&
+      unwrapped_ts90khz < _prevUnwrappedTimestamp) {
+    // Drop reordered frames.
+    _rwLock->ReleaseLockExclusive();
+    return;
+  }
+
+  // T = [t(k) 1]';
+  // that = T'*w;
+  // K = P*T/(lambda + T'*P*T);
+  double K[2];
+  K[0] = _pP[0][0] * tMs + _pP[0][1];
+  K[1] = _pP[1][0] * tMs + _pP[1][1];
+  double TPT = _lambda + tMs * K[0] + K[1];
+  K[0] /= TPT;
+  K[1] /= TPT;
+  // w = w + K*(ts(k) - that);
+  _w[0] = _w[0] + K[0] * residual;
+  _w[1] = _w[1] + K[1] * residual;
+  // P = 1/lambda*(P - K*T'*P);
+  double p00 =
+      1 / _lambda * (_pP[0][0] - (K[0] * tMs * _pP[0][0] + K[0] * _pP[1][0]));
+  double p01 =
+      1 / _lambda * (_pP[0][1] - (K[0] * tMs * _pP[0][1] + K[0] * _pP[1][1]));
+  _pP[1][0] =
+      1 / _lambda * (_pP[1][0] - (K[1] * tMs * _pP[0][0] + K[1] * _pP[1][0]));
+  _pP[1][1] =
+      1 / _lambda * (_pP[1][1] - (K[1] * tMs * _pP[0][1] + K[1] * _pP[1][1]));
+  _pP[0][0] = p00;
+  _pP[0][1] = p01;
+  _prevUnwrappedTimestamp = unwrapped_ts90khz;
+  if (_packetCount < _startUpFilterDelayInPackets) {
+    _packetCount++;
+  }
+  _rwLock->ReleaseLockExclusive();
+}
+
+int64_t TimestampExtrapolator::ExtrapolateLocalTime(uint32_t timestamp90khz) {
+  ReadLockScoped rl(*_rwLock);
+  int64_t localTimeMs = 0;
+  CheckForWrapArounds(timestamp90khz);
+  double unwrapped_ts90khz =
+      static_cast<double>(timestamp90khz) +
+      _wrapArounds * ((static_cast<int64_t>(1) << 32) - 1);
+  if (_packetCount == 0) {
+    localTimeMs = -1;
+  } else if (_packetCount < _startUpFilterDelayInPackets) {
+    localTimeMs =
+        _prevMs +
+        static_cast<int64_t>(
+            static_cast<double>(unwrapped_ts90khz - _prevUnwrappedTimestamp) /
+                90.0 +
+            0.5);
+  } else {
+    if (_w[0] < 1e-3) {
+      localTimeMs = _startMs;
+    } else {
+      double timestampDiff =
+          unwrapped_ts90khz - static_cast<double>(_firstTimestamp);
+      localTimeMs = static_cast<int64_t>(static_cast<double>(_startMs) +
+                                         (timestampDiff - _w[1]) / _w[0] + 0.5);
+    }
+  }
+  return localTimeMs;
+}
+
+// Investigates if the timestamp clock has overflowed since the last timestamp
+// and keeps track of the number of wrap arounds since reset.
+void TimestampExtrapolator::CheckForWrapArounds(uint32_t ts90khz) {
+  if (_prevWrapTimestamp == -1) {
+    _prevWrapTimestamp = ts90khz;
+    return;
+  }
+  if (ts90khz < _prevWrapTimestamp) {
+    // This difference will probably be less than -2^31 if we have had a wrap
+    // around (e.g. timestamp = 1, _previousTimestamp = 2^32 - 1). Since it is
+    // casted to a Word32, it should be positive.
+    if (static_cast<int32_t>(ts90khz - _prevWrapTimestamp) > 0) {
+      // Forward wrap around
+      _wrapArounds++;
+    }
+  }
+  // This difference will probably be less than -2^31 if we have had a backward
+  // wrap around. Since it is casted to a Word32, it should be positive.
+  else if (static_cast<int32_t>(_prevWrapTimestamp - ts90khz) > 0) {
+    // Backward wrap around
+    _wrapArounds--;
+  }
+  _prevWrapTimestamp = ts90khz;
+}
+
+bool TimestampExtrapolator::DelayChangeDetection(double error) {
+  // CUSUM detection of sudden delay changes
+  error = (error > 0) ? std::min(error, _accMaxError)
+                      : std::max(error, -_accMaxError);
+  _detectorAccumulatorPos =
+      std::max(_detectorAccumulatorPos + error - _accDrift, (double)0);
+  _detectorAccumulatorNeg =
+      std::min(_detectorAccumulatorNeg + error + _accDrift, (double)0);
+  if (_detectorAccumulatorPos > _alarmThreshold ||
+      _detectorAccumulatorNeg < -_alarmThreshold) {
+    // Alarm
+    _detectorAccumulatorPos = _detectorAccumulatorNeg = 0;
+    return true;
+  }
+  return false;
+}
+
+}  // namespace webrtc
diff --git a/typedefs.h b/typedefs.h
new file mode 100644
index 0000000..073b180
--- /dev/null
+++ b/typedefs.h
@@ -0,0 +1,115 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains platform-specific typedefs and defines.
+// Much of it is derived from Chromium's build/build_config.h.
+
+#ifndef TYPEDEFS_H_
+#define TYPEDEFS_H_
+
+// Processor architecture detection.  For more info on what's defined, see:
+//   http://msdn.microsoft.com/en-us/library/b0084kay.aspx
+//   http://www.agner.org/optimize/calling_conventions.pdf
+//   or with gcc, run: "echo | gcc -E -dM -"
+#if defined(_M_X64) || defined(__x86_64__)
+#define WEBRTC_ARCH_X86_FAMILY
+#define WEBRTC_ARCH_X86_64
+#define WEBRTC_ARCH_64_BITS
+#define WEBRTC_ARCH_LITTLE_ENDIAN
+#elif defined(__aarch64__)
+#define WEBRTC_ARCH_ARM_FAMILY
+#define WEBRTC_ARCH_64_BITS
+#define WEBRTC_ARCH_LITTLE_ENDIAN
+#elif defined(_M_IX86) || defined(__i386__)
+#define WEBRTC_ARCH_X86_FAMILY
+#define WEBRTC_ARCH_X86
+#define WEBRTC_ARCH_32_BITS
+#define WEBRTC_ARCH_LITTLE_ENDIAN
+#elif defined(__ARMEL__)
+#define WEBRTC_ARCH_ARM_FAMILY
+#define WEBRTC_ARCH_32_BITS
+#define WEBRTC_ARCH_LITTLE_ENDIAN
+#elif defined(__MIPSEL__)
+#define WEBRTC_ARCH_MIPS_FAMILY
+#if defined(__LP64__)
+#define WEBRTC_ARCH_64_BITS
+#else
+#define WEBRTC_ARCH_32_BITS
+#endif
+#define WEBRTC_ARCH_LITTLE_ENDIAN
+#elif defined(__pnacl__)
+#define WEBRTC_ARCH_32_BITS
+#define WEBRTC_ARCH_LITTLE_ENDIAN
+#else
+#error Please add support for your architecture in typedefs.h
+#endif
+
+#if !(defined(WEBRTC_ARCH_LITTLE_ENDIAN) ^ defined(WEBRTC_ARCH_BIG_ENDIAN))
+#error Define either WEBRTC_ARCH_LITTLE_ENDIAN or WEBRTC_ARCH_BIG_ENDIAN
+#endif
+
+// TODO(zhongwei.yao): WEBRTC_CPU_DETECTION is only used in one place; we should
+// probably just remove it.
+#if (defined(WEBRTC_ARCH_X86_FAMILY) && !defined(__SSE2__))
+#define WEBRTC_CPU_DETECTION
+#endif
+
+#include <stdint.h>
+
+// Annotate a function indicating the caller must examine the return value.
+// Use like:
+//   int foo() RTC_WARN_UNUSED_RESULT;
+// To explicitly ignore a result, cast to void.
+// TODO(kwiberg): Remove when we can use [[nodiscard]] from C++17.
+#if defined(__clang__)
+#define RTC_WARN_UNUSED_RESULT __attribute__((__warn_unused_result__))
+#elif defined(__GNUC__)
+// gcc has a __warn_unused_result__ attribute, but you can't quiet it by
+// casting to void, so we don't use it.
+#define RTC_WARN_UNUSED_RESULT
+#else
+#define RTC_WARN_UNUSED_RESULT
+#endif
+
+// Put after a variable that might not be used, to prevent compiler warnings:
+//   int result ATTRIBUTE_UNUSED = DoSomething();
+//   assert(result == 17);
+// Deprecated since it only works with GCC & clang. See RTC_UNUSED below.
+// TODO(terelius): Remove.
+#ifndef ATTRIBUTE_UNUSED
+#if defined(__GNUC__) || defined(__clang__)
+#define ATTRIBUTE_UNUSED __attribute__ ((__unused__))
+#else
+#define ATTRIBUTE_UNUSED
+#endif
+#endif
+
+#ifndef NO_RETURN
+// Annotate a function that will not return control flow to the caller.
+#if defined(_MSC_VER)
+#define NO_RETURN __declspec(noreturn)
+#elif defined(__GNUC__)
+#define NO_RETURN __attribute__ ((__noreturn__))
+#else
+#define NO_RETURN
+#endif
+#endif
+
+// Prevent the compiler from warning about an unused variable. For example:
+//   int result = DoSomething();
+//   assert(result == 17);
+//   RTC_UNUSED(result);
+// Note: In most cases it is better to remove the unused variable rather than
+// suppressing the compiler warning.
+#ifndef RTC_UNUSED
+#define RTC_UNUSED(x) static_cast<void>(x)
+#endif  // RTC_UNUSED
+
+#endif  // TYPEDEFS_H_